path: root/tensorflow/tools
author    Martin Wicke <577277+martinwicke@users.noreply.github.com>  2018-09-22 09:45:11 -0700
committer GitHub <noreply@github.com>  2018-09-22 09:45:11 -0700
commit    413ac36f33deb0c354dd687963d2410eab048970 (patch)
tree      fd4dc4e9fc5a76efd62c78c213b0e34983359256 /tensorflow/tools
parent    c22d996c3d6a16db292bd3464b2ef7b91adae676 (diff)
parent    e692dda4c8b199555e2fa32132a7784e0893c870 (diff)
Merge branch 'master' into fix_expand_dims
Diffstat (limited to 'tensorflow/tools')
-rw-r--r--  tensorflow/tools/api/generator/BUILD | 136
-rw-r--r--  tensorflow/tools/api/generator/create_python_api.py | 291
-rw-r--r--  tensorflow/tools/api/generator/create_python_api_test.py | 86
-rw-r--r--  tensorflow/tools/api/golden/BUILD | 9
-rw-r--r--  tensorflow/tools/api/golden/tensorflow.-attr-value.-list-value.pbtxt | 108
-rw-r--r--  tensorflow/tools/api/golden/tensorflow.-attr-value.pbtxt | 120
-rw-r--r--  tensorflow/tools/api/golden/tensorflow.-config-proto.-device-count-entry.pbtxt | 84
-rw-r--r--  tensorflow/tools/api/golden/tensorflow.-config-proto.-experimental.pbtxt | 24
-rw-r--r--  tensorflow/tools/api/golden/tensorflow.-config-proto.pbtxt | 282
-rw-r--r--  tensorflow/tools/api/golden/tensorflow.-event.pbtxt | 112
-rw-r--r--  tensorflow/tools/api/golden/tensorflow.-g-p-u-options.pbtxt | 116
-rw-r--r--  tensorflow/tools/api/golden/tensorflow.-graph-def.pbtxt | 92
-rw-r--r--  tensorflow/tools/api/golden/tensorflow.-graph-options.pbtxt | 112
-rw-r--r--  tensorflow/tools/api/golden/tensorflow.-histogram-proto.pbtxt | 104
-rw-r--r--  tensorflow/tools/api/golden/tensorflow.-log-message.pbtxt | 112
-rw-r--r--  tensorflow/tools/api/golden/tensorflow.-meta-graph-def.-collection-def-entry.pbtxt | 84
-rw-r--r--  tensorflow/tools/api/golden/tensorflow.-meta-graph-def.-meta-info-def.pbtxt | 104
-rw-r--r--  tensorflow/tools/api/golden/tensorflow.-meta-graph-def.-signature-def-entry.pbtxt | 84
-rw-r--r--  tensorflow/tools/api/golden/tensorflow.-meta-graph-def.pbtxt | 112
-rw-r--r--  tensorflow/tools/api/golden/tensorflow.-name-attr-list.-attr-entry.pbtxt | 84
-rw-r--r--  tensorflow/tools/api/golden/tensorflow.-name-attr-list.pbtxt | 88
-rw-r--r--  tensorflow/tools/api/golden/tensorflow.-node-def.-attr-entry.pbtxt | 84
-rw-r--r--  tensorflow/tools/api/golden/tensorflow.-node-def.pbtxt | 100
-rw-r--r--  tensorflow/tools/api/golden/tensorflow.-optimizer-options.pbtxt | 132
-rw-r--r--  tensorflow/tools/api/golden/tensorflow.-run-metadata.pbtxt | 88
-rw-r--r--  tensorflow/tools/api/golden/tensorflow.-run-options.pbtxt | 120
-rw-r--r--  tensorflow/tools/api/golden/tensorflow.-session-log.pbtxt | 108
-rw-r--r--  tensorflow/tools/api/golden/tensorflow.-summary-metadata.-plugin-data.pbtxt | 84
-rw-r--r--  tensorflow/tools/api/golden/tensorflow.-summary-metadata.pbtxt | 92
-rw-r--r--  tensorflow/tools/api/golden/tensorflow.-summary.-audio.pbtxt | 96
-rw-r--r--  tensorflow/tools/api/golden/tensorflow.-summary.-image.pbtxt | 92
-rw-r--r--  tensorflow/tools/api/golden/tensorflow.-summary.-value.pbtxt | 112
-rw-r--r--  tensorflow/tools/api/golden/tensorflow.-summary.pbtxt | 92
-rw-r--r--  tensorflow/tools/api/golden/tensorflow.-tensor-info.-coo-sparse.pbtxt | 88
-rw-r--r--  tensorflow/tools/api/golden/tensorflow.-tensor-info.pbtxt | 96
-rw-r--r--  tensorflow/tools/api/golden/tensorflow.data.-iterator.pbtxt | 1
-rw-r--r--  tensorflow/tools/api/golden/tensorflow.estimator.-boosted-trees-classifier.pbtxt | 6
-rw-r--r--  tensorflow/tools/api/golden/tensorflow.estimator.-boosted-trees-regressor.pbtxt | 6
-rw-r--r--  tensorflow/tools/api/golden/tensorflow.estimator.-run-config.pbtxt | 10
-rw-r--r--  tensorflow/tools/api/golden/tensorflow.image.pbtxt | 30
-rw-r--r--  tensorflow/tools/api/golden/tensorflow.keras.-model.pbtxt | 22
-rw-r--r--  tensorflow/tools/api/golden/tensorflow.keras.-sequential.pbtxt | 26
-rw-r--r--  tensorflow/tools/api/golden/tensorflow.keras.activations.pbtxt | 2
-rw-r--r--  tensorflow/tools/api/golden/tensorflow.keras.applications.densenet.pbtxt | 23
-rw-r--r--  tensorflow/tools/api/golden/tensorflow.keras.applications.inception_resnet_v2.pbtxt | 15
-rw-r--r--  tensorflow/tools/api/golden/tensorflow.keras.applications.inception_v3.pbtxt | 15
-rw-r--r--  tensorflow/tools/api/golden/tensorflow.keras.applications.mobilenet.pbtxt | 15
-rw-r--r--  tensorflow/tools/api/golden/tensorflow.keras.applications.nasnet.pbtxt | 19
-rw-r--r--  tensorflow/tools/api/golden/tensorflow.keras.applications.pbtxt | 87
-rw-r--r--  tensorflow/tools/api/golden/tensorflow.keras.applications.resnet50.pbtxt | 15
-rw-r--r--  tensorflow/tools/api/golden/tensorflow.keras.applications.vgg16.pbtxt | 15
-rw-r--r--  tensorflow/tools/api/golden/tensorflow.keras.applications.vgg19.pbtxt | 15
-rw-r--r--  tensorflow/tools/api/golden/tensorflow.keras.applications.xception.pbtxt | 15
-rw-r--r--  tensorflow/tools/api/golden/tensorflow.keras.models.-model.pbtxt | 22
-rw-r--r--  tensorflow/tools/api/golden/tensorflow.keras.models.-sequential.pbtxt | 26
-rw-r--r--  tensorflow/tools/api/golden/tensorflow.keras.preprocessing.image.-directory-iterator.pbtxt | 23
-rw-r--r--  tensorflow/tools/api/golden/tensorflow.keras.preprocessing.image.-image-data-generator.pbtxt | 29
-rw-r--r--  tensorflow/tools/api/golden/tensorflow.keras.preprocessing.image.-iterator.pbtxt | 18
-rw-r--r--  tensorflow/tools/api/golden/tensorflow.keras.preprocessing.image.-numpy-array-iterator.pbtxt | 23
-rw-r--r--  tensorflow/tools/api/golden/tensorflow.keras.preprocessing.image.pbtxt | 63
-rw-r--r--  tensorflow/tools/api/golden/tensorflow.keras.preprocessing.pbtxt | 15
-rw-r--r--  tensorflow/tools/api/golden/tensorflow.keras.preprocessing.sequence.-timeseries-generator.pbtxt | 14
-rw-r--r--  tensorflow/tools/api/golden/tensorflow.keras.preprocessing.sequence.pbtxt | 19
-rw-r--r--  tensorflow/tools/api/golden/tensorflow.keras.preprocessing.text.-tokenizer.pbtxt | 33
-rw-r--r--  tensorflow/tools/api/golden/tensorflow.keras.preprocessing.text.pbtxt | 19
-rw-r--r--  tensorflow/tools/api/golden/tensorflow.manip.pbtxt | 7
-rw-r--r--  tensorflow/tools/api/golden/tensorflow.math.pbtxt | 7
-rw-r--r--  tensorflow/tools/api/golden/tensorflow.profiler.-advice-proto.-checker.pbtxt | 80
-rw-r--r--  tensorflow/tools/api/golden/tensorflow.profiler.-advice-proto.-checkers-entry.pbtxt | 84
-rw-r--r--  tensorflow/tools/api/golden/tensorflow.profiler.-advice-proto.pbtxt | 88
-rw-r--r--  tensorflow/tools/api/golden/tensorflow.profiler.-graph-node-proto.-input-shapes-entry.pbtxt | 84
-rw-r--r--  tensorflow/tools/api/golden/tensorflow.profiler.-graph-node-proto.pbtxt | 188
-rw-r--r--  tensorflow/tools/api/golden/tensorflow.profiler.-multi-graph-node-proto.pbtxt | 160
-rw-r--r--  tensorflow/tools/api/golden/tensorflow.profiler.-op-log-proto.-id-to-string-entry.pbtxt | 84
-rw-r--r--  tensorflow/tools/api/golden/tensorflow.profiler.-op-log-proto.pbtxt | 88
-rw-r--r--  tensorflow/tools/api/golden/tensorflow.summary.-event.pbtxt | 112
-rw-r--r--  tensorflow/tools/api/golden/tensorflow.summary.-session-log.pbtxt | 108
-rw-r--r--  tensorflow/tools/api/golden/tensorflow.summary.-summary-description.pbtxt | 80
-rw-r--r--  tensorflow/tools/api/golden/tensorflow.summary.-summary.-audio.pbtxt | 96
-rw-r--r--  tensorflow/tools/api/golden/tensorflow.summary.-summary.-image.pbtxt | 92
-rw-r--r--  tensorflow/tools/api/golden/tensorflow.summary.-summary.-value.pbtxt | 112
-rw-r--r--  tensorflow/tools/api/golden/tensorflow.summary.-summary.pbtxt | 92
-rw-r--r--  tensorflow/tools/api/golden/tensorflow.summary.-tagged-run-metadata.pbtxt | 84
-rw-r--r--  tensorflow/tools/api/golden/tensorflow.train.-bytes-list.pbtxt | 80
-rw-r--r--  tensorflow/tools/api/golden/tensorflow.train.-cluster-def.pbtxt | 80
-rw-r--r--  tensorflow/tools/api/golden/tensorflow.train.-example.pbtxt | 80
-rw-r--r--  tensorflow/tools/api/golden/tensorflow.train.-feature-list.pbtxt | 80
-rw-r--r--  tensorflow/tools/api/golden/tensorflow.train.-feature-lists.-feature-list-entry.pbtxt | 84
-rw-r--r--  tensorflow/tools/api/golden/tensorflow.train.-feature-lists.pbtxt | 84
-rw-r--r--  tensorflow/tools/api/golden/tensorflow.train.-feature.pbtxt | 88
-rw-r--r--  tensorflow/tools/api/golden/tensorflow.train.-features.-feature-entry.pbtxt | 84
-rw-r--r--  tensorflow/tools/api/golden/tensorflow.train.-features.pbtxt | 84
-rw-r--r--  tensorflow/tools/api/golden/tensorflow.train.-float-list.pbtxt | 80
-rw-r--r--  tensorflow/tools/api/golden/tensorflow.train.-int64-list.pbtxt | 80
-rw-r--r--  tensorflow/tools/api/golden/tensorflow.train.-job-def.-tasks-entry.pbtxt | 84
-rw-r--r--  tensorflow/tools/api/golden/tensorflow.train.-job-def.pbtxt | 88
-rw-r--r--  tensorflow/tools/api/golden/tensorflow.train.-saver-def.pbtxt | 120
-rw-r--r--  tensorflow/tools/api/golden/tensorflow.train.-sequence-example.pbtxt | 84
-rw-r--r--  tensorflow/tools/api/golden/tensorflow.train.-server-def.pbtxt | 96
-rw-r--r--  tensorflow/tools/api/golden/v1/tensorflow.-aggregation-method.pbtxt (renamed from tensorflow/tools/api/golden/tensorflow.-aggregation-method.pbtxt) | 0
-rw-r--r--  tensorflow/tools/api/golden/v1/tensorflow.-attr-value.-list-value.pbtxt | 70
-rw-r--r--  tensorflow/tools/api/golden/v1/tensorflow.-attr-value.pbtxt | 151
-rw-r--r--  tensorflow/tools/api/golden/v1/tensorflow.-conditional-accumulator-base.pbtxt (renamed from tensorflow/tools/api/golden/tensorflow.-conditional-accumulator-base.pbtxt) | 0
-rw-r--r--  tensorflow/tools/api/golden/v1/tensorflow.-conditional-accumulator.pbtxt (renamed from tensorflow/tools/api/golden/tensorflow.-conditional-accumulator.pbtxt) | 2
-rw-r--r--  tensorflow/tools/api/golden/v1/tensorflow.-config-proto.-device-count-entry.pbtxt | 21
-rw-r--r--  tensorflow/tools/api/golden/v1/tensorflow.-config-proto.-experimental.pbtxt | 22
-rw-r--r--  tensorflow/tools/api/golden/v1/tensorflow.-config-proto.pbtxt | 146
-rw-r--r--  tensorflow/tools/api/golden/v1/tensorflow.-d-type.pbtxt (renamed from tensorflow/tools/api/golden/tensorflow.-d-type.pbtxt) | 0
-rw-r--r--  tensorflow/tools/api/golden/v1/tensorflow.-device-spec.pbtxt (renamed from tensorflow/tools/api/golden/tensorflow.-device-spec.pbtxt) | 0
-rw-r--r--  tensorflow/tools/api/golden/v1/tensorflow.-dimension.pbtxt (renamed from tensorflow/tools/api/golden/tensorflow.-dimension.pbtxt) | 0
-rw-r--r--  tensorflow/tools/api/golden/v1/tensorflow.-event.pbtxt | 74
-rw-r--r--  tensorflow/tools/api/golden/v1/tensorflow.-f-i-f-o-queue.pbtxt (renamed from tensorflow/tools/api/golden/tensorflow.-f-i-f-o-queue.pbtxt) | 0
-rw-r--r--  tensorflow/tools/api/golden/v1/tensorflow.-fixed-len-feature.pbtxt (renamed from tensorflow/tools/api/golden/tensorflow.-fixed-len-feature.pbtxt) | 0
-rw-r--r--  tensorflow/tools/api/golden/v1/tensorflow.-fixed-len-sequence-feature.pbtxt (renamed from tensorflow/tools/api/golden/tensorflow.-fixed-len-sequence-feature.pbtxt) | 0
-rw-r--r--  tensorflow/tools/api/golden/v1/tensorflow.-fixed-length-record-reader.pbtxt (renamed from tensorflow/tools/api/golden/tensorflow.-fixed-length-record-reader.pbtxt) | 0
-rw-r--r--  tensorflow/tools/api/golden/v1/tensorflow.-g-p-u-options.pbtxt | 92
-rw-r--r--  tensorflow/tools/api/golden/v1/tensorflow.-gradient-tape.pbtxt (renamed from tensorflow/tools/api/golden/tensorflow.-gradient-tape.pbtxt) | 10
-rw-r--r--  tensorflow/tools/api/golden/v1/tensorflow.-graph-def.pbtxt | 36
-rw-r--r--  tensorflow/tools/api/golden/v1/tensorflow.-graph-keys.pbtxt (renamed from tensorflow/tools/api/golden/tensorflow.-graph-keys.pbtxt) | 0
-rw-r--r--  tensorflow/tools/api/golden/v1/tensorflow.-graph-options.pbtxt | 67
-rw-r--r--  tensorflow/tools/api/golden/v1/tensorflow.-graph.pbtxt (renamed from tensorflow/tools/api/golden/tensorflow.-graph.pbtxt) | 0
-rw-r--r--  tensorflow/tools/api/golden/v1/tensorflow.-histogram-proto.pbtxt | 54
-rw-r--r--  tensorflow/tools/api/golden/v1/tensorflow.-identity-reader.pbtxt (renamed from tensorflow/tools/api/golden/tensorflow.-identity-reader.pbtxt) | 0
-rw-r--r--  tensorflow/tools/api/golden/v1/tensorflow.-indexed-slices.pbtxt (renamed from tensorflow/tools/api/golden/tensorflow.-indexed-slices.pbtxt) | 0
-rw-r--r--  tensorflow/tools/api/golden/v1/tensorflow.-interactive-session.pbtxt (renamed from tensorflow/tools/api/golden/tensorflow.-interactive-session.pbtxt) | 0
-rw-r--r--  tensorflow/tools/api/golden/v1/tensorflow.-l-m-d-b-reader.pbtxt (renamed from tensorflow/tools/api/golden/tensorflow.-l-m-d-b-reader.pbtxt) | 0
-rw-r--r--  tensorflow/tools/api/golden/v1/tensorflow.-log-message.pbtxt | 46
-rw-r--r--  tensorflow/tools/api/golden/v1/tensorflow.-meta-graph-def.-collection-def-entry.pbtxt | 22
-rw-r--r--  tensorflow/tools/api/golden/v1/tensorflow.-meta-graph-def.-meta-info-def.pbtxt | 50
-rw-r--r--  tensorflow/tools/api/golden/v1/tensorflow.-meta-graph-def.-signature-def-entry.pbtxt | 22
-rw-r--r--  tensorflow/tools/api/golden/v1/tensorflow.-meta-graph-def.pbtxt | 133
-rw-r--r--  tensorflow/tools/api/golden/v1/tensorflow.-name-attr-list.-attr-entry.pbtxt | 22
-rw-r--r--  tensorflow/tools/api/golden/v1/tensorflow.-name-attr-list.pbtxt | 38
-rw-r--r--  tensorflow/tools/api/golden/v1/tensorflow.-node-def.-attr-entry.pbtxt | 22
-rw-r--r--  tensorflow/tools/api/golden/v1/tensorflow.-node-def.pbtxt | 56
-rw-r--r--  tensorflow/tools/api/golden/v1/tensorflow.-op-error.pbtxt (renamed from tensorflow/tools/api/golden/tensorflow.-op-error.pbtxt) | 0
-rw-r--r--  tensorflow/tools/api/golden/v1/tensorflow.-operation.pbtxt (renamed from tensorflow/tools/api/golden/tensorflow.-operation.pbtxt) | 0
-rw-r--r--  tensorflow/tools/api/golden/v1/tensorflow.-optimizer-options.pbtxt | 74
-rw-r--r--  tensorflow/tools/api/golden/v1/tensorflow.-padding-f-i-f-o-queue.pbtxt (renamed from tensorflow/tools/api/golden/tensorflow.-padding-f-i-f-o-queue.pbtxt) | 0
-rw-r--r--  tensorflow/tools/api/golden/v1/tensorflow.-priority-queue.pbtxt (renamed from tensorflow/tools/api/golden/tensorflow.-priority-queue.pbtxt) | 0
-rw-r--r--  tensorflow/tools/api/golden/v1/tensorflow.-queue-base.pbtxt (renamed from tensorflow/tools/api/golden/tensorflow.-queue-base.pbtxt) | 0
-rw-r--r--  tensorflow/tools/api/golden/v1/tensorflow.-random-shuffle-queue.pbtxt (renamed from tensorflow/tools/api/golden/tensorflow.-random-shuffle-queue.pbtxt) | 0
-rw-r--r--  tensorflow/tools/api/golden/v1/tensorflow.-reader-base.pbtxt (renamed from tensorflow/tools/api/golden/tensorflow.-reader-base.pbtxt) | 0
-rw-r--r--  tensorflow/tools/api/golden/v1/tensorflow.-register-gradient.pbtxt (renamed from tensorflow/tools/api/golden/tensorflow.-register-gradient.pbtxt) | 0
-rw-r--r--  tensorflow/tools/api/golden/v1/tensorflow.-run-metadata.pbtxt | 27
-rw-r--r--  tensorflow/tools/api/golden/v1/tensorflow.-run-options.-experimental.pbtxt | 12
-rw-r--r--  tensorflow/tools/api/golden/v1/tensorflow.-run-options.pbtxt | 83
-rw-r--r--  tensorflow/tools/api/golden/v1/tensorflow.-session-log.pbtxt | 44
-rw-r--r--  tensorflow/tools/api/golden/v1/tensorflow.-session.pbtxt (renamed from tensorflow/tools/api/golden/tensorflow.-session.pbtxt) | 0
-rw-r--r--  tensorflow/tools/api/golden/v1/tensorflow.-sparse-conditional-accumulator.pbtxt (renamed from tensorflow/tools/api/golden/tensorflow.-sparse-conditional-accumulator.pbtxt) | 2
-rw-r--r--  tensorflow/tools/api/golden/v1/tensorflow.-sparse-feature.pbtxt (renamed from tensorflow/tools/api/golden/tensorflow.-sparse-feature.pbtxt) | 0
-rw-r--r--  tensorflow/tools/api/golden/v1/tensorflow.-sparse-tensor-value.pbtxt (renamed from tensorflow/tools/api/golden/tensorflow.-sparse-tensor-value.pbtxt) | 0
-rw-r--r--  tensorflow/tools/api/golden/v1/tensorflow.-sparse-tensor.pbtxt (renamed from tensorflow/tools/api/golden/tensorflow.-sparse-tensor.pbtxt) | 8
-rw-r--r--  tensorflow/tools/api/golden/v1/tensorflow.-summary-metadata.-plugin-data.pbtxt | 18
-rw-r--r--  tensorflow/tools/api/golden/v1/tensorflow.-summary-metadata.pbtxt | 40
-rw-r--r--  tensorflow/tools/api/golden/v1/tensorflow.-summary.-audio.pbtxt | 36
-rw-r--r--  tensorflow/tools/api/golden/v1/tensorflow.-summary.-image.pbtxt | 30
-rw-r--r--  tensorflow/tools/api/golden/v1/tensorflow.-summary.-value.pbtxt | 74
-rw-r--r--  tensorflow/tools/api/golden/v1/tensorflow.-summary.pbtxt | 144
-rw-r--r--  tensorflow/tools/api/golden/v1/tensorflow.-t-f-record-reader.pbtxt (renamed from tensorflow/tools/api/golden/tensorflow.-t-f-record-reader.pbtxt) | 0
-rw-r--r--  tensorflow/tools/api/golden/v1/tensorflow.-tensor-array.pbtxt (renamed from tensorflow/tools/api/golden/tensorflow.-tensor-array.pbtxt) | 0
-rw-r--r--  tensorflow/tools/api/golden/v1/tensorflow.-tensor-info.-coo-sparse.pbtxt | 24
-rw-r--r--  tensorflow/tools/api/golden/v1/tensorflow.-tensor-info.pbtxt | 59
-rw-r--r--  tensorflow/tools/api/golden/v1/tensorflow.-tensor-shape.pbtxt (renamed from tensorflow/tools/api/golden/tensorflow.-tensor-shape.pbtxt) | 0
-rw-r--r--  tensorflow/tools/api/golden/v1/tensorflow.-tensor.pbtxt (renamed from tensorflow/tools/api/golden/tensorflow.-tensor.pbtxt) | 0
-rw-r--r--  tensorflow/tools/api/golden/v1/tensorflow.-text-line-reader.pbtxt (renamed from tensorflow/tools/api/golden/tensorflow.-text-line-reader.pbtxt) | 0
-rw-r--r--  tensorflow/tools/api/golden/v1/tensorflow.-var-len-feature.pbtxt (renamed from tensorflow/tools/api/golden/tensorflow.-var-len-feature.pbtxt) | 0
-rw-r--r--  tensorflow/tools/api/golden/v1/tensorflow.-variable-aggregation.pbtxt | 20
-rw-r--r--  tensorflow/tools/api/golden/v1/tensorflow.-variable-scope.pbtxt (renamed from tensorflow/tools/api/golden/tensorflow.-variable-scope.pbtxt) | 2
-rw-r--r--  tensorflow/tools/api/golden/v1/tensorflow.-variable-synchronization.pbtxt | 20
-rw-r--r--  tensorflow/tools/api/golden/v1/tensorflow.-variable.-save-slice-info.pbtxt (renamed from tensorflow/tools/api/golden/tensorflow.-variable.-save-slice-info.pbtxt) | 0
-rw-r--r--  tensorflow/tools/api/golden/v1/tensorflow.-variable.pbtxt (renamed from tensorflow/tools/api/golden/tensorflow.-variable.pbtxt) | 36
-rw-r--r--  tensorflow/tools/api/golden/v1/tensorflow.-whole-file-reader.pbtxt (renamed from tensorflow/tools/api/golden/tensorflow.-whole-file-reader.pbtxt) | 0
-rw-r--r--  tensorflow/tools/api/golden/v1/tensorflow.app.pbtxt (renamed from tensorflow/tools/api/golden/tensorflow.app.pbtxt) | 0
-rw-r--r--  tensorflow/tools/api/golden/v1/tensorflow.bitwise.pbtxt (renamed from tensorflow/tools/api/golden/tensorflow.bitwise.pbtxt) | 0
-rw-r--r--  tensorflow/tools/api/golden/v1/tensorflow.compat.pbtxt (renamed from tensorflow/tools/api/golden/tensorflow.compat.pbtxt) | 8
-rw-r--r--  tensorflow/tools/api/golden/v1/tensorflow.constant_initializer.pbtxt (renamed from tensorflow/tools/api/golden/tensorflow.constant_initializer.pbtxt) | 0
-rw-r--r--  tensorflow/tools/api/golden/v1/tensorflow.data.-dataset.__metaclass__.pbtxt (renamed from tensorflow/tools/api/golden/tensorflow.data.-dataset.__metaclass__.pbtxt) | 0
-rw-r--r--  tensorflow/tools/api/golden/v1/tensorflow.data.-dataset.pbtxt (renamed from tensorflow/tools/api/golden/tensorflow.data.-dataset.pbtxt) | 14
-rw-r--r--  tensorflow/tools/api/golden/v1/tensorflow.data.-fixed-length-record-dataset.__metaclass__.pbtxt (renamed from tensorflow/tools/api/golden/tensorflow.data.-fixed-length-record-dataset.__metaclass__.pbtxt) | 0
-rw-r--r--  tensorflow/tools/api/golden/v1/tensorflow.data.-fixed-length-record-dataset.pbtxt (renamed from tensorflow/tools/api/golden/tensorflow.data.-fixed-length-record-dataset.pbtxt) | 14
-rw-r--r--  tensorflow/tools/api/golden/v1/tensorflow.data.-iterator.pbtxt | 46
-rw-r--r--  tensorflow/tools/api/golden/v1/tensorflow.data.-t-f-record-dataset.__metaclass__.pbtxt (renamed from tensorflow/tools/api/golden/tensorflow.data.-t-f-record-dataset.__metaclass__.pbtxt) | 0
-rw-r--r--  tensorflow/tools/api/golden/v1/tensorflow.data.-t-f-record-dataset.pbtxt (renamed from tensorflow/tools/api/golden/tensorflow.data.-t-f-record-dataset.pbtxt) | 14
-rw-r--r--  tensorflow/tools/api/golden/v1/tensorflow.data.-text-line-dataset.__metaclass__.pbtxt (renamed from tensorflow/tools/api/golden/tensorflow.data.-text-line-dataset.__metaclass__.pbtxt) | 0
-rw-r--r--  tensorflow/tools/api/golden/v1/tensorflow.data.-text-line-dataset.pbtxt (renamed from tensorflow/tools/api/golden/tensorflow.data.-text-line-dataset.pbtxt) | 14
-rw-r--r--  tensorflow/tools/api/golden/v1/tensorflow.data.pbtxt (renamed from tensorflow/tools/api/golden/tensorflow.data.pbtxt) | 0
-rw-r--r--  tensorflow/tools/api/golden/v1/tensorflow.debugging.pbtxt | 19
-rw-r--r--  tensorflow/tools/api/golden/v1/tensorflow.distributions.-bernoulli.pbtxt (renamed from tensorflow/tools/api/golden/tensorflow.distributions.-bernoulli.pbtxt) | 0
-rw-r--r--  tensorflow/tools/api/golden/v1/tensorflow.distributions.-beta.pbtxt (renamed from tensorflow/tools/api/golden/tensorflow.distributions.-beta.pbtxt) | 0
-rw-r--r--  tensorflow/tools/api/golden/v1/tensorflow.distributions.-categorical.pbtxt (renamed from tensorflow/tools/api/golden/tensorflow.distributions.-categorical.pbtxt) | 0
-rw-r--r--  tensorflow/tools/api/golden/v1/tensorflow.distributions.-dirichlet-multinomial.pbtxt (renamed from tensorflow/tools/api/golden/tensorflow.distributions.-dirichlet-multinomial.pbtxt) | 0
-rw-r--r--  tensorflow/tools/api/golden/v1/tensorflow.distributions.-dirichlet.pbtxt (renamed from tensorflow/tools/api/golden/tensorflow.distributions.-dirichlet.pbtxt) | 0
-rw-r--r--  tensorflow/tools/api/golden/v1/tensorflow.distributions.-distribution.pbtxt (renamed from tensorflow/tools/api/golden/tensorflow.distributions.-distribution.pbtxt) | 0
-rw-r--r--  tensorflow/tools/api/golden/v1/tensorflow.distributions.-exponential.pbtxt (renamed from tensorflow/tools/api/golden/tensorflow.distributions.-exponential.pbtxt) | 0
-rw-r--r--  tensorflow/tools/api/golden/v1/tensorflow.distributions.-gamma.pbtxt (renamed from tensorflow/tools/api/golden/tensorflow.distributions.-gamma.pbtxt) | 0
-rw-r--r--  tensorflow/tools/api/golden/v1/tensorflow.distributions.-laplace.pbtxt (renamed from tensorflow/tools/api/golden/tensorflow.distributions.-laplace.pbtxt) | 0
-rw-r--r--  tensorflow/tools/api/golden/v1/tensorflow.distributions.-multinomial.pbtxt (renamed from tensorflow/tools/api/golden/tensorflow.distributions.-multinomial.pbtxt) | 0
-rw-r--r--  tensorflow/tools/api/golden/v1/tensorflow.distributions.-normal.pbtxt (renamed from tensorflow/tools/api/golden/tensorflow.distributions.-normal.pbtxt) | 0
-rw-r--r--  tensorflow/tools/api/golden/v1/tensorflow.distributions.-register-k-l.pbtxt (renamed from tensorflow/tools/api/golden/tensorflow.distributions.-register-k-l.pbtxt) | 0
-rw-r--r--  tensorflow/tools/api/golden/v1/tensorflow.distributions.-reparameterization-type.pbtxt (renamed from tensorflow/tools/api/golden/tensorflow.distributions.-reparameterization-type.pbtxt) | 0
-rw-r--r--  tensorflow/tools/api/golden/v1/tensorflow.distributions.-student-t.pbtxt (renamed from tensorflow/tools/api/golden/tensorflow.distributions.-student-t.pbtxt) | 0
-rw-r--r--  tensorflow/tools/api/golden/v1/tensorflow.distributions.-uniform.pbtxt (renamed from tensorflow/tools/api/golden/tensorflow.distributions.-uniform.pbtxt) | 0
-rw-r--r--  tensorflow/tools/api/golden/v1/tensorflow.distributions.pbtxt (renamed from tensorflow/tools/api/golden/tensorflow.distributions.pbtxt) | 0
-rw-r--r--  tensorflow/tools/api/golden/v1/tensorflow.dtypes.pbtxt | 7
-rw-r--r--  tensorflow/tools/api/golden/v1/tensorflow.errors.-aborted-error.pbtxt (renamed from tensorflow/tools/api/golden/tensorflow.errors.-aborted-error.pbtxt) | 0
-rw-r--r--  tensorflow/tools/api/golden/v1/tensorflow.errors.-already-exists-error.pbtxt (renamed from tensorflow/tools/api/golden/tensorflow.errors.-already-exists-error.pbtxt) | 0
-rw-r--r--  tensorflow/tools/api/golden/v1/tensorflow.errors.-cancelled-error.pbtxt (renamed from tensorflow/tools/api/golden/tensorflow.errors.-cancelled-error.pbtxt) | 0
-rw-r--r--  tensorflow/tools/api/golden/v1/tensorflow.errors.-data-loss-error.pbtxt (renamed from tensorflow/tools/api/golden/tensorflow.errors.-data-loss-error.pbtxt) | 0
-rw-r--r--  tensorflow/tools/api/golden/v1/tensorflow.errors.-deadline-exceeded-error.pbtxt (renamed from tensorflow/tools/api/golden/tensorflow.errors.-deadline-exceeded-error.pbtxt) | 0
-rw-r--r--  tensorflow/tools/api/golden/v1/tensorflow.errors.-failed-precondition-error.pbtxt (renamed from tensorflow/tools/api/golden/tensorflow.errors.-failed-precondition-error.pbtxt) | 0
-rw-r--r--  tensorflow/tools/api/golden/v1/tensorflow.errors.-internal-error.pbtxt (renamed from tensorflow/tools/api/golden/tensorflow.errors.-internal-error.pbtxt) | 0
-rw-r--r--  tensorflow/tools/api/golden/v1/tensorflow.errors.-invalid-argument-error.pbtxt (renamed from tensorflow/tools/api/golden/tensorflow.errors.-invalid-argument-error.pbtxt) | 0
-rw-r--r--  tensorflow/tools/api/golden/v1/tensorflow.errors.-not-found-error.pbtxt (renamed from tensorflow/tools/api/golden/tensorflow.errors.-not-found-error.pbtxt) | 0
-rw-r--r--  tensorflow/tools/api/golden/v1/tensorflow.errors.-op-error.pbtxt (renamed from tensorflow/tools/api/golden/tensorflow.errors.-op-error.pbtxt) | 0
-rw-r--r--  tensorflow/tools/api/golden/v1/tensorflow.errors.-out-of-range-error.pbtxt (renamed from tensorflow/tools/api/golden/tensorflow.errors.-out-of-range-error.pbtxt) | 0
-rw-r--r--  tensorflow/tools/api/golden/v1/tensorflow.errors.-permission-denied-error.pbtxt (renamed from tensorflow/tools/api/golden/tensorflow.errors.-permission-denied-error.pbtxt) | 0
-rw-r--r--  tensorflow/tools/api/golden/v1/tensorflow.errors.-resource-exhausted-error.pbtxt (renamed from tensorflow/tools/api/golden/tensorflow.errors.-resource-exhausted-error.pbtxt) | 0
-rw-r--r--  tensorflow/tools/api/golden/v1/tensorflow.errors.-unauthenticated-error.pbtxt (renamed from tensorflow/tools/api/golden/tensorflow.errors.-unauthenticated-error.pbtxt) | 0
-rw-r--r--  tensorflow/tools/api/golden/v1/tensorflow.errors.-unavailable-error.pbtxt (renamed from tensorflow/tools/api/golden/tensorflow.errors.-unavailable-error.pbtxt) | 0
-rw-r--r--  tensorflow/tools/api/golden/v1/tensorflow.errors.-unimplemented-error.pbtxt (renamed from tensorflow/tools/api/golden/tensorflow.errors.-unimplemented-error.pbtxt) | 0
-rw-r--r--  tensorflow/tools/api/golden/v1/tensorflow.errors.-unknown-error.pbtxt (renamed from tensorflow/tools/api/golden/tensorflow.errors.-unknown-error.pbtxt) | 0
-rw-r--r--  tensorflow/tools/api/golden/v1/tensorflow.errors.pbtxt (renamed from tensorflow/tools/api/golden/tensorflow.errors.pbtxt) | 0
-rw-r--r--  tensorflow/tools/api/golden/v1/tensorflow.errors.raise_exception_on_not_ok_status.pbtxt (renamed from tensorflow/tools/api/golden/tensorflow.errors.raise_exception_on_not_ok_status.pbtxt) | 0
-rw-r--r--  tensorflow/tools/api/golden/v1/tensorflow.estimator.-baseline-classifier.pbtxt (renamed from tensorflow/tools/api/golden/tensorflow.estimator.-baseline-classifier.pbtxt) | 8
-rw-r--r--  tensorflow/tools/api/golden/v1/tensorflow.estimator.-baseline-regressor.pbtxt (renamed from tensorflow/tools/api/golden/tensorflow.estimator.-baseline-regressor.pbtxt) | 8
-rw-r--r--  tensorflow/tools/api/golden/v1/tensorflow.estimator.-best-exporter.pbtxt | 18
-rw-r--r--  tensorflow/tools/api/golden/v1/tensorflow.estimator.-boosted-trees-classifier.pbtxt | 67
-rw-r--r--  tensorflow/tools/api/golden/v1/tensorflow.estimator.-boosted-trees-regressor.pbtxt | 67
-rw-r--r--  tensorflow/tools/api/golden/v1/tensorflow.estimator.-d-n-n-classifier.pbtxt (renamed from tensorflow/tools/api/golden/tensorflow.estimator.-d-n-n-classifier.pbtxt) | 10
-rw-r--r--  tensorflow/tools/api/golden/v1/tensorflow.estimator.-d-n-n-linear-combined-classifier.pbtxt (renamed from tensorflow/tools/api/golden/tensorflow.estimator.-d-n-n-linear-combined-classifier.pbtxt) | 10
-rw-r--r--  tensorflow/tools/api/golden/v1/tensorflow.estimator.-d-n-n-linear-combined-regressor.pbtxt (renamed from tensorflow/tools/api/golden/tensorflow.estimator.-d-n-n-linear-combined-regressor.pbtxt) | 10
-rw-r--r--  tensorflow/tools/api/golden/v1/tensorflow.estimator.-d-n-n-regressor.pbtxt (renamed from tensorflow/tools/api/golden/tensorflow.estimator.-d-n-n-regressor.pbtxt) | 10
-rw-r--r--  tensorflow/tools/api/golden/v1/tensorflow.estimator.-estimator-spec.pbtxt (renamed from tensorflow/tools/api/golden/tensorflow.estimator.-estimator-spec.pbtxt) | 0
-rw-r--r--  tensorflow/tools/api/golden/v1/tensorflow.estimator.-estimator.pbtxt (renamed from tensorflow/tools/api/golden/tensorflow.estimator.-estimator.pbtxt) | 8
-rw-r--r--  tensorflow/tools/api/golden/v1/tensorflow.estimator.-eval-spec.pbtxt (renamed from tensorflow/tools/api/golden/tensorflow.estimator.-eval-spec.pbtxt) | 0
-rw-r--r--  tensorflow/tools/api/golden/v1/tensorflow.estimator.-exporter.pbtxt (renamed from tensorflow/tools/api/golden/tensorflow.estimator.-exporter.pbtxt) | 0
-rw-r--r--  tensorflow/tools/api/golden/v1/tensorflow.estimator.-final-exporter.pbtxt (renamed from tensorflow/tools/api/golden/tensorflow.estimator.-final-exporter.pbtxt) | 0
-rw-r--r--  tensorflow/tools/api/golden/v1/tensorflow.estimator.-latest-exporter.pbtxt (renamed from tensorflow/tools/api/golden/tensorflow.estimator.-latest-exporter.pbtxt) | 0
-rw-r--r--  tensorflow/tools/api/golden/v1/tensorflow.estimator.-linear-classifier.pbtxt (renamed from tensorflow/tools/api/golden/tensorflow.estimator.-linear-classifier.pbtxt) | 10
-rw-r--r--  tensorflow/tools/api/golden/v1/tensorflow.estimator.-linear-regressor.pbtxt (renamed from tensorflow/tools/api/golden/tensorflow.estimator.-linear-regressor.pbtxt) | 10
-rw-r--r--  tensorflow/tools/api/golden/v1/tensorflow.estimator.-mode-keys.pbtxt (renamed from tensorflow/tools/api/golden/tensorflow.estimator.-mode-keys.pbtxt) | 0
-rw-r--r--  tensorflow/tools/api/golden/v1/tensorflow.estimator.-run-config.pbtxt | 105
-rw-r--r--  tensorflow/tools/api/golden/v1/tensorflow.estimator.-train-spec.pbtxt (renamed from tensorflow/tools/api/golden/tensorflow.estimator.-train-spec.pbtxt) | 0
-rw-r--r--  tensorflow/tools/api/golden/v1/tensorflow.estimator.-vocab-info.pbtxt (renamed from tensorflow/tools/api/golden/tensorflow.estimator.-vocab-info.pbtxt) | 4
-rw-r--r--  tensorflow/tools/api/golden/v1/tensorflow.estimator.-warm-start-settings.pbtxt (renamed from tensorflow/tools/api/golden/tensorflow.estimator.-warm-start-settings.pbtxt) | 0
-rw-r--r--  tensorflow/tools/api/golden/v1/tensorflow.estimator.export.-classification-output.__metaclass__.pbtxt (renamed from tensorflow/tools/api/golden/tensorflow.estimator.export.-classification-output.__metaclass__.pbtxt) | 0
-rw-r--r--  tensorflow/tools/api/golden/v1/tensorflow.estimator.export.-classification-output.pbtxt (renamed from tensorflow/tools/api/golden/tensorflow.estimator.export.-classification-output.pbtxt) | 0
-rw-r--r--  tensorflow/tools/api/golden/v1/tensorflow.estimator.export.-export-output.__metaclass__.pbtxt (renamed from tensorflow/tools/api/golden/tensorflow.estimator.export.-export-output.__metaclass__.pbtxt) | 0
-rw-r--r--  tensorflow/tools/api/golden/v1/tensorflow.estimator.export.-export-output.pbtxt (renamed from tensorflow/tools/api/golden/tensorflow.estimator.export.-export-output.pbtxt) | 0
-rw-r--r--  tensorflow/tools/api/golden/v1/tensorflow.estimator.export.-predict-output.__metaclass__.pbtxt (renamed from tensorflow/tools/api/golden/tensorflow.estimator.export.-predict-output.__metaclass__.pbtxt) | 0
-rw-r--r--  tensorflow/tools/api/golden/v1/tensorflow.estimator.export.-predict-output.pbtxt (renamed from tensorflow/tools/api/golden/tensorflow.estimator.export.-predict-output.pbtxt) | 0
-rw-r--r--  tensorflow/tools/api/golden/v1/tensorflow.estimator.export.-regression-output.__metaclass__.pbtxt (renamed from tensorflow/tools/api/golden/tensorflow.estimator.export.-regression-output.__metaclass__.pbtxt) | 0
-rw-r--r--  tensorflow/tools/api/golden/v1/tensorflow.estimator.export.-regression-output.pbtxt (renamed from tensorflow/tools/api/golden/tensorflow.estimator.export.-regression-output.pbtxt) | 0
-rw-r--r--  tensorflow/tools/api/golden/v1/tensorflow.estimator.export.-serving-input-receiver.pbtxt (renamed from tensorflow/tools/api/golden/tensorflow.estimator.export.-serving-input-receiver.pbtxt) | 0
-rw-r--r--  tensorflow/tools/api/golden/v1/tensorflow.estimator.export.-tensor-serving-input-receiver.pbtxt (renamed from tensorflow/tools/api/golden/tensorflow.estimator.export.-tensor-serving-input-receiver.pbtxt) | 0
-rw-r--r--  tensorflow/tools/api/golden/v1/tensorflow.estimator.export.pbtxt (renamed from tensorflow/tools/api/golden/tensorflow.estimator.export.pbtxt) | 0
-rw-r--r--  tensorflow/tools/api/golden/v1/tensorflow.estimator.inputs.pbtxt (renamed from tensorflow/tools/api/golden/tensorflow.estimator.inputs.pbtxt) | 0
-rw-r--r--  tensorflow/tools/api/golden/v1/tensorflow.estimator.pbtxt (renamed from tensorflow/tools/api/golden/tensorflow.estimator.pbtxt) | 4
-rw-r--r--  tensorflow/tools/api/golden/v1/tensorflow.feature_column.pbtxt (renamed from tensorflow/tools/api/golden/tensorflow.feature_column.pbtxt) | 2
-rw-r--r--  tensorflow/tools/api/golden/v1/tensorflow.gfile.-fast-g-file.pbtxt (renamed from tensorflow/tools/api/golden/tensorflow.gfile.-fast-g-file.pbtxt) | 0
-rw-r--r--  tensorflow/tools/api/golden/v1/tensorflow.gfile.-g-file.pbtxt (renamed from tensorflow/tools/api/golden/tensorflow.gfile.-g-file.pbtxt) | 0
-rw-r--r--  tensorflow/tools/api/golden/v1/tensorflow.gfile.-open.pbtxt (renamed from tensorflow/tools/api/golden/tensorflow.gfile.-open.pbtxt) | 0
-rw-r--r--  tensorflow/tools/api/golden/v1/tensorflow.gfile.pbtxt (renamed from tensorflow/tools/api/golden/tensorflow.gfile.pbtxt) | 0
-rw-r--r--  tensorflow/tools/api/golden/v1/tensorflow.glorot_normal_initializer.pbtxt | 19
-rw-r--r--  tensorflow/tools/api/golden/v1/tensorflow.glorot_uniform_initializer.pbtxt | 19
-rw-r--r--  tensorflow/tools/api/golden/v1/tensorflow.graph_util.pbtxt (renamed from tensorflow/tools/api/golden/tensorflow.graph_util.pbtxt) | 0
-rw-r--r--  tensorflow/tools/api/golden/v1/tensorflow.image.-resize-method.pbtxt (renamed from tensorflow/tools/api/golden/tensorflow.image.-resize-method.pbtxt) | 0
-rw-r--r--  tensorflow/tools/api/golden/v1/tensorflow.image.pbtxt | 251
-rw-r--r--  tensorflow/tools/api/golden/v1/tensorflow.initializers.constant.pbtxt (renamed from tensorflow/tools/api/golden/tensorflow.initializers.constant.pbtxt) | 0
-rw-r--r--  tensorflow/tools/api/golden/v1/tensorflow.initializers.glorot_normal.pbtxt | 19
-rw-r--r--  tensorflow/tools/api/golden/v1/tensorflow.initializers.glorot_uniform.pbtxt | 19
-rw-r--r--  tensorflow/tools/api/golden/v1/tensorflow.initializers.identity.pbtxt (renamed from tensorflow/tools/api/golden/tensorflow.initializers.identity.pbtxt) | 0
-rw-r--r--  tensorflow/tools/api/golden/v1/tensorflow.initializers.ones.pbtxt (renamed from tensorflow/tools/api/golden/tensorflow.initializers.ones.pbtxt) | 0
-rw-r--r--  tensorflow/tools/api/golden/v1/tensorflow.initializers.orthogonal.pbtxt (renamed from tensorflow/tools/api/golden/tensorflow.initializers.orthogonal.pbtxt) | 0
-rw-r--r--  tensorflow/tools/api/golden/v1/tensorflow.initializers.pbtxt (renamed from tensorflow/tools/api/golden/tensorflow.initializers.pbtxt) | 24
-rw-r--r--  tensorflow/tools/api/golden/v1/tensorflow.initializers.random_normal.pbtxt (renamed from tensorflow/tools/api/golden/tensorflow.initializers.random_normal.pbtxt) | 0
-rw-r--r--  tensorflow/tools/api/golden/v1/tensorflow.initializers.random_uniform.pbtxt (renamed from tensorflow/tools/api/golden/tensorflow.initializers.random_uniform.pbtxt) | 0
-rw-r--r--  tensorflow/tools/api/golden/v1/tensorflow.initializers.truncated_normal.pbtxt (renamed from tensorflow/tools/api/golden/tensorflow.initializers.truncated_normal.pbtxt) | 0
-rw-r--r--  tensorflow/tools/api/golden/v1/tensorflow.initializers.uniform_unit_scaling.pbtxt (renamed from tensorflow/tools/api/golden/tensorflow.initializers.uniform_unit_scaling.pbtxt) | 0
-rw-r--r--  tensorflow/tools/api/golden/v1/tensorflow.initializers.variance_scaling.pbtxt (renamed from tensorflow/tools/api/golden/tensorflow.initializers.variance_scaling.pbtxt) | 2
-rw-r--r--  tensorflow/tools/api/golden/v1/tensorflow.initializers.zeros.pbtxt (renamed from tensorflow/tools/api/golden/tensorflow.initializers.zeros.pbtxt) | 0
-rw-r--r--  tensorflow/tools/api/golden/v1/tensorflow.io.pbtxt | 43
-rw-r--r--  tensorflow/tools/api/golden/v1/tensorflow.keras.-model.pbtxt | 268
-rw-r--r--  tensorflow/tools/api/golden/v1/tensorflow.keras.-sequential.pbtxt | 285
-rw-r--r--  tensorflow/tools/api/golden/v1/tensorflow.keras.activations.pbtxt | 55
-rw-r--r--  tensorflow/tools/api/golden/v1/tensorflow.keras.backend.name_scope.pbtxt (renamed from tensorflow/tools/api/golden/tensorflow.keras.backend.name_scope.pbtxt) | 0
-rw-r--r--  tensorflow/tools/api/golden/v1/tensorflow.keras.backend.pbtxt (renamed from tensorflow/tools/api/golden/tensorflow.keras.backend.pbtxt) | 8
-rw-r--r--  tensorflow/tools/api/golden/v1/tensorflow.keras.callbacks.-base-logger.pbtxt (renamed from tensorflow/tools/api/golden/tensorflow.keras.callbacks.-base-logger.pbtxt) | 4
-rw-r--r--  tensorflow/tools/api/golden/v1/tensorflow.keras.callbacks.-c-s-v-logger.pbtxt (renamed from tensorflow/tools/api/golden/tensorflow.keras.callbacks.-c-s-v-logger.pbtxt) | 4
-rw-r--r--  tensorflow/tools/api/golden/v1/tensorflow.keras.callbacks.-callback.pbtxt (renamed from tensorflow/tools/api/golden/tensorflow.keras.callbacks.-callback.pbtxt) | 2
-rw-r--r--  tensorflow/tools/api/golden/v1/tensorflow.keras.callbacks.-early-stopping.pbtxt (renamed from tensorflow/tools/api/golden/tensorflow.keras.callbacks.-early-stopping.pbtxt) | 6
-rw-r--r--  tensorflow/tools/api/golden/v1/tensorflow.keras.callbacks.-history.pbtxt (renamed from tensorflow/tools/api/golden/tensorflow.keras.callbacks.-history.pbtxt) | 4
-rw-r--r--  tensorflow/tools/api/golden/v1/tensorflow.keras.callbacks.-lambda-callback.pbtxt (renamed from tensorflow/tools/api/golden/tensorflow.keras.callbacks.-lambda-callback.pbtxt) | 4
-rw-r--r--  tensorflow/tools/api/golden/v1/tensorflow.keras.callbacks.-learning-rate-scheduler.pbtxt (renamed from tensorflow/tools/api/golden/tensorflow.keras.callbacks.-learning-rate-scheduler.pbtxt) | 4
-rw-r--r--  tensorflow/tools/api/golden/v1/tensorflow.keras.callbacks.-model-checkpoint.pbtxt (renamed from tensorflow/tools/api/golden/tensorflow.keras.callbacks.-model-checkpoint.pbtxt) | 4
-rw-r--r--  tensorflow/tools/api/golden/v1/tensorflow.keras.callbacks.-progbar-logger.pbtxt (renamed from tensorflow/tools/api/golden/tensorflow.keras.callbacks.-progbar-logger.pbtxt) | 4
-rw-r--r--  tensorflow/tools/api/golden/v1/tensorflow.keras.callbacks.-reduce-l-r-on-plateau.pbtxt (renamed from tensorflow/tools/api/golden/tensorflow.keras.callbacks.-reduce-l-r-on-plateau.pbtxt) | 6
-rw-r--r--  tensorflow/tools/api/golden/v1/tensorflow.keras.callbacks.-remote-monitor.pbtxt (renamed from tensorflow/tools/api/golden/tensorflow.keras.callbacks.-remote-monitor.pbtxt) | 6
-rw-r--r--  tensorflow/tools/api/golden/v1/tensorflow.keras.callbacks.-tensor-board.pbtxt (renamed from tensorflow/tools/api/golden/tensorflow.keras.callbacks.-tensor-board.pbtxt) | 6
-rw-r--r--  tensorflow/tools/api/golden/v1/tensorflow.keras.callbacks.-terminate-on-na-n.pbtxt (renamed from tensorflow/tools/api/golden/tensorflow.keras.callbacks.-terminate-on-na-n.pbtxt) | 4
-rw-r--r--  tensorflow/tools/api/golden/v1/tensorflow.keras.callbacks.pbtxt (renamed from tensorflow/tools/api/golden/tensorflow.keras.callbacks.pbtxt) | 0
-rw-r--r--  tensorflow/tools/api/golden/v1/tensorflow.keras.constraints.-constraint.pbtxt (renamed from tensorflow/tools/api/golden/tensorflow.keras.constraints.-constraint.pbtxt) | 2
-rw-r--r--  tensorflow/tools/api/golden/v1/tensorflow.keras.constraints.-max-norm.pbtxt (renamed from tensorflow/tools/api/golden/tensorflow.keras.constraints.-max-norm.pbtxt) | 4
-rw-r--r--  tensorflow/tools/api/golden/v1/tensorflow.keras.constraints.-min-max-norm.pbtxt (renamed from tensorflow/tools/api/golden/tensorflow.keras.constraints.-min-max-norm.pbtxt) | 4
-rw-r--r--  tensorflow/tools/api/golden/v1/tensorflow.keras.constraints.-non-neg.pbtxt (renamed from tensorflow/tools/api/golden/tensorflow.keras.constraints.-non-neg.pbtxt) | 4
-rw-r--r--  tensorflow/tools/api/golden/v1/tensorflow.keras.constraints.-unit-norm.pbtxt (renamed from tensorflow/tools/api/golden/tensorflow.keras.constraints.-unit-norm.pbtxt) | 4
-rw-r--r--  tensorflow/tools/api/golden/v1/tensorflow.keras.constraints.max_norm.pbtxt (renamed from tensorflow/tools/api/golden/tensorflow.keras.constraints.max_norm.pbtxt) | 4
-rw-r--r--  tensorflow/tools/api/golden/v1/tensorflow.keras.constraints.min_max_norm.pbtxt (renamed from tensorflow/tools/api/golden/tensorflow.keras.constraints.min_max_norm.pbtxt) | 4
-rw-r--r--  tensorflow/tools/api/golden/v1/tensorflow.keras.constraints.non_neg.pbtxt (renamed from tensorflow/tools/api/golden/tensorflow.keras.constraints.non_neg.pbtxt) | 4
-rw-r--r--  tensorflow/tools/api/golden/v1/tensorflow.keras.constraints.pbtxt (renamed from tensorflow/tools/api/golden/tensorflow.keras.constraints.pbtxt) | 0
-rw-r--r--  tensorflow/tools/api/golden/v1/tensorflow.keras.constraints.unit_norm.pbtxt (renamed from tensorflow/tools/api/golden/tensorflow.keras.constraints.unit_norm.pbtxt) | 4
-rw-r--r--  tensorflow/tools/api/golden/v1/tensorflow.keras.datasets.boston_housing.pbtxt (renamed from tensorflow/tools/api/golden/tensorflow.keras.datasets.boston_housing.pbtxt) | 0
-rw-r--r--  tensorflow/tools/api/golden/v1/tensorflow.keras.datasets.cifar10.pbtxt (renamed from tensorflow/tools/api/golden/tensorflow.keras.datasets.cifar10.pbtxt) | 0
-rw-r--r--  tensorflow/tools/api/golden/v1/tensorflow.keras.datasets.cifar100.pbtxt (renamed from tensorflow/tools/api/golden/tensorflow.keras.datasets.cifar100.pbtxt) | 0
-rw-r--r--  tensorflow/tools/api/golden/v1/tensorflow.keras.datasets.fashion_mnist.pbtxt (renamed from tensorflow/tools/api/golden/tensorflow.keras.datasets.fashion_mnist.pbtxt) | 0
-rw-r--r--  tensorflow/tools/api/golden/v1/tensorflow.keras.datasets.imdb.pbtxt (renamed from tensorflow/tools/api/golden/tensorflow.keras.datasets.imdb.pbtxt) | 0
-rw-r--r--  tensorflow/tools/api/golden/v1/tensorflow.keras.datasets.mnist.pbtxt (renamed from tensorflow/tools/api/golden/tensorflow.keras.datasets.mnist.pbtxt) | 0
-rw-r--r--  tensorflow/tools/api/golden/v1/tensorflow.keras.datasets.pbtxt (renamed from tensorflow/tools/api/golden/tensorflow.keras.datasets.pbtxt) | 0
-rw-r--r--  tensorflow/tools/api/golden/v1/tensorflow.keras.datasets.reuters.pbtxt (renamed from tensorflow/tools/api/golden/tensorflow.keras.datasets.reuters.pbtxt) | 0
-rw-r--r--  tensorflow/tools/api/golden/v1/tensorflow.keras.estimator.pbtxt (renamed from tensorflow/tools/api/golden/tensorflow.keras.estimator.pbtxt) | 0
-rw-r--r--  tensorflow/tools/api/golden/v1/tensorflow.keras.initializers.-constant.pbtxt (renamed from tensorflow/tools/api/golden/tensorflow.keras.initializers.-constant.pbtxt) | 0
-rw-r--r--  tensorflow/tools/api/golden/v1/tensorflow.keras.initializers.-identity.pbtxt (renamed from tensorflow/tools/api/golden/tensorflow.keras.initializers.-identity.pbtxt) | 0
-rw-r--r--  tensorflow/tools/api/golden/v1/tensorflow.keras.initializers.-initializer.pbtxt (renamed from tensorflow/tools/api/golden/tensorflow.keras.initializers.-initializer.pbtxt) | 0
-rw-r--r--  tensorflow/tools/api/golden/v1/tensorflow.keras.initializers.-ones.pbtxt (renamed from tensorflow/tools/api/golden/tensorflow.keras.initializers.-ones.pbtxt) | 0
-rw-r--r--  tensorflow/tools/api/golden/v1/tensorflow.keras.initializers.-orthogonal.pbtxt (renamed from tensorflow/tools/api/golden/tensorflow.keras.initializers.-orthogonal.pbtxt) | 0
-rw-r--r--  tensorflow/tools/api/golden/v1/tensorflow.keras.initializers.-random-normal.pbtxt | 19
-rw-r--r--  tensorflow/tools/api/golden/v1/tensorflow.keras.initializers.-random-uniform.pbtxt | 19
-rw-r--r--  tensorflow/tools/api/golden/v1/tensorflow.keras.initializers.-truncated-normal.pbtxt | 19
-rw-r--r--  tensorflow/tools/api/golden/v1/tensorflow.keras.initializers.-variance-scaling.pbtxt (renamed from tensorflow/tools/api/golden/tensorflow.keras.initializers.-variance-scaling.pbtxt) | 2
-rw-r--r--  tensorflow/tools/api/golden/v1/tensorflow.keras.initializers.-zeros.pbtxt (renamed from tensorflow/tools/api/golden/tensorflow.keras.initializers.-zeros.pbtxt) | 0
-rw-r--r--  tensorflow/tools/api/golden/v1/tensorflow.keras.initializers.constant.pbtxt | 18
-rw-r--r--  tensorflow/tools/api/golden/v1/tensorflow.keras.initializers.glorot_normal.pbtxt | 19
-rw-r--r--  tensorflow/tools/api/golden/v1/tensorflow.keras.initializers.glorot_uniform.pbtxt | 19
-rw-r--r--  tensorflow/tools/api/golden/v1/tensorflow.keras.initializers.identity.pbtxt | 18
-rw-r--r--  tensorflow/tools/api/golden/v1/tensorflow.keras.initializers.normal.pbtxt | 19
-rw-r--r--  tensorflow/tools/api/golden/v1/tensorflow.keras.initializers.ones.pbtxt | 18
-rw-r--r--  tensorflow/tools/api/golden/v1/tensorflow.keras.initializers.orthogonal.pbtxt | 18
-rw-r--r--  tensorflow/tools/api/golden/v1/tensorflow.keras.initializers.pbtxt (renamed from tensorflow/tools/api/golden/tensorflow.keras.initializers.pbtxt) | 56
-rw-r--r--  tensorflow/tools/api/golden/v1/tensorflow.keras.initializers.random_normal.pbtxt | 19
-rw-r--r--  tensorflow/tools/api/golden/v1/tensorflow.keras.initializers.random_uniform.pbtxt | 19
-rw-r--r--  tensorflow/tools/api/golden/v1/tensorflow.keras.initializers.truncated_normal.pbtxt | 19
-rw-r--r--  tensorflow/tools/api/golden/v1/tensorflow.keras.initializers.uniform.pbtxt | 19
-rw-r--r--  tensorflow/tools/api/golden/v1/tensorflow.keras.initializers.zeros.pbtxt | 18
-rw-r--r--  tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-activation.pbtxt (renamed from tensorflow/tools/api/golden/tensorflow.keras.layers.-activation.pbtxt) | 10
-rw-r--r--  tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-activity-regularization.pbtxt (renamed from tensorflow/tools/api/golden/tensorflow.keras.layers.-activity-regularization.pbtxt) | 10
-rw-r--r--  tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-add.pbtxt (renamed from tensorflow/tools/api/golden/tensorflow.keras.layers.-add.pbtxt) | 10
-rw-r--r--  tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-alpha-dropout.pbtxt (renamed from tensorflow/tools/api/golden/tensorflow.keras.layers.-alpha-dropout.pbtxt) | 10
-rw-r--r--  tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-average-pooling1-d.pbtxt (renamed from tensorflow/tools/api/golden/tensorflow.keras.layers.-average-pooling1-d.pbtxt) | 12
-rw-r--r--  tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-average-pooling2-d.pbtxt (renamed from tensorflow/tools/api/golden/tensorflow.keras.layers.-average-pooling2-d.pbtxt) | 12
-rw-r--r--  tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-average-pooling3-d.pbtxt (renamed from tensorflow/tools/api/golden/tensorflow.keras.layers.-average-pooling3-d.pbtxt) | 12
-rw-r--r--  tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-average.pbtxt (renamed from tensorflow/tools/api/golden/tensorflow.keras.layers.-average.pbtxt) | 10
-rw-r--r--  tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-avg-pool1-d.pbtxt (renamed from tensorflow/tools/api/golden/tensorflow.keras.layers.-avg-pool1-d.pbtxt) | 12
-rw-r--r--  tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-avg-pool2-d.pbtxt (renamed from tensorflow/tools/api/golden/tensorflow.keras.layers.-avg-pool2-d.pbtxt) | 12
-rw-r--r--  tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-avg-pool3-d.pbtxt (renamed from tensorflow/tools/api/golden/tensorflow.keras.layers.-avg-pool3-d.pbtxt) | 12
-rw-r--r--  tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-batch-normalization.pbtxt (renamed from tensorflow/tools/api/golden/tensorflow.keras.layers.-batch-normalization.pbtxt) | 8
-rw-r--r--  tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-bidirectional.pbtxt (renamed from tensorflow/tools/api/golden/tensorflow.keras.layers.-bidirectional.pbtxt) | 12
-rw-r--r--  tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-concatenate.pbtxt (renamed from tensorflow/tools/api/golden/tensorflow.keras.layers.-concatenate.pbtxt) | 10
-rw-r--r--  tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-conv-l-s-t-m2-d.pbtxt (renamed from tensorflow/tools/api/golden/tensorflow.keras.layers.-conv-l-s-t-m2-d.pbtxt) | 12
-rw-r--r--  tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-conv1-d.pbtxt (renamed from tensorflow/tools/api/golden/tensorflow.keras.layers.-conv1-d.pbtxt) | 10
-rw-r--r--  tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-conv2-d-transpose.pbtxt (renamed from tensorflow/tools/api/golden/tensorflow.keras.layers.-conv2-d-transpose.pbtxt) | 12
-rw-r--r--  tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-conv2-d.pbtxt (renamed from tensorflow/tools/api/golden/tensorflow.keras.layers.-conv2-d.pbtxt) | 10
-rw-r--r--  tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-conv3-d-transpose.pbtxt (renamed from tensorflow/tools/api/golden/tensorflow.keras.layers.-conv3-d-transpose.pbtxt) | 12
-rw-r--r--  tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-conv3-d.pbtxt (renamed from tensorflow/tools/api/golden/tensorflow.keras.layers.-conv3-d.pbtxt) | 10
-rw-r--r--  tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-convolution1-d.pbtxt (renamed from tensorflow/tools/api/golden/tensorflow.keras.layers.-convolution1-d.pbtxt) | 10
-rw-r--r--  tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-convolution2-d-transpose.pbtxt (renamed from tensorflow/tools/api/golden/tensorflow.keras.layers.-convolution2-d-transpose.pbtxt) | 12
-rw-r--r--  tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-convolution2-d.pbtxt (renamed from tensorflow/tools/api/golden/tensorflow.keras.layers.-convolution2-d.pbtxt) | 10
-rw-r--r--  tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-convolution3-d-transpose.pbtxt (renamed from tensorflow/tools/api/golden/tensorflow.keras.layers.-convolution3-d-transpose.pbtxt) | 12
-rw-r--r--  tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-convolution3-d.pbtxt (renamed from tensorflow/tools/api/golden/tensorflow.keras.layers.-convolution3-d.pbtxt) | 10
-rw-r--r--  tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-cropping1-d.pbtxt (renamed from tensorflow/tools/api/golden/tensorflow.keras.layers.-cropping1-d.pbtxt) | 10
-rw-r--r--  tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-cropping2-d.pbtxt (renamed from tensorflow/tools/api/golden/tensorflow.keras.layers.-cropping2-d.pbtxt) | 10
-rw-r--r--  tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-cropping3-d.pbtxt (renamed from tensorflow/tools/api/golden/tensorflow.keras.layers.-cropping3-d.pbtxt) | 10
-rw-r--r--  tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-cu-d-n-n-g-r-u.pbtxt | 193
-rw-r--r--  tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-cu-d-n-n-l-s-t-m.pbtxt | 193
-rw-r--r--  tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-dense.pbtxt (renamed from tensorflow/tools/api/golden/tensorflow.keras.layers.-dense.pbtxt) | 8
-rw-r--r--  tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-depthwise-conv2-d.pbtxt (renamed from tensorflow/tools/api/golden/tensorflow.keras.layers.-depthwise-conv2-d.pbtxt) | 12
-rw-r--r--  tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-dot.pbtxt (renamed from tensorflow/tools/api/golden/tensorflow.keras.layers.-dot.pbtxt) | 10
-rw-r--r--  tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-dropout.pbtxt (renamed from tensorflow/tools/api/golden/tensorflow.keras.layers.-dropout.pbtxt) | 10
-rw-r--r--  tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-e-l-u.pbtxt (renamed from tensorflow/tools/api/golden/tensorflow.keras.layers.-e-l-u.pbtxt) | 10
-rw-r--r--  tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-embedding.pbtxt (renamed from tensorflow/tools/api/golden/tensorflow.keras.layers.-embedding.pbtxt) | 8
-rw-r--r--  tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-flatten.pbtxt (renamed from tensorflow/tools/api/golden/tensorflow.keras.layers.-global-max-pool2-d.pbtxt) | 13
-rw-r--r--  tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-g-r-u-cell.pbtxt (renamed from tensorflow/tools/api/golden/tensorflow.keras.layers.-g-r-u-cell.pbtxt) | 12
-rw-r--r--  tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-g-r-u.pbtxt (renamed from tensorflow/tools/api/golden/tensorflow.keras.layers.-g-r-u.pbtxt) | 10
-rw-r--r--  tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-gaussian-dropout.pbtxt (renamed from tensorflow/tools/api/golden/tensorflow.keras.layers.-gaussian-dropout.pbtxt) | 10
-rw-r--r--  tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-gaussian-noise.pbtxt (renamed from tensorflow/tools/api/golden/tensorflow.keras.layers.-gaussian-noise.pbtxt) | 10
-rw-r--r--  tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-global-average-pooling1-d.pbtxt (renamed from tensorflow/tools/api/golden/tensorflow.keras.layers.-global-average-pooling1-d.pbtxt) | 12
-rw-r--r--  tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-global-average-pooling2-d.pbtxt (renamed from tensorflow/tools/api/golden/tensorflow.keras.layers.-global-average-pooling2-d.pbtxt) | 12
-rw-r--r--  tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-global-average-pooling3-d.pbtxt (renamed from tensorflow/tools/api/golden/tensorflow.keras.layers.-global-average-pooling3-d.pbtxt) | 12
-rw-r--r--  tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-global-avg-pool1-d.pbtxt (renamed from tensorflow/tools/api/golden/tensorflow.keras.layers.-global-avg-pool1-d.pbtxt) | 12
-rw-r--r--  tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-global-avg-pool2-d.pbtxt (renamed from tensorflow/tools/api/golden/tensorflow.keras.layers.-global-avg-pool2-d.pbtxt) | 12
-rw-r--r--  tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-global-avg-pool3-d.pbtxt (renamed from tensorflow/tools/api/golden/tensorflow.keras.layers.-global-avg-pool3-d.pbtxt) | 12
-rw-r--r--  tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-global-max-pool1-d.pbtxt (renamed from tensorflow/tools/api/golden/tensorflow.keras.layers.-global-max-pool1-d.pbtxt) | 12
-rw-r--r--  tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-global-max-pool2-d.pbtxt | 176
-rw-r--r--  tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-global-max-pool3-d.pbtxt | 176
-rw-r--r--  tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-global-max-pooling1-d.pbtxt (renamed from tensorflow/tools/api/golden/tensorflow.keras.layers.-global-max-pooling1-d.pbtxt) | 12
-rw-r--r--  tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-global-max-pooling2-d.pbtxt (renamed from tensorflow/tools/api/golden/tensorflow.keras.layers.-global-max-pooling2-d.pbtxt) | 12
-rw-r--r--  tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-global-max-pooling3-d.pbtxt (renamed from tensorflow/tools/api/golden/tensorflow.keras.layers.-global-max-pooling3-d.pbtxt) | 12
-rw-r--r--  tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-input-layer.pbtxt (renamed from tensorflow/tools/api/golden/tensorflow.keras.layers.-input-layer.pbtxt) | 10
-rw-r--r--  tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-input-spec.pbtxt (renamed from tensorflow/tools/api/golden/tensorflow.keras.layers.-input-spec.pbtxt) | 2
-rw-r--r--  tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-l-s-t-m-cell.pbtxt (renamed from tensorflow/tools/api/golden/tensorflow.keras.layers.-l-s-t-m-cell.pbtxt) | 12
-rw-r--r--  tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-l-s-t-m.pbtxt (renamed from tensorflow/tools/api/golden/tensorflow.keras.layers.-l-s-t-m.pbtxt) | 10
-rw-r--r--  tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-lambda.pbtxt (renamed from tensorflow/tools/api/golden/tensorflow.keras.layers.-lambda.pbtxt) | 12
-rw-r--r--  tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-layer.pbtxt (renamed from tensorflow/tools/api/golden/tensorflow.keras.layers.-layer.pbtxt) | 8
-rw-r--r--  tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-leaky-re-l-u.pbtxt (renamed from tensorflow/tools/api/golden/tensorflow.keras.layers.-leaky-re-l-u.pbtxt) | 10
-rw-r--r--  tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-locally-connected1-d.pbtxt (renamed from tensorflow/tools/api/golden/tensorflow.keras.layers.-locally-connected1-d.pbtxt) | 10
-rw-r--r--  tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-locally-connected2-d.pbtxt (renamed from tensorflow/tools/api/golden/tensorflow.keras.layers.-locally-connected2-d.pbtxt) | 10
-rw-r--r--  tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-masking.pbtxt (renamed from tensorflow/tools/api/golden/tensorflow.keras.layers.-masking.pbtxt) | 10
-rw-r--r--  tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-max-pool1-d.pbtxt (renamed from tensorflow/tools/api/golden/tensorflow.keras.layers.-max-pool1-d.pbtxt) | 12
-rw-r--r--  tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-max-pool2-d.pbtxt (renamed from tensorflow/tools/api/golden/tensorflow.keras.layers.-max-pool2-d.pbtxt) | 12
-rw-r--r--  tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-max-pool3-d.pbtxt (renamed from tensorflow/tools/api/golden/tensorflow.keras.layers.-max-pool3-d.pbtxt) | 12
-rw-r--r--  tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-max-pooling1-d.pbtxt (renamed from tensorflow/tools/api/golden/tensorflow.keras.layers.-max-pooling1-d.pbtxt) | 12
-rw-r--r--  tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-max-pooling2-d.pbtxt (renamed from tensorflow/tools/api/golden/tensorflow.keras.layers.-max-pooling2-d.pbtxt) | 12
-rw-r--r--  tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-max-pooling3-d.pbtxt (renamed from tensorflow/tools/api/golden/tensorflow.keras.layers.-max-pooling3-d.pbtxt) | 12
-rw-r--r--  tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-maximum.pbtxt (renamed from tensorflow/tools/api/golden/tensorflow.keras.layers.-maximum.pbtxt) | 10
-rw-r--r--  tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-minimum.pbtxt | 176
-rw-r--r--  tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-multiply.pbtxt (renamed from tensorflow/tools/api/golden/tensorflow.keras.layers.-multiply.pbtxt) | 10
-rw-r--r--  tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-p-re-l-u.pbtxt (renamed from tensorflow/tools/api/golden/tensorflow.keras.layers.-p-re-l-u.pbtxt) | 8
-rw-r--r--  tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-permute.pbtxt (renamed from tensorflow/tools/api/golden/tensorflow.keras.layers.-permute.pbtxt) | 10
-rw-r--r--  tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-r-n-n.pbtxt (renamed from tensorflow/tools/api/golden/tensorflow.keras.layers.-r-n-n.pbtxt) | 8
-rw-r--r--  tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-re-l-u.pbtxt | 175
-rw-r--r--  tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-repeat-vector.pbtxt (renamed from tensorflow/tools/api/golden/tensorflow.keras.layers.-repeat-vector.pbtxt) | 10
-rw-r--r--  tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-reshape.pbtxt (renamed from tensorflow/tools/api/golden/tensorflow.keras.layers.-reshape.pbtxt) | 10
-rw-r--r--  tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-separable-conv1-d.pbtxt (renamed from tensorflow/tools/api/golden/tensorflow.keras.layers.-separable-conv1-d.pbtxt) | 12
-rw-r--r--  tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-separable-conv2-d.pbtxt (renamed from tensorflow/tools/api/golden/tensorflow.keras.layers.-separable-conv2-d.pbtxt) | 14
-rw-r--r--  tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-separable-convolution1-d.pbtxt (renamed from tensorflow/tools/api/golden/tensorflow.keras.layers.-separable-convolution1-d.pbtxt) | 12
-rw-r--r--  tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-separable-convolution2-d.pbtxt (renamed from tensorflow/tools/api/golden/tensorflow.keras.layers.-separable-convolution2-d.pbtxt) | 14
-rw-r--r--  tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-simple-r-n-n-cell.pbtxt (renamed from tensorflow/tools/api/golden/tensorflow.keras.layers.-simple-r-n-n-cell.pbtxt) | 12
-rw-r--r--  tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-simple-r-n-n.pbtxt (renamed from tensorflow/tools/api/golden/tensorflow.keras.layers.-simple-r-n-n.pbtxt) | 10
-rw-r--r--  tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-softmax.pbtxt (renamed from tensorflow/tools/api/golden/tensorflow.keras.layers.-softmax.pbtxt) | 10
-rw-r--r--  tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-spatial-dropout1-d.pbtxt | 176
-rw-r--r--  tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-spatial-dropout2-d.pbtxt (renamed from tensorflow/tools/api/golden/tensorflow.keras.layers.-spatial-dropout2-d.pbtxt) | 12
-rw-r--r--  tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-spatial-dropout3-d.pbtxt (renamed from tensorflow/tools/api/golden/tensorflow.keras.layers.-spatial-dropout3-d.pbtxt) | 12
-rw-r--r--  tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-stacked-r-n-n-cells.pbtxt (renamed from tensorflow/tools/api/golden/tensorflow.keras.layers.-stacked-r-n-n-cells.pbtxt) | 16
-rw-r--r--  tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-subtract.pbtxt | 176
-rw-r--r--  tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-thresholded-re-l-u.pbtxt (renamed from tensorflow/tools/api/golden/tensorflow.keras.layers.-thresholded-re-l-u.pbtxt) | 10
-rw-r--r--  tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-time-distributed.pbtxt (renamed from tensorflow/tools/api/golden/tensorflow.keras.layers.-time-distributed.pbtxt) | 10
-rw-r--r--  tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-up-sampling1-d.pbtxt (renamed from tensorflow/tools/api/golden/tensorflow.keras.layers.-up-sampling1-d.pbtxt) | 10
-rw-r--r--  tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-up-sampling2-d.pbtxt (renamed from tensorflow/tools/api/golden/tensorflow.keras.layers.-up-sampling2-d.pbtxt) | 10
-rw-r--r--  tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-up-sampling3-d.pbtxt (renamed from tensorflow/tools/api/golden/tensorflow.keras.layers.-up-sampling3-d.pbtxt) | 10
-rw-r--r--  tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-wrapper.pbtxt (renamed from tensorflow/tools/api/golden/tensorflow.keras.layers.-wrapper.pbtxt) | 8
-rw-r--r--  tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-zero-padding1-d.pbtxt (renamed from tensorflow/tools/api/golden/tensorflow.keras.layers.-zero-padding1-d.pbtxt) | 10
-rw-r--r--tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-zero-padding2-d.pbtxt (renamed from tensorflow/tools/api/golden/tensorflow.keras.layers.-zero-padding2-d.pbtxt)10
-rw-r--r--tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-zero-padding3-d.pbtxt (renamed from tensorflow/tools/api/golden/tensorflow.keras.layers.-zero-padding3-d.pbtxt)10
-rw-r--r--tensorflow/tools/api/golden/v1/tensorflow.keras.layers.pbtxt (renamed from tensorflow/tools/api/golden/tensorflow.keras.layers.pbtxt)28
-rw-r--r--tensorflow/tools/api/golden/v1/tensorflow.keras.losses.pbtxt (renamed from tensorflow/tools/api/golden/tensorflow.keras.losses.pbtxt)44
-rw-r--r--tensorflow/tools/api/golden/v1/tensorflow.keras.metrics.pbtxt (renamed from tensorflow/tools/api/golden/tensorflow.keras.metrics.pbtxt)50
-rw-r--r--tensorflow/tools/api/golden/v1/tensorflow.keras.models.-model.pbtxt268
-rw-r--r--tensorflow/tools/api/golden/v1/tensorflow.keras.models.-sequential.pbtxt285
-rw-r--r--tensorflow/tools/api/golden/v1/tensorflow.keras.models.pbtxt (renamed from tensorflow/tools/api/golden/tensorflow.keras.models.pbtxt)4
-rw-r--r--tensorflow/tools/api/golden/v1/tensorflow.keras.optimizers.-adadelta.pbtxt (renamed from tensorflow/tools/api/golden/tensorflow.keras.optimizers.-adadelta.pbtxt)4
-rw-r--r--tensorflow/tools/api/golden/v1/tensorflow.keras.optimizers.-adagrad.pbtxt (renamed from tensorflow/tools/api/golden/tensorflow.keras.optimizers.-adagrad.pbtxt)4
-rw-r--r--tensorflow/tools/api/golden/v1/tensorflow.keras.optimizers.-adam.pbtxt (renamed from tensorflow/tools/api/golden/tensorflow.keras.optimizers.-adam.pbtxt)4
-rw-r--r--tensorflow/tools/api/golden/v1/tensorflow.keras.optimizers.-adamax.pbtxt (renamed from tensorflow/tools/api/golden/tensorflow.keras.optimizers.-adamax.pbtxt)4
-rw-r--r--tensorflow/tools/api/golden/v1/tensorflow.keras.optimizers.-nadam.pbtxt (renamed from tensorflow/tools/api/golden/tensorflow.keras.optimizers.-nadam.pbtxt)4
-rw-r--r--tensorflow/tools/api/golden/v1/tensorflow.keras.optimizers.-optimizer.pbtxt (renamed from tensorflow/tools/api/golden/tensorflow.keras.optimizers.-optimizer.pbtxt)2
-rw-r--r--tensorflow/tools/api/golden/v1/tensorflow.keras.optimizers.-r-m-sprop.pbtxt (renamed from tensorflow/tools/api/golden/tensorflow.keras.optimizers.-r-m-sprop.pbtxt)4
-rw-r--r--tensorflow/tools/api/golden/v1/tensorflow.keras.optimizers.-s-g-d.pbtxt (renamed from tensorflow/tools/api/golden/tensorflow.keras.optimizers.-s-g-d.pbtxt)4
-rw-r--r--tensorflow/tools/api/golden/v1/tensorflow.keras.optimizers.pbtxt (renamed from tensorflow/tools/api/golden/tensorflow.keras.optimizers.pbtxt)0
-rw-r--r--tensorflow/tools/api/golden/v1/tensorflow.keras.pbtxt (renamed from tensorflow/tools/api/golden/tensorflow.keras.pbtxt)0
-rw-r--r--tensorflow/tools/api/golden/v1/tensorflow.keras.regularizers.-l1-l2.pbtxt (renamed from tensorflow/tools/api/golden/tensorflow.keras.regularizers.-l1-l2.pbtxt)4
-rw-r--r--tensorflow/tools/api/golden/v1/tensorflow.keras.regularizers.-regularizer.pbtxt (renamed from tensorflow/tools/api/golden/tensorflow.keras.regularizers.-regularizer.pbtxt)2
-rw-r--r--tensorflow/tools/api/golden/v1/tensorflow.keras.regularizers.pbtxt (renamed from tensorflow/tools/api/golden/tensorflow.keras.regularizers.pbtxt)0
-rw-r--r--tensorflow/tools/api/golden/v1/tensorflow.keras.utils.-custom-object-scope.pbtxt (renamed from tensorflow/tools/api/golden/tensorflow.keras.utils.-custom-object-scope.pbtxt)2
-rw-r--r--tensorflow/tools/api/golden/v1/tensorflow.keras.utils.-generator-enqueuer.pbtxt (renamed from tensorflow/tools/api/golden/tensorflow.keras.utils.-generator-enqueuer.pbtxt)4
-rw-r--r--tensorflow/tools/api/golden/v1/tensorflow.keras.utils.-h-d-f5-matrix.pbtxt (renamed from tensorflow/tools/api/golden/tensorflow.keras.utils.-h-d-f5-matrix.pbtxt)2
-rw-r--r--tensorflow/tools/api/golden/v1/tensorflow.keras.utils.-ordered-enqueuer.pbtxt26
-rw-r--r--tensorflow/tools/api/golden/v1/tensorflow.keras.utils.-progbar.pbtxt (renamed from tensorflow/tools/api/golden/tensorflow.keras.utils.-progbar.pbtxt)2
-rw-r--r--tensorflow/tools/api/golden/v1/tensorflow.keras.utils.-sequence-enqueuer.pbtxt (renamed from tensorflow/tools/api/golden/tensorflow.keras.utils.-sequence-enqueuer.pbtxt)2
-rw-r--r--tensorflow/tools/api/golden/v1/tensorflow.keras.utils.-sequence.pbtxt (renamed from tensorflow/tools/api/golden/tensorflow.keras.utils.-sequence.pbtxt)2
-rw-r--r--tensorflow/tools/api/golden/v1/tensorflow.keras.utils.pbtxt (renamed from tensorflow/tools/api/golden/tensorflow.keras.utils.pbtxt)10
-rw-r--r--tensorflow/tools/api/golden/v1/tensorflow.keras.wrappers.pbtxt (renamed from tensorflow/tools/api/golden/tensorflow.keras.wrappers.pbtxt)0
-rw-r--r--tensorflow/tools/api/golden/v1/tensorflow.keras.wrappers.scikit_learn.-keras-classifier.pbtxt (renamed from tensorflow/tools/api/golden/tensorflow.keras.wrappers.scikit_learn.-keras-classifier.pbtxt)4
-rw-r--r--tensorflow/tools/api/golden/v1/tensorflow.keras.wrappers.scikit_learn.-keras-regressor.pbtxt (renamed from tensorflow/tools/api/golden/tensorflow.keras.wrappers.scikit_learn.-keras-regressor.pbtxt)4
-rw-r--r--tensorflow/tools/api/golden/v1/tensorflow.keras.wrappers.scikit_learn.pbtxt (renamed from tensorflow/tools/api/golden/tensorflow.keras.wrappers.scikit_learn.pbtxt)0
-rw-r--r--tensorflow/tools/api/golden/v1/tensorflow.layers.-average-pooling1-d.pbtxt (renamed from tensorflow/tools/api/golden/tensorflow.layers.-average-pooling1-d.pbtxt)12
-rw-r--r--tensorflow/tools/api/golden/v1/tensorflow.layers.-average-pooling2-d.pbtxt (renamed from tensorflow/tools/api/golden/tensorflow.layers.-average-pooling2-d.pbtxt)12
-rw-r--r--tensorflow/tools/api/golden/v1/tensorflow.layers.-average-pooling3-d.pbtxt (renamed from tensorflow/tools/api/golden/tensorflow.layers.-average-pooling3-d.pbtxt)12
-rw-r--r--tensorflow/tools/api/golden/v1/tensorflow.layers.-batch-normalization.pbtxt (renamed from tensorflow/tools/api/golden/tensorflow.layers.-batch-normalization.pbtxt)8
-rw-r--r--tensorflow/tools/api/golden/v1/tensorflow.layers.-conv1-d.pbtxt (renamed from tensorflow/tools/api/golden/tensorflow.layers.-conv1-d.pbtxt)10
-rw-r--r--tensorflow/tools/api/golden/v1/tensorflow.layers.-conv2-d-transpose.pbtxt (renamed from tensorflow/tools/api/golden/tensorflow.layers.-conv2-d-transpose.pbtxt)12
-rw-r--r--tensorflow/tools/api/golden/v1/tensorflow.layers.-conv2-d.pbtxt (renamed from tensorflow/tools/api/golden/tensorflow.layers.-conv2-d.pbtxt)10
-rw-r--r--tensorflow/tools/api/golden/v1/tensorflow.layers.-conv3-d-transpose.pbtxt (renamed from tensorflow/tools/api/golden/tensorflow.layers.-conv3-d-transpose.pbtxt)12
-rw-r--r--tensorflow/tools/api/golden/v1/tensorflow.layers.-conv3-d.pbtxt (renamed from tensorflow/tools/api/golden/tensorflow.layers.-conv3-d.pbtxt)10
-rw-r--r--tensorflow/tools/api/golden/v1/tensorflow.layers.-dense.pbtxt (renamed from tensorflow/tools/api/golden/tensorflow.layers.-dense.pbtxt)8
-rw-r--r--tensorflow/tools/api/golden/v1/tensorflow.layers.-dropout.pbtxt (renamed from tensorflow/tools/api/golden/tensorflow.layers.-dropout.pbtxt)10
-rw-r--r--tensorflow/tools/api/golden/v1/tensorflow.layers.-flatten.pbtxt (renamed from tensorflow/tools/api/golden/tensorflow.layers.-flatten.pbtxt)12
-rw-r--r--tensorflow/tools/api/golden/v1/tensorflow.layers.-input-spec.pbtxt (renamed from tensorflow/tools/api/golden/tensorflow.layers.-input-spec.pbtxt)2
-rw-r--r--tensorflow/tools/api/golden/v1/tensorflow.layers.-layer.pbtxt (renamed from tensorflow/tools/api/golden/tensorflow.layers.-layer.pbtxt)8
-rw-r--r--tensorflow/tools/api/golden/v1/tensorflow.layers.-max-pooling1-d.pbtxt (renamed from tensorflow/tools/api/golden/tensorflow.layers.-max-pooling1-d.pbtxt)12
-rw-r--r--tensorflow/tools/api/golden/v1/tensorflow.layers.-max-pooling2-d.pbtxt (renamed from tensorflow/tools/api/golden/tensorflow.layers.-max-pooling2-d.pbtxt)12
-rw-r--r--tensorflow/tools/api/golden/v1/tensorflow.layers.-max-pooling3-d.pbtxt (renamed from tensorflow/tools/api/golden/tensorflow.layers.-max-pooling3-d.pbtxt)12
-rw-r--r--tensorflow/tools/api/golden/v1/tensorflow.layers.-separable-conv1-d.pbtxt (renamed from tensorflow/tools/api/golden/tensorflow.layers.-separable-conv1-d.pbtxt)12
-rw-r--r--tensorflow/tools/api/golden/v1/tensorflow.layers.-separable-conv2-d.pbtxt (renamed from tensorflow/tools/api/golden/tensorflow.layers.-separable-conv2-d.pbtxt)12
-rw-r--r--tensorflow/tools/api/golden/v1/tensorflow.layers.pbtxt (renamed from tensorflow/tools/api/golden/tensorflow.layers.pbtxt)0
-rw-r--r--tensorflow/tools/api/golden/v1/tensorflow.linalg.-linear-operator-block-diag.__metaclass__.pbtxt14
-rw-r--r--tensorflow/tools/api/golden/v1/tensorflow.linalg.-linear-operator-block-diag.pbtxt134
-rw-r--r--tensorflow/tools/api/golden/v1/tensorflow.linalg.-linear-operator-circulant.__metaclass__.pbtxt14
-rw-r--r--tensorflow/tools/api/golden/v1/tensorflow.linalg.-linear-operator-circulant.pbtxt155
-rw-r--r--tensorflow/tools/api/golden/v1/tensorflow.linalg.-linear-operator-circulant2-d.__metaclass__.pbtxt14
-rw-r--r--tensorflow/tools/api/golden/v1/tensorflow.linalg.-linear-operator-circulant2-d.pbtxt155
-rw-r--r--tensorflow/tools/api/golden/v1/tensorflow.linalg.-linear-operator-circulant3-d.__metaclass__.pbtxt14
-rw-r--r--tensorflow/tools/api/golden/v1/tensorflow.linalg.-linear-operator-circulant3-d.pbtxt155
-rw-r--r--tensorflow/tools/api/golden/v1/tensorflow.linalg.-linear-operator-composition.__metaclass__.pbtxt (renamed from tensorflow/tools/api/golden/tensorflow.linalg.-linear-operator-composition.__metaclass__.pbtxt)0
-rw-r--r--tensorflow/tools/api/golden/v1/tensorflow.linalg.-linear-operator-composition.pbtxt (renamed from tensorflow/tools/api/golden/tensorflow.linalg.-linear-operator-composition.pbtxt)0
-rw-r--r--tensorflow/tools/api/golden/v1/tensorflow.linalg.-linear-operator-diag.__metaclass__.pbtxt (renamed from tensorflow/tools/api/golden/tensorflow.linalg.-linear-operator-diag.__metaclass__.pbtxt)0
-rw-r--r--tensorflow/tools/api/golden/v1/tensorflow.linalg.-linear-operator-diag.pbtxt (renamed from tensorflow/tools/api/golden/tensorflow.linalg.-linear-operator-diag.pbtxt)0
-rw-r--r--tensorflow/tools/api/golden/v1/tensorflow.linalg.-linear-operator-full-matrix.__metaclass__.pbtxt (renamed from tensorflow/tools/api/golden/tensorflow.linalg.-linear-operator-full-matrix.__metaclass__.pbtxt)0
-rw-r--r--tensorflow/tools/api/golden/v1/tensorflow.linalg.-linear-operator-full-matrix.pbtxt (renamed from tensorflow/tools/api/golden/tensorflow.linalg.-linear-operator-full-matrix.pbtxt)0
-rw-r--r--tensorflow/tools/api/golden/v1/tensorflow.linalg.-linear-operator-identity.__metaclass__.pbtxt (renamed from tensorflow/tools/api/golden/tensorflow.linalg.-linear-operator-identity.__metaclass__.pbtxt)0
-rw-r--r--tensorflow/tools/api/golden/v1/tensorflow.linalg.-linear-operator-identity.pbtxt (renamed from tensorflow/tools/api/golden/tensorflow.linalg.-linear-operator-identity.pbtxt)0
-rw-r--r--tensorflow/tools/api/golden/v1/tensorflow.linalg.-linear-operator-kronecker.__metaclass__.pbtxt14
-rw-r--r--tensorflow/tools/api/golden/v1/tensorflow.linalg.-linear-operator-kronecker.pbtxt134
-rw-r--r--tensorflow/tools/api/golden/v1/tensorflow.linalg.-linear-operator-low-rank-update.__metaclass__.pbtxt (renamed from tensorflow/tools/api/golden/tensorflow.linalg.-linear-operator-low-rank-update.__metaclass__.pbtxt)0
-rw-r--r--tensorflow/tools/api/golden/v1/tensorflow.linalg.-linear-operator-low-rank-update.pbtxt (renamed from tensorflow/tools/api/golden/tensorflow.linalg.-linear-operator-low-rank-update.pbtxt)0
-rw-r--r--tensorflow/tools/api/golden/v1/tensorflow.linalg.-linear-operator-lower-triangular.__metaclass__.pbtxt (renamed from tensorflow/tools/api/golden/tensorflow.linalg.-linear-operator-lower-triangular.__metaclass__.pbtxt)0
-rw-r--r--tensorflow/tools/api/golden/v1/tensorflow.linalg.-linear-operator-lower-triangular.pbtxt (renamed from tensorflow/tools/api/golden/tensorflow.linalg.-linear-operator-lower-triangular.pbtxt)0
-rw-r--r--tensorflow/tools/api/golden/v1/tensorflow.linalg.-linear-operator-scaled-identity.__metaclass__.pbtxt (renamed from tensorflow/tools/api/golden/tensorflow.linalg.-linear-operator-scaled-identity.__metaclass__.pbtxt)0
-rw-r--r--tensorflow/tools/api/golden/v1/tensorflow.linalg.-linear-operator-scaled-identity.pbtxt (renamed from tensorflow/tools/api/golden/tensorflow.linalg.-linear-operator-scaled-identity.pbtxt)0
-rw-r--r--tensorflow/tools/api/golden/v1/tensorflow.linalg.-linear-operator-zeros.__metaclass__.pbtxt14
-rw-r--r--tensorflow/tools/api/golden/v1/tensorflow.linalg.-linear-operator-zeros.pbtxt130
-rw-r--r--tensorflow/tools/api/golden/v1/tensorflow.linalg.-linear-operator.__metaclass__.pbtxt (renamed from tensorflow/tools/api/golden/tensorflow.linalg.-linear-operator.__metaclass__.pbtxt)0
-rw-r--r--tensorflow/tools/api/golden/v1/tensorflow.linalg.-linear-operator.pbtxt (renamed from tensorflow/tools/api/golden/tensorflow.linalg.-linear-operator.pbtxt)0
-rw-r--r--tensorflow/tools/api/golden/v1/tensorflow.linalg.pbtxt (renamed from tensorflow/tools/api/golden/tensorflow.linalg.pbtxt)36
-rw-r--r--tensorflow/tools/api/golden/v1/tensorflow.logging.pbtxt (renamed from tensorflow/tools/api/golden/tensorflow.logging.pbtxt)0
-rw-r--r--tensorflow/tools/api/golden/v1/tensorflow.losses.-reduction.pbtxt (renamed from tensorflow/tools/api/golden/tensorflow.losses.-reduction.pbtxt)0
-rw-r--r--tensorflow/tools/api/golden/v1/tensorflow.losses.pbtxt (renamed from tensorflow/tools/api/golden/tensorflow.losses.pbtxt)0
-rw-r--r--tensorflow/tools/api/golden/v1/tensorflow.manip.pbtxt35
-rw-r--r--tensorflow/tools/api/golden/v1/tensorflow.math.pbtxt239
-rw-r--r--tensorflow/tools/api/golden/v1/tensorflow.metrics.pbtxt (renamed from tensorflow/tools/api/golden/tensorflow.metrics.pbtxt)0
-rw-r--r--tensorflow/tools/api/golden/v1/tensorflow.name_scope.pbtxt (renamed from tensorflow/tools/api/golden/tensorflow.name_scope.pbtxt)0
-rw-r--r--tensorflow/tools/api/golden/v1/tensorflow.nn.pbtxt (renamed from tensorflow/tools/api/golden/tensorflow.nn.pbtxt)4
-rw-r--r--tensorflow/tools/api/golden/v1/tensorflow.nn.rnn_cell.-basic-l-s-t-m-cell.pbtxt (renamed from tensorflow/tools/api/golden/tensorflow.nn.rnn_cell.-basic-l-s-t-m-cell.pbtxt)14
-rw-r--r--tensorflow/tools/api/golden/v1/tensorflow.nn.rnn_cell.-basic-r-n-n-cell.pbtxt (renamed from tensorflow/tools/api/golden/tensorflow.nn.rnn_cell.-basic-r-n-n-cell.pbtxt)14
-rw-r--r--tensorflow/tools/api/golden/v1/tensorflow.nn.rnn_cell.-device-wrapper.pbtxt (renamed from tensorflow/tools/api/golden/tensorflow.nn.rnn_cell.-device-wrapper.pbtxt)10
-rw-r--r--tensorflow/tools/api/golden/v1/tensorflow.nn.rnn_cell.-dropout-wrapper.pbtxt (renamed from tensorflow/tools/api/golden/tensorflow.nn.rnn_cell.-dropout-wrapper.pbtxt)10
-rw-r--r--tensorflow/tools/api/golden/v1/tensorflow.nn.rnn_cell.-g-r-u-cell.pbtxt (renamed from tensorflow/tools/api/golden/tensorflow.nn.rnn_cell.-g-r-u-cell.pbtxt)14
-rw-r--r--tensorflow/tools/api/golden/v1/tensorflow.nn.rnn_cell.-l-s-t-m-cell.pbtxt (renamed from tensorflow/tools/api/golden/tensorflow.nn.rnn_cell.-l-s-t-m-cell.pbtxt)14
-rw-r--r--tensorflow/tools/api/golden/v1/tensorflow.nn.rnn_cell.-l-s-t-m-state-tuple.pbtxt (renamed from tensorflow/tools/api/golden/tensorflow.nn.rnn_cell.-l-s-t-m-state-tuple.pbtxt)0
-rw-r--r--tensorflow/tools/api/golden/v1/tensorflow.nn.rnn_cell.-multi-r-n-n-cell.pbtxt (renamed from tensorflow/tools/api/golden/tensorflow.nn.rnn_cell.-multi-r-n-n-cell.pbtxt)10
-rw-r--r--tensorflow/tools/api/golden/v1/tensorflow.nn.rnn_cell.-r-n-n-cell.pbtxt (renamed from tensorflow/tools/api/golden/tensorflow.nn.rnn_cell.-r-n-n-cell.pbtxt)10
-rw-r--r--tensorflow/tools/api/golden/v1/tensorflow.nn.rnn_cell.-residual-wrapper.pbtxt (renamed from tensorflow/tools/api/golden/tensorflow.nn.rnn_cell.-residual-wrapper.pbtxt)10
-rw-r--r--tensorflow/tools/api/golden/v1/tensorflow.nn.rnn_cell.pbtxt (renamed from tensorflow/tools/api/golden/tensorflow.nn.rnn_cell.pbtxt)0
-rw-r--r--tensorflow/tools/api/golden/v1/tensorflow.ones_initializer.pbtxt (renamed from tensorflow/tools/api/golden/tensorflow.ones_initializer.pbtxt)0
-rw-r--r--tensorflow/tools/api/golden/v1/tensorflow.orthogonal_initializer.pbtxt (renamed from tensorflow/tools/api/golden/tensorflow.orthogonal_initializer.pbtxt)0
-rw-r--r--tensorflow/tools/api/golden/v1/tensorflow.pbtxt2255
-rw-r--r--tensorflow/tools/api/golden/v1/tensorflow.profiler.-advice-proto.-checker.pbtxt12
-rw-r--r--tensorflow/tools/api/golden/v1/tensorflow.profiler.-advice-proto.-checkers-entry.pbtxt22
-rw-r--r--tensorflow/tools/api/golden/v1/tensorflow.profiler.-advice-proto.pbtxt41
-rw-r--r--tensorflow/tools/api/golden/v1/tensorflow.profiler.-graph-node-proto.-input-shapes-entry.pbtxt22
-rw-r--r--tensorflow/tools/api/golden/v1/tensorflow.profiler.-graph-node-proto.pbtxt191
-rw-r--r--tensorflow/tools/api/golden/v1/tensorflow.profiler.-multi-graph-node-proto.pbtxt134
-rw-r--r--tensorflow/tools/api/golden/v1/tensorflow.profiler.-op-log-proto.-id-to-string-entry.pbtxt21
-rw-r--r--tensorflow/tools/api/golden/v1/tensorflow.profiler.-op-log-proto.pbtxt38
-rw-r--r--tensorflow/tools/api/golden/v1/tensorflow.profiler.-profile-option-builder.pbtxt (renamed from tensorflow/tools/api/golden/tensorflow.profiler.-profile-option-builder.pbtxt)0
-rw-r--r--tensorflow/tools/api/golden/v1/tensorflow.profiler.-profiler.pbtxt (renamed from tensorflow/tools/api/golden/tensorflow.profiler.-profiler.pbtxt)0
-rw-r--r--tensorflow/tools/api/golden/v1/tensorflow.profiler.pbtxt (renamed from tensorflow/tools/api/golden/tensorflow.profiler.pbtxt)0
-rw-r--r--tensorflow/tools/api/golden/v1/tensorflow.python_io.-t-f-record-compression-type.pbtxt (renamed from tensorflow/tools/api/golden/tensorflow.python_io.-t-f-record-compression-type.pbtxt)0
-rw-r--r--tensorflow/tools/api/golden/v1/tensorflow.python_io.-t-f-record-options.pbtxt (renamed from tensorflow/tools/api/golden/tensorflow.python_io.-t-f-record-options.pbtxt)2
-rw-r--r--tensorflow/tools/api/golden/v1/tensorflow.python_io.-t-f-record-writer.pbtxt (renamed from tensorflow/tools/api/golden/tensorflow.python_io.-t-f-record-writer.pbtxt)0
-rw-r--r--tensorflow/tools/api/golden/v1/tensorflow.python_io.pbtxt (renamed from tensorflow/tools/api/golden/tensorflow.python_io.pbtxt)0
-rw-r--r--tensorflow/tools/api/golden/v1/tensorflow.quantization.pbtxt35
-rw-r--r--tensorflow/tools/api/golden/v1/tensorflow.random_normal_initializer.pbtxt (renamed from tensorflow/tools/api/golden/tensorflow.random_normal_initializer.pbtxt)0
-rw-r--r--tensorflow/tools/api/golden/v1/tensorflow.random_uniform_initializer.pbtxt (renamed from tensorflow/tools/api/golden/tensorflow.random_uniform_initializer.pbtxt)0
-rw-r--r--tensorflow/tools/api/golden/v1/tensorflow.resource_loader.pbtxt (renamed from tensorflow/tools/api/golden/tensorflow.resource_loader.pbtxt)0
-rw-r--r--tensorflow/tools/api/golden/v1/tensorflow.saved_model.builder.-saved-model-builder.pbtxt (renamed from tensorflow/tools/api/golden/tensorflow.saved_model.builder.-saved-model-builder.pbtxt)4
-rw-r--r--tensorflow/tools/api/golden/v1/tensorflow.saved_model.builder.pbtxt (renamed from tensorflow/tools/api/golden/tensorflow.saved_model.builder.pbtxt)0
-rw-r--r--tensorflow/tools/api/golden/v1/tensorflow.saved_model.constants.pbtxt (renamed from tensorflow/tools/api/golden/tensorflow.saved_model.constants.pbtxt)0
-rw-r--r--tensorflow/tools/api/golden/v1/tensorflow.saved_model.loader.pbtxt (renamed from tensorflow/tools/api/golden/tensorflow.saved_model.loader.pbtxt)2
-rw-r--r--tensorflow/tools/api/golden/v1/tensorflow.saved_model.main_op.pbtxt (renamed from tensorflow/tools/api/golden/tensorflow.saved_model.main_op.pbtxt)0
-rw-r--r--tensorflow/tools/api/golden/v1/tensorflow.saved_model.pbtxt (renamed from tensorflow/tools/api/golden/tensorflow.saved_model.pbtxt)0
-rw-r--r--tensorflow/tools/api/golden/v1/tensorflow.saved_model.signature_constants.pbtxt (renamed from tensorflow/tools/api/golden/tensorflow.saved_model.signature_constants.pbtxt)0
-rw-r--r--tensorflow/tools/api/golden/v1/tensorflow.saved_model.signature_def_utils.pbtxt (renamed from tensorflow/tools/api/golden/tensorflow.saved_model.signature_def_utils.pbtxt)0
-rw-r--r--tensorflow/tools/api/golden/v1/tensorflow.saved_model.tag_constants.pbtxt (renamed from tensorflow/tools/api/golden/tensorflow.saved_model.tag_constants.pbtxt)0
-rw-r--r--tensorflow/tools/api/golden/v1/tensorflow.saved_model.utils.pbtxt (renamed from tensorflow/tools/api/golden/tensorflow.saved_model.utils.pbtxt)0
-rw-r--r--tensorflow/tools/api/golden/v1/tensorflow.sets.pbtxt (renamed from tensorflow/tools/api/golden/tensorflow.sets.pbtxt)0
-rw-r--r--tensorflow/tools/api/golden/v1/tensorflow.sparse.pbtxt19
-rw-r--r--tensorflow/tools/api/golden/v1/tensorflow.spectral.pbtxt (renamed from tensorflow/tools/api/golden/tensorflow.spectral.pbtxt)4
-rw-r--r--tensorflow/tools/api/golden/v1/tensorflow.strings.pbtxt51
-rw-r--r--tensorflow/tools/api/golden/v1/tensorflow.summary.-event.pbtxt74
-rw-r--r--tensorflow/tools/api/golden/v1/tensorflow.summary.-file-writer-cache.pbtxt (renamed from tensorflow/tools/api/golden/tensorflow.summary.-file-writer-cache.pbtxt)0
-rw-r--r--tensorflow/tools/api/golden/v1/tensorflow.summary.-file-writer.pbtxt (renamed from tensorflow/tools/api/golden/tensorflow.summary.-file-writer.pbtxt)0
-rw-r--r--tensorflow/tools/api/golden/v1/tensorflow.summary.-session-log.pbtxt44
-rw-r--r--tensorflow/tools/api/golden/v1/tensorflow.summary.-summary-description.pbtxt12
-rw-r--r--tensorflow/tools/api/golden/v1/tensorflow.summary.-summary.-audio.pbtxt36
-rw-r--r--tensorflow/tools/api/golden/v1/tensorflow.summary.-summary.-image.pbtxt30
-rw-r--r--tensorflow/tools/api/golden/v1/tensorflow.summary.-summary.-value.pbtxt74
-rw-r--r--tensorflow/tools/api/golden/v1/tensorflow.summary.-summary.pbtxt144
-rw-r--r--tensorflow/tools/api/golden/v1/tensorflow.summary.-tagged-run-metadata.pbtxt18
-rw-r--r--tensorflow/tools/api/golden/v1/tensorflow.summary.pbtxt (renamed from tensorflow/tools/api/golden/tensorflow.summary.pbtxt)2
-rw-r--r--tensorflow/tools/api/golden/v1/tensorflow.sysconfig.pbtxt (renamed from tensorflow/tools/api/golden/tensorflow.sysconfig.pbtxt)0
-rw-r--r--tensorflow/tools/api/golden/v1/tensorflow.test.-benchmark.pbtxt (renamed from tensorflow/tools/api/golden/tensorflow.test.-benchmark.pbtxt)0
-rw-r--r--tensorflow/tools/api/golden/v1/tensorflow.test.-stub-out-for-testing.pbtxt (renamed from tensorflow/tools/api/golden/tensorflow.test.-stub-out-for-testing.pbtxt)0
-rw-r--r--tensorflow/tools/api/golden/v1/tensorflow.test.pbtxt (renamed from tensorflow/tools/api/golden/tensorflow.test.pbtxt)0
-rw-r--r--tensorflow/tools/api/golden/v1/tensorflow.train.-adadelta-optimizer.pbtxt (renamed from tensorflow/tools/api/golden/tensorflow.train.-adadelta-optimizer.pbtxt)2
-rw-r--r--tensorflow/tools/api/golden/v1/tensorflow.train.-adagrad-d-a-optimizer.pbtxt (renamed from tensorflow/tools/api/golden/tensorflow.train.-adagrad-d-a-optimizer.pbtxt)2
-rw-r--r--tensorflow/tools/api/golden/v1/tensorflow.train.-adagrad-optimizer.pbtxt (renamed from tensorflow/tools/api/golden/tensorflow.train.-adagrad-optimizer.pbtxt)2
-rw-r--r--tensorflow/tools/api/golden/v1/tensorflow.train.-adam-optimizer.pbtxt (renamed from tensorflow/tools/api/golden/tensorflow.train.-adam-optimizer.pbtxt)2
-rw-r--r--tensorflow/tools/api/golden/v1/tensorflow.train.-bytes-list.pbtxt12
-rw-r--r--tensorflow/tools/api/golden/v1/tensorflow.train.-checkpoint-saver-hook.pbtxt (renamed from tensorflow/tools/api/golden/tensorflow.train.-checkpoint-saver-hook.pbtxt)0
-rw-r--r--tensorflow/tools/api/golden/v1/tensorflow.train.-checkpoint-saver-listener.pbtxt (renamed from tensorflow/tools/api/golden/tensorflow.train.-checkpoint-saver-listener.pbtxt)0
-rw-r--r--tensorflow/tools/api/golden/v1/tensorflow.train.-checkpoint.pbtxt27
-rw-r--r--tensorflow/tools/api/golden/v1/tensorflow.train.-chief-session-creator.pbtxt (renamed from tensorflow/tools/api/golden/tensorflow.train.-chief-session-creator.pbtxt)0
-rw-r--r--tensorflow/tools/api/golden/v1/tensorflow.train.-cluster-def.pbtxt13
-rw-r--r--tensorflow/tools/api/golden/v1/tensorflow.train.-cluster-spec.pbtxt (renamed from tensorflow/tools/api/golden/tensorflow.train.-cluster-spec.pbtxt)0
-rw-r--r--tensorflow/tools/api/golden/v1/tensorflow.train.-coordinator.pbtxt (renamed from tensorflow/tools/api/golden/tensorflow.train.-coordinator.pbtxt)0
-rw-r--r--tensorflow/tools/api/golden/v1/tensorflow.train.-example.pbtxt13
-rw-r--r--tensorflow/tools/api/golden/v1/tensorflow.train.-exponential-moving-average.pbtxt (renamed from tensorflow/tools/api/golden/tensorflow.train.-exponential-moving-average.pbtxt)4
-rw-r--r--tensorflow/tools/api/golden/v1/tensorflow.train.-feature-list.pbtxt13
-rw-r--r--tensorflow/tools/api/golden/v1/tensorflow.train.-feature-lists.-feature-list-entry.pbtxt22
-rw-r--r--tensorflow/tools/api/golden/v1/tensorflow.train.-feature-lists.pbtxt32
-rw-r--r--tensorflow/tools/api/golden/v1/tensorflow.train.-feature.pbtxt33
-rw-r--r--tensorflow/tools/api/golden/v1/tensorflow.train.-features.-feature-entry.pbtxt22
-rw-r--r--tensorflow/tools/api/golden/v1/tensorflow.train.-features.pbtxt32
-rw-r--r--tensorflow/tools/api/golden/v1/tensorflow.train.-feed-fn-hook.pbtxt (renamed from tensorflow/tools/api/golden/tensorflow.train.-feed-fn-hook.pbtxt)0
-rw-r--r--tensorflow/tools/api/golden/v1/tensorflow.train.-final-ops-hook.pbtxt (renamed from tensorflow/tools/api/golden/tensorflow.train.-final-ops-hook.pbtxt)0
-rw-r--r--tensorflow/tools/api/golden/v1/tensorflow.train.-float-list.pbtxt15
-rw-r--r--tensorflow/tools/api/golden/v1/tensorflow.train.-ftrl-optimizer.pbtxt (renamed from tensorflow/tools/api/golden/tensorflow.train.-ftrl-optimizer.pbtxt)2
-rw-r--r--tensorflow/tools/api/golden/v1/tensorflow.train.-global-step-waiter-hook.pbtxt (renamed from tensorflow/tools/api/golden/tensorflow.train.-global-step-waiter-hook.pbtxt)0
-rw-r--r--tensorflow/tools/api/golden/v1/tensorflow.train.-gradient-descent-optimizer.pbtxt (renamed from tensorflow/tools/api/golden/tensorflow.train.-gradient-descent-optimizer.pbtxt)2
-rw-r--r--tensorflow/tools/api/golden/v1/tensorflow.train.-int64-list.pbtxt15
-rw-r--r--tensorflow/tools/api/golden/v1/tensorflow.train.-job-def.-tasks-entry.pbtxt21
-rw-r--r--tensorflow/tools/api/golden/v1/tensorflow.train.-job-def.pbtxt37
-rw-r--r--tensorflow/tools/api/golden/v1/tensorflow.train.-logging-tensor-hook.pbtxt (renamed from tensorflow/tools/api/golden/tensorflow.train.-logging-tensor-hook.pbtxt)0
-rw-r--r--tensorflow/tools/api/golden/v1/tensorflow.train.-looper-thread.pbtxt (renamed from tensorflow/tools/api/golden/tensorflow.train.-looper-thread.pbtxt)0
-rw-r--r--tensorflow/tools/api/golden/v1/tensorflow.train.-momentum-optimizer.pbtxt (renamed from tensorflow/tools/api/golden/tensorflow.train.-momentum-optimizer.pbtxt)2
-rw-r--r--tensorflow/tools/api/golden/v1/tensorflow.train.-monitored-session.-step-context.pbtxt (renamed from tensorflow/tools/api/golden/tensorflow.train.-monitored-session.-step-context.pbtxt)0
-rw-r--r--tensorflow/tools/api/golden/v1/tensorflow.train.-monitored-session.pbtxt (renamed from tensorflow/tools/api/golden/tensorflow.train.-monitored-session.pbtxt)0
-rw-r--r--tensorflow/tools/api/golden/v1/tensorflow.train.-nan-loss-during-training-error.pbtxt (renamed from tensorflow/tools/api/golden/tensorflow.train.-nan-loss-during-training-error.pbtxt)0
-rw-r--r--tensorflow/tools/api/golden/v1/tensorflow.train.-nan-tensor-hook.pbtxt (renamed from tensorflow/tools/api/golden/tensorflow.train.-nan-tensor-hook.pbtxt)0
-rw-r--r--tensorflow/tools/api/golden/v1/tensorflow.train.-optimizer.pbtxt (renamed from tensorflow/tools/api/golden/tensorflow.train.-optimizer.pbtxt)2
-rw-r--r--tensorflow/tools/api/golden/v1/tensorflow.train.-profiler-hook.pbtxt (renamed from tensorflow/tools/api/golden/tensorflow.train.-profiler-hook.pbtxt)0
-rw-r--r--tensorflow/tools/api/golden/v1/tensorflow.train.-proximal-adagrad-optimizer.pbtxt (renamed from tensorflow/tools/api/golden/tensorflow.train.-proximal-adagrad-optimizer.pbtxt)2
-rw-r--r--tensorflow/tools/api/golden/v1/tensorflow.train.-proximal-gradient-descent-optimizer.pbtxt (renamed from tensorflow/tools/api/golden/tensorflow.train.-proximal-gradient-descent-optimizer.pbtxt)2
-rw-r--r--tensorflow/tools/api/golden/v1/tensorflow.train.-queue-runner.pbtxt (renamed from tensorflow/tools/api/golden/tensorflow.train.-queue-runner.pbtxt)0
-rw-r--r--tensorflow/tools/api/golden/v1/tensorflow.train.-r-m-s-prop-optimizer.pbtxt (renamed from tensorflow/tools/api/golden/tensorflow.train.-r-m-s-prop-optimizer.pbtxt)2
-rw-r--r--tensorflow/tools/api/golden/v1/tensorflow.train.-saver-def.pbtxt64
-rw-r--r--tensorflow/tools/api/golden/v1/tensorflow.train.-saver.pbtxt (renamed from tensorflow/tools/api/golden/tensorflow.train.-saver.pbtxt)0
-rw-r--r--tensorflow/tools/api/golden/v1/tensorflow.train.-scaffold.pbtxt (renamed from tensorflow/tools/api/golden/tensorflow.train.-scaffold.pbtxt)0
-rw-r--r--tensorflow/tools/api/golden/v1/tensorflow.train.-second-or-step-timer.pbtxt (renamed from tensorflow/tools/api/golden/tensorflow.train.-second-or-step-timer.pbtxt)0
-rw-r--r--tensorflow/tools/api/golden/v1/tensorflow.train.-sequence-example.pbtxt20
-rw-r--r--tensorflow/tools/api/golden/v1/tensorflow.train.-server-def.pbtxt38
-rw-r--r--tensorflow/tools/api/golden/v1/tensorflow.train.-server.pbtxt (renamed from tensorflow/tools/api/golden/tensorflow.train.-server.pbtxt)0
-rw-r--r--tensorflow/tools/api/golden/v1/tensorflow.train.-session-creator.pbtxt (renamed from tensorflow/tools/api/golden/tensorflow.train.-session-creator.pbtxt)0
-rw-r--r--tensorflow/tools/api/golden/v1/tensorflow.train.-session-manager.pbtxt (renamed from tensorflow/tools/api/golden/tensorflow.train.-session-manager.pbtxt)2
-rw-r--r--tensorflow/tools/api/golden/v1/tensorflow.train.-session-run-args.pbtxt (renamed from tensorflow/tools/api/golden/tensorflow.train.-session-run-args.pbtxt)0
-rw-r--r--tensorflow/tools/api/golden/v1/tensorflow.train.-session-run-context.pbtxt (renamed from tensorflow/tools/api/golden/tensorflow.train.-session-run-context.pbtxt)0
-rw-r--r--tensorflow/tools/api/golden/v1/tensorflow.train.-session-run-hook.pbtxt (renamed from tensorflow/tools/api/golden/tensorflow.train.-session-run-hook.pbtxt)0
-rw-r--r--tensorflow/tools/api/golden/v1/tensorflow.train.-session-run-values.pbtxt (renamed from tensorflow/tools/api/golden/tensorflow.train.-session-run-values.pbtxt)0
-rw-r--r--tensorflow/tools/api/golden/v1/tensorflow.train.-singular-monitored-session.-step-context.pbtxt (renamed from tensorflow/tools/api/golden/tensorflow.train.-singular-monitored-session.-step-context.pbtxt)0
-rw-r--r--tensorflow/tools/api/golden/v1/tensorflow.train.-singular-monitored-session.pbtxt (renamed from tensorflow/tools/api/golden/tensorflow.train.-singular-monitored-session.pbtxt)0
-rw-r--r--tensorflow/tools/api/golden/v1/tensorflow.train.-step-counter-hook.pbtxt (renamed from tensorflow/tools/api/golden/tensorflow.train.-step-counter-hook.pbtxt)0
-rw-r--r--tensorflow/tools/api/golden/v1/tensorflow.train.-stop-at-step-hook.pbtxt (renamed from tensorflow/tools/api/golden/tensorflow.train.-stop-at-step-hook.pbtxt)0
-rw-r--r--tensorflow/tools/api/golden/v1/tensorflow.train.-summary-saver-hook.pbtxt (renamed from tensorflow/tools/api/golden/tensorflow.train.-summary-saver-hook.pbtxt)0
-rw-r--r--tensorflow/tools/api/golden/v1/tensorflow.train.-supervisor.pbtxt (renamed from tensorflow/tools/api/golden/tensorflow.train.-supervisor.pbtxt)2
-rw-r--r--tensorflow/tools/api/golden/v1/tensorflow.train.-sync-replicas-optimizer.pbtxt (renamed from tensorflow/tools/api/golden/tensorflow.train.-sync-replicas-optimizer.pbtxt)2
-rw-r--r--tensorflow/tools/api/golden/v1/tensorflow.train.-vocab-info.pbtxt (renamed from tensorflow/tools/api/golden/tensorflow.train.-vocab-info.pbtxt)4
-rw-r--r--tensorflow/tools/api/golden/v1/tensorflow.train.-worker-session-creator.pbtxt (renamed from tensorflow/tools/api/golden/tensorflow.train.-worker-session-creator.pbtxt)0
-rw-r--r--tensorflow/tools/api/golden/v1/tensorflow.train.pbtxt (renamed from tensorflow/tools/api/golden/tensorflow.train.pbtxt)14
-rw-r--r--tensorflow/tools/api/golden/v1/tensorflow.train.queue_runner.-queue-runner.pbtxt (renamed from tensorflow/tools/api/golden/tensorflow.train.queue_runner.-queue-runner.pbtxt)0
-rw-r--r--tensorflow/tools/api/golden/v1/tensorflow.train.queue_runner.pbtxt (renamed from tensorflow/tools/api/golden/tensorflow.train.queue_runner.pbtxt)0
-rw-r--r--tensorflow/tools/api/golden/v1/tensorflow.truncated_normal_initializer.pbtxt (renamed from tensorflow/tools/api/golden/tensorflow.truncated_normal_initializer.pbtxt)0
-rw-r--r--tensorflow/tools/api/golden/v1/tensorflow.uniform_unit_scaling_initializer.pbtxt (renamed from tensorflow/tools/api/golden/tensorflow.uniform_unit_scaling_initializer.pbtxt)0
-rw-r--r--tensorflow/tools/api/golden/v1/tensorflow.variable_scope.pbtxt (renamed from tensorflow/tools/api/golden/tensorflow.variable_scope.pbtxt)0
-rw-r--r--tensorflow/tools/api/golden/v1/tensorflow.variance_scaling_initializer.pbtxt (renamed from tensorflow/tools/api/golden/tensorflow.variance_scaling_initializer.pbtxt)2
-rw-r--r--tensorflow/tools/api/golden/v1/tensorflow.zeros_initializer.pbtxt (renamed from tensorflow/tools/api/golden/tensorflow.zeros_initializer.pbtxt)0
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.-aggregation-method.pbtxt24
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.-attr-value.-list-value.pbtxt70
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.-attr-value.pbtxt151
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.-conditional-accumulator-base.pbtxt29
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.-conditional-accumulator.pbtxt38
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.-config-proto.-device-count-entry.pbtxt21
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.-config-proto.-experimental.pbtxt22
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.-config-proto.pbtxt146
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.-d-type.pbtxt77
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.-device-spec.pbtxt37
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.-dimension.pbtxt25
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.-event.pbtxt74
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.-f-i-f-o-queue.pbtxt66
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.-fixed-len-feature.pbtxt27
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.-fixed-len-sequence-feature.pbtxt31
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.-g-p-u-options.pbtxt92
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.-gradient-tape.pbtxt29
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.-graph-def.pbtxt36
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.-graph-keys.pbtxt140
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.-graph-options.pbtxt67
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.-graph.pbtxt141
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.-histogram-proto.pbtxt54
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.-indexed-slices.pbtxt42
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.-interactive-session.pbtxt51
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.-log-message.pbtxt46
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.-meta-graph-def.-collection-def-entry.pbtxt22
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.-meta-graph-def.-meta-info-def.pbtxt50
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.-meta-graph-def.-signature-def-entry.pbtxt22
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.-meta-graph-def.pbtxt133
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.-name-attr-list.-attr-entry.pbtxt22
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.-name-attr-list.pbtxt38
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.-node-def.-attr-entry.pbtxt22
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.-node-def.pbtxt56
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.-op-error.pbtxt29
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.-operation.pbtxt69
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.-optimizer-options.pbtxt74
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.-padding-f-i-f-o-queue.pbtxt66
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.-priority-queue.pbtxt66
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.-queue-base.pbtxt65
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.-random-shuffle-queue.pbtxt66
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.-register-gradient.pbtxt9
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.-run-metadata.pbtxt27
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.-run-options.-experimental.pbtxt12
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.-run-options.pbtxt83
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.-session-log.pbtxt44
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.-session.pbtxt55
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.-sparse-conditional-accumulator.pbtxt46
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.-sparse-feature.pbtxt35
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.-sparse-tensor-value.pbtxt26
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.-sparse-tensor.pbtxt54
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.-summary-metadata.-plugin-data.pbtxt18
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.-summary-metadata.pbtxt40
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.-summary.-audio.pbtxt36
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.-summary.-image.pbtxt30
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.-summary.-value.pbtxt74
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.-summary.pbtxt144
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.-tensor-array.pbtxt69
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.-tensor-info.-coo-sparse.pbtxt24
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.-tensor-info.pbtxt59
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.-tensor-shape.pbtxt77
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.-tensor.pbtxt58
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.-var-len-feature.pbtxt19
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.-variable-aggregation.pbtxt20
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.-variable-scope.pbtxt105
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.-variable-synchronization.pbtxt20
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.-variable.-save-slice-info.pbtxt17
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.-variable.pbtxt130
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.app.pbtxt11
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.bitwise.pbtxt27
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.compat.pbtxt47
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.constant_initializer.pbtxt18
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.data.-dataset.__metaclass__.pbtxt14
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.data.-dataset.pbtxt121
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.data.-fixed-length-record-dataset.__metaclass__.pbtxt14
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.data.-fixed-length-record-dataset.pbtxt122
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.data.-iterator.pbtxt46
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.data.-t-f-record-dataset.__metaclass__.pbtxt14
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.data.-t-f-record-dataset.pbtxt122
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.data.-text-line-dataset.__metaclass__.pbtxt14
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.data.-text-line-dataset.pbtxt122
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.data.pbtxt23
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.debugging.pbtxt19
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.distributions.-bernoulli.pbtxt143
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.distributions.-beta.pbtxt147
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.distributions.-categorical.pbtxt147
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.distributions.-dirichlet-multinomial.pbtxt147
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.distributions.-dirichlet.pbtxt143
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.distributions.-distribution.pbtxt134
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.distributions.-exponential.pbtxt144
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.distributions.-gamma.pbtxt143
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.distributions.-laplace.pbtxt143
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.distributions.-multinomial.pbtxt147
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.distributions.-normal.pbtxt143
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.distributions.-register-k-l.pbtxt9
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.distributions.-reparameterization-type.pbtxt9
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.distributions.-student-t.pbtxt147
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.distributions.-uniform.pbtxt147
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.distributions.pbtxt75
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.dtypes.pbtxt7
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.errors.-aborted-error.pbtxt30
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.errors.-already-exists-error.pbtxt30
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.errors.-cancelled-error.pbtxt30
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.errors.-data-loss-error.pbtxt30
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.errors.-deadline-exceeded-error.pbtxt30
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.errors.-failed-precondition-error.pbtxt30
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.errors.-internal-error.pbtxt30
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.errors.-invalid-argument-error.pbtxt30
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.errors.-not-found-error.pbtxt30
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.errors.-op-error.pbtxt29
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.errors.-out-of-range-error.pbtxt30
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.errors.-permission-denied-error.pbtxt30
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.errors.-resource-exhausted-error.pbtxt30
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.errors.-unauthenticated-error.pbtxt30
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.errors.-unavailable-error.pbtxt30
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.errors.-unimplemented-error.pbtxt30
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.errors.-unknown-error.pbtxt30
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.errors.pbtxt151
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.errors.raise_exception_on_not_ok_status.pbtxt8
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.estimator.-baseline-classifier.pbtxt62
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.estimator.-baseline-regressor.pbtxt62
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.estimator.-best-exporter.pbtxt18
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.estimator.-boosted-trees-classifier.pbtxt67
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.estimator.-boosted-trees-regressor.pbtxt67
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.estimator.-d-n-n-classifier.pbtxt62
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.estimator.-d-n-n-linear-combined-classifier.pbtxt62
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.estimator.-d-n-n-linear-combined-regressor.pbtxt62
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.estimator.-d-n-n-regressor.pbtxt62
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.estimator.-estimator-spec.pbtxt59
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.estimator.-estimator.pbtxt61
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.estimator.-eval-spec.pbtxt43
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.estimator.-exporter.pbtxt16
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.estimator.-final-exporter.pbtxt18
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.estimator.-latest-exporter.pbtxt18
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.estimator.-linear-classifier.pbtxt62
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.estimator.-linear-regressor.pbtxt62
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.estimator.-mode-keys.pbtxt20
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.estimator.-run-config.pbtxt105
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.estimator.-train-spec.pbtxt27
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.estimator.-vocab-info.pbtxt43
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.estimator.-warm-start-settings.pbtxt31
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.estimator.export.-classification-output.__metaclass__.pbtxt14
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.estimator.export.-classification-output.pbtxt22
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.estimator.export.-export-output.__metaclass__.pbtxt14
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.estimator.export.-export-output.pbtxt12
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.estimator.export.-predict-output.__metaclass__.pbtxt14
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.estimator.export.-predict-output.pbtxt18
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.estimator.export.-regression-output.__metaclass__.pbtxt14
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.estimator.export.-regression-output.pbtxt18
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.estimator.export.-serving-input-receiver.pbtxt27
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.estimator.export.-tensor-serving-input-receiver.pbtxt27
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.estimator.export.pbtxt35
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.estimator.inputs.pbtxt11
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.estimator.pbtxt111
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.feature_column.pbtxt59
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.gfile.-fast-g-file.pbtxt58
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.gfile.-g-file.pbtxt58
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.gfile.-open.pbtxt58
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.gfile.pbtxt63
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.glorot_normal_initializer.pbtxt19
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.glorot_uniform_initializer.pbtxt19
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.graph_util.pbtxt23
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.image.-resize-method.pbtxt24
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.image.pbtxt251
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.initializers.constant.pbtxt18
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.initializers.glorot_normal.pbtxt19
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.initializers.glorot_uniform.pbtxt19
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.initializers.identity.pbtxt18
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.initializers.ones.pbtxt18
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.initializers.orthogonal.pbtxt18
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.initializers.pbtxt79
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.initializers.random_normal.pbtxt (renamed from tensorflow/tools/api/golden/tensorflow.keras.initializers.-random-normal.pbtxt)2
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.initializers.random_uniform.pbtxt (renamed from tensorflow/tools/api/golden/tensorflow.keras.initializers.-random-uniform.pbtxt)2
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.initializers.truncated_normal.pbtxt (renamed from tensorflow/tools/api/golden/tensorflow.keras.initializers.-truncated-normal.pbtxt)2
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.initializers.uniform_unit_scaling.pbtxt18
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.initializers.variance_scaling.pbtxt18
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.initializers.zeros.pbtxt18
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.io.pbtxt43
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.keras.-model.pbtxt268
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.keras.-sequential.pbtxt285
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.keras.activations.pbtxt55
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.keras.backend.name_scope.pbtxt13
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.keras.backend.pbtxt555
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.keras.callbacks.-base-logger.pbtxt42
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.keras.callbacks.-c-s-v-logger.pbtxt42
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.keras.callbacks.-callback.pbtxt41
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.keras.callbacks.-early-stopping.pbtxt42
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.keras.callbacks.-history.pbtxt42
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.keras.callbacks.-lambda-callback.pbtxt42
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.keras.callbacks.-learning-rate-scheduler.pbtxt42
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.keras.callbacks.-model-checkpoint.pbtxt42
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.keras.callbacks.-progbar-logger.pbtxt42
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.keras.callbacks.-reduce-l-r-on-plateau.pbtxt46
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.keras.callbacks.-remote-monitor.pbtxt42
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.keras.callbacks.-tensor-board.pbtxt42
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.keras.callbacks.-terminate-on-na-n.pbtxt42
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.keras.callbacks.pbtxt55
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.keras.constraints.-constraint.pbtxt12
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.keras.constraints.-max-norm.pbtxt14
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.keras.constraints.-min-max-norm.pbtxt14
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.keras.constraints.-non-neg.pbtxt13
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.keras.constraints.-unit-norm.pbtxt14
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.keras.constraints.max_norm.pbtxt14
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.keras.constraints.min_max_norm.pbtxt14
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.keras.constraints.non_neg.pbtxt13
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.keras.constraints.pbtxt51
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.keras.constraints.unit_norm.pbtxt14
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.keras.datasets.boston_housing.pbtxt7
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.keras.datasets.cifar10.pbtxt7
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.keras.datasets.cifar100.pbtxt7
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.keras.datasets.fashion_mnist.pbtxt7
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.keras.datasets.imdb.pbtxt11
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.keras.datasets.mnist.pbtxt7
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.keras.datasets.pbtxt31
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.keras.datasets.reuters.pbtxt11
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.keras.estimator.pbtxt7
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.keras.initializers.-constant.pbtxt18
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.keras.initializers.-identity.pbtxt18
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.keras.initializers.-initializer.pbtxt16
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.keras.initializers.-ones.pbtxt18
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.keras.initializers.-orthogonal.pbtxt18
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.keras.initializers.-random-normal.pbtxt19
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.keras.initializers.-random-uniform.pbtxt19
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.keras.initializers.-truncated-normal.pbtxt19
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.keras.initializers.-variance-scaling.pbtxt18
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.keras.initializers.-zeros.pbtxt18
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.keras.initializers.constant.pbtxt18
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.keras.initializers.glorot_normal.pbtxt19
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.keras.initializers.glorot_uniform.pbtxt19
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.keras.initializers.identity.pbtxt18
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.keras.initializers.normal.pbtxt19
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.keras.initializers.ones.pbtxt18
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.keras.initializers.orthogonal.pbtxt18
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.keras.initializers.pbtxt119
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.keras.initializers.random_normal.pbtxt19
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.keras.initializers.random_uniform.pbtxt19
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.keras.initializers.truncated_normal.pbtxt19
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.keras.initializers.uniform.pbtxt19
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.keras.initializers.zeros.pbtxt18
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-activation.pbtxt175
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-activity-regularization.pbtxt175
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-add.pbtxt176
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-alpha-dropout.pbtxt175
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-average-pooling1-d.pbtxt176
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-average-pooling2-d.pbtxt176
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-average-pooling3-d.pbtxt176
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-average.pbtxt176
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-avg-pool1-d.pbtxt176
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-avg-pool2-d.pbtxt176
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-avg-pool3-d.pbtxt176
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-batch-normalization.pbtxt175
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-bidirectional.pbtxt188
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-concatenate.pbtxt176
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-conv-l-s-t-m2-d.pbtxt273
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-conv1-d.pbtxt176
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-conv2-d-transpose.pbtxt177
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-conv2-d.pbtxt176
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-conv3-d-transpose.pbtxt177
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-conv3-d.pbtxt176
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-convolution1-d.pbtxt176
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-convolution2-d-transpose.pbtxt177
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-convolution2-d.pbtxt176
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-convolution3-d-transpose.pbtxt177
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-convolution3-d.pbtxt176
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-cropping1-d.pbtxt175
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-cropping2-d.pbtxt175
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-cropping3-d.pbtxt175
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-cu-d-n-n-g-r-u.pbtxt193
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-cu-d-n-n-l-s-t-m.pbtxt193
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-dense.pbtxt175
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-depthwise-conv2-d.pbtxt177
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-dot.pbtxt176
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-dropout.pbtxt175
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-e-l-u.pbtxt175
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-embedding.pbtxt175
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-flatten.pbtxt (renamed from tensorflow/tools/api/golden/tensorflow.keras.layers.-global-max-pool3-d.pbtxt)13
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-g-r-u-cell.pbtxt179
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-g-r-u.pbtxt256
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-gaussian-dropout.pbtxt (renamed from tensorflow/tools/api/golden/tensorflow.keras.layers.-spatial-dropout1-d.pbtxt)15
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-gaussian-noise.pbtxt175
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-global-average-pooling1-d.pbtxt176
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-global-average-pooling2-d.pbtxt176
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-global-average-pooling3-d.pbtxt176
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-global-avg-pool1-d.pbtxt176
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-global-avg-pool2-d.pbtxt176
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-global-avg-pool3-d.pbtxt176
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-global-max-pool1-d.pbtxt176
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-global-max-pool2-d.pbtxt176
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-global-max-pool3-d.pbtxt176
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-global-max-pooling1-d.pbtxt176
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-global-max-pooling2-d.pbtxt176
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-global-max-pooling3-d.pbtxt176
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-input-layer.pbtxt175
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-input-spec.pbtxt9
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-l-s-t-m-cell.pbtxt179
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-l-s-t-m.pbtxt256
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-lambda.pbtxt175
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-layer.pbtxt174
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-leaky-re-l-u.pbtxt175
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-locally-connected1-d.pbtxt175
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-locally-connected2-d.pbtxt175
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-masking.pbtxt175
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-max-pool1-d.pbtxt176
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-max-pool2-d.pbtxt176
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-max-pool3-d.pbtxt176
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-max-pooling1-d.pbtxt176
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-max-pooling2-d.pbtxt176
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-max-pooling3-d.pbtxt176
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-maximum.pbtxt176
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-minimum.pbtxt176
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-multiply.pbtxt176
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-p-re-l-u.pbtxt175
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-permute.pbtxt (renamed from tensorflow/tools/api/golden/tensorflow.keras.layers.-flatten.pbtxt)14
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-r-n-n.pbtxt187
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-re-l-u.pbtxt175
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-repeat-vector.pbtxt175
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-reshape.pbtxt175
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-separable-conv1-d.pbtxt177
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-separable-conv2-d.pbtxt177
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-separable-convolution1-d.pbtxt177
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-separable-convolution2-d.pbtxt177
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-simple-r-n-n-cell.pbtxt179
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-simple-r-n-n.pbtxt244
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-softmax.pbtxt175
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-spatial-dropout1-d.pbtxt176
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-spatial-dropout2-d.pbtxt176
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-spatial-dropout3-d.pbtxt176
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-stacked-r-n-n-cells.pbtxt187
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-subtract.pbtxt176
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-thresholded-re-l-u.pbtxt175
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-time-distributed.pbtxt180
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-up-sampling1-d.pbtxt175
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-up-sampling2-d.pbtxt175
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-up-sampling3-d.pbtxt175
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-wrapper.pbtxt179
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-zero-padding1-d.pbtxt175
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-zero-padding2-d.pbtxt175
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-zero-padding3-d.pbtxt175
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.keras.layers.pbtxt435
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.keras.losses.pbtxt115
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.keras.metrics.pbtxt127
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.keras.models.-model.pbtxt268
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.keras.models.-sequential.pbtxt285
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.keras.models.pbtxt35
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.keras.optimizers.-adadelta.pbtxt34
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.keras.optimizers.-adagrad.pbtxt34
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.keras.optimizers.-adam.pbtxt34
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.keras.optimizers.-adamax.pbtxt34
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.keras.optimizers.-nadam.pbtxt34
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.keras.optimizers.-optimizer.pbtxt33
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.keras.optimizers.-r-m-sprop.pbtxt34
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.keras.optimizers.-s-g-d.pbtxt34
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.keras.optimizers.pbtxt47
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.keras.pbtxt83
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.keras.regularizers.-l1-l2.pbtxt18
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.keras.regularizers.-regularizer.pbtxt12
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.keras.regularizers.pbtxt35
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.keras.utils.-custom-object-scope.pbtxt9
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.keras.utils.-generator-enqueuer.pbtxt26
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.keras.utils.-h-d-f5-matrix.pbtxt29
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.keras.utils.-ordered-enqueuer.pbtxt26
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.keras.utils.-progbar.pbtxt17
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.keras.utils.-sequence-enqueuer.pbtxt24
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.keras.utils.-sequence.pbtxt12
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.keras.utils.pbtxt75
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.keras.wrappers.pbtxt7
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.keras.wrappers.scikit_learn.-keras-classifier.pbtxt42
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.keras.wrappers.scikit_learn.-keras-regressor.pbtxt38
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.keras.wrappers.scikit_learn.pbtxt11
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.layers.-average-pooling1-d.pbtxt186
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.layers.-average-pooling2-d.pbtxt186
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.layers.-average-pooling3-d.pbtxt186
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.layers.-batch-normalization.pbtxt185
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.layers.-conv1-d.pbtxt186
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.layers.-conv2-d-transpose.pbtxt187
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.layers.-conv2-d.pbtxt186
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.layers.-conv3-d-transpose.pbtxt187
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.layers.-conv3-d.pbtxt186
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.layers.-dense.pbtxt185
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.layers.-dropout.pbtxt185
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.layers.-flatten.pbtxt185
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.layers.-input-spec.pbtxt9
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.layers.-layer.pbtxt183
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.layers.-max-pooling1-d.pbtxt186
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.layers.-max-pooling2-d.pbtxt186
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.layers.-max-pooling3-d.pbtxt186
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.layers.-separable-conv1-d.pbtxt187
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.layers.-separable-conv2-d.pbtxt187
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.layers.pbtxt147
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.linalg.-linear-operator-block-diag.__metaclass__.pbtxt14
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.linalg.-linear-operator-block-diag.pbtxt134
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.linalg.-linear-operator-circulant.__metaclass__.pbtxt14
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.linalg.-linear-operator-circulant.pbtxt155
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.linalg.-linear-operator-circulant2-d.__metaclass__.pbtxt14
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.linalg.-linear-operator-circulant2-d.pbtxt155
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.linalg.-linear-operator-circulant3-d.__metaclass__.pbtxt14
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.linalg.-linear-operator-circulant3-d.pbtxt155
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.linalg.-linear-operator-composition.__metaclass__.pbtxt14
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.linalg.-linear-operator-composition.pbtxt134
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.linalg.-linear-operator-diag.__metaclass__.pbtxt14
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.linalg.-linear-operator-diag.pbtxt134
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.linalg.-linear-operator-full-matrix.__metaclass__.pbtxt14
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.linalg.-linear-operator-full-matrix.pbtxt130
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.linalg.-linear-operator-identity.__metaclass__.pbtxt14
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.linalg.-linear-operator-identity.pbtxt131
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.linalg.-linear-operator-kronecker.__metaclass__.pbtxt14
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.linalg.-linear-operator-kronecker.pbtxt134
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.linalg.-linear-operator-low-rank-update.__metaclass__.pbtxt14
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.linalg.-linear-operator-low-rank-update.pbtxt154
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.linalg.-linear-operator-lower-triangular.__metaclass__.pbtxt14
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.linalg.-linear-operator-lower-triangular.pbtxt130
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.linalg.-linear-operator-scaled-identity.__metaclass__.pbtxt14
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.linalg.-linear-operator-scaled-identity.pbtxt135
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.linalg.-linear-operator-zeros.__metaclass__.pbtxt14
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.linalg.-linear-operator-zeros.pbtxt130
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.linalg.-linear-operator.__metaclass__.pbtxt14
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.linalg.-linear-operator.pbtxt129
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.linalg.pbtxt175
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.logging.pbtxt83
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.losses.-reduction.pbtxt40
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.losses.pbtxt71
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.manip.pbtxt35
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.math.pbtxt239
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.metrics.pbtxt135
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.name_scope.pbtxt13
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.nn.pbtxt359
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.nn.rnn_cell.-basic-l-s-t-m-cell.pbtxt202
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.nn.rnn_cell.-basic-r-n-n-cell.pbtxt202
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.nn.rnn_cell.-device-wrapper.pbtxt201
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.nn.rnn_cell.-dropout-wrapper.pbtxt205
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.nn.rnn_cell.-g-r-u-cell.pbtxt202
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.nn.rnn_cell.-l-s-t-m-cell.pbtxt202
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.nn.rnn_cell.-l-s-t-m-state-tuple.pbtxt27
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.nn.rnn_cell.-multi-r-n-n-cell.pbtxt201
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.nn.rnn_cell.-r-n-n-cell.pbtxt200
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.nn.rnn_cell.-residual-wrapper.pbtxt201
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.nn.rnn_cell.pbtxt43
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.ones_initializer.pbtxt18
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.orthogonal_initializer.pbtxt18
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.pbtxt (renamed from tensorflow/tools/api/golden/tensorflow.pbtxt)172
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.profiler.-advice-proto.-checker.pbtxt12
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.profiler.-advice-proto.-checkers-entry.pbtxt22
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.profiler.-advice-proto.pbtxt41
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.profiler.-graph-node-proto.-input-shapes-entry.pbtxt22
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.profiler.-graph-node-proto.pbtxt191
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.profiler.-multi-graph-node-proto.pbtxt134
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.profiler.-op-log-proto.-id-to-string-entry.pbtxt21
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.profiler.-op-log-proto.pbtxt38
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.profiler.-profile-option-builder.pbtxt93
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.profiler.-profiler.pbtxt37
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.profiler.pbtxt39
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.python_io.-t-f-record-compression-type.pbtxt20
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.python_io.-t-f-record-options.pbtxt17
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.python_io.-t-f-record-writer.pbtxt21
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.python_io.pbtxt19
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.quantization.pbtxt35
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.random_normal_initializer.pbtxt18
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.random_uniform_initializer.pbtxt18
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.resource_loader.pbtxt23
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.saved_model.builder.-saved-model-builder.pbtxt21
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.saved_model.builder.pbtxt7
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.saved_model.constants.pbtxt39
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.saved_model.loader.pbtxt11
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.saved_model.main_op.pbtxt11
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.saved_model.pbtxt39
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.saved_model.signature_constants.pbtxt47
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.saved_model.signature_def_utils.pbtxt23
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.saved_model.tag_constants.pbtxt19
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.saved_model.utils.pbtxt11
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.sets.pbtxt19
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.sparse.pbtxt19
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.spectral.pbtxt59
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.strings.pbtxt51
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.summary.-event.pbtxt74
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.summary.-file-writer-cache.pbtxt16
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.summary.-file-writer.pbtxt50
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.summary.-session-log.pbtxt44
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.summary.-summary-description.pbtxt12
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.summary.-summary.-audio.pbtxt36
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.summary.-summary.-image.pbtxt30
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.summary.-summary.-value.pbtxt74
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.summary.-summary.pbtxt144
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.summary.-tagged-run-metadata.pbtxt18
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.summary.pbtxt67
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.sysconfig.pbtxt19
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.test.-benchmark.pbtxt21
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.test.-stub-out-for-testing.pbtxt28
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.test.pbtxt59
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.train.-adadelta-optimizer.pbtxt51
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.train.-adagrad-d-a-optimizer.pbtxt51
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.train.-adagrad-optimizer.pbtxt51
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.train.-adam-optimizer.pbtxt51
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.train.-bytes-list.pbtxt12
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.train.-checkpoint-saver-hook.pbtxt30
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.train.-checkpoint-saver-listener.pbtxt24
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.train.-checkpoint.pbtxt27
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.train.-chief-session-creator.pbtxt14
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.train.-cluster-def.pbtxt13
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.train.-cluster-spec.pbtxt37
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.train.-coordinator.pbtxt45
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.train.-example.pbtxt13
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.train.-exponential-moving-average.pbtxt29
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.train.-feature-list.pbtxt13
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.train.-feature-lists.-feature-list-entry.pbtxt22
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.train.-feature-lists.pbtxt32
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.train.-feature.pbtxt33
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.train.-features.-feature-entry.pbtxt22
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.train.-features.pbtxt32
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.train.-feed-fn-hook.pbtxt30
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.train.-final-ops-hook.pbtxt34
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.train.-float-list.pbtxt15
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.train.-ftrl-optimizer.pbtxt51
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.train.-global-step-waiter-hook.pbtxt30
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.train.-gradient-descent-optimizer.pbtxt51
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.train.-int64-list.pbtxt15
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.train.-job-def.-tasks-entry.pbtxt21
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.train.-job-def.pbtxt37
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.train.-logging-tensor-hook.pbtxt30
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.train.-looper-thread.pbtxt73
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.train.-momentum-optimizer.pbtxt51
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.train.-monitored-session.-step-context.pbtxt21
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.train.-monitored-session.pbtxt34
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.train.-nan-loss-during-training-error.pbtxt16
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.train.-nan-tensor-hook.pbtxt30
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.train.-optimizer.pbtxt50
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.train.-profiler-hook.pbtxt30
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.train.-proximal-adagrad-optimizer.pbtxt51
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.train.-proximal-gradient-descent-optimizer.pbtxt51
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.train.-r-m-s-prop-optimizer.pbtxt51
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.train.-saver-def.pbtxt64
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.train.-saver.pbtxt53
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.train.-scaffold.pbtxt53
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.train.-second-or-step-timer.pbtxt26
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.train.-sequence-example.pbtxt20
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.train.-server-def.pbtxt38
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.train.-server.pbtxt29
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.train.-session-creator.pbtxt12
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.train.-session-manager.pbtxt21
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.train.-session-run-args.pbtxt27
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.train.-session-run-context.pbtxt25
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.train.-session-run-hook.pbtxt28
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.train.-session-run-values.pbtxt27
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.train.-singular-monitored-session.-step-context.pbtxt21
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.train.-singular-monitored-session.pbtxt38
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.train.-step-counter-hook.pbtxt30
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.train.-stop-at-step-hook.pbtxt30
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.train.-summary-saver-hook.pbtxt30
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.train.-supervisor.pbtxt153
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.train.-sync-replicas-optimizer.pbtxt63
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.train.-vocab-info.pbtxt43
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.train.-worker-session-creator.pbtxt14
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.train.pbtxt391
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.truncated_normal_initializer.pbtxt18
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.uniform_unit_scaling_initializer.pbtxt18
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.variable_scope.pbtxt9
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.variance_scaling_initializer.pbtxt18
-rw-r--r--tensorflow/tools/api/golden/v2/tensorflow.zeros_initializer.pbtxt18
-rw-r--r--tensorflow/tools/api/lib/api_objects.proto11
-rw-r--r--tensorflow/tools/api/lib/python_object_to_proto_visitor.py18
-rw-r--r--tensorflow/tools/api/tests/BUILD8
-rw-r--r--tensorflow/tools/api/tests/api_compatibility_test.py124
-rw-r--r--tensorflow/tools/benchmark/benchmark_model.cc14
-rw-r--r--tensorflow/tools/benchmark/benchmark_model_test.cc55
-rw-r--r--tensorflow/tools/ci_build/Dockerfile.cmake2
-rw-r--r--tensorflow/tools/ci_build/Dockerfile.cpu.ppc64le20
-rw-r--r--tensorflow/tools/ci_build/Dockerfile.gpu8
-rw-r--r--tensorflow/tools/ci_build/Dockerfile.gpu.ppc64le33
-rw-r--r--tensorflow/tools/ci_build/Dockerfile.rbe.cpu4
-rw-r--r--tensorflow/tools/ci_build/Dockerfile.rbe.cuda9.0-cudnn7-ubuntu14.0483
-rw-r--r--tensorflow/tools/ci_build/Dockerfile.rbe.gpu6
-rw-r--r--tensorflow/tools/ci_build/README.md2
-rwxr-xr-xtensorflow/tools/ci_build/builds/android.sh8
-rwxr-xr-xtensorflow/tools/ci_build/builds/pip.sh13
-rwxr-xr-xtensorflow/tools/ci_build/builds/run_pip_tests.sh9
-rwxr-xr-xtensorflow/tools/ci_build/builds/test_user_ops.sh40
-rwxr-xr-xtensorflow/tools/ci_build/builds/with_the_same_user2
-rwxr-xr-xtensorflow/tools/ci_build/ci_build.sh14
-rwxr-xr-xtensorflow/tools/ci_build/ci_parameterized_build.sh134
-rwxr-xr-xtensorflow/tools/ci_build/ci_sanity.sh12
-rwxr-xr-xtensorflow/tools/ci_build/copy_binary.py3
-rwxr-xr-xtensorflow/tools/ci_build/gpu_build/parallel_gpu_execute.sh85
-rwxr-xr-xtensorflow/tools/ci_build/install/install_bazel.sh2
-rwxr-xr-xtensorflow/tools/ci_build/install/install_bazel_from_source.sh40
-rwxr-xr-xtensorflow/tools/ci_build/install/install_buildifier_from_source.sh30
-rwxr-xr-xtensorflow/tools/ci_build/install/install_deb_packages.sh6
-rwxr-xr-xtensorflow/tools/ci_build/install/install_golang_ppc64le.sh22
-rwxr-xr-xtensorflow/tools/ci_build/install/install_hdf5_ppc64le.sh30
-rwxr-xr-xtensorflow/tools/ci_build/install/install_openblas_ppc64le.sh29
-rwxr-xr-xtensorflow/tools/ci_build/install/install_pip_packages.sh41
-rwxr-xr-xtensorflow/tools/ci_build/install/install_pip_packages_remote.sh6
-rwxr-xr-xtensorflow/tools/ci_build/install/install_proto3.sh2
-rwxr-xr-xtensorflow/tools/ci_build/install/install_python3.5_pip_packages.sh17
-rwxr-xr-xtensorflow/tools/ci_build/install/install_python3.6_pip_packages.sh15
-rwxr-xr-xtensorflow/tools/ci_build/linux/cpu/run_cc_core.sh2
-rwxr-xr-xtensorflow/tools/ci_build/linux/cpu/run_mkl.sh7
-rwxr-xr-xtensorflow/tools/ci_build/linux/cpu/run_py2_core.sh2
-rwxr-xr-xtensorflow/tools/ci_build/linux/cpu/run_py3_contrib.sh35
-rwxr-xr-xtensorflow/tools/ci_build/linux/cpu/run_py3_core.sh2
-rwxr-xr-xtensorflow/tools/ci_build/linux/gpu/run_cc_core.sh1
-rwxr-xr-xtensorflow/tools/ci_build/linux/gpu/run_mkl.sh47
-rwxr-xr-xtensorflow/tools/ci_build/linux/gpu/run_py3_core.sh1
-rwxr-xr-xtensorflow/tools/ci_build/linux/libtensorflow_docker.sh7
-rwxr-xr-xtensorflow/tools/ci_build/linux/mkl/basic-mkl-gpu-test.sh29
-rwxr-xr-xtensorflow/tools/ci_build/linux/mkl/basic-mkl-test.sh29
-rwxr-xr-xtensorflow/tools/ci_build/linux/mkl/build-dev-container.sh103
-rwxr-xr-xtensorflow/tools/ci_build/linux/ppc64le/cpu/run_py2.sh37
-rwxr-xr-xtensorflow/tools/ci_build/linux/ppc64le/cpu/run_py3.sh37
-rwxr-xr-xtensorflow/tools/ci_build/linux/ppc64le/gpu/run_py2.sh44
-rwxr-xr-xtensorflow/tools/ci_build/linux/ppc64le/gpu/run_py3.sh44
-rwxr-xr-xtensorflow/tools/ci_build/osx/libtensorflow_cpu.sh1
-rwxr-xr-xtensorflow/tools/ci_build/osx/libtensorflow_gpu.sh1
-rwxr-xr-xtensorflow/tools/ci_build/pi/build_raspberry_pi.sh12
-rwxr-xr-xtensorflow/tools/ci_build/update_version.py47
-rw-r--r--tensorflow/tools/ci_build/windows/bazel/bazel_test_lib.sh132
-rw-r--r--tensorflow/tools/ci_build/windows/bazel/common_env.sh18
-rw-r--r--tensorflow/tools/ci_build/windows/cpu/bazel/run_cc_test_windows.sh2
-rw-r--r--tensorflow/tools/ci_build/windows/cpu/pip/build_tf_windows.sh80
-rw-r--r--tensorflow/tools/ci_build/windows/gpu/bazel/run_cc_test_windows.sh2
-rw-r--r--tensorflow/tools/ci_build/windows/gpu/cmake/run_build.bat1
-rw-r--r--tensorflow/tools/ci_build/windows/gpu/cmake/run_py.bat6
-rw-r--r--tensorflow/tools/ci_build/windows/gpu/pip/build_tf_windows.sh89
-rwxr-xr-xtensorflow/tools/ci_build/windows/libtensorflow_cpu.sh2
-rwxr-xr-xtensorflow/tools/ci_build/xla/linux/gpu/run_py3.sh3
-rw-r--r--tensorflow/tools/common/BUILD17
-rw-r--r--tensorflow/tools/common/public_api.py8
-rw-r--r--tensorflow/tools/common/test_module1.py31
-rw-r--r--tensorflow/tools/common/test_module2.py29
-rw-r--r--tensorflow/tools/common/traverse_test.py15
-rw-r--r--tensorflow/tools/compatibility/BUILD57
-rw-r--r--tensorflow/tools/compatibility/ast_edits.py502
-rw-r--r--tensorflow/tools/compatibility/renames_v2.py135
-rw-r--r--tensorflow/tools/compatibility/testdata/test_file_v0_11.py16
-rw-r--r--tensorflow/tools/compatibility/testdata/test_file_v1_10.py34
-rw-r--r--tensorflow/tools/compatibility/tf_upgrade.py486
-rw-r--r--tensorflow/tools/compatibility/tf_upgrade_test.py5
-rw-r--r--tensorflow/tools/compatibility/tf_upgrade_v2.py147
-rw-r--r--tensorflow/tools/compatibility/tf_upgrade_v2_test.py96
-rw-r--r--tensorflow/tools/compatibility/update/BUILD15
-rw-r--r--tensorflow/tools/compatibility/update/generate_v2_renames_map.py103
-rw-r--r--tensorflow/tools/def_file_filter/def_file_filter.py.tpl1
-rw-r--r--tensorflow/tools/def_file_filter/def_file_filter_configure.bzl38
-rw-r--r--tensorflow/tools/dist_test/README.md2
-rwxr-xr-xtensorflow/tools/dist_test/build_server.sh2
-rwxr-xr-xtensorflow/tools/dist_test/local_test.sh14
-rwxr-xr-xtensorflow/tools/dist_test/remote_test.sh11
-rw-r--r--tensorflow/tools/docker/Dockerfile3
-rw-r--r--tensorflow/tools/docker/Dockerfile.devel12
-rw-r--r--tensorflow/tools/docker/Dockerfile.devel-cpu-mkl83
-rw-r--r--tensorflow/tools/docker/Dockerfile.devel-gpu35
-rwxr-xr-xtensorflow/tools/docker/Dockerfile.devel-mkl143
-rwxr-xr-xtensorflow/tools/docker/Dockerfile.devel-mkl-horovod168
-rw-r--r--tensorflow/tools/docker/Dockerfile.gpu11
-rwxr-xr-xtensorflow/tools/docker/Dockerfile.mkl77
-rwxr-xr-xtensorflow/tools/docker/Dockerfile.mkl-horovod111
-rw-r--r--tensorflow/tools/docker/README.md25
-rw-r--r--tensorflow/tools/docker/jupyter_notebook_config.py2
-rw-r--r--tensorflow/tools/docker/notebooks/1_hello_tensorflow.ipynb2
-rwxr-xr-xtensorflow/tools/docker/parameterized_docker_build.sh166
-rw-r--r--tensorflow/tools/dockerfiles/README.md67
-rw-r--r--tensorflow/tools/dockerfiles/assembler.Dockerfile30
-rw-r--r--tensorflow/tools/dockerfiles/assembler.py554
-rw-r--r--tensorflow/tools/dockerfiles/bashrc50
-rw-r--r--tensorflow/tools/dockerfiles/dockerfiles/cpu-devel-jupyter.Dockerfile100
-rw-r--r--tensorflow/tools/dockerfiles/dockerfiles/cpu-devel.Dockerfile89
-rw-r--r--tensorflow/tools/dockerfiles/dockerfiles/cpu-jupyter.Dockerfile69
-rw-r--r--tensorflow/tools/dockerfiles/dockerfiles/cpu.Dockerfile58
-rw-r--r--tensorflow/tools/dockerfiles/dockerfiles/nvidia-devel-jupyter.Dockerfile126
-rw-r--r--tensorflow/tools/dockerfiles/dockerfiles/nvidia-devel.Dockerfile115
-rw-r--r--tensorflow/tools/dockerfiles/dockerfiles/nvidia-jupyter.Dockerfile95
-rw-r--r--tensorflow/tools/dockerfiles/dockerfiles/nvidia.Dockerfile84
-rw-r--r--tensorflow/tools/dockerfiles/partials/bazel.partial.Dockerfile13
-rw-r--r--tensorflow/tools/dockerfiles/partials/jupyter.partial.Dockerfile8
-rw-r--r--tensorflow/tools/dockerfiles/partials/nvidia-devel.partial.Dockerfile49
-rw-r--r--tensorflow/tools/dockerfiles/partials/nvidia.partial.Dockerfile28
-rw-r--r--tensorflow/tools/dockerfiles/partials/python.partial.Dockerfile12
-rw-r--r--tensorflow/tools/dockerfiles/partials/shell.partial.Dockerfile2
-rw-r--r--tensorflow/tools/dockerfiles/partials/tensorflow.partial.Dockerfile2
-rw-r--r--tensorflow/tools/dockerfiles/partials/ubuntu-devel.partial.Dockerfile24
-rw-r--r--tensorflow/tools/dockerfiles/partials/ubuntu.partial.Dockerfile2
-rw-r--r--tensorflow/tools/dockerfiles/spec.yml195
-rw-r--r--tensorflow/tools/docs/BUILD35
-rw-r--r--tensorflow/tools/docs/doc_controls.py319
-rw-r--r--tensorflow/tools/docs/doc_controls_test.py220
-rw-r--r--tensorflow/tools/docs/doc_generator_visitor.py66
-rw-r--r--tensorflow/tools/docs/doc_generator_visitor_test.py233
-rw-r--r--tensorflow/tools/docs/generate.py4
-rw-r--r--tensorflow/tools/docs/generate_lib.py258
-rw-r--r--tensorflow/tools/docs/generate_lib_test.py123
-rw-r--r--tensorflow/tools/docs/parser.py248
-rw-r--r--tensorflow/tools/docs/parser_test.py346
-rw-r--r--tensorflow/tools/docs/pretty_docs.py135
-rw-r--r--tensorflow/tools/docs/py_guide_parser.py3
-rwxr-xr-xtensorflow/tools/git/gen_git_source.py44
-rw-r--r--tensorflow/tools/graph_transforms/README.md2
-rw-r--r--tensorflow/tools/graph_transforms/fold_constants_lib.cc26
-rw-r--r--tensorflow/tools/graph_transforms/fold_constants_lib.h6
-rw-r--r--tensorflow/tools/graph_transforms/fold_constants_test.cc46
-rw-r--r--tensorflow/tools/graph_transforms/fold_old_batch_norms.cc3
-rw-r--r--tensorflow/tools/graph_transforms/fold_old_batch_norms_test.cc2
-rw-r--r--tensorflow/tools/graph_transforms/freeze_requantization_ranges.cc5
-rw-r--r--tensorflow/tools/graph_transforms/sparsify_gather_test.cc4
-rw-r--r--tensorflow/tools/graph_transforms/transform_graph.cc17
-rw-r--r--tensorflow/tools/graph_transforms/transform_utils.cc15
-rw-r--r--tensorflow/tools/lib_package/BUILD77
-rw-r--r--tensorflow/tools/lib_package/README.md8
-rw-r--r--tensorflow/tools/pip_package/BUILD109
-rw-r--r--tensorflow/tools/pip_package/MANIFEST.in1
-rwxr-xr-xtensorflow/tools/pip_package/build_pip_package.sh183
-rw-r--r--tensorflow/tools/pip_package/pip_smoke_test.py71
-rw-r--r--tensorflow/tools/pip_package/setup.py72
-rw-r--r--tensorflow/tools/proto_text/BUILD9
-rw-r--r--tensorflow/tools/proto_text/gen_proto_text_functions.cc7
-rw-r--r--tensorflow/tools/proto_text/gen_proto_text_functions_lib.cc28
-rw-r--r--tensorflow/tools/proto_text/gen_proto_text_functions_lib.h6
-rw-r--r--tensorflow/tools/proto_text/gen_proto_text_functions_lib_test.cc5
-rw-r--r--tensorflow/tools/quantization/quantize_graph_test.py12
-rw-r--r--tensorflow/tools/test/check_futures_test.py3
-rw-r--r--tensorflow/tools/test/upload_test_benchmarks.py1
1382 files changed, 63600 insertions, 9682 deletions
diff --git a/tensorflow/tools/api/generator/BUILD b/tensorflow/tools/api/generator/BUILD
deleted file mode 100644
index a1c569951e..0000000000
--- a/tensorflow/tools/api/generator/BUILD
+++ /dev/null
@@ -1,136 +0,0 @@
-# Description:
-# Scripts used to generate TensorFlow Python API.
-
-licenses(["notice"]) # Apache 2.0
-
-exports_files(["LICENSE"])
-
-py_binary(
- name = "create_python_api",
- srcs = ["create_python_api.py"],
- srcs_version = "PY2AND3",
- deps = [
- "//tensorflow/python",
- ],
-)
-
-py_test(
- name = "create_python_api_test",
- srcs = ["create_python_api_test.py"],
- srcs_version = "PY2AND3",
- deps = [
- ":create_python_api",
- "//tensorflow/python:client_testlib",
- ],
-)
-
-genrule(
- name = "python_api_gen",
- # List of API files. This list should include a file name for
- # every module exported using tf_export. For example, if an op is decorated
- # with @tf_export('module1.module2', 'module3'), then outs should include
- # api/module1/module2/__init__.py and api/module3/__init__.py.
- # keep sorted
- outs = [
- # BEGIN GENERATED FILES
- "api/__init__.py",
- "api/app/__init__.py",
- "api/bitwise/__init__.py",
- "api/compat/__init__.py",
- "api/contrib/__init__.py",
- "api/contrib/stat_summarizer/__init__.py",
- "api/data/__init__.py",
- "api/distributions/__init__.py",
- "api/distributions/bijectors/__init__.py",
- "api/errors/__init__.py",
- "api/estimator/__init__.py",
- "api/estimator/export/__init__.py",
- "api/estimator/inputs/__init__.py",
- "api/feature_column/__init__.py",
- "api/gfile/__init__.py",
- "api/graph_util/__init__.py",
- "api/image/__init__.py",
- "api/initializers/__init__.py",
- "api/keras/__init__.py",
- "api/keras/activations/__init__.py",
- "api/keras/applications/__init__.py",
- "api/keras/applications/densenet/__init__.py",
- "api/keras/applications/inception_resnet_v2/__init__.py",
- "api/keras/applications/inception_v3/__init__.py",
- "api/keras/applications/mobilenet/__init__.py",
- "api/keras/applications/nasnet/__init__.py",
- "api/keras/applications/resnet50/__init__.py",
- "api/keras/applications/vgg16/__init__.py",
- "api/keras/applications/vgg19/__init__.py",
- "api/keras/applications/xception/__init__.py",
- "api/keras/backend/__init__.py",
- "api/keras/callbacks/__init__.py",
- "api/keras/constraints/__init__.py",
- "api/keras/datasets/__init__.py",
- "api/keras/datasets/boston_housing/__init__.py",
- "api/keras/datasets/cifar10/__init__.py",
- "api/keras/datasets/cifar100/__init__.py",
- "api/keras/datasets/fashion_mnist/__init__.py",
- "api/keras/datasets/imdb/__init__.py",
- "api/keras/datasets/mnist/__init__.py",
- "api/keras/datasets/reuters/__init__.py",
- "api/keras/estimator/__init__.py",
- "api/keras/initializers/__init__.py",
- "api/keras/layers/__init__.py",
- "api/keras/losses/__init__.py",
- "api/keras/metrics/__init__.py",
- "api/keras/models/__init__.py",
- "api/keras/optimizers/__init__.py",
- "api/keras/preprocessing/__init__.py",
- "api/keras/preprocessing/image/__init__.py",
- "api/keras/preprocessing/sequence/__init__.py",
- "api/keras/preprocessing/text/__init__.py",
- "api/keras/regularizers/__init__.py",
- "api/keras/utils/__init__.py",
- "api/keras/wrappers/__init__.py",
- "api/keras/wrappers/scikit_learn/__init__.py",
- "api/layers/__init__.py",
- "api/linalg/__init__.py",
- "api/logging/__init__.py",
- "api/losses/__init__.py",
- "api/manip/__init__.py",
- "api/math/__init__.py",
- "api/metrics/__init__.py",
- "api/nn/__init__.py",
- "api/nn/rnn_cell/__init__.py",
- "api/profiler/__init__.py",
- "api/python_io/__init__.py",
- "api/resource_loader/__init__.py",
- "api/saved_model/__init__.py",
- "api/saved_model/builder/__init__.py",
- "api/saved_model/constants/__init__.py",
- "api/saved_model/loader/__init__.py",
- "api/saved_model/main_op/__init__.py",
- "api/saved_model/signature_constants/__init__.py",
- "api/saved_model/signature_def_utils/__init__.py",
- "api/saved_model/tag_constants/__init__.py",
- "api/saved_model/utils/__init__.py",
- "api/sets/__init__.py",
- "api/spectral/__init__.py",
- "api/summary/__init__.py",
- "api/sysconfig/__init__.py",
- "api/test/__init__.py",
- "api/train/__init__.py",
- "api/train/queue_runner/__init__.py",
- "api/user_ops/__init__.py",
- # END GENERATED FILES
- ],
- cmd = "$(location create_python_api) $(OUTS)",
- tools = ["create_python_api"],
-)
-
-py_library(
- name = "python_api",
- srcs = [":python_api_gen"],
- srcs_version = "PY2AND3",
- visibility = ["//tensorflow:__subpackages__"],
- deps = [
- "//tensorflow/contrib:contrib_py", # keep
- "//tensorflow/python", # keep
- ],
-)
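The genrule comment above ties every entry in outs to a name exported with tf_export somewhere in the Python sources. A minimal sketch of that mapping, using hypothetical module and op names (the decorator import path is the one create_python_api.py below uses):

from tensorflow.python.util.tf_export import tf_export

# Exporting one symbol under two API paths means both generated packages must
# appear in outs: api/module1/module2/__init__.py and api/module3/__init__.py.
@tf_export('module1.module2.my_op', 'module3.my_op')
def my_op():
  pass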
diff --git a/tensorflow/tools/api/generator/create_python_api.py b/tensorflow/tools/api/generator/create_python_api.py
deleted file mode 100644
index c7748f5b7a..0000000000
--- a/tensorflow/tools/api/generator/create_python_api.py
+++ /dev/null
@@ -1,291 +0,0 @@
-# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# =============================================================================
-"""Generates and prints out imports and constants for new TensorFlow python api.
-"""
-from __future__ import absolute_import
-from __future__ import division
-from __future__ import print_function
-
-import argparse
-import collections
-import os
-import sys
-
-from tensorflow.python.util import tf_decorator
-
-
-_API_CONSTANTS_ATTR = '_tf_api_constants'
-_API_NAMES_ATTR = '_tf_api_names'
-_API_DIR = '/api/'
-_OUTPUT_MODULE = 'tensorflow.tools.api.generator.api'
-_GENERATED_FILE_HEADER = """\"\"\"Imports for Python API.
-
-This file is MACHINE GENERATED! Do not edit.
-Generated by: tensorflow/tools/api/generator/create_python_api.py script.
-\"\"\"
-"""
-
-
-class SymbolExposedTwiceError(Exception):
- """Raised when different symbols are exported with the same name."""
- pass
-
-
-def format_import(source_module_name, source_name, dest_name):
- """Formats import statement.
-
- Args:
- source_module_name: (string) Source module to import from.
- source_name: (string) Source symbol name to import.
- dest_name: (string) Destination alias name.
-
- Returns:
- An import statement string.
- """
- if source_module_name:
- if source_name == dest_name:
- return 'from %s import %s' % (source_module_name, source_name)
- else:
- return 'from %s import %s as %s' % (
- source_module_name, source_name, dest_name)
- else:
- if source_name == dest_name:
- return 'import %s' % source_name
- else:
- return 'import %s as %s' % (source_name, dest_name)
-
-
-class _ModuleInitCodeBuilder(object):
- """Builds a map from module name to imports included in that module."""
-
- def __init__(self):
- self.module_imports = collections.defaultdict(
- lambda: collections.defaultdict(set))
- self._dest_import_to_id = collections.defaultdict(int)
- # Names that start with underscore in the root module.
- self._underscore_names_in_root = []
-
- def add_import(
- self, symbol_id, dest_module_name, source_module_name, source_name,
- dest_name):
- """Adds this import to module_imports.
-
- Args:
- symbol_id: (number) Unique identifier of the symbol to import.
- dest_module_name: (string) Module name to add import to.
- source_module_name: (string) Module to import from.
- source_name: (string) Name of the symbol to import.
- dest_name: (string) Import the symbol using this name.
-
- Raises:
- SymbolExposedTwiceError: Raised when an import with the same
- dest_name has already been added to dest_module_name.
- """
- import_str = format_import(source_module_name, source_name, dest_name)
-
- # Check if we are trying to expose two different symbols with same name.
- full_api_name = dest_name
- if dest_module_name:
- full_api_name = dest_module_name + '.' + full_api_name
- if (full_api_name in self._dest_import_to_id and
- symbol_id != self._dest_import_to_id[full_api_name] and
- symbol_id != -1):
- raise SymbolExposedTwiceError(
- 'Trying to export multiple symbols with the same name: %s.' %
- full_api_name)
- self._dest_import_to_id[full_api_name] = symbol_id
-
- if not dest_module_name and dest_name.startswith('_'):
- self._underscore_names_in_root.append(dest_name)
-
- # The same symbol can be available in multiple modules.
- # We store all possible ways of importing this symbol and later pick just
- # one.
- self.module_imports[dest_module_name][full_api_name].add(import_str)
-
- def build(self):
- """Get a map from destination module to __init__.py code for that module.
-
- Returns:
- A dictionary where
- key: (string) destination module (e.g. tf or tf.consts).
- value: (string) text that should be in __init__.py files for
- corresponding modules.
- """
- module_text_map = {}
- for dest_module, dest_name_to_imports in self.module_imports.items():
- # Sort all possible imports for a symbol and pick the first one.
- imports_list = [
- sorted(imports)[0]
- for _, imports in dest_name_to_imports.items()]
- module_text_map[dest_module] = '\n'.join(sorted(imports_list))
-
- # Expose exported symbols with underscores in root module
- # since we import from it using * import.
- underscore_names_str = ', '.join(
- '\'%s\'' % name for name in self._underscore_names_in_root)
- module_text_map[''] += '''
-_names_with_underscore = [%s]
-__all__ = [s for s in dir() if not s.startswith('_')]
-__all__.extend([s for s in _names_with_underscore])
-''' % underscore_names_str
-
- return module_text_map
-
-
-def get_api_init_text():
- """Get a map from destination module to __init__.py code for that module.
-
- Returns:
- A dictionary where
- key: (string) destination module (e.g. tf or tf.consts).
- value: (string) text that should be in __init__.py files for
- corresponding modules.
- """
- module_code_builder = _ModuleInitCodeBuilder()
-
- # Traverse over everything imported above. Specifically,
- # we want to traverse over TensorFlow Python modules.
- for module in sys.modules.values():
- # Only look at tensorflow modules.
- if (not module or not hasattr(module, "__name__") or
- 'tensorflow.' not in module.__name__):
- continue
- # Do not generate __init__.py files for contrib modules for now.
- if '.contrib.' in module.__name__ or module.__name__.endswith('.contrib'):
- continue
-
- for module_contents_name in dir(module):
- attr = getattr(module, module_contents_name)
-
- # If attr is _tf_api_constants attribute, then add the constants.
- if module_contents_name == _API_CONSTANTS_ATTR:
- for exports, value in attr:
- for export in exports:
- names = export.split('.')
- dest_module = '.'.join(names[:-1])
- module_code_builder.add_import(
- -1, dest_module, module.__name__, value, names[-1])
- continue
-
- _, attr = tf_decorator.unwrap(attr)
- # If attr is a symbol with _tf_api_names attribute, then
- # add import for it.
- if hasattr(attr, '__dict__') and _API_NAMES_ATTR in attr.__dict__:
- for export in attr._tf_api_names: # pylint: disable=protected-access
- names = export.split('.')
- dest_module = '.'.join(names[:-1])
- module_code_builder.add_import(
- id(attr), dest_module, module.__name__, module_contents_name,
- names[-1])
-
- # Import all required modules in their parent modules.
- # For example, if we import 'foo.bar.Value', then we also
- # import 'bar' in 'foo'.
- imported_modules = set(module_code_builder.module_imports.keys())
- for module in imported_modules:
- if not module:
- continue
- module_split = module.split('.')
- parent_module = '' # we import submodules in their parent_module
-
- for submodule_index in range(len(module_split)):
- import_from = _OUTPUT_MODULE
- if submodule_index > 0:
- parent_module += ('.' + module_split[submodule_index-1] if parent_module
- else module_split[submodule_index-1])
- import_from += '.' + parent_module
- module_code_builder.add_import(
- -1, parent_module, import_from,
- module_split[submodule_index], module_split[submodule_index])
-
- return module_code_builder.build()
-
-
-def create_api_files(output_files):
- """Creates __init__.py files for the Python API.
-
- Args:
- output_files: List of __init__.py file paths to create.
- Each file must be under api/ directory.
-
- Raises:
- ValueError: if an output file is not under api/ directory,
- or output_files list is missing a required file.
- """
- module_name_to_file_path = {}
- for output_file in output_files:
- # Convert path separators to '/' for easier parsing below.
- normalized_output_file = output_file.replace(os.sep, '/')
- if _API_DIR not in output_file:
- raise ValueError(
- 'Output files must be in api/ directory, found %s.' % output_file)
- # Get the module name that corresponds to output_file.
- # First get module directory under _API_DIR.
- module_dir = os.path.dirname(
- normalized_output_file[
- normalized_output_file.rfind(_API_DIR)+len(_API_DIR):])
- # Convert / to .
- module_name = module_dir.replace('/', '.').strip('.')
- module_name_to_file_path[module_name] = os.path.normpath(output_file)
-
- # Create file for each expected output in genrule.
- for module, file_path in module_name_to_file_path.items():
- if not os.path.isdir(os.path.dirname(file_path)):
- os.makedirs(os.path.dirname(file_path))
- open(file_path, 'a').close()
-
- module_text_map = get_api_init_text()
-
- # Add imports to output files.
- missing_output_files = []
- for module, text in module_text_map.items():
- # Make sure genrule output file list is in sync with API exports.
- if module not in module_name_to_file_path:
- module_file_path = '"api/%s/__init__.py"' % (
- module.replace('.', '/'))
- missing_output_files.append(module_file_path)
- continue
- with open(module_name_to_file_path[module], 'w') as fp:
- fp.write(_GENERATED_FILE_HEADER + text)
-
- if missing_output_files:
- raise ValueError(
- 'Missing outputs for python_api_gen genrule:\n%s.'
- ' Make sure all required outputs are in the '
- 'tensorflow/tools/api/generator/BUILD file.' %
- ',\n'.join(sorted(missing_output_files)))
-
-
-def main(output_files):
- create_api_files(output_files)
-
-if __name__ == '__main__':
- parser = argparse.ArgumentParser()
- parser.add_argument(
- 'outputs', metavar='O', type=str, nargs='+',
- help='If a single file is passed in, then we assume it contains a '
- 'semicolon-separated list of Python files that we expect this script to '
- 'output. If multiple files are passed in, then we assume output files '
- 'are listed directly as arguments.')
- args = parser.parse_args()
- if len(args.outputs) == 1:
- # If we only get a single argument, then it must be a file containing
- # list of outputs.
- with open(args.outputs[0]) as output_list_file:
- outputs = [line.strip() for line in output_list_file.read().split(';')]
- else:
- outputs = args.outputs
- main(outputs)
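For orientation, a small sketch of how the removed generator assembles import lines, run against the module as it existed before this deletion (the import path is the same one the test file below uses); the trailing comments show the expected output:

from tensorflow.tools.api.generator import create_python_api as cpa

# format_import produces one of three shapes of import statement:
print(cpa.format_import('tf.foo', 'bar', 'bar'))  # from tf.foo import bar
print(cpa.format_import('tf.foo', 'bar', 'baz'))  # from tf.foo import bar as baz
print(cpa.format_import('', 'foo', 'baz'))        # import foo as baz

# get_api_init_text splits each exported name into (dest_module, dest_name),
# exactly as in the loop above; 'module1.module2.my_op' is a hypothetical name.
names = 'module1.module2.my_op'.split('.')
print('.'.join(names[:-1]), names[-1])            # module1.module2 my_op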
diff --git a/tensorflow/tools/api/generator/create_python_api_test.py b/tensorflow/tools/api/generator/create_python_api_test.py
deleted file mode 100644
index 218c812045..0000000000
--- a/tensorflow/tools/api/generator/create_python_api_test.py
+++ /dev/null
@@ -1,86 +0,0 @@
-# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# =============================================================================
-"""Tests for create_python_api."""
-
-from __future__ import absolute_import
-from __future__ import division
-from __future__ import print_function
-
-import imp
-import sys
-
-from tensorflow.python.platform import test
-from tensorflow.python.util.tf_export import tf_export
-from tensorflow.tools.api.generator import create_python_api
-
-
-@tf_export('test_op', 'test_op1')
-def test_op():
- pass
-
-
-@tf_export('TestClass', 'NewTestClass')
-class TestClass(object):
- pass
-
-
-_TEST_CONSTANT = 5
-_MODULE_NAME = 'test.tensorflow.test_module'
-
-
-class CreatePythonApiTest(test.TestCase):
-
- def setUp(self):
- # Add fake op to a module that has 'tensorflow' in the name.
- sys.modules[_MODULE_NAME] = imp.new_module(_MODULE_NAME)
- setattr(sys.modules[_MODULE_NAME], 'test_op', test_op)
- setattr(sys.modules[_MODULE_NAME], 'TestClass', TestClass)
- test_op.__module__ = _MODULE_NAME
- TestClass.__module__ = _MODULE_NAME
- tf_export('consts._TEST_CONSTANT').export_constant(
- _MODULE_NAME, '_TEST_CONSTANT')
-
- def tearDown(self):
- del sys.modules[_MODULE_NAME]
-
- def testFunctionImportIsAdded(self):
- imports = create_python_api.get_api_init_text()
- expected_import = (
- 'from test.tensorflow.test_module import test_op as test_op1')
- self.assertTrue(
- expected_import in str(imports),
- msg='%s not in %s' % (expected_import, str(imports)))
-
- expected_import = 'from test.tensorflow.test_module import test_op'
- self.assertTrue(
- expected_import in str(imports),
- msg='%s not in %s' % (expected_import, str(imports)))
-
- def testClassImportIsAdded(self):
- imports = create_python_api.get_api_init_text()
- expected_import = 'from test.tensorflow.test_module import TestClass'
- self.assertTrue(
- expected_import in str(imports),
- msg='%s not in %s' % (expected_import, str(imports)))
-
- def testConstantIsAdded(self):
- imports = create_python_api.get_api_init_text()
- expected = 'from test.tensorflow.test_module import _TEST_CONSTANT'
- self.assertTrue(expected in str(imports),
- msg='%s not in %s' % (expected, str(imports)))
-
-
-if __name__ == '__main__':
- test.main()
diff --git a/tensorflow/tools/api/golden/BUILD b/tensorflow/tools/api/golden/BUILD
index ebdf42df2c..4389a999e7 100644
--- a/tensorflow/tools/api/golden/BUILD
+++ b/tensorflow/tools/api/golden/BUILD
@@ -7,6 +7,11 @@ package(
licenses(["notice"]) # Apache 2.0
filegroup(
- name = "api_golden",
- srcs = glob(["*.pbtxt"]),
+ name = "api_golden_v1",
+ srcs = glob(["v1/*.pbtxt"]),
+)
+
+filegroup(
+ name = "api_golden_v2",
+ srcs = glob(["v2/*.pbtxt"]),
)
diff --git a/tensorflow/tools/api/golden/tensorflow.-attr-value.-list-value.pbtxt b/tensorflow/tools/api/golden/tensorflow.-attr-value.-list-value.pbtxt
deleted file mode 100644
index 0fb1aaba28..0000000000
--- a/tensorflow/tools/api/golden/tensorflow.-attr-value.-list-value.pbtxt
+++ /dev/null
@@ -1,108 +0,0 @@
-path: "tensorflow.AttrValue.ListValue"
-tf_class {
- is_instance: "<class \'tensorflow.core.framework.attr_value_pb2.ListValue\'>"
- is_instance: "<type \'google.protobuf.pyext._message.CMessage\'>"
- member {
- name: "B_FIELD_NUMBER"
- mtype: "<type \'int\'>"
- }
- member {
- name: "DESCRIPTOR"
- mtype: "<type \'google.protobuf.pyext._message.MessageDescriptor\'>"
- }
- member {
- name: "Extensions"
- mtype: "<type \'getset_descriptor\'>"
- }
- member {
- name: "FUNC_FIELD_NUMBER"
- mtype: "<type \'int\'>"
- }
- member {
- name: "F_FIELD_NUMBER"
- mtype: "<type \'int\'>"
- }
- member {
- name: "I_FIELD_NUMBER"
- mtype: "<type \'int\'>"
- }
- member {
- name: "SHAPE_FIELD_NUMBER"
- mtype: "<type \'int\'>"
- }
- member {
- name: "S_FIELD_NUMBER"
- mtype: "<type \'int\'>"
- }
- member {
- name: "TENSOR_FIELD_NUMBER"
- mtype: "<type \'int\'>"
- }
- member {
- name: "TYPE_FIELD_NUMBER"
- mtype: "<type \'int\'>"
- }
- member_method {
- name: "ByteSize"
- }
- member_method {
- name: "Clear"
- }
- member_method {
- name: "ClearExtension"
- }
- member_method {
- name: "ClearField"
- }
- member_method {
- name: "CopyFrom"
- }
- member_method {
- name: "DiscardUnknownFields"
- }
- member_method {
- name: "FindInitializationErrors"
- }
- member_method {
- name: "FromString"
- }
- member_method {
- name: "HasExtension"
- }
- member_method {
- name: "HasField"
- }
- member_method {
- name: "IsInitialized"
- }
- member_method {
- name: "ListFields"
- }
- member_method {
- name: "MergeFrom"
- }
- member_method {
- name: "MergeFromString"
- }
- member_method {
- name: "ParseFromString"
- }
- member_method {
- name: "RegisterExtension"
- }
- member_method {
- name: "SerializePartialToString"
- }
- member_method {
- name: "SerializeToString"
- }
- member_method {
- name: "SetInParent"
- }
- member_method {
- name: "WhichOneof"
- }
- member_method {
- name: "__init__"
- }
-}
diff --git a/tensorflow/tools/api/golden/tensorflow.-attr-value.pbtxt b/tensorflow/tools/api/golden/tensorflow.-attr-value.pbtxt
deleted file mode 100644
index e7a3a1f02f..0000000000
--- a/tensorflow/tools/api/golden/tensorflow.-attr-value.pbtxt
+++ /dev/null
@@ -1,120 +0,0 @@
-path: "tensorflow.AttrValue"
-tf_class {
- is_instance: "<class \'tensorflow.core.framework.attr_value_pb2.AttrValue\'>"
- is_instance: "<type \'google.protobuf.pyext._message.CMessage\'>"
- member {
- name: "B_FIELD_NUMBER"
- mtype: "<type \'int\'>"
- }
- member {
- name: "DESCRIPTOR"
- mtype: "<type \'google.protobuf.pyext._message.MessageDescriptor\'>"
- }
- member {
- name: "Extensions"
- mtype: "<type \'getset_descriptor\'>"
- }
- member {
- name: "FUNC_FIELD_NUMBER"
- mtype: "<type \'int\'>"
- }
- member {
- name: "F_FIELD_NUMBER"
- mtype: "<type \'int\'>"
- }
- member {
- name: "I_FIELD_NUMBER"
- mtype: "<type \'int\'>"
- }
- member {
- name: "LIST_FIELD_NUMBER"
- mtype: "<type \'int\'>"
- }
- member {
- name: "ListValue"
- mtype: "<class \'google.protobuf.pyext.cpp_message.GeneratedProtocolMessageType\'>"
- }
- member {
- name: "PLACEHOLDER_FIELD_NUMBER"
- mtype: "<type \'int\'>"
- }
- member {
- name: "SHAPE_FIELD_NUMBER"
- mtype: "<type \'int\'>"
- }
- member {
- name: "S_FIELD_NUMBER"
- mtype: "<type \'int\'>"
- }
- member {
- name: "TENSOR_FIELD_NUMBER"
- mtype: "<type \'int\'>"
- }
- member {
- name: "TYPE_FIELD_NUMBER"
- mtype: "<type \'int\'>"
- }
- member_method {
- name: "ByteSize"
- }
- member_method {
- name: "Clear"
- }
- member_method {
- name: "ClearExtension"
- }
- member_method {
- name: "ClearField"
- }
- member_method {
- name: "CopyFrom"
- }
- member_method {
- name: "DiscardUnknownFields"
- }
- member_method {
- name: "FindInitializationErrors"
- }
- member_method {
- name: "FromString"
- }
- member_method {
- name: "HasExtension"
- }
- member_method {
- name: "HasField"
- }
- member_method {
- name: "IsInitialized"
- }
- member_method {
- name: "ListFields"
- }
- member_method {
- name: "MergeFrom"
- }
- member_method {
- name: "MergeFromString"
- }
- member_method {
- name: "ParseFromString"
- }
- member_method {
- name: "RegisterExtension"
- }
- member_method {
- name: "SerializePartialToString"
- }
- member_method {
- name: "SerializeToString"
- }
- member_method {
- name: "SetInParent"
- }
- member_method {
- name: "WhichOneof"
- }
- member_method {
- name: "__init__"
- }
-}
diff --git a/tensorflow/tools/api/golden/tensorflow.-config-proto.-device-count-entry.pbtxt b/tensorflow/tools/api/golden/tensorflow.-config-proto.-device-count-entry.pbtxt
deleted file mode 100644
index 29bb3be35c..0000000000
--- a/tensorflow/tools/api/golden/tensorflow.-config-proto.-device-count-entry.pbtxt
+++ /dev/null
@@ -1,84 +0,0 @@
-path: "tensorflow.ConfigProto.DeviceCountEntry"
-tf_class {
- is_instance: "<class \'tensorflow.core.protobuf.config_pb2.DeviceCountEntry\'>"
- is_instance: "<type \'google.protobuf.pyext._message.CMessage\'>"
- member {
- name: "DESCRIPTOR"
- mtype: "<type \'google.protobuf.pyext._message.MessageDescriptor\'>"
- }
- member {
- name: "Extensions"
- mtype: "<type \'getset_descriptor\'>"
- }
- member {
- name: "KEY_FIELD_NUMBER"
- mtype: "<type \'int\'>"
- }
- member {
- name: "VALUE_FIELD_NUMBER"
- mtype: "<type \'int\'>"
- }
- member_method {
- name: "ByteSize"
- }
- member_method {
- name: "Clear"
- }
- member_method {
- name: "ClearExtension"
- }
- member_method {
- name: "ClearField"
- }
- member_method {
- name: "CopyFrom"
- }
- member_method {
- name: "DiscardUnknownFields"
- }
- member_method {
- name: "FindInitializationErrors"
- }
- member_method {
- name: "FromString"
- }
- member_method {
- name: "HasExtension"
- }
- member_method {
- name: "HasField"
- }
- member_method {
- name: "IsInitialized"
- }
- member_method {
- name: "ListFields"
- }
- member_method {
- name: "MergeFrom"
- }
- member_method {
- name: "MergeFromString"
- }
- member_method {
- name: "ParseFromString"
- }
- member_method {
- name: "RegisterExtension"
- }
- member_method {
- name: "SerializePartialToString"
- }
- member_method {
- name: "SerializeToString"
- }
- member_method {
- name: "SetInParent"
- }
- member_method {
- name: "WhichOneof"
- }
- member_method {
- name: "__init__"
- }
-}
diff --git a/tensorflow/tools/api/golden/tensorflow.-config-proto.-experimental.pbtxt b/tensorflow/tools/api/golden/tensorflow.-config-proto.-experimental.pbtxt
new file mode 100644
index 0000000000..eb41deee13
--- /dev/null
+++ b/tensorflow/tools/api/golden/tensorflow.-config-proto.-experimental.pbtxt
@@ -0,0 +1,24 @@
+path: "tensorflow.ConfigProto.Experimental"
+tf_proto {
+ descriptor {
+ name: "Experimental"
+ field {
+ name: "collective_group_leader"
+ number: 1
+ label: LABEL_OPTIONAL
+ type: TYPE_STRING
+ }
+ field {
+ name: "client_handles_error_formatting"
+ number: 2
+ label: LABEL_OPTIONAL
+ type: TYPE_BOOL
+ }
+ field {
+ name: "executor_type"
+ number: 3
+ label: LABEL_OPTIONAL
+ type: TYPE_STRING
+ }
+ }
+}
diff --git a/tensorflow/tools/api/golden/tensorflow.-config-proto.pbtxt b/tensorflow/tools/api/golden/tensorflow.-config-proto.pbtxt
index 009d64aed0..e565b903d2 100644
--- a/tensorflow/tools/api/golden/tensorflow.-config-proto.pbtxt
+++ b/tensorflow/tools/api/golden/tensorflow.-config-proto.pbtxt
@@ -1,140 +1,148 @@
path: "tensorflow.ConfigProto"
-tf_class {
- is_instance: "<class \'tensorflow.core.protobuf.config_pb2.ConfigProto\'>"
- is_instance: "<type \'google.protobuf.pyext._message.CMessage\'>"
- member {
- name: "ALLOW_SOFT_PLACEMENT_FIELD_NUMBER"
- mtype: "<type \'int\'>"
- }
- member {
- name: "CLUSTER_DEF_FIELD_NUMBER"
- mtype: "<type \'int\'>"
- }
- member {
- name: "DESCRIPTOR"
- mtype: "<type \'google.protobuf.pyext._message.MessageDescriptor\'>"
- }
- member {
- name: "DEVICE_COUNT_FIELD_NUMBER"
- mtype: "<type \'int\'>"
- }
- member {
- name: "DEVICE_FILTERS_FIELD_NUMBER"
- mtype: "<type \'int\'>"
- }
- member {
- name: "DeviceCountEntry"
- mtype: "<class \'google.protobuf.pyext.cpp_message.GeneratedProtocolMessageType\'>"
- }
- member {
- name: "Extensions"
- mtype: "<type \'getset_descriptor\'>"
- }
- member {
- name: "GPU_OPTIONS_FIELD_NUMBER"
- mtype: "<type \'int\'>"
- }
- member {
- name: "GRAPH_OPTIONS_FIELD_NUMBER"
- mtype: "<type \'int\'>"
- }
- member {
- name: "INTER_OP_PARALLELISM_THREADS_FIELD_NUMBER"
- mtype: "<type \'int\'>"
- }
- member {
- name: "INTRA_OP_PARALLELISM_THREADS_FIELD_NUMBER"
- mtype: "<type \'int\'>"
- }
- member {
- name: "ISOLATE_SESSION_STATE_FIELD_NUMBER"
- mtype: "<type \'int\'>"
- }
- member {
- name: "LOG_DEVICE_PLACEMENT_FIELD_NUMBER"
- mtype: "<type \'int\'>"
- }
- member {
- name: "OPERATION_TIMEOUT_IN_MS_FIELD_NUMBER"
- mtype: "<type \'int\'>"
- }
- member {
- name: "PLACEMENT_PERIOD_FIELD_NUMBER"
- mtype: "<type \'int\'>"
- }
- member {
- name: "RPC_OPTIONS_FIELD_NUMBER"
- mtype: "<type \'int\'>"
- }
- member {
- name: "SESSION_INTER_OP_THREAD_POOL_FIELD_NUMBER"
- mtype: "<type \'int\'>"
- }
- member {
- name: "USE_PER_SESSION_THREADS_FIELD_NUMBER"
- mtype: "<type \'int\'>"
- }
- member_method {
- name: "ByteSize"
- }
- member_method {
- name: "Clear"
- }
- member_method {
- name: "ClearExtension"
- }
- member_method {
- name: "ClearField"
- }
- member_method {
- name: "CopyFrom"
- }
- member_method {
- name: "DiscardUnknownFields"
- }
- member_method {
- name: "FindInitializationErrors"
- }
- member_method {
- name: "FromString"
- }
- member_method {
- name: "HasExtension"
- }
- member_method {
- name: "HasField"
- }
- member_method {
- name: "IsInitialized"
- }
- member_method {
- name: "ListFields"
- }
- member_method {
- name: "MergeFrom"
- }
- member_method {
- name: "MergeFromString"
- }
- member_method {
- name: "ParseFromString"
- }
- member_method {
- name: "RegisterExtension"
- }
- member_method {
- name: "SerializePartialToString"
- }
- member_method {
- name: "SerializeToString"
- }
- member_method {
- name: "SetInParent"
- }
- member_method {
- name: "WhichOneof"
- }
- member_method {
- name: "__init__"
+tf_proto {
+ descriptor {
+ name: "ConfigProto"
+ field {
+ name: "device_count"
+ number: 1
+ label: LABEL_REPEATED
+ type: TYPE_MESSAGE
+ type_name: ".tensorflow.ConfigProto.DeviceCountEntry"
+ }
+ field {
+ name: "intra_op_parallelism_threads"
+ number: 2
+ label: LABEL_OPTIONAL
+ type: TYPE_INT32
+ }
+ field {
+ name: "inter_op_parallelism_threads"
+ number: 5
+ label: LABEL_OPTIONAL
+ type: TYPE_INT32
+ }
+ field {
+ name: "use_per_session_threads"
+ number: 9
+ label: LABEL_OPTIONAL
+ type: TYPE_BOOL
+ }
+ field {
+ name: "session_inter_op_thread_pool"
+ number: 12
+ label: LABEL_REPEATED
+ type: TYPE_MESSAGE
+ type_name: ".tensorflow.ThreadPoolOptionProto"
+ }
+ field {
+ name: "placement_period"
+ number: 3
+ label: LABEL_OPTIONAL
+ type: TYPE_INT32
+ }
+ field {
+ name: "device_filters"
+ number: 4
+ label: LABEL_REPEATED
+ type: TYPE_STRING
+ }
+ field {
+ name: "gpu_options"
+ number: 6
+ label: LABEL_OPTIONAL
+ type: TYPE_MESSAGE
+ type_name: ".tensorflow.GPUOptions"
+ }
+ field {
+ name: "allow_soft_placement"
+ number: 7
+ label: LABEL_OPTIONAL
+ type: TYPE_BOOL
+ }
+ field {
+ name: "log_device_placement"
+ number: 8
+ label: LABEL_OPTIONAL
+ type: TYPE_BOOL
+ }
+ field {
+ name: "graph_options"
+ number: 10
+ label: LABEL_OPTIONAL
+ type: TYPE_MESSAGE
+ type_name: ".tensorflow.GraphOptions"
+ }
+ field {
+ name: "operation_timeout_in_ms"
+ number: 11
+ label: LABEL_OPTIONAL
+ type: TYPE_INT64
+ }
+ field {
+ name: "rpc_options"
+ number: 13
+ label: LABEL_OPTIONAL
+ type: TYPE_MESSAGE
+ type_name: ".tensorflow.RPCOptions"
+ }
+ field {
+ name: "cluster_def"
+ number: 14
+ label: LABEL_OPTIONAL
+ type: TYPE_MESSAGE
+ type_name: ".tensorflow.ClusterDef"
+ }
+ field {
+ name: "isolate_session_state"
+ number: 15
+ label: LABEL_OPTIONAL
+ type: TYPE_BOOL
+ }
+ field {
+ name: "experimental"
+ number: 16
+ label: LABEL_OPTIONAL
+ type: TYPE_MESSAGE
+ type_name: ".tensorflow.ConfigProto.Experimental"
+ }
+ nested_type {
+ name: "DeviceCountEntry"
+ field {
+ name: "key"
+ number: 1
+ label: LABEL_OPTIONAL
+ type: TYPE_STRING
+ }
+ field {
+ name: "value"
+ number: 2
+ label: LABEL_OPTIONAL
+ type: TYPE_INT32
+ }
+ options {
+ map_entry: true
+ }
+ }
+ nested_type {
+ name: "Experimental"
+ field {
+ name: "collective_group_leader"
+ number: 1
+ label: LABEL_OPTIONAL
+ type: TYPE_STRING
+ }
+ field {
+ name: "client_handles_error_formatting"
+ number: 2
+ label: LABEL_OPTIONAL
+ type: TYPE_BOOL
+ }
+ field {
+ name: "executor_type"
+ number: 3
+ label: LABEL_OPTIONAL
+ type: TYPE_STRING
+ }
+ }
}
}
diff --git a/tensorflow/tools/api/golden/tensorflow.-event.pbtxt b/tensorflow/tools/api/golden/tensorflow.-event.pbtxt
deleted file mode 100644
index 9bf8c12428..0000000000
--- a/tensorflow/tools/api/golden/tensorflow.-event.pbtxt
+++ /dev/null
@@ -1,112 +0,0 @@
-path: "tensorflow.Event"
-tf_class {
- is_instance: "<class \'tensorflow.core.util.event_pb2.Event\'>"
- is_instance: "<type \'google.protobuf.pyext._message.CMessage\'>"
- member {
- name: "DESCRIPTOR"
- mtype: "<type \'google.protobuf.pyext._message.MessageDescriptor\'>"
- }
- member {
- name: "Extensions"
- mtype: "<type \'getset_descriptor\'>"
- }
- member {
- name: "FILE_VERSION_FIELD_NUMBER"
- mtype: "<type \'int\'>"
- }
- member {
- name: "GRAPH_DEF_FIELD_NUMBER"
- mtype: "<type \'int\'>"
- }
- member {
- name: "LOG_MESSAGE_FIELD_NUMBER"
- mtype: "<type \'int\'>"
- }
- member {
- name: "META_GRAPH_DEF_FIELD_NUMBER"
- mtype: "<type \'int\'>"
- }
- member {
- name: "SESSION_LOG_FIELD_NUMBER"
- mtype: "<type \'int\'>"
- }
- member {
- name: "STEP_FIELD_NUMBER"
- mtype: "<type \'int\'>"
- }
- member {
- name: "SUMMARY_FIELD_NUMBER"
- mtype: "<type \'int\'>"
- }
- member {
- name: "TAGGED_RUN_METADATA_FIELD_NUMBER"
- mtype: "<type \'int\'>"
- }
- member {
- name: "WALL_TIME_FIELD_NUMBER"
- mtype: "<type \'int\'>"
- }
- member_method {
- name: "ByteSize"
- }
- member_method {
- name: "Clear"
- }
- member_method {
- name: "ClearExtension"
- }
- member_method {
- name: "ClearField"
- }
- member_method {
- name: "CopyFrom"
- }
- member_method {
- name: "DiscardUnknownFields"
- }
- member_method {
- name: "FindInitializationErrors"
- }
- member_method {
- name: "FromString"
- }
- member_method {
- name: "HasExtension"
- }
- member_method {
- name: "HasField"
- }
- member_method {
- name: "IsInitialized"
- }
- member_method {
- name: "ListFields"
- }
- member_method {
- name: "MergeFrom"
- }
- member_method {
- name: "MergeFromString"
- }
- member_method {
- name: "ParseFromString"
- }
- member_method {
- name: "RegisterExtension"
- }
- member_method {
- name: "SerializePartialToString"
- }
- member_method {
- name: "SerializeToString"
- }
- member_method {
- name: "SetInParent"
- }
- member_method {
- name: "WhichOneof"
- }
- member_method {
- name: "__init__"
- }
-}
diff --git a/tensorflow/tools/api/golden/tensorflow.-g-p-u-options.pbtxt b/tensorflow/tools/api/golden/tensorflow.-g-p-u-options.pbtxt
deleted file mode 100644
index 875d802a9c..0000000000
--- a/tensorflow/tools/api/golden/tensorflow.-g-p-u-options.pbtxt
+++ /dev/null
@@ -1,116 +0,0 @@
-path: "tensorflow.GPUOptions"
-tf_class {
- is_instance: "<class \'tensorflow.core.protobuf.config_pb2.GPUOptions\'>"
- is_instance: "<type \'google.protobuf.pyext._message.CMessage\'>"
- member {
- name: "ALLOCATOR_TYPE_FIELD_NUMBER"
- mtype: "<type \'int\'>"
- }
- member {
- name: "ALLOW_GROWTH_FIELD_NUMBER"
- mtype: "<type \'int\'>"
- }
- member {
- name: "DEFERRED_DELETION_BYTES_FIELD_NUMBER"
- mtype: "<type \'int\'>"
- }
- member {
- name: "DESCRIPTOR"
- mtype: "<type \'google.protobuf.pyext._message.MessageDescriptor\'>"
- }
- member {
- name: "EXPERIMENTAL_FIELD_NUMBER"
- mtype: "<type \'int\'>"
- }
- member {
- name: "Experimental"
- mtype: "<class \'google.protobuf.pyext.cpp_message.GeneratedProtocolMessageType\'>"
- }
- member {
- name: "Extensions"
- mtype: "<type \'getset_descriptor\'>"
- }
- member {
- name: "FORCE_GPU_COMPATIBLE_FIELD_NUMBER"
- mtype: "<type \'int\'>"
- }
- member {
- name: "PER_PROCESS_GPU_MEMORY_FRACTION_FIELD_NUMBER"
- mtype: "<type \'int\'>"
- }
- member {
- name: "POLLING_ACTIVE_DELAY_USECS_FIELD_NUMBER"
- mtype: "<type \'int\'>"
- }
- member {
- name: "POLLING_INACTIVE_DELAY_MSECS_FIELD_NUMBER"
- mtype: "<type \'int\'>"
- }
- member {
- name: "VISIBLE_DEVICE_LIST_FIELD_NUMBER"
- mtype: "<type \'int\'>"
- }
- member_method {
- name: "ByteSize"
- }
- member_method {
- name: "Clear"
- }
- member_method {
- name: "ClearExtension"
- }
- member_method {
- name: "ClearField"
- }
- member_method {
- name: "CopyFrom"
- }
- member_method {
- name: "DiscardUnknownFields"
- }
- member_method {
- name: "FindInitializationErrors"
- }
- member_method {
- name: "FromString"
- }
- member_method {
- name: "HasExtension"
- }
- member_method {
- name: "HasField"
- }
- member_method {
- name: "IsInitialized"
- }
- member_method {
- name: "ListFields"
- }
- member_method {
- name: "MergeFrom"
- }
- member_method {
- name: "MergeFromString"
- }
- member_method {
- name: "ParseFromString"
- }
- member_method {
- name: "RegisterExtension"
- }
- member_method {
- name: "SerializePartialToString"
- }
- member_method {
- name: "SerializeToString"
- }
- member_method {
- name: "SetInParent"
- }
- member_method {
- name: "WhichOneof"
- }
- member_method {
- name: "__init__"
- }
-}
diff --git a/tensorflow/tools/api/golden/tensorflow.-graph-def.pbtxt b/tensorflow/tools/api/golden/tensorflow.-graph-def.pbtxt
deleted file mode 100644
index 1495e847cb..0000000000
--- a/tensorflow/tools/api/golden/tensorflow.-graph-def.pbtxt
+++ /dev/null
@@ -1,92 +0,0 @@
-path: "tensorflow.GraphDef"
-tf_class {
- is_instance: "<class \'tensorflow.core.framework.graph_pb2.GraphDef\'>"
- is_instance: "<type \'google.protobuf.pyext._message.CMessage\'>"
- member {
- name: "DESCRIPTOR"
- mtype: "<type \'google.protobuf.pyext._message.MessageDescriptor\'>"
- }
- member {
- name: "Extensions"
- mtype: "<type \'getset_descriptor\'>"
- }
- member {
- name: "LIBRARY_FIELD_NUMBER"
- mtype: "<type \'int\'>"
- }
- member {
- name: "NODE_FIELD_NUMBER"
- mtype: "<type \'int\'>"
- }
- member {
- name: "VERSIONS_FIELD_NUMBER"
- mtype: "<type \'int\'>"
- }
- member {
- name: "VERSION_FIELD_NUMBER"
- mtype: "<type \'int\'>"
- }
- member_method {
- name: "ByteSize"
- }
- member_method {
- name: "Clear"
- }
- member_method {
- name: "ClearExtension"
- }
- member_method {
- name: "ClearField"
- }
- member_method {
- name: "CopyFrom"
- }
- member_method {
- name: "DiscardUnknownFields"
- }
- member_method {
- name: "FindInitializationErrors"
- }
- member_method {
- name: "FromString"
- }
- member_method {
- name: "HasExtension"
- }
- member_method {
- name: "HasField"
- }
- member_method {
- name: "IsInitialized"
- }
- member_method {
- name: "ListFields"
- }
- member_method {
- name: "MergeFrom"
- }
- member_method {
- name: "MergeFromString"
- }
- member_method {
- name: "ParseFromString"
- }
- member_method {
- name: "RegisterExtension"
- }
- member_method {
- name: "SerializePartialToString"
- }
- member_method {
- name: "SerializeToString"
- }
- member_method {
- name: "SetInParent"
- }
- member_method {
- name: "WhichOneof"
- }
- member_method {
- name: "__init__"
- }
-}
diff --git a/tensorflow/tools/api/golden/tensorflow.-graph-options.pbtxt b/tensorflow/tools/api/golden/tensorflow.-graph-options.pbtxt
deleted file mode 100644
index 0844f891ca..0000000000
--- a/tensorflow/tools/api/golden/tensorflow.-graph-options.pbtxt
+++ /dev/null
@@ -1,112 +0,0 @@
-path: "tensorflow.GraphOptions"
-tf_class {
- is_instance: "<class \'tensorflow.core.protobuf.config_pb2.GraphOptions\'>"
- is_instance: "<type \'google.protobuf.pyext._message.CMessage\'>"
- member {
- name: "BUILD_COST_MODEL_AFTER_FIELD_NUMBER"
- mtype: "<type \'int\'>"
- }
- member {
- name: "BUILD_COST_MODEL_FIELD_NUMBER"
- mtype: "<type \'int\'>"
- }
- member {
- name: "DESCRIPTOR"
- mtype: "<type \'google.protobuf.pyext._message.MessageDescriptor\'>"
- }
- member {
- name: "ENABLE_BFLOAT16_SENDRECV_FIELD_NUMBER"
- mtype: "<type \'int\'>"
- }
- member {
- name: "ENABLE_RECV_SCHEDULING_FIELD_NUMBER"
- mtype: "<type \'int\'>"
- }
- member {
- name: "Extensions"
- mtype: "<type \'getset_descriptor\'>"
- }
- member {
- name: "INFER_SHAPES_FIELD_NUMBER"
- mtype: "<type \'int\'>"
- }
- member {
- name: "OPTIMIZER_OPTIONS_FIELD_NUMBER"
- mtype: "<type \'int\'>"
- }
- member {
- name: "PLACE_PRUNED_GRAPH_FIELD_NUMBER"
- mtype: "<type \'int\'>"
- }
- member {
- name: "REWRITE_OPTIONS_FIELD_NUMBER"
- mtype: "<type \'int\'>"
- }
- member {
- name: "TIMELINE_STEP_FIELD_NUMBER"
- mtype: "<type \'int\'>"
- }
- member_method {
- name: "ByteSize"
- }
- member_method {
- name: "Clear"
- }
- member_method {
- name: "ClearExtension"
- }
- member_method {
- name: "ClearField"
- }
- member_method {
- name: "CopyFrom"
- }
- member_method {
- name: "DiscardUnknownFields"
- }
- member_method {
- name: "FindInitializationErrors"
- }
- member_method {
- name: "FromString"
- }
- member_method {
- name: "HasExtension"
- }
- member_method {
- name: "HasField"
- }
- member_method {
- name: "IsInitialized"
- }
- member_method {
- name: "ListFields"
- }
- member_method {
- name: "MergeFrom"
- }
- member_method {
- name: "MergeFromString"
- }
- member_method {
- name: "ParseFromString"
- }
- member_method {
- name: "RegisterExtension"
- }
- member_method {
- name: "SerializePartialToString"
- }
- member_method {
- name: "SerializeToString"
- }
- member_method {
- name: "SetInParent"
- }
- member_method {
- name: "WhichOneof"
- }
- member_method {
- name: "__init__"
- }
-}
diff --git a/tensorflow/tools/api/golden/tensorflow.-histogram-proto.pbtxt b/tensorflow/tools/api/golden/tensorflow.-histogram-proto.pbtxt
deleted file mode 100644
index 2567d2fe60..0000000000
--- a/tensorflow/tools/api/golden/tensorflow.-histogram-proto.pbtxt
+++ /dev/null
@@ -1,104 +0,0 @@
-path: "tensorflow.HistogramProto"
-tf_class {
- is_instance: "<class \'tensorflow.core.framework.summary_pb2.HistogramProto\'>"
- is_instance: "<type \'google.protobuf.pyext._message.CMessage\'>"
- member {
- name: "BUCKET_FIELD_NUMBER"
- mtype: "<type \'int\'>"
- }
- member {
- name: "BUCKET_LIMIT_FIELD_NUMBER"
- mtype: "<type \'int\'>"
- }
- member {
- name: "DESCRIPTOR"
- mtype: "<type \'google.protobuf.pyext._message.MessageDescriptor\'>"
- }
- member {
- name: "Extensions"
- mtype: "<type \'getset_descriptor\'>"
- }
- member {
- name: "MAX_FIELD_NUMBER"
- mtype: "<type \'int\'>"
- }
- member {
- name: "MIN_FIELD_NUMBER"
- mtype: "<type \'int\'>"
- }
- member {
- name: "NUM_FIELD_NUMBER"
- mtype: "<type \'int\'>"
- }
- member {
- name: "SUM_FIELD_NUMBER"
- mtype: "<type \'int\'>"
- }
- member {
- name: "SUM_SQUARES_FIELD_NUMBER"
- mtype: "<type \'int\'>"
- }
- member_method {
- name: "ByteSize"
- }
- member_method {
- name: "Clear"
- }
- member_method {
- name: "ClearExtension"
- }
- member_method {
- name: "ClearField"
- }
- member_method {
- name: "CopyFrom"
- }
- member_method {
- name: "DiscardUnknownFields"
- }
- member_method {
- name: "FindInitializationErrors"
- }
- member_method {
- name: "FromString"
- }
- member_method {
- name: "HasExtension"
- }
- member_method {
- name: "HasField"
- }
- member_method {
- name: "IsInitialized"
- }
- member_method {
- name: "ListFields"
- }
- member_method {
- name: "MergeFrom"
- }
- member_method {
- name: "MergeFromString"
- }
- member_method {
- name: "ParseFromString"
- }
- member_method {
- name: "RegisterExtension"
- }
- member_method {
- name: "SerializePartialToString"
- }
- member_method {
- name: "SerializeToString"
- }
- member_method {
- name: "SetInParent"
- }
- member_method {
- name: "WhichOneof"
- }
- member_method {
- name: "__init__"
- }
-}
diff --git a/tensorflow/tools/api/golden/tensorflow.-log-message.pbtxt b/tensorflow/tools/api/golden/tensorflow.-log-message.pbtxt
deleted file mode 100644
index a43c5eb7e3..0000000000
--- a/tensorflow/tools/api/golden/tensorflow.-log-message.pbtxt
+++ /dev/null
@@ -1,112 +0,0 @@
-path: "tensorflow.LogMessage"
-tf_class {
- is_instance: "<class \'tensorflow.core.util.event_pb2.LogMessage\'>"
- is_instance: "<type \'google.protobuf.pyext._message.CMessage\'>"
- member {
- name: "DEBUGGING"
- mtype: "<type \'int\'>"
- }
- member {
- name: "DESCRIPTOR"
- mtype: "<type \'google.protobuf.pyext._message.MessageDescriptor\'>"
- }
- member {
- name: "ERROR"
- mtype: "<type \'int\'>"
- }
- member {
- name: "Extensions"
- mtype: "<type \'getset_descriptor\'>"
- }
- member {
- name: "FATAL"
- mtype: "<type \'int\'>"
- }
- member {
- name: "INFO"
- mtype: "<type \'int\'>"
- }
- member {
- name: "LEVEL_FIELD_NUMBER"
- mtype: "<type \'int\'>"
- }
- member {
- name: "Level"
- mtype: "<class \'google.protobuf.internal.enum_type_wrapper.EnumTypeWrapper\'>"
- }
- member {
- name: "MESSAGE_FIELD_NUMBER"
- mtype: "<type \'int\'>"
- }
- member {
- name: "UNKNOWN"
- mtype: "<type \'int\'>"
- }
- member {
- name: "WARN"
- mtype: "<type \'int\'>"
- }
- member_method {
- name: "ByteSize"
- }
- member_method {
- name: "Clear"
- }
- member_method {
- name: "ClearExtension"
- }
- member_method {
- name: "ClearField"
- }
- member_method {
- name: "CopyFrom"
- }
- member_method {
- name: "DiscardUnknownFields"
- }
- member_method {
- name: "FindInitializationErrors"
- }
- member_method {
- name: "FromString"
- }
- member_method {
- name: "HasExtension"
- }
- member_method {
- name: "HasField"
- }
- member_method {
- name: "IsInitialized"
- }
- member_method {
- name: "ListFields"
- }
- member_method {
- name: "MergeFrom"
- }
- member_method {
- name: "MergeFromString"
- }
- member_method {
- name: "ParseFromString"
- }
- member_method {
- name: "RegisterExtension"
- }
- member_method {
- name: "SerializePartialToString"
- }
- member_method {
- name: "SerializeToString"
- }
- member_method {
- name: "SetInParent"
- }
- member_method {
- name: "WhichOneof"
- }
- member_method {
- name: "__init__"
- }
-}
diff --git a/tensorflow/tools/api/golden/tensorflow.-meta-graph-def.-collection-def-entry.pbtxt b/tensorflow/tools/api/golden/tensorflow.-meta-graph-def.-collection-def-entry.pbtxt
deleted file mode 100644
index 3572126fbf..0000000000
--- a/tensorflow/tools/api/golden/tensorflow.-meta-graph-def.-collection-def-entry.pbtxt
+++ /dev/null
@@ -1,84 +0,0 @@
-path: "tensorflow.MetaGraphDef.CollectionDefEntry"
-tf_class {
- is_instance: "<class \'tensorflow.core.protobuf.meta_graph_pb2.CollectionDefEntry\'>"
- is_instance: "<type \'google.protobuf.pyext._message.CMessage\'>"
- member {
- name: "DESCRIPTOR"
- mtype: "<type \'google.protobuf.pyext._message.MessageDescriptor\'>"
- }
- member {
- name: "Extensions"
- mtype: "<type \'getset_descriptor\'>"
- }
- member {
- name: "KEY_FIELD_NUMBER"
- mtype: "<type \'int\'>"
- }
- member {
- name: "VALUE_FIELD_NUMBER"
- mtype: "<type \'int\'>"
- }
- member_method {
- name: "ByteSize"
- }
- member_method {
- name: "Clear"
- }
- member_method {
- name: "ClearExtension"
- }
- member_method {
- name: "ClearField"
- }
- member_method {
- name: "CopyFrom"
- }
- member_method {
- name: "DiscardUnknownFields"
- }
- member_method {
- name: "FindInitializationErrors"
- }
- member_method {
- name: "FromString"
- }
- member_method {
- name: "HasExtension"
- }
- member_method {
- name: "HasField"
- }
- member_method {
- name: "IsInitialized"
- }
- member_method {
- name: "ListFields"
- }
- member_method {
- name: "MergeFrom"
- }
- member_method {
- name: "MergeFromString"
- }
- member_method {
- name: "ParseFromString"
- }
- member_method {
- name: "RegisterExtension"
- }
- member_method {
- name: "SerializePartialToString"
- }
- member_method {
- name: "SerializeToString"
- }
- member_method {
- name: "SetInParent"
- }
- member_method {
- name: "WhichOneof"
- }
- member_method {
- name: "__init__"
- }
-}
diff --git a/tensorflow/tools/api/golden/tensorflow.-meta-graph-def.-meta-info-def.pbtxt b/tensorflow/tools/api/golden/tensorflow.-meta-graph-def.-meta-info-def.pbtxt
deleted file mode 100644
index b0e9831154..0000000000
--- a/tensorflow/tools/api/golden/tensorflow.-meta-graph-def.-meta-info-def.pbtxt
+++ /dev/null
@@ -1,104 +0,0 @@
-path: "tensorflow.MetaGraphDef.MetaInfoDef"
-tf_class {
- is_instance: "<class \'tensorflow.core.protobuf.meta_graph_pb2.MetaInfoDef\'>"
- is_instance: "<type \'google.protobuf.pyext._message.CMessage\'>"
- member {
- name: "ANY_INFO_FIELD_NUMBER"
- mtype: "<type \'int\'>"
- }
- member {
- name: "DESCRIPTOR"
- mtype: "<type \'google.protobuf.pyext._message.MessageDescriptor\'>"
- }
- member {
- name: "Extensions"
- mtype: "<type \'getset_descriptor\'>"
- }
- member {
- name: "META_GRAPH_VERSION_FIELD_NUMBER"
- mtype: "<type \'int\'>"
- }
- member {
- name: "STRIPPED_DEFAULT_ATTRS_FIELD_NUMBER"
- mtype: "<type \'int\'>"
- }
- member {
- name: "STRIPPED_OP_LIST_FIELD_NUMBER"
- mtype: "<type \'int\'>"
- }
- member {
- name: "TAGS_FIELD_NUMBER"
- mtype: "<type \'int\'>"
- }
- member {
- name: "TENSORFLOW_GIT_VERSION_FIELD_NUMBER"
- mtype: "<type \'int\'>"
- }
- member {
- name: "TENSORFLOW_VERSION_FIELD_NUMBER"
- mtype: "<type \'int\'>"
- }
- member_method {
- name: "ByteSize"
- }
- member_method {
- name: "Clear"
- }
- member_method {
- name: "ClearExtension"
- }
- member_method {
- name: "ClearField"
- }
- member_method {
- name: "CopyFrom"
- }
- member_method {
- name: "DiscardUnknownFields"
- }
- member_method {
- name: "FindInitializationErrors"
- }
- member_method {
- name: "FromString"
- }
- member_method {
- name: "HasExtension"
- }
- member_method {
- name: "HasField"
- }
- member_method {
- name: "IsInitialized"
- }
- member_method {
- name: "ListFields"
- }
- member_method {
- name: "MergeFrom"
- }
- member_method {
- name: "MergeFromString"
- }
- member_method {
- name: "ParseFromString"
- }
- member_method {
- name: "RegisterExtension"
- }
- member_method {
- name: "SerializePartialToString"
- }
- member_method {
- name: "SerializeToString"
- }
- member_method {
- name: "SetInParent"
- }
- member_method {
- name: "WhichOneof"
- }
- member_method {
- name: "__init__"
- }
-}
diff --git a/tensorflow/tools/api/golden/tensorflow.-meta-graph-def.-signature-def-entry.pbtxt b/tensorflow/tools/api/golden/tensorflow.-meta-graph-def.-signature-def-entry.pbtxt
deleted file mode 100644
index 48fccac99d..0000000000
--- a/tensorflow/tools/api/golden/tensorflow.-meta-graph-def.-signature-def-entry.pbtxt
+++ /dev/null
@@ -1,84 +0,0 @@
-path: "tensorflow.MetaGraphDef.SignatureDefEntry"
-tf_class {
- is_instance: "<class \'tensorflow.core.protobuf.meta_graph_pb2.SignatureDefEntry\'>"
- is_instance: "<type \'google.protobuf.pyext._message.CMessage\'>"
- member {
- name: "DESCRIPTOR"
- mtype: "<type \'google.protobuf.pyext._message.MessageDescriptor\'>"
- }
- member {
- name: "Extensions"
- mtype: "<type \'getset_descriptor\'>"
- }
- member {
- name: "KEY_FIELD_NUMBER"
- mtype: "<type \'int\'>"
- }
- member {
- name: "VALUE_FIELD_NUMBER"
- mtype: "<type \'int\'>"
- }
- member_method {
- name: "ByteSize"
- }
- member_method {
- name: "Clear"
- }
- member_method {
- name: "ClearExtension"
- }
- member_method {
- name: "ClearField"
- }
- member_method {
- name: "CopyFrom"
- }
- member_method {
- name: "DiscardUnknownFields"
- }
- member_method {
- name: "FindInitializationErrors"
- }
- member_method {
- name: "FromString"
- }
- member_method {
- name: "HasExtension"
- }
- member_method {
- name: "HasField"
- }
- member_method {
- name: "IsInitialized"
- }
- member_method {
- name: "ListFields"
- }
- member_method {
- name: "MergeFrom"
- }
- member_method {
- name: "MergeFromString"
- }
- member_method {
- name: "ParseFromString"
- }
- member_method {
- name: "RegisterExtension"
- }
- member_method {
- name: "SerializePartialToString"
- }
- member_method {
- name: "SerializeToString"
- }
- member_method {
- name: "SetInParent"
- }
- member_method {
- name: "WhichOneof"
- }
- member_method {
- name: "__init__"
- }
-}
diff --git a/tensorflow/tools/api/golden/tensorflow.-meta-graph-def.pbtxt b/tensorflow/tools/api/golden/tensorflow.-meta-graph-def.pbtxt
deleted file mode 100644
index 3e683a8715..0000000000
--- a/tensorflow/tools/api/golden/tensorflow.-meta-graph-def.pbtxt
+++ /dev/null
@@ -1,112 +0,0 @@
-path: "tensorflow.MetaGraphDef"
-tf_class {
- is_instance: "<class \'tensorflow.core.protobuf.meta_graph_pb2.MetaGraphDef\'>"
- is_instance: "<type \'google.protobuf.pyext._message.CMessage\'>"
- member {
- name: "ASSET_FILE_DEF_FIELD_NUMBER"
- mtype: "<type \'int\'>"
- }
- member {
- name: "COLLECTION_DEF_FIELD_NUMBER"
- mtype: "<type \'int\'>"
- }
- member {
- name: "CollectionDefEntry"
- mtype: "<class \'google.protobuf.pyext.cpp_message.GeneratedProtocolMessageType\'>"
- }
- member {
- name: "DESCRIPTOR"
- mtype: "<type \'google.protobuf.pyext._message.MessageDescriptor\'>"
- }
- member {
- name: "Extensions"
- mtype: "<type \'getset_descriptor\'>"
- }
- member {
- name: "GRAPH_DEF_FIELD_NUMBER"
- mtype: "<type \'int\'>"
- }
- member {
- name: "META_INFO_DEF_FIELD_NUMBER"
- mtype: "<type \'int\'>"
- }
- member {
- name: "MetaInfoDef"
- mtype: "<class \'google.protobuf.pyext.cpp_message.GeneratedProtocolMessageType\'>"
- }
- member {
- name: "SAVER_DEF_FIELD_NUMBER"
- mtype: "<type \'int\'>"
- }
- member {
- name: "SIGNATURE_DEF_FIELD_NUMBER"
- mtype: "<type \'int\'>"
- }
- member {
- name: "SignatureDefEntry"
- mtype: "<class \'google.protobuf.pyext.cpp_message.GeneratedProtocolMessageType\'>"
- }
- member_method {
- name: "ByteSize"
- }
- member_method {
- name: "Clear"
- }
- member_method {
- name: "ClearExtension"
- }
- member_method {
- name: "ClearField"
- }
- member_method {
- name: "CopyFrom"
- }
- member_method {
- name: "DiscardUnknownFields"
- }
- member_method {
- name: "FindInitializationErrors"
- }
- member_method {
- name: "FromString"
- }
- member_method {
- name: "HasExtension"
- }
- member_method {
- name: "HasField"
- }
- member_method {
- name: "IsInitialized"
- }
- member_method {
- name: "ListFields"
- }
- member_method {
- name: "MergeFrom"
- }
- member_method {
- name: "MergeFromString"
- }
- member_method {
- name: "ParseFromString"
- }
- member_method {
- name: "RegisterExtension"
- }
- member_method {
- name: "SerializePartialToString"
- }
- member_method {
- name: "SerializeToString"
- }
- member_method {
- name: "SetInParent"
- }
- member_method {
- name: "WhichOneof"
- }
- member_method {
- name: "__init__"
- }
-}
diff --git a/tensorflow/tools/api/golden/tensorflow.-name-attr-list.-attr-entry.pbtxt b/tensorflow/tools/api/golden/tensorflow.-name-attr-list.-attr-entry.pbtxt
deleted file mode 100644
index 2750bd780c..0000000000
--- a/tensorflow/tools/api/golden/tensorflow.-name-attr-list.-attr-entry.pbtxt
+++ /dev/null
@@ -1,84 +0,0 @@
-path: "tensorflow.NameAttrList.AttrEntry"
-tf_class {
- is_instance: "<class \'tensorflow.core.framework.attr_value_pb2.AttrEntry\'>"
- is_instance: "<type \'google.protobuf.pyext._message.CMessage\'>"
- member {
- name: "DESCRIPTOR"
- mtype: "<type \'google.protobuf.pyext._message.MessageDescriptor\'>"
- }
- member {
- name: "Extensions"
- mtype: "<type \'getset_descriptor\'>"
- }
- member {
- name: "KEY_FIELD_NUMBER"
- mtype: "<type \'int\'>"
- }
- member {
- name: "VALUE_FIELD_NUMBER"
- mtype: "<type \'int\'>"
- }
- member_method {
- name: "ByteSize"
- }
- member_method {
- name: "Clear"
- }
- member_method {
- name: "ClearExtension"
- }
- member_method {
- name: "ClearField"
- }
- member_method {
- name: "CopyFrom"
- }
- member_method {
- name: "DiscardUnknownFields"
- }
- member_method {
- name: "FindInitializationErrors"
- }
- member_method {
- name: "FromString"
- }
- member_method {
- name: "HasExtension"
- }
- member_method {
- name: "HasField"
- }
- member_method {
- name: "IsInitialized"
- }
- member_method {
- name: "ListFields"
- }
- member_method {
- name: "MergeFrom"
- }
- member_method {
- name: "MergeFromString"
- }
- member_method {
- name: "ParseFromString"
- }
- member_method {
- name: "RegisterExtension"
- }
- member_method {
- name: "SerializePartialToString"
- }
- member_method {
- name: "SerializeToString"
- }
- member_method {
- name: "SetInParent"
- }
- member_method {
- name: "WhichOneof"
- }
- member_method {
- name: "__init__"
- }
-}
diff --git a/tensorflow/tools/api/golden/tensorflow.-name-attr-list.pbtxt b/tensorflow/tools/api/golden/tensorflow.-name-attr-list.pbtxt
deleted file mode 100644
index d10faf67d0..0000000000
--- a/tensorflow/tools/api/golden/tensorflow.-name-attr-list.pbtxt
+++ /dev/null
@@ -1,88 +0,0 @@
-path: "tensorflow.NameAttrList"
-tf_class {
- is_instance: "<class \'tensorflow.core.framework.attr_value_pb2.NameAttrList\'>"
- is_instance: "<type \'google.protobuf.pyext._message.CMessage\'>"
- member {
- name: "ATTR_FIELD_NUMBER"
- mtype: "<type \'int\'>"
- }
- member {
- name: "AttrEntry"
- mtype: "<class \'google.protobuf.pyext.cpp_message.GeneratedProtocolMessageType\'>"
- }
- member {
- name: "DESCRIPTOR"
- mtype: "<type \'google.protobuf.pyext._message.MessageDescriptor\'>"
- }
- member {
- name: "Extensions"
- mtype: "<type \'getset_descriptor\'>"
- }
- member {
- name: "NAME_FIELD_NUMBER"
- mtype: "<type \'int\'>"
- }
- member_method {
- name: "ByteSize"
- }
- member_method {
- name: "Clear"
- }
- member_method {
- name: "ClearExtension"
- }
- member_method {
- name: "ClearField"
- }
- member_method {
- name: "CopyFrom"
- }
- member_method {
- name: "DiscardUnknownFields"
- }
- member_method {
- name: "FindInitializationErrors"
- }
- member_method {
- name: "FromString"
- }
- member_method {
- name: "HasExtension"
- }
- member_method {
- name: "HasField"
- }
- member_method {
- name: "IsInitialized"
- }
- member_method {
- name: "ListFields"
- }
- member_method {
- name: "MergeFrom"
- }
- member_method {
- name: "MergeFromString"
- }
- member_method {
- name: "ParseFromString"
- }
- member_method {
- name: "RegisterExtension"
- }
- member_method {
- name: "SerializePartialToString"
- }
- member_method {
- name: "SerializeToString"
- }
- member_method {
- name: "SetInParent"
- }
- member_method {
- name: "WhichOneof"
- }
- member_method {
- name: "__init__"
- }
-}
diff --git a/tensorflow/tools/api/golden/tensorflow.-node-def.-attr-entry.pbtxt b/tensorflow/tools/api/golden/tensorflow.-node-def.-attr-entry.pbtxt
deleted file mode 100644
index b1b62d60f1..0000000000
--- a/tensorflow/tools/api/golden/tensorflow.-node-def.-attr-entry.pbtxt
+++ /dev/null
@@ -1,84 +0,0 @@
-path: "tensorflow.NodeDef.AttrEntry"
-tf_class {
- is_instance: "<class \'tensorflow.core.framework.node_def_pb2.AttrEntry\'>"
- is_instance: "<type \'google.protobuf.pyext._message.CMessage\'>"
- member {
- name: "DESCRIPTOR"
- mtype: "<type \'google.protobuf.pyext._message.MessageDescriptor\'>"
- }
- member {
- name: "Extensions"
- mtype: "<type \'getset_descriptor\'>"
- }
- member {
- name: "KEY_FIELD_NUMBER"
- mtype: "<type \'int\'>"
- }
- member {
- name: "VALUE_FIELD_NUMBER"
- mtype: "<type \'int\'>"
- }
- member_method {
- name: "ByteSize"
- }
- member_method {
- name: "Clear"
- }
- member_method {
- name: "ClearExtension"
- }
- member_method {
- name: "ClearField"
- }
- member_method {
- name: "CopyFrom"
- }
- member_method {
- name: "DiscardUnknownFields"
- }
- member_method {
- name: "FindInitializationErrors"
- }
- member_method {
- name: "FromString"
- }
- member_method {
- name: "HasExtension"
- }
- member_method {
- name: "HasField"
- }
- member_method {
- name: "IsInitialized"
- }
- member_method {
- name: "ListFields"
- }
- member_method {
- name: "MergeFrom"
- }
- member_method {
- name: "MergeFromString"
- }
- member_method {
- name: "ParseFromString"
- }
- member_method {
- name: "RegisterExtension"
- }
- member_method {
- name: "SerializePartialToString"
- }
- member_method {
- name: "SerializeToString"
- }
- member_method {
- name: "SetInParent"
- }
- member_method {
- name: "WhichOneof"
- }
- member_method {
- name: "__init__"
- }
-}
diff --git a/tensorflow/tools/api/golden/tensorflow.-node-def.pbtxt b/tensorflow/tools/api/golden/tensorflow.-node-def.pbtxt
deleted file mode 100644
index b812b4df2b..0000000000
--- a/tensorflow/tools/api/golden/tensorflow.-node-def.pbtxt
+++ /dev/null
@@ -1,100 +0,0 @@
-path: "tensorflow.NodeDef"
-tf_class {
- is_instance: "<class \'tensorflow.core.framework.node_def_pb2.NodeDef\'>"
- is_instance: "<type \'google.protobuf.pyext._message.CMessage\'>"
- member {
- name: "ATTR_FIELD_NUMBER"
- mtype: "<type \'int\'>"
- }
- member {
- name: "AttrEntry"
- mtype: "<class \'google.protobuf.pyext.cpp_message.GeneratedProtocolMessageType\'>"
- }
- member {
- name: "DESCRIPTOR"
- mtype: "<type \'google.protobuf.pyext._message.MessageDescriptor\'>"
- }
- member {
- name: "DEVICE_FIELD_NUMBER"
- mtype: "<type \'int\'>"
- }
- member {
- name: "Extensions"
- mtype: "<type \'getset_descriptor\'>"
- }
- member {
- name: "INPUT_FIELD_NUMBER"
- mtype: "<type \'int\'>"
- }
- member {
- name: "NAME_FIELD_NUMBER"
- mtype: "<type \'int\'>"
- }
- member {
- name: "OP_FIELD_NUMBER"
- mtype: "<type \'int\'>"
- }
- member_method {
- name: "ByteSize"
- }
- member_method {
- name: "Clear"
- }
- member_method {
- name: "ClearExtension"
- }
- member_method {
- name: "ClearField"
- }
- member_method {
- name: "CopyFrom"
- }
- member_method {
- name: "DiscardUnknownFields"
- }
- member_method {
- name: "FindInitializationErrors"
- }
- member_method {
- name: "FromString"
- }
- member_method {
- name: "HasExtension"
- }
- member_method {
- name: "HasField"
- }
- member_method {
- name: "IsInitialized"
- }
- member_method {
- name: "ListFields"
- }
- member_method {
- name: "MergeFrom"
- }
- member_method {
- name: "MergeFromString"
- }
- member_method {
- name: "ParseFromString"
- }
- member_method {
- name: "RegisterExtension"
- }
- member_method {
- name: "SerializePartialToString"
- }
- member_method {
- name: "SerializeToString"
- }
- member_method {
- name: "SetInParent"
- }
- member_method {
- name: "WhichOneof"
- }
- member_method {
- name: "__init__"
- }
-}
diff --git a/tensorflow/tools/api/golden/tensorflow.-optimizer-options.pbtxt b/tensorflow/tools/api/golden/tensorflow.-optimizer-options.pbtxt
deleted file mode 100644
index 6cac5c4d99..0000000000
--- a/tensorflow/tools/api/golden/tensorflow.-optimizer-options.pbtxt
+++ /dev/null
@@ -1,132 +0,0 @@
-path: "tensorflow.OptimizerOptions"
-tf_class {
- is_instance: "<class \'tensorflow.core.protobuf.config_pb2.OptimizerOptions\'>"
- is_instance: "<type \'google.protobuf.pyext._message.CMessage\'>"
- member {
- name: "DEFAULT"
- mtype: "<type \'int\'>"
- }
- member {
- name: "DESCRIPTOR"
- mtype: "<type \'google.protobuf.pyext._message.MessageDescriptor\'>"
- }
- member {
- name: "DO_COMMON_SUBEXPRESSION_ELIMINATION_FIELD_NUMBER"
- mtype: "<type \'int\'>"
- }
- member {
- name: "DO_CONSTANT_FOLDING_FIELD_NUMBER"
- mtype: "<type \'int\'>"
- }
- member {
- name: "DO_FUNCTION_INLINING_FIELD_NUMBER"
- mtype: "<type \'int\'>"
- }
- member {
- name: "Extensions"
- mtype: "<type \'getset_descriptor\'>"
- }
- member {
- name: "GLOBAL_JIT_LEVEL_FIELD_NUMBER"
- mtype: "<type \'int\'>"
- }
- member {
- name: "GlobalJitLevel"
- mtype: "<class \'google.protobuf.internal.enum_type_wrapper.EnumTypeWrapper\'>"
- }
- member {
- name: "L0"
- mtype: "<type \'int\'>"
- }
- member {
- name: "L1"
- mtype: "<type \'int\'>"
- }
- member {
- name: "Level"
- mtype: "<class \'google.protobuf.internal.enum_type_wrapper.EnumTypeWrapper\'>"
- }
- member {
- name: "MAX_FOLDED_CONSTANT_IN_BYTES_FIELD_NUMBER"
- mtype: "<type \'int\'>"
- }
- member {
- name: "OFF"
- mtype: "<type \'int\'>"
- }
- member {
- name: "ON_1"
- mtype: "<type \'int\'>"
- }
- member {
- name: "ON_2"
- mtype: "<type \'int\'>"
- }
- member {
- name: "OPT_LEVEL_FIELD_NUMBER"
- mtype: "<type \'int\'>"
- }
- member_method {
- name: "ByteSize"
- }
- member_method {
- name: "Clear"
- }
- member_method {
- name: "ClearExtension"
- }
- member_method {
- name: "ClearField"
- }
- member_method {
- name: "CopyFrom"
- }
- member_method {
- name: "DiscardUnknownFields"
- }
- member_method {
- name: "FindInitializationErrors"
- }
- member_method {
- name: "FromString"
- }
- member_method {
- name: "HasExtension"
- }
- member_method {
- name: "HasField"
- }
- member_method {
- name: "IsInitialized"
- }
- member_method {
- name: "ListFields"
- }
- member_method {
- name: "MergeFrom"
- }
- member_method {
- name: "MergeFromString"
- }
- member_method {
- name: "ParseFromString"
- }
- member_method {
- name: "RegisterExtension"
- }
- member_method {
- name: "SerializePartialToString"
- }
- member_method {
- name: "SerializeToString"
- }
- member_method {
- name: "SetInParent"
- }
- member_method {
- name: "WhichOneof"
- }
- member_method {
- name: "__init__"
- }
-}
diff --git a/tensorflow/tools/api/golden/tensorflow.-run-metadata.pbtxt b/tensorflow/tools/api/golden/tensorflow.-run-metadata.pbtxt
deleted file mode 100644
index 808fa0fa21..0000000000
--- a/tensorflow/tools/api/golden/tensorflow.-run-metadata.pbtxt
+++ /dev/null
@@ -1,88 +0,0 @@
-path: "tensorflow.RunMetadata"
-tf_class {
- is_instance: "<class \'tensorflow.core.protobuf.config_pb2.RunMetadata\'>"
- is_instance: "<type \'google.protobuf.pyext._message.CMessage\'>"
- member {
- name: "COST_GRAPH_FIELD_NUMBER"
- mtype: "<type \'int\'>"
- }
- member {
- name: "DESCRIPTOR"
- mtype: "<type \'google.protobuf.pyext._message.MessageDescriptor\'>"
- }
- member {
- name: "Extensions"
- mtype: "<type \'getset_descriptor\'>"
- }
- member {
- name: "PARTITION_GRAPHS_FIELD_NUMBER"
- mtype: "<type \'int\'>"
- }
- member {
- name: "STEP_STATS_FIELD_NUMBER"
- mtype: "<type \'int\'>"
- }
- member_method {
- name: "ByteSize"
- }
- member_method {
- name: "Clear"
- }
- member_method {
- name: "ClearExtension"
- }
- member_method {
- name: "ClearField"
- }
- member_method {
- name: "CopyFrom"
- }
- member_method {
- name: "DiscardUnknownFields"
- }
- member_method {
- name: "FindInitializationErrors"
- }
- member_method {
- name: "FromString"
- }
- member_method {
- name: "HasExtension"
- }
- member_method {
- name: "HasField"
- }
- member_method {
- name: "IsInitialized"
- }
- member_method {
- name: "ListFields"
- }
- member_method {
- name: "MergeFrom"
- }
- member_method {
- name: "MergeFromString"
- }
- member_method {
- name: "ParseFromString"
- }
- member_method {
- name: "RegisterExtension"
- }
- member_method {
- name: "SerializePartialToString"
- }
- member_method {
- name: "SerializeToString"
- }
- member_method {
- name: "SetInParent"
- }
- member_method {
- name: "WhichOneof"
- }
- member_method {
- name: "__init__"
- }
-}
diff --git a/tensorflow/tools/api/golden/tensorflow.-run-options.pbtxt b/tensorflow/tools/api/golden/tensorflow.-run-options.pbtxt
deleted file mode 100644
index 2f3e7f1a84..0000000000
--- a/tensorflow/tools/api/golden/tensorflow.-run-options.pbtxt
+++ /dev/null
@@ -1,120 +0,0 @@
-path: "tensorflow.RunOptions"
-tf_class {
- is_instance: "<class \'tensorflow.core.protobuf.config_pb2.RunOptions\'>"
- is_instance: "<type \'google.protobuf.pyext._message.CMessage\'>"
- member {
- name: "DEBUG_OPTIONS_FIELD_NUMBER"
- mtype: "<type \'int\'>"
- }
- member {
- name: "DESCRIPTOR"
- mtype: "<type \'google.protobuf.pyext._message.MessageDescriptor\'>"
- }
- member {
- name: "Extensions"
- mtype: "<type \'getset_descriptor\'>"
- }
- member {
- name: "FULL_TRACE"
- mtype: "<type \'int\'>"
- }
- member {
- name: "HARDWARE_TRACE"
- mtype: "<type \'int\'>"
- }
- member {
- name: "INTER_OP_THREAD_POOL_FIELD_NUMBER"
- mtype: "<type \'int\'>"
- }
- member {
- name: "NO_TRACE"
- mtype: "<type \'int\'>"
- }
- member {
- name: "OUTPUT_PARTITION_GRAPHS_FIELD_NUMBER"
- mtype: "<type \'int\'>"
- }
- member {
- name: "REPORT_TENSOR_ALLOCATIONS_UPON_OOM_FIELD_NUMBER"
- mtype: "<type \'int\'>"
- }
- member {
- name: "SOFTWARE_TRACE"
- mtype: "<type \'int\'>"
- }
- member {
- name: "TIMEOUT_IN_MS_FIELD_NUMBER"
- mtype: "<type \'int\'>"
- }
- member {
- name: "TRACE_LEVEL_FIELD_NUMBER"
- mtype: "<type \'int\'>"
- }
- member {
- name: "TraceLevel"
- mtype: "<class \'google.protobuf.internal.enum_type_wrapper.EnumTypeWrapper\'>"
- }
- member_method {
- name: "ByteSize"
- }
- member_method {
- name: "Clear"
- }
- member_method {
- name: "ClearExtension"
- }
- member_method {
- name: "ClearField"
- }
- member_method {
- name: "CopyFrom"
- }
- member_method {
- name: "DiscardUnknownFields"
- }
- member_method {
- name: "FindInitializationErrors"
- }
- member_method {
- name: "FromString"
- }
- member_method {
- name: "HasExtension"
- }
- member_method {
- name: "HasField"
- }
- member_method {
- name: "IsInitialized"
- }
- member_method {
- name: "ListFields"
- }
- member_method {
- name: "MergeFrom"
- }
- member_method {
- name: "MergeFromString"
- }
- member_method {
- name: "ParseFromString"
- }
- member_method {
- name: "RegisterExtension"
- }
- member_method {
- name: "SerializePartialToString"
- }
- member_method {
- name: "SerializeToString"
- }
- member_method {
- name: "SetInParent"
- }
- member_method {
- name: "WhichOneof"
- }
- member_method {
- name: "__init__"
- }
-}
diff --git a/tensorflow/tools/api/golden/tensorflow.-session-log.pbtxt b/tensorflow/tools/api/golden/tensorflow.-session-log.pbtxt
deleted file mode 100644
index ec66d7f335..0000000000
--- a/tensorflow/tools/api/golden/tensorflow.-session-log.pbtxt
+++ /dev/null
@@ -1,108 +0,0 @@
-path: "tensorflow.SessionLog"
-tf_class {
- is_instance: "<class \'tensorflow.core.util.event_pb2.SessionLog\'>"
- is_instance: "<type \'google.protobuf.pyext._message.CMessage\'>"
- member {
- name: "CHECKPOINT"
- mtype: "<type \'int\'>"
- }
- member {
- name: "CHECKPOINT_PATH_FIELD_NUMBER"
- mtype: "<type \'int\'>"
- }
- member {
- name: "DESCRIPTOR"
- mtype: "<type \'google.protobuf.pyext._message.MessageDescriptor\'>"
- }
- member {
- name: "Extensions"
- mtype: "<type \'getset_descriptor\'>"
- }
- member {
- name: "MSG_FIELD_NUMBER"
- mtype: "<type \'int\'>"
- }
- member {
- name: "START"
- mtype: "<type \'int\'>"
- }
- member {
- name: "STATUS_FIELD_NUMBER"
- mtype: "<type \'int\'>"
- }
- member {
- name: "STATUS_UNSPECIFIED"
- mtype: "<type \'int\'>"
- }
- member {
- name: "STOP"
- mtype: "<type \'int\'>"
- }
- member {
- name: "SessionStatus"
- mtype: "<class \'google.protobuf.internal.enum_type_wrapper.EnumTypeWrapper\'>"
- }
- member_method {
- name: "ByteSize"
- }
- member_method {
- name: "Clear"
- }
- member_method {
- name: "ClearExtension"
- }
- member_method {
- name: "ClearField"
- }
- member_method {
- name: "CopyFrom"
- }
- member_method {
- name: "DiscardUnknownFields"
- }
- member_method {
- name: "FindInitializationErrors"
- }
- member_method {
- name: "FromString"
- }
- member_method {
- name: "HasExtension"
- }
- member_method {
- name: "HasField"
- }
- member_method {
- name: "IsInitialized"
- }
- member_method {
- name: "ListFields"
- }
- member_method {
- name: "MergeFrom"
- }
- member_method {
- name: "MergeFromString"
- }
- member_method {
- name: "ParseFromString"
- }
- member_method {
- name: "RegisterExtension"
- }
- member_method {
- name: "SerializePartialToString"
- }
- member_method {
- name: "SerializeToString"
- }
- member_method {
- name: "SetInParent"
- }
- member_method {
- name: "WhichOneof"
- }
- member_method {
- name: "__init__"
- }
-}
diff --git a/tensorflow/tools/api/golden/tensorflow.-summary-metadata.-plugin-data.pbtxt b/tensorflow/tools/api/golden/tensorflow.-summary-metadata.-plugin-data.pbtxt
deleted file mode 100644
index 067f02ce8c..0000000000
--- a/tensorflow/tools/api/golden/tensorflow.-summary-metadata.-plugin-data.pbtxt
+++ /dev/null
@@ -1,84 +0,0 @@
-path: "tensorflow.SummaryMetadata.PluginData"
-tf_class {
- is_instance: "<class \'tensorflow.core.framework.summary_pb2.PluginData\'>"
- is_instance: "<type \'google.protobuf.pyext._message.CMessage\'>"
- member {
- name: "CONTENT_FIELD_NUMBER"
- mtype: "<type \'int\'>"
- }
- member {
- name: "DESCRIPTOR"
- mtype: "<type \'google.protobuf.pyext._message.MessageDescriptor\'>"
- }
- member {
- name: "Extensions"
- mtype: "<type \'getset_descriptor\'>"
- }
- member {
- name: "PLUGIN_NAME_FIELD_NUMBER"
- mtype: "<type \'int\'>"
- }
- member_method {
- name: "ByteSize"
- }
- member_method {
- name: "Clear"
- }
- member_method {
- name: "ClearExtension"
- }
- member_method {
- name: "ClearField"
- }
- member_method {
- name: "CopyFrom"
- }
- member_method {
- name: "DiscardUnknownFields"
- }
- member_method {
- name: "FindInitializationErrors"
- }
- member_method {
- name: "FromString"
- }
- member_method {
- name: "HasExtension"
- }
- member_method {
- name: "HasField"
- }
- member_method {
- name: "IsInitialized"
- }
- member_method {
- name: "ListFields"
- }
- member_method {
- name: "MergeFrom"
- }
- member_method {
- name: "MergeFromString"
- }
- member_method {
- name: "ParseFromString"
- }
- member_method {
- name: "RegisterExtension"
- }
- member_method {
- name: "SerializePartialToString"
- }
- member_method {
- name: "SerializeToString"
- }
- member_method {
- name: "SetInParent"
- }
- member_method {
- name: "WhichOneof"
- }
- member_method {
- name: "__init__"
- }
-}
diff --git a/tensorflow/tools/api/golden/tensorflow.-summary-metadata.pbtxt b/tensorflow/tools/api/golden/tensorflow.-summary-metadata.pbtxt
deleted file mode 100644
index b9156521cc..0000000000
--- a/tensorflow/tools/api/golden/tensorflow.-summary-metadata.pbtxt
+++ /dev/null
@@ -1,92 +0,0 @@
-path: "tensorflow.SummaryMetadata"
-tf_class {
- is_instance: "<class \'tensorflow.core.framework.summary_pb2.SummaryMetadata\'>"
- is_instance: "<type \'google.protobuf.pyext._message.CMessage\'>"
- member {
- name: "DESCRIPTOR"
- mtype: "<type \'google.protobuf.pyext._message.MessageDescriptor\'>"
- }
- member {
- name: "DISPLAY_NAME_FIELD_NUMBER"
- mtype: "<type \'int\'>"
- }
- member {
- name: "Extensions"
- mtype: "<type \'getset_descriptor\'>"
- }
- member {
- name: "PLUGIN_DATA_FIELD_NUMBER"
- mtype: "<type \'int\'>"
- }
- member {
- name: "PluginData"
- mtype: "<class \'google.protobuf.pyext.cpp_message.GeneratedProtocolMessageType\'>"
- }
- member {
- name: "SUMMARY_DESCRIPTION_FIELD_NUMBER"
- mtype: "<type \'int\'>"
- }
- member_method {
- name: "ByteSize"
- }
- member_method {
- name: "Clear"
- }
- member_method {
- name: "ClearExtension"
- }
- member_method {
- name: "ClearField"
- }
- member_method {
- name: "CopyFrom"
- }
- member_method {
- name: "DiscardUnknownFields"
- }
- member_method {
- name: "FindInitializationErrors"
- }
- member_method {
- name: "FromString"
- }
- member_method {
- name: "HasExtension"
- }
- member_method {
- name: "HasField"
- }
- member_method {
- name: "IsInitialized"
- }
- member_method {
- name: "ListFields"
- }
- member_method {
- name: "MergeFrom"
- }
- member_method {
- name: "MergeFromString"
- }
- member_method {
- name: "ParseFromString"
- }
- member_method {
- name: "RegisterExtension"
- }
- member_method {
- name: "SerializePartialToString"
- }
- member_method {
- name: "SerializeToString"
- }
- member_method {
- name: "SetInParent"
- }
- member_method {
- name: "WhichOneof"
- }
- member_method {
- name: "__init__"
- }
-}
diff --git a/tensorflow/tools/api/golden/tensorflow.-summary.-audio.pbtxt b/tensorflow/tools/api/golden/tensorflow.-summary.-audio.pbtxt
deleted file mode 100644
index 781010d75e..0000000000
--- a/tensorflow/tools/api/golden/tensorflow.-summary.-audio.pbtxt
+++ /dev/null
@@ -1,96 +0,0 @@
-path: "tensorflow.Summary.Audio"
-tf_class {
- is_instance: "<class \'tensorflow.core.framework.summary_pb2.Audio\'>"
- is_instance: "<type \'google.protobuf.pyext._message.CMessage\'>"
- member {
- name: "CONTENT_TYPE_FIELD_NUMBER"
- mtype: "<type \'int\'>"
- }
- member {
- name: "DESCRIPTOR"
- mtype: "<type \'google.protobuf.pyext._message.MessageDescriptor\'>"
- }
- member {
- name: "ENCODED_AUDIO_STRING_FIELD_NUMBER"
- mtype: "<type \'int\'>"
- }
- member {
- name: "Extensions"
- mtype: "<type \'getset_descriptor\'>"
- }
- member {
- name: "LENGTH_FRAMES_FIELD_NUMBER"
- mtype: "<type \'int\'>"
- }
- member {
- name: "NUM_CHANNELS_FIELD_NUMBER"
- mtype: "<type \'int\'>"
- }
- member {
- name: "SAMPLE_RATE_FIELD_NUMBER"
- mtype: "<type \'int\'>"
- }
- member_method {
- name: "ByteSize"
- }
- member_method {
- name: "Clear"
- }
- member_method {
- name: "ClearExtension"
- }
- member_method {
- name: "ClearField"
- }
- member_method {
- name: "CopyFrom"
- }
- member_method {
- name: "DiscardUnknownFields"
- }
- member_method {
- name: "FindInitializationErrors"
- }
- member_method {
- name: "FromString"
- }
- member_method {
- name: "HasExtension"
- }
- member_method {
- name: "HasField"
- }
- member_method {
- name: "IsInitialized"
- }
- member_method {
- name: "ListFields"
- }
- member_method {
- name: "MergeFrom"
- }
- member_method {
- name: "MergeFromString"
- }
- member_method {
- name: "ParseFromString"
- }
- member_method {
- name: "RegisterExtension"
- }
- member_method {
- name: "SerializePartialToString"
- }
- member_method {
- name: "SerializeToString"
- }
- member_method {
- name: "SetInParent"
- }
- member_method {
- name: "WhichOneof"
- }
- member_method {
- name: "__init__"
- }
-}
diff --git a/tensorflow/tools/api/golden/tensorflow.-summary.-image.pbtxt b/tensorflow/tools/api/golden/tensorflow.-summary.-image.pbtxt
deleted file mode 100644
index feb9c7ee92..0000000000
--- a/tensorflow/tools/api/golden/tensorflow.-summary.-image.pbtxt
+++ /dev/null
@@ -1,92 +0,0 @@
-path: "tensorflow.Summary.Image"
-tf_class {
- is_instance: "<class \'tensorflow.core.framework.summary_pb2.Image\'>"
- is_instance: "<type \'google.protobuf.pyext._message.CMessage\'>"
- member {
- name: "COLORSPACE_FIELD_NUMBER"
- mtype: "<type \'int\'>"
- }
- member {
- name: "DESCRIPTOR"
- mtype: "<type \'google.protobuf.pyext._message.MessageDescriptor\'>"
- }
- member {
- name: "ENCODED_IMAGE_STRING_FIELD_NUMBER"
- mtype: "<type \'int\'>"
- }
- member {
- name: "Extensions"
- mtype: "<type \'getset_descriptor\'>"
- }
- member {
- name: "HEIGHT_FIELD_NUMBER"
- mtype: "<type \'int\'>"
- }
- member {
- name: "WIDTH_FIELD_NUMBER"
- mtype: "<type \'int\'>"
- }
- member_method {
- name: "ByteSize"
- }
- member_method {
- name: "Clear"
- }
- member_method {
- name: "ClearExtension"
- }
- member_method {
- name: "ClearField"
- }
- member_method {
- name: "CopyFrom"
- }
- member_method {
- name: "DiscardUnknownFields"
- }
- member_method {
- name: "FindInitializationErrors"
- }
- member_method {
- name: "FromString"
- }
- member_method {
- name: "HasExtension"
- }
- member_method {
- name: "HasField"
- }
- member_method {
- name: "IsInitialized"
- }
- member_method {
- name: "ListFields"
- }
- member_method {
- name: "MergeFrom"
- }
- member_method {
- name: "MergeFromString"
- }
- member_method {
- name: "ParseFromString"
- }
- member_method {
- name: "RegisterExtension"
- }
- member_method {
- name: "SerializePartialToString"
- }
- member_method {
- name: "SerializeToString"
- }
- member_method {
- name: "SetInParent"
- }
- member_method {
- name: "WhichOneof"
- }
- member_method {
- name: "__init__"
- }
-}
diff --git a/tensorflow/tools/api/golden/tensorflow.-summary.-value.pbtxt b/tensorflow/tools/api/golden/tensorflow.-summary.-value.pbtxt
deleted file mode 100644
index ffb4f45fc5..0000000000
--- a/tensorflow/tools/api/golden/tensorflow.-summary.-value.pbtxt
+++ /dev/null
@@ -1,112 +0,0 @@
-path: "tensorflow.Summary.Value"
-tf_class {
- is_instance: "<class \'tensorflow.core.framework.summary_pb2.Value\'>"
- is_instance: "<type \'google.protobuf.pyext._message.CMessage\'>"
- member {
- name: "AUDIO_FIELD_NUMBER"
- mtype: "<type \'int\'>"
- }
- member {
- name: "DESCRIPTOR"
- mtype: "<type \'google.protobuf.pyext._message.MessageDescriptor\'>"
- }
- member {
- name: "Extensions"
- mtype: "<type \'getset_descriptor\'>"
- }
- member {
- name: "HISTO_FIELD_NUMBER"
- mtype: "<type \'int\'>"
- }
- member {
- name: "IMAGE_FIELD_NUMBER"
- mtype: "<type \'int\'>"
- }
- member {
- name: "METADATA_FIELD_NUMBER"
- mtype: "<type \'int\'>"
- }
- member {
- name: "NODE_NAME_FIELD_NUMBER"
- mtype: "<type \'int\'>"
- }
- member {
- name: "OBSOLETE_OLD_STYLE_HISTOGRAM_FIELD_NUMBER"
- mtype: "<type \'int\'>"
- }
- member {
- name: "SIMPLE_VALUE_FIELD_NUMBER"
- mtype: "<type \'int\'>"
- }
- member {
- name: "TAG_FIELD_NUMBER"
- mtype: "<type \'int\'>"
- }
- member {
- name: "TENSOR_FIELD_NUMBER"
- mtype: "<type \'int\'>"
- }
- member_method {
- name: "ByteSize"
- }
- member_method {
- name: "Clear"
- }
- member_method {
- name: "ClearExtension"
- }
- member_method {
- name: "ClearField"
- }
- member_method {
- name: "CopyFrom"
- }
- member_method {
- name: "DiscardUnknownFields"
- }
- member_method {
- name: "FindInitializationErrors"
- }
- member_method {
- name: "FromString"
- }
- member_method {
- name: "HasExtension"
- }
- member_method {
- name: "HasField"
- }
- member_method {
- name: "IsInitialized"
- }
- member_method {
- name: "ListFields"
- }
- member_method {
- name: "MergeFrom"
- }
- member_method {
- name: "MergeFromString"
- }
- member_method {
- name: "ParseFromString"
- }
- member_method {
- name: "RegisterExtension"
- }
- member_method {
- name: "SerializePartialToString"
- }
- member_method {
- name: "SerializeToString"
- }
- member_method {
- name: "SetInParent"
- }
- member_method {
- name: "WhichOneof"
- }
- member_method {
- name: "__init__"
- }
-}
diff --git a/tensorflow/tools/api/golden/tensorflow.-summary.pbtxt b/tensorflow/tools/api/golden/tensorflow.-summary.pbtxt
deleted file mode 100644
index 38de17fa9e..0000000000
--- a/tensorflow/tools/api/golden/tensorflow.-summary.pbtxt
+++ /dev/null
@@ -1,92 +0,0 @@
-path: "tensorflow.Summary"
-tf_class {
- is_instance: "<class \'tensorflow.core.framework.summary_pb2.Summary\'>"
- is_instance: "<type \'google.protobuf.pyext._message.CMessage\'>"
- member {
- name: "Audio"
- mtype: "<class \'google.protobuf.pyext.cpp_message.GeneratedProtocolMessageType\'>"
- }
- member {
- name: "DESCRIPTOR"
- mtype: "<type \'google.protobuf.pyext._message.MessageDescriptor\'>"
- }
- member {
- name: "Extensions"
- mtype: "<type \'getset_descriptor\'>"
- }
- member {
- name: "Image"
- mtype: "<class \'google.protobuf.pyext.cpp_message.GeneratedProtocolMessageType\'>"
- }
- member {
- name: "VALUE_FIELD_NUMBER"
- mtype: "<type \'int\'>"
- }
- member {
- name: "Value"
- mtype: "<class \'google.protobuf.pyext.cpp_message.GeneratedProtocolMessageType\'>"
- }
- member_method {
- name: "ByteSize"
- }
- member_method {
- name: "Clear"
- }
- member_method {
- name: "ClearExtension"
- }
- member_method {
- name: "ClearField"
- }
- member_method {
- name: "CopyFrom"
- }
- member_method {
- name: "DiscardUnknownFields"
- }
- member_method {
- name: "FindInitializationErrors"
- }
- member_method {
- name: "FromString"
- }
- member_method {
- name: "HasExtension"
- }
- member_method {
- name: "HasField"
- }
- member_method {
- name: "IsInitialized"
- }
- member_method {
- name: "ListFields"
- }
- member_method {
- name: "MergeFrom"
- }
- member_method {
- name: "MergeFromString"
- }
- member_method {
- name: "ParseFromString"
- }
- member_method {
- name: "RegisterExtension"
- }
- member_method {
- name: "SerializePartialToString"
- }
- member_method {
- name: "SerializeToString"
- }
- member_method {
- name: "SetInParent"
- }
- member_method {
- name: "WhichOneof"
- }
- member_method {
- name: "__init__"
- }
-}
diff --git a/tensorflow/tools/api/golden/tensorflow.-tensor-info.-coo-sparse.pbtxt b/tensorflow/tools/api/golden/tensorflow.-tensor-info.-coo-sparse.pbtxt
deleted file mode 100644
index 425c35e067..0000000000
--- a/tensorflow/tools/api/golden/tensorflow.-tensor-info.-coo-sparse.pbtxt
+++ /dev/null
@@ -1,88 +0,0 @@
-path: "tensorflow.TensorInfo.CooSparse"
-tf_class {
- is_instance: "<class \'tensorflow.core.protobuf.meta_graph_pb2.CooSparse\'>"
- is_instance: "<type \'google.protobuf.pyext._message.CMessage\'>"
- member {
- name: "DENSE_SHAPE_TENSOR_NAME_FIELD_NUMBER"
- mtype: "<type \'int\'>"
- }
- member {
- name: "DESCRIPTOR"
- mtype: "<type \'google.protobuf.pyext._message.MessageDescriptor\'>"
- }
- member {
- name: "Extensions"
- mtype: "<type \'getset_descriptor\'>"
- }
- member {
- name: "INDICES_TENSOR_NAME_FIELD_NUMBER"
- mtype: "<type \'int\'>"
- }
- member {
- name: "VALUES_TENSOR_NAME_FIELD_NUMBER"
- mtype: "<type \'int\'>"
- }
- member_method {
- name: "ByteSize"
- }
- member_method {
- name: "Clear"
- }
- member_method {
- name: "ClearExtension"
- }
- member_method {
- name: "ClearField"
- }
- member_method {
- name: "CopyFrom"
- }
- member_method {
- name: "DiscardUnknownFields"
- }
- member_method {
- name: "FindInitializationErrors"
- }
- member_method {
- name: "FromString"
- }
- member_method {
- name: "HasExtension"
- }
- member_method {
- name: "HasField"
- }
- member_method {
- name: "IsInitialized"
- }
- member_method {
- name: "ListFields"
- }
- member_method {
- name: "MergeFrom"
- }
- member_method {
- name: "MergeFromString"
- }
- member_method {
- name: "ParseFromString"
- }
- member_method {
- name: "RegisterExtension"
- }
- member_method {
- name: "SerializePartialToString"
- }
- member_method {
- name: "SerializeToString"
- }
- member_method {
- name: "SetInParent"
- }
- member_method {
- name: "WhichOneof"
- }
- member_method {
- name: "__init__"
- }
-}
diff --git a/tensorflow/tools/api/golden/tensorflow.-tensor-info.pbtxt b/tensorflow/tools/api/golden/tensorflow.-tensor-info.pbtxt
deleted file mode 100644
index 41ea393be5..0000000000
--- a/tensorflow/tools/api/golden/tensorflow.-tensor-info.pbtxt
+++ /dev/null
@@ -1,96 +0,0 @@
-path: "tensorflow.TensorInfo"
-tf_class {
- is_instance: "<class \'tensorflow.core.protobuf.meta_graph_pb2.TensorInfo\'>"
- is_instance: "<type \'google.protobuf.pyext._message.CMessage\'>"
- member {
- name: "COO_SPARSE_FIELD_NUMBER"
- mtype: "<type \'int\'>"
- }
- member {
- name: "CooSparse"
- mtype: "<class \'google.protobuf.pyext.cpp_message.GeneratedProtocolMessageType\'>"
- }
- member {
- name: "DESCRIPTOR"
- mtype: "<type \'google.protobuf.pyext._message.MessageDescriptor\'>"
- }
- member {
- name: "DTYPE_FIELD_NUMBER"
- mtype: "<type \'int\'>"
- }
- member {
- name: "Extensions"
- mtype: "<type \'getset_descriptor\'>"
- }
- member {
- name: "NAME_FIELD_NUMBER"
- mtype: "<type \'int\'>"
- }
- member {
- name: "TENSOR_SHAPE_FIELD_NUMBER"
- mtype: "<type \'int\'>"
- }
- member_method {
- name: "ByteSize"
- }
- member_method {
- name: "Clear"
- }
- member_method {
- name: "ClearExtension"
- }
- member_method {
- name: "ClearField"
- }
- member_method {
- name: "CopyFrom"
- }
- member_method {
- name: "DiscardUnknownFields"
- }
- member_method {
- name: "FindInitializationErrors"
- }
- member_method {
- name: "FromString"
- }
- member_method {
- name: "HasExtension"
- }
- member_method {
- name: "HasField"
- }
- member_method {
- name: "IsInitialized"
- }
- member_method {
- name: "ListFields"
- }
- member_method {
- name: "MergeFrom"
- }
- member_method {
- name: "MergeFromString"
- }
- member_method {
- name: "ParseFromString"
- }
- member_method {
- name: "RegisterExtension"
- }
- member_method {
- name: "SerializePartialToString"
- }
- member_method {
- name: "SerializeToString"
- }
- member_method {
- name: "SetInParent"
- }
- member_method {
- name: "WhichOneof"
- }
- member_method {
- name: "__init__"
- }
-}
diff --git a/tensorflow/tools/api/golden/tensorflow.data.-iterator.pbtxt b/tensorflow/tools/api/golden/tensorflow.data.-iterator.pbtxt
index 1f9aeb6ad6..4f0147a523 100644
--- a/tensorflow/tools/api/golden/tensorflow.data.-iterator.pbtxt
+++ b/tensorflow/tools/api/golden/tensorflow.data.-iterator.pbtxt
@@ -1,6 +1,7 @@
path: "tensorflow.data.Iterator"
tf_class {
is_instance: "<class \'tensorflow.python.data.ops.iterator_ops.Iterator\'>"
+ is_instance: "<class \'tensorflow.python.training.checkpointable.base.CheckpointableBase\'>"
is_instance: "<type \'object\'>"
member {
name: "initializer"
diff --git a/tensorflow/tools/api/golden/tensorflow.estimator.-boosted-trees-classifier.pbtxt b/tensorflow/tools/api/golden/tensorflow.estimator.-boosted-trees-classifier.pbtxt
index fd9be8c759..c23b04b4ef 100644
--- a/tensorflow/tools/api/golden/tensorflow.estimator.-boosted-trees-classifier.pbtxt
+++ b/tensorflow/tools/api/golden/tensorflow.estimator.-boosted-trees-classifier.pbtxt
@@ -21,7 +21,11 @@ tf_class {
}
member_method {
name: "__init__"
- argspec: "args=[\'self\', \'feature_columns\', \'n_batches_per_layer\', \'model_dir\', \'n_classes\', \'weight_column\', \'label_vocabulary\', \'n_trees\', \'max_depth\', \'learning_rate\', \'l1_regularization\', \'l2_regularization\', \'tree_complexity\', \'config\'], varargs=None, keywords=None, defaults=[\'None\', \'<object object instance>\', \'None\', \'None\', \'100\', \'6\', \'0.1\', \'0.0\', \'0.0\', \'0.0\', \'None\'], "
+ argspec: "args=[\'self\', \'feature_columns\', \'n_batches_per_layer\', \'model_dir\', \'n_classes\', \'weight_column\', \'label_vocabulary\', \'n_trees\', \'max_depth\', \'learning_rate\', \'l1_regularization\', \'l2_regularization\', \'tree_complexity\', \'min_node_weight\', \'config\', \'center_bias\', \'pruning_mode\'], varargs=None, keywords=None, defaults=[\'None\', \'<object object instance>\', \'None\', \'None\', \'100\', \'6\', \'0.1\', \'0.0\', \'0.0\', \'0.0\', \'0.0\', \'None\', \'False\', \'none\'], "
+ }
+ member_method {
+ name: "eval_dir"
+ argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
}
member_method {
name: "evaluate"
diff --git a/tensorflow/tools/api/golden/tensorflow.estimator.-boosted-trees-regressor.pbtxt b/tensorflow/tools/api/golden/tensorflow.estimator.-boosted-trees-regressor.pbtxt
index 6b305be43f..6878d28fff 100644
--- a/tensorflow/tools/api/golden/tensorflow.estimator.-boosted-trees-regressor.pbtxt
+++ b/tensorflow/tools/api/golden/tensorflow.estimator.-boosted-trees-regressor.pbtxt
@@ -21,7 +21,11 @@ tf_class {
}
member_method {
name: "__init__"
- argspec: "args=[\'self\', \'feature_columns\', \'n_batches_per_layer\', \'model_dir\', \'label_dimension\', \'weight_column\', \'n_trees\', \'max_depth\', \'learning_rate\', \'l1_regularization\', \'l2_regularization\', \'tree_complexity\', \'config\'], varargs=None, keywords=None, defaults=[\'None\', \'<object object instance>\', \'None\', \'100\', \'6\', \'0.1\', \'0.0\', \'0.0\', \'0.0\', \'None\'], "
+ argspec: "args=[\'self\', \'feature_columns\', \'n_batches_per_layer\', \'model_dir\', \'label_dimension\', \'weight_column\', \'n_trees\', \'max_depth\', \'learning_rate\', \'l1_regularization\', \'l2_regularization\', \'tree_complexity\', \'min_node_weight\', \'config\', \'center_bias\', \'pruning_mode\'], varargs=None, keywords=None, defaults=[\'None\', \'<object object instance>\', \'None\', \'100\', \'6\', \'0.1\', \'0.0\', \'0.0\', \'0.0\', \'0.0\', \'None\', \'False\', \'none\'], "
+ }
+ member_method {
+ name: "eval_dir"
+ argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
}
member_method {
name: "evaluate"
diff --git a/tensorflow/tools/api/golden/tensorflow.estimator.-run-config.pbtxt b/tensorflow/tools/api/golden/tensorflow.estimator.-run-config.pbtxt
index c8da55d802..bf1f94b6ae 100644
--- a/tensorflow/tools/api/golden/tensorflow.estimator.-run-config.pbtxt
+++ b/tensorflow/tools/api/golden/tensorflow.estimator.-run-config.pbtxt
@@ -11,6 +11,10 @@ tf_class {
mtype: "<type \'property\'>"
}
member {
+ name: "eval_distribute"
+ mtype: "<type \'property\'>"
+ }
+ member {
name: "evaluation_master"
mtype: "<type \'property\'>"
}
@@ -51,6 +55,10 @@ tf_class {
mtype: "<type \'property\'>"
}
member {
+ name: "protocol"
+ mtype: "<type \'property\'>"
+ }
+ member {
name: "save_checkpoints_secs"
mtype: "<type \'property\'>"
}
@@ -88,7 +96,7 @@ tf_class {
}
member_method {
name: "__init__"
- argspec: "args=[\'self\', \'model_dir\', \'tf_random_seed\', \'save_summary_steps\', \'save_checkpoints_steps\', \'save_checkpoints_secs\', \'session_config\', \'keep_checkpoint_max\', \'keep_checkpoint_every_n_hours\', \'log_step_count_steps\', \'train_distribute\', \'device_fn\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'100\', \'<object object instance>\', \'<object object instance>\', \'None\', \'5\', \'10000\', \'100\', \'None\', \'None\'], "
+ argspec: "args=[\'self\', \'model_dir\', \'tf_random_seed\', \'save_summary_steps\', \'save_checkpoints_steps\', \'save_checkpoints_secs\', \'session_config\', \'keep_checkpoint_max\', \'keep_checkpoint_every_n_hours\', \'log_step_count_steps\', \'train_distribute\', \'device_fn\', \'protocol\', \'eval_distribute\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'100\', \'<object object instance>\', \'<object object instance>\', \'None\', \'5\', \'10000\', \'100\', \'None\', \'None\', \'None\', \'None\'], "
}
member_method {
name: "replace"
diff --git a/tensorflow/tools/api/golden/tensorflow.image.pbtxt b/tensorflow/tools/api/golden/tensorflow.image.pbtxt
index 3fc64dae88..5c46dc5ee7 100644
--- a/tensorflow/tools/api/golden/tensorflow.image.pbtxt
+++ b/tensorflow/tools/api/golden/tensorflow.image.pbtxt
@@ -21,6 +21,10 @@ tf_module {
argspec: "args=[\'image\', \'delta\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
}
member_method {
+ name: "adjust_jpeg_quality"
+ argspec: "args=[\'image\', \'jpeg_quality\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
name: "adjust_saturation"
argspec: "args=[\'image\', \'saturation_factor\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
}
@@ -54,7 +58,7 @@ tf_module {
}
member_method {
name: "decode_image"
- argspec: "args=[\'contents\', \'channels\', \'name\'], varargs=None, keywords=None, defaults=[\'None\', \'None\'], "
+ argspec: "args=[\'contents\', \'channels\', \'dtype\', \'name\'], varargs=None, keywords=None, defaults=[\'None\', \"<dtype: \'uint8\'>\", \'None\'], "
}
member_method {
name: "decode_jpeg"
@@ -81,6 +85,10 @@ tf_module {
argspec: "args=[\'input\', \'size\', \'offsets\', \'centered\', \'normalized\', \'uniform_noise\', \'name\'], varargs=None, keywords=None, defaults=[\'True\', \'True\', \'True\', \'None\'], "
}
member_method {
+ name: "extract_image_patches"
+ argspec: "args=[\'images\', \'ksizes\', \'strides\', \'rates\', \'padding\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
name: "extract_jpeg_shape"
argspec: "args=[\'contents\', \'output_type\', \'name\'], varargs=None, keywords=None, defaults=[\"<dtype: \'int32\'>\", \'None\'], "
}
@@ -110,7 +118,15 @@ tf_module {
}
member_method {
name: "non_max_suppression"
- argspec: "args=[\'boxes\', \'scores\', \'max_output_size\', \'iou_threshold\', \'name\'], varargs=None, keywords=None, defaults=[\'0.5\', \'None\'], "
+ argspec: "args=[\'boxes\', \'scores\', \'max_output_size\', \'iou_threshold\', \'score_threshold\', \'name\'], varargs=None, keywords=None, defaults=[\'0.5\', \'-inf\', \'None\'], "
+ }
+ member_method {
+ name: "non_max_suppression_overlaps"
+ argspec: "args=[\'overlaps\', \'scores\', \'max_output_size\', \'overlap_threshold\', \'score_threshold\', \'name\'], varargs=None, keywords=None, defaults=[\'0.5\', \'-inf\', \'None\'], "
+ }
+ member_method {
+ name: "non_max_suppression_padded"
+ argspec: "args=[\'boxes\', \'scores\', \'max_output_size\', \'iou_threshold\', \'score_threshold\', \'pad_to_max_output_size\', \'name\'], varargs=None, keywords=None, defaults=[\'0.5\', \'-inf\', \'False\', \'None\'], "
}
member_method {
name: "pad_to_bounding_box"
@@ -145,6 +161,10 @@ tf_module {
argspec: "args=[\'image\', \'max_delta\', \'seed\'], varargs=None, keywords=None, defaults=[\'None\'], "
}
member_method {
+ name: "random_jpeg_quality"
+ argspec: "args=[\'image\', \'min_jpeg_quality\', \'max_jpeg_quality\', \'seed\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
name: "random_saturation"
argspec: "args=[\'image\', \'lower\', \'upper\', \'seed\'], varargs=None, keywords=None, defaults=[\'None\'], "
}
@@ -165,8 +185,12 @@ tf_module {
argspec: "args=[\'image\', \'target_height\', \'target_width\'], varargs=None, keywords=None, defaults=None"
}
member_method {
+ name: "resize_image_with_pad"
+ argspec: "args=[\'image\', \'target_height\', \'target_width\', \'method\'], varargs=None, keywords=None, defaults=[\'0\'], "
+ }
+ member_method {
name: "resize_images"
- argspec: "args=[\'images\', \'size\', \'method\', \'align_corners\'], varargs=None, keywords=None, defaults=[\'0\', \'False\'], "
+ argspec: "args=[\'images\', \'size\', \'method\', \'align_corners\', \'preserve_aspect_ratio\'], varargs=None, keywords=None, defaults=[\'0\', \'False\', \'False\'], "
}
member_method {
name: "resize_nearest_neighbor"
diff --git a/tensorflow/tools/api/golden/tensorflow.keras.-model.pbtxt b/tensorflow/tools/api/golden/tensorflow.keras.-model.pbtxt
index 7713d78b8a..e579fe6a1a 100644
--- a/tensorflow/tools/api/golden/tensorflow.keras.-model.pbtxt
+++ b/tensorflow/tools/api/golden/tensorflow.keras.-model.pbtxt
@@ -1,9 +1,9 @@
path: "tensorflow.keras.Model"
tf_class {
- is_instance: "<class \'tensorflow.python.keras._impl.keras.engine.training.Model\'>"
- is_instance: "<class \'tensorflow.python.keras._impl.keras.engine.network.Network\'>"
- is_instance: "<class \'tensorflow.python.keras._impl.keras.engine.base_layer.Layer\'>"
- is_instance: "<class \'tensorflow.python.training.checkpointable.CheckpointableBase\'>"
+ is_instance: "<class \'tensorflow.python.keras.engine.training.Model\'>"
+ is_instance: "<class \'tensorflow.python.keras.engine.network.Network\'>"
+ is_instance: "<class \'tensorflow.python.keras.engine.base_layer.Layer\'>"
+ is_instance: "<class \'tensorflow.python.training.checkpointable.base.CheckpointableBase\'>"
is_instance: "<type \'object\'>"
member {
name: "activity_regularizer"
@@ -119,7 +119,7 @@ tf_class {
}
member_method {
name: "add_weight"
- argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'getter\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'True\', \'None\', \'None\', \'None\', \'None\'], "
+ argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'synchronization\', \'aggregation\', \'getter\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'VariableSynchronization.AUTO\', \'VariableAggregation.NONE\', \'None\'], "
}
member_method {
name: "apply"
@@ -127,7 +127,7 @@ tf_class {
}
member_method {
name: "build"
- argspec: "args=[\'self\', \'_\'], varargs=None, keywords=None, defaults=None"
+ argspec: "args=[\'self\', \'input_shape\'], varargs=None, keywords=None, defaults=None"
}
member_method {
name: "call"
@@ -135,7 +135,7 @@ tf_class {
}
member_method {
name: "compile"
- argspec: "args=[\'self\', \'optimizer\', \'loss\', \'metrics\', \'loss_weights\', \'sample_weight_mode\', \'weighted_metrics\', \'target_tensors\'], varargs=None, keywords=kwargs, defaults=[\'None\', \'None\', \'None\', \'None\', \'None\', \'None\'], "
+ argspec: "args=[\'self\', \'optimizer\', \'loss\', \'metrics\', \'loss_weights\', \'sample_weight_mode\', \'weighted_metrics\', \'target_tensors\', \'distribute\'], varargs=None, keywords=kwargs, defaults=[\'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'None\'], "
}
member_method {
name: "compute_mask"
@@ -155,7 +155,7 @@ tf_class {
}
member_method {
name: "evaluate_generator"
- argspec: "args=[\'self\', \'generator\', \'steps\', \'max_queue_size\', \'workers\', \'use_multiprocessing\'], varargs=None, keywords=None, defaults=[\'None\', \'10\', \'1\', \'False\'], "
+ argspec: "args=[\'self\', \'generator\', \'steps\', \'max_queue_size\', \'workers\', \'use_multiprocessing\', \'verbose\'], varargs=None, keywords=None, defaults=[\'None\', \'10\', \'1\', \'False\', \'0\'], "
}
member_method {
name: "fit"
@@ -239,7 +239,7 @@ tf_class {
}
member_method {
name: "save_weights"
- argspec: "args=[\'self\', \'filepath\', \'overwrite\'], varargs=None, keywords=None, defaults=[\'True\'], "
+ argspec: "args=[\'self\', \'filepath\', \'overwrite\', \'save_format\'], varargs=None, keywords=None, defaults=[\'True\', \'None\'], "
}
member_method {
name: "set_weights"
@@ -251,7 +251,7 @@ tf_class {
}
member_method {
name: "test_on_batch"
- argspec: "args=[\'self\', \'x\', \'y\', \'sample_weight\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ argspec: "args=[\'self\', \'x\', \'y\', \'sample_weight\'], varargs=None, keywords=None, defaults=[\'None\', \'None\'], "
}
member_method {
name: "to_json"
@@ -263,6 +263,6 @@ tf_class {
}
member_method {
name: "train_on_batch"
- argspec: "args=[\'self\', \'x\', \'y\', \'sample_weight\', \'class_weight\'], varargs=None, keywords=None, defaults=[\'None\', \'None\'], "
+ argspec: "args=[\'self\', \'x\', \'y\', \'sample_weight\', \'class_weight\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\'], "
}
}
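The Keras Model changes above are mostly additive keywords: compile gains distribute, evaluate_generator gains verbose, save_weights gains save_format, and build now names its argument input_shape. A hedged sketch of the save_weights path, assuming 'h5' remains an accepted save_format value in this release (the golden file only records that the argument exists):

    import tensorflow as tf

    model = tf.keras.Sequential([
        tf.keras.layers.Dense(4, activation='relu', input_shape=(8,)),
        tf.keras.layers.Dense(1)])
    model.compile(optimizer='sgd', loss='mse')

    # save_format is new; None keeps the old behaviour of inferring the format
    # from the filepath, while an explicit value forces it.
    model.save_weights('/tmp/weights.h5', overwrite=True, save_format='h5')
    model.load_weights('/tmp/weights.h5')

The Sequential golden that follows records the same set of signature changes.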
diff --git a/tensorflow/tools/api/golden/tensorflow.keras.-sequential.pbtxt b/tensorflow/tools/api/golden/tensorflow.keras.-sequential.pbtxt
index 69b81f75fa..6f05cdd093 100644
--- a/tensorflow/tools/api/golden/tensorflow.keras.-sequential.pbtxt
+++ b/tensorflow/tools/api/golden/tensorflow.keras.-sequential.pbtxt
@@ -1,10 +1,10 @@
path: "tensorflow.keras.Sequential"
tf_class {
- is_instance: "<class \'tensorflow.python.keras._impl.keras.engine.sequential.Sequential\'>"
- is_instance: "<class \'tensorflow.python.keras._impl.keras.engine.training.Model\'>"
- is_instance: "<class \'tensorflow.python.keras._impl.keras.engine.network.Network\'>"
- is_instance: "<class \'tensorflow.python.keras._impl.keras.engine.base_layer.Layer\'>"
- is_instance: "<class \'tensorflow.python.training.checkpointable.CheckpointableBase\'>"
+ is_instance: "<class \'tensorflow.python.keras.engine.sequential.Sequential\'>"
+ is_instance: "<class \'tensorflow.python.keras.engine.training.Model\'>"
+ is_instance: "<class \'tensorflow.python.keras.engine.network.Network\'>"
+ is_instance: "<class \'tensorflow.python.keras.engine.base_layer.Layer\'>"
+ is_instance: "<class \'tensorflow.python.training.checkpointable.base.CheckpointableBase\'>"
is_instance: "<type \'object\'>"
member {
name: "activity_regularizer"
@@ -124,7 +124,7 @@ tf_class {
}
member_method {
name: "add_weight"
- argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'getter\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'True\', \'None\', \'None\', \'None\', \'None\'], "
+ argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'synchronization\', \'aggregation\', \'getter\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'VariableSynchronization.AUTO\', \'VariableAggregation.NONE\', \'None\'], "
}
member_method {
name: "apply"
@@ -140,7 +140,7 @@ tf_class {
}
member_method {
name: "compile"
- argspec: "args=[\'self\', \'optimizer\', \'loss\', \'metrics\', \'loss_weights\', \'sample_weight_mode\', \'weighted_metrics\', \'target_tensors\'], varargs=None, keywords=kwargs, defaults=[\'None\', \'None\', \'None\', \'None\', \'None\', \'None\'], "
+ argspec: "args=[\'self\', \'optimizer\', \'loss\', \'metrics\', \'loss_weights\', \'sample_weight_mode\', \'weighted_metrics\', \'target_tensors\', \'distribute\'], varargs=None, keywords=kwargs, defaults=[\'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'None\'], "
}
member_method {
name: "compute_mask"
@@ -160,7 +160,7 @@ tf_class {
}
member_method {
name: "evaluate_generator"
- argspec: "args=[\'self\', \'generator\', \'steps\', \'max_queue_size\', \'workers\', \'use_multiprocessing\'], varargs=None, keywords=None, defaults=[\'None\', \'10\', \'1\', \'False\'], "
+ argspec: "args=[\'self\', \'generator\', \'steps\', \'max_queue_size\', \'workers\', \'use_multiprocessing\', \'verbose\'], varargs=None, keywords=None, defaults=[\'None\', \'10\', \'1\', \'False\', \'0\'], "
}
member_method {
name: "fit"
@@ -256,7 +256,7 @@ tf_class {
}
member_method {
name: "save_weights"
- argspec: "args=[\'self\', \'filepath\', \'overwrite\'], varargs=None, keywords=None, defaults=[\'True\'], "
+ argspec: "args=[\'self\', \'filepath\', \'overwrite\', \'save_format\'], varargs=None, keywords=None, defaults=[\'True\', \'None\'], "
}
member_method {
name: "set_weights"
@@ -267,8 +267,12 @@ tf_class {
argspec: "args=[\'self\', \'line_length\', \'positions\', \'print_fn\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\'], "
}
member_method {
+ name: "symbolic_set_inputs"
+ argspec: "args=[\'self\', \'inputs\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
name: "test_on_batch"
- argspec: "args=[\'self\', \'x\', \'y\', \'sample_weight\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ argspec: "args=[\'self\', \'x\', \'y\', \'sample_weight\'], varargs=None, keywords=None, defaults=[\'None\', \'None\'], "
}
member_method {
name: "to_json"
@@ -280,6 +284,6 @@ tf_class {
}
member_method {
name: "train_on_batch"
- argspec: "args=[\'self\', \'x\', \'y\', \'sample_weight\', \'class_weight\'], varargs=None, keywords=None, defaults=[\'None\', \'None\'], "
+ argspec: "args=[\'self\', \'x\', \'y\', \'sample_weight\', \'class_weight\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\'], "
}
}
diff --git a/tensorflow/tools/api/golden/tensorflow.keras.activations.pbtxt b/tensorflow/tools/api/golden/tensorflow.keras.activations.pbtxt
index 2cd83baf65..2e9de9ebb2 100644
--- a/tensorflow/tools/api/golden/tensorflow.keras.activations.pbtxt
+++ b/tensorflow/tools/api/golden/tensorflow.keras.activations.pbtxt
@@ -22,7 +22,7 @@ tf_module {
}
member_method {
name: "relu"
- argspec: "args=[\'x\', \'alpha\', \'max_value\'], varargs=None, keywords=None, defaults=[\'0.0\', \'None\'], "
+ argspec: "args=[\'x\', \'alpha\', \'max_value\', \'threshold\'], varargs=None, keywords=None, defaults=[\'0.0\', \'None\', \'0\'], "
}
member_method {
name: "selu"
diff --git a/tensorflow/tools/api/golden/tensorflow.keras.applications.densenet.pbtxt b/tensorflow/tools/api/golden/tensorflow.keras.applications.densenet.pbtxt
deleted file mode 100644
index 42cb914450..0000000000
--- a/tensorflow/tools/api/golden/tensorflow.keras.applications.densenet.pbtxt
+++ /dev/null
@@ -1,23 +0,0 @@
-path: "tensorflow.keras.applications.densenet"
-tf_module {
- member_method {
- name: "DenseNet121"
- argspec: "args=[\'include_top\', \'weights\', \'input_tensor\', \'input_shape\', \'pooling\', \'classes\'], varargs=None, keywords=None, defaults=[\'True\', \'imagenet\', \'None\', \'None\', \'None\', \'1000\'], "
- }
- member_method {
- name: "DenseNet169"
- argspec: "args=[\'include_top\', \'weights\', \'input_tensor\', \'input_shape\', \'pooling\', \'classes\'], varargs=None, keywords=None, defaults=[\'True\', \'imagenet\', \'None\', \'None\', \'None\', \'1000\'], "
- }
- member_method {
- name: "DenseNet201"
- argspec: "args=[\'include_top\', \'weights\', \'input_tensor\', \'input_shape\', \'pooling\', \'classes\'], varargs=None, keywords=None, defaults=[\'True\', \'imagenet\', \'None\', \'None\', \'None\', \'1000\'], "
- }
- member_method {
- name: "decode_predictions"
- argspec: "args=[\'preds\', \'top\'], varargs=None, keywords=None, defaults=[\'5\'], "
- }
- member_method {
- name: "preprocess_input"
- argspec: "args=[\'x\', \'data_format\'], varargs=None, keywords=None, defaults=[\'None\'], "
- }
-}
diff --git a/tensorflow/tools/api/golden/tensorflow.keras.applications.inception_resnet_v2.pbtxt b/tensorflow/tools/api/golden/tensorflow.keras.applications.inception_resnet_v2.pbtxt
deleted file mode 100644
index 211080c19b..0000000000
--- a/tensorflow/tools/api/golden/tensorflow.keras.applications.inception_resnet_v2.pbtxt
+++ /dev/null
@@ -1,15 +0,0 @@
-path: "tensorflow.keras.applications.inception_resnet_v2"
-tf_module {
- member_method {
- name: "InceptionResNetV2"
- argspec: "args=[\'include_top\', \'weights\', \'input_tensor\', \'input_shape\', \'pooling\', \'classes\'], varargs=None, keywords=None, defaults=[\'True\', \'imagenet\', \'None\', \'None\', \'None\', \'1000\'], "
- }
- member_method {
- name: "decode_predictions"
- argspec: "args=[\'preds\', \'top\'], varargs=None, keywords=None, defaults=[\'5\'], "
- }
- member_method {
- name: "preprocess_input"
- argspec: "args=[\'x\'], varargs=None, keywords=None, defaults=None"
- }
-}
diff --git a/tensorflow/tools/api/golden/tensorflow.keras.applications.inception_v3.pbtxt b/tensorflow/tools/api/golden/tensorflow.keras.applications.inception_v3.pbtxt
deleted file mode 100644
index b67cee80ab..0000000000
--- a/tensorflow/tools/api/golden/tensorflow.keras.applications.inception_v3.pbtxt
+++ /dev/null
@@ -1,15 +0,0 @@
-path: "tensorflow.keras.applications.inception_v3"
-tf_module {
- member_method {
- name: "InceptionV3"
- argspec: "args=[\'include_top\', \'weights\', \'input_tensor\', \'input_shape\', \'pooling\', \'classes\'], varargs=None, keywords=None, defaults=[\'True\', \'imagenet\', \'None\', \'None\', \'None\', \'1000\'], "
- }
- member_method {
- name: "decode_predictions"
- argspec: "args=[\'preds\', \'top\'], varargs=None, keywords=None, defaults=[\'5\'], "
- }
- member_method {
- name: "preprocess_input"
- argspec: "args=[\'x\'], varargs=None, keywords=None, defaults=None"
- }
-}
diff --git a/tensorflow/tools/api/golden/tensorflow.keras.applications.mobilenet.pbtxt b/tensorflow/tools/api/golden/tensorflow.keras.applications.mobilenet.pbtxt
deleted file mode 100644
index ef774e1dd7..0000000000
--- a/tensorflow/tools/api/golden/tensorflow.keras.applications.mobilenet.pbtxt
+++ /dev/null
@@ -1,15 +0,0 @@
-path: "tensorflow.keras.applications.mobilenet"
-tf_module {
- member_method {
- name: "MobileNet"
- argspec: "args=[\'input_shape\', \'alpha\', \'depth_multiplier\', \'dropout\', \'include_top\', \'weights\', \'input_tensor\', \'pooling\', \'classes\'], varargs=None, keywords=None, defaults=[\'None\', \'1.0\', \'1\', \'0.001\', \'True\', \'imagenet\', \'None\', \'None\', \'1000\'], "
- }
- member_method {
- name: "decode_predictions"
- argspec: "args=[\'preds\', \'top\'], varargs=None, keywords=None, defaults=[\'5\'], "
- }
- member_method {
- name: "preprocess_input"
- argspec: "args=[\'x\'], varargs=None, keywords=None, defaults=None"
- }
-}
diff --git a/tensorflow/tools/api/golden/tensorflow.keras.applications.nasnet.pbtxt b/tensorflow/tools/api/golden/tensorflow.keras.applications.nasnet.pbtxt
deleted file mode 100644
index cd75b87540..0000000000
--- a/tensorflow/tools/api/golden/tensorflow.keras.applications.nasnet.pbtxt
+++ /dev/null
@@ -1,19 +0,0 @@
-path: "tensorflow.keras.applications.nasnet"
-tf_module {
- member_method {
- name: "NASNetLarge"
- argspec: "args=[\'input_shape\', \'include_top\', \'weights\', \'input_tensor\', \'pooling\', \'classes\'], varargs=None, keywords=None, defaults=[\'None\', \'True\', \'imagenet\', \'None\', \'None\', \'1000\'], "
- }
- member_method {
- name: "NASNetMobile"
- argspec: "args=[\'input_shape\', \'include_top\', \'weights\', \'input_tensor\', \'pooling\', \'classes\'], varargs=None, keywords=None, defaults=[\'None\', \'True\', \'imagenet\', \'None\', \'None\', \'1000\'], "
- }
- member_method {
- name: "decode_predictions"
- argspec: "args=[\'preds\', \'top\'], varargs=None, keywords=None, defaults=[\'5\'], "
- }
- member_method {
- name: "preprocess_input"
- argspec: "args=[\'x\'], varargs=None, keywords=None, defaults=None"
- }
-}
diff --git a/tensorflow/tools/api/golden/tensorflow.keras.applications.pbtxt b/tensorflow/tools/api/golden/tensorflow.keras.applications.pbtxt
deleted file mode 100644
index 9fc086eb8e..0000000000
--- a/tensorflow/tools/api/golden/tensorflow.keras.applications.pbtxt
+++ /dev/null
@@ -1,87 +0,0 @@
-path: "tensorflow.keras.applications"
-tf_module {
- member {
- name: "densenet"
- mtype: "<type \'module\'>"
- }
- member {
- name: "inception_resnet_v2"
- mtype: "<type \'module\'>"
- }
- member {
- name: "inception_v3"
- mtype: "<type \'module\'>"
- }
- member {
- name: "mobilenet"
- mtype: "<type \'module\'>"
- }
- member {
- name: "nasnet"
- mtype: "<type \'module\'>"
- }
- member {
- name: "resnet50"
- mtype: "<type \'module\'>"
- }
- member {
- name: "vgg16"
- mtype: "<type \'module\'>"
- }
- member {
- name: "vgg19"
- mtype: "<type \'module\'>"
- }
- member {
- name: "xception"
- mtype: "<type \'module\'>"
- }
- member_method {
- name: "DenseNet121"
- argspec: "args=[\'include_top\', \'weights\', \'input_tensor\', \'input_shape\', \'pooling\', \'classes\'], varargs=None, keywords=None, defaults=[\'True\', \'imagenet\', \'None\', \'None\', \'None\', \'1000\'], "
- }
- member_method {
- name: "DenseNet169"
- argspec: "args=[\'include_top\', \'weights\', \'input_tensor\', \'input_shape\', \'pooling\', \'classes\'], varargs=None, keywords=None, defaults=[\'True\', \'imagenet\', \'None\', \'None\', \'None\', \'1000\'], "
- }
- member_method {
- name: "DenseNet201"
- argspec: "args=[\'include_top\', \'weights\', \'input_tensor\', \'input_shape\', \'pooling\', \'classes\'], varargs=None, keywords=None, defaults=[\'True\', \'imagenet\', \'None\', \'None\', \'None\', \'1000\'], "
- }
- member_method {
- name: "InceptionResNetV2"
- argspec: "args=[\'include_top\', \'weights\', \'input_tensor\', \'input_shape\', \'pooling\', \'classes\'], varargs=None, keywords=None, defaults=[\'True\', \'imagenet\', \'None\', \'None\', \'None\', \'1000\'], "
- }
- member_method {
- name: "InceptionV3"
- argspec: "args=[\'include_top\', \'weights\', \'input_tensor\', \'input_shape\', \'pooling\', \'classes\'], varargs=None, keywords=None, defaults=[\'True\', \'imagenet\', \'None\', \'None\', \'None\', \'1000\'], "
- }
- member_method {
- name: "MobileNet"
- argspec: "args=[\'input_shape\', \'alpha\', \'depth_multiplier\', \'dropout\', \'include_top\', \'weights\', \'input_tensor\', \'pooling\', \'classes\'], varargs=None, keywords=None, defaults=[\'None\', \'1.0\', \'1\', \'0.001\', \'True\', \'imagenet\', \'None\', \'None\', \'1000\'], "
- }
- member_method {
- name: "NASNetLarge"
- argspec: "args=[\'input_shape\', \'include_top\', \'weights\', \'input_tensor\', \'pooling\', \'classes\'], varargs=None, keywords=None, defaults=[\'None\', \'True\', \'imagenet\', \'None\', \'None\', \'1000\'], "
- }
- member_method {
- name: "NASNetMobile"
- argspec: "args=[\'input_shape\', \'include_top\', \'weights\', \'input_tensor\', \'pooling\', \'classes\'], varargs=None, keywords=None, defaults=[\'None\', \'True\', \'imagenet\', \'None\', \'None\', \'1000\'], "
- }
- member_method {
- name: "ResNet50"
- argspec: "args=[\'include_top\', \'weights\', \'input_tensor\', \'input_shape\', \'pooling\', \'classes\'], varargs=None, keywords=None, defaults=[\'True\', \'imagenet\', \'None\', \'None\', \'None\', \'1000\'], "
- }
- member_method {
- name: "VGG16"
- argspec: "args=[\'include_top\', \'weights\', \'input_tensor\', \'input_shape\', \'pooling\', \'classes\'], varargs=None, keywords=None, defaults=[\'True\', \'imagenet\', \'None\', \'None\', \'None\', \'1000\'], "
- }
- member_method {
- name: "VGG19"
- argspec: "args=[\'include_top\', \'weights\', \'input_tensor\', \'input_shape\', \'pooling\', \'classes\'], varargs=None, keywords=None, defaults=[\'True\', \'imagenet\', \'None\', \'None\', \'None\', \'1000\'], "
- }
- member_method {
- name: "Xception"
- argspec: "args=[\'include_top\', \'weights\', \'input_tensor\', \'input_shape\', \'pooling\', \'classes\'], varargs=None, keywords=None, defaults=[\'True\', \'imagenet\', \'None\', \'None\', \'None\', \'1000\'], "
- }
-}
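The deleted golden above still documents the keras.applications constructors as they were recorded; the removal concerns the golden file, not the API. As a reference sketch of the recorded ResNet50 argspec (weights=None is chosen here to avoid the imagenet download; the shapes are illustrative):

    import numpy as np
    import tensorflow as tf

    # Arguments mirror the recorded argspec: include_top, weights, input_tensor,
    # input_shape, pooling, classes.
    model = tf.keras.applications.ResNet50(
        include_top=False,
        weights=None,
        input_shape=(224, 224, 3),
        pooling='avg')

    features = model.predict(np.zeros((1, 224, 224, 3), dtype=np.float32))
    print(features.shape)   # (1, 2048) with global average pooling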
diff --git a/tensorflow/tools/api/golden/tensorflow.keras.applications.resnet50.pbtxt b/tensorflow/tools/api/golden/tensorflow.keras.applications.resnet50.pbtxt
deleted file mode 100644
index 7385af064d..0000000000
--- a/tensorflow/tools/api/golden/tensorflow.keras.applications.resnet50.pbtxt
+++ /dev/null
@@ -1,15 +0,0 @@
-path: "tensorflow.keras.applications.resnet50"
-tf_module {
- member_method {
- name: "ResNet50"
- argspec: "args=[\'include_top\', \'weights\', \'input_tensor\', \'input_shape\', \'pooling\', \'classes\'], varargs=None, keywords=None, defaults=[\'True\', \'imagenet\', \'None\', \'None\', \'None\', \'1000\'], "
- }
- member_method {
- name: "decode_predictions"
- argspec: "args=[\'preds\', \'top\'], varargs=None, keywords=None, defaults=[\'5\'], "
- }
- member_method {
- name: "preprocess_input"
- argspec: "args=[\'x\', \'data_format\', \'mode\'], varargs=None, keywords=None, defaults=[\'None\', \'caffe\'], "
- }
-}
diff --git a/tensorflow/tools/api/golden/tensorflow.keras.applications.vgg16.pbtxt b/tensorflow/tools/api/golden/tensorflow.keras.applications.vgg16.pbtxt
deleted file mode 100644
index ba66fba8f3..0000000000
--- a/tensorflow/tools/api/golden/tensorflow.keras.applications.vgg16.pbtxt
+++ /dev/null
@@ -1,15 +0,0 @@
-path: "tensorflow.keras.applications.vgg16"
-tf_module {
- member_method {
- name: "VGG16"
- argspec: "args=[\'include_top\', \'weights\', \'input_tensor\', \'input_shape\', \'pooling\', \'classes\'], varargs=None, keywords=None, defaults=[\'True\', \'imagenet\', \'None\', \'None\', \'None\', \'1000\'], "
- }
- member_method {
- name: "decode_predictions"
- argspec: "args=[\'preds\', \'top\'], varargs=None, keywords=None, defaults=[\'5\'], "
- }
- member_method {
- name: "preprocess_input"
- argspec: "args=[\'x\', \'data_format\', \'mode\'], varargs=None, keywords=None, defaults=[\'None\', \'caffe\'], "
- }
-}
diff --git a/tensorflow/tools/api/golden/tensorflow.keras.applications.vgg19.pbtxt b/tensorflow/tools/api/golden/tensorflow.keras.applications.vgg19.pbtxt
deleted file mode 100644
index e55a1345b6..0000000000
--- a/tensorflow/tools/api/golden/tensorflow.keras.applications.vgg19.pbtxt
+++ /dev/null
@@ -1,15 +0,0 @@
-path: "tensorflow.keras.applications.vgg19"
-tf_module {
- member_method {
- name: "VGG19"
- argspec: "args=[\'include_top\', \'weights\', \'input_tensor\', \'input_shape\', \'pooling\', \'classes\'], varargs=None, keywords=None, defaults=[\'True\', \'imagenet\', \'None\', \'None\', \'None\', \'1000\'], "
- }
- member_method {
- name: "decode_predictions"
- argspec: "args=[\'preds\', \'top\'], varargs=None, keywords=None, defaults=[\'5\'], "
- }
- member_method {
- name: "preprocess_input"
- argspec: "args=[\'x\', \'data_format\', \'mode\'], varargs=None, keywords=None, defaults=[\'None\', \'caffe\'], "
- }
-}
diff --git a/tensorflow/tools/api/golden/tensorflow.keras.applications.xception.pbtxt b/tensorflow/tools/api/golden/tensorflow.keras.applications.xception.pbtxt
deleted file mode 100644
index 59dd2108f2..0000000000
--- a/tensorflow/tools/api/golden/tensorflow.keras.applications.xception.pbtxt
+++ /dev/null
@@ -1,15 +0,0 @@
-path: "tensorflow.keras.applications.xception"
-tf_module {
- member_method {
- name: "Xception"
- argspec: "args=[\'include_top\', \'weights\', \'input_tensor\', \'input_shape\', \'pooling\', \'classes\'], varargs=None, keywords=None, defaults=[\'True\', \'imagenet\', \'None\', \'None\', \'None\', \'1000\'], "
- }
- member_method {
- name: "decode_predictions"
- argspec: "args=[\'preds\', \'top\'], varargs=None, keywords=None, defaults=[\'5\'], "
- }
- member_method {
- name: "preprocess_input"
- argspec: "args=[\'x\'], varargs=None, keywords=None, defaults=None"
- }
-}
diff --git a/tensorflow/tools/api/golden/tensorflow.keras.models.-model.pbtxt b/tensorflow/tools/api/golden/tensorflow.keras.models.-model.pbtxt
index 3ac285681f..56914e1746 100644
--- a/tensorflow/tools/api/golden/tensorflow.keras.models.-model.pbtxt
+++ b/tensorflow/tools/api/golden/tensorflow.keras.models.-model.pbtxt
@@ -1,9 +1,9 @@
path: "tensorflow.keras.models.Model"
tf_class {
- is_instance: "<class \'tensorflow.python.keras._impl.keras.engine.training.Model\'>"
- is_instance: "<class \'tensorflow.python.keras._impl.keras.engine.network.Network\'>"
- is_instance: "<class \'tensorflow.python.keras._impl.keras.engine.base_layer.Layer\'>"
- is_instance: "<class \'tensorflow.python.training.checkpointable.CheckpointableBase\'>"
+ is_instance: "<class \'tensorflow.python.keras.engine.training.Model\'>"
+ is_instance: "<class \'tensorflow.python.keras.engine.network.Network\'>"
+ is_instance: "<class \'tensorflow.python.keras.engine.base_layer.Layer\'>"
+ is_instance: "<class \'tensorflow.python.training.checkpointable.base.CheckpointableBase\'>"
is_instance: "<type \'object\'>"
member {
name: "activity_regularizer"
@@ -119,7 +119,7 @@ tf_class {
}
member_method {
name: "add_weight"
- argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'getter\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'True\', \'None\', \'None\', \'None\', \'None\'], "
+ argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'synchronization\', \'aggregation\', \'getter\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'VariableSynchronization.AUTO\', \'VariableAggregation.NONE\', \'None\'], "
}
member_method {
name: "apply"
@@ -127,7 +127,7 @@ tf_class {
}
member_method {
name: "build"
- argspec: "args=[\'self\', \'_\'], varargs=None, keywords=None, defaults=None"
+ argspec: "args=[\'self\', \'input_shape\'], varargs=None, keywords=None, defaults=None"
}
member_method {
name: "call"
@@ -135,7 +135,7 @@ tf_class {
}
member_method {
name: "compile"
- argspec: "args=[\'self\', \'optimizer\', \'loss\', \'metrics\', \'loss_weights\', \'sample_weight_mode\', \'weighted_metrics\', \'target_tensors\'], varargs=None, keywords=kwargs, defaults=[\'None\', \'None\', \'None\', \'None\', \'None\', \'None\'], "
+ argspec: "args=[\'self\', \'optimizer\', \'loss\', \'metrics\', \'loss_weights\', \'sample_weight_mode\', \'weighted_metrics\', \'target_tensors\', \'distribute\'], varargs=None, keywords=kwargs, defaults=[\'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'None\'], "
}
member_method {
name: "compute_mask"
@@ -155,7 +155,7 @@ tf_class {
}
member_method {
name: "evaluate_generator"
- argspec: "args=[\'self\', \'generator\', \'steps\', \'max_queue_size\', \'workers\', \'use_multiprocessing\'], varargs=None, keywords=None, defaults=[\'None\', \'10\', \'1\', \'False\'], "
+ argspec: "args=[\'self\', \'generator\', \'steps\', \'max_queue_size\', \'workers\', \'use_multiprocessing\', \'verbose\'], varargs=None, keywords=None, defaults=[\'None\', \'10\', \'1\', \'False\', \'0\'], "
}
member_method {
name: "fit"
@@ -239,7 +239,7 @@ tf_class {
}
member_method {
name: "save_weights"
- argspec: "args=[\'self\', \'filepath\', \'overwrite\'], varargs=None, keywords=None, defaults=[\'True\'], "
+ argspec: "args=[\'self\', \'filepath\', \'overwrite\', \'save_format\'], varargs=None, keywords=None, defaults=[\'True\', \'None\'], "
}
member_method {
name: "set_weights"
@@ -251,7 +251,7 @@ tf_class {
}
member_method {
name: "test_on_batch"
- argspec: "args=[\'self\', \'x\', \'y\', \'sample_weight\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ argspec: "args=[\'self\', \'x\', \'y\', \'sample_weight\'], varargs=None, keywords=None, defaults=[\'None\', \'None\'], "
}
member_method {
name: "to_json"
@@ -263,6 +263,6 @@ tf_class {
}
member_method {
name: "train_on_batch"
- argspec: "args=[\'self\', \'x\', \'y\', \'sample_weight\', \'class_weight\'], varargs=None, keywords=None, defaults=[\'None\', \'None\'], "
+ argspec: "args=[\'self\', \'x\', \'y\', \'sample_weight\', \'class_weight\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\'], "
}
}
diff --git a/tensorflow/tools/api/golden/tensorflow.keras.models.-sequential.pbtxt b/tensorflow/tools/api/golden/tensorflow.keras.models.-sequential.pbtxt
index 51ba0c5043..4c1c54001d 100644
--- a/tensorflow/tools/api/golden/tensorflow.keras.models.-sequential.pbtxt
+++ b/tensorflow/tools/api/golden/tensorflow.keras.models.-sequential.pbtxt
@@ -1,10 +1,10 @@
path: "tensorflow.keras.models.Sequential"
tf_class {
- is_instance: "<class \'tensorflow.python.keras._impl.keras.engine.sequential.Sequential\'>"
- is_instance: "<class \'tensorflow.python.keras._impl.keras.engine.training.Model\'>"
- is_instance: "<class \'tensorflow.python.keras._impl.keras.engine.network.Network\'>"
- is_instance: "<class \'tensorflow.python.keras._impl.keras.engine.base_layer.Layer\'>"
- is_instance: "<class \'tensorflow.python.training.checkpointable.CheckpointableBase\'>"
+ is_instance: "<class \'tensorflow.python.keras.engine.sequential.Sequential\'>"
+ is_instance: "<class \'tensorflow.python.keras.engine.training.Model\'>"
+ is_instance: "<class \'tensorflow.python.keras.engine.network.Network\'>"
+ is_instance: "<class \'tensorflow.python.keras.engine.base_layer.Layer\'>"
+ is_instance: "<class \'tensorflow.python.training.checkpointable.base.CheckpointableBase\'>"
is_instance: "<type \'object\'>"
member {
name: "activity_regularizer"
@@ -124,7 +124,7 @@ tf_class {
}
member_method {
name: "add_weight"
- argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'getter\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'True\', \'None\', \'None\', \'None\', \'None\'], "
+ argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'synchronization\', \'aggregation\', \'getter\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'VariableSynchronization.AUTO\', \'VariableAggregation.NONE\', \'None\'], "
}
member_method {
name: "apply"
@@ -140,7 +140,7 @@ tf_class {
}
member_method {
name: "compile"
- argspec: "args=[\'self\', \'optimizer\', \'loss\', \'metrics\', \'loss_weights\', \'sample_weight_mode\', \'weighted_metrics\', \'target_tensors\'], varargs=None, keywords=kwargs, defaults=[\'None\', \'None\', \'None\', \'None\', \'None\', \'None\'], "
+ argspec: "args=[\'self\', \'optimizer\', \'loss\', \'metrics\', \'loss_weights\', \'sample_weight_mode\', \'weighted_metrics\', \'target_tensors\', \'distribute\'], varargs=None, keywords=kwargs, defaults=[\'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'None\'], "
}
member_method {
name: "compute_mask"
@@ -160,7 +160,7 @@ tf_class {
}
member_method {
name: "evaluate_generator"
- argspec: "args=[\'self\', \'generator\', \'steps\', \'max_queue_size\', \'workers\', \'use_multiprocessing\'], varargs=None, keywords=None, defaults=[\'None\', \'10\', \'1\', \'False\'], "
+ argspec: "args=[\'self\', \'generator\', \'steps\', \'max_queue_size\', \'workers\', \'use_multiprocessing\', \'verbose\'], varargs=None, keywords=None, defaults=[\'None\', \'10\', \'1\', \'False\', \'0\'], "
}
member_method {
name: "fit"
@@ -256,7 +256,7 @@ tf_class {
}
member_method {
name: "save_weights"
- argspec: "args=[\'self\', \'filepath\', \'overwrite\'], varargs=None, keywords=None, defaults=[\'True\'], "
+ argspec: "args=[\'self\', \'filepath\', \'overwrite\', \'save_format\'], varargs=None, keywords=None, defaults=[\'True\', \'None\'], "
}
member_method {
name: "set_weights"
@@ -267,8 +267,12 @@ tf_class {
argspec: "args=[\'self\', \'line_length\', \'positions\', \'print_fn\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\'], "
}
member_method {
+ name: "symbolic_set_inputs"
+ argspec: "args=[\'self\', \'inputs\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
name: "test_on_batch"
- argspec: "args=[\'self\', \'x\', \'y\', \'sample_weight\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ argspec: "args=[\'self\', \'x\', \'y\', \'sample_weight\'], varargs=None, keywords=None, defaults=[\'None\', \'None\'], "
}
member_method {
name: "to_json"
@@ -280,6 +284,6 @@ tf_class {
}
member_method {
name: "train_on_batch"
- argspec: "args=[\'self\', \'x\', \'y\', \'sample_weight\', \'class_weight\'], varargs=None, keywords=None, defaults=[\'None\', \'None\'], "
+ argspec: "args=[\'self\', \'x\', \'y\', \'sample_weight\', \'class_weight\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\'], "
}
}
diff --git a/tensorflow/tools/api/golden/tensorflow.keras.preprocessing.image.-directory-iterator.pbtxt b/tensorflow/tools/api/golden/tensorflow.keras.preprocessing.image.-directory-iterator.pbtxt
deleted file mode 100644
index ec0f3d892d..0000000000
--- a/tensorflow/tools/api/golden/tensorflow.keras.preprocessing.image.-directory-iterator.pbtxt
+++ /dev/null
@@ -1,23 +0,0 @@
-path: "tensorflow.keras.preprocessing.image.DirectoryIterator"
-tf_class {
- is_instance: "<class \'tensorflow.python.keras._impl.keras.preprocessing.image.DirectoryIterator\'>"
- is_instance: "<class \'tensorflow.python.keras._impl.keras.preprocessing.image.Iterator\'>"
- is_instance: "<class \'tensorflow.python.keras._impl.keras.utils.data_utils.Sequence\'>"
- is_instance: "<type \'object\'>"
- member_method {
- name: "__init__"
- argspec: "args=[\'self\', \'directory\', \'image_data_generator\', \'target_size\', \'color_mode\', \'classes\', \'class_mode\', \'batch_size\', \'shuffle\', \'seed\', \'data_format\', \'save_to_dir\', \'save_prefix\', \'save_format\', \'follow_links\', \'subset\', \'interpolation\'], varargs=None, keywords=None, defaults=[\'(256, 256)\', \'rgb\', \'None\', \'categorical\', \'32\', \'True\', \'None\', \'None\', \'None\', \'\', \'png\', \'False\', \'None\', \'nearest\'], "
- }
- member_method {
- name: "next"
- argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
- }
- member_method {
- name: "on_epoch_end"
- argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
- }
- member_method {
- name: "reset"
- argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
- }
-}
diff --git a/tensorflow/tools/api/golden/tensorflow.keras.preprocessing.image.-image-data-generator.pbtxt b/tensorflow/tools/api/golden/tensorflow.keras.preprocessing.image.-image-data-generator.pbtxt
deleted file mode 100644
index f5bc04e44c..0000000000
--- a/tensorflow/tools/api/golden/tensorflow.keras.preprocessing.image.-image-data-generator.pbtxt
+++ /dev/null
@@ -1,29 +0,0 @@
-path: "tensorflow.keras.preprocessing.image.ImageDataGenerator"
-tf_class {
- is_instance: "<class \'tensorflow.python.keras._impl.keras.preprocessing.image.ImageDataGenerator\'>"
- is_instance: "<type \'object\'>"
- member_method {
- name: "__init__"
- argspec: "args=[\'self\', \'featurewise_center\', \'samplewise_center\', \'featurewise_std_normalization\', \'samplewise_std_normalization\', \'zca_whitening\', \'zca_epsilon\', \'rotation_range\', \'width_shift_range\', \'height_shift_range\', \'brightness_range\', \'shear_range\', \'zoom_range\', \'channel_shift_range\', \'fill_mode\', \'cval\', \'horizontal_flip\', \'vertical_flip\', \'rescale\', \'preprocessing_function\', \'data_format\', \'validation_split\'], varargs=None, keywords=None, defaults=[\'False\', \'False\', \'False\', \'False\', \'False\', \'1e-06\', \'0.0\', \'0.0\', \'0.0\', \'None\', \'0.0\', \'0.0\', \'0.0\', \'nearest\', \'0.0\', \'False\', \'False\', \'None\', \'None\', \'None\', \'0.0\'], "
- }
- member_method {
- name: "fit"
- argspec: "args=[\'self\', \'x\', \'augment\', \'rounds\', \'seed\'], varargs=None, keywords=None, defaults=[\'False\', \'1\', \'None\'], "
- }
- member_method {
- name: "flow"
- argspec: "args=[\'self\', \'x\', \'y\', \'batch_size\', \'shuffle\', \'seed\', \'save_to_dir\', \'save_prefix\', \'save_format\', \'subset\'], varargs=None, keywords=None, defaults=[\'None\', \'32\', \'True\', \'None\', \'None\', \'\', \'png\', \'None\'], "
- }
- member_method {
- name: "flow_from_directory"
- argspec: "args=[\'self\', \'directory\', \'target_size\', \'color_mode\', \'classes\', \'class_mode\', \'batch_size\', \'shuffle\', \'seed\', \'save_to_dir\', \'save_prefix\', \'save_format\', \'follow_links\', \'subset\', \'interpolation\'], varargs=None, keywords=None, defaults=[\'(256, 256)\', \'rgb\', \'None\', \'categorical\', \'32\', \'True\', \'None\', \'None\', \'\', \'png\', \'False\', \'None\', \'nearest\'], "
- }
- member_method {
- name: "random_transform"
- argspec: "args=[\'self\', \'x\', \'seed\'], varargs=None, keywords=None, defaults=[\'None\'], "
- }
- member_method {
- name: "standardize"
- argspec: "args=[\'self\', \'x\'], varargs=None, keywords=None, defaults=None"
- }
-}
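The ImageDataGenerator golden removed above records the full constructor and flow signatures. A compact usage sketch based on those argspecs; the random array stands in for a real image batch:

    import numpy as np
    import tensorflow as tf

    datagen = tf.keras.preprocessing.image.ImageDataGenerator(
        rotation_range=10.0,
        width_shift_range=0.1,
        height_shift_range=0.1,
        horizontal_flip=True,
        rescale=1.0 / 255)

    x = np.random.randint(0, 256, size=(8, 32, 32, 3)).astype('float32')
    y = np.arange(8)

    # flow() yields augmented (x, y) batches indefinitely; break after one.
    for batch_x, batch_y in datagen.flow(x, y, batch_size=4, shuffle=True):
        print(batch_x.shape, batch_y.shape)   # (4, 32, 32, 3) (4,)
        break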
diff --git a/tensorflow/tools/api/golden/tensorflow.keras.preprocessing.image.-iterator.pbtxt b/tensorflow/tools/api/golden/tensorflow.keras.preprocessing.image.-iterator.pbtxt
deleted file mode 100644
index 69488d63bf..0000000000
--- a/tensorflow/tools/api/golden/tensorflow.keras.preprocessing.image.-iterator.pbtxt
+++ /dev/null
@@ -1,18 +0,0 @@
-path: "tensorflow.keras.preprocessing.image.Iterator"
-tf_class {
- is_instance: "<class \'tensorflow.python.keras._impl.keras.preprocessing.image.Iterator\'>"
- is_instance: "<class \'tensorflow.python.keras._impl.keras.utils.data_utils.Sequence\'>"
- is_instance: "<type \'object\'>"
- member_method {
- name: "__init__"
- argspec: "args=[\'self\', \'n\', \'batch_size\', \'shuffle\', \'seed\'], varargs=None, keywords=None, defaults=None"
- }
- member_method {
- name: "on_epoch_end"
- argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
- }
- member_method {
- name: "reset"
- argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
- }
-}
diff --git a/tensorflow/tools/api/golden/tensorflow.keras.preprocessing.image.-numpy-array-iterator.pbtxt b/tensorflow/tools/api/golden/tensorflow.keras.preprocessing.image.-numpy-array-iterator.pbtxt
deleted file mode 100644
index 42196ddeee..0000000000
--- a/tensorflow/tools/api/golden/tensorflow.keras.preprocessing.image.-numpy-array-iterator.pbtxt
+++ /dev/null
@@ -1,23 +0,0 @@
-path: "tensorflow.keras.preprocessing.image.NumpyArrayIterator"
-tf_class {
- is_instance: "<class \'tensorflow.python.keras._impl.keras.preprocessing.image.NumpyArrayIterator\'>"
- is_instance: "<class \'tensorflow.python.keras._impl.keras.preprocessing.image.Iterator\'>"
- is_instance: "<class \'tensorflow.python.keras._impl.keras.utils.data_utils.Sequence\'>"
- is_instance: "<type \'object\'>"
- member_method {
- name: "__init__"
- argspec: "args=[\'self\', \'x\', \'y\', \'image_data_generator\', \'batch_size\', \'shuffle\', \'seed\', \'data_format\', \'save_to_dir\', \'save_prefix\', \'save_format\', \'subset\'], varargs=None, keywords=None, defaults=[\'32\', \'False\', \'None\', \'None\', \'None\', \'\', \'png\', \'None\'], "
- }
- member_method {
- name: "next"
- argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
- }
- member_method {
- name: "on_epoch_end"
- argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
- }
- member_method {
- name: "reset"
- argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
- }
-}
diff --git a/tensorflow/tools/api/golden/tensorflow.keras.preprocessing.image.pbtxt b/tensorflow/tools/api/golden/tensorflow.keras.preprocessing.image.pbtxt
deleted file mode 100644
index 6b850dd6b7..0000000000
--- a/tensorflow/tools/api/golden/tensorflow.keras.preprocessing.image.pbtxt
+++ /dev/null
@@ -1,63 +0,0 @@
-path: "tensorflow.keras.preprocessing.image"
-tf_module {
- member {
- name: "DirectoryIterator"
- mtype: "<type \'type\'>"
- }
- member {
- name: "ImageDataGenerator"
- mtype: "<type \'type\'>"
- }
- member {
- name: "Iterator"
- mtype: "<type \'type\'>"
- }
- member {
- name: "NumpyArrayIterator"
- mtype: "<type \'type\'>"
- }
- member_method {
- name: "apply_transform"
- argspec: "args=[\'x\', \'transform_matrix\', \'channel_axis\', \'fill_mode\', \'cval\'], varargs=None, keywords=None, defaults=[\'0\', \'nearest\', \'0.0\'], "
- }
- member_method {
- name: "array_to_img"
- argspec: "args=[\'x\', \'data_format\', \'scale\'], varargs=None, keywords=None, defaults=[\'None\', \'True\'], "
- }
- member_method {
- name: "flip_axis"
- argspec: "args=[\'x\', \'axis\'], varargs=None, keywords=None, defaults=None"
- }
- member_method {
- name: "img_to_array"
- argspec: "args=[\'img\', \'data_format\'], varargs=None, keywords=None, defaults=[\'None\'], "
- }
- member_method {
- name: "load_img"
- argspec: "args=[\'path\', \'grayscale\', \'target_size\', \'interpolation\'], varargs=None, keywords=None, defaults=[\'False\', \'None\', \'nearest\'], "
- }
- member_method {
- name: "random_brightness"
- argspec: "args=[\'x\', \'brightness_range\'], varargs=None, keywords=None, defaults=None"
- }
- member_method {
- name: "random_channel_shift"
- argspec: "args=[\'x\', \'intensity\', \'channel_axis\'], varargs=None, keywords=None, defaults=[\'0\'], "
- }
- member_method {
- name: "random_rotation"
- argspec: "args=[\'x\', \'rg\', \'row_axis\', \'col_axis\', \'channel_axis\', \'fill_mode\', \'cval\'], varargs=None, keywords=None, defaults=[\'1\', \'2\', \'0\', \'nearest\', \'0.0\'], "
- }
- member_method {
- name: "random_shear"
- argspec: "args=[\'x\', \'intensity\', \'row_axis\', \'col_axis\', \'channel_axis\', \'fill_mode\', \'cval\'], varargs=None, keywords=None, defaults=[\'1\', \'2\', \'0\', \'nearest\', \'0.0\'], "
- }
- member_method {
- name: "random_shift"
- argspec: "args=[\'x\', \'wrg\', \'hrg\', \'row_axis\', \'col_axis\', \'channel_axis\', \'fill_mode\', \'cval\'], varargs=None, keywords=None, defaults=[\'1\', \'2\', \'0\', \'nearest\', \'0.0\'], "
- }
- member_method {
- name: "random_zoom"
- argspec: "args=[\'x\', \'zoom_range\', \'row_axis\', \'col_axis\', \'channel_axis\', \'fill_mode\', \'cval\'], varargs=None, keywords=None, defaults=[\'1\', \'2\', \'0\', \'nearest\', \'0.0\'], "
- }
-}
diff --git a/tensorflow/tools/api/golden/tensorflow.keras.preprocessing.pbtxt b/tensorflow/tools/api/golden/tensorflow.keras.preprocessing.pbtxt
deleted file mode 100644
index 5a78581fc5..0000000000
--- a/tensorflow/tools/api/golden/tensorflow.keras.preprocessing.pbtxt
+++ /dev/null
@@ -1,15 +0,0 @@
-path: "tensorflow.keras.preprocessing"
-tf_module {
- member {
- name: "image"
- mtype: "<type \'module\'>"
- }
- member {
- name: "sequence"
- mtype: "<type \'module\'>"
- }
- member {
- name: "text"
- mtype: "<type \'module\'>"
- }
-}
diff --git a/tensorflow/tools/api/golden/tensorflow.keras.preprocessing.sequence.-timeseries-generator.pbtxt b/tensorflow/tools/api/golden/tensorflow.keras.preprocessing.sequence.-timeseries-generator.pbtxt
deleted file mode 100644
index d9c3215b55..0000000000
--- a/tensorflow/tools/api/golden/tensorflow.keras.preprocessing.sequence.-timeseries-generator.pbtxt
+++ /dev/null
@@ -1,14 +0,0 @@
-path: "tensorflow.keras.preprocessing.sequence.TimeseriesGenerator"
-tf_class {
- is_instance: "<class \'tensorflow.python.keras._impl.keras.preprocessing.sequence.TimeseriesGenerator\'>"
- is_instance: "<class \'tensorflow.python.keras._impl.keras.utils.data_utils.Sequence\'>"
- is_instance: "<type \'object\'>"
- member_method {
- name: "__init__"
- argspec: "args=[\'self\', \'data\', \'targets\', \'length\', \'sampling_rate\', \'stride\', \'start_index\', \'end_index\', \'shuffle\', \'reverse\', \'batch_size\'], varargs=None, keywords=None, defaults=[\'1\', \'1\', \'0\', \'None\', \'False\', \'False\', \'128\'], "
- }
- member_method {
- name: "on_epoch_end"
- argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
- }
-}
diff --git a/tensorflow/tools/api/golden/tensorflow.keras.preprocessing.sequence.pbtxt b/tensorflow/tools/api/golden/tensorflow.keras.preprocessing.sequence.pbtxt
deleted file mode 100644
index cf59f8a272..0000000000
--- a/tensorflow/tools/api/golden/tensorflow.keras.preprocessing.sequence.pbtxt
+++ /dev/null
@@ -1,19 +0,0 @@
-path: "tensorflow.keras.preprocessing.sequence"
-tf_module {
- member {
- name: "TimeseriesGenerator"
- mtype: "<type \'type\'>"
- }
- member_method {
- name: "make_sampling_table"
- argspec: "args=[\'size\', \'sampling_factor\'], varargs=None, keywords=None, defaults=[\'1e-05\'], "
- }
- member_method {
- name: "pad_sequences"
- argspec: "args=[\'sequences\', \'maxlen\', \'dtype\', \'padding\', \'truncating\', \'value\'], varargs=None, keywords=None, defaults=[\'None\', \'int32\', \'pre\', \'pre\', \'0.0\'], "
- }
- member_method {
- name: "skipgrams"
- argspec: "args=[\'sequence\', \'vocabulary_size\', \'window_size\', \'negative_samples\', \'shuffle\', \'categorical\', \'sampling_table\', \'seed\'], varargs=None, keywords=None, defaults=[\'4\', \'1.0\', \'True\', \'False\', \'None\', \'None\'], "
- }
-}
diff --git a/tensorflow/tools/api/golden/tensorflow.keras.preprocessing.text.-tokenizer.pbtxt b/tensorflow/tools/api/golden/tensorflow.keras.preprocessing.text.-tokenizer.pbtxt
deleted file mode 100644
index ce91caa1af..0000000000
--- a/tensorflow/tools/api/golden/tensorflow.keras.preprocessing.text.-tokenizer.pbtxt
+++ /dev/null
@@ -1,33 +0,0 @@
-path: "tensorflow.keras.preprocessing.text.Tokenizer"
-tf_class {
- is_instance: "<class \'tensorflow.python.keras._impl.keras.preprocessing.text.Tokenizer\'>"
- is_instance: "<type \'object\'>"
- member_method {
- name: "__init__"
- argspec: "args=[\'self\', \'num_words\', \'filters\', \'lower\', \'split\', \'char_level\', \'oov_token\'], varargs=None, keywords=kwargs, defaults=[\'None\', \'!\"#$%&()*+,-./:;<=>?@[\\\\]^_`{|}~\\t\\n\', \'True\', \' \', \'False\', \'None\'], "
- }
- member_method {
- name: "fit_on_sequences"
- argspec: "args=[\'self\', \'sequences\'], varargs=None, keywords=None, defaults=None"
- }
- member_method {
- name: "fit_on_texts"
- argspec: "args=[\'self\', \'texts\'], varargs=None, keywords=None, defaults=None"
- }
- member_method {
- name: "sequences_to_matrix"
- argspec: "args=[\'self\', \'sequences\', \'mode\'], varargs=None, keywords=None, defaults=[\'binary\'], "
- }
- member_method {
- name: "texts_to_matrix"
- argspec: "args=[\'self\', \'texts\', \'mode\'], varargs=None, keywords=None, defaults=[\'binary\'], "
- }
- member_method {
- name: "texts_to_sequences"
- argspec: "args=[\'self\', \'texts\'], varargs=None, keywords=None, defaults=None"
- }
- member_method {
- name: "texts_to_sequences_generator"
- argspec: "args=[\'self\', \'texts\'], varargs=None, keywords=None, defaults=None"
- }
-}
diff --git a/tensorflow/tools/api/golden/tensorflow.keras.preprocessing.text.pbtxt b/tensorflow/tools/api/golden/tensorflow.keras.preprocessing.text.pbtxt
deleted file mode 100644
index 50b54fc7e1..0000000000
--- a/tensorflow/tools/api/golden/tensorflow.keras.preprocessing.text.pbtxt
+++ /dev/null
@@ -1,19 +0,0 @@
-path: "tensorflow.keras.preprocessing.text"
-tf_module {
- member {
- name: "Tokenizer"
- mtype: "<type \'type\'>"
- }
- member_method {
- name: "hashing_trick"
- argspec: "args=[\'text\', \'n\', \'hash_function\', \'filters\', \'lower\', \'split\'], varargs=None, keywords=None, defaults=[\'None\', \'!\"#$%&()*+,-./:;<=>?@[\\\\]^_`{|}~\\t\\n\', \'True\', \' \'], "
- }
- member_method {
- name: "one_hot"
- argspec: "args=[\'text\', \'n\', \'filters\', \'lower\', \'split\'], varargs=None, keywords=None, defaults=[\'!\"#$%&()*+,-./:;<=>?@[\\\\]^_`{|}~\\t\\n\', \'True\', \' \'], "
- }
- member_method {
- name: "text_to_word_sequence"
- argspec: "args=[\'text\', \'filters\', \'lower\', \'split\'], varargs=None, keywords=None, defaults=[\'!\"#$%&()*+,-./:;<=>?@[\\\\]^_`{|}~\\t\\n\', \'True\', \' \'], "
- }
-}
diff --git a/tensorflow/tools/api/golden/tensorflow.manip.pbtxt b/tensorflow/tools/api/golden/tensorflow.manip.pbtxt
deleted file mode 100644
index 0b84165285..0000000000
--- a/tensorflow/tools/api/golden/tensorflow.manip.pbtxt
+++ /dev/null
@@ -1,7 +0,0 @@
-path: "tensorflow.manip"
-tf_module {
- member_method {
- name: "roll"
- argspec: "args=[\'input\', \'shift\', \'axis\'], varargs=None, keywords=None, defaults=None"
- }
-}
diff --git a/tensorflow/tools/api/golden/tensorflow.math.pbtxt b/tensorflow/tools/api/golden/tensorflow.math.pbtxt
deleted file mode 100644
index 897718c05e..0000000000
--- a/tensorflow/tools/api/golden/tensorflow.math.pbtxt
+++ /dev/null
@@ -1,7 +0,0 @@
-path: "tensorflow.math"
-tf_module {
- member_method {
- name: "polyval"
- argspec: "args=[\'coeffs\', \'x\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
- }
-}
diff --git a/tensorflow/tools/api/golden/tensorflow.profiler.-advice-proto.-checker.pbtxt b/tensorflow/tools/api/golden/tensorflow.profiler.-advice-proto.-checker.pbtxt
deleted file mode 100644
index bd5c36f390..0000000000
--- a/tensorflow/tools/api/golden/tensorflow.profiler.-advice-proto.-checker.pbtxt
+++ /dev/null
@@ -1,80 +0,0 @@
-path: "tensorflow.profiler.AdviceProto.Checker"
-tf_class {
- is_instance: "<class \'tensorflow.core.profiler.tfprof_output_pb2.Checker\'>"
- is_instance: "<type \'google.protobuf.pyext._message.CMessage\'>"
- member {
- name: "DESCRIPTOR"
- mtype: "<type \'google.protobuf.pyext._message.MessageDescriptor\'>"
- }
- member {
- name: "Extensions"
- mtype: "<type \'getset_descriptor\'>"
- }
- member {
- name: "REPORTS_FIELD_NUMBER"
- mtype: "<type \'int\'>"
- }
- member_method {
- name: "ByteSize"
- }
- member_method {
- name: "Clear"
- }
- member_method {
- name: "ClearExtension"
- }
- member_method {
- name: "ClearField"
- }
- member_method {
- name: "CopyFrom"
- }
- member_method {
- name: "DiscardUnknownFields"
- }
- member_method {
- name: "FindInitializationErrors"
- }
- member_method {
- name: "FromString"
- }
- member_method {
- name: "HasExtension"
- }
- member_method {
- name: "HasField"
- }
- member_method {
- name: "IsInitialized"
- }
- member_method {
- name: "ListFields"
- }
- member_method {
- name: "MergeFrom"
- }
- member_method {
- name: "MergeFromString"
- }
- member_method {
- name: "ParseFromString"
- }
- member_method {
- name: "RegisterExtension"
- }
- member_method {
- name: "SerializePartialToString"
- }
- member_method {
- name: "SerializeToString"
- }
- member_method {
- name: "SetInParent"
- }
- member_method {
- name: "WhichOneof"
- }
- member_method {
- name: "__init__"
- }
-}
diff --git a/tensorflow/tools/api/golden/tensorflow.profiler.-advice-proto.-checkers-entry.pbtxt b/tensorflow/tools/api/golden/tensorflow.profiler.-advice-proto.-checkers-entry.pbtxt
deleted file mode 100644
index 7c8c68e155..0000000000
--- a/tensorflow/tools/api/golden/tensorflow.profiler.-advice-proto.-checkers-entry.pbtxt
+++ /dev/null
@@ -1,84 +0,0 @@
-path: "tensorflow.profiler.AdviceProto.CheckersEntry"
-tf_class {
- is_instance: "<class \'tensorflow.core.profiler.tfprof_output_pb2.CheckersEntry\'>"
- is_instance: "<type \'google.protobuf.pyext._message.CMessage\'>"
- member {
- name: "DESCRIPTOR"
- mtype: "<type \'google.protobuf.pyext._message.MessageDescriptor\'>"
- }
- member {
- name: "Extensions"
- mtype: "<type \'getset_descriptor\'>"
- }
- member {
- name: "KEY_FIELD_NUMBER"
- mtype: "<type \'int\'>"
- }
- member {
- name: "VALUE_FIELD_NUMBER"
- mtype: "<type \'int\'>"
- }
- member_method {
- name: "ByteSize"
- }
- member_method {
- name: "Clear"
- }
- member_method {
- name: "ClearExtension"
- }
- member_method {
- name: "ClearField"
- }
- member_method {
- name: "CopyFrom"
- }
- member_method {
- name: "DiscardUnknownFields"
- }
- member_method {
- name: "FindInitializationErrors"
- }
- member_method {
- name: "FromString"
- }
- member_method {
- name: "HasExtension"
- }
- member_method {
- name: "HasField"
- }
- member_method {
- name: "IsInitialized"
- }
- member_method {
- name: "ListFields"
- }
- member_method {
- name: "MergeFrom"
- }
- member_method {
- name: "MergeFromString"
- }
- member_method {
- name: "ParseFromString"
- }
- member_method {
- name: "RegisterExtension"
- }
- member_method {
- name: "SerializePartialToString"
- }
- member_method {
- name: "SerializeToString"
- }
- member_method {
- name: "SetInParent"
- }
- member_method {
- name: "WhichOneof"
- }
- member_method {
- name: "__init__"
- }
-}
diff --git a/tensorflow/tools/api/golden/tensorflow.profiler.-advice-proto.pbtxt b/tensorflow/tools/api/golden/tensorflow.profiler.-advice-proto.pbtxt
deleted file mode 100644
index 1b789f4fc9..0000000000
--- a/tensorflow/tools/api/golden/tensorflow.profiler.-advice-proto.pbtxt
+++ /dev/null
@@ -1,88 +0,0 @@
-path: "tensorflow.profiler.AdviceProto"
-tf_class {
- is_instance: "<class \'tensorflow.core.profiler.tfprof_output_pb2.AdviceProto\'>"
- is_instance: "<type \'google.protobuf.pyext._message.CMessage\'>"
- member {
- name: "CHECKERS_FIELD_NUMBER"
- mtype: "<type \'int\'>"
- }
- member {
- name: "Checker"
- mtype: "<class \'google.protobuf.pyext.cpp_message.GeneratedProtocolMessageType\'>"
- }
- member {
- name: "CheckersEntry"
- mtype: "<class \'google.protobuf.pyext.cpp_message.GeneratedProtocolMessageType\'>"
- }
- member {
- name: "DESCRIPTOR"
- mtype: "<type \'google.protobuf.pyext._message.MessageDescriptor\'>"
- }
- member {
- name: "Extensions"
- mtype: "<type \'getset_descriptor\'>"
- }
- member_method {
- name: "ByteSize"
- }
- member_method {
- name: "Clear"
- }
- member_method {
- name: "ClearExtension"
- }
- member_method {
- name: "ClearField"
- }
- member_method {
- name: "CopyFrom"
- }
- member_method {
- name: "DiscardUnknownFields"
- }
- member_method {
- name: "FindInitializationErrors"
- }
- member_method {
- name: "FromString"
- }
- member_method {
- name: "HasExtension"
- }
- member_method {
- name: "HasField"
- }
- member_method {
- name: "IsInitialized"
- }
- member_method {
- name: "ListFields"
- }
- member_method {
- name: "MergeFrom"
- }
- member_method {
- name: "MergeFromString"
- }
- member_method {
- name: "ParseFromString"
- }
- member_method {
- name: "RegisterExtension"
- }
- member_method {
- name: "SerializePartialToString"
- }
- member_method {
- name: "SerializeToString"
- }
- member_method {
- name: "SetInParent"
- }
- member_method {
- name: "WhichOneof"
- }
- member_method {
- name: "__init__"
- }
-}
diff --git a/tensorflow/tools/api/golden/tensorflow.profiler.-graph-node-proto.-input-shapes-entry.pbtxt b/tensorflow/tools/api/golden/tensorflow.profiler.-graph-node-proto.-input-shapes-entry.pbtxt
deleted file mode 100644
index f0b9605bee..0000000000
--- a/tensorflow/tools/api/golden/tensorflow.profiler.-graph-node-proto.-input-shapes-entry.pbtxt
+++ /dev/null
@@ -1,84 +0,0 @@
-path: "tensorflow.profiler.GraphNodeProto.InputShapesEntry"
-tf_class {
- is_instance: "<class \'tensorflow.core.profiler.tfprof_output_pb2.InputShapesEntry\'>"
- is_instance: "<type \'google.protobuf.pyext._message.CMessage\'>"
- member {
- name: "DESCRIPTOR"
- mtype: "<type \'google.protobuf.pyext._message.MessageDescriptor\'>"
- }
- member {
- name: "Extensions"
- mtype: "<type \'getset_descriptor\'>"
- }
- member {
- name: "KEY_FIELD_NUMBER"
- mtype: "<type \'int\'>"
- }
- member {
- name: "VALUE_FIELD_NUMBER"
- mtype: "<type \'int\'>"
- }
- member_method {
- name: "ByteSize"
- }
- member_method {
- name: "Clear"
- }
- member_method {
- name: "ClearExtension"
- }
- member_method {
- name: "ClearField"
- }
- member_method {
- name: "CopyFrom"
- }
- member_method {
- name: "DiscardUnknownFields"
- }
- member_method {
- name: "FindInitializationErrors"
- }
- member_method {
- name: "FromString"
- }
- member_method {
- name: "HasExtension"
- }
- member_method {
- name: "HasField"
- }
- member_method {
- name: "IsInitialized"
- }
- member_method {
- name: "ListFields"
- }
- member_method {
- name: "MergeFrom"
- }
- member_method {
- name: "MergeFromString"
- }
- member_method {
- name: "ParseFromString"
- }
- member_method {
- name: "RegisterExtension"
- }
- member_method {
- name: "SerializePartialToString"
- }
- member_method {
- name: "SerializeToString"
- }
- member_method {
- name: "SetInParent"
- }
- member_method {
- name: "WhichOneof"
- }
- member_method {
- name: "__init__"
- }
-}
diff --git a/tensorflow/tools/api/golden/tensorflow.profiler.-graph-node-proto.pbtxt b/tensorflow/tools/api/golden/tensorflow.profiler.-graph-node-proto.pbtxt
deleted file mode 100644
index b80896a8a0..0000000000
--- a/tensorflow/tools/api/golden/tensorflow.profiler.-graph-node-proto.pbtxt
+++ /dev/null
@@ -1,188 +0,0 @@
-path: "tensorflow.profiler.GraphNodeProto"
-tf_class {
- is_instance: "<class \'tensorflow.core.profiler.tfprof_output_pb2.GraphNodeProto\'>"
- is_instance: "<type \'google.protobuf.pyext._message.CMessage\'>"
- member {
- name: "ACCELERATOR_EXEC_MICROS_FIELD_NUMBER"
- mtype: "<type \'int\'>"
- }
- member {
- name: "CHILDREN_FIELD_NUMBER"
- mtype: "<type \'int\'>"
- }
- member {
- name: "CPU_EXEC_MICROS_FIELD_NUMBER"
- mtype: "<type \'int\'>"
- }
- member {
- name: "DESCRIPTOR"
- mtype: "<type \'google.protobuf.pyext._message.MessageDescriptor\'>"
- }
- member {
- name: "DEVICES_FIELD_NUMBER"
- mtype: "<type \'int\'>"
- }
- member {
- name: "EXEC_MICROS_FIELD_NUMBER"
- mtype: "<type \'int\'>"
- }
- member {
- name: "Extensions"
- mtype: "<type \'getset_descriptor\'>"
- }
- member {
- name: "FLOAT_OPS_FIELD_NUMBER"
- mtype: "<type \'int\'>"
- }
- member {
- name: "INPUT_SHAPES_FIELD_NUMBER"
- mtype: "<type \'int\'>"
- }
- member {
- name: "InputShapesEntry"
- mtype: "<class \'google.protobuf.pyext.cpp_message.GeneratedProtocolMessageType\'>"
- }
- member {
- name: "NAME_FIELD_NUMBER"
- mtype: "<type \'int\'>"
- }
- member {
- name: "OUTPUT_BYTES_FIELD_NUMBER"
- mtype: "<type \'int\'>"
- }
- member {
- name: "PARAMETERS_FIELD_NUMBER"
- mtype: "<type \'int\'>"
- }
- member {
- name: "PEAK_BYTES_FIELD_NUMBER"
- mtype: "<type \'int\'>"
- }
- member {
- name: "REQUESTED_BYTES_FIELD_NUMBER"
- mtype: "<type \'int\'>"
- }
- member {
- name: "RESIDUAL_BYTES_FIELD_NUMBER"
- mtype: "<type \'int\'>"
- }
- member {
- name: "RUN_COUNT_FIELD_NUMBER"
- mtype: "<type \'int\'>"
- }
- member {
- name: "SHAPES_FIELD_NUMBER"
- mtype: "<type \'int\'>"
- }
- member {
- name: "TENSOR_VALUE_FIELD_NUMBER"
- mtype: "<type \'int\'>"
- }
- member {
- name: "TOTAL_ACCELERATOR_EXEC_MICROS_FIELD_NUMBER"
- mtype: "<type \'int\'>"
- }
- member {
- name: "TOTAL_CPU_EXEC_MICROS_FIELD_NUMBER"
- mtype: "<type \'int\'>"
- }
- member {
- name: "TOTAL_DEFINITION_COUNT_FIELD_NUMBER"
- mtype: "<type \'int\'>"
- }
- member {
- name: "TOTAL_EXEC_MICROS_FIELD_NUMBER"
- mtype: "<type \'int\'>"
- }
- member {
- name: "TOTAL_FLOAT_OPS_FIELD_NUMBER"
- mtype: "<type \'int\'>"
- }
- member {
- name: "TOTAL_OUTPUT_BYTES_FIELD_NUMBER"
- mtype: "<type \'int\'>"
- }
- member {
- name: "TOTAL_PARAMETERS_FIELD_NUMBER"
- mtype: "<type \'int\'>"
- }
- member {
- name: "TOTAL_PEAK_BYTES_FIELD_NUMBER"
- mtype: "<type \'int\'>"
- }
- member {
- name: "TOTAL_REQUESTED_BYTES_FIELD_NUMBER"
- mtype: "<type \'int\'>"
- }
- member {
- name: "TOTAL_RESIDUAL_BYTES_FIELD_NUMBER"
- mtype: "<type \'int\'>"
- }
- member {
- name: "TOTAL_RUN_COUNT_FIELD_NUMBER"
- mtype: "<type \'int\'>"
- }
- member_method {
- name: "ByteSize"
- }
- member_method {
- name: "Clear"
- }
- member_method {
- name: "ClearExtension"
- }
- member_method {
- name: "ClearField"
- }
- member_method {
- name: "CopyFrom"
- }
- member_method {
- name: "DiscardUnknownFields"
- }
- member_method {
- name: "FindInitializationErrors"
- }
- member_method {
- name: "FromString"
- }
- member_method {
- name: "HasExtension"
- }
- member_method {
- name: "HasField"
- }
- member_method {
- name: "IsInitialized"
- }
- member_method {
- name: "ListFields"
- }
- member_method {
- name: "MergeFrom"
- }
- member_method {
- name: "MergeFromString"
- }
- member_method {
- name: "ParseFromString"
- }
- member_method {
- name: "RegisterExtension"
- }
- member_method {
- name: "SerializePartialToString"
- }
- member_method {
- name: "SerializeToString"
- }
- member_method {
- name: "SetInParent"
- }
- member_method {
- name: "WhichOneof"
- }
- member_method {
- name: "__init__"
- }
-}
diff --git a/tensorflow/tools/api/golden/tensorflow.profiler.-multi-graph-node-proto.pbtxt b/tensorflow/tools/api/golden/tensorflow.profiler.-multi-graph-node-proto.pbtxt
deleted file mode 100644
index 33deff6497..0000000000
--- a/tensorflow/tools/api/golden/tensorflow.profiler.-multi-graph-node-proto.pbtxt
+++ /dev/null
@@ -1,160 +0,0 @@
-path: "tensorflow.profiler.MultiGraphNodeProto"
-tf_class {
- is_instance: "<class \'tensorflow.core.profiler.tfprof_output_pb2.MultiGraphNodeProto\'>"
- is_instance: "<type \'google.protobuf.pyext._message.CMessage\'>"
- member {
- name: "ACCELERATOR_EXEC_MICROS_FIELD_NUMBER"
- mtype: "<type \'int\'>"
- }
- member {
- name: "CHILDREN_FIELD_NUMBER"
- mtype: "<type \'int\'>"
- }
- member {
- name: "CPU_EXEC_MICROS_FIELD_NUMBER"
- mtype: "<type \'int\'>"
- }
- member {
- name: "DESCRIPTOR"
- mtype: "<type \'google.protobuf.pyext._message.MessageDescriptor\'>"
- }
- member {
- name: "EXEC_MICROS_FIELD_NUMBER"
- mtype: "<type \'int\'>"
- }
- member {
- name: "Extensions"
- mtype: "<type \'getset_descriptor\'>"
- }
- member {
- name: "FLOAT_OPS_FIELD_NUMBER"
- mtype: "<type \'int\'>"
- }
- member {
- name: "GRAPH_NODES_FIELD_NUMBER"
- mtype: "<type \'int\'>"
- }
- member {
- name: "NAME_FIELD_NUMBER"
- mtype: "<type \'int\'>"
- }
- member {
- name: "OUTPUT_BYTES_FIELD_NUMBER"
- mtype: "<type \'int\'>"
- }
- member {
- name: "PARAMETERS_FIELD_NUMBER"
- mtype: "<type \'int\'>"
- }
- member {
- name: "PEAK_BYTES_FIELD_NUMBER"
- mtype: "<type \'int\'>"
- }
- member {
- name: "REQUESTED_BYTES_FIELD_NUMBER"
- mtype: "<type \'int\'>"
- }
- member {
- name: "RESIDUAL_BYTES_FIELD_NUMBER"
- mtype: "<type \'int\'>"
- }
- member {
- name: "TOTAL_ACCELERATOR_EXEC_MICROS_FIELD_NUMBER"
- mtype: "<type \'int\'>"
- }
- member {
- name: "TOTAL_CPU_EXEC_MICROS_FIELD_NUMBER"
- mtype: "<type \'int\'>"
- }
- member {
- name: "TOTAL_EXEC_MICROS_FIELD_NUMBER"
- mtype: "<type \'int\'>"
- }
- member {
- name: "TOTAL_FLOAT_OPS_FIELD_NUMBER"
- mtype: "<type \'int\'>"
- }
- member {
- name: "TOTAL_OUTPUT_BYTES_FIELD_NUMBER"
- mtype: "<type \'int\'>"
- }
- member {
- name: "TOTAL_PARAMETERS_FIELD_NUMBER"
- mtype: "<type \'int\'>"
- }
- member {
- name: "TOTAL_PEAK_BYTES_FIELD_NUMBER"
- mtype: "<type \'int\'>"
- }
- member {
- name: "TOTAL_REQUESTED_BYTES_FIELD_NUMBER"
- mtype: "<type \'int\'>"
- }
- member {
- name: "TOTAL_RESIDUAL_BYTES_FIELD_NUMBER"
- mtype: "<type \'int\'>"
- }
- member_method {
- name: "ByteSize"
- }
- member_method {
- name: "Clear"
- }
- member_method {
- name: "ClearExtension"
- }
- member_method {
- name: "ClearField"
- }
- member_method {
- name: "CopyFrom"
- }
- member_method {
- name: "DiscardUnknownFields"
- }
- member_method {
- name: "FindInitializationErrors"
- }
- member_method {
- name: "FromString"
- }
- member_method {
- name: "HasExtension"
- }
- member_method {
- name: "HasField"
- }
- member_method {
- name: "IsInitialized"
- }
- member_method {
- name: "ListFields"
- }
- member_method {
- name: "MergeFrom"
- }
- member_method {
- name: "MergeFromString"
- }
- member_method {
- name: "ParseFromString"
- }
- member_method {
- name: "RegisterExtension"
- }
- member_method {
- name: "SerializePartialToString"
- }
- member_method {
- name: "SerializeToString"
- }
- member_method {
- name: "SetInParent"
- }
- member_method {
- name: "WhichOneof"
- }
- member_method {
- name: "__init__"
- }
-}
diff --git a/tensorflow/tools/api/golden/tensorflow.profiler.-op-log-proto.-id-to-string-entry.pbtxt b/tensorflow/tools/api/golden/tensorflow.profiler.-op-log-proto.-id-to-string-entry.pbtxt
deleted file mode 100644
index 8c4727cf35..0000000000
--- a/tensorflow/tools/api/golden/tensorflow.profiler.-op-log-proto.-id-to-string-entry.pbtxt
+++ /dev/null
@@ -1,84 +0,0 @@
-path: "tensorflow.profiler.OpLogProto.IdToStringEntry"
-tf_class {
- is_instance: "<class \'tensorflow.core.profiler.tfprof_log_pb2.IdToStringEntry\'>"
- is_instance: "<type \'google.protobuf.pyext._message.CMessage\'>"
- member {
- name: "DESCRIPTOR"
- mtype: "<type \'google.protobuf.pyext._message.MessageDescriptor\'>"
- }
- member {
- name: "Extensions"
- mtype: "<type \'getset_descriptor\'>"
- }
- member {
- name: "KEY_FIELD_NUMBER"
- mtype: "<type \'int\'>"
- }
- member {
- name: "VALUE_FIELD_NUMBER"
- mtype: "<type \'int\'>"
- }
- member_method {
- name: "ByteSize"
- }
- member_method {
- name: "Clear"
- }
- member_method {
- name: "ClearExtension"
- }
- member_method {
- name: "ClearField"
- }
- member_method {
- name: "CopyFrom"
- }
- member_method {
- name: "DiscardUnknownFields"
- }
- member_method {
- name: "FindInitializationErrors"
- }
- member_method {
- name: "FromString"
- }
- member_method {
- name: "HasExtension"
- }
- member_method {
- name: "HasField"
- }
- member_method {
- name: "IsInitialized"
- }
- member_method {
- name: "ListFields"
- }
- member_method {
- name: "MergeFrom"
- }
- member_method {
- name: "MergeFromString"
- }
- member_method {
- name: "ParseFromString"
- }
- member_method {
- name: "RegisterExtension"
- }
- member_method {
- name: "SerializePartialToString"
- }
- member_method {
- name: "SerializeToString"
- }
- member_method {
- name: "SetInParent"
- }
- member_method {
- name: "WhichOneof"
- }
- member_method {
- name: "__init__"
- }
-}
diff --git a/tensorflow/tools/api/golden/tensorflow.profiler.-op-log-proto.pbtxt b/tensorflow/tools/api/golden/tensorflow.profiler.-op-log-proto.pbtxt
deleted file mode 100644
index 1071a82b5c..0000000000
--- a/tensorflow/tools/api/golden/tensorflow.profiler.-op-log-proto.pbtxt
+++ /dev/null
@@ -1,88 +0,0 @@
-path: "tensorflow.profiler.OpLogProto"
-tf_class {
- is_instance: "<class \'tensorflow.core.profiler.tfprof_log_pb2.OpLogProto\'>"
- is_instance: "<type \'google.protobuf.pyext._message.CMessage\'>"
- member {
- name: "DESCRIPTOR"
- mtype: "<type \'google.protobuf.pyext._message.MessageDescriptor\'>"
- }
- member {
- name: "Extensions"
- mtype: "<type \'getset_descriptor\'>"
- }
- member {
- name: "ID_TO_STRING_FIELD_NUMBER"
- mtype: "<type \'int\'>"
- }
- member {
- name: "IdToStringEntry"
- mtype: "<class \'google.protobuf.pyext.cpp_message.GeneratedProtocolMessageType\'>"
- }
- member {
- name: "LOG_ENTRIES_FIELD_NUMBER"
- mtype: "<type \'int\'>"
- }
- member_method {
- name: "ByteSize"
- }
- member_method {
- name: "Clear"
- }
- member_method {
- name: "ClearExtension"
- }
- member_method {
- name: "ClearField"
- }
- member_method {
- name: "CopyFrom"
- }
- member_method {
- name: "DiscardUnknownFields"
- }
- member_method {
- name: "FindInitializationErrors"
- }
- member_method {
- name: "FromString"
- }
- member_method {
- name: "HasExtension"
- }
- member_method {
- name: "HasField"
- }
- member_method {
- name: "IsInitialized"
- }
- member_method {
- name: "ListFields"
- }
- member_method {
- name: "MergeFrom"
- }
- member_method {
- name: "MergeFromString"
- }
- member_method {
- name: "ParseFromString"
- }
- member_method {
- name: "RegisterExtension"
- }
- member_method {
- name: "SerializePartialToString"
- }
- member_method {
- name: "SerializeToString"
- }
- member_method {
- name: "SetInParent"
- }
- member_method {
- name: "WhichOneof"
- }
- member_method {
- name: "__init__"
- }
-}
diff --git a/tensorflow/tools/api/golden/tensorflow.summary.-event.pbtxt b/tensorflow/tools/api/golden/tensorflow.summary.-event.pbtxt
deleted file mode 100644
index ab3449d80f..0000000000
--- a/tensorflow/tools/api/golden/tensorflow.summary.-event.pbtxt
+++ /dev/null
@@ -1,112 +0,0 @@
-path: "tensorflow.summary.Event"
-tf_class {
- is_instance: "<class \'tensorflow.core.util.event_pb2.Event\'>"
- is_instance: "<type \'google.protobuf.pyext._message.CMessage\'>"
- member {
- name: "DESCRIPTOR"
- mtype: "<type \'google.protobuf.pyext._message.MessageDescriptor\'>"
- }
- member {
- name: "Extensions"
- mtype: "<type \'getset_descriptor\'>"
- }
- member {
- name: "FILE_VERSION_FIELD_NUMBER"
- mtype: "<type \'int\'>"
- }
- member {
- name: "GRAPH_DEF_FIELD_NUMBER"
- mtype: "<type \'int\'>"
- }
- member {
- name: "LOG_MESSAGE_FIELD_NUMBER"
- mtype: "<type \'int\'>"
- }
- member {
- name: "META_GRAPH_DEF_FIELD_NUMBER"
- mtype: "<type \'int\'>"
- }
- member {
- name: "SESSION_LOG_FIELD_NUMBER"
- mtype: "<type \'int\'>"
- }
- member {
- name: "STEP_FIELD_NUMBER"
- mtype: "<type \'int\'>"
- }
- member {
- name: "SUMMARY_FIELD_NUMBER"
- mtype: "<type \'int\'>"
- }
- member {
- name: "TAGGED_RUN_METADATA_FIELD_NUMBER"
- mtype: "<type \'int\'>"
- }
- member {
- name: "WALL_TIME_FIELD_NUMBER"
- mtype: "<type \'int\'>"
- }
- member_method {
- name: "ByteSize"
- }
- member_method {
- name: "Clear"
- }
- member_method {
- name: "ClearExtension"
- }
- member_method {
- name: "ClearField"
- }
- member_method {
- name: "CopyFrom"
- }
- member_method {
- name: "DiscardUnknownFields"
- }
- member_method {
- name: "FindInitializationErrors"
- }
- member_method {
- name: "FromString"
- }
- member_method {
- name: "HasExtension"
- }
- member_method {
- name: "HasField"
- }
- member_method {
- name: "IsInitialized"
- }
- member_method {
- name: "ListFields"
- }
- member_method {
- name: "MergeFrom"
- }
- member_method {
- name: "MergeFromString"
- }
- member_method {
- name: "ParseFromString"
- }
- member_method {
- name: "RegisterExtension"
- }
- member_method {
- name: "SerializePartialToString"
- }
- member_method {
- name: "SerializeToString"
- }
- member_method {
- name: "SetInParent"
- }
- member_method {
- name: "WhichOneof"
- }
- member_method {
- name: "__init__"
- }
-}
diff --git a/tensorflow/tools/api/golden/tensorflow.summary.-session-log.pbtxt b/tensorflow/tools/api/golden/tensorflow.summary.-session-log.pbtxt
deleted file mode 100644
index 92ca4872ca..0000000000
--- a/tensorflow/tools/api/golden/tensorflow.summary.-session-log.pbtxt
+++ /dev/null
@@ -1,108 +0,0 @@
-path: "tensorflow.summary.SessionLog"
-tf_class {
- is_instance: "<class \'tensorflow.core.util.event_pb2.SessionLog\'>"
- is_instance: "<type \'google.protobuf.pyext._message.CMessage\'>"
- member {
- name: "CHECKPOINT"
- mtype: "<type \'int\'>"
- }
- member {
- name: "CHECKPOINT_PATH_FIELD_NUMBER"
- mtype: "<type \'int\'>"
- }
- member {
- name: "DESCRIPTOR"
- mtype: "<type \'google.protobuf.pyext._message.MessageDescriptor\'>"
- }
- member {
- name: "Extensions"
- mtype: "<type \'getset_descriptor\'>"
- }
- member {
- name: "MSG_FIELD_NUMBER"
- mtype: "<type \'int\'>"
- }
- member {
- name: "START"
- mtype: "<type \'int\'>"
- }
- member {
- name: "STATUS_FIELD_NUMBER"
- mtype: "<type \'int\'>"
- }
- member {
- name: "STATUS_UNSPECIFIED"
- mtype: "<type \'int\'>"
- }
- member {
- name: "STOP"
- mtype: "<type \'int\'>"
- }
- member {
- name: "SessionStatus"
- mtype: "<class \'google.protobuf.internal.enum_type_wrapper.EnumTypeWrapper\'>"
- }
- member_method {
- name: "ByteSize"
- }
- member_method {
- name: "Clear"
- }
- member_method {
- name: "ClearExtension"
- }
- member_method {
- name: "ClearField"
- }
- member_method {
- name: "CopyFrom"
- }
- member_method {
- name: "DiscardUnknownFields"
- }
- member_method {
- name: "FindInitializationErrors"
- }
- member_method {
- name: "FromString"
- }
- member_method {
- name: "HasExtension"
- }
- member_method {
- name: "HasField"
- }
- member_method {
- name: "IsInitialized"
- }
- member_method {
- name: "ListFields"
- }
- member_method {
- name: "MergeFrom"
- }
- member_method {
- name: "MergeFromString"
- }
- member_method {
- name: "ParseFromString"
- }
- member_method {
- name: "RegisterExtension"
- }
- member_method {
- name: "SerializePartialToString"
- }
- member_method {
- name: "SerializeToString"
- }
- member_method {
- name: "SetInParent"
- }
- member_method {
- name: "WhichOneof"
- }
- member_method {
- name: "__init__"
- }
-}
diff --git a/tensorflow/tools/api/golden/tensorflow.summary.-summary-description.pbtxt b/tensorflow/tools/api/golden/tensorflow.summary.-summary-description.pbtxt
deleted file mode 100644
index f93da2196a..0000000000
--- a/tensorflow/tools/api/golden/tensorflow.summary.-summary-description.pbtxt
+++ /dev/null
@@ -1,80 +0,0 @@
-path: "tensorflow.summary.SummaryDescription"
-tf_class {
- is_instance: "<class \'tensorflow.core.framework.summary_pb2.SummaryDescription\'>"
- is_instance: "<type \'google.protobuf.pyext._message.CMessage\'>"
- member {
- name: "DESCRIPTOR"
- mtype: "<type \'google.protobuf.pyext._message.MessageDescriptor\'>"
- }
- member {
- name: "Extensions"
- mtype: "<type \'getset_descriptor\'>"
- }
- member {
- name: "TYPE_HINT_FIELD_NUMBER"
- mtype: "<type \'int\'>"
- }
- member_method {
- name: "ByteSize"
- }
- member_method {
- name: "Clear"
- }
- member_method {
- name: "ClearExtension"
- }
- member_method {
- name: "ClearField"
- }
- member_method {
- name: "CopyFrom"
- }
- member_method {
- name: "DiscardUnknownFields"
- }
- member_method {
- name: "FindInitializationErrors"
- }
- member_method {
- name: "FromString"
- }
- member_method {
- name: "HasExtension"
- }
- member_method {
- name: "HasField"
- }
- member_method {
- name: "IsInitialized"
- }
- member_method {
- name: "ListFields"
- }
- member_method {
- name: "MergeFrom"
- }
- member_method {
- name: "MergeFromString"
- }
- member_method {
- name: "ParseFromString"
- }
- member_method {
- name: "RegisterExtension"
- }
- member_method {
- name: "SerializePartialToString"
- }
- member_method {
- name: "SerializeToString"
- }
- member_method {
- name: "SetInParent"
- }
- member_method {
- name: "WhichOneof"
- }
- member_method {
- name: "__init__"
- }
-}
diff --git a/tensorflow/tools/api/golden/tensorflow.summary.-summary.-audio.pbtxt b/tensorflow/tools/api/golden/tensorflow.summary.-summary.-audio.pbtxt
deleted file mode 100644
index 605e305e82..0000000000
--- a/tensorflow/tools/api/golden/tensorflow.summary.-summary.-audio.pbtxt
+++ /dev/null
@@ -1,96 +0,0 @@
-path: "tensorflow.summary.Summary.Audio"
-tf_class {
- is_instance: "<class \'tensorflow.core.framework.summary_pb2.Audio\'>"
- is_instance: "<type \'google.protobuf.pyext._message.CMessage\'>"
- member {
- name: "CONTENT_TYPE_FIELD_NUMBER"
- mtype: "<type \'int\'>"
- }
- member {
- name: "DESCRIPTOR"
- mtype: "<type \'google.protobuf.pyext._message.MessageDescriptor\'>"
- }
- member {
- name: "ENCODED_AUDIO_STRING_FIELD_NUMBER"
- mtype: "<type \'int\'>"
- }
- member {
- name: "Extensions"
- mtype: "<type \'getset_descriptor\'>"
- }
- member {
- name: "LENGTH_FRAMES_FIELD_NUMBER"
- mtype: "<type \'int\'>"
- }
- member {
- name: "NUM_CHANNELS_FIELD_NUMBER"
- mtype: "<type \'int\'>"
- }
- member {
- name: "SAMPLE_RATE_FIELD_NUMBER"
- mtype: "<type \'int\'>"
- }
- member_method {
- name: "ByteSize"
- }
- member_method {
- name: "Clear"
- }
- member_method {
- name: "ClearExtension"
- }
- member_method {
- name: "ClearField"
- }
- member_method {
- name: "CopyFrom"
- }
- member_method {
- name: "DiscardUnknownFields"
- }
- member_method {
- name: "FindInitializationErrors"
- }
- member_method {
- name: "FromString"
- }
- member_method {
- name: "HasExtension"
- }
- member_method {
- name: "HasField"
- }
- member_method {
- name: "IsInitialized"
- }
- member_method {
- name: "ListFields"
- }
- member_method {
- name: "MergeFrom"
- }
- member_method {
- name: "MergeFromString"
- }
- member_method {
- name: "ParseFromString"
- }
- member_method {
- name: "RegisterExtension"
- }
- member_method {
- name: "SerializePartialToString"
- }
- member_method {
- name: "SerializeToString"
- }
- member_method {
- name: "SetInParent"
- }
- member_method {
- name: "WhichOneof"
- }
- member_method {
- name: "__init__"
- }
-}
diff --git a/tensorflow/tools/api/golden/tensorflow.summary.-summary.-image.pbtxt b/tensorflow/tools/api/golden/tensorflow.summary.-summary.-image.pbtxt
deleted file mode 100644
index 0646972196..0000000000
--- a/tensorflow/tools/api/golden/tensorflow.summary.-summary.-image.pbtxt
+++ /dev/null
@@ -1,92 +0,0 @@
-path: "tensorflow.summary.Summary.Image"
-tf_class {
- is_instance: "<class \'tensorflow.core.framework.summary_pb2.Image\'>"
- is_instance: "<type \'google.protobuf.pyext._message.CMessage\'>"
- member {
- name: "COLORSPACE_FIELD_NUMBER"
- mtype: "<type \'int\'>"
- }
- member {
- name: "DESCRIPTOR"
- mtype: "<type \'google.protobuf.pyext._message.MessageDescriptor\'>"
- }
- member {
- name: "ENCODED_IMAGE_STRING_FIELD_NUMBER"
- mtype: "<type \'int\'>"
- }
- member {
- name: "Extensions"
- mtype: "<type \'getset_descriptor\'>"
- }
- member {
- name: "HEIGHT_FIELD_NUMBER"
- mtype: "<type \'int\'>"
- }
- member {
- name: "WIDTH_FIELD_NUMBER"
- mtype: "<type \'int\'>"
- }
- member_method {
- name: "ByteSize"
- }
- member_method {
- name: "Clear"
- }
- member_method {
- name: "ClearExtension"
- }
- member_method {
- name: "ClearField"
- }
- member_method {
- name: "CopyFrom"
- }
- member_method {
- name: "DiscardUnknownFields"
- }
- member_method {
- name: "FindInitializationErrors"
- }
- member_method {
- name: "FromString"
- }
- member_method {
- name: "HasExtension"
- }
- member_method {
- name: "HasField"
- }
- member_method {
- name: "IsInitialized"
- }
- member_method {
- name: "ListFields"
- }
- member_method {
- name: "MergeFrom"
- }
- member_method {
- name: "MergeFromString"
- }
- member_method {
- name: "ParseFromString"
- }
- member_method {
- name: "RegisterExtension"
- }
- member_method {
- name: "SerializePartialToString"
- }
- member_method {
- name: "SerializeToString"
- }
- member_method {
- name: "SetInParent"
- }
- member_method {
- name: "WhichOneof"
- }
- member_method {
- name: "__init__"
- }
-}
diff --git a/tensorflow/tools/api/golden/tensorflow.summary.-summary.-value.pbtxt b/tensorflow/tools/api/golden/tensorflow.summary.-summary.-value.pbtxt
deleted file mode 100644
index b319cd03d9..0000000000
--- a/tensorflow/tools/api/golden/tensorflow.summary.-summary.-value.pbtxt
+++ /dev/null
@@ -1,112 +0,0 @@
-path: "tensorflow.summary.Summary.Value"
-tf_class {
- is_instance: "<class \'tensorflow.core.framework.summary_pb2.Value\'>"
- is_instance: "<type \'google.protobuf.pyext._message.CMessage\'>"
- member {
- name: "AUDIO_FIELD_NUMBER"
- mtype: "<type \'int\'>"
- }
- member {
- name: "DESCRIPTOR"
- mtype: "<type \'google.protobuf.pyext._message.MessageDescriptor\'>"
- }
- member {
- name: "Extensions"
- mtype: "<type \'getset_descriptor\'>"
- }
- member {
- name: "HISTO_FIELD_NUMBER"
- mtype: "<type \'int\'>"
- }
- member {
- name: "IMAGE_FIELD_NUMBER"
- mtype: "<type \'int\'>"
- }
- member {
- name: "METADATA_FIELD_NUMBER"
- mtype: "<type \'int\'>"
- }
- member {
- name: "NODE_NAME_FIELD_NUMBER"
- mtype: "<type \'int\'>"
- }
- member {
- name: "OBSOLETE_OLD_STYLE_HISTOGRAM_FIELD_NUMBER"
- mtype: "<type \'int\'>"
- }
- member {
- name: "SIMPLE_VALUE_FIELD_NUMBER"
- mtype: "<type \'int\'>"
- }
- member {
- name: "TAG_FIELD_NUMBER"
- mtype: "<type \'int\'>"
- }
- member {
- name: "TENSOR_FIELD_NUMBER"
- mtype: "<type \'int\'>"
- }
- member_method {
- name: "ByteSize"
- }
- member_method {
- name: "Clear"
- }
- member_method {
- name: "ClearExtension"
- }
- member_method {
- name: "ClearField"
- }
- member_method {
- name: "CopyFrom"
- }
- member_method {
- name: "DiscardUnknownFields"
- }
- member_method {
- name: "FindInitializationErrors"
- }
- member_method {
- name: "FromString"
- }
- member_method {
- name: "HasExtension"
- }
- member_method {
- name: "HasField"
- }
- member_method {
- name: "IsInitialized"
- }
- member_method {
- name: "ListFields"
- }
- member_method {
- name: "MergeFrom"
- }
- member_method {
- name: "MergeFromString"
- }
- member_method {
- name: "ParseFromString"
- }
- member_method {
- name: "RegisterExtension"
- }
- member_method {
- name: "SerializePartialToString"
- }
- member_method {
- name: "SerializeToString"
- }
- member_method {
- name: "SetInParent"
- }
- member_method {
- name: "WhichOneof"
- }
- member_method {
- name: "__init__"
- }
-}
diff --git a/tensorflow/tools/api/golden/tensorflow.summary.-summary.pbtxt b/tensorflow/tools/api/golden/tensorflow.summary.-summary.pbtxt
deleted file mode 100644
index 132ef1b7d2..0000000000
--- a/tensorflow/tools/api/golden/tensorflow.summary.-summary.pbtxt
+++ /dev/null
@@ -1,92 +0,0 @@
-path: "tensorflow.summary.Summary"
-tf_class {
- is_instance: "<class \'tensorflow.core.framework.summary_pb2.Summary\'>"
- is_instance: "<type \'google.protobuf.pyext._message.CMessage\'>"
- member {
- name: "Audio"
- mtype: "<class \'google.protobuf.pyext.cpp_message.GeneratedProtocolMessageType\'>"
- }
- member {
- name: "DESCRIPTOR"
- mtype: "<type \'google.protobuf.pyext._message.MessageDescriptor\'>"
- }
- member {
- name: "Extensions"
- mtype: "<type \'getset_descriptor\'>"
- }
- member {
- name: "Image"
- mtype: "<class \'google.protobuf.pyext.cpp_message.GeneratedProtocolMessageType\'>"
- }
- member {
- name: "VALUE_FIELD_NUMBER"
- mtype: "<type \'int\'>"
- }
- member {
- name: "Value"
- mtype: "<class \'google.protobuf.pyext.cpp_message.GeneratedProtocolMessageType\'>"
- }
- member_method {
- name: "ByteSize"
- }
- member_method {
- name: "Clear"
- }
- member_method {
- name: "ClearExtension"
- }
- member_method {
- name: "ClearField"
- }
- member_method {
- name: "CopyFrom"
- }
- member_method {
- name: "DiscardUnknownFields"
- }
- member_method {
- name: "FindInitializationErrors"
- }
- member_method {
- name: "FromString"
- }
- member_method {
- name: "HasExtension"
- }
- member_method {
- name: "HasField"
- }
- member_method {
- name: "IsInitialized"
- }
- member_method {
- name: "ListFields"
- }
- member_method {
- name: "MergeFrom"
- }
- member_method {
- name: "MergeFromString"
- }
- member_method {
- name: "ParseFromString"
- }
- member_method {
- name: "RegisterExtension"
- }
- member_method {
- name: "SerializePartialToString"
- }
- member_method {
- name: "SerializeToString"
- }
- member_method {
- name: "SetInParent"
- }
- member_method {
- name: "WhichOneof"
- }
- member_method {
- name: "__init__"
- }
-}
diff --git a/tensorflow/tools/api/golden/tensorflow.summary.-tagged-run-metadata.pbtxt b/tensorflow/tools/api/golden/tensorflow.summary.-tagged-run-metadata.pbtxt
deleted file mode 100644
index 4dce20819d..0000000000
--- a/tensorflow/tools/api/golden/tensorflow.summary.-tagged-run-metadata.pbtxt
+++ /dev/null
@@ -1,84 +0,0 @@
-path: "tensorflow.summary.TaggedRunMetadata"
-tf_class {
- is_instance: "<class \'tensorflow.core.util.event_pb2.TaggedRunMetadata\'>"
- is_instance: "<type \'google.protobuf.pyext._message.CMessage\'>"
- member {
- name: "DESCRIPTOR"
- mtype: "<type \'google.protobuf.pyext._message.MessageDescriptor\'>"
- }
- member {
- name: "Extensions"
- mtype: "<type \'getset_descriptor\'>"
- }
- member {
- name: "RUN_METADATA_FIELD_NUMBER"
- mtype: "<type \'int\'>"
- }
- member {
- name: "TAG_FIELD_NUMBER"
- mtype: "<type \'int\'>"
- }
- member_method {
- name: "ByteSize"
- }
- member_method {
- name: "Clear"
- }
- member_method {
- name: "ClearExtension"
- }
- member_method {
- name: "ClearField"
- }
- member_method {
- name: "CopyFrom"
- }
- member_method {
- name: "DiscardUnknownFields"
- }
- member_method {
- name: "FindInitializationErrors"
- }
- member_method {
- name: "FromString"
- }
- member_method {
- name: "HasExtension"
- }
- member_method {
- name: "HasField"
- }
- member_method {
- name: "IsInitialized"
- }
- member_method {
- name: "ListFields"
- }
- member_method {
- name: "MergeFrom"
- }
- member_method {
- name: "MergeFromString"
- }
- member_method {
- name: "ParseFromString"
- }
- member_method {
- name: "RegisterExtension"
- }
- member_method {
- name: "SerializePartialToString"
- }
- member_method {
- name: "SerializeToString"
- }
- member_method {
- name: "SetInParent"
- }
- member_method {
- name: "WhichOneof"
- }
- member_method {
- name: "__init__"
- }
-}
diff --git a/tensorflow/tools/api/golden/tensorflow.train.-bytes-list.pbtxt b/tensorflow/tools/api/golden/tensorflow.train.-bytes-list.pbtxt
deleted file mode 100644
index 8cf52b817f..0000000000
--- a/tensorflow/tools/api/golden/tensorflow.train.-bytes-list.pbtxt
+++ /dev/null
@@ -1,80 +0,0 @@
-path: "tensorflow.train.BytesList"
-tf_class {
- is_instance: "<class \'tensorflow.core.example.feature_pb2.BytesList\'>"
- is_instance: "<type \'google.protobuf.pyext._message.CMessage\'>"
- member {
- name: "DESCRIPTOR"
- mtype: "<type \'google.protobuf.pyext._message.MessageDescriptor\'>"
- }
- member {
- name: "Extensions"
- mtype: "<type \'getset_descriptor\'>"
- }
- member {
- name: "VALUE_FIELD_NUMBER"
- mtype: "<type \'int\'>"
- }
- member_method {
- name: "ByteSize"
- }
- member_method {
- name: "Clear"
- }
- member_method {
- name: "ClearExtension"
- }
- member_method {
- name: "ClearField"
- }
- member_method {
- name: "CopyFrom"
- }
- member_method {
- name: "DiscardUnknownFields"
- }
- member_method {
- name: "FindInitializationErrors"
- }
- member_method {
- name: "FromString"
- }
- member_method {
- name: "HasExtension"
- }
- member_method {
- name: "HasField"
- }
- member_method {
- name: "IsInitialized"
- }
- member_method {
- name: "ListFields"
- }
- member_method {
- name: "MergeFrom"
- }
- member_method {
- name: "MergeFromString"
- }
- member_method {
- name: "ParseFromString"
- }
- member_method {
- name: "RegisterExtension"
- }
- member_method {
- name: "SerializePartialToString"
- }
- member_method {
- name: "SerializeToString"
- }
- member_method {
- name: "SetInParent"
- }
- member_method {
- name: "WhichOneof"
- }
- member_method {
- name: "__init__"
- }
-}
diff --git a/tensorflow/tools/api/golden/tensorflow.train.-cluster-def.pbtxt b/tensorflow/tools/api/golden/tensorflow.train.-cluster-def.pbtxt
deleted file mode 100644
index 93ff856b09..0000000000
--- a/tensorflow/tools/api/golden/tensorflow.train.-cluster-def.pbtxt
+++ /dev/null
@@ -1,80 +0,0 @@
-path: "tensorflow.train.ClusterDef"
-tf_class {
- is_instance: "<class \'tensorflow.core.protobuf.cluster_pb2.ClusterDef\'>"
- is_instance: "<type \'google.protobuf.pyext._message.CMessage\'>"
- member {
- name: "DESCRIPTOR"
- mtype: "<type \'google.protobuf.pyext._message.MessageDescriptor\'>"
- }
- member {
- name: "Extensions"
- mtype: "<type \'getset_descriptor\'>"
- }
- member {
- name: "JOB_FIELD_NUMBER"
- mtype: "<type \'int\'>"
- }
- member_method {
- name: "ByteSize"
- }
- member_method {
- name: "Clear"
- }
- member_method {
- name: "ClearExtension"
- }
- member_method {
- name: "ClearField"
- }
- member_method {
- name: "CopyFrom"
- }
- member_method {
- name: "DiscardUnknownFields"
- }
- member_method {
- name: "FindInitializationErrors"
- }
- member_method {
- name: "FromString"
- }
- member_method {
- name: "HasExtension"
- }
- member_method {
- name: "HasField"
- }
- member_method {
- name: "IsInitialized"
- }
- member_method {
- name: "ListFields"
- }
- member_method {
- name: "MergeFrom"
- }
- member_method {
- name: "MergeFromString"
- }
- member_method {
- name: "ParseFromString"
- }
- member_method {
- name: "RegisterExtension"
- }
- member_method {
- name: "SerializePartialToString"
- }
- member_method {
- name: "SerializeToString"
- }
- member_method {
- name: "SetInParent"
- }
- member_method {
- name: "WhichOneof"
- }
- member_method {
- name: "__init__"
- }
-}
diff --git a/tensorflow/tools/api/golden/tensorflow.train.-example.pbtxt b/tensorflow/tools/api/golden/tensorflow.train.-example.pbtxt
deleted file mode 100644
index f7215a2037..0000000000
--- a/tensorflow/tools/api/golden/tensorflow.train.-example.pbtxt
+++ /dev/null
@@ -1,80 +0,0 @@
-path: "tensorflow.train.Example"
-tf_class {
- is_instance: "<class \'tensorflow.core.example.example_pb2.Example\'>"
- is_instance: "<type \'google.protobuf.pyext._message.CMessage\'>"
- member {
- name: "DESCRIPTOR"
- mtype: "<type \'google.protobuf.pyext._message.MessageDescriptor\'>"
- }
- member {
- name: "Extensions"
- mtype: "<type \'getset_descriptor\'>"
- }
- member {
- name: "FEATURES_FIELD_NUMBER"
- mtype: "<type \'int\'>"
- }
- member_method {
- name: "ByteSize"
- }
- member_method {
- name: "Clear"
- }
- member_method {
- name: "ClearExtension"
- }
- member_method {
- name: "ClearField"
- }
- member_method {
- name: "CopyFrom"
- }
- member_method {
- name: "DiscardUnknownFields"
- }
- member_method {
- name: "FindInitializationErrors"
- }
- member_method {
- name: "FromString"
- }
- member_method {
- name: "HasExtension"
- }
- member_method {
- name: "HasField"
- }
- member_method {
- name: "IsInitialized"
- }
- member_method {
- name: "ListFields"
- }
- member_method {
- name: "MergeFrom"
- }
- member_method {
- name: "MergeFromString"
- }
- member_method {
- name: "ParseFromString"
- }
- member_method {
- name: "RegisterExtension"
- }
- member_method {
- name: "SerializePartialToString"
- }
- member_method {
- name: "SerializeToString"
- }
- member_method {
- name: "SetInParent"
- }
- member_method {
- name: "WhichOneof"
- }
- member_method {
- name: "__init__"
- }
-}
diff --git a/tensorflow/tools/api/golden/tensorflow.train.-feature-list.pbtxt b/tensorflow/tools/api/golden/tensorflow.train.-feature-list.pbtxt
deleted file mode 100644
index 3ad98354d6..0000000000
--- a/tensorflow/tools/api/golden/tensorflow.train.-feature-list.pbtxt
+++ /dev/null
@@ -1,80 +0,0 @@
-path: "tensorflow.train.FeatureList"
-tf_class {
- is_instance: "<class \'tensorflow.core.example.feature_pb2.FeatureList\'>"
- is_instance: "<type \'google.protobuf.pyext._message.CMessage\'>"
- member {
- name: "DESCRIPTOR"
- mtype: "<type \'google.protobuf.pyext._message.MessageDescriptor\'>"
- }
- member {
- name: "Extensions"
- mtype: "<type \'getset_descriptor\'>"
- }
- member {
- name: "FEATURE_FIELD_NUMBER"
- mtype: "<type \'int\'>"
- }
- member_method {
- name: "ByteSize"
- }
- member_method {
- name: "Clear"
- }
- member_method {
- name: "ClearExtension"
- }
- member_method {
- name: "ClearField"
- }
- member_method {
- name: "CopyFrom"
- }
- member_method {
- name: "DiscardUnknownFields"
- }
- member_method {
- name: "FindInitializationErrors"
- }
- member_method {
- name: "FromString"
- }
- member_method {
- name: "HasExtension"
- }
- member_method {
- name: "HasField"
- }
- member_method {
- name: "IsInitialized"
- }
- member_method {
- name: "ListFields"
- }
- member_method {
- name: "MergeFrom"
- }
- member_method {
- name: "MergeFromString"
- }
- member_method {
- name: "ParseFromString"
- }
- member_method {
- name: "RegisterExtension"
- }
- member_method {
- name: "SerializePartialToString"
- }
- member_method {
- name: "SerializeToString"
- }
- member_method {
- name: "SetInParent"
- }
- member_method {
- name: "WhichOneof"
- }
- member_method {
- name: "__init__"
- }
-}
diff --git a/tensorflow/tools/api/golden/tensorflow.train.-feature-lists.-feature-list-entry.pbtxt b/tensorflow/tools/api/golden/tensorflow.train.-feature-lists.-feature-list-entry.pbtxt
deleted file mode 100644
index cd171f4ca3..0000000000
--- a/tensorflow/tools/api/golden/tensorflow.train.-feature-lists.-feature-list-entry.pbtxt
+++ /dev/null
@@ -1,84 +0,0 @@
-path: "tensorflow.train.FeatureLists.FeatureListEntry"
-tf_class {
- is_instance: "<class \'tensorflow.core.example.feature_pb2.FeatureListEntry\'>"
- is_instance: "<type \'google.protobuf.pyext._message.CMessage\'>"
- member {
- name: "DESCRIPTOR"
- mtype: "<type \'google.protobuf.pyext._message.MessageDescriptor\'>"
- }
- member {
- name: "Extensions"
- mtype: "<type \'getset_descriptor\'>"
- }
- member {
- name: "KEY_FIELD_NUMBER"
- mtype: "<type \'int\'>"
- }
- member {
- name: "VALUE_FIELD_NUMBER"
- mtype: "<type \'int\'>"
- }
- member_method {
- name: "ByteSize"
- }
- member_method {
- name: "Clear"
- }
- member_method {
- name: "ClearExtension"
- }
- member_method {
- name: "ClearField"
- }
- member_method {
- name: "CopyFrom"
- }
- member_method {
- name: "DiscardUnknownFields"
- }
- member_method {
- name: "FindInitializationErrors"
- }
- member_method {
- name: "FromString"
- }
- member_method {
- name: "HasExtension"
- }
- member_method {
- name: "HasField"
- }
- member_method {
- name: "IsInitialized"
- }
- member_method {
- name: "ListFields"
- }
- member_method {
- name: "MergeFrom"
- }
- member_method {
- name: "MergeFromString"
- }
- member_method {
- name: "ParseFromString"
- }
- member_method {
- name: "RegisterExtension"
- }
- member_method {
- name: "SerializePartialToString"
- }
- member_method {
- name: "SerializeToString"
- }
- member_method {
- name: "SetInParent"
- }
- member_method {
- name: "WhichOneof"
- }
- member_method {
- name: "__init__"
- }
-}
diff --git a/tensorflow/tools/api/golden/tensorflow.train.-feature-lists.pbtxt b/tensorflow/tools/api/golden/tensorflow.train.-feature-lists.pbtxt
deleted file mode 100644
index 3d95017d58..0000000000
--- a/tensorflow/tools/api/golden/tensorflow.train.-feature-lists.pbtxt
+++ /dev/null
@@ -1,84 +0,0 @@
-path: "tensorflow.train.FeatureLists"
-tf_class {
- is_instance: "<class \'tensorflow.core.example.feature_pb2.FeatureLists\'>"
- is_instance: "<type \'google.protobuf.pyext._message.CMessage\'>"
- member {
- name: "DESCRIPTOR"
- mtype: "<type \'google.protobuf.pyext._message.MessageDescriptor\'>"
- }
- member {
- name: "Extensions"
- mtype: "<type \'getset_descriptor\'>"
- }
- member {
- name: "FEATURE_LIST_FIELD_NUMBER"
- mtype: "<type \'int\'>"
- }
- member {
- name: "FeatureListEntry"
- mtype: "<class \'google.protobuf.pyext.cpp_message.GeneratedProtocolMessageType\'>"
- }
- member_method {
- name: "ByteSize"
- }
- member_method {
- name: "Clear"
- }
- member_method {
- name: "ClearExtension"
- }
- member_method {
- name: "ClearField"
- }
- member_method {
- name: "CopyFrom"
- }
- member_method {
- name: "DiscardUnknownFields"
- }
- member_method {
- name: "FindInitializationErrors"
- }
- member_method {
- name: "FromString"
- }
- member_method {
- name: "HasExtension"
- }
- member_method {
- name: "HasField"
- }
- member_method {
- name: "IsInitialized"
- }
- member_method {
- name: "ListFields"
- }
- member_method {
- name: "MergeFrom"
- }
- member_method {
- name: "MergeFromString"
- }
- member_method {
- name: "ParseFromString"
- }
- member_method {
- name: "RegisterExtension"
- }
- member_method {
- name: "SerializePartialToString"
- }
- member_method {
- name: "SerializeToString"
- }
- member_method {
- name: "SetInParent"
- }
- member_method {
- name: "WhichOneof"
- }
- member_method {
- name: "__init__"
- }
-}
diff --git a/tensorflow/tools/api/golden/tensorflow.train.-feature.pbtxt b/tensorflow/tools/api/golden/tensorflow.train.-feature.pbtxt
deleted file mode 100644
index 9cca132bba..0000000000
--- a/tensorflow/tools/api/golden/tensorflow.train.-feature.pbtxt
+++ /dev/null
@@ -1,88 +0,0 @@
-path: "tensorflow.train.Feature"
-tf_class {
- is_instance: "<class \'tensorflow.core.example.feature_pb2.Feature\'>"
- is_instance: "<type \'google.protobuf.pyext._message.CMessage\'>"
- member {
- name: "BYTES_LIST_FIELD_NUMBER"
- mtype: "<type \'int\'>"
- }
- member {
- name: "DESCRIPTOR"
- mtype: "<type \'google.protobuf.pyext._message.MessageDescriptor\'>"
- }
- member {
- name: "Extensions"
- mtype: "<type \'getset_descriptor\'>"
- }
- member {
- name: "FLOAT_LIST_FIELD_NUMBER"
- mtype: "<type \'int\'>"
- }
- member {
- name: "INT64_LIST_FIELD_NUMBER"
- mtype: "<type \'int\'>"
- }
- member_method {
- name: "ByteSize"
- }
- member_method {
- name: "Clear"
- }
- member_method {
- name: "ClearExtension"
- }
- member_method {
- name: "ClearField"
- }
- member_method {
- name: "CopyFrom"
- }
- member_method {
- name: "DiscardUnknownFields"
- }
- member_method {
- name: "FindInitializationErrors"
- }
- member_method {
- name: "FromString"
- }
- member_method {
- name: "HasExtension"
- }
- member_method {
- name: "HasField"
- }
- member_method {
- name: "IsInitialized"
- }
- member_method {
- name: "ListFields"
- }
- member_method {
- name: "MergeFrom"
- }
- member_method {
- name: "MergeFromString"
- }
- member_method {
- name: "ParseFromString"
- }
- member_method {
- name: "RegisterExtension"
- }
- member_method {
- name: "SerializePartialToString"
- }
- member_method {
- name: "SerializeToString"
- }
- member_method {
- name: "SetInParent"
- }
- member_method {
- name: "WhichOneof"
- }
- member_method {
- name: "__init__"
- }
-}
diff --git a/tensorflow/tools/api/golden/tensorflow.train.-features.-feature-entry.pbtxt b/tensorflow/tools/api/golden/tensorflow.train.-features.-feature-entry.pbtxt
deleted file mode 100644
index 858aee0341..0000000000
--- a/tensorflow/tools/api/golden/tensorflow.train.-features.-feature-entry.pbtxt
+++ /dev/null
@@ -1,84 +0,0 @@
-path: "tensorflow.train.Features.FeatureEntry"
-tf_class {
- is_instance: "<class \'tensorflow.core.example.feature_pb2.FeatureEntry\'>"
- is_instance: "<type \'google.protobuf.pyext._message.CMessage\'>"
- member {
- name: "DESCRIPTOR"
- mtype: "<type \'google.protobuf.pyext._message.MessageDescriptor\'>"
- }
- member {
- name: "Extensions"
- mtype: "<type \'getset_descriptor\'>"
- }
- member {
- name: "KEY_FIELD_NUMBER"
- mtype: "<type \'int\'>"
- }
- member {
- name: "VALUE_FIELD_NUMBER"
- mtype: "<type \'int\'>"
- }
- member_method {
- name: "ByteSize"
- }
- member_method {
- name: "Clear"
- }
- member_method {
- name: "ClearExtension"
- }
- member_method {
- name: "ClearField"
- }
- member_method {
- name: "CopyFrom"
- }
- member_method {
- name: "DiscardUnknownFields"
- }
- member_method {
- name: "FindInitializationErrors"
- }
- member_method {
- name: "FromString"
- }
- member_method {
- name: "HasExtension"
- }
- member_method {
- name: "HasField"
- }
- member_method {
- name: "IsInitialized"
- }
- member_method {
- name: "ListFields"
- }
- member_method {
- name: "MergeFrom"
- }
- member_method {
- name: "MergeFromString"
- }
- member_method {
- name: "ParseFromString"
- }
- member_method {
- name: "RegisterExtension"
- }
- member_method {
- name: "SerializePartialToString"
- }
- member_method {
- name: "SerializeToString"
- }
- member_method {
- name: "SetInParent"
- }
- member_method {
- name: "WhichOneof"
- }
- member_method {
- name: "__init__"
- }
-}
diff --git a/tensorflow/tools/api/golden/tensorflow.train.-features.pbtxt b/tensorflow/tools/api/golden/tensorflow.train.-features.pbtxt
deleted file mode 100644
index 49cd12153b..0000000000
--- a/tensorflow/tools/api/golden/tensorflow.train.-features.pbtxt
+++ /dev/null
@@ -1,84 +0,0 @@
-path: "tensorflow.train.Features"
-tf_class {
- is_instance: "<class \'tensorflow.core.example.feature_pb2.Features\'>"
- is_instance: "<type \'google.protobuf.pyext._message.CMessage\'>"
- member {
- name: "DESCRIPTOR"
- mtype: "<type \'google.protobuf.pyext._message.MessageDescriptor\'>"
- }
- member {
- name: "Extensions"
- mtype: "<type \'getset_descriptor\'>"
- }
- member {
- name: "FEATURE_FIELD_NUMBER"
- mtype: "<type \'int\'>"
- }
- member {
- name: "FeatureEntry"
- mtype: "<class \'google.protobuf.pyext.cpp_message.GeneratedProtocolMessageType\'>"
- }
- member_method {
- name: "ByteSize"
- }
- member_method {
- name: "Clear"
- }
- member_method {
- name: "ClearExtension"
- }
- member_method {
- name: "ClearField"
- }
- member_method {
- name: "CopyFrom"
- }
- member_method {
- name: "DiscardUnknownFields"
- }
- member_method {
- name: "FindInitializationErrors"
- }
- member_method {
- name: "FromString"
- }
- member_method {
- name: "HasExtension"
- }
- member_method {
- name: "HasField"
- }
- member_method {
- name: "IsInitialized"
- }
- member_method {
- name: "ListFields"
- }
- member_method {
- name: "MergeFrom"
- }
- member_method {
- name: "MergeFromString"
- }
- member_method {
- name: "ParseFromString"
- }
- member_method {
- name: "RegisterExtension"
- }
- member_method {
- name: "SerializePartialToString"
- }
- member_method {
- name: "SerializeToString"
- }
- member_method {
- name: "SetInParent"
- }
- member_method {
- name: "WhichOneof"
- }
- member_method {
- name: "__init__"
- }
-}
diff --git a/tensorflow/tools/api/golden/tensorflow.train.-float-list.pbtxt b/tensorflow/tools/api/golden/tensorflow.train.-float-list.pbtxt
deleted file mode 100644
index e3f01334b5..0000000000
--- a/tensorflow/tools/api/golden/tensorflow.train.-float-list.pbtxt
+++ /dev/null
@@ -1,80 +0,0 @@
-path: "tensorflow.train.FloatList"
-tf_class {
- is_instance: "<class \'tensorflow.core.example.feature_pb2.FloatList\'>"
- is_instance: "<type \'google.protobuf.pyext._message.CMessage\'>"
- member {
- name: "DESCRIPTOR"
- mtype: "<type \'google.protobuf.pyext._message.MessageDescriptor\'>"
- }
- member {
- name: "Extensions"
- mtype: "<type \'getset_descriptor\'>"
- }
- member {
- name: "VALUE_FIELD_NUMBER"
- mtype: "<type \'int\'>"
- }
- member_method {
- name: "ByteSize"
- }
- member_method {
- name: "Clear"
- }
- member_method {
- name: "ClearExtension"
- }
- member_method {
- name: "ClearField"
- }
- member_method {
- name: "CopyFrom"
- }
- member_method {
- name: "DiscardUnknownFields"
- }
- member_method {
- name: "FindInitializationErrors"
- }
- member_method {
- name: "FromString"
- }
- member_method {
- name: "HasExtension"
- }
- member_method {
- name: "HasField"
- }
- member_method {
- name: "IsInitialized"
- }
- member_method {
- name: "ListFields"
- }
- member_method {
- name: "MergeFrom"
- }
- member_method {
- name: "MergeFromString"
- }
- member_method {
- name: "ParseFromString"
- }
- member_method {
- name: "RegisterExtension"
- }
- member_method {
- name: "SerializePartialToString"
- }
- member_method {
- name: "SerializeToString"
- }
- member_method {
- name: "SetInParent"
- }
- member_method {
- name: "WhichOneof"
- }
- member_method {
- name: "__init__"
- }
-}
diff --git a/tensorflow/tools/api/golden/tensorflow.train.-int64-list.pbtxt b/tensorflow/tools/api/golden/tensorflow.train.-int64-list.pbtxt
deleted file mode 100644
index 8917dc122c..0000000000
--- a/tensorflow/tools/api/golden/tensorflow.train.-int64-list.pbtxt
+++ /dev/null
@@ -1,80 +0,0 @@
-path: "tensorflow.train.Int64List"
-tf_class {
- is_instance: "<class \'tensorflow.core.example.feature_pb2.Int64List\'>"
- is_instance: "<type \'google.protobuf.pyext._message.CMessage\'>"
- member {
- name: "DESCRIPTOR"
- mtype: "<type \'google.protobuf.pyext._message.MessageDescriptor\'>"
- }
- member {
- name: "Extensions"
- mtype: "<type \'getset_descriptor\'>"
- }
- member {
- name: "VALUE_FIELD_NUMBER"
- mtype: "<type \'int\'>"
- }
- member_method {
- name: "ByteSize"
- }
- member_method {
- name: "Clear"
- }
- member_method {
- name: "ClearExtension"
- }
- member_method {
- name: "ClearField"
- }
- member_method {
- name: "CopyFrom"
- }
- member_method {
- name: "DiscardUnknownFields"
- }
- member_method {
- name: "FindInitializationErrors"
- }
- member_method {
- name: "FromString"
- }
- member_method {
- name: "HasExtension"
- }
- member_method {
- name: "HasField"
- }
- member_method {
- name: "IsInitialized"
- }
- member_method {
- name: "ListFields"
- }
- member_method {
- name: "MergeFrom"
- }
- member_method {
- name: "MergeFromString"
- }
- member_method {
- name: "ParseFromString"
- }
- member_method {
- name: "RegisterExtension"
- }
- member_method {
- name: "SerializePartialToString"
- }
- member_method {
- name: "SerializeToString"
- }
- member_method {
- name: "SetInParent"
- }
- member_method {
- name: "WhichOneof"
- }
- member_method {
- name: "__init__"
- }
-}
diff --git a/tensorflow/tools/api/golden/tensorflow.train.-job-def.-tasks-entry.pbtxt b/tensorflow/tools/api/golden/tensorflow.train.-job-def.-tasks-entry.pbtxt
deleted file mode 100644
index ac6d81541a..0000000000
--- a/tensorflow/tools/api/golden/tensorflow.train.-job-def.-tasks-entry.pbtxt
+++ /dev/null
@@ -1,84 +0,0 @@
-path: "tensorflow.train.JobDef.TasksEntry"
-tf_class {
- is_instance: "<class \'tensorflow.core.protobuf.cluster_pb2.TasksEntry\'>"
- is_instance: "<type \'google.protobuf.pyext._message.CMessage\'>"
- member {
- name: "DESCRIPTOR"
- mtype: "<type \'google.protobuf.pyext._message.MessageDescriptor\'>"
- }
- member {
- name: "Extensions"
- mtype: "<type \'getset_descriptor\'>"
- }
- member {
- name: "KEY_FIELD_NUMBER"
- mtype: "<type \'int\'>"
- }
- member {
- name: "VALUE_FIELD_NUMBER"
- mtype: "<type \'int\'>"
- }
- member_method {
- name: "ByteSize"
- }
- member_method {
- name: "Clear"
- }
- member_method {
- name: "ClearExtension"
- }
- member_method {
- name: "ClearField"
- }
- member_method {
- name: "CopyFrom"
- }
- member_method {
- name: "DiscardUnknownFields"
- }
- member_method {
- name: "FindInitializationErrors"
- }
- member_method {
- name: "FromString"
- }
- member_method {
- name: "HasExtension"
- }
- member_method {
- name: "HasField"
- }
- member_method {
- name: "IsInitialized"
- }
- member_method {
- name: "ListFields"
- }
- member_method {
- name: "MergeFrom"
- }
- member_method {
- name: "MergeFromString"
- }
- member_method {
- name: "ParseFromString"
- }
- member_method {
- name: "RegisterExtension"
- }
- member_method {
- name: "SerializePartialToString"
- }
- member_method {
- name: "SerializeToString"
- }
- member_method {
- name: "SetInParent"
- }
- member_method {
- name: "WhichOneof"
- }
- member_method {
- name: "__init__"
- }
-}
diff --git a/tensorflow/tools/api/golden/tensorflow.train.-job-def.pbtxt b/tensorflow/tools/api/golden/tensorflow.train.-job-def.pbtxt
deleted file mode 100644
index ce34537fa1..0000000000
--- a/tensorflow/tools/api/golden/tensorflow.train.-job-def.pbtxt
+++ /dev/null
@@ -1,88 +0,0 @@
-path: "tensorflow.train.JobDef"
-tf_class {
- is_instance: "<class \'tensorflow.core.protobuf.cluster_pb2.JobDef\'>"
- is_instance: "<type \'google.protobuf.pyext._message.CMessage\'>"
- member {
- name: "DESCRIPTOR"
- mtype: "<type \'google.protobuf.pyext._message.MessageDescriptor\'>"
- }
- member {
- name: "Extensions"
- mtype: "<type \'getset_descriptor\'>"
- }
- member {
- name: "NAME_FIELD_NUMBER"
- mtype: "<type \'int\'>"
- }
- member {
- name: "TASKS_FIELD_NUMBER"
- mtype: "<type \'int\'>"
- }
- member {
- name: "TasksEntry"
- mtype: "<class \'google.protobuf.pyext.cpp_message.GeneratedProtocolMessageType\'>"
- }
- member_method {
- name: "ByteSize"
- }
- member_method {
- name: "Clear"
- }
- member_method {
- name: "ClearExtension"
- }
- member_method {
- name: "ClearField"
- }
- member_method {
- name: "CopyFrom"
- }
- member_method {
- name: "DiscardUnknownFields"
- }
- member_method {
- name: "FindInitializationErrors"
- }
- member_method {
- name: "FromString"
- }
- member_method {
- name: "HasExtension"
- }
- member_method {
- name: "HasField"
- }
- member_method {
- name: "IsInitialized"
- }
- member_method {
- name: "ListFields"
- }
- member_method {
- name: "MergeFrom"
- }
- member_method {
- name: "MergeFromString"
- }
- member_method {
- name: "ParseFromString"
- }
- member_method {
- name: "RegisterExtension"
- }
- member_method {
- name: "SerializePartialToString"
- }
- member_method {
- name: "SerializeToString"
- }
- member_method {
- name: "SetInParent"
- }
- member_method {
- name: "WhichOneof"
- }
- member_method {
- name: "__init__"
- }
-}
diff --git a/tensorflow/tools/api/golden/tensorflow.train.-saver-def.pbtxt b/tensorflow/tools/api/golden/tensorflow.train.-saver-def.pbtxt
deleted file mode 100644
index 84498a64f5..0000000000
--- a/tensorflow/tools/api/golden/tensorflow.train.-saver-def.pbtxt
+++ /dev/null
@@ -1,120 +0,0 @@
-path: "tensorflow.train.SaverDef"
-tf_class {
- is_instance: "<class \'tensorflow.core.protobuf.saver_pb2.SaverDef\'>"
- is_instance: "<type \'google.protobuf.pyext._message.CMessage\'>"
- member {
- name: "CheckpointFormatVersion"
- mtype: "<class \'google.protobuf.internal.enum_type_wrapper.EnumTypeWrapper\'>"
- }
- member {
- name: "DESCRIPTOR"
- mtype: "<type \'google.protobuf.pyext._message.MessageDescriptor\'>"
- }
- member {
- name: "Extensions"
- mtype: "<type \'getset_descriptor\'>"
- }
- member {
- name: "FILENAME_TENSOR_NAME_FIELD_NUMBER"
- mtype: "<type \'int\'>"
- }
- member {
- name: "KEEP_CHECKPOINT_EVERY_N_HOURS_FIELD_NUMBER"
- mtype: "<type \'int\'>"
- }
- member {
- name: "LEGACY"
- mtype: "<type \'int\'>"
- }
- member {
- name: "MAX_TO_KEEP_FIELD_NUMBER"
- mtype: "<type \'int\'>"
- }
- member {
- name: "RESTORE_OP_NAME_FIELD_NUMBER"
- mtype: "<type \'int\'>"
- }
- member {
- name: "SAVE_TENSOR_NAME_FIELD_NUMBER"
- mtype: "<type \'int\'>"
- }
- member {
- name: "SHARDED_FIELD_NUMBER"
- mtype: "<type \'int\'>"
- }
- member {
- name: "V1"
- mtype: "<type \'int\'>"
- }
- member {
- name: "V2"
- mtype: "<type \'int\'>"
- }
- member {
- name: "VERSION_FIELD_NUMBER"
- mtype: "<type \'int\'>"
- }
- member_method {
- name: "ByteSize"
- }
- member_method {
- name: "Clear"
- }
- member_method {
- name: "ClearExtension"
- }
- member_method {
- name: "ClearField"
- }
- member_method {
- name: "CopyFrom"
- }
- member_method {
- name: "DiscardUnknownFields"
- }
- member_method {
- name: "FindInitializationErrors"
- }
- member_method {
- name: "FromString"
- }
- member_method {
- name: "HasExtension"
- }
- member_method {
- name: "HasField"
- }
- member_method {
- name: "IsInitialized"
- }
- member_method {
- name: "ListFields"
- }
- member_method {
- name: "MergeFrom"
- }
- member_method {
- name: "MergeFromString"
- }
- member_method {
- name: "ParseFromString"
- }
- member_method {
- name: "RegisterExtension"
- }
- member_method {
- name: "SerializePartialToString"
- }
- member_method {
- name: "SerializeToString"
- }
- member_method {
- name: "SetInParent"
- }
- member_method {
- name: "WhichOneof"
- }
- member_method {
- name: "__init__"
- }
-}
diff --git a/tensorflow/tools/api/golden/tensorflow.train.-sequence-example.pbtxt b/tensorflow/tools/api/golden/tensorflow.train.-sequence-example.pbtxt
deleted file mode 100644
index 9ab9553702..0000000000
--- a/tensorflow/tools/api/golden/tensorflow.train.-sequence-example.pbtxt
+++ /dev/null
@@ -1,84 +0,0 @@
-path: "tensorflow.train.SequenceExample"
-tf_class {
- is_instance: "<class \'tensorflow.core.example.example_pb2.SequenceExample\'>"
- is_instance: "<type \'google.protobuf.pyext._message.CMessage\'>"
- member {
- name: "CONTEXT_FIELD_NUMBER"
- mtype: "<type \'int\'>"
- }
- member {
- name: "DESCRIPTOR"
- mtype: "<type \'google.protobuf.pyext._message.MessageDescriptor\'>"
- }
- member {
- name: "Extensions"
- mtype: "<type \'getset_descriptor\'>"
- }
- member {
- name: "FEATURE_LISTS_FIELD_NUMBER"
- mtype: "<type \'int\'>"
- }
- member_method {
- name: "ByteSize"
- }
- member_method {
- name: "Clear"
- }
- member_method {
- name: "ClearExtension"
- }
- member_method {
- name: "ClearField"
- }
- member_method {
- name: "CopyFrom"
- }
- member_method {
- name: "DiscardUnknownFields"
- }
- member_method {
- name: "FindInitializationErrors"
- }
- member_method {
- name: "FromString"
- }
- member_method {
- name: "HasExtension"
- }
- member_method {
- name: "HasField"
- }
- member_method {
- name: "IsInitialized"
- }
- member_method {
- name: "ListFields"
- }
- member_method {
- name: "MergeFrom"
- }
- member_method {
- name: "MergeFromString"
- }
- member_method {
- name: "ParseFromString"
- }
- member_method {
- name: "RegisterExtension"
- }
- member_method {
- name: "SerializePartialToString"
- }
- member_method {
- name: "SerializeToString"
- }
- member_method {
- name: "SetInParent"
- }
- member_method {
- name: "WhichOneof"
- }
- member_method {
- name: "__init__"
- }
-}
diff --git a/tensorflow/tools/api/golden/tensorflow.train.-server-def.pbtxt b/tensorflow/tools/api/golden/tensorflow.train.-server-def.pbtxt
deleted file mode 100644
index af0a3b73cc..0000000000
--- a/tensorflow/tools/api/golden/tensorflow.train.-server-def.pbtxt
+++ /dev/null
@@ -1,96 +0,0 @@
-path: "tensorflow.train.ServerDef"
-tf_class {
- is_instance: "<class \'tensorflow.core.protobuf.tensorflow_server_pb2.ServerDef\'>"
- is_instance: "<type \'google.protobuf.pyext._message.CMessage\'>"
- member {
- name: "CLUSTER_FIELD_NUMBER"
- mtype: "<type \'int\'>"
- }
- member {
- name: "DEFAULT_SESSION_CONFIG_FIELD_NUMBER"
- mtype: "<type \'int\'>"
- }
- member {
- name: "DESCRIPTOR"
- mtype: "<type \'google.protobuf.pyext._message.MessageDescriptor\'>"
- }
- member {
- name: "Extensions"
- mtype: "<type \'getset_descriptor\'>"
- }
- member {
- name: "JOB_NAME_FIELD_NUMBER"
- mtype: "<type \'int\'>"
- }
- member {
- name: "PROTOCOL_FIELD_NUMBER"
- mtype: "<type \'int\'>"
- }
- member {
- name: "TASK_INDEX_FIELD_NUMBER"
- mtype: "<type \'int\'>"
- }
- member_method {
- name: "ByteSize"
- }
- member_method {
- name: "Clear"
- }
- member_method {
- name: "ClearExtension"
- }
- member_method {
- name: "ClearField"
- }
- member_method {
- name: "CopyFrom"
- }
- member_method {
- name: "DiscardUnknownFields"
- }
- member_method {
- name: "FindInitializationErrors"
- }
- member_method {
- name: "FromString"
- }
- member_method {
- name: "HasExtension"
- }
- member_method {
- name: "HasField"
- }
- member_method {
- name: "IsInitialized"
- }
- member_method {
- name: "ListFields"
- }
- member_method {
- name: "MergeFrom"
- }
- member_method {
- name: "MergeFromString"
- }
- member_method {
- name: "ParseFromString"
- }
- member_method {
- name: "RegisterExtension"
- }
- member_method {
- name: "SerializePartialToString"
- }
- member_method {
- name: "SerializeToString"
- }
- member_method {
- name: "SetInParent"
- }
- member_method {
- name: "WhichOneof"
- }
- member_method {
- name: "__init__"
- }
-}
diff --git a/tensorflow/tools/api/golden/tensorflow.-aggregation-method.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.-aggregation-method.pbtxt
index f79029d3fe..f79029d3fe 100644
--- a/tensorflow/tools/api/golden/tensorflow.-aggregation-method.pbtxt
+++ b/tensorflow/tools/api/golden/v1/tensorflow.-aggregation-method.pbtxt
diff --git a/tensorflow/tools/api/golden/v1/tensorflow.-attr-value.-list-value.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.-attr-value.-list-value.pbtxt
new file mode 100644
index 0000000000..f1dffd5952
--- /dev/null
+++ b/tensorflow/tools/api/golden/v1/tensorflow.-attr-value.-list-value.pbtxt
@@ -0,0 +1,70 @@
+path: "tensorflow.AttrValue.ListValue"
+tf_proto {
+ descriptor {
+ name: "ListValue"
+ field {
+ name: "s"
+ number: 2
+ label: LABEL_REPEATED
+ type: TYPE_BYTES
+ }
+ field {
+ name: "i"
+ number: 3
+ label: LABEL_REPEATED
+ type: TYPE_INT64
+ options {
+ packed: true
+ }
+ }
+ field {
+ name: "f"
+ number: 4
+ label: LABEL_REPEATED
+ type: TYPE_FLOAT
+ options {
+ packed: true
+ }
+ }
+ field {
+ name: "b"
+ number: 5
+ label: LABEL_REPEATED
+ type: TYPE_BOOL
+ options {
+ packed: true
+ }
+ }
+ field {
+ name: "type"
+ number: 6
+ label: LABEL_REPEATED
+ type: TYPE_ENUM
+ type_name: ".tensorflow.DataType"
+ options {
+ packed: true
+ }
+ }
+ field {
+ name: "shape"
+ number: 7
+ label: LABEL_REPEATED
+ type: TYPE_MESSAGE
+ type_name: ".tensorflow.TensorShapeProto"
+ }
+ field {
+ name: "tensor"
+ number: 8
+ label: LABEL_REPEATED
+ type: TYPE_MESSAGE
+ type_name: ".tensorflow.TensorProto"
+ }
+ field {
+ name: "func"
+ number: 9
+ label: LABEL_REPEATED
+ type: TYPE_MESSAGE
+ type_name: ".tensorflow.NameAttrList"
+ }
+ }
+}
diff --git a/tensorflow/tools/api/golden/v1/tensorflow.-attr-value.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.-attr-value.pbtxt
new file mode 100644
index 0000000000..6ccd64f428
--- /dev/null
+++ b/tensorflow/tools/api/golden/v1/tensorflow.-attr-value.pbtxt
@@ -0,0 +1,151 @@
+path: "tensorflow.AttrValue"
+tf_proto {
+ descriptor {
+ name: "AttrValue"
+ field {
+ name: "s"
+ number: 2
+ label: LABEL_OPTIONAL
+ type: TYPE_BYTES
+ oneof_index: 0
+ }
+ field {
+ name: "i"
+ number: 3
+ label: LABEL_OPTIONAL
+ type: TYPE_INT64
+ oneof_index: 0
+ }
+ field {
+ name: "f"
+ number: 4
+ label: LABEL_OPTIONAL
+ type: TYPE_FLOAT
+ oneof_index: 0
+ }
+ field {
+ name: "b"
+ number: 5
+ label: LABEL_OPTIONAL
+ type: TYPE_BOOL
+ oneof_index: 0
+ }
+ field {
+ name: "type"
+ number: 6
+ label: LABEL_OPTIONAL
+ type: TYPE_ENUM
+ type_name: ".tensorflow.DataType"
+ oneof_index: 0
+ }
+ field {
+ name: "shape"
+ number: 7
+ label: LABEL_OPTIONAL
+ type: TYPE_MESSAGE
+ type_name: ".tensorflow.TensorShapeProto"
+ oneof_index: 0
+ }
+ field {
+ name: "tensor"
+ number: 8
+ label: LABEL_OPTIONAL
+ type: TYPE_MESSAGE
+ type_name: ".tensorflow.TensorProto"
+ oneof_index: 0
+ }
+ field {
+ name: "list"
+ number: 1
+ label: LABEL_OPTIONAL
+ type: TYPE_MESSAGE
+ type_name: ".tensorflow.AttrValue.ListValue"
+ oneof_index: 0
+ }
+ field {
+ name: "func"
+ number: 10
+ label: LABEL_OPTIONAL
+ type: TYPE_MESSAGE
+ type_name: ".tensorflow.NameAttrList"
+ oneof_index: 0
+ }
+ field {
+ name: "placeholder"
+ number: 9
+ label: LABEL_OPTIONAL
+ type: TYPE_STRING
+ oneof_index: 0
+ }
+ nested_type {
+ name: "ListValue"
+ field {
+ name: "s"
+ number: 2
+ label: LABEL_REPEATED
+ type: TYPE_BYTES
+ }
+ field {
+ name: "i"
+ number: 3
+ label: LABEL_REPEATED
+ type: TYPE_INT64
+ options {
+ packed: true
+ }
+ }
+ field {
+ name: "f"
+ number: 4
+ label: LABEL_REPEATED
+ type: TYPE_FLOAT
+ options {
+ packed: true
+ }
+ }
+ field {
+ name: "b"
+ number: 5
+ label: LABEL_REPEATED
+ type: TYPE_BOOL
+ options {
+ packed: true
+ }
+ }
+ field {
+ name: "type"
+ number: 6
+ label: LABEL_REPEATED
+ type: TYPE_ENUM
+ type_name: ".tensorflow.DataType"
+ options {
+ packed: true
+ }
+ }
+ field {
+ name: "shape"
+ number: 7
+ label: LABEL_REPEATED
+ type: TYPE_MESSAGE
+ type_name: ".tensorflow.TensorShapeProto"
+ }
+ field {
+ name: "tensor"
+ number: 8
+ label: LABEL_REPEATED
+ type: TYPE_MESSAGE
+ type_name: ".tensorflow.TensorProto"
+ }
+ field {
+ name: "func"
+ number: 9
+ label: LABEL_REPEATED
+ type: TYPE_MESSAGE
+ type_name: ".tensorflow.NameAttrList"
+ }
+ }
+ oneof_decl {
+ name: "value"
+ }
+ }
+}
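The two goldens above switch tensorflow.AttrValue and its nested ListValue from Python-class snapshots to tf_proto descriptors. A minimal sketch of where these AttrValue messages surface in the TF 1.x Python API, assuming the default graph and the NodeDef.attr map recorded later in this diff:

import tensorflow as tf  # TF 1.x style API, matching these goldens

x = tf.constant(1.0, name="x")      # adds a Const node to the default graph
attr_map = x.op.node_def.attr       # map<string, AttrValue> per the NodeDef golden
dtype_attr = attr_map["dtype"]      # AttrValue using the "type" arm of the "value" oneof
print(dtype_attr.type)              # raw tensorflow.DataType enum value
print(x.op.get_attr("dtype"))       # same attribute decoded to tf.float32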
diff --git a/tensorflow/tools/api/golden/tensorflow.-conditional-accumulator-base.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.-conditional-accumulator-base.pbtxt
index c9a32c16b3..c9a32c16b3 100644
--- a/tensorflow/tools/api/golden/tensorflow.-conditional-accumulator-base.pbtxt
+++ b/tensorflow/tools/api/golden/v1/tensorflow.-conditional-accumulator-base.pbtxt
diff --git a/tensorflow/tools/api/golden/tensorflow.-conditional-accumulator.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.-conditional-accumulator.pbtxt
index d23b3bd0ca..15e0ab76b6 100644
--- a/tensorflow/tools/api/golden/tensorflow.-conditional-accumulator.pbtxt
+++ b/tensorflow/tools/api/golden/v1/tensorflow.-conditional-accumulator.pbtxt
@@ -17,7 +17,7 @@ tf_class {
}
member_method {
name: "__init__"
- argspec: "args=[\'self\', \'dtype\', \'shape\', \'shared_name\', \'name\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'conditional_accumulator\'], "
+ argspec: "args=[\'self\', \'dtype\', \'shape\', \'shared_name\', \'name\', \'reduction_type\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'conditional_accumulator\', \'MEAN\'], "
}
member_method {
name: "apply_grad"
diff --git a/tensorflow/tools/api/golden/v1/tensorflow.-config-proto.-device-count-entry.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.-config-proto.-device-count-entry.pbtxt
new file mode 100644
index 0000000000..d9b1426828
--- /dev/null
+++ b/tensorflow/tools/api/golden/v1/tensorflow.-config-proto.-device-count-entry.pbtxt
@@ -0,0 +1,21 @@
+path: "tensorflow.ConfigProto.DeviceCountEntry"
+tf_proto {
+ descriptor {
+ name: "DeviceCountEntry"
+ field {
+ name: "key"
+ number: 1
+ label: LABEL_OPTIONAL
+ type: TYPE_STRING
+ }
+ field {
+ name: "value"
+ number: 2
+ label: LABEL_OPTIONAL
+ type: TYPE_INT32
+ }
+ options {
+ map_entry: true
+ }
+ }
+}
diff --git a/tensorflow/tools/api/golden/v1/tensorflow.-config-proto.-experimental.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.-config-proto.-experimental.pbtxt
new file mode 100644
index 0000000000..9f6dcd8fdb
--- /dev/null
+++ b/tensorflow/tools/api/golden/v1/tensorflow.-config-proto.-experimental.pbtxt
@@ -0,0 +1,22 @@
+path: "tensorflow.ConfigProto.Experimental"
+tf_proto {
+ descriptor {
+ name: "Experimental"
+ field {
+ name: "collective_group_leader"
+ number: 1
+ label: LABEL_OPTIONAL
+ type: TYPE_STRING
+ }
+ field {
+ name: "executor_type"
+ number: 3
+ label: LABEL_OPTIONAL
+ type: TYPE_STRING
+ }
+ reserved_range {
+ start: 2
+ end: 3
+ }
+ }
+}
diff --git a/tensorflow/tools/api/golden/v1/tensorflow.-config-proto.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.-config-proto.pbtxt
new file mode 100644
index 0000000000..f3a515163d
--- /dev/null
+++ b/tensorflow/tools/api/golden/v1/tensorflow.-config-proto.pbtxt
@@ -0,0 +1,146 @@
+path: "tensorflow.ConfigProto"
+tf_proto {
+ descriptor {
+ name: "ConfigProto"
+ field {
+ name: "device_count"
+ number: 1
+ label: LABEL_REPEATED
+ type: TYPE_MESSAGE
+ type_name: ".tensorflow.ConfigProto.DeviceCountEntry"
+ }
+ field {
+ name: "intra_op_parallelism_threads"
+ number: 2
+ label: LABEL_OPTIONAL
+ type: TYPE_INT32
+ }
+ field {
+ name: "inter_op_parallelism_threads"
+ number: 5
+ label: LABEL_OPTIONAL
+ type: TYPE_INT32
+ }
+ field {
+ name: "use_per_session_threads"
+ number: 9
+ label: LABEL_OPTIONAL
+ type: TYPE_BOOL
+ }
+ field {
+ name: "session_inter_op_thread_pool"
+ number: 12
+ label: LABEL_REPEATED
+ type: TYPE_MESSAGE
+ type_name: ".tensorflow.ThreadPoolOptionProto"
+ }
+ field {
+ name: "placement_period"
+ number: 3
+ label: LABEL_OPTIONAL
+ type: TYPE_INT32
+ }
+ field {
+ name: "device_filters"
+ number: 4
+ label: LABEL_REPEATED
+ type: TYPE_STRING
+ }
+ field {
+ name: "gpu_options"
+ number: 6
+ label: LABEL_OPTIONAL
+ type: TYPE_MESSAGE
+ type_name: ".tensorflow.GPUOptions"
+ }
+ field {
+ name: "allow_soft_placement"
+ number: 7
+ label: LABEL_OPTIONAL
+ type: TYPE_BOOL
+ }
+ field {
+ name: "log_device_placement"
+ number: 8
+ label: LABEL_OPTIONAL
+ type: TYPE_BOOL
+ }
+ field {
+ name: "graph_options"
+ number: 10
+ label: LABEL_OPTIONAL
+ type: TYPE_MESSAGE
+ type_name: ".tensorflow.GraphOptions"
+ }
+ field {
+ name: "operation_timeout_in_ms"
+ number: 11
+ label: LABEL_OPTIONAL
+ type: TYPE_INT64
+ }
+ field {
+ name: "rpc_options"
+ number: 13
+ label: LABEL_OPTIONAL
+ type: TYPE_MESSAGE
+ type_name: ".tensorflow.RPCOptions"
+ }
+ field {
+ name: "cluster_def"
+ number: 14
+ label: LABEL_OPTIONAL
+ type: TYPE_MESSAGE
+ type_name: ".tensorflow.ClusterDef"
+ }
+ field {
+ name: "isolate_session_state"
+ number: 15
+ label: LABEL_OPTIONAL
+ type: TYPE_BOOL
+ }
+ field {
+ name: "experimental"
+ number: 16
+ label: LABEL_OPTIONAL
+ type: TYPE_MESSAGE
+ type_name: ".tensorflow.ConfigProto.Experimental"
+ }
+ nested_type {
+ name: "DeviceCountEntry"
+ field {
+ name: "key"
+ number: 1
+ label: LABEL_OPTIONAL
+ type: TYPE_STRING
+ }
+ field {
+ name: "value"
+ number: 2
+ label: LABEL_OPTIONAL
+ type: TYPE_INT32
+ }
+ options {
+ map_entry: true
+ }
+ }
+ nested_type {
+ name: "Experimental"
+ field {
+ name: "collective_group_leader"
+ number: 1
+ label: LABEL_OPTIONAL
+ type: TYPE_STRING
+ }
+ field {
+ name: "executor_type"
+ number: 3
+ label: LABEL_OPTIONAL
+ type: TYPE_STRING
+ }
+ reserved_range {
+ start: 2
+ end: 3
+ }
+ }
+ }
+}
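The three ConfigProto goldens above (DeviceCountEntry, Experimental, and the full message) now track the proto descriptor itself. A brief illustrative sketch of populating the listed fields from Python; the concrete values are arbitrary:

import tensorflow as tf  # TF 1.x

config = tf.ConfigProto(
    device_count={"CPU": 2},            # DeviceCountEntry map from the golden
    intra_op_parallelism_threads=4,
    inter_op_parallelism_threads=2,
    allow_soft_placement=True,
    log_device_placement=False)
config.experimental.collective_group_leader = "/job:worker/replica:0/task:0"

with tf.Session(config=config) as sess:
    pass  # the session is created with the options above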
diff --git a/tensorflow/tools/api/golden/tensorflow.-d-type.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.-d-type.pbtxt
index 0b5b88bba8..0b5b88bba8 100644
--- a/tensorflow/tools/api/golden/tensorflow.-d-type.pbtxt
+++ b/tensorflow/tools/api/golden/v1/tensorflow.-d-type.pbtxt
diff --git a/tensorflow/tools/api/golden/tensorflow.-device-spec.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.-device-spec.pbtxt
index 92e535c341..92e535c341 100644
--- a/tensorflow/tools/api/golden/tensorflow.-device-spec.pbtxt
+++ b/tensorflow/tools/api/golden/v1/tensorflow.-device-spec.pbtxt
diff --git a/tensorflow/tools/api/golden/tensorflow.-dimension.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.-dimension.pbtxt
index a9ab27719b..a9ab27719b 100644
--- a/tensorflow/tools/api/golden/tensorflow.-dimension.pbtxt
+++ b/tensorflow/tools/api/golden/v1/tensorflow.-dimension.pbtxt
diff --git a/tensorflow/tools/api/golden/v1/tensorflow.-event.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.-event.pbtxt
new file mode 100644
index 0000000000..3b75a1735b
--- /dev/null
+++ b/tensorflow/tools/api/golden/v1/tensorflow.-event.pbtxt
@@ -0,0 +1,74 @@
+path: "tensorflow.Event"
+tf_proto {
+ descriptor {
+ name: "Event"
+ field {
+ name: "wall_time"
+ number: 1
+ label: LABEL_OPTIONAL
+ type: TYPE_DOUBLE
+ }
+ field {
+ name: "step"
+ number: 2
+ label: LABEL_OPTIONAL
+ type: TYPE_INT64
+ }
+ field {
+ name: "file_version"
+ number: 3
+ label: LABEL_OPTIONAL
+ type: TYPE_STRING
+ oneof_index: 0
+ }
+ field {
+ name: "graph_def"
+ number: 4
+ label: LABEL_OPTIONAL
+ type: TYPE_BYTES
+ oneof_index: 0
+ }
+ field {
+ name: "summary"
+ number: 5
+ label: LABEL_OPTIONAL
+ type: TYPE_MESSAGE
+ type_name: ".tensorflow.Summary"
+ oneof_index: 0
+ }
+ field {
+ name: "log_message"
+ number: 6
+ label: LABEL_OPTIONAL
+ type: TYPE_MESSAGE
+ type_name: ".tensorflow.LogMessage"
+ oneof_index: 0
+ }
+ field {
+ name: "session_log"
+ number: 7
+ label: LABEL_OPTIONAL
+ type: TYPE_MESSAGE
+ type_name: ".tensorflow.SessionLog"
+ oneof_index: 0
+ }
+ field {
+ name: "tagged_run_metadata"
+ number: 8
+ label: LABEL_OPTIONAL
+ type: TYPE_MESSAGE
+ type_name: ".tensorflow.TaggedRunMetadata"
+ oneof_index: 0
+ }
+ field {
+ name: "meta_graph_def"
+ number: 9
+ label: LABEL_OPTIONAL
+ type: TYPE_BYTES
+ oneof_index: 0
+ }
+ oneof_decl {
+ name: "what"
+ }
+ }
+}
diff --git a/tensorflow/tools/api/golden/tensorflow.-f-i-f-o-queue.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.-f-i-f-o-queue.pbtxt
index a095616c00..a095616c00 100644
--- a/tensorflow/tools/api/golden/tensorflow.-f-i-f-o-queue.pbtxt
+++ b/tensorflow/tools/api/golden/v1/tensorflow.-f-i-f-o-queue.pbtxt
diff --git a/tensorflow/tools/api/golden/tensorflow.-fixed-len-feature.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.-fixed-len-feature.pbtxt
index 6933814a7b..6933814a7b 100644
--- a/tensorflow/tools/api/golden/tensorflow.-fixed-len-feature.pbtxt
+++ b/tensorflow/tools/api/golden/v1/tensorflow.-fixed-len-feature.pbtxt
diff --git a/tensorflow/tools/api/golden/tensorflow.-fixed-len-sequence-feature.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.-fixed-len-sequence-feature.pbtxt
index c538787951..c538787951 100644
--- a/tensorflow/tools/api/golden/tensorflow.-fixed-len-sequence-feature.pbtxt
+++ b/tensorflow/tools/api/golden/v1/tensorflow.-fixed-len-sequence-feature.pbtxt
diff --git a/tensorflow/tools/api/golden/tensorflow.-fixed-length-record-reader.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.-fixed-length-record-reader.pbtxt
index 260c796fd6..260c796fd6 100644
--- a/tensorflow/tools/api/golden/tensorflow.-fixed-length-record-reader.pbtxt
+++ b/tensorflow/tools/api/golden/v1/tensorflow.-fixed-length-record-reader.pbtxt
diff --git a/tensorflow/tools/api/golden/v1/tensorflow.-g-p-u-options.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.-g-p-u-options.pbtxt
new file mode 100644
index 0000000000..353e63127d
--- /dev/null
+++ b/tensorflow/tools/api/golden/v1/tensorflow.-g-p-u-options.pbtxt
@@ -0,0 +1,92 @@
+path: "tensorflow.GPUOptions"
+tf_proto {
+ descriptor {
+ name: "GPUOptions"
+ field {
+ name: "per_process_gpu_memory_fraction"
+ number: 1
+ label: LABEL_OPTIONAL
+ type: TYPE_DOUBLE
+ }
+ field {
+ name: "allow_growth"
+ number: 4
+ label: LABEL_OPTIONAL
+ type: TYPE_BOOL
+ }
+ field {
+ name: "allocator_type"
+ number: 2
+ label: LABEL_OPTIONAL
+ type: TYPE_STRING
+ }
+ field {
+ name: "deferred_deletion_bytes"
+ number: 3
+ label: LABEL_OPTIONAL
+ type: TYPE_INT64
+ }
+ field {
+ name: "visible_device_list"
+ number: 5
+ label: LABEL_OPTIONAL
+ type: TYPE_STRING
+ }
+ field {
+ name: "polling_active_delay_usecs"
+ number: 6
+ label: LABEL_OPTIONAL
+ type: TYPE_INT32
+ }
+ field {
+ name: "polling_inactive_delay_msecs"
+ number: 7
+ label: LABEL_OPTIONAL
+ type: TYPE_INT32
+ }
+ field {
+ name: "force_gpu_compatible"
+ number: 8
+ label: LABEL_OPTIONAL
+ type: TYPE_BOOL
+ }
+ field {
+ name: "experimental"
+ number: 9
+ label: LABEL_OPTIONAL
+ type: TYPE_MESSAGE
+ type_name: ".tensorflow.GPUOptions.Experimental"
+ }
+ nested_type {
+ name: "Experimental"
+ field {
+ name: "virtual_devices"
+ number: 1
+ label: LABEL_REPEATED
+ type: TYPE_MESSAGE
+ type_name: ".tensorflow.GPUOptions.Experimental.VirtualDevices"
+ }
+ field {
+ name: "use_unified_memory"
+ number: 2
+ label: LABEL_OPTIONAL
+ type: TYPE_BOOL
+ }
+ field {
+ name: "num_dev_to_dev_copy_streams"
+ number: 3
+ label: LABEL_OPTIONAL
+ type: TYPE_INT32
+ }
+ nested_type {
+ name: "VirtualDevices"
+ field {
+ name: "memory_limit_mb"
+ number: 1
+ label: LABEL_REPEATED
+ type: TYPE_FLOAT
+ }
+ }
+ }
+ }
+}
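Similarly, the GPUOptions golden above pins the proto fields that tf.ConfigProto.gpu_options accepts. A hedged example using three of them; the values are illustrative:

import tensorflow as tf  # TF 1.x

gpu_options = tf.GPUOptions(
    allow_growth=True,                     # grow allocations instead of reserving all memory
    per_process_gpu_memory_fraction=0.5,   # cap the fraction of device memory to use
    visible_device_list="0")               # expose only GPU 0 to this process

config = tf.ConfigProto(gpu_options=gpu_options)
sess = tf.Session(config=config)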
diff --git a/tensorflow/tools/api/golden/tensorflow.-gradient-tape.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.-gradient-tape.pbtxt
index 7405202b89..2f4257a66a 100644
--- a/tensorflow/tools/api/golden/tensorflow.-gradient-tape.pbtxt
+++ b/tensorflow/tools/api/golden/v1/tensorflow.-gradient-tape.pbtxt
@@ -4,13 +4,21 @@ tf_class {
is_instance: "<type \'object\'>"
member_method {
name: "__init__"
- argspec: "args=[\'self\', \'persistent\'], varargs=None, keywords=None, defaults=[\'False\'], "
+ argspec: "args=[\'self\', \'persistent\', \'watch_accessed_variables\'], varargs=None, keywords=None, defaults=[\'False\', \'True\'], "
}
member_method {
name: "gradient"
argspec: "args=[\'self\', \'target\', \'sources\', \'output_gradients\'], varargs=None, keywords=None, defaults=[\'None\'], "
}
member_method {
+ name: "reset"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "stop_recording"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
name: "watch"
argspec: "args=[\'self\', \'tensor\'], varargs=None, keywords=None, defaults=None"
}
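The GradientTape hunk above records a new watch_accessed_variables constructor flag plus reset and stop_recording methods. A small eager-mode sketch of all three additions (illustrative, not taken from the diff):

import tensorflow as tf  # TF 1.x with eager execution
tf.enable_eager_execution()

v = tf.Variable(3.0)

# With watch_accessed_variables=False nothing is traced unless watched explicitly.
with tf.GradientTape(watch_accessed_variables=False) as tape:
    tape.watch(v)
    with tape.stop_recording():   # ops in this block are not recorded on the tape
        _ = v * 10.0
    loss = v * v
    # tape.reset() would discard everything recorded so far, e.g. to restart
    # accumulation inside a long training loop.

print(tape.gradient(loss, v))     # d(v*v)/dv = 2 * v = 6.0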
diff --git a/tensorflow/tools/api/golden/v1/tensorflow.-graph-def.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.-graph-def.pbtxt
new file mode 100644
index 0000000000..19eccff03d
--- /dev/null
+++ b/tensorflow/tools/api/golden/v1/tensorflow.-graph-def.pbtxt
@@ -0,0 +1,36 @@
+path: "tensorflow.GraphDef"
+tf_proto {
+ descriptor {
+ name: "GraphDef"
+ field {
+ name: "node"
+ number: 1
+ label: LABEL_REPEATED
+ type: TYPE_MESSAGE
+ type_name: ".tensorflow.NodeDef"
+ }
+ field {
+ name: "versions"
+ number: 4
+ label: LABEL_OPTIONAL
+ type: TYPE_MESSAGE
+ type_name: ".tensorflow.VersionDef"
+ }
+ field {
+ name: "version"
+ number: 3
+ label: LABEL_OPTIONAL
+ type: TYPE_INT32
+ options {
+ deprecated: true
+ }
+ }
+ field {
+ name: "library"
+ number: 2
+ label: LABEL_OPTIONAL
+ type: TYPE_MESSAGE
+ type_name: ".tensorflow.FunctionDefLibrary"
+ }
+ }
+}
diff --git a/tensorflow/tools/api/golden/tensorflow.-graph-keys.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.-graph-keys.pbtxt
index ffe4790933..ffe4790933 100644
--- a/tensorflow/tools/api/golden/tensorflow.-graph-keys.pbtxt
+++ b/tensorflow/tools/api/golden/v1/tensorflow.-graph-keys.pbtxt
diff --git a/tensorflow/tools/api/golden/v1/tensorflow.-graph-options.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.-graph-options.pbtxt
new file mode 100644
index 0000000000..a9f99bc171
--- /dev/null
+++ b/tensorflow/tools/api/golden/v1/tensorflow.-graph-options.pbtxt
@@ -0,0 +1,67 @@
+path: "tensorflow.GraphOptions"
+tf_proto {
+ descriptor {
+ name: "GraphOptions"
+ field {
+ name: "enable_recv_scheduling"
+ number: 2
+ label: LABEL_OPTIONAL
+ type: TYPE_BOOL
+ }
+ field {
+ name: "optimizer_options"
+ number: 3
+ label: LABEL_OPTIONAL
+ type: TYPE_MESSAGE
+ type_name: ".tensorflow.OptimizerOptions"
+ }
+ field {
+ name: "build_cost_model"
+ number: 4
+ label: LABEL_OPTIONAL
+ type: TYPE_INT64
+ }
+ field {
+ name: "build_cost_model_after"
+ number: 9
+ label: LABEL_OPTIONAL
+ type: TYPE_INT64
+ }
+ field {
+ name: "infer_shapes"
+ number: 5
+ label: LABEL_OPTIONAL
+ type: TYPE_BOOL
+ }
+ field {
+ name: "place_pruned_graph"
+ number: 6
+ label: LABEL_OPTIONAL
+ type: TYPE_BOOL
+ }
+ field {
+ name: "enable_bfloat16_sendrecv"
+ number: 7
+ label: LABEL_OPTIONAL
+ type: TYPE_BOOL
+ }
+ field {
+ name: "timeline_step"
+ number: 8
+ label: LABEL_OPTIONAL
+ type: TYPE_INT32
+ }
+ field {
+ name: "rewrite_options"
+ number: 10
+ label: LABEL_OPTIONAL
+ type: TYPE_MESSAGE
+ type_name: ".tensorflow.RewriterConfig"
+ }
+ reserved_range {
+ start: 1
+ end: 2
+ }
+ reserved_name: "skip_common_subexpression_elimination"
+ }
+}
diff --git a/tensorflow/tools/api/golden/tensorflow.-graph.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.-graph.pbtxt
index cdaeb55e30..cdaeb55e30 100644
--- a/tensorflow/tools/api/golden/tensorflow.-graph.pbtxt
+++ b/tensorflow/tools/api/golden/v1/tensorflow.-graph.pbtxt
diff --git a/tensorflow/tools/api/golden/v1/tensorflow.-histogram-proto.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.-histogram-proto.pbtxt
new file mode 100644
index 0000000000..d4402f330b
--- /dev/null
+++ b/tensorflow/tools/api/golden/v1/tensorflow.-histogram-proto.pbtxt
@@ -0,0 +1,54 @@
+path: "tensorflow.HistogramProto"
+tf_proto {
+ descriptor {
+ name: "HistogramProto"
+ field {
+ name: "min"
+ number: 1
+ label: LABEL_OPTIONAL
+ type: TYPE_DOUBLE
+ }
+ field {
+ name: "max"
+ number: 2
+ label: LABEL_OPTIONAL
+ type: TYPE_DOUBLE
+ }
+ field {
+ name: "num"
+ number: 3
+ label: LABEL_OPTIONAL
+ type: TYPE_DOUBLE
+ }
+ field {
+ name: "sum"
+ number: 4
+ label: LABEL_OPTIONAL
+ type: TYPE_DOUBLE
+ }
+ field {
+ name: "sum_squares"
+ number: 5
+ label: LABEL_OPTIONAL
+ type: TYPE_DOUBLE
+ }
+ field {
+ name: "bucket_limit"
+ number: 6
+ label: LABEL_REPEATED
+ type: TYPE_DOUBLE
+ options {
+ packed: true
+ }
+ }
+ field {
+ name: "bucket"
+ number: 7
+ label: LABEL_REPEATED
+ type: TYPE_DOUBLE
+ options {
+ packed: true
+ }
+ }
+ }
+}
diff --git a/tensorflow/tools/api/golden/tensorflow.-identity-reader.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.-identity-reader.pbtxt
index 2eda320d63..2eda320d63 100644
--- a/tensorflow/tools/api/golden/tensorflow.-identity-reader.pbtxt
+++ b/tensorflow/tools/api/golden/v1/tensorflow.-identity-reader.pbtxt
diff --git a/tensorflow/tools/api/golden/tensorflow.-indexed-slices.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.-indexed-slices.pbtxt
index fee84d8530..fee84d8530 100644
--- a/tensorflow/tools/api/golden/tensorflow.-indexed-slices.pbtxt
+++ b/tensorflow/tools/api/golden/v1/tensorflow.-indexed-slices.pbtxt
diff --git a/tensorflow/tools/api/golden/tensorflow.-interactive-session.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.-interactive-session.pbtxt
index 0a3b81bf82..0a3b81bf82 100644
--- a/tensorflow/tools/api/golden/tensorflow.-interactive-session.pbtxt
+++ b/tensorflow/tools/api/golden/v1/tensorflow.-interactive-session.pbtxt
diff --git a/tensorflow/tools/api/golden/tensorflow.-l-m-d-b-reader.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.-l-m-d-b-reader.pbtxt
index f9b7e9bbca..f9b7e9bbca 100644
--- a/tensorflow/tools/api/golden/tensorflow.-l-m-d-b-reader.pbtxt
+++ b/tensorflow/tools/api/golden/v1/tensorflow.-l-m-d-b-reader.pbtxt
diff --git a/tensorflow/tools/api/golden/v1/tensorflow.-log-message.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.-log-message.pbtxt
new file mode 100644
index 0000000000..5023aa96bf
--- /dev/null
+++ b/tensorflow/tools/api/golden/v1/tensorflow.-log-message.pbtxt
@@ -0,0 +1,46 @@
+path: "tensorflow.LogMessage"
+tf_proto {
+ descriptor {
+ name: "LogMessage"
+ field {
+ name: "level"
+ number: 1
+ label: LABEL_OPTIONAL
+ type: TYPE_ENUM
+ type_name: ".tensorflow.LogMessage.Level"
+ }
+ field {
+ name: "message"
+ number: 2
+ label: LABEL_OPTIONAL
+ type: TYPE_STRING
+ }
+ enum_type {
+ name: "Level"
+ value {
+ name: "UNKNOWN"
+ number: 0
+ }
+ value {
+ name: "DEBUGGING"
+ number: 10
+ }
+ value {
+ name: "INFO"
+ number: 20
+ }
+ value {
+ name: "WARN"
+ number: 30
+ }
+ value {
+ name: "ERROR"
+ number: 40
+ }
+ value {
+ name: "FATAL"
+ number: 50
+ }
+ }
+ }
+}
diff --git a/tensorflow/tools/api/golden/v1/tensorflow.-meta-graph-def.-collection-def-entry.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.-meta-graph-def.-collection-def-entry.pbtxt
new file mode 100644
index 0000000000..0ba09bec4b
--- /dev/null
+++ b/tensorflow/tools/api/golden/v1/tensorflow.-meta-graph-def.-collection-def-entry.pbtxt
@@ -0,0 +1,22 @@
+path: "tensorflow.MetaGraphDef.CollectionDefEntry"
+tf_proto {
+ descriptor {
+ name: "CollectionDefEntry"
+ field {
+ name: "key"
+ number: 1
+ label: LABEL_OPTIONAL
+ type: TYPE_STRING
+ }
+ field {
+ name: "value"
+ number: 2
+ label: LABEL_OPTIONAL
+ type: TYPE_MESSAGE
+ type_name: ".tensorflow.CollectionDef"
+ }
+ options {
+ map_entry: true
+ }
+ }
+}
diff --git a/tensorflow/tools/api/golden/v1/tensorflow.-meta-graph-def.-meta-info-def.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.-meta-graph-def.-meta-info-def.pbtxt
new file mode 100644
index 0000000000..41c62a407b
--- /dev/null
+++ b/tensorflow/tools/api/golden/v1/tensorflow.-meta-graph-def.-meta-info-def.pbtxt
@@ -0,0 +1,50 @@
+path: "tensorflow.MetaGraphDef.MetaInfoDef"
+tf_proto {
+ descriptor {
+ name: "MetaInfoDef"
+ field {
+ name: "meta_graph_version"
+ number: 1
+ label: LABEL_OPTIONAL
+ type: TYPE_STRING
+ }
+ field {
+ name: "stripped_op_list"
+ number: 2
+ label: LABEL_OPTIONAL
+ type: TYPE_MESSAGE
+ type_name: ".tensorflow.OpList"
+ }
+ field {
+ name: "any_info"
+ number: 3
+ label: LABEL_OPTIONAL
+ type: TYPE_MESSAGE
+ type_name: ".google.protobuf.Any"
+ }
+ field {
+ name: "tags"
+ number: 4
+ label: LABEL_REPEATED
+ type: TYPE_STRING
+ }
+ field {
+ name: "tensorflow_version"
+ number: 5
+ label: LABEL_OPTIONAL
+ type: TYPE_STRING
+ }
+ field {
+ name: "tensorflow_git_version"
+ number: 6
+ label: LABEL_OPTIONAL
+ type: TYPE_STRING
+ }
+ field {
+ name: "stripped_default_attrs"
+ number: 7
+ label: LABEL_OPTIONAL
+ type: TYPE_BOOL
+ }
+ }
+}
diff --git a/tensorflow/tools/api/golden/v1/tensorflow.-meta-graph-def.-signature-def-entry.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.-meta-graph-def.-signature-def-entry.pbtxt
new file mode 100644
index 0000000000..73dc414a77
--- /dev/null
+++ b/tensorflow/tools/api/golden/v1/tensorflow.-meta-graph-def.-signature-def-entry.pbtxt
@@ -0,0 +1,22 @@
+path: "tensorflow.MetaGraphDef.SignatureDefEntry"
+tf_proto {
+ descriptor {
+ name: "SignatureDefEntry"
+ field {
+ name: "key"
+ number: 1
+ label: LABEL_OPTIONAL
+ type: TYPE_STRING
+ }
+ field {
+ name: "value"
+ number: 2
+ label: LABEL_OPTIONAL
+ type: TYPE_MESSAGE
+ type_name: ".tensorflow.SignatureDef"
+ }
+ options {
+ map_entry: true
+ }
+ }
+}
diff --git a/tensorflow/tools/api/golden/v1/tensorflow.-meta-graph-def.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.-meta-graph-def.pbtxt
new file mode 100644
index 0000000000..d71c2358c9
--- /dev/null
+++ b/tensorflow/tools/api/golden/v1/tensorflow.-meta-graph-def.pbtxt
@@ -0,0 +1,133 @@
+path: "tensorflow.MetaGraphDef"
+tf_proto {
+ descriptor {
+ name: "MetaGraphDef"
+ field {
+ name: "meta_info_def"
+ number: 1
+ label: LABEL_OPTIONAL
+ type: TYPE_MESSAGE
+ type_name: ".tensorflow.MetaGraphDef.MetaInfoDef"
+ }
+ field {
+ name: "graph_def"
+ number: 2
+ label: LABEL_OPTIONAL
+ type: TYPE_MESSAGE
+ type_name: ".tensorflow.GraphDef"
+ }
+ field {
+ name: "saver_def"
+ number: 3
+ label: LABEL_OPTIONAL
+ type: TYPE_MESSAGE
+ type_name: ".tensorflow.SaverDef"
+ }
+ field {
+ name: "collection_def"
+ number: 4
+ label: LABEL_REPEATED
+ type: TYPE_MESSAGE
+ type_name: ".tensorflow.MetaGraphDef.CollectionDefEntry"
+ }
+ field {
+ name: "signature_def"
+ number: 5
+ label: LABEL_REPEATED
+ type: TYPE_MESSAGE
+ type_name: ".tensorflow.MetaGraphDef.SignatureDefEntry"
+ }
+ field {
+ name: "asset_file_def"
+ number: 6
+ label: LABEL_REPEATED
+ type: TYPE_MESSAGE
+ type_name: ".tensorflow.AssetFileDef"
+ }
+ nested_type {
+ name: "MetaInfoDef"
+ field {
+ name: "meta_graph_version"
+ number: 1
+ label: LABEL_OPTIONAL
+ type: TYPE_STRING
+ }
+ field {
+ name: "stripped_op_list"
+ number: 2
+ label: LABEL_OPTIONAL
+ type: TYPE_MESSAGE
+ type_name: ".tensorflow.OpList"
+ }
+ field {
+ name: "any_info"
+ number: 3
+ label: LABEL_OPTIONAL
+ type: TYPE_MESSAGE
+ type_name: ".google.protobuf.Any"
+ }
+ field {
+ name: "tags"
+ number: 4
+ label: LABEL_REPEATED
+ type: TYPE_STRING
+ }
+ field {
+ name: "tensorflow_version"
+ number: 5
+ label: LABEL_OPTIONAL
+ type: TYPE_STRING
+ }
+ field {
+ name: "tensorflow_git_version"
+ number: 6
+ label: LABEL_OPTIONAL
+ type: TYPE_STRING
+ }
+ field {
+ name: "stripped_default_attrs"
+ number: 7
+ label: LABEL_OPTIONAL
+ type: TYPE_BOOL
+ }
+ }
+ nested_type {
+ name: "CollectionDefEntry"
+ field {
+ name: "key"
+ number: 1
+ label: LABEL_OPTIONAL
+ type: TYPE_STRING
+ }
+ field {
+ name: "value"
+ number: 2
+ label: LABEL_OPTIONAL
+ type: TYPE_MESSAGE
+ type_name: ".tensorflow.CollectionDef"
+ }
+ options {
+ map_entry: true
+ }
+ }
+ nested_type {
+ name: "SignatureDefEntry"
+ field {
+ name: "key"
+ number: 1
+ label: LABEL_OPTIONAL
+ type: TYPE_STRING
+ }
+ field {
+ name: "value"
+ number: 2
+ label: LABEL_OPTIONAL
+ type: TYPE_MESSAGE
+ type_name: ".tensorflow.SignatureDef"
+ }
+ options {
+ map_entry: true
+ }
+ }
+ }
+}
diff --git a/tensorflow/tools/api/golden/v1/tensorflow.-name-attr-list.-attr-entry.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.-name-attr-list.-attr-entry.pbtxt
new file mode 100644
index 0000000000..b119b20877
--- /dev/null
+++ b/tensorflow/tools/api/golden/v1/tensorflow.-name-attr-list.-attr-entry.pbtxt
@@ -0,0 +1,22 @@
+path: "tensorflow.NameAttrList.AttrEntry"
+tf_proto {
+ descriptor {
+ name: "AttrEntry"
+ field {
+ name: "key"
+ number: 1
+ label: LABEL_OPTIONAL
+ type: TYPE_STRING
+ }
+ field {
+ name: "value"
+ number: 2
+ label: LABEL_OPTIONAL
+ type: TYPE_MESSAGE
+ type_name: ".tensorflow.AttrValue"
+ }
+ options {
+ map_entry: true
+ }
+ }
+}
diff --git a/tensorflow/tools/api/golden/v1/tensorflow.-name-attr-list.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.-name-attr-list.pbtxt
new file mode 100644
index 0000000000..fcdb411ffc
--- /dev/null
+++ b/tensorflow/tools/api/golden/v1/tensorflow.-name-attr-list.pbtxt
@@ -0,0 +1,38 @@
+path: "tensorflow.NameAttrList"
+tf_proto {
+ descriptor {
+ name: "NameAttrList"
+ field {
+ name: "name"
+ number: 1
+ label: LABEL_OPTIONAL
+ type: TYPE_STRING
+ }
+ field {
+ name: "attr"
+ number: 2
+ label: LABEL_REPEATED
+ type: TYPE_MESSAGE
+ type_name: ".tensorflow.NameAttrList.AttrEntry"
+ }
+ nested_type {
+ name: "AttrEntry"
+ field {
+ name: "key"
+ number: 1
+ label: LABEL_OPTIONAL
+ type: TYPE_STRING
+ }
+ field {
+ name: "value"
+ number: 2
+ label: LABEL_OPTIONAL
+ type: TYPE_MESSAGE
+ type_name: ".tensorflow.AttrValue"
+ }
+ options {
+ map_entry: true
+ }
+ }
+ }
+}
diff --git a/tensorflow/tools/api/golden/v1/tensorflow.-node-def.-attr-entry.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.-node-def.-attr-entry.pbtxt
new file mode 100644
index 0000000000..622e4c3d0f
--- /dev/null
+++ b/tensorflow/tools/api/golden/v1/tensorflow.-node-def.-attr-entry.pbtxt
@@ -0,0 +1,22 @@
+path: "tensorflow.NodeDef.AttrEntry"
+tf_proto {
+ descriptor {
+ name: "AttrEntry"
+ field {
+ name: "key"
+ number: 1
+ label: LABEL_OPTIONAL
+ type: TYPE_STRING
+ }
+ field {
+ name: "value"
+ number: 2
+ label: LABEL_OPTIONAL
+ type: TYPE_MESSAGE
+ type_name: ".tensorflow.AttrValue"
+ }
+ options {
+ map_entry: true
+ }
+ }
+}
diff --git a/tensorflow/tools/api/golden/v1/tensorflow.-node-def.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.-node-def.pbtxt
new file mode 100644
index 0000000000..646fa8abb9
--- /dev/null
+++ b/tensorflow/tools/api/golden/v1/tensorflow.-node-def.pbtxt
@@ -0,0 +1,56 @@
+path: "tensorflow.NodeDef"
+tf_proto {
+ descriptor {
+ name: "NodeDef"
+ field {
+ name: "name"
+ number: 1
+ label: LABEL_OPTIONAL
+ type: TYPE_STRING
+ }
+ field {
+ name: "op"
+ number: 2
+ label: LABEL_OPTIONAL
+ type: TYPE_STRING
+ }
+ field {
+ name: "input"
+ number: 3
+ label: LABEL_REPEATED
+ type: TYPE_STRING
+ }
+ field {
+ name: "device"
+ number: 4
+ label: LABEL_OPTIONAL
+ type: TYPE_STRING
+ }
+ field {
+ name: "attr"
+ number: 5
+ label: LABEL_REPEATED
+ type: TYPE_MESSAGE
+ type_name: ".tensorflow.NodeDef.AttrEntry"
+ }
+ nested_type {
+ name: "AttrEntry"
+ field {
+ name: "key"
+ number: 1
+ label: LABEL_OPTIONAL
+ type: TYPE_STRING
+ }
+ field {
+ name: "value"
+ number: 2
+ label: LABEL_OPTIONAL
+ type: TYPE_MESSAGE
+ type_name: ".tensorflow.AttrValue"
+ }
+ options {
+ map_entry: true
+ }
+ }
+ }
+}
diff --git a/tensorflow/tools/api/golden/tensorflow.-op-error.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.-op-error.pbtxt
index 7e59615534..7e59615534 100644
--- a/tensorflow/tools/api/golden/tensorflow.-op-error.pbtxt
+++ b/tensorflow/tools/api/golden/v1/tensorflow.-op-error.pbtxt
diff --git a/tensorflow/tools/api/golden/tensorflow.-operation.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.-operation.pbtxt
index 64240f7069..64240f7069 100644
--- a/tensorflow/tools/api/golden/tensorflow.-operation.pbtxt
+++ b/tensorflow/tools/api/golden/v1/tensorflow.-operation.pbtxt
diff --git a/tensorflow/tools/api/golden/v1/tensorflow.-optimizer-options.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.-optimizer-options.pbtxt
new file mode 100644
index 0000000000..3ccf9d459b
--- /dev/null
+++ b/tensorflow/tools/api/golden/v1/tensorflow.-optimizer-options.pbtxt
@@ -0,0 +1,74 @@
+path: "tensorflow.OptimizerOptions"
+tf_proto {
+ descriptor {
+ name: "OptimizerOptions"
+ field {
+ name: "do_common_subexpression_elimination"
+ number: 1
+ label: LABEL_OPTIONAL
+ type: TYPE_BOOL
+ }
+ field {
+ name: "do_constant_folding"
+ number: 2
+ label: LABEL_OPTIONAL
+ type: TYPE_BOOL
+ }
+ field {
+ name: "max_folded_constant_in_bytes"
+ number: 6
+ label: LABEL_OPTIONAL
+ type: TYPE_INT64
+ }
+ field {
+ name: "do_function_inlining"
+ number: 4
+ label: LABEL_OPTIONAL
+ type: TYPE_BOOL
+ }
+ field {
+ name: "opt_level"
+ number: 3
+ label: LABEL_OPTIONAL
+ type: TYPE_ENUM
+ type_name: ".tensorflow.OptimizerOptions.Level"
+ }
+ field {
+ name: "global_jit_level"
+ number: 5
+ label: LABEL_OPTIONAL
+ type: TYPE_ENUM
+ type_name: ".tensorflow.OptimizerOptions.GlobalJitLevel"
+ }
+ enum_type {
+ name: "Level"
+ value {
+ name: "L1"
+ number: 0
+ }
+ value {
+ name: "L0"
+ number: -1
+ }
+ }
+ enum_type {
+ name: "GlobalJitLevel"
+ value {
+ name: "DEFAULT"
+ number: 0
+ }
+ value {
+ name: "OFF"
+ number: -1
+ }
+ value {
+ name: "ON_1"
+ number: 1
+ }
+ value {
+ name: "ON_2"
+ number: 2
+ }
+ }
+ }
+}
diff --git a/tensorflow/tools/api/golden/tensorflow.-padding-f-i-f-o-queue.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.-padding-f-i-f-o-queue.pbtxt
index 8fed133561..8fed133561 100644
--- a/tensorflow/tools/api/golden/tensorflow.-padding-f-i-f-o-queue.pbtxt
+++ b/tensorflow/tools/api/golden/v1/tensorflow.-padding-f-i-f-o-queue.pbtxt
diff --git a/tensorflow/tools/api/golden/tensorflow.-priority-queue.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.-priority-queue.pbtxt
index ebb017e81b..ebb017e81b 100644
--- a/tensorflow/tools/api/golden/tensorflow.-priority-queue.pbtxt
+++ b/tensorflow/tools/api/golden/v1/tensorflow.-priority-queue.pbtxt
diff --git a/tensorflow/tools/api/golden/tensorflow.-queue-base.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.-queue-base.pbtxt
index 761f90989f..761f90989f 100644
--- a/tensorflow/tools/api/golden/tensorflow.-queue-base.pbtxt
+++ b/tensorflow/tools/api/golden/v1/tensorflow.-queue-base.pbtxt
diff --git a/tensorflow/tools/api/golden/tensorflow.-random-shuffle-queue.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.-random-shuffle-queue.pbtxt
index f3ca841393..f3ca841393 100644
--- a/tensorflow/tools/api/golden/tensorflow.-random-shuffle-queue.pbtxt
+++ b/tensorflow/tools/api/golden/v1/tensorflow.-random-shuffle-queue.pbtxt
diff --git a/tensorflow/tools/api/golden/tensorflow.-reader-base.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.-reader-base.pbtxt
index f6a3ce76a1..f6a3ce76a1 100644
--- a/tensorflow/tools/api/golden/tensorflow.-reader-base.pbtxt
+++ b/tensorflow/tools/api/golden/v1/tensorflow.-reader-base.pbtxt
diff --git a/tensorflow/tools/api/golden/tensorflow.-register-gradient.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.-register-gradient.pbtxt
index 4d6e4137d1..4d6e4137d1 100644
--- a/tensorflow/tools/api/golden/tensorflow.-register-gradient.pbtxt
+++ b/tensorflow/tools/api/golden/v1/tensorflow.-register-gradient.pbtxt
diff --git a/tensorflow/tools/api/golden/v1/tensorflow.-run-metadata.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.-run-metadata.pbtxt
new file mode 100644
index 0000000000..1287940326
--- /dev/null
+++ b/tensorflow/tools/api/golden/v1/tensorflow.-run-metadata.pbtxt
@@ -0,0 +1,27 @@
+path: "tensorflow.RunMetadata"
+tf_proto {
+ descriptor {
+ name: "RunMetadata"
+ field {
+ name: "step_stats"
+ number: 1
+ label: LABEL_OPTIONAL
+ type: TYPE_MESSAGE
+ type_name: ".tensorflow.StepStats"
+ }
+ field {
+ name: "cost_graph"
+ number: 2
+ label: LABEL_OPTIONAL
+ type: TYPE_MESSAGE
+ type_name: ".tensorflow.CostGraphDef"
+ }
+ field {
+ name: "partition_graphs"
+ number: 3
+ label: LABEL_REPEATED
+ type: TYPE_MESSAGE
+ type_name: ".tensorflow.GraphDef"
+ }
+ }
+}
diff --git a/tensorflow/tools/api/golden/v1/tensorflow.-run-options.-experimental.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.-run-options.-experimental.pbtxt
new file mode 100644
index 0000000000..537e73aa89
--- /dev/null
+++ b/tensorflow/tools/api/golden/v1/tensorflow.-run-options.-experimental.pbtxt
@@ -0,0 +1,12 @@
+path: "tensorflow.RunOptions.Experimental"
+tf_proto {
+ descriptor {
+ name: "Experimental"
+ field {
+ name: "collective_graph_key"
+ number: 1
+ label: LABEL_OPTIONAL
+ type: TYPE_INT64
+ }
+ }
+}
diff --git a/tensorflow/tools/api/golden/v1/tensorflow.-run-options.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.-run-options.pbtxt
new file mode 100644
index 0000000000..cec04a2bf0
--- /dev/null
+++ b/tensorflow/tools/api/golden/v1/tensorflow.-run-options.pbtxt
@@ -0,0 +1,83 @@
+path: "tensorflow.RunOptions"
+tf_proto {
+ descriptor {
+ name: "RunOptions"
+ field {
+ name: "trace_level"
+ number: 1
+ label: LABEL_OPTIONAL
+ type: TYPE_ENUM
+ type_name: ".tensorflow.RunOptions.TraceLevel"
+ }
+ field {
+ name: "timeout_in_ms"
+ number: 2
+ label: LABEL_OPTIONAL
+ type: TYPE_INT64
+ }
+ field {
+ name: "inter_op_thread_pool"
+ number: 3
+ label: LABEL_OPTIONAL
+ type: TYPE_INT32
+ }
+ field {
+ name: "output_partition_graphs"
+ number: 5
+ label: LABEL_OPTIONAL
+ type: TYPE_BOOL
+ }
+ field {
+ name: "debug_options"
+ number: 6
+ label: LABEL_OPTIONAL
+ type: TYPE_MESSAGE
+ type_name: ".tensorflow.DebugOptions"
+ }
+ field {
+ name: "report_tensor_allocations_upon_oom"
+ number: 7
+ label: LABEL_OPTIONAL
+ type: TYPE_BOOL
+ }
+ field {
+ name: "experimental"
+ number: 8
+ label: LABEL_OPTIONAL
+ type: TYPE_MESSAGE
+ type_name: ".tensorflow.RunOptions.Experimental"
+ }
+ nested_type {
+ name: "Experimental"
+ field {
+ name: "collective_graph_key"
+ number: 1
+ label: LABEL_OPTIONAL
+ type: TYPE_INT64
+ }
+ }
+ enum_type {
+ name: "TraceLevel"
+ value {
+ name: "NO_TRACE"
+ number: 0
+ }
+ value {
+ name: "SOFTWARE_TRACE"
+ number: 1
+ }
+ value {
+ name: "HARDWARE_TRACE"
+ number: 2
+ }
+ value {
+ name: "FULL_TRACE"
+ number: 3
+ }
+ }
+ reserved_range {
+ start: 4
+ end: 5
+ }
+ }
+}
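The RunMetadata and RunOptions goldens above cover the protos used for step tracing. A short sketch tying them together in a TF 1.x session run; the matmul workload is arbitrary:

import tensorflow as tf  # TF 1.x

x = tf.random_normal([512, 512])
y = tf.matmul(x, x)

run_options = tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE)  # enum from the golden
run_metadata = tf.RunMetadata()

with tf.Session() as sess:
    sess.run(y, options=run_options, run_metadata=run_metadata)

# step_stats (field 1 of RunMetadata above) now holds per-device node timings.
print(len(run_metadata.step_stats.dev_stats))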
diff --git a/tensorflow/tools/api/golden/v1/tensorflow.-session-log.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.-session-log.pbtxt
new file mode 100644
index 0000000000..259f241874
--- /dev/null
+++ b/tensorflow/tools/api/golden/v1/tensorflow.-session-log.pbtxt
@@ -0,0 +1,44 @@
+path: "tensorflow.SessionLog"
+tf_proto {
+ descriptor {
+ name: "SessionLog"
+ field {
+ name: "status"
+ number: 1
+ label: LABEL_OPTIONAL
+ type: TYPE_ENUM
+ type_name: ".tensorflow.SessionLog.SessionStatus"
+ }
+ field {
+ name: "checkpoint_path"
+ number: 2
+ label: LABEL_OPTIONAL
+ type: TYPE_STRING
+ }
+ field {
+ name: "msg"
+ number: 3
+ label: LABEL_OPTIONAL
+ type: TYPE_STRING
+ }
+ enum_type {
+ name: "SessionStatus"
+ value {
+ name: "STATUS_UNSPECIFIED"
+ number: 0
+ }
+ value {
+ name: "START"
+ number: 1
+ }
+ value {
+ name: "STOP"
+ number: 2
+ }
+ value {
+ name: "CHECKPOINT"
+ number: 3
+ }
+ }
+ }
+}
diff --git a/tensorflow/tools/api/golden/tensorflow.-session.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.-session.pbtxt
index 1d6b037f9c..1d6b037f9c 100644
--- a/tensorflow/tools/api/golden/tensorflow.-session.pbtxt
+++ b/tensorflow/tools/api/golden/v1/tensorflow.-session.pbtxt
diff --git a/tensorflow/tools/api/golden/tensorflow.-sparse-conditional-accumulator.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.-sparse-conditional-accumulator.pbtxt
index 2260279ad2..39ff336c4f 100644
--- a/tensorflow/tools/api/golden/tensorflow.-sparse-conditional-accumulator.pbtxt
+++ b/tensorflow/tools/api/golden/v1/tensorflow.-sparse-conditional-accumulator.pbtxt
@@ -17,7 +17,7 @@ tf_class {
}
member_method {
name: "__init__"
- argspec: "args=[\'self\', \'dtype\', \'shape\', \'shared_name\', \'name\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'sparse_conditional_accumulator\'], "
+ argspec: "args=[\'self\', \'dtype\', \'shape\', \'shared_name\', \'name\', \'reduction_type\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'sparse_conditional_accumulator\', \'MEAN\'], "
}
member_method {
name: "apply_grad"
diff --git a/tensorflow/tools/api/golden/tensorflow.-sparse-feature.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.-sparse-feature.pbtxt
index d875394fb5..d875394fb5 100644
--- a/tensorflow/tools/api/golden/tensorflow.-sparse-feature.pbtxt
+++ b/tensorflow/tools/api/golden/v1/tensorflow.-sparse-feature.pbtxt
diff --git a/tensorflow/tools/api/golden/tensorflow.-sparse-tensor-value.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.-sparse-tensor-value.pbtxt
index d33fd4d5d7..d33fd4d5d7 100644
--- a/tensorflow/tools/api/golden/tensorflow.-sparse-tensor-value.pbtxt
+++ b/tensorflow/tools/api/golden/v1/tensorflow.-sparse-tensor-value.pbtxt
diff --git a/tensorflow/tools/api/golden/tensorflow.-sparse-tensor.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.-sparse-tensor.pbtxt
index eac236d498..3add49e90d 100644
--- a/tensorflow/tools/api/golden/tensorflow.-sparse-tensor.pbtxt
+++ b/tensorflow/tools/api/golden/v1/tensorflow.-sparse-tensor.pbtxt
@@ -24,6 +24,10 @@ tf_class {
mtype: "<type \'property\'>"
}
member {
+ name: "shape"
+ mtype: "<type \'property\'>"
+ }
+ member {
name: "values"
mtype: "<type \'property\'>"
}
@@ -32,6 +36,10 @@ tf_class {
argspec: "args=[\'self\', \'indices\', \'values\', \'dense_shape\'], varargs=None, keywords=None, defaults=None"
}
member_method {
+ name: "consumers"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
name: "eval"
argspec: "args=[\'self\', \'feed_dict\', \'session\'], varargs=None, keywords=None, defaults=[\'None\', \'None\'], "
}
diff --git a/tensorflow/tools/api/golden/v1/tensorflow.-summary-metadata.-plugin-data.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.-summary-metadata.-plugin-data.pbtxt
new file mode 100644
index 0000000000..a66b74b315
--- /dev/null
+++ b/tensorflow/tools/api/golden/v1/tensorflow.-summary-metadata.-plugin-data.pbtxt
@@ -0,0 +1,18 @@
+path: "tensorflow.SummaryMetadata.PluginData"
+tf_proto {
+ descriptor {
+ name: "PluginData"
+ field {
+ name: "plugin_name"
+ number: 1
+ label: LABEL_OPTIONAL
+ type: TYPE_STRING
+ }
+ field {
+ name: "content"
+ number: 2
+ label: LABEL_OPTIONAL
+ type: TYPE_BYTES
+ }
+ }
+}
diff --git a/tensorflow/tools/api/golden/v1/tensorflow.-summary-metadata.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.-summary-metadata.pbtxt
new file mode 100644
index 0000000000..c02575b962
--- /dev/null
+++ b/tensorflow/tools/api/golden/v1/tensorflow.-summary-metadata.pbtxt
@@ -0,0 +1,40 @@
+path: "tensorflow.SummaryMetadata"
+tf_proto {
+ descriptor {
+ name: "SummaryMetadata"
+ field {
+ name: "plugin_data"
+ number: 1
+ label: LABEL_OPTIONAL
+ type: TYPE_MESSAGE
+ type_name: ".tensorflow.SummaryMetadata.PluginData"
+ }
+ field {
+ name: "display_name"
+ number: 2
+ label: LABEL_OPTIONAL
+ type: TYPE_STRING
+ }
+ field {
+ name: "summary_description"
+ number: 3
+ label: LABEL_OPTIONAL
+ type: TYPE_STRING
+ }
+ nested_type {
+ name: "PluginData"
+ field {
+ name: "plugin_name"
+ number: 1
+ label: LABEL_OPTIONAL
+ type: TYPE_STRING
+ }
+ field {
+ name: "content"
+ number: 2
+ label: LABEL_OPTIONAL
+ type: TYPE_BYTES
+ }
+ }
+ }
+}
diff --git a/tensorflow/tools/api/golden/v1/tensorflow.-summary.-audio.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.-summary.-audio.pbtxt
new file mode 100644
index 0000000000..94f712073e
--- /dev/null
+++ b/tensorflow/tools/api/golden/v1/tensorflow.-summary.-audio.pbtxt
@@ -0,0 +1,36 @@
+path: "tensorflow.Summary.Audio"
+tf_proto {
+ descriptor {
+ name: "Audio"
+ field {
+ name: "sample_rate"
+ number: 1
+ label: LABEL_OPTIONAL
+ type: TYPE_FLOAT
+ }
+ field {
+ name: "num_channels"
+ number: 2
+ label: LABEL_OPTIONAL
+ type: TYPE_INT64
+ }
+ field {
+ name: "length_frames"
+ number: 3
+ label: LABEL_OPTIONAL
+ type: TYPE_INT64
+ }
+ field {
+ name: "encoded_audio_string"
+ number: 4
+ label: LABEL_OPTIONAL
+ type: TYPE_BYTES
+ }
+ field {
+ name: "content_type"
+ number: 5
+ label: LABEL_OPTIONAL
+ type: TYPE_STRING
+ }
+ }
+}
diff --git a/tensorflow/tools/api/golden/v1/tensorflow.-summary.-image.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.-summary.-image.pbtxt
new file mode 100644
index 0000000000..fc1acb483b
--- /dev/null
+++ b/tensorflow/tools/api/golden/v1/tensorflow.-summary.-image.pbtxt
@@ -0,0 +1,30 @@
+path: "tensorflow.Summary.Image"
+tf_proto {
+ descriptor {
+ name: "Image"
+ field {
+ name: "height"
+ number: 1
+ label: LABEL_OPTIONAL
+ type: TYPE_INT32
+ }
+ field {
+ name: "width"
+ number: 2
+ label: LABEL_OPTIONAL
+ type: TYPE_INT32
+ }
+ field {
+ name: "colorspace"
+ number: 3
+ label: LABEL_OPTIONAL
+ type: TYPE_INT32
+ }
+ field {
+ name: "encoded_image_string"
+ number: 4
+ label: LABEL_OPTIONAL
+ type: TYPE_BYTES
+ }
+ }
+}
diff --git a/tensorflow/tools/api/golden/v1/tensorflow.-summary.-value.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.-summary.-value.pbtxt
new file mode 100644
index 0000000000..feb84b6ee9
--- /dev/null
+++ b/tensorflow/tools/api/golden/v1/tensorflow.-summary.-value.pbtxt
@@ -0,0 +1,74 @@
+path: "tensorflow.Summary.Value"
+tf_proto {
+ descriptor {
+ name: "Value"
+ field {
+ name: "node_name"
+ number: 7
+ label: LABEL_OPTIONAL
+ type: TYPE_STRING
+ }
+ field {
+ name: "tag"
+ number: 1
+ label: LABEL_OPTIONAL
+ type: TYPE_STRING
+ }
+ field {
+ name: "metadata"
+ number: 9
+ label: LABEL_OPTIONAL
+ type: TYPE_MESSAGE
+ type_name: ".tensorflow.SummaryMetadata"
+ }
+ field {
+ name: "simple_value"
+ number: 2
+ label: LABEL_OPTIONAL
+ type: TYPE_FLOAT
+ oneof_index: 0
+ }
+ field {
+ name: "obsolete_old_style_histogram"
+ number: 3
+ label: LABEL_OPTIONAL
+ type: TYPE_BYTES
+ oneof_index: 0
+ }
+ field {
+ name: "image"
+ number: 4
+ label: LABEL_OPTIONAL
+ type: TYPE_MESSAGE
+ type_name: ".tensorflow.Summary.Image"
+ oneof_index: 0
+ }
+ field {
+ name: "histo"
+ number: 5
+ label: LABEL_OPTIONAL
+ type: TYPE_MESSAGE
+ type_name: ".tensorflow.HistogramProto"
+ oneof_index: 0
+ }
+ field {
+ name: "audio"
+ number: 6
+ label: LABEL_OPTIONAL
+ type: TYPE_MESSAGE
+ type_name: ".tensorflow.Summary.Audio"
+ oneof_index: 0
+ }
+ field {
+ name: "tensor"
+ number: 8
+ label: LABEL_OPTIONAL
+ type: TYPE_MESSAGE
+ type_name: ".tensorflow.TensorProto"
+ oneof_index: 0
+ }
+ oneof_decl {
+ name: "value"
+ }
+ }
+}
diff --git a/tensorflow/tools/api/golden/v1/tensorflow.-summary.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.-summary.pbtxt
new file mode 100644
index 0000000000..b2bdff7171
--- /dev/null
+++ b/tensorflow/tools/api/golden/v1/tensorflow.-summary.pbtxt
@@ -0,0 +1,144 @@
+path: "tensorflow.Summary"
+tf_proto {
+ descriptor {
+ name: "Summary"
+ field {
+ name: "value"
+ number: 1
+ label: LABEL_REPEATED
+ type: TYPE_MESSAGE
+ type_name: ".tensorflow.Summary.Value"
+ }
+ nested_type {
+ name: "Image"
+ field {
+ name: "height"
+ number: 1
+ label: LABEL_OPTIONAL
+ type: TYPE_INT32
+ }
+ field {
+ name: "width"
+ number: 2
+ label: LABEL_OPTIONAL
+ type: TYPE_INT32
+ }
+ field {
+ name: "colorspace"
+ number: 3
+ label: LABEL_OPTIONAL
+ type: TYPE_INT32
+ }
+ field {
+ name: "encoded_image_string"
+ number: 4
+ label: LABEL_OPTIONAL
+ type: TYPE_BYTES
+ }
+ }
+ nested_type {
+ name: "Audio"
+ field {
+ name: "sample_rate"
+ number: 1
+ label: LABEL_OPTIONAL
+ type: TYPE_FLOAT
+ }
+ field {
+ name: "num_channels"
+ number: 2
+ label: LABEL_OPTIONAL
+ type: TYPE_INT64
+ }
+ field {
+ name: "length_frames"
+ number: 3
+ label: LABEL_OPTIONAL
+ type: TYPE_INT64
+ }
+ field {
+ name: "encoded_audio_string"
+ number: 4
+ label: LABEL_OPTIONAL
+ type: TYPE_BYTES
+ }
+ field {
+ name: "content_type"
+ number: 5
+ label: LABEL_OPTIONAL
+ type: TYPE_STRING
+ }
+ }
+ nested_type {
+ name: "Value"
+ field {
+ name: "node_name"
+ number: 7
+ label: LABEL_OPTIONAL
+ type: TYPE_STRING
+ }
+ field {
+ name: "tag"
+ number: 1
+ label: LABEL_OPTIONAL
+ type: TYPE_STRING
+ }
+ field {
+ name: "metadata"
+ number: 9
+ label: LABEL_OPTIONAL
+ type: TYPE_MESSAGE
+ type_name: ".tensorflow.SummaryMetadata"
+ }
+ field {
+ name: "simple_value"
+ number: 2
+ label: LABEL_OPTIONAL
+ type: TYPE_FLOAT
+ oneof_index: 0
+ }
+ field {
+ name: "obsolete_old_style_histogram"
+ number: 3
+ label: LABEL_OPTIONAL
+ type: TYPE_BYTES
+ oneof_index: 0
+ }
+ field {
+ name: "image"
+ number: 4
+ label: LABEL_OPTIONAL
+ type: TYPE_MESSAGE
+ type_name: ".tensorflow.Summary.Image"
+ oneof_index: 0
+ }
+ field {
+ name: "histo"
+ number: 5
+ label: LABEL_OPTIONAL
+ type: TYPE_MESSAGE
+ type_name: ".tensorflow.HistogramProto"
+ oneof_index: 0
+ }
+ field {
+ name: "audio"
+ number: 6
+ label: LABEL_OPTIONAL
+ type: TYPE_MESSAGE
+ type_name: ".tensorflow.Summary.Audio"
+ oneof_index: 0
+ }
+ field {
+ name: "tensor"
+ number: 8
+ label: LABEL_OPTIONAL
+ type: TYPE_MESSAGE
+ type_name: ".tensorflow.TensorProto"
+ oneof_index: 0
+ }
+ oneof_decl {
+ name: "value"
+ }
+ }
+ }
+}
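
The Summary and Summary.Value goldens above describe plain protos that can be built by hand; a minimal sketch writing a scalar through the simple_value member of the value oneof:

    import tensorflow as tf

    summary = tf.Summary(value=[
        tf.Summary.Value(tag='loss', simple_value=0.25),
    ])
    print(summary)  # suitable for tf.summary.FileWriter.add_summary
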
diff --git a/tensorflow/tools/api/golden/tensorflow.-t-f-record-reader.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.-t-f-record-reader.pbtxt
index cdf7937391..cdf7937391 100644
--- a/tensorflow/tools/api/golden/tensorflow.-t-f-record-reader.pbtxt
+++ b/tensorflow/tools/api/golden/v1/tensorflow.-t-f-record-reader.pbtxt
diff --git a/tensorflow/tools/api/golden/tensorflow.-tensor-array.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.-tensor-array.pbtxt
index ed088c41ed..ed088c41ed 100644
--- a/tensorflow/tools/api/golden/tensorflow.-tensor-array.pbtxt
+++ b/tensorflow/tools/api/golden/v1/tensorflow.-tensor-array.pbtxt
diff --git a/tensorflow/tools/api/golden/v1/tensorflow.-tensor-info.-coo-sparse.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.-tensor-info.-coo-sparse.pbtxt
new file mode 100644
index 0000000000..0064c8460c
--- /dev/null
+++ b/tensorflow/tools/api/golden/v1/tensorflow.-tensor-info.-coo-sparse.pbtxt
@@ -0,0 +1,24 @@
+path: "tensorflow.TensorInfo.CooSparse"
+tf_proto {
+ descriptor {
+ name: "CooSparse"
+ field {
+ name: "values_tensor_name"
+ number: 1
+ label: LABEL_OPTIONAL
+ type: TYPE_STRING
+ }
+ field {
+ name: "indices_tensor_name"
+ number: 2
+ label: LABEL_OPTIONAL
+ type: TYPE_STRING
+ }
+ field {
+ name: "dense_shape_tensor_name"
+ number: 3
+ label: LABEL_OPTIONAL
+ type: TYPE_STRING
+ }
+ }
+}
diff --git a/tensorflow/tools/api/golden/v1/tensorflow.-tensor-info.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.-tensor-info.pbtxt
new file mode 100644
index 0000000000..63566c808e
--- /dev/null
+++ b/tensorflow/tools/api/golden/v1/tensorflow.-tensor-info.pbtxt
@@ -0,0 +1,59 @@
+path: "tensorflow.TensorInfo"
+tf_proto {
+ descriptor {
+ name: "TensorInfo"
+ field {
+ name: "name"
+ number: 1
+ label: LABEL_OPTIONAL
+ type: TYPE_STRING
+ oneof_index: 0
+ }
+ field {
+ name: "coo_sparse"
+ number: 4
+ label: LABEL_OPTIONAL
+ type: TYPE_MESSAGE
+ type_name: ".tensorflow.TensorInfo.CooSparse"
+ oneof_index: 0
+ }
+ field {
+ name: "dtype"
+ number: 2
+ label: LABEL_OPTIONAL
+ type: TYPE_ENUM
+ type_name: ".tensorflow.DataType"
+ }
+ field {
+ name: "tensor_shape"
+ number: 3
+ label: LABEL_OPTIONAL
+ type: TYPE_MESSAGE
+ type_name: ".tensorflow.TensorShapeProto"
+ }
+ nested_type {
+ name: "CooSparse"
+ field {
+ name: "values_tensor_name"
+ number: 1
+ label: LABEL_OPTIONAL
+ type: TYPE_STRING
+ }
+ field {
+ name: "indices_tensor_name"
+ number: 2
+ label: LABEL_OPTIONAL
+ type: TYPE_STRING
+ }
+ field {
+ name: "dense_shape_tensor_name"
+ number: 3
+ label: LABEL_OPTIONAL
+ type: TYPE_STRING
+ }
+ }
+ oneof_decl {
+ name: "encoding"
+ }
+ }
+}
diff --git a/tensorflow/tools/api/golden/tensorflow.-tensor-shape.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.-tensor-shape.pbtxt
index 8e3598fb24..8e3598fb24 100644
--- a/tensorflow/tools/api/golden/tensorflow.-tensor-shape.pbtxt
+++ b/tensorflow/tools/api/golden/v1/tensorflow.-tensor-shape.pbtxt
diff --git a/tensorflow/tools/api/golden/tensorflow.-tensor.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.-tensor.pbtxt
index 38d19bb537..38d19bb537 100644
--- a/tensorflow/tools/api/golden/tensorflow.-tensor.pbtxt
+++ b/tensorflow/tools/api/golden/v1/tensorflow.-tensor.pbtxt
diff --git a/tensorflow/tools/api/golden/tensorflow.-text-line-reader.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.-text-line-reader.pbtxt
index e9779f0762..e9779f0762 100644
--- a/tensorflow/tools/api/golden/tensorflow.-text-line-reader.pbtxt
+++ b/tensorflow/tools/api/golden/v1/tensorflow.-text-line-reader.pbtxt
diff --git a/tensorflow/tools/api/golden/tensorflow.-var-len-feature.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.-var-len-feature.pbtxt
index 54b66f43f8..54b66f43f8 100644
--- a/tensorflow/tools/api/golden/tensorflow.-var-len-feature.pbtxt
+++ b/tensorflow/tools/api/golden/v1/tensorflow.-var-len-feature.pbtxt
diff --git a/tensorflow/tools/api/golden/v1/tensorflow.-variable-aggregation.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.-variable-aggregation.pbtxt
new file mode 100644
index 0000000000..66a20547eb
--- /dev/null
+++ b/tensorflow/tools/api/golden/v1/tensorflow.-variable-aggregation.pbtxt
@@ -0,0 +1,20 @@
+path: "tensorflow.VariableAggregation"
+tf_class {
+ is_instance: "<enum \'VariableAggregation\'>"
+ member {
+ name: "MEAN"
+ mtype: "<enum \'VariableAggregation\'>"
+ }
+ member {
+ name: "NONE"
+ mtype: "<enum \'VariableAggregation\'>"
+ }
+ member {
+ name: "ONLY_FIRST_TOWER"
+ mtype: "<enum \'VariableAggregation\'>"
+ }
+ member {
+ name: "SUM"
+ mtype: "<enum \'VariableAggregation\'>"
+ }
+}
diff --git a/tensorflow/tools/api/golden/tensorflow.-variable-scope.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.-variable-scope.pbtxt
index 8e539069da..c13eb7b8bb 100644
--- a/tensorflow/tools/api/golden/tensorflow.-variable-scope.pbtxt
+++ b/tensorflow/tools/api/golden/v1/tensorflow.-variable-scope.pbtxt
@@ -56,7 +56,7 @@ tf_class {
}
member_method {
name: "get_variable"
- argspec: "args=[\'self\', \'var_store\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'reuse\', \'trainable\', \'collections\', \'caching_device\', \'partitioner\', \'validate_shape\', \'use_resource\', \'custom_getter\', \'constraint\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'None\', \'None\', \'True\', \'None\', \'None\', \'None\', \'True\', \'None\', \'None\', \'None\'], "
+ argspec: "args=[\'self\', \'var_store\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'reuse\', \'trainable\', \'collections\', \'caching_device\', \'partitioner\', \'validate_shape\', \'use_resource\', \'custom_getter\', \'constraint\', \'synchronization\', \'aggregation\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'True\', \'None\', \'None\', \'None\', \'VariableSynchronization.AUTO\', \'VariableAggregation.NONE\'], "
}
member_method {
name: "global_variables"
diff --git a/tensorflow/tools/api/golden/v1/tensorflow.-variable-synchronization.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.-variable-synchronization.pbtxt
new file mode 100644
index 0000000000..7589bb2888
--- /dev/null
+++ b/tensorflow/tools/api/golden/v1/tensorflow.-variable-synchronization.pbtxt
@@ -0,0 +1,20 @@
+path: "tensorflow.VariableSynchronization"
+tf_class {
+ is_instance: "<enum \'VariableSynchronization\'>"
+ member {
+ name: "AUTO"
+ mtype: "<enum \'VariableSynchronization\'>"
+ }
+ member {
+ name: "NONE"
+ mtype: "<enum \'VariableSynchronization\'>"
+ }
+ member {
+ name: "ON_READ"
+ mtype: "<enum \'VariableSynchronization\'>"
+ }
+ member {
+ name: "ON_WRITE"
+ mtype: "<enum \'VariableSynchronization\'>"
+ }
+}
diff --git a/tensorflow/tools/api/golden/tensorflow.-variable.-save-slice-info.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.-variable.-save-slice-info.pbtxt
index ac3ccd468b..ac3ccd468b 100644
--- a/tensorflow/tools/api/golden/tensorflow.-variable.-save-slice-info.pbtxt
+++ b/tensorflow/tools/api/golden/v1/tensorflow.-variable.-save-slice-info.pbtxt
diff --git a/tensorflow/tools/api/golden/tensorflow.-variable.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.-variable.pbtxt
index 5a02bb2175..05698b03ee 100644
--- a/tensorflow/tools/api/golden/tensorflow.-variable.pbtxt
+++ b/tensorflow/tools/api/golden/v1/tensorflow.-variable.pbtxt
@@ -1,7 +1,7 @@
path: "tensorflow.Variable"
tf_class {
is_instance: "<class \'tensorflow.python.ops.variables.Variable\'>"
- is_instance: "<class \'tensorflow.python.training.checkpointable.CheckpointableBase\'>"
+ is_instance: "<class \'tensorflow.python.training.checkpointable.base.CheckpointableBase\'>"
is_instance: "<type \'object\'>"
member {
name: "SaveSliceInfo"
@@ -43,21 +43,25 @@ tf_class {
name: "shape"
mtype: "<type \'property\'>"
}
+ member {
+ name: "trainable"
+ mtype: "<type \'property\'>"
+ }
member_method {
name: "__init__"
- argspec: "args=[\'self\', \'initial_value\', \'trainable\', \'collections\', \'validate_shape\', \'caching_device\', \'name\', \'variable_def\', \'dtype\', \'expected_shape\', \'import_scope\', \'constraint\'], varargs=None, keywords=None, defaults=[\'None\', \'True\', \'None\', \'True\', \'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'None\'], "
+ argspec: "args=[\'self\', \'initial_value\', \'trainable\', \'collections\', \'validate_shape\', \'caching_device\', \'name\', \'variable_def\', \'dtype\', \'expected_shape\', \'import_scope\', \'constraint\', \'use_resource\', \'synchronization\', \'aggregation\'], varargs=None, keywords=None, defaults=[\'None\', \'True\', \'None\', \'True\', \'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'VariableSynchronization.AUTO\', \'VariableAggregation.NONE\'], "
}
member_method {
name: "assign"
- argspec: "args=[\'self\', \'value\', \'use_locking\'], varargs=None, keywords=None, defaults=[\'False\'], "
+ argspec: "args=[\'self\', \'value\', \'use_locking\', \'name\', \'read_value\'], varargs=None, keywords=None, defaults=[\'False\', \'None\', \'True\'], "
}
member_method {
name: "assign_add"
- argspec: "args=[\'self\', \'delta\', \'use_locking\'], varargs=None, keywords=None, defaults=[\'False\'], "
+ argspec: "args=[\'self\', \'delta\', \'use_locking\', \'name\', \'read_value\'], varargs=None, keywords=None, defaults=[\'False\', \'None\', \'True\'], "
}
member_method {
name: "assign_sub"
- argspec: "args=[\'self\', \'delta\', \'use_locking\'], varargs=None, keywords=None, defaults=[\'False\'], "
+ argspec: "args=[\'self\', \'delta\', \'use_locking\', \'name\', \'read_value\'], varargs=None, keywords=None, defaults=[\'False\', \'None\', \'True\'], "
}
member_method {
name: "count_up_to"
@@ -88,8 +92,28 @@ tf_class {
argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
}
member_method {
+ name: "scatter_add"
+ argspec: "args=[\'self\', \'sparse_delta\', \'use_locking\', \'name\'], varargs=None, keywords=None, defaults=[\'False\', \'None\'], "
+ }
+ member_method {
+ name: "scatter_nd_add"
+ argspec: "args=[\'self\', \'indices\', \'updates\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "scatter_nd_sub"
+ argspec: "args=[\'self\', \'indices\', \'updates\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "scatter_nd_update"
+ argspec: "args=[\'self\', \'indices\', \'updates\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
name: "scatter_sub"
- argspec: "args=[\'self\', \'sparse_delta\', \'use_locking\'], varargs=None, keywords=None, defaults=[\'False\'], "
+ argspec: "args=[\'self\', \'sparse_delta\', \'use_locking\', \'name\'], varargs=None, keywords=None, defaults=[\'False\', \'None\'], "
+ }
+ member_method {
+ name: "scatter_update"
+ argspec: "args=[\'self\', \'sparse_delta\', \'use_locking\', \'name\'], varargs=None, keywords=None, defaults=[\'False\', \'None\'], "
}
member_method {
name: "set_shape"
diff --git a/tensorflow/tools/api/golden/tensorflow.-whole-file-reader.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.-whole-file-reader.pbtxt
index 4ac759891c..4ac759891c 100644
--- a/tensorflow/tools/api/golden/tensorflow.-whole-file-reader.pbtxt
+++ b/tensorflow/tools/api/golden/v1/tensorflow.-whole-file-reader.pbtxt
diff --git a/tensorflow/tools/api/golden/tensorflow.app.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.app.pbtxt
index 85044a8987..85044a8987 100644
--- a/tensorflow/tools/api/golden/tensorflow.app.pbtxt
+++ b/tensorflow/tools/api/golden/v1/tensorflow.app.pbtxt
diff --git a/tensorflow/tools/api/golden/tensorflow.bitwise.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.bitwise.pbtxt
index 01cbd55c5d..01cbd55c5d 100644
--- a/tensorflow/tools/api/golden/tensorflow.bitwise.pbtxt
+++ b/tensorflow/tools/api/golden/v1/tensorflow.bitwise.pbtxt
diff --git a/tensorflow/tools/api/golden/tensorflow.compat.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.compat.pbtxt
index bab480ff9b..f1d760603e 100644
--- a/tensorflow/tools/api/golden/tensorflow.compat.pbtxt
+++ b/tensorflow/tools/api/golden/v1/tensorflow.compat.pbtxt
@@ -33,6 +33,14 @@ tf_module {
argspec: "args=[\'bytes_or_text\', \'encoding\'], varargs=None, keywords=None, defaults=[\'utf-8\'], "
}
member_method {
+ name: "forward_compatibility_horizon"
+ argspec: "args=[\'year\', \'month\', \'day\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "forward_compatible"
+ argspec: "args=[\'year\', \'month\', \'day\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
name: "path_to_str"
argspec: "args=[\'path\'], varargs=None, keywords=None, defaults=None"
}
diff --git a/tensorflow/tools/api/golden/tensorflow.constant_initializer.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.constant_initializer.pbtxt
index 00ec669b16..00ec669b16 100644
--- a/tensorflow/tools/api/golden/tensorflow.constant_initializer.pbtxt
+++ b/tensorflow/tools/api/golden/v1/tensorflow.constant_initializer.pbtxt
diff --git a/tensorflow/tools/api/golden/tensorflow.data.-dataset.__metaclass__.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.data.-dataset.__metaclass__.pbtxt
index af08c88d33..af08c88d33 100644
--- a/tensorflow/tools/api/golden/tensorflow.data.-dataset.__metaclass__.pbtxt
+++ b/tensorflow/tools/api/golden/v1/tensorflow.data.-dataset.__metaclass__.pbtxt
diff --git a/tensorflow/tools/api/golden/tensorflow.data.-dataset.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.data.-dataset.pbtxt
index 0900adaf76..c3ba2dba57 100644
--- a/tensorflow/tools/api/golden/tensorflow.data.-dataset.pbtxt
+++ b/tensorflow/tools/api/golden/v1/tensorflow.data.-dataset.pbtxt
@@ -24,7 +24,7 @@ tf_class {
}
member_method {
name: "batch"
- argspec: "args=[\'self\', \'batch_size\'], varargs=None, keywords=None, defaults=None"
+ argspec: "args=[\'self\', \'batch_size\', \'drop_remainder\'], varargs=None, keywords=None, defaults=[\'False\'], "
}
member_method {
name: "cache"
@@ -44,7 +44,7 @@ tf_class {
}
member_method {
name: "from_generator"
- argspec: "args=[\'generator\', \'output_types\', \'output_shapes\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ argspec: "args=[\'generator\', \'output_types\', \'output_shapes\', \'args\'], varargs=None, keywords=None, defaults=[\'None\', \'None\'], "
}
member_method {
name: "from_sparse_tensor_slices"
@@ -60,11 +60,11 @@ tf_class {
}
member_method {
name: "interleave"
- argspec: "args=[\'self\', \'map_func\', \'cycle_length\', \'block_length\'], varargs=None, keywords=None, defaults=[\'1\'], "
+ argspec: "args=[\'self\', \'map_func\', \'cycle_length\', \'block_length\', \'num_parallel_calls\'], varargs=None, keywords=None, defaults=[\'1\', \'None\'], "
}
member_method {
name: "list_files"
- argspec: "args=[\'file_pattern\', \'shuffle\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ argspec: "args=[\'file_pattern\', \'shuffle\', \'seed\'], varargs=None, keywords=None, defaults=[\'None\', \'None\'], "
}
member_method {
name: "make_initializable_iterator"
@@ -80,7 +80,7 @@ tf_class {
}
member_method {
name: "padded_batch"
- argspec: "args=[\'self\', \'batch_size\', \'padded_shapes\', \'padding_values\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ argspec: "args=[\'self\', \'batch_size\', \'padded_shapes\', \'padding_values\', \'drop_remainder\'], varargs=None, keywords=None, defaults=[\'None\', \'False\'], "
}
member_method {
name: "prefetch"
@@ -111,6 +111,10 @@ tf_class {
argspec: "args=[\'self\', \'count\'], varargs=None, keywords=None, defaults=None"
}
member_method {
+ name: "window"
+ argspec: "args=[\'self\', \'size\', \'shift\', \'stride\', \'drop_remainder\'], varargs=None, keywords=None, defaults=[\'None\', \'1\', \'False\'], "
+ }
+ member_method {
name: "zip"
argspec: "args=[\'datasets\'], varargs=None, keywords=None, defaults=None"
}
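
The Dataset diff above threads several new arguments through the base class (drop_remainder on batch/padded_batch, args on from_generator, num_parallel_calls on interleave, seed on list_files) and adds window(); a minimal sketch of a few of them:

    import tensorflow as tf

    ds = tf.data.Dataset.range(10)

    # Drop the final short batch instead of emitting it.
    batched = ds.batch(batch_size=3, drop_remainder=True)

    # New window(): sub-datasets of 4 elements, advancing by 2 each time.
    windowed = ds.window(size=4, shift=2, drop_remainder=True)

    # from_generator can now forward `args` tensors to the generator.
    def gen(limit):
        for i in range(limit):
            yield i

    from_gen = tf.data.Dataset.from_generator(gen, output_types=tf.int64,
                                              args=(5,))
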
diff --git a/tensorflow/tools/api/golden/tensorflow.data.-fixed-length-record-dataset.__metaclass__.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.data.-fixed-length-record-dataset.__metaclass__.pbtxt
index f384323fc8..f384323fc8 100644
--- a/tensorflow/tools/api/golden/tensorflow.data.-fixed-length-record-dataset.__metaclass__.pbtxt
+++ b/tensorflow/tools/api/golden/v1/tensorflow.data.-fixed-length-record-dataset.__metaclass__.pbtxt
diff --git a/tensorflow/tools/api/golden/tensorflow.data.-fixed-length-record-dataset.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.data.-fixed-length-record-dataset.pbtxt
index 7b16ac90c9..3541671bee 100644
--- a/tensorflow/tools/api/golden/tensorflow.data.-fixed-length-record-dataset.pbtxt
+++ b/tensorflow/tools/api/golden/v1/tensorflow.data.-fixed-length-record-dataset.pbtxt
@@ -25,7 +25,7 @@ tf_class {
}
member_method {
name: "batch"
- argspec: "args=[\'self\', \'batch_size\'], varargs=None, keywords=None, defaults=None"
+ argspec: "args=[\'self\', \'batch_size\', \'drop_remainder\'], varargs=None, keywords=None, defaults=[\'False\'], "
}
member_method {
name: "cache"
@@ -45,7 +45,7 @@ tf_class {
}
member_method {
name: "from_generator"
- argspec: "args=[\'generator\', \'output_types\', \'output_shapes\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ argspec: "args=[\'generator\', \'output_types\', \'output_shapes\', \'args\'], varargs=None, keywords=None, defaults=[\'None\', \'None\'], "
}
member_method {
name: "from_sparse_tensor_slices"
@@ -61,11 +61,11 @@ tf_class {
}
member_method {
name: "interleave"
- argspec: "args=[\'self\', \'map_func\', \'cycle_length\', \'block_length\'], varargs=None, keywords=None, defaults=[\'1\'], "
+ argspec: "args=[\'self\', \'map_func\', \'cycle_length\', \'block_length\', \'num_parallel_calls\'], varargs=None, keywords=None, defaults=[\'1\', \'None\'], "
}
member_method {
name: "list_files"
- argspec: "args=[\'file_pattern\', \'shuffle\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ argspec: "args=[\'file_pattern\', \'shuffle\', \'seed\'], varargs=None, keywords=None, defaults=[\'None\', \'None\'], "
}
member_method {
name: "make_initializable_iterator"
@@ -81,7 +81,7 @@ tf_class {
}
member_method {
name: "padded_batch"
- argspec: "args=[\'self\', \'batch_size\', \'padded_shapes\', \'padding_values\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ argspec: "args=[\'self\', \'batch_size\', \'padded_shapes\', \'padding_values\', \'drop_remainder\'], varargs=None, keywords=None, defaults=[\'None\', \'False\'], "
}
member_method {
name: "prefetch"
@@ -112,6 +112,10 @@ tf_class {
argspec: "args=[\'self\', \'count\'], varargs=None, keywords=None, defaults=None"
}
member_method {
+ name: "window"
+ argspec: "args=[\'self\', \'size\', \'shift\', \'stride\', \'drop_remainder\'], varargs=None, keywords=None, defaults=[\'None\', \'1\', \'False\'], "
+ }
+ member_method {
name: "zip"
argspec: "args=[\'datasets\'], varargs=None, keywords=None, defaults=None"
}
diff --git a/tensorflow/tools/api/golden/v1/tensorflow.data.-iterator.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.data.-iterator.pbtxt
new file mode 100644
index 0000000000..4f0147a523
--- /dev/null
+++ b/tensorflow/tools/api/golden/v1/tensorflow.data.-iterator.pbtxt
@@ -0,0 +1,46 @@
+path: "tensorflow.data.Iterator"
+tf_class {
+ is_instance: "<class \'tensorflow.python.data.ops.iterator_ops.Iterator\'>"
+ is_instance: "<class \'tensorflow.python.training.checkpointable.base.CheckpointableBase\'>"
+ is_instance: "<type \'object\'>"
+ member {
+ name: "initializer"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "output_classes"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "output_shapes"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "output_types"
+ mtype: "<type \'property\'>"
+ }
+ member_method {
+ name: "__init__"
+ argspec: "args=[\'self\', \'iterator_resource\', \'initializer\', \'output_types\', \'output_shapes\', \'output_classes\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "from_string_handle"
+ argspec: "args=[\'string_handle\', \'output_types\', \'output_shapes\', \'output_classes\'], varargs=None, keywords=None, defaults=[\'None\', \'None\'], "
+ }
+ member_method {
+ name: "from_structure"
+ argspec: "args=[\'output_types\', \'output_shapes\', \'shared_name\', \'output_classes\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\'], "
+ }
+ member_method {
+ name: "get_next"
+ argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "make_initializer"
+ argspec: "args=[\'self\', \'dataset\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "string_handle"
+ argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+}
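
The new tf.data.Iterator golden pins the reinitializable-iterator surface; a minimal sketch of from_structure() plus make_initializer(), using two throwaway datasets:

    import tensorflow as tf

    train = tf.data.Dataset.range(5)
    test = tf.data.Dataset.range(100, 103)

    it = tf.data.Iterator.from_structure(train.output_types, train.output_shapes)
    next_element = it.get_next()

    with tf.Session() as sess:
        sess.run(it.make_initializer(train))
        print(sess.run(next_element))   # 0
        sess.run(it.make_initializer(test))
        print(sess.run(next_element))   # 100
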
diff --git a/tensorflow/tools/api/golden/tensorflow.data.-t-f-record-dataset.__metaclass__.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.data.-t-f-record-dataset.__metaclass__.pbtxt
index b12dec8a70..b12dec8a70 100644
--- a/tensorflow/tools/api/golden/tensorflow.data.-t-f-record-dataset.__metaclass__.pbtxt
+++ b/tensorflow/tools/api/golden/v1/tensorflow.data.-t-f-record-dataset.__metaclass__.pbtxt
diff --git a/tensorflow/tools/api/golden/tensorflow.data.-t-f-record-dataset.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.data.-t-f-record-dataset.pbtxt
index 9cf5f2ae20..b113c18ee0 100644
--- a/tensorflow/tools/api/golden/tensorflow.data.-t-f-record-dataset.pbtxt
+++ b/tensorflow/tools/api/golden/v1/tensorflow.data.-t-f-record-dataset.pbtxt
@@ -25,7 +25,7 @@ tf_class {
}
member_method {
name: "batch"
- argspec: "args=[\'self\', \'batch_size\'], varargs=None, keywords=None, defaults=None"
+ argspec: "args=[\'self\', \'batch_size\', \'drop_remainder\'], varargs=None, keywords=None, defaults=[\'False\'], "
}
member_method {
name: "cache"
@@ -45,7 +45,7 @@ tf_class {
}
member_method {
name: "from_generator"
- argspec: "args=[\'generator\', \'output_types\', \'output_shapes\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ argspec: "args=[\'generator\', \'output_types\', \'output_shapes\', \'args\'], varargs=None, keywords=None, defaults=[\'None\', \'None\'], "
}
member_method {
name: "from_sparse_tensor_slices"
@@ -61,11 +61,11 @@ tf_class {
}
member_method {
name: "interleave"
- argspec: "args=[\'self\', \'map_func\', \'cycle_length\', \'block_length\'], varargs=None, keywords=None, defaults=[\'1\'], "
+ argspec: "args=[\'self\', \'map_func\', \'cycle_length\', \'block_length\', \'num_parallel_calls\'], varargs=None, keywords=None, defaults=[\'1\', \'None\'], "
}
member_method {
name: "list_files"
- argspec: "args=[\'file_pattern\', \'shuffle\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ argspec: "args=[\'file_pattern\', \'shuffle\', \'seed\'], varargs=None, keywords=None, defaults=[\'None\', \'None\'], "
}
member_method {
name: "make_initializable_iterator"
@@ -81,7 +81,7 @@ tf_class {
}
member_method {
name: "padded_batch"
- argspec: "args=[\'self\', \'batch_size\', \'padded_shapes\', \'padding_values\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ argspec: "args=[\'self\', \'batch_size\', \'padded_shapes\', \'padding_values\', \'drop_remainder\'], varargs=None, keywords=None, defaults=[\'None\', \'False\'], "
}
member_method {
name: "prefetch"
@@ -112,6 +112,10 @@ tf_class {
argspec: "args=[\'self\', \'count\'], varargs=None, keywords=None, defaults=None"
}
member_method {
+ name: "window"
+ argspec: "args=[\'self\', \'size\', \'shift\', \'stride\', \'drop_remainder\'], varargs=None, keywords=None, defaults=[\'None\', \'1\', \'False\'], "
+ }
+ member_method {
name: "zip"
argspec: "args=[\'datasets\'], varargs=None, keywords=None, defaults=None"
}
diff --git a/tensorflow/tools/api/golden/tensorflow.data.-text-line-dataset.__metaclass__.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.data.-text-line-dataset.__metaclass__.pbtxt
index 7ddcdce266..7ddcdce266 100644
--- a/tensorflow/tools/api/golden/tensorflow.data.-text-line-dataset.__metaclass__.pbtxt
+++ b/tensorflow/tools/api/golden/v1/tensorflow.data.-text-line-dataset.__metaclass__.pbtxt
diff --git a/tensorflow/tools/api/golden/tensorflow.data.-text-line-dataset.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.data.-text-line-dataset.pbtxt
index 8c3d669143..7210bf5db4 100644
--- a/tensorflow/tools/api/golden/tensorflow.data.-text-line-dataset.pbtxt
+++ b/tensorflow/tools/api/golden/v1/tensorflow.data.-text-line-dataset.pbtxt
@@ -25,7 +25,7 @@ tf_class {
}
member_method {
name: "batch"
- argspec: "args=[\'self\', \'batch_size\'], varargs=None, keywords=None, defaults=None"
+ argspec: "args=[\'self\', \'batch_size\', \'drop_remainder\'], varargs=None, keywords=None, defaults=[\'False\'], "
}
member_method {
name: "cache"
@@ -45,7 +45,7 @@ tf_class {
}
member_method {
name: "from_generator"
- argspec: "args=[\'generator\', \'output_types\', \'output_shapes\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ argspec: "args=[\'generator\', \'output_types\', \'output_shapes\', \'args\'], varargs=None, keywords=None, defaults=[\'None\', \'None\'], "
}
member_method {
name: "from_sparse_tensor_slices"
@@ -61,11 +61,11 @@ tf_class {
}
member_method {
name: "interleave"
- argspec: "args=[\'self\', \'map_func\', \'cycle_length\', \'block_length\'], varargs=None, keywords=None, defaults=[\'1\'], "
+ argspec: "args=[\'self\', \'map_func\', \'cycle_length\', \'block_length\', \'num_parallel_calls\'], varargs=None, keywords=None, defaults=[\'1\', \'None\'], "
}
member_method {
name: "list_files"
- argspec: "args=[\'file_pattern\', \'shuffle\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ argspec: "args=[\'file_pattern\', \'shuffle\', \'seed\'], varargs=None, keywords=None, defaults=[\'None\', \'None\'], "
}
member_method {
name: "make_initializable_iterator"
@@ -81,7 +81,7 @@ tf_class {
}
member_method {
name: "padded_batch"
- argspec: "args=[\'self\', \'batch_size\', \'padded_shapes\', \'padding_values\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ argspec: "args=[\'self\', \'batch_size\', \'padded_shapes\', \'padding_values\', \'drop_remainder\'], varargs=None, keywords=None, defaults=[\'None\', \'False\'], "
}
member_method {
name: "prefetch"
@@ -112,6 +112,10 @@ tf_class {
argspec: "args=[\'self\', \'count\'], varargs=None, keywords=None, defaults=None"
}
member_method {
+ name: "window"
+ argspec: "args=[\'self\', \'size\', \'shift\', \'stride\', \'drop_remainder\'], varargs=None, keywords=None, defaults=[\'None\', \'1\', \'False\'], "
+ }
+ member_method {
name: "zip"
argspec: "args=[\'datasets\'], varargs=None, keywords=None, defaults=None"
}
diff --git a/tensorflow/tools/api/golden/tensorflow.data.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.data.pbtxt
index 56fb270a49..56fb270a49 100644
--- a/tensorflow/tools/api/golden/tensorflow.data.pbtxt
+++ b/tensorflow/tools/api/golden/v1/tensorflow.data.pbtxt
diff --git a/tensorflow/tools/api/golden/v1/tensorflow.debugging.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.debugging.pbtxt
new file mode 100644
index 0000000000..d9efe97821
--- /dev/null
+++ b/tensorflow/tools/api/golden/v1/tensorflow.debugging.pbtxt
@@ -0,0 +1,19 @@
+path: "tensorflow.debugging"
+tf_module {
+ member_method {
+ name: "check_numerics"
+ argspec: "args=[\'tensor\', \'message\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "is_finite"
+ argspec: "args=[\'x\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "is_inf"
+ argspec: "args=[\'x\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "is_nan"
+ argspec: "args=[\'x\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+}
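
The new tf.debugging namespace re-exports four existing numerics ops; a minimal sketch:

    import tensorflow as tf

    x = tf.constant([1.0, float('inf'), float('nan')])

    finite = tf.debugging.is_finite(x)   # [True, False, False]
    inf = tf.debugging.is_inf(x)         # [False, True, False]
    nan = tf.debugging.is_nan(x)         # [False, False, True]

    # check_numerics raises InvalidArgumentError when its input has Inf/NaN.
    ok = tf.debugging.check_numerics(tf.constant([1.0, 2.0]), message='bad values')

    with tf.Session() as sess:
        print(sess.run([finite, inf, nan, ok]))
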
diff --git a/tensorflow/tools/api/golden/tensorflow.distributions.-bernoulli.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.distributions.-bernoulli.pbtxt
index ca96f4eaec..ca96f4eaec 100644
--- a/tensorflow/tools/api/golden/tensorflow.distributions.-bernoulli.pbtxt
+++ b/tensorflow/tools/api/golden/v1/tensorflow.distributions.-bernoulli.pbtxt
diff --git a/tensorflow/tools/api/golden/tensorflow.distributions.-beta.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.distributions.-beta.pbtxt
index d0508acd9f..d0508acd9f 100644
--- a/tensorflow/tools/api/golden/tensorflow.distributions.-beta.pbtxt
+++ b/tensorflow/tools/api/golden/v1/tensorflow.distributions.-beta.pbtxt
diff --git a/tensorflow/tools/api/golden/tensorflow.distributions.-categorical.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.distributions.-categorical.pbtxt
index ff0fbb56cd..ff0fbb56cd 100644
--- a/tensorflow/tools/api/golden/tensorflow.distributions.-categorical.pbtxt
+++ b/tensorflow/tools/api/golden/v1/tensorflow.distributions.-categorical.pbtxt
diff --git a/tensorflow/tools/api/golden/tensorflow.distributions.-dirichlet-multinomial.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.distributions.-dirichlet-multinomial.pbtxt
index d75e4a2f88..d75e4a2f88 100644
--- a/tensorflow/tools/api/golden/tensorflow.distributions.-dirichlet-multinomial.pbtxt
+++ b/tensorflow/tools/api/golden/v1/tensorflow.distributions.-dirichlet-multinomial.pbtxt
diff --git a/tensorflow/tools/api/golden/tensorflow.distributions.-dirichlet.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.distributions.-dirichlet.pbtxt
index b838b9ae21..b838b9ae21 100644
--- a/tensorflow/tools/api/golden/tensorflow.distributions.-dirichlet.pbtxt
+++ b/tensorflow/tools/api/golden/v1/tensorflow.distributions.-dirichlet.pbtxt
diff --git a/tensorflow/tools/api/golden/tensorflow.distributions.-distribution.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.distributions.-distribution.pbtxt
index 6f06b7d50d..6f06b7d50d 100644
--- a/tensorflow/tools/api/golden/tensorflow.distributions.-distribution.pbtxt
+++ b/tensorflow/tools/api/golden/v1/tensorflow.distributions.-distribution.pbtxt
diff --git a/tensorflow/tools/api/golden/tensorflow.distributions.-exponential.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.distributions.-exponential.pbtxt
index d34f9cde5d..d34f9cde5d 100644
--- a/tensorflow/tools/api/golden/tensorflow.distributions.-exponential.pbtxt
+++ b/tensorflow/tools/api/golden/v1/tensorflow.distributions.-exponential.pbtxt
diff --git a/tensorflow/tools/api/golden/tensorflow.distributions.-gamma.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.distributions.-gamma.pbtxt
index df268b8d99..df268b8d99 100644
--- a/tensorflow/tools/api/golden/tensorflow.distributions.-gamma.pbtxt
+++ b/tensorflow/tools/api/golden/v1/tensorflow.distributions.-gamma.pbtxt
diff --git a/tensorflow/tools/api/golden/tensorflow.distributions.-laplace.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.distributions.-laplace.pbtxt
index 303dcb4ed3..303dcb4ed3 100644
--- a/tensorflow/tools/api/golden/tensorflow.distributions.-laplace.pbtxt
+++ b/tensorflow/tools/api/golden/v1/tensorflow.distributions.-laplace.pbtxt
diff --git a/tensorflow/tools/api/golden/tensorflow.distributions.-multinomial.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.distributions.-multinomial.pbtxt
index ecda8acb15..ecda8acb15 100644
--- a/tensorflow/tools/api/golden/tensorflow.distributions.-multinomial.pbtxt
+++ b/tensorflow/tools/api/golden/v1/tensorflow.distributions.-multinomial.pbtxt
diff --git a/tensorflow/tools/api/golden/tensorflow.distributions.-normal.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.distributions.-normal.pbtxt
index 92b9eeea22..92b9eeea22 100644
--- a/tensorflow/tools/api/golden/tensorflow.distributions.-normal.pbtxt
+++ b/tensorflow/tools/api/golden/v1/tensorflow.distributions.-normal.pbtxt
diff --git a/tensorflow/tools/api/golden/tensorflow.distributions.-register-k-l.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.distributions.-register-k-l.pbtxt
index e3db443c2b..e3db443c2b 100644
--- a/tensorflow/tools/api/golden/tensorflow.distributions.-register-k-l.pbtxt
+++ b/tensorflow/tools/api/golden/v1/tensorflow.distributions.-register-k-l.pbtxt
diff --git a/tensorflow/tools/api/golden/tensorflow.distributions.-reparameterization-type.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.distributions.-reparameterization-type.pbtxt
index 02e8d576dd..02e8d576dd 100644
--- a/tensorflow/tools/api/golden/tensorflow.distributions.-reparameterization-type.pbtxt
+++ b/tensorflow/tools/api/golden/v1/tensorflow.distributions.-reparameterization-type.pbtxt
diff --git a/tensorflow/tools/api/golden/tensorflow.distributions.-student-t.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.distributions.-student-t.pbtxt
index 9aa7f9a634..9aa7f9a634 100644
--- a/tensorflow/tools/api/golden/tensorflow.distributions.-student-t.pbtxt
+++ b/tensorflow/tools/api/golden/v1/tensorflow.distributions.-student-t.pbtxt
diff --git a/tensorflow/tools/api/golden/tensorflow.distributions.-uniform.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.distributions.-uniform.pbtxt
index d1b9d30696..d1b9d30696 100644
--- a/tensorflow/tools/api/golden/tensorflow.distributions.-uniform.pbtxt
+++ b/tensorflow/tools/api/golden/v1/tensorflow.distributions.-uniform.pbtxt
diff --git a/tensorflow/tools/api/golden/tensorflow.distributions.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.distributions.pbtxt
index 90b60ef074..90b60ef074 100644
--- a/tensorflow/tools/api/golden/tensorflow.distributions.pbtxt
+++ b/tensorflow/tools/api/golden/v1/tensorflow.distributions.pbtxt
diff --git a/tensorflow/tools/api/golden/v1/tensorflow.dtypes.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.dtypes.pbtxt
new file mode 100644
index 0000000000..98e1feed00
--- /dev/null
+++ b/tensorflow/tools/api/golden/v1/tensorflow.dtypes.pbtxt
@@ -0,0 +1,7 @@
+path: "tensorflow.dtypes"
+tf_module {
+ member_method {
+ name: "as_string"
+ argspec: "args=[\'input\', \'precision\', \'scientific\', \'shortest\', \'width\', \'fill\', \'name\'], varargs=None, keywords=None, defaults=[\'-1\', \'False\', \'False\', \'-1\', \'\', \'None\'], "
+ }
+}
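
tf.dtypes currently exposes a single function in this golden, mirroring the long-standing tf.as_string op; a minimal sketch:

    import tensorflow as tf

    formatted = tf.dtypes.as_string(tf.constant([3.14159, 2.71828]), precision=2)

    with tf.Session() as sess:
        print(sess.run(formatted))  # [b'3.14' b'2.72']
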
diff --git a/tensorflow/tools/api/golden/tensorflow.errors.-aborted-error.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.errors.-aborted-error.pbtxt
index ea9186b0b9..ea9186b0b9 100644
--- a/tensorflow/tools/api/golden/tensorflow.errors.-aborted-error.pbtxt
+++ b/tensorflow/tools/api/golden/v1/tensorflow.errors.-aborted-error.pbtxt
diff --git a/tensorflow/tools/api/golden/tensorflow.errors.-already-exists-error.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.errors.-already-exists-error.pbtxt
index 4e155081dd..4e155081dd 100644
--- a/tensorflow/tools/api/golden/tensorflow.errors.-already-exists-error.pbtxt
+++ b/tensorflow/tools/api/golden/v1/tensorflow.errors.-already-exists-error.pbtxt
diff --git a/tensorflow/tools/api/golden/tensorflow.errors.-cancelled-error.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.errors.-cancelled-error.pbtxt
index b02a0e023a..b02a0e023a 100644
--- a/tensorflow/tools/api/golden/tensorflow.errors.-cancelled-error.pbtxt
+++ b/tensorflow/tools/api/golden/v1/tensorflow.errors.-cancelled-error.pbtxt
diff --git a/tensorflow/tools/api/golden/tensorflow.errors.-data-loss-error.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.errors.-data-loss-error.pbtxt
index c1fa66342a..c1fa66342a 100644
--- a/tensorflow/tools/api/golden/tensorflow.errors.-data-loss-error.pbtxt
+++ b/tensorflow/tools/api/golden/v1/tensorflow.errors.-data-loss-error.pbtxt
diff --git a/tensorflow/tools/api/golden/tensorflow.errors.-deadline-exceeded-error.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.errors.-deadline-exceeded-error.pbtxt
index 8e03793619..8e03793619 100644
--- a/tensorflow/tools/api/golden/tensorflow.errors.-deadline-exceeded-error.pbtxt
+++ b/tensorflow/tools/api/golden/v1/tensorflow.errors.-deadline-exceeded-error.pbtxt
diff --git a/tensorflow/tools/api/golden/tensorflow.errors.-failed-precondition-error.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.errors.-failed-precondition-error.pbtxt
index 384d4b534c..384d4b534c 100644
--- a/tensorflow/tools/api/golden/tensorflow.errors.-failed-precondition-error.pbtxt
+++ b/tensorflow/tools/api/golden/v1/tensorflow.errors.-failed-precondition-error.pbtxt
diff --git a/tensorflow/tools/api/golden/tensorflow.errors.-internal-error.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.errors.-internal-error.pbtxt
index ac5c4d7879..ac5c4d7879 100644
--- a/tensorflow/tools/api/golden/tensorflow.errors.-internal-error.pbtxt
+++ b/tensorflow/tools/api/golden/v1/tensorflow.errors.-internal-error.pbtxt
diff --git a/tensorflow/tools/api/golden/tensorflow.errors.-invalid-argument-error.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.errors.-invalid-argument-error.pbtxt
index 161edd4a7c..161edd4a7c 100644
--- a/tensorflow/tools/api/golden/tensorflow.errors.-invalid-argument-error.pbtxt
+++ b/tensorflow/tools/api/golden/v1/tensorflow.errors.-invalid-argument-error.pbtxt
diff --git a/tensorflow/tools/api/golden/tensorflow.errors.-not-found-error.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.errors.-not-found-error.pbtxt
index 1e64730ac6..1e64730ac6 100644
--- a/tensorflow/tools/api/golden/tensorflow.errors.-not-found-error.pbtxt
+++ b/tensorflow/tools/api/golden/v1/tensorflow.errors.-not-found-error.pbtxt
diff --git a/tensorflow/tools/api/golden/tensorflow.errors.-op-error.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.errors.-op-error.pbtxt
index b1f14c0457..b1f14c0457 100644
--- a/tensorflow/tools/api/golden/tensorflow.errors.-op-error.pbtxt
+++ b/tensorflow/tools/api/golden/v1/tensorflow.errors.-op-error.pbtxt
diff --git a/tensorflow/tools/api/golden/tensorflow.errors.-out-of-range-error.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.errors.-out-of-range-error.pbtxt
index 6365e47286..6365e47286 100644
--- a/tensorflow/tools/api/golden/tensorflow.errors.-out-of-range-error.pbtxt
+++ b/tensorflow/tools/api/golden/v1/tensorflow.errors.-out-of-range-error.pbtxt
diff --git a/tensorflow/tools/api/golden/tensorflow.errors.-permission-denied-error.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.errors.-permission-denied-error.pbtxt
index dc8a66f9ea..dc8a66f9ea 100644
--- a/tensorflow/tools/api/golden/tensorflow.errors.-permission-denied-error.pbtxt
+++ b/tensorflow/tools/api/golden/v1/tensorflow.errors.-permission-denied-error.pbtxt
diff --git a/tensorflow/tools/api/golden/tensorflow.errors.-resource-exhausted-error.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.errors.-resource-exhausted-error.pbtxt
index 85bb384b46..85bb384b46 100644
--- a/tensorflow/tools/api/golden/tensorflow.errors.-resource-exhausted-error.pbtxt
+++ b/tensorflow/tools/api/golden/v1/tensorflow.errors.-resource-exhausted-error.pbtxt
diff --git a/tensorflow/tools/api/golden/tensorflow.errors.-unauthenticated-error.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.errors.-unauthenticated-error.pbtxt
index d57d7ac2f2..d57d7ac2f2 100644
--- a/tensorflow/tools/api/golden/tensorflow.errors.-unauthenticated-error.pbtxt
+++ b/tensorflow/tools/api/golden/v1/tensorflow.errors.-unauthenticated-error.pbtxt
diff --git a/tensorflow/tools/api/golden/tensorflow.errors.-unavailable-error.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.errors.-unavailable-error.pbtxt
index cc33e6ed8d..cc33e6ed8d 100644
--- a/tensorflow/tools/api/golden/tensorflow.errors.-unavailable-error.pbtxt
+++ b/tensorflow/tools/api/golden/v1/tensorflow.errors.-unavailable-error.pbtxt
diff --git a/tensorflow/tools/api/golden/tensorflow.errors.-unimplemented-error.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.errors.-unimplemented-error.pbtxt
index b8c2e22dbd..b8c2e22dbd 100644
--- a/tensorflow/tools/api/golden/tensorflow.errors.-unimplemented-error.pbtxt
+++ b/tensorflow/tools/api/golden/v1/tensorflow.errors.-unimplemented-error.pbtxt
diff --git a/tensorflow/tools/api/golden/tensorflow.errors.-unknown-error.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.errors.-unknown-error.pbtxt
index 8ffcfae95b..8ffcfae95b 100644
--- a/tensorflow/tools/api/golden/tensorflow.errors.-unknown-error.pbtxt
+++ b/tensorflow/tools/api/golden/v1/tensorflow.errors.-unknown-error.pbtxt
diff --git a/tensorflow/tools/api/golden/tensorflow.errors.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.errors.pbtxt
index c5fe49baab..c5fe49baab 100644
--- a/tensorflow/tools/api/golden/tensorflow.errors.pbtxt
+++ b/tensorflow/tools/api/golden/v1/tensorflow.errors.pbtxt
diff --git a/tensorflow/tools/api/golden/tensorflow.errors.raise_exception_on_not_ok_status.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.errors.raise_exception_on_not_ok_status.pbtxt
index 5d25ec769a..5d25ec769a 100644
--- a/tensorflow/tools/api/golden/tensorflow.errors.raise_exception_on_not_ok_status.pbtxt
+++ b/tensorflow/tools/api/golden/v1/tensorflow.errors.raise_exception_on_not_ok_status.pbtxt
diff --git a/tensorflow/tools/api/golden/tensorflow.estimator.-baseline-classifier.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.estimator.-baseline-classifier.pbtxt
index be9ba4ce85..082e26b99b 100644
--- a/tensorflow/tools/api/golden/tensorflow.estimator.-baseline-classifier.pbtxt
+++ b/tensorflow/tools/api/golden/v1/tensorflow.estimator.-baseline-classifier.pbtxt
@@ -24,10 +24,18 @@ tf_class {
argspec: "args=[\'self\', \'model_dir\', \'n_classes\', \'weight_column\', \'label_vocabulary\', \'optimizer\', \'config\', \'loss_reduction\'], varargs=None, keywords=None, defaults=[\'None\', \'2\', \'None\', \'None\', \'Ftrl\', \'None\', \'weighted_sum\'], "
}
member_method {
+ name: "eval_dir"
+ argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
name: "evaluate"
argspec: "args=[\'self\', \'input_fn\', \'steps\', \'hooks\', \'checkpoint_path\', \'name\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'None\'], "
}
member_method {
+ name: "export_saved_model"
+ argspec: "args=[\'self\', \'export_dir_base\', \'serving_input_receiver_fn\', \'assets_extra\', \'as_text\', \'checkpoint_path\'], varargs=None, keywords=None, defaults=[\'None\', \'False\', \'None\'], "
+ }
+ member_method {
name: "export_savedmodel"
argspec: "args=[\'self\', \'export_dir_base\', \'serving_input_receiver_fn\', \'assets_extra\', \'as_text\', \'checkpoint_path\', \'strip_default_attrs\'], varargs=None, keywords=None, defaults=[\'None\', \'False\', \'None\', \'False\'], "
}
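
The canned-estimator diffs in this change all gain the same two methods, eval_dir and export_saved_model; a minimal sketch using BaselineClassifier purely as a convenient example. Exporting needs a trained model and a serving_input_receiver_fn, so that call is left commented.

    import tensorflow as tf

    classifier = tf.estimator.BaselineClassifier(n_classes=2)

    # New: the directory where evaluation summaries for a given name are written.
    print(classifier.eval_dir())  # <model_dir>/eval

    # New spelling of export_savedmodel; the argspec above drops strip_default_attrs.
    # classifier.export_saved_model('/tmp/exported_baseline', serving_input_receiver_fn)
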
diff --git a/tensorflow/tools/api/golden/tensorflow.estimator.-baseline-regressor.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.estimator.-baseline-regressor.pbtxt
index 91fca67b6b..7cc4191eb3 100644
--- a/tensorflow/tools/api/golden/tensorflow.estimator.-baseline-regressor.pbtxt
+++ b/tensorflow/tools/api/golden/v1/tensorflow.estimator.-baseline-regressor.pbtxt
@@ -24,10 +24,18 @@ tf_class {
argspec: "args=[\'self\', \'model_dir\', \'label_dimension\', \'weight_column\', \'optimizer\', \'config\', \'loss_reduction\'], varargs=None, keywords=None, defaults=[\'None\', \'1\', \'None\', \'Ftrl\', \'None\', \'weighted_sum\'], "
}
member_method {
+ name: "eval_dir"
+ argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
name: "evaluate"
argspec: "args=[\'self\', \'input_fn\', \'steps\', \'hooks\', \'checkpoint_path\', \'name\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'None\'], "
}
member_method {
+ name: "export_saved_model"
+ argspec: "args=[\'self\', \'export_dir_base\', \'serving_input_receiver_fn\', \'assets_extra\', \'as_text\', \'checkpoint_path\'], varargs=None, keywords=None, defaults=[\'None\', \'False\', \'None\'], "
+ }
+ member_method {
name: "export_savedmodel"
argspec: "args=[\'self\', \'export_dir_base\', \'serving_input_receiver_fn\', \'assets_extra\', \'as_text\', \'checkpoint_path\', \'strip_default_attrs\'], varargs=None, keywords=None, defaults=[\'None\', \'False\', \'None\', \'False\'], "
}
diff --git a/tensorflow/tools/api/golden/v1/tensorflow.estimator.-best-exporter.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.estimator.-best-exporter.pbtxt
new file mode 100644
index 0000000000..9694268199
--- /dev/null
+++ b/tensorflow/tools/api/golden/v1/tensorflow.estimator.-best-exporter.pbtxt
@@ -0,0 +1,18 @@
+path: "tensorflow.estimator.BestExporter"
+tf_class {
+ is_instance: "<class \'tensorflow.python.estimator.exporter.BestExporter\'>"
+ is_instance: "<class \'tensorflow.python.estimator.exporter.Exporter\'>"
+ is_instance: "<type \'object\'>"
+ member {
+ name: "name"
+ mtype: "<type \'property\'>"
+ }
+ member_method {
+ name: "__init__"
+ argspec: "args=[\'self\', \'name\', \'serving_input_receiver_fn\', \'event_file_pattern\', \'compare_fn\', \'assets_extra\', \'as_text\', \'exports_to_keep\'], varargs=None, keywords=None, defaults=[\'best_exporter\', \'None\', \'eval/*.tfevents.*\', \'<function _loss_smaller instance>\', \'None\', \'False\', \'5\'], "
+ }
+ member_method {
+ name: "export"
+ argspec: "args=[\'self\', \'estimator\', \'export_path\', \'checkpoint_path\', \'eval_result\', \'is_the_final_export\'], varargs=None, keywords=None, defaults=None"
+ }
+}
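
The new BestExporter golden documents an exporter that only re-exports when a better evaluation result is seen and retains at most exports_to_keep exports; a minimal sketch of plugging it into an EvalSpec, with hypothetical placeholder input functions:

    import tensorflow as tf

    def serving_input_fn():
        features = {'x': tf.placeholder(tf.float32, shape=[None, 1], name='x')}
        return tf.estimator.export.ServingInputReceiver(features, features)

    def eval_input_fn():
        return tf.data.Dataset.from_tensor_slices(
            ({'x': [[1.0]]}, [[0]])).batch(1)

    exporter = tf.estimator.BestExporter(
        name='best_exporter',
        serving_input_receiver_fn=serving_input_fn,
        exports_to_keep=5)  # retain only the five best exports

    eval_spec = tf.estimator.EvalSpec(input_fn=eval_input_fn,
                                      exporters=[exporter])
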
diff --git a/tensorflow/tools/api/golden/v1/tensorflow.estimator.-boosted-trees-classifier.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.estimator.-boosted-trees-classifier.pbtxt
new file mode 100644
index 0000000000..9e429a32a5
--- /dev/null
+++ b/tensorflow/tools/api/golden/v1/tensorflow.estimator.-boosted-trees-classifier.pbtxt
@@ -0,0 +1,67 @@
+path: "tensorflow.estimator.BoostedTreesClassifier"
+tf_class {
+ is_instance: "<class \'tensorflow.python.estimator.canned.boosted_trees.BoostedTreesClassifier\'>"
+ is_instance: "<class \'tensorflow.python.estimator.canned.boosted_trees._BoostedTreesBase\'>"
+ is_instance: "<class \'tensorflow.python.estimator.estimator.Estimator\'>"
+ is_instance: "<type \'object\'>"
+ member {
+ name: "config"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "model_dir"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "model_fn"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "params"
+ mtype: "<type \'property\'>"
+ }
+ member_method {
+ name: "__init__"
+ argspec: "args=[\'self\', \'feature_columns\', \'n_batches_per_layer\', \'model_dir\', \'n_classes\', \'weight_column\', \'label_vocabulary\', \'n_trees\', \'max_depth\', \'learning_rate\', \'l1_regularization\', \'l2_regularization\', \'tree_complexity\', \'min_node_weight\', \'config\', \'center_bias\', \'pruning_mode\'], varargs=None, keywords=None, defaults=[\'None\', \'<object object instance>\', \'None\', \'None\', \'100\', \'6\', \'0.1\', \'0.0\', \'0.0\', \'0.0\', \'0.0\', \'None\', \'False\', \'none\'], "
+ }
+ member_method {
+ name: "eval_dir"
+ argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "evaluate"
+ argspec: "args=[\'self\', \'input_fn\', \'steps\', \'hooks\', \'checkpoint_path\', \'name\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'None\'], "
+ }
+ member_method {
+ name: "experimental_predict_with_explanations"
+ argspec: "args=[\'self\', \'input_fn\', \'predict_keys\', \'hooks\', \'checkpoint_path\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\'], "
+ }
+ member_method {
+ name: "export_saved_model"
+ argspec: "args=[\'self\', \'export_dir_base\', \'serving_input_receiver_fn\', \'assets_extra\', \'as_text\', \'checkpoint_path\'], varargs=None, keywords=None, defaults=[\'None\', \'False\', \'None\'], "
+ }
+ member_method {
+ name: "export_savedmodel"
+ argspec: "args=[\'self\', \'export_dir_base\', \'serving_input_receiver_fn\', \'assets_extra\', \'as_text\', \'checkpoint_path\', \'strip_default_attrs\'], varargs=None, keywords=None, defaults=[\'None\', \'False\', \'None\', \'False\'], "
+ }
+ member_method {
+ name: "get_variable_names"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_variable_value"
+ argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "latest_checkpoint"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "predict"
+ argspec: "args=[\'self\', \'input_fn\', \'predict_keys\', \'hooks\', \'checkpoint_path\', \'yield_single_examples\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'True\'], "
+ }
+ member_method {
+ name: "train"
+ argspec: "args=[\'self\', \'input_fn\', \'hooks\', \'steps\', \'max_steps\', \'saving_listeners\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'None\'], "
+ }
+}
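Editor's note: an illustrative construction of the BoostedTreesClassifier covered by this golden file, using the constructor and train() argspecs above. The column, data and hyperparameters are made up for the sketch.

import numpy as np
import tensorflow as tf

# Boosted trees estimators take bucketized/categorical columns.
feature_columns = [
    tf.feature_column.bucketized_column(
        tf.feature_column.numeric_column('age'), boundaries=[30, 40, 50, 60]),
]

classifier = tf.estimator.BoostedTreesClassifier(
    feature_columns=feature_columns,
    n_batches_per_layer=1,   # whole (tiny) dataset per tree layer
    n_trees=50,
    max_depth=3,
    learning_rate=0.1)

train_input_fn = tf.estimator.inputs.numpy_input_fn(
    x={'age': np.random.randint(18, 80, size=100).astype(np.float32)},
    y=np.random.randint(0, 2, size=100),
    batch_size=100, num_epochs=None, shuffle=True)

classifier.train(input_fn=train_input_fn, max_steps=100)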
diff --git a/tensorflow/tools/api/golden/v1/tensorflow.estimator.-boosted-trees-regressor.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.estimator.-boosted-trees-regressor.pbtxt
new file mode 100644
index 0000000000..56af1d137c
--- /dev/null
+++ b/tensorflow/tools/api/golden/v1/tensorflow.estimator.-boosted-trees-regressor.pbtxt
@@ -0,0 +1,67 @@
+path: "tensorflow.estimator.BoostedTreesRegressor"
+tf_class {
+ is_instance: "<class \'tensorflow.python.estimator.canned.boosted_trees.BoostedTreesRegressor\'>"
+ is_instance: "<class \'tensorflow.python.estimator.canned.boosted_trees._BoostedTreesBase\'>"
+ is_instance: "<class \'tensorflow.python.estimator.estimator.Estimator\'>"
+ is_instance: "<type \'object\'>"
+ member {
+ name: "config"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "model_dir"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "model_fn"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "params"
+ mtype: "<type \'property\'>"
+ }
+ member_method {
+ name: "__init__"
+ argspec: "args=[\'self\', \'feature_columns\', \'n_batches_per_layer\', \'model_dir\', \'label_dimension\', \'weight_column\', \'n_trees\', \'max_depth\', \'learning_rate\', \'l1_regularization\', \'l2_regularization\', \'tree_complexity\', \'min_node_weight\', \'config\', \'center_bias\', \'pruning_mode\'], varargs=None, keywords=None, defaults=[\'None\', \'<object object instance>\', \'None\', \'100\', \'6\', \'0.1\', \'0.0\', \'0.0\', \'0.0\', \'0.0\', \'None\', \'False\', \'none\'], "
+ }
+ member_method {
+ name: "eval_dir"
+ argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "evaluate"
+ argspec: "args=[\'self\', \'input_fn\', \'steps\', \'hooks\', \'checkpoint_path\', \'name\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'None\'], "
+ }
+ member_method {
+ name: "experimental_predict_with_explanations"
+ argspec: "args=[\'self\', \'input_fn\', \'predict_keys\', \'hooks\', \'checkpoint_path\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\'], "
+ }
+ member_method {
+ name: "export_saved_model"
+ argspec: "args=[\'self\', \'export_dir_base\', \'serving_input_receiver_fn\', \'assets_extra\', \'as_text\', \'checkpoint_path\'], varargs=None, keywords=None, defaults=[\'None\', \'False\', \'None\'], "
+ }
+ member_method {
+ name: "export_savedmodel"
+ argspec: "args=[\'self\', \'export_dir_base\', \'serving_input_receiver_fn\', \'assets_extra\', \'as_text\', \'checkpoint_path\', \'strip_default_attrs\'], varargs=None, keywords=None, defaults=[\'None\', \'False\', \'None\', \'False\'], "
+ }
+ member_method {
+ name: "get_variable_names"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_variable_value"
+ argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "latest_checkpoint"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "predict"
+ argspec: "args=[\'self\', \'input_fn\', \'predict_keys\', \'hooks\', \'checkpoint_path\', \'yield_single_examples\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'True\'], "
+ }
+ member_method {
+ name: "train"
+ argspec: "args=[\'self\', \'input_fn\', \'hooks\', \'steps\', \'max_steps\', \'saving_listeners\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'None\'], "
+ }
+}
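Editor's note: a regression counterpart, mainly to show predict() and the yield_single_examples flag from the argspec above; the data is synthetic and the 'predictions' key follows the usual regression head output.

import numpy as np
import tensorflow as tf

fc = [tf.feature_column.bucketized_column(
    tf.feature_column.numeric_column('x'), boundaries=[0.25, 0.5, 0.75])]

regressor = tf.estimator.BoostedTreesRegressor(
    feature_columns=fc, n_batches_per_layer=1, n_trees=20, max_depth=3)

x = np.random.rand(200).astype(np.float32)
train_input_fn = tf.estimator.inputs.numpy_input_fn(
    x={'x': x}, y=2.0 * x + 1.0, batch_size=200, num_epochs=None, shuffle=True)
regressor.train(input_fn=train_input_fn, max_steps=50)

predict_input_fn = tf.estimator.inputs.numpy_input_fn(
    x={'x': np.array([0.1, 0.9], dtype=np.float32)}, shuffle=False)
for pred in regressor.predict(predict_input_fn, yield_single_examples=True):
  print(pred['predictions'])  # one dict per input example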
diff --git a/tensorflow/tools/api/golden/tensorflow.estimator.-d-n-n-classifier.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.estimator.-d-n-n-classifier.pbtxt
index cd4f72fcf8..718f415a77 100644
--- a/tensorflow/tools/api/golden/tensorflow.estimator.-d-n-n-classifier.pbtxt
+++ b/tensorflow/tools/api/golden/v1/tensorflow.estimator.-d-n-n-classifier.pbtxt
@@ -21,13 +21,21 @@ tf_class {
}
member_method {
name: "__init__"
- argspec: "args=[\'self\', \'hidden_units\', \'feature_columns\', \'model_dir\', \'n_classes\', \'weight_column\', \'label_vocabulary\', \'optimizer\', \'activation_fn\', \'dropout\', \'input_layer_partitioner\', \'config\', \'warm_start_from\', \'loss_reduction\'], varargs=None, keywords=None, defaults=[\'None\', \'2\', \'None\', \'None\', \'Adagrad\', \'<function relu instance>\', \'None\', \'None\', \'None\', \'None\', \'weighted_sum\'], "
+ argspec: "args=[\'self\', \'hidden_units\', \'feature_columns\', \'model_dir\', \'n_classes\', \'weight_column\', \'label_vocabulary\', \'optimizer\', \'activation_fn\', \'dropout\', \'input_layer_partitioner\', \'config\', \'warm_start_from\', \'loss_reduction\', \'batch_norm\'], varargs=None, keywords=None, defaults=[\'None\', \'2\', \'None\', \'None\', \'Adagrad\', \'<function relu instance>\', \'None\', \'None\', \'None\', \'None\', \'weighted_sum\', \'False\'], "
+ }
+ member_method {
+ name: "eval_dir"
+ argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
}
member_method {
name: "evaluate"
argspec: "args=[\'self\', \'input_fn\', \'steps\', \'hooks\', \'checkpoint_path\', \'name\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'None\'], "
}
member_method {
+ name: "export_saved_model"
+ argspec: "args=[\'self\', \'export_dir_base\', \'serving_input_receiver_fn\', \'assets_extra\', \'as_text\', \'checkpoint_path\'], varargs=None, keywords=None, defaults=[\'None\', \'False\', \'None\'], "
+ }
+ member_method {
name: "export_savedmodel"
argspec: "args=[\'self\', \'export_dir_base\', \'serving_input_receiver_fn\', \'assets_extra\', \'as_text\', \'checkpoint_path\', \'strip_default_attrs\'], varargs=None, keywords=None, defaults=[\'None\', \'False\', \'None\', \'False\'], "
}
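Editor's note: the DNNClassifier hunk above adds a batch_norm flag (default False). A minimal construction showing it; the column, hidden units and model_dir are placeholders.

import tensorflow as tf

feature_columns = [tf.feature_column.numeric_column('x', shape=[4])]
classifier = tf.estimator.DNNClassifier(
    hidden_units=[16, 8],
    feature_columns=feature_columns,
    n_classes=3,
    batch_norm=True,          # new argument in this golden file, default False
    model_dir='/tmp/dnn_bn_demo')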
diff --git a/tensorflow/tools/api/golden/tensorflow.estimator.-d-n-n-linear-combined-classifier.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.estimator.-d-n-n-linear-combined-classifier.pbtxt
index 303fd74a64..b23c019d6c 100644
--- a/tensorflow/tools/api/golden/tensorflow.estimator.-d-n-n-linear-combined-classifier.pbtxt
+++ b/tensorflow/tools/api/golden/v1/tensorflow.estimator.-d-n-n-linear-combined-classifier.pbtxt
@@ -21,13 +21,21 @@ tf_class {
}
member_method {
name: "__init__"
- argspec: "args=[\'self\', \'model_dir\', \'linear_feature_columns\', \'linear_optimizer\', \'dnn_feature_columns\', \'dnn_optimizer\', \'dnn_hidden_units\', \'dnn_activation_fn\', \'dnn_dropout\', \'n_classes\', \'weight_column\', \'label_vocabulary\', \'input_layer_partitioner\', \'config\', \'warm_start_from\', \'loss_reduction\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'Ftrl\', \'None\', \'Adagrad\', \'None\', \'<function relu instance>\', \'None\', \'2\', \'None\', \'None\', \'None\', \'None\', \'None\', \'weighted_sum\'], "
+ argspec: "args=[\'self\', \'model_dir\', \'linear_feature_columns\', \'linear_optimizer\', \'dnn_feature_columns\', \'dnn_optimizer\', \'dnn_hidden_units\', \'dnn_activation_fn\', \'dnn_dropout\', \'n_classes\', \'weight_column\', \'label_vocabulary\', \'input_layer_partitioner\', \'config\', \'warm_start_from\', \'loss_reduction\', \'batch_norm\', \'linear_sparse_combiner\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'Ftrl\', \'None\', \'Adagrad\', \'None\', \'<function relu instance>\', \'None\', \'2\', \'None\', \'None\', \'None\', \'None\', \'None\', \'weighted_sum\', \'False\', \'sum\'], "
+ }
+ member_method {
+ name: "eval_dir"
+ argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
}
member_method {
name: "evaluate"
argspec: "args=[\'self\', \'input_fn\', \'steps\', \'hooks\', \'checkpoint_path\', \'name\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'None\'], "
}
member_method {
+ name: "export_saved_model"
+ argspec: "args=[\'self\', \'export_dir_base\', \'serving_input_receiver_fn\', \'assets_extra\', \'as_text\', \'checkpoint_path\'], varargs=None, keywords=None, defaults=[\'None\', \'False\', \'None\'], "
+ }
+ member_method {
name: "export_savedmodel"
argspec: "args=[\'self\', \'export_dir_base\', \'serving_input_receiver_fn\', \'assets_extra\', \'as_text\', \'checkpoint_path\', \'strip_default_attrs\'], varargs=None, keywords=None, defaults=[\'None\', \'False\', \'None\', \'False\'], "
}
diff --git a/tensorflow/tools/api/golden/tensorflow.estimator.-d-n-n-linear-combined-regressor.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.estimator.-d-n-n-linear-combined-regressor.pbtxt
index c97ea7969e..caa9e3f1de 100644
--- a/tensorflow/tools/api/golden/tensorflow.estimator.-d-n-n-linear-combined-regressor.pbtxt
+++ b/tensorflow/tools/api/golden/v1/tensorflow.estimator.-d-n-n-linear-combined-regressor.pbtxt
@@ -21,13 +21,21 @@ tf_class {
}
member_method {
name: "__init__"
- argspec: "args=[\'self\', \'model_dir\', \'linear_feature_columns\', \'linear_optimizer\', \'dnn_feature_columns\', \'dnn_optimizer\', \'dnn_hidden_units\', \'dnn_activation_fn\', \'dnn_dropout\', \'label_dimension\', \'weight_column\', \'input_layer_partitioner\', \'config\', \'warm_start_from\', \'loss_reduction\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'Ftrl\', \'None\', \'Adagrad\', \'None\', \'<function relu instance>\', \'None\', \'1\', \'None\', \'None\', \'None\', \'None\', \'weighted_sum\'], "
+ argspec: "args=[\'self\', \'model_dir\', \'linear_feature_columns\', \'linear_optimizer\', \'dnn_feature_columns\', \'dnn_optimizer\', \'dnn_hidden_units\', \'dnn_activation_fn\', \'dnn_dropout\', \'label_dimension\', \'weight_column\', \'input_layer_partitioner\', \'config\', \'warm_start_from\', \'loss_reduction\', \'batch_norm\', \'linear_sparse_combiner\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'Ftrl\', \'None\', \'Adagrad\', \'None\', \'<function relu instance>\', \'None\', \'1\', \'None\', \'None\', \'None\', \'None\', \'weighted_sum\', \'False\', \'sum\'], "
+ }
+ member_method {
+ name: "eval_dir"
+ argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
}
member_method {
name: "evaluate"
argspec: "args=[\'self\', \'input_fn\', \'steps\', \'hooks\', \'checkpoint_path\', \'name\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'None\'], "
}
member_method {
+ name: "export_saved_model"
+ argspec: "args=[\'self\', \'export_dir_base\', \'serving_input_receiver_fn\', \'assets_extra\', \'as_text\', \'checkpoint_path\'], varargs=None, keywords=None, defaults=[\'None\', \'False\', \'None\'], "
+ }
+ member_method {
name: "export_savedmodel"
argspec: "args=[\'self\', \'export_dir_base\', \'serving_input_receiver_fn\', \'assets_extra\', \'as_text\', \'checkpoint_path\', \'strip_default_attrs\'], varargs=None, keywords=None, defaults=[\'None\', \'False\', \'None\', \'False\'], "
}
diff --git a/tensorflow/tools/api/golden/tensorflow.estimator.-d-n-n-regressor.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.estimator.-d-n-n-regressor.pbtxt
index 4b5b5bf0e3..1f5e650940 100644
--- a/tensorflow/tools/api/golden/tensorflow.estimator.-d-n-n-regressor.pbtxt
+++ b/tensorflow/tools/api/golden/v1/tensorflow.estimator.-d-n-n-regressor.pbtxt
@@ -21,13 +21,21 @@ tf_class {
}
member_method {
name: "__init__"
- argspec: "args=[\'self\', \'hidden_units\', \'feature_columns\', \'model_dir\', \'label_dimension\', \'weight_column\', \'optimizer\', \'activation_fn\', \'dropout\', \'input_layer_partitioner\', \'config\', \'warm_start_from\', \'loss_reduction\'], varargs=None, keywords=None, defaults=[\'None\', \'1\', \'None\', \'Adagrad\', \'<function relu instance>\', \'None\', \'None\', \'None\', \'None\', \'weighted_sum\'], "
+ argspec: "args=[\'self\', \'hidden_units\', \'feature_columns\', \'model_dir\', \'label_dimension\', \'weight_column\', \'optimizer\', \'activation_fn\', \'dropout\', \'input_layer_partitioner\', \'config\', \'warm_start_from\', \'loss_reduction\', \'batch_norm\'], varargs=None, keywords=None, defaults=[\'None\', \'1\', \'None\', \'Adagrad\', \'<function relu instance>\', \'None\', \'None\', \'None\', \'None\', \'weighted_sum\', \'False\'], "
+ }
+ member_method {
+ name: "eval_dir"
+ argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
}
member_method {
name: "evaluate"
argspec: "args=[\'self\', \'input_fn\', \'steps\', \'hooks\', \'checkpoint_path\', \'name\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'None\'], "
}
member_method {
+ name: "export_saved_model"
+ argspec: "args=[\'self\', \'export_dir_base\', \'serving_input_receiver_fn\', \'assets_extra\', \'as_text\', \'checkpoint_path\'], varargs=None, keywords=None, defaults=[\'None\', \'False\', \'None\'], "
+ }
+ member_method {
name: "export_savedmodel"
argspec: "args=[\'self\', \'export_dir_base\', \'serving_input_receiver_fn\', \'assets_extra\', \'as_text\', \'checkpoint_path\', \'strip_default_attrs\'], varargs=None, keywords=None, defaults=[\'None\', \'False\', \'None\', \'False\'], "
}
diff --git a/tensorflow/tools/api/golden/tensorflow.estimator.-estimator-spec.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.estimator.-estimator-spec.pbtxt
index aa6ac46613..aa6ac46613 100644
--- a/tensorflow/tools/api/golden/tensorflow.estimator.-estimator-spec.pbtxt
+++ b/tensorflow/tools/api/golden/v1/tensorflow.estimator.-estimator-spec.pbtxt
diff --git a/tensorflow/tools/api/golden/tensorflow.estimator.-estimator.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.estimator.-estimator.pbtxt
index 42a0d59521..ebd3869c9b 100644
--- a/tensorflow/tools/api/golden/tensorflow.estimator.-estimator.pbtxt
+++ b/tensorflow/tools/api/golden/v1/tensorflow.estimator.-estimator.pbtxt
@@ -23,10 +23,18 @@ tf_class {
argspec: "args=[\'self\', \'model_fn\', \'model_dir\', \'config\', \'params\', \'warm_start_from\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'None\'], "
}
member_method {
+ name: "eval_dir"
+ argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
name: "evaluate"
argspec: "args=[\'self\', \'input_fn\', \'steps\', \'hooks\', \'checkpoint_path\', \'name\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'None\'], "
}
member_method {
+ name: "export_saved_model"
+ argspec: "args=[\'self\', \'export_dir_base\', \'serving_input_receiver_fn\', \'assets_extra\', \'as_text\', \'checkpoint_path\'], varargs=None, keywords=None, defaults=[\'None\', \'False\', \'None\'], "
+ }
+ member_method {
name: "export_savedmodel"
argspec: "args=[\'self\', \'export_dir_base\', \'serving_input_receiver_fn\', \'assets_extra\', \'as_text\', \'checkpoint_path\', \'strip_default_attrs\'], varargs=None, keywords=None, defaults=[\'None\', \'False\', \'None\', \'False\'], "
}
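Editor's note: the Estimator hunk above adds export_saved_model alongside the older export_savedmodel. A self-contained sketch of both spellings; the regressor, feature spec and export paths are assumptions for the example.

import numpy as np
import tensorflow as tf

feature_columns = [tf.feature_column.numeric_column('x', shape=[1])]
estimator = tf.estimator.LinearRegressor(feature_columns)

train_input_fn = tf.estimator.inputs.numpy_input_fn(
    x={'x': np.random.rand(64, 1).astype(np.float32)},
    y=np.random.rand(64, 1).astype(np.float32),
    batch_size=8, num_epochs=None, shuffle=True)
estimator.train(train_input_fn, steps=10)  # a checkpoint is needed before export

feature_spec = {'x': tf.FixedLenFeature([1], tf.float32)}
serving_input_receiver_fn = (
    tf.estimator.export.build_parsing_serving_input_receiver_fn(feature_spec))

# New spelling from this diff; note its argspec has no strip_default_attrs.
export_dir = estimator.export_saved_model('/tmp/export_demo',
                                          serving_input_receiver_fn)

# Older spelling, still present per the golden file above.
estimator.export_savedmodel('/tmp/export_demo', serving_input_receiver_fn,
                            strip_default_attrs=True)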
diff --git a/tensorflow/tools/api/golden/tensorflow.estimator.-eval-spec.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.estimator.-eval-spec.pbtxt
index db83ba1bd8..db83ba1bd8 100644
--- a/tensorflow/tools/api/golden/tensorflow.estimator.-eval-spec.pbtxt
+++ b/tensorflow/tools/api/golden/v1/tensorflow.estimator.-eval-spec.pbtxt
diff --git a/tensorflow/tools/api/golden/tensorflow.estimator.-exporter.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.estimator.-exporter.pbtxt
index 035af70e52..035af70e52 100644
--- a/tensorflow/tools/api/golden/tensorflow.estimator.-exporter.pbtxt
+++ b/tensorflow/tools/api/golden/v1/tensorflow.estimator.-exporter.pbtxt
diff --git a/tensorflow/tools/api/golden/tensorflow.estimator.-final-exporter.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.estimator.-final-exporter.pbtxt
index ee37b1fa21..ee37b1fa21 100644
--- a/tensorflow/tools/api/golden/tensorflow.estimator.-final-exporter.pbtxt
+++ b/tensorflow/tools/api/golden/v1/tensorflow.estimator.-final-exporter.pbtxt
diff --git a/tensorflow/tools/api/golden/tensorflow.estimator.-latest-exporter.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.estimator.-latest-exporter.pbtxt
index 2a9d029029..2a9d029029 100644
--- a/tensorflow/tools/api/golden/tensorflow.estimator.-latest-exporter.pbtxt
+++ b/tensorflow/tools/api/golden/v1/tensorflow.estimator.-latest-exporter.pbtxt
diff --git a/tensorflow/tools/api/golden/tensorflow.estimator.-linear-classifier.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.estimator.-linear-classifier.pbtxt
index 2de52d6c57..53ec5a0c78 100644
--- a/tensorflow/tools/api/golden/tensorflow.estimator.-linear-classifier.pbtxt
+++ b/tensorflow/tools/api/golden/v1/tensorflow.estimator.-linear-classifier.pbtxt
@@ -21,13 +21,21 @@ tf_class {
}
member_method {
name: "__init__"
- argspec: "args=[\'self\', \'feature_columns\', \'model_dir\', \'n_classes\', \'weight_column\', \'label_vocabulary\', \'optimizer\', \'config\', \'partitioner\', \'warm_start_from\', \'loss_reduction\'], varargs=None, keywords=None, defaults=[\'None\', \'2\', \'None\', \'None\', \'Ftrl\', \'None\', \'None\', \'None\', \'weighted_sum\'], "
+ argspec: "args=[\'self\', \'feature_columns\', \'model_dir\', \'n_classes\', \'weight_column\', \'label_vocabulary\', \'optimizer\', \'config\', \'partitioner\', \'warm_start_from\', \'loss_reduction\', \'sparse_combiner\'], varargs=None, keywords=None, defaults=[\'None\', \'2\', \'None\', \'None\', \'Ftrl\', \'None\', \'None\', \'None\', \'weighted_sum\', \'sum\'], "
+ }
+ member_method {
+ name: "eval_dir"
+ argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
}
member_method {
name: "evaluate"
argspec: "args=[\'self\', \'input_fn\', \'steps\', \'hooks\', \'checkpoint_path\', \'name\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'None\'], "
}
member_method {
+ name: "export_saved_model"
+ argspec: "args=[\'self\', \'export_dir_base\', \'serving_input_receiver_fn\', \'assets_extra\', \'as_text\', \'checkpoint_path\'], varargs=None, keywords=None, defaults=[\'None\', \'False\', \'None\'], "
+ }
+ member_method {
name: "export_savedmodel"
argspec: "args=[\'self\', \'export_dir_base\', \'serving_input_receiver_fn\', \'assets_extra\', \'as_text\', \'checkpoint_path\', \'strip_default_attrs\'], varargs=None, keywords=None, defaults=[\'None\', \'False\', \'None\', \'False\'], "
}
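Editor's note: the LinearClassifier hunk above adds a sparse_combiner argument (default 'sum'). A minimal construction passing it explicitly; the column is a placeholder.

import tensorflow as tf

terms = tf.feature_column.categorical_column_with_hash_bucket(
    'terms', hash_bucket_size=1000)
classifier = tf.estimator.LinearClassifier(
    feature_columns=[terms],
    sparse_combiner='mean')   # 'sum' is the default per the argspec above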
diff --git a/tensorflow/tools/api/golden/tensorflow.estimator.-linear-regressor.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.estimator.-linear-regressor.pbtxt
index e552f33720..3791162619 100644
--- a/tensorflow/tools/api/golden/tensorflow.estimator.-linear-regressor.pbtxt
+++ b/tensorflow/tools/api/golden/v1/tensorflow.estimator.-linear-regressor.pbtxt
@@ -21,13 +21,21 @@ tf_class {
}
member_method {
name: "__init__"
- argspec: "args=[\'self\', \'feature_columns\', \'model_dir\', \'label_dimension\', \'weight_column\', \'optimizer\', \'config\', \'partitioner\', \'warm_start_from\', \'loss_reduction\'], varargs=None, keywords=None, defaults=[\'None\', \'1\', \'None\', \'Ftrl\', \'None\', \'None\', \'None\', \'weighted_sum\'], "
+ argspec: "args=[\'self\', \'feature_columns\', \'model_dir\', \'label_dimension\', \'weight_column\', \'optimizer\', \'config\', \'partitioner\', \'warm_start_from\', \'loss_reduction\', \'sparse_combiner\'], varargs=None, keywords=None, defaults=[\'None\', \'1\', \'None\', \'Ftrl\', \'None\', \'None\', \'None\', \'weighted_sum\', \'sum\'], "
+ }
+ member_method {
+ name: "eval_dir"
+ argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
}
member_method {
name: "evaluate"
argspec: "args=[\'self\', \'input_fn\', \'steps\', \'hooks\', \'checkpoint_path\', \'name\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'None\'], "
}
member_method {
+ name: "export_saved_model"
+ argspec: "args=[\'self\', \'export_dir_base\', \'serving_input_receiver_fn\', \'assets_extra\', \'as_text\', \'checkpoint_path\'], varargs=None, keywords=None, defaults=[\'None\', \'False\', \'None\'], "
+ }
+ member_method {
name: "export_savedmodel"
argspec: "args=[\'self\', \'export_dir_base\', \'serving_input_receiver_fn\', \'assets_extra\', \'as_text\', \'checkpoint_path\', \'strip_default_attrs\'], varargs=None, keywords=None, defaults=[\'None\', \'False\', \'None\', \'False\'], "
}
diff --git a/tensorflow/tools/api/golden/tensorflow.estimator.-mode-keys.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.estimator.-mode-keys.pbtxt
index 6a1c24fa63..6a1c24fa63 100644
--- a/tensorflow/tools/api/golden/tensorflow.estimator.-mode-keys.pbtxt
+++ b/tensorflow/tools/api/golden/v1/tensorflow.estimator.-mode-keys.pbtxt
diff --git a/tensorflow/tools/api/golden/v1/tensorflow.estimator.-run-config.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.estimator.-run-config.pbtxt
new file mode 100644
index 0000000000..269e18a0a7
--- /dev/null
+++ b/tensorflow/tools/api/golden/v1/tensorflow.estimator.-run-config.pbtxt
@@ -0,0 +1,105 @@
+path: "tensorflow.estimator.RunConfig"
+tf_class {
+ is_instance: "<class \'tensorflow.python.estimator.run_config.RunConfig\'>"
+ is_instance: "<type \'object\'>"
+ member {
+ name: "cluster_spec"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "device_fn"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "eval_distribute"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "evaluation_master"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "global_id_in_cluster"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "is_chief"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "keep_checkpoint_every_n_hours"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "keep_checkpoint_max"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "log_step_count_steps"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "master"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "model_dir"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "num_ps_replicas"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "num_worker_replicas"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "protocol"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "save_checkpoints_secs"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "save_checkpoints_steps"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "save_summary_steps"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "service"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "session_config"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "task_id"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "task_type"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "tf_random_seed"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "train_distribute"
+ mtype: "<type \'property\'>"
+ }
+ member_method {
+ name: "__init__"
+ argspec: "args=[\'self\', \'model_dir\', \'tf_random_seed\', \'save_summary_steps\', \'save_checkpoints_steps\', \'save_checkpoints_secs\', \'session_config\', \'keep_checkpoint_max\', \'keep_checkpoint_every_n_hours\', \'log_step_count_steps\', \'train_distribute\', \'device_fn\', \'protocol\', \'eval_distribute\', \'experimental_distribute\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'100\', \'<object object instance>\', \'<object object instance>\', \'None\', \'5\', \'10000\', \'100\', \'None\', \'None\', \'None\', \'None\', \'None\'], "
+ }
+ member_method {
+ name: "replace"
+ argspec: "args=[\'self\'], varargs=None, keywords=kwargs, defaults=None"
+ }
+}
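Editor's note: an illustrative RunConfig built from the constructor argspec above; every value is a placeholder chosen for the sketch.

import tensorflow as tf

config = tf.estimator.RunConfig(
    model_dir='/tmp/run_config_demo',
    tf_random_seed=42,
    save_summary_steps=100,
    save_checkpoints_steps=500,   # mutually exclusive with save_checkpoints_secs
    keep_checkpoint_max=5,
    log_step_count_steps=100)

# Any estimator accepts it via the config argument.
estimator = tf.estimator.LinearRegressor(
    feature_columns=[tf.feature_column.numeric_column('x')],
    config=config)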
diff --git a/tensorflow/tools/api/golden/tensorflow.estimator.-train-spec.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.estimator.-train-spec.pbtxt
index 7d2f77438a..7d2f77438a 100644
--- a/tensorflow/tools/api/golden/tensorflow.estimator.-train-spec.pbtxt
+++ b/tensorflow/tools/api/golden/v1/tensorflow.estimator.-train-spec.pbtxt
diff --git a/tensorflow/tools/api/golden/tensorflow.estimator.-vocab-info.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.estimator.-vocab-info.pbtxt
index 5301b94eb3..b6942cb7ed 100644
--- a/tensorflow/tools/api/golden/tensorflow.estimator.-vocab-info.pbtxt
+++ b/tensorflow/tools/api/golden/v1/tensorflow.estimator.-vocab-info.pbtxt
@@ -4,6 +4,10 @@ tf_class {
is_instance: "<class \'tensorflow.python.training.warm_starting_util.VocabInfo\'>"
is_instance: "<type \'tuple\'>"
member {
+ name: "axis"
+ mtype: "<type \'property\'>"
+ }
+ member {
name: "backup_initializer"
mtype: "<type \'property\'>"
}
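Editor's note: a heavily hedged sketch around the new axis property on VocabInfo. VocabInfo is a namedtuple; the field names beyond axis and backup_initializer, the variable name, and the vocabulary files below are assumptions, not something this diff shows.

import tensorflow as tf

vocab_info = tf.estimator.VocabInfo(
    new_vocab='new_vocab.txt',    # hypothetical vocabulary files
    new_vocab_size=100,
    num_oov_buckets=1,
    old_vocab='old_vocab.txt',
    axis=0)                       # the property added in this diff

ws = tf.estimator.WarmStartSettings(
    ckpt_to_initialize_from='/tmp/previous_model',
    var_name_to_vocab_info={'linear/linear_model/terms/weights': vocab_info})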
diff --git a/tensorflow/tools/api/golden/tensorflow.estimator.-warm-start-settings.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.estimator.-warm-start-settings.pbtxt
index 43f5343359..43f5343359 100644
--- a/tensorflow/tools/api/golden/tensorflow.estimator.-warm-start-settings.pbtxt
+++ b/tensorflow/tools/api/golden/v1/tensorflow.estimator.-warm-start-settings.pbtxt
diff --git a/tensorflow/tools/api/golden/tensorflow.estimator.export.-classification-output.__metaclass__.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.estimator.export.-classification-output.__metaclass__.pbtxt
index 3cf7af8da9..3cf7af8da9 100644
--- a/tensorflow/tools/api/golden/tensorflow.estimator.export.-classification-output.__metaclass__.pbtxt
+++ b/tensorflow/tools/api/golden/v1/tensorflow.estimator.export.-classification-output.__metaclass__.pbtxt
diff --git a/tensorflow/tools/api/golden/tensorflow.estimator.export.-classification-output.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.estimator.export.-classification-output.pbtxt
index 2df1840c4a..2df1840c4a 100644
--- a/tensorflow/tools/api/golden/tensorflow.estimator.export.-classification-output.pbtxt
+++ b/tensorflow/tools/api/golden/v1/tensorflow.estimator.export.-classification-output.pbtxt
diff --git a/tensorflow/tools/api/golden/tensorflow.estimator.export.-export-output.__metaclass__.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.estimator.export.-export-output.__metaclass__.pbtxt
index 5d165ccbf9..5d165ccbf9 100644
--- a/tensorflow/tools/api/golden/tensorflow.estimator.export.-export-output.__metaclass__.pbtxt
+++ b/tensorflow/tools/api/golden/v1/tensorflow.estimator.export.-export-output.__metaclass__.pbtxt
diff --git a/tensorflow/tools/api/golden/tensorflow.estimator.export.-export-output.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.estimator.export.-export-output.pbtxt
index fa62e8ced8..fa62e8ced8 100644
--- a/tensorflow/tools/api/golden/tensorflow.estimator.export.-export-output.pbtxt
+++ b/tensorflow/tools/api/golden/v1/tensorflow.estimator.export.-export-output.pbtxt
diff --git a/tensorflow/tools/api/golden/tensorflow.estimator.export.-predict-output.__metaclass__.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.estimator.export.-predict-output.__metaclass__.pbtxt
index 743495ba98..743495ba98 100644
--- a/tensorflow/tools/api/golden/tensorflow.estimator.export.-predict-output.__metaclass__.pbtxt
+++ b/tensorflow/tools/api/golden/v1/tensorflow.estimator.export.-predict-output.__metaclass__.pbtxt
diff --git a/tensorflow/tools/api/golden/tensorflow.estimator.export.-predict-output.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.estimator.export.-predict-output.pbtxt
index e0160b10ce..e0160b10ce 100644
--- a/tensorflow/tools/api/golden/tensorflow.estimator.export.-predict-output.pbtxt
+++ b/tensorflow/tools/api/golden/v1/tensorflow.estimator.export.-predict-output.pbtxt
diff --git a/tensorflow/tools/api/golden/tensorflow.estimator.export.-regression-output.__metaclass__.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.estimator.export.-regression-output.__metaclass__.pbtxt
index dbf4e3dec8..dbf4e3dec8 100644
--- a/tensorflow/tools/api/golden/tensorflow.estimator.export.-regression-output.__metaclass__.pbtxt
+++ b/tensorflow/tools/api/golden/v1/tensorflow.estimator.export.-regression-output.__metaclass__.pbtxt
diff --git a/tensorflow/tools/api/golden/tensorflow.estimator.export.-regression-output.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.estimator.export.-regression-output.pbtxt
index 905f0e0553..905f0e0553 100644
--- a/tensorflow/tools/api/golden/tensorflow.estimator.export.-regression-output.pbtxt
+++ b/tensorflow/tools/api/golden/v1/tensorflow.estimator.export.-regression-output.pbtxt
diff --git a/tensorflow/tools/api/golden/tensorflow.estimator.export.-serving-input-receiver.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.estimator.export.-serving-input-receiver.pbtxt
index d71b2a4300..d71b2a4300 100644
--- a/tensorflow/tools/api/golden/tensorflow.estimator.export.-serving-input-receiver.pbtxt
+++ b/tensorflow/tools/api/golden/v1/tensorflow.estimator.export.-serving-input-receiver.pbtxt
diff --git a/tensorflow/tools/api/golden/tensorflow.estimator.export.-tensor-serving-input-receiver.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.estimator.export.-tensor-serving-input-receiver.pbtxt
index 4fe92643bf..4fe92643bf 100644
--- a/tensorflow/tools/api/golden/tensorflow.estimator.export.-tensor-serving-input-receiver.pbtxt
+++ b/tensorflow/tools/api/golden/v1/tensorflow.estimator.export.-tensor-serving-input-receiver.pbtxt
diff --git a/tensorflow/tools/api/golden/tensorflow.estimator.export.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.estimator.export.pbtxt
index bd72f6cd79..bd72f6cd79 100644
--- a/tensorflow/tools/api/golden/tensorflow.estimator.export.pbtxt
+++ b/tensorflow/tools/api/golden/v1/tensorflow.estimator.export.pbtxt
diff --git a/tensorflow/tools/api/golden/tensorflow.estimator.inputs.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.estimator.inputs.pbtxt
index b318fea1f8..b318fea1f8 100644
--- a/tensorflow/tools/api/golden/tensorflow.estimator.inputs.pbtxt
+++ b/tensorflow/tools/api/golden/v1/tensorflow.estimator.inputs.pbtxt
diff --git a/tensorflow/tools/api/golden/tensorflow.estimator.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.estimator.pbtxt
index 4946f2c51a..f1d204a3ef 100644
--- a/tensorflow/tools/api/golden/tensorflow.estimator.pbtxt
+++ b/tensorflow/tools/api/golden/v1/tensorflow.estimator.pbtxt
@@ -9,6 +9,10 @@ tf_module {
mtype: "<type \'type\'>"
}
member {
+ name: "BestExporter"
+ mtype: "<type \'type\'>"
+ }
+ member {
name: "BoostedTreesClassifier"
mtype: "<type \'type\'>"
}
diff --git a/tensorflow/tools/api/golden/tensorflow.feature_column.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.feature_column.pbtxt
index 24a58fb118..f06e798953 100644
--- a/tensorflow/tools/api/golden/tensorflow.feature_column.pbtxt
+++ b/tensorflow/tools/api/golden/v1/tensorflow.feature_column.pbtxt
@@ -34,7 +34,7 @@ tf_module {
}
member_method {
name: "input_layer"
- argspec: "args=[\'features\', \'feature_columns\', \'weight_collections\', \'trainable\', \'cols_to_vars\'], varargs=None, keywords=None, defaults=[\'None\', \'True\', \'None\'], "
+ argspec: "args=[\'features\', \'feature_columns\', \'weight_collections\', \'trainable\', \'cols_to_vars\', \'cols_to_output_tensors\'], varargs=None, keywords=None, defaults=[\'None\', \'True\', \'None\', \'None\'], "
}
member_method {
name: "linear_model"
diff --git a/tensorflow/tools/api/golden/tensorflow.gfile.-fast-g-file.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.gfile.-fast-g-file.pbtxt
index eecfaffd0a..eecfaffd0a 100644
--- a/tensorflow/tools/api/golden/tensorflow.gfile.-fast-g-file.pbtxt
+++ b/tensorflow/tools/api/golden/v1/tensorflow.gfile.-fast-g-file.pbtxt
diff --git a/tensorflow/tools/api/golden/tensorflow.gfile.-g-file.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.gfile.-g-file.pbtxt
index 305251059d..305251059d 100644
--- a/tensorflow/tools/api/golden/tensorflow.gfile.-g-file.pbtxt
+++ b/tensorflow/tools/api/golden/v1/tensorflow.gfile.-g-file.pbtxt
diff --git a/tensorflow/tools/api/golden/tensorflow.gfile.-open.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.gfile.-open.pbtxt
index 6e8894180a..6e8894180a 100644
--- a/tensorflow/tools/api/golden/tensorflow.gfile.-open.pbtxt
+++ b/tensorflow/tools/api/golden/v1/tensorflow.gfile.-open.pbtxt
diff --git a/tensorflow/tools/api/golden/tensorflow.gfile.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.gfile.pbtxt
index 65b55a8b7c..65b55a8b7c 100644
--- a/tensorflow/tools/api/golden/tensorflow.gfile.pbtxt
+++ b/tensorflow/tools/api/golden/v1/tensorflow.gfile.pbtxt
diff --git a/tensorflow/tools/api/golden/v1/tensorflow.glorot_normal_initializer.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.glorot_normal_initializer.pbtxt
new file mode 100644
index 0000000000..483d1f8ba0
--- /dev/null
+++ b/tensorflow/tools/api/golden/v1/tensorflow.glorot_normal_initializer.pbtxt
@@ -0,0 +1,19 @@
+path: "tensorflow.glorot_normal_initializer"
+tf_class {
+ is_instance: "<class \'tensorflow.python.ops.init_ops.GlorotNormal\'>"
+ is_instance: "<class \'tensorflow.python.ops.init_ops.VarianceScaling\'>"
+ is_instance: "<class \'tensorflow.python.ops.init_ops.Initializer\'>"
+ is_instance: "<type \'object\'>"
+ member_method {
+ name: "__init__"
+ argspec: "args=[\'self\', \'seed\', \'dtype\'], varargs=None, keywords=None, defaults=[\'None\', \"<dtype: \'float32\'>\"], "
+ }
+ member_method {
+ name: "from_config"
+ argspec: "args=[\'cls\', \'config\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_config"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+}
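Editor's note: usage sketch for the initializer class documented above (its uniform counterpart a few files below works the same way); the variable name and shape are arbitrary.

import tensorflow as tf

init = tf.glorot_normal_initializer(seed=7)
w = tf.get_variable('w', shape=[128, 64], initializer=init)

# Round-trips through the config methods listed in the golden file.
same_init = tf.glorot_normal_initializer.from_config(init.get_config())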
diff --git a/tensorflow/tools/api/golden/v1/tensorflow.glorot_uniform_initializer.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.glorot_uniform_initializer.pbtxt
new file mode 100644
index 0000000000..bb8540d0fd
--- /dev/null
+++ b/tensorflow/tools/api/golden/v1/tensorflow.glorot_uniform_initializer.pbtxt
@@ -0,0 +1,19 @@
+path: "tensorflow.glorot_uniform_initializer"
+tf_class {
+ is_instance: "<class \'tensorflow.python.ops.init_ops.GlorotUniform\'>"
+ is_instance: "<class \'tensorflow.python.ops.init_ops.VarianceScaling\'>"
+ is_instance: "<class \'tensorflow.python.ops.init_ops.Initializer\'>"
+ is_instance: "<type \'object\'>"
+ member_method {
+ name: "__init__"
+ argspec: "args=[\'self\', \'seed\', \'dtype\'], varargs=None, keywords=None, defaults=[\'None\', \"<dtype: \'float32\'>\"], "
+ }
+ member_method {
+ name: "from_config"
+ argspec: "args=[\'cls\', \'config\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_config"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+}
diff --git a/tensorflow/tools/api/golden/tensorflow.graph_util.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.graph_util.pbtxt
index eeabf845dc..eeabf845dc 100644
--- a/tensorflow/tools/api/golden/tensorflow.graph_util.pbtxt
+++ b/tensorflow/tools/api/golden/v1/tensorflow.graph_util.pbtxt
diff --git a/tensorflow/tools/api/golden/tensorflow.image.-resize-method.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.image.-resize-method.pbtxt
index dbc360b13e..dbc360b13e 100644
--- a/tensorflow/tools/api/golden/tensorflow.image.-resize-method.pbtxt
+++ b/tensorflow/tools/api/golden/v1/tensorflow.image.-resize-method.pbtxt
diff --git a/tensorflow/tools/api/golden/v1/tensorflow.image.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.image.pbtxt
new file mode 100644
index 0000000000..5c46dc5ee7
--- /dev/null
+++ b/tensorflow/tools/api/golden/v1/tensorflow.image.pbtxt
@@ -0,0 +1,251 @@
+path: "tensorflow.image"
+tf_module {
+ member {
+ name: "ResizeMethod"
+ mtype: "<type \'type\'>"
+ }
+ member_method {
+ name: "adjust_brightness"
+ argspec: "args=[\'image\', \'delta\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "adjust_contrast"
+ argspec: "args=[\'images\', \'contrast_factor\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "adjust_gamma"
+ argspec: "args=[\'image\', \'gamma\', \'gain\'], varargs=None, keywords=None, defaults=[\'1\', \'1\'], "
+ }
+ member_method {
+ name: "adjust_hue"
+ argspec: "args=[\'image\', \'delta\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "adjust_jpeg_quality"
+ argspec: "args=[\'image\', \'jpeg_quality\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "adjust_saturation"
+ argspec: "args=[\'image\', \'saturation_factor\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "central_crop"
+ argspec: "args=[\'image\', \'central_fraction\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "convert_image_dtype"
+ argspec: "args=[\'image\', \'dtype\', \'saturate\', \'name\'], varargs=None, keywords=None, defaults=[\'False\', \'None\'], "
+ }
+ member_method {
+ name: "crop_and_resize"
+ argspec: "args=[\'image\', \'boxes\', \'box_ind\', \'crop_size\', \'method\', \'extrapolation_value\', \'name\'], varargs=None, keywords=None, defaults=[\'bilinear\', \'0\', \'None\'], "
+ }
+ member_method {
+ name: "crop_to_bounding_box"
+ argspec: "args=[\'image\', \'offset_height\', \'offset_width\', \'target_height\', \'target_width\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "decode_and_crop_jpeg"
+ argspec: "args=[\'contents\', \'crop_window\', \'channels\', \'ratio\', \'fancy_upscaling\', \'try_recover_truncated\', \'acceptable_fraction\', \'dct_method\', \'name\'], varargs=None, keywords=None, defaults=[\'0\', \'1\', \'True\', \'False\', \'1\', \'\', \'None\'], "
+ }
+ member_method {
+ name: "decode_bmp"
+ argspec: "args=[\'contents\', \'channels\', \'name\'], varargs=None, keywords=None, defaults=[\'0\', \'None\'], "
+ }
+ member_method {
+ name: "decode_gif"
+ argspec: "args=[\'contents\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "decode_image"
+ argspec: "args=[\'contents\', \'channels\', \'dtype\', \'name\'], varargs=None, keywords=None, defaults=[\'None\', \"<dtype: \'uint8\'>\", \'None\'], "
+ }
+ member_method {
+ name: "decode_jpeg"
+ argspec: "args=[\'contents\', \'channels\', \'ratio\', \'fancy_upscaling\', \'try_recover_truncated\', \'acceptable_fraction\', \'dct_method\', \'name\'], varargs=None, keywords=None, defaults=[\'0\', \'1\', \'True\', \'False\', \'1\', \'\', \'None\'], "
+ }
+ member_method {
+ name: "decode_png"
+ argspec: "args=[\'contents\', \'channels\', \'dtype\', \'name\'], varargs=None, keywords=None, defaults=[\'0\', \"<dtype: \'uint8\'>\", \'None\'], "
+ }
+ member_method {
+ name: "draw_bounding_boxes"
+ argspec: "args=[\'images\', \'boxes\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "encode_jpeg"
+ argspec: "args=[\'image\', \'format\', \'quality\', \'progressive\', \'optimize_size\', \'chroma_downsampling\', \'density_unit\', \'x_density\', \'y_density\', \'xmp_metadata\', \'name\'], varargs=None, keywords=None, defaults=[\'\', \'95\', \'False\', \'False\', \'True\', \'in\', \'300\', \'300\', \'\', \'None\'], "
+ }
+ member_method {
+ name: "encode_png"
+ argspec: "args=[\'image\', \'compression\', \'name\'], varargs=None, keywords=None, defaults=[\'-1\', \'None\'], "
+ }
+ member_method {
+ name: "extract_glimpse"
+ argspec: "args=[\'input\', \'size\', \'offsets\', \'centered\', \'normalized\', \'uniform_noise\', \'name\'], varargs=None, keywords=None, defaults=[\'True\', \'True\', \'True\', \'None\'], "
+ }
+ member_method {
+ name: "extract_image_patches"
+ argspec: "args=[\'images\', \'ksizes\', \'strides\', \'rates\', \'padding\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "extract_jpeg_shape"
+ argspec: "args=[\'contents\', \'output_type\', \'name\'], varargs=None, keywords=None, defaults=[\"<dtype: \'int32\'>\", \'None\'], "
+ }
+ member_method {
+ name: "flip_left_right"
+ argspec: "args=[\'image\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "flip_up_down"
+ argspec: "args=[\'image\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "grayscale_to_rgb"
+ argspec: "args=[\'images\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "hsv_to_rgb"
+ argspec: "args=[\'images\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "image_gradients"
+ argspec: "args=[\'image\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "is_jpeg"
+ argspec: "args=[\'contents\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "non_max_suppression"
+ argspec: "args=[\'boxes\', \'scores\', \'max_output_size\', \'iou_threshold\', \'score_threshold\', \'name\'], varargs=None, keywords=None, defaults=[\'0.5\', \'-inf\', \'None\'], "
+ }
+ member_method {
+ name: "non_max_suppression_overlaps"
+ argspec: "args=[\'overlaps\', \'scores\', \'max_output_size\', \'overlap_threshold\', \'score_threshold\', \'name\'], varargs=None, keywords=None, defaults=[\'0.5\', \'-inf\', \'None\'], "
+ }
+ member_method {
+ name: "non_max_suppression_padded"
+ argspec: "args=[\'boxes\', \'scores\', \'max_output_size\', \'iou_threshold\', \'score_threshold\', \'pad_to_max_output_size\', \'name\'], varargs=None, keywords=None, defaults=[\'0.5\', \'-inf\', \'False\', \'None\'], "
+ }
+ member_method {
+ name: "pad_to_bounding_box"
+ argspec: "args=[\'image\', \'offset_height\', \'offset_width\', \'target_height\', \'target_width\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "per_image_standardization"
+ argspec: "args=[\'image\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "psnr"
+ argspec: "args=[\'a\', \'b\', \'max_val\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "random_brightness"
+ argspec: "args=[\'image\', \'max_delta\', \'seed\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "random_contrast"
+ argspec: "args=[\'image\', \'lower\', \'upper\', \'seed\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "random_flip_left_right"
+ argspec: "args=[\'image\', \'seed\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "random_flip_up_down"
+ argspec: "args=[\'image\', \'seed\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "random_hue"
+ argspec: "args=[\'image\', \'max_delta\', \'seed\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "random_jpeg_quality"
+ argspec: "args=[\'image\', \'min_jpeg_quality\', \'max_jpeg_quality\', \'seed\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "random_saturation"
+ argspec: "args=[\'image\', \'lower\', \'upper\', \'seed\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "resize_area"
+ argspec: "args=[\'images\', \'size\', \'align_corners\', \'name\'], varargs=None, keywords=None, defaults=[\'False\', \'None\'], "
+ }
+ member_method {
+ name: "resize_bicubic"
+ argspec: "args=[\'images\', \'size\', \'align_corners\', \'name\'], varargs=None, keywords=None, defaults=[\'False\', \'None\'], "
+ }
+ member_method {
+ name: "resize_bilinear"
+ argspec: "args=[\'images\', \'size\', \'align_corners\', \'name\'], varargs=None, keywords=None, defaults=[\'False\', \'None\'], "
+ }
+ member_method {
+ name: "resize_image_with_crop_or_pad"
+ argspec: "args=[\'image\', \'target_height\', \'target_width\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "resize_image_with_pad"
+ argspec: "args=[\'image\', \'target_height\', \'target_width\', \'method\'], varargs=None, keywords=None, defaults=[\'0\'], "
+ }
+ member_method {
+ name: "resize_images"
+ argspec: "args=[\'images\', \'size\', \'method\', \'align_corners\', \'preserve_aspect_ratio\'], varargs=None, keywords=None, defaults=[\'0\', \'False\', \'False\'], "
+ }
+ member_method {
+ name: "resize_nearest_neighbor"
+ argspec: "args=[\'images\', \'size\', \'align_corners\', \'name\'], varargs=None, keywords=None, defaults=[\'False\', \'None\'], "
+ }
+ member_method {
+ name: "rgb_to_grayscale"
+ argspec: "args=[\'images\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "rgb_to_hsv"
+ argspec: "args=[\'images\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "rgb_to_yiq"
+ argspec: "args=[\'images\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "rgb_to_yuv"
+ argspec: "args=[\'images\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "rot90"
+ argspec: "args=[\'image\', \'k\', \'name\'], varargs=None, keywords=None, defaults=[\'1\', \'None\'], "
+ }
+ member_method {
+ name: "sample_distorted_bounding_box"
+ argspec: "args=[\'image_size\', \'bounding_boxes\', \'seed\', \'seed2\', \'min_object_covered\', \'aspect_ratio_range\', \'area_range\', \'max_attempts\', \'use_image_if_no_bounding_boxes\', \'name\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'0.1\', \'None\', \'None\', \'None\', \'None\', \'None\'], "
+ }
+ member_method {
+ name: "sobel_edges"
+ argspec: "args=[\'image\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "ssim"
+ argspec: "args=[\'img1\', \'img2\', \'max_val\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "ssim_multiscale"
+ argspec: "args=[\'img1\', \'img2\', \'max_val\', \'power_factors\'], varargs=None, keywords=None, defaults=[\'(0.0448, 0.2856, 0.3001, 0.2363, 0.1333)\'], "
+ }
+ member_method {
+ name: "total_variation"
+ argspec: "args=[\'images\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "transpose_image"
+ argspec: "args=[\'image\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "yiq_to_rgb"
+ argspec: "args=[\'images\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "yuv_to_rgb"
+ argspec: "args=[\'images\'], varargs=None, keywords=None, defaults=None"
+ }
+}
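Editor's note: a small input pipeline over a few of the ops listed in this module; the file path is a placeholder.

import tensorflow as tf

contents = tf.read_file('/tmp/example.jpg')
image = tf.image.decode_jpeg(contents, channels=3)
image = tf.image.convert_image_dtype(image, tf.float32)
image = tf.image.resize_images(image, size=[224, 224])
image = tf.image.random_flip_left_right(image, seed=1)
image = tf.image.per_image_standardization(image)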
diff --git a/tensorflow/tools/api/golden/tensorflow.initializers.constant.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.initializers.constant.pbtxt
index 607a5aae21..607a5aae21 100644
--- a/tensorflow/tools/api/golden/tensorflow.initializers.constant.pbtxt
+++ b/tensorflow/tools/api/golden/v1/tensorflow.initializers.constant.pbtxt
diff --git a/tensorflow/tools/api/golden/v1/tensorflow.initializers.glorot_normal.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.initializers.glorot_normal.pbtxt
new file mode 100644
index 0000000000..4a81e52df9
--- /dev/null
+++ b/tensorflow/tools/api/golden/v1/tensorflow.initializers.glorot_normal.pbtxt
@@ -0,0 +1,19 @@
+path: "tensorflow.initializers.glorot_normal"
+tf_class {
+ is_instance: "<class \'tensorflow.python.ops.init_ops.GlorotNormal\'>"
+ is_instance: "<class \'tensorflow.python.ops.init_ops.VarianceScaling\'>"
+ is_instance: "<class \'tensorflow.python.ops.init_ops.Initializer\'>"
+ is_instance: "<type \'object\'>"
+ member_method {
+ name: "__init__"
+ argspec: "args=[\'self\', \'seed\', \'dtype\'], varargs=None, keywords=None, defaults=[\'None\', \"<dtype: \'float32\'>\"], "
+ }
+ member_method {
+ name: "from_config"
+ argspec: "args=[\'cls\', \'config\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_config"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+}
diff --git a/tensorflow/tools/api/golden/v1/tensorflow.initializers.glorot_uniform.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.initializers.glorot_uniform.pbtxt
new file mode 100644
index 0000000000..815dc81dff
--- /dev/null
+++ b/tensorflow/tools/api/golden/v1/tensorflow.initializers.glorot_uniform.pbtxt
@@ -0,0 +1,19 @@
+path: "tensorflow.initializers.glorot_uniform"
+tf_class {
+ is_instance: "<class \'tensorflow.python.ops.init_ops.GlorotUniform\'>"
+ is_instance: "<class \'tensorflow.python.ops.init_ops.VarianceScaling\'>"
+ is_instance: "<class \'tensorflow.python.ops.init_ops.Initializer\'>"
+ is_instance: "<type \'object\'>"
+ member_method {
+ name: "__init__"
+ argspec: "args=[\'self\', \'seed\', \'dtype\'], varargs=None, keywords=None, defaults=[\'None\', \"<dtype: \'float32\'>\"], "
+ }
+ member_method {
+ name: "from_config"
+ argspec: "args=[\'cls\', \'config\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_config"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+}
diff --git a/tensorflow/tools/api/golden/tensorflow.initializers.identity.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.initializers.identity.pbtxt
index 37fcab9599..37fcab9599 100644
--- a/tensorflow/tools/api/golden/tensorflow.initializers.identity.pbtxt
+++ b/tensorflow/tools/api/golden/v1/tensorflow.initializers.identity.pbtxt
diff --git a/tensorflow/tools/api/golden/tensorflow.initializers.ones.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.initializers.ones.pbtxt
index 18481d4815..18481d4815 100644
--- a/tensorflow/tools/api/golden/tensorflow.initializers.ones.pbtxt
+++ b/tensorflow/tools/api/golden/v1/tensorflow.initializers.ones.pbtxt
diff --git a/tensorflow/tools/api/golden/tensorflow.initializers.orthogonal.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.initializers.orthogonal.pbtxt
index ff64efd60c..ff64efd60c 100644
--- a/tensorflow/tools/api/golden/tensorflow.initializers.orthogonal.pbtxt
+++ b/tensorflow/tools/api/golden/v1/tensorflow.initializers.orthogonal.pbtxt
diff --git a/tensorflow/tools/api/golden/tensorflow.initializers.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.initializers.pbtxt
index eaf0036cac..d499c67d89 100644
--- a/tensorflow/tools/api/golden/tensorflow.initializers.pbtxt
+++ b/tensorflow/tools/api/golden/v1/tensorflow.initializers.pbtxt
@@ -5,6 +5,14 @@ tf_module {
mtype: "<type \'type\'>"
}
member {
+ name: "glorot_normal"
+ mtype: "<type \'type\'>"
+ }
+ member {
+ name: "glorot_uniform"
+ mtype: "<type \'type\'>"
+ }
+ member {
name: "identity"
mtype: "<type \'type\'>"
}
@@ -45,6 +53,22 @@ tf_module {
argspec: "args=[], varargs=None, keywords=None, defaults=None"
}
member_method {
+ name: "he_normal"
+ argspec: "args=[\'seed\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "he_uniform"
+ argspec: "args=[\'seed\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "lecun_normal"
+ argspec: "args=[\'seed\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "lecun_uniform"
+ argspec: "args=[\'seed\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
name: "local_variables"
argspec: "args=[], varargs=None, keywords=None, defaults=None"
}
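Editor's note: the hunk above adds he_normal, he_uniform, lecun_normal and lecun_uniform as factory functions taking only a seed. A short usage sketch; the layer sizes are arbitrary.

import tensorflow as tf

dense = tf.layers.Dense(
    units=64,
    activation=tf.nn.relu,
    kernel_initializer=tf.initializers.he_normal(seed=1))

lecun = tf.initializers.lecun_uniform(seed=2)
w = tf.get_variable('w_lecun', shape=[32, 64], initializer=lecun)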
diff --git a/tensorflow/tools/api/golden/tensorflow.initializers.random_normal.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.initializers.random_normal.pbtxt
index 133e61c1d9..133e61c1d9 100644
--- a/tensorflow/tools/api/golden/tensorflow.initializers.random_normal.pbtxt
+++ b/tensorflow/tools/api/golden/v1/tensorflow.initializers.random_normal.pbtxt
diff --git a/tensorflow/tools/api/golden/tensorflow.initializers.random_uniform.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.initializers.random_uniform.pbtxt
index 0cfa0080f5..0cfa0080f5 100644
--- a/tensorflow/tools/api/golden/tensorflow.initializers.random_uniform.pbtxt
+++ b/tensorflow/tools/api/golden/v1/tensorflow.initializers.random_uniform.pbtxt
diff --git a/tensorflow/tools/api/golden/tensorflow.initializers.truncated_normal.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.initializers.truncated_normal.pbtxt
index 730390fba2..730390fba2 100644
--- a/tensorflow/tools/api/golden/tensorflow.initializers.truncated_normal.pbtxt
+++ b/tensorflow/tools/api/golden/v1/tensorflow.initializers.truncated_normal.pbtxt
diff --git a/tensorflow/tools/api/golden/tensorflow.initializers.uniform_unit_scaling.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.initializers.uniform_unit_scaling.pbtxt
index 13295ef375..13295ef375 100644
--- a/tensorflow/tools/api/golden/tensorflow.initializers.uniform_unit_scaling.pbtxt
+++ b/tensorflow/tools/api/golden/v1/tensorflow.initializers.uniform_unit_scaling.pbtxt
diff --git a/tensorflow/tools/api/golden/tensorflow.initializers.variance_scaling.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.initializers.variance_scaling.pbtxt
index a6b6e5eceb..86340913e2 100644
--- a/tensorflow/tools/api/golden/tensorflow.initializers.variance_scaling.pbtxt
+++ b/tensorflow/tools/api/golden/v1/tensorflow.initializers.variance_scaling.pbtxt
@@ -5,7 +5,7 @@ tf_class {
is_instance: "<type \'object\'>"
member_method {
name: "__init__"
- argspec: "args=[\'self\', \'scale\', \'mode\', \'distribution\', \'seed\', \'dtype\'], varargs=None, keywords=None, defaults=[\'1.0\', \'fan_in\', \'normal\', \'None\', \"<dtype: \'float32\'>\"], "
+ argspec: "args=[\'self\', \'scale\', \'mode\', \'distribution\', \'seed\', \'dtype\'], varargs=None, keywords=None, defaults=[\'1.0\', \'fan_in\', \'truncated_normal\', \'None\', \"<dtype: \'float32\'>\"], "
}
member_method {
name: "from_config"
diff --git a/tensorflow/tools/api/golden/tensorflow.initializers.zeros.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.initializers.zeros.pbtxt
index 7df4237bb6..7df4237bb6 100644
--- a/tensorflow/tools/api/golden/tensorflow.initializers.zeros.pbtxt
+++ b/tensorflow/tools/api/golden/v1/tensorflow.initializers.zeros.pbtxt
diff --git a/tensorflow/tools/api/golden/v1/tensorflow.io.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.io.pbtxt
new file mode 100644
index 0000000000..8938cf217b
--- /dev/null
+++ b/tensorflow/tools/api/golden/v1/tensorflow.io.pbtxt
@@ -0,0 +1,43 @@
+path: "tensorflow.io"
+tf_module {
+ member_method {
+ name: "decode_base64"
+ argspec: "args=[\'input\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "decode_compressed"
+ argspec: "args=[\'bytes\', \'compression_type\', \'name\'], varargs=None, keywords=None, defaults=[\'\', \'None\'], "
+ }
+ member_method {
+ name: "decode_json_example"
+ argspec: "args=[\'json_examples\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "decode_raw"
+ argspec: "args=[\'bytes\', \'out_type\', \'little_endian\', \'name\'], varargs=None, keywords=None, defaults=[\'True\', \'None\'], "
+ }
+ member_method {
+ name: "encode_base64"
+ argspec: "args=[\'input\', \'pad\', \'name\'], varargs=None, keywords=None, defaults=[\'False\', \'None\'], "
+ }
+ member_method {
+ name: "matching_files"
+ argspec: "args=[\'pattern\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "parse_sequence_example"
+ argspec: "args=[\'serialized\', \'context_features\', \'sequence_features\', \'example_names\', \'name\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'None\'], "
+ }
+ member_method {
+ name: "parse_tensor"
+ argspec: "args=[\'serialized\', \'out_type\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "read_file"
+ argspec: "args=[\'filename\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "write_file"
+ argspec: "args=[\'filename\', \'contents\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+}
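The new tensorflow.io.pbtxt golden above pins the tf.io endpoints; a short graph-mode sketch exercising three of the listed methods, with made-up input data:

import tensorflow as tf

# encode_base64 / decode_base64 / decode_raw keyword names follow the argspecs above.
data = tf.constant(b'golden api check')
encoded = tf.io.encode_base64(data, pad=True)
decoded = tf.io.decode_base64(encoded)
raw_bytes = tf.io.decode_raw(decoded, out_type=tf.uint8)

with tf.Session() as sess:
    print(sess.run([decoded, raw_bytes]))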
diff --git a/tensorflow/tools/api/golden/v1/tensorflow.keras.-model.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.keras.-model.pbtxt
new file mode 100644
index 0000000000..0869de0243
--- /dev/null
+++ b/tensorflow/tools/api/golden/v1/tensorflow.keras.-model.pbtxt
@@ -0,0 +1,268 @@
+path: "tensorflow.keras.Model"
+tf_class {
+ is_instance: "<class \'tensorflow.python.keras.engine.training.Model\'>"
+ is_instance: "<class \'tensorflow.python.keras.engine.network.Network\'>"
+ is_instance: "<class \'tensorflow.python.keras.engine.base_layer.Layer\'>"
+ is_instance: "<class \'tensorflow.python.training.checkpointable.base.CheckpointableBase\'>"
+ is_instance: "<type \'object\'>"
+ member {
+ name: "activity_regularizer"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "dtype"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "inbound_nodes"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "input"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "input_mask"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "input_shape"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "input_spec"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "layers"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "losses"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "name"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "non_trainable_variables"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "non_trainable_weights"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "outbound_nodes"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "output"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "output_mask"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "output_shape"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "state_updates"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "stateful"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "trainable_variables"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "trainable_weights"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "updates"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "uses_learning_phase"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "variables"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "weights"
+ mtype: "<type \'property\'>"
+ }
+ member_method {
+ name: "__init__"
+ argspec: "args=[\'self\'], varargs=args, keywords=kwargs, defaults=None"
+ }
+ member_method {
+ name: "add_loss"
+ argspec: "args=[\'self\'], varargs=args, keywords=kwargs, defaults=None"
+ }
+ member_method {
+ name: "add_update"
+ argspec: "args=[\'self\', \'updates\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "add_variable"
+ argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'True\', \'None\'], "
+ }
+ member_method {
+ name: "add_weight"
+ argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'synchronization\', \'aggregation\'], varargs=None, keywords=kwargs, defaults=[\'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'VariableSynchronization.AUTO\', \'VariableAggregation.NONE\'], "
+ }
+ member_method {
+ name: "apply"
+ argspec: "args=[\'self\', \'inputs\'], varargs=args, keywords=kwargs, defaults=None"
+ }
+ member_method {
+ name: "build"
+ argspec: "args=[\'self\', \'input_shape\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "call"
+ argspec: "args=[\'self\', \'inputs\', \'training\', \'mask\'], varargs=None, keywords=None, defaults=[\'None\', \'None\'], "
+ }
+ member_method {
+ name: "compile"
+ argspec: "args=[\'self\', \'optimizer\', \'loss\', \'metrics\', \'loss_weights\', \'sample_weight_mode\', \'weighted_metrics\', \'target_tensors\', \'distribute\'], varargs=None, keywords=kwargs, defaults=[\'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'None\'], "
+ }
+ member_method {
+ name: "compute_mask"
+ argspec: "args=[\'self\', \'inputs\', \'mask\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "compute_output_shape"
+ argspec: "args=[\'self\', \'input_shape\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "count_params"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "evaluate"
+ argspec: "args=[\'self\', \'x\', \'y\', \'batch_size\', \'verbose\', \'sample_weight\', \'steps\', \'max_queue_size\', \'workers\', \'use_multiprocessing\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'1\', \'None\', \'None\', \'10\', \'1\', \'False\'], "
+ }
+ member_method {
+ name: "evaluate_generator"
+ argspec: "args=[\'self\', \'generator\', \'steps\', \'max_queue_size\', \'workers\', \'use_multiprocessing\', \'verbose\'], varargs=None, keywords=None, defaults=[\'None\', \'10\', \'1\', \'False\', \'0\'], "
+ }
+ member_method {
+ name: "fit"
+ argspec: "args=[\'self\', \'x\', \'y\', \'batch_size\', \'epochs\', \'verbose\', \'callbacks\', \'validation_split\', \'validation_data\', \'shuffle\', \'class_weight\', \'sample_weight\', \'initial_epoch\', \'steps_per_epoch\', \'validation_steps\', \'max_queue_size\', \'workers\', \'use_multiprocessing\'], varargs=None, keywords=kwargs, defaults=[\'None\', \'None\', \'None\', \'1\', \'1\', \'None\', \'0.0\', \'None\', \'True\', \'None\', \'None\', \'0\', \'None\', \'None\', \'10\', \'1\', \'False\'], "
+ }
+ member_method {
+ name: "fit_generator"
+ argspec: "args=[\'self\', \'generator\', \'steps_per_epoch\', \'epochs\', \'verbose\', \'callbacks\', \'validation_data\', \'validation_steps\', \'class_weight\', \'max_queue_size\', \'workers\', \'use_multiprocessing\', \'shuffle\', \'initial_epoch\'], varargs=None, keywords=None, defaults=[\'None\', \'1\', \'1\', \'None\', \'None\', \'None\', \'None\', \'10\', \'1\', \'False\', \'True\', \'0\'], "
+ }
+ member_method {
+ name: "from_config"
+ argspec: "args=[\'cls\', \'config\', \'custom_objects\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "get_config"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_input_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_input_mask_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_input_shape_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_layer"
+ argspec: "args=[\'self\', \'name\', \'index\'], varargs=None, keywords=None, defaults=[\'None\', \'None\'], "
+ }
+ member_method {
+ name: "get_losses_for"
+ argspec: "args=[\'self\', \'inputs\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_output_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_output_mask_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_output_shape_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_updates_for"
+ argspec: "args=[\'self\', \'inputs\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_weights"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "load_weights"
+ argspec: "args=[\'self\', \'filepath\', \'by_name\'], varargs=None, keywords=None, defaults=[\'False\'], "
+ }
+ member_method {
+ name: "predict"
+ argspec: "args=[\'self\', \'x\', \'batch_size\', \'verbose\', \'steps\', \'max_queue_size\', \'workers\', \'use_multiprocessing\'], varargs=None, keywords=None, defaults=[\'None\', \'0\', \'None\', \'10\', \'1\', \'False\'], "
+ }
+ member_method {
+ name: "predict_generator"
+ argspec: "args=[\'self\', \'generator\', \'steps\', \'max_queue_size\', \'workers\', \'use_multiprocessing\', \'verbose\'], varargs=None, keywords=None, defaults=[\'None\', \'10\', \'1\', \'False\', \'0\'], "
+ }
+ member_method {
+ name: "predict_on_batch"
+ argspec: "args=[\'self\', \'x\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "reset_states"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "save"
+ argspec: "args=[\'self\', \'filepath\', \'overwrite\', \'include_optimizer\'], varargs=None, keywords=None, defaults=[\'True\', \'True\'], "
+ }
+ member_method {
+ name: "save_weights"
+ argspec: "args=[\'self\', \'filepath\', \'overwrite\', \'save_format\'], varargs=None, keywords=None, defaults=[\'True\', \'None\'], "
+ }
+ member_method {
+ name: "set_weights"
+ argspec: "args=[\'self\', \'weights\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "summary"
+ argspec: "args=[\'self\', \'line_length\', \'positions\', \'print_fn\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\'], "
+ }
+ member_method {
+ name: "test_on_batch"
+ argspec: "args=[\'self\', \'x\', \'y\', \'sample_weight\'], varargs=None, keywords=None, defaults=[\'None\', \'None\'], "
+ }
+ member_method {
+ name: "to_json"
+ argspec: "args=[\'self\'], varargs=None, keywords=kwargs, defaults=None"
+ }
+ member_method {
+ name: "to_yaml"
+ argspec: "args=[\'self\'], varargs=None, keywords=kwargs, defaults=None"
+ }
+ member_method {
+ name: "train_on_batch"
+ argspec: "args=[\'self\', \'x\', \'y\', \'sample_weight\', \'class_weight\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\'], "
+ }
+}
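The golden above freezes the public tf.keras.Model surface for v1; a compact sketch of the compile/fit/evaluate methods it lists, using a tiny hypothetical model and random data:

import numpy as np
import tensorflow as tf

inputs = tf.keras.Input(shape=(8,))
outputs = tf.keras.layers.Dense(1, activation='sigmoid')(inputs)
model = tf.keras.Model(inputs=inputs, outputs=outputs)

# compile/fit/evaluate keyword names follow the argspecs pinned above.
model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])
x = np.random.rand(32, 8)
y = np.random.randint(0, 2, size=(32, 1))
model.fit(x, y, batch_size=8, epochs=1, verbose=0)
print(model.evaluate(x, y, verbose=0))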
diff --git a/tensorflow/tools/api/golden/v1/tensorflow.keras.-sequential.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.keras.-sequential.pbtxt
new file mode 100644
index 0000000000..20f39fae1e
--- /dev/null
+++ b/tensorflow/tools/api/golden/v1/tensorflow.keras.-sequential.pbtxt
@@ -0,0 +1,285 @@
+path: "tensorflow.keras.Sequential"
+tf_class {
+ is_instance: "<class \'tensorflow.python.keras.engine.sequential.Sequential\'>"
+ is_instance: "<class \'tensorflow.python.keras.engine.training.Model\'>"
+ is_instance: "<class \'tensorflow.python.keras.engine.network.Network\'>"
+ is_instance: "<class \'tensorflow.python.keras.engine.base_layer.Layer\'>"
+ is_instance: "<class \'tensorflow.python.training.checkpointable.base.CheckpointableBase\'>"
+ is_instance: "<type \'object\'>"
+ member {
+ name: "activity_regularizer"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "dtype"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "inbound_nodes"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "input"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "input_mask"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "input_shape"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "input_spec"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "layers"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "losses"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "name"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "non_trainable_variables"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "non_trainable_weights"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "outbound_nodes"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "output"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "output_mask"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "output_shape"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "state_updates"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "stateful"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "trainable_variables"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "trainable_weights"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "updates"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "uses_learning_phase"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "variables"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "weights"
+ mtype: "<type \'property\'>"
+ }
+ member_method {
+ name: "__init__"
+ argspec: "args=[\'self\', \'layers\', \'name\'], varargs=None, keywords=None, defaults=[\'None\', \'None\'], "
+ }
+ member_method {
+ name: "add"
+ argspec: "args=[\'self\', \'layer\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "add_loss"
+ argspec: "args=[\'self\'], varargs=args, keywords=kwargs, defaults=None"
+ }
+ member_method {
+ name: "add_update"
+ argspec: "args=[\'self\', \'updates\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "add_variable"
+ argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'True\', \'None\'], "
+ }
+ member_method {
+ name: "add_weight"
+ argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'synchronization\', \'aggregation\'], varargs=None, keywords=kwargs, defaults=[\'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'VariableSynchronization.AUTO\', \'VariableAggregation.NONE\'], "
+ }
+ member_method {
+ name: "apply"
+ argspec: "args=[\'self\', \'inputs\'], varargs=args, keywords=kwargs, defaults=None"
+ }
+ member_method {
+ name: "build"
+ argspec: "args=[\'self\', \'input_shape\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "call"
+ argspec: "args=[\'self\', \'inputs\', \'training\', \'mask\'], varargs=None, keywords=None, defaults=[\'None\', \'None\'], "
+ }
+ member_method {
+ name: "compile"
+ argspec: "args=[\'self\', \'optimizer\', \'loss\', \'metrics\', \'loss_weights\', \'sample_weight_mode\', \'weighted_metrics\', \'target_tensors\', \'distribute\'], varargs=None, keywords=kwargs, defaults=[\'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'None\'], "
+ }
+ member_method {
+ name: "compute_mask"
+ argspec: "args=[\'self\', \'inputs\', \'mask\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "compute_output_shape"
+ argspec: "args=[\'self\', \'input_shape\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "count_params"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "evaluate"
+ argspec: "args=[\'self\', \'x\', \'y\', \'batch_size\', \'verbose\', \'sample_weight\', \'steps\', \'max_queue_size\', \'workers\', \'use_multiprocessing\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'1\', \'None\', \'None\', \'10\', \'1\', \'False\'], "
+ }
+ member_method {
+ name: "evaluate_generator"
+ argspec: "args=[\'self\', \'generator\', \'steps\', \'max_queue_size\', \'workers\', \'use_multiprocessing\', \'verbose\'], varargs=None, keywords=None, defaults=[\'None\', \'10\', \'1\', \'False\', \'0\'], "
+ }
+ member_method {
+ name: "fit"
+ argspec: "args=[\'self\', \'x\', \'y\', \'batch_size\', \'epochs\', \'verbose\', \'callbacks\', \'validation_split\', \'validation_data\', \'shuffle\', \'class_weight\', \'sample_weight\', \'initial_epoch\', \'steps_per_epoch\', \'validation_steps\', \'max_queue_size\', \'workers\', \'use_multiprocessing\'], varargs=None, keywords=kwargs, defaults=[\'None\', \'None\', \'None\', \'1\', \'1\', \'None\', \'0.0\', \'None\', \'True\', \'None\', \'None\', \'0\', \'None\', \'None\', \'10\', \'1\', \'False\'], "
+ }
+ member_method {
+ name: "fit_generator"
+ argspec: "args=[\'self\', \'generator\', \'steps_per_epoch\', \'epochs\', \'verbose\', \'callbacks\', \'validation_data\', \'validation_steps\', \'class_weight\', \'max_queue_size\', \'workers\', \'use_multiprocessing\', \'shuffle\', \'initial_epoch\'], varargs=None, keywords=None, defaults=[\'None\', \'1\', \'1\', \'None\', \'None\', \'None\', \'None\', \'10\', \'1\', \'False\', \'True\', \'0\'], "
+ }
+ member_method {
+ name: "from_config"
+ argspec: "args=[\'cls\', \'config\', \'custom_objects\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "get_config"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_input_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_input_mask_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_input_shape_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_layer"
+ argspec: "args=[\'self\', \'name\', \'index\'], varargs=None, keywords=None, defaults=[\'None\', \'None\'], "
+ }
+ member_method {
+ name: "get_losses_for"
+ argspec: "args=[\'self\', \'inputs\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_output_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_output_mask_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_output_shape_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_updates_for"
+ argspec: "args=[\'self\', \'inputs\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_weights"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "load_weights"
+ argspec: "args=[\'self\', \'filepath\', \'by_name\'], varargs=None, keywords=None, defaults=[\'False\'], "
+ }
+ member_method {
+ name: "pop"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "predict"
+ argspec: "args=[\'self\', \'x\', \'batch_size\', \'verbose\', \'steps\', \'max_queue_size\', \'workers\', \'use_multiprocessing\'], varargs=None, keywords=None, defaults=[\'None\', \'0\', \'None\', \'10\', \'1\', \'False\'], "
+ }
+ member_method {
+ name: "predict_classes"
+ argspec: "args=[\'self\', \'x\', \'batch_size\', \'verbose\'], varargs=None, keywords=None, defaults=[\'32\', \'0\'], "
+ }
+ member_method {
+ name: "predict_generator"
+ argspec: "args=[\'self\', \'generator\', \'steps\', \'max_queue_size\', \'workers\', \'use_multiprocessing\', \'verbose\'], varargs=None, keywords=None, defaults=[\'None\', \'10\', \'1\', \'False\', \'0\'], "
+ }
+ member_method {
+ name: "predict_on_batch"
+ argspec: "args=[\'self\', \'x\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "predict_proba"
+ argspec: "args=[\'self\', \'x\', \'batch_size\', \'verbose\'], varargs=None, keywords=None, defaults=[\'32\', \'0\'], "
+ }
+ member_method {
+ name: "reset_states"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "save"
+ argspec: "args=[\'self\', \'filepath\', \'overwrite\', \'include_optimizer\'], varargs=None, keywords=None, defaults=[\'True\', \'True\'], "
+ }
+ member_method {
+ name: "save_weights"
+ argspec: "args=[\'self\', \'filepath\', \'overwrite\', \'save_format\'], varargs=None, keywords=None, defaults=[\'True\', \'None\'], "
+ }
+ member_method {
+ name: "set_weights"
+ argspec: "args=[\'self\', \'weights\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "summary"
+ argspec: "args=[\'self\', \'line_length\', \'positions\', \'print_fn\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\'], "
+ }
+ member_method {
+ name: "test_on_batch"
+ argspec: "args=[\'self\', \'x\', \'y\', \'sample_weight\'], varargs=None, keywords=None, defaults=[\'None\', \'None\'], "
+ }
+ member_method {
+ name: "to_json"
+ argspec: "args=[\'self\'], varargs=None, keywords=kwargs, defaults=None"
+ }
+ member_method {
+ name: "to_yaml"
+ argspec: "args=[\'self\'], varargs=None, keywords=kwargs, defaults=None"
+ }
+ member_method {
+ name: "train_on_batch"
+ argspec: "args=[\'self\', \'x\', \'y\', \'sample_weight\', \'class_weight\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\'], "
+ }
+}
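Relative to the Model golden, the Sequential golden above also lists add, pop, predict_classes, and predict_proba; a hedged sketch of those Sequential-only methods (layer sizes and inputs are arbitrary):

import numpy as np
import tensorflow as tf

model = tf.keras.Sequential()
model.add(tf.keras.layers.Dense(16, activation='relu', input_shape=(8,)))
model.add(tf.keras.layers.Dense(4, activation='softmax'))
model.pop()  # removes the last layer, per the pop() entry above
model.add(tf.keras.layers.Dense(3, activation='softmax'))

x = np.random.rand(5, 8)
probs = model.predict_proba(x, batch_size=32, verbose=0)
labels = model.predict_classes(x, batch_size=32, verbose=0)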
diff --git a/tensorflow/tools/api/golden/v1/tensorflow.keras.activations.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.keras.activations.pbtxt
new file mode 100644
index 0000000000..2e9de9ebb2
--- /dev/null
+++ b/tensorflow/tools/api/golden/v1/tensorflow.keras.activations.pbtxt
@@ -0,0 +1,55 @@
+path: "tensorflow.keras.activations"
+tf_module {
+ member_method {
+ name: "deserialize"
+ argspec: "args=[\'name\', \'custom_objects\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "elu"
+ argspec: "args=[\'x\', \'alpha\'], varargs=None, keywords=None, defaults=[\'1.0\'], "
+ }
+ member_method {
+ name: "get"
+ argspec: "args=[\'identifier\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "hard_sigmoid"
+ argspec: "args=[\'x\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "linear"
+ argspec: "args=[\'x\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "relu"
+ argspec: "args=[\'x\', \'alpha\', \'max_value\', \'threshold\'], varargs=None, keywords=None, defaults=[\'0.0\', \'None\', \'0\'], "
+ }
+ member_method {
+ name: "selu"
+ argspec: "args=[\'x\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "serialize"
+ argspec: "args=[\'activation\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "sigmoid"
+ argspec: "args=[\'x\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "softmax"
+ argspec: "args=[\'x\', \'axis\'], varargs=None, keywords=None, defaults=[\'-1\'], "
+ }
+ member_method {
+ name: "softplus"
+ argspec: "args=[\'x\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "softsign"
+ argspec: "args=[\'x\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "tanh"
+ argspec: "args=[\'x\'], varargs=None, keywords=None, defaults=None"
+ }
+}
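The activations golden above lists relu with alpha/max_value/threshold keywords and softmax with an axis keyword; an illustrative graph-mode call using arbitrary values:

import tensorflow as tf

x = tf.constant([[-2.0, -0.5, 0.5, 3.0]])
capped = tf.keras.activations.relu(x, alpha=0.0, max_value=2.0, threshold=0.0)
probs = tf.keras.activations.softmax(x, axis=-1)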
diff --git a/tensorflow/tools/api/golden/tensorflow.keras.backend.name_scope.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.keras.backend.name_scope.pbtxt
index a2b98b1c27..a2b98b1c27 100644
--- a/tensorflow/tools/api/golden/tensorflow.keras.backend.name_scope.pbtxt
+++ b/tensorflow/tools/api/golden/v1/tensorflow.keras.backend.name_scope.pbtxt
diff --git a/tensorflow/tools/api/golden/tensorflow.keras.backend.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.keras.backend.pbtxt
index ba2d083a75..126ce8db6a 100644
--- a/tensorflow/tools/api/golden/tensorflow.keras.backend.pbtxt
+++ b/tensorflow/tools/api/golden/v1/tensorflow.keras.backend.pbtxt
@@ -70,7 +70,7 @@ tf_module {
}
member_method {
name: "categorical_crossentropy"
- argspec: "args=[\'target\', \'output\', \'from_logits\'], varargs=None, keywords=None, defaults=[\'False\'], "
+ argspec: "args=[\'target\', \'output\', \'from_logits\', \'axis\'], varargs=None, keywords=None, defaults=[\'False\', \'-1\'], "
}
member_method {
name: "clear_session"
@@ -366,7 +366,7 @@ tf_module {
}
member_method {
name: "relu"
- argspec: "args=[\'x\', \'alpha\', \'max_value\'], varargs=None, keywords=None, defaults=[\'0.0\', \'None\'], "
+ argspec: "args=[\'x\', \'alpha\', \'max_value\', \'threshold\'], varargs=None, keywords=None, defaults=[\'0.0\', \'None\', \'0\'], "
}
member_method {
name: "repeat"
@@ -450,7 +450,7 @@ tf_module {
}
member_method {
name: "softmax"
- argspec: "args=[\'x\'], varargs=None, keywords=None, defaults=None"
+ argspec: "args=[\'x\', \'axis\'], varargs=None, keywords=None, defaults=[\'-1\'], "
}
member_method {
name: "softplus"
@@ -462,7 +462,7 @@ tf_module {
}
member_method {
name: "sparse_categorical_crossentropy"
- argspec: "args=[\'target\', \'output\', \'from_logits\'], varargs=None, keywords=None, defaults=[\'False\'], "
+ argspec: "args=[\'target\', \'output\', \'from_logits\', \'axis\'], varargs=None, keywords=None, defaults=[\'False\', \'-1\'], "
}
member_method {
name: "spatial_2d_padding"
diff --git a/tensorflow/tools/api/golden/tensorflow.keras.callbacks.-base-logger.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.keras.callbacks.-base-logger.pbtxt
index 454823fd23..9eee9b3789 100644
--- a/tensorflow/tools/api/golden/tensorflow.keras.callbacks.-base-logger.pbtxt
+++ b/tensorflow/tools/api/golden/v1/tensorflow.keras.callbacks.-base-logger.pbtxt
@@ -1,7 +1,7 @@
path: "tensorflow.keras.callbacks.BaseLogger"
tf_class {
- is_instance: "<class \'tensorflow.python.keras._impl.keras.callbacks.BaseLogger\'>"
- is_instance: "<class \'tensorflow.python.keras._impl.keras.callbacks.Callback\'>"
+ is_instance: "<class \'tensorflow.python.keras.callbacks.BaseLogger\'>"
+ is_instance: "<class \'tensorflow.python.keras.callbacks.Callback\'>"
is_instance: "<type \'object\'>"
member_method {
name: "__init__"
diff --git a/tensorflow/tools/api/golden/tensorflow.keras.callbacks.-c-s-v-logger.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.keras.callbacks.-c-s-v-logger.pbtxt
index 86b264c79f..5bb949c5bb 100644
--- a/tensorflow/tools/api/golden/tensorflow.keras.callbacks.-c-s-v-logger.pbtxt
+++ b/tensorflow/tools/api/golden/v1/tensorflow.keras.callbacks.-c-s-v-logger.pbtxt
@@ -1,7 +1,7 @@
path: "tensorflow.keras.callbacks.CSVLogger"
tf_class {
- is_instance: "<class \'tensorflow.python.keras._impl.keras.callbacks.CSVLogger\'>"
- is_instance: "<class \'tensorflow.python.keras._impl.keras.callbacks.Callback\'>"
+ is_instance: "<class \'tensorflow.python.keras.callbacks.CSVLogger\'>"
+ is_instance: "<class \'tensorflow.python.keras.callbacks.Callback\'>"
is_instance: "<type \'object\'>"
member_method {
name: "__init__"
diff --git a/tensorflow/tools/api/golden/tensorflow.keras.callbacks.-callback.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.keras.callbacks.-callback.pbtxt
index 1474b392ff..a5340d52c1 100644
--- a/tensorflow/tools/api/golden/tensorflow.keras.callbacks.-callback.pbtxt
+++ b/tensorflow/tools/api/golden/v1/tensorflow.keras.callbacks.-callback.pbtxt
@@ -1,6 +1,6 @@
path: "tensorflow.keras.callbacks.Callback"
tf_class {
- is_instance: "<class \'tensorflow.python.keras._impl.keras.callbacks.Callback\'>"
+ is_instance: "<class \'tensorflow.python.keras.callbacks.Callback\'>"
is_instance: "<type \'object\'>"
member_method {
name: "__init__"
diff --git a/tensorflow/tools/api/golden/tensorflow.keras.callbacks.-early-stopping.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.keras.callbacks.-early-stopping.pbtxt
index 27d4a208a4..f71292856c 100644
--- a/tensorflow/tools/api/golden/tensorflow.keras.callbacks.-early-stopping.pbtxt
+++ b/tensorflow/tools/api/golden/v1/tensorflow.keras.callbacks.-early-stopping.pbtxt
@@ -1,11 +1,11 @@
path: "tensorflow.keras.callbacks.EarlyStopping"
tf_class {
- is_instance: "<class \'tensorflow.python.keras._impl.keras.callbacks.EarlyStopping\'>"
- is_instance: "<class \'tensorflow.python.keras._impl.keras.callbacks.Callback\'>"
+ is_instance: "<class \'tensorflow.python.keras.callbacks.EarlyStopping\'>"
+ is_instance: "<class \'tensorflow.python.keras.callbacks.Callback\'>"
is_instance: "<type \'object\'>"
member_method {
name: "__init__"
- argspec: "args=[\'self\', \'monitor\', \'min_delta\', \'patience\', \'verbose\', \'mode\'], varargs=None, keywords=None, defaults=[\'val_loss\', \'0\', \'0\', \'0\', \'auto\'], "
+ argspec: "args=[\'self\', \'monitor\', \'min_delta\', \'patience\', \'verbose\', \'mode\', \'baseline\'], varargs=None, keywords=None, defaults=[\'val_loss\', \'0\', \'0\', \'0\', \'auto\', \'None\'], "
}
member_method {
name: "on_batch_begin"
diff --git a/tensorflow/tools/api/golden/tensorflow.keras.callbacks.-history.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.keras.callbacks.-history.pbtxt
index a7b2deea82..ee400b31c4 100644
--- a/tensorflow/tools/api/golden/tensorflow.keras.callbacks.-history.pbtxt
+++ b/tensorflow/tools/api/golden/v1/tensorflow.keras.callbacks.-history.pbtxt
@@ -1,7 +1,7 @@
path: "tensorflow.keras.callbacks.History"
tf_class {
- is_instance: "<class \'tensorflow.python.keras._impl.keras.callbacks.History\'>"
- is_instance: "<class \'tensorflow.python.keras._impl.keras.callbacks.Callback\'>"
+ is_instance: "<class \'tensorflow.python.keras.callbacks.History\'>"
+ is_instance: "<class \'tensorflow.python.keras.callbacks.Callback\'>"
is_instance: "<type \'object\'>"
member_method {
name: "__init__"
diff --git a/tensorflow/tools/api/golden/tensorflow.keras.callbacks.-lambda-callback.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.keras.callbacks.-lambda-callback.pbtxt
index 5ee22948ad..df8d7b0ef7 100644
--- a/tensorflow/tools/api/golden/tensorflow.keras.callbacks.-lambda-callback.pbtxt
+++ b/tensorflow/tools/api/golden/v1/tensorflow.keras.callbacks.-lambda-callback.pbtxt
@@ -1,7 +1,7 @@
path: "tensorflow.keras.callbacks.LambdaCallback"
tf_class {
- is_instance: "<class \'tensorflow.python.keras._impl.keras.callbacks.LambdaCallback\'>"
- is_instance: "<class \'tensorflow.python.keras._impl.keras.callbacks.Callback\'>"
+ is_instance: "<class \'tensorflow.python.keras.callbacks.LambdaCallback\'>"
+ is_instance: "<class \'tensorflow.python.keras.callbacks.Callback\'>"
is_instance: "<type \'object\'>"
member_method {
name: "__init__"
diff --git a/tensorflow/tools/api/golden/tensorflow.keras.callbacks.-learning-rate-scheduler.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.keras.callbacks.-learning-rate-scheduler.pbtxt
index d4c85a4519..ce1a9b694d 100644
--- a/tensorflow/tools/api/golden/tensorflow.keras.callbacks.-learning-rate-scheduler.pbtxt
+++ b/tensorflow/tools/api/golden/v1/tensorflow.keras.callbacks.-learning-rate-scheduler.pbtxt
@@ -1,7 +1,7 @@
path: "tensorflow.keras.callbacks.LearningRateScheduler"
tf_class {
- is_instance: "<class \'tensorflow.python.keras._impl.keras.callbacks.LearningRateScheduler\'>"
- is_instance: "<class \'tensorflow.python.keras._impl.keras.callbacks.Callback\'>"
+ is_instance: "<class \'tensorflow.python.keras.callbacks.LearningRateScheduler\'>"
+ is_instance: "<class \'tensorflow.python.keras.callbacks.Callback\'>"
is_instance: "<type \'object\'>"
member_method {
name: "__init__"
diff --git a/tensorflow/tools/api/golden/tensorflow.keras.callbacks.-model-checkpoint.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.keras.callbacks.-model-checkpoint.pbtxt
index 79f9c88bbc..48bb24a052 100644
--- a/tensorflow/tools/api/golden/tensorflow.keras.callbacks.-model-checkpoint.pbtxt
+++ b/tensorflow/tools/api/golden/v1/tensorflow.keras.callbacks.-model-checkpoint.pbtxt
@@ -1,7 +1,7 @@
path: "tensorflow.keras.callbacks.ModelCheckpoint"
tf_class {
- is_instance: "<class \'tensorflow.python.keras._impl.keras.callbacks.ModelCheckpoint\'>"
- is_instance: "<class \'tensorflow.python.keras._impl.keras.callbacks.Callback\'>"
+ is_instance: "<class \'tensorflow.python.keras.callbacks.ModelCheckpoint\'>"
+ is_instance: "<class \'tensorflow.python.keras.callbacks.Callback\'>"
is_instance: "<type \'object\'>"
member_method {
name: "__init__"
diff --git a/tensorflow/tools/api/golden/tensorflow.keras.callbacks.-progbar-logger.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.keras.callbacks.-progbar-logger.pbtxt
index 543de0ad48..d8bb8b2a7d 100644
--- a/tensorflow/tools/api/golden/tensorflow.keras.callbacks.-progbar-logger.pbtxt
+++ b/tensorflow/tools/api/golden/v1/tensorflow.keras.callbacks.-progbar-logger.pbtxt
@@ -1,7 +1,7 @@
path: "tensorflow.keras.callbacks.ProgbarLogger"
tf_class {
- is_instance: "<class \'tensorflow.python.keras._impl.keras.callbacks.ProgbarLogger\'>"
- is_instance: "<class \'tensorflow.python.keras._impl.keras.callbacks.Callback\'>"
+ is_instance: "<class \'tensorflow.python.keras.callbacks.ProgbarLogger\'>"
+ is_instance: "<class \'tensorflow.python.keras.callbacks.Callback\'>"
is_instance: "<type \'object\'>"
member_method {
name: "__init__"
diff --git a/tensorflow/tools/api/golden/tensorflow.keras.callbacks.-reduce-l-r-on-plateau.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.keras.callbacks.-reduce-l-r-on-plateau.pbtxt
index 5838d58312..dc27af9552 100644
--- a/tensorflow/tools/api/golden/tensorflow.keras.callbacks.-reduce-l-r-on-plateau.pbtxt
+++ b/tensorflow/tools/api/golden/v1/tensorflow.keras.callbacks.-reduce-l-r-on-plateau.pbtxt
@@ -1,11 +1,11 @@
path: "tensorflow.keras.callbacks.ReduceLROnPlateau"
tf_class {
- is_instance: "<class \'tensorflow.python.keras._impl.keras.callbacks.ReduceLROnPlateau\'>"
- is_instance: "<class \'tensorflow.python.keras._impl.keras.callbacks.Callback\'>"
+ is_instance: "<class \'tensorflow.python.keras.callbacks.ReduceLROnPlateau\'>"
+ is_instance: "<class \'tensorflow.python.keras.callbacks.Callback\'>"
is_instance: "<type \'object\'>"
member_method {
name: "__init__"
- argspec: "args=[\'self\', \'monitor\', \'factor\', \'patience\', \'verbose\', \'mode\', \'epsilon\', \'cooldown\', \'min_lr\'], varargs=None, keywords=None, defaults=[\'val_loss\', \'0.1\', \'10\', \'0\', \'auto\', \'0.0001\', \'0\', \'0\'], "
+ argspec: "args=[\'self\', \'monitor\', \'factor\', \'patience\', \'verbose\', \'mode\', \'min_delta\', \'cooldown\', \'min_lr\'], varargs=None, keywords=kwargs, defaults=[\'val_loss\', \'0.1\', \'10\', \'0\', \'auto\', \'0.0001\', \'0\', \'0\'], "
}
member_method {
name: "in_cooldown"
diff --git a/tensorflow/tools/api/golden/tensorflow.keras.callbacks.-remote-monitor.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.keras.callbacks.-remote-monitor.pbtxt
index 3d0acfed1d..5a3b791c0a 100644
--- a/tensorflow/tools/api/golden/tensorflow.keras.callbacks.-remote-monitor.pbtxt
+++ b/tensorflow/tools/api/golden/v1/tensorflow.keras.callbacks.-remote-monitor.pbtxt
@@ -1,11 +1,11 @@
path: "tensorflow.keras.callbacks.RemoteMonitor"
tf_class {
- is_instance: "<class \'tensorflow.python.keras._impl.keras.callbacks.RemoteMonitor\'>"
- is_instance: "<class \'tensorflow.python.keras._impl.keras.callbacks.Callback\'>"
+ is_instance: "<class \'tensorflow.python.keras.callbacks.RemoteMonitor\'>"
+ is_instance: "<class \'tensorflow.python.keras.callbacks.Callback\'>"
is_instance: "<type \'object\'>"
member_method {
name: "__init__"
- argspec: "args=[\'self\', \'root\', \'path\', \'field\', \'headers\'], varargs=None, keywords=None, defaults=[\'http://localhost:9000\', \'/publish/epoch/end/\', \'data\', \'None\'], "
+ argspec: "args=[\'self\', \'root\', \'path\', \'field\', \'headers\', \'send_as_json\'], varargs=None, keywords=None, defaults=[\'http://localhost:9000\', \'/publish/epoch/end/\', \'data\', \'None\', \'False\'], "
}
member_method {
name: "on_batch_begin"
diff --git a/tensorflow/tools/api/golden/tensorflow.keras.callbacks.-tensor-board.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.keras.callbacks.-tensor-board.pbtxt
index 7de4008c45..e58ba18c1c 100644
--- a/tensorflow/tools/api/golden/tensorflow.keras.callbacks.-tensor-board.pbtxt
+++ b/tensorflow/tools/api/golden/v1/tensorflow.keras.callbacks.-tensor-board.pbtxt
@@ -1,11 +1,11 @@
path: "tensorflow.keras.callbacks.TensorBoard"
tf_class {
- is_instance: "<class \'tensorflow.python.keras._impl.keras.callbacks.TensorBoard\'>"
- is_instance: "<class \'tensorflow.python.keras._impl.keras.callbacks.Callback\'>"
+ is_instance: "<class \'tensorflow.python.keras.callbacks.TensorBoard\'>"
+ is_instance: "<class \'tensorflow.python.keras.callbacks.Callback\'>"
is_instance: "<type \'object\'>"
member_method {
name: "__init__"
- argspec: "args=[\'self\', \'log_dir\', \'histogram_freq\', \'batch_size\', \'write_graph\', \'write_grads\', \'write_images\'], varargs=None, keywords=None, defaults=[\'./logs\', \'0\', \'32\', \'True\', \'False\', \'False\'], "
+ argspec: "args=[\'self\', \'log_dir\', \'histogram_freq\', \'batch_size\', \'write_graph\', \'write_grads\', \'write_images\', \'embeddings_freq\', \'embeddings_layer_names\', \'embeddings_metadata\', \'embeddings_data\'], varargs=None, keywords=None, defaults=[\'./logs\', \'0\', \'32\', \'True\', \'False\', \'False\', \'0\', \'None\', \'None\', \'None\'], "
}
member_method {
name: "on_batch_begin"
diff --git a/tensorflow/tools/api/golden/tensorflow.keras.callbacks.-terminate-on-na-n.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.keras.callbacks.-terminate-on-na-n.pbtxt
index bf17e8736c..5c2d336353 100644
--- a/tensorflow/tools/api/golden/tensorflow.keras.callbacks.-terminate-on-na-n.pbtxt
+++ b/tensorflow/tools/api/golden/v1/tensorflow.keras.callbacks.-terminate-on-na-n.pbtxt
@@ -1,7 +1,7 @@
path: "tensorflow.keras.callbacks.TerminateOnNaN"
tf_class {
- is_instance: "<class \'tensorflow.python.keras._impl.keras.callbacks.TerminateOnNaN\'>"
- is_instance: "<class \'tensorflow.python.keras._impl.keras.callbacks.Callback\'>"
+ is_instance: "<class \'tensorflow.python.keras.callbacks.TerminateOnNaN\'>"
+ is_instance: "<class \'tensorflow.python.keras.callbacks.Callback\'>"
is_instance: "<type \'object\'>"
member_method {
name: "__init__"
diff --git a/tensorflow/tools/api/golden/tensorflow.keras.callbacks.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.keras.callbacks.pbtxt
index 1e9085e034..1e9085e034 100644
--- a/tensorflow/tools/api/golden/tensorflow.keras.callbacks.pbtxt
+++ b/tensorflow/tools/api/golden/v1/tensorflow.keras.callbacks.pbtxt
diff --git a/tensorflow/tools/api/golden/tensorflow.keras.constraints.-constraint.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.keras.constraints.-constraint.pbtxt
index 14977c696f..8e07b7d98e 100644
--- a/tensorflow/tools/api/golden/tensorflow.keras.constraints.-constraint.pbtxt
+++ b/tensorflow/tools/api/golden/v1/tensorflow.keras.constraints.-constraint.pbtxt
@@ -1,6 +1,6 @@
path: "tensorflow.keras.constraints.Constraint"
tf_class {
- is_instance: "<class \'tensorflow.python.keras._impl.keras.constraints.Constraint\'>"
+ is_instance: "<class \'tensorflow.python.keras.constraints.Constraint\'>"
is_instance: "<type \'object\'>"
member_method {
name: "__init__"
diff --git a/tensorflow/tools/api/golden/tensorflow.keras.constraints.-max-norm.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.keras.constraints.-max-norm.pbtxt
index a2269f8a18..2b81174b6c 100644
--- a/tensorflow/tools/api/golden/tensorflow.keras.constraints.-max-norm.pbtxt
+++ b/tensorflow/tools/api/golden/v1/tensorflow.keras.constraints.-max-norm.pbtxt
@@ -1,7 +1,7 @@
path: "tensorflow.keras.constraints.MaxNorm"
tf_class {
- is_instance: "<class \'tensorflow.python.keras._impl.keras.constraints.MaxNorm\'>"
- is_instance: "<class \'tensorflow.python.keras._impl.keras.constraints.Constraint\'>"
+ is_instance: "<class \'tensorflow.python.keras.constraints.MaxNorm\'>"
+ is_instance: "<class \'tensorflow.python.keras.constraints.Constraint\'>"
is_instance: "<type \'object\'>"
member_method {
name: "__init__"
diff --git a/tensorflow/tools/api/golden/tensorflow.keras.constraints.-min-max-norm.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.keras.constraints.-min-max-norm.pbtxt
index afe0d6478d..a41eda86ac 100644
--- a/tensorflow/tools/api/golden/tensorflow.keras.constraints.-min-max-norm.pbtxt
+++ b/tensorflow/tools/api/golden/v1/tensorflow.keras.constraints.-min-max-norm.pbtxt
@@ -1,7 +1,7 @@
path: "tensorflow.keras.constraints.MinMaxNorm"
tf_class {
- is_instance: "<class \'tensorflow.python.keras._impl.keras.constraints.MinMaxNorm\'>"
- is_instance: "<class \'tensorflow.python.keras._impl.keras.constraints.Constraint\'>"
+ is_instance: "<class \'tensorflow.python.keras.constraints.MinMaxNorm\'>"
+ is_instance: "<class \'tensorflow.python.keras.constraints.Constraint\'>"
is_instance: "<type \'object\'>"
member_method {
name: "__init__"
diff --git a/tensorflow/tools/api/golden/tensorflow.keras.constraints.-non-neg.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.keras.constraints.-non-neg.pbtxt
index e8c4bb9088..572e3eea4d 100644
--- a/tensorflow/tools/api/golden/tensorflow.keras.constraints.-non-neg.pbtxt
+++ b/tensorflow/tools/api/golden/v1/tensorflow.keras.constraints.-non-neg.pbtxt
@@ -1,7 +1,7 @@
path: "tensorflow.keras.constraints.NonNeg"
tf_class {
- is_instance: "<class \'tensorflow.python.keras._impl.keras.constraints.NonNeg\'>"
- is_instance: "<class \'tensorflow.python.keras._impl.keras.constraints.Constraint\'>"
+ is_instance: "<class \'tensorflow.python.keras.constraints.NonNeg\'>"
+ is_instance: "<class \'tensorflow.python.keras.constraints.Constraint\'>"
is_instance: "<type \'object\'>"
member_method {
name: "__init__"
diff --git a/tensorflow/tools/api/golden/tensorflow.keras.constraints.-unit-norm.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.keras.constraints.-unit-norm.pbtxt
index d457cb6419..fe16c38cc8 100644
--- a/tensorflow/tools/api/golden/tensorflow.keras.constraints.-unit-norm.pbtxt
+++ b/tensorflow/tools/api/golden/v1/tensorflow.keras.constraints.-unit-norm.pbtxt
@@ -1,7 +1,7 @@
path: "tensorflow.keras.constraints.UnitNorm"
tf_class {
- is_instance: "<class \'tensorflow.python.keras._impl.keras.constraints.UnitNorm\'>"
- is_instance: "<class \'tensorflow.python.keras._impl.keras.constraints.Constraint\'>"
+ is_instance: "<class \'tensorflow.python.keras.constraints.UnitNorm\'>"
+ is_instance: "<class \'tensorflow.python.keras.constraints.Constraint\'>"
is_instance: "<type \'object\'>"
member_method {
name: "__init__"
diff --git a/tensorflow/tools/api/golden/tensorflow.keras.constraints.max_norm.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.keras.constraints.max_norm.pbtxt
index 48128096d4..6650bae07a 100644
--- a/tensorflow/tools/api/golden/tensorflow.keras.constraints.max_norm.pbtxt
+++ b/tensorflow/tools/api/golden/v1/tensorflow.keras.constraints.max_norm.pbtxt
@@ -1,7 +1,7 @@
path: "tensorflow.keras.constraints.max_norm"
tf_class {
- is_instance: "<class \'tensorflow.python.keras._impl.keras.constraints.MaxNorm\'>"
- is_instance: "<class \'tensorflow.python.keras._impl.keras.constraints.Constraint\'>"
+ is_instance: "<class \'tensorflow.python.keras.constraints.MaxNorm\'>"
+ is_instance: "<class \'tensorflow.python.keras.constraints.Constraint\'>"
is_instance: "<type \'object\'>"
member_method {
name: "__init__"
diff --git a/tensorflow/tools/api/golden/tensorflow.keras.constraints.min_max_norm.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.keras.constraints.min_max_norm.pbtxt
index 02eb3fb00c..9dd3bc92fc 100644
--- a/tensorflow/tools/api/golden/tensorflow.keras.constraints.min_max_norm.pbtxt
+++ b/tensorflow/tools/api/golden/v1/tensorflow.keras.constraints.min_max_norm.pbtxt
@@ -1,7 +1,7 @@
path: "tensorflow.keras.constraints.min_max_norm"
tf_class {
- is_instance: "<class \'tensorflow.python.keras._impl.keras.constraints.MinMaxNorm\'>"
- is_instance: "<class \'tensorflow.python.keras._impl.keras.constraints.Constraint\'>"
+ is_instance: "<class \'tensorflow.python.keras.constraints.MinMaxNorm\'>"
+ is_instance: "<class \'tensorflow.python.keras.constraints.Constraint\'>"
is_instance: "<type \'object\'>"
member_method {
name: "__init__"
diff --git a/tensorflow/tools/api/golden/tensorflow.keras.constraints.non_neg.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.keras.constraints.non_neg.pbtxt
index cc1101097c..a565840939 100644
--- a/tensorflow/tools/api/golden/tensorflow.keras.constraints.non_neg.pbtxt
+++ b/tensorflow/tools/api/golden/v1/tensorflow.keras.constraints.non_neg.pbtxt
@@ -1,7 +1,7 @@
path: "tensorflow.keras.constraints.non_neg"
tf_class {
- is_instance: "<class \'tensorflow.python.keras._impl.keras.constraints.NonNeg\'>"
- is_instance: "<class \'tensorflow.python.keras._impl.keras.constraints.Constraint\'>"
+ is_instance: "<class \'tensorflow.python.keras.constraints.NonNeg\'>"
+ is_instance: "<class \'tensorflow.python.keras.constraints.Constraint\'>"
is_instance: "<type \'object\'>"
member_method {
name: "__init__"
diff --git a/tensorflow/tools/api/golden/tensorflow.keras.constraints.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.keras.constraints.pbtxt
index 655685956f..655685956f 100644
--- a/tensorflow/tools/api/golden/tensorflow.keras.constraints.pbtxt
+++ b/tensorflow/tools/api/golden/v1/tensorflow.keras.constraints.pbtxt
diff --git a/tensorflow/tools/api/golden/tensorflow.keras.constraints.unit_norm.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.keras.constraints.unit_norm.pbtxt
index 086f9f2d43..5cbe0da4c1 100644
--- a/tensorflow/tools/api/golden/tensorflow.keras.constraints.unit_norm.pbtxt
+++ b/tensorflow/tools/api/golden/v1/tensorflow.keras.constraints.unit_norm.pbtxt
@@ -1,7 +1,7 @@
path: "tensorflow.keras.constraints.unit_norm"
tf_class {
- is_instance: "<class \'tensorflow.python.keras._impl.keras.constraints.UnitNorm\'>"
- is_instance: "<class \'tensorflow.python.keras._impl.keras.constraints.Constraint\'>"
+ is_instance: "<class \'tensorflow.python.keras.constraints.UnitNorm\'>"
+ is_instance: "<class \'tensorflow.python.keras.constraints.Constraint\'>"
is_instance: "<type \'object\'>"
member_method {
name: "__init__"
diff --git a/tensorflow/tools/api/golden/tensorflow.keras.datasets.boston_housing.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.keras.datasets.boston_housing.pbtxt
index bda31751d4..bda31751d4 100644
--- a/tensorflow/tools/api/golden/tensorflow.keras.datasets.boston_housing.pbtxt
+++ b/tensorflow/tools/api/golden/v1/tensorflow.keras.datasets.boston_housing.pbtxt
diff --git a/tensorflow/tools/api/golden/tensorflow.keras.datasets.cifar10.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.keras.datasets.cifar10.pbtxt
index 8a5142f793..8a5142f793 100644
--- a/tensorflow/tools/api/golden/tensorflow.keras.datasets.cifar10.pbtxt
+++ b/tensorflow/tools/api/golden/v1/tensorflow.keras.datasets.cifar10.pbtxt
diff --git a/tensorflow/tools/api/golden/tensorflow.keras.datasets.cifar100.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.keras.datasets.cifar100.pbtxt
index 16f184eeb5..16f184eeb5 100644
--- a/tensorflow/tools/api/golden/tensorflow.keras.datasets.cifar100.pbtxt
+++ b/tensorflow/tools/api/golden/v1/tensorflow.keras.datasets.cifar100.pbtxt
diff --git a/tensorflow/tools/api/golden/tensorflow.keras.datasets.fashion_mnist.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.keras.datasets.fashion_mnist.pbtxt
index a0e14356fa..a0e14356fa 100644
--- a/tensorflow/tools/api/golden/tensorflow.keras.datasets.fashion_mnist.pbtxt
+++ b/tensorflow/tools/api/golden/v1/tensorflow.keras.datasets.fashion_mnist.pbtxt
diff --git a/tensorflow/tools/api/golden/tensorflow.keras.datasets.imdb.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.keras.datasets.imdb.pbtxt
index ff962876b6..ff962876b6 100644
--- a/tensorflow/tools/api/golden/tensorflow.keras.datasets.imdb.pbtxt
+++ b/tensorflow/tools/api/golden/v1/tensorflow.keras.datasets.imdb.pbtxt
diff --git a/tensorflow/tools/api/golden/tensorflow.keras.datasets.mnist.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.keras.datasets.mnist.pbtxt
index 530bb07550..530bb07550 100644
--- a/tensorflow/tools/api/golden/tensorflow.keras.datasets.mnist.pbtxt
+++ b/tensorflow/tools/api/golden/v1/tensorflow.keras.datasets.mnist.pbtxt
diff --git a/tensorflow/tools/api/golden/tensorflow.keras.datasets.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.keras.datasets.pbtxt
index 36e3aafbe4..36e3aafbe4 100644
--- a/tensorflow/tools/api/golden/tensorflow.keras.datasets.pbtxt
+++ b/tensorflow/tools/api/golden/v1/tensorflow.keras.datasets.pbtxt
diff --git a/tensorflow/tools/api/golden/tensorflow.keras.datasets.reuters.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.keras.datasets.reuters.pbtxt
index 2da4a13067..2da4a13067 100644
--- a/tensorflow/tools/api/golden/tensorflow.keras.datasets.reuters.pbtxt
+++ b/tensorflow/tools/api/golden/v1/tensorflow.keras.datasets.reuters.pbtxt
diff --git a/tensorflow/tools/api/golden/tensorflow.keras.estimator.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.keras.estimator.pbtxt
index 7a3fb39f77..7a3fb39f77 100644
--- a/tensorflow/tools/api/golden/tensorflow.keras.estimator.pbtxt
+++ b/tensorflow/tools/api/golden/v1/tensorflow.keras.estimator.pbtxt
diff --git a/tensorflow/tools/api/golden/tensorflow.keras.initializers.-constant.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.keras.initializers.-constant.pbtxt
index cbaba78ed5..cbaba78ed5 100644
--- a/tensorflow/tools/api/golden/tensorflow.keras.initializers.-constant.pbtxt
+++ b/tensorflow/tools/api/golden/v1/tensorflow.keras.initializers.-constant.pbtxt
diff --git a/tensorflow/tools/api/golden/tensorflow.keras.initializers.-identity.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.keras.initializers.-identity.pbtxt
index a5f7f348de..a5f7f348de 100644
--- a/tensorflow/tools/api/golden/tensorflow.keras.initializers.-identity.pbtxt
+++ b/tensorflow/tools/api/golden/v1/tensorflow.keras.initializers.-identity.pbtxt
diff --git a/tensorflow/tools/api/golden/tensorflow.keras.initializers.-initializer.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.keras.initializers.-initializer.pbtxt
index 8f10d1698e..8f10d1698e 100644
--- a/tensorflow/tools/api/golden/tensorflow.keras.initializers.-initializer.pbtxt
+++ b/tensorflow/tools/api/golden/v1/tensorflow.keras.initializers.-initializer.pbtxt
diff --git a/tensorflow/tools/api/golden/tensorflow.keras.initializers.-ones.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.keras.initializers.-ones.pbtxt
index 2fbfa774f8..2fbfa774f8 100644
--- a/tensorflow/tools/api/golden/tensorflow.keras.initializers.-ones.pbtxt
+++ b/tensorflow/tools/api/golden/v1/tensorflow.keras.initializers.-ones.pbtxt
diff --git a/tensorflow/tools/api/golden/tensorflow.keras.initializers.-orthogonal.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.keras.initializers.-orthogonal.pbtxt
index 874d320d73..874d320d73 100644
--- a/tensorflow/tools/api/golden/tensorflow.keras.initializers.-orthogonal.pbtxt
+++ b/tensorflow/tools/api/golden/v1/tensorflow.keras.initializers.-orthogonal.pbtxt
diff --git a/tensorflow/tools/api/golden/v1/tensorflow.keras.initializers.-random-normal.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.keras.initializers.-random-normal.pbtxt
new file mode 100644
index 0000000000..26784ce55d
--- /dev/null
+++ b/tensorflow/tools/api/golden/v1/tensorflow.keras.initializers.-random-normal.pbtxt
@@ -0,0 +1,19 @@
+path: "tensorflow.keras.initializers.RandomNormal"
+tf_class {
+ is_instance: "<class \'tensorflow.python.keras.initializers.RandomNormal\'>"
+ is_instance: "<class \'tensorflow.python.ops.init_ops.RandomNormal\'>"
+ is_instance: "<class \'tensorflow.python.ops.init_ops.Initializer\'>"
+ is_instance: "<type \'object\'>"
+ member_method {
+ name: "__init__"
+ argspec: "args=[\'self\', \'mean\', \'stddev\', \'seed\', \'dtype\'], varargs=None, keywords=None, defaults=[\'0.0\', \'0.05\', \'None\', \"<dtype: \'float32\'>\"], "
+ }
+ member_method {
+ name: "from_config"
+ argspec: "args=[\'cls\', \'config\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_config"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+}
diff --git a/tensorflow/tools/api/golden/v1/tensorflow.keras.initializers.-random-uniform.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.keras.initializers.-random-uniform.pbtxt
new file mode 100644
index 0000000000..4110bda5f6
--- /dev/null
+++ b/tensorflow/tools/api/golden/v1/tensorflow.keras.initializers.-random-uniform.pbtxt
@@ -0,0 +1,19 @@
+path: "tensorflow.keras.initializers.RandomUniform"
+tf_class {
+ is_instance: "<class \'tensorflow.python.keras.initializers.RandomUniform\'>"
+ is_instance: "<class \'tensorflow.python.ops.init_ops.RandomUniform\'>"
+ is_instance: "<class \'tensorflow.python.ops.init_ops.Initializer\'>"
+ is_instance: "<type \'object\'>"
+ member_method {
+ name: "__init__"
+ argspec: "args=[\'self\', \'minval\', \'maxval\', \'seed\', \'dtype\'], varargs=None, keywords=None, defaults=[\'-0.05\', \'0.05\', \'None\', \"<dtype: \'float32\'>\"], "
+ }
+ member_method {
+ name: "from_config"
+ argspec: "args=[\'cls\', \'config\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_config"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+}
diff --git a/tensorflow/tools/api/golden/v1/tensorflow.keras.initializers.-truncated-normal.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.keras.initializers.-truncated-normal.pbtxt
new file mode 100644
index 0000000000..0451d0d73a
--- /dev/null
+++ b/tensorflow/tools/api/golden/v1/tensorflow.keras.initializers.-truncated-normal.pbtxt
@@ -0,0 +1,19 @@
+path: "tensorflow.keras.initializers.TruncatedNormal"
+tf_class {
+ is_instance: "<class \'tensorflow.python.keras.initializers.TruncatedNormal\'>"
+ is_instance: "<class \'tensorflow.python.ops.init_ops.TruncatedNormal\'>"
+ is_instance: "<class \'tensorflow.python.ops.init_ops.Initializer\'>"
+ is_instance: "<type \'object\'>"
+ member_method {
+ name: "__init__"
+ argspec: "args=[\'self\', \'mean\', \'stddev\', \'seed\', \'dtype\'], varargs=None, keywords=None, defaults=[\'0.0\', \'0.05\', \'None\', \"<dtype: \'float32\'>\"], "
+ }
+ member_method {
+ name: "from_config"
+ argspec: "args=[\'cls\', \'config\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_config"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+}
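The three new goldens above (RandomNormal, RandomUniform, TruncatedNormal) pin the keras initializer wrappers; a sketch that wires two of them into a layer using the documented default values:

import tensorflow as tf

# mean/stddev and minval/maxval match the defaults in the argspecs above.
dense = tf.keras.layers.Dense(
    units=64,
    kernel_initializer=tf.keras.initializers.TruncatedNormal(mean=0.0, stddev=0.05),
    bias_initializer=tf.keras.initializers.RandomUniform(minval=-0.05, maxval=0.05))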
diff --git a/tensorflow/tools/api/golden/tensorflow.keras.initializers.-variance-scaling.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.keras.initializers.-variance-scaling.pbtxt
index 32a6f6ee88..03f4064b9e 100644
--- a/tensorflow/tools/api/golden/tensorflow.keras.initializers.-variance-scaling.pbtxt
+++ b/tensorflow/tools/api/golden/v1/tensorflow.keras.initializers.-variance-scaling.pbtxt
@@ -5,7 +5,7 @@ tf_class {
is_instance: "<type \'object\'>"
member_method {
name: "__init__"
- argspec: "args=[\'self\', \'scale\', \'mode\', \'distribution\', \'seed\', \'dtype\'], varargs=None, keywords=None, defaults=[\'1.0\', \'fan_in\', \'normal\', \'None\', \"<dtype: \'float32\'>\"], "
+ argspec: "args=[\'self\', \'scale\', \'mode\', \'distribution\', \'seed\', \'dtype\'], varargs=None, keywords=None, defaults=[\'1.0\', \'fan_in\', \'truncated_normal\', \'None\', \"<dtype: \'float32\'>\"], "
}
member_method {
name: "from_config"
diff --git a/tensorflow/tools/api/golden/tensorflow.keras.initializers.-zeros.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.keras.initializers.-zeros.pbtxt
index b6ab68e5be..b6ab68e5be 100644
--- a/tensorflow/tools/api/golden/tensorflow.keras.initializers.-zeros.pbtxt
+++ b/tensorflow/tools/api/golden/v1/tensorflow.keras.initializers.-zeros.pbtxt
diff --git a/tensorflow/tools/api/golden/v1/tensorflow.keras.initializers.constant.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.keras.initializers.constant.pbtxt
new file mode 100644
index 0000000000..bddc37b907
--- /dev/null
+++ b/tensorflow/tools/api/golden/v1/tensorflow.keras.initializers.constant.pbtxt
@@ -0,0 +1,18 @@
+path: "tensorflow.keras.initializers.constant"
+tf_class {
+ is_instance: "<class \'tensorflow.python.ops.init_ops.Constant\'>"
+ is_instance: "<class \'tensorflow.python.ops.init_ops.Initializer\'>"
+ is_instance: "<type \'object\'>"
+ member_method {
+ name: "__init__"
+ argspec: "args=[\'self\', \'value\', \'dtype\', \'verify_shape\'], varargs=None, keywords=None, defaults=[\'0\', \"<dtype: \'float32\'>\", \'False\'], "
+ }
+ member_method {
+ name: "from_config"
+ argspec: "args=[\'cls\', \'config\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_config"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+}
diff --git a/tensorflow/tools/api/golden/v1/tensorflow.keras.initializers.glorot_normal.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.keras.initializers.glorot_normal.pbtxt
new file mode 100644
index 0000000000..ef0815972d
--- /dev/null
+++ b/tensorflow/tools/api/golden/v1/tensorflow.keras.initializers.glorot_normal.pbtxt
@@ -0,0 +1,19 @@
+path: "tensorflow.keras.initializers.glorot_normal"
+tf_class {
+ is_instance: "<class \'tensorflow.python.ops.init_ops.GlorotNormal\'>"
+ is_instance: "<class \'tensorflow.python.ops.init_ops.VarianceScaling\'>"
+ is_instance: "<class \'tensorflow.python.ops.init_ops.Initializer\'>"
+ is_instance: "<type \'object\'>"
+ member_method {
+ name: "__init__"
+ argspec: "args=[\'self\', \'seed\', \'dtype\'], varargs=None, keywords=None, defaults=[\'None\', \"<dtype: \'float32\'>\"], "
+ }
+ member_method {
+ name: "from_config"
+ argspec: "args=[\'cls\', \'config\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_config"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+}
diff --git a/tensorflow/tools/api/golden/v1/tensorflow.keras.initializers.glorot_uniform.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.keras.initializers.glorot_uniform.pbtxt
new file mode 100644
index 0000000000..439b5ada9b
--- /dev/null
+++ b/tensorflow/tools/api/golden/v1/tensorflow.keras.initializers.glorot_uniform.pbtxt
@@ -0,0 +1,19 @@
+path: "tensorflow.keras.initializers.glorot_uniform"
+tf_class {
+ is_instance: "<class \'tensorflow.python.ops.init_ops.GlorotUniform\'>"
+ is_instance: "<class \'tensorflow.python.ops.init_ops.VarianceScaling\'>"
+ is_instance: "<class \'tensorflow.python.ops.init_ops.Initializer\'>"
+ is_instance: "<type \'object\'>"
+ member_method {
+ name: "__init__"
+ argspec: "args=[\'self\', \'seed\', \'dtype\'], varargs=None, keywords=None, defaults=[\'None\', \"<dtype: \'float32\'>\"], "
+ }
+ member_method {
+ name: "from_config"
+ argspec: "args=[\'cls\', \'config\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_config"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+}
diff --git a/tensorflow/tools/api/golden/v1/tensorflow.keras.initializers.identity.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.keras.initializers.identity.pbtxt
new file mode 100644
index 0000000000..a4c5a61490
--- /dev/null
+++ b/tensorflow/tools/api/golden/v1/tensorflow.keras.initializers.identity.pbtxt
@@ -0,0 +1,18 @@
+path: "tensorflow.keras.initializers.identity"
+tf_class {
+ is_instance: "<class \'tensorflow.python.ops.init_ops.Identity\'>"
+ is_instance: "<class \'tensorflow.python.ops.init_ops.Initializer\'>"
+ is_instance: "<type \'object\'>"
+ member_method {
+ name: "__init__"
+ argspec: "args=[\'self\', \'gain\', \'dtype\'], varargs=None, keywords=None, defaults=[\'1.0\', \"<dtype: \'float32\'>\"], "
+ }
+ member_method {
+ name: "from_config"
+ argspec: "args=[\'cls\', \'config\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_config"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+}
diff --git a/tensorflow/tools/api/golden/v1/tensorflow.keras.initializers.normal.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.keras.initializers.normal.pbtxt
new file mode 100644
index 0000000000..8d0b5c242b
--- /dev/null
+++ b/tensorflow/tools/api/golden/v1/tensorflow.keras.initializers.normal.pbtxt
@@ -0,0 +1,19 @@
+path: "tensorflow.keras.initializers.normal"
+tf_class {
+ is_instance: "<class \'tensorflow.python.keras.initializers.RandomNormal\'>"
+ is_instance: "<class \'tensorflow.python.ops.init_ops.RandomNormal\'>"
+ is_instance: "<class \'tensorflow.python.ops.init_ops.Initializer\'>"
+ is_instance: "<type \'object\'>"
+ member_method {
+ name: "__init__"
+ argspec: "args=[\'self\', \'mean\', \'stddev\', \'seed\', \'dtype\'], varargs=None, keywords=None, defaults=[\'0.0\', \'0.05\', \'None\', \"<dtype: \'float32\'>\"], "
+ }
+ member_method {
+ name: "from_config"
+ argspec: "args=[\'cls\', \'config\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_config"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+}
diff --git a/tensorflow/tools/api/golden/v1/tensorflow.keras.initializers.ones.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.keras.initializers.ones.pbtxt
new file mode 100644
index 0000000000..a89f78d1e1
--- /dev/null
+++ b/tensorflow/tools/api/golden/v1/tensorflow.keras.initializers.ones.pbtxt
@@ -0,0 +1,18 @@
+path: "tensorflow.keras.initializers.ones"
+tf_class {
+ is_instance: "<class \'tensorflow.python.ops.init_ops.Ones\'>"
+ is_instance: "<class \'tensorflow.python.ops.init_ops.Initializer\'>"
+ is_instance: "<type \'object\'>"
+ member_method {
+ name: "__init__"
+ argspec: "args=[\'self\', \'dtype\'], varargs=None, keywords=None, defaults=[\"<dtype: \'float32\'>\"], "
+ }
+ member_method {
+ name: "from_config"
+ argspec: "args=[\'cls\', \'config\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_config"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+}
diff --git a/tensorflow/tools/api/golden/v1/tensorflow.keras.initializers.orthogonal.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.keras.initializers.orthogonal.pbtxt
new file mode 100644
index 0000000000..ee1e9bbae2
--- /dev/null
+++ b/tensorflow/tools/api/golden/v1/tensorflow.keras.initializers.orthogonal.pbtxt
@@ -0,0 +1,18 @@
+path: "tensorflow.keras.initializers.orthogonal"
+tf_class {
+ is_instance: "<class \'tensorflow.python.ops.init_ops.Orthogonal\'>"
+ is_instance: "<class \'tensorflow.python.ops.init_ops.Initializer\'>"
+ is_instance: "<type \'object\'>"
+ member_method {
+ name: "__init__"
+ argspec: "args=[\'self\', \'gain\', \'seed\', \'dtype\'], varargs=None, keywords=None, defaults=[\'1.0\', \'None\', \"<dtype: \'float32\'>\"], "
+ }
+ member_method {
+ name: "from_config"
+ argspec: "args=[\'cls\', \'config\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_config"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+}
diff --git a/tensorflow/tools/api/golden/tensorflow.keras.initializers.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.keras.initializers.pbtxt
index 093c56595b..1540c2915b 100644
--- a/tensorflow/tools/api/golden/tensorflow.keras.initializers.pbtxt
+++ b/tensorflow/tools/api/golden/v1/tensorflow.keras.initializers.pbtxt
@@ -40,6 +40,54 @@ tf_module {
name: "Zeros"
mtype: "<type \'type\'>"
}
+ member {
+ name: "constant"
+ mtype: "<type \'type\'>"
+ }
+ member {
+ name: "glorot_normal"
+ mtype: "<type \'type\'>"
+ }
+ member {
+ name: "glorot_uniform"
+ mtype: "<type \'type\'>"
+ }
+ member {
+ name: "identity"
+ mtype: "<type \'type\'>"
+ }
+ member {
+ name: "normal"
+ mtype: "<type \'type\'>"
+ }
+ member {
+ name: "ones"
+ mtype: "<type \'type\'>"
+ }
+ member {
+ name: "orthogonal"
+ mtype: "<type \'type\'>"
+ }
+ member {
+ name: "random_normal"
+ mtype: "<type \'type\'>"
+ }
+ member {
+ name: "random_uniform"
+ mtype: "<type \'type\'>"
+ }
+ member {
+ name: "truncated_normal"
+ mtype: "<type \'type\'>"
+ }
+ member {
+ name: "uniform"
+ mtype: "<type \'type\'>"
+ }
+ member {
+ name: "zeros"
+ mtype: "<type \'type\'>"
+ }
member_method {
name: "deserialize"
argspec: "args=[\'config\', \'custom_objects\'], varargs=None, keywords=None, defaults=[\'None\'], "
@@ -49,14 +97,6 @@ tf_module {
argspec: "args=[\'identifier\'], varargs=None, keywords=None, defaults=None"
}
member_method {
- name: "glorot_normal"
- argspec: "args=[\'seed\'], varargs=None, keywords=None, defaults=[\'None\'], "
- }
- member_method {
- name: "glorot_uniform"
- argspec: "args=[\'seed\'], varargs=None, keywords=None, defaults=[\'None\'], "
- }
- member_method {
name: "he_normal"
argspec: "args=[\'seed\'], varargs=None, keywords=None, defaults=[\'None\'], "
}
diff --git a/tensorflow/tools/api/golden/v1/tensorflow.keras.initializers.random_normal.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.keras.initializers.random_normal.pbtxt
new file mode 100644
index 0000000000..bac8211a10
--- /dev/null
+++ b/tensorflow/tools/api/golden/v1/tensorflow.keras.initializers.random_normal.pbtxt
@@ -0,0 +1,19 @@
+path: "tensorflow.keras.initializers.random_normal"
+tf_class {
+ is_instance: "<class \'tensorflow.python.keras.initializers.RandomNormal\'>"
+ is_instance: "<class \'tensorflow.python.ops.init_ops.RandomNormal\'>"
+ is_instance: "<class \'tensorflow.python.ops.init_ops.Initializer\'>"
+ is_instance: "<type \'object\'>"
+ member_method {
+ name: "__init__"
+ argspec: "args=[\'self\', \'mean\', \'stddev\', \'seed\', \'dtype\'], varargs=None, keywords=None, defaults=[\'0.0\', \'0.05\', \'None\', \"<dtype: \'float32\'>\"], "
+ }
+ member_method {
+ name: "from_config"
+ argspec: "args=[\'cls\', \'config\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_config"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+}
diff --git a/tensorflow/tools/api/golden/v1/tensorflow.keras.initializers.random_uniform.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.keras.initializers.random_uniform.pbtxt
new file mode 100644
index 0000000000..ab0d74d071
--- /dev/null
+++ b/tensorflow/tools/api/golden/v1/tensorflow.keras.initializers.random_uniform.pbtxt
@@ -0,0 +1,19 @@
+path: "tensorflow.keras.initializers.random_uniform"
+tf_class {
+ is_instance: "<class \'tensorflow.python.keras.initializers.RandomUniform\'>"
+ is_instance: "<class \'tensorflow.python.ops.init_ops.RandomUniform\'>"
+ is_instance: "<class \'tensorflow.python.ops.init_ops.Initializer\'>"
+ is_instance: "<type \'object\'>"
+ member_method {
+ name: "__init__"
+ argspec: "args=[\'self\', \'minval\', \'maxval\', \'seed\', \'dtype\'], varargs=None, keywords=None, defaults=[\'-0.05\', \'0.05\', \'None\', \"<dtype: \'float32\'>\"], "
+ }
+ member_method {
+ name: "from_config"
+ argspec: "args=[\'cls\', \'config\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_config"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+}
diff --git a/tensorflow/tools/api/golden/v1/tensorflow.keras.initializers.truncated_normal.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.keras.initializers.truncated_normal.pbtxt
new file mode 100644
index 0000000000..358cca2b9c
--- /dev/null
+++ b/tensorflow/tools/api/golden/v1/tensorflow.keras.initializers.truncated_normal.pbtxt
@@ -0,0 +1,19 @@
+path: "tensorflow.keras.initializers.truncated_normal"
+tf_class {
+ is_instance: "<class \'tensorflow.python.keras.initializers.TruncatedNormal\'>"
+ is_instance: "<class \'tensorflow.python.ops.init_ops.TruncatedNormal\'>"
+ is_instance: "<class \'tensorflow.python.ops.init_ops.Initializer\'>"
+ is_instance: "<type \'object\'>"
+ member_method {
+ name: "__init__"
+ argspec: "args=[\'self\', \'mean\', \'stddev\', \'seed\', \'dtype\'], varargs=None, keywords=None, defaults=[\'0.0\', \'0.05\', \'None\', \"<dtype: \'float32\'>\"], "
+ }
+ member_method {
+ name: "from_config"
+ argspec: "args=[\'cls\', \'config\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_config"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+}
diff --git a/tensorflow/tools/api/golden/v1/tensorflow.keras.initializers.uniform.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.keras.initializers.uniform.pbtxt
new file mode 100644
index 0000000000..e6c731361a
--- /dev/null
+++ b/tensorflow/tools/api/golden/v1/tensorflow.keras.initializers.uniform.pbtxt
@@ -0,0 +1,19 @@
+path: "tensorflow.keras.initializers.uniform"
+tf_class {
+ is_instance: "<class \'tensorflow.python.keras.initializers.RandomUniform\'>"
+ is_instance: "<class \'tensorflow.python.ops.init_ops.RandomUniform\'>"
+ is_instance: "<class \'tensorflow.python.ops.init_ops.Initializer\'>"
+ is_instance: "<type \'object\'>"
+ member_method {
+ name: "__init__"
+ argspec: "args=[\'self\', \'minval\', \'maxval\', \'seed\', \'dtype\'], varargs=None, keywords=None, defaults=[\'-0.05\', \'0.05\', \'None\', \"<dtype: \'float32\'>\"], "
+ }
+ member_method {
+ name: "from_config"
+ argspec: "args=[\'cls\', \'config\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_config"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+}
diff --git a/tensorflow/tools/api/golden/v1/tensorflow.keras.initializers.zeros.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.keras.initializers.zeros.pbtxt
new file mode 100644
index 0000000000..a262390687
--- /dev/null
+++ b/tensorflow/tools/api/golden/v1/tensorflow.keras.initializers.zeros.pbtxt
@@ -0,0 +1,18 @@
+path: "tensorflow.keras.initializers.zeros"
+tf_class {
+ is_instance: "<class \'tensorflow.python.ops.init_ops.Zeros\'>"
+ is_instance: "<class \'tensorflow.python.ops.init_ops.Initializer\'>"
+ is_instance: "<type \'object\'>"
+ member_method {
+ name: "__init__"
+ argspec: "args=[\'self\', \'dtype\'], varargs=None, keywords=None, defaults=[\"<dtype: \'float32\'>\"], "
+ }
+ member_method {
+ name: "from_config"
+ argspec: "args=[\'cls\', \'config\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_config"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+}
diff --git a/tensorflow/tools/api/golden/tensorflow.keras.layers.-activation.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-activation.pbtxt
index 96272d1b7d..5510465d7b 100644
--- a/tensorflow/tools/api/golden/tensorflow.keras.layers.-activation.pbtxt
+++ b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-activation.pbtxt
@@ -1,8 +1,8 @@
path: "tensorflow.keras.layers.Activation"
tf_class {
- is_instance: "<class \'tensorflow.python.keras._impl.keras.layers.core.Activation\'>"
- is_instance: "<class \'tensorflow.python.keras._impl.keras.engine.base_layer.Layer\'>"
- is_instance: "<class \'tensorflow.python.training.checkpointable.CheckpointableBase\'>"
+ is_instance: "<class \'tensorflow.python.keras.layers.core.Activation\'>"
+ is_instance: "<class \'tensorflow.python.keras.engine.base_layer.Layer\'>"
+ is_instance: "<class \'tensorflow.python.training.checkpointable.base.CheckpointableBase\'>"
is_instance: "<type \'object\'>"
member {
name: "activity_regularizer"
@@ -98,7 +98,7 @@ tf_class {
}
member_method {
name: "add_weight"
- argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'getter\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'True\', \'None\', \'None\', \'None\', \'None\'], "
+ argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'synchronization\', \'aggregation\'], varargs=None, keywords=kwargs, defaults=[\'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'VariableSynchronization.AUTO\', \'VariableAggregation.NONE\'], "
}
member_method {
name: "apply"
@@ -106,7 +106,7 @@ tf_class {
}
member_method {
name: "build"
- argspec: "args=[\'self\', \'_\'], varargs=None, keywords=None, defaults=None"
+ argspec: "args=[\'self\', \'input_shape\'], varargs=None, keywords=None, defaults=None"
}
member_method {
name: "call"
diff --git a/tensorflow/tools/api/golden/tensorflow.keras.layers.-activity-regularization.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-activity-regularization.pbtxt
index 8fd55c8686..38ec8a0aff 100644
--- a/tensorflow/tools/api/golden/tensorflow.keras.layers.-activity-regularization.pbtxt
+++ b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-activity-regularization.pbtxt
@@ -1,8 +1,8 @@
path: "tensorflow.keras.layers.ActivityRegularization"
tf_class {
- is_instance: "<class \'tensorflow.python.keras._impl.keras.layers.core.ActivityRegularization\'>"
- is_instance: "<class \'tensorflow.python.keras._impl.keras.engine.base_layer.Layer\'>"
- is_instance: "<class \'tensorflow.python.training.checkpointable.CheckpointableBase\'>"
+ is_instance: "<class \'tensorflow.python.keras.layers.core.ActivityRegularization\'>"
+ is_instance: "<class \'tensorflow.python.keras.engine.base_layer.Layer\'>"
+ is_instance: "<class \'tensorflow.python.training.checkpointable.base.CheckpointableBase\'>"
is_instance: "<type \'object\'>"
member {
name: "activity_regularizer"
@@ -98,7 +98,7 @@ tf_class {
}
member_method {
name: "add_weight"
- argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'getter\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'True\', \'None\', \'None\', \'None\', \'None\'], "
+ argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'synchronization\', \'aggregation\'], varargs=None, keywords=kwargs, defaults=[\'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'VariableSynchronization.AUTO\', \'VariableAggregation.NONE\'], "
}
member_method {
name: "apply"
@@ -106,7 +106,7 @@ tf_class {
}
member_method {
name: "build"
- argspec: "args=[\'self\', \'_\'], varargs=None, keywords=None, defaults=None"
+ argspec: "args=[\'self\', \'input_shape\'], varargs=None, keywords=None, defaults=None"
}
member_method {
name: "call"
diff --git a/tensorflow/tools/api/golden/tensorflow.keras.layers.-add.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-add.pbtxt
index 47d1532c3c..41cb8e30bf 100644
--- a/tensorflow/tools/api/golden/tensorflow.keras.layers.-add.pbtxt
+++ b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-add.pbtxt
@@ -1,9 +1,9 @@
path: "tensorflow.keras.layers.Add"
tf_class {
- is_instance: "<class \'tensorflow.python.keras._impl.keras.layers.merge.Add\'>"
- is_instance: "<class \'tensorflow.python.keras._impl.keras.layers.merge._Merge\'>"
- is_instance: "<class \'tensorflow.python.keras._impl.keras.engine.base_layer.Layer\'>"
- is_instance: "<class \'tensorflow.python.training.checkpointable.CheckpointableBase\'>"
+ is_instance: "<class \'tensorflow.python.keras.layers.merge.Add\'>"
+ is_instance: "<class \'tensorflow.python.keras.layers.merge._Merge\'>"
+ is_instance: "<class \'tensorflow.python.keras.engine.base_layer.Layer\'>"
+ is_instance: "<class \'tensorflow.python.training.checkpointable.base.CheckpointableBase\'>"
is_instance: "<type \'object\'>"
member {
name: "activity_regularizer"
@@ -99,7 +99,7 @@ tf_class {
}
member_method {
name: "add_weight"
- argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'getter\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'True\', \'None\', \'None\', \'None\', \'None\'], "
+ argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'synchronization\', \'aggregation\'], varargs=None, keywords=kwargs, defaults=[\'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'VariableSynchronization.AUTO\', \'VariableAggregation.NONE\'], "
}
member_method {
name: "apply"
diff --git a/tensorflow/tools/api/golden/tensorflow.keras.layers.-alpha-dropout.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-alpha-dropout.pbtxt
index 797d422a90..9a7aaa8e96 100644
--- a/tensorflow/tools/api/golden/tensorflow.keras.layers.-alpha-dropout.pbtxt
+++ b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-alpha-dropout.pbtxt
@@ -1,8 +1,8 @@
path: "tensorflow.keras.layers.AlphaDropout"
tf_class {
- is_instance: "<class \'tensorflow.python.keras._impl.keras.layers.noise.AlphaDropout\'>"
- is_instance: "<class \'tensorflow.python.keras._impl.keras.engine.base_layer.Layer\'>"
- is_instance: "<class \'tensorflow.python.training.checkpointable.CheckpointableBase\'>"
+ is_instance: "<class \'tensorflow.python.keras.layers.noise.AlphaDropout\'>"
+ is_instance: "<class \'tensorflow.python.keras.engine.base_layer.Layer\'>"
+ is_instance: "<class \'tensorflow.python.training.checkpointable.base.CheckpointableBase\'>"
is_instance: "<type \'object\'>"
member {
name: "activity_regularizer"
@@ -98,7 +98,7 @@ tf_class {
}
member_method {
name: "add_weight"
- argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'getter\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'True\', \'None\', \'None\', \'None\', \'None\'], "
+ argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'synchronization\', \'aggregation\'], varargs=None, keywords=kwargs, defaults=[\'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'VariableSynchronization.AUTO\', \'VariableAggregation.NONE\'], "
}
member_method {
name: "apply"
@@ -106,7 +106,7 @@ tf_class {
}
member_method {
name: "build"
- argspec: "args=[\'self\', \'_\'], varargs=None, keywords=None, defaults=None"
+ argspec: "args=[\'self\', \'input_shape\'], varargs=None, keywords=None, defaults=None"
}
member_method {
name: "call"
diff --git a/tensorflow/tools/api/golden/tensorflow.keras.layers.-average-pooling1-d.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-average-pooling1-d.pbtxt
index 269be1455b..c3dd2ad046 100644
--- a/tensorflow/tools/api/golden/tensorflow.keras.layers.-average-pooling1-d.pbtxt
+++ b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-average-pooling1-d.pbtxt
@@ -1,9 +1,9 @@
path: "tensorflow.keras.layers.AveragePooling1D"
tf_class {
- is_instance: "<class \'tensorflow.python.keras._impl.keras.layers.pooling.AveragePooling1D\'>"
- is_instance: "<class \'tensorflow.python.keras._impl.keras.layers.pooling.Pooling1D\'>"
- is_instance: "<class \'tensorflow.python.keras._impl.keras.engine.base_layer.Layer\'>"
- is_instance: "<class \'tensorflow.python.training.checkpointable.CheckpointableBase\'>"
+ is_instance: "<class \'tensorflow.python.keras.layers.pooling.AveragePooling1D\'>"
+ is_instance: "<class \'tensorflow.python.keras.layers.pooling.Pooling1D\'>"
+ is_instance: "<class \'tensorflow.python.keras.engine.base_layer.Layer\'>"
+ is_instance: "<class \'tensorflow.python.training.checkpointable.base.CheckpointableBase\'>"
is_instance: "<type \'object\'>"
member {
name: "activity_regularizer"
@@ -99,7 +99,7 @@ tf_class {
}
member_method {
name: "add_weight"
- argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'getter\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'True\', \'None\', \'None\', \'None\', \'None\'], "
+ argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'synchronization\', \'aggregation\'], varargs=None, keywords=kwargs, defaults=[\'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'VariableSynchronization.AUTO\', \'VariableAggregation.NONE\'], "
}
member_method {
name: "apply"
@@ -107,7 +107,7 @@ tf_class {
}
member_method {
name: "build"
- argspec: "args=[\'self\', \'_\'], varargs=None, keywords=None, defaults=None"
+ argspec: "args=[\'self\', \'input_shape\'], varargs=None, keywords=None, defaults=None"
}
member_method {
name: "call"
diff --git a/tensorflow/tools/api/golden/tensorflow.keras.layers.-average-pooling2-d.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-average-pooling2-d.pbtxt
index 3448136215..cc303bf7b9 100644
--- a/tensorflow/tools/api/golden/tensorflow.keras.layers.-average-pooling2-d.pbtxt
+++ b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-average-pooling2-d.pbtxt
@@ -1,9 +1,9 @@
path: "tensorflow.keras.layers.AveragePooling2D"
tf_class {
- is_instance: "<class \'tensorflow.python.keras._impl.keras.layers.pooling.AveragePooling2D\'>"
- is_instance: "<class \'tensorflow.python.keras._impl.keras.layers.pooling.Pooling2D\'>"
- is_instance: "<class \'tensorflow.python.keras._impl.keras.engine.base_layer.Layer\'>"
- is_instance: "<class \'tensorflow.python.training.checkpointable.CheckpointableBase\'>"
+ is_instance: "<class \'tensorflow.python.keras.layers.pooling.AveragePooling2D\'>"
+ is_instance: "<class \'tensorflow.python.keras.layers.pooling.Pooling2D\'>"
+ is_instance: "<class \'tensorflow.python.keras.engine.base_layer.Layer\'>"
+ is_instance: "<class \'tensorflow.python.training.checkpointable.base.CheckpointableBase\'>"
is_instance: "<type \'object\'>"
member {
name: "activity_regularizer"
@@ -99,7 +99,7 @@ tf_class {
}
member_method {
name: "add_weight"
- argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'getter\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'True\', \'None\', \'None\', \'None\', \'None\'], "
+ argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'synchronization\', \'aggregation\'], varargs=None, keywords=kwargs, defaults=[\'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'VariableSynchronization.AUTO\', \'VariableAggregation.NONE\'], "
}
member_method {
name: "apply"
@@ -107,7 +107,7 @@ tf_class {
}
member_method {
name: "build"
- argspec: "args=[\'self\', \'_\'], varargs=None, keywords=None, defaults=None"
+ argspec: "args=[\'self\', \'input_shape\'], varargs=None, keywords=None, defaults=None"
}
member_method {
name: "call"
diff --git a/tensorflow/tools/api/golden/tensorflow.keras.layers.-average-pooling3-d.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-average-pooling3-d.pbtxt
index 979008d0ed..628447ce35 100644
--- a/tensorflow/tools/api/golden/tensorflow.keras.layers.-average-pooling3-d.pbtxt
+++ b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-average-pooling3-d.pbtxt
@@ -1,9 +1,9 @@
path: "tensorflow.keras.layers.AveragePooling3D"
tf_class {
- is_instance: "<class \'tensorflow.python.keras._impl.keras.layers.pooling.AveragePooling3D\'>"
- is_instance: "<class \'tensorflow.python.keras._impl.keras.layers.pooling.Pooling3D\'>"
- is_instance: "<class \'tensorflow.python.keras._impl.keras.engine.base_layer.Layer\'>"
- is_instance: "<class \'tensorflow.python.training.checkpointable.CheckpointableBase\'>"
+ is_instance: "<class \'tensorflow.python.keras.layers.pooling.AveragePooling3D\'>"
+ is_instance: "<class \'tensorflow.python.keras.layers.pooling.Pooling3D\'>"
+ is_instance: "<class \'tensorflow.python.keras.engine.base_layer.Layer\'>"
+ is_instance: "<class \'tensorflow.python.training.checkpointable.base.CheckpointableBase\'>"
is_instance: "<type \'object\'>"
member {
name: "activity_regularizer"
@@ -99,7 +99,7 @@ tf_class {
}
member_method {
name: "add_weight"
- argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'getter\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'True\', \'None\', \'None\', \'None\', \'None\'], "
+ argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'synchronization\', \'aggregation\'], varargs=None, keywords=kwargs, defaults=[\'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'VariableSynchronization.AUTO\', \'VariableAggregation.NONE\'], "
}
member_method {
name: "apply"
@@ -107,7 +107,7 @@ tf_class {
}
member_method {
name: "build"
- argspec: "args=[\'self\', \'_\'], varargs=None, keywords=None, defaults=None"
+ argspec: "args=[\'self\', \'input_shape\'], varargs=None, keywords=None, defaults=None"
}
member_method {
name: "call"
diff --git a/tensorflow/tools/api/golden/tensorflow.keras.layers.-average.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-average.pbtxt
index 0ffdffd4cd..f03c986c22 100644
--- a/tensorflow/tools/api/golden/tensorflow.keras.layers.-average.pbtxt
+++ b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-average.pbtxt
@@ -1,9 +1,9 @@
path: "tensorflow.keras.layers.Average"
tf_class {
- is_instance: "<class \'tensorflow.python.keras._impl.keras.layers.merge.Average\'>"
- is_instance: "<class \'tensorflow.python.keras._impl.keras.layers.merge._Merge\'>"
- is_instance: "<class \'tensorflow.python.keras._impl.keras.engine.base_layer.Layer\'>"
- is_instance: "<class \'tensorflow.python.training.checkpointable.CheckpointableBase\'>"
+ is_instance: "<class \'tensorflow.python.keras.layers.merge.Average\'>"
+ is_instance: "<class \'tensorflow.python.keras.layers.merge._Merge\'>"
+ is_instance: "<class \'tensorflow.python.keras.engine.base_layer.Layer\'>"
+ is_instance: "<class \'tensorflow.python.training.checkpointable.base.CheckpointableBase\'>"
is_instance: "<type \'object\'>"
member {
name: "activity_regularizer"
@@ -99,7 +99,7 @@ tf_class {
}
member_method {
name: "add_weight"
- argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'getter\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'True\', \'None\', \'None\', \'None\', \'None\'], "
+ argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'synchronization\', \'aggregation\'], varargs=None, keywords=kwargs, defaults=[\'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'VariableSynchronization.AUTO\', \'VariableAggregation.NONE\'], "
}
member_method {
name: "apply"
diff --git a/tensorflow/tools/api/golden/tensorflow.keras.layers.-avg-pool1-d.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-avg-pool1-d.pbtxt
index 6b00f110ee..c440604aae 100644
--- a/tensorflow/tools/api/golden/tensorflow.keras.layers.-avg-pool1-d.pbtxt
+++ b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-avg-pool1-d.pbtxt
@@ -1,9 +1,9 @@
path: "tensorflow.keras.layers.AvgPool1D"
tf_class {
- is_instance: "<class \'tensorflow.python.keras._impl.keras.layers.pooling.AveragePooling1D\'>"
- is_instance: "<class \'tensorflow.python.keras._impl.keras.layers.pooling.Pooling1D\'>"
- is_instance: "<class \'tensorflow.python.keras._impl.keras.engine.base_layer.Layer\'>"
- is_instance: "<class \'tensorflow.python.training.checkpointable.CheckpointableBase\'>"
+ is_instance: "<class \'tensorflow.python.keras.layers.pooling.AveragePooling1D\'>"
+ is_instance: "<class \'tensorflow.python.keras.layers.pooling.Pooling1D\'>"
+ is_instance: "<class \'tensorflow.python.keras.engine.base_layer.Layer\'>"
+ is_instance: "<class \'tensorflow.python.training.checkpointable.base.CheckpointableBase\'>"
is_instance: "<type \'object\'>"
member {
name: "activity_regularizer"
@@ -99,7 +99,7 @@ tf_class {
}
member_method {
name: "add_weight"
- argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'getter\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'True\', \'None\', \'None\', \'None\', \'None\'], "
+ argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'synchronization\', \'aggregation\'], varargs=None, keywords=kwargs, defaults=[\'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'VariableSynchronization.AUTO\', \'VariableAggregation.NONE\'], "
}
member_method {
name: "apply"
@@ -107,7 +107,7 @@ tf_class {
}
member_method {
name: "build"
- argspec: "args=[\'self\', \'_\'], varargs=None, keywords=None, defaults=None"
+ argspec: "args=[\'self\', \'input_shape\'], varargs=None, keywords=None, defaults=None"
}
member_method {
name: "call"
diff --git a/tensorflow/tools/api/golden/tensorflow.keras.layers.-avg-pool2-d.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-avg-pool2-d.pbtxt
index caff5a2f1d..a01eaf8a12 100644
--- a/tensorflow/tools/api/golden/tensorflow.keras.layers.-avg-pool2-d.pbtxt
+++ b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-avg-pool2-d.pbtxt
@@ -1,9 +1,9 @@
path: "tensorflow.keras.layers.AvgPool2D"
tf_class {
- is_instance: "<class \'tensorflow.python.keras._impl.keras.layers.pooling.AveragePooling2D\'>"
- is_instance: "<class \'tensorflow.python.keras._impl.keras.layers.pooling.Pooling2D\'>"
- is_instance: "<class \'tensorflow.python.keras._impl.keras.engine.base_layer.Layer\'>"
- is_instance: "<class \'tensorflow.python.training.checkpointable.CheckpointableBase\'>"
+ is_instance: "<class \'tensorflow.python.keras.layers.pooling.AveragePooling2D\'>"
+ is_instance: "<class \'tensorflow.python.keras.layers.pooling.Pooling2D\'>"
+ is_instance: "<class \'tensorflow.python.keras.engine.base_layer.Layer\'>"
+ is_instance: "<class \'tensorflow.python.training.checkpointable.base.CheckpointableBase\'>"
is_instance: "<type \'object\'>"
member {
name: "activity_regularizer"
@@ -99,7 +99,7 @@ tf_class {
}
member_method {
name: "add_weight"
- argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'getter\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'True\', \'None\', \'None\', \'None\', \'None\'], "
+ argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'synchronization\', \'aggregation\'], varargs=None, keywords=kwargs, defaults=[\'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'VariableSynchronization.AUTO\', \'VariableAggregation.NONE\'], "
}
member_method {
name: "apply"
@@ -107,7 +107,7 @@ tf_class {
}
member_method {
name: "build"
- argspec: "args=[\'self\', \'_\'], varargs=None, keywords=None, defaults=None"
+ argspec: "args=[\'self\', \'input_shape\'], varargs=None, keywords=None, defaults=None"
}
member_method {
name: "call"
diff --git a/tensorflow/tools/api/golden/tensorflow.keras.layers.-avg-pool3-d.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-avg-pool3-d.pbtxt
index 4a72394921..0d6698f2ef 100644
--- a/tensorflow/tools/api/golden/tensorflow.keras.layers.-avg-pool3-d.pbtxt
+++ b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-avg-pool3-d.pbtxt
@@ -1,9 +1,9 @@
path: "tensorflow.keras.layers.AvgPool3D"
tf_class {
- is_instance: "<class \'tensorflow.python.keras._impl.keras.layers.pooling.AveragePooling3D\'>"
- is_instance: "<class \'tensorflow.python.keras._impl.keras.layers.pooling.Pooling3D\'>"
- is_instance: "<class \'tensorflow.python.keras._impl.keras.engine.base_layer.Layer\'>"
- is_instance: "<class \'tensorflow.python.training.checkpointable.CheckpointableBase\'>"
+ is_instance: "<class \'tensorflow.python.keras.layers.pooling.AveragePooling3D\'>"
+ is_instance: "<class \'tensorflow.python.keras.layers.pooling.Pooling3D\'>"
+ is_instance: "<class \'tensorflow.python.keras.engine.base_layer.Layer\'>"
+ is_instance: "<class \'tensorflow.python.training.checkpointable.base.CheckpointableBase\'>"
is_instance: "<type \'object\'>"
member {
name: "activity_regularizer"
@@ -99,7 +99,7 @@ tf_class {
}
member_method {
name: "add_weight"
- argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'getter\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'True\', \'None\', \'None\', \'None\', \'None\'], "
+ argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'synchronization\', \'aggregation\'], varargs=None, keywords=kwargs, defaults=[\'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'VariableSynchronization.AUTO\', \'VariableAggregation.NONE\'], "
}
member_method {
name: "apply"
@@ -107,7 +107,7 @@ tf_class {
}
member_method {
name: "build"
- argspec: "args=[\'self\', \'_\'], varargs=None, keywords=None, defaults=None"
+ argspec: "args=[\'self\', \'input_shape\'], varargs=None, keywords=None, defaults=None"
}
member_method {
name: "call"
diff --git a/tensorflow/tools/api/golden/tensorflow.keras.layers.-batch-normalization.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-batch-normalization.pbtxt
index 9804394fa5..f1b23be48f 100644
--- a/tensorflow/tools/api/golden/tensorflow.keras.layers.-batch-normalization.pbtxt
+++ b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-batch-normalization.pbtxt
@@ -1,8 +1,8 @@
path: "tensorflow.keras.layers.BatchNormalization"
tf_class {
- is_instance: "<class \'tensorflow.python.keras._impl.keras.layers.normalization.BatchNormalization\'>"
- is_instance: "<class \'tensorflow.python.keras._impl.keras.engine.base_layer.Layer\'>"
- is_instance: "<class \'tensorflow.python.training.checkpointable.CheckpointableBase\'>"
+ is_instance: "<class \'tensorflow.python.keras.layers.normalization.BatchNormalization\'>"
+ is_instance: "<class \'tensorflow.python.keras.engine.base_layer.Layer\'>"
+ is_instance: "<class \'tensorflow.python.training.checkpointable.base.CheckpointableBase\'>"
is_instance: "<type \'object\'>"
member {
name: "activity_regularizer"
@@ -98,7 +98,7 @@ tf_class {
}
member_method {
name: "add_weight"
- argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'getter\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'True\', \'None\', \'None\', \'None\', \'None\'], "
+ argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'synchronization\', \'aggregation\'], varargs=None, keywords=kwargs, defaults=[\'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'VariableSynchronization.AUTO\', \'VariableAggregation.NONE\'], "
}
member_method {
name: "apply"
diff --git a/tensorflow/tools/api/golden/tensorflow.keras.layers.-bidirectional.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-bidirectional.pbtxt
index 5e5b04c7c6..0672cd5b7b 100644
--- a/tensorflow/tools/api/golden/tensorflow.keras.layers.-bidirectional.pbtxt
+++ b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-bidirectional.pbtxt
@@ -1,9 +1,9 @@
path: "tensorflow.keras.layers.Bidirectional"
tf_class {
- is_instance: "<class \'tensorflow.python.keras._impl.keras.layers.wrappers.Bidirectional\'>"
- is_instance: "<class \'tensorflow.python.keras._impl.keras.layers.wrappers.Wrapper\'>"
- is_instance: "<class \'tensorflow.python.keras._impl.keras.engine.base_layer.Layer\'>"
- is_instance: "<class \'tensorflow.python.training.checkpointable.CheckpointableBase\'>"
+ is_instance: "<class \'tensorflow.python.keras.layers.wrappers.Bidirectional\'>"
+ is_instance: "<class \'tensorflow.python.keras.layers.wrappers.Wrapper\'>"
+ is_instance: "<class \'tensorflow.python.keras.engine.base_layer.Layer\'>"
+ is_instance: "<class \'tensorflow.python.training.checkpointable.base.CheckpointableBase\'>"
is_instance: "<type \'object\'>"
member {
name: "activity_regularizer"
@@ -107,7 +107,7 @@ tf_class {
}
member_method {
name: "add_weight"
- argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'getter\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'True\', \'None\', \'None\', \'None\', \'None\'], "
+ argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'synchronization\', \'aggregation\'], varargs=None, keywords=kwargs, defaults=[\'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'VariableSynchronization.AUTO\', \'VariableAggregation.NONE\'], "
}
member_method {
name: "apply"
@@ -119,7 +119,7 @@ tf_class {
}
member_method {
name: "call"
- argspec: "args=[\'self\', \'inputs\', \'training\', \'mask\', \'initial_state\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\'], "
+ argspec: "args=[\'self\', \'inputs\', \'training\', \'mask\', \'initial_state\', \'constants\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'None\'], "
}
member_method {
name: "compute_mask"
diff --git a/tensorflow/tools/api/golden/tensorflow.keras.layers.-concatenate.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-concatenate.pbtxt
index b8eb4079b9..b25ae1e82e 100644
--- a/tensorflow/tools/api/golden/tensorflow.keras.layers.-concatenate.pbtxt
+++ b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-concatenate.pbtxt
@@ -1,9 +1,9 @@
path: "tensorflow.keras.layers.Concatenate"
tf_class {
- is_instance: "<class \'tensorflow.python.keras._impl.keras.layers.merge.Concatenate\'>"
- is_instance: "<class \'tensorflow.python.keras._impl.keras.layers.merge._Merge\'>"
- is_instance: "<class \'tensorflow.python.keras._impl.keras.engine.base_layer.Layer\'>"
- is_instance: "<class \'tensorflow.python.training.checkpointable.CheckpointableBase\'>"
+ is_instance: "<class \'tensorflow.python.keras.layers.merge.Concatenate\'>"
+ is_instance: "<class \'tensorflow.python.keras.layers.merge._Merge\'>"
+ is_instance: "<class \'tensorflow.python.keras.engine.base_layer.Layer\'>"
+ is_instance: "<class \'tensorflow.python.training.checkpointable.base.CheckpointableBase\'>"
is_instance: "<type \'object\'>"
member {
name: "activity_regularizer"
@@ -99,7 +99,7 @@ tf_class {
}
member_method {
name: "add_weight"
- argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'getter\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'True\', \'None\', \'None\', \'None\', \'None\'], "
+ argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'synchronization\', \'aggregation\'], varargs=None, keywords=kwargs, defaults=[\'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'VariableSynchronization.AUTO\', \'VariableAggregation.NONE\'], "
}
member_method {
name: "apply"
diff --git a/tensorflow/tools/api/golden/tensorflow.keras.layers.-conv-l-s-t-m2-d.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-conv-l-s-t-m2-d.pbtxt
index 3fdb101425..bb1918eba6 100644
--- a/tensorflow/tools/api/golden/tensorflow.keras.layers.-conv-l-s-t-m2-d.pbtxt
+++ b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-conv-l-s-t-m2-d.pbtxt
@@ -1,10 +1,10 @@
path: "tensorflow.keras.layers.ConvLSTM2D"
tf_class {
- is_instance: "<class \'tensorflow.python.keras._impl.keras.layers.convolutional_recurrent.ConvLSTM2D\'>"
- is_instance: "<class \'tensorflow.python.keras._impl.keras.layers.convolutional_recurrent.ConvRNN2D\'>"
- is_instance: "<class \'tensorflow.python.keras._impl.keras.layers.recurrent.RNN\'>"
- is_instance: "<class \'tensorflow.python.keras._impl.keras.engine.base_layer.Layer\'>"
- is_instance: "<class \'tensorflow.python.training.checkpointable.CheckpointableBase\'>"
+ is_instance: "<class \'tensorflow.python.keras.layers.convolutional_recurrent.ConvLSTM2D\'>"
+ is_instance: "<class \'tensorflow.python.keras.layers.convolutional_recurrent.ConvRNN2D\'>"
+ is_instance: "<class \'tensorflow.python.keras.layers.recurrent.RNN\'>"
+ is_instance: "<class \'tensorflow.python.keras.engine.base_layer.Layer\'>"
+ is_instance: "<class \'tensorflow.python.training.checkpointable.base.CheckpointableBase\'>"
is_instance: "<type \'object\'>"
member {
name: "activation"
@@ -188,7 +188,7 @@ tf_class {
}
member_method {
name: "add_weight"
- argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'getter\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'True\', \'None\', \'None\', \'None\', \'None\'], "
+ argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'synchronization\', \'aggregation\'], varargs=None, keywords=kwargs, defaults=[\'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'VariableSynchronization.AUTO\', \'VariableAggregation.NONE\'], "
}
member_method {
name: "apply"
diff --git a/tensorflow/tools/api/golden/tensorflow.keras.layers.-conv1-d.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-conv1-d.pbtxt
index 0be42471e3..16e0fd5a31 100644
--- a/tensorflow/tools/api/golden/tensorflow.keras.layers.-conv1-d.pbtxt
+++ b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-conv1-d.pbtxt
@@ -1,9 +1,9 @@
path: "tensorflow.keras.layers.Conv1D"
tf_class {
- is_instance: "<class \'tensorflow.python.keras._impl.keras.layers.convolutional.Conv1D\'>"
- is_instance: "<class \'tensorflow.python.keras._impl.keras.layers.convolutional.Conv\'>"
- is_instance: "<class \'tensorflow.python.keras._impl.keras.engine.base_layer.Layer\'>"
- is_instance: "<class \'tensorflow.python.training.checkpointable.CheckpointableBase\'>"
+ is_instance: "<class \'tensorflow.python.keras.layers.convolutional.Conv1D\'>"
+ is_instance: "<class \'tensorflow.python.keras.layers.convolutional.Conv\'>"
+ is_instance: "<class \'tensorflow.python.keras.engine.base_layer.Layer\'>"
+ is_instance: "<class \'tensorflow.python.training.checkpointable.base.CheckpointableBase\'>"
is_instance: "<type \'object\'>"
member {
name: "activity_regularizer"
@@ -99,7 +99,7 @@ tf_class {
}
member_method {
name: "add_weight"
- argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'getter\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'True\', \'None\', \'None\', \'None\', \'None\'], "
+ argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'synchronization\', \'aggregation\'], varargs=None, keywords=kwargs, defaults=[\'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'VariableSynchronization.AUTO\', \'VariableAggregation.NONE\'], "
}
member_method {
name: "apply"
diff --git a/tensorflow/tools/api/golden/tensorflow.keras.layers.-conv2-d-transpose.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-conv2-d-transpose.pbtxt
index 39ba31a709..065bb4d35b 100644
--- a/tensorflow/tools/api/golden/tensorflow.keras.layers.-conv2-d-transpose.pbtxt
+++ b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-conv2-d-transpose.pbtxt
@@ -1,10 +1,10 @@
path: "tensorflow.keras.layers.Conv2DTranspose"
tf_class {
- is_instance: "<class \'tensorflow.python.keras._impl.keras.layers.convolutional.Conv2DTranspose\'>"
- is_instance: "<class \'tensorflow.python.keras._impl.keras.layers.convolutional.Conv2D\'>"
- is_instance: "<class \'tensorflow.python.keras._impl.keras.layers.convolutional.Conv\'>"
- is_instance: "<class \'tensorflow.python.keras._impl.keras.engine.base_layer.Layer\'>"
- is_instance: "<class \'tensorflow.python.training.checkpointable.CheckpointableBase\'>"
+ is_instance: "<class \'tensorflow.python.keras.layers.convolutional.Conv2DTranspose\'>"
+ is_instance: "<class \'tensorflow.python.keras.layers.convolutional.Conv2D\'>"
+ is_instance: "<class \'tensorflow.python.keras.layers.convolutional.Conv\'>"
+ is_instance: "<class \'tensorflow.python.keras.engine.base_layer.Layer\'>"
+ is_instance: "<class \'tensorflow.python.training.checkpointable.base.CheckpointableBase\'>"
is_instance: "<type \'object\'>"
member {
name: "activity_regularizer"
@@ -100,7 +100,7 @@ tf_class {
}
member_method {
name: "add_weight"
- argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'getter\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'True\', \'None\', \'None\', \'None\', \'None\'], "
+ argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'synchronization\', \'aggregation\'], varargs=None, keywords=kwargs, defaults=[\'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'VariableSynchronization.AUTO\', \'VariableAggregation.NONE\'], "
}
member_method {
name: "apply"
diff --git a/tensorflow/tools/api/golden/tensorflow.keras.layers.-conv2-d.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-conv2-d.pbtxt
index 26d9d8c476..543bae6fa9 100644
--- a/tensorflow/tools/api/golden/tensorflow.keras.layers.-conv2-d.pbtxt
+++ b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-conv2-d.pbtxt
@@ -1,9 +1,9 @@
path: "tensorflow.keras.layers.Conv2D"
tf_class {
- is_instance: "<class \'tensorflow.python.keras._impl.keras.layers.convolutional.Conv2D\'>"
- is_instance: "<class \'tensorflow.python.keras._impl.keras.layers.convolutional.Conv\'>"
- is_instance: "<class \'tensorflow.python.keras._impl.keras.engine.base_layer.Layer\'>"
- is_instance: "<class \'tensorflow.python.training.checkpointable.CheckpointableBase\'>"
+ is_instance: "<class \'tensorflow.python.keras.layers.convolutional.Conv2D\'>"
+ is_instance: "<class \'tensorflow.python.keras.layers.convolutional.Conv\'>"
+ is_instance: "<class \'tensorflow.python.keras.engine.base_layer.Layer\'>"
+ is_instance: "<class \'tensorflow.python.training.checkpointable.base.CheckpointableBase\'>"
is_instance: "<type \'object\'>"
member {
name: "activity_regularizer"
@@ -99,7 +99,7 @@ tf_class {
}
member_method {
name: "add_weight"
- argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'getter\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'True\', \'None\', \'None\', \'None\', \'None\'], "
+ argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'synchronization\', \'aggregation\'], varargs=None, keywords=kwargs, defaults=[\'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'VariableSynchronization.AUTO\', \'VariableAggregation.NONE\'], "
}
member_method {
name: "apply"
diff --git a/tensorflow/tools/api/golden/tensorflow.keras.layers.-conv3-d-transpose.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-conv3-d-transpose.pbtxt
index 43611017fa..c7ba6056f9 100644
--- a/tensorflow/tools/api/golden/tensorflow.keras.layers.-conv3-d-transpose.pbtxt
+++ b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-conv3-d-transpose.pbtxt
@@ -1,10 +1,10 @@
path: "tensorflow.keras.layers.Conv3DTranspose"
tf_class {
- is_instance: "<class \'tensorflow.python.keras._impl.keras.layers.convolutional.Conv3DTranspose\'>"
- is_instance: "<class \'tensorflow.python.keras._impl.keras.layers.convolutional.Conv3D\'>"
- is_instance: "<class \'tensorflow.python.keras._impl.keras.layers.convolutional.Conv\'>"
- is_instance: "<class \'tensorflow.python.keras._impl.keras.engine.base_layer.Layer\'>"
- is_instance: "<class \'tensorflow.python.training.checkpointable.CheckpointableBase\'>"
+ is_instance: "<class \'tensorflow.python.keras.layers.convolutional.Conv3DTranspose\'>"
+ is_instance: "<class \'tensorflow.python.keras.layers.convolutional.Conv3D\'>"
+ is_instance: "<class \'tensorflow.python.keras.layers.convolutional.Conv\'>"
+ is_instance: "<class \'tensorflow.python.keras.engine.base_layer.Layer\'>"
+ is_instance: "<class \'tensorflow.python.training.checkpointable.base.CheckpointableBase\'>"
is_instance: "<type \'object\'>"
member {
name: "activity_regularizer"
@@ -100,7 +100,7 @@ tf_class {
}
member_method {
name: "add_weight"
- argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'getter\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'True\', \'None\', \'None\', \'None\', \'None\'], "
+ argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'synchronization\', \'aggregation\'], varargs=None, keywords=kwargs, defaults=[\'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'VariableSynchronization.AUTO\', \'VariableAggregation.NONE\'], "
}
member_method {
name: "apply"
diff --git a/tensorflow/tools/api/golden/tensorflow.keras.layers.-conv3-d.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-conv3-d.pbtxt
index fa4925ab99..072943dc2c 100644
--- a/tensorflow/tools/api/golden/tensorflow.keras.layers.-conv3-d.pbtxt
+++ b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-conv3-d.pbtxt
@@ -1,9 +1,9 @@
path: "tensorflow.keras.layers.Conv3D"
tf_class {
- is_instance: "<class \'tensorflow.python.keras._impl.keras.layers.convolutional.Conv3D\'>"
- is_instance: "<class \'tensorflow.python.keras._impl.keras.layers.convolutional.Conv\'>"
- is_instance: "<class \'tensorflow.python.keras._impl.keras.engine.base_layer.Layer\'>"
- is_instance: "<class \'tensorflow.python.training.checkpointable.CheckpointableBase\'>"
+ is_instance: "<class \'tensorflow.python.keras.layers.convolutional.Conv3D\'>"
+ is_instance: "<class \'tensorflow.python.keras.layers.convolutional.Conv\'>"
+ is_instance: "<class \'tensorflow.python.keras.engine.base_layer.Layer\'>"
+ is_instance: "<class \'tensorflow.python.training.checkpointable.base.CheckpointableBase\'>"
is_instance: "<type \'object\'>"
member {
name: "activity_regularizer"
@@ -99,7 +99,7 @@ tf_class {
}
member_method {
name: "add_weight"
- argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'getter\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'True\', \'None\', \'None\', \'None\', \'None\'], "
+ argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'synchronization\', \'aggregation\'], varargs=None, keywords=kwargs, defaults=[\'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'VariableSynchronization.AUTO\', \'VariableAggregation.NONE\'], "
}
member_method {
name: "apply"
diff --git a/tensorflow/tools/api/golden/tensorflow.keras.layers.-convolution1-d.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-convolution1-d.pbtxt
index c5c5d5e7c0..222a1ef4fc 100644
--- a/tensorflow/tools/api/golden/tensorflow.keras.layers.-convolution1-d.pbtxt
+++ b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-convolution1-d.pbtxt
@@ -1,9 +1,9 @@
path: "tensorflow.keras.layers.Convolution1D"
tf_class {
- is_instance: "<class \'tensorflow.python.keras._impl.keras.layers.convolutional.Conv1D\'>"
- is_instance: "<class \'tensorflow.python.keras._impl.keras.layers.convolutional.Conv\'>"
- is_instance: "<class \'tensorflow.python.keras._impl.keras.engine.base_layer.Layer\'>"
- is_instance: "<class \'tensorflow.python.training.checkpointable.CheckpointableBase\'>"
+ is_instance: "<class \'tensorflow.python.keras.layers.convolutional.Conv1D\'>"
+ is_instance: "<class \'tensorflow.python.keras.layers.convolutional.Conv\'>"
+ is_instance: "<class \'tensorflow.python.keras.engine.base_layer.Layer\'>"
+ is_instance: "<class \'tensorflow.python.training.checkpointable.base.CheckpointableBase\'>"
is_instance: "<type \'object\'>"
member {
name: "activity_regularizer"
@@ -99,7 +99,7 @@ tf_class {
}
member_method {
name: "add_weight"
- argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'getter\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'True\', \'None\', \'None\', \'None\', \'None\'], "
+ argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'synchronization\', \'aggregation\'], varargs=None, keywords=kwargs, defaults=[\'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'VariableSynchronization.AUTO\', \'VariableAggregation.NONE\'], "
}
member_method {
name: "apply"
diff --git a/tensorflow/tools/api/golden/tensorflow.keras.layers.-convolution2-d-transpose.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-convolution2-d-transpose.pbtxt
index 36dc2d2e9a..8f4f7918ab 100644
--- a/tensorflow/tools/api/golden/tensorflow.keras.layers.-convolution2-d-transpose.pbtxt
+++ b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-convolution2-d-transpose.pbtxt
@@ -1,10 +1,10 @@
path: "tensorflow.keras.layers.Convolution2DTranspose"
tf_class {
- is_instance: "<class \'tensorflow.python.keras._impl.keras.layers.convolutional.Conv2DTranspose\'>"
- is_instance: "<class \'tensorflow.python.keras._impl.keras.layers.convolutional.Conv2D\'>"
- is_instance: "<class \'tensorflow.python.keras._impl.keras.layers.convolutional.Conv\'>"
- is_instance: "<class \'tensorflow.python.keras._impl.keras.engine.base_layer.Layer\'>"
- is_instance: "<class \'tensorflow.python.training.checkpointable.CheckpointableBase\'>"
+ is_instance: "<class \'tensorflow.python.keras.layers.convolutional.Conv2DTranspose\'>"
+ is_instance: "<class \'tensorflow.python.keras.layers.convolutional.Conv2D\'>"
+ is_instance: "<class \'tensorflow.python.keras.layers.convolutional.Conv\'>"
+ is_instance: "<class \'tensorflow.python.keras.engine.base_layer.Layer\'>"
+ is_instance: "<class \'tensorflow.python.training.checkpointable.base.CheckpointableBase\'>"
is_instance: "<type \'object\'>"
member {
name: "activity_regularizer"
@@ -100,7 +100,7 @@ tf_class {
}
member_method {
name: "add_weight"
- argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'getter\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'True\', \'None\', \'None\', \'None\', \'None\'], "
+ argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'synchronization\', \'aggregation\'], varargs=None, keywords=kwargs, defaults=[\'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'VariableSynchronization.AUTO\', \'VariableAggregation.NONE\'], "
}
member_method {
name: "apply"
diff --git a/tensorflow/tools/api/golden/tensorflow.keras.layers.-convolution2-d.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-convolution2-d.pbtxt
index 23ec74370b..f939067178 100644
--- a/tensorflow/tools/api/golden/tensorflow.keras.layers.-convolution2-d.pbtxt
+++ b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-convolution2-d.pbtxt
@@ -1,9 +1,9 @@
path: "tensorflow.keras.layers.Convolution2D"
tf_class {
- is_instance: "<class \'tensorflow.python.keras._impl.keras.layers.convolutional.Conv2D\'>"
- is_instance: "<class \'tensorflow.python.keras._impl.keras.layers.convolutional.Conv\'>"
- is_instance: "<class \'tensorflow.python.keras._impl.keras.engine.base_layer.Layer\'>"
- is_instance: "<class \'tensorflow.python.training.checkpointable.CheckpointableBase\'>"
+ is_instance: "<class \'tensorflow.python.keras.layers.convolutional.Conv2D\'>"
+ is_instance: "<class \'tensorflow.python.keras.layers.convolutional.Conv\'>"
+ is_instance: "<class \'tensorflow.python.keras.engine.base_layer.Layer\'>"
+ is_instance: "<class \'tensorflow.python.training.checkpointable.base.CheckpointableBase\'>"
is_instance: "<type \'object\'>"
member {
name: "activity_regularizer"
@@ -99,7 +99,7 @@ tf_class {
}
member_method {
name: "add_weight"
- argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'getter\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'True\', \'None\', \'None\', \'None\', \'None\'], "
+ argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'synchronization\', \'aggregation\'], varargs=None, keywords=kwargs, defaults=[\'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'VariableSynchronization.AUTO\', \'VariableAggregation.NONE\'], "
}
member_method {
name: "apply"
diff --git a/tensorflow/tools/api/golden/tensorflow.keras.layers.-convolution3-d-transpose.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-convolution3-d-transpose.pbtxt
index 0e4089c578..93c442bd55 100644
--- a/tensorflow/tools/api/golden/tensorflow.keras.layers.-convolution3-d-transpose.pbtxt
+++ b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-convolution3-d-transpose.pbtxt
@@ -1,10 +1,10 @@
path: "tensorflow.keras.layers.Convolution3DTranspose"
tf_class {
- is_instance: "<class \'tensorflow.python.keras._impl.keras.layers.convolutional.Conv3DTranspose\'>"
- is_instance: "<class \'tensorflow.python.keras._impl.keras.layers.convolutional.Conv3D\'>"
- is_instance: "<class \'tensorflow.python.keras._impl.keras.layers.convolutional.Conv\'>"
- is_instance: "<class \'tensorflow.python.keras._impl.keras.engine.base_layer.Layer\'>"
- is_instance: "<class \'tensorflow.python.training.checkpointable.CheckpointableBase\'>"
+ is_instance: "<class \'tensorflow.python.keras.layers.convolutional.Conv3DTranspose\'>"
+ is_instance: "<class \'tensorflow.python.keras.layers.convolutional.Conv3D\'>"
+ is_instance: "<class \'tensorflow.python.keras.layers.convolutional.Conv\'>"
+ is_instance: "<class \'tensorflow.python.keras.engine.base_layer.Layer\'>"
+ is_instance: "<class \'tensorflow.python.training.checkpointable.base.CheckpointableBase\'>"
is_instance: "<type \'object\'>"
member {
name: "activity_regularizer"
@@ -100,7 +100,7 @@ tf_class {
}
member_method {
name: "add_weight"
- argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'getter\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'True\', \'None\', \'None\', \'None\', \'None\'], "
+ argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'synchronization\', \'aggregation\'], varargs=None, keywords=kwargs, defaults=[\'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'VariableSynchronization.AUTO\', \'VariableAggregation.NONE\'], "
}
member_method {
name: "apply"
diff --git a/tensorflow/tools/api/golden/tensorflow.keras.layers.-convolution3-d.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-convolution3-d.pbtxt
index 23ddbe1a92..471b18ef85 100644
--- a/tensorflow/tools/api/golden/tensorflow.keras.layers.-convolution3-d.pbtxt
+++ b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-convolution3-d.pbtxt
@@ -1,9 +1,9 @@
path: "tensorflow.keras.layers.Convolution3D"
tf_class {
- is_instance: "<class \'tensorflow.python.keras._impl.keras.layers.convolutional.Conv3D\'>"
- is_instance: "<class \'tensorflow.python.keras._impl.keras.layers.convolutional.Conv\'>"
- is_instance: "<class \'tensorflow.python.keras._impl.keras.engine.base_layer.Layer\'>"
- is_instance: "<class \'tensorflow.python.training.checkpointable.CheckpointableBase\'>"
+ is_instance: "<class \'tensorflow.python.keras.layers.convolutional.Conv3D\'>"
+ is_instance: "<class \'tensorflow.python.keras.layers.convolutional.Conv\'>"
+ is_instance: "<class \'tensorflow.python.keras.engine.base_layer.Layer\'>"
+ is_instance: "<class \'tensorflow.python.training.checkpointable.base.CheckpointableBase\'>"
is_instance: "<type \'object\'>"
member {
name: "activity_regularizer"
@@ -99,7 +99,7 @@ tf_class {
}
member_method {
name: "add_weight"
- argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'getter\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'True\', \'None\', \'None\', \'None\', \'None\'], "
+ argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'synchronization\', \'aggregation\'], varargs=None, keywords=kwargs, defaults=[\'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'VariableSynchronization.AUTO\', \'VariableAggregation.NONE\'], "
}
member_method {
name: "apply"
diff --git a/tensorflow/tools/api/golden/tensorflow.keras.layers.-cropping1-d.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-cropping1-d.pbtxt
index e04ab6bea8..0f250a09b7 100644
--- a/tensorflow/tools/api/golden/tensorflow.keras.layers.-cropping1-d.pbtxt
+++ b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-cropping1-d.pbtxt
@@ -1,8 +1,8 @@
path: "tensorflow.keras.layers.Cropping1D"
tf_class {
- is_instance: "<class \'tensorflow.python.keras._impl.keras.layers.convolutional.Cropping1D\'>"
- is_instance: "<class \'tensorflow.python.keras._impl.keras.engine.base_layer.Layer\'>"
- is_instance: "<class \'tensorflow.python.training.checkpointable.CheckpointableBase\'>"
+ is_instance: "<class \'tensorflow.python.keras.layers.convolutional.Cropping1D\'>"
+ is_instance: "<class \'tensorflow.python.keras.engine.base_layer.Layer\'>"
+ is_instance: "<class \'tensorflow.python.training.checkpointable.base.CheckpointableBase\'>"
is_instance: "<type \'object\'>"
member {
name: "activity_regularizer"
@@ -98,7 +98,7 @@ tf_class {
}
member_method {
name: "add_weight"
- argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'getter\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'True\', \'None\', \'None\', \'None\', \'None\'], "
+ argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'synchronization\', \'aggregation\'], varargs=None, keywords=kwargs, defaults=[\'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'VariableSynchronization.AUTO\', \'VariableAggregation.NONE\'], "
}
member_method {
name: "apply"
@@ -106,7 +106,7 @@ tf_class {
}
member_method {
name: "build"
- argspec: "args=[\'self\', \'_\'], varargs=None, keywords=None, defaults=None"
+ argspec: "args=[\'self\', \'input_shape\'], varargs=None, keywords=None, defaults=None"
}
member_method {
name: "call"
diff --git a/tensorflow/tools/api/golden/tensorflow.keras.layers.-cropping2-d.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-cropping2-d.pbtxt
index 655314afff..f52128483c 100644
--- a/tensorflow/tools/api/golden/tensorflow.keras.layers.-cropping2-d.pbtxt
+++ b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-cropping2-d.pbtxt
@@ -1,8 +1,8 @@
path: "tensorflow.keras.layers.Cropping2D"
tf_class {
- is_instance: "<class \'tensorflow.python.keras._impl.keras.layers.convolutional.Cropping2D\'>"
- is_instance: "<class \'tensorflow.python.keras._impl.keras.engine.base_layer.Layer\'>"
- is_instance: "<class \'tensorflow.python.training.checkpointable.CheckpointableBase\'>"
+ is_instance: "<class \'tensorflow.python.keras.layers.convolutional.Cropping2D\'>"
+ is_instance: "<class \'tensorflow.python.keras.engine.base_layer.Layer\'>"
+ is_instance: "<class \'tensorflow.python.training.checkpointable.base.CheckpointableBase\'>"
is_instance: "<type \'object\'>"
member {
name: "activity_regularizer"
@@ -98,7 +98,7 @@ tf_class {
}
member_method {
name: "add_weight"
- argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'getter\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'True\', \'None\', \'None\', \'None\', \'None\'], "
+ argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'synchronization\', \'aggregation\'], varargs=None, keywords=kwargs, defaults=[\'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'VariableSynchronization.AUTO\', \'VariableAggregation.NONE\'], "
}
member_method {
name: "apply"
@@ -106,7 +106,7 @@ tf_class {
}
member_method {
name: "build"
- argspec: "args=[\'self\', \'_\'], varargs=None, keywords=None, defaults=None"
+ argspec: "args=[\'self\', \'input_shape\'], varargs=None, keywords=None, defaults=None"
}
member_method {
name: "call"
diff --git a/tensorflow/tools/api/golden/tensorflow.keras.layers.-cropping3-d.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-cropping3-d.pbtxt
index d5215f1330..98daf3bab1 100644
--- a/tensorflow/tools/api/golden/tensorflow.keras.layers.-cropping3-d.pbtxt
+++ b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-cropping3-d.pbtxt
@@ -1,8 +1,8 @@
path: "tensorflow.keras.layers.Cropping3D"
tf_class {
- is_instance: "<class \'tensorflow.python.keras._impl.keras.layers.convolutional.Cropping3D\'>"
- is_instance: "<class \'tensorflow.python.keras._impl.keras.engine.base_layer.Layer\'>"
- is_instance: "<class \'tensorflow.python.training.checkpointable.CheckpointableBase\'>"
+ is_instance: "<class \'tensorflow.python.keras.layers.convolutional.Cropping3D\'>"
+ is_instance: "<class \'tensorflow.python.keras.engine.base_layer.Layer\'>"
+ is_instance: "<class \'tensorflow.python.training.checkpointable.base.CheckpointableBase\'>"
is_instance: "<type \'object\'>"
member {
name: "activity_regularizer"
@@ -98,7 +98,7 @@ tf_class {
}
member_method {
name: "add_weight"
- argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'getter\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'True\', \'None\', \'None\', \'None\', \'None\'], "
+ argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'synchronization\', \'aggregation\'], varargs=None, keywords=kwargs, defaults=[\'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'VariableSynchronization.AUTO\', \'VariableAggregation.NONE\'], "
}
member_method {
name: "apply"
@@ -106,7 +106,7 @@ tf_class {
}
member_method {
name: "build"
- argspec: "args=[\'self\', \'_\'], varargs=None, keywords=None, defaults=None"
+ argspec: "args=[\'self\', \'input_shape\'], varargs=None, keywords=None, defaults=None"
}
member_method {
name: "call"
diff --git a/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-cu-d-n-n-g-r-u.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-cu-d-n-n-g-r-u.pbtxt
new file mode 100644
index 0000000000..64e7a9046b
--- /dev/null
+++ b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-cu-d-n-n-g-r-u.pbtxt
@@ -0,0 +1,193 @@
+path: "tensorflow.keras.layers.CuDNNGRU"
+tf_class {
+ is_instance: "<class \'tensorflow.python.keras.layers.cudnn_recurrent.CuDNNGRU\'>"
+ is_instance: "<class \'tensorflow.python.keras.layers.cudnn_recurrent._CuDNNRNN\'>"
+ is_instance: "<class \'tensorflow.python.keras.layers.recurrent.RNN\'>"
+ is_instance: "<class \'tensorflow.python.keras.engine.base_layer.Layer\'>"
+ is_instance: "<class \'tensorflow.python.training.checkpointable.base.CheckpointableBase\'>"
+ is_instance: "<type \'object\'>"
+ member {
+ name: "activity_regularizer"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "cell"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "dtype"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "inbound_nodes"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "input"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "input_mask"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "input_shape"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "losses"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "name"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "non_trainable_variables"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "non_trainable_weights"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "outbound_nodes"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "output"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "output_mask"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "output_shape"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "states"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "trainable_variables"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "trainable_weights"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "updates"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "variables"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "weights"
+ mtype: "<type \'property\'>"
+ }
+ member_method {
+ name: "__init__"
+ argspec: "args=[\'self\', \'units\', \'kernel_initializer\', \'recurrent_initializer\', \'bias_initializer\', \'kernel_regularizer\', \'recurrent_regularizer\', \'bias_regularizer\', \'activity_regularizer\', \'kernel_constraint\', \'recurrent_constraint\', \'bias_constraint\', \'return_sequences\', \'return_state\', \'go_backwards\', \'stateful\'], varargs=None, keywords=kwargs, defaults=[\'glorot_uniform\', \'orthogonal\', \'zeros\', \'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'False\', \'False\', \'False\', \'False\'], "
+ }
+ member_method {
+ name: "add_loss"
+ argspec: "args=[\'self\', \'losses\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "add_update"
+ argspec: "args=[\'self\', \'updates\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "add_variable"
+ argspec: "args=[\'self\'], varargs=args, keywords=kwargs, defaults=None"
+ }
+ member_method {
+ name: "add_weight"
+ argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'synchronization\', \'aggregation\'], varargs=None, keywords=kwargs, defaults=[\'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'VariableSynchronization.AUTO\', \'VariableAggregation.NONE\'], "
+ }
+ member_method {
+ name: "apply"
+ argspec: "args=[\'self\', \'inputs\'], varargs=args, keywords=kwargs, defaults=None"
+ }
+ member_method {
+ name: "build"
+ argspec: "args=[\'self\', \'input_shape\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "call"
+ argspec: "args=[\'self\', \'inputs\', \'mask\', \'training\', \'initial_state\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\'], "
+ }
+ member_method {
+ name: "compute_mask"
+ argspec: "args=[\'self\', \'inputs\', \'mask\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "compute_output_shape"
+ argspec: "args=[\'instance\', \'input_shape\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "count_params"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "from_config"
+ argspec: "args=[\'cls\', \'config\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_config"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_initial_state"
+ argspec: "args=[\'self\', \'inputs\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_input_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_input_mask_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_input_shape_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_losses_for"
+ argspec: "args=[\'self\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "get_output_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_output_mask_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_output_shape_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_updates_for"
+ argspec: "args=[\'self\', \'inputs\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_weights"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "reset_states"
+ argspec: "args=[\'self\', \'states\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "set_weights"
+ argspec: "args=[\'self\', \'weights\'], varargs=None, keywords=None, defaults=None"
+ }
+}
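The new v1 golden file above is the first to record tf.keras.layers.CuDNNGRU. A hedged usage sketch, assuming a CUDA/cuDNN-enabled build (the layer has no CPU kernel); the unit count, timesteps, and feature count are illustrative values, not anything prescribed by the diff:

import tensorflow as tf

# CuDNNGRU takes the constructor arguments listed in the __init__ argspec
# above; 32 units and a (10, 8) input shape are made-up example values.
model = tf.keras.Sequential([
    tf.keras.layers.CuDNNGRU(32, return_sequences=True, input_shape=(10, 8)),
    tf.keras.layers.CuDNNGRU(32),  # return_sequences defaults to False
    tf.keras.layers.Dense(1),
])
model.compile(optimizer='adam', loss='mse')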
diff --git a/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-cu-d-n-n-l-s-t-m.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-cu-d-n-n-l-s-t-m.pbtxt
new file mode 100644
index 0000000000..6fdffef776
--- /dev/null
+++ b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-cu-d-n-n-l-s-t-m.pbtxt
@@ -0,0 +1,193 @@
+path: "tensorflow.keras.layers.CuDNNLSTM"
+tf_class {
+ is_instance: "<class \'tensorflow.python.keras.layers.cudnn_recurrent.CuDNNLSTM\'>"
+ is_instance: "<class \'tensorflow.python.keras.layers.cudnn_recurrent._CuDNNRNN\'>"
+ is_instance: "<class \'tensorflow.python.keras.layers.recurrent.RNN\'>"
+ is_instance: "<class \'tensorflow.python.keras.engine.base_layer.Layer\'>"
+ is_instance: "<class \'tensorflow.python.training.checkpointable.base.CheckpointableBase\'>"
+ is_instance: "<type \'object\'>"
+ member {
+ name: "activity_regularizer"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "cell"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "dtype"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "inbound_nodes"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "input"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "input_mask"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "input_shape"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "losses"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "name"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "non_trainable_variables"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "non_trainable_weights"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "outbound_nodes"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "output"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "output_mask"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "output_shape"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "states"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "trainable_variables"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "trainable_weights"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "updates"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "variables"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "weights"
+ mtype: "<type \'property\'>"
+ }
+ member_method {
+ name: "__init__"
+ argspec: "args=[\'self\', \'units\', \'kernel_initializer\', \'recurrent_initializer\', \'bias_initializer\', \'unit_forget_bias\', \'kernel_regularizer\', \'recurrent_regularizer\', \'bias_regularizer\', \'activity_regularizer\', \'kernel_constraint\', \'recurrent_constraint\', \'bias_constraint\', \'return_sequences\', \'return_state\', \'go_backwards\', \'stateful\'], varargs=None, keywords=kwargs, defaults=[\'glorot_uniform\', \'orthogonal\', \'zeros\', \'True\', \'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'False\', \'False\', \'False\', \'False\'], "
+ }
+ member_method {
+ name: "add_loss"
+ argspec: "args=[\'self\', \'losses\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "add_update"
+ argspec: "args=[\'self\', \'updates\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "add_variable"
+ argspec: "args=[\'self\'], varargs=args, keywords=kwargs, defaults=None"
+ }
+ member_method {
+ name: "add_weight"
+ argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'synchronization\', \'aggregation\'], varargs=None, keywords=kwargs, defaults=[\'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'VariableSynchronization.AUTO\', \'VariableAggregation.NONE\'], "
+ }
+ member_method {
+ name: "apply"
+ argspec: "args=[\'self\', \'inputs\'], varargs=args, keywords=kwargs, defaults=None"
+ }
+ member_method {
+ name: "build"
+ argspec: "args=[\'self\', \'input_shape\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "call"
+ argspec: "args=[\'self\', \'inputs\', \'mask\', \'training\', \'initial_state\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\'], "
+ }
+ member_method {
+ name: "compute_mask"
+ argspec: "args=[\'self\', \'inputs\', \'mask\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "compute_output_shape"
+ argspec: "args=[\'instance\', \'input_shape\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "count_params"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "from_config"
+ argspec: "args=[\'cls\', \'config\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_config"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_initial_state"
+ argspec: "args=[\'self\', \'inputs\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_input_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_input_mask_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_input_shape_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_losses_for"
+ argspec: "args=[\'self\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "get_output_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_output_mask_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_output_shape_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_updates_for"
+ argspec: "args=[\'self\', \'inputs\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_weights"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "reset_states"
+ argspec: "args=[\'self\', \'states\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "set_weights"
+ argspec: "args=[\'self\', \'weights\'], varargs=None, keywords=None, defaults=None"
+ }
+}
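The CuDNNLSTM golden file mirrors the CuDNNGRU one; the notable difference is its __init__ argspec, which adds unit_forget_bias with a default of True. An illustrative construction under the same GPU-only assumption as the sketch above:

import tensorflow as tf

# unit_forget_bias=True matches the default recorded in the argspec;
# 64 units and return_state=True are illustrative choices.
lstm = tf.keras.layers.CuDNNLSTM(64, return_state=True, unit_forget_bias=True)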
diff --git a/tensorflow/tools/api/golden/tensorflow.keras.layers.-dense.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-dense.pbtxt
index 310a3c3b91..3ac3825759 100644
--- a/tensorflow/tools/api/golden/tensorflow.keras.layers.-dense.pbtxt
+++ b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-dense.pbtxt
@@ -1,8 +1,8 @@
path: "tensorflow.keras.layers.Dense"
tf_class {
- is_instance: "<class \'tensorflow.python.keras._impl.keras.layers.core.Dense\'>"
- is_instance: "<class \'tensorflow.python.keras._impl.keras.engine.base_layer.Layer\'>"
- is_instance: "<class \'tensorflow.python.training.checkpointable.CheckpointableBase\'>"
+ is_instance: "<class \'tensorflow.python.keras.layers.core.Dense\'>"
+ is_instance: "<class \'tensorflow.python.keras.engine.base_layer.Layer\'>"
+ is_instance: "<class \'tensorflow.python.training.checkpointable.base.CheckpointableBase\'>"
is_instance: "<type \'object\'>"
member {
name: "activity_regularizer"
@@ -98,7 +98,7 @@ tf_class {
}
member_method {
name: "add_weight"
- argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'getter\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'True\', \'None\', \'None\', \'None\', \'None\'], "
+ argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'synchronization\', \'aggregation\'], varargs=None, keywords=kwargs, defaults=[\'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'VariableSynchronization.AUTO\', \'VariableAggregation.NONE\'], "
}
member_method {
name: "apply"
diff --git a/tensorflow/tools/api/golden/tensorflow.keras.layers.-depthwise-conv2-d.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-depthwise-conv2-d.pbtxt
index 2d67b5f720..280ec8c25f 100644
--- a/tensorflow/tools/api/golden/tensorflow.keras.layers.-depthwise-conv2-d.pbtxt
+++ b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-depthwise-conv2-d.pbtxt
@@ -1,10 +1,10 @@
path: "tensorflow.keras.layers.DepthwiseConv2D"
tf_class {
- is_instance: "<class \'tensorflow.python.keras._impl.keras.layers.convolutional.DepthwiseConv2D\'>"
- is_instance: "<class \'tensorflow.python.keras._impl.keras.layers.convolutional.Conv2D\'>"
- is_instance: "<class \'tensorflow.python.keras._impl.keras.layers.convolutional.Conv\'>"
- is_instance: "<class \'tensorflow.python.keras._impl.keras.engine.base_layer.Layer\'>"
- is_instance: "<class \'tensorflow.python.training.checkpointable.CheckpointableBase\'>"
+ is_instance: "<class \'tensorflow.python.keras.layers.convolutional.DepthwiseConv2D\'>"
+ is_instance: "<class \'tensorflow.python.keras.layers.convolutional.Conv2D\'>"
+ is_instance: "<class \'tensorflow.python.keras.layers.convolutional.Conv\'>"
+ is_instance: "<class \'tensorflow.python.keras.engine.base_layer.Layer\'>"
+ is_instance: "<class \'tensorflow.python.training.checkpointable.base.CheckpointableBase\'>"
is_instance: "<type \'object\'>"
member {
name: "activity_regularizer"
@@ -100,7 +100,7 @@ tf_class {
}
member_method {
name: "add_weight"
- argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'getter\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'True\', \'None\', \'None\', \'None\', \'None\'], "
+ argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'synchronization\', \'aggregation\'], varargs=None, keywords=kwargs, defaults=[\'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'VariableSynchronization.AUTO\', \'VariableAggregation.NONE\'], "
}
member_method {
name: "apply"
diff --git a/tensorflow/tools/api/golden/tensorflow.keras.layers.-dot.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-dot.pbtxt
index 0e493a7f2b..560f66f9c7 100644
--- a/tensorflow/tools/api/golden/tensorflow.keras.layers.-dot.pbtxt
+++ b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-dot.pbtxt
@@ -1,9 +1,9 @@
path: "tensorflow.keras.layers.Dot"
tf_class {
- is_instance: "<class \'tensorflow.python.keras._impl.keras.layers.merge.Dot\'>"
- is_instance: "<class \'tensorflow.python.keras._impl.keras.layers.merge._Merge\'>"
- is_instance: "<class \'tensorflow.python.keras._impl.keras.engine.base_layer.Layer\'>"
- is_instance: "<class \'tensorflow.python.training.checkpointable.CheckpointableBase\'>"
+ is_instance: "<class \'tensorflow.python.keras.layers.merge.Dot\'>"
+ is_instance: "<class \'tensorflow.python.keras.layers.merge._Merge\'>"
+ is_instance: "<class \'tensorflow.python.keras.engine.base_layer.Layer\'>"
+ is_instance: "<class \'tensorflow.python.training.checkpointable.base.CheckpointableBase\'>"
is_instance: "<type \'object\'>"
member {
name: "activity_regularizer"
@@ -99,7 +99,7 @@ tf_class {
}
member_method {
name: "add_weight"
- argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'getter\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'True\', \'None\', \'None\', \'None\', \'None\'], "
+ argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'synchronization\', \'aggregation\'], varargs=None, keywords=kwargs, defaults=[\'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'VariableSynchronization.AUTO\', \'VariableAggregation.NONE\'], "
}
member_method {
name: "apply"
diff --git a/tensorflow/tools/api/golden/tensorflow.keras.layers.-dropout.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-dropout.pbtxt
index 14726b4b6c..c0543529c3 100644
--- a/tensorflow/tools/api/golden/tensorflow.keras.layers.-dropout.pbtxt
+++ b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-dropout.pbtxt
@@ -1,8 +1,8 @@
path: "tensorflow.keras.layers.Dropout"
tf_class {
- is_instance: "<class \'tensorflow.python.keras._impl.keras.layers.core.Dropout\'>"
- is_instance: "<class \'tensorflow.python.keras._impl.keras.engine.base_layer.Layer\'>"
- is_instance: "<class \'tensorflow.python.training.checkpointable.CheckpointableBase\'>"
+ is_instance: "<class \'tensorflow.python.keras.layers.core.Dropout\'>"
+ is_instance: "<class \'tensorflow.python.keras.engine.base_layer.Layer\'>"
+ is_instance: "<class \'tensorflow.python.training.checkpointable.base.CheckpointableBase\'>"
is_instance: "<type \'object\'>"
member {
name: "activity_regularizer"
@@ -98,7 +98,7 @@ tf_class {
}
member_method {
name: "add_weight"
- argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'getter\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'True\', \'None\', \'None\', \'None\', \'None\'], "
+ argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'synchronization\', \'aggregation\'], varargs=None, keywords=kwargs, defaults=[\'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'VariableSynchronization.AUTO\', \'VariableAggregation.NONE\'], "
}
member_method {
name: "apply"
@@ -106,7 +106,7 @@ tf_class {
}
member_method {
name: "build"
- argspec: "args=[\'self\', \'_\'], varargs=None, keywords=None, defaults=None"
+ argspec: "args=[\'self\', \'input_shape\'], varargs=None, keywords=None, defaults=None"
}
member_method {
name: "call"
diff --git a/tensorflow/tools/api/golden/tensorflow.keras.layers.-e-l-u.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-e-l-u.pbtxt
index 32a50455ed..04eb2824b9 100644
--- a/tensorflow/tools/api/golden/tensorflow.keras.layers.-e-l-u.pbtxt
+++ b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-e-l-u.pbtxt
@@ -1,8 +1,8 @@
path: "tensorflow.keras.layers.ELU"
tf_class {
- is_instance: "<class \'tensorflow.python.keras._impl.keras.layers.advanced_activations.ELU\'>"
- is_instance: "<class \'tensorflow.python.keras._impl.keras.engine.base_layer.Layer\'>"
- is_instance: "<class \'tensorflow.python.training.checkpointable.CheckpointableBase\'>"
+ is_instance: "<class \'tensorflow.python.keras.layers.advanced_activations.ELU\'>"
+ is_instance: "<class \'tensorflow.python.keras.engine.base_layer.Layer\'>"
+ is_instance: "<class \'tensorflow.python.training.checkpointable.base.CheckpointableBase\'>"
is_instance: "<type \'object\'>"
member {
name: "activity_regularizer"
@@ -98,7 +98,7 @@ tf_class {
}
member_method {
name: "add_weight"
- argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'getter\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'True\', \'None\', \'None\', \'None\', \'None\'], "
+ argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'synchronization\', \'aggregation\'], varargs=None, keywords=kwargs, defaults=[\'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'VariableSynchronization.AUTO\', \'VariableAggregation.NONE\'], "
}
member_method {
name: "apply"
@@ -106,7 +106,7 @@ tf_class {
}
member_method {
name: "build"
- argspec: "args=[\'self\', \'_\'], varargs=None, keywords=None, defaults=None"
+ argspec: "args=[\'self\', \'input_shape\'], varargs=None, keywords=None, defaults=None"
}
member_method {
name: "call"
diff --git a/tensorflow/tools/api/golden/tensorflow.keras.layers.-embedding.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-embedding.pbtxt
index 2f615d8112..f400432915 100644
--- a/tensorflow/tools/api/golden/tensorflow.keras.layers.-embedding.pbtxt
+++ b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-embedding.pbtxt
@@ -1,8 +1,8 @@
path: "tensorflow.keras.layers.Embedding"
tf_class {
- is_instance: "<class \'tensorflow.python.keras._impl.keras.layers.embeddings.Embedding\'>"
- is_instance: "<class \'tensorflow.python.keras._impl.keras.engine.base_layer.Layer\'>"
- is_instance: "<class \'tensorflow.python.training.checkpointable.CheckpointableBase\'>"
+ is_instance: "<class \'tensorflow.python.keras.layers.embeddings.Embedding\'>"
+ is_instance: "<class \'tensorflow.python.keras.engine.base_layer.Layer\'>"
+ is_instance: "<class \'tensorflow.python.training.checkpointable.base.CheckpointableBase\'>"
is_instance: "<type \'object\'>"
member {
name: "activity_regularizer"
@@ -98,7 +98,7 @@ tf_class {
}
member_method {
name: "add_weight"
- argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'getter\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'True\', \'None\', \'None\', \'None\', \'None\'], "
+ argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'synchronization\', \'aggregation\'], varargs=None, keywords=kwargs, defaults=[\'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'VariableSynchronization.AUTO\', \'VariableAggregation.NONE\'], "
}
member_method {
name: "apply"
diff --git a/tensorflow/tools/api/golden/tensorflow.keras.layers.-global-max-pool2-d.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-flatten.pbtxt
index 4b0cf9a5d3..ab176b441a 100644
--- a/tensorflow/tools/api/golden/tensorflow.keras.layers.-global-max-pool2-d.pbtxt
+++ b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-flatten.pbtxt
@@ -1,9 +1,8 @@
-path: "tensorflow.keras.layers.GlobalMaxPool2D"
+path: "tensorflow.keras.layers.Flatten"
tf_class {
- is_instance: "<class \'tensorflow.python.keras._impl.keras.layers.pooling.GlobalMaxPooling2D\'>"
- is_instance: "<class \'tensorflow.python.keras._impl.keras.layers.pooling.GlobalPooling2D\'>"
- is_instance: "<class \'tensorflow.python.keras._impl.keras.engine.base_layer.Layer\'>"
- is_instance: "<class \'tensorflow.python.training.checkpointable.CheckpointableBase\'>"
+ is_instance: "<class \'tensorflow.python.keras.layers.core.Flatten\'>"
+ is_instance: "<class \'tensorflow.python.keras.engine.base_layer.Layer\'>"
+ is_instance: "<class \'tensorflow.python.training.checkpointable.base.CheckpointableBase\'>"
is_instance: "<type \'object\'>"
member {
name: "activity_regularizer"
@@ -99,7 +98,7 @@ tf_class {
}
member_method {
name: "add_weight"
- argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'getter\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'True\', \'None\', \'None\', \'None\', \'None\'], "
+ argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'synchronization\', \'aggregation\'], varargs=None, keywords=kwargs, defaults=[\'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'VariableSynchronization.AUTO\', \'VariableAggregation.NONE\'], "
}
member_method {
name: "apply"
@@ -107,7 +106,7 @@ tf_class {
}
member_method {
name: "build"
- argspec: "args=[\'self\', \'_\'], varargs=None, keywords=None, defaults=None"
+ argspec: "args=[\'self\', \'input_shape\'], varargs=None, keywords=None, defaults=None"
}
member_method {
name: "call"
diff --git a/tensorflow/tools/api/golden/tensorflow.keras.layers.-g-r-u-cell.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-g-r-u-cell.pbtxt
index d79d02b954..c3895a0ac1 100644
--- a/tensorflow/tools/api/golden/tensorflow.keras.layers.-g-r-u-cell.pbtxt
+++ b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-g-r-u-cell.pbtxt
@@ -1,8 +1,8 @@
path: "tensorflow.keras.layers.GRUCell"
tf_class {
- is_instance: "<class \'tensorflow.python.keras._impl.keras.layers.recurrent.GRUCell\'>"
- is_instance: "<class \'tensorflow.python.keras._impl.keras.engine.base_layer.Layer\'>"
- is_instance: "<class \'tensorflow.python.training.checkpointable.CheckpointableBase\'>"
+ is_instance: "<class \'tensorflow.python.keras.layers.recurrent.GRUCell\'>"
+ is_instance: "<class \'tensorflow.python.keras.engine.base_layer.Layer\'>"
+ is_instance: "<class \'tensorflow.python.training.checkpointable.base.CheckpointableBase\'>"
is_instance: "<type \'object\'>"
member {
name: "activity_regularizer"
@@ -98,7 +98,7 @@ tf_class {
}
member_method {
name: "add_weight"
- argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'getter\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'True\', \'None\', \'None\', \'None\', \'None\'], "
+ argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'synchronization\', \'aggregation\'], varargs=None, keywords=kwargs, defaults=[\'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'VariableSynchronization.AUTO\', \'VariableAggregation.NONE\'], "
}
member_method {
name: "apply"
@@ -133,6 +133,10 @@ tf_class {
argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
}
member_method {
+ name: "get_initial_state"
+ argspec: "args=[\'self\', \'inputs\', \'batch_size\', \'dtype\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\'], "
+ }
+ member_method {
name: "get_input_at"
argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
}
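The GRUCell hunk above adds a get_initial_state member taking inputs, batch_size, and dtype, all defaulting to None. A minimal sketch of asking a cell for its zero state; the unit count and batch shape are assumptions for illustration only:

import tensorflow as tf

cell = tf.keras.layers.GRUCell(16)
inputs = tf.zeros([4, 8])  # one timestep for a batch of 4, 8 features each
# Per the new argspec, any of inputs/batch_size/dtype may be supplied;
# passing the inputs tensor lets the cell infer batch size and dtype from it.
init_state = cell.get_initial_state(inputs=inputs)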
diff --git a/tensorflow/tools/api/golden/tensorflow.keras.layers.-g-r-u.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-g-r-u.pbtxt
index 1d38ae64bb..a0fe598ab9 100644
--- a/tensorflow/tools/api/golden/tensorflow.keras.layers.-g-r-u.pbtxt
+++ b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-g-r-u.pbtxt
@@ -1,9 +1,9 @@
path: "tensorflow.keras.layers.GRU"
tf_class {
- is_instance: "<class \'tensorflow.python.keras._impl.keras.layers.recurrent.GRU\'>"
- is_instance: "<class \'tensorflow.python.keras._impl.keras.layers.recurrent.RNN\'>"
- is_instance: "<class \'tensorflow.python.keras._impl.keras.engine.base_layer.Layer\'>"
- is_instance: "<class \'tensorflow.python.training.checkpointable.CheckpointableBase\'>"
+ is_instance: "<class \'tensorflow.python.keras.layers.recurrent.GRU\'>"
+ is_instance: "<class \'tensorflow.python.keras.layers.recurrent.RNN\'>"
+ is_instance: "<class \'tensorflow.python.keras.engine.base_layer.Layer\'>"
+ is_instance: "<class \'tensorflow.python.training.checkpointable.base.CheckpointableBase\'>"
is_instance: "<type \'object\'>"
member {
name: "activation"
@@ -171,7 +171,7 @@ tf_class {
}
member_method {
name: "add_weight"
- argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'getter\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'True\', \'None\', \'None\', \'None\', \'None\'], "
+ argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'synchronization\', \'aggregation\'], varargs=None, keywords=kwargs, defaults=[\'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'VariableSynchronization.AUTO\', \'VariableAggregation.NONE\'], "
}
member_method {
name: "apply"
diff --git a/tensorflow/tools/api/golden/tensorflow.keras.layers.-gaussian-dropout.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-gaussian-dropout.pbtxt
index 135de9cd95..55e0d7ef02 100644
--- a/tensorflow/tools/api/golden/tensorflow.keras.layers.-gaussian-dropout.pbtxt
+++ b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-gaussian-dropout.pbtxt
@@ -1,8 +1,8 @@
path: "tensorflow.keras.layers.GaussianDropout"
tf_class {
- is_instance: "<class \'tensorflow.python.keras._impl.keras.layers.noise.GaussianDropout\'>"
- is_instance: "<class \'tensorflow.python.keras._impl.keras.engine.base_layer.Layer\'>"
- is_instance: "<class \'tensorflow.python.training.checkpointable.CheckpointableBase\'>"
+ is_instance: "<class \'tensorflow.python.keras.layers.noise.GaussianDropout\'>"
+ is_instance: "<class \'tensorflow.python.keras.engine.base_layer.Layer\'>"
+ is_instance: "<class \'tensorflow.python.training.checkpointable.base.CheckpointableBase\'>"
is_instance: "<type \'object\'>"
member {
name: "activity_regularizer"
@@ -98,7 +98,7 @@ tf_class {
}
member_method {
name: "add_weight"
- argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'getter\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'True\', \'None\', \'None\', \'None\', \'None\'], "
+ argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'synchronization\', \'aggregation\'], varargs=None, keywords=kwargs, defaults=[\'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'VariableSynchronization.AUTO\', \'VariableAggregation.NONE\'], "
}
member_method {
name: "apply"
@@ -106,7 +106,7 @@ tf_class {
}
member_method {
name: "build"
- argspec: "args=[\'self\', \'_\'], varargs=None, keywords=None, defaults=None"
+ argspec: "args=[\'self\', \'input_shape\'], varargs=None, keywords=None, defaults=None"
}
member_method {
name: "call"
diff --git a/tensorflow/tools/api/golden/tensorflow.keras.layers.-gaussian-noise.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-gaussian-noise.pbtxt
index 5db6e433ee..38fbff5e4a 100644
--- a/tensorflow/tools/api/golden/tensorflow.keras.layers.-gaussian-noise.pbtxt
+++ b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-gaussian-noise.pbtxt
@@ -1,8 +1,8 @@
path: "tensorflow.keras.layers.GaussianNoise"
tf_class {
- is_instance: "<class \'tensorflow.python.keras._impl.keras.layers.noise.GaussianNoise\'>"
- is_instance: "<class \'tensorflow.python.keras._impl.keras.engine.base_layer.Layer\'>"
- is_instance: "<class \'tensorflow.python.training.checkpointable.CheckpointableBase\'>"
+ is_instance: "<class \'tensorflow.python.keras.layers.noise.GaussianNoise\'>"
+ is_instance: "<class \'tensorflow.python.keras.engine.base_layer.Layer\'>"
+ is_instance: "<class \'tensorflow.python.training.checkpointable.base.CheckpointableBase\'>"
is_instance: "<type \'object\'>"
member {
name: "activity_regularizer"
@@ -98,7 +98,7 @@ tf_class {
}
member_method {
name: "add_weight"
- argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'getter\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'True\', \'None\', \'None\', \'None\', \'None\'], "
+ argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'synchronization\', \'aggregation\'], varargs=None, keywords=kwargs, defaults=[\'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'VariableSynchronization.AUTO\', \'VariableAggregation.NONE\'], "
}
member_method {
name: "apply"
@@ -106,7 +106,7 @@ tf_class {
}
member_method {
name: "build"
- argspec: "args=[\'self\', \'_\'], varargs=None, keywords=None, defaults=None"
+ argspec: "args=[\'self\', \'input_shape\'], varargs=None, keywords=None, defaults=None"
}
member_method {
name: "call"
diff --git a/tensorflow/tools/api/golden/tensorflow.keras.layers.-global-average-pooling1-d.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-global-average-pooling1-d.pbtxt
index bf0dba0a92..5ea61d118d 100644
--- a/tensorflow/tools/api/golden/tensorflow.keras.layers.-global-average-pooling1-d.pbtxt
+++ b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-global-average-pooling1-d.pbtxt
@@ -1,9 +1,9 @@
path: "tensorflow.keras.layers.GlobalAveragePooling1D"
tf_class {
- is_instance: "<class \'tensorflow.python.keras._impl.keras.layers.pooling.GlobalAveragePooling1D\'>"
- is_instance: "<class \'tensorflow.python.keras._impl.keras.layers.pooling.GlobalPooling1D\'>"
- is_instance: "<class \'tensorflow.python.keras._impl.keras.engine.base_layer.Layer\'>"
- is_instance: "<class \'tensorflow.python.training.checkpointable.CheckpointableBase\'>"
+ is_instance: "<class \'tensorflow.python.keras.layers.pooling.GlobalAveragePooling1D\'>"
+ is_instance: "<class \'tensorflow.python.keras.layers.pooling.GlobalPooling1D\'>"
+ is_instance: "<class \'tensorflow.python.keras.engine.base_layer.Layer\'>"
+ is_instance: "<class \'tensorflow.python.training.checkpointable.base.CheckpointableBase\'>"
is_instance: "<type \'object\'>"
member {
name: "activity_regularizer"
@@ -99,7 +99,7 @@ tf_class {
}
member_method {
name: "add_weight"
- argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'getter\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'True\', \'None\', \'None\', \'None\', \'None\'], "
+ argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'synchronization\', \'aggregation\'], varargs=None, keywords=kwargs, defaults=[\'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'VariableSynchronization.AUTO\', \'VariableAggregation.NONE\'], "
}
member_method {
name: "apply"
@@ -107,7 +107,7 @@ tf_class {
}
member_method {
name: "build"
- argspec: "args=[\'self\', \'_\'], varargs=None, keywords=None, defaults=None"
+ argspec: "args=[\'self\', \'input_shape\'], varargs=None, keywords=None, defaults=None"
}
member_method {
name: "call"
diff --git a/tensorflow/tools/api/golden/tensorflow.keras.layers.-global-average-pooling2-d.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-global-average-pooling2-d.pbtxt
index 6da9803609..929f48df23 100644
--- a/tensorflow/tools/api/golden/tensorflow.keras.layers.-global-average-pooling2-d.pbtxt
+++ b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-global-average-pooling2-d.pbtxt
@@ -1,9 +1,9 @@
path: "tensorflow.keras.layers.GlobalAveragePooling2D"
tf_class {
- is_instance: "<class \'tensorflow.python.keras._impl.keras.layers.pooling.GlobalAveragePooling2D\'>"
- is_instance: "<class \'tensorflow.python.keras._impl.keras.layers.pooling.GlobalPooling2D\'>"
- is_instance: "<class \'tensorflow.python.keras._impl.keras.engine.base_layer.Layer\'>"
- is_instance: "<class \'tensorflow.python.training.checkpointable.CheckpointableBase\'>"
+ is_instance: "<class \'tensorflow.python.keras.layers.pooling.GlobalAveragePooling2D\'>"
+ is_instance: "<class \'tensorflow.python.keras.layers.pooling.GlobalPooling2D\'>"
+ is_instance: "<class \'tensorflow.python.keras.engine.base_layer.Layer\'>"
+ is_instance: "<class \'tensorflow.python.training.checkpointable.base.CheckpointableBase\'>"
is_instance: "<type \'object\'>"
member {
name: "activity_regularizer"
@@ -99,7 +99,7 @@ tf_class {
}
member_method {
name: "add_weight"
- argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'getter\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'True\', \'None\', \'None\', \'None\', \'None\'], "
+ argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'synchronization\', \'aggregation\'], varargs=None, keywords=kwargs, defaults=[\'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'VariableSynchronization.AUTO\', \'VariableAggregation.NONE\'], "
}
member_method {
name: "apply"
@@ -107,7 +107,7 @@ tf_class {
}
member_method {
name: "build"
- argspec: "args=[\'self\', \'_\'], varargs=None, keywords=None, defaults=None"
+ argspec: "args=[\'self\', \'input_shape\'], varargs=None, keywords=None, defaults=None"
}
member_method {
name: "call"
diff --git a/tensorflow/tools/api/golden/tensorflow.keras.layers.-global-average-pooling3-d.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-global-average-pooling3-d.pbtxt
index 345593dec8..2e6d59337f 100644
--- a/tensorflow/tools/api/golden/tensorflow.keras.layers.-global-average-pooling3-d.pbtxt
+++ b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-global-average-pooling3-d.pbtxt
@@ -1,9 +1,9 @@
path: "tensorflow.keras.layers.GlobalAveragePooling3D"
tf_class {
- is_instance: "<class \'tensorflow.python.keras._impl.keras.layers.pooling.GlobalAveragePooling3D\'>"
- is_instance: "<class \'tensorflow.python.keras._impl.keras.layers.pooling.GlobalPooling3D\'>"
- is_instance: "<class \'tensorflow.python.keras._impl.keras.engine.base_layer.Layer\'>"
- is_instance: "<class \'tensorflow.python.training.checkpointable.CheckpointableBase\'>"
+ is_instance: "<class \'tensorflow.python.keras.layers.pooling.GlobalAveragePooling3D\'>"
+ is_instance: "<class \'tensorflow.python.keras.layers.pooling.GlobalPooling3D\'>"
+ is_instance: "<class \'tensorflow.python.keras.engine.base_layer.Layer\'>"
+ is_instance: "<class \'tensorflow.python.training.checkpointable.base.CheckpointableBase\'>"
is_instance: "<type \'object\'>"
member {
name: "activity_regularizer"
@@ -99,7 +99,7 @@ tf_class {
}
member_method {
name: "add_weight"
- argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'getter\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'True\', \'None\', \'None\', \'None\', \'None\'], "
+ argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'synchronization\', \'aggregation\'], varargs=None, keywords=kwargs, defaults=[\'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'VariableSynchronization.AUTO\', \'VariableAggregation.NONE\'], "
}
member_method {
name: "apply"
@@ -107,7 +107,7 @@ tf_class {
}
member_method {
name: "build"
- argspec: "args=[\'self\', \'_\'], varargs=None, keywords=None, defaults=None"
+ argspec: "args=[\'self\', \'input_shape\'], varargs=None, keywords=None, defaults=None"
}
member_method {
name: "call"
diff --git a/tensorflow/tools/api/golden/tensorflow.keras.layers.-global-avg-pool1-d.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-global-avg-pool1-d.pbtxt
index 5d3be9085e..11dca17c6d 100644
--- a/tensorflow/tools/api/golden/tensorflow.keras.layers.-global-avg-pool1-d.pbtxt
+++ b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-global-avg-pool1-d.pbtxt
@@ -1,9 +1,9 @@
path: "tensorflow.keras.layers.GlobalAvgPool1D"
tf_class {
- is_instance: "<class \'tensorflow.python.keras._impl.keras.layers.pooling.GlobalAveragePooling1D\'>"
- is_instance: "<class \'tensorflow.python.keras._impl.keras.layers.pooling.GlobalPooling1D\'>"
- is_instance: "<class \'tensorflow.python.keras._impl.keras.engine.base_layer.Layer\'>"
- is_instance: "<class \'tensorflow.python.training.checkpointable.CheckpointableBase\'>"
+ is_instance: "<class \'tensorflow.python.keras.layers.pooling.GlobalAveragePooling1D\'>"
+ is_instance: "<class \'tensorflow.python.keras.layers.pooling.GlobalPooling1D\'>"
+ is_instance: "<class \'tensorflow.python.keras.engine.base_layer.Layer\'>"
+ is_instance: "<class \'tensorflow.python.training.checkpointable.base.CheckpointableBase\'>"
is_instance: "<type \'object\'>"
member {
name: "activity_regularizer"
@@ -99,7 +99,7 @@ tf_class {
}
member_method {
name: "add_weight"
- argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'getter\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'True\', \'None\', \'None\', \'None\', \'None\'], "
+ argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'synchronization\', \'aggregation\'], varargs=None, keywords=kwargs, defaults=[\'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'VariableSynchronization.AUTO\', \'VariableAggregation.NONE\'], "
}
member_method {
name: "apply"
@@ -107,7 +107,7 @@ tf_class {
}
member_method {
name: "build"
- argspec: "args=[\'self\', \'_\'], varargs=None, keywords=None, defaults=None"
+ argspec: "args=[\'self\', \'input_shape\'], varargs=None, keywords=None, defaults=None"
}
member_method {
name: "call"
diff --git a/tensorflow/tools/api/golden/tensorflow.keras.layers.-global-avg-pool2-d.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-global-avg-pool2-d.pbtxt
index 0b79a87e05..4e3e258430 100644
--- a/tensorflow/tools/api/golden/tensorflow.keras.layers.-global-avg-pool2-d.pbtxt
+++ b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-global-avg-pool2-d.pbtxt
@@ -1,9 +1,9 @@
path: "tensorflow.keras.layers.GlobalAvgPool2D"
tf_class {
- is_instance: "<class \'tensorflow.python.keras._impl.keras.layers.pooling.GlobalAveragePooling2D\'>"
- is_instance: "<class \'tensorflow.python.keras._impl.keras.layers.pooling.GlobalPooling2D\'>"
- is_instance: "<class \'tensorflow.python.keras._impl.keras.engine.base_layer.Layer\'>"
- is_instance: "<class \'tensorflow.python.training.checkpointable.CheckpointableBase\'>"
+ is_instance: "<class \'tensorflow.python.keras.layers.pooling.GlobalAveragePooling2D\'>"
+ is_instance: "<class \'tensorflow.python.keras.layers.pooling.GlobalPooling2D\'>"
+ is_instance: "<class \'tensorflow.python.keras.engine.base_layer.Layer\'>"
+ is_instance: "<class \'tensorflow.python.training.checkpointable.base.CheckpointableBase\'>"
is_instance: "<type \'object\'>"
member {
name: "activity_regularizer"
@@ -99,7 +99,7 @@ tf_class {
}
member_method {
name: "add_weight"
- argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'getter\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'True\', \'None\', \'None\', \'None\', \'None\'], "
+ argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'synchronization\', \'aggregation\'], varargs=None, keywords=kwargs, defaults=[\'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'VariableSynchronization.AUTO\', \'VariableAggregation.NONE\'], "
}
member_method {
name: "apply"
@@ -107,7 +107,7 @@ tf_class {
}
member_method {
name: "build"
- argspec: "args=[\'self\', \'_\'], varargs=None, keywords=None, defaults=None"
+ argspec: "args=[\'self\', \'input_shape\'], varargs=None, keywords=None, defaults=None"
}
member_method {
name: "call"
diff --git a/tensorflow/tools/api/golden/tensorflow.keras.layers.-global-avg-pool3-d.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-global-avg-pool3-d.pbtxt
index 68cdbac652..fb9166316f 100644
--- a/tensorflow/tools/api/golden/tensorflow.keras.layers.-global-avg-pool3-d.pbtxt
+++ b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-global-avg-pool3-d.pbtxt
@@ -1,9 +1,9 @@
path: "tensorflow.keras.layers.GlobalAvgPool3D"
tf_class {
- is_instance: "<class \'tensorflow.python.keras._impl.keras.layers.pooling.GlobalAveragePooling3D\'>"
- is_instance: "<class \'tensorflow.python.keras._impl.keras.layers.pooling.GlobalPooling3D\'>"
- is_instance: "<class \'tensorflow.python.keras._impl.keras.engine.base_layer.Layer\'>"
- is_instance: "<class \'tensorflow.python.training.checkpointable.CheckpointableBase\'>"
+ is_instance: "<class \'tensorflow.python.keras.layers.pooling.GlobalAveragePooling3D\'>"
+ is_instance: "<class \'tensorflow.python.keras.layers.pooling.GlobalPooling3D\'>"
+ is_instance: "<class \'tensorflow.python.keras.engine.base_layer.Layer\'>"
+ is_instance: "<class \'tensorflow.python.training.checkpointable.base.CheckpointableBase\'>"
is_instance: "<type \'object\'>"
member {
name: "activity_regularizer"
@@ -99,7 +99,7 @@ tf_class {
}
member_method {
name: "add_weight"
- argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'getter\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'True\', \'None\', \'None\', \'None\', \'None\'], "
+ argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'synchronization\', \'aggregation\'], varargs=None, keywords=kwargs, defaults=[\'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'VariableSynchronization.AUTO\', \'VariableAggregation.NONE\'], "
}
member_method {
name: "apply"
@@ -107,7 +107,7 @@ tf_class {
}
member_method {
name: "build"
- argspec: "args=[\'self\', \'_\'], varargs=None, keywords=None, defaults=None"
+ argspec: "args=[\'self\', \'input_shape\'], varargs=None, keywords=None, defaults=None"
}
member_method {
name: "call"
diff --git a/tensorflow/tools/api/golden/tensorflow.keras.layers.-global-max-pool1-d.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-global-max-pool1-d.pbtxt
index d5872b444f..278429af6f 100644
--- a/tensorflow/tools/api/golden/tensorflow.keras.layers.-global-max-pool1-d.pbtxt
+++ b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-global-max-pool1-d.pbtxt
@@ -1,9 +1,9 @@
path: "tensorflow.keras.layers.GlobalMaxPool1D"
tf_class {
- is_instance: "<class \'tensorflow.python.keras._impl.keras.layers.pooling.GlobalMaxPooling1D\'>"
- is_instance: "<class \'tensorflow.python.keras._impl.keras.layers.pooling.GlobalPooling1D\'>"
- is_instance: "<class \'tensorflow.python.keras._impl.keras.engine.base_layer.Layer\'>"
- is_instance: "<class \'tensorflow.python.training.checkpointable.CheckpointableBase\'>"
+ is_instance: "<class \'tensorflow.python.keras.layers.pooling.GlobalMaxPooling1D\'>"
+ is_instance: "<class \'tensorflow.python.keras.layers.pooling.GlobalPooling1D\'>"
+ is_instance: "<class \'tensorflow.python.keras.engine.base_layer.Layer\'>"
+ is_instance: "<class \'tensorflow.python.training.checkpointable.base.CheckpointableBase\'>"
is_instance: "<type \'object\'>"
member {
name: "activity_regularizer"
@@ -99,7 +99,7 @@ tf_class {
}
member_method {
name: "add_weight"
- argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'getter\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'True\', \'None\', \'None\', \'None\', \'None\'], "
+ argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'synchronization\', \'aggregation\'], varargs=None, keywords=kwargs, defaults=[\'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'VariableSynchronization.AUTO\', \'VariableAggregation.NONE\'], "
}
member_method {
name: "apply"
@@ -107,7 +107,7 @@ tf_class {
}
member_method {
name: "build"
- argspec: "args=[\'self\', \'_\'], varargs=None, keywords=None, defaults=None"
+ argspec: "args=[\'self\', \'input_shape\'], varargs=None, keywords=None, defaults=None"
}
member_method {
name: "call"
diff --git a/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-global-max-pool2-d.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-global-max-pool2-d.pbtxt
new file mode 100644
index 0000000000..87b7f6797a
--- /dev/null
+++ b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-global-max-pool2-d.pbtxt
@@ -0,0 +1,176 @@
+path: "tensorflow.keras.layers.GlobalMaxPool2D"
+tf_class {
+ is_instance: "<class \'tensorflow.python.keras.layers.pooling.GlobalMaxPooling2D\'>"
+ is_instance: "<class \'tensorflow.python.keras.layers.pooling.GlobalPooling2D\'>"
+ is_instance: "<class \'tensorflow.python.keras.engine.base_layer.Layer\'>"
+ is_instance: "<class \'tensorflow.python.training.checkpointable.base.CheckpointableBase\'>"
+ is_instance: "<type \'object\'>"
+ member {
+ name: "activity_regularizer"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "dtype"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "inbound_nodes"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "input"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "input_mask"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "input_shape"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "losses"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "name"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "non_trainable_variables"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "non_trainable_weights"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "outbound_nodes"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "output"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "output_mask"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "output_shape"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "trainable_variables"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "trainable_weights"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "updates"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "variables"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "weights"
+ mtype: "<type \'property\'>"
+ }
+ member_method {
+ name: "__init__"
+ argspec: "args=[\'self\', \'data_format\'], varargs=None, keywords=kwargs, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "add_loss"
+ argspec: "args=[\'self\', \'losses\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "add_update"
+ argspec: "args=[\'self\', \'updates\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "add_variable"
+ argspec: "args=[\'self\'], varargs=args, keywords=kwargs, defaults=None"
+ }
+ member_method {
+ name: "add_weight"
+ argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'synchronization\', \'aggregation\'], varargs=None, keywords=kwargs, defaults=[\'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'VariableSynchronization.AUTO\', \'VariableAggregation.NONE\'], "
+ }
+ member_method {
+ name: "apply"
+ argspec: "args=[\'self\', \'inputs\'], varargs=args, keywords=kwargs, defaults=None"
+ }
+ member_method {
+ name: "build"
+ argspec: "args=[\'self\', \'input_shape\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "call"
+ argspec: "args=[\'self\', \'inputs\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "compute_mask"
+ argspec: "args=[\'self\', \'inputs\', \'mask\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "compute_output_shape"
+ argspec: "args=[\'self\', \'input_shape\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "count_params"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "from_config"
+ argspec: "args=[\'cls\', \'config\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_config"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_input_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_input_mask_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_input_shape_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_losses_for"
+ argspec: "args=[\'self\', \'inputs\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_output_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_output_mask_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_output_shape_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_updates_for"
+ argspec: "args=[\'self\', \'inputs\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_weights"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "set_weights"
+ argspec: "args=[\'self\', \'weights\'], varargs=None, keywords=None, defaults=None"
+ }
+}
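Not part of the patch: a minimal sketch, assuming TensorFlow 1.x with tf.keras, of the add_weight signature these goldens now record (synchronization/aggregation in place of the old getter argument, with remaining keywords falling through to **kwargs). The ScaleLayer name and its weight are hypothetical.

    import tensorflow as tf

    class ScaleLayer(tf.keras.layers.Layer):
        def build(self, input_shape):
            # Defaults mirror the golden: VariableSynchronization.AUTO and
            # VariableAggregation.NONE.
            self.scale = self.add_weight(
                name="scale",
                shape=(),
                initializer="ones",
                synchronization=tf.VariableSynchronization.AUTO,
                aggregation=tf.VariableAggregation.NONE)
            super(ScaleLayer, self).build(input_shape)

        def call(self, inputs):
            return inputs * self.scale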
diff --git a/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-global-max-pool3-d.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-global-max-pool3-d.pbtxt
new file mode 100644
index 0000000000..98bf96fa0c
--- /dev/null
+++ b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-global-max-pool3-d.pbtxt
@@ -0,0 +1,176 @@
+path: "tensorflow.keras.layers.GlobalMaxPool3D"
+tf_class {
+ is_instance: "<class \'tensorflow.python.keras.layers.pooling.GlobalMaxPooling3D\'>"
+ is_instance: "<class \'tensorflow.python.keras.layers.pooling.GlobalPooling3D\'>"
+ is_instance: "<class \'tensorflow.python.keras.engine.base_layer.Layer\'>"
+ is_instance: "<class \'tensorflow.python.training.checkpointable.base.CheckpointableBase\'>"
+ is_instance: "<type \'object\'>"
+ member {
+ name: "activity_regularizer"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "dtype"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "inbound_nodes"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "input"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "input_mask"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "input_shape"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "losses"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "name"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "non_trainable_variables"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "non_trainable_weights"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "outbound_nodes"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "output"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "output_mask"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "output_shape"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "trainable_variables"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "trainable_weights"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "updates"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "variables"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "weights"
+ mtype: "<type \'property\'>"
+ }
+ member_method {
+ name: "__init__"
+ argspec: "args=[\'self\', \'data_format\'], varargs=None, keywords=kwargs, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "add_loss"
+ argspec: "args=[\'self\', \'losses\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "add_update"
+ argspec: "args=[\'self\', \'updates\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "add_variable"
+ argspec: "args=[\'self\'], varargs=args, keywords=kwargs, defaults=None"
+ }
+ member_method {
+ name: "add_weight"
+ argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'synchronization\', \'aggregation\'], varargs=None, keywords=kwargs, defaults=[\'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'VariableSynchronization.AUTO\', \'VariableAggregation.NONE\'], "
+ }
+ member_method {
+ name: "apply"
+ argspec: "args=[\'self\', \'inputs\'], varargs=args, keywords=kwargs, defaults=None"
+ }
+ member_method {
+ name: "build"
+ argspec: "args=[\'self\', \'input_shape\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "call"
+ argspec: "args=[\'self\', \'inputs\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "compute_mask"
+ argspec: "args=[\'self\', \'inputs\', \'mask\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "compute_output_shape"
+ argspec: "args=[\'self\', \'input_shape\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "count_params"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "from_config"
+ argspec: "args=[\'cls\', \'config\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_config"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_input_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_input_mask_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_input_shape_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_losses_for"
+ argspec: "args=[\'self\', \'inputs\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_output_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_output_mask_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_output_shape_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_updates_for"
+ argspec: "args=[\'self\', \'inputs\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_weights"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "set_weights"
+ argspec: "args=[\'self\', \'weights\'], varargs=None, keywords=None, defaults=None"
+ }
+}
diff --git a/tensorflow/tools/api/golden/tensorflow.keras.layers.-global-max-pooling1-d.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-global-max-pooling1-d.pbtxt
index 815f1cf580..935a69ab2f 100644
--- a/tensorflow/tools/api/golden/tensorflow.keras.layers.-global-max-pooling1-d.pbtxt
+++ b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-global-max-pooling1-d.pbtxt
@@ -1,9 +1,9 @@
path: "tensorflow.keras.layers.GlobalMaxPooling1D"
tf_class {
- is_instance: "<class \'tensorflow.python.keras._impl.keras.layers.pooling.GlobalMaxPooling1D\'>"
- is_instance: "<class \'tensorflow.python.keras._impl.keras.layers.pooling.GlobalPooling1D\'>"
- is_instance: "<class \'tensorflow.python.keras._impl.keras.engine.base_layer.Layer\'>"
- is_instance: "<class \'tensorflow.python.training.checkpointable.CheckpointableBase\'>"
+ is_instance: "<class \'tensorflow.python.keras.layers.pooling.GlobalMaxPooling1D\'>"
+ is_instance: "<class \'tensorflow.python.keras.layers.pooling.GlobalPooling1D\'>"
+ is_instance: "<class \'tensorflow.python.keras.engine.base_layer.Layer\'>"
+ is_instance: "<class \'tensorflow.python.training.checkpointable.base.CheckpointableBase\'>"
is_instance: "<type \'object\'>"
member {
name: "activity_regularizer"
@@ -99,7 +99,7 @@ tf_class {
}
member_method {
name: "add_weight"
- argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'getter\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'True\', \'None\', \'None\', \'None\', \'None\'], "
+ argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'synchronization\', \'aggregation\'], varargs=None, keywords=kwargs, defaults=[\'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'VariableSynchronization.AUTO\', \'VariableAggregation.NONE\'], "
}
member_method {
name: "apply"
@@ -107,7 +107,7 @@ tf_class {
}
member_method {
name: "build"
- argspec: "args=[\'self\', \'_\'], varargs=None, keywords=None, defaults=None"
+ argspec: "args=[\'self\', \'input_shape\'], varargs=None, keywords=None, defaults=None"
}
member_method {
name: "call"
diff --git a/tensorflow/tools/api/golden/tensorflow.keras.layers.-global-max-pooling2-d.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-global-max-pooling2-d.pbtxt
index e027dd6cc2..c9d4158d1c 100644
--- a/tensorflow/tools/api/golden/tensorflow.keras.layers.-global-max-pooling2-d.pbtxt
+++ b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-global-max-pooling2-d.pbtxt
@@ -1,9 +1,9 @@
path: "tensorflow.keras.layers.GlobalMaxPooling2D"
tf_class {
- is_instance: "<class \'tensorflow.python.keras._impl.keras.layers.pooling.GlobalMaxPooling2D\'>"
- is_instance: "<class \'tensorflow.python.keras._impl.keras.layers.pooling.GlobalPooling2D\'>"
- is_instance: "<class \'tensorflow.python.keras._impl.keras.engine.base_layer.Layer\'>"
- is_instance: "<class \'tensorflow.python.training.checkpointable.CheckpointableBase\'>"
+ is_instance: "<class \'tensorflow.python.keras.layers.pooling.GlobalMaxPooling2D\'>"
+ is_instance: "<class \'tensorflow.python.keras.layers.pooling.GlobalPooling2D\'>"
+ is_instance: "<class \'tensorflow.python.keras.engine.base_layer.Layer\'>"
+ is_instance: "<class \'tensorflow.python.training.checkpointable.base.CheckpointableBase\'>"
is_instance: "<type \'object\'>"
member {
name: "activity_regularizer"
@@ -99,7 +99,7 @@ tf_class {
}
member_method {
name: "add_weight"
- argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'getter\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'True\', \'None\', \'None\', \'None\', \'None\'], "
+ argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'synchronization\', \'aggregation\'], varargs=None, keywords=kwargs, defaults=[\'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'VariableSynchronization.AUTO\', \'VariableAggregation.NONE\'], "
}
member_method {
name: "apply"
@@ -107,7 +107,7 @@ tf_class {
}
member_method {
name: "build"
- argspec: "args=[\'self\', \'_\'], varargs=None, keywords=None, defaults=None"
+ argspec: "args=[\'self\', \'input_shape\'], varargs=None, keywords=None, defaults=None"
}
member_method {
name: "call"
diff --git a/tensorflow/tools/api/golden/tensorflow.keras.layers.-global-max-pooling3-d.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-global-max-pooling3-d.pbtxt
index c647b24a23..9953102ff9 100644
--- a/tensorflow/tools/api/golden/tensorflow.keras.layers.-global-max-pooling3-d.pbtxt
+++ b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-global-max-pooling3-d.pbtxt
@@ -1,9 +1,9 @@
path: "tensorflow.keras.layers.GlobalMaxPooling3D"
tf_class {
- is_instance: "<class \'tensorflow.python.keras._impl.keras.layers.pooling.GlobalMaxPooling3D\'>"
- is_instance: "<class \'tensorflow.python.keras._impl.keras.layers.pooling.GlobalPooling3D\'>"
- is_instance: "<class \'tensorflow.python.keras._impl.keras.engine.base_layer.Layer\'>"
- is_instance: "<class \'tensorflow.python.training.checkpointable.CheckpointableBase\'>"
+ is_instance: "<class \'tensorflow.python.keras.layers.pooling.GlobalMaxPooling3D\'>"
+ is_instance: "<class \'tensorflow.python.keras.layers.pooling.GlobalPooling3D\'>"
+ is_instance: "<class \'tensorflow.python.keras.engine.base_layer.Layer\'>"
+ is_instance: "<class \'tensorflow.python.training.checkpointable.base.CheckpointableBase\'>"
is_instance: "<type \'object\'>"
member {
name: "activity_regularizer"
@@ -99,7 +99,7 @@ tf_class {
}
member_method {
name: "add_weight"
- argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'getter\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'True\', \'None\', \'None\', \'None\', \'None\'], "
+ argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'synchronization\', \'aggregation\'], varargs=None, keywords=kwargs, defaults=[\'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'VariableSynchronization.AUTO\', \'VariableAggregation.NONE\'], "
}
member_method {
name: "apply"
@@ -107,7 +107,7 @@ tf_class {
}
member_method {
name: "build"
- argspec: "args=[\'self\', \'_\'], varargs=None, keywords=None, defaults=None"
+ argspec: "args=[\'self\', \'input_shape\'], varargs=None, keywords=None, defaults=None"
}
member_method {
name: "call"
diff --git a/tensorflow/tools/api/golden/tensorflow.keras.layers.-input-layer.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-input-layer.pbtxt
index 75d70734b4..2617f5a95f 100644
--- a/tensorflow/tools/api/golden/tensorflow.keras.layers.-input-layer.pbtxt
+++ b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-input-layer.pbtxt
@@ -1,8 +1,8 @@
path: "tensorflow.keras.layers.InputLayer"
tf_class {
- is_instance: "<class \'tensorflow.python.keras._impl.keras.engine.input_layer.InputLayer\'>"
- is_instance: "<class \'tensorflow.python.keras._impl.keras.engine.base_layer.Layer\'>"
- is_instance: "<class \'tensorflow.python.training.checkpointable.CheckpointableBase\'>"
+ is_instance: "<class \'tensorflow.python.keras.engine.input_layer.InputLayer\'>"
+ is_instance: "<class \'tensorflow.python.keras.engine.base_layer.Layer\'>"
+ is_instance: "<class \'tensorflow.python.training.checkpointable.base.CheckpointableBase\'>"
is_instance: "<type \'object\'>"
member {
name: "activity_regularizer"
@@ -98,7 +98,7 @@ tf_class {
}
member_method {
name: "add_weight"
- argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'getter\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'True\', \'None\', \'None\', \'None\', \'None\'], "
+ argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'synchronization\', \'aggregation\'], varargs=None, keywords=kwargs, defaults=[\'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'VariableSynchronization.AUTO\', \'VariableAggregation.NONE\'], "
}
member_method {
name: "apply"
@@ -106,7 +106,7 @@ tf_class {
}
member_method {
name: "build"
- argspec: "args=[\'self\', \'_\'], varargs=None, keywords=None, defaults=None"
+ argspec: "args=[\'self\', \'input_shape\'], varargs=None, keywords=None, defaults=None"
}
member_method {
name: "call"
diff --git a/tensorflow/tools/api/golden/tensorflow.keras.layers.-input-spec.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-input-spec.pbtxt
index 29edabe048..5fd0a47a68 100644
--- a/tensorflow/tools/api/golden/tensorflow.keras.layers.-input-spec.pbtxt
+++ b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-input-spec.pbtxt
@@ -1,6 +1,6 @@
path: "tensorflow.keras.layers.InputSpec"
tf_class {
- is_instance: "<class \'tensorflow.python.keras._impl.keras.engine.base_layer.InputSpec\'>"
+ is_instance: "<class \'tensorflow.python.keras.engine.base_layer.InputSpec\'>"
is_instance: "<type \'object\'>"
member_method {
name: "__init__"
diff --git a/tensorflow/tools/api/golden/tensorflow.keras.layers.-l-s-t-m-cell.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-l-s-t-m-cell.pbtxt
index 0ed383a355..e9f6ef45aa 100644
--- a/tensorflow/tools/api/golden/tensorflow.keras.layers.-l-s-t-m-cell.pbtxt
+++ b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-l-s-t-m-cell.pbtxt
@@ -1,8 +1,8 @@
path: "tensorflow.keras.layers.LSTMCell"
tf_class {
- is_instance: "<class \'tensorflow.python.keras._impl.keras.layers.recurrent.LSTMCell\'>"
- is_instance: "<class \'tensorflow.python.keras._impl.keras.engine.base_layer.Layer\'>"
- is_instance: "<class \'tensorflow.python.training.checkpointable.CheckpointableBase\'>"
+ is_instance: "<class \'tensorflow.python.keras.layers.recurrent.LSTMCell\'>"
+ is_instance: "<class \'tensorflow.python.keras.engine.base_layer.Layer\'>"
+ is_instance: "<class \'tensorflow.python.training.checkpointable.base.CheckpointableBase\'>"
is_instance: "<type \'object\'>"
member {
name: "activity_regularizer"
@@ -98,7 +98,7 @@ tf_class {
}
member_method {
name: "add_weight"
- argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'getter\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'True\', \'None\', \'None\', \'None\', \'None\'], "
+ argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'synchronization\', \'aggregation\'], varargs=None, keywords=kwargs, defaults=[\'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'VariableSynchronization.AUTO\', \'VariableAggregation.NONE\'], "
}
member_method {
name: "apply"
@@ -133,6 +133,10 @@ tf_class {
argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
}
member_method {
+ name: "get_initial_state"
+ argspec: "args=[\'self\', \'inputs\', \'batch_size\', \'dtype\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\'], "
+ }
+ member_method {
name: "get_input_at"
argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
}
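Illustration only (not from the patch): the LSTMCell golden above gains a get_initial_state(inputs, batch_size, dtype) method. A plausible way to use it when stepping the cell manually, assuming TensorFlow 1.x with tf.keras:

    import tensorflow as tf

    cell = tf.keras.layers.LSTMCell(units=4)
    inputs = tf.zeros([2, 8])                       # batch of 2, feature size 8
    states = cell.get_initial_state(inputs=inputs)  # zero [h, c] states sized from inputs
    output, new_states = cell(inputs, states)       # one step of the cell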
diff --git a/tensorflow/tools/api/golden/tensorflow.keras.layers.-l-s-t-m.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-l-s-t-m.pbtxt
index 6d14c9c8f6..ecdbf48157 100644
--- a/tensorflow/tools/api/golden/tensorflow.keras.layers.-l-s-t-m.pbtxt
+++ b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-l-s-t-m.pbtxt
@@ -1,9 +1,9 @@
path: "tensorflow.keras.layers.LSTM"
tf_class {
- is_instance: "<class \'tensorflow.python.keras._impl.keras.layers.recurrent.LSTM\'>"
- is_instance: "<class \'tensorflow.python.keras._impl.keras.layers.recurrent.RNN\'>"
- is_instance: "<class \'tensorflow.python.keras._impl.keras.engine.base_layer.Layer\'>"
- is_instance: "<class \'tensorflow.python.training.checkpointable.CheckpointableBase\'>"
+ is_instance: "<class \'tensorflow.python.keras.layers.recurrent.LSTM\'>"
+ is_instance: "<class \'tensorflow.python.keras.layers.recurrent.RNN\'>"
+ is_instance: "<class \'tensorflow.python.keras.engine.base_layer.Layer\'>"
+ is_instance: "<class \'tensorflow.python.training.checkpointable.base.CheckpointableBase\'>"
is_instance: "<type \'object\'>"
member {
name: "activation"
@@ -171,7 +171,7 @@ tf_class {
}
member_method {
name: "add_weight"
- argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'getter\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'True\', \'None\', \'None\', \'None\', \'None\'], "
+ argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'synchronization\', \'aggregation\'], varargs=None, keywords=kwargs, defaults=[\'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'VariableSynchronization.AUTO\', \'VariableAggregation.NONE\'], "
}
member_method {
name: "apply"
diff --git a/tensorflow/tools/api/golden/tensorflow.keras.layers.-lambda.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-lambda.pbtxt
index ddf96aba34..2e0b6bac24 100644
--- a/tensorflow/tools/api/golden/tensorflow.keras.layers.-lambda.pbtxt
+++ b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-lambda.pbtxt
@@ -1,8 +1,8 @@
path: "tensorflow.keras.layers.Lambda"
tf_class {
- is_instance: "<class \'tensorflow.python.keras._impl.keras.layers.core.Lambda\'>"
- is_instance: "<class \'tensorflow.python.keras._impl.keras.engine.base_layer.Layer\'>"
- is_instance: "<class \'tensorflow.python.training.checkpointable.CheckpointableBase\'>"
+ is_instance: "<class \'tensorflow.python.keras.layers.core.Lambda\'>"
+ is_instance: "<class \'tensorflow.python.keras.engine.base_layer.Layer\'>"
+ is_instance: "<class \'tensorflow.python.training.checkpointable.base.CheckpointableBase\'>"
is_instance: "<type \'object\'>"
member {
name: "activity_regularizer"
@@ -98,7 +98,7 @@ tf_class {
}
member_method {
name: "add_weight"
- argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'getter\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'True\', \'None\', \'None\', \'None\', \'None\'], "
+ argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'synchronization\', \'aggregation\'], varargs=None, keywords=kwargs, defaults=[\'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'VariableSynchronization.AUTO\', \'VariableAggregation.NONE\'], "
}
member_method {
name: "apply"
@@ -106,7 +106,7 @@ tf_class {
}
member_method {
name: "build"
- argspec: "args=[\'self\', \'_\'], varargs=None, keywords=None, defaults=None"
+ argspec: "args=[\'self\', \'input_shape\'], varargs=None, keywords=None, defaults=None"
}
member_method {
name: "call"
@@ -118,7 +118,7 @@ tf_class {
}
member_method {
name: "compute_output_shape"
- argspec: "args=[\'self\', \'input_shape\'], varargs=None, keywords=None, defaults=None"
+ argspec: "args=[\'instance\', \'input_shape\'], varargs=None, keywords=None, defaults=None"
}
member_method {
name: "count_params"
diff --git a/tensorflow/tools/api/golden/tensorflow.keras.layers.-layer.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-layer.pbtxt
index aca282d624..1e93d1118a 100644
--- a/tensorflow/tools/api/golden/tensorflow.keras.layers.-layer.pbtxt
+++ b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-layer.pbtxt
@@ -1,7 +1,7 @@
path: "tensorflow.keras.layers.Layer"
tf_class {
- is_instance: "<class \'tensorflow.python.keras._impl.keras.engine.base_layer.Layer\'>"
- is_instance: "<class \'tensorflow.python.training.checkpointable.CheckpointableBase\'>"
+ is_instance: "<class \'tensorflow.python.keras.engine.base_layer.Layer\'>"
+ is_instance: "<class \'tensorflow.python.training.checkpointable.base.CheckpointableBase\'>"
is_instance: "<type \'object\'>"
member {
name: "activity_regularizer"
@@ -97,7 +97,7 @@ tf_class {
}
member_method {
name: "add_weight"
- argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'getter\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'True\', \'None\', \'None\', \'None\', \'None\'], "
+ argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'synchronization\', \'aggregation\'], varargs=None, keywords=kwargs, defaults=[\'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'VariableSynchronization.AUTO\', \'VariableAggregation.NONE\'], "
}
member_method {
name: "apply"
@@ -105,7 +105,7 @@ tf_class {
}
member_method {
name: "build"
- argspec: "args=[\'self\', \'_\'], varargs=None, keywords=None, defaults=None"
+ argspec: "args=[\'self\', \'input_shape\'], varargs=None, keywords=None, defaults=None"
}
member_method {
name: "call"
diff --git a/tensorflow/tools/api/golden/tensorflow.keras.layers.-leaky-re-l-u.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-leaky-re-l-u.pbtxt
index b9c53b43c8..bfd36012a7 100644
--- a/tensorflow/tools/api/golden/tensorflow.keras.layers.-leaky-re-l-u.pbtxt
+++ b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-leaky-re-l-u.pbtxt
@@ -1,8 +1,8 @@
path: "tensorflow.keras.layers.LeakyReLU"
tf_class {
- is_instance: "<class \'tensorflow.python.keras._impl.keras.layers.advanced_activations.LeakyReLU\'>"
- is_instance: "<class \'tensorflow.python.keras._impl.keras.engine.base_layer.Layer\'>"
- is_instance: "<class \'tensorflow.python.training.checkpointable.CheckpointableBase\'>"
+ is_instance: "<class \'tensorflow.python.keras.layers.advanced_activations.LeakyReLU\'>"
+ is_instance: "<class \'tensorflow.python.keras.engine.base_layer.Layer\'>"
+ is_instance: "<class \'tensorflow.python.training.checkpointable.base.CheckpointableBase\'>"
is_instance: "<type \'object\'>"
member {
name: "activity_regularizer"
@@ -98,7 +98,7 @@ tf_class {
}
member_method {
name: "add_weight"
- argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'getter\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'True\', \'None\', \'None\', \'None\', \'None\'], "
+ argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'synchronization\', \'aggregation\'], varargs=None, keywords=kwargs, defaults=[\'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'VariableSynchronization.AUTO\', \'VariableAggregation.NONE\'], "
}
member_method {
name: "apply"
@@ -106,7 +106,7 @@ tf_class {
}
member_method {
name: "build"
- argspec: "args=[\'self\', \'_\'], varargs=None, keywords=None, defaults=None"
+ argspec: "args=[\'self\', \'input_shape\'], varargs=None, keywords=None, defaults=None"
}
member_method {
name: "call"
diff --git a/tensorflow/tools/api/golden/tensorflow.keras.layers.-locally-connected1-d.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-locally-connected1-d.pbtxt
index 2ee566d03b..5ad5990d7e 100644
--- a/tensorflow/tools/api/golden/tensorflow.keras.layers.-locally-connected1-d.pbtxt
+++ b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-locally-connected1-d.pbtxt
@@ -1,8 +1,8 @@
path: "tensorflow.keras.layers.LocallyConnected1D"
tf_class {
- is_instance: "<class \'tensorflow.python.keras._impl.keras.layers.local.LocallyConnected1D\'>"
- is_instance: "<class \'tensorflow.python.keras._impl.keras.engine.base_layer.Layer\'>"
- is_instance: "<class \'tensorflow.python.training.checkpointable.CheckpointableBase\'>"
+ is_instance: "<class \'tensorflow.python.keras.layers.local.LocallyConnected1D\'>"
+ is_instance: "<class \'tensorflow.python.keras.engine.base_layer.Layer\'>"
+ is_instance: "<class \'tensorflow.python.training.checkpointable.base.CheckpointableBase\'>"
is_instance: "<type \'object\'>"
member {
name: "activity_regularizer"
@@ -82,7 +82,7 @@ tf_class {
}
member_method {
name: "__init__"
- argspec: "args=[\'self\', \'filters\', \'kernel_size\', \'strides\', \'padding\', \'data_format\', \'activation\', \'use_bias\', \'kernel_initializer\', \'bias_initializer\', \'kernel_regularizer\', \'bias_regularizer\', \'activity_regularizer\', \'kernel_constraint\', \'bias_constraint\'], varargs=None, keywords=kwargs, defaults=[\'1\', \'valid\', \'None\', \'None\', \'True\', \'glorot_uniform\', \'zeros\', \'None\', \'None\', \'None\', \'None\', \'None\'], "
+ argspec: "args=[\'self\', \'filters\', \'kernel_size\', \'strides\', \'padding\', \'data_format\', \'activation\', \'use_bias\', \'kernel_initializer\', \'bias_initializer\', \'kernel_regularizer\', \'bias_regularizer\', \'activity_regularizer\', \'kernel_constraint\', \'bias_constraint\', \'implementation\'], varargs=None, keywords=kwargs, defaults=[\'1\', \'valid\', \'None\', \'None\', \'True\', \'glorot_uniform\', \'zeros\', \'None\', \'None\', \'None\', \'None\', \'None\', \'1\'], "
}
member_method {
name: "add_loss"
@@ -98,7 +98,7 @@ tf_class {
}
member_method {
name: "add_weight"
- argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'getter\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'True\', \'None\', \'None\', \'None\', \'None\'], "
+ argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'synchronization\', \'aggregation\'], varargs=None, keywords=kwargs, defaults=[\'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'VariableSynchronization.AUTO\', \'VariableAggregation.NONE\'], "
}
member_method {
name: "apply"
diff --git a/tensorflow/tools/api/golden/tensorflow.keras.layers.-locally-connected2-d.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-locally-connected2-d.pbtxt
index db0d0e816a..40d03369a5 100644
--- a/tensorflow/tools/api/golden/tensorflow.keras.layers.-locally-connected2-d.pbtxt
+++ b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-locally-connected2-d.pbtxt
@@ -1,8 +1,8 @@
path: "tensorflow.keras.layers.LocallyConnected2D"
tf_class {
- is_instance: "<class \'tensorflow.python.keras._impl.keras.layers.local.LocallyConnected2D\'>"
- is_instance: "<class \'tensorflow.python.keras._impl.keras.engine.base_layer.Layer\'>"
- is_instance: "<class \'tensorflow.python.training.checkpointable.CheckpointableBase\'>"
+ is_instance: "<class \'tensorflow.python.keras.layers.local.LocallyConnected2D\'>"
+ is_instance: "<class \'tensorflow.python.keras.engine.base_layer.Layer\'>"
+ is_instance: "<class \'tensorflow.python.training.checkpointable.base.CheckpointableBase\'>"
is_instance: "<type \'object\'>"
member {
name: "activity_regularizer"
@@ -82,7 +82,7 @@ tf_class {
}
member_method {
name: "__init__"
- argspec: "args=[\'self\', \'filters\', \'kernel_size\', \'strides\', \'padding\', \'data_format\', \'activation\', \'use_bias\', \'kernel_initializer\', \'bias_initializer\', \'kernel_regularizer\', \'bias_regularizer\', \'activity_regularizer\', \'kernel_constraint\', \'bias_constraint\'], varargs=None, keywords=kwargs, defaults=[\'(1, 1)\', \'valid\', \'None\', \'None\', \'True\', \'glorot_uniform\', \'zeros\', \'None\', \'None\', \'None\', \'None\', \'None\'], "
+ argspec: "args=[\'self\', \'filters\', \'kernel_size\', \'strides\', \'padding\', \'data_format\', \'activation\', \'use_bias\', \'kernel_initializer\', \'bias_initializer\', \'kernel_regularizer\', \'bias_regularizer\', \'activity_regularizer\', \'kernel_constraint\', \'bias_constraint\', \'implementation\'], varargs=None, keywords=kwargs, defaults=[\'(1, 1)\', \'valid\', \'None\', \'None\', \'True\', \'glorot_uniform\', \'zeros\', \'None\', \'None\', \'None\', \'None\', \'None\', \'1\'], "
}
member_method {
name: "add_loss"
@@ -98,7 +98,7 @@ tf_class {
}
member_method {
name: "add_weight"
- argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'getter\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'True\', \'None\', \'None\', \'None\', \'None\'], "
+ argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'synchronization\', \'aggregation\'], varargs=None, keywords=kwargs, defaults=[\'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'VariableSynchronization.AUTO\', \'VariableAggregation.NONE\'], "
}
member_method {
name: "apply"
diff --git a/tensorflow/tools/api/golden/tensorflow.keras.layers.-masking.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-masking.pbtxt
index 82008b89d0..86666b51bb 100644
--- a/tensorflow/tools/api/golden/tensorflow.keras.layers.-masking.pbtxt
+++ b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-masking.pbtxt
@@ -1,8 +1,8 @@
path: "tensorflow.keras.layers.Masking"
tf_class {
- is_instance: "<class \'tensorflow.python.keras._impl.keras.layers.core.Masking\'>"
- is_instance: "<class \'tensorflow.python.keras._impl.keras.engine.base_layer.Layer\'>"
- is_instance: "<class \'tensorflow.python.training.checkpointable.CheckpointableBase\'>"
+ is_instance: "<class \'tensorflow.python.keras.layers.core.Masking\'>"
+ is_instance: "<class \'tensorflow.python.keras.engine.base_layer.Layer\'>"
+ is_instance: "<class \'tensorflow.python.training.checkpointable.base.CheckpointableBase\'>"
is_instance: "<type \'object\'>"
member {
name: "activity_regularizer"
@@ -98,7 +98,7 @@ tf_class {
}
member_method {
name: "add_weight"
- argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'getter\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'True\', \'None\', \'None\', \'None\', \'None\'], "
+ argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'synchronization\', \'aggregation\'], varargs=None, keywords=kwargs, defaults=[\'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'VariableSynchronization.AUTO\', \'VariableAggregation.NONE\'], "
}
member_method {
name: "apply"
@@ -106,7 +106,7 @@ tf_class {
}
member_method {
name: "build"
- argspec: "args=[\'self\', \'_\'], varargs=None, keywords=None, defaults=None"
+ argspec: "args=[\'self\', \'input_shape\'], varargs=None, keywords=None, defaults=None"
}
member_method {
name: "call"
diff --git a/tensorflow/tools/api/golden/tensorflow.keras.layers.-max-pool1-d.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-max-pool1-d.pbtxt
index 31a34a17d0..238d96cca6 100644
--- a/tensorflow/tools/api/golden/tensorflow.keras.layers.-max-pool1-d.pbtxt
+++ b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-max-pool1-d.pbtxt
@@ -1,9 +1,9 @@
path: "tensorflow.keras.layers.MaxPool1D"
tf_class {
- is_instance: "<class \'tensorflow.python.keras._impl.keras.layers.pooling.MaxPooling1D\'>"
- is_instance: "<class \'tensorflow.python.keras._impl.keras.layers.pooling.Pooling1D\'>"
- is_instance: "<class \'tensorflow.python.keras._impl.keras.engine.base_layer.Layer\'>"
- is_instance: "<class \'tensorflow.python.training.checkpointable.CheckpointableBase\'>"
+ is_instance: "<class \'tensorflow.python.keras.layers.pooling.MaxPooling1D\'>"
+ is_instance: "<class \'tensorflow.python.keras.layers.pooling.Pooling1D\'>"
+ is_instance: "<class \'tensorflow.python.keras.engine.base_layer.Layer\'>"
+ is_instance: "<class \'tensorflow.python.training.checkpointable.base.CheckpointableBase\'>"
is_instance: "<type \'object\'>"
member {
name: "activity_regularizer"
@@ -99,7 +99,7 @@ tf_class {
}
member_method {
name: "add_weight"
- argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'getter\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'True\', \'None\', \'None\', \'None\', \'None\'], "
+ argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'synchronization\', \'aggregation\'], varargs=None, keywords=kwargs, defaults=[\'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'VariableSynchronization.AUTO\', \'VariableAggregation.NONE\'], "
}
member_method {
name: "apply"
@@ -107,7 +107,7 @@ tf_class {
}
member_method {
name: "build"
- argspec: "args=[\'self\', \'_\'], varargs=None, keywords=None, defaults=None"
+ argspec: "args=[\'self\', \'input_shape\'], varargs=None, keywords=None, defaults=None"
}
member_method {
name: "call"
diff --git a/tensorflow/tools/api/golden/tensorflow.keras.layers.-max-pool2-d.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-max-pool2-d.pbtxt
index 70d24ac75c..85f23df671 100644
--- a/tensorflow/tools/api/golden/tensorflow.keras.layers.-max-pool2-d.pbtxt
+++ b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-max-pool2-d.pbtxt
@@ -1,9 +1,9 @@
path: "tensorflow.keras.layers.MaxPool2D"
tf_class {
- is_instance: "<class \'tensorflow.python.keras._impl.keras.layers.pooling.MaxPooling2D\'>"
- is_instance: "<class \'tensorflow.python.keras._impl.keras.layers.pooling.Pooling2D\'>"
- is_instance: "<class \'tensorflow.python.keras._impl.keras.engine.base_layer.Layer\'>"
- is_instance: "<class \'tensorflow.python.training.checkpointable.CheckpointableBase\'>"
+ is_instance: "<class \'tensorflow.python.keras.layers.pooling.MaxPooling2D\'>"
+ is_instance: "<class \'tensorflow.python.keras.layers.pooling.Pooling2D\'>"
+ is_instance: "<class \'tensorflow.python.keras.engine.base_layer.Layer\'>"
+ is_instance: "<class \'tensorflow.python.training.checkpointable.base.CheckpointableBase\'>"
is_instance: "<type \'object\'>"
member {
name: "activity_regularizer"
@@ -99,7 +99,7 @@ tf_class {
}
member_method {
name: "add_weight"
- argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'getter\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'True\', \'None\', \'None\', \'None\', \'None\'], "
+ argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'synchronization\', \'aggregation\'], varargs=None, keywords=kwargs, defaults=[\'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'VariableSynchronization.AUTO\', \'VariableAggregation.NONE\'], "
}
member_method {
name: "apply"
@@ -107,7 +107,7 @@ tf_class {
}
member_method {
name: "build"
- argspec: "args=[\'self\', \'_\'], varargs=None, keywords=None, defaults=None"
+ argspec: "args=[\'self\', \'input_shape\'], varargs=None, keywords=None, defaults=None"
}
member_method {
name: "call"
diff --git a/tensorflow/tools/api/golden/tensorflow.keras.layers.-max-pool3-d.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-max-pool3-d.pbtxt
index 55b16564b3..235806b965 100644
--- a/tensorflow/tools/api/golden/tensorflow.keras.layers.-max-pool3-d.pbtxt
+++ b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-max-pool3-d.pbtxt
@@ -1,9 +1,9 @@
path: "tensorflow.keras.layers.MaxPool3D"
tf_class {
- is_instance: "<class \'tensorflow.python.keras._impl.keras.layers.pooling.MaxPooling3D\'>"
- is_instance: "<class \'tensorflow.python.keras._impl.keras.layers.pooling.Pooling3D\'>"
- is_instance: "<class \'tensorflow.python.keras._impl.keras.engine.base_layer.Layer\'>"
- is_instance: "<class \'tensorflow.python.training.checkpointable.CheckpointableBase\'>"
+ is_instance: "<class \'tensorflow.python.keras.layers.pooling.MaxPooling3D\'>"
+ is_instance: "<class \'tensorflow.python.keras.layers.pooling.Pooling3D\'>"
+ is_instance: "<class \'tensorflow.python.keras.engine.base_layer.Layer\'>"
+ is_instance: "<class \'tensorflow.python.training.checkpointable.base.CheckpointableBase\'>"
is_instance: "<type \'object\'>"
member {
name: "activity_regularizer"
@@ -99,7 +99,7 @@ tf_class {
}
member_method {
name: "add_weight"
- argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'getter\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'True\', \'None\', \'None\', \'None\', \'None\'], "
+ argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'synchronization\', \'aggregation\'], varargs=None, keywords=kwargs, defaults=[\'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'VariableSynchronization.AUTO\', \'VariableAggregation.NONE\'], "
}
member_method {
name: "apply"
@@ -107,7 +107,7 @@ tf_class {
}
member_method {
name: "build"
- argspec: "args=[\'self\', \'_\'], varargs=None, keywords=None, defaults=None"
+ argspec: "args=[\'self\', \'input_shape\'], varargs=None, keywords=None, defaults=None"
}
member_method {
name: "call"
diff --git a/tensorflow/tools/api/golden/tensorflow.keras.layers.-max-pooling1-d.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-max-pooling1-d.pbtxt
index a230b74c38..4a45bf7997 100644
--- a/tensorflow/tools/api/golden/tensorflow.keras.layers.-max-pooling1-d.pbtxt
+++ b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-max-pooling1-d.pbtxt
@@ -1,9 +1,9 @@
path: "tensorflow.keras.layers.MaxPooling1D"
tf_class {
- is_instance: "<class \'tensorflow.python.keras._impl.keras.layers.pooling.MaxPooling1D\'>"
- is_instance: "<class \'tensorflow.python.keras._impl.keras.layers.pooling.Pooling1D\'>"
- is_instance: "<class \'tensorflow.python.keras._impl.keras.engine.base_layer.Layer\'>"
- is_instance: "<class \'tensorflow.python.training.checkpointable.CheckpointableBase\'>"
+ is_instance: "<class \'tensorflow.python.keras.layers.pooling.MaxPooling1D\'>"
+ is_instance: "<class \'tensorflow.python.keras.layers.pooling.Pooling1D\'>"
+ is_instance: "<class \'tensorflow.python.keras.engine.base_layer.Layer\'>"
+ is_instance: "<class \'tensorflow.python.training.checkpointable.base.CheckpointableBase\'>"
is_instance: "<type \'object\'>"
member {
name: "activity_regularizer"
@@ -99,7 +99,7 @@ tf_class {
}
member_method {
name: "add_weight"
- argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'getter\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'True\', \'None\', \'None\', \'None\', \'None\'], "
+ argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'synchronization\', \'aggregation\'], varargs=None, keywords=kwargs, defaults=[\'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'VariableSynchronization.AUTO\', \'VariableAggregation.NONE\'], "
}
member_method {
name: "apply"
@@ -107,7 +107,7 @@ tf_class {
}
member_method {
name: "build"
- argspec: "args=[\'self\', \'_\'], varargs=None, keywords=None, defaults=None"
+ argspec: "args=[\'self\', \'input_shape\'], varargs=None, keywords=None, defaults=None"
}
member_method {
name: "call"
diff --git a/tensorflow/tools/api/golden/tensorflow.keras.layers.-max-pooling2-d.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-max-pooling2-d.pbtxt
index d98f7c39f5..fda2562fc8 100644
--- a/tensorflow/tools/api/golden/tensorflow.keras.layers.-max-pooling2-d.pbtxt
+++ b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-max-pooling2-d.pbtxt
@@ -1,9 +1,9 @@
path: "tensorflow.keras.layers.MaxPooling2D"
tf_class {
- is_instance: "<class \'tensorflow.python.keras._impl.keras.layers.pooling.MaxPooling2D\'>"
- is_instance: "<class \'tensorflow.python.keras._impl.keras.layers.pooling.Pooling2D\'>"
- is_instance: "<class \'tensorflow.python.keras._impl.keras.engine.base_layer.Layer\'>"
- is_instance: "<class \'tensorflow.python.training.checkpointable.CheckpointableBase\'>"
+ is_instance: "<class \'tensorflow.python.keras.layers.pooling.MaxPooling2D\'>"
+ is_instance: "<class \'tensorflow.python.keras.layers.pooling.Pooling2D\'>"
+ is_instance: "<class \'tensorflow.python.keras.engine.base_layer.Layer\'>"
+ is_instance: "<class \'tensorflow.python.training.checkpointable.base.CheckpointableBase\'>"
is_instance: "<type \'object\'>"
member {
name: "activity_regularizer"
@@ -99,7 +99,7 @@ tf_class {
}
member_method {
name: "add_weight"
- argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'getter\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'True\', \'None\', \'None\', \'None\', \'None\'], "
+ argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'synchronization\', \'aggregation\'], varargs=None, keywords=kwargs, defaults=[\'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'VariableSynchronization.AUTO\', \'VariableAggregation.NONE\'], "
}
member_method {
name: "apply"
@@ -107,7 +107,7 @@ tf_class {
}
member_method {
name: "build"
- argspec: "args=[\'self\', \'_\'], varargs=None, keywords=None, defaults=None"
+ argspec: "args=[\'self\', \'input_shape\'], varargs=None, keywords=None, defaults=None"
}
member_method {
name: "call"
diff --git a/tensorflow/tools/api/golden/tensorflow.keras.layers.-max-pooling3-d.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-max-pooling3-d.pbtxt
index b2e96a4203..71d2d09a8d 100644
--- a/tensorflow/tools/api/golden/tensorflow.keras.layers.-max-pooling3-d.pbtxt
+++ b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-max-pooling3-d.pbtxt
@@ -1,9 +1,9 @@
path: "tensorflow.keras.layers.MaxPooling3D"
tf_class {
- is_instance: "<class \'tensorflow.python.keras._impl.keras.layers.pooling.MaxPooling3D\'>"
- is_instance: "<class \'tensorflow.python.keras._impl.keras.layers.pooling.Pooling3D\'>"
- is_instance: "<class \'tensorflow.python.keras._impl.keras.engine.base_layer.Layer\'>"
- is_instance: "<class \'tensorflow.python.training.checkpointable.CheckpointableBase\'>"
+ is_instance: "<class \'tensorflow.python.keras.layers.pooling.MaxPooling3D\'>"
+ is_instance: "<class \'tensorflow.python.keras.layers.pooling.Pooling3D\'>"
+ is_instance: "<class \'tensorflow.python.keras.engine.base_layer.Layer\'>"
+ is_instance: "<class \'tensorflow.python.training.checkpointable.base.CheckpointableBase\'>"
is_instance: "<type \'object\'>"
member {
name: "activity_regularizer"
@@ -99,7 +99,7 @@ tf_class {
}
member_method {
name: "add_weight"
- argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'getter\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'True\', \'None\', \'None\', \'None\', \'None\'], "
+ argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'synchronization\', \'aggregation\'], varargs=None, keywords=kwargs, defaults=[\'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'VariableSynchronization.AUTO\', \'VariableAggregation.NONE\'], "
}
member_method {
name: "apply"
@@ -107,7 +107,7 @@ tf_class {
}
member_method {
name: "build"
- argspec: "args=[\'self\', \'_\'], varargs=None, keywords=None, defaults=None"
+ argspec: "args=[\'self\', \'input_shape\'], varargs=None, keywords=None, defaults=None"
}
member_method {
name: "call"
diff --git a/tensorflow/tools/api/golden/tensorflow.keras.layers.-maximum.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-maximum.pbtxt
index 0c45bbdf17..12949b39a6 100644
--- a/tensorflow/tools/api/golden/tensorflow.keras.layers.-maximum.pbtxt
+++ b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-maximum.pbtxt
@@ -1,9 +1,9 @@
path: "tensorflow.keras.layers.Maximum"
tf_class {
- is_instance: "<class \'tensorflow.python.keras._impl.keras.layers.merge.Maximum\'>"
- is_instance: "<class \'tensorflow.python.keras._impl.keras.layers.merge._Merge\'>"
- is_instance: "<class \'tensorflow.python.keras._impl.keras.engine.base_layer.Layer\'>"
- is_instance: "<class \'tensorflow.python.training.checkpointable.CheckpointableBase\'>"
+ is_instance: "<class \'tensorflow.python.keras.layers.merge.Maximum\'>"
+ is_instance: "<class \'tensorflow.python.keras.layers.merge._Merge\'>"
+ is_instance: "<class \'tensorflow.python.keras.engine.base_layer.Layer\'>"
+ is_instance: "<class \'tensorflow.python.training.checkpointable.base.CheckpointableBase\'>"
is_instance: "<type \'object\'>"
member {
name: "activity_regularizer"
@@ -99,7 +99,7 @@ tf_class {
}
member_method {
name: "add_weight"
- argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'getter\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'True\', \'None\', \'None\', \'None\', \'None\'], "
+ argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'synchronization\', \'aggregation\'], varargs=None, keywords=kwargs, defaults=[\'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'VariableSynchronization.AUTO\', \'VariableAggregation.NONE\'], "
}
member_method {
name: "apply"
diff --git a/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-minimum.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-minimum.pbtxt
new file mode 100644
index 0000000000..ab16d0021e
--- /dev/null
+++ b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-minimum.pbtxt
@@ -0,0 +1,176 @@
+path: "tensorflow.keras.layers.Minimum"
+tf_class {
+ is_instance: "<class \'tensorflow.python.keras.layers.merge.Minimum\'>"
+ is_instance: "<class \'tensorflow.python.keras.layers.merge._Merge\'>"
+ is_instance: "<class \'tensorflow.python.keras.engine.base_layer.Layer\'>"
+ is_instance: "<class \'tensorflow.python.training.checkpointable.base.CheckpointableBase\'>"
+ is_instance: "<type \'object\'>"
+ member {
+ name: "activity_regularizer"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "dtype"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "inbound_nodes"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "input"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "input_mask"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "input_shape"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "losses"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "name"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "non_trainable_variables"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "non_trainable_weights"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "outbound_nodes"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "output"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "output_mask"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "output_shape"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "trainable_variables"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "trainable_weights"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "updates"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "variables"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "weights"
+ mtype: "<type \'property\'>"
+ }
+ member_method {
+ name: "__init__"
+ argspec: "args=[\'self\'], varargs=None, keywords=kwargs, defaults=None"
+ }
+ member_method {
+ name: "add_loss"
+ argspec: "args=[\'self\', \'losses\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "add_update"
+ argspec: "args=[\'self\', \'updates\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "add_variable"
+ argspec: "args=[\'self\'], varargs=args, keywords=kwargs, defaults=None"
+ }
+ member_method {
+ name: "add_weight"
+ argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'synchronization\', \'aggregation\'], varargs=None, keywords=kwargs, defaults=[\'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'VariableSynchronization.AUTO\', \'VariableAggregation.NONE\'], "
+ }
+ member_method {
+ name: "apply"
+ argspec: "args=[\'self\', \'inputs\'], varargs=args, keywords=kwargs, defaults=None"
+ }
+ member_method {
+ name: "build"
+ argspec: "args=[\'instance\', \'input_shape\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "call"
+ argspec: "args=[\'self\', \'inputs\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "compute_mask"
+ argspec: "args=[\'self\', \'inputs\', \'mask\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "compute_output_shape"
+ argspec: "args=[\'instance\', \'input_shape\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "count_params"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "from_config"
+ argspec: "args=[\'cls\', \'config\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_config"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_input_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_input_mask_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_input_shape_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_losses_for"
+ argspec: "args=[\'self\', \'inputs\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_output_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_output_mask_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_output_shape_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_updates_for"
+ argspec: "args=[\'self\', \'inputs\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_weights"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "set_weights"
+ argspec: "args=[\'self\', \'weights\'], varargs=None, keywords=None, defaults=None"
+ }
+}
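[Editor's note: the new golden above documents tf.keras.layers.Minimum, an element-wise merge layer applied to a list of same-shaped tensors. A hedged usage sketch; input shapes are illustrative only:

import tensorflow as tf

a = tf.keras.layers.Input(shape=(4,))
b = tf.keras.layers.Input(shape=(4,))
minimum = tf.keras.layers.Minimum()([a, b])  # element-wise minimum of the two inputs
model = tf.keras.models.Model(inputs=[a, b], outputs=minimum)
]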
diff --git a/tensorflow/tools/api/golden/tensorflow.keras.layers.-multiply.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-multiply.pbtxt
index 6423d83418..61ccbf5962 100644
--- a/tensorflow/tools/api/golden/tensorflow.keras.layers.-multiply.pbtxt
+++ b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-multiply.pbtxt
@@ -1,9 +1,9 @@
path: "tensorflow.keras.layers.Multiply"
tf_class {
- is_instance: "<class \'tensorflow.python.keras._impl.keras.layers.merge.Multiply\'>"
- is_instance: "<class \'tensorflow.python.keras._impl.keras.layers.merge._Merge\'>"
- is_instance: "<class \'tensorflow.python.keras._impl.keras.engine.base_layer.Layer\'>"
- is_instance: "<class \'tensorflow.python.training.checkpointable.CheckpointableBase\'>"
+ is_instance: "<class \'tensorflow.python.keras.layers.merge.Multiply\'>"
+ is_instance: "<class \'tensorflow.python.keras.layers.merge._Merge\'>"
+ is_instance: "<class \'tensorflow.python.keras.engine.base_layer.Layer\'>"
+ is_instance: "<class \'tensorflow.python.training.checkpointable.base.CheckpointableBase\'>"
is_instance: "<type \'object\'>"
member {
name: "activity_regularizer"
@@ -99,7 +99,7 @@ tf_class {
}
member_method {
name: "add_weight"
- argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'getter\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'True\', \'None\', \'None\', \'None\', \'None\'], "
+ argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'synchronization\', \'aggregation\'], varargs=None, keywords=kwargs, defaults=[\'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'VariableSynchronization.AUTO\', \'VariableAggregation.NONE\'], "
}
member_method {
name: "apply"
diff --git a/tensorflow/tools/api/golden/tensorflow.keras.layers.-p-re-l-u.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-p-re-l-u.pbtxt
index 6e17081375..ce2320d703 100644
--- a/tensorflow/tools/api/golden/tensorflow.keras.layers.-p-re-l-u.pbtxt
+++ b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-p-re-l-u.pbtxt
@@ -1,8 +1,8 @@
path: "tensorflow.keras.layers.PReLU"
tf_class {
- is_instance: "<class \'tensorflow.python.keras._impl.keras.layers.advanced_activations.PReLU\'>"
- is_instance: "<class \'tensorflow.python.keras._impl.keras.engine.base_layer.Layer\'>"
- is_instance: "<class \'tensorflow.python.training.checkpointable.CheckpointableBase\'>"
+ is_instance: "<class \'tensorflow.python.keras.layers.advanced_activations.PReLU\'>"
+ is_instance: "<class \'tensorflow.python.keras.engine.base_layer.Layer\'>"
+ is_instance: "<class \'tensorflow.python.training.checkpointable.base.CheckpointableBase\'>"
is_instance: "<type \'object\'>"
member {
name: "activity_regularizer"
@@ -98,7 +98,7 @@ tf_class {
}
member_method {
name: "add_weight"
- argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'getter\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'True\', \'None\', \'None\', \'None\', \'None\'], "
+ argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'synchronization\', \'aggregation\'], varargs=None, keywords=kwargs, defaults=[\'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'VariableSynchronization.AUTO\', \'VariableAggregation.NONE\'], "
}
member_method {
name: "apply"
diff --git a/tensorflow/tools/api/golden/tensorflow.keras.layers.-permute.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-permute.pbtxt
index d01d371da5..69848af8cf 100644
--- a/tensorflow/tools/api/golden/tensorflow.keras.layers.-permute.pbtxt
+++ b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-permute.pbtxt
@@ -1,8 +1,8 @@
path: "tensorflow.keras.layers.Permute"
tf_class {
- is_instance: "<class \'tensorflow.python.keras._impl.keras.layers.core.Permute\'>"
- is_instance: "<class \'tensorflow.python.keras._impl.keras.engine.base_layer.Layer\'>"
- is_instance: "<class \'tensorflow.python.training.checkpointable.CheckpointableBase\'>"
+ is_instance: "<class \'tensorflow.python.keras.layers.core.Permute\'>"
+ is_instance: "<class \'tensorflow.python.keras.engine.base_layer.Layer\'>"
+ is_instance: "<class \'tensorflow.python.training.checkpointable.base.CheckpointableBase\'>"
is_instance: "<type \'object\'>"
member {
name: "activity_regularizer"
@@ -98,7 +98,7 @@ tf_class {
}
member_method {
name: "add_weight"
- argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'getter\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'True\', \'None\', \'None\', \'None\', \'None\'], "
+ argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'synchronization\', \'aggregation\'], varargs=None, keywords=kwargs, defaults=[\'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'VariableSynchronization.AUTO\', \'VariableAggregation.NONE\'], "
}
member_method {
name: "apply"
@@ -106,7 +106,7 @@ tf_class {
}
member_method {
name: "build"
- argspec: "args=[\'self\', \'_\'], varargs=None, keywords=None, defaults=None"
+ argspec: "args=[\'self\', \'input_shape\'], varargs=None, keywords=None, defaults=None"
}
member_method {
name: "call"
diff --git a/tensorflow/tools/api/golden/tensorflow.keras.layers.-r-n-n.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-r-n-n.pbtxt
index d3f5508640..2b6e8af11d 100644
--- a/tensorflow/tools/api/golden/tensorflow.keras.layers.-r-n-n.pbtxt
+++ b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-r-n-n.pbtxt
@@ -1,8 +1,8 @@
path: "tensorflow.keras.layers.RNN"
tf_class {
- is_instance: "<class \'tensorflow.python.keras._impl.keras.layers.recurrent.RNN\'>"
- is_instance: "<class \'tensorflow.python.keras._impl.keras.engine.base_layer.Layer\'>"
- is_instance: "<class \'tensorflow.python.training.checkpointable.CheckpointableBase\'>"
+ is_instance: "<class \'tensorflow.python.keras.layers.recurrent.RNN\'>"
+ is_instance: "<class \'tensorflow.python.keras.engine.base_layer.Layer\'>"
+ is_instance: "<class \'tensorflow.python.training.checkpointable.base.CheckpointableBase\'>"
is_instance: "<type \'object\'>"
member {
name: "activity_regularizer"
@@ -102,7 +102,7 @@ tf_class {
}
member_method {
name: "add_weight"
- argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'getter\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'True\', \'None\', \'None\', \'None\', \'None\'], "
+ argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'synchronization\', \'aggregation\'], varargs=None, keywords=kwargs, defaults=[\'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'VariableSynchronization.AUTO\', \'VariableAggregation.NONE\'], "
}
member_method {
name: "apply"
diff --git a/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-re-l-u.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-re-l-u.pbtxt
new file mode 100644
index 0000000000..413f45f018
--- /dev/null
+++ b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-re-l-u.pbtxt
@@ -0,0 +1,175 @@
+path: "tensorflow.keras.layers.ReLU"
+tf_class {
+ is_instance: "<class \'tensorflow.python.keras.layers.advanced_activations.ReLU\'>"
+ is_instance: "<class \'tensorflow.python.keras.engine.base_layer.Layer\'>"
+ is_instance: "<class \'tensorflow.python.training.checkpointable.base.CheckpointableBase\'>"
+ is_instance: "<type \'object\'>"
+ member {
+ name: "activity_regularizer"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "dtype"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "inbound_nodes"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "input"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "input_mask"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "input_shape"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "losses"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "name"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "non_trainable_variables"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "non_trainable_weights"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "outbound_nodes"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "output"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "output_mask"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "output_shape"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "trainable_variables"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "trainable_weights"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "updates"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "variables"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "weights"
+ mtype: "<type \'property\'>"
+ }
+ member_method {
+ name: "__init__"
+ argspec: "args=[\'self\', \'max_value\', \'negative_slope\', \'threshold\'], varargs=None, keywords=kwargs, defaults=[\'None\', \'0\', \'0\'], "
+ }
+ member_method {
+ name: "add_loss"
+ argspec: "args=[\'self\', \'losses\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "add_update"
+ argspec: "args=[\'self\', \'updates\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "add_variable"
+ argspec: "args=[\'self\'], varargs=args, keywords=kwargs, defaults=None"
+ }
+ member_method {
+ name: "add_weight"
+ argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'synchronization\', \'aggregation\'], varargs=None, keywords=kwargs, defaults=[\'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'VariableSynchronization.AUTO\', \'VariableAggregation.NONE\'], "
+ }
+ member_method {
+ name: "apply"
+ argspec: "args=[\'self\', \'inputs\'], varargs=args, keywords=kwargs, defaults=None"
+ }
+ member_method {
+ name: "build"
+ argspec: "args=[\'self\', \'input_shape\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "call"
+ argspec: "args=[\'self\', \'inputs\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "compute_mask"
+ argspec: "args=[\'self\', \'inputs\', \'mask\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "compute_output_shape"
+ argspec: "args=[\'instance\', \'input_shape\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "count_params"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "from_config"
+ argspec: "args=[\'cls\', \'config\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_config"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_input_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_input_mask_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_input_shape_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_losses_for"
+ argspec: "args=[\'self\', \'inputs\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_output_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_output_mask_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_output_shape_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_updates_for"
+ argspec: "args=[\'self\', \'inputs\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_weights"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "set_weights"
+ argspec: "args=[\'self\', \'weights\'], varargs=None, keywords=None, defaults=None"
+ }
+}
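[Editor's note: the ReLU golden above records a constructor taking max_value, negative_slope and threshold (defaults None, 0, 0). A small sketch of how those arguments shape the activation, assuming this TF 1.x-era tf.keras API; the input values are illustrative only:

import tensorflow as tf

# Output is clipped at max_value; values below threshold are scaled by negative_slope.
relu = tf.keras.layers.ReLU(max_value=6.0, negative_slope=0.1, threshold=0.0)
x = tf.constant([[-2.0, 0.5, 10.0]])
y = relu(x)  # element-wise result [[-0.2, 0.5, 6.0]] once evaluated
             # (in a session, or with eager execution enabled)
]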
diff --git a/tensorflow/tools/api/golden/tensorflow.keras.layers.-repeat-vector.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-repeat-vector.pbtxt
index 44e1007f54..9c61ff6027 100644
--- a/tensorflow/tools/api/golden/tensorflow.keras.layers.-repeat-vector.pbtxt
+++ b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-repeat-vector.pbtxt
@@ -1,8 +1,8 @@
path: "tensorflow.keras.layers.RepeatVector"
tf_class {
- is_instance: "<class \'tensorflow.python.keras._impl.keras.layers.core.RepeatVector\'>"
- is_instance: "<class \'tensorflow.python.keras._impl.keras.engine.base_layer.Layer\'>"
- is_instance: "<class \'tensorflow.python.training.checkpointable.CheckpointableBase\'>"
+ is_instance: "<class \'tensorflow.python.keras.layers.core.RepeatVector\'>"
+ is_instance: "<class \'tensorflow.python.keras.engine.base_layer.Layer\'>"
+ is_instance: "<class \'tensorflow.python.training.checkpointable.base.CheckpointableBase\'>"
is_instance: "<type \'object\'>"
member {
name: "activity_regularizer"
@@ -98,7 +98,7 @@ tf_class {
}
member_method {
name: "add_weight"
- argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'getter\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'True\', \'None\', \'None\', \'None\', \'None\'], "
+ argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'synchronization\', \'aggregation\'], varargs=None, keywords=kwargs, defaults=[\'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'VariableSynchronization.AUTO\', \'VariableAggregation.NONE\'], "
}
member_method {
name: "apply"
@@ -106,7 +106,7 @@ tf_class {
}
member_method {
name: "build"
- argspec: "args=[\'self\', \'_\'], varargs=None, keywords=None, defaults=None"
+ argspec: "args=[\'self\', \'input_shape\'], varargs=None, keywords=None, defaults=None"
}
member_method {
name: "call"
diff --git a/tensorflow/tools/api/golden/tensorflow.keras.layers.-reshape.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-reshape.pbtxt
index 8fc3ec3331..baa91804c4 100644
--- a/tensorflow/tools/api/golden/tensorflow.keras.layers.-reshape.pbtxt
+++ b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-reshape.pbtxt
@@ -1,8 +1,8 @@
path: "tensorflow.keras.layers.Reshape"
tf_class {
- is_instance: "<class \'tensorflow.python.keras._impl.keras.layers.core.Reshape\'>"
- is_instance: "<class \'tensorflow.python.keras._impl.keras.engine.base_layer.Layer\'>"
- is_instance: "<class \'tensorflow.python.training.checkpointable.CheckpointableBase\'>"
+ is_instance: "<class \'tensorflow.python.keras.layers.core.Reshape\'>"
+ is_instance: "<class \'tensorflow.python.keras.engine.base_layer.Layer\'>"
+ is_instance: "<class \'tensorflow.python.training.checkpointable.base.CheckpointableBase\'>"
is_instance: "<type \'object\'>"
member {
name: "activity_regularizer"
@@ -98,7 +98,7 @@ tf_class {
}
member_method {
name: "add_weight"
- argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'getter\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'True\', \'None\', \'None\', \'None\', \'None\'], "
+ argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'synchronization\', \'aggregation\'], varargs=None, keywords=kwargs, defaults=[\'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'VariableSynchronization.AUTO\', \'VariableAggregation.NONE\'], "
}
member_method {
name: "apply"
@@ -106,7 +106,7 @@ tf_class {
}
member_method {
name: "build"
- argspec: "args=[\'self\', \'_\'], varargs=None, keywords=None, defaults=None"
+ argspec: "args=[\'self\', \'input_shape\'], varargs=None, keywords=None, defaults=None"
}
member_method {
name: "call"
diff --git a/tensorflow/tools/api/golden/tensorflow.keras.layers.-separable-conv1-d.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-separable-conv1-d.pbtxt
index 457d277495..15a5d6ac9e 100644
--- a/tensorflow/tools/api/golden/tensorflow.keras.layers.-separable-conv1-d.pbtxt
+++ b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-separable-conv1-d.pbtxt
@@ -1,10 +1,10 @@
path: "tensorflow.keras.layers.SeparableConv1D"
tf_class {
- is_instance: "<class \'tensorflow.python.keras._impl.keras.layers.convolutional.SeparableConv1D\'>"
- is_instance: "<class \'tensorflow.python.keras._impl.keras.layers.convolutional.SeparableConv\'>"
- is_instance: "<class \'tensorflow.python.keras._impl.keras.layers.convolutional.Conv\'>"
- is_instance: "<class \'tensorflow.python.keras._impl.keras.engine.base_layer.Layer\'>"
- is_instance: "<class \'tensorflow.python.training.checkpointable.CheckpointableBase\'>"
+ is_instance: "<class \'tensorflow.python.keras.layers.convolutional.SeparableConv1D\'>"
+ is_instance: "<class \'tensorflow.python.keras.layers.convolutional.SeparableConv\'>"
+ is_instance: "<class \'tensorflow.python.keras.layers.convolutional.Conv\'>"
+ is_instance: "<class \'tensorflow.python.keras.engine.base_layer.Layer\'>"
+ is_instance: "<class \'tensorflow.python.training.checkpointable.base.CheckpointableBase\'>"
is_instance: "<type \'object\'>"
member {
name: "activity_regularizer"
@@ -100,7 +100,7 @@ tf_class {
}
member_method {
name: "add_weight"
- argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'getter\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'True\', \'None\', \'None\', \'None\', \'None\'], "
+ argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'synchronization\', \'aggregation\'], varargs=None, keywords=kwargs, defaults=[\'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'VariableSynchronization.AUTO\', \'VariableAggregation.NONE\'], "
}
member_method {
name: "apply"
diff --git a/tensorflow/tools/api/golden/tensorflow.keras.layers.-separable-conv2-d.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-separable-conv2-d.pbtxt
index 54eda8ee21..be43bd5b3c 100644
--- a/tensorflow/tools/api/golden/tensorflow.keras.layers.-separable-conv2-d.pbtxt
+++ b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-separable-conv2-d.pbtxt
@@ -1,10 +1,10 @@
path: "tensorflow.keras.layers.SeparableConv2D"
tf_class {
- is_instance: "<class \'tensorflow.python.keras._impl.keras.layers.convolutional.SeparableConv2D\'>"
- is_instance: "<class \'tensorflow.python.keras._impl.keras.layers.convolutional.SeparableConv\'>"
- is_instance: "<class \'tensorflow.python.keras._impl.keras.layers.convolutional.Conv\'>"
- is_instance: "<class \'tensorflow.python.keras._impl.keras.engine.base_layer.Layer\'>"
- is_instance: "<class \'tensorflow.python.training.checkpointable.CheckpointableBase\'>"
+ is_instance: "<class \'tensorflow.python.keras.layers.convolutional.SeparableConv2D\'>"
+ is_instance: "<class \'tensorflow.python.keras.layers.convolutional.SeparableConv\'>"
+ is_instance: "<class \'tensorflow.python.keras.layers.convolutional.Conv\'>"
+ is_instance: "<class \'tensorflow.python.keras.engine.base_layer.Layer\'>"
+ is_instance: "<class \'tensorflow.python.training.checkpointable.base.CheckpointableBase\'>"
is_instance: "<type \'object\'>"
member {
name: "activity_regularizer"
@@ -84,7 +84,7 @@ tf_class {
}
member_method {
name: "__init__"
- argspec: "args=[\'self\', \'filters\', \'kernel_size\', \'strides\', \'padding\', \'data_format\', \'dilation_rate\', \'depth_multiplier\', \'activation\', \'use_bias\', \'depthwise_initializer\', \'pointwise_initializer\', \'bias_initializer\', \'depthwise_regularizer\', \'pointwise_regularizer\', \'bias_regularizer\', \'activity_regularizer\', \'depthwise_constraint\', \'pointwise_constraint\', \'bias_constraint\'], varargs=None, keywords=kwargs, defaults=[\'(1, 1)\', \'valid\', \'None\', \'1\', \'1\', \'None\', \'True\', \'glorot_uniform\', \'glorot_uniform\', \'zeros\', \'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'None\'], "
+ argspec: "args=[\'self\', \'filters\', \'kernel_size\', \'strides\', \'padding\', \'data_format\', \'dilation_rate\', \'depth_multiplier\', \'activation\', \'use_bias\', \'depthwise_initializer\', \'pointwise_initializer\', \'bias_initializer\', \'depthwise_regularizer\', \'pointwise_regularizer\', \'bias_regularizer\', \'activity_regularizer\', \'depthwise_constraint\', \'pointwise_constraint\', \'bias_constraint\'], varargs=None, keywords=kwargs, defaults=[\'(1, 1)\', \'valid\', \'None\', \'(1, 1)\', \'1\', \'None\', \'True\', \'glorot_uniform\', \'glorot_uniform\', \'zeros\', \'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'None\'], "
}
member_method {
name: "add_loss"
@@ -100,7 +100,7 @@ tf_class {
}
member_method {
name: "add_weight"
- argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'getter\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'True\', \'None\', \'None\', \'None\', \'None\'], "
+ argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'synchronization\', \'aggregation\'], varargs=None, keywords=kwargs, defaults=[\'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'VariableSynchronization.AUTO\', \'VariableAggregation.NONE\'], "
}
member_method {
name: "apply"
diff --git a/tensorflow/tools/api/golden/tensorflow.keras.layers.-separable-convolution1-d.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-separable-convolution1-d.pbtxt
index 7111965546..6105992c7a 100644
--- a/tensorflow/tools/api/golden/tensorflow.keras.layers.-separable-convolution1-d.pbtxt
+++ b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-separable-convolution1-d.pbtxt
@@ -1,10 +1,10 @@
path: "tensorflow.keras.layers.SeparableConvolution1D"
tf_class {
- is_instance: "<class \'tensorflow.python.keras._impl.keras.layers.convolutional.SeparableConv1D\'>"
- is_instance: "<class \'tensorflow.python.keras._impl.keras.layers.convolutional.SeparableConv\'>"
- is_instance: "<class \'tensorflow.python.keras._impl.keras.layers.convolutional.Conv\'>"
- is_instance: "<class \'tensorflow.python.keras._impl.keras.engine.base_layer.Layer\'>"
- is_instance: "<class \'tensorflow.python.training.checkpointable.CheckpointableBase\'>"
+ is_instance: "<class \'tensorflow.python.keras.layers.convolutional.SeparableConv1D\'>"
+ is_instance: "<class \'tensorflow.python.keras.layers.convolutional.SeparableConv\'>"
+ is_instance: "<class \'tensorflow.python.keras.layers.convolutional.Conv\'>"
+ is_instance: "<class \'tensorflow.python.keras.engine.base_layer.Layer\'>"
+ is_instance: "<class \'tensorflow.python.training.checkpointable.base.CheckpointableBase\'>"
is_instance: "<type \'object\'>"
member {
name: "activity_regularizer"
@@ -100,7 +100,7 @@ tf_class {
}
member_method {
name: "add_weight"
- argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'getter\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'True\', \'None\', \'None\', \'None\', \'None\'], "
+ argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'synchronization\', \'aggregation\'], varargs=None, keywords=kwargs, defaults=[\'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'VariableSynchronization.AUTO\', \'VariableAggregation.NONE\'], "
}
member_method {
name: "apply"
diff --git a/tensorflow/tools/api/golden/tensorflow.keras.layers.-separable-convolution2-d.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-separable-convolution2-d.pbtxt
index 815e34a48d..1b6cf1e9ec 100644
--- a/tensorflow/tools/api/golden/tensorflow.keras.layers.-separable-convolution2-d.pbtxt
+++ b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-separable-convolution2-d.pbtxt
@@ -1,10 +1,10 @@
path: "tensorflow.keras.layers.SeparableConvolution2D"
tf_class {
- is_instance: "<class \'tensorflow.python.keras._impl.keras.layers.convolutional.SeparableConv2D\'>"
- is_instance: "<class \'tensorflow.python.keras._impl.keras.layers.convolutional.SeparableConv\'>"
- is_instance: "<class \'tensorflow.python.keras._impl.keras.layers.convolutional.Conv\'>"
- is_instance: "<class \'tensorflow.python.keras._impl.keras.engine.base_layer.Layer\'>"
- is_instance: "<class \'tensorflow.python.training.checkpointable.CheckpointableBase\'>"
+ is_instance: "<class \'tensorflow.python.keras.layers.convolutional.SeparableConv2D\'>"
+ is_instance: "<class \'tensorflow.python.keras.layers.convolutional.SeparableConv\'>"
+ is_instance: "<class \'tensorflow.python.keras.layers.convolutional.Conv\'>"
+ is_instance: "<class \'tensorflow.python.keras.engine.base_layer.Layer\'>"
+ is_instance: "<class \'tensorflow.python.training.checkpointable.base.CheckpointableBase\'>"
is_instance: "<type \'object\'>"
member {
name: "activity_regularizer"
@@ -84,7 +84,7 @@ tf_class {
}
member_method {
name: "__init__"
- argspec: "args=[\'self\', \'filters\', \'kernel_size\', \'strides\', \'padding\', \'data_format\', \'dilation_rate\', \'depth_multiplier\', \'activation\', \'use_bias\', \'depthwise_initializer\', \'pointwise_initializer\', \'bias_initializer\', \'depthwise_regularizer\', \'pointwise_regularizer\', \'bias_regularizer\', \'activity_regularizer\', \'depthwise_constraint\', \'pointwise_constraint\', \'bias_constraint\'], varargs=None, keywords=kwargs, defaults=[\'(1, 1)\', \'valid\', \'None\', \'1\', \'1\', \'None\', \'True\', \'glorot_uniform\', \'glorot_uniform\', \'zeros\', \'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'None\'], "
+ argspec: "args=[\'self\', \'filters\', \'kernel_size\', \'strides\', \'padding\', \'data_format\', \'dilation_rate\', \'depth_multiplier\', \'activation\', \'use_bias\', \'depthwise_initializer\', \'pointwise_initializer\', \'bias_initializer\', \'depthwise_regularizer\', \'pointwise_regularizer\', \'bias_regularizer\', \'activity_regularizer\', \'depthwise_constraint\', \'pointwise_constraint\', \'bias_constraint\'], varargs=None, keywords=kwargs, defaults=[\'(1, 1)\', \'valid\', \'None\', \'(1, 1)\', \'1\', \'None\', \'True\', \'glorot_uniform\', \'glorot_uniform\', \'zeros\', \'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'None\'], "
}
member_method {
name: "add_loss"
@@ -100,7 +100,7 @@ tf_class {
}
member_method {
name: "add_weight"
- argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'getter\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'True\', \'None\', \'None\', \'None\', \'None\'], "
+ argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'synchronization\', \'aggregation\'], varargs=None, keywords=kwargs, defaults=[\'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'VariableSynchronization.AUTO\', \'VariableAggregation.NONE\'], "
}
member_method {
name: "apply"
diff --git a/tensorflow/tools/api/golden/tensorflow.keras.layers.-simple-r-n-n-cell.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-simple-r-n-n-cell.pbtxt
index 6614760e5e..29488a37f8 100644
--- a/tensorflow/tools/api/golden/tensorflow.keras.layers.-simple-r-n-n-cell.pbtxt
+++ b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-simple-r-n-n-cell.pbtxt
@@ -1,8 +1,8 @@
path: "tensorflow.keras.layers.SimpleRNNCell"
tf_class {
- is_instance: "<class \'tensorflow.python.keras._impl.keras.layers.recurrent.SimpleRNNCell\'>"
- is_instance: "<class \'tensorflow.python.keras._impl.keras.engine.base_layer.Layer\'>"
- is_instance: "<class \'tensorflow.python.training.checkpointable.CheckpointableBase\'>"
+ is_instance: "<class \'tensorflow.python.keras.layers.recurrent.SimpleRNNCell\'>"
+ is_instance: "<class \'tensorflow.python.keras.engine.base_layer.Layer\'>"
+ is_instance: "<class \'tensorflow.python.training.checkpointable.base.CheckpointableBase\'>"
is_instance: "<type \'object\'>"
member {
name: "activity_regularizer"
@@ -98,7 +98,7 @@ tf_class {
}
member_method {
name: "add_weight"
- argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'getter\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'True\', \'None\', \'None\', \'None\', \'None\'], "
+ argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'synchronization\', \'aggregation\'], varargs=None, keywords=kwargs, defaults=[\'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'VariableSynchronization.AUTO\', \'VariableAggregation.NONE\'], "
}
member_method {
name: "apply"
@@ -133,6 +133,10 @@ tf_class {
argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
}
member_method {
+ name: "get_initial_state"
+ argspec: "args=[\'self\', \'inputs\', \'batch_size\', \'dtype\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\'], "
+ }
+ member_method {
name: "get_input_at"
argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
}
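[Editor's note: the SimpleRNNCell golden above gains a get_initial_state method taking inputs, batch_size and dtype. A hedged sketch of calling it directly; unit count and batch size are illustrative only:

import tensorflow as tf

cell = tf.keras.layers.SimpleRNNCell(units=8)
# Either pass a concrete batch of inputs, or batch_size/dtype explicitly;
# the result is zero-filled initial state for the cell.
state = cell.get_initial_state(inputs=None, batch_size=4, dtype=tf.float32)
]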
diff --git a/tensorflow/tools/api/golden/tensorflow.keras.layers.-simple-r-n-n.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-simple-r-n-n.pbtxt
index bfcfd71ecd..182efb83b8 100644
--- a/tensorflow/tools/api/golden/tensorflow.keras.layers.-simple-r-n-n.pbtxt
+++ b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-simple-r-n-n.pbtxt
@@ -1,9 +1,9 @@
path: "tensorflow.keras.layers.SimpleRNN"
tf_class {
- is_instance: "<class \'tensorflow.python.keras._impl.keras.layers.recurrent.SimpleRNN\'>"
- is_instance: "<class \'tensorflow.python.keras._impl.keras.layers.recurrent.RNN\'>"
- is_instance: "<class \'tensorflow.python.keras._impl.keras.engine.base_layer.Layer\'>"
- is_instance: "<class \'tensorflow.python.training.checkpointable.CheckpointableBase\'>"
+ is_instance: "<class \'tensorflow.python.keras.layers.recurrent.SimpleRNN\'>"
+ is_instance: "<class \'tensorflow.python.keras.layers.recurrent.RNN\'>"
+ is_instance: "<class \'tensorflow.python.keras.engine.base_layer.Layer\'>"
+ is_instance: "<class \'tensorflow.python.training.checkpointable.base.CheckpointableBase\'>"
is_instance: "<type \'object\'>"
member {
name: "activation"
@@ -159,7 +159,7 @@ tf_class {
}
member_method {
name: "add_weight"
- argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'getter\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'True\', \'None\', \'None\', \'None\', \'None\'], "
+ argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'synchronization\', \'aggregation\'], varargs=None, keywords=kwargs, defaults=[\'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'VariableSynchronization.AUTO\', \'VariableAggregation.NONE\'], "
}
member_method {
name: "apply"
diff --git a/tensorflow/tools/api/golden/tensorflow.keras.layers.-softmax.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-softmax.pbtxt
index 9c4618c4e9..d29731ecf9 100644
--- a/tensorflow/tools/api/golden/tensorflow.keras.layers.-softmax.pbtxt
+++ b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-softmax.pbtxt
@@ -1,8 +1,8 @@
path: "tensorflow.keras.layers.Softmax"
tf_class {
- is_instance: "<class \'tensorflow.python.keras._impl.keras.layers.advanced_activations.Softmax\'>"
- is_instance: "<class \'tensorflow.python.keras._impl.keras.engine.base_layer.Layer\'>"
- is_instance: "<class \'tensorflow.python.training.checkpointable.CheckpointableBase\'>"
+ is_instance: "<class \'tensorflow.python.keras.layers.advanced_activations.Softmax\'>"
+ is_instance: "<class \'tensorflow.python.keras.engine.base_layer.Layer\'>"
+ is_instance: "<class \'tensorflow.python.training.checkpointable.base.CheckpointableBase\'>"
is_instance: "<type \'object\'>"
member {
name: "activity_regularizer"
@@ -98,7 +98,7 @@ tf_class {
}
member_method {
name: "add_weight"
- argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'getter\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'True\', \'None\', \'None\', \'None\', \'None\'], "
+ argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'synchronization\', \'aggregation\'], varargs=None, keywords=kwargs, defaults=[\'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'VariableSynchronization.AUTO\', \'VariableAggregation.NONE\'], "
}
member_method {
name: "apply"
@@ -106,7 +106,7 @@ tf_class {
}
member_method {
name: "build"
- argspec: "args=[\'self\', \'_\'], varargs=None, keywords=None, defaults=None"
+ argspec: "args=[\'self\', \'input_shape\'], varargs=None, keywords=None, defaults=None"
}
member_method {
name: "call"
diff --git a/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-spatial-dropout1-d.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-spatial-dropout1-d.pbtxt
new file mode 100644
index 0000000000..a6d7494ca7
--- /dev/null
+++ b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-spatial-dropout1-d.pbtxt
@@ -0,0 +1,176 @@
+path: "tensorflow.keras.layers.SpatialDropout1D"
+tf_class {
+ is_instance: "<class \'tensorflow.python.keras.layers.core.SpatialDropout1D\'>"
+ is_instance: "<class \'tensorflow.python.keras.layers.core.Dropout\'>"
+ is_instance: "<class \'tensorflow.python.keras.engine.base_layer.Layer\'>"
+ is_instance: "<class \'tensorflow.python.training.checkpointable.base.CheckpointableBase\'>"
+ is_instance: "<type \'object\'>"
+ member {
+ name: "activity_regularizer"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "dtype"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "inbound_nodes"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "input"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "input_mask"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "input_shape"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "losses"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "name"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "non_trainable_variables"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "non_trainable_weights"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "outbound_nodes"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "output"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "output_mask"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "output_shape"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "trainable_variables"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "trainable_weights"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "updates"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "variables"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "weights"
+ mtype: "<type \'property\'>"
+ }
+ member_method {
+ name: "__init__"
+ argspec: "args=[\'self\', \'rate\'], varargs=None, keywords=kwargs, defaults=None"
+ }
+ member_method {
+ name: "add_loss"
+ argspec: "args=[\'self\', \'losses\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "add_update"
+ argspec: "args=[\'self\', \'updates\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "add_variable"
+ argspec: "args=[\'self\'], varargs=args, keywords=kwargs, defaults=None"
+ }
+ member_method {
+ name: "add_weight"
+ argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'synchronization\', \'aggregation\'], varargs=None, keywords=kwargs, defaults=[\'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'VariableSynchronization.AUTO\', \'VariableAggregation.NONE\'], "
+ }
+ member_method {
+ name: "apply"
+ argspec: "args=[\'self\', \'inputs\'], varargs=args, keywords=kwargs, defaults=None"
+ }
+ member_method {
+ name: "build"
+ argspec: "args=[\'self\', \'input_shape\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "call"
+ argspec: "args=[\'self\', \'inputs\', \'training\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "compute_mask"
+ argspec: "args=[\'self\', \'inputs\', \'mask\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "compute_output_shape"
+ argspec: "args=[\'self\', \'input_shape\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "count_params"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "from_config"
+ argspec: "args=[\'cls\', \'config\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_config"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_input_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_input_mask_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_input_shape_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_losses_for"
+ argspec: "args=[\'self\', \'inputs\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_output_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_output_mask_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_output_shape_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_updates_for"
+ argspec: "args=[\'self\', \'inputs\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_weights"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "set_weights"
+ argspec: "args=[\'self\', \'weights\'], varargs=None, keywords=None, defaults=None"
+ }
+}
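[Editor's note: SpatialDropout1D, documented above, drops entire 1D feature maps rather than individual elements; its constructor takes a single rate argument. A small usage sketch; shapes are illustrative only:

import tensorflow as tf

# Input shaped (batch, timesteps, channels); whole channels are dropped together.
inputs = tf.keras.layers.Input(shape=(10, 16))
x = tf.keras.layers.SpatialDropout1D(rate=0.2)(inputs)
model = tf.keras.models.Model(inputs, x)
]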
diff --git a/tensorflow/tools/api/golden/tensorflow.keras.layers.-spatial-dropout2-d.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-spatial-dropout2-d.pbtxt
index 446f7122a6..c36e802693 100644
--- a/tensorflow/tools/api/golden/tensorflow.keras.layers.-spatial-dropout2-d.pbtxt
+++ b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-spatial-dropout2-d.pbtxt
@@ -1,9 +1,9 @@
path: "tensorflow.keras.layers.SpatialDropout2D"
tf_class {
- is_instance: "<class \'tensorflow.python.keras._impl.keras.layers.core.SpatialDropout2D\'>"
- is_instance: "<class \'tensorflow.python.keras._impl.keras.layers.core.Dropout\'>"
- is_instance: "<class \'tensorflow.python.keras._impl.keras.engine.base_layer.Layer\'>"
- is_instance: "<class \'tensorflow.python.training.checkpointable.CheckpointableBase\'>"
+ is_instance: "<class \'tensorflow.python.keras.layers.core.SpatialDropout2D\'>"
+ is_instance: "<class \'tensorflow.python.keras.layers.core.Dropout\'>"
+ is_instance: "<class \'tensorflow.python.keras.engine.base_layer.Layer\'>"
+ is_instance: "<class \'tensorflow.python.training.checkpointable.base.CheckpointableBase\'>"
is_instance: "<type \'object\'>"
member {
name: "activity_regularizer"
@@ -99,7 +99,7 @@ tf_class {
}
member_method {
name: "add_weight"
- argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'getter\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'True\', \'None\', \'None\', \'None\', \'None\'], "
+ argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'synchronization\', \'aggregation\'], varargs=None, keywords=kwargs, defaults=[\'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'VariableSynchronization.AUTO\', \'VariableAggregation.NONE\'], "
}
member_method {
name: "apply"
@@ -107,7 +107,7 @@ tf_class {
}
member_method {
name: "build"
- argspec: "args=[\'self\', \'_\'], varargs=None, keywords=None, defaults=None"
+ argspec: "args=[\'self\', \'input_shape\'], varargs=None, keywords=None, defaults=None"
}
member_method {
name: "call"
diff --git a/tensorflow/tools/api/golden/tensorflow.keras.layers.-spatial-dropout3-d.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-spatial-dropout3-d.pbtxt
index 52a0485b5c..9c46cfe40f 100644
--- a/tensorflow/tools/api/golden/tensorflow.keras.layers.-spatial-dropout3-d.pbtxt
+++ b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-spatial-dropout3-d.pbtxt
@@ -1,9 +1,9 @@
path: "tensorflow.keras.layers.SpatialDropout3D"
tf_class {
- is_instance: "<class \'tensorflow.python.keras._impl.keras.layers.core.SpatialDropout3D\'>"
- is_instance: "<class \'tensorflow.python.keras._impl.keras.layers.core.Dropout\'>"
- is_instance: "<class \'tensorflow.python.keras._impl.keras.engine.base_layer.Layer\'>"
- is_instance: "<class \'tensorflow.python.training.checkpointable.CheckpointableBase\'>"
+ is_instance: "<class \'tensorflow.python.keras.layers.core.SpatialDropout3D\'>"
+ is_instance: "<class \'tensorflow.python.keras.layers.core.Dropout\'>"
+ is_instance: "<class \'tensorflow.python.keras.engine.base_layer.Layer\'>"
+ is_instance: "<class \'tensorflow.python.training.checkpointable.base.CheckpointableBase\'>"
is_instance: "<type \'object\'>"
member {
name: "activity_regularizer"
@@ -99,7 +99,7 @@ tf_class {
}
member_method {
name: "add_weight"
- argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'getter\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'True\', \'None\', \'None\', \'None\', \'None\'], "
+ argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'synchronization\', \'aggregation\'], varargs=None, keywords=kwargs, defaults=[\'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'VariableSynchronization.AUTO\', \'VariableAggregation.NONE\'], "
}
member_method {
name: "apply"
@@ -107,7 +107,7 @@ tf_class {
}
member_method {
name: "build"
- argspec: "args=[\'self\', \'_\'], varargs=None, keywords=None, defaults=None"
+ argspec: "args=[\'self\', \'input_shape\'], varargs=None, keywords=None, defaults=None"
}
member_method {
name: "call"
diff --git a/tensorflow/tools/api/golden/tensorflow.keras.layers.-stacked-r-n-n-cells.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-stacked-r-n-n-cells.pbtxt
index c82e7a192d..8982f78794 100644
--- a/tensorflow/tools/api/golden/tensorflow.keras.layers.-stacked-r-n-n-cells.pbtxt
+++ b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-stacked-r-n-n-cells.pbtxt
@@ -1,8 +1,8 @@
path: "tensorflow.keras.layers.StackedRNNCells"
tf_class {
- is_instance: "<class \'tensorflow.python.keras._impl.keras.layers.recurrent.StackedRNNCells\'>"
- is_instance: "<class \'tensorflow.python.keras._impl.keras.engine.base_layer.Layer\'>"
- is_instance: "<class \'tensorflow.python.training.checkpointable.CheckpointableBase\'>"
+ is_instance: "<class \'tensorflow.python.keras.layers.recurrent.StackedRNNCells\'>"
+ is_instance: "<class \'tensorflow.python.keras.engine.base_layer.Layer\'>"
+ is_instance: "<class \'tensorflow.python.training.checkpointable.base.CheckpointableBase\'>"
is_instance: "<type \'object\'>"
member {
name: "activity_regularizer"
@@ -61,6 +61,10 @@ tf_class {
mtype: "<type \'property\'>"
}
member {
+ name: "output_size"
+ mtype: "<type \'property\'>"
+ }
+ member {
name: "state_size"
mtype: "<type \'property\'>"
}
@@ -102,7 +106,7 @@ tf_class {
}
member_method {
name: "add_weight"
- argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'getter\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'True\', \'None\', \'None\', \'None\', \'None\'], "
+ argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'synchronization\', \'aggregation\'], varargs=None, keywords=kwargs, defaults=[\'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'VariableSynchronization.AUTO\', \'VariableAggregation.NONE\'], "
}
member_method {
name: "apply"
@@ -137,6 +141,10 @@ tf_class {
argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
}
member_method {
+ name: "get_initial_state"
+ argspec: "args=[\'self\', \'inputs\', \'batch_size\', \'dtype\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\'], "
+ }
+ member_method {
name: "get_input_at"
argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
}
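The StackedRNNCells golden above gains an output_size property and a get_initial_state method. A short sketch of the new method, assuming two illustrative LSTM cells and a batch size of 8:

import tensorflow as tf

cells = tf.keras.layers.StackedRNNCells(
    [tf.keras.layers.LSTMCell(32), tf.keras.layers.LSTMCell(32)])
# get_initial_state is the newly listed method; inputs may be None when
# batch_size and dtype are given explicitly.
states = cells.get_initial_state(inputs=None, batch_size=8, dtype=tf.float32)
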
diff --git a/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-subtract.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-subtract.pbtxt
new file mode 100644
index 0000000000..ec2cc50298
--- /dev/null
+++ b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-subtract.pbtxt
@@ -0,0 +1,176 @@
+path: "tensorflow.keras.layers.Subtract"
+tf_class {
+ is_instance: "<class \'tensorflow.python.keras.layers.merge.Subtract\'>"
+ is_instance: "<class \'tensorflow.python.keras.layers.merge._Merge\'>"
+ is_instance: "<class \'tensorflow.python.keras.engine.base_layer.Layer\'>"
+ is_instance: "<class \'tensorflow.python.training.checkpointable.base.CheckpointableBase\'>"
+ is_instance: "<type \'object\'>"
+ member {
+ name: "activity_regularizer"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "dtype"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "inbound_nodes"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "input"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "input_mask"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "input_shape"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "losses"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "name"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "non_trainable_variables"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "non_trainable_weights"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "outbound_nodes"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "output"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "output_mask"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "output_shape"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "trainable_variables"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "trainable_weights"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "updates"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "variables"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "weights"
+ mtype: "<type \'property\'>"
+ }
+ member_method {
+ name: "__init__"
+ argspec: "args=[\'self\'], varargs=None, keywords=kwargs, defaults=None"
+ }
+ member_method {
+ name: "add_loss"
+ argspec: "args=[\'self\', \'losses\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "add_update"
+ argspec: "args=[\'self\', \'updates\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "add_variable"
+ argspec: "args=[\'self\'], varargs=args, keywords=kwargs, defaults=None"
+ }
+ member_method {
+ name: "add_weight"
+ argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'synchronization\', \'aggregation\'], varargs=None, keywords=kwargs, defaults=[\'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'VariableSynchronization.AUTO\', \'VariableAggregation.NONE\'], "
+ }
+ member_method {
+ name: "apply"
+ argspec: "args=[\'self\', \'inputs\'], varargs=args, keywords=kwargs, defaults=None"
+ }
+ member_method {
+ name: "build"
+ argspec: "args=[\'instance\', \'input_shape\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "call"
+ argspec: "args=[\'self\', \'inputs\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "compute_mask"
+ argspec: "args=[\'self\', \'inputs\', \'mask\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "compute_output_shape"
+ argspec: "args=[\'instance\', \'input_shape\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "count_params"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "from_config"
+ argspec: "args=[\'cls\', \'config\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_config"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_input_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_input_mask_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_input_shape_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_losses_for"
+ argspec: "args=[\'self\', \'inputs\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_output_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_output_mask_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_output_shape_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_updates_for"
+ argspec: "args=[\'self\', \'inputs\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_weights"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "set_weights"
+ argspec: "args=[\'self\', \'weights\'], varargs=None, keywords=None, defaults=None"
+ }
+}
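The new Subtract golden above describes the two-input merge layer. A minimal functional-API sketch, with illustrative input shapes:

import tensorflow as tf

a = tf.keras.layers.Input(shape=(4,))
b = tf.keras.layers.Input(shape=(4,))
# Layer form from this golden; the lower-case subtract helper is listed in
# the tensorflow.keras.layers module golden further down.
diff = tf.keras.layers.Subtract()([a, b])
model = tf.keras.models.Model(inputs=[a, b], outputs=diff)
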
diff --git a/tensorflow/tools/api/golden/tensorflow.keras.layers.-thresholded-re-l-u.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-thresholded-re-l-u.pbtxt
index 9ccf251a18..d7bc1980f3 100644
--- a/tensorflow/tools/api/golden/tensorflow.keras.layers.-thresholded-re-l-u.pbtxt
+++ b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-thresholded-re-l-u.pbtxt
@@ -1,8 +1,8 @@
path: "tensorflow.keras.layers.ThresholdedReLU"
tf_class {
- is_instance: "<class \'tensorflow.python.keras._impl.keras.layers.advanced_activations.ThresholdedReLU\'>"
- is_instance: "<class \'tensorflow.python.keras._impl.keras.engine.base_layer.Layer\'>"
- is_instance: "<class \'tensorflow.python.training.checkpointable.CheckpointableBase\'>"
+ is_instance: "<class \'tensorflow.python.keras.layers.advanced_activations.ThresholdedReLU\'>"
+ is_instance: "<class \'tensorflow.python.keras.engine.base_layer.Layer\'>"
+ is_instance: "<class \'tensorflow.python.training.checkpointable.base.CheckpointableBase\'>"
is_instance: "<type \'object\'>"
member {
name: "activity_regularizer"
@@ -98,7 +98,7 @@ tf_class {
}
member_method {
name: "add_weight"
- argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'getter\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'True\', \'None\', \'None\', \'None\', \'None\'], "
+ argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'synchronization\', \'aggregation\'], varargs=None, keywords=kwargs, defaults=[\'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'VariableSynchronization.AUTO\', \'VariableAggregation.NONE\'], "
}
member_method {
name: "apply"
@@ -106,7 +106,7 @@ tf_class {
}
member_method {
name: "build"
- argspec: "args=[\'self\', \'_\'], varargs=None, keywords=None, defaults=None"
+ argspec: "args=[\'self\', \'input_shape\'], varargs=None, keywords=None, defaults=None"
}
member_method {
name: "call"
diff --git a/tensorflow/tools/api/golden/tensorflow.keras.layers.-time-distributed.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-time-distributed.pbtxt
index e080a07799..fec2de6b49 100644
--- a/tensorflow/tools/api/golden/tensorflow.keras.layers.-time-distributed.pbtxt
+++ b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-time-distributed.pbtxt
@@ -1,9 +1,9 @@
path: "tensorflow.keras.layers.TimeDistributed"
tf_class {
- is_instance: "<class \'tensorflow.python.keras._impl.keras.layers.wrappers.TimeDistributed\'>"
- is_instance: "<class \'tensorflow.python.keras._impl.keras.layers.wrappers.Wrapper\'>"
- is_instance: "<class \'tensorflow.python.keras._impl.keras.engine.base_layer.Layer\'>"
- is_instance: "<class \'tensorflow.python.training.checkpointable.CheckpointableBase\'>"
+ is_instance: "<class \'tensorflow.python.keras.layers.wrappers.TimeDistributed\'>"
+ is_instance: "<class \'tensorflow.python.keras.layers.wrappers.Wrapper\'>"
+ is_instance: "<class \'tensorflow.python.keras.engine.base_layer.Layer\'>"
+ is_instance: "<class \'tensorflow.python.training.checkpointable.base.CheckpointableBase\'>"
is_instance: "<type \'object\'>"
member {
name: "activity_regularizer"
@@ -103,7 +103,7 @@ tf_class {
}
member_method {
name: "add_weight"
- argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'getter\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'True\', \'None\', \'None\', \'None\', \'None\'], "
+ argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'synchronization\', \'aggregation\'], varargs=None, keywords=kwargs, defaults=[\'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'VariableSynchronization.AUTO\', \'VariableAggregation.NONE\'], "
}
member_method {
name: "apply"
diff --git a/tensorflow/tools/api/golden/tensorflow.keras.layers.-up-sampling1-d.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-up-sampling1-d.pbtxt
index 5fadca0b83..3d285e7f17 100644
--- a/tensorflow/tools/api/golden/tensorflow.keras.layers.-up-sampling1-d.pbtxt
+++ b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-up-sampling1-d.pbtxt
@@ -1,8 +1,8 @@
path: "tensorflow.keras.layers.UpSampling1D"
tf_class {
- is_instance: "<class \'tensorflow.python.keras._impl.keras.layers.convolutional.UpSampling1D\'>"
- is_instance: "<class \'tensorflow.python.keras._impl.keras.engine.base_layer.Layer\'>"
- is_instance: "<class \'tensorflow.python.training.checkpointable.CheckpointableBase\'>"
+ is_instance: "<class \'tensorflow.python.keras.layers.convolutional.UpSampling1D\'>"
+ is_instance: "<class \'tensorflow.python.keras.engine.base_layer.Layer\'>"
+ is_instance: "<class \'tensorflow.python.training.checkpointable.base.CheckpointableBase\'>"
is_instance: "<type \'object\'>"
member {
name: "activity_regularizer"
@@ -98,7 +98,7 @@ tf_class {
}
member_method {
name: "add_weight"
- argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'getter\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'True\', \'None\', \'None\', \'None\', \'None\'], "
+ argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'synchronization\', \'aggregation\'], varargs=None, keywords=kwargs, defaults=[\'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'VariableSynchronization.AUTO\', \'VariableAggregation.NONE\'], "
}
member_method {
name: "apply"
@@ -106,7 +106,7 @@ tf_class {
}
member_method {
name: "build"
- argspec: "args=[\'self\', \'_\'], varargs=None, keywords=None, defaults=None"
+ argspec: "args=[\'self\', \'input_shape\'], varargs=None, keywords=None, defaults=None"
}
member_method {
name: "call"
diff --git a/tensorflow/tools/api/golden/tensorflow.keras.layers.-up-sampling2-d.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-up-sampling2-d.pbtxt
index 2d395bf7e8..40a56a0c94 100644
--- a/tensorflow/tools/api/golden/tensorflow.keras.layers.-up-sampling2-d.pbtxt
+++ b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-up-sampling2-d.pbtxt
@@ -1,8 +1,8 @@
path: "tensorflow.keras.layers.UpSampling2D"
tf_class {
- is_instance: "<class \'tensorflow.python.keras._impl.keras.layers.convolutional.UpSampling2D\'>"
- is_instance: "<class \'tensorflow.python.keras._impl.keras.engine.base_layer.Layer\'>"
- is_instance: "<class \'tensorflow.python.training.checkpointable.CheckpointableBase\'>"
+ is_instance: "<class \'tensorflow.python.keras.layers.convolutional.UpSampling2D\'>"
+ is_instance: "<class \'tensorflow.python.keras.engine.base_layer.Layer\'>"
+ is_instance: "<class \'tensorflow.python.training.checkpointable.base.CheckpointableBase\'>"
is_instance: "<type \'object\'>"
member {
name: "activity_regularizer"
@@ -98,7 +98,7 @@ tf_class {
}
member_method {
name: "add_weight"
- argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'getter\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'True\', \'None\', \'None\', \'None\', \'None\'], "
+ argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'synchronization\', \'aggregation\'], varargs=None, keywords=kwargs, defaults=[\'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'VariableSynchronization.AUTO\', \'VariableAggregation.NONE\'], "
}
member_method {
name: "apply"
@@ -106,7 +106,7 @@ tf_class {
}
member_method {
name: "build"
- argspec: "args=[\'self\', \'_\'], varargs=None, keywords=None, defaults=None"
+ argspec: "args=[\'self\', \'input_shape\'], varargs=None, keywords=None, defaults=None"
}
member_method {
name: "call"
diff --git a/tensorflow/tools/api/golden/tensorflow.keras.layers.-up-sampling3-d.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-up-sampling3-d.pbtxt
index 18d58ec3b2..728eca415a 100644
--- a/tensorflow/tools/api/golden/tensorflow.keras.layers.-up-sampling3-d.pbtxt
+++ b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-up-sampling3-d.pbtxt
@@ -1,8 +1,8 @@
path: "tensorflow.keras.layers.UpSampling3D"
tf_class {
- is_instance: "<class \'tensorflow.python.keras._impl.keras.layers.convolutional.UpSampling3D\'>"
- is_instance: "<class \'tensorflow.python.keras._impl.keras.engine.base_layer.Layer\'>"
- is_instance: "<class \'tensorflow.python.training.checkpointable.CheckpointableBase\'>"
+ is_instance: "<class \'tensorflow.python.keras.layers.convolutional.UpSampling3D\'>"
+ is_instance: "<class \'tensorflow.python.keras.engine.base_layer.Layer\'>"
+ is_instance: "<class \'tensorflow.python.training.checkpointable.base.CheckpointableBase\'>"
is_instance: "<type \'object\'>"
member {
name: "activity_regularizer"
@@ -98,7 +98,7 @@ tf_class {
}
member_method {
name: "add_weight"
- argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'getter\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'True\', \'None\', \'None\', \'None\', \'None\'], "
+ argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'synchronization\', \'aggregation\'], varargs=None, keywords=kwargs, defaults=[\'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'VariableSynchronization.AUTO\', \'VariableAggregation.NONE\'], "
}
member_method {
name: "apply"
@@ -106,7 +106,7 @@ tf_class {
}
member_method {
name: "build"
- argspec: "args=[\'self\', \'_\'], varargs=None, keywords=None, defaults=None"
+ argspec: "args=[\'self\', \'input_shape\'], varargs=None, keywords=None, defaults=None"
}
member_method {
name: "call"
diff --git a/tensorflow/tools/api/golden/tensorflow.keras.layers.-wrapper.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-wrapper.pbtxt
index 6223cb2f3c..da64e77c39 100644
--- a/tensorflow/tools/api/golden/tensorflow.keras.layers.-wrapper.pbtxt
+++ b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-wrapper.pbtxt
@@ -1,8 +1,8 @@
path: "tensorflow.keras.layers.Wrapper"
tf_class {
- is_instance: "<class \'tensorflow.python.keras._impl.keras.layers.wrappers.Wrapper\'>"
- is_instance: "<class \'tensorflow.python.keras._impl.keras.engine.base_layer.Layer\'>"
- is_instance: "<class \'tensorflow.python.training.checkpointable.CheckpointableBase\'>"
+ is_instance: "<class \'tensorflow.python.keras.layers.wrappers.Wrapper\'>"
+ is_instance: "<class \'tensorflow.python.keras.engine.base_layer.Layer\'>"
+ is_instance: "<class \'tensorflow.python.training.checkpointable.base.CheckpointableBase\'>"
is_instance: "<type \'object\'>"
member {
name: "activity_regularizer"
@@ -102,7 +102,7 @@ tf_class {
}
member_method {
name: "add_weight"
- argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'getter\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'True\', \'None\', \'None\', \'None\', \'None\'], "
+ argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'synchronization\', \'aggregation\'], varargs=None, keywords=kwargs, defaults=[\'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'VariableSynchronization.AUTO\', \'VariableAggregation.NONE\'], "
}
member_method {
name: "apply"
diff --git a/tensorflow/tools/api/golden/tensorflow.keras.layers.-zero-padding1-d.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-zero-padding1-d.pbtxt
index e71bba6a7f..2f505f9293 100644
--- a/tensorflow/tools/api/golden/tensorflow.keras.layers.-zero-padding1-d.pbtxt
+++ b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-zero-padding1-d.pbtxt
@@ -1,8 +1,8 @@
path: "tensorflow.keras.layers.ZeroPadding1D"
tf_class {
- is_instance: "<class \'tensorflow.python.keras._impl.keras.layers.convolutional.ZeroPadding1D\'>"
- is_instance: "<class \'tensorflow.python.keras._impl.keras.engine.base_layer.Layer\'>"
- is_instance: "<class \'tensorflow.python.training.checkpointable.CheckpointableBase\'>"
+ is_instance: "<class \'tensorflow.python.keras.layers.convolutional.ZeroPadding1D\'>"
+ is_instance: "<class \'tensorflow.python.keras.engine.base_layer.Layer\'>"
+ is_instance: "<class \'tensorflow.python.training.checkpointable.base.CheckpointableBase\'>"
is_instance: "<type \'object\'>"
member {
name: "activity_regularizer"
@@ -98,7 +98,7 @@ tf_class {
}
member_method {
name: "add_weight"
- argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'getter\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'True\', \'None\', \'None\', \'None\', \'None\'], "
+ argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'synchronization\', \'aggregation\'], varargs=None, keywords=kwargs, defaults=[\'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'VariableSynchronization.AUTO\', \'VariableAggregation.NONE\'], "
}
member_method {
name: "apply"
@@ -106,7 +106,7 @@ tf_class {
}
member_method {
name: "build"
- argspec: "args=[\'self\', \'_\'], varargs=None, keywords=None, defaults=None"
+ argspec: "args=[\'self\', \'input_shape\'], varargs=None, keywords=None, defaults=None"
}
member_method {
name: "call"
diff --git a/tensorflow/tools/api/golden/tensorflow.keras.layers.-zero-padding2-d.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-zero-padding2-d.pbtxt
index aba6d8cb1f..f82c77072e 100644
--- a/tensorflow/tools/api/golden/tensorflow.keras.layers.-zero-padding2-d.pbtxt
+++ b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-zero-padding2-d.pbtxt
@@ -1,8 +1,8 @@
path: "tensorflow.keras.layers.ZeroPadding2D"
tf_class {
- is_instance: "<class \'tensorflow.python.keras._impl.keras.layers.convolutional.ZeroPadding2D\'>"
- is_instance: "<class \'tensorflow.python.keras._impl.keras.engine.base_layer.Layer\'>"
- is_instance: "<class \'tensorflow.python.training.checkpointable.CheckpointableBase\'>"
+ is_instance: "<class \'tensorflow.python.keras.layers.convolutional.ZeroPadding2D\'>"
+ is_instance: "<class \'tensorflow.python.keras.engine.base_layer.Layer\'>"
+ is_instance: "<class \'tensorflow.python.training.checkpointable.base.CheckpointableBase\'>"
is_instance: "<type \'object\'>"
member {
name: "activity_regularizer"
@@ -98,7 +98,7 @@ tf_class {
}
member_method {
name: "add_weight"
- argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'getter\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'True\', \'None\', \'None\', \'None\', \'None\'], "
+ argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'synchronization\', \'aggregation\'], varargs=None, keywords=kwargs, defaults=[\'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'VariableSynchronization.AUTO\', \'VariableAggregation.NONE\'], "
}
member_method {
name: "apply"
@@ -106,7 +106,7 @@ tf_class {
}
member_method {
name: "build"
- argspec: "args=[\'self\', \'_\'], varargs=None, keywords=None, defaults=None"
+ argspec: "args=[\'self\', \'input_shape\'], varargs=None, keywords=None, defaults=None"
}
member_method {
name: "call"
diff --git a/tensorflow/tools/api/golden/tensorflow.keras.layers.-zero-padding3-d.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-zero-padding3-d.pbtxt
index ce545ecc95..54e01a9917 100644
--- a/tensorflow/tools/api/golden/tensorflow.keras.layers.-zero-padding3-d.pbtxt
+++ b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-zero-padding3-d.pbtxt
@@ -1,8 +1,8 @@
path: "tensorflow.keras.layers.ZeroPadding3D"
tf_class {
- is_instance: "<class \'tensorflow.python.keras._impl.keras.layers.convolutional.ZeroPadding3D\'>"
- is_instance: "<class \'tensorflow.python.keras._impl.keras.engine.base_layer.Layer\'>"
- is_instance: "<class \'tensorflow.python.training.checkpointable.CheckpointableBase\'>"
+ is_instance: "<class \'tensorflow.python.keras.layers.convolutional.ZeroPadding3D\'>"
+ is_instance: "<class \'tensorflow.python.keras.engine.base_layer.Layer\'>"
+ is_instance: "<class \'tensorflow.python.training.checkpointable.base.CheckpointableBase\'>"
is_instance: "<type \'object\'>"
member {
name: "activity_regularizer"
@@ -98,7 +98,7 @@ tf_class {
}
member_method {
name: "add_weight"
- argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'getter\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'True\', \'None\', \'None\', \'None\', \'None\'], "
+ argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'synchronization\', \'aggregation\'], varargs=None, keywords=kwargs, defaults=[\'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'VariableSynchronization.AUTO\', \'VariableAggregation.NONE\'], "
}
member_method {
name: "apply"
@@ -106,7 +106,7 @@ tf_class {
}
member_method {
name: "build"
- argspec: "args=[\'self\', \'_\'], varargs=None, keywords=None, defaults=None"
+ argspec: "args=[\'self\', \'input_shape\'], varargs=None, keywords=None, defaults=None"
}
member_method {
name: "call"
diff --git a/tensorflow/tools/api/golden/tensorflow.keras.layers.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.pbtxt
index affc9bd09b..9d7e5bb8c7 100644
--- a/tensorflow/tools/api/golden/tensorflow.keras.layers.pbtxt
+++ b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.pbtxt
@@ -113,6 +113,14 @@ tf_module {
mtype: "<type \'type\'>"
}
member {
+ name: "CuDNNGRU"
+ mtype: "<type \'type\'>"
+ }
+ member {
+ name: "CuDNNLSTM"
+ mtype: "<type \'type\'>"
+ }
+ member {
name: "Dense"
mtype: "<type \'type\'>"
}
@@ -273,6 +281,10 @@ tf_module {
mtype: "<type \'type\'>"
}
member {
+ name: "Minimum"
+ mtype: "<type \'type\'>"
+ }
+ member {
name: "Multiply"
mtype: "<type \'type\'>"
}
@@ -289,6 +301,10 @@ tf_module {
mtype: "<type \'type\'>"
}
member {
+ name: "ReLU"
+ mtype: "<type \'type\'>"
+ }
+ member {
name: "RepeatVector"
mtype: "<type \'type\'>"
}
@@ -341,6 +357,10 @@ tf_module {
mtype: "<type \'type\'>"
}
member {
+ name: "Subtract"
+ mtype: "<type \'type\'>"
+ }
+ member {
name: "ThresholdedReLU"
mtype: "<type \'type\'>"
}
@@ -401,7 +421,15 @@ tf_module {
argspec: "args=[\'inputs\'], varargs=None, keywords=kwargs, defaults=None"
}
member_method {
+ name: "minimum"
+ argspec: "args=[\'inputs\'], varargs=None, keywords=kwargs, defaults=None"
+ }
+ member_method {
name: "multiply"
argspec: "args=[\'inputs\'], varargs=None, keywords=kwargs, defaults=None"
}
+ member_method {
+ name: "subtract"
+ argspec: "args=[\'inputs\'], varargs=None, keywords=kwargs, defaults=None"
+ }
}
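The layers module golden above newly exports CuDNNGRU, CuDNNLSTM, Minimum, ReLU and Subtract, plus the minimum and subtract helpers. A brief sketch of the functional helpers; the shapes are illustrative, and the CuDNN layers are omitted here since they require a GPU at runtime:

import tensorflow as tf

x = tf.keras.layers.Input(shape=(8,))
y = tf.keras.layers.Input(shape=(8,))
m = tf.keras.layers.minimum([x, y])   # element-wise minimum of the two inputs
s = tf.keras.layers.subtract([x, y])  # element-wise difference x - y
r = tf.keras.layers.ReLU()(s)         # newly exported ReLU layer
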
diff --git a/tensorflow/tools/api/golden/tensorflow.keras.losses.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.keras.losses.pbtxt
index ae5f6305b7..eca6b91538 100644
--- a/tensorflow/tools/api/golden/tensorflow.keras.losses.pbtxt
+++ b/tensorflow/tools/api/golden/v1/tensorflow.keras.losses.pbtxt
@@ -1,6 +1,26 @@
path: "tensorflow.keras.losses"
tf_module {
member_method {
+ name: "KLD"
+ argspec: "args=[\'y_true\', \'y_pred\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "MAE"
+ argspec: "args=[\'y_true\', \'y_pred\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "MAPE"
+ argspec: "args=[\'y_true\', \'y_pred\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "MSE"
+ argspec: "args=[\'y_true\', \'y_pred\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "MSLE"
+ argspec: "args=[\'y_true\', \'y_pred\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
name: "binary_crossentropy"
argspec: "args=[\'y_true\', \'y_pred\'], varargs=None, keywords=None, defaults=None"
}
@@ -13,6 +33,10 @@ tf_module {
argspec: "args=[\'y_true\', \'y_pred\'], varargs=None, keywords=None, defaults=None"
}
member_method {
+ name: "cosine"
+ argspec: "args=[\'y_true\', \'y_pred\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
name: "cosine_proximity"
argspec: "args=[\'y_true\', \'y_pred\'], varargs=None, keywords=None, defaults=None"
}
@@ -29,6 +53,10 @@ tf_module {
argspec: "args=[\'y_true\', \'y_pred\'], varargs=None, keywords=None, defaults=None"
}
member_method {
+ name: "kld"
+ argspec: "args=[\'y_true\', \'y_pred\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
name: "kullback_leibler_divergence"
argspec: "args=[\'y_true\', \'y_pred\'], varargs=None, keywords=None, defaults=None"
}
@@ -37,6 +65,14 @@ tf_module {
argspec: "args=[\'y_true\', \'y_pred\'], varargs=None, keywords=None, defaults=None"
}
member_method {
+ name: "mae"
+ argspec: "args=[\'y_true\', \'y_pred\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "mape"
+ argspec: "args=[\'y_true\', \'y_pred\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
name: "mean_absolute_error"
argspec: "args=[\'y_true\', \'y_pred\'], varargs=None, keywords=None, defaults=None"
}
@@ -53,6 +89,14 @@ tf_module {
argspec: "args=[\'y_true\', \'y_pred\'], varargs=None, keywords=None, defaults=None"
}
member_method {
+ name: "mse"
+ argspec: "args=[\'y_true\', \'y_pred\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "msle"
+ argspec: "args=[\'y_true\', \'y_pred\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
name: "poisson"
argspec: "args=[\'y_true\', \'y_pred\'], varargs=None, keywords=None, defaults=None"
}
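The losses golden above adds short aliases (KLD, MAE, MAPE, MSE, MSLE, cosine, kld, mae, mape, mse, msle) alongside the long-form names. A quick sketch showing that the aliases take the same (y_true, y_pred) arguments; the tensors are illustrative:

import tensorflow as tf

y_true = tf.constant([[0., 1.], [1., 0.]])
y_pred = tf.constant([[0.1, 0.9], [0.8, 0.2]])
mse = tf.keras.losses.MSE(y_true, y_pred)   # alias of mean_squared_error
mae = tf.keras.losses.mae(y_true, y_pred)   # alias of mean_absolute_error
kld = tf.keras.losses.KLD(y_true, y_pred)   # alias of kullback_leibler_divergence
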
diff --git a/tensorflow/tools/api/golden/tensorflow.keras.metrics.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.keras.metrics.pbtxt
index 42729e4237..a296e13158 100644
--- a/tensorflow/tools/api/golden/tensorflow.keras.metrics.pbtxt
+++ b/tensorflow/tools/api/golden/v1/tensorflow.keras.metrics.pbtxt
@@ -1,10 +1,30 @@
path: "tensorflow.keras.metrics"
tf_module {
member_method {
- name: "binary_accuracy"
+ name: "KLD"
+ argspec: "args=[\'y_true\', \'y_pred\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "MAE"
+ argspec: "args=[\'y_true\', \'y_pred\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "MAPE"
+ argspec: "args=[\'y_true\', \'y_pred\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "MSE"
argspec: "args=[\'y_true\', \'y_pred\'], varargs=None, keywords=None, defaults=None"
}
member_method {
+ name: "MSLE"
+ argspec: "args=[\'y_true\', \'y_pred\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "binary_accuracy"
+ argspec: "args=[\'y_true\', \'y_pred\', \'threshold\'], varargs=None, keywords=None, defaults=[\'0.5\'], "
+ }
+ member_method {
name: "binary_crossentropy"
argspec: "args=[\'y_true\', \'y_pred\'], varargs=None, keywords=None, defaults=None"
}
@@ -17,6 +37,10 @@ tf_module {
argspec: "args=[\'y_true\', \'y_pred\'], varargs=None, keywords=None, defaults=None"
}
member_method {
+ name: "cosine"
+ argspec: "args=[\'y_true\', \'y_pred\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
name: "cosine_proximity"
argspec: "args=[\'y_true\', \'y_pred\'], varargs=None, keywords=None, defaults=None"
}
@@ -33,10 +57,22 @@ tf_module {
argspec: "args=[\'y_true\', \'y_pred\'], varargs=None, keywords=None, defaults=None"
}
member_method {
+ name: "kld"
+ argspec: "args=[\'y_true\', \'y_pred\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
name: "kullback_leibler_divergence"
argspec: "args=[\'y_true\', \'y_pred\'], varargs=None, keywords=None, defaults=None"
}
member_method {
+ name: "mae"
+ argspec: "args=[\'y_true\', \'y_pred\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "mape"
+ argspec: "args=[\'y_true\', \'y_pred\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
name: "mean_absolute_error"
argspec: "args=[\'y_true\', \'y_pred\'], varargs=None, keywords=None, defaults=None"
}
@@ -53,6 +89,14 @@ tf_module {
argspec: "args=[\'y_true\', \'y_pred\'], varargs=None, keywords=None, defaults=None"
}
member_method {
+ name: "mse"
+ argspec: "args=[\'y_true\', \'y_pred\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "msle"
+ argspec: "args=[\'y_true\', \'y_pred\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
name: "poisson"
argspec: "args=[\'y_true\', \'y_pred\'], varargs=None, keywords=None, defaults=None"
}
@@ -61,6 +105,10 @@ tf_module {
argspec: "args=[\'metric\'], varargs=None, keywords=None, defaults=None"
}
member_method {
+ name: "sparse_categorical_accuracy"
+ argspec: "args=[\'y_true\', \'y_pred\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
name: "sparse_categorical_crossentropy"
argspec: "args=[\'y_true\', \'y_pred\'], varargs=None, keywords=None, defaults=None"
}
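The metrics golden above adds the same aliases and, notably, gives binary_accuracy an explicit threshold argument (default 0.5) and exports sparse_categorical_accuracy at module level. A short sketch, with illustrative tensors:

import tensorflow as tf

y_true = tf.constant([[1.], [0.], [1.]])
y_pred = tf.constant([[0.7], [0.4], [0.6]])
# threshold is the newly listed argument; predictions above it count as 1.
acc = tf.keras.metrics.binary_accuracy(y_true, y_pred, threshold=0.6)
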
diff --git a/tensorflow/tools/api/golden/v1/tensorflow.keras.models.-model.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.keras.models.-model.pbtxt
new file mode 100644
index 0000000000..4011719317
--- /dev/null
+++ b/tensorflow/tools/api/golden/v1/tensorflow.keras.models.-model.pbtxt
@@ -0,0 +1,268 @@
+path: "tensorflow.keras.models.Model"
+tf_class {
+ is_instance: "<class \'tensorflow.python.keras.engine.training.Model\'>"
+ is_instance: "<class \'tensorflow.python.keras.engine.network.Network\'>"
+ is_instance: "<class \'tensorflow.python.keras.engine.base_layer.Layer\'>"
+ is_instance: "<class \'tensorflow.python.training.checkpointable.base.CheckpointableBase\'>"
+ is_instance: "<type \'object\'>"
+ member {
+ name: "activity_regularizer"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "dtype"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "inbound_nodes"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "input"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "input_mask"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "input_shape"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "input_spec"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "layers"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "losses"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "name"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "non_trainable_variables"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "non_trainable_weights"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "outbound_nodes"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "output"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "output_mask"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "output_shape"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "state_updates"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "stateful"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "trainable_variables"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "trainable_weights"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "updates"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "uses_learning_phase"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "variables"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "weights"
+ mtype: "<type \'property\'>"
+ }
+ member_method {
+ name: "__init__"
+ argspec: "args=[\'self\'], varargs=args, keywords=kwargs, defaults=None"
+ }
+ member_method {
+ name: "add_loss"
+ argspec: "args=[\'self\'], varargs=args, keywords=kwargs, defaults=None"
+ }
+ member_method {
+ name: "add_update"
+ argspec: "args=[\'self\', \'updates\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "add_variable"
+ argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'True\', \'None\'], "
+ }
+ member_method {
+ name: "add_weight"
+ argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'synchronization\', \'aggregation\'], varargs=None, keywords=kwargs, defaults=[\'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'VariableSynchronization.AUTO\', \'VariableAggregation.NONE\'], "
+ }
+ member_method {
+ name: "apply"
+ argspec: "args=[\'self\', \'inputs\'], varargs=args, keywords=kwargs, defaults=None"
+ }
+ member_method {
+ name: "build"
+ argspec: "args=[\'self\', \'input_shape\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "call"
+ argspec: "args=[\'self\', \'inputs\', \'training\', \'mask\'], varargs=None, keywords=None, defaults=[\'None\', \'None\'], "
+ }
+ member_method {
+ name: "compile"
+ argspec: "args=[\'self\', \'optimizer\', \'loss\', \'metrics\', \'loss_weights\', \'sample_weight_mode\', \'weighted_metrics\', \'target_tensors\', \'distribute\'], varargs=None, keywords=kwargs, defaults=[\'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'None\'], "
+ }
+ member_method {
+ name: "compute_mask"
+ argspec: "args=[\'self\', \'inputs\', \'mask\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "compute_output_shape"
+ argspec: "args=[\'self\', \'input_shape\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "count_params"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "evaluate"
+ argspec: "args=[\'self\', \'x\', \'y\', \'batch_size\', \'verbose\', \'sample_weight\', \'steps\', \'max_queue_size\', \'workers\', \'use_multiprocessing\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'1\', \'None\', \'None\', \'10\', \'1\', \'False\'], "
+ }
+ member_method {
+ name: "evaluate_generator"
+ argspec: "args=[\'self\', \'generator\', \'steps\', \'max_queue_size\', \'workers\', \'use_multiprocessing\', \'verbose\'], varargs=None, keywords=None, defaults=[\'None\', \'10\', \'1\', \'False\', \'0\'], "
+ }
+ member_method {
+ name: "fit"
+ argspec: "args=[\'self\', \'x\', \'y\', \'batch_size\', \'epochs\', \'verbose\', \'callbacks\', \'validation_split\', \'validation_data\', \'shuffle\', \'class_weight\', \'sample_weight\', \'initial_epoch\', \'steps_per_epoch\', \'validation_steps\', \'max_queue_size\', \'workers\', \'use_multiprocessing\'], varargs=None, keywords=kwargs, defaults=[\'None\', \'None\', \'None\', \'1\', \'1\', \'None\', \'0.0\', \'None\', \'True\', \'None\', \'None\', \'0\', \'None\', \'None\', \'10\', \'1\', \'False\'], "
+ }
+ member_method {
+ name: "fit_generator"
+ argspec: "args=[\'self\', \'generator\', \'steps_per_epoch\', \'epochs\', \'verbose\', \'callbacks\', \'validation_data\', \'validation_steps\', \'class_weight\', \'max_queue_size\', \'workers\', \'use_multiprocessing\', \'shuffle\', \'initial_epoch\'], varargs=None, keywords=None, defaults=[\'None\', \'1\', \'1\', \'None\', \'None\', \'None\', \'None\', \'10\', \'1\', \'False\', \'True\', \'0\'], "
+ }
+ member_method {
+ name: "from_config"
+ argspec: "args=[\'cls\', \'config\', \'custom_objects\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "get_config"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_input_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_input_mask_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_input_shape_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_layer"
+ argspec: "args=[\'self\', \'name\', \'index\'], varargs=None, keywords=None, defaults=[\'None\', \'None\'], "
+ }
+ member_method {
+ name: "get_losses_for"
+ argspec: "args=[\'self\', \'inputs\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_output_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_output_mask_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_output_shape_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_updates_for"
+ argspec: "args=[\'self\', \'inputs\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_weights"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "load_weights"
+ argspec: "args=[\'self\', \'filepath\', \'by_name\'], varargs=None, keywords=None, defaults=[\'False\'], "
+ }
+ member_method {
+ name: "predict"
+ argspec: "args=[\'self\', \'x\', \'batch_size\', \'verbose\', \'steps\', \'max_queue_size\', \'workers\', \'use_multiprocessing\'], varargs=None, keywords=None, defaults=[\'None\', \'0\', \'None\', \'10\', \'1\', \'False\'], "
+ }
+ member_method {
+ name: "predict_generator"
+ argspec: "args=[\'self\', \'generator\', \'steps\', \'max_queue_size\', \'workers\', \'use_multiprocessing\', \'verbose\'], varargs=None, keywords=None, defaults=[\'None\', \'10\', \'1\', \'False\', \'0\'], "
+ }
+ member_method {
+ name: "predict_on_batch"
+ argspec: "args=[\'self\', \'x\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "reset_states"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "save"
+ argspec: "args=[\'self\', \'filepath\', \'overwrite\', \'include_optimizer\'], varargs=None, keywords=None, defaults=[\'True\', \'True\'], "
+ }
+ member_method {
+ name: "save_weights"
+ argspec: "args=[\'self\', \'filepath\', \'overwrite\', \'save_format\'], varargs=None, keywords=None, defaults=[\'True\', \'None\'], "
+ }
+ member_method {
+ name: "set_weights"
+ argspec: "args=[\'self\', \'weights\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "summary"
+ argspec: "args=[\'self\', \'line_length\', \'positions\', \'print_fn\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\'], "
+ }
+ member_method {
+ name: "test_on_batch"
+ argspec: "args=[\'self\', \'x\', \'y\', \'sample_weight\'], varargs=None, keywords=None, defaults=[\'None\', \'None\'], "
+ }
+ member_method {
+ name: "to_json"
+ argspec: "args=[\'self\'], varargs=None, keywords=kwargs, defaults=None"
+ }
+ member_method {
+ name: "to_yaml"
+ argspec: "args=[\'self\'], varargs=None, keywords=kwargs, defaults=None"
+ }
+ member_method {
+ name: "train_on_batch"
+ argspec: "args=[\'self\', \'x\', \'y\', \'sample_weight\', \'class_weight\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\'], "
+ }
+}
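The new Model golden above lists compile with a trailing distribute argument. A minimal sketch of the documented signature with that argument left at its default; the tiny model is illustrative:

import tensorflow as tf

inputs = tf.keras.layers.Input(shape=(16,))
outputs = tf.keras.layers.Dense(1)(inputs)
model = tf.keras.models.Model(inputs=inputs, outputs=outputs)
# distribute defaults to None; a DistributionStrategy could be passed here.
model.compile(optimizer='sgd', loss='mse')
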
diff --git a/tensorflow/tools/api/golden/v1/tensorflow.keras.models.-sequential.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.keras.models.-sequential.pbtxt
new file mode 100644
index 0000000000..8a12ac1ad8
--- /dev/null
+++ b/tensorflow/tools/api/golden/v1/tensorflow.keras.models.-sequential.pbtxt
@@ -0,0 +1,285 @@
+path: "tensorflow.keras.models.Sequential"
+tf_class {
+ is_instance: "<class \'tensorflow.python.keras.engine.sequential.Sequential\'>"
+ is_instance: "<class \'tensorflow.python.keras.engine.training.Model\'>"
+ is_instance: "<class \'tensorflow.python.keras.engine.network.Network\'>"
+ is_instance: "<class \'tensorflow.python.keras.engine.base_layer.Layer\'>"
+ is_instance: "<class \'tensorflow.python.training.checkpointable.base.CheckpointableBase\'>"
+ is_instance: "<type \'object\'>"
+ member {
+ name: "activity_regularizer"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "dtype"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "inbound_nodes"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "input"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "input_mask"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "input_shape"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "input_spec"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "layers"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "losses"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "name"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "non_trainable_variables"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "non_trainable_weights"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "outbound_nodes"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "output"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "output_mask"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "output_shape"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "state_updates"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "stateful"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "trainable_variables"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "trainable_weights"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "updates"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "uses_learning_phase"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "variables"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "weights"
+ mtype: "<type \'property\'>"
+ }
+ member_method {
+ name: "__init__"
+ argspec: "args=[\'self\', \'layers\', \'name\'], varargs=None, keywords=None, defaults=[\'None\', \'None\'], "
+ }
+ member_method {
+ name: "add"
+ argspec: "args=[\'self\', \'layer\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "add_loss"
+ argspec: "args=[\'self\'], varargs=args, keywords=kwargs, defaults=None"
+ }
+ member_method {
+ name: "add_update"
+ argspec: "args=[\'self\', \'updates\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "add_variable"
+ argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'True\', \'None\'], "
+ }
+ member_method {
+ name: "add_weight"
+ argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'synchronization\', \'aggregation\'], varargs=None, keywords=kwargs, defaults=[\'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'VariableSynchronization.AUTO\', \'VariableAggregation.NONE\'], "
+ }
+ member_method {
+ name: "apply"
+ argspec: "args=[\'self\', \'inputs\'], varargs=args, keywords=kwargs, defaults=None"
+ }
+ member_method {
+ name: "build"
+ argspec: "args=[\'self\', \'input_shape\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "call"
+ argspec: "args=[\'self\', \'inputs\', \'training\', \'mask\'], varargs=None, keywords=None, defaults=[\'None\', \'None\'], "
+ }
+ member_method {
+ name: "compile"
+ argspec: "args=[\'self\', \'optimizer\', \'loss\', \'metrics\', \'loss_weights\', \'sample_weight_mode\', \'weighted_metrics\', \'target_tensors\', \'distribute\'], varargs=None, keywords=kwargs, defaults=[\'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'None\'], "
+ }
+ member_method {
+ name: "compute_mask"
+ argspec: "args=[\'self\', \'inputs\', \'mask\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "compute_output_shape"
+ argspec: "args=[\'self\', \'input_shape\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "count_params"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "evaluate"
+ argspec: "args=[\'self\', \'x\', \'y\', \'batch_size\', \'verbose\', \'sample_weight\', \'steps\', \'max_queue_size\', \'workers\', \'use_multiprocessing\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'1\', \'None\', \'None\', \'10\', \'1\', \'False\'], "
+ }
+ member_method {
+ name: "evaluate_generator"
+ argspec: "args=[\'self\', \'generator\', \'steps\', \'max_queue_size\', \'workers\', \'use_multiprocessing\', \'verbose\'], varargs=None, keywords=None, defaults=[\'None\', \'10\', \'1\', \'False\', \'0\'], "
+ }
+ member_method {
+ name: "fit"
+ argspec: "args=[\'self\', \'x\', \'y\', \'batch_size\', \'epochs\', \'verbose\', \'callbacks\', \'validation_split\', \'validation_data\', \'shuffle\', \'class_weight\', \'sample_weight\', \'initial_epoch\', \'steps_per_epoch\', \'validation_steps\', \'max_queue_size\', \'workers\', \'use_multiprocessing\'], varargs=None, keywords=kwargs, defaults=[\'None\', \'None\', \'None\', \'1\', \'1\', \'None\', \'0.0\', \'None\', \'True\', \'None\', \'None\', \'0\', \'None\', \'None\', \'10\', \'1\', \'False\'], "
+ }
+ member_method {
+ name: "fit_generator"
+ argspec: "args=[\'self\', \'generator\', \'steps_per_epoch\', \'epochs\', \'verbose\', \'callbacks\', \'validation_data\', \'validation_steps\', \'class_weight\', \'max_queue_size\', \'workers\', \'use_multiprocessing\', \'shuffle\', \'initial_epoch\'], varargs=None, keywords=None, defaults=[\'None\', \'1\', \'1\', \'None\', \'None\', \'None\', \'None\', \'10\', \'1\', \'False\', \'True\', \'0\'], "
+ }
+ member_method {
+ name: "from_config"
+ argspec: "args=[\'cls\', \'config\', \'custom_objects\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "get_config"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_input_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_input_mask_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_input_shape_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_layer"
+ argspec: "args=[\'self\', \'name\', \'index\'], varargs=None, keywords=None, defaults=[\'None\', \'None\'], "
+ }
+ member_method {
+ name: "get_losses_for"
+ argspec: "args=[\'self\', \'inputs\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_output_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_output_mask_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_output_shape_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_updates_for"
+ argspec: "args=[\'self\', \'inputs\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_weights"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "load_weights"
+ argspec: "args=[\'self\', \'filepath\', \'by_name\'], varargs=None, keywords=None, defaults=[\'False\'], "
+ }
+ member_method {
+ name: "pop"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "predict"
+ argspec: "args=[\'self\', \'x\', \'batch_size\', \'verbose\', \'steps\', \'max_queue_size\', \'workers\', \'use_multiprocessing\'], varargs=None, keywords=None, defaults=[\'None\', \'0\', \'None\', \'10\', \'1\', \'False\'], "
+ }
+ member_method {
+ name: "predict_classes"
+ argspec: "args=[\'self\', \'x\', \'batch_size\', \'verbose\'], varargs=None, keywords=None, defaults=[\'32\', \'0\'], "
+ }
+ member_method {
+ name: "predict_generator"
+ argspec: "args=[\'self\', \'generator\', \'steps\', \'max_queue_size\', \'workers\', \'use_multiprocessing\', \'verbose\'], varargs=None, keywords=None, defaults=[\'None\', \'10\', \'1\', \'False\', \'0\'], "
+ }
+ member_method {
+ name: "predict_on_batch"
+ argspec: "args=[\'self\', \'x\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "predict_proba"
+ argspec: "args=[\'self\', \'x\', \'batch_size\', \'verbose\'], varargs=None, keywords=None, defaults=[\'32\', \'0\'], "
+ }
+ member_method {
+ name: "reset_states"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "save"
+ argspec: "args=[\'self\', \'filepath\', \'overwrite\', \'include_optimizer\'], varargs=None, keywords=None, defaults=[\'True\', \'True\'], "
+ }
+ member_method {
+ name: "save_weights"
+ argspec: "args=[\'self\', \'filepath\', \'overwrite\', \'save_format\'], varargs=None, keywords=None, defaults=[\'True\', \'None\'], "
+ }
+ member_method {
+ name: "set_weights"
+ argspec: "args=[\'self\', \'weights\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "summary"
+ argspec: "args=[\'self\', \'line_length\', \'positions\', \'print_fn\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\'], "
+ }
+ member_method {
+ name: "test_on_batch"
+ argspec: "args=[\'self\', \'x\', \'y\', \'sample_weight\'], varargs=None, keywords=None, defaults=[\'None\', \'None\'], "
+ }
+ member_method {
+ name: "to_json"
+ argspec: "args=[\'self\'], varargs=None, keywords=kwargs, defaults=None"
+ }
+ member_method {
+ name: "to_yaml"
+ argspec: "args=[\'self\'], varargs=None, keywords=kwargs, defaults=None"
+ }
+ member_method {
+ name: "train_on_batch"
+ argspec: "args=[\'self\', \'x\', \'y\', \'sample_weight\', \'class_weight\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\'], "
+ }
+}
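The new Sequential golden above additionally lists the predict_classes and predict_proba helpers that plain Model does not have. A short sketch, assuming random illustrative data:

import numpy as np
import tensorflow as tf

model = tf.keras.models.Sequential(
    [tf.keras.layers.Dense(2, activation='softmax', input_shape=(4,))])
x = np.random.rand(3, 4).astype('float32')
probs = model.predict_proba(x, batch_size=32, verbose=0)      # class probabilities
classes = model.predict_classes(x, batch_size=32, verbose=0)  # argmax of probs
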
diff --git a/tensorflow/tools/api/golden/tensorflow.keras.models.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.keras.models.pbtxt
index 8ba0e7480b..7ad4a32d43 100644
--- a/tensorflow/tools/api/golden/tensorflow.keras.models.pbtxt
+++ b/tensorflow/tools/api/golden/v1/tensorflow.keras.models.pbtxt
@@ -9,6 +9,10 @@ tf_module {
mtype: "<type \'type\'>"
}
member_method {
+ name: "clone_model"
+ argspec: "args=[\'model\', \'input_tensors\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
name: "load_model"
argspec: "args=[\'filepath\', \'custom_objects\', \'compile\'], varargs=None, keywords=None, defaults=[\'None\', \'True\'], "
}
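The models module golden above newly exports clone_model. A minimal sketch; the source model is illustrative:

import tensorflow as tf

inputs = tf.keras.layers.Input(shape=(16,))
outputs = tf.keras.layers.Dense(4)(inputs)
model = tf.keras.models.Model(inputs=inputs, outputs=outputs)
# clone_model(model, input_tensors=None) rebuilds the same architecture with
# freshly initialized weights.
copy = tf.keras.models.clone_model(model, input_tensors=None)
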
diff --git a/tensorflow/tools/api/golden/tensorflow.keras.optimizers.-adadelta.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.keras.optimizers.-adadelta.pbtxt
index 32667cf31e..b9ce154bdd 100644
--- a/tensorflow/tools/api/golden/tensorflow.keras.optimizers.-adadelta.pbtxt
+++ b/tensorflow/tools/api/golden/v1/tensorflow.keras.optimizers.-adadelta.pbtxt
@@ -1,7 +1,7 @@
path: "tensorflow.keras.optimizers.Adadelta"
tf_class {
- is_instance: "<class \'tensorflow.python.keras._impl.keras.optimizers.Adadelta\'>"
- is_instance: "<class \'tensorflow.python.keras._impl.keras.optimizers.Optimizer\'>"
+ is_instance: "<class \'tensorflow.python.keras.optimizers.Adadelta\'>"
+ is_instance: "<class \'tensorflow.python.keras.optimizers.Optimizer\'>"
is_instance: "<type \'object\'>"
member_method {
name: "__init__"
diff --git a/tensorflow/tools/api/golden/tensorflow.keras.optimizers.-adagrad.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.keras.optimizers.-adagrad.pbtxt
index efca59e8e4..d0dc9e37a3 100644
--- a/tensorflow/tools/api/golden/tensorflow.keras.optimizers.-adagrad.pbtxt
+++ b/tensorflow/tools/api/golden/v1/tensorflow.keras.optimizers.-adagrad.pbtxt
@@ -1,7 +1,7 @@
path: "tensorflow.keras.optimizers.Adagrad"
tf_class {
- is_instance: "<class \'tensorflow.python.keras._impl.keras.optimizers.Adagrad\'>"
- is_instance: "<class \'tensorflow.python.keras._impl.keras.optimizers.Optimizer\'>"
+ is_instance: "<class \'tensorflow.python.keras.optimizers.Adagrad\'>"
+ is_instance: "<class \'tensorflow.python.keras.optimizers.Optimizer\'>"
is_instance: "<type \'object\'>"
member_method {
name: "__init__"
diff --git a/tensorflow/tools/api/golden/tensorflow.keras.optimizers.-adam.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.keras.optimizers.-adam.pbtxt
index 5546e2067a..06815fa99a 100644
--- a/tensorflow/tools/api/golden/tensorflow.keras.optimizers.-adam.pbtxt
+++ b/tensorflow/tools/api/golden/v1/tensorflow.keras.optimizers.-adam.pbtxt
@@ -1,7 +1,7 @@
path: "tensorflow.keras.optimizers.Adam"
tf_class {
- is_instance: "<class \'tensorflow.python.keras._impl.keras.optimizers.Adam\'>"
- is_instance: "<class \'tensorflow.python.keras._impl.keras.optimizers.Optimizer\'>"
+ is_instance: "<class \'tensorflow.python.keras.optimizers.Adam\'>"
+ is_instance: "<class \'tensorflow.python.keras.optimizers.Optimizer\'>"
is_instance: "<type \'object\'>"
member_method {
name: "__init__"
diff --git a/tensorflow/tools/api/golden/tensorflow.keras.optimizers.-adamax.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.keras.optimizers.-adamax.pbtxt
index aaa54a1060..47b55fdb44 100644
--- a/tensorflow/tools/api/golden/tensorflow.keras.optimizers.-adamax.pbtxt
+++ b/tensorflow/tools/api/golden/v1/tensorflow.keras.optimizers.-adamax.pbtxt
@@ -1,7 +1,7 @@
path: "tensorflow.keras.optimizers.Adamax"
tf_class {
- is_instance: "<class \'tensorflow.python.keras._impl.keras.optimizers.Adamax\'>"
- is_instance: "<class \'tensorflow.python.keras._impl.keras.optimizers.Optimizer\'>"
+ is_instance: "<class \'tensorflow.python.keras.optimizers.Adamax\'>"
+ is_instance: "<class \'tensorflow.python.keras.optimizers.Optimizer\'>"
is_instance: "<type \'object\'>"
member_method {
name: "__init__"
diff --git a/tensorflow/tools/api/golden/tensorflow.keras.optimizers.-nadam.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.keras.optimizers.-nadam.pbtxt
index 1fada7fd9c..8c63a7dda9 100644
--- a/tensorflow/tools/api/golden/tensorflow.keras.optimizers.-nadam.pbtxt
+++ b/tensorflow/tools/api/golden/v1/tensorflow.keras.optimizers.-nadam.pbtxt
@@ -1,7 +1,7 @@
path: "tensorflow.keras.optimizers.Nadam"
tf_class {
- is_instance: "<class \'tensorflow.python.keras._impl.keras.optimizers.Nadam\'>"
- is_instance: "<class \'tensorflow.python.keras._impl.keras.optimizers.Optimizer\'>"
+ is_instance: "<class \'tensorflow.python.keras.optimizers.Nadam\'>"
+ is_instance: "<class \'tensorflow.python.keras.optimizers.Optimizer\'>"
is_instance: "<type \'object\'>"
member_method {
name: "__init__"
diff --git a/tensorflow/tools/api/golden/tensorflow.keras.optimizers.-optimizer.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.keras.optimizers.-optimizer.pbtxt
index ca47e95228..53d64dae93 100644
--- a/tensorflow/tools/api/golden/tensorflow.keras.optimizers.-optimizer.pbtxt
+++ b/tensorflow/tools/api/golden/v1/tensorflow.keras.optimizers.-optimizer.pbtxt
@@ -1,6 +1,6 @@
path: "tensorflow.keras.optimizers.Optimizer"
tf_class {
- is_instance: "<class \'tensorflow.python.keras._impl.keras.optimizers.Optimizer\'>"
+ is_instance: "<class \'tensorflow.python.keras.optimizers.Optimizer\'>"
is_instance: "<type \'object\'>"
member_method {
name: "__init__"
diff --git a/tensorflow/tools/api/golden/tensorflow.keras.optimizers.-r-m-sprop.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.keras.optimizers.-r-m-sprop.pbtxt
index fd3f97f35d..a1e9b8cceb 100644
--- a/tensorflow/tools/api/golden/tensorflow.keras.optimizers.-r-m-sprop.pbtxt
+++ b/tensorflow/tools/api/golden/v1/tensorflow.keras.optimizers.-r-m-sprop.pbtxt
@@ -1,7 +1,7 @@
path: "tensorflow.keras.optimizers.RMSprop"
tf_class {
- is_instance: "<class \'tensorflow.python.keras._impl.keras.optimizers.RMSprop\'>"
- is_instance: "<class \'tensorflow.python.keras._impl.keras.optimizers.Optimizer\'>"
+ is_instance: "<class \'tensorflow.python.keras.optimizers.RMSprop\'>"
+ is_instance: "<class \'tensorflow.python.keras.optimizers.Optimizer\'>"
is_instance: "<type \'object\'>"
member_method {
name: "__init__"
diff --git a/tensorflow/tools/api/golden/tensorflow.keras.optimizers.-s-g-d.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.keras.optimizers.-s-g-d.pbtxt
index 25adfd3f0b..a67fefb1ba 100644
--- a/tensorflow/tools/api/golden/tensorflow.keras.optimizers.-s-g-d.pbtxt
+++ b/tensorflow/tools/api/golden/v1/tensorflow.keras.optimizers.-s-g-d.pbtxt
@@ -1,7 +1,7 @@
path: "tensorflow.keras.optimizers.SGD"
tf_class {
- is_instance: "<class \'tensorflow.python.keras._impl.keras.optimizers.SGD\'>"
- is_instance: "<class \'tensorflow.python.keras._impl.keras.optimizers.Optimizer\'>"
+ is_instance: "<class \'tensorflow.python.keras.optimizers.SGD\'>"
+ is_instance: "<class \'tensorflow.python.keras.optimizers.Optimizer\'>"
is_instance: "<type \'object\'>"
member_method {
name: "__init__"
diff --git a/tensorflow/tools/api/golden/tensorflow.keras.optimizers.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.keras.optimizers.pbtxt
index 7257b02087..7257b02087 100644
--- a/tensorflow/tools/api/golden/tensorflow.keras.optimizers.pbtxt
+++ b/tensorflow/tools/api/golden/v1/tensorflow.keras.optimizers.pbtxt
diff --git a/tensorflow/tools/api/golden/tensorflow.keras.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.keras.pbtxt
index 754b3b84b0..754b3b84b0 100644
--- a/tensorflow/tools/api/golden/tensorflow.keras.pbtxt
+++ b/tensorflow/tools/api/golden/v1/tensorflow.keras.pbtxt
diff --git a/tensorflow/tools/api/golden/tensorflow.keras.regularizers.-l1-l2.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.keras.regularizers.-l1-l2.pbtxt
index 04dcda3860..a45fb7b55e 100644
--- a/tensorflow/tools/api/golden/tensorflow.keras.regularizers.-l1-l2.pbtxt
+++ b/tensorflow/tools/api/golden/v1/tensorflow.keras.regularizers.-l1-l2.pbtxt
@@ -1,7 +1,7 @@
path: "tensorflow.keras.regularizers.L1L2"
tf_class {
- is_instance: "<class \'tensorflow.python.keras._impl.keras.regularizers.L1L2\'>"
- is_instance: "<class \'tensorflow.python.keras._impl.keras.regularizers.Regularizer\'>"
+ is_instance: "<class \'tensorflow.python.keras.regularizers.L1L2\'>"
+ is_instance: "<class \'tensorflow.python.keras.regularizers.Regularizer\'>"
is_instance: "<type \'object\'>"
member_method {
name: "__init__"
diff --git a/tensorflow/tools/api/golden/tensorflow.keras.regularizers.-regularizer.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.keras.regularizers.-regularizer.pbtxt
index b0a125f238..641001a646 100644
--- a/tensorflow/tools/api/golden/tensorflow.keras.regularizers.-regularizer.pbtxt
+++ b/tensorflow/tools/api/golden/v1/tensorflow.keras.regularizers.-regularizer.pbtxt
@@ -1,6 +1,6 @@
path: "tensorflow.keras.regularizers.Regularizer"
tf_class {
- is_instance: "<class \'tensorflow.python.keras._impl.keras.regularizers.Regularizer\'>"
+ is_instance: "<class \'tensorflow.python.keras.regularizers.Regularizer\'>"
is_instance: "<type \'object\'>"
member_method {
name: "__init__"
diff --git a/tensorflow/tools/api/golden/tensorflow.keras.regularizers.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.keras.regularizers.pbtxt
index bb10d41d70..bb10d41d70 100644
--- a/tensorflow/tools/api/golden/tensorflow.keras.regularizers.pbtxt
+++ b/tensorflow/tools/api/golden/v1/tensorflow.keras.regularizers.pbtxt
diff --git a/tensorflow/tools/api/golden/tensorflow.keras.utils.-custom-object-scope.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.keras.utils.-custom-object-scope.pbtxt
index dda39ed221..109682046b 100644
--- a/tensorflow/tools/api/golden/tensorflow.keras.utils.-custom-object-scope.pbtxt
+++ b/tensorflow/tools/api/golden/v1/tensorflow.keras.utils.-custom-object-scope.pbtxt
@@ -1,6 +1,6 @@
path: "tensorflow.keras.utils.CustomObjectScope"
tf_class {
- is_instance: "<class \'tensorflow.python.keras._impl.keras.utils.generic_utils.CustomObjectScope\'>"
+ is_instance: "<class \'tensorflow.python.keras.utils.generic_utils.CustomObjectScope\'>"
is_instance: "<type \'object\'>"
member_method {
name: "__init__"
diff --git a/tensorflow/tools/api/golden/tensorflow.keras.utils.-generator-enqueuer.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.keras.utils.-generator-enqueuer.pbtxt
index 1c5868e711..939fd547d0 100644
--- a/tensorflow/tools/api/golden/tensorflow.keras.utils.-generator-enqueuer.pbtxt
+++ b/tensorflow/tools/api/golden/v1/tensorflow.keras.utils.-generator-enqueuer.pbtxt
@@ -1,7 +1,7 @@
path: "tensorflow.keras.utils.GeneratorEnqueuer"
tf_class {
- is_instance: "<class \'tensorflow.python.keras._impl.keras.utils.data_utils.GeneratorEnqueuer\'>"
- is_instance: "<class \'tensorflow.python.keras._impl.keras.utils.data_utils.SequenceEnqueuer\'>"
+ is_instance: "<class \'tensorflow.python.keras.utils.data_utils.GeneratorEnqueuer\'>"
+ is_instance: "<class \'tensorflow.python.keras.utils.data_utils.SequenceEnqueuer\'>"
is_instance: "<type \'object\'>"
member_method {
name: "__init__"
diff --git a/tensorflow/tools/api/golden/tensorflow.keras.utils.-h-d-f5-matrix.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.keras.utils.-h-d-f5-matrix.pbtxt
index ce62c8bafc..6b832051a9 100644
--- a/tensorflow/tools/api/golden/tensorflow.keras.utils.-h-d-f5-matrix.pbtxt
+++ b/tensorflow/tools/api/golden/v1/tensorflow.keras.utils.-h-d-f5-matrix.pbtxt
@@ -1,6 +1,6 @@
path: "tensorflow.keras.utils.HDF5Matrix"
tf_class {
- is_instance: "<class \'tensorflow.python.keras._impl.keras.utils.io_utils.HDF5Matrix\'>"
+ is_instance: "<class \'tensorflow.python.keras.utils.io_utils.HDF5Matrix\'>"
is_instance: "<type \'object\'>"
member {
name: "dtype"
diff --git a/tensorflow/tools/api/golden/v1/tensorflow.keras.utils.-ordered-enqueuer.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.keras.utils.-ordered-enqueuer.pbtxt
new file mode 100644
index 0000000000..e7e7d2839b
--- /dev/null
+++ b/tensorflow/tools/api/golden/v1/tensorflow.keras.utils.-ordered-enqueuer.pbtxt
@@ -0,0 +1,26 @@
+path: "tensorflow.keras.utils.OrderedEnqueuer"
+tf_class {
+ is_instance: "<class \'tensorflow.python.keras.utils.data_utils.OrderedEnqueuer\'>"
+ is_instance: "<class \'tensorflow.python.keras.utils.data_utils.SequenceEnqueuer\'>"
+ is_instance: "<type \'object\'>"
+ member_method {
+ name: "__init__"
+ argspec: "args=[\'self\', \'sequence\', \'use_multiprocessing\', \'shuffle\'], varargs=None, keywords=None, defaults=[\'False\', \'False\'], "
+ }
+ member_method {
+ name: "get"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "is_running"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "start"
+ argspec: "args=[\'self\', \'workers\', \'max_queue_size\'], varargs=None, keywords=None, defaults=[\'1\', \'10\'], "
+ }
+ member_method {
+ name: "stop"
+ argspec: "args=[\'self\', \'timeout\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+}
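The new OrderedEnqueuer golden file above only records argspecs. As an illustrative sketch consistent with those argspecs (the RandomBatches Sequence below is hypothetical and not part of the golden file), typical usage looks roughly like this:

    import numpy as np
    import tensorflow as tf

    # Hypothetical Sequence: tf.keras.utils.Sequence only requires
    # __len__ and __getitem__, each index returning one batch.
    class RandomBatches(tf.keras.utils.Sequence):
        def __init__(self, num_batches=8, batch_size=4):
            self.num_batches = num_batches
            self.batch_size = batch_size

        def __len__(self):
            return self.num_batches

        def __getitem__(self, index):
            x = np.random.rand(self.batch_size, 10).astype(np.float32)
            y = np.random.rand(self.batch_size, 1).astype(np.float32)
            return x, y

    # Mirrors the recorded argspecs: __init__(sequence, use_multiprocessing,
    # shuffle), start(workers, max_queue_size), get(), is_running(), stop(timeout).
    enqueuer = tf.keras.utils.OrderedEnqueuer(
        RandomBatches(), use_multiprocessing=False, shuffle=False)
    enqueuer.start(workers=1, max_queue_size=10)
    batches = enqueuer.get()  # generator that yields batches in order
    x, y = next(batches)
    print(x.shape, y.shape, enqueuer.is_running())
    enqueuer.stop(timeout=None)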
diff --git a/tensorflow/tools/api/golden/tensorflow.keras.utils.-progbar.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.keras.utils.-progbar.pbtxt
index 16e1cbe650..be4496e753 100644
--- a/tensorflow/tools/api/golden/tensorflow.keras.utils.-progbar.pbtxt
+++ b/tensorflow/tools/api/golden/v1/tensorflow.keras.utils.-progbar.pbtxt
@@ -1,6 +1,6 @@
path: "tensorflow.keras.utils.Progbar"
tf_class {
- is_instance: "<class \'tensorflow.python.keras._impl.keras.utils.generic_utils.Progbar\'>"
+ is_instance: "<class \'tensorflow.python.keras.utils.generic_utils.Progbar\'>"
is_instance: "<type \'object\'>"
member_method {
name: "__init__"
diff --git a/tensorflow/tools/api/golden/tensorflow.keras.utils.-sequence-enqueuer.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.keras.utils.-sequence-enqueuer.pbtxt
index 5cf2a07b0b..a9e499d100 100644
--- a/tensorflow/tools/api/golden/tensorflow.keras.utils.-sequence-enqueuer.pbtxt
+++ b/tensorflow/tools/api/golden/v1/tensorflow.keras.utils.-sequence-enqueuer.pbtxt
@@ -1,6 +1,6 @@
path: "tensorflow.keras.utils.SequenceEnqueuer"
tf_class {
- is_instance: "<class \'tensorflow.python.keras._impl.keras.utils.data_utils.SequenceEnqueuer\'>"
+ is_instance: "<class \'tensorflow.python.keras.utils.data_utils.SequenceEnqueuer\'>"
is_instance: "<type \'object\'>"
member_method {
name: "__init__"
diff --git a/tensorflow/tools/api/golden/tensorflow.keras.utils.-sequence.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.keras.utils.-sequence.pbtxt
index 5b272253e3..e2dc932dc8 100644
--- a/tensorflow/tools/api/golden/tensorflow.keras.utils.-sequence.pbtxt
+++ b/tensorflow/tools/api/golden/v1/tensorflow.keras.utils.-sequence.pbtxt
@@ -1,6 +1,6 @@
path: "tensorflow.keras.utils.Sequence"
tf_class {
- is_instance: "<class \'tensorflow.python.keras._impl.keras.utils.data_utils.Sequence\'>"
+ is_instance: "<class \'tensorflow.python.keras.utils.data_utils.Sequence\'>"
is_instance: "<type \'object\'>"
member_method {
name: "__init__"
diff --git a/tensorflow/tools/api/golden/tensorflow.keras.utils.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.keras.utils.pbtxt
index 5a446c09d0..81b91d2780 100644
--- a/tensorflow/tools/api/golden/tensorflow.keras.utils.pbtxt
+++ b/tensorflow/tools/api/golden/v1/tensorflow.keras.utils.pbtxt
@@ -13,6 +13,10 @@ tf_module {
mtype: "<type \'type\'>"
}
member {
+ name: "OrderedEnqueuer"
+ mtype: "<type \'type\'>"
+ }
+ member {
name: "Progbar"
mtype: "<type \'type\'>"
}
@@ -45,8 +49,12 @@ tf_module {
argspec: "args=[\'fname\', \'origin\', \'untar\', \'md5_hash\', \'file_hash\', \'cache_subdir\', \'hash_algorithm\', \'extract\', \'archive_format\', \'cache_dir\'], varargs=None, keywords=None, defaults=[\'False\', \'None\', \'None\', \'datasets\', \'auto\', \'False\', \'auto\', \'None\'], "
}
member_method {
+ name: "get_source_inputs"
+ argspec: "args=[\'tensor\', \'layer\', \'node_index\'], varargs=None, keywords=None, defaults=[\'None\', \'None\'], "
+ }
+ member_method {
name: "multi_gpu_model"
- argspec: "args=[\'model\', \'gpus\'], varargs=None, keywords=None, defaults=None"
+ argspec: "args=[\'model\', \'gpus\', \'cpu_merge\', \'cpu_relocation\'], varargs=None, keywords=None, defaults=[\'True\', \'False\'], "
}
member_method {
name: "normalize"
diff --git a/tensorflow/tools/api/golden/tensorflow.keras.wrappers.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.keras.wrappers.pbtxt
index 0b2fac9b7d..0b2fac9b7d 100644
--- a/tensorflow/tools/api/golden/tensorflow.keras.wrappers.pbtxt
+++ b/tensorflow/tools/api/golden/v1/tensorflow.keras.wrappers.pbtxt
diff --git a/tensorflow/tools/api/golden/tensorflow.keras.wrappers.scikit_learn.-keras-classifier.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.keras.wrappers.scikit_learn.-keras-classifier.pbtxt
index 8d200f99fd..67cca3af41 100644
--- a/tensorflow/tools/api/golden/tensorflow.keras.wrappers.scikit_learn.-keras-classifier.pbtxt
+++ b/tensorflow/tools/api/golden/v1/tensorflow.keras.wrappers.scikit_learn.-keras-classifier.pbtxt
@@ -1,7 +1,7 @@
path: "tensorflow.keras.wrappers.scikit_learn.KerasClassifier"
tf_class {
- is_instance: "<class \'tensorflow.python.keras._impl.keras.wrappers.scikit_learn.KerasClassifier\'>"
- is_instance: "<class \'tensorflow.python.keras._impl.keras.wrappers.scikit_learn.BaseWrapper\'>"
+ is_instance: "<class \'tensorflow.python.keras.wrappers.scikit_learn.KerasClassifier\'>"
+ is_instance: "<class \'tensorflow.python.keras.wrappers.scikit_learn.BaseWrapper\'>"
is_instance: "<type \'object\'>"
member_method {
name: "__init__"
diff --git a/tensorflow/tools/api/golden/tensorflow.keras.wrappers.scikit_learn.-keras-regressor.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.keras.wrappers.scikit_learn.-keras-regressor.pbtxt
index 7a971346d8..f4b9b7e277 100644
--- a/tensorflow/tools/api/golden/tensorflow.keras.wrappers.scikit_learn.-keras-regressor.pbtxt
+++ b/tensorflow/tools/api/golden/v1/tensorflow.keras.wrappers.scikit_learn.-keras-regressor.pbtxt
@@ -1,7 +1,7 @@
path: "tensorflow.keras.wrappers.scikit_learn.KerasRegressor"
tf_class {
- is_instance: "<class \'tensorflow.python.keras._impl.keras.wrappers.scikit_learn.KerasRegressor\'>"
- is_instance: "<class \'tensorflow.python.keras._impl.keras.wrappers.scikit_learn.BaseWrapper\'>"
+ is_instance: "<class \'tensorflow.python.keras.wrappers.scikit_learn.KerasRegressor\'>"
+ is_instance: "<class \'tensorflow.python.keras.wrappers.scikit_learn.BaseWrapper\'>"
is_instance: "<type \'object\'>"
member_method {
name: "__init__"
diff --git a/tensorflow/tools/api/golden/tensorflow.keras.wrappers.scikit_learn.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.keras.wrappers.scikit_learn.pbtxt
index fbd4d13387..fbd4d13387 100644
--- a/tensorflow/tools/api/golden/tensorflow.keras.wrappers.scikit_learn.pbtxt
+++ b/tensorflow/tools/api/golden/v1/tensorflow.keras.wrappers.scikit_learn.pbtxt
diff --git a/tensorflow/tools/api/golden/tensorflow.layers.-average-pooling1-d.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.layers.-average-pooling1-d.pbtxt
index 38fd78a5a8..c82e67526b 100644
--- a/tensorflow/tools/api/golden/tensorflow.layers.-average-pooling1-d.pbtxt
+++ b/tensorflow/tools/api/golden/v1/tensorflow.layers.-average-pooling1-d.pbtxt
@@ -1,11 +1,11 @@
path: "tensorflow.layers.AveragePooling1D"
tf_class {
is_instance: "<class \'tensorflow.python.layers.pooling.AveragePooling1D\'>"
- is_instance: "<class \'tensorflow.python.keras._impl.keras.layers.pooling.AveragePooling1D\'>"
- is_instance: "<class \'tensorflow.python.keras._impl.keras.layers.pooling.Pooling1D\'>"
+ is_instance: "<class \'tensorflow.python.keras.layers.pooling.AveragePooling1D\'>"
+ is_instance: "<class \'tensorflow.python.keras.layers.pooling.Pooling1D\'>"
is_instance: "<class \'tensorflow.python.layers.base.Layer\'>"
- is_instance: "<class \'tensorflow.python.keras._impl.keras.engine.base_layer.Layer\'>"
- is_instance: "<class \'tensorflow.python.training.checkpointable.CheckpointableBase\'>"
+ is_instance: "<class \'tensorflow.python.keras.engine.base_layer.Layer\'>"
+ is_instance: "<class \'tensorflow.python.training.checkpointable.base.CheckpointableBase\'>"
is_instance: "<type \'object\'>"
member {
name: "activity_regularizer"
@@ -109,7 +109,7 @@ tf_class {
}
member_method {
name: "add_weight"
- argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'use_resource\', \'partitioner\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'True\', \'None\', \'None\', \'None\'], "
+ argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'use_resource\', \'synchronization\', \'aggregation\', \'partitioner\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'VariableSynchronization.AUTO\', \'VariableAggregation.NONE\', \'None\'], "
}
member_method {
name: "apply"
@@ -117,7 +117,7 @@ tf_class {
}
member_method {
name: "build"
- argspec: "args=[\'self\', \'_\'], varargs=None, keywords=None, defaults=None"
+ argspec: "args=[\'self\', \'input_shape\'], varargs=None, keywords=None, defaults=None"
}
member_method {
name: "call"
diff --git a/tensorflow/tools/api/golden/tensorflow.layers.-average-pooling2-d.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.layers.-average-pooling2-d.pbtxt
index 86a524cc91..1d031cb5f8 100644
--- a/tensorflow/tools/api/golden/tensorflow.layers.-average-pooling2-d.pbtxt
+++ b/tensorflow/tools/api/golden/v1/tensorflow.layers.-average-pooling2-d.pbtxt
@@ -1,11 +1,11 @@
path: "tensorflow.layers.AveragePooling2D"
tf_class {
is_instance: "<class \'tensorflow.python.layers.pooling.AveragePooling2D\'>"
- is_instance: "<class \'tensorflow.python.keras._impl.keras.layers.pooling.AveragePooling2D\'>"
- is_instance: "<class \'tensorflow.python.keras._impl.keras.layers.pooling.Pooling2D\'>"
+ is_instance: "<class \'tensorflow.python.keras.layers.pooling.AveragePooling2D\'>"
+ is_instance: "<class \'tensorflow.python.keras.layers.pooling.Pooling2D\'>"
is_instance: "<class \'tensorflow.python.layers.base.Layer\'>"
- is_instance: "<class \'tensorflow.python.keras._impl.keras.engine.base_layer.Layer\'>"
- is_instance: "<class \'tensorflow.python.training.checkpointable.CheckpointableBase\'>"
+ is_instance: "<class \'tensorflow.python.keras.engine.base_layer.Layer\'>"
+ is_instance: "<class \'tensorflow.python.training.checkpointable.base.CheckpointableBase\'>"
is_instance: "<type \'object\'>"
member {
name: "activity_regularizer"
@@ -109,7 +109,7 @@ tf_class {
}
member_method {
name: "add_weight"
- argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'use_resource\', \'partitioner\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'True\', \'None\', \'None\', \'None\'], "
+ argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'use_resource\', \'synchronization\', \'aggregation\', \'partitioner\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'VariableSynchronization.AUTO\', \'VariableAggregation.NONE\', \'None\'], "
}
member_method {
name: "apply"
@@ -117,7 +117,7 @@ tf_class {
}
member_method {
name: "build"
- argspec: "args=[\'self\', \'_\'], varargs=None, keywords=None, defaults=None"
+ argspec: "args=[\'self\', \'input_shape\'], varargs=None, keywords=None, defaults=None"
}
member_method {
name: "call"
diff --git a/tensorflow/tools/api/golden/tensorflow.layers.-average-pooling3-d.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.layers.-average-pooling3-d.pbtxt
index 8a811fe456..a8dda6655d 100644
--- a/tensorflow/tools/api/golden/tensorflow.layers.-average-pooling3-d.pbtxt
+++ b/tensorflow/tools/api/golden/v1/tensorflow.layers.-average-pooling3-d.pbtxt
@@ -1,11 +1,11 @@
path: "tensorflow.layers.AveragePooling3D"
tf_class {
is_instance: "<class \'tensorflow.python.layers.pooling.AveragePooling3D\'>"
- is_instance: "<class \'tensorflow.python.keras._impl.keras.layers.pooling.AveragePooling3D\'>"
- is_instance: "<class \'tensorflow.python.keras._impl.keras.layers.pooling.Pooling3D\'>"
+ is_instance: "<class \'tensorflow.python.keras.layers.pooling.AveragePooling3D\'>"
+ is_instance: "<class \'tensorflow.python.keras.layers.pooling.Pooling3D\'>"
is_instance: "<class \'tensorflow.python.layers.base.Layer\'>"
- is_instance: "<class \'tensorflow.python.keras._impl.keras.engine.base_layer.Layer\'>"
- is_instance: "<class \'tensorflow.python.training.checkpointable.CheckpointableBase\'>"
+ is_instance: "<class \'tensorflow.python.keras.engine.base_layer.Layer\'>"
+ is_instance: "<class \'tensorflow.python.training.checkpointable.base.CheckpointableBase\'>"
is_instance: "<type \'object\'>"
member {
name: "activity_regularizer"
@@ -109,7 +109,7 @@ tf_class {
}
member_method {
name: "add_weight"
- argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'use_resource\', \'partitioner\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'True\', \'None\', \'None\', \'None\'], "
+ argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'use_resource\', \'synchronization\', \'aggregation\', \'partitioner\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'VariableSynchronization.AUTO\', \'VariableAggregation.NONE\', \'None\'], "
}
member_method {
name: "apply"
@@ -117,7 +117,7 @@ tf_class {
}
member_method {
name: "build"
- argspec: "args=[\'self\', \'_\'], varargs=None, keywords=None, defaults=None"
+ argspec: "args=[\'self\', \'input_shape\'], varargs=None, keywords=None, defaults=None"
}
member_method {
name: "call"
diff --git a/tensorflow/tools/api/golden/tensorflow.layers.-batch-normalization.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.layers.-batch-normalization.pbtxt
index 3923e706be..97f65ed894 100644
--- a/tensorflow/tools/api/golden/tensorflow.layers.-batch-normalization.pbtxt
+++ b/tensorflow/tools/api/golden/v1/tensorflow.layers.-batch-normalization.pbtxt
@@ -1,10 +1,10 @@
path: "tensorflow.layers.BatchNormalization"
tf_class {
is_instance: "<class \'tensorflow.python.layers.normalization.BatchNormalization\'>"
- is_instance: "<class \'tensorflow.python.keras._impl.keras.layers.normalization.BatchNormalization\'>"
+ is_instance: "<class \'tensorflow.python.keras.layers.normalization.BatchNormalization\'>"
is_instance: "<class \'tensorflow.python.layers.base.Layer\'>"
- is_instance: "<class \'tensorflow.python.keras._impl.keras.engine.base_layer.Layer\'>"
- is_instance: "<class \'tensorflow.python.training.checkpointable.CheckpointableBase\'>"
+ is_instance: "<class \'tensorflow.python.keras.engine.base_layer.Layer\'>"
+ is_instance: "<class \'tensorflow.python.training.checkpointable.base.CheckpointableBase\'>"
is_instance: "<type \'object\'>"
member {
name: "activity_regularizer"
@@ -108,7 +108,7 @@ tf_class {
}
member_method {
name: "add_weight"
- argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'use_resource\', \'partitioner\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'True\', \'None\', \'None\', \'None\'], "
+ argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'use_resource\', \'synchronization\', \'aggregation\', \'partitioner\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'VariableSynchronization.AUTO\', \'VariableAggregation.NONE\', \'None\'], "
}
member_method {
name: "apply"
diff --git a/tensorflow/tools/api/golden/tensorflow.layers.-conv1-d.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.layers.-conv1-d.pbtxt
index 7a0a8a2a51..ccd9578f0d 100644
--- a/tensorflow/tools/api/golden/tensorflow.layers.-conv1-d.pbtxt
+++ b/tensorflow/tools/api/golden/v1/tensorflow.layers.-conv1-d.pbtxt
@@ -1,11 +1,11 @@
path: "tensorflow.layers.Conv1D"
tf_class {
is_instance: "<class \'tensorflow.python.layers.convolutional.Conv1D\'>"
- is_instance: "<class \'tensorflow.python.keras._impl.keras.layers.convolutional.Conv1D\'>"
- is_instance: "<class \'tensorflow.python.keras._impl.keras.layers.convolutional.Conv\'>"
+ is_instance: "<class \'tensorflow.python.keras.layers.convolutional.Conv1D\'>"
+ is_instance: "<class \'tensorflow.python.keras.layers.convolutional.Conv\'>"
is_instance: "<class \'tensorflow.python.layers.base.Layer\'>"
- is_instance: "<class \'tensorflow.python.keras._impl.keras.engine.base_layer.Layer\'>"
- is_instance: "<class \'tensorflow.python.training.checkpointable.CheckpointableBase\'>"
+ is_instance: "<class \'tensorflow.python.keras.engine.base_layer.Layer\'>"
+ is_instance: "<class \'tensorflow.python.training.checkpointable.base.CheckpointableBase\'>"
is_instance: "<type \'object\'>"
member {
name: "activity_regularizer"
@@ -109,7 +109,7 @@ tf_class {
}
member_method {
name: "add_weight"
- argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'use_resource\', \'partitioner\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'True\', \'None\', \'None\', \'None\'], "
+ argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'use_resource\', \'synchronization\', \'aggregation\', \'partitioner\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'VariableSynchronization.AUTO\', \'VariableAggregation.NONE\', \'None\'], "
}
member_method {
name: "apply"
diff --git a/tensorflow/tools/api/golden/tensorflow.layers.-conv2-d-transpose.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.layers.-conv2-d-transpose.pbtxt
index 7ed3a65251..9cbb58d721 100644
--- a/tensorflow/tools/api/golden/tensorflow.layers.-conv2-d-transpose.pbtxt
+++ b/tensorflow/tools/api/golden/v1/tensorflow.layers.-conv2-d-transpose.pbtxt
@@ -1,12 +1,12 @@
path: "tensorflow.layers.Conv2DTranspose"
tf_class {
is_instance: "<class \'tensorflow.python.layers.convolutional.Conv2DTranspose\'>"
- is_instance: "<class \'tensorflow.python.keras._impl.keras.layers.convolutional.Conv2DTranspose\'>"
- is_instance: "<class \'tensorflow.python.keras._impl.keras.layers.convolutional.Conv2D\'>"
- is_instance: "<class \'tensorflow.python.keras._impl.keras.layers.convolutional.Conv\'>"
+ is_instance: "<class \'tensorflow.python.keras.layers.convolutional.Conv2DTranspose\'>"
+ is_instance: "<class \'tensorflow.python.keras.layers.convolutional.Conv2D\'>"
+ is_instance: "<class \'tensorflow.python.keras.layers.convolutional.Conv\'>"
is_instance: "<class \'tensorflow.python.layers.base.Layer\'>"
- is_instance: "<class \'tensorflow.python.keras._impl.keras.engine.base_layer.Layer\'>"
- is_instance: "<class \'tensorflow.python.training.checkpointable.CheckpointableBase\'>"
+ is_instance: "<class \'tensorflow.python.keras.engine.base_layer.Layer\'>"
+ is_instance: "<class \'tensorflow.python.training.checkpointable.base.CheckpointableBase\'>"
is_instance: "<type \'object\'>"
member {
name: "activity_regularizer"
@@ -110,7 +110,7 @@ tf_class {
}
member_method {
name: "add_weight"
- argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'use_resource\', \'partitioner\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'True\', \'None\', \'None\', \'None\'], "
+ argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'use_resource\', \'synchronization\', \'aggregation\', \'partitioner\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'VariableSynchronization.AUTO\', \'VariableAggregation.NONE\', \'None\'], "
}
member_method {
name: "apply"
diff --git a/tensorflow/tools/api/golden/tensorflow.layers.-conv2-d.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.layers.-conv2-d.pbtxt
index 23831aa74f..c75ea3911e 100644
--- a/tensorflow/tools/api/golden/tensorflow.layers.-conv2-d.pbtxt
+++ b/tensorflow/tools/api/golden/v1/tensorflow.layers.-conv2-d.pbtxt
@@ -1,11 +1,11 @@
path: "tensorflow.layers.Conv2D"
tf_class {
is_instance: "<class \'tensorflow.python.layers.convolutional.Conv2D\'>"
- is_instance: "<class \'tensorflow.python.keras._impl.keras.layers.convolutional.Conv2D\'>"
- is_instance: "<class \'tensorflow.python.keras._impl.keras.layers.convolutional.Conv\'>"
+ is_instance: "<class \'tensorflow.python.keras.layers.convolutional.Conv2D\'>"
+ is_instance: "<class \'tensorflow.python.keras.layers.convolutional.Conv\'>"
is_instance: "<class \'tensorflow.python.layers.base.Layer\'>"
- is_instance: "<class \'tensorflow.python.keras._impl.keras.engine.base_layer.Layer\'>"
- is_instance: "<class \'tensorflow.python.training.checkpointable.CheckpointableBase\'>"
+ is_instance: "<class \'tensorflow.python.keras.engine.base_layer.Layer\'>"
+ is_instance: "<class \'tensorflow.python.training.checkpointable.base.CheckpointableBase\'>"
is_instance: "<type \'object\'>"
member {
name: "activity_regularizer"
@@ -109,7 +109,7 @@ tf_class {
}
member_method {
name: "add_weight"
- argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'use_resource\', \'partitioner\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'True\', \'None\', \'None\', \'None\'], "
+ argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'use_resource\', \'synchronization\', \'aggregation\', \'partitioner\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'VariableSynchronization.AUTO\', \'VariableAggregation.NONE\', \'None\'], "
}
member_method {
name: "apply"
diff --git a/tensorflow/tools/api/golden/tensorflow.layers.-conv3-d-transpose.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.layers.-conv3-d-transpose.pbtxt
index 9d41a6b099..5dc834e514 100644
--- a/tensorflow/tools/api/golden/tensorflow.layers.-conv3-d-transpose.pbtxt
+++ b/tensorflow/tools/api/golden/v1/tensorflow.layers.-conv3-d-transpose.pbtxt
@@ -1,12 +1,12 @@
path: "tensorflow.layers.Conv3DTranspose"
tf_class {
is_instance: "<class \'tensorflow.python.layers.convolutional.Conv3DTranspose\'>"
- is_instance: "<class \'tensorflow.python.keras._impl.keras.layers.convolutional.Conv3DTranspose\'>"
- is_instance: "<class \'tensorflow.python.keras._impl.keras.layers.convolutional.Conv3D\'>"
- is_instance: "<class \'tensorflow.python.keras._impl.keras.layers.convolutional.Conv\'>"
+ is_instance: "<class \'tensorflow.python.keras.layers.convolutional.Conv3DTranspose\'>"
+ is_instance: "<class \'tensorflow.python.keras.layers.convolutional.Conv3D\'>"
+ is_instance: "<class \'tensorflow.python.keras.layers.convolutional.Conv\'>"
is_instance: "<class \'tensorflow.python.layers.base.Layer\'>"
- is_instance: "<class \'tensorflow.python.keras._impl.keras.engine.base_layer.Layer\'>"
- is_instance: "<class \'tensorflow.python.training.checkpointable.CheckpointableBase\'>"
+ is_instance: "<class \'tensorflow.python.keras.engine.base_layer.Layer\'>"
+ is_instance: "<class \'tensorflow.python.training.checkpointable.base.CheckpointableBase\'>"
is_instance: "<type \'object\'>"
member {
name: "activity_regularizer"
@@ -110,7 +110,7 @@ tf_class {
}
member_method {
name: "add_weight"
- argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'use_resource\', \'partitioner\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'True\', \'None\', \'None\', \'None\'], "
+ argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'use_resource\', \'synchronization\', \'aggregation\', \'partitioner\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'VariableSynchronization.AUTO\', \'VariableAggregation.NONE\', \'None\'], "
}
member_method {
name: "apply"
diff --git a/tensorflow/tools/api/golden/tensorflow.layers.-conv3-d.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.layers.-conv3-d.pbtxt
index 865fe08e63..96ab209874 100644
--- a/tensorflow/tools/api/golden/tensorflow.layers.-conv3-d.pbtxt
+++ b/tensorflow/tools/api/golden/v1/tensorflow.layers.-conv3-d.pbtxt
@@ -1,11 +1,11 @@
path: "tensorflow.layers.Conv3D"
tf_class {
is_instance: "<class \'tensorflow.python.layers.convolutional.Conv3D\'>"
- is_instance: "<class \'tensorflow.python.keras._impl.keras.layers.convolutional.Conv3D\'>"
- is_instance: "<class \'tensorflow.python.keras._impl.keras.layers.convolutional.Conv\'>"
+ is_instance: "<class \'tensorflow.python.keras.layers.convolutional.Conv3D\'>"
+ is_instance: "<class \'tensorflow.python.keras.layers.convolutional.Conv\'>"
is_instance: "<class \'tensorflow.python.layers.base.Layer\'>"
- is_instance: "<class \'tensorflow.python.keras._impl.keras.engine.base_layer.Layer\'>"
- is_instance: "<class \'tensorflow.python.training.checkpointable.CheckpointableBase\'>"
+ is_instance: "<class \'tensorflow.python.keras.engine.base_layer.Layer\'>"
+ is_instance: "<class \'tensorflow.python.training.checkpointable.base.CheckpointableBase\'>"
is_instance: "<type \'object\'>"
member {
name: "activity_regularizer"
@@ -109,7 +109,7 @@ tf_class {
}
member_method {
name: "add_weight"
- argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'use_resource\', \'partitioner\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'True\', \'None\', \'None\', \'None\'], "
+ argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'use_resource\', \'synchronization\', \'aggregation\', \'partitioner\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'VariableSynchronization.AUTO\', \'VariableAggregation.NONE\', \'None\'], "
}
member_method {
name: "apply"
diff --git a/tensorflow/tools/api/golden/tensorflow.layers.-dense.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.layers.-dense.pbtxt
index ee164aae20..7e9656b352 100644
--- a/tensorflow/tools/api/golden/tensorflow.layers.-dense.pbtxt
+++ b/tensorflow/tools/api/golden/v1/tensorflow.layers.-dense.pbtxt
@@ -1,10 +1,10 @@
path: "tensorflow.layers.Dense"
tf_class {
is_instance: "<class \'tensorflow.python.layers.core.Dense\'>"
- is_instance: "<class \'tensorflow.python.keras._impl.keras.layers.core.Dense\'>"
+ is_instance: "<class \'tensorflow.python.keras.layers.core.Dense\'>"
is_instance: "<class \'tensorflow.python.layers.base.Layer\'>"
- is_instance: "<class \'tensorflow.python.keras._impl.keras.engine.base_layer.Layer\'>"
- is_instance: "<class \'tensorflow.python.training.checkpointable.CheckpointableBase\'>"
+ is_instance: "<class \'tensorflow.python.keras.engine.base_layer.Layer\'>"
+ is_instance: "<class \'tensorflow.python.training.checkpointable.base.CheckpointableBase\'>"
is_instance: "<type \'object\'>"
member {
name: "activity_regularizer"
@@ -108,7 +108,7 @@ tf_class {
}
member_method {
name: "add_weight"
- argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'use_resource\', \'partitioner\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'True\', \'None\', \'None\', \'None\'], "
+ argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'use_resource\', \'synchronization\', \'aggregation\', \'partitioner\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'VariableSynchronization.AUTO\', \'VariableAggregation.NONE\', \'None\'], "
}
member_method {
name: "apply"
diff --git a/tensorflow/tools/api/golden/tensorflow.layers.-dropout.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.layers.-dropout.pbtxt
index 8167dc79cd..e9a2269a6e 100644
--- a/tensorflow/tools/api/golden/tensorflow.layers.-dropout.pbtxt
+++ b/tensorflow/tools/api/golden/v1/tensorflow.layers.-dropout.pbtxt
@@ -1,10 +1,10 @@
path: "tensorflow.layers.Dropout"
tf_class {
is_instance: "<class \'tensorflow.python.layers.core.Dropout\'>"
- is_instance: "<class \'tensorflow.python.keras._impl.keras.layers.core.Dropout\'>"
+ is_instance: "<class \'tensorflow.python.keras.layers.core.Dropout\'>"
is_instance: "<class \'tensorflow.python.layers.base.Layer\'>"
- is_instance: "<class \'tensorflow.python.keras._impl.keras.engine.base_layer.Layer\'>"
- is_instance: "<class \'tensorflow.python.training.checkpointable.CheckpointableBase\'>"
+ is_instance: "<class \'tensorflow.python.keras.engine.base_layer.Layer\'>"
+ is_instance: "<class \'tensorflow.python.training.checkpointable.base.CheckpointableBase\'>"
is_instance: "<type \'object\'>"
member {
name: "activity_regularizer"
@@ -108,7 +108,7 @@ tf_class {
}
member_method {
name: "add_weight"
- argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'use_resource\', \'partitioner\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'True\', \'None\', \'None\', \'None\'], "
+ argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'use_resource\', \'synchronization\', \'aggregation\', \'partitioner\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'VariableSynchronization.AUTO\', \'VariableAggregation.NONE\', \'None\'], "
}
member_method {
name: "apply"
@@ -116,7 +116,7 @@ tf_class {
}
member_method {
name: "build"
- argspec: "args=[\'self\', \'_\'], varargs=None, keywords=None, defaults=None"
+ argspec: "args=[\'self\', \'input_shape\'], varargs=None, keywords=None, defaults=None"
}
member_method {
name: "call"
diff --git a/tensorflow/tools/api/golden/tensorflow.layers.-flatten.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.layers.-flatten.pbtxt
index efa4419692..7d2eaaab2a 100644
--- a/tensorflow/tools/api/golden/tensorflow.layers.-flatten.pbtxt
+++ b/tensorflow/tools/api/golden/v1/tensorflow.layers.-flatten.pbtxt
@@ -1,10 +1,10 @@
path: "tensorflow.layers.Flatten"
tf_class {
is_instance: "<class \'tensorflow.python.layers.core.Flatten\'>"
- is_instance: "<class \'tensorflow.python.keras._impl.keras.layers.core.Flatten\'>"
+ is_instance: "<class \'tensorflow.python.keras.layers.core.Flatten\'>"
is_instance: "<class \'tensorflow.python.layers.base.Layer\'>"
- is_instance: "<class \'tensorflow.python.keras._impl.keras.engine.base_layer.Layer\'>"
- is_instance: "<class \'tensorflow.python.training.checkpointable.CheckpointableBase\'>"
+ is_instance: "<class \'tensorflow.python.keras.engine.base_layer.Layer\'>"
+ is_instance: "<class \'tensorflow.python.training.checkpointable.base.CheckpointableBase\'>"
is_instance: "<type \'object\'>"
member {
name: "activity_regularizer"
@@ -92,7 +92,7 @@ tf_class {
}
member_method {
name: "__init__"
- argspec: "args=[\'self\'], varargs=None, keywords=kwargs, defaults=None"
+ argspec: "args=[\'self\', \'data_format\'], varargs=None, keywords=kwargs, defaults=[\'None\'], "
}
member_method {
name: "add_loss"
@@ -108,7 +108,7 @@ tf_class {
}
member_method {
name: "add_weight"
- argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'use_resource\', \'partitioner\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'True\', \'None\', \'None\', \'None\'], "
+ argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'use_resource\', \'synchronization\', \'aggregation\', \'partitioner\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'VariableSynchronization.AUTO\', \'VariableAggregation.NONE\', \'None\'], "
}
member_method {
name: "apply"
@@ -116,7 +116,7 @@ tf_class {
}
member_method {
name: "build"
- argspec: "args=[\'self\', \'_\'], varargs=None, keywords=None, defaults=None"
+ argspec: "args=[\'self\', \'input_shape\'], varargs=None, keywords=None, defaults=None"
}
member_method {
name: "call"
diff --git a/tensorflow/tools/api/golden/tensorflow.layers.-input-spec.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.layers.-input-spec.pbtxt
index 2ff89f0a6f..fd02c919ae 100644
--- a/tensorflow/tools/api/golden/tensorflow.layers.-input-spec.pbtxt
+++ b/tensorflow/tools/api/golden/v1/tensorflow.layers.-input-spec.pbtxt
@@ -1,6 +1,6 @@
path: "tensorflow.layers.InputSpec"
tf_class {
- is_instance: "<class \'tensorflow.python.keras._impl.keras.engine.base_layer.InputSpec\'>"
+ is_instance: "<class \'tensorflow.python.keras.engine.base_layer.InputSpec\'>"
is_instance: "<type \'object\'>"
member_method {
name: "__init__"
diff --git a/tensorflow/tools/api/golden/tensorflow.layers.-layer.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.layers.-layer.pbtxt
index b3a6dfdffa..8bc3eb26e9 100644
--- a/tensorflow/tools/api/golden/tensorflow.layers.-layer.pbtxt
+++ b/tensorflow/tools/api/golden/v1/tensorflow.layers.-layer.pbtxt
@@ -1,8 +1,8 @@
path: "tensorflow.layers.Layer"
tf_class {
is_instance: "<class \'tensorflow.python.layers.base.Layer\'>"
- is_instance: "<class \'tensorflow.python.keras._impl.keras.engine.base_layer.Layer\'>"
- is_instance: "<class \'tensorflow.python.training.checkpointable.CheckpointableBase\'>"
+ is_instance: "<class \'tensorflow.python.keras.engine.base_layer.Layer\'>"
+ is_instance: "<class \'tensorflow.python.training.checkpointable.base.CheckpointableBase\'>"
is_instance: "<type \'object\'>"
member {
name: "activity_regularizer"
@@ -106,7 +106,7 @@ tf_class {
}
member_method {
name: "add_weight"
- argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'use_resource\', \'partitioner\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'True\', \'None\', \'None\', \'None\'], "
+ argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'use_resource\', \'synchronization\', \'aggregation\', \'partitioner\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'VariableSynchronization.AUTO\', \'VariableAggregation.NONE\', \'None\'], "
}
member_method {
name: "apply"
@@ -114,7 +114,7 @@ tf_class {
}
member_method {
name: "build"
- argspec: "args=[\'self\', \'_\'], varargs=None, keywords=None, defaults=None"
+ argspec: "args=[\'self\', \'input_shape\'], varargs=None, keywords=None, defaults=None"
}
member_method {
name: "call"
diff --git a/tensorflow/tools/api/golden/tensorflow.layers.-max-pooling1-d.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.layers.-max-pooling1-d.pbtxt
index cef396489d..6a0dcce56a 100644
--- a/tensorflow/tools/api/golden/tensorflow.layers.-max-pooling1-d.pbtxt
+++ b/tensorflow/tools/api/golden/v1/tensorflow.layers.-max-pooling1-d.pbtxt
@@ -1,11 +1,11 @@
path: "tensorflow.layers.MaxPooling1D"
tf_class {
is_instance: "<class \'tensorflow.python.layers.pooling.MaxPooling1D\'>"
- is_instance: "<class \'tensorflow.python.keras._impl.keras.layers.pooling.MaxPooling1D\'>"
- is_instance: "<class \'tensorflow.python.keras._impl.keras.layers.pooling.Pooling1D\'>"
+ is_instance: "<class \'tensorflow.python.keras.layers.pooling.MaxPooling1D\'>"
+ is_instance: "<class \'tensorflow.python.keras.layers.pooling.Pooling1D\'>"
is_instance: "<class \'tensorflow.python.layers.base.Layer\'>"
- is_instance: "<class \'tensorflow.python.keras._impl.keras.engine.base_layer.Layer\'>"
- is_instance: "<class \'tensorflow.python.training.checkpointable.CheckpointableBase\'>"
+ is_instance: "<class \'tensorflow.python.keras.engine.base_layer.Layer\'>"
+ is_instance: "<class \'tensorflow.python.training.checkpointable.base.CheckpointableBase\'>"
is_instance: "<type \'object\'>"
member {
name: "activity_regularizer"
@@ -109,7 +109,7 @@ tf_class {
}
member_method {
name: "add_weight"
- argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'use_resource\', \'partitioner\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'True\', \'None\', \'None\', \'None\'], "
+ argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'use_resource\', \'synchronization\', \'aggregation\', \'partitioner\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'VariableSynchronization.AUTO\', \'VariableAggregation.NONE\', \'None\'], "
}
member_method {
name: "apply"
@@ -117,7 +117,7 @@ tf_class {
}
member_method {
name: "build"
- argspec: "args=[\'self\', \'_\'], varargs=None, keywords=None, defaults=None"
+ argspec: "args=[\'self\', \'input_shape\'], varargs=None, keywords=None, defaults=None"
}
member_method {
name: "call"
diff --git a/tensorflow/tools/api/golden/tensorflow.layers.-max-pooling2-d.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.layers.-max-pooling2-d.pbtxt
index 565f0c7a79..b6c84edf2a 100644
--- a/tensorflow/tools/api/golden/tensorflow.layers.-max-pooling2-d.pbtxt
+++ b/tensorflow/tools/api/golden/v1/tensorflow.layers.-max-pooling2-d.pbtxt
@@ -1,11 +1,11 @@
path: "tensorflow.layers.MaxPooling2D"
tf_class {
is_instance: "<class \'tensorflow.python.layers.pooling.MaxPooling2D\'>"
- is_instance: "<class \'tensorflow.python.keras._impl.keras.layers.pooling.MaxPooling2D\'>"
- is_instance: "<class \'tensorflow.python.keras._impl.keras.layers.pooling.Pooling2D\'>"
+ is_instance: "<class \'tensorflow.python.keras.layers.pooling.MaxPooling2D\'>"
+ is_instance: "<class \'tensorflow.python.keras.layers.pooling.Pooling2D\'>"
is_instance: "<class \'tensorflow.python.layers.base.Layer\'>"
- is_instance: "<class \'tensorflow.python.keras._impl.keras.engine.base_layer.Layer\'>"
- is_instance: "<class \'tensorflow.python.training.checkpointable.CheckpointableBase\'>"
+ is_instance: "<class \'tensorflow.python.keras.engine.base_layer.Layer\'>"
+ is_instance: "<class \'tensorflow.python.training.checkpointable.base.CheckpointableBase\'>"
is_instance: "<type \'object\'>"
member {
name: "activity_regularizer"
@@ -109,7 +109,7 @@ tf_class {
}
member_method {
name: "add_weight"
- argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'use_resource\', \'partitioner\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'True\', \'None\', \'None\', \'None\'], "
+ argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'use_resource\', \'synchronization\', \'aggregation\', \'partitioner\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'VariableSynchronization.AUTO\', \'VariableAggregation.NONE\', \'None\'], "
}
member_method {
name: "apply"
@@ -117,7 +117,7 @@ tf_class {
}
member_method {
name: "build"
- argspec: "args=[\'self\', \'_\'], varargs=None, keywords=None, defaults=None"
+ argspec: "args=[\'self\', \'input_shape\'], varargs=None, keywords=None, defaults=None"
}
member_method {
name: "call"
diff --git a/tensorflow/tools/api/golden/tensorflow.layers.-max-pooling3-d.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.layers.-max-pooling3-d.pbtxt
index 595ce2eead..062a02fa59 100644
--- a/tensorflow/tools/api/golden/tensorflow.layers.-max-pooling3-d.pbtxt
+++ b/tensorflow/tools/api/golden/v1/tensorflow.layers.-max-pooling3-d.pbtxt
@@ -1,11 +1,11 @@
path: "tensorflow.layers.MaxPooling3D"
tf_class {
is_instance: "<class \'tensorflow.python.layers.pooling.MaxPooling3D\'>"
- is_instance: "<class \'tensorflow.python.keras._impl.keras.layers.pooling.MaxPooling3D\'>"
- is_instance: "<class \'tensorflow.python.keras._impl.keras.layers.pooling.Pooling3D\'>"
+ is_instance: "<class \'tensorflow.python.keras.layers.pooling.MaxPooling3D\'>"
+ is_instance: "<class \'tensorflow.python.keras.layers.pooling.Pooling3D\'>"
is_instance: "<class \'tensorflow.python.layers.base.Layer\'>"
- is_instance: "<class \'tensorflow.python.keras._impl.keras.engine.base_layer.Layer\'>"
- is_instance: "<class \'tensorflow.python.training.checkpointable.CheckpointableBase\'>"
+ is_instance: "<class \'tensorflow.python.keras.engine.base_layer.Layer\'>"
+ is_instance: "<class \'tensorflow.python.training.checkpointable.base.CheckpointableBase\'>"
is_instance: "<type \'object\'>"
member {
name: "activity_regularizer"
@@ -109,7 +109,7 @@ tf_class {
}
member_method {
name: "add_weight"
- argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'use_resource\', \'partitioner\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'True\', \'None\', \'None\', \'None\'], "
+ argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'use_resource\', \'synchronization\', \'aggregation\', \'partitioner\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'VariableSynchronization.AUTO\', \'VariableAggregation.NONE\', \'None\'], "
}
member_method {
name: "apply"
@@ -117,7 +117,7 @@ tf_class {
}
member_method {
name: "build"
- argspec: "args=[\'self\', \'_\'], varargs=None, keywords=None, defaults=None"
+ argspec: "args=[\'self\', \'input_shape\'], varargs=None, keywords=None, defaults=None"
}
member_method {
name: "call"
diff --git a/tensorflow/tools/api/golden/tensorflow.layers.-separable-conv1-d.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.layers.-separable-conv1-d.pbtxt
index ccca96f722..eaad0fb23e 100644
--- a/tensorflow/tools/api/golden/tensorflow.layers.-separable-conv1-d.pbtxt
+++ b/tensorflow/tools/api/golden/v1/tensorflow.layers.-separable-conv1-d.pbtxt
@@ -1,12 +1,12 @@
path: "tensorflow.layers.SeparableConv1D"
tf_class {
is_instance: "<class \'tensorflow.python.layers.convolutional.SeparableConv1D\'>"
- is_instance: "<class \'tensorflow.python.keras._impl.keras.layers.convolutional.SeparableConv1D\'>"
- is_instance: "<class \'tensorflow.python.keras._impl.keras.layers.convolutional.SeparableConv\'>"
- is_instance: "<class \'tensorflow.python.keras._impl.keras.layers.convolutional.Conv\'>"
+ is_instance: "<class \'tensorflow.python.keras.layers.convolutional.SeparableConv1D\'>"
+ is_instance: "<class \'tensorflow.python.keras.layers.convolutional.SeparableConv\'>"
+ is_instance: "<class \'tensorflow.python.keras.layers.convolutional.Conv\'>"
is_instance: "<class \'tensorflow.python.layers.base.Layer\'>"
- is_instance: "<class \'tensorflow.python.keras._impl.keras.engine.base_layer.Layer\'>"
- is_instance: "<class \'tensorflow.python.training.checkpointable.CheckpointableBase\'>"
+ is_instance: "<class \'tensorflow.python.keras.engine.base_layer.Layer\'>"
+ is_instance: "<class \'tensorflow.python.training.checkpointable.base.CheckpointableBase\'>"
is_instance: "<type \'object\'>"
member {
name: "activity_regularizer"
@@ -110,7 +110,7 @@ tf_class {
}
member_method {
name: "add_weight"
- argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'use_resource\', \'partitioner\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'True\', \'None\', \'None\', \'None\'], "
+ argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'use_resource\', \'synchronization\', \'aggregation\', \'partitioner\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'VariableSynchronization.AUTO\', \'VariableAggregation.NONE\', \'None\'], "
}
member_method {
name: "apply"
diff --git a/tensorflow/tools/api/golden/tensorflow.layers.-separable-conv2-d.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.layers.-separable-conv2-d.pbtxt
index 1c99c96182..ece28a8ce9 100644
--- a/tensorflow/tools/api/golden/tensorflow.layers.-separable-conv2-d.pbtxt
+++ b/tensorflow/tools/api/golden/v1/tensorflow.layers.-separable-conv2-d.pbtxt
@@ -1,12 +1,12 @@
path: "tensorflow.layers.SeparableConv2D"
tf_class {
is_instance: "<class \'tensorflow.python.layers.convolutional.SeparableConv2D\'>"
- is_instance: "<class \'tensorflow.python.keras._impl.keras.layers.convolutional.SeparableConv2D\'>"
- is_instance: "<class \'tensorflow.python.keras._impl.keras.layers.convolutional.SeparableConv\'>"
- is_instance: "<class \'tensorflow.python.keras._impl.keras.layers.convolutional.Conv\'>"
+ is_instance: "<class \'tensorflow.python.keras.layers.convolutional.SeparableConv2D\'>"
+ is_instance: "<class \'tensorflow.python.keras.layers.convolutional.SeparableConv\'>"
+ is_instance: "<class \'tensorflow.python.keras.layers.convolutional.Conv\'>"
is_instance: "<class \'tensorflow.python.layers.base.Layer\'>"
- is_instance: "<class \'tensorflow.python.keras._impl.keras.engine.base_layer.Layer\'>"
- is_instance: "<class \'tensorflow.python.training.checkpointable.CheckpointableBase\'>"
+ is_instance: "<class \'tensorflow.python.keras.engine.base_layer.Layer\'>"
+ is_instance: "<class \'tensorflow.python.training.checkpointable.base.CheckpointableBase\'>"
is_instance: "<type \'object\'>"
member {
name: "activity_regularizer"
@@ -110,7 +110,7 @@ tf_class {
}
member_method {
name: "add_weight"
- argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'use_resource\', \'partitioner\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'True\', \'None\', \'None\', \'None\'], "
+ argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'use_resource\', \'synchronization\', \'aggregation\', \'partitioner\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'VariableSynchronization.AUTO\', \'VariableAggregation.NONE\', \'None\'], "
}
member_method {
name: "apply"
diff --git a/tensorflow/tools/api/golden/tensorflow.layers.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.layers.pbtxt
index df74c32e1f..df74c32e1f 100644
--- a/tensorflow/tools/api/golden/tensorflow.layers.pbtxt
+++ b/tensorflow/tools/api/golden/v1/tensorflow.layers.pbtxt
diff --git a/tensorflow/tools/api/golden/v1/tensorflow.linalg.-linear-operator-block-diag.__metaclass__.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.linalg.-linear-operator-block-diag.__metaclass__.pbtxt
new file mode 100644
index 0000000000..b6dee63176
--- /dev/null
+++ b/tensorflow/tools/api/golden/v1/tensorflow.linalg.-linear-operator-block-diag.__metaclass__.pbtxt
@@ -0,0 +1,14 @@
+path: "tensorflow.linalg.LinearOperatorBlockDiag.__metaclass__"
+tf_class {
+ is_instance: "<class \'abc.ABCMeta\'>"
+ member_method {
+ name: "__init__"
+ }
+ member_method {
+ name: "mro"
+ }
+ member_method {
+ name: "register"
+ argspec: "args=[\'cls\', \'subclass\'], varargs=None, keywords=None, defaults=None"
+ }
+}
diff --git a/tensorflow/tools/api/golden/v1/tensorflow.linalg.-linear-operator-block-diag.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.linalg.-linear-operator-block-diag.pbtxt
new file mode 100644
index 0000000000..973705dae2
--- /dev/null
+++ b/tensorflow/tools/api/golden/v1/tensorflow.linalg.-linear-operator-block-diag.pbtxt
@@ -0,0 +1,134 @@
+path: "tensorflow.linalg.LinearOperatorBlockDiag"
+tf_class {
+ is_instance: "<class \'tensorflow.python.ops.linalg.linear_operator_block_diag.LinearOperatorBlockDiag\'>"
+ is_instance: "<class \'tensorflow.python.ops.linalg.linear_operator.LinearOperator\'>"
+ is_instance: "<type \'object\'>"
+ member {
+ name: "batch_shape"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "domain_dimension"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "dtype"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "graph_parents"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "is_non_singular"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "is_positive_definite"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "is_self_adjoint"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "is_square"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "name"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "operators"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "range_dimension"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "shape"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "tensor_rank"
+ mtype: "<type \'property\'>"
+ }
+ member_method {
+ name: "__init__"
+ argspec: "args=[\'self\', \'operators\', \'is_non_singular\', \'is_self_adjoint\', \'is_positive_definite\', \'is_square\', \'name\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'True\', \'None\'], "
+ }
+ member_method {
+ name: "add_to_tensor"
+ argspec: "args=[\'self\', \'x\', \'name\'], varargs=None, keywords=None, defaults=[\'add_to_tensor\'], "
+ }
+ member_method {
+ name: "assert_non_singular"
+ argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'assert_non_singular\'], "
+ }
+ member_method {
+ name: "assert_positive_definite"
+ argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'assert_positive_definite\'], "
+ }
+ member_method {
+ name: "assert_self_adjoint"
+ argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'assert_self_adjoint\'], "
+ }
+ member_method {
+ name: "batch_shape_tensor"
+ argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'batch_shape_tensor\'], "
+ }
+ member_method {
+ name: "determinant"
+ argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'det\'], "
+ }
+ member_method {
+ name: "diag_part"
+ argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'diag_part\'], "
+ }
+ member_method {
+ name: "domain_dimension_tensor"
+ argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'domain_dimension_tensor\'], "
+ }
+ member_method {
+ name: "log_abs_determinant"
+ argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'log_abs_det\'], "
+ }
+ member_method {
+ name: "matmul"
+ argspec: "args=[\'self\', \'x\', \'adjoint\', \'adjoint_arg\', \'name\'], varargs=None, keywords=None, defaults=[\'False\', \'False\', \'matmul\'], "
+ }
+ member_method {
+ name: "matvec"
+ argspec: "args=[\'self\', \'x\', \'adjoint\', \'name\'], varargs=None, keywords=None, defaults=[\'False\', \'matvec\'], "
+ }
+ member_method {
+ name: "range_dimension_tensor"
+ argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'range_dimension_tensor\'], "
+ }
+ member_method {
+ name: "shape_tensor"
+ argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'shape_tensor\'], "
+ }
+ member_method {
+ name: "solve"
+ argspec: "args=[\'self\', \'rhs\', \'adjoint\', \'adjoint_arg\', \'name\'], varargs=None, keywords=None, defaults=[\'False\', \'False\', \'solve\'], "
+ }
+ member_method {
+ name: "solvevec"
+ argspec: "args=[\'self\', \'rhs\', \'adjoint\', \'name\'], varargs=None, keywords=None, defaults=[\'False\', \'solve\'], "
+ }
+ member_method {
+ name: "tensor_rank_tensor"
+ argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'tensor_rank_tensor\'], "
+ }
+ member_method {
+ name: "to_dense"
+ argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'to_dense\'], "
+ }
+ member_method {
+ name: "trace"
+ argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'trace\'], "
+ }
+}
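The golden file above pins down the public surface of the new tf.linalg.LinearOperatorBlockDiag class: it is built from a list of component operators and exposes the usual LinearOperator methods (matmul, solve, determinant, to_dense, ...). As a minimal sketch of how that constructor argspec is meant to be used — the dense example matrices are arbitrary illustrative values, not taken from this change:

    import tensorflow as tf

    # Wrap two small dense blocks as linear operators.
    op_a = tf.linalg.LinearOperatorFullMatrix([[1., 2.], [3., 4.]])
    op_b = tf.linalg.LinearOperatorFullMatrix([[5., 0.], [0., 6.]])

    # Acts like a [4, 4] matrix with op_a and op_b on the diagonal blocks,
    # without ever materializing that matrix.
    block_diag = tf.linalg.LinearOperatorBlockDiag([op_a, op_b])
    y = block_diag.matmul(tf.ones([4, 1]))  # block-wise matmul, result shape [4, 1]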
diff --git a/tensorflow/tools/api/golden/v1/tensorflow.linalg.-linear-operator-circulant.__metaclass__.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.linalg.-linear-operator-circulant.__metaclass__.pbtxt
new file mode 100644
index 0000000000..3b33f3da97
--- /dev/null
+++ b/tensorflow/tools/api/golden/v1/tensorflow.linalg.-linear-operator-circulant.__metaclass__.pbtxt
@@ -0,0 +1,14 @@
+path: "tensorflow.linalg.LinearOperatorCirculant.__metaclass__"
+tf_class {
+ is_instance: "<class \'abc.ABCMeta\'>"
+ member_method {
+ name: "__init__"
+ }
+ member_method {
+ name: "mro"
+ }
+ member_method {
+ name: "register"
+ argspec: "args=[\'cls\', \'subclass\'], varargs=None, keywords=None, defaults=None"
+ }
+}
diff --git a/tensorflow/tools/api/golden/v1/tensorflow.linalg.-linear-operator-circulant.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.linalg.-linear-operator-circulant.pbtxt
new file mode 100644
index 0000000000..de917706d5
--- /dev/null
+++ b/tensorflow/tools/api/golden/v1/tensorflow.linalg.-linear-operator-circulant.pbtxt
@@ -0,0 +1,155 @@
+path: "tensorflow.linalg.LinearOperatorCirculant"
+tf_class {
+ is_instance: "<class \'tensorflow.python.ops.linalg.linear_operator_circulant.LinearOperatorCirculant\'>"
+ is_instance: "<class \'tensorflow.python.ops.linalg.linear_operator_circulant._BaseLinearOperatorCirculant\'>"
+ is_instance: "<class \'tensorflow.python.ops.linalg.linear_operator.LinearOperator\'>"
+ is_instance: "<type \'object\'>"
+ member {
+ name: "batch_shape"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "block_depth"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "block_shape"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "domain_dimension"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "dtype"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "graph_parents"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "is_non_singular"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "is_positive_definite"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "is_self_adjoint"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "is_square"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "name"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "range_dimension"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "shape"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "spectrum"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "tensor_rank"
+ mtype: "<type \'property\'>"
+ }
+ member_method {
+ name: "__init__"
+ argspec: "args=[\'self\', \'spectrum\', \'input_output_dtype\', \'is_non_singular\', \'is_self_adjoint\', \'is_positive_definite\', \'is_square\', \'name\'], varargs=None, keywords=None, defaults=[\"<dtype: \'complex64\'>\", \'None\', \'None\', \'None\', \'True\', \'LinearOperatorCirculant\'], "
+ }
+ member_method {
+ name: "add_to_tensor"
+ argspec: "args=[\'self\', \'x\', \'name\'], varargs=None, keywords=None, defaults=[\'add_to_tensor\'], "
+ }
+ member_method {
+ name: "assert_hermitian_spectrum"
+ argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'assert_hermitian_spectrum\'], "
+ }
+ member_method {
+ name: "assert_non_singular"
+ argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'assert_non_singular\'], "
+ }
+ member_method {
+ name: "assert_positive_definite"
+ argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'assert_positive_definite\'], "
+ }
+ member_method {
+ name: "assert_self_adjoint"
+ argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'assert_self_adjoint\'], "
+ }
+ member_method {
+ name: "batch_shape_tensor"
+ argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'batch_shape_tensor\'], "
+ }
+ member_method {
+ name: "block_shape_tensor"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "convolution_kernel"
+ argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'convolution_kernel\'], "
+ }
+ member_method {
+ name: "determinant"
+ argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'det\'], "
+ }
+ member_method {
+ name: "diag_part"
+ argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'diag_part\'], "
+ }
+ member_method {
+ name: "domain_dimension_tensor"
+ argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'domain_dimension_tensor\'], "
+ }
+ member_method {
+ name: "log_abs_determinant"
+ argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'log_abs_det\'], "
+ }
+ member_method {
+ name: "matmul"
+ argspec: "args=[\'self\', \'x\', \'adjoint\', \'adjoint_arg\', \'name\'], varargs=None, keywords=None, defaults=[\'False\', \'False\', \'matmul\'], "
+ }
+ member_method {
+ name: "matvec"
+ argspec: "args=[\'self\', \'x\', \'adjoint\', \'name\'], varargs=None, keywords=None, defaults=[\'False\', \'matvec\'], "
+ }
+ member_method {
+ name: "range_dimension_tensor"
+ argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'range_dimension_tensor\'], "
+ }
+ member_method {
+ name: "shape_tensor"
+ argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'shape_tensor\'], "
+ }
+ member_method {
+ name: "solve"
+ argspec: "args=[\'self\', \'rhs\', \'adjoint\', \'adjoint_arg\', \'name\'], varargs=None, keywords=None, defaults=[\'False\', \'False\', \'solve\'], "
+ }
+ member_method {
+ name: "solvevec"
+ argspec: "args=[\'self\', \'rhs\', \'adjoint\', \'name\'], varargs=None, keywords=None, defaults=[\'False\', \'solve\'], "
+ }
+ member_method {
+ name: "tensor_rank_tensor"
+ argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'tensor_rank_tensor\'], "
+ }
+ member_method {
+ name: "to_dense"
+ argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'to_dense\'], "
+ }
+ member_method {
+ name: "trace"
+ argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'trace\'], "
+ }
+}
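LinearOperatorCirculant (and its 2-D/3-D variants below) is parameterized by its spectrum; note the complex64 default for input_output_dtype in the __init__ argspec and the extra convolution_kernel / assert_hermitian_spectrum methods. A minimal sketch, with an arbitrary spectrum chosen only for illustration:

    import tensorflow as tf

    # The operator is defined by its spectrum.
    spectrum = tf.constant([6., -1., -1.], dtype=tf.complex64)
    operator = tf.linalg.LinearOperatorCirculant(spectrum)

    kernel = operator.convolution_kernel()  # inverse DFT of the spectrum
    dense = operator.to_dense()             # materialize the [3, 3] matrix if needed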
diff --git a/tensorflow/tools/api/golden/v1/tensorflow.linalg.-linear-operator-circulant2-d.__metaclass__.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.linalg.-linear-operator-circulant2-d.__metaclass__.pbtxt
new file mode 100644
index 0000000000..591bc9631a
--- /dev/null
+++ b/tensorflow/tools/api/golden/v1/tensorflow.linalg.-linear-operator-circulant2-d.__metaclass__.pbtxt
@@ -0,0 +1,14 @@
+path: "tensorflow.linalg.LinearOperatorCirculant2D.__metaclass__"
+tf_class {
+ is_instance: "<class \'abc.ABCMeta\'>"
+ member_method {
+ name: "__init__"
+ }
+ member_method {
+ name: "mro"
+ }
+ member_method {
+ name: "register"
+ argspec: "args=[\'cls\', \'subclass\'], varargs=None, keywords=None, defaults=None"
+ }
+}
diff --git a/tensorflow/tools/api/golden/v1/tensorflow.linalg.-linear-operator-circulant2-d.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.linalg.-linear-operator-circulant2-d.pbtxt
new file mode 100644
index 0000000000..c4e6a21c3a
--- /dev/null
+++ b/tensorflow/tools/api/golden/v1/tensorflow.linalg.-linear-operator-circulant2-d.pbtxt
@@ -0,0 +1,155 @@
+path: "tensorflow.linalg.LinearOperatorCirculant2D"
+tf_class {
+ is_instance: "<class \'tensorflow.python.ops.linalg.linear_operator_circulant.LinearOperatorCirculant2D\'>"
+ is_instance: "<class \'tensorflow.python.ops.linalg.linear_operator_circulant._BaseLinearOperatorCirculant\'>"
+ is_instance: "<class \'tensorflow.python.ops.linalg.linear_operator.LinearOperator\'>"
+ is_instance: "<type \'object\'>"
+ member {
+ name: "batch_shape"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "block_depth"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "block_shape"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "domain_dimension"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "dtype"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "graph_parents"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "is_non_singular"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "is_positive_definite"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "is_self_adjoint"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "is_square"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "name"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "range_dimension"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "shape"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "spectrum"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "tensor_rank"
+ mtype: "<type \'property\'>"
+ }
+ member_method {
+ name: "__init__"
+ argspec: "args=[\'self\', \'spectrum\', \'input_output_dtype\', \'is_non_singular\', \'is_self_adjoint\', \'is_positive_definite\', \'is_square\', \'name\'], varargs=None, keywords=None, defaults=[\"<dtype: \'complex64\'>\", \'None\', \'None\', \'None\', \'True\', \'LinearOperatorCirculant2D\'], "
+ }
+ member_method {
+ name: "add_to_tensor"
+ argspec: "args=[\'self\', \'x\', \'name\'], varargs=None, keywords=None, defaults=[\'add_to_tensor\'], "
+ }
+ member_method {
+ name: "assert_hermitian_spectrum"
+ argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'assert_hermitian_spectrum\'], "
+ }
+ member_method {
+ name: "assert_non_singular"
+ argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'assert_non_singular\'], "
+ }
+ member_method {
+ name: "assert_positive_definite"
+ argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'assert_positive_definite\'], "
+ }
+ member_method {
+ name: "assert_self_adjoint"
+ argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'assert_self_adjoint\'], "
+ }
+ member_method {
+ name: "batch_shape_tensor"
+ argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'batch_shape_tensor\'], "
+ }
+ member_method {
+ name: "block_shape_tensor"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "convolution_kernel"
+ argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'convolution_kernel\'], "
+ }
+ member_method {
+ name: "determinant"
+ argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'det\'], "
+ }
+ member_method {
+ name: "diag_part"
+ argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'diag_part\'], "
+ }
+ member_method {
+ name: "domain_dimension_tensor"
+ argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'domain_dimension_tensor\'], "
+ }
+ member_method {
+ name: "log_abs_determinant"
+ argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'log_abs_det\'], "
+ }
+ member_method {
+ name: "matmul"
+ argspec: "args=[\'self\', \'x\', \'adjoint\', \'adjoint_arg\', \'name\'], varargs=None, keywords=None, defaults=[\'False\', \'False\', \'matmul\'], "
+ }
+ member_method {
+ name: "matvec"
+ argspec: "args=[\'self\', \'x\', \'adjoint\', \'name\'], varargs=None, keywords=None, defaults=[\'False\', \'matvec\'], "
+ }
+ member_method {
+ name: "range_dimension_tensor"
+ argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'range_dimension_tensor\'], "
+ }
+ member_method {
+ name: "shape_tensor"
+ argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'shape_tensor\'], "
+ }
+ member_method {
+ name: "solve"
+ argspec: "args=[\'self\', \'rhs\', \'adjoint\', \'adjoint_arg\', \'name\'], varargs=None, keywords=None, defaults=[\'False\', \'False\', \'solve\'], "
+ }
+ member_method {
+ name: "solvevec"
+ argspec: "args=[\'self\', \'rhs\', \'adjoint\', \'name\'], varargs=None, keywords=None, defaults=[\'False\', \'solve\'], "
+ }
+ member_method {
+ name: "tensor_rank_tensor"
+ argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'tensor_rank_tensor\'], "
+ }
+ member_method {
+ name: "to_dense"
+ argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'to_dense\'], "
+ }
+ member_method {
+ name: "trace"
+ argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'trace\'], "
+ }
+}
diff --git a/tensorflow/tools/api/golden/v1/tensorflow.linalg.-linear-operator-circulant3-d.__metaclass__.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.linalg.-linear-operator-circulant3-d.__metaclass__.pbtxt
new file mode 100644
index 0000000000..d643139a53
--- /dev/null
+++ b/tensorflow/tools/api/golden/v1/tensorflow.linalg.-linear-operator-circulant3-d.__metaclass__.pbtxt
@@ -0,0 +1,14 @@
+path: "tensorflow.linalg.LinearOperatorCirculant3D.__metaclass__"
+tf_class {
+ is_instance: "<class \'abc.ABCMeta\'>"
+ member_method {
+ name: "__init__"
+ }
+ member_method {
+ name: "mro"
+ }
+ member_method {
+ name: "register"
+ argspec: "args=[\'cls\', \'subclass\'], varargs=None, keywords=None, defaults=None"
+ }
+}
diff --git a/tensorflow/tools/api/golden/v1/tensorflow.linalg.-linear-operator-circulant3-d.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.linalg.-linear-operator-circulant3-d.pbtxt
new file mode 100644
index 0000000000..2e085a8e28
--- /dev/null
+++ b/tensorflow/tools/api/golden/v1/tensorflow.linalg.-linear-operator-circulant3-d.pbtxt
@@ -0,0 +1,155 @@
+path: "tensorflow.linalg.LinearOperatorCirculant3D"
+tf_class {
+ is_instance: "<class \'tensorflow.python.ops.linalg.linear_operator_circulant.LinearOperatorCirculant3D\'>"
+ is_instance: "<class \'tensorflow.python.ops.linalg.linear_operator_circulant._BaseLinearOperatorCirculant\'>"
+ is_instance: "<class \'tensorflow.python.ops.linalg.linear_operator.LinearOperator\'>"
+ is_instance: "<type \'object\'>"
+ member {
+ name: "batch_shape"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "block_depth"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "block_shape"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "domain_dimension"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "dtype"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "graph_parents"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "is_non_singular"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "is_positive_definite"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "is_self_adjoint"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "is_square"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "name"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "range_dimension"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "shape"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "spectrum"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "tensor_rank"
+ mtype: "<type \'property\'>"
+ }
+ member_method {
+ name: "__init__"
+ argspec: "args=[\'self\', \'spectrum\', \'input_output_dtype\', \'is_non_singular\', \'is_self_adjoint\', \'is_positive_definite\', \'is_square\', \'name\'], varargs=None, keywords=None, defaults=[\"<dtype: \'complex64\'>\", \'None\', \'None\', \'None\', \'True\', \'LinearOperatorCirculant3D\'], "
+ }
+ member_method {
+ name: "add_to_tensor"
+ argspec: "args=[\'self\', \'x\', \'name\'], varargs=None, keywords=None, defaults=[\'add_to_tensor\'], "
+ }
+ member_method {
+ name: "assert_hermitian_spectrum"
+ argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'assert_hermitian_spectrum\'], "
+ }
+ member_method {
+ name: "assert_non_singular"
+ argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'assert_non_singular\'], "
+ }
+ member_method {
+ name: "assert_positive_definite"
+ argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'assert_positive_definite\'], "
+ }
+ member_method {
+ name: "assert_self_adjoint"
+ argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'assert_self_adjoint\'], "
+ }
+ member_method {
+ name: "batch_shape_tensor"
+ argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'batch_shape_tensor\'], "
+ }
+ member_method {
+ name: "block_shape_tensor"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "convolution_kernel"
+ argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'convolution_kernel\'], "
+ }
+ member_method {
+ name: "determinant"
+ argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'det\'], "
+ }
+ member_method {
+ name: "diag_part"
+ argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'diag_part\'], "
+ }
+ member_method {
+ name: "domain_dimension_tensor"
+ argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'domain_dimension_tensor\'], "
+ }
+ member_method {
+ name: "log_abs_determinant"
+ argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'log_abs_det\'], "
+ }
+ member_method {
+ name: "matmul"
+ argspec: "args=[\'self\', \'x\', \'adjoint\', \'adjoint_arg\', \'name\'], varargs=None, keywords=None, defaults=[\'False\', \'False\', \'matmul\'], "
+ }
+ member_method {
+ name: "matvec"
+ argspec: "args=[\'self\', \'x\', \'adjoint\', \'name\'], varargs=None, keywords=None, defaults=[\'False\', \'matvec\'], "
+ }
+ member_method {
+ name: "range_dimension_tensor"
+ argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'range_dimension_tensor\'], "
+ }
+ member_method {
+ name: "shape_tensor"
+ argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'shape_tensor\'], "
+ }
+ member_method {
+ name: "solve"
+ argspec: "args=[\'self\', \'rhs\', \'adjoint\', \'adjoint_arg\', \'name\'], varargs=None, keywords=None, defaults=[\'False\', \'False\', \'solve\'], "
+ }
+ member_method {
+ name: "solvevec"
+ argspec: "args=[\'self\', \'rhs\', \'adjoint\', \'name\'], varargs=None, keywords=None, defaults=[\'False\', \'solve\'], "
+ }
+ member_method {
+ name: "tensor_rank_tensor"
+ argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'tensor_rank_tensor\'], "
+ }
+ member_method {
+ name: "to_dense"
+ argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'to_dense\'], "
+ }
+ member_method {
+ name: "trace"
+ argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'trace\'], "
+ }
+}
diff --git a/tensorflow/tools/api/golden/tensorflow.linalg.-linear-operator-composition.__metaclass__.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.linalg.-linear-operator-composition.__metaclass__.pbtxt
index 1adbcb41ad..1adbcb41ad 100644
--- a/tensorflow/tools/api/golden/tensorflow.linalg.-linear-operator-composition.__metaclass__.pbtxt
+++ b/tensorflow/tools/api/golden/v1/tensorflow.linalg.-linear-operator-composition.__metaclass__.pbtxt
diff --git a/tensorflow/tools/api/golden/tensorflow.linalg.-linear-operator-composition.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.linalg.-linear-operator-composition.pbtxt
index 42d22bce42..42d22bce42 100644
--- a/tensorflow/tools/api/golden/tensorflow.linalg.-linear-operator-composition.pbtxt
+++ b/tensorflow/tools/api/golden/v1/tensorflow.linalg.-linear-operator-composition.pbtxt
diff --git a/tensorflow/tools/api/golden/tensorflow.linalg.-linear-operator-diag.__metaclass__.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.linalg.-linear-operator-diag.__metaclass__.pbtxt
index 023d90ccdb..023d90ccdb 100644
--- a/tensorflow/tools/api/golden/tensorflow.linalg.-linear-operator-diag.__metaclass__.pbtxt
+++ b/tensorflow/tools/api/golden/v1/tensorflow.linalg.-linear-operator-diag.__metaclass__.pbtxt
diff --git a/tensorflow/tools/api/golden/tensorflow.linalg.-linear-operator-diag.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.linalg.-linear-operator-diag.pbtxt
index d6749fdcec..d6749fdcec 100644
--- a/tensorflow/tools/api/golden/tensorflow.linalg.-linear-operator-diag.pbtxt
+++ b/tensorflow/tools/api/golden/v1/tensorflow.linalg.-linear-operator-diag.pbtxt
diff --git a/tensorflow/tools/api/golden/tensorflow.linalg.-linear-operator-full-matrix.__metaclass__.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.linalg.-linear-operator-full-matrix.__metaclass__.pbtxt
index 381072e76c..381072e76c 100644
--- a/tensorflow/tools/api/golden/tensorflow.linalg.-linear-operator-full-matrix.__metaclass__.pbtxt
+++ b/tensorflow/tools/api/golden/v1/tensorflow.linalg.-linear-operator-full-matrix.__metaclass__.pbtxt
diff --git a/tensorflow/tools/api/golden/tensorflow.linalg.-linear-operator-full-matrix.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.linalg.-linear-operator-full-matrix.pbtxt
index d9f363d133..d9f363d133 100644
--- a/tensorflow/tools/api/golden/tensorflow.linalg.-linear-operator-full-matrix.pbtxt
+++ b/tensorflow/tools/api/golden/v1/tensorflow.linalg.-linear-operator-full-matrix.pbtxt
diff --git a/tensorflow/tools/api/golden/tensorflow.linalg.-linear-operator-identity.__metaclass__.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.linalg.-linear-operator-identity.__metaclass__.pbtxt
index 5d115b35fb..5d115b35fb 100644
--- a/tensorflow/tools/api/golden/tensorflow.linalg.-linear-operator-identity.__metaclass__.pbtxt
+++ b/tensorflow/tools/api/golden/v1/tensorflow.linalg.-linear-operator-identity.__metaclass__.pbtxt
diff --git a/tensorflow/tools/api/golden/tensorflow.linalg.-linear-operator-identity.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.linalg.-linear-operator-identity.pbtxt
index aac7ee31ed..aac7ee31ed 100644
--- a/tensorflow/tools/api/golden/tensorflow.linalg.-linear-operator-identity.pbtxt
+++ b/tensorflow/tools/api/golden/v1/tensorflow.linalg.-linear-operator-identity.pbtxt
diff --git a/tensorflow/tools/api/golden/v1/tensorflow.linalg.-linear-operator-kronecker.__metaclass__.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.linalg.-linear-operator-kronecker.__metaclass__.pbtxt
new file mode 100644
index 0000000000..5c6784dd02
--- /dev/null
+++ b/tensorflow/tools/api/golden/v1/tensorflow.linalg.-linear-operator-kronecker.__metaclass__.pbtxt
@@ -0,0 +1,14 @@
+path: "tensorflow.linalg.LinearOperatorKronecker.__metaclass__"
+tf_class {
+ is_instance: "<class \'abc.ABCMeta\'>"
+ member_method {
+ name: "__init__"
+ }
+ member_method {
+ name: "mro"
+ }
+ member_method {
+ name: "register"
+ argspec: "args=[\'cls\', \'subclass\'], varargs=None, keywords=None, defaults=None"
+ }
+}
diff --git a/tensorflow/tools/api/golden/v1/tensorflow.linalg.-linear-operator-kronecker.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.linalg.-linear-operator-kronecker.pbtxt
new file mode 100644
index 0000000000..c11d390829
--- /dev/null
+++ b/tensorflow/tools/api/golden/v1/tensorflow.linalg.-linear-operator-kronecker.pbtxt
@@ -0,0 +1,134 @@
+path: "tensorflow.linalg.LinearOperatorKronecker"
+tf_class {
+ is_instance: "<class \'tensorflow.python.ops.linalg.linear_operator_kronecker.LinearOperatorKronecker\'>"
+ is_instance: "<class \'tensorflow.python.ops.linalg.linear_operator.LinearOperator\'>"
+ is_instance: "<type \'object\'>"
+ member {
+ name: "batch_shape"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "domain_dimension"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "dtype"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "graph_parents"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "is_non_singular"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "is_positive_definite"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "is_self_adjoint"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "is_square"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "name"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "operators"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "range_dimension"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "shape"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "tensor_rank"
+ mtype: "<type \'property\'>"
+ }
+ member_method {
+ name: "__init__"
+ argspec: "args=[\'self\', \'operators\', \'is_non_singular\', \'is_self_adjoint\', \'is_positive_definite\', \'is_square\', \'name\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'None\', \'None\'], "
+ }
+ member_method {
+ name: "add_to_tensor"
+ argspec: "args=[\'self\', \'x\', \'name\'], varargs=None, keywords=None, defaults=[\'add_to_tensor\'], "
+ }
+ member_method {
+ name: "assert_non_singular"
+ argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'assert_non_singular\'], "
+ }
+ member_method {
+ name: "assert_positive_definite"
+ argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'assert_positive_definite\'], "
+ }
+ member_method {
+ name: "assert_self_adjoint"
+ argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'assert_self_adjoint\'], "
+ }
+ member_method {
+ name: "batch_shape_tensor"
+ argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'batch_shape_tensor\'], "
+ }
+ member_method {
+ name: "determinant"
+ argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'det\'], "
+ }
+ member_method {
+ name: "diag_part"
+ argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'diag_part\'], "
+ }
+ member_method {
+ name: "domain_dimension_tensor"
+ argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'domain_dimension_tensor\'], "
+ }
+ member_method {
+ name: "log_abs_determinant"
+ argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'log_abs_det\'], "
+ }
+ member_method {
+ name: "matmul"
+ argspec: "args=[\'self\', \'x\', \'adjoint\', \'adjoint_arg\', \'name\'], varargs=None, keywords=None, defaults=[\'False\', \'False\', \'matmul\'], "
+ }
+ member_method {
+ name: "matvec"
+ argspec: "args=[\'self\', \'x\', \'adjoint\', \'name\'], varargs=None, keywords=None, defaults=[\'False\', \'matvec\'], "
+ }
+ member_method {
+ name: "range_dimension_tensor"
+ argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'range_dimension_tensor\'], "
+ }
+ member_method {
+ name: "shape_tensor"
+ argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'shape_tensor\'], "
+ }
+ member_method {
+ name: "solve"
+ argspec: "args=[\'self\', \'rhs\', \'adjoint\', \'adjoint_arg\', \'name\'], varargs=None, keywords=None, defaults=[\'False\', \'False\', \'solve\'], "
+ }
+ member_method {
+ name: "solvevec"
+ argspec: "args=[\'self\', \'rhs\', \'adjoint\', \'name\'], varargs=None, keywords=None, defaults=[\'False\', \'solve\'], "
+ }
+ member_method {
+ name: "tensor_rank_tensor"
+ argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'tensor_rank_tensor\'], "
+ }
+ member_method {
+ name: "to_dense"
+ argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'to_dense\'], "
+ }
+ member_method {
+ name: "trace"
+ argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'trace\'], "
+ }
+}
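Like the block-diagonal case, the new LinearOperatorKronecker is assembled from a list of component operators and keeps the factored structure. A minimal sketch, again with made-up dense blocks:

    import tensorflow as tf

    op_a = tf.linalg.LinearOperatorFullMatrix([[1., 2.], [3., 4.]])
    op_b = tf.linalg.LinearOperatorFullMatrix([[0., 1.], [1., 0.]])

    # Behaves like the [4, 4] Kronecker product of op_a and op_b; matmul and
    # solve exploit the factorization rather than forming the product.
    kron = tf.linalg.LinearOperatorKronecker([op_a, op_b])
    y = kron.matvec(tf.ones([4]))  # result shape [4]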
diff --git a/tensorflow/tools/api/golden/tensorflow.linalg.-linear-operator-low-rank-update.__metaclass__.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.linalg.-linear-operator-low-rank-update.__metaclass__.pbtxt
index 1f0d33298a..1f0d33298a 100644
--- a/tensorflow/tools/api/golden/tensorflow.linalg.-linear-operator-low-rank-update.__metaclass__.pbtxt
+++ b/tensorflow/tools/api/golden/v1/tensorflow.linalg.-linear-operator-low-rank-update.__metaclass__.pbtxt
diff --git a/tensorflow/tools/api/golden/tensorflow.linalg.-linear-operator-low-rank-update.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.linalg.-linear-operator-low-rank-update.pbtxt
index 3ee800269e..3ee800269e 100644
--- a/tensorflow/tools/api/golden/tensorflow.linalg.-linear-operator-low-rank-update.pbtxt
+++ b/tensorflow/tools/api/golden/v1/tensorflow.linalg.-linear-operator-low-rank-update.pbtxt
diff --git a/tensorflow/tools/api/golden/tensorflow.linalg.-linear-operator-lower-triangular.__metaclass__.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.linalg.-linear-operator-lower-triangular.__metaclass__.pbtxt
index 2683430f4f..2683430f4f 100644
--- a/tensorflow/tools/api/golden/tensorflow.linalg.-linear-operator-lower-triangular.__metaclass__.pbtxt
+++ b/tensorflow/tools/api/golden/v1/tensorflow.linalg.-linear-operator-lower-triangular.__metaclass__.pbtxt
diff --git a/tensorflow/tools/api/golden/tensorflow.linalg.-linear-operator-lower-triangular.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.linalg.-linear-operator-lower-triangular.pbtxt
index 63a1bc2321..63a1bc2321 100644
--- a/tensorflow/tools/api/golden/tensorflow.linalg.-linear-operator-lower-triangular.pbtxt
+++ b/tensorflow/tools/api/golden/v1/tensorflow.linalg.-linear-operator-lower-triangular.pbtxt
diff --git a/tensorflow/tools/api/golden/tensorflow.linalg.-linear-operator-scaled-identity.__metaclass__.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.linalg.-linear-operator-scaled-identity.__metaclass__.pbtxt
index 38bf7ad586..38bf7ad586 100644
--- a/tensorflow/tools/api/golden/tensorflow.linalg.-linear-operator-scaled-identity.__metaclass__.pbtxt
+++ b/tensorflow/tools/api/golden/v1/tensorflow.linalg.-linear-operator-scaled-identity.__metaclass__.pbtxt
diff --git a/tensorflow/tools/api/golden/tensorflow.linalg.-linear-operator-scaled-identity.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.linalg.-linear-operator-scaled-identity.pbtxt
index e2c5a505a7..e2c5a505a7 100644
--- a/tensorflow/tools/api/golden/tensorflow.linalg.-linear-operator-scaled-identity.pbtxt
+++ b/tensorflow/tools/api/golden/v1/tensorflow.linalg.-linear-operator-scaled-identity.pbtxt
diff --git a/tensorflow/tools/api/golden/v1/tensorflow.linalg.-linear-operator-zeros.__metaclass__.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.linalg.-linear-operator-zeros.__metaclass__.pbtxt
new file mode 100644
index 0000000000..49ff85728f
--- /dev/null
+++ b/tensorflow/tools/api/golden/v1/tensorflow.linalg.-linear-operator-zeros.__metaclass__.pbtxt
@@ -0,0 +1,14 @@
+path: "tensorflow.linalg.LinearOperatorZeros.__metaclass__"
+tf_class {
+ is_instance: "<class \'abc.ABCMeta\'>"
+ member_method {
+ name: "__init__"
+ }
+ member_method {
+ name: "mro"
+ }
+ member_method {
+ name: "register"
+ argspec: "args=[\'cls\', \'subclass\'], varargs=None, keywords=None, defaults=None"
+ }
+}
diff --git a/tensorflow/tools/api/golden/v1/tensorflow.linalg.-linear-operator-zeros.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.linalg.-linear-operator-zeros.pbtxt
new file mode 100644
index 0000000000..a1b0e06b47
--- /dev/null
+++ b/tensorflow/tools/api/golden/v1/tensorflow.linalg.-linear-operator-zeros.pbtxt
@@ -0,0 +1,130 @@
+path: "tensorflow.linalg.LinearOperatorZeros"
+tf_class {
+ is_instance: "<class \'tensorflow.python.ops.linalg.linear_operator_zeros.LinearOperatorZeros\'>"
+ is_instance: "<class \'tensorflow.python.ops.linalg.linear_operator.LinearOperator\'>"
+ is_instance: "<type \'object\'>"
+ member {
+ name: "batch_shape"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "domain_dimension"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "dtype"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "graph_parents"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "is_non_singular"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "is_positive_definite"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "is_self_adjoint"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "is_square"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "name"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "range_dimension"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "shape"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "tensor_rank"
+ mtype: "<type \'property\'>"
+ }
+ member_method {
+ name: "__init__"
+ argspec: "args=[\'self\', \'num_rows\', \'num_columns\', \'batch_shape\', \'dtype\', \'is_non_singular\', \'is_self_adjoint\', \'is_positive_definite\', \'is_square\', \'assert_proper_shapes\', \'name\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'False\', \'True\', \'False\', \'True\', \'False\', \'LinearOperatorZeros\'], "
+ }
+ member_method {
+ name: "add_to_tensor"
+ argspec: "args=[\'self\', \'mat\', \'name\'], varargs=None, keywords=None, defaults=[\'add_to_tensor\'], "
+ }
+ member_method {
+ name: "assert_non_singular"
+ argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'assert_non_singular\'], "
+ }
+ member_method {
+ name: "assert_positive_definite"
+ argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'assert_positive_definite\'], "
+ }
+ member_method {
+ name: "assert_self_adjoint"
+ argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'assert_self_adjoint\'], "
+ }
+ member_method {
+ name: "batch_shape_tensor"
+ argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'batch_shape_tensor\'], "
+ }
+ member_method {
+ name: "determinant"
+ argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'det\'], "
+ }
+ member_method {
+ name: "diag_part"
+ argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'diag_part\'], "
+ }
+ member_method {
+ name: "domain_dimension_tensor"
+ argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'domain_dimension_tensor\'], "
+ }
+ member_method {
+ name: "log_abs_determinant"
+ argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'log_abs_det\'], "
+ }
+ member_method {
+ name: "matmul"
+ argspec: "args=[\'self\', \'x\', \'adjoint\', \'adjoint_arg\', \'name\'], varargs=None, keywords=None, defaults=[\'False\', \'False\', \'matmul\'], "
+ }
+ member_method {
+ name: "matvec"
+ argspec: "args=[\'self\', \'x\', \'adjoint\', \'name\'], varargs=None, keywords=None, defaults=[\'False\', \'matvec\'], "
+ }
+ member_method {
+ name: "range_dimension_tensor"
+ argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'range_dimension_tensor\'], "
+ }
+ member_method {
+ name: "shape_tensor"
+ argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'shape_tensor\'], "
+ }
+ member_method {
+ name: "solve"
+ argspec: "args=[\'self\', \'rhs\', \'adjoint\', \'adjoint_arg\', \'name\'], varargs=None, keywords=None, defaults=[\'False\', \'False\', \'solve\'], "
+ }
+ member_method {
+ name: "solvevec"
+ argspec: "args=[\'self\', \'rhs\', \'adjoint\', \'name\'], varargs=None, keywords=None, defaults=[\'False\', \'solve\'], "
+ }
+ member_method {
+ name: "tensor_rank_tensor"
+ argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'tensor_rank_tensor\'], "
+ }
+ member_method {
+ name: "to_dense"
+ argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'to_dense\'], "
+ }
+ member_method {
+ name: "trace"
+ argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'trace\'], "
+ }
+}
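LinearOperatorZeros takes explicit num_rows/num_columns (plus optional batch_shape and dtype) rather than a list of operators. A minimal sketch with a hypothetical size:

    import tensorflow as tf

    # A [3, 3] operator that behaves like the zero matrix.
    zeros_op = tf.linalg.LinearOperatorZeros(num_rows=3)
    y = zeros_op.matmul(tf.ones([3, 2]))  # zeros of shape [3, 2], no dense allocation
    dense = zeros_op.to_dense()           # [[0., 0., 0.], [0., 0., 0.], [0., 0., 0.]]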
diff --git a/tensorflow/tools/api/golden/tensorflow.linalg.-linear-operator.__metaclass__.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.linalg.-linear-operator.__metaclass__.pbtxt
index 38da809b36..38da809b36 100644
--- a/tensorflow/tools/api/golden/tensorflow.linalg.-linear-operator.__metaclass__.pbtxt
+++ b/tensorflow/tools/api/golden/v1/tensorflow.linalg.-linear-operator.__metaclass__.pbtxt
diff --git a/tensorflow/tools/api/golden/tensorflow.linalg.-linear-operator.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.linalg.-linear-operator.pbtxt
index 6d849dc040..6d849dc040 100644
--- a/tensorflow/tools/api/golden/tensorflow.linalg.-linear-operator.pbtxt
+++ b/tensorflow/tools/api/golden/v1/tensorflow.linalg.-linear-operator.pbtxt
diff --git a/tensorflow/tools/api/golden/tensorflow.linalg.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.linalg.pbtxt
index 1d9c0c0f6d..d979116887 100644
--- a/tensorflow/tools/api/golden/tensorflow.linalg.pbtxt
+++ b/tensorflow/tools/api/golden/v1/tensorflow.linalg.pbtxt
@@ -5,6 +5,22 @@ tf_module {
mtype: "<class \'abc.ABCMeta\'>"
}
member {
+ name: "LinearOperatorBlockDiag"
+ mtype: "<class \'abc.ABCMeta\'>"
+ }
+ member {
+ name: "LinearOperatorCirculant"
+ mtype: "<class \'abc.ABCMeta\'>"
+ }
+ member {
+ name: "LinearOperatorCirculant2D"
+ mtype: "<class \'abc.ABCMeta\'>"
+ }
+ member {
+ name: "LinearOperatorCirculant3D"
+ mtype: "<class \'abc.ABCMeta\'>"
+ }
+ member {
name: "LinearOperatorComposition"
mtype: "<class \'abc.ABCMeta\'>"
}
@@ -21,6 +37,10 @@ tf_module {
mtype: "<class \'abc.ABCMeta\'>"
}
member {
+ name: "LinearOperatorKronecker"
+ mtype: "<class \'abc.ABCMeta\'>"
+ }
+ member {
name: "LinearOperatorLowRankUpdate"
mtype: "<class \'abc.ABCMeta\'>"
}
@@ -32,6 +52,10 @@ tf_module {
name: "LinearOperatorScaledIdentity"
mtype: "<class \'abc.ABCMeta\'>"
}
+ member {
+ name: "LinearOperatorZeros"
+ mtype: "<class \'abc.ABCMeta\'>"
+ }
member_method {
name: "adjoint"
argspec: "args=[\'matrix\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
@@ -49,6 +73,10 @@ tf_module {
argspec: "args=[\'chol\', \'rhs\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
}
member_method {
+ name: "cross"
+ argspec: "args=[\'a\', \'b\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
name: "det"
argspec: "args=[\'input\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
}
@@ -121,6 +149,14 @@ tf_module {
argspec: "args=[\'tensor\', \'full_matrices\', \'compute_uv\', \'name\'], varargs=None, keywords=None, defaults=[\'False\', \'True\', \'None\'], "
}
member_method {
+ name: "tensor_diag"
+ argspec: "args=[\'diagonal\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "tensor_diag_part"
+ argspec: "args=[\'input\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
name: "tensordot"
argspec: "args=[\'a\', \'b\', \'axes\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
}
diff --git a/tensorflow/tools/api/golden/tensorflow.logging.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.logging.pbtxt
index 85bb15455d..85bb15455d 100644
--- a/tensorflow/tools/api/golden/tensorflow.logging.pbtxt
+++ b/tensorflow/tools/api/golden/v1/tensorflow.logging.pbtxt
diff --git a/tensorflow/tools/api/golden/tensorflow.losses.-reduction.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.losses.-reduction.pbtxt
index 258ad5047e..258ad5047e 100644
--- a/tensorflow/tools/api/golden/tensorflow.losses.-reduction.pbtxt
+++ b/tensorflow/tools/api/golden/v1/tensorflow.losses.-reduction.pbtxt
diff --git a/tensorflow/tools/api/golden/tensorflow.losses.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.losses.pbtxt
index c1d190ae11..c1d190ae11 100644
--- a/tensorflow/tools/api/golden/tensorflow.losses.pbtxt
+++ b/tensorflow/tools/api/golden/v1/tensorflow.losses.pbtxt
diff --git a/tensorflow/tools/api/golden/v1/tensorflow.manip.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.manip.pbtxt
new file mode 100644
index 0000000000..9add462396
--- /dev/null
+++ b/tensorflow/tools/api/golden/v1/tensorflow.manip.pbtxt
@@ -0,0 +1,35 @@
+path: "tensorflow.manip"
+tf_module {
+ member_method {
+ name: "batch_to_space_nd"
+ argspec: "args=[\'input\', \'block_shape\', \'crops\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "gather_nd"
+ argspec: "args=[\'params\', \'indices\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "reshape"
+ argspec: "args=[\'tensor\', \'shape\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "reverse"
+ argspec: "args=[\'tensor\', \'axis\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "roll"
+ argspec: "args=[\'input\', \'shift\', \'axis\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "scatter_nd"
+ argspec: "args=[\'indices\', \'updates\', \'shape\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "space_to_batch_nd"
+ argspec: "args=[\'input\', \'block_shape\', \'paddings\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "tile"
+ argspec: "args=[\'input\', \'multiples\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+}
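The new tensorflow.manip module groups the array-manipulation ops listed above under one namespace. A quick sketch of a few of them; the literal values are only illustrative, and the commented results are what the tensors evaluate to:

    import tensorflow as tf

    x = tf.constant([1, 2, 3, 4, 5])
    tf.manip.roll(x, shift=2, axis=0)             # [4, 5, 1, 2, 3]
    tf.manip.reshape(tf.range(6), [2, 3])         # [[0, 1, 2], [3, 4, 5]]
    tf.manip.tile(tf.constant([[1, 2]]), [2, 1])  # [[1, 2], [1, 2]]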
diff --git a/tensorflow/tools/api/golden/v1/tensorflow.math.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.math.pbtxt
new file mode 100644
index 0000000000..a308c76ebc
--- /dev/null
+++ b/tensorflow/tools/api/golden/v1/tensorflow.math.pbtxt
@@ -0,0 +1,239 @@
+path: "tensorflow.math"
+tf_module {
+ member_method {
+ name: "acos"
+ argspec: "args=[\'x\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "acosh"
+ argspec: "args=[\'x\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "add"
+ argspec: "args=[\'x\', \'y\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "asin"
+ argspec: "args=[\'x\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "asinh"
+ argspec: "args=[\'x\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "atan"
+ argspec: "args=[\'x\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "atan2"
+ argspec: "args=[\'y\', \'x\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "atanh"
+ argspec: "args=[\'x\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "bessel_i0"
+ argspec: "args=[\'x\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "bessel_i0e"
+ argspec: "args=[\'x\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "bessel_i1"
+ argspec: "args=[\'x\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "bessel_i1e"
+ argspec: "args=[\'x\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "betainc"
+ argspec: "args=[\'a\', \'b\', \'x\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "ceil"
+ argspec: "args=[\'x\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "cos"
+ argspec: "args=[\'x\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "cosh"
+ argspec: "args=[\'x\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "digamma"
+ argspec: "args=[\'x\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "equal"
+ argspec: "args=[\'x\', \'y\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "erfc"
+ argspec: "args=[\'x\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "exp"
+ argspec: "args=[\'x\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "expm1"
+ argspec: "args=[\'x\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "floor"
+ argspec: "args=[\'x\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "greater"
+ argspec: "args=[\'x\', \'y\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "greater_equal"
+ argspec: "args=[\'x\', \'y\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "igamma"
+ argspec: "args=[\'a\', \'x\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "igammac"
+ argspec: "args=[\'a\', \'x\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "invert_permutation"
+ argspec: "args=[\'x\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "less"
+ argspec: "args=[\'x\', \'y\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "less_equal"
+ argspec: "args=[\'x\', \'y\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "lgamma"
+ argspec: "args=[\'x\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "log"
+ argspec: "args=[\'x\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "log1p"
+ argspec: "args=[\'x\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "logical_and"
+ argspec: "args=[\'x\', \'y\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "logical_not"
+ argspec: "args=[\'x\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "logical_or"
+ argspec: "args=[\'x\', \'y\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "maximum"
+ argspec: "args=[\'x\', \'y\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "minimum"
+ argspec: "args=[\'x\', \'y\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "not_equal"
+ argspec: "args=[\'x\', \'y\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "polygamma"
+ argspec: "args=[\'a\', \'x\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "polyval"
+ argspec: "args=[\'coeffs\', \'x\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "reciprocal"
+ argspec: "args=[\'x\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "rint"
+ argspec: "args=[\'x\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "rsqrt"
+ argspec: "args=[\'x\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "segment_max"
+ argspec: "args=[\'data\', \'segment_ids\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "segment_mean"
+ argspec: "args=[\'data\', \'segment_ids\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "segment_min"
+ argspec: "args=[\'data\', \'segment_ids\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "segment_prod"
+ argspec: "args=[\'data\', \'segment_ids\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "segment_sum"
+ argspec: "args=[\'data\', \'segment_ids\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "sin"
+ argspec: "args=[\'x\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "sinh"
+ argspec: "args=[\'x\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "softplus"
+ argspec: "args=[\'features\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "softsign"
+ argspec: "args=[\'features\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "squared_difference"
+ argspec: "args=[\'x\', \'y\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "tan"
+ argspec: "args=[\'x\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "unsorted_segment_max"
+ argspec: "args=[\'data\', \'segment_ids\', \'num_segments\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "unsorted_segment_min"
+ argspec: "args=[\'data\', \'segment_ids\', \'num_segments\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "unsorted_segment_prod"
+ argspec: "args=[\'data\', \'segment_ids\', \'num_segments\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "unsorted_segment_sum"
+ argspec: "args=[\'data\', \'segment_ids\', \'num_segments\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "zeta"
+ argspec: "args=[\'x\', \'q\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+}
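Similarly, tensorflow.math collects the elementwise and segment ops above into a dedicated namespace; most entries mirror existing top-level ops. A short sketch with illustrative inputs and the evaluated results in comments:

    import tensorflow as tf

    x = tf.constant([1.0, 4.0, 9.0])
    tf.math.rsqrt(x)   # [1.0, 0.5, 0.333...]
    tf.math.log1p(x)   # log(1 + x), elementwise

    data = tf.constant([5, 1, 7, 2, 3, 4])
    segment_ids = tf.constant([0, 0, 0, 1, 2, 2])  # must be sorted for segment_sum
    tf.math.segment_sum(data, segment_ids)         # [13, 2, 7]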
diff --git a/tensorflow/tools/api/golden/tensorflow.metrics.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.metrics.pbtxt
index e9b996c9f5..e9b996c9f5 100644
--- a/tensorflow/tools/api/golden/tensorflow.metrics.pbtxt
+++ b/tensorflow/tools/api/golden/v1/tensorflow.metrics.pbtxt
diff --git a/tensorflow/tools/api/golden/tensorflow.name_scope.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.name_scope.pbtxt
index 8041897013..8041897013 100644
--- a/tensorflow/tools/api/golden/tensorflow.name_scope.pbtxt
+++ b/tensorflow/tools/api/golden/v1/tensorflow.name_scope.pbtxt
diff --git a/tensorflow/tools/api/golden/tensorflow.nn.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.nn.pbtxt
index 455590d866..d9e5b0d0fc 100644
--- a/tensorflow/tools/api/golden/tensorflow.nn.pbtxt
+++ b/tensorflow/tools/api/golden/v1/tensorflow.nn.pbtxt
@@ -261,6 +261,10 @@ tf_module {
argspec: "args=[\'x\', \'weights\', \'biases\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
}
member_method {
+ name: "safe_embedding_lookup_sparse"
+ argspec: "args=[\'embedding_weights\', \'sparse_ids\', \'sparse_weights\', \'combiner\', \'default_id\', \'name\', \'partition_strategy\', \'max_norm\'], varargs=None, keywords=None, defaults=[\'None\', \'mean\', \'None\', \'None\', \'div\', \'None\'], "
+ }
+ member_method {
name: "sampled_softmax_loss"
argspec: "args=[\'weights\', \'biases\', \'labels\', \'inputs\', \'num_sampled\', \'num_classes\', \'num_true\', \'sampled_values\', \'remove_accidental_hits\', \'partition_strategy\', \'name\', \'seed\'], varargs=None, keywords=None, defaults=[\'1\', \'None\', \'True\', \'mod\', \'sampled_softmax_loss\', \'None\'], "
}
diff --git a/tensorflow/tools/api/golden/tensorflow.nn.rnn_cell.-basic-l-s-t-m-cell.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.nn.rnn_cell.-basic-l-s-t-m-cell.pbtxt
index f909cd8756..88b8f37c4f 100644
--- a/tensorflow/tools/api/golden/tensorflow.nn.rnn_cell.-basic-l-s-t-m-cell.pbtxt
+++ b/tensorflow/tools/api/golden/v1/tensorflow.nn.rnn_cell.-basic-l-s-t-m-cell.pbtxt
@@ -4,8 +4,8 @@ tf_class {
is_instance: "<class \'tensorflow.python.ops.rnn_cell_impl.LayerRNNCell\'>"
is_instance: "<class \'tensorflow.python.ops.rnn_cell_impl.RNNCell\'>"
is_instance: "<class \'tensorflow.python.layers.base.Layer\'>"
- is_instance: "<class \'tensorflow.python.keras._impl.keras.engine.base_layer.Layer\'>"
- is_instance: "<class \'tensorflow.python.training.checkpointable.CheckpointableBase\'>"
+ is_instance: "<class \'tensorflow.python.keras.engine.base_layer.Layer\'>"
+ is_instance: "<class \'tensorflow.python.training.checkpointable.base.CheckpointableBase\'>"
is_instance: "<type \'object\'>"
member {
name: "activity_regularizer"
@@ -101,7 +101,7 @@ tf_class {
}
member_method {
name: "__init__"
- argspec: "args=[\'self\', \'num_units\', \'forget_bias\', \'state_is_tuple\', \'activation\', \'reuse\', \'name\'], varargs=None, keywords=None, defaults=[\'1.0\', \'True\', \'None\', \'None\', \'None\'], "
+ argspec: "args=[\'self\', \'num_units\', \'forget_bias\', \'state_is_tuple\', \'activation\', \'reuse\', \'name\', \'dtype\'], varargs=None, keywords=kwargs, defaults=[\'1.0\', \'True\', \'None\', \'None\', \'None\', \'None\'], "
}
member_method {
name: "add_loss"
@@ -117,7 +117,7 @@ tf_class {
}
member_method {
name: "add_weight"
- argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'use_resource\', \'partitioner\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'True\', \'None\', \'None\', \'None\'], "
+ argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'use_resource\', \'synchronization\', \'aggregation\', \'partitioner\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'VariableSynchronization.AUTO\', \'VariableAggregation.NONE\', \'None\'], "
}
member_method {
name: "apply"
@@ -125,7 +125,7 @@ tf_class {
}
member_method {
name: "build"
- argspec: "args=[\'self\', \'inputs_shape\'], varargs=None, keywords=None, defaults=None"
+ argspec: "args=[\'instance\', \'input_shape\'], varargs=None, keywords=None, defaults=None"
}
member_method {
name: "call"
@@ -152,6 +152,10 @@ tf_class {
argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
}
member_method {
+ name: "get_initial_state"
+ argspec: "args=[\'self\', \'inputs\', \'batch_size\', \'dtype\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\'], "
+ }
+ member_method {
name: "get_input_at"
argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
}
diff --git a/tensorflow/tools/api/golden/tensorflow.nn.rnn_cell.-basic-r-n-n-cell.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.nn.rnn_cell.-basic-r-n-n-cell.pbtxt
index 173d2eae63..a4483fefa2 100644
--- a/tensorflow/tools/api/golden/tensorflow.nn.rnn_cell.-basic-r-n-n-cell.pbtxt
+++ b/tensorflow/tools/api/golden/v1/tensorflow.nn.rnn_cell.-basic-r-n-n-cell.pbtxt
@@ -4,8 +4,8 @@ tf_class {
is_instance: "<class \'tensorflow.python.ops.rnn_cell_impl.LayerRNNCell\'>"
is_instance: "<class \'tensorflow.python.ops.rnn_cell_impl.RNNCell\'>"
is_instance: "<class \'tensorflow.python.layers.base.Layer\'>"
- is_instance: "<class \'tensorflow.python.keras._impl.keras.engine.base_layer.Layer\'>"
- is_instance: "<class \'tensorflow.python.training.checkpointable.CheckpointableBase\'>"
+ is_instance: "<class \'tensorflow.python.keras.engine.base_layer.Layer\'>"
+ is_instance: "<class \'tensorflow.python.training.checkpointable.base.CheckpointableBase\'>"
is_instance: "<type \'object\'>"
member {
name: "activity_regularizer"
@@ -101,7 +101,7 @@ tf_class {
}
member_method {
name: "__init__"
- argspec: "args=[\'self\', \'num_units\', \'activation\', \'reuse\', \'name\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\'], "
+ argspec: "args=[\'self\', \'num_units\', \'activation\', \'reuse\', \'name\', \'dtype\'], varargs=None, keywords=kwargs, defaults=[\'None\', \'None\', \'None\', \'None\'], "
}
member_method {
name: "add_loss"
@@ -117,7 +117,7 @@ tf_class {
}
member_method {
name: "add_weight"
- argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'use_resource\', \'partitioner\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'True\', \'None\', \'None\', \'None\'], "
+ argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'use_resource\', \'synchronization\', \'aggregation\', \'partitioner\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'VariableSynchronization.AUTO\', \'VariableAggregation.NONE\', \'None\'], "
}
member_method {
name: "apply"
@@ -125,7 +125,7 @@ tf_class {
}
member_method {
name: "build"
- argspec: "args=[\'self\', \'inputs_shape\'], varargs=None, keywords=None, defaults=None"
+ argspec: "args=[\'instance\', \'input_shape\'], varargs=None, keywords=None, defaults=None"
}
member_method {
name: "call"
@@ -152,6 +152,10 @@ tf_class {
argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
}
member_method {
+ name: "get_initial_state"
+ argspec: "args=[\'self\', \'inputs\', \'batch_size\', \'dtype\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\'], "
+ }
+ member_method {
name: "get_input_at"
argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
}
diff --git a/tensorflow/tools/api/golden/tensorflow.nn.rnn_cell.-device-wrapper.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.nn.rnn_cell.-device-wrapper.pbtxt
index 3c3e382297..381c4975d7 100644
--- a/tensorflow/tools/api/golden/tensorflow.nn.rnn_cell.-device-wrapper.pbtxt
+++ b/tensorflow/tools/api/golden/v1/tensorflow.nn.rnn_cell.-device-wrapper.pbtxt
@@ -3,8 +3,8 @@ tf_class {
is_instance: "<class \'tensorflow.python.ops.rnn_cell_impl.DeviceWrapper\'>"
is_instance: "<class \'tensorflow.python.ops.rnn_cell_impl.RNNCell\'>"
is_instance: "<class \'tensorflow.python.layers.base.Layer\'>"
- is_instance: "<class \'tensorflow.python.keras._impl.keras.engine.base_layer.Layer\'>"
- is_instance: "<class \'tensorflow.python.training.checkpointable.CheckpointableBase\'>"
+ is_instance: "<class \'tensorflow.python.keras.engine.base_layer.Layer\'>"
+ is_instance: "<class \'tensorflow.python.training.checkpointable.base.CheckpointableBase\'>"
is_instance: "<type \'object\'>"
member {
name: "activity_regularizer"
@@ -116,7 +116,7 @@ tf_class {
}
member_method {
name: "add_weight"
- argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'use_resource\', \'partitioner\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'True\', \'None\', \'None\', \'None\'], "
+ argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'use_resource\', \'synchronization\', \'aggregation\', \'partitioner\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'VariableSynchronization.AUTO\', \'VariableAggregation.NONE\', \'None\'], "
}
member_method {
name: "apply"
@@ -151,6 +151,10 @@ tf_class {
argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
}
member_method {
+ name: "get_initial_state"
+ argspec: "args=[\'self\', \'inputs\', \'batch_size\', \'dtype\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\'], "
+ }
+ member_method {
name: "get_input_at"
argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
}
diff --git a/tensorflow/tools/api/golden/tensorflow.nn.rnn_cell.-dropout-wrapper.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.nn.rnn_cell.-dropout-wrapper.pbtxt
index db16660f11..912365a28b 100644
--- a/tensorflow/tools/api/golden/tensorflow.nn.rnn_cell.-dropout-wrapper.pbtxt
+++ b/tensorflow/tools/api/golden/v1/tensorflow.nn.rnn_cell.-dropout-wrapper.pbtxt
@@ -3,8 +3,8 @@ tf_class {
is_instance: "<class \'tensorflow.python.ops.rnn_cell_impl.DropoutWrapper\'>"
is_instance: "<class \'tensorflow.python.ops.rnn_cell_impl.RNNCell\'>"
is_instance: "<class \'tensorflow.python.layers.base.Layer\'>"
- is_instance: "<class \'tensorflow.python.keras._impl.keras.engine.base_layer.Layer\'>"
- is_instance: "<class \'tensorflow.python.training.checkpointable.CheckpointableBase\'>"
+ is_instance: "<class \'tensorflow.python.keras.engine.base_layer.Layer\'>"
+ is_instance: "<class \'tensorflow.python.training.checkpointable.base.CheckpointableBase\'>"
is_instance: "<type \'object\'>"
member {
name: "activity_regularizer"
@@ -120,7 +120,7 @@ tf_class {
}
member_method {
name: "add_weight"
- argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'use_resource\', \'partitioner\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'True\', \'None\', \'None\', \'None\'], "
+ argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'use_resource\', \'synchronization\', \'aggregation\', \'partitioner\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'VariableSynchronization.AUTO\', \'VariableAggregation.NONE\', \'None\'], "
}
member_method {
name: "apply"
@@ -155,6 +155,10 @@ tf_class {
argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
}
member_method {
+ name: "get_initial_state"
+ argspec: "args=[\'self\', \'inputs\', \'batch_size\', \'dtype\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\'], "
+ }
+ member_method {
name: "get_input_at"
argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
}
diff --git a/tensorflow/tools/api/golden/tensorflow.nn.rnn_cell.-g-r-u-cell.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.nn.rnn_cell.-g-r-u-cell.pbtxt
index d7f658aaee..a4bb3219c7 100644
--- a/tensorflow/tools/api/golden/tensorflow.nn.rnn_cell.-g-r-u-cell.pbtxt
+++ b/tensorflow/tools/api/golden/v1/tensorflow.nn.rnn_cell.-g-r-u-cell.pbtxt
@@ -4,8 +4,8 @@ tf_class {
is_instance: "<class \'tensorflow.python.ops.rnn_cell_impl.LayerRNNCell\'>"
is_instance: "<class \'tensorflow.python.ops.rnn_cell_impl.RNNCell\'>"
is_instance: "<class \'tensorflow.python.layers.base.Layer\'>"
- is_instance: "<class \'tensorflow.python.keras._impl.keras.engine.base_layer.Layer\'>"
- is_instance: "<class \'tensorflow.python.training.checkpointable.CheckpointableBase\'>"
+ is_instance: "<class \'tensorflow.python.keras.engine.base_layer.Layer\'>"
+ is_instance: "<class \'tensorflow.python.training.checkpointable.base.CheckpointableBase\'>"
is_instance: "<type \'object\'>"
member {
name: "activity_regularizer"
@@ -101,7 +101,7 @@ tf_class {
}
member_method {
name: "__init__"
- argspec: "args=[\'self\', \'num_units\', \'activation\', \'reuse\', \'kernel_initializer\', \'bias_initializer\', \'name\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'None\', \'None\'], "
+ argspec: "args=[\'self\', \'num_units\', \'activation\', \'reuse\', \'kernel_initializer\', \'bias_initializer\', \'name\', \'dtype\'], varargs=None, keywords=kwargs, defaults=[\'None\', \'None\', \'None\', \'None\', \'None\', \'None\'], "
}
member_method {
name: "add_loss"
@@ -117,7 +117,7 @@ tf_class {
}
member_method {
name: "add_weight"
- argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'use_resource\', \'partitioner\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'True\', \'None\', \'None\', \'None\'], "
+ argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'use_resource\', \'synchronization\', \'aggregation\', \'partitioner\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'VariableSynchronization.AUTO\', \'VariableAggregation.NONE\', \'None\'], "
}
member_method {
name: "apply"
@@ -125,7 +125,7 @@ tf_class {
}
member_method {
name: "build"
- argspec: "args=[\'self\', \'inputs_shape\'], varargs=None, keywords=None, defaults=None"
+ argspec: "args=[\'instance\', \'input_shape\'], varargs=None, keywords=None, defaults=None"
}
member_method {
name: "call"
@@ -152,6 +152,10 @@ tf_class {
argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
}
member_method {
+ name: "get_initial_state"
+ argspec: "args=[\'self\', \'inputs\', \'batch_size\', \'dtype\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\'], "
+ }
+ member_method {
name: "get_input_at"
argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
}
diff --git a/tensorflow/tools/api/golden/tensorflow.nn.rnn_cell.-l-s-t-m-cell.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.nn.rnn_cell.-l-s-t-m-cell.pbtxt
index b9ab487c77..715bfd5fc7 100644
--- a/tensorflow/tools/api/golden/tensorflow.nn.rnn_cell.-l-s-t-m-cell.pbtxt
+++ b/tensorflow/tools/api/golden/v1/tensorflow.nn.rnn_cell.-l-s-t-m-cell.pbtxt
@@ -4,8 +4,8 @@ tf_class {
is_instance: "<class \'tensorflow.python.ops.rnn_cell_impl.LayerRNNCell\'>"
is_instance: "<class \'tensorflow.python.ops.rnn_cell_impl.RNNCell\'>"
is_instance: "<class \'tensorflow.python.layers.base.Layer\'>"
- is_instance: "<class \'tensorflow.python.keras._impl.keras.engine.base_layer.Layer\'>"
- is_instance: "<class \'tensorflow.python.training.checkpointable.CheckpointableBase\'>"
+ is_instance: "<class \'tensorflow.python.keras.engine.base_layer.Layer\'>"
+ is_instance: "<class \'tensorflow.python.training.checkpointable.base.CheckpointableBase\'>"
is_instance: "<type \'object\'>"
member {
name: "activity_regularizer"
@@ -101,7 +101,7 @@ tf_class {
}
member_method {
name: "__init__"
- argspec: "args=[\'self\', \'num_units\', \'use_peepholes\', \'cell_clip\', \'initializer\', \'num_proj\', \'proj_clip\', \'num_unit_shards\', \'num_proj_shards\', \'forget_bias\', \'state_is_tuple\', \'activation\', \'reuse\', \'name\'], varargs=None, keywords=None, defaults=[\'False\', \'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'1.0\', \'True\', \'None\', \'None\', \'None\'], "
+ argspec: "args=[\'self\', \'num_units\', \'use_peepholes\', \'cell_clip\', \'initializer\', \'num_proj\', \'proj_clip\', \'num_unit_shards\', \'num_proj_shards\', \'forget_bias\', \'state_is_tuple\', \'activation\', \'reuse\', \'name\', \'dtype\'], varargs=None, keywords=kwargs, defaults=[\'False\', \'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'1.0\', \'True\', \'None\', \'None\', \'None\', \'None\'], "
}
member_method {
name: "add_loss"
@@ -117,7 +117,7 @@ tf_class {
}
member_method {
name: "add_weight"
- argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'use_resource\', \'partitioner\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'True\', \'None\', \'None\', \'None\'], "
+ argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'use_resource\', \'synchronization\', \'aggregation\', \'partitioner\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'VariableSynchronization.AUTO\', \'VariableAggregation.NONE\', \'None\'], "
}
member_method {
name: "apply"
@@ -125,7 +125,7 @@ tf_class {
}
member_method {
name: "build"
- argspec: "args=[\'self\', \'inputs_shape\'], varargs=None, keywords=None, defaults=None"
+ argspec: "args=[\'instance\', \'input_shape\'], varargs=None, keywords=None, defaults=None"
}
member_method {
name: "call"
@@ -152,6 +152,10 @@ tf_class {
argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
}
member_method {
+ name: "get_initial_state"
+ argspec: "args=[\'self\', \'inputs\', \'batch_size\', \'dtype\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\'], "
+ }
+ member_method {
name: "get_input_at"
argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
}
diff --git a/tensorflow/tools/api/golden/tensorflow.nn.rnn_cell.-l-s-t-m-state-tuple.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.nn.rnn_cell.-l-s-t-m-state-tuple.pbtxt
index 1de8a55dcc..1de8a55dcc 100644
--- a/tensorflow/tools/api/golden/tensorflow.nn.rnn_cell.-l-s-t-m-state-tuple.pbtxt
+++ b/tensorflow/tools/api/golden/v1/tensorflow.nn.rnn_cell.-l-s-t-m-state-tuple.pbtxt
diff --git a/tensorflow/tools/api/golden/tensorflow.nn.rnn_cell.-multi-r-n-n-cell.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.nn.rnn_cell.-multi-r-n-n-cell.pbtxt
index b9e3d93475..b66c0f89cc 100644
--- a/tensorflow/tools/api/golden/tensorflow.nn.rnn_cell.-multi-r-n-n-cell.pbtxt
+++ b/tensorflow/tools/api/golden/v1/tensorflow.nn.rnn_cell.-multi-r-n-n-cell.pbtxt
@@ -3,8 +3,8 @@ tf_class {
is_instance: "<class \'tensorflow.python.ops.rnn_cell_impl.MultiRNNCell\'>"
is_instance: "<class \'tensorflow.python.ops.rnn_cell_impl.RNNCell\'>"
is_instance: "<class \'tensorflow.python.layers.base.Layer\'>"
- is_instance: "<class \'tensorflow.python.keras._impl.keras.engine.base_layer.Layer\'>"
- is_instance: "<class \'tensorflow.python.training.checkpointable.CheckpointableBase\'>"
+ is_instance: "<class \'tensorflow.python.keras.engine.base_layer.Layer\'>"
+ is_instance: "<class \'tensorflow.python.training.checkpointable.base.CheckpointableBase\'>"
is_instance: "<type \'object\'>"
member {
name: "activity_regularizer"
@@ -116,7 +116,7 @@ tf_class {
}
member_method {
name: "add_weight"
- argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'use_resource\', \'partitioner\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'True\', \'None\', \'None\', \'None\'], "
+ argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'use_resource\', \'synchronization\', \'aggregation\', \'partitioner\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'VariableSynchronization.AUTO\', \'VariableAggregation.NONE\', \'None\'], "
}
member_method {
name: "apply"
@@ -151,6 +151,10 @@ tf_class {
argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
}
member_method {
+ name: "get_initial_state"
+ argspec: "args=[\'self\', \'inputs\', \'batch_size\', \'dtype\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\'], "
+ }
+ member_method {
name: "get_input_at"
argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
}
diff --git a/tensorflow/tools/api/golden/tensorflow.nn.rnn_cell.-r-n-n-cell.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.nn.rnn_cell.-r-n-n-cell.pbtxt
index 75b5898c59..faeb4f3513 100644
--- a/tensorflow/tools/api/golden/tensorflow.nn.rnn_cell.-r-n-n-cell.pbtxt
+++ b/tensorflow/tools/api/golden/v1/tensorflow.nn.rnn_cell.-r-n-n-cell.pbtxt
@@ -2,8 +2,8 @@ path: "tensorflow.nn.rnn_cell.RNNCell"
tf_class {
is_instance: "<class \'tensorflow.python.ops.rnn_cell_impl.RNNCell\'>"
is_instance: "<class \'tensorflow.python.layers.base.Layer\'>"
- is_instance: "<class \'tensorflow.python.keras._impl.keras.engine.base_layer.Layer\'>"
- is_instance: "<class \'tensorflow.python.training.checkpointable.CheckpointableBase\'>"
+ is_instance: "<class \'tensorflow.python.keras.engine.base_layer.Layer\'>"
+ is_instance: "<class \'tensorflow.python.training.checkpointable.base.CheckpointableBase\'>"
is_instance: "<type \'object\'>"
member {
name: "activity_regularizer"
@@ -115,7 +115,7 @@ tf_class {
}
member_method {
name: "add_weight"
- argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'use_resource\', \'partitioner\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'True\', \'None\', \'None\', \'None\'], "
+ argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'use_resource\', \'synchronization\', \'aggregation\', \'partitioner\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'VariableSynchronization.AUTO\', \'VariableAggregation.NONE\', \'None\'], "
}
member_method {
name: "apply"
@@ -150,6 +150,10 @@ tf_class {
argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
}
member_method {
+ name: "get_initial_state"
+ argspec: "args=[\'self\', \'inputs\', \'batch_size\', \'dtype\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\'], "
+ }
+ member_method {
name: "get_input_at"
argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
}
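
Every cell and wrapper in this directory picks up the same add_weight change: new synchronization and aggregation parameters defaulting to VariableSynchronization.AUTO and VariableAggregation.NONE, with trainable now defaulting to None instead of True. A hedged sketch of a custom cell built against the extended signature; TinyCell and its variable names are hypothetical and not part of the diff:

import tensorflow as tf

class TinyCell(tf.nn.rnn_cell.RNNCell):
  """Hypothetical cell illustrating the extended add_weight signature."""

  def __init__(self, num_units, **kwargs):
    super(TinyCell, self).__init__(**kwargs)
    self._num_units = num_units

  @property
  def state_size(self):
    return self._num_units

  @property
  def output_size(self):
    return self._num_units

  def build(self, input_shape):
    # synchronization/aggregation are the parameters added in this diff;
    # AUTO/NONE restate the defaults recorded in the golden files.
    self._kernel = self.add_weight(
        "kernel",
        shape=[input_shape[-1].value + self._num_units, self._num_units],
        synchronization=tf.VariableSynchronization.AUTO,
        aggregation=tf.VariableAggregation.NONE)
    self.built = True

  def call(self, inputs, state):
    output = tf.tanh(tf.matmul(tf.concat([inputs, state], 1), self._kernel))
    return output, output

# Usage sketch: build the cell once on a placeholder batch.
cell = TinyCell(4)
state = cell.zero_state(batch_size=2, dtype=tf.float32)
out, new_state = cell(tf.placeholder(tf.float32, [2, 6]), state)
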
diff --git a/tensorflow/tools/api/golden/tensorflow.nn.rnn_cell.-residual-wrapper.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.nn.rnn_cell.-residual-wrapper.pbtxt
index fee0dc63b9..caa2e60080 100644
--- a/tensorflow/tools/api/golden/tensorflow.nn.rnn_cell.-residual-wrapper.pbtxt
+++ b/tensorflow/tools/api/golden/v1/tensorflow.nn.rnn_cell.-residual-wrapper.pbtxt
@@ -3,8 +3,8 @@ tf_class {
is_instance: "<class \'tensorflow.python.ops.rnn_cell_impl.ResidualWrapper\'>"
is_instance: "<class \'tensorflow.python.ops.rnn_cell_impl.RNNCell\'>"
is_instance: "<class \'tensorflow.python.layers.base.Layer\'>"
- is_instance: "<class \'tensorflow.python.keras._impl.keras.engine.base_layer.Layer\'>"
- is_instance: "<class \'tensorflow.python.training.checkpointable.CheckpointableBase\'>"
+ is_instance: "<class \'tensorflow.python.keras.engine.base_layer.Layer\'>"
+ is_instance: "<class \'tensorflow.python.training.checkpointable.base.CheckpointableBase\'>"
is_instance: "<type \'object\'>"
member {
name: "activity_regularizer"
@@ -116,7 +116,7 @@ tf_class {
}
member_method {
name: "add_weight"
- argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'use_resource\', \'partitioner\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'True\', \'None\', \'None\', \'None\'], "
+ argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'use_resource\', \'synchronization\', \'aggregation\', \'partitioner\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'VariableSynchronization.AUTO\', \'VariableAggregation.NONE\', \'None\'], "
}
member_method {
name: "apply"
@@ -151,6 +151,10 @@ tf_class {
argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
}
member_method {
+ name: "get_initial_state"
+ argspec: "args=[\'self\', \'inputs\', \'batch_size\', \'dtype\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\'], "
+ }
+ member_method {
name: "get_input_at"
argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
}
diff --git a/tensorflow/tools/api/golden/tensorflow.nn.rnn_cell.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.nn.rnn_cell.pbtxt
index 64697e8a02..64697e8a02 100644
--- a/tensorflow/tools/api/golden/tensorflow.nn.rnn_cell.pbtxt
+++ b/tensorflow/tools/api/golden/v1/tensorflow.nn.rnn_cell.pbtxt
diff --git a/tensorflow/tools/api/golden/tensorflow.ones_initializer.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.ones_initializer.pbtxt
index 210b56242b..210b56242b 100644
--- a/tensorflow/tools/api/golden/tensorflow.ones_initializer.pbtxt
+++ b/tensorflow/tools/api/golden/v1/tensorflow.ones_initializer.pbtxt
diff --git a/tensorflow/tools/api/golden/tensorflow.orthogonal_initializer.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.orthogonal_initializer.pbtxt
index 13ec7454f4..13ec7454f4 100644
--- a/tensorflow/tools/api/golden/tensorflow.orthogonal_initializer.pbtxt
+++ b/tensorflow/tools/api/golden/v1/tensorflow.orthogonal_initializer.pbtxt
diff --git a/tensorflow/tools/api/golden/v1/tensorflow.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.pbtxt
new file mode 100644
index 0000000000..fbc58e5933
--- /dev/null
+++ b/tensorflow/tools/api/golden/v1/tensorflow.pbtxt
@@ -0,0 +1,2255 @@
+path: "tensorflow"
+tf_module {
+ member {
+ name: "AUTO_REUSE"
+ mtype: "<enum \'_ReuseMode\'>"
+ }
+ member {
+ name: "AggregationMethod"
+ mtype: "<type \'type\'>"
+ }
+ member {
+ name: "AttrValue"
+ mtype: "<class \'google.protobuf.pyext.cpp_message.GeneratedProtocolMessageType\'>"
+ }
+ member {
+ name: "COMPILER_VERSION"
+ mtype: "<type \'str\'>"
+ }
+ member {
+ name: "CXX11_ABI_FLAG"
+ mtype: "<type \'int\'>"
+ }
+ member {
+ name: "ConditionalAccumulator"
+ mtype: "<type \'type\'>"
+ }
+ member {
+ name: "ConditionalAccumulatorBase"
+ mtype: "<type \'type\'>"
+ }
+ member {
+ name: "ConfigProto"
+ mtype: "<class \'google.protobuf.pyext.cpp_message.GeneratedProtocolMessageType\'>"
+ }
+ member {
+ name: "DType"
+ mtype: "<type \'type\'>"
+ }
+ member {
+ name: "DeviceSpec"
+ mtype: "<type \'type\'>"
+ }
+ member {
+ name: "Dimension"
+ mtype: "<type \'type\'>"
+ }
+ member {
+ name: "Event"
+ mtype: "<class \'google.protobuf.pyext.cpp_message.GeneratedProtocolMessageType\'>"
+ }
+ member {
+ name: "FIFOQueue"
+ mtype: "<type \'type\'>"
+ }
+ member {
+ name: "FixedLenFeature"
+ mtype: "<type \'type\'>"
+ }
+ member {
+ name: "FixedLenSequenceFeature"
+ mtype: "<type \'type\'>"
+ }
+ member {
+ name: "FixedLengthRecordReader"
+ mtype: "<type \'type\'>"
+ }
+ member {
+ name: "GIT_VERSION"
+ mtype: "<type \'str\'>"
+ }
+ member {
+ name: "GPUOptions"
+ mtype: "<class \'google.protobuf.pyext.cpp_message.GeneratedProtocolMessageType\'>"
+ }
+ member {
+ name: "GRAPH_DEF_VERSION"
+ mtype: "<type \'int\'>"
+ }
+ member {
+ name: "GRAPH_DEF_VERSION_MIN_CONSUMER"
+ mtype: "<type \'int\'>"
+ }
+ member {
+ name: "GRAPH_DEF_VERSION_MIN_PRODUCER"
+ mtype: "<type \'int\'>"
+ }
+ member {
+ name: "GradientTape"
+ mtype: "<type \'type\'>"
+ }
+ member {
+ name: "Graph"
+ mtype: "<type \'type\'>"
+ }
+ member {
+ name: "GraphDef"
+ mtype: "<class \'google.protobuf.pyext.cpp_message.GeneratedProtocolMessageType\'>"
+ }
+ member {
+ name: "GraphKeys"
+ mtype: "<type \'type\'>"
+ }
+ member {
+ name: "GraphOptions"
+ mtype: "<class \'google.protobuf.pyext.cpp_message.GeneratedProtocolMessageType\'>"
+ }
+ member {
+ name: "HistogramProto"
+ mtype: "<class \'google.protobuf.pyext.cpp_message.GeneratedProtocolMessageType\'>"
+ }
+ member {
+ name: "IdentityReader"
+ mtype: "<type \'type\'>"
+ }
+ member {
+ name: "IndexedSlices"
+ mtype: "<type \'type\'>"
+ }
+ member {
+ name: "InteractiveSession"
+ mtype: "<type \'type\'>"
+ }
+ member {
+ name: "LMDBReader"
+ mtype: "<type \'type\'>"
+ }
+ member {
+ name: "LogMessage"
+ mtype: "<class \'google.protobuf.pyext.cpp_message.GeneratedProtocolMessageType\'>"
+ }
+ member {
+ name: "MONOLITHIC_BUILD"
+ mtype: "<type \'int\'>"
+ }
+ member {
+ name: "MetaGraphDef"
+ mtype: "<class \'google.protobuf.pyext.cpp_message.GeneratedProtocolMessageType\'>"
+ }
+ member {
+ name: "NameAttrList"
+ mtype: "<class \'google.protobuf.pyext.cpp_message.GeneratedProtocolMessageType\'>"
+ }
+ member {
+ name: "NodeDef"
+ mtype: "<class \'google.protobuf.pyext.cpp_message.GeneratedProtocolMessageType\'>"
+ }
+ member {
+ name: "OpError"
+ mtype: "<type \'type\'>"
+ }
+ member {
+ name: "Operation"
+ mtype: "<type \'type\'>"
+ }
+ member {
+ name: "OptimizerOptions"
+ mtype: "<class \'google.protobuf.pyext.cpp_message.GeneratedProtocolMessageType\'>"
+ }
+ member {
+ name: "PaddingFIFOQueue"
+ mtype: "<type \'type\'>"
+ }
+ member {
+ name: "PriorityQueue"
+ mtype: "<type \'type\'>"
+ }
+ member {
+ name: "QUANTIZED_DTYPES"
+ mtype: "<type \'frozenset\'>"
+ }
+ member {
+ name: "QueueBase"
+ mtype: "<type \'type\'>"
+ }
+ member {
+ name: "RandomShuffleQueue"
+ mtype: "<type \'type\'>"
+ }
+ member {
+ name: "ReaderBase"
+ mtype: "<type \'type\'>"
+ }
+ member {
+ name: "RegisterGradient"
+ mtype: "<type \'type\'>"
+ }
+ member {
+ name: "RunMetadata"
+ mtype: "<class \'google.protobuf.pyext.cpp_message.GeneratedProtocolMessageType\'>"
+ }
+ member {
+ name: "RunOptions"
+ mtype: "<class \'google.protobuf.pyext.cpp_message.GeneratedProtocolMessageType\'>"
+ }
+ member {
+ name: "Session"
+ mtype: "<type \'type\'>"
+ }
+ member {
+ name: "SessionLog"
+ mtype: "<class \'google.protobuf.pyext.cpp_message.GeneratedProtocolMessageType\'>"
+ }
+ member {
+ name: "SparseConditionalAccumulator"
+ mtype: "<type \'type\'>"
+ }
+ member {
+ name: "SparseFeature"
+ mtype: "<type \'type\'>"
+ }
+ member {
+ name: "SparseTensor"
+ mtype: "<type \'type\'>"
+ }
+ member {
+ name: "SparseTensorValue"
+ mtype: "<type \'type\'>"
+ }
+ member {
+ name: "Summary"
+ mtype: "<class \'google.protobuf.pyext.cpp_message.GeneratedProtocolMessageType\'>"
+ }
+ member {
+ name: "SummaryMetadata"
+ mtype: "<class \'google.protobuf.pyext.cpp_message.GeneratedProtocolMessageType\'>"
+ }
+ member {
+ name: "TFRecordReader"
+ mtype: "<type \'type\'>"
+ }
+ member {
+ name: "Tensor"
+ mtype: "<type \'type\'>"
+ }
+ member {
+ name: "TensorArray"
+ mtype: "<type \'type\'>"
+ }
+ member {
+ name: "TensorInfo"
+ mtype: "<class \'google.protobuf.pyext.cpp_message.GeneratedProtocolMessageType\'>"
+ }
+ member {
+ name: "TensorShape"
+ mtype: "<type \'type\'>"
+ }
+ member {
+ name: "TextLineReader"
+ mtype: "<type \'type\'>"
+ }
+ member {
+ name: "VERSION"
+ mtype: "<type \'str\'>"
+ }
+ member {
+ name: "VarLenFeature"
+ mtype: "<type \'type\'>"
+ }
+ member {
+ name: "Variable"
+ mtype: "<class \'tensorflow.python.ops.variables.VariableMetaclass\'>"
+ }
+ member {
+ name: "VariableAggregation"
+ mtype: "<class \'enum.EnumMeta\'>"
+ }
+ member {
+ name: "VariableScope"
+ mtype: "<type \'type\'>"
+ }
+ member {
+ name: "VariableSynchronization"
+ mtype: "<class \'enum.EnumMeta\'>"
+ }
+ member {
+ name: "WholeFileReader"
+ mtype: "<type \'type\'>"
+ }
+ member {
+ name: "app"
+ mtype: "<type \'module\'>"
+ }
+ member {
+ name: "bfloat16"
+ mtype: "<class \'tensorflow.python.framework.dtypes.DType\'>"
+ }
+ member {
+ name: "bitwise"
+ mtype: "<type \'module\'>"
+ }
+ member {
+ name: "bool"
+ mtype: "<class \'tensorflow.python.framework.dtypes.DType\'>"
+ }
+ member {
+ name: "compat"
+ mtype: "<type \'module\'>"
+ }
+ member {
+ name: "complex128"
+ mtype: "<class \'tensorflow.python.framework.dtypes.DType\'>"
+ }
+ member {
+ name: "complex64"
+ mtype: "<class \'tensorflow.python.framework.dtypes.DType\'>"
+ }
+ member {
+ name: "constant_initializer"
+ mtype: "<type \'type\'>"
+ }
+ member {
+ name: "contrib"
+ mtype: "<class \'tensorflow.python.util.lazy_loader.LazyLoader\'>"
+ }
+ member {
+ name: "data"
+ mtype: "<type \'module\'>"
+ }
+ member {
+ name: "debugging"
+ mtype: "<type \'module\'>"
+ }
+ member {
+ name: "distributions"
+ mtype: "<type \'module\'>"
+ }
+ member {
+ name: "double"
+ mtype: "<class \'tensorflow.python.framework.dtypes.DType\'>"
+ }
+ member {
+ name: "dtypes"
+ mtype: "<type \'module\'>"
+ }
+ member {
+ name: "errors"
+ mtype: "<type \'module\'>"
+ }
+ member {
+ name: "estimator"
+ mtype: "<type \'module\'>"
+ }
+ member {
+ name: "feature_column"
+ mtype: "<type \'module\'>"
+ }
+ member {
+ name: "flags"
+ mtype: "<type \'module\'>"
+ }
+ member {
+ name: "float16"
+ mtype: "<class \'tensorflow.python.framework.dtypes.DType\'>"
+ }
+ member {
+ name: "float32"
+ mtype: "<class \'tensorflow.python.framework.dtypes.DType\'>"
+ }
+ member {
+ name: "float64"
+ mtype: "<class \'tensorflow.python.framework.dtypes.DType\'>"
+ }
+ member {
+ name: "gfile"
+ mtype: "<type \'module\'>"
+ }
+ member {
+ name: "glorot_normal_initializer"
+ mtype: "<type \'type\'>"
+ }
+ member {
+ name: "glorot_uniform_initializer"
+ mtype: "<type \'type\'>"
+ }
+ member {
+ name: "graph_util"
+ mtype: "<type \'module\'>"
+ }
+ member {
+ name: "half"
+ mtype: "<class \'tensorflow.python.framework.dtypes.DType\'>"
+ }
+ member {
+ name: "image"
+ mtype: "<type \'module\'>"
+ }
+ member {
+ name: "initializers"
+ mtype: "<type \'module\'>"
+ }
+ member {
+ name: "int16"
+ mtype: "<class \'tensorflow.python.framework.dtypes.DType\'>"
+ }
+ member {
+ name: "int32"
+ mtype: "<class \'tensorflow.python.framework.dtypes.DType\'>"
+ }
+ member {
+ name: "int64"
+ mtype: "<class \'tensorflow.python.framework.dtypes.DType\'>"
+ }
+ member {
+ name: "int8"
+ mtype: "<class \'tensorflow.python.framework.dtypes.DType\'>"
+ }
+ member {
+ name: "io"
+ mtype: "<type \'module\'>"
+ }
+ member {
+ name: "keras"
+ mtype: "<type \'module\'>"
+ }
+ member {
+ name: "layers"
+ mtype: "<type \'module\'>"
+ }
+ member {
+ name: "linalg"
+ mtype: "<type \'module\'>"
+ }
+ member {
+ name: "logging"
+ mtype: "<type \'module\'>"
+ }
+ member {
+ name: "losses"
+ mtype: "<type \'module\'>"
+ }
+ member {
+ name: "manip"
+ mtype: "<type \'module\'>"
+ }
+ member {
+ name: "math"
+ mtype: "<type \'module\'>"
+ }
+ member {
+ name: "metrics"
+ mtype: "<type \'module\'>"
+ }
+ member {
+ name: "name_scope"
+ mtype: "<type \'type\'>"
+ }
+ member {
+ name: "newaxis"
+ mtype: "<type \'NoneType\'>"
+ }
+ member {
+ name: "nn"
+ mtype: "<type \'module\'>"
+ }
+ member {
+ name: "ones_initializer"
+ mtype: "<type \'type\'>"
+ }
+ member {
+ name: "orthogonal_initializer"
+ mtype: "<type \'type\'>"
+ }
+ member {
+ name: "profiler"
+ mtype: "<type \'module\'>"
+ }
+ member {
+ name: "python_io"
+ mtype: "<type \'module\'>"
+ }
+ member {
+ name: "pywrap_tensorflow"
+ mtype: "<type \'module\'>"
+ }
+ member {
+ name: "qint16"
+ mtype: "<class \'tensorflow.python.framework.dtypes.DType\'>"
+ }
+ member {
+ name: "qint32"
+ mtype: "<class \'tensorflow.python.framework.dtypes.DType\'>"
+ }
+ member {
+ name: "qint8"
+ mtype: "<class \'tensorflow.python.framework.dtypes.DType\'>"
+ }
+ member {
+ name: "quantization"
+ mtype: "<type \'module\'>"
+ }
+ member {
+ name: "quint16"
+ mtype: "<class \'tensorflow.python.framework.dtypes.DType\'>"
+ }
+ member {
+ name: "quint8"
+ mtype: "<class \'tensorflow.python.framework.dtypes.DType\'>"
+ }
+ member {
+ name: "random_normal_initializer"
+ mtype: "<type \'type\'>"
+ }
+ member {
+ name: "random_uniform_initializer"
+ mtype: "<type \'type\'>"
+ }
+ member {
+ name: "resource"
+ mtype: "<class \'tensorflow.python.framework.dtypes.DType\'>"
+ }
+ member {
+ name: "resource_loader"
+ mtype: "<type \'module\'>"
+ }
+ member {
+ name: "saved_model"
+ mtype: "<type \'module\'>"
+ }
+ member {
+ name: "sets"
+ mtype: "<type \'module\'>"
+ }
+ member {
+ name: "sparse"
+ mtype: "<type \'module\'>"
+ }
+ member {
+ name: "spectral"
+ mtype: "<type \'module\'>"
+ }
+ member {
+ name: "string"
+ mtype: "<class \'tensorflow.python.framework.dtypes.DType\'>"
+ }
+ member {
+ name: "strings"
+ mtype: "<type \'module\'>"
+ }
+ member {
+ name: "summary"
+ mtype: "<type \'module\'>"
+ }
+ member {
+ name: "sysconfig"
+ mtype: "<type \'module\'>"
+ }
+ member {
+ name: "test"
+ mtype: "<type \'module\'>"
+ }
+ member {
+ name: "train"
+ mtype: "<type \'module\'>"
+ }
+ member {
+ name: "truncated_normal_initializer"
+ mtype: "<type \'type\'>"
+ }
+ member {
+ name: "uint16"
+ mtype: "<class \'tensorflow.python.framework.dtypes.DType\'>"
+ }
+ member {
+ name: "uint32"
+ mtype: "<class \'tensorflow.python.framework.dtypes.DType\'>"
+ }
+ member {
+ name: "uint64"
+ mtype: "<class \'tensorflow.python.framework.dtypes.DType\'>"
+ }
+ member {
+ name: "uint8"
+ mtype: "<class \'tensorflow.python.framework.dtypes.DType\'>"
+ }
+ member {
+ name: "uniform_unit_scaling_initializer"
+ mtype: "<type \'type\'>"
+ }
+ member {
+ name: "user_ops"
+ mtype: "<type \'module\'>"
+ }
+ member {
+ name: "variable_scope"
+ mtype: "<type \'type\'>"
+ }
+ member {
+ name: "variance_scaling_initializer"
+ mtype: "<type \'type\'>"
+ }
+ member {
+ name: "variant"
+ mtype: "<class \'tensorflow.python.framework.dtypes.DType\'>"
+ }
+ member {
+ name: "zeros_initializer"
+ mtype: "<type \'type\'>"
+ }
+ member_method {
+ name: "Assert"
+ argspec: "args=[\'condition\', \'data\', \'summarize\', \'name\'], varargs=None, keywords=None, defaults=[\'None\', \'None\'], "
+ }
+ member_method {
+ name: "NoGradient"
+ argspec: "args=[\'op_type\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "NotDifferentiable"
+ argspec: "args=[\'op_type\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "Print"
+ argspec: "args=[\'input_\', \'data\', \'message\', \'first_n\', \'summarize\', \'name\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'None\'], "
+ }
+ member_method {
+ name: "abs"
+ argspec: "args=[\'x\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "accumulate_n"
+ argspec: "args=[\'inputs\', \'shape\', \'tensor_dtype\', \'name\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\'], "
+ }
+ member_method {
+ name: "acos"
+ argspec: "args=[\'x\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "acosh"
+ argspec: "args=[\'x\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "add"
+ argspec: "args=[\'x\', \'y\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "add_check_numerics_ops"
+ argspec: "args=[], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "add_n"
+ argspec: "args=[\'inputs\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "add_to_collection"
+ argspec: "args=[\'name\', \'value\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "add_to_collections"
+ argspec: "args=[\'names\', \'value\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "all_variables"
+ argspec: "args=[], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "angle"
+ argspec: "args=[\'input\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "arg_max"
+ argspec: "args=[\'input\', \'dimension\', \'output_type\', \'name\'], varargs=None, keywords=None, defaults=[\"<dtype: \'int64\'>\", \'None\'], "
+ }
+ member_method {
+ name: "arg_min"
+ argspec: "args=[\'input\', \'dimension\', \'output_type\', \'name\'], varargs=None, keywords=None, defaults=[\"<dtype: \'int64\'>\", \'None\'], "
+ }
+ member_method {
+ name: "argmax"
+ argspec: "args=[\'input\', \'axis\', \'name\', \'dimension\', \'output_type\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \"<dtype: \'int64\'>\"], "
+ }
+ member_method {
+ name: "argmin"
+ argspec: "args=[\'input\', \'axis\', \'name\', \'dimension\', \'output_type\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \"<dtype: \'int64\'>\"], "
+ }
+ member_method {
+ name: "as_dtype"
+ argspec: "args=[\'type_value\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "as_string"
+ argspec: "args=[\'input\', \'precision\', \'scientific\', \'shortest\', \'width\', \'fill\', \'name\'], varargs=None, keywords=None, defaults=[\'-1\', \'False\', \'False\', \'-1\', \'\', \'None\'], "
+ }
+ member_method {
+ name: "asin"
+ argspec: "args=[\'x\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "asinh"
+ argspec: "args=[\'x\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "assert_equal"
+ argspec: "args=[\'x\', \'y\', \'data\', \'summarize\', \'message\', \'name\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'None\'], "
+ }
+ member_method {
+ name: "assert_greater"
+ argspec: "args=[\'x\', \'y\', \'data\', \'summarize\', \'message\', \'name\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'None\'], "
+ }
+ member_method {
+ name: "assert_greater_equal"
+ argspec: "args=[\'x\', \'y\', \'data\', \'summarize\', \'message\', \'name\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'None\'], "
+ }
+ member_method {
+ name: "assert_integer"
+ argspec: "args=[\'x\', \'message\', \'name\'], varargs=None, keywords=None, defaults=[\'None\', \'None\'], "
+ }
+ member_method {
+ name: "assert_less"
+ argspec: "args=[\'x\', \'y\', \'data\', \'summarize\', \'message\', \'name\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'None\'], "
+ }
+ member_method {
+ name: "assert_less_equal"
+ argspec: "args=[\'x\', \'y\', \'data\', \'summarize\', \'message\', \'name\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'None\'], "
+ }
+ member_method {
+ name: "assert_near"
+ argspec: "args=[\'x\', \'y\', \'rtol\', \'atol\', \'data\', \'summarize\', \'message\', \'name\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'None\', \'None\', \'None\'], "
+ }
+ member_method {
+ name: "assert_negative"
+ argspec: "args=[\'x\', \'data\', \'summarize\', \'message\', \'name\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'None\'], "
+ }
+ member_method {
+ name: "assert_non_negative"
+ argspec: "args=[\'x\', \'data\', \'summarize\', \'message\', \'name\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'None\'], "
+ }
+ member_method {
+ name: "assert_non_positive"
+ argspec: "args=[\'x\', \'data\', \'summarize\', \'message\', \'name\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'None\'], "
+ }
+ member_method {
+ name: "assert_none_equal"
+ argspec: "args=[\'x\', \'y\', \'data\', \'summarize\', \'message\', \'name\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'None\'], "
+ }
+ member_method {
+ name: "assert_positive"
+ argspec: "args=[\'x\', \'data\', \'summarize\', \'message\', \'name\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'None\'], "
+ }
+ member_method {
+ name: "assert_proper_iterable"
+ argspec: "args=[\'values\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "assert_rank"
+ argspec: "args=[\'x\', \'rank\', \'data\', \'summarize\', \'message\', \'name\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'None\'], "
+ }
+ member_method {
+ name: "assert_rank_at_least"
+ argspec: "args=[\'x\', \'rank\', \'data\', \'summarize\', \'message\', \'name\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'None\'], "
+ }
+ member_method {
+ name: "assert_rank_in"
+ argspec: "args=[\'x\', \'ranks\', \'data\', \'summarize\', \'message\', \'name\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'None\'], "
+ }
+ member_method {
+ name: "assert_same_float_dtype"
+ argspec: "args=[\'tensors\', \'dtype\'], varargs=None, keywords=None, defaults=[\'None\', \'None\'], "
+ }
+ member_method {
+ name: "assert_scalar"
+ argspec: "args=[\'tensor\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "assert_type"
+ argspec: "args=[\'tensor\', \'tf_type\', \'message\', \'name\'], varargs=None, keywords=None, defaults=[\'None\', \'None\'], "
+ }
+ member_method {
+ name: "assert_variables_initialized"
+ argspec: "args=[\'var_list\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "assign"
+ argspec: "args=[\'ref\', \'value\', \'validate_shape\', \'use_locking\', \'name\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\'], "
+ }
+ member_method {
+ name: "assign_add"
+ argspec: "args=[\'ref\', \'value\', \'use_locking\', \'name\'], varargs=None, keywords=None, defaults=[\'None\', \'None\'], "
+ }
+ member_method {
+ name: "assign_sub"
+ argspec: "args=[\'ref\', \'value\', \'use_locking\', \'name\'], varargs=None, keywords=None, defaults=[\'None\', \'None\'], "
+ }
+ member_method {
+ name: "atan"
+ argspec: "args=[\'x\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "atan2"
+ argspec: "args=[\'y\', \'x\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "atanh"
+ argspec: "args=[\'x\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "batch_gather"
+ argspec: "args=[\'params\', \'indices\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "batch_scatter_update"
+ argspec: "args=[\'ref\', \'indices\', \'updates\', \'use_locking\', \'name\'], varargs=None, keywords=None, defaults=[\'True\', \'None\'], "
+ }
+ member_method {
+ name: "batch_to_space"
+ argspec: "args=[\'input\', \'crops\', \'block_size\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "batch_to_space_nd"
+ argspec: "args=[\'input\', \'block_shape\', \'crops\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "betainc"
+ argspec: "args=[\'a\', \'b\', \'x\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "bincount"
+ argspec: "args=[\'arr\', \'weights\', \'minlength\', \'maxlength\', \'dtype\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \"<dtype: \'int32\'>\"], "
+ }
+ member_method {
+ name: "bitcast"
+ argspec: "args=[\'input\', \'type\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "boolean_mask"
+ argspec: "args=[\'tensor\', \'mask\', \'name\', \'axis\'], varargs=None, keywords=None, defaults=[\'boolean_mask\', \'None\'], "
+ }
+ member_method {
+ name: "broadcast_dynamic_shape"
+ argspec: "args=[\'shape_x\', \'shape_y\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "broadcast_static_shape"
+ argspec: "args=[\'shape_x\', \'shape_y\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "broadcast_to"
+ argspec: "args=[\'input\', \'shape\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "case"
+ argspec: "args=[\'pred_fn_pairs\', \'default\', \'exclusive\', \'strict\', \'name\'], varargs=None, keywords=None, defaults=[\'None\', \'False\', \'False\', \'case\'], "
+ }
+ member_method {
+ name: "cast"
+ argspec: "args=[\'x\', \'dtype\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "ceil"
+ argspec: "args=[\'x\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "check_numerics"
+ argspec: "args=[\'tensor\', \'message\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "cholesky"
+ argspec: "args=[\'input\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "cholesky_solve"
+ argspec: "args=[\'chol\', \'rhs\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "clip_by_average_norm"
+ argspec: "args=[\'t\', \'clip_norm\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "clip_by_global_norm"
+ argspec: "args=[\'t_list\', \'clip_norm\', \'use_norm\', \'name\'], varargs=None, keywords=None, defaults=[\'None\', \'None\'], "
+ }
+ member_method {
+ name: "clip_by_norm"
+ argspec: "args=[\'t\', \'clip_norm\', \'axes\', \'name\'], varargs=None, keywords=None, defaults=[\'None\', \'None\'], "
+ }
+ member_method {
+ name: "clip_by_value"
+ argspec: "args=[\'t\', \'clip_value_min\', \'clip_value_max\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "colocate_with"
+ argspec: "args=[\'op\', \'ignore_existing\'], varargs=None, keywords=None, defaults=[\'False\'], "
+ }
+ member_method {
+ name: "complex"
+ argspec: "args=[\'real\', \'imag\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "concat"
+ argspec: "args=[\'values\', \'axis\', \'name\'], varargs=None, keywords=None, defaults=[\'concat\'], "
+ }
+ member_method {
+ name: "cond"
+ argspec: "args=[\'pred\', \'true_fn\', \'false_fn\', \'strict\', \'name\', \'fn1\', \'fn2\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'False\', \'None\', \'None\', \'None\'], "
+ }
+ member_method {
+ name: "confusion_matrix"
+ argspec: "args=[\'labels\', \'predictions\', \'num_classes\', \'dtype\', \'name\', \'weights\'], varargs=None, keywords=None, defaults=[\'None\', \"<dtype: \'int32\'>\", \'None\', \'None\'], "
+ }
+ member_method {
+ name: "conj"
+ argspec: "args=[\'x\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "constant"
+ argspec: "args=[\'value\', \'dtype\', \'shape\', \'name\', \'verify_shape\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'Const\', \'False\'], "
+ }
+ member_method {
+ name: "container"
+ argspec: "args=[\'container_name\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "control_dependencies"
+ argspec: "args=[\'control_inputs\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "convert_to_tensor"
+ argspec: "args=[\'value\', \'dtype\', \'name\', \'preferred_dtype\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\'], "
+ }
+ member_method {
+ name: "convert_to_tensor_or_indexed_slices"
+ argspec: "args=[\'value\', \'dtype\', \'name\'], varargs=None, keywords=None, defaults=[\'None\', \'None\'], "
+ }
+ member_method {
+ name: "convert_to_tensor_or_sparse_tensor"
+ argspec: "args=[\'value\', \'dtype\', \'name\'], varargs=None, keywords=None, defaults=[\'None\', \'None\'], "
+ }
+ member_method {
+ name: "cos"
+ argspec: "args=[\'x\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "cosh"
+ argspec: "args=[\'x\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "count_nonzero"
+ argspec: "args=[\'input_tensor\', \'axis\', \'keepdims\', \'dtype\', \'name\', \'reduction_indices\', \'keep_dims\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \"<dtype: \'int64\'>\", \'None\', \'None\', \'None\'], "
+ }
+ member_method {
+ name: "count_up_to"
+ argspec: "args=[\'ref\', \'limit\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "create_partitioned_variables"
+ argspec: "args=[\'shape\', \'slicing\', \'initializer\', \'dtype\', \'trainable\', \'collections\', \'name\', \'reuse\'], varargs=None, keywords=None, defaults=[\"<dtype: \'float32\'>\", \'True\', \'None\', \'None\', \'None\'], "
+ }
+ member_method {
+ name: "cross"
+ argspec: "args=[\'a\', \'b\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "cumprod"
+ argspec: "args=[\'x\', \'axis\', \'exclusive\', \'reverse\', \'name\'], varargs=None, keywords=None, defaults=[\'0\', \'False\', \'False\', \'None\'], "
+ }
+ member_method {
+ name: "cumsum"
+ argspec: "args=[\'x\', \'axis\', \'exclusive\', \'reverse\', \'name\'], varargs=None, keywords=None, defaults=[\'0\', \'False\', \'False\', \'None\'], "
+ }
+ member_method {
+ name: "custom_gradient"
+ argspec: "args=[\'f\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "decode_base64"
+ argspec: "args=[\'input\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "decode_compressed"
+ argspec: "args=[\'bytes\', \'compression_type\', \'name\'], varargs=None, keywords=None, defaults=[\'\', \'None\'], "
+ }
+ member_method {
+ name: "decode_csv"
+ argspec: "args=[\'records\', \'record_defaults\', \'field_delim\', \'use_quote_delim\', \'name\', \'na_value\', \'select_cols\'], varargs=None, keywords=None, defaults=[\',\', \'True\', \'None\', \'\', \'None\'], "
+ }
+ member_method {
+ name: "decode_json_example"
+ argspec: "args=[\'json_examples\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "decode_raw"
+ argspec: "args=[\'bytes\', \'out_type\', \'little_endian\', \'name\'], varargs=None, keywords=None, defaults=[\'True\', \'None\'], "
+ }
+ member_method {
+ name: "delete_session_tensor"
+ argspec: "args=[\'handle\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "depth_to_space"
+ argspec: "args=[\'input\', \'block_size\', \'name\', \'data_format\'], varargs=None, keywords=None, defaults=[\'None\', \'NHWC\'], "
+ }
+ member_method {
+ name: "dequantize"
+ argspec: "args=[\'input\', \'min_range\', \'max_range\', \'mode\', \'name\'], varargs=None, keywords=None, defaults=[\'MIN_COMBINED\', \'None\'], "
+ }
+ member_method {
+ name: "deserialize_many_sparse"
+ argspec: "args=[\'serialized_sparse\', \'dtype\', \'rank\', \'name\'], varargs=None, keywords=None, defaults=[\'None\', \'None\'], "
+ }
+ member_method {
+ name: "device"
+ argspec: "args=[\'device_name_or_function\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "diag"
+ argspec: "args=[\'diagonal\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "diag_part"
+ argspec: "args=[\'input\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "digamma"
+ argspec: "args=[\'x\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "disable_resource_variables"
+ argspec: "args=[], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "div"
+ argspec: "args=[\'x\', \'y\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "div_no_nan"
+ argspec: "args=[\'x\', \'y\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "divide"
+ argspec: "args=[\'x\', \'y\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "dynamic_partition"
+ argspec: "args=[\'data\', \'partitions\', \'num_partitions\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "dynamic_stitch"
+ argspec: "args=[\'indices\', \'data\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "edit_distance"
+ argspec: "args=[\'hypothesis\', \'truth\', \'normalize\', \'name\'], varargs=None, keywords=None, defaults=[\'True\', \'edit_distance\'], "
+ }
+ member_method {
+ name: "einsum"
+ argspec: "args=[\'equation\'], varargs=inputs, keywords=kwargs, defaults=None"
+ }
+ member_method {
+ name: "enable_eager_execution"
+ argspec: "args=[\'config\', \'device_policy\', \'execution_mode\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\'], "
+ }
+ member_method {
+ name: "enable_resource_variables"
+ argspec: "args=[], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "encode_base64"
+ argspec: "args=[\'input\', \'pad\', \'name\'], varargs=None, keywords=None, defaults=[\'False\', \'None\'], "
+ }
+ member_method {
+ name: "ensure_shape"
+ argspec: "args=[\'x\', \'shape\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "equal"
+ argspec: "args=[\'x\', \'y\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "erf"
+ argspec: "args=[\'x\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "erfc"
+ argspec: "args=[\'x\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "executing_eagerly"
+ argspec: "args=[], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "exp"
+ argspec: "args=[\'x\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "expand_dims"
+ argspec: "args=[\'input\', \'axis\', \'name\', \'dim\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\'], "
+ }
+ member_method {
+ name: "expm1"
+ argspec: "args=[\'x\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "extract_image_patches"
+ argspec: "args=[\'images\', \'ksizes\', \'strides\', \'rates\', \'padding\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "eye"
+ argspec: "args=[\'num_rows\', \'num_columns\', \'batch_shape\', \'dtype\', \'name\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \"<dtype: \'float32\'>\", \'None\'], "
+ }
+ member_method {
+ name: "fake_quant_with_min_max_args"
+ argspec: "args=[\'inputs\', \'min\', \'max\', \'num_bits\', \'narrow_range\', \'name\'], varargs=None, keywords=None, defaults=[\'-6\', \'6\', \'8\', \'False\', \'None\'], "
+ }
+ member_method {
+ name: "fake_quant_with_min_max_args_gradient"
+ argspec: "args=[\'gradients\', \'inputs\', \'min\', \'max\', \'num_bits\', \'narrow_range\', \'name\'], varargs=None, keywords=None, defaults=[\'-6\', \'6\', \'8\', \'False\', \'None\'], "
+ }
+ member_method {
+ name: "fake_quant_with_min_max_vars"
+ argspec: "args=[\'inputs\', \'min\', \'max\', \'num_bits\', \'narrow_range\', \'name\'], varargs=None, keywords=None, defaults=[\'8\', \'False\', \'None\'], "
+ }
+ member_method {
+ name: "fake_quant_with_min_max_vars_gradient"
+ argspec: "args=[\'gradients\', \'inputs\', \'min\', \'max\', \'num_bits\', \'narrow_range\', \'name\'], varargs=None, keywords=None, defaults=[\'8\', \'False\', \'None\'], "
+ }
+ member_method {
+ name: "fake_quant_with_min_max_vars_per_channel"
+ argspec: "args=[\'inputs\', \'min\', \'max\', \'num_bits\', \'narrow_range\', \'name\'], varargs=None, keywords=None, defaults=[\'8\', \'False\', \'None\'], "
+ }
+ member_method {
+ name: "fake_quant_with_min_max_vars_per_channel_gradient"
+ argspec: "args=[\'gradients\', \'inputs\', \'min\', \'max\', \'num_bits\', \'narrow_range\', \'name\'], varargs=None, keywords=None, defaults=[\'8\', \'False\', \'None\'], "
+ }
+ member_method {
+ name: "fft"
+ argspec: "args=[\'input\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "fft2d"
+ argspec: "args=[\'input\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "fft3d"
+ argspec: "args=[\'input\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "fill"
+ argspec: "args=[\'dims\', \'value\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "fixed_size_partitioner"
+ argspec: "args=[\'num_shards\', \'axis\'], varargs=None, keywords=None, defaults=[\'0\'], "
+ }
+ member_method {
+ name: "floor"
+ argspec: "args=[\'x\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "floor_div"
+ argspec: "args=[\'x\', \'y\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "floordiv"
+ argspec: "args=[\'x\', \'y\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "floormod"
+ argspec: "args=[\'x\', \'y\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "foldl"
+ argspec: "args=[\'fn\', \'elems\', \'initializer\', \'parallel_iterations\', \'back_prop\', \'swap_memory\', \'name\'], varargs=None, keywords=None, defaults=[\'None\', \'10\', \'True\', \'False\', \'None\'], "
+ }
+ member_method {
+ name: "foldr"
+ argspec: "args=[\'fn\', \'elems\', \'initializer\', \'parallel_iterations\', \'back_prop\', \'swap_memory\', \'name\'], varargs=None, keywords=None, defaults=[\'None\', \'10\', \'True\', \'False\', \'None\'], "
+ }
+ member_method {
+ name: "gather"
+ argspec: "args=[\'params\', \'indices\', \'validate_indices\', \'name\', \'axis\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'0\'], "
+ }
+ member_method {
+ name: "gather_nd"
+ argspec: "args=[\'params\', \'indices\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "get_collection"
+ argspec: "args=[\'key\', \'scope\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "get_collection_ref"
+ argspec: "args=[\'key\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_default_graph"
+ argspec: "args=[], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_default_session"
+ argspec: "args=[], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_local_variable"
+ argspec: "args=[\'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'collections\', \'caching_device\', \'partitioner\', \'validate_shape\', \'use_resource\', \'custom_getter\', \'constraint\', \'synchronization\', \'aggregation\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'None\', \'False\', \'None\', \'None\', \'None\', \'True\', \'None\', \'None\', \'None\', \'VariableSynchronization.AUTO\', \'VariableAggregation.NONE\'], "
+ }
+ member_method {
+ name: "get_seed"
+ argspec: "args=[\'op_seed\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_session_handle"
+ argspec: "args=[\'data\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "get_session_tensor"
+ argspec: "args=[\'handle\', \'dtype\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "get_variable"
+ argspec: "args=[\'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'collections\', \'caching_device\', \'partitioner\', \'validate_shape\', \'use_resource\', \'custom_getter\', \'constraint\', \'synchronization\', \'aggregation\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'True\', \'None\', \'None\', \'None\', \'VariableSynchronization.AUTO\', \'VariableAggregation.NONE\'], "
+ }
+ member_method {
+ name: "get_variable_scope"
+ argspec: "args=[], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "global_norm"
+ argspec: "args=[\'t_list\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "global_variables"
+ argspec: "args=[\'scope\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "global_variables_initializer"
+ argspec: "args=[], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "gradients"
+ argspec: "args=[\'ys\', \'xs\', \'grad_ys\', \'name\', \'colocate_gradients_with_ops\', \'gate_gradients\', \'aggregation_method\', \'stop_gradients\'], varargs=None, keywords=None, defaults=[\'None\', \'gradients\', \'False\', \'False\', \'None\', \'None\'], "
+ }
+ member_method {
+ name: "greater"
+ argspec: "args=[\'x\', \'y\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "greater_equal"
+ argspec: "args=[\'x\', \'y\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "group"
+ argspec: "args=[], varargs=inputs, keywords=kwargs, defaults=None"
+ }
+ member_method {
+ name: "guarantee_const"
+ argspec: "args=[\'input\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "hessians"
+ argspec: "args=[\'ys\', \'xs\', \'name\', \'colocate_gradients_with_ops\', \'gate_gradients\', \'aggregation_method\'], varargs=None, keywords=None, defaults=[\'hessians\', \'False\', \'False\', \'None\'], "
+ }
+ member_method {
+ name: "histogram_fixed_width"
+ argspec: "args=[\'values\', \'value_range\', \'nbins\', \'dtype\', \'name\'], varargs=None, keywords=None, defaults=[\'100\', \"<dtype: \'int32\'>\", \'None\'], "
+ }
+ member_method {
+ name: "histogram_fixed_width_bins"
+ argspec: "args=[\'values\', \'value_range\', \'nbins\', \'dtype\', \'name\'], varargs=None, keywords=None, defaults=[\'100\', \"<dtype: \'int32\'>\", \'None\'], "
+ }
+ member_method {
+ name: "identity"
+ argspec: "args=[\'input\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "identity_n"
+ argspec: "args=[\'input\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "ifft"
+ argspec: "args=[\'input\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "ifft2d"
+ argspec: "args=[\'input\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "ifft3d"
+ argspec: "args=[\'input\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "igamma"
+ argspec: "args=[\'a\', \'x\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "igammac"
+ argspec: "args=[\'a\', \'x\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "imag"
+ argspec: "args=[\'input\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "import_graph_def"
+ argspec: "args=[\'graph_def\', \'input_map\', \'return_elements\', \'name\', \'op_dict\', \'producer_op_list\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'None\', \'None\'], "
+ }
+ member_method {
+ name: "init_scope"
+ argspec: "args=[], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "initialize_all_tables"
+ argspec: "args=[\'name\'], varargs=None, keywords=None, defaults=[\'init_all_tables\'], "
+ }
+ member_method {
+ name: "initialize_all_variables"
+ argspec: "args=[], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "initialize_local_variables"
+ argspec: "args=[], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "initialize_variables"
+ argspec: "args=[\'var_list\', \'name\'], varargs=None, keywords=None, defaults=[\'init\'], "
+ }
+ member_method {
+ name: "invert_permutation"
+ argspec: "args=[\'x\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "is_finite"
+ argspec: "args=[\'x\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "is_inf"
+ argspec: "args=[\'x\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "is_nan"
+ argspec: "args=[\'x\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "is_non_decreasing"
+ argspec: "args=[\'x\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "is_numeric_tensor"
+ argspec: "args=[\'tensor\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "is_strictly_increasing"
+ argspec: "args=[\'x\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "is_variable_initialized"
+ argspec: "args=[\'variable\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "lbeta"
+ argspec: "args=[\'x\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "less"
+ argspec: "args=[\'x\', \'y\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "less_equal"
+ argspec: "args=[\'x\', \'y\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "lgamma"
+ argspec: "args=[\'x\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "lin_space"
+ argspec: "args=[\'start\', \'stop\', \'num\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "linspace"
+ argspec: "args=[\'start\', \'stop\', \'num\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "load_file_system_library"
+ argspec: "args=[\'library_filename\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "load_library"
+ argspec: "args=[\'library_location\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "load_op_library"
+ argspec: "args=[\'library_filename\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "local_variables"
+ argspec: "args=[\'scope\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "local_variables_initializer"
+ argspec: "args=[], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "log"
+ argspec: "args=[\'x\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "log1p"
+ argspec: "args=[\'x\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "log_sigmoid"
+ argspec: "args=[\'x\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "logical_and"
+ argspec: "args=[\'x\', \'y\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "logical_not"
+ argspec: "args=[\'x\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "logical_or"
+ argspec: "args=[\'x\', \'y\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "logical_xor"
+ argspec: "args=[\'x\', \'y\', \'name\'], varargs=None, keywords=None, defaults=[\'LogicalXor\'], "
+ }
+ member_method {
+ name: "make_ndarray"
+ argspec: "args=[\'tensor\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "make_template"
+ argspec: "args=[\'name_\', \'func_\', \'create_scope_now_\', \'unique_name_\', \'custom_getter_\'], varargs=None, keywords=kwargs, defaults=[\'False\', \'None\', \'None\'], "
+ }
+ member_method {
+ name: "make_tensor_proto"
+ argspec: "args=[\'values\', \'dtype\', \'shape\', \'verify_shape\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'False\'], "
+ }
+ member_method {
+ name: "map_fn"
+ argspec: "args=[\'fn\', \'elems\', \'dtype\', \'parallel_iterations\', \'back_prop\', \'swap_memory\', \'infer_shape\', \'name\'], varargs=None, keywords=None, defaults=[\'None\', \'10\', \'True\', \'False\', \'True\', \'None\'], "
+ }
+ member_method {
+ name: "matching_files"
+ argspec: "args=[\'pattern\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "matmul"
+ argspec: "args=[\'a\', \'b\', \'transpose_a\', \'transpose_b\', \'adjoint_a\', \'adjoint_b\', \'a_is_sparse\', \'b_is_sparse\', \'name\'], varargs=None, keywords=None, defaults=[\'False\', \'False\', \'False\', \'False\', \'False\', \'False\', \'None\'], "
+ }
+ member_method {
+ name: "matrix_band_part"
+ argspec: "args=[\'input\', \'num_lower\', \'num_upper\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "matrix_determinant"
+ argspec: "args=[\'input\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "matrix_diag"
+ argspec: "args=[\'diagonal\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "matrix_diag_part"
+ argspec: "args=[\'input\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "matrix_inverse"
+ argspec: "args=[\'input\', \'adjoint\', \'name\'], varargs=None, keywords=None, defaults=[\'False\', \'None\'], "
+ }
+ member_method {
+ name: "matrix_set_diag"
+ argspec: "args=[\'input\', \'diagonal\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "matrix_solve"
+ argspec: "args=[\'matrix\', \'rhs\', \'adjoint\', \'name\'], varargs=None, keywords=None, defaults=[\'False\', \'None\'], "
+ }
+ member_method {
+ name: "matrix_solve_ls"
+ argspec: "args=[\'matrix\', \'rhs\', \'l2_regularizer\', \'fast\', \'name\'], varargs=None, keywords=None, defaults=[\'0.0\', \'True\', \'None\'], "
+ }
+ member_method {
+ name: "matrix_transpose"
+ argspec: "args=[\'a\', \'name\', \'conjugate\'], varargs=None, keywords=None, defaults=[\'matrix_transpose\', \'False\'], "
+ }
+ member_method {
+ name: "matrix_triangular_solve"
+ argspec: "args=[\'matrix\', \'rhs\', \'lower\', \'adjoint\', \'name\'], varargs=None, keywords=None, defaults=[\'True\', \'False\', \'None\'], "
+ }
+ member_method {
+ name: "maximum"
+ argspec: "args=[\'x\', \'y\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "meshgrid"
+ argspec: "args=[], varargs=args, keywords=kwargs, defaults=None"
+ }
+ member_method {
+ name: "min_max_variable_partitioner"
+ argspec: "args=[\'max_partitions\', \'axis\', \'min_slice_size\', \'bytes_per_string_element\'], varargs=None, keywords=None, defaults=[\'1\', \'0\', \'262144\', \'16\'], "
+ }
+ member_method {
+ name: "minimum"
+ argspec: "args=[\'x\', \'y\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "mod"
+ argspec: "args=[\'x\', \'y\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "model_variables"
+ argspec: "args=[\'scope\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "moving_average_variables"
+ argspec: "args=[\'scope\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "multinomial"
+ argspec: "args=[\'logits\', \'num_samples\', \'seed\', \'name\', \'output_dtype\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\'], "
+ }
+ member_method {
+ name: "multiply"
+ argspec: "args=[\'x\', \'y\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "negative"
+ argspec: "args=[\'x\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "no_op"
+ argspec: "args=[\'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "no_regularizer"
+ argspec: "args=[\'_\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "norm"
+ argspec: "args=[\'tensor\', \'ord\', \'axis\', \'keepdims\', \'name\', \'keep_dims\'], varargs=None, keywords=None, defaults=[\'euclidean\', \'None\', \'None\', \'None\', \'None\'], "
+ }
+ member_method {
+ name: "not_equal"
+ argspec: "args=[\'x\', \'y\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "one_hot"
+ argspec: "args=[\'indices\', \'depth\', \'on_value\', \'off_value\', \'axis\', \'dtype\', \'name\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'None\', \'None\'], "
+ }
+ member_method {
+ name: "ones"
+ argspec: "args=[\'shape\', \'dtype\', \'name\'], varargs=None, keywords=None, defaults=[\"<dtype: \'float32\'>\", \'None\'], "
+ }
+ member_method {
+ name: "ones_like"
+ argspec: "args=[\'tensor\', \'dtype\', \'name\', \'optimize\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'True\'], "
+ }
+ member_method {
+ name: "op_scope"
+ argspec: "args=[\'values\', \'name\', \'default_name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "pad"
+ argspec: "args=[\'tensor\', \'paddings\', \'mode\', \'name\', \'constant_values\'], varargs=None, keywords=None, defaults=[\'CONSTANT\', \'None\', \'0\'], "
+ }
+ member_method {
+ name: "parallel_stack"
+ argspec: "args=[\'values\', \'name\'], varargs=None, keywords=None, defaults=[\'parallel_stack\'], "
+ }
+ member_method {
+ name: "parse_example"
+ argspec: "args=[\'serialized\', \'features\', \'name\', \'example_names\'], varargs=None, keywords=None, defaults=[\'None\', \'None\'], "
+ }
+ member_method {
+ name: "parse_single_example"
+ argspec: "args=[\'serialized\', \'features\', \'name\', \'example_names\'], varargs=None, keywords=None, defaults=[\'None\', \'None\'], "
+ }
+ member_method {
+ name: "parse_single_sequence_example"
+ argspec: "args=[\'serialized\', \'context_features\', \'sequence_features\', \'example_name\', \'name\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'None\'], "
+ }
+ member_method {
+ name: "parse_tensor"
+ argspec: "args=[\'serialized\', \'out_type\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "placeholder"
+ argspec: "args=[\'dtype\', \'shape\', \'name\'], varargs=None, keywords=None, defaults=[\'None\', \'None\'], "
+ }
+ member_method {
+ name: "placeholder_with_default"
+ argspec: "args=[\'input\', \'shape\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "polygamma"
+ argspec: "args=[\'a\', \'x\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "pow"
+ argspec: "args=[\'x\', \'y\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "print"
+ argspec: "args=[], varargs=inputs, keywords=kwargs, defaults=None"
+ }
+ member_method {
+ name: "py_func"
+ argspec: "args=[\'func\', \'inp\', \'Tout\', \'stateful\', \'name\'], varargs=None, keywords=None, defaults=[\'True\', \'None\'], "
+ }
+ member_method {
+ name: "qr"
+ argspec: "args=[\'input\', \'full_matrices\', \'name\'], varargs=None, keywords=None, defaults=[\'False\', \'None\'], "
+ }
+ member_method {
+ name: "quantize"
+ argspec: "args=[\'input\', \'min_range\', \'max_range\', \'T\', \'mode\', \'round_mode\', \'name\'], varargs=None, keywords=None, defaults=[\'MIN_COMBINED\', \'HALF_AWAY_FROM_ZERO\', \'None\'], "
+ }
+ member_method {
+ name: "quantize_v2"
+ argspec: "args=[\'input\', \'min_range\', \'max_range\', \'T\', \'mode\', \'name\', \'round_mode\'], varargs=None, keywords=None, defaults=[\'MIN_COMBINED\', \'None\', \'HALF_AWAY_FROM_ZERO\'], "
+ }
+ member_method {
+ name: "quantized_concat"
+ argspec: "args=[\'concat_dim\', \'values\', \'input_mins\', \'input_maxes\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "random_crop"
+ argspec: "args=[\'value\', \'size\', \'seed\', \'name\'], varargs=None, keywords=None, defaults=[\'None\', \'None\'], "
+ }
+ member_method {
+ name: "random_gamma"
+ argspec: "args=[\'shape\', \'alpha\', \'beta\', \'dtype\', \'seed\', \'name\'], varargs=None, keywords=None, defaults=[\'None\', \"<dtype: \'float32\'>\", \'None\', \'None\'], "
+ }
+ member_method {
+ name: "random_normal"
+ argspec: "args=[\'shape\', \'mean\', \'stddev\', \'dtype\', \'seed\', \'name\'], varargs=None, keywords=None, defaults=[\'0.0\', \'1.0\', \"<dtype: \'float32\'>\", \'None\', \'None\'], "
+ }
+ member_method {
+ name: "random_poisson"
+ argspec: "args=[\'lam\', \'shape\', \'dtype\', \'seed\', \'name\'], varargs=None, keywords=None, defaults=[\"<dtype: \'float32\'>\", \'None\', \'None\'], "
+ }
+ member_method {
+ name: "random_shuffle"
+ argspec: "args=[\'value\', \'seed\', \'name\'], varargs=None, keywords=None, defaults=[\'None\', \'None\'], "
+ }
+ member_method {
+ name: "random_uniform"
+ argspec: "args=[\'shape\', \'minval\', \'maxval\', \'dtype\', \'seed\', \'name\'], varargs=None, keywords=None, defaults=[\'0\', \'None\', \"<dtype: \'float32\'>\", \'None\', \'None\'], "
+ }
+ member_method {
+ name: "range"
+ argspec: "args=[\'start\', \'limit\', \'delta\', \'dtype\', \'name\'], varargs=None, keywords=None, defaults=[\'None\', \'1\', \'None\', \'range\'], "
+ }
+ member_method {
+ name: "rank"
+ argspec: "args=[\'input\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "read_file"
+ argspec: "args=[\'filename\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "real"
+ argspec: "args=[\'input\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "realdiv"
+ argspec: "args=[\'x\', \'y\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "reciprocal"
+ argspec: "args=[\'x\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "reduce_all"
+ argspec: "args=[\'input_tensor\', \'axis\', \'keepdims\', \'name\', \'reduction_indices\', \'keep_dims\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'None\', \'None\'], "
+ }
+ member_method {
+ name: "reduce_any"
+ argspec: "args=[\'input_tensor\', \'axis\', \'keepdims\', \'name\', \'reduction_indices\', \'keep_dims\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'None\', \'None\'], "
+ }
+ member_method {
+ name: "reduce_join"
+ argspec: "args=[\'inputs\', \'axis\', \'keep_dims\', \'separator\', \'name\', \'reduction_indices\'], varargs=None, keywords=None, defaults=[\'None\', \'False\', \'\', \'None\', \'None\'], "
+ }
+ member_method {
+ name: "reduce_logsumexp"
+ argspec: "args=[\'input_tensor\', \'axis\', \'keepdims\', \'name\', \'reduction_indices\', \'keep_dims\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'None\', \'None\'], "
+ }
+ member_method {
+ name: "reduce_max"
+ argspec: "args=[\'input_tensor\', \'axis\', \'keepdims\', \'name\', \'reduction_indices\', \'keep_dims\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'None\', \'None\'], "
+ }
+ member_method {
+ name: "reduce_mean"
+ argspec: "args=[\'input_tensor\', \'axis\', \'keepdims\', \'name\', \'reduction_indices\', \'keep_dims\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'None\', \'None\'], "
+ }
+ member_method {
+ name: "reduce_min"
+ argspec: "args=[\'input_tensor\', \'axis\', \'keepdims\', \'name\', \'reduction_indices\', \'keep_dims\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'None\', \'None\'], "
+ }
+ member_method {
+ name: "reduce_prod"
+ argspec: "args=[\'input_tensor\', \'axis\', \'keepdims\', \'name\', \'reduction_indices\', \'keep_dims\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'None\', \'None\'], "
+ }
+ member_method {
+ name: "reduce_sum"
+ argspec: "args=[\'input_tensor\', \'axis\', \'keepdims\', \'name\', \'reduction_indices\', \'keep_dims\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'None\', \'None\'], "
+ }
+ member_method {
+ name: "regex_replace"
+ argspec: "args=[\'input\', \'pattern\', \'rewrite\', \'replace_global\', \'name\'], varargs=None, keywords=None, defaults=[\'True\', \'None\'], "
+ }
+ member_method {
+ name: "register_tensor_conversion_function"
+ argspec: "args=[\'base_type\', \'conversion_func\', \'priority\'], varargs=None, keywords=None, defaults=[\'100\'], "
+ }
+ member_method {
+ name: "report_uninitialized_variables"
+ argspec: "args=[\'var_list\', \'name\'], varargs=None, keywords=None, defaults=[\'None\', \'report_uninitialized_variables\'], "
+ }
+ member_method {
+ name: "required_space_to_batch_paddings"
+ argspec: "args=[\'input_shape\', \'block_shape\', \'base_paddings\', \'name\'], varargs=None, keywords=None, defaults=[\'None\', \'None\'], "
+ }
+ member_method {
+ name: "reset_default_graph"
+ argspec: "args=[], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "reshape"
+ argspec: "args=[\'tensor\', \'shape\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "reverse"
+ argspec: "args=[\'tensor\', \'axis\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "reverse_sequence"
+ argspec: "args=[\'input\', \'seq_lengths\', \'seq_axis\', \'batch_axis\', \'name\', \'seq_dim\', \'batch_dim\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'None\', \'None\'], "
+ }
+ member_method {
+ name: "reverse_v2"
+ argspec: "args=[\'tensor\', \'axis\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "rint"
+ argspec: "args=[\'x\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "round"
+ argspec: "args=[\'x\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "rsqrt"
+ argspec: "args=[\'x\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "saturate_cast"
+ argspec: "args=[\'value\', \'dtype\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "scalar_mul"
+ argspec: "args=[\'scalar\', \'x\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "scan"
+ argspec: "args=[\'fn\', \'elems\', \'initializer\', \'parallel_iterations\', \'back_prop\', \'swap_memory\', \'infer_shape\', \'reverse\', \'name\'], varargs=None, keywords=None, defaults=[\'None\', \'10\', \'True\', \'False\', \'True\', \'False\', \'None\'], "
+ }
+ member_method {
+ name: "scatter_add"
+ argspec: "args=[\'ref\', \'indices\', \'updates\', \'use_locking\', \'name\'], varargs=None, keywords=None, defaults=[\'False\', \'None\'], "
+ }
+ member_method {
+ name: "scatter_div"
+ argspec: "args=[\'ref\', \'indices\', \'updates\', \'use_locking\', \'name\'], varargs=None, keywords=None, defaults=[\'False\', \'None\'], "
+ }
+ member_method {
+ name: "scatter_max"
+ argspec: "args=[\'ref\', \'indices\', \'updates\', \'use_locking\', \'name\'], varargs=None, keywords=None, defaults=[\'False\', \'None\'], "
+ }
+ member_method {
+ name: "scatter_min"
+ argspec: "args=[\'ref\', \'indices\', \'updates\', \'use_locking\', \'name\'], varargs=None, keywords=None, defaults=[\'False\', \'None\'], "
+ }
+ member_method {
+ name: "scatter_mul"
+ argspec: "args=[\'ref\', \'indices\', \'updates\', \'use_locking\', \'name\'], varargs=None, keywords=None, defaults=[\'False\', \'None\'], "
+ }
+ member_method {
+ name: "scatter_nd"
+ argspec: "args=[\'indices\', \'updates\', \'shape\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "scatter_nd_add"
+ argspec: "args=[\'ref\', \'indices\', \'updates\', \'use_locking\', \'name\'], varargs=None, keywords=None, defaults=[\'False\', \'None\'], "
+ }
+ member_method {
+ name: "scatter_nd_sub"
+ argspec: "args=[\'ref\', \'indices\', \'updates\', \'use_locking\', \'name\'], varargs=None, keywords=None, defaults=[\'False\', \'None\'], "
+ }
+ member_method {
+ name: "scatter_nd_update"
+ argspec: "args=[\'ref\', \'indices\', \'updates\', \'use_locking\', \'name\'], varargs=None, keywords=None, defaults=[\'True\', \'None\'], "
+ }
+ member_method {
+ name: "scatter_sub"
+ argspec: "args=[\'ref\', \'indices\', \'updates\', \'use_locking\', \'name\'], varargs=None, keywords=None, defaults=[\'False\', \'None\'], "
+ }
+ member_method {
+ name: "scatter_update"
+ argspec: "args=[\'ref\', \'indices\', \'updates\', \'use_locking\', \'name\'], varargs=None, keywords=None, defaults=[\'True\', \'None\'], "
+ }
+ member_method {
+ name: "searchsorted"
+ argspec: "args=[\'sorted_sequence\', \'values\', \'side\', \'out_type\', \'name\'], varargs=None, keywords=None, defaults=[\'left\', \"<dtype: \'int32\'>\", \'None\'], "
+ }
+ member_method {
+ name: "segment_max"
+ argspec: "args=[\'data\', \'segment_ids\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "segment_mean"
+ argspec: "args=[\'data\', \'segment_ids\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "segment_min"
+ argspec: "args=[\'data\', \'segment_ids\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "segment_prod"
+ argspec: "args=[\'data\', \'segment_ids\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "segment_sum"
+ argspec: "args=[\'data\', \'segment_ids\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "self_adjoint_eig"
+ argspec: "args=[\'tensor\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "self_adjoint_eigvals"
+ argspec: "args=[\'tensor\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "sequence_mask"
+ argspec: "args=[\'lengths\', \'maxlen\', \'dtype\', \'name\'], varargs=None, keywords=None, defaults=[\'None\', \"<dtype: \'bool\'>\", \'None\'], "
+ }
+ member_method {
+ name: "serialize_many_sparse"
+ argspec: "args=[\'sp_input\', \'name\', \'out_type\'], varargs=None, keywords=None, defaults=[\'None\', \"<dtype: \'string\'>\"], "
+ }
+ member_method {
+ name: "serialize_sparse"
+ argspec: "args=[\'sp_input\', \'name\', \'out_type\'], varargs=None, keywords=None, defaults=[\'None\', \"<dtype: \'string\'>\"], "
+ }
+ member_method {
+ name: "serialize_tensor"
+ argspec: "args=[\'tensor\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "set_random_seed"
+ argspec: "args=[\'seed\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "setdiff1d"
+ argspec: "args=[\'x\', \'y\', \'index_dtype\', \'name\'], varargs=None, keywords=None, defaults=[\"<dtype: \'int32\'>\", \'None\'], "
+ }
+ member_method {
+ name: "shape"
+ argspec: "args=[\'input\', \'name\', \'out_type\'], varargs=None, keywords=None, defaults=[\'None\', \"<dtype: \'int32\'>\"], "
+ }
+ member_method {
+ name: "shape_n"
+ argspec: "args=[\'input\', \'out_type\', \'name\'], varargs=None, keywords=None, defaults=[\"<dtype: \'int32\'>\", \'None\'], "
+ }
+ member_method {
+ name: "sigmoid"
+ argspec: "args=[\'x\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "sign"
+ argspec: "args=[\'x\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "sin"
+ argspec: "args=[\'x\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "sinh"
+ argspec: "args=[\'x\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "size"
+ argspec: "args=[\'input\', \'name\', \'out_type\'], varargs=None, keywords=None, defaults=[\'None\', \"<dtype: \'int32\'>\"], "
+ }
+ member_method {
+ name: "slice"
+ argspec: "args=[\'input_\', \'begin\', \'size\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "space_to_batch"
+ argspec: "args=[\'input\', \'paddings\', \'block_size\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "space_to_batch_nd"
+ argspec: "args=[\'input\', \'block_shape\', \'paddings\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "space_to_depth"
+ argspec: "args=[\'input\', \'block_size\', \'name\', \'data_format\'], varargs=None, keywords=None, defaults=[\'None\', \'NHWC\'], "
+ }
+ member_method {
+ name: "sparse_add"
+ argspec: "args=[\'a\', \'b\', \'thresh\'], varargs=None, keywords=None, defaults=[\'0\'], "
+ }
+ member_method {
+ name: "sparse_concat"
+ argspec: "args=[\'axis\', \'sp_inputs\', \'name\', \'expand_nonconcat_dim\', \'concat_dim\'], varargs=None, keywords=None, defaults=[\'None\', \'False\', \'None\'], "
+ }
+ member_method {
+ name: "sparse_fill_empty_rows"
+ argspec: "args=[\'sp_input\', \'default_value\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "sparse_mask"
+ argspec: "args=[\'a\', \'mask_indices\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "sparse_matmul"
+ argspec: "args=[\'a\', \'b\', \'transpose_a\', \'transpose_b\', \'a_is_sparse\', \'b_is_sparse\', \'name\'], varargs=None, keywords=None, defaults=[\'False\', \'False\', \'False\', \'False\', \'None\'], "
+ }
+ member_method {
+ name: "sparse_maximum"
+ argspec: "args=[\'sp_a\', \'sp_b\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "sparse_merge"
+ argspec: "args=[\'sp_ids\', \'sp_values\', \'vocab_size\', \'name\', \'already_sorted\'], varargs=None, keywords=None, defaults=[\'None\', \'False\'], "
+ }
+ member_method {
+ name: "sparse_minimum"
+ argspec: "args=[\'sp_a\', \'sp_b\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "sparse_placeholder"
+ argspec: "args=[\'dtype\', \'shape\', \'name\'], varargs=None, keywords=None, defaults=[\'None\', \'None\'], "
+ }
+ member_method {
+ name: "sparse_reduce_max"
+ argspec: "args=[\'sp_input\', \'axis\', \'keepdims\', \'reduction_axes\', \'keep_dims\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'None\'], "
+ }
+ member_method {
+ name: "sparse_reduce_max_sparse"
+ argspec: "args=[\'sp_input\', \'axis\', \'keepdims\', \'reduction_axes\', \'keep_dims\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'None\'], "
+ }
+ member_method {
+ name: "sparse_reduce_sum"
+ argspec: "args=[\'sp_input\', \'axis\', \'keepdims\', \'reduction_axes\', \'keep_dims\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'None\'], "
+ }
+ member_method {
+ name: "sparse_reduce_sum_sparse"
+ argspec: "args=[\'sp_input\', \'axis\', \'keepdims\', \'reduction_axes\', \'keep_dims\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'None\'], "
+ }
+ member_method {
+ name: "sparse_reorder"
+ argspec: "args=[\'sp_input\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "sparse_reset_shape"
+ argspec: "args=[\'sp_input\', \'new_shape\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "sparse_reshape"
+ argspec: "args=[\'sp_input\', \'shape\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "sparse_retain"
+ argspec: "args=[\'sp_input\', \'to_retain\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "sparse_segment_mean"
+ argspec: "args=[\'data\', \'indices\', \'segment_ids\', \'name\', \'num_segments\'], varargs=None, keywords=None, defaults=[\'None\', \'None\'], "
+ }
+ member_method {
+ name: "sparse_segment_sqrt_n"
+ argspec: "args=[\'data\', \'indices\', \'segment_ids\', \'name\', \'num_segments\'], varargs=None, keywords=None, defaults=[\'None\', \'None\'], "
+ }
+ member_method {
+ name: "sparse_segment_sum"
+ argspec: "args=[\'data\', \'indices\', \'segment_ids\', \'name\', \'num_segments\'], varargs=None, keywords=None, defaults=[\'None\', \'None\'], "
+ }
+ member_method {
+ name: "sparse_slice"
+ argspec: "args=[\'sp_input\', \'start\', \'size\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "sparse_softmax"
+ argspec: "args=[\'sp_input\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "sparse_split"
+ argspec: "args=[\'keyword_required\', \'sp_input\', \'num_split\', \'axis\', \'name\', \'split_dim\'], varargs=None, keywords=None, defaults=[\'KeywordRequired()\', \'None\', \'None\', \'None\', \'None\', \'None\'], "
+ }
+ member_method {
+ name: "sparse_tensor_dense_matmul"
+ argspec: "args=[\'sp_a\', \'b\', \'adjoint_a\', \'adjoint_b\', \'name\'], varargs=None, keywords=None, defaults=[\'False\', \'False\', \'None\'], "
+ }
+ member_method {
+ name: "sparse_tensor_to_dense"
+ argspec: "args=[\'sp_input\', \'default_value\', \'validate_indices\', \'name\'], varargs=None, keywords=None, defaults=[\'0\', \'True\', \'None\'], "
+ }
+ member_method {
+ name: "sparse_to_dense"
+ argspec: "args=[\'sparse_indices\', \'output_shape\', \'sparse_values\', \'default_value\', \'validate_indices\', \'name\'], varargs=None, keywords=None, defaults=[\'0\', \'True\', \'None\'], "
+ }
+ member_method {
+ name: "sparse_to_indicator"
+ argspec: "args=[\'sp_input\', \'vocab_size\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "sparse_transpose"
+ argspec: "args=[\'sp_input\', \'perm\', \'name\'], varargs=None, keywords=None, defaults=[\'None\', \'None\'], "
+ }
+ member_method {
+ name: "split"
+ argspec: "args=[\'value\', \'num_or_size_splits\', \'axis\', \'num\', \'name\'], varargs=None, keywords=None, defaults=[\'0\', \'None\', \'split\'], "
+ }
+ member_method {
+ name: "sqrt"
+ argspec: "args=[\'x\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "square"
+ argspec: "args=[\'x\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "squared_difference"
+ argspec: "args=[\'x\', \'y\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "squeeze"
+ argspec: "args=[\'input\', \'axis\', \'name\', \'squeeze_dims\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\'], "
+ }
+ member_method {
+ name: "stack"
+ argspec: "args=[\'values\', \'axis\', \'name\'], varargs=None, keywords=None, defaults=[\'0\', \'stack\'], "
+ }
+ member_method {
+ name: "stop_gradient"
+ argspec: "args=[\'input\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "strided_slice"
+ argspec: "args=[\'input_\', \'begin\', \'end\', \'strides\', \'begin_mask\', \'end_mask\', \'ellipsis_mask\', \'new_axis_mask\', \'shrink_axis_mask\', \'var\', \'name\'], varargs=None, keywords=None, defaults=[\'None\', \'0\', \'0\', \'0\', \'0\', \'0\', \'None\', \'None\'], "
+ }
+ member_method {
+ name: "string_join"
+ argspec: "args=[\'inputs\', \'separator\', \'name\'], varargs=None, keywords=None, defaults=[\'\', \'None\'], "
+ }
+ member_method {
+ name: "string_split"
+ argspec: "args=[\'source\', \'delimiter\', \'skip_empty\'], varargs=None, keywords=None, defaults=[\' \', \'True\'], "
+ }
+ member_method {
+ name: "string_strip"
+ argspec: "args=[\'input\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "string_to_hash_bucket"
+ argspec: "args=[\'string_tensor\', \'num_buckets\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "string_to_hash_bucket_fast"
+ argspec: "args=[\'input\', \'num_buckets\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "string_to_hash_bucket_strong"
+ argspec: "args=[\'input\', \'num_buckets\', \'key\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "string_to_number"
+ argspec: "args=[\'string_tensor\', \'out_type\', \'name\'], varargs=None, keywords=None, defaults=[\"<dtype: \'float32\'>\", \'None\'], "
+ }
+ member_method {
+ name: "substr"
+ argspec: "args=[\'input\', \'pos\', \'len\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "subtract"
+ argspec: "args=[\'x\', \'y\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "svd"
+ argspec: "args=[\'tensor\', \'full_matrices\', \'compute_uv\', \'name\'], varargs=None, keywords=None, defaults=[\'False\', \'True\', \'None\'], "
+ }
+ member_method {
+ name: "tables_initializer"
+ argspec: "args=[\'name\'], varargs=None, keywords=None, defaults=[\'init_all_tables\'], "
+ }
+ member_method {
+ name: "tan"
+ argspec: "args=[\'x\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "tanh"
+ argspec: "args=[\'x\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "tensordot"
+ argspec: "args=[\'a\', \'b\', \'axes\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "tile"
+ argspec: "args=[\'input\', \'multiples\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "timestamp"
+ argspec: "args=[\'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "to_bfloat16"
+ argspec: "args=[\'x\', \'name\'], varargs=None, keywords=None, defaults=[\'ToBFloat16\'], "
+ }
+ member_method {
+ name: "to_complex128"
+ argspec: "args=[\'x\', \'name\'], varargs=None, keywords=None, defaults=[\'ToComplex128\'], "
+ }
+ member_method {
+ name: "to_complex64"
+ argspec: "args=[\'x\', \'name\'], varargs=None, keywords=None, defaults=[\'ToComplex64\'], "
+ }
+ member_method {
+ name: "to_double"
+ argspec: "args=[\'x\', \'name\'], varargs=None, keywords=None, defaults=[\'ToDouble\'], "
+ }
+ member_method {
+ name: "to_float"
+ argspec: "args=[\'x\', \'name\'], varargs=None, keywords=None, defaults=[\'ToFloat\'], "
+ }
+ member_method {
+ name: "to_int32"
+ argspec: "args=[\'x\', \'name\'], varargs=None, keywords=None, defaults=[\'ToInt32\'], "
+ }
+ member_method {
+ name: "to_int64"
+ argspec: "args=[\'x\', \'name\'], varargs=None, keywords=None, defaults=[\'ToInt64\'], "
+ }
+ member_method {
+ name: "trace"
+ argspec: "args=[\'x\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "trainable_variables"
+ argspec: "args=[\'scope\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "transpose"
+ argspec: "args=[\'a\', \'perm\', \'name\', \'conjugate\'], varargs=None, keywords=None, defaults=[\'None\', \'transpose\', \'False\'], "
+ }
+ member_method {
+ name: "truediv"
+ argspec: "args=[\'x\', \'y\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "truncated_normal"
+ argspec: "args=[\'shape\', \'mean\', \'stddev\', \'dtype\', \'seed\', \'name\'], varargs=None, keywords=None, defaults=[\'0.0\', \'1.0\', \"<dtype: \'float32\'>\", \'None\', \'None\'], "
+ }
+ member_method {
+ name: "truncatediv"
+ argspec: "args=[\'x\', \'y\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "truncatemod"
+ argspec: "args=[\'x\', \'y\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "tuple"
+ argspec: "args=[\'tensors\', \'name\', \'control_inputs\'], varargs=None, keywords=None, defaults=[\'None\', \'None\'], "
+ }
+ member_method {
+ name: "unique"
+ argspec: "args=[\'x\', \'out_idx\', \'name\'], varargs=None, keywords=None, defaults=[\"<dtype: \'int32\'>\", \'None\'], "
+ }
+ member_method {
+ name: "unique_with_counts"
+ argspec: "args=[\'x\', \'out_idx\', \'name\'], varargs=None, keywords=None, defaults=[\"<dtype: \'int32\'>\", \'None\'], "
+ }
+ member_method {
+ name: "unravel_index"
+ argspec: "args=[\'indices\', \'dims\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "unsorted_segment_max"
+ argspec: "args=[\'data\', \'segment_ids\', \'num_segments\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "unsorted_segment_mean"
+ argspec: "args=[\'data\', \'segment_ids\', \'num_segments\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "unsorted_segment_min"
+ argspec: "args=[\'data\', \'segment_ids\', \'num_segments\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "unsorted_segment_prod"
+ argspec: "args=[\'data\', \'segment_ids\', \'num_segments\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "unsorted_segment_sqrt_n"
+ argspec: "args=[\'data\', \'segment_ids\', \'num_segments\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "unsorted_segment_sum"
+ argspec: "args=[\'data\', \'segment_ids\', \'num_segments\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "unstack"
+ argspec: "args=[\'value\', \'num\', \'axis\', \'name\'], varargs=None, keywords=None, defaults=[\'None\', \'0\', \'unstack\'], "
+ }
+ member_method {
+ name: "variable_axis_size_partitioner"
+ argspec: "args=[\'max_shard_bytes\', \'axis\', \'bytes_per_string_element\', \'max_shards\'], varargs=None, keywords=None, defaults=[\'0\', \'16\', \'None\'], "
+ }
+ member_method {
+ name: "variable_op_scope"
+ argspec: "args=[\'values\', \'name_or_scope\', \'default_name\', \'initializer\', \'regularizer\', \'caching_device\', \'partitioner\', \'custom_getter\', \'reuse\', \'dtype\', \'use_resource\', \'constraint\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'None\'], "
+ }
+ member_method {
+ name: "variables_initializer"
+ argspec: "args=[\'var_list\', \'name\'], varargs=None, keywords=None, defaults=[\'init\'], "
+ }
+ member_method {
+ name: "verify_tensor_all_finite"
+ argspec: "args=[\'t\', \'msg\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "where"
+ argspec: "args=[\'condition\', \'x\', \'y\', \'name\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\'], "
+ }
+ member_method {
+ name: "while_loop"
+ argspec: "args=[\'cond\', \'body\', \'loop_vars\', \'shape_invariants\', \'parallel_iterations\', \'back_prop\', \'swap_memory\', \'name\', \'maximum_iterations\', \'return_same_structure\'], varargs=None, keywords=None, defaults=[\'None\', \'10\', \'True\', \'False\', \'None\', \'None\', \'False\'], "
+ }
+ member_method {
+ name: "write_file"
+ argspec: "args=[\'filename\', \'contents\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "zeros"
+ argspec: "args=[\'shape\', \'dtype\', \'name\'], varargs=None, keywords=None, defaults=[\"<dtype: \'float32\'>\", \'None\'], "
+ }
+ member_method {
+ name: "zeros_like"
+ argspec: "args=[\'tensor\', \'dtype\', \'name\', \'optimize\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'True\'], "
+ }
+ member_method {
+ name: "zeta"
+ argspec: "args=[\'x\', \'q\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+}
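Note for readers skimming these golden entries: each `argspec` string records the Python signature of a public `tf.*` symbol so the API compatibility test can flag unintended signature changes. The stand-alone sketch below is a hypothetical illustration of how such a string can be reproduced with Python's standard `inspect` module; it is not the actual generator under `tensorflow/tools/api`, and the helper name `format_argspec` is invented for this example.

    # Illustrative only: reproduce the golden-file argspec format for a callable.
    import inspect

    def format_argspec(fn):
        """Render fn's signature in the style used by the API golden files."""
        spec = inspect.getfullargspec(fn)
        # Defaults are stringified, matching entries such as defaults=['None'].
        defaults = [str(d) for d in spec.defaults] if spec.defaults else None
        return ("args=%s, varargs=%s, keywords=%s, defaults=%s"
                % (spec.args, spec.varargs, spec.varkw, defaults))

    def floordiv(x, y, name=None):
        # Hypothetical stand-in with the same signature shape as tf.floordiv.
        return x // y

    print(format_argspec(floordiv))
    # args=['x', 'y', 'name'], varargs=None, keywords=None, defaults=['None']

Running the snippet prints a line in the same shape as the `argspec` fields above, which is the property the golden test compares against.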
diff --git a/tensorflow/tools/api/golden/v1/tensorflow.profiler.-advice-proto.-checker.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.profiler.-advice-proto.-checker.pbtxt
new file mode 100644
index 0000000000..e09c44cc9c
--- /dev/null
+++ b/tensorflow/tools/api/golden/v1/tensorflow.profiler.-advice-proto.-checker.pbtxt
@@ -0,0 +1,12 @@
+path: "tensorflow.profiler.AdviceProto.Checker"
+tf_proto {
+ descriptor {
+ name: "Checker"
+ field {
+ name: "reports"
+ number: 2
+ label: LABEL_REPEATED
+ type: TYPE_STRING
+ }
+ }
+}
diff --git a/tensorflow/tools/api/golden/v1/tensorflow.profiler.-advice-proto.-checkers-entry.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.profiler.-advice-proto.-checkers-entry.pbtxt
new file mode 100644
index 0000000000..8746243549
--- /dev/null
+++ b/tensorflow/tools/api/golden/v1/tensorflow.profiler.-advice-proto.-checkers-entry.pbtxt
@@ -0,0 +1,22 @@
+path: "tensorflow.profiler.AdviceProto.CheckersEntry"
+tf_proto {
+ descriptor {
+ name: "CheckersEntry"
+ field {
+ name: "key"
+ number: 1
+ label: LABEL_OPTIONAL
+ type: TYPE_STRING
+ }
+ field {
+ name: "value"
+ number: 2
+ label: LABEL_OPTIONAL
+ type: TYPE_MESSAGE
+ type_name: ".tensorflow.tfprof.AdviceProto.Checker"
+ }
+ options {
+ map_entry: true
+ }
+ }
+}
diff --git a/tensorflow/tools/api/golden/v1/tensorflow.profiler.-advice-proto.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.profiler.-advice-proto.pbtxt
new file mode 100644
index 0000000000..a8a8858ccd
--- /dev/null
+++ b/tensorflow/tools/api/golden/v1/tensorflow.profiler.-advice-proto.pbtxt
@@ -0,0 +1,41 @@
+path: "tensorflow.profiler.AdviceProto"
+tf_proto {
+ descriptor {
+ name: "AdviceProto"
+ field {
+ name: "checkers"
+ number: 1
+ label: LABEL_REPEATED
+ type: TYPE_MESSAGE
+ type_name: ".tensorflow.tfprof.AdviceProto.CheckersEntry"
+ }
+ nested_type {
+ name: "CheckersEntry"
+ field {
+ name: "key"
+ number: 1
+ label: LABEL_OPTIONAL
+ type: TYPE_STRING
+ }
+ field {
+ name: "value"
+ number: 2
+ label: LABEL_OPTIONAL
+ type: TYPE_MESSAGE
+ type_name: ".tensorflow.tfprof.AdviceProto.Checker"
+ }
+ options {
+ map_entry: true
+ }
+ }
+ nested_type {
+ name: "Checker"
+ field {
+ name: "reports"
+ number: 2
+ label: LABEL_REPEATED
+ type: TYPE_STRING
+ }
+ }
+ }
+}
diff --git a/tensorflow/tools/api/golden/v1/tensorflow.profiler.-graph-node-proto.-input-shapes-entry.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.profiler.-graph-node-proto.-input-shapes-entry.pbtxt
new file mode 100644
index 0000000000..afec73f537
--- /dev/null
+++ b/tensorflow/tools/api/golden/v1/tensorflow.profiler.-graph-node-proto.-input-shapes-entry.pbtxt
@@ -0,0 +1,22 @@
+path: "tensorflow.profiler.GraphNodeProto.InputShapesEntry"
+tf_proto {
+ descriptor {
+ name: "InputShapesEntry"
+ field {
+ name: "key"
+ number: 1
+ label: LABEL_OPTIONAL
+ type: TYPE_INT32
+ }
+ field {
+ name: "value"
+ number: 2
+ label: LABEL_OPTIONAL
+ type: TYPE_MESSAGE
+ type_name: ".tensorflow.TensorShapeProto"
+ }
+ options {
+ map_entry: true
+ }
+ }
+}
diff --git a/tensorflow/tools/api/golden/v1/tensorflow.profiler.-graph-node-proto.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.profiler.-graph-node-proto.pbtxt
new file mode 100644
index 0000000000..3c83177005
--- /dev/null
+++ b/tensorflow/tools/api/golden/v1/tensorflow.profiler.-graph-node-proto.pbtxt
@@ -0,0 +1,191 @@
+path: "tensorflow.profiler.GraphNodeProto"
+tf_proto {
+ descriptor {
+ name: "GraphNodeProto"
+ field {
+ name: "name"
+ number: 1
+ label: LABEL_OPTIONAL
+ type: TYPE_STRING
+ }
+ field {
+ name: "tensor_value"
+ number: 15
+ label: LABEL_OPTIONAL
+ type: TYPE_MESSAGE
+ type_name: ".tensorflow.tfprof.TFProfTensorProto"
+ }
+ field {
+ name: "run_count"
+ number: 21
+ label: LABEL_OPTIONAL
+ type: TYPE_INT64
+ }
+ field {
+ name: "exec_micros"
+ number: 2
+ label: LABEL_OPTIONAL
+ type: TYPE_INT64
+ }
+ field {
+ name: "accelerator_exec_micros"
+ number: 17
+ label: LABEL_OPTIONAL
+ type: TYPE_INT64
+ }
+ field {
+ name: "cpu_exec_micros"
+ number: 18
+ label: LABEL_OPTIONAL
+ type: TYPE_INT64
+ }
+ field {
+ name: "requested_bytes"
+ number: 3
+ label: LABEL_OPTIONAL
+ type: TYPE_INT64
+ }
+ field {
+ name: "peak_bytes"
+ number: 24
+ label: LABEL_OPTIONAL
+ type: TYPE_INT64
+ }
+ field {
+ name: "residual_bytes"
+ number: 25
+ label: LABEL_OPTIONAL
+ type: TYPE_INT64
+ }
+ field {
+ name: "output_bytes"
+ number: 26
+ label: LABEL_OPTIONAL
+ type: TYPE_INT64
+ }
+ field {
+ name: "parameters"
+ number: 4
+ label: LABEL_OPTIONAL
+ type: TYPE_INT64
+ }
+ field {
+ name: "float_ops"
+ number: 13
+ label: LABEL_OPTIONAL
+ type: TYPE_INT64
+ }
+ field {
+ name: "devices"
+ number: 10
+ label: LABEL_REPEATED
+ type: TYPE_STRING
+ }
+ field {
+ name: "total_definition_count"
+ number: 23
+ label: LABEL_OPTIONAL
+ type: TYPE_INT64
+ }
+ field {
+ name: "total_run_count"
+ number: 22
+ label: LABEL_OPTIONAL
+ type: TYPE_INT64
+ }
+ field {
+ name: "total_exec_micros"
+ number: 6
+ label: LABEL_OPTIONAL
+ type: TYPE_INT64
+ }
+ field {
+ name: "total_accelerator_exec_micros"
+ number: 19
+ label: LABEL_OPTIONAL
+ type: TYPE_INT64
+ }
+ field {
+ name: "total_cpu_exec_micros"
+ number: 20
+ label: LABEL_OPTIONAL
+ type: TYPE_INT64
+ }
+ field {
+ name: "total_requested_bytes"
+ number: 7
+ label: LABEL_OPTIONAL
+ type: TYPE_INT64
+ }
+ field {
+ name: "total_peak_bytes"
+ number: 27
+ label: LABEL_OPTIONAL
+ type: TYPE_INT64
+ }
+ field {
+ name: "total_residual_bytes"
+ number: 28
+ label: LABEL_OPTIONAL
+ type: TYPE_INT64
+ }
+ field {
+ name: "total_output_bytes"
+ number: 29
+ label: LABEL_OPTIONAL
+ type: TYPE_INT64
+ }
+ field {
+ name: "total_parameters"
+ number: 8
+ label: LABEL_OPTIONAL
+ type: TYPE_INT64
+ }
+ field {
+ name: "total_float_ops"
+ number: 14
+ label: LABEL_OPTIONAL
+ type: TYPE_INT64
+ }
+ field {
+ name: "shapes"
+ number: 11
+ label: LABEL_REPEATED
+ type: TYPE_MESSAGE
+ type_name: ".tensorflow.TensorShapeProto"
+ }
+ field {
+ name: "input_shapes"
+ number: 16
+ label: LABEL_REPEATED
+ type: TYPE_MESSAGE
+ type_name: ".tensorflow.tfprof.GraphNodeProto.InputShapesEntry"
+ }
+ field {
+ name: "children"
+ number: 12
+ label: LABEL_REPEATED
+ type: TYPE_MESSAGE
+ type_name: ".tensorflow.tfprof.GraphNodeProto"
+ }
+ nested_type {
+ name: "InputShapesEntry"
+ field {
+ name: "key"
+ number: 1
+ label: LABEL_OPTIONAL
+ type: TYPE_INT32
+ }
+ field {
+ name: "value"
+ number: 2
+ label: LABEL_OPTIONAL
+ type: TYPE_MESSAGE
+ type_name: ".tensorflow.TensorShapeProto"
+ }
+ options {
+ map_entry: true
+ }
+ }
+ }
+}
diff --git a/tensorflow/tools/api/golden/v1/tensorflow.profiler.-multi-graph-node-proto.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.profiler.-multi-graph-node-proto.pbtxt
new file mode 100644
index 0000000000..2b08a05437
--- /dev/null
+++ b/tensorflow/tools/api/golden/v1/tensorflow.profiler.-multi-graph-node-proto.pbtxt
@@ -0,0 +1,134 @@
+path: "tensorflow.profiler.MultiGraphNodeProto"
+tf_proto {
+ descriptor {
+ name: "MultiGraphNodeProto"
+ field {
+ name: "name"
+ number: 1
+ label: LABEL_OPTIONAL
+ type: TYPE_STRING
+ }
+ field {
+ name: "exec_micros"
+ number: 2
+ label: LABEL_OPTIONAL
+ type: TYPE_INT64
+ }
+ field {
+ name: "accelerator_exec_micros"
+ number: 12
+ label: LABEL_OPTIONAL
+ type: TYPE_INT64
+ }
+ field {
+ name: "cpu_exec_micros"
+ number: 13
+ label: LABEL_OPTIONAL
+ type: TYPE_INT64
+ }
+ field {
+ name: "requested_bytes"
+ number: 3
+ label: LABEL_OPTIONAL
+ type: TYPE_INT64
+ }
+ field {
+ name: "peak_bytes"
+ number: 16
+ label: LABEL_OPTIONAL
+ type: TYPE_INT64
+ }
+ field {
+ name: "residual_bytes"
+ number: 17
+ label: LABEL_OPTIONAL
+ type: TYPE_INT64
+ }
+ field {
+ name: "output_bytes"
+ number: 18
+ label: LABEL_OPTIONAL
+ type: TYPE_INT64
+ }
+ field {
+ name: "parameters"
+ number: 4
+ label: LABEL_OPTIONAL
+ type: TYPE_INT64
+ }
+ field {
+ name: "float_ops"
+ number: 5
+ label: LABEL_OPTIONAL
+ type: TYPE_INT64
+ }
+ field {
+ name: "total_exec_micros"
+ number: 6
+ label: LABEL_OPTIONAL
+ type: TYPE_INT64
+ }
+ field {
+ name: "total_accelerator_exec_micros"
+ number: 14
+ label: LABEL_OPTIONAL
+ type: TYPE_INT64
+ }
+ field {
+ name: "total_cpu_exec_micros"
+ number: 15
+ label: LABEL_OPTIONAL
+ type: TYPE_INT64
+ }
+ field {
+ name: "total_requested_bytes"
+ number: 7
+ label: LABEL_OPTIONAL
+ type: TYPE_INT64
+ }
+ field {
+ name: "total_peak_bytes"
+ number: 19
+ label: LABEL_OPTIONAL
+ type: TYPE_INT64
+ }
+ field {
+ name: "total_residual_bytes"
+ number: 20
+ label: LABEL_OPTIONAL
+ type: TYPE_INT64
+ }
+ field {
+ name: "total_output_bytes"
+ number: 21
+ label: LABEL_OPTIONAL
+ type: TYPE_INT64
+ }
+ field {
+ name: "total_parameters"
+ number: 8
+ label: LABEL_OPTIONAL
+ type: TYPE_INT64
+ }
+ field {
+ name: "total_float_ops"
+ number: 9
+ label: LABEL_OPTIONAL
+ type: TYPE_INT64
+ }
+ field {
+ name: "graph_nodes"
+ number: 10
+ label: LABEL_REPEATED
+ type: TYPE_MESSAGE
+ type_name: ".tensorflow.tfprof.GraphNodeProto"
+ }
+ field {
+ name: "children"
+ number: 11
+ label: LABEL_REPEATED
+ type: TYPE_MESSAGE
+ type_name: ".tensorflow.tfprof.MultiGraphNodeProto"
+ }
+ }
+}
diff --git a/tensorflow/tools/api/golden/v1/tensorflow.profiler.-op-log-proto.-id-to-string-entry.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.profiler.-op-log-proto.-id-to-string-entry.pbtxt
new file mode 100644
index 0000000000..b3adc50c7e
--- /dev/null
+++ b/tensorflow/tools/api/golden/v1/tensorflow.profiler.-op-log-proto.-id-to-string-entry.pbtxt
@@ -0,0 +1,21 @@
+path: "tensorflow.profiler.OpLogProto.IdToStringEntry"
+tf_proto {
+ descriptor {
+ name: "IdToStringEntry"
+ field {
+ name: "key"
+ number: 1
+ label: LABEL_OPTIONAL
+ type: TYPE_INT64
+ }
+ field {
+ name: "value"
+ number: 2
+ label: LABEL_OPTIONAL
+ type: TYPE_STRING
+ }
+ options {
+ map_entry: true
+ }
+ }
+}
diff --git a/tensorflow/tools/api/golden/v1/tensorflow.profiler.-op-log-proto.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.profiler.-op-log-proto.pbtxt
new file mode 100644
index 0000000000..7510c566ba
--- /dev/null
+++ b/tensorflow/tools/api/golden/v1/tensorflow.profiler.-op-log-proto.pbtxt
@@ -0,0 +1,38 @@
+path: "tensorflow.profiler.OpLogProto"
+tf_proto {
+ descriptor {
+ name: "OpLogProto"
+ field {
+ name: "log_entries"
+ number: 1
+ label: LABEL_REPEATED
+ type: TYPE_MESSAGE
+ type_name: ".tensorflow.tfprof.OpLogEntry"
+ }
+ field {
+ name: "id_to_string"
+ number: 2
+ label: LABEL_REPEATED
+ type: TYPE_MESSAGE
+ type_name: ".tensorflow.tfprof.OpLogProto.IdToStringEntry"
+ }
+ nested_type {
+ name: "IdToStringEntry"
+ field {
+ name: "key"
+ number: 1
+ label: LABEL_OPTIONAL
+ type: TYPE_INT64
+ }
+ field {
+ name: "value"
+ number: 2
+ label: LABEL_OPTIONAL
+ type: TYPE_STRING
+ }
+ options {
+ map_entry: true
+ }
+ }
+ }
+}
diff --git a/tensorflow/tools/api/golden/tensorflow.profiler.-profile-option-builder.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.profiler.-profile-option-builder.pbtxt
index 19ff38a390..19ff38a390 100644
--- a/tensorflow/tools/api/golden/tensorflow.profiler.-profile-option-builder.pbtxt
+++ b/tensorflow/tools/api/golden/v1/tensorflow.profiler.-profile-option-builder.pbtxt
diff --git a/tensorflow/tools/api/golden/tensorflow.profiler.-profiler.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.profiler.-profiler.pbtxt
index acb61dae9f..acb61dae9f 100644
--- a/tensorflow/tools/api/golden/tensorflow.profiler.-profiler.pbtxt
+++ b/tensorflow/tools/api/golden/v1/tensorflow.profiler.-profiler.pbtxt
diff --git a/tensorflow/tools/api/golden/tensorflow.profiler.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.profiler.pbtxt
index 7b4d3ac522..7b4d3ac522 100644
--- a/tensorflow/tools/api/golden/tensorflow.profiler.pbtxt
+++ b/tensorflow/tools/api/golden/v1/tensorflow.profiler.pbtxt
diff --git a/tensorflow/tools/api/golden/tensorflow.python_io.-t-f-record-compression-type.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.python_io.-t-f-record-compression-type.pbtxt
index 4941dda50e..4941dda50e 100644
--- a/tensorflow/tools/api/golden/tensorflow.python_io.-t-f-record-compression-type.pbtxt
+++ b/tensorflow/tools/api/golden/v1/tensorflow.python_io.-t-f-record-compression-type.pbtxt
diff --git a/tensorflow/tools/api/golden/tensorflow.python_io.-t-f-record-options.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.python_io.-t-f-record-options.pbtxt
index 0853716023..614ba42d3e 100644
--- a/tensorflow/tools/api/golden/tensorflow.python_io.-t-f-record-options.pbtxt
+++ b/tensorflow/tools/api/golden/v1/tensorflow.python_io.-t-f-record-options.pbtxt
@@ -8,7 +8,7 @@ tf_class {
}
member_method {
name: "__init__"
- argspec: "args=[\'self\', \'compression_type\'], varargs=None, keywords=None, defaults=None"
+ argspec: "args=[\'self\', \'compression_type\', \'flush_mode\', \'input_buffer_size\', \'output_buffer_size\', \'window_bits\', \'compression_level\', \'compression_method\', \'mem_level\', \'compression_strategy\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'None\'], "
}
member_method {
name: "get_compression_type_string"
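A short sketch of the widened TFRecordOptions constructor shown in this hunk; only a couple of the new zlib knobs are set, and the output path is hypothetical:

    import tensorflow as tf

    options = tf.python_io.TFRecordOptions(
        compression_type=tf.python_io.TFRecordCompressionType.ZLIB,
        compression_level=6)  # remaining tuning args default to None
    with tf.python_io.TFRecordWriter('/tmp/data.tfrecord', options=options) as writer:
        writer.write(b'serialized example bytes')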
diff --git a/tensorflow/tools/api/golden/tensorflow.python_io.-t-f-record-writer.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.python_io.-t-f-record-writer.pbtxt
index 31775de2d1..31775de2d1 100644
--- a/tensorflow/tools/api/golden/tensorflow.python_io.-t-f-record-writer.pbtxt
+++ b/tensorflow/tools/api/golden/v1/tensorflow.python_io.-t-f-record-writer.pbtxt
diff --git a/tensorflow/tools/api/golden/tensorflow.python_io.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.python_io.pbtxt
index 7c9953e5fe..7c9953e5fe 100644
--- a/tensorflow/tools/api/golden/tensorflow.python_io.pbtxt
+++ b/tensorflow/tools/api/golden/v1/tensorflow.python_io.pbtxt
diff --git a/tensorflow/tools/api/golden/v1/tensorflow.quantization.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.quantization.pbtxt
new file mode 100644
index 0000000000..6d865efed0
--- /dev/null
+++ b/tensorflow/tools/api/golden/v1/tensorflow.quantization.pbtxt
@@ -0,0 +1,35 @@
+path: "tensorflow.quantization"
+tf_module {
+ member_method {
+ name: "dequantize"
+ argspec: "args=[\'input\', \'min_range\', \'max_range\', \'mode\', \'name\'], varargs=None, keywords=None, defaults=[\'MIN_COMBINED\', \'None\'], "
+ }
+ member_method {
+ name: "fake_quant_with_min_max_args"
+ argspec: "args=[\'inputs\', \'min\', \'max\', \'num_bits\', \'narrow_range\', \'name\'], varargs=None, keywords=None, defaults=[\'-6\', \'6\', \'8\', \'False\', \'None\'], "
+ }
+ member_method {
+ name: "fake_quant_with_min_max_args_gradient"
+ argspec: "args=[\'gradients\', \'inputs\', \'min\', \'max\', \'num_bits\', \'narrow_range\', \'name\'], varargs=None, keywords=None, defaults=[\'-6\', \'6\', \'8\', \'False\', \'None\'], "
+ }
+ member_method {
+ name: "fake_quant_with_min_max_vars"
+ argspec: "args=[\'inputs\', \'min\', \'max\', \'num_bits\', \'narrow_range\', \'name\'], varargs=None, keywords=None, defaults=[\'8\', \'False\', \'None\'], "
+ }
+ member_method {
+ name: "fake_quant_with_min_max_vars_gradient"
+ argspec: "args=[\'gradients\', \'inputs\', \'min\', \'max\', \'num_bits\', \'narrow_range\', \'name\'], varargs=None, keywords=None, defaults=[\'8\', \'False\', \'None\'], "
+ }
+ member_method {
+ name: "fake_quant_with_min_max_vars_per_channel"
+ argspec: "args=[\'inputs\', \'min\', \'max\', \'num_bits\', \'narrow_range\', \'name\'], varargs=None, keywords=None, defaults=[\'8\', \'False\', \'None\'], "
+ }
+ member_method {
+ name: "fake_quant_with_min_max_vars_per_channel_gradient"
+ argspec: "args=[\'gradients\', \'inputs\', \'min\', \'max\', \'num_bits\', \'narrow_range\', \'name\'], varargs=None, keywords=None, defaults=[\'8\', \'False\', \'None\'], "
+ }
+ member_method {
+ name: "quantized_concat"
+ argspec: "args=[\'concat_dim\', \'values\', \'input_mins\', \'input_maxes\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+}
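A minimal sketch of one of the tf.quantization members listed above, fake_quant_with_min_max_args, simulating 8-bit quantization over the default [-6, 6] range; the input values are illustrative:

    import tensorflow as tf

    x = tf.constant([-8.0, -1.3, 0.0, 2.7, 10.0])
    # Values are clipped to [min, max] and snapped to the 2**num_bits grid.
    q = tf.quantization.fake_quant_with_min_max_args(x, min=-6, max=6, num_bits=8)
    with tf.Session() as sess:
        print(sess.run(q))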
diff --git a/tensorflow/tools/api/golden/tensorflow.random_normal_initializer.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.random_normal_initializer.pbtxt
index 5993fdeb9c..5993fdeb9c 100644
--- a/tensorflow/tools/api/golden/tensorflow.random_normal_initializer.pbtxt
+++ b/tensorflow/tools/api/golden/v1/tensorflow.random_normal_initializer.pbtxt
diff --git a/tensorflow/tools/api/golden/tensorflow.random_uniform_initializer.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.random_uniform_initializer.pbtxt
index a434ed1599..a434ed1599 100644
--- a/tensorflow/tools/api/golden/tensorflow.random_uniform_initializer.pbtxt
+++ b/tensorflow/tools/api/golden/v1/tensorflow.random_uniform_initializer.pbtxt
diff --git a/tensorflow/tools/api/golden/tensorflow.resource_loader.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.resource_loader.pbtxt
index 288b78b4cd..288b78b4cd 100644
--- a/tensorflow/tools/api/golden/tensorflow.resource_loader.pbtxt
+++ b/tensorflow/tools/api/golden/v1/tensorflow.resource_loader.pbtxt
diff --git a/tensorflow/tools/api/golden/tensorflow.saved_model.builder.-saved-model-builder.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.saved_model.builder.-saved-model-builder.pbtxt
index ca8e5884b1..83bd703540 100644
--- a/tensorflow/tools/api/golden/tensorflow.saved_model.builder.-saved-model-builder.pbtxt
+++ b/tensorflow/tools/api/golden/v1/tensorflow.saved_model.builder.-saved-model-builder.pbtxt
@@ -8,11 +8,11 @@ tf_class {
}
member_method {
name: "add_meta_graph"
- argspec: "args=[\'self\', \'tags\', \'signature_def_map\', \'assets_collection\', \'legacy_init_op\', \'clear_devices\', \'main_op\', \'strip_default_attrs\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'False\', \'None\', \'False\'], "
+ argspec: "args=[\'self\', \'tags\', \'signature_def_map\', \'assets_collection\', \'legacy_init_op\', \'clear_devices\', \'main_op\', \'strip_default_attrs\', \'saver\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'False\', \'None\', \'False\', \'None\'], "
}
member_method {
name: "add_meta_graph_and_variables"
- argspec: "args=[\'self\', \'sess\', \'tags\', \'signature_def_map\', \'assets_collection\', \'legacy_init_op\', \'clear_devices\', \'main_op\', \'strip_default_attrs\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'False\', \'None\', \'False\'], "
+ argspec: "args=[\'self\', \'sess\', \'tags\', \'signature_def_map\', \'assets_collection\', \'legacy_init_op\', \'clear_devices\', \'main_op\', \'strip_default_attrs\', \'saver\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'False\', \'None\', \'False\', \'None\'], "
}
member_method {
name: "save"
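A sketch of the newly optional `saver` argument added in this hunk, passing a pre-configured tf.train.Saver instead of letting the builder construct one; the variable and export directory are hypothetical:

    import tensorflow as tf

    v = tf.Variable(1.0, name='v')
    saver = tf.train.Saver(sharded=True)
    builder = tf.saved_model.builder.SavedModelBuilder('/tmp/export')
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        builder.add_meta_graph_and_variables(
            sess, [tf.saved_model.tag_constants.SERVING], saver=saver)
    builder.save()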
diff --git a/tensorflow/tools/api/golden/tensorflow.saved_model.builder.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.saved_model.builder.pbtxt
index adc697ad1c..adc697ad1c 100644
--- a/tensorflow/tools/api/golden/tensorflow.saved_model.builder.pbtxt
+++ b/tensorflow/tools/api/golden/v1/tensorflow.saved_model.builder.pbtxt
diff --git a/tensorflow/tools/api/golden/tensorflow.saved_model.constants.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.saved_model.constants.pbtxt
index 20e10aa094..20e10aa094 100644
--- a/tensorflow/tools/api/golden/tensorflow.saved_model.constants.pbtxt
+++ b/tensorflow/tools/api/golden/v1/tensorflow.saved_model.constants.pbtxt
diff --git a/tensorflow/tools/api/golden/tensorflow.saved_model.loader.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.saved_model.loader.pbtxt
index 896e2160c6..511e6b4712 100644
--- a/tensorflow/tools/api/golden/tensorflow.saved_model.loader.pbtxt
+++ b/tensorflow/tools/api/golden/v1/tensorflow.saved_model.loader.pbtxt
@@ -2,7 +2,7 @@ path: "tensorflow.saved_model.loader"
tf_module {
member_method {
name: "load"
- argspec: "args=[\'sess\', \'tags\', \'export_dir\'], varargs=None, keywords=saver_kwargs, defaults=None"
+ argspec: "args=[\'sess\', \'tags\', \'export_dir\', \'import_scope\'], varargs=None, keywords=saver_kwargs, defaults=[\'None\'], "
}
member_method {
name: "maybe_saved_model_directory"
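A sketch of the new `import_scope` argument to saved_model.loader.load shown above, loading the SavedModel's graph under a name scope so its ops do not collide with existing ones; the export directory is hypothetical:

    import tensorflow as tf

    with tf.Session(graph=tf.Graph()) as sess:
        meta_graph_def = tf.saved_model.loader.load(
            sess, [tf.saved_model.tag_constants.SERVING], '/tmp/export',
            import_scope='imported')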
diff --git a/tensorflow/tools/api/golden/tensorflow.saved_model.main_op.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.saved_model.main_op.pbtxt
index 176cb788c2..176cb788c2 100644
--- a/tensorflow/tools/api/golden/tensorflow.saved_model.main_op.pbtxt
+++ b/tensorflow/tools/api/golden/v1/tensorflow.saved_model.main_op.pbtxt
diff --git a/tensorflow/tools/api/golden/tensorflow.saved_model.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.saved_model.pbtxt
index e1a0385092..e1a0385092 100644
--- a/tensorflow/tools/api/golden/tensorflow.saved_model.pbtxt
+++ b/tensorflow/tools/api/golden/v1/tensorflow.saved_model.pbtxt
diff --git a/tensorflow/tools/api/golden/tensorflow.saved_model.signature_constants.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.saved_model.signature_constants.pbtxt
index 478d410e06..478d410e06 100644
--- a/tensorflow/tools/api/golden/tensorflow.saved_model.signature_constants.pbtxt
+++ b/tensorflow/tools/api/golden/v1/tensorflow.saved_model.signature_constants.pbtxt
diff --git a/tensorflow/tools/api/golden/tensorflow.saved_model.signature_def_utils.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.saved_model.signature_def_utils.pbtxt
index a5602464ee..a5602464ee 100644
--- a/tensorflow/tools/api/golden/tensorflow.saved_model.signature_def_utils.pbtxt
+++ b/tensorflow/tools/api/golden/v1/tensorflow.saved_model.signature_def_utils.pbtxt
diff --git a/tensorflow/tools/api/golden/tensorflow.saved_model.tag_constants.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.saved_model.tag_constants.pbtxt
index 6af72498d7..6af72498d7 100644
--- a/tensorflow/tools/api/golden/tensorflow.saved_model.tag_constants.pbtxt
+++ b/tensorflow/tools/api/golden/v1/tensorflow.saved_model.tag_constants.pbtxt
diff --git a/tensorflow/tools/api/golden/tensorflow.saved_model.utils.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.saved_model.utils.pbtxt
index d95c946682..d95c946682 100644
--- a/tensorflow/tools/api/golden/tensorflow.saved_model.utils.pbtxt
+++ b/tensorflow/tools/api/golden/v1/tensorflow.saved_model.utils.pbtxt
diff --git a/tensorflow/tools/api/golden/tensorflow.sets.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.sets.pbtxt
index 8a196b1a55..8a196b1a55 100644
--- a/tensorflow/tools/api/golden/tensorflow.sets.pbtxt
+++ b/tensorflow/tools/api/golden/v1/tensorflow.sets.pbtxt
diff --git a/tensorflow/tools/api/golden/v1/tensorflow.sparse.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.sparse.pbtxt
new file mode 100644
index 0000000000..ba9e651b34
--- /dev/null
+++ b/tensorflow/tools/api/golden/v1/tensorflow.sparse.pbtxt
@@ -0,0 +1,19 @@
+path: "tensorflow.sparse"
+tf_module {
+ member_method {
+ name: "cross"
+ argspec: "args=[\'inputs\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "cross_hashed"
+ argspec: "args=[\'inputs\', \'num_buckets\', \'hash_key\', \'name\'], varargs=None, keywords=None, defaults=[\'0\', \'None\', \'None\'], "
+ }
+ member_method {
+ name: "expand_dims"
+ argspec: "args=[\'sp_input\', \'axis\', \'name\'], varargs=None, keywords=None, defaults=[\'None\', \'None\'], "
+ }
+ member_method {
+ name: "eye"
+ argspec: "args=[\'num_rows\', \'num_columns\', \'dtype\', \'name\'], varargs=None, keywords=None, defaults=[\'None\', \"<dtype: \'float32\'>\", \'None\'], "
+ }
+}
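A short sketch exercising two of the tf.sparse members listed above, including sparse.expand_dims (the API touched by this merge); values are illustrative:

    import tensorflow as tf

    sp = tf.SparseTensor(indices=[[0, 0], [1, 2]], values=[1.0, 2.0],
                         dense_shape=[2, 3])
    expanded = tf.sparse.expand_dims(sp, axis=0)  # dense_shape becomes [1, 2, 3]
    identity = tf.sparse.eye(3)                   # 3x3 sparse identity, float32
    with tf.Session() as sess:
        print(sess.run(tf.sparse_tensor_to_dense(expanded)))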
diff --git a/tensorflow/tools/api/golden/tensorflow.spectral.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.spectral.pbtxt
index 4f306540cc..6a421ef12d 100644
--- a/tensorflow/tools/api/golden/tensorflow.spectral.pbtxt
+++ b/tensorflow/tools/api/golden/v1/tensorflow.spectral.pbtxt
@@ -17,6 +17,10 @@ tf_module {
argspec: "args=[\'input\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
}
member_method {
+ name: "idct"
+ argspec: "args=[\'input\', \'type\', \'n\', \'axis\', \'norm\', \'name\'], varargs=None, keywords=None, defaults=[\'2\', \'None\', \'-1\', \'None\', \'None\'], "
+ }
+ member_method {
name: "ifft"
argspec: "args=[\'input\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
}
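A sketch of the idct entry added in this hunk: with orthonormal scaling, a type-II DCT followed by its inverse should approximately reproduce the input:

    import tensorflow as tf

    signal = tf.constant([[1.0, 2.0, 3.0, 4.0]])
    coeffs = tf.spectral.dct(signal, type=2, norm='ortho')
    recovered = tf.spectral.idct(coeffs, type=2, norm='ortho')
    with tf.Session() as sess:
        print(sess.run(recovered))  # close to [[1., 2., 3., 4.]]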
diff --git a/tensorflow/tools/api/golden/v1/tensorflow.strings.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.strings.pbtxt
new file mode 100644
index 0000000000..c81c156518
--- /dev/null
+++ b/tensorflow/tools/api/golden/v1/tensorflow.strings.pbtxt
@@ -0,0 +1,51 @@
+path: "tensorflow.strings"
+tf_module {
+ member_method {
+ name: "format"
+ argspec: "args=[\'template\', \'inputs\', \'placeholder\', \'summarize\', \'name\'], varargs=None, keywords=None, defaults=[\'{}\', \'3\', \'None\'], "
+ }
+ member_method {
+ name: "join"
+ argspec: "args=[\'inputs\', \'separator\', \'name\'], varargs=None, keywords=None, defaults=[\'\', \'None\'], "
+ }
+ member_method {
+ name: "length"
+ argspec: "args=[\'input\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "regex_full_match"
+ argspec: "args=[\'input\', \'pattern\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "regex_replace"
+ argspec: "args=[\'input\', \'pattern\', \'rewrite\', \'replace_global\', \'name\'], varargs=None, keywords=None, defaults=[\'True\', \'None\'], "
+ }
+ member_method {
+ name: "split"
+ argspec: "args=[\'source\', \'sep\', \'maxsplit\'], varargs=None, keywords=None, defaults=[\'None\', \'-1\'], "
+ }
+ member_method {
+ name: "strip"
+ argspec: "args=[\'input\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "substr"
+ argspec: "args=[\'input\', \'pos\', \'len\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "to_hash_bucket"
+ argspec: "args=[\'string_tensor\', \'num_buckets\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "to_hash_bucket_fast"
+ argspec: "args=[\'input\', \'num_buckets\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "to_hash_bucket_strong"
+ argspec: "args=[\'input\', \'num_buckets\', \'key\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "to_number"
+ argspec: "args=[\'string_tensor\', \'out_type\', \'name\'], varargs=None, keywords=None, defaults=[\"<dtype: \'float32\'>\", \'None\'], "
+ }
+}
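A short sketch touching a few of the tf.strings members listed above; inputs are illustrative:

    import tensorflow as tf

    s = tf.constant(['Hello TensorFlow', '  padded  '])
    lengths = tf.strings.length(s)
    joined = tf.strings.join(['a', 'b', 'c'], separator='-')   # 'a-b-c'
    stripped = tf.strings.strip(s)
    buckets = tf.strings.to_hash_bucket_fast(s, num_buckets=10)
    with tf.Session() as sess:
        print(sess.run([lengths, joined, stripped, buckets]))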
diff --git a/tensorflow/tools/api/golden/v1/tensorflow.summary.-event.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.summary.-event.pbtxt
new file mode 100644
index 0000000000..eb99d0f533
--- /dev/null
+++ b/tensorflow/tools/api/golden/v1/tensorflow.summary.-event.pbtxt
@@ -0,0 +1,74 @@
+path: "tensorflow.summary.Event"
+tf_proto {
+ descriptor {
+ name: "Event"
+ field {
+ name: "wall_time"
+ number: 1
+ label: LABEL_OPTIONAL
+ type: TYPE_DOUBLE
+ }
+ field {
+ name: "step"
+ number: 2
+ label: LABEL_OPTIONAL
+ type: TYPE_INT64
+ }
+ field {
+ name: "file_version"
+ number: 3
+ label: LABEL_OPTIONAL
+ type: TYPE_STRING
+ oneof_index: 0
+ }
+ field {
+ name: "graph_def"
+ number: 4
+ label: LABEL_OPTIONAL
+ type: TYPE_BYTES
+ oneof_index: 0
+ }
+ field {
+ name: "summary"
+ number: 5
+ label: LABEL_OPTIONAL
+ type: TYPE_MESSAGE
+ type_name: ".tensorflow.Summary"
+ oneof_index: 0
+ }
+ field {
+ name: "log_message"
+ number: 6
+ label: LABEL_OPTIONAL
+ type: TYPE_MESSAGE
+ type_name: ".tensorflow.LogMessage"
+ oneof_index: 0
+ }
+ field {
+ name: "session_log"
+ number: 7
+ label: LABEL_OPTIONAL
+ type: TYPE_MESSAGE
+ type_name: ".tensorflow.SessionLog"
+ oneof_index: 0
+ }
+ field {
+ name: "tagged_run_metadata"
+ number: 8
+ label: LABEL_OPTIONAL
+ type: TYPE_MESSAGE
+ type_name: ".tensorflow.TaggedRunMetadata"
+ oneof_index: 0
+ }
+ field {
+ name: "meta_graph_def"
+ number: 9
+ label: LABEL_OPTIONAL
+ type: TYPE_BYTES
+ oneof_index: 0
+ }
+ oneof_decl {
+ name: "what"
+ }
+ }
+}
diff --git a/tensorflow/tools/api/golden/tensorflow.summary.-file-writer-cache.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.summary.-file-writer-cache.pbtxt
index 2a5b63dcea..2a5b63dcea 100644
--- a/tensorflow/tools/api/golden/tensorflow.summary.-file-writer-cache.pbtxt
+++ b/tensorflow/tools/api/golden/v1/tensorflow.summary.-file-writer-cache.pbtxt
diff --git a/tensorflow/tools/api/golden/tensorflow.summary.-file-writer.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.summary.-file-writer.pbtxt
index 6b65b0ace3..6b65b0ace3 100644
--- a/tensorflow/tools/api/golden/tensorflow.summary.-file-writer.pbtxt
+++ b/tensorflow/tools/api/golden/v1/tensorflow.summary.-file-writer.pbtxt
diff --git a/tensorflow/tools/api/golden/v1/tensorflow.summary.-session-log.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.summary.-session-log.pbtxt
new file mode 100644
index 0000000000..73de73869c
--- /dev/null
+++ b/tensorflow/tools/api/golden/v1/tensorflow.summary.-session-log.pbtxt
@@ -0,0 +1,44 @@
+path: "tensorflow.summary.SessionLog"
+tf_proto {
+ descriptor {
+ name: "SessionLog"
+ field {
+ name: "status"
+ number: 1
+ label: LABEL_OPTIONAL
+ type: TYPE_ENUM
+ type_name: ".tensorflow.SessionLog.SessionStatus"
+ }
+ field {
+ name: "checkpoint_path"
+ number: 2
+ label: LABEL_OPTIONAL
+ type: TYPE_STRING
+ }
+ field {
+ name: "msg"
+ number: 3
+ label: LABEL_OPTIONAL
+ type: TYPE_STRING
+ }
+ enum_type {
+ name: "SessionStatus"
+ value {
+ name: "STATUS_UNSPECIFIED"
+ number: 0
+ }
+ value {
+ name: "START"
+ number: 1
+ }
+ value {
+ name: "STOP"
+ number: 2
+ }
+ value {
+ name: "CHECKPOINT"
+ number: 3
+ }
+ }
+ }
+}
diff --git a/tensorflow/tools/api/golden/v1/tensorflow.summary.-summary-description.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.summary.-summary-description.pbtxt
new file mode 100644
index 0000000000..4a8b59cf02
--- /dev/null
+++ b/tensorflow/tools/api/golden/v1/tensorflow.summary.-summary-description.pbtxt
@@ -0,0 +1,12 @@
+path: "tensorflow.summary.SummaryDescription"
+tf_proto {
+ descriptor {
+ name: "SummaryDescription"
+ field {
+ name: "type_hint"
+ number: 1
+ label: LABEL_OPTIONAL
+ type: TYPE_STRING
+ }
+ }
+}
diff --git a/tensorflow/tools/api/golden/v1/tensorflow.summary.-summary.-audio.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.summary.-summary.-audio.pbtxt
new file mode 100644
index 0000000000..8b271cf58f
--- /dev/null
+++ b/tensorflow/tools/api/golden/v1/tensorflow.summary.-summary.-audio.pbtxt
@@ -0,0 +1,36 @@
+path: "tensorflow.summary.Summary.Audio"
+tf_proto {
+ descriptor {
+ name: "Audio"
+ field {
+ name: "sample_rate"
+ number: 1
+ label: LABEL_OPTIONAL
+ type: TYPE_FLOAT
+ }
+ field {
+ name: "num_channels"
+ number: 2
+ label: LABEL_OPTIONAL
+ type: TYPE_INT64
+ }
+ field {
+ name: "length_frames"
+ number: 3
+ label: LABEL_OPTIONAL
+ type: TYPE_INT64
+ }
+ field {
+ name: "encoded_audio_string"
+ number: 4
+ label: LABEL_OPTIONAL
+ type: TYPE_BYTES
+ }
+ field {
+ name: "content_type"
+ number: 5
+ label: LABEL_OPTIONAL
+ type: TYPE_STRING
+ }
+ }
+}
diff --git a/tensorflow/tools/api/golden/v1/tensorflow.summary.-summary.-image.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.summary.-summary.-image.pbtxt
new file mode 100644
index 0000000000..dbbc02dd05
--- /dev/null
+++ b/tensorflow/tools/api/golden/v1/tensorflow.summary.-summary.-image.pbtxt
@@ -0,0 +1,30 @@
+path: "tensorflow.summary.Summary.Image"
+tf_proto {
+ descriptor {
+ name: "Image"
+ field {
+ name: "height"
+ number: 1
+ label: LABEL_OPTIONAL
+ type: TYPE_INT32
+ }
+ field {
+ name: "width"
+ number: 2
+ label: LABEL_OPTIONAL
+ type: TYPE_INT32
+ }
+ field {
+ name: "colorspace"
+ number: 3
+ label: LABEL_OPTIONAL
+ type: TYPE_INT32
+ }
+ field {
+ name: "encoded_image_string"
+ number: 4
+ label: LABEL_OPTIONAL
+ type: TYPE_BYTES
+ }
+ }
+}
diff --git a/tensorflow/tools/api/golden/v1/tensorflow.summary.-summary.-value.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.summary.-summary.-value.pbtxt
new file mode 100644
index 0000000000..4176171cd9
--- /dev/null
+++ b/tensorflow/tools/api/golden/v1/tensorflow.summary.-summary.-value.pbtxt
@@ -0,0 +1,74 @@
+path: "tensorflow.summary.Summary.Value"
+tf_proto {
+ descriptor {
+ name: "Value"
+ field {
+ name: "node_name"
+ number: 7
+ label: LABEL_OPTIONAL
+ type: TYPE_STRING
+ }
+ field {
+ name: "tag"
+ number: 1
+ label: LABEL_OPTIONAL
+ type: TYPE_STRING
+ }
+ field {
+ name: "metadata"
+ number: 9
+ label: LABEL_OPTIONAL
+ type: TYPE_MESSAGE
+ type_name: ".tensorflow.SummaryMetadata"
+ }
+ field {
+ name: "simple_value"
+ number: 2
+ label: LABEL_OPTIONAL
+ type: TYPE_FLOAT
+ oneof_index: 0
+ }
+ field {
+ name: "obsolete_old_style_histogram"
+ number: 3
+ label: LABEL_OPTIONAL
+ type: TYPE_BYTES
+ oneof_index: 0
+ }
+ field {
+ name: "image"
+ number: 4
+ label: LABEL_OPTIONAL
+ type: TYPE_MESSAGE
+ type_name: ".tensorflow.Summary.Image"
+ oneof_index: 0
+ }
+ field {
+ name: "histo"
+ number: 5
+ label: LABEL_OPTIONAL
+ type: TYPE_MESSAGE
+ type_name: ".tensorflow.HistogramProto"
+ oneof_index: 0
+ }
+ field {
+ name: "audio"
+ number: 6
+ label: LABEL_OPTIONAL
+ type: TYPE_MESSAGE
+ type_name: ".tensorflow.Summary.Audio"
+ oneof_index: 0
+ }
+ field {
+ name: "tensor"
+ number: 8
+ label: LABEL_OPTIONAL
+ type: TYPE_MESSAGE
+ type_name: ".tensorflow.TensorProto"
+ oneof_index: 0
+ }
+ oneof_decl {
+ name: "value"
+ }
+ }
+}
diff --git a/tensorflow/tools/api/golden/v1/tensorflow.summary.-summary.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.summary.-summary.pbtxt
new file mode 100644
index 0000000000..d6c5e3a87a
--- /dev/null
+++ b/tensorflow/tools/api/golden/v1/tensorflow.summary.-summary.pbtxt
@@ -0,0 +1,144 @@
+path: "tensorflow.summary.Summary"
+tf_proto {
+ descriptor {
+ name: "Summary"
+ field {
+ name: "value"
+ number: 1
+ label: LABEL_REPEATED
+ type: TYPE_MESSAGE
+ type_name: ".tensorflow.Summary.Value"
+ }
+ nested_type {
+ name: "Image"
+ field {
+ name: "height"
+ number: 1
+ label: LABEL_OPTIONAL
+ type: TYPE_INT32
+ }
+ field {
+ name: "width"
+ number: 2
+ label: LABEL_OPTIONAL
+ type: TYPE_INT32
+ }
+ field {
+ name: "colorspace"
+ number: 3
+ label: LABEL_OPTIONAL
+ type: TYPE_INT32
+ }
+ field {
+ name: "encoded_image_string"
+ number: 4
+ label: LABEL_OPTIONAL
+ type: TYPE_BYTES
+ }
+ }
+ nested_type {
+ name: "Audio"
+ field {
+ name: "sample_rate"
+ number: 1
+ label: LABEL_OPTIONAL
+ type: TYPE_FLOAT
+ }
+ field {
+ name: "num_channels"
+ number: 2
+ label: LABEL_OPTIONAL
+ type: TYPE_INT64
+ }
+ field {
+ name: "length_frames"
+ number: 3
+ label: LABEL_OPTIONAL
+ type: TYPE_INT64
+ }
+ field {
+ name: "encoded_audio_string"
+ number: 4
+ label: LABEL_OPTIONAL
+ type: TYPE_BYTES
+ }
+ field {
+ name: "content_type"
+ number: 5
+ label: LABEL_OPTIONAL
+ type: TYPE_STRING
+ }
+ }
+ nested_type {
+ name: "Value"
+ field {
+ name: "node_name"
+ number: 7
+ label: LABEL_OPTIONAL
+ type: TYPE_STRING
+ }
+ field {
+ name: "tag"
+ number: 1
+ label: LABEL_OPTIONAL
+ type: TYPE_STRING
+ }
+ field {
+ name: "metadata"
+ number: 9
+ label: LABEL_OPTIONAL
+ type: TYPE_MESSAGE
+ type_name: ".tensorflow.SummaryMetadata"
+ }
+ field {
+ name: "simple_value"
+ number: 2
+ label: LABEL_OPTIONAL
+ type: TYPE_FLOAT
+ oneof_index: 0
+ }
+ field {
+ name: "obsolete_old_style_histogram"
+ number: 3
+ label: LABEL_OPTIONAL
+ type: TYPE_BYTES
+ oneof_index: 0
+ }
+ field {
+ name: "image"
+ number: 4
+ label: LABEL_OPTIONAL
+ type: TYPE_MESSAGE
+ type_name: ".tensorflow.Summary.Image"
+ oneof_index: 0
+ }
+ field {
+ name: "histo"
+ number: 5
+ label: LABEL_OPTIONAL
+ type: TYPE_MESSAGE
+ type_name: ".tensorflow.HistogramProto"
+ oneof_index: 0
+ }
+ field {
+ name: "audio"
+ number: 6
+ label: LABEL_OPTIONAL
+ type: TYPE_MESSAGE
+ type_name: ".tensorflow.Summary.Audio"
+ oneof_index: 0
+ }
+ field {
+ name: "tensor"
+ number: 8
+ label: LABEL_OPTIONAL
+ type: TYPE_MESSAGE
+ type_name: ".tensorflow.TensorProto"
+ oneof_index: 0
+ }
+ oneof_decl {
+ name: "value"
+ }
+ }
+ }
+}
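A sketch of building the Summary proto described above by hand and handing it to a FileWriter; the tag, value, and log directory are illustrative:

    import tensorflow as tf

    summary = tf.summary.Summary(
        value=[tf.summary.Summary.Value(tag='accuracy', simple_value=0.91)])
    writer = tf.summary.FileWriter('/tmp/logs')
    writer.add_summary(summary, global_step=1)
    writer.close()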
diff --git a/tensorflow/tools/api/golden/v1/tensorflow.summary.-tagged-run-metadata.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.summary.-tagged-run-metadata.pbtxt
new file mode 100644
index 0000000000..27c8873320
--- /dev/null
+++ b/tensorflow/tools/api/golden/v1/tensorflow.summary.-tagged-run-metadata.pbtxt
@@ -0,0 +1,18 @@
+path: "tensorflow.summary.TaggedRunMetadata"
+tf_proto {
+ descriptor {
+ name: "TaggedRunMetadata"
+ field {
+ name: "tag"
+ number: 1
+ label: LABEL_OPTIONAL
+ type: TYPE_STRING
+ }
+ field {
+ name: "run_metadata"
+ number: 2
+ label: LABEL_OPTIONAL
+ type: TYPE_BYTES
+ }
+ }
+}
diff --git a/tensorflow/tools/api/golden/tensorflow.summary.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.summary.pbtxt
index 871ebb5247..7ed9cd77a0 100644
--- a/tensorflow/tools/api/golden/tensorflow.summary.pbtxt
+++ b/tensorflow/tools/api/golden/v1/tensorflow.summary.pbtxt
@@ -50,7 +50,7 @@ tf_module {
}
member_method {
name: "merge_all"
- argspec: "args=[\'key\', \'scope\'], varargs=None, keywords=None, defaults=[\'summaries\', \'None\'], "
+ argspec: "args=[\'key\', \'scope\', \'name\'], varargs=None, keywords=None, defaults=[\'summaries\', \'None\', \'None\'], "
}
member_method {
name: "scalar"
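A sketch of the `name` argument added to merge_all in this hunk; the scalar summary exists only so merge_all has something to collect:

    import tensorflow as tf

    tf.summary.scalar('loss', tf.constant(0.5))
    merged = tf.summary.merge_all(key=tf.GraphKeys.SUMMARIES,
                                  name='all_summaries')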
diff --git a/tensorflow/tools/api/golden/tensorflow.sysconfig.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.sysconfig.pbtxt
index 2f00aeac25..2f00aeac25 100644
--- a/tensorflow/tools/api/golden/tensorflow.sysconfig.pbtxt
+++ b/tensorflow/tools/api/golden/v1/tensorflow.sysconfig.pbtxt
diff --git a/tensorflow/tools/api/golden/tensorflow.test.-benchmark.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.test.-benchmark.pbtxt
index df528e26b6..df528e26b6 100644
--- a/tensorflow/tools/api/golden/tensorflow.test.-benchmark.pbtxt
+++ b/tensorflow/tools/api/golden/v1/tensorflow.test.-benchmark.pbtxt
diff --git a/tensorflow/tools/api/golden/tensorflow.test.-stub-out-for-testing.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.test.-stub-out-for-testing.pbtxt
index e02a0c6097..e02a0c6097 100644
--- a/tensorflow/tools/api/golden/tensorflow.test.-stub-out-for-testing.pbtxt
+++ b/tensorflow/tools/api/golden/v1/tensorflow.test.-stub-out-for-testing.pbtxt
diff --git a/tensorflow/tools/api/golden/tensorflow.test.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.test.pbtxt
index abe9b068ae..abe9b068ae 100644
--- a/tensorflow/tools/api/golden/tensorflow.test.pbtxt
+++ b/tensorflow/tools/api/golden/v1/tensorflow.test.pbtxt
diff --git a/tensorflow/tools/api/golden/tensorflow.train.-adadelta-optimizer.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.train.-adadelta-optimizer.pbtxt
index 16bfbf20d5..1f1d8b6f9e 100644
--- a/tensorflow/tools/api/golden/tensorflow.train.-adadelta-optimizer.pbtxt
+++ b/tensorflow/tools/api/golden/v1/tensorflow.train.-adadelta-optimizer.pbtxt
@@ -2,7 +2,7 @@ path: "tensorflow.train.AdadeltaOptimizer"
tf_class {
is_instance: "<class \'tensorflow.python.training.adadelta.AdadeltaOptimizer\'>"
is_instance: "<class \'tensorflow.python.training.optimizer.Optimizer\'>"
- is_instance: "<class \'tensorflow.python.training.checkpointable.CheckpointableBase\'>"
+ is_instance: "<class \'tensorflow.python.training.checkpointable.base.CheckpointableBase\'>"
is_instance: "<type \'object\'>"
member {
name: "GATE_GRAPH"
diff --git a/tensorflow/tools/api/golden/tensorflow.train.-adagrad-d-a-optimizer.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.train.-adagrad-d-a-optimizer.pbtxt
index 61cde9181c..a7c05d4849 100644
--- a/tensorflow/tools/api/golden/tensorflow.train.-adagrad-d-a-optimizer.pbtxt
+++ b/tensorflow/tools/api/golden/v1/tensorflow.train.-adagrad-d-a-optimizer.pbtxt
@@ -2,7 +2,7 @@ path: "tensorflow.train.AdagradDAOptimizer"
tf_class {
is_instance: "<class \'tensorflow.python.training.adagrad_da.AdagradDAOptimizer\'>"
is_instance: "<class \'tensorflow.python.training.optimizer.Optimizer\'>"
- is_instance: "<class \'tensorflow.python.training.checkpointable.CheckpointableBase\'>"
+ is_instance: "<class \'tensorflow.python.training.checkpointable.base.CheckpointableBase\'>"
is_instance: "<type \'object\'>"
member {
name: "GATE_GRAPH"
diff --git a/tensorflow/tools/api/golden/tensorflow.train.-adagrad-optimizer.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.train.-adagrad-optimizer.pbtxt
index 0a998c1afe..bc8b92389c 100644
--- a/tensorflow/tools/api/golden/tensorflow.train.-adagrad-optimizer.pbtxt
+++ b/tensorflow/tools/api/golden/v1/tensorflow.train.-adagrad-optimizer.pbtxt
@@ -2,7 +2,7 @@ path: "tensorflow.train.AdagradOptimizer"
tf_class {
is_instance: "<class \'tensorflow.python.training.adagrad.AdagradOptimizer\'>"
is_instance: "<class \'tensorflow.python.training.optimizer.Optimizer\'>"
- is_instance: "<class \'tensorflow.python.training.checkpointable.CheckpointableBase\'>"
+ is_instance: "<class \'tensorflow.python.training.checkpointable.base.CheckpointableBase\'>"
is_instance: "<type \'object\'>"
member {
name: "GATE_GRAPH"
diff --git a/tensorflow/tools/api/golden/tensorflow.train.-adam-optimizer.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.train.-adam-optimizer.pbtxt
index cc59541525..5d17be9378 100644
--- a/tensorflow/tools/api/golden/tensorflow.train.-adam-optimizer.pbtxt
+++ b/tensorflow/tools/api/golden/v1/tensorflow.train.-adam-optimizer.pbtxt
@@ -2,7 +2,7 @@ path: "tensorflow.train.AdamOptimizer"
tf_class {
is_instance: "<class \'tensorflow.python.training.adam.AdamOptimizer\'>"
is_instance: "<class \'tensorflow.python.training.optimizer.Optimizer\'>"
- is_instance: "<class \'tensorflow.python.training.checkpointable.CheckpointableBase\'>"
+ is_instance: "<class \'tensorflow.python.training.checkpointable.base.CheckpointableBase\'>"
is_instance: "<type \'object\'>"
member {
name: "GATE_GRAPH"
diff --git a/tensorflow/tools/api/golden/v1/tensorflow.train.-bytes-list.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.train.-bytes-list.pbtxt
new file mode 100644
index 0000000000..87e4f160e5
--- /dev/null
+++ b/tensorflow/tools/api/golden/v1/tensorflow.train.-bytes-list.pbtxt
@@ -0,0 +1,12 @@
+path: "tensorflow.train.BytesList"
+tf_proto {
+ descriptor {
+ name: "BytesList"
+ field {
+ name: "value"
+ number: 1
+ label: LABEL_REPEATED
+ type: TYPE_BYTES
+ }
+ }
+}
diff --git a/tensorflow/tools/api/golden/tensorflow.train.-checkpoint-saver-hook.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.train.-checkpoint-saver-hook.pbtxt
index c3037baa8c..c3037baa8c 100644
--- a/tensorflow/tools/api/golden/tensorflow.train.-checkpoint-saver-hook.pbtxt
+++ b/tensorflow/tools/api/golden/v1/tensorflow.train.-checkpoint-saver-hook.pbtxt
diff --git a/tensorflow/tools/api/golden/tensorflow.train.-checkpoint-saver-listener.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.train.-checkpoint-saver-listener.pbtxt
index 9d3688e565..9d3688e565 100644
--- a/tensorflow/tools/api/golden/tensorflow.train.-checkpoint-saver-listener.pbtxt
+++ b/tensorflow/tools/api/golden/v1/tensorflow.train.-checkpoint-saver-listener.pbtxt
diff --git a/tensorflow/tools/api/golden/v1/tensorflow.train.-checkpoint.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.train.-checkpoint.pbtxt
new file mode 100644
index 0000000000..5be37200f3
--- /dev/null
+++ b/tensorflow/tools/api/golden/v1/tensorflow.train.-checkpoint.pbtxt
@@ -0,0 +1,27 @@
+path: "tensorflow.train.Checkpoint"
+tf_class {
+ is_instance: "<class \'tensorflow.python.training.checkpointable.util.Checkpoint\'>"
+ is_instance: "<class \'tensorflow.python.training.checkpointable.tracking.Checkpointable\'>"
+ is_instance: "<class \'tensorflow.python.training.checkpointable.base.CheckpointableBase\'>"
+ is_instance: "<type \'object\'>"
+ member {
+ name: "save_counter"
+ mtype: "<type \'property\'>"
+ }
+ member_method {
+ name: "__init__"
+ argspec: "args=[\'self\'], varargs=None, keywords=kwargs, defaults=None"
+ }
+ member_method {
+ name: "restore"
+ argspec: "args=[\'self\', \'save_path\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "save"
+ argspec: "args=[\'self\', \'file_prefix\', \'session\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "write"
+ argspec: "args=[\'self\', \'file_prefix\', \'session\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+}
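A sketch of the object-based tf.train.Checkpoint API listed above, in graph mode where save and restore take an explicit session; the checkpoint prefix and the status-object call after restore are assumptions:

    import tensorflow as tf

    v = tf.get_variable('v', initializer=1.0)
    ckpt = tf.train.Checkpoint(v=v)
    with tf.Session() as sess:
        sess.run(v.initializer)
        path = ckpt.save('/tmp/ckpt/model', session=sess)
        status = ckpt.restore(path)
        status.run_restore_ops(session=sess)  # assumed graph-mode status API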
diff --git a/tensorflow/tools/api/golden/tensorflow.train.-chief-session-creator.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.train.-chief-session-creator.pbtxt
index abbe273be3..abbe273be3 100644
--- a/tensorflow/tools/api/golden/tensorflow.train.-chief-session-creator.pbtxt
+++ b/tensorflow/tools/api/golden/v1/tensorflow.train.-chief-session-creator.pbtxt
diff --git a/tensorflow/tools/api/golden/v1/tensorflow.train.-cluster-def.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.train.-cluster-def.pbtxt
new file mode 100644
index 0000000000..f9de26839f
--- /dev/null
+++ b/tensorflow/tools/api/golden/v1/tensorflow.train.-cluster-def.pbtxt
@@ -0,0 +1,13 @@
+path: "tensorflow.train.ClusterDef"
+tf_proto {
+ descriptor {
+ name: "ClusterDef"
+ field {
+ name: "job"
+ number: 1
+ label: LABEL_REPEATED
+ type: TYPE_MESSAGE
+ type_name: ".tensorflow.JobDef"
+ }
+ }
+}
diff --git a/tensorflow/tools/api/golden/tensorflow.train.-cluster-spec.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.train.-cluster-spec.pbtxt
index 1658b15a5f..1658b15a5f 100644
--- a/tensorflow/tools/api/golden/tensorflow.train.-cluster-spec.pbtxt
+++ b/tensorflow/tools/api/golden/v1/tensorflow.train.-cluster-spec.pbtxt
diff --git a/tensorflow/tools/api/golden/tensorflow.train.-coordinator.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.train.-coordinator.pbtxt
index 11277f077e..11277f077e 100644
--- a/tensorflow/tools/api/golden/tensorflow.train.-coordinator.pbtxt
+++ b/tensorflow/tools/api/golden/v1/tensorflow.train.-coordinator.pbtxt
diff --git a/tensorflow/tools/api/golden/v1/tensorflow.train.-example.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.train.-example.pbtxt
new file mode 100644
index 0000000000..23c30f1ef4
--- /dev/null
+++ b/tensorflow/tools/api/golden/v1/tensorflow.train.-example.pbtxt
@@ -0,0 +1,13 @@
+path: "tensorflow.train.Example"
+tf_proto {
+ descriptor {
+ name: "Example"
+ field {
+ name: "features"
+ number: 1
+ label: LABEL_OPTIONAL
+ type: TYPE_MESSAGE
+ type_name: ".tensorflow.Features"
+ }
+ }
+}
diff --git a/tensorflow/tools/api/golden/tensorflow.train.-exponential-moving-average.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.train.-exponential-moving-average.pbtxt
index 737acbe07c..c9fe136e68 100644
--- a/tensorflow/tools/api/golden/tensorflow.train.-exponential-moving-average.pbtxt
+++ b/tensorflow/tools/api/golden/v1/tensorflow.train.-exponential-moving-average.pbtxt
@@ -2,6 +2,10 @@ path: "tensorflow.train.ExponentialMovingAverage"
tf_class {
is_instance: "<class \'tensorflow.python.training.moving_averages.ExponentialMovingAverage\'>"
is_instance: "<type \'object\'>"
+ member {
+ name: "name"
+ mtype: "<type \'property\'>"
+ }
member_method {
name: "__init__"
argspec: "args=[\'self\', \'decay\', \'num_updates\', \'zero_debias\', \'name\'], varargs=None, keywords=None, defaults=[\'None\', \'False\', \'ExponentialMovingAverage\'], "
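A sketch of the `name` property newly exposed in this hunk:

    import tensorflow as tf

    ema = tf.train.ExponentialMovingAverage(decay=0.999)
    print(ema.name)  # 'ExponentialMovingAverage' unless overridden in __init__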
diff --git a/tensorflow/tools/api/golden/v1/tensorflow.train.-feature-list.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.train.-feature-list.pbtxt
new file mode 100644
index 0000000000..2a8b3714fc
--- /dev/null
+++ b/tensorflow/tools/api/golden/v1/tensorflow.train.-feature-list.pbtxt
@@ -0,0 +1,13 @@
+path: "tensorflow.train.FeatureList"
+tf_proto {
+ descriptor {
+ name: "FeatureList"
+ field {
+ name: "feature"
+ number: 1
+ label: LABEL_REPEATED
+ type: TYPE_MESSAGE
+ type_name: ".tensorflow.Feature"
+ }
+ }
+}
diff --git a/tensorflow/tools/api/golden/v1/tensorflow.train.-feature-lists.-feature-list-entry.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.train.-feature-lists.-feature-list-entry.pbtxt
new file mode 100644
index 0000000000..cd1d56e606
--- /dev/null
+++ b/tensorflow/tools/api/golden/v1/tensorflow.train.-feature-lists.-feature-list-entry.pbtxt
@@ -0,0 +1,22 @@
+path: "tensorflow.train.FeatureLists.FeatureListEntry"
+tf_proto {
+ descriptor {
+ name: "FeatureListEntry"
+ field {
+ name: "key"
+ number: 1
+ label: LABEL_OPTIONAL
+ type: TYPE_STRING
+ }
+ field {
+ name: "value"
+ number: 2
+ label: LABEL_OPTIONAL
+ type: TYPE_MESSAGE
+ type_name: ".tensorflow.FeatureList"
+ }
+ options {
+ map_entry: true
+ }
+ }
+}
diff --git a/tensorflow/tools/api/golden/v1/tensorflow.train.-feature-lists.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.train.-feature-lists.pbtxt
new file mode 100644
index 0000000000..3c183a6476
--- /dev/null
+++ b/tensorflow/tools/api/golden/v1/tensorflow.train.-feature-lists.pbtxt
@@ -0,0 +1,32 @@
+path: "tensorflow.train.FeatureLists"
+tf_proto {
+ descriptor {
+ name: "FeatureLists"
+ field {
+ name: "feature_list"
+ number: 1
+ label: LABEL_REPEATED
+ type: TYPE_MESSAGE
+ type_name: ".tensorflow.FeatureLists.FeatureListEntry"
+ }
+ nested_type {
+ name: "FeatureListEntry"
+ field {
+ name: "key"
+ number: 1
+ label: LABEL_OPTIONAL
+ type: TYPE_STRING
+ }
+ field {
+ name: "value"
+ number: 2
+ label: LABEL_OPTIONAL
+ type: TYPE_MESSAGE
+ type_name: ".tensorflow.FeatureList"
+ }
+ options {
+ map_entry: true
+ }
+ }
+ }
+}
diff --git a/tensorflow/tools/api/golden/v1/tensorflow.train.-feature.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.train.-feature.pbtxt
new file mode 100644
index 0000000000..5d0eb871c2
--- /dev/null
+++ b/tensorflow/tools/api/golden/v1/tensorflow.train.-feature.pbtxt
@@ -0,0 +1,33 @@
+path: "tensorflow.train.Feature"
+tf_proto {
+ descriptor {
+ name: "Feature"
+ field {
+ name: "bytes_list"
+ number: 1
+ label: LABEL_OPTIONAL
+ type: TYPE_MESSAGE
+ type_name: ".tensorflow.BytesList"
+ oneof_index: 0
+ }
+ field {
+ name: "float_list"
+ number: 2
+ label: LABEL_OPTIONAL
+ type: TYPE_MESSAGE
+ type_name: ".tensorflow.FloatList"
+ oneof_index: 0
+ }
+ field {
+ name: "int64_list"
+ number: 3
+ label: LABEL_OPTIONAL
+ type: TYPE_MESSAGE
+ type_name: ".tensorflow.Int64List"
+ oneof_index: 0
+ }
+ oneof_decl {
+ name: "kind"
+ }
+ }
+}
diff --git a/tensorflow/tools/api/golden/v1/tensorflow.train.-features.-feature-entry.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.train.-features.-feature-entry.pbtxt
new file mode 100644
index 0000000000..f912005f1c
--- /dev/null
+++ b/tensorflow/tools/api/golden/v1/tensorflow.train.-features.-feature-entry.pbtxt
@@ -0,0 +1,22 @@
+path: "tensorflow.train.Features.FeatureEntry"
+tf_proto {
+ descriptor {
+ name: "FeatureEntry"
+ field {
+ name: "key"
+ number: 1
+ label: LABEL_OPTIONAL
+ type: TYPE_STRING
+ }
+ field {
+ name: "value"
+ number: 2
+ label: LABEL_OPTIONAL
+ type: TYPE_MESSAGE
+ type_name: ".tensorflow.Feature"
+ }
+ options {
+ map_entry: true
+ }
+ }
+}
diff --git a/tensorflow/tools/api/golden/v1/tensorflow.train.-features.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.train.-features.pbtxt
new file mode 100644
index 0000000000..b788ca1d57
--- /dev/null
+++ b/tensorflow/tools/api/golden/v1/tensorflow.train.-features.pbtxt
@@ -0,0 +1,32 @@
+path: "tensorflow.train.Features"
+tf_proto {
+ descriptor {
+ name: "Features"
+ field {
+ name: "feature"
+ number: 1
+ label: LABEL_REPEATED
+ type: TYPE_MESSAGE
+ type_name: ".tensorflow.Features.FeatureEntry"
+ }
+ nested_type {
+ name: "FeatureEntry"
+ field {
+ name: "key"
+ number: 1
+ label: LABEL_OPTIONAL
+ type: TYPE_STRING
+ }
+ field {
+ name: "value"
+ number: 2
+ label: LABEL_OPTIONAL
+ type: TYPE_MESSAGE
+ type_name: ".tensorflow.Feature"
+ }
+ options {
+ map_entry: true
+ }
+ }
+ }
+}
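A sketch tying together the Example, Features, Feature, and value-list protos described in the surrounding golden files; feature names and values are illustrative:

    import tensorflow as tf

    example = tf.train.Example(features=tf.train.Features(feature={
        'label': tf.train.Feature(int64_list=tf.train.Int64List(value=[1])),
        'weight': tf.train.Feature(float_list=tf.train.FloatList(value=[0.5])),
        'name': tf.train.Feature(bytes_list=tf.train.BytesList(value=[b'sample'])),
    }))
    serialized = example.SerializeToString()  # ready to write to a TFRecord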
diff --git a/tensorflow/tools/api/golden/tensorflow.train.-feed-fn-hook.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.train.-feed-fn-hook.pbtxt
index 7bec4d032c..7bec4d032c 100644
--- a/tensorflow/tools/api/golden/tensorflow.train.-feed-fn-hook.pbtxt
+++ b/tensorflow/tools/api/golden/v1/tensorflow.train.-feed-fn-hook.pbtxt
diff --git a/tensorflow/tools/api/golden/tensorflow.train.-final-ops-hook.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.train.-final-ops-hook.pbtxt
index 31cf9aaeb2..31cf9aaeb2 100644
--- a/tensorflow/tools/api/golden/tensorflow.train.-final-ops-hook.pbtxt
+++ b/tensorflow/tools/api/golden/v1/tensorflow.train.-final-ops-hook.pbtxt
diff --git a/tensorflow/tools/api/golden/v1/tensorflow.train.-float-list.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.train.-float-list.pbtxt
new file mode 100644
index 0000000000..55d3b46f20
--- /dev/null
+++ b/tensorflow/tools/api/golden/v1/tensorflow.train.-float-list.pbtxt
@@ -0,0 +1,15 @@
+path: "tensorflow.train.FloatList"
+tf_proto {
+ descriptor {
+ name: "FloatList"
+ field {
+ name: "value"
+ number: 1
+ label: LABEL_REPEATED
+ type: TYPE_FLOAT
+ options {
+ packed: true
+ }
+ }
+ }
+}
diff --git a/tensorflow/tools/api/golden/tensorflow.train.-ftrl-optimizer.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.train.-ftrl-optimizer.pbtxt
index 1add3a9021..d265fdeb01 100644
--- a/tensorflow/tools/api/golden/tensorflow.train.-ftrl-optimizer.pbtxt
+++ b/tensorflow/tools/api/golden/v1/tensorflow.train.-ftrl-optimizer.pbtxt
@@ -2,7 +2,7 @@ path: "tensorflow.train.FtrlOptimizer"
tf_class {
is_instance: "<class \'tensorflow.python.training.ftrl.FtrlOptimizer\'>"
is_instance: "<class \'tensorflow.python.training.optimizer.Optimizer\'>"
- is_instance: "<class \'tensorflow.python.training.checkpointable.CheckpointableBase\'>"
+ is_instance: "<class \'tensorflow.python.training.checkpointable.base.CheckpointableBase\'>"
is_instance: "<type \'object\'>"
member {
name: "GATE_GRAPH"
diff --git a/tensorflow/tools/api/golden/tensorflow.train.-global-step-waiter-hook.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.train.-global-step-waiter-hook.pbtxt
index 147448618e..147448618e 100644
--- a/tensorflow/tools/api/golden/tensorflow.train.-global-step-waiter-hook.pbtxt
+++ b/tensorflow/tools/api/golden/v1/tensorflow.train.-global-step-waiter-hook.pbtxt
diff --git a/tensorflow/tools/api/golden/tensorflow.train.-gradient-descent-optimizer.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.train.-gradient-descent-optimizer.pbtxt
index ef5bbd6ace..c673e29cd4 100644
--- a/tensorflow/tools/api/golden/tensorflow.train.-gradient-descent-optimizer.pbtxt
+++ b/tensorflow/tools/api/golden/v1/tensorflow.train.-gradient-descent-optimizer.pbtxt
@@ -2,7 +2,7 @@ path: "tensorflow.train.GradientDescentOptimizer"
tf_class {
is_instance: "<class \'tensorflow.python.training.gradient_descent.GradientDescentOptimizer\'>"
is_instance: "<class \'tensorflow.python.training.optimizer.Optimizer\'>"
- is_instance: "<class \'tensorflow.python.training.checkpointable.CheckpointableBase\'>"
+ is_instance: "<class \'tensorflow.python.training.checkpointable.base.CheckpointableBase\'>"
is_instance: "<type \'object\'>"
member {
name: "GATE_GRAPH"
diff --git a/tensorflow/tools/api/golden/v1/tensorflow.train.-int64-list.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.train.-int64-list.pbtxt
new file mode 100644
index 0000000000..1de92b3ab7
--- /dev/null
+++ b/tensorflow/tools/api/golden/v1/tensorflow.train.-int64-list.pbtxt
@@ -0,0 +1,15 @@
+path: "tensorflow.train.Int64List"
+tf_proto {
+ descriptor {
+ name: "Int64List"
+ field {
+ name: "value"
+ number: 1
+ label: LABEL_REPEATED
+ type: TYPE_INT64
+ options {
+ packed: true
+ }
+ }
+ }
+}
diff --git a/tensorflow/tools/api/golden/v1/tensorflow.train.-job-def.-tasks-entry.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.train.-job-def.-tasks-entry.pbtxt
new file mode 100644
index 0000000000..58115590a5
--- /dev/null
+++ b/tensorflow/tools/api/golden/v1/tensorflow.train.-job-def.-tasks-entry.pbtxt
@@ -0,0 +1,21 @@
+path: "tensorflow.train.JobDef.TasksEntry"
+tf_proto {
+ descriptor {
+ name: "TasksEntry"
+ field {
+ name: "key"
+ number: 1
+ label: LABEL_OPTIONAL
+ type: TYPE_INT32
+ }
+ field {
+ name: "value"
+ number: 2
+ label: LABEL_OPTIONAL
+ type: TYPE_STRING
+ }
+ options {
+ map_entry: true
+ }
+ }
+}
diff --git a/tensorflow/tools/api/golden/v1/tensorflow.train.-job-def.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.train.-job-def.pbtxt
new file mode 100644
index 0000000000..d7eb505e27
--- /dev/null
+++ b/tensorflow/tools/api/golden/v1/tensorflow.train.-job-def.pbtxt
@@ -0,0 +1,37 @@
+path: "tensorflow.train.JobDef"
+tf_proto {
+ descriptor {
+ name: "JobDef"
+ field {
+ name: "name"
+ number: 1
+ label: LABEL_OPTIONAL
+ type: TYPE_STRING
+ }
+ field {
+ name: "tasks"
+ number: 2
+ label: LABEL_REPEATED
+ type: TYPE_MESSAGE
+ type_name: ".tensorflow.JobDef.TasksEntry"
+ }
+ nested_type {
+ name: "TasksEntry"
+ field {
+ name: "key"
+ number: 1
+ label: LABEL_OPTIONAL
+ type: TYPE_INT32
+ }
+ field {
+ name: "value"
+ number: 2
+ label: LABEL_OPTIONAL
+ type: TYPE_STRING
+ }
+ options {
+ map_entry: true
+ }
+ }
+ }
+}
diff --git a/tensorflow/tools/api/golden/tensorflow.train.-logging-tensor-hook.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.train.-logging-tensor-hook.pbtxt
index 9801c05df1..9801c05df1 100644
--- a/tensorflow/tools/api/golden/tensorflow.train.-logging-tensor-hook.pbtxt
+++ b/tensorflow/tools/api/golden/v1/tensorflow.train.-logging-tensor-hook.pbtxt
diff --git a/tensorflow/tools/api/golden/tensorflow.train.-looper-thread.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.train.-looper-thread.pbtxt
index c61859004e..c61859004e 100644
--- a/tensorflow/tools/api/golden/tensorflow.train.-looper-thread.pbtxt
+++ b/tensorflow/tools/api/golden/v1/tensorflow.train.-looper-thread.pbtxt
diff --git a/tensorflow/tools/api/golden/tensorflow.train.-momentum-optimizer.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.train.-momentum-optimizer.pbtxt
index 3d6e87f5eb..8199f63b9b 100644
--- a/tensorflow/tools/api/golden/tensorflow.train.-momentum-optimizer.pbtxt
+++ b/tensorflow/tools/api/golden/v1/tensorflow.train.-momentum-optimizer.pbtxt
@@ -2,7 +2,7 @@ path: "tensorflow.train.MomentumOptimizer"
tf_class {
is_instance: "<class \'tensorflow.python.training.momentum.MomentumOptimizer\'>"
is_instance: "<class \'tensorflow.python.training.optimizer.Optimizer\'>"
- is_instance: "<class \'tensorflow.python.training.checkpointable.CheckpointableBase\'>"
+ is_instance: "<class \'tensorflow.python.training.checkpointable.base.CheckpointableBase\'>"
is_instance: "<type \'object\'>"
member {
name: "GATE_GRAPH"
diff --git a/tensorflow/tools/api/golden/tensorflow.train.-monitored-session.-step-context.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.train.-monitored-session.-step-context.pbtxt
index 03efe6639e..03efe6639e 100644
--- a/tensorflow/tools/api/golden/tensorflow.train.-monitored-session.-step-context.pbtxt
+++ b/tensorflow/tools/api/golden/v1/tensorflow.train.-monitored-session.-step-context.pbtxt
diff --git a/tensorflow/tools/api/golden/tensorflow.train.-monitored-session.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.train.-monitored-session.pbtxt
index 09b7b3fb53..09b7b3fb53 100644
--- a/tensorflow/tools/api/golden/tensorflow.train.-monitored-session.pbtxt
+++ b/tensorflow/tools/api/golden/v1/tensorflow.train.-monitored-session.pbtxt
diff --git a/tensorflow/tools/api/golden/tensorflow.train.-nan-loss-during-training-error.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.train.-nan-loss-during-training-error.pbtxt
index 25fd5e75a7..25fd5e75a7 100644
--- a/tensorflow/tools/api/golden/tensorflow.train.-nan-loss-during-training-error.pbtxt
+++ b/tensorflow/tools/api/golden/v1/tensorflow.train.-nan-loss-during-training-error.pbtxt
diff --git a/tensorflow/tools/api/golden/tensorflow.train.-nan-tensor-hook.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.train.-nan-tensor-hook.pbtxt
index 7d1c89f9b3..7d1c89f9b3 100644
--- a/tensorflow/tools/api/golden/tensorflow.train.-nan-tensor-hook.pbtxt
+++ b/tensorflow/tools/api/golden/v1/tensorflow.train.-nan-tensor-hook.pbtxt
diff --git a/tensorflow/tools/api/golden/tensorflow.train.-optimizer.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.train.-optimizer.pbtxt
index e73861ff7c..876bb35e39 100644
--- a/tensorflow/tools/api/golden/tensorflow.train.-optimizer.pbtxt
+++ b/tensorflow/tools/api/golden/v1/tensorflow.train.-optimizer.pbtxt
@@ -1,7 +1,7 @@
path: "tensorflow.train.Optimizer"
tf_class {
is_instance: "<class \'tensorflow.python.training.optimizer.Optimizer\'>"
- is_instance: "<class \'tensorflow.python.training.checkpointable.CheckpointableBase\'>"
+ is_instance: "<class \'tensorflow.python.training.checkpointable.base.CheckpointableBase\'>"
is_instance: "<type \'object\'>"
member {
name: "GATE_GRAPH"
diff --git a/tensorflow/tools/api/golden/tensorflow.train.-profiler-hook.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.train.-profiler-hook.pbtxt
index 4df6c4156a..4df6c4156a 100644
--- a/tensorflow/tools/api/golden/tensorflow.train.-profiler-hook.pbtxt
+++ b/tensorflow/tools/api/golden/v1/tensorflow.train.-profiler-hook.pbtxt
diff --git a/tensorflow/tools/api/golden/tensorflow.train.-proximal-adagrad-optimizer.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.train.-proximal-adagrad-optimizer.pbtxt
index 301b35b199..14349a74ef 100644
--- a/tensorflow/tools/api/golden/tensorflow.train.-proximal-adagrad-optimizer.pbtxt
+++ b/tensorflow/tools/api/golden/v1/tensorflow.train.-proximal-adagrad-optimizer.pbtxt
@@ -2,7 +2,7 @@ path: "tensorflow.train.ProximalAdagradOptimizer"
tf_class {
is_instance: "<class \'tensorflow.python.training.proximal_adagrad.ProximalAdagradOptimizer\'>"
is_instance: "<class \'tensorflow.python.training.optimizer.Optimizer\'>"
- is_instance: "<class \'tensorflow.python.training.checkpointable.CheckpointableBase\'>"
+ is_instance: "<class \'tensorflow.python.training.checkpointable.base.CheckpointableBase\'>"
is_instance: "<type \'object\'>"
member {
name: "GATE_GRAPH"
diff --git a/tensorflow/tools/api/golden/tensorflow.train.-proximal-gradient-descent-optimizer.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.train.-proximal-gradient-descent-optimizer.pbtxt
index 8815befa93..7d982dc51f 100644
--- a/tensorflow/tools/api/golden/tensorflow.train.-proximal-gradient-descent-optimizer.pbtxt
+++ b/tensorflow/tools/api/golden/v1/tensorflow.train.-proximal-gradient-descent-optimizer.pbtxt
@@ -2,7 +2,7 @@ path: "tensorflow.train.ProximalGradientDescentOptimizer"
tf_class {
is_instance: "<class \'tensorflow.python.training.proximal_gradient_descent.ProximalGradientDescentOptimizer\'>"
is_instance: "<class \'tensorflow.python.training.optimizer.Optimizer\'>"
- is_instance: "<class \'tensorflow.python.training.checkpointable.CheckpointableBase\'>"
+ is_instance: "<class \'tensorflow.python.training.checkpointable.base.CheckpointableBase\'>"
is_instance: "<type \'object\'>"
member {
name: "GATE_GRAPH"
diff --git a/tensorflow/tools/api/golden/tensorflow.train.-queue-runner.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.train.-queue-runner.pbtxt
index d84d0058ee..d84d0058ee 100644
--- a/tensorflow/tools/api/golden/tensorflow.train.-queue-runner.pbtxt
+++ b/tensorflow/tools/api/golden/v1/tensorflow.train.-queue-runner.pbtxt
diff --git a/tensorflow/tools/api/golden/tensorflow.train.-r-m-s-prop-optimizer.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.train.-r-m-s-prop-optimizer.pbtxt
index e9819683ba..906384a287 100644
--- a/tensorflow/tools/api/golden/tensorflow.train.-r-m-s-prop-optimizer.pbtxt
+++ b/tensorflow/tools/api/golden/v1/tensorflow.train.-r-m-s-prop-optimizer.pbtxt
@@ -2,7 +2,7 @@ path: "tensorflow.train.RMSPropOptimizer"
tf_class {
is_instance: "<class \'tensorflow.python.training.rmsprop.RMSPropOptimizer\'>"
is_instance: "<class \'tensorflow.python.training.optimizer.Optimizer\'>"
- is_instance: "<class \'tensorflow.python.training.checkpointable.CheckpointableBase\'>"
+ is_instance: "<class \'tensorflow.python.training.checkpointable.base.CheckpointableBase\'>"
is_instance: "<type \'object\'>"
member {
name: "GATE_GRAPH"
diff --git a/tensorflow/tools/api/golden/v1/tensorflow.train.-saver-def.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.train.-saver-def.pbtxt
new file mode 100644
index 0000000000..4ec99469e4
--- /dev/null
+++ b/tensorflow/tools/api/golden/v1/tensorflow.train.-saver-def.pbtxt
@@ -0,0 +1,64 @@
+path: "tensorflow.train.SaverDef"
+tf_proto {
+ descriptor {
+ name: "SaverDef"
+ field {
+ name: "filename_tensor_name"
+ number: 1
+ label: LABEL_OPTIONAL
+ type: TYPE_STRING
+ }
+ field {
+ name: "save_tensor_name"
+ number: 2
+ label: LABEL_OPTIONAL
+ type: TYPE_STRING
+ }
+ field {
+ name: "restore_op_name"
+ number: 3
+ label: LABEL_OPTIONAL
+ type: TYPE_STRING
+ }
+ field {
+ name: "max_to_keep"
+ number: 4
+ label: LABEL_OPTIONAL
+ type: TYPE_INT32
+ }
+ field {
+ name: "sharded"
+ number: 5
+ label: LABEL_OPTIONAL
+ type: TYPE_BOOL
+ }
+ field {
+ name: "keep_checkpoint_every_n_hours"
+ number: 6
+ label: LABEL_OPTIONAL
+ type: TYPE_FLOAT
+ }
+ field {
+ name: "version"
+ number: 7
+ label: LABEL_OPTIONAL
+ type: TYPE_ENUM
+ type_name: ".tensorflow.SaverDef.CheckpointFormatVersion"
+ }
+ enum_type {
+ name: "CheckpointFormatVersion"
+ value {
+ name: "LEGACY"
+ number: 0
+ }
+ value {
+ name: "V1"
+ number: 1
+ }
+ value {
+ name: "V2"
+ number: 2
+ }
+ }
+ }
+}
diff --git a/tensorflow/tools/api/golden/tensorflow.train.-saver.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.train.-saver.pbtxt
index 2cda458f46..2cda458f46 100644
--- a/tensorflow/tools/api/golden/tensorflow.train.-saver.pbtxt
+++ b/tensorflow/tools/api/golden/v1/tensorflow.train.-saver.pbtxt
diff --git a/tensorflow/tools/api/golden/tensorflow.train.-scaffold.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.train.-scaffold.pbtxt
index 38cc98b48e..38cc98b48e 100644
--- a/tensorflow/tools/api/golden/tensorflow.train.-scaffold.pbtxt
+++ b/tensorflow/tools/api/golden/v1/tensorflow.train.-scaffold.pbtxt
diff --git a/tensorflow/tools/api/golden/tensorflow.train.-second-or-step-timer.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.train.-second-or-step-timer.pbtxt
index 3c5a6ac13c..3c5a6ac13c 100644
--- a/tensorflow/tools/api/golden/tensorflow.train.-second-or-step-timer.pbtxt
+++ b/tensorflow/tools/api/golden/v1/tensorflow.train.-second-or-step-timer.pbtxt
diff --git a/tensorflow/tools/api/golden/v1/tensorflow.train.-sequence-example.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.train.-sequence-example.pbtxt
new file mode 100644
index 0000000000..6a4553bbc1
--- /dev/null
+++ b/tensorflow/tools/api/golden/v1/tensorflow.train.-sequence-example.pbtxt
@@ -0,0 +1,20 @@
+path: "tensorflow.train.SequenceExample"
+tf_proto {
+ descriptor {
+ name: "SequenceExample"
+ field {
+ name: "context"
+ number: 1
+ label: LABEL_OPTIONAL
+ type: TYPE_MESSAGE
+ type_name: ".tensorflow.Features"
+ }
+ field {
+ name: "feature_lists"
+ number: 2
+ label: LABEL_OPTIONAL
+ type: TYPE_MESSAGE
+ type_name: ".tensorflow.FeatureLists"
+ }
+ }
+}
diff --git a/tensorflow/tools/api/golden/v1/tensorflow.train.-server-def.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.train.-server-def.pbtxt
new file mode 100644
index 0000000000..83ee7b3eb9
--- /dev/null
+++ b/tensorflow/tools/api/golden/v1/tensorflow.train.-server-def.pbtxt
@@ -0,0 +1,38 @@
+path: "tensorflow.train.ServerDef"
+tf_proto {
+ descriptor {
+ name: "ServerDef"
+ field {
+ name: "cluster"
+ number: 1
+ label: LABEL_OPTIONAL
+ type: TYPE_MESSAGE
+ type_name: ".tensorflow.ClusterDef"
+ }
+ field {
+ name: "job_name"
+ number: 2
+ label: LABEL_OPTIONAL
+ type: TYPE_STRING
+ }
+ field {
+ name: "task_index"
+ number: 3
+ label: LABEL_OPTIONAL
+ type: TYPE_INT32
+ }
+ field {
+ name: "default_session_config"
+ number: 4
+ label: LABEL_OPTIONAL
+ type: TYPE_MESSAGE
+ type_name: ".tensorflow.ConfigProto"
+ }
+ field {
+ name: "protocol"
+ number: 5
+ label: LABEL_OPTIONAL
+ type: TYPE_STRING
+ }
+ }
+}
diff --git a/tensorflow/tools/api/golden/tensorflow.train.-server.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.train.-server.pbtxt
index 9b8f185f5b..9b8f185f5b 100644
--- a/tensorflow/tools/api/golden/tensorflow.train.-server.pbtxt
+++ b/tensorflow/tools/api/golden/v1/tensorflow.train.-server.pbtxt
diff --git a/tensorflow/tools/api/golden/tensorflow.train.-session-creator.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.train.-session-creator.pbtxt
index beb232715f..beb232715f 100644
--- a/tensorflow/tools/api/golden/tensorflow.train.-session-creator.pbtxt
+++ b/tensorflow/tools/api/golden/v1/tensorflow.train.-session-creator.pbtxt
diff --git a/tensorflow/tools/api/golden/tensorflow.train.-session-manager.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.train.-session-manager.pbtxt
index cc31bb4e4b..448764fe08 100644
--- a/tensorflow/tools/api/golden/tensorflow.train.-session-manager.pbtxt
+++ b/tensorflow/tools/api/golden/v1/tensorflow.train.-session-manager.pbtxt
@@ -4,7 +4,7 @@ tf_class {
is_instance: "<type \'object\'>"
member_method {
name: "__init__"
- argspec: "args=[\'self\', \'local_init_op\', \'ready_op\', \'ready_for_local_init_op\', \'graph\', \'recovery_wait_secs\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'None\', \'30\'], "
+ argspec: "args=[\'self\', \'local_init_op\', \'ready_op\', \'ready_for_local_init_op\', \'graph\', \'recovery_wait_secs\', \'local_init_run_options\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'None\', \'30\', \'None\'], "
}
member_method {
name: "prepare_session"
diff --git a/tensorflow/tools/api/golden/tensorflow.train.-session-run-args.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.train.-session-run-args.pbtxt
index 442990893e..442990893e 100644
--- a/tensorflow/tools/api/golden/tensorflow.train.-session-run-args.pbtxt
+++ b/tensorflow/tools/api/golden/v1/tensorflow.train.-session-run-args.pbtxt
diff --git a/tensorflow/tools/api/golden/tensorflow.train.-session-run-context.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.train.-session-run-context.pbtxt
index d5adb15c95..d5adb15c95 100644
--- a/tensorflow/tools/api/golden/tensorflow.train.-session-run-context.pbtxt
+++ b/tensorflow/tools/api/golden/v1/tensorflow.train.-session-run-context.pbtxt
diff --git a/tensorflow/tools/api/golden/tensorflow.train.-session-run-hook.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.train.-session-run-hook.pbtxt
index db1aa24acf..db1aa24acf 100644
--- a/tensorflow/tools/api/golden/tensorflow.train.-session-run-hook.pbtxt
+++ b/tensorflow/tools/api/golden/v1/tensorflow.train.-session-run-hook.pbtxt
diff --git a/tensorflow/tools/api/golden/tensorflow.train.-session-run-values.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.train.-session-run-values.pbtxt
index 0b401d59c4..0b401d59c4 100644
--- a/tensorflow/tools/api/golden/tensorflow.train.-session-run-values.pbtxt
+++ b/tensorflow/tools/api/golden/v1/tensorflow.train.-session-run-values.pbtxt
diff --git a/tensorflow/tools/api/golden/tensorflow.train.-singular-monitored-session.-step-context.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.train.-singular-monitored-session.-step-context.pbtxt
index 36d8ce7ff8..36d8ce7ff8 100644
--- a/tensorflow/tools/api/golden/tensorflow.train.-singular-monitored-session.-step-context.pbtxt
+++ b/tensorflow/tools/api/golden/v1/tensorflow.train.-singular-monitored-session.-step-context.pbtxt
diff --git a/tensorflow/tools/api/golden/tensorflow.train.-singular-monitored-session.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.train.-singular-monitored-session.pbtxt
index de0f2c1c1a..de0f2c1c1a 100644
--- a/tensorflow/tools/api/golden/tensorflow.train.-singular-monitored-session.pbtxt
+++ b/tensorflow/tools/api/golden/v1/tensorflow.train.-singular-monitored-session.pbtxt
diff --git a/tensorflow/tools/api/golden/tensorflow.train.-step-counter-hook.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.train.-step-counter-hook.pbtxt
index 13261f6dde..13261f6dde 100644
--- a/tensorflow/tools/api/golden/tensorflow.train.-step-counter-hook.pbtxt
+++ b/tensorflow/tools/api/golden/v1/tensorflow.train.-step-counter-hook.pbtxt
diff --git a/tensorflow/tools/api/golden/tensorflow.train.-stop-at-step-hook.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.train.-stop-at-step-hook.pbtxt
index e388599b0b..e388599b0b 100644
--- a/tensorflow/tools/api/golden/tensorflow.train.-stop-at-step-hook.pbtxt
+++ b/tensorflow/tools/api/golden/v1/tensorflow.train.-stop-at-step-hook.pbtxt
diff --git a/tensorflow/tools/api/golden/tensorflow.train.-summary-saver-hook.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.train.-summary-saver-hook.pbtxt
index 697c3667b0..697c3667b0 100644
--- a/tensorflow/tools/api/golden/tensorflow.train.-summary-saver-hook.pbtxt
+++ b/tensorflow/tools/api/golden/v1/tensorflow.train.-summary-saver-hook.pbtxt
diff --git a/tensorflow/tools/api/golden/tensorflow.train.-supervisor.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.train.-supervisor.pbtxt
index 1f0e59a1ac..9677e5a98e 100644
--- a/tensorflow/tools/api/golden/tensorflow.train.-supervisor.pbtxt
+++ b/tensorflow/tools/api/golden/v1/tensorflow.train.-supervisor.pbtxt
@@ -104,7 +104,7 @@ tf_class {
}
member_method {
name: "__init__"
- argspec: "args=[\'self\', \'graph\', \'ready_op\', \'ready_for_local_init_op\', \'is_chief\', \'init_op\', \'init_feed_dict\', \'local_init_op\', \'logdir\', \'summary_op\', \'saver\', \'global_step\', \'save_summaries_secs\', \'save_model_secs\', \'recovery_wait_secs\', \'stop_grace_secs\', \'checkpoint_basename\', \'session_manager\', \'summary_writer\', \'init_fn\'], varargs=None, keywords=None, defaults=[\'None\', \'0\', \'0\', \'True\', \'0\', \'None\', \'0\', \'None\', \'0\', \'0\', \'0\', \'120\', \'600\', \'30\', \'120\', \'model.ckpt\', \'None\', \'0\', \'None\'], "
+ argspec: "args=[\'self\', \'graph\', \'ready_op\', \'ready_for_local_init_op\', \'is_chief\', \'init_op\', \'init_feed_dict\', \'local_init_op\', \'logdir\', \'summary_op\', \'saver\', \'global_step\', \'save_summaries_secs\', \'save_model_secs\', \'recovery_wait_secs\', \'stop_grace_secs\', \'checkpoint_basename\', \'session_manager\', \'summary_writer\', \'init_fn\', \'local_init_run_options\'], varargs=None, keywords=None, defaults=[\'None\', \'0\', \'0\', \'True\', \'0\', \'None\', \'0\', \'None\', \'0\', \'0\', \'0\', \'120\', \'600\', \'30\', \'120\', \'model.ckpt\', \'None\', \'0\', \'None\', \'None\'], "
}
member_method {
name: "loop"
diff --git a/tensorflow/tools/api/golden/tensorflow.train.-sync-replicas-optimizer.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.train.-sync-replicas-optimizer.pbtxt
index 3db96aff87..2c0fda3c72 100644
--- a/tensorflow/tools/api/golden/tensorflow.train.-sync-replicas-optimizer.pbtxt
+++ b/tensorflow/tools/api/golden/v1/tensorflow.train.-sync-replicas-optimizer.pbtxt
@@ -2,7 +2,7 @@ path: "tensorflow.train.SyncReplicasOptimizer"
tf_class {
is_instance: "<class \'tensorflow.python.training.sync_replicas_optimizer.SyncReplicasOptimizer\'>"
is_instance: "<class \'tensorflow.python.training.optimizer.Optimizer\'>"
- is_instance: "<class \'tensorflow.python.training.checkpointable.CheckpointableBase\'>"
+ is_instance: "<class \'tensorflow.python.training.checkpointable.base.CheckpointableBase\'>"
is_instance: "<type \'object\'>"
member {
name: "GATE_GRAPH"
diff --git a/tensorflow/tools/api/golden/tensorflow.train.-vocab-info.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.train.-vocab-info.pbtxt
index 4ce7cb1111..39b946b82f 100644
--- a/tensorflow/tools/api/golden/tensorflow.train.-vocab-info.pbtxt
+++ b/tensorflow/tools/api/golden/v1/tensorflow.train.-vocab-info.pbtxt
@@ -4,6 +4,10 @@ tf_class {
is_instance: "<class \'tensorflow.python.training.warm_starting_util.VocabInfo\'>"
is_instance: "<type \'tuple\'>"
member {
+ name: "axis"
+ mtype: "<type \'property\'>"
+ }
+ member {
name: "backup_initializer"
mtype: "<type \'property\'>"
}
diff --git a/tensorflow/tools/api/golden/tensorflow.train.-worker-session-creator.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.train.-worker-session-creator.pbtxt
index ac26358068..ac26358068 100644
--- a/tensorflow/tools/api/golden/tensorflow.train.-worker-session-creator.pbtxt
+++ b/tensorflow/tools/api/golden/v1/tensorflow.train.-worker-session-creator.pbtxt
diff --git a/tensorflow/tools/api/golden/tensorflow.train.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.train.pbtxt
index bec72e1e60..9f35395284 100644
--- a/tensorflow/tools/api/golden/tensorflow.train.pbtxt
+++ b/tensorflow/tools/api/golden/v1/tensorflow.train.pbtxt
@@ -21,6 +21,10 @@ tf_module {
mtype: "<class \'google.protobuf.pyext.cpp_message.GeneratedProtocolMessageType\'>"
}
member {
+ name: "Checkpoint"
+ mtype: "<type \'type\'>"
+ }
+ member {
name: "CheckpointSaverHook"
mtype: "<type \'type\'>"
}
@@ -238,7 +242,7 @@ tf_module {
}
member_method {
name: "MonitoredTrainingSession"
- argspec: "args=[\'master\', \'is_chief\', \'checkpoint_dir\', \'scaffold\', \'hooks\', \'chief_only_hooks\', \'save_checkpoint_secs\', \'save_summaries_steps\', \'save_summaries_secs\', \'config\', \'stop_grace_period_secs\', \'log_step_count_steps\', \'max_wait_secs\', \'save_checkpoint_steps\'], varargs=None, keywords=None, defaults=[\'\', \'True\', \'None\', \'None\', \'None\', \'None\', \'<object object instance>\', \'<object object instance>\', \'<object object instance>\', \'None\', \'120\', \'100\', \'7200\', \'<object object instance>\'], "
+ argspec: "args=[\'master\', \'is_chief\', \'checkpoint_dir\', \'scaffold\', \'hooks\', \'chief_only_hooks\', \'save_checkpoint_secs\', \'save_summaries_steps\', \'save_summaries_secs\', \'config\', \'stop_grace_period_secs\', \'log_step_count_steps\', \'max_wait_secs\', \'save_checkpoint_steps\', \'summary_dir\'], varargs=None, keywords=None, defaults=[\'\', \'True\', \'None\', \'None\', \'None\', \'None\', \'<object object instance>\', \'<object object instance>\', \'<object object instance>\', \'None\', \'120\', \'100\', \'7200\', \'<object object instance>\', \'None\'], "
}
member_method {
name: "NewCheckpointReader"
@@ -294,7 +298,7 @@ tf_module {
}
member_method {
name: "generate_checkpoint_state_proto"
- argspec: "args=[\'save_dir\', \'model_checkpoint_path\', \'all_model_checkpoint_paths\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ argspec: "args=[\'save_dir\', \'model_checkpoint_path\', \'all_model_checkpoint_paths\', \'all_model_checkpoint_timestamps\', \'last_preserved_timestamp\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\'], "
}
member_method {
name: "get_checkpoint_mtimes"
@@ -397,6 +401,10 @@ tf_module {
argspec: "args=[\'limit\', \'num_epochs\', \'shuffle\', \'seed\', \'capacity\', \'shared_name\', \'name\'], varargs=None, keywords=None, defaults=[\'None\', \'True\', \'None\', \'32\', \'None\', \'None\'], "
}
member_method {
+ name: "remove_checkpoint"
+ argspec: "args=[\'checkpoint_prefix\', \'checkpoint_format_version\', \'meta_graph_suffix\'], varargs=None, keywords=None, defaults=[\'2\', \'meta\'], "
+ }
+ member_method {
name: "replica_device_setter"
argspec: "args=[\'ps_tasks\', \'ps_device\', \'worker_device\', \'merge_devices\', \'cluster\', \'ps_ops\', \'ps_strategy\'], varargs=None, keywords=None, defaults=[\'0\', \'/job:ps\', \'/job:worker\', \'True\', \'None\', \'None\', \'None\'], "
}
@@ -438,7 +446,7 @@ tf_module {
}
member_method {
name: "update_checkpoint_state"
- argspec: "args=[\'save_dir\', \'model_checkpoint_path\', \'all_model_checkpoint_paths\', \'latest_filename\'], varargs=None, keywords=None, defaults=[\'None\', \'None\'], "
+ argspec: "args=[\'save_dir\', \'model_checkpoint_path\', \'all_model_checkpoint_paths\', \'latest_filename\', \'all_model_checkpoint_timestamps\', \'last_preserved_timestamp\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'None\'], "
}
member_method {
name: "warm_start"
diff --git a/tensorflow/tools/api/golden/tensorflow.train.queue_runner.-queue-runner.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.train.queue_runner.-queue-runner.pbtxt
index 23d402de30..23d402de30 100644
--- a/tensorflow/tools/api/golden/tensorflow.train.queue_runner.-queue-runner.pbtxt
+++ b/tensorflow/tools/api/golden/v1/tensorflow.train.queue_runner.-queue-runner.pbtxt
diff --git a/tensorflow/tools/api/golden/tensorflow.train.queue_runner.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.train.queue_runner.pbtxt
index 6e2d043049..6e2d043049 100644
--- a/tensorflow/tools/api/golden/tensorflow.train.queue_runner.pbtxt
+++ b/tensorflow/tools/api/golden/v1/tensorflow.train.queue_runner.pbtxt
diff --git a/tensorflow/tools/api/golden/tensorflow.truncated_normal_initializer.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.truncated_normal_initializer.pbtxt
index c1e1c230a9..c1e1c230a9 100644
--- a/tensorflow/tools/api/golden/tensorflow.truncated_normal_initializer.pbtxt
+++ b/tensorflow/tools/api/golden/v1/tensorflow.truncated_normal_initializer.pbtxt
diff --git a/tensorflow/tools/api/golden/tensorflow.uniform_unit_scaling_initializer.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.uniform_unit_scaling_initializer.pbtxt
index e1b18dc92f..e1b18dc92f 100644
--- a/tensorflow/tools/api/golden/tensorflow.uniform_unit_scaling_initializer.pbtxt
+++ b/tensorflow/tools/api/golden/v1/tensorflow.uniform_unit_scaling_initializer.pbtxt
diff --git a/tensorflow/tools/api/golden/tensorflow.variable_scope.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.variable_scope.pbtxt
index e62dec93e6..e62dec93e6 100644
--- a/tensorflow/tools/api/golden/tensorflow.variable_scope.pbtxt
+++ b/tensorflow/tools/api/golden/v1/tensorflow.variable_scope.pbtxt
diff --git a/tensorflow/tools/api/golden/tensorflow.variance_scaling_initializer.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.variance_scaling_initializer.pbtxt
index a58398d645..09d7bc03b4 100644
--- a/tensorflow/tools/api/golden/tensorflow.variance_scaling_initializer.pbtxt
+++ b/tensorflow/tools/api/golden/v1/tensorflow.variance_scaling_initializer.pbtxt
@@ -5,7 +5,7 @@ tf_class {
is_instance: "<type \'object\'>"
member_method {
name: "__init__"
- argspec: "args=[\'self\', \'scale\', \'mode\', \'distribution\', \'seed\', \'dtype\'], varargs=None, keywords=None, defaults=[\'1.0\', \'fan_in\', \'normal\', \'None\', \"<dtype: \'float32\'>\"], "
+ argspec: "args=[\'self\', \'scale\', \'mode\', \'distribution\', \'seed\', \'dtype\'], varargs=None, keywords=None, defaults=[\'1.0\', \'fan_in\', \'truncated_normal\', \'None\', \"<dtype: \'float32\'>\"], "
}
member_method {
name: "from_config"
diff --git a/tensorflow/tools/api/golden/tensorflow.zeros_initializer.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.zeros_initializer.pbtxt
index e229b02cee..e229b02cee 100644
--- a/tensorflow/tools/api/golden/tensorflow.zeros_initializer.pbtxt
+++ b/tensorflow/tools/api/golden/v1/tensorflow.zeros_initializer.pbtxt
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.-aggregation-method.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.-aggregation-method.pbtxt
new file mode 100644
index 0000000000..f79029d3fe
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.-aggregation-method.pbtxt
@@ -0,0 +1,24 @@
+path: "tensorflow.AggregationMethod"
+tf_class {
+ is_instance: "<class \'tensorflow.python.ops.gradients_impl.AggregationMethod\'>"
+ is_instance: "<type \'object\'>"
+ member {
+ name: "ADD_N"
+ mtype: "<type \'int\'>"
+ }
+ member {
+ name: "DEFAULT"
+ mtype: "<type \'int\'>"
+ }
+ member {
+ name: "EXPERIMENTAL_ACCUMULATE_N"
+ mtype: "<type \'int\'>"
+ }
+ member {
+ name: "EXPERIMENTAL_TREE"
+ mtype: "<type \'int\'>"
+ }
+ member_method {
+ name: "__init__"
+ }
+}
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.-attr-value.-list-value.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.-attr-value.-list-value.pbtxt
new file mode 100644
index 0000000000..f1dffd5952
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.-attr-value.-list-value.pbtxt
@@ -0,0 +1,70 @@
+path: "tensorflow.AttrValue.ListValue"
+tf_proto {
+ descriptor {
+ name: "ListValue"
+ field {
+ name: "s"
+ number: 2
+ label: LABEL_REPEATED
+ type: TYPE_BYTES
+ }
+ field {
+ name: "i"
+ number: 3
+ label: LABEL_REPEATED
+ type: TYPE_INT64
+ options {
+ packed: true
+ }
+ }
+ field {
+ name: "f"
+ number: 4
+ label: LABEL_REPEATED
+ type: TYPE_FLOAT
+ options {
+ packed: true
+ }
+ }
+ field {
+ name: "b"
+ number: 5
+ label: LABEL_REPEATED
+ type: TYPE_BOOL
+ options {
+ packed: true
+ }
+ }
+ field {
+ name: "type"
+ number: 6
+ label: LABEL_REPEATED
+ type: TYPE_ENUM
+ type_name: ".tensorflow.DataType"
+ options {
+ packed: true
+ }
+ }
+ field {
+ name: "shape"
+ number: 7
+ label: LABEL_REPEATED
+ type: TYPE_MESSAGE
+ type_name: ".tensorflow.TensorShapeProto"
+ }
+ field {
+ name: "tensor"
+ number: 8
+ label: LABEL_REPEATED
+ type: TYPE_MESSAGE
+ type_name: ".tensorflow.TensorProto"
+ }
+ field {
+ name: "func"
+ number: 9
+ label: LABEL_REPEATED
+ type: TYPE_MESSAGE
+ type_name: ".tensorflow.NameAttrList"
+ }
+ }
+}
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.-attr-value.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.-attr-value.pbtxt
new file mode 100644
index 0000000000..6ccd64f428
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.-attr-value.pbtxt
@@ -0,0 +1,151 @@
+path: "tensorflow.AttrValue"
+tf_proto {
+ descriptor {
+ name: "AttrValue"
+ field {
+ name: "s"
+ number: 2
+ label: LABEL_OPTIONAL
+ type: TYPE_BYTES
+ oneof_index: 0
+ }
+ field {
+ name: "i"
+ number: 3
+ label: LABEL_OPTIONAL
+ type: TYPE_INT64
+ oneof_index: 0
+ }
+ field {
+ name: "f"
+ number: 4
+ label: LABEL_OPTIONAL
+ type: TYPE_FLOAT
+ oneof_index: 0
+ }
+ field {
+ name: "b"
+ number: 5
+ label: LABEL_OPTIONAL
+ type: TYPE_BOOL
+ oneof_index: 0
+ }
+ field {
+ name: "type"
+ number: 6
+ label: LABEL_OPTIONAL
+ type: TYPE_ENUM
+ type_name: ".tensorflow.DataType"
+ oneof_index: 0
+ }
+ field {
+ name: "shape"
+ number: 7
+ label: LABEL_OPTIONAL
+ type: TYPE_MESSAGE
+ type_name: ".tensorflow.TensorShapeProto"
+ oneof_index: 0
+ }
+ field {
+ name: "tensor"
+ number: 8
+ label: LABEL_OPTIONAL
+ type: TYPE_MESSAGE
+ type_name: ".tensorflow.TensorProto"
+ oneof_index: 0
+ }
+ field {
+ name: "list"
+ number: 1
+ label: LABEL_OPTIONAL
+ type: TYPE_MESSAGE
+ type_name: ".tensorflow.AttrValue.ListValue"
+ oneof_index: 0
+ }
+ field {
+ name: "func"
+ number: 10
+ label: LABEL_OPTIONAL
+ type: TYPE_MESSAGE
+ type_name: ".tensorflow.NameAttrList"
+ oneof_index: 0
+ }
+ field {
+ name: "placeholder"
+ number: 9
+ label: LABEL_OPTIONAL
+ type: TYPE_STRING
+ oneof_index: 0
+ }
+ nested_type {
+ name: "ListValue"
+ field {
+ name: "s"
+ number: 2
+ label: LABEL_REPEATED
+ type: TYPE_BYTES
+ }
+ field {
+ name: "i"
+ number: 3
+ label: LABEL_REPEATED
+ type: TYPE_INT64
+ options {
+ packed: true
+ }
+ }
+ field {
+ name: "f"
+ number: 4
+ label: LABEL_REPEATED
+ type: TYPE_FLOAT
+ options {
+ packed: true
+ }
+ }
+ field {
+ name: "b"
+ number: 5
+ label: LABEL_REPEATED
+ type: TYPE_BOOL
+ options {
+ packed: true
+ }
+ }
+ field {
+ name: "type"
+ number: 6
+ label: LABEL_REPEATED
+ type: TYPE_ENUM
+ type_name: ".tensorflow.DataType"
+ options {
+ packed: true
+ }
+ }
+ field {
+ name: "shape"
+ number: 7
+ label: LABEL_REPEATED
+ type: TYPE_MESSAGE
+ type_name: ".tensorflow.TensorShapeProto"
+ }
+ field {
+ name: "tensor"
+ number: 8
+ label: LABEL_REPEATED
+ type: TYPE_MESSAGE
+ type_name: ".tensorflow.TensorProto"
+ }
+ field {
+ name: "func"
+ number: 9
+ label: LABEL_REPEATED
+ type: TYPE_MESSAGE
+ type_name: ".tensorflow.NameAttrList"
+ }
+ }
+ oneof_decl {
+ name: "value"
+ }
+ }
+}
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.-conditional-accumulator-base.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.-conditional-accumulator-base.pbtxt
new file mode 100644
index 0000000000..c9a32c16b3
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.-conditional-accumulator-base.pbtxt
@@ -0,0 +1,29 @@
+path: "tensorflow.ConditionalAccumulatorBase"
+tf_class {
+ is_instance: "<class \'tensorflow.python.ops.data_flow_ops.ConditionalAccumulatorBase\'>"
+ is_instance: "<type \'object\'>"
+ member {
+ name: "accumulator_ref"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "dtype"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "name"
+ mtype: "<type \'property\'>"
+ }
+ member_method {
+ name: "__init__"
+ argspec: "args=[\'self\', \'dtype\', \'shape\', \'accumulator_ref\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "num_accumulated"
+ argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "set_global_step"
+ argspec: "args=[\'self\', \'new_global_step\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+}
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.-conditional-accumulator.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.-conditional-accumulator.pbtxt
new file mode 100644
index 0000000000..15e0ab76b6
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.-conditional-accumulator.pbtxt
@@ -0,0 +1,38 @@
+path: "tensorflow.ConditionalAccumulator"
+tf_class {
+ is_instance: "<class \'tensorflow.python.ops.data_flow_ops.ConditionalAccumulator\'>"
+ is_instance: "<class \'tensorflow.python.ops.data_flow_ops.ConditionalAccumulatorBase\'>"
+ is_instance: "<type \'object\'>"
+ member {
+ name: "accumulator_ref"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "dtype"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "name"
+ mtype: "<type \'property\'>"
+ }
+ member_method {
+ name: "__init__"
+ argspec: "args=[\'self\', \'dtype\', \'shape\', \'shared_name\', \'name\', \'reduction_type\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'conditional_accumulator\', \'MEAN\'], "
+ }
+ member_method {
+ name: "apply_grad"
+ argspec: "args=[\'self\', \'grad\', \'local_step\', \'name\'], varargs=None, keywords=None, defaults=[\'0\', \'None\'], "
+ }
+ member_method {
+ name: "num_accumulated"
+ argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "set_global_step"
+ argspec: "args=[\'self\', \'new_global_step\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "take_grad"
+ argspec: "args=[\'self\', \'num_required\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+}
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.-config-proto.-device-count-entry.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.-config-proto.-device-count-entry.pbtxt
new file mode 100644
index 0000000000..d9b1426828
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.-config-proto.-device-count-entry.pbtxt
@@ -0,0 +1,21 @@
+path: "tensorflow.ConfigProto.DeviceCountEntry"
+tf_proto {
+ descriptor {
+ name: "DeviceCountEntry"
+ field {
+ name: "key"
+ number: 1
+ label: LABEL_OPTIONAL
+ type: TYPE_STRING
+ }
+ field {
+ name: "value"
+ number: 2
+ label: LABEL_OPTIONAL
+ type: TYPE_INT32
+ }
+ options {
+ map_entry: true
+ }
+ }
+}
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.-config-proto.-experimental.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.-config-proto.-experimental.pbtxt
new file mode 100644
index 0000000000..9f6dcd8fdb
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.-config-proto.-experimental.pbtxt
@@ -0,0 +1,22 @@
+path: "tensorflow.ConfigProto.Experimental"
+tf_proto {
+ descriptor {
+ name: "Experimental"
+ field {
+ name: "collective_group_leader"
+ number: 1
+ label: LABEL_OPTIONAL
+ type: TYPE_STRING
+ }
+ field {
+ name: "executor_type"
+ number: 3
+ label: LABEL_OPTIONAL
+ type: TYPE_STRING
+ }
+ reserved_range {
+ start: 2
+ end: 3
+ }
+ }
+}
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.-config-proto.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.-config-proto.pbtxt
new file mode 100644
index 0000000000..f3a515163d
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.-config-proto.pbtxt
@@ -0,0 +1,146 @@
+path: "tensorflow.ConfigProto"
+tf_proto {
+ descriptor {
+ name: "ConfigProto"
+ field {
+ name: "device_count"
+ number: 1
+ label: LABEL_REPEATED
+ type: TYPE_MESSAGE
+ type_name: ".tensorflow.ConfigProto.DeviceCountEntry"
+ }
+ field {
+ name: "intra_op_parallelism_threads"
+ number: 2
+ label: LABEL_OPTIONAL
+ type: TYPE_INT32
+ }
+ field {
+ name: "inter_op_parallelism_threads"
+ number: 5
+ label: LABEL_OPTIONAL
+ type: TYPE_INT32
+ }
+ field {
+ name: "use_per_session_threads"
+ number: 9
+ label: LABEL_OPTIONAL
+ type: TYPE_BOOL
+ }
+ field {
+ name: "session_inter_op_thread_pool"
+ number: 12
+ label: LABEL_REPEATED
+ type: TYPE_MESSAGE
+ type_name: ".tensorflow.ThreadPoolOptionProto"
+ }
+ field {
+ name: "placement_period"
+ number: 3
+ label: LABEL_OPTIONAL
+ type: TYPE_INT32
+ }
+ field {
+ name: "device_filters"
+ number: 4
+ label: LABEL_REPEATED
+ type: TYPE_STRING
+ }
+ field {
+ name: "gpu_options"
+ number: 6
+ label: LABEL_OPTIONAL
+ type: TYPE_MESSAGE
+ type_name: ".tensorflow.GPUOptions"
+ }
+ field {
+ name: "allow_soft_placement"
+ number: 7
+ label: LABEL_OPTIONAL
+ type: TYPE_BOOL
+ }
+ field {
+ name: "log_device_placement"
+ number: 8
+ label: LABEL_OPTIONAL
+ type: TYPE_BOOL
+ }
+ field {
+ name: "graph_options"
+ number: 10
+ label: LABEL_OPTIONAL
+ type: TYPE_MESSAGE
+ type_name: ".tensorflow.GraphOptions"
+ }
+ field {
+ name: "operation_timeout_in_ms"
+ number: 11
+ label: LABEL_OPTIONAL
+ type: TYPE_INT64
+ }
+ field {
+ name: "rpc_options"
+ number: 13
+ label: LABEL_OPTIONAL
+ type: TYPE_MESSAGE
+ type_name: ".tensorflow.RPCOptions"
+ }
+ field {
+ name: "cluster_def"
+ number: 14
+ label: LABEL_OPTIONAL
+ type: TYPE_MESSAGE
+ type_name: ".tensorflow.ClusterDef"
+ }
+ field {
+ name: "isolate_session_state"
+ number: 15
+ label: LABEL_OPTIONAL
+ type: TYPE_BOOL
+ }
+ field {
+ name: "experimental"
+ number: 16
+ label: LABEL_OPTIONAL
+ type: TYPE_MESSAGE
+ type_name: ".tensorflow.ConfigProto.Experimental"
+ }
+ nested_type {
+ name: "DeviceCountEntry"
+ field {
+ name: "key"
+ number: 1
+ label: LABEL_OPTIONAL
+ type: TYPE_STRING
+ }
+ field {
+ name: "value"
+ number: 2
+ label: LABEL_OPTIONAL
+ type: TYPE_INT32
+ }
+ options {
+ map_entry: true
+ }
+ }
+ nested_type {
+ name: "Experimental"
+ field {
+ name: "collective_group_leader"
+ number: 1
+ label: LABEL_OPTIONAL
+ type: TYPE_STRING
+ }
+ field {
+ name: "executor_type"
+ number: 3
+ label: LABEL_OPTIONAL
+ type: TYPE_STRING
+ }
+ reserved_range {
+ start: 2
+ end: 3
+ }
+ }
+ }
+}
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.-d-type.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.-d-type.pbtxt
new file mode 100644
index 0000000000..0b5b88bba8
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.-d-type.pbtxt
@@ -0,0 +1,77 @@
+path: "tensorflow.DType"
+tf_class {
+ is_instance: "<class \'tensorflow.python.framework.dtypes.DType\'>"
+ is_instance: "<type \'object\'>"
+ member {
+ name: "as_datatype_enum"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "as_numpy_dtype"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "base_dtype"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "is_bool"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "is_complex"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "is_floating"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "is_integer"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "is_numpy_compatible"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "is_quantized"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "is_unsigned"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "limits"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "max"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "min"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "name"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "real_dtype"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "size"
+ mtype: "<type \'property\'>"
+ }
+ member_method {
+ name: "__init__"
+ argspec: "args=[\'self\', \'type_enum\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "is_compatible_with"
+ argspec: "args=[\'self\', \'other\'], varargs=None, keywords=None, defaults=None"
+ }
+}
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.-device-spec.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.-device-spec.pbtxt
new file mode 100644
index 0000000000..92e535c341
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.-device-spec.pbtxt
@@ -0,0 +1,37 @@
+path: "tensorflow.DeviceSpec"
+tf_class {
+ is_instance: "<class \'tensorflow.python.framework.device.DeviceSpec\'>"
+ is_instance: "<type \'object\'>"
+ member {
+ name: "job"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "replica"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "task"
+ mtype: "<type \'property\'>"
+ }
+ member_method {
+ name: "__init__"
+ argspec: "args=[\'self\', \'job\', \'replica\', \'task\', \'device_type\', \'device_index\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'None\', \'None\'], "
+ }
+ member_method {
+ name: "from_string"
+ argspec: "args=[\'spec\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "merge_from"
+ argspec: "args=[\'self\', \'dev\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "parse_from_string"
+ argspec: "args=[\'self\', \'spec\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "to_string"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+}
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.-dimension.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.-dimension.pbtxt
new file mode 100644
index 0000000000..a9ab27719b
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.-dimension.pbtxt
@@ -0,0 +1,25 @@
+path: "tensorflow.Dimension"
+tf_class {
+ is_instance: "<class \'tensorflow.python.framework.tensor_shape.Dimension\'>"
+ is_instance: "<type \'object\'>"
+ member {
+ name: "value"
+ mtype: "<type \'property\'>"
+ }
+ member_method {
+ name: "__init__"
+ argspec: "args=[\'self\', \'value\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "assert_is_compatible_with"
+ argspec: "args=[\'self\', \'other\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "is_compatible_with"
+ argspec: "args=[\'self\', \'other\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "merge_with"
+ argspec: "args=[\'self\', \'other\'], varargs=None, keywords=None, defaults=None"
+ }
+}
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.-event.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.-event.pbtxt
new file mode 100644
index 0000000000..3b75a1735b
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.-event.pbtxt
@@ -0,0 +1,74 @@
+path: "tensorflow.Event"
+tf_proto {
+ descriptor {
+ name: "Event"
+ field {
+ name: "wall_time"
+ number: 1
+ label: LABEL_OPTIONAL
+ type: TYPE_DOUBLE
+ }
+ field {
+ name: "step"
+ number: 2
+ label: LABEL_OPTIONAL
+ type: TYPE_INT64
+ }
+ field {
+ name: "file_version"
+ number: 3
+ label: LABEL_OPTIONAL
+ type: TYPE_STRING
+ oneof_index: 0
+ }
+ field {
+ name: "graph_def"
+ number: 4
+ label: LABEL_OPTIONAL
+ type: TYPE_BYTES
+ oneof_index: 0
+ }
+ field {
+ name: "summary"
+ number: 5
+ label: LABEL_OPTIONAL
+ type: TYPE_MESSAGE
+ type_name: ".tensorflow.Summary"
+ oneof_index: 0
+ }
+ field {
+ name: "log_message"
+ number: 6
+ label: LABEL_OPTIONAL
+ type: TYPE_MESSAGE
+ type_name: ".tensorflow.LogMessage"
+ oneof_index: 0
+ }
+ field {
+ name: "session_log"
+ number: 7
+ label: LABEL_OPTIONAL
+ type: TYPE_MESSAGE
+ type_name: ".tensorflow.SessionLog"
+ oneof_index: 0
+ }
+ field {
+ name: "tagged_run_metadata"
+ number: 8
+ label: LABEL_OPTIONAL
+ type: TYPE_MESSAGE
+ type_name: ".tensorflow.TaggedRunMetadata"
+ oneof_index: 0
+ }
+ field {
+ name: "meta_graph_def"
+ number: 9
+ label: LABEL_OPTIONAL
+ type: TYPE_BYTES
+ oneof_index: 0
+ }
+ oneof_decl {
+ name: "what"
+ }
+ }
+}
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.-f-i-f-o-queue.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.-f-i-f-o-queue.pbtxt
new file mode 100644
index 0000000000..a095616c00
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.-f-i-f-o-queue.pbtxt
@@ -0,0 +1,66 @@
+path: "tensorflow.FIFOQueue"
+tf_class {
+ is_instance: "<class \'tensorflow.python.ops.data_flow_ops.FIFOQueue\'>"
+ is_instance: "<class \'tensorflow.python.ops.data_flow_ops.QueueBase\'>"
+ is_instance: "<type \'object\'>"
+ member {
+ name: "dtypes"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "name"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "names"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "queue_ref"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "shapes"
+ mtype: "<type \'property\'>"
+ }
+ member_method {
+ name: "__init__"
+ argspec: "args=[\'self\', \'capacity\', \'dtypes\', \'shapes\', \'names\', \'shared_name\', \'name\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'fifo_queue\'], "
+ }
+ member_method {
+ name: "close"
+ argspec: "args=[\'self\', \'cancel_pending_enqueues\', \'name\'], varargs=None, keywords=None, defaults=[\'False\', \'None\'], "
+ }
+ member_method {
+ name: "dequeue"
+ argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "dequeue_many"
+ argspec: "args=[\'self\', \'n\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "dequeue_up_to"
+ argspec: "args=[\'self\', \'n\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "enqueue"
+ argspec: "args=[\'self\', \'vals\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "enqueue_many"
+ argspec: "args=[\'self\', \'vals\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "from_list"
+ argspec: "args=[\'index\', \'queues\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "is_closed"
+ argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "size"
+ argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+}
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.-fixed-len-feature.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.-fixed-len-feature.pbtxt
new file mode 100644
index 0000000000..6933814a7b
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.-fixed-len-feature.pbtxt
@@ -0,0 +1,27 @@
+path: "tensorflow.FixedLenFeature"
+tf_class {
+ is_instance: "<class \'tensorflow.python.ops.parsing_ops.FixedLenFeature\'>"
+ is_instance: "<class \'tensorflow.python.ops.parsing_ops.FixedLenFeature\'>"
+ is_instance: "<type \'tuple\'>"
+ member {
+ name: "default_value"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "dtype"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "shape"
+ mtype: "<type \'property\'>"
+ }
+ member_method {
+ name: "__init__"
+ }
+ member_method {
+ name: "count"
+ }
+ member_method {
+ name: "index"
+ }
+}
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.-fixed-len-sequence-feature.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.-fixed-len-sequence-feature.pbtxt
new file mode 100644
index 0000000000..c538787951
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.-fixed-len-sequence-feature.pbtxt
@@ -0,0 +1,31 @@
+path: "tensorflow.FixedLenSequenceFeature"
+tf_class {
+ is_instance: "<class \'tensorflow.python.ops.parsing_ops.FixedLenSequenceFeature\'>"
+ is_instance: "<class \'tensorflow.python.ops.parsing_ops.FixedLenSequenceFeature\'>"
+ is_instance: "<type \'tuple\'>"
+ member {
+ name: "allow_missing"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "default_value"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "dtype"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "shape"
+ mtype: "<type \'property\'>"
+ }
+ member_method {
+ name: "__init__"
+ }
+ member_method {
+ name: "count"
+ }
+ member_method {
+ name: "index"
+ }
+}
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.-g-p-u-options.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.-g-p-u-options.pbtxt
new file mode 100644
index 0000000000..353e63127d
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.-g-p-u-options.pbtxt
@@ -0,0 +1,92 @@
+path: "tensorflow.GPUOptions"
+tf_proto {
+ descriptor {
+ name: "GPUOptions"
+ field {
+ name: "per_process_gpu_memory_fraction"
+ number: 1
+ label: LABEL_OPTIONAL
+ type: TYPE_DOUBLE
+ }
+ field {
+ name: "allow_growth"
+ number: 4
+ label: LABEL_OPTIONAL
+ type: TYPE_BOOL
+ }
+ field {
+ name: "allocator_type"
+ number: 2
+ label: LABEL_OPTIONAL
+ type: TYPE_STRING
+ }
+ field {
+ name: "deferred_deletion_bytes"
+ number: 3
+ label: LABEL_OPTIONAL
+ type: TYPE_INT64
+ }
+ field {
+ name: "visible_device_list"
+ number: 5
+ label: LABEL_OPTIONAL
+ type: TYPE_STRING
+ }
+ field {
+ name: "polling_active_delay_usecs"
+ number: 6
+ label: LABEL_OPTIONAL
+ type: TYPE_INT32
+ }
+ field {
+ name: "polling_inactive_delay_msecs"
+ number: 7
+ label: LABEL_OPTIONAL
+ type: TYPE_INT32
+ }
+ field {
+ name: "force_gpu_compatible"
+ number: 8
+ label: LABEL_OPTIONAL
+ type: TYPE_BOOL
+ }
+ field {
+ name: "experimental"
+ number: 9
+ label: LABEL_OPTIONAL
+ type: TYPE_MESSAGE
+ type_name: ".tensorflow.GPUOptions.Experimental"
+ }
+ nested_type {
+ name: "Experimental"
+ field {
+ name: "virtual_devices"
+ number: 1
+ label: LABEL_REPEATED
+ type: TYPE_MESSAGE
+ type_name: ".tensorflow.GPUOptions.Experimental.VirtualDevices"
+ }
+ field {
+ name: "use_unified_memory"
+ number: 2
+ label: LABEL_OPTIONAL
+ type: TYPE_BOOL
+ }
+ field {
+ name: "num_dev_to_dev_copy_streams"
+ number: 3
+ label: LABEL_OPTIONAL
+ type: TYPE_INT32
+ }
+ nested_type {
+ name: "VirtualDevices"
+ field {
+ name: "memory_limit_mb"
+ number: 1
+ label: LABEL_REPEATED
+ type: TYPE_FLOAT
+ }
+ }
+ }
+ }
+}
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.-gradient-tape.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.-gradient-tape.pbtxt
new file mode 100644
index 0000000000..2f4257a66a
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.-gradient-tape.pbtxt
@@ -0,0 +1,29 @@
+path: "tensorflow.GradientTape"
+tf_class {
+ is_instance: "<class \'tensorflow.python.eager.backprop.GradientTape\'>"
+ is_instance: "<type \'object\'>"
+ member_method {
+ name: "__init__"
+ argspec: "args=[\'self\', \'persistent\', \'watch_accessed_variables\'], varargs=None, keywords=None, defaults=[\'False\', \'True\'], "
+ }
+ member_method {
+ name: "gradient"
+ argspec: "args=[\'self\', \'target\', \'sources\', \'output_gradients\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "reset"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "stop_recording"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "watch"
+ argspec: "args=[\'self\', \'tensor\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "watched_variables"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+}
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.-graph-def.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.-graph-def.pbtxt
new file mode 100644
index 0000000000..19eccff03d
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.-graph-def.pbtxt
@@ -0,0 +1,36 @@
+path: "tensorflow.GraphDef"
+tf_proto {
+ descriptor {
+ name: "GraphDef"
+ field {
+ name: "node"
+ number: 1
+ label: LABEL_REPEATED
+ type: TYPE_MESSAGE
+ type_name: ".tensorflow.NodeDef"
+ }
+ field {
+ name: "versions"
+ number: 4
+ label: LABEL_OPTIONAL
+ type: TYPE_MESSAGE
+ type_name: ".tensorflow.VersionDef"
+ }
+ field {
+ name: "version"
+ number: 3
+ label: LABEL_OPTIONAL
+ type: TYPE_INT32
+ options {
+ deprecated: true
+ }
+ }
+ field {
+ name: "library"
+ number: 2
+ label: LABEL_OPTIONAL
+ type: TYPE_MESSAGE
+ type_name: ".tensorflow.FunctionDefLibrary"
+ }
+ }
+}
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.-graph-keys.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.-graph-keys.pbtxt
new file mode 100644
index 0000000000..ffe4790933
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.-graph-keys.pbtxt
@@ -0,0 +1,140 @@
+path: "tensorflow.GraphKeys"
+tf_class {
+ is_instance: "<class \'tensorflow.python.framework.ops.GraphKeys\'>"
+ is_instance: "<type \'object\'>"
+ member {
+ name: "ACTIVATIONS"
+ mtype: "<type \'str\'>"
+ }
+ member {
+ name: "ASSET_FILEPATHS"
+ mtype: "<type \'str\'>"
+ }
+ member {
+ name: "BIASES"
+ mtype: "<type \'str\'>"
+ }
+ member {
+ name: "CONCATENATED_VARIABLES"
+ mtype: "<type \'str\'>"
+ }
+ member {
+ name: "COND_CONTEXT"
+ mtype: "<type \'str\'>"
+ }
+ member {
+ name: "EVAL_STEP"
+ mtype: "<type \'str\'>"
+ }
+ member {
+ name: "GLOBAL_STEP"
+ mtype: "<type \'str\'>"
+ }
+ member {
+ name: "GLOBAL_VARIABLES"
+ mtype: "<type \'str\'>"
+ }
+ member {
+ name: "INIT_OP"
+ mtype: "<type \'str\'>"
+ }
+ member {
+ name: "LOCAL_INIT_OP"
+ mtype: "<type \'str\'>"
+ }
+ member {
+ name: "LOCAL_RESOURCES"
+ mtype: "<type \'str\'>"
+ }
+ member {
+ name: "LOCAL_VARIABLES"
+ mtype: "<type \'str\'>"
+ }
+ member {
+ name: "LOSSES"
+ mtype: "<type \'str\'>"
+ }
+ member {
+ name: "METRIC_VARIABLES"
+ mtype: "<type \'str\'>"
+ }
+ member {
+ name: "MODEL_VARIABLES"
+ mtype: "<type \'str\'>"
+ }
+ member {
+ name: "MOVING_AVERAGE_VARIABLES"
+ mtype: "<type \'str\'>"
+ }
+ member {
+ name: "QUEUE_RUNNERS"
+ mtype: "<type \'str\'>"
+ }
+ member {
+ name: "READY_FOR_LOCAL_INIT_OP"
+ mtype: "<type \'str\'>"
+ }
+ member {
+ name: "READY_OP"
+ mtype: "<type \'str\'>"
+ }
+ member {
+ name: "REGULARIZATION_LOSSES"
+ mtype: "<type \'str\'>"
+ }
+ member {
+ name: "RESOURCES"
+ mtype: "<type \'str\'>"
+ }
+ member {
+ name: "SAVEABLE_OBJECTS"
+ mtype: "<type \'str\'>"
+ }
+ member {
+ name: "SAVERS"
+ mtype: "<type \'str\'>"
+ }
+ member {
+ name: "SUMMARIES"
+ mtype: "<type \'str\'>"
+ }
+ member {
+ name: "SUMMARY_OP"
+ mtype: "<type \'str\'>"
+ }
+ member {
+ name: "TABLE_INITIALIZERS"
+ mtype: "<type \'str\'>"
+ }
+ member {
+ name: "TRAINABLE_RESOURCE_VARIABLES"
+ mtype: "<type \'str\'>"
+ }
+ member {
+ name: "TRAINABLE_VARIABLES"
+ mtype: "<type \'str\'>"
+ }
+ member {
+ name: "TRAIN_OP"
+ mtype: "<type \'str\'>"
+ }
+ member {
+ name: "UPDATE_OPS"
+ mtype: "<type \'str\'>"
+ }
+ member {
+ name: "VARIABLES"
+ mtype: "<type \'str\'>"
+ }
+ member {
+ name: "WEIGHTS"
+ mtype: "<type \'str\'>"
+ }
+ member {
+ name: "WHILE_CONTEXT"
+ mtype: "<type \'str\'>"
+ }
+ member_method {
+ name: "__init__"
+ }
+}
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.-graph-options.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.-graph-options.pbtxt
new file mode 100644
index 0000000000..a9f99bc171
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.-graph-options.pbtxt
@@ -0,0 +1,67 @@
+path: "tensorflow.GraphOptions"
+tf_proto {
+ descriptor {
+ name: "GraphOptions"
+ field {
+ name: "enable_recv_scheduling"
+ number: 2
+ label: LABEL_OPTIONAL
+ type: TYPE_BOOL
+ }
+ field {
+ name: "optimizer_options"
+ number: 3
+ label: LABEL_OPTIONAL
+ type: TYPE_MESSAGE
+ type_name: ".tensorflow.OptimizerOptions"
+ }
+ field {
+ name: "build_cost_model"
+ number: 4
+ label: LABEL_OPTIONAL
+ type: TYPE_INT64
+ }
+ field {
+ name: "build_cost_model_after"
+ number: 9
+ label: LABEL_OPTIONAL
+ type: TYPE_INT64
+ }
+ field {
+ name: "infer_shapes"
+ number: 5
+ label: LABEL_OPTIONAL
+ type: TYPE_BOOL
+ }
+ field {
+ name: "place_pruned_graph"
+ number: 6
+ label: LABEL_OPTIONAL
+ type: TYPE_BOOL
+ }
+ field {
+ name: "enable_bfloat16_sendrecv"
+ number: 7
+ label: LABEL_OPTIONAL
+ type: TYPE_BOOL
+ }
+ field {
+ name: "timeline_step"
+ number: 8
+ label: LABEL_OPTIONAL
+ type: TYPE_INT32
+ }
+ field {
+ name: "rewrite_options"
+ number: 10
+ label: LABEL_OPTIONAL
+ type: TYPE_MESSAGE
+ type_name: ".tensorflow.RewriterConfig"
+ }
+ reserved_range {
+ start: 1
+ end: 2
+ }
+ reserved_name: "skip_common_subexpression_elimination"
+ }
+}
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.-graph.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.-graph.pbtxt
new file mode 100644
index 0000000000..cdaeb55e30
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.-graph.pbtxt
@@ -0,0 +1,141 @@
+path: "tensorflow.Graph"
+tf_class {
+ is_instance: "<class \'tensorflow.python.framework.ops.Graph\'>"
+ is_instance: "<type \'object\'>"
+ member {
+ name: "building_function"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "collections"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "finalized"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "graph_def_versions"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "seed"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "version"
+ mtype: "<type \'property\'>"
+ }
+ member_method {
+ name: "__init__"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "add_to_collection"
+ argspec: "args=[\'self\', \'name\', \'value\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "add_to_collections"
+ argspec: "args=[\'self\', \'names\', \'value\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "as_default"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "as_graph_def"
+ argspec: "args=[\'self\', \'from_version\', \'add_shapes\'], varargs=None, keywords=None, defaults=[\'None\', \'False\'], "
+ }
+ member_method {
+ name: "as_graph_element"
+ argspec: "args=[\'self\', \'obj\', \'allow_tensor\', \'allow_operation\'], varargs=None, keywords=None, defaults=[\'True\', \'True\'], "
+ }
+ member_method {
+ name: "clear_collection"
+ argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "colocate_with"
+ argspec: "args=[\'self\', \'op\', \'ignore_existing\'], varargs=None, keywords=None, defaults=[\'False\'], "
+ }
+ member_method {
+ name: "container"
+ argspec: "args=[\'self\', \'container_name\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "control_dependencies"
+ argspec: "args=[\'self\', \'control_inputs\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "create_op"
+ argspec: "args=[\'self\', \'op_type\', \'inputs\', \'dtypes\', \'input_types\', \'name\', \'attrs\', \'op_def\', \'compute_shapes\', \'compute_device\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'None\', \'True\', \'True\'], "
+ }
+ member_method {
+ name: "device"
+ argspec: "args=[\'self\', \'device_name_or_function\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "finalize"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_all_collection_keys"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_collection"
+ argspec: "args=[\'self\', \'name\', \'scope\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "get_collection_ref"
+ argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_name_scope"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_operation_by_name"
+ argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_operations"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_tensor_by_name"
+ argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "gradient_override_map"
+ argspec: "args=[\'self\', \'op_type_map\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "is_feedable"
+ argspec: "args=[\'self\', \'tensor\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "is_fetchable"
+ argspec: "args=[\'self\', \'tensor_or_op\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "name_scope"
+ argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "prevent_feeding"
+ argspec: "args=[\'self\', \'tensor\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "prevent_fetching"
+ argspec: "args=[\'self\', \'op\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "switch_to_thread_local"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "unique_name"
+ argspec: "args=[\'self\', \'name\', \'mark_as_used\'], varargs=None, keywords=None, defaults=[\'True\'], "
+ }
+}
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.-histogram-proto.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.-histogram-proto.pbtxt
new file mode 100644
index 0000000000..d4402f330b
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.-histogram-proto.pbtxt
@@ -0,0 +1,54 @@
+path: "tensorflow.HistogramProto"
+tf_proto {
+ descriptor {
+ name: "HistogramProto"
+ field {
+ name: "min"
+ number: 1
+ label: LABEL_OPTIONAL
+ type: TYPE_DOUBLE
+ }
+ field {
+ name: "max"
+ number: 2
+ label: LABEL_OPTIONAL
+ type: TYPE_DOUBLE
+ }
+ field {
+ name: "num"
+ number: 3
+ label: LABEL_OPTIONAL
+ type: TYPE_DOUBLE
+ }
+ field {
+ name: "sum"
+ number: 4
+ label: LABEL_OPTIONAL
+ type: TYPE_DOUBLE
+ }
+ field {
+ name: "sum_squares"
+ number: 5
+ label: LABEL_OPTIONAL
+ type: TYPE_DOUBLE
+ }
+ field {
+ name: "bucket_limit"
+ number: 6
+ label: LABEL_REPEATED
+ type: TYPE_DOUBLE
+ options {
+ packed: true
+ }
+ }
+ field {
+ name: "bucket"
+ number: 7
+ label: LABEL_REPEATED
+ type: TYPE_DOUBLE
+ options {
+ packed: true
+ }
+ }
+ }
+}
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.-indexed-slices.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.-indexed-slices.pbtxt
new file mode 100644
index 0000000000..fee84d8530
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.-indexed-slices.pbtxt
@@ -0,0 +1,42 @@
+path: "tensorflow.IndexedSlices"
+tf_class {
+ is_instance: "<class \'tensorflow.python.framework.ops.IndexedSlices\'>"
+ is_instance: "<class \'tensorflow.python.framework.ops._TensorLike\'>"
+ is_instance: "<type \'object\'>"
+ member {
+ name: "dense_shape"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "device"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "dtype"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "graph"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "indices"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "name"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "op"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "values"
+ mtype: "<type \'property\'>"
+ }
+ member_method {
+ name: "__init__"
+ argspec: "args=[\'self\', \'values\', \'indices\', \'dense_shape\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+}
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.-interactive-session.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.-interactive-session.pbtxt
new file mode 100644
index 0000000000..0a3b81bf82
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.-interactive-session.pbtxt
@@ -0,0 +1,51 @@
+path: "tensorflow.InteractiveSession"
+tf_class {
+ is_instance: "<class \'tensorflow.python.client.session.InteractiveSession\'>"
+ is_instance: "<class \'tensorflow.python.client.session.BaseSession\'>"
+ is_instance: "<class \'tensorflow.python.client.session.SessionInterface\'>"
+ is_instance: "<type \'object\'>"
+ member {
+ name: "graph"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "graph_def"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "sess_str"
+ mtype: "<type \'property\'>"
+ }
+ member_method {
+ name: "__init__"
+ argspec: "args=[\'self\', \'target\', \'graph\', \'config\'], varargs=None, keywords=None, defaults=[\'\', \'None\', \'None\'], "
+ }
+ member_method {
+ name: "as_default"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "close"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "list_devices"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "make_callable"
+ argspec: "args=[\'self\', \'fetches\', \'feed_list\', \'accept_options\'], varargs=None, keywords=None, defaults=[\'None\', \'False\'], "
+ }
+ member_method {
+ name: "partial_run"
+ argspec: "args=[\'self\', \'handle\', \'fetches\', \'feed_dict\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "partial_run_setup"
+ argspec: "args=[\'self\', \'fetches\', \'feeds\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "run"
+ argspec: "args=[\'self\', \'fetches\', \'feed_dict\', \'options\', \'run_metadata\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\'], "
+ }
+}
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.-log-message.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.-log-message.pbtxt
new file mode 100644
index 0000000000..5023aa96bf
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.-log-message.pbtxt
@@ -0,0 +1,46 @@
+path: "tensorflow.LogMessage"
+tf_proto {
+ descriptor {
+ name: "LogMessage"
+ field {
+ name: "level"
+ number: 1
+ label: LABEL_OPTIONAL
+ type: TYPE_ENUM
+ type_name: ".tensorflow.LogMessage.Level"
+ }
+ field {
+ name: "message"
+ number: 2
+ label: LABEL_OPTIONAL
+ type: TYPE_STRING
+ }
+ enum_type {
+ name: "Level"
+ value {
+ name: "UNKNOWN"
+ number: 0
+ }
+ value {
+ name: "DEBUGGING"
+ number: 10
+ }
+ value {
+ name: "INFO"
+ number: 20
+ }
+ value {
+ name: "WARN"
+ number: 30
+ }
+ value {
+ name: "ERROR"
+ number: 40
+ }
+ value {
+ name: "FATAL"
+ number: 50
+ }
+ }
+ }
+}
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.-meta-graph-def.-collection-def-entry.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.-meta-graph-def.-collection-def-entry.pbtxt
new file mode 100644
index 0000000000..0ba09bec4b
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.-meta-graph-def.-collection-def-entry.pbtxt
@@ -0,0 +1,22 @@
+path: "tensorflow.MetaGraphDef.CollectionDefEntry"
+tf_proto {
+ descriptor {
+ name: "CollectionDefEntry"
+ field {
+ name: "key"
+ number: 1
+ label: LABEL_OPTIONAL
+ type: TYPE_STRING
+ }
+ field {
+ name: "value"
+ number: 2
+ label: LABEL_OPTIONAL
+ type: TYPE_MESSAGE
+ type_name: ".tensorflow.CollectionDef"
+ }
+ options {
+ map_entry: true
+ }
+ }
+}
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.-meta-graph-def.-meta-info-def.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.-meta-graph-def.-meta-info-def.pbtxt
new file mode 100644
index 0000000000..41c62a407b
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.-meta-graph-def.-meta-info-def.pbtxt
@@ -0,0 +1,50 @@
+path: "tensorflow.MetaGraphDef.MetaInfoDef"
+tf_proto {
+ descriptor {
+ name: "MetaInfoDef"
+ field {
+ name: "meta_graph_version"
+ number: 1
+ label: LABEL_OPTIONAL
+ type: TYPE_STRING
+ }
+ field {
+ name: "stripped_op_list"
+ number: 2
+ label: LABEL_OPTIONAL
+ type: TYPE_MESSAGE
+ type_name: ".tensorflow.OpList"
+ }
+ field {
+ name: "any_info"
+ number: 3
+ label: LABEL_OPTIONAL
+ type: TYPE_MESSAGE
+ type_name: ".google.protobuf.Any"
+ }
+ field {
+ name: "tags"
+ number: 4
+ label: LABEL_REPEATED
+ type: TYPE_STRING
+ }
+ field {
+ name: "tensorflow_version"
+ number: 5
+ label: LABEL_OPTIONAL
+ type: TYPE_STRING
+ }
+ field {
+ name: "tensorflow_git_version"
+ number: 6
+ label: LABEL_OPTIONAL
+ type: TYPE_STRING
+ }
+ field {
+ name: "stripped_default_attrs"
+ number: 7
+ label: LABEL_OPTIONAL
+ type: TYPE_BOOL
+ }
+ }
+}
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.-meta-graph-def.-signature-def-entry.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.-meta-graph-def.-signature-def-entry.pbtxt
new file mode 100644
index 0000000000..73dc414a77
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.-meta-graph-def.-signature-def-entry.pbtxt
@@ -0,0 +1,22 @@
+path: "tensorflow.MetaGraphDef.SignatureDefEntry"
+tf_proto {
+ descriptor {
+ name: "SignatureDefEntry"
+ field {
+ name: "key"
+ number: 1
+ label: LABEL_OPTIONAL
+ type: TYPE_STRING
+ }
+ field {
+ name: "value"
+ number: 2
+ label: LABEL_OPTIONAL
+ type: TYPE_MESSAGE
+ type_name: ".tensorflow.SignatureDef"
+ }
+ options {
+ map_entry: true
+ }
+ }
+}
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.-meta-graph-def.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.-meta-graph-def.pbtxt
new file mode 100644
index 0000000000..d71c2358c9
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.-meta-graph-def.pbtxt
@@ -0,0 +1,133 @@
+path: "tensorflow.MetaGraphDef"
+tf_proto {
+ descriptor {
+ name: "MetaGraphDef"
+ field {
+ name: "meta_info_def"
+ number: 1
+ label: LABEL_OPTIONAL
+ type: TYPE_MESSAGE
+ type_name: ".tensorflow.MetaGraphDef.MetaInfoDef"
+ }
+ field {
+ name: "graph_def"
+ number: 2
+ label: LABEL_OPTIONAL
+ type: TYPE_MESSAGE
+ type_name: ".tensorflow.GraphDef"
+ }
+ field {
+ name: "saver_def"
+ number: 3
+ label: LABEL_OPTIONAL
+ type: TYPE_MESSAGE
+ type_name: ".tensorflow.SaverDef"
+ }
+ field {
+ name: "collection_def"
+ number: 4
+ label: LABEL_REPEATED
+ type: TYPE_MESSAGE
+ type_name: ".tensorflow.MetaGraphDef.CollectionDefEntry"
+ }
+ field {
+ name: "signature_def"
+ number: 5
+ label: LABEL_REPEATED
+ type: TYPE_MESSAGE
+ type_name: ".tensorflow.MetaGraphDef.SignatureDefEntry"
+ }
+ field {
+ name: "asset_file_def"
+ number: 6
+ label: LABEL_REPEATED
+ type: TYPE_MESSAGE
+ type_name: ".tensorflow.AssetFileDef"
+ }
+ nested_type {
+ name: "MetaInfoDef"
+ field {
+ name: "meta_graph_version"
+ number: 1
+ label: LABEL_OPTIONAL
+ type: TYPE_STRING
+ }
+ field {
+ name: "stripped_op_list"
+ number: 2
+ label: LABEL_OPTIONAL
+ type: TYPE_MESSAGE
+ type_name: ".tensorflow.OpList"
+ }
+ field {
+ name: "any_info"
+ number: 3
+ label: LABEL_OPTIONAL
+ type: TYPE_MESSAGE
+ type_name: ".google.protobuf.Any"
+ }
+ field {
+ name: "tags"
+ number: 4
+ label: LABEL_REPEATED
+ type: TYPE_STRING
+ }
+ field {
+ name: "tensorflow_version"
+ number: 5
+ label: LABEL_OPTIONAL
+ type: TYPE_STRING
+ }
+ field {
+ name: "tensorflow_git_version"
+ number: 6
+ label: LABEL_OPTIONAL
+ type: TYPE_STRING
+ }
+ field {
+ name: "stripped_default_attrs"
+ number: 7
+ label: LABEL_OPTIONAL
+ type: TYPE_BOOL
+ }
+ }
+ nested_type {
+ name: "CollectionDefEntry"
+ field {
+ name: "key"
+ number: 1
+ label: LABEL_OPTIONAL
+ type: TYPE_STRING
+ }
+ field {
+ name: "value"
+ number: 2
+ label: LABEL_OPTIONAL
+ type: TYPE_MESSAGE
+ type_name: ".tensorflow.CollectionDef"
+ }
+ options {
+ map_entry: true
+ }
+ }
+ nested_type {
+ name: "SignatureDefEntry"
+ field {
+ name: "key"
+ number: 1
+ label: LABEL_OPTIONAL
+ type: TYPE_STRING
+ }
+ field {
+ name: "value"
+ number: 2
+ label: LABEL_OPTIONAL
+ type: TYPE_MESSAGE
+ type_name: ".tensorflow.SignatureDef"
+ }
+ options {
+ map_entry: true
+ }
+ }
+ }
+}
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.-name-attr-list.-attr-entry.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.-name-attr-list.-attr-entry.pbtxt
new file mode 100644
index 0000000000..b119b20877
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.-name-attr-list.-attr-entry.pbtxt
@@ -0,0 +1,22 @@
+path: "tensorflow.NameAttrList.AttrEntry"
+tf_proto {
+ descriptor {
+ name: "AttrEntry"
+ field {
+ name: "key"
+ number: 1
+ label: LABEL_OPTIONAL
+ type: TYPE_STRING
+ }
+ field {
+ name: "value"
+ number: 2
+ label: LABEL_OPTIONAL
+ type: TYPE_MESSAGE
+ type_name: ".tensorflow.AttrValue"
+ }
+ options {
+ map_entry: true
+ }
+ }
+}
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.-name-attr-list.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.-name-attr-list.pbtxt
new file mode 100644
index 0000000000..fcdb411ffc
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.-name-attr-list.pbtxt
@@ -0,0 +1,38 @@
+path: "tensorflow.NameAttrList"
+tf_proto {
+ descriptor {
+ name: "NameAttrList"
+ field {
+ name: "name"
+ number: 1
+ label: LABEL_OPTIONAL
+ type: TYPE_STRING
+ }
+ field {
+ name: "attr"
+ number: 2
+ label: LABEL_REPEATED
+ type: TYPE_MESSAGE
+ type_name: ".tensorflow.NameAttrList.AttrEntry"
+ }
+ nested_type {
+ name: "AttrEntry"
+ field {
+ name: "key"
+ number: 1
+ label: LABEL_OPTIONAL
+ type: TYPE_STRING
+ }
+ field {
+ name: "value"
+ number: 2
+ label: LABEL_OPTIONAL
+ type: TYPE_MESSAGE
+ type_name: ".tensorflow.AttrValue"
+ }
+ options {
+ map_entry: true
+ }
+ }
+ }
+}
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.-node-def.-attr-entry.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.-node-def.-attr-entry.pbtxt
new file mode 100644
index 0000000000..622e4c3d0f
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.-node-def.-attr-entry.pbtxt
@@ -0,0 +1,22 @@
+path: "tensorflow.NodeDef.AttrEntry"
+tf_proto {
+ descriptor {
+ name: "AttrEntry"
+ field {
+ name: "key"
+ number: 1
+ label: LABEL_OPTIONAL
+ type: TYPE_STRING
+ }
+ field {
+ name: "value"
+ number: 2
+ label: LABEL_OPTIONAL
+ type: TYPE_MESSAGE
+ type_name: ".tensorflow.AttrValue"
+ }
+ options {
+ map_entry: true
+ }
+ }
+}
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.-node-def.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.-node-def.pbtxt
new file mode 100644
index 0000000000..646fa8abb9
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.-node-def.pbtxt
@@ -0,0 +1,56 @@
+path: "tensorflow.NodeDef"
+tf_proto {
+ descriptor {
+ name: "NodeDef"
+ field {
+ name: "name"
+ number: 1
+ label: LABEL_OPTIONAL
+ type: TYPE_STRING
+ }
+ field {
+ name: "op"
+ number: 2
+ label: LABEL_OPTIONAL
+ type: TYPE_STRING
+ }
+ field {
+ name: "input"
+ number: 3
+ label: LABEL_REPEATED
+ type: TYPE_STRING
+ }
+ field {
+ name: "device"
+ number: 4
+ label: LABEL_OPTIONAL
+ type: TYPE_STRING
+ }
+ field {
+ name: "attr"
+ number: 5
+ label: LABEL_REPEATED
+ type: TYPE_MESSAGE
+ type_name: ".tensorflow.NodeDef.AttrEntry"
+ }
+ nested_type {
+ name: "AttrEntry"
+ field {
+ name: "key"
+ number: 1
+ label: LABEL_OPTIONAL
+ type: TYPE_STRING
+ }
+ field {
+ name: "value"
+ number: 2
+ label: LABEL_OPTIONAL
+ type: TYPE_MESSAGE
+ type_name: ".tensorflow.AttrValue"
+ }
+ options {
+ map_entry: true
+ }
+ }
+ }
+}
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.-op-error.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.-op-error.pbtxt
new file mode 100644
index 0000000000..7e59615534
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.-op-error.pbtxt
@@ -0,0 +1,29 @@
+path: "tensorflow.OpError"
+tf_class {
+ is_instance: "<class \'tensorflow.python.framework.errors_impl.OpError\'>"
+ is_instance: "<type \'exceptions.Exception\'>"
+ member {
+ name: "args"
+ mtype: "<type \'getset_descriptor\'>"
+ }
+ member {
+ name: "error_code"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "message"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "node_def"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "op"
+ mtype: "<type \'property\'>"
+ }
+ member_method {
+ name: "__init__"
+ argspec: "args=[\'self\', \'node_def\', \'op\', \'message\', \'error_code\'], varargs=None, keywords=None, defaults=None"
+ }
+}
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.-operation.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.-operation.pbtxt
new file mode 100644
index 0000000000..64240f7069
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.-operation.pbtxt
@@ -0,0 +1,69 @@
+path: "tensorflow.Operation"
+tf_class {
+ is_instance: "<class \'tensorflow.python.framework.ops.Operation\'>"
+ is_instance: "<type \'object\'>"
+ member {
+ name: "control_inputs"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "device"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "graph"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "inputs"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "name"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "node_def"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "op_def"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "outputs"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "traceback"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "traceback_with_start_lines"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "type"
+ mtype: "<type \'property\'>"
+ }
+ member_method {
+ name: "__init__"
+ argspec: "args=[\'self\', \'node_def\', \'g\', \'inputs\', \'output_types\', \'control_inputs\', \'input_types\', \'original_op\', \'op_def\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'None\', \'None\', \'None\'], "
+ }
+ member_method {
+ name: "colocation_groups"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_attr"
+ argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "run"
+ argspec: "args=[\'self\', \'feed_dict\', \'session\'], varargs=None, keywords=None, defaults=[\'None\', \'None\'], "
+ }
+ member_method {
+ name: "values"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+}
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.-optimizer-options.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.-optimizer-options.pbtxt
new file mode 100644
index 0000000000..3ccf9d459b
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.-optimizer-options.pbtxt
@@ -0,0 +1,74 @@
+path: "tensorflow.OptimizerOptions"
+tf_proto {
+ descriptor {
+ name: "OptimizerOptions"
+ field {
+ name: "do_common_subexpression_elimination"
+ number: 1
+ label: LABEL_OPTIONAL
+ type: TYPE_BOOL
+ }
+ field {
+ name: "do_constant_folding"
+ number: 2
+ label: LABEL_OPTIONAL
+ type: TYPE_BOOL
+ }
+ field {
+ name: "max_folded_constant_in_bytes"
+ number: 6
+ label: LABEL_OPTIONAL
+ type: TYPE_INT64
+ }
+ field {
+ name: "do_function_inlining"
+ number: 4
+ label: LABEL_OPTIONAL
+ type: TYPE_BOOL
+ }
+ field {
+ name: "opt_level"
+ number: 3
+ label: LABEL_OPTIONAL
+ type: TYPE_ENUM
+ type_name: ".tensorflow.OptimizerOptions.Level"
+ }
+ field {
+ name: "global_jit_level"
+ number: 5
+ label: LABEL_OPTIONAL
+ type: TYPE_ENUM
+ type_name: ".tensorflow.OptimizerOptions.GlobalJitLevel"
+ }
+ enum_type {
+ name: "Level"
+ value {
+ name: "L1"
+ number: 0
+ }
+ value {
+ name: "L0"
+ number: -1
+ }
+ }
+ enum_type {
+ name: "GlobalJitLevel"
+ value {
+ name: "DEFAULT"
+ number: 0
+ }
+ value {
+ name: "OFF"
+ number: -1
+ }
+ value {
+ name: "ON_1"
+ number: 1
+ }
+ value {
+ name: "ON_2"
+ number: 2
+ }
+ }
+ }
+}
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.-padding-f-i-f-o-queue.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.-padding-f-i-f-o-queue.pbtxt
new file mode 100644
index 0000000000..8fed133561
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.-padding-f-i-f-o-queue.pbtxt
@@ -0,0 +1,66 @@
+path: "tensorflow.PaddingFIFOQueue"
+tf_class {
+ is_instance: "<class \'tensorflow.python.ops.data_flow_ops.PaddingFIFOQueue\'>"
+ is_instance: "<class \'tensorflow.python.ops.data_flow_ops.QueueBase\'>"
+ is_instance: "<type \'object\'>"
+ member {
+ name: "dtypes"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "name"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "names"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "queue_ref"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "shapes"
+ mtype: "<type \'property\'>"
+ }
+ member_method {
+ name: "__init__"
+ argspec: "args=[\'self\', \'capacity\', \'dtypes\', \'shapes\', \'names\', \'shared_name\', \'name\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'padding_fifo_queue\'], "
+ }
+ member_method {
+ name: "close"
+ argspec: "args=[\'self\', \'cancel_pending_enqueues\', \'name\'], varargs=None, keywords=None, defaults=[\'False\', \'None\'], "
+ }
+ member_method {
+ name: "dequeue"
+ argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "dequeue_many"
+ argspec: "args=[\'self\', \'n\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "dequeue_up_to"
+ argspec: "args=[\'self\', \'n\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "enqueue"
+ argspec: "args=[\'self\', \'vals\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "enqueue_many"
+ argspec: "args=[\'self\', \'vals\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "from_list"
+ argspec: "args=[\'index\', \'queues\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "is_closed"
+ argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "size"
+ argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+}
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.-priority-queue.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.-priority-queue.pbtxt
new file mode 100644
index 0000000000..ebb017e81b
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.-priority-queue.pbtxt
@@ -0,0 +1,66 @@
+path: "tensorflow.PriorityQueue"
+tf_class {
+ is_instance: "<class \'tensorflow.python.ops.data_flow_ops.PriorityQueue\'>"
+ is_instance: "<class \'tensorflow.python.ops.data_flow_ops.QueueBase\'>"
+ is_instance: "<type \'object\'>"
+ member {
+ name: "dtypes"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "name"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "names"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "queue_ref"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "shapes"
+ mtype: "<type \'property\'>"
+ }
+ member_method {
+ name: "__init__"
+ argspec: "args=[\'self\', \'capacity\', \'types\', \'shapes\', \'names\', \'shared_name\', \'name\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'priority_queue\'], "
+ }
+ member_method {
+ name: "close"
+ argspec: "args=[\'self\', \'cancel_pending_enqueues\', \'name\'], varargs=None, keywords=None, defaults=[\'False\', \'None\'], "
+ }
+ member_method {
+ name: "dequeue"
+ argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "dequeue_many"
+ argspec: "args=[\'self\', \'n\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "dequeue_up_to"
+ argspec: "args=[\'self\', \'n\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "enqueue"
+ argspec: "args=[\'self\', \'vals\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "enqueue_many"
+ argspec: "args=[\'self\', \'vals\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "from_list"
+ argspec: "args=[\'index\', \'queues\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "is_closed"
+ argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "size"
+ argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+}
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.-queue-base.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.-queue-base.pbtxt
new file mode 100644
index 0000000000..761f90989f
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.-queue-base.pbtxt
@@ -0,0 +1,65 @@
+path: "tensorflow.QueueBase"
+tf_class {
+ is_instance: "<class \'tensorflow.python.ops.data_flow_ops.QueueBase\'>"
+ is_instance: "<type \'object\'>"
+ member {
+ name: "dtypes"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "name"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "names"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "queue_ref"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "shapes"
+ mtype: "<type \'property\'>"
+ }
+ member_method {
+ name: "__init__"
+ argspec: "args=[\'self\', \'dtypes\', \'shapes\', \'names\', \'queue_ref\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "close"
+ argspec: "args=[\'self\', \'cancel_pending_enqueues\', \'name\'], varargs=None, keywords=None, defaults=[\'False\', \'None\'], "
+ }
+ member_method {
+ name: "dequeue"
+ argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "dequeue_many"
+ argspec: "args=[\'self\', \'n\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "dequeue_up_to"
+ argspec: "args=[\'self\', \'n\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "enqueue"
+ argspec: "args=[\'self\', \'vals\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "enqueue_many"
+ argspec: "args=[\'self\', \'vals\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "from_list"
+ argspec: "args=[\'index\', \'queues\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "is_closed"
+ argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "size"
+ argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+}
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.-random-shuffle-queue.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.-random-shuffle-queue.pbtxt
new file mode 100644
index 0000000000..f3ca841393
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.-random-shuffle-queue.pbtxt
@@ -0,0 +1,66 @@
+path: "tensorflow.RandomShuffleQueue"
+tf_class {
+ is_instance: "<class \'tensorflow.python.ops.data_flow_ops.RandomShuffleQueue\'>"
+ is_instance: "<class \'tensorflow.python.ops.data_flow_ops.QueueBase\'>"
+ is_instance: "<type \'object\'>"
+ member {
+ name: "dtypes"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "name"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "names"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "queue_ref"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "shapes"
+ mtype: "<type \'property\'>"
+ }
+ member_method {
+ name: "__init__"
+ argspec: "args=[\'self\', \'capacity\', \'min_after_dequeue\', \'dtypes\', \'shapes\', \'names\', \'seed\', \'shared_name\', \'name\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'None\', \'random_shuffle_queue\'], "
+ }
+ member_method {
+ name: "close"
+ argspec: "args=[\'self\', \'cancel_pending_enqueues\', \'name\'], varargs=None, keywords=None, defaults=[\'False\', \'None\'], "
+ }
+ member_method {
+ name: "dequeue"
+ argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "dequeue_many"
+ argspec: "args=[\'self\', \'n\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "dequeue_up_to"
+ argspec: "args=[\'self\', \'n\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "enqueue"
+ argspec: "args=[\'self\', \'vals\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "enqueue_many"
+ argspec: "args=[\'self\', \'vals\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "from_list"
+ argspec: "args=[\'index\', \'queues\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "is_closed"
+ argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "size"
+ argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+}
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.-register-gradient.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.-register-gradient.pbtxt
new file mode 100644
index 0000000000..4d6e4137d1
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.-register-gradient.pbtxt
@@ -0,0 +1,9 @@
+path: "tensorflow.RegisterGradient"
+tf_class {
+ is_instance: "<class \'tensorflow.python.framework.ops.RegisterGradient\'>"
+ is_instance: "<type \'object\'>"
+ member_method {
+ name: "__init__"
+ argspec: "args=[\'self\', \'op_type\'], varargs=None, keywords=None, defaults=None"
+ }
+}
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.-run-metadata.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.-run-metadata.pbtxt
new file mode 100644
index 0000000000..1287940326
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.-run-metadata.pbtxt
@@ -0,0 +1,27 @@
+path: "tensorflow.RunMetadata"
+tf_proto {
+ descriptor {
+ name: "RunMetadata"
+ field {
+ name: "step_stats"
+ number: 1
+ label: LABEL_OPTIONAL
+ type: TYPE_MESSAGE
+ type_name: ".tensorflow.StepStats"
+ }
+ field {
+ name: "cost_graph"
+ number: 2
+ label: LABEL_OPTIONAL
+ type: TYPE_MESSAGE
+ type_name: ".tensorflow.CostGraphDef"
+ }
+ field {
+ name: "partition_graphs"
+ number: 3
+ label: LABEL_REPEATED
+ type: TYPE_MESSAGE
+ type_name: ".tensorflow.GraphDef"
+ }
+ }
+}
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.-run-options.-experimental.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.-run-options.-experimental.pbtxt
new file mode 100644
index 0000000000..537e73aa89
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.-run-options.-experimental.pbtxt
@@ -0,0 +1,12 @@
+path: "tensorflow.RunOptions.Experimental"
+tf_proto {
+ descriptor {
+ name: "Experimental"
+ field {
+ name: "collective_graph_key"
+ number: 1
+ label: LABEL_OPTIONAL
+ type: TYPE_INT64
+ }
+ }
+}
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.-run-options.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.-run-options.pbtxt
new file mode 100644
index 0000000000..cec04a2bf0
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.-run-options.pbtxt
@@ -0,0 +1,83 @@
+path: "tensorflow.RunOptions"
+tf_proto {
+ descriptor {
+ name: "RunOptions"
+ field {
+ name: "trace_level"
+ number: 1
+ label: LABEL_OPTIONAL
+ type: TYPE_ENUM
+ type_name: ".tensorflow.RunOptions.TraceLevel"
+ }
+ field {
+ name: "timeout_in_ms"
+ number: 2
+ label: LABEL_OPTIONAL
+ type: TYPE_INT64
+ }
+ field {
+ name: "inter_op_thread_pool"
+ number: 3
+ label: LABEL_OPTIONAL
+ type: TYPE_INT32
+ }
+ field {
+ name: "output_partition_graphs"
+ number: 5
+ label: LABEL_OPTIONAL
+ type: TYPE_BOOL
+ }
+ field {
+ name: "debug_options"
+ number: 6
+ label: LABEL_OPTIONAL
+ type: TYPE_MESSAGE
+ type_name: ".tensorflow.DebugOptions"
+ }
+ field {
+ name: "report_tensor_allocations_upon_oom"
+ number: 7
+ label: LABEL_OPTIONAL
+ type: TYPE_BOOL
+ }
+ field {
+ name: "experimental"
+ number: 8
+ label: LABEL_OPTIONAL
+ type: TYPE_MESSAGE
+ type_name: ".tensorflow.RunOptions.Experimental"
+ }
+ nested_type {
+ name: "Experimental"
+ field {
+ name: "collective_graph_key"
+ number: 1
+ label: LABEL_OPTIONAL
+ type: TYPE_INT64
+ }
+ }
+ enum_type {
+ name: "TraceLevel"
+ value {
+ name: "NO_TRACE"
+ number: 0
+ }
+ value {
+ name: "SOFTWARE_TRACE"
+ number: 1
+ }
+ value {
+ name: "HARDWARE_TRACE"
+ number: 2
+ }
+ value {
+ name: "FULL_TRACE"
+ number: 3
+ }
+ }
+ reserved_range {
+ start: 4
+ end: 5
+ }
+ }
+}
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.-session-log.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.-session-log.pbtxt
new file mode 100644
index 0000000000..259f241874
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.-session-log.pbtxt
@@ -0,0 +1,44 @@
+path: "tensorflow.SessionLog"
+tf_proto {
+ descriptor {
+ name: "SessionLog"
+ field {
+ name: "status"
+ number: 1
+ label: LABEL_OPTIONAL
+ type: TYPE_ENUM
+ type_name: ".tensorflow.SessionLog.SessionStatus"
+ }
+ field {
+ name: "checkpoint_path"
+ number: 2
+ label: LABEL_OPTIONAL
+ type: TYPE_STRING
+ }
+ field {
+ name: "msg"
+ number: 3
+ label: LABEL_OPTIONAL
+ type: TYPE_STRING
+ }
+ enum_type {
+ name: "SessionStatus"
+ value {
+ name: "STATUS_UNSPECIFIED"
+ number: 0
+ }
+ value {
+ name: "START"
+ number: 1
+ }
+ value {
+ name: "STOP"
+ number: 2
+ }
+ value {
+ name: "CHECKPOINT"
+ number: 3
+ }
+ }
+ }
+}
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.-session.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.-session.pbtxt
new file mode 100644
index 0000000000..1d6b037f9c
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.-session.pbtxt
@@ -0,0 +1,55 @@
+path: "tensorflow.Session"
+tf_class {
+ is_instance: "<class \'tensorflow.python.client.session.Session\'>"
+ is_instance: "<class \'tensorflow.python.client.session.BaseSession\'>"
+ is_instance: "<class \'tensorflow.python.client.session.SessionInterface\'>"
+ is_instance: "<type \'object\'>"
+ member {
+ name: "graph"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "graph_def"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "sess_str"
+ mtype: "<type \'property\'>"
+ }
+ member_method {
+ name: "__init__"
+ argspec: "args=[\'self\', \'target\', \'graph\', \'config\'], varargs=None, keywords=None, defaults=[\'\', \'None\', \'None\'], "
+ }
+ member_method {
+ name: "as_default"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "close"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "list_devices"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "make_callable"
+ argspec: "args=[\'self\', \'fetches\', \'feed_list\', \'accept_options\'], varargs=None, keywords=None, defaults=[\'None\', \'False\'], "
+ }
+ member_method {
+ name: "partial_run"
+ argspec: "args=[\'self\', \'handle\', \'fetches\', \'feed_dict\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "partial_run_setup"
+ argspec: "args=[\'self\', \'fetches\', \'feeds\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "reset"
+ argspec: "args=[\'target\', \'containers\', \'config\'], varargs=None, keywords=None, defaults=[\'None\', \'None\'], "
+ }
+ member_method {
+ name: "run"
+ argspec: "args=[\'self\', \'fetches\', \'feed_dict\', \'options\', \'run_metadata\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\'], "
+ }
+}
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.-sparse-conditional-accumulator.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.-sparse-conditional-accumulator.pbtxt
new file mode 100644
index 0000000000..39ff336c4f
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.-sparse-conditional-accumulator.pbtxt
@@ -0,0 +1,46 @@
+path: "tensorflow.SparseConditionalAccumulator"
+tf_class {
+ is_instance: "<class \'tensorflow.python.ops.data_flow_ops.SparseConditionalAccumulator\'>"
+ is_instance: "<class \'tensorflow.python.ops.data_flow_ops.ConditionalAccumulatorBase\'>"
+ is_instance: "<type \'object\'>"
+ member {
+ name: "accumulator_ref"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "dtype"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "name"
+ mtype: "<type \'property\'>"
+ }
+ member_method {
+ name: "__init__"
+ argspec: "args=[\'self\', \'dtype\', \'shape\', \'shared_name\', \'name\', \'reduction_type\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'sparse_conditional_accumulator\', \'MEAN\'], "
+ }
+ member_method {
+ name: "apply_grad"
+ argspec: "args=[\'self\', \'grad_indices\', \'grad_values\', \'grad_shape\', \'local_step\', \'name\'], varargs=None, keywords=None, defaults=[\'None\', \'0\', \'None\'], "
+ }
+ member_method {
+ name: "apply_indexed_slices_grad"
+ argspec: "args=[\'self\', \'grad\', \'local_step\', \'name\'], varargs=None, keywords=None, defaults=[\'0\', \'None\'], "
+ }
+ member_method {
+ name: "num_accumulated"
+ argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "set_global_step"
+ argspec: "args=[\'self\', \'new_global_step\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "take_grad"
+ argspec: "args=[\'self\', \'num_required\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "take_indexed_slices_grad"
+ argspec: "args=[\'self\', \'num_required\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+}
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.-sparse-feature.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.-sparse-feature.pbtxt
new file mode 100644
index 0000000000..d875394fb5
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.-sparse-feature.pbtxt
@@ -0,0 +1,35 @@
+path: "tensorflow.SparseFeature"
+tf_class {
+ is_instance: "<class \'tensorflow.python.ops.parsing_ops.SparseFeature\'>"
+ is_instance: "<class \'tensorflow.python.ops.parsing_ops.SparseFeature\'>"
+ is_instance: "<type \'tuple\'>"
+ member {
+ name: "already_sorted"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "dtype"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "index_key"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "size"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "value_key"
+ mtype: "<type \'property\'>"
+ }
+ member_method {
+ name: "__init__"
+ }
+ member_method {
+ name: "count"
+ }
+ member_method {
+ name: "index"
+ }
+}
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.-sparse-tensor-value.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.-sparse-tensor-value.pbtxt
new file mode 100644
index 0000000000..d33fd4d5d7
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.-sparse-tensor-value.pbtxt
@@ -0,0 +1,26 @@
+path: "tensorflow.SparseTensorValue"
+tf_class {
+ is_instance: "<class \'tensorflow.python.framework.sparse_tensor.SparseTensorValue\'>"
+ is_instance: "<type \'tuple\'>"
+ member {
+ name: "dense_shape"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "indices"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "values"
+ mtype: "<type \'property\'>"
+ }
+ member_method {
+ name: "__init__"
+ }
+ member_method {
+ name: "count"
+ }
+ member_method {
+ name: "index"
+ }
+}
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.-sparse-tensor.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.-sparse-tensor.pbtxt
new file mode 100644
index 0000000000..3add49e90d
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.-sparse-tensor.pbtxt
@@ -0,0 +1,54 @@
+path: "tensorflow.SparseTensor"
+tf_class {
+ is_instance: "<class \'tensorflow.python.framework.sparse_tensor.SparseTensor\'>"
+ is_instance: "<class \'tensorflow.python.framework.ops._TensorLike\'>"
+ is_instance: "<type \'object\'>"
+ member {
+ name: "dense_shape"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "dtype"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "graph"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "indices"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "op"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "shape"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "values"
+ mtype: "<type \'property\'>"
+ }
+ member_method {
+ name: "__init__"
+ argspec: "args=[\'self\', \'indices\', \'values\', \'dense_shape\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "consumers"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "eval"
+ argspec: "args=[\'self\', \'feed_dict\', \'session\'], varargs=None, keywords=None, defaults=[\'None\', \'None\'], "
+ }
+ member_method {
+ name: "from_value"
+ argspec: "args=[\'cls\', \'sparse_tensor_value\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_shape"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+}
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.-summary-metadata.-plugin-data.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.-summary-metadata.-plugin-data.pbtxt
new file mode 100644
index 0000000000..a66b74b315
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.-summary-metadata.-plugin-data.pbtxt
@@ -0,0 +1,18 @@
+path: "tensorflow.SummaryMetadata.PluginData"
+tf_proto {
+ descriptor {
+ name: "PluginData"
+ field {
+ name: "plugin_name"
+ number: 1
+ label: LABEL_OPTIONAL
+ type: TYPE_STRING
+ }
+ field {
+ name: "content"
+ number: 2
+ label: LABEL_OPTIONAL
+ type: TYPE_BYTES
+ }
+ }
+}
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.-summary-metadata.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.-summary-metadata.pbtxt
new file mode 100644
index 0000000000..c02575b962
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.-summary-metadata.pbtxt
@@ -0,0 +1,40 @@
+path: "tensorflow.SummaryMetadata"
+tf_proto {
+ descriptor {
+ name: "SummaryMetadata"
+ field {
+ name: "plugin_data"
+ number: 1
+ label: LABEL_OPTIONAL
+ type: TYPE_MESSAGE
+ type_name: ".tensorflow.SummaryMetadata.PluginData"
+ }
+ field {
+ name: "display_name"
+ number: 2
+ label: LABEL_OPTIONAL
+ type: TYPE_STRING
+ }
+ field {
+ name: "summary_description"
+ number: 3
+ label: LABEL_OPTIONAL
+ type: TYPE_STRING
+ }
+ nested_type {
+ name: "PluginData"
+ field {
+ name: "plugin_name"
+ number: 1
+ label: LABEL_OPTIONAL
+ type: TYPE_STRING
+ }
+ field {
+ name: "content"
+ number: 2
+ label: LABEL_OPTIONAL
+ type: TYPE_BYTES
+ }
+ }
+ }
+}
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.-summary.-audio.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.-summary.-audio.pbtxt
new file mode 100644
index 0000000000..94f712073e
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.-summary.-audio.pbtxt
@@ -0,0 +1,36 @@
+path: "tensorflow.Summary.Audio"
+tf_proto {
+ descriptor {
+ name: "Audio"
+ field {
+ name: "sample_rate"
+ number: 1
+ label: LABEL_OPTIONAL
+ type: TYPE_FLOAT
+ }
+ field {
+ name: "num_channels"
+ number: 2
+ label: LABEL_OPTIONAL
+ type: TYPE_INT64
+ }
+ field {
+ name: "length_frames"
+ number: 3
+ label: LABEL_OPTIONAL
+ type: TYPE_INT64
+ }
+ field {
+ name: "encoded_audio_string"
+ number: 4
+ label: LABEL_OPTIONAL
+ type: TYPE_BYTES
+ }
+ field {
+ name: "content_type"
+ number: 5
+ label: LABEL_OPTIONAL
+ type: TYPE_STRING
+ }
+ }
+}
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.-summary.-image.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.-summary.-image.pbtxt
new file mode 100644
index 0000000000..fc1acb483b
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.-summary.-image.pbtxt
@@ -0,0 +1,30 @@
+path: "tensorflow.Summary.Image"
+tf_proto {
+ descriptor {
+ name: "Image"
+ field {
+ name: "height"
+ number: 1
+ label: LABEL_OPTIONAL
+ type: TYPE_INT32
+ }
+ field {
+ name: "width"
+ number: 2
+ label: LABEL_OPTIONAL
+ type: TYPE_INT32
+ }
+ field {
+ name: "colorspace"
+ number: 3
+ label: LABEL_OPTIONAL
+ type: TYPE_INT32
+ }
+ field {
+ name: "encoded_image_string"
+ number: 4
+ label: LABEL_OPTIONAL
+ type: TYPE_BYTES
+ }
+ }
+}
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.-summary.-value.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.-summary.-value.pbtxt
new file mode 100644
index 0000000000..feb84b6ee9
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.-summary.-value.pbtxt
@@ -0,0 +1,74 @@
+path: "tensorflow.Summary.Value"
+tf_proto {
+ descriptor {
+ name: "Value"
+ field {
+ name: "node_name"
+ number: 7
+ label: LABEL_OPTIONAL
+ type: TYPE_STRING
+ }
+ field {
+ name: "tag"
+ number: 1
+ label: LABEL_OPTIONAL
+ type: TYPE_STRING
+ }
+ field {
+ name: "metadata"
+ number: 9
+ label: LABEL_OPTIONAL
+ type: TYPE_MESSAGE
+ type_name: ".tensorflow.SummaryMetadata"
+ }
+ field {
+ name: "simple_value"
+ number: 2
+ label: LABEL_OPTIONAL
+ type: TYPE_FLOAT
+ oneof_index: 0
+ }
+ field {
+ name: "obsolete_old_style_histogram"
+ number: 3
+ label: LABEL_OPTIONAL
+ type: TYPE_BYTES
+ oneof_index: 0
+ }
+ field {
+ name: "image"
+ number: 4
+ label: LABEL_OPTIONAL
+ type: TYPE_MESSAGE
+ type_name: ".tensorflow.Summary.Image"
+ oneof_index: 0
+ }
+ field {
+ name: "histo"
+ number: 5
+ label: LABEL_OPTIONAL
+ type: TYPE_MESSAGE
+ type_name: ".tensorflow.HistogramProto"
+ oneof_index: 0
+ }
+ field {
+ name: "audio"
+ number: 6
+ label: LABEL_OPTIONAL
+ type: TYPE_MESSAGE
+ type_name: ".tensorflow.Summary.Audio"
+ oneof_index: 0
+ }
+ field {
+ name: "tensor"
+ number: 8
+ label: LABEL_OPTIONAL
+ type: TYPE_MESSAGE
+ type_name: ".tensorflow.TensorProto"
+ oneof_index: 0
+ }
+ oneof_decl {
+ name: "value"
+ }
+ }
+}
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.-summary.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.-summary.pbtxt
new file mode 100644
index 0000000000..b2bdff7171
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.-summary.pbtxt
@@ -0,0 +1,144 @@
+path: "tensorflow.Summary"
+tf_proto {
+ descriptor {
+ name: "Summary"
+ field {
+ name: "value"
+ number: 1
+ label: LABEL_REPEATED
+ type: TYPE_MESSAGE
+ type_name: ".tensorflow.Summary.Value"
+ }
+ nested_type {
+ name: "Image"
+ field {
+ name: "height"
+ number: 1
+ label: LABEL_OPTIONAL
+ type: TYPE_INT32
+ }
+ field {
+ name: "width"
+ number: 2
+ label: LABEL_OPTIONAL
+ type: TYPE_INT32
+ }
+ field {
+ name: "colorspace"
+ number: 3
+ label: LABEL_OPTIONAL
+ type: TYPE_INT32
+ }
+ field {
+ name: "encoded_image_string"
+ number: 4
+ label: LABEL_OPTIONAL
+ type: TYPE_BYTES
+ }
+ }
+ nested_type {
+ name: "Audio"
+ field {
+ name: "sample_rate"
+ number: 1
+ label: LABEL_OPTIONAL
+ type: TYPE_FLOAT
+ }
+ field {
+ name: "num_channels"
+ number: 2
+ label: LABEL_OPTIONAL
+ type: TYPE_INT64
+ }
+ field {
+ name: "length_frames"
+ number: 3
+ label: LABEL_OPTIONAL
+ type: TYPE_INT64
+ }
+ field {
+ name: "encoded_audio_string"
+ number: 4
+ label: LABEL_OPTIONAL
+ type: TYPE_BYTES
+ }
+ field {
+ name: "content_type"
+ number: 5
+ label: LABEL_OPTIONAL
+ type: TYPE_STRING
+ }
+ }
+ nested_type {
+ name: "Value"
+ field {
+ name: "node_name"
+ number: 7
+ label: LABEL_OPTIONAL
+ type: TYPE_STRING
+ }
+ field {
+ name: "tag"
+ number: 1
+ label: LABEL_OPTIONAL
+ type: TYPE_STRING
+ }
+ field {
+ name: "metadata"
+ number: 9
+ label: LABEL_OPTIONAL
+ type: TYPE_MESSAGE
+ type_name: ".tensorflow.SummaryMetadata"
+ }
+ field {
+ name: "simple_value"
+ number: 2
+ label: LABEL_OPTIONAL
+ type: TYPE_FLOAT
+ oneof_index: 0
+ }
+ field {
+ name: "obsolete_old_style_histogram"
+ number: 3
+ label: LABEL_OPTIONAL
+ type: TYPE_BYTES
+ oneof_index: 0
+ }
+ field {
+ name: "image"
+ number: 4
+ label: LABEL_OPTIONAL
+ type: TYPE_MESSAGE
+ type_name: ".tensorflow.Summary.Image"
+ oneof_index: 0
+ }
+ field {
+ name: "histo"
+ number: 5
+ label: LABEL_OPTIONAL
+ type: TYPE_MESSAGE
+ type_name: ".tensorflow.HistogramProto"
+ oneof_index: 0
+ }
+ field {
+ name: "audio"
+ number: 6
+ label: LABEL_OPTIONAL
+ type: TYPE_MESSAGE
+ type_name: ".tensorflow.Summary.Audio"
+ oneof_index: 0
+ }
+ field {
+ name: "tensor"
+ number: 8
+ label: LABEL_OPTIONAL
+ type: TYPE_MESSAGE
+ type_name: ".tensorflow.TensorProto"
+ oneof_index: 0
+ }
+ oneof_decl {
+ name: "value"
+ }
+ }
+ }
+}
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.-tensor-array.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.-tensor-array.pbtxt
new file mode 100644
index 0000000000..ed088c41ed
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.-tensor-array.pbtxt
@@ -0,0 +1,69 @@
+path: "tensorflow.TensorArray"
+tf_class {
+ is_instance: "<class \'tensorflow.python.ops.tensor_array_ops.TensorArray\'>"
+ is_instance: "<type \'object\'>"
+ member {
+ name: "dtype"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "flow"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "handle"
+ mtype: "<type \'property\'>"
+ }
+ member_method {
+ name: "__init__"
+ argspec: "args=[\'self\', \'dtype\', \'size\', \'dynamic_size\', \'clear_after_read\', \'tensor_array_name\', \'handle\', \'flow\', \'infer_shape\', \'element_shape\', \'colocate_with_first_write_call\', \'name\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'True\', \'None\', \'True\', \'None\'], "
+ }
+ member_method {
+ name: "close"
+ argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "concat"
+ argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "gather"
+ argspec: "args=[\'self\', \'indices\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "grad"
+ argspec: "args=[\'self\', \'source\', \'flow\', \'name\'], varargs=None, keywords=None, defaults=[\'None\', \'None\'], "
+ }
+ member_method {
+ name: "identity"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "read"
+ argspec: "args=[\'self\', \'index\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "scatter"
+ argspec: "args=[\'self\', \'indices\', \'value\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "size"
+ argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "split"
+ argspec: "args=[\'self\', \'value\', \'lengths\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "stack"
+ argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "unstack"
+ argspec: "args=[\'self\', \'value\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "write"
+ argspec: "args=[\'self\', \'index\', \'value\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+}
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.-tensor-info.-coo-sparse.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.-tensor-info.-coo-sparse.pbtxt
new file mode 100644
index 0000000000..0064c8460c
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.-tensor-info.-coo-sparse.pbtxt
@@ -0,0 +1,24 @@
+path: "tensorflow.TensorInfo.CooSparse"
+tf_proto {
+ descriptor {
+ name: "CooSparse"
+ field {
+ name: "values_tensor_name"
+ number: 1
+ label: LABEL_OPTIONAL
+ type: TYPE_STRING
+ }
+ field {
+ name: "indices_tensor_name"
+ number: 2
+ label: LABEL_OPTIONAL
+ type: TYPE_STRING
+ }
+ field {
+ name: "dense_shape_tensor_name"
+ number: 3
+ label: LABEL_OPTIONAL
+ type: TYPE_STRING
+ }
+ }
+}
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.-tensor-info.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.-tensor-info.pbtxt
new file mode 100644
index 0000000000..63566c808e
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.-tensor-info.pbtxt
@@ -0,0 +1,59 @@
+path: "tensorflow.TensorInfo"
+tf_proto {
+ descriptor {
+ name: "TensorInfo"
+ field {
+ name: "name"
+ number: 1
+ label: LABEL_OPTIONAL
+ type: TYPE_STRING
+ oneof_index: 0
+ }
+ field {
+ name: "coo_sparse"
+ number: 4
+ label: LABEL_OPTIONAL
+ type: TYPE_MESSAGE
+ type_name: ".tensorflow.TensorInfo.CooSparse"
+ oneof_index: 0
+ }
+ field {
+ name: "dtype"
+ number: 2
+ label: LABEL_OPTIONAL
+ type: TYPE_ENUM
+ type_name: ".tensorflow.DataType"
+ }
+ field {
+ name: "tensor_shape"
+ number: 3
+ label: LABEL_OPTIONAL
+ type: TYPE_MESSAGE
+ type_name: ".tensorflow.TensorShapeProto"
+ }
+ nested_type {
+ name: "CooSparse"
+ field {
+ name: "values_tensor_name"
+ number: 1
+ label: LABEL_OPTIONAL
+ type: TYPE_STRING
+ }
+ field {
+ name: "indices_tensor_name"
+ number: 2
+ label: LABEL_OPTIONAL
+ type: TYPE_STRING
+ }
+ field {
+ name: "dense_shape_tensor_name"
+ number: 3
+ label: LABEL_OPTIONAL
+ type: TYPE_STRING
+ }
+ }
+ oneof_decl {
+ name: "encoding"
+ }
+ }
+}
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.-tensor-shape.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.-tensor-shape.pbtxt
new file mode 100644
index 0000000000..8e3598fb24
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.-tensor-shape.pbtxt
@@ -0,0 +1,77 @@
+path: "tensorflow.TensorShape"
+tf_class {
+ is_instance: "<class \'tensorflow.python.framework.tensor_shape.TensorShape\'>"
+ is_instance: "<type \'object\'>"
+ member {
+ name: "dims"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "ndims"
+ mtype: "<type \'property\'>"
+ }
+ member_method {
+ name: "__init__"
+ argspec: "args=[\'self\', \'dims\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "as_list"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "as_proto"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "assert_has_rank"
+ argspec: "args=[\'self\', \'rank\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "assert_is_compatible_with"
+ argspec: "args=[\'self\', \'other\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "assert_is_fully_defined"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "assert_same_rank"
+ argspec: "args=[\'self\', \'other\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "concatenate"
+ argspec: "args=[\'self\', \'other\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "is_compatible_with"
+ argspec: "args=[\'self\', \'other\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "is_fully_defined"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "merge_with"
+ argspec: "args=[\'self\', \'other\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "most_specific_compatible_shape"
+ argspec: "args=[\'self\', \'other\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "num_elements"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "with_rank"
+ argspec: "args=[\'self\', \'rank\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "with_rank_at_least"
+ argspec: "args=[\'self\', \'rank\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "with_rank_at_most"
+ argspec: "args=[\'self\', \'rank\'], varargs=None, keywords=None, defaults=None"
+ }
+}
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.-tensor.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.-tensor.pbtxt
new file mode 100644
index 0000000000..38d19bb537
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.-tensor.pbtxt
@@ -0,0 +1,58 @@
+path: "tensorflow.Tensor"
+tf_class {
+ is_instance: "<class \'tensorflow.python.framework.ops.Tensor\'>"
+ is_instance: "<class \'tensorflow.python.framework.ops._TensorLike\'>"
+ is_instance: "<type \'object\'>"
+ member {
+ name: "OVERLOADABLE_OPERATORS"
+ mtype: "<type \'set\'>"
+ }
+ member {
+ name: "device"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "dtype"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "graph"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "name"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "op"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "shape"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "value_index"
+ mtype: "<type \'property\'>"
+ }
+ member_method {
+ name: "__init__"
+ argspec: "args=[\'self\', \'op\', \'value_index\', \'dtype\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "consumers"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "eval"
+ argspec: "args=[\'self\', \'feed_dict\', \'session\'], varargs=None, keywords=None, defaults=[\'None\', \'None\'], "
+ }
+ member_method {
+ name: "get_shape"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "set_shape"
+ argspec: "args=[\'self\', \'shape\'], varargs=None, keywords=None, defaults=None"
+ }
+}
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.-var-len-feature.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.-var-len-feature.pbtxt
new file mode 100644
index 0000000000..54b66f43f8
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.-var-len-feature.pbtxt
@@ -0,0 +1,19 @@
+path: "tensorflow.VarLenFeature"
+tf_class {
+ is_instance: "<class \'tensorflow.python.ops.parsing_ops.VarLenFeature\'>"
+ is_instance: "<class \'tensorflow.python.ops.parsing_ops.VarLenFeature\'>"
+ is_instance: "<type \'tuple\'>"
+ member {
+ name: "dtype"
+ mtype: "<type \'property\'>"
+ }
+ member_method {
+ name: "__init__"
+ }
+ member_method {
+ name: "count"
+ }
+ member_method {
+ name: "index"
+ }
+}
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.-variable-aggregation.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.-variable-aggregation.pbtxt
new file mode 100644
index 0000000000..66a20547eb
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.-variable-aggregation.pbtxt
@@ -0,0 +1,20 @@
+path: "tensorflow.VariableAggregation"
+tf_class {
+ is_instance: "<enum \'VariableAggregation\'>"
+ member {
+ name: "MEAN"
+ mtype: "<enum \'VariableAggregation\'>"
+ }
+ member {
+ name: "NONE"
+ mtype: "<enum \'VariableAggregation\'>"
+ }
+ member {
+ name: "ONLY_FIRST_TOWER"
+ mtype: "<enum \'VariableAggregation\'>"
+ }
+ member {
+ name: "SUM"
+ mtype: "<enum \'VariableAggregation\'>"
+ }
+}
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.-variable-scope.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.-variable-scope.pbtxt
new file mode 100644
index 0000000000..c13eb7b8bb
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.-variable-scope.pbtxt
@@ -0,0 +1,105 @@
+path: "tensorflow.VariableScope"
+tf_class {
+ is_instance: "<class \'tensorflow.python.ops.variable_scope.VariableScope\'>"
+ is_instance: "<type \'object\'>"
+ member {
+ name: "caching_device"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "constraint"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "custom_getter"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "dtype"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "initializer"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "name"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "original_name_scope"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "partitioner"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "regularizer"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "reuse"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "use_resource"
+ mtype: "<type \'property\'>"
+ }
+ member_method {
+ name: "__init__"
+ argspec: "args=[\'self\', \'reuse\', \'name\', \'initializer\', \'regularizer\', \'caching_device\', \'partitioner\', \'custom_getter\', \'name_scope\', \'dtype\', \'use_resource\', \'constraint\'], varargs=None, keywords=None, defaults=[\'\', \'None\', \'None\', \'None\', \'None\', \'None\', \'\', \"<dtype: \'float32\'>\", \'None\', \'None\'], "
+ }
+ member_method {
+ name: "get_collection"
+ argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_variable"
+ argspec: "args=[\'self\', \'var_store\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'reuse\', \'trainable\', \'collections\', \'caching_device\', \'partitioner\', \'validate_shape\', \'use_resource\', \'custom_getter\', \'constraint\', \'synchronization\', \'aggregation\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'True\', \'None\', \'None\', \'None\', \'VariableSynchronization.AUTO\', \'VariableAggregation.NONE\'], "
+ }
+ member_method {
+ name: "global_variables"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "local_variables"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "reuse_variables"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "set_caching_device"
+ argspec: "args=[\'self\', \'caching_device\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "set_custom_getter"
+ argspec: "args=[\'self\', \'custom_getter\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "set_dtype"
+ argspec: "args=[\'self\', \'dtype\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "set_initializer"
+ argspec: "args=[\'self\', \'initializer\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "set_partitioner"
+ argspec: "args=[\'self\', \'partitioner\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "set_regularizer"
+ argspec: "args=[\'self\', \'regularizer\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "set_use_resource"
+ argspec: "args=[\'self\', \'use_resource\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "trainable_variables"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+}
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.-variable-synchronization.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.-variable-synchronization.pbtxt
new file mode 100644
index 0000000000..7589bb2888
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.-variable-synchronization.pbtxt
@@ -0,0 +1,20 @@
+path: "tensorflow.VariableSynchronization"
+tf_class {
+ is_instance: "<enum \'VariableSynchronization\'>"
+ member {
+ name: "AUTO"
+ mtype: "<enum \'VariableSynchronization\'>"
+ }
+ member {
+ name: "NONE"
+ mtype: "<enum \'VariableSynchronization\'>"
+ }
+ member {
+ name: "ON_READ"
+ mtype: "<enum \'VariableSynchronization\'>"
+ }
+ member {
+ name: "ON_WRITE"
+ mtype: "<enum \'VariableSynchronization\'>"
+ }
+}
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.-variable.-save-slice-info.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.-variable.-save-slice-info.pbtxt
new file mode 100644
index 0000000000..ac3ccd468b
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.-variable.-save-slice-info.pbtxt
@@ -0,0 +1,17 @@
+path: "tensorflow.Variable.SaveSliceInfo"
+tf_class {
+ is_instance: "<class \'tensorflow.python.ops.variables.SaveSliceInfo\'>"
+ is_instance: "<type \'object\'>"
+ member {
+ name: "spec"
+ mtype: "<type \'property\'>"
+ }
+ member_method {
+ name: "__init__"
+ argspec: "args=[\'self\', \'full_name\', \'full_shape\', \'var_offset\', \'var_shape\', \'save_slice_info_def\', \'import_scope\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'None\', \'None\', \'None\'], "
+ }
+ member_method {
+ name: "to_proto"
+ argspec: "args=[\'self\', \'export_scope\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+}
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.-variable.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.-variable.pbtxt
new file mode 100644
index 0000000000..05698b03ee
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.-variable.pbtxt
@@ -0,0 +1,130 @@
+path: "tensorflow.Variable"
+tf_class {
+ is_instance: "<class \'tensorflow.python.ops.variables.Variable\'>"
+ is_instance: "<class \'tensorflow.python.training.checkpointable.base.CheckpointableBase\'>"
+ is_instance: "<type \'object\'>"
+ member {
+ name: "SaveSliceInfo"
+ mtype: "<type \'type\'>"
+ }
+ member {
+ name: "constraint"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "device"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "dtype"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "graph"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "initial_value"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "initializer"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "name"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "op"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "shape"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "trainable"
+ mtype: "<type \'property\'>"
+ }
+ member_method {
+ name: "__init__"
+ argspec: "args=[\'self\', \'initial_value\', \'trainable\', \'collections\', \'validate_shape\', \'caching_device\', \'name\', \'variable_def\', \'dtype\', \'expected_shape\', \'import_scope\', \'constraint\', \'use_resource\', \'synchronization\', \'aggregation\'], varargs=None, keywords=None, defaults=[\'None\', \'True\', \'None\', \'True\', \'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'VariableSynchronization.AUTO\', \'VariableAggregation.NONE\'], "
+ }
+ member_method {
+ name: "assign"
+ argspec: "args=[\'self\', \'value\', \'use_locking\', \'name\', \'read_value\'], varargs=None, keywords=None, defaults=[\'False\', \'None\', \'True\'], "
+ }
+ member_method {
+ name: "assign_add"
+ argspec: "args=[\'self\', \'delta\', \'use_locking\', \'name\', \'read_value\'], varargs=None, keywords=None, defaults=[\'False\', \'None\', \'True\'], "
+ }
+ member_method {
+ name: "assign_sub"
+ argspec: "args=[\'self\', \'delta\', \'use_locking\', \'name\', \'read_value\'], varargs=None, keywords=None, defaults=[\'False\', \'None\', \'True\'], "
+ }
+ member_method {
+ name: "count_up_to"
+ argspec: "args=[\'self\', \'limit\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "eval"
+ argspec: "args=[\'self\', \'session\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "from_proto"
+ argspec: "args=[\'variable_def\', \'import_scope\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "get_shape"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "initialized_value"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "load"
+ argspec: "args=[\'self\', \'value\', \'session\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "read_value"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "scatter_add"
+ argspec: "args=[\'self\', \'sparse_delta\', \'use_locking\', \'name\'], varargs=None, keywords=None, defaults=[\'False\', \'None\'], "
+ }
+ member_method {
+ name: "scatter_nd_add"
+ argspec: "args=[\'self\', \'indices\', \'updates\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "scatter_nd_sub"
+ argspec: "args=[\'self\', \'indices\', \'updates\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "scatter_nd_update"
+ argspec: "args=[\'self\', \'indices\', \'updates\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "scatter_sub"
+ argspec: "args=[\'self\', \'sparse_delta\', \'use_locking\', \'name\'], varargs=None, keywords=None, defaults=[\'False\', \'None\'], "
+ }
+ member_method {
+ name: "scatter_update"
+ argspec: "args=[\'self\', \'sparse_delta\', \'use_locking\', \'name\'], varargs=None, keywords=None, defaults=[\'False\', \'None\'], "
+ }
+ member_method {
+ name: "set_shape"
+ argspec: "args=[\'self\', \'shape\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "to_proto"
+ argspec: "args=[\'self\', \'export_scope\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "value"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+}
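For reference, a minimal graph-mode sketch of the tf.Variable surface captured above, including the newly listed synchronization/aggregation constructor arguments; the variable name is illustrative and TF 1.x session semantics are assumed.

    import tensorflow as tf

    # Constructor matches the argspec above; synchronization/aggregation default
    # to AUTO/NONE and are passed here only to illustrate the new parameters.
    v = tf.Variable(tf.zeros([3]), name="v",
                    synchronization=tf.VariableSynchronization.ON_WRITE,
                    aggregation=tf.VariableAggregation.NONE)
    inc = v.assign_add(tf.ones([3]), read_value=True)  # returns the updated value

    with tf.Session() as sess:
      sess.run(v.initializer)   # the `initializer` property listed above
      print(sess.run(inc))      # [1. 1. 1.]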
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.app.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.app.pbtxt
new file mode 100644
index 0000000000..85044a8987
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.app.pbtxt
@@ -0,0 +1,11 @@
+path: "tensorflow.app"
+tf_module {
+ member {
+ name: "flags"
+ mtype: "<type \'module\'>"
+ }
+ member_method {
+ name: "run"
+ argspec: "args=[\'main\', \'argv\'], varargs=None, keywords=None, defaults=[\'None\', \'None\'], "
+ }
+}
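A small usage sketch for the tf.app module above; the flag name and default are hypothetical.

    import tensorflow as tf

    flags = tf.app.flags
    flags.DEFINE_string("data_dir", "/tmp/data", "Input directory (hypothetical flag).")
    FLAGS = flags.FLAGS

    def main(argv):
      del argv  # unused
      print(FLAGS.data_dir)

    if __name__ == "__main__":
      tf.app.run(main)   # matches run(main=None, argv=None) above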
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.bitwise.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.bitwise.pbtxt
new file mode 100644
index 0000000000..01cbd55c5d
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.bitwise.pbtxt
@@ -0,0 +1,27 @@
+path: "tensorflow.bitwise"
+tf_module {
+ member_method {
+ name: "bitwise_and"
+ argspec: "args=[\'x\', \'y\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "bitwise_or"
+ argspec: "args=[\'x\', \'y\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "bitwise_xor"
+ argspec: "args=[\'x\', \'y\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "invert"
+ argspec: "args=[\'x\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "left_shift"
+ argspec: "args=[\'x\', \'y\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "right_shift"
+ argspec: "args=[\'x\', \'y\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+}
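The tf.bitwise ops above are elementwise on integer tensors; a short sketch:

    import tensorflow as tf

    x = tf.constant([0b1100], dtype=tf.int32)
    y = tf.constant([0b1010], dtype=tf.int32)
    a = tf.bitwise.bitwise_and(x, y)   # [0b1000]
    o = tf.bitwise.bitwise_or(x, y)    # [0b1110]
    n = tf.bitwise.invert(x)           # bitwise NOT (two's complement)
    s = tf.bitwise.left_shift(x, y)    # elementwise x << y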
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.compat.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.compat.pbtxt
new file mode 100644
index 0000000000..f1d760603e
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.compat.pbtxt
@@ -0,0 +1,47 @@
+path: "tensorflow.compat"
+tf_module {
+ member {
+ name: "bytes_or_text_types"
+ mtype: "<type \'tuple\'>"
+ }
+ member {
+ name: "complex_types"
+ mtype: "<type \'tuple\'>"
+ }
+ member {
+ name: "integral_types"
+ mtype: "<type \'tuple\'>"
+ }
+ member {
+ name: "real_types"
+ mtype: "<type \'tuple\'>"
+ }
+ member_method {
+ name: "as_bytes"
+ argspec: "args=[\'bytes_or_text\', \'encoding\'], varargs=None, keywords=None, defaults=[\'utf-8\'], "
+ }
+ member_method {
+ name: "as_str"
+ argspec: "args=[\'bytes_or_text\', \'encoding\'], varargs=None, keywords=None, defaults=[\'utf-8\'], "
+ }
+ member_method {
+ name: "as_str_any"
+ argspec: "args=[\'value\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "as_text"
+ argspec: "args=[\'bytes_or_text\', \'encoding\'], varargs=None, keywords=None, defaults=[\'utf-8\'], "
+ }
+ member_method {
+ name: "forward_compatibility_horizon"
+ argspec: "args=[\'year\', \'month\', \'day\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "forward_compatible"
+ argspec: "args=[\'year\', \'month\', \'day\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "path_to_str"
+ argspec: "args=[\'path\'], varargs=None, keywords=None, defaults=None"
+ }
+}
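A brief sketch of the tf.compat helpers listed above (plain Python utilities, no session needed); the date passed to forward_compatible is arbitrary.

    import tensorflow as tf

    raw = tf.compat.as_bytes(u"héllo")           # UTF-8 encoded bytes
    text = tf.compat.as_text(b"h\xc3\xa9llo")    # back to unicode text
    assert isinstance(raw, tf.compat.bytes_or_text_types)
    print(tf.compat.forward_compatible(2018, 9, 1))  # bool: past the horizon date?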
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.constant_initializer.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.constant_initializer.pbtxt
new file mode 100644
index 0000000000..00ec669b16
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.constant_initializer.pbtxt
@@ -0,0 +1,18 @@
+path: "tensorflow.constant_initializer"
+tf_class {
+ is_instance: "<class \'tensorflow.python.ops.init_ops.Constant\'>"
+ is_instance: "<class \'tensorflow.python.ops.init_ops.Initializer\'>"
+ is_instance: "<type \'object\'>"
+ member_method {
+ name: "__init__"
+ argspec: "args=[\'self\', \'value\', \'dtype\', \'verify_shape\'], varargs=None, keywords=None, defaults=[\'0\', \"<dtype: \'float32\'>\", \'False\'], "
+ }
+ member_method {
+ name: "from_config"
+ argspec: "args=[\'cls\', \'config\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_config"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+}
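A minimal sketch of tf.constant_initializer with tf.get_variable, assuming TF 1.x variable scopes; the variable name is illustrative.

    import tensorflow as tf

    init = tf.constant_initializer([1., 2., 3.], verify_shape=True)
    w = tf.get_variable("w", shape=[3], initializer=init)
    with tf.Session() as sess:
      sess.run(w.initializer)
      print(sess.run(w))   # [1. 2. 3.]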
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.data.-dataset.__metaclass__.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.data.-dataset.__metaclass__.pbtxt
new file mode 100644
index 0000000000..af08c88d33
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.data.-dataset.__metaclass__.pbtxt
@@ -0,0 +1,14 @@
+path: "tensorflow.data.Dataset.__metaclass__"
+tf_class {
+ is_instance: "<class \'abc.ABCMeta\'>"
+ member_method {
+ name: "__init__"
+ }
+ member_method {
+ name: "mro"
+ }
+ member_method {
+ name: "register"
+ argspec: "args=[\'cls\', \'subclass\'], varargs=None, keywords=None, defaults=None"
+ }
+}
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.data.-dataset.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.data.-dataset.pbtxt
new file mode 100644
index 0000000000..c3ba2dba57
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.data.-dataset.pbtxt
@@ -0,0 +1,121 @@
+path: "tensorflow.data.Dataset"
+tf_class {
+ is_instance: "<class \'tensorflow.python.data.ops.dataset_ops.Dataset\'>"
+ is_instance: "<type \'object\'>"
+ member {
+ name: "output_classes"
+ mtype: "<class \'abc.abstractproperty\'>"
+ }
+ member {
+ name: "output_shapes"
+ mtype: "<class \'abc.abstractproperty\'>"
+ }
+ member {
+ name: "output_types"
+ mtype: "<class \'abc.abstractproperty\'>"
+ }
+ member_method {
+ name: "__init__"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "apply"
+ argspec: "args=[\'self\', \'transformation_func\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "batch"
+ argspec: "args=[\'self\', \'batch_size\', \'drop_remainder\'], varargs=None, keywords=None, defaults=[\'False\'], "
+ }
+ member_method {
+ name: "cache"
+ argspec: "args=[\'self\', \'filename\'], varargs=None, keywords=None, defaults=[\'\'], "
+ }
+ member_method {
+ name: "concatenate"
+ argspec: "args=[\'self\', \'dataset\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "filter"
+ argspec: "args=[\'self\', \'predicate\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "flat_map"
+ argspec: "args=[\'self\', \'map_func\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "from_generator"
+ argspec: "args=[\'generator\', \'output_types\', \'output_shapes\', \'args\'], varargs=None, keywords=None, defaults=[\'None\', \'None\'], "
+ }
+ member_method {
+ name: "from_sparse_tensor_slices"
+ argspec: "args=[\'sparse_tensor\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "from_tensor_slices"
+ argspec: "args=[\'tensors\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "from_tensors"
+ argspec: "args=[\'tensors\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "interleave"
+ argspec: "args=[\'self\', \'map_func\', \'cycle_length\', \'block_length\', \'num_parallel_calls\'], varargs=None, keywords=None, defaults=[\'1\', \'None\'], "
+ }
+ member_method {
+ name: "list_files"
+ argspec: "args=[\'file_pattern\', \'shuffle\', \'seed\'], varargs=None, keywords=None, defaults=[\'None\', \'None\'], "
+ }
+ member_method {
+ name: "make_initializable_iterator"
+ argspec: "args=[\'self\', \'shared_name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "make_one_shot_iterator"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "map"
+ argspec: "args=[\'self\', \'map_func\', \'num_parallel_calls\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "padded_batch"
+ argspec: "args=[\'self\', \'batch_size\', \'padded_shapes\', \'padding_values\', \'drop_remainder\'], varargs=None, keywords=None, defaults=[\'None\', \'False\'], "
+ }
+ member_method {
+ name: "prefetch"
+ argspec: "args=[\'self\', \'buffer_size\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "range"
+ argspec: "args=[], varargs=args, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "repeat"
+ argspec: "args=[\'self\', \'count\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "shard"
+ argspec: "args=[\'self\', \'num_shards\', \'index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "shuffle"
+ argspec: "args=[\'self\', \'buffer_size\', \'seed\', \'reshuffle_each_iteration\'], varargs=None, keywords=None, defaults=[\'None\', \'None\'], "
+ }
+ member_method {
+ name: "skip"
+ argspec: "args=[\'self\', \'count\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "take"
+ argspec: "args=[\'self\', \'count\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "window"
+ argspec: "args=[\'self\', \'size\', \'shift\', \'stride\', \'drop_remainder\'], varargs=None, keywords=None, defaults=[\'None\', \'1\', \'False\'], "
+ }
+ member_method {
+ name: "zip"
+ argspec: "args=[\'datasets\'], varargs=None, keywords=None, defaults=None"
+ }
+}
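A minimal input pipeline using the tf.data.Dataset methods listed above, including the drop_remainder argument that appears in the batch argspec; graph-mode execution is assumed.

    import tensorflow as tf

    ds = (tf.data.Dataset.from_tensor_slices(tf.range(10))
          .map(lambda x: x * 2, num_parallel_calls=2)
          .shuffle(buffer_size=10)
          .batch(4, drop_remainder=True)
          .repeat(2))
    next_batch = ds.make_one_shot_iterator().get_next()
    with tf.Session() as sess:
      print(sess.run(next_batch))   # four even numbers in [0, 18]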
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.data.-fixed-length-record-dataset.__metaclass__.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.data.-fixed-length-record-dataset.__metaclass__.pbtxt
new file mode 100644
index 0000000000..f384323fc8
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.data.-fixed-length-record-dataset.__metaclass__.pbtxt
@@ -0,0 +1,14 @@
+path: "tensorflow.data.FixedLengthRecordDataset.__metaclass__"
+tf_class {
+ is_instance: "<class \'abc.ABCMeta\'>"
+ member_method {
+ name: "__init__"
+ }
+ member_method {
+ name: "mro"
+ }
+ member_method {
+ name: "register"
+ argspec: "args=[\'cls\', \'subclass\'], varargs=None, keywords=None, defaults=None"
+ }
+}
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.data.-fixed-length-record-dataset.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.data.-fixed-length-record-dataset.pbtxt
new file mode 100644
index 0000000000..3541671bee
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.data.-fixed-length-record-dataset.pbtxt
@@ -0,0 +1,122 @@
+path: "tensorflow.data.FixedLengthRecordDataset"
+tf_class {
+ is_instance: "<class \'tensorflow.python.data.ops.readers.FixedLengthRecordDataset\'>"
+ is_instance: "<class \'tensorflow.python.data.ops.dataset_ops.Dataset\'>"
+ is_instance: "<type \'object\'>"
+ member {
+ name: "output_classes"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "output_shapes"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "output_types"
+ mtype: "<type \'property\'>"
+ }
+ member_method {
+ name: "__init__"
+ argspec: "args=[\'self\', \'filenames\', \'record_bytes\', \'header_bytes\', \'footer_bytes\', \'buffer_size\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\'], "
+ }
+ member_method {
+ name: "apply"
+ argspec: "args=[\'self\', \'transformation_func\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "batch"
+ argspec: "args=[\'self\', \'batch_size\', \'drop_remainder\'], varargs=None, keywords=None, defaults=[\'False\'], "
+ }
+ member_method {
+ name: "cache"
+ argspec: "args=[\'self\', \'filename\'], varargs=None, keywords=None, defaults=[\'\'], "
+ }
+ member_method {
+ name: "concatenate"
+ argspec: "args=[\'self\', \'dataset\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "filter"
+ argspec: "args=[\'self\', \'predicate\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "flat_map"
+ argspec: "args=[\'self\', \'map_func\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "from_generator"
+ argspec: "args=[\'generator\', \'output_types\', \'output_shapes\', \'args\'], varargs=None, keywords=None, defaults=[\'None\', \'None\'], "
+ }
+ member_method {
+ name: "from_sparse_tensor_slices"
+ argspec: "args=[\'sparse_tensor\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "from_tensor_slices"
+ argspec: "args=[\'tensors\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "from_tensors"
+ argspec: "args=[\'tensors\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "interleave"
+ argspec: "args=[\'self\', \'map_func\', \'cycle_length\', \'block_length\', \'num_parallel_calls\'], varargs=None, keywords=None, defaults=[\'1\', \'None\'], "
+ }
+ member_method {
+ name: "list_files"
+ argspec: "args=[\'file_pattern\', \'shuffle\', \'seed\'], varargs=None, keywords=None, defaults=[\'None\', \'None\'], "
+ }
+ member_method {
+ name: "make_initializable_iterator"
+ argspec: "args=[\'self\', \'shared_name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "make_one_shot_iterator"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "map"
+ argspec: "args=[\'self\', \'map_func\', \'num_parallel_calls\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "padded_batch"
+ argspec: "args=[\'self\', \'batch_size\', \'padded_shapes\', \'padding_values\', \'drop_remainder\'], varargs=None, keywords=None, defaults=[\'None\', \'False\'], "
+ }
+ member_method {
+ name: "prefetch"
+ argspec: "args=[\'self\', \'buffer_size\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "range"
+ argspec: "args=[], varargs=args, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "repeat"
+ argspec: "args=[\'self\', \'count\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "shard"
+ argspec: "args=[\'self\', \'num_shards\', \'index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "shuffle"
+ argspec: "args=[\'self\', \'buffer_size\', \'seed\', \'reshuffle_each_iteration\'], varargs=None, keywords=None, defaults=[\'None\', \'None\'], "
+ }
+ member_method {
+ name: "skip"
+ argspec: "args=[\'self\', \'count\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "take"
+ argspec: "args=[\'self\', \'count\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "window"
+ argspec: "args=[\'self\', \'size\', \'shift\', \'stride\', \'drop_remainder\'], varargs=None, keywords=None, defaults=[\'None\', \'1\', \'False\'], "
+ }
+ member_method {
+ name: "zip"
+ argspec: "args=[\'datasets\'], varargs=None, keywords=None, defaults=None"
+ }
+}
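A sketch of FixedLengthRecordDataset for fixed-size binary records (CIFAR-10-style layout: one label byte followed by 32*32*3 image bytes); the file path is hypothetical.

    import tensorflow as tf

    ds = tf.data.FixedLengthRecordDataset(["cifar10.bin"],   # hypothetical path
                                          record_bytes=1 + 32 * 32 * 3,
                                          header_bytes=0, footer_bytes=0,
                                          buffer_size=8 * 1024)
    record = ds.make_one_shot_iterator().get_next()          # scalar tf.string per record
    label = tf.decode_raw(tf.substr(record, 0, 1), tf.uint8)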
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.data.-iterator.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.data.-iterator.pbtxt
new file mode 100644
index 0000000000..4f0147a523
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.data.-iterator.pbtxt
@@ -0,0 +1,46 @@
+path: "tensorflow.data.Iterator"
+tf_class {
+ is_instance: "<class \'tensorflow.python.data.ops.iterator_ops.Iterator\'>"
+ is_instance: "<class \'tensorflow.python.training.checkpointable.base.CheckpointableBase\'>"
+ is_instance: "<type \'object\'>"
+ member {
+ name: "initializer"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "output_classes"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "output_shapes"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "output_types"
+ mtype: "<type \'property\'>"
+ }
+ member_method {
+ name: "__init__"
+ argspec: "args=[\'self\', \'iterator_resource\', \'initializer\', \'output_types\', \'output_shapes\', \'output_classes\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "from_string_handle"
+ argspec: "args=[\'string_handle\', \'output_types\', \'output_shapes\', \'output_classes\'], varargs=None, keywords=None, defaults=[\'None\', \'None\'], "
+ }
+ member_method {
+ name: "from_structure"
+ argspec: "args=[\'output_types\', \'output_shapes\', \'shared_name\', \'output_classes\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\'], "
+ }
+ member_method {
+ name: "get_next"
+ argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "make_initializer"
+ argspec: "args=[\'self\', \'dataset\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "string_handle"
+ argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+}
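A reinitializable-iterator sketch using the from_structure/make_initializer methods listed above, so one get_next op can be fed from different datasets of the same structure.

    import tensorflow as tf

    train = tf.data.Dataset.range(10).batch(2)
    test = tf.data.Dataset.range(4).batch(2)
    it = tf.data.Iterator.from_structure(train.output_types, train.output_shapes)
    next_elem = it.get_next()
    with tf.Session() as sess:
      sess.run(it.make_initializer(train))
      print(sess.run(next_elem))   # [0 1]
      sess.run(it.make_initializer(test))
      print(sess.run(next_elem))   # [0 1]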
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.data.-t-f-record-dataset.__metaclass__.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.data.-t-f-record-dataset.__metaclass__.pbtxt
new file mode 100644
index 0000000000..b12dec8a70
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.data.-t-f-record-dataset.__metaclass__.pbtxt
@@ -0,0 +1,14 @@
+path: "tensorflow.data.TFRecordDataset.__metaclass__"
+tf_class {
+ is_instance: "<class \'abc.ABCMeta\'>"
+ member_method {
+ name: "__init__"
+ }
+ member_method {
+ name: "mro"
+ }
+ member_method {
+ name: "register"
+ argspec: "args=[\'cls\', \'subclass\'], varargs=None, keywords=None, defaults=None"
+ }
+}
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.data.-t-f-record-dataset.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.data.-t-f-record-dataset.pbtxt
new file mode 100644
index 0000000000..b113c18ee0
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.data.-t-f-record-dataset.pbtxt
@@ -0,0 +1,122 @@
+path: "tensorflow.data.TFRecordDataset"
+tf_class {
+ is_instance: "<class \'tensorflow.python.data.ops.readers.TFRecordDataset\'>"
+ is_instance: "<class \'tensorflow.python.data.ops.dataset_ops.Dataset\'>"
+ is_instance: "<type \'object\'>"
+ member {
+ name: "output_classes"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "output_shapes"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "output_types"
+ mtype: "<type \'property\'>"
+ }
+ member_method {
+ name: "__init__"
+ argspec: "args=[\'self\', \'filenames\', \'compression_type\', \'buffer_size\', \'num_parallel_reads\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\'], "
+ }
+ member_method {
+ name: "apply"
+ argspec: "args=[\'self\', \'transformation_func\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "batch"
+ argspec: "args=[\'self\', \'batch_size\', \'drop_remainder\'], varargs=None, keywords=None, defaults=[\'False\'], "
+ }
+ member_method {
+ name: "cache"
+ argspec: "args=[\'self\', \'filename\'], varargs=None, keywords=None, defaults=[\'\'], "
+ }
+ member_method {
+ name: "concatenate"
+ argspec: "args=[\'self\', \'dataset\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "filter"
+ argspec: "args=[\'self\', \'predicate\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "flat_map"
+ argspec: "args=[\'self\', \'map_func\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "from_generator"
+ argspec: "args=[\'generator\', \'output_types\', \'output_shapes\', \'args\'], varargs=None, keywords=None, defaults=[\'None\', \'None\'], "
+ }
+ member_method {
+ name: "from_sparse_tensor_slices"
+ argspec: "args=[\'sparse_tensor\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "from_tensor_slices"
+ argspec: "args=[\'tensors\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "from_tensors"
+ argspec: "args=[\'tensors\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "interleave"
+ argspec: "args=[\'self\', \'map_func\', \'cycle_length\', \'block_length\', \'num_parallel_calls\'], varargs=None, keywords=None, defaults=[\'1\', \'None\'], "
+ }
+ member_method {
+ name: "list_files"
+ argspec: "args=[\'file_pattern\', \'shuffle\', \'seed\'], varargs=None, keywords=None, defaults=[\'None\', \'None\'], "
+ }
+ member_method {
+ name: "make_initializable_iterator"
+ argspec: "args=[\'self\', \'shared_name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "make_one_shot_iterator"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "map"
+ argspec: "args=[\'self\', \'map_func\', \'num_parallel_calls\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "padded_batch"
+ argspec: "args=[\'self\', \'batch_size\', \'padded_shapes\', \'padding_values\', \'drop_remainder\'], varargs=None, keywords=None, defaults=[\'None\', \'False\'], "
+ }
+ member_method {
+ name: "prefetch"
+ argspec: "args=[\'self\', \'buffer_size\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "range"
+ argspec: "args=[], varargs=args, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "repeat"
+ argspec: "args=[\'self\', \'count\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "shard"
+ argspec: "args=[\'self\', \'num_shards\', \'index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "shuffle"
+ argspec: "args=[\'self\', \'buffer_size\', \'seed\', \'reshuffle_each_iteration\'], varargs=None, keywords=None, defaults=[\'None\', \'None\'], "
+ }
+ member_method {
+ name: "skip"
+ argspec: "args=[\'self\', \'count\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "take"
+ argspec: "args=[\'self\', \'count\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "window"
+ argspec: "args=[\'self\', \'size\', \'shift\', \'stride\', \'drop_remainder\'], varargs=None, keywords=None, defaults=[\'None\', \'1\', \'False\'], "
+ }
+ member_method {
+ name: "zip"
+ argspec: "args=[\'datasets\'], varargs=None, keywords=None, defaults=None"
+ }
+}
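A TFRecordDataset sketch exercising the constructor arguments above, including num_parallel_reads; the file name and feature spec are hypothetical.

    import tensorflow as tf

    ds = tf.data.TFRecordDataset(["train.tfrecord"],          # hypothetical file
                                 compression_type="GZIP",
                                 buffer_size=8 * 1024 * 1024,
                                 num_parallel_reads=2)
    spec = {"label": tf.FixedLenFeature([], tf.int64)}
    ds = ds.map(lambda rec: tf.parse_single_example(rec, spec))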
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.data.-text-line-dataset.__metaclass__.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.data.-text-line-dataset.__metaclass__.pbtxt
new file mode 100644
index 0000000000..7ddcdce266
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.data.-text-line-dataset.__metaclass__.pbtxt
@@ -0,0 +1,14 @@
+path: "tensorflow.data.TextLineDataset.__metaclass__"
+tf_class {
+ is_instance: "<class \'abc.ABCMeta\'>"
+ member_method {
+ name: "__init__"
+ }
+ member_method {
+ name: "mro"
+ }
+ member_method {
+ name: "register"
+ argspec: "args=[\'cls\', \'subclass\'], varargs=None, keywords=None, defaults=None"
+ }
+}
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.data.-text-line-dataset.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.data.-text-line-dataset.pbtxt
new file mode 100644
index 0000000000..7210bf5db4
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.data.-text-line-dataset.pbtxt
@@ -0,0 +1,122 @@
+path: "tensorflow.data.TextLineDataset"
+tf_class {
+ is_instance: "<class \'tensorflow.python.data.ops.readers.TextLineDataset\'>"
+ is_instance: "<class \'tensorflow.python.data.ops.dataset_ops.Dataset\'>"
+ is_instance: "<type \'object\'>"
+ member {
+ name: "output_classes"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "output_shapes"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "output_types"
+ mtype: "<type \'property\'>"
+ }
+ member_method {
+ name: "__init__"
+ argspec: "args=[\'self\', \'filenames\', \'compression_type\', \'buffer_size\'], varargs=None, keywords=None, defaults=[\'None\', \'None\'], "
+ }
+ member_method {
+ name: "apply"
+ argspec: "args=[\'self\', \'transformation_func\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "batch"
+ argspec: "args=[\'self\', \'batch_size\', \'drop_remainder\'], varargs=None, keywords=None, defaults=[\'False\'], "
+ }
+ member_method {
+ name: "cache"
+ argspec: "args=[\'self\', \'filename\'], varargs=None, keywords=None, defaults=[\'\'], "
+ }
+ member_method {
+ name: "concatenate"
+ argspec: "args=[\'self\', \'dataset\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "filter"
+ argspec: "args=[\'self\', \'predicate\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "flat_map"
+ argspec: "args=[\'self\', \'map_func\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "from_generator"
+ argspec: "args=[\'generator\', \'output_types\', \'output_shapes\', \'args\'], varargs=None, keywords=None, defaults=[\'None\', \'None\'], "
+ }
+ member_method {
+ name: "from_sparse_tensor_slices"
+ argspec: "args=[\'sparse_tensor\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "from_tensor_slices"
+ argspec: "args=[\'tensors\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "from_tensors"
+ argspec: "args=[\'tensors\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "interleave"
+ argspec: "args=[\'self\', \'map_func\', \'cycle_length\', \'block_length\', \'num_parallel_calls\'], varargs=None, keywords=None, defaults=[\'1\', \'None\'], "
+ }
+ member_method {
+ name: "list_files"
+ argspec: "args=[\'file_pattern\', \'shuffle\', \'seed\'], varargs=None, keywords=None, defaults=[\'None\', \'None\'], "
+ }
+ member_method {
+ name: "make_initializable_iterator"
+ argspec: "args=[\'self\', \'shared_name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "make_one_shot_iterator"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "map"
+ argspec: "args=[\'self\', \'map_func\', \'num_parallel_calls\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "padded_batch"
+ argspec: "args=[\'self\', \'batch_size\', \'padded_shapes\', \'padding_values\', \'drop_remainder\'], varargs=None, keywords=None, defaults=[\'None\', \'False\'], "
+ }
+ member_method {
+ name: "prefetch"
+ argspec: "args=[\'self\', \'buffer_size\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "range"
+ argspec: "args=[], varargs=args, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "repeat"
+ argspec: "args=[\'self\', \'count\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "shard"
+ argspec: "args=[\'self\', \'num_shards\', \'index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "shuffle"
+ argspec: "args=[\'self\', \'buffer_size\', \'seed\', \'reshuffle_each_iteration\'], varargs=None, keywords=None, defaults=[\'None\', \'None\'], "
+ }
+ member_method {
+ name: "skip"
+ argspec: "args=[\'self\', \'count\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "take"
+ argspec: "args=[\'self\', \'count\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "window"
+ argspec: "args=[\'self\', \'size\', \'shift\', \'stride\', \'drop_remainder\'], varargs=None, keywords=None, defaults=[\'None\', \'1\', \'False\'], "
+ }
+ member_method {
+ name: "zip"
+ argspec: "args=[\'datasets\'], varargs=None, keywords=None, defaults=None"
+ }
+}
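A TextLineDataset sketch that skips a header line and filters blanks; the file name is hypothetical.

    import tensorflow as tf

    ds = tf.data.TextLineDataset(["metrics.csv"])            # hypothetical file
    ds = ds.skip(1)                                          # drop the CSV header
    ds = ds.filter(lambda line: tf.not_equal(line, ""))      # drop blank lines
    line = ds.make_one_shot_iterator().get_next()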
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.data.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.data.pbtxt
new file mode 100644
index 0000000000..56fb270a49
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.data.pbtxt
@@ -0,0 +1,23 @@
+path: "tensorflow.data"
+tf_module {
+ member {
+ name: "Dataset"
+ mtype: "<class \'abc.ABCMeta\'>"
+ }
+ member {
+ name: "FixedLengthRecordDataset"
+ mtype: "<class \'abc.ABCMeta\'>"
+ }
+ member {
+ name: "Iterator"
+ mtype: "<type \'type\'>"
+ }
+ member {
+ name: "TFRecordDataset"
+ mtype: "<class \'abc.ABCMeta\'>"
+ }
+ member {
+ name: "TextLineDataset"
+ mtype: "<class \'abc.ABCMeta\'>"
+ }
+}
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.debugging.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.debugging.pbtxt
new file mode 100644
index 0000000000..d9efe97821
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.debugging.pbtxt
@@ -0,0 +1,19 @@
+path: "tensorflow.debugging"
+tf_module {
+ member_method {
+ name: "check_numerics"
+ argspec: "args=[\'tensor\', \'message\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "is_finite"
+ argspec: "args=[\'x\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "is_inf"
+ argspec: "args=[\'x\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "is_nan"
+ argspec: "args=[\'x\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+}
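A short sketch of the tf.debugging helpers above on a tensor that contains inf and nan values:

    import tensorflow as tf

    x = tf.log(tf.constant([1.0, 0.0, -1.0]))   # [0., -inf, nan]
    finite = tf.debugging.is_finite(x)          # [True, False, False]
    nans = tf.debugging.is_nan(x)               # [False, False, True]
    # Fails at run time with InvalidArgumentError because x contains inf/nan:
    checked = tf.debugging.check_numerics(x, message="x blew up")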
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.distributions.-bernoulli.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.distributions.-bernoulli.pbtxt
new file mode 100644
index 0000000000..ca96f4eaec
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.distributions.-bernoulli.pbtxt
@@ -0,0 +1,143 @@
+path: "tensorflow.distributions.Bernoulli"
+tf_class {
+ is_instance: "<class \'tensorflow.python.ops.distributions.bernoulli.Bernoulli\'>"
+ is_instance: "<class \'tensorflow.python.ops.distributions.distribution.Distribution\'>"
+ is_instance: "<class \'tensorflow.python.ops.distributions.distribution._BaseDistribution\'>"
+ is_instance: "<type \'object\'>"
+ member {
+ name: "allow_nan_stats"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "batch_shape"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "dtype"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "event_shape"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "logits"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "name"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "parameters"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "probs"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "reparameterization_type"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "validate_args"
+ mtype: "<type \'property\'>"
+ }
+ member_method {
+ name: "__init__"
+ argspec: "args=[\'self\', \'logits\', \'probs\', \'dtype\', \'validate_args\', \'allow_nan_stats\', \'name\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \"<dtype: \'int32\'>\", \'False\', \'True\', \'Bernoulli\'], "
+ }
+ member_method {
+ name: "batch_shape_tensor"
+ argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'batch_shape_tensor\'], "
+ }
+ member_method {
+ name: "cdf"
+ argspec: "args=[\'self\', \'value\', \'name\'], varargs=None, keywords=None, defaults=[\'cdf\'], "
+ }
+ member_method {
+ name: "copy"
+ argspec: "args=[\'self\'], varargs=None, keywords=override_parameters_kwargs, defaults=None"
+ }
+ member_method {
+ name: "covariance"
+ argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'covariance\'], "
+ }
+ member_method {
+ name: "cross_entropy"
+ argspec: "args=[\'self\', \'other\', \'name\'], varargs=None, keywords=None, defaults=[\'cross_entropy\'], "
+ }
+ member_method {
+ name: "entropy"
+ argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'entropy\'], "
+ }
+ member_method {
+ name: "event_shape_tensor"
+ argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'event_shape_tensor\'], "
+ }
+ member_method {
+ name: "is_scalar_batch"
+ argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'is_scalar_batch\'], "
+ }
+ member_method {
+ name: "is_scalar_event"
+ argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'is_scalar_event\'], "
+ }
+ member_method {
+ name: "kl_divergence"
+ argspec: "args=[\'self\', \'other\', \'name\'], varargs=None, keywords=None, defaults=[\'kl_divergence\'], "
+ }
+ member_method {
+ name: "log_cdf"
+ argspec: "args=[\'self\', \'value\', \'name\'], varargs=None, keywords=None, defaults=[\'log_cdf\'], "
+ }
+ member_method {
+ name: "log_prob"
+ argspec: "args=[\'self\', \'value\', \'name\'], varargs=None, keywords=None, defaults=[\'log_prob\'], "
+ }
+ member_method {
+ name: "log_survival_function"
+ argspec: "args=[\'self\', \'value\', \'name\'], varargs=None, keywords=None, defaults=[\'log_survival_function\'], "
+ }
+ member_method {
+ name: "mean"
+ argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'mean\'], "
+ }
+ member_method {
+ name: "mode"
+ argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'mode\'], "
+ }
+ member_method {
+ name: "param_shapes"
+ argspec: "args=[\'cls\', \'sample_shape\', \'name\'], varargs=None, keywords=None, defaults=[\'DistributionParamShapes\'], "
+ }
+ member_method {
+ name: "param_static_shapes"
+ argspec: "args=[\'cls\', \'sample_shape\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "prob"
+ argspec: "args=[\'self\', \'value\', \'name\'], varargs=None, keywords=None, defaults=[\'prob\'], "
+ }
+ member_method {
+ name: "quantile"
+ argspec: "args=[\'self\', \'value\', \'name\'], varargs=None, keywords=None, defaults=[\'quantile\'], "
+ }
+ member_method {
+ name: "sample"
+ argspec: "args=[\'self\', \'sample_shape\', \'seed\', \'name\'], varargs=None, keywords=None, defaults=[\'()\', \'None\', \'sample\'], "
+ }
+ member_method {
+ name: "stddev"
+ argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'stddev\'], "
+ }
+ member_method {
+ name: "survival_function"
+ argspec: "args=[\'self\', \'value\', \'name\'], varargs=None, keywords=None, defaults=[\'survival_function\'], "
+ }
+ member_method {
+ name: "variance"
+ argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'variance\'], "
+ }
+}
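A minimal sketch of the Bernoulli distribution surface above; the probability value is arbitrary.

    import tensorflow as tf

    dist = tf.distributions.Bernoulli(probs=0.3)
    draws = dist.sample(sample_shape=[5], seed=42)   # five int32 draws in {0, 1}
    lp = dist.log_prob([0, 1])                       # log P(X=0), log P(X=1)
    mean, var = dist.mean(), dist.variance()         # 0.3 and 0.3 * 0.7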
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.distributions.-beta.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.distributions.-beta.pbtxt
new file mode 100644
index 0000000000..d0508acd9f
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.distributions.-beta.pbtxt
@@ -0,0 +1,147 @@
+path: "tensorflow.distributions.Beta"
+tf_class {
+ is_instance: "<class \'tensorflow.python.ops.distributions.beta.Beta\'>"
+ is_instance: "<class \'tensorflow.python.ops.distributions.distribution.Distribution\'>"
+ is_instance: "<class \'tensorflow.python.ops.distributions.distribution._BaseDistribution\'>"
+ is_instance: "<type \'object\'>"
+ member {
+ name: "allow_nan_stats"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "batch_shape"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "concentration0"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "concentration1"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "dtype"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "event_shape"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "name"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "parameters"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "reparameterization_type"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "total_concentration"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "validate_args"
+ mtype: "<type \'property\'>"
+ }
+ member_method {
+ name: "__init__"
+ argspec: "args=[\'self\', \'concentration1\', \'concentration0\', \'validate_args\', \'allow_nan_stats\', \'name\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'False\', \'True\', \'Beta\'], "
+ }
+ member_method {
+ name: "batch_shape_tensor"
+ argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'batch_shape_tensor\'], "
+ }
+ member_method {
+ name: "cdf"
+ argspec: "args=[\'self\', \'value\', \'name\'], varargs=None, keywords=None, defaults=[\'cdf\'], "
+ }
+ member_method {
+ name: "copy"
+ argspec: "args=[\'self\'], varargs=None, keywords=override_parameters_kwargs, defaults=None"
+ }
+ member_method {
+ name: "covariance"
+ argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'covariance\'], "
+ }
+ member_method {
+ name: "cross_entropy"
+ argspec: "args=[\'self\', \'other\', \'name\'], varargs=None, keywords=None, defaults=[\'cross_entropy\'], "
+ }
+ member_method {
+ name: "entropy"
+ argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'entropy\'], "
+ }
+ member_method {
+ name: "event_shape_tensor"
+ argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'event_shape_tensor\'], "
+ }
+ member_method {
+ name: "is_scalar_batch"
+ argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'is_scalar_batch\'], "
+ }
+ member_method {
+ name: "is_scalar_event"
+ argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'is_scalar_event\'], "
+ }
+ member_method {
+ name: "kl_divergence"
+ argspec: "args=[\'self\', \'other\', \'name\'], varargs=None, keywords=None, defaults=[\'kl_divergence\'], "
+ }
+ member_method {
+ name: "log_cdf"
+ argspec: "args=[\'self\', \'value\', \'name\'], varargs=None, keywords=None, defaults=[\'log_cdf\'], "
+ }
+ member_method {
+ name: "log_prob"
+ argspec: "args=[\'self\', \'value\', \'name\'], varargs=None, keywords=None, defaults=[\'log_prob\'], "
+ }
+ member_method {
+ name: "log_survival_function"
+ argspec: "args=[\'self\', \'value\', \'name\'], varargs=None, keywords=None, defaults=[\'log_survival_function\'], "
+ }
+ member_method {
+ name: "mean"
+ argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'mean\'], "
+ }
+ member_method {
+ name: "mode"
+ argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'mode\'], "
+ }
+ member_method {
+ name: "param_shapes"
+ argspec: "args=[\'cls\', \'sample_shape\', \'name\'], varargs=None, keywords=None, defaults=[\'DistributionParamShapes\'], "
+ }
+ member_method {
+ name: "param_static_shapes"
+ argspec: "args=[\'cls\', \'sample_shape\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "prob"
+ argspec: "args=[\'self\', \'value\', \'name\'], varargs=None, keywords=None, defaults=[\'prob\'], "
+ }
+ member_method {
+ name: "quantile"
+ argspec: "args=[\'self\', \'value\', \'name\'], varargs=None, keywords=None, defaults=[\'quantile\'], "
+ }
+ member_method {
+ name: "sample"
+ argspec: "args=[\'self\', \'sample_shape\', \'seed\', \'name\'], varargs=None, keywords=None, defaults=[\'()\', \'None\', \'sample\'], "
+ }
+ member_method {
+ name: "stddev"
+ argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'stddev\'], "
+ }
+ member_method {
+ name: "survival_function"
+ argspec: "args=[\'self\', \'value\', \'name\'], varargs=None, keywords=None, defaults=[\'survival_function\'], "
+ }
+ member_method {
+ name: "variance"
+ argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'variance\'], "
+ }
+}
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.distributions.-categorical.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.distributions.-categorical.pbtxt
new file mode 100644
index 0000000000..ff0fbb56cd
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.distributions.-categorical.pbtxt
@@ -0,0 +1,147 @@
+path: "tensorflow.distributions.Categorical"
+tf_class {
+ is_instance: "<class \'tensorflow.python.ops.distributions.categorical.Categorical\'>"
+ is_instance: "<class \'tensorflow.python.ops.distributions.distribution.Distribution\'>"
+ is_instance: "<class \'tensorflow.python.ops.distributions.distribution._BaseDistribution\'>"
+ is_instance: "<type \'object\'>"
+ member {
+ name: "allow_nan_stats"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "batch_shape"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "dtype"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "event_shape"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "event_size"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "logits"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "name"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "parameters"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "probs"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "reparameterization_type"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "validate_args"
+ mtype: "<type \'property\'>"
+ }
+ member_method {
+ name: "__init__"
+ argspec: "args=[\'self\', \'logits\', \'probs\', \'dtype\', \'validate_args\', \'allow_nan_stats\', \'name\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \"<dtype: \'int32\'>\", \'False\', \'True\', \'Categorical\'], "
+ }
+ member_method {
+ name: "batch_shape_tensor"
+ argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'batch_shape_tensor\'], "
+ }
+ member_method {
+ name: "cdf"
+ argspec: "args=[\'self\', \'value\', \'name\'], varargs=None, keywords=None, defaults=[\'cdf\'], "
+ }
+ member_method {
+ name: "copy"
+ argspec: "args=[\'self\'], varargs=None, keywords=override_parameters_kwargs, defaults=None"
+ }
+ member_method {
+ name: "covariance"
+ argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'covariance\'], "
+ }
+ member_method {
+ name: "cross_entropy"
+ argspec: "args=[\'self\', \'other\', \'name\'], varargs=None, keywords=None, defaults=[\'cross_entropy\'], "
+ }
+ member_method {
+ name: "entropy"
+ argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'entropy\'], "
+ }
+ member_method {
+ name: "event_shape_tensor"
+ argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'event_shape_tensor\'], "
+ }
+ member_method {
+ name: "is_scalar_batch"
+ argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'is_scalar_batch\'], "
+ }
+ member_method {
+ name: "is_scalar_event"
+ argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'is_scalar_event\'], "
+ }
+ member_method {
+ name: "kl_divergence"
+ argspec: "args=[\'self\', \'other\', \'name\'], varargs=None, keywords=None, defaults=[\'kl_divergence\'], "
+ }
+ member_method {
+ name: "log_cdf"
+ argspec: "args=[\'self\', \'value\', \'name\'], varargs=None, keywords=None, defaults=[\'log_cdf\'], "
+ }
+ member_method {
+ name: "log_prob"
+ argspec: "args=[\'self\', \'value\', \'name\'], varargs=None, keywords=None, defaults=[\'log_prob\'], "
+ }
+ member_method {
+ name: "log_survival_function"
+ argspec: "args=[\'self\', \'value\', \'name\'], varargs=None, keywords=None, defaults=[\'log_survival_function\'], "
+ }
+ member_method {
+ name: "mean"
+ argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'mean\'], "
+ }
+ member_method {
+ name: "mode"
+ argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'mode\'], "
+ }
+ member_method {
+ name: "param_shapes"
+ argspec: "args=[\'cls\', \'sample_shape\', \'name\'], varargs=None, keywords=None, defaults=[\'DistributionParamShapes\'], "
+ }
+ member_method {
+ name: "param_static_shapes"
+ argspec: "args=[\'cls\', \'sample_shape\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "prob"
+ argspec: "args=[\'self\', \'value\', \'name\'], varargs=None, keywords=None, defaults=[\'prob\'], "
+ }
+ member_method {
+ name: "quantile"
+ argspec: "args=[\'self\', \'value\', \'name\'], varargs=None, keywords=None, defaults=[\'quantile\'], "
+ }
+ member_method {
+ name: "sample"
+ argspec: "args=[\'self\', \'sample_shape\', \'seed\', \'name\'], varargs=None, keywords=None, defaults=[\'()\', \'None\', \'sample\'], "
+ }
+ member_method {
+ name: "stddev"
+ argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'stddev\'], "
+ }
+ member_method {
+ name: "survival_function"
+ argspec: "args=[\'self\', \'value\', \'name\'], varargs=None, keywords=None, defaults=[\'survival_function\'], "
+ }
+ member_method {
+ name: "variance"
+ argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'variance\'], "
+ }
+}
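The Categorical surface above mirrors Bernoulli but over an arbitrary number of classes; a brief sketch with arbitrary logits:

    import tensorflow as tf

    cat = tf.distributions.Categorical(logits=[0.5, 1.5, 2.5])   # one 3-class distribution
    draws = cat.sample(sample_shape=[8], seed=0)                 # int32 draws in {0, 1, 2}
    ent = cat.entropy()
    kl = cat.kl_divergence(tf.distributions.Categorical(probs=[0.2, 0.3, 0.5]))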
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.distributions.-dirichlet-multinomial.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.distributions.-dirichlet-multinomial.pbtxt
new file mode 100644
index 0000000000..d75e4a2f88
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.distributions.-dirichlet-multinomial.pbtxt
@@ -0,0 +1,147 @@
+path: "tensorflow.distributions.DirichletMultinomial"
+tf_class {
+ is_instance: "<class \'tensorflow.python.ops.distributions.dirichlet_multinomial.DirichletMultinomial\'>"
+ is_instance: "<class \'tensorflow.python.ops.distributions.distribution.Distribution\'>"
+ is_instance: "<class \'tensorflow.python.ops.distributions.distribution._BaseDistribution\'>"
+ is_instance: "<type \'object\'>"
+ member {
+ name: "allow_nan_stats"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "batch_shape"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "concentration"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "dtype"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "event_shape"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "name"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "parameters"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "reparameterization_type"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "total_concentration"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "total_count"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "validate_args"
+ mtype: "<type \'property\'>"
+ }
+ member_method {
+ name: "__init__"
+ argspec: "args=[\'self\', \'total_count\', \'concentration\', \'validate_args\', \'allow_nan_stats\', \'name\'], varargs=None, keywords=None, defaults=[\'False\', \'True\', \'DirichletMultinomial\'], "
+ }
+ member_method {
+ name: "batch_shape_tensor"
+ argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'batch_shape_tensor\'], "
+ }
+ member_method {
+ name: "cdf"
+ argspec: "args=[\'self\', \'value\', \'name\'], varargs=None, keywords=None, defaults=[\'cdf\'], "
+ }
+ member_method {
+ name: "copy"
+ argspec: "args=[\'self\'], varargs=None, keywords=override_parameters_kwargs, defaults=None"
+ }
+ member_method {
+ name: "covariance"
+ argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'covariance\'], "
+ }
+ member_method {
+ name: "cross_entropy"
+ argspec: "args=[\'self\', \'other\', \'name\'], varargs=None, keywords=None, defaults=[\'cross_entropy\'], "
+ }
+ member_method {
+ name: "entropy"
+ argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'entropy\'], "
+ }
+ member_method {
+ name: "event_shape_tensor"
+ argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'event_shape_tensor\'], "
+ }
+ member_method {
+ name: "is_scalar_batch"
+ argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'is_scalar_batch\'], "
+ }
+ member_method {
+ name: "is_scalar_event"
+ argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'is_scalar_event\'], "
+ }
+ member_method {
+ name: "kl_divergence"
+ argspec: "args=[\'self\', \'other\', \'name\'], varargs=None, keywords=None, defaults=[\'kl_divergence\'], "
+ }
+ member_method {
+ name: "log_cdf"
+ argspec: "args=[\'self\', \'value\', \'name\'], varargs=None, keywords=None, defaults=[\'log_cdf\'], "
+ }
+ member_method {
+ name: "log_prob"
+ argspec: "args=[\'self\', \'value\', \'name\'], varargs=None, keywords=None, defaults=[\'log_prob\'], "
+ }
+ member_method {
+ name: "log_survival_function"
+ argspec: "args=[\'self\', \'value\', \'name\'], varargs=None, keywords=None, defaults=[\'log_survival_function\'], "
+ }
+ member_method {
+ name: "mean"
+ argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'mean\'], "
+ }
+ member_method {
+ name: "mode"
+ argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'mode\'], "
+ }
+ member_method {
+ name: "param_shapes"
+ argspec: "args=[\'cls\', \'sample_shape\', \'name\'], varargs=None, keywords=None, defaults=[\'DistributionParamShapes\'], "
+ }
+ member_method {
+ name: "param_static_shapes"
+ argspec: "args=[\'cls\', \'sample_shape\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "prob"
+ argspec: "args=[\'self\', \'value\', \'name\'], varargs=None, keywords=None, defaults=[\'prob\'], "
+ }
+ member_method {
+ name: "quantile"
+ argspec: "args=[\'self\', \'value\', \'name\'], varargs=None, keywords=None, defaults=[\'quantile\'], "
+ }
+ member_method {
+ name: "sample"
+ argspec: "args=[\'self\', \'sample_shape\', \'seed\', \'name\'], varargs=None, keywords=None, defaults=[\'()\', \'None\', \'sample\'], "
+ }
+ member_method {
+ name: "stddev"
+ argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'stddev\'], "
+ }
+ member_method {
+ name: "survival_function"
+ argspec: "args=[\'self\', \'value\', \'name\'], varargs=None, keywords=None, defaults=[\'survival_function\'], "
+ }
+ member_method {
+ name: "variance"
+ argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'variance\'], "
+ }
+}
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.distributions.-dirichlet.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.distributions.-dirichlet.pbtxt
new file mode 100644
index 0000000000..b838b9ae21
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.distributions.-dirichlet.pbtxt
@@ -0,0 +1,143 @@
+path: "tensorflow.distributions.Dirichlet"
+tf_class {
+ is_instance: "<class \'tensorflow.python.ops.distributions.dirichlet.Dirichlet\'>"
+ is_instance: "<class \'tensorflow.python.ops.distributions.distribution.Distribution\'>"
+ is_instance: "<class \'tensorflow.python.ops.distributions.distribution._BaseDistribution\'>"
+ is_instance: "<type \'object\'>"
+ member {
+ name: "allow_nan_stats"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "batch_shape"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "concentration"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "dtype"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "event_shape"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "name"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "parameters"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "reparameterization_type"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "total_concentration"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "validate_args"
+ mtype: "<type \'property\'>"
+ }
+ member_method {
+ name: "__init__"
+ argspec: "args=[\'self\', \'concentration\', \'validate_args\', \'allow_nan_stats\', \'name\'], varargs=None, keywords=None, defaults=[\'False\', \'True\', \'Dirichlet\'], "
+ }
+ member_method {
+ name: "batch_shape_tensor"
+ argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'batch_shape_tensor\'], "
+ }
+ member_method {
+ name: "cdf"
+ argspec: "args=[\'self\', \'value\', \'name\'], varargs=None, keywords=None, defaults=[\'cdf\'], "
+ }
+ member_method {
+ name: "copy"
+ argspec: "args=[\'self\'], varargs=None, keywords=override_parameters_kwargs, defaults=None"
+ }
+ member_method {
+ name: "covariance"
+ argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'covariance\'], "
+ }
+ member_method {
+ name: "cross_entropy"
+ argspec: "args=[\'self\', \'other\', \'name\'], varargs=None, keywords=None, defaults=[\'cross_entropy\'], "
+ }
+ member_method {
+ name: "entropy"
+ argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'entropy\'], "
+ }
+ member_method {
+ name: "event_shape_tensor"
+ argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'event_shape_tensor\'], "
+ }
+ member_method {
+ name: "is_scalar_batch"
+ argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'is_scalar_batch\'], "
+ }
+ member_method {
+ name: "is_scalar_event"
+ argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'is_scalar_event\'], "
+ }
+ member_method {
+ name: "kl_divergence"
+ argspec: "args=[\'self\', \'other\', \'name\'], varargs=None, keywords=None, defaults=[\'kl_divergence\'], "
+ }
+ member_method {
+ name: "log_cdf"
+ argspec: "args=[\'self\', \'value\', \'name\'], varargs=None, keywords=None, defaults=[\'log_cdf\'], "
+ }
+ member_method {
+ name: "log_prob"
+ argspec: "args=[\'self\', \'value\', \'name\'], varargs=None, keywords=None, defaults=[\'log_prob\'], "
+ }
+ member_method {
+ name: "log_survival_function"
+ argspec: "args=[\'self\', \'value\', \'name\'], varargs=None, keywords=None, defaults=[\'log_survival_function\'], "
+ }
+ member_method {
+ name: "mean"
+ argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'mean\'], "
+ }
+ member_method {
+ name: "mode"
+ argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'mode\'], "
+ }
+ member_method {
+ name: "param_shapes"
+ argspec: "args=[\'cls\', \'sample_shape\', \'name\'], varargs=None, keywords=None, defaults=[\'DistributionParamShapes\'], "
+ }
+ member_method {
+ name: "param_static_shapes"
+ argspec: "args=[\'cls\', \'sample_shape\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "prob"
+ argspec: "args=[\'self\', \'value\', \'name\'], varargs=None, keywords=None, defaults=[\'prob\'], "
+ }
+ member_method {
+ name: "quantile"
+ argspec: "args=[\'self\', \'value\', \'name\'], varargs=None, keywords=None, defaults=[\'quantile\'], "
+ }
+ member_method {
+ name: "sample"
+ argspec: "args=[\'self\', \'sample_shape\', \'seed\', \'name\'], varargs=None, keywords=None, defaults=[\'()\', \'None\', \'sample\'], "
+ }
+ member_method {
+ name: "stddev"
+ argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'stddev\'], "
+ }
+ member_method {
+ name: "survival_function"
+ argspec: "args=[\'self\', \'value\', \'name\'], varargs=None, keywords=None, defaults=[\'survival_function\'], "
+ }
+ member_method {
+ name: "variance"
+ argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'variance\'], "
+ }
+}
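
Note on the golden file above: it only pins the public surface of tf.distributions.Dirichlet (constructor argspec plus method names), not its behavior. As a minimal usage sketch of that surface — assuming TF 1.x graph mode, so each call returns a symbolic Tensor that still needs a tf.Session to evaluate; `tf` is `import tensorflow as tf` here and in the later sketches:

    import tensorflow as tf

    # Three-category Dirichlet; concentration is the only required argument.
    dist = tf.distributions.Dirichlet(concentration=[1.0, 2.0, 3.0])
    mean = dist.mean()                    # concentration / sum -> [1/6, 1/3, 1/2]
    draws = dist.sample(5, seed=42)       # shape [5, 3], each row sums to 1
    lp = dist.log_prob([0.2, 0.3, 0.5])   # log density of one point on the simplex
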
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.distributions.-distribution.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.distributions.-distribution.pbtxt
new file mode 100644
index 0000000000..6f06b7d50d
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.distributions.-distribution.pbtxt
@@ -0,0 +1,134 @@
+path: "tensorflow.distributions.Distribution"
+tf_class {
+ is_instance: "<class \'tensorflow.python.ops.distributions.distribution.Distribution\'>"
+ is_instance: "<class \'tensorflow.python.ops.distributions.distribution._BaseDistribution\'>"
+ is_instance: "<type \'object\'>"
+ member {
+ name: "allow_nan_stats"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "batch_shape"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "dtype"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "event_shape"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "name"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "parameters"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "reparameterization_type"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "validate_args"
+ mtype: "<type \'property\'>"
+ }
+ member_method {
+ name: "__init__"
+ argspec: "args=[\'self\', \'dtype\', \'reparameterization_type\', \'validate_args\', \'allow_nan_stats\', \'parameters\', \'graph_parents\', \'name\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\'], "
+ }
+ member_method {
+ name: "batch_shape_tensor"
+ argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'batch_shape_tensor\'], "
+ }
+ member_method {
+ name: "cdf"
+ argspec: "args=[\'self\', \'value\', \'name\'], varargs=None, keywords=None, defaults=[\'cdf\'], "
+ }
+ member_method {
+ name: "copy"
+ argspec: "args=[\'self\'], varargs=None, keywords=override_parameters_kwargs, defaults=None"
+ }
+ member_method {
+ name: "covariance"
+ argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'covariance\'], "
+ }
+ member_method {
+ name: "cross_entropy"
+ argspec: "args=[\'self\', \'other\', \'name\'], varargs=None, keywords=None, defaults=[\'cross_entropy\'], "
+ }
+ member_method {
+ name: "entropy"
+ argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'entropy\'], "
+ }
+ member_method {
+ name: "event_shape_tensor"
+ argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'event_shape_tensor\'], "
+ }
+ member_method {
+ name: "is_scalar_batch"
+ argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'is_scalar_batch\'], "
+ }
+ member_method {
+ name: "is_scalar_event"
+ argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'is_scalar_event\'], "
+ }
+ member_method {
+ name: "kl_divergence"
+ argspec: "args=[\'self\', \'other\', \'name\'], varargs=None, keywords=None, defaults=[\'kl_divergence\'], "
+ }
+ member_method {
+ name: "log_cdf"
+ argspec: "args=[\'self\', \'value\', \'name\'], varargs=None, keywords=None, defaults=[\'log_cdf\'], "
+ }
+ member_method {
+ name: "log_prob"
+ argspec: "args=[\'self\', \'value\', \'name\'], varargs=None, keywords=None, defaults=[\'log_prob\'], "
+ }
+ member_method {
+ name: "log_survival_function"
+ argspec: "args=[\'self\', \'value\', \'name\'], varargs=None, keywords=None, defaults=[\'log_survival_function\'], "
+ }
+ member_method {
+ name: "mean"
+ argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'mean\'], "
+ }
+ member_method {
+ name: "mode"
+ argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'mode\'], "
+ }
+ member_method {
+ name: "param_shapes"
+ argspec: "args=[\'cls\', \'sample_shape\', \'name\'], varargs=None, keywords=None, defaults=[\'DistributionParamShapes\'], "
+ }
+ member_method {
+ name: "param_static_shapes"
+ argspec: "args=[\'cls\', \'sample_shape\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "prob"
+ argspec: "args=[\'self\', \'value\', \'name\'], varargs=None, keywords=None, defaults=[\'prob\'], "
+ }
+ member_method {
+ name: "quantile"
+ argspec: "args=[\'self\', \'value\', \'name\'], varargs=None, keywords=None, defaults=[\'quantile\'], "
+ }
+ member_method {
+ name: "sample"
+ argspec: "args=[\'self\', \'sample_shape\', \'seed\', \'name\'], varargs=None, keywords=None, defaults=[\'()\', \'None\', \'sample\'], "
+ }
+ member_method {
+ name: "stddev"
+ argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'stddev\'], "
+ }
+ member_method {
+ name: "survival_function"
+ argspec: "args=[\'self\', \'value\', \'name\'], varargs=None, keywords=None, defaults=[\'survival_function\'], "
+ }
+ member_method {
+ name: "variance"
+ argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'variance\'], "
+ }
+}
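
tensorflow.distributions.Distribution is the abstract base whose members listed above are shared by every concrete distribution golden file in this directory. A sketch of code written only against that shared interface (the helper name `summarize` is illustrative, not part of the API):

    def summarize(dist, n=10):
      """Accepts any tf.distributions.Distribution subclass."""
      draws = dist.sample(n)
      return {
          "name": dist.name,
          "batch_shape": dist.batch_shape,    # static TensorShape
          "event_shape": dist.event_shape,
          "log_prob": dist.log_prob(draws),   # Tensor of log densities for the draws
      }
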
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.distributions.-exponential.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.distributions.-exponential.pbtxt
new file mode 100644
index 0000000000..d34f9cde5d
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.distributions.-exponential.pbtxt
@@ -0,0 +1,144 @@
+path: "tensorflow.distributions.Exponential"
+tf_class {
+ is_instance: "<class \'tensorflow.python.ops.distributions.exponential.Exponential\'>"
+ is_instance: "<class \'tensorflow.python.ops.distributions.gamma.Gamma\'>"
+ is_instance: "<class \'tensorflow.python.ops.distributions.distribution.Distribution\'>"
+ is_instance: "<class \'tensorflow.python.ops.distributions.distribution._BaseDistribution\'>"
+ is_instance: "<type \'object\'>"
+ member {
+ name: "allow_nan_stats"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "batch_shape"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "concentration"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "dtype"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "event_shape"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "name"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "parameters"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "rate"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "reparameterization_type"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "validate_args"
+ mtype: "<type \'property\'>"
+ }
+ member_method {
+ name: "__init__"
+ argspec: "args=[\'self\', \'rate\', \'validate_args\', \'allow_nan_stats\', \'name\'], varargs=None, keywords=None, defaults=[\'False\', \'True\', \'Exponential\'], "
+ }
+ member_method {
+ name: "batch_shape_tensor"
+ argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'batch_shape_tensor\'], "
+ }
+ member_method {
+ name: "cdf"
+ argspec: "args=[\'self\', \'value\', \'name\'], varargs=None, keywords=None, defaults=[\'cdf\'], "
+ }
+ member_method {
+ name: "copy"
+ argspec: "args=[\'self\'], varargs=None, keywords=override_parameters_kwargs, defaults=None"
+ }
+ member_method {
+ name: "covariance"
+ argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'covariance\'], "
+ }
+ member_method {
+ name: "cross_entropy"
+ argspec: "args=[\'self\', \'other\', \'name\'], varargs=None, keywords=None, defaults=[\'cross_entropy\'], "
+ }
+ member_method {
+ name: "entropy"
+ argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'entropy\'], "
+ }
+ member_method {
+ name: "event_shape_tensor"
+ argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'event_shape_tensor\'], "
+ }
+ member_method {
+ name: "is_scalar_batch"
+ argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'is_scalar_batch\'], "
+ }
+ member_method {
+ name: "is_scalar_event"
+ argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'is_scalar_event\'], "
+ }
+ member_method {
+ name: "kl_divergence"
+ argspec: "args=[\'self\', \'other\', \'name\'], varargs=None, keywords=None, defaults=[\'kl_divergence\'], "
+ }
+ member_method {
+ name: "log_cdf"
+ argspec: "args=[\'self\', \'value\', \'name\'], varargs=None, keywords=None, defaults=[\'log_cdf\'], "
+ }
+ member_method {
+ name: "log_prob"
+ argspec: "args=[\'self\', \'value\', \'name\'], varargs=None, keywords=None, defaults=[\'log_prob\'], "
+ }
+ member_method {
+ name: "log_survival_function"
+ argspec: "args=[\'self\', \'value\', \'name\'], varargs=None, keywords=None, defaults=[\'log_survival_function\'], "
+ }
+ member_method {
+ name: "mean"
+ argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'mean\'], "
+ }
+ member_method {
+ name: "mode"
+ argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'mode\'], "
+ }
+ member_method {
+ name: "param_shapes"
+ argspec: "args=[\'cls\', \'sample_shape\', \'name\'], varargs=None, keywords=None, defaults=[\'DistributionParamShapes\'], "
+ }
+ member_method {
+ name: "param_static_shapes"
+ argspec: "args=[\'cls\', \'sample_shape\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "prob"
+ argspec: "args=[\'self\', \'value\', \'name\'], varargs=None, keywords=None, defaults=[\'prob\'], "
+ }
+ member_method {
+ name: "quantile"
+ argspec: "args=[\'self\', \'value\', \'name\'], varargs=None, keywords=None, defaults=[\'quantile\'], "
+ }
+ member_method {
+ name: "sample"
+ argspec: "args=[\'self\', \'sample_shape\', \'seed\', \'name\'], varargs=None, keywords=None, defaults=[\'()\', \'None\', \'sample\'], "
+ }
+ member_method {
+ name: "stddev"
+ argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'stddev\'], "
+ }
+ member_method {
+ name: "survival_function"
+ argspec: "args=[\'self\', \'value\', \'name\'], varargs=None, keywords=None, defaults=[\'survival_function\'], "
+ }
+ member_method {
+ name: "variance"
+ argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'variance\'], "
+ }
+}
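
Exponential is implemented as a Gamma with concentration fixed at 1, which is why the golden file above exposes a `concentration` property alongside `rate`. A short sketch exercising the cdf/survival_function pair, under the same graph-mode assumption as before:

    exp_dist = tf.distributions.Exponential(rate=2.0)
    p = exp_dist.cdf(1.0)                   # 1 - exp(-rate * 1.0)
    tail = exp_dist.survival_function(1.0)  # exp(-rate * 1.0), i.e. 1 - cdf
    one = exp_dist.concentration            # inherited from Gamma, fixed at 1.0
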
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.distributions.-gamma.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.distributions.-gamma.pbtxt
new file mode 100644
index 0000000000..df268b8d99
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.distributions.-gamma.pbtxt
@@ -0,0 +1,143 @@
+path: "tensorflow.distributions.Gamma"
+tf_class {
+ is_instance: "<class \'tensorflow.python.ops.distributions.gamma.Gamma\'>"
+ is_instance: "<class \'tensorflow.python.ops.distributions.distribution.Distribution\'>"
+ is_instance: "<class \'tensorflow.python.ops.distributions.distribution._BaseDistribution\'>"
+ is_instance: "<type \'object\'>"
+ member {
+ name: "allow_nan_stats"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "batch_shape"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "concentration"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "dtype"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "event_shape"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "name"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "parameters"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "rate"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "reparameterization_type"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "validate_args"
+ mtype: "<type \'property\'>"
+ }
+ member_method {
+ name: "__init__"
+ argspec: "args=[\'self\', \'concentration\', \'rate\', \'validate_args\', \'allow_nan_stats\', \'name\'], varargs=None, keywords=None, defaults=[\'False\', \'True\', \'Gamma\'], "
+ }
+ member_method {
+ name: "batch_shape_tensor"
+ argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'batch_shape_tensor\'], "
+ }
+ member_method {
+ name: "cdf"
+ argspec: "args=[\'self\', \'value\', \'name\'], varargs=None, keywords=None, defaults=[\'cdf\'], "
+ }
+ member_method {
+ name: "copy"
+ argspec: "args=[\'self\'], varargs=None, keywords=override_parameters_kwargs, defaults=None"
+ }
+ member_method {
+ name: "covariance"
+ argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'covariance\'], "
+ }
+ member_method {
+ name: "cross_entropy"
+ argspec: "args=[\'self\', \'other\', \'name\'], varargs=None, keywords=None, defaults=[\'cross_entropy\'], "
+ }
+ member_method {
+ name: "entropy"
+ argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'entropy\'], "
+ }
+ member_method {
+ name: "event_shape_tensor"
+ argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'event_shape_tensor\'], "
+ }
+ member_method {
+ name: "is_scalar_batch"
+ argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'is_scalar_batch\'], "
+ }
+ member_method {
+ name: "is_scalar_event"
+ argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'is_scalar_event\'], "
+ }
+ member_method {
+ name: "kl_divergence"
+ argspec: "args=[\'self\', \'other\', \'name\'], varargs=None, keywords=None, defaults=[\'kl_divergence\'], "
+ }
+ member_method {
+ name: "log_cdf"
+ argspec: "args=[\'self\', \'value\', \'name\'], varargs=None, keywords=None, defaults=[\'log_cdf\'], "
+ }
+ member_method {
+ name: "log_prob"
+ argspec: "args=[\'self\', \'value\', \'name\'], varargs=None, keywords=None, defaults=[\'log_prob\'], "
+ }
+ member_method {
+ name: "log_survival_function"
+ argspec: "args=[\'self\', \'value\', \'name\'], varargs=None, keywords=None, defaults=[\'log_survival_function\'], "
+ }
+ member_method {
+ name: "mean"
+ argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'mean\'], "
+ }
+ member_method {
+ name: "mode"
+ argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'mode\'], "
+ }
+ member_method {
+ name: "param_shapes"
+ argspec: "args=[\'cls\', \'sample_shape\', \'name\'], varargs=None, keywords=None, defaults=[\'DistributionParamShapes\'], "
+ }
+ member_method {
+ name: "param_static_shapes"
+ argspec: "args=[\'cls\', \'sample_shape\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "prob"
+ argspec: "args=[\'self\', \'value\', \'name\'], varargs=None, keywords=None, defaults=[\'prob\'], "
+ }
+ member_method {
+ name: "quantile"
+ argspec: "args=[\'self\', \'value\', \'name\'], varargs=None, keywords=None, defaults=[\'quantile\'], "
+ }
+ member_method {
+ name: "sample"
+ argspec: "args=[\'self\', \'sample_shape\', \'seed\', \'name\'], varargs=None, keywords=None, defaults=[\'()\', \'None\', \'sample\'], "
+ }
+ member_method {
+ name: "stddev"
+ argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'stddev\'], "
+ }
+ member_method {
+ name: "survival_function"
+ argspec: "args=[\'self\', \'value\', \'name\'], varargs=None, keywords=None, defaults=[\'survival_function\'], "
+ }
+ member_method {
+ name: "variance"
+ argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'variance\'], "
+ }
+}
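
Gamma uses the concentration/rate parameterization, and a Gamma-to-Gamma KL rule is registered with RegisterKL, so kl_divergence and cross_entropy resolve analytically rather than raising. A hedged sketch:

    g1 = tf.distributions.Gamma(concentration=3.0, rate=2.0)
    g2 = tf.distributions.Gamma(concentration=3.5, rate=1.5)
    kl = g1.kl_divergence(g2)    # analytic KL(g1 || g2) via the registered rule
    ce = g1.cross_entropy(g2)    # entropy(g1) + KL(g1 || g2)
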
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.distributions.-laplace.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.distributions.-laplace.pbtxt
new file mode 100644
index 0000000000..303dcb4ed3
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.distributions.-laplace.pbtxt
@@ -0,0 +1,143 @@
+path: "tensorflow.distributions.Laplace"
+tf_class {
+ is_instance: "<class \'tensorflow.python.ops.distributions.laplace.Laplace\'>"
+ is_instance: "<class \'tensorflow.python.ops.distributions.distribution.Distribution\'>"
+ is_instance: "<class \'tensorflow.python.ops.distributions.distribution._BaseDistribution\'>"
+ is_instance: "<type \'object\'>"
+ member {
+ name: "allow_nan_stats"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "batch_shape"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "dtype"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "event_shape"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "loc"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "name"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "parameters"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "reparameterization_type"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "scale"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "validate_args"
+ mtype: "<type \'property\'>"
+ }
+ member_method {
+ name: "__init__"
+ argspec: "args=[\'self\', \'loc\', \'scale\', \'validate_args\', \'allow_nan_stats\', \'name\'], varargs=None, keywords=None, defaults=[\'False\', \'True\', \'Laplace\'], "
+ }
+ member_method {
+ name: "batch_shape_tensor"
+ argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'batch_shape_tensor\'], "
+ }
+ member_method {
+ name: "cdf"
+ argspec: "args=[\'self\', \'value\', \'name\'], varargs=None, keywords=None, defaults=[\'cdf\'], "
+ }
+ member_method {
+ name: "copy"
+ argspec: "args=[\'self\'], varargs=None, keywords=override_parameters_kwargs, defaults=None"
+ }
+ member_method {
+ name: "covariance"
+ argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'covariance\'], "
+ }
+ member_method {
+ name: "cross_entropy"
+ argspec: "args=[\'self\', \'other\', \'name\'], varargs=None, keywords=None, defaults=[\'cross_entropy\'], "
+ }
+ member_method {
+ name: "entropy"
+ argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'entropy\'], "
+ }
+ member_method {
+ name: "event_shape_tensor"
+ argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'event_shape_tensor\'], "
+ }
+ member_method {
+ name: "is_scalar_batch"
+ argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'is_scalar_batch\'], "
+ }
+ member_method {
+ name: "is_scalar_event"
+ argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'is_scalar_event\'], "
+ }
+ member_method {
+ name: "kl_divergence"
+ argspec: "args=[\'self\', \'other\', \'name\'], varargs=None, keywords=None, defaults=[\'kl_divergence\'], "
+ }
+ member_method {
+ name: "log_cdf"
+ argspec: "args=[\'self\', \'value\', \'name\'], varargs=None, keywords=None, defaults=[\'log_cdf\'], "
+ }
+ member_method {
+ name: "log_prob"
+ argspec: "args=[\'self\', \'value\', \'name\'], varargs=None, keywords=None, defaults=[\'log_prob\'], "
+ }
+ member_method {
+ name: "log_survival_function"
+ argspec: "args=[\'self\', \'value\', \'name\'], varargs=None, keywords=None, defaults=[\'log_survival_function\'], "
+ }
+ member_method {
+ name: "mean"
+ argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'mean\'], "
+ }
+ member_method {
+ name: "mode"
+ argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'mode\'], "
+ }
+ member_method {
+ name: "param_shapes"
+ argspec: "args=[\'cls\', \'sample_shape\', \'name\'], varargs=None, keywords=None, defaults=[\'DistributionParamShapes\'], "
+ }
+ member_method {
+ name: "param_static_shapes"
+ argspec: "args=[\'cls\', \'sample_shape\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "prob"
+ argspec: "args=[\'self\', \'value\', \'name\'], varargs=None, keywords=None, defaults=[\'prob\'], "
+ }
+ member_method {
+ name: "quantile"
+ argspec: "args=[\'self\', \'value\', \'name\'], varargs=None, keywords=None, defaults=[\'quantile\'], "
+ }
+ member_method {
+ name: "sample"
+ argspec: "args=[\'self\', \'sample_shape\', \'seed\', \'name\'], varargs=None, keywords=None, defaults=[\'()\', \'None\', \'sample\'], "
+ }
+ member_method {
+ name: "stddev"
+ argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'stddev\'], "
+ }
+ member_method {
+ name: "survival_function"
+ argspec: "args=[\'self\', \'value\', \'name\'], varargs=None, keywords=None, defaults=[\'survival_function\'], "
+ }
+ member_method {
+ name: "variance"
+ argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'variance\'], "
+ }
+}
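
Like the other location-scale families in this directory, Laplace broadcasts its loc/scale arguments into a batch of independent distributions. A sketch of that batching behavior (shapes in the comments are the expected ones, not verified output):

    lap = tf.distributions.Laplace(loc=[0.0, 1.0], scale=[1.0, 2.0])
    lap.batch_shape                 # TensorShape([2]): two independent Laplace distributions
    draws = lap.sample(3)           # shape [3, 2]
    lp = lap.log_prob([0.5, 0.5])   # shape [2], one log density per batch member
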
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.distributions.-multinomial.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.distributions.-multinomial.pbtxt
new file mode 100644
index 0000000000..ecda8acb15
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.distributions.-multinomial.pbtxt
@@ -0,0 +1,147 @@
+path: "tensorflow.distributions.Multinomial"
+tf_class {
+ is_instance: "<class \'tensorflow.python.ops.distributions.multinomial.Multinomial\'>"
+ is_instance: "<class \'tensorflow.python.ops.distributions.distribution.Distribution\'>"
+ is_instance: "<class \'tensorflow.python.ops.distributions.distribution._BaseDistribution\'>"
+ is_instance: "<type \'object\'>"
+ member {
+ name: "allow_nan_stats"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "batch_shape"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "dtype"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "event_shape"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "logits"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "name"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "parameters"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "probs"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "reparameterization_type"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "total_count"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "validate_args"
+ mtype: "<type \'property\'>"
+ }
+ member_method {
+ name: "__init__"
+ argspec: "args=[\'self\', \'total_count\', \'logits\', \'probs\', \'validate_args\', \'allow_nan_stats\', \'name\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'False\', \'True\', \'Multinomial\'], "
+ }
+ member_method {
+ name: "batch_shape_tensor"
+ argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'batch_shape_tensor\'], "
+ }
+ member_method {
+ name: "cdf"
+ argspec: "args=[\'self\', \'value\', \'name\'], varargs=None, keywords=None, defaults=[\'cdf\'], "
+ }
+ member_method {
+ name: "copy"
+ argspec: "args=[\'self\'], varargs=None, keywords=override_parameters_kwargs, defaults=None"
+ }
+ member_method {
+ name: "covariance"
+ argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'covariance\'], "
+ }
+ member_method {
+ name: "cross_entropy"
+ argspec: "args=[\'self\', \'other\', \'name\'], varargs=None, keywords=None, defaults=[\'cross_entropy\'], "
+ }
+ member_method {
+ name: "entropy"
+ argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'entropy\'], "
+ }
+ member_method {
+ name: "event_shape_tensor"
+ argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'event_shape_tensor\'], "
+ }
+ member_method {
+ name: "is_scalar_batch"
+ argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'is_scalar_batch\'], "
+ }
+ member_method {
+ name: "is_scalar_event"
+ argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'is_scalar_event\'], "
+ }
+ member_method {
+ name: "kl_divergence"
+ argspec: "args=[\'self\', \'other\', \'name\'], varargs=None, keywords=None, defaults=[\'kl_divergence\'], "
+ }
+ member_method {
+ name: "log_cdf"
+ argspec: "args=[\'self\', \'value\', \'name\'], varargs=None, keywords=None, defaults=[\'log_cdf\'], "
+ }
+ member_method {
+ name: "log_prob"
+ argspec: "args=[\'self\', \'value\', \'name\'], varargs=None, keywords=None, defaults=[\'log_prob\'], "
+ }
+ member_method {
+ name: "log_survival_function"
+ argspec: "args=[\'self\', \'value\', \'name\'], varargs=None, keywords=None, defaults=[\'log_survival_function\'], "
+ }
+ member_method {
+ name: "mean"
+ argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'mean\'], "
+ }
+ member_method {
+ name: "mode"
+ argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'mode\'], "
+ }
+ member_method {
+ name: "param_shapes"
+ argspec: "args=[\'cls\', \'sample_shape\', \'name\'], varargs=None, keywords=None, defaults=[\'DistributionParamShapes\'], "
+ }
+ member_method {
+ name: "param_static_shapes"
+ argspec: "args=[\'cls\', \'sample_shape\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "prob"
+ argspec: "args=[\'self\', \'value\', \'name\'], varargs=None, keywords=None, defaults=[\'prob\'], "
+ }
+ member_method {
+ name: "quantile"
+ argspec: "args=[\'self\', \'value\', \'name\'], varargs=None, keywords=None, defaults=[\'quantile\'], "
+ }
+ member_method {
+ name: "sample"
+ argspec: "args=[\'self\', \'sample_shape\', \'seed\', \'name\'], varargs=None, keywords=None, defaults=[\'()\', \'None\', \'sample\'], "
+ }
+ member_method {
+ name: "stddev"
+ argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'stddev\'], "
+ }
+ member_method {
+ name: "survival_function"
+ argspec: "args=[\'self\', \'value\', \'name\'], varargs=None, keywords=None, defaults=[\'survival_function\'], "
+ }
+ member_method {
+ name: "variance"
+ argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'variance\'], "
+ }
+}
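
Multinomial takes total_count plus exactly one of logits or probs, and the counts passed to prob/log_prob must sum to total_count. A minimal sketch:

    mult = tf.distributions.Multinomial(total_count=4.0, probs=[0.2, 0.3, 0.5])
    p = mult.prob([2.0, 1.0, 1.0])   # counts sum to total_count = 4
    cov = mult.covariance()          # [3, 3] covariance over the event dimension
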
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.distributions.-normal.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.distributions.-normal.pbtxt
new file mode 100644
index 0000000000..92b9eeea22
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.distributions.-normal.pbtxt
@@ -0,0 +1,143 @@
+path: "tensorflow.distributions.Normal"
+tf_class {
+ is_instance: "<class \'tensorflow.python.ops.distributions.normal.Normal\'>"
+ is_instance: "<class \'tensorflow.python.ops.distributions.distribution.Distribution\'>"
+ is_instance: "<class \'tensorflow.python.ops.distributions.distribution._BaseDistribution\'>"
+ is_instance: "<type \'object\'>"
+ member {
+ name: "allow_nan_stats"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "batch_shape"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "dtype"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "event_shape"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "loc"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "name"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "parameters"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "reparameterization_type"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "scale"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "validate_args"
+ mtype: "<type \'property\'>"
+ }
+ member_method {
+ name: "__init__"
+ argspec: "args=[\'self\', \'loc\', \'scale\', \'validate_args\', \'allow_nan_stats\', \'name\'], varargs=None, keywords=None, defaults=[\'False\', \'True\', \'Normal\'], "
+ }
+ member_method {
+ name: "batch_shape_tensor"
+ argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'batch_shape_tensor\'], "
+ }
+ member_method {
+ name: "cdf"
+ argspec: "args=[\'self\', \'value\', \'name\'], varargs=None, keywords=None, defaults=[\'cdf\'], "
+ }
+ member_method {
+ name: "copy"
+ argspec: "args=[\'self\'], varargs=None, keywords=override_parameters_kwargs, defaults=None"
+ }
+ member_method {
+ name: "covariance"
+ argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'covariance\'], "
+ }
+ member_method {
+ name: "cross_entropy"
+ argspec: "args=[\'self\', \'other\', \'name\'], varargs=None, keywords=None, defaults=[\'cross_entropy\'], "
+ }
+ member_method {
+ name: "entropy"
+ argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'entropy\'], "
+ }
+ member_method {
+ name: "event_shape_tensor"
+ argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'event_shape_tensor\'], "
+ }
+ member_method {
+ name: "is_scalar_batch"
+ argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'is_scalar_batch\'], "
+ }
+ member_method {
+ name: "is_scalar_event"
+ argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'is_scalar_event\'], "
+ }
+ member_method {
+ name: "kl_divergence"
+ argspec: "args=[\'self\', \'other\', \'name\'], varargs=None, keywords=None, defaults=[\'kl_divergence\'], "
+ }
+ member_method {
+ name: "log_cdf"
+ argspec: "args=[\'self\', \'value\', \'name\'], varargs=None, keywords=None, defaults=[\'log_cdf\'], "
+ }
+ member_method {
+ name: "log_prob"
+ argspec: "args=[\'self\', \'value\', \'name\'], varargs=None, keywords=None, defaults=[\'log_prob\'], "
+ }
+ member_method {
+ name: "log_survival_function"
+ argspec: "args=[\'self\', \'value\', \'name\'], varargs=None, keywords=None, defaults=[\'log_survival_function\'], "
+ }
+ member_method {
+ name: "mean"
+ argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'mean\'], "
+ }
+ member_method {
+ name: "mode"
+ argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'mode\'], "
+ }
+ member_method {
+ name: "param_shapes"
+ argspec: "args=[\'cls\', \'sample_shape\', \'name\'], varargs=None, keywords=None, defaults=[\'DistributionParamShapes\'], "
+ }
+ member_method {
+ name: "param_static_shapes"
+ argspec: "args=[\'cls\', \'sample_shape\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "prob"
+ argspec: "args=[\'self\', \'value\', \'name\'], varargs=None, keywords=None, defaults=[\'prob\'], "
+ }
+ member_method {
+ name: "quantile"
+ argspec: "args=[\'self\', \'value\', \'name\'], varargs=None, keywords=None, defaults=[\'quantile\'], "
+ }
+ member_method {
+ name: "sample"
+ argspec: "args=[\'self\', \'sample_shape\', \'seed\', \'name\'], varargs=None, keywords=None, defaults=[\'()\', \'None\', \'sample\'], "
+ }
+ member_method {
+ name: "stddev"
+ argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'stddev\'], "
+ }
+ member_method {
+ name: "survival_function"
+ argspec: "args=[\'self\', \'value\', \'name\'], varargs=None, keywords=None, defaults=[\'survival_function\'], "
+ }
+ member_method {
+ name: "variance"
+ argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'variance\'], "
+ }
+}
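
Normal is fully reparameterized, so sampling composes with gradient-based training when loc and scale are variables; the sketch below checks that flag and builds a simple negative log-likelihood, again as a graph-mode sketch with constant parameters:

    norm = tf.distributions.Normal(loc=0.0, scale=1.0)
    assert norm.reparameterization_type == tf.distributions.FULLY_REPARAMETERIZED
    x = norm.sample(100, seed=0)
    nll = -tf.reduce_mean(norm.log_prob(x))   # scalar Tensor; differentiable when
                                              # loc/scale are tf.Variables instead of constants
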
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.distributions.-register-k-l.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.distributions.-register-k-l.pbtxt
new file mode 100644
index 0000000000..e3db443c2b
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.distributions.-register-k-l.pbtxt
@@ -0,0 +1,9 @@
+path: "tensorflow.distributions.RegisterKL"
+tf_class {
+ is_instance: "<class \'tensorflow.python.ops.distributions.kullback_leibler.RegisterKL\'>"
+ is_instance: "<type \'object\'>"
+ member_method {
+ name: "__init__"
+ argspec: "args=[\'self\', \'dist_cls_a\', \'dist_cls_b\'], varargs=None, keywords=None, defaults=None"
+ }
+}
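
RegisterKL is the decorator that kl_divergence and cross_entropy dispatch through; the two constructor arguments name the class pair a rule handles. A hypothetical sketch — MyNormal and the zero-valued KL body are placeholders for illustration, not library code:

    class MyNormal(tf.distributions.Normal):   # hypothetical subclass for illustration
      pass

    @tf.distributions.RegisterKL(MyNormal, MyNormal)
    def _kl_my_normal(a, b, name=None):
      # Placeholder closed form; a real rule returns a Tensor holding KL(a || b).
      return tf.zeros_like(a.loc)

    kl = tf.distributions.kl_divergence(MyNormal(0.0, 1.0), MyNormal(1.0, 1.0))
    # Expected to dispatch to the rule above, it being the most specific registration.
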
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.distributions.-reparameterization-type.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.distributions.-reparameterization-type.pbtxt
new file mode 100644
index 0000000000..02e8d576dd
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.distributions.-reparameterization-type.pbtxt
@@ -0,0 +1,9 @@
+path: "tensorflow.distributions.ReparameterizationType"
+tf_class {
+ is_instance: "<class \'tensorflow.python.ops.distributions.distribution.ReparameterizationType\'>"
+ is_instance: "<type \'object\'>"
+ member_method {
+ name: "__init__"
+ argspec: "args=[\'self\', \'rep_type\'], varargs=None, keywords=None, defaults=None"
+ }
+}
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.distributions.-student-t.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.distributions.-student-t.pbtxt
new file mode 100644
index 0000000000..9aa7f9a634
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.distributions.-student-t.pbtxt
@@ -0,0 +1,147 @@
+path: "tensorflow.distributions.StudentT"
+tf_class {
+ is_instance: "<class \'tensorflow.python.ops.distributions.student_t.StudentT\'>"
+ is_instance: "<class \'tensorflow.python.ops.distributions.distribution.Distribution\'>"
+ is_instance: "<class \'tensorflow.python.ops.distributions.distribution._BaseDistribution\'>"
+ is_instance: "<type \'object\'>"
+ member {
+ name: "allow_nan_stats"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "batch_shape"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "df"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "dtype"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "event_shape"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "loc"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "name"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "parameters"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "reparameterization_type"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "scale"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "validate_args"
+ mtype: "<type \'property\'>"
+ }
+ member_method {
+ name: "__init__"
+ argspec: "args=[\'self\', \'df\', \'loc\', \'scale\', \'validate_args\', \'allow_nan_stats\', \'name\'], varargs=None, keywords=None, defaults=[\'False\', \'True\', \'StudentT\'], "
+ }
+ member_method {
+ name: "batch_shape_tensor"
+ argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'batch_shape_tensor\'], "
+ }
+ member_method {
+ name: "cdf"
+ argspec: "args=[\'self\', \'value\', \'name\'], varargs=None, keywords=None, defaults=[\'cdf\'], "
+ }
+ member_method {
+ name: "copy"
+ argspec: "args=[\'self\'], varargs=None, keywords=override_parameters_kwargs, defaults=None"
+ }
+ member_method {
+ name: "covariance"
+ argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'covariance\'], "
+ }
+ member_method {
+ name: "cross_entropy"
+ argspec: "args=[\'self\', \'other\', \'name\'], varargs=None, keywords=None, defaults=[\'cross_entropy\'], "
+ }
+ member_method {
+ name: "entropy"
+ argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'entropy\'], "
+ }
+ member_method {
+ name: "event_shape_tensor"
+ argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'event_shape_tensor\'], "
+ }
+ member_method {
+ name: "is_scalar_batch"
+ argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'is_scalar_batch\'], "
+ }
+ member_method {
+ name: "is_scalar_event"
+ argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'is_scalar_event\'], "
+ }
+ member_method {
+ name: "kl_divergence"
+ argspec: "args=[\'self\', \'other\', \'name\'], varargs=None, keywords=None, defaults=[\'kl_divergence\'], "
+ }
+ member_method {
+ name: "log_cdf"
+ argspec: "args=[\'self\', \'value\', \'name\'], varargs=None, keywords=None, defaults=[\'log_cdf\'], "
+ }
+ member_method {
+ name: "log_prob"
+ argspec: "args=[\'self\', \'value\', \'name\'], varargs=None, keywords=None, defaults=[\'log_prob\'], "
+ }
+ member_method {
+ name: "log_survival_function"
+ argspec: "args=[\'self\', \'value\', \'name\'], varargs=None, keywords=None, defaults=[\'log_survival_function\'], "
+ }
+ member_method {
+ name: "mean"
+ argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'mean\'], "
+ }
+ member_method {
+ name: "mode"
+ argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'mode\'], "
+ }
+ member_method {
+ name: "param_shapes"
+ argspec: "args=[\'cls\', \'sample_shape\', \'name\'], varargs=None, keywords=None, defaults=[\'DistributionParamShapes\'], "
+ }
+ member_method {
+ name: "param_static_shapes"
+ argspec: "args=[\'cls\', \'sample_shape\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "prob"
+ argspec: "args=[\'self\', \'value\', \'name\'], varargs=None, keywords=None, defaults=[\'prob\'], "
+ }
+ member_method {
+ name: "quantile"
+ argspec: "args=[\'self\', \'value\', \'name\'], varargs=None, keywords=None, defaults=[\'quantile\'], "
+ }
+ member_method {
+ name: "sample"
+ argspec: "args=[\'self\', \'sample_shape\', \'seed\', \'name\'], varargs=None, keywords=None, defaults=[\'()\', \'None\', \'sample\'], "
+ }
+ member_method {
+ name: "stddev"
+ argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'stddev\'], "
+ }
+ member_method {
+ name: "survival_function"
+ argspec: "args=[\'self\', \'value\', \'name\'], varargs=None, keywords=None, defaults=[\'survival_function\'], "
+ }
+ member_method {
+ name: "variance"
+ argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'variance\'], "
+ }
+}
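
StudentT adds a df (degrees of freedom) parameter ahead of the usual loc/scale pair, and its statistics exist only for large enough df, which is where the allow_nan_stats flag above comes in. Sketch:

    st = tf.distributions.StudentT(df=3.0, loc=0.0, scale=1.0)
    m = st.mean()       # 0.0, defined because df > 1
    v = st.variance()   # df / (df - 2) * scale**2 = 3.0, defined because df > 2
    # With the default allow_nan_stats=True, undefined statistics (e.g. the mean
    # when df <= 1) come back as NaN instead of raising an error.
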
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.distributions.-uniform.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.distributions.-uniform.pbtxt
new file mode 100644
index 0000000000..d1b9d30696
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.distributions.-uniform.pbtxt
@@ -0,0 +1,147 @@
+path: "tensorflow.distributions.Uniform"
+tf_class {
+ is_instance: "<class \'tensorflow.python.ops.distributions.uniform.Uniform\'>"
+ is_instance: "<class \'tensorflow.python.ops.distributions.distribution.Distribution\'>"
+ is_instance: "<class \'tensorflow.python.ops.distributions.distribution._BaseDistribution\'>"
+ is_instance: "<type \'object\'>"
+ member {
+ name: "allow_nan_stats"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "batch_shape"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "dtype"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "event_shape"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "high"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "low"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "name"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "parameters"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "reparameterization_type"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "validate_args"
+ mtype: "<type \'property\'>"
+ }
+ member_method {
+ name: "__init__"
+ argspec: "args=[\'self\', \'low\', \'high\', \'validate_args\', \'allow_nan_stats\', \'name\'], varargs=None, keywords=None, defaults=[\'0.0\', \'1.0\', \'False\', \'True\', \'Uniform\'], "
+ }
+ member_method {
+ name: "batch_shape_tensor"
+ argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'batch_shape_tensor\'], "
+ }
+ member_method {
+ name: "cdf"
+ argspec: "args=[\'self\', \'value\', \'name\'], varargs=None, keywords=None, defaults=[\'cdf\'], "
+ }
+ member_method {
+ name: "copy"
+ argspec: "args=[\'self\'], varargs=None, keywords=override_parameters_kwargs, defaults=None"
+ }
+ member_method {
+ name: "covariance"
+ argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'covariance\'], "
+ }
+ member_method {
+ name: "cross_entropy"
+ argspec: "args=[\'self\', \'other\', \'name\'], varargs=None, keywords=None, defaults=[\'cross_entropy\'], "
+ }
+ member_method {
+ name: "entropy"
+ argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'entropy\'], "
+ }
+ member_method {
+ name: "event_shape_tensor"
+ argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'event_shape_tensor\'], "
+ }
+ member_method {
+ name: "is_scalar_batch"
+ argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'is_scalar_batch\'], "
+ }
+ member_method {
+ name: "is_scalar_event"
+ argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'is_scalar_event\'], "
+ }
+ member_method {
+ name: "kl_divergence"
+ argspec: "args=[\'self\', \'other\', \'name\'], varargs=None, keywords=None, defaults=[\'kl_divergence\'], "
+ }
+ member_method {
+ name: "log_cdf"
+ argspec: "args=[\'self\', \'value\', \'name\'], varargs=None, keywords=None, defaults=[\'log_cdf\'], "
+ }
+ member_method {
+ name: "log_prob"
+ argspec: "args=[\'self\', \'value\', \'name\'], varargs=None, keywords=None, defaults=[\'log_prob\'], "
+ }
+ member_method {
+ name: "log_survival_function"
+ argspec: "args=[\'self\', \'value\', \'name\'], varargs=None, keywords=None, defaults=[\'log_survival_function\'], "
+ }
+ member_method {
+ name: "mean"
+ argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'mean\'], "
+ }
+ member_method {
+ name: "mode"
+ argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'mode\'], "
+ }
+ member_method {
+ name: "param_shapes"
+ argspec: "args=[\'cls\', \'sample_shape\', \'name\'], varargs=None, keywords=None, defaults=[\'DistributionParamShapes\'], "
+ }
+ member_method {
+ name: "param_static_shapes"
+ argspec: "args=[\'cls\', \'sample_shape\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "prob"
+ argspec: "args=[\'self\', \'value\', \'name\'], varargs=None, keywords=None, defaults=[\'prob\'], "
+ }
+ member_method {
+ name: "quantile"
+ argspec: "args=[\'self\', \'value\', \'name\'], varargs=None, keywords=None, defaults=[\'quantile\'], "
+ }
+ member_method {
+ name: "range"
+ argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'range\'], "
+ }
+ member_method {
+ name: "sample"
+ argspec: "args=[\'self\', \'sample_shape\', \'seed\', \'name\'], varargs=None, keywords=None, defaults=[\'()\', \'None\', \'sample\'], "
+ }
+ member_method {
+ name: "stddev"
+ argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'stddev\'], "
+ }
+ member_method {
+ name: "survival_function"
+ argspec: "args=[\'self\', \'value\', \'name\'], varargs=None, keywords=None, defaults=[\'survival_function\'], "
+ }
+ member_method {
+ name: "variance"
+ argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'variance\'], "
+ }
+}
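
Uniform defaults to the unit interval (low=0.0, high=1.0) and is the one distribution in this set with an extra range() method (high - low). Sketch:

    u = tf.distributions.Uniform(low=-1.0, high=1.0)
    width = u.range()    # high - low = 2.0
    half = u.cdf(0.0)    # 0.5
    draws = u.sample(4)  # shape [4], values drawn from [-1, 1)
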
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.distributions.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.distributions.pbtxt
new file mode 100644
index 0000000000..90b60ef074
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.distributions.pbtxt
@@ -0,0 +1,75 @@
+path: "tensorflow.distributions"
+tf_module {
+ member {
+ name: "Bernoulli"
+ mtype: "<class \'tensorflow.python.ops.distributions.distribution._DistributionMeta\'>"
+ }
+ member {
+ name: "Beta"
+ mtype: "<class \'tensorflow.python.ops.distributions.distribution._DistributionMeta\'>"
+ }
+ member {
+ name: "Categorical"
+ mtype: "<class \'tensorflow.python.ops.distributions.distribution._DistributionMeta\'>"
+ }
+ member {
+ name: "Dirichlet"
+ mtype: "<class \'tensorflow.python.ops.distributions.distribution._DistributionMeta\'>"
+ }
+ member {
+ name: "DirichletMultinomial"
+ mtype: "<class \'tensorflow.python.ops.distributions.distribution._DistributionMeta\'>"
+ }
+ member {
+ name: "Distribution"
+ mtype: "<class \'tensorflow.python.ops.distributions.distribution._DistributionMeta\'>"
+ }
+ member {
+ name: "Exponential"
+ mtype: "<class \'tensorflow.python.ops.distributions.distribution._DistributionMeta\'>"
+ }
+ member {
+ name: "FULLY_REPARAMETERIZED"
+ mtype: "<class \'tensorflow.python.ops.distributions.distribution.ReparameterizationType\'>"
+ }
+ member {
+ name: "Gamma"
+ mtype: "<class \'tensorflow.python.ops.distributions.distribution._DistributionMeta\'>"
+ }
+ member {
+ name: "Laplace"
+ mtype: "<class \'tensorflow.python.ops.distributions.distribution._DistributionMeta\'>"
+ }
+ member {
+ name: "Multinomial"
+ mtype: "<class \'tensorflow.python.ops.distributions.distribution._DistributionMeta\'>"
+ }
+ member {
+ name: "NOT_REPARAMETERIZED"
+ mtype: "<class \'tensorflow.python.ops.distributions.distribution.ReparameterizationType\'>"
+ }
+ member {
+ name: "Normal"
+ mtype: "<class \'tensorflow.python.ops.distributions.distribution._DistributionMeta\'>"
+ }
+ member {
+ name: "RegisterKL"
+ mtype: "<type \'type\'>"
+ }
+ member {
+ name: "ReparameterizationType"
+ mtype: "<type \'type\'>"
+ }
+ member {
+ name: "StudentT"
+ mtype: "<class \'tensorflow.python.ops.distributions.distribution._DistributionMeta\'>"
+ }
+ member {
+ name: "Uniform"
+ mtype: "<class \'tensorflow.python.ops.distributions.distribution._DistributionMeta\'>"
+ }
+ member_method {
+ name: "kl_divergence"
+ argspec: "args=[\'distribution_a\', \'distribution_b\', \'allow_nan_stats\', \'name\'], varargs=None, keywords=None, defaults=[\'True\', \'None\'], "
+ }
+}
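
At the module level the only free function is kl_divergence, which dispatches on the pair of distribution classes through the RegisterKL registry; FULLY_REPARAMETERIZED and NOT_REPARAMETERIZED are the two ReparameterizationType constants compared against a distribution's reparameterization_type property. A short sketch:

    a = tf.distributions.Normal(loc=0.0, scale=1.0)
    b = tf.distributions.Normal(loc=1.0, scale=2.0)
    kl = tf.distributions.kl_divergence(a, b)   # resolved by the registered Normal/Normal rule
    a.reparameterization_type == tf.distributions.FULLY_REPARAMETERIZED   # True
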
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.dtypes.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.dtypes.pbtxt
new file mode 100644
index 0000000000..98e1feed00
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.dtypes.pbtxt
@@ -0,0 +1,7 @@
+path: "tensorflow.dtypes"
+tf_module {
+ member_method {
+ name: "as_string"
+ argspec: "args=[\'input\', \'precision\', \'scientific\', \'shortest\', \'width\', \'fill\', \'name\'], varargs=None, keywords=None, defaults=[\'-1\', \'False\', \'False\', \'-1\', \'\', \'None\'], "
+ }
+}
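
The new tensorflow.dtypes namespace currently exposes only as_string, an element-wise numeric-to-string conversion whose precision/scientific/width/fill options mirror printf-style formatting. A minimal sketch (the expected values are shown in the comment, not verified here):

    x = tf.constant([3.14159, 2.71828])
    s = tf.dtypes.as_string(x, precision=2)   # string Tensor ["3.14", "2.72"]
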
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.errors.-aborted-error.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.errors.-aborted-error.pbtxt
new file mode 100644
index 0000000000..ea9186b0b9
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.errors.-aborted-error.pbtxt
@@ -0,0 +1,30 @@
+path: "tensorflow.errors.AbortedError"
+tf_class {
+ is_instance: "<class \'tensorflow.python.framework.errors_impl.AbortedError\'>"
+ is_instance: "<class \'tensorflow.python.framework.errors_impl.OpError\'>"
+ is_instance: "<type \'exceptions.Exception\'>"
+ member {
+ name: "args"
+ mtype: "<type \'getset_descriptor\'>"
+ }
+ member {
+ name: "error_code"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "message"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "node_def"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "op"
+ mtype: "<type \'property\'>"
+ }
+ member_method {
+ name: "__init__"
+ argspec: "args=[\'self\', \'node_def\', \'op\', \'message\'], varargs=None, keywords=None, defaults=None"
+ }
+}
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.errors.-already-exists-error.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.errors.-already-exists-error.pbtxt
new file mode 100644
index 0000000000..4e155081dd
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.errors.-already-exists-error.pbtxt
@@ -0,0 +1,30 @@
+path: "tensorflow.errors.AlreadyExistsError"
+tf_class {
+ is_instance: "<class \'tensorflow.python.framework.errors_impl.AlreadyExistsError\'>"
+ is_instance: "<class \'tensorflow.python.framework.errors_impl.OpError\'>"
+ is_instance: "<type \'exceptions.Exception\'>"
+ member {
+ name: "args"
+ mtype: "<type \'getset_descriptor\'>"
+ }
+ member {
+ name: "error_code"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "message"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "node_def"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "op"
+ mtype: "<type \'property\'>"
+ }
+ member_method {
+ name: "__init__"
+ argspec: "args=[\'self\', \'node_def\', \'op\', \'message\'], varargs=None, keywords=None, defaults=None"
+ }
+}
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.errors.-cancelled-error.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.errors.-cancelled-error.pbtxt
new file mode 100644
index 0000000000..b02a0e023a
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.errors.-cancelled-error.pbtxt
@@ -0,0 +1,30 @@
+path: "tensorflow.errors.CancelledError"
+tf_class {
+ is_instance: "<class \'tensorflow.python.framework.errors_impl.CancelledError\'>"
+ is_instance: "<class \'tensorflow.python.framework.errors_impl.OpError\'>"
+ is_instance: "<type \'exceptions.Exception\'>"
+ member {
+ name: "args"
+ mtype: "<type \'getset_descriptor\'>"
+ }
+ member {
+ name: "error_code"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "message"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "node_def"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "op"
+ mtype: "<type \'property\'>"
+ }
+ member_method {
+ name: "__init__"
+ argspec: "args=[\'self\', \'node_def\', \'op\', \'message\'], varargs=None, keywords=None, defaults=None"
+ }
+}
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.errors.-data-loss-error.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.errors.-data-loss-error.pbtxt
new file mode 100644
index 0000000000..c1fa66342a
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.errors.-data-loss-error.pbtxt
@@ -0,0 +1,30 @@
+path: "tensorflow.errors.DataLossError"
+tf_class {
+ is_instance: "<class \'tensorflow.python.framework.errors_impl.DataLossError\'>"
+ is_instance: "<class \'tensorflow.python.framework.errors_impl.OpError\'>"
+ is_instance: "<type \'exceptions.Exception\'>"
+ member {
+ name: "args"
+ mtype: "<type \'getset_descriptor\'>"
+ }
+ member {
+ name: "error_code"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "message"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "node_def"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "op"
+ mtype: "<type \'property\'>"
+ }
+ member_method {
+ name: "__init__"
+ argspec: "args=[\'self\', \'node_def\', \'op\', \'message\'], varargs=None, keywords=None, defaults=None"
+ }
+}
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.errors.-deadline-exceeded-error.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.errors.-deadline-exceeded-error.pbtxt
new file mode 100644
index 0000000000..8e03793619
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.errors.-deadline-exceeded-error.pbtxt
@@ -0,0 +1,30 @@
+path: "tensorflow.errors.DeadlineExceededError"
+tf_class {
+ is_instance: "<class \'tensorflow.python.framework.errors_impl.DeadlineExceededError\'>"
+ is_instance: "<class \'tensorflow.python.framework.errors_impl.OpError\'>"
+ is_instance: "<type \'exceptions.Exception\'>"
+ member {
+ name: "args"
+ mtype: "<type \'getset_descriptor\'>"
+ }
+ member {
+ name: "error_code"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "message"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "node_def"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "op"
+ mtype: "<type \'property\'>"
+ }
+ member_method {
+ name: "__init__"
+ argspec: "args=[\'self\', \'node_def\', \'op\', \'message\'], varargs=None, keywords=None, defaults=None"
+ }
+}
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.errors.-failed-precondition-error.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.errors.-failed-precondition-error.pbtxt
new file mode 100644
index 0000000000..384d4b534c
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.errors.-failed-precondition-error.pbtxt
@@ -0,0 +1,30 @@
+path: "tensorflow.errors.FailedPreconditionError"
+tf_class {
+ is_instance: "<class \'tensorflow.python.framework.errors_impl.FailedPreconditionError\'>"
+ is_instance: "<class \'tensorflow.python.framework.errors_impl.OpError\'>"
+ is_instance: "<type \'exceptions.Exception\'>"
+ member {
+ name: "args"
+ mtype: "<type \'getset_descriptor\'>"
+ }
+ member {
+ name: "error_code"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "message"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "node_def"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "op"
+ mtype: "<type \'property\'>"
+ }
+ member_method {
+ name: "__init__"
+ argspec: "args=[\'self\', \'node_def\', \'op\', \'message\'], varargs=None, keywords=None, defaults=None"
+ }
+}
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.errors.-internal-error.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.errors.-internal-error.pbtxt
new file mode 100644
index 0000000000..ac5c4d7879
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.errors.-internal-error.pbtxt
@@ -0,0 +1,30 @@
+path: "tensorflow.errors.InternalError"
+tf_class {
+ is_instance: "<class \'tensorflow.python.framework.errors_impl.InternalError\'>"
+ is_instance: "<class \'tensorflow.python.framework.errors_impl.OpError\'>"
+ is_instance: "<type \'exceptions.Exception\'>"
+ member {
+ name: "args"
+ mtype: "<type \'getset_descriptor\'>"
+ }
+ member {
+ name: "error_code"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "message"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "node_def"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "op"
+ mtype: "<type \'property\'>"
+ }
+ member_method {
+ name: "__init__"
+ argspec: "args=[\'self\', \'node_def\', \'op\', \'message\'], varargs=None, keywords=None, defaults=None"
+ }
+}
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.errors.-invalid-argument-error.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.errors.-invalid-argument-error.pbtxt
new file mode 100644
index 0000000000..161edd4a7c
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.errors.-invalid-argument-error.pbtxt
@@ -0,0 +1,30 @@
+path: "tensorflow.errors.InvalidArgumentError"
+tf_class {
+ is_instance: "<class \'tensorflow.python.framework.errors_impl.InvalidArgumentError\'>"
+ is_instance: "<class \'tensorflow.python.framework.errors_impl.OpError\'>"
+ is_instance: "<type \'exceptions.Exception\'>"
+ member {
+ name: "args"
+ mtype: "<type \'getset_descriptor\'>"
+ }
+ member {
+ name: "error_code"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "message"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "node_def"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "op"
+ mtype: "<type \'property\'>"
+ }
+ member_method {
+ name: "__init__"
+ argspec: "args=[\'self\', \'node_def\', \'op\', \'message\'], varargs=None, keywords=None, defaults=None"
+ }
+}
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.errors.-not-found-error.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.errors.-not-found-error.pbtxt
new file mode 100644
index 0000000000..1e64730ac6
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.errors.-not-found-error.pbtxt
@@ -0,0 +1,30 @@
+path: "tensorflow.errors.NotFoundError"
+tf_class {
+ is_instance: "<class \'tensorflow.python.framework.errors_impl.NotFoundError\'>"
+ is_instance: "<class \'tensorflow.python.framework.errors_impl.OpError\'>"
+ is_instance: "<type \'exceptions.Exception\'>"
+ member {
+ name: "args"
+ mtype: "<type \'getset_descriptor\'>"
+ }
+ member {
+ name: "error_code"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "message"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "node_def"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "op"
+ mtype: "<type \'property\'>"
+ }
+ member_method {
+ name: "__init__"
+ argspec: "args=[\'self\', \'node_def\', \'op\', \'message\'], varargs=None, keywords=None, defaults=None"
+ }
+}
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.errors.-op-error.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.errors.-op-error.pbtxt
new file mode 100644
index 0000000000..b1f14c0457
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.errors.-op-error.pbtxt
@@ -0,0 +1,29 @@
+path: "tensorflow.errors.OpError"
+tf_class {
+ is_instance: "<class \'tensorflow.python.framework.errors_impl.OpError\'>"
+ is_instance: "<type \'exceptions.Exception\'>"
+ member {
+ name: "args"
+ mtype: "<type \'getset_descriptor\'>"
+ }
+ member {
+ name: "error_code"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "message"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "node_def"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "op"
+ mtype: "<type \'property\'>"
+ }
+ member_method {
+ name: "__init__"
+ argspec: "args=[\'self\', \'node_def\', \'op\', \'message\', \'error_code\'], varargs=None, keywords=None, defaults=None"
+ }
+}
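
OpError is the common base of every tensorflow.errors.* class in the surrounding files; each subclass corresponds to one canonical status code, and the message/op/node_def/error_code members above are what an except block inspects. A hedged sketch that uses a read of a nonexistent file to trigger NotFoundError at run time:

    import tensorflow as tf

    with tf.Session() as sess:
      try:
        sess.run(tf.read_file("/no/such/file"))
      except tf.errors.NotFoundError as e:   # one concrete subclass per status code
        print(e.message)
      except tf.errors.OpError as e:         # catch-all base documented above
        print(e.error_code, e.op, e.node_def)
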
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.errors.-out-of-range-error.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.errors.-out-of-range-error.pbtxt
new file mode 100644
index 0000000000..6365e47286
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.errors.-out-of-range-error.pbtxt
@@ -0,0 +1,30 @@
+path: "tensorflow.errors.OutOfRangeError"
+tf_class {
+ is_instance: "<class \'tensorflow.python.framework.errors_impl.OutOfRangeError\'>"
+ is_instance: "<class \'tensorflow.python.framework.errors_impl.OpError\'>"
+ is_instance: "<type \'exceptions.Exception\'>"
+ member {
+ name: "args"
+ mtype: "<type \'getset_descriptor\'>"
+ }
+ member {
+ name: "error_code"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "message"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "node_def"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "op"
+ mtype: "<type \'property\'>"
+ }
+ member_method {
+ name: "__init__"
+ argspec: "args=[\'self\', \'node_def\', \'op\', \'message\'], varargs=None, keywords=None, defaults=None"
+ }
+}
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.errors.-permission-denied-error.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.errors.-permission-denied-error.pbtxt
new file mode 100644
index 0000000000..dc8a66f9ea
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.errors.-permission-denied-error.pbtxt
@@ -0,0 +1,30 @@
+path: "tensorflow.errors.PermissionDeniedError"
+tf_class {
+ is_instance: "<class \'tensorflow.python.framework.errors_impl.PermissionDeniedError\'>"
+ is_instance: "<class \'tensorflow.python.framework.errors_impl.OpError\'>"
+ is_instance: "<type \'exceptions.Exception\'>"
+ member {
+ name: "args"
+ mtype: "<type \'getset_descriptor\'>"
+ }
+ member {
+ name: "error_code"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "message"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "node_def"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "op"
+ mtype: "<type \'property\'>"
+ }
+ member_method {
+ name: "__init__"
+ argspec: "args=[\'self\', \'node_def\', \'op\', \'message\'], varargs=None, keywords=None, defaults=None"
+ }
+}
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.errors.-resource-exhausted-error.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.errors.-resource-exhausted-error.pbtxt
new file mode 100644
index 0000000000..85bb384b46
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.errors.-resource-exhausted-error.pbtxt
@@ -0,0 +1,30 @@
+path: "tensorflow.errors.ResourceExhaustedError"
+tf_class {
+ is_instance: "<class \'tensorflow.python.framework.errors_impl.ResourceExhaustedError\'>"
+ is_instance: "<class \'tensorflow.python.framework.errors_impl.OpError\'>"
+ is_instance: "<type \'exceptions.Exception\'>"
+ member {
+ name: "args"
+ mtype: "<type \'getset_descriptor\'>"
+ }
+ member {
+ name: "error_code"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "message"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "node_def"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "op"
+ mtype: "<type \'property\'>"
+ }
+ member_method {
+ name: "__init__"
+ argspec: "args=[\'self\', \'node_def\', \'op\', \'message\'], varargs=None, keywords=None, defaults=None"
+ }
+}
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.errors.-unauthenticated-error.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.errors.-unauthenticated-error.pbtxt
new file mode 100644
index 0000000000..d57d7ac2f2
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.errors.-unauthenticated-error.pbtxt
@@ -0,0 +1,30 @@
+path: "tensorflow.errors.UnauthenticatedError"
+tf_class {
+ is_instance: "<class \'tensorflow.python.framework.errors_impl.UnauthenticatedError\'>"
+ is_instance: "<class \'tensorflow.python.framework.errors_impl.OpError\'>"
+ is_instance: "<type \'exceptions.Exception\'>"
+ member {
+ name: "args"
+ mtype: "<type \'getset_descriptor\'>"
+ }
+ member {
+ name: "error_code"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "message"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "node_def"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "op"
+ mtype: "<type \'property\'>"
+ }
+ member_method {
+ name: "__init__"
+ argspec: "args=[\'self\', \'node_def\', \'op\', \'message\'], varargs=None, keywords=None, defaults=None"
+ }
+}
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.errors.-unavailable-error.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.errors.-unavailable-error.pbtxt
new file mode 100644
index 0000000000..cc33e6ed8d
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.errors.-unavailable-error.pbtxt
@@ -0,0 +1,30 @@
+path: "tensorflow.errors.UnavailableError"
+tf_class {
+ is_instance: "<class \'tensorflow.python.framework.errors_impl.UnavailableError\'>"
+ is_instance: "<class \'tensorflow.python.framework.errors_impl.OpError\'>"
+ is_instance: "<type \'exceptions.Exception\'>"
+ member {
+ name: "args"
+ mtype: "<type \'getset_descriptor\'>"
+ }
+ member {
+ name: "error_code"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "message"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "node_def"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "op"
+ mtype: "<type \'property\'>"
+ }
+ member_method {
+ name: "__init__"
+ argspec: "args=[\'self\', \'node_def\', \'op\', \'message\'], varargs=None, keywords=None, defaults=None"
+ }
+}
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.errors.-unimplemented-error.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.errors.-unimplemented-error.pbtxt
new file mode 100644
index 0000000000..b8c2e22dbd
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.errors.-unimplemented-error.pbtxt
@@ -0,0 +1,30 @@
+path: "tensorflow.errors.UnimplementedError"
+tf_class {
+ is_instance: "<class \'tensorflow.python.framework.errors_impl.UnimplementedError\'>"
+ is_instance: "<class \'tensorflow.python.framework.errors_impl.OpError\'>"
+ is_instance: "<type \'exceptions.Exception\'>"
+ member {
+ name: "args"
+ mtype: "<type \'getset_descriptor\'>"
+ }
+ member {
+ name: "error_code"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "message"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "node_def"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "op"
+ mtype: "<type \'property\'>"
+ }
+ member_method {
+ name: "__init__"
+ argspec: "args=[\'self\', \'node_def\', \'op\', \'message\'], varargs=None, keywords=None, defaults=None"
+ }
+}
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.errors.-unknown-error.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.errors.-unknown-error.pbtxt
new file mode 100644
index 0000000000..8ffcfae95b
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.errors.-unknown-error.pbtxt
@@ -0,0 +1,30 @@
+path: "tensorflow.errors.UnknownError"
+tf_class {
+ is_instance: "<class \'tensorflow.python.framework.errors_impl.UnknownError\'>"
+ is_instance: "<class \'tensorflow.python.framework.errors_impl.OpError\'>"
+ is_instance: "<type \'exceptions.Exception\'>"
+ member {
+ name: "args"
+ mtype: "<type \'getset_descriptor\'>"
+ }
+ member {
+ name: "error_code"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "message"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "node_def"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "op"
+ mtype: "<type \'property\'>"
+ }
+ member_method {
+ name: "__init__"
+ argspec: "args=[\'self\', \'node_def\', \'op\', \'message\', \'error_code\'], varargs=None, keywords=None, defaults=[\'2\'], "
+ }
+}
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.errors.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.errors.pbtxt
new file mode 100644
index 0000000000..c5fe49baab
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.errors.pbtxt
@@ -0,0 +1,151 @@
+path: "tensorflow.errors"
+tf_module {
+ member {
+ name: "ABORTED"
+ mtype: "<type \'int\'>"
+ }
+ member {
+ name: "ALREADY_EXISTS"
+ mtype: "<type \'int\'>"
+ }
+ member {
+ name: "AbortedError"
+ mtype: "<type \'type\'>"
+ }
+ member {
+ name: "AlreadyExistsError"
+ mtype: "<type \'type\'>"
+ }
+ member {
+ name: "CANCELLED"
+ mtype: "<type \'int\'>"
+ }
+ member {
+ name: "CancelledError"
+ mtype: "<type \'type\'>"
+ }
+ member {
+ name: "DATA_LOSS"
+ mtype: "<type \'int\'>"
+ }
+ member {
+ name: "DEADLINE_EXCEEDED"
+ mtype: "<type \'int\'>"
+ }
+ member {
+ name: "DataLossError"
+ mtype: "<type \'type\'>"
+ }
+ member {
+ name: "DeadlineExceededError"
+ mtype: "<type \'type\'>"
+ }
+ member {
+ name: "FAILED_PRECONDITION"
+ mtype: "<type \'int\'>"
+ }
+ member {
+ name: "FailedPreconditionError"
+ mtype: "<type \'type\'>"
+ }
+ member {
+ name: "INTERNAL"
+ mtype: "<type \'int\'>"
+ }
+ member {
+ name: "INVALID_ARGUMENT"
+ mtype: "<type \'int\'>"
+ }
+ member {
+ name: "InternalError"
+ mtype: "<type \'type\'>"
+ }
+ member {
+ name: "InvalidArgumentError"
+ mtype: "<type \'type\'>"
+ }
+ member {
+ name: "NOT_FOUND"
+ mtype: "<type \'int\'>"
+ }
+ member {
+ name: "NotFoundError"
+ mtype: "<type \'type\'>"
+ }
+ member {
+ name: "OK"
+ mtype: "<type \'int\'>"
+ }
+ member {
+ name: "OUT_OF_RANGE"
+ mtype: "<type \'int\'>"
+ }
+ member {
+ name: "OpError"
+ mtype: "<type \'type\'>"
+ }
+ member {
+ name: "OutOfRangeError"
+ mtype: "<type \'type\'>"
+ }
+ member {
+ name: "PERMISSION_DENIED"
+ mtype: "<type \'int\'>"
+ }
+ member {
+ name: "PermissionDeniedError"
+ mtype: "<type \'type\'>"
+ }
+ member {
+ name: "RESOURCE_EXHAUSTED"
+ mtype: "<type \'int\'>"
+ }
+ member {
+ name: "ResourceExhaustedError"
+ mtype: "<type \'type\'>"
+ }
+ member {
+ name: "UNAUTHENTICATED"
+ mtype: "<type \'int\'>"
+ }
+ member {
+ name: "UNAVAILABLE"
+ mtype: "<type \'int\'>"
+ }
+ member {
+ name: "UNIMPLEMENTED"
+ mtype: "<type \'int\'>"
+ }
+ member {
+ name: "UNKNOWN"
+ mtype: "<type \'int\'>"
+ }
+ member {
+ name: "UnauthenticatedError"
+ mtype: "<type \'type\'>"
+ }
+ member {
+ name: "UnavailableError"
+ mtype: "<type \'type\'>"
+ }
+ member {
+ name: "UnimplementedError"
+ mtype: "<type \'type\'>"
+ }
+ member {
+ name: "UnknownError"
+ mtype: "<type \'type\'>"
+ }
+ member {
+ name: "raise_exception_on_not_ok_status"
+ mtype: "<type \'type\'>"
+ }
+ member_method {
+ name: "error_code_from_exception_type"
+ argspec: "args=[\'cls\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "exception_type_from_error_code"
+ argspec: "args=[\'error_code\'], varargs=None, keywords=None, defaults=None"
+ }
+}
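
Besides the exception classes, the module golden pins the integer status codes and two helpers that convert between codes and exception types. A small sketch of that mapping (behaviour as commonly documented, not asserted by the golden itself):

    import tensorflow as tf

    # Exception class -> canonical status code (an int such as tf.errors.NOT_FOUND).
    code = tf.errors.error_code_from_exception_type(tf.errors.NotFoundError)
    assert code == tf.errors.NOT_FOUND

    # Canonical status code -> exception class.
    exc_cls = tf.errors.exception_type_from_error_code(tf.errors.UNAVAILABLE)
    assert exc_cls is tf.errors.UnavailableError
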
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.errors.raise_exception_on_not_ok_status.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.errors.raise_exception_on_not_ok_status.pbtxt
new file mode 100644
index 0000000000..5d25ec769a
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.errors.raise_exception_on_not_ok_status.pbtxt
@@ -0,0 +1,8 @@
+path: "tensorflow.errors.raise_exception_on_not_ok_status"
+tf_class {
+ is_instance: "<class \'tensorflow.python.framework.errors_impl.raise_exception_on_not_ok_status\'>"
+ is_instance: "<type \'object\'>"
+ member_method {
+ name: "__init__"
+ }
+}
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.estimator.-baseline-classifier.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.estimator.-baseline-classifier.pbtxt
new file mode 100644
index 0000000000..082e26b99b
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.estimator.-baseline-classifier.pbtxt
@@ -0,0 +1,62 @@
+path: "tensorflow.estimator.BaselineClassifier"
+tf_class {
+ is_instance: "<class \'tensorflow.python.estimator.canned.baseline.BaselineClassifier\'>"
+ is_instance: "<class \'tensorflow.python.estimator.estimator.Estimator\'>"
+ is_instance: "<type \'object\'>"
+ member {
+ name: "config"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "model_dir"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "model_fn"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "params"
+ mtype: "<type \'property\'>"
+ }
+ member_method {
+ name: "__init__"
+ argspec: "args=[\'self\', \'model_dir\', \'n_classes\', \'weight_column\', \'label_vocabulary\', \'optimizer\', \'config\', \'loss_reduction\'], varargs=None, keywords=None, defaults=[\'None\', \'2\', \'None\', \'None\', \'Ftrl\', \'None\', \'weighted_sum\'], "
+ }
+ member_method {
+ name: "eval_dir"
+ argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "evaluate"
+ argspec: "args=[\'self\', \'input_fn\', \'steps\', \'hooks\', \'checkpoint_path\', \'name\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'None\'], "
+ }
+ member_method {
+ name: "export_saved_model"
+ argspec: "args=[\'self\', \'export_dir_base\', \'serving_input_receiver_fn\', \'assets_extra\', \'as_text\', \'checkpoint_path\'], varargs=None, keywords=None, defaults=[\'None\', \'False\', \'None\'], "
+ }
+ member_method {
+ name: "export_savedmodel"
+ argspec: "args=[\'self\', \'export_dir_base\', \'serving_input_receiver_fn\', \'assets_extra\', \'as_text\', \'checkpoint_path\', \'strip_default_attrs\'], varargs=None, keywords=None, defaults=[\'None\', \'False\', \'None\', \'False\'], "
+ }
+ member_method {
+ name: "get_variable_names"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_variable_value"
+ argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "latest_checkpoint"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "predict"
+ argspec: "args=[\'self\', \'input_fn\', \'predict_keys\', \'hooks\', \'checkpoint_path\', \'yield_single_examples\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'True\'], "
+ }
+ member_method {
+ name: "train"
+ argspec: "args=[\'self\', \'input_fn\', \'hooks\', \'steps\', \'max_steps\', \'saving_listeners\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'None\'], "
+ }
+}
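
BaselineClassifier ignores its features and learns only the label distribution, which makes it a useful sanity-check baseline. A minimal sketch matching the __init__ and train/evaluate/predict argspecs above (the toy input_fn is illustrative):

    import tensorflow as tf

    def input_fn():
        # Toy input; any (features, labels) pair works.
        features = {"x": tf.constant([[1.0], [2.0], [3.0], [4.0]])}
        labels = tf.constant([0, 0, 1, 1])
        return features, labels

    classifier = tf.estimator.BaselineClassifier(n_classes=2)  # model_dir defaults to a temp dir
    classifier.train(input_fn=input_fn, steps=10)
    metrics = classifier.evaluate(input_fn=input_fn, steps=1)
    predictions = list(classifier.predict(input_fn=input_fn))
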
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.estimator.-baseline-regressor.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.estimator.-baseline-regressor.pbtxt
new file mode 100644
index 0000000000..7cc4191eb3
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.estimator.-baseline-regressor.pbtxt
@@ -0,0 +1,62 @@
+path: "tensorflow.estimator.BaselineRegressor"
+tf_class {
+ is_instance: "<class \'tensorflow.python.estimator.canned.baseline.BaselineRegressor\'>"
+ is_instance: "<class \'tensorflow.python.estimator.estimator.Estimator\'>"
+ is_instance: "<type \'object\'>"
+ member {
+ name: "config"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "model_dir"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "model_fn"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "params"
+ mtype: "<type \'property\'>"
+ }
+ member_method {
+ name: "__init__"
+ argspec: "args=[\'self\', \'model_dir\', \'label_dimension\', \'weight_column\', \'optimizer\', \'config\', \'loss_reduction\'], varargs=None, keywords=None, defaults=[\'None\', \'1\', \'None\', \'Ftrl\', \'None\', \'weighted_sum\'], "
+ }
+ member_method {
+ name: "eval_dir"
+ argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "evaluate"
+ argspec: "args=[\'self\', \'input_fn\', \'steps\', \'hooks\', \'checkpoint_path\', \'name\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'None\'], "
+ }
+ member_method {
+ name: "export_saved_model"
+ argspec: "args=[\'self\', \'export_dir_base\', \'serving_input_receiver_fn\', \'assets_extra\', \'as_text\', \'checkpoint_path\'], varargs=None, keywords=None, defaults=[\'None\', \'False\', \'None\'], "
+ }
+ member_method {
+ name: "export_savedmodel"
+ argspec: "args=[\'self\', \'export_dir_base\', \'serving_input_receiver_fn\', \'assets_extra\', \'as_text\', \'checkpoint_path\', \'strip_default_attrs\'], varargs=None, keywords=None, defaults=[\'None\', \'False\', \'None\', \'False\'], "
+ }
+ member_method {
+ name: "get_variable_names"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_variable_value"
+ argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "latest_checkpoint"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "predict"
+ argspec: "args=[\'self\', \'input_fn\', \'predict_keys\', \'hooks\', \'checkpoint_path\', \'yield_single_examples\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'True\'], "
+ }
+ member_method {
+ name: "train"
+ argspec: "args=[\'self\', \'input_fn\', \'hooks\', \'steps\', \'max_steps\', \'saving_listeners\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'None\'], "
+ }
+}
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.estimator.-best-exporter.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.estimator.-best-exporter.pbtxt
new file mode 100644
index 0000000000..9694268199
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.estimator.-best-exporter.pbtxt
@@ -0,0 +1,18 @@
+path: "tensorflow.estimator.BestExporter"
+tf_class {
+ is_instance: "<class \'tensorflow.python.estimator.exporter.BestExporter\'>"
+ is_instance: "<class \'tensorflow.python.estimator.exporter.Exporter\'>"
+ is_instance: "<type \'object\'>"
+ member {
+ name: "name"
+ mtype: "<type \'property\'>"
+ }
+ member_method {
+ name: "__init__"
+ argspec: "args=[\'self\', \'name\', \'serving_input_receiver_fn\', \'event_file_pattern\', \'compare_fn\', \'assets_extra\', \'as_text\', \'exports_to_keep\'], varargs=None, keywords=None, defaults=[\'best_exporter\', \'None\', \'eval/*.tfevents.*\', \'<function _loss_smaller instance>\', \'None\', \'False\', \'5\'], "
+ }
+ member_method {
+ name: "export"
+ argspec: "args=[\'self\', \'estimator\', \'export_path\', \'checkpoint_path\', \'eval_result\', \'is_the_final_export\'], varargs=None, keywords=None, defaults=None"
+ }
+}
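
BestExporter re-exports a SavedModel only when the latest evaluation beats the previous best (by eval loss, per the default compare_fn). A sketch, assuming a raw-tensor serving input; the feature name and shape are placeholders:

    import tensorflow as tf

    serving_input_fn = tf.estimator.export.build_raw_serving_input_receiver_fn(
        {"x": tf.placeholder(tf.float32, [None, 1], name="x")})

    exporter = tf.estimator.BestExporter(
        serving_input_receiver_fn=serving_input_fn,
        exports_to_keep=5)  # keep only the 5 best exports

    # The exporter is normally attached to evaluation via
    # tf.estimator.EvalSpec(..., exporters=[exporter]); see the EvalSpec and
    # TrainSpec goldens later in this diff.
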
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.estimator.-boosted-trees-classifier.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.estimator.-boosted-trees-classifier.pbtxt
new file mode 100644
index 0000000000..9e429a32a5
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.estimator.-boosted-trees-classifier.pbtxt
@@ -0,0 +1,67 @@
+path: "tensorflow.estimator.BoostedTreesClassifier"
+tf_class {
+ is_instance: "<class \'tensorflow.python.estimator.canned.boosted_trees.BoostedTreesClassifier\'>"
+ is_instance: "<class \'tensorflow.python.estimator.canned.boosted_trees._BoostedTreesBase\'>"
+ is_instance: "<class \'tensorflow.python.estimator.estimator.Estimator\'>"
+ is_instance: "<type \'object\'>"
+ member {
+ name: "config"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "model_dir"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "model_fn"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "params"
+ mtype: "<type \'property\'>"
+ }
+ member_method {
+ name: "__init__"
+ argspec: "args=[\'self\', \'feature_columns\', \'n_batches_per_layer\', \'model_dir\', \'n_classes\', \'weight_column\', \'label_vocabulary\', \'n_trees\', \'max_depth\', \'learning_rate\', \'l1_regularization\', \'l2_regularization\', \'tree_complexity\', \'min_node_weight\', \'config\', \'center_bias\', \'pruning_mode\'], varargs=None, keywords=None, defaults=[\'None\', \'<object object instance>\', \'None\', \'None\', \'100\', \'6\', \'0.1\', \'0.0\', \'0.0\', \'0.0\', \'0.0\', \'None\', \'False\', \'none\'], "
+ }
+ member_method {
+ name: "eval_dir"
+ argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "evaluate"
+ argspec: "args=[\'self\', \'input_fn\', \'steps\', \'hooks\', \'checkpoint_path\', \'name\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'None\'], "
+ }
+ member_method {
+ name: "experimental_predict_with_explanations"
+ argspec: "args=[\'self\', \'input_fn\', \'predict_keys\', \'hooks\', \'checkpoint_path\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\'], "
+ }
+ member_method {
+ name: "export_saved_model"
+ argspec: "args=[\'self\', \'export_dir_base\', \'serving_input_receiver_fn\', \'assets_extra\', \'as_text\', \'checkpoint_path\'], varargs=None, keywords=None, defaults=[\'None\', \'False\', \'None\'], "
+ }
+ member_method {
+ name: "export_savedmodel"
+ argspec: "args=[\'self\', \'export_dir_base\', \'serving_input_receiver_fn\', \'assets_extra\', \'as_text\', \'checkpoint_path\', \'strip_default_attrs\'], varargs=None, keywords=None, defaults=[\'None\', \'False\', \'None\', \'False\'], "
+ }
+ member_method {
+ name: "get_variable_names"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_variable_value"
+ argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "latest_checkpoint"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "predict"
+ argspec: "args=[\'self\', \'input_fn\', \'predict_keys\', \'hooks\', \'checkpoint_path\', \'yield_single_examples\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'True\'], "
+ }
+ member_method {
+ name: "train"
+ argspec: "args=[\'self\', \'input_fn\', \'hooks\', \'steps\', \'max_steps\', \'saving_listeners\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'None\'], "
+ }
+}
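
BoostedTreesClassifier requires bucketized or categorical feature columns plus the number of batches accumulated per tree layer; experimental_predict_with_explanations, listed above, additionally yields per-example feature contributions. A sketch with placeholder boundaries:

    import tensorflow as tf

    x = tf.feature_column.numeric_column("x")
    bucketized_x = tf.feature_column.bucketized_column(x, boundaries=[0.0, 1.0, 2.0])

    clf = tf.estimator.BoostedTreesClassifier(
        feature_columns=[bucketized_x],
        n_batches_per_layer=1,   # batches accumulated before growing each layer
        n_trees=50,
        max_depth=4)

    # clf.train(input_fn=train_input_fn)
    # for result in clf.experimental_predict_with_explanations(input_fn=predict_input_fn):
    #     ...  # result includes per-feature contributions alongside the prediction
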
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.estimator.-boosted-trees-regressor.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.estimator.-boosted-trees-regressor.pbtxt
new file mode 100644
index 0000000000..56af1d137c
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.estimator.-boosted-trees-regressor.pbtxt
@@ -0,0 +1,67 @@
+path: "tensorflow.estimator.BoostedTreesRegressor"
+tf_class {
+ is_instance: "<class \'tensorflow.python.estimator.canned.boosted_trees.BoostedTreesRegressor\'>"
+ is_instance: "<class \'tensorflow.python.estimator.canned.boosted_trees._BoostedTreesBase\'>"
+ is_instance: "<class \'tensorflow.python.estimator.estimator.Estimator\'>"
+ is_instance: "<type \'object\'>"
+ member {
+ name: "config"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "model_dir"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "model_fn"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "params"
+ mtype: "<type \'property\'>"
+ }
+ member_method {
+ name: "__init__"
+ argspec: "args=[\'self\', \'feature_columns\', \'n_batches_per_layer\', \'model_dir\', \'label_dimension\', \'weight_column\', \'n_trees\', \'max_depth\', \'learning_rate\', \'l1_regularization\', \'l2_regularization\', \'tree_complexity\', \'min_node_weight\', \'config\', \'center_bias\', \'pruning_mode\'], varargs=None, keywords=None, defaults=[\'None\', \'<object object instance>\', \'None\', \'100\', \'6\', \'0.1\', \'0.0\', \'0.0\', \'0.0\', \'0.0\', \'None\', \'False\', \'none\'], "
+ }
+ member_method {
+ name: "eval_dir"
+ argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "evaluate"
+ argspec: "args=[\'self\', \'input_fn\', \'steps\', \'hooks\', \'checkpoint_path\', \'name\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'None\'], "
+ }
+ member_method {
+ name: "experimental_predict_with_explanations"
+ argspec: "args=[\'self\', \'input_fn\', \'predict_keys\', \'hooks\', \'checkpoint_path\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\'], "
+ }
+ member_method {
+ name: "export_saved_model"
+ argspec: "args=[\'self\', \'export_dir_base\', \'serving_input_receiver_fn\', \'assets_extra\', \'as_text\', \'checkpoint_path\'], varargs=None, keywords=None, defaults=[\'None\', \'False\', \'None\'], "
+ }
+ member_method {
+ name: "export_savedmodel"
+ argspec: "args=[\'self\', \'export_dir_base\', \'serving_input_receiver_fn\', \'assets_extra\', \'as_text\', \'checkpoint_path\', \'strip_default_attrs\'], varargs=None, keywords=None, defaults=[\'None\', \'False\', \'None\', \'False\'], "
+ }
+ member_method {
+ name: "get_variable_names"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_variable_value"
+ argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "latest_checkpoint"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "predict"
+ argspec: "args=[\'self\', \'input_fn\', \'predict_keys\', \'hooks\', \'checkpoint_path\', \'yield_single_examples\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'True\'], "
+ }
+ member_method {
+ name: "train"
+ argspec: "args=[\'self\', \'input_fn\', \'hooks\', \'steps\', \'max_steps\', \'saving_listeners\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'None\'], "
+ }
+}
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.estimator.-d-n-n-classifier.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.estimator.-d-n-n-classifier.pbtxt
new file mode 100644
index 0000000000..718f415a77
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.estimator.-d-n-n-classifier.pbtxt
@@ -0,0 +1,62 @@
+path: "tensorflow.estimator.DNNClassifier"
+tf_class {
+ is_instance: "<class \'tensorflow.python.estimator.canned.dnn.DNNClassifier\'>"
+ is_instance: "<class \'tensorflow.python.estimator.estimator.Estimator\'>"
+ is_instance: "<type \'object\'>"
+ member {
+ name: "config"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "model_dir"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "model_fn"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "params"
+ mtype: "<type \'property\'>"
+ }
+ member_method {
+ name: "__init__"
+ argspec: "args=[\'self\', \'hidden_units\', \'feature_columns\', \'model_dir\', \'n_classes\', \'weight_column\', \'label_vocabulary\', \'optimizer\', \'activation_fn\', \'dropout\', \'input_layer_partitioner\', \'config\', \'warm_start_from\', \'loss_reduction\', \'batch_norm\'], varargs=None, keywords=None, defaults=[\'None\', \'2\', \'None\', \'None\', \'Adagrad\', \'<function relu instance>\', \'None\', \'None\', \'None\', \'None\', \'weighted_sum\', \'False\'], "
+ }
+ member_method {
+ name: "eval_dir"
+ argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "evaluate"
+ argspec: "args=[\'self\', \'input_fn\', \'steps\', \'hooks\', \'checkpoint_path\', \'name\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'None\'], "
+ }
+ member_method {
+ name: "export_saved_model"
+ argspec: "args=[\'self\', \'export_dir_base\', \'serving_input_receiver_fn\', \'assets_extra\', \'as_text\', \'checkpoint_path\'], varargs=None, keywords=None, defaults=[\'None\', \'False\', \'None\'], "
+ }
+ member_method {
+ name: "export_savedmodel"
+ argspec: "args=[\'self\', \'export_dir_base\', \'serving_input_receiver_fn\', \'assets_extra\', \'as_text\', \'checkpoint_path\', \'strip_default_attrs\'], varargs=None, keywords=None, defaults=[\'None\', \'False\', \'None\', \'False\'], "
+ }
+ member_method {
+ name: "get_variable_names"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_variable_value"
+ argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "latest_checkpoint"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "predict"
+ argspec: "args=[\'self\', \'input_fn\', \'predict_keys\', \'hooks\', \'checkpoint_path\', \'yield_single_examples\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'True\'], "
+ }
+ member_method {
+ name: "train"
+ argspec: "args=[\'self\', \'input_fn\', \'hooks\', \'steps\', \'max_steps\', \'saving_listeners\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'None\'], "
+ }
+}
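
DNNClassifier builds a feed-forward network over dense feature columns. A minimal sketch consistent with the __init__ argspec above (the column and layer sizes are illustrative):

    import tensorflow as tf

    age = tf.feature_column.numeric_column("age")

    dnn = tf.estimator.DNNClassifier(
        hidden_units=[64, 32],   # two fully connected hidden layers
        feature_columns=[age],
        n_classes=3,
        optimizer="Adagrad",     # the default, spelled out for clarity
        dropout=0.1)

    # dnn.train(input_fn=train_input_fn, max_steps=1000)
    # dnn.evaluate(input_fn=eval_input_fn)
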
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.estimator.-d-n-n-linear-combined-classifier.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.estimator.-d-n-n-linear-combined-classifier.pbtxt
new file mode 100644
index 0000000000..b23c019d6c
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.estimator.-d-n-n-linear-combined-classifier.pbtxt
@@ -0,0 +1,62 @@
+path: "tensorflow.estimator.DNNLinearCombinedClassifier"
+tf_class {
+ is_instance: "<class \'tensorflow.python.estimator.canned.dnn_linear_combined.DNNLinearCombinedClassifier\'>"
+ is_instance: "<class \'tensorflow.python.estimator.estimator.Estimator\'>"
+ is_instance: "<type \'object\'>"
+ member {
+ name: "config"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "model_dir"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "model_fn"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "params"
+ mtype: "<type \'property\'>"
+ }
+ member_method {
+ name: "__init__"
+ argspec: "args=[\'self\', \'model_dir\', \'linear_feature_columns\', \'linear_optimizer\', \'dnn_feature_columns\', \'dnn_optimizer\', \'dnn_hidden_units\', \'dnn_activation_fn\', \'dnn_dropout\', \'n_classes\', \'weight_column\', \'label_vocabulary\', \'input_layer_partitioner\', \'config\', \'warm_start_from\', \'loss_reduction\', \'batch_norm\', \'linear_sparse_combiner\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'Ftrl\', \'None\', \'Adagrad\', \'None\', \'<function relu instance>\', \'None\', \'2\', \'None\', \'None\', \'None\', \'None\', \'None\', \'weighted_sum\', \'False\', \'sum\'], "
+ }
+ member_method {
+ name: "eval_dir"
+ argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "evaluate"
+ argspec: "args=[\'self\', \'input_fn\', \'steps\', \'hooks\', \'checkpoint_path\', \'name\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'None\'], "
+ }
+ member_method {
+ name: "export_saved_model"
+ argspec: "args=[\'self\', \'export_dir_base\', \'serving_input_receiver_fn\', \'assets_extra\', \'as_text\', \'checkpoint_path\'], varargs=None, keywords=None, defaults=[\'None\', \'False\', \'None\'], "
+ }
+ member_method {
+ name: "export_savedmodel"
+ argspec: "args=[\'self\', \'export_dir_base\', \'serving_input_receiver_fn\', \'assets_extra\', \'as_text\', \'checkpoint_path\', \'strip_default_attrs\'], varargs=None, keywords=None, defaults=[\'None\', \'False\', \'None\', \'False\'], "
+ }
+ member_method {
+ name: "get_variable_names"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_variable_value"
+ argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "latest_checkpoint"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "predict"
+ argspec: "args=[\'self\', \'input_fn\', \'predict_keys\', \'hooks\', \'checkpoint_path\', \'yield_single_examples\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'True\'], "
+ }
+ member_method {
+ name: "train"
+ argspec: "args=[\'self\', \'input_fn\', \'hooks\', \'steps\', \'max_steps\', \'saving_listeners\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'None\'], "
+ }
+}
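
DNNLinearCombinedClassifier is the canned wide-and-deep model: sparse columns feed the linear (wide) part and dense columns feed the DNN (deep) part, each with its own optimizer. A sketch with illustrative columns:

    import tensorflow as tf

    occupation = tf.feature_column.categorical_column_with_hash_bucket(
        "occupation", hash_bucket_size=100)
    age = tf.feature_column.numeric_column("age")

    model = tf.estimator.DNNLinearCombinedClassifier(
        linear_feature_columns=[occupation],
        linear_optimizer="Ftrl",
        dnn_feature_columns=[age, tf.feature_column.indicator_column(occupation)],
        dnn_optimizer="Adagrad",
        dnn_hidden_units=[128, 64])
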
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.estimator.-d-n-n-linear-combined-regressor.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.estimator.-d-n-n-linear-combined-regressor.pbtxt
new file mode 100644
index 0000000000..caa9e3f1de
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.estimator.-d-n-n-linear-combined-regressor.pbtxt
@@ -0,0 +1,62 @@
+path: "tensorflow.estimator.DNNLinearCombinedRegressor"
+tf_class {
+ is_instance: "<class \'tensorflow.python.estimator.canned.dnn_linear_combined.DNNLinearCombinedRegressor\'>"
+ is_instance: "<class \'tensorflow.python.estimator.estimator.Estimator\'>"
+ is_instance: "<type \'object\'>"
+ member {
+ name: "config"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "model_dir"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "model_fn"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "params"
+ mtype: "<type \'property\'>"
+ }
+ member_method {
+ name: "__init__"
+ argspec: "args=[\'self\', \'model_dir\', \'linear_feature_columns\', \'linear_optimizer\', \'dnn_feature_columns\', \'dnn_optimizer\', \'dnn_hidden_units\', \'dnn_activation_fn\', \'dnn_dropout\', \'label_dimension\', \'weight_column\', \'input_layer_partitioner\', \'config\', \'warm_start_from\', \'loss_reduction\', \'batch_norm\', \'linear_sparse_combiner\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'Ftrl\', \'None\', \'Adagrad\', \'None\', \'<function relu instance>\', \'None\', \'1\', \'None\', \'None\', \'None\', \'None\', \'weighted_sum\', \'False\', \'sum\'], "
+ }
+ member_method {
+ name: "eval_dir"
+ argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "evaluate"
+ argspec: "args=[\'self\', \'input_fn\', \'steps\', \'hooks\', \'checkpoint_path\', \'name\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'None\'], "
+ }
+ member_method {
+ name: "export_saved_model"
+ argspec: "args=[\'self\', \'export_dir_base\', \'serving_input_receiver_fn\', \'assets_extra\', \'as_text\', \'checkpoint_path\'], varargs=None, keywords=None, defaults=[\'None\', \'False\', \'None\'], "
+ }
+ member_method {
+ name: "export_savedmodel"
+ argspec: "args=[\'self\', \'export_dir_base\', \'serving_input_receiver_fn\', \'assets_extra\', \'as_text\', \'checkpoint_path\', \'strip_default_attrs\'], varargs=None, keywords=None, defaults=[\'None\', \'False\', \'None\', \'False\'], "
+ }
+ member_method {
+ name: "get_variable_names"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_variable_value"
+ argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "latest_checkpoint"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "predict"
+ argspec: "args=[\'self\', \'input_fn\', \'predict_keys\', \'hooks\', \'checkpoint_path\', \'yield_single_examples\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'True\'], "
+ }
+ member_method {
+ name: "train"
+ argspec: "args=[\'self\', \'input_fn\', \'hooks\', \'steps\', \'max_steps\', \'saving_listeners\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'None\'], "
+ }
+}
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.estimator.-d-n-n-regressor.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.estimator.-d-n-n-regressor.pbtxt
new file mode 100644
index 0000000000..1f5e650940
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.estimator.-d-n-n-regressor.pbtxt
@@ -0,0 +1,62 @@
+path: "tensorflow.estimator.DNNRegressor"
+tf_class {
+ is_instance: "<class \'tensorflow.python.estimator.canned.dnn.DNNRegressor\'>"
+ is_instance: "<class \'tensorflow.python.estimator.estimator.Estimator\'>"
+ is_instance: "<type \'object\'>"
+ member {
+ name: "config"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "model_dir"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "model_fn"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "params"
+ mtype: "<type \'property\'>"
+ }
+ member_method {
+ name: "__init__"
+ argspec: "args=[\'self\', \'hidden_units\', \'feature_columns\', \'model_dir\', \'label_dimension\', \'weight_column\', \'optimizer\', \'activation_fn\', \'dropout\', \'input_layer_partitioner\', \'config\', \'warm_start_from\', \'loss_reduction\', \'batch_norm\'], varargs=None, keywords=None, defaults=[\'None\', \'1\', \'None\', \'Adagrad\', \'<function relu instance>\', \'None\', \'None\', \'None\', \'None\', \'weighted_sum\', \'False\'], "
+ }
+ member_method {
+ name: "eval_dir"
+ argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "evaluate"
+ argspec: "args=[\'self\', \'input_fn\', \'steps\', \'hooks\', \'checkpoint_path\', \'name\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'None\'], "
+ }
+ member_method {
+ name: "export_saved_model"
+ argspec: "args=[\'self\', \'export_dir_base\', \'serving_input_receiver_fn\', \'assets_extra\', \'as_text\', \'checkpoint_path\'], varargs=None, keywords=None, defaults=[\'None\', \'False\', \'None\'], "
+ }
+ member_method {
+ name: "export_savedmodel"
+ argspec: "args=[\'self\', \'export_dir_base\', \'serving_input_receiver_fn\', \'assets_extra\', \'as_text\', \'checkpoint_path\', \'strip_default_attrs\'], varargs=None, keywords=None, defaults=[\'None\', \'False\', \'None\', \'False\'], "
+ }
+ member_method {
+ name: "get_variable_names"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_variable_value"
+ argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "latest_checkpoint"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "predict"
+ argspec: "args=[\'self\', \'input_fn\', \'predict_keys\', \'hooks\', \'checkpoint_path\', \'yield_single_examples\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'True\'], "
+ }
+ member_method {
+ name: "train"
+ argspec: "args=[\'self\', \'input_fn\', \'hooks\', \'steps\', \'max_steps\', \'saving_listeners\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'None\'], "
+ }
+}
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.estimator.-estimator-spec.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.estimator.-estimator-spec.pbtxt
new file mode 100644
index 0000000000..aa6ac46613
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.estimator.-estimator-spec.pbtxt
@@ -0,0 +1,59 @@
+path: "tensorflow.estimator.EstimatorSpec"
+tf_class {
+ is_instance: "<class \'tensorflow.python.estimator.model_fn.EstimatorSpec\'>"
+ is_instance: "<class \'tensorflow.python.estimator.model_fn.EstimatorSpec\'>"
+ is_instance: "<type \'tuple\'>"
+ member {
+ name: "eval_metric_ops"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "evaluation_hooks"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "export_outputs"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "loss"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "mode"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "prediction_hooks"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "predictions"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "scaffold"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "train_op"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "training_chief_hooks"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "training_hooks"
+ mtype: "<type \'property\'>"
+ }
+ member_method {
+ name: "__init__"
+ }
+ member_method {
+ name: "count"
+ }
+ member_method {
+ name: "index"
+ }
+}
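
EstimatorSpec is the named tuple a custom model_fn returns to describe what to run in each mode: predictions for PREDICT, loss for EVAL, loss and train_op for TRAIN. A minimal sketch of a model_fn using the fields pinned above (the feature name and model are illustrative):

    import tensorflow as tf

    def model_fn(features, labels, mode):
        logits = tf.layers.dense(features["x"], units=1)

        if mode == tf.estimator.ModeKeys.PREDICT:
            return tf.estimator.EstimatorSpec(mode=mode, predictions={"logits": logits})

        loss = tf.losses.mean_squared_error(labels, logits)
        if mode == tf.estimator.ModeKeys.EVAL:
            return tf.estimator.EstimatorSpec(mode=mode, loss=loss)

        train_op = tf.train.GradientDescentOptimizer(0.1).minimize(
            loss, global_step=tf.train.get_global_step())
        return tf.estimator.EstimatorSpec(mode=mode, loss=loss, train_op=train_op)

    estimator = tf.estimator.Estimator(model_fn=model_fn)
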
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.estimator.-estimator.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.estimator.-estimator.pbtxt
new file mode 100644
index 0000000000..ebd3869c9b
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.estimator.-estimator.pbtxt
@@ -0,0 +1,61 @@
+path: "tensorflow.estimator.Estimator"
+tf_class {
+ is_instance: "<class \'tensorflow.python.estimator.estimator.Estimator\'>"
+ is_instance: "<type \'object\'>"
+ member {
+ name: "config"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "model_dir"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "model_fn"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "params"
+ mtype: "<type \'property\'>"
+ }
+ member_method {
+ name: "__init__"
+ argspec: "args=[\'self\', \'model_fn\', \'model_dir\', \'config\', \'params\', \'warm_start_from\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'None\'], "
+ }
+ member_method {
+ name: "eval_dir"
+ argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "evaluate"
+ argspec: "args=[\'self\', \'input_fn\', \'steps\', \'hooks\', \'checkpoint_path\', \'name\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'None\'], "
+ }
+ member_method {
+ name: "export_saved_model"
+ argspec: "args=[\'self\', \'export_dir_base\', \'serving_input_receiver_fn\', \'assets_extra\', \'as_text\', \'checkpoint_path\'], varargs=None, keywords=None, defaults=[\'None\', \'False\', \'None\'], "
+ }
+ member_method {
+ name: "export_savedmodel"
+ argspec: "args=[\'self\', \'export_dir_base\', \'serving_input_receiver_fn\', \'assets_extra\', \'as_text\', \'checkpoint_path\', \'strip_default_attrs\'], varargs=None, keywords=None, defaults=[\'None\', \'False\', \'None\', \'False\'], "
+ }
+ member_method {
+ name: "get_variable_names"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_variable_value"
+ argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "latest_checkpoint"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "predict"
+ argspec: "args=[\'self\', \'input_fn\', \'predict_keys\', \'hooks\', \'checkpoint_path\', \'yield_single_examples\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'True\'], "
+ }
+ member_method {
+ name: "train"
+ argspec: "args=[\'self\', \'input_fn\', \'hooks\', \'steps\', \'max_steps\', \'saving_listeners\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'None\'], "
+ }
+}
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.estimator.-eval-spec.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.estimator.-eval-spec.pbtxt
new file mode 100644
index 0000000000..db83ba1bd8
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.estimator.-eval-spec.pbtxt
@@ -0,0 +1,43 @@
+path: "tensorflow.estimator.EvalSpec"
+tf_class {
+ is_instance: "<class \'tensorflow.python.estimator.training.EvalSpec\'>"
+ is_instance: "<class \'tensorflow.python.estimator.training.EvalSpec\'>"
+ is_instance: "<type \'tuple\'>"
+ member {
+ name: "exporters"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "hooks"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "input_fn"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "name"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "start_delay_secs"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "steps"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "throttle_secs"
+ mtype: "<type \'property\'>"
+ }
+ member_method {
+ name: "__init__"
+ }
+ member_method {
+ name: "count"
+ }
+ member_method {
+ name: "index"
+ }
+}
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.estimator.-exporter.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.estimator.-exporter.pbtxt
new file mode 100644
index 0000000000..035af70e52
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.estimator.-exporter.pbtxt
@@ -0,0 +1,16 @@
+path: "tensorflow.estimator.Exporter"
+tf_class {
+ is_instance: "<class \'tensorflow.python.estimator.exporter.Exporter\'>"
+ is_instance: "<type \'object\'>"
+ member {
+ name: "name"
+ mtype: "<class \'abc.abstractproperty\'>"
+ }
+ member_method {
+ name: "__init__"
+ }
+ member_method {
+ name: "export"
+ argspec: "args=[\'self\', \'estimator\', \'export_path\', \'checkpoint_path\', \'eval_result\', \'is_the_final_export\'], varargs=None, keywords=None, defaults=None"
+ }
+}
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.estimator.-final-exporter.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.estimator.-final-exporter.pbtxt
new file mode 100644
index 0000000000..ee37b1fa21
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.estimator.-final-exporter.pbtxt
@@ -0,0 +1,18 @@
+path: "tensorflow.estimator.FinalExporter"
+tf_class {
+ is_instance: "<class \'tensorflow.python.estimator.exporter.FinalExporter\'>"
+ is_instance: "<class \'tensorflow.python.estimator.exporter.Exporter\'>"
+ is_instance: "<type \'object\'>"
+ member {
+ name: "name"
+ mtype: "<type \'property\'>"
+ }
+ member_method {
+ name: "__init__"
+ argspec: "args=[\'self\', \'name\', \'serving_input_receiver_fn\', \'assets_extra\', \'as_text\'], varargs=None, keywords=None, defaults=[\'None\', \'False\'], "
+ }
+ member_method {
+ name: "export"
+ argspec: "args=[\'self\', \'estimator\', \'export_path\', \'checkpoint_path\', \'eval_result\', \'is_the_final_export\'], varargs=None, keywords=None, defaults=None"
+ }
+}
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.estimator.-latest-exporter.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.estimator.-latest-exporter.pbtxt
new file mode 100644
index 0000000000..2a9d029029
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.estimator.-latest-exporter.pbtxt
@@ -0,0 +1,18 @@
+path: "tensorflow.estimator.LatestExporter"
+tf_class {
+ is_instance: "<class \'tensorflow.python.estimator.exporter.LatestExporter\'>"
+ is_instance: "<class \'tensorflow.python.estimator.exporter.Exporter\'>"
+ is_instance: "<type \'object\'>"
+ member {
+ name: "name"
+ mtype: "<type \'property\'>"
+ }
+ member_method {
+ name: "__init__"
+ argspec: "args=[\'self\', \'name\', \'serving_input_receiver_fn\', \'assets_extra\', \'as_text\', \'exports_to_keep\'], varargs=None, keywords=None, defaults=[\'None\', \'False\', \'5\'], "
+ }
+ member_method {
+ name: "export"
+ argspec: "args=[\'self\', \'estimator\', \'export_path\', \'checkpoint_path\', \'eval_result\', \'is_the_final_export\'], varargs=None, keywords=None, defaults=None"
+ }
+}
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.estimator.-linear-classifier.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.estimator.-linear-classifier.pbtxt
new file mode 100644
index 0000000000..53ec5a0c78
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.estimator.-linear-classifier.pbtxt
@@ -0,0 +1,62 @@
+path: "tensorflow.estimator.LinearClassifier"
+tf_class {
+ is_instance: "<class \'tensorflow.python.estimator.canned.linear.LinearClassifier\'>"
+ is_instance: "<class \'tensorflow.python.estimator.estimator.Estimator\'>"
+ is_instance: "<type \'object\'>"
+ member {
+ name: "config"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "model_dir"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "model_fn"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "params"
+ mtype: "<type \'property\'>"
+ }
+ member_method {
+ name: "__init__"
+ argspec: "args=[\'self\', \'feature_columns\', \'model_dir\', \'n_classes\', \'weight_column\', \'label_vocabulary\', \'optimizer\', \'config\', \'partitioner\', \'warm_start_from\', \'loss_reduction\', \'sparse_combiner\'], varargs=None, keywords=None, defaults=[\'None\', \'2\', \'None\', \'None\', \'Ftrl\', \'None\', \'None\', \'None\', \'weighted_sum\', \'sum\'], "
+ }
+ member_method {
+ name: "eval_dir"
+ argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "evaluate"
+ argspec: "args=[\'self\', \'input_fn\', \'steps\', \'hooks\', \'checkpoint_path\', \'name\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'None\'], "
+ }
+ member_method {
+ name: "export_saved_model"
+ argspec: "args=[\'self\', \'export_dir_base\', \'serving_input_receiver_fn\', \'assets_extra\', \'as_text\', \'checkpoint_path\'], varargs=None, keywords=None, defaults=[\'None\', \'False\', \'None\'], "
+ }
+ member_method {
+ name: "export_savedmodel"
+ argspec: "args=[\'self\', \'export_dir_base\', \'serving_input_receiver_fn\', \'assets_extra\', \'as_text\', \'checkpoint_path\', \'strip_default_attrs\'], varargs=None, keywords=None, defaults=[\'None\', \'False\', \'None\', \'False\'], "
+ }
+ member_method {
+ name: "get_variable_names"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_variable_value"
+ argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "latest_checkpoint"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "predict"
+ argspec: "args=[\'self\', \'input_fn\', \'predict_keys\', \'hooks\', \'checkpoint_path\', \'yield_single_examples\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'True\'], "
+ }
+ member_method {
+ name: "train"
+ argspec: "args=[\'self\', \'input_fn\', \'hooks\', \'steps\', \'max_steps\', \'saving_listeners\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'None\'], "
+ }
+}
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.estimator.-linear-regressor.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.estimator.-linear-regressor.pbtxt
new file mode 100644
index 0000000000..3791162619
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.estimator.-linear-regressor.pbtxt
@@ -0,0 +1,62 @@
+path: "tensorflow.estimator.LinearRegressor"
+tf_class {
+ is_instance: "<class \'tensorflow.python.estimator.canned.linear.LinearRegressor\'>"
+ is_instance: "<class \'tensorflow.python.estimator.estimator.Estimator\'>"
+ is_instance: "<type \'object\'>"
+ member {
+ name: "config"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "model_dir"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "model_fn"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "params"
+ mtype: "<type \'property\'>"
+ }
+ member_method {
+ name: "__init__"
+ argspec: "args=[\'self\', \'feature_columns\', \'model_dir\', \'label_dimension\', \'weight_column\', \'optimizer\', \'config\', \'partitioner\', \'warm_start_from\', \'loss_reduction\', \'sparse_combiner\'], varargs=None, keywords=None, defaults=[\'None\', \'1\', \'None\', \'Ftrl\', \'None\', \'None\', \'None\', \'weighted_sum\', \'sum\'], "
+ }
+ member_method {
+ name: "eval_dir"
+ argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "evaluate"
+ argspec: "args=[\'self\', \'input_fn\', \'steps\', \'hooks\', \'checkpoint_path\', \'name\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'None\'], "
+ }
+ member_method {
+ name: "export_saved_model"
+ argspec: "args=[\'self\', \'export_dir_base\', \'serving_input_receiver_fn\', \'assets_extra\', \'as_text\', \'checkpoint_path\'], varargs=None, keywords=None, defaults=[\'None\', \'False\', \'None\'], "
+ }
+ member_method {
+ name: "export_savedmodel"
+ argspec: "args=[\'self\', \'export_dir_base\', \'serving_input_receiver_fn\', \'assets_extra\', \'as_text\', \'checkpoint_path\', \'strip_default_attrs\'], varargs=None, keywords=None, defaults=[\'None\', \'False\', \'None\', \'False\'], "
+ }
+ member_method {
+ name: "get_variable_names"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_variable_value"
+ argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "latest_checkpoint"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "predict"
+ argspec: "args=[\'self\', \'input_fn\', \'predict_keys\', \'hooks\', \'checkpoint_path\', \'yield_single_examples\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'True\'], "
+ }
+ member_method {
+ name: "train"
+ argspec: "args=[\'self\', \'input_fn\', \'hooks\', \'steps\', \'max_steps\', \'saving_listeners\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'None\'], "
+ }
+}
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.estimator.-mode-keys.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.estimator.-mode-keys.pbtxt
new file mode 100644
index 0000000000..6a1c24fa63
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.estimator.-mode-keys.pbtxt
@@ -0,0 +1,20 @@
+path: "tensorflow.estimator.ModeKeys"
+tf_class {
+ is_instance: "<class \'tensorflow.python.estimator.model_fn.ModeKeys\'>"
+ is_instance: "<type \'object\'>"
+ member {
+ name: "EVAL"
+ mtype: "<type \'str\'>"
+ }
+ member {
+ name: "PREDICT"
+ mtype: "<type \'str\'>"
+ }
+ member {
+ name: "TRAIN"
+ mtype: "<type \'str\'>"
+ }
+ member_method {
+ name: "__init__"
+ }
+}
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.estimator.-run-config.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.estimator.-run-config.pbtxt
new file mode 100644
index 0000000000..269e18a0a7
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.estimator.-run-config.pbtxt
@@ -0,0 +1,105 @@
+path: "tensorflow.estimator.RunConfig"
+tf_class {
+ is_instance: "<class \'tensorflow.python.estimator.run_config.RunConfig\'>"
+ is_instance: "<type \'object\'>"
+ member {
+ name: "cluster_spec"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "device_fn"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "eval_distribute"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "evaluation_master"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "global_id_in_cluster"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "is_chief"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "keep_checkpoint_every_n_hours"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "keep_checkpoint_max"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "log_step_count_steps"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "master"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "model_dir"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "num_ps_replicas"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "num_worker_replicas"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "protocol"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "save_checkpoints_secs"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "save_checkpoints_steps"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "save_summary_steps"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "service"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "session_config"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "task_id"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "task_type"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "tf_random_seed"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "train_distribute"
+ mtype: "<type \'property\'>"
+ }
+ member_method {
+ name: "__init__"
+ argspec: "args=[\'self\', \'model_dir\', \'tf_random_seed\', \'save_summary_steps\', \'save_checkpoints_steps\', \'save_checkpoints_secs\', \'session_config\', \'keep_checkpoint_max\', \'keep_checkpoint_every_n_hours\', \'log_step_count_steps\', \'train_distribute\', \'device_fn\', \'protocol\', \'eval_distribute\', \'experimental_distribute\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'100\', \'<object object instance>\', \'<object object instance>\', \'None\', \'5\', \'10000\', \'100\', \'None\', \'None\', \'None\', \'None\', \'None\'], "
+ }
+ member_method {
+ name: "replace"
+ argspec: "args=[\'self\'], varargs=None, keywords=kwargs, defaults=None"
+ }
+}
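
RunConfig collects the execution settings an Estimator reads (checkpointing, summaries, distribution); unspecified fields keep the defaults shown in the argspec above, and replace() returns a copy with selected fields overridden. A sketch with an illustrative model_dir:

    import tensorflow as tf

    config = tf.estimator.RunConfig(
        model_dir="/tmp/model",        # placeholder path
        tf_random_seed=42,
        save_summary_steps=100,
        save_checkpoints_steps=500,    # use either steps or secs, not both
        keep_checkpoint_max=5,
        log_step_count_steps=100)

    slower_ckpts = config.replace(save_checkpoints_steps=2000)

    # estimator = tf.estimator.DNNClassifier(..., config=config)
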
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.estimator.-train-spec.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.estimator.-train-spec.pbtxt
new file mode 100644
index 0000000000..7d2f77438a
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.estimator.-train-spec.pbtxt
@@ -0,0 +1,27 @@
+path: "tensorflow.estimator.TrainSpec"
+tf_class {
+ is_instance: "<class \'tensorflow.python.estimator.training.TrainSpec\'>"
+ is_instance: "<class \'tensorflow.python.estimator.training.TrainSpec\'>"
+ is_instance: "<type \'tuple\'>"
+ member {
+ name: "hooks"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "input_fn"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "max_steps"
+ mtype: "<type \'property\'>"
+ }
+ member_method {
+ name: "__init__"
+ }
+ member_method {
+ name: "count"
+ }
+ member_method {
+ name: "index"
+ }
+}
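
TrainSpec and EvalSpec (shown earlier in this diff) bundle the input functions and scheduling knobs for coordinated training and evaluation. A sketch with toy input functions; the final, commented-out call is the companion tf.estimator.train_and_evaluate helper, which is not part of this golden file:

    import tensorflow as tf

    def train_input_fn():
        return {"x": tf.constant([[1.0], [2.0]])}, tf.constant([[1.0], [2.0]])

    def eval_input_fn():
        return {"x": tf.constant([[3.0]])}, tf.constant([[3.0]])

    train_spec = tf.estimator.TrainSpec(input_fn=train_input_fn, max_steps=1000)
    eval_spec = tf.estimator.EvalSpec(
        input_fn=eval_input_fn,
        steps=None,              # evaluate until the eval input is exhausted
        start_delay_secs=120,
        throttle_secs=600)       # minimum seconds between evaluations

    # With any Estimator instance from the goldens above:
    # tf.estimator.train_and_evaluate(estimator, train_spec, eval_spec)
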
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.estimator.-vocab-info.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.estimator.-vocab-info.pbtxt
new file mode 100644
index 0000000000..b6942cb7ed
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.estimator.-vocab-info.pbtxt
@@ -0,0 +1,43 @@
+path: "tensorflow.estimator.VocabInfo"
+tf_class {
+ is_instance: "<class \'tensorflow.python.training.warm_starting_util.VocabInfo\'>"
+ is_instance: "<class \'tensorflow.python.training.warm_starting_util.VocabInfo\'>"
+ is_instance: "<type \'tuple\'>"
+ member {
+ name: "axis"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "backup_initializer"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "new_vocab"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "new_vocab_size"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "num_oov_buckets"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "old_vocab"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "old_vocab_size"
+ mtype: "<type \'property\'>"
+ }
+ member_method {
+ name: "__init__"
+ }
+ member_method {
+ name: "count"
+ }
+ member_method {
+ name: "index"
+ }
+}
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.estimator.-warm-start-settings.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.estimator.-warm-start-settings.pbtxt
new file mode 100644
index 0000000000..43f5343359
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.estimator.-warm-start-settings.pbtxt
@@ -0,0 +1,31 @@
+path: "tensorflow.estimator.WarmStartSettings"
+tf_class {
+ is_instance: "<class \'tensorflow.python.estimator.estimator.WarmStartSettings\'>"
+ is_instance: "<class \'tensorflow.python.estimator.estimator.WarmStartSettings\'>"
+ is_instance: "<type \'tuple\'>"
+ member {
+ name: "ckpt_to_initialize_from"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "var_name_to_prev_var_name"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "var_name_to_vocab_info"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "vars_to_warm_start"
+ mtype: "<type \'property\'>"
+ }
+ member_method {
+ name: "__init__"
+ }
+ member_method {
+ name: "count"
+ }
+ member_method {
+ name: "index"
+ }
+}
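
Note: a minimal sketch of WarmStartSettings, assuming the TF 1.x API above; the checkpoint path and variable-name regex are hypothetical. A VocabInfo (as sketched earlier) can be attached per variable via var_name_to_vocab_info.

    import tensorflow as tf

    ws = tf.estimator.WarmStartSettings(
        ckpt_to_initialize_from='/tmp/previous_model',   # hypothetical checkpoint dir
        vars_to_warm_start='.*embedding_weights.*')      # regex over variable names
    # Passed to an Estimator constructor as warm_start_from=ws.
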
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.estimator.export.-classification-output.__metaclass__.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.estimator.export.-classification-output.__metaclass__.pbtxt
new file mode 100644
index 0000000000..3cf7af8da9
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.estimator.export.-classification-output.__metaclass__.pbtxt
@@ -0,0 +1,14 @@
+path: "tensorflow.estimator.export.ClassificationOutput.__metaclass__"
+tf_class {
+ is_instance: "<class \'abc.ABCMeta\'>"
+ member_method {
+ name: "__init__"
+ }
+ member_method {
+ name: "mro"
+ }
+ member_method {
+ name: "register"
+ argspec: "args=[\'cls\', \'subclass\'], varargs=None, keywords=None, defaults=None"
+ }
+}
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.estimator.export.-classification-output.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.estimator.export.-classification-output.pbtxt
new file mode 100644
index 0000000000..2df1840c4a
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.estimator.export.-classification-output.pbtxt
@@ -0,0 +1,22 @@
+path: "tensorflow.estimator.export.ClassificationOutput"
+tf_class {
+ is_instance: "<class \'tensorflow.python.estimator.export.export_output.ClassificationOutput\'>"
+ is_instance: "<class \'tensorflow.python.estimator.export.export_output.ExportOutput\'>"
+ is_instance: "<type \'object\'>"
+ member {
+ name: "classes"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "scores"
+ mtype: "<type \'property\'>"
+ }
+ member_method {
+ name: "__init__"
+ argspec: "args=[\'self\', \'scores\', \'classes\'], varargs=None, keywords=None, defaults=[\'None\', \'None\'], "
+ }
+ member_method {
+ name: "as_signature_def"
+ argspec: "args=[\'self\', \'receiver_tensors\'], varargs=None, keywords=None, defaults=None"
+ }
+}
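
Note: a small sketch of building a ClassificationOutput, assuming the TF 1.x export API above; the scores and classes are toy constants. At least one of the two must be provided, and classes must be a string tensor.

    import tensorflow as tf

    scores = tf.constant([[0.1, 0.9]])                     # per-class scores
    classes = tf.constant([['negative', 'positive']])      # class labels as strings
    output = tf.estimator.export.ClassificationOutput(scores=scores, classes=classes)
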
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.estimator.export.-export-output.__metaclass__.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.estimator.export.-export-output.__metaclass__.pbtxt
new file mode 100644
index 0000000000..5d165ccbf9
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.estimator.export.-export-output.__metaclass__.pbtxt
@@ -0,0 +1,14 @@
+path: "tensorflow.estimator.export.ExportOutput.__metaclass__"
+tf_class {
+ is_instance: "<class \'abc.ABCMeta\'>"
+ member_method {
+ name: "__init__"
+ }
+ member_method {
+ name: "mro"
+ }
+ member_method {
+ name: "register"
+ argspec: "args=[\'cls\', \'subclass\'], varargs=None, keywords=None, defaults=None"
+ }
+}
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.estimator.export.-export-output.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.estimator.export.-export-output.pbtxt
new file mode 100644
index 0000000000..fa62e8ced8
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.estimator.export.-export-output.pbtxt
@@ -0,0 +1,12 @@
+path: "tensorflow.estimator.export.ExportOutput"
+tf_class {
+ is_instance: "<class \'tensorflow.python.estimator.export.export_output.ExportOutput\'>"
+ is_instance: "<type \'object\'>"
+ member_method {
+ name: "__init__"
+ }
+ member_method {
+ name: "as_signature_def"
+ argspec: "args=[\'self\', \'receiver_tensors\'], varargs=None, keywords=None, defaults=None"
+ }
+}
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.estimator.export.-predict-output.__metaclass__.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.estimator.export.-predict-output.__metaclass__.pbtxt
new file mode 100644
index 0000000000..743495ba98
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.estimator.export.-predict-output.__metaclass__.pbtxt
@@ -0,0 +1,14 @@
+path: "tensorflow.estimator.export.PredictOutput.__metaclass__"
+tf_class {
+ is_instance: "<class \'abc.ABCMeta\'>"
+ member_method {
+ name: "__init__"
+ }
+ member_method {
+ name: "mro"
+ }
+ member_method {
+ name: "register"
+ argspec: "args=[\'cls\', \'subclass\'], varargs=None, keywords=None, defaults=None"
+ }
+}
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.estimator.export.-predict-output.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.estimator.export.-predict-output.pbtxt
new file mode 100644
index 0000000000..e0160b10ce
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.estimator.export.-predict-output.pbtxt
@@ -0,0 +1,18 @@
+path: "tensorflow.estimator.export.PredictOutput"
+tf_class {
+ is_instance: "<class \'tensorflow.python.estimator.export.export_output.PredictOutput\'>"
+ is_instance: "<class \'tensorflow.python.estimator.export.export_output.ExportOutput\'>"
+ is_instance: "<type \'object\'>"
+ member {
+ name: "outputs"
+ mtype: "<type \'property\'>"
+ }
+ member_method {
+ name: "__init__"
+ argspec: "args=[\'self\', \'outputs\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "as_signature_def"
+ argspec: "args=[\'self\', \'receiver_tensors\'], varargs=None, keywords=None, defaults=None"
+ }
+}
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.estimator.export.-regression-output.__metaclass__.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.estimator.export.-regression-output.__metaclass__.pbtxt
new file mode 100644
index 0000000000..dbf4e3dec8
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.estimator.export.-regression-output.__metaclass__.pbtxt
@@ -0,0 +1,14 @@
+path: "tensorflow.estimator.export.RegressionOutput.__metaclass__"
+tf_class {
+ is_instance: "<class \'abc.ABCMeta\'>"
+ member_method {
+ name: "__init__"
+ }
+ member_method {
+ name: "mro"
+ }
+ member_method {
+ name: "register"
+ argspec: "args=[\'cls\', \'subclass\'], varargs=None, keywords=None, defaults=None"
+ }
+}
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.estimator.export.-regression-output.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.estimator.export.-regression-output.pbtxt
new file mode 100644
index 0000000000..905f0e0553
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.estimator.export.-regression-output.pbtxt
@@ -0,0 +1,18 @@
+path: "tensorflow.estimator.export.RegressionOutput"
+tf_class {
+ is_instance: "<class \'tensorflow.python.estimator.export.export_output.RegressionOutput\'>"
+ is_instance: "<class \'tensorflow.python.estimator.export.export_output.ExportOutput\'>"
+ is_instance: "<type \'object\'>"
+ member {
+ name: "value"
+ mtype: "<type \'property\'>"
+ }
+ member_method {
+ name: "__init__"
+ argspec: "args=[\'self\', \'value\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "as_signature_def"
+ argspec: "args=[\'self\', \'receiver_tensors\'], varargs=None, keywords=None, defaults=None"
+ }
+}
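
Note: a hedged sketch combining RegressionOutput and PredictOutput in the export_outputs dict a model_fn typically returns inside an EstimatorSpec; the tensors and keys are toy values.

    import tensorflow as tf

    predictions = {'mean': tf.constant([1.5]), 'variance': tf.constant([0.25])}
    export_outputs = {
        'regress': tf.estimator.export.RegressionOutput(value=tf.constant([1.5])),
        'predict': tf.estimator.export.PredictOutput(outputs=predictions),
    }
    # e.g. tf.estimator.EstimatorSpec(mode, predictions=predictions,
    #                                 export_outputs=export_outputs)
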
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.estimator.export.-serving-input-receiver.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.estimator.export.-serving-input-receiver.pbtxt
new file mode 100644
index 0000000000..d71b2a4300
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.estimator.export.-serving-input-receiver.pbtxt
@@ -0,0 +1,27 @@
+path: "tensorflow.estimator.export.ServingInputReceiver"
+tf_class {
+ is_instance: "<class \'tensorflow.python.estimator.export.export.ServingInputReceiver\'>"
+ is_instance: "<class \'tensorflow.python.estimator.export.export.ServingInputReceiver\'>"
+ is_instance: "<type \'tuple\'>"
+ member {
+ name: "features"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "receiver_tensors"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "receiver_tensors_alternatives"
+ mtype: "<type \'property\'>"
+ }
+ member_method {
+ name: "__init__"
+ }
+ member_method {
+ name: "count"
+ }
+ member_method {
+ name: "index"
+ }
+}
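
Note: a minimal serving_input_receiver_fn sketch, assuming the TF 1.x API above; the single float feature 'x' is hypothetical.

    import tensorflow as tf

    def serving_input_receiver_fn():
        # Raw request tensors; here they double as the model features.
        receiver_tensors = {'x': tf.placeholder(tf.float32, shape=[None, 1], name='x')}
        features = receiver_tensors
        return tf.estimator.export.ServingInputReceiver(features, receiver_tensors)
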
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.estimator.export.-tensor-serving-input-receiver.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.estimator.export.-tensor-serving-input-receiver.pbtxt
new file mode 100644
index 0000000000..4fe92643bf
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.estimator.export.-tensor-serving-input-receiver.pbtxt
@@ -0,0 +1,27 @@
+path: "tensorflow.estimator.export.TensorServingInputReceiver"
+tf_class {
+ is_instance: "<class \'tensorflow.python.estimator.export.export.TensorServingInputReceiver\'>"
+ is_instance: "<class \'tensorflow.python.estimator.export.export.TensorServingInputReceiver\'>"
+ is_instance: "<type \'tuple\'>"
+ member {
+ name: "features"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "receiver_tensors"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "receiver_tensors_alternatives"
+ mtype: "<type \'property\'>"
+ }
+ member_method {
+ name: "__init__"
+ }
+ member_method {
+ name: "count"
+ }
+ member_method {
+ name: "index"
+ }
+}
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.estimator.export.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.estimator.export.pbtxt
new file mode 100644
index 0000000000..bd72f6cd79
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.estimator.export.pbtxt
@@ -0,0 +1,35 @@
+path: "tensorflow.estimator.export"
+tf_module {
+ member {
+ name: "ClassificationOutput"
+ mtype: "<class \'abc.ABCMeta\'>"
+ }
+ member {
+ name: "ExportOutput"
+ mtype: "<class \'abc.ABCMeta\'>"
+ }
+ member {
+ name: "PredictOutput"
+ mtype: "<class \'abc.ABCMeta\'>"
+ }
+ member {
+ name: "RegressionOutput"
+ mtype: "<class \'abc.ABCMeta\'>"
+ }
+ member {
+ name: "ServingInputReceiver"
+ mtype: "<type \'type\'>"
+ }
+ member {
+ name: "TensorServingInputReceiver"
+ mtype: "<type \'type\'>"
+ }
+ member_method {
+ name: "build_parsing_serving_input_receiver_fn"
+ argspec: "args=[\'feature_spec\', \'default_batch_size\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "build_raw_serving_input_receiver_fn"
+ argspec: "args=[\'features\', \'default_batch_size\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+}
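
Note: a short sketch of build_parsing_serving_input_receiver_fn from the module above, assuming the TF 1.x API; the feature spec and export directory are hypothetical.

    import tensorflow as tf

    feature_spec = {'x': tf.FixedLenFeature([1], tf.float32)}
    serving_input_fn = tf.estimator.export.build_parsing_serving_input_receiver_fn(
        feature_spec)
    # Used when exporting, e.g. estimator.export_savedmodel('/tmp/export', serving_input_fn)
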
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.estimator.inputs.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.estimator.inputs.pbtxt
new file mode 100644
index 0000000000..b318fea1f8
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.estimator.inputs.pbtxt
@@ -0,0 +1,11 @@
+path: "tensorflow.estimator.inputs"
+tf_module {
+ member_method {
+ name: "numpy_input_fn"
+ argspec: "args=[\'x\', \'y\', \'batch_size\', \'num_epochs\', \'shuffle\', \'queue_capacity\', \'num_threads\'], varargs=None, keywords=None, defaults=[\'None\', \'128\', \'1\', \'None\', \'1000\', \'1\'], "
+ }
+ member_method {
+ name: "pandas_input_fn"
+ argspec: "args=[\'x\', \'y\', \'batch_size\', \'num_epochs\', \'shuffle\', \'queue_capacity\', \'num_threads\', \'target_column\'], varargs=None, keywords=None, defaults=[\'None\', \'128\', \'1\', \'None\', \'1000\', \'1\', \'target\'], "
+ }
+}
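
Note: a minimal numpy_input_fn sketch, assuming the TF 1.x API above; the arrays are toy data.

    import numpy as np
    import tensorflow as tf

    x = {'x': np.arange(100, dtype=np.float32).reshape(-1, 1)}
    y = np.arange(100, dtype=np.float32)
    input_fn = tf.estimator.inputs.numpy_input_fn(
        x=x, y=y, batch_size=8, num_epochs=None, shuffle=True)
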
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.estimator.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.estimator.pbtxt
new file mode 100644
index 0000000000..f1d204a3ef
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.estimator.pbtxt
@@ -0,0 +1,111 @@
+path: "tensorflow.estimator"
+tf_module {
+ member {
+ name: "BaselineClassifier"
+ mtype: "<type \'type\'>"
+ }
+ member {
+ name: "BaselineRegressor"
+ mtype: "<type \'type\'>"
+ }
+ member {
+ name: "BestExporter"
+ mtype: "<type \'type\'>"
+ }
+ member {
+ name: "BoostedTreesClassifier"
+ mtype: "<type \'type\'>"
+ }
+ member {
+ name: "BoostedTreesRegressor"
+ mtype: "<type \'type\'>"
+ }
+ member {
+ name: "DNNClassifier"
+ mtype: "<type \'type\'>"
+ }
+ member {
+ name: "DNNLinearCombinedClassifier"
+ mtype: "<type \'type\'>"
+ }
+ member {
+ name: "DNNLinearCombinedRegressor"
+ mtype: "<type \'type\'>"
+ }
+ member {
+ name: "DNNRegressor"
+ mtype: "<type \'type\'>"
+ }
+ member {
+ name: "Estimator"
+ mtype: "<type \'type\'>"
+ }
+ member {
+ name: "EstimatorSpec"
+ mtype: "<type \'type\'>"
+ }
+ member {
+ name: "EvalSpec"
+ mtype: "<type \'type\'>"
+ }
+ member {
+ name: "Exporter"
+ mtype: "<type \'type\'>"
+ }
+ member {
+ name: "FinalExporter"
+ mtype: "<type \'type\'>"
+ }
+ member {
+ name: "LatestExporter"
+ mtype: "<type \'type\'>"
+ }
+ member {
+ name: "LinearClassifier"
+ mtype: "<type \'type\'>"
+ }
+ member {
+ name: "LinearRegressor"
+ mtype: "<type \'type\'>"
+ }
+ member {
+ name: "ModeKeys"
+ mtype: "<type \'type\'>"
+ }
+ member {
+ name: "RunConfig"
+ mtype: "<type \'type\'>"
+ }
+ member {
+ name: "TrainSpec"
+ mtype: "<type \'type\'>"
+ }
+ member {
+ name: "VocabInfo"
+ mtype: "<type \'type\'>"
+ }
+ member {
+ name: "WarmStartSettings"
+ mtype: "<type \'type\'>"
+ }
+ member {
+ name: "export"
+ mtype: "<type \'module\'>"
+ }
+ member {
+ name: "inputs"
+ mtype: "<type \'module\'>"
+ }
+ member_method {
+ name: "classifier_parse_example_spec"
+ argspec: "args=[\'feature_columns\', \'label_key\', \'label_dtype\', \'label_default\', \'weight_column\'], varargs=None, keywords=None, defaults=[\"<dtype: \'int64\'>\", \'None\', \'None\'], "
+ }
+ member_method {
+ name: "regressor_parse_example_spec"
+ argspec: "args=[\'feature_columns\', \'label_key\', \'label_dtype\', \'label_default\', \'label_dimension\', \'weight_column\'], varargs=None, keywords=None, defaults=[\"<dtype: \'float32\'>\", \'None\', \'1\', \'None\'], "
+ }
+ member_method {
+ name: "train_and_evaluate"
+ argspec: "args=[\'estimator\', \'train_spec\', \'eval_spec\'], varargs=None, keywords=None, defaults=None"
+ }
+}
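
Note: a hedged end-to-end sketch tying the module above together with train_and_evaluate; the data and step counts are toy values, and the Estimator writes to a temporary model_dir by default.

    import numpy as np
    import tensorflow as tf

    feature_columns = [tf.feature_column.numeric_column('x', shape=(1,))]
    estimator = tf.estimator.LinearRegressor(feature_columns=feature_columns)

    def input_fn():
        x = {'x': np.random.rand(64, 1).astype(np.float32)}
        y = np.random.rand(64).astype(np.float32)
        return tf.data.Dataset.from_tensor_slices((x, y)).repeat().batch(8)

    train_spec = tf.estimator.TrainSpec(input_fn=input_fn, max_steps=100)
    eval_spec = tf.estimator.EvalSpec(input_fn=input_fn, steps=10)
    tf.estimator.train_and_evaluate(estimator, train_spec, eval_spec)
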
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.feature_column.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.feature_column.pbtxt
new file mode 100644
index 0000000000..f06e798953
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.feature_column.pbtxt
@@ -0,0 +1,59 @@
+path: "tensorflow.feature_column"
+tf_module {
+ member_method {
+ name: "bucketized_column"
+ argspec: "args=[\'source_column\', \'boundaries\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "categorical_column_with_hash_bucket"
+ argspec: "args=[\'key\', \'hash_bucket_size\', \'dtype\'], varargs=None, keywords=None, defaults=[\"<dtype: \'string\'>\"], "
+ }
+ member_method {
+ name: "categorical_column_with_identity"
+ argspec: "args=[\'key\', \'num_buckets\', \'default_value\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "categorical_column_with_vocabulary_file"
+ argspec: "args=[\'key\', \'vocabulary_file\', \'vocabulary_size\', \'num_oov_buckets\', \'default_value\', \'dtype\'], varargs=None, keywords=None, defaults=[\'None\', \'0\', \'None\', \"<dtype: \'string\'>\"], "
+ }
+ member_method {
+ name: "categorical_column_with_vocabulary_list"
+ argspec: "args=[\'key\', \'vocabulary_list\', \'dtype\', \'default_value\', \'num_oov_buckets\'], varargs=None, keywords=None, defaults=[\'None\', \'-1\', \'0\'], "
+ }
+ member_method {
+ name: "crossed_column"
+ argspec: "args=[\'keys\', \'hash_bucket_size\', \'hash_key\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "embedding_column"
+ argspec: "args=[\'categorical_column\', \'dimension\', \'combiner\', \'initializer\', \'ckpt_to_load_from\', \'tensor_name_in_ckpt\', \'max_norm\', \'trainable\'], varargs=None, keywords=None, defaults=[\'mean\', \'None\', \'None\', \'None\', \'None\', \'True\'], "
+ }
+ member_method {
+ name: "indicator_column"
+ argspec: "args=[\'categorical_column\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "input_layer"
+ argspec: "args=[\'features\', \'feature_columns\', \'weight_collections\', \'trainable\', \'cols_to_vars\', \'cols_to_output_tensors\'], varargs=None, keywords=None, defaults=[\'None\', \'True\', \'None\', \'None\'], "
+ }
+ member_method {
+ name: "linear_model"
+ argspec: "args=[\'features\', \'feature_columns\', \'units\', \'sparse_combiner\', \'weight_collections\', \'trainable\', \'cols_to_vars\'], varargs=None, keywords=None, defaults=[\'1\', \'sum\', \'None\', \'True\', \'None\'], "
+ }
+ member_method {
+ name: "make_parse_example_spec"
+ argspec: "args=[\'feature_columns\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "numeric_column"
+ argspec: "args=[\'key\', \'shape\', \'default_value\', \'dtype\', \'normalizer_fn\'], varargs=None, keywords=None, defaults=[\'(1,)\', \'None\', \"<dtype: \'float32\'>\", \'None\'], "
+ }
+ member_method {
+ name: "shared_embedding_columns"
+ argspec: "args=[\'categorical_columns\', \'dimension\', \'combiner\', \'initializer\', \'shared_embedding_collection_name\', \'ckpt_to_load_from\', \'tensor_name_in_ckpt\', \'max_norm\', \'trainable\'], varargs=None, keywords=None, defaults=[\'mean\', \'None\', \'None\', \'None\', \'None\', \'None\', \'True\'], "
+ }
+ member_method {
+ name: "weighted_categorical_column"
+ argspec: "args=[\'categorical_column\', \'weight_feature_key\', \'dtype\'], varargs=None, keywords=None, defaults=[\"<dtype: \'float32\'>\"], "
+ }
+}
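
Note: a small sketch exercising several of the feature_column functions above, assuming the TF 1.x API; the column names and values are hypothetical.

    import tensorflow as tf

    price = tf.feature_column.numeric_column('price')
    price_buckets = tf.feature_column.bucketized_column(
        price, boundaries=[10., 100., 1000.])
    city = tf.feature_column.categorical_column_with_vocabulary_list(
        'city', vocabulary_list=['SF', 'NYC'])
    city_embedding = tf.feature_column.embedding_column(city, dimension=4)

    features = {'price': tf.constant([[5.0], [250.0]]),
                'city': tf.constant([['SF'], ['NYC']])}
    # Dense input tensor suitable for feeding a DNN.
    dense = tf.feature_column.input_layer(features, [price_buckets, city_embedding])
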
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.gfile.-fast-g-file.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.gfile.-fast-g-file.pbtxt
new file mode 100644
index 0000000000..eecfaffd0a
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.gfile.-fast-g-file.pbtxt
@@ -0,0 +1,58 @@
+path: "tensorflow.gfile.FastGFile"
+tf_class {
+ is_instance: "<class \'tensorflow.python.platform.gfile.FastGFile\'>"
+ is_instance: "<class \'tensorflow.python.lib.io.file_io.FileIO\'>"
+ is_instance: "<type \'object\'>"
+ member {
+ name: "mode"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "name"
+ mtype: "<type \'property\'>"
+ }
+ member_method {
+ name: "__init__"
+ argspec: "args=[\'self\', \'name\', \'mode\'], varargs=None, keywords=None, defaults=[\'r\'], "
+ }
+ member_method {
+ name: "close"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "flush"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "next"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "read"
+ argspec: "args=[\'self\', \'n\'], varargs=None, keywords=None, defaults=[\'-1\'], "
+ }
+ member_method {
+ name: "readline"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "readlines"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "seek"
+ argspec: "args=[\'self\', \'offset\', \'whence\', \'position\'], varargs=None, keywords=None, defaults=[\'None\', \'0\', \'None\'], "
+ }
+ member_method {
+ name: "size"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "tell"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "write"
+ argspec: "args=[\'self\', \'file_content\'], varargs=None, keywords=None, defaults=None"
+ }
+}
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.gfile.-g-file.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.gfile.-g-file.pbtxt
new file mode 100644
index 0000000000..305251059d
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.gfile.-g-file.pbtxt
@@ -0,0 +1,58 @@
+path: "tensorflow.gfile.GFile"
+tf_class {
+ is_instance: "<class \'tensorflow.python.platform.gfile.GFile\'>"
+ is_instance: "<class \'tensorflow.python.lib.io.file_io.FileIO\'>"
+ is_instance: "<type \'object\'>"
+ member {
+ name: "mode"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "name"
+ mtype: "<type \'property\'>"
+ }
+ member_method {
+ name: "__init__"
+ argspec: "args=[\'self\', \'name\', \'mode\'], varargs=None, keywords=None, defaults=[\'r\'], "
+ }
+ member_method {
+ name: "close"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "flush"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "next"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "read"
+ argspec: "args=[\'self\', \'n\'], varargs=None, keywords=None, defaults=[\'-1\'], "
+ }
+ member_method {
+ name: "readline"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "readlines"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "seek"
+ argspec: "args=[\'self\', \'offset\', \'whence\', \'position\'], varargs=None, keywords=None, defaults=[\'None\', \'0\', \'None\'], "
+ }
+ member_method {
+ name: "size"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "tell"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "write"
+ argspec: "args=[\'self\', \'file_content\'], varargs=None, keywords=None, defaults=None"
+ }
+}
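
Note: a minimal GFile sketch, assuming the TF 1.x tf.gfile API above; the path is hypothetical, and GFile also accepts GCS/HDFS URIs.

    import tensorflow as tf

    # Write then read back a local file through the gfile abstraction.
    with tf.gfile.GFile('/tmp/example.txt', 'w') as f:
        f.write('hello\n')
    with tf.gfile.GFile('/tmp/example.txt', 'r') as f:
        print(f.read())
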
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.gfile.-open.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.gfile.-open.pbtxt
new file mode 100644
index 0000000000..6e8894180a
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.gfile.-open.pbtxt
@@ -0,0 +1,58 @@
+path: "tensorflow.gfile.Open"
+tf_class {
+ is_instance: "<class \'tensorflow.python.platform.gfile.GFile\'>"
+ is_instance: "<class \'tensorflow.python.lib.io.file_io.FileIO\'>"
+ is_instance: "<type \'object\'>"
+ member {
+ name: "mode"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "name"
+ mtype: "<type \'property\'>"
+ }
+ member_method {
+ name: "__init__"
+ argspec: "args=[\'self\', \'name\', \'mode\'], varargs=None, keywords=None, defaults=[\'r\'], "
+ }
+ member_method {
+ name: "close"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "flush"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "next"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "read"
+ argspec: "args=[\'self\', \'n\'], varargs=None, keywords=None, defaults=[\'-1\'], "
+ }
+ member_method {
+ name: "readline"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "readlines"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "seek"
+ argspec: "args=[\'self\', \'offset\', \'whence\', \'position\'], varargs=None, keywords=None, defaults=[\'None\', \'0\', \'None\'], "
+ }
+ member_method {
+ name: "size"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "tell"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "write"
+ argspec: "args=[\'self\', \'file_content\'], varargs=None, keywords=None, defaults=None"
+ }
+}
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.gfile.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.gfile.pbtxt
new file mode 100644
index 0000000000..65b55a8b7c
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.gfile.pbtxt
@@ -0,0 +1,63 @@
+path: "tensorflow.gfile"
+tf_module {
+ member {
+ name: "FastGFile"
+ mtype: "<type \'type\'>"
+ }
+ member {
+ name: "GFile"
+ mtype: "<type \'type\'>"
+ }
+ member {
+ name: "Open"
+ mtype: "<type \'type\'>"
+ }
+ member_method {
+ name: "Copy"
+ argspec: "args=[\'oldpath\', \'newpath\', \'overwrite\'], varargs=None, keywords=None, defaults=[\'False\'], "
+ }
+ member_method {
+ name: "DeleteRecursively"
+ argspec: "args=[\'dirname\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "Exists"
+ argspec: "args=[\'filename\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "Glob"
+ argspec: "args=[\'filename\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "IsDirectory"
+ argspec: "args=[\'dirname\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "ListDirectory"
+ argspec: "args=[\'dirname\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "MakeDirs"
+ argspec: "args=[\'dirname\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "MkDir"
+ argspec: "args=[\'dirname\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "Remove"
+ argspec: "args=[\'filename\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "Rename"
+ argspec: "args=[\'oldname\', \'newname\', \'overwrite\'], varargs=None, keywords=None, defaults=[\'False\'], "
+ }
+ member_method {
+ name: "Stat"
+ argspec: "args=[\'filename\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "Walk"
+ argspec: "args=[\'top\', \'in_order\'], varargs=None, keywords=None, defaults=[\'True\'], "
+ }
+}
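
Note: a short sketch of the module-level gfile helpers above; the directory is hypothetical.

    import tensorflow as tf

    if not tf.gfile.Exists('/tmp/demo_dir'):
        tf.gfile.MakeDirs('/tmp/demo_dir')
    for path in tf.gfile.Glob('/tmp/demo_dir/*'):
        # Stat returns file statistics, including the size in bytes.
        print(path, tf.gfile.Stat(path).length)
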
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.glorot_normal_initializer.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.glorot_normal_initializer.pbtxt
new file mode 100644
index 0000000000..483d1f8ba0
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.glorot_normal_initializer.pbtxt
@@ -0,0 +1,19 @@
+path: "tensorflow.glorot_normal_initializer"
+tf_class {
+ is_instance: "<class \'tensorflow.python.ops.init_ops.GlorotNormal\'>"
+ is_instance: "<class \'tensorflow.python.ops.init_ops.VarianceScaling\'>"
+ is_instance: "<class \'tensorflow.python.ops.init_ops.Initializer\'>"
+ is_instance: "<type \'object\'>"
+ member_method {
+ name: "__init__"
+ argspec: "args=[\'self\', \'seed\', \'dtype\'], varargs=None, keywords=None, defaults=[\'None\', \"<dtype: \'float32\'>\"], "
+ }
+ member_method {
+ name: "from_config"
+ argspec: "args=[\'cls\', \'config\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_config"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+}
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.glorot_uniform_initializer.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.glorot_uniform_initializer.pbtxt
new file mode 100644
index 0000000000..bb8540d0fd
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.glorot_uniform_initializer.pbtxt
@@ -0,0 +1,19 @@
+path: "tensorflow.glorot_uniform_initializer"
+tf_class {
+ is_instance: "<class \'tensorflow.python.ops.init_ops.GlorotUniform\'>"
+ is_instance: "<class \'tensorflow.python.ops.init_ops.VarianceScaling\'>"
+ is_instance: "<class \'tensorflow.python.ops.init_ops.Initializer\'>"
+ is_instance: "<type \'object\'>"
+ member_method {
+ name: "__init__"
+ argspec: "args=[\'self\', \'seed\', \'dtype\'], varargs=None, keywords=None, defaults=[\'None\', \"<dtype: \'float32\'>\"], "
+ }
+ member_method {
+ name: "from_config"
+ argspec: "args=[\'cls\', \'config\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_config"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+}
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.graph_util.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.graph_util.pbtxt
new file mode 100644
index 0000000000..eeabf845dc
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.graph_util.pbtxt
@@ -0,0 +1,23 @@
+path: "tensorflow.graph_util"
+tf_module {
+ member_method {
+ name: "convert_variables_to_constants"
+ argspec: "args=[\'sess\', \'input_graph_def\', \'output_node_names\', \'variable_names_whitelist\', \'variable_names_blacklist\'], varargs=None, keywords=None, defaults=[\'None\', \'None\'], "
+ }
+ member_method {
+ name: "extract_sub_graph"
+ argspec: "args=[\'graph_def\', \'dest_nodes\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "must_run_on_cpu"
+ argspec: "args=[\'node\', \'pin_variables_on_cpu\'], varargs=None, keywords=None, defaults=[\'False\'], "
+ }
+ member_method {
+ name: "remove_training_nodes"
+ argspec: "args=[\'input_graph\', \'protected_nodes\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "tensor_shape_from_node_def_name"
+ argspec: "args=[\'graph\', \'input_name\'], varargs=None, keywords=None, defaults=None"
+ }
+}
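
Note: a hedged sketch of convert_variables_to_constants from the module above, assuming the TF 1.x graph/session API; the variable and node names are hypothetical.

    import tensorflow as tf

    with tf.Graph().as_default() as g:
        v = tf.Variable(3.0, name='v')
        out = tf.multiply(v, 2.0, name='out')
        with tf.Session() as sess:
            sess.run(tf.global_variables_initializer())
            # Freeze the variable into the graph as a constant.
            frozen = tf.graph_util.convert_variables_to_constants(
                sess, g.as_graph_def(), output_node_names=['out'])
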
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.image.-resize-method.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.image.-resize-method.pbtxt
new file mode 100644
index 0000000000..dbc360b13e
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.image.-resize-method.pbtxt
@@ -0,0 +1,24 @@
+path: "tensorflow.image.ResizeMethod"
+tf_class {
+ is_instance: "<class \'tensorflow.python.ops.image_ops_impl.ResizeMethod\'>"
+ is_instance: "<type \'object\'>"
+ member {
+ name: "AREA"
+ mtype: "<type \'int\'>"
+ }
+ member {
+ name: "BICUBIC"
+ mtype: "<type \'int\'>"
+ }
+ member {
+ name: "BILINEAR"
+ mtype: "<type \'int\'>"
+ }
+ member {
+ name: "NEAREST_NEIGHBOR"
+ mtype: "<type \'int\'>"
+ }
+ member_method {
+ name: "__init__"
+ }
+}
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.image.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.image.pbtxt
new file mode 100644
index 0000000000..5c46dc5ee7
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.image.pbtxt
@@ -0,0 +1,251 @@
+path: "tensorflow.image"
+tf_module {
+ member {
+ name: "ResizeMethod"
+ mtype: "<type \'type\'>"
+ }
+ member_method {
+ name: "adjust_brightness"
+ argspec: "args=[\'image\', \'delta\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "adjust_contrast"
+ argspec: "args=[\'images\', \'contrast_factor\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "adjust_gamma"
+ argspec: "args=[\'image\', \'gamma\', \'gain\'], varargs=None, keywords=None, defaults=[\'1\', \'1\'], "
+ }
+ member_method {
+ name: "adjust_hue"
+ argspec: "args=[\'image\', \'delta\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "adjust_jpeg_quality"
+ argspec: "args=[\'image\', \'jpeg_quality\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "adjust_saturation"
+ argspec: "args=[\'image\', \'saturation_factor\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "central_crop"
+ argspec: "args=[\'image\', \'central_fraction\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "convert_image_dtype"
+ argspec: "args=[\'image\', \'dtype\', \'saturate\', \'name\'], varargs=None, keywords=None, defaults=[\'False\', \'None\'], "
+ }
+ member_method {
+ name: "crop_and_resize"
+ argspec: "args=[\'image\', \'boxes\', \'box_ind\', \'crop_size\', \'method\', \'extrapolation_value\', \'name\'], varargs=None, keywords=None, defaults=[\'bilinear\', \'0\', \'None\'], "
+ }
+ member_method {
+ name: "crop_to_bounding_box"
+ argspec: "args=[\'image\', \'offset_height\', \'offset_width\', \'target_height\', \'target_width\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "decode_and_crop_jpeg"
+ argspec: "args=[\'contents\', \'crop_window\', \'channels\', \'ratio\', \'fancy_upscaling\', \'try_recover_truncated\', \'acceptable_fraction\', \'dct_method\', \'name\'], varargs=None, keywords=None, defaults=[\'0\', \'1\', \'True\', \'False\', \'1\', \'\', \'None\'], "
+ }
+ member_method {
+ name: "decode_bmp"
+ argspec: "args=[\'contents\', \'channels\', \'name\'], varargs=None, keywords=None, defaults=[\'0\', \'None\'], "
+ }
+ member_method {
+ name: "decode_gif"
+ argspec: "args=[\'contents\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "decode_image"
+ argspec: "args=[\'contents\', \'channels\', \'dtype\', \'name\'], varargs=None, keywords=None, defaults=[\'None\', \"<dtype: \'uint8\'>\", \'None\'], "
+ }
+ member_method {
+ name: "decode_jpeg"
+ argspec: "args=[\'contents\', \'channels\', \'ratio\', \'fancy_upscaling\', \'try_recover_truncated\', \'acceptable_fraction\', \'dct_method\', \'name\'], varargs=None, keywords=None, defaults=[\'0\', \'1\', \'True\', \'False\', \'1\', \'\', \'None\'], "
+ }
+ member_method {
+ name: "decode_png"
+ argspec: "args=[\'contents\', \'channels\', \'dtype\', \'name\'], varargs=None, keywords=None, defaults=[\'0\', \"<dtype: \'uint8\'>\", \'None\'], "
+ }
+ member_method {
+ name: "draw_bounding_boxes"
+ argspec: "args=[\'images\', \'boxes\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "encode_jpeg"
+ argspec: "args=[\'image\', \'format\', \'quality\', \'progressive\', \'optimize_size\', \'chroma_downsampling\', \'density_unit\', \'x_density\', \'y_density\', \'xmp_metadata\', \'name\'], varargs=None, keywords=None, defaults=[\'\', \'95\', \'False\', \'False\', \'True\', \'in\', \'300\', \'300\', \'\', \'None\'], "
+ }
+ member_method {
+ name: "encode_png"
+ argspec: "args=[\'image\', \'compression\', \'name\'], varargs=None, keywords=None, defaults=[\'-1\', \'None\'], "
+ }
+ member_method {
+ name: "extract_glimpse"
+ argspec: "args=[\'input\', \'size\', \'offsets\', \'centered\', \'normalized\', \'uniform_noise\', \'name\'], varargs=None, keywords=None, defaults=[\'True\', \'True\', \'True\', \'None\'], "
+ }
+ member_method {
+ name: "extract_image_patches"
+ argspec: "args=[\'images\', \'ksizes\', \'strides\', \'rates\', \'padding\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "extract_jpeg_shape"
+ argspec: "args=[\'contents\', \'output_type\', \'name\'], varargs=None, keywords=None, defaults=[\"<dtype: \'int32\'>\", \'None\'], "
+ }
+ member_method {
+ name: "flip_left_right"
+ argspec: "args=[\'image\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "flip_up_down"
+ argspec: "args=[\'image\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "grayscale_to_rgb"
+ argspec: "args=[\'images\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "hsv_to_rgb"
+ argspec: "args=[\'images\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "image_gradients"
+ argspec: "args=[\'image\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "is_jpeg"
+ argspec: "args=[\'contents\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "non_max_suppression"
+ argspec: "args=[\'boxes\', \'scores\', \'max_output_size\', \'iou_threshold\', \'score_threshold\', \'name\'], varargs=None, keywords=None, defaults=[\'0.5\', \'-inf\', \'None\'], "
+ }
+ member_method {
+ name: "non_max_suppression_overlaps"
+ argspec: "args=[\'overlaps\', \'scores\', \'max_output_size\', \'overlap_threshold\', \'score_threshold\', \'name\'], varargs=None, keywords=None, defaults=[\'0.5\', \'-inf\', \'None\'], "
+ }
+ member_method {
+ name: "non_max_suppression_padded"
+ argspec: "args=[\'boxes\', \'scores\', \'max_output_size\', \'iou_threshold\', \'score_threshold\', \'pad_to_max_output_size\', \'name\'], varargs=None, keywords=None, defaults=[\'0.5\', \'-inf\', \'False\', \'None\'], "
+ }
+ member_method {
+ name: "pad_to_bounding_box"
+ argspec: "args=[\'image\', \'offset_height\', \'offset_width\', \'target_height\', \'target_width\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "per_image_standardization"
+ argspec: "args=[\'image\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "psnr"
+ argspec: "args=[\'a\', \'b\', \'max_val\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "random_brightness"
+ argspec: "args=[\'image\', \'max_delta\', \'seed\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "random_contrast"
+ argspec: "args=[\'image\', \'lower\', \'upper\', \'seed\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "random_flip_left_right"
+ argspec: "args=[\'image\', \'seed\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "random_flip_up_down"
+ argspec: "args=[\'image\', \'seed\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "random_hue"
+ argspec: "args=[\'image\', \'max_delta\', \'seed\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "random_jpeg_quality"
+ argspec: "args=[\'image\', \'min_jpeg_quality\', \'max_jpeg_quality\', \'seed\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "random_saturation"
+ argspec: "args=[\'image\', \'lower\', \'upper\', \'seed\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "resize_area"
+ argspec: "args=[\'images\', \'size\', \'align_corners\', \'name\'], varargs=None, keywords=None, defaults=[\'False\', \'None\'], "
+ }
+ member_method {
+ name: "resize_bicubic"
+ argspec: "args=[\'images\', \'size\', \'align_corners\', \'name\'], varargs=None, keywords=None, defaults=[\'False\', \'None\'], "
+ }
+ member_method {
+ name: "resize_bilinear"
+ argspec: "args=[\'images\', \'size\', \'align_corners\', \'name\'], varargs=None, keywords=None, defaults=[\'False\', \'None\'], "
+ }
+ member_method {
+ name: "resize_image_with_crop_or_pad"
+ argspec: "args=[\'image\', \'target_height\', \'target_width\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "resize_image_with_pad"
+ argspec: "args=[\'image\', \'target_height\', \'target_width\', \'method\'], varargs=None, keywords=None, defaults=[\'0\'], "
+ }
+ member_method {
+ name: "resize_images"
+ argspec: "args=[\'images\', \'size\', \'method\', \'align_corners\', \'preserve_aspect_ratio\'], varargs=None, keywords=None, defaults=[\'0\', \'False\', \'False\'], "
+ }
+ member_method {
+ name: "resize_nearest_neighbor"
+ argspec: "args=[\'images\', \'size\', \'align_corners\', \'name\'], varargs=None, keywords=None, defaults=[\'False\', \'None\'], "
+ }
+ member_method {
+ name: "rgb_to_grayscale"
+ argspec: "args=[\'images\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "rgb_to_hsv"
+ argspec: "args=[\'images\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "rgb_to_yiq"
+ argspec: "args=[\'images\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "rgb_to_yuv"
+ argspec: "args=[\'images\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "rot90"
+ argspec: "args=[\'image\', \'k\', \'name\'], varargs=None, keywords=None, defaults=[\'1\', \'None\'], "
+ }
+ member_method {
+ name: "sample_distorted_bounding_box"
+ argspec: "args=[\'image_size\', \'bounding_boxes\', \'seed\', \'seed2\', \'min_object_covered\', \'aspect_ratio_range\', \'area_range\', \'max_attempts\', \'use_image_if_no_bounding_boxes\', \'name\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'0.1\', \'None\', \'None\', \'None\', \'None\', \'None\'], "
+ }
+ member_method {
+ name: "sobel_edges"
+ argspec: "args=[\'image\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "ssim"
+ argspec: "args=[\'img1\', \'img2\', \'max_val\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "ssim_multiscale"
+ argspec: "args=[\'img1\', \'img2\', \'max_val\', \'power_factors\'], varargs=None, keywords=None, defaults=[\'(0.0448, 0.2856, 0.3001, 0.2363, 0.1333)\'], "
+ }
+ member_method {
+ name: "total_variation"
+ argspec: "args=[\'images\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "transpose_image"
+ argspec: "args=[\'image\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "yiq_to_rgb"
+ argspec: "args=[\'images\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "yuv_to_rgb"
+ argspec: "args=[\'images\'], varargs=None, keywords=None, defaults=None"
+ }
+}
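
Note: a small sketch chaining a few of the tf.image ops above, assuming the TF 1.x API; the input is a random toy tensor standing in for an HWC image.

    import tensorflow as tf

    image = tf.random_uniform([64, 64, 3])        # hypothetical HWC float image
    flipped = tf.image.random_flip_left_right(image)
    resized = tf.image.resize_images(
        flipped, size=[32, 32], method=tf.image.ResizeMethod.BILINEAR)
    normalized = tf.image.per_image_standardization(resized)
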
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.initializers.constant.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.initializers.constant.pbtxt
new file mode 100644
index 0000000000..607a5aae21
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.initializers.constant.pbtxt
@@ -0,0 +1,18 @@
+path: "tensorflow.initializers.constant"
+tf_class {
+ is_instance: "<class \'tensorflow.python.ops.init_ops.Constant\'>"
+ is_instance: "<class \'tensorflow.python.ops.init_ops.Initializer\'>"
+ is_instance: "<type \'object\'>"
+ member_method {
+ name: "__init__"
+ argspec: "args=[\'self\', \'value\', \'dtype\', \'verify_shape\'], varargs=None, keywords=None, defaults=[\'0\', \"<dtype: \'float32\'>\", \'False\'], "
+ }
+ member_method {
+ name: "from_config"
+ argspec: "args=[\'cls\', \'config\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_config"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+}
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.initializers.glorot_normal.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.initializers.glorot_normal.pbtxt
new file mode 100644
index 0000000000..4a81e52df9
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.initializers.glorot_normal.pbtxt
@@ -0,0 +1,19 @@
+path: "tensorflow.initializers.glorot_normal"
+tf_class {
+ is_instance: "<class \'tensorflow.python.ops.init_ops.GlorotNormal\'>"
+ is_instance: "<class \'tensorflow.python.ops.init_ops.VarianceScaling\'>"
+ is_instance: "<class \'tensorflow.python.ops.init_ops.Initializer\'>"
+ is_instance: "<type \'object\'>"
+ member_method {
+ name: "__init__"
+ argspec: "args=[\'self\', \'seed\', \'dtype\'], varargs=None, keywords=None, defaults=[\'None\', \"<dtype: \'float32\'>\"], "
+ }
+ member_method {
+ name: "from_config"
+ argspec: "args=[\'cls\', \'config\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_config"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+}
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.initializers.glorot_uniform.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.initializers.glorot_uniform.pbtxt
new file mode 100644
index 0000000000..815dc81dff
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.initializers.glorot_uniform.pbtxt
@@ -0,0 +1,19 @@
+path: "tensorflow.initializers.glorot_uniform"
+tf_class {
+ is_instance: "<class \'tensorflow.python.ops.init_ops.GlorotUniform\'>"
+ is_instance: "<class \'tensorflow.python.ops.init_ops.VarianceScaling\'>"
+ is_instance: "<class \'tensorflow.python.ops.init_ops.Initializer\'>"
+ is_instance: "<type \'object\'>"
+ member_method {
+ name: "__init__"
+ argspec: "args=[\'self\', \'seed\', \'dtype\'], varargs=None, keywords=None, defaults=[\'None\', \"<dtype: \'float32\'>\"], "
+ }
+ member_method {
+ name: "from_config"
+ argspec: "args=[\'cls\', \'config\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_config"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+}
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.initializers.identity.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.initializers.identity.pbtxt
new file mode 100644
index 0000000000..37fcab9599
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.initializers.identity.pbtxt
@@ -0,0 +1,18 @@
+path: "tensorflow.initializers.identity"
+tf_class {
+ is_instance: "<class \'tensorflow.python.ops.init_ops.Identity\'>"
+ is_instance: "<class \'tensorflow.python.ops.init_ops.Initializer\'>"
+ is_instance: "<type \'object\'>"
+ member_method {
+ name: "__init__"
+ argspec: "args=[\'self\', \'gain\', \'dtype\'], varargs=None, keywords=None, defaults=[\'1.0\', \"<dtype: \'float32\'>\"], "
+ }
+ member_method {
+ name: "from_config"
+ argspec: "args=[\'cls\', \'config\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_config"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+}
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.initializers.ones.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.initializers.ones.pbtxt
new file mode 100644
index 0000000000..18481d4815
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.initializers.ones.pbtxt
@@ -0,0 +1,18 @@
+path: "tensorflow.initializers.ones"
+tf_class {
+ is_instance: "<class \'tensorflow.python.ops.init_ops.Ones\'>"
+ is_instance: "<class \'tensorflow.python.ops.init_ops.Initializer\'>"
+ is_instance: "<type \'object\'>"
+ member_method {
+ name: "__init__"
+ argspec: "args=[\'self\', \'dtype\'], varargs=None, keywords=None, defaults=[\"<dtype: \'float32\'>\"], "
+ }
+ member_method {
+ name: "from_config"
+ argspec: "args=[\'cls\', \'config\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_config"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+}
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.initializers.orthogonal.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.initializers.orthogonal.pbtxt
new file mode 100644
index 0000000000..ff64efd60c
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.initializers.orthogonal.pbtxt
@@ -0,0 +1,18 @@
+path: "tensorflow.initializers.orthogonal"
+tf_class {
+ is_instance: "<class \'tensorflow.python.ops.init_ops.Orthogonal\'>"
+ is_instance: "<class \'tensorflow.python.ops.init_ops.Initializer\'>"
+ is_instance: "<type \'object\'>"
+ member_method {
+ name: "__init__"
+ argspec: "args=[\'self\', \'gain\', \'seed\', \'dtype\'], varargs=None, keywords=None, defaults=[\'1.0\', \'None\', \"<dtype: \'float32\'>\"], "
+ }
+ member_method {
+ name: "from_config"
+ argspec: "args=[\'cls\', \'config\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_config"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+}
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.initializers.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.initializers.pbtxt
new file mode 100644
index 0000000000..d499c67d89
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.initializers.pbtxt
@@ -0,0 +1,79 @@
+path: "tensorflow.initializers"
+tf_module {
+ member {
+ name: "constant"
+ mtype: "<type \'type\'>"
+ }
+ member {
+ name: "glorot_normal"
+ mtype: "<type \'type\'>"
+ }
+ member {
+ name: "glorot_uniform"
+ mtype: "<type \'type\'>"
+ }
+ member {
+ name: "identity"
+ mtype: "<type \'type\'>"
+ }
+ member {
+ name: "ones"
+ mtype: "<type \'type\'>"
+ }
+ member {
+ name: "orthogonal"
+ mtype: "<type \'type\'>"
+ }
+ member {
+ name: "random_normal"
+ mtype: "<type \'type\'>"
+ }
+ member {
+ name: "random_uniform"
+ mtype: "<type \'type\'>"
+ }
+ member {
+ name: "truncated_normal"
+ mtype: "<type \'type\'>"
+ }
+ member {
+ name: "uniform_unit_scaling"
+ mtype: "<type \'type\'>"
+ }
+ member {
+ name: "variance_scaling"
+ mtype: "<type \'type\'>"
+ }
+ member {
+ name: "zeros"
+ mtype: "<type \'type\'>"
+ }
+ member_method {
+ name: "global_variables"
+ argspec: "args=[], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "he_normal"
+ argspec: "args=[\'seed\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "he_uniform"
+ argspec: "args=[\'seed\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "lecun_normal"
+ argspec: "args=[\'seed\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "lecun_uniform"
+ argspec: "args=[\'seed\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "local_variables"
+ argspec: "args=[], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "variables"
+ argspec: "args=[\'var_list\', \'name\'], varargs=None, keywords=None, defaults=[\'init\'], "
+ }
+}
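
Note: a minimal sketch of the tf.initializers aliases above, assuming the TF 1.x variable API; the variable names and shapes are hypothetical.

    import tensorflow as tf

    w = tf.get_variable('w', shape=[4, 8], initializer=tf.initializers.glorot_uniform())
    b = tf.get_variable('b', shape=[8], initializer=tf.initializers.zeros())
    # tf.initializers.global_variables is an alias of tf.global_variables_initializer.
    init_op = tf.initializers.global_variables()
    with tf.Session() as sess:
        sess.run(init_op)
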
diff --git a/tensorflow/tools/api/golden/tensorflow.keras.initializers.-random-normal.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.initializers.random_normal.pbtxt
index 23cd02c0b0..133e61c1d9 100644
--- a/tensorflow/tools/api/golden/tensorflow.keras.initializers.-random-normal.pbtxt
+++ b/tensorflow/tools/api/golden/v2/tensorflow.initializers.random_normal.pbtxt
@@ -1,4 +1,4 @@
-path: "tensorflow.keras.initializers.RandomNormal"
+path: "tensorflow.initializers.random_normal"
tf_class {
is_instance: "<class \'tensorflow.python.ops.init_ops.RandomNormal\'>"
is_instance: "<class \'tensorflow.python.ops.init_ops.Initializer\'>"
diff --git a/tensorflow/tools/api/golden/tensorflow.keras.initializers.-random-uniform.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.initializers.random_uniform.pbtxt
index d98628f422..0cfa0080f5 100644
--- a/tensorflow/tools/api/golden/tensorflow.keras.initializers.-random-uniform.pbtxt
+++ b/tensorflow/tools/api/golden/v2/tensorflow.initializers.random_uniform.pbtxt
@@ -1,4 +1,4 @@
-path: "tensorflow.keras.initializers.RandomUniform"
+path: "tensorflow.initializers.random_uniform"
tf_class {
is_instance: "<class \'tensorflow.python.ops.init_ops.RandomUniform\'>"
is_instance: "<class \'tensorflow.python.ops.init_ops.Initializer\'>"
diff --git a/tensorflow/tools/api/golden/tensorflow.keras.initializers.-truncated-normal.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.initializers.truncated_normal.pbtxt
index 86d48257c1..730390fba2 100644
--- a/tensorflow/tools/api/golden/tensorflow.keras.initializers.-truncated-normal.pbtxt
+++ b/tensorflow/tools/api/golden/v2/tensorflow.initializers.truncated_normal.pbtxt
@@ -1,4 +1,4 @@
-path: "tensorflow.keras.initializers.TruncatedNormal"
+path: "tensorflow.initializers.truncated_normal"
tf_class {
is_instance: "<class \'tensorflow.python.ops.init_ops.TruncatedNormal\'>"
is_instance: "<class \'tensorflow.python.ops.init_ops.Initializer\'>"
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.initializers.uniform_unit_scaling.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.initializers.uniform_unit_scaling.pbtxt
new file mode 100644
index 0000000000..13295ef375
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.initializers.uniform_unit_scaling.pbtxt
@@ -0,0 +1,18 @@
+path: "tensorflow.initializers.uniform_unit_scaling"
+tf_class {
+ is_instance: "<class \'tensorflow.python.ops.init_ops.UniformUnitScaling\'>"
+ is_instance: "<class \'tensorflow.python.ops.init_ops.Initializer\'>"
+ is_instance: "<type \'object\'>"
+ member_method {
+ name: "__init__"
+ argspec: "args=[\'self\', \'factor\', \'seed\', \'dtype\'], varargs=None, keywords=None, defaults=[\'1.0\', \'None\', \"<dtype: \'float32\'>\"], "
+ }
+ member_method {
+ name: "from_config"
+ argspec: "args=[\'cls\', \'config\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_config"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+}
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.initializers.variance_scaling.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.initializers.variance_scaling.pbtxt
new file mode 100644
index 0000000000..86340913e2
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.initializers.variance_scaling.pbtxt
@@ -0,0 +1,18 @@
+path: "tensorflow.initializers.variance_scaling"
+tf_class {
+ is_instance: "<class \'tensorflow.python.ops.init_ops.VarianceScaling\'>"
+ is_instance: "<class \'tensorflow.python.ops.init_ops.Initializer\'>"
+ is_instance: "<type \'object\'>"
+ member_method {
+ name: "__init__"
+ argspec: "args=[\'self\', \'scale\', \'mode\', \'distribution\', \'seed\', \'dtype\'], varargs=None, keywords=None, defaults=[\'1.0\', \'fan_in\', \'truncated_normal\', \'None\', \"<dtype: \'float32\'>\"], "
+ }
+ member_method {
+ name: "from_config"
+ argspec: "args=[\'cls\', \'config\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_config"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+}
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.initializers.zeros.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.initializers.zeros.pbtxt
new file mode 100644
index 0000000000..7df4237bb6
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.initializers.zeros.pbtxt
@@ -0,0 +1,18 @@
+path: "tensorflow.initializers.zeros"
+tf_class {
+ is_instance: "<class \'tensorflow.python.ops.init_ops.Zeros\'>"
+ is_instance: "<class \'tensorflow.python.ops.init_ops.Initializer\'>"
+ is_instance: "<type \'object\'>"
+ member_method {
+ name: "__init__"
+ argspec: "args=[\'self\', \'dtype\'], varargs=None, keywords=None, defaults=[\"<dtype: \'float32\'>\"], "
+ }
+ member_method {
+ name: "from_config"
+ argspec: "args=[\'cls\', \'config\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_config"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+}
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.io.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.io.pbtxt
new file mode 100644
index 0000000000..8938cf217b
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.io.pbtxt
@@ -0,0 +1,43 @@
+path: "tensorflow.io"
+tf_module {
+ member_method {
+ name: "decode_base64"
+ argspec: "args=[\'input\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "decode_compressed"
+ argspec: "args=[\'bytes\', \'compression_type\', \'name\'], varargs=None, keywords=None, defaults=[\'\', \'None\'], "
+ }
+ member_method {
+ name: "decode_json_example"
+ argspec: "args=[\'json_examples\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "decode_raw"
+ argspec: "args=[\'bytes\', \'out_type\', \'little_endian\', \'name\'], varargs=None, keywords=None, defaults=[\'True\', \'None\'], "
+ }
+ member_method {
+ name: "encode_base64"
+ argspec: "args=[\'input\', \'pad\', \'name\'], varargs=None, keywords=None, defaults=[\'False\', \'None\'], "
+ }
+ member_method {
+ name: "matching_files"
+ argspec: "args=[\'pattern\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "parse_sequence_example"
+ argspec: "args=[\'serialized\', \'context_features\', \'sequence_features\', \'example_names\', \'name\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'None\'], "
+ }
+ member_method {
+ name: "parse_tensor"
+ argspec: "args=[\'serialized\', \'out_type\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "read_file"
+ argspec: "args=[\'filename\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "write_file"
+ argspec: "args=[\'filename\', \'contents\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+}
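
Note: a short sketch of a few tf.io ops above, assuming the TF 1.x API; the file name is hypothetical.

    import tensorflow as tf

    raw = tf.io.read_file('example.bin')               # hypothetical binary file
    values = tf.io.decode_raw(raw, out_type=tf.uint8)  # bytes -> uint8 tensor
    encoded = tf.io.encode_base64(raw)
    decoded = tf.io.decode_base64(encoded)
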
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.keras.-model.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.keras.-model.pbtxt
new file mode 100644
index 0000000000..0869de0243
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.keras.-model.pbtxt
@@ -0,0 +1,268 @@
+path: "tensorflow.keras.Model"
+tf_class {
+ is_instance: "<class \'tensorflow.python.keras.engine.training.Model\'>"
+ is_instance: "<class \'tensorflow.python.keras.engine.network.Network\'>"
+ is_instance: "<class \'tensorflow.python.keras.engine.base_layer.Layer\'>"
+ is_instance: "<class \'tensorflow.python.training.checkpointable.base.CheckpointableBase\'>"
+ is_instance: "<type \'object\'>"
+ member {
+ name: "activity_regularizer"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "dtype"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "inbound_nodes"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "input"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "input_mask"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "input_shape"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "input_spec"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "layers"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "losses"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "name"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "non_trainable_variables"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "non_trainable_weights"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "outbound_nodes"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "output"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "output_mask"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "output_shape"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "state_updates"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "stateful"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "trainable_variables"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "trainable_weights"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "updates"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "uses_learning_phase"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "variables"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "weights"
+ mtype: "<type \'property\'>"
+ }
+ member_method {
+ name: "__init__"
+ argspec: "args=[\'self\'], varargs=args, keywords=kwargs, defaults=None"
+ }
+ member_method {
+ name: "add_loss"
+ argspec: "args=[\'self\'], varargs=args, keywords=kwargs, defaults=None"
+ }
+ member_method {
+ name: "add_update"
+ argspec: "args=[\'self\', \'updates\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "add_variable"
+ argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'True\', \'None\'], "
+ }
+ member_method {
+ name: "add_weight"
+ argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'synchronization\', \'aggregation\'], varargs=None, keywords=kwargs, defaults=[\'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'VariableSynchronization.AUTO\', \'VariableAggregation.NONE\'], "
+ }
+ member_method {
+ name: "apply"
+ argspec: "args=[\'self\', \'inputs\'], varargs=args, keywords=kwargs, defaults=None"
+ }
+ member_method {
+ name: "build"
+ argspec: "args=[\'self\', \'input_shape\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "call"
+ argspec: "args=[\'self\', \'inputs\', \'training\', \'mask\'], varargs=None, keywords=None, defaults=[\'None\', \'None\'], "
+ }
+ member_method {
+ name: "compile"
+ argspec: "args=[\'self\', \'optimizer\', \'loss\', \'metrics\', \'loss_weights\', \'sample_weight_mode\', \'weighted_metrics\', \'target_tensors\', \'distribute\'], varargs=None, keywords=kwargs, defaults=[\'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'None\'], "
+ }
+ member_method {
+ name: "compute_mask"
+ argspec: "args=[\'self\', \'inputs\', \'mask\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "compute_output_shape"
+ argspec: "args=[\'self\', \'input_shape\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "count_params"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "evaluate"
+ argspec: "args=[\'self\', \'x\', \'y\', \'batch_size\', \'verbose\', \'sample_weight\', \'steps\', \'max_queue_size\', \'workers\', \'use_multiprocessing\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'1\', \'None\', \'None\', \'10\', \'1\', \'False\'], "
+ }
+ member_method {
+ name: "evaluate_generator"
+ argspec: "args=[\'self\', \'generator\', \'steps\', \'max_queue_size\', \'workers\', \'use_multiprocessing\', \'verbose\'], varargs=None, keywords=None, defaults=[\'None\', \'10\', \'1\', \'False\', \'0\'], "
+ }
+ member_method {
+ name: "fit"
+ argspec: "args=[\'self\', \'x\', \'y\', \'batch_size\', \'epochs\', \'verbose\', \'callbacks\', \'validation_split\', \'validation_data\', \'shuffle\', \'class_weight\', \'sample_weight\', \'initial_epoch\', \'steps_per_epoch\', \'validation_steps\', \'max_queue_size\', \'workers\', \'use_multiprocessing\'], varargs=None, keywords=kwargs, defaults=[\'None\', \'None\', \'None\', \'1\', \'1\', \'None\', \'0.0\', \'None\', \'True\', \'None\', \'None\', \'0\', \'None\', \'None\', \'10\', \'1\', \'False\'], "
+ }
+ member_method {
+ name: "fit_generator"
+ argspec: "args=[\'self\', \'generator\', \'steps_per_epoch\', \'epochs\', \'verbose\', \'callbacks\', \'validation_data\', \'validation_steps\', \'class_weight\', \'max_queue_size\', \'workers\', \'use_multiprocessing\', \'shuffle\', \'initial_epoch\'], varargs=None, keywords=None, defaults=[\'None\', \'1\', \'1\', \'None\', \'None\', \'None\', \'None\', \'10\', \'1\', \'False\', \'True\', \'0\'], "
+ }
+ member_method {
+ name: "from_config"
+ argspec: "args=[\'cls\', \'config\', \'custom_objects\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "get_config"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_input_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_input_mask_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_input_shape_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_layer"
+ argspec: "args=[\'self\', \'name\', \'index\'], varargs=None, keywords=None, defaults=[\'None\', \'None\'], "
+ }
+ member_method {
+ name: "get_losses_for"
+ argspec: "args=[\'self\', \'inputs\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_output_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_output_mask_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_output_shape_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_updates_for"
+ argspec: "args=[\'self\', \'inputs\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_weights"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "load_weights"
+ argspec: "args=[\'self\', \'filepath\', \'by_name\'], varargs=None, keywords=None, defaults=[\'False\'], "
+ }
+ member_method {
+ name: "predict"
+ argspec: "args=[\'self\', \'x\', \'batch_size\', \'verbose\', \'steps\', \'max_queue_size\', \'workers\', \'use_multiprocessing\'], varargs=None, keywords=None, defaults=[\'None\', \'0\', \'None\', \'10\', \'1\', \'False\'], "
+ }
+ member_method {
+ name: "predict_generator"
+ argspec: "args=[\'self\', \'generator\', \'steps\', \'max_queue_size\', \'workers\', \'use_multiprocessing\', \'verbose\'], varargs=None, keywords=None, defaults=[\'None\', \'10\', \'1\', \'False\', \'0\'], "
+ }
+ member_method {
+ name: "predict_on_batch"
+ argspec: "args=[\'self\', \'x\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "reset_states"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "save"
+ argspec: "args=[\'self\', \'filepath\', \'overwrite\', \'include_optimizer\'], varargs=None, keywords=None, defaults=[\'True\', \'True\'], "
+ }
+ member_method {
+ name: "save_weights"
+ argspec: "args=[\'self\', \'filepath\', \'overwrite\', \'save_format\'], varargs=None, keywords=None, defaults=[\'True\', \'None\'], "
+ }
+ member_method {
+ name: "set_weights"
+ argspec: "args=[\'self\', \'weights\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "summary"
+ argspec: "args=[\'self\', \'line_length\', \'positions\', \'print_fn\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\'], "
+ }
+ member_method {
+ name: "test_on_batch"
+ argspec: "args=[\'self\', \'x\', \'y\', \'sample_weight\'], varargs=None, keywords=None, defaults=[\'None\', \'None\'], "
+ }
+ member_method {
+ name: "to_json"
+ argspec: "args=[\'self\'], varargs=None, keywords=kwargs, defaults=None"
+ }
+ member_method {
+ name: "to_yaml"
+ argspec: "args=[\'self\'], varargs=None, keywords=kwargs, defaults=None"
+ }
+ member_method {
+ name: "train_on_batch"
+ argspec: "args=[\'self\', \'x\', \'y\', \'sample_weight\', \'class_weight\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\'], "
+ }
+}
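
The class snapshot above fixes the compile/fit/evaluate signatures of tf.keras.Model. A minimal functional-API sketch that exercises them, where the layer sizes and the synthetic NumPy data are illustrative assumptions rather than anything taken from the golden file:

    import numpy as np
    import tensorflow as tf

    inputs = tf.keras.Input(shape=(8,))
    hidden = tf.keras.layers.Dense(16, activation="relu")(inputs)
    outputs = tf.keras.layers.Dense(1, activation="sigmoid")(hidden)
    model = tf.keras.Model(inputs=inputs, outputs=outputs)

    model.compile(optimizer="adam", loss="binary_crossentropy", metrics=["accuracy"])

    x = np.random.rand(64, 8).astype("float32")
    y = np.random.randint(0, 2, size=(64, 1))
    model.fit(x, y, batch_size=16, epochs=2, verbose=0)
    print(model.evaluate(x, y, verbose=0))  # [loss, accuracy]
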
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.keras.-sequential.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.keras.-sequential.pbtxt
new file mode 100644
index 0000000000..20f39fae1e
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.keras.-sequential.pbtxt
@@ -0,0 +1,285 @@
+path: "tensorflow.keras.Sequential"
+tf_class {
+ is_instance: "<class \'tensorflow.python.keras.engine.sequential.Sequential\'>"
+ is_instance: "<class \'tensorflow.python.keras.engine.training.Model\'>"
+ is_instance: "<class \'tensorflow.python.keras.engine.network.Network\'>"
+ is_instance: "<class \'tensorflow.python.keras.engine.base_layer.Layer\'>"
+ is_instance: "<class \'tensorflow.python.training.checkpointable.base.CheckpointableBase\'>"
+ is_instance: "<type \'object\'>"
+ member {
+ name: "activity_regularizer"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "dtype"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "inbound_nodes"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "input"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "input_mask"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "input_shape"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "input_spec"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "layers"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "losses"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "name"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "non_trainable_variables"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "non_trainable_weights"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "outbound_nodes"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "output"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "output_mask"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "output_shape"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "state_updates"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "stateful"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "trainable_variables"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "trainable_weights"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "updates"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "uses_learning_phase"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "variables"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "weights"
+ mtype: "<type \'property\'>"
+ }
+ member_method {
+ name: "__init__"
+ argspec: "args=[\'self\', \'layers\', \'name\'], varargs=None, keywords=None, defaults=[\'None\', \'None\'], "
+ }
+ member_method {
+ name: "add"
+ argspec: "args=[\'self\', \'layer\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "add_loss"
+ argspec: "args=[\'self\'], varargs=args, keywords=kwargs, defaults=None"
+ }
+ member_method {
+ name: "add_update"
+ argspec: "args=[\'self\', \'updates\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "add_variable"
+ argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'True\', \'None\'], "
+ }
+ member_method {
+ name: "add_weight"
+ argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'synchronization\', \'aggregation\'], varargs=None, keywords=kwargs, defaults=[\'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'VariableSynchronization.AUTO\', \'VariableAggregation.NONE\'], "
+ }
+ member_method {
+ name: "apply"
+ argspec: "args=[\'self\', \'inputs\'], varargs=args, keywords=kwargs, defaults=None"
+ }
+ member_method {
+ name: "build"
+ argspec: "args=[\'self\', \'input_shape\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "call"
+ argspec: "args=[\'self\', \'inputs\', \'training\', \'mask\'], varargs=None, keywords=None, defaults=[\'None\', \'None\'], "
+ }
+ member_method {
+ name: "compile"
+ argspec: "args=[\'self\', \'optimizer\', \'loss\', \'metrics\', \'loss_weights\', \'sample_weight_mode\', \'weighted_metrics\', \'target_tensors\', \'distribute\'], varargs=None, keywords=kwargs, defaults=[\'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'None\'], "
+ }
+ member_method {
+ name: "compute_mask"
+ argspec: "args=[\'self\', \'inputs\', \'mask\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "compute_output_shape"
+ argspec: "args=[\'self\', \'input_shape\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "count_params"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "evaluate"
+ argspec: "args=[\'self\', \'x\', \'y\', \'batch_size\', \'verbose\', \'sample_weight\', \'steps\', \'max_queue_size\', \'workers\', \'use_multiprocessing\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'1\', \'None\', \'None\', \'10\', \'1\', \'False\'], "
+ }
+ member_method {
+ name: "evaluate_generator"
+ argspec: "args=[\'self\', \'generator\', \'steps\', \'max_queue_size\', \'workers\', \'use_multiprocessing\', \'verbose\'], varargs=None, keywords=None, defaults=[\'None\', \'10\', \'1\', \'False\', \'0\'], "
+ }
+ member_method {
+ name: "fit"
+ argspec: "args=[\'self\', \'x\', \'y\', \'batch_size\', \'epochs\', \'verbose\', \'callbacks\', \'validation_split\', \'validation_data\', \'shuffle\', \'class_weight\', \'sample_weight\', \'initial_epoch\', \'steps_per_epoch\', \'validation_steps\', \'max_queue_size\', \'workers\', \'use_multiprocessing\'], varargs=None, keywords=kwargs, defaults=[\'None\', \'None\', \'None\', \'1\', \'1\', \'None\', \'0.0\', \'None\', \'True\', \'None\', \'None\', \'0\', \'None\', \'None\', \'10\', \'1\', \'False\'], "
+ }
+ member_method {
+ name: "fit_generator"
+ argspec: "args=[\'self\', \'generator\', \'steps_per_epoch\', \'epochs\', \'verbose\', \'callbacks\', \'validation_data\', \'validation_steps\', \'class_weight\', \'max_queue_size\', \'workers\', \'use_multiprocessing\', \'shuffle\', \'initial_epoch\'], varargs=None, keywords=None, defaults=[\'None\', \'1\', \'1\', \'None\', \'None\', \'None\', \'None\', \'10\', \'1\', \'False\', \'True\', \'0\'], "
+ }
+ member_method {
+ name: "from_config"
+ argspec: "args=[\'cls\', \'config\', \'custom_objects\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "get_config"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_input_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_input_mask_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_input_shape_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_layer"
+ argspec: "args=[\'self\', \'name\', \'index\'], varargs=None, keywords=None, defaults=[\'None\', \'None\'], "
+ }
+ member_method {
+ name: "get_losses_for"
+ argspec: "args=[\'self\', \'inputs\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_output_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_output_mask_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_output_shape_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_updates_for"
+ argspec: "args=[\'self\', \'inputs\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_weights"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "load_weights"
+ argspec: "args=[\'self\', \'filepath\', \'by_name\'], varargs=None, keywords=None, defaults=[\'False\'], "
+ }
+ member_method {
+ name: "pop"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "predict"
+ argspec: "args=[\'self\', \'x\', \'batch_size\', \'verbose\', \'steps\', \'max_queue_size\', \'workers\', \'use_multiprocessing\'], varargs=None, keywords=None, defaults=[\'None\', \'0\', \'None\', \'10\', \'1\', \'False\'], "
+ }
+ member_method {
+ name: "predict_classes"
+ argspec: "args=[\'self\', \'x\', \'batch_size\', \'verbose\'], varargs=None, keywords=None, defaults=[\'32\', \'0\'], "
+ }
+ member_method {
+ name: "predict_generator"
+ argspec: "args=[\'self\', \'generator\', \'steps\', \'max_queue_size\', \'workers\', \'use_multiprocessing\', \'verbose\'], varargs=None, keywords=None, defaults=[\'None\', \'10\', \'1\', \'False\', \'0\'], "
+ }
+ member_method {
+ name: "predict_on_batch"
+ argspec: "args=[\'self\', \'x\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "predict_proba"
+ argspec: "args=[\'self\', \'x\', \'batch_size\', \'verbose\'], varargs=None, keywords=None, defaults=[\'32\', \'0\'], "
+ }
+ member_method {
+ name: "reset_states"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "save"
+ argspec: "args=[\'self\', \'filepath\', \'overwrite\', \'include_optimizer\'], varargs=None, keywords=None, defaults=[\'True\', \'True\'], "
+ }
+ member_method {
+ name: "save_weights"
+ argspec: "args=[\'self\', \'filepath\', \'overwrite\', \'save_format\'], varargs=None, keywords=None, defaults=[\'True\', \'None\'], "
+ }
+ member_method {
+ name: "set_weights"
+ argspec: "args=[\'self\', \'weights\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "summary"
+ argspec: "args=[\'self\', \'line_length\', \'positions\', \'print_fn\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\'], "
+ }
+ member_method {
+ name: "test_on_batch"
+ argspec: "args=[\'self\', \'x\', \'y\', \'sample_weight\'], varargs=None, keywords=None, defaults=[\'None\', \'None\'], "
+ }
+ member_method {
+ name: "to_json"
+ argspec: "args=[\'self\'], varargs=None, keywords=kwargs, defaults=None"
+ }
+ member_method {
+ name: "to_yaml"
+ argspec: "args=[\'self\'], varargs=None, keywords=kwargs, defaults=None"
+ }
+ member_method {
+ name: "train_on_batch"
+ argspec: "args=[\'self\', \'x\', \'y\', \'sample_weight\', \'class_weight\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\'], "
+ }
+}
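
Sequential adds add(), pop(), predict_classes() and predict_proba() on top of the Model surface above. A short sketch using them, with arbitrary shapes and synthetic data assumed for illustration:

    import numpy as np
    import tensorflow as tf

    model = tf.keras.Sequential()
    model.add(tf.keras.layers.Dense(16, activation="relu", input_shape=(4,)))
    model.add(tf.keras.layers.Dense(3, activation="softmax"))
    model.compile(optimizer="rmsprop", loss="sparse_categorical_crossentropy")

    x = np.random.rand(32, 4).astype("float32")
    y = np.random.randint(0, 3, size=(32,))
    model.fit(x, y, epochs=1, verbose=0)

    print(model.predict_classes(x[:5], batch_size=32, verbose=0))
    model.pop()  # removes the last layer, as listed in the golden file
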
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.keras.activations.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.keras.activations.pbtxt
new file mode 100644
index 0000000000..2e9de9ebb2
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.keras.activations.pbtxt
@@ -0,0 +1,55 @@
+path: "tensorflow.keras.activations"
+tf_module {
+ member_method {
+ name: "deserialize"
+ argspec: "args=[\'name\', \'custom_objects\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "elu"
+ argspec: "args=[\'x\', \'alpha\'], varargs=None, keywords=None, defaults=[\'1.0\'], "
+ }
+ member_method {
+ name: "get"
+ argspec: "args=[\'identifier\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "hard_sigmoid"
+ argspec: "args=[\'x\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "linear"
+ argspec: "args=[\'x\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "relu"
+ argspec: "args=[\'x\', \'alpha\', \'max_value\', \'threshold\'], varargs=None, keywords=None, defaults=[\'0.0\', \'None\', \'0\'], "
+ }
+ member_method {
+ name: "selu"
+ argspec: "args=[\'x\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "serialize"
+ argspec: "args=[\'activation\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "sigmoid"
+ argspec: "args=[\'x\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "softmax"
+ argspec: "args=[\'x\', \'axis\'], varargs=None, keywords=None, defaults=[\'-1\'], "
+ }
+ member_method {
+ name: "softplus"
+ argspec: "args=[\'x\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "softsign"
+ argspec: "args=[\'x\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "tanh"
+ argspec: "args=[\'x\'], varargs=None, keywords=None, defaults=None"
+ }
+}
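
The activations module is a flat set of functions plus get/serialize/deserialize for name-based lookup. A brief sketch of the relu signature recorded above (alpha, max_value, threshold) and the lookup round trip; the input values are arbitrary:

    import tensorflow as tf

    x = tf.constant([-2.0, -0.5, 0.5, 4.0])

    capped = tf.keras.activations.relu(x, max_value=6.0)   # ReLU6-style cap
    leaky = tf.keras.activations.relu(x, alpha=0.1)        # leaky slope below 0
    print(tf.keras.backend.eval(leaky))

    fn = tf.keras.activations.get("softmax")         # lookup by name
    name = tf.keras.activations.serialize(fn)        # back to "softmax"
    same_fn = tf.keras.activations.deserialize(name)
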
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.keras.backend.name_scope.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.keras.backend.name_scope.pbtxt
new file mode 100644
index 0000000000..a2b98b1c27
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.keras.backend.name_scope.pbtxt
@@ -0,0 +1,13 @@
+path: "tensorflow.keras.backend.name_scope"
+tf_class {
+ is_instance: "<class \'tensorflow.python.framework.ops.name_scope\'>"
+ is_instance: "<type \'object\'>"
+ member {
+ name: "name"
+ mtype: "<type \'property\'>"
+ }
+ member_method {
+ name: "__init__"
+ argspec: "args=[\'self\', \'name\', \'default_name\', \'values\'], varargs=None, keywords=None, defaults=[\'None\', \'None\'], "
+ }
+}
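
keras.backend.name_scope is re-exported from tensorflow.python.framework.ops, so in graph mode it behaves like tf.name_scope. A small sketch of prefixing op names (the scope and constant names are arbitrary):

    import tensorflow as tf
    K = tf.keras.backend

    with K.name_scope("block1"):
        bias = K.constant(1.0, name="bias")
    print(bias.name)  # e.g. "block1/bias:0" in graph mode
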
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.keras.backend.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.keras.backend.pbtxt
new file mode 100644
index 0000000000..126ce8db6a
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.keras.backend.pbtxt
@@ -0,0 +1,555 @@
+path: "tensorflow.keras.backend"
+tf_module {
+ member {
+ name: "name_scope"
+ mtype: "<type \'type\'>"
+ }
+ member_method {
+ name: "abs"
+ argspec: "args=[\'x\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "all"
+ argspec: "args=[\'x\', \'axis\', \'keepdims\'], varargs=None, keywords=None, defaults=[\'None\', \'False\'], "
+ }
+ member_method {
+ name: "any"
+ argspec: "args=[\'x\', \'axis\', \'keepdims\'], varargs=None, keywords=None, defaults=[\'None\', \'False\'], "
+ }
+ member_method {
+ name: "arange"
+ argspec: "args=[\'start\', \'stop\', \'step\', \'dtype\'], varargs=None, keywords=None, defaults=[\'None\', \'1\', \'int32\'], "
+ }
+ member_method {
+ name: "argmax"
+ argspec: "args=[\'x\', \'axis\'], varargs=None, keywords=None, defaults=[\'-1\'], "
+ }
+ member_method {
+ name: "argmin"
+ argspec: "args=[\'x\', \'axis\'], varargs=None, keywords=None, defaults=[\'-1\'], "
+ }
+ member_method {
+ name: "backend"
+ argspec: "args=[], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "batch_dot"
+ argspec: "args=[\'x\', \'y\', \'axes\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "batch_flatten"
+ argspec: "args=[\'x\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "batch_get_value"
+ argspec: "args=[\'tensors\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "batch_normalization"
+ argspec: "args=[\'x\', \'mean\', \'var\', \'beta\', \'gamma\', \'epsilon\'], varargs=None, keywords=None, defaults=[\'0.001\'], "
+ }
+ member_method {
+ name: "batch_set_value"
+ argspec: "args=[\'tuples\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "bias_add"
+ argspec: "args=[\'x\', \'bias\', \'data_format\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "binary_crossentropy"
+ argspec: "args=[\'target\', \'output\', \'from_logits\'], varargs=None, keywords=None, defaults=[\'False\'], "
+ }
+ member_method {
+ name: "cast"
+ argspec: "args=[\'x\', \'dtype\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "cast_to_floatx"
+ argspec: "args=[\'x\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "categorical_crossentropy"
+ argspec: "args=[\'target\', \'output\', \'from_logits\', \'axis\'], varargs=None, keywords=None, defaults=[\'False\', \'-1\'], "
+ }
+ member_method {
+ name: "clear_session"
+ argspec: "args=[], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "clip"
+ argspec: "args=[\'x\', \'min_value\', \'max_value\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "concatenate"
+ argspec: "args=[\'tensors\', \'axis\'], varargs=None, keywords=None, defaults=[\'-1\'], "
+ }
+ member_method {
+ name: "constant"
+ argspec: "args=[\'value\', \'dtype\', \'shape\', \'name\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\'], "
+ }
+ member_method {
+ name: "conv1d"
+ argspec: "args=[\'x\', \'kernel\', \'strides\', \'padding\', \'data_format\', \'dilation_rate\'], varargs=None, keywords=None, defaults=[\'1\', \'valid\', \'None\', \'1\'], "
+ }
+ member_method {
+ name: "conv2d"
+ argspec: "args=[\'x\', \'kernel\', \'strides\', \'padding\', \'data_format\', \'dilation_rate\'], varargs=None, keywords=None, defaults=[\'(1, 1)\', \'valid\', \'None\', \'(1, 1)\'], "
+ }
+ member_method {
+ name: "conv2d_transpose"
+ argspec: "args=[\'x\', \'kernel\', \'output_shape\', \'strides\', \'padding\', \'data_format\'], varargs=None, keywords=None, defaults=[\'(1, 1)\', \'valid\', \'None\'], "
+ }
+ member_method {
+ name: "conv3d"
+ argspec: "args=[\'x\', \'kernel\', \'strides\', \'padding\', \'data_format\', \'dilation_rate\'], varargs=None, keywords=None, defaults=[\'(1, 1, 1)\', \'valid\', \'None\', \'(1, 1, 1)\'], "
+ }
+ member_method {
+ name: "cos"
+ argspec: "args=[\'x\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "count_params"
+ argspec: "args=[\'x\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "ctc_batch_cost"
+ argspec: "args=[\'y_true\', \'y_pred\', \'input_length\', \'label_length\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "ctc_decode"
+ argspec: "args=[\'y_pred\', \'input_length\', \'greedy\', \'beam_width\', \'top_paths\'], varargs=None, keywords=None, defaults=[\'True\', \'100\', \'1\'], "
+ }
+ member_method {
+ name: "ctc_label_dense_to_sparse"
+ argspec: "args=[\'labels\', \'label_lengths\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "dot"
+ argspec: "args=[\'x\', \'y\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "dropout"
+ argspec: "args=[\'x\', \'level\', \'noise_shape\', \'seed\'], varargs=None, keywords=None, defaults=[\'None\', \'None\'], "
+ }
+ member_method {
+ name: "dtype"
+ argspec: "args=[\'x\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "elu"
+ argspec: "args=[\'x\', \'alpha\'], varargs=None, keywords=None, defaults=[\'1.0\'], "
+ }
+ member_method {
+ name: "epsilon"
+ argspec: "args=[], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "equal"
+ argspec: "args=[\'x\', \'y\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "eval"
+ argspec: "args=[\'x\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "exp"
+ argspec: "args=[\'x\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "expand_dims"
+ argspec: "args=[\'x\', \'axis\'], varargs=None, keywords=None, defaults=[\'-1\'], "
+ }
+ member_method {
+ name: "eye"
+ argspec: "args=[\'size\', \'dtype\', \'name\'], varargs=None, keywords=None, defaults=[\'None\', \'None\'], "
+ }
+ member_method {
+ name: "flatten"
+ argspec: "args=[\'x\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "floatx"
+ argspec: "args=[], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "foldl"
+ argspec: "args=[\'fn\', \'elems\', \'initializer\', \'name\'], varargs=None, keywords=None, defaults=[\'None\', \'None\'], "
+ }
+ member_method {
+ name: "foldr"
+ argspec: "args=[\'fn\', \'elems\', \'initializer\', \'name\'], varargs=None, keywords=None, defaults=[\'None\', \'None\'], "
+ }
+ member_method {
+ name: "function"
+ argspec: "args=[\'inputs\', \'outputs\', \'updates\'], varargs=None, keywords=kwargs, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "gather"
+ argspec: "args=[\'reference\', \'indices\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_session"
+ argspec: "args=[], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_uid"
+ argspec: "args=[\'prefix\'], varargs=None, keywords=None, defaults=[\'\'], "
+ }
+ member_method {
+ name: "get_value"
+ argspec: "args=[\'x\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "gradients"
+ argspec: "args=[\'loss\', \'variables\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "greater"
+ argspec: "args=[\'x\', \'y\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "greater_equal"
+ argspec: "args=[\'x\', \'y\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "hard_sigmoid"
+ argspec: "args=[\'x\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "image_data_format"
+ argspec: "args=[], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "in_test_phase"
+ argspec: "args=[\'x\', \'alt\', \'training\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "in_top_k"
+ argspec: "args=[\'predictions\', \'targets\', \'k\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "in_train_phase"
+ argspec: "args=[\'x\', \'alt\', \'training\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "int_shape"
+ argspec: "args=[\'x\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "is_sparse"
+ argspec: "args=[\'tensor\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "l2_normalize"
+ argspec: "args=[\'x\', \'axis\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "learning_phase"
+ argspec: "args=[], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "less"
+ argspec: "args=[\'x\', \'y\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "less_equal"
+ argspec: "args=[\'x\', \'y\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "log"
+ argspec: "args=[\'x\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "manual_variable_initialization"
+ argspec: "args=[\'value\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "map_fn"
+ argspec: "args=[\'fn\', \'elems\', \'name\', \'dtype\'], varargs=None, keywords=None, defaults=[\'None\', \'None\'], "
+ }
+ member_method {
+ name: "max"
+ argspec: "args=[\'x\', \'axis\', \'keepdims\'], varargs=None, keywords=None, defaults=[\'None\', \'False\'], "
+ }
+ member_method {
+ name: "maximum"
+ argspec: "args=[\'x\', \'y\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "mean"
+ argspec: "args=[\'x\', \'axis\', \'keepdims\'], varargs=None, keywords=None, defaults=[\'None\', \'False\'], "
+ }
+ member_method {
+ name: "min"
+ argspec: "args=[\'x\', \'axis\', \'keepdims\'], varargs=None, keywords=None, defaults=[\'None\', \'False\'], "
+ }
+ member_method {
+ name: "minimum"
+ argspec: "args=[\'x\', \'y\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "moving_average_update"
+ argspec: "args=[\'x\', \'value\', \'momentum\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "ndim"
+ argspec: "args=[\'x\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "normalize_batch_in_training"
+ argspec: "args=[\'x\', \'gamma\', \'beta\', \'reduction_axes\', \'epsilon\'], varargs=None, keywords=None, defaults=[\'0.001\'], "
+ }
+ member_method {
+ name: "not_equal"
+ argspec: "args=[\'x\', \'y\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "one_hot"
+ argspec: "args=[\'indices\', \'num_classes\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "ones"
+ argspec: "args=[\'shape\', \'dtype\', \'name\'], varargs=None, keywords=None, defaults=[\'None\', \'None\'], "
+ }
+ member_method {
+ name: "ones_like"
+ argspec: "args=[\'x\', \'dtype\', \'name\'], varargs=None, keywords=None, defaults=[\'None\', \'None\'], "
+ }
+ member_method {
+ name: "permute_dimensions"
+ argspec: "args=[\'x\', \'pattern\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "placeholder"
+ argspec: "args=[\'shape\', \'ndim\', \'dtype\', \'sparse\', \'name\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'False\', \'None\'], "
+ }
+ member_method {
+ name: "pool2d"
+ argspec: "args=[\'x\', \'pool_size\', \'strides\', \'padding\', \'data_format\', \'pool_mode\'], varargs=None, keywords=None, defaults=[\'(1, 1)\', \'valid\', \'None\', \'max\'], "
+ }
+ member_method {
+ name: "pool3d"
+ argspec: "args=[\'x\', \'pool_size\', \'strides\', \'padding\', \'data_format\', \'pool_mode\'], varargs=None, keywords=None, defaults=[\'(1, 1, 1)\', \'valid\', \'None\', \'max\'], "
+ }
+ member_method {
+ name: "pow"
+ argspec: "args=[\'x\', \'a\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "print_tensor"
+ argspec: "args=[\'x\', \'message\'], varargs=None, keywords=None, defaults=[\'\'], "
+ }
+ member_method {
+ name: "prod"
+ argspec: "args=[\'x\', \'axis\', \'keepdims\'], varargs=None, keywords=None, defaults=[\'None\', \'False\'], "
+ }
+ member_method {
+ name: "random_binomial"
+ argspec: "args=[\'shape\', \'p\', \'dtype\', \'seed\'], varargs=None, keywords=None, defaults=[\'0.0\', \'None\', \'None\'], "
+ }
+ member_method {
+ name: "random_normal"
+ argspec: "args=[\'shape\', \'mean\', \'stddev\', \'dtype\', \'seed\'], varargs=None, keywords=None, defaults=[\'0.0\', \'1.0\', \'None\', \'None\'], "
+ }
+ member_method {
+ name: "random_normal_variable"
+ argspec: "args=[\'shape\', \'mean\', \'scale\', \'dtype\', \'name\', \'seed\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\'], "
+ }
+ member_method {
+ name: "random_uniform"
+ argspec: "args=[\'shape\', \'minval\', \'maxval\', \'dtype\', \'seed\'], varargs=None, keywords=None, defaults=[\'0.0\', \'1.0\', \'None\', \'None\'], "
+ }
+ member_method {
+ name: "random_uniform_variable"
+ argspec: "args=[\'shape\', \'low\', \'high\', \'dtype\', \'name\', \'seed\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\'], "
+ }
+ member_method {
+ name: "relu"
+ argspec: "args=[\'x\', \'alpha\', \'max_value\', \'threshold\'], varargs=None, keywords=None, defaults=[\'0.0\', \'None\', \'0\'], "
+ }
+ member_method {
+ name: "repeat"
+ argspec: "args=[\'x\', \'n\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "repeat_elements"
+ argspec: "args=[\'x\', \'rep\', \'axis\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "reset_uids"
+ argspec: "args=[], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "reshape"
+ argspec: "args=[\'x\', \'shape\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "resize_images"
+ argspec: "args=[\'x\', \'height_factor\', \'width_factor\', \'data_format\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "resize_volumes"
+ argspec: "args=[\'x\', \'depth_factor\', \'height_factor\', \'width_factor\', \'data_format\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "reverse"
+ argspec: "args=[\'x\', \'axes\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "rnn"
+ argspec: "args=[\'step_function\', \'inputs\', \'initial_states\', \'go_backwards\', \'mask\', \'constants\', \'unroll\', \'input_length\'], varargs=None, keywords=None, defaults=[\'False\', \'None\', \'None\', \'False\', \'None\'], "
+ }
+ member_method {
+ name: "round"
+ argspec: "args=[\'x\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "separable_conv2d"
+ argspec: "args=[\'x\', \'depthwise_kernel\', \'pointwise_kernel\', \'strides\', \'padding\', \'data_format\', \'dilation_rate\'], varargs=None, keywords=None, defaults=[\'(1, 1)\', \'valid\', \'None\', \'(1, 1)\'], "
+ }
+ member_method {
+ name: "set_epsilon"
+ argspec: "args=[\'value\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "set_floatx"
+ argspec: "args=[\'value\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "set_image_data_format"
+ argspec: "args=[\'data_format\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "set_learning_phase"
+ argspec: "args=[\'value\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "set_session"
+ argspec: "args=[\'session\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "set_value"
+ argspec: "args=[\'x\', \'value\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "shape"
+ argspec: "args=[\'x\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "sigmoid"
+ argspec: "args=[\'x\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "sign"
+ argspec: "args=[\'x\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "sin"
+ argspec: "args=[\'x\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "softmax"
+ argspec: "args=[\'x\', \'axis\'], varargs=None, keywords=None, defaults=[\'-1\'], "
+ }
+ member_method {
+ name: "softplus"
+ argspec: "args=[\'x\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "softsign"
+ argspec: "args=[\'x\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "sparse_categorical_crossentropy"
+ argspec: "args=[\'target\', \'output\', \'from_logits\', \'axis\'], varargs=None, keywords=None, defaults=[\'False\', \'-1\'], "
+ }
+ member_method {
+ name: "spatial_2d_padding"
+ argspec: "args=[\'x\', \'padding\', \'data_format\'], varargs=None, keywords=None, defaults=[\'((1, 1), (1, 1))\', \'None\'], "
+ }
+ member_method {
+ name: "spatial_3d_padding"
+ argspec: "args=[\'x\', \'padding\', \'data_format\'], varargs=None, keywords=None, defaults=[\'((1, 1), (1, 1), (1, 1))\', \'None\'], "
+ }
+ member_method {
+ name: "sqrt"
+ argspec: "args=[\'x\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "square"
+ argspec: "args=[\'x\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "squeeze"
+ argspec: "args=[\'x\', \'axis\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "stack"
+ argspec: "args=[\'x\', \'axis\'], varargs=None, keywords=None, defaults=[\'0\'], "
+ }
+ member_method {
+ name: "std"
+ argspec: "args=[\'x\', \'axis\', \'keepdims\'], varargs=None, keywords=None, defaults=[\'None\', \'False\'], "
+ }
+ member_method {
+ name: "stop_gradient"
+ argspec: "args=[\'variables\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "sum"
+ argspec: "args=[\'x\', \'axis\', \'keepdims\'], varargs=None, keywords=None, defaults=[\'None\', \'False\'], "
+ }
+ member_method {
+ name: "switch"
+ argspec: "args=[\'condition\', \'then_expression\', \'else_expression\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "tanh"
+ argspec: "args=[\'x\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "temporal_padding"
+ argspec: "args=[\'x\', \'padding\'], varargs=None, keywords=None, defaults=[\'(1, 1)\'], "
+ }
+ member_method {
+ name: "to_dense"
+ argspec: "args=[\'tensor\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "transpose"
+ argspec: "args=[\'x\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "truncated_normal"
+ argspec: "args=[\'shape\', \'mean\', \'stddev\', \'dtype\', \'seed\'], varargs=None, keywords=None, defaults=[\'0.0\', \'1.0\', \'None\', \'None\'], "
+ }
+ member_method {
+ name: "update"
+ argspec: "args=[\'x\', \'new_x\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "update_add"
+ argspec: "args=[\'x\', \'increment\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "update_sub"
+ argspec: "args=[\'x\', \'decrement\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "var"
+ argspec: "args=[\'x\', \'axis\', \'keepdims\'], varargs=None, keywords=None, defaults=[\'None\', \'False\'], "
+ }
+ member_method {
+ name: "variable"
+ argspec: "args=[\'value\', \'dtype\', \'name\', \'constraint\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\'], "
+ }
+ member_method {
+ name: "zeros"
+ argspec: "args=[\'shape\', \'dtype\', \'name\'], varargs=None, keywords=None, defaults=[\'None\', \'None\'], "
+ }
+ member_method {
+ name: "zeros_like"
+ argspec: "args=[\'x\', \'dtype\', \'name\'], varargs=None, keywords=None, defaults=[\'None\', \'None\'], "
+ }
+}
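
The backend module above is essentially a functional, graph-mode tensor API. A minimal sketch chaining a few of its entries (placeholder, variable, dot, softmax, function); the shapes and random weights are illustrative assumptions:

    import numpy as np
    import tensorflow as tf
    K = tf.keras.backend

    x = K.placeholder(shape=(None, 3))
    w = K.variable(np.random.rand(3, 2).astype("float32"))
    probs = K.softmax(K.dot(x, w), axis=-1)

    f = K.function(inputs=[x], outputs=[probs])
    print(f([np.ones((4, 3), dtype="float32")])[0].shape)  # (4, 2)
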
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.keras.callbacks.-base-logger.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.keras.callbacks.-base-logger.pbtxt
new file mode 100644
index 0000000000..9eee9b3789
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.keras.callbacks.-base-logger.pbtxt
@@ -0,0 +1,42 @@
+path: "tensorflow.keras.callbacks.BaseLogger"
+tf_class {
+ is_instance: "<class \'tensorflow.python.keras.callbacks.BaseLogger\'>"
+ is_instance: "<class \'tensorflow.python.keras.callbacks.Callback\'>"
+ is_instance: "<type \'object\'>"
+ member_method {
+ name: "__init__"
+ argspec: "args=[\'self\', \'stateful_metrics\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "on_batch_begin"
+ argspec: "args=[\'self\', \'batch\', \'logs\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "on_batch_end"
+ argspec: "args=[\'self\', \'batch\', \'logs\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "on_epoch_begin"
+ argspec: "args=[\'self\', \'epoch\', \'logs\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "on_epoch_end"
+ argspec: "args=[\'self\', \'epoch\', \'logs\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "on_train_begin"
+ argspec: "args=[\'self\', \'logs\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "on_train_end"
+ argspec: "args=[\'self\', \'logs\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "set_model"
+ argspec: "args=[\'self\', \'model\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "set_params"
+ argspec: "args=[\'self\', \'params\'], varargs=None, keywords=None, defaults=None"
+ }
+}
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.keras.callbacks.-c-s-v-logger.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.keras.callbacks.-c-s-v-logger.pbtxt
new file mode 100644
index 0000000000..5bb949c5bb
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.keras.callbacks.-c-s-v-logger.pbtxt
@@ -0,0 +1,42 @@
+path: "tensorflow.keras.callbacks.CSVLogger"
+tf_class {
+ is_instance: "<class \'tensorflow.python.keras.callbacks.CSVLogger\'>"
+ is_instance: "<class \'tensorflow.python.keras.callbacks.Callback\'>"
+ is_instance: "<type \'object\'>"
+ member_method {
+ name: "__init__"
+ argspec: "args=[\'self\', \'filename\', \'separator\', \'append\'], varargs=None, keywords=None, defaults=[\',\', \'False\'], "
+ }
+ member_method {
+ name: "on_batch_begin"
+ argspec: "args=[\'self\', \'batch\', \'logs\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "on_batch_end"
+ argspec: "args=[\'self\', \'batch\', \'logs\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "on_epoch_begin"
+ argspec: "args=[\'self\', \'epoch\', \'logs\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "on_epoch_end"
+ argspec: "args=[\'self\', \'epoch\', \'logs\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "on_train_begin"
+ argspec: "args=[\'self\', \'logs\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "on_train_end"
+ argspec: "args=[\'self\', \'logs\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "set_model"
+ argspec: "args=[\'self\', \'model\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "set_params"
+ argspec: "args=[\'self\', \'params\'], varargs=None, keywords=None, defaults=None"
+ }
+}
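
CSVLogger only needs its constructor arguments from the argspec above; the log path is an illustrative assumption, and the instance is handed to fit() through the callbacks list:

    import tensorflow as tf

    csv_logger = tf.keras.callbacks.CSVLogger("training_log.csv", separator=",", append=False)
    # model.fit(x, y, epochs=5, callbacks=[csv_logger])
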
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.keras.callbacks.-callback.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.keras.callbacks.-callback.pbtxt
new file mode 100644
index 0000000000..a5340d52c1
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.keras.callbacks.-callback.pbtxt
@@ -0,0 +1,41 @@
+path: "tensorflow.keras.callbacks.Callback"
+tf_class {
+ is_instance: "<class \'tensorflow.python.keras.callbacks.Callback\'>"
+ is_instance: "<type \'object\'>"
+ member_method {
+ name: "__init__"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "on_batch_begin"
+ argspec: "args=[\'self\', \'batch\', \'logs\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "on_batch_end"
+ argspec: "args=[\'self\', \'batch\', \'logs\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "on_epoch_begin"
+ argspec: "args=[\'self\', \'epoch\', \'logs\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "on_epoch_end"
+ argspec: "args=[\'self\', \'epoch\', \'logs\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "on_train_begin"
+ argspec: "args=[\'self\', \'logs\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "on_train_end"
+ argspec: "args=[\'self\', \'logs\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "set_model"
+ argspec: "args=[\'self\', \'model\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "set_params"
+ argspec: "args=[\'self\', \'params\'], varargs=None, keywords=None, defaults=None"
+ }
+}
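
Callback is the base class the other entries subclass; user code typically overrides one or more of the on_* hooks listed above. A minimal sketch of a custom subclass (the printed "loss" key assumes the model was compiled with a loss):

    import tensorflow as tf

    class LossPrinter(tf.keras.callbacks.Callback):
        def on_epoch_end(self, epoch, logs=None):
            logs = logs or {}
            print("epoch %d done, loss=%s" % (epoch, logs.get("loss")))

    # model.fit(x, y, epochs=3, callbacks=[LossPrinter()])
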
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.keras.callbacks.-early-stopping.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.keras.callbacks.-early-stopping.pbtxt
new file mode 100644
index 0000000000..f71292856c
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.keras.callbacks.-early-stopping.pbtxt
@@ -0,0 +1,42 @@
+path: "tensorflow.keras.callbacks.EarlyStopping"
+tf_class {
+ is_instance: "<class \'tensorflow.python.keras.callbacks.EarlyStopping\'>"
+ is_instance: "<class \'tensorflow.python.keras.callbacks.Callback\'>"
+ is_instance: "<type \'object\'>"
+ member_method {
+ name: "__init__"
+ argspec: "args=[\'self\', \'monitor\', \'min_delta\', \'patience\', \'verbose\', \'mode\', \'baseline\'], varargs=None, keywords=None, defaults=[\'val_loss\', \'0\', \'0\', \'0\', \'auto\', \'None\'], "
+ }
+ member_method {
+ name: "on_batch_begin"
+ argspec: "args=[\'self\', \'batch\', \'logs\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "on_batch_end"
+ argspec: "args=[\'self\', \'batch\', \'logs\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "on_epoch_begin"
+ argspec: "args=[\'self\', \'epoch\', \'logs\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "on_epoch_end"
+ argspec: "args=[\'self\', \'epoch\', \'logs\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "on_train_begin"
+ argspec: "args=[\'self\', \'logs\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "on_train_end"
+ argspec: "args=[\'self\', \'logs\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "set_model"
+ argspec: "args=[\'self\', \'model\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "set_params"
+ argspec: "args=[\'self\', \'params\'], varargs=None, keywords=None, defaults=None"
+ }
+}
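
EarlyStopping is configured entirely through the constructor argspec above; the patience and min_delta values below are illustrative choices, and monitoring val_loss assumes validation data is passed to fit():

    import tensorflow as tf

    early_stop = tf.keras.callbacks.EarlyStopping(
        monitor="val_loss", min_delta=0.001, patience=3, verbose=1, mode="auto")
    # model.fit(x, y, validation_split=0.2, epochs=50, callbacks=[early_stop])
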
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.keras.callbacks.-history.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.keras.callbacks.-history.pbtxt
new file mode 100644
index 0000000000..ee400b31c4
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.keras.callbacks.-history.pbtxt
@@ -0,0 +1,42 @@
+path: "tensorflow.keras.callbacks.History"
+tf_class {
+ is_instance: "<class \'tensorflow.python.keras.callbacks.History\'>"
+ is_instance: "<class \'tensorflow.python.keras.callbacks.Callback\'>"
+ is_instance: "<type \'object\'>"
+ member_method {
+ name: "__init__"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "on_batch_begin"
+ argspec: "args=[\'self\', \'batch\', \'logs\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "on_batch_end"
+ argspec: "args=[\'self\', \'batch\', \'logs\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "on_epoch_begin"
+ argspec: "args=[\'self\', \'epoch\', \'logs\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "on_epoch_end"
+ argspec: "args=[\'self\', \'epoch\', \'logs\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "on_train_begin"
+ argspec: "args=[\'self\', \'logs\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "on_train_end"
+ argspec: "args=[\'self\', \'logs\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "set_model"
+ argspec: "args=[\'self\', \'model\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "set_params"
+ argspec: "args=[\'self\', \'params\'], varargs=None, keywords=None, defaults=None"
+ }
+}
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.keras.callbacks.-lambda-callback.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.keras.callbacks.-lambda-callback.pbtxt
new file mode 100644
index 0000000000..df8d7b0ef7
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.keras.callbacks.-lambda-callback.pbtxt
@@ -0,0 +1,42 @@
+path: "tensorflow.keras.callbacks.LambdaCallback"
+tf_class {
+ is_instance: "<class \'tensorflow.python.keras.callbacks.LambdaCallback\'>"
+ is_instance: "<class \'tensorflow.python.keras.callbacks.Callback\'>"
+ is_instance: "<type \'object\'>"
+ member_method {
+ name: "__init__"
+ argspec: "args=[\'self\', \'on_epoch_begin\', \'on_epoch_end\', \'on_batch_begin\', \'on_batch_end\', \'on_train_begin\', \'on_train_end\'], varargs=None, keywords=kwargs, defaults=[\'None\', \'None\', \'None\', \'None\', \'None\', \'None\'], "
+ }
+ member_method {
+ name: "on_batch_begin"
+ argspec: "args=[\'self\', \'batch\', \'logs\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "on_batch_end"
+ argspec: "args=[\'self\', \'batch\', \'logs\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "on_epoch_begin"
+ argspec: "args=[\'self\', \'epoch\', \'logs\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "on_epoch_end"
+ argspec: "args=[\'self\', \'epoch\', \'logs\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "on_train_begin"
+ argspec: "args=[\'self\', \'logs\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "on_train_end"
+ argspec: "args=[\'self\', \'logs\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "set_model"
+ argspec: "args=[\'self\', \'model\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "set_params"
+ argspec: "args=[\'self\', \'params\'], varargs=None, keywords=None, defaults=None"
+ }
+}
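
LambdaCallback wires plain functions into the same hooks, matching the keyword arguments in its __init__ argspec above; the print statements are illustrative:

    import tensorflow as tf

    log_epochs = tf.keras.callbacks.LambdaCallback(
        on_epoch_begin=lambda epoch, logs: print("starting epoch", epoch),
        on_train_end=lambda logs: print("training finished"))
    # model.fit(x, y, epochs=2, callbacks=[log_epochs])
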
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.keras.callbacks.-learning-rate-scheduler.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.keras.callbacks.-learning-rate-scheduler.pbtxt
new file mode 100644
index 0000000000..ce1a9b694d
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.keras.callbacks.-learning-rate-scheduler.pbtxt
@@ -0,0 +1,42 @@
+path: "tensorflow.keras.callbacks.LearningRateScheduler"
+tf_class {
+ is_instance: "<class \'tensorflow.python.keras.callbacks.LearningRateScheduler\'>"
+ is_instance: "<class \'tensorflow.python.keras.callbacks.Callback\'>"
+ is_instance: "<type \'object\'>"
+ member_method {
+ name: "__init__"
+ argspec: "args=[\'self\', \'schedule\', \'verbose\'], varargs=None, keywords=None, defaults=[\'0\'], "
+ }
+ member_method {
+ name: "on_batch_begin"
+ argspec: "args=[\'self\', \'batch\', \'logs\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "on_batch_end"
+ argspec: "args=[\'self\', \'batch\', \'logs\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "on_epoch_begin"
+ argspec: "args=[\'self\', \'epoch\', \'logs\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "on_epoch_end"
+ argspec: "args=[\'self\', \'epoch\', \'logs\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "on_train_begin"
+ argspec: "args=[\'self\', \'logs\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "on_train_end"
+ argspec: "args=[\'self\', \'logs\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "set_model"
+ argspec: "args=[\'self\', \'model\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "set_params"
+ argspec: "args=[\'self\', \'params\'], varargs=None, keywords=None, defaults=None"
+ }
+}
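
LearningRateScheduler takes a schedule function mapping the epoch index to a learning rate; the step-decay numbers below are illustrative assumptions:

    import tensorflow as tf

    def step_decay(epoch):
        return 0.01 * (0.5 ** (epoch // 10))

    lr_schedule = tf.keras.callbacks.LearningRateScheduler(step_decay, verbose=1)
    # model.fit(x, y, epochs=30, callbacks=[lr_schedule])
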
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.keras.callbacks.-model-checkpoint.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.keras.callbacks.-model-checkpoint.pbtxt
new file mode 100644
index 0000000000..48bb24a052
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.keras.callbacks.-model-checkpoint.pbtxt
@@ -0,0 +1,42 @@
+path: "tensorflow.keras.callbacks.ModelCheckpoint"
+tf_class {
+ is_instance: "<class \'tensorflow.python.keras.callbacks.ModelCheckpoint\'>"
+ is_instance: "<class \'tensorflow.python.keras.callbacks.Callback\'>"
+ is_instance: "<type \'object\'>"
+ member_method {
+ name: "__init__"
+ argspec: "args=[\'self\', \'filepath\', \'monitor\', \'verbose\', \'save_best_only\', \'save_weights_only\', \'mode\', \'period\'], varargs=None, keywords=None, defaults=[\'val_loss\', \'0\', \'False\', \'False\', \'auto\', \'1\'], "
+ }
+ member_method {
+ name: "on_batch_begin"
+ argspec: "args=[\'self\', \'batch\', \'logs\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "on_batch_end"
+ argspec: "args=[\'self\', \'batch\', \'logs\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "on_epoch_begin"
+ argspec: "args=[\'self\', \'epoch\', \'logs\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "on_epoch_end"
+ argspec: "args=[\'self\', \'epoch\', \'logs\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "on_train_begin"
+ argspec: "args=[\'self\', \'logs\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "on_train_end"
+ argspec: "args=[\'self\', \'logs\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "set_model"
+ argspec: "args=[\'self\', \'model\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "set_params"
+ argspec: "args=[\'self\', \'params\'], varargs=None, keywords=None, defaults=None"
+ }
+}
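
ModelCheckpoint maps directly onto the constructor argspec above; the filepath template (with epoch/val_loss placeholders) is an illustrative assumption, and monitoring val_loss again assumes validation data:

    import tensorflow as tf

    checkpoint = tf.keras.callbacks.ModelCheckpoint(
        "weights.{epoch:02d}-{val_loss:.2f}.hdf5",
        monitor="val_loss", save_best_only=True, save_weights_only=True, period=1)
    # model.fit(x, y, validation_split=0.2, epochs=10, callbacks=[checkpoint])
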
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.keras.callbacks.-progbar-logger.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.keras.callbacks.-progbar-logger.pbtxt
new file mode 100644
index 0000000000..d8bb8b2a7d
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.keras.callbacks.-progbar-logger.pbtxt
@@ -0,0 +1,42 @@
+path: "tensorflow.keras.callbacks.ProgbarLogger"
+tf_class {
+ is_instance: "<class \'tensorflow.python.keras.callbacks.ProgbarLogger\'>"
+ is_instance: "<class \'tensorflow.python.keras.callbacks.Callback\'>"
+ is_instance: "<type \'object\'>"
+ member_method {
+ name: "__init__"
+ argspec: "args=[\'self\', \'count_mode\', \'stateful_metrics\'], varargs=None, keywords=None, defaults=[\'samples\', \'None\'], "
+ }
+ member_method {
+ name: "on_batch_begin"
+ argspec: "args=[\'self\', \'batch\', \'logs\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "on_batch_end"
+ argspec: "args=[\'self\', \'batch\', \'logs\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "on_epoch_begin"
+ argspec: "args=[\'self\', \'epoch\', \'logs\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "on_epoch_end"
+ argspec: "args=[\'self\', \'epoch\', \'logs\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "on_train_begin"
+ argspec: "args=[\'self\', \'logs\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "on_train_end"
+ argspec: "args=[\'self\', \'logs\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "set_model"
+ argspec: "args=[\'self\', \'model\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "set_params"
+ argspec: "args=[\'self\', \'params\'], varargs=None, keywords=None, defaults=None"
+ }
+}
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.keras.callbacks.-reduce-l-r-on-plateau.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.keras.callbacks.-reduce-l-r-on-plateau.pbtxt
new file mode 100644
index 0000000000..dc27af9552
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.keras.callbacks.-reduce-l-r-on-plateau.pbtxt
@@ -0,0 +1,46 @@
+path: "tensorflow.keras.callbacks.ReduceLROnPlateau"
+tf_class {
+ is_instance: "<class \'tensorflow.python.keras.callbacks.ReduceLROnPlateau\'>"
+ is_instance: "<class \'tensorflow.python.keras.callbacks.Callback\'>"
+ is_instance: "<type \'object\'>"
+ member_method {
+ name: "__init__"
+ argspec: "args=[\'self\', \'monitor\', \'factor\', \'patience\', \'verbose\', \'mode\', \'min_delta\', \'cooldown\', \'min_lr\'], varargs=None, keywords=kwargs, defaults=[\'val_loss\', \'0.1\', \'10\', \'0\', \'auto\', \'0.0001\', \'0\', \'0\'], "
+ }
+ member_method {
+ name: "in_cooldown"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "on_batch_begin"
+ argspec: "args=[\'self\', \'batch\', \'logs\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "on_batch_end"
+ argspec: "args=[\'self\', \'batch\', \'logs\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "on_epoch_begin"
+ argspec: "args=[\'self\', \'epoch\', \'logs\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "on_epoch_end"
+ argspec: "args=[\'self\', \'epoch\', \'logs\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "on_train_begin"
+ argspec: "args=[\'self\', \'logs\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "on_train_end"
+ argspec: "args=[\'self\', \'logs\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "set_model"
+ argspec: "args=[\'self\', \'model\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "set_params"
+ argspec: "args=[\'self\', \'params\'], varargs=None, keywords=None, defaults=None"
+ }
+}
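
A hedged sketch of how the ReduceLROnPlateau arguments recorded above are typically used (illustrative Python, not part of the diff; the factor/patience/min_lr values are arbitrary choices):

import tensorflow as tf

# Halve the learning rate after 5 epochs without val_loss improvement, but
# never below 1e-5; unspecified arguments keep the recorded defaults.
reduce_lr = tf.keras.callbacks.ReduceLROnPlateau(
    monitor='val_loss', factor=0.5, patience=5, min_lr=1e-5)
# Typically passed alongside other callbacks: model.fit(..., callbacks=[reduce_lr]).
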
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.keras.callbacks.-remote-monitor.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.keras.callbacks.-remote-monitor.pbtxt
new file mode 100644
index 0000000000..5a3b791c0a
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.keras.callbacks.-remote-monitor.pbtxt
@@ -0,0 +1,42 @@
+path: "tensorflow.keras.callbacks.RemoteMonitor"
+tf_class {
+ is_instance: "<class \'tensorflow.python.keras.callbacks.RemoteMonitor\'>"
+ is_instance: "<class \'tensorflow.python.keras.callbacks.Callback\'>"
+ is_instance: "<type \'object\'>"
+ member_method {
+ name: "__init__"
+ argspec: "args=[\'self\', \'root\', \'path\', \'field\', \'headers\', \'send_as_json\'], varargs=None, keywords=None, defaults=[\'http://localhost:9000\', \'/publish/epoch/end/\', \'data\', \'None\', \'False\'], "
+ }
+ member_method {
+ name: "on_batch_begin"
+ argspec: "args=[\'self\', \'batch\', \'logs\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "on_batch_end"
+ argspec: "args=[\'self\', \'batch\', \'logs\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "on_epoch_begin"
+ argspec: "args=[\'self\', \'epoch\', \'logs\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "on_epoch_end"
+ argspec: "args=[\'self\', \'epoch\', \'logs\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "on_train_begin"
+ argspec: "args=[\'self\', \'logs\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "on_train_end"
+ argspec: "args=[\'self\', \'logs\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "set_model"
+ argspec: "args=[\'self\', \'model\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "set_params"
+ argspec: "args=[\'self\', \'params\'], varargs=None, keywords=None, defaults=None"
+ }
+}
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.keras.callbacks.-tensor-board.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.keras.callbacks.-tensor-board.pbtxt
new file mode 100644
index 0000000000..e58ba18c1c
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.keras.callbacks.-tensor-board.pbtxt
@@ -0,0 +1,42 @@
+path: "tensorflow.keras.callbacks.TensorBoard"
+tf_class {
+ is_instance: "<class \'tensorflow.python.keras.callbacks.TensorBoard\'>"
+ is_instance: "<class \'tensorflow.python.keras.callbacks.Callback\'>"
+ is_instance: "<type \'object\'>"
+ member_method {
+ name: "__init__"
+ argspec: "args=[\'self\', \'log_dir\', \'histogram_freq\', \'batch_size\', \'write_graph\', \'write_grads\', \'write_images\', \'embeddings_freq\', \'embeddings_layer_names\', \'embeddings_metadata\', \'embeddings_data\'], varargs=None, keywords=None, defaults=[\'./logs\', \'0\', \'32\', \'True\', \'False\', \'False\', \'0\', \'None\', \'None\', \'None\'], "
+ }
+ member_method {
+ name: "on_batch_begin"
+ argspec: "args=[\'self\', \'batch\', \'logs\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "on_batch_end"
+ argspec: "args=[\'self\', \'batch\', \'logs\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "on_epoch_begin"
+ argspec: "args=[\'self\', \'epoch\', \'logs\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "on_epoch_end"
+ argspec: "args=[\'self\', \'epoch\', \'logs\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "on_train_begin"
+ argspec: "args=[\'self\', \'logs\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "on_train_end"
+ argspec: "args=[\'self\', \'logs\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "set_model"
+ argspec: "args=[\'self\', \'model\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "set_params"
+ argspec: "args=[\'self\', \'params\'], varargs=None, keywords=None, defaults=None"
+ }
+}
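
An illustrative use of the TensorBoard callback recorded above (not part of the diff; './logs' is the recorded default log_dir):

import tensorflow as tf

# Write scalar summaries and the graph under ./logs; inspect them with:
#   tensorboard --logdir ./logs
tensorboard = tf.keras.callbacks.TensorBoard(log_dir='./logs', write_graph=True)
# Attach via model.fit(..., callbacks=[tensorboard]).
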
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.keras.callbacks.-terminate-on-na-n.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.keras.callbacks.-terminate-on-na-n.pbtxt
new file mode 100644
index 0000000000..5c2d336353
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.keras.callbacks.-terminate-on-na-n.pbtxt
@@ -0,0 +1,42 @@
+path: "tensorflow.keras.callbacks.TerminateOnNaN"
+tf_class {
+ is_instance: "<class \'tensorflow.python.keras.callbacks.TerminateOnNaN\'>"
+ is_instance: "<class \'tensorflow.python.keras.callbacks.Callback\'>"
+ is_instance: "<type \'object\'>"
+ member_method {
+ name: "__init__"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "on_batch_begin"
+ argspec: "args=[\'self\', \'batch\', \'logs\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "on_batch_end"
+ argspec: "args=[\'self\', \'batch\', \'logs\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "on_epoch_begin"
+ argspec: "args=[\'self\', \'epoch\', \'logs\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "on_epoch_end"
+ argspec: "args=[\'self\', \'epoch\', \'logs\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "on_train_begin"
+ argspec: "args=[\'self\', \'logs\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "on_train_end"
+ argspec: "args=[\'self\', \'logs\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "set_model"
+ argspec: "args=[\'self\', \'model\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "set_params"
+ argspec: "args=[\'self\', \'params\'], varargs=None, keywords=None, defaults=None"
+ }
+}
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.keras.callbacks.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.keras.callbacks.pbtxt
new file mode 100644
index 0000000000..1e9085e034
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.keras.callbacks.pbtxt
@@ -0,0 +1,55 @@
+path: "tensorflow.keras.callbacks"
+tf_module {
+ member {
+ name: "BaseLogger"
+ mtype: "<type \'type\'>"
+ }
+ member {
+ name: "CSVLogger"
+ mtype: "<type \'type\'>"
+ }
+ member {
+ name: "Callback"
+ mtype: "<type \'type\'>"
+ }
+ member {
+ name: "EarlyStopping"
+ mtype: "<type \'type\'>"
+ }
+ member {
+ name: "History"
+ mtype: "<type \'type\'>"
+ }
+ member {
+ name: "LambdaCallback"
+ mtype: "<type \'type\'>"
+ }
+ member {
+ name: "LearningRateScheduler"
+ mtype: "<type \'type\'>"
+ }
+ member {
+ name: "ModelCheckpoint"
+ mtype: "<type \'type\'>"
+ }
+ member {
+ name: "ProgbarLogger"
+ mtype: "<type \'type\'>"
+ }
+ member {
+ name: "ReduceLROnPlateau"
+ mtype: "<type \'type\'>"
+ }
+ member {
+ name: "RemoteMonitor"
+ mtype: "<type \'type\'>"
+ }
+ member {
+ name: "TensorBoard"
+ mtype: "<type \'type\'>"
+ }
+ member {
+ name: "TerminateOnNaN"
+ mtype: "<type \'type\'>"
+ }
+}
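
The hook methods recorded in the callback classes above (on_epoch_end, on_batch_end, ...) are the same ones a user-defined Callback subclass overrides; a minimal sketch, with EpochLogger as a made-up name (not part of the diff):

import tensorflow as tf

class EpochLogger(tf.keras.callbacks.Callback):
    """Print the metrics Keras passes to on_epoch_end after every epoch."""

    def on_epoch_end(self, epoch, logs=None):
        logs = logs or {}
        print('epoch %d: %s' % (epoch, sorted(logs.items())))

# Combined with built-ins from this module, e.g.:
# model.fit(x, y, callbacks=[EpochLogger(), tf.keras.callbacks.TerminateOnNaN()])
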
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.keras.constraints.-constraint.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.keras.constraints.-constraint.pbtxt
new file mode 100644
index 0000000000..8e07b7d98e
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.keras.constraints.-constraint.pbtxt
@@ -0,0 +1,12 @@
+path: "tensorflow.keras.constraints.Constraint"
+tf_class {
+ is_instance: "<class \'tensorflow.python.keras.constraints.Constraint\'>"
+ is_instance: "<type \'object\'>"
+ member_method {
+ name: "__init__"
+ }
+ member_method {
+ name: "get_config"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+}
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.keras.constraints.-max-norm.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.keras.constraints.-max-norm.pbtxt
new file mode 100644
index 0000000000..2b81174b6c
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.keras.constraints.-max-norm.pbtxt
@@ -0,0 +1,14 @@
+path: "tensorflow.keras.constraints.MaxNorm"
+tf_class {
+ is_instance: "<class \'tensorflow.python.keras.constraints.MaxNorm\'>"
+ is_instance: "<class \'tensorflow.python.keras.constraints.Constraint\'>"
+ is_instance: "<type \'object\'>"
+ member_method {
+ name: "__init__"
+ argspec: "args=[\'self\', \'max_value\', \'axis\'], varargs=None, keywords=None, defaults=[\'2\', \'0\'], "
+ }
+ member_method {
+ name: "get_config"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+}
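
As an illustration of the MaxNorm defaults recorded above (max_value=2, axis=0), a weight-constraint sketch (illustrative Python, not part of the diff):

import tensorflow as tf

# Clip each unit's incoming weight vector to a norm of at most 2 after
# every gradient update.
dense = tf.keras.layers.Dense(
    64, kernel_constraint=tf.keras.constraints.MaxNorm(max_value=2, axis=0))
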
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.keras.constraints.-min-max-norm.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.keras.constraints.-min-max-norm.pbtxt
new file mode 100644
index 0000000000..a41eda86ac
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.keras.constraints.-min-max-norm.pbtxt
@@ -0,0 +1,14 @@
+path: "tensorflow.keras.constraints.MinMaxNorm"
+tf_class {
+ is_instance: "<class \'tensorflow.python.keras.constraints.MinMaxNorm\'>"
+ is_instance: "<class \'tensorflow.python.keras.constraints.Constraint\'>"
+ is_instance: "<type \'object\'>"
+ member_method {
+ name: "__init__"
+ argspec: "args=[\'self\', \'min_value\', \'max_value\', \'rate\', \'axis\'], varargs=None, keywords=None, defaults=[\'0.0\', \'1.0\', \'1.0\', \'0\'], "
+ }
+ member_method {
+ name: "get_config"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+}
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.keras.constraints.-non-neg.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.keras.constraints.-non-neg.pbtxt
new file mode 100644
index 0000000000..572e3eea4d
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.keras.constraints.-non-neg.pbtxt
@@ -0,0 +1,13 @@
+path: "tensorflow.keras.constraints.NonNeg"
+tf_class {
+ is_instance: "<class \'tensorflow.python.keras.constraints.NonNeg\'>"
+ is_instance: "<class \'tensorflow.python.keras.constraints.Constraint\'>"
+ is_instance: "<type \'object\'>"
+ member_method {
+ name: "__init__"
+ }
+ member_method {
+ name: "get_config"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+}
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.keras.constraints.-unit-norm.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.keras.constraints.-unit-norm.pbtxt
new file mode 100644
index 0000000000..fe16c38cc8
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.keras.constraints.-unit-norm.pbtxt
@@ -0,0 +1,14 @@
+path: "tensorflow.keras.constraints.UnitNorm"
+tf_class {
+ is_instance: "<class \'tensorflow.python.keras.constraints.UnitNorm\'>"
+ is_instance: "<class \'tensorflow.python.keras.constraints.Constraint\'>"
+ is_instance: "<type \'object\'>"
+ member_method {
+ name: "__init__"
+ argspec: "args=[\'self\', \'axis\'], varargs=None, keywords=None, defaults=[\'0\'], "
+ }
+ member_method {
+ name: "get_config"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+}
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.keras.constraints.max_norm.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.keras.constraints.max_norm.pbtxt
new file mode 100644
index 0000000000..6650bae07a
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.keras.constraints.max_norm.pbtxt
@@ -0,0 +1,14 @@
+path: "tensorflow.keras.constraints.max_norm"
+tf_class {
+ is_instance: "<class \'tensorflow.python.keras.constraints.MaxNorm\'>"
+ is_instance: "<class \'tensorflow.python.keras.constraints.Constraint\'>"
+ is_instance: "<type \'object\'>"
+ member_method {
+ name: "__init__"
+ argspec: "args=[\'self\', \'max_value\', \'axis\'], varargs=None, keywords=None, defaults=[\'2\', \'0\'], "
+ }
+ member_method {
+ name: "get_config"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+}
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.keras.constraints.min_max_norm.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.keras.constraints.min_max_norm.pbtxt
new file mode 100644
index 0000000000..9dd3bc92fc
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.keras.constraints.min_max_norm.pbtxt
@@ -0,0 +1,14 @@
+path: "tensorflow.keras.constraints.min_max_norm"
+tf_class {
+ is_instance: "<class \'tensorflow.python.keras.constraints.MinMaxNorm\'>"
+ is_instance: "<class \'tensorflow.python.keras.constraints.Constraint\'>"
+ is_instance: "<type \'object\'>"
+ member_method {
+ name: "__init__"
+ argspec: "args=[\'self\', \'min_value\', \'max_value\', \'rate\', \'axis\'], varargs=None, keywords=None, defaults=[\'0.0\', \'1.0\', \'1.0\', \'0\'], "
+ }
+ member_method {
+ name: "get_config"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+}
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.keras.constraints.non_neg.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.keras.constraints.non_neg.pbtxt
new file mode 100644
index 0000000000..a565840939
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.keras.constraints.non_neg.pbtxt
@@ -0,0 +1,13 @@
+path: "tensorflow.keras.constraints.non_neg"
+tf_class {
+ is_instance: "<class \'tensorflow.python.keras.constraints.NonNeg\'>"
+ is_instance: "<class \'tensorflow.python.keras.constraints.Constraint\'>"
+ is_instance: "<type \'object\'>"
+ member_method {
+ name: "__init__"
+ }
+ member_method {
+ name: "get_config"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+}
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.keras.constraints.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.keras.constraints.pbtxt
new file mode 100644
index 0000000000..655685956f
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.keras.constraints.pbtxt
@@ -0,0 +1,51 @@
+path: "tensorflow.keras.constraints"
+tf_module {
+ member {
+ name: "Constraint"
+ mtype: "<type \'type\'>"
+ }
+ member {
+ name: "MaxNorm"
+ mtype: "<type \'type\'>"
+ }
+ member {
+ name: "MinMaxNorm"
+ mtype: "<type \'type\'>"
+ }
+ member {
+ name: "NonNeg"
+ mtype: "<type \'type\'>"
+ }
+ member {
+ name: "UnitNorm"
+ mtype: "<type \'type\'>"
+ }
+ member {
+ name: "max_norm"
+ mtype: "<type \'type\'>"
+ }
+ member {
+ name: "min_max_norm"
+ mtype: "<type \'type\'>"
+ }
+ member {
+ name: "non_neg"
+ mtype: "<type \'type\'>"
+ }
+ member {
+ name: "unit_norm"
+ mtype: "<type \'type\'>"
+ }
+ member_method {
+ name: "deserialize"
+ argspec: "args=[\'config\', \'custom_objects\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "get"
+ argspec: "args=[\'identifier\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "serialize"
+ argspec: "args=[\'constraint\'], varargs=None, keywords=None, defaults=None"
+ }
+}
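
The module-level get/serialize/deserialize methods recorded above round-trip constraints by name; a sketch under the assumption that the lowercase aliases (non_neg, max_norm, ...) are registered for string lookup (not part of the diff):

import tensorflow as tf

constraints = tf.keras.constraints

c = constraints.get('non_neg')               # resolve a string alias to a constraint
config = constraints.serialize(c)            # JSON-friendly config dict
restored = constraints.deserialize(config)   # rebuild an equivalent constraint
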
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.keras.constraints.unit_norm.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.keras.constraints.unit_norm.pbtxt
new file mode 100644
index 0000000000..5cbe0da4c1
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.keras.constraints.unit_norm.pbtxt
@@ -0,0 +1,14 @@
+path: "tensorflow.keras.constraints.unit_norm"
+tf_class {
+ is_instance: "<class \'tensorflow.python.keras.constraints.UnitNorm\'>"
+ is_instance: "<class \'tensorflow.python.keras.constraints.Constraint\'>"
+ is_instance: "<type \'object\'>"
+ member_method {
+ name: "__init__"
+ argspec: "args=[\'self\', \'axis\'], varargs=None, keywords=None, defaults=[\'0\'], "
+ }
+ member_method {
+ name: "get_config"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+}
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.keras.datasets.boston_housing.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.keras.datasets.boston_housing.pbtxt
new file mode 100644
index 0000000000..bda31751d4
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.keras.datasets.boston_housing.pbtxt
@@ -0,0 +1,7 @@
+path: "tensorflow.keras.datasets.boston_housing"
+tf_module {
+ member_method {
+ name: "load_data"
+ argspec: "args=[\'path\', \'test_split\', \'seed\'], varargs=None, keywords=None, defaults=[\'boston_housing.npz\', \'0.2\', \'113\'], "
+ }
+}
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.keras.datasets.cifar10.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.keras.datasets.cifar10.pbtxt
new file mode 100644
index 0000000000..8a5142f793
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.keras.datasets.cifar10.pbtxt
@@ -0,0 +1,7 @@
+path: "tensorflow.keras.datasets.cifar10"
+tf_module {
+ member_method {
+ name: "load_data"
+ argspec: "args=[], varargs=None, keywords=None, defaults=None"
+ }
+}
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.keras.datasets.cifar100.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.keras.datasets.cifar100.pbtxt
new file mode 100644
index 0000000000..16f184eeb5
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.keras.datasets.cifar100.pbtxt
@@ -0,0 +1,7 @@
+path: "tensorflow.keras.datasets.cifar100"
+tf_module {
+ member_method {
+ name: "load_data"
+ argspec: "args=[\'label_mode\'], varargs=None, keywords=None, defaults=[\'fine\'], "
+ }
+}
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.keras.datasets.fashion_mnist.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.keras.datasets.fashion_mnist.pbtxt
new file mode 100644
index 0000000000..a0e14356fa
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.keras.datasets.fashion_mnist.pbtxt
@@ -0,0 +1,7 @@
+path: "tensorflow.keras.datasets.fashion_mnist"
+tf_module {
+ member_method {
+ name: "load_data"
+ argspec: "args=[], varargs=None, keywords=None, defaults=None"
+ }
+}
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.keras.datasets.imdb.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.keras.datasets.imdb.pbtxt
new file mode 100644
index 0000000000..ff962876b6
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.keras.datasets.imdb.pbtxt
@@ -0,0 +1,11 @@
+path: "tensorflow.keras.datasets.imdb"
+tf_module {
+ member_method {
+ name: "get_word_index"
+ argspec: "args=[\'path\'], varargs=None, keywords=None, defaults=[\'imdb_word_index.json\'], "
+ }
+ member_method {
+ name: "load_data"
+ argspec: "args=[\'path\', \'num_words\', \'skip_top\', \'maxlen\', \'seed\', \'start_char\', \'oov_char\', \'index_from\'], varargs=None, keywords=kwargs, defaults=[\'imdb.npz\', \'None\', \'0\', \'None\', \'113\', \'1\', \'2\', \'3\'], "
+ }
+}
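
An illustrative use of the imdb loaders recorded above (not part of the diff; the num_words value is an arbitrary choice):

import tensorflow as tf

# Keep only the 10,000 most frequent words; the remaining arguments keep the
# recorded defaults (skip_top=0, start_char=1, oov_char=2, index_from=3).
(x_train, y_train), (x_test, y_test) = tf.keras.datasets.imdb.load_data(num_words=10000)
word_index = tf.keras.datasets.imdb.get_word_index()  # maps word -> integer id
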
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.keras.datasets.mnist.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.keras.datasets.mnist.pbtxt
new file mode 100644
index 0000000000..530bb07550
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.keras.datasets.mnist.pbtxt
@@ -0,0 +1,7 @@
+path: "tensorflow.keras.datasets.mnist"
+tf_module {
+ member_method {
+ name: "load_data"
+ argspec: "args=[\'path\'], varargs=None, keywords=None, defaults=[\'mnist.npz\'], "
+ }
+}
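
And the mnist loader above in use (illustrative Python, not part of the diff; the data is downloaded and cached as mnist.npz, the recorded default path):

import tensorflow as tf

(x_train, y_train), (x_test, y_test) = tf.keras.datasets.mnist.load_data()
print(x_train.shape, y_train.shape)  # (60000, 28, 28) (60000,)
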
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.keras.datasets.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.keras.datasets.pbtxt
new file mode 100644
index 0000000000..36e3aafbe4
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.keras.datasets.pbtxt
@@ -0,0 +1,31 @@
+path: "tensorflow.keras.datasets"
+tf_module {
+ member {
+ name: "boston_housing"
+ mtype: "<type \'module\'>"
+ }
+ member {
+ name: "cifar10"
+ mtype: "<type \'module\'>"
+ }
+ member {
+ name: "cifar100"
+ mtype: "<type \'module\'>"
+ }
+ member {
+ name: "fashion_mnist"
+ mtype: "<type \'module\'>"
+ }
+ member {
+ name: "imdb"
+ mtype: "<type \'module\'>"
+ }
+ member {
+ name: "mnist"
+ mtype: "<type \'module\'>"
+ }
+ member {
+ name: "reuters"
+ mtype: "<type \'module\'>"
+ }
+}
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.keras.datasets.reuters.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.keras.datasets.reuters.pbtxt
new file mode 100644
index 0000000000..2da4a13067
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.keras.datasets.reuters.pbtxt
@@ -0,0 +1,11 @@
+path: "tensorflow.keras.datasets.reuters"
+tf_module {
+ member_method {
+ name: "get_word_index"
+ argspec: "args=[\'path\'], varargs=None, keywords=None, defaults=[\'reuters_word_index.json\'], "
+ }
+ member_method {
+ name: "load_data"
+ argspec: "args=[\'path\', \'num_words\', \'skip_top\', \'maxlen\', \'test_split\', \'seed\', \'start_char\', \'oov_char\', \'index_from\'], varargs=None, keywords=kwargs, defaults=[\'reuters.npz\', \'None\', \'0\', \'None\', \'0.2\', \'113\', \'1\', \'2\', \'3\'], "
+ }
+}
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.keras.estimator.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.keras.estimator.pbtxt
new file mode 100644
index 0000000000..7a3fb39f77
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.keras.estimator.pbtxt
@@ -0,0 +1,7 @@
+path: "tensorflow.keras.estimator"
+tf_module {
+ member_method {
+ name: "model_to_estimator"
+ argspec: "args=[\'keras_model\', \'keras_model_path\', \'custom_objects\', \'model_dir\', \'config\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'None\', \'None\'], "
+ }
+}
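
A hedged sketch of model_to_estimator as recorded above (not part of the diff); the toy model and the '/tmp/keras_estimator' model_dir are placeholders, and the Keras model must be compiled before conversion:

import tensorflow as tf

model = tf.keras.Sequential([
    tf.keras.layers.Dense(16, activation='relu', input_shape=(4,)),
    tf.keras.layers.Dense(1),
])
model.compile(optimizer='adam', loss='mse')

estimator = tf.keras.estimator.model_to_estimator(
    keras_model=model, model_dir='/tmp/keras_estimator')
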
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.keras.initializers.-constant.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.keras.initializers.-constant.pbtxt
new file mode 100644
index 0000000000..cbaba78ed5
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.keras.initializers.-constant.pbtxt
@@ -0,0 +1,18 @@
+path: "tensorflow.keras.initializers.Constant"
+tf_class {
+ is_instance: "<class \'tensorflow.python.ops.init_ops.Constant\'>"
+ is_instance: "<class \'tensorflow.python.ops.init_ops.Initializer\'>"
+ is_instance: "<type \'object\'>"
+ member_method {
+ name: "__init__"
+ argspec: "args=[\'self\', \'value\', \'dtype\', \'verify_shape\'], varargs=None, keywords=None, defaults=[\'0\', \"<dtype: \'float32\'>\", \'False\'], "
+ }
+ member_method {
+ name: "from_config"
+ argspec: "args=[\'cls\', \'config\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_config"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+}
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.keras.initializers.-identity.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.keras.initializers.-identity.pbtxt
new file mode 100644
index 0000000000..a5f7f348de
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.keras.initializers.-identity.pbtxt
@@ -0,0 +1,18 @@
+path: "tensorflow.keras.initializers.Identity"
+tf_class {
+ is_instance: "<class \'tensorflow.python.ops.init_ops.Identity\'>"
+ is_instance: "<class \'tensorflow.python.ops.init_ops.Initializer\'>"
+ is_instance: "<type \'object\'>"
+ member_method {
+ name: "__init__"
+ argspec: "args=[\'self\', \'gain\', \'dtype\'], varargs=None, keywords=None, defaults=[\'1.0\', \"<dtype: \'float32\'>\"], "
+ }
+ member_method {
+ name: "from_config"
+ argspec: "args=[\'cls\', \'config\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_config"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+}
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.keras.initializers.-initializer.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.keras.initializers.-initializer.pbtxt
new file mode 100644
index 0000000000..8f10d1698e
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.keras.initializers.-initializer.pbtxt
@@ -0,0 +1,16 @@
+path: "tensorflow.keras.initializers.Initializer"
+tf_class {
+ is_instance: "<class \'tensorflow.python.ops.init_ops.Initializer\'>"
+ is_instance: "<type \'object\'>"
+ member_method {
+ name: "__init__"
+ }
+ member_method {
+ name: "from_config"
+ argspec: "args=[\'cls\', \'config\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_config"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+}
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.keras.initializers.-ones.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.keras.initializers.-ones.pbtxt
new file mode 100644
index 0000000000..2fbfa774f8
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.keras.initializers.-ones.pbtxt
@@ -0,0 +1,18 @@
+path: "tensorflow.keras.initializers.Ones"
+tf_class {
+ is_instance: "<class \'tensorflow.python.ops.init_ops.Ones\'>"
+ is_instance: "<class \'tensorflow.python.ops.init_ops.Initializer\'>"
+ is_instance: "<type \'object\'>"
+ member_method {
+ name: "__init__"
+ argspec: "args=[\'self\', \'dtype\'], varargs=None, keywords=None, defaults=[\"<dtype: \'float32\'>\"], "
+ }
+ member_method {
+ name: "from_config"
+ argspec: "args=[\'cls\', \'config\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_config"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+}
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.keras.initializers.-orthogonal.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.keras.initializers.-orthogonal.pbtxt
new file mode 100644
index 0000000000..874d320d73
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.keras.initializers.-orthogonal.pbtxt
@@ -0,0 +1,18 @@
+path: "tensorflow.keras.initializers.Orthogonal"
+tf_class {
+ is_instance: "<class \'tensorflow.python.ops.init_ops.Orthogonal\'>"
+ is_instance: "<class \'tensorflow.python.ops.init_ops.Initializer\'>"
+ is_instance: "<type \'object\'>"
+ member_method {
+ name: "__init__"
+ argspec: "args=[\'self\', \'gain\', \'seed\', \'dtype\'], varargs=None, keywords=None, defaults=[\'1.0\', \'None\', \"<dtype: \'float32\'>\"], "
+ }
+ member_method {
+ name: "from_config"
+ argspec: "args=[\'cls\', \'config\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_config"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+}
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.keras.initializers.-random-normal.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.keras.initializers.-random-normal.pbtxt
new file mode 100644
index 0000000000..26784ce55d
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.keras.initializers.-random-normal.pbtxt
@@ -0,0 +1,19 @@
+path: "tensorflow.keras.initializers.RandomNormal"
+tf_class {
+ is_instance: "<class \'tensorflow.python.keras.initializers.RandomNormal\'>"
+ is_instance: "<class \'tensorflow.python.ops.init_ops.RandomNormal\'>"
+ is_instance: "<class \'tensorflow.python.ops.init_ops.Initializer\'>"
+ is_instance: "<type \'object\'>"
+ member_method {
+ name: "__init__"
+ argspec: "args=[\'self\', \'mean\', \'stddev\', \'seed\', \'dtype\'], varargs=None, keywords=None, defaults=[\'0.0\', \'0.05\', \'None\', \"<dtype: \'float32\'>\"], "
+ }
+ member_method {
+ name: "from_config"
+ argspec: "args=[\'cls\', \'config\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_config"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+}
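
To illustrate the RandomNormal defaults recorded above (mean=0.0, stddev=0.05), a typical kernel-initializer sketch (not part of the diff; the seed and layer width are arbitrary):

import tensorflow as tf

init = tf.keras.initializers.RandomNormal(mean=0.0, stddev=0.05, seed=42)
dense = tf.keras.layers.Dense(32, kernel_initializer=init)
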
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.keras.initializers.-random-uniform.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.keras.initializers.-random-uniform.pbtxt
new file mode 100644
index 0000000000..4110bda5f6
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.keras.initializers.-random-uniform.pbtxt
@@ -0,0 +1,19 @@
+path: "tensorflow.keras.initializers.RandomUniform"
+tf_class {
+ is_instance: "<class \'tensorflow.python.keras.initializers.RandomUniform\'>"
+ is_instance: "<class \'tensorflow.python.ops.init_ops.RandomUniform\'>"
+ is_instance: "<class \'tensorflow.python.ops.init_ops.Initializer\'>"
+ is_instance: "<type \'object\'>"
+ member_method {
+ name: "__init__"
+ argspec: "args=[\'self\', \'minval\', \'maxval\', \'seed\', \'dtype\'], varargs=None, keywords=None, defaults=[\'-0.05\', \'0.05\', \'None\', \"<dtype: \'float32\'>\"], "
+ }
+ member_method {
+ name: "from_config"
+ argspec: "args=[\'cls\', \'config\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_config"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+}
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.keras.initializers.-truncated-normal.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.keras.initializers.-truncated-normal.pbtxt
new file mode 100644
index 0000000000..0451d0d73a
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.keras.initializers.-truncated-normal.pbtxt
@@ -0,0 +1,19 @@
+path: "tensorflow.keras.initializers.TruncatedNormal"
+tf_class {
+ is_instance: "<class \'tensorflow.python.keras.initializers.TruncatedNormal\'>"
+ is_instance: "<class \'tensorflow.python.ops.init_ops.TruncatedNormal\'>"
+ is_instance: "<class \'tensorflow.python.ops.init_ops.Initializer\'>"
+ is_instance: "<type \'object\'>"
+ member_method {
+ name: "__init__"
+ argspec: "args=[\'self\', \'mean\', \'stddev\', \'seed\', \'dtype\'], varargs=None, keywords=None, defaults=[\'0.0\', \'0.05\', \'None\', \"<dtype: \'float32\'>\"], "
+ }
+ member_method {
+ name: "from_config"
+ argspec: "args=[\'cls\', \'config\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_config"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+}
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.keras.initializers.-variance-scaling.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.keras.initializers.-variance-scaling.pbtxt
new file mode 100644
index 0000000000..03f4064b9e
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.keras.initializers.-variance-scaling.pbtxt
@@ -0,0 +1,18 @@
+path: "tensorflow.keras.initializers.VarianceScaling"
+tf_class {
+ is_instance: "<class \'tensorflow.python.ops.init_ops.VarianceScaling\'>"
+ is_instance: "<class \'tensorflow.python.ops.init_ops.Initializer\'>"
+ is_instance: "<type \'object\'>"
+ member_method {
+ name: "__init__"
+ argspec: "args=[\'self\', \'scale\', \'mode\', \'distribution\', \'seed\', \'dtype\'], varargs=None, keywords=None, defaults=[\'1.0\', \'fan_in\', \'truncated_normal\', \'None\', \"<dtype: \'float32\'>\"], "
+ }
+ member_method {
+ name: "from_config"
+ argspec: "args=[\'cls\', \'config\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_config"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+}
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.keras.initializers.-zeros.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.keras.initializers.-zeros.pbtxt
new file mode 100644
index 0000000000..b6ab68e5be
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.keras.initializers.-zeros.pbtxt
@@ -0,0 +1,18 @@
+path: "tensorflow.keras.initializers.Zeros"
+tf_class {
+ is_instance: "<class \'tensorflow.python.ops.init_ops.Zeros\'>"
+ is_instance: "<class \'tensorflow.python.ops.init_ops.Initializer\'>"
+ is_instance: "<type \'object\'>"
+ member_method {
+ name: "__init__"
+ argspec: "args=[\'self\', \'dtype\'], varargs=None, keywords=None, defaults=[\"<dtype: \'float32\'>\"], "
+ }
+ member_method {
+ name: "from_config"
+ argspec: "args=[\'cls\', \'config\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_config"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+}
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.keras.initializers.constant.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.keras.initializers.constant.pbtxt
new file mode 100644
index 0000000000..bddc37b907
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.keras.initializers.constant.pbtxt
@@ -0,0 +1,18 @@
+path: "tensorflow.keras.initializers.constant"
+tf_class {
+ is_instance: "<class \'tensorflow.python.ops.init_ops.Constant\'>"
+ is_instance: "<class \'tensorflow.python.ops.init_ops.Initializer\'>"
+ is_instance: "<type \'object\'>"
+ member_method {
+ name: "__init__"
+ argspec: "args=[\'self\', \'value\', \'dtype\', \'verify_shape\'], varargs=None, keywords=None, defaults=[\'0\', \"<dtype: \'float32\'>\", \'False\'], "
+ }
+ member_method {
+ name: "from_config"
+ argspec: "args=[\'cls\', \'config\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_config"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+}
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.keras.initializers.glorot_normal.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.keras.initializers.glorot_normal.pbtxt
new file mode 100644
index 0000000000..ef0815972d
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.keras.initializers.glorot_normal.pbtxt
@@ -0,0 +1,19 @@
+path: "tensorflow.keras.initializers.glorot_normal"
+tf_class {
+ is_instance: "<class \'tensorflow.python.ops.init_ops.GlorotNormal\'>"
+ is_instance: "<class \'tensorflow.python.ops.init_ops.VarianceScaling\'>"
+ is_instance: "<class \'tensorflow.python.ops.init_ops.Initializer\'>"
+ is_instance: "<type \'object\'>"
+ member_method {
+ name: "__init__"
+ argspec: "args=[\'self\', \'seed\', \'dtype\'], varargs=None, keywords=None, defaults=[\'None\', \"<dtype: \'float32\'>\"], "
+ }
+ member_method {
+ name: "from_config"
+ argspec: "args=[\'cls\', \'config\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_config"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+}
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.keras.initializers.glorot_uniform.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.keras.initializers.glorot_uniform.pbtxt
new file mode 100644
index 0000000000..439b5ada9b
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.keras.initializers.glorot_uniform.pbtxt
@@ -0,0 +1,19 @@
+path: "tensorflow.keras.initializers.glorot_uniform"
+tf_class {
+ is_instance: "<class \'tensorflow.python.ops.init_ops.GlorotUniform\'>"
+ is_instance: "<class \'tensorflow.python.ops.init_ops.VarianceScaling\'>"
+ is_instance: "<class \'tensorflow.python.ops.init_ops.Initializer\'>"
+ is_instance: "<type \'object\'>"
+ member_method {
+ name: "__init__"
+ argspec: "args=[\'self\', \'seed\', \'dtype\'], varargs=None, keywords=None, defaults=[\'None\', \"<dtype: \'float32\'>\"], "
+ }
+ member_method {
+ name: "from_config"
+ argspec: "args=[\'cls\', \'config\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_config"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+}
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.keras.initializers.identity.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.keras.initializers.identity.pbtxt
new file mode 100644
index 0000000000..a4c5a61490
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.keras.initializers.identity.pbtxt
@@ -0,0 +1,18 @@
+path: "tensorflow.keras.initializers.identity"
+tf_class {
+ is_instance: "<class \'tensorflow.python.ops.init_ops.Identity\'>"
+ is_instance: "<class \'tensorflow.python.ops.init_ops.Initializer\'>"
+ is_instance: "<type \'object\'>"
+ member_method {
+ name: "__init__"
+ argspec: "args=[\'self\', \'gain\', \'dtype\'], varargs=None, keywords=None, defaults=[\'1.0\', \"<dtype: \'float32\'>\"], "
+ }
+ member_method {
+ name: "from_config"
+ argspec: "args=[\'cls\', \'config\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_config"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+}
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.keras.initializers.normal.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.keras.initializers.normal.pbtxt
new file mode 100644
index 0000000000..8d0b5c242b
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.keras.initializers.normal.pbtxt
@@ -0,0 +1,19 @@
+path: "tensorflow.keras.initializers.normal"
+tf_class {
+ is_instance: "<class \'tensorflow.python.keras.initializers.RandomNormal\'>"
+ is_instance: "<class \'tensorflow.python.ops.init_ops.RandomNormal\'>"
+ is_instance: "<class \'tensorflow.python.ops.init_ops.Initializer\'>"
+ is_instance: "<type \'object\'>"
+ member_method {
+ name: "__init__"
+ argspec: "args=[\'self\', \'mean\', \'stddev\', \'seed\', \'dtype\'], varargs=None, keywords=None, defaults=[\'0.0\', \'0.05\', \'None\', \"<dtype: \'float32\'>\"], "
+ }
+ member_method {
+ name: "from_config"
+ argspec: "args=[\'cls\', \'config\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_config"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+}
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.keras.initializers.ones.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.keras.initializers.ones.pbtxt
new file mode 100644
index 0000000000..a89f78d1e1
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.keras.initializers.ones.pbtxt
@@ -0,0 +1,18 @@
+path: "tensorflow.keras.initializers.ones"
+tf_class {
+ is_instance: "<class \'tensorflow.python.ops.init_ops.Ones\'>"
+ is_instance: "<class \'tensorflow.python.ops.init_ops.Initializer\'>"
+ is_instance: "<type \'object\'>"
+ member_method {
+ name: "__init__"
+ argspec: "args=[\'self\', \'dtype\'], varargs=None, keywords=None, defaults=[\"<dtype: \'float32\'>\"], "
+ }
+ member_method {
+ name: "from_config"
+ argspec: "args=[\'cls\', \'config\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_config"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+}
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.keras.initializers.orthogonal.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.keras.initializers.orthogonal.pbtxt
new file mode 100644
index 0000000000..ee1e9bbae2
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.keras.initializers.orthogonal.pbtxt
@@ -0,0 +1,18 @@
+path: "tensorflow.keras.initializers.orthogonal"
+tf_class {
+ is_instance: "<class \'tensorflow.python.ops.init_ops.Orthogonal\'>"
+ is_instance: "<class \'tensorflow.python.ops.init_ops.Initializer\'>"
+ is_instance: "<type \'object\'>"
+ member_method {
+ name: "__init__"
+ argspec: "args=[\'self\', \'gain\', \'seed\', \'dtype\'], varargs=None, keywords=None, defaults=[\'1.0\', \'None\', \"<dtype: \'float32\'>\"], "
+ }
+ member_method {
+ name: "from_config"
+ argspec: "args=[\'cls\', \'config\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_config"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+}
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.keras.initializers.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.keras.initializers.pbtxt
new file mode 100644
index 0000000000..1540c2915b
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.keras.initializers.pbtxt
@@ -0,0 +1,119 @@
+path: "tensorflow.keras.initializers"
+tf_module {
+ member {
+ name: "Constant"
+ mtype: "<type \'type\'>"
+ }
+ member {
+ name: "Identity"
+ mtype: "<type \'type\'>"
+ }
+ member {
+ name: "Initializer"
+ mtype: "<type \'type\'>"
+ }
+ member {
+ name: "Ones"
+ mtype: "<type \'type\'>"
+ }
+ member {
+ name: "Orthogonal"
+ mtype: "<type \'type\'>"
+ }
+ member {
+ name: "RandomNormal"
+ mtype: "<type \'type\'>"
+ }
+ member {
+ name: "RandomUniform"
+ mtype: "<type \'type\'>"
+ }
+ member {
+ name: "TruncatedNormal"
+ mtype: "<type \'type\'>"
+ }
+ member {
+ name: "VarianceScaling"
+ mtype: "<type \'type\'>"
+ }
+ member {
+ name: "Zeros"
+ mtype: "<type \'type\'>"
+ }
+ member {
+ name: "constant"
+ mtype: "<type \'type\'>"
+ }
+ member {
+ name: "glorot_normal"
+ mtype: "<type \'type\'>"
+ }
+ member {
+ name: "glorot_uniform"
+ mtype: "<type \'type\'>"
+ }
+ member {
+ name: "identity"
+ mtype: "<type \'type\'>"
+ }
+ member {
+ name: "normal"
+ mtype: "<type \'type\'>"
+ }
+ member {
+ name: "ones"
+ mtype: "<type \'type\'>"
+ }
+ member {
+ name: "orthogonal"
+ mtype: "<type \'type\'>"
+ }
+ member {
+ name: "random_normal"
+ mtype: "<type \'type\'>"
+ }
+ member {
+ name: "random_uniform"
+ mtype: "<type \'type\'>"
+ }
+ member {
+ name: "truncated_normal"
+ mtype: "<type \'type\'>"
+ }
+ member {
+ name: "uniform"
+ mtype: "<type \'type\'>"
+ }
+ member {
+ name: "zeros"
+ mtype: "<type \'type\'>"
+ }
+ member_method {
+ name: "deserialize"
+ argspec: "args=[\'config\', \'custom_objects\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "get"
+ argspec: "args=[\'identifier\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "he_normal"
+ argspec: "args=[\'seed\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "he_uniform"
+ argspec: "args=[\'seed\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "lecun_normal"
+ argspec: "args=[\'seed\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "lecun_uniform"
+ argspec: "args=[\'seed\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "serialize"
+ argspec: "args=[\'initializer\'], varargs=None, keywords=None, defaults=None"
+ }
+}
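
The module-level helpers recorded above (get, serialize/deserialize, and the he_*/lecun_* factories) can be combined as in this sketch (not part of the diff; the exact layout of the config dict is an assumption):

import tensorflow as tf

initializers = tf.keras.initializers

he = initializers.he_normal(seed=1)           # VarianceScaling configured for He init
glorot = initializers.get('glorot_uniform')   # string lookup through get()
config = initializers.serialize(he)           # config dict suitable for deserialize()
restored = initializers.deserialize(config)
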
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.keras.initializers.random_normal.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.keras.initializers.random_normal.pbtxt
new file mode 100644
index 0000000000..bac8211a10
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.keras.initializers.random_normal.pbtxt
@@ -0,0 +1,19 @@
+path: "tensorflow.keras.initializers.random_normal"
+tf_class {
+ is_instance: "<class \'tensorflow.python.keras.initializers.RandomNormal\'>"
+ is_instance: "<class \'tensorflow.python.ops.init_ops.RandomNormal\'>"
+ is_instance: "<class \'tensorflow.python.ops.init_ops.Initializer\'>"
+ is_instance: "<type \'object\'>"
+ member_method {
+ name: "__init__"
+ argspec: "args=[\'self\', \'mean\', \'stddev\', \'seed\', \'dtype\'], varargs=None, keywords=None, defaults=[\'0.0\', \'0.05\', \'None\', \"<dtype: \'float32\'>\"], "
+ }
+ member_method {
+ name: "from_config"
+ argspec: "args=[\'cls\', \'config\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_config"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+}
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.keras.initializers.random_uniform.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.keras.initializers.random_uniform.pbtxt
new file mode 100644
index 0000000000..ab0d74d071
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.keras.initializers.random_uniform.pbtxt
@@ -0,0 +1,19 @@
+path: "tensorflow.keras.initializers.random_uniform"
+tf_class {
+ is_instance: "<class \'tensorflow.python.keras.initializers.RandomUniform\'>"
+ is_instance: "<class \'tensorflow.python.ops.init_ops.RandomUniform\'>"
+ is_instance: "<class \'tensorflow.python.ops.init_ops.Initializer\'>"
+ is_instance: "<type \'object\'>"
+ member_method {
+ name: "__init__"
+ argspec: "args=[\'self\', \'minval\', \'maxval\', \'seed\', \'dtype\'], varargs=None, keywords=None, defaults=[\'-0.05\', \'0.05\', \'None\', \"<dtype: \'float32\'>\"], "
+ }
+ member_method {
+ name: "from_config"
+ argspec: "args=[\'cls\', \'config\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_config"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+}
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.keras.initializers.truncated_normal.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.keras.initializers.truncated_normal.pbtxt
new file mode 100644
index 0000000000..358cca2b9c
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.keras.initializers.truncated_normal.pbtxt
@@ -0,0 +1,19 @@
+path: "tensorflow.keras.initializers.truncated_normal"
+tf_class {
+ is_instance: "<class \'tensorflow.python.keras.initializers.TruncatedNormal\'>"
+ is_instance: "<class \'tensorflow.python.ops.init_ops.TruncatedNormal\'>"
+ is_instance: "<class \'tensorflow.python.ops.init_ops.Initializer\'>"
+ is_instance: "<type \'object\'>"
+ member_method {
+ name: "__init__"
+ argspec: "args=[\'self\', \'mean\', \'stddev\', \'seed\', \'dtype\'], varargs=None, keywords=None, defaults=[\'0.0\', \'0.05\', \'None\', \"<dtype: \'float32\'>\"], "
+ }
+ member_method {
+ name: "from_config"
+ argspec: "args=[\'cls\', \'config\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_config"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+}
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.keras.initializers.uniform.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.keras.initializers.uniform.pbtxt
new file mode 100644
index 0000000000..e6c731361a
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.keras.initializers.uniform.pbtxt
@@ -0,0 +1,19 @@
+path: "tensorflow.keras.initializers.uniform"
+tf_class {
+ is_instance: "<class \'tensorflow.python.keras.initializers.RandomUniform\'>"
+ is_instance: "<class \'tensorflow.python.ops.init_ops.RandomUniform\'>"
+ is_instance: "<class \'tensorflow.python.ops.init_ops.Initializer\'>"
+ is_instance: "<type \'object\'>"
+ member_method {
+ name: "__init__"
+ argspec: "args=[\'self\', \'minval\', \'maxval\', \'seed\', \'dtype\'], varargs=None, keywords=None, defaults=[\'-0.05\', \'0.05\', \'None\', \"<dtype: \'float32\'>\"], "
+ }
+ member_method {
+ name: "from_config"
+ argspec: "args=[\'cls\', \'config\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_config"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+}
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.keras.initializers.zeros.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.keras.initializers.zeros.pbtxt
new file mode 100644
index 0000000000..a262390687
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.keras.initializers.zeros.pbtxt
@@ -0,0 +1,18 @@
+path: "tensorflow.keras.initializers.zeros"
+tf_class {
+ is_instance: "<class \'tensorflow.python.ops.init_ops.Zeros\'>"
+ is_instance: "<class \'tensorflow.python.ops.init_ops.Initializer\'>"
+ is_instance: "<type \'object\'>"
+ member_method {
+ name: "__init__"
+ argspec: "args=[\'self\', \'dtype\'], varargs=None, keywords=None, defaults=[\"<dtype: \'float32\'>\"], "
+ }
+ member_method {
+ name: "from_config"
+ argspec: "args=[\'cls\', \'config\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_config"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+}
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-activation.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-activation.pbtxt
new file mode 100644
index 0000000000..5510465d7b
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-activation.pbtxt
@@ -0,0 +1,175 @@
+path: "tensorflow.keras.layers.Activation"
+tf_class {
+ is_instance: "<class \'tensorflow.python.keras.layers.core.Activation\'>"
+ is_instance: "<class \'tensorflow.python.keras.engine.base_layer.Layer\'>"
+ is_instance: "<class \'tensorflow.python.training.checkpointable.base.CheckpointableBase\'>"
+ is_instance: "<type \'object\'>"
+ member {
+ name: "activity_regularizer"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "dtype"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "inbound_nodes"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "input"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "input_mask"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "input_shape"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "losses"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "name"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "non_trainable_variables"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "non_trainable_weights"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "outbound_nodes"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "output"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "output_mask"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "output_shape"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "trainable_variables"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "trainable_weights"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "updates"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "variables"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "weights"
+ mtype: "<type \'property\'>"
+ }
+ member_method {
+ name: "__init__"
+ argspec: "args=[\'self\', \'activation\'], varargs=None, keywords=kwargs, defaults=None"
+ }
+ member_method {
+ name: "add_loss"
+ argspec: "args=[\'self\', \'losses\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "add_update"
+ argspec: "args=[\'self\', \'updates\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "add_variable"
+ argspec: "args=[\'self\'], varargs=args, keywords=kwargs, defaults=None"
+ }
+ member_method {
+ name: "add_weight"
+ argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'synchronization\', \'aggregation\'], varargs=None, keywords=kwargs, defaults=[\'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'VariableSynchronization.AUTO\', \'VariableAggregation.NONE\'], "
+ }
+ member_method {
+ name: "apply"
+ argspec: "args=[\'self\', \'inputs\'], varargs=args, keywords=kwargs, defaults=None"
+ }
+ member_method {
+ name: "build"
+ argspec: "args=[\'self\', \'input_shape\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "call"
+ argspec: "args=[\'self\', \'inputs\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "compute_mask"
+ argspec: "args=[\'self\', \'inputs\', \'mask\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "compute_output_shape"
+ argspec: "args=[\'self\', \'input_shape\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "count_params"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "from_config"
+ argspec: "args=[\'cls\', \'config\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_config"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_input_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_input_mask_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_input_shape_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_losses_for"
+ argspec: "args=[\'self\', \'inputs\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_output_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_output_mask_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_output_shape_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_updates_for"
+ argspec: "args=[\'self\', \'inputs\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_weights"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "set_weights"
+ argspec: "args=[\'self\', \'weights\'], varargs=None, keywords=None, defaults=None"
+ }
+}
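
The Activation layer recorded above simply applies an element-wise activation as its own layer; a minimal sketch (illustrative Python, not part of the diff):

import tensorflow as tf

# Equivalent to passing activation='relu' directly to the Dense layer.
model = tf.keras.Sequential([
    tf.keras.layers.Dense(64, input_shape=(10,)),
    tf.keras.layers.Activation('relu'),
])
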
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-activity-regularization.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-activity-regularization.pbtxt
new file mode 100644
index 0000000000..38ec8a0aff
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-activity-regularization.pbtxt
@@ -0,0 +1,175 @@
+path: "tensorflow.keras.layers.ActivityRegularization"
+tf_class {
+ is_instance: "<class \'tensorflow.python.keras.layers.core.ActivityRegularization\'>"
+ is_instance: "<class \'tensorflow.python.keras.engine.base_layer.Layer\'>"
+ is_instance: "<class \'tensorflow.python.training.checkpointable.base.CheckpointableBase\'>"
+ is_instance: "<type \'object\'>"
+ member {
+ name: "activity_regularizer"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "dtype"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "inbound_nodes"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "input"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "input_mask"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "input_shape"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "losses"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "name"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "non_trainable_variables"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "non_trainable_weights"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "outbound_nodes"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "output"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "output_mask"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "output_shape"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "trainable_variables"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "trainable_weights"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "updates"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "variables"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "weights"
+ mtype: "<type \'property\'>"
+ }
+ member_method {
+ name: "__init__"
+ argspec: "args=[\'self\', \'l1\', \'l2\'], varargs=None, keywords=kwargs, defaults=[\'0.0\', \'0.0\'], "
+ }
+ member_method {
+ name: "add_loss"
+ argspec: "args=[\'self\', \'losses\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "add_update"
+ argspec: "args=[\'self\', \'updates\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "add_variable"
+ argspec: "args=[\'self\'], varargs=args, keywords=kwargs, defaults=None"
+ }
+ member_method {
+ name: "add_weight"
+ argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'synchronization\', \'aggregation\'], varargs=None, keywords=kwargs, defaults=[\'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'VariableSynchronization.AUTO\', \'VariableAggregation.NONE\'], "
+ }
+ member_method {
+ name: "apply"
+ argspec: "args=[\'self\', \'inputs\'], varargs=args, keywords=kwargs, defaults=None"
+ }
+ member_method {
+ name: "build"
+ argspec: "args=[\'self\', \'input_shape\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "call"
+ argspec: "args=[\'self\', \'inputs\'], varargs=None, keywords=kwargs, defaults=None"
+ }
+ member_method {
+ name: "compute_mask"
+ argspec: "args=[\'self\', \'inputs\', \'mask\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "compute_output_shape"
+ argspec: "args=[\'self\', \'input_shape\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "count_params"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "from_config"
+ argspec: "args=[\'cls\', \'config\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_config"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_input_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_input_mask_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_input_shape_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_losses_for"
+ argspec: "args=[\'self\', \'inputs\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_output_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_output_mask_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_output_shape_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_updates_for"
+ argspec: "args=[\'self\', \'inputs\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_weights"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "set_weights"
+ argspec: "args=[\'self\', \'weights\'], varargs=None, keywords=None, defaults=None"
+ }
+}
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-add.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-add.pbtxt
new file mode 100644
index 0000000000..41cb8e30bf
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-add.pbtxt
@@ -0,0 +1,176 @@
+path: "tensorflow.keras.layers.Add"
+tf_class {
+ is_instance: "<class \'tensorflow.python.keras.layers.merge.Add\'>"
+ is_instance: "<class \'tensorflow.python.keras.layers.merge._Merge\'>"
+ is_instance: "<class \'tensorflow.python.keras.engine.base_layer.Layer\'>"
+ is_instance: "<class \'tensorflow.python.training.checkpointable.base.CheckpointableBase\'>"
+ is_instance: "<type \'object\'>"
+ member {
+ name: "activity_regularizer"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "dtype"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "inbound_nodes"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "input"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "input_mask"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "input_shape"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "losses"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "name"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "non_trainable_variables"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "non_trainable_weights"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "outbound_nodes"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "output"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "output_mask"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "output_shape"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "trainable_variables"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "trainable_weights"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "updates"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "variables"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "weights"
+ mtype: "<type \'property\'>"
+ }
+ member_method {
+ name: "__init__"
+ argspec: "args=[\'self\'], varargs=None, keywords=kwargs, defaults=None"
+ }
+ member_method {
+ name: "add_loss"
+ argspec: "args=[\'self\', \'losses\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "add_update"
+ argspec: "args=[\'self\', \'updates\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "add_variable"
+ argspec: "args=[\'self\'], varargs=args, keywords=kwargs, defaults=None"
+ }
+ member_method {
+ name: "add_weight"
+ argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'synchronization\', \'aggregation\'], varargs=None, keywords=kwargs, defaults=[\'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'VariableSynchronization.AUTO\', \'VariableAggregation.NONE\'], "
+ }
+ member_method {
+ name: "apply"
+ argspec: "args=[\'self\', \'inputs\'], varargs=args, keywords=kwargs, defaults=None"
+ }
+ member_method {
+ name: "build"
+ argspec: "args=[\'instance\', \'input_shape\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "call"
+ argspec: "args=[\'self\', \'inputs\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "compute_mask"
+ argspec: "args=[\'self\', \'inputs\', \'mask\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "compute_output_shape"
+ argspec: "args=[\'instance\', \'input_shape\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "count_params"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "from_config"
+ argspec: "args=[\'cls\', \'config\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_config"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_input_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_input_mask_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_input_shape_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_losses_for"
+ argspec: "args=[\'self\', \'inputs\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_output_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_output_mask_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_output_shape_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_updates_for"
+ argspec: "args=[\'self\', \'inputs\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_weights"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "set_weights"
+ argspec: "args=[\'self\', \'weights\'], varargs=None, keywords=None, defaults=None"
+ }
+}
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-alpha-dropout.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-alpha-dropout.pbtxt
new file mode 100644
index 0000000000..9a7aaa8e96
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-alpha-dropout.pbtxt
@@ -0,0 +1,175 @@
+path: "tensorflow.keras.layers.AlphaDropout"
+tf_class {
+ is_instance: "<class \'tensorflow.python.keras.layers.noise.AlphaDropout\'>"
+ is_instance: "<class \'tensorflow.python.keras.engine.base_layer.Layer\'>"
+ is_instance: "<class \'tensorflow.python.training.checkpointable.base.CheckpointableBase\'>"
+ is_instance: "<type \'object\'>"
+ member {
+ name: "activity_regularizer"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "dtype"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "inbound_nodes"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "input"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "input_mask"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "input_shape"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "losses"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "name"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "non_trainable_variables"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "non_trainable_weights"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "outbound_nodes"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "output"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "output_mask"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "output_shape"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "trainable_variables"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "trainable_weights"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "updates"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "variables"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "weights"
+ mtype: "<type \'property\'>"
+ }
+ member_method {
+ name: "__init__"
+ argspec: "args=[\'self\', \'rate\', \'noise_shape\', \'seed\'], varargs=None, keywords=kwargs, defaults=[\'None\', \'None\'], "
+ }
+ member_method {
+ name: "add_loss"
+ argspec: "args=[\'self\', \'losses\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "add_update"
+ argspec: "args=[\'self\', \'updates\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "add_variable"
+ argspec: "args=[\'self\'], varargs=args, keywords=kwargs, defaults=None"
+ }
+ member_method {
+ name: "add_weight"
+ argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'synchronization\', \'aggregation\'], varargs=None, keywords=kwargs, defaults=[\'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'VariableSynchronization.AUTO\', \'VariableAggregation.NONE\'], "
+ }
+ member_method {
+ name: "apply"
+ argspec: "args=[\'self\', \'inputs\'], varargs=args, keywords=kwargs, defaults=None"
+ }
+ member_method {
+ name: "build"
+ argspec: "args=[\'self\', \'input_shape\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "call"
+ argspec: "args=[\'self\', \'inputs\', \'training\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "compute_mask"
+ argspec: "args=[\'self\', \'inputs\', \'mask\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "compute_output_shape"
+ argspec: "args=[\'instance\', \'input_shape\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "count_params"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "from_config"
+ argspec: "args=[\'cls\', \'config\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_config"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_input_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_input_mask_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_input_shape_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_losses_for"
+ argspec: "args=[\'self\', \'inputs\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_output_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_output_mask_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_output_shape_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_updates_for"
+ argspec: "args=[\'self\', \'inputs\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_weights"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "set_weights"
+ argspec: "args=[\'self\', \'weights\'], varargs=None, keywords=None, defaults=None"
+ }
+}
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-average-pooling1-d.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-average-pooling1-d.pbtxt
new file mode 100644
index 0000000000..c3dd2ad046
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-average-pooling1-d.pbtxt
@@ -0,0 +1,176 @@
+path: "tensorflow.keras.layers.AveragePooling1D"
+tf_class {
+ is_instance: "<class \'tensorflow.python.keras.layers.pooling.AveragePooling1D\'>"
+ is_instance: "<class \'tensorflow.python.keras.layers.pooling.Pooling1D\'>"
+ is_instance: "<class \'tensorflow.python.keras.engine.base_layer.Layer\'>"
+ is_instance: "<class \'tensorflow.python.training.checkpointable.base.CheckpointableBase\'>"
+ is_instance: "<type \'object\'>"
+ member {
+ name: "activity_regularizer"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "dtype"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "inbound_nodes"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "input"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "input_mask"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "input_shape"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "losses"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "name"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "non_trainable_variables"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "non_trainable_weights"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "outbound_nodes"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "output"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "output_mask"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "output_shape"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "trainable_variables"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "trainable_weights"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "updates"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "variables"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "weights"
+ mtype: "<type \'property\'>"
+ }
+ member_method {
+ name: "__init__"
+ argspec: "args=[\'self\', \'pool_size\', \'strides\', \'padding\', \'data_format\'], varargs=None, keywords=kwargs, defaults=[\'2\', \'None\', \'valid\', \'None\'], "
+ }
+ member_method {
+ name: "add_loss"
+ argspec: "args=[\'self\', \'losses\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "add_update"
+ argspec: "args=[\'self\', \'updates\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "add_variable"
+ argspec: "args=[\'self\'], varargs=args, keywords=kwargs, defaults=None"
+ }
+ member_method {
+ name: "add_weight"
+ argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'synchronization\', \'aggregation\'], varargs=None, keywords=kwargs, defaults=[\'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'VariableSynchronization.AUTO\', \'VariableAggregation.NONE\'], "
+ }
+ member_method {
+ name: "apply"
+ argspec: "args=[\'self\', \'inputs\'], varargs=args, keywords=kwargs, defaults=None"
+ }
+ member_method {
+ name: "build"
+ argspec: "args=[\'self\', \'input_shape\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "call"
+ argspec: "args=[\'self\', \'inputs\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "compute_mask"
+ argspec: "args=[\'self\', \'inputs\', \'mask\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "compute_output_shape"
+ argspec: "args=[\'self\', \'input_shape\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "count_params"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "from_config"
+ argspec: "args=[\'cls\', \'config\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_config"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_input_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_input_mask_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_input_shape_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_losses_for"
+ argspec: "args=[\'self\', \'inputs\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_output_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_output_mask_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_output_shape_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_updates_for"
+ argspec: "args=[\'self\', \'inputs\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_weights"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "set_weights"
+ argspec: "args=[\'self\', \'weights\'], varargs=None, keywords=None, defaults=None"
+ }
+}
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-average-pooling2-d.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-average-pooling2-d.pbtxt
new file mode 100644
index 0000000000..cc303bf7b9
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-average-pooling2-d.pbtxt
@@ -0,0 +1,176 @@
+path: "tensorflow.keras.layers.AveragePooling2D"
+tf_class {
+ is_instance: "<class \'tensorflow.python.keras.layers.pooling.AveragePooling2D\'>"
+ is_instance: "<class \'tensorflow.python.keras.layers.pooling.Pooling2D\'>"
+ is_instance: "<class \'tensorflow.python.keras.engine.base_layer.Layer\'>"
+ is_instance: "<class \'tensorflow.python.training.checkpointable.base.CheckpointableBase\'>"
+ is_instance: "<type \'object\'>"
+ member {
+ name: "activity_regularizer"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "dtype"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "inbound_nodes"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "input"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "input_mask"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "input_shape"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "losses"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "name"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "non_trainable_variables"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "non_trainable_weights"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "outbound_nodes"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "output"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "output_mask"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "output_shape"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "trainable_variables"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "trainable_weights"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "updates"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "variables"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "weights"
+ mtype: "<type \'property\'>"
+ }
+ member_method {
+ name: "__init__"
+ argspec: "args=[\'self\', \'pool_size\', \'strides\', \'padding\', \'data_format\'], varargs=None, keywords=kwargs, defaults=[\'(2, 2)\', \'None\', \'valid\', \'None\'], "
+ }
+ member_method {
+ name: "add_loss"
+ argspec: "args=[\'self\', \'losses\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "add_update"
+ argspec: "args=[\'self\', \'updates\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "add_variable"
+ argspec: "args=[\'self\'], varargs=args, keywords=kwargs, defaults=None"
+ }
+ member_method {
+ name: "add_weight"
+ argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'synchronization\', \'aggregation\'], varargs=None, keywords=kwargs, defaults=[\'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'VariableSynchronization.AUTO\', \'VariableAggregation.NONE\'], "
+ }
+ member_method {
+ name: "apply"
+ argspec: "args=[\'self\', \'inputs\'], varargs=args, keywords=kwargs, defaults=None"
+ }
+ member_method {
+ name: "build"
+ argspec: "args=[\'self\', \'input_shape\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "call"
+ argspec: "args=[\'self\', \'inputs\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "compute_mask"
+ argspec: "args=[\'self\', \'inputs\', \'mask\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "compute_output_shape"
+ argspec: "args=[\'self\', \'input_shape\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "count_params"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "from_config"
+ argspec: "args=[\'cls\', \'config\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_config"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_input_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_input_mask_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_input_shape_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_losses_for"
+ argspec: "args=[\'self\', \'inputs\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_output_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_output_mask_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_output_shape_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_updates_for"
+ argspec: "args=[\'self\', \'inputs\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_weights"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "set_weights"
+ argspec: "args=[\'self\', \'weights\'], varargs=None, keywords=None, defaults=None"
+ }
+}
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-average-pooling3-d.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-average-pooling3-d.pbtxt
new file mode 100644
index 0000000000..628447ce35
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-average-pooling3-d.pbtxt
@@ -0,0 +1,176 @@
+path: "tensorflow.keras.layers.AveragePooling3D"
+tf_class {
+ is_instance: "<class \'tensorflow.python.keras.layers.pooling.AveragePooling3D\'>"
+ is_instance: "<class \'tensorflow.python.keras.layers.pooling.Pooling3D\'>"
+ is_instance: "<class \'tensorflow.python.keras.engine.base_layer.Layer\'>"
+ is_instance: "<class \'tensorflow.python.training.checkpointable.base.CheckpointableBase\'>"
+ is_instance: "<type \'object\'>"
+ member {
+ name: "activity_regularizer"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "dtype"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "inbound_nodes"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "input"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "input_mask"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "input_shape"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "losses"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "name"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "non_trainable_variables"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "non_trainable_weights"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "outbound_nodes"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "output"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "output_mask"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "output_shape"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "trainable_variables"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "trainable_weights"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "updates"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "variables"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "weights"
+ mtype: "<type \'property\'>"
+ }
+ member_method {
+ name: "__init__"
+ argspec: "args=[\'self\', \'pool_size\', \'strides\', \'padding\', \'data_format\'], varargs=None, keywords=kwargs, defaults=[\'(2, 2, 2)\', \'None\', \'valid\', \'None\'], "
+ }
+ member_method {
+ name: "add_loss"
+ argspec: "args=[\'self\', \'losses\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "add_update"
+ argspec: "args=[\'self\', \'updates\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "add_variable"
+ argspec: "args=[\'self\'], varargs=args, keywords=kwargs, defaults=None"
+ }
+ member_method {
+ name: "add_weight"
+ argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'synchronization\', \'aggregation\'], varargs=None, keywords=kwargs, defaults=[\'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'VariableSynchronization.AUTO\', \'VariableAggregation.NONE\'], "
+ }
+ member_method {
+ name: "apply"
+ argspec: "args=[\'self\', \'inputs\'], varargs=args, keywords=kwargs, defaults=None"
+ }
+ member_method {
+ name: "build"
+ argspec: "args=[\'self\', \'input_shape\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "call"
+ argspec: "args=[\'self\', \'inputs\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "compute_mask"
+ argspec: "args=[\'self\', \'inputs\', \'mask\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "compute_output_shape"
+ argspec: "args=[\'self\', \'input_shape\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "count_params"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "from_config"
+ argspec: "args=[\'cls\', \'config\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_config"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_input_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_input_mask_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_input_shape_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_losses_for"
+ argspec: "args=[\'self\', \'inputs\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_output_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_output_mask_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_output_shape_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_updates_for"
+ argspec: "args=[\'self\', \'inputs\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_weights"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "set_weights"
+ argspec: "args=[\'self\', \'weights\'], varargs=None, keywords=None, defaults=None"
+ }
+}
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-average.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-average.pbtxt
new file mode 100644
index 0000000000..f03c986c22
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-average.pbtxt
@@ -0,0 +1,176 @@
+path: "tensorflow.keras.layers.Average"
+tf_class {
+ is_instance: "<class \'tensorflow.python.keras.layers.merge.Average\'>"
+ is_instance: "<class \'tensorflow.python.keras.layers.merge._Merge\'>"
+ is_instance: "<class \'tensorflow.python.keras.engine.base_layer.Layer\'>"
+ is_instance: "<class \'tensorflow.python.training.checkpointable.base.CheckpointableBase\'>"
+ is_instance: "<type \'object\'>"
+ member {
+ name: "activity_regularizer"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "dtype"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "inbound_nodes"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "input"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "input_mask"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "input_shape"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "losses"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "name"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "non_trainable_variables"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "non_trainable_weights"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "outbound_nodes"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "output"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "output_mask"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "output_shape"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "trainable_variables"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "trainable_weights"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "updates"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "variables"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "weights"
+ mtype: "<type \'property\'>"
+ }
+ member_method {
+ name: "__init__"
+ argspec: "args=[\'self\'], varargs=None, keywords=kwargs, defaults=None"
+ }
+ member_method {
+ name: "add_loss"
+ argspec: "args=[\'self\', \'losses\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "add_update"
+ argspec: "args=[\'self\', \'updates\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "add_variable"
+ argspec: "args=[\'self\'], varargs=args, keywords=kwargs, defaults=None"
+ }
+ member_method {
+ name: "add_weight"
+ argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'synchronization\', \'aggregation\'], varargs=None, keywords=kwargs, defaults=[\'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'VariableSynchronization.AUTO\', \'VariableAggregation.NONE\'], "
+ }
+ member_method {
+ name: "apply"
+ argspec: "args=[\'self\', \'inputs\'], varargs=args, keywords=kwargs, defaults=None"
+ }
+ member_method {
+ name: "build"
+ argspec: "args=[\'instance\', \'input_shape\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "call"
+ argspec: "args=[\'self\', \'inputs\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "compute_mask"
+ argspec: "args=[\'self\', \'inputs\', \'mask\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "compute_output_shape"
+ argspec: "args=[\'instance\', \'input_shape\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "count_params"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "from_config"
+ argspec: "args=[\'cls\', \'config\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_config"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_input_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_input_mask_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_input_shape_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_losses_for"
+ argspec: "args=[\'self\', \'inputs\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_output_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_output_mask_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_output_shape_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_updates_for"
+ argspec: "args=[\'self\', \'inputs\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_weights"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "set_weights"
+ argspec: "args=[\'self\', \'weights\'], varargs=None, keywords=None, defaults=None"
+ }
+}
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-avg-pool1-d.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-avg-pool1-d.pbtxt
new file mode 100644
index 0000000000..c440604aae
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-avg-pool1-d.pbtxt
@@ -0,0 +1,176 @@
+path: "tensorflow.keras.layers.AvgPool1D"
+tf_class {
+ is_instance: "<class \'tensorflow.python.keras.layers.pooling.AveragePooling1D\'>"
+ is_instance: "<class \'tensorflow.python.keras.layers.pooling.Pooling1D\'>"
+ is_instance: "<class \'tensorflow.python.keras.engine.base_layer.Layer\'>"
+ is_instance: "<class \'tensorflow.python.training.checkpointable.base.CheckpointableBase\'>"
+ is_instance: "<type \'object\'>"
+ member {
+ name: "activity_regularizer"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "dtype"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "inbound_nodes"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "input"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "input_mask"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "input_shape"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "losses"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "name"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "non_trainable_variables"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "non_trainable_weights"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "outbound_nodes"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "output"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "output_mask"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "output_shape"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "trainable_variables"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "trainable_weights"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "updates"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "variables"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "weights"
+ mtype: "<type \'property\'>"
+ }
+ member_method {
+ name: "__init__"
+ argspec: "args=[\'self\', \'pool_size\', \'strides\', \'padding\', \'data_format\'], varargs=None, keywords=kwargs, defaults=[\'2\', \'None\', \'valid\', \'None\'], "
+ }
+ member_method {
+ name: "add_loss"
+ argspec: "args=[\'self\', \'losses\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "add_update"
+ argspec: "args=[\'self\', \'updates\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "add_variable"
+ argspec: "args=[\'self\'], varargs=args, keywords=kwargs, defaults=None"
+ }
+ member_method {
+ name: "add_weight"
+ argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'synchronization\', \'aggregation\'], varargs=None, keywords=kwargs, defaults=[\'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'VariableSynchronization.AUTO\', \'VariableAggregation.NONE\'], "
+ }
+ member_method {
+ name: "apply"
+ argspec: "args=[\'self\', \'inputs\'], varargs=args, keywords=kwargs, defaults=None"
+ }
+ member_method {
+ name: "build"
+ argspec: "args=[\'self\', \'input_shape\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "call"
+ argspec: "args=[\'self\', \'inputs\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "compute_mask"
+ argspec: "args=[\'self\', \'inputs\', \'mask\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "compute_output_shape"
+ argspec: "args=[\'self\', \'input_shape\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "count_params"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "from_config"
+ argspec: "args=[\'cls\', \'config\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_config"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_input_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_input_mask_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_input_shape_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_losses_for"
+ argspec: "args=[\'self\', \'inputs\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_output_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_output_mask_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_output_shape_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_updates_for"
+ argspec: "args=[\'self\', \'inputs\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_weights"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "set_weights"
+ argspec: "args=[\'self\', \'weights\'], varargs=None, keywords=None, defaults=None"
+ }
+}
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-avg-pool2-d.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-avg-pool2-d.pbtxt
new file mode 100644
index 0000000000..a01eaf8a12
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-avg-pool2-d.pbtxt
@@ -0,0 +1,176 @@
+path: "tensorflow.keras.layers.AvgPool2D"
+tf_class {
+ is_instance: "<class \'tensorflow.python.keras.layers.pooling.AveragePooling2D\'>"
+ is_instance: "<class \'tensorflow.python.keras.layers.pooling.Pooling2D\'>"
+ is_instance: "<class \'tensorflow.python.keras.engine.base_layer.Layer\'>"
+ is_instance: "<class \'tensorflow.python.training.checkpointable.base.CheckpointableBase\'>"
+ is_instance: "<type \'object\'>"
+ member {
+ name: "activity_regularizer"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "dtype"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "inbound_nodes"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "input"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "input_mask"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "input_shape"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "losses"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "name"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "non_trainable_variables"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "non_trainable_weights"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "outbound_nodes"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "output"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "output_mask"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "output_shape"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "trainable_variables"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "trainable_weights"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "updates"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "variables"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "weights"
+ mtype: "<type \'property\'>"
+ }
+ member_method {
+ name: "__init__"
+ argspec: "args=[\'self\', \'pool_size\', \'strides\', \'padding\', \'data_format\'], varargs=None, keywords=kwargs, defaults=[\'(2, 2)\', \'None\', \'valid\', \'None\'], "
+ }
+ member_method {
+ name: "add_loss"
+ argspec: "args=[\'self\', \'losses\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "add_update"
+ argspec: "args=[\'self\', \'updates\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "add_variable"
+ argspec: "args=[\'self\'], varargs=args, keywords=kwargs, defaults=None"
+ }
+ member_method {
+ name: "add_weight"
+ argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'synchronization\', \'aggregation\'], varargs=None, keywords=kwargs, defaults=[\'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'VariableSynchronization.AUTO\', \'VariableAggregation.NONE\'], "
+ }
+ member_method {
+ name: "apply"
+ argspec: "args=[\'self\', \'inputs\'], varargs=args, keywords=kwargs, defaults=None"
+ }
+ member_method {
+ name: "build"
+ argspec: "args=[\'self\', \'input_shape\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "call"
+ argspec: "args=[\'self\', \'inputs\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "compute_mask"
+ argspec: "args=[\'self\', \'inputs\', \'mask\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "compute_output_shape"
+ argspec: "args=[\'self\', \'input_shape\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "count_params"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "from_config"
+ argspec: "args=[\'cls\', \'config\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_config"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_input_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_input_mask_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_input_shape_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_losses_for"
+ argspec: "args=[\'self\', \'inputs\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_output_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_output_mask_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_output_shape_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_updates_for"
+ argspec: "args=[\'self\', \'inputs\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_weights"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "set_weights"
+ argspec: "args=[\'self\', \'weights\'], varargs=None, keywords=None, defaults=None"
+ }
+}
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-avg-pool3-d.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-avg-pool3-d.pbtxt
new file mode 100644
index 0000000000..0d6698f2ef
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-avg-pool3-d.pbtxt
@@ -0,0 +1,176 @@
+path: "tensorflow.keras.layers.AvgPool3D"
+tf_class {
+ is_instance: "<class \'tensorflow.python.keras.layers.pooling.AveragePooling3D\'>"
+ is_instance: "<class \'tensorflow.python.keras.layers.pooling.Pooling3D\'>"
+ is_instance: "<class \'tensorflow.python.keras.engine.base_layer.Layer\'>"
+ is_instance: "<class \'tensorflow.python.training.checkpointable.base.CheckpointableBase\'>"
+ is_instance: "<type \'object\'>"
+ member {
+ name: "activity_regularizer"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "dtype"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "inbound_nodes"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "input"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "input_mask"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "input_shape"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "losses"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "name"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "non_trainable_variables"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "non_trainable_weights"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "outbound_nodes"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "output"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "output_mask"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "output_shape"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "trainable_variables"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "trainable_weights"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "updates"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "variables"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "weights"
+ mtype: "<type \'property\'>"
+ }
+ member_method {
+ name: "__init__"
+ argspec: "args=[\'self\', \'pool_size\', \'strides\', \'padding\', \'data_format\'], varargs=None, keywords=kwargs, defaults=[\'(2, 2, 2)\', \'None\', \'valid\', \'None\'], "
+ }
+ member_method {
+ name: "add_loss"
+ argspec: "args=[\'self\', \'losses\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "add_update"
+ argspec: "args=[\'self\', \'updates\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "add_variable"
+ argspec: "args=[\'self\'], varargs=args, keywords=kwargs, defaults=None"
+ }
+ member_method {
+ name: "add_weight"
+ argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'synchronization\', \'aggregation\'], varargs=None, keywords=kwargs, defaults=[\'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'VariableSynchronization.AUTO\', \'VariableAggregation.NONE\'], "
+ }
+ member_method {
+ name: "apply"
+ argspec: "args=[\'self\', \'inputs\'], varargs=args, keywords=kwargs, defaults=None"
+ }
+ member_method {
+ name: "build"
+ argspec: "args=[\'self\', \'input_shape\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "call"
+ argspec: "args=[\'self\', \'inputs\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "compute_mask"
+ argspec: "args=[\'self\', \'inputs\', \'mask\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "compute_output_shape"
+ argspec: "args=[\'self\', \'input_shape\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "count_params"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "from_config"
+ argspec: "args=[\'cls\', \'config\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_config"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_input_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_input_mask_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_input_shape_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_losses_for"
+ argspec: "args=[\'self\', \'inputs\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_output_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_output_mask_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_output_shape_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_updates_for"
+ argspec: "args=[\'self\', \'inputs\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_weights"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "set_weights"
+ argspec: "args=[\'self\', \'weights\'], varargs=None, keywords=None, defaults=None"
+ }
+}
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-batch-normalization.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-batch-normalization.pbtxt
new file mode 100644
index 0000000000..f1b23be48f
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-batch-normalization.pbtxt
@@ -0,0 +1,175 @@
+path: "tensorflow.keras.layers.BatchNormalization"
+tf_class {
+ is_instance: "<class \'tensorflow.python.keras.layers.normalization.BatchNormalization\'>"
+ is_instance: "<class \'tensorflow.python.keras.engine.base_layer.Layer\'>"
+ is_instance: "<class \'tensorflow.python.training.checkpointable.base.CheckpointableBase\'>"
+ is_instance: "<type \'object\'>"
+ member {
+ name: "activity_regularizer"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "dtype"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "inbound_nodes"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "input"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "input_mask"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "input_shape"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "losses"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "name"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "non_trainable_variables"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "non_trainable_weights"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "outbound_nodes"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "output"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "output_mask"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "output_shape"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "trainable_variables"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "trainable_weights"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "updates"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "variables"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "weights"
+ mtype: "<type \'property\'>"
+ }
+ member_method {
+ name: "__init__"
+ argspec: "args=[\'self\', \'axis\', \'momentum\', \'epsilon\', \'center\', \'scale\', \'beta_initializer\', \'gamma_initializer\', \'moving_mean_initializer\', \'moving_variance_initializer\', \'beta_regularizer\', \'gamma_regularizer\', \'beta_constraint\', \'gamma_constraint\', \'renorm\', \'renorm_clipping\', \'renorm_momentum\', \'fused\', \'trainable\', \'virtual_batch_size\', \'adjustment\', \'name\'], varargs=None, keywords=kwargs, defaults=[\'-1\', \'0.99\', \'0.001\', \'True\', \'True\', \'zeros\', \'ones\', \'zeros\', \'ones\', \'None\', \'None\', \'None\', \'None\', \'False\', \'None\', \'0.99\', \'None\', \'True\', \'None\', \'None\', \'None\'], "
+ }
+ member_method {
+ name: "add_loss"
+ argspec: "args=[\'self\', \'losses\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "add_update"
+ argspec: "args=[\'self\', \'updates\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "add_variable"
+ argspec: "args=[\'self\'], varargs=args, keywords=kwargs, defaults=None"
+ }
+ member_method {
+ name: "add_weight"
+ argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'synchronization\', \'aggregation\'], varargs=None, keywords=kwargs, defaults=[\'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'VariableSynchronization.AUTO\', \'VariableAggregation.NONE\'], "
+ }
+ member_method {
+ name: "apply"
+ argspec: "args=[\'self\', \'inputs\'], varargs=args, keywords=kwargs, defaults=None"
+ }
+ member_method {
+ name: "build"
+ argspec: "args=[\'self\', \'input_shape\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "call"
+ argspec: "args=[\'self\', \'inputs\', \'training\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "compute_mask"
+ argspec: "args=[\'self\', \'inputs\', \'mask\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "compute_output_shape"
+ argspec: "args=[\'self\', \'input_shape\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "count_params"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "from_config"
+ argspec: "args=[\'cls\', \'config\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_config"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_input_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_input_mask_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_input_shape_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_losses_for"
+ argspec: "args=[\'self\', \'inputs\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_output_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_output_mask_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_output_shape_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_updates_for"
+ argspec: "args=[\'self\', \'inputs\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_weights"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "set_weights"
+ argspec: "args=[\'self\', \'weights\'], varargs=None, keywords=None, defaults=None"
+ }
+}
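
A minimal usage sketch for the BatchNormalization layer described by the golden above: the argspec lists defaults axis=-1, momentum=0.99, epsilon=0.001, and call() takes a training flag. Variable names and the (None, 64) input shape are illustrative, not taken from the golden.

    import tensorflow as tf

    # Tiny functional model that normalizes a 64-feature input.
    inputs = tf.keras.Input(shape=(64,))
    # axis=-1 and momentum=0.99 simply restate the argspec defaults.
    bn = tf.keras.layers.BatchNormalization(axis=-1, momentum=0.99)
    outputs = bn(inputs, training=True)  # training toggles batch vs. moving statistics
    model = tf.keras.Model(inputs, outputs)
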
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-bidirectional.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-bidirectional.pbtxt
new file mode 100644
index 0000000000..0672cd5b7b
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-bidirectional.pbtxt
@@ -0,0 +1,188 @@
+path: "tensorflow.keras.layers.Bidirectional"
+tf_class {
+ is_instance: "<class \'tensorflow.python.keras.layers.wrappers.Bidirectional\'>"
+ is_instance: "<class \'tensorflow.python.keras.layers.wrappers.Wrapper\'>"
+ is_instance: "<class \'tensorflow.python.keras.engine.base_layer.Layer\'>"
+ is_instance: "<class \'tensorflow.python.training.checkpointable.base.CheckpointableBase\'>"
+ is_instance: "<type \'object\'>"
+ member {
+ name: "activity_regularizer"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "constraints"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "dtype"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "inbound_nodes"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "input"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "input_mask"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "input_shape"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "losses"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "name"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "non_trainable_variables"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "non_trainable_weights"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "outbound_nodes"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "output"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "output_mask"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "output_shape"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "trainable"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "trainable_variables"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "trainable_weights"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "updates"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "variables"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "weights"
+ mtype: "<type \'property\'>"
+ }
+ member_method {
+ name: "__init__"
+ argspec: "args=[\'self\', \'layer\', \'merge_mode\', \'weights\'], varargs=None, keywords=kwargs, defaults=[\'concat\', \'None\'], "
+ }
+ member_method {
+ name: "add_loss"
+ argspec: "args=[\'self\', \'losses\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "add_update"
+ argspec: "args=[\'self\', \'updates\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "add_variable"
+ argspec: "args=[\'self\'], varargs=args, keywords=kwargs, defaults=None"
+ }
+ member_method {
+ name: "add_weight"
+ argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'synchronization\', \'aggregation\'], varargs=None, keywords=kwargs, defaults=[\'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'VariableSynchronization.AUTO\', \'VariableAggregation.NONE\'], "
+ }
+ member_method {
+ name: "apply"
+ argspec: "args=[\'self\', \'inputs\'], varargs=args, keywords=kwargs, defaults=None"
+ }
+ member_method {
+ name: "build"
+ argspec: "args=[\'self\', \'input_shape\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "call"
+ argspec: "args=[\'self\', \'inputs\', \'training\', \'mask\', \'initial_state\', \'constants\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'None\'], "
+ }
+ member_method {
+ name: "compute_mask"
+ argspec: "args=[\'self\', \'inputs\', \'mask\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "compute_output_shape"
+ argspec: "args=[\'instance\', \'input_shape\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "count_params"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "from_config"
+ argspec: "args=[\'cls\', \'config\', \'custom_objects\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "get_config"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_input_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_input_mask_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_input_shape_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_losses_for"
+ argspec: "args=[\'self\', \'inputs\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_output_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_output_mask_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_output_shape_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_updates_for"
+ argspec: "args=[\'self\', \'inputs\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_weights"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "reset_states"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "set_weights"
+ argspec: "args=[\'self\', \'weights\'], varargs=None, keywords=None, defaults=None"
+ }
+}
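
Per the argspec above, Bidirectional wraps another layer and defaults to merge_mode='concat'. A sketch wrapping an LSTM; the layer size and input shape are illustrative assumptions.

    import tensorflow as tf

    inputs = tf.keras.Input(shape=(10, 8))  # (timesteps, features), illustrative
    # merge_mode='concat' restates the default: forward and backward outputs are concatenated.
    bidir = tf.keras.layers.Bidirectional(tf.keras.layers.LSTM(32), merge_mode='concat')
    outputs = bidir(inputs)                 # shape (None, 64): 32 units per direction
    model = tf.keras.Model(inputs, outputs)
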
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-concatenate.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-concatenate.pbtxt
new file mode 100644
index 0000000000..b25ae1e82e
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-concatenate.pbtxt
@@ -0,0 +1,176 @@
+path: "tensorflow.keras.layers.Concatenate"
+tf_class {
+ is_instance: "<class \'tensorflow.python.keras.layers.merge.Concatenate\'>"
+ is_instance: "<class \'tensorflow.python.keras.layers.merge._Merge\'>"
+ is_instance: "<class \'tensorflow.python.keras.engine.base_layer.Layer\'>"
+ is_instance: "<class \'tensorflow.python.training.checkpointable.base.CheckpointableBase\'>"
+ is_instance: "<type \'object\'>"
+ member {
+ name: "activity_regularizer"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "dtype"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "inbound_nodes"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "input"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "input_mask"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "input_shape"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "losses"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "name"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "non_trainable_variables"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "non_trainable_weights"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "outbound_nodes"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "output"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "output_mask"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "output_shape"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "trainable_variables"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "trainable_weights"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "updates"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "variables"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "weights"
+ mtype: "<type \'property\'>"
+ }
+ member_method {
+ name: "__init__"
+ argspec: "args=[\'self\', \'axis\'], varargs=None, keywords=kwargs, defaults=[\'-1\'], "
+ }
+ member_method {
+ name: "add_loss"
+ argspec: "args=[\'self\', \'losses\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "add_update"
+ argspec: "args=[\'self\', \'updates\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "add_variable"
+ argspec: "args=[\'self\'], varargs=args, keywords=kwargs, defaults=None"
+ }
+ member_method {
+ name: "add_weight"
+ argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'synchronization\', \'aggregation\'], varargs=None, keywords=kwargs, defaults=[\'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'VariableSynchronization.AUTO\', \'VariableAggregation.NONE\'], "
+ }
+ member_method {
+ name: "apply"
+ argspec: "args=[\'self\', \'inputs\'], varargs=args, keywords=kwargs, defaults=None"
+ }
+ member_method {
+ name: "build"
+ argspec: "args=[\'instance\', \'input_shape\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "call"
+ argspec: "args=[\'self\', \'inputs\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "compute_mask"
+ argspec: "args=[\'self\', \'inputs\', \'mask\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "compute_output_shape"
+ argspec: "args=[\'instance\', \'input_shape\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "count_params"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "from_config"
+ argspec: "args=[\'cls\', \'config\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_config"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_input_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_input_mask_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_input_shape_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_losses_for"
+ argspec: "args=[\'self\', \'inputs\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_output_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_output_mask_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_output_shape_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_updates_for"
+ argspec: "args=[\'self\', \'inputs\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_weights"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "set_weights"
+ argspec: "args=[\'self\', \'weights\'], varargs=None, keywords=None, defaults=None"
+ }
+}
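
Concatenate's only constructor argument is axis (default -1), and its call() takes a list of inputs. A sketch merging two branches along the feature axis; the branch shapes are illustrative.

    import tensorflow as tf

    a = tf.keras.Input(shape=(16,))
    b = tf.keras.Input(shape=(8,))
    # axis=-1 restates the default; the merged tensor has 16 + 8 = 24 features.
    merged = tf.keras.layers.Concatenate(axis=-1)([a, b])
    model = tf.keras.Model([a, b], merged)
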
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-conv-l-s-t-m2-d.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-conv-l-s-t-m2-d.pbtxt
new file mode 100644
index 0000000000..bb1918eba6
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-conv-l-s-t-m2-d.pbtxt
@@ -0,0 +1,273 @@
+path: "tensorflow.keras.layers.ConvLSTM2D"
+tf_class {
+ is_instance: "<class \'tensorflow.python.keras.layers.convolutional_recurrent.ConvLSTM2D\'>"
+ is_instance: "<class \'tensorflow.python.keras.layers.convolutional_recurrent.ConvRNN2D\'>"
+ is_instance: "<class \'tensorflow.python.keras.layers.recurrent.RNN\'>"
+ is_instance: "<class \'tensorflow.python.keras.engine.base_layer.Layer\'>"
+ is_instance: "<class \'tensorflow.python.training.checkpointable.base.CheckpointableBase\'>"
+ is_instance: "<type \'object\'>"
+ member {
+ name: "activation"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "activity_regularizer"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "bias_constraint"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "bias_initializer"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "bias_regularizer"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "data_format"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "dilation_rate"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "dropout"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "dtype"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "filters"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "inbound_nodes"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "input"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "input_mask"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "input_shape"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "kernel_constraint"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "kernel_initializer"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "kernel_regularizer"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "kernel_size"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "losses"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "name"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "non_trainable_variables"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "non_trainable_weights"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "outbound_nodes"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "output"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "output_mask"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "output_shape"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "padding"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "recurrent_activation"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "recurrent_constraint"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "recurrent_dropout"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "recurrent_initializer"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "recurrent_regularizer"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "states"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "strides"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "trainable_variables"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "trainable_weights"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "unit_forget_bias"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "updates"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "use_bias"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "variables"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "weights"
+ mtype: "<type \'property\'>"
+ }
+ member_method {
+ name: "__init__"
+ argspec: "args=[\'self\', \'filters\', \'kernel_size\', \'strides\', \'padding\', \'data_format\', \'dilation_rate\', \'activation\', \'recurrent_activation\', \'use_bias\', \'kernel_initializer\', \'recurrent_initializer\', \'bias_initializer\', \'unit_forget_bias\', \'kernel_regularizer\', \'recurrent_regularizer\', \'bias_regularizer\', \'activity_regularizer\', \'kernel_constraint\', \'recurrent_constraint\', \'bias_constraint\', \'return_sequences\', \'go_backwards\', \'stateful\', \'dropout\', \'recurrent_dropout\'], varargs=None, keywords=kwargs, defaults=[\'(1, 1)\', \'valid\', \'None\', \'(1, 1)\', \'tanh\', \'hard_sigmoid\', \'True\', \'glorot_uniform\', \'orthogonal\', \'zeros\', \'True\', \'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'False\', \'False\', \'False\', \'0.0\', \'0.0\'], "
+ }
+ member_method {
+ name: "add_loss"
+ argspec: "args=[\'self\', \'losses\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "add_update"
+ argspec: "args=[\'self\', \'updates\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "add_variable"
+ argspec: "args=[\'self\'], varargs=args, keywords=kwargs, defaults=None"
+ }
+ member_method {
+ name: "add_weight"
+ argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'synchronization\', \'aggregation\'], varargs=None, keywords=kwargs, defaults=[\'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'VariableSynchronization.AUTO\', \'VariableAggregation.NONE\'], "
+ }
+ member_method {
+ name: "apply"
+ argspec: "args=[\'self\', \'inputs\'], varargs=args, keywords=kwargs, defaults=None"
+ }
+ member_method {
+ name: "build"
+ argspec: "args=[\'instance\', \'input_shape\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "call"
+ argspec: "args=[\'self\', \'inputs\', \'mask\', \'training\', \'initial_state\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\'], "
+ }
+ member_method {
+ name: "compute_mask"
+ argspec: "args=[\'self\', \'inputs\', \'mask\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "compute_output_shape"
+ argspec: "args=[\'instance\', \'input_shape\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "count_params"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "from_config"
+ argspec: "args=[\'cls\', \'config\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_config"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_initial_state"
+ argspec: "args=[\'self\', \'inputs\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_input_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_input_mask_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_input_shape_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_losses_for"
+ argspec: "args=[\'self\', \'inputs\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_output_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_output_mask_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_output_shape_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_updates_for"
+ argspec: "args=[\'self\', \'inputs\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_weights"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "reset_states"
+ argspec: "args=[\'self\', \'states\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "set_weights"
+ argspec: "args=[\'self\', \'weights\'], varargs=None, keywords=None, defaults=None"
+ }
+}
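
ConvLSTM2D combines Conv2D-style arguments (filters, kernel_size, padding, data_format) with RNN-style ones (return_sequences, stateful, dropout), as the argspec above shows. A sketch over a 5-D (timesteps, rows, cols, channels) input; the shape is an illustrative assumption.

    import tensorflow as tf

    # Variable-length sequence of 40x40 single-channel frames, channels_last.
    inputs = tf.keras.Input(shape=(None, 40, 40, 1))
    layer = tf.keras.layers.ConvLSTM2D(filters=16, kernel_size=(3, 3),
                                       padding='same', return_sequences=True)
    outputs = layer(inputs)  # (None, timesteps, 40, 40, 16)
    model = tf.keras.Model(inputs, outputs)
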
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-conv1-d.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-conv1-d.pbtxt
new file mode 100644
index 0000000000..16e0fd5a31
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-conv1-d.pbtxt
@@ -0,0 +1,176 @@
+path: "tensorflow.keras.layers.Conv1D"
+tf_class {
+ is_instance: "<class \'tensorflow.python.keras.layers.convolutional.Conv1D\'>"
+ is_instance: "<class \'tensorflow.python.keras.layers.convolutional.Conv\'>"
+ is_instance: "<class \'tensorflow.python.keras.engine.base_layer.Layer\'>"
+ is_instance: "<class \'tensorflow.python.training.checkpointable.base.CheckpointableBase\'>"
+ is_instance: "<type \'object\'>"
+ member {
+ name: "activity_regularizer"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "dtype"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "inbound_nodes"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "input"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "input_mask"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "input_shape"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "losses"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "name"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "non_trainable_variables"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "non_trainable_weights"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "outbound_nodes"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "output"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "output_mask"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "output_shape"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "trainable_variables"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "trainable_weights"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "updates"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "variables"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "weights"
+ mtype: "<type \'property\'>"
+ }
+ member_method {
+ name: "__init__"
+ argspec: "args=[\'self\', \'filters\', \'kernel_size\', \'strides\', \'padding\', \'data_format\', \'dilation_rate\', \'activation\', \'use_bias\', \'kernel_initializer\', \'bias_initializer\', \'kernel_regularizer\', \'bias_regularizer\', \'activity_regularizer\', \'kernel_constraint\', \'bias_constraint\'], varargs=None, keywords=kwargs, defaults=[\'1\', \'valid\', \'channels_last\', \'1\', \'None\', \'True\', \'glorot_uniform\', \'zeros\', \'None\', \'None\', \'None\', \'None\', \'None\'], "
+ }
+ member_method {
+ name: "add_loss"
+ argspec: "args=[\'self\', \'losses\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "add_update"
+ argspec: "args=[\'self\', \'updates\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "add_variable"
+ argspec: "args=[\'self\'], varargs=args, keywords=kwargs, defaults=None"
+ }
+ member_method {
+ name: "add_weight"
+ argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'synchronization\', \'aggregation\'], varargs=None, keywords=kwargs, defaults=[\'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'VariableSynchronization.AUTO\', \'VariableAggregation.NONE\'], "
+ }
+ member_method {
+ name: "apply"
+ argspec: "args=[\'self\', \'inputs\'], varargs=args, keywords=kwargs, defaults=None"
+ }
+ member_method {
+ name: "build"
+ argspec: "args=[\'self\', \'input_shape\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "call"
+ argspec: "args=[\'self\', \'inputs\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "compute_mask"
+ argspec: "args=[\'self\', \'inputs\', \'mask\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "compute_output_shape"
+ argspec: "args=[\'self\', \'input_shape\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "count_params"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "from_config"
+ argspec: "args=[\'cls\', \'config\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_config"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_input_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_input_mask_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_input_shape_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_losses_for"
+ argspec: "args=[\'self\', \'inputs\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_output_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_output_mask_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_output_shape_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_updates_for"
+ argspec: "args=[\'self\', \'inputs\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_weights"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "set_weights"
+ argspec: "args=[\'self\', \'weights\'], varargs=None, keywords=None, defaults=None"
+ }
+}
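
Conv1D defaults to strides=1, padding='valid', and data_format='channels_last' per the argspec. A sketch over a (steps, channels) sequence; shapes are illustrative.

    import tensorflow as tf

    inputs = tf.keras.Input(shape=(128, 16))  # 128 steps, 16 channels
    conv = tf.keras.layers.Conv1D(filters=64, kernel_size=3, activation='relu')
    outputs = conv(inputs)                    # (None, 126, 64) with 'valid' padding
    model = tf.keras.Model(inputs, outputs)
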
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-conv2-d-transpose.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-conv2-d-transpose.pbtxt
new file mode 100644
index 0000000000..065bb4d35b
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-conv2-d-transpose.pbtxt
@@ -0,0 +1,177 @@
+path: "tensorflow.keras.layers.Conv2DTranspose"
+tf_class {
+ is_instance: "<class \'tensorflow.python.keras.layers.convolutional.Conv2DTranspose\'>"
+ is_instance: "<class \'tensorflow.python.keras.layers.convolutional.Conv2D\'>"
+ is_instance: "<class \'tensorflow.python.keras.layers.convolutional.Conv\'>"
+ is_instance: "<class \'tensorflow.python.keras.engine.base_layer.Layer\'>"
+ is_instance: "<class \'tensorflow.python.training.checkpointable.base.CheckpointableBase\'>"
+ is_instance: "<type \'object\'>"
+ member {
+ name: "activity_regularizer"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "dtype"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "inbound_nodes"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "input"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "input_mask"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "input_shape"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "losses"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "name"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "non_trainable_variables"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "non_trainable_weights"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "outbound_nodes"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "output"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "output_mask"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "output_shape"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "trainable_variables"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "trainable_weights"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "updates"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "variables"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "weights"
+ mtype: "<type \'property\'>"
+ }
+ member_method {
+ name: "__init__"
+ argspec: "args=[\'self\', \'filters\', \'kernel_size\', \'strides\', \'padding\', \'data_format\', \'activation\', \'use_bias\', \'kernel_initializer\', \'bias_initializer\', \'kernel_regularizer\', \'bias_regularizer\', \'activity_regularizer\', \'kernel_constraint\', \'bias_constraint\'], varargs=None, keywords=kwargs, defaults=[\'(1, 1)\', \'valid\', \'None\', \'None\', \'True\', \'glorot_uniform\', \'zeros\', \'None\', \'None\', \'None\', \'None\', \'None\'], "
+ }
+ member_method {
+ name: "add_loss"
+ argspec: "args=[\'self\', \'losses\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "add_update"
+ argspec: "args=[\'self\', \'updates\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "add_variable"
+ argspec: "args=[\'self\'], varargs=args, keywords=kwargs, defaults=None"
+ }
+ member_method {
+ name: "add_weight"
+ argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'synchronization\', \'aggregation\'], varargs=None, keywords=kwargs, defaults=[\'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'VariableSynchronization.AUTO\', \'VariableAggregation.NONE\'], "
+ }
+ member_method {
+ name: "apply"
+ argspec: "args=[\'self\', \'inputs\'], varargs=args, keywords=kwargs, defaults=None"
+ }
+ member_method {
+ name: "build"
+ argspec: "args=[\'self\', \'input_shape\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "call"
+ argspec: "args=[\'self\', \'inputs\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "compute_mask"
+ argspec: "args=[\'self\', \'inputs\', \'mask\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "compute_output_shape"
+ argspec: "args=[\'self\', \'input_shape\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "count_params"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "from_config"
+ argspec: "args=[\'cls\', \'config\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_config"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_input_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_input_mask_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_input_shape_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_losses_for"
+ argspec: "args=[\'self\', \'inputs\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_output_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_output_mask_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_output_shape_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_updates_for"
+ argspec: "args=[\'self\', \'inputs\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_weights"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "set_weights"
+ argspec: "args=[\'self\', \'weights\'], varargs=None, keywords=None, defaults=None"
+ }
+}
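
Conv2DTranspose shares Conv2D's constructor, except that this argspec carries no dilation_rate entry. A typical upsampling sketch; shapes are illustrative.

    import tensorflow as tf

    inputs = tf.keras.Input(shape=(14, 14, 32))
    # strides=(2, 2) doubles the spatial resolution; padding='same' keeps it exact.
    deconv = tf.keras.layers.Conv2DTranspose(filters=16, kernel_size=(3, 3),
                                             strides=(2, 2), padding='same')
    outputs = deconv(inputs)                  # (None, 28, 28, 16)
    model = tf.keras.Model(inputs, outputs)
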
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-conv2-d.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-conv2-d.pbtxt
new file mode 100644
index 0000000000..543bae6fa9
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-conv2-d.pbtxt
@@ -0,0 +1,176 @@
+path: "tensorflow.keras.layers.Conv2D"
+tf_class {
+ is_instance: "<class \'tensorflow.python.keras.layers.convolutional.Conv2D\'>"
+ is_instance: "<class \'tensorflow.python.keras.layers.convolutional.Conv\'>"
+ is_instance: "<class \'tensorflow.python.keras.engine.base_layer.Layer\'>"
+ is_instance: "<class \'tensorflow.python.training.checkpointable.base.CheckpointableBase\'>"
+ is_instance: "<type \'object\'>"
+ member {
+ name: "activity_regularizer"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "dtype"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "inbound_nodes"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "input"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "input_mask"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "input_shape"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "losses"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "name"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "non_trainable_variables"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "non_trainable_weights"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "outbound_nodes"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "output"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "output_mask"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "output_shape"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "trainable_variables"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "trainable_weights"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "updates"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "variables"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "weights"
+ mtype: "<type \'property\'>"
+ }
+ member_method {
+ name: "__init__"
+ argspec: "args=[\'self\', \'filters\', \'kernel_size\', \'strides\', \'padding\', \'data_format\', \'dilation_rate\', \'activation\', \'use_bias\', \'kernel_initializer\', \'bias_initializer\', \'kernel_regularizer\', \'bias_regularizer\', \'activity_regularizer\', \'kernel_constraint\', \'bias_constraint\'], varargs=None, keywords=kwargs, defaults=[\'(1, 1)\', \'valid\', \'None\', \'(1, 1)\', \'None\', \'True\', \'glorot_uniform\', \'zeros\', \'None\', \'None\', \'None\', \'None\', \'None\'], "
+ }
+ member_method {
+ name: "add_loss"
+ argspec: "args=[\'self\', \'losses\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "add_update"
+ argspec: "args=[\'self\', \'updates\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "add_variable"
+ argspec: "args=[\'self\'], varargs=args, keywords=kwargs, defaults=None"
+ }
+ member_method {
+ name: "add_weight"
+ argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'synchronization\', \'aggregation\'], varargs=None, keywords=kwargs, defaults=[\'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'VariableSynchronization.AUTO\', \'VariableAggregation.NONE\'], "
+ }
+ member_method {
+ name: "apply"
+ argspec: "args=[\'self\', \'inputs\'], varargs=args, keywords=kwargs, defaults=None"
+ }
+ member_method {
+ name: "build"
+ argspec: "args=[\'self\', \'input_shape\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "call"
+ argspec: "args=[\'self\', \'inputs\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "compute_mask"
+ argspec: "args=[\'self\', \'inputs\', \'mask\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "compute_output_shape"
+ argspec: "args=[\'self\', \'input_shape\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "count_params"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "from_config"
+ argspec: "args=[\'cls\', \'config\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_config"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_input_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_input_mask_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_input_shape_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_losses_for"
+ argspec: "args=[\'self\', \'inputs\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_output_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_output_mask_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_output_shape_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_updates_for"
+ argspec: "args=[\'self\', \'inputs\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_weights"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "set_weights"
+ argspec: "args=[\'self\', \'weights\'], varargs=None, keywords=None, defaults=None"
+ }
+}
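
Conv2D is the standard 2-D convolution, with defaults strides=(1, 1), padding='valid', and dilation_rate=(1, 1) per the argspec. A sketch on an image-like input; the shape is illustrative.

    import tensorflow as tf

    inputs = tf.keras.Input(shape=(28, 28, 1))
    conv = tf.keras.layers.Conv2D(filters=32, kernel_size=(3, 3), activation='relu')
    outputs = conv(inputs)                    # (None, 26, 26, 32) with 'valid' padding
    model = tf.keras.Model(inputs, outputs)
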
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-conv3-d-transpose.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-conv3-d-transpose.pbtxt
new file mode 100644
index 0000000000..c7ba6056f9
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-conv3-d-transpose.pbtxt
@@ -0,0 +1,177 @@
+path: "tensorflow.keras.layers.Conv3DTranspose"
+tf_class {
+ is_instance: "<class \'tensorflow.python.keras.layers.convolutional.Conv3DTranspose\'>"
+ is_instance: "<class \'tensorflow.python.keras.layers.convolutional.Conv3D\'>"
+ is_instance: "<class \'tensorflow.python.keras.layers.convolutional.Conv\'>"
+ is_instance: "<class \'tensorflow.python.keras.engine.base_layer.Layer\'>"
+ is_instance: "<class \'tensorflow.python.training.checkpointable.base.CheckpointableBase\'>"
+ is_instance: "<type \'object\'>"
+ member {
+ name: "activity_regularizer"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "dtype"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "inbound_nodes"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "input"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "input_mask"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "input_shape"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "losses"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "name"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "non_trainable_variables"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "non_trainable_weights"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "outbound_nodes"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "output"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "output_mask"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "output_shape"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "trainable_variables"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "trainable_weights"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "updates"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "variables"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "weights"
+ mtype: "<type \'property\'>"
+ }
+ member_method {
+ name: "__init__"
+ argspec: "args=[\'self\', \'filters\', \'kernel_size\', \'strides\', \'padding\', \'data_format\', \'activation\', \'use_bias\', \'kernel_initializer\', \'bias_initializer\', \'kernel_regularizer\', \'bias_regularizer\', \'activity_regularizer\', \'kernel_constraint\', \'bias_constraint\'], varargs=None, keywords=kwargs, defaults=[\'(1, 1, 1)\', \'valid\', \'None\', \'None\', \'True\', \'glorot_uniform\', \'zeros\', \'None\', \'None\', \'None\', \'None\', \'None\'], "
+ }
+ member_method {
+ name: "add_loss"
+ argspec: "args=[\'self\', \'losses\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "add_update"
+ argspec: "args=[\'self\', \'updates\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "add_variable"
+ argspec: "args=[\'self\'], varargs=args, keywords=kwargs, defaults=None"
+ }
+ member_method {
+ name: "add_weight"
+ argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'synchronization\', \'aggregation\'], varargs=None, keywords=kwargs, defaults=[\'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'VariableSynchronization.AUTO\', \'VariableAggregation.NONE\'], "
+ }
+ member_method {
+ name: "apply"
+ argspec: "args=[\'self\', \'inputs\'], varargs=args, keywords=kwargs, defaults=None"
+ }
+ member_method {
+ name: "build"
+ argspec: "args=[\'self\', \'input_shape\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "call"
+ argspec: "args=[\'self\', \'inputs\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "compute_mask"
+ argspec: "args=[\'self\', \'inputs\', \'mask\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "compute_output_shape"
+ argspec: "args=[\'self\', \'input_shape\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "count_params"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "from_config"
+ argspec: "args=[\'cls\', \'config\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_config"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_input_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_input_mask_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_input_shape_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_losses_for"
+ argspec: "args=[\'self\', \'inputs\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_output_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_output_mask_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_output_shape_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_updates_for"
+ argspec: "args=[\'self\', \'inputs\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_weights"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "set_weights"
+ argspec: "args=[\'self\', \'weights\'], varargs=None, keywords=None, defaults=None"
+ }
+}
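
Conv3DTranspose mirrors Conv2DTranspose with (1, 1, 1) default strides and, per this argspec, likewise no dilation_rate. A volumetric upsampling sketch; the shape is illustrative.

    import tensorflow as tf

    inputs = tf.keras.Input(shape=(8, 8, 8, 16))
    deconv = tf.keras.layers.Conv3DTranspose(filters=8, kernel_size=(3, 3, 3),
                                             strides=(2, 2, 2), padding='same')
    outputs = deconv(inputs)                  # (None, 16, 16, 16, 8)
    model = tf.keras.Model(inputs, outputs)
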
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-conv3-d.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-conv3-d.pbtxt
new file mode 100644
index 0000000000..072943dc2c
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-conv3-d.pbtxt
@@ -0,0 +1,176 @@
+path: "tensorflow.keras.layers.Conv3D"
+tf_class {
+ is_instance: "<class \'tensorflow.python.keras.layers.convolutional.Conv3D\'>"
+ is_instance: "<class \'tensorflow.python.keras.layers.convolutional.Conv\'>"
+ is_instance: "<class \'tensorflow.python.keras.engine.base_layer.Layer\'>"
+ is_instance: "<class \'tensorflow.python.training.checkpointable.base.CheckpointableBase\'>"
+ is_instance: "<type \'object\'>"
+ member {
+ name: "activity_regularizer"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "dtype"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "inbound_nodes"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "input"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "input_mask"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "input_shape"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "losses"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "name"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "non_trainable_variables"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "non_trainable_weights"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "outbound_nodes"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "output"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "output_mask"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "output_shape"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "trainable_variables"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "trainable_weights"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "updates"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "variables"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "weights"
+ mtype: "<type \'property\'>"
+ }
+ member_method {
+ name: "__init__"
+ argspec: "args=[\'self\', \'filters\', \'kernel_size\', \'strides\', \'padding\', \'data_format\', \'dilation_rate\', \'activation\', \'use_bias\', \'kernel_initializer\', \'bias_initializer\', \'kernel_regularizer\', \'bias_regularizer\', \'activity_regularizer\', \'kernel_constraint\', \'bias_constraint\'], varargs=None, keywords=kwargs, defaults=[\'(1, 1, 1)\', \'valid\', \'None\', \'(1, 1, 1)\', \'None\', \'True\', \'glorot_uniform\', \'zeros\', \'None\', \'None\', \'None\', \'None\', \'None\'], "
+ }
+ member_method {
+ name: "add_loss"
+ argspec: "args=[\'self\', \'losses\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "add_update"
+ argspec: "args=[\'self\', \'updates\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "add_variable"
+ argspec: "args=[\'self\'], varargs=args, keywords=kwargs, defaults=None"
+ }
+ member_method {
+ name: "add_weight"
+ argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'synchronization\', \'aggregation\'], varargs=None, keywords=kwargs, defaults=[\'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'VariableSynchronization.AUTO\', \'VariableAggregation.NONE\'], "
+ }
+ member_method {
+ name: "apply"
+ argspec: "args=[\'self\', \'inputs\'], varargs=args, keywords=kwargs, defaults=None"
+ }
+ member_method {
+ name: "build"
+ argspec: "args=[\'self\', \'input_shape\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "call"
+ argspec: "args=[\'self\', \'inputs\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "compute_mask"
+ argspec: "args=[\'self\', \'inputs\', \'mask\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "compute_output_shape"
+ argspec: "args=[\'self\', \'input_shape\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "count_params"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "from_config"
+ argspec: "args=[\'cls\', \'config\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_config"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_input_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_input_mask_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_input_shape_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_losses_for"
+ argspec: "args=[\'self\', \'inputs\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_output_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_output_mask_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_output_shape_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_updates_for"
+ argspec: "args=[\'self\', \'inputs\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_weights"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "set_weights"
+ argspec: "args=[\'self\', \'weights\'], varargs=None, keywords=None, defaults=None"
+ }
+}
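
Conv3D extends the same constructor to volumetric data, with (1, 1, 1) defaults for both strides and dilation_rate. A sketch; the input shape is an illustrative assumption.

    import tensorflow as tf

    inputs = tf.keras.Input(shape=(16, 16, 16, 1))  # depth, height, width, channels
    conv = tf.keras.layers.Conv3D(filters=8, kernel_size=(3, 3, 3),
                                  padding='same', activation='relu')
    outputs = conv(inputs)                          # (None, 16, 16, 16, 8)
    model = tf.keras.Model(inputs, outputs)
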
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-convolution1-d.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-convolution1-d.pbtxt
new file mode 100644
index 0000000000..222a1ef4fc
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-convolution1-d.pbtxt
@@ -0,0 +1,176 @@
+path: "tensorflow.keras.layers.Convolution1D"
+tf_class {
+ is_instance: "<class \'tensorflow.python.keras.layers.convolutional.Conv1D\'>"
+ is_instance: "<class \'tensorflow.python.keras.layers.convolutional.Conv\'>"
+ is_instance: "<class \'tensorflow.python.keras.engine.base_layer.Layer\'>"
+ is_instance: "<class \'tensorflow.python.training.checkpointable.base.CheckpointableBase\'>"
+ is_instance: "<type \'object\'>"
+ member {
+ name: "activity_regularizer"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "dtype"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "inbound_nodes"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "input"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "input_mask"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "input_shape"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "losses"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "name"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "non_trainable_variables"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "non_trainable_weights"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "outbound_nodes"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "output"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "output_mask"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "output_shape"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "trainable_variables"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "trainable_weights"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "updates"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "variables"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "weights"
+ mtype: "<type \'property\'>"
+ }
+ member_method {
+ name: "__init__"
+ argspec: "args=[\'self\', \'filters\', \'kernel_size\', \'strides\', \'padding\', \'data_format\', \'dilation_rate\', \'activation\', \'use_bias\', \'kernel_initializer\', \'bias_initializer\', \'kernel_regularizer\', \'bias_regularizer\', \'activity_regularizer\', \'kernel_constraint\', \'bias_constraint\'], varargs=None, keywords=kwargs, defaults=[\'1\', \'valid\', \'channels_last\', \'1\', \'None\', \'True\', \'glorot_uniform\', \'zeros\', \'None\', \'None\', \'None\', \'None\', \'None\'], "
+ }
+ member_method {
+ name: "add_loss"
+ argspec: "args=[\'self\', \'losses\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "add_update"
+ argspec: "args=[\'self\', \'updates\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "add_variable"
+ argspec: "args=[\'self\'], varargs=args, keywords=kwargs, defaults=None"
+ }
+ member_method {
+ name: "add_weight"
+ argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'synchronization\', \'aggregation\'], varargs=None, keywords=kwargs, defaults=[\'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'VariableSynchronization.AUTO\', \'VariableAggregation.NONE\'], "
+ }
+ member_method {
+ name: "apply"
+ argspec: "args=[\'self\', \'inputs\'], varargs=args, keywords=kwargs, defaults=None"
+ }
+ member_method {
+ name: "build"
+ argspec: "args=[\'self\', \'input_shape\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "call"
+ argspec: "args=[\'self\', \'inputs\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "compute_mask"
+ argspec: "args=[\'self\', \'inputs\', \'mask\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "compute_output_shape"
+ argspec: "args=[\'self\', \'input_shape\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "count_params"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "from_config"
+ argspec: "args=[\'cls\', \'config\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_config"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_input_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_input_mask_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_input_shape_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_losses_for"
+ argspec: "args=[\'self\', \'inputs\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_output_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_output_mask_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_output_shape_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_updates_for"
+ argspec: "args=[\'self\', \'inputs\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_weights"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "set_weights"
+ argspec: "args=[\'self\', \'weights\'], varargs=None, keywords=None, defaults=None"
+ }
+}
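
The is_instance entries above show Convolution1D resolving to the same convolutional.Conv1D class, so its argspec matches Conv1D exactly; treating the two names as interchangeable is an inference from the golden, not something it states outright. A sketch using the long-form name:

    import tensorflow as tf

    inputs = tf.keras.Input(shape=(128, 16))
    conv = tf.keras.layers.Convolution1D(filters=64, kernel_size=3, padding='valid')
    outputs = conv(inputs)  # behaves like the Conv1D example above
    model = tf.keras.Model(inputs, outputs)
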
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-convolution2-d-transpose.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-convolution2-d-transpose.pbtxt
new file mode 100644
index 0000000000..8f4f7918ab
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-convolution2-d-transpose.pbtxt
@@ -0,0 +1,177 @@
+path: "tensorflow.keras.layers.Convolution2DTranspose"
+tf_class {
+ is_instance: "<class \'tensorflow.python.keras.layers.convolutional.Conv2DTranspose\'>"
+ is_instance: "<class \'tensorflow.python.keras.layers.convolutional.Conv2D\'>"
+ is_instance: "<class \'tensorflow.python.keras.layers.convolutional.Conv\'>"
+ is_instance: "<class \'tensorflow.python.keras.engine.base_layer.Layer\'>"
+ is_instance: "<class \'tensorflow.python.training.checkpointable.base.CheckpointableBase\'>"
+ is_instance: "<type \'object\'>"
+ member {
+ name: "activity_regularizer"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "dtype"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "inbound_nodes"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "input"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "input_mask"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "input_shape"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "losses"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "name"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "non_trainable_variables"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "non_trainable_weights"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "outbound_nodes"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "output"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "output_mask"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "output_shape"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "trainable_variables"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "trainable_weights"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "updates"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "variables"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "weights"
+ mtype: "<type \'property\'>"
+ }
+ member_method {
+ name: "__init__"
+ argspec: "args=[\'self\', \'filters\', \'kernel_size\', \'strides\', \'padding\', \'data_format\', \'activation\', \'use_bias\', \'kernel_initializer\', \'bias_initializer\', \'kernel_regularizer\', \'bias_regularizer\', \'activity_regularizer\', \'kernel_constraint\', \'bias_constraint\'], varargs=None, keywords=kwargs, defaults=[\'(1, 1)\', \'valid\', \'None\', \'None\', \'True\', \'glorot_uniform\', \'zeros\', \'None\', \'None\', \'None\', \'None\', \'None\'], "
+ }
+ member_method {
+ name: "add_loss"
+ argspec: "args=[\'self\', \'losses\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "add_update"
+ argspec: "args=[\'self\', \'updates\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "add_variable"
+ argspec: "args=[\'self\'], varargs=args, keywords=kwargs, defaults=None"
+ }
+ member_method {
+ name: "add_weight"
+ argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'synchronization\', \'aggregation\'], varargs=None, keywords=kwargs, defaults=[\'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'VariableSynchronization.AUTO\', \'VariableAggregation.NONE\'], "
+ }
+ member_method {
+ name: "apply"
+ argspec: "args=[\'self\', \'inputs\'], varargs=args, keywords=kwargs, defaults=None"
+ }
+ member_method {
+ name: "build"
+ argspec: "args=[\'self\', \'input_shape\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "call"
+ argspec: "args=[\'self\', \'inputs\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "compute_mask"
+ argspec: "args=[\'self\', \'inputs\', \'mask\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "compute_output_shape"
+ argspec: "args=[\'self\', \'input_shape\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "count_params"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "from_config"
+ argspec: "args=[\'cls\', \'config\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_config"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_input_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_input_mask_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_input_shape_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_losses_for"
+ argspec: "args=[\'self\', \'inputs\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_output_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_output_mask_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_output_shape_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_updates_for"
+ argspec: "args=[\'self\', \'inputs\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_weights"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "set_weights"
+ argspec: "args=[\'self\', \'weights\'], varargs=None, keywords=None, defaults=None"
+ }
+}
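Illustrative sketch (not part of the golden snapshot): how the Convolution2DTranspose __init__ argspec above maps to a constructor call; the filter count and shapes are assumptions for the example.

    import tensorflow as tf

    # strides=(2, 2) upsamples the spatial dimensions; padding defaults to 'valid'
    # and kernel_initializer to 'glorot_uniform', matching the recorded defaults.
    deconv = tf.keras.layers.Convolution2DTranspose(filters=64, kernel_size=3,
                                                    strides=(2, 2), padding='same')
    x = tf.keras.Input(shape=(16, 16, 32))
    y = deconv(x)  # shape: (None, 32, 32, 64) with 'same' padding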
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-convolution2-d.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-convolution2-d.pbtxt
new file mode 100644
index 0000000000..f939067178
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-convolution2-d.pbtxt
@@ -0,0 +1,176 @@
+path: "tensorflow.keras.layers.Convolution2D"
+tf_class {
+ is_instance: "<class \'tensorflow.python.keras.layers.convolutional.Conv2D\'>"
+ is_instance: "<class \'tensorflow.python.keras.layers.convolutional.Conv\'>"
+ is_instance: "<class \'tensorflow.python.keras.engine.base_layer.Layer\'>"
+ is_instance: "<class \'tensorflow.python.training.checkpointable.base.CheckpointableBase\'>"
+ is_instance: "<type \'object\'>"
+ member {
+ name: "activity_regularizer"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "dtype"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "inbound_nodes"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "input"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "input_mask"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "input_shape"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "losses"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "name"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "non_trainable_variables"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "non_trainable_weights"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "outbound_nodes"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "output"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "output_mask"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "output_shape"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "trainable_variables"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "trainable_weights"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "updates"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "variables"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "weights"
+ mtype: "<type \'property\'>"
+ }
+ member_method {
+ name: "__init__"
+ argspec: "args=[\'self\', \'filters\', \'kernel_size\', \'strides\', \'padding\', \'data_format\', \'dilation_rate\', \'activation\', \'use_bias\', \'kernel_initializer\', \'bias_initializer\', \'kernel_regularizer\', \'bias_regularizer\', \'activity_regularizer\', \'kernel_constraint\', \'bias_constraint\'], varargs=None, keywords=kwargs, defaults=[\'(1, 1)\', \'valid\', \'None\', \'(1, 1)\', \'None\', \'True\', \'glorot_uniform\', \'zeros\', \'None\', \'None\', \'None\', \'None\', \'None\'], "
+ }
+ member_method {
+ name: "add_loss"
+ argspec: "args=[\'self\', \'losses\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "add_update"
+ argspec: "args=[\'self\', \'updates\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "add_variable"
+ argspec: "args=[\'self\'], varargs=args, keywords=kwargs, defaults=None"
+ }
+ member_method {
+ name: "add_weight"
+ argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'synchronization\', \'aggregation\'], varargs=None, keywords=kwargs, defaults=[\'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'VariableSynchronization.AUTO\', \'VariableAggregation.NONE\'], "
+ }
+ member_method {
+ name: "apply"
+ argspec: "args=[\'self\', \'inputs\'], varargs=args, keywords=kwargs, defaults=None"
+ }
+ member_method {
+ name: "build"
+ argspec: "args=[\'self\', \'input_shape\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "call"
+ argspec: "args=[\'self\', \'inputs\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "compute_mask"
+ argspec: "args=[\'self\', \'inputs\', \'mask\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "compute_output_shape"
+ argspec: "args=[\'self\', \'input_shape\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "count_params"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "from_config"
+ argspec: "args=[\'cls\', \'config\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_config"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_input_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_input_mask_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_input_shape_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_losses_for"
+ argspec: "args=[\'self\', \'inputs\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_output_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_output_mask_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_output_shape_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_updates_for"
+ argspec: "args=[\'self\', \'inputs\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_weights"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "set_weights"
+ argspec: "args=[\'self\', \'weights\'], varargs=None, keywords=None, defaults=None"
+ }
+}
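Illustrative sketch (not part of the golden snapshot): the Convolution2D argspec above adds a dilation_rate argument relative to the transposed variant; the values below are assumptions.

    import tensorflow as tf

    # dilation_rate defaults to (1, 1); setting it to (2, 2) enlarges the receptive
    # field without adding parameters (strides stay at their default of (1, 1)).
    conv = tf.keras.layers.Convolution2D(filters=32, kernel_size=(3, 3),
                                         dilation_rate=(2, 2), activation='relu')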
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-convolution3-d-transpose.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-convolution3-d-transpose.pbtxt
new file mode 100644
index 0000000000..93c442bd55
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-convolution3-d-transpose.pbtxt
@@ -0,0 +1,177 @@
+path: "tensorflow.keras.layers.Convolution3DTranspose"
+tf_class {
+ is_instance: "<class \'tensorflow.python.keras.layers.convolutional.Conv3DTranspose\'>"
+ is_instance: "<class \'tensorflow.python.keras.layers.convolutional.Conv3D\'>"
+ is_instance: "<class \'tensorflow.python.keras.layers.convolutional.Conv\'>"
+ is_instance: "<class \'tensorflow.python.keras.engine.base_layer.Layer\'>"
+ is_instance: "<class \'tensorflow.python.training.checkpointable.base.CheckpointableBase\'>"
+ is_instance: "<type \'object\'>"
+ member {
+ name: "activity_regularizer"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "dtype"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "inbound_nodes"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "input"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "input_mask"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "input_shape"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "losses"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "name"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "non_trainable_variables"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "non_trainable_weights"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "outbound_nodes"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "output"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "output_mask"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "output_shape"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "trainable_variables"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "trainable_weights"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "updates"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "variables"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "weights"
+ mtype: "<type \'property\'>"
+ }
+ member_method {
+ name: "__init__"
+ argspec: "args=[\'self\', \'filters\', \'kernel_size\', \'strides\', \'padding\', \'data_format\', \'activation\', \'use_bias\', \'kernel_initializer\', \'bias_initializer\', \'kernel_regularizer\', \'bias_regularizer\', \'activity_regularizer\', \'kernel_constraint\', \'bias_constraint\'], varargs=None, keywords=kwargs, defaults=[\'(1, 1, 1)\', \'valid\', \'None\', \'None\', \'True\', \'glorot_uniform\', \'zeros\', \'None\', \'None\', \'None\', \'None\', \'None\'], "
+ }
+ member_method {
+ name: "add_loss"
+ argspec: "args=[\'self\', \'losses\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "add_update"
+ argspec: "args=[\'self\', \'updates\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "add_variable"
+ argspec: "args=[\'self\'], varargs=args, keywords=kwargs, defaults=None"
+ }
+ member_method {
+ name: "add_weight"
+ argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'synchronization\', \'aggregation\'], varargs=None, keywords=kwargs, defaults=[\'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'VariableSynchronization.AUTO\', \'VariableAggregation.NONE\'], "
+ }
+ member_method {
+ name: "apply"
+ argspec: "args=[\'self\', \'inputs\'], varargs=args, keywords=kwargs, defaults=None"
+ }
+ member_method {
+ name: "build"
+ argspec: "args=[\'self\', \'input_shape\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "call"
+ argspec: "args=[\'self\', \'inputs\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "compute_mask"
+ argspec: "args=[\'self\', \'inputs\', \'mask\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "compute_output_shape"
+ argspec: "args=[\'self\', \'input_shape\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "count_params"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "from_config"
+ argspec: "args=[\'cls\', \'config\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_config"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_input_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_input_mask_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_input_shape_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_losses_for"
+ argspec: "args=[\'self\', \'inputs\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_output_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_output_mask_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_output_shape_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_updates_for"
+ argspec: "args=[\'self\', \'inputs\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_weights"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "set_weights"
+ argspec: "args=[\'self\', \'weights\'], varargs=None, keywords=None, defaults=None"
+ }
+}
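Illustrative sketch (not part of the golden snapshot): the Convolution3DTranspose defaults recorded above are the 3-D analogues of the 2-D case (strides=(1, 1, 1), padding='valid'); the values below are assumptions.

    import tensorflow as tf

    # Volumetric upsampling: stride of 2 along each of the three spatial axes.
    deconv3d = tf.keras.layers.Convolution3DTranspose(filters=16, kernel_size=3,
                                                      strides=(2, 2, 2), padding='same')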
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-convolution3-d.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-convolution3-d.pbtxt
new file mode 100644
index 0000000000..471b18ef85
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-convolution3-d.pbtxt
@@ -0,0 +1,176 @@
+path: "tensorflow.keras.layers.Convolution3D"
+tf_class {
+ is_instance: "<class \'tensorflow.python.keras.layers.convolutional.Conv3D\'>"
+ is_instance: "<class \'tensorflow.python.keras.layers.convolutional.Conv\'>"
+ is_instance: "<class \'tensorflow.python.keras.engine.base_layer.Layer\'>"
+ is_instance: "<class \'tensorflow.python.training.checkpointable.base.CheckpointableBase\'>"
+ is_instance: "<type \'object\'>"
+ member {
+ name: "activity_regularizer"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "dtype"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "inbound_nodes"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "input"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "input_mask"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "input_shape"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "losses"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "name"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "non_trainable_variables"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "non_trainable_weights"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "outbound_nodes"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "output"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "output_mask"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "output_shape"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "trainable_variables"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "trainable_weights"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "updates"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "variables"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "weights"
+ mtype: "<type \'property\'>"
+ }
+ member_method {
+ name: "__init__"
+ argspec: "args=[\'self\', \'filters\', \'kernel_size\', \'strides\', \'padding\', \'data_format\', \'dilation_rate\', \'activation\', \'use_bias\', \'kernel_initializer\', \'bias_initializer\', \'kernel_regularizer\', \'bias_regularizer\', \'activity_regularizer\', \'kernel_constraint\', \'bias_constraint\'], varargs=None, keywords=kwargs, defaults=[\'(1, 1, 1)\', \'valid\', \'None\', \'(1, 1, 1)\', \'None\', \'True\', \'glorot_uniform\', \'zeros\', \'None\', \'None\', \'None\', \'None\', \'None\'], "
+ }
+ member_method {
+ name: "add_loss"
+ argspec: "args=[\'self\', \'losses\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "add_update"
+ argspec: "args=[\'self\', \'updates\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "add_variable"
+ argspec: "args=[\'self\'], varargs=args, keywords=kwargs, defaults=None"
+ }
+ member_method {
+ name: "add_weight"
+ argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'synchronization\', \'aggregation\'], varargs=None, keywords=kwargs, defaults=[\'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'VariableSynchronization.AUTO\', \'VariableAggregation.NONE\'], "
+ }
+ member_method {
+ name: "apply"
+ argspec: "args=[\'self\', \'inputs\'], varargs=args, keywords=kwargs, defaults=None"
+ }
+ member_method {
+ name: "build"
+ argspec: "args=[\'self\', \'input_shape\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "call"
+ argspec: "args=[\'self\', \'inputs\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "compute_mask"
+ argspec: "args=[\'self\', \'inputs\', \'mask\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "compute_output_shape"
+ argspec: "args=[\'self\', \'input_shape\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "count_params"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "from_config"
+ argspec: "args=[\'cls\', \'config\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_config"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_input_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_input_mask_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_input_shape_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_losses_for"
+ argspec: "args=[\'self\', \'inputs\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_output_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_output_mask_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_output_shape_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_updates_for"
+ argspec: "args=[\'self\', \'inputs\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_weights"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "set_weights"
+ argspec: "args=[\'self\', \'weights\'], varargs=None, keywords=None, defaults=None"
+ }
+}
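Illustrative sketch (not part of the golden snapshot): compute_output_shape, listed above, can be used to check a Convolution3D configuration without building a model; the input shape is an assumption.

    import tensorflow as tf

    conv3d = tf.keras.layers.Convolution3D(filters=8, kernel_size=3)
    # 'valid' padding with a 3x3x3 kernel trims each spatial dimension by 2: 16 -> 14.
    print(conv3d.compute_output_shape((None, 16, 16, 16, 1)))  # (None, 14, 14, 14, 8)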
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-cropping1-d.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-cropping1-d.pbtxt
new file mode 100644
index 0000000000..0f250a09b7
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-cropping1-d.pbtxt
@@ -0,0 +1,175 @@
+path: "tensorflow.keras.layers.Cropping1D"
+tf_class {
+ is_instance: "<class \'tensorflow.python.keras.layers.convolutional.Cropping1D\'>"
+ is_instance: "<class \'tensorflow.python.keras.engine.base_layer.Layer\'>"
+ is_instance: "<class \'tensorflow.python.training.checkpointable.base.CheckpointableBase\'>"
+ is_instance: "<type \'object\'>"
+ member {
+ name: "activity_regularizer"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "dtype"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "inbound_nodes"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "input"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "input_mask"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "input_shape"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "losses"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "name"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "non_trainable_variables"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "non_trainable_weights"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "outbound_nodes"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "output"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "output_mask"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "output_shape"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "trainable_variables"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "trainable_weights"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "updates"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "variables"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "weights"
+ mtype: "<type \'property\'>"
+ }
+ member_method {
+ name: "__init__"
+ argspec: "args=[\'self\', \'cropping\'], varargs=None, keywords=kwargs, defaults=[\'(1, 1)\'], "
+ }
+ member_method {
+ name: "add_loss"
+ argspec: "args=[\'self\', \'losses\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "add_update"
+ argspec: "args=[\'self\', \'updates\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "add_variable"
+ argspec: "args=[\'self\'], varargs=args, keywords=kwargs, defaults=None"
+ }
+ member_method {
+ name: "add_weight"
+ argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'synchronization\', \'aggregation\'], varargs=None, keywords=kwargs, defaults=[\'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'VariableSynchronization.AUTO\', \'VariableAggregation.NONE\'], "
+ }
+ member_method {
+ name: "apply"
+ argspec: "args=[\'self\', \'inputs\'], varargs=args, keywords=kwargs, defaults=None"
+ }
+ member_method {
+ name: "build"
+ argspec: "args=[\'self\', \'input_shape\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "call"
+ argspec: "args=[\'self\', \'inputs\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "compute_mask"
+ argspec: "args=[\'self\', \'inputs\', \'mask\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "compute_output_shape"
+ argspec: "args=[\'self\', \'input_shape\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "count_params"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "from_config"
+ argspec: "args=[\'cls\', \'config\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_config"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_input_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_input_mask_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_input_shape_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_losses_for"
+ argspec: "args=[\'self\', \'inputs\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_output_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_output_mask_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_output_shape_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_updates_for"
+ argspec: "args=[\'self\', \'inputs\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_weights"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "set_weights"
+ argspec: "args=[\'self\', \'weights\'], varargs=None, keywords=None, defaults=None"
+ }
+}
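Illustrative sketch (not part of the golden snapshot): Cropping1D's only constructor argument is cropping, with the default (1, 1) recorded above; the tensor shape is an assumption.

    import tensorflow as tf

    crop = tf.keras.layers.Cropping1D(cropping=(1, 1))  # trim one step from each end
    x = tf.keras.Input(shape=(10, 8))
    y = crop(x)  # shape: (None, 8, 8)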
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-cropping2-d.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-cropping2-d.pbtxt
new file mode 100644
index 0000000000..f52128483c
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-cropping2-d.pbtxt
@@ -0,0 +1,175 @@
+path: "tensorflow.keras.layers.Cropping2D"
+tf_class {
+ is_instance: "<class \'tensorflow.python.keras.layers.convolutional.Cropping2D\'>"
+ is_instance: "<class \'tensorflow.python.keras.engine.base_layer.Layer\'>"
+ is_instance: "<class \'tensorflow.python.training.checkpointable.base.CheckpointableBase\'>"
+ is_instance: "<type \'object\'>"
+ member {
+ name: "activity_regularizer"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "dtype"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "inbound_nodes"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "input"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "input_mask"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "input_shape"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "losses"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "name"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "non_trainable_variables"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "non_trainable_weights"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "outbound_nodes"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "output"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "output_mask"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "output_shape"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "trainable_variables"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "trainable_weights"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "updates"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "variables"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "weights"
+ mtype: "<type \'property\'>"
+ }
+ member_method {
+ name: "__init__"
+ argspec: "args=[\'self\', \'cropping\', \'data_format\'], varargs=None, keywords=kwargs, defaults=[\'((0, 0), (0, 0))\', \'None\'], "
+ }
+ member_method {
+ name: "add_loss"
+ argspec: "args=[\'self\', \'losses\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "add_update"
+ argspec: "args=[\'self\', \'updates\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "add_variable"
+ argspec: "args=[\'self\'], varargs=args, keywords=kwargs, defaults=None"
+ }
+ member_method {
+ name: "add_weight"
+ argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'synchronization\', \'aggregation\'], varargs=None, keywords=kwargs, defaults=[\'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'VariableSynchronization.AUTO\', \'VariableAggregation.NONE\'], "
+ }
+ member_method {
+ name: "apply"
+ argspec: "args=[\'self\', \'inputs\'], varargs=args, keywords=kwargs, defaults=None"
+ }
+ member_method {
+ name: "build"
+ argspec: "args=[\'self\', \'input_shape\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "call"
+ argspec: "args=[\'self\', \'inputs\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "compute_mask"
+ argspec: "args=[\'self\', \'inputs\', \'mask\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "compute_output_shape"
+ argspec: "args=[\'self\', \'input_shape\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "count_params"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "from_config"
+ argspec: "args=[\'cls\', \'config\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_config"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_input_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_input_mask_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_input_shape_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_losses_for"
+ argspec: "args=[\'self\', \'inputs\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_output_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_output_mask_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_output_shape_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_updates_for"
+ argspec: "args=[\'self\', \'inputs\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_weights"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "set_weights"
+ argspec: "args=[\'self\', \'weights\'], varargs=None, keywords=None, defaults=None"
+ }
+}
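Illustrative sketch (not part of the golden snapshot): unlike Cropping1D and Cropping3D, the Cropping2D default above is ((0, 0), (0, 0)), i.e. a no-op until cropping is set; the values below are assumptions.

    import tensorflow as tf

    # Crop 2 rows from top/bottom and 4 columns from left/right.
    crop2d = tf.keras.layers.Cropping2D(cropping=((2, 2), (4, 4)))
    x = tf.keras.Input(shape=(28, 28, 3))
    y = crop2d(x)  # shape: (None, 24, 20, 3)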
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-cropping3-d.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-cropping3-d.pbtxt
new file mode 100644
index 0000000000..98daf3bab1
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-cropping3-d.pbtxt
@@ -0,0 +1,175 @@
+path: "tensorflow.keras.layers.Cropping3D"
+tf_class {
+ is_instance: "<class \'tensorflow.python.keras.layers.convolutional.Cropping3D\'>"
+ is_instance: "<class \'tensorflow.python.keras.engine.base_layer.Layer\'>"
+ is_instance: "<class \'tensorflow.python.training.checkpointable.base.CheckpointableBase\'>"
+ is_instance: "<type \'object\'>"
+ member {
+ name: "activity_regularizer"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "dtype"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "inbound_nodes"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "input"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "input_mask"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "input_shape"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "losses"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "name"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "non_trainable_variables"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "non_trainable_weights"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "outbound_nodes"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "output"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "output_mask"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "output_shape"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "trainable_variables"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "trainable_weights"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "updates"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "variables"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "weights"
+ mtype: "<type \'property\'>"
+ }
+ member_method {
+ name: "__init__"
+ argspec: "args=[\'self\', \'cropping\', \'data_format\'], varargs=None, keywords=kwargs, defaults=[\'((1, 1), (1, 1), (1, 1))\', \'None\'], "
+ }
+ member_method {
+ name: "add_loss"
+ argspec: "args=[\'self\', \'losses\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "add_update"
+ argspec: "args=[\'self\', \'updates\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "add_variable"
+ argspec: "args=[\'self\'], varargs=args, keywords=kwargs, defaults=None"
+ }
+ member_method {
+ name: "add_weight"
+ argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'synchronization\', \'aggregation\'], varargs=None, keywords=kwargs, defaults=[\'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'VariableSynchronization.AUTO\', \'VariableAggregation.NONE\'], "
+ }
+ member_method {
+ name: "apply"
+ argspec: "args=[\'self\', \'inputs\'], varargs=args, keywords=kwargs, defaults=None"
+ }
+ member_method {
+ name: "build"
+ argspec: "args=[\'self\', \'input_shape\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "call"
+ argspec: "args=[\'self\', \'inputs\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "compute_mask"
+ argspec: "args=[\'self\', \'inputs\', \'mask\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "compute_output_shape"
+ argspec: "args=[\'self\', \'input_shape\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "count_params"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "from_config"
+ argspec: "args=[\'cls\', \'config\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_config"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_input_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_input_mask_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_input_shape_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_losses_for"
+ argspec: "args=[\'self\', \'inputs\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_output_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_output_mask_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_output_shape_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_updates_for"
+ argspec: "args=[\'self\', \'inputs\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_weights"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "set_weights"
+ argspec: "args=[\'self\', \'weights\'], varargs=None, keywords=None, defaults=None"
+ }
+}
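Illustrative sketch (not part of the golden snapshot): Cropping3D's default above trims one element from both ends of each of the three spatial axes; the shapes below are assumptions.

    import tensorflow as tf

    crop3d = tf.keras.layers.Cropping3D()  # cropping=((1, 1), (1, 1), (1, 1))
    x = tf.keras.Input(shape=(8, 8, 8, 4))
    y = crop3d(x)  # shape: (None, 6, 6, 6, 4)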
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-cu-d-n-n-g-r-u.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-cu-d-n-n-g-r-u.pbtxt
new file mode 100644
index 0000000000..64e7a9046b
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-cu-d-n-n-g-r-u.pbtxt
@@ -0,0 +1,193 @@
+path: "tensorflow.keras.layers.CuDNNGRU"
+tf_class {
+ is_instance: "<class \'tensorflow.python.keras.layers.cudnn_recurrent.CuDNNGRU\'>"
+ is_instance: "<class \'tensorflow.python.keras.layers.cudnn_recurrent._CuDNNRNN\'>"
+ is_instance: "<class \'tensorflow.python.keras.layers.recurrent.RNN\'>"
+ is_instance: "<class \'tensorflow.python.keras.engine.base_layer.Layer\'>"
+ is_instance: "<class \'tensorflow.python.training.checkpointable.base.CheckpointableBase\'>"
+ is_instance: "<type \'object\'>"
+ member {
+ name: "activity_regularizer"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "cell"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "dtype"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "inbound_nodes"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "input"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "input_mask"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "input_shape"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "losses"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "name"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "non_trainable_variables"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "non_trainable_weights"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "outbound_nodes"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "output"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "output_mask"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "output_shape"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "states"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "trainable_variables"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "trainable_weights"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "updates"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "variables"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "weights"
+ mtype: "<type \'property\'>"
+ }
+ member_method {
+ name: "__init__"
+ argspec: "args=[\'self\', \'units\', \'kernel_initializer\', \'recurrent_initializer\', \'bias_initializer\', \'kernel_regularizer\', \'recurrent_regularizer\', \'bias_regularizer\', \'activity_regularizer\', \'kernel_constraint\', \'recurrent_constraint\', \'bias_constraint\', \'return_sequences\', \'return_state\', \'go_backwards\', \'stateful\'], varargs=None, keywords=kwargs, defaults=[\'glorot_uniform\', \'orthogonal\', \'zeros\', \'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'False\', \'False\', \'False\', \'False\'], "
+ }
+ member_method {
+ name: "add_loss"
+ argspec: "args=[\'self\', \'losses\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "add_update"
+ argspec: "args=[\'self\', \'updates\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "add_variable"
+ argspec: "args=[\'self\'], varargs=args, keywords=kwargs, defaults=None"
+ }
+ member_method {
+ name: "add_weight"
+ argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'synchronization\', \'aggregation\'], varargs=None, keywords=kwargs, defaults=[\'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'VariableSynchronization.AUTO\', \'VariableAggregation.NONE\'], "
+ }
+ member_method {
+ name: "apply"
+ argspec: "args=[\'self\', \'inputs\'], varargs=args, keywords=kwargs, defaults=None"
+ }
+ member_method {
+ name: "build"
+ argspec: "args=[\'self\', \'input_shape\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "call"
+ argspec: "args=[\'self\', \'inputs\', \'mask\', \'training\', \'initial_state\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\'], "
+ }
+ member_method {
+ name: "compute_mask"
+ argspec: "args=[\'self\', \'inputs\', \'mask\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "compute_output_shape"
+ argspec: "args=[\'instance\', \'input_shape\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "count_params"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "from_config"
+ argspec: "args=[\'cls\', \'config\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_config"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_initial_state"
+ argspec: "args=[\'self\', \'inputs\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_input_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_input_mask_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_input_shape_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_losses_for"
+ argspec: "args=[\'self\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "get_output_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_output_mask_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_output_shape_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_updates_for"
+ argspec: "args=[\'self\', \'inputs\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_weights"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "reset_states"
+ argspec: "args=[\'self\', \'states\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "set_weights"
+ argspec: "args=[\'self\', \'weights\'], varargs=None, keywords=None, defaults=None"
+ }
+}
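Illustrative sketch (not part of the golden snapshot): the CuDNNGRU argspec above exposes RNN-style flags (return_sequences, return_state, stateful) but no activation arguments, since the cuDNN kernel fixes them; units and shapes below are assumptions, and the layer only runs on a CUDA GPU with cuDNN.

    import tensorflow as tf

    gru = tf.keras.layers.CuDNNGRU(units=128, return_sequences=True)
    x = tf.keras.Input(shape=(20, 64))   # (timesteps, features)
    y = gru(x)                           # shape: (None, 20, 128); executes only on GPU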
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-cu-d-n-n-l-s-t-m.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-cu-d-n-n-l-s-t-m.pbtxt
new file mode 100644
index 0000000000..6fdffef776
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-cu-d-n-n-l-s-t-m.pbtxt
@@ -0,0 +1,193 @@
+path: "tensorflow.keras.layers.CuDNNLSTM"
+tf_class {
+ is_instance: "<class \'tensorflow.python.keras.layers.cudnn_recurrent.CuDNNLSTM\'>"
+ is_instance: "<class \'tensorflow.python.keras.layers.cudnn_recurrent._CuDNNRNN\'>"
+ is_instance: "<class \'tensorflow.python.keras.layers.recurrent.RNN\'>"
+ is_instance: "<class \'tensorflow.python.keras.engine.base_layer.Layer\'>"
+ is_instance: "<class \'tensorflow.python.training.checkpointable.base.CheckpointableBase\'>"
+ is_instance: "<type \'object\'>"
+ member {
+ name: "activity_regularizer"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "cell"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "dtype"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "inbound_nodes"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "input"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "input_mask"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "input_shape"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "losses"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "name"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "non_trainable_variables"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "non_trainable_weights"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "outbound_nodes"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "output"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "output_mask"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "output_shape"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "states"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "trainable_variables"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "trainable_weights"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "updates"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "variables"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "weights"
+ mtype: "<type \'property\'>"
+ }
+ member_method {
+ name: "__init__"
+ argspec: "args=[\'self\', \'units\', \'kernel_initializer\', \'recurrent_initializer\', \'bias_initializer\', \'unit_forget_bias\', \'kernel_regularizer\', \'recurrent_regularizer\', \'bias_regularizer\', \'activity_regularizer\', \'kernel_constraint\', \'recurrent_constraint\', \'bias_constraint\', \'return_sequences\', \'return_state\', \'go_backwards\', \'stateful\'], varargs=None, keywords=kwargs, defaults=[\'glorot_uniform\', \'orthogonal\', \'zeros\', \'True\', \'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'False\', \'False\', \'False\', \'False\'], "
+ }
+ member_method {
+ name: "add_loss"
+ argspec: "args=[\'self\', \'losses\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "add_update"
+ argspec: "args=[\'self\', \'updates\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "add_variable"
+ argspec: "args=[\'self\'], varargs=args, keywords=kwargs, defaults=None"
+ }
+ member_method {
+ name: "add_weight"
+ argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'synchronization\', \'aggregation\'], varargs=None, keywords=kwargs, defaults=[\'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'VariableSynchronization.AUTO\', \'VariableAggregation.NONE\'], "
+ }
+ member_method {
+ name: "apply"
+ argspec: "args=[\'self\', \'inputs\'], varargs=args, keywords=kwargs, defaults=None"
+ }
+ member_method {
+ name: "build"
+ argspec: "args=[\'self\', \'input_shape\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "call"
+ argspec: "args=[\'self\', \'inputs\', \'mask\', \'training\', \'initial_state\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\'], "
+ }
+ member_method {
+ name: "compute_mask"
+ argspec: "args=[\'self\', \'inputs\', \'mask\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "compute_output_shape"
+ argspec: "args=[\'instance\', \'input_shape\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "count_params"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "from_config"
+ argspec: "args=[\'cls\', \'config\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_config"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_initial_state"
+ argspec: "args=[\'self\', \'inputs\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_input_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_input_mask_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_input_shape_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_losses_for"
+ argspec: "args=[\'self\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "get_output_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_output_mask_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_output_shape_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_updates_for"
+ argspec: "args=[\'self\', \'inputs\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_weights"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "reset_states"
+ argspec: "args=[\'self\', \'states\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "set_weights"
+ argspec: "args=[\'self\', \'weights\'], varargs=None, keywords=None, defaults=None"
+ }
+}
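Illustrative sketch (not part of the golden snapshot): the CuDNNLSTM argspec above differs from CuDNNGRU only by the unit_forget_bias flag; with return_state=True the call returns the output plus both LSTM states. Values below are assumptions, and a cuDNN-capable GPU is required.

    import tensorflow as tf

    lstm = tf.keras.layers.CuDNNLSTM(units=64, return_state=True)
    x = tf.keras.Input(shape=(20, 32))
    output, state_h, state_c = lstm(x)   # each state has shape (None, 64)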
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-dense.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-dense.pbtxt
new file mode 100644
index 0000000000..3ac3825759
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-dense.pbtxt
@@ -0,0 +1,175 @@
+path: "tensorflow.keras.layers.Dense"
+tf_class {
+ is_instance: "<class \'tensorflow.python.keras.layers.core.Dense\'>"
+ is_instance: "<class \'tensorflow.python.keras.engine.base_layer.Layer\'>"
+ is_instance: "<class \'tensorflow.python.training.checkpointable.base.CheckpointableBase\'>"
+ is_instance: "<type \'object\'>"
+ member {
+ name: "activity_regularizer"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "dtype"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "inbound_nodes"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "input"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "input_mask"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "input_shape"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "losses"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "name"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "non_trainable_variables"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "non_trainable_weights"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "outbound_nodes"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "output"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "output_mask"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "output_shape"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "trainable_variables"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "trainable_weights"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "updates"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "variables"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "weights"
+ mtype: "<type \'property\'>"
+ }
+ member_method {
+ name: "__init__"
+ argspec: "args=[\'self\', \'units\', \'activation\', \'use_bias\', \'kernel_initializer\', \'bias_initializer\', \'kernel_regularizer\', \'bias_regularizer\', \'activity_regularizer\', \'kernel_constraint\', \'bias_constraint\'], varargs=None, keywords=kwargs, defaults=[\'None\', \'True\', \'glorot_uniform\', \'zeros\', \'None\', \'None\', \'None\', \'None\', \'None\'], "
+ }
+ member_method {
+ name: "add_loss"
+ argspec: "args=[\'self\', \'losses\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "add_update"
+ argspec: "args=[\'self\', \'updates\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "add_variable"
+ argspec: "args=[\'self\'], varargs=args, keywords=kwargs, defaults=None"
+ }
+ member_method {
+ name: "add_weight"
+ argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'synchronization\', \'aggregation\'], varargs=None, keywords=kwargs, defaults=[\'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'VariableSynchronization.AUTO\', \'VariableAggregation.NONE\'], "
+ }
+ member_method {
+ name: "apply"
+ argspec: "args=[\'self\', \'inputs\'], varargs=args, keywords=kwargs, defaults=None"
+ }
+ member_method {
+ name: "build"
+ argspec: "args=[\'self\', \'input_shape\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "call"
+ argspec: "args=[\'self\', \'inputs\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "compute_mask"
+ argspec: "args=[\'self\', \'inputs\', \'mask\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "compute_output_shape"
+ argspec: "args=[\'self\', \'input_shape\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "count_params"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "from_config"
+ argspec: "args=[\'cls\', \'config\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_config"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_input_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_input_mask_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_input_shape_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_losses_for"
+ argspec: "args=[\'self\', \'inputs\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_output_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_output_mask_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_output_shape_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_updates_for"
+ argspec: "args=[\'self\', \'inputs\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_weights"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "set_weights"
+ argspec: "args=[\'self\', \'weights\'], varargs=None, keywords=None, defaults=None"
+ }
+}
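
The constructor recorded in the argspec above (units plus activation, use_bias, kernel/bias initializers, regularizers and constraints, with glorot_uniform/zeros defaults) matches the Dense layer; the path line of this golden sits above the excerpt, so treat that identification as inferred. A minimal usage sketch of that signature, with illustrative shapes and values:

import tensorflow as tf

# Only `units` is required; the remaining arguments fall back to the
# defaults recorded in the argspec (glorot_uniform kernel, zeros bias).
layer = tf.keras.layers.Dense(units=64, activation='relu', use_bias=True,
                              kernel_initializer='glorot_uniform',
                              bias_initializer='zeros')
x = tf.keras.Input(shape=(32,))
y = layer(x)                   # output shape (None, 64)
print(layer.count_params())    # 32 * 64 + 64 = 2112 parameters
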
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-depthwise-conv2-d.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-depthwise-conv2-d.pbtxt
new file mode 100644
index 0000000000..280ec8c25f
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-depthwise-conv2-d.pbtxt
@@ -0,0 +1,177 @@
+path: "tensorflow.keras.layers.DepthwiseConv2D"
+tf_class {
+ is_instance: "<class \'tensorflow.python.keras.layers.convolutional.DepthwiseConv2D\'>"
+ is_instance: "<class \'tensorflow.python.keras.layers.convolutional.Conv2D\'>"
+ is_instance: "<class \'tensorflow.python.keras.layers.convolutional.Conv\'>"
+ is_instance: "<class \'tensorflow.python.keras.engine.base_layer.Layer\'>"
+ is_instance: "<class \'tensorflow.python.training.checkpointable.base.CheckpointableBase\'>"
+ is_instance: "<type \'object\'>"
+ member {
+ name: "activity_regularizer"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "dtype"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "inbound_nodes"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "input"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "input_mask"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "input_shape"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "losses"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "name"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "non_trainable_variables"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "non_trainable_weights"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "outbound_nodes"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "output"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "output_mask"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "output_shape"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "trainable_variables"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "trainable_weights"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "updates"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "variables"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "weights"
+ mtype: "<type \'property\'>"
+ }
+ member_method {
+ name: "__init__"
+ argspec: "args=[\'self\', \'kernel_size\', \'strides\', \'padding\', \'depth_multiplier\', \'data_format\', \'activation\', \'use_bias\', \'depthwise_initializer\', \'bias_initializer\', \'depthwise_regularizer\', \'bias_regularizer\', \'activity_regularizer\', \'depthwise_constraint\', \'bias_constraint\'], varargs=None, keywords=kwargs, defaults=[\'(1, 1)\', \'valid\', \'1\', \'None\', \'None\', \'True\', \'glorot_uniform\', \'zeros\', \'None\', \'None\', \'None\', \'None\', \'None\'], "
+ }
+ member_method {
+ name: "add_loss"
+ argspec: "args=[\'self\', \'losses\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "add_update"
+ argspec: "args=[\'self\', \'updates\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "add_variable"
+ argspec: "args=[\'self\'], varargs=args, keywords=kwargs, defaults=None"
+ }
+ member_method {
+ name: "add_weight"
+ argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'synchronization\', \'aggregation\'], varargs=None, keywords=kwargs, defaults=[\'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'VariableSynchronization.AUTO\', \'VariableAggregation.NONE\'], "
+ }
+ member_method {
+ name: "apply"
+ argspec: "args=[\'self\', \'inputs\'], varargs=args, keywords=kwargs, defaults=None"
+ }
+ member_method {
+ name: "build"
+ argspec: "args=[\'self\', \'input_shape\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "call"
+ argspec: "args=[\'self\', \'inputs\', \'training\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "compute_mask"
+ argspec: "args=[\'self\', \'inputs\', \'mask\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "compute_output_shape"
+ argspec: "args=[\'instance\', \'input_shape\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "count_params"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "from_config"
+ argspec: "args=[\'cls\', \'config\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_config"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_input_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_input_mask_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_input_shape_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_losses_for"
+ argspec: "args=[\'self\', \'inputs\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_output_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_output_mask_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_output_shape_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_updates_for"
+ argspec: "args=[\'self\', \'inputs\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_weights"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "set_weights"
+ argspec: "args=[\'self\', \'weights\'], varargs=None, keywords=None, defaults=None"
+ }
+}
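
The DepthwiseConv2D golden above records a constructor that takes kernel_size and depth_multiplier but no filters argument: each input channel gets its own spatial filter, and depth_multiplier controls how many filters per channel. A minimal sketch of that signature (shapes are illustrative):

import tensorflow as tf

layer = tf.keras.layers.DepthwiseConv2D(kernel_size=3, strides=(1, 1),
                                        padding='valid', depth_multiplier=2)
x = tf.keras.Input(shape=(28, 28, 8))   # 8 input channels
y = layer(x)                            # 8 * 2 = 16 output channels
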
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-dot.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-dot.pbtxt
new file mode 100644
index 0000000000..560f66f9c7
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-dot.pbtxt
@@ -0,0 +1,176 @@
+path: "tensorflow.keras.layers.Dot"
+tf_class {
+ is_instance: "<class \'tensorflow.python.keras.layers.merge.Dot\'>"
+ is_instance: "<class \'tensorflow.python.keras.layers.merge._Merge\'>"
+ is_instance: "<class \'tensorflow.python.keras.engine.base_layer.Layer\'>"
+ is_instance: "<class \'tensorflow.python.training.checkpointable.base.CheckpointableBase\'>"
+ is_instance: "<type \'object\'>"
+ member {
+ name: "activity_regularizer"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "dtype"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "inbound_nodes"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "input"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "input_mask"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "input_shape"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "losses"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "name"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "non_trainable_variables"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "non_trainable_weights"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "outbound_nodes"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "output"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "output_mask"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "output_shape"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "trainable_variables"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "trainable_weights"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "updates"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "variables"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "weights"
+ mtype: "<type \'property\'>"
+ }
+ member_method {
+ name: "__init__"
+ argspec: "args=[\'self\', \'axes\', \'normalize\'], varargs=None, keywords=kwargs, defaults=[\'False\'], "
+ }
+ member_method {
+ name: "add_loss"
+ argspec: "args=[\'self\', \'losses\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "add_update"
+ argspec: "args=[\'self\', \'updates\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "add_variable"
+ argspec: "args=[\'self\'], varargs=args, keywords=kwargs, defaults=None"
+ }
+ member_method {
+ name: "add_weight"
+ argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'synchronization\', \'aggregation\'], varargs=None, keywords=kwargs, defaults=[\'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'VariableSynchronization.AUTO\', \'VariableAggregation.NONE\'], "
+ }
+ member_method {
+ name: "apply"
+ argspec: "args=[\'self\', \'inputs\'], varargs=args, keywords=kwargs, defaults=None"
+ }
+ member_method {
+ name: "build"
+ argspec: "args=[\'instance\', \'input_shape\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "call"
+ argspec: "args=[\'self\', \'inputs\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "compute_mask"
+ argspec: "args=[\'self\', \'inputs\', \'mask\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "compute_output_shape"
+ argspec: "args=[\'instance\', \'input_shape\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "count_params"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "from_config"
+ argspec: "args=[\'cls\', \'config\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_config"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_input_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_input_mask_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_input_shape_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_losses_for"
+ argspec: "args=[\'self\', \'inputs\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_output_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_output_mask_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_output_shape_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_updates_for"
+ argspec: "args=[\'self\', \'inputs\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_weights"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "set_weights"
+ argspec: "args=[\'self\', \'weights\'], varargs=None, keywords=None, defaults=None"
+ }
+}
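
Dot is a merge layer, so its call takes a list of two tensors and contracts them along the axes given in the constructor; with normalize=True the samples are L2-normalized first, which turns the result into a cosine similarity. A short sketch of the recorded signature, with illustrative sizes:

import tensorflow as tf

a = tf.keras.Input(shape=(10,))
b = tf.keras.Input(shape=(10,))
# axes=1 contracts the feature axis; normalize=True yields cosine similarity.
similarity = tf.keras.layers.Dot(axes=1, normalize=True)([a, b])   # shape (None, 1)
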
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-dropout.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-dropout.pbtxt
new file mode 100644
index 0000000000..c0543529c3
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-dropout.pbtxt
@@ -0,0 +1,175 @@
+path: "tensorflow.keras.layers.Dropout"
+tf_class {
+ is_instance: "<class \'tensorflow.python.keras.layers.core.Dropout\'>"
+ is_instance: "<class \'tensorflow.python.keras.engine.base_layer.Layer\'>"
+ is_instance: "<class \'tensorflow.python.training.checkpointable.base.CheckpointableBase\'>"
+ is_instance: "<type \'object\'>"
+ member {
+ name: "activity_regularizer"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "dtype"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "inbound_nodes"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "input"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "input_mask"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "input_shape"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "losses"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "name"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "non_trainable_variables"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "non_trainable_weights"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "outbound_nodes"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "output"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "output_mask"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "output_shape"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "trainable_variables"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "trainable_weights"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "updates"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "variables"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "weights"
+ mtype: "<type \'property\'>"
+ }
+ member_method {
+ name: "__init__"
+ argspec: "args=[\'self\', \'rate\', \'noise_shape\', \'seed\'], varargs=None, keywords=kwargs, defaults=[\'None\', \'None\'], "
+ }
+ member_method {
+ name: "add_loss"
+ argspec: "args=[\'self\', \'losses\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "add_update"
+ argspec: "args=[\'self\', \'updates\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "add_variable"
+ argspec: "args=[\'self\'], varargs=args, keywords=kwargs, defaults=None"
+ }
+ member_method {
+ name: "add_weight"
+ argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'synchronization\', \'aggregation\'], varargs=None, keywords=kwargs, defaults=[\'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'VariableSynchronization.AUTO\', \'VariableAggregation.NONE\'], "
+ }
+ member_method {
+ name: "apply"
+ argspec: "args=[\'self\', \'inputs\'], varargs=args, keywords=kwargs, defaults=None"
+ }
+ member_method {
+ name: "build"
+ argspec: "args=[\'self\', \'input_shape\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "call"
+ argspec: "args=[\'self\', \'inputs\', \'training\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "compute_mask"
+ argspec: "args=[\'self\', \'inputs\', \'mask\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "compute_output_shape"
+ argspec: "args=[\'self\', \'input_shape\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "count_params"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "from_config"
+ argspec: "args=[\'cls\', \'config\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_config"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_input_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_input_mask_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_input_shape_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_losses_for"
+ argspec: "args=[\'self\', \'inputs\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_output_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_output_mask_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_output_shape_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_updates_for"
+ argspec: "args=[\'self\', \'inputs\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_weights"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "set_weights"
+ argspec: "args=[\'self\', \'weights\'], varargs=None, keywords=None, defaults=None"
+ }
+}
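
Dropout's argspec takes rate plus optional noise_shape and seed, and its call signature carries a training flag, so the layer only drops units when training is true. A minimal sketch with illustrative values:

import tensorflow as tf

layer = tf.keras.layers.Dropout(rate=0.5, seed=42)
x = tf.keras.Input(shape=(128,))
y_train = layer(x, training=True)    # each unit dropped with probability 0.5
y_infer = layer(x, training=False)   # identity at inference time
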
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-e-l-u.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-e-l-u.pbtxt
new file mode 100644
index 0000000000..04eb2824b9
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-e-l-u.pbtxt
@@ -0,0 +1,175 @@
+path: "tensorflow.keras.layers.ELU"
+tf_class {
+ is_instance: "<class \'tensorflow.python.keras.layers.advanced_activations.ELU\'>"
+ is_instance: "<class \'tensorflow.python.keras.engine.base_layer.Layer\'>"
+ is_instance: "<class \'tensorflow.python.training.checkpointable.base.CheckpointableBase\'>"
+ is_instance: "<type \'object\'>"
+ member {
+ name: "activity_regularizer"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "dtype"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "inbound_nodes"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "input"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "input_mask"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "input_shape"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "losses"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "name"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "non_trainable_variables"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "non_trainable_weights"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "outbound_nodes"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "output"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "output_mask"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "output_shape"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "trainable_variables"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "trainable_weights"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "updates"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "variables"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "weights"
+ mtype: "<type \'property\'>"
+ }
+ member_method {
+ name: "__init__"
+ argspec: "args=[\'self\', \'alpha\'], varargs=None, keywords=kwargs, defaults=[\'1.0\'], "
+ }
+ member_method {
+ name: "add_loss"
+ argspec: "args=[\'self\', \'losses\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "add_update"
+ argspec: "args=[\'self\', \'updates\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "add_variable"
+ argspec: "args=[\'self\'], varargs=args, keywords=kwargs, defaults=None"
+ }
+ member_method {
+ name: "add_weight"
+ argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'synchronization\', \'aggregation\'], varargs=None, keywords=kwargs, defaults=[\'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'VariableSynchronization.AUTO\', \'VariableAggregation.NONE\'], "
+ }
+ member_method {
+ name: "apply"
+ argspec: "args=[\'self\', \'inputs\'], varargs=args, keywords=kwargs, defaults=None"
+ }
+ member_method {
+ name: "build"
+ argspec: "args=[\'self\', \'input_shape\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "call"
+ argspec: "args=[\'self\', \'inputs\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "compute_mask"
+ argspec: "args=[\'self\', \'inputs\', \'mask\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "compute_output_shape"
+ argspec: "args=[\'instance\', \'input_shape\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "count_params"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "from_config"
+ argspec: "args=[\'cls\', \'config\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_config"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_input_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_input_mask_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_input_shape_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_losses_for"
+ argspec: "args=[\'self\', \'inputs\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_output_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_output_mask_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_output_shape_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_updates_for"
+ argspec: "args=[\'self\', \'inputs\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_weights"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "set_weights"
+ argspec: "args=[\'self\', \'weights\'], varargs=None, keywords=None, defaults=None"
+ }
+}
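
ELU takes a single alpha argument (default 1.0) controlling the saturation value for negative inputs: the layer computes x for x > 0 and alpha * (exp(x) - 1) otherwise. A minimal sketch:

import tensorflow as tf

layer = tf.keras.layers.ELU(alpha=1.0)
x = tf.keras.Input(shape=(16,))
y = layer(x)   # same shape as x, negative values saturate toward -alpha
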
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-embedding.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-embedding.pbtxt
new file mode 100644
index 0000000000..f400432915
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-embedding.pbtxt
@@ -0,0 +1,175 @@
+path: "tensorflow.keras.layers.Embedding"
+tf_class {
+ is_instance: "<class \'tensorflow.python.keras.layers.embeddings.Embedding\'>"
+ is_instance: "<class \'tensorflow.python.keras.engine.base_layer.Layer\'>"
+ is_instance: "<class \'tensorflow.python.training.checkpointable.base.CheckpointableBase\'>"
+ is_instance: "<type \'object\'>"
+ member {
+ name: "activity_regularizer"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "dtype"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "inbound_nodes"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "input"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "input_mask"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "input_shape"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "losses"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "name"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "non_trainable_variables"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "non_trainable_weights"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "outbound_nodes"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "output"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "output_mask"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "output_shape"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "trainable_variables"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "trainable_weights"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "updates"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "variables"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "weights"
+ mtype: "<type \'property\'>"
+ }
+ member_method {
+ name: "__init__"
+ argspec: "args=[\'self\', \'input_dim\', \'output_dim\', \'embeddings_initializer\', \'embeddings_regularizer\', \'activity_regularizer\', \'embeddings_constraint\', \'mask_zero\', \'input_length\'], varargs=None, keywords=kwargs, defaults=[\'uniform\', \'None\', \'None\', \'None\', \'False\', \'None\'], "
+ }
+ member_method {
+ name: "add_loss"
+ argspec: "args=[\'self\', \'losses\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "add_update"
+ argspec: "args=[\'self\', \'updates\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "add_variable"
+ argspec: "args=[\'self\'], varargs=args, keywords=kwargs, defaults=None"
+ }
+ member_method {
+ name: "add_weight"
+ argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'synchronization\', \'aggregation\'], varargs=None, keywords=kwargs, defaults=[\'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'VariableSynchronization.AUTO\', \'VariableAggregation.NONE\'], "
+ }
+ member_method {
+ name: "apply"
+ argspec: "args=[\'self\', \'inputs\'], varargs=args, keywords=kwargs, defaults=None"
+ }
+ member_method {
+ name: "build"
+ argspec: "args=[\'instance\', \'input_shape\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "call"
+ argspec: "args=[\'self\', \'inputs\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "compute_mask"
+ argspec: "args=[\'self\', \'inputs\', \'mask\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "compute_output_shape"
+ argspec: "args=[\'instance\', \'input_shape\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "count_params"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "from_config"
+ argspec: "args=[\'cls\', \'config\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_config"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_input_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_input_mask_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_input_shape_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_losses_for"
+ argspec: "args=[\'self\', \'inputs\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_output_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_output_mask_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_output_shape_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_updates_for"
+ argspec: "args=[\'self\', \'inputs\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_weights"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "set_weights"
+ argspec: "args=[\'self\', \'weights\'], varargs=None, keywords=None, defaults=None"
+ }
+}
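
The Embedding argspec requires input_dim and output_dim; mask_zero=True reserves index 0 as a padding id and makes the layer emit a mask (hence compute_mask above), and input_length fixes the sequence length. Illustrative usage of that signature:

import tensorflow as tf

layer = tf.keras.layers.Embedding(input_dim=10000, output_dim=64,
                                  mask_zero=True, input_length=20)
ids = tf.keras.Input(shape=(20,), dtype='int32')
vectors = layer(ids)   # shape (None, 20, 64); id 0 is treated as padding
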
diff --git a/tensorflow/tools/api/golden/tensorflow.keras.layers.-global-max-pool3-d.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-flatten.pbtxt
index 4c1adb2131..ab176b441a 100644
--- a/tensorflow/tools/api/golden/tensorflow.keras.layers.-global-max-pool3-d.pbtxt
+++ b/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-flatten.pbtxt
@@ -1,9 +1,8 @@
-path: "tensorflow.keras.layers.GlobalMaxPool3D"
+path: "tensorflow.keras.layers.Flatten"
tf_class {
- is_instance: "<class \'tensorflow.python.keras._impl.keras.layers.pooling.GlobalMaxPooling3D\'>"
- is_instance: "<class \'tensorflow.python.keras._impl.keras.layers.pooling.GlobalPooling3D\'>"
- is_instance: "<class \'tensorflow.python.keras._impl.keras.engine.base_layer.Layer\'>"
- is_instance: "<class \'tensorflow.python.training.checkpointable.CheckpointableBase\'>"
+ is_instance: "<class \'tensorflow.python.keras.layers.core.Flatten\'>"
+ is_instance: "<class \'tensorflow.python.keras.engine.base_layer.Layer\'>"
+ is_instance: "<class \'tensorflow.python.training.checkpointable.base.CheckpointableBase\'>"
is_instance: "<type \'object\'>"
member {
name: "activity_regularizer"
@@ -99,7 +98,7 @@ tf_class {
}
member_method {
name: "add_weight"
- argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'getter\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'True\', \'None\', \'None\', \'None\', \'None\'], "
+ argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'synchronization\', \'aggregation\'], varargs=None, keywords=kwargs, defaults=[\'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'VariableSynchronization.AUTO\', \'VariableAggregation.NONE\'], "
}
member_method {
name: "apply"
@@ -107,7 +106,7 @@ tf_class {
}
member_method {
name: "build"
- argspec: "args=[\'self\', \'_\'], varargs=None, keywords=None, defaults=None"
+ argspec: "args=[\'self\', \'input_shape\'], varargs=None, keywords=None, defaults=None"
}
member_method {
name: "call"
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-g-r-u-cell.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-g-r-u-cell.pbtxt
new file mode 100644
index 0000000000..c3895a0ac1
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-g-r-u-cell.pbtxt
@@ -0,0 +1,179 @@
+path: "tensorflow.keras.layers.GRUCell"
+tf_class {
+ is_instance: "<class \'tensorflow.python.keras.layers.recurrent.GRUCell\'>"
+ is_instance: "<class \'tensorflow.python.keras.engine.base_layer.Layer\'>"
+ is_instance: "<class \'tensorflow.python.training.checkpointable.base.CheckpointableBase\'>"
+ is_instance: "<type \'object\'>"
+ member {
+ name: "activity_regularizer"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "dtype"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "inbound_nodes"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "input"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "input_mask"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "input_shape"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "losses"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "name"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "non_trainable_variables"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "non_trainable_weights"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "outbound_nodes"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "output"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "output_mask"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "output_shape"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "trainable_variables"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "trainable_weights"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "updates"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "variables"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "weights"
+ mtype: "<type \'property\'>"
+ }
+ member_method {
+ name: "__init__"
+ argspec: "args=[\'self\', \'units\', \'activation\', \'recurrent_activation\', \'use_bias\', \'kernel_initializer\', \'recurrent_initializer\', \'bias_initializer\', \'kernel_regularizer\', \'recurrent_regularizer\', \'bias_regularizer\', \'kernel_constraint\', \'recurrent_constraint\', \'bias_constraint\', \'dropout\', \'recurrent_dropout\', \'implementation\', \'reset_after\'], varargs=None, keywords=kwargs, defaults=[\'tanh\', \'hard_sigmoid\', \'True\', \'glorot_uniform\', \'orthogonal\', \'zeros\', \'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'0.0\', \'0.0\', \'1\', \'False\'], "
+ }
+ member_method {
+ name: "add_loss"
+ argspec: "args=[\'self\', \'losses\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "add_update"
+ argspec: "args=[\'self\', \'updates\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "add_variable"
+ argspec: "args=[\'self\'], varargs=args, keywords=kwargs, defaults=None"
+ }
+ member_method {
+ name: "add_weight"
+ argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'synchronization\', \'aggregation\'], varargs=None, keywords=kwargs, defaults=[\'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'VariableSynchronization.AUTO\', \'VariableAggregation.NONE\'], "
+ }
+ member_method {
+ name: "apply"
+ argspec: "args=[\'self\', \'inputs\'], varargs=args, keywords=kwargs, defaults=None"
+ }
+ member_method {
+ name: "build"
+ argspec: "args=[\'instance\', \'input_shape\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "call"
+ argspec: "args=[\'self\', \'inputs\', \'states\', \'training\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "compute_mask"
+ argspec: "args=[\'self\', \'inputs\', \'mask\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "compute_output_shape"
+ argspec: "args=[\'self\', \'input_shape\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "count_params"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "from_config"
+ argspec: "args=[\'cls\', \'config\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_config"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_initial_state"
+ argspec: "args=[\'self\', \'inputs\', \'batch_size\', \'dtype\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\'], "
+ }
+ member_method {
+ name: "get_input_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_input_mask_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_input_shape_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_losses_for"
+ argspec: "args=[\'self\', \'inputs\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_output_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_output_mask_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_output_shape_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_updates_for"
+ argspec: "args=[\'self\', \'inputs\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_weights"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "set_weights"
+ argspec: "args=[\'self\', \'weights\'], varargs=None, keywords=None, defaults=None"
+ }
+}
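
GRUCell implements a single recurrence step (note get_initial_state and the call signature taking states), so it is normally wrapped in tf.keras.layers.RNN, which unrolls it over the time axis. A minimal sketch of that pairing, with illustrative sizes:

import tensorflow as tf

cell = tf.keras.layers.GRUCell(units=32, activation='tanh',
                               recurrent_activation='hard_sigmoid')
rnn = tf.keras.layers.RNN(cell)
x = tf.keras.Input(shape=(10, 8))   # 10 timesteps, 8 features
h = rnn(x)                          # final output, shape (None, 32)
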
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-g-r-u.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-g-r-u.pbtxt
new file mode 100644
index 0000000000..a0fe598ab9
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-g-r-u.pbtxt
@@ -0,0 +1,256 @@
+path: "tensorflow.keras.layers.GRU"
+tf_class {
+ is_instance: "<class \'tensorflow.python.keras.layers.recurrent.GRU\'>"
+ is_instance: "<class \'tensorflow.python.keras.layers.recurrent.RNN\'>"
+ is_instance: "<class \'tensorflow.python.keras.engine.base_layer.Layer\'>"
+ is_instance: "<class \'tensorflow.python.training.checkpointable.base.CheckpointableBase\'>"
+ is_instance: "<type \'object\'>"
+ member {
+ name: "activation"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "activity_regularizer"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "bias_constraint"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "bias_initializer"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "bias_regularizer"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "dropout"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "dtype"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "implementation"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "inbound_nodes"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "input"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "input_mask"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "input_shape"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "kernel_constraint"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "kernel_initializer"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "kernel_regularizer"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "losses"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "name"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "non_trainable_variables"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "non_trainable_weights"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "outbound_nodes"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "output"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "output_mask"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "output_shape"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "recurrent_activation"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "recurrent_constraint"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "recurrent_dropout"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "recurrent_initializer"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "recurrent_regularizer"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "reset_after"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "states"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "trainable_variables"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "trainable_weights"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "units"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "updates"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "use_bias"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "variables"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "weights"
+ mtype: "<type \'property\'>"
+ }
+ member_method {
+ name: "__init__"
+ argspec: "args=[\'self\', \'units\', \'activation\', \'recurrent_activation\', \'use_bias\', \'kernel_initializer\', \'recurrent_initializer\', \'bias_initializer\', \'kernel_regularizer\', \'recurrent_regularizer\', \'bias_regularizer\', \'activity_regularizer\', \'kernel_constraint\', \'recurrent_constraint\', \'bias_constraint\', \'dropout\', \'recurrent_dropout\', \'implementation\', \'return_sequences\', \'return_state\', \'go_backwards\', \'stateful\', \'unroll\', \'reset_after\'], varargs=None, keywords=kwargs, defaults=[\'tanh\', \'hard_sigmoid\', \'True\', \'glorot_uniform\', \'orthogonal\', \'zeros\', \'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'0.0\', \'0.0\', \'1\', \'False\', \'False\', \'False\', \'False\', \'False\', \'False\'], "
+ }
+ member_method {
+ name: "add_loss"
+ argspec: "args=[\'self\', \'losses\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "add_update"
+ argspec: "args=[\'self\', \'updates\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "add_variable"
+ argspec: "args=[\'self\'], varargs=args, keywords=kwargs, defaults=None"
+ }
+ member_method {
+ name: "add_weight"
+ argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'synchronization\', \'aggregation\'], varargs=None, keywords=kwargs, defaults=[\'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'VariableSynchronization.AUTO\', \'VariableAggregation.NONE\'], "
+ }
+ member_method {
+ name: "apply"
+ argspec: "args=[\'self\', \'inputs\'], varargs=args, keywords=kwargs, defaults=None"
+ }
+ member_method {
+ name: "build"
+ argspec: "args=[\'instance\', \'input_shape\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "call"
+ argspec: "args=[\'self\', \'inputs\', \'mask\', \'training\', \'initial_state\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\'], "
+ }
+ member_method {
+ name: "compute_mask"
+ argspec: "args=[\'self\', \'inputs\', \'mask\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "compute_output_shape"
+ argspec: "args=[\'instance\', \'input_shape\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "count_params"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "from_config"
+ argspec: "args=[\'cls\', \'config\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_config"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_initial_state"
+ argspec: "args=[\'self\', \'inputs\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_input_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_input_mask_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_input_shape_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_losses_for"
+ argspec: "args=[\'self\', \'inputs\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_output_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_output_mask_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_output_shape_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_updates_for"
+ argspec: "args=[\'self\', \'inputs\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_weights"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "reset_states"
+ argspec: "args=[\'self\', \'states\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "set_weights"
+ argspec: "args=[\'self\', \'weights\'], varargs=None, keywords=None, defaults=None"
+ }
+}
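
The GRU layer exposes the cell arguments plus the RNN-level flags at the end of the argspec (return_sequences, return_state, go_backwards, stateful, unroll, reset_after), all defaulting to False. A sketch showing the two return flags, which determine how many tensors the call returns:

import tensorflow as tf

x = tf.keras.Input(shape=(10, 8))
outputs, state = tf.keras.layers.GRU(units=32, return_sequences=True,
                                     return_state=True)(x)
# outputs: (None, 10, 32) per-timestep outputs; state: (None, 32) final state
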
diff --git a/tensorflow/tools/api/golden/tensorflow.keras.layers.-spatial-dropout1-d.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-gaussian-dropout.pbtxt
index 9a0a19d2d5..55e0d7ef02 100644
--- a/tensorflow/tools/api/golden/tensorflow.keras.layers.-spatial-dropout1-d.pbtxt
+++ b/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-gaussian-dropout.pbtxt
@@ -1,9 +1,8 @@
-path: "tensorflow.keras.layers.SpatialDropout1D"
+path: "tensorflow.keras.layers.GaussianDropout"
tf_class {
- is_instance: "<class \'tensorflow.python.keras._impl.keras.layers.core.SpatialDropout1D\'>"
- is_instance: "<class \'tensorflow.python.keras._impl.keras.layers.core.Dropout\'>"
- is_instance: "<class \'tensorflow.python.keras._impl.keras.engine.base_layer.Layer\'>"
- is_instance: "<class \'tensorflow.python.training.checkpointable.CheckpointableBase\'>"
+ is_instance: "<class \'tensorflow.python.keras.layers.noise.GaussianDropout\'>"
+ is_instance: "<class \'tensorflow.python.keras.engine.base_layer.Layer\'>"
+ is_instance: "<class \'tensorflow.python.training.checkpointable.base.CheckpointableBase\'>"
is_instance: "<type \'object\'>"
member {
name: "activity_regularizer"
@@ -99,7 +98,7 @@ tf_class {
}
member_method {
name: "add_weight"
- argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'getter\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'True\', \'None\', \'None\', \'None\', \'None\'], "
+ argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'synchronization\', \'aggregation\'], varargs=None, keywords=kwargs, defaults=[\'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'VariableSynchronization.AUTO\', \'VariableAggregation.NONE\'], "
}
member_method {
name: "apply"
@@ -107,7 +106,7 @@ tf_class {
}
member_method {
name: "build"
- argspec: "args=[\'self\', \'_\'], varargs=None, keywords=None, defaults=None"
+ argspec: "args=[\'self\', \'input_shape\'], varargs=None, keywords=None, defaults=None"
}
member_method {
name: "call"
@@ -119,7 +118,7 @@ tf_class {
}
member_method {
name: "compute_output_shape"
- argspec: "args=[\'self\', \'input_shape\'], varargs=None, keywords=None, defaults=None"
+ argspec: "args=[\'instance\', \'input_shape\'], varargs=None, keywords=None, defaults=None"
}
member_method {
name: "count_params"
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-gaussian-noise.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-gaussian-noise.pbtxt
new file mode 100644
index 0000000000..38fbff5e4a
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-gaussian-noise.pbtxt
@@ -0,0 +1,175 @@
+path: "tensorflow.keras.layers.GaussianNoise"
+tf_class {
+ is_instance: "<class \'tensorflow.python.keras.layers.noise.GaussianNoise\'>"
+ is_instance: "<class \'tensorflow.python.keras.engine.base_layer.Layer\'>"
+ is_instance: "<class \'tensorflow.python.training.checkpointable.base.CheckpointableBase\'>"
+ is_instance: "<type \'object\'>"
+ member {
+ name: "activity_regularizer"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "dtype"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "inbound_nodes"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "input"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "input_mask"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "input_shape"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "losses"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "name"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "non_trainable_variables"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "non_trainable_weights"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "outbound_nodes"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "output"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "output_mask"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "output_shape"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "trainable_variables"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "trainable_weights"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "updates"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "variables"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "weights"
+ mtype: "<type \'property\'>"
+ }
+ member_method {
+ name: "__init__"
+ argspec: "args=[\'self\', \'stddev\'], varargs=None, keywords=kwargs, defaults=None"
+ }
+ member_method {
+ name: "add_loss"
+ argspec: "args=[\'self\', \'losses\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "add_update"
+ argspec: "args=[\'self\', \'updates\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "add_variable"
+ argspec: "args=[\'self\'], varargs=args, keywords=kwargs, defaults=None"
+ }
+ member_method {
+ name: "add_weight"
+ argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'synchronization\', \'aggregation\'], varargs=None, keywords=kwargs, defaults=[\'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'VariableSynchronization.AUTO\', \'VariableAggregation.NONE\'], "
+ }
+ member_method {
+ name: "apply"
+ argspec: "args=[\'self\', \'inputs\'], varargs=args, keywords=kwargs, defaults=None"
+ }
+ member_method {
+ name: "build"
+ argspec: "args=[\'self\', \'input_shape\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "call"
+ argspec: "args=[\'self\', \'inputs\', \'training\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "compute_mask"
+ argspec: "args=[\'self\', \'inputs\', \'mask\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "compute_output_shape"
+ argspec: "args=[\'instance\', \'input_shape\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "count_params"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "from_config"
+ argspec: "args=[\'cls\', \'config\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_config"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_input_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_input_mask_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_input_shape_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_losses_for"
+ argspec: "args=[\'self\', \'inputs\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_output_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_output_mask_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_output_shape_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_updates_for"
+ argspec: "args=[\'self\', \'inputs\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_weights"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "set_weights"
+ argspec: "args=[\'self\', \'weights\'], varargs=None, keywords=None, defaults=None"
+ }
+}
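
GaussianNoise takes a single required stddev argument (defaults=None in the argspec) and, like Dropout, is only active when called with training=True. A minimal sketch:

import tensorflow as tf

layer = tf.keras.layers.GaussianNoise(stddev=0.1)
x = tf.keras.Input(shape=(64,))
y = layer(x, training=True)   # adds zero-mean noise with stddev 0.1 during training
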
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-global-average-pooling1-d.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-global-average-pooling1-d.pbtxt
new file mode 100644
index 0000000000..5ea61d118d
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-global-average-pooling1-d.pbtxt
@@ -0,0 +1,176 @@
+path: "tensorflow.keras.layers.GlobalAveragePooling1D"
+tf_class {
+ is_instance: "<class \'tensorflow.python.keras.layers.pooling.GlobalAveragePooling1D\'>"
+ is_instance: "<class \'tensorflow.python.keras.layers.pooling.GlobalPooling1D\'>"
+ is_instance: "<class \'tensorflow.python.keras.engine.base_layer.Layer\'>"
+ is_instance: "<class \'tensorflow.python.training.checkpointable.base.CheckpointableBase\'>"
+ is_instance: "<type \'object\'>"
+ member {
+ name: "activity_regularizer"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "dtype"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "inbound_nodes"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "input"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "input_mask"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "input_shape"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "losses"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "name"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "non_trainable_variables"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "non_trainable_weights"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "outbound_nodes"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "output"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "output_mask"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "output_shape"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "trainable_variables"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "trainable_weights"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "updates"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "variables"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "weights"
+ mtype: "<type \'property\'>"
+ }
+ member_method {
+ name: "__init__"
+ argspec: "args=[\'self\'], varargs=None, keywords=kwargs, defaults=None"
+ }
+ member_method {
+ name: "add_loss"
+ argspec: "args=[\'self\', \'losses\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "add_update"
+ argspec: "args=[\'self\', \'updates\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "add_variable"
+ argspec: "args=[\'self\'], varargs=args, keywords=kwargs, defaults=None"
+ }
+ member_method {
+ name: "add_weight"
+ argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'synchronization\', \'aggregation\'], varargs=None, keywords=kwargs, defaults=[\'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'VariableSynchronization.AUTO\', \'VariableAggregation.NONE\'], "
+ }
+ member_method {
+ name: "apply"
+ argspec: "args=[\'self\', \'inputs\'], varargs=args, keywords=kwargs, defaults=None"
+ }
+ member_method {
+ name: "build"
+ argspec: "args=[\'self\', \'input_shape\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "call"
+ argspec: "args=[\'self\', \'inputs\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "compute_mask"
+ argspec: "args=[\'self\', \'inputs\', \'mask\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "compute_output_shape"
+ argspec: "args=[\'self\', \'input_shape\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "count_params"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "from_config"
+ argspec: "args=[\'cls\', \'config\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_config"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_input_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_input_mask_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_input_shape_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_losses_for"
+ argspec: "args=[\'self\', \'inputs\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_output_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_output_mask_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_output_shape_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_updates_for"
+ argspec: "args=[\'self\', \'inputs\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_weights"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "set_weights"
+ argspec: "args=[\'self\', \'weights\'], varargs=None, keywords=None, defaults=None"
+ }
+}
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-global-average-pooling2-d.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-global-average-pooling2-d.pbtxt
new file mode 100644
index 0000000000..929f48df23
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-global-average-pooling2-d.pbtxt
@@ -0,0 +1,176 @@
+path: "tensorflow.keras.layers.GlobalAveragePooling2D"
+tf_class {
+ is_instance: "<class \'tensorflow.python.keras.layers.pooling.GlobalAveragePooling2D\'>"
+ is_instance: "<class \'tensorflow.python.keras.layers.pooling.GlobalPooling2D\'>"
+ is_instance: "<class \'tensorflow.python.keras.engine.base_layer.Layer\'>"
+ is_instance: "<class \'tensorflow.python.training.checkpointable.base.CheckpointableBase\'>"
+ is_instance: "<type \'object\'>"
+ member {
+ name: "activity_regularizer"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "dtype"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "inbound_nodes"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "input"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "input_mask"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "input_shape"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "losses"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "name"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "non_trainable_variables"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "non_trainable_weights"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "outbound_nodes"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "output"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "output_mask"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "output_shape"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "trainable_variables"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "trainable_weights"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "updates"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "variables"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "weights"
+ mtype: "<type \'property\'>"
+ }
+ member_method {
+ name: "__init__"
+ argspec: "args=[\'self\', \'data_format\'], varargs=None, keywords=kwargs, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "add_loss"
+ argspec: "args=[\'self\', \'losses\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "add_update"
+ argspec: "args=[\'self\', \'updates\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "add_variable"
+ argspec: "args=[\'self\'], varargs=args, keywords=kwargs, defaults=None"
+ }
+ member_method {
+ name: "add_weight"
+ argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'synchronization\', \'aggregation\'], varargs=None, keywords=kwargs, defaults=[\'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'VariableSynchronization.AUTO\', \'VariableAggregation.NONE\'], "
+ }
+ member_method {
+ name: "apply"
+ argspec: "args=[\'self\', \'inputs\'], varargs=args, keywords=kwargs, defaults=None"
+ }
+ member_method {
+ name: "build"
+ argspec: "args=[\'self\', \'input_shape\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "call"
+ argspec: "args=[\'self\', \'inputs\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "compute_mask"
+ argspec: "args=[\'self\', \'inputs\', \'mask\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "compute_output_shape"
+ argspec: "args=[\'self\', \'input_shape\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "count_params"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "from_config"
+ argspec: "args=[\'cls\', \'config\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_config"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_input_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_input_mask_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_input_shape_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_losses_for"
+ argspec: "args=[\'self\', \'inputs\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_output_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_output_mask_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_output_shape_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_updates_for"
+ argspec: "args=[\'self\', \'inputs\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_weights"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "set_weights"
+ argspec: "args=[\'self\', \'weights\'], varargs=None, keywords=None, defaults=None"
+ }
+}
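The golden file above only pins down the public surface of GlobalAveragePooling2D; it says nothing about behaviour. As an illustrative sketch (not part of the golden-file check, and assuming a TensorFlow build matching these goldens), the layer reduces the two spatial axes of a 4-D tensor to their per-channel mean:

    import numpy as np
    import tensorflow as tf

    # (batch, height, width, channels) in, (batch, channels) out: the layer
    # averages over the spatial axes when data_format is left at its default.
    inputs = tf.keras.Input(shape=(5, 7, 3))
    outputs = tf.keras.layers.GlobalAveragePooling2D()(inputs)
    model = tf.keras.Model(inputs, outputs)

    x = np.random.rand(2, 5, 7, 3).astype("float32")
    y = model.predict(x)
    print(y.shape)                                          # (2, 3)
    print(np.allclose(y, x.mean(axis=(1, 2)), atol=1e-5))   # True: per-channel spatial mean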
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-global-average-pooling3-d.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-global-average-pooling3-d.pbtxt
new file mode 100644
index 0000000000..2e6d59337f
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-global-average-pooling3-d.pbtxt
@@ -0,0 +1,176 @@
+path: "tensorflow.keras.layers.GlobalAveragePooling3D"
+tf_class {
+ is_instance: "<class \'tensorflow.python.keras.layers.pooling.GlobalAveragePooling3D\'>"
+ is_instance: "<class \'tensorflow.python.keras.layers.pooling.GlobalPooling3D\'>"
+ is_instance: "<class \'tensorflow.python.keras.engine.base_layer.Layer\'>"
+ is_instance: "<class \'tensorflow.python.training.checkpointable.base.CheckpointableBase\'>"
+ is_instance: "<type \'object\'>"
+ member {
+ name: "activity_regularizer"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "dtype"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "inbound_nodes"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "input"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "input_mask"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "input_shape"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "losses"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "name"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "non_trainable_variables"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "non_trainable_weights"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "outbound_nodes"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "output"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "output_mask"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "output_shape"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "trainable_variables"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "trainable_weights"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "updates"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "variables"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "weights"
+ mtype: "<type \'property\'>"
+ }
+ member_method {
+ name: "__init__"
+ argspec: "args=[\'self\', \'data_format\'], varargs=None, keywords=kwargs, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "add_loss"
+ argspec: "args=[\'self\', \'losses\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "add_update"
+ argspec: "args=[\'self\', \'updates\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "add_variable"
+ argspec: "args=[\'self\'], varargs=args, keywords=kwargs, defaults=None"
+ }
+ member_method {
+ name: "add_weight"
+ argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'synchronization\', \'aggregation\'], varargs=None, keywords=kwargs, defaults=[\'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'VariableSynchronization.AUTO\', \'VariableAggregation.NONE\'], "
+ }
+ member_method {
+ name: "apply"
+ argspec: "args=[\'self\', \'inputs\'], varargs=args, keywords=kwargs, defaults=None"
+ }
+ member_method {
+ name: "build"
+ argspec: "args=[\'self\', \'input_shape\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "call"
+ argspec: "args=[\'self\', \'inputs\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "compute_mask"
+ argspec: "args=[\'self\', \'inputs\', \'mask\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "compute_output_shape"
+ argspec: "args=[\'self\', \'input_shape\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "count_params"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "from_config"
+ argspec: "args=[\'cls\', \'config\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_config"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_input_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_input_mask_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_input_shape_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_losses_for"
+ argspec: "args=[\'self\', \'inputs\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_output_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_output_mask_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_output_shape_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_updates_for"
+ argspec: "args=[\'self\', \'inputs\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_weights"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "set_weights"
+ argspec: "args=[\'self\', \'weights\'], varargs=None, keywords=None, defaults=None"
+ }
+}
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-global-avg-pool1-d.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-global-avg-pool1-d.pbtxt
new file mode 100644
index 0000000000..11dca17c6d
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-global-avg-pool1-d.pbtxt
@@ -0,0 +1,176 @@
+path: "tensorflow.keras.layers.GlobalAvgPool1D"
+tf_class {
+ is_instance: "<class \'tensorflow.python.keras.layers.pooling.GlobalAveragePooling1D\'>"
+ is_instance: "<class \'tensorflow.python.keras.layers.pooling.GlobalPooling1D\'>"
+ is_instance: "<class \'tensorflow.python.keras.engine.base_layer.Layer\'>"
+ is_instance: "<class \'tensorflow.python.training.checkpointable.base.CheckpointableBase\'>"
+ is_instance: "<type \'object\'>"
+ member {
+ name: "activity_regularizer"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "dtype"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "inbound_nodes"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "input"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "input_mask"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "input_shape"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "losses"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "name"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "non_trainable_variables"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "non_trainable_weights"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "outbound_nodes"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "output"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "output_mask"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "output_shape"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "trainable_variables"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "trainable_weights"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "updates"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "variables"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "weights"
+ mtype: "<type \'property\'>"
+ }
+ member_method {
+ name: "__init__"
+ argspec: "args=[\'self\'], varargs=None, keywords=kwargs, defaults=None"
+ }
+ member_method {
+ name: "add_loss"
+ argspec: "args=[\'self\', \'losses\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "add_update"
+ argspec: "args=[\'self\', \'updates\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "add_variable"
+ argspec: "args=[\'self\'], varargs=args, keywords=kwargs, defaults=None"
+ }
+ member_method {
+ name: "add_weight"
+ argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'synchronization\', \'aggregation\'], varargs=None, keywords=kwargs, defaults=[\'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'VariableSynchronization.AUTO\', \'VariableAggregation.NONE\'], "
+ }
+ member_method {
+ name: "apply"
+ argspec: "args=[\'self\', \'inputs\'], varargs=args, keywords=kwargs, defaults=None"
+ }
+ member_method {
+ name: "build"
+ argspec: "args=[\'self\', \'input_shape\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "call"
+ argspec: "args=[\'self\', \'inputs\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "compute_mask"
+ argspec: "args=[\'self\', \'inputs\', \'mask\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "compute_output_shape"
+ argspec: "args=[\'self\', \'input_shape\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "count_params"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "from_config"
+ argspec: "args=[\'cls\', \'config\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_config"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_input_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_input_mask_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_input_shape_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_losses_for"
+ argspec: "args=[\'self\', \'inputs\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_output_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_output_mask_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_output_shape_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_updates_for"
+ argspec: "args=[\'self\', \'inputs\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_weights"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "set_weights"
+ argspec: "args=[\'self\', \'weights\'], varargs=None, keywords=None, defaults=None"
+ }
+}
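The is_instance entries in this golden file show that GlobalAvgPool1D is simply a shorter exported name for the GlobalAveragePooling1D class; both names point at the same pooling implementation, which is why their golden files list identical members and argspecs. A quick check (illustrative only, assuming the matching TensorFlow build):

    import tensorflow as tf

    # The two exported names are expected to resolve to the very same class.
    print(tf.keras.layers.GlobalAvgPool1D is tf.keras.layers.GlobalAveragePooling1D)  # True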
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-global-avg-pool2-d.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-global-avg-pool2-d.pbtxt
new file mode 100644
index 0000000000..4e3e258430
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-global-avg-pool2-d.pbtxt
@@ -0,0 +1,176 @@
+path: "tensorflow.keras.layers.GlobalAvgPool2D"
+tf_class {
+ is_instance: "<class \'tensorflow.python.keras.layers.pooling.GlobalAveragePooling2D\'>"
+ is_instance: "<class \'tensorflow.python.keras.layers.pooling.GlobalPooling2D\'>"
+ is_instance: "<class \'tensorflow.python.keras.engine.base_layer.Layer\'>"
+ is_instance: "<class \'tensorflow.python.training.checkpointable.base.CheckpointableBase\'>"
+ is_instance: "<type \'object\'>"
+ member {
+ name: "activity_regularizer"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "dtype"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "inbound_nodes"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "input"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "input_mask"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "input_shape"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "losses"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "name"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "non_trainable_variables"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "non_trainable_weights"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "outbound_nodes"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "output"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "output_mask"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "output_shape"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "trainable_variables"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "trainable_weights"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "updates"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "variables"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "weights"
+ mtype: "<type \'property\'>"
+ }
+ member_method {
+ name: "__init__"
+ argspec: "args=[\'self\', \'data_format\'], varargs=None, keywords=kwargs, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "add_loss"
+ argspec: "args=[\'self\', \'losses\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "add_update"
+ argspec: "args=[\'self\', \'updates\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "add_variable"
+ argspec: "args=[\'self\'], varargs=args, keywords=kwargs, defaults=None"
+ }
+ member_method {
+ name: "add_weight"
+ argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'synchronization\', \'aggregation\'], varargs=None, keywords=kwargs, defaults=[\'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'VariableSynchronization.AUTO\', \'VariableAggregation.NONE\'], "
+ }
+ member_method {
+ name: "apply"
+ argspec: "args=[\'self\', \'inputs\'], varargs=args, keywords=kwargs, defaults=None"
+ }
+ member_method {
+ name: "build"
+ argspec: "args=[\'self\', \'input_shape\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "call"
+ argspec: "args=[\'self\', \'inputs\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "compute_mask"
+ argspec: "args=[\'self\', \'inputs\', \'mask\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "compute_output_shape"
+ argspec: "args=[\'self\', \'input_shape\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "count_params"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "from_config"
+ argspec: "args=[\'cls\', \'config\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_config"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_input_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_input_mask_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_input_shape_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_losses_for"
+ argspec: "args=[\'self\', \'inputs\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_output_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_output_mask_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_output_shape_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_updates_for"
+ argspec: "args=[\'self\', \'inputs\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_weights"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "set_weights"
+ argspec: "args=[\'self\', \'weights\'], varargs=None, keywords=None, defaults=None"
+ }
+}
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-global-avg-pool3-d.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-global-avg-pool3-d.pbtxt
new file mode 100644
index 0000000000..fb9166316f
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-global-avg-pool3-d.pbtxt
@@ -0,0 +1,176 @@
+path: "tensorflow.keras.layers.GlobalAvgPool3D"
+tf_class {
+ is_instance: "<class \'tensorflow.python.keras.layers.pooling.GlobalAveragePooling3D\'>"
+ is_instance: "<class \'tensorflow.python.keras.layers.pooling.GlobalPooling3D\'>"
+ is_instance: "<class \'tensorflow.python.keras.engine.base_layer.Layer\'>"
+ is_instance: "<class \'tensorflow.python.training.checkpointable.base.CheckpointableBase\'>"
+ is_instance: "<type \'object\'>"
+ member {
+ name: "activity_regularizer"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "dtype"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "inbound_nodes"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "input"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "input_mask"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "input_shape"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "losses"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "name"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "non_trainable_variables"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "non_trainable_weights"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "outbound_nodes"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "output"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "output_mask"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "output_shape"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "trainable_variables"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "trainable_weights"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "updates"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "variables"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "weights"
+ mtype: "<type \'property\'>"
+ }
+ member_method {
+ name: "__init__"
+ argspec: "args=[\'self\', \'data_format\'], varargs=None, keywords=kwargs, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "add_loss"
+ argspec: "args=[\'self\', \'losses\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "add_update"
+ argspec: "args=[\'self\', \'updates\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "add_variable"
+ argspec: "args=[\'self\'], varargs=args, keywords=kwargs, defaults=None"
+ }
+ member_method {
+ name: "add_weight"
+ argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'synchronization\', \'aggregation\'], varargs=None, keywords=kwargs, defaults=[\'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'VariableSynchronization.AUTO\', \'VariableAggregation.NONE\'], "
+ }
+ member_method {
+ name: "apply"
+ argspec: "args=[\'self\', \'inputs\'], varargs=args, keywords=kwargs, defaults=None"
+ }
+ member_method {
+ name: "build"
+ argspec: "args=[\'self\', \'input_shape\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "call"
+ argspec: "args=[\'self\', \'inputs\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "compute_mask"
+ argspec: "args=[\'self\', \'inputs\', \'mask\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "compute_output_shape"
+ argspec: "args=[\'self\', \'input_shape\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "count_params"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "from_config"
+ argspec: "args=[\'cls\', \'config\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_config"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_input_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_input_mask_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_input_shape_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_losses_for"
+ argspec: "args=[\'self\', \'inputs\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_output_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_output_mask_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_output_shape_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_updates_for"
+ argspec: "args=[\'self\', \'inputs\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_weights"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "set_weights"
+ argspec: "args=[\'self\', \'weights\'], varargs=None, keywords=None, defaults=None"
+ }
+}
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-global-max-pool1-d.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-global-max-pool1-d.pbtxt
new file mode 100644
index 0000000000..278429af6f
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-global-max-pool1-d.pbtxt
@@ -0,0 +1,176 @@
+path: "tensorflow.keras.layers.GlobalMaxPool1D"
+tf_class {
+ is_instance: "<class \'tensorflow.python.keras.layers.pooling.GlobalMaxPooling1D\'>"
+ is_instance: "<class \'tensorflow.python.keras.layers.pooling.GlobalPooling1D\'>"
+ is_instance: "<class \'tensorflow.python.keras.engine.base_layer.Layer\'>"
+ is_instance: "<class \'tensorflow.python.training.checkpointable.base.CheckpointableBase\'>"
+ is_instance: "<type \'object\'>"
+ member {
+ name: "activity_regularizer"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "dtype"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "inbound_nodes"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "input"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "input_mask"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "input_shape"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "losses"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "name"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "non_trainable_variables"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "non_trainable_weights"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "outbound_nodes"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "output"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "output_mask"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "output_shape"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "trainable_variables"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "trainable_weights"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "updates"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "variables"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "weights"
+ mtype: "<type \'property\'>"
+ }
+ member_method {
+ name: "__init__"
+ argspec: "args=[\'self\'], varargs=None, keywords=kwargs, defaults=None"
+ }
+ member_method {
+ name: "add_loss"
+ argspec: "args=[\'self\', \'losses\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "add_update"
+ argspec: "args=[\'self\', \'updates\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "add_variable"
+ argspec: "args=[\'self\'], varargs=args, keywords=kwargs, defaults=None"
+ }
+ member_method {
+ name: "add_weight"
+ argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'synchronization\', \'aggregation\'], varargs=None, keywords=kwargs, defaults=[\'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'VariableSynchronization.AUTO\', \'VariableAggregation.NONE\'], "
+ }
+ member_method {
+ name: "apply"
+ argspec: "args=[\'self\', \'inputs\'], varargs=args, keywords=kwargs, defaults=None"
+ }
+ member_method {
+ name: "build"
+ argspec: "args=[\'self\', \'input_shape\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "call"
+ argspec: "args=[\'self\', \'inputs\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "compute_mask"
+ argspec: "args=[\'self\', \'inputs\', \'mask\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "compute_output_shape"
+ argspec: "args=[\'self\', \'input_shape\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "count_params"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "from_config"
+ argspec: "args=[\'cls\', \'config\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_config"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_input_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_input_mask_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_input_shape_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_losses_for"
+ argspec: "args=[\'self\', \'inputs\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_output_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_output_mask_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_output_shape_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_updates_for"
+ argspec: "args=[\'self\', \'inputs\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_weights"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "set_weights"
+ argspec: "args=[\'self\', \'weights\'], varargs=None, keywords=None, defaults=None"
+ }
+}
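GlobalMaxPool1D exposes exactly the same members as the average-pooling variants above; the only behavioural difference is the reduction applied over the steps axis (maximum rather than mean). A small numeric sketch, illustrative only and assuming a TensorFlow build matching these goldens:

    import numpy as np
    import tensorflow as tf

    # Input is (batch, steps, features); both layers collapse the steps axis.
    inputs = tf.keras.Input(shape=(4, 2))
    max_out = tf.keras.layers.GlobalMaxPool1D()(inputs)
    avg_out = tf.keras.layers.GlobalAveragePooling1D()(inputs)
    model = tf.keras.Model(inputs, [max_out, avg_out])

    x = np.array([[[1., 10.], [2., 20.], [3., 30.], [4., 40.]]], dtype="float32")
    max_y, avg_y = model.predict(x)
    print(max_y)  # [[ 4. 40.]]   per-feature maximum over the 4 steps
    print(avg_y)  # [[ 2.5 25. ]] per-feature mean over the 4 steps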
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-global-max-pool2-d.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-global-max-pool2-d.pbtxt
new file mode 100644
index 0000000000..87b7f6797a
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-global-max-pool2-d.pbtxt
@@ -0,0 +1,176 @@
+path: "tensorflow.keras.layers.GlobalMaxPool2D"
+tf_class {
+ is_instance: "<class \'tensorflow.python.keras.layers.pooling.GlobalMaxPooling2D\'>"
+ is_instance: "<class \'tensorflow.python.keras.layers.pooling.GlobalPooling2D\'>"
+ is_instance: "<class \'tensorflow.python.keras.engine.base_layer.Layer\'>"
+ is_instance: "<class \'tensorflow.python.training.checkpointable.base.CheckpointableBase\'>"
+ is_instance: "<type \'object\'>"
+ member {
+ name: "activity_regularizer"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "dtype"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "inbound_nodes"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "input"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "input_mask"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "input_shape"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "losses"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "name"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "non_trainable_variables"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "non_trainable_weights"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "outbound_nodes"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "output"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "output_mask"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "output_shape"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "trainable_variables"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "trainable_weights"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "updates"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "variables"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "weights"
+ mtype: "<type \'property\'>"
+ }
+ member_method {
+ name: "__init__"
+ argspec: "args=[\'self\', \'data_format\'], varargs=None, keywords=kwargs, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "add_loss"
+ argspec: "args=[\'self\', \'losses\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "add_update"
+ argspec: "args=[\'self\', \'updates\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "add_variable"
+ argspec: "args=[\'self\'], varargs=args, keywords=kwargs, defaults=None"
+ }
+ member_method {
+ name: "add_weight"
+ argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'synchronization\', \'aggregation\'], varargs=None, keywords=kwargs, defaults=[\'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'VariableSynchronization.AUTO\', \'VariableAggregation.NONE\'], "
+ }
+ member_method {
+ name: "apply"
+ argspec: "args=[\'self\', \'inputs\'], varargs=args, keywords=kwargs, defaults=None"
+ }
+ member_method {
+ name: "build"
+ argspec: "args=[\'self\', \'input_shape\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "call"
+ argspec: "args=[\'self\', \'inputs\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "compute_mask"
+ argspec: "args=[\'self\', \'inputs\', \'mask\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "compute_output_shape"
+ argspec: "args=[\'self\', \'input_shape\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "count_params"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "from_config"
+ argspec: "args=[\'cls\', \'config\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_config"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_input_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_input_mask_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_input_shape_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_losses_for"
+ argspec: "args=[\'self\', \'inputs\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_output_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_output_mask_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_output_shape_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_updates_for"
+ argspec: "args=[\'self\', \'inputs\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_weights"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "set_weights"
+ argspec: "args=[\'self\', \'weights\'], varargs=None, keywords=None, defaults=None"
+ }
+}
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-global-max-pool3-d.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-global-max-pool3-d.pbtxt
new file mode 100644
index 0000000000..98bf96fa0c
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-global-max-pool3-d.pbtxt
@@ -0,0 +1,176 @@
+path: "tensorflow.keras.layers.GlobalMaxPool3D"
+tf_class {
+ is_instance: "<class \'tensorflow.python.keras.layers.pooling.GlobalMaxPooling3D\'>"
+ is_instance: "<class \'tensorflow.python.keras.layers.pooling.GlobalPooling3D\'>"
+ is_instance: "<class \'tensorflow.python.keras.engine.base_layer.Layer\'>"
+ is_instance: "<class \'tensorflow.python.training.checkpointable.base.CheckpointableBase\'>"
+ is_instance: "<type \'object\'>"
+ member {
+ name: "activity_regularizer"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "dtype"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "inbound_nodes"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "input"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "input_mask"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "input_shape"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "losses"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "name"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "non_trainable_variables"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "non_trainable_weights"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "outbound_nodes"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "output"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "output_mask"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "output_shape"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "trainable_variables"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "trainable_weights"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "updates"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "variables"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "weights"
+ mtype: "<type \'property\'>"
+ }
+ member_method {
+ name: "__init__"
+ argspec: "args=[\'self\', \'data_format\'], varargs=None, keywords=kwargs, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "add_loss"
+ argspec: "args=[\'self\', \'losses\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "add_update"
+ argspec: "args=[\'self\', \'updates\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "add_variable"
+ argspec: "args=[\'self\'], varargs=args, keywords=kwargs, defaults=None"
+ }
+ member_method {
+ name: "add_weight"
+ argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'synchronization\', \'aggregation\'], varargs=None, keywords=kwargs, defaults=[\'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'VariableSynchronization.AUTO\', \'VariableAggregation.NONE\'], "
+ }
+ member_method {
+ name: "apply"
+ argspec: "args=[\'self\', \'inputs\'], varargs=args, keywords=kwargs, defaults=None"
+ }
+ member_method {
+ name: "build"
+ argspec: "args=[\'self\', \'input_shape\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "call"
+ argspec: "args=[\'self\', \'inputs\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "compute_mask"
+ argspec: "args=[\'self\', \'inputs\', \'mask\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "compute_output_shape"
+ argspec: "args=[\'self\', \'input_shape\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "count_params"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "from_config"
+ argspec: "args=[\'cls\', \'config\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_config"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_input_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_input_mask_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_input_shape_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_losses_for"
+ argspec: "args=[\'self\', \'inputs\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_output_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_output_mask_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_output_shape_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_updates_for"
+ argspec: "args=[\'self\', \'inputs\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_weights"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "set_weights"
+ argspec: "args=[\'self\', \'weights\'], varargs=None, keywords=None, defaults=None"
+ }
+}
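Every global pooling layer in these goldens takes the same single constructor argument, data_format, defaulting to None (channels_last). That flag decides which axis is treated as channels and therefore survives the reduction; a sketch for the 3-D case, illustrative only:

    import numpy as np
    import tensorflow as tf

    inputs = tf.keras.Input(shape=(3, 4, 5, 6))   # plus an implicit batch axis
    # channels_last (the default): the trailing axis of size 6 is kept.
    last = tf.keras.layers.GlobalMaxPool3D(data_format="channels_last")(inputs)
    # channels_first: axis 1 of size 3 is treated as channels and kept instead.
    first = tf.keras.layers.GlobalMaxPool3D(data_format="channels_first")(inputs)
    model = tf.keras.Model(inputs, [last, first])

    x = np.random.rand(2, 3, 4, 5, 6).astype("float32")
    y_last, y_first = model.predict(x)
    print(y_last.shape)   # (2, 6) -- maximum taken over the three spatial axes
    print(y_first.shape)  # (2, 3)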
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-global-max-pooling1-d.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-global-max-pooling1-d.pbtxt
new file mode 100644
index 0000000000..935a69ab2f
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-global-max-pooling1-d.pbtxt
@@ -0,0 +1,176 @@
+path: "tensorflow.keras.layers.GlobalMaxPooling1D"
+tf_class {
+ is_instance: "<class \'tensorflow.python.keras.layers.pooling.GlobalMaxPooling1D\'>"
+ is_instance: "<class \'tensorflow.python.keras.layers.pooling.GlobalPooling1D\'>"
+ is_instance: "<class \'tensorflow.python.keras.engine.base_layer.Layer\'>"
+ is_instance: "<class \'tensorflow.python.training.checkpointable.base.CheckpointableBase\'>"
+ is_instance: "<type \'object\'>"
+ member {
+ name: "activity_regularizer"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "dtype"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "inbound_nodes"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "input"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "input_mask"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "input_shape"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "losses"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "name"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "non_trainable_variables"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "non_trainable_weights"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "outbound_nodes"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "output"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "output_mask"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "output_shape"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "trainable_variables"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "trainable_weights"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "updates"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "variables"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "weights"
+ mtype: "<type \'property\'>"
+ }
+ member_method {
+ name: "__init__"
+ argspec: "args=[\'self\'], varargs=None, keywords=kwargs, defaults=None"
+ }
+ member_method {
+ name: "add_loss"
+ argspec: "args=[\'self\', \'losses\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "add_update"
+ argspec: "args=[\'self\', \'updates\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "add_variable"
+ argspec: "args=[\'self\'], varargs=args, keywords=kwargs, defaults=None"
+ }
+ member_method {
+ name: "add_weight"
+ argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'synchronization\', \'aggregation\'], varargs=None, keywords=kwargs, defaults=[\'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'VariableSynchronization.AUTO\', \'VariableAggregation.NONE\'], "
+ }
+ member_method {
+ name: "apply"
+ argspec: "args=[\'self\', \'inputs\'], varargs=args, keywords=kwargs, defaults=None"
+ }
+ member_method {
+ name: "build"
+ argspec: "args=[\'self\', \'input_shape\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "call"
+ argspec: "args=[\'self\', \'inputs\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "compute_mask"
+ argspec: "args=[\'self\', \'inputs\', \'mask\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "compute_output_shape"
+ argspec: "args=[\'self\', \'input_shape\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "count_params"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "from_config"
+ argspec: "args=[\'cls\', \'config\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_config"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_input_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_input_mask_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_input_shape_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_losses_for"
+ argspec: "args=[\'self\', \'inputs\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_output_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_output_mask_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_output_shape_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_updates_for"
+ argspec: "args=[\'self\', \'inputs\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_weights"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "set_weights"
+ argspec: "args=[\'self\', \'weights\'], varargs=None, keywords=None, defaults=None"
+ }
+}
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-global-max-pooling2-d.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-global-max-pooling2-d.pbtxt
new file mode 100644
index 0000000000..c9d4158d1c
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-global-max-pooling2-d.pbtxt
@@ -0,0 +1,176 @@
+path: "tensorflow.keras.layers.GlobalMaxPooling2D"
+tf_class {
+ is_instance: "<class \'tensorflow.python.keras.layers.pooling.GlobalMaxPooling2D\'>"
+ is_instance: "<class \'tensorflow.python.keras.layers.pooling.GlobalPooling2D\'>"
+ is_instance: "<class \'tensorflow.python.keras.engine.base_layer.Layer\'>"
+ is_instance: "<class \'tensorflow.python.training.checkpointable.base.CheckpointableBase\'>"
+ is_instance: "<type \'object\'>"
+ member {
+ name: "activity_regularizer"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "dtype"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "inbound_nodes"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "input"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "input_mask"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "input_shape"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "losses"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "name"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "non_trainable_variables"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "non_trainable_weights"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "outbound_nodes"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "output"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "output_mask"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "output_shape"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "trainable_variables"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "trainable_weights"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "updates"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "variables"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "weights"
+ mtype: "<type \'property\'>"
+ }
+ member_method {
+ name: "__init__"
+ argspec: "args=[\'self\', \'data_format\'], varargs=None, keywords=kwargs, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "add_loss"
+ argspec: "args=[\'self\', \'losses\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "add_update"
+ argspec: "args=[\'self\', \'updates\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "add_variable"
+ argspec: "args=[\'self\'], varargs=args, keywords=kwargs, defaults=None"
+ }
+ member_method {
+ name: "add_weight"
+ argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'synchronization\', \'aggregation\'], varargs=None, keywords=kwargs, defaults=[\'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'VariableSynchronization.AUTO\', \'VariableAggregation.NONE\'], "
+ }
+ member_method {
+ name: "apply"
+ argspec: "args=[\'self\', \'inputs\'], varargs=args, keywords=kwargs, defaults=None"
+ }
+ member_method {
+ name: "build"
+ argspec: "args=[\'self\', \'input_shape\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "call"
+ argspec: "args=[\'self\', \'inputs\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "compute_mask"
+ argspec: "args=[\'self\', \'inputs\', \'mask\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "compute_output_shape"
+ argspec: "args=[\'self\', \'input_shape\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "count_params"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "from_config"
+ argspec: "args=[\'cls\', \'config\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_config"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_input_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_input_mask_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_input_shape_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_losses_for"
+ argspec: "args=[\'self\', \'inputs\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_output_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_output_mask_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_output_shape_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_updates_for"
+ argspec: "args=[\'self\', \'inputs\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_weights"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "set_weights"
+ argspec: "args=[\'self\', \'weights\'], varargs=None, keywords=None, defaults=None"
+ }
+}
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-global-max-pooling3-d.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-global-max-pooling3-d.pbtxt
new file mode 100644
index 0000000000..9953102ff9
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-global-max-pooling3-d.pbtxt
@@ -0,0 +1,176 @@
+path: "tensorflow.keras.layers.GlobalMaxPooling3D"
+tf_class {
+ is_instance: "<class \'tensorflow.python.keras.layers.pooling.GlobalMaxPooling3D\'>"
+ is_instance: "<class \'tensorflow.python.keras.layers.pooling.GlobalPooling3D\'>"
+ is_instance: "<class \'tensorflow.python.keras.engine.base_layer.Layer\'>"
+ is_instance: "<class \'tensorflow.python.training.checkpointable.base.CheckpointableBase\'>"
+ is_instance: "<type \'object\'>"
+ member {
+ name: "activity_regularizer"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "dtype"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "inbound_nodes"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "input"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "input_mask"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "input_shape"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "losses"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "name"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "non_trainable_variables"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "non_trainable_weights"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "outbound_nodes"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "output"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "output_mask"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "output_shape"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "trainable_variables"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "trainable_weights"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "updates"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "variables"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "weights"
+ mtype: "<type \'property\'>"
+ }
+ member_method {
+ name: "__init__"
+ argspec: "args=[\'self\', \'data_format\'], varargs=None, keywords=kwargs, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "add_loss"
+ argspec: "args=[\'self\', \'losses\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "add_update"
+ argspec: "args=[\'self\', \'updates\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "add_variable"
+ argspec: "args=[\'self\'], varargs=args, keywords=kwargs, defaults=None"
+ }
+ member_method {
+ name: "add_weight"
+ argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'synchronization\', \'aggregation\'], varargs=None, keywords=kwargs, defaults=[\'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'VariableSynchronization.AUTO\', \'VariableAggregation.NONE\'], "
+ }
+ member_method {
+ name: "apply"
+ argspec: "args=[\'self\', \'inputs\'], varargs=args, keywords=kwargs, defaults=None"
+ }
+ member_method {
+ name: "build"
+ argspec: "args=[\'self\', \'input_shape\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "call"
+ argspec: "args=[\'self\', \'inputs\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "compute_mask"
+ argspec: "args=[\'self\', \'inputs\', \'mask\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "compute_output_shape"
+ argspec: "args=[\'self\', \'input_shape\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "count_params"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "from_config"
+ argspec: "args=[\'cls\', \'config\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_config"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_input_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_input_mask_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_input_shape_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_losses_for"
+ argspec: "args=[\'self\', \'inputs\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_output_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_output_mask_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_output_shape_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_updates_for"
+ argspec: "args=[\'self\', \'inputs\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_weights"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "set_weights"
+ argspec: "args=[\'self\', \'weights\'], varargs=None, keywords=None, defaults=None"
+ }
+}
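
A minimal usage sketch consistent with the GlobalMaxPooling3D signature recorded above (illustrative only; the tf.keras import path and the shapes are assumptions, not part of the golden file):

import tensorflow as tf

# Global max pooling over a 5D input (batch, dim1, dim2, dim3, channels).
layer = tf.keras.layers.GlobalMaxPooling3D(data_format='channels_last')
x = tf.keras.Input(shape=(4, 4, 4, 8))
y = layer(x)  # shape (None, 8): the three spatial dimensions are reduced by max
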
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-input-layer.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-input-layer.pbtxt
new file mode 100644
index 0000000000..2617f5a95f
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-input-layer.pbtxt
@@ -0,0 +1,175 @@
+path: "tensorflow.keras.layers.InputLayer"
+tf_class {
+ is_instance: "<class \'tensorflow.python.keras.engine.input_layer.InputLayer\'>"
+ is_instance: "<class \'tensorflow.python.keras.engine.base_layer.Layer\'>"
+ is_instance: "<class \'tensorflow.python.training.checkpointable.base.CheckpointableBase\'>"
+ is_instance: "<type \'object\'>"
+ member {
+ name: "activity_regularizer"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "dtype"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "inbound_nodes"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "input"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "input_mask"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "input_shape"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "losses"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "name"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "non_trainable_variables"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "non_trainable_weights"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "outbound_nodes"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "output"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "output_mask"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "output_shape"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "trainable_variables"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "trainable_weights"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "updates"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "variables"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "weights"
+ mtype: "<type \'property\'>"
+ }
+ member_method {
+ name: "__init__"
+ argspec: "args=[\'self\', \'input_shape\', \'batch_size\', \'dtype\', \'input_tensor\', \'sparse\', \'name\'], varargs=None, keywords=kwargs, defaults=[\'None\', \'None\', \'None\', \'None\', \'False\', \'None\'], "
+ }
+ member_method {
+ name: "add_loss"
+ argspec: "args=[\'self\', \'losses\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "add_update"
+ argspec: "args=[\'self\', \'updates\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "add_variable"
+ argspec: "args=[\'self\'], varargs=args, keywords=kwargs, defaults=None"
+ }
+ member_method {
+ name: "add_weight"
+ argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'synchronization\', \'aggregation\'], varargs=None, keywords=kwargs, defaults=[\'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'VariableSynchronization.AUTO\', \'VariableAggregation.NONE\'], "
+ }
+ member_method {
+ name: "apply"
+ argspec: "args=[\'self\', \'inputs\'], varargs=args, keywords=kwargs, defaults=None"
+ }
+ member_method {
+ name: "build"
+ argspec: "args=[\'self\', \'input_shape\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "call"
+ argspec: "args=[\'self\', \'inputs\'], varargs=None, keywords=kwargs, defaults=None"
+ }
+ member_method {
+ name: "compute_mask"
+ argspec: "args=[\'self\', \'inputs\', \'mask\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "compute_output_shape"
+ argspec: "args=[\'self\', \'input_shape\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "count_params"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "from_config"
+ argspec: "args=[\'cls\', \'config\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_config"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_input_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_input_mask_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_input_shape_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_losses_for"
+ argspec: "args=[\'self\', \'inputs\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_output_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_output_mask_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_output_shape_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_updates_for"
+ argspec: "args=[\'self\', \'inputs\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_weights"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "set_weights"
+ argspec: "args=[\'self\', \'weights\'], varargs=None, keywords=None, defaults=None"
+ }
+}
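
A minimal usage sketch for the InputLayer constructor captured above (illustrative only; shapes and the Sequential wiring are assumptions, not part of the golden file):

import tensorflow as tf

# InputLayer is usually created implicitly via tf.keras.Input, but it can be
# added explicitly, e.g. at the front of a Sequential model.
model = tf.keras.Sequential([
    tf.keras.layers.InputLayer(input_shape=(28, 28), dtype='float32'),
    tf.keras.layers.Flatten(),
    tf.keras.layers.Dense(10),
])
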
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-input-spec.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-input-spec.pbtxt
new file mode 100644
index 0000000000..5fd0a47a68
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-input-spec.pbtxt
@@ -0,0 +1,9 @@
+path: "tensorflow.keras.layers.InputSpec"
+tf_class {
+ is_instance: "<class \'tensorflow.python.keras.engine.base_layer.InputSpec\'>"
+ is_instance: "<type \'object\'>"
+ member_method {
+ name: "__init__"
+ argspec: "args=[\'self\', \'dtype\', \'shape\', \'ndim\', \'max_ndim\', \'min_ndim\', \'axes\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'None\', \'None\', \'None\'], "
+ }
+}
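
A short sketch of how the InputSpec constructor above is typically used (illustrative only; the concrete constraint values are assumptions):

import tensorflow as tf

# A layer can declare constraints on its inputs, e.g. rank-2 tensors whose
# last axis has size 16; mismatching inputs raise an error at call time.
spec = tf.keras.layers.InputSpec(ndim=2, axes={-1: 16})
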
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-l-s-t-m-cell.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-l-s-t-m-cell.pbtxt
new file mode 100644
index 0000000000..e9f6ef45aa
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-l-s-t-m-cell.pbtxt
@@ -0,0 +1,179 @@
+path: "tensorflow.keras.layers.LSTMCell"
+tf_class {
+ is_instance: "<class \'tensorflow.python.keras.layers.recurrent.LSTMCell\'>"
+ is_instance: "<class \'tensorflow.python.keras.engine.base_layer.Layer\'>"
+ is_instance: "<class \'tensorflow.python.training.checkpointable.base.CheckpointableBase\'>"
+ is_instance: "<type \'object\'>"
+ member {
+ name: "activity_regularizer"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "dtype"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "inbound_nodes"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "input"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "input_mask"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "input_shape"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "losses"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "name"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "non_trainable_variables"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "non_trainable_weights"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "outbound_nodes"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "output"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "output_mask"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "output_shape"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "trainable_variables"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "trainable_weights"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "updates"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "variables"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "weights"
+ mtype: "<type \'property\'>"
+ }
+ member_method {
+ name: "__init__"
+ argspec: "args=[\'self\', \'units\', \'activation\', \'recurrent_activation\', \'use_bias\', \'kernel_initializer\', \'recurrent_initializer\', \'bias_initializer\', \'unit_forget_bias\', \'kernel_regularizer\', \'recurrent_regularizer\', \'bias_regularizer\', \'kernel_constraint\', \'recurrent_constraint\', \'bias_constraint\', \'dropout\', \'recurrent_dropout\', \'implementation\'], varargs=None, keywords=kwargs, defaults=[\'tanh\', \'hard_sigmoid\', \'True\', \'glorot_uniform\', \'orthogonal\', \'zeros\', \'True\', \'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'0.0\', \'0.0\', \'1\'], "
+ }
+ member_method {
+ name: "add_loss"
+ argspec: "args=[\'self\', \'losses\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "add_update"
+ argspec: "args=[\'self\', \'updates\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "add_variable"
+ argspec: "args=[\'self\'], varargs=args, keywords=kwargs, defaults=None"
+ }
+ member_method {
+ name: "add_weight"
+ argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'synchronization\', \'aggregation\'], varargs=None, keywords=kwargs, defaults=[\'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'VariableSynchronization.AUTO\', \'VariableAggregation.NONE\'], "
+ }
+ member_method {
+ name: "apply"
+ argspec: "args=[\'self\', \'inputs\'], varargs=args, keywords=kwargs, defaults=None"
+ }
+ member_method {
+ name: "build"
+ argspec: "args=[\'instance\', \'input_shape\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "call"
+ argspec: "args=[\'self\', \'inputs\', \'states\', \'training\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "compute_mask"
+ argspec: "args=[\'self\', \'inputs\', \'mask\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "compute_output_shape"
+ argspec: "args=[\'self\', \'input_shape\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "count_params"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "from_config"
+ argspec: "args=[\'cls\', \'config\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_config"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_initial_state"
+ argspec: "args=[\'self\', \'inputs\', \'batch_size\', \'dtype\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\'], "
+ }
+ member_method {
+ name: "get_input_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_input_mask_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_input_shape_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_losses_for"
+ argspec: "args=[\'self\', \'inputs\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_output_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_output_mask_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_output_shape_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_updates_for"
+ argspec: "args=[\'self\', \'inputs\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_weights"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "set_weights"
+ argspec: "args=[\'self\', \'weights\'], varargs=None, keywords=None, defaults=None"
+ }
+}
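
A minimal usage sketch for the LSTMCell API recorded above (illustrative only; the RNN wrapper, shapes, and hyperparameters are assumptions, not part of the golden file):

import tensorflow as tf

# LSTMCell defines the per-timestep computation; tf.keras.layers.RNN wraps it
# to process a full sequence.
cell = tf.keras.layers.LSTMCell(32, recurrent_dropout=0.1)
rnn = tf.keras.layers.RNN(cell)
x = tf.keras.Input(shape=(10, 8))  # (timesteps, features)
y = rnn(x)                         # shape (None, 32): last output only
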
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-l-s-t-m.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-l-s-t-m.pbtxt
new file mode 100644
index 0000000000..ecdbf48157
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-l-s-t-m.pbtxt
@@ -0,0 +1,256 @@
+path: "tensorflow.keras.layers.LSTM"
+tf_class {
+ is_instance: "<class \'tensorflow.python.keras.layers.recurrent.LSTM\'>"
+ is_instance: "<class \'tensorflow.python.keras.layers.recurrent.RNN\'>"
+ is_instance: "<class \'tensorflow.python.keras.engine.base_layer.Layer\'>"
+ is_instance: "<class \'tensorflow.python.training.checkpointable.base.CheckpointableBase\'>"
+ is_instance: "<type \'object\'>"
+ member {
+ name: "activation"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "activity_regularizer"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "bias_constraint"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "bias_initializer"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "bias_regularizer"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "dropout"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "dtype"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "implementation"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "inbound_nodes"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "input"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "input_mask"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "input_shape"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "kernel_constraint"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "kernel_initializer"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "kernel_regularizer"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "losses"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "name"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "non_trainable_variables"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "non_trainable_weights"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "outbound_nodes"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "output"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "output_mask"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "output_shape"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "recurrent_activation"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "recurrent_constraint"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "recurrent_dropout"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "recurrent_initializer"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "recurrent_regularizer"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "states"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "trainable_variables"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "trainable_weights"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "unit_forget_bias"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "units"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "updates"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "use_bias"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "variables"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "weights"
+ mtype: "<type \'property\'>"
+ }
+ member_method {
+ name: "__init__"
+ argspec: "args=[\'self\', \'units\', \'activation\', \'recurrent_activation\', \'use_bias\', \'kernel_initializer\', \'recurrent_initializer\', \'bias_initializer\', \'unit_forget_bias\', \'kernel_regularizer\', \'recurrent_regularizer\', \'bias_regularizer\', \'activity_regularizer\', \'kernel_constraint\', \'recurrent_constraint\', \'bias_constraint\', \'dropout\', \'recurrent_dropout\', \'implementation\', \'return_sequences\', \'return_state\', \'go_backwards\', \'stateful\', \'unroll\'], varargs=None, keywords=kwargs, defaults=[\'tanh\', \'hard_sigmoid\', \'True\', \'glorot_uniform\', \'orthogonal\', \'zeros\', \'True\', \'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'0.0\', \'0.0\', \'1\', \'False\', \'False\', \'False\', \'False\', \'False\'], "
+ }
+ member_method {
+ name: "add_loss"
+ argspec: "args=[\'self\', \'losses\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "add_update"
+ argspec: "args=[\'self\', \'updates\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "add_variable"
+ argspec: "args=[\'self\'], varargs=args, keywords=kwargs, defaults=None"
+ }
+ member_method {
+ name: "add_weight"
+ argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'synchronization\', \'aggregation\'], varargs=None, keywords=kwargs, defaults=[\'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'VariableSynchronization.AUTO\', \'VariableAggregation.NONE\'], "
+ }
+ member_method {
+ name: "apply"
+ argspec: "args=[\'self\', \'inputs\'], varargs=args, keywords=kwargs, defaults=None"
+ }
+ member_method {
+ name: "build"
+ argspec: "args=[\'instance\', \'input_shape\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "call"
+ argspec: "args=[\'self\', \'inputs\', \'mask\', \'training\', \'initial_state\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\'], "
+ }
+ member_method {
+ name: "compute_mask"
+ argspec: "args=[\'self\', \'inputs\', \'mask\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "compute_output_shape"
+ argspec: "args=[\'instance\', \'input_shape\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "count_params"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "from_config"
+ argspec: "args=[\'cls\', \'config\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_config"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_initial_state"
+ argspec: "args=[\'self\', \'inputs\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_input_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_input_mask_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_input_shape_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_losses_for"
+ argspec: "args=[\'self\', \'inputs\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_output_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_output_mask_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_output_shape_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_updates_for"
+ argspec: "args=[\'self\', \'inputs\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_weights"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "reset_states"
+ argspec: "args=[\'self\', \'states\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "set_weights"
+ argspec: "args=[\'self\', \'weights\'], varargs=None, keywords=None, defaults=None"
+ }
+}
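
A minimal usage sketch for the LSTM layer signature above, showing the return_sequences/return_state flags listed in the argspec (illustrative only; shapes are assumptions):

import tensorflow as tf

x = tf.keras.Input(shape=(10, 8))
# return_sequences=True yields the per-timestep outputs; return_state=True
# additionally returns the final hidden and cell states.
outputs, h, c = tf.keras.layers.LSTM(32, return_sequences=True,
                                     return_state=True)(x)
# outputs: (None, 10, 32), h: (None, 32), c: (None, 32)
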
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-lambda.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-lambda.pbtxt
new file mode 100644
index 0000000000..2e0b6bac24
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-lambda.pbtxt
@@ -0,0 +1,175 @@
+path: "tensorflow.keras.layers.Lambda"
+tf_class {
+ is_instance: "<class \'tensorflow.python.keras.layers.core.Lambda\'>"
+ is_instance: "<class \'tensorflow.python.keras.engine.base_layer.Layer\'>"
+ is_instance: "<class \'tensorflow.python.training.checkpointable.base.CheckpointableBase\'>"
+ is_instance: "<type \'object\'>"
+ member {
+ name: "activity_regularizer"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "dtype"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "inbound_nodes"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "input"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "input_mask"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "input_shape"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "losses"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "name"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "non_trainable_variables"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "non_trainable_weights"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "outbound_nodes"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "output"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "output_mask"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "output_shape"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "trainable_variables"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "trainable_weights"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "updates"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "variables"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "weights"
+ mtype: "<type \'property\'>"
+ }
+ member_method {
+ name: "__init__"
+ argspec: "args=[\'self\', \'function\', \'output_shape\', \'mask\', \'arguments\'], varargs=None, keywords=kwargs, defaults=[\'None\', \'None\', \'None\'], "
+ }
+ member_method {
+ name: "add_loss"
+ argspec: "args=[\'self\', \'losses\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "add_update"
+ argspec: "args=[\'self\', \'updates\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "add_variable"
+ argspec: "args=[\'self\'], varargs=args, keywords=kwargs, defaults=None"
+ }
+ member_method {
+ name: "add_weight"
+ argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'synchronization\', \'aggregation\'], varargs=None, keywords=kwargs, defaults=[\'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'VariableSynchronization.AUTO\', \'VariableAggregation.NONE\'], "
+ }
+ member_method {
+ name: "apply"
+ argspec: "args=[\'self\', \'inputs\'], varargs=args, keywords=kwargs, defaults=None"
+ }
+ member_method {
+ name: "build"
+ argspec: "args=[\'self\', \'input_shape\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "call"
+ argspec: "args=[\'self\', \'inputs\', \'mask\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "compute_mask"
+ argspec: "args=[\'self\', \'inputs\', \'mask\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "compute_output_shape"
+ argspec: "args=[\'instance\', \'input_shape\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "count_params"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "from_config"
+ argspec: "args=[\'cls\', \'config\', \'custom_objects\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "get_config"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_input_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_input_mask_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_input_shape_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_losses_for"
+ argspec: "args=[\'self\', \'inputs\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_output_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_output_mask_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_output_shape_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_updates_for"
+ argspec: "args=[\'self\', \'inputs\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_weights"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "set_weights"
+ argspec: "args=[\'self\', \'weights\'], varargs=None, keywords=None, defaults=None"
+ }
+}
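
A minimal usage sketch for the Lambda constructor above, including its `arguments` parameter (illustrative only; the wrapped function and shapes are assumptions):

import tensorflow as tf

# Wrap an arbitrary expression as a layer; `arguments` supplies extra keyword
# arguments to the wrapped function.
scale = tf.keras.layers.Lambda(lambda t, factor: t * factor,
                               arguments={'factor': 2.0})
x = tf.keras.Input(shape=(4,))
y = scale(x)  # same shape as x, values doubled
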
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-layer.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-layer.pbtxt
new file mode 100644
index 0000000000..1e93d1118a
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-layer.pbtxt
@@ -0,0 +1,174 @@
+path: "tensorflow.keras.layers.Layer"
+tf_class {
+ is_instance: "<class \'tensorflow.python.keras.engine.base_layer.Layer\'>"
+ is_instance: "<class \'tensorflow.python.training.checkpointable.base.CheckpointableBase\'>"
+ is_instance: "<type \'object\'>"
+ member {
+ name: "activity_regularizer"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "dtype"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "inbound_nodes"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "input"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "input_mask"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "input_shape"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "losses"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "name"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "non_trainable_variables"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "non_trainable_weights"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "outbound_nodes"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "output"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "output_mask"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "output_shape"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "trainable_variables"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "trainable_weights"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "updates"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "variables"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "weights"
+ mtype: "<type \'property\'>"
+ }
+ member_method {
+ name: "__init__"
+ argspec: "args=[\'self\', \'trainable\', \'name\', \'dtype\'], varargs=None, keywords=kwargs, defaults=[\'True\', \'None\', \'None\'], "
+ }
+ member_method {
+ name: "add_loss"
+ argspec: "args=[\'self\', \'losses\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "add_update"
+ argspec: "args=[\'self\', \'updates\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "add_variable"
+ argspec: "args=[\'self\'], varargs=args, keywords=kwargs, defaults=None"
+ }
+ member_method {
+ name: "add_weight"
+ argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'synchronization\', \'aggregation\'], varargs=None, keywords=kwargs, defaults=[\'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'VariableSynchronization.AUTO\', \'VariableAggregation.NONE\'], "
+ }
+ member_method {
+ name: "apply"
+ argspec: "args=[\'self\', \'inputs\'], varargs=args, keywords=kwargs, defaults=None"
+ }
+ member_method {
+ name: "build"
+ argspec: "args=[\'self\', \'input_shape\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "call"
+ argspec: "args=[\'self\', \'inputs\'], varargs=None, keywords=kwargs, defaults=None"
+ }
+ member_method {
+ name: "compute_mask"
+ argspec: "args=[\'self\', \'inputs\', \'mask\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "compute_output_shape"
+ argspec: "args=[\'self\', \'input_shape\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "count_params"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "from_config"
+ argspec: "args=[\'cls\', \'config\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_config"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_input_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_input_mask_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_input_shape_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_losses_for"
+ argspec: "args=[\'self\', \'inputs\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_output_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_output_mask_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_output_shape_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_updates_for"
+ argspec: "args=[\'self\', \'inputs\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_weights"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "set_weights"
+ argspec: "args=[\'self\', \'weights\'], varargs=None, keywords=None, defaults=None"
+ }
+}
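
A minimal subclassing sketch for the base Layer API above, exercising the build/call/add_weight members listed in the golden file (illustrative only; the Scale class is a made-up example):

import tensorflow as tf

class Scale(tf.keras.layers.Layer):
  """Learns a single multiplicative factor (illustrative subclass)."""

  def build(self, input_shape):
    # add_weight matches the argspec above: name, shape, initializer, trainable.
    self.alpha = self.add_weight(name='alpha', shape=(),
                                 initializer='ones', trainable=True)
    super(Scale, self).build(input_shape)

  def call(self, inputs):
    return inputs * self.alpha
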
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-leaky-re-l-u.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-leaky-re-l-u.pbtxt
new file mode 100644
index 0000000000..bfd36012a7
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-leaky-re-l-u.pbtxt
@@ -0,0 +1,175 @@
+path: "tensorflow.keras.layers.LeakyReLU"
+tf_class {
+ is_instance: "<class \'tensorflow.python.keras.layers.advanced_activations.LeakyReLU\'>"
+ is_instance: "<class \'tensorflow.python.keras.engine.base_layer.Layer\'>"
+ is_instance: "<class \'tensorflow.python.training.checkpointable.base.CheckpointableBase\'>"
+ is_instance: "<type \'object\'>"
+ member {
+ name: "activity_regularizer"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "dtype"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "inbound_nodes"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "input"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "input_mask"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "input_shape"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "losses"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "name"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "non_trainable_variables"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "non_trainable_weights"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "outbound_nodes"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "output"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "output_mask"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "output_shape"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "trainable_variables"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "trainable_weights"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "updates"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "variables"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "weights"
+ mtype: "<type \'property\'>"
+ }
+ member_method {
+ name: "__init__"
+ argspec: "args=[\'self\', \'alpha\'], varargs=None, keywords=kwargs, defaults=[\'0.3\'], "
+ }
+ member_method {
+ name: "add_loss"
+ argspec: "args=[\'self\', \'losses\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "add_update"
+ argspec: "args=[\'self\', \'updates\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "add_variable"
+ argspec: "args=[\'self\'], varargs=args, keywords=kwargs, defaults=None"
+ }
+ member_method {
+ name: "add_weight"
+ argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'synchronization\', \'aggregation\'], varargs=None, keywords=kwargs, defaults=[\'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'VariableSynchronization.AUTO\', \'VariableAggregation.NONE\'], "
+ }
+ member_method {
+ name: "apply"
+ argspec: "args=[\'self\', \'inputs\'], varargs=args, keywords=kwargs, defaults=None"
+ }
+ member_method {
+ name: "build"
+ argspec: "args=[\'self\', \'input_shape\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "call"
+ argspec: "args=[\'self\', \'inputs\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "compute_mask"
+ argspec: "args=[\'self\', \'inputs\', \'mask\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "compute_output_shape"
+ argspec: "args=[\'instance\', \'input_shape\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "count_params"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "from_config"
+ argspec: "args=[\'cls\', \'config\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_config"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_input_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_input_mask_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_input_shape_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_losses_for"
+ argspec: "args=[\'self\', \'inputs\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_output_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_output_mask_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_output_shape_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_updates_for"
+ argspec: "args=[\'self\', \'inputs\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_weights"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "set_weights"
+ argspec: "args=[\'self\', \'weights\'], varargs=None, keywords=None, defaults=None"
+ }
+}
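
A minimal usage sketch for the LeakyReLU constructor above (illustrative only; the input values are assumptions):

import tensorflow as tf

# alpha is the slope for negative inputs (defaults to 0.3 per the argspec).
act = tf.keras.layers.LeakyReLU(alpha=0.1)
y = act(tf.constant([-2.0, 0.0, 3.0]))  # roughly [-0.2, 0.0, 3.0] when evaluated
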
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-locally-connected1-d.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-locally-connected1-d.pbtxt
new file mode 100644
index 0000000000..5ad5990d7e
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-locally-connected1-d.pbtxt
@@ -0,0 +1,175 @@
+path: "tensorflow.keras.layers.LocallyConnected1D"
+tf_class {
+ is_instance: "<class \'tensorflow.python.keras.layers.local.LocallyConnected1D\'>"
+ is_instance: "<class \'tensorflow.python.keras.engine.base_layer.Layer\'>"
+ is_instance: "<class \'tensorflow.python.training.checkpointable.base.CheckpointableBase\'>"
+ is_instance: "<type \'object\'>"
+ member {
+ name: "activity_regularizer"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "dtype"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "inbound_nodes"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "input"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "input_mask"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "input_shape"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "losses"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "name"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "non_trainable_variables"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "non_trainable_weights"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "outbound_nodes"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "output"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "output_mask"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "output_shape"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "trainable_variables"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "trainable_weights"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "updates"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "variables"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "weights"
+ mtype: "<type \'property\'>"
+ }
+ member_method {
+ name: "__init__"
+ argspec: "args=[\'self\', \'filters\', \'kernel_size\', \'strides\', \'padding\', \'data_format\', \'activation\', \'use_bias\', \'kernel_initializer\', \'bias_initializer\', \'kernel_regularizer\', \'bias_regularizer\', \'activity_regularizer\', \'kernel_constraint\', \'bias_constraint\', \'implementation\'], varargs=None, keywords=kwargs, defaults=[\'1\', \'valid\', \'None\', \'None\', \'True\', \'glorot_uniform\', \'zeros\', \'None\', \'None\', \'None\', \'None\', \'None\', \'1\'], "
+ }
+ member_method {
+ name: "add_loss"
+ argspec: "args=[\'self\', \'losses\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "add_update"
+ argspec: "args=[\'self\', \'updates\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "add_variable"
+ argspec: "args=[\'self\'], varargs=args, keywords=kwargs, defaults=None"
+ }
+ member_method {
+ name: "add_weight"
+ argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'synchronization\', \'aggregation\'], varargs=None, keywords=kwargs, defaults=[\'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'VariableSynchronization.AUTO\', \'VariableAggregation.NONE\'], "
+ }
+ member_method {
+ name: "apply"
+ argspec: "args=[\'self\', \'inputs\'], varargs=args, keywords=kwargs, defaults=None"
+ }
+ member_method {
+ name: "build"
+ argspec: "args=[\'instance\', \'input_shape\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "call"
+ argspec: "args=[\'self\', \'inputs\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "compute_mask"
+ argspec: "args=[\'self\', \'inputs\', \'mask\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "compute_output_shape"
+ argspec: "args=[\'instance\', \'input_shape\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "count_params"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "from_config"
+ argspec: "args=[\'cls\', \'config\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_config"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_input_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_input_mask_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_input_shape_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_losses_for"
+ argspec: "args=[\'self\', \'inputs\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_output_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_output_mask_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_output_shape_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_updates_for"
+ argspec: "args=[\'self\', \'inputs\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_weights"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "set_weights"
+ argspec: "args=[\'self\', \'weights\'], varargs=None, keywords=None, defaults=None"
+ }
+}
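
A minimal usage sketch for the LocallyConnected1D signature above (illustrative only; filter counts and shapes are assumptions):

import tensorflow as tf

# Like Conv1D, but weights are not shared across window positions.
layer = tf.keras.layers.LocallyConnected1D(filters=16, kernel_size=3,
                                           activation='relu')
x = tf.keras.Input(shape=(32, 8))
y = layer(x)  # shape (None, 30, 16) with the default 'valid' padding
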
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-locally-connected2-d.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-locally-connected2-d.pbtxt
new file mode 100644
index 0000000000..40d03369a5
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-locally-connected2-d.pbtxt
@@ -0,0 +1,175 @@
+path: "tensorflow.keras.layers.LocallyConnected2D"
+tf_class {
+ is_instance: "<class \'tensorflow.python.keras.layers.local.LocallyConnected2D\'>"
+ is_instance: "<class \'tensorflow.python.keras.engine.base_layer.Layer\'>"
+ is_instance: "<class \'tensorflow.python.training.checkpointable.base.CheckpointableBase\'>"
+ is_instance: "<type \'object\'>"
+ member {
+ name: "activity_regularizer"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "dtype"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "inbound_nodes"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "input"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "input_mask"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "input_shape"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "losses"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "name"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "non_trainable_variables"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "non_trainable_weights"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "outbound_nodes"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "output"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "output_mask"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "output_shape"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "trainable_variables"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "trainable_weights"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "updates"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "variables"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "weights"
+ mtype: "<type \'property\'>"
+ }
+ member_method {
+ name: "__init__"
+ argspec: "args=[\'self\', \'filters\', \'kernel_size\', \'strides\', \'padding\', \'data_format\', \'activation\', \'use_bias\', \'kernel_initializer\', \'bias_initializer\', \'kernel_regularizer\', \'bias_regularizer\', \'activity_regularizer\', \'kernel_constraint\', \'bias_constraint\', \'implementation\'], varargs=None, keywords=kwargs, defaults=[\'(1, 1)\', \'valid\', \'None\', \'None\', \'True\', \'glorot_uniform\', \'zeros\', \'None\', \'None\', \'None\', \'None\', \'None\', \'1\'], "
+ }
+ member_method {
+ name: "add_loss"
+ argspec: "args=[\'self\', \'losses\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "add_update"
+ argspec: "args=[\'self\', \'updates\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "add_variable"
+ argspec: "args=[\'self\'], varargs=args, keywords=kwargs, defaults=None"
+ }
+ member_method {
+ name: "add_weight"
+ argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'synchronization\', \'aggregation\'], varargs=None, keywords=kwargs, defaults=[\'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'VariableSynchronization.AUTO\', \'VariableAggregation.NONE\'], "
+ }
+ member_method {
+ name: "apply"
+ argspec: "args=[\'self\', \'inputs\'], varargs=args, keywords=kwargs, defaults=None"
+ }
+ member_method {
+ name: "build"
+ argspec: "args=[\'instance\', \'input_shape\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "call"
+ argspec: "args=[\'self\', \'inputs\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "compute_mask"
+ argspec: "args=[\'self\', \'inputs\', \'mask\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "compute_output_shape"
+ argspec: "args=[\'instance\', \'input_shape\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "count_params"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "from_config"
+ argspec: "args=[\'cls\', \'config\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_config"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_input_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_input_mask_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_input_shape_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_losses_for"
+ argspec: "args=[\'self\', \'inputs\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_output_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_output_mask_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_output_shape_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_updates_for"
+ argspec: "args=[\'self\', \'inputs\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_weights"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "set_weights"
+ argspec: "args=[\'self\', \'weights\'], varargs=None, keywords=None, defaults=None"
+ }
+}
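
A minimal usage sketch for the LocallyConnected2D signature above (illustrative only; filter counts and shapes are assumptions):

import tensorflow as tf

# 2D locally connected layer: unshared weights at each spatial location.
layer = tf.keras.layers.LocallyConnected2D(filters=8, kernel_size=(3, 3),
                                           activation='relu')
x = tf.keras.Input(shape=(16, 16, 3))
y = layer(x)  # shape (None, 14, 14, 8) with the default 'valid' padding
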
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-masking.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-masking.pbtxt
new file mode 100644
index 0000000000..86666b51bb
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-masking.pbtxt
@@ -0,0 +1,175 @@
+path: "tensorflow.keras.layers.Masking"
+tf_class {
+ is_instance: "<class \'tensorflow.python.keras.layers.core.Masking\'>"
+ is_instance: "<class \'tensorflow.python.keras.engine.base_layer.Layer\'>"
+ is_instance: "<class \'tensorflow.python.training.checkpointable.base.CheckpointableBase\'>"
+ is_instance: "<type \'object\'>"
+ member {
+ name: "activity_regularizer"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "dtype"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "inbound_nodes"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "input"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "input_mask"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "input_shape"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "losses"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "name"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "non_trainable_variables"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "non_trainable_weights"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "outbound_nodes"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "output"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "output_mask"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "output_shape"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "trainable_variables"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "trainable_weights"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "updates"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "variables"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "weights"
+ mtype: "<type \'property\'>"
+ }
+ member_method {
+ name: "__init__"
+ argspec: "args=[\'self\', \'mask_value\'], varargs=None, keywords=kwargs, defaults=[\'0.0\'], "
+ }
+ member_method {
+ name: "add_loss"
+ argspec: "args=[\'self\', \'losses\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "add_update"
+ argspec: "args=[\'self\', \'updates\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "add_variable"
+ argspec: "args=[\'self\'], varargs=args, keywords=kwargs, defaults=None"
+ }
+ member_method {
+ name: "add_weight"
+ argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'synchronization\', \'aggregation\'], varargs=None, keywords=kwargs, defaults=[\'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'VariableSynchronization.AUTO\', \'VariableAggregation.NONE\'], "
+ }
+ member_method {
+ name: "apply"
+ argspec: "args=[\'self\', \'inputs\'], varargs=args, keywords=kwargs, defaults=None"
+ }
+ member_method {
+ name: "build"
+ argspec: "args=[\'self\', \'input_shape\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "call"
+ argspec: "args=[\'self\', \'inputs\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "compute_mask"
+ argspec: "args=[\'self\', \'inputs\', \'mask\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "compute_output_shape"
+ argspec: "args=[\'self\', \'input_shape\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "count_params"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "from_config"
+ argspec: "args=[\'cls\', \'config\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_config"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_input_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_input_mask_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_input_shape_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_losses_for"
+ argspec: "args=[\'self\', \'inputs\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_output_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_output_mask_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_output_shape_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_updates_for"
+ argspec: "args=[\'self\', \'inputs\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_weights"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "set_weights"
+ argspec: "args=[\'self\', \'weights\'], varargs=None, keywords=None, defaults=None"
+ }
+}
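
A minimal usage sketch for the Masking constructor above (illustrative only; the downstream LSTM and shapes are assumptions):

import tensorflow as tf

# Timesteps whose features all equal mask_value are skipped by downstream
# mask-aware layers such as LSTM.
model = tf.keras.Sequential([
    tf.keras.layers.Masking(mask_value=0.0, input_shape=(10, 4)),
    tf.keras.layers.LSTM(16),
])
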
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-max-pool1-d.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-max-pool1-d.pbtxt
new file mode 100644
index 0000000000..238d96cca6
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-max-pool1-d.pbtxt
@@ -0,0 +1,176 @@
+path: "tensorflow.keras.layers.MaxPool1D"
+tf_class {
+ is_instance: "<class \'tensorflow.python.keras.layers.pooling.MaxPooling1D\'>"
+ is_instance: "<class \'tensorflow.python.keras.layers.pooling.Pooling1D\'>"
+ is_instance: "<class \'tensorflow.python.keras.engine.base_layer.Layer\'>"
+ is_instance: "<class \'tensorflow.python.training.checkpointable.base.CheckpointableBase\'>"
+ is_instance: "<type \'object\'>"
+ member {
+ name: "activity_regularizer"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "dtype"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "inbound_nodes"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "input"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "input_mask"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "input_shape"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "losses"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "name"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "non_trainable_variables"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "non_trainable_weights"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "outbound_nodes"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "output"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "output_mask"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "output_shape"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "trainable_variables"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "trainable_weights"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "updates"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "variables"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "weights"
+ mtype: "<type \'property\'>"
+ }
+ member_method {
+ name: "__init__"
+ argspec: "args=[\'self\', \'pool_size\', \'strides\', \'padding\', \'data_format\'], varargs=None, keywords=kwargs, defaults=[\'2\', \'None\', \'valid\', \'None\'], "
+ }
+ member_method {
+ name: "add_loss"
+ argspec: "args=[\'self\', \'losses\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "add_update"
+ argspec: "args=[\'self\', \'updates\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "add_variable"
+ argspec: "args=[\'self\'], varargs=args, keywords=kwargs, defaults=None"
+ }
+ member_method {
+ name: "add_weight"
+ argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'synchronization\', \'aggregation\'], varargs=None, keywords=kwargs, defaults=[\'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'VariableSynchronization.AUTO\', \'VariableAggregation.NONE\'], "
+ }
+ member_method {
+ name: "apply"
+ argspec: "args=[\'self\', \'inputs\'], varargs=args, keywords=kwargs, defaults=None"
+ }
+ member_method {
+ name: "build"
+ argspec: "args=[\'self\', \'input_shape\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "call"
+ argspec: "args=[\'self\', \'inputs\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "compute_mask"
+ argspec: "args=[\'self\', \'inputs\', \'mask\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "compute_output_shape"
+ argspec: "args=[\'self\', \'input_shape\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "count_params"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "from_config"
+ argspec: "args=[\'cls\', \'config\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_config"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_input_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_input_mask_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_input_shape_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_losses_for"
+ argspec: "args=[\'self\', \'inputs\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_output_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_output_mask_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_output_shape_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_updates_for"
+ argspec: "args=[\'self\', \'inputs\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_weights"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "set_weights"
+ argspec: "args=[\'self\', \'weights\'], varargs=None, keywords=None, defaults=None"
+ }
+}
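
The golden above records the MaxPool1D constructor as pool_size=2, strides=None, padding='valid', data_format=None, with strides falling back to pool_size when left as None. A minimal sketch of that signature in use, assuming a TensorFlow build where tf.keras is importable (shapes are illustrative, not taken from this change):

import tensorflow as tf

# Defaults from the golden argspec: pool_size=2, strides=None -> pool_size,
# padding='valid', data_format=None (channels_last).
inputs = tf.keras.layers.Input(shape=(6, 2))        # (steps, channels)
outputs = tf.keras.layers.MaxPool1D(pool_size=2)(inputs)
model = tf.keras.Model(inputs, outputs)
print(model.output_shape)                           # (None, 3, 2)
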
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-max-pool2-d.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-max-pool2-d.pbtxt
new file mode 100644
index 0000000000..85f23df671
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-max-pool2-d.pbtxt
@@ -0,0 +1,176 @@
+path: "tensorflow.keras.layers.MaxPool2D"
+tf_class {
+ is_instance: "<class \'tensorflow.python.keras.layers.pooling.MaxPooling2D\'>"
+ is_instance: "<class \'tensorflow.python.keras.layers.pooling.Pooling2D\'>"
+ is_instance: "<class \'tensorflow.python.keras.engine.base_layer.Layer\'>"
+ is_instance: "<class \'tensorflow.python.training.checkpointable.base.CheckpointableBase\'>"
+ is_instance: "<type \'object\'>"
+ member {
+ name: "activity_regularizer"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "dtype"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "inbound_nodes"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "input"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "input_mask"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "input_shape"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "losses"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "name"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "non_trainable_variables"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "non_trainable_weights"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "outbound_nodes"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "output"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "output_mask"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "output_shape"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "trainable_variables"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "trainable_weights"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "updates"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "variables"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "weights"
+ mtype: "<type \'property\'>"
+ }
+ member_method {
+ name: "__init__"
+ argspec: "args=[\'self\', \'pool_size\', \'strides\', \'padding\', \'data_format\'], varargs=None, keywords=kwargs, defaults=[\'(2, 2)\', \'None\', \'valid\', \'None\'], "
+ }
+ member_method {
+ name: "add_loss"
+ argspec: "args=[\'self\', \'losses\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "add_update"
+ argspec: "args=[\'self\', \'updates\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "add_variable"
+ argspec: "args=[\'self\'], varargs=args, keywords=kwargs, defaults=None"
+ }
+ member_method {
+ name: "add_weight"
+ argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'synchronization\', \'aggregation\'], varargs=None, keywords=kwargs, defaults=[\'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'VariableSynchronization.AUTO\', \'VariableAggregation.NONE\'], "
+ }
+ member_method {
+ name: "apply"
+ argspec: "args=[\'self\', \'inputs\'], varargs=args, keywords=kwargs, defaults=None"
+ }
+ member_method {
+ name: "build"
+ argspec: "args=[\'self\', \'input_shape\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "call"
+ argspec: "args=[\'self\', \'inputs\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "compute_mask"
+ argspec: "args=[\'self\', \'inputs\', \'mask\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "compute_output_shape"
+ argspec: "args=[\'self\', \'input_shape\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "count_params"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "from_config"
+ argspec: "args=[\'cls\', \'config\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_config"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_input_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_input_mask_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_input_shape_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_losses_for"
+ argspec: "args=[\'self\', \'inputs\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_output_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_output_mask_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_output_shape_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_updates_for"
+ argspec: "args=[\'self\', \'inputs\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_weights"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "set_weights"
+ argspec: "args=[\'self\', \'weights\'], varargs=None, keywords=None, defaults=None"
+ }
+}
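
MaxPool2D differs from the 1-D variant only in the tuple default pool_size=(2, 2). A correspondingly small sketch under the same assumptions:

import tensorflow as tf

# pool_size=(2, 2) halves both spatial dimensions with 'valid' padding.
inputs = tf.keras.layers.Input(shape=(28, 28, 3))   # (height, width, channels)
outputs = tf.keras.layers.MaxPool2D()(inputs)       # all defaults from the golden
model = tf.keras.Model(inputs, outputs)
print(model.output_shape)                           # (None, 14, 14, 3)
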
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-max-pool3-d.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-max-pool3-d.pbtxt
new file mode 100644
index 0000000000..235806b965
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-max-pool3-d.pbtxt
@@ -0,0 +1,176 @@
+path: "tensorflow.keras.layers.MaxPool3D"
+tf_class {
+ is_instance: "<class \'tensorflow.python.keras.layers.pooling.MaxPooling3D\'>"
+ is_instance: "<class \'tensorflow.python.keras.layers.pooling.Pooling3D\'>"
+ is_instance: "<class \'tensorflow.python.keras.engine.base_layer.Layer\'>"
+ is_instance: "<class \'tensorflow.python.training.checkpointable.base.CheckpointableBase\'>"
+ is_instance: "<type \'object\'>"
+ member {
+ name: "activity_regularizer"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "dtype"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "inbound_nodes"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "input"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "input_mask"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "input_shape"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "losses"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "name"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "non_trainable_variables"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "non_trainable_weights"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "outbound_nodes"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "output"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "output_mask"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "output_shape"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "trainable_variables"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "trainable_weights"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "updates"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "variables"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "weights"
+ mtype: "<type \'property\'>"
+ }
+ member_method {
+ name: "__init__"
+ argspec: "args=[\'self\', \'pool_size\', \'strides\', \'padding\', \'data_format\'], varargs=None, keywords=kwargs, defaults=[\'(2, 2, 2)\', \'None\', \'valid\', \'None\'], "
+ }
+ member_method {
+ name: "add_loss"
+ argspec: "args=[\'self\', \'losses\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "add_update"
+ argspec: "args=[\'self\', \'updates\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "add_variable"
+ argspec: "args=[\'self\'], varargs=args, keywords=kwargs, defaults=None"
+ }
+ member_method {
+ name: "add_weight"
+ argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'synchronization\', \'aggregation\'], varargs=None, keywords=kwargs, defaults=[\'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'VariableSynchronization.AUTO\', \'VariableAggregation.NONE\'], "
+ }
+ member_method {
+ name: "apply"
+ argspec: "args=[\'self\', \'inputs\'], varargs=args, keywords=kwargs, defaults=None"
+ }
+ member_method {
+ name: "build"
+ argspec: "args=[\'self\', \'input_shape\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "call"
+ argspec: "args=[\'self\', \'inputs\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "compute_mask"
+ argspec: "args=[\'self\', \'inputs\', \'mask\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "compute_output_shape"
+ argspec: "args=[\'self\', \'input_shape\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "count_params"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "from_config"
+ argspec: "args=[\'cls\', \'config\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_config"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_input_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_input_mask_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_input_shape_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_losses_for"
+ argspec: "args=[\'self\', \'inputs\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_output_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_output_mask_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_output_shape_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_updates_for"
+ argspec: "args=[\'self\', \'inputs\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_weights"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "set_weights"
+ argspec: "args=[\'self\', \'weights\'], varargs=None, keywords=None, defaults=None"
+ }
+}
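
The 3-D variant follows the same pattern with pool_size=(2, 2, 2); a brief sketch, again with illustrative shapes:

import tensorflow as tf

# Each of the three spatial axes is downsampled by the default pool size of 2.
inputs = tf.keras.layers.Input(shape=(8, 8, 8, 1))  # (depth, height, width, channels)
outputs = tf.keras.layers.MaxPool3D()(inputs)
model = tf.keras.Model(inputs, outputs)
print(model.output_shape)                           # (None, 4, 4, 4, 1)
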
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-max-pooling1-d.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-max-pooling1-d.pbtxt
new file mode 100644
index 0000000000..4a45bf7997
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-max-pooling1-d.pbtxt
@@ -0,0 +1,176 @@
+path: "tensorflow.keras.layers.MaxPooling1D"
+tf_class {
+ is_instance: "<class \'tensorflow.python.keras.layers.pooling.MaxPooling1D\'>"
+ is_instance: "<class \'tensorflow.python.keras.layers.pooling.Pooling1D\'>"
+ is_instance: "<class \'tensorflow.python.keras.engine.base_layer.Layer\'>"
+ is_instance: "<class \'tensorflow.python.training.checkpointable.base.CheckpointableBase\'>"
+ is_instance: "<type \'object\'>"
+ member {
+ name: "activity_regularizer"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "dtype"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "inbound_nodes"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "input"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "input_mask"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "input_shape"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "losses"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "name"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "non_trainable_variables"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "non_trainable_weights"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "outbound_nodes"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "output"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "output_mask"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "output_shape"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "trainable_variables"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "trainable_weights"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "updates"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "variables"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "weights"
+ mtype: "<type \'property\'>"
+ }
+ member_method {
+ name: "__init__"
+ argspec: "args=[\'self\', \'pool_size\', \'strides\', \'padding\', \'data_format\'], varargs=None, keywords=kwargs, defaults=[\'2\', \'None\', \'valid\', \'None\'], "
+ }
+ member_method {
+ name: "add_loss"
+ argspec: "args=[\'self\', \'losses\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "add_update"
+ argspec: "args=[\'self\', \'updates\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "add_variable"
+ argspec: "args=[\'self\'], varargs=args, keywords=kwargs, defaults=None"
+ }
+ member_method {
+ name: "add_weight"
+ argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'synchronization\', \'aggregation\'], varargs=None, keywords=kwargs, defaults=[\'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'VariableSynchronization.AUTO\', \'VariableAggregation.NONE\'], "
+ }
+ member_method {
+ name: "apply"
+ argspec: "args=[\'self\', \'inputs\'], varargs=args, keywords=kwargs, defaults=None"
+ }
+ member_method {
+ name: "build"
+ argspec: "args=[\'self\', \'input_shape\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "call"
+ argspec: "args=[\'self\', \'inputs\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "compute_mask"
+ argspec: "args=[\'self\', \'inputs\', \'mask\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "compute_output_shape"
+ argspec: "args=[\'self\', \'input_shape\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "count_params"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "from_config"
+ argspec: "args=[\'cls\', \'config\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_config"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_input_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_input_mask_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_input_shape_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_losses_for"
+ argspec: "args=[\'self\', \'inputs\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_output_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_output_mask_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_output_shape_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_updates_for"
+ argspec: "args=[\'self\', \'inputs\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_weights"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "set_weights"
+ argspec: "args=[\'self\', \'weights\'], varargs=None, keywords=None, defaults=None"
+ }
+}
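
This golden and the MaxPool1D one above list the same concrete class, tensorflow.python.keras.layers.pooling.MaxPooling1D, so the two exported names appear to be aliases of a single layer; the same relationship holds for the 2-D and 3-D variants that follow. A quick check of that reading (an assumption drawn from the matching is_instance entries, not stated by this diff itself):

import tensorflow as tf

# Both public names should resolve to the identical class object.
assert tf.keras.layers.MaxPool1D is tf.keras.layers.MaxPooling1D
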
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-max-pooling2-d.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-max-pooling2-d.pbtxt
new file mode 100644
index 0000000000..fda2562fc8
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-max-pooling2-d.pbtxt
@@ -0,0 +1,176 @@
+path: "tensorflow.keras.layers.MaxPooling2D"
+tf_class {
+ is_instance: "<class \'tensorflow.python.keras.layers.pooling.MaxPooling2D\'>"
+ is_instance: "<class \'tensorflow.python.keras.layers.pooling.Pooling2D\'>"
+ is_instance: "<class \'tensorflow.python.keras.engine.base_layer.Layer\'>"
+ is_instance: "<class \'tensorflow.python.training.checkpointable.base.CheckpointableBase\'>"
+ is_instance: "<type \'object\'>"
+ member {
+ name: "activity_regularizer"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "dtype"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "inbound_nodes"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "input"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "input_mask"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "input_shape"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "losses"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "name"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "non_trainable_variables"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "non_trainable_weights"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "outbound_nodes"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "output"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "output_mask"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "output_shape"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "trainable_variables"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "trainable_weights"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "updates"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "variables"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "weights"
+ mtype: "<type \'property\'>"
+ }
+ member_method {
+ name: "__init__"
+ argspec: "args=[\'self\', \'pool_size\', \'strides\', \'padding\', \'data_format\'], varargs=None, keywords=kwargs, defaults=[\'(2, 2)\', \'None\', \'valid\', \'None\'], "
+ }
+ member_method {
+ name: "add_loss"
+ argspec: "args=[\'self\', \'losses\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "add_update"
+ argspec: "args=[\'self\', \'updates\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "add_variable"
+ argspec: "args=[\'self\'], varargs=args, keywords=kwargs, defaults=None"
+ }
+ member_method {
+ name: "add_weight"
+ argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'synchronization\', \'aggregation\'], varargs=None, keywords=kwargs, defaults=[\'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'VariableSynchronization.AUTO\', \'VariableAggregation.NONE\'], "
+ }
+ member_method {
+ name: "apply"
+ argspec: "args=[\'self\', \'inputs\'], varargs=args, keywords=kwargs, defaults=None"
+ }
+ member_method {
+ name: "build"
+ argspec: "args=[\'self\', \'input_shape\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "call"
+ argspec: "args=[\'self\', \'inputs\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "compute_mask"
+ argspec: "args=[\'self\', \'inputs\', \'mask\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "compute_output_shape"
+ argspec: "args=[\'self\', \'input_shape\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "count_params"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "from_config"
+ argspec: "args=[\'cls\', \'config\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_config"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_input_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_input_mask_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_input_shape_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_losses_for"
+ argspec: "args=[\'self\', \'inputs\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_output_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_output_mask_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_output_shape_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_updates_for"
+ argspec: "args=[\'self\', \'inputs\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_weights"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "set_weights"
+ argspec: "args=[\'self\', \'weights\'], varargs=None, keywords=None, defaults=None"
+ }
+}
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-max-pooling3-d.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-max-pooling3-d.pbtxt
new file mode 100644
index 0000000000..71d2d09a8d
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-max-pooling3-d.pbtxt
@@ -0,0 +1,176 @@
+path: "tensorflow.keras.layers.MaxPooling3D"
+tf_class {
+ is_instance: "<class \'tensorflow.python.keras.layers.pooling.MaxPooling3D\'>"
+ is_instance: "<class \'tensorflow.python.keras.layers.pooling.Pooling3D\'>"
+ is_instance: "<class \'tensorflow.python.keras.engine.base_layer.Layer\'>"
+ is_instance: "<class \'tensorflow.python.training.checkpointable.base.CheckpointableBase\'>"
+ is_instance: "<type \'object\'>"
+ member {
+ name: "activity_regularizer"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "dtype"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "inbound_nodes"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "input"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "input_mask"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "input_shape"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "losses"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "name"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "non_trainable_variables"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "non_trainable_weights"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "outbound_nodes"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "output"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "output_mask"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "output_shape"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "trainable_variables"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "trainable_weights"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "updates"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "variables"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "weights"
+ mtype: "<type \'property\'>"
+ }
+ member_method {
+ name: "__init__"
+ argspec: "args=[\'self\', \'pool_size\', \'strides\', \'padding\', \'data_format\'], varargs=None, keywords=kwargs, defaults=[\'(2, 2, 2)\', \'None\', \'valid\', \'None\'], "
+ }
+ member_method {
+ name: "add_loss"
+ argspec: "args=[\'self\', \'losses\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "add_update"
+ argspec: "args=[\'self\', \'updates\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "add_variable"
+ argspec: "args=[\'self\'], varargs=args, keywords=kwargs, defaults=None"
+ }
+ member_method {
+ name: "add_weight"
+ argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'synchronization\', \'aggregation\'], varargs=None, keywords=kwargs, defaults=[\'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'VariableSynchronization.AUTO\', \'VariableAggregation.NONE\'], "
+ }
+ member_method {
+ name: "apply"
+ argspec: "args=[\'self\', \'inputs\'], varargs=args, keywords=kwargs, defaults=None"
+ }
+ member_method {
+ name: "build"
+ argspec: "args=[\'self\', \'input_shape\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "call"
+ argspec: "args=[\'self\', \'inputs\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "compute_mask"
+ argspec: "args=[\'self\', \'inputs\', \'mask\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "compute_output_shape"
+ argspec: "args=[\'self\', \'input_shape\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "count_params"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "from_config"
+ argspec: "args=[\'cls\', \'config\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_config"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_input_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_input_mask_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_input_shape_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_losses_for"
+ argspec: "args=[\'self\', \'inputs\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_output_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_output_mask_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_output_shape_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_updates_for"
+ argspec: "args=[\'self\', \'inputs\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_weights"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "set_weights"
+ argspec: "args=[\'self\', \'weights\'], varargs=None, keywords=None, defaults=None"
+ }
+}
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-maximum.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-maximum.pbtxt
new file mode 100644
index 0000000000..12949b39a6
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-maximum.pbtxt
@@ -0,0 +1,176 @@
+path: "tensorflow.keras.layers.Maximum"
+tf_class {
+ is_instance: "<class \'tensorflow.python.keras.layers.merge.Maximum\'>"
+ is_instance: "<class \'tensorflow.python.keras.layers.merge._Merge\'>"
+ is_instance: "<class \'tensorflow.python.keras.engine.base_layer.Layer\'>"
+ is_instance: "<class \'tensorflow.python.training.checkpointable.base.CheckpointableBase\'>"
+ is_instance: "<type \'object\'>"
+ member {
+ name: "activity_regularizer"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "dtype"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "inbound_nodes"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "input"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "input_mask"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "input_shape"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "losses"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "name"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "non_trainable_variables"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "non_trainable_weights"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "outbound_nodes"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "output"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "output_mask"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "output_shape"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "trainable_variables"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "trainable_weights"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "updates"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "variables"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "weights"
+ mtype: "<type \'property\'>"
+ }
+ member_method {
+ name: "__init__"
+ argspec: "args=[\'self\'], varargs=None, keywords=kwargs, defaults=None"
+ }
+ member_method {
+ name: "add_loss"
+ argspec: "args=[\'self\', \'losses\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "add_update"
+ argspec: "args=[\'self\', \'updates\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "add_variable"
+ argspec: "args=[\'self\'], varargs=args, keywords=kwargs, defaults=None"
+ }
+ member_method {
+ name: "add_weight"
+ argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'synchronization\', \'aggregation\'], varargs=None, keywords=kwargs, defaults=[\'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'VariableSynchronization.AUTO\', \'VariableAggregation.NONE\'], "
+ }
+ member_method {
+ name: "apply"
+ argspec: "args=[\'self\', \'inputs\'], varargs=args, keywords=kwargs, defaults=None"
+ }
+ member_method {
+ name: "build"
+ argspec: "args=[\'instance\', \'input_shape\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "call"
+ argspec: "args=[\'self\', \'inputs\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "compute_mask"
+ argspec: "args=[\'self\', \'inputs\', \'mask\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "compute_output_shape"
+ argspec: "args=[\'instance\', \'input_shape\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "count_params"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "from_config"
+ argspec: "args=[\'cls\', \'config\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_config"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_input_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_input_mask_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_input_shape_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_losses_for"
+ argspec: "args=[\'self\', \'inputs\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_output_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_output_mask_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_output_shape_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_updates_for"
+ argspec: "args=[\'self\', \'inputs\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_weights"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "set_weights"
+ argspec: "args=[\'self\', \'weights\'], varargs=None, keywords=None, defaults=None"
+ }
+}
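
Maximum is a merge layer: its __init__ takes no positional arguments beyond **kwargs, and it reduces a list of same-shaped inputs with an element-wise maximum. A minimal sketch under the same tf.keras assumption:

import tensorflow as tf

# Two inputs of identical shape are merged element-wise; the output keeps that shape.
a = tf.keras.layers.Input(shape=(4,))
b = tf.keras.layers.Input(shape=(4,))
out = tf.keras.layers.Maximum()([a, b])
model = tf.keras.Model([a, b], out)
print(model.output_shape)                           # (None, 4)
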
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-minimum.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-minimum.pbtxt
new file mode 100644
index 0000000000..ab16d0021e
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-minimum.pbtxt
@@ -0,0 +1,176 @@
+path: "tensorflow.keras.layers.Minimum"
+tf_class {
+ is_instance: "<class \'tensorflow.python.keras.layers.merge.Minimum\'>"
+ is_instance: "<class \'tensorflow.python.keras.layers.merge._Merge\'>"
+ is_instance: "<class \'tensorflow.python.keras.engine.base_layer.Layer\'>"
+ is_instance: "<class \'tensorflow.python.training.checkpointable.base.CheckpointableBase\'>"
+ is_instance: "<type \'object\'>"
+ member {
+ name: "activity_regularizer"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "dtype"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "inbound_nodes"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "input"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "input_mask"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "input_shape"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "losses"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "name"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "non_trainable_variables"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "non_trainable_weights"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "outbound_nodes"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "output"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "output_mask"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "output_shape"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "trainable_variables"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "trainable_weights"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "updates"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "variables"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "weights"
+ mtype: "<type \'property\'>"
+ }
+ member_method {
+ name: "__init__"
+ argspec: "args=[\'self\'], varargs=None, keywords=kwargs, defaults=None"
+ }
+ member_method {
+ name: "add_loss"
+ argspec: "args=[\'self\', \'losses\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "add_update"
+ argspec: "args=[\'self\', \'updates\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "add_variable"
+ argspec: "args=[\'self\'], varargs=args, keywords=kwargs, defaults=None"
+ }
+ member_method {
+ name: "add_weight"
+ argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'synchronization\', \'aggregation\'], varargs=None, keywords=kwargs, defaults=[\'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'VariableSynchronization.AUTO\', \'VariableAggregation.NONE\'], "
+ }
+ member_method {
+ name: "apply"
+ argspec: "args=[\'self\', \'inputs\'], varargs=args, keywords=kwargs, defaults=None"
+ }
+ member_method {
+ name: "build"
+ argspec: "args=[\'instance\', \'input_shape\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "call"
+ argspec: "args=[\'self\', \'inputs\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "compute_mask"
+ argspec: "args=[\'self\', \'inputs\', \'mask\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "compute_output_shape"
+ argspec: "args=[\'instance\', \'input_shape\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "count_params"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "from_config"
+ argspec: "args=[\'cls\', \'config\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_config"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_input_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_input_mask_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_input_shape_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_losses_for"
+ argspec: "args=[\'self\', \'inputs\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_output_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_output_mask_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_output_shape_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_updates_for"
+ argspec: "args=[\'self\', \'inputs\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_weights"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "set_weights"
+ argspec: "args=[\'self\', \'weights\'], varargs=None, keywords=None, defaults=None"
+ }
+}
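
Minimum mirrors Maximum above with the reduction swapped for an element-wise minimum; the sketch is correspondingly identical apart from the layer name:

import tensorflow as tf

# Same-shaped inputs, element-wise minimum.
a = tf.keras.layers.Input(shape=(3,))
b = tf.keras.layers.Input(shape=(3,))
model = tf.keras.Model([a, b], tf.keras.layers.Minimum()([a, b]))
print(model.output_shape)                           # (None, 3)
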
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-multiply.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-multiply.pbtxt
new file mode 100644
index 0000000000..61ccbf5962
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-multiply.pbtxt
@@ -0,0 +1,176 @@
+path: "tensorflow.keras.layers.Multiply"
+tf_class {
+ is_instance: "<class \'tensorflow.python.keras.layers.merge.Multiply\'>"
+ is_instance: "<class \'tensorflow.python.keras.layers.merge._Merge\'>"
+ is_instance: "<class \'tensorflow.python.keras.engine.base_layer.Layer\'>"
+ is_instance: "<class \'tensorflow.python.training.checkpointable.base.CheckpointableBase\'>"
+ is_instance: "<type \'object\'>"
+ member {
+ name: "activity_regularizer"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "dtype"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "inbound_nodes"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "input"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "input_mask"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "input_shape"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "losses"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "name"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "non_trainable_variables"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "non_trainable_weights"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "outbound_nodes"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "output"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "output_mask"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "output_shape"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "trainable_variables"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "trainable_weights"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "updates"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "variables"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "weights"
+ mtype: "<type \'property\'>"
+ }
+ member_method {
+ name: "__init__"
+ argspec: "args=[\'self\'], varargs=None, keywords=kwargs, defaults=None"
+ }
+ member_method {
+ name: "add_loss"
+ argspec: "args=[\'self\', \'losses\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "add_update"
+ argspec: "args=[\'self\', \'updates\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "add_variable"
+ argspec: "args=[\'self\'], varargs=args, keywords=kwargs, defaults=None"
+ }
+ member_method {
+ name: "add_weight"
+ argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'synchronization\', \'aggregation\'], varargs=None, keywords=kwargs, defaults=[\'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'VariableSynchronization.AUTO\', \'VariableAggregation.NONE\'], "
+ }
+ member_method {
+ name: "apply"
+ argspec: "args=[\'self\', \'inputs\'], varargs=args, keywords=kwargs, defaults=None"
+ }
+ member_method {
+ name: "build"
+ argspec: "args=[\'instance\', \'input_shape\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "call"
+ argspec: "args=[\'self\', \'inputs\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "compute_mask"
+ argspec: "args=[\'self\', \'inputs\', \'mask\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "compute_output_shape"
+ argspec: "args=[\'instance\', \'input_shape\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "count_params"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "from_config"
+ argspec: "args=[\'cls\', \'config\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_config"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_input_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_input_mask_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_input_shape_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_losses_for"
+ argspec: "args=[\'self\', \'inputs\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_output_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_output_mask_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_output_shape_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_updates_for"
+ argspec: "args=[\'self\', \'inputs\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_weights"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "set_weights"
+ argspec: "args=[\'self\', \'weights\'], varargs=None, keywords=None, defaults=None"
+ }
+}
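
Multiply is the element-wise product over a list of two or more same-shaped inputs; a small sketch with three inputs to show the list form:

import tensorflow as tf

# The merge layers accept any number (>= 2) of same-shaped tensors.
ins = [tf.keras.layers.Input(shape=(5,)) for _ in range(3)]
model = tf.keras.Model(ins, tf.keras.layers.Multiply()(ins))
print(model.output_shape)                           # (None, 5)
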
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-p-re-l-u.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-p-re-l-u.pbtxt
new file mode 100644
index 0000000000..ce2320d703
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-p-re-l-u.pbtxt
@@ -0,0 +1,175 @@
+path: "tensorflow.keras.layers.PReLU"
+tf_class {
+ is_instance: "<class \'tensorflow.python.keras.layers.advanced_activations.PReLU\'>"
+ is_instance: "<class \'tensorflow.python.keras.engine.base_layer.Layer\'>"
+ is_instance: "<class \'tensorflow.python.training.checkpointable.base.CheckpointableBase\'>"
+ is_instance: "<type \'object\'>"
+ member {
+ name: "activity_regularizer"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "dtype"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "inbound_nodes"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "input"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "input_mask"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "input_shape"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "losses"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "name"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "non_trainable_variables"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "non_trainable_weights"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "outbound_nodes"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "output"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "output_mask"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "output_shape"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "trainable_variables"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "trainable_weights"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "updates"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "variables"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "weights"
+ mtype: "<type \'property\'>"
+ }
+ member_method {
+ name: "__init__"
+ argspec: "args=[\'self\', \'alpha_initializer\', \'alpha_regularizer\', \'alpha_constraint\', \'shared_axes\'], varargs=None, keywords=kwargs, defaults=[\'zeros\', \'None\', \'None\', \'None\'], "
+ }
+ member_method {
+ name: "add_loss"
+ argspec: "args=[\'self\', \'losses\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "add_update"
+ argspec: "args=[\'self\', \'updates\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "add_variable"
+ argspec: "args=[\'self\'], varargs=args, keywords=kwargs, defaults=None"
+ }
+ member_method {
+ name: "add_weight"
+ argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'synchronization\', \'aggregation\'], varargs=None, keywords=kwargs, defaults=[\'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'VariableSynchronization.AUTO\', \'VariableAggregation.NONE\'], "
+ }
+ member_method {
+ name: "apply"
+ argspec: "args=[\'self\', \'inputs\'], varargs=args, keywords=kwargs, defaults=None"
+ }
+ member_method {
+ name: "build"
+ argspec: "args=[\'instance\', \'input_shape\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "call"
+ argspec: "args=[\'self\', \'inputs\', \'mask\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "compute_mask"
+ argspec: "args=[\'self\', \'inputs\', \'mask\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "compute_output_shape"
+ argspec: "args=[\'instance\', \'input_shape\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "count_params"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "from_config"
+ argspec: "args=[\'cls\', \'config\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_config"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_input_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_input_mask_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_input_shape_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_losses_for"
+ argspec: "args=[\'self\', \'inputs\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_output_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_output_mask_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_output_shape_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_updates_for"
+ argspec: "args=[\'self\', \'inputs\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_weights"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "set_weights"
+ argspec: "args=[\'self\', \'weights\'], varargs=None, keywords=None, defaults=None"
+ }
+}
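
The PReLU golden fixes the defaults alpha_initializer='zeros', alpha_regularizer=None, alpha_constraint=None, shared_axes=None, so the layer starts out behaving like a plain ReLU and learns its negative-side slopes during training. A sketch showing shared_axes, with illustrative shapes:

import tensorflow as tf

# Sharing alpha across the two spatial axes leaves one trainable slope per channel.
x = tf.keras.layers.Input(shape=(32, 32, 16))
y = tf.keras.layers.PReLU(shared_axes=[1, 2])(x)
model = tf.keras.Model(x, y)
print(model.count_params())                         # 16 trainable alphas
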
diff --git a/tensorflow/tools/api/golden/tensorflow.keras.layers.-flatten.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-permute.pbtxt
index 82dc878a8c..69848af8cf 100644
--- a/tensorflow/tools/api/golden/tensorflow.keras.layers.-flatten.pbtxt
+++ b/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-permute.pbtxt
@@ -1,8 +1,8 @@
-path: "tensorflow.keras.layers.Flatten"
+path: "tensorflow.keras.layers.Permute"
tf_class {
- is_instance: "<class \'tensorflow.python.keras._impl.keras.layers.core.Flatten\'>"
- is_instance: "<class \'tensorflow.python.keras._impl.keras.engine.base_layer.Layer\'>"
- is_instance: "<class \'tensorflow.python.training.checkpointable.CheckpointableBase\'>"
+ is_instance: "<class \'tensorflow.python.keras.layers.core.Permute\'>"
+ is_instance: "<class \'tensorflow.python.keras.engine.base_layer.Layer\'>"
+ is_instance: "<class \'tensorflow.python.training.checkpointable.base.CheckpointableBase\'>"
is_instance: "<type \'object\'>"
member {
name: "activity_regularizer"
@@ -82,7 +82,7 @@ tf_class {
}
member_method {
name: "__init__"
- argspec: "args=[\'self\'], varargs=None, keywords=kwargs, defaults=None"
+ argspec: "args=[\'self\', \'dims\'], varargs=None, keywords=kwargs, defaults=None"
}
member_method {
name: "add_loss"
@@ -98,7 +98,7 @@ tf_class {
}
member_method {
name: "add_weight"
- argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'getter\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'True\', \'None\', \'None\', \'None\', \'None\'], "
+ argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'synchronization\', \'aggregation\'], varargs=None, keywords=kwargs, defaults=[\'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'VariableSynchronization.AUTO\', \'VariableAggregation.NONE\'], "
}
member_method {
name: "apply"
@@ -106,7 +106,7 @@ tf_class {
}
member_method {
name: "build"
- argspec: "args=[\'self\', \'_\'], varargs=None, keywords=None, defaults=None"
+ argspec: "args=[\'self\', \'input_shape\'], varargs=None, keywords=None, defaults=None"
}
member_method {
name: "call"
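
The hunk above retargets the old Flatten golden to the v2 Permute golden and adds the dims argument to __init__; dims gives the target ordering of the non-batch axes. A minimal sketch, assuming the usual tf.keras entry points:

import tensorflow as tf

# (2, 1) swaps the two non-batch axes; the batch axis is never permuted.
x = tf.keras.layers.Input(shape=(10, 64))
y = tf.keras.layers.Permute((2, 1))(x)
model = tf.keras.Model(x, y)
print(model.output_shape)                           # (None, 64, 10)
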
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-r-n-n.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-r-n-n.pbtxt
new file mode 100644
index 0000000000..2b6e8af11d
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-r-n-n.pbtxt
@@ -0,0 +1,187 @@
+path: "tensorflow.keras.layers.RNN"
+tf_class {
+ is_instance: "<class \'tensorflow.python.keras.layers.recurrent.RNN\'>"
+ is_instance: "<class \'tensorflow.python.keras.engine.base_layer.Layer\'>"
+ is_instance: "<class \'tensorflow.python.training.checkpointable.base.CheckpointableBase\'>"
+ is_instance: "<type \'object\'>"
+ member {
+ name: "activity_regularizer"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "dtype"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "inbound_nodes"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "input"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "input_mask"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "input_shape"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "losses"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "name"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "non_trainable_variables"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "non_trainable_weights"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "outbound_nodes"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "output"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "output_mask"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "output_shape"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "states"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "trainable_variables"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "trainable_weights"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "updates"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "variables"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "weights"
+ mtype: "<type \'property\'>"
+ }
+ member_method {
+ name: "__init__"
+ argspec: "args=[\'self\', \'cell\', \'return_sequences\', \'return_state\', \'go_backwards\', \'stateful\', \'unroll\'], varargs=None, keywords=kwargs, defaults=[\'False\', \'False\', \'False\', \'False\', \'False\'], "
+ }
+ member_method {
+ name: "add_loss"
+ argspec: "args=[\'self\', \'losses\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "add_update"
+ argspec: "args=[\'self\', \'updates\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "add_variable"
+ argspec: "args=[\'self\'], varargs=args, keywords=kwargs, defaults=None"
+ }
+ member_method {
+ name: "add_weight"
+ argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'synchronization\', \'aggregation\'], varargs=None, keywords=kwargs, defaults=[\'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'VariableSynchronization.AUTO\', \'VariableAggregation.NONE\'], "
+ }
+ member_method {
+ name: "apply"
+ argspec: "args=[\'self\', \'inputs\'], varargs=args, keywords=kwargs, defaults=None"
+ }
+ member_method {
+ name: "build"
+ argspec: "args=[\'instance\', \'input_shape\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "call"
+ argspec: "args=[\'self\', \'inputs\', \'mask\', \'training\', \'initial_state\', \'constants\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'None\'], "
+ }
+ member_method {
+ name: "compute_mask"
+ argspec: "args=[\'self\', \'inputs\', \'mask\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "compute_output_shape"
+ argspec: "args=[\'instance\', \'input_shape\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "count_params"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "from_config"
+ argspec: "args=[\'cls\', \'config\', \'custom_objects\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "get_config"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_initial_state"
+ argspec: "args=[\'self\', \'inputs\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_input_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_input_mask_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_input_shape_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_losses_for"
+ argspec: "args=[\'self\', \'inputs\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_output_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_output_mask_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_output_shape_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_updates_for"
+ argspec: "args=[\'self\', \'inputs\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_weights"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "reset_states"
+ argspec: "args=[\'self\', \'states\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "set_weights"
+ argspec: "args=[\'self\', \'weights\'], varargs=None, keywords=None, defaults=None"
+ }
+}
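
A hedged usage sketch of the RNN wrapper recorded above: the first constructor argument is a cell object, matching args=['self', 'cell', 'return_sequences', ...]; SimpleRNNCell is the cell class whose golden file also appears in this change. Shapes are illustrative assumptions:

    import tensorflow as tf

    # Wrap one recurrent cell; return_sequences=True emits an output per timestep.
    cell = tf.keras.layers.SimpleRNNCell(32)
    layer = tf.keras.layers.RNN(cell, return_sequences=True)

    x = tf.keras.Input(shape=(None, 8))   # (batch, timesteps, features)
    y = layer(x)                          # (batch, timesteps, 32)
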
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-re-l-u.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-re-l-u.pbtxt
new file mode 100644
index 0000000000..413f45f018
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-re-l-u.pbtxt
@@ -0,0 +1,175 @@
+path: "tensorflow.keras.layers.ReLU"
+tf_class {
+ is_instance: "<class \'tensorflow.python.keras.layers.advanced_activations.ReLU\'>"
+ is_instance: "<class \'tensorflow.python.keras.engine.base_layer.Layer\'>"
+ is_instance: "<class \'tensorflow.python.training.checkpointable.base.CheckpointableBase\'>"
+ is_instance: "<type \'object\'>"
+ member {
+ name: "activity_regularizer"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "dtype"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "inbound_nodes"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "input"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "input_mask"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "input_shape"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "losses"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "name"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "non_trainable_variables"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "non_trainable_weights"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "outbound_nodes"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "output"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "output_mask"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "output_shape"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "trainable_variables"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "trainable_weights"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "updates"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "variables"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "weights"
+ mtype: "<type \'property\'>"
+ }
+ member_method {
+ name: "__init__"
+ argspec: "args=[\'self\', \'max_value\', \'negative_slope\', \'threshold\'], varargs=None, keywords=kwargs, defaults=[\'None\', \'0\', \'0\'], "
+ }
+ member_method {
+ name: "add_loss"
+ argspec: "args=[\'self\', \'losses\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "add_update"
+ argspec: "args=[\'self\', \'updates\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "add_variable"
+ argspec: "args=[\'self\'], varargs=args, keywords=kwargs, defaults=None"
+ }
+ member_method {
+ name: "add_weight"
+ argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'synchronization\', \'aggregation\'], varargs=None, keywords=kwargs, defaults=[\'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'VariableSynchronization.AUTO\', \'VariableAggregation.NONE\'], "
+ }
+ member_method {
+ name: "apply"
+ argspec: "args=[\'self\', \'inputs\'], varargs=args, keywords=kwargs, defaults=None"
+ }
+ member_method {
+ name: "build"
+ argspec: "args=[\'self\', \'input_shape\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "call"
+ argspec: "args=[\'self\', \'inputs\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "compute_mask"
+ argspec: "args=[\'self\', \'inputs\', \'mask\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "compute_output_shape"
+ argspec: "args=[\'instance\', \'input_shape\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "count_params"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "from_config"
+ argspec: "args=[\'cls\', \'config\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_config"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_input_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_input_mask_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_input_shape_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_losses_for"
+ argspec: "args=[\'self\', \'inputs\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_output_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_output_mask_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_output_shape_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_updates_for"
+ argspec: "args=[\'self\', \'inputs\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_weights"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "set_weights"
+ argspec: "args=[\'self\', \'weights\'], varargs=None, keywords=None, defaults=None"
+ }
+}
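
A short sketch of the ReLU constructor defaults shown above (max_value=None, negative_slope=0, threshold=0); the concrete values are illustrative only:

    import tensorflow as tf

    relu6 = tf.keras.layers.ReLU(max_value=6.0)        # clip activations at 6
    leaky = tf.keras.layers.ReLU(negative_slope=0.1)   # leaky behaviour below the threshold
    y = relu6(tf.keras.Input(shape=(16,)))
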
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-repeat-vector.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-repeat-vector.pbtxt
new file mode 100644
index 0000000000..9c61ff6027
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-repeat-vector.pbtxt
@@ -0,0 +1,175 @@
+path: "tensorflow.keras.layers.RepeatVector"
+tf_class {
+ is_instance: "<class \'tensorflow.python.keras.layers.core.RepeatVector\'>"
+ is_instance: "<class \'tensorflow.python.keras.engine.base_layer.Layer\'>"
+ is_instance: "<class \'tensorflow.python.training.checkpointable.base.CheckpointableBase\'>"
+ is_instance: "<type \'object\'>"
+ member {
+ name: "activity_regularizer"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "dtype"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "inbound_nodes"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "input"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "input_mask"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "input_shape"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "losses"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "name"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "non_trainable_variables"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "non_trainable_weights"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "outbound_nodes"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "output"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "output_mask"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "output_shape"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "trainable_variables"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "trainable_weights"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "updates"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "variables"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "weights"
+ mtype: "<type \'property\'>"
+ }
+ member_method {
+ name: "__init__"
+ argspec: "args=[\'self\', \'n\'], varargs=None, keywords=kwargs, defaults=None"
+ }
+ member_method {
+ name: "add_loss"
+ argspec: "args=[\'self\', \'losses\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "add_update"
+ argspec: "args=[\'self\', \'updates\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "add_variable"
+ argspec: "args=[\'self\'], varargs=args, keywords=kwargs, defaults=None"
+ }
+ member_method {
+ name: "add_weight"
+ argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'synchronization\', \'aggregation\'], varargs=None, keywords=kwargs, defaults=[\'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'VariableSynchronization.AUTO\', \'VariableAggregation.NONE\'], "
+ }
+ member_method {
+ name: "apply"
+ argspec: "args=[\'self\', \'inputs\'], varargs=args, keywords=kwargs, defaults=None"
+ }
+ member_method {
+ name: "build"
+ argspec: "args=[\'self\', \'input_shape\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "call"
+ argspec: "args=[\'self\', \'inputs\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "compute_mask"
+ argspec: "args=[\'self\', \'inputs\', \'mask\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "compute_output_shape"
+ argspec: "args=[\'self\', \'input_shape\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "count_params"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "from_config"
+ argspec: "args=[\'cls\', \'config\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_config"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_input_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_input_mask_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_input_shape_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_losses_for"
+ argspec: "args=[\'self\', \'inputs\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_output_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_output_mask_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_output_shape_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_updates_for"
+ argspec: "args=[\'self\', \'inputs\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_weights"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "set_weights"
+ argspec: "args=[\'self\', \'weights\'], varargs=None, keywords=None, defaults=None"
+ }
+}
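
Illustrative use of RepeatVector(n) as captured by the argspec above; n is the only constructor argument and the input shape is assumed:

    import tensorflow as tf

    x = tf.keras.Input(shape=(32,))           # (batch, 32)
    y = tf.keras.layers.RepeatVector(5)(x)    # (batch, 5, 32)
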
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-reshape.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-reshape.pbtxt
new file mode 100644
index 0000000000..baa91804c4
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-reshape.pbtxt
@@ -0,0 +1,175 @@
+path: "tensorflow.keras.layers.Reshape"
+tf_class {
+ is_instance: "<class \'tensorflow.python.keras.layers.core.Reshape\'>"
+ is_instance: "<class \'tensorflow.python.keras.engine.base_layer.Layer\'>"
+ is_instance: "<class \'tensorflow.python.training.checkpointable.base.CheckpointableBase\'>"
+ is_instance: "<type \'object\'>"
+ member {
+ name: "activity_regularizer"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "dtype"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "inbound_nodes"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "input"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "input_mask"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "input_shape"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "losses"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "name"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "non_trainable_variables"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "non_trainable_weights"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "outbound_nodes"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "output"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "output_mask"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "output_shape"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "trainable_variables"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "trainable_weights"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "updates"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "variables"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "weights"
+ mtype: "<type \'property\'>"
+ }
+ member_method {
+ name: "__init__"
+ argspec: "args=[\'self\', \'target_shape\'], varargs=None, keywords=kwargs, defaults=None"
+ }
+ member_method {
+ name: "add_loss"
+ argspec: "args=[\'self\', \'losses\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "add_update"
+ argspec: "args=[\'self\', \'updates\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "add_variable"
+ argspec: "args=[\'self\'], varargs=args, keywords=kwargs, defaults=None"
+ }
+ member_method {
+ name: "add_weight"
+ argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'synchronization\', \'aggregation\'], varargs=None, keywords=kwargs, defaults=[\'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'VariableSynchronization.AUTO\', \'VariableAggregation.NONE\'], "
+ }
+ member_method {
+ name: "apply"
+ argspec: "args=[\'self\', \'inputs\'], varargs=args, keywords=kwargs, defaults=None"
+ }
+ member_method {
+ name: "build"
+ argspec: "args=[\'self\', \'input_shape\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "call"
+ argspec: "args=[\'self\', \'inputs\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "compute_mask"
+ argspec: "args=[\'self\', \'inputs\', \'mask\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "compute_output_shape"
+ argspec: "args=[\'self\', \'input_shape\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "count_params"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "from_config"
+ argspec: "args=[\'cls\', \'config\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_config"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_input_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_input_mask_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_input_shape_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_losses_for"
+ argspec: "args=[\'self\', \'inputs\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_output_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_output_mask_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_output_shape_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_updates_for"
+ argspec: "args=[\'self\', \'inputs\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_weights"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "set_weights"
+ argspec: "args=[\'self\', \'weights\'], varargs=None, keywords=None, defaults=None"
+ }
+}
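
Illustrative use of Reshape(target_shape) per the argspec above; note that target_shape excludes the batch dimension (an assumption from standard Keras semantics, not stated in this diff):

    import tensorflow as tf

    x = tf.keras.Input(shape=(12,))
    y = tf.keras.layers.Reshape((3, 4))(x)    # (batch, 3, 4), since 3 * 4 == 12
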
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-separable-conv1-d.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-separable-conv1-d.pbtxt
new file mode 100644
index 0000000000..15a5d6ac9e
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-separable-conv1-d.pbtxt
@@ -0,0 +1,177 @@
+path: "tensorflow.keras.layers.SeparableConv1D"
+tf_class {
+ is_instance: "<class \'tensorflow.python.keras.layers.convolutional.SeparableConv1D\'>"
+ is_instance: "<class \'tensorflow.python.keras.layers.convolutional.SeparableConv\'>"
+ is_instance: "<class \'tensorflow.python.keras.layers.convolutional.Conv\'>"
+ is_instance: "<class \'tensorflow.python.keras.engine.base_layer.Layer\'>"
+ is_instance: "<class \'tensorflow.python.training.checkpointable.base.CheckpointableBase\'>"
+ is_instance: "<type \'object\'>"
+ member {
+ name: "activity_regularizer"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "dtype"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "inbound_nodes"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "input"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "input_mask"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "input_shape"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "losses"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "name"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "non_trainable_variables"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "non_trainable_weights"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "outbound_nodes"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "output"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "output_mask"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "output_shape"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "trainable_variables"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "trainable_weights"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "updates"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "variables"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "weights"
+ mtype: "<type \'property\'>"
+ }
+ member_method {
+ name: "__init__"
+ argspec: "args=[\'self\', \'filters\', \'kernel_size\', \'strides\', \'padding\', \'data_format\', \'dilation_rate\', \'depth_multiplier\', \'activation\', \'use_bias\', \'depthwise_initializer\', \'pointwise_initializer\', \'bias_initializer\', \'depthwise_regularizer\', \'pointwise_regularizer\', \'bias_regularizer\', \'activity_regularizer\', \'depthwise_constraint\', \'pointwise_constraint\', \'bias_constraint\'], varargs=None, keywords=kwargs, defaults=[\'1\', \'valid\', \'None\', \'1\', \'1\', \'None\', \'True\', \'glorot_uniform\', \'glorot_uniform\', \'zeros\', \'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'None\'], "
+ }
+ member_method {
+ name: "add_loss"
+ argspec: "args=[\'self\', \'losses\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "add_update"
+ argspec: "args=[\'self\', \'updates\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "add_variable"
+ argspec: "args=[\'self\'], varargs=args, keywords=kwargs, defaults=None"
+ }
+ member_method {
+ name: "add_weight"
+ argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'synchronization\', \'aggregation\'], varargs=None, keywords=kwargs, defaults=[\'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'VariableSynchronization.AUTO\', \'VariableAggregation.NONE\'], "
+ }
+ member_method {
+ name: "apply"
+ argspec: "args=[\'self\', \'inputs\'], varargs=args, keywords=kwargs, defaults=None"
+ }
+ member_method {
+ name: "build"
+ argspec: "args=[\'self\', \'input_shape\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "call"
+ argspec: "args=[\'self\', \'inputs\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "compute_mask"
+ argspec: "args=[\'self\', \'inputs\', \'mask\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "compute_output_shape"
+ argspec: "args=[\'self\', \'input_shape\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "count_params"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "from_config"
+ argspec: "args=[\'cls\', \'config\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_config"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_input_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_input_mask_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_input_shape_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_losses_for"
+ argspec: "args=[\'self\', \'inputs\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_output_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_output_mask_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_output_shape_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_updates_for"
+ argspec: "args=[\'self\', \'inputs\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_weights"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "set_weights"
+ argspec: "args=[\'self\', \'weights\'], varargs=None, keywords=None, defaults=None"
+ }
+}
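
A usage sketch matching the SeparableConv1D defaults recorded above (strides=1, padding='valid', depth_multiplier=1); input shape and hyperparameters are assumptions for illustration:

    import tensorflow as tf

    x = tf.keras.Input(shape=(100, 16))       # (batch, steps, channels)
    y = tf.keras.layers.SeparableConv1D(
        filters=32, kernel_size=3, padding='same', activation='relu')(x)   # (batch, 100, 32)
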
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-separable-conv2-d.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-separable-conv2-d.pbtxt
new file mode 100644
index 0000000000..be43bd5b3c
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-separable-conv2-d.pbtxt
@@ -0,0 +1,177 @@
+path: "tensorflow.keras.layers.SeparableConv2D"
+tf_class {
+ is_instance: "<class \'tensorflow.python.keras.layers.convolutional.SeparableConv2D\'>"
+ is_instance: "<class \'tensorflow.python.keras.layers.convolutional.SeparableConv\'>"
+ is_instance: "<class \'tensorflow.python.keras.layers.convolutional.Conv\'>"
+ is_instance: "<class \'tensorflow.python.keras.engine.base_layer.Layer\'>"
+ is_instance: "<class \'tensorflow.python.training.checkpointable.base.CheckpointableBase\'>"
+ is_instance: "<type \'object\'>"
+ member {
+ name: "activity_regularizer"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "dtype"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "inbound_nodes"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "input"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "input_mask"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "input_shape"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "losses"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "name"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "non_trainable_variables"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "non_trainable_weights"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "outbound_nodes"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "output"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "output_mask"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "output_shape"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "trainable_variables"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "trainable_weights"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "updates"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "variables"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "weights"
+ mtype: "<type \'property\'>"
+ }
+ member_method {
+ name: "__init__"
+ argspec: "args=[\'self\', \'filters\', \'kernel_size\', \'strides\', \'padding\', \'data_format\', \'dilation_rate\', \'depth_multiplier\', \'activation\', \'use_bias\', \'depthwise_initializer\', \'pointwise_initializer\', \'bias_initializer\', \'depthwise_regularizer\', \'pointwise_regularizer\', \'bias_regularizer\', \'activity_regularizer\', \'depthwise_constraint\', \'pointwise_constraint\', \'bias_constraint\'], varargs=None, keywords=kwargs, defaults=[\'(1, 1)\', \'valid\', \'None\', \'(1, 1)\', \'1\', \'None\', \'True\', \'glorot_uniform\', \'glorot_uniform\', \'zeros\', \'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'None\'], "
+ }
+ member_method {
+ name: "add_loss"
+ argspec: "args=[\'self\', \'losses\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "add_update"
+ argspec: "args=[\'self\', \'updates\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "add_variable"
+ argspec: "args=[\'self\'], varargs=args, keywords=kwargs, defaults=None"
+ }
+ member_method {
+ name: "add_weight"
+ argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'synchronization\', \'aggregation\'], varargs=None, keywords=kwargs, defaults=[\'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'VariableSynchronization.AUTO\', \'VariableAggregation.NONE\'], "
+ }
+ member_method {
+ name: "apply"
+ argspec: "args=[\'self\', \'inputs\'], varargs=args, keywords=kwargs, defaults=None"
+ }
+ member_method {
+ name: "build"
+ argspec: "args=[\'self\', \'input_shape\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "call"
+ argspec: "args=[\'self\', \'inputs\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "compute_mask"
+ argspec: "args=[\'self\', \'inputs\', \'mask\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "compute_output_shape"
+ argspec: "args=[\'self\', \'input_shape\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "count_params"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "from_config"
+ argspec: "args=[\'cls\', \'config\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_config"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_input_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_input_mask_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_input_shape_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_losses_for"
+ argspec: "args=[\'self\', \'inputs\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_output_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_output_mask_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_output_shape_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_updates_for"
+ argspec: "args=[\'self\', \'inputs\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_weights"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "set_weights"
+ argspec: "args=[\'self\', \'weights\'], varargs=None, keywords=None, defaults=None"
+ }
+}
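
The 2-D variant differs from SeparableConv1D mainly in its tuple defaults (strides and dilation_rate default to (1, 1)); a brief, assumed example:

    import tensorflow as tf

    x = tf.keras.Input(shape=(64, 64, 3))
    y = tf.keras.layers.SeparableConv2D(16, (3, 3), padding='same')(x)   # (batch, 64, 64, 16)
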
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-separable-convolution1-d.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-separable-convolution1-d.pbtxt
new file mode 100644
index 0000000000..6105992c7a
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-separable-convolution1-d.pbtxt
@@ -0,0 +1,177 @@
+path: "tensorflow.keras.layers.SeparableConvolution1D"
+tf_class {
+ is_instance: "<class \'tensorflow.python.keras.layers.convolutional.SeparableConv1D\'>"
+ is_instance: "<class \'tensorflow.python.keras.layers.convolutional.SeparableConv\'>"
+ is_instance: "<class \'tensorflow.python.keras.layers.convolutional.Conv\'>"
+ is_instance: "<class \'tensorflow.python.keras.engine.base_layer.Layer\'>"
+ is_instance: "<class \'tensorflow.python.training.checkpointable.base.CheckpointableBase\'>"
+ is_instance: "<type \'object\'>"
+ member {
+ name: "activity_regularizer"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "dtype"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "inbound_nodes"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "input"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "input_mask"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "input_shape"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "losses"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "name"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "non_trainable_variables"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "non_trainable_weights"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "outbound_nodes"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "output"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "output_mask"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "output_shape"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "trainable_variables"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "trainable_weights"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "updates"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "variables"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "weights"
+ mtype: "<type \'property\'>"
+ }
+ member_method {
+ name: "__init__"
+ argspec: "args=[\'self\', \'filters\', \'kernel_size\', \'strides\', \'padding\', \'data_format\', \'dilation_rate\', \'depth_multiplier\', \'activation\', \'use_bias\', \'depthwise_initializer\', \'pointwise_initializer\', \'bias_initializer\', \'depthwise_regularizer\', \'pointwise_regularizer\', \'bias_regularizer\', \'activity_regularizer\', \'depthwise_constraint\', \'pointwise_constraint\', \'bias_constraint\'], varargs=None, keywords=kwargs, defaults=[\'1\', \'valid\', \'None\', \'1\', \'1\', \'None\', \'True\', \'glorot_uniform\', \'glorot_uniform\', \'zeros\', \'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'None\'], "
+ }
+ member_method {
+ name: "add_loss"
+ argspec: "args=[\'self\', \'losses\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "add_update"
+ argspec: "args=[\'self\', \'updates\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "add_variable"
+ argspec: "args=[\'self\'], varargs=args, keywords=kwargs, defaults=None"
+ }
+ member_method {
+ name: "add_weight"
+ argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'synchronization\', \'aggregation\'], varargs=None, keywords=kwargs, defaults=[\'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'VariableSynchronization.AUTO\', \'VariableAggregation.NONE\'], "
+ }
+ member_method {
+ name: "apply"
+ argspec: "args=[\'self\', \'inputs\'], varargs=args, keywords=kwargs, defaults=None"
+ }
+ member_method {
+ name: "build"
+ argspec: "args=[\'self\', \'input_shape\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "call"
+ argspec: "args=[\'self\', \'inputs\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "compute_mask"
+ argspec: "args=[\'self\', \'inputs\', \'mask\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "compute_output_shape"
+ argspec: "args=[\'self\', \'input_shape\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "count_params"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "from_config"
+ argspec: "args=[\'cls\', \'config\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_config"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_input_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_input_mask_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_input_shape_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_losses_for"
+ argspec: "args=[\'self\', \'inputs\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_output_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_output_mask_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_output_shape_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_updates_for"
+ argspec: "args=[\'self\', \'inputs\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_weights"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "set_weights"
+ argspec: "args=[\'self\', \'weights\'], varargs=None, keywords=None, defaults=None"
+ }
+}
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-separable-convolution2-d.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-separable-convolution2-d.pbtxt
new file mode 100644
index 0000000000..1b6cf1e9ec
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-separable-convolution2-d.pbtxt
@@ -0,0 +1,177 @@
+path: "tensorflow.keras.layers.SeparableConvolution2D"
+tf_class {
+ is_instance: "<class \'tensorflow.python.keras.layers.convolutional.SeparableConv2D\'>"
+ is_instance: "<class \'tensorflow.python.keras.layers.convolutional.SeparableConv\'>"
+ is_instance: "<class \'tensorflow.python.keras.layers.convolutional.Conv\'>"
+ is_instance: "<class \'tensorflow.python.keras.engine.base_layer.Layer\'>"
+ is_instance: "<class \'tensorflow.python.training.checkpointable.base.CheckpointableBase\'>"
+ is_instance: "<type \'object\'>"
+ member {
+ name: "activity_regularizer"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "dtype"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "inbound_nodes"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "input"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "input_mask"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "input_shape"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "losses"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "name"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "non_trainable_variables"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "non_trainable_weights"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "outbound_nodes"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "output"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "output_mask"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "output_shape"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "trainable_variables"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "trainable_weights"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "updates"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "variables"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "weights"
+ mtype: "<type \'property\'>"
+ }
+ member_method {
+ name: "__init__"
+ argspec: "args=[\'self\', \'filters\', \'kernel_size\', \'strides\', \'padding\', \'data_format\', \'dilation_rate\', \'depth_multiplier\', \'activation\', \'use_bias\', \'depthwise_initializer\', \'pointwise_initializer\', \'bias_initializer\', \'depthwise_regularizer\', \'pointwise_regularizer\', \'bias_regularizer\', \'activity_regularizer\', \'depthwise_constraint\', \'pointwise_constraint\', \'bias_constraint\'], varargs=None, keywords=kwargs, defaults=[\'(1, 1)\', \'valid\', \'None\', \'(1, 1)\', \'1\', \'None\', \'True\', \'glorot_uniform\', \'glorot_uniform\', \'zeros\', \'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'None\'], "
+ }
+ member_method {
+ name: "add_loss"
+ argspec: "args=[\'self\', \'losses\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "add_update"
+ argspec: "args=[\'self\', \'updates\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "add_variable"
+ argspec: "args=[\'self\'], varargs=args, keywords=kwargs, defaults=None"
+ }
+ member_method {
+ name: "add_weight"
+ argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'synchronization\', \'aggregation\'], varargs=None, keywords=kwargs, defaults=[\'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'VariableSynchronization.AUTO\', \'VariableAggregation.NONE\'], "
+ }
+ member_method {
+ name: "apply"
+ argspec: "args=[\'self\', \'inputs\'], varargs=args, keywords=kwargs, defaults=None"
+ }
+ member_method {
+ name: "build"
+ argspec: "args=[\'self\', \'input_shape\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "call"
+ argspec: "args=[\'self\', \'inputs\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "compute_mask"
+ argspec: "args=[\'self\', \'inputs\', \'mask\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "compute_output_shape"
+ argspec: "args=[\'self\', \'input_shape\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "count_params"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "from_config"
+ argspec: "args=[\'cls\', \'config\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_config"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_input_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_input_mask_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_input_shape_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_losses_for"
+ argspec: "args=[\'self\', \'inputs\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_output_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_output_mask_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_output_shape_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_updates_for"
+ argspec: "args=[\'self\', \'inputs\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_weights"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "set_weights"
+ argspec: "args=[\'self\', \'weights\'], varargs=None, keywords=None, defaults=None"
+ }
+}
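
Per the is_instance entries in the two golden files above, SeparableConvolution1D and SeparableConvolution2D are exported aliases of the SeparableConv1D and SeparableConv2D classes; assuming both names resolve to the same class objects, the identity checks below pass:

    import tensorflow as tf

    assert tf.keras.layers.SeparableConvolution1D is tf.keras.layers.SeparableConv1D
    assert tf.keras.layers.SeparableConvolution2D is tf.keras.layers.SeparableConv2D
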
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-simple-r-n-n-cell.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-simple-r-n-n-cell.pbtxt
new file mode 100644
index 0000000000..29488a37f8
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-simple-r-n-n-cell.pbtxt
@@ -0,0 +1,179 @@
+path: "tensorflow.keras.layers.SimpleRNNCell"
+tf_class {
+ is_instance: "<class \'tensorflow.python.keras.layers.recurrent.SimpleRNNCell\'>"
+ is_instance: "<class \'tensorflow.python.keras.engine.base_layer.Layer\'>"
+ is_instance: "<class \'tensorflow.python.training.checkpointable.base.CheckpointableBase\'>"
+ is_instance: "<type \'object\'>"
+ member {
+ name: "activity_regularizer"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "dtype"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "inbound_nodes"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "input"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "input_mask"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "input_shape"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "losses"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "name"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "non_trainable_variables"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "non_trainable_weights"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "outbound_nodes"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "output"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "output_mask"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "output_shape"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "trainable_variables"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "trainable_weights"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "updates"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "variables"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "weights"
+ mtype: "<type \'property\'>"
+ }
+ member_method {
+ name: "__init__"
+ argspec: "args=[\'self\', \'units\', \'activation\', \'use_bias\', \'kernel_initializer\', \'recurrent_initializer\', \'bias_initializer\', \'kernel_regularizer\', \'recurrent_regularizer\', \'bias_regularizer\', \'kernel_constraint\', \'recurrent_constraint\', \'bias_constraint\', \'dropout\', \'recurrent_dropout\'], varargs=None, keywords=kwargs, defaults=[\'tanh\', \'True\', \'glorot_uniform\', \'orthogonal\', \'zeros\', \'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'0.0\', \'0.0\'], "
+ }
+ member_method {
+ name: "add_loss"
+ argspec: "args=[\'self\', \'losses\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "add_update"
+ argspec: "args=[\'self\', \'updates\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "add_variable"
+ argspec: "args=[\'self\'], varargs=args, keywords=kwargs, defaults=None"
+ }
+ member_method {
+ name: "add_weight"
+ argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'synchronization\', \'aggregation\'], varargs=None, keywords=kwargs, defaults=[\'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'VariableSynchronization.AUTO\', \'VariableAggregation.NONE\'], "
+ }
+ member_method {
+ name: "apply"
+ argspec: "args=[\'self\', \'inputs\'], varargs=args, keywords=kwargs, defaults=None"
+ }
+ member_method {
+ name: "build"
+ argspec: "args=[\'instance\', \'input_shape\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "call"
+ argspec: "args=[\'self\', \'inputs\', \'states\', \'training\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "compute_mask"
+ argspec: "args=[\'self\', \'inputs\', \'mask\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "compute_output_shape"
+ argspec: "args=[\'self\', \'input_shape\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "count_params"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "from_config"
+ argspec: "args=[\'cls\', \'config\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_config"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_initial_state"
+ argspec: "args=[\'self\', \'inputs\', \'batch_size\', \'dtype\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\'], "
+ }
+ member_method {
+ name: "get_input_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_input_mask_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_input_shape_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_losses_for"
+ argspec: "args=[\'self\', \'inputs\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_output_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_output_mask_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_output_shape_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_updates_for"
+ argspec: "args=[\'self\', \'inputs\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_weights"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "set_weights"
+ argspec: "args=[\'self\', \'weights\'], varargs=None, keywords=None, defaults=None"
+ }
+}
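
A sketch of composing cells, assuming the standard Keras convention (not shown in this diff) that the RNN wrapper also accepts a list of cells and stacks them:

    import tensorflow as tf

    cells = [tf.keras.layers.SimpleRNNCell(32), tf.keras.layers.SimpleRNNCell(32)]
    stacked = tf.keras.layers.RNN(cells)              # return_sequences defaults to False
    y = stacked(tf.keras.Input(shape=(None, 8)))      # (batch, 32), the last cell's output
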
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-simple-r-n-n.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-simple-r-n-n.pbtxt
new file mode 100644
index 0000000000..182efb83b8
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-simple-r-n-n.pbtxt
@@ -0,0 +1,244 @@
+path: "tensorflow.keras.layers.SimpleRNN"
+tf_class {
+ is_instance: "<class \'tensorflow.python.keras.layers.recurrent.SimpleRNN\'>"
+ is_instance: "<class \'tensorflow.python.keras.layers.recurrent.RNN\'>"
+ is_instance: "<class \'tensorflow.python.keras.engine.base_layer.Layer\'>"
+ is_instance: "<class \'tensorflow.python.training.checkpointable.base.CheckpointableBase\'>"
+ is_instance: "<type \'object\'>"
+ member {
+ name: "activation"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "activity_regularizer"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "bias_constraint"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "bias_initializer"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "bias_regularizer"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "dropout"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "dtype"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "inbound_nodes"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "input"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "input_mask"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "input_shape"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "kernel_constraint"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "kernel_initializer"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "kernel_regularizer"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "losses"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "name"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "non_trainable_variables"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "non_trainable_weights"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "outbound_nodes"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "output"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "output_mask"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "output_shape"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "recurrent_constraint"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "recurrent_dropout"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "recurrent_initializer"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "recurrent_regularizer"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "states"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "trainable_variables"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "trainable_weights"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "units"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "updates"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "use_bias"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "variables"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "weights"
+ mtype: "<type \'property\'>"
+ }
+ member_method {
+ name: "__init__"
+ argspec: "args=[\'self\', \'units\', \'activation\', \'use_bias\', \'kernel_initializer\', \'recurrent_initializer\', \'bias_initializer\', \'kernel_regularizer\', \'recurrent_regularizer\', \'bias_regularizer\', \'activity_regularizer\', \'kernel_constraint\', \'recurrent_constraint\', \'bias_constraint\', \'dropout\', \'recurrent_dropout\', \'return_sequences\', \'return_state\', \'go_backwards\', \'stateful\', \'unroll\'], varargs=None, keywords=kwargs, defaults=[\'tanh\', \'True\', \'glorot_uniform\', \'orthogonal\', \'zeros\', \'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'0.0\', \'0.0\', \'False\', \'False\', \'False\', \'False\', \'False\'], "
+ }
+ member_method {
+ name: "add_loss"
+ argspec: "args=[\'self\', \'losses\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "add_update"
+ argspec: "args=[\'self\', \'updates\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "add_variable"
+ argspec: "args=[\'self\'], varargs=args, keywords=kwargs, defaults=None"
+ }
+ member_method {
+ name: "add_weight"
+ argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'synchronization\', \'aggregation\'], varargs=None, keywords=kwargs, defaults=[\'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'VariableSynchronization.AUTO\', \'VariableAggregation.NONE\'], "
+ }
+ member_method {
+ name: "apply"
+ argspec: "args=[\'self\', \'inputs\'], varargs=args, keywords=kwargs, defaults=None"
+ }
+ member_method {
+ name: "build"
+ argspec: "args=[\'instance\', \'input_shape\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "call"
+ argspec: "args=[\'self\', \'inputs\', \'mask\', \'training\', \'initial_state\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\'], "
+ }
+ member_method {
+ name: "compute_mask"
+ argspec: "args=[\'self\', \'inputs\', \'mask\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "compute_output_shape"
+ argspec: "args=[\'instance\', \'input_shape\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "count_params"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "from_config"
+ argspec: "args=[\'cls\', \'config\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_config"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_initial_state"
+ argspec: "args=[\'self\', \'inputs\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_input_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_input_mask_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_input_shape_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_losses_for"
+ argspec: "args=[\'self\', \'inputs\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_output_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_output_mask_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_output_shape_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_updates_for"
+ argspec: "args=[\'self\', \'inputs\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_weights"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "reset_states"
+ argspec: "args=[\'self\', \'states\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "set_weights"
+ argspec: "args=[\'self\', \'weights\'], varargs=None, keywords=None, defaults=None"
+ }
+}
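The constructor signature recorded above (units, activation='tanh', recurrent_initializer='orthogonal', dropout/recurrent_dropout, return_sequences, stateful, unroll) matches tf.keras.layers.SimpleRNN; a minimal usage sketch under that assumption, with illustrative shapes:

import tensorflow as tf

# (batch, timesteps, features) -> last-step output of shape (batch, 4)
inputs = tf.keras.Input(shape=(10, 8))
outputs = tf.keras.layers.SimpleRNN(4, return_sequences=False)(inputs)
model = tf.keras.Model(inputs, outputs)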
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-softmax.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-softmax.pbtxt
new file mode 100644
index 0000000000..d29731ecf9
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-softmax.pbtxt
@@ -0,0 +1,175 @@
+path: "tensorflow.keras.layers.Softmax"
+tf_class {
+ is_instance: "<class \'tensorflow.python.keras.layers.advanced_activations.Softmax\'>"
+ is_instance: "<class \'tensorflow.python.keras.engine.base_layer.Layer\'>"
+ is_instance: "<class \'tensorflow.python.training.checkpointable.base.CheckpointableBase\'>"
+ is_instance: "<type \'object\'>"
+ member {
+ name: "activity_regularizer"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "dtype"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "inbound_nodes"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "input"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "input_mask"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "input_shape"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "losses"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "name"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "non_trainable_variables"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "non_trainable_weights"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "outbound_nodes"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "output"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "output_mask"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "output_shape"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "trainable_variables"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "trainable_weights"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "updates"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "variables"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "weights"
+ mtype: "<type \'property\'>"
+ }
+ member_method {
+ name: "__init__"
+ argspec: "args=[\'self\', \'axis\'], varargs=None, keywords=kwargs, defaults=[\'-1\'], "
+ }
+ member_method {
+ name: "add_loss"
+ argspec: "args=[\'self\', \'losses\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "add_update"
+ argspec: "args=[\'self\', \'updates\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "add_variable"
+ argspec: "args=[\'self\'], varargs=args, keywords=kwargs, defaults=None"
+ }
+ member_method {
+ name: "add_weight"
+ argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'synchronization\', \'aggregation\'], varargs=None, keywords=kwargs, defaults=[\'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'VariableSynchronization.AUTO\', \'VariableAggregation.NONE\'], "
+ }
+ member_method {
+ name: "apply"
+ argspec: "args=[\'self\', \'inputs\'], varargs=args, keywords=kwargs, defaults=None"
+ }
+ member_method {
+ name: "build"
+ argspec: "args=[\'self\', \'input_shape\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "call"
+ argspec: "args=[\'self\', \'inputs\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "compute_mask"
+ argspec: "args=[\'self\', \'inputs\', \'mask\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "compute_output_shape"
+ argspec: "args=[\'instance\', \'input_shape\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "count_params"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "from_config"
+ argspec: "args=[\'cls\', \'config\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_config"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_input_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_input_mask_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_input_shape_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_losses_for"
+ argspec: "args=[\'self\', \'inputs\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_output_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_output_mask_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_output_shape_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_updates_for"
+ argspec: "args=[\'self\', \'inputs\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_weights"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "set_weights"
+ argspec: "args=[\'self\', \'weights\'], varargs=None, keywords=None, defaults=None"
+ }
+}
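A minimal usage sketch for tf.keras.layers.Softmax as recorded above; axis=-1 is the default and the input shape is illustrative:

import tensorflow as tf

x = tf.keras.Input(shape=(5,))
y = tf.keras.layers.Softmax(axis=-1)(x)  # normalizes along the last axis
model = tf.keras.Model(x, y)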
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-spatial-dropout1-d.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-spatial-dropout1-d.pbtxt
new file mode 100644
index 0000000000..a6d7494ca7
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-spatial-dropout1-d.pbtxt
@@ -0,0 +1,176 @@
+path: "tensorflow.keras.layers.SpatialDropout1D"
+tf_class {
+ is_instance: "<class \'tensorflow.python.keras.layers.core.SpatialDropout1D\'>"
+ is_instance: "<class \'tensorflow.python.keras.layers.core.Dropout\'>"
+ is_instance: "<class \'tensorflow.python.keras.engine.base_layer.Layer\'>"
+ is_instance: "<class \'tensorflow.python.training.checkpointable.base.CheckpointableBase\'>"
+ is_instance: "<type \'object\'>"
+ member {
+ name: "activity_regularizer"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "dtype"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "inbound_nodes"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "input"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "input_mask"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "input_shape"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "losses"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "name"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "non_trainable_variables"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "non_trainable_weights"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "outbound_nodes"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "output"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "output_mask"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "output_shape"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "trainable_variables"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "trainable_weights"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "updates"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "variables"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "weights"
+ mtype: "<type \'property\'>"
+ }
+ member_method {
+ name: "__init__"
+ argspec: "args=[\'self\', \'rate\'], varargs=None, keywords=kwargs, defaults=None"
+ }
+ member_method {
+ name: "add_loss"
+ argspec: "args=[\'self\', \'losses\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "add_update"
+ argspec: "args=[\'self\', \'updates\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "add_variable"
+ argspec: "args=[\'self\'], varargs=args, keywords=kwargs, defaults=None"
+ }
+ member_method {
+ name: "add_weight"
+ argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'synchronization\', \'aggregation\'], varargs=None, keywords=kwargs, defaults=[\'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'VariableSynchronization.AUTO\', \'VariableAggregation.NONE\'], "
+ }
+ member_method {
+ name: "apply"
+ argspec: "args=[\'self\', \'inputs\'], varargs=args, keywords=kwargs, defaults=None"
+ }
+ member_method {
+ name: "build"
+ argspec: "args=[\'self\', \'input_shape\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "call"
+ argspec: "args=[\'self\', \'inputs\', \'training\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "compute_mask"
+ argspec: "args=[\'self\', \'inputs\', \'mask\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "compute_output_shape"
+ argspec: "args=[\'self\', \'input_shape\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "count_params"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "from_config"
+ argspec: "args=[\'cls\', \'config\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_config"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_input_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_input_mask_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_input_shape_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_losses_for"
+ argspec: "args=[\'self\', \'inputs\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_output_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_output_mask_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_output_shape_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_updates_for"
+ argspec: "args=[\'self\', \'inputs\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_weights"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "set_weights"
+ argspec: "args=[\'self\', \'weights\'], varargs=None, keywords=None, defaults=None"
+ }
+}
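A minimal usage sketch for tf.keras.layers.SpatialDropout1D as recorded above; unlike plain Dropout, it drops entire feature channels across all timesteps (rate and shapes are illustrative):

import tensorflow as tf

x = tf.keras.Input(shape=(20, 16))                 # (timesteps, channels)
y = tf.keras.layers.SpatialDropout1D(rate=0.3)(x)  # zeros whole channels during training
model = tf.keras.Model(x, y)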
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-spatial-dropout2-d.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-spatial-dropout2-d.pbtxt
new file mode 100644
index 0000000000..c36e802693
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-spatial-dropout2-d.pbtxt
@@ -0,0 +1,176 @@
+path: "tensorflow.keras.layers.SpatialDropout2D"
+tf_class {
+ is_instance: "<class \'tensorflow.python.keras.layers.core.SpatialDropout2D\'>"
+ is_instance: "<class \'tensorflow.python.keras.layers.core.Dropout\'>"
+ is_instance: "<class \'tensorflow.python.keras.engine.base_layer.Layer\'>"
+ is_instance: "<class \'tensorflow.python.training.checkpointable.base.CheckpointableBase\'>"
+ is_instance: "<type \'object\'>"
+ member {
+ name: "activity_regularizer"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "dtype"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "inbound_nodes"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "input"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "input_mask"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "input_shape"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "losses"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "name"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "non_trainable_variables"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "non_trainable_weights"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "outbound_nodes"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "output"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "output_mask"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "output_shape"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "trainable_variables"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "trainable_weights"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "updates"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "variables"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "weights"
+ mtype: "<type \'property\'>"
+ }
+ member_method {
+ name: "__init__"
+ argspec: "args=[\'self\', \'rate\', \'data_format\'], varargs=None, keywords=kwargs, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "add_loss"
+ argspec: "args=[\'self\', \'losses\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "add_update"
+ argspec: "args=[\'self\', \'updates\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "add_variable"
+ argspec: "args=[\'self\'], varargs=args, keywords=kwargs, defaults=None"
+ }
+ member_method {
+ name: "add_weight"
+ argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'synchronization\', \'aggregation\'], varargs=None, keywords=kwargs, defaults=[\'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'VariableSynchronization.AUTO\', \'VariableAggregation.NONE\'], "
+ }
+ member_method {
+ name: "apply"
+ argspec: "args=[\'self\', \'inputs\'], varargs=args, keywords=kwargs, defaults=None"
+ }
+ member_method {
+ name: "build"
+ argspec: "args=[\'self\', \'input_shape\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "call"
+ argspec: "args=[\'self\', \'inputs\', \'training\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "compute_mask"
+ argspec: "args=[\'self\', \'inputs\', \'mask\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "compute_output_shape"
+ argspec: "args=[\'self\', \'input_shape\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "count_params"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "from_config"
+ argspec: "args=[\'cls\', \'config\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_config"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_input_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_input_mask_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_input_shape_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_losses_for"
+ argspec: "args=[\'self\', \'inputs\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_output_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_output_mask_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_output_shape_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_updates_for"
+ argspec: "args=[\'self\', \'inputs\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_weights"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "set_weights"
+ argspec: "args=[\'self\', \'weights\'], varargs=None, keywords=None, defaults=None"
+ }
+}
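The 2D variant takes the same rate plus an optional data_format; a minimal sketch with channels_last input (shapes illustrative):

import tensorflow as tf

x = tf.keras.Input(shape=(32, 32, 8))               # (height, width, channels)
y = tf.keras.layers.SpatialDropout2D(rate=0.25)(x)  # drops entire 2D feature maps
model = tf.keras.Model(x, y)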
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-spatial-dropout3-d.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-spatial-dropout3-d.pbtxt
new file mode 100644
index 0000000000..9c46cfe40f
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-spatial-dropout3-d.pbtxt
@@ -0,0 +1,176 @@
+path: "tensorflow.keras.layers.SpatialDropout3D"
+tf_class {
+ is_instance: "<class \'tensorflow.python.keras.layers.core.SpatialDropout3D\'>"
+ is_instance: "<class \'tensorflow.python.keras.layers.core.Dropout\'>"
+ is_instance: "<class \'tensorflow.python.keras.engine.base_layer.Layer\'>"
+ is_instance: "<class \'tensorflow.python.training.checkpointable.base.CheckpointableBase\'>"
+ is_instance: "<type \'object\'>"
+ member {
+ name: "activity_regularizer"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "dtype"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "inbound_nodes"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "input"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "input_mask"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "input_shape"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "losses"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "name"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "non_trainable_variables"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "non_trainable_weights"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "outbound_nodes"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "output"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "output_mask"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "output_shape"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "trainable_variables"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "trainable_weights"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "updates"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "variables"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "weights"
+ mtype: "<type \'property\'>"
+ }
+ member_method {
+ name: "__init__"
+ argspec: "args=[\'self\', \'rate\', \'data_format\'], varargs=None, keywords=kwargs, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "add_loss"
+ argspec: "args=[\'self\', \'losses\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "add_update"
+ argspec: "args=[\'self\', \'updates\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "add_variable"
+ argspec: "args=[\'self\'], varargs=args, keywords=kwargs, defaults=None"
+ }
+ member_method {
+ name: "add_weight"
+ argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'synchronization\', \'aggregation\'], varargs=None, keywords=kwargs, defaults=[\'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'VariableSynchronization.AUTO\', \'VariableAggregation.NONE\'], "
+ }
+ member_method {
+ name: "apply"
+ argspec: "args=[\'self\', \'inputs\'], varargs=args, keywords=kwargs, defaults=None"
+ }
+ member_method {
+ name: "build"
+ argspec: "args=[\'self\', \'input_shape\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "call"
+ argspec: "args=[\'self\', \'inputs\', \'training\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "compute_mask"
+ argspec: "args=[\'self\', \'inputs\', \'mask\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "compute_output_shape"
+ argspec: "args=[\'self\', \'input_shape\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "count_params"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "from_config"
+ argspec: "args=[\'cls\', \'config\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_config"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_input_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_input_mask_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_input_shape_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_losses_for"
+ argspec: "args=[\'self\', \'inputs\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_output_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_output_mask_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_output_shape_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_updates_for"
+ argspec: "args=[\'self\', \'inputs\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_weights"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "set_weights"
+ argspec: "args=[\'self\', \'weights\'], varargs=None, keywords=None, defaults=None"
+ }
+}
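Likewise for the 3D variant, which operates on volumetric feature maps; a minimal sketch (shapes illustrative):

import tensorflow as tf

x = tf.keras.Input(shape=(8, 16, 16, 4))           # (depth, height, width, channels)
y = tf.keras.layers.SpatialDropout3D(rate=0.2)(x)  # drops entire 3D feature maps
model = tf.keras.Model(x, y)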
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-stacked-r-n-n-cells.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-stacked-r-n-n-cells.pbtxt
new file mode 100644
index 0000000000..8982f78794
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-stacked-r-n-n-cells.pbtxt
@@ -0,0 +1,187 @@
+path: "tensorflow.keras.layers.StackedRNNCells"
+tf_class {
+ is_instance: "<class \'tensorflow.python.keras.layers.recurrent.StackedRNNCells\'>"
+ is_instance: "<class \'tensorflow.python.keras.engine.base_layer.Layer\'>"
+ is_instance: "<class \'tensorflow.python.training.checkpointable.base.CheckpointableBase\'>"
+ is_instance: "<type \'object\'>"
+ member {
+ name: "activity_regularizer"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "dtype"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "inbound_nodes"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "input"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "input_mask"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "input_shape"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "losses"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "name"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "non_trainable_variables"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "non_trainable_weights"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "outbound_nodes"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "output"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "output_mask"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "output_shape"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "output_size"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "state_size"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "trainable_variables"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "trainable_weights"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "updates"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "variables"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "weights"
+ mtype: "<type \'property\'>"
+ }
+ member_method {
+ name: "__init__"
+ argspec: "args=[\'self\', \'cells\'], varargs=None, keywords=kwargs, defaults=None"
+ }
+ member_method {
+ name: "add_loss"
+ argspec: "args=[\'self\', \'losses\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "add_update"
+ argspec: "args=[\'self\', \'updates\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "add_variable"
+ argspec: "args=[\'self\'], varargs=args, keywords=kwargs, defaults=None"
+ }
+ member_method {
+ name: "add_weight"
+ argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'synchronization\', \'aggregation\'], varargs=None, keywords=kwargs, defaults=[\'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'VariableSynchronization.AUTO\', \'VariableAggregation.NONE\'], "
+ }
+ member_method {
+ name: "apply"
+ argspec: "args=[\'self\', \'inputs\'], varargs=args, keywords=kwargs, defaults=None"
+ }
+ member_method {
+ name: "build"
+ argspec: "args=[\'instance\', \'input_shape\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "call"
+ argspec: "args=[\'self\', \'inputs\', \'states\', \'constants\'], varargs=None, keywords=kwargs, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "compute_mask"
+ argspec: "args=[\'self\', \'inputs\', \'mask\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "compute_output_shape"
+ argspec: "args=[\'self\', \'input_shape\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "count_params"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "from_config"
+ argspec: "args=[\'cls\', \'config\', \'custom_objects\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "get_config"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_initial_state"
+ argspec: "args=[\'self\', \'inputs\', \'batch_size\', \'dtype\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\'], "
+ }
+ member_method {
+ name: "get_input_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_input_mask_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_input_shape_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_losses_for"
+ argspec: "args=[\'self\', \'inputs\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_output_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_output_mask_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_output_shape_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_updates_for"
+ argspec: "args=[\'self\', \'inputs\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_weights"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "set_weights"
+ argspec: "args=[\'self\', \'weights\'], varargs=None, keywords=None, defaults=None"
+ }
+}
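StackedRNNCells composes a list of cells into a single cell and is used inside an RNN wrapper rather than on its own; a minimal sketch with illustrative sizes:

import tensorflow as tf

cells = [tf.keras.layers.SimpleRNNCell(8), tf.keras.layers.SimpleRNNCell(4)]
stacked = tf.keras.layers.StackedRNNCells(cells)
x = tf.keras.Input(shape=(10, 16))    # (timesteps, features)
y = tf.keras.layers.RNN(stacked)(x)   # output of the last cell, shape (None, 4)
model = tf.keras.Model(x, y)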
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-subtract.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-subtract.pbtxt
new file mode 100644
index 0000000000..ec2cc50298
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-subtract.pbtxt
@@ -0,0 +1,176 @@
+path: "tensorflow.keras.layers.Subtract"
+tf_class {
+ is_instance: "<class \'tensorflow.python.keras.layers.merge.Subtract\'>"
+ is_instance: "<class \'tensorflow.python.keras.layers.merge._Merge\'>"
+ is_instance: "<class \'tensorflow.python.keras.engine.base_layer.Layer\'>"
+ is_instance: "<class \'tensorflow.python.training.checkpointable.base.CheckpointableBase\'>"
+ is_instance: "<type \'object\'>"
+ member {
+ name: "activity_regularizer"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "dtype"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "inbound_nodes"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "input"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "input_mask"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "input_shape"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "losses"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "name"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "non_trainable_variables"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "non_trainable_weights"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "outbound_nodes"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "output"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "output_mask"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "output_shape"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "trainable_variables"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "trainable_weights"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "updates"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "variables"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "weights"
+ mtype: "<type \'property\'>"
+ }
+ member_method {
+ name: "__init__"
+ argspec: "args=[\'self\'], varargs=None, keywords=kwargs, defaults=None"
+ }
+ member_method {
+ name: "add_loss"
+ argspec: "args=[\'self\', \'losses\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "add_update"
+ argspec: "args=[\'self\', \'updates\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "add_variable"
+ argspec: "args=[\'self\'], varargs=args, keywords=kwargs, defaults=None"
+ }
+ member_method {
+ name: "add_weight"
+ argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'synchronization\', \'aggregation\'], varargs=None, keywords=kwargs, defaults=[\'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'VariableSynchronization.AUTO\', \'VariableAggregation.NONE\'], "
+ }
+ member_method {
+ name: "apply"
+ argspec: "args=[\'self\', \'inputs\'], varargs=args, keywords=kwargs, defaults=None"
+ }
+ member_method {
+ name: "build"
+ argspec: "args=[\'instance\', \'input_shape\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "call"
+ argspec: "args=[\'self\', \'inputs\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "compute_mask"
+ argspec: "args=[\'self\', \'inputs\', \'mask\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "compute_output_shape"
+ argspec: "args=[\'instance\', \'input_shape\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "count_params"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "from_config"
+ argspec: "args=[\'cls\', \'config\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_config"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_input_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_input_mask_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_input_shape_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_losses_for"
+ argspec: "args=[\'self\', \'inputs\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_output_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_output_mask_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_output_shape_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_updates_for"
+ argspec: "args=[\'self\', \'inputs\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_weights"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "set_weights"
+ argspec: "args=[\'self\', \'weights\'], varargs=None, keywords=None, defaults=None"
+ }
+}
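Subtract takes a list of exactly two tensors of the same shape and returns their elementwise difference; a minimal sketch:

import tensorflow as tf

a = tf.keras.Input(shape=(8,))
b = tf.keras.Input(shape=(8,))
diff = tf.keras.layers.Subtract()([a, b])  # computes a - b
model = tf.keras.Model([a, b], diff)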
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-thresholded-re-l-u.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-thresholded-re-l-u.pbtxt
new file mode 100644
index 0000000000..d7bc1980f3
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-thresholded-re-l-u.pbtxt
@@ -0,0 +1,175 @@
+path: "tensorflow.keras.layers.ThresholdedReLU"
+tf_class {
+ is_instance: "<class \'tensorflow.python.keras.layers.advanced_activations.ThresholdedReLU\'>"
+ is_instance: "<class \'tensorflow.python.keras.engine.base_layer.Layer\'>"
+ is_instance: "<class \'tensorflow.python.training.checkpointable.base.CheckpointableBase\'>"
+ is_instance: "<type \'object\'>"
+ member {
+ name: "activity_regularizer"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "dtype"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "inbound_nodes"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "input"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "input_mask"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "input_shape"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "losses"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "name"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "non_trainable_variables"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "non_trainable_weights"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "outbound_nodes"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "output"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "output_mask"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "output_shape"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "trainable_variables"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "trainable_weights"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "updates"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "variables"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "weights"
+ mtype: "<type \'property\'>"
+ }
+ member_method {
+ name: "__init__"
+ argspec: "args=[\'self\', \'theta\'], varargs=None, keywords=kwargs, defaults=[\'1.0\'], "
+ }
+ member_method {
+ name: "add_loss"
+ argspec: "args=[\'self\', \'losses\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "add_update"
+ argspec: "args=[\'self\', \'updates\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "add_variable"
+ argspec: "args=[\'self\'], varargs=args, keywords=kwargs, defaults=None"
+ }
+ member_method {
+ name: "add_weight"
+ argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'synchronization\', \'aggregation\'], varargs=None, keywords=kwargs, defaults=[\'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'VariableSynchronization.AUTO\', \'VariableAggregation.NONE\'], "
+ }
+ member_method {
+ name: "apply"
+ argspec: "args=[\'self\', \'inputs\'], varargs=args, keywords=kwargs, defaults=None"
+ }
+ member_method {
+ name: "build"
+ argspec: "args=[\'self\', \'input_shape\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "call"
+ argspec: "args=[\'self\', \'inputs\', \'mask\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "compute_mask"
+ argspec: "args=[\'self\', \'inputs\', \'mask\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "compute_output_shape"
+ argspec: "args=[\'instance\', \'input_shape\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "count_params"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "from_config"
+ argspec: "args=[\'cls\', \'config\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_config"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_input_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_input_mask_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_input_shape_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_losses_for"
+ argspec: "args=[\'self\', \'inputs\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_output_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_output_mask_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_output_shape_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_updates_for"
+ argspec: "args=[\'self\', \'inputs\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_weights"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "set_weights"
+ argspec: "args=[\'self\', \'weights\'], varargs=None, keywords=None, defaults=None"
+ }
+}
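ThresholdedReLU passes values above theta unchanged and zeros everything else; a minimal sketch with the default theta=1.0:

import tensorflow as tf

x = tf.keras.Input(shape=(8,))
y = tf.keras.layers.ThresholdedReLU(theta=1.0)(x)  # f(v) = v if v > 1.0 else 0
model = tf.keras.Model(x, y)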
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-time-distributed.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-time-distributed.pbtxt
new file mode 100644
index 0000000000..fec2de6b49
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-time-distributed.pbtxt
@@ -0,0 +1,180 @@
+path: "tensorflow.keras.layers.TimeDistributed"
+tf_class {
+ is_instance: "<class \'tensorflow.python.keras.layers.wrappers.TimeDistributed\'>"
+ is_instance: "<class \'tensorflow.python.keras.layers.wrappers.Wrapper\'>"
+ is_instance: "<class \'tensorflow.python.keras.engine.base_layer.Layer\'>"
+ is_instance: "<class \'tensorflow.python.training.checkpointable.base.CheckpointableBase\'>"
+ is_instance: "<type \'object\'>"
+ member {
+ name: "activity_regularizer"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "dtype"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "inbound_nodes"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "input"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "input_mask"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "input_shape"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "losses"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "name"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "non_trainable_variables"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "non_trainable_weights"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "outbound_nodes"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "output"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "output_mask"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "output_shape"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "trainable"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "trainable_variables"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "trainable_weights"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "updates"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "variables"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "weights"
+ mtype: "<type \'property\'>"
+ }
+ member_method {
+ name: "__init__"
+ argspec: "args=[\'self\', \'layer\'], varargs=None, keywords=kwargs, defaults=None"
+ }
+ member_method {
+ name: "add_loss"
+ argspec: "args=[\'self\', \'losses\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "add_update"
+ argspec: "args=[\'self\', \'updates\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "add_variable"
+ argspec: "args=[\'self\'], varargs=args, keywords=kwargs, defaults=None"
+ }
+ member_method {
+ name: "add_weight"
+ argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'synchronization\', \'aggregation\'], varargs=None, keywords=kwargs, defaults=[\'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'VariableSynchronization.AUTO\', \'VariableAggregation.NONE\'], "
+ }
+ member_method {
+ name: "apply"
+ argspec: "args=[\'self\', \'inputs\'], varargs=args, keywords=kwargs, defaults=None"
+ }
+ member_method {
+ name: "build"
+ argspec: "args=[\'self\', \'input_shape\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "call"
+ argspec: "args=[\'self\', \'inputs\', \'training\', \'mask\'], varargs=None, keywords=None, defaults=[\'None\', \'None\'], "
+ }
+ member_method {
+ name: "compute_mask"
+ argspec: "args=[\'self\', \'inputs\', \'mask\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "compute_output_shape"
+ argspec: "args=[\'self\', \'input_shape\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "count_params"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "from_config"
+ argspec: "args=[\'cls\', \'config\', \'custom_objects\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "get_config"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_input_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_input_mask_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_input_shape_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_losses_for"
+ argspec: "args=[\'self\', \'inputs\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_output_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_output_mask_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_output_shape_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_updates_for"
+ argspec: "args=[\'self\', \'inputs\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_weights"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "set_weights"
+ argspec: "args=[\'self\', \'weights\'], varargs=None, keywords=None, defaults=None"
+ }
+}
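TimeDistributed wraps an inner layer and applies it independently to every timestep of its input; a minimal sketch wrapping a Dense layer (shapes illustrative):

import tensorflow as tf

x = tf.keras.Input(shape=(10, 16))                                # (timesteps, features)
y = tf.keras.layers.TimeDistributed(tf.keras.layers.Dense(4))(x)  # Dense per timestep -> (None, 10, 4)
model = tf.keras.Model(x, y)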
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-up-sampling1-d.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-up-sampling1-d.pbtxt
new file mode 100644
index 0000000000..3d285e7f17
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-up-sampling1-d.pbtxt
@@ -0,0 +1,175 @@
+path: "tensorflow.keras.layers.UpSampling1D"
+tf_class {
+ is_instance: "<class \'tensorflow.python.keras.layers.convolutional.UpSampling1D\'>"
+ is_instance: "<class \'tensorflow.python.keras.engine.base_layer.Layer\'>"
+ is_instance: "<class \'tensorflow.python.training.checkpointable.base.CheckpointableBase\'>"
+ is_instance: "<type \'object\'>"
+ member {
+ name: "activity_regularizer"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "dtype"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "inbound_nodes"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "input"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "input_mask"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "input_shape"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "losses"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "name"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "non_trainable_variables"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "non_trainable_weights"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "outbound_nodes"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "output"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "output_mask"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "output_shape"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "trainable_variables"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "trainable_weights"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "updates"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "variables"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "weights"
+ mtype: "<type \'property\'>"
+ }
+ member_method {
+ name: "__init__"
+ argspec: "args=[\'self\', \'size\'], varargs=None, keywords=kwargs, defaults=[\'2\'], "
+ }
+ member_method {
+ name: "add_loss"
+ argspec: "args=[\'self\', \'losses\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "add_update"
+ argspec: "args=[\'self\', \'updates\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "add_variable"
+ argspec: "args=[\'self\'], varargs=args, keywords=kwargs, defaults=None"
+ }
+ member_method {
+ name: "add_weight"
+ argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'synchronization\', \'aggregation\'], varargs=None, keywords=kwargs, defaults=[\'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'VariableSynchronization.AUTO\', \'VariableAggregation.NONE\'], "
+ }
+ member_method {
+ name: "apply"
+ argspec: "args=[\'self\', \'inputs\'], varargs=args, keywords=kwargs, defaults=None"
+ }
+ member_method {
+ name: "build"
+ argspec: "args=[\'self\', \'input_shape\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "call"
+ argspec: "args=[\'self\', \'inputs\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "compute_mask"
+ argspec: "args=[\'self\', \'inputs\', \'mask\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "compute_output_shape"
+ argspec: "args=[\'self\', \'input_shape\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "count_params"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "from_config"
+ argspec: "args=[\'cls\', \'config\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_config"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_input_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_input_mask_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_input_shape_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_losses_for"
+ argspec: "args=[\'self\', \'inputs\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_output_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_output_mask_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_output_shape_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_updates_for"
+ argspec: "args=[\'self\', \'inputs\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_weights"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "set_weights"
+ argspec: "args=[\'self\', \'weights\'], varargs=None, keywords=None, defaults=None"
+ }
+}
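UpSampling1D repeats each timestep size times along the temporal axis; a minimal sketch with the default size=2:

import tensorflow as tf

x = tf.keras.Input(shape=(10, 3))
y = tf.keras.layers.UpSampling1D(size=2)(x)  # (None, 10, 3) -> (None, 20, 3)
model = tf.keras.Model(x, y)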
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-up-sampling2-d.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-up-sampling2-d.pbtxt
new file mode 100644
index 0000000000..40a56a0c94
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-up-sampling2-d.pbtxt
@@ -0,0 +1,175 @@
+path: "tensorflow.keras.layers.UpSampling2D"
+tf_class {
+ is_instance: "<class \'tensorflow.python.keras.layers.convolutional.UpSampling2D\'>"
+ is_instance: "<class \'tensorflow.python.keras.engine.base_layer.Layer\'>"
+ is_instance: "<class \'tensorflow.python.training.checkpointable.base.CheckpointableBase\'>"
+ is_instance: "<type \'object\'>"
+ member {
+ name: "activity_regularizer"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "dtype"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "inbound_nodes"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "input"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "input_mask"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "input_shape"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "losses"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "name"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "non_trainable_variables"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "non_trainable_weights"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "outbound_nodes"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "output"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "output_mask"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "output_shape"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "trainable_variables"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "trainable_weights"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "updates"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "variables"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "weights"
+ mtype: "<type \'property\'>"
+ }
+ member_method {
+ name: "__init__"
+ argspec: "args=[\'self\', \'size\', \'data_format\'], varargs=None, keywords=kwargs, defaults=[\'(2, 2)\', \'None\'], "
+ }
+ member_method {
+ name: "add_loss"
+ argspec: "args=[\'self\', \'losses\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "add_update"
+ argspec: "args=[\'self\', \'updates\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "add_variable"
+ argspec: "args=[\'self\'], varargs=args, keywords=kwargs, defaults=None"
+ }
+ member_method {
+ name: "add_weight"
+ argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'synchronization\', \'aggregation\'], varargs=None, keywords=kwargs, defaults=[\'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'VariableSynchronization.AUTO\', \'VariableAggregation.NONE\'], "
+ }
+ member_method {
+ name: "apply"
+ argspec: "args=[\'self\', \'inputs\'], varargs=args, keywords=kwargs, defaults=None"
+ }
+ member_method {
+ name: "build"
+ argspec: "args=[\'self\', \'input_shape\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "call"
+ argspec: "args=[\'self\', \'inputs\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "compute_mask"
+ argspec: "args=[\'self\', \'inputs\', \'mask\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "compute_output_shape"
+ argspec: "args=[\'self\', \'input_shape\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "count_params"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "from_config"
+ argspec: "args=[\'cls\', \'config\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_config"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_input_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_input_mask_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_input_shape_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_losses_for"
+ argspec: "args=[\'self\', \'inputs\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_output_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_output_mask_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_output_shape_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_updates_for"
+ argspec: "args=[\'self\', \'inputs\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_weights"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "set_weights"
+ argspec: "args=[\'self\', \'weights\'], varargs=None, keywords=None, defaults=None"
+ }
+}
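UpSampling2D repeats rows and columns by the given factors; a minimal sketch with the default size=(2, 2) and channels_last input:

import tensorflow as tf

x = tf.keras.Input(shape=(8, 8, 3))
y = tf.keras.layers.UpSampling2D(size=(2, 2))(x)  # (None, 8, 8, 3) -> (None, 16, 16, 3)
model = tf.keras.Model(x, y)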
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-up-sampling3-d.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-up-sampling3-d.pbtxt
new file mode 100644
index 0000000000..728eca415a
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-up-sampling3-d.pbtxt
@@ -0,0 +1,175 @@
+path: "tensorflow.keras.layers.UpSampling3D"
+tf_class {
+ is_instance: "<class \'tensorflow.python.keras.layers.convolutional.UpSampling3D\'>"
+ is_instance: "<class \'tensorflow.python.keras.engine.base_layer.Layer\'>"
+ is_instance: "<class \'tensorflow.python.training.checkpointable.base.CheckpointableBase\'>"
+ is_instance: "<type \'object\'>"
+ member {
+ name: "activity_regularizer"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "dtype"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "inbound_nodes"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "input"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "input_mask"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "input_shape"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "losses"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "name"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "non_trainable_variables"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "non_trainable_weights"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "outbound_nodes"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "output"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "output_mask"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "output_shape"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "trainable_variables"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "trainable_weights"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "updates"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "variables"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "weights"
+ mtype: "<type \'property\'>"
+ }
+ member_method {
+ name: "__init__"
+ argspec: "args=[\'self\', \'size\', \'data_format\'], varargs=None, keywords=kwargs, defaults=[\'(2, 2, 2)\', \'None\'], "
+ }
+ member_method {
+ name: "add_loss"
+ argspec: "args=[\'self\', \'losses\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "add_update"
+ argspec: "args=[\'self\', \'updates\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "add_variable"
+ argspec: "args=[\'self\'], varargs=args, keywords=kwargs, defaults=None"
+ }
+ member_method {
+ name: "add_weight"
+ argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'synchronization\', \'aggregation\'], varargs=None, keywords=kwargs, defaults=[\'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'VariableSynchronization.AUTO\', \'VariableAggregation.NONE\'], "
+ }
+ member_method {
+ name: "apply"
+ argspec: "args=[\'self\', \'inputs\'], varargs=args, keywords=kwargs, defaults=None"
+ }
+ member_method {
+ name: "build"
+ argspec: "args=[\'self\', \'input_shape\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "call"
+ argspec: "args=[\'self\', \'inputs\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "compute_mask"
+ argspec: "args=[\'self\', \'inputs\', \'mask\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "compute_output_shape"
+ argspec: "args=[\'self\', \'input_shape\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "count_params"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "from_config"
+ argspec: "args=[\'cls\', \'config\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_config"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_input_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_input_mask_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_input_shape_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_losses_for"
+ argspec: "args=[\'self\', \'inputs\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_output_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_output_mask_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_output_shape_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_updates_for"
+ argspec: "args=[\'self\', \'inputs\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_weights"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "set_weights"
+ argspec: "args=[\'self\', \'weights\'], varargs=None, keywords=None, defaults=None"
+ }
+}
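
An illustrative usage sketch (not part of the golden file) for the UpSampling3D API recorded above; the input shape and the channels_last default are assumptions for the example only:

    import tensorflow as tf

    # UpSampling3D repeats each spatial step `size` times along depth, height and width.
    layer = tf.keras.layers.UpSampling3D(size=(2, 2, 2))  # matches the (2, 2, 2) default above
    x = tf.zeros([1, 4, 4, 4, 3])   # (batch, dim1, dim2, dim3, channels), channels_last assumed
    y = layer(x)                    # -> shape (1, 8, 8, 8, 3)
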
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-wrapper.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-wrapper.pbtxt
new file mode 100644
index 0000000000..da64e77c39
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-wrapper.pbtxt
@@ -0,0 +1,179 @@
+path: "tensorflow.keras.layers.Wrapper"
+tf_class {
+ is_instance: "<class \'tensorflow.python.keras.layers.wrappers.Wrapper\'>"
+ is_instance: "<class \'tensorflow.python.keras.engine.base_layer.Layer\'>"
+ is_instance: "<class \'tensorflow.python.training.checkpointable.base.CheckpointableBase\'>"
+ is_instance: "<type \'object\'>"
+ member {
+ name: "activity_regularizer"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "dtype"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "inbound_nodes"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "input"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "input_mask"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "input_shape"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "losses"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "name"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "non_trainable_variables"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "non_trainable_weights"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "outbound_nodes"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "output"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "output_mask"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "output_shape"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "trainable"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "trainable_variables"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "trainable_weights"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "updates"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "variables"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "weights"
+ mtype: "<type \'property\'>"
+ }
+ member_method {
+ name: "__init__"
+ argspec: "args=[\'self\', \'layer\'], varargs=None, keywords=kwargs, defaults=None"
+ }
+ member_method {
+ name: "add_loss"
+ argspec: "args=[\'self\', \'losses\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "add_update"
+ argspec: "args=[\'self\', \'updates\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "add_variable"
+ argspec: "args=[\'self\'], varargs=args, keywords=kwargs, defaults=None"
+ }
+ member_method {
+ name: "add_weight"
+ argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'synchronization\', \'aggregation\'], varargs=None, keywords=kwargs, defaults=[\'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'VariableSynchronization.AUTO\', \'VariableAggregation.NONE\'], "
+ }
+ member_method {
+ name: "apply"
+ argspec: "args=[\'self\', \'inputs\'], varargs=args, keywords=kwargs, defaults=None"
+ }
+ member_method {
+ name: "build"
+ argspec: "args=[\'self\', \'input_shape\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "call"
+ argspec: "args=[\'self\', \'inputs\'], varargs=None, keywords=kwargs, defaults=None"
+ }
+ member_method {
+ name: "compute_mask"
+ argspec: "args=[\'self\', \'inputs\', \'mask\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "compute_output_shape"
+ argspec: "args=[\'self\', \'input_shape\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "count_params"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "from_config"
+ argspec: "args=[\'cls\', \'config\', \'custom_objects\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "get_config"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_input_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_input_mask_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_input_shape_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_losses_for"
+ argspec: "args=[\'self\', \'inputs\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_output_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_output_mask_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_output_shape_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_updates_for"
+ argspec: "args=[\'self\', \'inputs\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_weights"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "set_weights"
+ argspec: "args=[\'self\', \'weights\'], varargs=None, keywords=None, defaults=None"
+ }
+}
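
Wrapper itself is an abstract base whose __init__ takes a single layer, as the argspec above shows. A minimal sketch using TimeDistributed, a concrete Wrapper subclass listed in the layers module below; the tensor shapes are illustrative assumptions:

    import tensorflow as tf

    # TimeDistributed applies the wrapped layer to every temporal slice of its input.
    wrapped = tf.keras.layers.TimeDistributed(tf.keras.layers.Dense(8))
    x = tf.zeros([2, 10, 16])   # (batch, timesteps, features) -- assumed shape
    y = wrapped(x)              # -> shape (2, 10, 8)
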
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-zero-padding1-d.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-zero-padding1-d.pbtxt
new file mode 100644
index 0000000000..2f505f9293
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-zero-padding1-d.pbtxt
@@ -0,0 +1,175 @@
+path: "tensorflow.keras.layers.ZeroPadding1D"
+tf_class {
+ is_instance: "<class \'tensorflow.python.keras.layers.convolutional.ZeroPadding1D\'>"
+ is_instance: "<class \'tensorflow.python.keras.engine.base_layer.Layer\'>"
+ is_instance: "<class \'tensorflow.python.training.checkpointable.base.CheckpointableBase\'>"
+ is_instance: "<type \'object\'>"
+ member {
+ name: "activity_regularizer"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "dtype"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "inbound_nodes"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "input"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "input_mask"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "input_shape"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "losses"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "name"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "non_trainable_variables"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "non_trainable_weights"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "outbound_nodes"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "output"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "output_mask"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "output_shape"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "trainable_variables"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "trainable_weights"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "updates"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "variables"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "weights"
+ mtype: "<type \'property\'>"
+ }
+ member_method {
+ name: "__init__"
+ argspec: "args=[\'self\', \'padding\'], varargs=None, keywords=kwargs, defaults=[\'1\'], "
+ }
+ member_method {
+ name: "add_loss"
+ argspec: "args=[\'self\', \'losses\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "add_update"
+ argspec: "args=[\'self\', \'updates\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "add_variable"
+ argspec: "args=[\'self\'], varargs=args, keywords=kwargs, defaults=None"
+ }
+ member_method {
+ name: "add_weight"
+ argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'synchronization\', \'aggregation\'], varargs=None, keywords=kwargs, defaults=[\'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'VariableSynchronization.AUTO\', \'VariableAggregation.NONE\'], "
+ }
+ member_method {
+ name: "apply"
+ argspec: "args=[\'self\', \'inputs\'], varargs=args, keywords=kwargs, defaults=None"
+ }
+ member_method {
+ name: "build"
+ argspec: "args=[\'self\', \'input_shape\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "call"
+ argspec: "args=[\'self\', \'inputs\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "compute_mask"
+ argspec: "args=[\'self\', \'inputs\', \'mask\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "compute_output_shape"
+ argspec: "args=[\'self\', \'input_shape\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "count_params"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "from_config"
+ argspec: "args=[\'cls\', \'config\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_config"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_input_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_input_mask_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_input_shape_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_losses_for"
+ argspec: "args=[\'self\', \'inputs\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_output_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_output_mask_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_output_shape_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_updates_for"
+ argspec: "args=[\'self\', \'inputs\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_weights"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "set_weights"
+ argspec: "args=[\'self\', \'weights\'], varargs=None, keywords=None, defaults=None"
+ }
+}
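
A minimal sketch of the ZeroPadding1D API recorded above, assuming an illustrative 3D input shape:

    import tensorflow as tf

    # ZeroPadding1D pads the steps axis of a 3D input with zeros on both sides.
    layer = tf.keras.layers.ZeroPadding1D(padding=1)   # default padding=1, as listed above
    x = tf.zeros([4, 10, 8])    # (batch, steps, features) -- assumed shape
    y = layer(x)                # -> shape (4, 12, 8)
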
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-zero-padding2-d.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-zero-padding2-d.pbtxt
new file mode 100644
index 0000000000..f82c77072e
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-zero-padding2-d.pbtxt
@@ -0,0 +1,175 @@
+path: "tensorflow.keras.layers.ZeroPadding2D"
+tf_class {
+ is_instance: "<class \'tensorflow.python.keras.layers.convolutional.ZeroPadding2D\'>"
+ is_instance: "<class \'tensorflow.python.keras.engine.base_layer.Layer\'>"
+ is_instance: "<class \'tensorflow.python.training.checkpointable.base.CheckpointableBase\'>"
+ is_instance: "<type \'object\'>"
+ member {
+ name: "activity_regularizer"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "dtype"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "inbound_nodes"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "input"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "input_mask"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "input_shape"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "losses"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "name"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "non_trainable_variables"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "non_trainable_weights"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "outbound_nodes"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "output"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "output_mask"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "output_shape"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "trainable_variables"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "trainable_weights"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "updates"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "variables"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "weights"
+ mtype: "<type \'property\'>"
+ }
+ member_method {
+ name: "__init__"
+ argspec: "args=[\'self\', \'padding\', \'data_format\'], varargs=None, keywords=kwargs, defaults=[\'(1, 1)\', \'None\'], "
+ }
+ member_method {
+ name: "add_loss"
+ argspec: "args=[\'self\', \'losses\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "add_update"
+ argspec: "args=[\'self\', \'updates\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "add_variable"
+ argspec: "args=[\'self\'], varargs=args, keywords=kwargs, defaults=None"
+ }
+ member_method {
+ name: "add_weight"
+ argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'synchronization\', \'aggregation\'], varargs=None, keywords=kwargs, defaults=[\'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'VariableSynchronization.AUTO\', \'VariableAggregation.NONE\'], "
+ }
+ member_method {
+ name: "apply"
+ argspec: "args=[\'self\', \'inputs\'], varargs=args, keywords=kwargs, defaults=None"
+ }
+ member_method {
+ name: "build"
+ argspec: "args=[\'self\', \'input_shape\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "call"
+ argspec: "args=[\'self\', \'inputs\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "compute_mask"
+ argspec: "args=[\'self\', \'inputs\', \'mask\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "compute_output_shape"
+ argspec: "args=[\'self\', \'input_shape\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "count_params"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "from_config"
+ argspec: "args=[\'cls\', \'config\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_config"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_input_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_input_mask_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_input_shape_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_losses_for"
+ argspec: "args=[\'self\', \'inputs\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_output_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_output_mask_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_output_shape_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_updates_for"
+ argspec: "args=[\'self\', \'inputs\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_weights"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "set_weights"
+ argspec: "args=[\'self\', \'weights\'], varargs=None, keywords=None, defaults=None"
+ }
+}
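
A sketch of the ZeroPadding2D API above, showing the asymmetric tuple-of-tuples padding form in addition to the (1, 1) default; shapes are assumptions:

    import tensorflow as tf

    # padding=((top, bottom), (left, right)) pads rows and columns independently.
    layer = tf.keras.layers.ZeroPadding2D(padding=((1, 2), (3, 4)))
    x = tf.zeros([1, 28, 28, 3])   # (batch, rows, cols, channels), channels_last assumed
    y = layer(x)                   # -> shape (1, 31, 35, 3)
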
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-zero-padding3-d.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-zero-padding3-d.pbtxt
new file mode 100644
index 0000000000..54e01a9917
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-zero-padding3-d.pbtxt
@@ -0,0 +1,175 @@
+path: "tensorflow.keras.layers.ZeroPadding3D"
+tf_class {
+ is_instance: "<class \'tensorflow.python.keras.layers.convolutional.ZeroPadding3D\'>"
+ is_instance: "<class \'tensorflow.python.keras.engine.base_layer.Layer\'>"
+ is_instance: "<class \'tensorflow.python.training.checkpointable.base.CheckpointableBase\'>"
+ is_instance: "<type \'object\'>"
+ member {
+ name: "activity_regularizer"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "dtype"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "inbound_nodes"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "input"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "input_mask"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "input_shape"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "losses"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "name"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "non_trainable_variables"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "non_trainable_weights"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "outbound_nodes"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "output"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "output_mask"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "output_shape"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "trainable_variables"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "trainable_weights"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "updates"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "variables"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "weights"
+ mtype: "<type \'property\'>"
+ }
+ member_method {
+ name: "__init__"
+ argspec: "args=[\'self\', \'padding\', \'data_format\'], varargs=None, keywords=kwargs, defaults=[\'(1, 1, 1)\', \'None\'], "
+ }
+ member_method {
+ name: "add_loss"
+ argspec: "args=[\'self\', \'losses\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "add_update"
+ argspec: "args=[\'self\', \'updates\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "add_variable"
+ argspec: "args=[\'self\'], varargs=args, keywords=kwargs, defaults=None"
+ }
+ member_method {
+ name: "add_weight"
+ argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'synchronization\', \'aggregation\'], varargs=None, keywords=kwargs, defaults=[\'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'VariableSynchronization.AUTO\', \'VariableAggregation.NONE\'], "
+ }
+ member_method {
+ name: "apply"
+ argspec: "args=[\'self\', \'inputs\'], varargs=args, keywords=kwargs, defaults=None"
+ }
+ member_method {
+ name: "build"
+ argspec: "args=[\'self\', \'input_shape\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "call"
+ argspec: "args=[\'self\', \'inputs\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "compute_mask"
+ argspec: "args=[\'self\', \'inputs\', \'mask\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "compute_output_shape"
+ argspec: "args=[\'self\', \'input_shape\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "count_params"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "from_config"
+ argspec: "args=[\'cls\', \'config\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_config"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_input_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_input_mask_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_input_shape_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_losses_for"
+ argspec: "args=[\'self\', \'inputs\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_output_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_output_mask_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_output_shape_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_updates_for"
+ argspec: "args=[\'self\', \'inputs\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_weights"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "set_weights"
+ argspec: "args=[\'self\', \'weights\'], varargs=None, keywords=None, defaults=None"
+ }
+}
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.pbtxt
new file mode 100644
index 0000000000..9d7e5bb8c7
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.pbtxt
@@ -0,0 +1,435 @@
+path: "tensorflow.keras.layers"
+tf_module {
+ member {
+ name: "Activation"
+ mtype: "<type \'type\'>"
+ }
+ member {
+ name: "ActivityRegularization"
+ mtype: "<type \'type\'>"
+ }
+ member {
+ name: "Add"
+ mtype: "<type \'type\'>"
+ }
+ member {
+ name: "AlphaDropout"
+ mtype: "<type \'type\'>"
+ }
+ member {
+ name: "Average"
+ mtype: "<type \'type\'>"
+ }
+ member {
+ name: "AveragePooling1D"
+ mtype: "<type \'type\'>"
+ }
+ member {
+ name: "AveragePooling2D"
+ mtype: "<type \'type\'>"
+ }
+ member {
+ name: "AveragePooling3D"
+ mtype: "<type \'type\'>"
+ }
+ member {
+ name: "AvgPool1D"
+ mtype: "<type \'type\'>"
+ }
+ member {
+ name: "AvgPool2D"
+ mtype: "<type \'type\'>"
+ }
+ member {
+ name: "AvgPool3D"
+ mtype: "<type \'type\'>"
+ }
+ member {
+ name: "BatchNormalization"
+ mtype: "<type \'type\'>"
+ }
+ member {
+ name: "Bidirectional"
+ mtype: "<type \'type\'>"
+ }
+ member {
+ name: "Concatenate"
+ mtype: "<type \'type\'>"
+ }
+ member {
+ name: "Conv1D"
+ mtype: "<type \'type\'>"
+ }
+ member {
+ name: "Conv2D"
+ mtype: "<type \'type\'>"
+ }
+ member {
+ name: "Conv2DTranspose"
+ mtype: "<type \'type\'>"
+ }
+ member {
+ name: "Conv3D"
+ mtype: "<type \'type\'>"
+ }
+ member {
+ name: "Conv3DTranspose"
+ mtype: "<type \'type\'>"
+ }
+ member {
+ name: "ConvLSTM2D"
+ mtype: "<type \'type\'>"
+ }
+ member {
+ name: "Convolution1D"
+ mtype: "<type \'type\'>"
+ }
+ member {
+ name: "Convolution2D"
+ mtype: "<type \'type\'>"
+ }
+ member {
+ name: "Convolution2DTranspose"
+ mtype: "<type \'type\'>"
+ }
+ member {
+ name: "Convolution3D"
+ mtype: "<type \'type\'>"
+ }
+ member {
+ name: "Convolution3DTranspose"
+ mtype: "<type \'type\'>"
+ }
+ member {
+ name: "Cropping1D"
+ mtype: "<type \'type\'>"
+ }
+ member {
+ name: "Cropping2D"
+ mtype: "<type \'type\'>"
+ }
+ member {
+ name: "Cropping3D"
+ mtype: "<type \'type\'>"
+ }
+ member {
+ name: "CuDNNGRU"
+ mtype: "<type \'type\'>"
+ }
+ member {
+ name: "CuDNNLSTM"
+ mtype: "<type \'type\'>"
+ }
+ member {
+ name: "Dense"
+ mtype: "<type \'type\'>"
+ }
+ member {
+ name: "DepthwiseConv2D"
+ mtype: "<type \'type\'>"
+ }
+ member {
+ name: "Dot"
+ mtype: "<type \'type\'>"
+ }
+ member {
+ name: "Dropout"
+ mtype: "<type \'type\'>"
+ }
+ member {
+ name: "ELU"
+ mtype: "<type \'type\'>"
+ }
+ member {
+ name: "Embedding"
+ mtype: "<type \'type\'>"
+ }
+ member {
+ name: "Flatten"
+ mtype: "<type \'type\'>"
+ }
+ member {
+ name: "GRU"
+ mtype: "<type \'type\'>"
+ }
+ member {
+ name: "GRUCell"
+ mtype: "<type \'type\'>"
+ }
+ member {
+ name: "GaussianDropout"
+ mtype: "<type \'type\'>"
+ }
+ member {
+ name: "GaussianNoise"
+ mtype: "<type \'type\'>"
+ }
+ member {
+ name: "GlobalAveragePooling1D"
+ mtype: "<type \'type\'>"
+ }
+ member {
+ name: "GlobalAveragePooling2D"
+ mtype: "<type \'type\'>"
+ }
+ member {
+ name: "GlobalAveragePooling3D"
+ mtype: "<type \'type\'>"
+ }
+ member {
+ name: "GlobalAvgPool1D"
+ mtype: "<type \'type\'>"
+ }
+ member {
+ name: "GlobalAvgPool2D"
+ mtype: "<type \'type\'>"
+ }
+ member {
+ name: "GlobalAvgPool3D"
+ mtype: "<type \'type\'>"
+ }
+ member {
+ name: "GlobalMaxPool1D"
+ mtype: "<type \'type\'>"
+ }
+ member {
+ name: "GlobalMaxPool2D"
+ mtype: "<type \'type\'>"
+ }
+ member {
+ name: "GlobalMaxPool3D"
+ mtype: "<type \'type\'>"
+ }
+ member {
+ name: "GlobalMaxPooling1D"
+ mtype: "<type \'type\'>"
+ }
+ member {
+ name: "GlobalMaxPooling2D"
+ mtype: "<type \'type\'>"
+ }
+ member {
+ name: "GlobalMaxPooling3D"
+ mtype: "<type \'type\'>"
+ }
+ member {
+ name: "InputLayer"
+ mtype: "<type \'type\'>"
+ }
+ member {
+ name: "InputSpec"
+ mtype: "<type \'type\'>"
+ }
+ member {
+ name: "LSTM"
+ mtype: "<type \'type\'>"
+ }
+ member {
+ name: "LSTMCell"
+ mtype: "<type \'type\'>"
+ }
+ member {
+ name: "Lambda"
+ mtype: "<type \'type\'>"
+ }
+ member {
+ name: "Layer"
+ mtype: "<type \'type\'>"
+ }
+ member {
+ name: "LeakyReLU"
+ mtype: "<type \'type\'>"
+ }
+ member {
+ name: "LocallyConnected1D"
+ mtype: "<type \'type\'>"
+ }
+ member {
+ name: "LocallyConnected2D"
+ mtype: "<type \'type\'>"
+ }
+ member {
+ name: "Masking"
+ mtype: "<type \'type\'>"
+ }
+ member {
+ name: "MaxPool1D"
+ mtype: "<type \'type\'>"
+ }
+ member {
+ name: "MaxPool2D"
+ mtype: "<type \'type\'>"
+ }
+ member {
+ name: "MaxPool3D"
+ mtype: "<type \'type\'>"
+ }
+ member {
+ name: "MaxPooling1D"
+ mtype: "<type \'type\'>"
+ }
+ member {
+ name: "MaxPooling2D"
+ mtype: "<type \'type\'>"
+ }
+ member {
+ name: "MaxPooling3D"
+ mtype: "<type \'type\'>"
+ }
+ member {
+ name: "Maximum"
+ mtype: "<type \'type\'>"
+ }
+ member {
+ name: "Minimum"
+ mtype: "<type \'type\'>"
+ }
+ member {
+ name: "Multiply"
+ mtype: "<type \'type\'>"
+ }
+ member {
+ name: "PReLU"
+ mtype: "<type \'type\'>"
+ }
+ member {
+ name: "Permute"
+ mtype: "<type \'type\'>"
+ }
+ member {
+ name: "RNN"
+ mtype: "<type \'type\'>"
+ }
+ member {
+ name: "ReLU"
+ mtype: "<type \'type\'>"
+ }
+ member {
+ name: "RepeatVector"
+ mtype: "<type \'type\'>"
+ }
+ member {
+ name: "Reshape"
+ mtype: "<type \'type\'>"
+ }
+ member {
+ name: "SeparableConv1D"
+ mtype: "<type \'type\'>"
+ }
+ member {
+ name: "SeparableConv2D"
+ mtype: "<type \'type\'>"
+ }
+ member {
+ name: "SeparableConvolution1D"
+ mtype: "<type \'type\'>"
+ }
+ member {
+ name: "SeparableConvolution2D"
+ mtype: "<type \'type\'>"
+ }
+ member {
+ name: "SimpleRNN"
+ mtype: "<type \'type\'>"
+ }
+ member {
+ name: "SimpleRNNCell"
+ mtype: "<type \'type\'>"
+ }
+ member {
+ name: "Softmax"
+ mtype: "<type \'type\'>"
+ }
+ member {
+ name: "SpatialDropout1D"
+ mtype: "<type \'type\'>"
+ }
+ member {
+ name: "SpatialDropout2D"
+ mtype: "<type \'type\'>"
+ }
+ member {
+ name: "SpatialDropout3D"
+ mtype: "<type \'type\'>"
+ }
+ member {
+ name: "StackedRNNCells"
+ mtype: "<type \'type\'>"
+ }
+ member {
+ name: "Subtract"
+ mtype: "<type \'type\'>"
+ }
+ member {
+ name: "ThresholdedReLU"
+ mtype: "<type \'type\'>"
+ }
+ member {
+ name: "TimeDistributed"
+ mtype: "<type \'type\'>"
+ }
+ member {
+ name: "UpSampling1D"
+ mtype: "<type \'type\'>"
+ }
+ member {
+ name: "UpSampling2D"
+ mtype: "<type \'type\'>"
+ }
+ member {
+ name: "UpSampling3D"
+ mtype: "<type \'type\'>"
+ }
+ member {
+ name: "Wrapper"
+ mtype: "<type \'type\'>"
+ }
+ member {
+ name: "ZeroPadding1D"
+ mtype: "<type \'type\'>"
+ }
+ member {
+ name: "ZeroPadding2D"
+ mtype: "<type \'type\'>"
+ }
+ member {
+ name: "ZeroPadding3D"
+ mtype: "<type \'type\'>"
+ }
+ member_method {
+ name: "Input"
+ argspec: "args=[\'shape\', \'batch_size\', \'name\', \'dtype\', \'sparse\', \'tensor\'], varargs=None, keywords=kwargs, defaults=[\'None\', \'None\', \'None\', \'None\', \'False\', \'None\'], "
+ }
+ member_method {
+ name: "add"
+ argspec: "args=[\'inputs\'], varargs=None, keywords=kwargs, defaults=None"
+ }
+ member_method {
+ name: "average"
+ argspec: "args=[\'inputs\'], varargs=None, keywords=kwargs, defaults=None"
+ }
+ member_method {
+ name: "concatenate"
+ argspec: "args=[\'inputs\', \'axis\'], varargs=None, keywords=kwargs, defaults=[\'-1\'], "
+ }
+ member_method {
+ name: "dot"
+ argspec: "args=[\'inputs\', \'axes\', \'normalize\'], varargs=None, keywords=kwargs, defaults=[\'False\'], "
+ }
+ member_method {
+ name: "maximum"
+ argspec: "args=[\'inputs\'], varargs=None, keywords=kwargs, defaults=None"
+ }
+ member_method {
+ name: "minimum"
+ argspec: "args=[\'inputs\'], varargs=None, keywords=kwargs, defaults=None"
+ }
+ member_method {
+ name: "multiply"
+ argspec: "args=[\'inputs\'], varargs=None, keywords=kwargs, defaults=None"
+ }
+ member_method {
+ name: "subtract"
+ argspec: "args=[\'inputs\'], varargs=None, keywords=kwargs, defaults=None"
+ }
+}
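
An illustrative sketch of the functional merge helpers listed in this module (add, concatenate, and friends); the tensor sizes are assumptions, not part of the golden API:

    import tensorflow as tf

    a = tf.keras.layers.Input(shape=(16,))
    b = tf.keras.layers.Input(shape=(16,))
    summed = tf.keras.layers.add([a, b])                    # element-wise sum, shape (None, 16)
    merged = tf.keras.layers.concatenate([a, b], axis=-1)   # -> shape (None, 32)
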
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.keras.losses.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.keras.losses.pbtxt
new file mode 100644
index 0000000000..eca6b91538
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.keras.losses.pbtxt
@@ -0,0 +1,115 @@
+path: "tensorflow.keras.losses"
+tf_module {
+ member_method {
+ name: "KLD"
+ argspec: "args=[\'y_true\', \'y_pred\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "MAE"
+ argspec: "args=[\'y_true\', \'y_pred\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "MAPE"
+ argspec: "args=[\'y_true\', \'y_pred\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "MSE"
+ argspec: "args=[\'y_true\', \'y_pred\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "MSLE"
+ argspec: "args=[\'y_true\', \'y_pred\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "binary_crossentropy"
+ argspec: "args=[\'y_true\', \'y_pred\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "categorical_crossentropy"
+ argspec: "args=[\'y_true\', \'y_pred\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "categorical_hinge"
+ argspec: "args=[\'y_true\', \'y_pred\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "cosine"
+ argspec: "args=[\'y_true\', \'y_pred\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "cosine_proximity"
+ argspec: "args=[\'y_true\', \'y_pred\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "deserialize"
+ argspec: "args=[\'name\', \'custom_objects\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "get"
+ argspec: "args=[\'identifier\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "hinge"
+ argspec: "args=[\'y_true\', \'y_pred\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "kld"
+ argspec: "args=[\'y_true\', \'y_pred\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "kullback_leibler_divergence"
+ argspec: "args=[\'y_true\', \'y_pred\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "logcosh"
+ argspec: "args=[\'y_true\', \'y_pred\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "mae"
+ argspec: "args=[\'y_true\', \'y_pred\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "mape"
+ argspec: "args=[\'y_true\', \'y_pred\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "mean_absolute_error"
+ argspec: "args=[\'y_true\', \'y_pred\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "mean_absolute_percentage_error"
+ argspec: "args=[\'y_true\', \'y_pred\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "mean_squared_error"
+ argspec: "args=[\'y_true\', \'y_pred\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "mean_squared_logarithmic_error"
+ argspec: "args=[\'y_true\', \'y_pred\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "mse"
+ argspec: "args=[\'y_true\', \'y_pred\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "msle"
+ argspec: "args=[\'y_true\', \'y_pred\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "poisson"
+ argspec: "args=[\'y_true\', \'y_pred\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "serialize"
+ argspec: "args=[\'loss\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "sparse_categorical_crossentropy"
+ argspec: "args=[\'y_true\', \'y_pred\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "squared_hinge"
+ argspec: "args=[\'y_true\', \'y_pred\'], varargs=None, keywords=None, defaults=None"
+ }
+}
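
A minimal sketch of the loss functions listed above; the sample values are made up for illustration:

    import tensorflow as tf

    y_true = tf.constant([[0.0, 1.0], [1.0, 0.0]])
    y_pred = tf.constant([[0.1, 0.9], [0.8, 0.2]])
    per_sample = tf.keras.losses.mean_squared_error(y_true, y_pred)  # one loss value per row
    mse_fn = tf.keras.losses.get('mse')                              # lookup by short alias
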
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.keras.metrics.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.keras.metrics.pbtxt
new file mode 100644
index 0000000000..a296e13158
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.keras.metrics.pbtxt
@@ -0,0 +1,127 @@
+path: "tensorflow.keras.metrics"
+tf_module {
+ member_method {
+ name: "KLD"
+ argspec: "args=[\'y_true\', \'y_pred\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "MAE"
+ argspec: "args=[\'y_true\', \'y_pred\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "MAPE"
+ argspec: "args=[\'y_true\', \'y_pred\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "MSE"
+ argspec: "args=[\'y_true\', \'y_pred\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "MSLE"
+ argspec: "args=[\'y_true\', \'y_pred\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "binary_accuracy"
+ argspec: "args=[\'y_true\', \'y_pred\', \'threshold\'], varargs=None, keywords=None, defaults=[\'0.5\'], "
+ }
+ member_method {
+ name: "binary_crossentropy"
+ argspec: "args=[\'y_true\', \'y_pred\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "categorical_accuracy"
+ argspec: "args=[\'y_true\', \'y_pred\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "categorical_crossentropy"
+ argspec: "args=[\'y_true\', \'y_pred\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "cosine"
+ argspec: "args=[\'y_true\', \'y_pred\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "cosine_proximity"
+ argspec: "args=[\'y_true\', \'y_pred\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "deserialize"
+ argspec: "args=[\'config\', \'custom_objects\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "get"
+ argspec: "args=[\'identifier\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "hinge"
+ argspec: "args=[\'y_true\', \'y_pred\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "kld"
+ argspec: "args=[\'y_true\', \'y_pred\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "kullback_leibler_divergence"
+ argspec: "args=[\'y_true\', \'y_pred\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "mae"
+ argspec: "args=[\'y_true\', \'y_pred\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "mape"
+ argspec: "args=[\'y_true\', \'y_pred\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "mean_absolute_error"
+ argspec: "args=[\'y_true\', \'y_pred\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "mean_absolute_percentage_error"
+ argspec: "args=[\'y_true\', \'y_pred\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "mean_squared_error"
+ argspec: "args=[\'y_true\', \'y_pred\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "mean_squared_logarithmic_error"
+ argspec: "args=[\'y_true\', \'y_pred\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "mse"
+ argspec: "args=[\'y_true\', \'y_pred\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "msle"
+ argspec: "args=[\'y_true\', \'y_pred\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "poisson"
+ argspec: "args=[\'y_true\', \'y_pred\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "serialize"
+ argspec: "args=[\'metric\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "sparse_categorical_accuracy"
+ argspec: "args=[\'y_true\', \'y_pred\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "sparse_categorical_crossentropy"
+ argspec: "args=[\'y_true\', \'y_pred\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "sparse_top_k_categorical_accuracy"
+ argspec: "args=[\'y_true\', \'y_pred\', \'k\'], varargs=None, keywords=None, defaults=[\'5\'], "
+ }
+ member_method {
+ name: "squared_hinge"
+ argspec: "args=[\'y_true\', \'y_pred\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "top_k_categorical_accuracy"
+ argspec: "args=[\'y_true\', \'y_pred\', \'k\'], varargs=None, keywords=None, defaults=[\'5\'], "
+ }
+}
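
A minimal sketch of the metric functions listed above, including the threshold and k defaults they expose; the sample values are illustrative:

    import tensorflow as tf

    y_true = tf.constant([[1.0], [0.0], [1.0]])
    y_pred = tf.constant([[0.8], [0.3], [0.4]])
    acc = tf.keras.metrics.binary_accuracy(y_true, y_pred, threshold=0.5)
    # For multi-class probabilities, top_k_categorical_accuracy(y_true, y_pred, k=5)
    # uses the k=5 default recorded above.
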
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.keras.models.-model.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.keras.models.-model.pbtxt
new file mode 100644
index 0000000000..4011719317
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.keras.models.-model.pbtxt
@@ -0,0 +1,268 @@
+path: "tensorflow.keras.models.Model"
+tf_class {
+ is_instance: "<class \'tensorflow.python.keras.engine.training.Model\'>"
+ is_instance: "<class \'tensorflow.python.keras.engine.network.Network\'>"
+ is_instance: "<class \'tensorflow.python.keras.engine.base_layer.Layer\'>"
+ is_instance: "<class \'tensorflow.python.training.checkpointable.base.CheckpointableBase\'>"
+ is_instance: "<type \'object\'>"
+ member {
+ name: "activity_regularizer"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "dtype"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "inbound_nodes"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "input"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "input_mask"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "input_shape"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "input_spec"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "layers"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "losses"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "name"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "non_trainable_variables"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "non_trainable_weights"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "outbound_nodes"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "output"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "output_mask"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "output_shape"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "state_updates"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "stateful"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "trainable_variables"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "trainable_weights"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "updates"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "uses_learning_phase"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "variables"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "weights"
+ mtype: "<type \'property\'>"
+ }
+ member_method {
+ name: "__init__"
+ argspec: "args=[\'self\'], varargs=args, keywords=kwargs, defaults=None"
+ }
+ member_method {
+ name: "add_loss"
+ argspec: "args=[\'self\'], varargs=args, keywords=kwargs, defaults=None"
+ }
+ member_method {
+ name: "add_update"
+ argspec: "args=[\'self\', \'updates\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "add_variable"
+ argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'True\', \'None\'], "
+ }
+ member_method {
+ name: "add_weight"
+ argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'synchronization\', \'aggregation\'], varargs=None, keywords=kwargs, defaults=[\'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'VariableSynchronization.AUTO\', \'VariableAggregation.NONE\'], "
+ }
+ member_method {
+ name: "apply"
+ argspec: "args=[\'self\', \'inputs\'], varargs=args, keywords=kwargs, defaults=None"
+ }
+ member_method {
+ name: "build"
+ argspec: "args=[\'self\', \'input_shape\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "call"
+ argspec: "args=[\'self\', \'inputs\', \'training\', \'mask\'], varargs=None, keywords=None, defaults=[\'None\', \'None\'], "
+ }
+ member_method {
+ name: "compile"
+ argspec: "args=[\'self\', \'optimizer\', \'loss\', \'metrics\', \'loss_weights\', \'sample_weight_mode\', \'weighted_metrics\', \'target_tensors\', \'distribute\'], varargs=None, keywords=kwargs, defaults=[\'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'None\'], "
+ }
+ member_method {
+ name: "compute_mask"
+ argspec: "args=[\'self\', \'inputs\', \'mask\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "compute_output_shape"
+ argspec: "args=[\'self\', \'input_shape\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "count_params"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "evaluate"
+ argspec: "args=[\'self\', \'x\', \'y\', \'batch_size\', \'verbose\', \'sample_weight\', \'steps\', \'max_queue_size\', \'workers\', \'use_multiprocessing\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'1\', \'None\', \'None\', \'10\', \'1\', \'False\'], "
+ }
+ member_method {
+ name: "evaluate_generator"
+ argspec: "args=[\'self\', \'generator\', \'steps\', \'max_queue_size\', \'workers\', \'use_multiprocessing\', \'verbose\'], varargs=None, keywords=None, defaults=[\'None\', \'10\', \'1\', \'False\', \'0\'], "
+ }
+ member_method {
+ name: "fit"
+ argspec: "args=[\'self\', \'x\', \'y\', \'batch_size\', \'epochs\', \'verbose\', \'callbacks\', \'validation_split\', \'validation_data\', \'shuffle\', \'class_weight\', \'sample_weight\', \'initial_epoch\', \'steps_per_epoch\', \'validation_steps\', \'max_queue_size\', \'workers\', \'use_multiprocessing\'], varargs=None, keywords=kwargs, defaults=[\'None\', \'None\', \'None\', \'1\', \'1\', \'None\', \'0.0\', \'None\', \'True\', \'None\', \'None\', \'0\', \'None\', \'None\', \'10\', \'1\', \'False\'], "
+ }
+ member_method {
+ name: "fit_generator"
+ argspec: "args=[\'self\', \'generator\', \'steps_per_epoch\', \'epochs\', \'verbose\', \'callbacks\', \'validation_data\', \'validation_steps\', \'class_weight\', \'max_queue_size\', \'workers\', \'use_multiprocessing\', \'shuffle\', \'initial_epoch\'], varargs=None, keywords=None, defaults=[\'None\', \'1\', \'1\', \'None\', \'None\', \'None\', \'None\', \'10\', \'1\', \'False\', \'True\', \'0\'], "
+ }
+ member_method {
+ name: "from_config"
+ argspec: "args=[\'cls\', \'config\', \'custom_objects\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "get_config"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_input_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_input_mask_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_input_shape_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_layer"
+ argspec: "args=[\'self\', \'name\', \'index\'], varargs=None, keywords=None, defaults=[\'None\', \'None\'], "
+ }
+ member_method {
+ name: "get_losses_for"
+ argspec: "args=[\'self\', \'inputs\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_output_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_output_mask_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_output_shape_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_updates_for"
+ argspec: "args=[\'self\', \'inputs\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_weights"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "load_weights"
+ argspec: "args=[\'self\', \'filepath\', \'by_name\'], varargs=None, keywords=None, defaults=[\'False\'], "
+ }
+ member_method {
+ name: "predict"
+ argspec: "args=[\'self\', \'x\', \'batch_size\', \'verbose\', \'steps\', \'max_queue_size\', \'workers\', \'use_multiprocessing\'], varargs=None, keywords=None, defaults=[\'None\', \'0\', \'None\', \'10\', \'1\', \'False\'], "
+ }
+ member_method {
+ name: "predict_generator"
+ argspec: "args=[\'self\', \'generator\', \'steps\', \'max_queue_size\', \'workers\', \'use_multiprocessing\', \'verbose\'], varargs=None, keywords=None, defaults=[\'None\', \'10\', \'1\', \'False\', \'0\'], "
+ }
+ member_method {
+ name: "predict_on_batch"
+ argspec: "args=[\'self\', \'x\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "reset_states"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "save"
+ argspec: "args=[\'self\', \'filepath\', \'overwrite\', \'include_optimizer\'], varargs=None, keywords=None, defaults=[\'True\', \'True\'], "
+ }
+ member_method {
+ name: "save_weights"
+ argspec: "args=[\'self\', \'filepath\', \'overwrite\', \'save_format\'], varargs=None, keywords=None, defaults=[\'True\', \'None\'], "
+ }
+ member_method {
+ name: "set_weights"
+ argspec: "args=[\'self\', \'weights\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "summary"
+ argspec: "args=[\'self\', \'line_length\', \'positions\', \'print_fn\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\'], "
+ }
+ member_method {
+ name: "test_on_batch"
+ argspec: "args=[\'self\', \'x\', \'y\', \'sample_weight\'], varargs=None, keywords=None, defaults=[\'None\', \'None\'], "
+ }
+ member_method {
+ name: "to_json"
+ argspec: "args=[\'self\'], varargs=None, keywords=kwargs, defaults=None"
+ }
+ member_method {
+ name: "to_yaml"
+ argspec: "args=[\'self\'], varargs=None, keywords=kwargs, defaults=None"
+ }
+ member_method {
+ name: "train_on_batch"
+ argspec: "args=[\'self\', \'x\', \'y\', \'sample_weight\', \'class_weight\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\'], "
+ }
+}
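
A functional-API sketch of the Model class recorded above; the layer sizes and compile arguments are illustrative assumptions:

    import tensorflow as tf

    inputs = tf.keras.layers.Input(shape=(32,))
    outputs = tf.keras.layers.Dense(1, activation='sigmoid')(inputs)
    model = tf.keras.models.Model(inputs, outputs)
    model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])
    # model.fit(x, y, batch_size=32, epochs=5)  # matches the fit argspec above
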
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.keras.models.-sequential.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.keras.models.-sequential.pbtxt
new file mode 100644
index 0000000000..8a12ac1ad8
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.keras.models.-sequential.pbtxt
@@ -0,0 +1,285 @@
+path: "tensorflow.keras.models.Sequential"
+tf_class {
+ is_instance: "<class \'tensorflow.python.keras.engine.sequential.Sequential\'>"
+ is_instance: "<class \'tensorflow.python.keras.engine.training.Model\'>"
+ is_instance: "<class \'tensorflow.python.keras.engine.network.Network\'>"
+ is_instance: "<class \'tensorflow.python.keras.engine.base_layer.Layer\'>"
+ is_instance: "<class \'tensorflow.python.training.checkpointable.base.CheckpointableBase\'>"
+ is_instance: "<type \'object\'>"
+ member {
+ name: "activity_regularizer"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "dtype"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "inbound_nodes"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "input"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "input_mask"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "input_shape"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "input_spec"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "layers"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "losses"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "name"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "non_trainable_variables"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "non_trainable_weights"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "outbound_nodes"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "output"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "output_mask"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "output_shape"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "state_updates"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "stateful"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "trainable_variables"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "trainable_weights"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "updates"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "uses_learning_phase"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "variables"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "weights"
+ mtype: "<type \'property\'>"
+ }
+ member_method {
+ name: "__init__"
+ argspec: "args=[\'self\', \'layers\', \'name\'], varargs=None, keywords=None, defaults=[\'None\', \'None\'], "
+ }
+ member_method {
+ name: "add"
+ argspec: "args=[\'self\', \'layer\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "add_loss"
+ argspec: "args=[\'self\'], varargs=args, keywords=kwargs, defaults=None"
+ }
+ member_method {
+ name: "add_update"
+ argspec: "args=[\'self\', \'updates\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "add_variable"
+ argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'True\', \'None\'], "
+ }
+ member_method {
+ name: "add_weight"
+ argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'synchronization\', \'aggregation\'], varargs=None, keywords=kwargs, defaults=[\'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'VariableSynchronization.AUTO\', \'VariableAggregation.NONE\'], "
+ }
+ member_method {
+ name: "apply"
+ argspec: "args=[\'self\', \'inputs\'], varargs=args, keywords=kwargs, defaults=None"
+ }
+ member_method {
+ name: "build"
+ argspec: "args=[\'self\', \'input_shape\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "call"
+ argspec: "args=[\'self\', \'inputs\', \'training\', \'mask\'], varargs=None, keywords=None, defaults=[\'None\', \'None\'], "
+ }
+ member_method {
+ name: "compile"
+ argspec: "args=[\'self\', \'optimizer\', \'loss\', \'metrics\', \'loss_weights\', \'sample_weight_mode\', \'weighted_metrics\', \'target_tensors\', \'distribute\'], varargs=None, keywords=kwargs, defaults=[\'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'None\'], "
+ }
+ member_method {
+ name: "compute_mask"
+ argspec: "args=[\'self\', \'inputs\', \'mask\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "compute_output_shape"
+ argspec: "args=[\'self\', \'input_shape\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "count_params"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "evaluate"
+ argspec: "args=[\'self\', \'x\', \'y\', \'batch_size\', \'verbose\', \'sample_weight\', \'steps\', \'max_queue_size\', \'workers\', \'use_multiprocessing\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'1\', \'None\', \'None\', \'10\', \'1\', \'False\'], "
+ }
+ member_method {
+ name: "evaluate_generator"
+ argspec: "args=[\'self\', \'generator\', \'steps\', \'max_queue_size\', \'workers\', \'use_multiprocessing\', \'verbose\'], varargs=None, keywords=None, defaults=[\'None\', \'10\', \'1\', \'False\', \'0\'], "
+ }
+ member_method {
+ name: "fit"
+ argspec: "args=[\'self\', \'x\', \'y\', \'batch_size\', \'epochs\', \'verbose\', \'callbacks\', \'validation_split\', \'validation_data\', \'shuffle\', \'class_weight\', \'sample_weight\', \'initial_epoch\', \'steps_per_epoch\', \'validation_steps\', \'max_queue_size\', \'workers\', \'use_multiprocessing\'], varargs=None, keywords=kwargs, defaults=[\'None\', \'None\', \'None\', \'1\', \'1\', \'None\', \'0.0\', \'None\', \'True\', \'None\', \'None\', \'0\', \'None\', \'None\', \'10\', \'1\', \'False\'], "
+ }
+ member_method {
+ name: "fit_generator"
+ argspec: "args=[\'self\', \'generator\', \'steps_per_epoch\', \'epochs\', \'verbose\', \'callbacks\', \'validation_data\', \'validation_steps\', \'class_weight\', \'max_queue_size\', \'workers\', \'use_multiprocessing\', \'shuffle\', \'initial_epoch\'], varargs=None, keywords=None, defaults=[\'None\', \'1\', \'1\', \'None\', \'None\', \'None\', \'None\', \'10\', \'1\', \'False\', \'True\', \'0\'], "
+ }
+ member_method {
+ name: "from_config"
+ argspec: "args=[\'cls\', \'config\', \'custom_objects\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "get_config"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_input_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_input_mask_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_input_shape_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_layer"
+ argspec: "args=[\'self\', \'name\', \'index\'], varargs=None, keywords=None, defaults=[\'None\', \'None\'], "
+ }
+ member_method {
+ name: "get_losses_for"
+ argspec: "args=[\'self\', \'inputs\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_output_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_output_mask_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_output_shape_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_updates_for"
+ argspec: "args=[\'self\', \'inputs\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_weights"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "load_weights"
+ argspec: "args=[\'self\', \'filepath\', \'by_name\'], varargs=None, keywords=None, defaults=[\'False\'], "
+ }
+ member_method {
+ name: "pop"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "predict"
+ argspec: "args=[\'self\', \'x\', \'batch_size\', \'verbose\', \'steps\', \'max_queue_size\', \'workers\', \'use_multiprocessing\'], varargs=None, keywords=None, defaults=[\'None\', \'0\', \'None\', \'10\', \'1\', \'False\'], "
+ }
+ member_method {
+ name: "predict_classes"
+ argspec: "args=[\'self\', \'x\', \'batch_size\', \'verbose\'], varargs=None, keywords=None, defaults=[\'32\', \'0\'], "
+ }
+ member_method {
+ name: "predict_generator"
+ argspec: "args=[\'self\', \'generator\', \'steps\', \'max_queue_size\', \'workers\', \'use_multiprocessing\', \'verbose\'], varargs=None, keywords=None, defaults=[\'None\', \'10\', \'1\', \'False\', \'0\'], "
+ }
+ member_method {
+ name: "predict_on_batch"
+ argspec: "args=[\'self\', \'x\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "predict_proba"
+ argspec: "args=[\'self\', \'x\', \'batch_size\', \'verbose\'], varargs=None, keywords=None, defaults=[\'32\', \'0\'], "
+ }
+ member_method {
+ name: "reset_states"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "save"
+ argspec: "args=[\'self\', \'filepath\', \'overwrite\', \'include_optimizer\'], varargs=None, keywords=None, defaults=[\'True\', \'True\'], "
+ }
+ member_method {
+ name: "save_weights"
+ argspec: "args=[\'self\', \'filepath\', \'overwrite\', \'save_format\'], varargs=None, keywords=None, defaults=[\'True\', \'None\'], "
+ }
+ member_method {
+ name: "set_weights"
+ argspec: "args=[\'self\', \'weights\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "summary"
+ argspec: "args=[\'self\', \'line_length\', \'positions\', \'print_fn\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\'], "
+ }
+ member_method {
+ name: "test_on_batch"
+ argspec: "args=[\'self\', \'x\', \'y\', \'sample_weight\'], varargs=None, keywords=None, defaults=[\'None\', \'None\'], "
+ }
+ member_method {
+ name: "to_json"
+ argspec: "args=[\'self\'], varargs=None, keywords=kwargs, defaults=None"
+ }
+ member_method {
+ name: "to_yaml"
+ argspec: "args=[\'self\'], varargs=None, keywords=kwargs, defaults=None"
+ }
+ member_method {
+ name: "train_on_batch"
+ argspec: "args=[\'self\', \'x\', \'y\', \'sample_weight\', \'class_weight\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\'], "
+ }
+}
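
For reference, a minimal usage sketch of the Sequential surface recorded in the golden file above (add, compile, fit, predict), written against the TF 1.x-era tf.keras API; the Dense layers and toy NumPy arrays are illustrative assumptions, not part of this change:

    import numpy as np
    import tensorflow as tf

    # Toy binary-classification data (illustrative only).
    x = np.random.rand(32, 10).astype("float32")
    y = np.random.randint(0, 2, size=(32, 1))

    model = tf.keras.Sequential(name="toy_classifier")
    model.add(tf.keras.layers.Dense(16, activation="relu", input_shape=(10,)))
    model.add(tf.keras.layers.Dense(1, activation="sigmoid"))
    model.compile(optimizer="adam", loss="binary_crossentropy", metrics=["accuracy"])
    model.fit(x, y, batch_size=8, epochs=2, verbose=0)
    probs = model.predict(x, batch_size=8)   # shape (32, 1)
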
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.keras.models.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.keras.models.pbtxt
new file mode 100644
index 0000000000..7ad4a32d43
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.keras.models.pbtxt
@@ -0,0 +1,35 @@
+path: "tensorflow.keras.models"
+tf_module {
+ member {
+ name: "Model"
+ mtype: "<type \'type\'>"
+ }
+ member {
+ name: "Sequential"
+ mtype: "<type \'type\'>"
+ }
+ member_method {
+ name: "clone_model"
+ argspec: "args=[\'model\', \'input_tensors\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "load_model"
+ argspec: "args=[\'filepath\', \'custom_objects\', \'compile\'], varargs=None, keywords=None, defaults=[\'None\', \'True\'], "
+ }
+ member_method {
+ name: "model_from_config"
+ argspec: "args=[\'config\', \'custom_objects\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "model_from_json"
+ argspec: "args=[\'json_string\', \'custom_objects\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "model_from_yaml"
+ argspec: "args=[\'yaml_string\', \'custom_objects\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "save_model"
+ argspec: "args=[\'model\', \'filepath\', \'overwrite\', \'include_optimizer\'], varargs=None, keywords=None, defaults=[\'True\', \'True\'], "
+ }
+}
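
A short sketch of the tensorflow.keras.models helpers listed above (save_model, load_model, clone_model, model_from_json); the file path and tiny model are assumptions, and the HDF5 round-trip needs h5py installed:

    import tensorflow as tf

    model = tf.keras.Sequential([tf.keras.layers.Dense(1, input_shape=(4,))])
    model.compile(optimizer="sgd", loss="mse")

    tf.keras.models.save_model(model, "/tmp/toy.h5")            # HDF5 export; requires h5py
    restored = tf.keras.models.load_model("/tmp/toy.h5", compile=True)
    clone = tf.keras.models.clone_model(model)                  # same architecture, fresh weights
    rebuilt = tf.keras.models.model_from_json(model.to_json())  # architecture only, no weights
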
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.keras.optimizers.-adadelta.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.keras.optimizers.-adadelta.pbtxt
new file mode 100644
index 0000000000..b9ce154bdd
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.keras.optimizers.-adadelta.pbtxt
@@ -0,0 +1,34 @@
+path: "tensorflow.keras.optimizers.Adadelta"
+tf_class {
+ is_instance: "<class \'tensorflow.python.keras.optimizers.Adadelta\'>"
+ is_instance: "<class \'tensorflow.python.keras.optimizers.Optimizer\'>"
+ is_instance: "<type \'object\'>"
+ member_method {
+ name: "__init__"
+ argspec: "args=[\'self\', \'lr\', \'rho\', \'epsilon\', \'decay\'], varargs=None, keywords=kwargs, defaults=[\'1.0\', \'0.95\', \'None\', \'0.0\'], "
+ }
+ member_method {
+ name: "from_config"
+ argspec: "args=[\'cls\', \'config\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_config"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_gradients"
+ argspec: "args=[\'self\', \'loss\', \'params\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_updates"
+ argspec: "args=[\'self\', \'loss\', \'params\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_weights"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "set_weights"
+ argspec: "args=[\'self\', \'weights\'], varargs=None, keywords=None, defaults=None"
+ }
+}
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.keras.optimizers.-adagrad.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.keras.optimizers.-adagrad.pbtxt
new file mode 100644
index 0000000000..d0dc9e37a3
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.keras.optimizers.-adagrad.pbtxt
@@ -0,0 +1,34 @@
+path: "tensorflow.keras.optimizers.Adagrad"
+tf_class {
+ is_instance: "<class \'tensorflow.python.keras.optimizers.Adagrad\'>"
+ is_instance: "<class \'tensorflow.python.keras.optimizers.Optimizer\'>"
+ is_instance: "<type \'object\'>"
+ member_method {
+ name: "__init__"
+ argspec: "args=[\'self\', \'lr\', \'epsilon\', \'decay\'], varargs=None, keywords=kwargs, defaults=[\'0.01\', \'None\', \'0.0\'], "
+ }
+ member_method {
+ name: "from_config"
+ argspec: "args=[\'cls\', \'config\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_config"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_gradients"
+ argspec: "args=[\'self\', \'loss\', \'params\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_updates"
+ argspec: "args=[\'self\', \'loss\', \'params\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_weights"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "set_weights"
+ argspec: "args=[\'self\', \'weights\'], varargs=None, keywords=None, defaults=None"
+ }
+}
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.keras.optimizers.-adam.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.keras.optimizers.-adam.pbtxt
new file mode 100644
index 0000000000..06815fa99a
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.keras.optimizers.-adam.pbtxt
@@ -0,0 +1,34 @@
+path: "tensorflow.keras.optimizers.Adam"
+tf_class {
+ is_instance: "<class \'tensorflow.python.keras.optimizers.Adam\'>"
+ is_instance: "<class \'tensorflow.python.keras.optimizers.Optimizer\'>"
+ is_instance: "<type \'object\'>"
+ member_method {
+ name: "__init__"
+ argspec: "args=[\'self\', \'lr\', \'beta_1\', \'beta_2\', \'epsilon\', \'decay\', \'amsgrad\'], varargs=None, keywords=kwargs, defaults=[\'0.001\', \'0.9\', \'0.999\', \'None\', \'0.0\', \'False\'], "
+ }
+ member_method {
+ name: "from_config"
+ argspec: "args=[\'cls\', \'config\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_config"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_gradients"
+ argspec: "args=[\'self\', \'loss\', \'params\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_updates"
+ argspec: "args=[\'self\', \'loss\', \'params\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_weights"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "set_weights"
+ argspec: "args=[\'self\', \'weights\'], varargs=None, keywords=None, defaults=None"
+ }
+}
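
The constructor arguments recorded above map directly onto the 1.x-era Keras Adam optimizer; a minimal sketch, with the values simply restating the documented defaults:

    import tensorflow as tf

    opt = tf.keras.optimizers.Adam(lr=0.001, beta_1=0.9, beta_2=0.999,
                                   epsilon=None, decay=0.0, amsgrad=False)
    config = opt.get_config()                       # dict of the hyperparameters above
    same = tf.keras.optimizers.Adam.from_config(config)
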
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.keras.optimizers.-adamax.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.keras.optimizers.-adamax.pbtxt
new file mode 100644
index 0000000000..47b55fdb44
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.keras.optimizers.-adamax.pbtxt
@@ -0,0 +1,34 @@
+path: "tensorflow.keras.optimizers.Adamax"
+tf_class {
+ is_instance: "<class \'tensorflow.python.keras.optimizers.Adamax\'>"
+ is_instance: "<class \'tensorflow.python.keras.optimizers.Optimizer\'>"
+ is_instance: "<type \'object\'>"
+ member_method {
+ name: "__init__"
+ argspec: "args=[\'self\', \'lr\', \'beta_1\', \'beta_2\', \'epsilon\', \'decay\'], varargs=None, keywords=kwargs, defaults=[\'0.002\', \'0.9\', \'0.999\', \'None\', \'0.0\'], "
+ }
+ member_method {
+ name: "from_config"
+ argspec: "args=[\'cls\', \'config\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_config"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_gradients"
+ argspec: "args=[\'self\', \'loss\', \'params\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_updates"
+ argspec: "args=[\'self\', \'loss\', \'params\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_weights"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "set_weights"
+ argspec: "args=[\'self\', \'weights\'], varargs=None, keywords=None, defaults=None"
+ }
+}
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.keras.optimizers.-nadam.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.keras.optimizers.-nadam.pbtxt
new file mode 100644
index 0000000000..8c63a7dda9
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.keras.optimizers.-nadam.pbtxt
@@ -0,0 +1,34 @@
+path: "tensorflow.keras.optimizers.Nadam"
+tf_class {
+ is_instance: "<class \'tensorflow.python.keras.optimizers.Nadam\'>"
+ is_instance: "<class \'tensorflow.python.keras.optimizers.Optimizer\'>"
+ is_instance: "<type \'object\'>"
+ member_method {
+ name: "__init__"
+ argspec: "args=[\'self\', \'lr\', \'beta_1\', \'beta_2\', \'epsilon\', \'schedule_decay\'], varargs=None, keywords=kwargs, defaults=[\'0.002\', \'0.9\', \'0.999\', \'None\', \'0.004\'], "
+ }
+ member_method {
+ name: "from_config"
+ argspec: "args=[\'cls\', \'config\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_config"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_gradients"
+ argspec: "args=[\'self\', \'loss\', \'params\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_updates"
+ argspec: "args=[\'self\', \'loss\', \'params\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_weights"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "set_weights"
+ argspec: "args=[\'self\', \'weights\'], varargs=None, keywords=None, defaults=None"
+ }
+}
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.keras.optimizers.-optimizer.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.keras.optimizers.-optimizer.pbtxt
new file mode 100644
index 0000000000..53d64dae93
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.keras.optimizers.-optimizer.pbtxt
@@ -0,0 +1,33 @@
+path: "tensorflow.keras.optimizers.Optimizer"
+tf_class {
+ is_instance: "<class \'tensorflow.python.keras.optimizers.Optimizer\'>"
+ is_instance: "<type \'object\'>"
+ member_method {
+ name: "__init__"
+ argspec: "args=[\'self\'], varargs=None, keywords=kwargs, defaults=None"
+ }
+ member_method {
+ name: "from_config"
+ argspec: "args=[\'cls\', \'config\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_config"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_gradients"
+ argspec: "args=[\'self\', \'loss\', \'params\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_updates"
+ argspec: "args=[\'self\', \'loss\', \'params\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_weights"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "set_weights"
+ argspec: "args=[\'self\', \'weights\'], varargs=None, keywords=None, defaults=None"
+ }
+}
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.keras.optimizers.-r-m-sprop.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.keras.optimizers.-r-m-sprop.pbtxt
new file mode 100644
index 0000000000..a1e9b8cceb
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.keras.optimizers.-r-m-sprop.pbtxt
@@ -0,0 +1,34 @@
+path: "tensorflow.keras.optimizers.RMSprop"
+tf_class {
+ is_instance: "<class \'tensorflow.python.keras.optimizers.RMSprop\'>"
+ is_instance: "<class \'tensorflow.python.keras.optimizers.Optimizer\'>"
+ is_instance: "<type \'object\'>"
+ member_method {
+ name: "__init__"
+ argspec: "args=[\'self\', \'lr\', \'rho\', \'epsilon\', \'decay\'], varargs=None, keywords=kwargs, defaults=[\'0.001\', \'0.9\', \'None\', \'0.0\'], "
+ }
+ member_method {
+ name: "from_config"
+ argspec: "args=[\'cls\', \'config\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_config"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_gradients"
+ argspec: "args=[\'self\', \'loss\', \'params\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_updates"
+ argspec: "args=[\'self\', \'loss\', \'params\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_weights"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "set_weights"
+ argspec: "args=[\'self\', \'weights\'], varargs=None, keywords=None, defaults=None"
+ }
+}
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.keras.optimizers.-s-g-d.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.keras.optimizers.-s-g-d.pbtxt
new file mode 100644
index 0000000000..a67fefb1ba
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.keras.optimizers.-s-g-d.pbtxt
@@ -0,0 +1,34 @@
+path: "tensorflow.keras.optimizers.SGD"
+tf_class {
+ is_instance: "<class \'tensorflow.python.keras.optimizers.SGD\'>"
+ is_instance: "<class \'tensorflow.python.keras.optimizers.Optimizer\'>"
+ is_instance: "<type \'object\'>"
+ member_method {
+ name: "__init__"
+ argspec: "args=[\'self\', \'lr\', \'momentum\', \'decay\', \'nesterov\'], varargs=None, keywords=kwargs, defaults=[\'0.01\', \'0.0\', \'0.0\', \'False\'], "
+ }
+ member_method {
+ name: "from_config"
+ argspec: "args=[\'cls\', \'config\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_config"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_gradients"
+ argspec: "args=[\'self\', \'loss\', \'params\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_updates"
+ argspec: "args=[\'self\', \'loss\', \'params\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_weights"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "set_weights"
+ argspec: "args=[\'self\', \'weights\'], varargs=None, keywords=None, defaults=None"
+ }
+}
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.keras.optimizers.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.keras.optimizers.pbtxt
new file mode 100644
index 0000000000..7257b02087
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.keras.optimizers.pbtxt
@@ -0,0 +1,47 @@
+path: "tensorflow.keras.optimizers"
+tf_module {
+ member {
+ name: "Adadelta"
+ mtype: "<type \'type\'>"
+ }
+ member {
+ name: "Adagrad"
+ mtype: "<type \'type\'>"
+ }
+ member {
+ name: "Adam"
+ mtype: "<type \'type\'>"
+ }
+ member {
+ name: "Adamax"
+ mtype: "<type \'type\'>"
+ }
+ member {
+ name: "Nadam"
+ mtype: "<type \'type\'>"
+ }
+ member {
+ name: "Optimizer"
+ mtype: "<type \'type\'>"
+ }
+ member {
+ name: "RMSprop"
+ mtype: "<type \'type\'>"
+ }
+ member {
+ name: "SGD"
+ mtype: "<type \'type\'>"
+ }
+ member_method {
+ name: "deserialize"
+ argspec: "args=[\'config\', \'custom_objects\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "get"
+ argspec: "args=[\'identifier\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "serialize"
+ argspec: "args=[\'optimizer\'], varargs=None, keywords=None, defaults=None"
+ }
+}
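
The module-level helpers above (get, serialize, deserialize) round-trip optimizers through plain Python dicts; a small sketch, assuming the string identifiers accepted by the 1.x Keras API:

    import tensorflow as tf

    opt = tf.keras.optimizers.get("rmsprop")          # string identifier -> optimizer instance
    cfg = tf.keras.optimizers.serialize(opt)          # {'class_name': 'RMSprop', 'config': {...}}
    same = tf.keras.optimizers.deserialize(cfg)       # rebuilds the optimizer from the dict
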
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.keras.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.keras.pbtxt
new file mode 100644
index 0000000000..754b3b84b0
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.keras.pbtxt
@@ -0,0 +1,83 @@
+path: "tensorflow.keras"
+tf_module {
+ member {
+ name: "Model"
+ mtype: "<type \'type\'>"
+ }
+ member {
+ name: "Sequential"
+ mtype: "<type \'type\'>"
+ }
+ member {
+ name: "activations"
+ mtype: "<type \'module\'>"
+ }
+ member {
+ name: "applications"
+ mtype: "<type \'module\'>"
+ }
+ member {
+ name: "backend"
+ mtype: "<type \'module\'>"
+ }
+ member {
+ name: "callbacks"
+ mtype: "<type \'module\'>"
+ }
+ member {
+ name: "constraints"
+ mtype: "<type \'module\'>"
+ }
+ member {
+ name: "datasets"
+ mtype: "<type \'module\'>"
+ }
+ member {
+ name: "estimator"
+ mtype: "<type \'module\'>"
+ }
+ member {
+ name: "initializers"
+ mtype: "<type \'module\'>"
+ }
+ member {
+ name: "layers"
+ mtype: "<type \'module\'>"
+ }
+ member {
+ name: "losses"
+ mtype: "<type \'module\'>"
+ }
+ member {
+ name: "metrics"
+ mtype: "<type \'module\'>"
+ }
+ member {
+ name: "models"
+ mtype: "<type \'module\'>"
+ }
+ member {
+ name: "optimizers"
+ mtype: "<type \'module\'>"
+ }
+ member {
+ name: "preprocessing"
+ mtype: "<type \'module\'>"
+ }
+ member {
+ name: "regularizers"
+ mtype: "<type \'module\'>"
+ }
+ member {
+ name: "utils"
+ mtype: "<type \'module\'>"
+ }
+ member {
+ name: "wrappers"
+ mtype: "<type \'module\'>"
+ }
+ member_method {
+ name: "Input"
+ argspec: "args=[\'shape\', \'batch_size\', \'name\', \'dtype\', \'sparse\', \'tensor\'], varargs=None, keywords=kwargs, defaults=[\'None\', \'None\', \'None\', \'None\', \'False\', \'None\'], "
+ }
+}
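
The Input factory recorded above is the entry point of the functional API; a minimal sketch wiring it into tf.keras.Model (the Dense layers and hyperparameters are illustrative assumptions):

    import tensorflow as tf

    inputs = tf.keras.Input(shape=(8,), name="features", dtype="float32")
    hidden = tf.keras.layers.Dense(4, activation="relu")(inputs)
    outputs = tf.keras.layers.Dense(1)(hidden)

    model = tf.keras.Model(inputs, outputs)
    model.compile(optimizer=tf.keras.optimizers.SGD(lr=0.01), loss="mse")
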
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.keras.regularizers.-l1-l2.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.keras.regularizers.-l1-l2.pbtxt
new file mode 100644
index 0000000000..a45fb7b55e
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.keras.regularizers.-l1-l2.pbtxt
@@ -0,0 +1,18 @@
+path: "tensorflow.keras.regularizers.L1L2"
+tf_class {
+ is_instance: "<class \'tensorflow.python.keras.regularizers.L1L2\'>"
+ is_instance: "<class \'tensorflow.python.keras.regularizers.Regularizer\'>"
+ is_instance: "<type \'object\'>"
+ member_method {
+ name: "__init__"
+ argspec: "args=[\'self\', \'l1\', \'l2\'], varargs=None, keywords=None, defaults=[\'0.0\', \'0.0\'], "
+ }
+ member_method {
+ name: "from_config"
+ argspec: "args=[\'cls\', \'config\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_config"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+}
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.keras.regularizers.-regularizer.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.keras.regularizers.-regularizer.pbtxt
new file mode 100644
index 0000000000..641001a646
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.keras.regularizers.-regularizer.pbtxt
@@ -0,0 +1,12 @@
+path: "tensorflow.keras.regularizers.Regularizer"
+tf_class {
+ is_instance: "<class \'tensorflow.python.keras.regularizers.Regularizer\'>"
+ is_instance: "<type \'object\'>"
+ member_method {
+ name: "__init__"
+ }
+ member_method {
+ name: "from_config"
+ argspec: "args=[\'cls\', \'config\'], varargs=None, keywords=None, defaults=None"
+ }
+}
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.keras.regularizers.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.keras.regularizers.pbtxt
new file mode 100644
index 0000000000..bb10d41d70
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.keras.regularizers.pbtxt
@@ -0,0 +1,35 @@
+path: "tensorflow.keras.regularizers"
+tf_module {
+ member {
+ name: "L1L2"
+ mtype: "<type \'type\'>"
+ }
+ member {
+ name: "Regularizer"
+ mtype: "<type \'type\'>"
+ }
+ member_method {
+ name: "deserialize"
+ argspec: "args=[\'config\', \'custom_objects\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "get"
+ argspec: "args=[\'identifier\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "l1"
+ argspec: "args=[\'l\'], varargs=None, keywords=None, defaults=[\'0.01\'], "
+ }
+ member_method {
+ name: "l1_l2"
+ argspec: "args=[\'l1\', \'l2\'], varargs=None, keywords=None, defaults=[\'0.01\', \'0.01\'], "
+ }
+ member_method {
+ name: "l2"
+ argspec: "args=[\'l\'], varargs=None, keywords=None, defaults=[\'0.01\'], "
+ }
+ member_method {
+ name: "serialize"
+ argspec: "args=[\'regularizer\'], varargs=None, keywords=None, defaults=None"
+ }
+}
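
A quick sketch of the regularizer factories listed above; attaching them to a Dense layer's kernel is an assumed but typical use:

    import tensorflow as tf

    reg = tf.keras.regularizers.l1_l2(l1=0.01, l2=0.01)     # returns an L1L2 instance
    dense_a = tf.keras.layers.Dense(8, kernel_regularizer=reg)
    dense_b = tf.keras.layers.Dense(8, kernel_regularizer=tf.keras.regularizers.l2(0.01))
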
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.keras.utils.-custom-object-scope.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.keras.utils.-custom-object-scope.pbtxt
new file mode 100644
index 0000000000..109682046b
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.keras.utils.-custom-object-scope.pbtxt
@@ -0,0 +1,9 @@
+path: "tensorflow.keras.utils.CustomObjectScope"
+tf_class {
+ is_instance: "<class \'tensorflow.python.keras.utils.generic_utils.CustomObjectScope\'>"
+ is_instance: "<type \'object\'>"
+ member_method {
+ name: "__init__"
+ argspec: "args=[\'self\'], varargs=args, keywords=None, defaults=None"
+ }
+}
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.keras.utils.-generator-enqueuer.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.keras.utils.-generator-enqueuer.pbtxt
new file mode 100644
index 0000000000..939fd547d0
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.keras.utils.-generator-enqueuer.pbtxt
@@ -0,0 +1,26 @@
+path: "tensorflow.keras.utils.GeneratorEnqueuer"
+tf_class {
+ is_instance: "<class \'tensorflow.python.keras.utils.data_utils.GeneratorEnqueuer\'>"
+ is_instance: "<class \'tensorflow.python.keras.utils.data_utils.SequenceEnqueuer\'>"
+ is_instance: "<type \'object\'>"
+ member_method {
+ name: "__init__"
+ argspec: "args=[\'self\', \'generator\', \'use_multiprocessing\', \'wait_time\', \'seed\'], varargs=None, keywords=None, defaults=[\'False\', \'0.05\', \'None\'], "
+ }
+ member_method {
+ name: "get"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "is_running"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "start"
+ argspec: "args=[\'self\', \'workers\', \'max_queue_size\'], varargs=None, keywords=None, defaults=[\'1\', \'10\'], "
+ }
+ member_method {
+ name: "stop"
+ argspec: "args=[\'self\', \'timeout\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+}
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.keras.utils.-h-d-f5-matrix.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.keras.utils.-h-d-f5-matrix.pbtxt
new file mode 100644
index 0000000000..6b832051a9
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.keras.utils.-h-d-f5-matrix.pbtxt
@@ -0,0 +1,29 @@
+path: "tensorflow.keras.utils.HDF5Matrix"
+tf_class {
+ is_instance: "<class \'tensorflow.python.keras.utils.io_utils.HDF5Matrix\'>"
+ is_instance: "<type \'object\'>"
+ member {
+ name: "dtype"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "ndim"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "refs"
+ mtype: "<type \'collections.defaultdict\'>"
+ }
+ member {
+ name: "shape"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "size"
+ mtype: "<type \'property\'>"
+ }
+ member_method {
+ name: "__init__"
+ argspec: "args=[\'self\', \'datapath\', \'dataset\', \'start\', \'end\', \'normalizer\'], varargs=None, keywords=None, defaults=[\'0\', \'None\', \'None\'], "
+ }
+}
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.keras.utils.-ordered-enqueuer.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.keras.utils.-ordered-enqueuer.pbtxt
new file mode 100644
index 0000000000..e7e7d2839b
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.keras.utils.-ordered-enqueuer.pbtxt
@@ -0,0 +1,26 @@
+path: "tensorflow.keras.utils.OrderedEnqueuer"
+tf_class {
+ is_instance: "<class \'tensorflow.python.keras.utils.data_utils.OrderedEnqueuer\'>"
+ is_instance: "<class \'tensorflow.python.keras.utils.data_utils.SequenceEnqueuer\'>"
+ is_instance: "<type \'object\'>"
+ member_method {
+ name: "__init__"
+ argspec: "args=[\'self\', \'sequence\', \'use_multiprocessing\', \'shuffle\'], varargs=None, keywords=None, defaults=[\'False\', \'False\'], "
+ }
+ member_method {
+ name: "get"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "is_running"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "start"
+ argspec: "args=[\'self\', \'workers\', \'max_queue_size\'], varargs=None, keywords=None, defaults=[\'1\', \'10\'], "
+ }
+ member_method {
+ name: "stop"
+ argspec: "args=[\'self\', \'timeout\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+}
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.keras.utils.-progbar.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.keras.utils.-progbar.pbtxt
new file mode 100644
index 0000000000..be4496e753
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.keras.utils.-progbar.pbtxt
@@ -0,0 +1,17 @@
+path: "tensorflow.keras.utils.Progbar"
+tf_class {
+ is_instance: "<class \'tensorflow.python.keras.utils.generic_utils.Progbar\'>"
+ is_instance: "<type \'object\'>"
+ member_method {
+ name: "__init__"
+ argspec: "args=[\'self\', \'target\', \'width\', \'verbose\', \'interval\', \'stateful_metrics\'], varargs=None, keywords=None, defaults=[\'30\', \'1\', \'0.05\', \'None\'], "
+ }
+ member_method {
+ name: "add"
+ argspec: "args=[\'self\', \'n\', \'values\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "update"
+ argspec: "args=[\'self\', \'current\', \'values\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+}
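
A minimal sketch of the Progbar class recorded above, driven from a plain Python loop; the fake loss values are illustrative:

    import tensorflow as tf

    bar = tf.keras.utils.Progbar(target=100, width=30, verbose=1)
    for step in range(100):
        loss = 1.0 / (step + 1)                    # stand-in for a real training step
        bar.update(step + 1, values=[("loss", loss)])
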
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.keras.utils.-sequence-enqueuer.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.keras.utils.-sequence-enqueuer.pbtxt
new file mode 100644
index 0000000000..a9e499d100
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.keras.utils.-sequence-enqueuer.pbtxt
@@ -0,0 +1,24 @@
+path: "tensorflow.keras.utils.SequenceEnqueuer"
+tf_class {
+ is_instance: "<class \'tensorflow.python.keras.utils.data_utils.SequenceEnqueuer\'>"
+ is_instance: "<type \'object\'>"
+ member_method {
+ name: "__init__"
+ }
+ member_method {
+ name: "get"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "is_running"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "start"
+ argspec: "args=[\'self\', \'workers\', \'max_queue_size\'], varargs=None, keywords=None, defaults=[\'1\', \'10\'], "
+ }
+ member_method {
+ name: "stop"
+ argspec: "args=[\'self\', \'timeout\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+}
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.keras.utils.-sequence.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.keras.utils.-sequence.pbtxt
new file mode 100644
index 0000000000..e2dc932dc8
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.keras.utils.-sequence.pbtxt
@@ -0,0 +1,12 @@
+path: "tensorflow.keras.utils.Sequence"
+tf_class {
+ is_instance: "<class \'tensorflow.python.keras.utils.data_utils.Sequence\'>"
+ is_instance: "<type \'object\'>"
+ member_method {
+ name: "__init__"
+ }
+ member_method {
+ name: "on_epoch_end"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+}
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.keras.utils.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.keras.utils.pbtxt
new file mode 100644
index 0000000000..81b91d2780
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.keras.utils.pbtxt
@@ -0,0 +1,75 @@
+path: "tensorflow.keras.utils"
+tf_module {
+ member {
+ name: "CustomObjectScope"
+ mtype: "<type \'type\'>"
+ }
+ member {
+ name: "GeneratorEnqueuer"
+ mtype: "<type \'type\'>"
+ }
+ member {
+ name: "HDF5Matrix"
+ mtype: "<type \'type\'>"
+ }
+ member {
+ name: "OrderedEnqueuer"
+ mtype: "<type \'type\'>"
+ }
+ member {
+ name: "Progbar"
+ mtype: "<type \'type\'>"
+ }
+ member {
+ name: "Sequence"
+ mtype: "<type \'type\'>"
+ }
+ member {
+ name: "SequenceEnqueuer"
+ mtype: "<type \'type\'>"
+ }
+ member_method {
+ name: "convert_all_kernels_in_model"
+ argspec: "args=[\'model\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "custom_object_scope"
+ argspec: "args=[], varargs=args, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "deserialize_keras_object"
+ argspec: "args=[\'identifier\', \'module_objects\', \'custom_objects\', \'printable_module_name\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'object\'], "
+ }
+ member_method {
+ name: "get_custom_objects"
+ argspec: "args=[], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_file"
+ argspec: "args=[\'fname\', \'origin\', \'untar\', \'md5_hash\', \'file_hash\', \'cache_subdir\', \'hash_algorithm\', \'extract\', \'archive_format\', \'cache_dir\'], varargs=None, keywords=None, defaults=[\'False\', \'None\', \'None\', \'datasets\', \'auto\', \'False\', \'auto\', \'None\'], "
+ }
+ member_method {
+ name: "get_source_inputs"
+ argspec: "args=[\'tensor\', \'layer\', \'node_index\'], varargs=None, keywords=None, defaults=[\'None\', \'None\'], "
+ }
+ member_method {
+ name: "multi_gpu_model"
+ argspec: "args=[\'model\', \'gpus\', \'cpu_merge\', \'cpu_relocation\'], varargs=None, keywords=None, defaults=[\'True\', \'False\'], "
+ }
+ member_method {
+ name: "normalize"
+ argspec: "args=[\'x\', \'axis\', \'order\'], varargs=None, keywords=None, defaults=[\'-1\', \'2\'], "
+ }
+ member_method {
+ name: "plot_model"
+ argspec: "args=[\'model\', \'to_file\', \'show_shapes\', \'show_layer_names\', \'rankdir\'], varargs=None, keywords=None, defaults=[\'model.png\', \'False\', \'True\', \'TB\'], "
+ }
+ member_method {
+ name: "serialize_keras_object"
+ argspec: "args=[\'instance\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "to_categorical"
+ argspec: "args=[\'y\', \'num_classes\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+}
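
Two of the helpers listed above in a short sketch (to_categorical and normalize); the arrays are illustrative:

    import numpy as np
    import tensorflow as tf

    labels = np.array([0, 2, 1, 2])
    one_hot = tf.keras.utils.to_categorical(labels, num_classes=3)   # shape (4, 3)
    unit_rows = tf.keras.utils.normalize(np.random.rand(4, 3), axis=-1, order=2)
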
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.keras.wrappers.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.keras.wrappers.pbtxt
new file mode 100644
index 0000000000..0b2fac9b7d
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.keras.wrappers.pbtxt
@@ -0,0 +1,7 @@
+path: "tensorflow.keras.wrappers"
+tf_module {
+ member {
+ name: "scikit_learn"
+ mtype: "<type \'module\'>"
+ }
+}
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.keras.wrappers.scikit_learn.-keras-classifier.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.keras.wrappers.scikit_learn.-keras-classifier.pbtxt
new file mode 100644
index 0000000000..67cca3af41
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.keras.wrappers.scikit_learn.-keras-classifier.pbtxt
@@ -0,0 +1,42 @@
+path: "tensorflow.keras.wrappers.scikit_learn.KerasClassifier"
+tf_class {
+ is_instance: "<class \'tensorflow.python.keras.wrappers.scikit_learn.KerasClassifier\'>"
+ is_instance: "<class \'tensorflow.python.keras.wrappers.scikit_learn.BaseWrapper\'>"
+ is_instance: "<type \'object\'>"
+ member_method {
+ name: "__init__"
+ argspec: "args=[\'self\', \'build_fn\'], varargs=None, keywords=sk_params, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "check_params"
+ argspec: "args=[\'self\', \'params\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "filter_sk_params"
+ argspec: "args=[\'self\', \'fn\', \'override\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "fit"
+ argspec: "args=[\'self\', \'x\', \'y\'], varargs=None, keywords=kwargs, defaults=None"
+ }
+ member_method {
+ name: "get_params"
+ argspec: "args=[\'self\'], varargs=None, keywords=params, defaults=None"
+ }
+ member_method {
+ name: "predict"
+ argspec: "args=[\'self\', \'x\'], varargs=None, keywords=kwargs, defaults=None"
+ }
+ member_method {
+ name: "predict_proba"
+ argspec: "args=[\'self\', \'x\'], varargs=None, keywords=kwargs, defaults=None"
+ }
+ member_method {
+ name: "score"
+ argspec: "args=[\'self\', \'x\', \'y\'], varargs=None, keywords=kwargs, defaults=None"
+ }
+ member_method {
+ name: "set_params"
+ argspec: "args=[\'self\'], varargs=None, keywords=params, defaults=None"
+ }
+}
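
A sketch of the scikit-learn wrapper recorded above: build_fn returns a compiled model, and extra keyword arguments (here hidden, epochs, batch_size, verbose) are forwarded as sk_params. The toy model and data are assumptions:

    import numpy as np
    import tensorflow as tf

    def build_fn(hidden=8):
        m = tf.keras.Sequential([
            tf.keras.layers.Dense(hidden, activation="relu", input_shape=(4,)),
            tf.keras.layers.Dense(3, activation="softmax"),
        ])
        m.compile(optimizer="adam", loss="sparse_categorical_crossentropy",
                  metrics=["accuracy"])
        return m

    clf = tf.keras.wrappers.scikit_learn.KerasClassifier(
        build_fn=build_fn, hidden=8, epochs=2, batch_size=8, verbose=0)

    x = np.random.rand(30, 4).astype("float32")
    y = np.random.randint(0, 3, size=(30,))
    clf.fit(x, y)
    accuracy = clf.score(x, y)
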
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.keras.wrappers.scikit_learn.-keras-regressor.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.keras.wrappers.scikit_learn.-keras-regressor.pbtxt
new file mode 100644
index 0000000000..f4b9b7e277
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.keras.wrappers.scikit_learn.-keras-regressor.pbtxt
@@ -0,0 +1,38 @@
+path: "tensorflow.keras.wrappers.scikit_learn.KerasRegressor"
+tf_class {
+ is_instance: "<class \'tensorflow.python.keras.wrappers.scikit_learn.KerasRegressor\'>"
+ is_instance: "<class \'tensorflow.python.keras.wrappers.scikit_learn.BaseWrapper\'>"
+ is_instance: "<type \'object\'>"
+ member_method {
+ name: "__init__"
+ argspec: "args=[\'self\', \'build_fn\'], varargs=None, keywords=sk_params, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "check_params"
+ argspec: "args=[\'self\', \'params\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "filter_sk_params"
+ argspec: "args=[\'self\', \'fn\', \'override\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "fit"
+ argspec: "args=[\'self\', \'x\', \'y\'], varargs=None, keywords=kwargs, defaults=None"
+ }
+ member_method {
+ name: "get_params"
+ argspec: "args=[\'self\'], varargs=None, keywords=params, defaults=None"
+ }
+ member_method {
+ name: "predict"
+ argspec: "args=[\'self\', \'x\'], varargs=None, keywords=kwargs, defaults=None"
+ }
+ member_method {
+ name: "score"
+ argspec: "args=[\'self\', \'x\', \'y\'], varargs=None, keywords=kwargs, defaults=None"
+ }
+ member_method {
+ name: "set_params"
+ argspec: "args=[\'self\'], varargs=None, keywords=params, defaults=None"
+ }
+}
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.keras.wrappers.scikit_learn.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.keras.wrappers.scikit_learn.pbtxt
new file mode 100644
index 0000000000..fbd4d13387
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.keras.wrappers.scikit_learn.pbtxt
@@ -0,0 +1,11 @@
+path: "tensorflow.keras.wrappers.scikit_learn"
+tf_module {
+ member {
+ name: "KerasClassifier"
+ mtype: "<type \'type\'>"
+ }
+ member {
+ name: "KerasRegressor"
+ mtype: "<type \'type\'>"
+ }
+}
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.layers.-average-pooling1-d.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.layers.-average-pooling1-d.pbtxt
new file mode 100644
index 0000000000..c82e67526b
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.layers.-average-pooling1-d.pbtxt
@@ -0,0 +1,186 @@
+path: "tensorflow.layers.AveragePooling1D"
+tf_class {
+ is_instance: "<class \'tensorflow.python.layers.pooling.AveragePooling1D\'>"
+ is_instance: "<class \'tensorflow.python.keras.layers.pooling.AveragePooling1D\'>"
+ is_instance: "<class \'tensorflow.python.keras.layers.pooling.Pooling1D\'>"
+ is_instance: "<class \'tensorflow.python.layers.base.Layer\'>"
+ is_instance: "<class \'tensorflow.python.keras.engine.base_layer.Layer\'>"
+ is_instance: "<class \'tensorflow.python.training.checkpointable.base.CheckpointableBase\'>"
+ is_instance: "<type \'object\'>"
+ member {
+ name: "activity_regularizer"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "dtype"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "graph"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "inbound_nodes"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "input"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "input_mask"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "input_shape"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "losses"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "name"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "non_trainable_variables"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "non_trainable_weights"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "outbound_nodes"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "output"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "output_mask"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "output_shape"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "scope_name"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "trainable_variables"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "trainable_weights"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "updates"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "variables"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "weights"
+ mtype: "<type \'property\'>"
+ }
+ member_method {
+ name: "__init__"
+ argspec: "args=[\'self\', \'pool_size\', \'strides\', \'padding\', \'data_format\', \'name\'], varargs=None, keywords=kwargs, defaults=[\'valid\', \'channels_last\', \'None\'], "
+ }
+ member_method {
+ name: "add_loss"
+ argspec: "args=[\'self\', \'losses\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "add_update"
+ argspec: "args=[\'self\', \'updates\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "add_variable"
+ argspec: "args=[\'self\'], varargs=args, keywords=kwargs, defaults=None"
+ }
+ member_method {
+ name: "add_weight"
+ argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'use_resource\', \'synchronization\', \'aggregation\', \'partitioner\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'VariableSynchronization.AUTO\', \'VariableAggregation.NONE\', \'None\'], "
+ }
+ member_method {
+ name: "apply"
+ argspec: "args=[\'self\', \'inputs\'], varargs=args, keywords=kwargs, defaults=None"
+ }
+ member_method {
+ name: "build"
+ argspec: "args=[\'self\', \'input_shape\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "call"
+ argspec: "args=[\'self\', \'inputs\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "compute_mask"
+ argspec: "args=[\'self\', \'inputs\', \'mask\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "compute_output_shape"
+ argspec: "args=[\'self\', \'input_shape\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "count_params"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "from_config"
+ argspec: "args=[\'cls\', \'config\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_config"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_input_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_input_mask_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_input_shape_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_losses_for"
+ argspec: "args=[\'self\', \'inputs\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_output_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_output_mask_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_output_shape_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_updates_for"
+ argspec: "args=[\'self\', \'inputs\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_weights"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "set_weights"
+ argspec: "args=[\'self\', \'weights\'], varargs=None, keywords=None, defaults=None"
+ }
+}
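
A minimal graph-mode sketch of the tf.layers.AveragePooling1D class recorded above; the constant input tensor is an illustrative assumption:

    import tensorflow as tf

    x = tf.constant(0.0, shape=[4, 20, 8])                  # (batch, steps, channels)
    pool = tf.layers.AveragePooling1D(pool_size=2, strides=2, padding="valid")
    y = pool(x)                                             # Tensor with shape (4, 10, 8)
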
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.layers.-average-pooling2-d.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.layers.-average-pooling2-d.pbtxt
new file mode 100644
index 0000000000..1d031cb5f8
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.layers.-average-pooling2-d.pbtxt
@@ -0,0 +1,186 @@
+path: "tensorflow.layers.AveragePooling2D"
+tf_class {
+ is_instance: "<class \'tensorflow.python.layers.pooling.AveragePooling2D\'>"
+ is_instance: "<class \'tensorflow.python.keras.layers.pooling.AveragePooling2D\'>"
+ is_instance: "<class \'tensorflow.python.keras.layers.pooling.Pooling2D\'>"
+ is_instance: "<class \'tensorflow.python.layers.base.Layer\'>"
+ is_instance: "<class \'tensorflow.python.keras.engine.base_layer.Layer\'>"
+ is_instance: "<class \'tensorflow.python.training.checkpointable.base.CheckpointableBase\'>"
+ is_instance: "<type \'object\'>"
+ member {
+ name: "activity_regularizer"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "dtype"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "graph"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "inbound_nodes"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "input"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "input_mask"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "input_shape"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "losses"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "name"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "non_trainable_variables"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "non_trainable_weights"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "outbound_nodes"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "output"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "output_mask"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "output_shape"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "scope_name"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "trainable_variables"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "trainable_weights"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "updates"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "variables"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "weights"
+ mtype: "<type \'property\'>"
+ }
+ member_method {
+ name: "__init__"
+ argspec: "args=[\'self\', \'pool_size\', \'strides\', \'padding\', \'data_format\', \'name\'], varargs=None, keywords=kwargs, defaults=[\'valid\', \'channels_last\', \'None\'], "
+ }
+ member_method {
+ name: "add_loss"
+ argspec: "args=[\'self\', \'losses\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "add_update"
+ argspec: "args=[\'self\', \'updates\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "add_variable"
+ argspec: "args=[\'self\'], varargs=args, keywords=kwargs, defaults=None"
+ }
+ member_method {
+ name: "add_weight"
+ argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'use_resource\', \'synchronization\', \'aggregation\', \'partitioner\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'VariableSynchronization.AUTO\', \'VariableAggregation.NONE\', \'None\'], "
+ }
+ member_method {
+ name: "apply"
+ argspec: "args=[\'self\', \'inputs\'], varargs=args, keywords=kwargs, defaults=None"
+ }
+ member_method {
+ name: "build"
+ argspec: "args=[\'self\', \'input_shape\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "call"
+ argspec: "args=[\'self\', \'inputs\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "compute_mask"
+ argspec: "args=[\'self\', \'inputs\', \'mask\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "compute_output_shape"
+ argspec: "args=[\'self\', \'input_shape\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "count_params"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "from_config"
+ argspec: "args=[\'cls\', \'config\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_config"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_input_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_input_mask_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_input_shape_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_losses_for"
+ argspec: "args=[\'self\', \'inputs\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_output_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_output_mask_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_output_shape_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_updates_for"
+ argspec: "args=[\'self\', \'inputs\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_weights"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "set_weights"
+ argspec: "args=[\'self\', \'weights\'], varargs=None, keywords=None, defaults=None"
+ }
+}
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.layers.-average-pooling3-d.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.layers.-average-pooling3-d.pbtxt
new file mode 100644
index 0000000000..a8dda6655d
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.layers.-average-pooling3-d.pbtxt
@@ -0,0 +1,186 @@
+path: "tensorflow.layers.AveragePooling3D"
+tf_class {
+ is_instance: "<class \'tensorflow.python.layers.pooling.AveragePooling3D\'>"
+ is_instance: "<class \'tensorflow.python.keras.layers.pooling.AveragePooling3D\'>"
+ is_instance: "<class \'tensorflow.python.keras.layers.pooling.Pooling3D\'>"
+ is_instance: "<class \'tensorflow.python.layers.base.Layer\'>"
+ is_instance: "<class \'tensorflow.python.keras.engine.base_layer.Layer\'>"
+ is_instance: "<class \'tensorflow.python.training.checkpointable.base.CheckpointableBase\'>"
+ is_instance: "<type \'object\'>"
+ member {
+ name: "activity_regularizer"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "dtype"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "graph"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "inbound_nodes"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "input"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "input_mask"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "input_shape"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "losses"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "name"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "non_trainable_variables"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "non_trainable_weights"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "outbound_nodes"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "output"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "output_mask"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "output_shape"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "scope_name"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "trainable_variables"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "trainable_weights"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "updates"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "variables"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "weights"
+ mtype: "<type \'property\'>"
+ }
+ member_method {
+ name: "__init__"
+ argspec: "args=[\'self\', \'pool_size\', \'strides\', \'padding\', \'data_format\', \'name\'], varargs=None, keywords=kwargs, defaults=[\'valid\', \'channels_last\', \'None\'], "
+ }
+ member_method {
+ name: "add_loss"
+ argspec: "args=[\'self\', \'losses\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "add_update"
+ argspec: "args=[\'self\', \'updates\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "add_variable"
+ argspec: "args=[\'self\'], varargs=args, keywords=kwargs, defaults=None"
+ }
+ member_method {
+ name: "add_weight"
+ argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'use_resource\', \'synchronization\', \'aggregation\', \'partitioner\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'VariableSynchronization.AUTO\', \'VariableAggregation.NONE\', \'None\'], "
+ }
+ member_method {
+ name: "apply"
+ argspec: "args=[\'self\', \'inputs\'], varargs=args, keywords=kwargs, defaults=None"
+ }
+ member_method {
+ name: "build"
+ argspec: "args=[\'self\', \'input_shape\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "call"
+ argspec: "args=[\'self\', \'inputs\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "compute_mask"
+ argspec: "args=[\'self\', \'inputs\', \'mask\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "compute_output_shape"
+ argspec: "args=[\'self\', \'input_shape\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "count_params"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "from_config"
+ argspec: "args=[\'cls\', \'config\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_config"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_input_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_input_mask_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_input_shape_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_losses_for"
+ argspec: "args=[\'self\', \'inputs\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_output_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_output_mask_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_output_shape_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_updates_for"
+ argspec: "args=[\'self\', \'inputs\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_weights"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "set_weights"
+ argspec: "args=[\'self\', \'weights\'], varargs=None, keywords=None, defaults=None"
+ }
+}
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.layers.-batch-normalization.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.layers.-batch-normalization.pbtxt
new file mode 100644
index 0000000000..97f65ed894
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.layers.-batch-normalization.pbtxt
@@ -0,0 +1,185 @@
+path: "tensorflow.layers.BatchNormalization"
+tf_class {
+ is_instance: "<class \'tensorflow.python.layers.normalization.BatchNormalization\'>"
+ is_instance: "<class \'tensorflow.python.keras.layers.normalization.BatchNormalization\'>"
+ is_instance: "<class \'tensorflow.python.layers.base.Layer\'>"
+ is_instance: "<class \'tensorflow.python.keras.engine.base_layer.Layer\'>"
+ is_instance: "<class \'tensorflow.python.training.checkpointable.base.CheckpointableBase\'>"
+ is_instance: "<type \'object\'>"
+ member {
+ name: "activity_regularizer"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "dtype"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "graph"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "inbound_nodes"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "input"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "input_mask"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "input_shape"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "losses"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "name"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "non_trainable_variables"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "non_trainable_weights"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "outbound_nodes"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "output"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "output_mask"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "output_shape"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "scope_name"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "trainable_variables"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "trainable_weights"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "updates"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "variables"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "weights"
+ mtype: "<type \'property\'>"
+ }
+ member_method {
+ name: "__init__"
+ argspec: "args=[\'self\', \'axis\', \'momentum\', \'epsilon\', \'center\', \'scale\', \'beta_initializer\', \'gamma_initializer\', \'moving_mean_initializer\', \'moving_variance_initializer\', \'beta_regularizer\', \'gamma_regularizer\', \'beta_constraint\', \'gamma_constraint\', \'renorm\', \'renorm_clipping\', \'renorm_momentum\', \'fused\', \'trainable\', \'virtual_batch_size\', \'adjustment\', \'name\'], varargs=None, keywords=kwargs, defaults=[\'-1\', \'0.99\', \'0.001\', \'True\', \'True\', \'<tensorflow.python.ops.init_ops.Zeros object instance>\', \'<tensorflow.python.ops.init_ops.Ones object instance>\', \'<tensorflow.python.ops.init_ops.Zeros object instance>\', \'<tensorflow.python.ops.init_ops.Ones object instance>\', \'None\', \'None\', \'None\', \'None\', \'False\', \'None\', \'0.99\', \'None\', \'True\', \'None\', \'None\', \'None\'], "
+ }
+ member_method {
+ name: "add_loss"
+ argspec: "args=[\'self\', \'losses\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "add_update"
+ argspec: "args=[\'self\', \'updates\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "add_variable"
+ argspec: "args=[\'self\'], varargs=args, keywords=kwargs, defaults=None"
+ }
+ member_method {
+ name: "add_weight"
+ argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'use_resource\', \'synchronization\', \'aggregation\', \'partitioner\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'VariableSynchronization.AUTO\', \'VariableAggregation.NONE\', \'None\'], "
+ }
+ member_method {
+ name: "apply"
+ argspec: "args=[\'self\', \'inputs\'], varargs=args, keywords=kwargs, defaults=None"
+ }
+ member_method {
+ name: "build"
+ argspec: "args=[\'self\', \'input_shape\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "call"
+ argspec: "args=[\'self\', \'inputs\', \'training\'], varargs=None, keywords=None, defaults=[\'False\'], "
+ }
+ member_method {
+ name: "compute_mask"
+ argspec: "args=[\'self\', \'inputs\', \'mask\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "compute_output_shape"
+ argspec: "args=[\'self\', \'input_shape\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "count_params"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "from_config"
+ argspec: "args=[\'cls\', \'config\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_config"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_input_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_input_mask_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_input_shape_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_losses_for"
+ argspec: "args=[\'self\', \'inputs\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_output_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_output_mask_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_output_shape_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_updates_for"
+ argspec: "args=[\'self\', \'inputs\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_weights"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "set_weights"
+ argspec: "args=[\'self\', \'weights\'], varargs=None, keywords=None, defaults=None"
+ }
+}
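For context, a minimal usage sketch of the BatchNormalization constructor argspec recorded above, assuming TF 1.x graph mode; the input shape and variable names are hypothetical, not taken from the golden file:

    import tensorflow as tf
    x = tf.placeholder(tf.float32, [None, 32, 32, 3])           # hypothetical NHWC input
    bn = tf.layers.BatchNormalization(axis=-1, momentum=0.99, epsilon=0.001)
    y = bn.apply(x, training=True)                               # apply() forwards to __call__
    update_ops = bn.updates                                      # moving-mean/variance ops to run with the train op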
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.layers.-conv1-d.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.layers.-conv1-d.pbtxt
new file mode 100644
index 0000000000..ccd9578f0d
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.layers.-conv1-d.pbtxt
@@ -0,0 +1,186 @@
+path: "tensorflow.layers.Conv1D"
+tf_class {
+ is_instance: "<class \'tensorflow.python.layers.convolutional.Conv1D\'>"
+ is_instance: "<class \'tensorflow.python.keras.layers.convolutional.Conv1D\'>"
+ is_instance: "<class \'tensorflow.python.keras.layers.convolutional.Conv\'>"
+ is_instance: "<class \'tensorflow.python.layers.base.Layer\'>"
+ is_instance: "<class \'tensorflow.python.keras.engine.base_layer.Layer\'>"
+ is_instance: "<class \'tensorflow.python.training.checkpointable.base.CheckpointableBase\'>"
+ is_instance: "<type \'object\'>"
+ member {
+ name: "activity_regularizer"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "dtype"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "graph"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "inbound_nodes"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "input"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "input_mask"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "input_shape"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "losses"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "name"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "non_trainable_variables"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "non_trainable_weights"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "outbound_nodes"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "output"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "output_mask"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "output_shape"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "scope_name"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "trainable_variables"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "trainable_weights"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "updates"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "variables"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "weights"
+ mtype: "<type \'property\'>"
+ }
+ member_method {
+ name: "__init__"
+ argspec: "args=[\'self\', \'filters\', \'kernel_size\', \'strides\', \'padding\', \'data_format\', \'dilation_rate\', \'activation\', \'use_bias\', \'kernel_initializer\', \'bias_initializer\', \'kernel_regularizer\', \'bias_regularizer\', \'activity_regularizer\', \'kernel_constraint\', \'bias_constraint\', \'trainable\', \'name\'], varargs=None, keywords=kwargs, defaults=[\'1\', \'valid\', \'channels_last\', \'1\', \'None\', \'True\', \'None\', \'<tensorflow.python.ops.init_ops.Zeros object instance>\', \'None\', \'None\', \'None\', \'None\', \'None\', \'True\', \'None\'], "
+ }
+ member_method {
+ name: "add_loss"
+ argspec: "args=[\'self\', \'losses\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "add_update"
+ argspec: "args=[\'self\', \'updates\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "add_variable"
+ argspec: "args=[\'self\'], varargs=args, keywords=kwargs, defaults=None"
+ }
+ member_method {
+ name: "add_weight"
+ argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'use_resource\', \'synchronization\', \'aggregation\', \'partitioner\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'VariableSynchronization.AUTO\', \'VariableAggregation.NONE\', \'None\'], "
+ }
+ member_method {
+ name: "apply"
+ argspec: "args=[\'self\', \'inputs\'], varargs=args, keywords=kwargs, defaults=None"
+ }
+ member_method {
+ name: "build"
+ argspec: "args=[\'self\', \'input_shape\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "call"
+ argspec: "args=[\'self\', \'inputs\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "compute_mask"
+ argspec: "args=[\'self\', \'inputs\', \'mask\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "compute_output_shape"
+ argspec: "args=[\'self\', \'input_shape\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "count_params"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "from_config"
+ argspec: "args=[\'cls\', \'config\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_config"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_input_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_input_mask_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_input_shape_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_losses_for"
+ argspec: "args=[\'self\', \'inputs\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_output_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_output_mask_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_output_shape_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_updates_for"
+ argspec: "args=[\'self\', \'inputs\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_weights"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "set_weights"
+ argspec: "args=[\'self\', \'weights\'], varargs=None, keywords=None, defaults=None"
+ }
+}
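A brief illustrative sketch of the Conv1D signature captured above (assumed TF 1.x usage; the placeholder shape is invented for the example):

    import tensorflow as tf
    x = tf.placeholder(tf.float32, [None, 100, 8])               # hypothetical [batch, steps, channels] input
    conv = tf.layers.Conv1D(filters=32, kernel_size=3, strides=1,
                            padding='valid', activation=tf.nn.relu)
    y = conv.apply(x)                                            # kernel and bias variables are built on first call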
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.layers.-conv2-d-transpose.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.layers.-conv2-d-transpose.pbtxt
new file mode 100644
index 0000000000..9cbb58d721
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.layers.-conv2-d-transpose.pbtxt
@@ -0,0 +1,187 @@
+path: "tensorflow.layers.Conv2DTranspose"
+tf_class {
+ is_instance: "<class \'tensorflow.python.layers.convolutional.Conv2DTranspose\'>"
+ is_instance: "<class \'tensorflow.python.keras.layers.convolutional.Conv2DTranspose\'>"
+ is_instance: "<class \'tensorflow.python.keras.layers.convolutional.Conv2D\'>"
+ is_instance: "<class \'tensorflow.python.keras.layers.convolutional.Conv\'>"
+ is_instance: "<class \'tensorflow.python.layers.base.Layer\'>"
+ is_instance: "<class \'tensorflow.python.keras.engine.base_layer.Layer\'>"
+ is_instance: "<class \'tensorflow.python.training.checkpointable.base.CheckpointableBase\'>"
+ is_instance: "<type \'object\'>"
+ member {
+ name: "activity_regularizer"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "dtype"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "graph"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "inbound_nodes"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "input"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "input_mask"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "input_shape"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "losses"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "name"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "non_trainable_variables"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "non_trainable_weights"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "outbound_nodes"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "output"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "output_mask"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "output_shape"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "scope_name"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "trainable_variables"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "trainable_weights"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "updates"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "variables"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "weights"
+ mtype: "<type \'property\'>"
+ }
+ member_method {
+ name: "__init__"
+ argspec: "args=[\'self\', \'filters\', \'kernel_size\', \'strides\', \'padding\', \'data_format\', \'activation\', \'use_bias\', \'kernel_initializer\', \'bias_initializer\', \'kernel_regularizer\', \'bias_regularizer\', \'activity_regularizer\', \'kernel_constraint\', \'bias_constraint\', \'trainable\', \'name\'], varargs=None, keywords=kwargs, defaults=[\'(1, 1)\', \'valid\', \'channels_last\', \'None\', \'True\', \'None\', \'<tensorflow.python.ops.init_ops.Zeros object instance>\', \'None\', \'None\', \'None\', \'None\', \'None\', \'True\', \'None\'], "
+ }
+ member_method {
+ name: "add_loss"
+ argspec: "args=[\'self\', \'losses\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "add_update"
+ argspec: "args=[\'self\', \'updates\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "add_variable"
+ argspec: "args=[\'self\'], varargs=args, keywords=kwargs, defaults=None"
+ }
+ member_method {
+ name: "add_weight"
+ argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'use_resource\', \'synchronization\', \'aggregation\', \'partitioner\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'VariableSynchronization.AUTO\', \'VariableAggregation.NONE\', \'None\'], "
+ }
+ member_method {
+ name: "apply"
+ argspec: "args=[\'self\', \'inputs\'], varargs=args, keywords=kwargs, defaults=None"
+ }
+ member_method {
+ name: "build"
+ argspec: "args=[\'self\', \'input_shape\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "call"
+ argspec: "args=[\'self\', \'inputs\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "compute_mask"
+ argspec: "args=[\'self\', \'inputs\', \'mask\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "compute_output_shape"
+ argspec: "args=[\'self\', \'input_shape\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "count_params"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "from_config"
+ argspec: "args=[\'cls\', \'config\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_config"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_input_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_input_mask_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_input_shape_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_losses_for"
+ argspec: "args=[\'self\', \'inputs\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_output_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_output_mask_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_output_shape_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_updates_for"
+ argspec: "args=[\'self\', \'inputs\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_weights"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "set_weights"
+ argspec: "args=[\'self\', \'weights\'], varargs=None, keywords=None, defaults=None"
+ }
+}
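The following sketch, with made-up shapes, shows how the Conv2DTranspose constructor above is typically used for upsampling; it is illustrative only:

    import tensorflow as tf
    x = tf.placeholder(tf.float32, [None, 8, 8, 64])             # hypothetical low-resolution feature map
    deconv = tf.layers.Conv2DTranspose(filters=16, kernel_size=(3, 3),
                                       strides=(2, 2), padding='same')
    y = deconv.apply(x)                                          # spatial dims roughly doubled by strides=(2, 2)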
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.layers.-conv2-d.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.layers.-conv2-d.pbtxt
new file mode 100644
index 0000000000..c75ea3911e
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.layers.-conv2-d.pbtxt
@@ -0,0 +1,186 @@
+path: "tensorflow.layers.Conv2D"
+tf_class {
+ is_instance: "<class \'tensorflow.python.layers.convolutional.Conv2D\'>"
+ is_instance: "<class \'tensorflow.python.keras.layers.convolutional.Conv2D\'>"
+ is_instance: "<class \'tensorflow.python.keras.layers.convolutional.Conv\'>"
+ is_instance: "<class \'tensorflow.python.layers.base.Layer\'>"
+ is_instance: "<class \'tensorflow.python.keras.engine.base_layer.Layer\'>"
+ is_instance: "<class \'tensorflow.python.training.checkpointable.base.CheckpointableBase\'>"
+ is_instance: "<type \'object\'>"
+ member {
+ name: "activity_regularizer"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "dtype"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "graph"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "inbound_nodes"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "input"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "input_mask"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "input_shape"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "losses"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "name"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "non_trainable_variables"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "non_trainable_weights"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "outbound_nodes"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "output"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "output_mask"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "output_shape"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "scope_name"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "trainable_variables"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "trainable_weights"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "updates"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "variables"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "weights"
+ mtype: "<type \'property\'>"
+ }
+ member_method {
+ name: "__init__"
+ argspec: "args=[\'self\', \'filters\', \'kernel_size\', \'strides\', \'padding\', \'data_format\', \'dilation_rate\', \'activation\', \'use_bias\', \'kernel_initializer\', \'bias_initializer\', \'kernel_regularizer\', \'bias_regularizer\', \'activity_regularizer\', \'kernel_constraint\', \'bias_constraint\', \'trainable\', \'name\'], varargs=None, keywords=kwargs, defaults=[\'(1, 1)\', \'valid\', \'channels_last\', \'(1, 1)\', \'None\', \'True\', \'None\', \'<tensorflow.python.ops.init_ops.Zeros object instance>\', \'None\', \'None\', \'None\', \'None\', \'None\', \'True\', \'None\'], "
+ }
+ member_method {
+ name: "add_loss"
+ argspec: "args=[\'self\', \'losses\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "add_update"
+ argspec: "args=[\'self\', \'updates\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "add_variable"
+ argspec: "args=[\'self\'], varargs=args, keywords=kwargs, defaults=None"
+ }
+ member_method {
+ name: "add_weight"
+ argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'use_resource\', \'synchronization\', \'aggregation\', \'partitioner\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'VariableSynchronization.AUTO\', \'VariableAggregation.NONE\', \'None\'], "
+ }
+ member_method {
+ name: "apply"
+ argspec: "args=[\'self\', \'inputs\'], varargs=args, keywords=kwargs, defaults=None"
+ }
+ member_method {
+ name: "build"
+ argspec: "args=[\'self\', \'input_shape\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "call"
+ argspec: "args=[\'self\', \'inputs\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "compute_mask"
+ argspec: "args=[\'self\', \'inputs\', \'mask\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "compute_output_shape"
+ argspec: "args=[\'self\', \'input_shape\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "count_params"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "from_config"
+ argspec: "args=[\'cls\', \'config\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_config"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_input_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_input_mask_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_input_shape_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_losses_for"
+ argspec: "args=[\'self\', \'inputs\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_output_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_output_mask_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_output_shape_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_updates_for"
+ argspec: "args=[\'self\', \'inputs\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_weights"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "set_weights"
+ argspec: "args=[\'self\', \'weights\'], varargs=None, keywords=None, defaults=None"
+ }
+}
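An illustrative use of the Conv2D argspec above (hypothetical input shape, assumed TF 1.x API):

    import tensorflow as tf
    x = tf.placeholder(tf.float32, [None, 28, 28, 3])            # hypothetical image batch
    conv = tf.layers.Conv2D(filters=32, kernel_size=(3, 3),
                            padding='same', activation=tf.nn.relu)
    y = conv.apply(x)                                            # same spatial size, 32 output channels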
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.layers.-conv3-d-transpose.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.layers.-conv3-d-transpose.pbtxt
new file mode 100644
index 0000000000..5dc834e514
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.layers.-conv3-d-transpose.pbtxt
@@ -0,0 +1,187 @@
+path: "tensorflow.layers.Conv3DTranspose"
+tf_class {
+ is_instance: "<class \'tensorflow.python.layers.convolutional.Conv3DTranspose\'>"
+ is_instance: "<class \'tensorflow.python.keras.layers.convolutional.Conv3DTranspose\'>"
+ is_instance: "<class \'tensorflow.python.keras.layers.convolutional.Conv3D\'>"
+ is_instance: "<class \'tensorflow.python.keras.layers.convolutional.Conv\'>"
+ is_instance: "<class \'tensorflow.python.layers.base.Layer\'>"
+ is_instance: "<class \'tensorflow.python.keras.engine.base_layer.Layer\'>"
+ is_instance: "<class \'tensorflow.python.training.checkpointable.base.CheckpointableBase\'>"
+ is_instance: "<type \'object\'>"
+ member {
+ name: "activity_regularizer"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "dtype"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "graph"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "inbound_nodes"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "input"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "input_mask"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "input_shape"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "losses"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "name"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "non_trainable_variables"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "non_trainable_weights"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "outbound_nodes"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "output"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "output_mask"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "output_shape"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "scope_name"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "trainable_variables"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "trainable_weights"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "updates"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "variables"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "weights"
+ mtype: "<type \'property\'>"
+ }
+ member_method {
+ name: "__init__"
+ argspec: "args=[\'self\', \'filters\', \'kernel_size\', \'strides\', \'padding\', \'data_format\', \'activation\', \'use_bias\', \'kernel_initializer\', \'bias_initializer\', \'kernel_regularizer\', \'bias_regularizer\', \'activity_regularizer\', \'kernel_constraint\', \'bias_constraint\', \'trainable\', \'name\'], varargs=None, keywords=kwargs, defaults=[\'(1, 1, 1)\', \'valid\', \'channels_last\', \'None\', \'True\', \'None\', \'<tensorflow.python.ops.init_ops.Zeros object instance>\', \'None\', \'None\', \'None\', \'None\', \'None\', \'True\', \'None\'], "
+ }
+ member_method {
+ name: "add_loss"
+ argspec: "args=[\'self\', \'losses\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "add_update"
+ argspec: "args=[\'self\', \'updates\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "add_variable"
+ argspec: "args=[\'self\'], varargs=args, keywords=kwargs, defaults=None"
+ }
+ member_method {
+ name: "add_weight"
+ argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'use_resource\', \'synchronization\', \'aggregation\', \'partitioner\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'VariableSynchronization.AUTO\', \'VariableAggregation.NONE\', \'None\'], "
+ }
+ member_method {
+ name: "apply"
+ argspec: "args=[\'self\', \'inputs\'], varargs=args, keywords=kwargs, defaults=None"
+ }
+ member_method {
+ name: "build"
+ argspec: "args=[\'self\', \'input_shape\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "call"
+ argspec: "args=[\'self\', \'inputs\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "compute_mask"
+ argspec: "args=[\'self\', \'inputs\', \'mask\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "compute_output_shape"
+ argspec: "args=[\'self\', \'input_shape\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "count_params"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "from_config"
+ argspec: "args=[\'cls\', \'config\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_config"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_input_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_input_mask_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_input_shape_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_losses_for"
+ argspec: "args=[\'self\', \'inputs\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_output_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_output_mask_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_output_shape_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_updates_for"
+ argspec: "args=[\'self\', \'inputs\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_weights"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "set_weights"
+ argspec: "args=[\'self\', \'weights\'], varargs=None, keywords=None, defaults=None"
+ }
+}
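A hedged sketch of the Conv3DTranspose signature above; shapes are invented for illustration:

    import tensorflow as tf
    x = tf.placeholder(tf.float32, [None, 4, 4, 4, 16])          # hypothetical volumetric features
    up = tf.layers.Conv3DTranspose(filters=8, kernel_size=(3, 3, 3),
                                   strides=(2, 2, 2), padding='same')
    y = up.apply(x)                                              # depth/height/width upsampled by the strides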
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.layers.-conv3-d.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.layers.-conv3-d.pbtxt
new file mode 100644
index 0000000000..96ab209874
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.layers.-conv3-d.pbtxt
@@ -0,0 +1,186 @@
+path: "tensorflow.layers.Conv3D"
+tf_class {
+ is_instance: "<class \'tensorflow.python.layers.convolutional.Conv3D\'>"
+ is_instance: "<class \'tensorflow.python.keras.layers.convolutional.Conv3D\'>"
+ is_instance: "<class \'tensorflow.python.keras.layers.convolutional.Conv\'>"
+ is_instance: "<class \'tensorflow.python.layers.base.Layer\'>"
+ is_instance: "<class \'tensorflow.python.keras.engine.base_layer.Layer\'>"
+ is_instance: "<class \'tensorflow.python.training.checkpointable.base.CheckpointableBase\'>"
+ is_instance: "<type \'object\'>"
+ member {
+ name: "activity_regularizer"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "dtype"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "graph"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "inbound_nodes"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "input"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "input_mask"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "input_shape"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "losses"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "name"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "non_trainable_variables"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "non_trainable_weights"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "outbound_nodes"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "output"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "output_mask"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "output_shape"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "scope_name"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "trainable_variables"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "trainable_weights"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "updates"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "variables"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "weights"
+ mtype: "<type \'property\'>"
+ }
+ member_method {
+ name: "__init__"
+ argspec: "args=[\'self\', \'filters\', \'kernel_size\', \'strides\', \'padding\', \'data_format\', \'dilation_rate\', \'activation\', \'use_bias\', \'kernel_initializer\', \'bias_initializer\', \'kernel_regularizer\', \'bias_regularizer\', \'activity_regularizer\', \'kernel_constraint\', \'bias_constraint\', \'trainable\', \'name\'], varargs=None, keywords=kwargs, defaults=[\'(1, 1, 1)\', \'valid\', \'channels_last\', \'(1, 1, 1)\', \'None\', \'True\', \'None\', \'<tensorflow.python.ops.init_ops.Zeros object instance>\', \'None\', \'None\', \'None\', \'None\', \'None\', \'True\', \'None\'], "
+ }
+ member_method {
+ name: "add_loss"
+ argspec: "args=[\'self\', \'losses\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "add_update"
+ argspec: "args=[\'self\', \'updates\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "add_variable"
+ argspec: "args=[\'self\'], varargs=args, keywords=kwargs, defaults=None"
+ }
+ member_method {
+ name: "add_weight"
+ argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'use_resource\', \'synchronization\', \'aggregation\', \'partitioner\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'VariableSynchronization.AUTO\', \'VariableAggregation.NONE\', \'None\'], "
+ }
+ member_method {
+ name: "apply"
+ argspec: "args=[\'self\', \'inputs\'], varargs=args, keywords=kwargs, defaults=None"
+ }
+ member_method {
+ name: "build"
+ argspec: "args=[\'self\', \'input_shape\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "call"
+ argspec: "args=[\'self\', \'inputs\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "compute_mask"
+ argspec: "args=[\'self\', \'inputs\', \'mask\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "compute_output_shape"
+ argspec: "args=[\'self\', \'input_shape\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "count_params"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "from_config"
+ argspec: "args=[\'cls\', \'config\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_config"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_input_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_input_mask_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_input_shape_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_losses_for"
+ argspec: "args=[\'self\', \'inputs\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_output_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_output_mask_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_output_shape_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_updates_for"
+ argspec: "args=[\'self\', \'inputs\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_weights"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "set_weights"
+ argspec: "args=[\'self\', \'weights\'], varargs=None, keywords=None, defaults=None"
+ }
+}
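Illustration of the Conv3D constructor recorded above, with a hypothetical 5-D input:

    import tensorflow as tf
    x = tf.placeholder(tf.float32, [None, 16, 16, 16, 1])        # hypothetical [batch, D, H, W, C] volume
    conv = tf.layers.Conv3D(filters=16, kernel_size=(3, 3, 3), padding='valid')
    y = conv.apply(x)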
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.layers.-dense.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.layers.-dense.pbtxt
new file mode 100644
index 0000000000..7e9656b352
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.layers.-dense.pbtxt
@@ -0,0 +1,185 @@
+path: "tensorflow.layers.Dense"
+tf_class {
+ is_instance: "<class \'tensorflow.python.layers.core.Dense\'>"
+ is_instance: "<class \'tensorflow.python.keras.layers.core.Dense\'>"
+ is_instance: "<class \'tensorflow.python.layers.base.Layer\'>"
+ is_instance: "<class \'tensorflow.python.keras.engine.base_layer.Layer\'>"
+ is_instance: "<class \'tensorflow.python.training.checkpointable.base.CheckpointableBase\'>"
+ is_instance: "<type \'object\'>"
+ member {
+ name: "activity_regularizer"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "dtype"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "graph"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "inbound_nodes"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "input"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "input_mask"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "input_shape"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "losses"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "name"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "non_trainable_variables"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "non_trainable_weights"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "outbound_nodes"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "output"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "output_mask"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "output_shape"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "scope_name"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "trainable_variables"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "trainable_weights"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "updates"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "variables"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "weights"
+ mtype: "<type \'property\'>"
+ }
+ member_method {
+ name: "__init__"
+ argspec: "args=[\'self\', \'units\', \'activation\', \'use_bias\', \'kernel_initializer\', \'bias_initializer\', \'kernel_regularizer\', \'bias_regularizer\', \'activity_regularizer\', \'kernel_constraint\', \'bias_constraint\', \'trainable\', \'name\'], varargs=None, keywords=kwargs, defaults=[\'None\', \'True\', \'None\', \'<tensorflow.python.ops.init_ops.Zeros object instance>\', \'None\', \'None\', \'None\', \'None\', \'None\', \'True\', \'None\'], "
+ }
+ member_method {
+ name: "add_loss"
+ argspec: "args=[\'self\', \'losses\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "add_update"
+ argspec: "args=[\'self\', \'updates\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "add_variable"
+ argspec: "args=[\'self\'], varargs=args, keywords=kwargs, defaults=None"
+ }
+ member_method {
+ name: "add_weight"
+ argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'use_resource\', \'synchronization\', \'aggregation\', \'partitioner\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'VariableSynchronization.AUTO\', \'VariableAggregation.NONE\', \'None\'], "
+ }
+ member_method {
+ name: "apply"
+ argspec: "args=[\'self\', \'inputs\'], varargs=args, keywords=kwargs, defaults=None"
+ }
+ member_method {
+ name: "build"
+ argspec: "args=[\'self\', \'input_shape\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "call"
+ argspec: "args=[\'self\', \'inputs\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "compute_mask"
+ argspec: "args=[\'self\', \'inputs\', \'mask\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "compute_output_shape"
+ argspec: "args=[\'self\', \'input_shape\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "count_params"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "from_config"
+ argspec: "args=[\'cls\', \'config\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_config"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_input_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_input_mask_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_input_shape_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_losses_for"
+ argspec: "args=[\'self\', \'inputs\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_output_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_output_mask_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_output_shape_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_updates_for"
+ argspec: "args=[\'self\', \'inputs\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_weights"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "set_weights"
+ argspec: "args=[\'self\', \'weights\'], varargs=None, keywords=None, defaults=None"
+ }
+}
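A small usage sketch matching the Dense argspec above (assumed TF 1.x; names and shapes are examples only):

    import tensorflow as tf
    x = tf.placeholder(tf.float32, [None, 128])                  # hypothetical feature vectors
    dense = tf.layers.Dense(units=64, activation=tf.nn.relu)
    y = dense(x)                                                 # __call__ builds kernel/bias, then runs call()
    weights = dense.trainable_weights                            # [kernel, bias]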
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.layers.-dropout.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.layers.-dropout.pbtxt
new file mode 100644
index 0000000000..e9a2269a6e
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.layers.-dropout.pbtxt
@@ -0,0 +1,185 @@
+path: "tensorflow.layers.Dropout"
+tf_class {
+ is_instance: "<class \'tensorflow.python.layers.core.Dropout\'>"
+ is_instance: "<class \'tensorflow.python.keras.layers.core.Dropout\'>"
+ is_instance: "<class \'tensorflow.python.layers.base.Layer\'>"
+ is_instance: "<class \'tensorflow.python.keras.engine.base_layer.Layer\'>"
+ is_instance: "<class \'tensorflow.python.training.checkpointable.base.CheckpointableBase\'>"
+ is_instance: "<type \'object\'>"
+ member {
+ name: "activity_regularizer"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "dtype"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "graph"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "inbound_nodes"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "input"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "input_mask"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "input_shape"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "losses"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "name"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "non_trainable_variables"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "non_trainable_weights"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "outbound_nodes"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "output"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "output_mask"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "output_shape"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "scope_name"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "trainable_variables"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "trainable_weights"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "updates"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "variables"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "weights"
+ mtype: "<type \'property\'>"
+ }
+ member_method {
+ name: "__init__"
+ argspec: "args=[\'self\', \'rate\', \'noise_shape\', \'seed\', \'name\'], varargs=None, keywords=kwargs, defaults=[\'0.5\', \'None\', \'None\', \'None\'], "
+ }
+ member_method {
+ name: "add_loss"
+ argspec: "args=[\'self\', \'losses\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "add_update"
+ argspec: "args=[\'self\', \'updates\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "add_variable"
+ argspec: "args=[\'self\'], varargs=args, keywords=kwargs, defaults=None"
+ }
+ member_method {
+ name: "add_weight"
+ argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'use_resource\', \'synchronization\', \'aggregation\', \'partitioner\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'VariableSynchronization.AUTO\', \'VariableAggregation.NONE\', \'None\'], "
+ }
+ member_method {
+ name: "apply"
+ argspec: "args=[\'self\', \'inputs\'], varargs=args, keywords=kwargs, defaults=None"
+ }
+ member_method {
+ name: "build"
+ argspec: "args=[\'self\', \'input_shape\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "call"
+ argspec: "args=[\'self\', \'inputs\', \'training\'], varargs=None, keywords=None, defaults=[\'False\'], "
+ }
+ member_method {
+ name: "compute_mask"
+ argspec: "args=[\'self\', \'inputs\', \'mask\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "compute_output_shape"
+ argspec: "args=[\'self\', \'input_shape\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "count_params"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "from_config"
+ argspec: "args=[\'cls\', \'config\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_config"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_input_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_input_mask_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_input_shape_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_losses_for"
+ argspec: "args=[\'self\', \'inputs\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_output_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_output_mask_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_output_shape_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_updates_for"
+ argspec: "args=[\'self\', \'inputs\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_weights"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "set_weights"
+ argspec: "args=[\'self\', \'weights\'], varargs=None, keywords=None, defaults=None"
+ }
+}
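Sketch of the Dropout signature above; the boolean training placeholder is a common but hypothetical pattern, not mandated by the golden file:

    import tensorflow as tf
    x = tf.placeholder(tf.float32, [None, 128])
    is_training = tf.placeholder(tf.bool, [])                    # feed True during training, False at inference
    drop = tf.layers.Dropout(rate=0.5)
    y = drop.apply(x, training=is_training)                      # identity when training is False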
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.layers.-flatten.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.layers.-flatten.pbtxt
new file mode 100644
index 0000000000..7d2eaaab2a
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.layers.-flatten.pbtxt
@@ -0,0 +1,185 @@
+path: "tensorflow.layers.Flatten"
+tf_class {
+ is_instance: "<class \'tensorflow.python.layers.core.Flatten\'>"
+ is_instance: "<class \'tensorflow.python.keras.layers.core.Flatten\'>"
+ is_instance: "<class \'tensorflow.python.layers.base.Layer\'>"
+ is_instance: "<class \'tensorflow.python.keras.engine.base_layer.Layer\'>"
+ is_instance: "<class \'tensorflow.python.training.checkpointable.base.CheckpointableBase\'>"
+ is_instance: "<type \'object\'>"
+ member {
+ name: "activity_regularizer"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "dtype"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "graph"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "inbound_nodes"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "input"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "input_mask"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "input_shape"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "losses"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "name"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "non_trainable_variables"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "non_trainable_weights"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "outbound_nodes"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "output"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "output_mask"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "output_shape"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "scope_name"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "trainable_variables"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "trainable_weights"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "updates"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "variables"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "weights"
+ mtype: "<type \'property\'>"
+ }
+ member_method {
+ name: "__init__"
+ argspec: "args=[\'self\', \'data_format\'], varargs=None, keywords=kwargs, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "add_loss"
+ argspec: "args=[\'self\', \'losses\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "add_update"
+ argspec: "args=[\'self\', \'updates\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "add_variable"
+ argspec: "args=[\'self\'], varargs=args, keywords=kwargs, defaults=None"
+ }
+ member_method {
+ name: "add_weight"
+ argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'use_resource\', \'synchronization\', \'aggregation\', \'partitioner\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'VariableSynchronization.AUTO\', \'VariableAggregation.NONE\', \'None\'], "
+ }
+ member_method {
+ name: "apply"
+ argspec: "args=[\'self\', \'inputs\'], varargs=args, keywords=kwargs, defaults=None"
+ }
+ member_method {
+ name: "build"
+ argspec: "args=[\'self\', \'input_shape\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "call"
+ argspec: "args=[\'self\', \'inputs\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "compute_mask"
+ argspec: "args=[\'self\', \'inputs\', \'mask\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "compute_output_shape"
+ argspec: "args=[\'self\', \'input_shape\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "count_params"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "from_config"
+ argspec: "args=[\'cls\', \'config\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_config"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_input_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_input_mask_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_input_shape_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_losses_for"
+ argspec: "args=[\'self\', \'inputs\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_output_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_output_mask_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_output_shape_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_updates_for"
+ argspec: "args=[\'self\', \'inputs\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_weights"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "set_weights"
+ argspec: "args=[\'self\', \'weights\'], varargs=None, keywords=None, defaults=None"
+ }
+}
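Illustrative use of the Flatten argspec above, with an invented input shape:

    import tensorflow as tf
    x = tf.placeholder(tf.float32, [None, 4, 4, 32])             # hypothetical conv output
    y = tf.layers.Flatten().apply(x)                             # non-batch dims collapsed to [None, 512]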
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.layers.-input-spec.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.layers.-input-spec.pbtxt
new file mode 100644
index 0000000000..fd02c919ae
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.layers.-input-spec.pbtxt
@@ -0,0 +1,9 @@
+path: "tensorflow.layers.InputSpec"
+tf_class {
+ is_instance: "<class \'tensorflow.python.keras.engine.base_layer.InputSpec\'>"
+ is_instance: "<type \'object\'>"
+ member_method {
+ name: "__init__"
+ argspec: "args=[\'self\', \'dtype\', \'shape\', \'ndim\', \'max_ndim\', \'min_ndim\', \'axes\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'None\', \'None\', \'None\'], "
+ }
+}
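A short sketch of how the InputSpec constructor above is commonly used inside custom layers (the axes value is hypothetical):

    import tensorflow as tf
    # Typically assigned to a custom layer's self.input_spec to validate incoming tensors.
    spec = tf.layers.InputSpec(ndim=4, axes={-1: 3})             # rank-4 input with 3 channels on the last axis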
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.layers.-layer.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.layers.-layer.pbtxt
new file mode 100644
index 0000000000..8bc3eb26e9
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.layers.-layer.pbtxt
@@ -0,0 +1,183 @@
+path: "tensorflow.layers.Layer"
+tf_class {
+ is_instance: "<class \'tensorflow.python.layers.base.Layer\'>"
+ is_instance: "<class \'tensorflow.python.keras.engine.base_layer.Layer\'>"
+ is_instance: "<class \'tensorflow.python.training.checkpointable.base.CheckpointableBase\'>"
+ is_instance: "<type \'object\'>"
+ member {
+ name: "activity_regularizer"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "dtype"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "graph"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "inbound_nodes"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "input"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "input_mask"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "input_shape"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "losses"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "name"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "non_trainable_variables"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "non_trainable_weights"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "outbound_nodes"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "output"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "output_mask"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "output_shape"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "scope_name"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "trainable_variables"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "trainable_weights"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "updates"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "variables"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "weights"
+ mtype: "<type \'property\'>"
+ }
+ member_method {
+ name: "__init__"
+ argspec: "args=[\'self\', \'trainable\', \'name\', \'dtype\'], varargs=None, keywords=kwargs, defaults=[\'True\', \'None\', \'None\'], "
+ }
+ member_method {
+ name: "add_loss"
+ argspec: "args=[\'self\', \'losses\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "add_update"
+ argspec: "args=[\'self\', \'updates\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "add_variable"
+ argspec: "args=[\'self\'], varargs=args, keywords=kwargs, defaults=None"
+ }
+ member_method {
+ name: "add_weight"
+ argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'use_resource\', \'synchronization\', \'aggregation\', \'partitioner\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'VariableSynchronization.AUTO\', \'VariableAggregation.NONE\', \'None\'], "
+ }
+ member_method {
+ name: "apply"
+ argspec: "args=[\'self\', \'inputs\'], varargs=args, keywords=kwargs, defaults=None"
+ }
+ member_method {
+ name: "build"
+ argspec: "args=[\'self\', \'input_shape\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "call"
+ argspec: "args=[\'self\', \'inputs\'], varargs=None, keywords=kwargs, defaults=None"
+ }
+ member_method {
+ name: "compute_mask"
+ argspec: "args=[\'self\', \'inputs\', \'mask\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "compute_output_shape"
+ argspec: "args=[\'self\', \'input_shape\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "count_params"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "from_config"
+ argspec: "args=[\'cls\', \'config\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_config"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_input_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_input_mask_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_input_shape_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_losses_for"
+ argspec: "args=[\'self\', \'inputs\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_output_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_output_mask_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_output_shape_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_updates_for"
+ argspec: "args=[\'self\', \'inputs\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_weights"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "set_weights"
+ argspec: "args=[\'self\', \'weights\'], varargs=None, keywords=None, defaults=None"
+ }
+}
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.layers.-max-pooling1-d.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.layers.-max-pooling1-d.pbtxt
new file mode 100644
index 0000000000..6a0dcce56a
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.layers.-max-pooling1-d.pbtxt
@@ -0,0 +1,186 @@
+path: "tensorflow.layers.MaxPooling1D"
+tf_class {
+ is_instance: "<class \'tensorflow.python.layers.pooling.MaxPooling1D\'>"
+ is_instance: "<class \'tensorflow.python.keras.layers.pooling.MaxPooling1D\'>"
+ is_instance: "<class \'tensorflow.python.keras.layers.pooling.Pooling1D\'>"
+ is_instance: "<class \'tensorflow.python.layers.base.Layer\'>"
+ is_instance: "<class \'tensorflow.python.keras.engine.base_layer.Layer\'>"
+ is_instance: "<class \'tensorflow.python.training.checkpointable.base.CheckpointableBase\'>"
+ is_instance: "<type \'object\'>"
+ member {
+ name: "activity_regularizer"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "dtype"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "graph"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "inbound_nodes"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "input"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "input_mask"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "input_shape"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "losses"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "name"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "non_trainable_variables"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "non_trainable_weights"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "outbound_nodes"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "output"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "output_mask"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "output_shape"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "scope_name"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "trainable_variables"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "trainable_weights"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "updates"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "variables"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "weights"
+ mtype: "<type \'property\'>"
+ }
+ member_method {
+ name: "__init__"
+ argspec: "args=[\'self\', \'pool_size\', \'strides\', \'padding\', \'data_format\', \'name\'], varargs=None, keywords=kwargs, defaults=[\'valid\', \'channels_last\', \'None\'], "
+ }
+ member_method {
+ name: "add_loss"
+ argspec: "args=[\'self\', \'losses\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "add_update"
+ argspec: "args=[\'self\', \'updates\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "add_variable"
+ argspec: "args=[\'self\'], varargs=args, keywords=kwargs, defaults=None"
+ }
+ member_method {
+ name: "add_weight"
+ argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'use_resource\', \'synchronization\', \'aggregation\', \'partitioner\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'VariableSynchronization.AUTO\', \'VariableAggregation.NONE\', \'None\'], "
+ }
+ member_method {
+ name: "apply"
+ argspec: "args=[\'self\', \'inputs\'], varargs=args, keywords=kwargs, defaults=None"
+ }
+ member_method {
+ name: "build"
+ argspec: "args=[\'self\', \'input_shape\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "call"
+ argspec: "args=[\'self\', \'inputs\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "compute_mask"
+ argspec: "args=[\'self\', \'inputs\', \'mask\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "compute_output_shape"
+ argspec: "args=[\'self\', \'input_shape\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "count_params"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "from_config"
+ argspec: "args=[\'cls\', \'config\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_config"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_input_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_input_mask_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_input_shape_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_losses_for"
+ argspec: "args=[\'self\', \'inputs\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_output_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_output_mask_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_output_shape_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_updates_for"
+ argspec: "args=[\'self\', \'inputs\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_weights"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "set_weights"
+ argspec: "args=[\'self\', \'weights\'], varargs=None, keywords=None, defaults=None"
+ }
+}
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.layers.-max-pooling2-d.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.layers.-max-pooling2-d.pbtxt
new file mode 100644
index 0000000000..b6c84edf2a
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.layers.-max-pooling2-d.pbtxt
@@ -0,0 +1,186 @@
+path: "tensorflow.layers.MaxPooling2D"
+tf_class {
+ is_instance: "<class \'tensorflow.python.layers.pooling.MaxPooling2D\'>"
+ is_instance: "<class \'tensorflow.python.keras.layers.pooling.MaxPooling2D\'>"
+ is_instance: "<class \'tensorflow.python.keras.layers.pooling.Pooling2D\'>"
+ is_instance: "<class \'tensorflow.python.layers.base.Layer\'>"
+ is_instance: "<class \'tensorflow.python.keras.engine.base_layer.Layer\'>"
+ is_instance: "<class \'tensorflow.python.training.checkpointable.base.CheckpointableBase\'>"
+ is_instance: "<type \'object\'>"
+ member {
+ name: "activity_regularizer"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "dtype"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "graph"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "inbound_nodes"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "input"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "input_mask"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "input_shape"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "losses"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "name"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "non_trainable_variables"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "non_trainable_weights"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "outbound_nodes"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "output"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "output_mask"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "output_shape"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "scope_name"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "trainable_variables"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "trainable_weights"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "updates"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "variables"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "weights"
+ mtype: "<type \'property\'>"
+ }
+ member_method {
+ name: "__init__"
+ argspec: "args=[\'self\', \'pool_size\', \'strides\', \'padding\', \'data_format\', \'name\'], varargs=None, keywords=kwargs, defaults=[\'valid\', \'channels_last\', \'None\'], "
+ }
+ member_method {
+ name: "add_loss"
+ argspec: "args=[\'self\', \'losses\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "add_update"
+ argspec: "args=[\'self\', \'updates\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "add_variable"
+ argspec: "args=[\'self\'], varargs=args, keywords=kwargs, defaults=None"
+ }
+ member_method {
+ name: "add_weight"
+ argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'use_resource\', \'synchronization\', \'aggregation\', \'partitioner\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'VariableSynchronization.AUTO\', \'VariableAggregation.NONE\', \'None\'], "
+ }
+ member_method {
+ name: "apply"
+ argspec: "args=[\'self\', \'inputs\'], varargs=args, keywords=kwargs, defaults=None"
+ }
+ member_method {
+ name: "build"
+ argspec: "args=[\'self\', \'input_shape\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "call"
+ argspec: "args=[\'self\', \'inputs\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "compute_mask"
+ argspec: "args=[\'self\', \'inputs\', \'mask\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "compute_output_shape"
+ argspec: "args=[\'self\', \'input_shape\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "count_params"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "from_config"
+ argspec: "args=[\'cls\', \'config\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_config"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_input_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_input_mask_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_input_shape_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_losses_for"
+ argspec: "args=[\'self\', \'inputs\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_output_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_output_mask_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_output_shape_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_updates_for"
+ argspec: "args=[\'self\', \'inputs\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_weights"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "set_weights"
+ argspec: "args=[\'self\', \'weights\'], varargs=None, keywords=None, defaults=None"
+ }
+}
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.layers.-max-pooling3-d.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.layers.-max-pooling3-d.pbtxt
new file mode 100644
index 0000000000..062a02fa59
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.layers.-max-pooling3-d.pbtxt
@@ -0,0 +1,186 @@
+path: "tensorflow.layers.MaxPooling3D"
+tf_class {
+ is_instance: "<class \'tensorflow.python.layers.pooling.MaxPooling3D\'>"
+ is_instance: "<class \'tensorflow.python.keras.layers.pooling.MaxPooling3D\'>"
+ is_instance: "<class \'tensorflow.python.keras.layers.pooling.Pooling3D\'>"
+ is_instance: "<class \'tensorflow.python.layers.base.Layer\'>"
+ is_instance: "<class \'tensorflow.python.keras.engine.base_layer.Layer\'>"
+ is_instance: "<class \'tensorflow.python.training.checkpointable.base.CheckpointableBase\'>"
+ is_instance: "<type \'object\'>"
+ member {
+ name: "activity_regularizer"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "dtype"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "graph"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "inbound_nodes"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "input"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "input_mask"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "input_shape"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "losses"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "name"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "non_trainable_variables"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "non_trainable_weights"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "outbound_nodes"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "output"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "output_mask"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "output_shape"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "scope_name"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "trainable_variables"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "trainable_weights"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "updates"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "variables"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "weights"
+ mtype: "<type \'property\'>"
+ }
+ member_method {
+ name: "__init__"
+ argspec: "args=[\'self\', \'pool_size\', \'strides\', \'padding\', \'data_format\', \'name\'], varargs=None, keywords=kwargs, defaults=[\'valid\', \'channels_last\', \'None\'], "
+ }
+ member_method {
+ name: "add_loss"
+ argspec: "args=[\'self\', \'losses\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "add_update"
+ argspec: "args=[\'self\', \'updates\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "add_variable"
+ argspec: "args=[\'self\'], varargs=args, keywords=kwargs, defaults=None"
+ }
+ member_method {
+ name: "add_weight"
+ argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'use_resource\', \'synchronization\', \'aggregation\', \'partitioner\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'VariableSynchronization.AUTO\', \'VariableAggregation.NONE\', \'None\'], "
+ }
+ member_method {
+ name: "apply"
+ argspec: "args=[\'self\', \'inputs\'], varargs=args, keywords=kwargs, defaults=None"
+ }
+ member_method {
+ name: "build"
+ argspec: "args=[\'self\', \'input_shape\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "call"
+ argspec: "args=[\'self\', \'inputs\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "compute_mask"
+ argspec: "args=[\'self\', \'inputs\', \'mask\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "compute_output_shape"
+ argspec: "args=[\'self\', \'input_shape\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "count_params"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "from_config"
+ argspec: "args=[\'cls\', \'config\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_config"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_input_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_input_mask_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_input_shape_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_losses_for"
+ argspec: "args=[\'self\', \'inputs\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_output_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_output_mask_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_output_shape_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_updates_for"
+ argspec: "args=[\'self\', \'inputs\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_weights"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "set_weights"
+ argspec: "args=[\'self\', \'weights\'], varargs=None, keywords=None, defaults=None"
+ }
+}
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.layers.-separable-conv1-d.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.layers.-separable-conv1-d.pbtxt
new file mode 100644
index 0000000000..eaad0fb23e
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.layers.-separable-conv1-d.pbtxt
@@ -0,0 +1,187 @@
+path: "tensorflow.layers.SeparableConv1D"
+tf_class {
+ is_instance: "<class \'tensorflow.python.layers.convolutional.SeparableConv1D\'>"
+ is_instance: "<class \'tensorflow.python.keras.layers.convolutional.SeparableConv1D\'>"
+ is_instance: "<class \'tensorflow.python.keras.layers.convolutional.SeparableConv\'>"
+ is_instance: "<class \'tensorflow.python.keras.layers.convolutional.Conv\'>"
+ is_instance: "<class \'tensorflow.python.layers.base.Layer\'>"
+ is_instance: "<class \'tensorflow.python.keras.engine.base_layer.Layer\'>"
+ is_instance: "<class \'tensorflow.python.training.checkpointable.base.CheckpointableBase\'>"
+ is_instance: "<type \'object\'>"
+ member {
+ name: "activity_regularizer"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "dtype"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "graph"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "inbound_nodes"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "input"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "input_mask"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "input_shape"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "losses"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "name"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "non_trainable_variables"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "non_trainable_weights"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "outbound_nodes"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "output"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "output_mask"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "output_shape"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "scope_name"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "trainable_variables"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "trainable_weights"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "updates"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "variables"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "weights"
+ mtype: "<type \'property\'>"
+ }
+ member_method {
+ name: "__init__"
+ argspec: "args=[\'self\', \'filters\', \'kernel_size\', \'strides\', \'padding\', \'data_format\', \'dilation_rate\', \'depth_multiplier\', \'activation\', \'use_bias\', \'depthwise_initializer\', \'pointwise_initializer\', \'bias_initializer\', \'depthwise_regularizer\', \'pointwise_regularizer\', \'bias_regularizer\', \'activity_regularizer\', \'depthwise_constraint\', \'pointwise_constraint\', \'bias_constraint\', \'trainable\', \'name\'], varargs=None, keywords=kwargs, defaults=[\'1\', \'valid\', \'channels_last\', \'1\', \'1\', \'None\', \'True\', \'None\', \'None\', \'<tensorflow.python.ops.init_ops.Zeros object instance>\', \'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'True\', \'None\'], "
+ }
+ member_method {
+ name: "add_loss"
+ argspec: "args=[\'self\', \'losses\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "add_update"
+ argspec: "args=[\'self\', \'updates\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "add_variable"
+ argspec: "args=[\'self\'], varargs=args, keywords=kwargs, defaults=None"
+ }
+ member_method {
+ name: "add_weight"
+ argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'use_resource\', \'synchronization\', \'aggregation\', \'partitioner\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'VariableSynchronization.AUTO\', \'VariableAggregation.NONE\', \'None\'], "
+ }
+ member_method {
+ name: "apply"
+ argspec: "args=[\'self\', \'inputs\'], varargs=args, keywords=kwargs, defaults=None"
+ }
+ member_method {
+ name: "build"
+ argspec: "args=[\'self\', \'input_shape\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "call"
+ argspec: "args=[\'self\', \'inputs\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "compute_mask"
+ argspec: "args=[\'self\', \'inputs\', \'mask\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "compute_output_shape"
+ argspec: "args=[\'self\', \'input_shape\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "count_params"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "from_config"
+ argspec: "args=[\'cls\', \'config\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_config"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_input_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_input_mask_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_input_shape_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_losses_for"
+ argspec: "args=[\'self\', \'inputs\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_output_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_output_mask_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_output_shape_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_updates_for"
+ argspec: "args=[\'self\', \'inputs\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_weights"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "set_weights"
+ argspec: "args=[\'self\', \'weights\'], varargs=None, keywords=None, defaults=None"
+ }
+}
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.layers.-separable-conv2-d.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.layers.-separable-conv2-d.pbtxt
new file mode 100644
index 0000000000..ece28a8ce9
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.layers.-separable-conv2-d.pbtxt
@@ -0,0 +1,187 @@
+path: "tensorflow.layers.SeparableConv2D"
+tf_class {
+ is_instance: "<class \'tensorflow.python.layers.convolutional.SeparableConv2D\'>"
+ is_instance: "<class \'tensorflow.python.keras.layers.convolutional.SeparableConv2D\'>"
+ is_instance: "<class \'tensorflow.python.keras.layers.convolutional.SeparableConv\'>"
+ is_instance: "<class \'tensorflow.python.keras.layers.convolutional.Conv\'>"
+ is_instance: "<class \'tensorflow.python.layers.base.Layer\'>"
+ is_instance: "<class \'tensorflow.python.keras.engine.base_layer.Layer\'>"
+ is_instance: "<class \'tensorflow.python.training.checkpointable.base.CheckpointableBase\'>"
+ is_instance: "<type \'object\'>"
+ member {
+ name: "activity_regularizer"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "dtype"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "graph"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "inbound_nodes"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "input"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "input_mask"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "input_shape"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "losses"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "name"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "non_trainable_variables"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "non_trainable_weights"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "outbound_nodes"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "output"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "output_mask"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "output_shape"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "scope_name"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "trainable_variables"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "trainable_weights"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "updates"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "variables"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "weights"
+ mtype: "<type \'property\'>"
+ }
+ member_method {
+ name: "__init__"
+ argspec: "args=[\'self\', \'filters\', \'kernel_size\', \'strides\', \'padding\', \'data_format\', \'dilation_rate\', \'depth_multiplier\', \'activation\', \'use_bias\', \'depthwise_initializer\', \'pointwise_initializer\', \'bias_initializer\', \'depthwise_regularizer\', \'pointwise_regularizer\', \'bias_regularizer\', \'activity_regularizer\', \'depthwise_constraint\', \'pointwise_constraint\', \'bias_constraint\', \'trainable\', \'name\'], varargs=None, keywords=kwargs, defaults=[\'(1, 1)\', \'valid\', \'channels_last\', \'(1, 1)\', \'1\', \'None\', \'True\', \'None\', \'None\', \'<tensorflow.python.ops.init_ops.Zeros object instance>\', \'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'True\', \'None\'], "
+ }
+ member_method {
+ name: "add_loss"
+ argspec: "args=[\'self\', \'losses\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "add_update"
+ argspec: "args=[\'self\', \'updates\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "add_variable"
+ argspec: "args=[\'self\'], varargs=args, keywords=kwargs, defaults=None"
+ }
+ member_method {
+ name: "add_weight"
+ argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'use_resource\', \'synchronization\', \'aggregation\', \'partitioner\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'VariableSynchronization.AUTO\', \'VariableAggregation.NONE\', \'None\'], "
+ }
+ member_method {
+ name: "apply"
+ argspec: "args=[\'self\', \'inputs\'], varargs=args, keywords=kwargs, defaults=None"
+ }
+ member_method {
+ name: "build"
+ argspec: "args=[\'self\', \'input_shape\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "call"
+ argspec: "args=[\'self\', \'inputs\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "compute_mask"
+ argspec: "args=[\'self\', \'inputs\', \'mask\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "compute_output_shape"
+ argspec: "args=[\'self\', \'input_shape\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "count_params"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "from_config"
+ argspec: "args=[\'cls\', \'config\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_config"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_input_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_input_mask_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_input_shape_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_losses_for"
+ argspec: "args=[\'self\', \'inputs\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_output_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_output_mask_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_output_shape_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_updates_for"
+ argspec: "args=[\'self\', \'inputs\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_weights"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "set_weights"
+ argspec: "args=[\'self\', \'weights\'], varargs=None, keywords=None, defaults=None"
+ }
+}
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.layers.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.layers.pbtxt
new file mode 100644
index 0000000000..df74c32e1f
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.layers.pbtxt
@@ -0,0 +1,147 @@
+path: "tensorflow.layers"
+tf_module {
+ member {
+ name: "AveragePooling1D"
+ mtype: "<type \'type\'>"
+ }
+ member {
+ name: "AveragePooling2D"
+ mtype: "<type \'type\'>"
+ }
+ member {
+ name: "AveragePooling3D"
+ mtype: "<type \'type\'>"
+ }
+ member {
+ name: "BatchNormalization"
+ mtype: "<type \'type\'>"
+ }
+ member {
+ name: "Conv1D"
+ mtype: "<type \'type\'>"
+ }
+ member {
+ name: "Conv2D"
+ mtype: "<type \'type\'>"
+ }
+ member {
+ name: "Conv2DTranspose"
+ mtype: "<type \'type\'>"
+ }
+ member {
+ name: "Conv3D"
+ mtype: "<type \'type\'>"
+ }
+ member {
+ name: "Conv3DTranspose"
+ mtype: "<type \'type\'>"
+ }
+ member {
+ name: "Dense"
+ mtype: "<type \'type\'>"
+ }
+ member {
+ name: "Dropout"
+ mtype: "<type \'type\'>"
+ }
+ member {
+ name: "Flatten"
+ mtype: "<type \'type\'>"
+ }
+ member {
+ name: "InputSpec"
+ mtype: "<type \'type\'>"
+ }
+ member {
+ name: "Layer"
+ mtype: "<type \'type\'>"
+ }
+ member {
+ name: "MaxPooling1D"
+ mtype: "<type \'type\'>"
+ }
+ member {
+ name: "MaxPooling2D"
+ mtype: "<type \'type\'>"
+ }
+ member {
+ name: "MaxPooling3D"
+ mtype: "<type \'type\'>"
+ }
+ member {
+ name: "SeparableConv1D"
+ mtype: "<type \'type\'>"
+ }
+ member {
+ name: "SeparableConv2D"
+ mtype: "<type \'type\'>"
+ }
+ member_method {
+ name: "average_pooling1d"
+ argspec: "args=[\'inputs\', \'pool_size\', \'strides\', \'padding\', \'data_format\', \'name\'], varargs=None, keywords=None, defaults=[\'valid\', \'channels_last\', \'None\'], "
+ }
+ member_method {
+ name: "average_pooling2d"
+ argspec: "args=[\'inputs\', \'pool_size\', \'strides\', \'padding\', \'data_format\', \'name\'], varargs=None, keywords=None, defaults=[\'valid\', \'channels_last\', \'None\'], "
+ }
+ member_method {
+ name: "average_pooling3d"
+ argspec: "args=[\'inputs\', \'pool_size\', \'strides\', \'padding\', \'data_format\', \'name\'], varargs=None, keywords=None, defaults=[\'valid\', \'channels_last\', \'None\'], "
+ }
+ member_method {
+ name: "batch_normalization"
+ argspec: "args=[\'inputs\', \'axis\', \'momentum\', \'epsilon\', \'center\', \'scale\', \'beta_initializer\', \'gamma_initializer\', \'moving_mean_initializer\', \'moving_variance_initializer\', \'beta_regularizer\', \'gamma_regularizer\', \'beta_constraint\', \'gamma_constraint\', \'training\', \'trainable\', \'name\', \'reuse\', \'renorm\', \'renorm_clipping\', \'renorm_momentum\', \'fused\', \'virtual_batch_size\', \'adjustment\'], varargs=None, keywords=None, defaults=[\'-1\', \'0.99\', \'0.001\', \'True\', \'True\', \'<tensorflow.python.ops.init_ops.Zeros object instance>\', \'<tensorflow.python.ops.init_ops.Ones object instance>\', \'<tensorflow.python.ops.init_ops.Zeros object instance>\', \'<tensorflow.python.ops.init_ops.Ones object instance>\', \'None\', \'None\', \'None\', \'None\', \'False\', \'True\', \'None\', \'None\', \'False\', \'None\', \'0.99\', \'None\', \'None\', \'None\'], "
+ }
+ member_method {
+ name: "conv1d"
+ argspec: "args=[\'inputs\', \'filters\', \'kernel_size\', \'strides\', \'padding\', \'data_format\', \'dilation_rate\', \'activation\', \'use_bias\', \'kernel_initializer\', \'bias_initializer\', \'kernel_regularizer\', \'bias_regularizer\', \'activity_regularizer\', \'kernel_constraint\', \'bias_constraint\', \'trainable\', \'name\', \'reuse\'], varargs=None, keywords=None, defaults=[\'1\', \'valid\', \'channels_last\', \'1\', \'None\', \'True\', \'None\', \'<tensorflow.python.ops.init_ops.Zeros object instance>\', \'None\', \'None\', \'None\', \'None\', \'None\', \'True\', \'None\', \'None\'], "
+ }
+ member_method {
+ name: "conv2d"
+ argspec: "args=[\'inputs\', \'filters\', \'kernel_size\', \'strides\', \'padding\', \'data_format\', \'dilation_rate\', \'activation\', \'use_bias\', \'kernel_initializer\', \'bias_initializer\', \'kernel_regularizer\', \'bias_regularizer\', \'activity_regularizer\', \'kernel_constraint\', \'bias_constraint\', \'trainable\', \'name\', \'reuse\'], varargs=None, keywords=None, defaults=[\'(1, 1)\', \'valid\', \'channels_last\', \'(1, 1)\', \'None\', \'True\', \'None\', \'<tensorflow.python.ops.init_ops.Zeros object instance>\', \'None\', \'None\', \'None\', \'None\', \'None\', \'True\', \'None\', \'None\'], "
+ }
+ member_method {
+ name: "conv2d_transpose"
+ argspec: "args=[\'inputs\', \'filters\', \'kernel_size\', \'strides\', \'padding\', \'data_format\', \'activation\', \'use_bias\', \'kernel_initializer\', \'bias_initializer\', \'kernel_regularizer\', \'bias_regularizer\', \'activity_regularizer\', \'kernel_constraint\', \'bias_constraint\', \'trainable\', \'name\', \'reuse\'], varargs=None, keywords=None, defaults=[\'(1, 1)\', \'valid\', \'channels_last\', \'None\', \'True\', \'None\', \'<tensorflow.python.ops.init_ops.Zeros object instance>\', \'None\', \'None\', \'None\', \'None\', \'None\', \'True\', \'None\', \'None\'], "
+ }
+ member_method {
+ name: "conv3d"
+ argspec: "args=[\'inputs\', \'filters\', \'kernel_size\', \'strides\', \'padding\', \'data_format\', \'dilation_rate\', \'activation\', \'use_bias\', \'kernel_initializer\', \'bias_initializer\', \'kernel_regularizer\', \'bias_regularizer\', \'activity_regularizer\', \'kernel_constraint\', \'bias_constraint\', \'trainable\', \'name\', \'reuse\'], varargs=None, keywords=None, defaults=[\'(1, 1, 1)\', \'valid\', \'channels_last\', \'(1, 1, 1)\', \'None\', \'True\', \'None\', \'<tensorflow.python.ops.init_ops.Zeros object instance>\', \'None\', \'None\', \'None\', \'None\', \'None\', \'True\', \'None\', \'None\'], "
+ }
+ member_method {
+ name: "conv3d_transpose"
+ argspec: "args=[\'inputs\', \'filters\', \'kernel_size\', \'strides\', \'padding\', \'data_format\', \'activation\', \'use_bias\', \'kernel_initializer\', \'bias_initializer\', \'kernel_regularizer\', \'bias_regularizer\', \'activity_regularizer\', \'kernel_constraint\', \'bias_constraint\', \'trainable\', \'name\', \'reuse\'], varargs=None, keywords=None, defaults=[\'(1, 1, 1)\', \'valid\', \'channels_last\', \'None\', \'True\', \'None\', \'<tensorflow.python.ops.init_ops.Zeros object instance>\', \'None\', \'None\', \'None\', \'None\', \'None\', \'True\', \'None\', \'None\'], "
+ }
+ member_method {
+ name: "dense"
+ argspec: "args=[\'inputs\', \'units\', \'activation\', \'use_bias\', \'kernel_initializer\', \'bias_initializer\', \'kernel_regularizer\', \'bias_regularizer\', \'activity_regularizer\', \'kernel_constraint\', \'bias_constraint\', \'trainable\', \'name\', \'reuse\'], varargs=None, keywords=None, defaults=[\'None\', \'True\', \'None\', \'<tensorflow.python.ops.init_ops.Zeros object instance>\', \'None\', \'None\', \'None\', \'None\', \'None\', \'True\', \'None\', \'None\'], "
+ }
+ member_method {
+ name: "dropout"
+ argspec: "args=[\'inputs\', \'rate\', \'noise_shape\', \'seed\', \'training\', \'name\'], varargs=None, keywords=None, defaults=[\'0.5\', \'None\', \'None\', \'False\', \'None\'], "
+ }
+ member_method {
+ name: "flatten"
+ argspec: "args=[\'inputs\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "max_pooling1d"
+ argspec: "args=[\'inputs\', \'pool_size\', \'strides\', \'padding\', \'data_format\', \'name\'], varargs=None, keywords=None, defaults=[\'valid\', \'channels_last\', \'None\'], "
+ }
+ member_method {
+ name: "max_pooling2d"
+ argspec: "args=[\'inputs\', \'pool_size\', \'strides\', \'padding\', \'data_format\', \'name\'], varargs=None, keywords=None, defaults=[\'valid\', \'channels_last\', \'None\'], "
+ }
+ member_method {
+ name: "max_pooling3d"
+ argspec: "args=[\'inputs\', \'pool_size\', \'strides\', \'padding\', \'data_format\', \'name\'], varargs=None, keywords=None, defaults=[\'valid\', \'channels_last\', \'None\'], "
+ }
+ member_method {
+ name: "separable_conv1d"
+ argspec: "args=[\'inputs\', \'filters\', \'kernel_size\', \'strides\', \'padding\', \'data_format\', \'dilation_rate\', \'depth_multiplier\', \'activation\', \'use_bias\', \'depthwise_initializer\', \'pointwise_initializer\', \'bias_initializer\', \'depthwise_regularizer\', \'pointwise_regularizer\', \'bias_regularizer\', \'activity_regularizer\', \'depthwise_constraint\', \'pointwise_constraint\', \'bias_constraint\', \'trainable\', \'name\', \'reuse\'], varargs=None, keywords=None, defaults=[\'1\', \'valid\', \'channels_last\', \'1\', \'1\', \'None\', \'True\', \'None\', \'None\', \'<tensorflow.python.ops.init_ops.Zeros object instance>\', \'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'True\', \'None\', \'None\'], "
+ }
+ member_method {
+ name: "separable_conv2d"
+ argspec: "args=[\'inputs\', \'filters\', \'kernel_size\', \'strides\', \'padding\', \'data_format\', \'dilation_rate\', \'depth_multiplier\', \'activation\', \'use_bias\', \'depthwise_initializer\', \'pointwise_initializer\', \'bias_initializer\', \'depthwise_regularizer\', \'pointwise_regularizer\', \'bias_regularizer\', \'activity_regularizer\', \'depthwise_constraint\', \'pointwise_constraint\', \'bias_constraint\', \'trainable\', \'name\', \'reuse\'], varargs=None, keywords=None, defaults=[\'(1, 1)\', \'valid\', \'channels_last\', \'(1, 1)\', \'1\', \'None\', \'True\', \'None\', \'None\', \'<tensorflow.python.ops.init_ops.Zeros object instance>\', \'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'True\', \'None\', \'None\'], "
+ }
+}
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.linalg.-linear-operator-block-diag.__metaclass__.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.linalg.-linear-operator-block-diag.__metaclass__.pbtxt
new file mode 100644
index 0000000000..b6dee63176
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.linalg.-linear-operator-block-diag.__metaclass__.pbtxt
@@ -0,0 +1,14 @@
+path: "tensorflow.linalg.LinearOperatorBlockDiag.__metaclass__"
+tf_class {
+ is_instance: "<class \'abc.ABCMeta\'>"
+ member_method {
+ name: "__init__"
+ }
+ member_method {
+ name: "mro"
+ }
+ member_method {
+ name: "register"
+ argspec: "args=[\'cls\', \'subclass\'], varargs=None, keywords=None, defaults=None"
+ }
+}
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.linalg.-linear-operator-block-diag.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.linalg.-linear-operator-block-diag.pbtxt
new file mode 100644
index 0000000000..973705dae2
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.linalg.-linear-operator-block-diag.pbtxt
@@ -0,0 +1,134 @@
+path: "tensorflow.linalg.LinearOperatorBlockDiag"
+tf_class {
+ is_instance: "<class \'tensorflow.python.ops.linalg.linear_operator_block_diag.LinearOperatorBlockDiag\'>"
+ is_instance: "<class \'tensorflow.python.ops.linalg.linear_operator.LinearOperator\'>"
+ is_instance: "<type \'object\'>"
+ member {
+ name: "batch_shape"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "domain_dimension"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "dtype"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "graph_parents"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "is_non_singular"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "is_positive_definite"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "is_self_adjoint"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "is_square"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "name"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "operators"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "range_dimension"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "shape"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "tensor_rank"
+ mtype: "<type \'property\'>"
+ }
+ member_method {
+ name: "__init__"
+ argspec: "args=[\'self\', \'operators\', \'is_non_singular\', \'is_self_adjoint\', \'is_positive_definite\', \'is_square\', \'name\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'True\', \'None\'], "
+ }
+ member_method {
+ name: "add_to_tensor"
+ argspec: "args=[\'self\', \'x\', \'name\'], varargs=None, keywords=None, defaults=[\'add_to_tensor\'], "
+ }
+ member_method {
+ name: "assert_non_singular"
+ argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'assert_non_singular\'], "
+ }
+ member_method {
+ name: "assert_positive_definite"
+ argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'assert_positive_definite\'], "
+ }
+ member_method {
+ name: "assert_self_adjoint"
+ argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'assert_self_adjoint\'], "
+ }
+ member_method {
+ name: "batch_shape_tensor"
+ argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'batch_shape_tensor\'], "
+ }
+ member_method {
+ name: "determinant"
+ argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'det\'], "
+ }
+ member_method {
+ name: "diag_part"
+ argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'diag_part\'], "
+ }
+ member_method {
+ name: "domain_dimension_tensor"
+ argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'domain_dimension_tensor\'], "
+ }
+ member_method {
+ name: "log_abs_determinant"
+ argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'log_abs_det\'], "
+ }
+ member_method {
+ name: "matmul"
+ argspec: "args=[\'self\', \'x\', \'adjoint\', \'adjoint_arg\', \'name\'], varargs=None, keywords=None, defaults=[\'False\', \'False\', \'matmul\'], "
+ }
+ member_method {
+ name: "matvec"
+ argspec: "args=[\'self\', \'x\', \'adjoint\', \'name\'], varargs=None, keywords=None, defaults=[\'False\', \'matvec\'], "
+ }
+ member_method {
+ name: "range_dimension_tensor"
+ argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'range_dimension_tensor\'], "
+ }
+ member_method {
+ name: "shape_tensor"
+ argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'shape_tensor\'], "
+ }
+ member_method {
+ name: "solve"
+ argspec: "args=[\'self\', \'rhs\', \'adjoint\', \'adjoint_arg\', \'name\'], varargs=None, keywords=None, defaults=[\'False\', \'False\', \'solve\'], "
+ }
+ member_method {
+ name: "solvevec"
+ argspec: "args=[\'self\', \'rhs\', \'adjoint\', \'name\'], varargs=None, keywords=None, defaults=[\'False\', \'solve\'], "
+ }
+ member_method {
+ name: "tensor_rank_tensor"
+ argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'tensor_rank_tensor\'], "
+ }
+ member_method {
+ name: "to_dense"
+ argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'to_dense\'], "
+ }
+ member_method {
+ name: "trace"
+ argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'trace\'], "
+ }
+}
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.linalg.-linear-operator-circulant.__metaclass__.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.linalg.-linear-operator-circulant.__metaclass__.pbtxt
new file mode 100644
index 0000000000..3b33f3da97
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.linalg.-linear-operator-circulant.__metaclass__.pbtxt
@@ -0,0 +1,14 @@
+path: "tensorflow.linalg.LinearOperatorCirculant.__metaclass__"
+tf_class {
+ is_instance: "<class \'abc.ABCMeta\'>"
+ member_method {
+ name: "__init__"
+ }
+ member_method {
+ name: "mro"
+ }
+ member_method {
+ name: "register"
+ argspec: "args=[\'cls\', \'subclass\'], varargs=None, keywords=None, defaults=None"
+ }
+}
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.linalg.-linear-operator-circulant.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.linalg.-linear-operator-circulant.pbtxt
new file mode 100644
index 0000000000..de917706d5
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.linalg.-linear-operator-circulant.pbtxt
@@ -0,0 +1,155 @@
+path: "tensorflow.linalg.LinearOperatorCirculant"
+tf_class {
+ is_instance: "<class \'tensorflow.python.ops.linalg.linear_operator_circulant.LinearOperatorCirculant\'>"
+ is_instance: "<class \'tensorflow.python.ops.linalg.linear_operator_circulant._BaseLinearOperatorCirculant\'>"
+ is_instance: "<class \'tensorflow.python.ops.linalg.linear_operator.LinearOperator\'>"
+ is_instance: "<type \'object\'>"
+ member {
+ name: "batch_shape"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "block_depth"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "block_shape"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "domain_dimension"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "dtype"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "graph_parents"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "is_non_singular"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "is_positive_definite"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "is_self_adjoint"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "is_square"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "name"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "range_dimension"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "shape"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "spectrum"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "tensor_rank"
+ mtype: "<type \'property\'>"
+ }
+ member_method {
+ name: "__init__"
+ argspec: "args=[\'self\', \'spectrum\', \'input_output_dtype\', \'is_non_singular\', \'is_self_adjoint\', \'is_positive_definite\', \'is_square\', \'name\'], varargs=None, keywords=None, defaults=[\"<dtype: \'complex64\'>\", \'None\', \'None\', \'None\', \'True\', \'LinearOperatorCirculant\'], "
+ }
+ member_method {
+ name: "add_to_tensor"
+ argspec: "args=[\'self\', \'x\', \'name\'], varargs=None, keywords=None, defaults=[\'add_to_tensor\'], "
+ }
+ member_method {
+ name: "assert_hermitian_spectrum"
+ argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'assert_hermitian_spectrum\'], "
+ }
+ member_method {
+ name: "assert_non_singular"
+ argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'assert_non_singular\'], "
+ }
+ member_method {
+ name: "assert_positive_definite"
+ argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'assert_positive_definite\'], "
+ }
+ member_method {
+ name: "assert_self_adjoint"
+ argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'assert_self_adjoint\'], "
+ }
+ member_method {
+ name: "batch_shape_tensor"
+ argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'batch_shape_tensor\'], "
+ }
+ member_method {
+ name: "block_shape_tensor"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "convolution_kernel"
+ argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'convolution_kernel\'], "
+ }
+ member_method {
+ name: "determinant"
+ argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'det\'], "
+ }
+ member_method {
+ name: "diag_part"
+ argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'diag_part\'], "
+ }
+ member_method {
+ name: "domain_dimension_tensor"
+ argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'domain_dimension_tensor\'], "
+ }
+ member_method {
+ name: "log_abs_determinant"
+ argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'log_abs_det\'], "
+ }
+ member_method {
+ name: "matmul"
+ argspec: "args=[\'self\', \'x\', \'adjoint\', \'adjoint_arg\', \'name\'], varargs=None, keywords=None, defaults=[\'False\', \'False\', \'matmul\'], "
+ }
+ member_method {
+ name: "matvec"
+ argspec: "args=[\'self\', \'x\', \'adjoint\', \'name\'], varargs=None, keywords=None, defaults=[\'False\', \'matvec\'], "
+ }
+ member_method {
+ name: "range_dimension_tensor"
+ argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'range_dimension_tensor\'], "
+ }
+ member_method {
+ name: "shape_tensor"
+ argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'shape_tensor\'], "
+ }
+ member_method {
+ name: "solve"
+ argspec: "args=[\'self\', \'rhs\', \'adjoint\', \'adjoint_arg\', \'name\'], varargs=None, keywords=None, defaults=[\'False\', \'False\', \'solve\'], "
+ }
+ member_method {
+ name: "solvevec"
+ argspec: "args=[\'self\', \'rhs\', \'adjoint\', \'name\'], varargs=None, keywords=None, defaults=[\'False\', \'solve\'], "
+ }
+ member_method {
+ name: "tensor_rank_tensor"
+ argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'tensor_rank_tensor\'], "
+ }
+ member_method {
+ name: "to_dense"
+ argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'to_dense\'], "
+ }
+ member_method {
+ name: "trace"
+ argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'trace\'], "
+ }
+}
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.linalg.-linear-operator-circulant2-d.__metaclass__.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.linalg.-linear-operator-circulant2-d.__metaclass__.pbtxt
new file mode 100644
index 0000000000..591bc9631a
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.linalg.-linear-operator-circulant2-d.__metaclass__.pbtxt
@@ -0,0 +1,14 @@
+path: "tensorflow.linalg.LinearOperatorCirculant2D.__metaclass__"
+tf_class {
+ is_instance: "<class \'abc.ABCMeta\'>"
+ member_method {
+ name: "__init__"
+ }
+ member_method {
+ name: "mro"
+ }
+ member_method {
+ name: "register"
+ argspec: "args=[\'cls\', \'subclass\'], varargs=None, keywords=None, defaults=None"
+ }
+}
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.linalg.-linear-operator-circulant2-d.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.linalg.-linear-operator-circulant2-d.pbtxt
new file mode 100644
index 0000000000..c4e6a21c3a
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.linalg.-linear-operator-circulant2-d.pbtxt
@@ -0,0 +1,155 @@
+path: "tensorflow.linalg.LinearOperatorCirculant2D"
+tf_class {
+ is_instance: "<class \'tensorflow.python.ops.linalg.linear_operator_circulant.LinearOperatorCirculant2D\'>"
+ is_instance: "<class \'tensorflow.python.ops.linalg.linear_operator_circulant._BaseLinearOperatorCirculant\'>"
+ is_instance: "<class \'tensorflow.python.ops.linalg.linear_operator.LinearOperator\'>"
+ is_instance: "<type \'object\'>"
+ member {
+ name: "batch_shape"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "block_depth"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "block_shape"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "domain_dimension"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "dtype"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "graph_parents"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "is_non_singular"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "is_positive_definite"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "is_self_adjoint"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "is_square"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "name"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "range_dimension"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "shape"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "spectrum"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "tensor_rank"
+ mtype: "<type \'property\'>"
+ }
+ member_method {
+ name: "__init__"
+ argspec: "args=[\'self\', \'spectrum\', \'input_output_dtype\', \'is_non_singular\', \'is_self_adjoint\', \'is_positive_definite\', \'is_square\', \'name\'], varargs=None, keywords=None, defaults=[\"<dtype: \'complex64\'>\", \'None\', \'None\', \'None\', \'True\', \'LinearOperatorCirculant2D\'], "
+ }
+ member_method {
+ name: "add_to_tensor"
+ argspec: "args=[\'self\', \'x\', \'name\'], varargs=None, keywords=None, defaults=[\'add_to_tensor\'], "
+ }
+ member_method {
+ name: "assert_hermitian_spectrum"
+ argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'assert_hermitian_spectrum\'], "
+ }
+ member_method {
+ name: "assert_non_singular"
+ argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'assert_non_singular\'], "
+ }
+ member_method {
+ name: "assert_positive_definite"
+ argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'assert_positive_definite\'], "
+ }
+ member_method {
+ name: "assert_self_adjoint"
+ argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'assert_self_adjoint\'], "
+ }
+ member_method {
+ name: "batch_shape_tensor"
+ argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'batch_shape_tensor\'], "
+ }
+ member_method {
+ name: "block_shape_tensor"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "convolution_kernel"
+ argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'convolution_kernel\'], "
+ }
+ member_method {
+ name: "determinant"
+ argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'det\'], "
+ }
+ member_method {
+ name: "diag_part"
+ argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'diag_part\'], "
+ }
+ member_method {
+ name: "domain_dimension_tensor"
+ argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'domain_dimension_tensor\'], "
+ }
+ member_method {
+ name: "log_abs_determinant"
+ argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'log_abs_det\'], "
+ }
+ member_method {
+ name: "matmul"
+ argspec: "args=[\'self\', \'x\', \'adjoint\', \'adjoint_arg\', \'name\'], varargs=None, keywords=None, defaults=[\'False\', \'False\', \'matmul\'], "
+ }
+ member_method {
+ name: "matvec"
+ argspec: "args=[\'self\', \'x\', \'adjoint\', \'name\'], varargs=None, keywords=None, defaults=[\'False\', \'matvec\'], "
+ }
+ member_method {
+ name: "range_dimension_tensor"
+ argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'range_dimension_tensor\'], "
+ }
+ member_method {
+ name: "shape_tensor"
+ argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'shape_tensor\'], "
+ }
+ member_method {
+ name: "solve"
+ argspec: "args=[\'self\', \'rhs\', \'adjoint\', \'adjoint_arg\', \'name\'], varargs=None, keywords=None, defaults=[\'False\', \'False\', \'solve\'], "
+ }
+ member_method {
+ name: "solvevec"
+ argspec: "args=[\'self\', \'rhs\', \'adjoint\', \'name\'], varargs=None, keywords=None, defaults=[\'False\', \'solve\'], "
+ }
+ member_method {
+ name: "tensor_rank_tensor"
+ argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'tensor_rank_tensor\'], "
+ }
+ member_method {
+ name: "to_dense"
+ argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'to_dense\'], "
+ }
+ member_method {
+ name: "trace"
+ argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'trace\'], "
+ }
+}
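
Usage note (not part of the diff): a minimal sketch of the tf.linalg.LinearOperatorCirculant2D API recorded in the golden file above. Class, method, and argument names come from the argspecs; the spectrum values are illustrative only.

    import tensorflow as tf

    # A real, positive [2, 2] spectrum defines a self-adjoint, positive-definite
    # 4 x 4 block-circulant operator; the default input_output_dtype is complex64.
    spectrum = tf.cast([[1., 2.], [3., 4.]], tf.complex64)
    op = tf.linalg.LinearOperatorCirculant2D(
        spectrum, is_self_adjoint=True, is_positive_definite=True)

    x = tf.cast([1., 0., 0., 0.], tf.complex64)
    y = op.matvec(x)                  # applied via 2-D FFTs, result shape [4]
    kernel = op.convolution_kernel()  # kernel of the underlying circular convolution
    dense = op.to_dense()             # materialize the [4, 4] matrix if needed
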
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.linalg.-linear-operator-circulant3-d.__metaclass__.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.linalg.-linear-operator-circulant3-d.__metaclass__.pbtxt
new file mode 100644
index 0000000000..d643139a53
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.linalg.-linear-operator-circulant3-d.__metaclass__.pbtxt
@@ -0,0 +1,14 @@
+path: "tensorflow.linalg.LinearOperatorCirculant3D.__metaclass__"
+tf_class {
+ is_instance: "<class \'abc.ABCMeta\'>"
+ member_method {
+ name: "__init__"
+ }
+ member_method {
+ name: "mro"
+ }
+ member_method {
+ name: "register"
+ argspec: "args=[\'cls\', \'subclass\'], varargs=None, keywords=None, defaults=None"
+ }
+}
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.linalg.-linear-operator-circulant3-d.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.linalg.-linear-operator-circulant3-d.pbtxt
new file mode 100644
index 0000000000..2e085a8e28
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.linalg.-linear-operator-circulant3-d.pbtxt
@@ -0,0 +1,155 @@
+path: "tensorflow.linalg.LinearOperatorCirculant3D"
+tf_class {
+ is_instance: "<class \'tensorflow.python.ops.linalg.linear_operator_circulant.LinearOperatorCirculant3D\'>"
+ is_instance: "<class \'tensorflow.python.ops.linalg.linear_operator_circulant._BaseLinearOperatorCirculant\'>"
+ is_instance: "<class \'tensorflow.python.ops.linalg.linear_operator.LinearOperator\'>"
+ is_instance: "<type \'object\'>"
+ member {
+ name: "batch_shape"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "block_depth"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "block_shape"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "domain_dimension"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "dtype"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "graph_parents"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "is_non_singular"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "is_positive_definite"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "is_self_adjoint"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "is_square"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "name"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "range_dimension"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "shape"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "spectrum"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "tensor_rank"
+ mtype: "<type \'property\'>"
+ }
+ member_method {
+ name: "__init__"
+ argspec: "args=[\'self\', \'spectrum\', \'input_output_dtype\', \'is_non_singular\', \'is_self_adjoint\', \'is_positive_definite\', \'is_square\', \'name\'], varargs=None, keywords=None, defaults=[\"<dtype: \'complex64\'>\", \'None\', \'None\', \'None\', \'True\', \'LinearOperatorCirculant3D\'], "
+ }
+ member_method {
+ name: "add_to_tensor"
+ argspec: "args=[\'self\', \'x\', \'name\'], varargs=None, keywords=None, defaults=[\'add_to_tensor\'], "
+ }
+ member_method {
+ name: "assert_hermitian_spectrum"
+ argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'assert_hermitian_spectrum\'], "
+ }
+ member_method {
+ name: "assert_non_singular"
+ argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'assert_non_singular\'], "
+ }
+ member_method {
+ name: "assert_positive_definite"
+ argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'assert_positive_definite\'], "
+ }
+ member_method {
+ name: "assert_self_adjoint"
+ argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'assert_self_adjoint\'], "
+ }
+ member_method {
+ name: "batch_shape_tensor"
+ argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'batch_shape_tensor\'], "
+ }
+ member_method {
+ name: "block_shape_tensor"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "convolution_kernel"
+ argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'convolution_kernel\'], "
+ }
+ member_method {
+ name: "determinant"
+ argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'det\'], "
+ }
+ member_method {
+ name: "diag_part"
+ argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'diag_part\'], "
+ }
+ member_method {
+ name: "domain_dimension_tensor"
+ argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'domain_dimension_tensor\'], "
+ }
+ member_method {
+ name: "log_abs_determinant"
+ argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'log_abs_det\'], "
+ }
+ member_method {
+ name: "matmul"
+ argspec: "args=[\'self\', \'x\', \'adjoint\', \'adjoint_arg\', \'name\'], varargs=None, keywords=None, defaults=[\'False\', \'False\', \'matmul\'], "
+ }
+ member_method {
+ name: "matvec"
+ argspec: "args=[\'self\', \'x\', \'adjoint\', \'name\'], varargs=None, keywords=None, defaults=[\'False\', \'matvec\'], "
+ }
+ member_method {
+ name: "range_dimension_tensor"
+ argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'range_dimension_tensor\'], "
+ }
+ member_method {
+ name: "shape_tensor"
+ argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'shape_tensor\'], "
+ }
+ member_method {
+ name: "solve"
+ argspec: "args=[\'self\', \'rhs\', \'adjoint\', \'adjoint_arg\', \'name\'], varargs=None, keywords=None, defaults=[\'False\', \'False\', \'solve\'], "
+ }
+ member_method {
+ name: "solvevec"
+ argspec: "args=[\'self\', \'rhs\', \'adjoint\', \'name\'], varargs=None, keywords=None, defaults=[\'False\', \'solve\'], "
+ }
+ member_method {
+ name: "tensor_rank_tensor"
+ argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'tensor_rank_tensor\'], "
+ }
+ member_method {
+ name: "to_dense"
+ argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'to_dense\'], "
+ }
+ member_method {
+ name: "trace"
+ argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'trace\'], "
+ }
+}
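
Same pattern for the 3-D variant (sketch, not part of the diff): the spectrum carries three block dimensions, which the block_depth and block_shape properties listed above describe.

    import tensorflow as tf

    spectrum = tf.cast(tf.ones([2, 2, 2]), tf.complex64)   # an 8 x 8 operator
    op = tf.linalg.LinearOperatorCirculant3D(
        spectrum, is_self_adjoint=True, is_positive_definite=True)

    op.block_depth    # 3
    op.block_shape    # [2, 2, 2]
    op.shape          # [8, 8]
    op.solvevec(tf.cast(tf.ones([8]), tf.complex64))   # FFT-based solve
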
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.linalg.-linear-operator-composition.__metaclass__.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.linalg.-linear-operator-composition.__metaclass__.pbtxt
new file mode 100644
index 0000000000..1adbcb41ad
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.linalg.-linear-operator-composition.__metaclass__.pbtxt
@@ -0,0 +1,14 @@
+path: "tensorflow.linalg.LinearOperatorComposition.__metaclass__"
+tf_class {
+ is_instance: "<class \'abc.ABCMeta\'>"
+ member_method {
+ name: "__init__"
+ }
+ member_method {
+ name: "mro"
+ }
+ member_method {
+ name: "register"
+ argspec: "args=[\'cls\', \'subclass\'], varargs=None, keywords=None, defaults=None"
+ }
+}
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.linalg.-linear-operator-composition.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.linalg.-linear-operator-composition.pbtxt
new file mode 100644
index 0000000000..42d22bce42
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.linalg.-linear-operator-composition.pbtxt
@@ -0,0 +1,134 @@
+path: "tensorflow.linalg.LinearOperatorComposition"
+tf_class {
+ is_instance: "<class \'tensorflow.python.ops.linalg.linear_operator_composition.LinearOperatorComposition\'>"
+ is_instance: "<class \'tensorflow.python.ops.linalg.linear_operator.LinearOperator\'>"
+ is_instance: "<type \'object\'>"
+ member {
+ name: "batch_shape"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "domain_dimension"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "dtype"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "graph_parents"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "is_non_singular"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "is_positive_definite"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "is_self_adjoint"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "is_square"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "name"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "operators"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "range_dimension"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "shape"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "tensor_rank"
+ mtype: "<type \'property\'>"
+ }
+ member_method {
+ name: "__init__"
+ argspec: "args=[\'self\', \'operators\', \'is_non_singular\', \'is_self_adjoint\', \'is_positive_definite\', \'is_square\', \'name\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'None\', \'None\'], "
+ }
+ member_method {
+ name: "add_to_tensor"
+ argspec: "args=[\'self\', \'x\', \'name\'], varargs=None, keywords=None, defaults=[\'add_to_tensor\'], "
+ }
+ member_method {
+ name: "assert_non_singular"
+ argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'assert_non_singular\'], "
+ }
+ member_method {
+ name: "assert_positive_definite"
+ argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'assert_positive_definite\'], "
+ }
+ member_method {
+ name: "assert_self_adjoint"
+ argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'assert_self_adjoint\'], "
+ }
+ member_method {
+ name: "batch_shape_tensor"
+ argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'batch_shape_tensor\'], "
+ }
+ member_method {
+ name: "determinant"
+ argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'det\'], "
+ }
+ member_method {
+ name: "diag_part"
+ argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'diag_part\'], "
+ }
+ member_method {
+ name: "domain_dimension_tensor"
+ argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'domain_dimension_tensor\'], "
+ }
+ member_method {
+ name: "log_abs_determinant"
+ argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'log_abs_det\'], "
+ }
+ member_method {
+ name: "matmul"
+ argspec: "args=[\'self\', \'x\', \'adjoint\', \'adjoint_arg\', \'name\'], varargs=None, keywords=None, defaults=[\'False\', \'False\', \'matmul\'], "
+ }
+ member_method {
+ name: "matvec"
+ argspec: "args=[\'self\', \'x\', \'adjoint\', \'name\'], varargs=None, keywords=None, defaults=[\'False\', \'matvec\'], "
+ }
+ member_method {
+ name: "range_dimension_tensor"
+ argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'range_dimension_tensor\'], "
+ }
+ member_method {
+ name: "shape_tensor"
+ argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'shape_tensor\'], "
+ }
+ member_method {
+ name: "solve"
+ argspec: "args=[\'self\', \'rhs\', \'adjoint\', \'adjoint_arg\', \'name\'], varargs=None, keywords=None, defaults=[\'False\', \'False\', \'solve\'], "
+ }
+ member_method {
+ name: "solvevec"
+ argspec: "args=[\'self\', \'rhs\', \'adjoint\', \'name\'], varargs=None, keywords=None, defaults=[\'False\', \'solve\'], "
+ }
+ member_method {
+ name: "tensor_rank_tensor"
+ argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'tensor_rank_tensor\'], "
+ }
+ member_method {
+ name: "to_dense"
+ argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'to_dense\'], "
+ }
+ member_method {
+ name: "trace"
+ argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'trace\'], "
+ }
+}
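
LinearOperatorComposition (sketch, not part of the diff): operators is the only required argument, and the composition acts like the matmul of its factors in order. Values below are illustrative.

    import tensorflow as tf

    a = tf.linalg.LinearOperatorFullMatrix([[1., 2.], [3., 4.]])
    b = tf.linalg.LinearOperatorFullMatrix([[1., 0.], [0., 2.]])
    op = tf.linalg.LinearOperatorComposition([a, b])   # acts like a @ b
    op.to_dense()        # [[1., 4.], [3., 8.]]
    op.matvec([1., 1.])  # [5., 11.]
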
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.linalg.-linear-operator-diag.__metaclass__.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.linalg.-linear-operator-diag.__metaclass__.pbtxt
new file mode 100644
index 0000000000..023d90ccdb
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.linalg.-linear-operator-diag.__metaclass__.pbtxt
@@ -0,0 +1,14 @@
+path: "tensorflow.linalg.LinearOperatorDiag.__metaclass__"
+tf_class {
+ is_instance: "<class \'abc.ABCMeta\'>"
+ member_method {
+ name: "__init__"
+ }
+ member_method {
+ name: "mro"
+ }
+ member_method {
+ name: "register"
+ argspec: "args=[\'cls\', \'subclass\'], varargs=None, keywords=None, defaults=None"
+ }
+}
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.linalg.-linear-operator-diag.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.linalg.-linear-operator-diag.pbtxt
new file mode 100644
index 0000000000..d6749fdcec
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.linalg.-linear-operator-diag.pbtxt
@@ -0,0 +1,134 @@
+path: "tensorflow.linalg.LinearOperatorDiag"
+tf_class {
+ is_instance: "<class \'tensorflow.python.ops.linalg.linear_operator_diag.LinearOperatorDiag\'>"
+ is_instance: "<class \'tensorflow.python.ops.linalg.linear_operator.LinearOperator\'>"
+ is_instance: "<type \'object\'>"
+ member {
+ name: "batch_shape"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "diag"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "domain_dimension"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "dtype"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "graph_parents"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "is_non_singular"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "is_positive_definite"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "is_self_adjoint"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "is_square"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "name"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "range_dimension"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "shape"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "tensor_rank"
+ mtype: "<type \'property\'>"
+ }
+ member_method {
+ name: "__init__"
+ argspec: "args=[\'self\', \'diag\', \'is_non_singular\', \'is_self_adjoint\', \'is_positive_definite\', \'is_square\', \'name\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'None\', \'LinearOperatorDiag\'], "
+ }
+ member_method {
+ name: "add_to_tensor"
+ argspec: "args=[\'self\', \'x\', \'name\'], varargs=None, keywords=None, defaults=[\'add_to_tensor\'], "
+ }
+ member_method {
+ name: "assert_non_singular"
+ argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'assert_non_singular\'], "
+ }
+ member_method {
+ name: "assert_positive_definite"
+ argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'assert_positive_definite\'], "
+ }
+ member_method {
+ name: "assert_self_adjoint"
+ argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'assert_self_adjoint\'], "
+ }
+ member_method {
+ name: "batch_shape_tensor"
+ argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'batch_shape_tensor\'], "
+ }
+ member_method {
+ name: "determinant"
+ argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'det\'], "
+ }
+ member_method {
+ name: "diag_part"
+ argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'diag_part\'], "
+ }
+ member_method {
+ name: "domain_dimension_tensor"
+ argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'domain_dimension_tensor\'], "
+ }
+ member_method {
+ name: "log_abs_determinant"
+ argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'log_abs_det\'], "
+ }
+ member_method {
+ name: "matmul"
+ argspec: "args=[\'self\', \'x\', \'adjoint\', \'adjoint_arg\', \'name\'], varargs=None, keywords=None, defaults=[\'False\', \'False\', \'matmul\'], "
+ }
+ member_method {
+ name: "matvec"
+ argspec: "args=[\'self\', \'x\', \'adjoint\', \'name\'], varargs=None, keywords=None, defaults=[\'False\', \'matvec\'], "
+ }
+ member_method {
+ name: "range_dimension_tensor"
+ argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'range_dimension_tensor\'], "
+ }
+ member_method {
+ name: "shape_tensor"
+ argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'shape_tensor\'], "
+ }
+ member_method {
+ name: "solve"
+ argspec: "args=[\'self\', \'rhs\', \'adjoint\', \'adjoint_arg\', \'name\'], varargs=None, keywords=None, defaults=[\'False\', \'False\', \'solve\'], "
+ }
+ member_method {
+ name: "solvevec"
+ argspec: "args=[\'self\', \'rhs\', \'adjoint\', \'name\'], varargs=None, keywords=None, defaults=[\'False\', \'solve\'], "
+ }
+ member_method {
+ name: "tensor_rank_tensor"
+ argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'tensor_rank_tensor\'], "
+ }
+ member_method {
+ name: "to_dense"
+ argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'to_dense\'], "
+ }
+ member_method {
+ name: "trace"
+ argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'trace\'], "
+ }
+}
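
LinearOperatorDiag (sketch, not part of the diff): only the diagonal is stored, so matvec, solvevec, and the determinant all cost O(N). Numeric values are illustrative.

    import tensorflow as tf

    op = tf.linalg.LinearOperatorDiag(
        [1., 2., 4.], is_non_singular=True,
        is_self_adjoint=True, is_positive_definite=True)
    op.matvec([1., 1., 1.])    # [1., 2., 4.]
    op.solvevec([2., 4., 8.])  # [2., 2., 2.]
    op.log_abs_determinant()   # log(8.)
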
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.linalg.-linear-operator-full-matrix.__metaclass__.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.linalg.-linear-operator-full-matrix.__metaclass__.pbtxt
new file mode 100644
index 0000000000..381072e76c
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.linalg.-linear-operator-full-matrix.__metaclass__.pbtxt
@@ -0,0 +1,14 @@
+path: "tensorflow.linalg.LinearOperatorFullMatrix.__metaclass__"
+tf_class {
+ is_instance: "<class \'abc.ABCMeta\'>"
+ member_method {
+ name: "__init__"
+ }
+ member_method {
+ name: "mro"
+ }
+ member_method {
+ name: "register"
+ argspec: "args=[\'cls\', \'subclass\'], varargs=None, keywords=None, defaults=None"
+ }
+}
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.linalg.-linear-operator-full-matrix.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.linalg.-linear-operator-full-matrix.pbtxt
new file mode 100644
index 0000000000..d9f363d133
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.linalg.-linear-operator-full-matrix.pbtxt
@@ -0,0 +1,130 @@
+path: "tensorflow.linalg.LinearOperatorFullMatrix"
+tf_class {
+ is_instance: "<class \'tensorflow.python.ops.linalg.linear_operator_full_matrix.LinearOperatorFullMatrix\'>"
+ is_instance: "<class \'tensorflow.python.ops.linalg.linear_operator.LinearOperator\'>"
+ is_instance: "<type \'object\'>"
+ member {
+ name: "batch_shape"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "domain_dimension"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "dtype"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "graph_parents"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "is_non_singular"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "is_positive_definite"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "is_self_adjoint"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "is_square"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "name"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "range_dimension"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "shape"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "tensor_rank"
+ mtype: "<type \'property\'>"
+ }
+ member_method {
+ name: "__init__"
+ argspec: "args=[\'self\', \'matrix\', \'is_non_singular\', \'is_self_adjoint\', \'is_positive_definite\', \'is_square\', \'name\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'None\', \'LinearOperatorFullMatrix\'], "
+ }
+ member_method {
+ name: "add_to_tensor"
+ argspec: "args=[\'self\', \'x\', \'name\'], varargs=None, keywords=None, defaults=[\'add_to_tensor\'], "
+ }
+ member_method {
+ name: "assert_non_singular"
+ argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'assert_non_singular\'], "
+ }
+ member_method {
+ name: "assert_positive_definite"
+ argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'assert_positive_definite\'], "
+ }
+ member_method {
+ name: "assert_self_adjoint"
+ argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'assert_self_adjoint\'], "
+ }
+ member_method {
+ name: "batch_shape_tensor"
+ argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'batch_shape_tensor\'], "
+ }
+ member_method {
+ name: "determinant"
+ argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'det\'], "
+ }
+ member_method {
+ name: "diag_part"
+ argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'diag_part\'], "
+ }
+ member_method {
+ name: "domain_dimension_tensor"
+ argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'domain_dimension_tensor\'], "
+ }
+ member_method {
+ name: "log_abs_determinant"
+ argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'log_abs_det\'], "
+ }
+ member_method {
+ name: "matmul"
+ argspec: "args=[\'self\', \'x\', \'adjoint\', \'adjoint_arg\', \'name\'], varargs=None, keywords=None, defaults=[\'False\', \'False\', \'matmul\'], "
+ }
+ member_method {
+ name: "matvec"
+ argspec: "args=[\'self\', \'x\', \'adjoint\', \'name\'], varargs=None, keywords=None, defaults=[\'False\', \'matvec\'], "
+ }
+ member_method {
+ name: "range_dimension_tensor"
+ argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'range_dimension_tensor\'], "
+ }
+ member_method {
+ name: "shape_tensor"
+ argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'shape_tensor\'], "
+ }
+ member_method {
+ name: "solve"
+ argspec: "args=[\'self\', \'rhs\', \'adjoint\', \'adjoint_arg\', \'name\'], varargs=None, keywords=None, defaults=[\'False\', \'False\', \'solve\'], "
+ }
+ member_method {
+ name: "solvevec"
+ argspec: "args=[\'self\', \'rhs\', \'adjoint\', \'name\'], varargs=None, keywords=None, defaults=[\'False\', \'solve\'], "
+ }
+ member_method {
+ name: "tensor_rank_tensor"
+ argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'tensor_rank_tensor\'], "
+ }
+ member_method {
+ name: "to_dense"
+ argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'to_dense\'], "
+ }
+ member_method {
+ name: "trace"
+ argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'trace\'], "
+ }
+}
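
LinearOperatorFullMatrix (sketch, not part of the diff): wraps a dense matrix so it can be used anywhere a LinearOperator is expected; no structure is assumed beyond the is_* hints passed at construction.

    import tensorflow as tf

    op = tf.linalg.LinearOperatorFullMatrix([[1., 2.], [3., 4.]])
    op.matmul([[1., 0.], [0., 1.]])   # the wrapped matrix, [[1., 2.], [3., 4.]]
    op.determinant()                  # -2.
    op.trace()                        # 5.
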
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.linalg.-linear-operator-identity.__metaclass__.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.linalg.-linear-operator-identity.__metaclass__.pbtxt
new file mode 100644
index 0000000000..5d115b35fb
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.linalg.-linear-operator-identity.__metaclass__.pbtxt
@@ -0,0 +1,14 @@
+path: "tensorflow.linalg.LinearOperatorIdentity.__metaclass__"
+tf_class {
+ is_instance: "<class \'abc.ABCMeta\'>"
+ member_method {
+ name: "__init__"
+ }
+ member_method {
+ name: "mro"
+ }
+ member_method {
+ name: "register"
+ argspec: "args=[\'cls\', \'subclass\'], varargs=None, keywords=None, defaults=None"
+ }
+}
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.linalg.-linear-operator-identity.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.linalg.-linear-operator-identity.pbtxt
new file mode 100644
index 0000000000..aac7ee31ed
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.linalg.-linear-operator-identity.pbtxt
@@ -0,0 +1,131 @@
+path: "tensorflow.linalg.LinearOperatorIdentity"
+tf_class {
+ is_instance: "<class \'tensorflow.python.ops.linalg.linear_operator_identity.LinearOperatorIdentity\'>"
+ is_instance: "<class \'tensorflow.python.ops.linalg.linear_operator_identity.BaseLinearOperatorIdentity\'>"
+ is_instance: "<class \'tensorflow.python.ops.linalg.linear_operator.LinearOperator\'>"
+ is_instance: "<type \'object\'>"
+ member {
+ name: "batch_shape"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "domain_dimension"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "dtype"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "graph_parents"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "is_non_singular"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "is_positive_definite"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "is_self_adjoint"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "is_square"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "name"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "range_dimension"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "shape"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "tensor_rank"
+ mtype: "<type \'property\'>"
+ }
+ member_method {
+ name: "__init__"
+ argspec: "args=[\'self\', \'num_rows\', \'batch_shape\', \'dtype\', \'is_non_singular\', \'is_self_adjoint\', \'is_positive_definite\', \'is_square\', \'assert_proper_shapes\', \'name\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'True\', \'True\', \'True\', \'True\', \'False\', \'LinearOperatorIdentity\'], "
+ }
+ member_method {
+ name: "add_to_tensor"
+ argspec: "args=[\'self\', \'mat\', \'name\'], varargs=None, keywords=None, defaults=[\'add_to_tensor\'], "
+ }
+ member_method {
+ name: "assert_non_singular"
+ argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'assert_non_singular\'], "
+ }
+ member_method {
+ name: "assert_positive_definite"
+ argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'assert_positive_definite\'], "
+ }
+ member_method {
+ name: "assert_self_adjoint"
+ argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'assert_self_adjoint\'], "
+ }
+ member_method {
+ name: "batch_shape_tensor"
+ argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'batch_shape_tensor\'], "
+ }
+ member_method {
+ name: "determinant"
+ argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'det\'], "
+ }
+ member_method {
+ name: "diag_part"
+ argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'diag_part\'], "
+ }
+ member_method {
+ name: "domain_dimension_tensor"
+ argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'domain_dimension_tensor\'], "
+ }
+ member_method {
+ name: "log_abs_determinant"
+ argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'log_abs_det\'], "
+ }
+ member_method {
+ name: "matmul"
+ argspec: "args=[\'self\', \'x\', \'adjoint\', \'adjoint_arg\', \'name\'], varargs=None, keywords=None, defaults=[\'False\', \'False\', \'matmul\'], "
+ }
+ member_method {
+ name: "matvec"
+ argspec: "args=[\'self\', \'x\', \'adjoint\', \'name\'], varargs=None, keywords=None, defaults=[\'False\', \'matvec\'], "
+ }
+ member_method {
+ name: "range_dimension_tensor"
+ argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'range_dimension_tensor\'], "
+ }
+ member_method {
+ name: "shape_tensor"
+ argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'shape_tensor\'], "
+ }
+ member_method {
+ name: "solve"
+ argspec: "args=[\'self\', \'rhs\', \'adjoint\', \'adjoint_arg\', \'name\'], varargs=None, keywords=None, defaults=[\'False\', \'False\', \'solve\'], "
+ }
+ member_method {
+ name: "solvevec"
+ argspec: "args=[\'self\', \'rhs\', \'adjoint\', \'name\'], varargs=None, keywords=None, defaults=[\'False\', \'solve\'], "
+ }
+ member_method {
+ name: "tensor_rank_tensor"
+ argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'tensor_rank_tensor\'], "
+ }
+ member_method {
+ name: "to_dense"
+ argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'to_dense\'], "
+ }
+ member_method {
+ name: "trace"
+ argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'trace\'], "
+ }
+}
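
LinearOperatorIdentity (sketch, not part of the diff): num_rows is the only required argument; matmul and solve return their argument unchanged, and add_to_tensor adds 1 to the diagonal without ever materializing an identity matrix.

    import tensorflow as tf

    op = tf.linalg.LinearOperatorIdentity(num_rows=2)   # dtype defaults to float32
    op.matvec([1., 5.])                                 # [1., 5.]
    op.solve([[7.], [8.]])                              # [[7.], [8.]]
    op.add_to_tensor([[1., 2.], [3., 4.]])              # [[2., 2.], [3., 5.]]
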
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.linalg.-linear-operator-kronecker.__metaclass__.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.linalg.-linear-operator-kronecker.__metaclass__.pbtxt
new file mode 100644
index 0000000000..5c6784dd02
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.linalg.-linear-operator-kronecker.__metaclass__.pbtxt
@@ -0,0 +1,14 @@
+path: "tensorflow.linalg.LinearOperatorKronecker.__metaclass__"
+tf_class {
+ is_instance: "<class \'abc.ABCMeta\'>"
+ member_method {
+ name: "__init__"
+ }
+ member_method {
+ name: "mro"
+ }
+ member_method {
+ name: "register"
+ argspec: "args=[\'cls\', \'subclass\'], varargs=None, keywords=None, defaults=None"
+ }
+}
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.linalg.-linear-operator-kronecker.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.linalg.-linear-operator-kronecker.pbtxt
new file mode 100644
index 0000000000..c11d390829
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.linalg.-linear-operator-kronecker.pbtxt
@@ -0,0 +1,134 @@
+path: "tensorflow.linalg.LinearOperatorKronecker"
+tf_class {
+ is_instance: "<class \'tensorflow.python.ops.linalg.linear_operator_kronecker.LinearOperatorKronecker\'>"
+ is_instance: "<class \'tensorflow.python.ops.linalg.linear_operator.LinearOperator\'>"
+ is_instance: "<type \'object\'>"
+ member {
+ name: "batch_shape"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "domain_dimension"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "dtype"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "graph_parents"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "is_non_singular"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "is_positive_definite"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "is_self_adjoint"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "is_square"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "name"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "operators"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "range_dimension"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "shape"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "tensor_rank"
+ mtype: "<type \'property\'>"
+ }
+ member_method {
+ name: "__init__"
+ argspec: "args=[\'self\', \'operators\', \'is_non_singular\', \'is_self_adjoint\', \'is_positive_definite\', \'is_square\', \'name\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'None\', \'None\'], "
+ }
+ member_method {
+ name: "add_to_tensor"
+ argspec: "args=[\'self\', \'x\', \'name\'], varargs=None, keywords=None, defaults=[\'add_to_tensor\'], "
+ }
+ member_method {
+ name: "assert_non_singular"
+ argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'assert_non_singular\'], "
+ }
+ member_method {
+ name: "assert_positive_definite"
+ argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'assert_positive_definite\'], "
+ }
+ member_method {
+ name: "assert_self_adjoint"
+ argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'assert_self_adjoint\'], "
+ }
+ member_method {
+ name: "batch_shape_tensor"
+ argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'batch_shape_tensor\'], "
+ }
+ member_method {
+ name: "determinant"
+ argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'det\'], "
+ }
+ member_method {
+ name: "diag_part"
+ argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'diag_part\'], "
+ }
+ member_method {
+ name: "domain_dimension_tensor"
+ argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'domain_dimension_tensor\'], "
+ }
+ member_method {
+ name: "log_abs_determinant"
+ argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'log_abs_det\'], "
+ }
+ member_method {
+ name: "matmul"
+ argspec: "args=[\'self\', \'x\', \'adjoint\', \'adjoint_arg\', \'name\'], varargs=None, keywords=None, defaults=[\'False\', \'False\', \'matmul\'], "
+ }
+ member_method {
+ name: "matvec"
+ argspec: "args=[\'self\', \'x\', \'adjoint\', \'name\'], varargs=None, keywords=None, defaults=[\'False\', \'matvec\'], "
+ }
+ member_method {
+ name: "range_dimension_tensor"
+ argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'range_dimension_tensor\'], "
+ }
+ member_method {
+ name: "shape_tensor"
+ argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'shape_tensor\'], "
+ }
+ member_method {
+ name: "solve"
+ argspec: "args=[\'self\', \'rhs\', \'adjoint\', \'adjoint_arg\', \'name\'], varargs=None, keywords=None, defaults=[\'False\', \'False\', \'solve\'], "
+ }
+ member_method {
+ name: "solvevec"
+ argspec: "args=[\'self\', \'rhs\', \'adjoint\', \'name\'], varargs=None, keywords=None, defaults=[\'False\', \'solve\'], "
+ }
+ member_method {
+ name: "tensor_rank_tensor"
+ argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'tensor_rank_tensor\'], "
+ }
+ member_method {
+ name: "to_dense"
+ argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'to_dense\'], "
+ }
+ member_method {
+ name: "trace"
+ argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'trace\'], "
+ }
+}
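
LinearOperatorKronecker (sketch, not part of the diff): acts like the Kronecker product of its factors taken in order, so the dense shape is the product of the factor shapes. Values are illustrative.

    import tensorflow as tf

    d = tf.linalg.LinearOperatorDiag([1., 2.])
    p = tf.linalg.LinearOperatorFullMatrix([[0., 1.], [1., 0.]])
    op = tf.linalg.LinearOperatorKronecker([d, p])   # kron(diag([1, 2]), swap)
    op.shape        # [4, 4]
    op.to_dense()   # [[0, 1, 0, 0], [1, 0, 0, 0], [0, 0, 0, 2], [0, 0, 2, 0]]
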
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.linalg.-linear-operator-low-rank-update.__metaclass__.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.linalg.-linear-operator-low-rank-update.__metaclass__.pbtxt
new file mode 100644
index 0000000000..1f0d33298a
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.linalg.-linear-operator-low-rank-update.__metaclass__.pbtxt
@@ -0,0 +1,14 @@
+path: "tensorflow.linalg.LinearOperatorLowRankUpdate.__metaclass__"
+tf_class {
+ is_instance: "<class \'abc.ABCMeta\'>"
+ member_method {
+ name: "__init__"
+ }
+ member_method {
+ name: "mro"
+ }
+ member_method {
+ name: "register"
+ argspec: "args=[\'cls\', \'subclass\'], varargs=None, keywords=None, defaults=None"
+ }
+}
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.linalg.-linear-operator-low-rank-update.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.linalg.-linear-operator-low-rank-update.pbtxt
new file mode 100644
index 0000000000..3ee800269e
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.linalg.-linear-operator-low-rank-update.pbtxt
@@ -0,0 +1,154 @@
+path: "tensorflow.linalg.LinearOperatorLowRankUpdate"
+tf_class {
+ is_instance: "<class \'tensorflow.python.ops.linalg.linear_operator_low_rank_update.LinearOperatorLowRankUpdate\'>"
+ is_instance: "<class \'tensorflow.python.ops.linalg.linear_operator.LinearOperator\'>"
+ is_instance: "<type \'object\'>"
+ member {
+ name: "base_operator"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "batch_shape"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "diag_operator"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "diag_update"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "domain_dimension"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "dtype"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "graph_parents"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "is_diag_update_positive"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "is_non_singular"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "is_positive_definite"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "is_self_adjoint"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "is_square"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "name"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "range_dimension"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "shape"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "tensor_rank"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "u"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "v"
+ mtype: "<type \'property\'>"
+ }
+ member_method {
+ name: "__init__"
+ argspec: "args=[\'self\', \'base_operator\', \'u\', \'diag_update\', \'v\', \'is_diag_update_positive\', \'is_non_singular\', \'is_self_adjoint\', \'is_positive_definite\', \'is_square\', \'name\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'LinearOperatorLowRankUpdate\'], "
+ }
+ member_method {
+ name: "add_to_tensor"
+ argspec: "args=[\'self\', \'x\', \'name\'], varargs=None, keywords=None, defaults=[\'add_to_tensor\'], "
+ }
+ member_method {
+ name: "assert_non_singular"
+ argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'assert_non_singular\'], "
+ }
+ member_method {
+ name: "assert_positive_definite"
+ argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'assert_positive_definite\'], "
+ }
+ member_method {
+ name: "assert_self_adjoint"
+ argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'assert_self_adjoint\'], "
+ }
+ member_method {
+ name: "batch_shape_tensor"
+ argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'batch_shape_tensor\'], "
+ }
+ member_method {
+ name: "determinant"
+ argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'det\'], "
+ }
+ member_method {
+ name: "diag_part"
+ argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'diag_part\'], "
+ }
+ member_method {
+ name: "domain_dimension_tensor"
+ argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'domain_dimension_tensor\'], "
+ }
+ member_method {
+ name: "log_abs_determinant"
+ argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'log_abs_det\'], "
+ }
+ member_method {
+ name: "matmul"
+ argspec: "args=[\'self\', \'x\', \'adjoint\', \'adjoint_arg\', \'name\'], varargs=None, keywords=None, defaults=[\'False\', \'False\', \'matmul\'], "
+ }
+ member_method {
+ name: "matvec"
+ argspec: "args=[\'self\', \'x\', \'adjoint\', \'name\'], varargs=None, keywords=None, defaults=[\'False\', \'matvec\'], "
+ }
+ member_method {
+ name: "range_dimension_tensor"
+ argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'range_dimension_tensor\'], "
+ }
+ member_method {
+ name: "shape_tensor"
+ argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'shape_tensor\'], "
+ }
+ member_method {
+ name: "solve"
+ argspec: "args=[\'self\', \'rhs\', \'adjoint\', \'adjoint_arg\', \'name\'], varargs=None, keywords=None, defaults=[\'False\', \'False\', \'solve\'], "
+ }
+ member_method {
+ name: "solvevec"
+ argspec: "args=[\'self\', \'rhs\', \'adjoint\', \'name\'], varargs=None, keywords=None, defaults=[\'False\', \'solve\'], "
+ }
+ member_method {
+ name: "tensor_rank_tensor"
+ argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'tensor_rank_tensor\'], "
+ }
+ member_method {
+ name: "to_dense"
+ argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'to_dense\'], "
+ }
+ member_method {
+ name: "trace"
+ argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'trace\'], "
+ }
+}
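
LinearOperatorLowRankUpdate (sketch, not part of the diff): represents base + u diag v^H; per the defaults in the __init__ argspec above, passing only base and u gives base + u u^T. Values are illustrative.

    import tensorflow as tf

    base = tf.linalg.LinearOperatorIdentity(num_rows=2)
    u = tf.constant([[1.], [2.]])                        # shape [2, 1]: a rank-1 update
    op = tf.linalg.LinearOperatorLowRankUpdate(base, u)  # acts like I + u u^T
    op.matvec([1., 0.])   # [2., 2.]
    op.to_dense()         # [[2., 2.], [2., 5.]]
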
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.linalg.-linear-operator-lower-triangular.__metaclass__.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.linalg.-linear-operator-lower-triangular.__metaclass__.pbtxt
new file mode 100644
index 0000000000..2683430f4f
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.linalg.-linear-operator-lower-triangular.__metaclass__.pbtxt
@@ -0,0 +1,14 @@
+path: "tensorflow.linalg.LinearOperatorLowerTriangular.__metaclass__"
+tf_class {
+ is_instance: "<class \'abc.ABCMeta\'>"
+ member_method {
+ name: "__init__"
+ }
+ member_method {
+ name: "mro"
+ }
+ member_method {
+ name: "register"
+ argspec: "args=[\'cls\', \'subclass\'], varargs=None, keywords=None, defaults=None"
+ }
+}
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.linalg.-linear-operator-lower-triangular.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.linalg.-linear-operator-lower-triangular.pbtxt
new file mode 100644
index 0000000000..63a1bc2321
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.linalg.-linear-operator-lower-triangular.pbtxt
@@ -0,0 +1,130 @@
+path: "tensorflow.linalg.LinearOperatorLowerTriangular"
+tf_class {
+ is_instance: "<class \'tensorflow.python.ops.linalg.linear_operator_lower_triangular.LinearOperatorLowerTriangular\'>"
+ is_instance: "<class \'tensorflow.python.ops.linalg.linear_operator.LinearOperator\'>"
+ is_instance: "<type \'object\'>"
+ member {
+ name: "batch_shape"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "domain_dimension"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "dtype"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "graph_parents"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "is_non_singular"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "is_positive_definite"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "is_self_adjoint"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "is_square"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "name"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "range_dimension"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "shape"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "tensor_rank"
+ mtype: "<type \'property\'>"
+ }
+ member_method {
+ name: "__init__"
+ argspec: "args=[\'self\', \'tril\', \'is_non_singular\', \'is_self_adjoint\', \'is_positive_definite\', \'is_square\', \'name\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'None\', \'LinearOperatorLowerTriangular\'], "
+ }
+ member_method {
+ name: "add_to_tensor"
+ argspec: "args=[\'self\', \'x\', \'name\'], varargs=None, keywords=None, defaults=[\'add_to_tensor\'], "
+ }
+ member_method {
+ name: "assert_non_singular"
+ argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'assert_non_singular\'], "
+ }
+ member_method {
+ name: "assert_positive_definite"
+ argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'assert_positive_definite\'], "
+ }
+ member_method {
+ name: "assert_self_adjoint"
+ argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'assert_self_adjoint\'], "
+ }
+ member_method {
+ name: "batch_shape_tensor"
+ argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'batch_shape_tensor\'], "
+ }
+ member_method {
+ name: "determinant"
+ argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'det\'], "
+ }
+ member_method {
+ name: "diag_part"
+ argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'diag_part\'], "
+ }
+ member_method {
+ name: "domain_dimension_tensor"
+ argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'domain_dimension_tensor\'], "
+ }
+ member_method {
+ name: "log_abs_determinant"
+ argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'log_abs_det\'], "
+ }
+ member_method {
+ name: "matmul"
+ argspec: "args=[\'self\', \'x\', \'adjoint\', \'adjoint_arg\', \'name\'], varargs=None, keywords=None, defaults=[\'False\', \'False\', \'matmul\'], "
+ }
+ member_method {
+ name: "matvec"
+ argspec: "args=[\'self\', \'x\', \'adjoint\', \'name\'], varargs=None, keywords=None, defaults=[\'False\', \'matvec\'], "
+ }
+ member_method {
+ name: "range_dimension_tensor"
+ argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'range_dimension_tensor\'], "
+ }
+ member_method {
+ name: "shape_tensor"
+ argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'shape_tensor\'], "
+ }
+ member_method {
+ name: "solve"
+ argspec: "args=[\'self\', \'rhs\', \'adjoint\', \'adjoint_arg\', \'name\'], varargs=None, keywords=None, defaults=[\'False\', \'False\', \'solve\'], "
+ }
+ member_method {
+ name: "solvevec"
+ argspec: "args=[\'self\', \'rhs\', \'adjoint\', \'name\'], varargs=None, keywords=None, defaults=[\'False\', \'solve\'], "
+ }
+ member_method {
+ name: "tensor_rank_tensor"
+ argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'tensor_rank_tensor\'], "
+ }
+ member_method {
+ name: "to_dense"
+ argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'to_dense\'], "
+ }
+ member_method {
+ name: "trace"
+ argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'trace\'], "
+ }
+}
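
LinearOperatorLowerTriangular (sketch, not part of the diff): solve is a triangular solve rather than a general inverse. Values are illustrative.

    import tensorflow as tf

    op = tf.linalg.LinearOperatorLowerTriangular(
        [[2., 0.], [1., 3.]], is_non_singular=True)
    op.matvec([1., 1.])    # [2., 4.]
    op.solvevec([2., 7.])  # [1., 2.]  (forward substitution)
    op.determinant()       # 6.  (product of the diagonal)
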
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.linalg.-linear-operator-scaled-identity.__metaclass__.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.linalg.-linear-operator-scaled-identity.__metaclass__.pbtxt
new file mode 100644
index 0000000000..38bf7ad586
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.linalg.-linear-operator-scaled-identity.__metaclass__.pbtxt
@@ -0,0 +1,14 @@
+path: "tensorflow.linalg.LinearOperatorScaledIdentity.__metaclass__"
+tf_class {
+ is_instance: "<class \'abc.ABCMeta\'>"
+ member_method {
+ name: "__init__"
+ }
+ member_method {
+ name: "mro"
+ }
+ member_method {
+ name: "register"
+ argspec: "args=[\'cls\', \'subclass\'], varargs=None, keywords=None, defaults=None"
+ }
+}
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.linalg.-linear-operator-scaled-identity.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.linalg.-linear-operator-scaled-identity.pbtxt
new file mode 100644
index 0000000000..e2c5a505a7
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.linalg.-linear-operator-scaled-identity.pbtxt
@@ -0,0 +1,135 @@
+path: "tensorflow.linalg.LinearOperatorScaledIdentity"
+tf_class {
+ is_instance: "<class \'tensorflow.python.ops.linalg.linear_operator_identity.LinearOperatorScaledIdentity\'>"
+ is_instance: "<class \'tensorflow.python.ops.linalg.linear_operator_identity.BaseLinearOperatorIdentity\'>"
+ is_instance: "<class \'tensorflow.python.ops.linalg.linear_operator.LinearOperator\'>"
+ is_instance: "<type \'object\'>"
+ member {
+ name: "batch_shape"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "domain_dimension"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "dtype"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "graph_parents"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "is_non_singular"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "is_positive_definite"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "is_self_adjoint"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "is_square"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "multiplier"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "name"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "range_dimension"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "shape"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "tensor_rank"
+ mtype: "<type \'property\'>"
+ }
+ member_method {
+ name: "__init__"
+ argspec: "args=[\'self\', \'num_rows\', \'multiplier\', \'is_non_singular\', \'is_self_adjoint\', \'is_positive_definite\', \'is_square\', \'assert_proper_shapes\', \'name\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'True\', \'False\', \'LinearOperatorScaledIdentity\'], "
+ }
+ member_method {
+ name: "add_to_tensor"
+ argspec: "args=[\'self\', \'mat\', \'name\'], varargs=None, keywords=None, defaults=[\'add_to_tensor\'], "
+ }
+ member_method {
+ name: "assert_non_singular"
+ argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'assert_non_singular\'], "
+ }
+ member_method {
+ name: "assert_positive_definite"
+ argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'assert_positive_definite\'], "
+ }
+ member_method {
+ name: "assert_self_adjoint"
+ argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'assert_self_adjoint\'], "
+ }
+ member_method {
+ name: "batch_shape_tensor"
+ argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'batch_shape_tensor\'], "
+ }
+ member_method {
+ name: "determinant"
+ argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'det\'], "
+ }
+ member_method {
+ name: "diag_part"
+ argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'diag_part\'], "
+ }
+ member_method {
+ name: "domain_dimension_tensor"
+ argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'domain_dimension_tensor\'], "
+ }
+ member_method {
+ name: "log_abs_determinant"
+ argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'log_abs_det\'], "
+ }
+ member_method {
+ name: "matmul"
+ argspec: "args=[\'self\', \'x\', \'adjoint\', \'adjoint_arg\', \'name\'], varargs=None, keywords=None, defaults=[\'False\', \'False\', \'matmul\'], "
+ }
+ member_method {
+ name: "matvec"
+ argspec: "args=[\'self\', \'x\', \'adjoint\', \'name\'], varargs=None, keywords=None, defaults=[\'False\', \'matvec\'], "
+ }
+ member_method {
+ name: "range_dimension_tensor"
+ argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'range_dimension_tensor\'], "
+ }
+ member_method {
+ name: "shape_tensor"
+ argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'shape_tensor\'], "
+ }
+ member_method {
+ name: "solve"
+ argspec: "args=[\'self\', \'rhs\', \'adjoint\', \'adjoint_arg\', \'name\'], varargs=None, keywords=None, defaults=[\'False\', \'False\', \'solve\'], "
+ }
+ member_method {
+ name: "solvevec"
+ argspec: "args=[\'self\', \'rhs\', \'adjoint\', \'name\'], varargs=None, keywords=None, defaults=[\'False\', \'solve\'], "
+ }
+ member_method {
+ name: "tensor_rank_tensor"
+ argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'tensor_rank_tensor\'], "
+ }
+ member_method {
+ name: "to_dense"
+ argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'to_dense\'], "
+ }
+ member_method {
+ name: "trace"
+ argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'trace\'], "
+ }
+}
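
LinearOperatorScaledIdentity (sketch, not part of the diff): a single multiplier scales an implicit identity; num_rows and multiplier are the two required arguments. Values are illustrative.

    import tensorflow as tf

    op = tf.linalg.LinearOperatorScaledIdentity(num_rows=2, multiplier=3.)
    op.matvec([1., 2.])   # [3., 6.]
    op.determinant()      # 9.  (multiplier ** num_rows)
    op.trace()            # 6.
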
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.linalg.-linear-operator-zeros.__metaclass__.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.linalg.-linear-operator-zeros.__metaclass__.pbtxt
new file mode 100644
index 0000000000..49ff85728f
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.linalg.-linear-operator-zeros.__metaclass__.pbtxt
@@ -0,0 +1,14 @@
+path: "tensorflow.linalg.LinearOperatorZeros.__metaclass__"
+tf_class {
+ is_instance: "<class \'abc.ABCMeta\'>"
+ member_method {
+ name: "__init__"
+ }
+ member_method {
+ name: "mro"
+ }
+ member_method {
+ name: "register"
+ argspec: "args=[\'cls\', \'subclass\'], varargs=None, keywords=None, defaults=None"
+ }
+}
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.linalg.-linear-operator-zeros.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.linalg.-linear-operator-zeros.pbtxt
new file mode 100644
index 0000000000..a1b0e06b47
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.linalg.-linear-operator-zeros.pbtxt
@@ -0,0 +1,130 @@
+path: "tensorflow.linalg.LinearOperatorZeros"
+tf_class {
+ is_instance: "<class \'tensorflow.python.ops.linalg.linear_operator_zeros.LinearOperatorZeros\'>"
+ is_instance: "<class \'tensorflow.python.ops.linalg.linear_operator.LinearOperator\'>"
+ is_instance: "<type \'object\'>"
+ member {
+ name: "batch_shape"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "domain_dimension"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "dtype"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "graph_parents"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "is_non_singular"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "is_positive_definite"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "is_self_adjoint"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "is_square"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "name"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "range_dimension"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "shape"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "tensor_rank"
+ mtype: "<type \'property\'>"
+ }
+ member_method {
+ name: "__init__"
+ argspec: "args=[\'self\', \'num_rows\', \'num_columns\', \'batch_shape\', \'dtype\', \'is_non_singular\', \'is_self_adjoint\', \'is_positive_definite\', \'is_square\', \'assert_proper_shapes\', \'name\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'False\', \'True\', \'False\', \'True\', \'False\', \'LinearOperatorZeros\'], "
+ }
+ member_method {
+ name: "add_to_tensor"
+ argspec: "args=[\'self\', \'mat\', \'name\'], varargs=None, keywords=None, defaults=[\'add_to_tensor\'], "
+ }
+ member_method {
+ name: "assert_non_singular"
+ argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'assert_non_singular\'], "
+ }
+ member_method {
+ name: "assert_positive_definite"
+ argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'assert_positive_definite\'], "
+ }
+ member_method {
+ name: "assert_self_adjoint"
+ argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'assert_self_adjoint\'], "
+ }
+ member_method {
+ name: "batch_shape_tensor"
+ argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'batch_shape_tensor\'], "
+ }
+ member_method {
+ name: "determinant"
+ argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'det\'], "
+ }
+ member_method {
+ name: "diag_part"
+ argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'diag_part\'], "
+ }
+ member_method {
+ name: "domain_dimension_tensor"
+ argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'domain_dimension_tensor\'], "
+ }
+ member_method {
+ name: "log_abs_determinant"
+ argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'log_abs_det\'], "
+ }
+ member_method {
+ name: "matmul"
+ argspec: "args=[\'self\', \'x\', \'adjoint\', \'adjoint_arg\', \'name\'], varargs=None, keywords=None, defaults=[\'False\', \'False\', \'matmul\'], "
+ }
+ member_method {
+ name: "matvec"
+ argspec: "args=[\'self\', \'x\', \'adjoint\', \'name\'], varargs=None, keywords=None, defaults=[\'False\', \'matvec\'], "
+ }
+ member_method {
+ name: "range_dimension_tensor"
+ argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'range_dimension_tensor\'], "
+ }
+ member_method {
+ name: "shape_tensor"
+ argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'shape_tensor\'], "
+ }
+ member_method {
+ name: "solve"
+ argspec: "args=[\'self\', \'rhs\', \'adjoint\', \'adjoint_arg\', \'name\'], varargs=None, keywords=None, defaults=[\'False\', \'False\', \'solve\'], "
+ }
+ member_method {
+ name: "solvevec"
+ argspec: "args=[\'self\', \'rhs\', \'adjoint\', \'name\'], varargs=None, keywords=None, defaults=[\'False\', \'solve\'], "
+ }
+ member_method {
+ name: "tensor_rank_tensor"
+ argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'tensor_rank_tensor\'], "
+ }
+ member_method {
+ name: "to_dense"
+ argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'to_dense\'], "
+ }
+ member_method {
+ name: "trace"
+ argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'trace\'], "
+ }
+}
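
For readers scanning this golden, a minimal usage sketch of the constructor and a couple of the methods captured above — assuming a TensorFlow 1.x-era runtime with eager execution enabled; shapes and values are illustrative, not part of the patch:

    import tensorflow as tf

    tf.enable_eager_execution()

    # num_rows is the only required argument; the remaining hints default as in
    # the argspec above (square, self-adjoint, not positive definite).
    op = tf.linalg.LinearOperatorZeros(num_rows=3, dtype=tf.float32)

    x = tf.ones([3, 2])
    print(op.to_dense())   # 3x3 matrix of zeros
    print(op.matmul(x))    # [3, 2] tensor of zeros
    print(op.shape, op.is_self_adjoint)
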
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.linalg.-linear-operator.__metaclass__.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.linalg.-linear-operator.__metaclass__.pbtxt
new file mode 100644
index 0000000000..38da809b36
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.linalg.-linear-operator.__metaclass__.pbtxt
@@ -0,0 +1,14 @@
+path: "tensorflow.linalg.LinearOperator.__metaclass__"
+tf_class {
+ is_instance: "<class \'abc.ABCMeta\'>"
+ member_method {
+ name: "__init__"
+ }
+ member_method {
+ name: "mro"
+ }
+ member_method {
+ name: "register"
+ argspec: "args=[\'cls\', \'subclass\'], varargs=None, keywords=None, defaults=None"
+ }
+}
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.linalg.-linear-operator.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.linalg.-linear-operator.pbtxt
new file mode 100644
index 0000000000..6d849dc040
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.linalg.-linear-operator.pbtxt
@@ -0,0 +1,129 @@
+path: "tensorflow.linalg.LinearOperator"
+tf_class {
+ is_instance: "<class \'tensorflow.python.ops.linalg.linear_operator.LinearOperator\'>"
+ is_instance: "<type \'object\'>"
+ member {
+ name: "batch_shape"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "domain_dimension"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "dtype"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "graph_parents"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "is_non_singular"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "is_positive_definite"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "is_self_adjoint"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "is_square"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "name"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "range_dimension"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "shape"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "tensor_rank"
+ mtype: "<type \'property\'>"
+ }
+ member_method {
+ name: "__init__"
+ argspec: "args=[\'self\', \'dtype\', \'graph_parents\', \'is_non_singular\', \'is_self_adjoint\', \'is_positive_definite\', \'is_square\', \'name\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'None\', \'None\', \'None\'], "
+ }
+ member_method {
+ name: "add_to_tensor"
+ argspec: "args=[\'self\', \'x\', \'name\'], varargs=None, keywords=None, defaults=[\'add_to_tensor\'], "
+ }
+ member_method {
+ name: "assert_non_singular"
+ argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'assert_non_singular\'], "
+ }
+ member_method {
+ name: "assert_positive_definite"
+ argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'assert_positive_definite\'], "
+ }
+ member_method {
+ name: "assert_self_adjoint"
+ argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'assert_self_adjoint\'], "
+ }
+ member_method {
+ name: "batch_shape_tensor"
+ argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'batch_shape_tensor\'], "
+ }
+ member_method {
+ name: "determinant"
+ argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'det\'], "
+ }
+ member_method {
+ name: "diag_part"
+ argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'diag_part\'], "
+ }
+ member_method {
+ name: "domain_dimension_tensor"
+ argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'domain_dimension_tensor\'], "
+ }
+ member_method {
+ name: "log_abs_determinant"
+ argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'log_abs_det\'], "
+ }
+ member_method {
+ name: "matmul"
+ argspec: "args=[\'self\', \'x\', \'adjoint\', \'adjoint_arg\', \'name\'], varargs=None, keywords=None, defaults=[\'False\', \'False\', \'matmul\'], "
+ }
+ member_method {
+ name: "matvec"
+ argspec: "args=[\'self\', \'x\', \'adjoint\', \'name\'], varargs=None, keywords=None, defaults=[\'False\', \'matvec\'], "
+ }
+ member_method {
+ name: "range_dimension_tensor"
+ argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'range_dimension_tensor\'], "
+ }
+ member_method {
+ name: "shape_tensor"
+ argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'shape_tensor\'], "
+ }
+ member_method {
+ name: "solve"
+ argspec: "args=[\'self\', \'rhs\', \'adjoint\', \'adjoint_arg\', \'name\'], varargs=None, keywords=None, defaults=[\'False\', \'False\', \'solve\'], "
+ }
+ member_method {
+ name: "solvevec"
+ argspec: "args=[\'self\', \'rhs\', \'adjoint\', \'name\'], varargs=None, keywords=None, defaults=[\'False\', \'solve\'], "
+ }
+ member_method {
+ name: "tensor_rank_tensor"
+ argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'tensor_rank_tensor\'], "
+ }
+ member_method {
+ name: "to_dense"
+ argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'to_dense\'], "
+ }
+ member_method {
+ name: "trace"
+ argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'trace\'], "
+ }
+}
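
LinearOperator itself is abstract (note the ABCMeta metaclass golden above); the method surface listed here is what every concrete subclass exposes. A hedged sketch using LinearOperatorFullMatrix — one of the subclasses registered in the tf.linalg module golden below — purely as an illustration:

    import tensorflow as tf

    tf.enable_eager_execution()

    # Wraps an explicit dense matrix; the hint flags mirror the base-class args.
    op = tf.linalg.LinearOperatorFullMatrix(
        [[2.0, 0.0], [0.0, 4.0]],
        is_non_singular=True, is_self_adjoint=True, is_positive_definite=True)

    rhs = tf.constant([[1.0], [1.0]])
    print(op.matmul(rhs))            # [[2.], [4.]]
    print(op.solve(rhs))             # [[0.5], [0.25]]
    print(op.log_abs_determinant())  # log(8.0)
    print(op.to_dense())
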
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.linalg.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.linalg.pbtxt
new file mode 100644
index 0000000000..d979116887
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.linalg.pbtxt
@@ -0,0 +1,175 @@
+path: "tensorflow.linalg"
+tf_module {
+ member {
+ name: "LinearOperator"
+ mtype: "<class \'abc.ABCMeta\'>"
+ }
+ member {
+ name: "LinearOperatorBlockDiag"
+ mtype: "<class \'abc.ABCMeta\'>"
+ }
+ member {
+ name: "LinearOperatorCirculant"
+ mtype: "<class \'abc.ABCMeta\'>"
+ }
+ member {
+ name: "LinearOperatorCirculant2D"
+ mtype: "<class \'abc.ABCMeta\'>"
+ }
+ member {
+ name: "LinearOperatorCirculant3D"
+ mtype: "<class \'abc.ABCMeta\'>"
+ }
+ member {
+ name: "LinearOperatorComposition"
+ mtype: "<class \'abc.ABCMeta\'>"
+ }
+ member {
+ name: "LinearOperatorDiag"
+ mtype: "<class \'abc.ABCMeta\'>"
+ }
+ member {
+ name: "LinearOperatorFullMatrix"
+ mtype: "<class \'abc.ABCMeta\'>"
+ }
+ member {
+ name: "LinearOperatorIdentity"
+ mtype: "<class \'abc.ABCMeta\'>"
+ }
+ member {
+ name: "LinearOperatorKronecker"
+ mtype: "<class \'abc.ABCMeta\'>"
+ }
+ member {
+ name: "LinearOperatorLowRankUpdate"
+ mtype: "<class \'abc.ABCMeta\'>"
+ }
+ member {
+ name: "LinearOperatorLowerTriangular"
+ mtype: "<class \'abc.ABCMeta\'>"
+ }
+ member {
+ name: "LinearOperatorScaledIdentity"
+ mtype: "<class \'abc.ABCMeta\'>"
+ }
+ member {
+ name: "LinearOperatorZeros"
+ mtype: "<class \'abc.ABCMeta\'>"
+ }
+ member_method {
+ name: "adjoint"
+ argspec: "args=[\'matrix\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "band_part"
+ argspec: "args=[\'input\', \'num_lower\', \'num_upper\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "cholesky"
+ argspec: "args=[\'input\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "cholesky_solve"
+ argspec: "args=[\'chol\', \'rhs\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "cross"
+ argspec: "args=[\'a\', \'b\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "det"
+ argspec: "args=[\'input\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "diag"
+ argspec: "args=[\'diagonal\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "diag_part"
+ argspec: "args=[\'input\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "eigh"
+ argspec: "args=[\'tensor\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "eigvalsh"
+ argspec: "args=[\'tensor\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "einsum"
+ argspec: "args=[\'equation\'], varargs=inputs, keywords=kwargs, defaults=None"
+ }
+ member_method {
+ name: "expm"
+ argspec: "args=[\'input\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "eye"
+ argspec: "args=[\'num_rows\', \'num_columns\', \'batch_shape\', \'dtype\', \'name\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \"<dtype: \'float32\'>\", \'None\'], "
+ }
+ member_method {
+ name: "inv"
+ argspec: "args=[\'input\', \'adjoint\', \'name\'], varargs=None, keywords=None, defaults=[\'False\', \'None\'], "
+ }
+ member_method {
+ name: "logdet"
+ argspec: "args=[\'matrix\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "logm"
+ argspec: "args=[\'input\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "lstsq"
+ argspec: "args=[\'matrix\', \'rhs\', \'l2_regularizer\', \'fast\', \'name\'], varargs=None, keywords=None, defaults=[\'0.0\', \'True\', \'None\'], "
+ }
+ member_method {
+ name: "norm"
+ argspec: "args=[\'tensor\', \'ord\', \'axis\', \'keepdims\', \'name\', \'keep_dims\'], varargs=None, keywords=None, defaults=[\'euclidean\', \'None\', \'None\', \'None\', \'None\'], "
+ }
+ member_method {
+ name: "qr"
+ argspec: "args=[\'input\', \'full_matrices\', \'name\'], varargs=None, keywords=None, defaults=[\'False\', \'None\'], "
+ }
+ member_method {
+ name: "set_diag"
+ argspec: "args=[\'input\', \'diagonal\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "slogdet"
+ argspec: "args=[\'input\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "solve"
+ argspec: "args=[\'matrix\', \'rhs\', \'adjoint\', \'name\'], varargs=None, keywords=None, defaults=[\'False\', \'None\'], "
+ }
+ member_method {
+ name: "svd"
+ argspec: "args=[\'tensor\', \'full_matrices\', \'compute_uv\', \'name\'], varargs=None, keywords=None, defaults=[\'False\', \'True\', \'None\'], "
+ }
+ member_method {
+ name: "tensor_diag"
+ argspec: "args=[\'diagonal\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "tensor_diag_part"
+ argspec: "args=[\'input\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "tensordot"
+ argspec: "args=[\'a\', \'b\', \'axes\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "trace"
+ argspec: "args=[\'x\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "transpose"
+ argspec: "args=[\'a\', \'name\', \'conjugate\'], varargs=None, keywords=None, defaults=[\'matrix_transpose\', \'False\'], "
+ }
+ member_method {
+ name: "triangular_solve"
+ argspec: "args=[\'matrix\', \'rhs\', \'lower\', \'adjoint\', \'name\'], varargs=None, keywords=None, defaults=[\'True\', \'False\', \'None\'], "
+ }
+}
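
A short sketch of a few of the free functions listed above (TF 1.x-era names, eager execution assumed; values illustrative):

    import tensorflow as tf

    tf.enable_eager_execution()

    a = tf.constant([[4.0, 1.0], [1.0, 3.0]])   # symmetric positive definite
    rhs = tf.constant([[1.0], [2.0]])

    chol = tf.linalg.cholesky(a)                # lower-triangular factor
    print(tf.linalg.cholesky_solve(chol, rhs))  # solves a @ x = rhs
    print(tf.linalg.det(a))                     # 11.0
    print(tf.linalg.norm(a))                    # default ord='euclidean' (Frobenius)
    print(tf.linalg.eye(2, batch_shape=[3]))    # batch of three 2x2 identities
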
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.logging.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.logging.pbtxt
new file mode 100644
index 0000000000..85bb15455d
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.logging.pbtxt
@@ -0,0 +1,83 @@
+path: "tensorflow.logging"
+tf_module {
+ member {
+ name: "DEBUG"
+ mtype: "<type \'int\'>"
+ }
+ member {
+ name: "ERROR"
+ mtype: "<type \'int\'>"
+ }
+ member {
+ name: "FATAL"
+ mtype: "<type \'int\'>"
+ }
+ member {
+ name: "INFO"
+ mtype: "<type \'int\'>"
+ }
+ member {
+ name: "WARN"
+ mtype: "<type \'int\'>"
+ }
+ member_method {
+ name: "TaskLevelStatusMessage"
+ argspec: "args=[\'msg\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "debug"
+ argspec: "args=[\'msg\'], varargs=args, keywords=kwargs, defaults=None"
+ }
+ member_method {
+ name: "error"
+ argspec: "args=[\'msg\'], varargs=args, keywords=kwargs, defaults=None"
+ }
+ member_method {
+ name: "fatal"
+ argspec: "args=[\'msg\'], varargs=args, keywords=kwargs, defaults=None"
+ }
+ member_method {
+ name: "flush"
+ argspec: "args=[], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_verbosity"
+ argspec: "args=[], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "info"
+ argspec: "args=[\'msg\'], varargs=args, keywords=kwargs, defaults=None"
+ }
+ member_method {
+ name: "log"
+ argspec: "args=[\'level\', \'msg\'], varargs=args, keywords=kwargs, defaults=None"
+ }
+ member_method {
+ name: "log_every_n"
+ argspec: "args=[\'level\', \'msg\', \'n\'], varargs=args, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "log_first_n"
+ argspec: "args=[\'level\', \'msg\', \'n\'], varargs=args, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "log_if"
+ argspec: "args=[\'level\', \'msg\', \'condition\'], varargs=args, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "set_verbosity"
+ argspec: "args=[\'v\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "vlog"
+ argspec: "args=[\'level\', \'msg\'], varargs=args, keywords=kwargs, defaults=None"
+ }
+ member_method {
+ name: "warn"
+ argspec: "args=[\'msg\'], varargs=args, keywords=kwargs, defaults=None"
+ }
+ member_method {
+ name: "warning"
+ argspec: "args=[\'msg\'], varargs=args, keywords=kwargs, defaults=None"
+ }
+}
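
These are thin wrappers over Python logging with the integer levels listed above; a minimal sketch:

    import tensorflow as tf

    # Raise the verbosity so INFO-level messages are emitted.
    tf.logging.set_verbosity(tf.logging.INFO)

    tf.logging.info("starting step %d", 0)
    tf.logging.warning("deprecated flag: %s", "--foo")   # alias: tf.logging.warn
    tf.logging.log_every_n(tf.logging.INFO, "heartbeat %d", 10, 0)
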
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.losses.-reduction.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.losses.-reduction.pbtxt
new file mode 100644
index 0000000000..258ad5047e
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.losses.-reduction.pbtxt
@@ -0,0 +1,40 @@
+path: "tensorflow.losses.Reduction"
+tf_class {
+ is_instance: "<class \'tensorflow.python.ops.losses.losses_impl.Reduction\'>"
+ is_instance: "<type \'object\'>"
+ member {
+ name: "MEAN"
+ mtype: "<type \'str\'>"
+ }
+ member {
+ name: "NONE"
+ mtype: "<type \'str\'>"
+ }
+ member {
+ name: "SUM"
+ mtype: "<type \'str\'>"
+ }
+ member {
+ name: "SUM_BY_NONZERO_WEIGHTS"
+ mtype: "<type \'str\'>"
+ }
+ member {
+ name: "SUM_OVER_BATCH_SIZE"
+ mtype: "<type \'str\'>"
+ }
+ member {
+ name: "SUM_OVER_NONZERO_WEIGHTS"
+ mtype: "<type \'str\'>"
+ }
+ member_method {
+ name: "__init__"
+ }
+ member_method {
+ name: "all"
+ argspec: "args=[\'cls\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "validate"
+ argspec: "args=[\'cls\', \'key\'], varargs=None, keywords=None, defaults=None"
+ }
+}
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.losses.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.losses.pbtxt
new file mode 100644
index 0000000000..c1d190ae11
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.losses.pbtxt
@@ -0,0 +1,71 @@
+path: "tensorflow.losses"
+tf_module {
+ member {
+ name: "Reduction"
+ mtype: "<type \'type\'>"
+ }
+ member_method {
+ name: "absolute_difference"
+ argspec: "args=[\'labels\', \'predictions\', \'weights\', \'scope\', \'loss_collection\', \'reduction\'], varargs=None, keywords=None, defaults=[\'1.0\', \'None\', \'losses\', \'weighted_sum_by_nonzero_weights\'], "
+ }
+ member_method {
+ name: "add_loss"
+ argspec: "args=[\'loss\', \'loss_collection\'], varargs=None, keywords=None, defaults=[\'losses\'], "
+ }
+ member_method {
+ name: "compute_weighted_loss"
+ argspec: "args=[\'losses\', \'weights\', \'scope\', \'loss_collection\', \'reduction\'], varargs=None, keywords=None, defaults=[\'1.0\', \'None\', \'losses\', \'weighted_sum_by_nonzero_weights\'], "
+ }
+ member_method {
+ name: "cosine_distance"
+ argspec: "args=[\'labels\', \'predictions\', \'axis\', \'weights\', \'scope\', \'loss_collection\', \'reduction\', \'dim\'], varargs=None, keywords=None, defaults=[\'None\', \'1.0\', \'None\', \'losses\', \'weighted_sum_by_nonzero_weights\', \'None\'], "
+ }
+ member_method {
+ name: "get_losses"
+ argspec: "args=[\'scope\', \'loss_collection\'], varargs=None, keywords=None, defaults=[\'None\', \'losses\'], "
+ }
+ member_method {
+ name: "get_regularization_loss"
+ argspec: "args=[\'scope\', \'name\'], varargs=None, keywords=None, defaults=[\'None\', \'total_regularization_loss\'], "
+ }
+ member_method {
+ name: "get_regularization_losses"
+ argspec: "args=[\'scope\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "get_total_loss"
+ argspec: "args=[\'add_regularization_losses\', \'name\'], varargs=None, keywords=None, defaults=[\'True\', \'total_loss\'], "
+ }
+ member_method {
+ name: "hinge_loss"
+ argspec: "args=[\'labels\', \'logits\', \'weights\', \'scope\', \'loss_collection\', \'reduction\'], varargs=None, keywords=None, defaults=[\'1.0\', \'None\', \'losses\', \'weighted_sum_by_nonzero_weights\'], "
+ }
+ member_method {
+ name: "huber_loss"
+ argspec: "args=[\'labels\', \'predictions\', \'weights\', \'delta\', \'scope\', \'loss_collection\', \'reduction\'], varargs=None, keywords=None, defaults=[\'1.0\', \'1.0\', \'None\', \'losses\', \'weighted_sum_by_nonzero_weights\'], "
+ }
+ member_method {
+ name: "log_loss"
+ argspec: "args=[\'labels\', \'predictions\', \'weights\', \'epsilon\', \'scope\', \'loss_collection\', \'reduction\'], varargs=None, keywords=None, defaults=[\'1.0\', \'1e-07\', \'None\', \'losses\', \'weighted_sum_by_nonzero_weights\'], "
+ }
+ member_method {
+ name: "mean_pairwise_squared_error"
+ argspec: "args=[\'labels\', \'predictions\', \'weights\', \'scope\', \'loss_collection\'], varargs=None, keywords=None, defaults=[\'1.0\', \'None\', \'losses\'], "
+ }
+ member_method {
+ name: "mean_squared_error"
+ argspec: "args=[\'labels\', \'predictions\', \'weights\', \'scope\', \'loss_collection\', \'reduction\'], varargs=None, keywords=None, defaults=[\'1.0\', \'None\', \'losses\', \'weighted_sum_by_nonzero_weights\'], "
+ }
+ member_method {
+ name: "sigmoid_cross_entropy"
+ argspec: "args=[\'multi_class_labels\', \'logits\', \'weights\', \'label_smoothing\', \'scope\', \'loss_collection\', \'reduction\'], varargs=None, keywords=None, defaults=[\'1.0\', \'0\', \'None\', \'losses\', \'weighted_sum_by_nonzero_weights\'], "
+ }
+ member_method {
+ name: "softmax_cross_entropy"
+ argspec: "args=[\'onehot_labels\', \'logits\', \'weights\', \'label_smoothing\', \'scope\', \'loss_collection\', \'reduction\'], varargs=None, keywords=None, defaults=[\'1.0\', \'0\', \'None\', \'losses\', \'weighted_sum_by_nonzero_weights\'], "
+ }
+ member_method {
+ name: "sparse_softmax_cross_entropy"
+ argspec: "args=[\'labels\', \'logits\', \'weights\', \'scope\', \'loss_collection\', \'reduction\'], varargs=None, keywords=None, defaults=[\'1.0\', \'None\', \'losses\', \'weighted_sum_by_nonzero_weights\'], "
+ }
+}
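
A graph-mode sketch of one loss with an explicit reduction; the string constants come from the Reduction golden above, and the numbers are illustrative:

    import tensorflow as tf

    labels = tf.constant([[1.0], [2.0], [3.0]])
    predictions = tf.constant([[1.5], [1.5], [2.5]])

    # 'weighted_sum_by_nonzero_weights' is the default shown in the argspecs;
    # here we ask for a plain mean over all elements instead.
    loss = tf.losses.mean_squared_error(
        labels=labels,
        predictions=predictions,
        reduction=tf.losses.Reduction.SUM_OVER_BATCH_SIZE)

    with tf.Session() as sess:
      print(sess.run(loss))   # 0.25
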
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.manip.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.manip.pbtxt
new file mode 100644
index 0000000000..9add462396
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.manip.pbtxt
@@ -0,0 +1,35 @@
+path: "tensorflow.manip"
+tf_module {
+ member_method {
+ name: "batch_to_space_nd"
+ argspec: "args=[\'input\', \'block_shape\', \'crops\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "gather_nd"
+ argspec: "args=[\'params\', \'indices\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "reshape"
+ argspec: "args=[\'tensor\', \'shape\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "reverse"
+ argspec: "args=[\'tensor\', \'axis\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "roll"
+ argspec: "args=[\'input\', \'shift\', \'axis\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "scatter_nd"
+ argspec: "args=[\'indices\', \'updates\', \'shape\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "space_to_batch_nd"
+ argspec: "args=[\'input\', \'block_shape\', \'paddings\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "tile"
+ argspec: "args=[\'input\', \'multiples\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+}
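
A quick sketch of the array-manipulation wrappers above (eager execution assumed):

    import tensorflow as tf

    tf.enable_eager_execution()

    t = tf.constant([[1, 2, 3], [4, 5, 6]])

    print(tf.manip.reshape(t, [3, 2]))        # [[1, 2], [3, 4], [5, 6]]
    print(tf.manip.roll(t, shift=1, axis=1))  # [[3, 1, 2], [6, 4, 5]]
    print(tf.manip.reverse(t, axis=[0]))      # [[4, 5, 6], [1, 2, 3]]
    print(tf.manip.tile(t, multiples=[1, 2])) # each row repeated twice along axis 1
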
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.math.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.math.pbtxt
new file mode 100644
index 0000000000..a308c76ebc
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.math.pbtxt
@@ -0,0 +1,239 @@
+path: "tensorflow.math"
+tf_module {
+ member_method {
+ name: "acos"
+ argspec: "args=[\'x\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "acosh"
+ argspec: "args=[\'x\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "add"
+ argspec: "args=[\'x\', \'y\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "asin"
+ argspec: "args=[\'x\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "asinh"
+ argspec: "args=[\'x\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "atan"
+ argspec: "args=[\'x\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "atan2"
+ argspec: "args=[\'y\', \'x\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "atanh"
+ argspec: "args=[\'x\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "bessel_i0"
+ argspec: "args=[\'x\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "bessel_i0e"
+ argspec: "args=[\'x\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "bessel_i1"
+ argspec: "args=[\'x\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "bessel_i1e"
+ argspec: "args=[\'x\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "betainc"
+ argspec: "args=[\'a\', \'b\', \'x\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "ceil"
+ argspec: "args=[\'x\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "cos"
+ argspec: "args=[\'x\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "cosh"
+ argspec: "args=[\'x\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "digamma"
+ argspec: "args=[\'x\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "equal"
+ argspec: "args=[\'x\', \'y\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "erfc"
+ argspec: "args=[\'x\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "exp"
+ argspec: "args=[\'x\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "expm1"
+ argspec: "args=[\'x\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "floor"
+ argspec: "args=[\'x\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "greater"
+ argspec: "args=[\'x\', \'y\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "greater_equal"
+ argspec: "args=[\'x\', \'y\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "igamma"
+ argspec: "args=[\'a\', \'x\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "igammac"
+ argspec: "args=[\'a\', \'x\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "invert_permutation"
+ argspec: "args=[\'x\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "less"
+ argspec: "args=[\'x\', \'y\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "less_equal"
+ argspec: "args=[\'x\', \'y\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "lgamma"
+ argspec: "args=[\'x\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "log"
+ argspec: "args=[\'x\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "log1p"
+ argspec: "args=[\'x\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "logical_and"
+ argspec: "args=[\'x\', \'y\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "logical_not"
+ argspec: "args=[\'x\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "logical_or"
+ argspec: "args=[\'x\', \'y\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "maximum"
+ argspec: "args=[\'x\', \'y\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "minimum"
+ argspec: "args=[\'x\', \'y\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "not_equal"
+ argspec: "args=[\'x\', \'y\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "polygamma"
+ argspec: "args=[\'a\', \'x\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "polyval"
+ argspec: "args=[\'coeffs\', \'x\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "reciprocal"
+ argspec: "args=[\'x\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "rint"
+ argspec: "args=[\'x\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "rsqrt"
+ argspec: "args=[\'x\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "segment_max"
+ argspec: "args=[\'data\', \'segment_ids\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "segment_mean"
+ argspec: "args=[\'data\', \'segment_ids\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "segment_min"
+ argspec: "args=[\'data\', \'segment_ids\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "segment_prod"
+ argspec: "args=[\'data\', \'segment_ids\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "segment_sum"
+ argspec: "args=[\'data\', \'segment_ids\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "sin"
+ argspec: "args=[\'x\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "sinh"
+ argspec: "args=[\'x\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "softplus"
+ argspec: "args=[\'features\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "softsign"
+ argspec: "args=[\'features\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "squared_difference"
+ argspec: "args=[\'x\', \'y\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "tan"
+ argspec: "args=[\'x\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "unsorted_segment_max"
+ argspec: "args=[\'data\', \'segment_ids\', \'num_segments\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "unsorted_segment_min"
+ argspec: "args=[\'data\', \'segment_ids\', \'num_segments\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "unsorted_segment_prod"
+ argspec: "args=[\'data\', \'segment_ids\', \'num_segments\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "unsorted_segment_sum"
+ argspec: "args=[\'data\', \'segment_ids\', \'num_segments\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "zeta"
+ argspec: "args=[\'x\', \'q\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+}
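
A few of the elementwise and segment ops above, as a sketch (eager execution assumed; values illustrative):

    import tensorflow as tf

    tf.enable_eager_execution()

    x = tf.constant([0.0, 1.0, 4.0])
    y = tf.constant([1.0, 1.0, 2.0])

    print(tf.math.add(x, y))                 # [1., 2., 6.]
    print(tf.math.squared_difference(x, y))  # [1., 0., 4.]
    print(tf.math.log1p(x))                  # log(1 + x), stable near zero
    print(tf.math.segment_sum(
        data=tf.constant([1, 2, 3, 4]),
        segment_ids=tf.constant([0, 0, 1, 1])))  # [3, 7]
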
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.metrics.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.metrics.pbtxt
new file mode 100644
index 0000000000..e9b996c9f5
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.metrics.pbtxt
@@ -0,0 +1,135 @@
+path: "tensorflow.metrics"
+tf_module {
+ member_method {
+ name: "accuracy"
+ argspec: "args=[\'labels\', \'predictions\', \'weights\', \'metrics_collections\', \'updates_collections\', \'name\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'None\'], "
+ }
+ member_method {
+ name: "auc"
+ argspec: "args=[\'labels\', \'predictions\', \'weights\', \'num_thresholds\', \'metrics_collections\', \'updates_collections\', \'curve\', \'name\', \'summation_method\'], varargs=None, keywords=None, defaults=[\'None\', \'200\', \'None\', \'None\', \'ROC\', \'None\', \'trapezoidal\'], "
+ }
+ member_method {
+ name: "average_precision_at_k"
+ argspec: "args=[\'labels\', \'predictions\', \'k\', \'weights\', \'metrics_collections\', \'updates_collections\', \'name\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'None\'], "
+ }
+ member_method {
+ name: "false_negatives"
+ argspec: "args=[\'labels\', \'predictions\', \'weights\', \'metrics_collections\', \'updates_collections\', \'name\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'None\'], "
+ }
+ member_method {
+ name: "false_negatives_at_thresholds"
+ argspec: "args=[\'labels\', \'predictions\', \'thresholds\', \'weights\', \'metrics_collections\', \'updates_collections\', \'name\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'None\'], "
+ }
+ member_method {
+ name: "false_positives"
+ argspec: "args=[\'labels\', \'predictions\', \'weights\', \'metrics_collections\', \'updates_collections\', \'name\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'None\'], "
+ }
+ member_method {
+ name: "false_positives_at_thresholds"
+ argspec: "args=[\'labels\', \'predictions\', \'thresholds\', \'weights\', \'metrics_collections\', \'updates_collections\', \'name\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'None\'], "
+ }
+ member_method {
+ name: "mean"
+ argspec: "args=[\'values\', \'weights\', \'metrics_collections\', \'updates_collections\', \'name\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'None\'], "
+ }
+ member_method {
+ name: "mean_absolute_error"
+ argspec: "args=[\'labels\', \'predictions\', \'weights\', \'metrics_collections\', \'updates_collections\', \'name\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'None\'], "
+ }
+ member_method {
+ name: "mean_cosine_distance"
+ argspec: "args=[\'labels\', \'predictions\', \'dim\', \'weights\', \'metrics_collections\', \'updates_collections\', \'name\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'None\'], "
+ }
+ member_method {
+ name: "mean_iou"
+ argspec: "args=[\'labels\', \'predictions\', \'num_classes\', \'weights\', \'metrics_collections\', \'updates_collections\', \'name\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'None\'], "
+ }
+ member_method {
+ name: "mean_per_class_accuracy"
+ argspec: "args=[\'labels\', \'predictions\', \'num_classes\', \'weights\', \'metrics_collections\', \'updates_collections\', \'name\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'None\'], "
+ }
+ member_method {
+ name: "mean_relative_error"
+ argspec: "args=[\'labels\', \'predictions\', \'normalizer\', \'weights\', \'metrics_collections\', \'updates_collections\', \'name\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'None\'], "
+ }
+ member_method {
+ name: "mean_squared_error"
+ argspec: "args=[\'labels\', \'predictions\', \'weights\', \'metrics_collections\', \'updates_collections\', \'name\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'None\'], "
+ }
+ member_method {
+ name: "mean_tensor"
+ argspec: "args=[\'values\', \'weights\', \'metrics_collections\', \'updates_collections\', \'name\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'None\'], "
+ }
+ member_method {
+ name: "percentage_below"
+ argspec: "args=[\'values\', \'threshold\', \'weights\', \'metrics_collections\', \'updates_collections\', \'name\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'None\'], "
+ }
+ member_method {
+ name: "precision"
+ argspec: "args=[\'labels\', \'predictions\', \'weights\', \'metrics_collections\', \'updates_collections\', \'name\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'None\'], "
+ }
+ member_method {
+ name: "precision_at_k"
+ argspec: "args=[\'labels\', \'predictions\', \'k\', \'class_id\', \'weights\', \'metrics_collections\', \'updates_collections\', \'name\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'None\', \'None\'], "
+ }
+ member_method {
+ name: "precision_at_thresholds"
+ argspec: "args=[\'labels\', \'predictions\', \'thresholds\', \'weights\', \'metrics_collections\', \'updates_collections\', \'name\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'None\'], "
+ }
+ member_method {
+ name: "precision_at_top_k"
+ argspec: "args=[\'labels\', \'predictions_idx\', \'k\', \'class_id\', \'weights\', \'metrics_collections\', \'updates_collections\', \'name\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'None\', \'None\', \'None\'], "
+ }
+ member_method {
+ name: "recall"
+ argspec: "args=[\'labels\', \'predictions\', \'weights\', \'metrics_collections\', \'updates_collections\', \'name\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'None\'], "
+ }
+ member_method {
+ name: "recall_at_k"
+ argspec: "args=[\'labels\', \'predictions\', \'k\', \'class_id\', \'weights\', \'metrics_collections\', \'updates_collections\', \'name\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'None\', \'None\'], "
+ }
+ member_method {
+ name: "recall_at_thresholds"
+ argspec: "args=[\'labels\', \'predictions\', \'thresholds\', \'weights\', \'metrics_collections\', \'updates_collections\', \'name\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'None\'], "
+ }
+ member_method {
+ name: "recall_at_top_k"
+ argspec: "args=[\'labels\', \'predictions_idx\', \'k\', \'class_id\', \'weights\', \'metrics_collections\', \'updates_collections\', \'name\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'None\', \'None\', \'None\'], "
+ }
+ member_method {
+ name: "root_mean_squared_error"
+ argspec: "args=[\'labels\', \'predictions\', \'weights\', \'metrics_collections\', \'updates_collections\', \'name\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'None\'], "
+ }
+ member_method {
+ name: "sensitivity_at_specificity"
+ argspec: "args=[\'labels\', \'predictions\', \'specificity\', \'weights\', \'num_thresholds\', \'metrics_collections\', \'updates_collections\', \'name\'], varargs=None, keywords=None, defaults=[\'None\', \'200\', \'None\', \'None\', \'None\'], "
+ }
+ member_method {
+ name: "sparse_average_precision_at_k"
+ argspec: "args=[\'labels\', \'predictions\', \'k\', \'weights\', \'metrics_collections\', \'updates_collections\', \'name\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'None\'], "
+ }
+ member_method {
+ name: "sparse_precision_at_k"
+ argspec: "args=[\'labels\', \'predictions\', \'k\', \'class_id\', \'weights\', \'metrics_collections\', \'updates_collections\', \'name\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'None\', \'None\'], "
+ }
+ member_method {
+ name: "specificity_at_sensitivity"
+ argspec: "args=[\'labels\', \'predictions\', \'sensitivity\', \'weights\', \'num_thresholds\', \'metrics_collections\', \'updates_collections\', \'name\'], varargs=None, keywords=None, defaults=[\'None\', \'200\', \'None\', \'None\', \'None\'], "
+ }
+ member_method {
+ name: "true_negatives"
+ argspec: "args=[\'labels\', \'predictions\', \'weights\', \'metrics_collections\', \'updates_collections\', \'name\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'None\'], "
+ }
+ member_method {
+ name: "true_negatives_at_thresholds"
+ argspec: "args=[\'labels\', \'predictions\', \'thresholds\', \'weights\', \'metrics_collections\', \'updates_collections\', \'name\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'None\'], "
+ }
+ member_method {
+ name: "true_positives"
+ argspec: "args=[\'labels\', \'predictions\', \'weights\', \'metrics_collections\', \'updates_collections\', \'name\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'None\'], "
+ }
+ member_method {
+ name: "true_positives_at_thresholds"
+ argspec: "args=[\'labels\', \'predictions\', \'thresholds\', \'weights\', \'metrics_collections\', \'updates_collections\', \'name\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'None\'], "
+ }
+}
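
Every function here follows the same (value, update_op) contract and keeps its running totals in local variables; a graph-mode sketch with tf.metrics.accuracy (values illustrative):

    import tensorflow as tf

    labels = tf.constant([1, 0, 1, 1])
    predictions = tf.constant([1, 0, 0, 1])

    accuracy, update_op = tf.metrics.accuracy(labels=labels,
                                              predictions=predictions)

    with tf.Session() as sess:
      sess.run(tf.local_variables_initializer())  # metric counters are local vars
      sess.run(update_op)                         # accumulate one batch
      print(sess.run(accuracy))                   # 0.75
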
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.name_scope.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.name_scope.pbtxt
new file mode 100644
index 0000000000..8041897013
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.name_scope.pbtxt
@@ -0,0 +1,13 @@
+path: "tensorflow.name_scope"
+tf_class {
+ is_instance: "<class \'tensorflow.python.framework.ops.name_scope\'>"
+ is_instance: "<type \'object\'>"
+ member {
+ name: "name"
+ mtype: "<type \'property\'>"
+ }
+ member_method {
+ name: "__init__"
+ argspec: "args=[\'self\', \'name\', \'default_name\', \'values\'], varargs=None, keywords=None, defaults=[\'None\', \'None\'], "
+ }
+}
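
name_scope is a context manager; name falls back to default_name when the first argument is None. A sketch under graph mode:

    import tensorflow as tf

    g = tf.Graph()
    with g.as_default():
      with tf.name_scope(None, default_name="block") as scope:
        x = tf.constant(1.0, name="x")
      print(scope)      # "block/"
      print(x.op.name)  # "block/x"
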
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.nn.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.nn.pbtxt
new file mode 100644
index 0000000000..d9e5b0d0fc
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.nn.pbtxt
@@ -0,0 +1,359 @@
+path: "tensorflow.nn"
+tf_module {
+ member {
+ name: "rnn_cell"
+ mtype: "<type \'module\'>"
+ }
+ member {
+ name: "swish"
+ mtype: "<class \'tensorflow.python.framework.function._OverloadedFunction\'>"
+ }
+ member_method {
+ name: "all_candidate_sampler"
+ argspec: "args=[\'true_classes\', \'num_true\', \'num_sampled\', \'unique\', \'seed\', \'name\'], varargs=None, keywords=None, defaults=[\'None\', \'None\'], "
+ }
+ member_method {
+ name: "atrous_conv2d"
+ argspec: "args=[\'value\', \'filters\', \'rate\', \'padding\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "atrous_conv2d_transpose"
+ argspec: "args=[\'value\', \'filters\', \'output_shape\', \'rate\', \'padding\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "avg_pool"
+ argspec: "args=[\'value\', \'ksize\', \'strides\', \'padding\', \'data_format\', \'name\'], varargs=None, keywords=None, defaults=[\'NHWC\', \'None\'], "
+ }
+ member_method {
+ name: "avg_pool3d"
+ argspec: "args=[\'input\', \'ksize\', \'strides\', \'padding\', \'data_format\', \'name\'], varargs=None, keywords=None, defaults=[\'NDHWC\', \'None\'], "
+ }
+ member_method {
+ name: "batch_norm_with_global_normalization"
+ argspec: "args=[\'t\', \'m\', \'v\', \'beta\', \'gamma\', \'variance_epsilon\', \'scale_after_normalization\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "batch_normalization"
+ argspec: "args=[\'x\', \'mean\', \'variance\', \'offset\', \'scale\', \'variance_epsilon\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "bias_add"
+ argspec: "args=[\'value\', \'bias\', \'data_format\', \'name\'], varargs=None, keywords=None, defaults=[\'None\', \'None\'], "
+ }
+ member_method {
+ name: "bidirectional_dynamic_rnn"
+ argspec: "args=[\'cell_fw\', \'cell_bw\', \'inputs\', \'sequence_length\', \'initial_state_fw\', \'initial_state_bw\', \'dtype\', \'parallel_iterations\', \'swap_memory\', \'time_major\', \'scope\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'None\', \'None\', \'False\', \'False\', \'None\'], "
+ }
+ member_method {
+ name: "compute_accidental_hits"
+ argspec: "args=[\'true_classes\', \'sampled_candidates\', \'num_true\', \'seed\', \'name\'], varargs=None, keywords=None, defaults=[\'None\', \'None\'], "
+ }
+ member_method {
+ name: "conv1d"
+ argspec: "args=[\'value\', \'filters\', \'stride\', \'padding\', \'use_cudnn_on_gpu\', \'data_format\', \'name\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\'], "
+ }
+ member_method {
+ name: "conv2d"
+ argspec: "args=[\'input\', \'filter\', \'strides\', \'padding\', \'use_cudnn_on_gpu\', \'data_format\', \'dilations\', \'name\'], varargs=None, keywords=None, defaults=[\'True\', \'NHWC\', \'[1, 1, 1, 1]\', \'None\'], "
+ }
+ member_method {
+ name: "conv2d_backprop_filter"
+ argspec: "args=[\'input\', \'filter_sizes\', \'out_backprop\', \'strides\', \'padding\', \'use_cudnn_on_gpu\', \'data_format\', \'dilations\', \'name\'], varargs=None, keywords=None, defaults=[\'True\', \'NHWC\', \'[1, 1, 1, 1]\', \'None\'], "
+ }
+ member_method {
+ name: "conv2d_backprop_input"
+ argspec: "args=[\'input_sizes\', \'filter\', \'out_backprop\', \'strides\', \'padding\', \'use_cudnn_on_gpu\', \'data_format\', \'dilations\', \'name\'], varargs=None, keywords=None, defaults=[\'True\', \'NHWC\', \'[1, 1, 1, 1]\', \'None\'], "
+ }
+ member_method {
+ name: "conv2d_transpose"
+ argspec: "args=[\'value\', \'filter\', \'output_shape\', \'strides\', \'padding\', \'data_format\', \'name\'], varargs=None, keywords=None, defaults=[\'SAME\', \'NHWC\', \'None\'], "
+ }
+ member_method {
+ name: "conv3d"
+ argspec: "args=[\'input\', \'filter\', \'strides\', \'padding\', \'data_format\', \'dilations\', \'name\'], varargs=None, keywords=None, defaults=[\'NDHWC\', \'[1, 1, 1, 1, 1]\', \'None\'], "
+ }
+ member_method {
+ name: "conv3d_backprop_filter_v2"
+ argspec: "args=[\'input\', \'filter_sizes\', \'out_backprop\', \'strides\', \'padding\', \'data_format\', \'dilations\', \'name\'], varargs=None, keywords=None, defaults=[\'NDHWC\', \'[1, 1, 1, 1, 1]\', \'None\'], "
+ }
+ member_method {
+ name: "conv3d_transpose"
+ argspec: "args=[\'value\', \'filter\', \'output_shape\', \'strides\', \'padding\', \'data_format\', \'name\'], varargs=None, keywords=None, defaults=[\'SAME\', \'NDHWC\', \'None\'], "
+ }
+ member_method {
+ name: "convolution"
+ argspec: "args=[\'input\', \'filter\', \'padding\', \'strides\', \'dilation_rate\', \'name\', \'data_format\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'None\'], "
+ }
+ member_method {
+ name: "crelu"
+ argspec: "args=[\'features\', \'name\', \'axis\'], varargs=None, keywords=None, defaults=[\'None\', \'-1\'], "
+ }
+ member_method {
+ name: "ctc_beam_search_decoder"
+ argspec: "args=[\'inputs\', \'sequence_length\', \'beam_width\', \'top_paths\', \'merge_repeated\'], varargs=None, keywords=None, defaults=[\'100\', \'1\', \'True\'], "
+ }
+ member_method {
+ name: "ctc_greedy_decoder"
+ argspec: "args=[\'inputs\', \'sequence_length\', \'merge_repeated\'], varargs=None, keywords=None, defaults=[\'True\'], "
+ }
+ member_method {
+ name: "ctc_loss"
+ argspec: "args=[\'labels\', \'inputs\', \'sequence_length\', \'preprocess_collapse_repeated\', \'ctc_merge_repeated\', \'ignore_longer_outputs_than_inputs\', \'time_major\'], varargs=None, keywords=None, defaults=[\'False\', \'True\', \'False\', \'True\'], "
+ }
+ member_method {
+ name: "depthwise_conv2d"
+ argspec: "args=[\'input\', \'filter\', \'strides\', \'padding\', \'rate\', \'name\', \'data_format\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\'], "
+ }
+ member_method {
+ name: "depthwise_conv2d_native"
+ argspec: "args=[\'input\', \'filter\', \'strides\', \'padding\', \'data_format\', \'dilations\', \'name\'], varargs=None, keywords=None, defaults=[\'NHWC\', \'[1, 1, 1, 1]\', \'None\'], "
+ }
+ member_method {
+ name: "depthwise_conv2d_native_backprop_filter"
+ argspec: "args=[\'input\', \'filter_sizes\', \'out_backprop\', \'strides\', \'padding\', \'data_format\', \'dilations\', \'name\'], varargs=None, keywords=None, defaults=[\'NHWC\', \'[1, 1, 1, 1]\', \'None\'], "
+ }
+ member_method {
+ name: "depthwise_conv2d_native_backprop_input"
+ argspec: "args=[\'input_sizes\', \'filter\', \'out_backprop\', \'strides\', \'padding\', \'data_format\', \'dilations\', \'name\'], varargs=None, keywords=None, defaults=[\'NHWC\', \'[1, 1, 1, 1]\', \'None\'], "
+ }
+ member_method {
+ name: "dilation2d"
+ argspec: "args=[\'input\', \'filter\', \'strides\', \'rates\', \'padding\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "dropout"
+ argspec: "args=[\'x\', \'keep_prob\', \'noise_shape\', \'seed\', \'name\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\'], "
+ }
+ member_method {
+ name: "dynamic_rnn"
+ argspec: "args=[\'cell\', \'inputs\', \'sequence_length\', \'initial_state\', \'dtype\', \'parallel_iterations\', \'swap_memory\', \'time_major\', \'scope\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'None\', \'False\', \'False\', \'None\'], "
+ }
+ member_method {
+ name: "elu"
+ argspec: "args=[\'features\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "embedding_lookup"
+ argspec: "args=[\'params\', \'ids\', \'partition_strategy\', \'name\', \'validate_indices\', \'max_norm\'], varargs=None, keywords=None, defaults=[\'mod\', \'None\', \'True\', \'None\'], "
+ }
+ member_method {
+ name: "embedding_lookup_sparse"
+ argspec: "args=[\'params\', \'sp_ids\', \'sp_weights\', \'partition_strategy\', \'name\', \'combiner\', \'max_norm\'], varargs=None, keywords=None, defaults=[\'mod\', \'None\', \'None\', \'None\'], "
+ }
+ member_method {
+ name: "erosion2d"
+ argspec: "args=[\'value\', \'kernel\', \'strides\', \'rates\', \'padding\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "fixed_unigram_candidate_sampler"
+ argspec: "args=[\'true_classes\', \'num_true\', \'num_sampled\', \'unique\', \'range_max\', \'vocab_file\', \'distortion\', \'num_reserved_ids\', \'num_shards\', \'shard\', \'unigrams\', \'seed\', \'name\'], varargs=None, keywords=None, defaults=[\'\', \'1.0\', \'0\', \'1\', \'0\', \'()\', \'None\', \'None\'], "
+ }
+ member_method {
+ name: "fractional_avg_pool"
+ argspec: "args=[\'value\', \'pooling_ratio\', \'pseudo_random\', \'overlapping\', \'deterministic\', \'seed\', \'seed2\', \'name\'], varargs=None, keywords=None, defaults=[\'False\', \'False\', \'False\', \'0\', \'0\', \'None\'], "
+ }
+ member_method {
+ name: "fractional_max_pool"
+ argspec: "args=[\'value\', \'pooling_ratio\', \'pseudo_random\', \'overlapping\', \'deterministic\', \'seed\', \'seed2\', \'name\'], varargs=None, keywords=None, defaults=[\'False\', \'False\', \'False\', \'0\', \'0\', \'None\'], "
+ }
+ member_method {
+ name: "fused_batch_norm"
+ argspec: "args=[\'x\', \'scale\', \'offset\', \'mean\', \'variance\', \'epsilon\', \'data_format\', \'is_training\', \'name\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'0.001\', \'NHWC\', \'True\', \'None\'], "
+ }
+ member_method {
+ name: "in_top_k"
+ argspec: "args=[\'predictions\', \'targets\', \'k\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "l2_loss"
+ argspec: "args=[\'t\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "l2_normalize"
+ argspec: "args=[\'x\', \'axis\', \'epsilon\', \'name\', \'dim\'], varargs=None, keywords=None, defaults=[\'None\', \'1e-12\', \'None\', \'None\'], "
+ }
+ member_method {
+ name: "leaky_relu"
+ argspec: "args=[\'features\', \'alpha\', \'name\'], varargs=None, keywords=None, defaults=[\'0.2\', \'None\'], "
+ }
+ member_method {
+ name: "learned_unigram_candidate_sampler"
+ argspec: "args=[\'true_classes\', \'num_true\', \'num_sampled\', \'unique\', \'range_max\', \'seed\', \'name\'], varargs=None, keywords=None, defaults=[\'None\', \'None\'], "
+ }
+ member_method {
+ name: "local_response_normalization"
+ argspec: "args=[\'input\', \'depth_radius\', \'bias\', \'alpha\', \'beta\', \'name\'], varargs=None, keywords=None, defaults=[\'5\', \'1\', \'1\', \'0.5\', \'None\'], "
+ }
+ member_method {
+ name: "log_poisson_loss"
+ argspec: "args=[\'targets\', \'log_input\', \'compute_full_loss\', \'name\'], varargs=None, keywords=None, defaults=[\'False\', \'None\'], "
+ }
+ member_method {
+ name: "log_softmax"
+ argspec: "args=[\'logits\', \'axis\', \'name\', \'dim\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\'], "
+ }
+ member_method {
+ name: "log_uniform_candidate_sampler"
+ argspec: "args=[\'true_classes\', \'num_true\', \'num_sampled\', \'unique\', \'range_max\', \'seed\', \'name\'], varargs=None, keywords=None, defaults=[\'None\', \'None\'], "
+ }
+ member_method {
+ name: "lrn"
+ argspec: "args=[\'input\', \'depth_radius\', \'bias\', \'alpha\', \'beta\', \'name\'], varargs=None, keywords=None, defaults=[\'5\', \'1\', \'1\', \'0.5\', \'None\'], "
+ }
+ member_method {
+ name: "max_pool"
+ argspec: "args=[\'value\', \'ksize\', \'strides\', \'padding\', \'data_format\', \'name\'], varargs=None, keywords=None, defaults=[\'NHWC\', \'None\'], "
+ }
+ member_method {
+ name: "max_pool3d"
+ argspec: "args=[\'input\', \'ksize\', \'strides\', \'padding\', \'data_format\', \'name\'], varargs=None, keywords=None, defaults=[\'NDHWC\', \'None\'], "
+ }
+ member_method {
+ name: "max_pool_with_argmax"
+ argspec: "args=[\'input\', \'ksize\', \'strides\', \'padding\', \'Targmax\', \'name\'], varargs=None, keywords=None, defaults=[\"<dtype: \'int64\'>\", \'None\'], "
+ }
+ member_method {
+ name: "moments"
+ argspec: "args=[\'x\', \'axes\', \'shift\', \'name\', \'keep_dims\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'False\'], "
+ }
+ member_method {
+ name: "nce_loss"
+ argspec: "args=[\'weights\', \'biases\', \'labels\', \'inputs\', \'num_sampled\', \'num_classes\', \'num_true\', \'sampled_values\', \'remove_accidental_hits\', \'partition_strategy\', \'name\'], varargs=None, keywords=None, defaults=[\'1\', \'None\', \'False\', \'mod\', \'nce_loss\'], "
+ }
+ member_method {
+ name: "normalize_moments"
+ argspec: "args=[\'counts\', \'mean_ss\', \'variance_ss\', \'shift\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "pool"
+ argspec: "args=[\'input\', \'window_shape\', \'pooling_type\', \'padding\', \'dilation_rate\', \'strides\', \'name\', \'data_format\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'None\'], "
+ }
+ member_method {
+ name: "quantized_avg_pool"
+ argspec: "args=[\'input\', \'min_input\', \'max_input\', \'ksize\', \'strides\', \'padding\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "quantized_conv2d"
+ argspec: "args=[\'input\', \'filter\', \'min_input\', \'max_input\', \'min_filter\', \'max_filter\', \'strides\', \'padding\', \'out_type\', \'dilations\', \'name\'], varargs=None, keywords=None, defaults=[\"<dtype: \'qint32\'>\", \'[1, 1, 1, 1]\', \'None\'], "
+ }
+ member_method {
+ name: "quantized_max_pool"
+ argspec: "args=[\'input\', \'min_input\', \'max_input\', \'ksize\', \'strides\', \'padding\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "quantized_relu_x"
+ argspec: "args=[\'features\', \'max_value\', \'min_features\', \'max_features\', \'out_type\', \'name\'], varargs=None, keywords=None, defaults=[\"<dtype: \'quint8\'>\", \'None\'], "
+ }
+ member_method {
+ name: "raw_rnn"
+ argspec: "args=[\'cell\', \'loop_fn\', \'parallel_iterations\', \'swap_memory\', \'scope\'], varargs=None, keywords=None, defaults=[\'None\', \'False\', \'None\'], "
+ }
+ member_method {
+ name: "relu"
+ argspec: "args=[\'features\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "relu6"
+ argspec: "args=[\'features\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "relu_layer"
+ argspec: "args=[\'x\', \'weights\', \'biases\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "safe_embedding_lookup_sparse"
+ argspec: "args=[\'embedding_weights\', \'sparse_ids\', \'sparse_weights\', \'combiner\', \'default_id\', \'name\', \'partition_strategy\', \'max_norm\'], varargs=None, keywords=None, defaults=[\'None\', \'mean\', \'None\', \'None\', \'div\', \'None\'], "
+ }
+ member_method {
+ name: "sampled_softmax_loss"
+ argspec: "args=[\'weights\', \'biases\', \'labels\', \'inputs\', \'num_sampled\', \'num_classes\', \'num_true\', \'sampled_values\', \'remove_accidental_hits\', \'partition_strategy\', \'name\', \'seed\'], varargs=None, keywords=None, defaults=[\'1\', \'None\', \'True\', \'mod\', \'sampled_softmax_loss\', \'None\'], "
+ }
+ member_method {
+ name: "selu"
+ argspec: "args=[\'features\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "separable_conv2d"
+ argspec: "args=[\'input\', \'depthwise_filter\', \'pointwise_filter\', \'strides\', \'padding\', \'rate\', \'name\', \'data_format\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\'], "
+ }
+ member_method {
+ name: "sigmoid"
+ argspec: "args=[\'x\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "sigmoid_cross_entropy_with_logits"
+ argspec: "args=[\'_sentinel\', \'labels\', \'logits\', \'name\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'None\'], "
+ }
+ member_method {
+ name: "softmax"
+ argspec: "args=[\'logits\', \'axis\', \'name\', \'dim\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\'], "
+ }
+ member_method {
+ name: "softmax_cross_entropy_with_logits"
+ argspec: "args=[\'_sentinel\', \'labels\', \'logits\', \'dim\', \'name\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'-1\', \'None\'], "
+ }
+ member_method {
+ name: "softmax_cross_entropy_with_logits_v2"
+ argspec: "args=[\'_sentinel\', \'labels\', \'logits\', \'dim\', \'name\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'-1\', \'None\'], "
+ }
+ member_method {
+ name: "softplus"
+ argspec: "args=[\'features\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "softsign"
+ argspec: "args=[\'features\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "sparse_softmax_cross_entropy_with_logits"
+ argspec: "args=[\'_sentinel\', \'labels\', \'logits\', \'name\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'None\'], "
+ }
+ member_method {
+ name: "static_bidirectional_rnn"
+ argspec: "args=[\'cell_fw\', \'cell_bw\', \'inputs\', \'initial_state_fw\', \'initial_state_bw\', \'dtype\', \'sequence_length\', \'scope\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'None\', \'None\'], "
+ }
+ member_method {
+ name: "static_rnn"
+ argspec: "args=[\'cell\', \'inputs\', \'initial_state\', \'dtype\', \'sequence_length\', \'scope\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'None\'], "
+ }
+ member_method {
+ name: "static_state_saving_rnn"
+ argspec: "args=[\'cell\', \'inputs\', \'state_saver\', \'state_name\', \'sequence_length\', \'scope\'], varargs=None, keywords=None, defaults=[\'None\', \'None\'], "
+ }
+ member_method {
+ name: "sufficient_statistics"
+ argspec: "args=[\'x\', \'axes\', \'shift\', \'keep_dims\', \'name\'], varargs=None, keywords=None, defaults=[\'None\', \'False\', \'None\'], "
+ }
+ member_method {
+ name: "tanh"
+ argspec: "args=[\'x\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "top_k"
+ argspec: "args=[\'input\', \'k\', \'sorted\', \'name\'], varargs=None, keywords=None, defaults=[\'1\', \'True\', \'None\'], "
+ }
+ member_method {
+ name: "uniform_candidate_sampler"
+ argspec: "args=[\'true_classes\', \'num_true\', \'num_sampled\', \'unique\', \'range_max\', \'seed\', \'name\'], varargs=None, keywords=None, defaults=[\'None\', \'None\'], "
+ }
+ member_method {
+ name: "weighted_cross_entropy_with_logits"
+ argspec: "args=[\'targets\', \'logits\', \'pos_weight\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "weighted_moments"
+ argspec: "args=[\'x\', \'axes\', \'frequency_weights\', \'name\', \'keep_dims\'], varargs=None, keywords=None, defaults=[\'None\', \'False\'], "
+ }
+ member_method {
+ name: "with_space_to_batch"
+ argspec: "args=[\'input\', \'dilation_rate\', \'padding\', \'op\', \'filter_shape\', \'spatial_dims\', \'data_format\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\'], "
+ }
+ member_method {
+ name: "xw_plus_b"
+ argspec: "args=[\'x\', \'weights\', \'biases\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "zero_fraction"
+ argspec: "args=[\'value\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+}
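
Note (not part of the diff): a minimal sketch exercising two of the tf.nn endpoints whose argspecs the golden file above records. The tensor values and shapes are assumptions for illustration only; labels and logits are passed by keyword because of the leading _sentinel argument shown in the argspec.

    # Illustrative only; values and shapes are assumed for the example.
    import tensorflow as tf

    logits = tf.constant([[2.0, 1.0, 0.1]])
    labels = tf.constant([[1.0, 0.0, 0.0]])

    # Keyword arguments match the argspec recorded above.
    loss = tf.nn.softmax_cross_entropy_with_logits_v2(labels=labels, logits=logits)
    activated = tf.nn.relu(logits)

    with tf.Session() as sess:
        print(sess.run([loss, activated]))
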
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.nn.rnn_cell.-basic-l-s-t-m-cell.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.nn.rnn_cell.-basic-l-s-t-m-cell.pbtxt
new file mode 100644
index 0000000000..88b8f37c4f
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.nn.rnn_cell.-basic-l-s-t-m-cell.pbtxt
@@ -0,0 +1,202 @@
+path: "tensorflow.nn.rnn_cell.BasicLSTMCell"
+tf_class {
+ is_instance: "<class \'tensorflow.python.ops.rnn_cell_impl.BasicLSTMCell\'>"
+ is_instance: "<class \'tensorflow.python.ops.rnn_cell_impl.LayerRNNCell\'>"
+ is_instance: "<class \'tensorflow.python.ops.rnn_cell_impl.RNNCell\'>"
+ is_instance: "<class \'tensorflow.python.layers.base.Layer\'>"
+ is_instance: "<class \'tensorflow.python.keras.engine.base_layer.Layer\'>"
+ is_instance: "<class \'tensorflow.python.training.checkpointable.base.CheckpointableBase\'>"
+ is_instance: "<type \'object\'>"
+ member {
+ name: "activity_regularizer"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "dtype"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "graph"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "inbound_nodes"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "input"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "input_mask"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "input_shape"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "losses"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "name"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "non_trainable_variables"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "non_trainable_weights"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "outbound_nodes"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "output"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "output_mask"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "output_shape"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "output_size"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "scope_name"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "state_size"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "trainable_variables"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "trainable_weights"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "updates"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "variables"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "weights"
+ mtype: "<type \'property\'>"
+ }
+ member_method {
+ name: "__init__"
+ argspec: "args=[\'self\', \'num_units\', \'forget_bias\', \'state_is_tuple\', \'activation\', \'reuse\', \'name\', \'dtype\'], varargs=None, keywords=kwargs, defaults=[\'1.0\', \'True\', \'None\', \'None\', \'None\', \'None\'], "
+ }
+ member_method {
+ name: "add_loss"
+ argspec: "args=[\'self\', \'losses\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "add_update"
+ argspec: "args=[\'self\', \'updates\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "add_variable"
+ argspec: "args=[\'self\'], varargs=args, keywords=kwargs, defaults=None"
+ }
+ member_method {
+ name: "add_weight"
+ argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'use_resource\', \'synchronization\', \'aggregation\', \'partitioner\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'VariableSynchronization.AUTO\', \'VariableAggregation.NONE\', \'None\'], "
+ }
+ member_method {
+ name: "apply"
+ argspec: "args=[\'self\', \'inputs\'], varargs=args, keywords=kwargs, defaults=None"
+ }
+ member_method {
+ name: "build"
+ argspec: "args=[\'instance\', \'input_shape\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "call"
+ argspec: "args=[\'self\', \'inputs\', \'state\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "compute_mask"
+ argspec: "args=[\'self\', \'inputs\', \'mask\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "compute_output_shape"
+ argspec: "args=[\'self\', \'input_shape\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "count_params"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "from_config"
+ argspec: "args=[\'cls\', \'config\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_config"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_initial_state"
+ argspec: "args=[\'self\', \'inputs\', \'batch_size\', \'dtype\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\'], "
+ }
+ member_method {
+ name: "get_input_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_input_mask_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_input_shape_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_losses_for"
+ argspec: "args=[\'self\', \'inputs\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_output_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_output_mask_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_output_shape_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_updates_for"
+ argspec: "args=[\'self\', \'inputs\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_weights"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "set_weights"
+ argspec: "args=[\'self\', \'weights\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "zero_state"
+ argspec: "args=[\'self\', \'batch_size\', \'dtype\'], varargs=None, keywords=None, defaults=None"
+ }
+}
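
Note (not part of the diff): a minimal usage sketch based on the __init__ and zero_state argspecs recorded above for BasicLSTMCell; the unit count, batch size, and input width are assumptions.

    # Illustrative only; sizes are assumed for the example.
    import tensorflow as tf

    cell = tf.nn.rnn_cell.BasicLSTMCell(num_units=64, forget_bias=1.0,
                                        state_is_tuple=True)
    state = cell.zero_state(batch_size=32, dtype=tf.float32)  # an LSTMStateTuple(c, h)
    inputs = tf.placeholder(tf.float32, [32, 128])
    output, new_state = cell(inputs, state)  # output has shape [32, 64]
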
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.nn.rnn_cell.-basic-r-n-n-cell.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.nn.rnn_cell.-basic-r-n-n-cell.pbtxt
new file mode 100644
index 0000000000..a4483fefa2
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.nn.rnn_cell.-basic-r-n-n-cell.pbtxt
@@ -0,0 +1,202 @@
+path: "tensorflow.nn.rnn_cell.BasicRNNCell"
+tf_class {
+ is_instance: "<class \'tensorflow.python.ops.rnn_cell_impl.BasicRNNCell\'>"
+ is_instance: "<class \'tensorflow.python.ops.rnn_cell_impl.LayerRNNCell\'>"
+ is_instance: "<class \'tensorflow.python.ops.rnn_cell_impl.RNNCell\'>"
+ is_instance: "<class \'tensorflow.python.layers.base.Layer\'>"
+ is_instance: "<class \'tensorflow.python.keras.engine.base_layer.Layer\'>"
+ is_instance: "<class \'tensorflow.python.training.checkpointable.base.CheckpointableBase\'>"
+ is_instance: "<type \'object\'>"
+ member {
+ name: "activity_regularizer"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "dtype"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "graph"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "inbound_nodes"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "input"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "input_mask"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "input_shape"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "losses"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "name"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "non_trainable_variables"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "non_trainable_weights"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "outbound_nodes"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "output"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "output_mask"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "output_shape"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "output_size"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "scope_name"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "state_size"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "trainable_variables"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "trainable_weights"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "updates"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "variables"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "weights"
+ mtype: "<type \'property\'>"
+ }
+ member_method {
+ name: "__init__"
+ argspec: "args=[\'self\', \'num_units\', \'activation\', \'reuse\', \'name\', \'dtype\'], varargs=None, keywords=kwargs, defaults=[\'None\', \'None\', \'None\', \'None\'], "
+ }
+ member_method {
+ name: "add_loss"
+ argspec: "args=[\'self\', \'losses\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "add_update"
+ argspec: "args=[\'self\', \'updates\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "add_variable"
+ argspec: "args=[\'self\'], varargs=args, keywords=kwargs, defaults=None"
+ }
+ member_method {
+ name: "add_weight"
+ argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'use_resource\', \'synchronization\', \'aggregation\', \'partitioner\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'VariableSynchronization.AUTO\', \'VariableAggregation.NONE\', \'None\'], "
+ }
+ member_method {
+ name: "apply"
+ argspec: "args=[\'self\', \'inputs\'], varargs=args, keywords=kwargs, defaults=None"
+ }
+ member_method {
+ name: "build"
+ argspec: "args=[\'instance\', \'input_shape\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "call"
+ argspec: "args=[\'self\', \'inputs\', \'state\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "compute_mask"
+ argspec: "args=[\'self\', \'inputs\', \'mask\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "compute_output_shape"
+ argspec: "args=[\'self\', \'input_shape\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "count_params"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "from_config"
+ argspec: "args=[\'cls\', \'config\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_config"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_initial_state"
+ argspec: "args=[\'self\', \'inputs\', \'batch_size\', \'dtype\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\'], "
+ }
+ member_method {
+ name: "get_input_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_input_mask_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_input_shape_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_losses_for"
+ argspec: "args=[\'self\', \'inputs\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_output_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_output_mask_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_output_shape_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_updates_for"
+ argspec: "args=[\'self\', \'inputs\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_weights"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "set_weights"
+ argspec: "args=[\'self\', \'weights\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "zero_state"
+ argspec: "args=[\'self\', \'batch_size\', \'dtype\'], varargs=None, keywords=None, defaults=None"
+ }
+}
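
Note (not part of the diff): a sketch of a BasicRNNCell driven by tf.nn.static_rnn, whose argspec also appears earlier in this diff; the sequence length and feature sizes are assumptions.

    # Illustrative only; 10 time steps and the sizes are assumed.
    import tensorflow as tf

    cell = tf.nn.rnn_cell.BasicRNNCell(num_units=32)
    inputs = [tf.placeholder(tf.float32, [None, 16]) for _ in range(10)]
    outputs, final_state = tf.nn.static_rnn(cell, inputs, dtype=tf.float32)
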
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.nn.rnn_cell.-device-wrapper.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.nn.rnn_cell.-device-wrapper.pbtxt
new file mode 100644
index 0000000000..381c4975d7
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.nn.rnn_cell.-device-wrapper.pbtxt
@@ -0,0 +1,201 @@
+path: "tensorflow.nn.rnn_cell.DeviceWrapper"
+tf_class {
+ is_instance: "<class \'tensorflow.python.ops.rnn_cell_impl.DeviceWrapper\'>"
+ is_instance: "<class \'tensorflow.python.ops.rnn_cell_impl.RNNCell\'>"
+ is_instance: "<class \'tensorflow.python.layers.base.Layer\'>"
+ is_instance: "<class \'tensorflow.python.keras.engine.base_layer.Layer\'>"
+ is_instance: "<class \'tensorflow.python.training.checkpointable.base.CheckpointableBase\'>"
+ is_instance: "<type \'object\'>"
+ member {
+ name: "activity_regularizer"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "dtype"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "graph"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "inbound_nodes"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "input"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "input_mask"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "input_shape"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "losses"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "name"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "non_trainable_variables"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "non_trainable_weights"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "outbound_nodes"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "output"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "output_mask"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "output_shape"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "output_size"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "scope_name"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "state_size"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "trainable_variables"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "trainable_weights"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "updates"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "variables"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "weights"
+ mtype: "<type \'property\'>"
+ }
+ member_method {
+ name: "__init__"
+ argspec: "args=[\'self\', \'cell\', \'device\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "add_loss"
+ argspec: "args=[\'self\', \'losses\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "add_update"
+ argspec: "args=[\'self\', \'updates\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "add_variable"
+ argspec: "args=[\'self\'], varargs=args, keywords=kwargs, defaults=None"
+ }
+ member_method {
+ name: "add_weight"
+ argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'use_resource\', \'synchronization\', \'aggregation\', \'partitioner\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'VariableSynchronization.AUTO\', \'VariableAggregation.NONE\', \'None\'], "
+ }
+ member_method {
+ name: "apply"
+ argspec: "args=[\'self\', \'inputs\'], varargs=args, keywords=kwargs, defaults=None"
+ }
+ member_method {
+ name: "build"
+ argspec: "args=[\'self\', \'_\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "call"
+ argspec: "args=[\'self\', \'inputs\'], varargs=None, keywords=kwargs, defaults=None"
+ }
+ member_method {
+ name: "compute_mask"
+ argspec: "args=[\'self\', \'inputs\', \'mask\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "compute_output_shape"
+ argspec: "args=[\'self\', \'input_shape\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "count_params"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "from_config"
+ argspec: "args=[\'cls\', \'config\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_config"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_initial_state"
+ argspec: "args=[\'self\', \'inputs\', \'batch_size\', \'dtype\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\'], "
+ }
+ member_method {
+ name: "get_input_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_input_mask_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_input_shape_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_losses_for"
+ argspec: "args=[\'self\', \'inputs\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_output_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_output_mask_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_output_shape_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_updates_for"
+ argspec: "args=[\'self\', \'inputs\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_weights"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "set_weights"
+ argspec: "args=[\'self\', \'weights\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "zero_state"
+ argspec: "args=[\'self\', \'batch_size\', \'dtype\'], varargs=None, keywords=None, defaults=None"
+ }
+}
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.nn.rnn_cell.-dropout-wrapper.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.nn.rnn_cell.-dropout-wrapper.pbtxt
new file mode 100644
index 0000000000..912365a28b
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.nn.rnn_cell.-dropout-wrapper.pbtxt
@@ -0,0 +1,205 @@
+path: "tensorflow.nn.rnn_cell.DropoutWrapper"
+tf_class {
+ is_instance: "<class \'tensorflow.python.ops.rnn_cell_impl.DropoutWrapper\'>"
+ is_instance: "<class \'tensorflow.python.ops.rnn_cell_impl.RNNCell\'>"
+ is_instance: "<class \'tensorflow.python.layers.base.Layer\'>"
+ is_instance: "<class \'tensorflow.python.keras.engine.base_layer.Layer\'>"
+ is_instance: "<class \'tensorflow.python.training.checkpointable.base.CheckpointableBase\'>"
+ is_instance: "<type \'object\'>"
+ member {
+ name: "activity_regularizer"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "dtype"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "graph"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "inbound_nodes"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "input"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "input_mask"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "input_shape"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "losses"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "name"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "non_trainable_variables"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "non_trainable_weights"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "outbound_nodes"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "output"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "output_mask"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "output_shape"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "output_size"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "scope_name"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "state_size"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "trainable_variables"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "trainable_weights"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "updates"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "variables"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "weights"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "wrapped_cell"
+ mtype: "<type \'property\'>"
+ }
+ member_method {
+ name: "__init__"
+ argspec: "args=[\'self\', \'cell\', \'input_keep_prob\', \'output_keep_prob\', \'state_keep_prob\', \'variational_recurrent\', \'input_size\', \'dtype\', \'seed\', \'dropout_state_filter_visitor\'], varargs=None, keywords=None, defaults=[\'1.0\', \'1.0\', \'1.0\', \'False\', \'None\', \'None\', \'None\', \'None\'], "
+ }
+ member_method {
+ name: "add_loss"
+ argspec: "args=[\'self\', \'losses\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "add_update"
+ argspec: "args=[\'self\', \'updates\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "add_variable"
+ argspec: "args=[\'self\'], varargs=args, keywords=kwargs, defaults=None"
+ }
+ member_method {
+ name: "add_weight"
+ argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'use_resource\', \'synchronization\', \'aggregation\', \'partitioner\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'VariableSynchronization.AUTO\', \'VariableAggregation.NONE\', \'None\'], "
+ }
+ member_method {
+ name: "apply"
+ argspec: "args=[\'self\', \'inputs\'], varargs=args, keywords=kwargs, defaults=None"
+ }
+ member_method {
+ name: "build"
+ argspec: "args=[\'self\', \'_\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "call"
+ argspec: "args=[\'self\', \'inputs\'], varargs=None, keywords=kwargs, defaults=None"
+ }
+ member_method {
+ name: "compute_mask"
+ argspec: "args=[\'self\', \'inputs\', \'mask\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "compute_output_shape"
+ argspec: "args=[\'self\', \'input_shape\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "count_params"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "from_config"
+ argspec: "args=[\'cls\', \'config\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_config"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_initial_state"
+ argspec: "args=[\'self\', \'inputs\', \'batch_size\', \'dtype\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\'], "
+ }
+ member_method {
+ name: "get_input_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_input_mask_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_input_shape_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_losses_for"
+ argspec: "args=[\'self\', \'inputs\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_output_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_output_mask_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_output_shape_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_updates_for"
+ argspec: "args=[\'self\', \'inputs\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_weights"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "set_weights"
+ argspec: "args=[\'self\', \'weights\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "zero_state"
+ argspec: "args=[\'self\', \'batch_size\', \'dtype\'], varargs=None, keywords=None, defaults=None"
+ }
+}
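
Note (not part of the diff): a sketch following the DropoutWrapper __init__ argspec above; the wrapped cell type and keep probabilities are assumptions. The wrapped_cell property listed above exposes the inner cell.

    # Illustrative only; keep probabilities are assumed.
    import tensorflow as tf

    base_cell = tf.nn.rnn_cell.GRUCell(num_units=64)
    cell = tf.nn.rnn_cell.DropoutWrapper(base_cell,
                                         input_keep_prob=0.9,
                                         output_keep_prob=0.8,
                                         state_keep_prob=1.0)
    print(cell.wrapped_cell is base_cell)  # True
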
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.nn.rnn_cell.-g-r-u-cell.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.nn.rnn_cell.-g-r-u-cell.pbtxt
new file mode 100644
index 0000000000..a4bb3219c7
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.nn.rnn_cell.-g-r-u-cell.pbtxt
@@ -0,0 +1,202 @@
+path: "tensorflow.nn.rnn_cell.GRUCell"
+tf_class {
+ is_instance: "<class \'tensorflow.python.ops.rnn_cell_impl.GRUCell\'>"
+ is_instance: "<class \'tensorflow.python.ops.rnn_cell_impl.LayerRNNCell\'>"
+ is_instance: "<class \'tensorflow.python.ops.rnn_cell_impl.RNNCell\'>"
+ is_instance: "<class \'tensorflow.python.layers.base.Layer\'>"
+ is_instance: "<class \'tensorflow.python.keras.engine.base_layer.Layer\'>"
+ is_instance: "<class \'tensorflow.python.training.checkpointable.base.CheckpointableBase\'>"
+ is_instance: "<type \'object\'>"
+ member {
+ name: "activity_regularizer"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "dtype"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "graph"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "inbound_nodes"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "input"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "input_mask"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "input_shape"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "losses"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "name"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "non_trainable_variables"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "non_trainable_weights"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "outbound_nodes"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "output"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "output_mask"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "output_shape"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "output_size"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "scope_name"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "state_size"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "trainable_variables"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "trainable_weights"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "updates"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "variables"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "weights"
+ mtype: "<type \'property\'>"
+ }
+ member_method {
+ name: "__init__"
+ argspec: "args=[\'self\', \'num_units\', \'activation\', \'reuse\', \'kernel_initializer\', \'bias_initializer\', \'name\', \'dtype\'], varargs=None, keywords=kwargs, defaults=[\'None\', \'None\', \'None\', \'None\', \'None\', \'None\'], "
+ }
+ member_method {
+ name: "add_loss"
+ argspec: "args=[\'self\', \'losses\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "add_update"
+ argspec: "args=[\'self\', \'updates\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "add_variable"
+ argspec: "args=[\'self\'], varargs=args, keywords=kwargs, defaults=None"
+ }
+ member_method {
+ name: "add_weight"
+ argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'use_resource\', \'synchronization\', \'aggregation\', \'partitioner\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'VariableSynchronization.AUTO\', \'VariableAggregation.NONE\', \'None\'], "
+ }
+ member_method {
+ name: "apply"
+ argspec: "args=[\'self\', \'inputs\'], varargs=args, keywords=kwargs, defaults=None"
+ }
+ member_method {
+ name: "build"
+ argspec: "args=[\'instance\', \'input_shape\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "call"
+ argspec: "args=[\'self\', \'inputs\', \'state\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "compute_mask"
+ argspec: "args=[\'self\', \'inputs\', \'mask\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "compute_output_shape"
+ argspec: "args=[\'self\', \'input_shape\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "count_params"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "from_config"
+ argspec: "args=[\'cls\', \'config\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_config"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_initial_state"
+ argspec: "args=[\'self\', \'inputs\', \'batch_size\', \'dtype\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\'], "
+ }
+ member_method {
+ name: "get_input_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_input_mask_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_input_shape_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_losses_for"
+ argspec: "args=[\'self\', \'inputs\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_output_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_output_mask_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_output_shape_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_updates_for"
+ argspec: "args=[\'self\', \'inputs\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_weights"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "set_weights"
+ argspec: "args=[\'self\', \'weights\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "zero_state"
+ argspec: "args=[\'self\', \'batch_size\', \'dtype\'], varargs=None, keywords=None, defaults=None"
+ }
+}
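
Note (not part of the diff): a sketch based on the GRUCell __init__ argspec above; the unit count, initializer choice, and batch size are assumptions.

    # Illustrative only; the initializer is just one possible choice.
    import tensorflow as tf

    cell = tf.nn.rnn_cell.GRUCell(
        num_units=128,
        kernel_initializer=tf.glorot_uniform_initializer(),
        name="gru")
    state = cell.zero_state(batch_size=8, dtype=tf.float32)
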
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.nn.rnn_cell.-l-s-t-m-cell.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.nn.rnn_cell.-l-s-t-m-cell.pbtxt
new file mode 100644
index 0000000000..715bfd5fc7
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.nn.rnn_cell.-l-s-t-m-cell.pbtxt
@@ -0,0 +1,202 @@
+path: "tensorflow.nn.rnn_cell.LSTMCell"
+tf_class {
+ is_instance: "<class \'tensorflow.python.ops.rnn_cell_impl.LSTMCell\'>"
+ is_instance: "<class \'tensorflow.python.ops.rnn_cell_impl.LayerRNNCell\'>"
+ is_instance: "<class \'tensorflow.python.ops.rnn_cell_impl.RNNCell\'>"
+ is_instance: "<class \'tensorflow.python.layers.base.Layer\'>"
+ is_instance: "<class \'tensorflow.python.keras.engine.base_layer.Layer\'>"
+ is_instance: "<class \'tensorflow.python.training.checkpointable.base.CheckpointableBase\'>"
+ is_instance: "<type \'object\'>"
+ member {
+ name: "activity_regularizer"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "dtype"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "graph"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "inbound_nodes"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "input"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "input_mask"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "input_shape"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "losses"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "name"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "non_trainable_variables"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "non_trainable_weights"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "outbound_nodes"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "output"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "output_mask"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "output_shape"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "output_size"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "scope_name"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "state_size"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "trainable_variables"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "trainable_weights"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "updates"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "variables"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "weights"
+ mtype: "<type \'property\'>"
+ }
+ member_method {
+ name: "__init__"
+ argspec: "args=[\'self\', \'num_units\', \'use_peepholes\', \'cell_clip\', \'initializer\', \'num_proj\', \'proj_clip\', \'num_unit_shards\', \'num_proj_shards\', \'forget_bias\', \'state_is_tuple\', \'activation\', \'reuse\', \'name\', \'dtype\'], varargs=None, keywords=kwargs, defaults=[\'False\', \'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'1.0\', \'True\', \'None\', \'None\', \'None\', \'None\'], "
+ }
+ member_method {
+ name: "add_loss"
+ argspec: "args=[\'self\', \'losses\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "add_update"
+ argspec: "args=[\'self\', \'updates\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "add_variable"
+ argspec: "args=[\'self\'], varargs=args, keywords=kwargs, defaults=None"
+ }
+ member_method {
+ name: "add_weight"
+ argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'use_resource\', \'synchronization\', \'aggregation\', \'partitioner\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'VariableSynchronization.AUTO\', \'VariableAggregation.NONE\', \'None\'], "
+ }
+ member_method {
+ name: "apply"
+ argspec: "args=[\'self\', \'inputs\'], varargs=args, keywords=kwargs, defaults=None"
+ }
+ member_method {
+ name: "build"
+ argspec: "args=[\'instance\', \'input_shape\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "call"
+ argspec: "args=[\'self\', \'inputs\', \'state\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "compute_mask"
+ argspec: "args=[\'self\', \'inputs\', \'mask\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "compute_output_shape"
+ argspec: "args=[\'self\', \'input_shape\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "count_params"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "from_config"
+ argspec: "args=[\'cls\', \'config\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_config"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_initial_state"
+ argspec: "args=[\'self\', \'inputs\', \'batch_size\', \'dtype\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\'], "
+ }
+ member_method {
+ name: "get_input_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_input_mask_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_input_shape_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_losses_for"
+ argspec: "args=[\'self\', \'inputs\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_output_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_output_mask_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_output_shape_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_updates_for"
+ argspec: "args=[\'self\', \'inputs\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_weights"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "set_weights"
+ argspec: "args=[\'self\', \'weights\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "zero_state"
+ argspec: "args=[\'self\', \'batch_size\', \'dtype\'], varargs=None, keywords=None, defaults=None"
+ }
+}
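
Note (not part of the diff): a sketch of the peephole and projection options listed in the LSTMCell __init__ argspec above; the sizes are assumptions.

    # Illustrative only; sizes are assumed.
    import tensorflow as tf

    cell = tf.nn.rnn_cell.LSTMCell(num_units=256, use_peepholes=True, num_proj=128)
    # With num_proj set, the hidden output h is projected to 128 units,
    # so state_size is LSTMStateTuple(c=256, h=128) and output_size is 128.
    print(cell.state_size, cell.output_size)
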
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.nn.rnn_cell.-l-s-t-m-state-tuple.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.nn.rnn_cell.-l-s-t-m-state-tuple.pbtxt
new file mode 100644
index 0000000000..1de8a55dcc
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.nn.rnn_cell.-l-s-t-m-state-tuple.pbtxt
@@ -0,0 +1,27 @@
+path: "tensorflow.nn.rnn_cell.LSTMStateTuple"
+tf_class {
+ is_instance: "<class \'tensorflow.python.ops.rnn_cell_impl.LSTMStateTuple\'>"
+ is_instance: "<class \'tensorflow.python.ops.rnn_cell_impl.LSTMStateTuple\'>"
+ is_instance: "<type \'tuple\'>"
+ member {
+ name: "c"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "dtype"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "h"
+ mtype: "<type \'property\'>"
+ }
+ member_method {
+ name: "__init__"
+ }
+ member_method {
+ name: "count"
+ }
+ member_method {
+ name: "index"
+ }
+}
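
Note (not part of the diff): LSTMStateTuple is a namedtuple of (c, h), matching the c and h properties listed above; the shapes below are assumptions. Its dtype property is taken from c.

    # Illustrative only; shapes are assumed.
    import tensorflow as tf

    c = tf.zeros([32, 64])
    h = tf.zeros([32, 64])
    state = tf.nn.rnn_cell.LSTMStateTuple(c=c, h=h)
    print(state.c is c, state.h is h, state.dtype)
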
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.nn.rnn_cell.-multi-r-n-n-cell.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.nn.rnn_cell.-multi-r-n-n-cell.pbtxt
new file mode 100644
index 0000000000..b66c0f89cc
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.nn.rnn_cell.-multi-r-n-n-cell.pbtxt
@@ -0,0 +1,201 @@
+path: "tensorflow.nn.rnn_cell.MultiRNNCell"
+tf_class {
+ is_instance: "<class \'tensorflow.python.ops.rnn_cell_impl.MultiRNNCell\'>"
+ is_instance: "<class \'tensorflow.python.ops.rnn_cell_impl.RNNCell\'>"
+ is_instance: "<class \'tensorflow.python.layers.base.Layer\'>"
+ is_instance: "<class \'tensorflow.python.keras.engine.base_layer.Layer\'>"
+ is_instance: "<class \'tensorflow.python.training.checkpointable.base.CheckpointableBase\'>"
+ is_instance: "<type \'object\'>"
+ member {
+ name: "activity_regularizer"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "dtype"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "graph"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "inbound_nodes"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "input"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "input_mask"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "input_shape"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "losses"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "name"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "non_trainable_variables"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "non_trainable_weights"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "outbound_nodes"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "output"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "output_mask"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "output_shape"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "output_size"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "scope_name"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "state_size"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "trainable_variables"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "trainable_weights"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "updates"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "variables"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "weights"
+ mtype: "<type \'property\'>"
+ }
+ member_method {
+ name: "__init__"
+ argspec: "args=[\'self\', \'cells\', \'state_is_tuple\'], varargs=None, keywords=None, defaults=[\'True\'], "
+ }
+ member_method {
+ name: "add_loss"
+ argspec: "args=[\'self\', \'losses\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "add_update"
+ argspec: "args=[\'self\', \'updates\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "add_variable"
+ argspec: "args=[\'self\'], varargs=args, keywords=kwargs, defaults=None"
+ }
+ member_method {
+ name: "add_weight"
+ argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'use_resource\', \'synchronization\', \'aggregation\', \'partitioner\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'VariableSynchronization.AUTO\', \'VariableAggregation.NONE\', \'None\'], "
+ }
+ member_method {
+ name: "apply"
+ argspec: "args=[\'self\', \'inputs\'], varargs=args, keywords=kwargs, defaults=None"
+ }
+ member_method {
+ name: "build"
+ argspec: "args=[\'self\', \'_\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "call"
+ argspec: "args=[\'self\', \'inputs\', \'state\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "compute_mask"
+ argspec: "args=[\'self\', \'inputs\', \'mask\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "compute_output_shape"
+ argspec: "args=[\'self\', \'input_shape\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "count_params"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "from_config"
+ argspec: "args=[\'cls\', \'config\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_config"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_initial_state"
+ argspec: "args=[\'self\', \'inputs\', \'batch_size\', \'dtype\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\'], "
+ }
+ member_method {
+ name: "get_input_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_input_mask_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_input_shape_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_losses_for"
+ argspec: "args=[\'self\', \'inputs\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_output_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_output_mask_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_output_shape_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_updates_for"
+ argspec: "args=[\'self\', \'inputs\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_weights"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "set_weights"
+ argspec: "args=[\'self\', \'weights\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "zero_state"
+ argspec: "args=[\'self\', \'batch_size\', \'dtype\'], varargs=None, keywords=None, defaults=None"
+ }
+}
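
Note (not part of the diff): a sketch of stacking two cells with MultiRNNCell per the __init__ argspec above; the layer sizes and batch size are assumptions.

    # Illustrative only; layer sizes are assumed.
    import tensorflow as tf

    cells = [tf.nn.rnn_cell.GRUCell(64), tf.nn.rnn_cell.GRUCell(32)]
    stacked = tf.nn.rnn_cell.MultiRNNCell(cells, state_is_tuple=True)
    state = stacked.zero_state(batch_size=16, dtype=tf.float32)  # tuple of per-layer states
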
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.nn.rnn_cell.-r-n-n-cell.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.nn.rnn_cell.-r-n-n-cell.pbtxt
new file mode 100644
index 0000000000..faeb4f3513
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.nn.rnn_cell.-r-n-n-cell.pbtxt
@@ -0,0 +1,200 @@
+path: "tensorflow.nn.rnn_cell.RNNCell"
+tf_class {
+ is_instance: "<class \'tensorflow.python.ops.rnn_cell_impl.RNNCell\'>"
+ is_instance: "<class \'tensorflow.python.layers.base.Layer\'>"
+ is_instance: "<class \'tensorflow.python.keras.engine.base_layer.Layer\'>"
+ is_instance: "<class \'tensorflow.python.training.checkpointable.base.CheckpointableBase\'>"
+ is_instance: "<type \'object\'>"
+ member {
+ name: "activity_regularizer"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "dtype"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "graph"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "inbound_nodes"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "input"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "input_mask"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "input_shape"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "losses"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "name"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "non_trainable_variables"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "non_trainable_weights"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "outbound_nodes"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "output"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "output_mask"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "output_shape"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "output_size"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "scope_name"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "state_size"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "trainable_variables"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "trainable_weights"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "updates"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "variables"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "weights"
+ mtype: "<type \'property\'>"
+ }
+ member_method {
+ name: "__init__"
+ argspec: "args=[\'self\', \'trainable\', \'name\', \'dtype\'], varargs=None, keywords=kwargs, defaults=[\'True\', \'None\', \'None\'], "
+ }
+ member_method {
+ name: "add_loss"
+ argspec: "args=[\'self\', \'losses\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "add_update"
+ argspec: "args=[\'self\', \'updates\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "add_variable"
+ argspec: "args=[\'self\'], varargs=args, keywords=kwargs, defaults=None"
+ }
+ member_method {
+ name: "add_weight"
+ argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'use_resource\', \'synchronization\', \'aggregation\', \'partitioner\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'VariableSynchronization.AUTO\', \'VariableAggregation.NONE\', \'None\'], "
+ }
+ member_method {
+ name: "apply"
+ argspec: "args=[\'self\', \'inputs\'], varargs=args, keywords=kwargs, defaults=None"
+ }
+ member_method {
+ name: "build"
+ argspec: "args=[\'self\', \'_\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "call"
+ argspec: "args=[\'self\', \'inputs\'], varargs=None, keywords=kwargs, defaults=None"
+ }
+ member_method {
+ name: "compute_mask"
+ argspec: "args=[\'self\', \'inputs\', \'mask\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "compute_output_shape"
+ argspec: "args=[\'self\', \'input_shape\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "count_params"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "from_config"
+ argspec: "args=[\'cls\', \'config\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_config"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_initial_state"
+ argspec: "args=[\'self\', \'inputs\', \'batch_size\', \'dtype\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\'], "
+ }
+ member_method {
+ name: "get_input_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_input_mask_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_input_shape_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_losses_for"
+ argspec: "args=[\'self\', \'inputs\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_output_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_output_mask_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_output_shape_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_updates_for"
+ argspec: "args=[\'self\', \'inputs\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_weights"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "set_weights"
+ argspec: "args=[\'self\', \'weights\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "zero_state"
+ argspec: "args=[\'self\', \'batch_size\', \'dtype\'], varargs=None, keywords=None, defaults=None"
+ }
+}
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.nn.rnn_cell.-residual-wrapper.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.nn.rnn_cell.-residual-wrapper.pbtxt
new file mode 100644
index 0000000000..caa2e60080
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.nn.rnn_cell.-residual-wrapper.pbtxt
@@ -0,0 +1,201 @@
+path: "tensorflow.nn.rnn_cell.ResidualWrapper"
+tf_class {
+ is_instance: "<class \'tensorflow.python.ops.rnn_cell_impl.ResidualWrapper\'>"
+ is_instance: "<class \'tensorflow.python.ops.rnn_cell_impl.RNNCell\'>"
+ is_instance: "<class \'tensorflow.python.layers.base.Layer\'>"
+ is_instance: "<class \'tensorflow.python.keras.engine.base_layer.Layer\'>"
+ is_instance: "<class \'tensorflow.python.training.checkpointable.base.CheckpointableBase\'>"
+ is_instance: "<type \'object\'>"
+ member {
+ name: "activity_regularizer"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "dtype"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "graph"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "inbound_nodes"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "input"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "input_mask"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "input_shape"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "losses"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "name"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "non_trainable_variables"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "non_trainable_weights"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "outbound_nodes"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "output"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "output_mask"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "output_shape"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "output_size"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "scope_name"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "state_size"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "trainable_variables"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "trainable_weights"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "updates"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "variables"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "weights"
+ mtype: "<type \'property\'>"
+ }
+ member_method {
+ name: "__init__"
+ argspec: "args=[\'self\', \'cell\', \'residual_fn\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "add_loss"
+ argspec: "args=[\'self\', \'losses\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "add_update"
+ argspec: "args=[\'self\', \'updates\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "add_variable"
+ argspec: "args=[\'self\'], varargs=args, keywords=kwargs, defaults=None"
+ }
+ member_method {
+ name: "add_weight"
+ argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'use_resource\', \'synchronization\', \'aggregation\', \'partitioner\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'VariableSynchronization.AUTO\', \'VariableAggregation.NONE\', \'None\'], "
+ }
+ member_method {
+ name: "apply"
+ argspec: "args=[\'self\', \'inputs\'], varargs=args, keywords=kwargs, defaults=None"
+ }
+ member_method {
+ name: "build"
+ argspec: "args=[\'self\', \'_\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "call"
+ argspec: "args=[\'self\', \'inputs\'], varargs=None, keywords=kwargs, defaults=None"
+ }
+ member_method {
+ name: "compute_mask"
+ argspec: "args=[\'self\', \'inputs\', \'mask\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "compute_output_shape"
+ argspec: "args=[\'self\', \'input_shape\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "count_params"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "from_config"
+ argspec: "args=[\'cls\', \'config\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_config"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_initial_state"
+ argspec: "args=[\'self\', \'inputs\', \'batch_size\', \'dtype\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\'], "
+ }
+ member_method {
+ name: "get_input_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_input_mask_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_input_shape_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_losses_for"
+ argspec: "args=[\'self\', \'inputs\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_output_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_output_mask_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_output_shape_at"
+ argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_updates_for"
+ argspec: "args=[\'self\', \'inputs\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_weights"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "set_weights"
+ argspec: "args=[\'self\', \'weights\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "zero_state"
+ argspec: "args=[\'self\', \'batch_size\', \'dtype\'], varargs=None, keywords=None, defaults=None"
+ }
+}
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.nn.rnn_cell.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.nn.rnn_cell.pbtxt
new file mode 100644
index 0000000000..64697e8a02
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.nn.rnn_cell.pbtxt
@@ -0,0 +1,43 @@
+path: "tensorflow.nn.rnn_cell"
+tf_module {
+ member {
+ name: "BasicLSTMCell"
+ mtype: "<type \'type\'>"
+ }
+ member {
+ name: "BasicRNNCell"
+ mtype: "<type \'type\'>"
+ }
+ member {
+ name: "DeviceWrapper"
+ mtype: "<type \'type\'>"
+ }
+ member {
+ name: "DropoutWrapper"
+ mtype: "<type \'type\'>"
+ }
+ member {
+ name: "GRUCell"
+ mtype: "<type \'type\'>"
+ }
+ member {
+ name: "LSTMCell"
+ mtype: "<type \'type\'>"
+ }
+ member {
+ name: "LSTMStateTuple"
+ mtype: "<type \'type\'>"
+ }
+ member {
+ name: "MultiRNNCell"
+ mtype: "<type \'type\'>"
+ }
+ member {
+ name: "RNNCell"
+ mtype: "<type \'type\'>"
+ }
+ member {
+ name: "ResidualWrapper"
+ mtype: "<type \'type\'>"
+ }
+}
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.ones_initializer.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.ones_initializer.pbtxt
new file mode 100644
index 0000000000..210b56242b
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.ones_initializer.pbtxt
@@ -0,0 +1,18 @@
+path: "tensorflow.ones_initializer"
+tf_class {
+ is_instance: "<class \'tensorflow.python.ops.init_ops.Ones\'>"
+ is_instance: "<class \'tensorflow.python.ops.init_ops.Initializer\'>"
+ is_instance: "<type \'object\'>"
+ member_method {
+ name: "__init__"
+ argspec: "args=[\'self\', \'dtype\'], varargs=None, keywords=None, defaults=[\"<dtype: \'float32\'>\"], "
+ }
+ member_method {
+ name: "from_config"
+ argspec: "args=[\'cls\', \'config\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_config"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+}
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.orthogonal_initializer.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.orthogonal_initializer.pbtxt
new file mode 100644
index 0000000000..13ec7454f4
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.orthogonal_initializer.pbtxt
@@ -0,0 +1,18 @@
+path: "tensorflow.orthogonal_initializer"
+tf_class {
+ is_instance: "<class \'tensorflow.python.ops.init_ops.Orthogonal\'>"
+ is_instance: "<class \'tensorflow.python.ops.init_ops.Initializer\'>"
+ is_instance: "<type \'object\'>"
+ member_method {
+ name: "__init__"
+ argspec: "args=[\'self\', \'gain\', \'seed\', \'dtype\'], varargs=None, keywords=None, defaults=[\'1.0\', \'None\', \"<dtype: \'float32\'>\"], "
+ }
+ member_method {
+ name: "from_config"
+ argspec: "args=[\'cls\', \'config\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_config"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+}
diff --git a/tensorflow/tools/api/golden/tensorflow.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.pbtxt
index c66249999f..7eca26be06 100644
--- a/tensorflow/tools/api/golden/tensorflow.pbtxt
+++ b/tensorflow/tools/api/golden/v2/tensorflow.pbtxt
@@ -61,10 +61,6 @@ tf_module {
mtype: "<type \'type\'>"
}
member {
- name: "FixedLengthRecordReader"
- mtype: "<type \'type\'>"
- }
- member {
name: "GIT_VERSION"
mtype: "<type \'str\'>"
}
@@ -109,10 +105,6 @@ tf_module {
mtype: "<class \'google.protobuf.pyext.cpp_message.GeneratedProtocolMessageType\'>"
}
member {
- name: "IdentityReader"
- mtype: "<type \'type\'>"
- }
- member {
name: "IndexedSlices"
mtype: "<type \'type\'>"
}
@@ -121,10 +113,6 @@ tf_module {
mtype: "<type \'type\'>"
}
member {
- name: "LMDBReader"
- mtype: "<type \'type\'>"
- }
- member {
name: "LogMessage"
mtype: "<class \'google.protobuf.pyext.cpp_message.GeneratedProtocolMessageType\'>"
}
@@ -177,10 +165,6 @@ tf_module {
mtype: "<type \'type\'>"
}
member {
- name: "ReaderBase"
- mtype: "<type \'type\'>"
- }
- member {
name: "RegisterGradient"
mtype: "<type \'type\'>"
}
@@ -225,10 +209,6 @@ tf_module {
mtype: "<class \'google.protobuf.pyext.cpp_message.GeneratedProtocolMessageType\'>"
}
member {
- name: "TFRecordReader"
- mtype: "<type \'type\'>"
- }
- member {
name: "Tensor"
mtype: "<type \'type\'>"
}
@@ -245,10 +225,6 @@ tf_module {
mtype: "<type \'type\'>"
}
member {
- name: "TextLineReader"
- mtype: "<type \'type\'>"
- }
- member {
name: "VERSION"
mtype: "<type \'str\'>"
}
@@ -258,15 +234,19 @@ tf_module {
}
member {
name: "Variable"
- mtype: "<type \'type\'>"
+ mtype: "<class \'tensorflow.python.ops.variables.VariableMetaclass\'>"
+ }
+ member {
+ name: "VariableAggregation"
+ mtype: "<class \'enum.EnumMeta\'>"
}
member {
name: "VariableScope"
mtype: "<type \'type\'>"
}
member {
- name: "WholeFileReader"
- mtype: "<type \'type\'>"
+ name: "VariableSynchronization"
+ mtype: "<class \'enum.EnumMeta\'>"
}
member {
name: "app"
@@ -309,6 +289,10 @@ tf_module {
mtype: "<type \'module\'>"
}
member {
+ name: "debugging"
+ mtype: "<type \'module\'>"
+ }
+ member {
name: "distributions"
mtype: "<type \'module\'>"
}
@@ -317,6 +301,10 @@ tf_module {
mtype: "<class \'tensorflow.python.framework.dtypes.DType\'>"
}
member {
+ name: "dtypes"
+ mtype: "<type \'module\'>"
+ }
+ member {
name: "errors"
mtype: "<type \'module\'>"
}
@@ -349,6 +337,14 @@ tf_module {
mtype: "<type \'module\'>"
}
member {
+ name: "glorot_normal_initializer"
+ mtype: "<type \'type\'>"
+ }
+ member {
+ name: "glorot_uniform_initializer"
+ mtype: "<type \'type\'>"
+ }
+ member {
name: "graph_util"
mtype: "<type \'module\'>"
}
@@ -381,6 +377,10 @@ tf_module {
mtype: "<class \'tensorflow.python.framework.dtypes.DType\'>"
}
member {
+ name: "io"
+ mtype: "<type \'module\'>"
+ }
+ member {
name: "keras"
mtype: "<type \'module\'>"
}
@@ -457,6 +457,10 @@ tf_module {
mtype: "<class \'tensorflow.python.framework.dtypes.DType\'>"
}
member {
+ name: "quantization"
+ mtype: "<type \'module\'>"
+ }
+ member {
name: "quint16"
mtype: "<class \'tensorflow.python.framework.dtypes.DType\'>"
}
@@ -489,6 +493,10 @@ tf_module {
mtype: "<type \'module\'>"
}
member {
+ name: "sparse"
+ mtype: "<type \'module\'>"
+ }
+ member {
name: "spectral"
mtype: "<type \'module\'>"
}
@@ -497,6 +505,10 @@ tf_module {
mtype: "<class \'tensorflow.python.framework.dtypes.DType\'>"
}
member {
+ name: "strings"
+ mtype: "<type \'module\'>"
+ }
+ member {
name: "summary"
mtype: "<type \'module\'>"
}
@@ -569,10 +581,6 @@ tf_module {
argspec: "args=[\'op_type\'], varargs=None, keywords=None, defaults=None"
}
member_method {
- name: "Print"
- argspec: "args=[\'input_\', \'data\', \'message\', \'first_n\', \'summarize\', \'name\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'None\'], "
- }
- member_method {
name: "abs"
argspec: "args=[\'x\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
}
@@ -729,18 +737,6 @@ tf_module {
argspec: "args=[\'var_list\'], varargs=None, keywords=None, defaults=[\'None\'], "
}
member_method {
- name: "assign"
- argspec: "args=[\'ref\', \'value\', \'validate_shape\', \'use_locking\', \'name\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\'], "
- }
- member_method {
- name: "assign_add"
- argspec: "args=[\'ref\', \'value\', \'use_locking\', \'name\'], varargs=None, keywords=None, defaults=[\'None\', \'None\'], "
- }
- member_method {
- name: "assign_sub"
- argspec: "args=[\'ref\', \'value\', \'use_locking\', \'name\'], varargs=None, keywords=None, defaults=[\'None\', \'None\'], "
- }
- member_method {
name: "atan"
argspec: "args=[\'x\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
}
@@ -753,6 +749,14 @@ tf_module {
argspec: "args=[\'x\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
}
member_method {
+ name: "batch_gather"
+ argspec: "args=[\'params\', \'indices\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "batch_scatter_update"
+ argspec: "args=[\'ref\', \'indices\', \'updates\', \'use_locking\', \'name\'], varargs=None, keywords=None, defaults=[\'True\', \'None\'], "
+ }
+ member_method {
name: "batch_to_space"
argspec: "args=[\'input\', \'crops\', \'block_size\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
}
@@ -785,6 +789,10 @@ tf_module {
argspec: "args=[\'shape_x\', \'shape_y\'], varargs=None, keywords=None, defaults=None"
}
member_method {
+ name: "broadcast_to"
+ argspec: "args=[\'input\', \'shape\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
name: "case"
argspec: "args=[\'pred_fn_pairs\', \'default\', \'exclusive\', \'strict\', \'name\'], varargs=None, keywords=None, defaults=[\'None\', \'False\', \'False\', \'case\'], "
}
@@ -885,10 +893,6 @@ tf_module {
argspec: "args=[\'input_tensor\', \'axis\', \'keepdims\', \'dtype\', \'name\', \'reduction_indices\', \'keep_dims\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \"<dtype: \'int64\'>\", \'None\', \'None\', \'None\'], "
}
member_method {
- name: "count_up_to"
- argspec: "args=[\'ref\', \'limit\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
- }
- member_method {
name: "create_partitioned_variables"
argspec: "args=[\'shape\', \'slicing\', \'initializer\', \'dtype\', \'trainable\', \'collections\', \'name\', \'reuse\'], varargs=None, keywords=None, defaults=[\"<dtype: \'float32\'>\", \'True\', \'None\', \'None\', \'None\'], "
}
@@ -965,6 +969,10 @@ tf_module {
argspec: "args=[\'x\', \'y\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
}
member_method {
+ name: "div_no_nan"
+ argspec: "args=[\'x\', \'y\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
name: "divide"
argspec: "args=[\'x\', \'y\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
}
@@ -993,6 +1001,10 @@ tf_module {
argspec: "args=[\'input\', \'pad\', \'name\'], varargs=None, keywords=None, defaults=[\'False\', \'None\'], "
}
member_method {
+ name: "ensure_shape"
+ argspec: "args=[\'x\', \'shape\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
name: "equal"
argspec: "args=[\'x\', \'y\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
}
@@ -1122,7 +1134,7 @@ tf_module {
}
member_method {
name: "get_local_variable"
- argspec: "args=[\'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'collections\', \'caching_device\', \'partitioner\', \'validate_shape\', \'use_resource\', \'custom_getter\', \'constraint\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'None\', \'False\', \'None\', \'None\', \'None\', \'True\', \'None\', \'None\', \'None\'], "
+ argspec: "args=[\'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'collections\', \'caching_device\', \'partitioner\', \'validate_shape\', \'use_resource\', \'custom_getter\', \'constraint\', \'synchronization\', \'aggregation\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'None\', \'False\', \'None\', \'None\', \'None\', \'True\', \'None\', \'None\', \'None\', \'VariableSynchronization.AUTO\', \'VariableAggregation.NONE\'], "
}
member_method {
name: "get_seed"
@@ -1138,7 +1150,7 @@ tf_module {
}
member_method {
name: "get_variable"
- argspec: "args=[\'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'collections\', \'caching_device\', \'partitioner\', \'validate_shape\', \'use_resource\', \'custom_getter\', \'constraint\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'None\', \'True\', \'None\', \'None\', \'None\', \'True\', \'None\', \'None\', \'None\'], "
+ argspec: "args=[\'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'collections\', \'caching_device\', \'partitioner\', \'validate_shape\', \'use_resource\', \'custom_getter\', \'constraint\', \'synchronization\', \'aggregation\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'True\', \'None\', \'None\', \'None\', \'VariableSynchronization.AUTO\', \'VariableAggregation.NONE\'], "
}
member_method {
name: "get_variable_scope"
@@ -1157,14 +1169,6 @@ tf_module {
argspec: "args=[], varargs=None, keywords=None, defaults=None"
}
member_method {
- name: "glorot_normal_initializer"
- argspec: "args=[\'seed\', \'dtype\'], varargs=None, keywords=None, defaults=[\'None\', \"<dtype: \'float32\'>\"], "
- }
- member_method {
- name: "glorot_uniform_initializer"
- argspec: "args=[\'seed\', \'dtype\'], varargs=None, keywords=None, defaults=[\'None\', \"<dtype: \'float32\'>\"], "
- }
- member_method {
name: "gradients"
argspec: "args=[\'ys\', \'xs\', \'grad_ys\', \'name\', \'colocate_gradients_with_ops\', \'gate_gradients\', \'aggregation_method\', \'stop_gradients\'], varargs=None, keywords=None, defaults=[\'None\', \'gradients\', \'False\', \'False\', \'None\', \'None\'], "
}
@@ -1233,6 +1237,10 @@ tf_module {
argspec: "args=[\'graph_def\', \'input_map\', \'return_elements\', \'name\', \'op_dict\', \'producer_op_list\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'None\', \'None\'], "
}
member_method {
+ name: "init_scope"
+ argspec: "args=[], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
name: "initialize_all_tables"
argspec: "args=[\'name\'], varargs=None, keywords=None, defaults=[\'init_all_tables\'], "
}
@@ -1282,7 +1290,7 @@ tf_module {
}
member_method {
name: "lbeta"
- argspec: "args=[\'x\', \'name\'], varargs=None, keywords=None, defaults=[\'lbeta\'], "
+ argspec: "args=[\'x\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
}
member_method {
name: "less"
@@ -1309,6 +1317,10 @@ tf_module {
argspec: "args=[\'library_filename\'], varargs=None, keywords=None, defaults=None"
}
member_method {
+ name: "load_library"
+ argspec: "args=[\'library_location\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
name: "load_op_library"
argspec: "args=[\'library_filename\'], varargs=None, keywords=None, defaults=None"
}
@@ -1525,6 +1537,10 @@ tf_module {
argspec: "args=[\'x\', \'y\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
}
member_method {
+ name: "print"
+ argspec: "args=[], varargs=inputs, keywords=kwargs, defaults=None"
+ }
+ member_method {
name: "py_func"
argspec: "args=[\'func\', \'inp\', \'Tout\', \'stateful\', \'name\'], varargs=None, keywords=None, defaults=[\'True\', \'None\'], "
}
@@ -1686,11 +1702,7 @@ tf_module {
}
member_method {
name: "scan"
- argspec: "args=[\'fn\', \'elems\', \'initializer\', \'parallel_iterations\', \'back_prop\', \'swap_memory\', \'infer_shape\', \'name\'], varargs=None, keywords=None, defaults=[\'None\', \'10\', \'True\', \'False\', \'True\', \'None\'], "
- }
- member_method {
- name: "scatter_add"
- argspec: "args=[\'ref\', \'indices\', \'updates\', \'use_locking\', \'name\'], varargs=None, keywords=None, defaults=[\'False\', \'None\'], "
+ argspec: "args=[\'fn\', \'elems\', \'initializer\', \'parallel_iterations\', \'back_prop\', \'swap_memory\', \'infer_shape\', \'reverse\', \'name\'], varargs=None, keywords=None, defaults=[\'None\', \'10\', \'True\', \'False\', \'True\', \'False\', \'None\'], "
}
member_method {
name: "scatter_div"
@@ -1713,24 +1725,8 @@ tf_module {
argspec: "args=[\'indices\', \'updates\', \'shape\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
}
member_method {
- name: "scatter_nd_add"
- argspec: "args=[\'ref\', \'indices\', \'updates\', \'use_locking\', \'name\'], varargs=None, keywords=None, defaults=[\'False\', \'None\'], "
- }
- member_method {
- name: "scatter_nd_sub"
- argspec: "args=[\'ref\', \'indices\', \'updates\', \'use_locking\', \'name\'], varargs=None, keywords=None, defaults=[\'False\', \'None\'], "
- }
- member_method {
- name: "scatter_nd_update"
- argspec: "args=[\'ref\', \'indices\', \'updates\', \'use_locking\', \'name\'], varargs=None, keywords=None, defaults=[\'True\', \'None\'], "
- }
- member_method {
- name: "scatter_sub"
- argspec: "args=[\'ref\', \'indices\', \'updates\', \'use_locking\', \'name\'], varargs=None, keywords=None, defaults=[\'False\', \'None\'], "
- }
- member_method {
- name: "scatter_update"
- argspec: "args=[\'ref\', \'indices\', \'updates\', \'use_locking\', \'name\'], varargs=None, keywords=None, defaults=[\'True\', \'None\'], "
+ name: "searchsorted"
+ argspec: "args=[\'sorted_sequence\', \'values\', \'side\', \'out_type\', \'name\'], varargs=None, keywords=None, defaults=[\'left\', \"<dtype: \'int32\'>\", \'None\'], "
}
member_method {
name: "segment_max"
@@ -1866,19 +1862,19 @@ tf_module {
}
member_method {
name: "sparse_reduce_max"
- argspec: "args=[\'sp_input\', \'axis\', \'keep_dims\', \'reduction_axes\'], varargs=None, keywords=None, defaults=[\'None\', \'False\', \'None\'], "
+ argspec: "args=[\'sp_input\', \'axis\', \'keepdims\', \'reduction_axes\', \'keep_dims\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'None\'], "
}
member_method {
name: "sparse_reduce_max_sparse"
- argspec: "args=[\'sp_input\', \'axis\', \'keep_dims\', \'reduction_axes\'], varargs=None, keywords=None, defaults=[\'None\', \'False\', \'None\'], "
+ argspec: "args=[\'sp_input\', \'axis\', \'keepdims\', \'reduction_axes\', \'keep_dims\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'None\'], "
}
member_method {
name: "sparse_reduce_sum"
- argspec: "args=[\'sp_input\', \'axis\', \'keep_dims\', \'reduction_axes\'], varargs=None, keywords=None, defaults=[\'None\', \'False\', \'None\'], "
+ argspec: "args=[\'sp_input\', \'axis\', \'keepdims\', \'reduction_axes\', \'keep_dims\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'None\'], "
}
member_method {
name: "sparse_reduce_sum_sparse"
- argspec: "args=[\'sp_input\', \'axis\', \'keep_dims\', \'reduction_axes\'], varargs=None, keywords=None, defaults=[\'None\', \'False\', \'None\'], "
+ argspec: "args=[\'sp_input\', \'axis\', \'keepdims\', \'reduction_axes\', \'keep_dims\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'None\'], "
}
member_method {
name: "sparse_reorder"
@@ -1981,6 +1977,10 @@ tf_module {
argspec: "args=[\'source\', \'delimiter\', \'skip_empty\'], varargs=None, keywords=None, defaults=[\' \', \'True\'], "
}
member_method {
+ name: "string_strip"
+ argspec: "args=[\'input\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
name: "string_to_hash_bucket"
argspec: "args=[\'string_tensor\', \'num_buckets\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
}
@@ -2154,7 +2154,7 @@ tf_module {
}
member_method {
name: "while_loop"
- argspec: "args=[\'cond\', \'body\', \'loop_vars\', \'shape_invariants\', \'parallel_iterations\', \'back_prop\', \'swap_memory\', \'name\', \'maximum_iterations\'], varargs=None, keywords=None, defaults=[\'None\', \'10\', \'True\', \'False\', \'None\', \'None\'], "
+ argspec: "args=[\'cond\', \'body\', \'loop_vars\', \'shape_invariants\', \'parallel_iterations\', \'back_prop\', \'swap_memory\', \'name\', \'maximum_iterations\', \'return_same_structure\'], varargs=None, keywords=None, defaults=[\'None\', \'10\', \'True\', \'False\', \'None\', \'None\', \'False\'], "
}
member_method {
name: "write_file"
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.profiler.-advice-proto.-checker.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.profiler.-advice-proto.-checker.pbtxt
new file mode 100644
index 0000000000..e09c44cc9c
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.profiler.-advice-proto.-checker.pbtxt
@@ -0,0 +1,12 @@
+path: "tensorflow.profiler.AdviceProto.Checker"
+tf_proto {
+ descriptor {
+ name: "Checker"
+ field {
+ name: "reports"
+ number: 2
+ label: LABEL_REPEATED
+ type: TYPE_STRING
+ }
+ }
+}
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.profiler.-advice-proto.-checkers-entry.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.profiler.-advice-proto.-checkers-entry.pbtxt
new file mode 100644
index 0000000000..8746243549
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.profiler.-advice-proto.-checkers-entry.pbtxt
@@ -0,0 +1,22 @@
+path: "tensorflow.profiler.AdviceProto.CheckersEntry"
+tf_proto {
+ descriptor {
+ name: "CheckersEntry"
+ field {
+ name: "key"
+ number: 1
+ label: LABEL_OPTIONAL
+ type: TYPE_STRING
+ }
+ field {
+ name: "value"
+ number: 2
+ label: LABEL_OPTIONAL
+ type: TYPE_MESSAGE
+ type_name: ".tensorflow.tfprof.AdviceProto.Checker"
+ }
+ options {
+ map_entry: true
+ }
+ }
+}
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.profiler.-advice-proto.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.profiler.-advice-proto.pbtxt
new file mode 100644
index 0000000000..a8a8858ccd
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.profiler.-advice-proto.pbtxt
@@ -0,0 +1,41 @@
+path: "tensorflow.profiler.AdviceProto"
+tf_proto {
+ descriptor {
+ name: "AdviceProto"
+ field {
+ name: "checkers"
+ number: 1
+ label: LABEL_REPEATED
+ type: TYPE_MESSAGE
+ type_name: ".tensorflow.tfprof.AdviceProto.CheckersEntry"
+ }
+ nested_type {
+ name: "CheckersEntry"
+ field {
+ name: "key"
+ number: 1
+ label: LABEL_OPTIONAL
+ type: TYPE_STRING
+ }
+ field {
+ name: "value"
+ number: 2
+ label: LABEL_OPTIONAL
+ type: TYPE_MESSAGE
+ type_name: ".tensorflow.tfprof.AdviceProto.Checker"
+ }
+ options {
+ map_entry: true
+ }
+ }
+ nested_type {
+ name: "Checker"
+ field {
+ name: "reports"
+ number: 2
+ label: LABEL_REPEATED
+ type: TYPE_STRING
+ }
+ }
+ }
+}
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.profiler.-graph-node-proto.-input-shapes-entry.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.profiler.-graph-node-proto.-input-shapes-entry.pbtxt
new file mode 100644
index 0000000000..afec73f537
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.profiler.-graph-node-proto.-input-shapes-entry.pbtxt
@@ -0,0 +1,22 @@
+path: "tensorflow.profiler.GraphNodeProto.InputShapesEntry"
+tf_proto {
+ descriptor {
+ name: "InputShapesEntry"
+ field {
+ name: "key"
+ number: 1
+ label: LABEL_OPTIONAL
+ type: TYPE_INT32
+ }
+ field {
+ name: "value"
+ number: 2
+ label: LABEL_OPTIONAL
+ type: TYPE_MESSAGE
+ type_name: ".tensorflow.TensorShapeProto"
+ }
+ options {
+ map_entry: true
+ }
+ }
+}
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.profiler.-graph-node-proto.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.profiler.-graph-node-proto.pbtxt
new file mode 100644
index 0000000000..3c83177005
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.profiler.-graph-node-proto.pbtxt
@@ -0,0 +1,191 @@
+path: "tensorflow.profiler.GraphNodeProto"
+tf_proto {
+ descriptor {
+ name: "GraphNodeProto"
+ field {
+ name: "name"
+ number: 1
+ label: LABEL_OPTIONAL
+ type: TYPE_STRING
+ }
+ field {
+ name: "tensor_value"
+ number: 15
+ label: LABEL_OPTIONAL
+ type: TYPE_MESSAGE
+ type_name: ".tensorflow.tfprof.TFProfTensorProto"
+ }
+ field {
+ name: "run_count"
+ number: 21
+ label: LABEL_OPTIONAL
+ type: TYPE_INT64
+ }
+ field {
+ name: "exec_micros"
+ number: 2
+ label: LABEL_OPTIONAL
+ type: TYPE_INT64
+ }
+ field {
+ name: "accelerator_exec_micros"
+ number: 17
+ label: LABEL_OPTIONAL
+ type: TYPE_INT64
+ }
+ field {
+ name: "cpu_exec_micros"
+ number: 18
+ label: LABEL_OPTIONAL
+ type: TYPE_INT64
+ }
+ field {
+ name: "requested_bytes"
+ number: 3
+ label: LABEL_OPTIONAL
+ type: TYPE_INT64
+ }
+ field {
+ name: "peak_bytes"
+ number: 24
+ label: LABEL_OPTIONAL
+ type: TYPE_INT64
+ }
+ field {
+ name: "residual_bytes"
+ number: 25
+ label: LABEL_OPTIONAL
+ type: TYPE_INT64
+ }
+ field {
+ name: "output_bytes"
+ number: 26
+ label: LABEL_OPTIONAL
+ type: TYPE_INT64
+ }
+ field {
+ name: "parameters"
+ number: 4
+ label: LABEL_OPTIONAL
+ type: TYPE_INT64
+ }
+ field {
+ name: "float_ops"
+ number: 13
+ label: LABEL_OPTIONAL
+ type: TYPE_INT64
+ }
+ field {
+ name: "devices"
+ number: 10
+ label: LABEL_REPEATED
+ type: TYPE_STRING
+ }
+ field {
+ name: "total_definition_count"
+ number: 23
+ label: LABEL_OPTIONAL
+ type: TYPE_INT64
+ }
+ field {
+ name: "total_run_count"
+ number: 22
+ label: LABEL_OPTIONAL
+ type: TYPE_INT64
+ }
+ field {
+ name: "total_exec_micros"
+ number: 6
+ label: LABEL_OPTIONAL
+ type: TYPE_INT64
+ }
+ field {
+ name: "total_accelerator_exec_micros"
+ number: 19
+ label: LABEL_OPTIONAL
+ type: TYPE_INT64
+ }
+ field {
+ name: "total_cpu_exec_micros"
+ number: 20
+ label: LABEL_OPTIONAL
+ type: TYPE_INT64
+ }
+ field {
+ name: "total_requested_bytes"
+ number: 7
+ label: LABEL_OPTIONAL
+ type: TYPE_INT64
+ }
+ field {
+ name: "total_peak_bytes"
+ number: 27
+ label: LABEL_OPTIONAL
+ type: TYPE_INT64
+ }
+ field {
+ name: "total_residual_bytes"
+ number: 28
+ label: LABEL_OPTIONAL
+ type: TYPE_INT64
+ }
+ field {
+ name: "total_output_bytes"
+ number: 29
+ label: LABEL_OPTIONAL
+ type: TYPE_INT64
+ }
+ field {
+ name: "total_parameters"
+ number: 8
+ label: LABEL_OPTIONAL
+ type: TYPE_INT64
+ }
+ field {
+ name: "total_float_ops"
+ number: 14
+ label: LABEL_OPTIONAL
+ type: TYPE_INT64
+ }
+ field {
+ name: "shapes"
+ number: 11
+ label: LABEL_REPEATED
+ type: TYPE_MESSAGE
+ type_name: ".tensorflow.TensorShapeProto"
+ }
+ field {
+ name: "input_shapes"
+ number: 16
+ label: LABEL_REPEATED
+ type: TYPE_MESSAGE
+ type_name: ".tensorflow.tfprof.GraphNodeProto.InputShapesEntry"
+ }
+ field {
+ name: "children"
+ number: 12
+ label: LABEL_REPEATED
+ type: TYPE_MESSAGE
+ type_name: ".tensorflow.tfprof.GraphNodeProto"
+ }
+ nested_type {
+ name: "InputShapesEntry"
+ field {
+ name: "key"
+ number: 1
+ label: LABEL_OPTIONAL
+ type: TYPE_INT32
+ }
+ field {
+ name: "value"
+ number: 2
+ label: LABEL_OPTIONAL
+ type: TYPE_MESSAGE
+ type_name: ".tensorflow.TensorShapeProto"
+ }
+ options {
+ map_entry: true
+ }
+ }
+ }
+}
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.profiler.-multi-graph-node-proto.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.profiler.-multi-graph-node-proto.pbtxt
new file mode 100644
index 0000000000..2b08a05437
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.profiler.-multi-graph-node-proto.pbtxt
@@ -0,0 +1,134 @@
+path: "tensorflow.profiler.MultiGraphNodeProto"
+tf_proto {
+ descriptor {
+ name: "MultiGraphNodeProto"
+ field {
+ name: "name"
+ number: 1
+ label: LABEL_OPTIONAL
+ type: TYPE_STRING
+ }
+ field {
+ name: "exec_micros"
+ number: 2
+ label: LABEL_OPTIONAL
+ type: TYPE_INT64
+ }
+ field {
+ name: "accelerator_exec_micros"
+ number: 12
+ label: LABEL_OPTIONAL
+ type: TYPE_INT64
+ }
+ field {
+ name: "cpu_exec_micros"
+ number: 13
+ label: LABEL_OPTIONAL
+ type: TYPE_INT64
+ }
+ field {
+ name: "requested_bytes"
+ number: 3
+ label: LABEL_OPTIONAL
+ type: TYPE_INT64
+ }
+ field {
+ name: "peak_bytes"
+ number: 16
+ label: LABEL_OPTIONAL
+ type: TYPE_INT64
+ }
+ field {
+ name: "residual_bytes"
+ number: 17
+ label: LABEL_OPTIONAL
+ type: TYPE_INT64
+ }
+ field {
+ name: "output_bytes"
+ number: 18
+ label: LABEL_OPTIONAL
+ type: TYPE_INT64
+ }
+ field {
+ name: "parameters"
+ number: 4
+ label: LABEL_OPTIONAL
+ type: TYPE_INT64
+ }
+ field {
+ name: "float_ops"
+ number: 5
+ label: LABEL_OPTIONAL
+ type: TYPE_INT64
+ }
+ field {
+ name: "total_exec_micros"
+ number: 6
+ label: LABEL_OPTIONAL
+ type: TYPE_INT64
+ }
+ field {
+ name: "total_accelerator_exec_micros"
+ number: 14
+ label: LABEL_OPTIONAL
+ type: TYPE_INT64
+ }
+ field {
+ name: "total_cpu_exec_micros"
+ number: 15
+ label: LABEL_OPTIONAL
+ type: TYPE_INT64
+ }
+ field {
+ name: "total_requested_bytes"
+ number: 7
+ label: LABEL_OPTIONAL
+ type: TYPE_INT64
+ }
+ field {
+ name: "total_peak_bytes"
+ number: 19
+ label: LABEL_OPTIONAL
+ type: TYPE_INT64
+ }
+ field {
+ name: "total_residual_bytes"
+ number: 20
+ label: LABEL_OPTIONAL
+ type: TYPE_INT64
+ }
+ field {
+ name: "total_output_bytes"
+ number: 21
+ label: LABEL_OPTIONAL
+ type: TYPE_INT64
+ }
+ field {
+ name: "total_parameters"
+ number: 8
+ label: LABEL_OPTIONAL
+ type: TYPE_INT64
+ }
+ field {
+ name: "total_float_ops"
+ number: 9
+ label: LABEL_OPTIONAL
+ type: TYPE_INT64
+ }
+ field {
+ name: "graph_nodes"
+ number: 10
+ label: LABEL_REPEATED
+ type: TYPE_MESSAGE
+ type_name: ".tensorflow.tfprof.GraphNodeProto"
+ }
+ field {
+ name: "children"
+ number: 11
+ label: LABEL_REPEATED
+ type: TYPE_MESSAGE
+ type_name: ".tensorflow.tfprof.MultiGraphNodeProto"
+ }
+ }
+}
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.profiler.-op-log-proto.-id-to-string-entry.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.profiler.-op-log-proto.-id-to-string-entry.pbtxt
new file mode 100644
index 0000000000..b3adc50c7e
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.profiler.-op-log-proto.-id-to-string-entry.pbtxt
@@ -0,0 +1,21 @@
+path: "tensorflow.profiler.OpLogProto.IdToStringEntry"
+tf_proto {
+ descriptor {
+ name: "IdToStringEntry"
+ field {
+ name: "key"
+ number: 1
+ label: LABEL_OPTIONAL
+ type: TYPE_INT64
+ }
+ field {
+ name: "value"
+ number: 2
+ label: LABEL_OPTIONAL
+ type: TYPE_STRING
+ }
+ options {
+ map_entry: true
+ }
+ }
+}
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.profiler.-op-log-proto.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.profiler.-op-log-proto.pbtxt
new file mode 100644
index 0000000000..7510c566ba
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.profiler.-op-log-proto.pbtxt
@@ -0,0 +1,38 @@
+path: "tensorflow.profiler.OpLogProto"
+tf_proto {
+ descriptor {
+ name: "OpLogProto"
+ field {
+ name: "log_entries"
+ number: 1
+ label: LABEL_REPEATED
+ type: TYPE_MESSAGE
+ type_name: ".tensorflow.tfprof.OpLogEntry"
+ }
+ field {
+ name: "id_to_string"
+ number: 2
+ label: LABEL_REPEATED
+ type: TYPE_MESSAGE
+ type_name: ".tensorflow.tfprof.OpLogProto.IdToStringEntry"
+ }
+ nested_type {
+ name: "IdToStringEntry"
+ field {
+ name: "key"
+ number: 1
+ label: LABEL_OPTIONAL
+ type: TYPE_INT64
+ }
+ field {
+ name: "value"
+ number: 2
+ label: LABEL_OPTIONAL
+ type: TYPE_STRING
+ }
+ options {
+ map_entry: true
+ }
+ }
+ }
+}
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.profiler.-profile-option-builder.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.profiler.-profile-option-builder.pbtxt
new file mode 100644
index 0000000000..19ff38a390
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.profiler.-profile-option-builder.pbtxt
@@ -0,0 +1,93 @@
+path: "tensorflow.profiler.ProfileOptionBuilder"
+tf_class {
+ is_instance: "<class \'tensorflow.python.profiler.option_builder.ProfileOptionBuilder\'>"
+ is_instance: "<type \'object\'>"
+ member_method {
+ name: "__init__"
+ argspec: "args=[\'self\', \'options\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "account_displayed_op_only"
+ argspec: "args=[\'self\', \'is_true\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "build"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "float_operation"
+ argspec: "args=[], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "order_by"
+ argspec: "args=[\'self\', \'attribute\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "select"
+ argspec: "args=[\'self\', \'attributes\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "time_and_memory"
+ argspec: "args=[\'min_micros\', \'min_bytes\', \'min_accelerator_micros\', \'min_cpu_micros\', \'min_peak_bytes\', \'min_residual_bytes\', \'min_output_bytes\'], varargs=None, keywords=None, defaults=[\'1\', \'1\', \'0\', \'0\', \'0\', \'0\', \'0\'], "
+ }
+ member_method {
+ name: "trainable_variables_parameter"
+ argspec: "args=[], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "with_accounted_types"
+ argspec: "args=[\'self\', \'account_type_regexes\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "with_empty_output"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "with_file_output"
+ argspec: "args=[\'self\', \'outfile\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "with_max_depth"
+ argspec: "args=[\'self\', \'max_depth\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "with_min_execution_time"
+ argspec: "args=[\'self\', \'min_micros\', \'min_accelerator_micros\', \'min_cpu_micros\'], varargs=None, keywords=None, defaults=[\'0\', \'0\', \'0\'], "
+ }
+ member_method {
+ name: "with_min_float_operations"
+ argspec: "args=[\'self\', \'min_float_ops\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "with_min_memory"
+ argspec: "args=[\'self\', \'min_bytes\', \'min_peak_bytes\', \'min_residual_bytes\', \'min_output_bytes\'], varargs=None, keywords=None, defaults=[\'0\', \'0\', \'0\', \'0\'], "
+ }
+ member_method {
+ name: "with_min_occurrence"
+ argspec: "args=[\'self\', \'min_occurrence\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "with_min_parameters"
+ argspec: "args=[\'self\', \'min_params\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "with_node_names"
+ argspec: "args=[\'self\', \'start_name_regexes\', \'show_name_regexes\', \'hide_name_regexes\', \'trim_name_regexes\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'None\'], "
+ }
+ member_method {
+ name: "with_pprof_output"
+ argspec: "args=[\'self\', \'pprof_file\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "with_stdout_output"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "with_step"
+ argspec: "args=[\'self\', \'step\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "with_timeline_output"
+ argspec: "args=[\'self\', \'timeline_file\'], varargs=None, keywords=None, defaults=None"
+ }
+}
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.profiler.-profiler.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.profiler.-profiler.pbtxt
new file mode 100644
index 0000000000..acb61dae9f
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.profiler.-profiler.pbtxt
@@ -0,0 +1,37 @@
+path: "tensorflow.profiler.Profiler"
+tf_class {
+ is_instance: "<class \'tensorflow.python.profiler.model_analyzer.Profiler\'>"
+ is_instance: "<type \'object\'>"
+ member_method {
+ name: "__init__"
+ argspec: "args=[\'self\', \'graph\', \'op_log\'], varargs=None, keywords=None, defaults=[\'None\', \'None\'], "
+ }
+ member_method {
+ name: "add_step"
+ argspec: "args=[\'self\', \'step\', \'run_meta\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "advise"
+ argspec: "args=[\'self\', \'options\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "profile_graph"
+ argspec: "args=[\'self\', \'options\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "profile_name_scope"
+ argspec: "args=[\'self\', \'options\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "profile_operations"
+ argspec: "args=[\'self\', \'options\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "profile_python"
+ argspec: "args=[\'self\', \'options\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "serialize_to_string"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+}
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.profiler.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.profiler.pbtxt
new file mode 100644
index 0000000000..7b4d3ac522
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.profiler.pbtxt
@@ -0,0 +1,39 @@
+path: "tensorflow.profiler"
+tf_module {
+ member {
+ name: "AdviceProto"
+ mtype: "<class \'google.protobuf.pyext.cpp_message.GeneratedProtocolMessageType\'>"
+ }
+ member {
+ name: "GraphNodeProto"
+ mtype: "<class \'google.protobuf.pyext.cpp_message.GeneratedProtocolMessageType\'>"
+ }
+ member {
+ name: "MultiGraphNodeProto"
+ mtype: "<class \'google.protobuf.pyext.cpp_message.GeneratedProtocolMessageType\'>"
+ }
+ member {
+ name: "OpLogProto"
+ mtype: "<class \'google.protobuf.pyext.cpp_message.GeneratedProtocolMessageType\'>"
+ }
+ member {
+ name: "ProfileOptionBuilder"
+ mtype: "<type \'type\'>"
+ }
+ member {
+ name: "Profiler"
+ mtype: "<type \'type\'>"
+ }
+ member_method {
+ name: "advise"
+ argspec: "args=[\'graph\', \'run_meta\', \'options\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'0\'], "
+ }
+ member_method {
+ name: "profile"
+ argspec: "args=[\'graph\', \'run_meta\', \'op_log\', \'cmd\', \'options\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'scope\', \'0\'], "
+ }
+ member_method {
+ name: "write_op_log"
+ argspec: "args=[\'graph\', \'log_dir\', \'op_log\', \'run_meta\', \'add_trace\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'True\'], "
+ }
+}
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.python_io.-t-f-record-compression-type.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.python_io.-t-f-record-compression-type.pbtxt
new file mode 100644
index 0000000000..4941dda50e
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.python_io.-t-f-record-compression-type.pbtxt
@@ -0,0 +1,20 @@
+path: "tensorflow.python_io.TFRecordCompressionType"
+tf_class {
+ is_instance: "<class \'tensorflow.python.lib.io.tf_record.TFRecordCompressionType\'>"
+ is_instance: "<type \'object\'>"
+ member {
+ name: "GZIP"
+ mtype: "<type \'int\'>"
+ }
+ member {
+ name: "NONE"
+ mtype: "<type \'int\'>"
+ }
+ member {
+ name: "ZLIB"
+ mtype: "<type \'int\'>"
+ }
+ member_method {
+ name: "__init__"
+ }
+}
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.python_io.-t-f-record-options.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.python_io.-t-f-record-options.pbtxt
new file mode 100644
index 0000000000..614ba42d3e
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.python_io.-t-f-record-options.pbtxt
@@ -0,0 +1,17 @@
+path: "tensorflow.python_io.TFRecordOptions"
+tf_class {
+ is_instance: "<class \'tensorflow.python.lib.io.tf_record.TFRecordOptions\'>"
+ is_instance: "<type \'object\'>"
+ member {
+ name: "compression_type_map"
+ mtype: "<type \'dict\'>"
+ }
+ member_method {
+ name: "__init__"
+ argspec: "args=[\'self\', \'compression_type\', \'flush_mode\', \'input_buffer_size\', \'output_buffer_size\', \'window_bits\', \'compression_level\', \'compression_method\', \'mem_level\', \'compression_strategy\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'None\'], "
+ }
+ member_method {
+ name: "get_compression_type_string"
+ argspec: "args=[\'cls\', \'options\'], varargs=None, keywords=None, defaults=None"
+ }
+}
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.python_io.-t-f-record-writer.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.python_io.-t-f-record-writer.pbtxt
new file mode 100644
index 0000000000..31775de2d1
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.python_io.-t-f-record-writer.pbtxt
@@ -0,0 +1,21 @@
+path: "tensorflow.python_io.TFRecordWriter"
+tf_class {
+ is_instance: "<class \'tensorflow.python.lib.io.tf_record.TFRecordWriter\'>"
+ is_instance: "<type \'object\'>"
+ member_method {
+ name: "__init__"
+ argspec: "args=[\'self\', \'path\', \'options\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "close"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "flush"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "write"
+ argspec: "args=[\'self\', \'record\'], varargs=None, keywords=None, defaults=None"
+ }
+}
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.python_io.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.python_io.pbtxt
new file mode 100644
index 0000000000..7c9953e5fe
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.python_io.pbtxt
@@ -0,0 +1,19 @@
+path: "tensorflow.python_io"
+tf_module {
+ member {
+ name: "TFRecordCompressionType"
+ mtype: "<type \'type\'>"
+ }
+ member {
+ name: "TFRecordOptions"
+ mtype: "<type \'type\'>"
+ }
+ member {
+ name: "TFRecordWriter"
+ mtype: "<type \'type\'>"
+ }
+ member_method {
+ name: "tf_record_iterator"
+ argspec: "args=[\'path\', \'options\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+}
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.quantization.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.quantization.pbtxt
new file mode 100644
index 0000000000..6d865efed0
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.quantization.pbtxt
@@ -0,0 +1,35 @@
+path: "tensorflow.quantization"
+tf_module {
+ member_method {
+ name: "dequantize"
+ argspec: "args=[\'input\', \'min_range\', \'max_range\', \'mode\', \'name\'], varargs=None, keywords=None, defaults=[\'MIN_COMBINED\', \'None\'], "
+ }
+ member_method {
+ name: "fake_quant_with_min_max_args"
+ argspec: "args=[\'inputs\', \'min\', \'max\', \'num_bits\', \'narrow_range\', \'name\'], varargs=None, keywords=None, defaults=[\'-6\', \'6\', \'8\', \'False\', \'None\'], "
+ }
+ member_method {
+ name: "fake_quant_with_min_max_args_gradient"
+ argspec: "args=[\'gradients\', \'inputs\', \'min\', \'max\', \'num_bits\', \'narrow_range\', \'name\'], varargs=None, keywords=None, defaults=[\'-6\', \'6\', \'8\', \'False\', \'None\'], "
+ }
+ member_method {
+ name: "fake_quant_with_min_max_vars"
+ argspec: "args=[\'inputs\', \'min\', \'max\', \'num_bits\', \'narrow_range\', \'name\'], varargs=None, keywords=None, defaults=[\'8\', \'False\', \'None\'], "
+ }
+ member_method {
+ name: "fake_quant_with_min_max_vars_gradient"
+ argspec: "args=[\'gradients\', \'inputs\', \'min\', \'max\', \'num_bits\', \'narrow_range\', \'name\'], varargs=None, keywords=None, defaults=[\'8\', \'False\', \'None\'], "
+ }
+ member_method {
+ name: "fake_quant_with_min_max_vars_per_channel"
+ argspec: "args=[\'inputs\', \'min\', \'max\', \'num_bits\', \'narrow_range\', \'name\'], varargs=None, keywords=None, defaults=[\'8\', \'False\', \'None\'], "
+ }
+ member_method {
+ name: "fake_quant_with_min_max_vars_per_channel_gradient"
+ argspec: "args=[\'gradients\', \'inputs\', \'min\', \'max\', \'num_bits\', \'narrow_range\', \'name\'], varargs=None, keywords=None, defaults=[\'8\', \'False\', \'None\'], "
+ }
+ member_method {
+ name: "quantized_concat"
+ argspec: "args=[\'concat_dim\', \'values\', \'input_mins\', \'input_maxes\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+}
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.random_normal_initializer.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.random_normal_initializer.pbtxt
new file mode 100644
index 0000000000..5993fdeb9c
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.random_normal_initializer.pbtxt
@@ -0,0 +1,18 @@
+path: "tensorflow.random_normal_initializer"
+tf_class {
+ is_instance: "<class \'tensorflow.python.ops.init_ops.RandomNormal\'>"
+ is_instance: "<class \'tensorflow.python.ops.init_ops.Initializer\'>"
+ is_instance: "<type \'object\'>"
+ member_method {
+ name: "__init__"
+ argspec: "args=[\'self\', \'mean\', \'stddev\', \'seed\', \'dtype\'], varargs=None, keywords=None, defaults=[\'0.0\', \'1.0\', \'None\', \"<dtype: \'float32\'>\"], "
+ }
+ member_method {
+ name: "from_config"
+ argspec: "args=[\'cls\', \'config\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_config"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+}
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.random_uniform_initializer.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.random_uniform_initializer.pbtxt
new file mode 100644
index 0000000000..a434ed1599
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.random_uniform_initializer.pbtxt
@@ -0,0 +1,18 @@
+path: "tensorflow.random_uniform_initializer"
+tf_class {
+ is_instance: "<class \'tensorflow.python.ops.init_ops.RandomUniform\'>"
+ is_instance: "<class \'tensorflow.python.ops.init_ops.Initializer\'>"
+ is_instance: "<type \'object\'>"
+ member_method {
+ name: "__init__"
+ argspec: "args=[\'self\', \'minval\', \'maxval\', \'seed\', \'dtype\'], varargs=None, keywords=None, defaults=[\'0\', \'None\', \'None\', \"<dtype: \'float32\'>\"], "
+ }
+ member_method {
+ name: "from_config"
+ argspec: "args=[\'cls\', \'config\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_config"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+}
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.resource_loader.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.resource_loader.pbtxt
new file mode 100644
index 0000000000..288b78b4cd
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.resource_loader.pbtxt
@@ -0,0 +1,23 @@
+path: "tensorflow.resource_loader"
+tf_module {
+ member_method {
+ name: "get_data_files_path"
+ argspec: "args=[], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_path_to_datafile"
+ argspec: "args=[\'path\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_root_dir_with_all_resources"
+ argspec: "args=[], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "load_resource"
+ argspec: "args=[\'path\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "readahead_file_path"
+ argspec: "args=[\'path\', \'readahead\'], varargs=None, keywords=None, defaults=[\'128M\'], "
+ }
+}
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.saved_model.builder.-saved-model-builder.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.saved_model.builder.-saved-model-builder.pbtxt
new file mode 100644
index 0000000000..83bd703540
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.saved_model.builder.-saved-model-builder.pbtxt
@@ -0,0 +1,21 @@
+path: "tensorflow.saved_model.builder.SavedModelBuilder"
+tf_class {
+ is_instance: "<class \'tensorflow.python.saved_model.builder_impl.SavedModelBuilder\'>"
+ is_instance: "<type \'object\'>"
+ member_method {
+ name: "__init__"
+ argspec: "args=[\'self\', \'export_dir\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "add_meta_graph"
+ argspec: "args=[\'self\', \'tags\', \'signature_def_map\', \'assets_collection\', \'legacy_init_op\', \'clear_devices\', \'main_op\', \'strip_default_attrs\', \'saver\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'False\', \'None\', \'False\', \'None\'], "
+ }
+ member_method {
+ name: "add_meta_graph_and_variables"
+ argspec: "args=[\'self\', \'sess\', \'tags\', \'signature_def_map\', \'assets_collection\', \'legacy_init_op\', \'clear_devices\', \'main_op\', \'strip_default_attrs\', \'saver\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'False\', \'None\', \'False\', \'None\'], "
+ }
+ member_method {
+ name: "save"
+ argspec: "args=[\'self\', \'as_text\'], varargs=None, keywords=None, defaults=[\'False\'], "
+ }
+}
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.saved_model.builder.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.saved_model.builder.pbtxt
new file mode 100644
index 0000000000..adc697ad1c
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.saved_model.builder.pbtxt
@@ -0,0 +1,7 @@
+path: "tensorflow.saved_model.builder"
+tf_module {
+ member {
+ name: "SavedModelBuilder"
+ mtype: "<type \'type\'>"
+ }
+}
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.saved_model.constants.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.saved_model.constants.pbtxt
new file mode 100644
index 0000000000..20e10aa094
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.saved_model.constants.pbtxt
@@ -0,0 +1,39 @@
+path: "tensorflow.saved_model.constants"
+tf_module {
+ member {
+ name: "ASSETS_DIRECTORY"
+ mtype: "<type \'str\'>"
+ }
+ member {
+ name: "ASSETS_KEY"
+ mtype: "<type \'str\'>"
+ }
+ member {
+ name: "LEGACY_INIT_OP_KEY"
+ mtype: "<type \'str\'>"
+ }
+ member {
+ name: "MAIN_OP_KEY"
+ mtype: "<type \'str\'>"
+ }
+ member {
+ name: "SAVED_MODEL_FILENAME_PB"
+ mtype: "<type \'str\'>"
+ }
+ member {
+ name: "SAVED_MODEL_FILENAME_PBTXT"
+ mtype: "<type \'str\'>"
+ }
+ member {
+ name: "SAVED_MODEL_SCHEMA_VERSION"
+ mtype: "<type \'int\'>"
+ }
+ member {
+ name: "VARIABLES_DIRECTORY"
+ mtype: "<type \'str\'>"
+ }
+ member {
+ name: "VARIABLES_FILENAME"
+ mtype: "<type \'str\'>"
+ }
+}
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.saved_model.loader.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.saved_model.loader.pbtxt
new file mode 100644
index 0000000000..511e6b4712
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.saved_model.loader.pbtxt
@@ -0,0 +1,11 @@
+path: "tensorflow.saved_model.loader"
+tf_module {
+ member_method {
+ name: "load"
+ argspec: "args=[\'sess\', \'tags\', \'export_dir\', \'import_scope\'], varargs=None, keywords=saver_kwargs, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "maybe_saved_model_directory"
+ argspec: "args=[\'export_dir\'], varargs=None, keywords=None, defaults=None"
+ }
+}
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.saved_model.main_op.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.saved_model.main_op.pbtxt
new file mode 100644
index 0000000000..176cb788c2
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.saved_model.main_op.pbtxt
@@ -0,0 +1,11 @@
+path: "tensorflow.saved_model.main_op"
+tf_module {
+ member_method {
+ name: "main_op"
+ argspec: "args=[], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "main_op_with_restore"
+ argspec: "args=[\'restore_op_name\'], varargs=None, keywords=None, defaults=None"
+ }
+}
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.saved_model.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.saved_model.pbtxt
new file mode 100644
index 0000000000..e1a0385092
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.saved_model.pbtxt
@@ -0,0 +1,39 @@
+path: "tensorflow.saved_model"
+tf_module {
+ member {
+ name: "builder"
+ mtype: "<type \'module\'>"
+ }
+ member {
+ name: "constants"
+ mtype: "<type \'module\'>"
+ }
+ member {
+ name: "loader"
+ mtype: "<type \'module\'>"
+ }
+ member {
+ name: "main_op"
+ mtype: "<type \'module\'>"
+ }
+ member {
+ name: "signature_constants"
+ mtype: "<type \'module\'>"
+ }
+ member {
+ name: "signature_def_utils"
+ mtype: "<type \'module\'>"
+ }
+ member {
+ name: "tag_constants"
+ mtype: "<type \'module\'>"
+ }
+ member {
+ name: "utils"
+ mtype: "<type \'module\'>"
+ }
+ member_method {
+ name: "simple_save"
+ argspec: "args=[\'session\', \'export_dir\', \'inputs\', \'outputs\', \'legacy_init_op\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+}
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.saved_model.signature_constants.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.saved_model.signature_constants.pbtxt
new file mode 100644
index 0000000000..478d410e06
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.saved_model.signature_constants.pbtxt
@@ -0,0 +1,47 @@
+path: "tensorflow.saved_model.signature_constants"
+tf_module {
+ member {
+ name: "CLASSIFY_INPUTS"
+ mtype: "<type \'str\'>"
+ }
+ member {
+ name: "CLASSIFY_METHOD_NAME"
+ mtype: "<type \'str\'>"
+ }
+ member {
+ name: "CLASSIFY_OUTPUT_CLASSES"
+ mtype: "<type \'str\'>"
+ }
+ member {
+ name: "CLASSIFY_OUTPUT_SCORES"
+ mtype: "<type \'str\'>"
+ }
+ member {
+ name: "DEFAULT_SERVING_SIGNATURE_DEF_KEY"
+ mtype: "<type \'str\'>"
+ }
+ member {
+ name: "PREDICT_INPUTS"
+ mtype: "<type \'str\'>"
+ }
+ member {
+ name: "PREDICT_METHOD_NAME"
+ mtype: "<type \'str\'>"
+ }
+ member {
+ name: "PREDICT_OUTPUTS"
+ mtype: "<type \'str\'>"
+ }
+ member {
+ name: "REGRESS_INPUTS"
+ mtype: "<type \'str\'>"
+ }
+ member {
+ name: "REGRESS_METHOD_NAME"
+ mtype: "<type \'str\'>"
+ }
+ member {
+ name: "REGRESS_OUTPUTS"
+ mtype: "<type \'str\'>"
+ }
+}
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.saved_model.signature_def_utils.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.saved_model.signature_def_utils.pbtxt
new file mode 100644
index 0000000000..a5602464ee
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.saved_model.signature_def_utils.pbtxt
@@ -0,0 +1,23 @@
+path: "tensorflow.saved_model.signature_def_utils"
+tf_module {
+ member_method {
+ name: "build_signature_def"
+ argspec: "args=[\'inputs\', \'outputs\', \'method_name\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\'], "
+ }
+ member_method {
+ name: "classification_signature_def"
+ argspec: "args=[\'examples\', \'classes\', \'scores\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "is_valid_signature"
+ argspec: "args=[\'signature_def\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "predict_signature_def"
+ argspec: "args=[\'inputs\', \'outputs\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "regression_signature_def"
+ argspec: "args=[\'examples\', \'predictions\'], varargs=None, keywords=None, defaults=None"
+ }
+}
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.saved_model.tag_constants.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.saved_model.tag_constants.pbtxt
new file mode 100644
index 0000000000..6af72498d7
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.saved_model.tag_constants.pbtxt
@@ -0,0 +1,19 @@
+path: "tensorflow.saved_model.tag_constants"
+tf_module {
+ member {
+ name: "GPU"
+ mtype: "<type \'str\'>"
+ }
+ member {
+ name: "SERVING"
+ mtype: "<type \'str\'>"
+ }
+ member {
+ name: "TPU"
+ mtype: "<type \'str\'>"
+ }
+ member {
+ name: "TRAINING"
+ mtype: "<type \'str\'>"
+ }
+}
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.saved_model.utils.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.saved_model.utils.pbtxt
new file mode 100644
index 0000000000..d95c946682
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.saved_model.utils.pbtxt
@@ -0,0 +1,11 @@
+path: "tensorflow.saved_model.utils"
+tf_module {
+ member_method {
+ name: "build_tensor_info"
+ argspec: "args=[\'tensor\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_tensor_from_tensor_info"
+ argspec: "args=[\'tensor_info\', \'graph\', \'import_scope\'], varargs=None, keywords=None, defaults=[\'None\', \'None\'], "
+ }
+}
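
build_tensor_info and get_tensor_from_tensor_info round-trip between graph tensors and TensorInfo protos; a small sketch with an assumed placeholder:

    import tensorflow as tf

    t = tf.placeholder(tf.float32, [None, 4], name="t")

    info = tf.saved_model.utils.build_tensor_info(t)   # TensorInfo proto
    print(info.name, info.dtype)

    # Resolve the proto back to the tensor in the default graph.
    same_t = tf.saved_model.utils.get_tensor_from_tensor_info(info)
    assert same_t.name == t.name
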
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.sets.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.sets.pbtxt
new file mode 100644
index 0000000000..8a196b1a55
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.sets.pbtxt
@@ -0,0 +1,19 @@
+path: "tensorflow.sets"
+tf_module {
+ member_method {
+ name: "set_difference"
+ argspec: "args=[\'a\', \'b\', \'aminusb\', \'validate_indices\'], varargs=None, keywords=None, defaults=[\'True\', \'True\'], "
+ }
+ member_method {
+ name: "set_intersection"
+ argspec: "args=[\'a\', \'b\', \'validate_indices\'], varargs=None, keywords=None, defaults=[\'True\'], "
+ }
+ member_method {
+ name: "set_size"
+ argspec: "args=[\'a\', \'validate_indices\'], varargs=None, keywords=None, defaults=[\'True\'], "
+ }
+ member_method {
+ name: "set_union"
+ argspec: "args=[\'a\', \'b\', \'validate_indices\'], varargs=None, keywords=None, defaults=[\'True\'], "
+ }
+}
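
A sketch of the tf.sets ops above, assuming dense int64 inputs where the last dimension holds each row's set elements:

    import tensorflow as tf

    a = tf.constant([[1, 2, 3, 4]], dtype=tf.int64)
    b = tf.constant([[3, 4, 5, 6]], dtype=tf.int64)

    inter = tf.sets.set_intersection(a, b)   # SparseTensor holding [3, 4]
    union = tf.sets.set_union(a, b)
    sizes = tf.sets.set_size(union)          # distinct elements per row

    with tf.Session() as sess:
        print(sess.run(tf.sparse_tensor_to_dense(inter)))   # [[3 4]]
        print(sess.run(sizes))                               # [6]
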
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.sparse.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.sparse.pbtxt
new file mode 100644
index 0000000000..ba9e651b34
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.sparse.pbtxt
@@ -0,0 +1,19 @@
+path: "tensorflow.sparse"
+tf_module {
+ member_method {
+ name: "cross"
+ argspec: "args=[\'inputs\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "cross_hashed"
+ argspec: "args=[\'inputs\', \'num_buckets\', \'hash_key\', \'name\'], varargs=None, keywords=None, defaults=[\'0\', \'None\', \'None\'], "
+ }
+ member_method {
+ name: "expand_dims"
+ argspec: "args=[\'sp_input\', \'axis\', \'name\'], varargs=None, keywords=None, defaults=[\'None\', \'None\'], "
+ }
+ member_method {
+ name: "eye"
+ argspec: "args=[\'num_rows\', \'num_columns\', \'dtype\', \'name\'], varargs=None, keywords=None, defaults=[\'None\', \"<dtype: \'float32\'>\", \'None\'], "
+ }
+}
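
Since this branch touches tf.sparse.expand_dims, a quick construction-only sketch of the sparse surface above (no session is needed just to build the ops):

    import tensorflow as tf

    sp = tf.sparse.eye(3)                        # 3x3 sparse identity, dense_shape [3, 3]
    batched = tf.sparse.expand_dims(sp, axis=0)  # dense_shape becomes [1, 3, 3]

    crossed = tf.sparse.cross([
        tf.constant([["a"], ["b"]]),
        tf.constant([["x"], ["y"]]),
    ])                                           # SparseTensor of crossed feature strings
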
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.spectral.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.spectral.pbtxt
new file mode 100644
index 0000000000..6a421ef12d
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.spectral.pbtxt
@@ -0,0 +1,59 @@
+path: "tensorflow.spectral"
+tf_module {
+ member_method {
+ name: "dct"
+ argspec: "args=[\'input\', \'type\', \'n\', \'axis\', \'norm\', \'name\'], varargs=None, keywords=None, defaults=[\'2\', \'None\', \'-1\', \'None\', \'None\'], "
+ }
+ member_method {
+ name: "fft"
+ argspec: "args=[\'input\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "fft2d"
+ argspec: "args=[\'input\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "fft3d"
+ argspec: "args=[\'input\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "idct"
+ argspec: "args=[\'input\', \'type\', \'n\', \'axis\', \'norm\', \'name\'], varargs=None, keywords=None, defaults=[\'2\', \'None\', \'-1\', \'None\', \'None\'], "
+ }
+ member_method {
+ name: "ifft"
+ argspec: "args=[\'input\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "ifft2d"
+ argspec: "args=[\'input\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "ifft3d"
+ argspec: "args=[\'input\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "irfft"
+ argspec: "args=[\'input_tensor\', \'fft_length\', \'name\'], varargs=None, keywords=None, defaults=[\'None\', \'None\'], "
+ }
+ member_method {
+ name: "irfft2d"
+ argspec: "args=[\'input_tensor\', \'fft_length\', \'name\'], varargs=None, keywords=None, defaults=[\'None\', \'None\'], "
+ }
+ member_method {
+ name: "irfft3d"
+ argspec: "args=[\'input_tensor\', \'fft_length\', \'name\'], varargs=None, keywords=None, defaults=[\'None\', \'None\'], "
+ }
+ member_method {
+ name: "rfft"
+ argspec: "args=[\'input_tensor\', \'fft_length\', \'name\'], varargs=None, keywords=None, defaults=[\'None\', \'None\'], "
+ }
+ member_method {
+ name: "rfft2d"
+ argspec: "args=[\'input_tensor\', \'fft_length\', \'name\'], varargs=None, keywords=None, defaults=[\'None\', \'None\'], "
+ }
+ member_method {
+ name: "rfft3d"
+ argspec: "args=[\'input_tensor\', \'fft_length\', \'name\'], varargs=None, keywords=None, defaults=[\'None\', \'None\'], "
+ }
+}
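
A sketch of the spectral ops above on an assumed 128-sample float32 signal:

    import numpy as np
    import tensorflow as tf

    signal = tf.constant(np.sin(np.linspace(0.0, 8.0 * np.pi, 128)), dtype=tf.float32)

    spectrum = tf.spectral.rfft(signal)      # complex64, length 128 // 2 + 1 = 65
    recovered = tf.spectral.irfft(spectrum)  # back to a length-128 float32 signal
    dct2 = tf.spectral.dct(signal, type=2, norm="ortho")

    with tf.Session() as sess:
        print(sess.run(tf.reduce_max(tf.abs(signal - recovered))))  # ~0, up to float error
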
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.strings.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.strings.pbtxt
new file mode 100644
index 0000000000..c81c156518
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.strings.pbtxt
@@ -0,0 +1,51 @@
+path: "tensorflow.strings"
+tf_module {
+ member_method {
+ name: "format"
+ argspec: "args=[\'template\', \'inputs\', \'placeholder\', \'summarize\', \'name\'], varargs=None, keywords=None, defaults=[\'{}\', \'3\', \'None\'], "
+ }
+ member_method {
+ name: "join"
+ argspec: "args=[\'inputs\', \'separator\', \'name\'], varargs=None, keywords=None, defaults=[\'\', \'None\'], "
+ }
+ member_method {
+ name: "length"
+ argspec: "args=[\'input\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "regex_full_match"
+ argspec: "args=[\'input\', \'pattern\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "regex_replace"
+ argspec: "args=[\'input\', \'pattern\', \'rewrite\', \'replace_global\', \'name\'], varargs=None, keywords=None, defaults=[\'True\', \'None\'], "
+ }
+ member_method {
+ name: "split"
+ argspec: "args=[\'source\', \'sep\', \'maxsplit\'], varargs=None, keywords=None, defaults=[\'None\', \'-1\'], "
+ }
+ member_method {
+ name: "strip"
+ argspec: "args=[\'input\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "substr"
+ argspec: "args=[\'input\', \'pos\', \'len\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "to_hash_bucket"
+ argspec: "args=[\'string_tensor\', \'num_buckets\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "to_hash_bucket_fast"
+ argspec: "args=[\'input\', \'num_buckets\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "to_hash_bucket_strong"
+ argspec: "args=[\'input\', \'num_buckets\', \'key\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "to_number"
+ argspec: "args=[\'string_tensor\', \'out_type\', \'name\'], varargs=None, keywords=None, defaults=[\"<dtype: \'float32\'>\", \'None\'], "
+ }
+}
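
A sketch of the string ops above on a small assumed constant:

    import tensorflow as tf

    s = tf.constant(["Tensor Flow", "golden files"])

    lengths = tf.strings.length(s)                          # byte lengths: [11, 12]
    joined = tf.strings.join(["v2", "api"], separator="/")  # "v2/api"
    snaked = tf.strings.regex_replace(s, " ", "_")          # replace_global defaults to True
    pieces = tf.strings.split(s, sep=" ")                   # SparseTensor of tokens
    buckets = tf.strings.to_hash_bucket_fast(s, num_buckets=10)
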
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.summary.-event.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.summary.-event.pbtxt
new file mode 100644
index 0000000000..eb99d0f533
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.summary.-event.pbtxt
@@ -0,0 +1,74 @@
+path: "tensorflow.summary.Event"
+tf_proto {
+ descriptor {
+ name: "Event"
+ field {
+ name: "wall_time"
+ number: 1
+ label: LABEL_OPTIONAL
+ type: TYPE_DOUBLE
+ }
+ field {
+ name: "step"
+ number: 2
+ label: LABEL_OPTIONAL
+ type: TYPE_INT64
+ }
+ field {
+ name: "file_version"
+ number: 3
+ label: LABEL_OPTIONAL
+ type: TYPE_STRING
+ oneof_index: 0
+ }
+ field {
+ name: "graph_def"
+ number: 4
+ label: LABEL_OPTIONAL
+ type: TYPE_BYTES
+ oneof_index: 0
+ }
+ field {
+ name: "summary"
+ number: 5
+ label: LABEL_OPTIONAL
+ type: TYPE_MESSAGE
+ type_name: ".tensorflow.Summary"
+ oneof_index: 0
+ }
+ field {
+ name: "log_message"
+ number: 6
+ label: LABEL_OPTIONAL
+ type: TYPE_MESSAGE
+ type_name: ".tensorflow.LogMessage"
+ oneof_index: 0
+ }
+ field {
+ name: "session_log"
+ number: 7
+ label: LABEL_OPTIONAL
+ type: TYPE_MESSAGE
+ type_name: ".tensorflow.SessionLog"
+ oneof_index: 0
+ }
+ field {
+ name: "tagged_run_metadata"
+ number: 8
+ label: LABEL_OPTIONAL
+ type: TYPE_MESSAGE
+ type_name: ".tensorflow.TaggedRunMetadata"
+ oneof_index: 0
+ }
+ field {
+ name: "meta_graph_def"
+ number: 9
+ label: LABEL_OPTIONAL
+ type: TYPE_BYTES
+ oneof_index: 0
+ }
+ oneof_decl {
+ name: "what"
+ }
+ }
+}
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.summary.-file-writer-cache.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.summary.-file-writer-cache.pbtxt
new file mode 100644
index 0000000000..2a5b63dcea
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.summary.-file-writer-cache.pbtxt
@@ -0,0 +1,16 @@
+path: "tensorflow.summary.FileWriterCache"
+tf_class {
+ is_instance: "<class \'tensorflow.python.summary.writer.writer_cache.FileWriterCache\'>"
+ is_instance: "<type \'object\'>"
+ member_method {
+ name: "__init__"
+ }
+ member_method {
+ name: "clear"
+ argspec: "args=[], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get"
+ argspec: "args=[\'logdir\'], varargs=None, keywords=None, defaults=None"
+ }
+}
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.summary.-file-writer.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.summary.-file-writer.pbtxt
new file mode 100644
index 0000000000..6b65b0ace3
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.summary.-file-writer.pbtxt
@@ -0,0 +1,50 @@
+path: "tensorflow.summary.FileWriter"
+tf_class {
+ is_instance: "<class \'tensorflow.python.summary.writer.writer.FileWriter\'>"
+ is_instance: "<class \'tensorflow.python.summary.writer.writer.SummaryToEventTransformer\'>"
+ is_instance: "<type \'object\'>"
+ member_method {
+ name: "__init__"
+ argspec: "args=[\'self\', \'logdir\', \'graph\', \'max_queue\', \'flush_secs\', \'graph_def\', \'filename_suffix\', \'session\'], varargs=None, keywords=None, defaults=[\'None\', \'10\', \'120\', \'None\', \'None\', \'None\'], "
+ }
+ member_method {
+ name: "add_event"
+ argspec: "args=[\'self\', \'event\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "add_graph"
+ argspec: "args=[\'self\', \'graph\', \'global_step\', \'graph_def\'], varargs=None, keywords=None, defaults=[\'None\', \'None\'], "
+ }
+ member_method {
+ name: "add_meta_graph"
+ argspec: "args=[\'self\', \'meta_graph_def\', \'global_step\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "add_run_metadata"
+ argspec: "args=[\'self\', \'run_metadata\', \'tag\', \'global_step\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "add_session_log"
+ argspec: "args=[\'self\', \'session_log\', \'global_step\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "add_summary"
+ argspec: "args=[\'self\', \'summary\', \'global_step\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "close"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "flush"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_logdir"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "reopen"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+}
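
A minimal graph-mode sketch of the FileWriter methods listed above; the log directory and loss values are placeholders:

    import tensorflow as tf

    loss = tf.placeholder(tf.float32, name="loss")
    loss_summary = tf.summary.scalar("loss", loss)

    with tf.Session() as sess:
        writer = tf.summary.FileWriter("/tmp/logs", graph=sess.graph)
        for step in range(3):
            serialized = sess.run(loss_summary, feed_dict={loss: 1.0 / (step + 1)})
            writer.add_summary(serialized, global_step=step)
        writer.flush()
        writer.close()
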
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.summary.-session-log.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.summary.-session-log.pbtxt
new file mode 100644
index 0000000000..73de73869c
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.summary.-session-log.pbtxt
@@ -0,0 +1,44 @@
+path: "tensorflow.summary.SessionLog"
+tf_proto {
+ descriptor {
+ name: "SessionLog"
+ field {
+ name: "status"
+ number: 1
+ label: LABEL_OPTIONAL
+ type: TYPE_ENUM
+ type_name: ".tensorflow.SessionLog.SessionStatus"
+ }
+ field {
+ name: "checkpoint_path"
+ number: 2
+ label: LABEL_OPTIONAL
+ type: TYPE_STRING
+ }
+ field {
+ name: "msg"
+ number: 3
+ label: LABEL_OPTIONAL
+ type: TYPE_STRING
+ }
+ enum_type {
+ name: "SessionStatus"
+ value {
+ name: "STATUS_UNSPECIFIED"
+ number: 0
+ }
+ value {
+ name: "START"
+ number: 1
+ }
+ value {
+ name: "STOP"
+ number: 2
+ }
+ value {
+ name: "CHECKPOINT"
+ number: 3
+ }
+ }
+ }
+}
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.summary.-summary-description.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.summary.-summary-description.pbtxt
new file mode 100644
index 0000000000..4a8b59cf02
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.summary.-summary-description.pbtxt
@@ -0,0 +1,12 @@
+path: "tensorflow.summary.SummaryDescription"
+tf_proto {
+ descriptor {
+ name: "SummaryDescription"
+ field {
+ name: "type_hint"
+ number: 1
+ label: LABEL_OPTIONAL
+ type: TYPE_STRING
+ }
+ }
+}
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.summary.-summary.-audio.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.summary.-summary.-audio.pbtxt
new file mode 100644
index 0000000000..8b271cf58f
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.summary.-summary.-audio.pbtxt
@@ -0,0 +1,36 @@
+path: "tensorflow.summary.Summary.Audio"
+tf_proto {
+ descriptor {
+ name: "Audio"
+ field {
+ name: "sample_rate"
+ number: 1
+ label: LABEL_OPTIONAL
+ type: TYPE_FLOAT
+ }
+ field {
+ name: "num_channels"
+ number: 2
+ label: LABEL_OPTIONAL
+ type: TYPE_INT64
+ }
+ field {
+ name: "length_frames"
+ number: 3
+ label: LABEL_OPTIONAL
+ type: TYPE_INT64
+ }
+ field {
+ name: "encoded_audio_string"
+ number: 4
+ label: LABEL_OPTIONAL
+ type: TYPE_BYTES
+ }
+ field {
+ name: "content_type"
+ number: 5
+ label: LABEL_OPTIONAL
+ type: TYPE_STRING
+ }
+ }
+}
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.summary.-summary.-image.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.summary.-summary.-image.pbtxt
new file mode 100644
index 0000000000..dbbc02dd05
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.summary.-summary.-image.pbtxt
@@ -0,0 +1,30 @@
+path: "tensorflow.summary.Summary.Image"
+tf_proto {
+ descriptor {
+ name: "Image"
+ field {
+ name: "height"
+ number: 1
+ label: LABEL_OPTIONAL
+ type: TYPE_INT32
+ }
+ field {
+ name: "width"
+ number: 2
+ label: LABEL_OPTIONAL
+ type: TYPE_INT32
+ }
+ field {
+ name: "colorspace"
+ number: 3
+ label: LABEL_OPTIONAL
+ type: TYPE_INT32
+ }
+ field {
+ name: "encoded_image_string"
+ number: 4
+ label: LABEL_OPTIONAL
+ type: TYPE_BYTES
+ }
+ }
+}
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.summary.-summary.-value.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.summary.-summary.-value.pbtxt
new file mode 100644
index 0000000000..4176171cd9
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.summary.-summary.-value.pbtxt
@@ -0,0 +1,74 @@
+path: "tensorflow.summary.Summary.Value"
+tf_proto {
+ descriptor {
+ name: "Value"
+ field {
+ name: "node_name"
+ number: 7
+ label: LABEL_OPTIONAL
+ type: TYPE_STRING
+ }
+ field {
+ name: "tag"
+ number: 1
+ label: LABEL_OPTIONAL
+ type: TYPE_STRING
+ }
+ field {
+ name: "metadata"
+ number: 9
+ label: LABEL_OPTIONAL
+ type: TYPE_MESSAGE
+ type_name: ".tensorflow.SummaryMetadata"
+ }
+ field {
+ name: "simple_value"
+ number: 2
+ label: LABEL_OPTIONAL
+ type: TYPE_FLOAT
+ oneof_index: 0
+ }
+ field {
+ name: "obsolete_old_style_histogram"
+ number: 3
+ label: LABEL_OPTIONAL
+ type: TYPE_BYTES
+ oneof_index: 0
+ }
+ field {
+ name: "image"
+ number: 4
+ label: LABEL_OPTIONAL
+ type: TYPE_MESSAGE
+ type_name: ".tensorflow.Summary.Image"
+ oneof_index: 0
+ }
+ field {
+ name: "histo"
+ number: 5
+ label: LABEL_OPTIONAL
+ type: TYPE_MESSAGE
+ type_name: ".tensorflow.HistogramProto"
+ oneof_index: 0
+ }
+ field {
+ name: "audio"
+ number: 6
+ label: LABEL_OPTIONAL
+ type: TYPE_MESSAGE
+ type_name: ".tensorflow.Summary.Audio"
+ oneof_index: 0
+ }
+ field {
+ name: "tensor"
+ number: 8
+ label: LABEL_OPTIONAL
+ type: TYPE_MESSAGE
+ type_name: ".tensorflow.TensorProto"
+ oneof_index: 0
+ }
+ oneof_decl {
+ name: "value"
+ }
+ }
+}
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.summary.-summary.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.summary.-summary.pbtxt
new file mode 100644
index 0000000000..d6c5e3a87a
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.summary.-summary.pbtxt
@@ -0,0 +1,144 @@
+path: "tensorflow.summary.Summary"
+tf_proto {
+ descriptor {
+ name: "Summary"
+ field {
+ name: "value"
+ number: 1
+ label: LABEL_REPEATED
+ type: TYPE_MESSAGE
+ type_name: ".tensorflow.Summary.Value"
+ }
+ nested_type {
+ name: "Image"
+ field {
+ name: "height"
+ number: 1
+ label: LABEL_OPTIONAL
+ type: TYPE_INT32
+ }
+ field {
+ name: "width"
+ number: 2
+ label: LABEL_OPTIONAL
+ type: TYPE_INT32
+ }
+ field {
+ name: "colorspace"
+ number: 3
+ label: LABEL_OPTIONAL
+ type: TYPE_INT32
+ }
+ field {
+ name: "encoded_image_string"
+ number: 4
+ label: LABEL_OPTIONAL
+ type: TYPE_BYTES
+ }
+ }
+ nested_type {
+ name: "Audio"
+ field {
+ name: "sample_rate"
+ number: 1
+ label: LABEL_OPTIONAL
+ type: TYPE_FLOAT
+ }
+ field {
+ name: "num_channels"
+ number: 2
+ label: LABEL_OPTIONAL
+ type: TYPE_INT64
+ }
+ field {
+ name: "length_frames"
+ number: 3
+ label: LABEL_OPTIONAL
+ type: TYPE_INT64
+ }
+ field {
+ name: "encoded_audio_string"
+ number: 4
+ label: LABEL_OPTIONAL
+ type: TYPE_BYTES
+ }
+ field {
+ name: "content_type"
+ number: 5
+ label: LABEL_OPTIONAL
+ type: TYPE_STRING
+ }
+ }
+ nested_type {
+ name: "Value"
+ field {
+ name: "node_name"
+ number: 7
+ label: LABEL_OPTIONAL
+ type: TYPE_STRING
+ }
+ field {
+ name: "tag"
+ number: 1
+ label: LABEL_OPTIONAL
+ type: TYPE_STRING
+ }
+ field {
+ name: "metadata"
+ number: 9
+ label: LABEL_OPTIONAL
+ type: TYPE_MESSAGE
+ type_name: ".tensorflow.SummaryMetadata"
+ }
+ field {
+ name: "simple_value"
+ number: 2
+ label: LABEL_OPTIONAL
+ type: TYPE_FLOAT
+ oneof_index: 0
+ }
+ field {
+ name: "obsolete_old_style_histogram"
+ number: 3
+ label: LABEL_OPTIONAL
+ type: TYPE_BYTES
+ oneof_index: 0
+ }
+ field {
+ name: "image"
+ number: 4
+ label: LABEL_OPTIONAL
+ type: TYPE_MESSAGE
+ type_name: ".tensorflow.Summary.Image"
+ oneof_index: 0
+ }
+ field {
+ name: "histo"
+ number: 5
+ label: LABEL_OPTIONAL
+ type: TYPE_MESSAGE
+ type_name: ".tensorflow.HistogramProto"
+ oneof_index: 0
+ }
+ field {
+ name: "audio"
+ number: 6
+ label: LABEL_OPTIONAL
+ type: TYPE_MESSAGE
+ type_name: ".tensorflow.Summary.Audio"
+ oneof_index: 0
+ }
+ field {
+ name: "tensor"
+ number: 8
+ label: LABEL_OPTIONAL
+ type: TYPE_MESSAGE
+ type_name: ".tensorflow.TensorProto"
+ oneof_index: 0
+ }
+ oneof_decl {
+ name: "value"
+ }
+ }
+ }
+}
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.summary.-tagged-run-metadata.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.summary.-tagged-run-metadata.pbtxt
new file mode 100644
index 0000000000..27c8873320
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.summary.-tagged-run-metadata.pbtxt
@@ -0,0 +1,18 @@
+path: "tensorflow.summary.TaggedRunMetadata"
+tf_proto {
+ descriptor {
+ name: "TaggedRunMetadata"
+ field {
+ name: "tag"
+ number: 1
+ label: LABEL_OPTIONAL
+ type: TYPE_STRING
+ }
+ field {
+ name: "run_metadata"
+ number: 2
+ label: LABEL_OPTIONAL
+ type: TYPE_BYTES
+ }
+ }
+}
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.summary.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.summary.pbtxt
new file mode 100644
index 0000000000..7ed9cd77a0
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.summary.pbtxt
@@ -0,0 +1,67 @@
+path: "tensorflow.summary"
+tf_module {
+ member {
+ name: "Event"
+ mtype: "<class \'google.protobuf.pyext.cpp_message.GeneratedProtocolMessageType\'>"
+ }
+ member {
+ name: "FileWriter"
+ mtype: "<type \'type\'>"
+ }
+ member {
+ name: "FileWriterCache"
+ mtype: "<type \'type\'>"
+ }
+ member {
+ name: "SessionLog"
+ mtype: "<class \'google.protobuf.pyext.cpp_message.GeneratedProtocolMessageType\'>"
+ }
+ member {
+ name: "Summary"
+ mtype: "<class \'google.protobuf.pyext.cpp_message.GeneratedProtocolMessageType\'>"
+ }
+ member {
+ name: "SummaryDescription"
+ mtype: "<class \'google.protobuf.pyext.cpp_message.GeneratedProtocolMessageType\'>"
+ }
+ member {
+ name: "TaggedRunMetadata"
+ mtype: "<class \'google.protobuf.pyext.cpp_message.GeneratedProtocolMessageType\'>"
+ }
+ member_method {
+ name: "audio"
+ argspec: "args=[\'name\', \'tensor\', \'sample_rate\', \'max_outputs\', \'collections\', \'family\'], varargs=None, keywords=None, defaults=[\'3\', \'None\', \'None\'], "
+ }
+ member_method {
+ name: "get_summary_description"
+ argspec: "args=[\'node_def\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "histogram"
+ argspec: "args=[\'name\', \'values\', \'collections\', \'family\'], varargs=None, keywords=None, defaults=[\'None\', \'None\'], "
+ }
+ member_method {
+ name: "image"
+ argspec: "args=[\'name\', \'tensor\', \'max_outputs\', \'collections\', \'family\'], varargs=None, keywords=None, defaults=[\'3\', \'None\', \'None\'], "
+ }
+ member_method {
+ name: "merge"
+ argspec: "args=[\'inputs\', \'collections\', \'name\'], varargs=None, keywords=None, defaults=[\'None\', \'None\'], "
+ }
+ member_method {
+ name: "merge_all"
+ argspec: "args=[\'key\', \'scope\', \'name\'], varargs=None, keywords=None, defaults=[\'summaries\', \'None\', \'None\'], "
+ }
+ member_method {
+ name: "scalar"
+ argspec: "args=[\'name\', \'tensor\', \'collections\', \'family\'], varargs=None, keywords=None, defaults=[\'None\', \'None\'], "
+ }
+ member_method {
+ name: "tensor_summary"
+ argspec: "args=[\'name\', \'tensor\', \'summary_description\', \'collections\', \'summary_metadata\', \'family\', \'display_name\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'None\', \'None\'], "
+ }
+ member_method {
+ name: "text"
+ argspec: "args=[\'name\', \'tensor\', \'collections\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+}
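
A sketch of the module-level summary ops above, relying on the default "summaries" collection that merge_all reads:

    import tensorflow as tf

    acts = tf.random_normal([1000])
    tf.summary.histogram("activations", acts)
    tf.summary.scalar("mean_activation", tf.reduce_mean(acts))

    merged = tf.summary.merge_all()    # one op emitting a serialized Summary proto
    with tf.Session() as sess:
        serialized = sess.run(merged)  # pass this to FileWriter.add_summary
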
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.sysconfig.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.sysconfig.pbtxt
new file mode 100644
index 0000000000..2f00aeac25
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.sysconfig.pbtxt
@@ -0,0 +1,19 @@
+path: "tensorflow.sysconfig"
+tf_module {
+ member_method {
+ name: "get_compile_flags"
+ argspec: "args=[], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_include"
+ argspec: "args=[], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_lib"
+ argspec: "args=[], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_link_flags"
+ argspec: "args=[], varargs=None, keywords=None, defaults=None"
+ }
+}
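
A sketch of how the tf.sysconfig getters above are typically combined when compiling a custom op; zero_out.cc and the g++ invocation are placeholders rather than a prescribed build:

    import tensorflow as tf

    cflags = tf.sysconfig.get_compile_flags()  # includes -I<get_include()> and ABI defines
    lflags = tf.sysconfig.get_link_flags()     # includes -L<get_lib()> and the framework lib

    cmd = (["g++", "-std=c++11", "-shared", "zero_out.cc", "-o", "zero_out.so", "-fPIC"]
           + cflags + lflags)
    print(" ".join(cmd))
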
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.test.-benchmark.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.test.-benchmark.pbtxt
new file mode 100644
index 0000000000..df528e26b6
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.test.-benchmark.pbtxt
@@ -0,0 +1,21 @@
+path: "tensorflow.test.Benchmark"
+tf_class {
+ is_instance: "<class \'tensorflow.python.platform.benchmark.TensorFlowBenchmark\'>"
+ is_instance: "<class \'tensorflow.python.platform.benchmark.Benchmark\'>"
+ is_instance: "<type \'object\'>"
+ member_method {
+ name: "__init__"
+ }
+ member_method {
+ name: "is_abstract"
+ argspec: "args=[\'cls\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "report_benchmark"
+ argspec: "args=[\'self\', \'iters\', \'cpu_time\', \'wall_time\', \'throughput\', \'extras\', \'name\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'None\', \'None\', \'None\'], "
+ }
+ member_method {
+ name: "run_op_benchmark"
+ argspec: "args=[\'self\', \'sess\', \'op_or_tensor\', \'feed_dict\', \'burn_iters\', \'min_iters\', \'store_trace\', \'store_memory_usage\', \'name\', \'extras\', \'mbs\'], varargs=None, keywords=None, defaults=[\'None\', \'2\', \'10\', \'False\', \'True\', \'None\', \'None\', \'0\'], "
+ }
+}
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.test.-stub-out-for-testing.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.test.-stub-out-for-testing.pbtxt
new file mode 100644
index 0000000000..e02a0c6097
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.test.-stub-out-for-testing.pbtxt
@@ -0,0 +1,28 @@
+path: "tensorflow.test.StubOutForTesting"
+tf_class {
+ is_instance: "<class \'tensorflow.python.platform.googletest.StubOutForTesting\'>"
+ member_method {
+ name: "CleanUp"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "Set"
+ argspec: "args=[\'self\', \'parent\', \'child_name\', \'new_child\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "SmartSet"
+ argspec: "args=[\'self\', \'obj\', \'attr_name\', \'new_attr\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "SmartUnsetAll"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "UnsetAll"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "__init__"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+}
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.test.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.test.pbtxt
new file mode 100644
index 0000000000..abe9b068ae
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.test.pbtxt
@@ -0,0 +1,59 @@
+path: "tensorflow.test"
+tf_module {
+ member {
+ name: "Benchmark"
+ mtype: "<class \'tensorflow.python.platform.benchmark._BenchmarkRegistrar\'>"
+ }
+ member {
+ name: "StubOutForTesting"
+ mtype: "<type \'type\'>"
+ }
+ member {
+ name: "TestCase"
+ mtype: "<type \'type\'>"
+ }
+ member {
+ name: "mock"
+ mtype: "<type \'module\'>"
+ }
+ member_method {
+ name: "assert_equal_graph_def"
+ argspec: "args=[\'actual\', \'expected\', \'checkpoint_v2\'], varargs=None, keywords=None, defaults=[\'False\'], "
+ }
+ member_method {
+ name: "compute_gradient"
+ argspec: "args=[\'x\', \'x_shape\', \'y\', \'y_shape\', \'x_init_value\', \'delta\', \'init_targets\', \'extra_feed_dict\'], varargs=None, keywords=None, defaults=[\'None\', \'0.001\', \'None\', \'None\'], "
+ }
+ member_method {
+ name: "compute_gradient_error"
+ argspec: "args=[\'x\', \'x_shape\', \'y\', \'y_shape\', \'x_init_value\', \'delta\', \'init_targets\', \'extra_feed_dict\'], varargs=None, keywords=None, defaults=[\'None\', \'0.001\', \'None\', \'None\'], "
+ }
+ member_method {
+ name: "create_local_cluster"
+ argspec: "args=[\'num_workers\', \'num_ps\', \'protocol\', \'worker_config\', \'ps_config\'], varargs=None, keywords=None, defaults=[\'grpc\', \'None\', \'None\'], "
+ }
+ member_method {
+ name: "get_temp_dir"
+ argspec: "args=[], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "gpu_device_name"
+ argspec: "args=[], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "is_built_with_cuda"
+ argspec: "args=[], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "is_gpu_available"
+ argspec: "args=[\'cuda_only\', \'min_cuda_compute_capability\'], varargs=None, keywords=None, defaults=[\'False\', \'None\'], "
+ }
+ member_method {
+ name: "main"
+ argspec: "args=[\'argv\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "test_src_dir_path"
+ argspec: "args=[\'relative_path\'], varargs=None, keywords=None, defaults=None"
+ }
+}
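
A minimal tf.test.TestCase sketch exercising the surface above; the class and test names are arbitrary:

    import tensorflow as tf

    class SquareTest(tf.test.TestCase):

      def testSquare(self):
        with self.test_session():
          x = tf.square([2, 3])
          self.assertAllEqual(x.eval(), [4, 9])

    if __name__ == "__main__":
      print(tf.test.is_gpu_available(cuda_only=True))
      tf.test.main()
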
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.train.-adadelta-optimizer.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.train.-adadelta-optimizer.pbtxt
new file mode 100644
index 0000000000..1f1d8b6f9e
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.train.-adadelta-optimizer.pbtxt
@@ -0,0 +1,51 @@
+path: "tensorflow.train.AdadeltaOptimizer"
+tf_class {
+ is_instance: "<class \'tensorflow.python.training.adadelta.AdadeltaOptimizer\'>"
+ is_instance: "<class \'tensorflow.python.training.optimizer.Optimizer\'>"
+ is_instance: "<class \'tensorflow.python.training.checkpointable.base.CheckpointableBase\'>"
+ is_instance: "<type \'object\'>"
+ member {
+ name: "GATE_GRAPH"
+ mtype: "<type \'int\'>"
+ }
+ member {
+ name: "GATE_NONE"
+ mtype: "<type \'int\'>"
+ }
+ member {
+ name: "GATE_OP"
+ mtype: "<type \'int\'>"
+ }
+ member_method {
+ name: "__init__"
+ argspec: "args=[\'self\', \'learning_rate\', \'rho\', \'epsilon\', \'use_locking\', \'name\'], varargs=None, keywords=None, defaults=[\'0.001\', \'0.95\', \'1e-08\', \'False\', \'Adadelta\'], "
+ }
+ member_method {
+ name: "apply_gradients"
+ argspec: "args=[\'self\', \'grads_and_vars\', \'global_step\', \'name\'], varargs=None, keywords=None, defaults=[\'None\', \'None\'], "
+ }
+ member_method {
+ name: "compute_gradients"
+ argspec: "args=[\'self\', \'loss\', \'var_list\', \'gate_gradients\', \'aggregation_method\', \'colocate_gradients_with_ops\', \'grad_loss\'], varargs=None, keywords=None, defaults=[\'None\', \'1\', \'None\', \'False\', \'None\'], "
+ }
+ member_method {
+ name: "get_name"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_slot"
+ argspec: "args=[\'self\', \'var\', \'name\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_slot_names"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "minimize"
+ argspec: "args=[\'self\', \'loss\', \'global_step\', \'var_list\', \'gate_gradients\', \'aggregation_method\', \'colocate_gradients_with_ops\', \'name\', \'grad_loss\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'1\', \'None\', \'False\', \'None\', \'None\'], "
+ }
+ member_method {
+ name: "variables"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+}
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.train.-adagrad-d-a-optimizer.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.train.-adagrad-d-a-optimizer.pbtxt
new file mode 100644
index 0000000000..a7c05d4849
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.train.-adagrad-d-a-optimizer.pbtxt
@@ -0,0 +1,51 @@
+path: "tensorflow.train.AdagradDAOptimizer"
+tf_class {
+ is_instance: "<class \'tensorflow.python.training.adagrad_da.AdagradDAOptimizer\'>"
+ is_instance: "<class \'tensorflow.python.training.optimizer.Optimizer\'>"
+ is_instance: "<class \'tensorflow.python.training.checkpointable.base.CheckpointableBase\'>"
+ is_instance: "<type \'object\'>"
+ member {
+ name: "GATE_GRAPH"
+ mtype: "<type \'int\'>"
+ }
+ member {
+ name: "GATE_NONE"
+ mtype: "<type \'int\'>"
+ }
+ member {
+ name: "GATE_OP"
+ mtype: "<type \'int\'>"
+ }
+ member_method {
+ name: "__init__"
+ argspec: "args=[\'self\', \'learning_rate\', \'global_step\', \'initial_gradient_squared_accumulator_value\', \'l1_regularization_strength\', \'l2_regularization_strength\', \'use_locking\', \'name\'], varargs=None, keywords=None, defaults=[\'0.1\', \'0.0\', \'0.0\', \'False\', \'AdagradDA\'], "
+ }
+ member_method {
+ name: "apply_gradients"
+ argspec: "args=[\'self\', \'grads_and_vars\', \'global_step\', \'name\'], varargs=None, keywords=None, defaults=[\'None\', \'None\'], "
+ }
+ member_method {
+ name: "compute_gradients"
+ argspec: "args=[\'self\', \'loss\', \'var_list\', \'gate_gradients\', \'aggregation_method\', \'colocate_gradients_with_ops\', \'grad_loss\'], varargs=None, keywords=None, defaults=[\'None\', \'1\', \'None\', \'False\', \'None\'], "
+ }
+ member_method {
+ name: "get_name"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_slot"
+ argspec: "args=[\'self\', \'var\', \'name\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_slot_names"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "minimize"
+ argspec: "args=[\'self\', \'loss\', \'global_step\', \'var_list\', \'gate_gradients\', \'aggregation_method\', \'colocate_gradients_with_ops\', \'name\', \'grad_loss\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'1\', \'None\', \'False\', \'None\', \'None\'], "
+ }
+ member_method {
+ name: "variables"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+}
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.train.-adagrad-optimizer.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.train.-adagrad-optimizer.pbtxt
new file mode 100644
index 0000000000..bc8b92389c
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.train.-adagrad-optimizer.pbtxt
@@ -0,0 +1,51 @@
+path: "tensorflow.train.AdagradOptimizer"
+tf_class {
+ is_instance: "<class \'tensorflow.python.training.adagrad.AdagradOptimizer\'>"
+ is_instance: "<class \'tensorflow.python.training.optimizer.Optimizer\'>"
+ is_instance: "<class \'tensorflow.python.training.checkpointable.base.CheckpointableBase\'>"
+ is_instance: "<type \'object\'>"
+ member {
+ name: "GATE_GRAPH"
+ mtype: "<type \'int\'>"
+ }
+ member {
+ name: "GATE_NONE"
+ mtype: "<type \'int\'>"
+ }
+ member {
+ name: "GATE_OP"
+ mtype: "<type \'int\'>"
+ }
+ member_method {
+ name: "__init__"
+ argspec: "args=[\'self\', \'learning_rate\', \'initial_accumulator_value\', \'use_locking\', \'name\'], varargs=None, keywords=None, defaults=[\'0.1\', \'False\', \'Adagrad\'], "
+ }
+ member_method {
+ name: "apply_gradients"
+ argspec: "args=[\'self\', \'grads_and_vars\', \'global_step\', \'name\'], varargs=None, keywords=None, defaults=[\'None\', \'None\'], "
+ }
+ member_method {
+ name: "compute_gradients"
+ argspec: "args=[\'self\', \'loss\', \'var_list\', \'gate_gradients\', \'aggregation_method\', \'colocate_gradients_with_ops\', \'grad_loss\'], varargs=None, keywords=None, defaults=[\'None\', \'1\', \'None\', \'False\', \'None\'], "
+ }
+ member_method {
+ name: "get_name"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_slot"
+ argspec: "args=[\'self\', \'var\', \'name\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_slot_names"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "minimize"
+ argspec: "args=[\'self\', \'loss\', \'global_step\', \'var_list\', \'gate_gradients\', \'aggregation_method\', \'colocate_gradients_with_ops\', \'name\', \'grad_loss\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'1\', \'None\', \'False\', \'None\', \'None\'], "
+ }
+ member_method {
+ name: "variables"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+}
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.train.-adam-optimizer.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.train.-adam-optimizer.pbtxt
new file mode 100644
index 0000000000..5d17be9378
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.train.-adam-optimizer.pbtxt
@@ -0,0 +1,51 @@
+path: "tensorflow.train.AdamOptimizer"
+tf_class {
+ is_instance: "<class \'tensorflow.python.training.adam.AdamOptimizer\'>"
+ is_instance: "<class \'tensorflow.python.training.optimizer.Optimizer\'>"
+ is_instance: "<class \'tensorflow.python.training.checkpointable.base.CheckpointableBase\'>"
+ is_instance: "<type \'object\'>"
+ member {
+ name: "GATE_GRAPH"
+ mtype: "<type \'int\'>"
+ }
+ member {
+ name: "GATE_NONE"
+ mtype: "<type \'int\'>"
+ }
+ member {
+ name: "GATE_OP"
+ mtype: "<type \'int\'>"
+ }
+ member_method {
+ name: "__init__"
+ argspec: "args=[\'self\', \'learning_rate\', \'beta1\', \'beta2\', \'epsilon\', \'use_locking\', \'name\'], varargs=None, keywords=None, defaults=[\'0.001\', \'0.9\', \'0.999\', \'1e-08\', \'False\', \'Adam\'], "
+ }
+ member_method {
+ name: "apply_gradients"
+ argspec: "args=[\'self\', \'grads_and_vars\', \'global_step\', \'name\'], varargs=None, keywords=None, defaults=[\'None\', \'None\'], "
+ }
+ member_method {
+ name: "compute_gradients"
+ argspec: "args=[\'self\', \'loss\', \'var_list\', \'gate_gradients\', \'aggregation_method\', \'colocate_gradients_with_ops\', \'grad_loss\'], varargs=None, keywords=None, defaults=[\'None\', \'1\', \'None\', \'False\', \'None\'], "
+ }
+ member_method {
+ name: "get_name"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_slot"
+ argspec: "args=[\'self\', \'var\', \'name\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_slot_names"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "minimize"
+ argspec: "args=[\'self\', \'loss\', \'global_step\', \'var_list\', \'gate_gradients\', \'aggregation_method\', \'colocate_gradients_with_ops\', \'name\', \'grad_loss\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'1\', \'None\', \'False\', \'None\', \'None\'], "
+ }
+ member_method {
+ name: "variables"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+}
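
A sketch of the optimizer interface shared by the classes above, using AdamOptimizer.minimize on an assumed one-variable loss:

    import tensorflow as tf

    w = tf.Variable([3.0])
    loss = tf.reduce_sum(tf.square(w))

    opt = tf.train.AdamOptimizer(learning_rate=0.1)
    train_op = opt.minimize(loss)   # compute_gradients followed by apply_gradients

    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        for _ in range(200):
            sess.run(train_op)
        print(sess.run(w))          # driven toward [0.]
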
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.train.-bytes-list.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.train.-bytes-list.pbtxt
new file mode 100644
index 0000000000..87e4f160e5
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.train.-bytes-list.pbtxt
@@ -0,0 +1,12 @@
+path: "tensorflow.train.BytesList"
+tf_proto {
+ descriptor {
+ name: "BytesList"
+ field {
+ name: "value"
+ number: 1
+ label: LABEL_REPEATED
+ type: TYPE_BYTES
+ }
+ }
+}
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.train.-checkpoint-saver-hook.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.train.-checkpoint-saver-hook.pbtxt
new file mode 100644
index 0000000000..c3037baa8c
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.train.-checkpoint-saver-hook.pbtxt
@@ -0,0 +1,30 @@
+path: "tensorflow.train.CheckpointSaverHook"
+tf_class {
+ is_instance: "<class \'tensorflow.python.training.basic_session_run_hooks.CheckpointSaverHook\'>"
+ is_instance: "<class \'tensorflow.python.training.session_run_hook.SessionRunHook\'>"
+ is_instance: "<type \'object\'>"
+ member_method {
+ name: "__init__"
+ argspec: "args=[\'self\', \'checkpoint_dir\', \'save_secs\', \'save_steps\', \'saver\', \'checkpoint_basename\', \'scaffold\', \'listeners\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'model.ckpt\', \'None\', \'None\'], "
+ }
+ member_method {
+ name: "after_create_session"
+ argspec: "args=[\'self\', \'session\', \'coord\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "after_run"
+ argspec: "args=[\'self\', \'run_context\', \'run_values\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "before_run"
+ argspec: "args=[\'self\', \'run_context\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "begin"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "end"
+ argspec: "args=[\'self\', \'session\'], varargs=None, keywords=None, defaults=None"
+ }
+}
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.train.-checkpoint-saver-listener.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.train.-checkpoint-saver-listener.pbtxt
new file mode 100644
index 0000000000..9d3688e565
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.train.-checkpoint-saver-listener.pbtxt
@@ -0,0 +1,24 @@
+path: "tensorflow.train.CheckpointSaverListener"
+tf_class {
+ is_instance: "<class \'tensorflow.python.training.basic_session_run_hooks.CheckpointSaverListener\'>"
+ is_instance: "<type \'object\'>"
+ member_method {
+ name: "__init__"
+ }
+ member_method {
+ name: "after_save"
+ argspec: "args=[\'self\', \'session\', \'global_step_value\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "before_save"
+ argspec: "args=[\'self\', \'session\', \'global_step_value\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "begin"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "end"
+ argspec: "args=[\'self\', \'session\', \'global_step_value\'], varargs=None, keywords=None, defaults=None"
+ }
+}
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.train.-checkpoint.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.train.-checkpoint.pbtxt
new file mode 100644
index 0000000000..5be37200f3
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.train.-checkpoint.pbtxt
@@ -0,0 +1,27 @@
+path: "tensorflow.train.Checkpoint"
+tf_class {
+ is_instance: "<class \'tensorflow.python.training.checkpointable.util.Checkpoint\'>"
+ is_instance: "<class \'tensorflow.python.training.checkpointable.tracking.Checkpointable\'>"
+ is_instance: "<class \'tensorflow.python.training.checkpointable.base.CheckpointableBase\'>"
+ is_instance: "<type \'object\'>"
+ member {
+ name: "save_counter"
+ mtype: "<type \'property\'>"
+ }
+ member_method {
+ name: "__init__"
+ argspec: "args=[\'self\'], varargs=None, keywords=kwargs, defaults=None"
+ }
+ member_method {
+ name: "restore"
+ argspec: "args=[\'self\', \'save_path\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "save"
+ argspec: "args=[\'self\', \'file_prefix\', \'session\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "write"
+ argspec: "args=[\'self\', \'file_prefix\', \'session\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+}
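
A graph-mode sketch of object-based saving with tf.train.Checkpoint; the /tmp prefix is a placeholder and the restore flow is a best-effort sketch of the session-based usage:

    import tensorflow as tf

    v = tf.Variable(42.0)
    ckpt = tf.train.Checkpoint(v=v)

    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        path = ckpt.save("/tmp/demo_ckpt", session=sess)   # e.g. /tmp/demo_ckpt-1
        sess.run(tf.assign(v, 0.0))
        ckpt.restore(path).run_restore_ops(session=sess)
        print(sess.run(v))                                 # 42.0 again
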
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.train.-chief-session-creator.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.train.-chief-session-creator.pbtxt
new file mode 100644
index 0000000000..abbe273be3
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.train.-chief-session-creator.pbtxt
@@ -0,0 +1,14 @@
+path: "tensorflow.train.ChiefSessionCreator"
+tf_class {
+ is_instance: "<class \'tensorflow.python.training.monitored_session.ChiefSessionCreator\'>"
+ is_instance: "<class \'tensorflow.python.training.monitored_session.SessionCreator\'>"
+ is_instance: "<type \'object\'>"
+ member_method {
+ name: "__init__"
+ argspec: "args=[\'self\', \'scaffold\', \'master\', \'config\', \'checkpoint_dir\', \'checkpoint_filename_with_path\'], varargs=None, keywords=None, defaults=[\'None\', \'\', \'None\', \'None\', \'None\'], "
+ }
+ member_method {
+ name: "create_session"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+}
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.train.-cluster-def.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.train.-cluster-def.pbtxt
new file mode 100644
index 0000000000..f9de26839f
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.train.-cluster-def.pbtxt
@@ -0,0 +1,13 @@
+path: "tensorflow.train.ClusterDef"
+tf_proto {
+ descriptor {
+ name: "ClusterDef"
+ field {
+ name: "job"
+ number: 1
+ label: LABEL_REPEATED
+ type: TYPE_MESSAGE
+ type_name: ".tensorflow.JobDef"
+ }
+ }
+}
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.train.-cluster-spec.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.train.-cluster-spec.pbtxt
new file mode 100644
index 0000000000..1658b15a5f
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.train.-cluster-spec.pbtxt
@@ -0,0 +1,37 @@
+path: "tensorflow.train.ClusterSpec"
+tf_class {
+ is_instance: "<class \'tensorflow.python.training.server_lib.ClusterSpec\'>"
+ is_instance: "<type \'object\'>"
+ member {
+ name: "jobs"
+ mtype: "<type \'property\'>"
+ }
+ member_method {
+ name: "__init__"
+ argspec: "args=[\'self\', \'cluster\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "as_cluster_def"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "as_dict"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "job_tasks"
+ argspec: "args=[\'self\', \'job_name\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "num_tasks"
+ argspec: "args=[\'self\', \'job_name\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "task_address"
+ argspec: "args=[\'self\', \'job_name\', \'task_index\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "task_indices"
+ argspec: "args=[\'self\', \'job_name\'], varargs=None, keywords=None, defaults=None"
+ }
+}
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.train.-coordinator.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.train.-coordinator.pbtxt
new file mode 100644
index 0000000000..11277f077e
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.train.-coordinator.pbtxt
@@ -0,0 +1,45 @@
+path: "tensorflow.train.Coordinator"
+tf_class {
+ is_instance: "<class \'tensorflow.python.training.coordinator.Coordinator\'>"
+ is_instance: "<type \'object\'>"
+ member {
+ name: "joined"
+ mtype: "<type \'property\'>"
+ }
+ member_method {
+ name: "__init__"
+ argspec: "args=[\'self\', \'clean_stop_exception_types\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "clear_stop"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "join"
+ argspec: "args=[\'self\', \'threads\', \'stop_grace_period_secs\', \'ignore_live_threads\'], varargs=None, keywords=None, defaults=[\'None\', \'120\', \'False\'], "
+ }
+ member_method {
+ name: "raise_requested_exception"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "register_thread"
+ argspec: "args=[\'self\', \'thread\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "request_stop"
+ argspec: "args=[\'self\', \'ex\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "should_stop"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "stop_on_exception"
+ argspec: "args=[], varargs=args, keywords=kwds, defaults=None"
+ }
+ member_method {
+ name: "wait_for_stop"
+ argspec: "args=[\'self\', \'timeout\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+}
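
A sketch of tf.train.Coordinator driving a few plain Python threads; the worker logic is an arbitrary stand-in:

    import threading
    import tensorflow as tf

    coord = tf.train.Coordinator()
    results = []

    def worker(i):
        # stop_on_exception records any exception and re-raises it from join().
        with coord.stop_on_exception():
            results.append(i * i)
            if i == 3:
                coord.request_stop()

    threads = [threading.Thread(target=worker, args=(i,)) for i in range(5)]
    for t in threads:
        t.start()
    coord.join(threads)   # blocks until all threads report in
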
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.train.-example.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.train.-example.pbtxt
new file mode 100644
index 0000000000..23c30f1ef4
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.train.-example.pbtxt
@@ -0,0 +1,13 @@
+path: "tensorflow.train.Example"
+tf_proto {
+ descriptor {
+ name: "Example"
+ field {
+ name: "features"
+ number: 1
+ label: LABEL_OPTIONAL
+ type: TYPE_MESSAGE
+ type_name: ".tensorflow.Features"
+ }
+ }
+}
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.train.-exponential-moving-average.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.train.-exponential-moving-average.pbtxt
new file mode 100644
index 0000000000..c9fe136e68
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.train.-exponential-moving-average.pbtxt
@@ -0,0 +1,29 @@
+path: "tensorflow.train.ExponentialMovingAverage"
+tf_class {
+ is_instance: "<class \'tensorflow.python.training.moving_averages.ExponentialMovingAverage\'>"
+ is_instance: "<type \'object\'>"
+ member {
+ name: "name"
+ mtype: "<type \'property\'>"
+ }
+ member_method {
+ name: "__init__"
+ argspec: "args=[\'self\', \'decay\', \'num_updates\', \'zero_debias\', \'name\'], varargs=None, keywords=None, defaults=[\'None\', \'False\', \'ExponentialMovingAverage\'], "
+ }
+ member_method {
+ name: "apply"
+ argspec: "args=[\'self\', \'var_list\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "average"
+ argspec: "args=[\'self\', \'var\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "average_name"
+ argspec: "args=[\'self\', \'var\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "variables_to_restore"
+ argspec: "args=[\'self\', \'moving_avg_variables\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+}
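
A sketch of ExponentialMovingAverage maintaining a shadow copy of one assumed variable:

    import tensorflow as tf

    v = tf.Variable(0.0)
    ema = tf.train.ExponentialMovingAverage(decay=0.9)
    maintain_op = ema.apply([v])   # creates the shadow variable
    shadow = ema.average(v)

    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        for x in [1.0, 2.0, 3.0]:
            sess.run(tf.assign(v, x))
            sess.run(maintain_op)
        print(sess.run(shadow))    # a decay-weighted blend of the assigned values
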
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.train.-feature-list.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.train.-feature-list.pbtxt
new file mode 100644
index 0000000000..2a8b3714fc
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.train.-feature-list.pbtxt
@@ -0,0 +1,13 @@
+path: "tensorflow.train.FeatureList"
+tf_proto {
+ descriptor {
+ name: "FeatureList"
+ field {
+ name: "feature"
+ number: 1
+ label: LABEL_REPEATED
+ type: TYPE_MESSAGE
+ type_name: ".tensorflow.Feature"
+ }
+ }
+}
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.train.-feature-lists.-feature-list-entry.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.train.-feature-lists.-feature-list-entry.pbtxt
new file mode 100644
index 0000000000..cd1d56e606
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.train.-feature-lists.-feature-list-entry.pbtxt
@@ -0,0 +1,22 @@
+path: "tensorflow.train.FeatureLists.FeatureListEntry"
+tf_proto {
+ descriptor {
+ name: "FeatureListEntry"
+ field {
+ name: "key"
+ number: 1
+ label: LABEL_OPTIONAL
+ type: TYPE_STRING
+ }
+ field {
+ name: "value"
+ number: 2
+ label: LABEL_OPTIONAL
+ type: TYPE_MESSAGE
+ type_name: ".tensorflow.FeatureList"
+ }
+ options {
+ map_entry: true
+ }
+ }
+}
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.train.-feature-lists.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.train.-feature-lists.pbtxt
new file mode 100644
index 0000000000..3c183a6476
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.train.-feature-lists.pbtxt
@@ -0,0 +1,32 @@
+path: "tensorflow.train.FeatureLists"
+tf_proto {
+ descriptor {
+ name: "FeatureLists"
+ field {
+ name: "feature_list"
+ number: 1
+ label: LABEL_REPEATED
+ type: TYPE_MESSAGE
+ type_name: ".tensorflow.FeatureLists.FeatureListEntry"
+ }
+ nested_type {
+ name: "FeatureListEntry"
+ field {
+ name: "key"
+ number: 1
+ label: LABEL_OPTIONAL
+ type: TYPE_STRING
+ }
+ field {
+ name: "value"
+ number: 2
+ label: LABEL_OPTIONAL
+ type: TYPE_MESSAGE
+ type_name: ".tensorflow.FeatureList"
+ }
+ options {
+ map_entry: true
+ }
+ }
+ }
+}
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.train.-feature.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.train.-feature.pbtxt
new file mode 100644
index 0000000000..5d0eb871c2
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.train.-feature.pbtxt
@@ -0,0 +1,33 @@
+path: "tensorflow.train.Feature"
+tf_proto {
+ descriptor {
+ name: "Feature"
+ field {
+ name: "bytes_list"
+ number: 1
+ label: LABEL_OPTIONAL
+ type: TYPE_MESSAGE
+ type_name: ".tensorflow.BytesList"
+ oneof_index: 0
+ }
+ field {
+ name: "float_list"
+ number: 2
+ label: LABEL_OPTIONAL
+ type: TYPE_MESSAGE
+ type_name: ".tensorflow.FloatList"
+ oneof_index: 0
+ }
+ field {
+ name: "int64_list"
+ number: 3
+ label: LABEL_OPTIONAL
+ type: TYPE_MESSAGE
+ type_name: ".tensorflow.Int64List"
+ oneof_index: 0
+ }
+ oneof_decl {
+ name: "kind"
+ }
+ }
+}
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.train.-features.-feature-entry.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.train.-features.-feature-entry.pbtxt
new file mode 100644
index 0000000000..f912005f1c
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.train.-features.-feature-entry.pbtxt
@@ -0,0 +1,22 @@
+path: "tensorflow.train.Features.FeatureEntry"
+tf_proto {
+ descriptor {
+ name: "FeatureEntry"
+ field {
+ name: "key"
+ number: 1
+ label: LABEL_OPTIONAL
+ type: TYPE_STRING
+ }
+ field {
+ name: "value"
+ number: 2
+ label: LABEL_OPTIONAL
+ type: TYPE_MESSAGE
+ type_name: ".tensorflow.Feature"
+ }
+ options {
+ map_entry: true
+ }
+ }
+}
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.train.-features.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.train.-features.pbtxt
new file mode 100644
index 0000000000..b788ca1d57
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.train.-features.pbtxt
@@ -0,0 +1,32 @@
+path: "tensorflow.train.Features"
+tf_proto {
+ descriptor {
+ name: "Features"
+ field {
+ name: "feature"
+ number: 1
+ label: LABEL_REPEATED
+ type: TYPE_MESSAGE
+ type_name: ".tensorflow.Features.FeatureEntry"
+ }
+ nested_type {
+ name: "FeatureEntry"
+ field {
+ name: "key"
+ number: 1
+ label: LABEL_OPTIONAL
+ type: TYPE_STRING
+ }
+ field {
+ name: "value"
+ number: 2
+ label: LABEL_OPTIONAL
+ type: TYPE_MESSAGE
+ type_name: ".tensorflow.Feature"
+ }
+ options {
+ map_entry: true
+ }
+ }
+ }
+}
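
A sketch constructing the Example/Features/Feature protos described above; the feature keys and values are arbitrary:

    import tensorflow as tf

    example = tf.train.Example(features=tf.train.Features(feature={
        "label": tf.train.Feature(int64_list=tf.train.Int64List(value=[1])),
        "weights": tf.train.Feature(float_list=tf.train.FloatList(value=[0.5, 1.5])),
        "tag": tf.train.Feature(bytes_list=tf.train.BytesList(value=[b"golden"])),
    }))
    serialized = example.SerializeToString()   # ready for a TFRecordWriter
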
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.train.-feed-fn-hook.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.train.-feed-fn-hook.pbtxt
new file mode 100644
index 0000000000..7bec4d032c
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.train.-feed-fn-hook.pbtxt
@@ -0,0 +1,30 @@
+path: "tensorflow.train.FeedFnHook"
+tf_class {
+ is_instance: "<class \'tensorflow.python.training.basic_session_run_hooks.FeedFnHook\'>"
+ is_instance: "<class \'tensorflow.python.training.session_run_hook.SessionRunHook\'>"
+ is_instance: "<type \'object\'>"
+ member_method {
+ name: "__init__"
+ argspec: "args=[\'self\', \'feed_fn\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "after_create_session"
+ argspec: "args=[\'self\', \'session\', \'coord\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "after_run"
+ argspec: "args=[\'self\', \'run_context\', \'run_values\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "before_run"
+ argspec: "args=[\'self\', \'run_context\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "begin"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "end"
+ argspec: "args=[\'self\', \'session\'], varargs=None, keywords=None, defaults=None"
+ }
+}
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.train.-final-ops-hook.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.train.-final-ops-hook.pbtxt
new file mode 100644
index 0000000000..31cf9aaeb2
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.train.-final-ops-hook.pbtxt
@@ -0,0 +1,34 @@
+path: "tensorflow.train.FinalOpsHook"
+tf_class {
+ is_instance: "<class \'tensorflow.python.training.basic_session_run_hooks.FinalOpsHook\'>"
+ is_instance: "<class \'tensorflow.python.training.session_run_hook.SessionRunHook\'>"
+ is_instance: "<type \'object\'>"
+ member {
+ name: "final_ops_values"
+ mtype: "<type \'property\'>"
+ }
+ member_method {
+ name: "__init__"
+ argspec: "args=[\'self\', \'final_ops\', \'final_ops_feed_dict\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "after_create_session"
+ argspec: "args=[\'self\', \'session\', \'coord\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "after_run"
+ argspec: "args=[\'self\', \'run_context\', \'run_values\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "before_run"
+ argspec: "args=[\'self\', \'run_context\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "begin"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "end"
+ argspec: "args=[\'self\', \'session\'], varargs=None, keywords=None, defaults=None"
+ }
+}
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.train.-float-list.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.train.-float-list.pbtxt
new file mode 100644
index 0000000000..55d3b46f20
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.train.-float-list.pbtxt
@@ -0,0 +1,15 @@
+path: "tensorflow.train.FloatList"
+tf_proto {
+ descriptor {
+ name: "FloatList"
+ field {
+ name: "value"
+ number: 1
+ label: LABEL_REPEATED
+ type: TYPE_FLOAT
+ options {
+ packed: true
+ }
+ }
+ }
+}
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.train.-ftrl-optimizer.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.train.-ftrl-optimizer.pbtxt
new file mode 100644
index 0000000000..d265fdeb01
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.train.-ftrl-optimizer.pbtxt
@@ -0,0 +1,51 @@
+path: "tensorflow.train.FtrlOptimizer"
+tf_class {
+ is_instance: "<class \'tensorflow.python.training.ftrl.FtrlOptimizer\'>"
+ is_instance: "<class \'tensorflow.python.training.optimizer.Optimizer\'>"
+ is_instance: "<class \'tensorflow.python.training.checkpointable.base.CheckpointableBase\'>"
+ is_instance: "<type \'object\'>"
+ member {
+ name: "GATE_GRAPH"
+ mtype: "<type \'int\'>"
+ }
+ member {
+ name: "GATE_NONE"
+ mtype: "<type \'int\'>"
+ }
+ member {
+ name: "GATE_OP"
+ mtype: "<type \'int\'>"
+ }
+ member_method {
+ name: "__init__"
+ argspec: "args=[\'self\', \'learning_rate\', \'learning_rate_power\', \'initial_accumulator_value\', \'l1_regularization_strength\', \'l2_regularization_strength\', \'use_locking\', \'name\', \'accum_name\', \'linear_name\', \'l2_shrinkage_regularization_strength\'], varargs=None, keywords=None, defaults=[\'-0.5\', \'0.1\', \'0.0\', \'0.0\', \'False\', \'Ftrl\', \'None\', \'None\', \'0.0\'], "
+ }
+ member_method {
+ name: "apply_gradients"
+ argspec: "args=[\'self\', \'grads_and_vars\', \'global_step\', \'name\'], varargs=None, keywords=None, defaults=[\'None\', \'None\'], "
+ }
+ member_method {
+ name: "compute_gradients"
+ argspec: "args=[\'self\', \'loss\', \'var_list\', \'gate_gradients\', \'aggregation_method\', \'colocate_gradients_with_ops\', \'grad_loss\'], varargs=None, keywords=None, defaults=[\'None\', \'1\', \'None\', \'False\', \'None\'], "
+ }
+ member_method {
+ name: "get_name"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_slot"
+ argspec: "args=[\'self\', \'var\', \'name\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_slot_names"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "minimize"
+ argspec: "args=[\'self\', \'loss\', \'global_step\', \'var_list\', \'gate_gradients\', \'aggregation_method\', \'colocate_gradients_with_ops\', \'name\', \'grad_loss\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'1\', \'None\', \'False\', \'None\', \'None\'], "
+ }
+ member_method {
+ name: "variables"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+}
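
The FtrlOptimizer golden above pins the constructor defaults (learning_rate_power=-0.5, initial_accumulator_value=0.1, name='Ftrl', and so on) plus the shared Optimizer methods. A minimal usage sketch assuming a 1.x-style graph; the variable, target values, and learning rate are made up for illustration:

    import tensorflow as tf

    w = tf.Variable([0.0, 0.0])
    loss = tf.reduce_sum(tf.square(w - [1.0, 2.0]))  # stand-in loss

    # Constructor arguments follow the argspec in the golden file above.
    opt = tf.train.FtrlOptimizer(
        learning_rate=0.05,
        l1_regularization_strength=0.001,
        l2_regularization_strength=0.001)
    train_op = opt.minimize(loss, global_step=tf.train.get_or_create_global_step())
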
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.train.-global-step-waiter-hook.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.train.-global-step-waiter-hook.pbtxt
new file mode 100644
index 0000000000..147448618e
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.train.-global-step-waiter-hook.pbtxt
@@ -0,0 +1,30 @@
+path: "tensorflow.train.GlobalStepWaiterHook"
+tf_class {
+ is_instance: "<class \'tensorflow.python.training.basic_session_run_hooks.GlobalStepWaiterHook\'>"
+ is_instance: "<class \'tensorflow.python.training.session_run_hook.SessionRunHook\'>"
+ is_instance: "<type \'object\'>"
+ member_method {
+ name: "__init__"
+ argspec: "args=[\'self\', \'wait_until_step\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "after_create_session"
+ argspec: "args=[\'self\', \'session\', \'coord\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "after_run"
+ argspec: "args=[\'self\', \'run_context\', \'run_values\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "before_run"
+ argspec: "args=[\'self\', \'run_context\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "begin"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "end"
+ argspec: "args=[\'self\', \'session\'], varargs=None, keywords=None, defaults=None"
+ }
+}
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.train.-gradient-descent-optimizer.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.train.-gradient-descent-optimizer.pbtxt
new file mode 100644
index 0000000000..c673e29cd4
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.train.-gradient-descent-optimizer.pbtxt
@@ -0,0 +1,51 @@
+path: "tensorflow.train.GradientDescentOptimizer"
+tf_class {
+ is_instance: "<class \'tensorflow.python.training.gradient_descent.GradientDescentOptimizer\'>"
+ is_instance: "<class \'tensorflow.python.training.optimizer.Optimizer\'>"
+ is_instance: "<class \'tensorflow.python.training.checkpointable.base.CheckpointableBase\'>"
+ is_instance: "<type \'object\'>"
+ member {
+ name: "GATE_GRAPH"
+ mtype: "<type \'int\'>"
+ }
+ member {
+ name: "GATE_NONE"
+ mtype: "<type \'int\'>"
+ }
+ member {
+ name: "GATE_OP"
+ mtype: "<type \'int\'>"
+ }
+ member_method {
+ name: "__init__"
+ argspec: "args=[\'self\', \'learning_rate\', \'use_locking\', \'name\'], varargs=None, keywords=None, defaults=[\'False\', \'GradientDescent\'], "
+ }
+ member_method {
+ name: "apply_gradients"
+ argspec: "args=[\'self\', \'grads_and_vars\', \'global_step\', \'name\'], varargs=None, keywords=None, defaults=[\'None\', \'None\'], "
+ }
+ member_method {
+ name: "compute_gradients"
+ argspec: "args=[\'self\', \'loss\', \'var_list\', \'gate_gradients\', \'aggregation_method\', \'colocate_gradients_with_ops\', \'grad_loss\'], varargs=None, keywords=None, defaults=[\'None\', \'1\', \'None\', \'False\', \'None\'], "
+ }
+ member_method {
+ name: "get_name"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_slot"
+ argspec: "args=[\'self\', \'var\', \'name\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_slot_names"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "minimize"
+ argspec: "args=[\'self\', \'loss\', \'global_step\', \'var_list\', \'gate_gradients\', \'aggregation_method\', \'colocate_gradients_with_ops\', \'name\', \'grad_loss\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'1\', \'None\', \'False\', \'None\', \'None\'], "
+ }
+ member_method {
+ name: "variables"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+}
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.train.-int64-list.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.train.-int64-list.pbtxt
new file mode 100644
index 0000000000..1de92b3ab7
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.train.-int64-list.pbtxt
@@ -0,0 +1,15 @@
+path: "tensorflow.train.Int64List"
+tf_proto {
+ descriptor {
+ name: "Int64List"
+ field {
+ name: "value"
+ number: 1
+ label: LABEL_REPEATED
+ type: TYPE_INT64
+ options {
+ packed: true
+ }
+ }
+ }
+}
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.train.-job-def.-tasks-entry.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.train.-job-def.-tasks-entry.pbtxt
new file mode 100644
index 0000000000..58115590a5
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.train.-job-def.-tasks-entry.pbtxt
@@ -0,0 +1,21 @@
+path: "tensorflow.train.JobDef.TasksEntry"
+tf_proto {
+ descriptor {
+ name: "TasksEntry"
+ field {
+ name: "key"
+ number: 1
+ label: LABEL_OPTIONAL
+ type: TYPE_INT32
+ }
+ field {
+ name: "value"
+ number: 2
+ label: LABEL_OPTIONAL
+ type: TYPE_STRING
+ }
+ options {
+ map_entry: true
+ }
+ }
+}
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.train.-job-def.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.train.-job-def.pbtxt
new file mode 100644
index 0000000000..d7eb505e27
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.train.-job-def.pbtxt
@@ -0,0 +1,37 @@
+path: "tensorflow.train.JobDef"
+tf_proto {
+ descriptor {
+ name: "JobDef"
+ field {
+ name: "name"
+ number: 1
+ label: LABEL_OPTIONAL
+ type: TYPE_STRING
+ }
+ field {
+ name: "tasks"
+ number: 2
+ label: LABEL_REPEATED
+ type: TYPE_MESSAGE
+ type_name: ".tensorflow.JobDef.TasksEntry"
+ }
+ nested_type {
+ name: "TasksEntry"
+ field {
+ name: "key"
+ number: 1
+ label: LABEL_OPTIONAL
+ type: TYPE_INT32
+ }
+ field {
+ name: "value"
+ number: 2
+ label: LABEL_OPTIONAL
+ type: TYPE_STRING
+ }
+ options {
+ map_entry: true
+ }
+ }
+ }
+}
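
JobDef pairs a job name with the int32-to-string tasks map defined by the TasksEntry descriptor above (task index mapped to a "host:port" address). A small sketch of filling one in directly as a proto; the hostnames are illustrative:

    import tensorflow as tf

    job = tf.train.JobDef(name="worker")
    job.tasks[0] = "worker0.example.com:2222"  # task index -> address, per TasksEntry
    job.tasks[1] = "worker1.example.com:2222"
    print(job)
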
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.train.-logging-tensor-hook.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.train.-logging-tensor-hook.pbtxt
new file mode 100644
index 0000000000..9801c05df1
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.train.-logging-tensor-hook.pbtxt
@@ -0,0 +1,30 @@
+path: "tensorflow.train.LoggingTensorHook"
+tf_class {
+ is_instance: "<class \'tensorflow.python.training.basic_session_run_hooks.LoggingTensorHook\'>"
+ is_instance: "<class \'tensorflow.python.training.session_run_hook.SessionRunHook\'>"
+ is_instance: "<type \'object\'>"
+ member_method {
+ name: "__init__"
+ argspec: "args=[\'self\', \'tensors\', \'every_n_iter\', \'every_n_secs\', \'at_end\', \'formatter\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'False\', \'None\'], "
+ }
+ member_method {
+ name: "after_create_session"
+ argspec: "args=[\'self\', \'session\', \'coord\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "after_run"
+ argspec: "args=[\'self\', \'run_context\', \'run_values\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "before_run"
+ argspec: "args=[\'self\', \'run_context\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "begin"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "end"
+ argspec: "args=[\'self\', \'session\'], varargs=None, keywords=None, defaults=None"
+ }
+}
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.train.-looper-thread.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.train.-looper-thread.pbtxt
new file mode 100644
index 0000000000..c61859004e
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.train.-looper-thread.pbtxt
@@ -0,0 +1,73 @@
+path: "tensorflow.train.LooperThread"
+tf_class {
+ is_instance: "<class \'tensorflow.python.training.coordinator.LooperThread\'>"
+ is_instance: "<class \'threading.Thread\'>"
+ member {
+ name: "daemon"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "ident"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "name"
+ mtype: "<type \'property\'>"
+ }
+ member_method {
+ name: "__init__"
+ argspec: "args=[\'self\', \'coord\', \'timer_interval_secs\', \'target\', \'args\', \'kwargs\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\'], "
+ }
+ member_method {
+ name: "getName"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "isAlive"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "isDaemon"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "is_alive"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "join"
+ argspec: "args=[\'self\', \'timeout\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "loop"
+ argspec: "args=[\'coord\', \'timer_interval_secs\', \'target\', \'args\', \'kwargs\'], varargs=None, keywords=None, defaults=[\'None\', \'None\'], "
+ }
+ member_method {
+ name: "run"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "run_loop"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "setDaemon"
+ argspec: "args=[\'self\', \'daemonic\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "setName"
+ argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "start"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "start_loop"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "stop_loop"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+}
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.train.-momentum-optimizer.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.train.-momentum-optimizer.pbtxt
new file mode 100644
index 0000000000..8199f63b9b
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.train.-momentum-optimizer.pbtxt
@@ -0,0 +1,51 @@
+path: "tensorflow.train.MomentumOptimizer"
+tf_class {
+ is_instance: "<class \'tensorflow.python.training.momentum.MomentumOptimizer\'>"
+ is_instance: "<class \'tensorflow.python.training.optimizer.Optimizer\'>"
+ is_instance: "<class \'tensorflow.python.training.checkpointable.base.CheckpointableBase\'>"
+ is_instance: "<type \'object\'>"
+ member {
+ name: "GATE_GRAPH"
+ mtype: "<type \'int\'>"
+ }
+ member {
+ name: "GATE_NONE"
+ mtype: "<type \'int\'>"
+ }
+ member {
+ name: "GATE_OP"
+ mtype: "<type \'int\'>"
+ }
+ member_method {
+ name: "__init__"
+ argspec: "args=[\'self\', \'learning_rate\', \'momentum\', \'use_locking\', \'name\', \'use_nesterov\'], varargs=None, keywords=None, defaults=[\'False\', \'Momentum\', \'False\'], "
+ }
+ member_method {
+ name: "apply_gradients"
+ argspec: "args=[\'self\', \'grads_and_vars\', \'global_step\', \'name\'], varargs=None, keywords=None, defaults=[\'None\', \'None\'], "
+ }
+ member_method {
+ name: "compute_gradients"
+ argspec: "args=[\'self\', \'loss\', \'var_list\', \'gate_gradients\', \'aggregation_method\', \'colocate_gradients_with_ops\', \'grad_loss\'], varargs=None, keywords=None, defaults=[\'None\', \'1\', \'None\', \'False\', \'None\'], "
+ }
+ member_method {
+ name: "get_name"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_slot"
+ argspec: "args=[\'self\', \'var\', \'name\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_slot_names"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "minimize"
+ argspec: "args=[\'self\', \'loss\', \'global_step\', \'var_list\', \'gate_gradients\', \'aggregation_method\', \'colocate_gradients_with_ops\', \'name\', \'grad_loss\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'1\', \'None\', \'False\', \'None\', \'None\'], "
+ }
+ member_method {
+ name: "variables"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+}
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.train.-monitored-session.-step-context.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.train.-monitored-session.-step-context.pbtxt
new file mode 100644
index 0000000000..03efe6639e
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.train.-monitored-session.-step-context.pbtxt
@@ -0,0 +1,21 @@
+path: "tensorflow.train.MonitoredSession.StepContext"
+tf_class {
+ is_instance: "<class \'tensorflow.python.training.monitored_session.StepContext\'>"
+ is_instance: "<type \'object\'>"
+ member {
+ name: "session"
+ mtype: "<type \'property\'>"
+ }
+ member_method {
+ name: "__init__"
+ argspec: "args=[\'self\', \'session\', \'run_with_hooks_fn\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "request_stop"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "run_with_hooks"
+ argspec: "args=[\'self\'], varargs=args, keywords=kwargs, defaults=None"
+ }
+}
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.train.-monitored-session.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.train.-monitored-session.pbtxt
new file mode 100644
index 0000000000..09b7b3fb53
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.train.-monitored-session.pbtxt
@@ -0,0 +1,34 @@
+path: "tensorflow.train.MonitoredSession"
+tf_class {
+ is_instance: "<class \'tensorflow.python.training.monitored_session.MonitoredSession\'>"
+ is_instance: "<class \'tensorflow.python.training.monitored_session._MonitoredSession\'>"
+ is_instance: "<type \'object\'>"
+ member {
+ name: "StepContext"
+ mtype: "<type \'type\'>"
+ }
+ member {
+ name: "graph"
+ mtype: "<type \'property\'>"
+ }
+ member_method {
+ name: "__init__"
+ argspec: "args=[\'self\', \'session_creator\', \'hooks\', \'stop_grace_period_secs\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'120\'], "
+ }
+ member_method {
+ name: "close"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "run"
+ argspec: "args=[\'self\', \'fetches\', \'feed_dict\', \'options\', \'run_metadata\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\'], "
+ }
+ member_method {
+ name: "run_step_fn"
+ argspec: "args=[\'self\', \'step_fn\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "should_stop"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+}
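
MonitoredSession wires a session creator together with SessionRunHook instances, several of whose goldens also appear in this change. A minimal sketch assuming the default session creator; the train op and step limit are made up for illustration:

    import tensorflow as tf

    global_step = tf.train.get_or_create_global_step()
    loss = tf.constant(1.0)                    # stand-in loss for NanTensorHook
    train_op = tf.assign_add(global_step, 1)   # stand-in training step

    hooks = [
        tf.train.StopAtStepHook(num_steps=10),
        tf.train.NanTensorHook(loss),
    ]
    with tf.train.MonitoredSession(hooks=hooks) as sess:
        while not sess.should_stop():
            sess.run(train_op)
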
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.train.-nan-loss-during-training-error.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.train.-nan-loss-during-training-error.pbtxt
new file mode 100644
index 0000000000..25fd5e75a7
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.train.-nan-loss-during-training-error.pbtxt
@@ -0,0 +1,16 @@
+path: "tensorflow.train.NanLossDuringTrainingError"
+tf_class {
+ is_instance: "<class \'tensorflow.python.training.basic_session_run_hooks.NanLossDuringTrainingError\'>"
+ is_instance: "<type \'exceptions.RuntimeError\'>"
+ member {
+ name: "args"
+ mtype: "<type \'getset_descriptor\'>"
+ }
+ member {
+ name: "message"
+ mtype: "<type \'getset_descriptor\'>"
+ }
+ member_method {
+ name: "__init__"
+ }
+}
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.train.-nan-tensor-hook.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.train.-nan-tensor-hook.pbtxt
new file mode 100644
index 0000000000..7d1c89f9b3
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.train.-nan-tensor-hook.pbtxt
@@ -0,0 +1,30 @@
+path: "tensorflow.train.NanTensorHook"
+tf_class {
+ is_instance: "<class \'tensorflow.python.training.basic_session_run_hooks.NanTensorHook\'>"
+ is_instance: "<class \'tensorflow.python.training.session_run_hook.SessionRunHook\'>"
+ is_instance: "<type \'object\'>"
+ member_method {
+ name: "__init__"
+ argspec: "args=[\'self\', \'loss_tensor\', \'fail_on_nan_loss\'], varargs=None, keywords=None, defaults=[\'True\'], "
+ }
+ member_method {
+ name: "after_create_session"
+ argspec: "args=[\'self\', \'session\', \'coord\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "after_run"
+ argspec: "args=[\'self\', \'run_context\', \'run_values\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "before_run"
+ argspec: "args=[\'self\', \'run_context\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "begin"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "end"
+ argspec: "args=[\'self\', \'session\'], varargs=None, keywords=None, defaults=None"
+ }
+}
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.train.-optimizer.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.train.-optimizer.pbtxt
new file mode 100644
index 0000000000..876bb35e39
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.train.-optimizer.pbtxt
@@ -0,0 +1,50 @@
+path: "tensorflow.train.Optimizer"
+tf_class {
+ is_instance: "<class \'tensorflow.python.training.optimizer.Optimizer\'>"
+ is_instance: "<class \'tensorflow.python.training.checkpointable.base.CheckpointableBase\'>"
+ is_instance: "<type \'object\'>"
+ member {
+ name: "GATE_GRAPH"
+ mtype: "<type \'int\'>"
+ }
+ member {
+ name: "GATE_NONE"
+ mtype: "<type \'int\'>"
+ }
+ member {
+ name: "GATE_OP"
+ mtype: "<type \'int\'>"
+ }
+ member_method {
+ name: "__init__"
+ argspec: "args=[\'self\', \'use_locking\', \'name\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "apply_gradients"
+ argspec: "args=[\'self\', \'grads_and_vars\', \'global_step\', \'name\'], varargs=None, keywords=None, defaults=[\'None\', \'None\'], "
+ }
+ member_method {
+ name: "compute_gradients"
+ argspec: "args=[\'self\', \'loss\', \'var_list\', \'gate_gradients\', \'aggregation_method\', \'colocate_gradients_with_ops\', \'grad_loss\'], varargs=None, keywords=None, defaults=[\'None\', \'1\', \'None\', \'False\', \'None\'], "
+ }
+ member_method {
+ name: "get_name"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_slot"
+ argspec: "args=[\'self\', \'var\', \'name\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_slot_names"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "minimize"
+ argspec: "args=[\'self\', \'loss\', \'global_step\', \'var_list\', \'gate_gradients\', \'aggregation_method\', \'colocate_gradients_with_ops\', \'name\', \'grad_loss\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'1\', \'None\', \'False\', \'None\', \'None\'], "
+ }
+ member_method {
+ name: "variables"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+}
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.train.-profiler-hook.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.train.-profiler-hook.pbtxt
new file mode 100644
index 0000000000..4df6c4156a
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.train.-profiler-hook.pbtxt
@@ -0,0 +1,30 @@
+path: "tensorflow.train.ProfilerHook"
+tf_class {
+ is_instance: "<class \'tensorflow.python.training.basic_session_run_hooks.ProfilerHook\'>"
+ is_instance: "<class \'tensorflow.python.training.session_run_hook.SessionRunHook\'>"
+ is_instance: "<type \'object\'>"
+ member_method {
+ name: "__init__"
+ argspec: "args=[\'self\', \'save_steps\', \'save_secs\', \'output_dir\', \'show_dataflow\', \'show_memory\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'\', \'True\', \'False\'], "
+ }
+ member_method {
+ name: "after_create_session"
+ argspec: "args=[\'self\', \'session\', \'coord\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "after_run"
+ argspec: "args=[\'self\', \'run_context\', \'run_values\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "before_run"
+ argspec: "args=[\'self\', \'run_context\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "begin"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "end"
+ argspec: "args=[\'self\', \'session\'], varargs=None, keywords=None, defaults=None"
+ }
+}
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.train.-proximal-adagrad-optimizer.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.train.-proximal-adagrad-optimizer.pbtxt
new file mode 100644
index 0000000000..14349a74ef
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.train.-proximal-adagrad-optimizer.pbtxt
@@ -0,0 +1,51 @@
+path: "tensorflow.train.ProximalAdagradOptimizer"
+tf_class {
+ is_instance: "<class \'tensorflow.python.training.proximal_adagrad.ProximalAdagradOptimizer\'>"
+ is_instance: "<class \'tensorflow.python.training.optimizer.Optimizer\'>"
+ is_instance: "<class \'tensorflow.python.training.checkpointable.base.CheckpointableBase\'>"
+ is_instance: "<type \'object\'>"
+ member {
+ name: "GATE_GRAPH"
+ mtype: "<type \'int\'>"
+ }
+ member {
+ name: "GATE_NONE"
+ mtype: "<type \'int\'>"
+ }
+ member {
+ name: "GATE_OP"
+ mtype: "<type \'int\'>"
+ }
+ member_method {
+ name: "__init__"
+ argspec: "args=[\'self\', \'learning_rate\', \'initial_accumulator_value\', \'l1_regularization_strength\', \'l2_regularization_strength\', \'use_locking\', \'name\'], varargs=None, keywords=None, defaults=[\'0.1\', \'0.0\', \'0.0\', \'False\', \'ProximalAdagrad\'], "
+ }
+ member_method {
+ name: "apply_gradients"
+ argspec: "args=[\'self\', \'grads_and_vars\', \'global_step\', \'name\'], varargs=None, keywords=None, defaults=[\'None\', \'None\'], "
+ }
+ member_method {
+ name: "compute_gradients"
+ argspec: "args=[\'self\', \'loss\', \'var_list\', \'gate_gradients\', \'aggregation_method\', \'colocate_gradients_with_ops\', \'grad_loss\'], varargs=None, keywords=None, defaults=[\'None\', \'1\', \'None\', \'False\', \'None\'], "
+ }
+ member_method {
+ name: "get_name"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_slot"
+ argspec: "args=[\'self\', \'var\', \'name\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_slot_names"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "minimize"
+ argspec: "args=[\'self\', \'loss\', \'global_step\', \'var_list\', \'gate_gradients\', \'aggregation_method\', \'colocate_gradients_with_ops\', \'name\', \'grad_loss\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'1\', \'None\', \'False\', \'None\', \'None\'], "
+ }
+ member_method {
+ name: "variables"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+}
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.train.-proximal-gradient-descent-optimizer.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.train.-proximal-gradient-descent-optimizer.pbtxt
new file mode 100644
index 0000000000..7d982dc51f
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.train.-proximal-gradient-descent-optimizer.pbtxt
@@ -0,0 +1,51 @@
+path: "tensorflow.train.ProximalGradientDescentOptimizer"
+tf_class {
+ is_instance: "<class \'tensorflow.python.training.proximal_gradient_descent.ProximalGradientDescentOptimizer\'>"
+ is_instance: "<class \'tensorflow.python.training.optimizer.Optimizer\'>"
+ is_instance: "<class \'tensorflow.python.training.checkpointable.base.CheckpointableBase\'>"
+ is_instance: "<type \'object\'>"
+ member {
+ name: "GATE_GRAPH"
+ mtype: "<type \'int\'>"
+ }
+ member {
+ name: "GATE_NONE"
+ mtype: "<type \'int\'>"
+ }
+ member {
+ name: "GATE_OP"
+ mtype: "<type \'int\'>"
+ }
+ member_method {
+ name: "__init__"
+ argspec: "args=[\'self\', \'learning_rate\', \'l1_regularization_strength\', \'l2_regularization_strength\', \'use_locking\', \'name\'], varargs=None, keywords=None, defaults=[\'0.0\', \'0.0\', \'False\', \'ProximalGradientDescent\'], "
+ }
+ member_method {
+ name: "apply_gradients"
+ argspec: "args=[\'self\', \'grads_and_vars\', \'global_step\', \'name\'], varargs=None, keywords=None, defaults=[\'None\', \'None\'], "
+ }
+ member_method {
+ name: "compute_gradients"
+ argspec: "args=[\'self\', \'loss\', \'var_list\', \'gate_gradients\', \'aggregation_method\', \'colocate_gradients_with_ops\', \'grad_loss\'], varargs=None, keywords=None, defaults=[\'None\', \'1\', \'None\', \'False\', \'None\'], "
+ }
+ member_method {
+ name: "get_name"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_slot"
+ argspec: "args=[\'self\', \'var\', \'name\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_slot_names"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "minimize"
+ argspec: "args=[\'self\', \'loss\', \'global_step\', \'var_list\', \'gate_gradients\', \'aggregation_method\', \'colocate_gradients_with_ops\', \'name\', \'grad_loss\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'1\', \'None\', \'False\', \'None\', \'None\'], "
+ }
+ member_method {
+ name: "variables"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+}
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.train.-r-m-s-prop-optimizer.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.train.-r-m-s-prop-optimizer.pbtxt
new file mode 100644
index 0000000000..906384a287
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.train.-r-m-s-prop-optimizer.pbtxt
@@ -0,0 +1,51 @@
+path: "tensorflow.train.RMSPropOptimizer"
+tf_class {
+ is_instance: "<class \'tensorflow.python.training.rmsprop.RMSPropOptimizer\'>"
+ is_instance: "<class \'tensorflow.python.training.optimizer.Optimizer\'>"
+ is_instance: "<class \'tensorflow.python.training.checkpointable.base.CheckpointableBase\'>"
+ is_instance: "<type \'object\'>"
+ member {
+ name: "GATE_GRAPH"
+ mtype: "<type \'int\'>"
+ }
+ member {
+ name: "GATE_NONE"
+ mtype: "<type \'int\'>"
+ }
+ member {
+ name: "GATE_OP"
+ mtype: "<type \'int\'>"
+ }
+ member_method {
+ name: "__init__"
+ argspec: "args=[\'self\', \'learning_rate\', \'decay\', \'momentum\', \'epsilon\', \'use_locking\', \'centered\', \'name\'], varargs=None, keywords=None, defaults=[\'0.9\', \'0.0\', \'1e-10\', \'False\', \'False\', \'RMSProp\'], "
+ }
+ member_method {
+ name: "apply_gradients"
+ argspec: "args=[\'self\', \'grads_and_vars\', \'global_step\', \'name\'], varargs=None, keywords=None, defaults=[\'None\', \'None\'], "
+ }
+ member_method {
+ name: "compute_gradients"
+ argspec: "args=[\'self\', \'loss\', \'var_list\', \'gate_gradients\', \'aggregation_method\', \'colocate_gradients_with_ops\', \'grad_loss\'], varargs=None, keywords=None, defaults=[\'None\', \'1\', \'None\', \'False\', \'None\'], "
+ }
+ member_method {
+ name: "get_name"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_slot"
+ argspec: "args=[\'self\', \'var\', \'name\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_slot_names"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "minimize"
+ argspec: "args=[\'self\', \'loss\', \'global_step\', \'var_list\', \'gate_gradients\', \'aggregation_method\', \'colocate_gradients_with_ops\', \'name\', \'grad_loss\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'1\', \'None\', \'False\', \'None\', \'None\'], "
+ }
+ member_method {
+ name: "variables"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+}
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.train.-saver-def.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.train.-saver-def.pbtxt
new file mode 100644
index 0000000000..4ec99469e4
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.train.-saver-def.pbtxt
@@ -0,0 +1,64 @@
+path: "tensorflow.train.SaverDef"
+tf_proto {
+ descriptor {
+ name: "SaverDef"
+ field {
+ name: "filename_tensor_name"
+ number: 1
+ label: LABEL_OPTIONAL
+ type: TYPE_STRING
+ }
+ field {
+ name: "save_tensor_name"
+ number: 2
+ label: LABEL_OPTIONAL
+ type: TYPE_STRING
+ }
+ field {
+ name: "restore_op_name"
+ number: 3
+ label: LABEL_OPTIONAL
+ type: TYPE_STRING
+ }
+ field {
+ name: "max_to_keep"
+ number: 4
+ label: LABEL_OPTIONAL
+ type: TYPE_INT32
+ }
+ field {
+ name: "sharded"
+ number: 5
+ label: LABEL_OPTIONAL
+ type: TYPE_BOOL
+ }
+ field {
+ name: "keep_checkpoint_every_n_hours"
+ number: 6
+ label: LABEL_OPTIONAL
+ type: TYPE_FLOAT
+ }
+ field {
+ name: "version"
+ number: 7
+ label: LABEL_OPTIONAL
+ type: TYPE_ENUM
+ type_name: ".tensorflow.SaverDef.CheckpointFormatVersion"
+ }
+ enum_type {
+ name: "CheckpointFormatVersion"
+ value {
+ name: "LEGACY"
+ number: 0
+ }
+ value {
+ name: "V1"
+ number: 1
+ }
+ value {
+ name: "V2"
+ number: 2
+ }
+ }
+ }
+}
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.train.-saver.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.train.-saver.pbtxt
new file mode 100644
index 0000000000..2cda458f46
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.train.-saver.pbtxt
@@ -0,0 +1,53 @@
+path: "tensorflow.train.Saver"
+tf_class {
+ is_instance: "<class \'tensorflow.python.training.saver.Saver\'>"
+ is_instance: "<type \'object\'>"
+ member {
+ name: "last_checkpoints"
+ mtype: "<type \'property\'>"
+ }
+ member_method {
+ name: "__init__"
+ argspec: "args=[\'self\', \'var_list\', \'reshape\', \'sharded\', \'max_to_keep\', \'keep_checkpoint_every_n_hours\', \'name\', \'restore_sequentially\', \'saver_def\', \'builder\', \'defer_build\', \'allow_empty\', \'write_version\', \'pad_step_number\', \'save_relative_paths\', \'filename\'], varargs=None, keywords=None, defaults=[\'None\', \'False\', \'False\', \'5\', \'10000.0\', \'None\', \'False\', \'None\', \'None\', \'False\', \'False\', \'2\', \'False\', \'False\', \'None\'], "
+ }
+ member_method {
+ name: "as_saver_def"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "build"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "export_meta_graph"
+ argspec: "args=[\'self\', \'filename\', \'collection_list\', \'as_text\', \'export_scope\', \'clear_devices\', \'clear_extraneous_savers\', \'strip_default_attrs\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'False\', \'None\', \'False\', \'False\', \'False\'], "
+ }
+ member_method {
+ name: "from_proto"
+ argspec: "args=[\'saver_def\', \'import_scope\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "recover_last_checkpoints"
+ argspec: "args=[\'self\', \'checkpoint_paths\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "restore"
+ argspec: "args=[\'self\', \'sess\', \'save_path\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "save"
+ argspec: "args=[\'self\', \'sess\', \'save_path\', \'global_step\', \'latest_filename\', \'meta_graph_suffix\', \'write_meta_graph\', \'write_state\', \'strip_default_attrs\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'meta\', \'True\', \'True\', \'False\'], "
+ }
+ member_method {
+ name: "set_last_checkpoints"
+ argspec: "args=[\'self\', \'last_checkpoints\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "set_last_checkpoints_with_time"
+ argspec: "args=[\'self\', \'last_checkpoints_with_time\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "to_proto"
+ argspec: "args=[\'self\', \'export_scope\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+}
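
The Saver golden records both the large constructor surface and the save/restore argspecs. A short round-trip sketch with the defaults left in place; the checkpoint path is illustrative:

    import tensorflow as tf

    v = tf.Variable(tf.zeros([3]), name="v")
    saver = tf.train.Saver()  # defaults per the argspec above: max_to_keep=5, write_version=2, ...

    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        ckpt_path = saver.save(sess, "/tmp/model.ckpt", global_step=0)

    with tf.Session() as sess:
        saver.restore(sess, ckpt_path)  # restored variables need no separate init run
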
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.train.-scaffold.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.train.-scaffold.pbtxt
new file mode 100644
index 0000000000..38cc98b48e
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.train.-scaffold.pbtxt
@@ -0,0 +1,53 @@
+path: "tensorflow.train.Scaffold"
+tf_class {
+ is_instance: "<class \'tensorflow.python.training.monitored_session.Scaffold\'>"
+ is_instance: "<type \'object\'>"
+ member {
+ name: "init_feed_dict"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "init_fn"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "init_op"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "local_init_op"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "ready_for_local_init_op"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "ready_op"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "saver"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "summary_op"
+ mtype: "<type \'property\'>"
+ }
+ member_method {
+ name: "__init__"
+ argspec: "args=[\'self\', \'init_op\', \'init_feed_dict\', \'init_fn\', \'ready_op\', \'ready_for_local_init_op\', \'local_init_op\', \'summary_op\', \'saver\', \'copy_from_scaffold\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'None\'], "
+ }
+ member_method {
+ name: "default_local_init_op"
+ argspec: "args=[], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "finalize"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_or_default"
+ argspec: "args=[\'arg_name\', \'collection_key\', \'default_constructor\'], varargs=None, keywords=None, defaults=None"
+ }
+}
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.train.-second-or-step-timer.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.train.-second-or-step-timer.pbtxt
new file mode 100644
index 0000000000..3c5a6ac13c
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.train.-second-or-step-timer.pbtxt
@@ -0,0 +1,26 @@
+path: "tensorflow.train.SecondOrStepTimer"
+tf_class {
+ is_instance: "<class \'tensorflow.python.training.basic_session_run_hooks.SecondOrStepTimer\'>"
+ is_instance: "<class \'tensorflow.python.training.basic_session_run_hooks._HookTimer\'>"
+ is_instance: "<type \'object\'>"
+ member_method {
+ name: "__init__"
+ argspec: "args=[\'self\', \'every_secs\', \'every_steps\'], varargs=None, keywords=None, defaults=[\'None\', \'None\'], "
+ }
+ member_method {
+ name: "last_triggered_step"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "reset"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "should_trigger_for_step"
+ argspec: "args=[\'self\', \'step\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "update_last_triggered_step"
+ argspec: "args=[\'self\', \'step\'], varargs=None, keywords=None, defaults=None"
+ }
+}
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.train.-sequence-example.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.train.-sequence-example.pbtxt
new file mode 100644
index 0000000000..6a4553bbc1
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.train.-sequence-example.pbtxt
@@ -0,0 +1,20 @@
+path: "tensorflow.train.SequenceExample"
+tf_proto {
+ descriptor {
+ name: "SequenceExample"
+ field {
+ name: "context"
+ number: 1
+ label: LABEL_OPTIONAL
+ type: TYPE_MESSAGE
+ type_name: ".tensorflow.Features"
+ }
+ field {
+ name: "feature_lists"
+ number: 2
+ label: LABEL_OPTIONAL
+ type: TYPE_MESSAGE
+ type_name: ".tensorflow.FeatureLists"
+ }
+ }
+}
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.train.-server-def.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.train.-server-def.pbtxt
new file mode 100644
index 0000000000..83ee7b3eb9
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.train.-server-def.pbtxt
@@ -0,0 +1,38 @@
+path: "tensorflow.train.ServerDef"
+tf_proto {
+ descriptor {
+ name: "ServerDef"
+ field {
+ name: "cluster"
+ number: 1
+ label: LABEL_OPTIONAL
+ type: TYPE_MESSAGE
+ type_name: ".tensorflow.ClusterDef"
+ }
+ field {
+ name: "job_name"
+ number: 2
+ label: LABEL_OPTIONAL
+ type: TYPE_STRING
+ }
+ field {
+ name: "task_index"
+ number: 3
+ label: LABEL_OPTIONAL
+ type: TYPE_INT32
+ }
+ field {
+ name: "default_session_config"
+ number: 4
+ label: LABEL_OPTIONAL
+ type: TYPE_MESSAGE
+ type_name: ".tensorflow.ConfigProto"
+ }
+ field {
+ name: "protocol"
+ number: 5
+ label: LABEL_OPTIONAL
+ type: TYPE_STRING
+ }
+ }
+}
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.train.-server.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.train.-server.pbtxt
new file mode 100644
index 0000000000..9b8f185f5b
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.train.-server.pbtxt
@@ -0,0 +1,29 @@
+path: "tensorflow.train.Server"
+tf_class {
+ is_instance: "<class \'tensorflow.python.training.server_lib.Server\'>"
+ is_instance: "<type \'object\'>"
+ member {
+ name: "server_def"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "target"
+ mtype: "<type \'property\'>"
+ }
+ member_method {
+ name: "__init__"
+ argspec: "args=[\'self\', \'server_or_cluster_def\', \'job_name\', \'task_index\', \'protocol\', \'config\', \'start\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'None\', \'True\'], "
+ }
+ member_method {
+ name: "create_local_server"
+ argspec: "args=[\'config\', \'start\'], varargs=None, keywords=None, defaults=[\'None\', \'True\'], "
+ }
+ member_method {
+ name: "join"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "start"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+}
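
Server exposes create_local_server as a class method alongside join and start. A single-process sketch; the constant being evaluated is arbitrary:

    import tensorflow as tf

    server = tf.train.Server.create_local_server()  # in-process server, started by default
    with tf.Session(server.target) as sess:
        print(sess.run(tf.constant(42)))
    # server.join() would block forever; a throwaway local server is simply
    # left to process teardown.
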
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.train.-session-creator.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.train.-session-creator.pbtxt
new file mode 100644
index 0000000000..beb232715f
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.train.-session-creator.pbtxt
@@ -0,0 +1,12 @@
+path: "tensorflow.train.SessionCreator"
+tf_class {
+ is_instance: "<class \'tensorflow.python.training.monitored_session.SessionCreator\'>"
+ is_instance: "<type \'object\'>"
+ member_method {
+ name: "__init__"
+ }
+ member_method {
+ name: "create_session"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+}
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.train.-session-manager.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.train.-session-manager.pbtxt
new file mode 100644
index 0000000000..448764fe08
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.train.-session-manager.pbtxt
@@ -0,0 +1,21 @@
+path: "tensorflow.train.SessionManager"
+tf_class {
+ is_instance: "<class \'tensorflow.python.training.session_manager.SessionManager\'>"
+ is_instance: "<type \'object\'>"
+ member_method {
+ name: "__init__"
+ argspec: "args=[\'self\', \'local_init_op\', \'ready_op\', \'ready_for_local_init_op\', \'graph\', \'recovery_wait_secs\', \'local_init_run_options\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'None\', \'30\', \'None\'], "
+ }
+ member_method {
+ name: "prepare_session"
+ argspec: "args=[\'self\', \'master\', \'init_op\', \'saver\', \'checkpoint_dir\', \'checkpoint_filename_with_path\', \'wait_for_checkpoint\', \'max_wait_secs\', \'config\', \'init_feed_dict\', \'init_fn\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'None\', \'False\', \'7200\', \'None\', \'None\', \'None\'], "
+ }
+ member_method {
+ name: "recover_session"
+ argspec: "args=[\'self\', \'master\', \'saver\', \'checkpoint_dir\', \'checkpoint_filename_with_path\', \'wait_for_checkpoint\', \'max_wait_secs\', \'config\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'False\', \'7200\', \'None\'], "
+ }
+ member_method {
+ name: "wait_for_session"
+ argspec: "args=[\'self\', \'master\', \'config\', \'max_wait_secs\'], varargs=None, keywords=None, defaults=[\'None\', \'inf\'], "
+ }
+}
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.train.-session-run-args.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.train.-session-run-args.pbtxt
new file mode 100644
index 0000000000..442990893e
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.train.-session-run-args.pbtxt
@@ -0,0 +1,27 @@
+path: "tensorflow.train.SessionRunArgs"
+tf_class {
+ is_instance: "<class \'tensorflow.python.training.session_run_hook.SessionRunArgs\'>"
+ is_instance: "<class \'tensorflow.python.training.session_run_hook.SessionRunArgs\'>"
+ is_instance: "<type \'tuple\'>"
+ member {
+ name: "feed_dict"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "fetches"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "options"
+ mtype: "<type \'property\'>"
+ }
+ member_method {
+ name: "__init__"
+ }
+ member_method {
+ name: "count"
+ }
+ member_method {
+ name: "index"
+ }
+}
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.train.-session-run-context.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.train.-session-run-context.pbtxt
new file mode 100644
index 0000000000..d5adb15c95
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.train.-session-run-context.pbtxt
@@ -0,0 +1,25 @@
+path: "tensorflow.train.SessionRunContext"
+tf_class {
+ is_instance: "<class \'tensorflow.python.training.session_run_hook.SessionRunContext\'>"
+ is_instance: "<type \'object\'>"
+ member {
+ name: "original_args"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "session"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "stop_requested"
+ mtype: "<type \'property\'>"
+ }
+ member_method {
+ name: "__init__"
+ argspec: "args=[\'self\', \'original_args\', \'session\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "request_stop"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+}
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.train.-session-run-hook.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.train.-session-run-hook.pbtxt
new file mode 100644
index 0000000000..db1aa24acf
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.train.-session-run-hook.pbtxt
@@ -0,0 +1,28 @@
+path: "tensorflow.train.SessionRunHook"
+tf_class {
+ is_instance: "<class \'tensorflow.python.training.session_run_hook.SessionRunHook\'>"
+ is_instance: "<type \'object\'>"
+ member_method {
+ name: "__init__"
+ }
+ member_method {
+ name: "after_create_session"
+ argspec: "args=[\'self\', \'session\', \'coord\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "after_run"
+ argspec: "args=[\'self\', \'run_context\', \'run_values\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "before_run"
+ argspec: "args=[\'self\', \'run_context\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "begin"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "end"
+ argspec: "args=[\'self\', \'session\'], varargs=None, keywords=None, defaults=None"
+ }
+}
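
SessionRunHook is the base class that the concrete hooks in this change override. A minimal custom hook sketch; the watched tensor and threshold are invented for illustration:

    import tensorflow as tf

    class LargeLossHook(tf.train.SessionRunHook):
        def __init__(self, loss_tensor, threshold=100.0):
            self._loss = loss_tensor
            self._threshold = threshold

        def before_run(self, run_context):
            # Ask the underlying session to also fetch the loss on each run() call.
            return tf.train.SessionRunArgs(self._loss)

        def after_run(self, run_context, run_values):
            if run_values.results > self._threshold:
                tf.logging.warning("loss %.3f above threshold %.3f",
                                   run_values.results, self._threshold)
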
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.train.-session-run-values.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.train.-session-run-values.pbtxt
new file mode 100644
index 0000000000..0b401d59c4
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.train.-session-run-values.pbtxt
@@ -0,0 +1,27 @@
+path: "tensorflow.train.SessionRunValues"
+tf_class {
+ is_instance: "<class \'tensorflow.python.training.session_run_hook.SessionRunValues\'>"
+ is_instance: "<class \'tensorflow.python.training.session_run_hook.SessionRunValues\'>"
+ is_instance: "<type \'tuple\'>"
+ member {
+ name: "options"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "results"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "run_metadata"
+ mtype: "<type \'property\'>"
+ }
+ member_method {
+ name: "__init__"
+ }
+ member_method {
+ name: "count"
+ }
+ member_method {
+ name: "index"
+ }
+}
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.train.-singular-monitored-session.-step-context.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.train.-singular-monitored-session.-step-context.pbtxt
new file mode 100644
index 0000000000..36d8ce7ff8
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.train.-singular-monitored-session.-step-context.pbtxt
@@ -0,0 +1,21 @@
+path: "tensorflow.train.SingularMonitoredSession.StepContext"
+tf_class {
+ is_instance: "<class \'tensorflow.python.training.monitored_session.StepContext\'>"
+ is_instance: "<type \'object\'>"
+ member {
+ name: "session"
+ mtype: "<type \'property\'>"
+ }
+ member_method {
+ name: "__init__"
+ argspec: "args=[\'self\', \'session\', \'run_with_hooks_fn\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "request_stop"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "run_with_hooks"
+ argspec: "args=[\'self\'], varargs=args, keywords=kwargs, defaults=None"
+ }
+}
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.train.-singular-monitored-session.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.train.-singular-monitored-session.pbtxt
new file mode 100644
index 0000000000..de0f2c1c1a
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.train.-singular-monitored-session.pbtxt
@@ -0,0 +1,38 @@
+path: "tensorflow.train.SingularMonitoredSession"
+tf_class {
+ is_instance: "<class \'tensorflow.python.training.monitored_session.SingularMonitoredSession\'>"
+ is_instance: "<class \'tensorflow.python.training.monitored_session._MonitoredSession\'>"
+ is_instance: "<type \'object\'>"
+ member {
+ name: "StepContext"
+ mtype: "<type \'type\'>"
+ }
+ member {
+ name: "graph"
+ mtype: "<type \'property\'>"
+ }
+ member_method {
+ name: "__init__"
+ argspec: "args=[\'self\', \'hooks\', \'scaffold\', \'master\', \'config\', \'checkpoint_dir\', \'stop_grace_period_secs\', \'checkpoint_filename_with_path\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'\', \'None\', \'None\', \'120\', \'None\'], "
+ }
+ member_method {
+ name: "close"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "raw_session"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "run"
+ argspec: "args=[\'self\', \'fetches\', \'feed_dict\', \'options\', \'run_metadata\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\'], "
+ }
+ member_method {
+ name: "run_step_fn"
+ argspec: "args=[\'self\', \'step_fn\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "should_stop"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+}
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.train.-step-counter-hook.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.train.-step-counter-hook.pbtxt
new file mode 100644
index 0000000000..13261f6dde
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.train.-step-counter-hook.pbtxt
@@ -0,0 +1,30 @@
+path: "tensorflow.train.StepCounterHook"
+tf_class {
+ is_instance: "<class \'tensorflow.python.training.basic_session_run_hooks.StepCounterHook\'>"
+ is_instance: "<class \'tensorflow.python.training.session_run_hook.SessionRunHook\'>"
+ is_instance: "<type \'object\'>"
+ member_method {
+ name: "__init__"
+ argspec: "args=[\'self\', \'every_n_steps\', \'every_n_secs\', \'output_dir\', \'summary_writer\'], varargs=None, keywords=None, defaults=[\'100\', \'None\', \'None\', \'None\'], "
+ }
+ member_method {
+ name: "after_create_session"
+ argspec: "args=[\'self\', \'session\', \'coord\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "after_run"
+ argspec: "args=[\'self\', \'run_context\', \'run_values\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "before_run"
+ argspec: "args=[\'self\', \'run_context\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "begin"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "end"
+ argspec: "args=[\'self\', \'session\'], varargs=None, keywords=None, defaults=None"
+ }
+}
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.train.-stop-at-step-hook.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.train.-stop-at-step-hook.pbtxt
new file mode 100644
index 0000000000..e388599b0b
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.train.-stop-at-step-hook.pbtxt
@@ -0,0 +1,30 @@
+path: "tensorflow.train.StopAtStepHook"
+tf_class {
+ is_instance: "<class \'tensorflow.python.training.basic_session_run_hooks.StopAtStepHook\'>"
+ is_instance: "<class \'tensorflow.python.training.session_run_hook.SessionRunHook\'>"
+ is_instance: "<type \'object\'>"
+ member_method {
+ name: "__init__"
+ argspec: "args=[\'self\', \'num_steps\', \'last_step\'], varargs=None, keywords=None, defaults=[\'None\', \'None\'], "
+ }
+ member_method {
+ name: "after_create_session"
+ argspec: "args=[\'self\', \'session\', \'coord\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "after_run"
+ argspec: "args=[\'self\', \'run_context\', \'run_values\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "before_run"
+ argspec: "args=[\'self\', \'run_context\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "begin"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "end"
+ argspec: "args=[\'self\', \'session\'], varargs=None, keywords=None, defaults=None"
+ }
+}
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.train.-summary-saver-hook.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.train.-summary-saver-hook.pbtxt
new file mode 100644
index 0000000000..697c3667b0
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.train.-summary-saver-hook.pbtxt
@@ -0,0 +1,30 @@
+path: "tensorflow.train.SummarySaverHook"
+tf_class {
+ is_instance: "<class \'tensorflow.python.training.basic_session_run_hooks.SummarySaverHook\'>"
+ is_instance: "<class \'tensorflow.python.training.session_run_hook.SessionRunHook\'>"
+ is_instance: "<type \'object\'>"
+ member_method {
+ name: "__init__"
+ argspec: "args=[\'self\', \'save_steps\', \'save_secs\', \'output_dir\', \'summary_writer\', \'scaffold\', \'summary_op\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'None\', \'None\', \'None\'], "
+ }
+ member_method {
+ name: "after_create_session"
+ argspec: "args=[\'self\', \'session\', \'coord\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "after_run"
+ argspec: "args=[\'self\', \'run_context\', \'run_values\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "before_run"
+ argspec: "args=[\'self\', \'run_context\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "begin"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "end"
+ argspec: "args=[\'self\', \'session\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+}
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.train.-supervisor.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.train.-supervisor.pbtxt
new file mode 100644
index 0000000000..9677e5a98e
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.train.-supervisor.pbtxt
@@ -0,0 +1,153 @@
+path: "tensorflow.train.Supervisor"
+tf_class {
+ is_instance: "<class \'tensorflow.python.training.supervisor.Supervisor\'>"
+ is_instance: "<type \'object\'>"
+ member {
+ name: "USE_DEFAULT"
+ mtype: "<type \'int\'>"
+ }
+ member {
+ name: "coord"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "global_step"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "init_feed_dict"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "init_op"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "is_chief"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "ready_for_local_init_op"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "ready_op"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "save_model_secs"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "save_path"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "save_summaries_secs"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "saver"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "session_manager"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "summary_op"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "summary_writer"
+ mtype: "<type \'property\'>"
+ }
+ member_method {
+ name: "Loop"
+ argspec: "args=[\'self\', \'timer_interval_secs\', \'target\', \'args\', \'kwargs\'], varargs=None, keywords=None, defaults=[\'None\', \'None\'], "
+ }
+ member_method {
+ name: "PrepareSession"
+ argspec: "args=[\'self\', \'master\', \'config\', \'wait_for_checkpoint\', \'max_wait_secs\', \'start_standard_services\'], varargs=None, keywords=None, defaults=[\'\', \'None\', \'False\', \'7200\', \'True\'], "
+ }
+ member_method {
+ name: "RequestStop"
+ argspec: "args=[\'self\', \'ex\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "ShouldStop"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "StartQueueRunners"
+ argspec: "args=[\'self\', \'sess\', \'queue_runners\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "StartStandardServices"
+ argspec: "args=[\'self\', \'sess\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "Stop"
+ argspec: "args=[\'self\', \'threads\', \'close_summary_writer\', \'ignore_live_threads\'], varargs=None, keywords=None, defaults=[\'None\', \'True\', \'False\'], "
+ }
+ member_method {
+ name: "StopOnException"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "SummaryComputed"
+ argspec: "args=[\'self\', \'sess\', \'summary\', \'global_step\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "WaitForStop"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "__init__"
+ argspec: "args=[\'self\', \'graph\', \'ready_op\', \'ready_for_local_init_op\', \'is_chief\', \'init_op\', \'init_feed_dict\', \'local_init_op\', \'logdir\', \'summary_op\', \'saver\', \'global_step\', \'save_summaries_secs\', \'save_model_secs\', \'recovery_wait_secs\', \'stop_grace_secs\', \'checkpoint_basename\', \'session_manager\', \'summary_writer\', \'init_fn\', \'local_init_run_options\'], varargs=None, keywords=None, defaults=[\'None\', \'0\', \'0\', \'True\', \'0\', \'None\', \'0\', \'None\', \'0\', \'0\', \'0\', \'120\', \'600\', \'30\', \'120\', \'model.ckpt\', \'None\', \'0\', \'None\', \'None\'], "
+ }
+ member_method {
+ name: "loop"
+ argspec: "args=[\'self\', \'timer_interval_secs\', \'target\', \'args\', \'kwargs\'], varargs=None, keywords=None, defaults=[\'None\', \'None\'], "
+ }
+ member_method {
+ name: "managed_session"
+ argspec: "args=[], varargs=args, keywords=kwds, defaults=None"
+ }
+ member_method {
+ name: "prepare_or_wait_for_session"
+ argspec: "args=[\'self\', \'master\', \'config\', \'wait_for_checkpoint\', \'max_wait_secs\', \'start_standard_services\'], varargs=None, keywords=None, defaults=[\'\', \'None\', \'False\', \'7200\', \'True\'], "
+ }
+ member_method {
+ name: "request_stop"
+ argspec: "args=[\'self\', \'ex\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "should_stop"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "start_queue_runners"
+ argspec: "args=[\'self\', \'sess\', \'queue_runners\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "start_standard_services"
+ argspec: "args=[\'self\', \'sess\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "stop"
+ argspec: "args=[\'self\', \'threads\', \'close_summary_writer\', \'ignore_live_threads\'], varargs=None, keywords=None, defaults=[\'None\', \'True\', \'False\'], "
+ }
+ member_method {
+ name: "stop_on_exception"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "summary_computed"
+ argspec: "args=[\'self\', \'sess\', \'summary\', \'global_step\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "wait_for_stop"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+}
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.train.-sync-replicas-optimizer.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.train.-sync-replicas-optimizer.pbtxt
new file mode 100644
index 0000000000..2c0fda3c72
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.train.-sync-replicas-optimizer.pbtxt
@@ -0,0 +1,63 @@
+path: "tensorflow.train.SyncReplicasOptimizer"
+tf_class {
+ is_instance: "<class \'tensorflow.python.training.sync_replicas_optimizer.SyncReplicasOptimizer\'>"
+ is_instance: "<class \'tensorflow.python.training.optimizer.Optimizer\'>"
+ is_instance: "<class \'tensorflow.python.training.checkpointable.base.CheckpointableBase\'>"
+ is_instance: "<type \'object\'>"
+ member {
+ name: "GATE_GRAPH"
+ mtype: "<type \'int\'>"
+ }
+ member {
+ name: "GATE_NONE"
+ mtype: "<type \'int\'>"
+ }
+ member {
+ name: "GATE_OP"
+ mtype: "<type \'int\'>"
+ }
+ member_method {
+ name: "__init__"
+ argspec: "args=[\'self\', \'opt\', \'replicas_to_aggregate\', \'total_num_replicas\', \'variable_averages\', \'variables_to_average\', \'use_locking\', \'name\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'False\', \'sync_replicas\'], "
+ }
+ member_method {
+ name: "apply_gradients"
+ argspec: "args=[\'self\', \'grads_and_vars\', \'global_step\', \'name\'], varargs=None, keywords=None, defaults=[\'None\', \'None\'], "
+ }
+ member_method {
+ name: "compute_gradients"
+ argspec: "args=[\'self\'], varargs=args, keywords=kwargs, defaults=None"
+ }
+ member_method {
+ name: "get_chief_queue_runner"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_init_tokens_op"
+ argspec: "args=[\'self\', \'num_tokens\'], varargs=None, keywords=None, defaults=[\'-1\'], "
+ }
+ member_method {
+ name: "get_name"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_slot"
+ argspec: "args=[\'self\'], varargs=args, keywords=kwargs, defaults=None"
+ }
+ member_method {
+ name: "get_slot_names"
+ argspec: "args=[\'self\'], varargs=args, keywords=kwargs, defaults=None"
+ }
+ member_method {
+ name: "make_session_run_hook"
+ argspec: "args=[\'self\', \'is_chief\', \'num_tokens\'], varargs=None, keywords=None, defaults=[\'-1\'], "
+ }
+ member_method {
+ name: "minimize"
+ argspec: "args=[\'self\', \'loss\', \'global_step\', \'var_list\', \'gate_gradients\', \'aggregation_method\', \'colocate_gradients_with_ops\', \'name\', \'grad_loss\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'1\', \'None\', \'False\', \'None\', \'None\'], "
+ }
+ member_method {
+ name: "variables"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+}
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.train.-vocab-info.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.train.-vocab-info.pbtxt
new file mode 100644
index 0000000000..39b946b82f
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.train.-vocab-info.pbtxt
@@ -0,0 +1,43 @@
+path: "tensorflow.train.VocabInfo"
+tf_class {
+ is_instance: "<class \'tensorflow.python.training.warm_starting_util.VocabInfo\'>"
+ is_instance: "<class \'tensorflow.python.training.warm_starting_util.VocabInfo\'>"
+ is_instance: "<type \'tuple\'>"
+ member {
+ name: "axis"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "backup_initializer"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "new_vocab"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "new_vocab_size"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "num_oov_buckets"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "old_vocab"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "old_vocab_size"
+ mtype: "<type \'property\'>"
+ }
+ member_method {
+ name: "__init__"
+ }
+ member_method {
+ name: "count"
+ }
+ member_method {
+ name: "index"
+ }
+}
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.train.-worker-session-creator.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.train.-worker-session-creator.pbtxt
new file mode 100644
index 0000000000..ac26358068
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.train.-worker-session-creator.pbtxt
@@ -0,0 +1,14 @@
+path: "tensorflow.train.WorkerSessionCreator"
+tf_class {
+ is_instance: "<class \'tensorflow.python.training.monitored_session.WorkerSessionCreator\'>"
+ is_instance: "<class \'tensorflow.python.training.monitored_session.SessionCreator\'>"
+ is_instance: "<type \'object\'>"
+ member_method {
+ name: "__init__"
+ argspec: "args=[\'self\', \'scaffold\', \'master\', \'config\', \'max_wait_secs\'], varargs=None, keywords=None, defaults=[\'None\', \'\', \'None\', \'1800\'], "
+ }
+ member_method {
+ name: "create_session"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+}
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.train.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.train.pbtxt
new file mode 100644
index 0000000000..b21dabbde7
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.train.pbtxt
@@ -0,0 +1,391 @@
+path: "tensorflow.train"
+tf_module {
+ member {
+ name: "AdadeltaOptimizer"
+ mtype: "<type \'type\'>"
+ }
+ member {
+ name: "AdagradDAOptimizer"
+ mtype: "<type \'type\'>"
+ }
+ member {
+ name: "AdagradOptimizer"
+ mtype: "<type \'type\'>"
+ }
+ member {
+ name: "AdamOptimizer"
+ mtype: "<type \'type\'>"
+ }
+ member {
+ name: "BytesList"
+ mtype: "<class \'google.protobuf.pyext.cpp_message.GeneratedProtocolMessageType\'>"
+ }
+ member {
+ name: "Checkpoint"
+ mtype: "<type \'type\'>"
+ }
+ member {
+ name: "CheckpointSaverHook"
+ mtype: "<type \'type\'>"
+ }
+ member {
+ name: "CheckpointSaverListener"
+ mtype: "<type \'type\'>"
+ }
+ member {
+ name: "ChiefSessionCreator"
+ mtype: "<type \'type\'>"
+ }
+ member {
+ name: "ClusterDef"
+ mtype: "<class \'google.protobuf.pyext.cpp_message.GeneratedProtocolMessageType\'>"
+ }
+ member {
+ name: "ClusterSpec"
+ mtype: "<type \'type\'>"
+ }
+ member {
+ name: "Coordinator"
+ mtype: "<type \'type\'>"
+ }
+ member {
+ name: "Example"
+ mtype: "<class \'google.protobuf.pyext.cpp_message.GeneratedProtocolMessageType\'>"
+ }
+ member {
+ name: "ExponentialMovingAverage"
+ mtype: "<type \'type\'>"
+ }
+ member {
+ name: "Feature"
+ mtype: "<class \'google.protobuf.pyext.cpp_message.GeneratedProtocolMessageType\'>"
+ }
+ member {
+ name: "FeatureList"
+ mtype: "<class \'google.protobuf.pyext.cpp_message.GeneratedProtocolMessageType\'>"
+ }
+ member {
+ name: "FeatureLists"
+ mtype: "<class \'google.protobuf.pyext.cpp_message.GeneratedProtocolMessageType\'>"
+ }
+ member {
+ name: "Features"
+ mtype: "<class \'google.protobuf.pyext.cpp_message.GeneratedProtocolMessageType\'>"
+ }
+ member {
+ name: "FeedFnHook"
+ mtype: "<type \'type\'>"
+ }
+ member {
+ name: "FinalOpsHook"
+ mtype: "<type \'type\'>"
+ }
+ member {
+ name: "FloatList"
+ mtype: "<class \'google.protobuf.pyext.cpp_message.GeneratedProtocolMessageType\'>"
+ }
+ member {
+ name: "FtrlOptimizer"
+ mtype: "<type \'type\'>"
+ }
+ member {
+ name: "GlobalStepWaiterHook"
+ mtype: "<type \'type\'>"
+ }
+ member {
+ name: "GradientDescentOptimizer"
+ mtype: "<type \'type\'>"
+ }
+ member {
+ name: "Int64List"
+ mtype: "<class \'google.protobuf.pyext.cpp_message.GeneratedProtocolMessageType\'>"
+ }
+ member {
+ name: "JobDef"
+ mtype: "<class \'google.protobuf.pyext.cpp_message.GeneratedProtocolMessageType\'>"
+ }
+ member {
+ name: "LoggingTensorHook"
+ mtype: "<type \'type\'>"
+ }
+ member {
+ name: "LooperThread"
+ mtype: "<type \'type\'>"
+ }
+ member {
+ name: "MomentumOptimizer"
+ mtype: "<type \'type\'>"
+ }
+ member {
+ name: "MonitoredSession"
+ mtype: "<type \'type\'>"
+ }
+ member {
+ name: "NanLossDuringTrainingError"
+ mtype: "<type \'type\'>"
+ }
+ member {
+ name: "NanTensorHook"
+ mtype: "<type \'type\'>"
+ }
+ member {
+ name: "Optimizer"
+ mtype: "<type \'type\'>"
+ }
+ member {
+ name: "ProfilerHook"
+ mtype: "<type \'type\'>"
+ }
+ member {
+ name: "ProximalAdagradOptimizer"
+ mtype: "<type \'type\'>"
+ }
+ member {
+ name: "ProximalGradientDescentOptimizer"
+ mtype: "<type \'type\'>"
+ }
+ member {
+ name: "RMSPropOptimizer"
+ mtype: "<type \'type\'>"
+ }
+ member {
+ name: "Saver"
+ mtype: "<type \'type\'>"
+ }
+ member {
+ name: "SaverDef"
+ mtype: "<class \'google.protobuf.pyext.cpp_message.GeneratedProtocolMessageType\'>"
+ }
+ member {
+ name: "Scaffold"
+ mtype: "<type \'type\'>"
+ }
+ member {
+ name: "SecondOrStepTimer"
+ mtype: "<type \'type\'>"
+ }
+ member {
+ name: "SequenceExample"
+ mtype: "<class \'google.protobuf.pyext.cpp_message.GeneratedProtocolMessageType\'>"
+ }
+ member {
+ name: "Server"
+ mtype: "<type \'type\'>"
+ }
+ member {
+ name: "ServerDef"
+ mtype: "<class \'google.protobuf.pyext.cpp_message.GeneratedProtocolMessageType\'>"
+ }
+ member {
+ name: "SessionCreator"
+ mtype: "<type \'type\'>"
+ }
+ member {
+ name: "SessionManager"
+ mtype: "<type \'type\'>"
+ }
+ member {
+ name: "SessionRunArgs"
+ mtype: "<type \'type\'>"
+ }
+ member {
+ name: "SessionRunContext"
+ mtype: "<type \'type\'>"
+ }
+ member {
+ name: "SessionRunHook"
+ mtype: "<type \'type\'>"
+ }
+ member {
+ name: "SessionRunValues"
+ mtype: "<type \'type\'>"
+ }
+ member {
+ name: "SingularMonitoredSession"
+ mtype: "<type \'type\'>"
+ }
+ member {
+ name: "StepCounterHook"
+ mtype: "<type \'type\'>"
+ }
+ member {
+ name: "StopAtStepHook"
+ mtype: "<type \'type\'>"
+ }
+ member {
+ name: "SummarySaverHook"
+ mtype: "<type \'type\'>"
+ }
+ member {
+ name: "Supervisor"
+ mtype: "<type \'type\'>"
+ }
+ member {
+ name: "SyncReplicasOptimizer"
+ mtype: "<type \'type\'>"
+ }
+ member {
+ name: "VocabInfo"
+ mtype: "<type \'type\'>"
+ }
+ member {
+ name: "WorkerSessionCreator"
+ mtype: "<type \'type\'>"
+ }
+ member_method {
+ name: "MonitoredTrainingSession"
+ argspec: "args=[\'master\', \'is_chief\', \'checkpoint_dir\', \'scaffold\', \'hooks\', \'chief_only_hooks\', \'save_checkpoint_secs\', \'save_summaries_steps\', \'save_summaries_secs\', \'config\', \'stop_grace_period_secs\', \'log_step_count_steps\', \'max_wait_secs\', \'save_checkpoint_steps\', \'summary_dir\'], varargs=None, keywords=None, defaults=[\'\', \'True\', \'None\', \'None\', \'None\', \'None\', \'<object object instance>\', \'<object object instance>\', \'<object object instance>\', \'None\', \'120\', \'100\', \'7200\', \'<object object instance>\', \'None\'], "
+ }
+ member_method {
+ name: "NewCheckpointReader"
+ argspec: "args=[\'filepattern\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "assert_global_step"
+ argspec: "args=[\'global_step_tensor\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "basic_train_loop"
+ argspec: "args=[\'supervisor\', \'train_step_fn\', \'args\', \'kwargs\', \'master\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'\'], "
+ }
+ member_method {
+ name: "checkpoint_exists"
+ argspec: "args=[\'checkpoint_prefix\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "cosine_decay"
+ argspec: "args=[\'learning_rate\', \'global_step\', \'decay_steps\', \'alpha\', \'name\'], varargs=None, keywords=None, defaults=[\'0.0\', \'None\'], "
+ }
+ member_method {
+ name: "cosine_decay_restarts"
+ argspec: "args=[\'learning_rate\', \'global_step\', \'first_decay_steps\', \'t_mul\', \'m_mul\', \'alpha\', \'name\'], varargs=None, keywords=None, defaults=[\'2.0\', \'1.0\', \'0.0\', \'None\'], "
+ }
+ member_method {
+ name: "create_global_step"
+ argspec: "args=[\'graph\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "do_quantize_training_on_graphdef"
+ argspec: "args=[\'input_graph\', \'num_bits\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "exponential_decay"
+ argspec: "args=[\'learning_rate\', \'global_step\', \'decay_steps\', \'decay_rate\', \'staircase\', \'name\'], varargs=None, keywords=None, defaults=[\'False\', \'None\'], "
+ }
+ member_method {
+ name: "export_meta_graph"
+ argspec: "args=[\'filename\', \'meta_info_def\', \'graph_def\', \'saver_def\', \'collection_list\', \'as_text\', \'graph\', \'export_scope\', \'clear_devices\', \'clear_extraneous_savers\', \'strip_default_attrs\'], varargs=None, keywords=kwargs, defaults=[\'None\', \'None\', \'None\', \'None\', \'None\', \'False\', \'None\', \'None\', \'False\', \'False\', \'False\'], "
+ }
+ member_method {
+ name: "generate_checkpoint_state_proto"
+ argspec: "args=[\'save_dir\', \'model_checkpoint_path\', \'all_model_checkpoint_paths\', \'all_model_checkpoint_timestamps\', \'last_preserved_timestamp\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\'], "
+ }
+ member_method {
+ name: "get_checkpoint_mtimes"
+ argspec: "args=[\'checkpoint_prefixes\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_checkpoint_state"
+ argspec: "args=[\'checkpoint_dir\', \'latest_filename\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "get_global_step"
+ argspec: "args=[\'graph\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "get_or_create_global_step"
+ argspec: "args=[\'graph\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "global_step"
+ argspec: "args=[\'sess\', \'global_step_tensor\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "import_meta_graph"
+ argspec: "args=[\'meta_graph_or_file\', \'clear_devices\', \'import_scope\'], varargs=None, keywords=kwargs, defaults=[\'False\', \'None\'], "
+ }
+ member_method {
+ name: "init_from_checkpoint"
+ argspec: "args=[\'ckpt_dir_or_file\', \'assignment_map\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "inverse_time_decay"
+ argspec: "args=[\'learning_rate\', \'global_step\', \'decay_steps\', \'decay_rate\', \'staircase\', \'name\'], varargs=None, keywords=None, defaults=[\'False\', \'None\'], "
+ }
+ member_method {
+ name: "latest_checkpoint"
+ argspec: "args=[\'checkpoint_dir\', \'latest_filename\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "linear_cosine_decay"
+ argspec: "args=[\'learning_rate\', \'global_step\', \'decay_steps\', \'num_periods\', \'alpha\', \'beta\', \'name\'], varargs=None, keywords=None, defaults=[\'0.5\', \'0.0\', \'0.001\', \'None\'], "
+ }
+ member_method {
+ name: "list_variables"
+ argspec: "args=[\'ckpt_dir_or_file\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "load_checkpoint"
+ argspec: "args=[\'ckpt_dir_or_file\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "load_variable"
+ argspec: "args=[\'ckpt_dir_or_file\', \'name\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "match_filenames_once"
+ argspec: "args=[\'pattern\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "natural_exp_decay"
+ argspec: "args=[\'learning_rate\', \'global_step\', \'decay_steps\', \'decay_rate\', \'staircase\', \'name\'], varargs=None, keywords=None, defaults=[\'False\', \'None\'], "
+ }
+ member_method {
+ name: "noisy_linear_cosine_decay"
+ argspec: "args=[\'learning_rate\', \'global_step\', \'decay_steps\', \'initial_variance\', \'variance_decay\', \'num_periods\', \'alpha\', \'beta\', \'name\'], varargs=None, keywords=None, defaults=[\'1.0\', \'0.55\', \'0.5\', \'0.0\', \'0.001\', \'None\'], "
+ }
+ member_method {
+ name: "piecewise_constant"
+ argspec: "args=[\'x\', \'boundaries\', \'values\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "polynomial_decay"
+ argspec: "args=[\'learning_rate\', \'global_step\', \'decay_steps\', \'end_learning_rate\', \'power\', \'cycle\', \'name\'], varargs=None, keywords=None, defaults=[\'0.0001\', \'1.0\', \'False\', \'None\'], "
+ }
+ member_method {
+ name: "remove_checkpoint"
+ argspec: "args=[\'checkpoint_prefix\', \'checkpoint_format_version\', \'meta_graph_suffix\'], varargs=None, keywords=None, defaults=[\'2\', \'meta\'], "
+ }
+ member_method {
+ name: "replica_device_setter"
+ argspec: "args=[\'ps_tasks\', \'ps_device\', \'worker_device\', \'merge_devices\', \'cluster\', \'ps_ops\', \'ps_strategy\'], varargs=None, keywords=None, defaults=[\'0\', \'/job:ps\', \'/job:worker\', \'True\', \'None\', \'None\', \'None\'], "
+ }
+ member_method {
+ name: "sdca_fprint"
+ argspec: "args=[\'input\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "sdca_optimizer"
+ argspec: "args=[\'sparse_example_indices\', \'sparse_feature_indices\', \'sparse_feature_values\', \'dense_features\', \'example_weights\', \'example_labels\', \'sparse_indices\', \'sparse_weights\', \'dense_weights\', \'example_state_data\', \'loss_type\', \'l1\', \'l2\', \'num_loss_partitions\', \'num_inner_iterations\', \'adaptative\', \'name\'], varargs=None, keywords=None, defaults=[\'True\', \'None\'], "
+ }
+ member_method {
+ name: "sdca_shrink_l1"
+ argspec: "args=[\'weights\', \'l1\', \'l2\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "summary_iterator"
+ argspec: "args=[\'path\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "update_checkpoint_state"
+ argspec: "args=[\'save_dir\', \'model_checkpoint_path\', \'all_model_checkpoint_paths\', \'latest_filename\', \'all_model_checkpoint_timestamps\', \'last_preserved_timestamp\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'None\'], "
+ }
+ member_method {
+ name: "warm_start"
+ argspec: "args=[\'ckpt_to_initialize_from\', \'vars_to_warm_start\', \'var_name_to_vocab_info\', \'var_name_to_prev_var_name\'], varargs=None, keywords=None, defaults=[\'.*\', \'None\', \'None\'], "
+ }
+ member_method {
+ name: "write_graph"
+ argspec: "args=[\'graph_or_graph_def\', \'logdir\', \'name\', \'as_text\'], varargs=None, keywords=None, defaults=[\'True\'], "
+ }
+}
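
The argspec strings in the golden file above follow Python's inspect conventions: defaults align with the trailing entries of args, so cosine_decay's defaults ['0.0', 'None'] belong to alpha and name. Below is a minimal sketch of that formatting, using Python 3's getfullargspec as a stand-in for the Python 2 getargspec output the goldens were generated with, and a hypothetical function with the same signature rather than the real tf.train symbol:

    import inspect

    def format_argspec(func):
      # Mirror the "args=..., varargs=..., keywords=..., defaults=..." layout
      # used by the golden files; the real generator lives in the API tools.
      spec = inspect.getfullargspec(func)
      defaults = [str(d) for d in (spec.defaults or ())]
      return "args=%s, varargs=%s, keywords=%s, defaults=%s" % (
          spec.args, spec.varargs, spec.varkw, defaults)

    def cosine_decay(learning_rate, global_step, decay_steps, alpha=0.0, name=None):
      """Stand-in with the same signature as the cosine_decay entry above."""

    print(format_argspec(cosine_decay))
    # args=['learning_rate', 'global_step', 'decay_steps', 'alpha', 'name'],
    # varargs=None, keywords=None, defaults=['0.0', 'None']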
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.truncated_normal_initializer.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.truncated_normal_initializer.pbtxt
new file mode 100644
index 0000000000..c1e1c230a9
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.truncated_normal_initializer.pbtxt
@@ -0,0 +1,18 @@
+path: "tensorflow.truncated_normal_initializer"
+tf_class {
+ is_instance: "<class \'tensorflow.python.ops.init_ops.TruncatedNormal\'>"
+ is_instance: "<class \'tensorflow.python.ops.init_ops.Initializer\'>"
+ is_instance: "<type \'object\'>"
+ member_method {
+ name: "__init__"
+ argspec: "args=[\'self\', \'mean\', \'stddev\', \'seed\', \'dtype\'], varargs=None, keywords=None, defaults=[\'0.0\', \'1.0\', \'None\', \"<dtype: \'float32\'>\"], "
+ }
+ member_method {
+ name: "from_config"
+ argspec: "args=[\'cls\', \'config\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_config"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+}
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.uniform_unit_scaling_initializer.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.uniform_unit_scaling_initializer.pbtxt
new file mode 100644
index 0000000000..e1b18dc92f
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.uniform_unit_scaling_initializer.pbtxt
@@ -0,0 +1,18 @@
+path: "tensorflow.uniform_unit_scaling_initializer"
+tf_class {
+ is_instance: "<class \'tensorflow.python.ops.init_ops.UniformUnitScaling\'>"
+ is_instance: "<class \'tensorflow.python.ops.init_ops.Initializer\'>"
+ is_instance: "<type \'object\'>"
+ member_method {
+ name: "__init__"
+ argspec: "args=[\'self\', \'factor\', \'seed\', \'dtype\'], varargs=None, keywords=None, defaults=[\'1.0\', \'None\', \"<dtype: \'float32\'>\"], "
+ }
+ member_method {
+ name: "from_config"
+ argspec: "args=[\'cls\', \'config\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_config"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+}
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.variable_scope.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.variable_scope.pbtxt
new file mode 100644
index 0000000000..e62dec93e6
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.variable_scope.pbtxt
@@ -0,0 +1,9 @@
+path: "tensorflow.variable_scope"
+tf_class {
+ is_instance: "<class \'tensorflow.python.ops.variable_scope.variable_scope\'>"
+ is_instance: "<type \'object\'>"
+ member_method {
+ name: "__init__"
+ argspec: "args=[\'self\', \'name_or_scope\', \'default_name\', \'values\', \'initializer\', \'regularizer\', \'caching_device\', \'partitioner\', \'custom_getter\', \'reuse\', \'dtype\', \'use_resource\', \'constraint\', \'auxiliary_name_scope\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'True\'], "
+ }
+}
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.variance_scaling_initializer.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.variance_scaling_initializer.pbtxt
new file mode 100644
index 0000000000..09d7bc03b4
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.variance_scaling_initializer.pbtxt
@@ -0,0 +1,18 @@
+path: "tensorflow.variance_scaling_initializer"
+tf_class {
+ is_instance: "<class \'tensorflow.python.ops.init_ops.VarianceScaling\'>"
+ is_instance: "<class \'tensorflow.python.ops.init_ops.Initializer\'>"
+ is_instance: "<type \'object\'>"
+ member_method {
+ name: "__init__"
+ argspec: "args=[\'self\', \'scale\', \'mode\', \'distribution\', \'seed\', \'dtype\'], varargs=None, keywords=None, defaults=[\'1.0\', \'fan_in\', \'truncated_normal\', \'None\', \"<dtype: \'float32\'>\"], "
+ }
+ member_method {
+ name: "from_config"
+ argspec: "args=[\'cls\', \'config\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_config"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+}
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.zeros_initializer.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.zeros_initializer.pbtxt
new file mode 100644
index 0000000000..e229b02cee
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.zeros_initializer.pbtxt
@@ -0,0 +1,18 @@
+path: "tensorflow.zeros_initializer"
+tf_class {
+ is_instance: "<class \'tensorflow.python.ops.init_ops.Zeros\'>"
+ is_instance: "<class \'tensorflow.python.ops.init_ops.Initializer\'>"
+ is_instance: "<type \'object\'>"
+ member_method {
+ name: "__init__"
+ argspec: "args=[\'self\', \'dtype\'], varargs=None, keywords=None, defaults=[\"<dtype: \'float32\'>\"], "
+ }
+ member_method {
+ name: "from_config"
+ argspec: "args=[\'cls\', \'config\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_config"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+}
diff --git a/tensorflow/tools/api/lib/api_objects.proto b/tensorflow/tools/api/lib/api_objects.proto
index 0966a5f1d5..7207b9c5a9 100644
--- a/tensorflow/tools/api/lib/api_objects.proto
+++ b/tensorflow/tools/api/lib/api_objects.proto
@@ -1,5 +1,7 @@
syntax = "proto2";
+import "google/protobuf/descriptor.proto";
+
package third_party.tensorflow.tools.api;
message TFAPIMember {
@@ -24,8 +26,17 @@ message TFAPIClass {
repeated TFAPIMethod member_method = 3;
};
+message TFAPIProto {
+ // Suppress generation of the proto API's descriptor() method lest it
+ // conflict with the standard accessor for the field having the same name.
+ option no_standard_descriptor_accessor = true;
+
+ optional google.protobuf.DescriptorProto descriptor = 1;
+};
+
message TFAPIObject {
optional string path = 1;
optional TFAPIModule tf_module = 2;
optional TFAPIClass tf_class = 3;
+ optional TFAPIProto tf_proto = 4;
};
diff --git a/tensorflow/tools/api/lib/python_object_to_proto_visitor.py b/tensorflow/tools/api/lib/python_object_to_proto_visitor.py
index 0b30f7b4d1..3a48cf683c 100644
--- a/tensorflow/tools/api/lib/python_object_to_proto_visitor.py
+++ b/tensorflow/tools/api/lib/python_object_to_proto_visitor.py
@@ -19,6 +19,7 @@ from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
+from google.protobuf import message
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.util import tf_decorator
from tensorflow.python.util import tf_inspect
@@ -87,6 +88,9 @@ def _SanitizedMRO(obj):
"""
return_list = []
for cls in tf_inspect.getmro(obj):
+ if cls.__name__ == '_NewClass':
+ # Ignore class created by @deprecated_alias decorator.
+ continue
str_repr = str(cls)
return_list.append(str_repr)
if 'tensorflow' not in str_repr:
@@ -101,6 +105,11 @@ def _SanitizedMRO(obj):
return return_list
+def _IsProtoClass(obj):
+ """Returns whether the passed obj is a Protocol Buffer class."""
+ return isinstance(obj, type) and issubclass(obj, message.Message)
+
+
class PythonObjectToProtoVisitor(object):
"""A visitor that summarizes given python objects as protobufs."""
@@ -153,6 +162,13 @@ class PythonObjectToProtoVisitor(object):
# Store the constructed module object.
self._protos[lib_path] = api_objects_pb2.TFAPIObject(
path=lib_path, tf_module=module_obj)
+ elif _IsProtoClass(parent):
+ proto_obj = api_objects_pb2.TFAPIProto()
+ parent.DESCRIPTOR.CopyToProto(proto_obj.descriptor)
+
+ # Store the constructed proto object.
+ self._protos[lib_path] = api_objects_pb2.TFAPIObject(
+ path=lib_path, tf_proto=proto_obj)
elif tf_inspect.isclass(parent):
# Construct a class.
class_obj = api_objects_pb2.TFAPIClass()
@@ -161,7 +177,7 @@ class PythonObjectToProtoVisitor(object):
if name in parent_corner_cases:
# If we have an empty entry, skip this object.
if parent_corner_cases[name]:
- module_obj.member.add(**(parent_corner_cases[name]))
+ class_obj.member.add(**(parent_corner_cases[name]))
else:
_AddMember(name, child, class_obj)
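
The new tf_proto branch above first checks issubclass(obj, message.Message) and then serializes the class's schema with DESCRIPTOR.CopyToProto. A small sketch of those two calls in isolation, assuming a TensorFlow install; example_pb2.Example is used only as a convenient generated proto class:

    from google.protobuf import descriptor_pb2
    from google.protobuf import message
    from tensorflow.core.example import example_pb2

    def is_proto_class(obj):
      # Same test the visitor applies before taking the tf_proto branch.
      return isinstance(obj, type) and issubclass(obj, message.Message)

    assert is_proto_class(example_pb2.Example)

    # Fill a DescriptorProto the way the visitor fills TFAPIProto.descriptor.
    desc = descriptor_pb2.DescriptorProto()
    example_pb2.Example.DESCRIPTOR.CopyToProto(desc)
    print(desc.name)  # "Example"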
diff --git a/tensorflow/tools/api/tests/BUILD b/tensorflow/tools/api/tests/BUILD
index 724b12cd47..4efa4a9651 100644
--- a/tensorflow/tools/api/tests/BUILD
+++ b/tensorflow/tools/api/tests/BUILD
@@ -15,9 +15,13 @@ load("//tensorflow:tensorflow.bzl", "tf_cc_binary")
py_test(
name = "api_compatibility_test",
- srcs = ["api_compatibility_test.py"],
+ srcs = [
+ "api_compatibility_test.py",
+ "//tensorflow:tf_python_api_gen_v2",
+ ],
data = [
- "//tensorflow/tools/api/golden:api_golden",
+ "//tensorflow/tools/api/golden:api_golden_v1",
+ "//tensorflow/tools/api/golden:api_golden_v2",
"//tensorflow/tools/api/tests:API_UPDATE_WARNING.txt",
"//tensorflow/tools/api/tests:README.txt",
],
diff --git a/tensorflow/tools/api/tests/api_compatibility_test.py b/tensorflow/tools/api/tests/api_compatibility_test.py
index 1ad6b6d1c0..d06c7f2d49 100644
--- a/tensorflow/tools/api/tests/api_compatibility_test.py
+++ b/tensorflow/tools/api/tests/api_compatibility_test.py
@@ -34,7 +34,9 @@ import sys
import unittest
import tensorflow as tf
+from tensorflow._api import v2 as tf_v2
+from google.protobuf import message
from google.protobuf import text_format
from tensorflow.python.lib.io import file_io
@@ -46,7 +48,6 @@ from tensorflow.tools.api.lib import python_object_to_proto_visitor
from tensorflow.tools.common import public_api
from tensorflow.tools.common import traverse
-
# FLAGS defined at the bottom:
FLAGS = None
# DEFINE_boolean, update_goldens, default False:
@@ -61,19 +62,25 @@ _VERBOSE_DIFFS_HELP = """
false, only print which libraries have differences.
"""
-_API_GOLDEN_FOLDER = 'tensorflow/tools/api/golden'
+_API_GOLDEN_FOLDER_V1 = 'tensorflow/tools/api/golden/v1'
+_API_GOLDEN_FOLDER_V2 = 'tensorflow/tools/api/golden/v2'
_TEST_README_FILE = 'tensorflow/tools/api/tests/README.txt'
_UPDATE_WARNING_FILE = 'tensorflow/tools/api/tests/API_UPDATE_WARNING.txt'
-def _KeyToFilePath(key):
- """From a given key, construct a filepath."""
+def _KeyToFilePath(key, api_version):
+ """From a given key, construct a filepath.
+
+ The filepath will be inside the golden folder for the given api_version.
+ """
def _ReplaceCapsWithDash(matchobj):
match = matchobj.group(0)
return '-%s' % (match.lower())
case_insensitive_key = re.sub('([A-Z]{1})', _ReplaceCapsWithDash, key)
- return os.path.join(_API_GOLDEN_FOLDER, '%s.pbtxt' % case_insensitive_key)
+ api_folder = (
+ _API_GOLDEN_FOLDER_V2 if api_version == 2 else _API_GOLDEN_FOLDER_V1)
+ return os.path.join(api_folder, '%s.pbtxt' % case_insensitive_key)
def _FileNameToKey(filename):
@@ -89,6 +96,21 @@ def _FileNameToKey(filename):
return api_object_key
+def _VerifyNoSubclassOfMessageVisitor(path, parent, unused_children):
+ """A Visitor that crashes on subclasses of generated proto classes."""
+ # If the traversed object is a proto Message class
+ if not (isinstance(parent, type) and
+ issubclass(parent, message.Message)):
+ return
+ if parent is message.Message:
+ return
+ # Check that it is a direct subclass of Message.
+ if message.Message not in parent.__bases__:
+ raise NotImplementedError(
+ 'Object tf.%s is a subclass of a generated proto Message. '
+ 'They are not yet supported by the API tools.' % path)
+
+
class ApiCompatibilityTest(test.TestCase):
def __init__(self, *args, **kwargs):
@@ -111,7 +133,8 @@ class ApiCompatibilityTest(test.TestCase):
actual_dict,
verbose=False,
update_goldens=False,
- additional_missing_object_message=''):
+ additional_missing_object_message='',
+ api_version=2):
"""Diff given dicts of protobufs and report differences a readable way.
Args:
@@ -124,6 +147,7 @@ class ApiCompatibilityTest(test.TestCase):
update_goldens: Whether to update goldens when there are diffs found.
additional_missing_object_message: Message to print when a symbol is
missing.
+ api_version: TensorFlow API version to test.
"""
diffs = []
verbose_diffs = []
@@ -149,6 +173,8 @@ class ApiCompatibilityTest(test.TestCase):
diff_message = 'New object %s found (added).' % key
verbose_diff_message = diff_message
else:
+ # Do not truncate diff
+ self.maxDiff = None # pylint: disable=invalid-name
# Now we can run an actual proto diff.
try:
self.assertProtoEquals(expected_dict[key], actual_dict[key])
@@ -179,13 +205,13 @@ class ApiCompatibilityTest(test.TestCase):
# If the keys are only in expected, some objects are deleted.
# Remove files.
for key in only_in_expected:
- filepath = _KeyToFilePath(key)
+ filepath = _KeyToFilePath(key, api_version)
file_io.delete_file(filepath)
# If the files are only in actual (current library), these are new
# modules. Write them to files. Also record all updates in files.
for key in only_in_actual | set(updated_keys):
- filepath = _KeyToFilePath(key)
+ filepath = _KeyToFilePath(key, api_version)
file_io.write_string_to_file(
filepath, text_format.MessageToString(actual_dict[key]))
else:
@@ -195,25 +221,45 @@ class ApiCompatibilityTest(test.TestCase):
else:
logging.info('No differences found between API and golden.')
- @unittest.skipUnless(
- sys.version_info.major == 2,
- 'API compabitility test goldens are generated using python2.')
- def testAPIBackwardsCompatibility(self):
+ def testNoSubclassOfMessage(self):
+ visitor = public_api.PublicAPIVisitor(_VerifyNoSubclassOfMessageVisitor)
+ visitor.do_not_descend_map['tf'].append('contrib')
+ # Skip compat.v1 and compat.v2 since they are validated in separate tests.
+ visitor.private_map['tf.compat'] = ['v1', 'v2']
+ traverse.traverse(tf, visitor)
+
+ def testNoSubclassOfMessageV1(self):
+ if not hasattr(tf.compat, 'v1'):
+ return
+ visitor = public_api.PublicAPIVisitor(_VerifyNoSubclassOfMessageVisitor)
+ visitor.do_not_descend_map['tf'].append('contrib')
+ traverse.traverse(tf_v2.compat.v1, visitor)
+
+ def testNoSubclassOfMessageV2(self):
+ if not hasattr(tf.compat, 'v2'):
+ return
+ visitor = public_api.PublicAPIVisitor(_VerifyNoSubclassOfMessageVisitor)
+ visitor.do_not_descend_map['tf'].append('contrib')
+ traverse.traverse(tf_v2, visitor)
+
+ def _checkBackwardsCompatibility(
+ self, root, golden_file_pattern, api_version,
+ additional_private_map=None):
# Extract all API stuff.
visitor = python_object_to_proto_visitor.PythonObjectToProtoVisitor()
public_api_visitor = public_api.PublicAPIVisitor(visitor)
public_api_visitor.do_not_descend_map['tf'].append('contrib')
- public_api_visitor.do_not_descend_map['tf.GPUOptions'] = ['Experimental']
- traverse.traverse(tf, public_api_visitor)
+ public_api_visitor.do_not_descend_map['tf.GPUOptions'] = [
+ 'Experimental']
+ if additional_private_map:
+ public_api_visitor.private_map.update(additional_private_map)
+ traverse.traverse(root, public_api_visitor)
proto_dict = visitor.GetProtos()
# Read all golden files.
- expression = os.path.join(
- resource_loader.get_root_dir_with_all_resources(),
- _KeyToFilePath('*'))
- golden_file_list = file_io.get_matching_files(expression)
+ golden_file_list = file_io.get_matching_files(golden_file_pattern)
def _ReadFileToProto(filename):
"""Read a filename, create a protobuf from its contents."""
@@ -232,7 +278,47 @@ class ApiCompatibilityTest(test.TestCase):
golden_proto_dict,
proto_dict,
verbose=FLAGS.verbose_diffs,
- update_goldens=FLAGS.update_goldens)
+ update_goldens=FLAGS.update_goldens,
+ api_version=api_version)
+
+ @unittest.skipUnless(
+ sys.version_info.major == 2,
+ 'API compatibility test goldens are generated using python2.')
+ def testAPIBackwardsCompatibility(self):
+ api_version = 1
+ golden_file_pattern = os.path.join(
+ resource_loader.get_root_dir_with_all_resources(),
+ _KeyToFilePath('*', api_version))
+ self._checkBackwardsCompatibility(
+ tf,
+ golden_file_pattern,
+ api_version,
+ # Skip compat.v1 and compat.v2 since they are validated
+ # in separate tests.
+ additional_private_map={'tf.compat': ['v1', 'v2']})
+
+ @unittest.skipUnless(
+ sys.version_info.major == 2,
+ 'API compatibility test goldens are generated using python2.')
+ def testAPIBackwardsCompatibilityV1(self):
+ api_version = 1
+ golden_file_pattern = os.path.join(
+ resource_loader.get_root_dir_with_all_resources(),
+ _KeyToFilePath('*', api_version))
+ self._checkBackwardsCompatibility(
+ tf_v2.compat.v1, golden_file_pattern, api_version)
+
+ @unittest.skipUnless(
+ sys.version_info.major == 2,
+ 'API compatibility test goldens are generated using python2.')
+ def testAPIBackwardsCompatibilityV2(self):
+ api_version = 2
+ golden_file_pattern = os.path.join(
+ resource_loader.get_root_dir_with_all_resources(),
+ _KeyToFilePath('*', api_version))
+ self._checkBackwardsCompatibility(
+ tf_v2, golden_file_pattern, api_version,
+ additional_private_map={'tf.compat': ['v1']})
if __name__ == '__main__':
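
With the v1/v2 split above, every key is routed to the matching golden folder, and the caps-to-dash regex is what turns tensorflow.train.Supervisor into tensorflow.train.-supervisor.pbtxt. A standalone re-implementation of that mapping, purely for illustration:

    import os
    import re

    _API_GOLDEN_FOLDER_V1 = 'tensorflow/tools/api/golden/v1'
    _API_GOLDEN_FOLDER_V2 = 'tensorflow/tools/api/golden/v2'

    def key_to_file_path(key, api_version):
      # Mirrors _KeyToFilePath in the test above.
      def replace_caps_with_dash(matchobj):
        return '-%s' % matchobj.group(0).lower()
      case_insensitive_key = re.sub('([A-Z]{1})', replace_caps_with_dash, key)
      api_folder = (_API_GOLDEN_FOLDER_V2 if api_version == 2
                    else _API_GOLDEN_FOLDER_V1)
      return os.path.join(api_folder, '%s.pbtxt' % case_insensitive_key)

    print(key_to_file_path('tensorflow.train.Supervisor', 2))
    # tensorflow/tools/api/golden/v2/tensorflow.train.-supervisor.pbtxt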
diff --git a/tensorflow/tools/benchmark/benchmark_model.cc b/tensorflow/tools/benchmark/benchmark_model.cc
index 15523028c7..de93b12b97 100644
--- a/tensorflow/tools/benchmark/benchmark_model.cc
+++ b/tensorflow/tools/benchmark/benchmark_model.cc
@@ -262,6 +262,10 @@ Status InitializeSession(int num_threads, const string& graph,
tensorflow::GraphDef tensorflow_graph;
Status s = ReadBinaryProto(Env::Default(), graph, graph_def->get());
if (!s.ok()) {
+ s = ReadTextProto(Env::Default(), graph, graph_def->get());
+ }
+
+ if (!s.ok()) {
LOG(ERROR) << "Could not create TensorFlow Graph: " << s;
return s;
}
@@ -663,12 +667,12 @@ int Main(int argc, char** argv) {
output_prefix, benchmark_name, "meta-init-plus-first-inference", 1,
initialization_time_s + (warmup_time_us / 1000000.0) / warmup_runs);
- std::map<string, int64> node_type_map_count;
- std::map<string, int64> node_type_map_time;
- std::map<string, int64> node_type_map_memory;
- std::map<string, int64> node_type_map_times_called;
+ std::map<std::string, int64_t> node_type_map_count;
+ std::map<std::string, int64_t> node_type_map_time;
+ std::map<std::string, int64_t> node_type_map_memory;
+ std::map<std::string, int64_t> node_type_map_times_called;
- int64 accumulated_us;
+ int64_t accumulated_us;
stats->ComputeStatsByType(&node_type_map_count, &node_type_map_time,
&node_type_map_memory,
&node_type_map_times_called, &accumulated_us);
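
InitializeSession now retries a failed binary read as a text proto, which is what lets the new TextProto test below load a .pb.txt graph. A rough Python analogue of the same binary-then-text fallback, assuming a GraphDef file on disk; error handling is simplified and the helper name is illustrative:

    from google.protobuf import text_format
    from google.protobuf.message import DecodeError
    from tensorflow.core.framework import graph_pb2

    def load_graph_def(path):
      # Try the binary wire format first, then fall back to text format,
      # mirroring the ReadBinaryProto -> ReadTextProto sequence above.
      graph_def = graph_pb2.GraphDef()
      with open(path, 'rb') as f:
        data = f.read()
      try:
        graph_def.ParseFromString(data)
      except DecodeError:
        text_format.Parse(data.decode('utf-8'), graph_def)
      return graph_def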
diff --git a/tensorflow/tools/benchmark/benchmark_model_test.cc b/tensorflow/tools/benchmark/benchmark_model_test.cc
index 16ab2ff66e..6813045d63 100644
--- a/tensorflow/tools/benchmark/benchmark_model_test.cc
+++ b/tensorflow/tools/benchmark/benchmark_model_test.cc
@@ -26,30 +26,36 @@ limitations under the License.
namespace tensorflow {
namespace {
-TEST(BenchmarkModelTest, InitializeAndRun) {
- const string dir = testing::TmpDir();
- const string filename_pb = io::JoinPath(dir, "graphdef.pb");
-
+void CreateTestGraph(const ::tensorflow::Scope& root,
+ benchmark_model::InputLayerInfo* input,
+ string* output_name, GraphDef* graph_def) {
// Create a simple graph and write it to filename_pb.
const int input_width = 400;
const int input_height = 10;
- benchmark_model::InputLayerInfo input;
- input.shape = TensorShape({input_width, input_height});
- input.data_type = DT_FLOAT;
+ input->shape = TensorShape({input_width, input_height});
+ input->data_type = DT_FLOAT;
const TensorShape constant_shape({input_height, input_width});
Tensor constant_tensor(DT_FLOAT, constant_shape);
test::FillFn<float>(&constant_tensor, [](int) -> float { return 3.0; });
- auto root = Scope::NewRootScope().ExitOnError();
auto placeholder =
- ops::Placeholder(root, DT_FLOAT, ops::Placeholder::Shape(input.shape));
- input.name = placeholder.node()->name();
+ ops::Placeholder(root, DT_FLOAT, ops::Placeholder::Shape(input->shape));
+ input->name = placeholder.node()->name();
auto m = ops::MatMul(root, placeholder, constant_tensor);
- const string output_name = m.node()->name();
+ *output_name = m.node()->name();
+ TF_ASSERT_OK(root.ToGraphDef(graph_def));
+}
+
+TEST(BenchmarkModelTest, InitializeAndRun) {
+ const string dir = testing::TmpDir();
+ const string filename_pb = io::JoinPath(dir, "graphdef.pb");
+ auto root = Scope::NewRootScope().ExitOnError();
+ benchmark_model::InputLayerInfo input;
+ string output_name;
GraphDef graph_def;
- TF_ASSERT_OK(root.ToGraphDef(&graph_def));
+ CreateTestGraph(root, &input, &output_name, &graph_def);
string graph_def_serialized;
graph_def.SerializeToString(&graph_def_serialized);
TF_ASSERT_OK(
@@ -69,5 +75,30 @@ TEST(BenchmarkModelTest, InitializeAndRun) {
ASSERT_EQ(num_runs, 10);
}
+TEST(BenchmarkModelTest, TextProto) {
+ const string dir = testing::TmpDir();
+ const string filename_txt = io::JoinPath(dir, "graphdef.pb.txt");
+ auto root = Scope::NewRootScope().ExitOnError();
+
+ benchmark_model::InputLayerInfo input;
+ string output_name;
+ GraphDef graph_def;
+ CreateTestGraph(root, &input, &output_name, &graph_def);
+ TF_ASSERT_OK(WriteTextProto(Env::Default(), filename_txt, graph_def));
+
+ std::unique_ptr<Session> session;
+ std::unique_ptr<GraphDef> loaded_graph_def;
+ TF_ASSERT_OK(benchmark_model::InitializeSession(1, filename_txt, &session,
+ &loaded_graph_def));
+ std::unique_ptr<StatSummarizer> stats;
+ stats.reset(new tensorflow::StatSummarizer(*(loaded_graph_def.get())));
+ int64 time;
+ int64 num_runs = 0;
+ TF_ASSERT_OK(benchmark_model::TimeMultipleRuns(
+ 0.0, 10, 0.0, {input}, {output_name}, {}, session.get(), stats.get(),
+ &time, &num_runs));
+ ASSERT_EQ(num_runs, 10);
+}
+
} // namespace
} // namespace tensorflow
diff --git a/tensorflow/tools/ci_build/Dockerfile.cmake b/tensorflow/tools/ci_build/Dockerfile.cmake
index d5dea4f3e4..b7450c83de 100644
--- a/tensorflow/tools/ci_build/Dockerfile.cmake
+++ b/tensorflow/tools/ci_build/Dockerfile.cmake
@@ -28,6 +28,8 @@ RUN pip install --upgrade astor
RUN pip install --upgrade gast
RUN pip install --upgrade numpy
RUN pip install --upgrade termcolor
+RUN pip install keras_applications==1.0.5
+RUN pip install keras_preprocessing==1.0.3
# Install golang
RUN apt-get install -t xenial-backports -y golang-1.9
diff --git a/tensorflow/tools/ci_build/Dockerfile.cpu.ppc64le b/tensorflow/tools/ci_build/Dockerfile.cpu.ppc64le
new file mode 100644
index 0000000000..ada2c63880
--- /dev/null
+++ b/tensorflow/tools/ci_build/Dockerfile.cpu.ppc64le
@@ -0,0 +1,20 @@
+FROM ubuntu:16.04
+
+LABEL maintainer="William Irons <wdirons@us.ibm.com>"
+
+# Copy and run the install scripts.
+COPY install/*.sh /install/
+RUN /install/install_bootstrap_deb_packages.sh
+RUN add-apt-repository -y ppa:openjdk-r/ppa
+RUN /install/install_deb_packages.sh
+RUN /install/install_openblas_ppc64le.sh
+RUN /install/install_hdf5_ppc64le.sh
+RUN /install/install_pip_packages.sh
+RUN /install/install_bazel_from_source.sh
+RUN /install/install_proto3.sh
+RUN /install/install_buildifier_from_source.sh
+RUN /install/install_auditwheel.sh
+RUN /install/install_golang_ppc64le.sh
+
+# Set up the master bazelrc configuration file.
+COPY install/.bazelrc /etc/bazel.bazelrc
diff --git a/tensorflow/tools/ci_build/Dockerfile.gpu b/tensorflow/tools/ci_build/Dockerfile.gpu
index 7591ecc04e..a4cad4b6c6 100644
--- a/tensorflow/tools/ci_build/Dockerfile.gpu
+++ b/tensorflow/tools/ci_build/Dockerfile.gpu
@@ -14,6 +14,7 @@ RUN /install/install_bootstrap_deb_packages.sh
RUN add-apt-repository -y ppa:openjdk-r/ppa && \
add-apt-repository -y ppa:george-edison55/cmake-3.x
RUN /install/install_deb_packages.sh
+
RUN /install/install_pip_packages.sh
RUN /install/install_bazel.sh
RUN /install/install_golang.sh
@@ -22,6 +23,11 @@ RUN /install/install_golang.sh
COPY install/.bazelrc /etc/bazel.bazelrc
ENV LD_LIBRARY_PATH /usr/local/cuda/extras/CUPTI/lib64:$LD_LIBRARY_PATH
+# Link the NCCL library and header where the build script expects them.
+RUN mkdir /usr/local/cuda-9.0/lib && \
+ ln -s /usr/lib/x86_64-linux-gnu/libnccl.so.2 /usr/local/cuda/lib/libnccl.so.2 && \
+ ln -s /usr/include/nccl.h /usr/local/cuda/include/nccl.h
+
# Configure the build for our CUDA configuration.
ENV TF_NEED_CUDA 1
-ENV TF_CUDA_COMPUTE_CAPABILITIES 3.0
+ENV TF_NEED_TENSORRT 1
diff --git a/tensorflow/tools/ci_build/Dockerfile.gpu.ppc64le b/tensorflow/tools/ci_build/Dockerfile.gpu.ppc64le
new file mode 100644
index 0000000000..0a55b84ac4
--- /dev/null
+++ b/tensorflow/tools/ci_build/Dockerfile.gpu.ppc64le
@@ -0,0 +1,33 @@
+FROM nvidia/cuda-ppc64le:9.2-cudnn7-devel-ubuntu16.04
+
+LABEL maintainer="William Irons <wdirons@us.ibm.com>"
+
+# In the Ubuntu 16.04 images, the cuDNN files are placed in system paths. Move
+# them to /usr/local/cuda.
+RUN cp -P /usr/include/cudnn.h /usr/local/cuda/include
+RUN cp -P /usr/lib/powerpc64le-linux-gnu/libcudnn* /usr/local/cuda/lib64
+
+# Copy and run the install scripts.
+COPY install/*.sh /install/
+ARG DEBIAN_FRONTEND=noninteractive
+RUN /install/install_bootstrap_deb_packages.sh
+RUN add-apt-repository -y ppa:openjdk-r/ppa
+RUN /install/install_deb_packages.sh
+RUN /install/install_openblas_ppc64le.sh
+RUN /install/install_hdf5_ppc64le.sh
+RUN /install/install_pip_packages.sh
+RUN /install/install_bazel_from_source.sh
+RUN /install/install_golang_ppc64le.sh
+
+# Set up the master bazelrc configuration file.
+COPY install/.bazelrc /etc/bazel.bazelrc
+ENV LD_LIBRARY_PATH /usr/local/cuda/extras/CUPTI/lib64:$LD_LIBRARY_PATH
+
+# Configure the build for our CUDA configuration.
+ENV TF_NEED_CUDA 1
+ENV TF_CUDA_COMPUTE_CAPABILITIES 3.0
+ENV TF_CUDA_VERSION 9.2
+ENV CUDA_TOOLKIT_PATH /usr/local/cuda-9.2
+
+# TODO get NCCL 2 in the docker image
+ENV TF_NCCL_VERSION 1
diff --git a/tensorflow/tools/ci_build/Dockerfile.rbe.cpu b/tensorflow/tools/ci_build/Dockerfile.rbe.cpu
index 6f0798b1af..7e5860aeec 100644
--- a/tensorflow/tools/ci_build/Dockerfile.rbe.cpu
+++ b/tensorflow/tools/ci_build/Dockerfile.rbe.cpu
@@ -1,4 +1,4 @@
-FROM launcher.gcr.io/google/rbe-debian8:r322167
+FROM launcher.gcr.io/google/rbe-ubuntu16-04:r327695
LABEL maintainer="Yu Yi <yiyu@google.com>"
# Copy install scripts
@@ -9,6 +9,6 @@ ENV CC /usr/local/bin/clang
ENV CXX /usr/local/bin/clang++
ENV AR /usr/bin/ar
-# Run pip install script for RBE Debian8 container.
+# Run pip install script for RBE Ubuntu 16-04 container.
RUN /install/install_pip_packages_remote.sh
RUN /install/install_pip_packages.sh
diff --git a/tensorflow/tools/ci_build/Dockerfile.rbe.cuda9.0-cudnn7-ubuntu14.04 b/tensorflow/tools/ci_build/Dockerfile.rbe.cuda9.0-cudnn7-ubuntu14.04
new file mode 100644
index 0000000000..a30858db82
--- /dev/null
+++ b/tensorflow/tools/ci_build/Dockerfile.rbe.cuda9.0-cudnn7-ubuntu14.04
@@ -0,0 +1,83 @@
+# To push a new version, run:
+# $ docker build -f Dockerfile.rbe.cuda9.0-cudnn7-ubuntu14.04 \
+# --tag "gcr.io/asci-toolchain/nosla-cuda9.0-cudnn7-ubuntu14.04" .
+# $ docker push gcr.io/asci-toolchain/nosla-cuda9.0-cudnn7-ubuntu14.04
+#
+# TODO(klimek): Include clang in this image so we can also target clang
+# builds.
+
+FROM ubuntu:14.04
+LABEL maintainer="Manuel Klimek <klimek@google.com>"
+
+RUN apt-get update && apt-get install -y --no-install-recommends ca-certificates apt-transport-https gnupg-curl && \
+ rm -rf /var/lib/apt/lists/* && \
+ NVIDIA_GPGKEY_SUM=d1be581509378368edeec8c1eb2958702feedf3bc3d17011adbf24efacce4ab5 && \
+ NVIDIA_GPGKEY_FPR=ae09fe4bbd223a84b2ccfce3f60f4b3d7fa2af80 && \
+ apt-key adv --fetch-keys https://developer.download.nvidia.com/compute/cuda/repos/ubuntu1404/x86_64/7fa2af80.pub && \
+ apt-key adv --export --no-emit-version -a $NVIDIA_GPGKEY_FPR | tail -n +2 > cudasign.pub && \
+ echo "$NVIDIA_GPGKEY_SUM cudasign.pub" | sha256sum -c --strict - && rm cudasign.pub && \
+ echo "deb https://developer.download.nvidia.com/compute/cuda/repos/ubuntu1604/x86_64 /" > /etc/apt/sources.list.d/cuda.list && \
+ echo "deb https://developer.download.nvidia.com/compute/machine-learning/repos/ubuntu1604/x86_64 /" > /etc/apt/sources.list.d/nvidia-ml.list
+
+ENV CUDA_VERSION 9.0.176
+ENV CUDA_PKG_VERSION 9-0=$CUDA_VERSION-1
+ENV PATH /usr/local/nvidia/bin:/usr/local/cuda/bin:${PATH}
+ENV NVIDIA_VISIBLE_DEVICES all
+ENV NVIDIA_DRIVER_CAPABILITIES compute,utility
+ENV NVIDIA_REQUIRE_CUDA "cuda>=9.0"
+ENV NCCL_VERSION 2.2.13
+ENV CUDNN_VERSION 7.2.1.38
+
+# TODO(b/110903506): /usr/local/cuda/lib64/stubs should not be needed in
+# LD_LIBRARY_PATH. The stubs/libcuda.so is not meant to be used at runtime. The
+# correct way to pass the path to bfd-ld is to pass
+# -Wl,-rpath-link=/usr/local/cuda/lib64/stubs to all binaries transitively
+# depending on libcuda. Ideally, builds targeting cuda would do that
+# internally.
+ENV LD_LIBRARY_PATH /usr/local/nvidia/lib:/usr/local/nvidia/lib64:/usr/local/cuda/lib64/stubs
+
+LABEL com.nvidia.volumes.needed="nvidia_driver"
+LABEL com.nvidia.cuda.version="${CUDA_VERSION}"
+LABEL com.nvidia.cudnn.version="${CUDNN_VERSION}"
+
+RUN apt-get update && apt-get install -y --no-install-recommends \
+ cuda-cudart-$CUDA_PKG_VERSION \
+ cuda-libraries-$CUDA_PKG_VERSION \
+ cuda-cublas-9-0=9.0.176.4-1 \
+ libnccl2=$NCCL_VERSION-1+cuda9.0 \
+ cuda-libraries-dev-$CUDA_PKG_VERSION \
+ cuda-nvml-dev-$CUDA_PKG_VERSION \
+ cuda-minimal-build-$CUDA_PKG_VERSION \
+ cuda-command-line-tools-$CUDA_PKG_VERSION \
+ cuda-core-9-0=9.0.176.3-1 \
+ cuda-cublas-dev-9-0=9.0.176.4-1 \
+ libnccl-dev=$NCCL_VERSION-1+cuda9.0 \
+ libcudnn7-dev=$CUDNN_VERSION-1+cuda9.0 \
+ libcudnn7=$CUDNN_VERSION-1+cuda9.0 && \
+ ln -s cuda-9.0 /usr/local/cuda && \
+ apt-mark hold libnccl2 && \
+ apt-mark hold libcudnn7 libcudnn7-dev && \
+ rm -rf /var/lib/apt/lists/*
+
+RUN echo "/usr/local/nvidia/lib" >> /etc/ld.so.conf.d/nvidia.conf && \
+ echo "/usr/local/nvidia/lib64" >> /etc/ld.so.conf.d/nvidia.conf
+
+# TODO(b/110903506): Provide a link to the SONAME of libcuda.so.
+# https://github.com/NVIDIA/nvidia-docker/issues/775
+RUN ln -s libcuda.so /usr/local/cuda/lib64/stubs/libcuda.so.1
+
+# TODO(klimek): Once the TODO in tensorflow's configure.py to correctly find
+# libnccl is resolved, delete this block.
+RUN ln -s /usr/lib/x86_64-linux-gnu/libnccl.so /usr/lib/libnccl.so \
+ && ln -s /usr/lib/x86_64-linux-gnu/libnccl.so /usr/lib/libnccl.so.2
+
+# Copy and run the install scripts.
+COPY install/*.sh /install/
+ARG DEBIAN_FRONTEND=noninteractive
+RUN /install/install_bootstrap_deb_packages.sh
+RUN add-apt-repository -y ppa:openjdk-r/ppa && \
+ add-apt-repository -y ppa:george-edison55/cmake-3.x
+RUN /install/install_deb_packages.sh
+RUN /install/install_pip_packages.sh
+RUN /install/install_golang.sh
+
diff --git a/tensorflow/tools/ci_build/Dockerfile.rbe.gpu b/tensorflow/tools/ci_build/Dockerfile.rbe.gpu
index 24ff4765a6..b656205836 100644
--- a/tensorflow/tools/ci_build/Dockerfile.rbe.gpu
+++ b/tensorflow/tools/ci_build/Dockerfile.rbe.gpu
@@ -19,8 +19,8 @@ RUN /install/install_golang.sh
# Install clang from pre-built package
RUN cd /tmp && \
- wget https://storage.googleapis.com/clang-builds-stable/clang-ubuntu16_04/clang_r323528.tar.gz && \
- echo "26752d9f5785df07193fac8316ba5d5ba3bec36d970c29a1577360848818ac74 clang_r323528.tar.gz" | sha256sum -c && \
+ wget https://storage.googleapis.com/clang-builds-stable/clang-ubuntu16_04/clang_r337145.tar.gz && \
+ echo "ab98c63eb09c04112cc992bc95ebc0dcea8c5e9d0760438789be2896cdc69ff8 clang_r337145.tar.gz" | sha256sum -c && \
- tar -C /usr/local -xf clang_r323528.tar.gz && \
+ tar -C /usr/local -xf clang_r337145.tar.gz && \
- rm clang_r323528.tar.gz
+ rm clang_r337145.tar.gz
diff --git a/tensorflow/tools/ci_build/README.md b/tensorflow/tools/ci_build/README.md
index f2161b700a..e2fd977f50 100644
--- a/tensorflow/tools/ci_build/README.md
+++ b/tensorflow/tools/ci_build/README.md
@@ -24,7 +24,7 @@ natively on your system.
### Run TensorFlow CI Scripts Natively on your Machine
-1. Follow the instructions at https://www.tensorflow.org/install/install_sources,
+1. Follow the instructions at https://www.tensorflow.org/install/source,
but stop when you get to the section "Configure the installation". You do not
need to configure the installation to run the CI scripts.
diff --git a/tensorflow/tools/ci_build/builds/android.sh b/tensorflow/tools/ci_build/builds/android.sh
index d81793efe0..7c3e308229 100755
--- a/tensorflow/tools/ci_build/builds/android.sh
+++ b/tensorflow/tools/ci_build/builds/android.sh
@@ -26,13 +26,19 @@ configure_android_workspace
# android_full.sh
echo "========== TensorFlow Demo Build Test =========="
+TARGETS=
+TARGETS+=" //tensorflow/examples/android:tensorflow_demo"
+# Also build the Eager Runtime so it remains compatible with Android for the
+# benefit of clients like TensorFlow Lite. For now it is enough to build only
+# :execute, which is what TF Lite needs.
+TARGETS+=" //tensorflow/core/common_runtime/eager:execute"
# Enable sandboxing so that zip archives don't get incorrectly packaged
# in assets/ dir (see https://github.com/bazelbuild/bazel/issues/2334)
# TODO(gunan): remove extra flags once sandboxing is enabled for all builds.
bazel --bazelrc=/dev/null build \
--compilation_mode=opt --cxxopt=-std=c++11 --fat_apk_cpu=x86_64 \
--spawn_strategy=sandboxed --genrule_strategy=sandboxed \
- //tensorflow/examples/android:tensorflow_demo
+ ${TARGETS}
echo "========== Makefile Build Test =========="
# Test Makefile build just to make sure it still works.
diff --git a/tensorflow/tools/ci_build/builds/pip.sh b/tensorflow/tools/ci_build/builds/pip.sh
index 82042b93c0..fef121ab5a 100755
--- a/tensorflow/tools/ci_build/builds/pip.sh
+++ b/tensorflow/tools/ci_build/builds/pip.sh
@@ -123,6 +123,10 @@ done
BAZEL_FLAGS=$(str_strip "${BAZEL_FLAGS}")
+if [[ -z "$GIT_TAG_OVERRIDE" ]]; then
+ BAZEL_FLAGS+=" --action_env=GIT_TAG_OVERRIDE"
+fi
+
echo "Using Bazel flags: ${BAZEL_FLAGS}"
PIP_BUILD_TARGET="//tensorflow/tools/pip_package:build_pip_package"
@@ -310,7 +314,10 @@ create_activate_virtualenv_and_install_tensorflow() {
# Upgrade pip so it supports tags such as cp27mu, manylinux1 etc.
echo "Upgrade pip in virtualenv"
- pip install --upgrade pip==9.0.1
+
+ # NOTE: pip install --upgrade pip leads to a documented TLS issue for
+ # some Python versions.
+ curl https://bootstrap.pypa.io/get-pip.py | python
# Force tensorflow reinstallation. Otherwise it may not get installed from
# last build if it had the same version number as previous build.
@@ -318,6 +325,10 @@ create_activate_virtualenv_and_install_tensorflow() {
pip install -v ${PIP_FLAGS} ${WHL_PATH} || \
die "pip install (forcing to reinstall tensorflow) FAILED"
echo "Successfully installed pip package ${TF_WHEEL_PATH}"
+
+ # Force downgrade setuptools.
+ pip install --upgrade setuptools==39.1.0
+
}
################################################################################
diff --git a/tensorflow/tools/ci_build/builds/run_pip_tests.sh b/tensorflow/tools/ci_build/builds/run_pip_tests.sh
index 29680e6882..17198a6560 100755
--- a/tensorflow/tools/ci_build/builds/run_pip_tests.sh
+++ b/tensorflow/tools/ci_build/builds/run_pip_tests.sh
@@ -64,7 +64,7 @@ while true; do
fi
done
-TF_GPU_COUNT=${TF_GPU_COUNT:-8}
+TF_GPU_COUNT=${TF_GPU_COUNT:-4}
# PIP tests should have a "different" path. Different than the one we place
# virtualenv, because we are deleting and recreating it here.
@@ -76,7 +76,7 @@ ln -s $(pwd)/tensorflow ${PIP_TEST_ROOT}/tensorflow
# Do not run tests with "no_pip" tag. If running GPU tests, also do not run
# tests with no_pip_gpu tag.
-PIP_TEST_FILTER_TAG="-no_pip,-no_oss"
+PIP_TEST_FILTER_TAG="-no_pip,-no_oss,-benchmark-test"
if [[ ${IS_OSS_SERIAL} == "1" ]]; then
PIP_TEST_FILTER_TAG="$(echo "${PIP_TEST_FILTER_TAG}" | sed s/-no_oss//)"
PIP_TEST_FILTER_TAG="${PIP_TEST_FILTER_TAG},oss_serial"
@@ -85,7 +85,7 @@ else
fi
if [[ ${IS_GPU} == "1" ]]; then
- PIP_TEST_FILTER_TAG="-no_pip_gpu,${PIP_TEST_FILTER_TAG}"
+ PIP_TEST_FILTER_TAG="-no_gpu,-no_pip_gpu,${PIP_TEST_FILTER_TAG}"
fi
if [[ ${IS_MAC} == "1" ]]; then
PIP_TEST_FILTER_TAG="-nomac,${PIP_TEST_FILTER_TAG}"
@@ -97,7 +97,8 @@ fi
# TF_BUILD_APPEND_ARGUMENTS any user supplied args.
BAZEL_FLAGS="--define=no_tensorflow_py_deps=true --test_lang_filters=py \
--build_tests_only -k --test_tag_filters=${PIP_TEST_FILTER_TAG} \
- --test_timeout 300,450,1200,3600 ${TF_BUILD_APPEND_ARGUMENTS}"
+ --test_timeout 300,450,1200,3600 ${TF_BUILD_APPEND_ARGUMENTS} \
+ --test_output=errors"
BAZEL_TEST_TARGETS="//${PIP_TEST_PREFIX}/tensorflow/contrib/... \
//${PIP_TEST_PREFIX}/tensorflow/python/... \
diff --git a/tensorflow/tools/ci_build/builds/test_user_ops.sh b/tensorflow/tools/ci_build/builds/test_user_ops.sh
index caa3a40817..25ecee4725 100755
--- a/tensorflow/tools/ci_build/builds/test_user_ops.sh
+++ b/tensorflow/tools/ci_build/builds/test_user_ops.sh
@@ -213,27 +213,35 @@ USER_OP=$(echo "${USER_OP_SO}" | sed -e 's/\.so//')
echo "Invoking user op ${USER_OP} defined in file ${USER_OP_SO} "\
"via pip installation"
-ORIG_OUTPUT=$("${PYTHON_BIN_PATH}" -c "import tensorflow as tf; print(tf.Session('').run(tf.load_op_library('./${USER_OP_SO}').${USER_OP}(${OP_INPUT})))")
+function run_op() {
+ local ORIG_OUTPUT=$1
+ local ADDITIONAL_LOG=$2
+
+ # Format OUTPUT for analysis
+ if [[ -z $(echo "${ORIG_OUTPUT}" | grep -o ',') ]]; then
+ if [[ ${IS_MAC} == "1" ]]; then
+ local OUTPUT=$(echo "${ORIG_OUTPUT}" | sed -E -e 's/[ \t]+/,/g')
+ else
+ local OUTPUT=$(echo "${ORIG_OUTPUT}" | sed -r -e 's/[ \t]+/,/g')
+ fi
+ else
+ local OUTPUT="${ORIG_OUTPUT}"
+ fi
-# Format OUTPUT for analysis
-if [[ -z $(echo "${ORIG_OUTPUT}" | grep -o ',') ]]; then
- if [[ ${IS_MAC} == "1" ]]; then
- OUTPUT=$(echo "${ORIG_OUTPUT}" | sed -E -e 's/[ \t]+/,/g')
+ local EQUALS_EXPECTED=$("${PYTHON_BIN_PATH}" -c "print(${OUTPUT} == ${EXPECTED_OUTPUT})")
+
+ if [[ "${EQUALS_EXPECTED}" != "True" ]]; then
+ local ERROR="FAILED: Output from user op (${OUTPUT}) does not match expected output ${EXPECTED_OUTPUT}${ADDITIONAL_LOG}"
+ die "${ERROR}"
else
- OUTPUT=$(echo "${ORIG_OUTPUT}" | sed -r -e 's/[ \t]+/,/g')
+ echo "Output from user op (${OUTPUT}) matches expected output"
fi
-else
- OUTPUT="${ORIG_OUTPUT}"
-fi
+}
-EQUALS_EXPECTED=$("${PYTHON_BIN_PATH}" -c "print(${OUTPUT} == ${EXPECTED_OUTPUT})")
+run_op "$("${PYTHON_BIN_PATH}" -c "import tensorflow as tf; print(tf.Session('').run(tf.load_op_library('./${USER_OP_SO}').${USER_OP}(${OP_INPUT})))")"
+run_op "$("${PYTHON_BIN_PATH}" -c "import tensorflow as tf; tf.enable_eager_execution(); print(tf.load_op_library('./${USER_OP_SO}').${USER_OP}(${OP_INPUT}).numpy())")" " in eager mode"
-if [[ "${EQUALS_EXPECTED}" != "True" ]]; then
- die "FAILED: Output from user op (${OUTPUT}) does not match expected "\
-"output ${EXPECTED_OUTPUT}"
-else
- echo "Output from user op (${OUTPUT}) matches expected output"
-fi
popd
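The run_op helper above wraps the output comparison so the same check runs for both graph mode and eager mode. A minimal standalone version of the comparison it performs, with illustrative values in place of the real op output:

  OUTPUT="[42, 43]"
  EXPECTED_OUTPUT="[42, 43]"
  # Let Python compare the two literal lists, exactly as the script does.
  EQUALS_EXPECTED=$(python -c "print(${OUTPUT} == ${EXPECTED_OUTPUT})")
  if [[ "${EQUALS_EXPECTED}" != "True" ]]; then
    echo "FAILED: ${OUTPUT} does not match ${EXPECTED_OUTPUT}"
  else
    echo "Output matches expected output"
  fi
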
diff --git a/tensorflow/tools/ci_build/builds/with_the_same_user b/tensorflow/tools/ci_build/builds/with_the_same_user
index d4bf546d40..b216e3549f 100755
--- a/tensorflow/tools/ci_build/builds/with_the_same_user
+++ b/tensorflow/tools/ci_build/builds/with_the_same_user
@@ -40,7 +40,7 @@ if [ -n "${CI_BUILD_USER_FORCE_BADNAME}" ]; then
ADDUSER_OPTS="--force-badname"
fi
-getent group "${CI_BUILD_GID}" || addgroup --gid "${CI_BUILD_GID}" "${CI_BUILD_GROUP}"
+getent group "${CI_BUILD_GID}" || addgroup ${ADDUSER_OPTS} --gid "${CI_BUILD_GID}" "${CI_BUILD_GROUP}"
getent passwd "${CI_BUILD_UID}" || adduser ${ADDUSER_OPTS} \
--gid "${CI_BUILD_GID}" --uid "${CI_BUILD_UID}" \
--gecos "${CI_BUILD_USER} (generated by with_the_same_user script)" \
diff --git a/tensorflow/tools/ci_build/ci_build.sh b/tensorflow/tools/ci_build/ci_build.sh
index 072dd6ab99..77265e0f50 100755
--- a/tensorflow/tools/ci_build/ci_build.sh
+++ b/tensorflow/tools/ci_build/ci_build.sh
@@ -79,7 +79,7 @@ if [[ "${CONTAINER_TYPE}" == "cmake" ]]; then
fi
# Use nvidia-docker if the container is GPU.
-if [[ "${CONTAINER_TYPE}" == "gpu" ]]; then
+if [[ "${CONTAINER_TYPE}" == gpu* ]]; then
DOCKER_BINARY="nvidia-docker"
else
DOCKER_BINARY="docker"
@@ -99,7 +99,7 @@ BUILD_TAG="${BUILD_TAG:-tf_ci}"
# Add extra params for cuda devices and libraries for GPU container.
# And clear them if we are not building for GPU.
-if [[ "${CONTAINER_TYPE}" != "gpu" ]]; then
+if [[ "${CONTAINER_TYPE}" != gpu* ]]; then
GPU_EXTRA_PARAMS=""
fi
@@ -115,6 +115,7 @@ DOCKER_IMG_NAME=$(echo "${DOCKER_IMG_NAME}" | tr '[:upper:]' '[:lower:]')
# Print arguments.
echo "WORKSPACE: ${WORKSPACE}"
+echo "CI_DOCKER_BUILD_EXTRA_PARAMS: ${CI_DOCKER_BUILD_EXTRA_PARAMS[*]}"
echo "CI_DOCKER_EXTRA_PARAMS: ${CI_DOCKER_EXTRA_PARAMS[*]}"
echo "COMMAND: ${COMMAND[*]}"
echo "CI_COMMAND_PREFIX: ${CI_COMMAND_PREFIX[*]}"
@@ -126,7 +127,7 @@ echo ""
# Build the docker container.
echo "Building container (${DOCKER_IMG_NAME})..."
-docker build -t ${DOCKER_IMG_NAME} \
+docker build -t ${DOCKER_IMG_NAME} ${CI_DOCKER_BUILD_EXTRA_PARAMS[@]} \
-f "${DOCKERFILE_PATH}" "${DOCKER_CONTEXT_PATH}"
# Check docker build status
@@ -134,6 +135,12 @@ if [[ $? != "0" ]]; then
die "ERROR: docker build failed. Dockerfile is at ${DOCKERFILE_PATH}"
fi
+# If caller wants the with_the_same_user script to allow bad usernames,
+# pass the var to the docker environment
+if [ -n "${CI_BUILD_USER_FORCE_BADNAME}" ]; then
+ CI_BUILD_USER_FORCE_BADNAME_ENV="-e CI_BUILD_USER_FORCE_BADNAME=yes"
+fi
+
# Run the command inside the container.
echo "Running '${COMMAND[*]}' inside ${DOCKER_IMG_NAME}..."
mkdir -p ${WORKSPACE}/bazel-ci_build-cache
@@ -148,6 +155,7 @@ ${DOCKER_BINARY} run --rm --pid=host \
-e "CI_BUILD_GROUP=$(id -g -n)" \
-e "CI_BUILD_GID=$(id -g)" \
-e "CI_TENSORFLOW_SUBMODULE_PATH=${CI_TENSORFLOW_SUBMODULE_PATH}" \
+ ${CI_BUILD_USER_FORCE_BADNAME_ENV} \
-v ${WORKSPACE}:/workspace \
-w /workspace \
${GPU_EXTRA_PARAMS} \
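After these changes, any container type beginning with "gpu" selects nvidia-docker, extra arguments can be forwarded to docker build through CI_DOCKER_BUILD_EXTRA_PARAMS, and CI_BUILD_USER_FORCE_BADNAME is passed into the container for with_the_same_user. A hedged usage sketch; the proxy build argument is purely illustrative:

  # Forward a build-time proxy and allow non-standard usernames in the container.
  CI_DOCKER_BUILD_EXTRA_PARAMS="--build-arg http_proxy=http://proxy.example.com:3128" \
  CI_BUILD_USER_FORCE_BADNAME=yes \
    tensorflow/tools/ci_build/ci_build.sh gpu tensorflow/tools/ci_build/linux/gpu/run_cc_core.sh
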
diff --git a/tensorflow/tools/ci_build/ci_parameterized_build.sh b/tensorflow/tools/ci_build/ci_parameterized_build.sh
index 9d23b508aa..49a9048c03 100755
--- a/tensorflow/tools/ci_build/ci_parameterized_build.sh
+++ b/tensorflow/tools/ci_build/ci_parameterized_build.sh
@@ -59,6 +59,9 @@
# TF_BUILD_BAZEL_CLEAN:
# Will perform "bazel clean", if and only if this variable
# is set to any non-empty and non-0 value
+# TF_BAZEL_BUILD_ONLY:
+# If it is set to any non-empty value that is not "0", Bazel
+# will only build specified targets
# TF_GPU_COUNT:
# Run this many parallel tests for serial builds.
# For now, only can be edited for PIP builds.
@@ -83,7 +86,7 @@
# When set, overrides TF_BUILD_IS_OPT and TF_BUILD_MAVX
# options, as this will replace the two.
# TF_SKIP_CONTRIB_TESTS:
-# If set to any non-empty or non-0 value, will skipp running
+# If set to any non-empty or non-0 value, will skip running
# contrib tests.
# TF_NIGHTLY:
# If this run is being used to build the tf_nightly pip
@@ -94,10 +97,6 @@
#
# This script can be used by Jenkins parameterized / matrix builds.
-# TODO(jhseu): Temporary for the gRPC pull request due to the
-# protobuf -> protobuf_archive rename. Remove later.
-TF_BUILD_BAZEL_CLEAN=1
-
# Helper function: Convert to lower case
to_lower () {
echo "$1" | tr '[:upper:]' '[:lower:]'
@@ -128,11 +127,19 @@ NO_DOCKER_OPT_FLAG="--genrule_strategy=standalone"
DO_DOCKER=1
-BAZEL_CMD="bazel test"
-BAZEL_BUILD_ONLY_CMD="bazel build"
-BAZEL_CLEAN_CMD="bazel clean"
-DEFAULT_BAZEL_CONFIGS="--config=gcp --config=hdfs"
+# Helpful flags:
+# --test_summary=detailed: Tell us more about which targets are being built
+# --keep_going: Don't stop at the first failure; tell us all the failures
+# --build_tests_only: Don't build targets depended on by tests if the test is
+# disabled. Also saves some compilation time. Otherwise,
+# tries to build everything.
+BAZEL_TEST_FLAGS="--test_summary=detailed --build_tests_only --keep_going"
+BAZEL_BUILD_FLAGS="--keep_going"
+
+BAZEL_CMD="bazel test ${BAZEL_TEST_FLAGS}"
+BAZEL_BUILD_ONLY_CMD="bazel build ${BAZEL_BUILD_FLAGS}"
+BAZEL_CLEAN_CMD="bazel clean"
PIP_CMD="${CI_BUILD_DIR}/builds/pip.sh"
PIP_TEST_TUTORIALS_FLAG="--test_tutorials"
@@ -140,7 +147,7 @@ PIP_INTEGRATION_TESTS_FLAG="--integration_tests"
ANDROID_CMD="${CI_BUILD_DIR}/builds/android.sh"
ANDROID_FULL_CMD="${CI_BUILD_DIR}/builds/android_full.sh"
-TF_GPU_COUNT=${TF_GPU_COUNT:-8}
+TF_GPU_COUNT=${TF_GPU_COUNT:-4}
PARALLEL_GPU_TEST_CMD='//tensorflow/tools/ci_build/gpu_build:parallel_gpu_execute'
BENCHMARK_CMD="${CI_BUILD_DIR}/builds/benchmark.sh"
@@ -149,39 +156,7 @@ EXTRA_PARAMS=""
BAZEL_TARGET="//tensorflow/... -//tensorflow/compiler/..."
if [[ -n "$TF_SKIP_CONTRIB_TESTS" ]]; then
- BAZEL_TARGET="$BAZEL_TARGET -//tensorflow/contrib/..."
-else
- BAZEL_TARGET="${BAZEL_TARGET} -//tensorflow/contrib/lite/..."
- BAZEL_TARGET="${BAZEL_TARGET} //tensorflow/contrib/lite:context_test"
- BAZEL_TARGET="${BAZEL_TARGET} //tensorflow/contrib/lite:framework"
- BAZEL_TARGET="${BAZEL_TARGET} //tensorflow/contrib/lite:interpreter_test"
- BAZEL_TARGET="${BAZEL_TARGET} //tensorflow/contrib/lite:model_test"
- BAZEL_TARGET="${BAZEL_TARGET} //tensorflow/contrib/lite/toco:toco"
- BAZEL_TARGET="${BAZEL_TARGET} //tensorflow/contrib/lite:simple_memory_arena_test"
- BAZEL_TARGET="${BAZEL_TARGET} //tensorflow/contrib/lite:string_util_test"
- BAZEL_TARGET="${BAZEL_TARGET} //tensorflow/contrib/lite/kernels:activations_test"
- BAZEL_TARGET="${BAZEL_TARGET} //tensorflow/contrib/lite/kernels:add_test"
- BAZEL_TARGET="${BAZEL_TARGET} //tensorflow/contrib/lite/kernels:basic_rnn_test"
- BAZEL_TARGET="${BAZEL_TARGET} //tensorflow/contrib/lite/kernels:concatenation_test"
- BAZEL_TARGET="${BAZEL_TARGET} //tensorflow/contrib/lite/kernels:conv_test"
- BAZEL_TARGET="${BAZEL_TARGET} //tensorflow/contrib/lite/kernels:depthwise_conv_test"
- BAZEL_TARGET="${BAZEL_TARGET} //tensorflow/contrib/lite/kernels:embedding_lookup_test"
- BAZEL_TARGET="${BAZEL_TARGET} //tensorflow/contrib/lite/kernels:embedding_lookup_sparse_test"
- BAZEL_TARGET="${BAZEL_TARGET} //tensorflow/contrib/lite/kernels:fully_connected_test"
- # BAZEL_TARGET="${BAZEL_TARGET} //tensorflow/contrib/lite/testing:generated_examples_zip_test"
- BAZEL_TARGET="${BAZEL_TARGET} //tensorflow/contrib/lite/kernels:hashtable_lookup_test"
- BAZEL_TARGET="${BAZEL_TARGET} //tensorflow/contrib/lite/kernels:local_response_norm_test"
- BAZEL_TARGET="${BAZEL_TARGET} //tensorflow/contrib/lite/kernels:lsh_projection_test"
- BAZEL_TARGET="${BAZEL_TARGET} //tensorflow/contrib/lite/kernels:lstm_test"
- BAZEL_TARGET="${BAZEL_TARGET} //tensorflow/contrib/lite/kernels:l2norm_test"
- BAZEL_TARGET="${BAZEL_TARGET} //tensorflow/contrib/lite/kernels:mul_test"
- BAZEL_TARGET="${BAZEL_TARGET} //tensorflow/contrib/lite/kernels:pooling_test"
- BAZEL_TARGET="${BAZEL_TARGET} //tensorflow/contrib/lite/kernels:reshape_test"
- BAZEL_TARGET="${BAZEL_TARGET} //tensorflow/contrib/lite/kernels:resize_bilinear_test"
- BAZEL_TARGET="${BAZEL_TARGET} //tensorflow/contrib/lite/kernels:skip_gram_test"
- BAZEL_TARGET="${BAZEL_TARGET} //tensorflow/contrib/lite/kernels:softmax_test"
- BAZEL_TARGET="${BAZEL_TARGET} //tensorflow/contrib/lite/kernels:space_to_depth_test"
- BAZEL_TARGET="${BAZEL_TARGET} //tensorflow/contrib/lite/kernels:svdf_test"
+ BAZEL_TARGET="${BAZEL_TARGET} -//tensorflow/contrib/..."
fi
TUT_TEST_DATA_DIR="/tmp/tf_tutorial_test_data"
@@ -237,7 +212,7 @@ function get_cuda_capability_version() {
CTYPE=${TF_BUILD_CONTAINER_TYPE}
# Determine if the machine is a Mac
-OPT_FLAG=""
+OPT_FLAG="--test_output=errors"
if [[ "$(uname -s)" == "Darwin" ]]; then
DO_DOCKER=0
@@ -263,9 +238,9 @@ function set_script_variable() {
# Process container type
-if [[ ${CTYPE} == "cpu" ]] || [[ ${CTYPE} == "debian.jessie.cpu" ]]; then
+if [[ ${CTYPE} == cpu* ]] || [[ ${CTYPE} == "debian.jessie.cpu" ]]; then
:
-elif [[ ${CTYPE} == "gpu" ]]; then
+elif [[ ${CTYPE} == gpu* ]]; then
set_script_variable TF_NEED_CUDA 1
if [[ $TF_CUDA_CLANG == "1" ]]; then
@@ -408,6 +383,10 @@ else
if [[ ${IS_MAC} == "1" ]]; then
EXTRA_ARGS="${EXTRA_ARGS},-nomac"
fi
+ EXTRA_ARGS="${EXTRA_ARGS} --build_tag_filters=-no_oss,-oss_serial,-benchmark-test"
+ if [[ ${IS_MAC} == "1" ]]; then
+ EXTRA_ARGS="${EXTRA_ARGS},-nomac"
+ fi
fi
# For any "tool" dependencies in genrules, Bazel will build them for host
@@ -415,6 +394,11 @@ fi
# this flag, and it only affects a few tests.
EXTRA_ARGS="${EXTRA_ARGS} --distinct_host_configuration=false"
+if [[ ! -z "${TF_BAZEL_BUILD_ONLY}" ]] &&
+ [[ "${TF_BAZEL_BUILD_ONLY}" != "0" ]];then
+ BAZEL_CMD=${BAZEL_BUILD_ONLY_CMD}
+fi
+
# Process PIP install-test option
if [[ ${TF_BUILD_IS_PIP} == "no_pip" ]] ||
[[ ${TF_BUILD_IS_PIP} == "both" ]]; then
@@ -423,12 +407,12 @@ if [[ ${TF_BUILD_IS_PIP} == "no_pip" ]] ||
BAZEL_TARGET=${TF_BUILD_BAZEL_TARGET}
fi
- if [[ ${CTYPE} == "cpu" ]] || \
+ if [[ ${CTYPE} == cpu* ]] || \
[[ ${CTYPE} == "debian.jessie.cpu" ]]; then
# CPU only command, fully parallel.
NO_PIP_MAIN_CMD="${MAIN_CMD} ${BAZEL_CMD} ${OPT_FLAG} ${EXTRA_ARGS} -- "\
"${BAZEL_TARGET}"
- elif [[ ${CTYPE} == "gpu" ]]; then
+ elif [[ ${CTYPE} == gpu* ]]; then
# GPU only command, run as many jobs as the GPU count only.
NO_PIP_MAIN_CMD="${BAZEL_CMD} ${OPT_FLAG} "\
"--local_test_jobs=${TF_GPU_COUNT} "\
@@ -567,33 +551,35 @@ echo ""
TMP_DIR=""
DOCKERFILE_FLAG=""
-if [[ "${TF_BUILD_PYTHON_VERSION}" == "python3.5" ]] ||
- [[ "${TF_BUILD_PYTHON_VERSION}" == "python3.6" ]]; then
- # Modify Dockerfile for Python3.5 | Python3.6 build
- TMP_DIR=$(mktemp -d)
- echo "Docker build will occur in temporary directory: ${TMP_DIR}"
-
- # Copy the files required for the docker build
- SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
- cp -r "${SCRIPT_DIR}/install" "${TMP_DIR}/install" || \
- die "ERROR: Failed to copy directory ${SCRIPT_DIR}/install"
-
- DOCKERFILE="${SCRIPT_DIR}/Dockerfile.${TF_BUILD_CONTAINER_TYPE}"
- cp "${DOCKERFILE}" "${TMP_DIR}/" || \
- die "ERROR: Failed to copy Dockerfile at ${DOCKERFILE}"
- DOCKERFILE="${TMP_DIR}/Dockerfile.${TF_BUILD_CONTAINER_TYPE}"
-
- # Replace a line in the Dockerfile
- if sed -i \
- "s/RUN \/install\/install_pip_packages.sh/RUN \/install\/install_${TF_BUILD_PYTHON_VERSION}_pip_packages.sh/g" \
- "${DOCKERFILE}"
- then
- echo "Copied and modified Dockerfile for ${TF_BUILD_PYTHON_VERSION} build: ${DOCKERFILE}"
- else
- die "ERROR: Faild to copy and modify Dockerfile: ${DOCKERFILE}"
- fi
+if [[ "${DO_DOCKER}" == "1" ]]; then
+ if [[ "${TF_BUILD_PYTHON_VERSION}" == "python3.5" ]] ||
+ [[ "${TF_BUILD_PYTHON_VERSION}" == "python3.6" ]]; then
+ # Modify Dockerfile for Python3.5 | Python3.6 build
+ TMP_DIR=$(mktemp -d)
+ echo "Docker build will occur in temporary directory: ${TMP_DIR}"
+
+ # Copy the files required for the docker build
+ SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
+ cp -r "${SCRIPT_DIR}/install" "${TMP_DIR}/install" || \
+ die "ERROR: Failed to copy directory ${SCRIPT_DIR}/install"
+
+ DOCKERFILE="${SCRIPT_DIR}/Dockerfile.${TF_BUILD_CONTAINER_TYPE}"
+ cp "${DOCKERFILE}" "${TMP_DIR}/" || \
+ die "ERROR: Failed to copy Dockerfile at ${DOCKERFILE}"
+ DOCKERFILE="${TMP_DIR}/Dockerfile.${TF_BUILD_CONTAINER_TYPE}"
+
+ # Replace a line in the Dockerfile
+ if sed -i \
+ "s/RUN \/install\/install_pip_packages.sh/RUN \/install\/install_${TF_BUILD_PYTHON_VERSION}_pip_packages.sh/g" \
+ "${DOCKERFILE}"
+ then
+ echo "Copied and modified Dockerfile for ${TF_BUILD_PYTHON_VERSION} build: ${DOCKERFILE}"
+ else
+ die "ERROR: Faild to copy and modify Dockerfile: ${DOCKERFILE}"
+ fi
- DOCKERFILE_FLAG="--dockerfile ${DOCKERFILE}"
+ DOCKERFILE_FLAG="--dockerfile ${DOCKERFILE}"
+ fi
fi
chmod +x ${TMP_SCRIPT}
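The new TF_BAZEL_BUILD_ONLY switch replaces the test command with a build-only command for the selected targets, and the Dockerfile rewrite is now skipped entirely when Docker is not used (for example on Mac). A hedged sketch of a build-only invocation driven purely by the environment variables documented in this script's header; the specific values are illustrative:

  TF_BUILD_CONTAINER_TYPE=cpu \
  TF_BUILD_PYTHON_VERSION=python2 \
  TF_BUILD_IS_PIP=no_pip \
  TF_BAZEL_BUILD_ONLY=1 \
    tensorflow/tools/ci_build/ci_parameterized_build.sh
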
diff --git a/tensorflow/tools/ci_build/ci_sanity.sh b/tensorflow/tools/ci_build/ci_sanity.sh
index 9627475d84..a98c15d961 100755
--- a/tensorflow/tools/ci_build/ci_sanity.sh
+++ b/tensorflow/tools/ci_build/ci_sanity.sh
@@ -99,9 +99,11 @@ do_pylint() {
"^tensorflow/contrib/layers/python/layers/feature_column\.py.*\[E0110.*abstract-class-instantiated "\
"^tensorflow/contrib/eager/python/evaluator\.py.*\[E0202.*method-hidden "\
"^tensorflow/contrib/eager/python/metrics_impl\.py.*\[E0202.*method-hidden "\
+"^tensorflow/contrib/rate/rate\.py.*\[E0202.*method-hidden "\
"^tensorflow/python/platform/gfile\.py.*\[E0301.*non-iterator "\
-"^tensorflow/python/keras/_impl/keras/callbacks\.py.*\[E1133.*not-an-iterable "\
-"^tensorflow/python/keras/_impl/keras/layers/recurrent\.py.*\[E0203.*access-member-before-definition "\
+"^tensorflow/python/keras/callbacks\.py.*\[E1133.*not-an-iterable "\
+"^tensorflow/python/keras/engine/base_layer.py.*\[E0203.*access-member-before-definition "\
+"^tensorflow/python/keras/layers/recurrent\.py.*\[E0203.*access-member-before-definition "\
"^tensorflow/python/kernel_tests/constant_op_eager_test.py.*\[E0303.*invalid-length-returned"
echo "ERROR_WHITELIST=\"${ERROR_WHITELIST}\""
@@ -348,12 +350,12 @@ do_external_licenses_check(){
# Blacklist
echo ${MISSING_LICENSES_FILE}
- grep -e "@bazel_tools//third_party/" -e "@com_google_absl//absl" -e "@org_tensorflow//" -v ${MISSING_LICENSES_FILE} > temp.txt
+ grep -e "@bazel_tools//third_party/" -e "@com_google_absl//absl" -e "@org_tensorflow//" -e "@com_github_googlecloudplatform_google_cloud_cpp//google" -v ${MISSING_LICENSES_FILE} > temp.txt
mv temp.txt ${MISSING_LICENSES_FILE}
# Whitelist
echo ${EXTRA_LICENSE_FILE}
- grep -e "@bazel_tools//src" -e "@bazel_tools//tools/" -e "@com_google_absl//" -e "//external" -e "@local" -v ${EXTRA_LICENSES_FILE} > temp.txt
+ grep -e "@bazel_tools//src" -e "@bazel_tools//tools/" -e "@com_google_absl//" -e "//external" -e "@local" -e "@com_github_googlecloudplatform_google_cloud_cpp//" -e "@embedded_jdk//" -v ${EXTRA_LICENSES_FILE} > temp.txt
mv temp.txt ${EXTRA_LICENSES_FILE}
@@ -542,7 +544,7 @@ SANITY_STEPS=("do_pylint PYTHON2" "do_pylint PYTHON3" "do_check_futures_test" "d
SANITY_STEPS_DESC=("Python 2 pylint" "Python 3 pylint" "Check that python files have certain __future__ imports" "buildifier check" "bazel nobuild" "pip: license check for external dependencies" "C library: license check for external dependencies" "Java Native Library: license check for external dependencies" "Pip Smoke Test: Checking py_test dependencies exist in pip package" "Check load py_test: Check that BUILD files with py_test target properly load py_test" "Code Link Check: Check there are no broken links" "Test entries in /tensorflow/contrib/cmake/python_{modules|protos|protos_cc}.txt for validity and consistency" "Check file names for cases")
INCREMENTAL_FLAG=""
-DEFAULT_BAZEL_CONFIGS="--config=hdfs --config=gcp"
+DEFAULT_BAZEL_CONFIGS=""
# Parse command-line arguments
BAZEL_FLAGS=${DEFAULT_BAZEL_CONFIGS}
diff --git a/tensorflow/tools/ci_build/copy_binary.py b/tensorflow/tools/ci_build/copy_binary.py
index 420d390d2b..148526492d 100755
--- a/tensorflow/tools/ci_build/copy_binary.py
+++ b/tensorflow/tools/ci_build/copy_binary.py
@@ -32,7 +32,8 @@ import shutil
import tempfile
import zipfile
-TF_NIGHTLY_REGEX = r"(.+)tf_nightly(|_gpu)-(\d\.\d\.\d.dev[\d]{0,8})-(.+)\.whl"
+TF_NIGHTLY_REGEX = (r"(.+)tf_nightly(|_gpu)-(\d\.[\d]{1,2}"
+ "\.\d.dev[\d]{0,8})-(.+)\.whl")
BINARY_STRING_TEMPLATE = "%s-%s-%s.whl"
diff --git a/tensorflow/tools/ci_build/gpu_build/parallel_gpu_execute.sh b/tensorflow/tools/ci_build/gpu_build/parallel_gpu_execute.sh
index d0816c92b7..cd7206baf8 100755
--- a/tensorflow/tools/ci_build/gpu_build/parallel_gpu_execute.sh
+++ b/tensorflow/tools/ci_build/gpu_build/parallel_gpu_execute.sh
@@ -16,44 +16,67 @@
#
#
# A script to run multiple GPU tests in parallel controlled with an environment
-# variable. This script will assume that when it runs, one of the locks are
-# already released. So the program calling this script is expected to make sure
-# that only $TF_GPU_COUNT processes are running at any gien time.
+# variable.
#
# Required environment variables:
-# TF_GPU_COUNT = Number of GPUs available. This HAS TO BE IN SYNC with the
-# value of --local_test_jobs flag for bazel.
+# TF_GPU_COUNT = Number of GPUs available.
-BASH_VER_MAJOR=$(echo ${BASH_VERSION} | cut -d '.' -f 1)
-BASH_VER_MINOR=$(echo ${BASH_VERSION} | cut -d '.' -f 2)
+TF_GPU_COUNT=${TF_GPU_COUNT:-4}
+TF_TESTS_PER_GPU=${TF_TESTS_PER_GPU:-8}
+# We want to allow running one of the following configs:
+# - 4 tests per GPU on k80
+# - 8 tests per GPU on p100
+# p100 has at least 12G of memory, so we could limit each test to 1.5G.
+# To leave some room in case we want to run more tests in parallel in the
+# future, and to use a rounder number, we set the limit to 1G.
+export TF_PER_DEVICE_MEMORY_LIMIT_MB=1024
-if [[ ${BASH_VER_MAJOR} -lt 4 ]]; then
- echo "Insufficient bash version: ${BASH_VERSION} < 4.2" >&2
- exit 1
-elif [[ ${BASH_VER_MAJOR} -eq 4 ]] && [[ ${BASH_VER_MINOR} -lt 2 ]]; then
- echo "Insufficient bash version: ${BASH_VERSION} < 4.2" >&2
- exit 1
-fi
+# *******************************************************************
+# This section of the script is needed to
+# make things work on Windows under MSYS.
+# *******************************************************************
+RUNFILES_MANIFEST_FILE="${TEST_SRCDIR}/MANIFEST"
+function rlocation() {
+ if is_absolute "$1" ; then
+ # If the file path is already fully specified, simply return it.
+ echo "$1"
+ elif [[ -e "$TEST_SRCDIR/$1" ]]; then
+ # If the file exists in the $TEST_SRCDIR then just use it.
+ echo "$TEST_SRCDIR/$1"
+ elif [[ -e "$RUNFILES_MANIFEST_FILE" ]]; then
+ # If a runfiles manifest file exists then use it.
+ echo "$(grep "^$1 " "$RUNFILES_MANIFEST_FILE" | sed 's/[^ ]* //')"
+ fi
+}
-TF_GPU_COUNT=${TF_GPU_COUNT:-8}
+TEST_BINARY="$(rlocation $TEST_WORKSPACE/${1#./})"
+shift
+# *******************************************************************
-for i in `seq 0 $((TF_GPU_COUNT-1))`; do
- exec {lock_fd}>/var/lock/gpulock$i || exit 1
- if flock -n "$lock_fd";
- then
- (
- # This export only works within the brackets, so it is isolated to one
- # single command.
- export CUDA_VISIBLE_DEVICES=$i
- echo "Running test $* on GPU $CUDA_VISIBLE_DEVICES"
- $@
- )
- return_code=$?
- flock -u "$lock_fd"
- exit $return_code
- fi
+mkdir -p /var/lock
+# Try to acquire any of the TF_GPU_COUNT * TF_TESTS_PER_GPU
+# slots to run a test at.
+#
+# Prefer to allocate 1 test per GPU over 4 tests on 1 GPU.
+# So, we iterate over TF_TESTS_PER_GPU first.
+for j in `seq 0 $((TF_TESTS_PER_GPU-1))`; do
+ for i in `seq 0 $((TF_GPU_COUNT-1))`; do
+ exec {lock_fd}>/var/lock/gpulock${i}_${j} || exit 1
+ if flock -n "$lock_fd";
+ then
+ (
+ # This export only works within the brackets, so it is isolated to one
+ # single command.
+ export CUDA_VISIBLE_DEVICES=$i
+ echo "Running test $TEST_BINARY $* on GPU $CUDA_VISIBLE_DEVICES"
+ "$TEST_BINARY" $@
+ )
+ return_code=$?
+ flock -u "$lock_fd"
+ exit $return_code
+ fi
+ done
done
echo "Cannot find a free GPU to run the test $* on, exiting with failure..."
exit 1
-
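The rewritten lock loop exposes TF_GPU_COUNT * TF_TESTS_PER_GPU slots and tries each GPU once before stacking a second test on any GPU, and the 1024 MB per-device cap keeps the worst case well inside a 12 GB P100. A small worked check of that arithmetic using the defaults set above:

  TF_GPU_COUNT=4; TF_TESTS_PER_GPU=8; TF_PER_DEVICE_MEMORY_LIMIT_MB=1024
  echo "lock slots: $(( TF_GPU_COUNT * TF_TESTS_PER_GPU ))"                              # 32
  echo "worst-case MB per GPU: $(( TF_TESTS_PER_GPU * TF_PER_DEVICE_MEMORY_LIMIT_MB ))"  # 8192
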
diff --git a/tensorflow/tools/ci_build/install/install_bazel.sh b/tensorflow/tools/ci_build/install/install_bazel.sh
index 3e27a94cf2..e284401b8a 100755
--- a/tensorflow/tools/ci_build/install/install_bazel.sh
+++ b/tensorflow/tools/ci_build/install/install_bazel.sh
@@ -15,7 +15,7 @@
# ==============================================================================
# Select bazel version.
-BAZEL_VERSION="0.11.0"
+BAZEL_VERSION="0.15.0"
set +e
local_bazel_ver=$(bazel version 2>&1 | grep -i label | awk '{print $3}')
diff --git a/tensorflow/tools/ci_build/install/install_bazel_from_source.sh b/tensorflow/tools/ci_build/install/install_bazel_from_source.sh
new file mode 100755
index 0000000000..87be81577d
--- /dev/null
+++ b/tensorflow/tools/ci_build/install/install_bazel_from_source.sh
@@ -0,0 +1,40 @@
+#!/usr/bin/env bash
+# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ==============================================================================
+
+# This script is used to install bazel on non-x86_64 systems.
+# It compiles bazel from source and installs it in /usr/local/bin.
+
+# Select bazel version.
+BAZEL_VERSION="0.15.0"
+
+set +e
+local_bazel_ver=$(bazel version 2>&1 | grep -i label | awk '{print $3}')
+
+if [[ "$local_bazel_ver" == "$BAZEL_VERSION" ]]; then
+ exit 0
+fi
+
+set -e
+
+# Compile bazel from source
+mkdir -p /bazel
+cd /bazel
+
+curl -fSsL -O https://github.com/bazelbuild/bazel/releases/download/$BAZEL_VERSION/bazel-$BAZEL_VERSION-dist.zip
+unzip bazel-$BAZEL_VERSION-dist.zip
+bash ./compile.sh
+cp output/bazel /usr/local/bin/
+rm -rf /bazel
diff --git a/tensorflow/tools/ci_build/install/install_buildifier_from_source.sh b/tensorflow/tools/ci_build/install/install_buildifier_from_source.sh
new file mode 100755
index 0000000000..a93c258fad
--- /dev/null
+++ b/tensorflow/tools/ci_build/install/install_buildifier_from_source.sh
@@ -0,0 +1,30 @@
+#!/usr/bin/env bash
+# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ==============================================================================
+
+set -e
+BUILDTOOLS_VERSION="0.11.1"
+
+# Clone buildtools
+git clone -b $BUILDTOOLS_VERSION https://github.com/bazelbuild/buildtools
+cd buildtools
+
+# Build buildifier
+bazel build //buildifier
+sudo mv bazel-bin/buildifier/linux*stripped/buildifier /usr/local/bin
+
+# Build buildozer
+bazel build //buildozer
+sudo mv bazel-bin/buildozer/linux*stripped/buildozer /usr/local/bin
diff --git a/tensorflow/tools/ci_build/install/install_deb_packages.sh b/tensorflow/tools/ci_build/install/install_deb_packages.sh
index 9640810533..179fc42d60 100755
--- a/tensorflow/tools/ci_build/install/install_deb_packages.sh
+++ b/tensorflow/tools/ci_build/install/install_deb_packages.sh
@@ -67,6 +67,12 @@ apt-get install -y --no-install-recommends \
zip \
zlib1g-dev
+apt-get update && \
+ apt-get install nvinfer-runtime-trt-repo-ubuntu1604-4.0.1-ga-cuda9.0 && \
+ apt-get update && \
+ apt-get install libnvinfer4=4.1.2-1+cuda9.0 && \
+ apt-get install libnvinfer-dev=4.1.2-1+cuda9.0
+
# populate the database
updatedb
diff --git a/tensorflow/tools/ci_build/install/install_golang_ppc64le.sh b/tensorflow/tools/ci_build/install/install_golang_ppc64le.sh
new file mode 100755
index 0000000000..47d23a59b3
--- /dev/null
+++ b/tensorflow/tools/ci_build/install/install_golang_ppc64le.sh
@@ -0,0 +1,22 @@
+#!/usr/bin/env bash
+# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ==============================================================================
+
+set -ex
+
+GOLANG_URL="https://storage.googleapis.com/golang/go1.10.linux-ppc64le.tar.gz"
+
+sudo mkdir -p /usr/local
+wget -q -O - "${GOLANG_URL}" | sudo tar -C /usr/local -xz
diff --git a/tensorflow/tools/ci_build/install/install_hdf5_ppc64le.sh b/tensorflow/tools/ci_build/install/install_hdf5_ppc64le.sh
new file mode 100755
index 0000000000..4989d986b8
--- /dev/null
+++ b/tensorflow/tools/ci_build/install/install_hdf5_ppc64le.sh
@@ -0,0 +1,30 @@
+#!/usr/bin/env bash
+# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ==============================================================================
+
+
+# This is required because PyPI doesn't have a pre-built h5py binary for ppc64le.
+# It has to be compiled from source during the install.
+apt-get update
+apt-get install -y libhdf5-dev
+
+# h5py does not expect the shared libraries to have _serial in the name.
+ln -s /usr/lib/powerpc64le-linux-gnu/libhdf5_serial.so /usr/lib/powerpc64le-linux-gnu/libhdf5.so
+ln -s /usr/lib/powerpc64le-linux-gnu/libhdf5_serial_hl.so /usr/lib/powerpc64le-linux-gnu/libhdf5_hl.so
+
+# pip is not installed yet, so use easy_install.
+# CPATH is the location of hdf5.h.
+CPATH=/usr/include/hdf5/serial/ easy_install -U h5py
+CPATH=/usr/include/hdf5/serial/ easy_install3 -U h5py
diff --git a/tensorflow/tools/ci_build/install/install_openblas_ppc64le.sh b/tensorflow/tools/ci_build/install/install_openblas_ppc64le.sh
new file mode 100755
index 0000000000..107cc61ff5
--- /dev/null
+++ b/tensorflow/tools/ci_build/install/install_openblas_ppc64le.sh
@@ -0,0 +1,29 @@
+#!/usr/bin/env bash
+# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ==============================================================================
+
+OPENBLAS_SRC_PATH=/tmp/openblas_src/
+POWER="POWER8"
+USE_OPENMP="USE_OPENMP=1"
+OPENBLAS_INSTALL_PATH="/usr"
+apt-get update
+apt-get install -y gfortran gfortran-5
+rm -rf ${OPENBLAS_SRC_PATH}
+git clone -b release-0.3.0 https://github.com/xianyi/OpenBLAS ${OPENBLAS_SRC_PATH}
+cd ${OPENBLAS_SRC_PATH}
+# Pick up fix for OpenBLAS issue 1571
+git cherry-pick -X theirs 961d25e9c7e4a1758adb1dbeaa15187de69dd052
+make TARGET=${POWER} ${USE_OPENMP} FC=gfortran
+make PREFIX=${OPENBLAS_INSTALL_PATH} install
diff --git a/tensorflow/tools/ci_build/install/install_pip_packages.sh b/tensorflow/tools/ci_build/install/install_pip_packages.sh
index d406b83a62..4ced96f90b 100755
--- a/tensorflow/tools/ci_build/install/install_pip_packages.sh
+++ b/tensorflow/tools/ci_build/install/install_pip_packages.sh
@@ -16,10 +16,10 @@
set -e
-# We don't apt-get install so that we can install a newer version of pip. Not
-# needed after we upgrade to Ubuntu 16.04
-easy_install -U pip
-easy_install3 -U pip
+# We don't apt-get install so that we can install a newer version of pip.
+# Only needed for Ubuntu 14.04 and 16.04; not needed for 18.04 and Debian 8,9?
+easy_install -U pip==9.0.3
+easy_install3 -U pip==9.0.3
# Install pip packages from whl files to avoid the time-consuming process of
# building from source.
@@ -51,8 +51,8 @@ pip2 install --upgrade markdown==2.6.8
pip3 install --upgrade markdown==2.6.8
# Install protobuf.
-pip2 install --upgrade protobuf==3.3.0
-pip3 install --upgrade protobuf==3.3.0
+pip2 install --upgrade protobuf==3.6.0
+pip3 install --upgrade protobuf==3.6.0
# Remove obsolete version of six, which can sometimes confuse virtualenv.
rm -rf /usr/lib/python3/dist-packages/six*
@@ -60,11 +60,16 @@ rm -rf /usr/lib/python3/dist-packages/six*
# numpy needs to be installed from source to fix segfaults. See:
# https://github.com/tensorflow/tensorflow/issues/6968
# This workaround isn't needed for Ubuntu 16.04 or later.
-pip2 install --no-binary=:all: --upgrade numpy==1.12.0
-pip3 install --no-binary=:all: --upgrade numpy==1.12.0
+if $(cat /etc/*-release | grep -q 14.04); then
+ pip2 install --no-binary=:all: --upgrade numpy==1.14.5
+ pip3 install --no-binary=:all: --upgrade numpy==1.14.5
+else
+ pip2 install --upgrade numpy==1.14.5
+ pip3 install --upgrade numpy==1.14.5
+fi
-pip2 install scipy==0.18.1
-pip3 install scipy==0.18.1
+pip2 install scipy==1.1.0
+pip3 install scipy==1.1.0
pip2 install scikit-learn==0.18.1
pip3 install scikit-learn==0.18.1
@@ -104,3 +109,19 @@ pip2 install --upgrade gast
pip3 install --upgrade gast
pip2 install --upgrade termcolor
pip3 install --upgrade termcolor
+
+# Install last working version of setuptools.
+pip2 install --upgrade setuptools==39.1.0
+pip3 install --upgrade setuptools==39.1.0
+
+# Keras
+pip2 install keras_applications==1.0.5 --no-deps
+pip3 install keras_applications==1.0.5 --no-deps
+pip2 install keras_preprocessing==1.0.3 --no-deps
+pip3 install keras_preprocessing==1.0.3 --no-deps
+pip2 install --upgrade h5py==2.8.0
+pip3 install --upgrade h5py==2.8.0
+
+# Install last working version of setuptools.
+pip2 install --upgrade setuptools==39.1.0
+pip3 install --upgrade setuptools==39.1.0
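The numpy source build is now limited to Ubuntu 14.04 by grepping the release files; everywhere else the prebuilt wheel is used. A standalone sketch of the same branch, with the version pin taken from the script above:

  if grep -q 14.04 /etc/*-release; then
    # Build from source on 14.04 to avoid the segfaults referenced above.
    pip install --no-binary=:all: --upgrade numpy==1.14.5
  else
    pip install --upgrade numpy==1.14.5
  fi
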
diff --git a/tensorflow/tools/ci_build/install/install_pip_packages_remote.sh b/tensorflow/tools/ci_build/install/install_pip_packages_remote.sh
index 39a6d557d1..721590f4d6 100755
--- a/tensorflow/tools/ci_build/install/install_pip_packages_remote.sh
+++ b/tensorflow/tools/ci_build/install/install_pip_packages_remote.sh
@@ -20,10 +20,8 @@ if [ ! -f /usr/bin/x86_64-linux-gnu-gcc ]; then
ln -s /usr/local/bin/clang /usr/bin/x86_64-linux-gnu-gcc
fi
-pip2 install -U pip
-pip3 install -U pip
-pip2 install -U setuptools
-pip3 install -U setuptools
+easy_install -U pip==9.0.3
+easy_install3 -U pip==9.0.3
# The rest of the pip packages will be installed in
# `install_pip_packages.sh`
diff --git a/tensorflow/tools/ci_build/install/install_proto3.sh b/tensorflow/tools/ci_build/install/install_proto3.sh
index 7934002b2c..821d50baff 100755
--- a/tensorflow/tools/ci_build/install/install_proto3.sh
+++ b/tensorflow/tools/ci_build/install/install_proto3.sh
@@ -17,7 +17,7 @@
# Install protobuf3.
# Select protobuf version.
-PROTOBUF_VERSION="3.3.0"
+PROTOBUF_VERSION="3.6.0"
protobuf_ver_flat=$(echo $PROTOBUF_VERSION | sed 's/\.//g' | sed 's/^0*//g')
local_protobuf_ver=$(protoc --version)
local_protobuf_ver_flat=$(echo $local_protobuf_ver | sed 's/\.//g' | sed 's/^0*//g')
diff --git a/tensorflow/tools/ci_build/install/install_python3.5_pip_packages.sh b/tensorflow/tools/ci_build/install/install_python3.5_pip_packages.sh
index aefc49f604..37e6b51f66 100755
--- a/tensorflow/tools/ci_build/install/install_python3.5_pip_packages.sh
+++ b/tensorflow/tools/ci_build/install/install_python3.5_pip_packages.sh
@@ -39,6 +39,8 @@ if [[ -z $pip35_version ]]; then
fi
set -e
+pip3.5 install --upgrade pip
+
pip3.5 install --upgrade virtualenv
# Install six.
@@ -46,7 +48,7 @@ pip3.5 install --upgrade absl-py
pip3.5 install --upgrade six==1.10.0
# Install protobuf.
-pip3.5 install --upgrade protobuf==3.3.0
+pip3.5 install --upgrade protobuf==3.6.0
# Remove obsolete version of six, which can sometimes confuse virtualenv.
rm -rf /usr/lib/python3/dist-packages/six*
@@ -56,7 +58,7 @@ rm -rf /usr/lib/python3/dist-packages/six*
# numpy needs to be installed from source to fix segfaults. See:
# https://github.com/tensorflow/tensorflow/issues/6968
# This workaround isn't needed for Ubuntu 16.04 or later.
-pip3.5 install --no-binary=:all: --upgrade numpy==1.12.0
+pip3.5 install --no-binary=:all: --upgrade numpy==1.14.5
pip3.5 install scipy==0.18.1
@@ -79,4 +81,15 @@ pip3.5 install --upgrade astor
pip3.5 install --upgrade gast
pip3.5 install --upgrade termcolor
+# Install last working version of setuptools.
+pip3.5 install --upgrade setuptools==39.1.0
+
+# Keras
+pip3.5 install keras_applications==1.0.5
+pip3.5 install keras_preprocessing==1.0.3
+pip3.5 install --upgrade h5py==2.8.0
+
+# Install last working version of setuptools.
+pip3.5 install --upgrade setuptools==39.1.0
+
# LINT.ThenChange(//tensorflow/tools/ci_build/install/install_python3.6_pip_packages.sh)
diff --git a/tensorflow/tools/ci_build/install/install_python3.6_pip_packages.sh b/tensorflow/tools/ci_build/install/install_python3.6_pip_packages.sh
index bfaa044c82..7520ff74cb 100755
--- a/tensorflow/tools/ci_build/install/install_python3.6_pip_packages.sh
+++ b/tensorflow/tools/ci_build/install/install_python3.6_pip_packages.sh
@@ -49,15 +49,18 @@ cd Python-3.6.1
make altinstall
ln -s /usr/local/bin/pip3.6 /usr/local/bin/pip3
+pip3 install --upgrade pip
+
pip3 install --upgrade virtualenv
set -e
+
# Install six.
pip3 install --upgrade absl-py
pip3 install --upgrade six==1.10.0
# Install protobuf.
-pip3 install --upgrade protobuf==3.3.0
+pip3 install --upgrade protobuf==3.6.0
# Remove obsolete version of six, which can sometimes confuse virtualenv.
rm -rf /usr/lib/python3/dist-packages/six*
@@ -67,7 +70,7 @@ rm -rf /usr/lib/python3/dist-packages/six*
# numpy needs to be installed from source to fix segfaults. See:
# https://github.com/tensorflow/tensorflow/issues/6968
# This workaround isn't needed for Ubuntu 16.04 or later.
-pip3 install --no-binary=:all: --upgrade numpy==1.12.0
+pip3 install --no-binary=:all: --upgrade numpy==1.14.5
pip3 install scipy==0.18.1
@@ -94,4 +97,12 @@ pip3 install --upgrade astor
pip3 install --upgrade gast
pip3 install --upgrade termcolor
+# Install last working version of setuptools.
+pip3 install --upgrade setuptools==39.1.0
+pip3 install --upgrade h5py==2.8.0
+
+# Keras
+pip3 install keras_applications==1.0.5
+pip3 install keras_preprocessing==1.0.3
+
# LINT.ThenChange(//tensorflow/tools/ci_build/install/install_python3.5_pip_packages.sh)
diff --git a/tensorflow/tools/ci_build/linux/cpu/run_cc_core.sh b/tensorflow/tools/ci_build/linux/cpu/run_cc_core.sh
index 51e10f81f8..8eeddcdb82 100755
--- a/tensorflow/tools/ci_build/linux/cpu/run_cc_core.sh
+++ b/tensorflow/tools/ci_build/linux/cpu/run_cc_core.sh
@@ -34,5 +34,5 @@ yes "" | $PYTHON_BIN_PATH configure.py
# Run bazel test command. Double test timeouts to avoid flakes.
bazel test --test_tag_filters=-no_oss,-gpu,-benchmark-test --test_lang_filters=cc,java -k \
--jobs=${N_JOBS} --test_timeout 300,450,1200,3600 --config=opt \
- --test_output=errors -- \
+ --test_output=errors --test_size_filters=small,medium -- \
//tensorflow/... -//tensorflow/compiler/... -//tensorflow/contrib/...
diff --git a/tensorflow/tools/ci_build/linux/cpu/run_mkl.sh b/tensorflow/tools/ci_build/linux/cpu/run_mkl.sh
index dbf376be6f..7be5f454ec 100755
--- a/tensorflow/tools/ci_build/linux/cpu/run_mkl.sh
+++ b/tensorflow/tools/ci_build/linux/cpu/run_mkl.sh
@@ -30,7 +30,10 @@ export PYTHON_BIN_PATH=`which python2`
yes "" | $PYTHON_BIN_PATH configure.py
# Run bazel test command. Double test timeouts to avoid flakes.
-bazel test --test_tag_filters=-no_oss,-oss_serial,-gpu,-benchmark-test --test_lang_filters=py -k \
+# Setting KMP_BLOCKTIME to 0 lets OpenMP threads sleep right after parallel execution
+# in an MKL primitive. This reduces the effect of oversubscribing OpenMP threads
+# when multiple tests execute concurrently.
+bazel test --test_tag_filters=-no_oss,-oss_serial,-gpu,-benchmark-test --test_lang_filters=cc,py -k \
--jobs=${N_JOBS} --test_timeout 300,450,1200,3600 --build_tests_only \
- --config=mkl --config=opt --test_output=errors -- \
+ --config=mkl --test_env=KMP_BLOCKTIME=0 --config=opt --test_output=errors -- \
//tensorflow/... -//tensorflow/compiler/... -//tensorflow/contrib/...
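KMP_BLOCKTIME=0 tells the OpenMP runtime to let worker threads sleep immediately after a parallel region instead of spin-waiting, which matters when several MKL tests share the same cores. A hedged way to apply the same setting to a one-off run outside bazel; the thread count and script name are placeholders, not values from this diff:

  export KMP_BLOCKTIME=0
  export OMP_NUM_THREADS=4     # illustrative thread count
  python my_mkl_workload.py    # placeholder for an MKL-backed workload
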
diff --git a/tensorflow/tools/ci_build/linux/cpu/run_py2_core.sh b/tensorflow/tools/ci_build/linux/cpu/run_py2_core.sh
index ea14848b1a..8eca1987f0 100755
--- a/tensorflow/tools/ci_build/linux/cpu/run_py2_core.sh
+++ b/tensorflow/tools/ci_build/linux/cpu/run_py2_core.sh
@@ -33,5 +33,5 @@ yes "" | $PYTHON_BIN_PATH configure.py
# Run bazel test command. Double test timeouts to avoid flakes.
bazel test --test_tag_filters=-no_oss,-oss_serial,-gpu,-benchmark-test --test_lang_filters=py -k \
--jobs=${N_JOBS} --test_timeout 300,450,1200,3600 --build_tests_only --config=opt \
- --test_output=errors -- \
+ --test_output=errors --test_size_filters=small,medium -- \
//tensorflow/... -//tensorflow/compiler/... -//tensorflow/contrib/...
diff --git a/tensorflow/tools/ci_build/linux/cpu/run_py3_contrib.sh b/tensorflow/tools/ci_build/linux/cpu/run_py3_contrib.sh
index 6d017c8a1f..f6fa9251d4 100755
--- a/tensorflow/tools/ci_build/linux/cpu/run_py3_contrib.sh
+++ b/tensorflow/tools/ci_build/linux/cpu/run_py3_contrib.sh
@@ -33,36 +33,5 @@ yes "" | $PYTHON_BIN_PATH configure.py
# Run bazel test command. Double test timeouts to avoid flakes.
bazel test --test_tag_filters=-no_oss,-oss_serial,-gpu,-benchmark-test -k \
--jobs=${N_JOBS} --test_timeout 300,450,1200,3600 --config=opt \
- --test_output=errors -- \
- //tensorflow/contrib/... \
- -//tensorflow/contrib/lite/... \
- //tensorflow/contrib/lite:context_test \
- //tensorflow/contrib/lite:framework \
- //tensorflow/contrib/lite:interpreter_test \
- //tensorflow/contrib/lite:model_test \
- //tensorflow/contrib/lite/toco:toco \
- //tensorflow/contrib/lite:simple_memory_arena_test \
- //tensorflow/contrib/lite:string_util_test \
- //tensorflow/contrib/lite/kernels:activations_test \
- //tensorflow/contrib/lite/kernels:add_test \
- //tensorflow/contrib/lite/kernels:basic_rnn_test \
- //tensorflow/contrib/lite/kernels:concatenation_test \
- //tensorflow/contrib/lite/kernels:conv_test \
- //tensorflow/contrib/lite/kernels:depthwise_conv_test \
- //tensorflow/contrib/lite/kernels:embedding_lookup_test \
- //tensorflow/contrib/lite/kernels:embedding_lookup_sparse_test \
- //tensorflow/contrib/lite/kernels:fully_connected_test \
- //tensorflow/contrib/lite/testing:generated_examples_zip_test \
- //tensorflow/contrib/lite/kernels:hashtable_lookup_test \
- //tensorflow/contrib/lite/kernels:local_response_norm_test \
- //tensorflow/contrib/lite/kernels:lsh_projection_test \
- //tensorflow/contrib/lite/kernels:lstm_test \
- //tensorflow/contrib/lite/kernels:l2norm_test \
- //tensorflow/contrib/lite/kernels:mul_test \
- //tensorflow/contrib/lite/kernels:pooling_test \
- //tensorflow/contrib/lite/kernels:reshape_test \
- //tensorflow/contrib/lite/kernels:resize_bilinear_test \
- //tensorflow/contrib/lite/kernels:skip_gram_test \
- //tensorflow/contrib/lite/kernels:softmax_test \
- //tensorflow/contrib/lite/kernels:space_to_depth_test \
- //tensorflow/contrib/lite/kernels:svdf_test
+ --test_size_filters=small,medium --test_output=errors -- \
+ //tensorflow/contrib/...
diff --git a/tensorflow/tools/ci_build/linux/cpu/run_py3_core.sh b/tensorflow/tools/ci_build/linux/cpu/run_py3_core.sh
index a9accb9dd5..51eb2cd7e6 100755
--- a/tensorflow/tools/ci_build/linux/cpu/run_py3_core.sh
+++ b/tensorflow/tools/ci_build/linux/cpu/run_py3_core.sh
@@ -33,5 +33,5 @@ yes "" | $PYTHON_BIN_PATH configure.py
# Run bazel test command. Double test timeouts to avoid flakes.
bazel test --test_tag_filters=-no_oss,-oss_serial,-gpu,-benchmark-test --test_lang_filters=py -k \
--jobs=${N_JOBS} --test_timeout 300,450,1200,3600 --build_tests_only --config=opt \
- --test_output=errors -- \
+ --test_output=errors --test_size_filters=small,medium -- \
//tensorflow/... -//tensorflow/compiler/... -//tensorflow/contrib/...
diff --git a/tensorflow/tools/ci_build/linux/gpu/run_cc_core.sh b/tensorflow/tools/ci_build/linux/gpu/run_cc_core.sh
index 02224d8e9d..9d2c8383fa 100755
--- a/tensorflow/tools/ci_build/linux/gpu/run_cc_core.sh
+++ b/tensorflow/tools/ci_build/linux/gpu/run_cc_core.sh
@@ -37,5 +37,6 @@ yes "" | $PYTHON_BIN_PATH configure.py
bazel test --config=cuda --test_tag_filters=-no_oss,-oss_serial,-no_gpu,-benchmark-test -k \
--test_lang_filters=cc --jobs=${N_JOBS} --test_timeout 300,450,1200,3600 \
--build_tests_only --test_output=errors --local_test_jobs=8 --config=opt \
+ --test_size_filters=small,medium \
--run_under=//tensorflow/tools/ci_build/gpu_build:parallel_gpu_execute -- \
//tensorflow/... -//tensorflow/compiler/... -//tensorflow/contrib/...
diff --git a/tensorflow/tools/ci_build/linux/gpu/run_mkl.sh b/tensorflow/tools/ci_build/linux/gpu/run_mkl.sh
new file mode 100755
index 0000000000..50ee07e727
--- /dev/null
+++ b/tensorflow/tools/ci_build/linux/gpu/run_mkl.sh
@@ -0,0 +1,47 @@
+#!/usr/bin/env bash
+# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# ==============================================================================
+
+set -e
+set -x
+
+N_JOBS=$(grep -c ^processor /proc/cpuinfo)
+
+echo ""
+echo "Bazel will use ${N_JOBS} concurrent job(s)."
+echo ""
+
+# Run configure.
+export PYTHON_BIN_PATH=`which python2`
+
+export TF_NEED_CUDA=1
+export TF_CUDA_VERSION=9.0
+export TF_CUDNN_VERSION=7
+export TF_CUDA_COMPUTE_CAPABILITIES=3.7
+
+yes "" | $PYTHON_BIN_PATH configure.py
+
+# Run bazel test command. Double test timeouts to avoid flakes.
+# Setting KMP_BLOCKTIME to 0 lets OpenMP threads sleep right after parallel execution
+# in an MKL primitive. This reduces the effect of oversubscribing OpenMP threads
+# when multiple tests execute concurrently.
+bazel test --config=cuda --test_tag_filters=-no_oss,-oss_serial,-no_gpu,-benchmark-test \
+ --test_lang_filters=cc,py -k --jobs="${N_JOBS}" \
+ --test_timeout 300,450,1200,3600 --build_tests_only --test_env=KMP_BLOCKTIME=0\
+ --config=mkl --config=opt --test_output=errors --local_test_jobs=8 \
+ --run_under=//tensorflow/tools/ci_build/gpu_build:parallel_gpu_execute -- \
+ //tensorflow/... -//tensorflow/compiler/... -//tensorflow/contrib/...
+
diff --git a/tensorflow/tools/ci_build/linux/gpu/run_py3_core.sh b/tensorflow/tools/ci_build/linux/gpu/run_py3_core.sh
index 0367a53d14..5b3383e105 100755
--- a/tensorflow/tools/ci_build/linux/gpu/run_py3_core.sh
+++ b/tensorflow/tools/ci_build/linux/gpu/run_py3_core.sh
@@ -37,5 +37,6 @@ yes "" | $PYTHON_BIN_PATH configure.py
bazel test --config=cuda --test_tag_filters=-no_oss,-oss_serial,-no_gpu,-benchmark-test -k \
--test_lang_filters=py --jobs=${N_JOBS} --test_timeout 300,450,1200,3600 \
--build_tests_only --test_output=errors --local_test_jobs=8 --config=opt \
+ --test_size_filters=small,medium \
--run_under=//tensorflow/tools/ci_build/gpu_build:parallel_gpu_execute -- \
//tensorflow/... -//tensorflow/compiler/... -//tensorflow/contrib/...
diff --git a/tensorflow/tools/ci_build/linux/libtensorflow_docker.sh b/tensorflow/tools/ci_build/linux/libtensorflow_docker.sh
index e5d8303c6e..60c974c36b 100755
--- a/tensorflow/tools/ci_build/linux/libtensorflow_docker.sh
+++ b/tensorflow/tools/ci_build/linux/libtensorflow_docker.sh
@@ -21,7 +21,12 @@
# See libtensorflow_cpu.sh and libtensorflow_gpu.sh
set -ex
+
+# Current script directory
+
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
+
+source "${SCRIPT_DIR}/../builds/builds_common.sh"
DOCKER_CONTEXT_PATH="$(realpath ${SCRIPT_DIR}/..)"
ROOT_DIR="$(realpath ${SCRIPT_DIR}/../../../../)"
@@ -45,9 +50,9 @@ ${DOCKER_BINARY} run \
-v ${ROOT_DIR}:/workspace \
-w /workspace \
-e "PYTHON_BIN_PATH=/usr/bin/python" \
- -e "TF_NEED_GCP=0" \
-e "TF_NEED_HDFS=0" \
-e "TF_NEED_CUDA=${TF_NEED_CUDA}" \
+ -e "TF_NEED_TENSORRT=${TF_NEED_CUDA}" \
-e "TF_NEED_OPENCL_SYCL=0" \
"${DOCKER_IMAGE}" \
"/workspace/tensorflow/tools/ci_build/linux/libtensorflow.sh"
diff --git a/tensorflow/tools/ci_build/linux/mkl/basic-mkl-gpu-test.sh b/tensorflow/tools/ci_build/linux/mkl/basic-mkl-gpu-test.sh
new file mode 100755
index 0000000000..68354bf7c1
--- /dev/null
+++ b/tensorflow/tools/ci_build/linux/mkl/basic-mkl-gpu-test.sh
@@ -0,0 +1,29 @@
+#!/usr/bin/env bash
+# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ==============================================================================
+#
+# Usage: basic_mkl_test.sh
+
+# Helper function to traverse directories up until given file is found.
+function upsearch () {
+ test / == "$PWD" && return || \
+ test -e "$1" && echo "$PWD" && return || \
+ cd .. && upsearch "$1"
+}
+
+# Set up WORKSPACE.
+WORKSPACE="${WORKSPACE:-$(upsearch WORKSPACE)}"
+
+BUILD_TAG=mkl-gpu-ci-test CI_BUILD_USER_FORCE_BADNAME=yes ${WORKSPACE}/tensorflow/tools/ci_build/ci_build.sh gpu tensorflow/tools/ci_build/linux/gpu/run_mkl.sh
diff --git a/tensorflow/tools/ci_build/linux/mkl/basic-mkl-test.sh b/tensorflow/tools/ci_build/linux/mkl/basic-mkl-test.sh
new file mode 100755
index 0000000000..10a09a415a
--- /dev/null
+++ b/tensorflow/tools/ci_build/linux/mkl/basic-mkl-test.sh
@@ -0,0 +1,29 @@
+#!/usr/bin/env bash
+# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ==============================================================================
+#
+# Usage: basic_mkl_test.sh
+
+# Helper function to traverse directories up until given file is found.
+function upsearch () {
+ test / == "$PWD" && return || \
+ test -e "$1" && echo "$PWD" && return || \
+ cd .. && upsearch "$1"
+}
+
+# Set up WORKSPACE.
+WORKSPACE="${WORKSPACE:-$(upsearch WORKSPACE)}"
+
+BUILD_TAG=mkl-ci-test CI_BUILD_USER_FORCE_BADNAME=yes ${WORKSPACE}/tensorflow/tools/ci_build/ci_build.sh cpu tensorflow/tools/ci_build/linux/cpu/run_mkl.sh
diff --git a/tensorflow/tools/ci_build/linux/mkl/build-dev-container.sh b/tensorflow/tools/ci_build/linux/mkl/build-dev-container.sh
new file mode 100755
index 0000000000..b497326d98
--- /dev/null
+++ b/tensorflow/tools/ci_build/linux/mkl/build-dev-container.sh
@@ -0,0 +1,103 @@
+#!/usr/bin/env bash
+# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ==============================================================================
+# Build a whl and container with Intel(R) MKL support
+# Usage: build-dev-container.sh
+
+# Helper function to traverse directories up until given file is found.
+function upsearch () {
+ test / == "$PWD" && return || \
+ test -e "$1" && echo "$PWD" && return || \
+ cd .. && upsearch "$1"
+}
+
+# Set up WORKSPACE.
+WORKSPACE="${WORKSPACE:-$(upsearch WORKSPACE)}"
+
+TF_DOCKER_BUILD_DEVEL_BRANCH=${TF_DOCKER_BUILD_DEVEL_BRANCH:-master}
+TF_DOCKER_BUILD_IMAGE_NAME=${TF_DOCKER_BUILD_IMAGE_NAME:-intel-mkl/tensorflow}
+TF_DOCKER_BUILD_VERSION=${TF_DOCKER_BUILD_VERSION:-nightly}
+
+echo "TF_DOCKER_BUILD_DEVEL_BRANCH=${TF_DOCKER_BUILD_DEVEL_BRANCH}"
+echo "TF_DOCKER_BUILD_IMAGE_NAME=${TF_DOCKER_BUILD_IMAGE_NAME}"
+echo "TF_DOCKER_BUILD_VERSION=${TF_DOCKER_BUILD_VERSION}"
+
+# Build containers for AVX
+# Include the instructions for sandybridge and later, but tune for ivybridge
+TF_BAZEL_BUILD_OPTIONS="--config=mkl --copt=-march=sandybridge --copt=-mtune=ivybridge --copt=-O3 --cxxopt=-D_GLIBCXX_USE_CXX11_ABI=0"
+
+# build the python 2 container and whl
+TF_DOCKER_BUILD_TYPE="MKL" \
+ TF_DOCKER_BUILD_IS_DEVEL="YES" \
+ TF_DOCKER_BUILD_DEVEL_BRANCH="${TF_DOCKER_BUILD_DEVEL_BRANCH}" \
+ TF_DOCKER_BUILD_IMAGE_NAME="${TF_DOCKER_BUILD_IMAGE_NAME}" \
+ TF_DOCKER_BUILD_VERSION="${TF_DOCKER_BUILD_VERSION}" \
+ TF_BAZEL_BUILD_OPTIONS="${TF_BAZEL_BUILD_OPTIONS}" \
+ ${WORKSPACE}/tensorflow/tools/docker/parameterized_docker_build.sh
+
+# build the python 3 container and whl
+TF_DOCKER_BUILD_TYPE="MKL" \
+ TF_DOCKER_BUILD_IS_DEVEL="YES" \
+ TF_DOCKER_BUILD_DEVEL_BRANCH="${TF_DOCKER_BUILD_DEVEL_BRANCH}" \
+ TF_DOCKER_BUILD_IMAGE_NAME="${TF_DOCKER_BUILD_IMAGE_NAME}" \
+ TF_DOCKER_BUILD_VERSION="${TF_DOCKER_BUILD_VERSION}" \
+ TF_DOCKER_BUILD_PYTHON_VERSION="PYTHON3" \
+ TF_BAZEL_BUILD_OPTIONS="${TF_BAZEL_BUILD_OPTIONS}" \
+ ${WORKSPACE}/tensorflow/tools/docker/parameterized_docker_build.sh
+
+# build the python3.6 container and whl
+TF_DOCKER_BUILD_TYPE="MKL" \
+ TF_DOCKER_BUILD_IS_DEVEL="YES" \
+ TF_DOCKER_BUILD_DEVEL_BRANCH="${TF_DOCKER_BUILD_DEVEL_BRANCH}" \
+ TF_DOCKER_BUILD_IMAGE_NAME="${TF_DOCKER_BUILD_IMAGE_NAME}" \
+ TF_DOCKER_BUILD_VERSION="${TF_DOCKER_BUILD_VERSION}" \
+ TF_DOCKER_BUILD_PYTHON_VERSION="PYTHON3.6" \
+ TF_BAZEL_BUILD_OPTIONS="${TF_BAZEL_BUILD_OPTIONS}" \
+ ${WORKSPACE}/tensorflow/tools/docker/parameterized_docker_build.sh
+
+
+# Build containers for AVX2
+# Include the instructions for haswell and later, but tune for broadwell
+TF_BAZEL_BUILD_OPTIONS="--config=mkl --copt=-march=haswell --copt=-mtune=broadwell --copt=-O3 --cxxopt=-D_GLIBCXX_USE_CXX11_ABI=0"
+
+# build the python 2 container and whl
+TF_DOCKER_BUILD_TYPE="MKL" \
+ TF_DOCKER_BUILD_IS_DEVEL="YES" \
+ TF_DOCKER_BUILD_DEVEL_BRANCH="${TF_DOCKER_BUILD_DEVEL_BRANCH}" \
+ TF_DOCKER_BUILD_IMAGE_NAME="${TF_DOCKER_BUILD_IMAGE_NAME}" \
+ TF_DOCKER_BUILD_VERSION="${TF_DOCKER_BUILD_VERSION}-avx2" \
+ TF_BAZEL_BUILD_OPTIONS="${TF_BAZEL_BUILD_OPTIONS}" \
+ ${WORKSPACE}/tensorflow/tools/docker/parameterized_docker_build.sh
+
+# build the python 3 container and whl
+TF_DOCKER_BUILD_TYPE="MKL" \
+ TF_DOCKER_BUILD_IS_DEVEL="YES" \
+ TF_DOCKER_BUILD_DEVEL_BRANCH="${TF_DOCKER_BUILD_DEVEL_BRANCH}" \
+ TF_DOCKER_BUILD_IMAGE_NAME="${TF_DOCKER_BUILD_IMAGE_NAME}" \
+ TF_DOCKER_BUILD_VERSION="${TF_DOCKER_BUILD_VERSION}-avx2" \
+ TF_DOCKER_BUILD_PYTHON_VERSION="PYTHON3" \
+ TF_BAZEL_BUILD_OPTIONS="${TF_BAZEL_BUILD_OPTIONS}" \
+ ${WORKSPACE}/tensorflow/tools/docker/parameterized_docker_build.sh
+
+# build the python3.6 container and whl
+TF_DOCKER_BUILD_TYPE="MKL" \
+ TF_DOCKER_BUILD_IS_DEVEL="YES" \
+ TF_DOCKER_BUILD_DEVEL_BRANCH="${TF_DOCKER_BUILD_DEVEL_BRANCH}" \
+ TF_DOCKER_BUILD_IMAGE_NAME="${TF_DOCKER_BUILD_IMAGE_NAME}" \
+ TF_DOCKER_BUILD_VERSION="${TF_DOCKER_BUILD_VERSION}-avx2" \
+ TF_DOCKER_BUILD_PYTHON_VERSION="PYTHON3.6" \
+ TF_BAZEL_BUILD_OPTIONS="${TF_BAZEL_BUILD_OPTIONS}" \
+ ${WORKSPACE}/tensorflow/tools/docker/parameterized_docker_build.sh
+
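The AVX images above are built with -march=sandybridge (the first AVX generation) and tuned for ivybridge, while the AVX2 images use -march=haswell tuned for broadwell. A hedged one-liner, not part of the script, for deciding which image a Linux host can actually run:

  # Prefer the -avx2 images only when the host CPU reports AVX2 support.
  grep -q avx2 /proc/cpuinfo && echo "use the -avx2 images" || echo "use the avx images"
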
diff --git a/tensorflow/tools/ci_build/linux/ppc64le/cpu/run_py2.sh b/tensorflow/tools/ci_build/linux/ppc64le/cpu/run_py2.sh
new file mode 100755
index 0000000000..e13de35061
--- /dev/null
+++ b/tensorflow/tools/ci_build/linux/ppc64le/cpu/run_py2.sh
@@ -0,0 +1,37 @@
+#!/usr/bin/env bash
+# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# ==============================================================================
+
+set -e
+set -x
+
+N_JOBS=$(grep -c ^processor /proc/cpuinfo)
+
+echo ""
+echo "Bazel will use ${N_JOBS} concurrent job(s)."
+echo ""
+
+# Run configure.
+export TF_NEED_CUDA=0
+export CC_OPT_FLAGS='-mcpu=power8 -mtune=power8'
+export PYTHON_BIN_PATH=`which python2`
+yes "" | $PYTHON_BIN_PATH configure.py
+
+# Run bazel test command. Double test timeouts to avoid flakes.
+bazel test --test_tag_filters=-no_oss,-oss_serial,-gpu,-benchmark-test -k \
+ --jobs=${N_JOBS} --test_timeout 300,450,1200,3600 --build_tests_only --config=opt \
+ --test_output=errors --test_size_filters=small,medium -- \
+ //tensorflow/... -//tensorflow/compiler/...
diff --git a/tensorflow/tools/ci_build/linux/ppc64le/cpu/run_py3.sh b/tensorflow/tools/ci_build/linux/ppc64le/cpu/run_py3.sh
new file mode 100755
index 0000000000..a04ac158f5
--- /dev/null
+++ b/tensorflow/tools/ci_build/linux/ppc64le/cpu/run_py3.sh
@@ -0,0 +1,37 @@
+#!/usr/bin/env bash
+# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# ==============================================================================
+
+set -e
+set -x
+
+N_JOBS=$(grep -c ^processor /proc/cpuinfo)
+
+echo ""
+echo "Bazel will use ${N_JOBS} concurrent job(s)."
+echo ""
+
+# Run configure.
+export TF_NEED_CUDA=0
+export CC_OPT_FLAGS='-mcpu=power8 -mtune=power8'
+export PYTHON_BIN_PATH=`which python3`
+yes "" | $PYTHON_BIN_PATH configure.py
+
+# Run bazel test command. Double test timeouts to avoid flakes.
+bazel test --test_tag_filters=-no_oss,-oss_serial,-gpu,-benchmark-test -k \
+ --jobs=${N_JOBS} --test_timeout 300,450,1200,3600 --build_tests_only --config=opt \
+ --test_output=errors --test_size_filters=small,medium -- \
+ //tensorflow/... -//tensorflow/compiler/...
diff --git a/tensorflow/tools/ci_build/linux/ppc64le/gpu/run_py2.sh b/tensorflow/tools/ci_build/linux/ppc64le/gpu/run_py2.sh
new file mode 100755
index 0000000000..77286e8448
--- /dev/null
+++ b/tensorflow/tools/ci_build/linux/ppc64le/gpu/run_py2.sh
@@ -0,0 +1,44 @@
+#!/usr/bin/env bash
+# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# ==============================================================================
+
+set -e
+set -x
+
+N_JOBS=$(grep -c ^processor /proc/cpuinfo)
+LT_JOBS=$(nvidia-smi --query-gpu=gpu_name --format=csv,noheader | wc -l)
+
+echo ""
+echo "Bazel will use ${N_JOBS} concurrent job(s)."
+echo "Bazel will use ${LT_JOBS} local test job(s)."
+echo ""
+
+# Run configure.
+export PYTHON_BIN_PATH=`which python2`
+export CC_OPT_FLAGS='-mcpu=power8 -mtune=power8'
+
+export TF_NEED_CUDA=1
+export TF_CUDA_COMPUTE_CAPABILITIES=3.7
+
+yes "" | $PYTHON_BIN_PATH configure.py
+
+# Run bazel test command. Double test timeouts to avoid flakes.
+bazel test --config=cuda --test_tag_filters=-no_oss,-oss_serial,-no_gpu,-benchmark-test -k \
+ --jobs=${N_JOBS} --test_timeout 300,450,1200,3600 \
+ --test_output=errors --local_test_jobs=${LT_JOBS} --build_tests_only --config=opt \
+ --test_size_filters=small,medium \
+ --run_under=//tensorflow/tools/ci_build/gpu_build:parallel_gpu_execute -- \
+ //tensorflow/... -//tensorflow/compiler/...
diff --git a/tensorflow/tools/ci_build/linux/ppc64le/gpu/run_py3.sh b/tensorflow/tools/ci_build/linux/ppc64le/gpu/run_py3.sh
new file mode 100755
index 0000000000..17aa52ee6b
--- /dev/null
+++ b/tensorflow/tools/ci_build/linux/ppc64le/gpu/run_py3.sh
@@ -0,0 +1,44 @@
+#!/usr/bin/env bash
+# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# ==============================================================================
+
+set -e
+set -x
+
+N_JOBS=$(grep -c ^processor /proc/cpuinfo)
+LT_JOBS=$(nvidia-smi --query-gpu=gpu_name --format=csv,noheader | wc -l)
+
+echo ""
+echo "Bazel will use ${N_JOBS} concurrent job(s)."
+echo "Bazel will use ${LT_JOBS} local test job(s)."
+echo ""
+
+# Run configure.
+export PYTHON_BIN_PATH=`which python3`
+export CC_OPT_FLAGS='-mcpu=power8 -mtune=power8'
+
+export TF_NEED_CUDA=1
+export TF_CUDA_COMPUTE_CAPABILITIES=3.7
+
+yes "" | $PYTHON_BIN_PATH configure.py
+
+# Run bazel test command. Double test timeouts to avoid flakes.
+bazel test --config=cuda --test_tag_filters=-no_oss,-oss_serial,-no_gpu,-benchmark-test -k \
+ --jobs=${N_JOBS} --test_timeout 300,450,1200,3600 \
+ --test_output=errors --local_test_jobs=${LT_JOBS} --build_tests_only --config=opt \
+ --test_size_filters=small,medium \
+ --run_under=//tensorflow/tools/ci_build/gpu_build:parallel_gpu_execute -- \
+ //tensorflow/... -//tensorflow/compiler/...
diff --git a/tensorflow/tools/ci_build/osx/libtensorflow_cpu.sh b/tensorflow/tools/ci_build/osx/libtensorflow_cpu.sh
index 7d471b4703..9ae5fc6bea 100755
--- a/tensorflow/tools/ci_build/osx/libtensorflow_cpu.sh
+++ b/tensorflow/tools/ci_build/osx/libtensorflow_cpu.sh
@@ -24,7 +24,6 @@ source "${SCRIPT_DIR}/../builds/libtensorflow.sh"
# Configure script
export PYTHON_BIN_PATH="/usr/bin/python"
-export TF_NEED_GCP=0
export TF_NEED_HDFS=0
export TF_NEED_CUDA=0
export TF_NEED_OPENCL_SYCL=0
diff --git a/tensorflow/tools/ci_build/osx/libtensorflow_gpu.sh b/tensorflow/tools/ci_build/osx/libtensorflow_gpu.sh
index 5a901af3e5..d95fcdeb85 100755
--- a/tensorflow/tools/ci_build/osx/libtensorflow_gpu.sh
+++ b/tensorflow/tools/ci_build/osx/libtensorflow_gpu.sh
@@ -26,7 +26,6 @@ source "${SCRIPT_DIR}/../builds/libtensorflow.sh"
export TF_NEED_CUDA=1
export LD_LIBRARY_PATH="/usr/local/cuda/lib:/usr/local/cuda/extras/CUPTI/lib:${LD_LIBRARY_PATH}"
export PYTHON_BIN_PATH="/usr/bin/python"
-export TF_NEED_GCP=0
export TF_NEED_HDFS=0
export TF_NEED_OPENCL_SYCL=0
export TF_NEED_MKL=0
diff --git a/tensorflow/tools/ci_build/pi/build_raspberry_pi.sh b/tensorflow/tools/ci_build/pi/build_raspberry_pi.sh
index 1bd1852ffc..3d27e84b81 100755
--- a/tensorflow/tools/ci_build/pi/build_raspberry_pi.sh
+++ b/tensorflow/tools/ci_build/pi/build_raspberry_pi.sh
@@ -65,6 +65,10 @@ OPENBLAS_SRC_PATH=/tmp/openblas_src/
sudo rm -rf ${OPENBLAS_SRC_PATH}
git clone https://github.com/xianyi/OpenBLAS ${OPENBLAS_SRC_PATH}
cd ${OPENBLAS_SRC_PATH}
+# The commit after this introduced Fortran compile issues. In theory they should
+# be solvable using NOFORTRAN=1 on the make command, but my initial tries didn't
+# work, so we pin to the last known good version.
+git checkout 5a6a2bed9aff0ba8a18651d5514d029c8cae336a
# If this path is changed, you'll also need to update
# cxx_builtin_include_directory in third_party/toolchains/cpus/arm/CROSSTOOL.tpl
OPENBLAS_INSTALL_PATH=/tmp/openblas_install/
@@ -79,6 +83,7 @@ if [[ $1 == "PI_ONE" ]]; then
--linkopt=-L${OPENBLAS_INSTALL_PATH}/lib/
--linkopt=-l:libopenblas.a"
echo "Building for the Pi One/Zero, with no NEON support"
+ WHEEL_ARCH=linux_armv6l
else
PI_COPTS='--copt=-march=armv7-a --copt=-mfpu=neon-vfpv4
--copt=-std=gnu11 --copt=-DS_IREAD=S_IRUSR --copt=-DS_IWRITE=S_IWUSR
@@ -86,6 +91,7 @@ else
--copt=-U__GCC_HAVE_SYNC_COMPARE_AND_SWAP_1
--copt=-U__GCC_HAVE_SYNC_COMPARE_AND_SWAP_2
--copt=-U__GCC_HAVE_SYNC_COMPARE_AND_SWAP_8'
+ WHEEL_ARCH=linux_armv7l
echo "Building for the Pi Two/Three, with NEON acceleration"
fi
@@ -100,6 +106,8 @@ bazel build -c opt ${PI_COPTS} \
--copt=-fomit-frame-pointer --cpu=armeabi \
--crosstool_top=@local_config_arm_compiler//:toolchain \
--verbose_failures \
+ //tensorflow:libtensorflow.so \
+ //tensorflow:libtensorflow_framework.so \
//tensorflow/tools/benchmark:benchmark_model \
//tensorflow/tools/pip_package:build_pip_package
@@ -112,10 +120,12 @@ BDIST_OPTS="--universal" \
bazel-bin/tensorflow/tools/pip_package/build_pip_package "${OUTDIR}"
OLD_FN=$(ls "${OUTDIR}" | grep -m 1 \.whl)
-SUB='s/tensorflow-([^-]+)-([^-]+)-.*/tensorflow-\1-\2-none-any.whl/; print'
+SUB='s/tensorflow-([^-]+)-([^-]+)-.*/tensorflow-\1-\2-none-'${WHEEL_ARCH}'.whl/; print'
NEW_FN=$(echo "${OLD_FN}" | perl -ne "${SUB}")
mv "${OUTDIR}/${OLD_FN}" "${OUTDIR}/${NEW_FN}"
cp bazel-bin/tensorflow/tools/benchmark/benchmark_model "${OUTDIR}"
+cp bazel-bin/tensorflow/libtensorflow.so "${OUTDIR}"
+cp bazel-bin/tensorflow/libtensorflow_framework.so "${OUTDIR}"
echo "Output can be found here:"
find "${OUTDIR}"
diff --git a/tensorflow/tools/ci_build/update_version.py b/tensorflow/tools/ci_build/update_version.py
index 52a0da9a14..4373d464b6 100755
--- a/tensorflow/tools/ci_build/update_version.py
+++ b/tensorflow/tools/ci_build/update_version.py
@@ -37,7 +37,7 @@ SETUP_PY = "%s/tools/pip_package/setup.py" % TF_SRC_DIR
README_MD = "./README.md"
DEVEL_DOCKERFILE = "%s/tools/docker/Dockerfile.devel" % TF_SRC_DIR
GPU_DEVEL_DOCKERFILE = "%s/tools/docker/Dockerfile.devel-gpu" % TF_SRC_DIR
-CPU_MKL_DEVEL_DOCKERFILE = "%s/tools/docker/Dockerfile.devel-cpu-mkl" % TF_SRC_DIR
+CPU_MKL_DEVEL_DOCKERFILE = "%s/tools/docker/Dockerfile.devel-mkl" % TF_SRC_DIR
RELEVANT_FILES = [TF_SRC_DIR,
VERSION_H,
SETUP_PY,
@@ -211,44 +211,6 @@ def update_readme(old_version, new_version):
"%s-" % pep_440_str, README_MD)
-def update_md_files(old_version, new_version):
- """Update the md doc files.
-
- Args:
- old_version: Version object of current version
- new_version: Version object of new version
- """
-
- old_pep_version = old_version.pep_440_str
- new_pep_version = new_version.pep_440_str
- for filename in ["linux", "mac", "windows", "sources"]:
- filepath = "%s/docs_src/install/install_%s.md" % (TF_SRC_DIR,
- filename)
-
- if filename == "sources" and "rc0" in new_pep_version:
- replace_string_in_line("(?<!<td>)tensorflow-%s" % old_pep_version,
- "tensorflow-%s" % new_pep_version, filepath)
- replace_string_in_line("(?<!<td>)tensorflow_gpu-%s" % old_pep_version,
- "tensorflow_gpu-%s" % new_pep_version, filepath)
- else:
- replace_string_in_line("tensorflow-%s" % old_pep_version,
- "tensorflow-%s" % new_pep_version, filepath)
- replace_string_in_line("tensorflow_gpu-%s" % old_pep_version,
- "tensorflow_gpu-%s" % new_pep_version, filepath)
- replace_string_in_line("TensorFlow %s" % old_pep_version,
- "TensorFlow %s" % new_pep_version, filepath)
-
- for filename in ["java", "go", "c"]:
- filepath = "%s/docs_src/install/install_%s.md" % (TF_SRC_DIR,
- filename)
- replace_string_in_line(r"x86_64-%s" % old_version,
- "x86_64-%s" % new_version, filepath)
- replace_string_in_line(r"libtensorflow-%s.jar" % old_version,
- "libtensorflow-%s.jar" % new_version, filepath)
- replace_string_in_line(r"<version>%s<\/version>" % old_version,
- "<version>%s</version>" % new_version, filepath)
-
-
def major_minor_change(old_version, new_version):
"""Check if a major or minor change occurred."""
major_mismatch = old_version.major != new_version.major
@@ -350,7 +312,6 @@ def main():
update_version_h(old_version, new_version)
update_setup_dot_py(old_version, new_version)
update_readme(old_version, new_version)
- update_md_files(old_version, new_version)
update_dockerfiles(old_version, new_version)
# Print transition details.
@@ -359,12 +320,6 @@ def main():
print("Patch: %s -> %s\n" % (old_version.patch, new_version.patch))
check_for_old_version(old_version, new_version)
- if "rc0" in str(new_version):
- print("\n\n\033[93mNOTE: Please update the tensorflow/docs_src/install/"
- "install_sources.md and add a line for tensorflow-%s and "
- "tensorflow_gpu-%s in the tested source configurations "
- "table.\033[0m\n" % (new_version.pep_440_str,
- new_version.pep_440_str))
if __name__ == "__main__":
diff --git a/tensorflow/tools/ci_build/windows/bazel/bazel_test_lib.sh b/tensorflow/tools/ci_build/windows/bazel/bazel_test_lib.sh
index d654b433e7..27b350e13e 100644
--- a/tensorflow/tools/ci_build/windows/bazel/bazel_test_lib.sh
+++ b/tensorflow/tools/ci_build/windows/bazel/bazel_test_lib.sh
@@ -14,130 +14,30 @@
# limitations under the License.
# ==============================================================================
#
-# C++ tests
-failing_cpu_cc_tests="\
- //tensorflow/core/kernels:control_flow_ops_test + \
- //tensorflow/core:example_example_parser_configuration_test + \
- //tensorflow/core:lib_core_status_test + \
- //tensorflow/core:lib_monitoring_collection_registry_test + \
- //tensorflow/core:lib_strings_numbers_test + \
- //tensorflow/core/platform/hadoop:hadoop_file_system_test + \
- //tensorflow/core:platform_file_system_test + \
- //tensorflow/core:platform_logging_test + \
- //tensorflow/core:util_sparse_sparse_tensor_test + \
- //tensorflow/cc:framework_gradient_checker_test + \
- //tensorflow/cc:framework_gradients_test + \
- //tensorflow/cc:gradients_array_grad_test + \
- //tensorflow/cc:gradients_math_grad_test + \
- //tensorflow/cc:gradients_nn_grad_test + \
- //tensorflow/cc/saved_model:loader_test \
-"
-
-broken_cpu_cc_tests="\
- //tensorflow/cc:framework_cc_ops_test + \
- //tensorflow/core/platform/cloud:time_util_test + \
- //tensorflow/core/platform/cloud:oauth_client_test + \
- //tensorflow/core/platform/cloud:http_request_test + \
- //tensorflow/core/platform/cloud:google_auth_provider_test + \
- //tensorflow/core/platform/cloud:gcs_file_system_test + \
- //tensorflow/core/kernels/cloud:bigquery_table_accessor_test + \
- //tensorflow/core/kernels/hexagon:graph_transferer_test + \
- //tensorflow/core/kernels:remote_fused_graph_execute_utils_test + \
- //tensorflow/core/kernels:requantize_op_test + \
- //tensorflow/core/kernels:requantization_range_op_test + \
- //tensorflow/core/kernels:quantized_reshape_op_test + \
- //tensorflow/core/kernels:quantized_pooling_ops_test + \
- //tensorflow/core/kernels:quantized_matmul_op_test + \
- //tensorflow/core/kernels:quantized_conv_ops_test + \
- //tensorflow/core/kernels:quantized_concat_op_test + \
- //tensorflow/core/kernels:quantized_bias_add_op_test + \
- //tensorflow/core/kernels:quantized_batch_norm_op_test + \
- //tensorflow/core/kernels:quantized_activation_ops_test + \
- //tensorflow/core/kernels:quantize_op_test + \
- //tensorflow/core/kernels:quantize_down_and_shrink_range_op_test + \
- //tensorflow/core/kernels:quantize_and_dequantize_op_test_gpu + \
- //tensorflow/core/kernels:quantize_and_dequantize_op_test + \
- //tensorflow/core/kernels:quantization_utils_test + \
- //tensorflow/core/kernels:debug_ops_test + \
- //tensorflow/core/distributed_runtime/rpc:rpc_rendezvous_mgr_test_gpu + \
- //tensorflow/core/distributed_runtime/rpc:rpc_rendezvous_mgr_test + \
- //tensorflow/core/distributed_runtime/rpc:grpc_tensor_coding_test + \
- //tensorflow/core/distributed_runtime/rpc:grpc_session_test_gpu + \
- //tensorflow/core/distributed_runtime/rpc:grpc_session_test + \
- //tensorflow/core/distributed_runtime/rpc:grpc_channel_test_gpu + \
- //tensorflow/core/distributed_runtime/rpc:grpc_channel_test + \
- //tensorflow/core/distributed_runtime:remote_device_test_gpu + \
- //tensorflow/core/distributed_runtime:remote_device_test + \
- //tensorflow/core/distributed_runtime:executor_test_gpu + \
- //tensorflow/core/distributed_runtime:executor_test + \
- //tensorflow/core/debug:debug_gateway_test + \
- //tensorflow/core/debug:debug_grpc_io_utils_test + \
- //tensorflow/core:util_reporter_test + \
- //tensorflow/core:util_memmapped_file_system_test + \
- //tensorflow/core:platform_subprocess_test + \
- //tensorflow/core:platform_profile_utils_cpu_utils_test + \
- //tensorflow/core:lib_jpeg_jpeg_mem_unittest + \
- //tensorflow/core/debug:debug_io_utils_test \
-"
-
-# lib_core_threadpool_test is timeout, but it passes when running alone
-extra_failing_gpu_cc_tests="\
- //tensorflow/core:lib_core_threadpool_test + \
- //tensorflow/core:cuda_libdevice_path_test + \
- //tensorflow/core:common_runtime_direct_session_test + \
- //tensorflow/core:common_runtime_direct_session_with_tracking_alloc_test + \
- //tensorflow/core:device_tracer_test + \
- //tensorflow/core:ops_math_grad_test \
-"
-
-exclude_cpu_cc_tests="${failing_cpu_cc_tests} + ${broken_cpu_cc_tests}"
-
-exclude_gpu_cc_tests="${extra_failing_gpu_cc_tests} + ${exclude_cpu_cc_tests}"
function run_configure_for_cpu_build {
- # Due to a bug in Bazel: https://github.com/bazelbuild/bazel/issues/2182
- # yes "" | ./configure doesn't work on Windows, so we set all the
- # environment variables in advance to avoid interact with the script.
- export TF_NEED_CUDA=0
- if [ -z "$TF_ENABLE_XLA" ]; then
- export TF_ENABLE_XLA=0
- fi
- if [ -z "$TF_NEED_MKL" ]; then
- export TF_NEED_MKL=0
- fi
- export TF_NEED_VERBS=0
- export TF_NEED_GCP=1
- export TF_NEED_HDFS=0
- export TF_NEED_OPENCL_SYCL=0
- echo "" | ./configure
+ yes "" | ./configure
}
function run_configure_for_gpu_build {
- # Due to a bug in Bazel: https://github.com/bazelbuild/bazel/issues/2182
- # yes "" | ./configure doesn't work on Windows, so we set all the
- # environment variables in advance to avoid interact with the script.
+ # Enable CUDA support
export TF_NEED_CUDA=1
- export TF_CUDA_VERSION=9.0
- export CUDA_TOOLKIT_PATH="C:/Program Files/NVIDIA GPU Computing Toolkit/CUDA/v9.0"
- export TF_CUDNN_VERSION=7.0
- if [ -z "$CUDNN_INSTALL_PATH" ]; then
- export CUDNN_INSTALL_PATH="C:/tools/cuda"
- fi
- export TF_CUDA_COMPUTE_CAPABILITIES="3.7"
- if [ -z "$TF_ENABLE_XLA" ]; then
- export TF_ENABLE_XLA=0
- fi
- export TF_NEED_VERBS=0
- export TF_NEED_MKL=0
- export TF_NEED_GCP=0
- export TF_NEED_HDFS=0
- export TF_NEED_OPENCL_SYCL=0
- # TODO(pcloudy): Remove this after TensorFlow uses its own CRSOOTOOL
- # for GPU build on Windows
- export USE_MSVC_WRAPPER=1
+ yes "" | ./configure
+}
- echo "" | ./configure
+function set_remote_cache_options {
+ echo "build --remote_instance_name=projects/tensorflow-testing/instances/default_instance" >> "${TMP_BAZELRC}"
+ echo "build --experimental_remote_platform_override='properties:{name:\"build\" value:\"windows-x64\"}'" >> "${TMP_BAZELRC}"
+ echo "build --remote_cache=remotebuildexecution.googleapis.com" >> "${TMP_BAZELRC}"
+ echo "build --tls_enabled=true" >> "${TMP_BAZELRC}"
+ echo "build --remote_timeout=3600" >> "${TMP_BAZELRC}"
+ echo "build --auth_enabled=true" >> "${TMP_BAZELRC}"
+ echo "build --spawn_strategy=standalone" >> "${TMP_BAZELRC}"
+ echo "build --strategy=Javac=standalone" >> "${TMP_BAZELRC}"
+ echo "build --strategy=Closure=standalone" >> "${TMP_BAZELRC}"
+ echo "build --genrule_strategy=standalone" >> "${TMP_BAZELRC}"
+ echo "build --google_credentials=$GOOGLE_CLOUD_CREDENTIAL" >> "${TMP_BAZELRC}"
}
function create_python_test_dir() {
diff --git a/tensorflow/tools/ci_build/windows/bazel/common_env.sh b/tensorflow/tools/ci_build/windows/bazel/common_env.sh
index 7d4cc7ac30..c18f0d6e69 100644
--- a/tensorflow/tools/ci_build/windows/bazel/common_env.sh
+++ b/tensorflow/tools/ci_build/windows/bazel/common_env.sh
@@ -26,7 +26,8 @@
# * Bazel windows executable copied as "bazel.exe" and included in PATH.
# Use a temporary directory with a short name.
-export TMPDIR="C:/tmp"
+export TMPDIR=${TMPDIR:-"C:/tmp"}
+export TMPDIR=$(cygpath -m "$TMPDIR")
mkdir -p "$TMPDIR"
# Set bash path
@@ -44,11 +45,20 @@ export PYTHON_LIB_PATH="C:/${PYTHON_BASE_PATH}/lib/site-packages"
# Add python into PATH, it's needed because gen_git_source.py uses
# '/usr/bin/env python' as a shebang
export PATH="/c/${PYTHON_BASE_PATH}:$PATH"
+# Add git into PATH needed for gen_git_source.py
+export PATH="/c/Program Files/Git/cmd:$PATH"
# Make sure we have pip in PATH
export PATH="/c/${PYTHON_BASE_PATH}/Scripts:$PATH"
+# Set default values for CUDA-related environment variables
+export TF_CUDA_VERSION=${TF_CUDA_VERSION:-9.0}
+export TF_CUDNN_VERSION=${TF_CUDNN_VERSION:-7}
+export TF_CUDA_COMPUTE_CAPABILITIES=${TF_CUDA_COMPUTE_CAPABILITIES:-3.7}
+export CUDA_TOOLKIT_PATH=${CUDA_TOOLKIT_PATH:-"C:/Program Files/NVIDIA GPU Computing Toolkit/CUDA/v${TF_CUDA_VERSION}"}
+export CUDNN_INSTALL_PATH=${CUDNN_INSTALL_PATH:-"C:/tools/cuda"}
+
# Add Cuda and Cudnn dll directories into PATH
-export PATH="/c/Program Files/NVIDIA GPU Computing Toolkit/CUDA/v9.0/bin:$PATH"
-export PATH="/c/Program Files/NVIDIA GPU Computing Toolkit/CUDA/v9.0/extras/CUPTI/libx64:$PATH"
-export PATH="/c/tools/cuda/bin:$PATH"
+export PATH="$(cygpath -u "${CUDA_TOOLKIT_PATH}")/bin:$PATH"
+export PATH="$(cygpath -u "${CUDA_TOOLKIT_PATH}")/extras/CUPTI/libx64:$PATH"
+export PATH="$(cygpath -u "${CUDNN_INSTALL_PATH}")/bin:$PATH"
diff --git a/tensorflow/tools/ci_build/windows/cpu/bazel/run_cc_test_windows.sh b/tensorflow/tools/ci_build/windows/cpu/bazel/run_cc_test_windows.sh
index 748a961e44..dc9af221ec 100644
--- a/tensorflow/tools/ci_build/windows/cpu/bazel/run_cc_test_windows.sh
+++ b/tensorflow/tools/ci_build/windows/cpu/bazel/run_cc_test_windows.sh
@@ -44,7 +44,7 @@ source "tensorflow/tools/ci_build/windows/bazel/bazel_test_lib.sh" \
run_configure_for_cpu_build
-# Compliling the following test is extremely slow with -c opt
+# Compiling the following test is extremely slow with -c opt
slow_compiling_test="//tensorflow/core/kernels:eigen_backward_spatial_convolutions_test"
# Find all the passing cc_tests on Windows and store them in a variable
diff --git a/tensorflow/tools/ci_build/windows/cpu/pip/build_tf_windows.sh b/tensorflow/tools/ci_build/windows/cpu/pip/build_tf_windows.sh
index 5e9ae497e1..177ef390db 100644
--- a/tensorflow/tools/ci_build/windows/cpu/pip/build_tf_windows.sh
+++ b/tensorflow/tools/ci_build/windows/cpu/pip/build_tf_windows.sh
@@ -42,41 +42,93 @@ source "tensorflow/tools/ci_build/windows/bazel/common_env.sh" \
source "tensorflow/tools/ci_build/windows/bazel/bazel_test_lib.sh" \
|| { echo "Failed to source bazel_test_lib.sh" >&2; exit 1; }
-skip_test=0
+# Recreate an empty bazelrc file under source root
+export TMP_BAZELRC=.tmp.bazelrc
+rm -f "${TMP_BAZELRC}"
+touch "${TMP_BAZELRC}"
+function cleanup {
+ # Remove all options in .tmp.bazelrc
+ echo "" > "${TMP_BAZELRC}"
+}
+trap cleanup EXIT
+
+PY_TEST_DIR="py_test_dir"
+
+SKIP_TEST=0
+RELEASE_BUILD=0
+TEST_TARGET="//${PY_TEST_DIR}/tensorflow/python/..."
+
+# --skip_test Skip running tests
+# --enable_remote_cache Add options to enable remote cache for build and test
+# --release_build Build for release; compilation takes longer in order to
+# ensure performance
+# --test_core_only Use tensorflow/python/... as test target
+# --test_contrib_only Use tensorflow/contrib/... as test target
for ARG in "$@"; do
- if [[ "$ARG" == --skip_test ]]; then
- skip_test=1
- fi
+ case "$ARG" in
+ --tf_nightly) TF_NIGHTLY=1 ;;
+ --skip_test) SKIP_TEST=1 ;;
+ --enable_remote_cache) set_remote_cache_options ;;
+ --release_build) RELEASE_BUILD=1 ;;
+ --test_core_only) TEST_TARGET="//${PY_TEST_DIR}/tensorflow/python/..." ;;
+ --test_contrib_only) TEST_TARGET="//${PY_TEST_DIR}/tensorflow/contrib/..." ;;
+ *)
+ esac
done
+if [[ "$RELEASE_BUILD" == 1 ]]; then
+ # Overriding Eigen strong inline speeds up compilation of conv_grad_ops_3d.cc and conv_ops_3d.cc
+ # by 20 minutes. See https://github.com/tensorflow/tensorflow/issues/10521
+ # Because this hurts TF performance, we don't override it in release builds.
+ export TF_OVERRIDE_EIGEN_STRONG_INLINE=0
+else
+ export TF_OVERRIDE_EIGEN_STRONG_INLINE=1
+fi
+
+if [[ "$TF_NIGHTLY" == 1 ]]; then
+ python tensorflow/tools/ci_build/update_version.py --nightly
+ EXTRA_PIP_FLAG="--nightly_flag"
+fi
+
+# Enable a short object file path to avoid the long-path issue on Windows.
+echo "startup --output_user_root=${TMPDIR}" >> "${TMP_BAZELRC}"
+
+if ! grep -q "import %workspace%/${TMP_BAZELRC}" .bazelrc; then
+ echo "import %workspace%/${TMP_BAZELRC}" >> .bazelrc
+fi
+
run_configure_for_cpu_build
-# --define=override_eigen_strong_inline=true speeds up the compiling of conv_grad_ops_3d.cc and conv_ops_3d.cc
-# by 20 minutes. See https://github.com/tensorflow/tensorflow/issues/10521
-BUILD_OPTS="--define=override_eigen_strong_inline=true"
-bazel build -c opt $BUILD_OPTS tensorflow/tools/pip_package:build_pip_package || exit $?
+bazel build --announce_rc --config=opt tensorflow/tools/pip_package:build_pip_package || exit $?
-if [[ "$skip_test" == 1 ]]; then
+if [[ "$SKIP_TEST" == 1 ]]; then
exit 0
fi
# Create a python test directory to avoid package name conflict
-PY_TEST_DIR="py_test_dir"
create_python_test_dir "${PY_TEST_DIR}"
-./bazel-bin/tensorflow/tools/pip_package/build_pip_package "$PWD/${PY_TEST_DIR}"
+./bazel-bin/tensorflow/tools/pip_package/build_pip_package "$PWD/${PY_TEST_DIR}" "${EXTRA_PIP_FLAG}"
+
+if [[ "$TF_NIGHTLY" == 1 ]]; then
+ exit 0
+fi
# Running python tests on Windows needs pip package installed
PIP_NAME=$(ls ${PY_TEST_DIR}/tensorflow-*.whl)
reinstall_tensorflow_pip ${PIP_NAME}
+# NUMBER_OF_PROCESSORS is predefined on Windows
+N_JOBS="${NUMBER_OF_PROCESSORS}"
+
# Define no_tensorflow_py_deps=true so that every py_test has no deps anymore,
# which will result in testing the system-installed tensorflow
-bazel test -c opt $BUILD_OPTS -k --test_output=errors \
+bazel test --announce_rc --config=opt -k --test_output=errors \
--define=no_tensorflow_py_deps=true --test_lang_filters=py \
--test_tag_filters=-no_pip,-no_windows,-no_oss \
--build_tag_filters=-no_pip,-no_windows,-no_oss --build_tests_only \
+ --test_size_filters=small,medium \
+ --jobs="${N_JOBS}" --test_timeout="300,450,1200,3600" \
--flaky_test_attempts=3 \
- //${PY_TEST_DIR}/tensorflow/python/... \
- //${PY_TEST_DIR}/tensorflow/contrib/...
+ ${TEST_TARGET}
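With the new argument parsing, the CPU pip script can be driven per job; two hypothetical invocations (run from the TensorFlow source root, as the script assumes) might look like:

    # Release wheel plus core Python tests only
    tensorflow/tools/ci_build/windows/cpu/pip/build_tf_windows.sh --release_build --test_core_only

    # Nightly wheel; the script exits after building the pip package
    tensorflow/tools/ci_build/windows/cpu/pip/build_tf_windows.sh --tf_nightly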
diff --git a/tensorflow/tools/ci_build/windows/gpu/bazel/run_cc_test_windows.sh b/tensorflow/tools/ci_build/windows/gpu/bazel/run_cc_test_windows.sh
index f26f8727e5..f1114f4ffa 100644
--- a/tensorflow/tools/ci_build/windows/gpu/bazel/run_cc_test_windows.sh
+++ b/tensorflow/tools/ci_build/windows/gpu/bazel/run_cc_test_windows.sh
@@ -46,7 +46,7 @@ clean_output_base
run_configure_for_gpu_build
-# Compliling the following test is extremely slow with -c opt
+# Compiling the following test is extremely slow with -c opt
slow_compiling_test="//tensorflow/core/kernels:eigen_backward_spatial_convolutions_test"
# Find all the passing cc_tests on Windows and store them in a variable
diff --git a/tensorflow/tools/ci_build/windows/gpu/cmake/run_build.bat b/tensorflow/tools/ci_build/windows/gpu/cmake/run_build.bat
index 4656afe025..cec5b717f8 100644
--- a/tensorflow/tools/ci_build/windows/gpu/cmake/run_build.bat
+++ b/tensorflow/tools/ci_build/windows/gpu/cmake/run_build.bat
@@ -30,7 +30,6 @@ IF DEFINED SWIG_EXE (ECHO SWIG_EXE is set to %SWIG_EXE%) ELSE (SET SWIG_EXE="C:\
IF DEFINED PY_EXE (ECHO PY_EXE is set to %PY_EXE%) ELSE (SET PY_EXE="C:\Program Files\Anaconda3\python.exe")
IF DEFINED PY_LIB (ECHO PY_LIB is set to %PY_LIB%) ELSE (SET PY_LIB="C:\Program Files\Anaconda3\libs\python35.lib")
IF DEFINED CUDNN_HOME (ECHO CUDNN_HOME is set to %CUDNN_HOME%) ELSE (SET CUDNN_HOME="c:\tools\cuda")
-verbosity:quiet
IF DEFINED DISABLE_FORCEINLINE (ECHO DISABLE_FORCEINLINE is set to %DISABLE_FORCEINLINE%) ELSE (SET DISABLE_FORCEINLINE="OFF")
SET CMAKE_DIR=%REPO_ROOT%\tensorflow\contrib\cmake
diff --git a/tensorflow/tools/ci_build/windows/gpu/cmake/run_py.bat b/tensorflow/tools/ci_build/windows/gpu/cmake/run_py.bat
index 97829892b1..3b437d3c58 100644
--- a/tensorflow/tools/ci_build/windows/gpu/cmake/run_py.bat
+++ b/tensorflow/tools/ci_build/windows/gpu/cmake/run_py.bat
@@ -31,6 +31,9 @@ IF DEFINED PIP_EXE (ECHO PIP_EXE is set to %PIP_EXE%) ELSE (SET PIP_EXE="C:\Prog
:: Set ctest binary location.
IF DEFINED CTEST_EXE (ECHO CTEST_EXE is set to %CTEST_EXE%) ELSE (SET CTEST_EXE="C:\Program Files\cmake\bin\ctest.exe")
+:: Install absl-py.
+%PIP_EXE% install --upgrade absl-py
+
:: Run the CMAKE build to build the pip package.
CALL %REPO_ROOT%\tensorflow\tools\ci_build\windows\gpu\cmake\run_build.bat
if %errorlevel% neq 0 exit /b %errorlevel%
@@ -40,9 +43,6 @@ DIR %REPO_ROOT%\%BUILD_DIR%\tf_python\dist\ /S /B > wheel_filename_file
set /p WHEEL_FILENAME=<wheel_filename_file
del wheel_filename_file
-:: Install absl-py.
-%PIP_EXE% install --upgrade absl-py
-
:: Install the pip package.
echo Installing PIP package...
%PIP_EXE% install --upgrade --no-deps %WHEEL_FILENAME% -v -v
diff --git a/tensorflow/tools/ci_build/windows/gpu/pip/build_tf_windows.sh b/tensorflow/tools/ci_build/windows/gpu/pip/build_tf_windows.sh
index 922bb67bbf..34847e637a 100644
--- a/tensorflow/tools/ci_build/windows/gpu/pip/build_tf_windows.sh
+++ b/tensorflow/tools/ci_build/windows/gpu/pip/build_tf_windows.sh
@@ -42,25 +42,98 @@ source "tensorflow/tools/ci_build/windows/bazel/common_env.sh" \
source "tensorflow/tools/ci_build/windows/bazel/bazel_test_lib.sh" \
|| { echo "Failed to source bazel_test_lib.sh" >&2; exit 1; }
+# Recreate an empty bazelrc file under source root
+export TMP_BAZELRC=.tmp.bazelrc
+rm -f "${TMP_BAZELRC}"
+touch "${TMP_BAZELRC}"
+
+function cleanup {
+ # Remove all options in .tmp.bazelrc
+ echo "" > "${TMP_BAZELRC}"
+}
+trap cleanup EXIT
+
+PY_TEST_DIR="py_test_dir"
+
+SKIP_TEST=0
+RELEASE_BUILD=0
+TEST_TARGET="//${PY_TEST_DIR}/tensorflow/python/..."
+
+# --skip_test Skip running tests
+# --enable_remote_cache Add options to enable remote cache for build and test
+# --release_build Build for release; compilation takes longer in order to
+# ensure performance
+# --test_core_only Use tensorflow/python/... as test target
+# --test_contrib_only Use tensorflow/contrib/... as test target
+for ARG in "$@"; do
+ case "$ARG" in
+ --tf_nightly) TF_NIGHTLY=1 ;;
+ --skip_test) SKIP_TEST=1 ;;
+ --enable_remote_cache) set_remote_cache_options ;;
+ --release_build) RELEASE_BUILD=1 ;;
+ --test_core_only) TEST_TARGET="//${PY_TEST_DIR}/tensorflow/python/..." ;;
+ --test_contrib_only) TEST_TARGET="//${PY_TEST_DIR}/tensorflow/contrib/..." ;;
+ *)
+ esac
+done
+
+if [[ "$RELEASE_BUILD" == 1 ]]; then
+ # Overriding Eigen strong inline speeds up compilation of conv_grad_ops_3d.cc and conv_ops_3d.cc
+ # by 20 minutes. See https://github.com/tensorflow/tensorflow/issues/10521
+ # Because this hurts TF performance, we don't override it in release builds.
+ export TF_OVERRIDE_EIGEN_STRONG_INLINE=0
+else
+ export TF_OVERRIDE_EIGEN_STRONG_INLINE=1
+fi
+
+if [[ "$TF_NIGHTLY" == 1 ]]; then
+ python tensorflow/tools/ci_build/update_version.py --nightly
+ EXTRA_PIP_FLAG="--nightly_flag"
+fi
+
+# Enable a short object file path to avoid the long-path issue on Windows.
+echo "startup --output_user_root=${TMPDIR}" >> "${TMP_BAZELRC}"
+
+# Disable nvcc warnings to reduce log file size.
+echo "build --copt=-nvcc_options=disable-warnings" >> "${TMP_BAZELRC}"
+
+if ! grep -q "import %workspace%/${TMP_BAZELRC}" .bazelrc; then
+ echo "import %workspace%/${TMP_BAZELRC}" >> .bazelrc
+fi
+
run_configure_for_gpu_build
-bazel build -c opt tensorflow/tools/pip_package:build_pip_package || exit $?
+bazel build --announce_rc --config=opt tensorflow/tools/pip_package:build_pip_package || exit $?
+
+if [[ "$SKIP_TEST" == 1 ]]; then
+ exit 0
+fi
# Create a python test directory to avoid package name conflict
-PY_TEST_DIR="py_test_dir"
create_python_test_dir "${PY_TEST_DIR}"
-./bazel-bin/tensorflow/tools/pip_package/build_pip_package "$PWD/${PY_TEST_DIR}"
+./bazel-bin/tensorflow/tools/pip_package/build_pip_package "$PWD/${PY_TEST_DIR}" --gpu "${EXTRA_PIP_FLAG}"
+
+if [[ "$TF_NIGHTLY" == 1 ]]; then
+ exit 0
+fi
# Running python tests on Windows needs pip package installed
-PIP_NAME=$(ls ${PY_TEST_DIR}/tensorflow-*.whl)
+PIP_NAME=$(ls ${PY_TEST_DIR}/tensorflow_gpu-*.whl)
reinstall_tensorflow_pip ${PIP_NAME}
+TF_GPU_COUNT=${TF_GPU_COUNT:-4}
+
# Define no_tensorflow_py_deps=true so that every py_test has no deps anymore,
# which will result in testing the system-installed tensorflow
# GPU tests are very flaky when running concurrently, so set local_test_jobs=1
-bazel test -c opt -k --test_output=errors \
+bazel test --announce_rc --config=opt -k --test_output=errors \
+ --test_env=TF_GPU_COUNT \
+ --run_under=//tensorflow/tools/ci_build/gpu_build:parallel_gpu_execute \
--define=no_tensorflow_py_deps=true --test_lang_filters=py \
- --test_tag_filters=-no_pip,-no_windows,-no_windows_gpu,-no_gpu,-no_pip_gpu,no_oss \
- --build_tag_filters=-no_pip,-no_windows,-no_windows_gpu,-no_gpu,-no_pip_gpu,no_oss \
- --local_test_jobs=1 --build_tests_only //${PY_TEST_DIR}/tensorflow/python/...
+ --test_tag_filters=-no_pip,-no_windows,-no_windows_gpu,-no_gpu,-no_pip_gpu,-no_oss \
+ --build_tag_filters=-no_pip,-no_windows,-no_windows_gpu,-no_gpu,-no_pip_gpu,-no_oss --build_tests_only \
+ --test_size_filters=small,medium \
+ --local_test_jobs=$TF_GPU_COUNT --test_timeout="300,450,1200,3600" \
+ --flaky_test_attempts=3 \
+ ${TEST_TARGET}
diff --git a/tensorflow/tools/ci_build/windows/libtensorflow_cpu.sh b/tensorflow/tools/ci_build/windows/libtensorflow_cpu.sh
index 583d1d5f09..fdbd1120b2 100755
--- a/tensorflow/tools/ci_build/windows/libtensorflow_cpu.sh
+++ b/tensorflow/tools/ci_build/windows/libtensorflow_cpu.sh
@@ -41,7 +41,7 @@ run_configure_for_cpu_build
# build_libtensorflow_tarball in ../builds/libtensorflow.sh
# cannot be used on Windows since it relies on pkg_tar rules.
# So we do something special here
-bazel build -c opt --copt=/arch:AVX \
+bazel --output_user_root=${TMPDIR} build -c opt --copt=/arch:AVX \
tensorflow:libtensorflow.so \
tensorflow/tools/lib_package:clicenses_generate \
tensorflow/java:libtensorflow_jni.so \
diff --git a/tensorflow/tools/ci_build/xla/linux/gpu/run_py3.sh b/tensorflow/tools/ci_build/xla/linux/gpu/run_py3.sh
index a94a627dfb..d085e21b03 100755
--- a/tensorflow/tools/ci_build/xla/linux/gpu/run_py3.sh
+++ b/tensorflow/tools/ci_build/xla/linux/gpu/run_py3.sh
@@ -35,8 +35,9 @@ echo "build --distinct_host_configuration=false" >> .tf_configure.bazelrc
bazel clean
# Run bazel test command. Double test timeouts to avoid flakes.
-bazel test --config=cuda --test_tag_filters=-no_gpu,-benchmark-test -k \
+bazel test --config=cuda --test_tag_filters=-no_gpu,-benchmark-test,-no_oss -k \
--jobs=${N_JOBS} --test_timeout 300,450,1200,3600 \
+ --test_size_filters=small,medium \
--build_tests_only --test_output=errors --local_test_jobs=8 \
--run_under=//tensorflow/tools/ci_build/gpu_build:parallel_gpu_execute \
--config=xla -- \
diff --git a/tensorflow/tools/common/BUILD b/tensorflow/tools/common/BUILD
index b9032c046e..8c01d15a80 100644
--- a/tensorflow/tools/common/BUILD
+++ b/tensorflow/tools/common/BUILD
@@ -40,7 +40,24 @@ py_test(
srcs = ["traverse_test.py"],
srcs_version = "PY2AND3",
deps = [
+ ":test_module1",
+ ":test_module2",
":traverse",
"//tensorflow/python:platform_test",
],
)
+
+py_library(
+ name = "test_module1",
+ srcs = ["test_module1.py"],
+ srcs_version = "PY2AND3",
+ deps = [
+ ":test_module2",
+ ],
+)
+
+py_library(
+ name = "test_module2",
+ srcs = ["test_module2.py"],
+ srcs_version = "PY2AND3",
+)
diff --git a/tensorflow/tools/common/public_api.py b/tensorflow/tools/common/public_api.py
index e0acead919..82bb0713c4 100644
--- a/tensorflow/tools/common/public_api.py
+++ b/tensorflow/tools/common/public_api.py
@@ -50,6 +50,7 @@ class PublicAPIVisitor(object):
# Each entry maps a module path to a name to ignore in traversal.
self._do_not_descend_map = {
'tf': [
+ 'compiler',
'core',
'examples',
'flags', # Don't add flags
@@ -69,6 +70,8 @@ class PublicAPIVisitor(object):
'tf.app': ['flags'],
# Imported for compatibility between py2/3.
'tf.test': ['mock'],
+ # Externalized modules of the Keras API.
+ 'tf.keras': ['applications', 'preprocessing']
}
@property
@@ -99,9 +102,10 @@ class PublicAPIVisitor(object):
"""Override the default root name of 'tf'."""
self._root_name = root_name
- def _is_private(self, path, name):
+ def _is_private(self, path, name, obj=None):
"""Return whether a name is private."""
# TODO(wicke): Find out what names to exclude.
+ del obj # Unused.
return ((path in self._private_map and
name in self._private_map[path]) or
(name.startswith('_') and not re.match('__.*__$', name) or
@@ -126,7 +130,7 @@ class PublicAPIVisitor(object):
# Remove things that are not visible.
for name, child in list(children):
- if self._is_private(full_path, name):
+ if self._is_private(full_path, name, child):
children.remove((name, child))
self._visitor(path, parent, children)
diff --git a/tensorflow/tools/common/test_module1.py b/tensorflow/tools/common/test_module1.py
new file mode 100644
index 0000000000..cc185cf36e
--- /dev/null
+++ b/tensorflow/tools/common/test_module1.py
@@ -0,0 +1,31 @@
+# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ==============================================================================
+"""A module target for TraverseTest.test_module."""
+
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+from tensorflow.tools.common import test_module2
+
+
+class ModuleClass1(object):
+
+ def __init__(self):
+ self._m2 = test_module2.ModuleClass2()
+
+ def __model_class1_method__(self):
+ pass
+
diff --git a/tensorflow/tools/common/test_module2.py b/tensorflow/tools/common/test_module2.py
new file mode 100644
index 0000000000..d9da99d9c0
--- /dev/null
+++ b/tensorflow/tools/common/test_module2.py
@@ -0,0 +1,29 @@
+# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ==============================================================================
+"""A module target for TraverseTest.test_module."""
+
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+
+class ModuleClass2(object):
+
+ def __init__(self):
+ pass
+
+ def __model_class1_method__(self):
+ pass
+
diff --git a/tensorflow/tools/common/traverse_test.py b/tensorflow/tools/common/traverse_test.py
index eb195ec18e..ed410694ce 100644
--- a/tensorflow/tools/common/traverse_test.py
+++ b/tensorflow/tools/common/traverse_test.py
@@ -18,9 +18,9 @@ from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
-import sys
-
from tensorflow.python.platform import googletest
+from tensorflow.tools.common import test_module1
+from tensorflow.tools.common import test_module2
from tensorflow.tools.common import traverse
@@ -30,10 +30,6 @@ class TestVisitor(object):
self.call_log = []
def __call__(self, path, parent, children):
- # Do not traverse googletest, it's very deep.
- for item in list(children):
- if item[1] is googletest:
- children.remove(item)
self.call_log += [(path, parent, children)]
@@ -51,13 +47,12 @@ class TraverseTest(googletest.TestCase):
def test_module(self):
visitor = TestVisitor()
- traverse.traverse(sys.modules[__name__], visitor)
+ traverse.traverse(test_module1, visitor)
called = [parent for _, parent, _ in visitor.call_log]
- self.assertIn(TestVisitor, called)
- self.assertIn(TraverseTest, called)
- self.assertIn(traverse, called)
+ self.assertIn(test_module1.ModuleClass1, called)
+ self.assertIn(test_module2.ModuleClass2, called)
def test_class(self):
visitor = TestVisitor()
diff --git a/tensorflow/tools/compatibility/BUILD b/tensorflow/tools/compatibility/BUILD
index b7bfb29aae..55792c51fe 100644
--- a/tensorflow/tools/compatibility/BUILD
+++ b/tensorflow/tools/compatibility/BUILD
@@ -8,10 +8,17 @@ load(
"tf_cc_test", # @unused
)
+py_library(
+ name = "ast_edits",
+ srcs = ["ast_edits.py"],
+ srcs_version = "PY2AND3",
+)
+
py_binary(
name = "tf_upgrade",
srcs = ["tf_upgrade.py"],
srcs_version = "PY2AND3",
+ deps = [":ast_edits"],
)
py_test(
@@ -26,6 +33,28 @@ py_test(
],
)
+py_binary(
+ name = "tf_upgrade_v2",
+ srcs = [
+ "renames_v2.py",
+ "tf_upgrade_v2.py",
+ ],
+ srcs_version = "PY2AND3",
+ deps = [":ast_edits"],
+)
+
+py_test(
+ name = "tf_upgrade_v2_test",
+ srcs = ["tf_upgrade_v2_test.py"],
+ srcs_version = "PY2AND3",
+ deps = [
+ ":tf_upgrade_v2",
+ "//tensorflow/python:client_testlib",
+ "//tensorflow/python:framework_test_lib",
+ "@six_archive//:six",
+ ],
+)
+
# Keep for reference, this test will succeed in 0.11 but fail in 1.0
# py_test(
# name = "test_file_v0_11",
@@ -62,9 +91,37 @@ py_test(
],
)
+genrule(
+ name = "generate_upgraded_file_v2",
+ testonly = 1,
+ srcs = ["testdata/test_file_v1_10.py"],
+ outs = [
+ "test_file_v2_0.py",
+ "report_v2.txt",
+ ],
+ cmd = ("$(location :tf_upgrade_v2)" +
+ " --infile $(location testdata/test_file_v1_10.py)" +
+ " --outfile $(location test_file_v2_0.py)" +
+ " --reportfile $(location report_v2.txt)"),
+ tools = [":tf_upgrade_v2"],
+)
+
+py_test(
+ name = "test_file_v2_0",
+ size = "small",
+ srcs = ["test_file_v2_0.py"],
+ srcs_version = "PY2AND3",
+ deps = [
+ "//tensorflow:tensorflow_py",
+ ],
+)
+
exports_files(
[
+ "ast_edits.py",
"tf_upgrade.py",
+ "renames_v2.py",
"testdata/test_file_v0_11.py",
+ "testdata/test_file_v1_10.py",
],
)
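The genrule above also documents the command-line interface of the new upgrader. As a rough sketch of running it by hand (the input/output paths are illustrative; only the --infile/--outfile/--reportfile flags are taken from the genrule):

    bazel build tensorflow/tools/compatibility:tf_upgrade_v2
    bazel-bin/tensorflow/tools/compatibility/tf_upgrade_v2 \
      --infile my_model_v1.py \
      --outfile my_model_v2.py \
      --reportfile upgrade_report.txt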
diff --git a/tensorflow/tools/compatibility/ast_edits.py b/tensorflow/tools/compatibility/ast_edits.py
new file mode 100644
index 0000000000..23cc4a21a9
--- /dev/null
+++ b/tensorflow/tools/compatibility/ast_edits.py
@@ -0,0 +1,502 @@
+# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ==============================================================================
+"""Upgrader for Python scripts according to an API change specification."""
+
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+import ast
+import collections
+import os
+import shutil
+import sys
+import tempfile
+import traceback
+
+
+class APIChangeSpec(object):
+ """This class defines the transformations that need to happen.
+
+ This class must provide the following fields:
+
+ * `function_keyword_renames`: maps function names to a map of old -> new
+ argument names
+ * `function_renames`: maps function names to new function names
+ * `change_to_function`: a set of function names that have changed (for
+ notifications)
+ * `function_reorders`: maps functions whose argument order has changed to the
+ list of arguments in the new order
+ * `function_handle`: maps function names to custom handlers for the function
+
+ For an example, see `TFAPIChangeSpec`.
+ """
+
+
+class _FileEditTuple(
+ collections.namedtuple("_FileEditTuple",
+ ["comment", "line", "start", "old", "new"])):
+ """Each edit that is recorded by a _FileEditRecorder.
+
+ Fields:
+ comment: A description of the edit and why it was made.
+ line: The line number in the file where the edit occurs (1-indexed).
+ start: The column offset within the line where the edit begins (0-indexed).
+ old: text string to remove (this must match what was in file).
+ new: text string to add in place of `old`.
+ """
+
+ __slots__ = ()
+
+
+class _FileEditRecorder(object):
+ """Record changes that need to be done to the file."""
+
+ def __init__(self, filename):
+ # all edits are lists of chars
+ self._filename = filename
+
+ self._line_to_edit = collections.defaultdict(list)
+ self._errors = []
+
+ def process(self, text):
+ """Process a list of strings, each corresponding to the recorded changes.
+
+ Args:
+ text: A list of lines of text (assumed to contain newlines)
+ Returns:
+ A tuple of the modified text, a textual report of the changes, and a list of errors.
+ Raises:
+ ValueError: if substitution source location does not have expected text.
+ """
+
+ change_report = ""
+
+ # Iterate over each line
+ for line, edits in self._line_to_edit.items():
+ offset = 0
+ # Sort by column so that edits are processed in order, making the
+ # indexing adjustments cumulative for edits that change the string
+ # length.
+ edits.sort(key=lambda x: x.start)
+
+ # Extract each line to a list of characters, because mutable lists
+ # are editable, unlike immutable strings.
+ char_array = list(text[line - 1])
+
+ # Record a description of the change
+ change_report += "%r Line %d\n" % (self._filename, line)
+ change_report += "-" * 80 + "\n\n"
+ for e in edits:
+ change_report += "%s\n" % e.comment
+ change_report += "\n Old: %s" % (text[line - 1])
+
+ # Make underline buffers that mark where in the line each edit occurred
+ change_list = [" "] * len(text[line - 1])
+ change_list_new = [" "] * len(text[line - 1])
+
+ # Iterate for each edit
+ for e in edits:
+ # Create effective start, end by accounting for change in length due
+ # to previous edits
+ start_eff = e.start + offset
+ end_eff = start_eff + len(e.old)
+
+ # Make sure the edit is changing what it should be changing
+ old_actual = "".join(char_array[start_eff:end_eff])
+ if old_actual != e.old:
+ raise ValueError("Expected text %r but got %r" %
+ ("".join(e.old), "".join(old_actual)))
+ # Make the edit
+ char_array[start_eff:end_eff] = list(e.new)
+
+ # Create the underline highlighting of the before and after
+ change_list[e.start:e.start + len(e.old)] = "~" * len(e.old)
+ change_list_new[start_eff:end_eff] = "~" * len(e.new)
+
+ # Keep track of how to generate effective ranges
+ offset += len(e.new) - len(e.old)
+
+ # Finish the report comment
+ change_report += " %s\n" % "".join(change_list)
+ text[line - 1] = "".join(char_array)
+ change_report += " New: %s" % (text[line - 1])
+ change_report += " %s\n\n" % "".join(change_list_new)
+ return "".join(text), change_report, self._errors
+
+ def add(self, comment, line, start, old, new, error=None):
+ """Add a new change that is needed.
+
+ Args:
+ comment: A description of what was changed
+ line: Line number (1 indexed)
+ start: Column offset (0 indexed)
+ old: old text
+ new: new text
+ error: this "edit" is something that cannot be fixed automatically
+ Returns:
+ None
+ """
+
+ self._line_to_edit[line].append(
+ _FileEditTuple(comment, line, start, old, new))
+ if error:
+ self._errors.append("%s:%d: %s" % (self._filename, line, error))
+
+
+class _ASTCallVisitor(ast.NodeVisitor):
+ """AST Visitor that processes function calls.
+
+ Updates function calls from old API version to new API version using a given
+ change spec.
+ """
+
+ def __init__(self, filename, lines, api_change_spec):
+ self._filename = filename
+ self._file_edit = _FileEditRecorder(filename)
+ self._lines = lines
+ self._api_change_spec = api_change_spec
+
+ def process(self, lines):
+ return self._file_edit.process(lines)
+
+ def generic_visit(self, node):
+ ast.NodeVisitor.generic_visit(self, node)
+
+ def _rename_functions(self, node, full_name):
+ function_renames = self._api_change_spec.function_renames
+ try:
+ new_name = function_renames[full_name]
+ self._file_edit.add("Renamed function %r to %r" % (full_name, new_name),
+ node.lineno, node.col_offset, full_name, new_name)
+ except KeyError:
+ pass
+
+ def _get_attribute_full_path(self, node):
+ """Traverse an attribute to generate a full name e.g. tf.foo.bar.
+
+ Args:
+ node: A Node of type Attribute.
+
+ Returns:
+ a '.'-delimited full-name or None if the tree was not a simple form.
+ e.g. `(foo()+b).bar` returns None, while `a.b.c` would return "a.b.c".
+ """
+ curr = node
+ items = []
+ while not isinstance(curr, ast.Name):
+ if not isinstance(curr, ast.Attribute):
+ return None
+ items.append(curr.attr)
+ curr = curr.value
+ items.append(curr.id)
+ return ".".join(reversed(items))
+
+ def _find_true_position(self, node):
+ """Return correct line number and column offset for a given node.
+
+ This is necessary mainly because ast.ListComp reports the location of the
+ first token after the opening '[' of the list comprehension.
+
+ Args:
+ node: Node for which we wish to know the lineno and col_offset
+ """
+ import re
+ find_open = re.compile("^\s*(\\[).*$")
+ find_string_chars = re.compile("['\"]")
+
+ if isinstance(node, ast.ListComp):
+ # Strangely, ast.ListComp returns the col_offset of the first token
+ # after the '[' token which appears to be a bug. Workaround by
+ # explicitly finding the real start of the list comprehension.
+ line = node.lineno
+ col = node.col_offset
+ # loop over lines
+ while 1:
+ # Reverse the text before col and search it for a '[' preceded only by whitespace
+ text = self._lines[line - 1]
+ reversed_preceding_text = text[:col][::-1]
+ # First find if a [ can be found with only whitespace between it and
+ # col.
+ m = find_open.match(reversed_preceding_text)
+ if m:
+ new_col_offset = col - m.start(1) - 1
+ return line, new_col_offset
+ else:
+ if (reversed_preceding_text == "" or
+ reversed_preceding_text.isspace()):
+ line = line - 1
+ prev_line = self._lines[line - 1]
+ # TODO(aselle):
+ # this is poor comment detection, but it is good enough for
+ # cases where the comment does not contain string literal starting/
+ # ending characters. If ast gave us start and end locations of the
+ # ast nodes rather than just start, we could use string literal
+ # node ranges to filter out spurious #'s that appear in string
+ # literals.
+ comment_start = prev_line.find("#")
+ if comment_start == -1:
+ col = len(prev_line) - 1
+ elif find_string_chars.search(prev_line[comment_start:]) is None:
+ col = comment_start
+ else:
+ return None, None
+ else:
+ return None, None
+ # Most other nodes return proper locations (`with`, notably, does not), but
+ # a `with` statement cannot appear inside an argument, so this is fine here.
+ return node.lineno, node.col_offset
+
+ def visit_Call(self, node): # pylint: disable=invalid-name
+ """Handle visiting a call node in the AST.
+
+ Args:
+ node: Current Node
+ """
+
+ # Find a simple attribute name path e.g. "tf.foo.bar"
+ full_name = self._get_attribute_full_path(node.func)
+
+ # Make sure the func is marked as being part of a call
+ node.func.is_function_for_call = True
+
+ if full_name:
+ # Call special handlers
+ function_handles = self._api_change_spec.function_handle
+ if full_name in function_handles:
+ function_handles[full_name](self._file_edit, node)
+
+ # Examine each non-keyword argument and make it into a keyword argument
+ # if reordering is required.
+ function_reorders = self._api_change_spec.function_reorders
+ function_keyword_renames = (
+ self._api_change_spec.function_keyword_renames)
+
+ if full_name in function_reorders:
+ reordered = function_reorders[full_name]
+ for idx, arg in enumerate(node.args):
+ lineno, col_offset = self._find_true_position(arg)
+ if lineno is None or col_offset is None:
+ self._file_edit.add(
+ "Failed to add keyword %r to reordered function %r" %
+ (reordered[idx], full_name),
+ arg.lineno,
+ arg.col_offset,
+ "",
+ "",
+ error="A necessary keyword argument failed to be inserted.")
+ else:
+ keyword_arg = reordered[idx]
+ if (full_name in function_keyword_renames and
+ keyword_arg in function_keyword_renames[full_name]):
+ keyword_arg = function_keyword_renames[full_name][keyword_arg]
+ self._file_edit.add("Added keyword %r to reordered function %r" %
+ (reordered[idx], full_name), lineno, col_offset,
+ "", keyword_arg + "=")
+
+ # Examine each keyword argument and convert it to the final renamed form
+ renamed_keywords = ({} if full_name not in function_keyword_renames else
+ function_keyword_renames[full_name])
+ for keyword in node.keywords:
+ argkey = keyword.arg
+ argval = keyword.value
+
+ if argkey in renamed_keywords:
+ argval_lineno, argval_col_offset = self._find_true_position(argval)
+ if argval_lineno is not None and argval_col_offset is not None:
+ # TODO(aselle): We should scan backward to find the start of the
+ # keyword key. Unfortunately ast does not give you the location of
+ # keyword keys, so we are forced to infer it from the keyword arg
+ # value.
+ key_start = argval_col_offset - len(argkey) - 1
+ key_end = key_start + len(argkey) + 1
+ if (self._lines[argval_lineno - 1][key_start:key_end] == argkey +
+ "="):
+ self._file_edit.add("Renamed keyword argument from %r to %r" %
+ (argkey,
+ renamed_keywords[argkey]), argval_lineno,
+ argval_col_offset - len(argkey) - 1,
+ argkey + "=", renamed_keywords[argkey] + "=")
+ continue
+ self._file_edit.add(
+ "Failed to rename keyword argument from %r to %r" %
+ (argkey, renamed_keywords[argkey]),
+ argval.lineno,
+ argval.col_offset - len(argkey) - 1,
+ "",
+ "",
+ error="Failed to find keyword lexographically. Fix manually.")
+
+ ast.NodeVisitor.generic_visit(self, node)
+
+ def visit_Attribute(self, node): # pylint: disable=invalid-name
+ """Handle bare Attributes i.e. [tf.foo, tf.bar].
+
+ Args:
+ node: Node that is of type ast.Attribute
+ """
+ full_name = self._get_attribute_full_path(node)
+ if full_name:
+ self._rename_functions(node, full_name)
+ if full_name in self._api_change_spec.change_to_function:
+ if not hasattr(node, "is_function_for_call"):
+ new_text = full_name + "()"
+ self._file_edit.add("Changed %r to %r" % (full_name, new_text),
+ node.lineno, node.col_offset, full_name, new_text)
+
+ ast.NodeVisitor.generic_visit(self, node)
+
+
+class ASTCodeUpgrader(object):
+ """Handles upgrading a set of Python files using a given API change spec."""
+
+ def __init__(self, api_change_spec):
+ if not isinstance(api_change_spec, APIChangeSpec):
+ raise TypeError("Must pass APIChangeSpec to ASTCodeUpgrader, got %s" %
+ type(api_change_spec))
+ self._api_change_spec = api_change_spec
+
+ def process_file(self, in_filename, out_filename):
+ """Process the given python file for incompatible changes.
+
+ Args:
+ in_filename: filename to parse
+ out_filename: output file to write to
+ Returns:
+ A tuple of the number of files processed, a log of actions, and errors
+ """
+
+ # Write to a temporary file, just in case we are doing an in-place modification.
+ with open(in_filename, "r") as in_file, \
+ tempfile.NamedTemporaryFile("w", delete=False) as temp_file:
+ ret = self.process_opened_file(in_filename, in_file, out_filename,
+ temp_file)
+
+ shutil.move(temp_file.name, out_filename)
+ return ret
+
+ # Broad exceptions are required here because ast throws whatever it wants.
+ # pylint: disable=broad-except
+ def process_opened_file(self, in_filename, in_file, out_filename, out_file):
+ """Process the given python file for incompatible changes.
+
+ This function is split out to facilitate StringIO testing from
+ tf_upgrade_test.py.
+
+ Args:
+ in_filename: filename to parse
+ in_file: opened file (or StringIO)
+ out_filename: output file to write to
+ out_file: opened file (or StringIO)
+ Returns:
+ A tuple of the number of files processed, a log of actions, and errors
+ """
+ process_errors = []
+ text = "-" * 80 + "\n"
+ text += "Processing file %r\n outputting to %r\n" % (in_filename,
+ out_filename)
+ text += "-" * 80 + "\n\n"
+
+ parsed_ast = None
+ lines = in_file.readlines()
+ try:
+ parsed_ast = ast.parse("".join(lines))
+ except Exception:
+ text += "Failed to parse %r\n\n" % in_filename
+ text += traceback.format_exc()
+ if parsed_ast:
+ visitor = _ASTCallVisitor(in_filename, lines, self._api_change_spec)
+ visitor.visit(parsed_ast)
+ out_text, new_text, process_errors = visitor.process(lines)
+ text += new_text
+ if out_file:
+ out_file.write(out_text)
+ text += "\n"
+ return 1, text, process_errors
+
+ # pylint: enable=broad-except
+
+ def process_tree(self, root_directory, output_root_directory,
+ copy_other_files):
+ """Processes upgrades on an entire tree of python files in place.
+
+ Note that only Python files are processed. If you have custom code in other
+ languages, you will need to upgrade it manually.
+
+ Args:
+ root_directory: Directory to walk and process.
+ output_root_directory: Directory to use as base.
+ copy_other_files: Copy files that are not touched by this converter.
+
+ Returns:
+      A tuple of files processed, the report string for all files, and errors
+ """
+
+ # make sure output directory doesn't exist
+ if output_root_directory and os.path.exists(output_root_directory):
+ print("Output directory %r must not already exist." %
+ (output_root_directory))
+ sys.exit(1)
+
+ # make sure output directory does not overlap with root_directory
+ norm_root = os.path.split(os.path.normpath(root_directory))
+ norm_output = os.path.split(os.path.normpath(output_root_directory))
+ if norm_root == norm_output:
+ print("Output directory %r same as input directory %r" %
+ (root_directory, output_root_directory))
+ sys.exit(1)
+
+ # Collect list of files to process (we do this to correctly handle if the
+ # user puts the output directory in some sub directory of the input dir)
+ files_to_process = []
+ files_to_copy = []
+ for dir_name, _, file_list in os.walk(root_directory):
+ py_files = [f for f in file_list if f.endswith(".py")]
+ copy_files = [f for f in file_list if not f.endswith(".py")]
+ for filename in py_files:
+ fullpath = os.path.join(dir_name, filename)
+ fullpath_output = os.path.join(output_root_directory,
+ os.path.relpath(fullpath,
+ root_directory))
+ files_to_process.append((fullpath, fullpath_output))
+ if copy_other_files:
+ for filename in copy_files:
+ fullpath = os.path.join(dir_name, filename)
+ fullpath_output = os.path.join(output_root_directory,
+ os.path.relpath(
+ fullpath, root_directory))
+ files_to_copy.append((fullpath, fullpath_output))
+
+ file_count = 0
+ tree_errors = []
+ report = ""
+ report += ("=" * 80) + "\n"
+ report += "Input tree: %r\n" % root_directory
+ report += ("=" * 80) + "\n"
+
+ for input_path, output_path in files_to_process:
+ output_directory = os.path.dirname(output_path)
+ if not os.path.isdir(output_directory):
+ os.makedirs(output_directory)
+ file_count += 1
+ _, l_report, l_errors = self.process_file(input_path, output_path)
+ tree_errors += l_errors
+ report += l_report
+ for input_path, output_path in files_to_copy:
+ output_directory = os.path.dirname(output_path)
+ if not os.path.isdir(output_directory):
+ os.makedirs(output_directory)
+ shutil.copy(input_path, output_path)
+ return file_count, report, tree_errors
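
As a rough sketch of how the ASTCodeUpgrader added above can be driven, the snippet below wires a minimal APIChangeSpec subclass into it and upgrades an in-memory file, using the same six.StringIO pattern as the tests later in this change. The spec fields follow the contract documented on APIChangeSpec; the concrete rename (tf.foo to tf.bar) and the file names are illustrative assumptions, not part of this change.

    # Sketch only: drive ASTCodeUpgrader with a hand-written change spec.
    # The rename tf.foo -> tf.bar is a made-up example.
    import six

    from tensorflow.tools.compatibility import ast_edits


    class MinimalChangeSpec(ast_edits.APIChangeSpec):

      def __init__(self):
        self.function_keyword_renames = {}            # function -> {old kwarg: new kwarg}
        self.function_renames = {"tf.foo": "tf.bar"}  # old name -> new name
        self.change_to_function = set()               # attributes that became callables
        self.function_reorders = {}                   # function -> new argument order
        self.function_handle = {}                     # function -> custom handler


    upgrader = ast_edits.ASTCodeUpgrader(MinimalChangeSpec())
    in_file = six.StringIO("x = tf.foo(1)\n")
    out_file = six.StringIO()
    count, report, errors = upgrader.process_opened_file(
        "example.py", in_file, "example_out.py", out_file)
    # out_file.getvalue() is now "x = tf.bar(1)\n"; report holds the per-edit log.
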
diff --git a/tensorflow/tools/compatibility/renames_v2.py b/tensorflow/tools/compatibility/renames_v2.py
new file mode 100644
index 0000000000..7e66ad816a
--- /dev/null
+++ b/tensorflow/tools/compatibility/renames_v2.py
@@ -0,0 +1,135 @@
+# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ==============================================================================
+# pylint: disable=line-too-long
+"""List of renames to apply when converting from TF 1.0 to TF 2.0.
+
+THIS FILE IS AUTOGENERATED: To update, please run:
+ bazel build tensorflow/tools/compatibility/update:generate_v2_renames_map
+ bazel-bin/tensorflow/tools/compatibility/update/generate_v2_renames_map
+This file should be updated whenever endpoints are deprecated.
+"""
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+renames = {
+ 'tf.acos': 'tf.math.acos',
+ 'tf.acosh': 'tf.math.acosh',
+ 'tf.add': 'tf.math.add',
+ 'tf.as_string': 'tf.dtypes.as_string',
+ 'tf.asin': 'tf.math.asin',
+ 'tf.asinh': 'tf.math.asinh',
+ 'tf.atan': 'tf.math.atan',
+ 'tf.atan2': 'tf.math.atan2',
+ 'tf.atanh': 'tf.math.atanh',
+ 'tf.batch_to_space_nd': 'tf.manip.batch_to_space_nd',
+ 'tf.betainc': 'tf.math.betainc',
+ 'tf.ceil': 'tf.math.ceil',
+ 'tf.check_numerics': 'tf.debugging.check_numerics',
+ 'tf.cholesky': 'tf.linalg.cholesky',
+ 'tf.cos': 'tf.math.cos',
+ 'tf.cosh': 'tf.math.cosh',
+ 'tf.cross': 'tf.linalg.cross',
+ 'tf.decode_base64': 'tf.io.decode_base64',
+ 'tf.decode_compressed': 'tf.io.decode_compressed',
+ 'tf.decode_json_example': 'tf.io.decode_json_example',
+ 'tf.decode_raw': 'tf.io.decode_raw',
+ 'tf.dequantize': 'tf.quantization.dequantize',
+ 'tf.diag': 'tf.linalg.tensor_diag',
+ 'tf.diag_part': 'tf.linalg.tensor_diag_part',
+ 'tf.digamma': 'tf.math.digamma',
+ 'tf.encode_base64': 'tf.io.encode_base64',
+ 'tf.equal': 'tf.math.equal',
+ 'tf.erfc': 'tf.math.erfc',
+ 'tf.exp': 'tf.math.exp',
+ 'tf.expm1': 'tf.math.expm1',
+ 'tf.extract_image_patches': 'tf.image.extract_image_patches',
+ 'tf.fake_quant_with_min_max_args': 'tf.quantization.fake_quant_with_min_max_args',
+ 'tf.fake_quant_with_min_max_args_gradient': 'tf.quantization.fake_quant_with_min_max_args_gradient',
+ 'tf.fake_quant_with_min_max_vars': 'tf.quantization.fake_quant_with_min_max_vars',
+ 'tf.fake_quant_with_min_max_vars_gradient': 'tf.quantization.fake_quant_with_min_max_vars_gradient',
+ 'tf.fake_quant_with_min_max_vars_per_channel': 'tf.quantization.fake_quant_with_min_max_vars_per_channel',
+ 'tf.fake_quant_with_min_max_vars_per_channel_gradient': 'tf.quantization.fake_quant_with_min_max_vars_per_channel_gradient',
+ 'tf.fft': 'tf.spectral.fft',
+ 'tf.floor': 'tf.math.floor',
+ 'tf.gather_nd': 'tf.manip.gather_nd',
+ 'tf.GraphKeys.VARIABLES': 'tf.GraphKeys.GLOBAL_VARIABLES',
+ 'tf.greater': 'tf.math.greater',
+ 'tf.greater_equal': 'tf.math.greater_equal',
+ 'tf.ifft': 'tf.spectral.ifft',
+ 'tf.igamma': 'tf.math.igamma',
+ 'tf.igammac': 'tf.math.igammac',
+ 'tf.invert_permutation': 'tf.math.invert_permutation',
+ 'tf.is_finite': 'tf.debugging.is_finite',
+ 'tf.is_inf': 'tf.debugging.is_inf',
+ 'tf.is_nan': 'tf.debugging.is_nan',
+ 'tf.less': 'tf.math.less',
+ 'tf.less_equal': 'tf.math.less_equal',
+ 'tf.lgamma': 'tf.math.lgamma',
+ 'tf.log': 'tf.math.log',
+ 'tf.log1p': 'tf.math.log1p',
+ 'tf.logical_and': 'tf.math.logical_and',
+ 'tf.logical_not': 'tf.math.logical_not',
+ 'tf.logical_or': 'tf.math.logical_or',
+ 'tf.matching_files': 'tf.io.matching_files',
+ 'tf.matrix_band_part': 'tf.linalg.band_part',
+ 'tf.matrix_determinant': 'tf.linalg.det',
+ 'tf.matrix_diag': 'tf.linalg.diag',
+ 'tf.matrix_diag_part': 'tf.linalg.diag_part',
+ 'tf.matrix_inverse': 'tf.linalg.inv',
+ 'tf.matrix_set_diag': 'tf.linalg.set_diag',
+ 'tf.matrix_solve': 'tf.linalg.solve',
+ 'tf.matrix_triangular_solve': 'tf.linalg.triangular_solve',
+ 'tf.maximum': 'tf.math.maximum',
+ 'tf.minimum': 'tf.math.minimum',
+ 'tf.not_equal': 'tf.math.not_equal',
+ 'tf.parse_tensor': 'tf.io.parse_tensor',
+ 'tf.polygamma': 'tf.math.polygamma',
+ 'tf.qr': 'tf.linalg.qr',
+ 'tf.quantized_concat': 'tf.quantization.quantized_concat',
+ 'tf.read_file': 'tf.io.read_file',
+ 'tf.reciprocal': 'tf.math.reciprocal',
+ 'tf.regex_replace': 'tf.strings.regex_replace',
+ 'tf.reshape': 'tf.manip.reshape',
+ 'tf.reverse': 'tf.manip.reverse',
+ 'tf.reverse_v2': 'tf.manip.reverse',
+ 'tf.rint': 'tf.math.rint',
+ 'tf.rsqrt': 'tf.math.rsqrt',
+ 'tf.scatter_nd': 'tf.manip.scatter_nd',
+ 'tf.segment_max': 'tf.math.segment_max',
+ 'tf.segment_mean': 'tf.math.segment_mean',
+ 'tf.segment_min': 'tf.math.segment_min',
+ 'tf.segment_prod': 'tf.math.segment_prod',
+ 'tf.segment_sum': 'tf.math.segment_sum',
+ 'tf.sin': 'tf.math.sin',
+ 'tf.sinh': 'tf.math.sinh',
+ 'tf.space_to_batch_nd': 'tf.manip.space_to_batch_nd',
+ 'tf.squared_difference': 'tf.math.squared_difference',
+ 'tf.string_join': 'tf.strings.join',
+ 'tf.string_strip': 'tf.strings.strip',
+ 'tf.string_to_hash_bucket': 'tf.strings.to_hash_bucket',
+ 'tf.string_to_hash_bucket_fast': 'tf.strings.to_hash_bucket_fast',
+ 'tf.string_to_hash_bucket_strong': 'tf.strings.to_hash_bucket_strong',
+ 'tf.string_to_number': 'tf.strings.to_number',
+ 'tf.substr': 'tf.strings.substr',
+ 'tf.tan': 'tf.math.tan',
+ 'tf.tile': 'tf.manip.tile',
+ 'tf.unsorted_segment_max': 'tf.math.unsorted_segment_max',
+ 'tf.unsorted_segment_min': 'tf.math.unsorted_segment_min',
+ 'tf.unsorted_segment_prod': 'tf.math.unsorted_segment_prod',
+ 'tf.unsorted_segment_sum': 'tf.math.unsorted_segment_sum',
+ 'tf.write_file': 'tf.io.write_file',
+ 'tf.zeta': 'tf.math.zeta'
+}
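
The renames map above is an ordinary Python dict keyed by the 1.x endpoint name. A brief sketch of how it is looked up and consumed (the spec assignment mirrors tf_upgrade_v2.py further down in this change):

    from tensorflow.tools.compatibility import renames_v2

    # Each deprecated 1.x endpoint maps to its canonical 2.0 location.
    assert renames_v2.renames["tf.acos"] == "tf.math.acos"

    # TFAPIChangeSpec in tf_upgrade_v2.py plugs the whole map in as its
    # function_renames table, so every key is rewritten to its value.
    function_renames = renames_v2.renames
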
diff --git a/tensorflow/tools/compatibility/testdata/test_file_v0_11.py b/tensorflow/tools/compatibility/testdata/test_file_v0_11.py
index 40526d930c..68ba7a2630 100644
--- a/tensorflow/tools/compatibility/testdata/test_file_v0_11.py
+++ b/tensorflow/tools/compatibility/testdata/test_file_v0_11.py
@@ -35,7 +35,7 @@ class TestUpgrade(test_util.TensorFlowTestCase):
"""
def testArgRenames(self):
- with self.test_session():
+ with self.cached_session():
a = [[1., 2., 3.], [4., 5., 6.]]
b = [[True, False, False], [False, True, True]]
@@ -98,7 +98,7 @@ class TestUpgrade(test_util.TensorFlowTestCase):
[[[1, 2]], [[3, 4]]])
def testArgMinMax(self):
- with self.test_session():
+ with self.cached_session():
self.assertAllEqual(
tf.argmin([[1, 2, 3], [4, 1, 0]], dimension=1).eval(),
[0, 2])
@@ -113,7 +113,7 @@ class TestUpgrade(test_util.TensorFlowTestCase):
[1, 0, 0])
def testExpandAndSqueeze(self):
- with self.test_session():
+ with self.cached_session():
# TODO(aselle): sparse_split, sparse_reduce_sum,
# sparse_reduce_sum_sparse, reduce_join
@@ -140,7 +140,7 @@ class TestUpgrade(test_util.TensorFlowTestCase):
a)
def testArithmeticRenames(self):
- with self.test_session() as s:
+ with self.cached_session() as s:
stuff = tf.split(1, 2, [[1, 2, 3, 4], [4, 5, 6, 7]])
vals = s.run(stuff)
self.assertAllEqual(vals,
@@ -164,7 +164,7 @@ class TestUpgrade(test_util.TensorFlowTestCase):
# ]
def testBatchAndSvd(self):
- with self.test_session():
+ with self.cached_session():
mat = [[1., 2.], [2., 3.]]
batched_mat = tf.expand_dims(mat, [0])
result = tf.matmul(mat, mat).eval()
@@ -176,7 +176,7 @@ class TestUpgrade(test_util.TensorFlowTestCase):
def testCrossEntropy(self):
# TODO(aselle): Test sparse_softmax_...
- with self.test_session():
+ with self.cached_session():
labels = [.8, .5, .2, .1]
logits = [.9, .1, .3, .1]
self.assertAllEqual(
@@ -191,7 +191,7 @@ class TestUpgrade(test_util.TensorFlowTestCase):
labels=labels, logits=logits).eval())
def testVariables(self):
- with self.test_session() as s:
+ with self.cached_session() as s:
# make some variables
_ = [tf.Variable([1, 2, 3], dtype=tf.float32),
@@ -201,7 +201,7 @@ class TestUpgrade(test_util.TensorFlowTestCase):
_ = [v.name for v in tf.local_variables()]
def testSummaries(self):
- with self.test_session() as s:
+ with self.cached_session() as s:
var = tf.Variable([1, 2, 3], dtype=tf.float32)
s.run(tf.initialize_all_variables())
x, y = np.meshgrid(np.linspace(-10, 10, 256), np.linspace(-10, 10, 256))
diff --git a/tensorflow/tools/compatibility/testdata/test_file_v1_10.py b/tensorflow/tools/compatibility/testdata/test_file_v1_10.py
new file mode 100644
index 0000000000..e5ca8d3e2e
--- /dev/null
+++ b/tensorflow/tools/compatibility/testdata/test_file_v1_10.py
@@ -0,0 +1,34 @@
+# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ==============================================================================
+"""Tests for tf upgrader."""
+
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+import tensorflow as tf
+from tensorflow.python.framework import test_util
+from tensorflow.python.platform import test as test_lib
+
+
+class TestUpgrade(test_util.TensorFlowTestCase):
+ """Test various APIs that have been changed in 2.0."""
+
+ def testRenames(self):
+ with self.cached_session():
+ self.assertAllClose(1.04719755, tf.acos(0.5).eval())
+ self.assertAllClose(0.5, tf.rsqrt(4.0).eval())
+
+if __name__ == "__main__":
+ test_lib.main()
diff --git a/tensorflow/tools/compatibility/tf_upgrade.py b/tensorflow/tools/compatibility/tf_upgrade.py
index 1f8833582a..96705b1a4c 100644
--- a/tensorflow/tools/compatibility/tf_upgrade.py
+++ b/tensorflow/tools/compatibility/tf_upgrade.py
@@ -19,491 +19,11 @@ from __future__ import division
from __future__ import print_function
import argparse
-import ast
-import collections
-import os
-import shutil
-import sys
-import tempfile
-import traceback
+from tensorflow.tools.compatibility import ast_edits
-class APIChangeSpec(object):
- """This class defines the transformations that need to happen.
- This class must provide the following fields:
-
- * `function_keyword_renames`: maps function names to a map of old -> new
- argument names
- * `function_renames`: maps function names to new function names
- * `change_to_function`: a set of function names that have changed (for
- notifications)
- * `function_reorders`: maps functions whose argument order has changed to the
- list of arguments in the new order
- * `function_handle`: maps function names to custom handlers for the function
-
- For an example, see `TFAPIChangeSpec`.
- """
-
-
-class _FileEditTuple(
- collections.namedtuple("_FileEditTuple",
- ["comment", "line", "start", "old", "new"])):
- """Each edit that is recorded by a _FileEditRecorder.
-
- Fields:
- comment: A description of the edit and why it was made.
- line: The line number in the file where the edit occurs (1-indexed).
- start: The line number in the file where the edit occurs (0-indexed).
- old: text string to remove (this must match what was in file).
- new: text string to add in place of `old`.
- """
-
- __slots__ = ()
-
-
-class _FileEditRecorder(object):
- """Record changes that need to be done to the file."""
-
- def __init__(self, filename):
- # all edits are lists of chars
- self._filename = filename
-
- self._line_to_edit = collections.defaultdict(list)
- self._errors = []
-
- def process(self, text):
- """Process a list of strings, each corresponding to the recorded changes.
-
- Args:
- text: A list of lines of text (assumed to contain newlines)
- Returns:
- A tuple of the modified text and a textual description of what is done.
- Raises:
- ValueError: if substitution source location does not have expected text.
- """
-
- change_report = ""
-
- # Iterate of each line
- for line, edits in self._line_to_edit.items():
- offset = 0
- # sort by column so that edits are processed in order in order to make
- # indexing adjustments cumulative for changes that change the string
- # length
- edits.sort(key=lambda x: x.start)
-
- # Extract each line to a list of characters, because mutable lists
- # are editable, unlike immutable strings.
- char_array = list(text[line - 1])
-
- # Record a description of the change
- change_report += "%r Line %d\n" % (self._filename, line)
- change_report += "-" * 80 + "\n\n"
- for e in edits:
- change_report += "%s\n" % e.comment
- change_report += "\n Old: %s" % (text[line - 1])
-
- # Make underscore buffers for underlining where in the line the edit was
- change_list = [" "] * len(text[line - 1])
- change_list_new = [" "] * len(text[line - 1])
-
- # Iterate for each edit
- for e in edits:
- # Create effective start, end by accounting for change in length due
- # to previous edits
- start_eff = e.start + offset
- end_eff = start_eff + len(e.old)
-
- # Make sure the edit is changing what it should be changing
- old_actual = "".join(char_array[start_eff:end_eff])
- if old_actual != e.old:
- raise ValueError("Expected text %r but got %r" %
- ("".join(e.old), "".join(old_actual)))
- # Make the edit
- char_array[start_eff:end_eff] = list(e.new)
-
- # Create the underline highlighting of the before and after
- change_list[e.start:e.start + len(e.old)] = "~" * len(e.old)
- change_list_new[start_eff:end_eff] = "~" * len(e.new)
-
- # Keep track of how to generate effective ranges
- offset += len(e.new) - len(e.old)
-
- # Finish the report comment
- change_report += " %s\n" % "".join(change_list)
- text[line - 1] = "".join(char_array)
- change_report += " New: %s" % (text[line - 1])
- change_report += " %s\n\n" % "".join(change_list_new)
- return "".join(text), change_report, self._errors
-
- def add(self, comment, line, start, old, new, error=None):
- """Add a new change that is needed.
-
- Args:
- comment: A description of what was changed
- line: Line number (1 indexed)
- start: Column offset (0 indexed)
- old: old text
- new: new text
- error: this "edit" is something that cannot be fixed automatically
- Returns:
- None
- """
-
- self._line_to_edit[line].append(
- _FileEditTuple(comment, line, start, old, new))
- if error:
- self._errors.append("%s:%d: %s" % (self._filename, line, error))
-
-
-class _ASTCallVisitor(ast.NodeVisitor):
- """AST Visitor that processes function calls.
-
- Updates function calls from old API version to new API version using a given
- change spec.
- """
-
- def __init__(self, filename, lines, api_change_spec):
- self._filename = filename
- self._file_edit = _FileEditRecorder(filename)
- self._lines = lines
- self._api_change_spec = api_change_spec
-
- def process(self, lines):
- return self._file_edit.process(lines)
-
- def generic_visit(self, node):
- ast.NodeVisitor.generic_visit(self, node)
-
- def _rename_functions(self, node, full_name):
- function_renames = self._api_change_spec.function_renames
- try:
- new_name = function_renames[full_name]
- self._file_edit.add("Renamed function %r to %r" % (full_name, new_name),
- node.lineno, node.col_offset, full_name, new_name)
- except KeyError:
- pass
-
- def _get_attribute_full_path(self, node):
- """Traverse an attribute to generate a full name e.g. tf.foo.bar.
-
- Args:
- node: A Node of type Attribute.
-
- Returns:
- a '.'-delimited full-name or None if the tree was not a simple form.
- i.e. `foo()+b).bar` returns None, while `a.b.c` would return "a.b.c".
- """
- curr = node
- items = []
- while not isinstance(curr, ast.Name):
- if not isinstance(curr, ast.Attribute):
- return None
- items.append(curr.attr)
- curr = curr.value
- items.append(curr.id)
- return ".".join(reversed(items))
-
- def _find_true_position(self, node):
- """Return correct line number and column offset for a given node.
-
- This is necessary mainly because ListComp's location reporting reports
- the next token after the list comprehension list opening.
-
- Args:
- node: Node for which we wish to know the lineno and col_offset
- """
- import re
- find_open = re.compile("^\s*(\\[).*$")
- find_string_chars = re.compile("['\"]")
-
- if isinstance(node, ast.ListComp):
- # Strangely, ast.ListComp returns the col_offset of the first token
- # after the '[' token which appears to be a bug. Workaround by
- # explicitly finding the real start of the list comprehension.
- line = node.lineno
- col = node.col_offset
- # loop over lines
- while 1:
- # Reverse the text to and regular expression search for whitespace
- text = self._lines[line - 1]
- reversed_preceding_text = text[:col][::-1]
- # First find if a [ can be found with only whitespace between it and
- # col.
- m = find_open.match(reversed_preceding_text)
- if m:
- new_col_offset = col - m.start(1) - 1
- return line, new_col_offset
- else:
- if (reversed_preceding_text == "" or
- reversed_preceding_text.isspace()):
- line = line - 1
- prev_line = self._lines[line - 1]
- # TODO(aselle):
- # this is poor comment detection, but it is good enough for
- # cases where the comment does not contain string literal starting/
- # ending characters. If ast gave us start and end locations of the
- # ast nodes rather than just start, we could use string literal
- # node ranges to filter out spurious #'s that appear in string
- # literals.
- comment_start = prev_line.find("#")
- if comment_start == -1:
- col = len(prev_line) - 1
- elif find_string_chars.search(prev_line[comment_start:]) is None:
- col = comment_start
- else:
- return None, None
- else:
- return None, None
- # Most other nodes return proper locations (with notably does not), but
- # it is not possible to use that in an argument.
- return node.lineno, node.col_offset
-
- def visit_Call(self, node): # pylint: disable=invalid-name
- """Handle visiting a call node in the AST.
-
- Args:
- node: Current Node
- """
-
- # Find a simple attribute name path e.g. "tf.foo.bar"
- full_name = self._get_attribute_full_path(node.func)
-
- # Make sure the func is marked as being part of a call
- node.func.is_function_for_call = True
-
- if full_name:
- # Call special handlers
- function_handles = self._api_change_spec.function_handle
- if full_name in function_handles:
- function_handles[full_name](self._file_edit, node)
-
- # Examine any non-keyword argument and make it into a keyword argument
- # if reordering required.
- function_reorders = self._api_change_spec.function_reorders
- function_keyword_renames = (
- self._api_change_spec.function_keyword_renames)
-
- if full_name in function_reorders:
- reordered = function_reorders[full_name]
- for idx, arg in enumerate(node.args):
- lineno, col_offset = self._find_true_position(arg)
- if lineno is None or col_offset is None:
- self._file_edit.add(
- "Failed to add keyword %r to reordered function %r" %
- (reordered[idx], full_name),
- arg.lineno,
- arg.col_offset,
- "",
- "",
- error="A necessary keyword argument failed to be inserted.")
- else:
- keyword_arg = reordered[idx]
- if (full_name in function_keyword_renames and
- keyword_arg in function_keyword_renames[full_name]):
- keyword_arg = function_keyword_renames[full_name][keyword_arg]
- self._file_edit.add("Added keyword %r to reordered function %r" %
- (reordered[idx], full_name), lineno, col_offset,
- "", keyword_arg + "=")
-
- # Examine each keyword argument and convert it to the final renamed form
- renamed_keywords = ({} if full_name not in function_keyword_renames else
- function_keyword_renames[full_name])
- for keyword in node.keywords:
- argkey = keyword.arg
- argval = keyword.value
-
- if argkey in renamed_keywords:
- argval_lineno, argval_col_offset = self._find_true_position(argval)
- if argval_lineno is not None and argval_col_offset is not None:
- # TODO(aselle): We should scan backward to find the start of the
- # keyword key. Unfortunately ast does not give you the location of
- # keyword keys, so we are forced to infer it from the keyword arg
- # value.
- key_start = argval_col_offset - len(argkey) - 1
- key_end = key_start + len(argkey) + 1
- if (self._lines[argval_lineno - 1][key_start:key_end] == argkey +
- "="):
- self._file_edit.add("Renamed keyword argument from %r to %r" %
- (argkey,
- renamed_keywords[argkey]), argval_lineno,
- argval_col_offset - len(argkey) - 1,
- argkey + "=", renamed_keywords[argkey] + "=")
- continue
- self._file_edit.add(
- "Failed to rename keyword argument from %r to %r" %
- (argkey, renamed_keywords[argkey]),
- argval.lineno,
- argval.col_offset - len(argkey) - 1,
- "",
- "",
- error="Failed to find keyword lexographically. Fix manually.")
-
- ast.NodeVisitor.generic_visit(self, node)
-
- def visit_Attribute(self, node): # pylint: disable=invalid-name
- """Handle bare Attributes i.e. [tf.foo, tf.bar].
-
- Args:
- node: Node that is of type ast.Attribute
- """
- full_name = self._get_attribute_full_path(node)
- if full_name:
- self._rename_functions(node, full_name)
- if full_name in self._api_change_spec.change_to_function:
- if not hasattr(node, "is_function_for_call"):
- new_text = full_name + "()"
- self._file_edit.add("Changed %r to %r" % (full_name, new_text),
- node.lineno, node.col_offset, full_name, new_text)
-
- ast.NodeVisitor.generic_visit(self, node)
-
-
-class ASTCodeUpgrader(object):
- """Handles upgrading a set of Python files using a given API change spec."""
-
- def __init__(self, api_change_spec):
- if not isinstance(api_change_spec, APIChangeSpec):
- raise TypeError("Must pass APIChangeSpec to ASTCodeUpgrader, got %s" %
- type(api_change_spec))
- self._api_change_spec = api_change_spec
-
- def process_file(self, in_filename, out_filename):
- """Process the given python file for incompatible changes.
-
- Args:
- in_filename: filename to parse
- out_filename: output file to write to
- Returns:
- A tuple representing number of files processed, log of actions, errors
- """
-
- # Write to a temporary file, just in case we are doing an implace modify.
- with open(in_filename, "r") as in_file, \
- tempfile.NamedTemporaryFile("w", delete=False) as temp_file:
- ret = self.process_opened_file(in_filename, in_file, out_filename,
- temp_file)
-
- shutil.move(temp_file.name, out_filename)
- return ret
-
- # Broad exceptions are required here because ast throws whatever it wants.
- # pylint: disable=broad-except
- def process_opened_file(self, in_filename, in_file, out_filename, out_file):
- """Process the given python file for incompatible changes.
-
- This function is split out to facilitate StringIO testing from
- tf_upgrade_test.py.
-
- Args:
- in_filename: filename to parse
- in_file: opened file (or StringIO)
- out_filename: output file to write to
- out_file: opened file (or StringIO)
- Returns:
- A tuple representing number of files processed, log of actions, errors
- """
- process_errors = []
- text = "-" * 80 + "\n"
- text += "Processing file %r\n outputting to %r\n" % (in_filename,
- out_filename)
- text += "-" * 80 + "\n\n"
-
- parsed_ast = None
- lines = in_file.readlines()
- try:
- parsed_ast = ast.parse("".join(lines))
- except Exception:
- text += "Failed to parse %r\n\n" % in_filename
- text += traceback.format_exc()
- if parsed_ast:
- visitor = _ASTCallVisitor(in_filename, lines, self._api_change_spec)
- visitor.visit(parsed_ast)
- out_text, new_text, process_errors = visitor.process(lines)
- text += new_text
- if out_file:
- out_file.write(out_text)
- text += "\n"
- return 1, text, process_errors
-
- # pylint: enable=broad-except
-
- def process_tree(self, root_directory, output_root_directory,
- copy_other_files):
- """Processes upgrades on an entire tree of python files in place.
-
- Note that only Python files. If you have custom code in other languages,
- you will need to manually upgrade those.
-
- Args:
- root_directory: Directory to walk and process.
- output_root_directory: Directory to use as base.
- copy_other_files: Copy files that are not touched by this converter.
-
- Returns:
- A tuple of files processed, the report string ofr all files, and errors
- """
-
- # make sure output directory doesn't exist
- if output_root_directory and os.path.exists(output_root_directory):
- print("Output directory %r must not already exist." %
- (output_root_directory))
- sys.exit(1)
-
- # make sure output directory does not overlap with root_directory
- norm_root = os.path.split(os.path.normpath(root_directory))
- norm_output = os.path.split(os.path.normpath(output_root_directory))
- if norm_root == norm_output:
- print("Output directory %r same as input directory %r" %
- (root_directory, output_root_directory))
- sys.exit(1)
-
- # Collect list of files to process (we do this to correctly handle if the
- # user puts the output directory in some sub directory of the input dir)
- files_to_process = []
- files_to_copy = []
- for dir_name, _, file_list in os.walk(root_directory):
- py_files = [f for f in file_list if f.endswith(".py")]
- copy_files = [f for f in file_list if not f.endswith(".py")]
- for filename in py_files:
- fullpath = os.path.join(dir_name, filename)
- fullpath_output = os.path.join(output_root_directory,
- os.path.relpath(fullpath,
- root_directory))
- files_to_process.append((fullpath, fullpath_output))
- if copy_other_files:
- for filename in copy_files:
- fullpath = os.path.join(dir_name, filename)
- fullpath_output = os.path.join(output_root_directory,
- os.path.relpath(
- fullpath, root_directory))
- files_to_copy.append((fullpath, fullpath_output))
-
- file_count = 0
- tree_errors = []
- report = ""
- report += ("=" * 80) + "\n"
- report += "Input tree: %r\n" % root_directory
- report += ("=" * 80) + "\n"
-
- for input_path, output_path in files_to_process:
- output_directory = os.path.dirname(output_path)
- if not os.path.isdir(output_directory):
- os.makedirs(output_directory)
- file_count += 1
- _, l_report, l_errors = self.process_file(input_path, output_path)
- tree_errors += l_errors
- report += l_report
- for input_path, output_path in files_to_copy:
- output_directory = os.path.dirname(output_path)
- if not os.path.isdir(output_directory):
- os.makedirs(output_directory)
- shutil.copy(input_path, output_path)
- return file_count, report, tree_errors
-
-
-class TFAPIChangeSpec(APIChangeSpec):
+class TFAPIChangeSpec(ast_edits.APIChangeSpec):
"""List of maps that describe what changed in the API."""
def __init__(self):
@@ -718,7 +238,7 @@ Simple usage:
default="report.txt")
args = parser.parse_args()
- upgrade = ASTCodeUpgrader(TFAPIChangeSpec())
+ upgrade = ast_edits.ASTCodeUpgrader(TFAPIChangeSpec())
report_text = None
report_filename = args.report_filename
files_processed = 0
diff --git a/tensorflow/tools/compatibility/tf_upgrade_test.py b/tensorflow/tools/compatibility/tf_upgrade_test.py
index 3d02eacba6..66325ea2ad 100644
--- a/tensorflow/tools/compatibility/tf_upgrade_test.py
+++ b/tensorflow/tools/compatibility/tf_upgrade_test.py
@@ -22,6 +22,7 @@ import tempfile
import six
from tensorflow.python.framework import test_util
from tensorflow.python.platform import test as test_lib
+from tensorflow.tools.compatibility import ast_edits
from tensorflow.tools.compatibility import tf_upgrade
@@ -36,7 +37,7 @@ class TestUpgrade(test_util.TensorFlowTestCase):
def _upgrade(self, old_file_text):
in_file = six.StringIO(old_file_text)
out_file = six.StringIO()
- upgrader = tf_upgrade.ASTCodeUpgrader(tf_upgrade.TFAPIChangeSpec())
+ upgrader = ast_edits.ASTCodeUpgrader(tf_upgrade.TFAPIChangeSpec())
count, report, errors = (
upgrader.process_opened_file("test.py", in_file,
"test_out.py", out_file))
@@ -139,7 +140,7 @@ class TestUpgradeFiles(test_util.TensorFlowTestCase):
upgraded = "tf.multiply(a, b)\n"
temp_file.write(original)
temp_file.close()
- upgrader = tf_upgrade.ASTCodeUpgrader(tf_upgrade.TFAPIChangeSpec())
+ upgrader = ast_edits.ASTCodeUpgrader(tf_upgrade.TFAPIChangeSpec())
upgrader.process_file(temp_file.name, temp_file.name)
self.assertAllEqual(open(temp_file.name).read(), upgraded)
os.unlink(temp_file.name)
diff --git a/tensorflow/tools/compatibility/tf_upgrade_v2.py b/tensorflow/tools/compatibility/tf_upgrade_v2.py
new file mode 100644
index 0000000000..53c546b10c
--- /dev/null
+++ b/tensorflow/tools/compatibility/tf_upgrade_v2.py
@@ -0,0 +1,147 @@
+# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ==============================================================================
+"""Upgrader for Python scripts from 1.* TensorFlow to 2.0 TensorFlow."""
+
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+import argparse
+import functools
+
+from tensorflow.tools.compatibility import ast_edits
+from tensorflow.tools.compatibility import renames_v2
+
+
+class TFAPIChangeSpec(ast_edits.APIChangeSpec):
+ """List of maps that describe what changed in the API."""
+
+ def __init__(self):
+ # Maps from a function name to a dictionary that describes how to
+ # map from an old argument keyword to the new argument keyword.
+ self.function_keyword_renames = {}
+
+ # Mapping from function to the new name of the function
+ self.function_renames = renames_v2.renames
+
+ # Variables that should be changed to functions.
+ self.change_to_function = {}
+
+ # Functions that were reordered should be changed to the new keyword args
+ # for safety, if positional arguments are used. If you have reversed the
+ # positional arguments yourself, this could do the wrong thing.
+ self.function_reorders = {}
+
+ # Specially handled functions.
+ self.function_handle = {}
+ for decay in ["tf.train.exponential_decay", "tf.train.piecewise_constant",
+ "tf.train.polynomial_decay", "tf.train.natural_exp_decay",
+ "tf.train.inverse_time_decay", "tf.train.cosine_decay",
+ "tf.train.cosine_decay_restarts",
+ "tf.train.linear_cosine_decay",
+ "tf.train.noisy_linear_cosine_decay"]:
+ self.function_handle[decay] = functools.partial(
+ self._learning_rate_decay_handler, decay_name=decay)
+
+ @staticmethod
+ def _learning_rate_decay_handler(file_edit_recorder, node, decay_name):
+ comment = ("ERROR: %s has been changed to return a callable instead of a "
+ "tensor when graph building, but its functionality remains "
+ "unchanged during eager execution (returns a callable like "
+ "before). The converter cannot detect and fix this reliably, so "
+ "you need to inspect this usage manually.\n") % decay_name
+ file_edit_recorder.add(
+ comment,
+ node.lineno,
+ node.col_offset,
+ decay_name,
+ decay_name,
+ error="%s requires manual check." % decay_name)
+
+
+if __name__ == "__main__":
+ parser = argparse.ArgumentParser(
+ formatter_class=argparse.RawDescriptionHelpFormatter,
+ description="""Convert a TensorFlow Python file to 2.0
+
+Simple usage:
+ tf_convert_v2.py --infile foo.py --outfile bar.py
+ tf_convert_v2.py --intree ~/code/old --outtree ~/code/new
+""")
+ parser.add_argument(
+ "--infile",
+ dest="input_file",
+ help="If converting a single file, the name of the file "
+ "to convert")
+ parser.add_argument(
+ "--outfile",
+ dest="output_file",
+ help="If converting a single file, the output filename.")
+ parser.add_argument(
+ "--intree",
+ dest="input_tree",
+ help="If converting a whole tree of files, the directory "
+ "to read from (relative or absolute).")
+ parser.add_argument(
+ "--outtree",
+ dest="output_tree",
+ help="If converting a whole tree of files, the output "
+ "directory (relative or absolute).")
+ parser.add_argument(
+ "--copyotherfiles",
+ dest="copy_other_files",
+ help=("If converting a whole tree of files, whether to "
+ "copy the other files."),
+ type=bool,
+ default=False)
+ parser.add_argument(
+ "--reportfile",
+ dest="report_filename",
+ help=("The name of the file where the report log is "
+           "stored. "
+ "(default: %(default)s)"),
+ default="report.txt")
+ args = parser.parse_args()
+
+ upgrade = ast_edits.ASTCodeUpgrader(TFAPIChangeSpec())
+ report_text = None
+ report_filename = args.report_filename
+ files_processed = 0
+ if args.input_file:
+ if not args.output_file:
+ raise ValueError(
+ "--outfile=<output file> argument is required when converting a "
+ "single file.")
+ files_processed, report_text, errors = upgrade.process_file(
+ args.input_file, args.output_file)
+ files_processed = 1
+ elif args.input_tree:
+ if not args.output_tree:
+ raise ValueError(
+ "--outtree=<output directory> argument is required when converting a "
+ "file tree.")
+ files_processed, report_text, errors = upgrade.process_tree(
+ args.input_tree, args.output_tree, args.copy_other_files)
+ else:
+ parser.print_help()
+ if report_text:
+ open(report_filename, "w").write(report_text)
+ print("TensorFlow 2.0 Upgrade Script")
+ print("-----------------------------")
+ print("Converted %d files\n" % files_processed)
+ print("Detected %d errors that require attention" % len(errors))
+ print("-" * 80)
+ print("\n".join(errors))
+ print("\nMake sure to read the detailed log %r\n" % report_filename)
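
For the specially handled learning-rate decay functions, the converter deliberately leaves the call text unchanged and records an error for manual follow-up instead. A rough sketch of that observable behavior, assuming an in-memory file named example.py (the snippet and file name are illustrative):

    import six

    from tensorflow.tools.compatibility import ast_edits
    from tensorflow.tools.compatibility import tf_upgrade_v2

    upgrader = ast_edits.ASTCodeUpgrader(tf_upgrade_v2.TFAPIChangeSpec())
    in_file = six.StringIO("lr = tf.train.exponential_decay(0.1, step, 1000, 0.96)\n")
    out_file = six.StringIO()
    _, report, errors = upgrader.process_opened_file(
        "example.py", in_file, "example_out.py", out_file)
    # errors == ["example.py:1: tf.train.exponential_decay requires manual check."]
    # out_file.getvalue() is unchanged; the call is flagged rather than rewritten.
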
diff --git a/tensorflow/tools/compatibility/tf_upgrade_v2_test.py b/tensorflow/tools/compatibility/tf_upgrade_v2_test.py
new file mode 100644
index 0000000000..3886c1e8b9
--- /dev/null
+++ b/tensorflow/tools/compatibility/tf_upgrade_v2_test.py
@@ -0,0 +1,96 @@
+# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ==============================================================================
+"""Tests for tf 2.0 upgrader."""
+
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+import os
+import tempfile
+import six
+from tensorflow.python.framework import test_util
+from tensorflow.python.platform import test as test_lib
+from tensorflow.tools.compatibility import ast_edits
+from tensorflow.tools.compatibility import tf_upgrade_v2
+
+
+class TestUpgrade(test_util.TensorFlowTestCase):
+ """Test various APIs that have been changed in 2.0.
+
+ We also test whether a converted file is executable. test_file_v1_10.py
+ aims to exhaustively test that API changes are convertible and actually
+ work when run with current TensorFlow.
+ """
+
+ def _upgrade(self, old_file_text):
+ in_file = six.StringIO(old_file_text)
+ out_file = six.StringIO()
+ upgrader = ast_edits.ASTCodeUpgrader(tf_upgrade_v2.TFAPIChangeSpec())
+ count, report, errors = (
+ upgrader.process_opened_file("test.py", in_file,
+ "test_out.py", out_file))
+ return count, report, errors, out_file.getvalue()
+
+ def testParseError(self):
+ _, report, unused_errors, unused_new_text = self._upgrade(
+ "import tensorflow as tf\na + \n")
+ self.assertTrue(report.find("Failed to parse") != -1)
+
+ def testReport(self):
+ text = "tf.acos(a)\n"
+ _, report, unused_errors, unused_new_text = self._upgrade(text)
+    # This is not a complete test, but it is a sanity check that the report
+    # actually records the rename.
+    self.assertNotEqual(-1, report.find("Renamed function %r" % "tf.acos"))
+
+ def testRename(self):
+ text = "tf.acos(a)\n"
+ _, unused_report, unused_errors, new_text = self._upgrade(text)
+ self.assertEqual(new_text, "tf.math.acos(a)\n")
+ text = "tf.rsqrt(tf.log(3.8))\n"
+ _, unused_report, unused_errors, new_text = self._upgrade(text)
+ self.assertEqual(new_text, "tf.math.rsqrt(tf.math.log(3.8))\n")
+
+ def testLearningRateDecay(self):
+ for decay in ["tf.train.exponential_decay", "tf.train.piecewise_constant",
+ "tf.train.polynomial_decay", "tf.train.natural_exp_decay",
+ "tf.train.inverse_time_decay", "tf.train.cosine_decay",
+ "tf.train.cosine_decay_restarts",
+ "tf.train.linear_cosine_decay",
+ "tf.train.noisy_linear_cosine_decay"]:
+
+ text = "%s(a, b)\n" % decay
+ _, unused_report, errors, new_text = self._upgrade(text)
+ self.assertEqual(text, new_text)
+ self.assertEqual(errors, ["test.py:1: %s requires manual check." % decay])
+
+
+class TestUpgradeFiles(test_util.TensorFlowTestCase):
+
+ def testInplace(self):
+ """Check to make sure we don't have a file system race."""
+ temp_file = tempfile.NamedTemporaryFile("w", delete=False)
+ original = "tf.acos(a, b)\n"
+ upgraded = "tf.math.acos(a, b)\n"
+ temp_file.write(original)
+ temp_file.close()
+ upgrader = ast_edits.ASTCodeUpgrader(tf_upgrade_v2.TFAPIChangeSpec())
+ upgrader.process_file(temp_file.name, temp_file.name)
+ self.assertAllEqual(open(temp_file.name).read(), upgraded)
+ os.unlink(temp_file.name)
+
+
+if __name__ == "__main__":
+ test_lib.main()
diff --git a/tensorflow/tools/compatibility/update/BUILD b/tensorflow/tools/compatibility/update/BUILD
new file mode 100644
index 0000000000..feb37c902e
--- /dev/null
+++ b/tensorflow/tools/compatibility/update/BUILD
@@ -0,0 +1,15 @@
+licenses(["notice"]) # Apache 2.0
+
+package(default_visibility = ["//visibility:private"])
+
+py_binary(
+ name = "generate_v2_renames_map",
+ srcs = ["generate_v2_renames_map.py"],
+ srcs_version = "PY2AND3",
+ deps = [
+ "//tensorflow:tensorflow_py",
+ "//tensorflow/python:lib",
+ "//tensorflow/tools/common:public_api",
+ "//tensorflow/tools/common:traverse",
+ ],
+)
diff --git a/tensorflow/tools/compatibility/update/generate_v2_renames_map.py b/tensorflow/tools/compatibility/update/generate_v2_renames_map.py
new file mode 100644
index 0000000000..567eceb0b6
--- /dev/null
+++ b/tensorflow/tools/compatibility/update/generate_v2_renames_map.py
@@ -0,0 +1,103 @@
+# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ==============================================================================
+# pylint: disable=line-too-long
+"""Script for updating tensorflow/tools/compatibility/renames_v2.py.
+
+To update renames_v2.py, run:
+ bazel build tensorflow/tools/compatibility/update:generate_v2_renames_map
+ bazel-bin/tensorflow/tools/compatibility/update/generate_v2_renames_map
+"""
+# pylint: enable=line-too-long
+
+import tensorflow as tf
+
+from tensorflow.python.lib.io import file_io
+from tensorflow.python.util import tf_decorator
+from tensorflow.python.util import tf_export
+from tensorflow.tools.common import public_api
+from tensorflow.tools.common import traverse
+
+
+_OUTPUT_FILE_PATH = 'third_party/tensorflow/tools/compatibility/renames_v2.py'
+_FILE_HEADER = """# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ==============================================================================
+# pylint: disable=line-too-long
+\"\"\"List of renames to apply when converting from TF 1.0 to TF 2.0.
+
+THIS FILE IS AUTOGENERATED: To update, please run:
+ bazel build tensorflow/tools/compatibility/update:generate_v2_renames_map
+ bazel-bin/tensorflow/tools/compatibility/update/generate_v2_renames_map
+This file should be updated whenever endpoints are deprecated.
+\"\"\"
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+"""
+
+
+def update_renames_v2(output_file_path):
+ """Writes a Python dictionary mapping deprecated to canonical API names.
+
+ Args:
+ output_file_path: File path to write output to. Any existing contents
+      will be overwritten.
+ """
+ # Set of rename lines to write to output file in the form:
+ # 'tf.deprecated_name': 'tf.canonical_name'
+ rename_line_set = set()
+ # _tf_api_names attribute name
+ tensorflow_api_attr = tf_export.API_ATTRS[tf_export.TENSORFLOW_API_NAME].names
+
+ def visit(unused_path, unused_parent, children):
+ """Visitor that collects rename strings to add to rename_line_set."""
+ for child in children:
+ _, attr = tf_decorator.unwrap(child[1])
+ if not hasattr(attr, '__dict__'):
+ continue
+ api_names = attr.__dict__.get(tensorflow_api_attr, [])
+ deprecated_api_names = attr.__dict__.get('_tf_deprecated_api_names', [])
+ canonical_name = tf_export.get_canonical_name(
+ api_names, deprecated_api_names)
+ for name in deprecated_api_names:
+ rename_line_set.add(' \'tf.%s\': \'tf.%s\'' % (name, canonical_name))
+
+ visitor = public_api.PublicAPIVisitor(visit)
+ visitor.do_not_descend_map['tf'].append('contrib')
+ traverse.traverse(tf, visitor)
+
+ renames_file_text = '%srenames = {\n%s\n}\n' % (
+ _FILE_HEADER, ',\n'.join(sorted(rename_line_set)))
+ file_io.write_string_to_file(output_file_path, renames_file_text)
+
+
+def main(unused_argv):
+ update_renames_v2(_OUTPUT_FILE_PATH)
+
+
+if __name__ == '__main__':
+ tf.app.run(main=main)
diff --git a/tensorflow/tools/def_file_filter/def_file_filter.py.tpl b/tensorflow/tools/def_file_filter/def_file_filter.py.tpl
index 8bdc03eb0f..4bfcc2570c 100644
--- a/tensorflow/tools/def_file_filter/def_file_filter.py.tpl
+++ b/tensorflow/tools/def_file_filter/def_file_filter.py.tpl
@@ -48,6 +48,7 @@ EXCLUDE_RE = re.compile(r"RTTI|deleting destructor|::internal::")
INCLUDEPRE_RE = re.compile(r"google::protobuf::internal::ExplicitlyConstructed|"
r"google::protobuf::internal::ArenaImpl::AllocateAligned|" # for contrib/data/_prefetching_ops
r"google::protobuf::internal::ArenaImpl::AddCleanup|" # for contrib/data/_prefetching_ops
+ r"google::protobuf::internal::LogMessage|" # for contrib/data/_prefetching_ops
r"google::protobuf::Arena::OnArenaAllocation|" # for contrib/data/_prefetching_ops
r"tensorflow::internal::LogMessage|"
r"tensorflow::internal::LogString|"
diff --git a/tensorflow/tools/def_file_filter/def_file_filter_configure.bzl b/tensorflow/tools/def_file_filter/def_file_filter_configure.bzl
index 47539b2423..df0fd05319 100644
--- a/tensorflow/tools/def_file_filter/def_file_filter_configure.bzl
+++ b/tensorflow/tools/def_file_filter/def_file_filter_configure.bzl
@@ -24,23 +24,27 @@ load("@bazel_tools//tools/cpp:windows_cc_configure.bzl", "find_msvc_tool")
load("@bazel_tools//tools/cpp:lib_cc_configure.bzl", "auto_configure_fail")
def _def_file_filter_configure_impl(repository_ctx):
- if repository_ctx.os.name.lower().find("windows") == -1:
+ if repository_ctx.os.name.lower().find("windows") == -1:
+ repository_ctx.symlink(Label("//tensorflow/tools/def_file_filter:BUILD.tpl"), "BUILD")
+ repository_ctx.file("def_file_filter.py", "")
+ return
+ vc_path = find_vc_path(repository_ctx)
+ if vc_path == None:
+ auto_configure_fail("Visual C++ build tools not found on your machine")
+
+ undname = find_msvc_tool(repository_ctx, vc_path, "undname.exe")
+ if undname == None:
+ auto_configure_fail("Couldn't find undname.exe under %s, please check your VC installation and set BAZEL_VC environment variable correctly." % vc_path)
+ undname_bin_path = undname.replace("\\", "\\\\")
+
+ repository_ctx.template(
+ "def_file_filter.py",
+ Label("//tensorflow/tools/def_file_filter:def_file_filter.py.tpl"),
+ {
+ "%{undname_bin_path}": undname_bin_path,
+ },
+ )
repository_ctx.symlink(Label("//tensorflow/tools/def_file_filter:BUILD.tpl"), "BUILD")
- repository_ctx.file("def_file_filter.py", "")
- return
- vc_path = find_vc_path(repository_ctx)
- if vc_path == "visual-studio-not-found":
- auto_configure_fail("Visual C++ build tools not found on your machine")
- undname_bin_path = find_msvc_tool(repository_ctx, vc_path, "undname.exe").replace("\\", "\\\\")
-
- repository_ctx.template(
- "def_file_filter.py",
- Label("//tensorflow/tools/def_file_filter:def_file_filter.py.tpl"),
- {
- "%{undname_bin_path}": undname_bin_path,
- })
- repository_ctx.symlink(Label("//tensorflow/tools/def_file_filter:BUILD.tpl"), "BUILD")
-
def_file_filter_configure = repository_rule(
implementation = _def_file_filter_configure_impl,
@@ -51,6 +55,6 @@ def_file_filter_configure = repository_rule(
"VS100COMNTOOLS",
"VS110COMNTOOLS",
"VS120COMNTOOLS",
- "VS140COMNTOOLS"
+ "VS140COMNTOOLS",
],
)
diff --git a/tensorflow/tools/dist_test/README.md b/tensorflow/tools/dist_test/README.md
index 228d5ee35d..f8ed74aaf7 100644
--- a/tensorflow/tools/dist_test/README.md
+++ b/tensorflow/tools/dist_test/README.md
@@ -23,7 +23,7 @@ You can test specify version of TensorFlow:
./local_test.sh ${whl_file_url}
```
-For example, you can find these TensorFlow python package URLs from [here](https://www.tensorflow.org/install/install_linux#the_url_of_the_tensorflow_python_package) for Ubuntu.
+For example, you can find the TensorFlow Python package URLs for Ubuntu [here](https://www.tensorflow.org/install/pip).
**2) Launch a remote k8s cluster on Google Kubernetes Engine (GKE) and run the
test suite on it**
diff --git a/tensorflow/tools/dist_test/build_server.sh b/tensorflow/tools/dist_test/build_server.sh
index 225c034741..345217d733 100755
--- a/tensorflow/tools/dist_test/build_server.sh
+++ b/tensorflow/tools/dist_test/build_server.sh
@@ -23,7 +23,7 @@
# E.g.: tensorflow/tf_grpc_test_server:0.11.0rc1
#
# whl_file_location: URL from which the TensorFlow whl file will be downloaded.
-# E.g.: https://ci.tensorflow.org/view/Nightly/job/nightly-matrix-cpu/TF_BUILD_IS_OPT=OPT,TF_BUILD_IS_PIP=PIP,TF_BUILD_PYTHON_VERSION=PYTHON2,label=cpu-slave/lastSuccessfulBuild/artifact/pip_test/whl/tensorflow-0.11.0rc1-cp27-none-linux_x86_64.whl
+# E.g.: https://storage.googleapis.com/tensorflow/linux/cpu/tensorflow-1.5.0-cp27-none-linux_x86_64.whl
# E.g.: /path/to/folder/tensorflow-0.11.0rc1-cp27-none-linux_x86_64.whl
#
# The optional flag --test lets the script to use the Dockerfile for the
diff --git a/tensorflow/tools/dist_test/local_test.sh b/tensorflow/tools/dist_test/local_test.sh
index caae7fd530..b0114721bd 100755
--- a/tensorflow/tools/dist_test/local_test.sh
+++ b/tensorflow/tools/dist_test/local_test.sh
@@ -35,7 +35,7 @@
#
# Arguments:
# whl_file_location: URL from which the TensorFlow whl file will be acquired.
-# E.g.: https://ci.tensorflow.org/view/Nightly/job/nightly-matrix-cpu/TF_BUILD_IS_OPT=OPT,TF_BUILD_IS_PIP=PIP,TF_BUILD_PYTHON_VERSION=PYTHON2,label=cpu-slave/lastSuccessfulBuild/artifact/pip_test/whl/tensorflow-0.11.0rc1-cp27-none-linux_x86_64.whl
+# E.g.: https://storage.googleapis.com/tensorflow/linux/cpu/tensorflow-1.5.0-cp27-none-linux_x86_64.whl
# E.g.: /path/to/folder/tensorflow-0.11.0rc1-cp27-none-linux_x86_64.whl
#
# --leave_container_running: Do not stop the docker-in-docker container after
@@ -64,9 +64,6 @@ die() {
# Configurations
DOCKER_IMG_NAME="tensorflow/tf-dist-test-local-cluster"
-# Use TensorFlow v1.5.0 for Python 2.7 and CPU only as we set num_gpus to 0 in the below
-DEFAULT_WHL_FILE_LOCATION="https://storage.googleapis.com/tensorflow/linux/cpu/tensorflow-1.5.0-cp27-none-linux_x86_64.whl"
-
# Parse input arguments
LEAVE_CONTAINER_RUNNING=0
MODEL_NAME=""
@@ -77,8 +74,7 @@ SYNC_REPLICAS_FLAG=""
WHL_FILE_LOCATION=${1}
if [[ -z "${WHL_FILE_LOCATION}" ]]; then
- WHL_FILE_LOCATION=${DEFAULT_WHL_FILE_LOCATION}
- echo "use default whl file location"
+  echo "WARNING: No wheel URL passed. Will use the latest tf-nightly CPU Python 2 wheel."
fi
while true; do
@@ -131,7 +127,11 @@ echo "Building in temporary directory: ${BUILD_DIR}"
cp -r ${DIR}/* "${BUILD_DIR}"/ || \
die "Failed to copy files to ${BUILD_DIR}"
-if [[ $WHL_FILE_LOCATION =~ 'http://' || $WHL_FILE_LOCATION =~ 'https://' ]]; then
+# Download whl file into the build context directory.
+if [[ -z "${WHL_FILE_LOCATION}" ]]; then
+ pip2 download --no-deps tf-nightly
+ cp tf-nightly-*.whl "${BUILD_DIR}"/tensorflow-none-any.whl
+elif [[ $WHL_FILE_LOCATION =~ 'http://' || $WHL_FILE_LOCATION =~ 'https://' ]]; then
# Download whl file into the build context directory.
wget -P "${BUILD_DIR}" "${WHL_FILE_LOCATION}" || \
die "Failed to download tensorflow whl file from URL: ${WHL_FILE_LOCATION}"
diff --git a/tensorflow/tools/dist_test/remote_test.sh b/tensorflow/tools/dist_test/remote_test.sh
index 935535312d..e188c88c8f 100755
--- a/tensorflow/tools/dist_test/remote_test.sh
+++ b/tensorflow/tools/dist_test/remote_test.sh
@@ -108,7 +108,7 @@ fi
# Parse command-line arguments.
WHL_URL=${1}
if [[ -z "${WHL_URL}" ]]; then
- die "whl URL is not specified"
+  echo "WARNING: No wheel URL passed. Will use the latest tf-nightly CPU Python 2 wheel."
fi
# Create docker build context directory.
@@ -121,8 +121,13 @@ cp -r ${DIR}/* ${BUILD_DIR}/ || \
die "Failed to copy files to ${BUILD_DIR}"
# Download whl file into the build context directory.
-wget -P "${BUILD_DIR}" ${WHL_URL} || \
- die "Failed to download tensorflow whl file from URL: ${WHL_URL}"
+if [[ -z "${WHL_URL}" ]]; then
+ pip2 download --no-deps tf-nightly
+ cp tf-nightly-*.whl "${BUILD_DIR}"/tensorflow-none-any.whl
+else
+ wget -P "${BUILD_DIR}" ${WHL_URL} || \
+ die "Failed to download tensorflow whl file from URL: ${WHL_URL}"
+fi
# Build docker image for test.
docker build ${NO_CACHE_FLAG} \
diff --git a/tensorflow/tools/docker/Dockerfile b/tensorflow/tools/docker/Dockerfile
index 78cb4d250e..b5a6c05193 100644
--- a/tensorflow/tools/docker/Dockerfile
+++ b/tensorflow/tools/docker/Dockerfile
@@ -7,6 +7,7 @@ RUN apt-get update && apt-get install -y --no-install-recommends \
build-essential \
curl \
libfreetype6-dev \
+ libhdf5-serial-dev \
libpng12-dev \
libzmq3-dev \
pkg-config \
@@ -28,6 +29,8 @@ RUN pip --no-cache-dir install \
h5py \
ipykernel \
jupyter \
+ keras_applications==1.0.5 \
+ keras_preprocessing==1.0.3 \
matplotlib \
numpy \
pandas \
diff --git a/tensorflow/tools/docker/Dockerfile.devel b/tensorflow/tools/docker/Dockerfile.devel
index b3dbe475d2..c741e8ad0c 100644
--- a/tensorflow/tools/docker/Dockerfile.devel
+++ b/tensorflow/tools/docker/Dockerfile.devel
@@ -8,6 +8,7 @@ RUN apt-get update && apt-get install -y --no-install-recommends \
git \
libcurl3-dev \
libfreetype6-dev \
+ libhdf5-serial-dev \
libpng12-dev \
libzmq3-dev \
pkg-config \
@@ -28,9 +29,14 @@ RUN curl -fSsL -O https://bootstrap.pypa.io/get-pip.py && \
rm get-pip.py
RUN pip --no-cache-dir install \
+ Pillow \
+ h5py \
ipykernel \
jupyter \
+ keras_applications==1.0.5 \
+ keras_preprocessing==1.0.3 \
matplotlib \
+ mock \
numpy \
scipy \
sklearn \
@@ -59,7 +65,7 @@ RUN echo "startup --batch" >>/etc/bazel.bazelrc
RUN echo "build --spawn_strategy=standalone --genrule_strategy=standalone" \
>>/etc/bazel.bazelrc
# Install the most recent bazel release.
-ENV BAZEL_VERSION 0.11.0
+ENV BAZEL_VERSION 0.15.0
WORKDIR /
RUN mkdir /bazel && \
cd /bazel && \
@@ -72,7 +78,7 @@ RUN mkdir /bazel && \
# Download and build TensorFlow.
WORKDIR /tensorflow
-RUN git clone --branch=r1.7 --depth=1 https://github.com/tensorflow/tensorflow.git .
+RUN git clone --branch=r1.11 --depth=1 https://github.com/tensorflow/tensorflow.git .
# TODO(craigcitro): Don't install the pip package, since it makes it
# more difficult to experiment with local changes. Instead, just add
@@ -81,7 +87,7 @@ RUN git clone --branch=r1.7 --depth=1 https://github.com/tensorflow/tensorflow.g
ENV CI_BUILD_PYTHON python
RUN tensorflow/tools/ci_build/builds/configured CPU \
- bazel build -c opt --cxxopt="-D_GLIBCXX_USE_CXX11_ABI=0" \
+ bazel build -c opt --copt=-mavx --cxxopt="-D_GLIBCXX_USE_CXX11_ABI=0" \
# For optimized builds appropriate for the hardware platform of your choosing, uncomment below...
# For ivy-bridge or sandy-bridge
# --copt=-march="ivybridge" \
diff --git a/tensorflow/tools/docker/Dockerfile.devel-cpu-mkl b/tensorflow/tools/docker/Dockerfile.devel-cpu-mkl
deleted file mode 100644
index 037d13116e..0000000000
--- a/tensorflow/tools/docker/Dockerfile.devel-cpu-mkl
+++ /dev/null
@@ -1,83 +0,0 @@
-FROM tensorflow/tensorflow:latest-devel
-
-LABEL maintainer="Clayne Robison<clayne.b.robison@intel.com>"
-
-# These arguments are parameterized. Use --build-args to override.
-ARG TF_BRANCH=r1.7
-ARG WHL_DIR=/whl
-
-RUN apt-get update && apt-get install -y --no-install-recommends \
- golang \
- vim \
- emacs \
- && \
- apt-get clean && \
- rm -rf /var/lib/apt/lists/*
-
-RUN pip --no-cache-dir install --upgrade \
- pip setuptools
-
-RUN pip --no-cache-dir install wheel
-
-# Download and build TensorFlow.
-WORKDIR /
-RUN rm -rf tensorflow && \
- git clone https://github.com/tensorflow/tensorflow.git && \
- cd tensorflow && \
- git checkout ${TF_BRANCH}
-WORKDIR /tensorflow
-
-# Configure the build for CPU with MKL by accepting default build options and
-# setting library locations
-ENV CI_BUILD_PYTHON=python \
- LD_LIBRARY_PATH=${LD_LIBRARY_PATH} \
- PYTHON_BIN_PATH=/usr/bin/python \
- PYTHON_LIB_PATH=/usr/local/lib/python2.7/dist-packages \
- CC_OPT_FLAGS='-march=native' \
- TF_NEED_JEMALLOC=0 \
- TF_NEED_GCP=0 \
- TF_NEED_CUDA=0 \
- TF_NEED_HDFS=0 \
- TF_NEED_S3=0 \
- TF_NEED_OPENCL=0 \
- TF_NEED_GDR=0 \
- TF_ENABLE_XLA=0 \
- TF_NEED_VERBS=0 \
- TF_NEED_MPI=0
-RUN ./configure
-
-# Build and Install TensorFlow.
-# The 'mkl' option builds with Intel(R) Math Kernel Library (MKL), which detects
-# the platform it is currently running on and takes appropriately optimized
-# paths. The -march=native option is for code that is not in MKL, and assumes
-# this container will be run on the same architecture on which it is built.
-RUN LD_LIBRARY_PATH=${LD_LIBRARY_PATH} \
- bazel build --config=mkl \
- --config="opt" \
- --copt="-march=broadwell" \
- --copt="-O3" \
- //tensorflow/tools/pip_package:build_pip_package && \
- mkdir ${WHL_DIR} && \
- bazel-bin/tensorflow/tools/pip_package/build_pip_package ${WHL_DIR}
-
-# Clean up Bazel cache when done, but leave the whl.
-# This will upgrade the default Tensorflow version with the Intel MKL version
-RUN pip --no-cache-dir install --upgrade ${WHL_DIR}/tensorflow-*.whl && \
- rm -rf /root/.cache
-
-WORKDIR /root
-
-#add welcome message with instructions
-
-RUN echo '[ ! -z "$TERM" -a -r /etc/motd ] && cat /etc/issue && cat /etc/motd' \
- >> /etc/bash.bashrc \
- ; echo "\
-||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||\n\
-| \n\
-| Docker container running Ubuntu \n\
-| with TensorFlow ${TF_BRANCH} optimized for CPU \n\
-| with Intel(R) MKL \n\
-| \n\
-||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||\n\
-\n "\
- > /etc/motd
diff --git a/tensorflow/tools/docker/Dockerfile.devel-gpu b/tensorflow/tools/docker/Dockerfile.devel-gpu
index bfb96da58d..f544725af4 100644
--- a/tensorflow/tools/docker/Dockerfile.devel-gpu
+++ b/tensorflow/tools/docker/Dockerfile.devel-gpu
@@ -13,10 +13,13 @@ RUN apt-get update && apt-get install -y --no-install-recommends \
cuda-cusparse-dev-9-0 \
curl \
git \
- libcudnn7=7.0.5.15-1+cuda9.0 \
- libcudnn7-dev=7.0.5.15-1+cuda9.0 \
+ libcudnn7=7.2.1.38-1+cuda9.0 \
+ libcudnn7-dev=7.2.1.38-1+cuda9.0 \
+ libnccl2=2.2.13-1+cuda9.0 \
+ libnccl-dev=2.2.13-1+cuda9.0 \
libcurl3-dev \
libfreetype6-dev \
+ libhdf5-serial-dev \
libpng12-dev \
libzmq3-dev \
pkg-config \
@@ -32,14 +35,30 @@ RUN apt-get update && apt-get install -y --no-install-recommends \
find /usr/local/cuda-9.0/lib64/ -type f -name 'lib*_static.a' -not -name 'libcudart_static.a' -delete && \
rm /usr/lib/x86_64-linux-gnu/libcudnn_static_v7.a
+RUN apt-get update && \
+ apt-get install nvinfer-runtime-trt-repo-ubuntu1604-4.0.1-ga-cuda9.0 && \
+ apt-get update && \
+ apt-get install libnvinfer4=4.1.2-1+cuda9.0 && \
+ apt-get install libnvinfer-dev=4.1.2-1+cuda9.0
+
+# Link NCCL library and header where the build script expects them.
+RUN mkdir /usr/local/cuda-9.0/lib && \
+ ln -s /usr/lib/x86_64-linux-gnu/libnccl.so.2 /usr/local/cuda/lib/libnccl.so.2 && \
+ ln -s /usr/include/nccl.h /usr/local/cuda/include/nccl.h
+
RUN curl -fSsL -O https://bootstrap.pypa.io/get-pip.py && \
python get-pip.py && \
rm get-pip.py
RUN pip --no-cache-dir install \
+ Pillow \
+ h5py \
ipykernel \
jupyter \
+ keras_applications==1.0.5 \
+ keras_preprocessing==1.0.3 \
matplotlib \
+ mock \
numpy \
scipy \
sklearn \
@@ -68,7 +87,7 @@ RUN echo "startup --batch" >>/etc/bazel.bazelrc
RUN echo "build --spawn_strategy=standalone --genrule_strategy=standalone" \
>>/etc/bazel.bazelrc
# Install the most recent bazel release.
-ENV BAZEL_VERSION 0.11.0
+ENV BAZEL_VERSION 0.15.0
WORKDIR /
RUN mkdir /bazel && \
cd /bazel && \
@@ -81,20 +100,24 @@ RUN mkdir /bazel && \
# Download and build TensorFlow.
WORKDIR /tensorflow
-RUN git clone --branch=r1.7 --depth=1 https://github.com/tensorflow/tensorflow.git .
+RUN git clone --branch=r1.11 --depth=1 https://github.com/tensorflow/tensorflow.git .
# Configure the build for our CUDA configuration.
ENV CI_BUILD_PYTHON python
ENV LD_LIBRARY_PATH /usr/local/cuda/extras/CUPTI/lib64:$LD_LIBRARY_PATH
ENV TF_NEED_CUDA 1
-ENV TF_CUDA_COMPUTE_CAPABILITIES=3.0,3.5,5.2,6.0,6.1
+ENV TF_NEED_TENSORRT 1
+ENV TF_CUDA_COMPUTE_CAPABILITIES=3.5,5.2,6.0,6.1,7.0
ENV TF_CUDA_VERSION=9.0
ENV TF_CUDNN_VERSION=7
+# NCCL 2.x
+ENV TF_NCCL_VERSION=2
+
RUN ln -s /usr/local/cuda/lib64/stubs/libcuda.so /usr/local/cuda/lib64/stubs/libcuda.so.1 && \
LD_LIBRARY_PATH=/usr/local/cuda/lib64/stubs:${LD_LIBRARY_PATH} \
tensorflow/tools/ci_build/builds/configured GPU \
- bazel build -c opt --config=cuda \
+ bazel build -c opt --copt=-mavx --config=cuda \
--cxxopt="-D_GLIBCXX_USE_CXX11_ABI=0" \
tensorflow/tools/pip_package:build_pip_package && \
rm /usr/local/cuda/lib64/stubs/libcuda.so.1 && \
diff --git a/tensorflow/tools/docker/Dockerfile.devel-mkl b/tensorflow/tools/docker/Dockerfile.devel-mkl
new file mode 100755
index 0000000000..db7c701289
--- /dev/null
+++ b/tensorflow/tools/docker/Dockerfile.devel-mkl
@@ -0,0 +1,143 @@
+FROM ubuntu:16.04
+
+LABEL maintainer="Clayne Robison <clayne.b.robison@intel.com>"
+
+# These parameters can be overridden by parameterized_docker_build.sh
+ARG TF_BUILD_VERSION=r1.11
+ARG PYTHON="python"
+ARG PYTHON3_DEV=""
+ARG WHL_DIR="/tmp/pip"
+ARG PIP="pip"
+
+RUN apt-get update && apt-get install -y --no-install-recommends \
+ build-essential \
+ curl \
+ git \
+ libcurl3-dev \
+ libfreetype6-dev \
+ libhdf5-serial-dev \
+ libpng12-dev \
+ libzmq3-dev \
+ libssl-dev \
+ pkg-config \
+ rsync \
+ software-properties-common \
+ unzip \
+ zip \
+ zlib1g-dev \
+ openjdk-8-jdk \
+ openjdk-8-jre-headless
+
+# Install Python 3
+RUN if [ ${PYTHON} = "python3.6" ]; then \
+ curl https://www.python.org/ftp/python/3.6.5/Python-3.6.5.tar.xz -o /opt/python.tar.xz && \
+ cd /opt && tar xvf python.tar.xz && \
+ cd /opt/*/ && ./configure && \
+ make && make install; \
+ else \
+ apt-get install -y --no-install-recommends \
+ python-dev \
+ ${PYTHON3_DEV}; \
+ fi
+
+RUN apt-get clean && \
+ rm -rf /var/lib/apt/lists/*
+
+RUN curl -fSsL -O https://bootstrap.pypa.io/get-pip.py && \
+ ${PYTHON} get-pip.py && \
+ rm get-pip.py
+
+RUN ${PIP} --no-cache-dir install \
+ Pillow \
+ h5py \
+ ipykernel \
+ jupyter \
+ keras_applications==1.0.5 \
+ keras_preprocessing==1.0.3 \
+ matplotlib \
+ mock \
+ numpy \
+ scipy \
+ sklearn \
+ pandas \
+ && \
+ ${PYTHON} -m ipykernel.kernelspec
+
+RUN if [ "${PYTHON}" = "python3" ]; then \
+ ln -s -f /usr/bin/python3 /usr/bin/python; \
+ elif [ "${PYTHON}" = "python3.6" ]; then \
+ ln -s -f /usr/local/bin/python3.6 /usr/bin/python; \
+ fi
+
+# Set up our notebook config.
+COPY jupyter_notebook_config.py /root/.jupyter/
+
+# Jupyter has issues with being run directly:
+# https://github.com/ipython/ipython/issues/7062
+# We just add a little wrapper script.
+COPY run_jupyter.sh /
+
+# Set up Bazel.
+
+# Running bazel inside a `docker build` command causes trouble, cf:
+# https://github.com/bazelbuild/bazel/issues/134
+# The easiest solution is to set up a bazelrc file forcing --batch.
+RUN echo "startup --batch" >>/etc/bazel.bazelrc
+# Similarly, we need to work around sandboxing issues:
+# https://github.com/bazelbuild/bazel/issues/418
+RUN echo "build --spawn_strategy=standalone --genrule_strategy=standalone" \
+ >>/etc/bazel.bazelrc
+# Install the most recent bazel release.
+ENV BAZEL_VERSION 0.15.0
+WORKDIR /
+RUN mkdir /bazel && \
+ cd /bazel && \
+ curl -H "User-Agent: Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/57.0.2987.133 Safari/537.36" -fSsL -O https://github.com/bazelbuild/bazel/releases/download/$BAZEL_VERSION/bazel-$BAZEL_VERSION-installer-linux-x86_64.sh && \
+ curl -H "User-Agent: Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/57.0.2987.133 Safari/537.36" -fSsL -o /bazel/LICENSE.txt https://raw.githubusercontent.com/bazelbuild/bazel/master/LICENSE && \
+ chmod +x bazel-*.sh && \
+ ./bazel-$BAZEL_VERSION-installer-linux-x86_64.sh && \
+ cd / && \
+ rm -f /bazel/bazel-$BAZEL_VERSION-installer-linux-x86_64.sh
+
+# Download and build TensorFlow.
+WORKDIR /tensorflow
+
+# Download and build TensorFlow.
+# Enable checking out both tags and branches
+RUN export TAG_PREFIX="v" && \
+ echo ${TF_BUILD_VERSION} | grep -q ^${TAG_PREFIX}; \
+ if [ $? -eq 0 ]; then \
+ git clone --depth=1 https://github.com/tensorflow/tensorflow.git . && \
+ git fetch --tags && \
+ git checkout ${TF_BUILD_VERSION}; \
+ else \
+ git clone --depth=1 --branch=${TF_BUILD_VERSION} https://github.com/tensorflow/tensorflow.git . ; \
+ fi
+
+RUN yes "" | ${PYTHON} configure.py
+
+ENV CI_BUILD_PYTHON ${PYTHON}
+
+# Set bazel build parameters in .bazelrc in parameterized_docker_build.sh
+# Use --copt=-march values to get optimized builds appropriate for the hardware
+# platform of your choice.
+# For ivy-bridge or sandy-bridge
+# --copt=-march="avx" \
+# For haswell, broadwell, or skylake
+# --copt=-march="avx2" \
+COPY .bazelrc /root/.bazelrc
+
+RUN tensorflow/tools/ci_build/builds/configured CPU \
+ bazel --bazelrc=/root/.bazelrc build -c opt \
+ tensorflow/tools/pip_package:build_pip_package && \
+ bazel-bin/tensorflow/tools/pip_package/build_pip_package "${WHL_DIR}" && \
+ ${PIP} --no-cache-dir install --upgrade "${WHL_DIR}"/tensorflow-*.whl && \
+ rm -rf /root/.cache
+# Clean up Bazel cache when done.
+
+# TensorBoard
+EXPOSE 6006
+# IPython
+EXPOSE 8888
+
+WORKDIR /root
diff --git a/tensorflow/tools/docker/Dockerfile.devel-mkl-horovod b/tensorflow/tools/docker/Dockerfile.devel-mkl-horovod
new file mode 100755
index 0000000000..987b582d10
--- /dev/null
+++ b/tensorflow/tools/docker/Dockerfile.devel-mkl-horovod
@@ -0,0 +1,168 @@
+FROM ubuntu:16.04
+
+LABEL maintainer="Cong Xu <cong.xu@intel.com>"
+
+# These parameters can be overridden by parameterized_docker_build.sh
+ARG TF_BUILD_VERSION=r1.9
+ARG PYTHON="python"
+ARG PYTHON3_DEV=""
+ARG WHL_DIR="/tmp/pip"
+ARG PIP="pip"
+
+RUN apt-get update && apt-get install -y --no-install-recommends \
+ build-essential \
+ curl \
+ git \
+ libcurl3-dev \
+ libfreetype6-dev \
+ libhdf5-serial-dev \
+ libpng12-dev \
+ libzmq3-dev \
+ pkg-config \
+ python-dev \
+ ${PYTHON3_DEV} \
+ rsync \
+ software-properties-common \
+ unzip \
+ zip \
+ zlib1g-dev \
+ openjdk-8-jdk \
+ openjdk-8-jre-headless \
+ wget \
+ numactl \
+ openssh-client \
+ openssh-server \
+ && \
+ apt-get clean && \
+ rm -rf /var/lib/apt/lists/*
+
+RUN curl -fSsL -O https://bootstrap.pypa.io/get-pip.py && \
+ ${PYTHON} get-pip.py && \
+ rm get-pip.py
+
+RUN ${PIP} --no-cache-dir install \
+ Pillow \
+ h5py \
+ ipykernel \
+ jupyter \
+ keras_applications==1.0.5 \
+ keras_preprocessing==1.0.3 \
+ matplotlib \
+ mock \
+ numpy \
+ scipy \
+ sklearn \
+ pandas \
+ && \
+ ${PYTHON} -m ipykernel.kernelspec
+
+RUN if [ "${PYTHON}" = "python3" ]; then \
+ ln -s -f /usr/bin/python3 /usr/bin/python; \
+ fi
+
+# Set up our notebook config.
+COPY jupyter_notebook_config.py /root/.jupyter/
+
+# Jupyter has issues with being run directly:
+# https://github.com/ipython/ipython/issues/7062
+# We just add a little wrapper script.
+COPY run_jupyter.sh /
+
+# Set up Bazel.
+
+# Running bazel inside a `docker build` command causes trouble, cf:
+# https://github.com/bazelbuild/bazel/issues/134
+# The easiest solution is to set up a bazelrc file forcing --batch.
+RUN echo "startup --batch" >>/etc/bazel.bazelrc
+# Similarly, we need to work around sandboxing issues:
+# https://github.com/bazelbuild/bazel/issues/418
+RUN echo "build --spawn_strategy=standalone --genrule_strategy=standalone" \
+ >>/etc/bazel.bazelrc
+# Install the most recent bazel release.
+ENV BAZEL_VERSION 0.15.0
+WORKDIR /
+RUN mkdir /bazel && \
+ cd /bazel && \
+ curl -H "User-Agent: Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/57.0.2987.133 Safari/537.36" -fSsL -O https://github.com/bazelbuild/bazel/releases/download/$BAZEL_VERSION/bazel-$BAZEL_VERSION-installer-linux-x86_64.sh && \
+ curl -H "User-Agent: Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/57.0.2987.133 Safari/537.36" -fSsL -o /bazel/LICENSE.txt https://raw.githubusercontent.com/bazelbuild/bazel/master/LICENSE && \
+ chmod +x bazel-*.sh && \
+ ./bazel-$BAZEL_VERSION-installer-linux-x86_64.sh && \
+ cd / && \
+ rm -f /bazel/bazel-$BAZEL_VERSION-installer-linux-x86_64.sh
+
+# Download and build TensorFlow.
+WORKDIR /tensorflow
+
+# Download and build TensorFlow.
+# Enable checking out both tags and branches
+RUN export TAG_PREFIX="v" && \
+ echo ${TF_BUILD_VERSION} | grep -q ^${TAG_PREFIX}; \
+ if [ $? -eq 0 ]; then \
+ git clone --depth=1 https://github.com/tensorflow/tensorflow.git . && \
+ git fetch --tags && \
+ git checkout ${TF_BUILD_VERSION}; \
+ else \
+ git clone --depth=1 --branch=${TF_BUILD_VERSION} https://github.com/tensorflow/tensorflow.git . ; \
+ fi
+
+RUN yes "" | ${PYTHON} configure.py
+
+ENV CI_BUILD_PYTHON ${PYTHON}
+
+# Set bazel build parameters in .bazelrc in parameterized_docker_build.sh
+# Use --copt=-march values to get optimized builds appropriate for the hardware
+# platform of your choice.
+# For ivy-bridge or sandy-bridge
+# --copt=-march="avx" \
+# For haswell, broadwell, or skylake
+# --copt=-march="avx2" \
+COPY .bazelrc /root/.bazelrc
+
+RUN tensorflow/tools/ci_build/builds/configured CPU \
+ bazel --bazelrc=/root/.bazelrc build -c opt \
+ tensorflow/tools/pip_package:build_pip_package && \
+ bazel-bin/tensorflow/tools/pip_package/build_pip_package "${WHL_DIR}" && \
+ ${PIP} --no-cache-dir install --upgrade "${WHL_DIR}"/tensorflow-*.whl && \
+ rm -rf /root/.cache
+# Clean up Bazel cache when done.
+
+WORKDIR /root
+
+# Install Open MPI
+RUN mkdir /tmp/openmpi && \
+ cd /tmp/openmpi && \
+ wget https://www.open-mpi.org/software/ompi/v3.0/downloads/openmpi-3.0.0.tar.gz && \
+ tar zxf openmpi-3.0.0.tar.gz && \
+ cd openmpi-3.0.0 && \
+ ./configure --enable-orterun-prefix-by-default && \
+ make -j $(nproc) all && \
+ make install && \
+ ldconfig && \
+ rm -rf /tmp/openmpi
+
+# Create a wrapper for OpenMPI to allow running as root by default
+RUN mv /usr/local/bin/mpirun /usr/local/bin/mpirun.real && \
+ echo '#!/bin/bash' > /usr/local/bin/mpirun && \
+ echo 'mpirun.real --allow-run-as-root "$@"' >> /usr/local/bin/mpirun && \
+ chmod a+x /usr/local/bin/mpirun
+
+# Configure OpenMPI to run with good defaults:
+RUN echo "btl_tcp_if_exclude = lo,docker0" >> /usr/local/etc/openmpi-mca-params.conf
+
+# Install Horovod
+RUN ${PIP} install --no-cache-dir horovod
+
+# Install OpenSSH for MPI to communicate between containers
+RUN mkdir -p /var/run/sshd
+
+# Allow OpenSSH to talk to containers without asking for confirmation
+RUN cat /etc/ssh/ssh_config | grep -v StrictHostKeyChecking > /etc/ssh/ssh_config.new && \
+ echo " StrictHostKeyChecking no" >> /etc/ssh/ssh_config.new && \
+ mv /etc/ssh/ssh_config.new /etc/ssh/ssh_config
+
+# TensorBoard
+EXPOSE 6006
+# IPython
+EXPOSE 8888
+
+WORKDIR /root
diff --git a/tensorflow/tools/docker/Dockerfile.gpu b/tensorflow/tools/docker/Dockerfile.gpu
index 9e1708662e..781bf9e851 100644
--- a/tensorflow/tools/docker/Dockerfile.gpu
+++ b/tensorflow/tools/docker/Dockerfile.gpu
@@ -12,8 +12,10 @@ RUN apt-get update && apt-get install -y --no-install-recommends \
cuda-cusolver-9-0 \
cuda-cusparse-9-0 \
curl \
- libcudnn7=7.0.5.15-1+cuda9.0 \
+ libcudnn7=7.2.1.38-1+cuda9.0 \
+ libnccl2=2.2.13-1+cuda9.0 \
libfreetype6-dev \
+ libhdf5-serial-dev \
libpng12-dev \
libzmq3-dev \
pkg-config \
@@ -26,6 +28,11 @@ RUN apt-get update && apt-get install -y --no-install-recommends \
apt-get clean && \
rm -rf /var/lib/apt/lists/*
+RUN apt-get update && \
+ apt-get install nvinfer-runtime-trt-repo-ubuntu1604-4.0.1-ga-cuda9.0 && \
+ apt-get update && \
+ apt-get install libnvinfer4=4.1.2-1+cuda9.0
+
RUN curl -O https://bootstrap.pypa.io/get-pip.py && \
python get-pip.py && \
rm get-pip.py
@@ -35,6 +42,8 @@ RUN pip --no-cache-dir install \
h5py \
ipykernel \
jupyter \
+ keras_applications==1.0.5 \
+ keras_preprocessing==1.0.3 \
matplotlib \
numpy \
pandas \
diff --git a/tensorflow/tools/docker/Dockerfile.mkl b/tensorflow/tools/docker/Dockerfile.mkl
new file mode 100755
index 0000000000..641c9e3b16
--- /dev/null
+++ b/tensorflow/tools/docker/Dockerfile.mkl
@@ -0,0 +1,77 @@
+FROM ubuntu:16.04
+
+LABEL maintainer="Clayne Robison <clayne.b.robison@intel.com>"
+
+# This parameter MUST be set by parameterized_docker_build.sh
+ARG TF_WHL_URL
+
+# Optional parameters
+ARG TF_BUILD_VERSION=r1.9
+ARG PYTHON="python"
+ARG PYTHON_DEV="python-dev"
+ARG PIP="pip"
+
+# Pick up some TF dependencies
+RUN apt-get update && apt-get install -y --no-install-recommends \
+ build-essential \
+ curl \
+ libfreetype6-dev \
+ libhdf5-serial-dev \
+ libpng12-dev \
+ libzmq3-dev \
+ pkg-config \
+ ${PYTHON} \
+ ${PYTHON_DEV} \
+ rsync \
+ software-properties-common \
+ unzip \
+ && \
+ apt-get clean && \
+ rm -rf /var/lib/apt/lists/*
+
+RUN curl -O https://bootstrap.pypa.io/get-pip.py && \
+ ${PYTHON} get-pip.py && \
+ rm get-pip.py
+
+RUN ${PIP} --no-cache-dir install \
+ Pillow \
+ h5py \
+ ipykernel \
+ jupyter \
+ keras_applications==1.0.5 \
+ keras_preprocessing==1.0.3 \
+ matplotlib \
+ numpy \
+ pandas \
+ scipy \
+ sklearn \
+ && \
+ ${PYTHON} -m ipykernel.kernelspec
+
+COPY ${TF_WHL_URL} /
+RUN ${PIP} install --no-cache-dir --force-reinstall /${TF_WHL_URL} && \
+ rm -rf /${TF_WHL_URL}
+
+RUN if [ "${PYTHON}" = "python3" ]; then \
+ ln -s -f /usr/bin/python3 /usr/bin/python; \
+ fi
+
+# Set up our notebook config.
+COPY jupyter_notebook_config.py /root/.jupyter/
+
+# Copy sample notebooks.
+COPY notebooks /notebooks
+
+# Jupyter has issues with being run directly:
+# https://github.com/ipython/ipython/issues/7062
+# We just add a little wrapper script.
+COPY run_jupyter.sh /
+
+# TensorBoard
+EXPOSE 6006
+# IPython
+EXPOSE 8888
+
+WORKDIR "/notebooks"
+
+CMD ["/run_jupyter.sh", "--allow-root"]
diff --git a/tensorflow/tools/docker/Dockerfile.mkl-horovod b/tensorflow/tools/docker/Dockerfile.mkl-horovod
new file mode 100755
index 0000000000..2b11679f54
--- /dev/null
+++ b/tensorflow/tools/docker/Dockerfile.mkl-horovod
@@ -0,0 +1,111 @@
+FROM ubuntu:16.04
+
+LABEL maintainer="Cong Xu <cong.xu@intel.com>"
+
+# This parameter MUST be set by parameterized_docker_build.sh
+ARG TF_WHL_URL
+
+# Optional parameters
+ARG TF_BUILD_VERSION=r1.9
+ARG PYTHON="python"
+ARG PYTHON_DEV="python-dev"
+ARG PIP="pip"
+
+# Pick up some TF dependencies
+RUN apt-get update && apt-get install -y --no-install-recommends \
+ build-essential \
+ curl \
+ libfreetype6-dev \
+ libhdf5-serial-dev \
+ libpng12-dev \
+ libzmq3-dev \
+ pkg-config \
+ python \
+ ${PYTHON_DEV} \
+ rsync \
+ software-properties-common \
+ unzip \
+ && \
+ apt-get clean && \
+ rm -rf /var/lib/apt/lists/*
+
+RUN curl -O https://bootstrap.pypa.io/get-pip.py && \
+ python get-pip.py && \
+ rm get-pip.py
+
+RUN ${PIP} --no-cache-dir install \
+ Pillow \
+ h5py \
+ ipykernel \
+ jupyter \
+ keras_applications==1.0.5 \
+ keras_preprocessing==1.0.3 \
+ matplotlib \
+ numpy \
+ pandas \
+ scipy \
+ sklearn \
+ && \
+ python -m ipykernel.kernelspec
+
+COPY ${TF_WHL_URL} /
+RUN ${PIP} install --no-cache-dir --force-reinstall /${TF_WHL_URL} && \
+ rm -rf /${TF_WHL_URL}
+
+RUN if [ "${PYTHON}" = "python3" ]; then \
+ ln -s -f /usr/bin/python3 /usr/bin/python; \
+ fi
+
+# Set up our notebook config.
+COPY jupyter_notebook_config.py /root/.jupyter/
+
+# Copy sample notebooks.
+COPY notebooks /notebooks
+
+# Jupyter has issues with being run directly:
+# https://github.com/ipython/ipython/issues/7062
+# We just add a little wrapper script.
+COPY run_jupyter.sh /
+
+WORKDIR /root
+
+# Install Open MPI
+RUN mkdir /tmp/openmpi && \
+ cd /tmp/openmpi && \
+ wget https://www.open-mpi.org/software/ompi/v3.0/downloads/openmpi-3.0.0.tar.gz && \
+ tar zxf openmpi-3.0.0.tar.gz && \
+ cd openmpi-3.0.0 && \
+ ./configure --enable-orterun-prefix-by-default && \
+ make -j $(nproc) all && \
+ make install && \
+ ldconfig && \
+ rm -rf /tmp/openmpi
+
+# Create a wrapper for OpenMPI to allow running as root by default
+RUN mv /usr/local/bin/mpirun /usr/local/bin/mpirun.real && \
+ echo '#!/bin/bash' > /usr/local/bin/mpirun && \
+ echo 'mpirun.real --allow-run-as-root "$@"' >> /usr/local/bin/mpirun && \
+ chmod a+x /usr/local/bin/mpirun
+
+# Configure OpenMPI to run with good defaults:
+RUN echo "btl_tcp_if_exclude = lo,docker0" >> /usr/local/etc/openmpi-mca-params.conf
+
+# Install Horovod
+RUN ${PIP} install --no-cache-dir horovod
+
+# Install OpenSSH for MPI to communicate between containers
+RUN mkdir -p /var/run/sshd
+
+# Allow OpenSSH to talk to containers without asking for confirmation
+RUN cat /etc/ssh/ssh_config | grep -v StrictHostKeyChecking > /etc/ssh/ssh_config.new && \
+ echo " StrictHostKeyChecking no" >> /etc/ssh/ssh_config.new && \
+ mv /etc/ssh/ssh_config.new /etc/ssh/ssh_config
+
+# TensorBoard
+EXPOSE 6006
+# IPython
+EXPOSE 8888
+
+WORKDIR "/notebooks"
+
+CMD ["/run_jupyter.sh", "--allow-root"]
diff --git a/tensorflow/tools/docker/README.md b/tensorflow/tools/docker/README.md
index f46c56e11a..263f25bc48 100644
--- a/tensorflow/tools/docker/README.md
+++ b/tensorflow/tools/docker/README.md
@@ -1,3 +1,10 @@
+# WARNING: THESE IMAGES ARE DEPRECATED.
+
+TensorFlow's Dockerfiles are now located in
+[`tensorflow/tools/dockerfiles/`](https://github.com/tensorflow/tensorflow/blob/master/tensorflow/tools/dockerfiles).
+
+This directory will eventually be removed.
+
# Using TensorFlow via Docker
This directory contains `Dockerfile`s to make it easy to get up and running with
@@ -16,12 +23,12 @@ quick links here:
We currently maintain two Docker container images:
-* `gcr.io/tensorflow/tensorflow` - TensorFlow with all dependencies - CPU only!
+* `tensorflow/tensorflow` - TensorFlow with all dependencies - CPU only!
-* `gcr.io/tensorflow/tensorflow:latest-gpu` - TensorFlow with all dependencies
+* `tensorflow/tensorflow:latest-gpu` - TensorFlow with all dependencies
and support for NVidia CUDA
-Note: We also publish the same containers into
+Note: We store all our containers on
[Docker Hub](https://hub.docker.com/r/tensorflow/tensorflow/tags/).
@@ -29,12 +36,12 @@ Note: We also publish the same containers into
Run non-GPU container using
- $ docker run -it -p 8888:8888 gcr.io/tensorflow/tensorflow
+ $ docker run -it -p 8888:8888 tensorflow/tensorflow
For GPU support install NVidia drivers (ideally latest) and
[nvidia-docker](https://github.com/NVIDIA/nvidia-docker). Run using
- $ nvidia-docker run -it -p 8888:8888 gcr.io/tensorflow/tensorflow:latest-gpu
+ $ nvidia-docker run -it -p 8888:8888 tensorflow/tensorflow:latest-gpu
Note: If you have a problem running nvidia-docker, you may try the old method
@@ -44,7 +51,7 @@ it there and try using nvidia-docker as described above.
$ # The old, not recommended way to run docker with gpu support:
$ export CUDA_SO=$(\ls /usr/lib/x86_64-linux-gnu/libcuda.* | xargs -I{} echo '-v {}:{}')
$ export DEVICES=$(\ls /dev/nvidia* | xargs -I{} echo '--device {}:{}')
- $ docker run -it -p 8888:8888 $CUDA_SO $DEVICES gcr.io/tensorflow/tensorflow:latest-gpu
+ $ docker run -it -p 8888:8888 $CUDA_SO $DEVICES tensorflow/tensorflow:latest-gpu
## More containers
@@ -87,8 +94,10 @@ export TF_DOCKER_BUILD_IS_DEVEL=NO
export TF_DOCKER_BUILD_TYPE=CPU
export TF_DOCKER_BUILD_PYTHON_VERSION=PYTHON2
-export NIGHTLY_VERSION="1.head"
-export TF_DOCKER_BUILD_CENTRAL_PIP=$(echo ${TF_DOCKER_BUILD_PYTHON_VERSION} | sed s^PYTHON2^http://ci.tensorflow.org/view/Nightly/job/nightly-matrix-cpu/TF_BUILD_IS_OPT=OPT,TF_BUILD_IS_PIP=PIP,TF_BUILD_PYTHON_VERSION=${TF_DOCKER_BUILD_PYTHON_VERSION},label=cpu-slave/lastSuccessfulBuild/artifact/pip_test/whl/tensorflow-${NIGHTLY_VERSION}-cp27-cp27mu-manylinux1_x86_64.whl^ | sed s^PYTHON3^http://ci.tensorflow.org/view/Nightly/job/nightly-python35-linux-cpu/lastSuccessfulBuild/artifact/pip_test/whl/tensorflow-${NIGHTLY_VERSION}-cp35-cp35m-manylinux1_x86_64.whl^)
+pip download --no-deps tf-nightly
+
+export TF_DOCKER_BUILD_CENTRAL_PIP=$(ls tf_nightly*.whl)
+export TF_DOCKER_BUILD_CENTRAL_PIP_IS_LOCAL=1
tensorflow/tools/docker/parameterized_docker_build.sh
```
diff --git a/tensorflow/tools/docker/jupyter_notebook_config.py b/tensorflow/tools/docker/jupyter_notebook_config.py
index 05dcefb099..4449e3501f 100644
--- a/tensorflow/tools/docker/jupyter_notebook_config.py
+++ b/tensorflow/tools/docker/jupyter_notebook_config.py
@@ -16,7 +16,7 @@ import os
from IPython.lib import passwd
c = c # pylint:disable=undefined-variable
-c.NotebookApp.ip = '*'
+c.NotebookApp.ip = '0.0.0.0' # https://github.com/jupyter/notebook/issues/3946
c.NotebookApp.port = int(os.getenv('PORT', 8888))
c.NotebookApp.open_browser = False
diff --git a/tensorflow/tools/docker/notebooks/1_hello_tensorflow.ipynb b/tensorflow/tools/docker/notebooks/1_hello_tensorflow.ipynb
index 0633b03259..8fa871ef77 100644
--- a/tensorflow/tools/docker/notebooks/1_hello_tensorflow.ipynb
+++ b/tensorflow/tools/docker/notebooks/1_hello_tensorflow.ipynb
@@ -665,7 +665,7 @@
"source": [
"## What's next?\n",
"\n",
- "This has been a gentle introduction to TensorFlow, focused on what TensorFlow is and the very basics of doing anything in TensorFlow. If you'd like more, the next tutorial in the series is Getting Started with TensorFlow, also available in the [notebooks directory](..)."
+ "This has been a gentle introduction to TensorFlow, focused on what TensorFlow is and the very basics of doing anything in TensorFlow. If you'd like more, the next tutorial in the series is Getting Started with TensorFlow, also available in the [notebooks directory](../notebooks)."
]
}
],
diff --git a/tensorflow/tools/docker/parameterized_docker_build.sh b/tensorflow/tools/docker/parameterized_docker_build.sh
index 05de25f2cb..448a3a7647 100755
--- a/tensorflow/tools/docker/parameterized_docker_build.sh
+++ b/tensorflow/tools/docker/parameterized_docker_build.sh
@@ -19,8 +19,8 @@
# parameterized_docker_build.sh
#
# The script obeys the following environment variables:
-# TF_DOCKER_BUILD_TYPE: (CPU | GPU)
-# CPU or GPU image
+# TF_DOCKER_BUILD_TYPE: (CPU | GPU | MKL | MKL-HOROVOD)
+# CPU, GPU, MKL or MKL-HOROVOD image
#
# TF_DOCKER_BUILD_IS_DEVEL: (NO | YES)
# Is this developer image
@@ -87,6 +87,15 @@
# TF_DOCKER_BUILD_OPTIONS
# (Optional)
# Specifies the desired build options. Defaults to OPT.
+#
+# TF_DOCKER_BUILD_ARGS
+# (Optional)
+# A list (array) of docker build args. Will be passed to docker build
+# command as list of --build-arg parameters.
+#
+# TF_BAZEL_BUILD_OPTIONS
+# (Optional)
+# Bazel compiler flags to be passed to the bazelrc file
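+#
+# Example (an illustrative sketch, not an officially documented recipe;
+# combine with the other variables documented above as needed):
+#
+#   TF_DOCKER_BUILD_TYPE=MKL \
+#   TF_DOCKER_BUILD_IS_DEVEL=YES \
+#   TF_BAZEL_BUILD_OPTIONS="--config=mkl --copt=-mavx" \
+#   tensorflow/tools/docker/parameterized_docker_build.sh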
# Script directory
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
@@ -116,6 +125,8 @@ echo " TF_DOCKER_BUILD_IMAGE_NAME=${TF_DOCKER_BUILD_IMAGE_NAME}"
echo " TF_DOCKER_BUILD_VERSION=${TF_DOCKER_BUILD_VERSION}"
echo " TF_DOCKER_BUILD_PORT=${TF_DOCKER_BUILD_PORT}"
echo " TF_DOCKER_BUILD_PUSH_CMD=${TF_DOCKER_BUILD_PUSH_CMD}"
+echo " TF_DOCKER_BUILD_ARGS=${TF_DOCKER_BUILD_ARGS[@]:-()}"
+echo " TF_BAZEL_BUILD_OPTIONS=${TF_BAZEL_BUILD_OPTIONS}"
CONTAINER_PORT=${TF_DOCKER_BUILD_PORT:-8888}
@@ -149,6 +160,24 @@ fi
if [[ ${TF_DOCKER_BUILD_TYPE} == "cpu" ]]; then
DOCKER_BINARY="docker"
+elif [[ ${TF_DOCKER_BUILD_TYPE} == "mkl" ]]; then
+ DOCKER_BINARY="docker"
+ FINAL_TAG="${FINAL_TAG}-mkl"
+ if [[ ${ORIG_DOCKERFILE} == *"."* ]]; then
+ # There is already a dot in the tag, use "-"
+ ORIG_DOCKERFILE="${ORIG_DOCKERFILE}-mkl"
+ else
+ ORIG_DOCKERFILE="${ORIG_DOCKERFILE}.mkl"
+ fi
+elif [[ ${TF_DOCKER_BUILD_TYPE} == "mkl-horovod" ]]; then
+ DOCKER_BINARY="docker"
+ FINAL_TAG="${FINAL_TAG}-mkl-horovod"
+ if [[ ${ORIG_DOCKERFILE} == *"."* ]]; then
+ # There is already a dot in the tag, use "-"
+ ORIG_DOCKERFILE="${ORIG_DOCKERFILE}-mkl-horovod"
+ else
+ ORIG_DOCKERFILE="${ORIG_DOCKERFILE}.mkl-horovod"
+ fi
elif [[ ${TF_DOCKER_BUILD_TYPE} == "gpu" ]]; then
DOCKER_BINARY="nvidia-docker"
@@ -168,6 +197,8 @@ if [[ "${TF_DOCKER_BUILD_PYTHON_VERSION}" == "python2" ]]; then
:
elif [[ "${TF_DOCKER_BUILD_PYTHON_VERSION}" == "python3" ]]; then
FINAL_TAG="${FINAL_TAG}-py3"
+elif [[ "${TF_DOCKER_BUILD_PYTHON_VERSION}" == "python3.6" ]]; then
+ FINAL_TAG="${FINAL_TAG}-py3.6"
else
die "Unrecognized value in TF_DOCKER_BUILD_PYTHON_VERSION: "\
"${TF_DOCKER_BUILD_PYTHON_VERSION}"
@@ -203,6 +234,14 @@ if [[ "${TF_DOCKER_BUILD_IS_DEVEL}" == "no" ]]; then
export TF_BUILD_OPTIONS=${TF_DOCKER_BUILD_OPTIONS}
export TF_BUILD_IS_PIP="PIP"
+ if [[ "${TF_DOCKER_BUILD_TYPE}" == "mkl" ]]; then
+ die "FAIL: Non-development MKL builds require a pre-built pip whl."
+ fi
+
+ if [[ "${TF_DOCKER_BUILD_TYPE}" == "mkl-horovod" ]]; then
+ die "FAIL: Non-development MKL-HOROVOD builds require a pre-built pip whl."
+ fi
+
if [[ "${TF_DOCKER_BUILD_TYPE}" == "gpu" ]]; then
export TF_BUILD_APPEND_CI_DOCKER_EXTRA_PARAMS=\
"${TF_BUILD_APPEND_CI_DOCKER_EXTRA_PARAMS} -e TF_CUDA_COMPUTE_CAPABILITIES=3.0,3.5,5.2"
@@ -255,25 +294,41 @@ if [[ "${TF_DOCKER_BUILD_IS_DEVEL}" == "no" ]]; then
# Use string replacement to put the correct file name into the Dockerfile
PIP_WHL=$(basename "${PIP_WHL}")
- # Modify the non-devel Dockerfile to point to the correct pip whl file
- # location
- sed -e "/# --- DO NOT EDIT OR DELETE BETWEEN THE LINES --- #/,"\
+ if [[ ${TF_DOCKER_BUILD_TYPE} == "mkl" ]] || \
+ [[ ${TF_DOCKER_BUILD_TYPE} == "mkl-horovod" ]]; then
+ TF_DOCKER_BUILD_ARGS+=("--build-arg TF_WHL_URL=${PIP_WHL}" )
+ cp "${ORIG_DOCKERFILE}" "${DOCKERFILE}"
+ else
+ # Modify the non-devel Dockerfile to point to the correct pip whl file
+ # location
+ sed -e "/# --- DO NOT EDIT OR DELETE BETWEEN THE LINES --- #/,"\
"/# --- ~ DO NOT EDIT OR DELETE BETWEEN THE LINES --- #/c"\
"COPY ${PIP_WHL} /\n"\
"RUN pip --no-cache-dir install /${PIP_WHL}" "${ORIG_DOCKERFILE}" \
- > "${DOCKERFILE}"
+ > "${DOCKERFILE}"
+ fi
echo "Using local pip wheel from: ${TF_DOCKER_BUILD_CENTRAL_PIP}"
echo
-
else
echo "Downloading pip wheel from: ${TF_DOCKER_BUILD_CENTRAL_PIP}"
- echo
-
- # Modify the non-devel Dockerfile to point to the correct pip whl URL.
- sed -e "/# --- DO NOT EDIT OR DELETE BETWEEN THE LINES --- #/,"\
+ if [[ ${TF_DOCKER_BUILD_TYPE} == "mkl" ]] || \
+ [[ ${TF_DOCKER_BUILD_TYPE} == "mkl-horovod" ]]; then
+ pushd "${TMP_DIR}/"
+ curl -O ${TF_DOCKER_BUILD_CENTRAL_PIP}
+ popd
+ PIP_WHL_PATH=`find ${TMP_DIR} -name "*.whl"`
+ PIP_WHL=$(basename "${PIP_WHL_PATH}")
+ echo "PIP_WHL= ${PIP_WHL}"
+ echo
+ TF_DOCKER_BUILD_ARGS+=("--build-arg TF_WHL_URL=${PIP_WHL}")
+ cp "${ORIG_DOCKERFILE}" "${DOCKERFILE}"
+ else
+ # Modify the non-devel Dockerfile to point to the correct pip whl URL.
+ sed -e "/# --- DO NOT EDIT OR DELETE BETWEEN THE LINES --- #/,"\
"/# --- ~ DO NOT EDIT OR DELETE BETWEEN THE LINES --- #/c"\
"RUN pip --no-cache-dir install ${TF_DOCKER_BUILD_CENTRAL_PIP}" "${ORIG_DOCKERFILE}" \
- > "${DOCKERFILE}"
+ > "${DOCKERFILE}"
+ fi
fi
echo "Modified Dockerfile at: ${DOCKERFILE}"
@@ -281,36 +336,71 @@ if [[ "${TF_DOCKER_BUILD_IS_DEVEL}" == "no" ]]; then
# Modify python/pip version if necessary.
if [[ "${TF_DOCKER_BUILD_PYTHON_VERSION}" == "python3" ]]; then
- if sed -i -e 's/python /python3 /g' "${DOCKERFILE}" && \
- sed -i -e 's/python-dev/python3-dev/g' "${DOCKERFILE}" && \
- sed -i -e 's/pip /pip3 /g' "${DOCKERFILE}" && \
- sed -i -e 's^# RUN ln -s -f /usr/bin/python3 /usr/bin/python#^RUN ln -s -f /usr/bin/python3 /usr/bin/python^' "${DOCKERFILE}"
- then
- echo "Modified Dockerfile for python version "\
-"${TF_DOCKER_BUILD_PYTHON_VERSION} at: ${DOCKERFILE}"
+ if [[ ${TF_DOCKER_BUILD_TYPE} == "mkl" ]] || \
+ [[ ${TF_DOCKER_BUILD_TYPE} == "mkl-horovod" ]]; then
+ TF_DOCKER_BUILD_ARGS+=("--build-arg PYTHON=${TF_DOCKER_BUILD_PYTHON_VERSION}")
+ TF_DOCKER_BUILD_ARGS+=("--build-arg PYTHON_DEV=python3-dev")
+ TF_DOCKER_BUILD_ARGS+=("--build-arg PIP=pip3")
+ cp "${ORIG_DOCKERFILE}" "${DOCKERFILE}"
else
- die "FAILED to modify ${DOCKERFILE} for python3"
+ if sed -i -e 's/python /python3 /g' "${DOCKERFILE}" && \
+ sed -i -e 's/python-dev/python3-dev/g' "${DOCKERFILE}" && \
+ sed -i -e 's/pip /pip3 /g' "${DOCKERFILE}" && \
+ sed -i -e 's^# RUN ln -s -f /usr/bin/python3 /usr/bin/python#^RUN ln -s -f /usr/bin/python3 /usr/bin/python^' "${DOCKERFILE}"
+ then
+ echo "Modified Dockerfile for python version "\
+ "${TF_DOCKER_BUILD_PYTHON_VERSION} at: ${DOCKERFILE}"
+ else
+ die "FAILED to modify ${DOCKERFILE} for python3"
+ fi
fi
fi
-else
+else # TF_DOCKER_BUILD_IS_DEVEL == 'yes'
DOCKERFILE="${TMP_DIR}/Dockerfile"
- # Modify the devel Dockerfile to specify the git branch
- sed "s/^RUN git clone --branch=.* --depth=1/RUN git clone --branch=${TF_DOCKER_BUILD_DEVEL_BRANCH} --depth=1/" \
- "${ORIG_DOCKERFILE}" > "${DOCKERFILE}"
+ # Set up Dockerfile ARGS for mkl and mkl-horovod build
+ if [[ ${TF_DOCKER_BUILD_TYPE} == "mkl" ]] || \
+ [[ ${TF_DOCKER_BUILD_TYPE} == "mkl-horovod" ]]; then
+ if [[ -z "${TF_BAZEL_BUILD_OPTIONS// }" ]]; then
+ TF_BAZEL_BUILD_OPTIONS=("--config=mkl --copt=-mavx --cxxopt=-D_GLIBCXX_USE_CXX11_ABI=0")
+ else
+ TF_BAZEL_BUILD_OPTIONS="${TF_BAZEL_BUILD_OPTIONS}"
+ fi
+ TF_DOCKER_BUILD_ARGS+=("--build-arg TF_BUILD_VERSION=${TF_DOCKER_BUILD_DEVEL_BRANCH}")
+ echo "TF_DOCKER_BUILD_ARGS=${TF_DOCKER_BUILD_ARGS[@]}"
+
+ # Pass the build options to bazel using the user-specific .bazelrc file
+ echo "build ${TF_BAZEL_BUILD_OPTIONS}" >> ${TMP_DIR}/.bazelrc
+ cp "${ORIG_DOCKERFILE}" "${DOCKERFILE}"
+ else
+ # Modify the devel Dockerfile to specify the git branch
+ sed "s/^RUN git clone --branch=.* --depth=1/RUN git clone --branch=${TF_DOCKER_BUILD_DEVEL_BRANCH} --depth=1/" \
+ "${ORIG_DOCKERFILE}" > "${DOCKERFILE}"
+ fi
# Modify python/pip version if necessary.
- if [[ "${TF_DOCKER_BUILD_PYTHON_VERSION}" == "python3" ]]; then
- if sed -i -e 's/python-dev/python-dev python3-dev/g' "${DOCKERFILE}" && \
- sed -i -e 's/python /python3 /g' "${DOCKERFILE}" && \
- sed -i -e 's^/tmp/pip^/tmp/pip3^g' "${DOCKERFILE}" && \
- sed -i -e 's/pip /pip3 /g' "${DOCKERFILE}" && \
- sed -i -e 's/ENV CI_BUILD_PYTHON python/ENV CI_BUILD_PYTHON python3/g' "${DOCKERFILE}" && \
- sed -i -e 's^# RUN ln -s -f /usr/bin/python3 /usr/bin/python#^RUN ln -s -f /usr/bin/python3 /usr/bin/python^' "${DOCKERFILE}"
- then
- echo "Modified Dockerfile further for python version ${TF_DOCKER_BUILD_PYTHON_VERSION} at: ${DOCKERFILE}"
+ if [[ "${TF_DOCKER_BUILD_PYTHON_VERSION}" == "python3" ]] || [[ "${TF_DOCKER_BUILD_PYTHON_VERSION}" == "python3.6" ]]; then
+ if [[ ${TF_DOCKER_BUILD_TYPE} == "mkl" ]] || [[ ${TF_DOCKER_BUILD_TYPE} == "mkl-horovod" ]]; then
+ TF_DOCKER_BUILD_ARGS+=("--build-arg PYTHON=${TF_DOCKER_BUILD_PYTHON_VERSION}")
+ TF_DOCKER_BUILD_ARGS+=("--build-arg PYTHON3_DEV=python3-dev")
+ TF_DOCKER_BUILD_ARGS+=("--build-arg WHL_DIR=/tmp/pip3")
+ TF_DOCKER_BUILD_ARGS+=("--build-arg PIP=pip3")
+ cp "${ORIG_DOCKERFILE}" "${DOCKERFILE}"
else
- die "FAILED to modify ${DOCKERFILE} for python3"
+ if [[ "${TF_DOCKER_BUILD_PYTHON_VERSION}" == "python3.6" ]] && [[ "${TF_DOCKER_BUILD_TYPE}" != "mkl" ]]; then
+ die "Python 3.6 build only supported for MKL builds."
+ fi
+ if sed -i -e 's/python-dev/python-dev python3-dev/g' "${DOCKERFILE}" && \
+ sed -i -e 's/python /python3 /g' "${DOCKERFILE}" && \
+ sed -i -e 's^/tmp/pip^/tmp/pip3^g' "${DOCKERFILE}" && \
+ sed -i -e 's/pip /pip3 /g' "${DOCKERFILE}" && \
+ sed -i -e 's/ENV CI_BUILD_PYTHON python/ENV CI_BUILD_PYTHON python3/g' "${DOCKERFILE}" && \
+ sed -i -e 's^# RUN ln -s -f /usr/bin/python3 /usr/bin/python#^RUN ln -s -f /usr/bin/python3 /usr/bin/python^' "${DOCKERFILE}"
+ then
+ echo "Modified Dockerfile further for python version ${TF_DOCKER_BUILD_PYTHON_VERSION} at: ${DOCKERFILE}"
+ else
+ die "FAILED to modify ${DOCKERFILE} for python3"
+ fi
fi
fi
fi
@@ -319,8 +409,11 @@ fi
# Intermediate image name with tag
IMG="${USER}/tensorflow:${FINAL_TAG}"
echo "Building docker image with image name and tag: ${IMG}"
+echo "TF_DOCKER_BUILD_ARGS=${TF_DOCKER_BUILD_ARGS[@]}"
+CMD="${DOCKER_BINARY} build ${TF_DOCKER_BUILD_ARGS[@]} --no-cache --pull -t ${IMG} -f ${DOCKERFILE} ${TMP_DIR}"
+echo "CMD=${CMD}"
+${CMD}
-"${DOCKER_BINARY}" build --no-cache --pull -t "${IMG}" -f "${DOCKERFILE}" "${TMP_DIR}"
if [[ $? == "0" ]]; then
echo "${DOCKER_BINARY} build of ${IMG} succeeded"
else
@@ -340,7 +433,7 @@ fi
DOCKER_RUN_LOG="${TMP_DIR}/docker_run.log"
echo ""
echo "Running docker container from image ${IMG}..."
-echo " (Log file is at: ${DOCKER_RUN_LOG}"
+echo " Log file is at: ${DOCKER_RUN_LOG}"
echo ""
if [[ "${TF_DOCKER_BUILD_IS_DEVEL}" == "no" ]]; then
@@ -386,7 +479,6 @@ if [[ "${TF_DOCKER_BUILD_IS_DEVEL}" == "no" ]]; then
# Stop the running docker container
sleep 1
"${DOCKER_BINARY}" stop --time=0 ${CONTAINER_ID}
-
fi
diff --git a/tensorflow/tools/dockerfiles/README.md b/tensorflow/tools/dockerfiles/README.md
new file mode 100644
index 0000000000..5996573cf1
--- /dev/null
+++ b/tensorflow/tools/dockerfiles/README.md
@@ -0,0 +1,67 @@
+# TensorFlow Dockerfiles
+
+This directory houses TensorFlow's Dockerfiles. **DO NOT EDIT THE DOCKERFILES
+MANUALLY!** They are maintained by `assembler.py`, which builds Dockerfiles from
+the files in `partials/` and the rules in `spec.yml`. See [the Contributing
+section](#contributing) for more information.
+
+## Building
+
+The Dockerfiles in the `dockerfiles` directory must have their build context set
+to **the directory with this README.md** to copy in helper files. For example:
+
+```bash
+$ docker build -f ./dockerfiles/cpu.Dockerfile -t tf .
+```
+
+Each Dockerfile has its own set of available `--build-arg`s which are documented
+in the Dockerfile itself.
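+
+As a sketch (the `UBUNTU_VERSION` arg below is taken from the generated CPU
+development Dockerfile; other Dockerfiles expose different args), overriding a
+build arg looks like this:
+
+```bash
+# Override a documented build arg; UBUNTU_VERSION defaults to 16.04.
+$ docker build -f ./dockerfiles/cpu-devel-jupyter.Dockerfile \
+    --build-arg UBUNTU_VERSION=16.04 -t tf .
+```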
+
+## Running
+
+After building the image with the tag `tf` (for example), use `docker run` to
+run the images. Examples are below.
+
+Note for new Docker users: the `-v` and `-u` flags share directories between
+the Docker container and your machine, and are very important. Without
+`-v`, your work will be wiped once the container quits, and without `-u`, files
+created by the container will have the wrong file permissions on your host
+machine. If you are confused, check out the [Docker run
+documentation](https://docs.docker.com/engine/reference/run/).
+
+```bash
+# Volume mount (-v) is optional but highly recommended, especially for Jupyter.
+# User permissions (-u) are required if you use (-v).
+
+# CPU-based images
+$ docker run -u $(id -u):$(id -g) -v $(pwd):/my-devel -it tf
+
+# GPU-based images (set up nvidia-docker2 first)
+$ docker run --runtime=nvidia -u $(id -u):$(id -g) -v $(pwd):/my-devel -it tf
+
+# Images with Jupyter run on port 8888 and need a volume for your notebooks
+$ docker run --user $(id -u):$(id -g) -p 8888:8888 -v $(pwd):/notebooks -it tf
+```
+
+These images do not come with the TensorFlow source code -- but the development
+images have git included, so you can `git clone` it yourself.
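+
+For example, inside a running development container (illustrative only):
+
+```bash
+# Clone the TensorFlow source tree into the container.
+$ git clone https://github.com/tensorflow/tensorflow.git
+```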
+
+## Contributing
+
+To make changes to TensorFlow's Dockerfiles, you'll update `spec.yml` and the
+`*.partial.Dockerfile` files in the `partials` directory, then run
+`assembler.py` to re-generate the full Dockerfiles before creating a pull
+request.
+
+You can use the `assembler.Dockerfile` in this directory to build an editing environment
+that has all of the Python dependencies you'll need:
+
+```bash
+$ docker build -t tf-assembler -f assembler.Dockerfile .
+
+# Set --user to set correct permissions on generated files
+$ docker run --user $(id -u):$(id -g) -it -v $(pwd):/tf tf-assembler bash
+
+# In the container...
+/tf $ python3 ./assembler.py -o dockerfiles -s spec.yml
+```
diff --git a/tensorflow/tools/dockerfiles/assembler.Dockerfile b/tensorflow/tools/dockerfiles/assembler.Dockerfile
new file mode 100644
index 0000000000..7a8e07fced
--- /dev/null
+++ b/tensorflow/tools/dockerfiles/assembler.Dockerfile
@@ -0,0 +1,30 @@
+# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ==============================================================================
+#
+# TensorFlow Dockerfile Development Container
+#
+# You can use this image to quickly develop changes to the Dockerfile assembler
+# or set of TF Docker partials. See README.md for usage instructions.
+FROM debian:stretch
+LABEL maintainer="Austin Anderson <angerson@google.com>"
+
+RUN apt-get update && apt-get install -y python3 python3-pip bash
+RUN pip3 install --upgrade pip setuptools pyyaml absl-py cerberus
+
+WORKDIR /tf
+VOLUME ["/tf"]
+
+COPY bashrc /etc/bash.bashrc
+RUN chmod a+rwx /etc/bash.bashrc
diff --git a/tensorflow/tools/dockerfiles/assembler.py b/tensorflow/tools/dockerfiles/assembler.py
new file mode 100644
index 0000000000..9cdd9bb0cb
--- /dev/null
+++ b/tensorflow/tools/dockerfiles/assembler.py
@@ -0,0 +1,554 @@
+# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ==============================================================================
+"""Assemble common TF Dockerfiles from many parts.
+
+This script constructs TF's Dockerfiles by aggregating partial
+Dockerfiles. See README.md for usage examples.
+"""
+
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+import copy
+import errno
+import os
+import os.path
+import re
+import shutil
+import textwrap
+
+from absl import app
+from absl import flags
+import cerberus
+import yaml
+
+FLAGS = flags.FLAGS
+
+flags.DEFINE_boolean(
+ 'dry_run', False, 'Do not actually generate Dockerfiles', short_name='n')
+
+flags.DEFINE_string(
+ 'spec_file',
+ './spec.yml',
+ 'Path to a YAML specification file',
+ short_name='s')
+
+flags.DEFINE_string(
+ 'output_dir',
+ './dockerfiles', ('Path to an output directory for Dockerfiles. '
+ 'Will be created if it doesn\'t exist.'),
+ short_name='o')
+
+flags.DEFINE_string(
+ 'partial_dir',
+ './partials',
+ 'Path to a directory containing foo.partial.Dockerfile partial files.',
+ short_name='p')
+
+flags.DEFINE_boolean(
+ 'quiet_dry_run',
+ True,
+ 'Do not print contents of dry run Dockerfiles.',
+ short_name='q')
+
+flags.DEFINE_boolean(
+ 'validate', True, 'Validate generated Dockerfiles', short_name='c')
+
+# Schema to verify the contents of spec.yml with Cerberus.
+# Must be converted to a dict from yaml to work.
+# Note: can add python references with e.g.
+# !!python/name:builtins.str
+# !!python/name:__main__.funcname
+SCHEMA_TEXT = """
+header:
+ type: string
+
+partials:
+ type: dict
+ keyschema:
+ type: string
+ valueschema:
+ type: dict
+ schema:
+ desc:
+ type: string
+ args:
+ type: dict
+ keyschema:
+ type: string
+ valueschema:
+ anyof:
+ - type: [ boolean, number, string ]
+ - type: dict
+ schema:
+ default:
+ type: [ boolean, number, string ]
+ desc:
+ type: string
+ options:
+ type: list
+ schema:
+ type: string
+
+images:
+ keyschema:
+ type: string
+ valueschema:
+ type: dict
+ schema:
+ desc:
+ type: string
+ arg-defaults:
+ type: list
+ schema:
+ anyof:
+ - type: dict
+ keyschema:
+ type: string
+ arg_in_use: true
+ valueschema:
+ type: string
+ - type: string
+ isimage: true
+ create-dockerfile:
+ type: boolean
+ partials:
+ type: list
+ schema:
+ anyof:
+ - type: dict
+ keyschema:
+ type: string
+ regex: image
+ valueschema:
+ type: string
+ isimage: true
+ - type: string
+ ispartial: true
+"""
+
+
+class TfDockerValidator(cerberus.Validator):
+ """Custom Cerberus validator for TF dockerfile spec.
+
+ Note: Each _validate_foo function's docstring must end with a segment
+ describing its own validation schema, e.g. "The rule's arguments are...". If
+ you add a new validator, you can copy/paste that section.
+ """
+
+ def _validate_ispartial(self, ispartial, field, value):
+ """Validate that a partial references an existing partial spec.
+
+ Args:
+ ispartial: Value of the rule, a bool
+ field: The field being validated
+ value: The field's value
+
+ The rule's arguments are validated against this schema:
+ {'type': 'boolean'}
+ """
+ if ispartial and value not in self.root_document.get('partials', dict()):
+ self._error(field, '{} is not an existing partial.'.format(value))
+
+ def _validate_isimage(self, isimage, field, value):
+ """Validate that an image references an existing partial spec.
+
+ Args:
+ isimage: Value of the rule, a bool
+ field: The field being validated
+ value: The field's value
+
+ The rule's arguments are validated against this schema:
+ {'type': 'boolean'}
+ """
+ if isimage and value not in self.root_document.get('images', dict()):
+ self._error(field, '{} is not an existing image.'.format(value))
+
+ def _validate_arg_in_use(self, arg_in_use, field, value):
+ """Validate that an arg references an existing partial spec's args.
+
+ Args:
+ arg_in_use: Value of the rule, a bool
+ field: The field being validated
+ value: The field's value
+
+ The rule's arguments are validated against this schema:
+ {'type': 'boolean'}
+ """
+ if arg_in_use:
+ for partial in self.root_document.get('partials', dict()).values():
+ if value in partial.get('args', tuple()):
+ return
+
+ self._error(field, '{} is not an arg used in any partial.'.format(value))
+
+
+def build_partial_description(partial_spec):
+ """Create the documentation lines for a specific partial.
+
+ Generates something like this:
+
+ # This is the partial's description, from spec.yml.
+ # --build-arg ARG_NAME=argdefault
+ # this is one of the args.
+ # --build-arg ANOTHER_ARG=(some|choices)
+ # another arg.
+
+ Args:
+ partial_spec: A dict representing one of the partials from spec.yml. Doesn't
+ include the name of the partial; is a dict like { desc: ..., args: ... }.
+
+ Returns:
+ A commented string describing this partial.
+ """
+
+ # Start from linewrapped desc field
+ lines = []
+ wrapper = textwrap.TextWrapper(
+ initial_indent='# ', subsequent_indent='# ', width=80)
+ description = wrapper.fill(partial_spec.get('desc', '( no comments )'))
+ lines.extend(['#', description])
+
+ # Document each arg
+ for arg, arg_data in partial_spec.get('args', dict()).items():
+ # Wrap arg description with comment lines
+ desc = arg_data.get('desc', '( no description )')
+ desc = textwrap.fill(
+ desc,
+ initial_indent='# ',
+ subsequent_indent='# ',
+ width=80,
+ drop_whitespace=False)
+
+ # Document (each|option|like|this)
+ if 'options' in arg_data:
+ arg_options = ' ({})'.format('|'.join(arg_data['options']))
+ else:
+ arg_options = ''
+
+ # Add usage sample
+ arg_use = '# --build-arg {}={}{}'.format(arg,
+ arg_data.get('default', '(unset)'),
+ arg_options)
+ lines.extend([arg_use, desc])
+
+ return '\n'.join(lines)
+
+
+def construct_contents(partial_specs, image_spec):
+ """Assemble the dockerfile contents for an image spec.
+
+ It assembles a concrete list of partial references into a single, large
+ string.
+ Also expands argument defaults, so that the resulting Dockerfile doesn't have
+ to be configured with --build-arg=... every time. That is, any ARG directive
+ will be updated with a new default value.
+
+ Args:
+ partial_specs: The dict from spec.yml["partials"].
+ image_spec: One of the dict values from spec.yml["images"].
+
+ Returns:
+ A string containing a valid Dockerfile based on the partials listed in
+ image_spec.
+ """
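+  # Illustration (hypothetical names, not taken from a real spec.yml): a
+  # partial file containing the line
+  #   ARG PYTHON=python
+  # combined with an image arg-default of PYTHON=python3 is rewritten by the
+  # re.sub call below to
+  #   ARG PYTHON=python3
+  # so the generated Dockerfile carries the new default.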
+ processed_partial_strings = []
+ for partial_name in image_spec['partials']:
+ # Apply image arg-defaults to existing arg defaults
+ partial_spec = copy.deepcopy(partial_specs[partial_name])
+ args = partial_spec.get('args', dict())
+ for k_v in image_spec.get('arg-defaults', []):
+ arg, value = list(k_v.items())[0]
+ if arg in args:
+ args[arg]['default'] = value
+
+ # Read partial file contents
+ filename = partial_spec.get('file', partial_name)
+ partial_path = os.path.join(FLAGS.partial_dir,
+ '{}.partial.Dockerfile'.format(filename))
+ with open(partial_path, 'r') as f_partial:
+ partial_contents = f_partial.read()
+
+ # Replace ARG FOO=BAR with ARG FOO=[new-default]
+ for arg, arg_data in args.items():
+ if 'default' in arg_data and arg_data['default']:
+ default = '={}'.format(arg_data['default'])
+ else:
+ default = ''
+ partial_contents = re.sub(r'ARG {}.*'.format(arg), 'ARG {}{}'.format(
+ arg, default), partial_contents)
+
+ # Store updated partial contents
+ processed_partial_strings.append(partial_contents)
+
+ # Join everything together
+ return '\n'.join(processed_partial_strings)
+
+
+def mkdir_p(path):
+ """Create a directory and its parents, even if it already exists."""
+ try:
+ os.makedirs(path)
+ except OSError as e:
+ if e.errno != errno.EEXIST:
+ raise
+
+
+def construct_documentation(header, partial_specs, image_spec):
+ """Assemble all of the documentation for a single dockerfile.
+
+ Builds explanations of included partials and available build args.
+
+ Args:
+ header: The string from spec.yml["header"]; will be commented and wrapped.
+ partial_specs: The dict from spec.yml["partials"].
+ image_spec: The spec for the dockerfile being built.
+
+ Returns:
+ A string containing a commented header that documents the contents of the
+ dockerfile.
+
+ """
+ # Comment and wrap header and image description
+ commented_header = '\n'.join(
+ [('# ' + l).rstrip() for l in header.splitlines()])
+ commented_desc = '\n'.join(
+ ['# ' + l for l in image_spec.get('desc', '').splitlines()])
+ partial_descriptions = []
+
+ # Build documentation for each partial in the image
+ for partial in image_spec['partials']:
+ # Copy partial data for default args unique to this image
+ partial_spec = copy.deepcopy(partial_specs[partial])
+ args = partial_spec.get('args', dict())
+
+ # Overwrite any existing arg defaults
+ for k_v in image_spec.get('arg-defaults', []):
+ arg, value = list(k_v.items())[0]
+ if arg in args:
+ args[arg]['default'] = value
+
+ # Build the description from new args
+ partial_description = build_partial_description(partial_spec)
+ partial_descriptions.append(partial_description)
+
+ contents = [commented_header, '#', commented_desc] + partial_descriptions
+ return '\n'.join(contents) + '\n'
+
+
+def normalize_partial_args(partial_specs):
+ """Normalize the shorthand form of a partial's args specification.
+
+ Turns this:
+
+ partial:
+ args:
+ SOME_ARG: arg_value
+
+ Into this:
+
+ partial:
+ args:
+ SOME_ARG:
+ default: arg_value
+
+ Args:
+ partial_specs: The dict from spec.yml["partials"]. This dict is modified in
+ place.
+
+ Returns:
+ The modified contents of partial_specs.
+
+ """
+ for _, partial in partial_specs.items():
+ args = partial.get('args', dict())
+ for arg, value in args.items():
+ if not isinstance(value, dict):
+ new_value = {'default': value}
+ args[arg] = new_value
+
+ return partial_specs
+
+
+def flatten_args_references(image_specs):
+ """Resolve all default-args in each image spec to a concrete dict.
+
+ Turns this:
+
+ example-image:
+ arg-defaults:
+ - MY_ARG: ARG_VALUE
+
+ another-example:
+ arg-defaults:
+ - ANOTHER_ARG: ANOTHER_VALUE
+ - example_image
+
+ Into this:
+
+ example-image:
+ arg-defaults:
+ - MY_ARG: ARG_VALUE
+
+ another-example:
+ arg-defaults:
+ - ANOTHER_ARG: ANOTHER_VALUE
+ - MY_ARG: ARG_VALUE
+
+ Args:
+ image_specs: A dict of image_spec dicts; should be the contents of the
+ "images" key in the global spec.yaml. This dict is modified in place and
+ then returned.
+
+ Returns:
+ The modified contents of image_specs.
+ """
+ for _, image_spec in image_specs.items():
+ too_deep = 0
+ while str in map(type, image_spec.get('arg-defaults', [])) and too_deep < 5:
+ new_args = []
+ for arg in image_spec['arg-defaults']:
+ if isinstance(arg, str):
+ new_args.extend(image_specs[arg]['arg-defaults'])
+ else:
+ new_args.append(arg)
+
+ image_spec['arg-defaults'] = new_args
+ too_deep += 1
+
+ return image_specs
+
+
+def flatten_partial_references(image_specs):
+ """Resolve all partial references in each image spec to a concrete list.
+
+ Turns this:
+
+ example-image:
+ partials:
+ - foo
+
+ another-example:
+ partials:
+ - bar
+ - image: example-image
+ - bat
+
+ Into this:
+
+ example-image:
+ partials:
+ - foo
+
+ another-example:
+ partials:
+ - bar
+ - foo
+ - bat
+ Args:
+ image_specs: A dict of image_spec dicts; should be the contents of the
+ "images" key in the global spec.yaml. This dict is modified in place and
+ then returned.
+
+ Returns:
+ The modified contents of image_specs.
+ """
+ for _, image_spec in image_specs.items():
+ too_deep = 0
+ while dict in map(type, image_spec['partials']) and too_deep < 5:
+ new_partials = []
+ for partial in image_spec['partials']:
+ if isinstance(partial, str):
+ new_partials.append(partial)
+ else:
+ new_partials.extend(image_specs[partial['image']]['partials'])
+
+ image_spec['partials'] = new_partials
+ too_deep += 1
+
+ return image_specs
+
+
+def construct_dockerfiles(tf_spec):
+ """Generate a mapping of {"cpu": <cpu dockerfile contents>, ...}.
+
+ Args:
+ tf_spec: The full spec.yml loaded as a python object.
+
+ Returns:
+ A string:string dict of short names ("cpu-devel") to Dockerfile contents.
+ """
+ names_to_contents = dict()
+ image_specs = tf_spec['images']
+ image_specs = flatten_partial_references(image_specs)
+ image_specs = flatten_args_references(image_specs)
+ partial_specs = tf_spec['partials']
+ partial_specs = normalize_partial_args(partial_specs)
+
+ for name, image_spec in image_specs.items():
+ if not image_spec.get('create-dockerfile', True):
+ continue
+ documentation = construct_documentation(tf_spec['header'], partial_specs,
+ image_spec)
+ contents = construct_contents(partial_specs, image_spec)
+ names_to_contents[name] = '\n'.join([documentation, contents])
+
+ return names_to_contents
+
+
+def main(argv):
+ if len(argv) > 1:
+ raise app.UsageError('Unexpected command line args found: {}'.format(argv))
+
+ with open(FLAGS.spec_file, 'r') as spec_file:
+ tf_spec = yaml.load(spec_file)
+
+ # Abort if spec.yaml is invalid
+ if FLAGS.validate:
+ schema = yaml.load(SCHEMA_TEXT)
+ v = TfDockerValidator(schema)
+ if not v.validate(tf_spec):
+ print('>> ERROR: {} is an invalid spec! The errors are:'.format(
+ FLAGS.spec_file))
+ print(yaml.dump(v.errors, indent=2))
+ exit(1)
+ else:
+ print('>> WARNING: Not validating {}'.format(FLAGS.spec_file))
+
+ # Generate mapping of { "cpu-devel": "<cpu-devel dockerfile contents>", ... }
+ names_to_contents = construct_dockerfiles(tf_spec)
+
+ # Write each completed Dockerfile
+ if not FLAGS.dry_run:
+ print('>> Emptying destination dir "{}"'.format(FLAGS.output_dir))
+ shutil.rmtree(FLAGS.output_dir, ignore_errors=True)
+ mkdir_p(FLAGS.output_dir)
+ else:
+ print('>> Skipping creation of {} (dry run)'.format(FLAGS.output_dir))
+ for name, contents in names_to_contents.items():
+ path = os.path.join(FLAGS.output_dir, name + '.Dockerfile')
+ if FLAGS.dry_run:
+ print('>> Skipping writing contents of {} (dry run)'.format(path))
+ print(contents)
+ else:
+ mkdir_p(FLAGS.output_dir)
+ print('>> Writing {}'.format(path))
+ with open(path, 'w') as f:
+ f.write(contents)
+
+
+if __name__ == '__main__':
+ app.run(main)
diff --git a/tensorflow/tools/dockerfiles/bashrc b/tensorflow/tools/dockerfiles/bashrc
new file mode 100644
index 0000000000..48cacf20f6
--- /dev/null
+++ b/tensorflow/tools/dockerfiles/bashrc
@@ -0,0 +1,50 @@
+# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# ==============================================================================
+
+export PS1="\[\e[31m\]tf-docker\[\e[m\] \[\e[33m\]\w\[\e[m\] > "
+export TERM=xterm-256color
+alias grep="grep --color=auto"
+alias ls="ls --color=auto"
+
+echo -e "\e[1;31m"
+cat<<TF
+________ _______________
+___ __/__________________________________ ____/__ /________ __
+__ / _ _ \_ __ \_ ___/ __ \_ ___/_ /_ __ /_ __ \_ | /| / /
+_ / / __/ / / /(__ )/ /_/ / / _ __/ _ / / /_/ /_ |/ |/ /
+/_/ \___//_/ /_//____/ \____//_/ /_/ /_/ \____/____/|__/
+
+TF
+echo -e "\e[0;33m"
+
+if [[ $EUID -eq 0 ]]; then
+ cat <<WARN
+WARNING: You are running this container as root, which can cause new files in
+mounted volumes to be created as the root user on your host machine.
+
+To avoid this, run the container by specifying your user's userid:
+
+$ docker run -u \$(id -u):\$(id -g) args...
+WARN
+else
+ cat <<EXPL
+You are running this container as user with ID $(id -u) and group $(id -g),
+which should map to the ID and group for your user on the Docker host. Great!
+EXPL
+fi
+
+# Turn off colors
+echo -e "\e[m"
diff --git a/tensorflow/tools/dockerfiles/dockerfiles/cpu-devel-jupyter.Dockerfile b/tensorflow/tools/dockerfiles/dockerfiles/cpu-devel-jupyter.Dockerfile
new file mode 100644
index 0000000000..dbbad7d03a
--- /dev/null
+++ b/tensorflow/tools/dockerfiles/dockerfiles/cpu-devel-jupyter.Dockerfile
@@ -0,0 +1,100 @@
+# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============================================================================
+#
+# THIS IS A GENERATED DOCKERFILE.
+#
+# This file was assembled from multiple pieces, whose use is documented
+# below. Please refer to the TensorFlow dockerfiles documentation for
+# more information. Build args are documented as their default value.
+#
+# Ubuntu-based, CPU-only environment for developing changes for TensorFlow, with Jupyter included.
+#
+# Start from Ubuntu, with TF development packages (no GPU support)
+# --build-arg UBUNTU_VERSION=16.04
+# ( no description )
+#
+# Python is required for TensorFlow and other libraries.
+# --build-arg USE_PYTHON_3_NOT_2=True
+#     Install Python 3 over Python 2
+#
+# Install the latest version of Bazel and Python development tools.
+#
+# Configure TensorFlow's shell prompt and login tools.
+#
+# Launch Jupyter on execution instead of a bash prompt.
+
+ARG UBUNTU_VERSION=16.04
+FROM ubuntu:${UBUNTU_VERSION}
+
+RUN apt-get update && apt-get install -y --no-install-recommends \
+ build-essential \
+ curl \
+ git \
+ libcurl3-dev \
+ libfreetype6-dev \
+ libhdf5-serial-dev \
+ libpng12-dev \
+ libzmq3-dev \
+ pkg-config \
+ python-dev \
+ rsync \
+ software-properties-common \
+ unzip \
+ zip \
+ zlib1g-dev \
+ openjdk-8-jdk \
+ openjdk-8-jre-headless \
+ && \
+ apt-get clean && \
+ rm -rf /var/lib/apt/lists/*
+
+ARG USE_PYTHON_3_NOT_2=True
+ARG _PY_SUFFIX=${USE_PYTHON_3_NOT_2:+3}
+ARG PYTHON=python${_PY_SUFFIX}
+ARG PIP=pip${_PY_SUFFIX}
+
+RUN apt-get update && apt-get install -y \
+ ${PYTHON} \
+ ${PYTHON}-pip
+
+RUN ${PIP} install --upgrade \
+ pip \
+ setuptools
+
+RUN apt-get update && apt-get install -y \
+ build-essential \
+ curl \
+ git \
+ openjdk-8-jdk \
+ ${PYTHON}-dev \
+ swig
+
+# Install bazel
+RUN echo "deb [arch=amd64] http://storage.googleapis.com/bazel-apt stable jdk1.8" | tee /etc/apt/sources.list.d/bazel.list && \
+ curl https://bazel.build/bazel-release.pub.gpg | apt-key add - && \
+ apt-get update && \
+ apt-get install -y bazel
+
+COPY bashrc /etc/bash.bashrc
+RUN chmod a+rwx /etc/bash.bashrc
+
+RUN ${PIP} install jupyter
+
+RUN mkdir /notebooks && chmod a+rwx /notebooks
+RUN mkdir /.local && chmod a+rwx /.local
+WORKDIR /notebooks
+EXPOSE 8888
+
+CMD ["bash", "-c", "source /etc/bash.bashrc && jupyter notebook --notebook-dir=/notebooks --ip 0.0.0.0 --no-browser --allow-root"]
diff --git a/tensorflow/tools/dockerfiles/dockerfiles/cpu-devel.Dockerfile b/tensorflow/tools/dockerfiles/dockerfiles/cpu-devel.Dockerfile
new file mode 100644
index 0000000000..160d7c02e2
--- /dev/null
+++ b/tensorflow/tools/dockerfiles/dockerfiles/cpu-devel.Dockerfile
@@ -0,0 +1,89 @@
+# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============================================================================
+#
+# THIS IS A GENERATED DOCKERFILE.
+#
+# This file was assembled from multiple pieces, whose use is documented
+# below. Please refer to the TensorFlow dockerfiles documentation for
+# more information. Build args are documented as their default value.
+#
+# Ubuntu-based, CPU-only environment for developing changes for TensorFlow.
+#
+# Start from Ubuntu, with TF development packages (no GPU support)
+# --build-arg UBUNTU_VERSION=16.04
+# ( no description )
+#
+# Python is required for TensorFlow and other libraries.
+# --build-arg USE_PYTHON_3_NOT_2=True
+#     Install Python 3 over Python 2
+#
+# Install the latest version of Bazel and Python development tools.
+#
+# Configure TensorFlow's shell prompt and login tools.
+
+ARG UBUNTU_VERSION=16.04
+FROM ubuntu:${UBUNTU_VERSION}
+
+RUN apt-get update && apt-get install -y --no-install-recommends \
+ build-essential \
+ curl \
+ git \
+ libcurl3-dev \
+ libfreetype6-dev \
+ libhdf5-serial-dev \
+ libpng12-dev \
+ libzmq3-dev \
+ pkg-config \
+ python-dev \
+ rsync \
+ software-properties-common \
+ unzip \
+ zip \
+ zlib1g-dev \
+ openjdk-8-jdk \
+ openjdk-8-jre-headless \
+ && \
+ apt-get clean && \
+ rm -rf /var/lib/apt/lists/*
+
+ARG USE_PYTHON_3_NOT_2=True
+ARG _PY_SUFFIX=${USE_PYTHON_3_NOT_2:+3}
+ARG PYTHON=python${_PY_SUFFIX}
+ARG PIP=pip${_PY_SUFFIX}
+
+RUN apt-get update && apt-get install -y \
+ ${PYTHON} \
+ ${PYTHON}-pip
+
+RUN ${PIP} install --upgrade \
+ pip \
+ setuptools
+
+RUN apt-get update && apt-get install -y \
+ build-essential \
+ curl \
+ git \
+ openjdk-8-jdk \
+ ${PYTHON}-dev \
+ swig
+
+# Install bazel
+RUN echo "deb [arch=amd64] http://storage.googleapis.com/bazel-apt stable jdk1.8" | tee /etc/apt/sources.list.d/bazel.list && \
+ curl https://bazel.build/bazel-release.pub.gpg | apt-key add - && \
+ apt-get update && \
+ apt-get install -y bazel
+
+COPY bashrc /etc/bash.bashrc
+RUN chmod a+rwx /etc/bash.bashrc
diff --git a/tensorflow/tools/dockerfiles/dockerfiles/cpu-jupyter.Dockerfile b/tensorflow/tools/dockerfiles/dockerfiles/cpu-jupyter.Dockerfile
new file mode 100644
index 0000000000..8d5d653ab7
--- /dev/null
+++ b/tensorflow/tools/dockerfiles/dockerfiles/cpu-jupyter.Dockerfile
@@ -0,0 +1,69 @@
+# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============================================================================
+#
+# THIS IS A GENERATED DOCKERFILE.
+#
+# This file was assembled from multiple pieces, whose use is documented
+# below. Please refer to the TensorFlow dockerfiles documentation for
+# more information. Build args are documented as their default value.
+#
+# Ubuntu-based, CPU-only environment for using TensorFlow, with Jupyter included.
+#
+# Start from Ubuntu (no GPU support)
+# --build-arg UBUNTU_VERSION=16.04
+# ( no description )
+#
+# Python is required for TensorFlow and other libraries.
+# --build-arg USE_PYTHON_3_NOT_2=True
+#     Install Python 3 over Python 2
+#
+# Install the TensorFlow Python package.
+# --build-arg TF_PACKAGE=tensorflow (tensorflow|tensorflow-gpu|tf-nightly|tf-nightly-gpu)
+# The specific TensorFlow Python package to install
+#
+# Configure TensorFlow's shell prompt and login tools.
+#
+# Launch Jupyter on execution instead of a bash prompt.
+
+ARG UBUNTU_VERSION=16.04
+FROM ubuntu:${UBUNTU_VERSION}
+
+ARG USE_PYTHON_3_NOT_2=True
+ARG _PY_SUFFIX=${USE_PYTHON_3_NOT_2:+3}
+ARG PYTHON=python${_PY_SUFFIX}
+ARG PIP=pip${_PY_SUFFIX}
+
+RUN apt-get update && apt-get install -y \
+ ${PYTHON} \
+ ${PYTHON}-pip
+
+RUN ${PIP} install --upgrade \
+ pip \
+ setuptools
+
+ARG TF_PACKAGE=tensorflow
+RUN ${PIP} install ${TF_PACKAGE}
+
+COPY bashrc /etc/bash.bashrc
+RUN chmod a+rwx /etc/bash.bashrc
+
+RUN ${PIP} install jupyter
+
+RUN mkdir /notebooks && chmod a+rwx /notebooks
+RUN mkdir /.local && chmod a+rwx /.local
+WORKDIR /notebooks
+EXPOSE 8888
+
+CMD ["bash", "-c", "source /etc/bash.bashrc && jupyter notebook --notebook-dir=/notebooks --ip 0.0.0.0 --no-browser --allow-root"]
diff --git a/tensorflow/tools/dockerfiles/dockerfiles/cpu.Dockerfile b/tensorflow/tools/dockerfiles/dockerfiles/cpu.Dockerfile
new file mode 100644
index 0000000000..35c41b49fd
--- /dev/null
+++ b/tensorflow/tools/dockerfiles/dockerfiles/cpu.Dockerfile
@@ -0,0 +1,58 @@
+# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============================================================================
+#
+# THIS IS A GENERATED DOCKERFILE.
+#
+# This file was assembled from multiple pieces, whose use is documented
+# below. Please refer to the TensorFlow dockerfiles documentation for
+# more information. Build args are documented as their default value.
+#
+# Ubuntu-based, CPU-only environment for using TensorFlow
+#
+# Start from Ubuntu (no GPU support)
+# --build-arg UBUNTU_VERSION=16.04
+# ( no description )
+#
+# Python is required for TensorFlow and other libraries.
+# --build-arg USE_PYTHON_3_NOT_2=True
+#     Install Python 3 over Python 2
+#
+# Install the TensorFlow Python package.
+# --build-arg TF_PACKAGE=tensorflow (tensorflow|tensorflow-gpu|tf-nightly|tf-nightly-gpu)
+# The specific TensorFlow Python package to install
+#
+# Configure TensorFlow's shell prompt and login tools.
+
+ARG UBUNTU_VERSION=16.04
+FROM ubuntu:${UBUNTU_VERSION}
+
+ARG USE_PYTHON_3_NOT_2=True
+ARG _PY_SUFFIX=${USE_PYTHON_3_NOT_2:+3}
+ARG PYTHON=python${_PY_SUFFIX}
+ARG PIP=pip${_PY_SUFFIX}
+
+RUN apt-get update && apt-get install -y \
+ ${PYTHON} \
+ ${PYTHON}-pip
+
+RUN ${PIP} install --upgrade \
+ pip \
+ setuptools
+
+ARG TF_PACKAGE=tensorflow
+RUN ${PIP} install ${TF_PACKAGE}
+
+COPY bashrc /etc/bash.bashrc
+RUN chmod a+rwx /etc/bash.bashrc
diff --git a/tensorflow/tools/dockerfiles/dockerfiles/nvidia-devel-jupyter.Dockerfile b/tensorflow/tools/dockerfiles/dockerfiles/nvidia-devel-jupyter.Dockerfile
new file mode 100644
index 0000000000..68c0e2f2bd
--- /dev/null
+++ b/tensorflow/tools/dockerfiles/dockerfiles/nvidia-devel-jupyter.Dockerfile
@@ -0,0 +1,126 @@
+# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============================================================================
+#
+# THIS IS A GENERATED DOCKERFILE.
+#
+# This file was assembled from multiple pieces, whose use is documented
+# below. Please refer to the TensorFlow dockerfiles documentation for
+# more information. Build args are documented as their default value.
+#
+# Ubuntu-based, Nvidia-GPU-enabled environment for developing changes for TensorFlow, with Jupyter included.
+#
+# Start from Nvidia's Ubuntu base image with CUDA and CuDNN, with TF development
+# packages.
+# --build-arg UBUNTU_VERSION=16.04
+# ( no description )
+#
+# Python is required for TensorFlow and other libraries.
+# --build-arg USE_PYTHON_3_NOT_2=True
+#     Install Python 3 over Python 2
+#
+# Install the latest version of Bazel and Python development tools.
+#
+# Configure TensorFlow's shell prompt and login tools.
+#
+# Launch Jupyter on execution instead of a bash prompt.
+
+ARG UBUNTU_VERSION=16.04
+FROM nvidia/cuda:9.0-base-ubuntu${UBUNTU_VERSION}
+
+RUN apt-get update && apt-get install -y --no-install-recommends \
+ build-essential \
+ cuda-command-line-tools-9-0 \
+ cuda-cublas-dev-9-0 \
+ cuda-cudart-dev-9-0 \
+ cuda-cufft-dev-9-0 \
+ cuda-curand-dev-9-0 \
+ cuda-cusolver-dev-9-0 \
+ cuda-cusparse-dev-9-0 \
+ curl \
+ git \
+ libcudnn7=7.2.1.38-1+cuda9.0 \
+ libcudnn7-dev=7.2.1.38-1+cuda9.0 \
+ libnccl2=2.2.13-1+cuda9.0 \
+ libnccl-dev=2.2.13-1+cuda9.0 \
+ libcurl3-dev \
+ libfreetype6-dev \
+ libhdf5-serial-dev \
+ libpng12-dev \
+ libzmq3-dev \
+ pkg-config \
+ rsync \
+ software-properties-common \
+ unzip \
+ zip \
+ zlib1g-dev \
+ wget \
+ && \
+ rm -rf /var/lib/apt/lists/* && \
+ find /usr/local/cuda-9.0/lib64/ -type f -name 'lib*_static.a' -not -name 'libcudart_static.a' -delete && \
+ rm /usr/lib/x86_64-linux-gnu/libcudnn_static_v7.a
+
+RUN apt-get update && \
+ apt-get install nvinfer-runtime-trt-repo-ubuntu1604-4.0.1-ga-cuda9.0 && \
+ apt-get update && \
+ apt-get install libnvinfer4=4.1.2-1+cuda9.0 && \
+ apt-get install libnvinfer-dev=4.1.2-1+cuda9.0
+
+# Link NCCL library and header where the build script expects them.
+RUN mkdir /usr/local/cuda-9.0/lib && \
+ ln -s /usr/lib/x86_64-linux-gnu/libnccl.so.2 /usr/local/cuda/lib/libnccl.so.2 && \
+ ln -s /usr/include/nccl.h /usr/local/cuda/include/nccl.h
+
+# TODO(tobyboyd): Remove after license is excluded from BUILD file.
+RUN gunzip /usr/share/doc/libnccl2/NCCL-SLA.txt.gz && \
+ cp /usr/share/doc/libnccl2/NCCL-SLA.txt /usr/local/cuda/
+
+ARG USE_PYTHON_3_NOT_2=True
+ARG _PY_SUFFIX=${USE_PYTHON_3_NOT_2:+3}
+ARG PYTHON=python${_PY_SUFFIX}
+ARG PIP=pip${_PY_SUFFIX}
+
+RUN apt-get update && apt-get install -y \
+ ${PYTHON} \
+ ${PYTHON}-pip
+
+RUN ${PIP} install --upgrade \
+ pip \
+ setuptools
+
+RUN apt-get update && apt-get install -y \
+ build-essential \
+ curl \
+ git \
+ openjdk-8-jdk \
+ ${PYTHON}-dev \
+ swig
+
+# Install bazel
+RUN echo "deb [arch=amd64] http://storage.googleapis.com/bazel-apt stable jdk1.8" | tee /etc/apt/sources.list.d/bazel.list && \
+ curl https://bazel.build/bazel-release.pub.gpg | apt-key add - && \
+ apt-get update && \
+ apt-get install -y bazel
+
+COPY bashrc /etc/bash.bashrc
+RUN chmod a+rwx /etc/bash.bashrc
+
+RUN ${PIP} install jupyter
+
+RUN mkdir /notebooks && chmod a+rwx /notebooks
+RUN mkdir /.local && chmod a+rwx /.local
+WORKDIR /notebooks
+EXPOSE 8888
+
+CMD ["bash", "-c", "source /etc/bash.bashrc && jupyter notebook --notebook-dir=/notebooks --ip 0.0.0.0 --no-browser --allow-root"]
diff --git a/tensorflow/tools/dockerfiles/dockerfiles/nvidia-devel.Dockerfile b/tensorflow/tools/dockerfiles/dockerfiles/nvidia-devel.Dockerfile
new file mode 100644
index 0000000000..77be0dd287
--- /dev/null
+++ b/tensorflow/tools/dockerfiles/dockerfiles/nvidia-devel.Dockerfile
@@ -0,0 +1,115 @@
+# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============================================================================
+#
+# THIS IS A GENERATED DOCKERFILE.
+#
+# This file was assembled from multiple pieces, whose use is documented
+# below. Please refer to the TensorFlow dockerfiles documentation for
+# more information. Build args are documented as their default value.
+#
+# Ubuntu-based, Nvidia-GPU-enabled environment for developing changes for TensorFlow.
+#
+# Start from Nvidia's Ubuntu base image with CUDA and CuDNN, with TF development
+# packages.
+# --build-arg UBUNTU_VERSION=16.04
+# ( no description )
+#
+# Python is required for TensorFlow and other libraries.
+# --build-arg USE_PYTHON_3_NOT_2=True
+#     Install Python 3 over Python 2
+#
+# Install the latest version of Bazel and Python development tools.
+#
+# Configure TensorFlow's shell prompt and login tools.
+
+ARG UBUNTU_VERSION=16.04
+FROM nvidia/cuda:9.0-base-ubuntu${UBUNTU_VERSION}
+
+RUN apt-get update && apt-get install -y --no-install-recommends \
+ build-essential \
+ cuda-command-line-tools-9-0 \
+ cuda-cublas-dev-9-0 \
+ cuda-cudart-dev-9-0 \
+ cuda-cufft-dev-9-0 \
+ cuda-curand-dev-9-0 \
+ cuda-cusolver-dev-9-0 \
+ cuda-cusparse-dev-9-0 \
+ curl \
+ git \
+ libcudnn7=7.2.1.38-1+cuda9.0 \
+ libcudnn7-dev=7.2.1.38-1+cuda9.0 \
+ libnccl2=2.2.13-1+cuda9.0 \
+ libnccl-dev=2.2.13-1+cuda9.0 \
+ libcurl3-dev \
+ libfreetype6-dev \
+ libhdf5-serial-dev \
+ libpng12-dev \
+ libzmq3-dev \
+ pkg-config \
+ rsync \
+ software-properties-common \
+ unzip \
+ zip \
+ zlib1g-dev \
+ wget \
+ && \
+ rm -rf /var/lib/apt/lists/* && \
+ find /usr/local/cuda-9.0/lib64/ -type f -name 'lib*_static.a' -not -name 'libcudart_static.a' -delete && \
+ rm /usr/lib/x86_64-linux-gnu/libcudnn_static_v7.a
+
+RUN apt-get update && \
+ apt-get install nvinfer-runtime-trt-repo-ubuntu1604-4.0.1-ga-cuda9.0 && \
+ apt-get update && \
+ apt-get install libnvinfer4=4.1.2-1+cuda9.0 && \
+ apt-get install libnvinfer-dev=4.1.2-1+cuda9.0
+
+# Link NCCL library and header where the build script expects them.
+RUN mkdir /usr/local/cuda-9.0/lib && \
+ ln -s /usr/lib/x86_64-linux-gnu/libnccl.so.2 /usr/local/cuda/lib/libnccl.so.2 && \
+ ln -s /usr/include/nccl.h /usr/local/cuda/include/nccl.h
+
+# TODO(tobyboyd): Remove after license is excluded from BUILD file.
+RUN gunzip /usr/share/doc/libnccl2/NCCL-SLA.txt.gz && \
+ cp /usr/share/doc/libnccl2/NCCL-SLA.txt /usr/local/cuda/
+
+ARG USE_PYTHON_3_NOT_2=True
+ARG _PY_SUFFIX=${USE_PYTHON_3_NOT_2:+3}
+ARG PYTHON=python${_PY_SUFFIX}
+ARG PIP=pip${_PY_SUFFIX}
+
+RUN apt-get update && apt-get install -y \
+ ${PYTHON} \
+ ${PYTHON}-pip
+
+RUN ${PIP} install --upgrade \
+ pip \
+ setuptools
+
+RUN apt-get update && apt-get install -y \
+ build-essential \
+ curl \
+ git \
+ openjdk-8-jdk \
+ ${PYTHON}-dev \
+ swig
+
+# Install bazel
+RUN echo "deb [arch=amd64] http://storage.googleapis.com/bazel-apt stable jdk1.8" | tee /etc/apt/sources.list.d/bazel.list && \
+ curl https://bazel.build/bazel-release.pub.gpg | apt-key add - && \
+ apt-get update && \
+ apt-get install -y bazel
+
+COPY bashrc /etc/bash.bashrc
+RUN chmod a+rwx /etc/bash.bashrc
diff --git a/tensorflow/tools/dockerfiles/dockerfiles/nvidia-jupyter.Dockerfile b/tensorflow/tools/dockerfiles/dockerfiles/nvidia-jupyter.Dockerfile
new file mode 100644
index 0000000000..5ff1fa917a
--- /dev/null
+++ b/tensorflow/tools/dockerfiles/dockerfiles/nvidia-jupyter.Dockerfile
@@ -0,0 +1,95 @@
+# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============================================================================
+#
+# THIS IS A GENERATED DOCKERFILE.
+#
+# This file was assembled from multiple pieces, whose use is documented
+# below. Please refer to the TensorFlow dockerfiles documentation for
+# more information. Build args are documented as their default value.
+#
+# Ubuntu-based, Nvidia-GPU-enabled environment for using TensorFlow, with Jupyter included.
+#
+# NVIDIA with CUDA and CuDNN, but no development packages
+# --build-arg UBUNTU_VERSION=16.04
+# ( no description )
+#
+# Python is required for TensorFlow and other libraries.
+# --build-arg USE_PYTHON_3_NOT_2=True
+#     Install Python 3 over Python 2
+#
+# Install the TensorFlow Python package.
+# --build-arg TF_PACKAGE=tensorflow-gpu (tensorflow|tensorflow-gpu|tf-nightly|tf-nightly-gpu)
+# The specific TensorFlow Python package to install
+#
+# Configure TensorFlow's shell prompt and login tools.
+#
+# Launch Jupyter on execution instead of a bash prompt.
+
+FROM nvidia/cuda:9.0-base-ubuntu16.04
+
+# Pick up some TF dependencies
+RUN apt-get update && apt-get install -y --no-install-recommends \
+ build-essential \
+ cuda-command-line-tools-9-0 \
+ cuda-cublas-9-0 \
+ cuda-cufft-9-0 \
+ cuda-curand-9-0 \
+ cuda-cusolver-9-0 \
+ cuda-cusparse-9-0 \
+ libcudnn7=7.2.1.38-1+cuda9.0 \
+ libnccl2=2.2.13-1+cuda9.0 \
+ libfreetype6-dev \
+ libhdf5-serial-dev \
+ libpng12-dev \
+ libzmq3-dev \
+ pkg-config \
+ software-properties-common \
+ unzip \
+ && \
+ apt-get clean && \
+ rm -rf /var/lib/apt/lists/*
+
+RUN apt-get update && \
+ apt-get install nvinfer-runtime-trt-repo-ubuntu1604-4.0.1-ga-cuda9.0 && \
+ apt-get update && \
+ apt-get install libnvinfer4=4.1.2-1+cuda9.0
+
+ARG USE_PYTHON_3_NOT_2=True
+ARG _PY_SUFFIX=${USE_PYTHON_3_NOT_2:+3}
+ARG PYTHON=python${_PY_SUFFIX}
+ARG PIP=pip${_PY_SUFFIX}
+
+RUN apt-get update && apt-get install -y \
+ ${PYTHON} \
+ ${PYTHON}-pip
+
+RUN ${PIP} install --upgrade \
+ pip \
+ setuptools
+
+ARG TF_PACKAGE=tensorflow-gpu
+RUN ${PIP} install ${TF_PACKAGE}
+
+COPY bashrc /etc/bash.bashrc
+RUN chmod a+rwx /etc/bash.bashrc
+
+RUN ${PIP} install jupyter
+
+RUN mkdir /notebooks && chmod a+rwx /notebooks
+RUN mkdir /.local && chmod a+rwx /.local
+WORKDIR /notebooks
+EXPOSE 8888
+
+CMD ["bash", "-c", "source /etc/bash.bashrc && jupyter notebook --notebook-dir=/notebooks --ip 0.0.0.0 --no-browser --allow-root"]
diff --git a/tensorflow/tools/dockerfiles/dockerfiles/nvidia.Dockerfile b/tensorflow/tools/dockerfiles/dockerfiles/nvidia.Dockerfile
new file mode 100644
index 0000000000..3df810b5fe
--- /dev/null
+++ b/tensorflow/tools/dockerfiles/dockerfiles/nvidia.Dockerfile
@@ -0,0 +1,84 @@
+# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============================================================================
+#
+# THIS IS A GENERATED DOCKERFILE.
+#
+# This file was assembled from multiple pieces, whose use is documented
+# below. Please refer to the TensorFlow dockerfiles documentation for
+# more information. Build args are documented as their default value.
+#
+# Ubuntu-based, Nvidia-GPU-enabled environment for using TensorFlow.
+#
+# NVIDIA with CUDA and CuDNN, but no development packages
+# --build-arg UBUNTU_VERSION=16.04
+# ( no description )
+#
+# Python is required for TensorFlow and other libraries.
+# --build-arg USE_PYTHON_3_NOT_2=True
+#     Install Python 3 over Python 2
+#
+# Install the TensorFlow Python package.
+# --build-arg TF_PACKAGE=tensorflow-gpu (tensorflow|tensorflow-gpu|tf-nightly|tf-nightly-gpu)
+# The specific TensorFlow Python package to install
+#
+# Configure TensorFlow's shell prompt and login tools.
+
+FROM nvidia/cuda:9.0-base-ubuntu16.04
+
+# Pick up some TF dependencies
+RUN apt-get update && apt-get install -y --no-install-recommends \
+ build-essential \
+ cuda-command-line-tools-9-0 \
+ cuda-cublas-9-0 \
+ cuda-cufft-9-0 \
+ cuda-curand-9-0 \
+ cuda-cusolver-9-0 \
+ cuda-cusparse-9-0 \
+ libcudnn7=7.2.1.38-1+cuda9.0 \
+ libnccl2=2.2.13-1+cuda9.0 \
+ libfreetype6-dev \
+ libhdf5-serial-dev \
+ libpng12-dev \
+ libzmq3-dev \
+ pkg-config \
+ software-properties-common \
+ unzip \
+ && \
+ apt-get clean && \
+ rm -rf /var/lib/apt/lists/*
+
+RUN apt-get update && \
+ apt-get install nvinfer-runtime-trt-repo-ubuntu1604-4.0.1-ga-cuda9.0 && \
+ apt-get update && \
+ apt-get install libnvinfer4=4.1.2-1+cuda9.0
+
+ARG USE_PYTHON_3_NOT_2=True
+ARG _PY_SUFFIX=${USE_PYTHON_3_NOT_2:+3}
+ARG PYTHON=python${_PY_SUFFIX}
+ARG PIP=pip${_PY_SUFFIX}
+
+RUN apt-get update && apt-get install -y \
+ ${PYTHON} \
+ ${PYTHON}-pip
+
+RUN ${PIP} install --upgrade \
+ pip \
+ setuptools
+
+ARG TF_PACKAGE=tensorflow-gpu
+RUN ${PIP} install ${TF_PACKAGE}
+
+COPY bashrc /etc/bash.bashrc
+RUN chmod a+rwx /etc/bash.bashrc
diff --git a/tensorflow/tools/dockerfiles/partials/bazel.partial.Dockerfile b/tensorflow/tools/dockerfiles/partials/bazel.partial.Dockerfile
new file mode 100644
index 0000000000..b08d8bdd14
--- /dev/null
+++ b/tensorflow/tools/dockerfiles/partials/bazel.partial.Dockerfile
@@ -0,0 +1,13 @@
+RUN apt-get update && apt-get install -y \
+ build-essential \
+ curl \
+ git \
+ openjdk-8-jdk \
+ ${PYTHON}-dev \
+ swig
+
+# Install bazel
+RUN echo "deb [arch=amd64] http://storage.googleapis.com/bazel-apt stable jdk1.8" | tee /etc/apt/sources.list.d/bazel.list && \
+ curl https://bazel.build/bazel-release.pub.gpg | apt-key add - && \
+ apt-get update && \
+ apt-get install -y bazel
diff --git a/tensorflow/tools/dockerfiles/partials/jupyter.partial.Dockerfile b/tensorflow/tools/dockerfiles/partials/jupyter.partial.Dockerfile
new file mode 100644
index 0000000000..2c9b9f3f9a
--- /dev/null
+++ b/tensorflow/tools/dockerfiles/partials/jupyter.partial.Dockerfile
@@ -0,0 +1,8 @@
+RUN ${PIP} install jupyter
+
+RUN mkdir /notebooks && chmod a+rwx /notebooks
+RUN mkdir /.local && chmod a+rwx /.local
+WORKDIR /notebooks
+EXPOSE 8888
+
+CMD ["bash", "-c", "source /etc/bash.bashrc && jupyter notebook --notebook-dir=/notebooks --ip 0.0.0.0 --no-browser --allow-root"]
diff --git a/tensorflow/tools/dockerfiles/partials/nvidia-devel.partial.Dockerfile b/tensorflow/tools/dockerfiles/partials/nvidia-devel.partial.Dockerfile
new file mode 100644
index 0000000000..45159f711f
--- /dev/null
+++ b/tensorflow/tools/dockerfiles/partials/nvidia-devel.partial.Dockerfile
@@ -0,0 +1,49 @@
+ARG UBUNTU_VERSION=16.04
+FROM nvidia/cuda:9.0-base-ubuntu${UBUNTU_VERSION}
+
+RUN apt-get update && apt-get install -y --no-install-recommends \
+ build-essential \
+ cuda-command-line-tools-9-0 \
+ cuda-cublas-dev-9-0 \
+ cuda-cudart-dev-9-0 \
+ cuda-cufft-dev-9-0 \
+ cuda-curand-dev-9-0 \
+ cuda-cusolver-dev-9-0 \
+ cuda-cusparse-dev-9-0 \
+ curl \
+ git \
+ libcudnn7=7.2.1.38-1+cuda9.0 \
+ libcudnn7-dev=7.2.1.38-1+cuda9.0 \
+ libnccl2=2.2.13-1+cuda9.0 \
+ libnccl-dev=2.2.13-1+cuda9.0 \
+ libcurl3-dev \
+ libfreetype6-dev \
+ libhdf5-serial-dev \
+ libpng12-dev \
+ libzmq3-dev \
+ pkg-config \
+ rsync \
+ software-properties-common \
+ unzip \
+ zip \
+ zlib1g-dev \
+ wget \
+ && \
+ rm -rf /var/lib/apt/lists/* && \
+ find /usr/local/cuda-9.0/lib64/ -type f -name 'lib*_static.a' -not -name 'libcudart_static.a' -delete && \
+ rm /usr/lib/x86_64-linux-gnu/libcudnn_static_v7.a
+
+RUN apt-get update && \
+ apt-get install nvinfer-runtime-trt-repo-ubuntu1604-4.0.1-ga-cuda9.0 && \
+ apt-get update && \
+ apt-get install libnvinfer4=4.1.2-1+cuda9.0 && \
+ apt-get install libnvinfer-dev=4.1.2-1+cuda9.0
+
+# Link NCCL library and header where the build script expects them.
+RUN mkdir /usr/local/cuda-9.0/lib && \
+ ln -s /usr/lib/x86_64-linux-gnu/libnccl.so.2 /usr/local/cuda/lib/libnccl.so.2 && \
+ ln -s /usr/include/nccl.h /usr/local/cuda/include/nccl.h
+
+# TODO(tobyboyd): Remove after license is excluded from BUILD file.
+RUN gunzip /usr/share/doc/libnccl2/NCCL-SLA.txt.gz && \
+ cp /usr/share/doc/libnccl2/NCCL-SLA.txt /usr/local/cuda/
diff --git a/tensorflow/tools/dockerfiles/partials/nvidia.partial.Dockerfile b/tensorflow/tools/dockerfiles/partials/nvidia.partial.Dockerfile
new file mode 100644
index 0000000000..1064390af3
--- /dev/null
+++ b/tensorflow/tools/dockerfiles/partials/nvidia.partial.Dockerfile
@@ -0,0 +1,28 @@
+FROM nvidia/cuda:9.0-base-ubuntu16.04
+
+# Pick up some TF dependencies
+RUN apt-get update && apt-get install -y --no-install-recommends \
+ build-essential \
+ cuda-command-line-tools-9-0 \
+ cuda-cublas-9-0 \
+ cuda-cufft-9-0 \
+ cuda-curand-9-0 \
+ cuda-cusolver-9-0 \
+ cuda-cusparse-9-0 \
+ libcudnn7=7.2.1.38-1+cuda9.0 \
+ libnccl2=2.2.13-1+cuda9.0 \
+ libfreetype6-dev \
+ libhdf5-serial-dev \
+ libpng12-dev \
+ libzmq3-dev \
+ pkg-config \
+ software-properties-common \
+ unzip \
+ && \
+ apt-get clean && \
+ rm -rf /var/lib/apt/lists/*
+
+RUN apt-get update && \
+ apt-get install nvinfer-runtime-trt-repo-ubuntu1604-4.0.1-ga-cuda9.0 && \
+ apt-get update && \
+ apt-get install libnvinfer4=4.1.2-1+cuda9.0
diff --git a/tensorflow/tools/dockerfiles/partials/python.partial.Dockerfile b/tensorflow/tools/dockerfiles/partials/python.partial.Dockerfile
new file mode 100644
index 0000000000..6f346236a5
--- /dev/null
+++ b/tensorflow/tools/dockerfiles/partials/python.partial.Dockerfile
@@ -0,0 +1,12 @@
+ARG USE_PYTHON_3_NOT_2
+ARG _PY_SUFFIX=${USE_PYTHON_3_NOT_2:+3}
+ARG PYTHON=python${_PY_SUFFIX}
+ARG PIP=pip${_PY_SUFFIX}
+
+RUN apt-get update && apt-get install -y \
+ ${PYTHON} \
+ ${PYTHON}-pip
+
+RUN ${PIP} install --upgrade \
+ pip \
+ setuptools
diff --git a/tensorflow/tools/dockerfiles/partials/shell.partial.Dockerfile b/tensorflow/tools/dockerfiles/partials/shell.partial.Dockerfile
new file mode 100644
index 0000000000..d641a11b06
--- /dev/null
+++ b/tensorflow/tools/dockerfiles/partials/shell.partial.Dockerfile
@@ -0,0 +1,2 @@
+COPY bashrc /etc/bash.bashrc
+RUN chmod a+rwx /etc/bash.bashrc
diff --git a/tensorflow/tools/dockerfiles/partials/tensorflow.partial.Dockerfile b/tensorflow/tools/dockerfiles/partials/tensorflow.partial.Dockerfile
new file mode 100644
index 0000000000..96e79547f0
--- /dev/null
+++ b/tensorflow/tools/dockerfiles/partials/tensorflow.partial.Dockerfile
@@ -0,0 +1,2 @@
+ARG TF_PACKAGE
+RUN ${PIP} install ${TF_PACKAGE}
diff --git a/tensorflow/tools/dockerfiles/partials/ubuntu-devel.partial.Dockerfile b/tensorflow/tools/dockerfiles/partials/ubuntu-devel.partial.Dockerfile
new file mode 100644
index 0000000000..bc79272276
--- /dev/null
+++ b/tensorflow/tools/dockerfiles/partials/ubuntu-devel.partial.Dockerfile
@@ -0,0 +1,24 @@
+ARG UBUNTU_VERSION=16.04
+FROM ubuntu:${UBUNTU_VERSION}
+
+RUN apt-get update && apt-get install -y --no-install-recommends \
+ build-essential \
+ curl \
+ git \
+ libcurl3-dev \
+ libfreetype6-dev \
+ libhdf5-serial-dev \
+ libpng12-dev \
+ libzmq3-dev \
+ pkg-config \
+ python-dev \
+ rsync \
+ software-properties-common \
+ unzip \
+ zip \
+ zlib1g-dev \
+ openjdk-8-jdk \
+ openjdk-8-jre-headless \
+ && \
+ apt-get clean && \
+ rm -rf /var/lib/apt/lists/*
diff --git a/tensorflow/tools/dockerfiles/partials/ubuntu.partial.Dockerfile b/tensorflow/tools/dockerfiles/partials/ubuntu.partial.Dockerfile
new file mode 100644
index 0000000000..0a50735bf8
--- /dev/null
+++ b/tensorflow/tools/dockerfiles/partials/ubuntu.partial.Dockerfile
@@ -0,0 +1,2 @@
+ARG UBUNTU_VERSION=16.04
+FROM ubuntu:${UBUNTU_VERSION}
diff --git a/tensorflow/tools/dockerfiles/spec.yml b/tensorflow/tools/dockerfiles/spec.yml
new file mode 100644
index 0000000000..28bf9a55da
--- /dev/null
+++ b/tensorflow/tools/dockerfiles/spec.yml
@@ -0,0 +1,195 @@
+# ======
+# HEADER
+# ======
+#
+# This is commented-out and prepended to each generated Dockerfile.
+header: |
+ Copyright 2018 The TensorFlow Authors. All Rights Reserved.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+ ============================================================================
+
+ THIS IS A GENERATED DOCKERFILE.
+
+ This file was assembled from multiple pieces, whose use is documented
+  below. Please refer to the TensorFlow dockerfiles documentation for
+ more information. Build args are documented as their default value.
+
+# ========
+# PARTIALS
+# ========
+#
+# Represent and document pieces of a Dockerfile. Spec:
+#
+#   name: the name of the partial; it is referenced from the images section
+# desc: A description, inserted later into the Dockerfile
+# file: Alternative file prefix, e.g. file.partial.Dockerfile. The default is
+# the name of the partial.
+# args: A dict of ARGs in the Dockerfile; each entry has the format
+# ARG_NAME: VALUE where VALUE is one of:
+# - a dict:
+# desc: Documentation for the arg
+# default: Default value for the arg; is written to the Dockerfile
+# options: List of strings, part of documentation
+# - a concrete value: the same as a dictionary with default: [value].
+
+partials:
+ ubuntu:
+ desc: Start from Ubuntu (no GPU support)
+ args:
+ UBUNTU_VERSION: 16.04
+
+ ubuntu-devel:
+ desc: Start from Ubuntu, with TF development packages (no GPU support)
+ args:
+ UBUNTU_VERSION: 16.04
+
+ bazel:
+ desc: Install the latest version of Bazel and Python development tools.
+
+ nvidia:
+    desc: NVIDIA with CUDA and CuDNN, but no development packages
+ args:
+ UBUNTU_VERSION: 16.04
+
+ nvidia-devel:
+ desc: >
+ Start from Nvidia's Ubuntu base image with CUDA and CuDNN, with TF
+ development packages.
+ args:
+ UBUNTU_VERSION: 16.04
+
+ python:
+ desc: Python is required for TensorFlow and other libraries.
+ args:
+ USE_PYTHON_3_NOT_2:
+ default: true
+        desc: Install Python 3 over Python 2
+
+ tensorflow:
+ desc: Install the TensorFlow Python package.
+ args:
+ TF_PACKAGE:
+ default: tensorflow
+ options:
+ - tensorflow
+ - tensorflow-gpu
+ - tf-nightly
+ - tf-nightly-gpu
+ desc: The specific TensorFlow Python package to install
+ shell:
+ desc: Configure TensorFlow's shell prompt and login tools.
+ jupyter:
+ desc: Launch Jupyter on execution instead of a bash prompt.
+
+# ======
+# IMAGES
+# ======
+#
+# Represent Dockerfiles. Spec:
+#
+# name: the name of the image, possibly referenced by other images
+# desc: A description, inserted later into the Dockerfile
+# create-dockerfile: Create a dockerfile based on this. Useful for creating
+# extensible base images that don't need a file. Default is true.
+# partials: List of VALUEs, where a VALUE is either:
+# - the name of a partial, which inserts that partial into this image
+# - image: [name of another image], which inserts the partials from that
+# image into this image
+# arg-defaults: List of VALUEs, where a VALUE is either:
+# - ARG_NAME: VALUE, which sets the ARG_NAME to VALUE wherever it appears
+# in this image's partials
+# - [name of another image], which loads the default args from that image
+images:
+
+ nodev:
+ create-dockerfile: false
+ partials:
+ - python
+ - tensorflow
+ - shell
+
+ dev:
+ create-dockerfile: false
+ partials:
+ - python
+ - bazel
+ - shell
+
+ cpu:
+ desc: Ubuntu-based, CPU-only environment for using TensorFlow
+ partials:
+ - ubuntu
+ - image: nodev
+
+ cpu-devel:
+ desc: >
+ Ubuntu-based, CPU-only environment for developing changes for
+ TensorFlow.
+ partials:
+ - ubuntu-devel
+ - image: dev
+
+ nvidia:
+ desc: Ubuntu-based, Nvidia-GPU-enabled environment for using TensorFlow.
+ arg-defaults:
+ - TF_PACKAGE: tensorflow-gpu
+ partials:
+ - nvidia
+ - image: nodev
+
+ nvidia-devel:
+ desc: >
+ Ubuntu-based, Nvidia-GPU-enabled environment for developing changes
+ for TensorFlow.
+ arg-defaults:
+ - TF_PACKAGE: tensorflow-gpu
+ partials:
+ - nvidia-devel
+ - image: dev
+
+ cpu-jupyter:
+ desc: >
+ Ubuntu-based, CPU-only environment for using TensorFlow, with Jupyter
+ included.
+ partials:
+ - image: cpu
+ - jupyter
+
+ cpu-devel-jupyter:
+ desc: >
+ Ubuntu-based, CPU-only environment for developing changes for
+ TensorFlow, with Jupyter included.
+ partials:
+ - image: cpu-devel
+ - jupyter
+
+ nvidia-jupyter:
+ desc: >
+ Ubuntu-based, Nvidia-GPU-enabled environment for using TensorFlow, with
+ Jupyter included.
+ arg-defaults:
+ - nvidia
+ partials:
+ - image: nvidia
+ - jupyter
+
+ nvidia-devel-jupyter:
+ desc: >
+ Ubuntu-based, Nvidia-GPU-enabled environment for developing changes for
+ TensorFlow, with Jupyter included.
+ arg-defaults:
+ - nvidia-devel
+ partials:
+ - image: nvidia-devel
+ - jupyter
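The "- image: <name>" entries above splice another image's partial list into the referencing image, as described in the IMAGES comment block. A rough, self-contained illustration of that expansion (not the assembler's actual code, which also caps the reference depth):

    # Hypothetical subset of the spec above; "- image: nodev" parses to {'image': 'nodev'}.
    images = {
        'nodev': {'partials': ['python', 'tensorflow', 'shell']},
        'cpu': {'partials': ['ubuntu', {'image': 'nodev'}]},
    }

    def expand(name):
      # Replace each image reference with that image's own partials.
      expanded = []
      for partial in images[name]['partials']:
        if isinstance(partial, dict) and 'image' in partial:
          expanded.extend(expand(partial['image']))
        else:
          expanded.append(partial)
      return expanded

    print(expand('cpu'))  # ['ubuntu', 'python', 'tensorflow', 'shell']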
diff --git a/tensorflow/tools/docs/BUILD b/tensorflow/tools/docs/BUILD
index 0c1fd0cf9d..b218e900bf 100644
--- a/tensorflow/tools/docs/BUILD
+++ b/tensorflow/tools/docs/BUILD
@@ -28,6 +28,24 @@ py_test(
srcs_version = "PY2AND3",
deps = [
":doc_generator_visitor",
+ ":generate_lib",
+ "//tensorflow/python:platform_test",
+ ],
+)
+
+py_library(
+ name = "doc_controls",
+ srcs = ["doc_controls.py"],
+ srcs_version = "PY2AND3",
+)
+
+py_test(
+ name = "doc_controls_test",
+ size = "small",
+ srcs = ["doc_controls_test.py"],
+ srcs_version = "PY2AND3",
+ deps = [
+ ":doc_controls",
"//tensorflow/python:platform_test",
],
)
@@ -37,7 +55,12 @@ py_library(
srcs = ["parser.py"],
srcs_version = "PY2AND3",
visibility = ["//visibility:public"],
- deps = ["@astor_archive//:astor"],
+ deps = [
+ ":doc_controls",
+ "//tensorflow/python:platform",
+ "//tensorflow/python:util",
+ "@astor_archive//:astor",
+ ],
)
py_test(
@@ -63,13 +86,15 @@ py_binary(
srcs_version = "PY2AND3",
visibility = ["//visibility:public"],
deps = [
+ ":doc_controls",
":doc_generator_visitor",
":parser",
":pretty_docs",
":py_guide_parser",
- "//tensorflow/contrib/ffmpeg:ffmpeg_ops_py",
+ "//tensorflow/python:util",
"//tensorflow/tools/common:public_api",
"//tensorflow/tools/common:traverse",
+ "@six_archive//:six",
],
)
@@ -92,6 +117,7 @@ py_binary(
deps = [
":generate_lib",
"//tensorflow:tensorflow_py",
+ "//tensorflow/python:util",
"//tensorflow/python/debug:debug_py",
],
)
@@ -100,13 +126,14 @@ py_test(
name = "build_docs_test",
size = "small",
srcs = ["build_docs_test.py"],
- data = ["//tensorflow:docs_src"],
+ data = ["//tensorflow/docs_src"],
srcs_version = "PY2AND3",
tags = [
- # No reason to run sanitizers for this test.
+ # No reason to run sanitizers or fastbuild for this test.
"noasan",
"nomsan",
"notsan",
+ "optonly",
],
deps = [
":generate_lib",
diff --git a/tensorflow/tools/docs/doc_controls.py b/tensorflow/tools/docs/doc_controls.py
new file mode 100644
index 0000000000..5e526443cc
--- /dev/null
+++ b/tensorflow/tools/docs/doc_controls.py
@@ -0,0 +1,319 @@
+# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ==============================================================================
+"""Documentation control decorators."""
+
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+_DO_NOT_DOC = "_tf_docs_do_not_document"
+
+
+def do_not_generate_docs(obj):
+ """A decorator: Do not generate docs for this object.
+
+  For example, the following classes:
+
+ ```
+ class Parent(object):
+ def method1(self):
+ pass
+ def method2(self):
+ pass
+
+ class Child(Parent):
+ def method1(self):
+ pass
+ def method2(self):
+ pass
+ ```
+
+ Produce the following api_docs:
+
+ ```
+ /Parent.md
+ # method1
+ # method2
+ /Child.md
+ # method1
+ # method2
+ ```
+
+ This decorator allows you to skip classes or methods:
+
+ ```
+ @do_not_generate_docs
+ class Parent(object):
+ def method1(self):
+ pass
+ def method2(self):
+ pass
+
+ class Child(Parent):
+ @do_not_generate_docs
+ def method1(self):
+ pass
+ def method2(self):
+ pass
+ ```
+
+ This will only produce the following docs:
+
+ ```
+ /Child.md
+ # method2
+ ```
+
+ Note: This is implemented by adding a hidden attribute on the object, so it
+ cannot be used on objects which do not allow new attributes to be added. So
+ this decorator must go *below* `@property`, `@classmethod`,
+ or `@staticmethod`:
+
+ ```
+ class Example(object):
+ @property
+ @do_not_generate_docs
+ def x(self):
+ return self._x
+ ```
+
+ Args:
+ obj: The object to hide from the generated docs.
+
+ Returns:
+ obj
+ """
+ setattr(obj, _DO_NOT_DOC, None)
+ return obj
+
+
+_DO_NOT_DOC_INHERITABLE = "_tf_docs_do_not_doc_inheritable"
+
+
+def do_not_doc_inheritable(obj):
+ """A decorator: Do not generate docs for this method.
+
+  This version of the decorator is "inherited" by subclasses. No docs will be
+  generated for the decorated method in any subclass, even if the subclass
+  overrides the method.
+
+ For example, to ensure that `method1` is **never documented** use this
+ decorator on the base-class:
+
+ ```
+ class Parent(object):
+ @do_not_doc_inheritable
+ def method1(self):
+ pass
+ def method2(self):
+ pass
+
+ class Child(Parent):
+ def method1(self):
+ pass
+ def method2(self):
+ pass
+ ```
+ This will produce the following docs:
+
+ ```
+ /Parent.md
+ # method2
+ /Child.md
+ # method2
+ ```
+
+  When generating docs for a class's attributes, the `__mro__` is searched and
+ the attribute will be skipped if this decorator is detected on the attribute
+ on any class in the `__mro__`.
+
+ Note: This is implemented by adding a hidden attribute on the object, so it
+ cannot be used on objects which do not allow new attributes to be added. So
+ this decorator must go *below* `@property`, `@classmethod`,
+ or `@staticmethod`:
+
+ ```
+ class Example(object):
+ @property
+ @do_not_doc_inheritable
+ def x(self):
+ return self._x
+ ```
+
+ Args:
+ obj: The class-attribute to hide from the generated docs.
+
+ Returns:
+ obj
+ """
+ setattr(obj, _DO_NOT_DOC_INHERITABLE, None)
+ return obj
+
+
+_FOR_SUBCLASS_IMPLEMENTERS = "_tf_docs_tools_for_subclass_implementers"
+
+
+def for_subclass_implementers(obj):
+ """A decorator: Only generate docs for this method in the defining class.
+
+  Also group this method's docs with an `@abstractmethod` in the class's docs.
+
+  No docs will be generated for this class attribute in subclasses.
+
+ The canonical use case for this is `tf.keras.layers.Layer.call`: It's a
+ public method, essential for anyone implementing a subclass, but it should
+ never be called directly.
+
+  Works on methods and other class-attributes.
+
+  When generating docs for a class's attributes, the `__mro__` is searched and
+ the attribute will be skipped if this decorator is detected on the attribute
+ on any **parent** class in the `__mro__`.
+
+ For example:
+
+ ```
+ class Parent(object):
+ @for_subclass_implementers
+ def method1(self):
+ pass
+ def method2(self):
+ pass
+
+ class Child1(Parent):
+ def method1(self):
+ pass
+ def method2(self):
+ pass
+
+ class Child2(Parent):
+ def method1(self):
+ pass
+ def method2(self):
+ pass
+ ```
+
+ This will produce the following docs:
+
+ ```
+ /Parent.md
+ # method1
+ # method2
+ /Child1.md
+ # method2
+ /Child2.md
+ # method2
+ ```
+
+ Note: This is implemented by adding a hidden attribute on the object, so it
+ cannot be used on objects which do not allow new attributes to be added. So
+ this decorator must go *below* `@property`, `@classmethod`,
+ or `@staticmethod`:
+
+ ```
+ class Example(object):
+ @property
+ @for_subclass_implementers
+ def x(self):
+ return self._x
+ ```
+
+ Args:
+ obj: The class-attribute to hide from the generated docs.
+
+ Returns:
+ obj
+ """
+ setattr(obj, _FOR_SUBCLASS_IMPLEMENTERS, None)
+ return obj
+
+
+def should_skip(obj):
+ """Returns true if docs generation should be skipped for this object.
+
+  Checks for the `do_not_generate_docs` or `do_not_doc_inheritable` decorators.
+
+ Args:
+ obj: The object to document, or skip.
+
+ Returns:
+ True if the object should be skipped
+ """
+ # Unwrap fget if the object is a property
+ if isinstance(obj, property):
+ obj = obj.fget
+
+ return hasattr(obj, _DO_NOT_DOC) or hasattr(obj, _DO_NOT_DOC_INHERITABLE)
+
+
+def should_skip_class_attr(cls, name):
+ """Returns true if docs should be skipped for this class attribute.
+
+ Args:
+ cls: The class the attribute belongs to.
+ name: The name of the attribute.
+
+ Returns:
+ True if the attribute should be skipped.
+ """
+ # Get the object with standard lookup, from the nearest
+ # defining parent.
+ try:
+ obj = getattr(cls, name)
+ except AttributeError:
+ # Avoid error caused by enum metaclasses in python3
+ if name in ("name", "value"):
+ return True
+ raise
+
+ # Unwrap fget if the object is a property
+ if isinstance(obj, property):
+ obj = obj.fget
+
+ # Skip if the object is decorated with `do_not_generate_docs` or
+ # `do_not_doc_inheritable`
+ if should_skip(obj):
+ return True
+
+ # Use __dict__ lookup to get the version defined in *this* class.
+ obj = cls.__dict__.get(name, None)
+ if isinstance(obj, property):
+ obj = obj.fget
+ if obj is not None:
+ # If not none, the object is defined in *this* class.
+ # Do not skip if decorated with `for_subclass_implementers`.
+ if hasattr(obj, _FOR_SUBCLASS_IMPLEMENTERS):
+ return False
+
+ # for each parent class
+ for parent in cls.__mro__[1:]:
+ obj = getattr(parent, name, None)
+
+ if obj is None:
+ continue
+
+ if isinstance(obj, property):
+ obj = obj.fget
+
+ # Skip if the parent's definition is decorated with `do_not_doc_inheritable`
+ # or `for_subclass_implementers`
+ if hasattr(obj, _DO_NOT_DOC_INHERITABLE):
+ return True
+
+ if hasattr(obj, _FOR_SUBCLASS_IMPLEMENTERS):
+ return True
+
+  # No blocking decorators --> don't skip
+ return False
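A small usage sketch (the Example class is hypothetical, not part of this change) showing how a docs walker consults these helpers; per the BUILD changes above, the actual consumers are the parser and doc generator libraries.

    from tensorflow.tools.docs import doc_controls

    class Example(object):

      @doc_controls.do_not_generate_docs
      def hidden(self):
        pass

      def visible(self):
        pass

    for attr in ('hidden', 'visible'):
      if doc_controls.should_skip_class_attr(Example, attr):
        continue
      print('would document Example.%s' % attr)  # only "visible" is printed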
diff --git a/tensorflow/tools/docs/doc_controls_test.py b/tensorflow/tools/docs/doc_controls_test.py
new file mode 100644
index 0000000000..d5eb4ffc00
--- /dev/null
+++ b/tensorflow/tools/docs/doc_controls_test.py
@@ -0,0 +1,220 @@
+# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ==============================================================================
+"""Tests for documentation control decorators."""
+
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+from tensorflow.python.platform import googletest
+from tensorflow.tools.docs import doc_controls
+
+
+class DocControlsTest(googletest.TestCase):
+
+ def test_do_not_generate_docs(self):
+
+ @doc_controls.do_not_generate_docs
+ def dummy_function():
+ pass
+
+ self.assertTrue(doc_controls.should_skip(dummy_function))
+
+ def test_do_not_doc_on_method(self):
+ """The simple decorator is not aware of inheritance."""
+
+ class Parent(object):
+
+ @doc_controls.do_not_generate_docs
+ def my_method(self):
+ pass
+
+ class Child(Parent):
+
+ def my_method(self):
+ pass
+
+ class GrandChild(Child):
+ pass
+
+ self.assertTrue(doc_controls.should_skip(Parent.my_method))
+ self.assertFalse(doc_controls.should_skip(Child.my_method))
+ self.assertFalse(doc_controls.should_skip(GrandChild.my_method))
+
+ self.assertTrue(doc_controls.should_skip_class_attr(Parent, 'my_method'))
+ self.assertFalse(doc_controls.should_skip_class_attr(Child, 'my_method'))
+ self.assertFalse(
+ doc_controls.should_skip_class_attr(GrandChild, 'my_method'))
+
+ def test_do_not_doc_inheritable(self):
+
+ class Parent(object):
+
+ @doc_controls.do_not_doc_inheritable
+ def my_method(self):
+ pass
+
+ class Child(Parent):
+
+ def my_method(self):
+ pass
+
+ class GrandChild(Child):
+ pass
+
+ self.assertTrue(doc_controls.should_skip(Parent.my_method))
+ self.assertFalse(doc_controls.should_skip(Child.my_method))
+ self.assertFalse(doc_controls.should_skip(GrandChild.my_method))
+
+ self.assertTrue(doc_controls.should_skip_class_attr(Parent, 'my_method'))
+ self.assertTrue(doc_controls.should_skip_class_attr(Child, 'my_method'))
+ self.assertTrue(
+ doc_controls.should_skip_class_attr(GrandChild, 'my_method'))
+
+ def test_do_not_doc_inheritable_property(self):
+
+ class Parent(object):
+
+ @property
+ @doc_controls.do_not_doc_inheritable
+ def my_method(self):
+ pass
+
+ class Child(Parent):
+
+ @property
+ def my_method(self):
+ pass
+
+ class GrandChild(Child):
+ pass
+
+ self.assertTrue(doc_controls.should_skip(Parent.my_method))
+ self.assertFalse(doc_controls.should_skip(Child.my_method))
+ self.assertFalse(doc_controls.should_skip(GrandChild.my_method))
+
+ self.assertTrue(doc_controls.should_skip_class_attr(Parent, 'my_method'))
+ self.assertTrue(doc_controls.should_skip_class_attr(Child, 'my_method'))
+ self.assertTrue(
+ doc_controls.should_skip_class_attr(GrandChild, 'my_method'))
+
+ def test_do_not_doc_inheritable_staticmethod(self):
+
+ class GrandParent(object):
+
+ def my_method(self):
+ pass
+
+ class Parent(GrandParent):
+
+ @staticmethod
+ @doc_controls.do_not_doc_inheritable
+ def my_method():
+ pass
+
+ class Child(Parent):
+
+ @staticmethod
+ def my_method():
+ pass
+
+ class GrandChild(Child):
+ pass
+
+ self.assertFalse(doc_controls.should_skip(GrandParent.my_method))
+ self.assertTrue(doc_controls.should_skip(Parent.my_method))
+ self.assertFalse(doc_controls.should_skip(Child.my_method))
+ self.assertFalse(doc_controls.should_skip(GrandChild.my_method))
+
+ self.assertFalse(
+ doc_controls.should_skip_class_attr(GrandParent, 'my_method'))
+ self.assertTrue(doc_controls.should_skip_class_attr(Parent, 'my_method'))
+ self.assertTrue(doc_controls.should_skip_class_attr(Child, 'my_method'))
+ self.assertTrue(
+ doc_controls.should_skip_class_attr(GrandChild, 'my_method'))
+
+ def test_for_subclass_implementers(self):
+
+ class GrandParent(object):
+
+ def my_method(self):
+ pass
+
+ class Parent(GrandParent):
+
+ @doc_controls.for_subclass_implementers
+ def my_method(self):
+ pass
+
+ class Child(Parent):
+ pass
+
+ class GrandChild(Child):
+
+ def my_method(self):
+ pass
+
+ class Grand2Child(Child):
+ pass
+
+ self.assertFalse(
+ doc_controls.should_skip_class_attr(GrandParent, 'my_method'))
+ self.assertFalse(doc_controls.should_skip_class_attr(Parent, 'my_method'))
+ self.assertTrue(doc_controls.should_skip_class_attr(Child, 'my_method'))
+ self.assertTrue(
+ doc_controls.should_skip_class_attr(GrandChild, 'my_method'))
+ self.assertTrue(
+ doc_controls.should_skip_class_attr(Grand2Child, 'my_method'))
+
+ def test_for_subclass_implementers_short_circuit(self):
+
+ class GrandParent(object):
+
+ @doc_controls.for_subclass_implementers
+ def my_method(self):
+ pass
+
+ class Parent(GrandParent):
+
+ def my_method(self):
+ pass
+
+ class Child(Parent):
+
+ @doc_controls.do_not_doc_inheritable
+ def my_method(self):
+ pass
+
+ class GrandChild(Child):
+
+ @doc_controls.for_subclass_implementers
+ def my_method(self):
+ pass
+
+ class Grand2Child(Child):
+ pass
+
+ self.assertFalse(
+ doc_controls.should_skip_class_attr(GrandParent, 'my_method'))
+ self.assertTrue(doc_controls.should_skip_class_attr(Parent, 'my_method'))
+ self.assertTrue(doc_controls.should_skip_class_attr(Child, 'my_method'))
+ self.assertFalse(
+ doc_controls.should_skip_class_attr(GrandChild, 'my_method'))
+ self.assertTrue(
+ doc_controls.should_skip_class_attr(Grand2Child, 'my_method'))
+
+
+if __name__ == '__main__':
+ googletest.main()
diff --git a/tensorflow/tools/docs/doc_generator_visitor.py b/tensorflow/tools/docs/doc_generator_visitor.py
index 259a4694fd..a66f3e4493 100644
--- a/tensorflow/tools/docs/doc_generator_visitor.py
+++ b/tensorflow/tools/docs/doc_generator_visitor.py
@@ -20,6 +20,7 @@ from __future__ import print_function
import six
+from tensorflow.python.util import tf_export
from tensorflow.python.util import tf_inspect
@@ -158,6 +159,55 @@ class DocGeneratorVisitor(object):
self._index[full_name] = child
self._tree[parent_name].append(name)
+ def _score_name(self, name):
+ """Return a tuple of scores indicating how to sort for the best name.
+
+ This function is meant to be used as the `key` to the `sorted` function.
+
+    This sort, in order of priority:
+      Prefers names referring to the defining class over a subclass.
+      Prefers names that are not in "contrib".
+      Prefers submodules to the root namespace.
+      Prefers short names: `tf.thing` over `tf.a.b.c.thing`.
+      Sorts lexicographically on name parts.
+
+ Args:
+ name: the full name to score, for example `tf.estimator.Estimator`
+
+ Returns:
+      A tuple of scores. When sorted, the preferred name will have the lowest
+ value.
+ """
+ parts = name.split('.')
+ short_name = parts[-1]
+
+ container = self._index['.'.join(parts[:-1])]
+
+ defining_class_score = 1
+ if tf_inspect.isclass(container):
+ if short_name in container.__dict__:
+ # prefer the defining class
+ defining_class_score = -1
+
+ contrib_score = -1
+ if 'contrib' in parts:
+ contrib_score = 1
+
+ while parts:
+ parts.pop()
+ container = self._index['.'.join(parts)]
+ if tf_inspect.ismodule(container):
+ break
+ module_length = len(parts)
+ if len(parts) == 2:
+ # `tf.submodule.thing` is better than `tf.thing`
+ module_length_score = -1
+ else:
+ # shorter is better
+ module_length_score = module_length
+
+ return (defining_class_score, contrib_score, module_length_score, name)
+
def _maybe_find_duplicates(self):
"""Compute data structures containing information about duplicates.
@@ -191,7 +241,7 @@ class DocGeneratorVisitor(object):
if (py_object is not None and
not isinstance(py_object, six.integer_types + six.string_types +
(six.binary_type, six.text_type, float, complex, bool))
- and py_object is not ()):
+ and py_object is not ()): # pylint: disable=literal-comparison
object_id = id(py_object)
if object_id in reverse_index:
master_name = reverse_index[object_id]
@@ -201,7 +251,6 @@ class DocGeneratorVisitor(object):
raw_duplicates[master_name] = [master_name, full_name]
else:
reverse_index[object_id] = full_name
-
# Decide on master names, rewire duplicates and make a duplicate_of map
# mapping all non-master duplicates to the master name. The master symbol
# does not have an entry in this map.
@@ -211,10 +260,15 @@ class DocGeneratorVisitor(object):
duplicates = {}
for names in raw_duplicates.values():
names = sorted(names)
-
- # Choose the lexicographically first name with the minimum number of
- # submodules. This will prefer highest level namespace for any symbol.
- master_name = min(names, key=lambda name: name.count('.'))
+ master_name = (
+ tf_export.get_canonical_name_for_symbol(self._index[names[0]])
+ if names else None)
+ if master_name:
+ master_name = 'tf.%s' % master_name
+ else:
+      # Choose the master name with a lexical sort on the tuples returned by
+      # _score_name.
+ master_name = min(names, key=self._score_name)
duplicates[master_name] = names
for name in names:
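
For illustration, the preference order `_score_name` encodes can be sketched with a standalone scoring function. This is a simplification (the real method also scores the defining class and walks `self._index` to locate the enclosing module), and the candidate names are hypothetical:

    def toy_score(name):
        # Mirrors the contrib / module-depth / lexicographic parts of
        # _score_name; lower tuples sort first.
        parts = name.split('.')
        contrib_score = 1 if 'contrib' in parts else -1
        module_length = len(parts) - 1  # crude stand-in for the module walk
        module_length_score = -1 if module_length == 2 else module_length
        return (contrib_score, module_length_score, name)

    candidates = ['tf.contrib.foo.Thing', 'tf.Thing', 'tf.foo.Thing']
    print(min(candidates, key=toy_score))  # -> 'tf.foo.Thing'
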
diff --git a/tensorflow/tools/docs/doc_generator_visitor_test.py b/tensorflow/tools/docs/doc_generator_visitor_test.py
index cf5be45f40..1c2635d4a8 100644
--- a/tensorflow/tools/docs/doc_generator_visitor_test.py
+++ b/tensorflow/tools/docs/doc_generator_visitor_test.py
@@ -18,8 +18,21 @@ from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
+import types
+
from tensorflow.python.platform import googletest
from tensorflow.tools.docs import doc_generator_visitor
+from tensorflow.tools.docs import generate_lib
+
+
+class NoDunderVisitor(doc_generator_visitor.DocGeneratorVisitor):
+
+ def __call__(self, parent_name, parent, children):
+ """Drop all the dunder methods to make testing easier."""
+ children = [
+ (name, obj) for (name, obj) in children if not name.startswith('_')
+ ]
+ super(NoDunderVisitor, self).__call__(parent_name, parent, children)
class DocGeneratorVisitorTest(googletest.TestCase):
@@ -57,52 +70,184 @@ class DocGeneratorVisitorTest(googletest.TestCase):
with self.assertRaises(RuntimeError):
visitor('non_class_or_module', 'non_class_or_module_object', [])
- def test_duplicates(self):
- visitor = doc_generator_visitor.DocGeneratorVisitor()
- visitor(
- 'submodule.DocGeneratorVisitor',
- doc_generator_visitor.DocGeneratorVisitor,
- [('index', doc_generator_visitor.DocGeneratorVisitor.index),
- ('index2', doc_generator_visitor.DocGeneratorVisitor.index)])
- visitor(
- 'submodule2.DocGeneratorVisitor',
- doc_generator_visitor.DocGeneratorVisitor,
- [('index', doc_generator_visitor.DocGeneratorVisitor.index),
- ('index2', doc_generator_visitor.DocGeneratorVisitor.index)])
- visitor(
- 'DocGeneratorVisitor2',
- doc_generator_visitor.DocGeneratorVisitor,
- [('index', doc_generator_visitor.DocGeneratorVisitor.index),
- ('index2', doc_generator_visitor.DocGeneratorVisitor.index)])
-
- # The shorter path should be master, or if equal, the lexicographically
- # first will be.
- self.assertEqual(
- {'DocGeneratorVisitor2': sorted(['submodule.DocGeneratorVisitor',
- 'submodule2.DocGeneratorVisitor',
- 'DocGeneratorVisitor2']),
- 'DocGeneratorVisitor2.index': sorted([
- 'submodule.DocGeneratorVisitor.index',
- 'submodule.DocGeneratorVisitor.index2',
- 'submodule2.DocGeneratorVisitor.index',
- 'submodule2.DocGeneratorVisitor.index2',
- 'DocGeneratorVisitor2.index',
- 'DocGeneratorVisitor2.index2'
- ]),
- }, visitor.duplicates)
- self.assertEqual({
- 'submodule.DocGeneratorVisitor': 'DocGeneratorVisitor2',
- 'submodule.DocGeneratorVisitor.index': 'DocGeneratorVisitor2.index',
- 'submodule.DocGeneratorVisitor.index2': 'DocGeneratorVisitor2.index',
- 'submodule2.DocGeneratorVisitor': 'DocGeneratorVisitor2',
- 'submodule2.DocGeneratorVisitor.index': 'DocGeneratorVisitor2.index',
- 'submodule2.DocGeneratorVisitor.index2': 'DocGeneratorVisitor2.index',
- 'DocGeneratorVisitor2.index2': 'DocGeneratorVisitor2.index'
+ def test_duplicates_module_class_depth(self):
+
+ class Parent(object):
+
+ class Nested(object):
+ pass
+
+ tf = types.ModuleType('tf')
+ tf.Parent = Parent
+ tf.submodule = types.ModuleType('submodule')
+ tf.submodule.Parent = Parent
+
+ visitor = generate_lib.extract(
+ [('tf', tf)],
+ private_map={},
+ do_not_descend_map={},
+ visitor_cls=NoDunderVisitor)
+
+ self.assertEqual({
+ 'tf.submodule.Parent':
+ sorted([
+ 'tf.Parent',
+ 'tf.submodule.Parent',
+ ]),
+ 'tf.submodule.Parent.Nested':
+ sorted([
+ 'tf.Parent.Nested',
+ 'tf.submodule.Parent.Nested',
+ ]),
+ }, visitor.duplicates)
+
+ self.assertEqual({
+ 'tf.Parent.Nested': 'tf.submodule.Parent.Nested',
+ 'tf.Parent': 'tf.submodule.Parent',
+ }, visitor.duplicate_of)
+
+ self.assertEqual({
+ id(Parent): 'tf.submodule.Parent',
+ id(Parent.Nested): 'tf.submodule.Parent.Nested',
+ id(tf): 'tf',
+ id(tf.submodule): 'tf.submodule',
+ }, visitor.reverse_index)
+
+ def test_duplicates_contrib(self):
+
+ class Parent(object):
+ pass
+
+ tf = types.ModuleType('tf')
+ tf.contrib = types.ModuleType('contrib')
+ tf.submodule = types.ModuleType('submodule')
+ tf.contrib.Parent = Parent
+ tf.submodule.Parent = Parent
+
+ visitor = generate_lib.extract(
+ [('tf', tf)],
+ private_map={},
+ do_not_descend_map={},
+ visitor_cls=NoDunderVisitor)
+
+ self.assertEqual({
+ 'tf.submodule.Parent':
+ sorted(['tf.contrib.Parent', 'tf.submodule.Parent']),
+ }, visitor.duplicates)
+
+ self.assertEqual({
+ 'tf.contrib.Parent': 'tf.submodule.Parent',
+ }, visitor.duplicate_of)
+
+ self.assertEqual({
+ id(tf): 'tf',
+ id(tf.submodule): 'tf.submodule',
+ id(Parent): 'tf.submodule.Parent',
+ id(tf.contrib): 'tf.contrib',
+ }, visitor.reverse_index)
+
+ def test_duplicates_defining_class(self):
+
+ class Parent(object):
+ obj1 = object()
+
+ class Child(Parent):
+ pass
+
+ tf = types.ModuleType('tf')
+ tf.Parent = Parent
+ tf.Child = Child
+
+ visitor = generate_lib.extract(
+ [('tf', tf)],
+ private_map={},
+ do_not_descend_map={},
+ visitor_cls=NoDunderVisitor)
+
+ self.assertEqual({
+ 'tf.Parent.obj1': sorted([
+ 'tf.Parent.obj1',
+ 'tf.Child.obj1',
+ ]),
+ }, visitor.duplicates)
+
+ self.assertEqual({
+ 'tf.Child.obj1': 'tf.Parent.obj1',
}, visitor.duplicate_of)
+
+ self.assertEqual({
+ id(tf): 'tf',
+ id(Parent): 'tf.Parent',
+ id(Child): 'tf.Child',
+ id(Parent.obj1): 'tf.Parent.obj1',
+ }, visitor.reverse_index)
+
+ def test_duplicates_module_depth(self):
+
+ class Parent(object):
+ pass
+
+ tf = types.ModuleType('tf')
+ tf.submodule = types.ModuleType('submodule')
+ tf.submodule.submodule2 = types.ModuleType('submodule2')
+ tf.Parent = Parent
+ tf.submodule.submodule2.Parent = Parent
+
+ visitor = generate_lib.extract(
+ [('tf', tf)],
+ private_map={},
+ do_not_descend_map={},
+ visitor_cls=NoDunderVisitor)
+
+ self.assertEqual({
+ 'tf.Parent': sorted(['tf.Parent', 'tf.submodule.submodule2.Parent']),
+ }, visitor.duplicates)
+
+ self.assertEqual({
+ 'tf.submodule.submodule2.Parent': 'tf.Parent'
+ }, visitor.duplicate_of)
+
+ self.assertEqual({
+ id(tf): 'tf',
+ id(tf.submodule): 'tf.submodule',
+ id(tf.submodule.submodule2): 'tf.submodule.submodule2',
+ id(Parent): 'tf.Parent',
+ }, visitor.reverse_index)
+
+ def test_duplicates_name(self):
+
+ class Parent(object):
+ obj1 = object()
+
+ Parent.obj2 = Parent.obj1
+
+ tf = types.ModuleType('tf')
+ tf.submodule = types.ModuleType('submodule')
+ tf.submodule.Parent = Parent
+
+ visitor = generate_lib.extract(
+ [('tf', tf)],
+ private_map={},
+ do_not_descend_map={},
+ visitor_cls=NoDunderVisitor)
+
+ self.assertEqual({
+ 'tf.submodule.Parent.obj1':
+ sorted([
+ 'tf.submodule.Parent.obj1',
+ 'tf.submodule.Parent.obj2',
+ ]),
+ }, visitor.duplicates)
+
+ self.assertEqual({
+ 'tf.submodule.Parent.obj2': 'tf.submodule.Parent.obj1',
+ }, visitor.duplicate_of)
+
self.assertEqual({
- id(doc_generator_visitor.DocGeneratorVisitor): 'DocGeneratorVisitor2',
- id(doc_generator_visitor.DocGeneratorVisitor.index):
- 'DocGeneratorVisitor2.index',
+ id(tf): 'tf',
+ id(tf.submodule): 'tf.submodule',
+ id(Parent): 'tf.submodule.Parent',
+ id(Parent.obj1): 'tf.submodule.Parent.obj1',
}, visitor.reverse_index)
if __name__ == '__main__':
diff --git a/tensorflow/tools/docs/generate.py b/tensorflow/tools/docs/generate.py
index c750539a76..fc93085e3e 100644
--- a/tensorflow/tools/docs/generate.py
+++ b/tensorflow/tools/docs/generate.py
@@ -43,10 +43,6 @@ if __name__ == '__main__':
flags = doc_generator.parse_known_args()
- # Suppress documentation of some symbols that users should never use.
- del tf.layers.Layer.inbound_nodes
- del tf.layers.Layer.outbound_nodes
-
# tf_debug is not imported with tf, it's a separate module altogether
doc_generator.set_py_modules([('tf', tf), ('tfdbg', tf_debug)])
diff --git a/tensorflow/tools/docs/generate_lib.py b/tensorflow/tools/docs/generate_lib.py
index 111d54d820..77a3ca2052 100644
--- a/tensorflow/tools/docs/generate_lib.py
+++ b/tensorflow/tools/docs/generate_lib.py
@@ -21,36 +21,27 @@ from __future__ import print_function
import argparse
import fnmatch
import os
+import shutil
+import tempfile
import six
from tensorflow.python.util import tf_inspect
from tensorflow.tools.common import public_api
from tensorflow.tools.common import traverse
+from tensorflow.tools.docs import doc_controls
from tensorflow.tools.docs import doc_generator_visitor
from tensorflow.tools.docs import parser
from tensorflow.tools.docs import pretty_docs
from tensorflow.tools.docs import py_guide_parser
-def _is_free_function(py_object, full_name, index):
- """Check if input is a free function (and not a class- or static method)."""
- if not tf_inspect.isfunction(py_object):
- return False
-
- # Static methods are functions to tf_inspect (in 2.7), so check if the parent
- # is a class. If there is no parent, it's not a function.
- if '.' not in full_name:
- return False
-
- parent_name = full_name.rsplit('.', 1)[0]
- if tf_inspect.isclass(index[parent_name]):
- return False
-
- return True
-
-
-def write_docs(output_dir, parser_config, yaml_toc, root_title='TensorFlow'):
+def write_docs(output_dir,
+ parser_config,
+ yaml_toc,
+ root_title='TensorFlow',
+ search_hints=True,
+ site_api_path=''):
"""Write previously extracted docs to disk.
Write a docs page for each symbol included in the indices of parser_config to
@@ -66,6 +57,10 @@ def write_docs(output_dir, parser_config, yaml_toc, root_title='TensorFlow'):
indices.
yaml_toc: Set to `True` to generate a "_toc.yaml" file.
root_title: The title name for the root level index.md.
+    search_hints: (bool) Include metadata search hints at the top of each
+      output file.
+ site_api_path: The output path relative to the site root. Used in the
+ `_toc.yaml` and `_redirects.yaml` files.
Raises:
ValueError: if `output_dir` is not an absolute path
@@ -75,12 +70,8 @@ def write_docs(output_dir, parser_config, yaml_toc, root_title='TensorFlow'):
raise ValueError("'output_dir' must be an absolute path.\n"
" output_dir='%s'" % output_dir)
- try:
- if not os.path.exists(output_dir):
- os.makedirs(output_dir)
- except OSError as e:
- print('Creating output dir "%s" failed: %s' % (output_dir, e))
- raise
+ if not os.path.exists(output_dir):
+ os.makedirs(output_dir)
# These dictionaries are used for table-of-contents generation below
# They will contain, after the for-loop below::
@@ -89,6 +80,9 @@ def write_docs(output_dir, parser_config, yaml_toc, root_title='TensorFlow'):
# - symbol name(string):pathname (string)
symbol_to_file = {}
+ # Collect redirects for an api _redirects.yaml file.
+ redirects = []
+
# Parse and write Markdown pages, resolving cross-links (@{symbol}).
for full_name, py_object in six.iteritems(parser_config.index):
parser_config.reference_resolver.current_doc_full_name = full_name
@@ -98,7 +92,7 @@ def write_docs(output_dir, parser_config, yaml_toc, root_title='TensorFlow'):
# Methods and some routines are documented only as part of their class.
if not (tf_inspect.ismodule(py_object) or tf_inspect.isclass(py_object) or
- _is_free_function(py_object, full_name, parser_config.index)):
+ parser.is_free_function(py_object, full_name, parser_config.index)):
continue
sitepath = os.path.join('api_docs/python',
@@ -123,8 +117,6 @@ def write_docs(output_dir, parser_config, yaml_toc, root_title='TensorFlow'):
module_children.setdefault(subname, []).append(full_name)
break
- print('Writing docs for %s (%r).' % (full_name, py_object))
-
# Generate docs for `py_object`, resolving references.
page_info = parser.docs_for_object(full_name, py_object, parser_config)
@@ -134,15 +126,43 @@ def write_docs(output_dir, parser_config, yaml_toc, root_title='TensorFlow'):
if not os.path.exists(directory):
os.makedirs(directory)
# This function returns raw bytes in PY2 or unicode in PY3.
- text = pretty_docs.build_md_page(page_info)
+ if search_hints:
+ content = [page_info.get_metadata_html()]
+ else:
+ content = ['']
+
+ content.append(pretty_docs.build_md_page(page_info))
+ text = '\n'.join(content)
if six.PY3:
text = text.encode('utf-8')
with open(path, 'wb') as f:
f.write(text)
- except OSError as e:
- print('Cannot write documentation for %s to %s: %s' % (full_name,
- directory, e))
- raise
+ except OSError:
+ raise OSError(
+ 'Cannot write documentation for %s to %s' % (full_name, directory))
+
+ duplicates = parser_config.duplicates.get(full_name, [])
+ if not duplicates:
+ continue
+
+ duplicates = [item for item in duplicates if item != full_name]
+
+ for dup in duplicates:
+ from_path = os.path.join(site_api_path, dup.replace('.', '/'))
+ to_path = os.path.join(site_api_path, full_name.replace('.', '/'))
+ redirects.append((
+ os.path.join('/', from_path),
+ os.path.join('/', to_path)))
+
+ if redirects:
+ redirects = sorted(redirects)
+ template = ('- from: {}\n'
+ ' to: {}\n')
+ redirects = [template.format(f, t) for f, t in redirects]
+ api_redirects_path = os.path.join(output_dir, '_redirects.yaml')
+ with open(api_redirects_path, 'w') as redirect_file:
+ redirect_file.write('redirects:\n')
+ redirect_file.write(''.join(redirects))
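
For illustration, a minimal sketch of the `_redirects.yaml` text the block above emits, assuming one hypothetical duplicate symbol (`tf.foo` being an alias of `tf.bar.foo`):

    pairs = sorted([('/api_docs/python/tf/foo', '/api_docs/python/tf/bar/foo')])
    text = 'redirects:\n' + ''.join(
        '- from: {}\n  to: {}\n'.format(f, t) for f, t in pairs)
    print(text)
    # redirects:
    # - from: /api_docs/python/tf/foo
    #   to: /api_docs/python/tf/bar/foo
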
if yaml_toc:
# Generate table of contents
@@ -172,7 +192,8 @@ def write_docs(output_dir, parser_config, yaml_toc, root_title='TensorFlow'):
'- title: ' + title,
' section:',
' - title: Overview',
- ' path: /TARGET_DOC_ROOT/VERSION/' + symbol_to_file[module]]
+ ' path: ' + os.path.join('/', site_api_path,
+ symbol_to_file[module])]
header = ''.join([indent+line+'\n' for line in header])
f.write(header)
@@ -183,7 +204,8 @@ def write_docs(output_dir, parser_config, yaml_toc, root_title='TensorFlow'):
for full_name in symbols_in_module:
item = [
' - title: ' + full_name[len(module) + 1:],
- ' path: /TARGET_DOC_ROOT/VERSION/' + symbol_to_file[full_name]]
+ ' path: ' + os.path.join('/', site_api_path,
+ symbol_to_file[full_name])]
item = ''.join([indent+line+'\n' for line in item])
f.write(item)
@@ -204,12 +226,16 @@ def add_dict_to_dict(add_from, add_to):
# Exclude some libraries in contrib from the documentation altogether.
def _get_default_private_map():
- return {'tf.test': ['mock']}
+ return {
+ 'tf.contrib.autograph': ['utils', 'operators'],
+ 'tf.test': ['mock'],
+ 'tf.compat': ['v1', 'v2'],
+ }
# Exclude members of some libraries.
def _get_default_do_not_descend_map():
- # TODO(wicke): Shrink this list once the modules get sealed.
+  # TODO(markdaoust): Use doc_controls decorators, locally, instead.
return {
'tf': ['cli', 'lib', 'wrappers'],
'tf.contrib': [
@@ -253,11 +279,23 @@ def _get_default_do_not_descend_map():
}
-def extract(py_modules, private_map, do_not_descend_map):
+class DocControlsAwareCrawler(public_api.PublicAPIVisitor):
+ """A `docs_controls` aware API-crawler."""
+
+ def _is_private(self, path, name, obj):
+ if doc_controls.should_skip(obj):
+ return True
+ return super(DocControlsAwareCrawler, self)._is_private(path, name, obj)
+
+
+def extract(py_modules,
+ private_map,
+ do_not_descend_map,
+ visitor_cls=doc_generator_visitor.DocGeneratorVisitor):
"""Extract docs from tf namespace and write them to disk."""
# Traverse the first module.
- visitor = doc_generator_visitor.DocGeneratorVisitor(py_modules[0][0])
- api_visitor = public_api.PublicAPIVisitor(visitor)
+ visitor = visitor_cls(py_modules[0][0])
+ api_visitor = DocControlsAwareCrawler(visitor)
api_visitor.set_root_name(py_modules[0][0])
add_dict_to_dict(private_map, api_visitor.private_map)
add_dict_to_dict(do_not_descend_map, api_visitor.do_not_descend_map)
@@ -363,8 +401,8 @@ class _GenerateGuideIndex(py_guide_parser.PyGuideParser):
self.section_tag = tag
def process_line(self, _, line):
- """Index @{symbol} references as in the current file & section."""
- for match in parser.SYMBOL_REFERENCE_RE.finditer(line):
+ """Index the file and section of each `symbol` reference."""
+ for match in parser.AUTO_REFERENCE_RE.finditer(line):
val = self.index.get(match.group(1), [])
val.append(
_GuideRef(self.base_name, self.title, self.section_title,
@@ -382,16 +420,44 @@ def _build_guide_index(guide_src_dir):
class _UpdateTags(py_guide_parser.PyGuideParser):
- """Rewrites a Python guide so that each section has an explicit tag."""
+ """Rewrites a Python guide so that each section has an explicit id tag.
+
+ "section" here refers to blocks delimited by second level headings.
+ """
def process_section(self, line_number, section_title, tag):
self.replace_line(line_number, '<h2 id="%s">%s</h2>' % (tag, section_title))
+def update_id_tags_inplace(src_dir):
+ """Set explicit ids on all second-level headings to ensure back-links work.
+
+ Args:
+ src_dir: The directory of md-files to convert (inplace).
+ """
+ tag_updater = _UpdateTags()
+
+ for dirpath, _, filenames in os.walk(src_dir):
+ for base_name in filenames:
+ if not base_name.endswith('.md'):
+ continue
+ full_path = os.path.join(src_dir, dirpath, base_name)
+
+ # Tag updater loads the file, makes the replacements, and returns the
+ # modified file contents
+ content = tag_updater.process(full_path)
+ with open(full_path, 'w') as f:
+ f.write(content)
+
+
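A hedged usage sketch of `update_id_tags_inplace`; the expected output mirrors the assertions in `generate_lib_test.py`:

    import os
    import tempfile

    from tensorflow.tools.docs import generate_lib

    src_dir = tempfile.mkdtemp()
    path = os.path.join(src_dir, 'guide.md')
    with open(path, 'w') as f:
        f.write('# A Level 1 Heading\n## A Level 2 Heading')

    generate_lib.update_id_tags_inplace(src_dir)

    with open(path) as f:
        print(f.read())
    # # A Level 1 Heading
    # <h2 id="A_Level_2_Heading">A Level 2 Heading</h2>
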
EXCLUDED = set(['__init__.py', 'OWNERS', 'README.txt'])
-def _other_docs(src_dir, output_dir, reference_resolver, file_pattern='*.md'):
+def replace_refs(src_dir,
+ output_dir,
+ reference_resolver,
+ file_pattern='*.md',
+ api_docs_relpath='api_docs'):
"""Fix @{} references in all files under `src_dir` matching `file_pattern`.
A matching directory structure, with the modified files is
@@ -410,52 +476,44 @@ def _other_docs(src_dir, output_dir, reference_resolver, file_pattern='*.md'):
reference_resolver: A `parser.ReferenceResolver` to make the replacements.
file_pattern: Only replace references in files matching file_patters,
using fnmatch. Non-matching files are copied unchanged.
+    api_docs_relpath: Relative path from `src_dir` to the api_docs directory.
"""
# Iterate through all the source files and process them.
- tag_updater = _UpdateTags()
for dirpath, _, filenames in os.walk(src_dir):
+ depth = os.path.relpath(src_dir, start=dirpath)
# How to get from `dirpath` to api_docs/python/
- relative_path_to_root = os.path.relpath(
- path=os.path.join(src_dir, 'api_docs/python'), start=dirpath)
+ relative_path_to_root = os.path.join(depth, api_docs_relpath, 'python')
# Make the directory under output_dir.
new_dir = os.path.join(output_dir,
os.path.relpath(path=dirpath, start=src_dir))
- try:
- if not os.path.exists(new_dir):
- os.makedirs(new_dir)
- except OSError as e:
- print('Creating output dir "%s" failed: %s' % (new_dir, e))
- raise
+ if not os.path.exists(new_dir):
+ os.makedirs(new_dir)
for base_name in filenames:
if base_name in EXCLUDED:
- print('Skipping excluded file %s...' % base_name)
continue
full_in_path = os.path.join(dirpath, base_name)
+ # Set the `current_doc_full_name` so bad files can be reported on errors.
reference_resolver.current_doc_full_name = full_in_path
suffix = os.path.relpath(path=full_in_path, start=src_dir)
full_out_path = os.path.join(output_dir, suffix)
+ # Copy files that do not match the file_pattern, unmodified.
if not fnmatch.fnmatch(base_name, file_pattern):
- print('Copying un-matched file %s...' % suffix)
- open(full_out_path, 'wb').write(open(full_in_path, 'rb').read())
+ if full_in_path != full_out_path:
+ shutil.copyfile(full_in_path, full_out_path)
continue
- if dirpath.endswith('/api_guides/python'):
- print('Processing Python guide %s...' % base_name)
- content = tag_updater.process(full_in_path)
- else:
- print('Processing doc %s...' % suffix)
- content = open(full_in_path, 'rb').read().decode('utf-8')
+
+ with open(full_in_path, 'rb') as f:
+ content = f.read().decode('utf-8')
content = reference_resolver.replace_references(content,
relative_path_to_root)
with open(full_out_path, 'wb') as f:
f.write(content.encode('utf-8'))
- print('Done.')
-
class DocGenerator(object):
"""Main entry point for generating docs."""
@@ -467,6 +525,25 @@ class DocGenerator(object):
self._do_not_descend_map = _get_default_do_not_descend_map()
self.yaml_toc = True
+ self.argument_parser.add_argument(
+ '--no_search_hints',
+ dest='search_hints',
+ action='store_false',
+ default=True)
+
+ self.argument_parser.add_argument(
+ '--site_api_path',
+ type=str, default='',
+        help='The path from the site-root to the api_docs '
+        'directory for this project')
+
+ self.argument_parser.add_argument(
+ '--api_cache_out_path',
+ type=str,
+ default=None,
+ help='Path to store a json-serialized api-index, so links can be '
+ 'inserted into docs without rebuilding the api_docs')
+
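A hedged sketch of how the new flags parse; the flag values and output directory here are hypothetical:

    from tensorflow.tools.docs import generate_lib

    doc_generator = generate_lib.DocGenerator()
    doc_generator.add_output_dir_argument()
    flags, _ = doc_generator.argument_parser.parse_known_args([
        '--output_dir=/tmp/tf_docs',
        '--site_api_path=api_docs/python',
        '--no_search_hints',
    ])
    print(flags.search_hints, flags.site_api_path)  # False api_docs/python
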
def add_output_dir_argument(self):
self.argument_parser.add_argument(
'--output_dir',
@@ -479,9 +556,9 @@ class DocGenerator(object):
self.argument_parser.add_argument(
'--src_dir',
type=str,
- default=None,
- required=True,
- help='Directory with the source docs.')
+ default=tempfile.mkdtemp(),
+ required=False,
+ help='Optional directory of source docs to add api_docs links to')
def add_base_dir_argument(self, default_base_dir):
self.argument_parser.add_argument(
@@ -536,15 +613,46 @@ class DocGenerator(object):
self._do_not_descend_map)
def build(self, flags):
- """Actually build the docs."""
+ """Build all the docs.
+
+    This produces two outputs:
+
+ python api docs:
+
+ * generated from modules set with `set_py_modules`.
+ * written to '{FLAGS.output_dir}/api_docs/python/'
+
+ non-api docs:
+
+ * Everything in '{FLAGS.src_dir}' is copied to '{FLAGS.output_dir}'.
+ * '@{}' references in '.md' files are replaced with links.
+ * '.md' files under 'api_guides/python' have explicit ids set for their
+ second level headings.
+
+ Args:
+ flags:
+ * src_dir: Where to fetch the non-api-docs.
+ * base_dir: Base of the docs directory (Used to build correct
+ relative links).
+ * output_dir: Where to write the resulting docs.
+
+ Returns:
+ The number of errors encountered while processing.
+ """
+ # Extract the python api from the _py_modules
doc_index = build_doc_index(flags.src_dir)
visitor = self.run_extraction()
reference_resolver = self.make_reference_resolver(visitor, doc_index)
+ if getattr(flags, 'api_cache_out_path', None):
+ reference_resolver.to_json_file(flags.api_cache_out_path)
+
+ # Build the guide_index for the api_docs back links.
root_title = getattr(flags, 'root_title', 'TensorFlow')
guide_index = _build_guide_index(
os.path.join(flags.src_dir, 'api_guides/python'))
+ # Write the api docs.
parser_config = self.make_parser_config(visitor, reference_resolver,
guide_index, flags.base_dir)
output_dir = os.path.join(flags.output_dir, 'api_docs/python')
@@ -553,9 +661,19 @@ class DocGenerator(object):
output_dir,
parser_config,
yaml_toc=self.yaml_toc,
- root_title=root_title)
- _other_docs(flags.src_dir, flags.output_dir, reference_resolver)
-
+ root_title=root_title,
+ search_hints=getattr(flags, 'search_hints', True),
+ site_api_path=getattr(flags, 'site_api_path', ''))
+
+ # Replace all the @{} references in files under `FLAGS.src_dir`
+ replace_refs(flags.src_dir, flags.output_dir, reference_resolver, '*.md')
+ # Fix the tags in the guide dir.
+ guide_dir = os.path.join(flags.output_dir, 'api_guides/python')
+ if os.path.exists(guide_dir):
+ update_id_tags_inplace(guide_dir)
+
+ # Report all errors found by the reference resolver, and return the error
+ # code.
parser_config.reference_resolver.log_errors()
return parser_config.reference_resolver.num_errors()
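
A hedged end-to-end sketch of driving `DocGenerator` programmatically, loosely mirroring `tensorflow/tools/docs/generate.py`; the base-dir value is an assumption:

    import os
    import sys

    import tensorflow as tf
    from tensorflow.tools.docs import generate_lib

    doc_generator = generate_lib.DocGenerator()
    doc_generator.add_output_dir_argument()
    doc_generator.add_src_dir_argument()
    doc_generator.add_base_dir_argument(os.path.dirname(tf.__file__))  # assumed default
    flags = doc_generator.parse_known_args()
    doc_generator.set_py_modules([('tf', tf)])
    sys.exit(doc_generator.build(flags))  # exits with the number of errors found
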
diff --git a/tensorflow/tools/docs/generate_lib_test.py b/tensorflow/tools/docs/generate_lib_test.py
index ea6d28a02b..de18b13254 100644
--- a/tensorflow/tools/docs/generate_lib_test.py
+++ b/tensorflow/tools/docs/generate_lib_test.py
@@ -51,7 +51,9 @@ class DummyVisitor(object):
class GenerateTest(googletest.TestCase):
- def test_write(self):
+ def get_test_objects(self):
+ # These are all mutable objects, so rebuild them for each test.
+ # Don't cache the objects.
module = sys.modules[__name__]
index = {
@@ -98,9 +100,25 @@ class GenerateTest(googletest.TestCase):
guide_index={},
base_dir=base_dir)
+ return reference_resolver, parser_config
+
+ def test_write(self):
+ _, parser_config = self.get_test_objects()
+
output_dir = googletest.GetTempDir()
- generate_lib.write_docs(output_dir, parser_config, yaml_toc=True)
+ generate_lib.write_docs(output_dir, parser_config, yaml_toc=True,
+ site_api_path='api_docs/python')
+
+ # Check redirects
+ redirects_file = os.path.join(output_dir, '_redirects.yaml')
+ self.assertTrue(os.path.exists(redirects_file))
+ with open(redirects_file) as f:
+ redirects = f.read()
+ self.assertEqual(redirects.split(), [
+ 'redirects:', '-', 'from:', '/api_docs/python/tf/test_function', 'to:',
+ '/api_docs/python/tf/TestModule/test_function'
+ ])
# Make sure that the right files are written to disk.
self.assertTrue(os.path.exists(os.path.join(output_dir, 'index.md')))
@@ -127,6 +145,107 @@ class GenerateTest(googletest.TestCase):
os.path.exists(
os.path.join(output_dir, 'tf/TestModule/test_function.md')))
+ def test_update_id_tags_inplace(self):
+ test_dir = googletest.GetTempDir()
+ test_sub_dir = os.path.join(test_dir, 'a/b')
+ os.makedirs(test_sub_dir)
+
+ test_path1 = os.path.join(test_dir, 'file1.md')
+ test_path2 = os.path.join(test_sub_dir, 'file2.md')
+ test_path3 = os.path.join(test_sub_dir, 'file3.notmd')
+
+ with open(test_path1, 'w') as f:
+ f.write('## abc&123')
+
+ with open(test_path2, 'w') as f:
+ f.write('# A Level 1 Heading\n')
+ f.write('## A Level 2 Heading')
+
+ with open(test_path3, 'w') as f:
+ f.write("## don\'t change this")
+
+ generate_lib.update_id_tags_inplace(test_dir)
+
+ with open(test_path1) as f:
+ content = f.read()
+
+ self.assertEqual(content, '<h2 id="abc_123">abc&123</h2>')
+
+ with open(test_path2) as f:
+ content = f.read()
+
+ self.assertEqual(
+ content, '# A Level 1 Heading\n'
+ '<h2 id="A_Level_2_Heading">A Level 2 Heading</h2>')
+
+ with open(test_path3) as f:
+ content = f.read()
+
+ self.assertEqual(content, "## don\'t change this")
+
+  def test_replace_refs(self):
+ test_dir = googletest.GetTempDir()
+ test_in_dir = os.path.join(test_dir, 'in')
+ test_in_dir_a = os.path.join(test_dir, 'in/a')
+ test_in_dir_b = os.path.join(test_dir, 'in/b')
+ os.makedirs(test_in_dir)
+ os.makedirs(test_in_dir_a)
+ os.makedirs(test_in_dir_b)
+
+ test_out_dir = os.path.join(test_dir, 'out')
+ os.makedirs(test_out_dir)
+
+ test_path1 = os.path.join(test_in_dir_a, 'file1.md')
+ test_path2 = os.path.join(test_in_dir_b, 'file2.md')
+ test_path3 = os.path.join(test_in_dir_b, 'file3.notmd')
+ test_path4 = os.path.join(test_in_dir_b, 'OWNERS')
+
+ with open(test_path1, 'w') as f:
+ f.write('Use `tf.test_function` to test things.')
+
+ with open(test_path2, 'w') as f:
+ f.write('Use @{tf.TestModule.TestClass.ChildClass} to test things.\n'
+ "`tf.whatever` doesn't exist")
+
+ with open(test_path3, 'w') as f:
+ file3_content = (
+ 'Not a .md file. Should be copied unchanged:'
+ '@{tf.TestModule.TestClass.ChildClass}, `tf.test_function`')
+ f.write(file3_content)
+
+ with open(test_path4, 'w') as f:
+ f.write('')
+
+ reference_resolver, _ = self.get_test_objects()
+ generate_lib.replace_refs(test_in_dir, test_out_dir, reference_resolver,
+ '*.md')
+
+ with open(os.path.join(test_out_dir, 'a/file1.md')) as f:
+ content = f.read()
+ self.assertEqual(
+ content,
+ 'Use <a href="../api_docs/python/tf/TestModule/test_function.md">'
+ '<code>tf.test_function</code></a> to test things.')
+
+ with open(os.path.join(test_out_dir, 'b/file2.md')) as f:
+ content = f.read()
+ self.assertEqual(
+ content,
+ 'Use '
+ '<a href="../api_docs/python/tf/TestModule/TestClass/ChildClass.md">'
+ '<code>tf.TestModule.TestClass.ChildClass</code></a> '
+ 'to test things.\n'
+ '`tf.whatever` doesn\'t exist')
+
+ with open(os.path.join(test_out_dir, 'b/file3.notmd')) as f:
+ content = f.read()
+ self.assertEqual(content, file3_content)
+
+ with self.assertRaises(IOError):
+ # This should fail. The OWNERS file should not be copied
+ with open(os.path.join(test_out_dir, 'b/OWNERS')) as f:
+ content = f.read()
+
if __name__ == '__main__':
googletest.main()
diff --git a/tensorflow/tools/docs/parser.py b/tensorflow/tools/docs/parser.py
index cec23b1a36..83b4bf8128 100644
--- a/tensorflow/tools/docs/parser.py
+++ b/tensorflow/tools/docs/parser.py
@@ -21,16 +21,40 @@ from __future__ import print_function
import ast
import collections
import functools
+import itertools
import json
import os
import re
-import sys
import astor
import six
from google.protobuf.message import Message as ProtoMessage
+from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.util import tf_inspect
+from tensorflow.tools.docs import doc_controls
+
+
+def is_free_function(py_object, full_name, index):
+ """Check if input is a free function (and not a class- or static method).
+
+ Args:
+    py_object: The object in question.
+ full_name: The full name of the object, like `tf.module.symbol`.
+ index: The {full_name:py_object} dictionary for the public API.
+
+ Returns:
+    True if the object is a stand-alone function and not part of a class
+    definition.
+ """
+ if not tf_inspect.isfunction(py_object):
+ return False
+
+ parent_name = full_name.rsplit('.', 1)[0]
+ if tf_inspect.isclass(index[parent_name]):
+ return False
+
+ return True
# A regular expression capturing a python identifier.
@@ -52,7 +76,7 @@ class _Errors(object):
template = 'ERROR:\n output file name: %s\n %s\n\n'
for full_name, message in self._errors:
- print(template % (full_name, message), file=sys.stderr)
+ logging.warn(template, full_name, message)
def append(self, full_name, message):
"""Add an error to the collection.
@@ -72,7 +96,7 @@ class _Errors(object):
return self._errors == other._errors # pylint: disable=protected-access
-def documentation_path(full_name):
+def documentation_path(full_name, is_fragment=False):
"""Returns the file path for the documentation for the given API symbol.
Given the fully qualified name of a library symbol, compute the path to which
@@ -82,12 +106,22 @@ def documentation_path(full_name):
Args:
full_name: Fully qualified name of a library symbol.
-
+    is_fragment: If `False`, produce a direct markdown link (`tf.a.b.c` -->
+      `tf/a/b/c.md`). If `True`, produce a fragment link (`tf.a.b.c` -->
+      `tf/a/b.md#c`).
Returns:
The file path to which to write the documentation for `full_name`.
"""
- dirs = full_name.split('.')
- return os.path.join(*dirs) + '.md'
+ parts = full_name.split('.')
+ if is_fragment:
+ parts, fragment = parts[:-1], parts[-1]
+
+ result = os.path.join(*parts) + '.md'
+
+ if is_fragment:
+ result = result + '#' + fragment
+
+ return result
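
For illustration, the two modes of `documentation_path` (the symbol name is hypothetical):

    from tensorflow.tools.docs import parser

    print(parser.documentation_path('tf.a.b.c'))                    # tf/a/b/c.md
    print(parser.documentation_path('tf.a.b.c', is_fragment=True))  # tf/a/b.md#c
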
def _get_raw_docstring(py_object):
@@ -134,8 +168,7 @@ class ReferenceResolver(object):
doc.
"""
- def __init__(self, duplicate_of, doc_index, is_class, is_module,
- py_module_names):
+ def __init__(self, duplicate_of, doc_index, is_fragment, py_module_names):
"""Initializes a Reference Resolver.
Args:
@@ -143,15 +176,15 @@ class ReferenceResolver(object):
symbols.
doc_index: A `dict` mapping symbol name strings to objects with `url`
and `title` fields. Used to resolve @{$doc} references in docstrings.
- is_class: A map from full names to bool for each symbol.
- is_module: A map from full names to bool for each symbol.
+      is_fragment: A map from full names to bool for each symbol. If True, the
+        object lives at a page fragment (`tf.a.b.c` --> `tf/a/b#c`). If False,
+        the object has a page of its own: `tf.a.b.c` --> `tf/a/b/c`.
py_module_names: A list of string names of Python modules.
"""
self._duplicate_of = duplicate_of
self._doc_index = doc_index
- self._is_class = is_class
- self._is_module = is_module
- self._all_names = set(is_class.keys())
+ self._is_fragment = is_fragment
+ self._all_names = set(is_fragment.keys())
self._py_module_names = py_module_names
self.current_doc_full_name = None
@@ -178,21 +211,18 @@ class ReferenceResolver(object):
Returns:
an instance of `ReferenceResolver` ()
"""
- is_class = {
- name: tf_inspect.isclass(visitor.index[name])
- for name, obj in visitor.index.items()
- }
+ is_fragment = {}
+ for name, obj in visitor.index.items():
+ has_page = (
+ tf_inspect.isclass(obj) or tf_inspect.ismodule(obj) or
+ is_free_function(obj, name, visitor.index))
- is_module = {
- name: tf_inspect.ismodule(visitor.index[name])
- for name, obj in visitor.index.items()
- }
+ is_fragment[name] = not has_page
return cls(
duplicate_of=visitor.duplicate_of,
doc_index=doc_index,
- is_class=is_class,
- is_module=is_module,
+ is_fragment=is_fragment,
**kwargs)
@classmethod
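
A hedged sketch of the resulting `is_fragment` map for a tiny hypothetical index: modules, classes, and free functions get their own page, while everything else is documented as a fragment of its parent's page.

    is_fragment = {
        'tf': False,               # module   -> tf.md
        'tf.AClass': False,        # class    -> tf/AClass.md
        'tf.AClass.method': True,  # method   -> tf/AClass.md#method
        'tf.VERSION': True,        # constant -> tf.md#VERSION
    }
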
@@ -208,6 +238,10 @@ class ReferenceResolver(object):
Args:
filepath: The file path to write the json to.
"""
+ try:
+ os.makedirs(os.path.dirname(filepath))
+ except OSError:
+ pass
json_dict = {}
for key, value in self.__dict__.items():
# Drop these two fields. `_doc_index` is not serializable. `_all_names` is
@@ -221,7 +255,7 @@ class ReferenceResolver(object):
json_dict[key.lstrip('_')] = value
with open(filepath, 'w') as f:
- json.dump(json_dict, f)
+ json.dump(json_dict, f, indent=2, sort_keys=True)
def replace_references(self, string, relative_path_to_root):
"""Replace "@{symbol}" references with links to symbol's documentation page.
@@ -337,19 +371,7 @@ class ReferenceResolver(object):
raise TFDocsError(
'Cannot make link to "%s": Not in index.' % master_name)
- # If this is a member of a class, link to the class page with an anchor.
- ref_path = None
- if not (self._is_class[master_name] or self._is_module[master_name]):
- idents = master_name.split('.')
- if len(idents) > 1:
- class_name = '.'.join(idents[:-1])
- assert class_name in self._all_names
- if self._is_class[class_name]:
- ref_path = documentation_path(class_name) + '#%s' % idents[-1]
-
- if not ref_path:
- ref_path = documentation_path(master_name)
-
+ ref_path = documentation_path(master_name, self._is_fragment[master_name])
return os.path.join(relative_path_to_root, ref_path)
def _one_ref(self, match, relative_path_to_root):
@@ -614,6 +636,9 @@ def _parse_md_docstring(py_object, relative_path_to_root, reference_resolver):
docstring, compatibility = _handle_compatibility(raw_docstring)
docstring, function_details = _parse_function_details(docstring)
+ if 'Generated by: tensorflow/tools/api/generator' in docstring:
+ docstring = ''
+
return _DocstringInfo(
docstring.split('\n')[0], docstring, function_details, compatibility)
@@ -657,12 +682,14 @@ def _get_arg_spec(func):
argspec_defaults.pop(i-first_default_arg)
else:
first_default_arg -= 1
- return tf_inspect.FullArgSpec(args=argspec_args,
- varargs=argspec.varargs,
- varkw=argspec.varkw,
- defaults=tuple(argspec_defaults),
- kwonlyargs=[], kwonlydefaults=None,
- annotations={})
+ return tf_inspect.FullArgSpec(
+ args=argspec_args,
+ varargs=argspec.varargs,
+ varkw=argspec.varkw,
+ defaults=tuple(argspec_defaults),
+ kwonlyargs=[],
+ kwonlydefaults=None,
+ annotations={})
else: # Regular function or method, getargspec will work fine.
return tf_inspect.getfullargspec(func)
@@ -672,7 +699,7 @@ def _remove_first_line_indent(string):
return '\n'.join([line[indent:] for line in string.split('\n')])
-PAREN_NUMBER_RE = re.compile("^\(([0-9.e-]+)\)")
+PAREN_NUMBER_RE = re.compile(r'^\(([0-9.e-]+)\)')
def _generate_signature(func, reverse_index):
@@ -755,8 +782,9 @@ def _generate_signature(func, reverse_index):
lookup_text = public_name + default_text[len(internal_name):]
break
if default_text is lookup_text:
- print('WARNING: Using default arg, failed lookup: %s, repr: %r' %
- (default_text, default))
+ logging.warn(
+ 'WARNING: Using default arg, failed lookup: %s, repr: %r',
+ default_text, default)
else:
default_text = lookup_text
else:
@@ -904,6 +932,9 @@ class _FunctionPageInfo(object):
def add_decorator(self, dec):
self._decorators.append(dec)
+ def get_metadata_html(self):
+ return _Metadata(self.full_name).build_html()
+
class _ClassPageInfo(object):
"""Collects docs for a class page.
@@ -936,6 +967,7 @@ class _ClassPageInfo(object):
self._aliases = None
self._doc = None
self._guides = None
+ self._namedtuplefields = None
self._bases = None
self._properties = []
@@ -1019,6 +1051,17 @@ class _ClassPageInfo(object):
self._guides = guides
@property
+ def namedtuplefields(self):
+ return self._namedtuplefields
+
+ def set_namedtuplefields(self, py_class):
+ if issubclass(py_class, tuple):
+ if all(
+ hasattr(py_class, attr)
+ for attr in ('_asdict', '_fields', '_make', '_replace')):
+ self._namedtuplefields = py_class._fields
+
+ @property
def bases(self):
"""Returns a list of `_LinkInfo` objects pointing to the class' parents."""
return self._bases
@@ -1055,7 +1098,15 @@ class _ClassPageInfo(object):
@property
def properties(self):
"""Returns a list of `_PropertyInfo` describing the class' properties."""
- return self._properties
+ props_dict = {prop.short_name: prop for prop in self._properties}
+ props = []
+ if self.namedtuplefields:
+ for field in self.namedtuplefields:
+ props.append(props_dict.pop(field))
+
+ props.extend(sorted(props_dict.values()))
+
+    return props
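
A small sketch of the namedtuple-aware ordering above; the class is hypothetical. For a namedtuple, the documented properties come out in field order, with any non-field properties appended in sorted order.

    import collections

    Point = collections.namedtuple('Point', ['z', 'y', 'x'])
    # _ClassPageInfo.properties would list z, y, x in that order (Point._fields),
    # rather than alphabetically.
    print(Point._fields)  # ('z', 'y', 'x')
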
def _add_property(self, short_name, full_name, obj, doc):
"""Adds a `_PropertyInfo` entry to the `properties` list.
@@ -1066,6 +1117,9 @@ class _ClassPageInfo(object):
obj: The property object itself
doc: The property's parsed docstring, a `_DocstringInfo`.
"""
+    # Hide useless namedtuple docstrings.
+ if re.match('Alias for field number [0-9]+', doc.docstring):
+ doc = doc._replace(docstring='', brief='')
property_info = _PropertyInfo(short_name, full_name, obj, doc)
self._properties.append(property_info)
@@ -1097,6 +1151,14 @@ class _ClassPageInfo(object):
"""Returns a list of `_LinkInfo` pointing to any nested classes."""
return self._classes
+ def get_metadata_html(self):
+ meta_data = _Metadata(self.full_name)
+ for item in itertools.chain(self.classes, self.properties, self.methods,
+ self.other_members):
+ meta_data.append(item)
+
+ return meta_data.build_html()
+
def _add_class(self, short_name, full_name, obj, doc, url):
"""Adds a `_LinkInfo` for a nested class to `classes` list.
@@ -1137,6 +1199,7 @@ class _ClassPageInfo(object):
py_class: The class object being documented
parser_config: An instance of ParserConfig.
"""
+ self.set_namedtuplefields(py_class)
doc_path = documentation_path(self.full_name)
relative_path = os.path.relpath(
path='.', start=os.path.dirname(doc_path) or '.')
@@ -1145,10 +1208,11 @@ class _ClassPageInfo(object):
for short_name in parser_config.tree[self.full_name]:
# Remove builtin members that we never want to document.
- if short_name in ['__class__', '__base__', '__weakref__', '__doc__',
- '__module__', '__dict__', '__abstractmethods__',
- '__slots__', '__getnewargs__', '__str__',
- '__repr__', '__hash__']:
+ if short_name in [
+ '__class__', '__base__', '__weakref__', '__doc__', '__module__',
+ '__dict__', '__abstractmethods__', '__slots__', '__getnewargs__',
+ '__str__', '__repr__', '__hash__', '__reduce__'
+ ]:
continue
child_name = '.'.join([self.full_name, short_name])
@@ -1156,15 +1220,18 @@ class _ClassPageInfo(object):
# Don't document anything that is defined in object or by protobuf.
defining_class = _get_defining_class(py_class, short_name)
- if (defining_class is object or
- defining_class is type or defining_class is tuple or
- defining_class is BaseException or defining_class is Exception or
- # The following condition excludes most protobuf-defined symbols.
- defining_class and defining_class.__name__ in ['CMessage', 'Message',
- 'MessageMeta']):
+ if defining_class in [object, type, tuple, BaseException, Exception]:
+ continue
+
+ # The following condition excludes most protobuf-defined symbols.
+ if (defining_class and
+ defining_class.__name__ in ['CMessage', 'Message', 'MessageMeta']):
continue
# TODO(markdaoust): Add a note in child docs showing the defining class.
+ if doc_controls.should_skip_class_attr(py_class, short_name):
+ continue
+
child_doc = _parse_md_docstring(child, relative_path,
parser_config.reference_resolver)
@@ -1193,9 +1260,8 @@ class _ClassPageInfo(object):
# obvious what they do, don't include them in the docs if there's no
# docstring.
if not child_doc.brief.strip() and short_name in [
- '__del__', '__copy__']:
- print('Skipping %s, defined in %s, no docstring.' % (child_name,
- defining_class))
+ '__del__', '__copy__'
+ ]:
continue
try:
@@ -1326,6 +1392,16 @@ class _ModulePageInfo(object):
self._other_members.append(
_OtherMemberInfo(short_name, full_name, obj, doc))
+ def get_metadata_html(self):
+ meta_data = _Metadata(self.full_name)
+
+    # Objects with their own pages are not added to the metadata list for the
+    # module; the module only has a link to the object page. No docs.
+ for item in self.other_members:
+ meta_data.append(item)
+
+ return meta_data.build_html()
+
def collect_docs_for_module(self, parser_config):
"""Collect information necessary specifically for a module's doc page.
@@ -1342,7 +1418,8 @@ class _ModulePageInfo(object):
for name in member_names:
if name in ['__builtins__', '__doc__', '__file__',
- '__name__', '__path__', '__package__']:
+ '__name__', '__path__', '__package__',
+ '__cached__', '__loader__', '__spec__']:
continue
member_full_name = self.full_name + '.' + name if self.full_name else name
@@ -1402,7 +1479,7 @@ class ParserConfig(object):
self.base_dir = base_dir
self.defined_in_prefix = 'tensorflow/'
self.code_url_prefix = (
- 'https://www.tensorflow.org/code/tensorflow/') # pylint: disable=line-too-long
+ '/code/stable/tensorflow/') # pylint: disable=line-too-long
def py_name_to_object(self, full_name):
"""Return the Python object for a Python symbol name."""
@@ -1571,7 +1648,8 @@ class _GeneratedFile(object):
return True
def __str__(self):
- return 'Defined in `%s%s`.\n\n' % (self.path_prefix, self.path)
+ return 'Defined in generated file: `%s%s`.\n\n' % (self.path_prefix,
+ self.path)
def _get_defined_in(py_object, parser_config):
@@ -1608,6 +1686,8 @@ def _get_defined_in(py_object, parser_config):
if re.match(r'.*/gen_[^/]*\.py$', path):
return _GeneratedFile(path, parser_config)
+ if 'genfiles' in path or 'tools/api/generator' in path:
+ return _GeneratedFile(path, parser_config)
elif re.match(r'.*_pb2\.py$', path):
# The _pb2.py files all appear right next to their defining .proto file.
return _ProtoFile(path[:-7] + '.proto', parser_config)
@@ -1652,3 +1732,45 @@ def generate_global_index(library_name, index, reference_resolver):
# TODO(markdaoust): use a _ModulePageInfo -> prety_docs.build_md_page()
return '\n'.join(lines)
+
+
+class _Metadata(object):
+ """A class for building a page's Metadata block.
+
+ Attributes:
+ name: The name of the page being described by the Metadata block.
+ version: The source version.
+ """
+
+ def __init__(self, name, version='Stable'):
+ """Creates a Metadata builder.
+
+ Args:
+ name: The name of the page being described by the Metadata block.
+ version: The source version.
+ """
+ self.name = name
+ self.version = version
+ self._content = []
+
+ def append(self, item):
+ """Adds an item from the page to the Metadata block.
+
+ Args:
+ item: The parsed page section to add.
+ """
+ self._content.append(item.short_name)
+
+ def build_html(self):
+ """Returns the Metadata block as an Html string."""
+ schema = 'http://developers.google.com/ReferenceObject'
+ parts = ['<div itemscope itemtype="%s">' % schema]
+
+ parts.append('<meta itemprop="name" content="%s" />' % self.name)
+ parts.append('<meta itemprop="path" content="%s" />' % self.version)
+ for item in self._content:
+ parts.append('<meta itemprop="property" content="%s"/>' % item)
+
+ parts.extend(['</div>', ''])
+
+ return '\n'.join(parts)
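
A hedged usage sketch of the `_Metadata` helper (it is private to `parser.py`; the page name and member are hypothetical):

    import collections

    from tensorflow.tools.docs import parser

    Item = collections.namedtuple('Item', ['short_name'])
    meta = parser._Metadata('tf.foo.Bar')
    meta.append(Item('baz'))
    print(meta.build_html())
    # <div itemscope itemtype="http://developers.google.com/ReferenceObject">
    # <meta itemprop="name" content="tf.foo.Bar" />
    # <meta itemprop="path" content="Stable" />
    # <meta itemprop="property" content="baz"/>
    # </div>
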
diff --git a/tensorflow/tools/docs/parser_test.py b/tensorflow/tools/docs/parser_test.py
index d7757d78ed..8a41796fb9 100644
--- a/tensorflow/tools/docs/parser_test.py
+++ b/tensorflow/tools/docs/parser_test.py
@@ -18,14 +18,22 @@ from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
+import collections
import functools
import os
import sys
from tensorflow.python.platform import googletest
from tensorflow.python.util import tf_inspect
+from tensorflow.tools.docs import doc_controls
from tensorflow.tools.docs import parser
+# The test needs a real module. `types.ModuleType()` doesn't work, as the result
+# is a `builtin` module. Using "parser" here is arbitrary. The tests don't
+# depend on the module contents. At this point in the process the public API
+# has already been extracted.
+test_module = parser
+
def test_function(unused_arg, unused_kwarg='default'):
"""Docstring for test function."""
@@ -37,13 +45,27 @@ def test_function_with_args_kwargs(unused_arg, *unused_args, **unused_kwargs):
pass
-class TestClass(object):
+class ParentClass(object):
+
+ @doc_controls.do_not_doc_inheritable
+ def hidden_method(self):
+ pass
+
+
+class TestClass(ParentClass):
"""Docstring for TestClass itself."""
def a_method(self, arg='default'):
"""Docstring for a method."""
pass
+ def hidden_method(self):
+ pass
+
+ @doc_controls.do_not_generate_docs
+ def hidden_method2(self):
+ pass
+
class ChildClass(object):
"""Docstring for a child class."""
pass
@@ -175,16 +197,159 @@ class ParserTest(googletest.TestCase):
# Make sure this file is contained as the definition location.
self.assertEqual(os.path.relpath(__file__, '/'), page_info.defined_in.path)
+ def test_namedtuple_field_order(self):
+ namedtupleclass = collections.namedtuple('namedtupleclass',
+ {'z', 'y', 'x', 'w', 'v', 'u'})
+
+ index = {
+ 'namedtupleclass': namedtupleclass,
+ 'namedtupleclass.u': namedtupleclass.u,
+ 'namedtupleclass.v': namedtupleclass.v,
+ 'namedtupleclass.w': namedtupleclass.w,
+ 'namedtupleclass.x': namedtupleclass.x,
+ 'namedtupleclass.y': namedtupleclass.y,
+ 'namedtupleclass.z': namedtupleclass.z,
+ }
+
+ visitor = DummyVisitor(index=index, duplicate_of={})
+
+ reference_resolver = parser.ReferenceResolver.from_visitor(
+ visitor=visitor, doc_index={}, py_module_names=['tf'])
+
+ tree = {'namedtupleclass': {'u', 'v', 'w', 'x', 'y', 'z'}}
+ parser_config = parser.ParserConfig(
+ reference_resolver=reference_resolver,
+ duplicates={},
+ duplicate_of={},
+ tree=tree,
+ index=index,
+ reverse_index={},
+ guide_index={},
+ base_dir='/')
+
+ page_info = parser.docs_for_object(
+ full_name='namedtupleclass',
+ py_object=namedtupleclass,
+ parser_config=parser_config)
+
+    # Each namedtuple field has a docstring of the form:
+ # 'Alias for field number ##'. These props are returned sorted.
+
+ def sort_key(prop_info):
+ return int(prop_info.obj.__doc__.split(' ')[-1])
+
+ self.assertSequenceEqual(page_info.properties,
+ sorted(page_info.properties, key=sort_key))
+
+ def test_docs_for_class_should_skip(self):
+
+ class Parent(object):
+
+ @doc_controls.do_not_doc_inheritable
+ def a_method(self, arg='default'):
+ pass
+
+ class Child(Parent):
+
+ def a_method(self, arg='default'):
+ pass
+
+ index = {
+ 'Child': Child,
+ 'Child.a_method': Child.a_method,
+ }
+
+ visitor = DummyVisitor(index=index, duplicate_of={})
+
+ reference_resolver = parser.ReferenceResolver.from_visitor(
+ visitor=visitor, doc_index={}, py_module_names=['tf'])
+
+ tree = {
+ 'Child': ['a_method'],
+ }
+
+ parser_config = parser.ParserConfig(
+ reference_resolver=reference_resolver,
+ duplicates={},
+ duplicate_of={},
+ tree=tree,
+ index=index,
+ reverse_index={},
+ guide_index={},
+ base_dir='/')
+
+ page_info = parser.docs_for_object(
+ full_name='Child', py_object=Child, parser_config=parser_config)
+
+ # Make sure the `a_method` is not present
+ self.assertEqual(0, len(page_info.methods))
+
+ def test_docs_for_message_class(self):
+
+ class CMessage(object):
+
+ def hidden(self):
+ pass
+
+ class Message(object):
+
+ def hidden2(self):
+ pass
+
+ class MessageMeta(object):
+
+ def hidden3(self):
+ pass
+
+ class ChildMessage(CMessage, Message, MessageMeta):
+
+ def my_method(self):
+ pass
+
+ index = {
+ 'ChildMessage': ChildMessage,
+ 'ChildMessage.hidden': ChildMessage.hidden,
+ 'ChildMessage.hidden2': ChildMessage.hidden2,
+ 'ChildMessage.hidden3': ChildMessage.hidden3,
+ 'ChildMessage.my_method': ChildMessage.my_method,
+ }
+
+ visitor = DummyVisitor(index=index, duplicate_of={})
+
+ reference_resolver = parser.ReferenceResolver.from_visitor(
+ visitor=visitor, doc_index={}, py_module_names=['tf'])
+
+ tree = {'ChildMessage': ['hidden', 'hidden2', 'hidden3', 'my_method']}
+
+ parser_config = parser.ParserConfig(
+ reference_resolver=reference_resolver,
+ duplicates={},
+ duplicate_of={},
+ tree=tree,
+ index=index,
+ reverse_index={},
+ guide_index={},
+ base_dir='/')
+
+ page_info = parser.docs_for_object(
+ full_name='ChildMessage',
+ py_object=ChildMessage,
+ parser_config=parser_config)
+
+ self.assertEqual(1, len(page_info.methods))
+ self.assertEqual('my_method', page_info.methods[0].short_name)
+
def test_docs_for_module(self):
- # Get the current module.
- module = sys.modules[__name__]
index = {
- 'TestModule': module,
- 'TestModule.test_function': test_function,
+ 'TestModule':
+ test_module,
+ 'TestModule.test_function':
+ test_function,
'TestModule.test_function_with_args_kwargs':
- test_function_with_args_kwargs,
- 'TestModule.TestClass': TestClass,
+ test_function_with_args_kwargs,
+ 'TestModule.TestClass':
+ TestClass,
}
visitor = DummyVisitor(index=index, duplicate_of={})
@@ -207,11 +372,13 @@ class ParserTest(googletest.TestCase):
base_dir='/')
page_info = parser.docs_for_object(
- full_name='TestModule', py_object=module, parser_config=parser_config)
+ full_name='TestModule',
+ py_object=test_module,
+ parser_config=parser_config)
# Make sure the brief docstring is present
- self.assertEqual(tf_inspect.getdoc(module).split('\n')[0],
- page_info.doc.brief)
+ self.assertEqual(
+ tf_inspect.getdoc(test_module).split('\n')[0], page_info.doc.brief)
# Make sure that the members are there
funcs = {f_info.obj for f_info in page_info.functions}
@@ -220,8 +387,9 @@ class ParserTest(googletest.TestCase):
classes = {cls_info.obj for cls_info in page_info.classes}
self.assertEqual({TestClass}, classes)
- # Make sure this file is contained as the definition location.
- self.assertEqual(os.path.relpath(__file__, '/'), page_info.defined_in.path)
+ # Make sure the module's file is contained as the definition location.
+ self.assertEqual(
+ os.path.relpath(test_module.__file__, '/'), page_info.defined_in.path)
def test_docs_for_function(self):
index = {
@@ -337,6 +505,7 @@ class ParserTest(googletest.TestCase):
duplicate_of = {'tf.third': 'tf.fourth'}
index = {
+ 'tf': test_module,
'tf.fancy': test_function_with_fancy_docstring,
'tf.reference': HasOneMember,
'tf.reference.foo': HasOneMember.foo,
@@ -363,20 +532,18 @@ class ParserTest(googletest.TestCase):
'NumPy has nothing as awesome as this function.\n')
def test_generate_index(self):
- module = sys.modules[__name__]
index = {
- 'TestModule': module,
- 'test_function': test_function,
- 'TestModule.test_function': test_function,
- 'TestModule.TestClass': TestClass,
- 'TestModule.TestClass.a_method': TestClass.a_method,
- 'TestModule.TestClass.a_property': TestClass.a_property,
- 'TestModule.TestClass.ChildClass': TestClass.ChildClass,
- }
- duplicate_of = {
- 'TestModule.test_function': 'test_function'
+ 'tf': test_module,
+ 'tf.TestModule': test_module,
+ 'tf.test_function': test_function,
+ 'tf.TestModule.test_function': test_function,
+ 'tf.TestModule.TestClass': TestClass,
+ 'tf.TestModule.TestClass.a_method': TestClass.a_method,
+ 'tf.TestModule.TestClass.a_property': TestClass.a_property,
+ 'tf.TestModule.TestClass.ChildClass': TestClass.ChildClass,
}
+ duplicate_of = {'tf.TestModule.test_function': 'tf.test_function'}
visitor = DummyVisitor(index=index, duplicate_of=duplicate_of)
@@ -395,7 +562,7 @@ class ParserTest(googletest.TestCase):
self.assertIn('TestModule.test_function', docs)
# Leading backtick to make sure it's included top-level.
# This depends on formatting, but should be stable.
- self.assertIn('<code>test_function', docs)
+ self.assertIn('<code>tf.test_function', docs)
def test_argspec_for_functools_partial(self):
# pylint: disable=unused-argument
@@ -408,67 +575,98 @@ class ParserTest(googletest.TestCase):
# pylint: disable=protected-access
# Make sure everything works for regular functions.
- expected = tf_inspect.FullArgSpec(args=['arg1', 'arg2', 'kwarg1', 'kwarg2'],
- varargs=None, varkw=None, defaults=(1, 2),
- kwonlyargs=[], kwonlydefaults=None,
- annotations={})
+ expected = tf_inspect.FullArgSpec(
+ args=['arg1', 'arg2', 'kwarg1', 'kwarg2'],
+ varargs=None,
+ varkw=None,
+ defaults=(1, 2),
+ kwonlyargs=[],
+ kwonlydefaults=None,
+ annotations={})
self.assertEqual(expected, parser._get_arg_spec(test_function_for_partial1))
# Make sure doing nothing works.
- expected = tf_inspect.FullArgSpec(args=['arg1', 'arg2', 'kwarg1', 'kwarg2'],
- varargs=None, varkw=None, defaults=(1, 2),
- kwonlyargs=[], kwonlydefaults=None,
- annotations={})
+ expected = tf_inspect.FullArgSpec(
+ args=['arg1', 'arg2', 'kwarg1', 'kwarg2'],
+ varargs=None,
+ varkw=None,
+ defaults=(1, 2),
+ kwonlyargs=[],
+ kwonlydefaults=None,
+ annotations={})
partial = functools.partial(test_function_for_partial1)
self.assertEqual(expected, parser._get_arg_spec(partial))
# Make sure setting args from the front works.
- expected = tf_inspect.FullArgSpec(args=['arg2', 'kwarg1', 'kwarg2'],
- varargs=None, varkw=None, defaults=(1, 2),
- kwonlyargs=[], kwonlydefaults=None,
- annotations={})
+ expected = tf_inspect.FullArgSpec(
+ args=['arg2', 'kwarg1', 'kwarg2'],
+ varargs=None,
+ varkw=None,
+ defaults=(1, 2),
+ kwonlyargs=[],
+ kwonlydefaults=None,
+ annotations={})
partial = functools.partial(test_function_for_partial1, 1)
self.assertEqual(expected, parser._get_arg_spec(partial))
- expected = tf_inspect.FullArgSpec(args=['kwarg2'],
- varargs=None, varkw=None, defaults=(2,),
- kwonlyargs=[], kwonlydefaults=None,
- annotations={})
+ expected = tf_inspect.FullArgSpec(
+ args=['kwarg2'],
+ varargs=None,
+ varkw=None,
+ defaults=(2,),
+ kwonlyargs=[],
+ kwonlydefaults=None,
+ annotations={})
partial = functools.partial(test_function_for_partial1, 1, 2, 3)
self.assertEqual(expected, parser._get_arg_spec(partial))
# Make sure setting kwargs works.
- expected = tf_inspect.FullArgSpec(args=['arg1', 'arg2', 'kwarg2'],
- varargs=None, varkw=None, defaults=(2,),
- kwonlyargs=[], kwonlydefaults=None,
- annotations={})
+ expected = tf_inspect.FullArgSpec(
+ args=['arg1', 'arg2', 'kwarg2'],
+ varargs=None,
+ varkw=None,
+ defaults=(2,),
+ kwonlyargs=[],
+ kwonlydefaults=None,
+ annotations={})
partial = functools.partial(test_function_for_partial1, kwarg1=0)
self.assertEqual(expected, parser._get_arg_spec(partial))
- expected = tf_inspect.FullArgSpec(args=['arg1', 'arg2', 'kwarg1'],
- varargs=None, varkw=None, defaults=(1,),
- kwonlyargs=[], kwonlydefaults=None,
- annotations={})
+ expected = tf_inspect.FullArgSpec(
+ args=['arg1', 'arg2', 'kwarg1'],
+ varargs=None,
+ varkw=None,
+ defaults=(1,),
+ kwonlyargs=[],
+ kwonlydefaults=None,
+ annotations={})
partial = functools.partial(test_function_for_partial1, kwarg2=0)
self.assertEqual(expected, parser._get_arg_spec(partial))
- expected = tf_inspect.FullArgSpec(args=['arg1'],
- varargs=None, varkw=None, defaults=(),
- kwonlyargs=[], kwonlydefaults=None,
- annotations={})
+ expected = tf_inspect.FullArgSpec(
+ args=['arg1'],
+ varargs=None,
+ varkw=None,
+ defaults=(),
+ kwonlyargs=[],
+ kwonlydefaults=None,
+ annotations={})
partial = functools.partial(test_function_for_partial1,
arg2=0, kwarg1=0, kwarg2=0)
self.assertEqual(expected, parser._get_arg_spec(partial))
# Make sure *args, *kwargs is accounted for.
- expected = tf_inspect.FullArgSpec(args=[],
- varargs='my_args', varkw='my_kwargs',
- defaults=(),
- kwonlyargs=[], kwonlydefaults=None,
- annotations={})
+ expected = tf_inspect.FullArgSpec(
+ args=[],
+ varargs='my_args',
+ varkw='my_kwargs',
+ defaults=(),
+ kwonlyargs=[],
+ kwonlydefaults=None,
+ annotations={})
partial = functools.partial(test_function_for_partial2, 0, 1)
self.assertEqual(expected, parser._get_arg_spec(partial))
-
+
# pylint: enable=protected-access
def testSaveReferenceResolver(self):
@@ -476,22 +674,18 @@ class ParserTest(googletest.TestCase):
duplicate_of = {'AClass': ['AClass2']}
doc_index = {'doc': you_cant_serialize_this}
- is_class = {
+ is_fragment = {
'tf': False,
- 'tf.AClass': True,
- 'tf.AClass2': True,
- 'tf.function': False
- }
- is_module = {
- 'tf': True,
+ 'tf.VERSION': True,
'tf.AClass': False,
+ 'tf.AClass.method': True,
'tf.AClass2': False,
'tf.function': False
}
py_module_names = ['tf', 'tfdbg']
- resolver = parser.ReferenceResolver(duplicate_of, doc_index, is_class,
- is_module, py_module_names)
+ resolver = parser.ReferenceResolver(duplicate_of, doc_index, is_fragment,
+ py_module_names)
outdir = googletest.GetTempDir()
@@ -503,6 +697,23 @@ class ParserTest(googletest.TestCase):
# There are no __slots__, so all fields are visible in __dict__.
self.assertEqual(resolver.__dict__, resolver2.__dict__)
+ def testIsFreeFunction(self):
+
+ result = parser.is_free_function(test_function, 'test_module.test_function',
+ {'test_module': test_module})
+ self.assertTrue(result)
+
+ result = parser.is_free_function(test_function, 'TestClass.test_function',
+ {'TestClass': TestClass})
+ self.assertFalse(result)
+
+ result = parser.is_free_function(TestClass, 'TestClass', {})
+ self.assertFalse(result)
+
+ result = parser.is_free_function(test_module, 'test_module', {})
+ self.assertFalse(result)
+
+
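Editor's note: the four assertions above pin down what counts as a "free" function. A hypothetical re-implementation, inferred only from these cases (the real `parser.is_free_function` may differ in detail), would look roughly like:

```python
import inspect


def is_free_function(py_object, full_name, index):
  """Sketch: free means callable, not a class/module, not owned by a class."""
  if (not callable(py_object) or inspect.isclass(py_object)
      or inspect.ismodule(py_object)):
    return False
  parent_name = full_name.rsplit('.', 1)[0] if '.' in full_name else ''
  parent = index.get(parent_name)
  # Methods live on a class; free functions live on a module (or nothing).
  return not inspect.isclass(parent)
```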
RELU_DOC = """Computes rectified linear: `max(features, 0)`
Args:
@@ -592,6 +803,5 @@ class TestGenerateSignature(googletest.TestCase):
sig = parser._generate_signature(example_fun, reverse_index={})
self.assertEqual(sig, ['arg1=a.b.c.d', 'arg2=a.b.c.d(1, 2)', "arg3=e['f']"])
-
if __name__ == '__main__':
googletest.main()
diff --git a/tensorflow/tools/docs/pretty_docs.py b/tensorflow/tools/docs/pretty_docs.py
index 55ab5bdd49..1a3e79621f 100644
--- a/tensorflow/tools/docs/pretty_docs.py
+++ b/tensorflow/tools/docs/pretty_docs.py
@@ -27,7 +27,6 @@ from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
-import itertools
import textwrap
@@ -58,8 +57,7 @@ def build_md_page(page_info):
def _build_function_page(page_info):
"""Given a FunctionPageInfo object Return the page as an md string."""
- parts = [_Metadata(page_info.full_name).build_html()]
- parts.append('# %s\n\n' % page_info.full_name)
+ parts = ['# %s\n\n' % page_info.full_name]
if len(page_info.aliases) > 1:
parts.append('### Aliases:\n\n')
@@ -83,17 +81,7 @@ def _build_function_page(page_info):
def _build_class_page(page_info):
"""Given a ClassPageInfo object Return the page as an md string."""
- meta_data = _Metadata(page_info.full_name)
- for item in itertools.chain(
- page_info.classes,
- page_info.properties,
- page_info.methods,
- page_info.other_members):
- meta_data.append(item)
-
- parts = [meta_data.build_html()]
-
- parts.append('# {page_info.full_name}\n\n'.format(page_info=page_info))
+ parts = ['# {page_info.full_name}\n\n'.format(page_info=page_info)]
parts.append('## Class `%s`\n\n' % page_info.full_name.split('.')[-1])
if page_info.bases:
@@ -105,6 +93,15 @@ def _build_class_page(page_info):
parts.append('\n\n')
+ # Sort the methods list, but make sure constructors come first.
+ constructor_names = ['__init__', '__new__']
+ constructors = sorted(
+ method for method in page_info.methods
+ if method.short_name in constructor_names)
+ other_methods = sorted(
+ method for method in page_info.methods
+ if method.short_name not in constructor_names)
+
if len(page_info.aliases) > 1:
parts.append('### Aliases:\n\n')
parts.extend('* Class `%s`\n' % name for name in page_info.aliases)
@@ -121,6 +118,11 @@ def _build_class_page(page_info):
parts.append('\n\n')
+ if constructors:
+ for method_info in constructors:
+ parts.append(_build_method_section(method_info, heading_level=2))
+ parts.append('\n\n')
+
if page_info.classes:
parts.append('## Child Classes\n')
@@ -134,7 +136,7 @@ def _build_class_page(page_info):
if page_info.properties:
parts.append('## Properties\n\n')
- for prop_info in sorted(page_info.properties):
+ for prop_info in page_info.properties:
h3 = '<h3 id="{short_name}"><code>{short_name}</code></h3>\n\n'
parts.append(h3.format(short_name=prop_info.short_name))
@@ -146,28 +148,11 @@ def _build_class_page(page_info):
parts.append('\n\n')
- if page_info.methods:
+ if other_methods:
parts.append('## Methods\n\n')
- # Sort the methods list, but make sure constructors come first.
- constructors = ['__init__', '__new__']
- inits = [method for method in page_info.methods
- if method.short_name in constructors]
- others = [method for method in page_info.methods
- if method.short_name not in constructors]
-
- for method_info in sorted(inits) + sorted(others):
- h3 = ('<h3 id="{short_name}">'
- '<code>{short_name}</code>'
- '</h3>\n\n')
- parts.append(h3.format(**method_info._asdict()))
-
- if method_info.signature is not None:
- parts.append(_build_signature(method_info, use_full_name=False))
-
- parts.append(method_info.doc.docstring)
- parts.append(_build_function_details(method_info.doc.function_details))
- parts.append(_build_compatibility(method_info.doc.compatibility))
- parts.append('\n\n')
+
+ for method_info in other_methods:
+ parts.append(_build_method_section(method_info))
parts.append('\n\n')
if page_info.other_members:
@@ -184,19 +169,36 @@ def _build_class_page(page_info):
return ''.join(parts)
-def _build_module_page(page_info):
- """Given a ClassPageInfo object Return the page as an md string."""
- meta_data = _Metadata(page_info.full_name)
+def _build_method_section(method_info, heading_level=3):
+ """Generates a markdown section for a method.
- # Objects with their own pages are not added to the matadata list for the
- # module, as the only thing on the module page is a link to the object's page.
- for item in page_info.other_members:
- meta_data.append(item)
+ Args:
+ method_info: A `MethodInfo` object.
+    heading_level: An int specifying which HTML heading level to use.
- parts = [meta_data.build_html()]
+ Returns:
+ A markdown string.
+ """
+ parts = []
+ heading = ('<h{heading_level} id="{short_name}">'
+ '<code>{short_name}</code>'
+ '</h{heading_level}>\n\n')
+ parts.append(heading.format(heading_level=heading_level,
+ **method_info._asdict()))
+
+ if method_info.signature is not None:
+ parts.append(_build_signature(method_info, use_full_name=False))
+
+ parts.append(method_info.doc.docstring)
+ parts.append(_build_function_details(method_info.doc.function_details))
+ parts.append(_build_compatibility(method_info.doc.compatibility))
+ parts.append('\n\n')
+ return ''.join(parts)
- parts.append(
- '# Module: {full_name}\n\n'.format(full_name=page_info.full_name))
+
+def _build_module_page(page_info):
+ """Given a ClassPageInfo object Return the page as an md string."""
+ parts = ['# Module: {full_name}\n\n'.format(full_name=page_info.full_name)]
if len(page_info.aliases) > 1:
parts.append('### Aliases:\n\n')
@@ -253,8 +255,9 @@ def _build_module_page(page_info):
# at least for basic types.
parts.append('## Other Members\n\n')
+ h3 = '<h3 id="{short_name}"><code>{short_name}</code></h3>\n\n'
for item in page_info.other_members:
- parts.append('`{short_name}`\n\n'.format(**item._asdict()))
+ parts.append(h3.format(**item._asdict()))
return ''.join(parts)
@@ -317,41 +320,3 @@ def _build_function_details(function_details):
parts.append(''.join(sub))
return '\n'.join(parts)
-
-
-class _Metadata(object):
- """A class for building a page's Metadata block.
-
- Attributes:
- name: The name of the page being described by the Metadata block.
- """
-
- def __init__(self, name):
- """Create a Metadata builder.
-
- Args:
- name: The name of the page being described by the Metadata block.
- """
- self.name = name
- self._content = []
-
- def append(self, item):
- """Add an item from the page to the Metadata block.
-
- Args:
- item: The parsed page section to add.
- """
- self._content.append(item.short_name)
-
- def build_html(self):
- """Return the Metadata block as an Html string."""
- schema = 'http://developers.google.com/ReferenceObject'
- parts = ['<div itemscope itemtype="%s">' % schema]
-
- parts.append('<meta itemprop="name" content="%s" />' % self.name)
- for item in self._content:
- parts.append('<meta itemprop="property" content="%s"/>' % item)
-
- parts.extend(['</div>', '', ''])
-
- return '\n'.join(parts)
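Editor's note: with the `_Metadata` block gone, per-method markup is produced solely by the new `_build_method_section` helper. A small illustration of the heading it emits (the method name 'apply' is made up; constructors are rendered the same way but with `heading_level=2`, which is why they now appear before the Child Classes and Properties sections):

```python
# Illustration only: the heading markup emitted for a hypothetical method
# named 'apply' at the default heading level of 3.
heading = ('<h{heading_level} id="{short_name}">'
           '<code>{short_name}</code>'
           '</h{heading_level}>\n\n')
print(heading.format(heading_level=3, short_name='apply'))
# <h3 id="apply"><code>apply</code></h3>
```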
diff --git a/tensorflow/tools/docs/py_guide_parser.py b/tensorflow/tools/docs/py_guide_parser.py
index 328f42d18f..b00694dc40 100644
--- a/tensorflow/tools/docs/py_guide_parser.py
+++ b/tensorflow/tools/docs/py_guide_parser.py
@@ -44,7 +44,8 @@ class PyGuideParser(object):
def process(self, full_path):
"""Read and process the file at `full_path`."""
- md_string = open(full_path, 'rb').read().decode('utf-8')
+ with open(full_path, 'rb') as f:
+ md_string = f.read().decode('utf-8')
self._lines = md_string.split('\n')
seen = set()
diff --git a/tensorflow/tools/git/gen_git_source.py b/tensorflow/tools/git/gen_git_source.py
index 372329b70c..cc2288a7fa 100755
--- a/tensorflow/tools/git/gen_git_source.py
+++ b/tensorflow/tools/git/gen_git_source.py
@@ -125,7 +125,7 @@ def configure(src_base_path, gen_path, debug=False):
try:
# In python 3.5, symlink function exists even on Windows. But requires
# Windows Admin privileges, otherwise an OSError will be thrown.
- if hasattr(os, 'symlink'):
+ if hasattr(os, "symlink"):
os.symlink(src, os.path.join(gen_path, target))
else:
shutil.copy2(src, os.path.join(gen_path, target))
@@ -139,7 +139,7 @@ def configure(src_base_path, gen_path, debug=False):
print("gen_git_source.py: spec is %r" % spec)
-def get_git_version(git_base_path):
+def get_git_version(git_base_path, git_tag_override):
"""Get the git version from the repository.
This function runs `git describe ...` in the path given as `git_base_path`.
@@ -152,6 +152,9 @@ def get_git_version(git_base_path):
Args:
git_base_path: where the .git directory is located
+ git_tag_override: Override the value for the git tag. This is useful for
+ releases where we want to build the release before the git tag is
+ created.
Returns:
A bytestring representing the git version
"""
@@ -161,8 +164,19 @@ def get_git_version(git_base_path):
"git", str("--git-dir=%s/.git" % git_base_path),
str("--work-tree=" + git_base_path), "describe", "--long", "--tags"
]).strip())
+ if git_tag_override and val:
+ split_val = val.split("-")
+ if len(split_val) < 3:
+ raise Exception(
+ ("Expected git version in format 'TAG-COMMITS AFTER TAG-HASH' "
+ "but got '%s'") % val)
+ # There might be "-" in the tag name. But we can be sure that the final
+ # two "-" are those inserted by the git describe command.
+ abbrev_commit = split_val[-1]
+ val = bytes(
+ "-".join([git_tag_override, "0", abbrev_commit]))
return val if val else unknown_label
- except subprocess.CalledProcessError:
+ except (subprocess.CalledProcessError, OSError):
return unknown_label
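Editor's note: the override splices the requested tag into the `git describe --long --tags` output while keeping the abbreviated commit hash. A standalone sketch of that string manipulation, with an illustrative describe result and tag:

```python
# Illustrative values; `git describe --long --tags` yields TAG-COUNT-gHASH.
val = "v1.10.0-1234-gabc1234"
git_tag_override = "v1.11.0-rc1"

split_val = val.split("-")
assert len(split_val) >= 3, "unexpected describe format: %s" % val
abbrev_commit = split_val[-1]              # 'gabc1234'
print("-".join([git_tag_override, "0", abbrev_commit]))
# v1.11.0-rc1-0-gabc1234
```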
@@ -205,7 +219,7 @@ const int tf_monolithic_build() {
open(filename, "w").write(contents)
-def generate(arglist):
+def generate(arglist, git_tag_override=None):
"""Generate version_info.cc as given `destination_file`.
Args:
@@ -225,6 +239,10 @@ def generate(arglist):
`ref_symlink` is unused in this script but passed, because the build
system uses that file to detect when commits happen.
+ git_tag_override: Override the value for the git tag. This is useful for
+ releases where we want to build the release before the git tag is
+ created.
+
Raises:
RuntimeError: If ./configure needs to be run, RuntimeError will be raised.
"""
@@ -242,11 +260,11 @@ def generate(arglist):
raise RuntimeError(
"Run ./configure again, branch was '%s' but is now '%s'" %
(old_branch, new_branch))
- git_version = get_git_version(data["path"])
+ git_version = get_git_version(data["path"], git_tag_override)
write_version_info(dest_file, git_version)
-def raw_generate(output_file, source_dir):
+def raw_generate(output_file, source_dir, git_tag_override=None):
"""Simple generator used for cmake/make build systems.
This does not create any symlinks. It requires the build system
@@ -255,9 +273,12 @@ def raw_generate(output_file, source_dir):
Args:
output_file: Output filename for the version info cc
source_dir: Base path of the source code
+ git_tag_override: Override the value for the git tag. This is useful for
+ releases where we want to build the release before the git tag is
+ created.
"""
- git_version = get_git_version(source_dir)
+ git_version = get_git_version(source_dir, git_tag_override)
write_version_info(output_file, git_version)
@@ -280,6 +301,11 @@ parser.add_argument(
help="Root path to place generated git files (created by --configure).")
parser.add_argument(
+ "--git_tag_override", type=str,
+ help="Override git tag value in the __git_version__ string. Useful when "
+ "creating release builds before the release tag is created.")
+
+parser.add_argument(
"--generate",
type=str,
help="Generate given spec-file, HEAD-symlink-file, ref-symlink-file",
@@ -302,12 +328,12 @@ if args.configure is not None:
raise RuntimeError("Must pass --gen_root_path arg when running --configure")
configure(args.configure, args.gen_root_path, debug=args.debug)
elif args.generate is not None:
- generate(args.generate)
+ generate(args.generate, args.git_tag_override)
elif args.raw_generate is not None:
source_path = "."
if args.source_dir is not None:
source_path = args.source_dir
- raw_generate(args.raw_generate, source_path)
+ raw_generate(args.raw_generate, source_path, args.git_tag_override)
else:
raise RuntimeError("--configure or --generate or --raw_generate "
"must be used")
diff --git a/tensorflow/tools/graph_transforms/README.md b/tensorflow/tools/graph_transforms/README.md
index 67badb4869..9f6f553ba1 100644
--- a/tensorflow/tools/graph_transforms/README.md
+++ b/tensorflow/tools/graph_transforms/README.md
@@ -388,7 +388,7 @@ input is collapsed down into a simple constant.
Args:
* clear_output_shapes: Clears tensor shape information saved as attributes.
- Some older graphs containes out-of-date information and may cause import
+      Some older graphs contain out-of-date information and may cause import
errors. Defaults to true.
Prerequisites: None
diff --git a/tensorflow/tools/graph_transforms/fold_constants_lib.cc b/tensorflow/tools/graph_transforms/fold_constants_lib.cc
index 85660f94a8..6df2718e61 100644
--- a/tensorflow/tools/graph_transforms/fold_constants_lib.cc
+++ b/tensorflow/tools/graph_transforms/fold_constants_lib.cc
@@ -117,6 +117,31 @@ Status ReplaceSendRecvs(const GraphDef& original_graph_def,
return Status::OK();
}
+Status RewriteInputsAsPlaceholders(const TransformFuncContext& context,
+ GraphDef* graph_def) {
+ std::unordered_set<string> input_names;
+ for (const string& input_name : context.input_names) {
+ input_names.emplace(ParseTensorName(input_name).first);
+ }
+
+ for (NodeDef& node : *graph_def->mutable_node()) {
+ if (input_names.find(node.name()) == input_names.end()) {
+ continue;
+ }
+ if (node.op() == "PlaceholderWithDefault") {
+ node.set_op("Placeholder");
+ node.clear_input();
+ } else if (node.op() != "Placeholder") {
+ return errors::InvalidArgument(
+ "Input '", node.name(),
+ "' was expected to be a Placeholder or PlaceholderWithDefault op, "
+ "but was ",
+ node.op());
+ }
+ }
+ return Status::OK();
+}
+
Status RemoveUnusedNodes(const GraphDef& input_graph_def,
const TransformFuncContext& context,
GraphDef* output_graph_def) {
@@ -165,6 +190,7 @@ Status RemoveUnusedNodes(const GraphDef& input_graph_def,
input_graph_def,
[&](const NodeDef& node) { return used_nodes.count(node.name()) > 0; },
output_graph_def);
+ TF_RETURN_IF_ERROR(RewriteInputsAsPlaceholders(context, output_graph_def));
return Status::OK();
}
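Editor's note: for readers more comfortable with the Python proto API than with the C++ transform, here is a rough analogue of the new `RewriteInputsAsPlaceholders` pass; the function and variable names are illustrative, not part of the tool.

```python
from tensorflow.core.framework import graph_pb2


def rewrite_inputs_as_placeholders(input_names, graph_def):
  """Force fed input nodes of a graph_pb2.GraphDef to be plain Placeholders."""
  names = {name.split(':')[0] for name in input_names}
  for node in graph_def.node:
    if node.name not in names:
      continue
    if node.op == 'PlaceholderWithDefault':
      node.op = 'Placeholder'
      del node.input[:]  # the default-value input is no longer needed
    elif node.op != 'Placeholder':
      raise ValueError(
          "Input '%s' was expected to be a Placeholder or "
          "PlaceholderWithDefault op, but was %s" % (node.name, node.op))
```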
diff --git a/tensorflow/tools/graph_transforms/fold_constants_lib.h b/tensorflow/tools/graph_transforms/fold_constants_lib.h
index 8aefa6ae0f..0802ebb815 100644
--- a/tensorflow/tools/graph_transforms/fold_constants_lib.h
+++ b/tensorflow/tools/graph_transforms/fold_constants_lib.h
@@ -13,8 +13,8 @@ See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
-#ifndef TENSORFLOW_TOOLS_GRAPH_TRANSFORMS_FOLD_CONSTANTS_H_
-#define TENSORFLOW_TOOLS_GRAPH_TRANSFORMS_FOLD_CONSTANTS_H_
+#ifndef TENSORFLOW_TOOLS_GRAPH_TRANSFORMS_FOLD_CONSTANTS_LIB_H_
+#define TENSORFLOW_TOOLS_GRAPH_TRANSFORMS_FOLD_CONSTANTS_LIB_H_
#include "tensorflow/core/framework/graph.pb.h"
#include "tensorflow/core/lib/core/status.h"
@@ -40,4 +40,4 @@ Status RemoveUnusedNodes(const GraphDef& input_graph_def,
} // namespace graph_transforms
} // namespace tensorflow
-#endif // TENSORFLOW_TOOLS_GRAPH_TRANSFORMS_FOLD_CONSTANTS_H_
+#endif // TENSORFLOW_TOOLS_GRAPH_TRANSFORMS_FOLD_CONSTANTS_LIB_H_
diff --git a/tensorflow/tools/graph_transforms/fold_constants_test.cc b/tensorflow/tools/graph_transforms/fold_constants_test.cc
index a082399a87..dcdc3c2906 100644
--- a/tensorflow/tools/graph_transforms/fold_constants_test.cc
+++ b/tensorflow/tools/graph_transforms/fold_constants_test.cc
@@ -330,48 +330,6 @@ class ConstantFoldingTest : public ::testing::Test {
EXPECT_EQ(0, node_map.count("unused"));
}
- void TestRemoveUnusedNodesMultipleOutputs() {
- using namespace ::tensorflow::ops; // NOLINT(build/namespaces)
- auto root = tensorflow::Scope::NewRootScope();
-
- // a b
- // \ /
- // shape_n
- // \ /
- // c
- auto a = Placeholder(root.WithOpName("a"), DT_FLOAT);
- auto b = Placeholder(root.WithOpName("b"), DT_FLOAT);
- auto shape_n = ShapeN(root.WithOpName("shape_n"), {Output(a), Output(b)});
- auto c = Add(root.WithOpName("c"), shape_n[0], shape_n[1]);
-
- GraphDef graph_def;
- TF_ASSERT_OK(root.ToGraphDef(&graph_def));
- GraphDef result_graph_def;
- TF_ASSERT_OK(graph_transforms::RemoveUnusedNodes(
- graph_def, {{shape_n[0].name()}, {"c"}}, &result_graph_def));
-
- // Only one output of shape_n node is fed input. Hence the graph search
- // should propagate to inputs of shape_n. Nothing to remove here.
- std::map<string, const NodeDef*> node_map;
- graph_transforms::MapNamesToNodes(result_graph_def, &node_map);
- EXPECT_EQ(1, node_map.count("a"));
- EXPECT_EQ(1, node_map.count("b"));
- EXPECT_EQ(1, node_map.count("c"));
-
- result_graph_def.Clear();
- TF_ASSERT_OK(graph_transforms::RemoveUnusedNodes(
- graph_def, {{shape_n[0].name(), shape_n[1].name()}, {"c"}},
- &result_graph_def));
-
- // Both outputs of shape_n node are fed inputs. shape_n does not function
- // and inputs to shape_n should be removed.
- node_map.clear();
- graph_transforms::MapNamesToNodes(result_graph_def, &node_map);
- EXPECT_EQ(0, node_map.count("a"));
- EXPECT_EQ(0, node_map.count("b"));
- EXPECT_EQ(1, node_map.count("c"));
- }
-
void TestMaxConstantSizeInBytes() {
auto root = tensorflow::Scope::NewRootScope();
@@ -431,10 +389,6 @@ TEST_F(ConstantFoldingTest, TestReplaceSendRecvsPrefixNames) {
TEST_F(ConstantFoldingTest, TestRemoveUnusedNodes) { TestRemoveUnusedNodes(); }
-TEST_F(ConstantFoldingTest, TestRemoveUnusedNodesMultipleOutputs) {
- TestRemoveUnusedNodesMultipleOutputs();
-}
-
TEST_F(ConstantFoldingTest, TestMaxConstantSizeInBytes) {
TestMaxConstantSizeInBytes();
}
diff --git a/tensorflow/tools/graph_transforms/fold_old_batch_norms.cc b/tensorflow/tools/graph_transforms/fold_old_batch_norms.cc
index d86f65325b..156636ab82 100644
--- a/tensorflow/tools/graph_transforms/fold_old_batch_norms.cc
+++ b/tensorflow/tools/graph_transforms/fold_old_batch_norms.cc
@@ -159,6 +159,9 @@ Status FuseScaleOffsetToConvWeights(const std::vector<float>& scale_values,
NodeDef bias_add_node;
bias_add_node.set_op("BiasAdd");
bias_add_node.set_name(conv_output_name);
+ if (conv_node.attr().count("data_format") > 0) {
+ CopyNodeAttr(conv_node, "data_format", "data_format", &bias_add_node);
+ }
CopyNodeAttr(conv_node, "T", "T", &bias_add_node);
AddNodeInput(conv_node.name(), &bias_add_node);
AddNodeInput(bias_offset_node.name(), &bias_add_node);
diff --git a/tensorflow/tools/graph_transforms/fold_old_batch_norms_test.cc b/tensorflow/tools/graph_transforms/fold_old_batch_norms_test.cc
index 7651a03fe5..435f46c107 100644
--- a/tensorflow/tools/graph_transforms/fold_old_batch_norms_test.cc
+++ b/tensorflow/tools/graph_transforms/fold_old_batch_norms_test.cc
@@ -191,7 +191,7 @@ class FoldOldBatchNormsTest : public ::testing::Test {
std::vector<Tensor> fused_outputs;
TF_ASSERT_OK(fused_session->Run({}, {"output"}, {}, &fused_outputs));
- test::ExpectTensorNear<float>(original_outputs[0], fused_outputs[0], 1e-5);
+ test::ExpectTensorNear<float>(original_outputs[0], fused_outputs[0], 2e-5);
for (const NodeDef& node : fused_graph_def.node()) {
EXPECT_NE("FusedBatchNorm", node.op());
diff --git a/tensorflow/tools/graph_transforms/freeze_requantization_ranges.cc b/tensorflow/tools/graph_transforms/freeze_requantization_ranges.cc
index f401723808..d97496cbeb 100644
--- a/tensorflow/tools/graph_transforms/freeze_requantization_ranges.cc
+++ b/tensorflow/tools/graph_transforms/freeze_requantization_ranges.cc
@@ -92,9 +92,8 @@ Status ExtractMinMaxRecords(const string& log_file_name,
if (!str_util::EndsWith(name_string, print_suffix)) {
continue;
}
- string name =
- name_string.substr(0, name_string.size() - print_suffix.size())
- .ToString();
+ string name(
+ name_string.substr(0, name_string.size() - print_suffix.size()));
records->push_back({name, min, max});
}
return Status::OK();
diff --git a/tensorflow/tools/graph_transforms/sparsify_gather_test.cc b/tensorflow/tools/graph_transforms/sparsify_gather_test.cc
index d41321c9a6..b8d6ba00de 100644
--- a/tensorflow/tools/graph_transforms/sparsify_gather_test.cc
+++ b/tensorflow/tools/graph_transforms/sparsify_gather_test.cc
@@ -42,8 +42,8 @@ class SparsifyGatherTest : public ::testing::Test {
const std::vector<NodeDef*>& inputs, GraphDef* graph_def,
bool control_dep = false) {
NodeDef* node_def = graph_def->add_node();
- node_def->set_name(name.ToString());
- node_def->set_op(op.ToString());
+ node_def->set_name(string(name));
+ node_def->set_op(string(op));
if (!control_dep) {
std::for_each(inputs.begin(), inputs.end(), [&node_def](NodeDef* input) {
node_def->add_input(input->name());
diff --git a/tensorflow/tools/graph_transforms/transform_graph.cc b/tensorflow/tools/graph_transforms/transform_graph.cc
index 8ce8f5e24b..7efe450710 100644
--- a/tensorflow/tools/graph_transforms/transform_graph.cc
+++ b/tensorflow/tools/graph_transforms/transform_graph.cc
@@ -65,19 +65,19 @@ Status ParseTransformParameters(const string& transforms_string,
.GetResult(&remaining, &transform_name);
if (!found_transform_name) {
return errors::InvalidArgument("Looking for transform name, but found ",
- remaining.ToString().c_str());
+ string(remaining).c_str());
}
if (Scanner(remaining).OneLiteral("(").GetResult(&remaining, &match)) {
state = TRANSFORM_PARAM_NAME;
} else {
// Add a transform with no parameters.
- params_list->push_back({transform_name.ToString(), func_parameters});
+ params_list->push_back({string(transform_name), func_parameters});
transform_name = "";
state = TRANSFORM_NAME;
}
} else if (state == TRANSFORM_PARAM_NAME) {
if (Scanner(remaining).OneLiteral(")").GetResult(&remaining, &match)) {
- params_list->push_back({transform_name.ToString(), func_parameters});
+ params_list->push_back({string(transform_name), func_parameters});
transform_name = "";
state = TRANSFORM_NAME;
} else {
@@ -92,13 +92,13 @@ Status ParseTransformParameters(const string& transforms_string,
if (!found_parameter_name) {
return errors::InvalidArgument(
"Looking for parameter name, but found ",
- remaining.ToString().c_str());
+ string(remaining).c_str());
}
if (Scanner(remaining).OneLiteral("=").GetResult(&remaining, &match)) {
state = TRANSFORM_PARAM_VALUE;
} else {
return errors::InvalidArgument("Looking for =, but found ",
- remaining.ToString().c_str());
+ string(remaining).c_str());
}
}
} else if (state == TRANSFORM_PARAM_VALUE) {
@@ -120,10 +120,9 @@ Status ParseTransformParameters(const string& transforms_string,
}
if (!found_parameter_value) {
return errors::InvalidArgument("Looking for parameter name, but found ",
- remaining.ToString().c_str());
+ string(remaining).c_str());
}
- func_parameters[parameter_name.ToString()].push_back(
- parameter_value.ToString());
+ func_parameters[string(parameter_name)].emplace_back(parameter_value);
// Eat up any trailing quotes.
Scanner(remaining).ZeroOrOneLiteral("\"").GetResult(&remaining, &match);
Scanner(remaining).ZeroOrOneLiteral("'").GetResult(&remaining, &match);
@@ -141,7 +140,7 @@ std::string ExpandPath(const std::string& path_string) {
return path_string;
}
- const char* home = NULL;
+ const char* home = nullptr;
std::string::size_type prefix = path_string.find_first_of('/');
if (path_string.length() == 1 || prefix == 1) {
// The value of $HOME, e.g., ~/foo
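Editor's note: for context, the transforms string this parser consumes is a space-separated list of transform names with optional `name=value` parameters. A sketch of the parse result for one such string (parameter values are illustrative; the data shape follows the `params_list`/`func_parameters` structures above):

```python
# Input, in the format documented in graph_transforms/README.md:
transforms = 'fold_constants(ignore_errors=true) strip_unused_nodes'

# ParseTransformParameters produces, roughly, a list of
# (transform_name, {parameter_name: [value, ...]}) pairs:
expected = [
    ('fold_constants', {'ignore_errors': ['true']}),
    ('strip_unused_nodes', {}),
]
```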
diff --git a/tensorflow/tools/graph_transforms/transform_utils.cc b/tensorflow/tools/graph_transforms/transform_utils.cc
index 367048965d..c715380aae 100644
--- a/tensorflow/tools/graph_transforms/transform_utils.cc
+++ b/tensorflow/tools/graph_transforms/transform_utils.cc
@@ -93,7 +93,7 @@ void NodeNamePartsFromInput(const string& input_name, string* prefix,
} else {
*prefix = "";
}
- *node_name = node_name_piece.ToString();
+ *node_name = string(node_name_piece);
}
string NodeNameFromInput(const string& input_name) {
@@ -247,9 +247,16 @@ Status SortByExecutionOrder(const GraphDef& input_graph_def,
}
}
- if (processed < input_graph_def.node_size()) {
- return errors::InvalidArgument(input_graph_def.node_size() - processed,
- " nodes in a cycle");
+ if (processed < num_nodes) {
+ LOG(WARNING) << "IN " << __func__ << (num_nodes - processed)
+ << " NODES IN A CYCLE";
+ for (int64 i = 0; i < num_nodes; i++) {
+ if (pending_count[i] != 0) {
+ LOG(WARNING) << "PENDING: " << SummarizeNodeDef(input_graph_def.node(i))
+                     << " WITH PENDING COUNT = " << pending_count[i];
+ }
+ }
+ return errors::InvalidArgument(num_nodes - processed, " nodes in a cycle");
}
return Status::OK();
}
diff --git a/tensorflow/tools/lib_package/BUILD b/tensorflow/tools/lib_package/BUILD
index 0ede8c6370..b450bc42c5 100644
--- a/tensorflow/tools/lib_package/BUILD
+++ b/tensorflow/tools/lib_package/BUILD
@@ -4,7 +4,9 @@
package(default_visibility = ["//visibility:private"])
load("@bazel_tools//tools/build_defs/pkg:pkg.bzl", "pkg_tar")
+load("@local_config_syslibs//:build_defs.bzl", "if_not_system_lib")
load("//tensorflow:tensorflow.bzl", "tf_binary_additional_srcs")
+load("//tensorflow:tensorflow.bzl", "if_cuda")
load("//third_party/mkl:build_defs.bzl", "if_mkl")
genrule(
@@ -113,20 +115,17 @@ genrule(
"//third_party/hadoop:LICENSE.txt",
"//third_party/eigen3:LICENSE",
"//third_party/fft2d:LICENSE",
- "@aws//:LICENSE",
"@boringssl//:LICENSE",
"@com_googlesource_code_re2//:LICENSE",
- "@cub_archive//:LICENSE.TXT",
"@curl//:COPYING",
+ "@double_conversion//:LICENSE",
"@eigen_archive//:COPYING.MPL2",
"@farmhash_archive//:COPYING",
"@fft2d//:fft/readme.txt",
"@gemmlowp//:LICENSE",
"@gif_archive//:COPYING",
"@highwayhash//:LICENSE",
- "@jemalloc//:COPYING",
"@jpeg//:LICENSE.md",
- "@libxsmm_archive//:LICENSE",
"@llvm//:LICENSE.TXT",
"@lmdb//:LICENSE",
"@local_config_sycl//sycl:LICENSE.text",
@@ -136,9 +135,42 @@ genrule(
"@protobuf_archive//:LICENSE",
"@snappy//:COPYING",
"@zlib_archive//:zlib.h",
- ] + if_mkl([
+ ] + select({
+ "//tensorflow:with_aws_support": [
+ "@aws//:LICENSE",
+ ],
+ "//conditions:default": [],
+ }) + select({
+ "//tensorflow:with_gcp_support": [
+ "@com_github_googlecloudplatform_google_cloud_cpp//:LICENSE",
+ ],
+ "//conditions:default": [],
+ }) + select({
+ "//tensorflow:with_jemalloc_linux_x86_64": [
+ "@jemalloc//:COPYING",
+ ],
+ "//tensorflow:with_jemalloc_linux_ppc64le": [
+ "@jemalloc//:COPYING",
+ ],
+ "//conditions:default": [],
+ }) + select({
+ "//tensorflow/core/kernels:xsmm": [
+ "@libxsmm_archive//:LICENSE.md",
+ ],
+ "//conditions:default": [],
+ }) + if_cuda([
+ "@cub_archive//:LICENSE.TXT",
+ ]) + if_mkl([
"//third_party/mkl:LICENSE",
- ]),
+ "//third_party/mkl_dnn:LICENSE",
+ ]) + if_not_system_lib(
+ "grpc",
+ [
+ "@grpc//:LICENSE",
+ "@grpc//third_party/nanopb:LICENSE.txt",
+ "@grpc//third_party/address_sorting:LICENSE",
+ ],
+ ),
outs = ["include/tensorflow/c/LICENSE"],
cmd = "$(location :concat_licenses.sh) $(SRCS) >$@",
tools = [":concat_licenses.sh"],
@@ -150,20 +182,17 @@ genrule(
"//third_party/hadoop:LICENSE.txt",
"//third_party/eigen3:LICENSE",
"//third_party/fft2d:LICENSE",
- "@aws//:LICENSE",
"@boringssl//:LICENSE",
"@com_googlesource_code_re2//:LICENSE",
- "@cub_archive//:LICENSE.TXT",
"@curl//:COPYING",
+ "@double_conversion//:LICENSE",
"@eigen_archive//:COPYING.MPL2",
"@farmhash_archive//:COPYING",
"@fft2d//:fft/readme.txt",
"@gemmlowp//:LICENSE",
"@gif_archive//:COPYING",
"@highwayhash//:LICENSE",
- "@jemalloc//:COPYING",
"@jpeg//:LICENSE.md",
- "@libxsmm_archive//:LICENSE",
"@llvm//:LICENSE.TXT",
"@lmdb//:LICENSE",
"@local_config_sycl//sycl:LICENSE.text",
@@ -173,8 +202,34 @@ genrule(
"@protobuf_archive//:LICENSE",
"@snappy//:COPYING",
"@zlib_archive//:zlib.h",
- ] + if_mkl([
+ ] + select({
+ "//tensorflow:with_aws_support": [
+ "@aws//:LICENSE",
+ ],
+ "//conditions:default": [],
+ }) + select({
+ "//tensorflow:with_gcp_support": [
+ "@com_github_googlecloudplatform_google_cloud_cpp//:LICENSE",
+ ],
+ "//conditions:default": [],
+ }) + select({
+ "//tensorflow:with_jemalloc_linux_x86_64": [
+ "@jemalloc//:COPYING",
+ ],
+ "//tensorflow:with_jemalloc_linux_ppc64le": [
+ "@jemalloc//:COPYING",
+ ],
+ "//conditions:default": [],
+ }) + select({
+ "//tensorflow/core/kernels:xsmm": [
+ "@libxsmm_archive//:LICENSE.md",
+ ],
+ "//conditions:default": [],
+ }) + if_cuda([
+ "@cub_archive//:LICENSE.TXT",
+ ]) + if_mkl([
"//third_party/mkl:LICENSE",
+ "//third_party/mkl_dnn:LICENSE",
]),
outs = ["include/tensorflow/jni/LICENSE"],
cmd = "$(location :concat_licenses.sh) $(SRCS) >$@",
diff --git a/tensorflow/tools/lib_package/README.md b/tensorflow/tools/lib_package/README.md
index 7008148260..cb6aef2624 100644
--- a/tensorflow/tools/lib_package/README.md
+++ b/tensorflow/tools/lib_package/README.md
@@ -35,8 +35,8 @@ The following commands:
bazel test --config opt //tensorflow/tools/lib_package:libtensorflow_test
bazel build --config opt \
//tensorflow/tools/lib_package:libtensorflow_jni.tar.gz \
- //tensorflow/tools/lib_package:libtensorflow.jar \
- //tensorflow/tools/lib_package:libtensorflow-src.jar
+ //tensorflow/java:libtensorflow.jar \
+ //tensorflow/java:libtensorflow-src.jar
```
test and produce the following:
@@ -44,9 +44,9 @@ test and produce the following:
- The native library (`libtensorflow_jni.so`) packaged in an archive at:
`bazel-bin/tensorflow/tools/lib_package/libtensorflow_jni.tar.gz`
- The Java archive at:
- `bazel-bin/tensorflow/tools/lib_package/libtensorflow.jar`
+ `bazel-bin/tensorflow/java/libtensorflow.jar`
- The Java archive for Java sources at:
- `bazel-bin/tensorflow/tools/lib_package/libtensorflow-src.jar`
+ `bazel-bin/tensorflow/java/libtensorflow-src.jar`
## Release
diff --git a/tensorflow/tools/pip_package/BUILD b/tensorflow/tools/pip_package/BUILD
index 376644718f..12354a6ab2 100644
--- a/tensorflow/tools/pip_package/BUILD
+++ b/tensorflow/tools/pip_package/BUILD
@@ -9,10 +9,14 @@ load(
"if_windows",
"transitive_hdrs",
)
-load("//third_party/mkl:build_defs.bzl", "if_mkl")
+load("//third_party/mkl:build_defs.bzl", "if_mkl", "if_mkl_ml")
load("//tensorflow:tensorflow.bzl", "if_cuda")
-load("@local_config_tensorrt//:build_defs.bzl", "if_tensorrt")
+load("@local_config_syslibs//:build_defs.bzl", "if_not_system_lib")
load("//tensorflow/core:platform/default/build_config_root.bzl", "tf_additional_license_deps")
+load(
+ "//third_party/ngraph:build_defs.bzl",
+ "if_ngraph",
+)
# This returns a list of headers of all public header libraries (e.g.,
# framework, lib), and all of the transitive dependencies of those
@@ -56,17 +60,15 @@ COMMON_PIP_DEPS = [
":included_headers",
"//tensorflow:tensorflow_py",
"//tensorflow/contrib/autograph:autograph",
- "//tensorflow/contrib/autograph/converters:converters",
- "//tensorflow/contrib/autograph/converters:test_lib",
- "//tensorflow/contrib/autograph/impl:impl",
- "//tensorflow/contrib/autograph/pyct:pyct",
- "//tensorflow/contrib/autograph/pyct/static_analysis:static_analysis",
"//tensorflow/contrib/boosted_trees:boosted_trees_pip",
"//tensorflow/contrib/cluster_resolver:cluster_resolver_pip",
- "//tensorflow/contrib/data/python/kernel_tests:dataset_serialization_test",
+ "//tensorflow/contrib/compiler:xla",
+ "//tensorflow/contrib/constrained_optimization:constrained_optimization_pip",
+ "//tensorflow/contrib/data/python/kernel_tests/serialization:dataset_serialization_test_base",
+ "//tensorflow/contrib/data/python/kernel_tests:stats_dataset_test_base",
+ "//tensorflow/contrib/data/python/kernel_tests:test_utils",
"//tensorflow/contrib/data/python/ops:contrib_op_loader",
"//tensorflow/contrib/eager/python/examples:examples_pip",
- "//tensorflow/contrib/eager/python:checkpointable_utils",
"//tensorflow/contrib/eager/python:evaluator",
"//tensorflow/contrib/gan:gan",
"//tensorflow/contrib/graph_editor:graph_editor_pip",
@@ -74,7 +76,10 @@ COMMON_PIP_DEPS = [
"//tensorflow/contrib/labeled_tensor:labeled_tensor_pip",
"//tensorflow/contrib/nn:nn_py",
"//tensorflow/contrib/predictor:predictor_pip",
+ "//tensorflow/contrib/proto:proto",
"//tensorflow/contrib/receptive_field:receptive_field_pip",
+ "//tensorflow/contrib/rate:rate",
+ "//tensorflow/contrib/rpc:rpc_pip",
"//tensorflow/contrib/session_bundle:session_bundle_pip",
"//tensorflow/contrib/signal:signal_py",
"//tensorflow/contrib/signal:test_util",
@@ -88,6 +93,17 @@ COMMON_PIP_DEPS = [
"//tensorflow/contrib/timeseries:timeseries_pip",
"//tensorflow/contrib/tpu",
"//tensorflow/examples/tutorials/mnist:package",
+ # "//tensorflow/python/autograph/converters:converters",
+ # "//tensorflow/python/autograph/core:core",
+ "//tensorflow/python/autograph/core:test_lib",
+ # "//tensorflow/python/autograph/impl:impl",
+ # "//tensorflow/python/autograph/lang:lang",
+ # "//tensorflow/python/autograph/operators:operators",
+ # "//tensorflow/python/autograph/pyct:pyct",
+ # "//tensorflow/python/autograph/pyct/testing:testing",
+ # "//tensorflow/python/autograph/pyct/static_analysis:static_analysis",
+ "//tensorflow/python/autograph/pyct/common_transformers:common_transformers",
+ "//tensorflow/python:cond_v2",
"//tensorflow/python:distributed_framework_test_lib",
"//tensorflow/python:meta_graph_testdata",
"//tensorflow/python:spectral_ops_test_util",
@@ -97,7 +113,9 @@ COMMON_PIP_DEPS = [
"//tensorflow/python/kernel_tests/testdata:self_adjoint_eig_op_test_files",
"//tensorflow/python/saved_model:saved_model",
"//tensorflow/python/tools:tools_pip",
+ "//tensorflow/python/tools/api/generator:create_python_api",
"//tensorflow/python:test_ops",
+ "//tensorflow/python:while_v2",
"//tensorflow/tools/dist_test/server:grpc_tensorflow_server",
]
@@ -121,12 +139,11 @@ filegroup(
"@absl_py//absl/flags:LICENSE",
"@arm_neon_2_x86_sse//:LICENSE",
"@astor_archive//:LICENSE",
- "@aws//:LICENSE",
"@boringssl//:LICENSE",
"@com_google_absl//:LICENSE",
"@com_googlesource_code_re2//:LICENSE",
- "@cub_archive//:LICENSE.TXT",
"@curl//:COPYING",
+ "@double_conversion//:LICENSE",
"@eigen_archive//:COPYING.MPL2",
"@farmhash_archive//:COPYING",
"@fft2d//:fft/readme.txt",
@@ -134,17 +151,10 @@ filegroup(
"@gast_archive//:PKG-INFO",
"@gemmlowp//:LICENSE",
"@gif_archive//:COPYING",
- "@grpc//:LICENSE",
"@highwayhash//:LICENSE",
- "@jemalloc//:COPYING",
"@jpeg//:LICENSE.md",
- "@kafka//:LICENSE",
- "@libxsmm_archive//:LICENSE",
"@lmdb//:LICENSE",
- "@local_config_nccl//:LICENSE",
"@local_config_sycl//sycl:LICENSE.text",
- "@grpc//third_party/nanopb:LICENSE.txt",
- "@grpc//third_party/address_sorting:LICENSE",
"@nasm//:LICENSE",
"@nsync//:LICENSE",
"@pcre//:LICENCE",
@@ -156,8 +166,53 @@ filegroup(
"@termcolor_archive//:COPYING.txt",
"@zlib_archive//:zlib.h",
"@org_python_pypi_backports_weakref//:LICENSE",
- ] + if_mkl([
+ ] + select({
+ "//tensorflow:with_aws_support": [
+ "@aws//:LICENSE",
+ ],
+ "//conditions:default": [],
+ }) + select({
+ "//tensorflow:with_gcp_support": [
+ "@com_github_googleapis_googleapis//:LICENSE",
+ "@com_github_googlecloudplatform_google_cloud_cpp//:LICENSE",
+ ],
+ "//conditions:default": [],
+ }) + select({
+ "//tensorflow:with_jemalloc_linux_x86_64": [
+ "@jemalloc//:COPYING",
+ ],
+ "//tensorflow:with_jemalloc_linux_ppc64le": [
+ "@jemalloc//:COPYING",
+ ],
+ "//conditions:default": [],
+ }) + select({
+ "//tensorflow:with_kafka_support": [
+ "@kafka//:LICENSE",
+ ],
+ "//conditions:default": [],
+ }) + select({
+ "//tensorflow/core/kernels:xsmm": [
+ "@libxsmm_archive//:LICENSE.md",
+ ],
+ "//conditions:default": [],
+ }) + if_cuda([
+ "@cub_archive//:LICENSE.TXT",
+ "@local_config_nccl//:LICENSE",
+ ]) + if_mkl([
"//third_party/mkl:LICENSE",
+ "//third_party/mkl_dnn:LICENSE",
+ ]) + if_not_system_lib(
+ "grpc",
+ [
+ "@grpc//:LICENSE",
+ "@grpc//third_party/nanopb:LICENSE.txt",
+ "@grpc//third_party/address_sorting:LICENSE",
+ ],
+ ) + if_ngraph([
+ "@ngraph//:LICENSE",
+ "@ngraph_tf//:LICENSE",
+ "@nlohmann_json_lib//:LICENSE.MIT",
+ "@tbb//:LICENSE",
]) + tf_additional_license_deps(),
)
@@ -165,19 +220,19 @@ sh_binary(
name = "build_pip_package",
srcs = ["build_pip_package.sh"],
data = select({
- "//tensorflow:windows": [":simple_console_for_windows"],
- "//tensorflow:windows_msvc": [":simple_console_for_windows"],
+ "//tensorflow:windows": [
+ ":simple_console_for_windows",
+ "//tensorflow/contrib/lite/python:interpreter_test_data",
+ "//tensorflow/contrib/lite/python:tflite_convert",
+ "//tensorflow/contrib/lite/toco/python:toco_from_protos",
+ ],
"//conditions:default": COMMON_PIP_DEPS + [
":simple_console",
"//tensorflow/contrib/lite/python:interpreter_test_data",
- "//tensorflow/contrib/lite/python:tf_lite_py_pip",
- "//tensorflow/contrib/lite/toco:toco",
- "//tensorflow/contrib/lite/toco/python:toco_wrapper",
+ "//tensorflow/contrib/lite/python:tflite_convert",
"//tensorflow/contrib/lite/toco/python:toco_from_protos",
],
- }) + if_mkl(["//third_party/mkl:intel_binary_blob"]) + if_tensorrt([
- "//tensorflow/contrib/tensorrt:init_py",
- ]),
+ }) + if_mkl_ml(["//third_party/mkl:intel_binary_blob"]),
)
# A genrule for generating a marker file for the pip package on Windows
diff --git a/tensorflow/tools/pip_package/MANIFEST.in b/tensorflow/tools/pip_package/MANIFEST.in
index 86c5e4776d..c4b4af93b8 100644
--- a/tensorflow/tools/pip_package/MANIFEST.in
+++ b/tensorflow/tools/pip_package/MANIFEST.in
@@ -1,5 +1,6 @@
include README
recursive-include * *.py
+recursive-include * *.pyd
recursive-include * *.so
recursive-include * *.dll
recursive-include * *.lib
diff --git a/tensorflow/tools/pip_package/build_pip_package.sh b/tensorflow/tools/pip_package/build_pip_package.sh
index 8f0cf8c3d1..c62271c5cb 100755
--- a/tensorflow/tools/pip_package/build_pip_package.sh
+++ b/tensorflow/tools/pip_package/build_pip_package.sh
@@ -17,66 +17,50 @@
set -e
+function is_absolute {
+ [[ "$1" = /* ]] || [[ "$1" =~ ^[a-zA-Z]:[/\\].* ]]
+}
+
function real_path() {
- [[ $1 = /* ]] && echo "$1" || echo "$PWD/${1#./}"
+ is_absolute "$1" && echo "$1" || echo "$PWD/${1#./}"
}
function cp_external() {
local src_dir=$1
local dest_dir=$2
- for f in `find "$src_dir" -maxdepth 1 -mindepth 1 ! -name '*local_config_cuda*' ! -name '*org_tensorflow*'`; do
- cp -R "$f" "$dest_dir"
+
+ pushd .
+ cd "$src_dir"
+ for f in `find . ! -type d ! -name '*.py' ! -path '*local_config_cuda*' ! -path '*local_config_tensorrt*' ! -path '*local_config_syslibs*' ! -path '*org_tensorflow*'`; do
+ mkdir -p "${dest_dir}/$(dirname ${f})"
+ cp "${f}" "${dest_dir}/$(dirname ${f})/"
done
+ popd
+
mkdir -p "${dest_dir}/local_config_cuda/cuda/cuda/"
cp "${src_dir}/local_config_cuda/cuda/cuda/cuda_config.h" "${dest_dir}/local_config_cuda/cuda/cuda/"
}
PLATFORM="$(uname -s | tr 'A-Z' 'a-z')"
function is_windows() {
- # On windows, the shell script is actually running in msys
- if [[ "${PLATFORM}" =~ msys_nt* ]]; then
+ if [[ "${PLATFORM}" =~ (cygwin|mingw32|mingw64|msys)_nt* ]]; then
true
else
false
fi
}
-function main() {
+function prepare_src() {
if [ $# -lt 1 ] ; then
echo "No destination dir provided"
exit 1
fi
- DEST=$(real_path $1)
- TMPDIR=$(mktemp -d -t tmp.XXXXXXXXXX)
-
- PKG_NAME_FLAG=""
- GPU_BUILD=0
- NIGHTLY_BUILD=0
- while true; do
- if [[ "$1" == "--nightly_flag" ]]; then
- NIGHTLY_BUILD=1
- elif [[ "$1" == "--gpu" ]]; then
- GPU_BUILD=1
- elif [[ "$1" == "--gpudirect" ]]; then
- PKG_NAME_FLAG="--project_name tensorflow_gpudirect"
- fi
- shift
-
- if [[ -z "$1" ]]; then
- break
- fi
- done
-
- if [[ ${NIGHTLY_BUILD} == "1" && ${GPU_BUILD} == "1" ]]; then
- PKG_NAME_FLAG="--project_name tf_nightly_gpu"
- elif [[ ${NIGHTLY_BUILD} == "1" ]]; then
- PKG_NAME_FLAG="--project_name tf_nightly"
- elif [[ ${GPU_BUILD} == "1" ]]; then
- PKG_NAME_FLAG="--project_name tensorflow_gpu"
- fi
+ TMPDIR="$1"
+ mkdir -p "$TMPDIR"
+ EXTERNAL_INCLUDES="${TMPDIR}/tensorflow/include/external"
- echo $(date) : "=== Using tmpdir: ${TMPDIR}"
+ echo $(date) : "=== Preparing sources in dir: ${TMPDIR}"
if [ ! -d bazel-bin/tensorflow ]; then
echo "Could not find bazel-bin. Did you run from the root of the build tree?"
@@ -93,10 +77,9 @@ function main() {
cp -R \
bazel-bin/tensorflow/tools/pip_package/simple_console_for_window_unzip/runfiles/org_tensorflow/tensorflow \
"${TMPDIR}"
- mkdir "${TMPDIR}/external"
cp_external \
bazel-bin/tensorflow/tools/pip_package/simple_console_for_window_unzip/runfiles \
- "${TMPDIR}/external"
+ "${EXTERNAL_INCLUDES}/"
RUNFILES=bazel-bin/tensorflow/tools/pip_package/simple_console_for_window_unzip/runfiles/org_tensorflow
else
RUNFILES=bazel-bin/tensorflow/tools/pip_package/build_pip_package.runfiles/org_tensorflow
@@ -105,10 +88,9 @@ function main() {
cp -R \
bazel-bin/tensorflow/tools/pip_package/build_pip_package.runfiles/org_tensorflow/tensorflow \
"${TMPDIR}"
- mkdir "${TMPDIR}/external"
cp_external \
bazel-bin/tensorflow/tools/pip_package/build_pip_package.runfiles/org_tensorflow/external \
- "${TMPDIR}/external"
+ "${EXTERNAL_INCLUDES}"
# Copy MKL libs over so they can be loaded at runtime
so_lib_dir=$(ls $RUNFILES | grep solib) || true
if [ -n "${so_lib_dir}" ]; then
@@ -123,10 +105,9 @@ function main() {
cp -R \
bazel-bin/tensorflow/tools/pip_package/build_pip_package.runfiles/org_tensorflow/tensorflow \
"${TMPDIR}"
- mkdir "${TMPDIR}/external"
cp_external \
bazel-bin/tensorflow/tools/pip_package/build_pip_package.runfiles \
- "${TMPDIR}/external"
+ "${EXTERNAL_INCLUDES}"
# Copy MKL libs over so they can be loaded at runtime
so_lib_dir=$(ls $RUNFILES | grep solib) || true
if [ -n "${so_lib_dir}" ]; then
@@ -139,26 +120,35 @@ function main() {
fi
mkdir "${TMPDIR}/tensorflow/aux-bin"
# Install toco as a binary in aux-bin.
- # TODO(aselle): Re-enable this when we find a way to do it without doubling
- # the whl size (over the limit).
- # cp bazel-bin/tensorflow/contrib/lite/toco/toco ${TMPDIR}/tensorflow/aux-bin/
+ cp bazel-bin/tensorflow/contrib/lite/python/tflite_convert ${TMPDIR}/tensorflow/aux-bin/
fi
# protobuf pip package doesn't ship with header files. Copy the headers
# over so user defined ops can be compiled.
mkdir -p ${TMPDIR}/google
mkdir -p ${TMPDIR}/third_party
- pushd ${RUNFILES%org_tensorflow}
+ pushd ${RUNFILES%org_tensorflow} > /dev/null
for header in $(find protobuf_archive -name \*.h); do
mkdir -p "${TMPDIR}/google/$(dirname ${header})"
cp "$header" "${TMPDIR}/google/$(dirname ${header})/"
done
- popd
+ popd > /dev/null
cp -R $RUNFILES/third_party/eigen3 ${TMPDIR}/third_party
cp tensorflow/tools/pip_package/MANIFEST.in ${TMPDIR}
cp tensorflow/tools/pip_package/README ${TMPDIR}
cp tensorflow/tools/pip_package/setup.py ${TMPDIR}
+}
+
+function build_wheel() {
+ if [ $# -lt 2 ] ; then
+ echo "No src and dest dir provided"
+ exit 1
+ fi
+
+ TMPDIR="$1"
+ DEST="$2"
+ PKG_NAME_FLAG="$3"
# Before we leave the top-level directory, make sure we know how to
# call python.
@@ -166,15 +156,110 @@ function main() {
source tools/python_bin_path.sh
fi
- pushd ${TMPDIR}
+ pushd ${TMPDIR} > /dev/null
rm -f MANIFEST
echo $(date) : "=== Building wheel"
"${PYTHON_BIN_PATH:-python}" setup.py bdist_wheel ${PKG_NAME_FLAG} >/dev/null
mkdir -p ${DEST}
cp dist/* ${DEST}
- popd
- rm -rf ${TMPDIR}
+ popd > /dev/null
echo $(date) : "=== Output wheel file is in: ${DEST}"
}
+function usage() {
+ echo "Usage:"
+ echo "$0 [--src srcdir] [--dst dstdir] [options]"
+ echo "$0 dstdir [options]"
+ echo ""
+ echo " --src prepare sources in srcdir"
+ echo " will use temporary dir if not specified"
+ echo ""
+ echo " --dst build wheel in dstdir"
+ echo " if dstdir is not set do not build, only prepare sources"
+ echo ""
+ echo " Options:"
+ echo " --project_name <name> set project name to name"
+ echo " --gpu build tensorflow_gpu"
+ echo " --gpudirect build tensorflow_gpudirect"
+ echo " --nightly_flag build tensorflow nightly"
+ echo ""
+ exit 1
+}
+
+function main() {
+ PKG_NAME_FLAG=""
+ PROJECT_NAME=""
+ GPU_BUILD=0
+ NIGHTLY_BUILD=0
+ SRCDIR=""
+ DSTDIR=""
+ CLEANSRC=1
+ while true; do
+ if [[ "$1" == "--help" ]]; then
+ usage
+ exit 1
+ elif [[ "$1" == "--nightly_flag" ]]; then
+ NIGHTLY_BUILD=1
+ elif [[ "$1" == "--gpu" ]]; then
+ GPU_BUILD=1
+ elif [[ "$1" == "--gpudirect" ]]; then
+ PKG_NAME_FLAG="--project_name tensorflow_gpudirect"
+ elif [[ "$1" == "--project_name" ]]; then
+ shift
+ if [[ -z "$1" ]]; then
+ break
+ fi
+ PROJECT_NAME="$1"
+ elif [[ "$1" == "--src" ]]; then
+ shift
+ SRCDIR="$(real_path $1)"
+ CLEANSRC=0
+ elif [[ "$1" == "--dst" ]]; then
+ shift
+ DSTDIR="$(real_path $1)"
+ else
+ DSTDIR="$(real_path $1)"
+ fi
+ shift
+
+ if [[ -z "$1" ]]; then
+ break
+ fi
+ done
+
+ if [[ -z "$DSTDIR" ]] && [[ -z "$SRCDIR" ]]; then
+ echo "No destination dir provided"
+ usage
+ exit 1
+ fi
+
+ if [[ -z "$SRCDIR" ]]; then
+ # make temp srcdir if none set
+ SRCDIR="$(mktemp -d -t tmp.XXXXXXXXXX)"
+ fi
+
+ prepare_src "$SRCDIR"
+
+ if [[ -z "$DSTDIR" ]]; then
+ # only want to prepare sources
+ exit
+ fi
+
+ if [[ -n ${PROJECT_NAME} ]]; then
+ PKG_NAME_FLAG="--project_name ${PROJECT_NAME}"
+ elif [[ ${NIGHTLY_BUILD} == "1" && ${GPU_BUILD} == "1" ]]; then
+ PKG_NAME_FLAG="--project_name tf_nightly_gpu"
+ elif [[ ${NIGHTLY_BUILD} == "1" ]]; then
+ PKG_NAME_FLAG="--project_name tf_nightly"
+ elif [[ ${GPU_BUILD} == "1" ]]; then
+ PKG_NAME_FLAG="--project_name tensorflow_gpu"
+ fi
+
+ build_wheel "$SRCDIR" "$DSTDIR" "$PKG_NAME_FLAG"
+
+ if [[ $CLEANSRC -ne 0 ]]; then
+ rm -rf "${TMPDIR}"
+ fi
+}
+
main "$@"
diff --git a/tensorflow/tools/pip_package/pip_smoke_test.py b/tensorflow/tools/pip_package/pip_smoke_test.py
index e2518f6cbf..bfc007bc39 100644
--- a/tensorflow/tools/pip_package/pip_smoke_test.py
+++ b/tensorflow/tools/pip_package/pip_smoke_test.py
@@ -30,15 +30,42 @@ os.chdir(os.path.abspath(os.path.join(os.path.dirname(__file__), "../../..")))
PIP_PACKAGE_QUERY_EXPRESSION = (
"deps(//tensorflow/tools/pip_package:build_pip_package)")
-# pylint: disable=g-backslash-continuation
-PY_TEST_QUERY_EXPRESSION = 'deps(\
- filter("^((?!benchmark).)*$",\
- kind(py_test,\
- //tensorflow/python/... \
- + //tensorflow/contrib/... \
- - //tensorflow/contrib/tensorboard/... \
- - attr(tags, "manual|no_pip", //tensorflow/...))), 1)'
-# pylint: enable=g-backslash-continuation
+
+def GetBuild(dir_base):
+ """Get the list of BUILD file all targets recursively startind at dir_base."""
+ items = []
+ for root, _, files in os.walk(dir_base):
+ for name in files:
+ if (name == "BUILD" and
+ root.find("tensorflow/contrib/lite/examples/android") == -1):
+ items.append("//" + root + ":all")
+ return items
+
+
+def BuildPyTestDependencies():
+ python_targets = GetBuild("tensorflow/python")
+ contrib_targets = GetBuild("tensorflow/contrib")
+ tensorboard_targets = GetBuild("tensorflow/contrib/tensorboard")
+ tensorflow_targets = GetBuild("tensorflow")
+ # Build list of test targets,
+  # python + contrib - tensorboard - attr(manual|no_pip)
+ targets = " + ".join(python_targets)
+ for t in contrib_targets:
+ targets += " + " + t
+ for t in tensorboard_targets:
+ targets += " - " + t
+ targets += ' - attr(tags, "manual|no_pip", %s)' % " + ".join(
+ tensorflow_targets)
+ query_kind = "kind(py_test, %s)" % targets
+ # Skip benchmarks etc.
+ query_filter = 'filter("^((?!benchmark).)*$", %s)' % query_kind
+ # Get the dependencies
+ query_deps = "deps(%s, 1)" % query_filter
+
+ return python_targets, query_deps
+
+
+PYTHON_TARGETS, PY_TEST_QUERY_EXPRESSION = BuildPyTestDependencies()
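Editor's note: a minimal walk-through of the query string `BuildPyTestDependencies` assembles, using made-up target labels in place of the recursively discovered BUILD files:

```python
targets = ('//tensorflow/python:all + //tensorflow/contrib:all'
           ' - //tensorflow/contrib/tensorboard:all'
           ' - attr(tags, "manual|no_pip", //tensorflow:all)')
query_kind = 'kind(py_test, %s)' % targets
query_filter = 'filter("^((?!benchmark).)*$", %s)' % query_kind  # skip benchmarks
query_deps = 'deps(%s, 1)' % query_filter
print(query_deps)
# deps(filter("^((?!benchmark).)*$", kind(py_test, ...targets...)), 1)
```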
# Hard-coded blacklist of files if not included in pip package
# TODO(amitpatankar): Clean up blacklist.
@@ -63,6 +90,7 @@ BLACKLIST = [
"//tensorflow/contrib/lite/python:interpreter.py",
"//tensorflow/contrib/lite/python:interpreter_test.py",
"//tensorflow/contrib/ffmpeg:test_data",
+ "//tensorflow/contrib/hadoop:test_data",
"//tensorflow/contrib/factorization/examples:mnist",
"//tensorflow/contrib/factorization/examples:mnist.py",
"//tensorflow/contrib/factorization:factorization_py_CYCLIC_DEPENDENCIES_THAT_NEED_TO_GO", # pylint:disable=line-too-long
@@ -94,15 +122,21 @@ def main():
# pip_package_dependencies_list is the list of included files in pip packages
pip_package_dependencies = subprocess.check_output(
- ["bazel", "query", PIP_PACKAGE_QUERY_EXPRESSION])
+ ["bazel", "cquery", PIP_PACKAGE_QUERY_EXPRESSION])
pip_package_dependencies_list = pip_package_dependencies.strip().split("\n")
+ pip_package_dependencies_list = [
+ x.split()[0] for x in pip_package_dependencies_list
+ ]
print("Pip package superset size: %d" % len(pip_package_dependencies_list))
# tf_py_test_dependencies is the list of dependencies for all python
# tests in tensorflow
tf_py_test_dependencies = subprocess.check_output(
- ["bazel", "query", PY_TEST_QUERY_EXPRESSION])
+ ["bazel", "cquery", PY_TEST_QUERY_EXPRESSION])
tf_py_test_dependencies_list = tf_py_test_dependencies.strip().split("\n")
+ tf_py_test_dependencies_list = [
+ x.split()[0] for x in tf_py_test_dependencies.strip().split("\n")
+ ]
print("Pytest dependency subset size: %d" % len(tf_py_test_dependencies_list))
missing_dependencies = []
@@ -133,16 +167,17 @@ def main():
for missing_dependency in missing_dependencies:
print("\nMissing dependency: %s " % missing_dependency)
print("Affected Tests:")
- rdep_query = ("rdeps(kind(py_test, //tensorflow/python/...), %s)" %
- missing_dependency)
- affected_tests = subprocess.check_output(["bazel", "query", rdep_query])
+ rdep_query = ("rdeps(kind(py_test, %s), %s)" %
+ (" + ".join(PYTHON_TARGETS), missing_dependency))
+ affected_tests = subprocess.check_output(["bazel", "cquery", rdep_query])
affected_tests_list = affected_tests.split("\n")[:-2]
print("\n".join(affected_tests_list))
- raise RuntimeError("""One or more dependencies are not in the pip package.
-Please either blacklist the dependencies in
-//tensorflow/tools/pip_package/pip_smoke_test.py
-or add them to //tensorflow/tools/pip_package/BUILD.""")
+ raise RuntimeError("""
+ One or more added test dependencies are not in the pip package.
+If these test dependencies need to be in the TensorFlow pip package, please add them to //tensorflow/tools/pip_package/BUILD.
+Otherwise, either blacklist the dependencies in //tensorflow/tools/pip_package/pip_smoke_test.py
+or add the no_pip tag to the test.""")
else:
print("TEST PASSED")
diff --git a/tensorflow/tools/pip_package/setup.py b/tensorflow/tools/pip_package/setup.py
index 6511a50b3b..d40ffb8cd0 100644
--- a/tensorflow/tools/pip_package/setup.py
+++ b/tensorflow/tools/pip_package/setup.py
@@ -12,6 +12,18 @@
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
+"""TensorFlow is an open source machine learning framework for everyone.
+
+TensorFlow is an open source software library for high performance numerical
+computation. Its flexible architecture allows easy deployment of computation
+across a variety of platforms (CPUs, GPUs, TPUs), and from desktops to clusters
+of servers to mobile and edge devices.
+
+Originally developed by researchers and engineers from the Google Brain team
+within Google's AI organization, it comes with strong support for machine
+learning and deep learning and the flexible numerical computation core is used
+across many other scientific domains.
+"""
from __future__ import absolute_import
from __future__ import division
@@ -22,23 +34,30 @@ import os
import re
import sys
-from setuptools import find_packages, setup, Command
+from setuptools import Command
+from setuptools import find_packages
+from setuptools import setup
from setuptools.command.install import install as InstallCommandBase
from setuptools.dist import Distribution
+DOCLINES = __doc__.split('\n')
+
# This version string is semver compatible, but incompatible with pip.
# For pip, we will remove all '-' characters from this string, and use the
# result for pip.
-_VERSION = '1.7.0'
+_VERSION = '1.11.0-rc1'
REQUIRED_PACKAGES = [
'absl-py >= 0.1.6',
'astor >= 0.6.0',
'gast >= 0.2.0',
+ 'keras_applications >= 1.0.5',
+ 'keras_preprocessing >= 1.0.3',
'numpy >= 1.13.3',
'six >= 1.10.0',
- 'protobuf >= 3.4.0',
- 'tensorboard >= 1.7.0, < 1.8.0',
+ 'protobuf >= 3.6.0',
+ 'setuptools <= 39.1.0',
+ 'tensorboard >= 1.10.0, < 1.11.0',
'termcolor >= 1.1.0',
]
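Editor's note: as the comment above says, the dash-separated pre-release suffix is stripped for pip; a two-line illustration of the value `setup()` receives as its `version` further down:

```python
_VERSION = '1.11.0-rc1'
print(_VERSION.replace('-', ''))  # 1.11.0rc1 -- the value passed to setup()
```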
@@ -67,7 +86,7 @@ else:
if 'tf_nightly' in project_name:
for i, pkg in enumerate(REQUIRED_PACKAGES):
if 'tensorboard' in pkg:
- REQUIRED_PACKAGES[i] = 'tb-nightly >= 1.8.0a0, < 1.9.0a0'
+ REQUIRED_PACKAGES[i] = 'tb-nightly >= 1.11.0a0, < 1.12.0a0'
break
# weakref.finalize and enum were introduced in Python 3.4
@@ -79,7 +98,8 @@ if sys.version_info < (3, 4):
CONSOLE_SCRIPTS = [
'freeze_graph = tensorflow.python.tools.freeze_graph:run_main',
'toco_from_protos = tensorflow.contrib.lite.toco.python.toco_from_protos:main',
- 'toco = tensorflow.contrib.lite.toco.python.toco_wrapper:main',
+ 'tflite_convert = tensorflow.contrib.lite.python.tflite_convert:main',
+ 'toco = tensorflow.contrib.lite.python.tflite_convert:main',
'saved_model_cli = tensorflow.python.tools.saved_model_cli:main',
# We need to keep the TensorBoard command, even though the console script
# is now declared by the tensorboard pip package. If we remove the
@@ -97,7 +117,9 @@ TEST_PACKAGES = [
'scipy >= 0.15.1',
]
+
class BinaryDistribution(Distribution):
+
def has_ext_modules(self):
return True
@@ -145,16 +167,21 @@ class InstallHeaders(Command):
# directories for -I
install_dir = re.sub('/google/protobuf_archive/src', '', install_dir)
- # Copy eigen code into tensorflow/include.
+ # Copy external code headers into tensorflow/include.
# A symlink would do, but the wheel file that gets created ignores
# symlink within the directory hierarchy.
# NOTE(keveman): Figure out how to customize bdist_wheel package so
# we can do the symlink.
- if 'external/eigen_archive/' in install_dir:
- extra_dir = install_dir.replace('external/eigen_archive', '')
- if not os.path.exists(extra_dir):
- self.mkpath(extra_dir)
- self.copy_file(header, extra_dir)
+ external_header_locations = [
+ 'tensorflow/include/external/eigen_archive/',
+ 'tensorflow/include/external/com_google_absl/',
+ ]
+ for location in external_header_locations:
+ if location in install_dir:
+ extra_dir = install_dir.replace(location, '')
+ if not os.path.exists(extra_dir):
+ self.mkpath(extra_dir)
+ self.copy_file(header, extra_dir)
if not os.path.exists(install_dir):
self.mkpath(install_dir)
@@ -179,18 +206,17 @@ class InstallHeaders(Command):
def find_files(pattern, root):
"""Return all the files matching pattern below root dir."""
- for path, _, files in os.walk(root):
+ for dirpath, _, files in os.walk(root):
for filename in fnmatch.filter(files, pattern):
- yield os.path.join(path, filename)
-
+ yield os.path.join(dirpath, filename)
-matches = ['../' + x for x in find_files('*', 'external') if '.py' not in x]
so_lib_paths = [
i for i in os.listdir('.')
if os.path.isdir(i) and fnmatch.fnmatch(i, '_solib_*')
]
+matches = []
for path in so_lib_paths:
matches.extend(
['../' + x for x in find_files('*', path) if '.py' not in x]
@@ -205,14 +231,17 @@ headers = (list(find_files('*.h', 'tensorflow/core')) +
list(find_files('*.h', 'tensorflow/stream_executor')) +
list(find_files('*.h', 'google/protobuf_archive/src')) +
list(find_files('*', 'third_party/eigen3')) +
- list(find_files('*', 'external/eigen_archive')))
+ list(find_files('*.h',
+ 'tensorflow/include/external/com_google_absl')) +
+ list(find_files('*', 'tensorflow/include/external/eigen_archive')))
setup(
name=project_name,
version=_VERSION.replace('-', ''),
- description='TensorFlow helps the tensors flow',
- long_description='',
+ description=DOCLINES[0],
+ long_description='\n'.join(DOCLINES[2:]),
url='https://www.tensorflow.org/',
+ download_url='https://github.com/tensorflow/tensorflow/tags',
author='Google Inc.',
author_email='opensource@google.com',
# Contained modules and scripts.
@@ -238,7 +267,7 @@ setup(
},
# PyPI package information.
classifiers=[
- 'Development Status :: 4 - Beta',
+ 'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'Intended Audience :: Education',
'Intended Audience :: Science/Research',
@@ -257,4 +286,5 @@ setup(
'Topic :: Software Development :: Libraries :: Python Modules',
],
license='Apache 2.0',
- keywords='tensorflow tensor machine learning',)
+ keywords='tensorflow tensor machine learning',
+)
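
[Editor's note] description and long_description now come from DOCLINES rather than hard-coded strings; conventionally DOCLINES is the setup.py module docstring split into lines, so the first line becomes the PyPI summary and the remainder the long description. A hedged sketch of that convention (the DOCLINES definition and docstring text here are assumptions for illustration):

    """Example project summary line.

    Longer description paragraph that becomes the PyPI long_description.
    """
    from setuptools import setup

    DOCLINES = __doc__.split('\n')

    setup(
        name='example-project',                     # illustrative name
        version='0.0.1',
        description=DOCLINES[0],                    # first docstring line
        long_description='\n'.join(DOCLINES[2:]),   # skip the blank separator
    )
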
diff --git a/tensorflow/tools/proto_text/BUILD b/tensorflow/tools/proto_text/BUILD
index ef7bfdd3c9..b4b70e0a78 100644
--- a/tensorflow/tools/proto_text/BUILD
+++ b/tensorflow/tools/proto_text/BUILD
@@ -39,6 +39,7 @@ cc_binary(
":gen_proto_text_functions_lib",
"@protobuf_archive//:protobuf",
"//tensorflow/core:lib_proto_parsing",
+ "//tensorflow/core:lib_proto_compiler",
] + if_ios(["//tensorflow/core/platform/default/build_config:logging"]),
)
@@ -49,7 +50,6 @@ cc_library(
copts = if_ios(["-DGOOGLE_LOGGING"]),
linkopts = select({
"//tensorflow:windows": [],
- "//tensorflow:windows_msvc": [],
"//tensorflow:darwin": [
"-lm",
"-lpthread",
@@ -75,9 +75,14 @@ tf_proto_library_cc(
)
tf_generate_proto_text_sources(
- name = "test_proto_text_srcs",
+ name = "test_proto_text",
srcs = ["test.proto"],
srcs_relative_dir = "tensorflow/tools/proto_text/",
+ deps = [
+ ":test_proto_cc",
+ "//tensorflow/core:lib",
+ "//tensorflow/core:lib_internal",
+ ],
)
tf_cc_test(
diff --git a/tensorflow/tools/proto_text/gen_proto_text_functions.cc b/tensorflow/tools/proto_text/gen_proto_text_functions.cc
index f0bb59acf8..159976f1b0 100644
--- a/tensorflow/tools/proto_text/gen_proto_text_functions.cc
+++ b/tensorflow/tools/proto_text/gen_proto_text_functions.cc
@@ -18,6 +18,7 @@ limitations under the License.
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/platform/protobuf.h"
+#include "tensorflow/core/platform/protobuf_compiler.h"
#include "tensorflow/core/platform/types.h"
#include "tensorflow/tools/proto_text/gen_proto_text_functions_lib.h"
@@ -130,7 +131,11 @@ int MainImpl(int argc, char** argv) {
const string path = output_root + "/" + proto_path_no_suffix + suffix;
FILE* f = fopen(path.c_str(), "w");
- if (f == nullptr) return -1;
+ if (f == nullptr) {
+ // We don't expect this output to be generated. It was specified in the
+ // list of sources solely to satisfy a proto import dependency.
+ continue;
+ }
if (fwrite(data.c_str(), 1, data.size(), f) != data.size()) {
fclose(f);
return -1;
diff --git a/tensorflow/tools/proto_text/gen_proto_text_functions_lib.cc b/tensorflow/tools/proto_text/gen_proto_text_functions_lib.cc
index 62e29b5128..15d7c70281 100644
--- a/tensorflow/tools/proto_text/gen_proto_text_functions_lib.cc
+++ b/tensorflow/tools/proto_text/gen_proto_text_functions_lib.cc
@@ -279,8 +279,13 @@ void Generator::AppendFieldValueAppend(const FieldDescriptor& field,
if (omit_default) {
Print("if (", field_expr, " != 0) {").Nest();
}
- Print("o->AppendEnumName(\"", field.name(), "\", ",
- GetQualifiedEnumNameFn(*field.enum_type()), "(", field_expr, "));");
+ Print("const char* enum_name = ",
+ GetQualifiedEnumNameFn(*field.enum_type()), "(", field_expr, ");");
+ Print("if (enum_name[0]) {").Nest();
+ Print("o->AppendEnumName(\"", field.name(), "\", enum_name);");
+ Unnest().Print("} else {").Nest();
+ Print("o->AppendNumeric(\"", field.name(), "\", ", field_expr, ");");
+ Unnest().Print("}");
if (omit_default) {
Unnest().Print("}");
}
@@ -540,18 +545,24 @@ void Generator::AppendParseMessageFunction(const Descriptor& md) {
for (int enum_i = 0; enum_i < enum_d->value_count(); ++enum_i) {
const auto* value_d = enum_d->value(enum_i);
const string& value_name = value_d->name();
- string condition = StrCat("value == \"", value_name,
- "\" || value == \"", value_d->number(), "\"");
- if (value_d->number() == 0) {
- StrAppend(&condition, " || value == \"-0\"");
- }
+ string condition = StrCat("value == \"", value_name, "\"");
Print(enum_i == 0 ? "" : "} else ", "if (", condition, ") {");
Nest();
Print(set_value_prefix, "(", value_prefix, value_name, ");");
Unnest();
}
+ Print("} else {");
+ Nest();
+ // Proto3 allows all numeric values.
+ Print("int32 int_value;");
+ Print("if (strings::SafeStringToNumeric(value, &int_value)) {");
+ Nest();
+ Print(set_value_prefix, "(static_cast<", GetQualifiedName(*enum_d),
+ ">(int_value));");
+ Unnest();
Print("} else {").Nest().Print("return false;").Unnest().Print("}");
+ Unnest().Print("}");
} else {
Print(field->cpp_type_name(), " value;");
switch (field->cpp_type()) {
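
[Editor's note] Taken together, the two hunks above make the generated proto-text code print an enum field by name only when the value has a registered name, falling back to the raw number otherwise, and on the parsing side accept either a known name or, per proto3 open-enum semantics, any numeric value. A small Python sketch of that round-trip behaviour (the enum table and field name are made up for illustration; the real code works from the protobuf EnumDescriptor):

    NESTED_ENUM = {'ZERO': 0, 'FOO': 1, 'BAR': 2}
    NESTED_ENUM_BY_NUMBER = {v: k for k, v in NESTED_ENUM.items()}

    def append_enum(out, field_name, number):
        """Print by name when known, otherwise fall back to the number."""
        name = NESTED_ENUM_BY_NUMBER.get(number, '')
        if name:
            out.append('%s: %s' % (field_name, name))
        else:
            out.append('%s: %d' % (field_name, number))

    def parse_enum(value):
        """Accept a known name, else any integer (proto3 allows all numerics)."""
        if value in NESTED_ENUM:
            return NESTED_ENUM[value]
        try:
            return int(value)
        except ValueError:
            raise ValueError('unparseable enum value: %r' % value)

    out = []
    append_enum(out, 'optional_nested_enum', 6)  # -> 'optional_nested_enum: 6'
    assert parse_enum('BAR') == 2
    assert parse_enum('6') == 6
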
@@ -803,6 +814,9 @@ void Generator::Generate(const FileDescriptor& fd) {
// Add header to cc file.
SetOutput(&cc_);
Print("// GENERATED FILE - DO NOT MODIFY");
+ Print();
+ Print("#include <algorithm>"); // for `std::stable_sort()`
+ Print();
headers = {GetProtoTextHeaderName(fd, true /* impl */)};
AddHeadersToCurrentSection(headers);
Print();
diff --git a/tensorflow/tools/proto_text/gen_proto_text_functions_lib.h b/tensorflow/tools/proto_text/gen_proto_text_functions_lib.h
index e18d749cff..20aa605480 100644
--- a/tensorflow/tools/proto_text/gen_proto_text_functions_lib.h
+++ b/tensorflow/tools/proto_text/gen_proto_text_functions_lib.h
@@ -13,8 +13,8 @@ See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
-#ifndef TENSORFLOW_CORE_UTIL_CREATE_PROTO_DEBUG_STRING_LIB_H_
-#define TENSORFLOW_CORE_UTIL_CREATE_PROTO_DEBUG_STRING_LIB_H_
+#ifndef TENSORFLOW_TOOLS_PROTO_TEXT_GEN_PROTO_TEXT_FUNCTIONS_LIB_H_
+#define TENSORFLOW_TOOLS_PROTO_TEXT_GEN_PROTO_TEXT_FUNCTIONS_LIB_H_
#include "tensorflow/core/platform/protobuf.h"
#include "tensorflow/core/platform/types.h"
@@ -50,4 +50,4 @@ ProtoTextFunctionCode GetProtoTextFunctionCode(
} // namespace tensorflow
-#endif // TENSORFLOW_CORE_UTIL_CREATE_PROTO_DEBUG_STRING_LIB_H_
+#endif // TENSORFLOW_TOOLS_PROTO_TEXT_GEN_PROTO_TEXT_FUNCTIONS_LIB_H_
diff --git a/tensorflow/tools/proto_text/gen_proto_text_functions_lib_test.cc b/tensorflow/tools/proto_text/gen_proto_text_functions_lib_test.cc
index 6f0b4f47de..e67add72de 100644
--- a/tensorflow/tools/proto_text/gen_proto_text_functions_lib_test.cc
+++ b/tensorflow/tools/proto_text/gen_proto_text_functions_lib_test.cc
@@ -455,7 +455,10 @@ TEST(CreateProtoDebugStringLibTest, Enums) {
"repeated_nested_enum: 1"));
EXPECT_PARSE_SUCCESS("", "optional_nested_enum: -0");
- EXPECT_PARSE_FAILURE("optional_nested_enum: 6");
+ // TODO(amauryfa): restore the line below when protobuf::TextFormat also
+  // supports unknown enum values.
+ // EXPECT_PARSE_SUCCESS("optional_nested_enum: 6", "optional_nested_enum: 6");
+ EXPECT_PARSE_FAILURE("optional_nested_enum: 2147483648"); // > INT32_MAX
EXPECT_PARSE_FAILURE("optional_nested_enum: BARNONE");
EXPECT_PARSE_FAILURE("optional_nested_enum: 'BAR'");
EXPECT_PARSE_FAILURE("optional_nested_enum: \"BAR\" ");
diff --git a/tensorflow/tools/quantization/quantize_graph_test.py b/tensorflow/tools/quantization/quantize_graph_test.py
index df71840b64..92bb5127da 100644
--- a/tensorflow/tools/quantization/quantize_graph_test.py
+++ b/tensorflow/tools/quantization/quantize_graph_test.py
@@ -119,8 +119,8 @@ def are_tensors_near(a, b, tolerance):
flat_a = a.flatten()
flat_b = b.flatten()
if len(flat_a) != len(flat_b):
- print("Tensors are different sizes: " + str(len(flat_a)) + " vs " + str(
- len(flat_b)))
+ tf_logging.info("Tensors are different sizes: " + str(len(flat_a)) + " vs "
+ + str(len(flat_b)))
return False
value_count = len(flat_a)
how_many_different = 0
@@ -140,10 +140,10 @@ def are_tensors_near(a, b, tolerance):
if how_many_different == 0:
return True
else:
- print("Tensors have {0} different values ({1}%), with mean difference"
- " {2} and mean absolute difference {3}".format(
- how_many_different, proportion_different * 100, mean_difference,
- mean_abs_difference))
+ tf_logging.info("Tensors have {0} different values ({1}%), with mean"
+ " difference {2} and mean absolute difference {3}".format(
+ how_many_different, proportion_different * 100,
+ mean_difference, mean_abs_difference))
return False
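
[Editor's note] are_tensors_near flattens both tensors, counts elementwise differences beyond the tolerance, and now reports the proportion plus mean and mean-absolute difference through tf_logging instead of print. A rough numpy sketch of the same comparison (condensed, not the test's exact implementation):

    import numpy as np

    def are_tensors_near(a, b, tolerance):
        """Condensed sketch of the comparison logged above."""
        flat_a, flat_b = a.flatten(), b.flatten()
        if len(flat_a) != len(flat_b):
            return False
        diff = flat_a.astype(np.float64) - flat_b.astype(np.float64)
        how_many_different = int(np.sum(np.abs(diff) > tolerance))
        if how_many_different == 0:
            return True
        print('Tensors have %d different values (%.2f%%), with mean difference '
              '%f and mean absolute difference %f' %
              (how_many_different, 100.0 * how_many_different / len(flat_a),
               diff.mean(), np.abs(diff).mean()))
        return False

    assert are_tensors_near(np.array([1.0, 2.0]), np.array([1.0, 2.001]), 0.01)
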
diff --git a/tensorflow/tools/test/check_futures_test.py b/tensorflow/tools/test/check_futures_test.py
index 9181c9bd4a..a883ce221f 100644
--- a/tensorflow/tools/test/check_futures_test.py
+++ b/tensorflow/tools/test/check_futures_test.py
@@ -37,6 +37,7 @@ BASE_DIR = os.path.abspath(os.path.join(os.path.dirname(__file__), '../..'))
FUTURES_PATTERN = re.compile(r'^from __future__ import (\w+)\s*$')
FUTURES_PATTERN_2 = re.compile(
r'^from __future__ import (\w+), (\w+), (\w+)\s*$')
+FUTURES_PATTERN_3 = re.compile(r'^from __future__ import (\w+) as \w+\s*$')
REQUIRED_FUTURES = frozenset(['absolute_import', 'division', 'print_function'])
WHITELIST = [
@@ -59,6 +60,8 @@ def check_file(path, old_division):
for line in open(path, encoding='utf-8') if six.PY3 else open(path):
count += 1
m = FUTURES_PATTERN.match(line)
+ if not m:
+ m = FUTURES_PATTERN_3.match(line)
if m:
futures.add(m.group(1))
else:
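
[Editor's note] FUTURES_PATTERN_3 extends the check to aliased imports of the form "from __future__ import X as Y", which the two existing patterns missed. A quick sketch of how the patterns are tried in order (regexes copied from the test above; collect_futures is a simplified stand-in for check_file):

    import re

    FUTURES_PATTERN = re.compile(r'^from __future__ import (\w+)\s*$')
    FUTURES_PATTERN_2 = re.compile(
        r'^from __future__ import (\w+), (\w+), (\w+)\s*$')
    FUTURES_PATTERN_3 = re.compile(r'^from __future__ import (\w+) as \w+\s*$')

    def collect_futures(lines):
        """Return the set of __future__ features imported in the given lines."""
        futures = set()
        for line in lines:
            m = FUTURES_PATTERN.match(line) or FUTURES_PATTERN_3.match(line)
            if m:
                futures.add(m.group(1))
                continue
            m2 = FUTURES_PATTERN_2.match(line)
            if m2:
                futures.update(m2.groups())
        return futures

    print(collect_futures(['from __future__ import print_function as pf']))
    # -> {'print_function'}
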
diff --git a/tensorflow/tools/test/upload_test_benchmarks.py b/tensorflow/tools/test/upload_test_benchmarks.py
index 9c45359ee1..c030575109 100644
--- a/tensorflow/tools/test/upload_test_benchmarks.py
+++ b/tensorflow/tools/test/upload_test_benchmarks.py
@@ -89,7 +89,6 @@ import shutil
from six import text_type
from google.cloud import datastore
-from six import text_type
def is_real_file(dirpath, fname):