author    A. Unique TensorFlower <gardener@tensorflow.org>  2017-11-06 17:19:44 -0800
committer TensorFlower Gardener <gardener@tensorflow.org>   2017-11-06 17:27:03 -0800
commit    99c790697a3c399a3a89d23043cf6678de450e20 (patch)
tree      bce800754f50b6cd239387d74e37b8c3ab7ab6c6
parent    e7bf04dd42dd714a4e6b73ee9f47fd38ed763456 (diff)
Update ops-related pbtxt files.
PiperOrigin-RevId: 174787397
-rw-r--r--  tensorflow/core/ops/compat/ops_history.v1.pbtxt  38
-rw-r--r--  tensorflow/core/ops/ops.pbtxt                     12
2 files changed, 49 insertions, 1 deletion
diff --git a/tensorflow/core/ops/compat/ops_history.v1.pbtxt b/tensorflow/core/ops/compat/ops_history.v1.pbtxt
index 382812be18..973691a353 100644
--- a/tensorflow/core/ops/compat/ops_history.v1.pbtxt
+++ b/tensorflow/core/ops/compat/ops_history.v1.pbtxt
@@ -13344,6 +13344,44 @@ op {
   }
 }
 op {
+  name: "GenerateVocabRemapping"
+  input_arg {
+    name: "new_vocab_file"
+    type: DT_STRING
+  }
+  input_arg {
+    name: "old_vocab_file"
+    type: DT_STRING
+  }
+  output_arg {
+    name: "remapping"
+    type: DT_INT64
+  }
+  output_arg {
+    name: "num_present"
+    type: DT_INT32
+  }
+  attr {
+    name: "new_vocab_offset"
+    type: "int"
+    has_minimum: true
+  }
+  attr {
+    name: "num_new_vocab"
+    type: "int"
+    has_minimum: true
+  }
+  attr {
+    name: "old_vocab_size"
+    type: "int"
+    default_value {
+      i: -1
+    }
+    has_minimum: true
+    minimum: -1
+  }
+}
+op {
   name: "GetSessionHandle"
   input_arg {
     name: "value"
diff --git a/tensorflow/core/ops/ops.pbtxt b/tensorflow/core/ops/ops.pbtxt
index 4e0d3107fd..85fdef32c4 100644
--- a/tensorflow/core/ops/ops.pbtxt
+++ b/tensorflow/core/ops/ops.pbtxt
@@ -10077,8 +10077,18 @@ op {
     description: "Number of entries in the new vocab file to remap."
     has_minimum: true
   }
+  attr {
+    name: "old_vocab_size"
+    type: "int"
+    default_value {
+      i: -1
+    }
+    description: "Number of entries in the old vocab file to consider. If -1,\nuse the entire old vocabulary."
+    has_minimum: true
+    minimum: -1
+  }
   summary: "Given a path to new and old vocabulary files, returns a remapping Tensor of"
-  description: "length `num_new_vocab`, where `remapping[i]` contains the row number in the old\nvocabulary that corresponds to row `i` in the new vocabulary (starting at line\n`new_vocab_offset` and up to `num_new_vocab` entities), or `-1` if entry `i`\nin the new vocabulary is not in the old vocabulary. `num_vocab_offset` enables\nuse in the partitioned variable case, and should generally be set through\nexamining partitioning info. The format of the files should be a text file,\nwith each line containing a single entity within the vocabulary.\n\nFor example, with `new_vocab_file` a text file containing each of the following\nelements on a single line: `[f0, f1, f2, f3]`, old_vocab_file = [f1, f0, f3],\n`num_new_vocab = 3, new_vocab_offset = 1`, the returned remapping would be\n`[0, -1, 2]`.\n\nThe op also returns a count of how many entries in the new vocabulary\nwere present in the old vocabulary, which is used to calculate the number of\nvalues to initialize in a weight matrix remapping\n\nThis functionality can be used to remap both row vocabularies (typically,\nfeatures) and column vocabularies (typically, classes) from TensorFlow\ncheckpoints. Note that the partitioning logic relies on contiguous vocabularies\ncorresponding to div-partitioned variables. Moreover, the underlying remapping\nuses an IndexTable (as opposed to an inexact CuckooTable), so client code should\nuse the corresponding index_table_from_file() as the FeatureColumn framework\ndoes (as opposed to tf.feature_to_id(), which uses a CuckooTable)."
+  description: "length `num_new_vocab`, where `remapping[i]` contains the row number in the old\nvocabulary that corresponds to row `i` in the new vocabulary (starting at line\n`new_vocab_offset` and up to `num_new_vocab` entities), or `-1` if entry `i`\nin the new vocabulary is not in the old vocabulary. The old vocabulary is\nconstrained to the first `old_vocab_size` entries if `old_vocab_size` is not the\ndefault value of -1.\n\n`num_vocab_offset` enables\nuse in the partitioned variable case, and should generally be set through\nexamining partitioning info. The format of the files should be a text file,\nwith each line containing a single entity within the vocabulary.\n\nFor example, with `new_vocab_file` a text file containing each of the following\nelements on a single line: `[f0, f1, f2, f3]`, old_vocab_file = [f1, f0, f3],\n`num_new_vocab = 3, new_vocab_offset = 1`, the returned remapping would be\n`[0, -1, 2]`.\n\nThe op also returns a count of how many entries in the new vocabulary\nwere present in the old vocabulary, which is used to calculate the number of\nvalues to initialize in a weight matrix remapping\n\nThis functionality can be used to remap both row vocabularies (typically,\nfeatures) and column vocabularies (typically, classes) from TensorFlow\ncheckpoints. Note that the partitioning logic relies on contiguous vocabularies\ncorresponding to div-partitioned variables. Moreover, the underlying remapping\nuses an IndexTable (as opposed to an inexact CuckooTable), so client code should\nuse the corresponding index_table_from_file() as the FeatureColumn framework\ndoes (as opposed to tf.feature_to_id(), which uses a CuckooTable)."
 }
 op {
   name: "GetSessionHandle"