about | summary | refs | log | tree | commit | diff | homepage
path: root/tensorflow/contrib/linear_optimizer
diff options
context:
space:
mode:
author: Patrick Nguyen <drpng@google.com> 2017-03-21 20:18:24 -0800
committer: TensorFlower Gardener <gardener@tensorflow.org> 2017-03-21 21:25:58 -0700
commit 540a812c820c530d5f650f6ae49bba59d47e8291 (patch)
tree 0ae3e713a1e268bb7fdaf1075b2c19ef7ce7e0c4 /tensorflow/contrib/linear_optimizer
parent 8aa2f3c2ed6a4b8377ad9628c6890a5f12ea2aa8 (diff)
Fix some documentation formatting errors.
Change: 150841749
Diffstat (limited to 'tensorflow/contrib/linear_optimizer')
-rw-r--r--  tensorflow/contrib/linear_optimizer/python/ops/sparse_feature_column.py | 28
-rw-r--r--  tensorflow/contrib/linear_optimizer/python/sdca_optimizer.py | 23
2 files changed, 32 insertions, 19 deletions
diff --git a/tensorflow/contrib/linear_optimizer/python/ops/sparse_feature_column.py b/tensorflow/contrib/linear_optimizer/python/ops/sparse_feature_column.py
index ed7105b5c9..003795233f 100644
--- a/tensorflow/contrib/linear_optimizer/python/ops/sparse_feature_column.py
+++ b/tensorflow/contrib/linear_optimizer/python/ops/sparse_feature_column.py
@@ -27,28 +27,36 @@ class SparseFeatureColumn(object):
"""Represents a sparse feature column.
Contains three tensors representing a sparse feature column, they are
- example indices (int64), feature indices (int64), and feature values (float).
- Feature weights are optional, and are treated as 1.0f if missing.
+ example indices (`int64`), feature indices (`int64`), and feature
+ values (`float`).
+ Feature weights are optional, and are treated as `1.0f` if missing.
For example, consider a batch of 4 examples, which contains the following
- features in a particular SparseFeatureColumn:
- Example 0: feature 5, value 1
- Example 1: feature 6, value 1 and feature 10, value 0.5
- Example 2: no features
- Example 3: two copies of feature 2, value 1
+ features in a particular `SparseFeatureColumn`:
+
+ * Example 0: feature 5, value 1
+ * Example 1: feature 6, value 1 and feature 10, value 0.5
+ * Example 2: no features
+ * Example 3: two copies of feature 2, value 1
This SparseFeatureColumn will be represented as follows:
+
+ ```
<0, 5, 1>
<1, 6, 1>
<1, 10, 0.5>
<3, 2, 1>
<3, 2, 1>
+ ```
For a batch of 2 examples below:
- Example 0: feature 5
- Example 1: feature 6
- is represented by SparseFeatureColumn as:
+ * Example 0: feature 5
+ * Example 1: feature 6
+
+ is represented by `SparseFeatureColumn` as:
+
+ ```
<0, 5, 1>
<1, 6, 1>
diff --git a/tensorflow/contrib/linear_optimizer/python/sdca_optimizer.py b/tensorflow/contrib/linear_optimizer/python/sdca_optimizer.py
index afa0b3b833..f9d69d6dea 100644
--- a/tensorflow/contrib/linear_optimizer/python/sdca_optimizer.py
+++ b/tensorflow/contrib/linear_optimizer/python/sdca_optimizer.py
@@ -32,6 +32,8 @@ class SDCAOptimizer(object):
Estimator.
Example usage:
+
+ ```python
real_feature_column = real_valued_column(...)
sparse_feature_column = sparse_column_with_hash_bucket(...)
sdca_optimizer = linear.SDCAOptimizer(example_id_column='example_id',
@@ -44,19 +46,22 @@ class SDCAOptimizer(object):
optimizer=sdca_optimizer)
classifier.fit(input_fn_train, steps=50)
classifier.evaluate(input_fn=input_fn_eval)
+ ```
- Here the expectation is that the input_fn_* functions passed to train and
+ Here the expectation is that the `input_fn_*` functions passed to train and
evaluate return a pair (dict, label_tensor) where dict has `example_id_column`
as `key` whose value is a `Tensor` of shape [batch_size] and dtype string.
num_loss_partitions defines the number of partitions of the global loss
- function and should be set to (#concurrent train ops/per worker) x (#workers).
- Convergence of (global) loss is guaranteed if num_loss_partitions is larger or
- equal to the above product. Larger values for num_loss_partitions lead to
- slower convergence. The recommended value for num_loss_partitions in tf.learn
- (where currently there is one process per worker) is the number of workers
- running the train steps. It defaults to 1 (single machine). num_table_shards
- defines the number of shards for the internal state table, typically set to
- match the number of parameter servers for large data sets.
+ function and should be set to `(#concurrent train ops/per worker)
+ x (#workers)`.
+ Convergence of (global) loss is guaranteed if `num_loss_partitions` is larger
+ or equal to the above product. Larger values for `num_loss_partitions` lead to
+ slower convergence. The recommended value for `num_loss_partitions` in
+ `tf.learn` (where currently there is one process per worker) is the number
+ of workers running the train steps. It defaults to 1 (single machine).
+ `num_table_shards` defines the number of shards for the internal state
+ table, typically set to match the number of parameter servers for large
+ data sets.
"""
def __init__(self,