author     Scott Zhu <scottzhu@google.com>  2018-04-13 17:52:20 -0700
committer  TensorFlower Gardener <gardener@tensorflow.org>  2018-04-13 17:57:27 -0700
commit     3652556dab3ebfe0152232facc7304fe5754aecb (patch)
tree       9a9cecde4c85dc53548a185f9bd6d7c6e0591262 /tensorflow/contrib/kernel_methods
parent     ef24ad14502e992716c49fdd5c63e6b2c2fb6b5a (diff)
Merge changes from github.
PiperOrigin-RevId: 192850372
Diffstat (limited to 'tensorflow/contrib/kernel_methods')
-rw-r--r--  tensorflow/contrib/kernel_methods/python/losses.py                               |  6
-rw-r--r--  tensorflow/contrib/kernel_methods/python/mappers/random_fourier_features.py      | 44
-rw-r--r--  tensorflow/contrib/kernel_methods/python/mappers/random_fourier_features_test.py |  2
3 files changed, 25 insertions, 27 deletions
diff --git a/tensorflow/contrib/kernel_methods/python/losses.py b/tensorflow/contrib/kernel_methods/python/losses.py
index f182fef067..4ef0a66a52 100644
--- a/tensorflow/contrib/kernel_methods/python/losses.py
+++ b/tensorflow/contrib/kernel_methods/python/losses.py
@@ -43,10 +43,10 @@ def sparse_multiclass_hinge_loss(
This is a generalization of standard (binary) hinge loss. For a given instance
with correct label c*, the loss is given by:
- loss = max_{c != c*} logits_c - logits_{c*} + 1.
+ $$loss = max_{c != c*} logits_c - logits_{c*} + 1.$$
or equivalently
- loss = max_c { logits_c - logits_{c*} + I_{c != c*} }
- where I_{c != c*} = 1 if c != c* and 0 otherwise.
+ $$loss = max_c { logits_c - logits_{c*} + I_{c != c*} }$$
+ where \\(I_{c != c*} = 1\ \text{if}\ c != c*\\) and 0 otherwise.
Args:
labels: `Tensor` of shape [batch_size] or [batch_size, 1]. Corresponds to
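The hinge-loss formula above can be checked directly against the logits. Below is a minimal NumPy sketch of that formula (illustrative only; the helper name `multiclass_hinge_loss_sketch` is hypothetical and this is not the implementation in losses.py):

```python
import numpy as np

def multiclass_hinge_loss_sketch(labels, logits):
  """Illustrative only: loss_i = max_c { logits[i, c] - logits[i, c*] + [c != c*] }."""
  rows = np.arange(logits.shape[0])
  correct = logits[rows, labels]              # logits_{c*} for each example
  margins = logits - correct[:, None] + 1.0   # logits_c - logits_{c*} + 1
  margins[rows, labels] -= 1.0                # indicator term is 0 when c == c*
  return margins.max(axis=1)                  # the c == c* entry is 0, so loss >= 0
```

For example, with `logits = np.array([[2.0, 1.0]])` the loss is 0.0 when `labels = np.array([0])` (the correct class already wins by the required margin of 1) and 2.0 when `labels = np.array([1])`.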
diff --git a/tensorflow/contrib/kernel_methods/python/mappers/random_fourier_features.py b/tensorflow/contrib/kernel_methods/python/mappers/random_fourier_features.py
index 9dc01124ab..9a721a9d44 100644
--- a/tensorflow/contrib/kernel_methods/python/mappers/random_fourier_features.py
+++ b/tensorflow/contrib/kernel_methods/python/mappers/random_fourier_features.py
@@ -34,33 +34,31 @@ class RandomFourierFeatureMapper(dkm.DenseKernelMapper):
r"""Class that implements Random Fourier Feature Mapping (RFFM) in TensorFlow.
The RFFM mapping is used to approximate the Gaussian (RBF) kernel:
- ```
- exp(-||x-y||_2^2 / (2 * sigma^2))
- ```
+ $$exp(-||x-y||_2^2 / (2 * \sigma^2))$$
The implementation of RFFM is based on the following paper:
"Random Features for Large-Scale Kernel Machines" by Ali Rahimi and Ben Recht.
(link: https://people.eecs.berkeley.edu/~brecht/papers/07.rah.rec.nips.pdf)
- The mapping uses a matrix `Omega \in R^{d x D}` and a bias vector `b \in R^D`
- where `d` is the input dimension (number of dense input features) and `D` is
- the output dimension (i.e., dimension of the feature space the input is mapped
- to). Each entry of `Omega` is sampled i.i.d. from a (scaled) Gaussian
- distribution and each entry of `b` is sampled independently and uniformly from
- [0, 2 * pi].
-
- For a single input feature vector x in R^d, its RFFM is defined as:
- ```
- sqrt(2/D) * cos(x * Omega + b)
- ```
- where `cos` is the element-wise cosine function and `x, b` are represented as
- row vectors. The aforementioned paper shows that the linear kernel of
- RFFM-mapped vectors approximates the Gaussian kernel of the initial vectors.
+ The mapping uses a matrix \\(\Omega \in R^{d x D}\\) and a bias vector
+ \\(b \in R^D\\) where \\(d\\) is the input dimension (number of dense input
+ features) and \\(D\\) is the output dimension (i.e., dimension of the feature
+ space the input is mapped to). Each entry of \\(\Omega\\) is sampled i.i.d.
+ from a (scaled) Gaussian distribution and each entry of \\(b\\) is sampled
+ independently and uniformly from [0, \\(2 * \pi\\)].
+
+ For a single input feature vector \\(x \in R^d\\), its RFFM is defined as:
+ $$\sqrt{2/D} * cos(x * \Omega + b)$$
+
+ where \\(cos\\) is the element-wise cosine function and \\(x, b\\) are
+ represented as row vectors. The aforementioned paper shows that the linear
+ kernel of RFFM-mapped vectors approximates the Gaussian kernel of the initial
+ vectors.
"""
def __init__(self, input_dim, output_dim, stddev=1.0, seed=1, name=None):
- """Constructs a RandomFourierFeatureMapper instance.
+ r"""Constructs a RandomFourierFeatureMapper instance.
Args:
input_dim: The dimension (number of features) of the tensors to be mapped.
@@ -68,11 +66,11 @@ class RandomFourierFeatureMapper(dkm.DenseKernelMapper):
stddev: The standard deviation of the Gaussian kernel to be approximated.
The error of the classifier trained using this approximation is very
sensitive to this parameter.
- seed: An integer used to initialize the parameters (`Omega` and `b`) of
- the mapper. For repeatable sequences across different invocations of the
- mapper object (for instance, to ensure consistent mapping both at
- training and eval/inference if these happen in different invocations),
- set this to the same integer.
+ seed: An integer used to initialize the parameters (\\(\Omega\\) and
+ \\(b\\)) of the mapper. For repeatable sequences across different
+ invocations of the mapper object (for instance, to ensure consistent
+ mapping both at training and eval/inference if these happen in
+ different invocations), set this to the same integer.
name: name for the mapper object.
"""
# TODO(sibyl-vie3Poto): Maybe infer input_dim and/or output_dim (if not explicitly
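For intuition, here is a minimal NumPy sketch of the mapping described in the docstring above (illustrative only, not the contrib implementation; the function name `rffm_sketch` is hypothetical, and sampling each entry of \\(\Omega\\) with standard deviation `1 / stddev` is the standard choice for approximating the Gaussian kernel with bandwidth `stddev`, assumed here rather than taken from the code):

```python
import numpy as np

def rffm_sketch(x, output_dim, stddev=1.0, seed=1):
  """Illustrative only: maps rows of x (shape [n, d]) to sqrt(2/D) * cos(x * Omega + b)."""
  rng = np.random.RandomState(seed)  # the same seed reproduces the same Omega and b
  input_dim = x.shape[1]
  omega = rng.normal(scale=1.0 / stddev, size=(input_dim, output_dim))
  b = rng.uniform(0.0, 2.0 * np.pi, size=output_dim)
  return np.sqrt(2.0 / output_dim) * np.cos(x @ omega + b)

# The linear kernel of the mapped vectors approximates the Gaussian kernel:
x, y = np.random.randn(1, 10), np.random.randn(1, 10)
z_x = rffm_sketch(x, output_dim=2000, stddev=2.0)
z_y = rffm_sketch(y, output_dim=2000, stddev=2.0)
approx = (z_x @ z_y.T).item()                           # Monte Carlo estimate of the kernel
exact = np.exp(-np.sum((x - y) ** 2) / (2 * 2.0 ** 2))  # exp(-||x-y||^2 / (2 * sigma^2))
```

Because both calls use the same `seed`, they draw identical \\(\Omega\\) and \\(b\\), which is the reproducibility property the `seed` argument above describes.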
diff --git a/tensorflow/contrib/kernel_methods/python/mappers/random_fourier_features_test.py b/tensorflow/contrib/kernel_methods/python/mappers/random_fourier_features_test.py
index 6f4a264485..91929184a2 100644
--- a/tensorflow/contrib/kernel_methods/python/mappers/random_fourier_features_test.py
+++ b/tensorflow/contrib/kernel_methods/python/mappers/random_fourier_features_test.py
@@ -34,7 +34,7 @@ def _inner_product(x, y):
"""Inner product between tensors x and y.
The input tensors are assumed to be in ROW representation, that is, the method
- returns x * y^T.
+ returns \\(x * y^T\\).
Args:
x: input tensor in row format