aboutsummaryrefslogtreecommitdiffhomepage
path: root/tensorflow/cc/tutorials
diff options
context:
space:
mode:
author: A. Unique TensorFlower <nobody@tensorflow.org> 2016-06-03 08:32:57 -0800
committer: TensorFlower Gardener <gardener@tensorflow.org> 2016-06-03 09:49:28 -0700
commit88cabd6670d9a8c707460c75dc0ce5fc3fdbd052 (patch)
tree6fdaac2212679c6320769d242901668182d2f5df /tensorflow/cc/tutorials
parentc94f554bb7bb4a3d040f4e93d4e1177e5b987273 (diff)
Fix a bug in propagating the normalized y into the input of the next power iteration, improve the eigenvalue estimate by making use of the Rayleigh quotient, and normalize the random input vector.
Change: 123975933
Diffstat (limited to 'tensorflow/cc/tutorials')
-rw-r--r-- tensorflow/cc/tutorials/example_trainer.cc | 13
1 file changed, 10 insertions, 3 deletions
diff --git a/tensorflow/cc/tutorials/example_trainer.cc b/tensorflow/cc/tutorials/example_trainer.cc
index 38439e6d8b..a465d98f88 100644
--- a/tensorflow/cc/tutorials/example_trainer.cc
+++ b/tensorflow/cc/tutorials/example_trainer.cc
@@ -82,9 +82,12 @@ string DebugString(const Tensor& x, const Tensor& y) {
CHECK_EQ(y.NumElements(), 2);
auto x_flat = x.flat<float>();
auto y_flat = y.flat<float>();
- const float lambda = y_flat(0) / x_flat(0);
+ // Compute an estimate of the eigenvalue via
+ // (x' A x) / (x' x) = (x' y) / (x' x)
+ // and exploit the fact that x' x = 1 by assumption
+ Eigen::Tensor<float, 0, Eigen::RowMajor> lambda = (x_flat * y_flat).sum();
return strings::Printf("lambda = %8.6f x = [%8.6f %8.6f] y = [%8.6f %8.6f]",
- lambda, x_flat(0), x_flat(1), y_flat(0), y_flat(1));
+ lambda(), x_flat(0), x_flat(1), y_flat(0), y_flat(1));
}
void ConcurrentSteps(const Options* opts, int session_index) {
@@ -106,7 +109,11 @@ void ConcurrentSteps(const Options* opts, int session_index) {
step_threads.Schedule([&session, opts, session_index, step]() {
// Randomly initialize the input.
Tensor x(DT_FLOAT, TensorShape({2, 1}));
- x.flat<float>().setRandom();
+ auto x_flat = x.flat<float>();
+ x_flat.setRandom();
+ Eigen::Tensor<float, 0, Eigen::RowMajor> inv_norm =
+ x_flat.square().sum().sqrt().inverse();
+ x_flat = x_flat * inv_norm();
// Iterations.
std::vector<Tensor> outputs;