diff options
author | Eugene Brevdo <ebrevdo@google.com> | 2017-05-22 17:32:50 -0700 |
---|---|---|
committer | TensorFlower Gardener <gardener@tensorflow.org> | 2017-05-22 17:36:11 -0700 |
commit | 827d2e4b9180db67853f60c125e548d83986b96c (patch) | |
tree | 1ccaf8f20bf678ec755330b488eb28946dbe38e6 /tensorflow/contrib/tfprof | |
parent | 95719e869c61c78a4b0ac0407e1fb04e60daca35 (diff) |
Move many of the "core" RNNCells and rnn functions back to TF core.
Unit test files will move in a follow-up PR. This is the big API change.
The old behavior (using tf.contrib.rnn....) will continue to work for
backwards compatibility.
PiperOrigin-RevId: 156809677
Diffstat (limited to 'tensorflow/contrib/tfprof')
-rw-r--r-- | tensorflow/contrib/tfprof/python/tools/tfprof/model_analyzer_testlib.py | 6 |
1 file changed, 2 insertions, 4 deletions
diff --git a/tensorflow/contrib/tfprof/python/tools/tfprof/model_analyzer_testlib.py b/tensorflow/contrib/tfprof/python/tools/tfprof/model_analyzer_testlib.py
index ed26f001c2..1234b15199 100644
--- a/tensorflow/contrib/tfprof/python/tools/tfprof/model_analyzer_testlib.py
+++ b/tensorflow/contrib/tfprof/python/tools/tfprof/model_analyzer_testlib.py
@@ -17,13 +17,13 @@
 from __future__ import absolute_import
 from __future__ import division
 from __future__ import print_function
-from tensorflow.contrib.rnn.python.ops.core_rnn_cell import BasicRNNCell
 from tensorflow.python.framework import dtypes
 from tensorflow.python.ops import array_ops
 from tensorflow.python.ops import init_ops
 from tensorflow.python.ops import math_ops
 from tensorflow.python.ops import nn_ops
 from tensorflow.python.ops import rnn
+from tensorflow.python.ops import rnn_cell
 from tensorflow.python.ops import variable_scope
 from tensorflow.python.training import gradient_descent
@@ -55,7 +55,7 @@ def BuildFullModel():
     with variable_scope.variable_scope('inp_%d' % i):
       seq.append(array_ops.reshape(BuildSmallModel(), [2, 1, -1]))
-  cell = BasicRNNCell(16, 48)
+  cell = rnn_cell.BasicRNNCell(16)
   out = rnn.dynamic_rnn(
       cell, array_ops.concat(seq, axis=1), dtype=dtypes.float32)[0]
@@ -63,5 +63,3 @@ def BuildFullModel():
   loss = nn_ops.l2_loss(math_ops.reduce_mean(target - out))
   sgd_op = gradient_descent.GradientDescentOptimizer(1e-2)
   return sgd_op.minimize(loss)
-
-