author    Jianwei Xie <xiejw@google.com>    2016-12-07 14:03:04 -0800
committer TensorFlower Gardener <gardener@tensorflow.org>    2016-12-07 14:23:09 -0800
commit 4139949dc8c1919efc666fd6369a741af4f13990 (patch)
tree   0ce8f58b524fbadef030b00371d84cccc8a394d9
parent 8b86c22185e20b8d69f86df0b964d2adcca0a1d4 (diff)
Use tf.contrib.legacy_seq2seq instead of tf.nn.seq2seq.
Change: 141353751
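The seq2seq ops were moved from tf.nn.seq2seq to tf.contrib.legacy_seq2seq with their call signatures unchanged, so each call site in this change is a plain rename of the module path. A minimal sketch of the rename for one of the touched calls, with illustrative placeholder shapes that are not taken from this commit:

    import tensorflow as tf

    # Toy tensors: per-step logits, integer targets, and uniform loss weights.
    logits = tf.placeholder(tf.float32, [None, 10000])  # [batch * steps, vocab]
    targets = tf.placeholder(tf.int32, [None])          # [batch * steps]
    weights = tf.ones_like(targets, dtype=tf.float32)

    # Before: loss = tf.nn.seq2seq.sequence_loss_by_example(...)
    # After: same arguments, new module path.
    loss = tf.contrib.legacy_seq2seq.sequence_loss_by_example(
        [logits], [targets], [weights])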
 tensorflow/models/rnn/ptb/ptb_word_lm.py         | 2 +-
 tensorflow/models/rnn/seq2seq.py                 | 3 ++-
 tensorflow/models/rnn/translate/seq2seq_model.py | 6 +++---
 3 files changed, 6 insertions(+), 5 deletions(-)
diff --git a/tensorflow/models/rnn/ptb/ptb_word_lm.py b/tensorflow/models/rnn/ptb/ptb_word_lm.py
index f4560a7a28..df1939e0f3 100644
--- a/tensorflow/models/rnn/ptb/ptb_word_lm.py
+++ b/tensorflow/models/rnn/ptb/ptb_word_lm.py
@@ -146,7 +146,7 @@ class PTBModel(object):
"softmax_w", [size, vocab_size], dtype=data_type())
softmax_b = tf.get_variable("softmax_b", [vocab_size], dtype=data_type())
logits = tf.matmul(output, softmax_w) + softmax_b
- loss = tf.nn.seq2seq.sequence_loss_by_example(
+ loss = tf.contrib.legacy_seq2seq.sequence_loss_by_example(
[logits],
[tf.reshape(input_.targets, [-1])],
[tf.ones([batch_size * num_steps], dtype=data_type())])
diff --git a/tensorflow/models/rnn/seq2seq.py b/tensorflow/models/rnn/seq2seq.py
index 0c3e645f44..ff487f9475 100644
--- a/tensorflow/models/rnn/seq2seq.py
+++ b/tensorflow/models/rnn/seq2seq.py
@@ -18,4 +18,5 @@
 from __future__ import absolute_import
 from __future__ import division
 from __future__ import print_function
-raise ImportError("This module is deprecated. Use tf.nn.seq2seq instead.")
+raise ImportError(
+    "This module is deprecated. Use tf.contrib.legacy_seq2seq instead.")
diff --git a/tensorflow/models/rnn/translate/seq2seq_model.py b/tensorflow/models/rnn/translate/seq2seq_model.py
index 2d73372f6d..b25e61bd23 100644
--- a/tensorflow/models/rnn/translate/seq2seq_model.py
+++ b/tensorflow/models/rnn/translate/seq2seq_model.py
@@ -123,7 +123,7 @@ class Seq2SeqModel(object):
 
     # The seq2seq function: we use embedding for the input and attention.
     def seq2seq_f(encoder_inputs, decoder_inputs, do_decode):
-      return tf.nn.seq2seq.embedding_attention_seq2seq(
+      return tf.contrib.legacy_seq2seq.embedding_attention_seq2seq(
           encoder_inputs,
           decoder_inputs,
           cell,
@@ -153,7 +153,7 @@ class Seq2SeqModel(object):
 
     # Training outputs and losses.
     if forward_only:
-      self.outputs, self.losses = tf.nn.seq2seq.model_with_buckets(
+      self.outputs, self.losses = tf.contrib.legacy_seq2seq.model_with_buckets(
           self.encoder_inputs, self.decoder_inputs, targets,
           self.target_weights, buckets, lambda x, y: seq2seq_f(x, y, True),
           softmax_loss_function=softmax_loss_function)
@@ -165,7 +165,7 @@ class Seq2SeqModel(object):
               for output in self.outputs[b]
           ]
     else:
-      self.outputs, self.losses = tf.nn.seq2seq.model_with_buckets(
+      self.outputs, self.losses = tf.contrib.legacy_seq2seq.model_with_buckets(
           self.encoder_inputs, self.decoder_inputs, targets,
           self.target_weights, buckets,
           lambda x, y: seq2seq_f(x, y, False),
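For reference, a minimal standalone sketch of the renamed attention entry point with toy sizes (vocabulary, cell width, and step count below are made-up illustrations; the keyword arguments mirror the ones seq2seq_model.py passes):

    import tensorflow as tf

    vocab = 10
    cell = tf.contrib.rnn.BasicLSTMCell(32)
    # One token-id placeholder per time step, as the bucketed model feeds them.
    enc_inputs = [tf.placeholder(tf.int32, [None]) for _ in range(5)]
    dec_inputs = [tf.placeholder(tf.int32, [None]) for _ in range(5)]

    outputs, state = tf.contrib.legacy_seq2seq.embedding_attention_seq2seq(
        enc_inputs, dec_inputs, cell,
        num_encoder_symbols=vocab,
        num_decoder_symbols=vocab,
        embedding_size=16,
        feed_previous=False)  # True feeds decoder outputs back in, as at decode time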