about summary refs log tree commit diff homepage
path: root/tensorflow/contrib/specs
diff options
context:
space:
mode:
authorGravatar Eugene Brevdo <ebrevdo@google.com>2018-02-07 16:24:34 -0800
committerGravatar TensorFlower Gardener <gardener@tensorflow.org>2018-02-07 16:31:12 -0800
commit14ebbebc290510b6cfa491349862e6c5aca4200a (patch)
tree7c55e41f9a17a0dbf4beb904ec78c91e65fd4c5b /tensorflow/contrib/specs
parent5b464e1d737fb9b07e5d9dfdbe3f4eebf5218987 (diff)
Remove tf.contrib.ndlstm as it is not maintained and barely used.
Users can find an external implementation by the original author at: https://github.com/tmbarchive/tfndlstm

PiperOrigin-RevId: 184914822
Diffstat (limited to 'tensorflow/contrib/specs')
-rw-r--r--tensorflow/contrib/specs/BUILD1
-rw-r--r--tensorflow/contrib/specs/README.md11
-rw-r--r--tensorflow/contrib/specs/python/specs_ops.py20
-rw-r--r--tensorflow/contrib/specs/python/specs_test.py30
4 files changed, 0 insertions, 62 deletions
diff --git a/tensorflow/contrib/specs/BUILD b/tensorflow/contrib/specs/BUILD
index 4b688690ae..084953a0a2 100644
--- a/tensorflow/contrib/specs/BUILD
+++ b/tensorflow/contrib/specs/BUILD
@@ -23,7 +23,6 @@ py_library(
srcs_version = "PY2AND3",
deps = [
"//tensorflow/contrib/layers:layers_py",
- "//tensorflow/contrib/ndlstm",
"//tensorflow/python:array_ops",
"//tensorflow/python:framework_for_generated_wrappers",
"//tensorflow/python:logging_ops",
diff --git a/tensorflow/contrib/specs/README.md b/tensorflow/contrib/specs/README.md
index b764e6e714..bcf34e601f 100644
--- a/tensorflow/contrib/specs/README.md
+++ b/tensorflow/contrib/specs/README.md
@@ -59,17 +59,6 @@ Reshaping:
- `Squeeze` = tf.squeeze
- `Expand` = tf.expand_dims
-Multidimensional LSTM:
-
-These are intended as alternatives to 2D convolutions. For sequence models,
-there will be other modeling primitives.
-
- - `Lstm2` = Fun(lstm2d.separable_lstm) # 2D-to-2D
- - `Lstm2to1` = Fun(lstm2d.reduce_to_sequence) # 2D-to-1D
- - `Lstm2to0` = Fun(lstm2d.reduce_to_final) # 2D-to-vector
- - `Clstm2(n, m)` is a `Cl(n, [3,3])` followed by `Lstm2(m)`
- - `Dws(n)` is a depthwise convolution `Cs(n, [1, 1])`
-
Other:
- `Id` = identity
diff --git a/tensorflow/contrib/specs/python/specs_ops.py b/tensorflow/contrib/specs/python/specs_ops.py
index a6bd4d16c2..49b989b8d0 100644
--- a/tensorflow/contrib/specs/python/specs_ops.py
+++ b/tensorflow/contrib/specs/python/specs_ops.py
@@ -23,8 +23,6 @@ from __future__ import division
from __future__ import print_function
from tensorflow.contrib.layers.python.layers import layers
-from tensorflow.contrib.ndlstm.python import lstm1d
-from tensorflow.contrib.ndlstm.python import lstm2d
from tensorflow.contrib.specs.python import specs_lib
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import logging_ops
@@ -122,17 +120,6 @@ Sig = Fun(math_ops.sigmoid)
Tanh = Fun(math_ops.tanh)
Smax = Fun(nn_ops.softmax)
-# 2D LSTM
-
-Lstm2 = Fun(lstm2d.separable_lstm)
-Lstm2to1 = Fun(lstm2d.reduce_to_sequence) # 2D to 1D
-Lstm2to0 = Fun(lstm2d.reduce_to_final) # 2D to depth-only
-
-
-def Clstm2(n, *args, **kw):
- """2D LSTM with 3x3 pre-convolution."""
- return Cl(n, [3, 3]) | Lstm2(*args, **kw)
-
def Dws(n):
"""Depth-wise convolution + sigmoid (used after LSTM)."""
@@ -143,13 +130,6 @@ def Dwm(n):
"""Depth-wise convolution + softmax (used after LSTM)."""
return Cm(n, [1, 1])
-
-# 1D LSTM
-
-Lstm1 = Fun(lstm1d.ndlstm_base)
-Lstm1to0 = Fun(lstm1d.sequence_to_final) # 1D to depth-only
-Ssm = Fun(lstm1d.sequence_softmax)
-
# Sharing of Variables
diff --git a/tensorflow/contrib/specs/python/specs_test.py b/tensorflow/contrib/specs/python/specs_test.py
index 41782a9fc9..9a4ad36793 100644
--- a/tensorflow/contrib/specs/python/specs_test.py
+++ b/tensorflow/contrib/specs/python/specs_test.py
@@ -149,36 +149,6 @@ class SpecsTest(test.TestCase):
self.assertEqual(tuple(result.shape), (10, 20))
self.assertEqual(summaries.tf_spec_structure(spec, inputs), "_ sig sig")
- def testLstm2(self):
- with self.test_session():
- inputs = constant_op.constant(_rand(1, 64, 64, 5))
- spec = "net = Lstm2(15)"
- outputs = specs.create_net(spec, inputs)
- self.assertEqual(outputs.get_shape().as_list(), [1, 64, 64, 15])
- variables.global_variables_initializer().run()
- result = outputs.eval()
- self.assertEqual(tuple(result.shape), (1, 64, 64, 15))
-
- def testLstm2to1(self):
- with self.test_session():
- inputs = constant_op.constant(_rand(1, 64, 64, 5))
- spec = "net = Lstm2to1(15)"
- outputs = specs.create_net(spec, inputs)
- self.assertEqual(outputs.get_shape().as_list(), [1, 64, 15])
- variables.global_variables_initializer().run()
- result = outputs.eval()
- self.assertEqual(tuple(result.shape), (1, 64, 15))
-
- def testLstm2to0(self):
- with self.test_session():
- inputs = constant_op.constant(_rand(1, 64, 64, 5))
- spec = "net = Lstm2to0(15)"
- outputs = specs.create_net(spec, inputs)
- self.assertEqual(outputs.get_shape().as_list(), [1, 15])
- variables.global_variables_initializer().run()
- result = outputs.eval()
- self.assertEqual(tuple(result.shape), (1, 15))
-
def testKeywordRestriction(self):
with self.test_session():
inputs = constant_op.constant(_rand(10, 20))