about summary refs log tree commit diff homepage
diff options
context:
space:
mode:
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_MatrixExponential.pbtxt  31
-rw-r--r--  tensorflow/core/kernels/matrix_exponential_op.cc  1
-rw-r--r--  tensorflow/core/ops/linalg_ops.cc  2
-rw-r--r--  tensorflow/core/public/version.h  4
-rw-r--r--  tensorflow/python/kernel_tests/BUILD  2
-rw-r--r--  tensorflow/python/kernel_tests/matrix_logarithm_op_test.py  30
6 files changed, 29 insertions, 41 deletions
diff --git a/tensorflow/core/api_def/base_api/api_def_MatrixExponential.pbtxt b/tensorflow/core/api_def/base_api/api_def_MatrixExponential.pbtxt
index d7b56aec87..46da1de1c3 100644
--- a/tensorflow/core/api_def/base_api/api_def_MatrixExponential.pbtxt
+++ b/tensorflow/core/api_def/base_api/api_def_MatrixExponential.pbtxt
@@ -1,32 +1,5 @@
op {
graph_op_name: "MatrixExponential"
- in_arg {
- name: "input"
- description: <<END
-Shape is `[..., M, M]`.
-END
- }
- out_arg {
- name: "output"
- description: <<END
-Shape is `[..., M, M]`.
-
-@compatibility(scipy)
-Equivalent to scipy.linalg.expm
-@end_compatibility
-END
- }
- summary: "Computes the matrix exponential of one or more square matrices:"
- description: <<END
-\\(exp(A) = \sum_{n=0}^\infty A^n/n!\\)
-
-The exponential is computed using a combination of the scaling and squaring
-method and the Pade approximation. Details can be founds in:
-Nicholas J. Higham, "The scaling and squaring method for the matrix exponential
-revisited," SIAM J. Matrix Anal. Applic., 26:1179-1193, 2005.
-
-The input is a tensor of shape `[..., M, M]` whose inner-most 2 dimensions
-form square matrices. The output is a tensor of the same shape as the input
-containing the exponential for all input submatrices `[..., :, :]`.
-END
+ visibility: SKIP
+ summary: "Deprecated, use python implementation tf.linalg.matrix_exponential."
}
diff --git a/tensorflow/core/kernels/matrix_exponential_op.cc b/tensorflow/core/kernels/matrix_exponential_op.cc
index 99db898301..01d4894438 100644
--- a/tensorflow/core/kernels/matrix_exponential_op.cc
+++ b/tensorflow/core/kernels/matrix_exponential_op.cc
@@ -49,6 +49,7 @@ class MatrixExponentialOp : public LinearAlgebraOp<Scalar> {
TF_DISALLOW_COPY_AND_ASSIGN(MatrixExponentialOp);
};
+// Deprecated kernels (2018/08/21).
REGISTER_LINALG_OP("MatrixExponential", (MatrixExponentialOp<float>), float);
REGISTER_LINALG_OP("MatrixExponential", (MatrixExponentialOp<double>), double);
REGISTER_LINALG_OP("MatrixExponential", (MatrixExponentialOp<complex64>),
diff --git a/tensorflow/core/ops/linalg_ops.cc b/tensorflow/core/ops/linalg_ops.cc
index f37f79ddbf..1d4d51a25d 100644
--- a/tensorflow/core/ops/linalg_ops.cc
+++ b/tensorflow/core/ops/linalg_ops.cc
@@ -235,6 +235,8 @@ REGISTER_OP("MatrixInverse")
.SetShapeFn(BatchUnchangedSquareShapeFn);
REGISTER_OP("MatrixExponential")
+ .Deprecated(
+ 27, "Use Python implementation tf.linalg.matrix_exponential instead.")
.Input("input: T")
.Output("output: T")
.Attr("T: {double, float, complex64, complex128}")
diff --git a/tensorflow/core/public/version.h b/tensorflow/core/public/version.h
index 563564119f..4129c93af5 100644
--- a/tensorflow/core/public/version.h
+++ b/tensorflow/core/public/version.h
@@ -96,10 +96,12 @@ limitations under the License.
// GraphDef. (7dec2017)
// 27. Deprecate TensorArray ops v2 in favor of v3 and deprecated io_ops
// deprecated in favor of V2 ops. (2018/01/23)
+// 28. Deprecate MatrixExponential op in favor of Python implementation.
+// (2018/08/21).
#define TF_GRAPH_DEF_VERSION_MIN_PRODUCER 0
#define TF_GRAPH_DEF_VERSION_MIN_CONSUMER 0
-#define TF_GRAPH_DEF_VERSION 26
+#define TF_GRAPH_DEF_VERSION 27
// Checkpoint compatibility versions (the versions field in SavedSliceMeta).
//
diff --git a/tensorflow/python/kernel_tests/BUILD b/tensorflow/python/kernel_tests/BUILD
index b9c5f26cb7..a9982a7ae0 100644
--- a/tensorflow/python/kernel_tests/BUILD
+++ b/tensorflow/python/kernel_tests/BUILD
@@ -601,7 +601,7 @@ tf_py_test(
tf_py_test(
name = "matrix_logarithm_op_test",
- size = "small",
+ size = "medium",
srcs = ["matrix_logarithm_op_test.py"],
additional_deps = [
"//third_party/py/numpy",
diff --git a/tensorflow/python/kernel_tests/matrix_logarithm_op_test.py b/tensorflow/python/kernel_tests/matrix_logarithm_op_test.py
index 24edc4f59f..723a15fbd1 100644
--- a/tensorflow/python/kernel_tests/matrix_logarithm_op_test.py
+++ b/tensorflow/python/kernel_tests/matrix_logarithm_op_test.py
@@ -30,6 +30,7 @@ from tensorflow.python.ops import gen_linalg_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import variables
+from tensorflow.python.ops.linalg import linalg_impl
from tensorflow.python.platform import test
@@ -39,7 +40,7 @@ class LogarithmOpTest(test.TestCase):
inp = x.astype(np_type)
with self.test_session(use_gpu=True):
# Verify that expm(logm(A)) == A.
- tf_ans = gen_linalg_ops.matrix_exponential(
+ tf_ans = linalg_impl.matrix_exponential(
gen_linalg_ops.matrix_logarithm(inp))
out = tf_ans.eval()
self.assertAllClose(inp, out, rtol=1e-4, atol=1e-3)
@@ -98,16 +99,25 @@ class LogarithmOpTest(test.TestCase):
self._verifyLogarithmComplex(np.empty([0, 2, 2], dtype=np.complex64))
self._verifyLogarithmComplex(np.empty([2, 0, 0], dtype=np.complex64))
- def testRandomSmallAndLarge(self):
+ def testRandomSmallAndLargeComplex64(self):
np.random.seed(42)
- for dtype in np.complex64, np.complex128:
- for batch_dims in [(), (1,), (3,), (2, 2)]:
- for size in 8, 31, 32:
- shape = batch_dims + (size, size)
- matrix = np.random.uniform(
- low=-1.0, high=1.0,
- size=np.prod(shape)).reshape(shape).astype(dtype)
- self._verifyLogarithmComplex(matrix)
+ for batch_dims in [(), (1,), (3,), (2, 2)]:
+ for size in 8, 31, 32:
+ shape = batch_dims + (size, size)
+ matrix = np.random.uniform(
+ low=-1.0, high=1.0,
+ size=np.prod(shape)).reshape(shape).astype(np.complex64)
+ self._verifyLogarithmComplex(matrix)
+
+ def testRandomSmallAndLargeComplex128(self):
+ np.random.seed(42)
+ for batch_dims in [(), (1,), (3,), (2, 2)]:
+ for size in 8, 31, 32:
+ shape = batch_dims + (size, size)
+ matrix = np.random.uniform(
+ low=-1.0, high=1.0,
+ size=np.prod(shape)).reshape(shape).astype(np.complex128)
+ self._verifyLogarithmComplex(matrix)
def testConcurrentExecutesWithoutError(self):
with self.test_session(use_gpu=True) as sess: