author    A. Unique TensorFlower <gardener@tensorflow.org>  2018-05-16 19:12:18 -0700
committer TensorFlower Gardener <gardener@tensorflow.org>   2018-05-16 19:14:49 -0700
commit    cf55582ed8ccaa39f70d7370513050bcf65411be (patch)
tree      a9487c57b0c4e765baada80a4a8332013118a9c1 /tensorflow
parent    b33d1001b3e03c454cf28c5ae8f87ace608d849e (diff)
Enhance the DenseLayer + XLA compatibility test cases to cover differences in compilation behavior across JIT modes.
PiperOrigin-RevId: 196926896
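
The two JIT modes exercised by the new test differ in how clustering is
triggered. The following is a minimal sketch (not part of this change) of
both configurations, using the same TF 1.x APIs that appear in
dense_layer_test.py below; shapes and values are illustrative only.

    import numpy as np

    from tensorflow.contrib.compiler import jit
    from tensorflow.core.protobuf import config_pb2
    from tensorflow.python.layers import layers
    from tensorflow.python.ops import array_ops

    # Mode 1: auto-jit. Compilation is enabled globally through the session
    # config, and the auto-clustering pass decides which ops are grouped
    # into _XlaLaunch clusters.
    config = config_pb2.ConfigProto()
    config.graph_options.optimizer_options.global_jit_level = (
        config_pb2.OptimizerOptions.ON_1)

    # Mode 2: scope-based jit. Only ops created under the scope are
    # compiled, including ops (such as Shape) that auto-jit leaves
    # unclustered; this is why, for an input of undefined shape, the test
    # expects two _XlaLaunch ops under the scope but one under auto-jit.
    x = array_ops.placeholder(shape=[None, None, 3], dtype=np.float32)
    with jit.experimental_jit_scope():
      y = layers.dense(x, 3)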
Diffstat (limited to 'tensorflow')
-rw-r--r--  tensorflow/compiler/tests/BUILD                |  14
-rw-r--r--  tensorflow/compiler/tests/dense_layer_test.py  | 135
-rw-r--r--  tensorflow/compiler/tests/jit_test.py          |  19
3 files changed, 148 insertions(+), 20 deletions(-)
diff --git a/tensorflow/compiler/tests/BUILD b/tensorflow/compiler/tests/BUILD
index 213ab95a12..2a88743c80 100644
--- a/tensorflow/compiler/tests/BUILD
+++ b/tensorflow/compiler/tests/BUILD
@@ -853,9 +853,21 @@ cuda_py_test(
"//tensorflow/python:control_flow_ops",
"//tensorflow/python:framework",
"//tensorflow/python:gradients",
- "//tensorflow/python:layers",
"//tensorflow/python:math_ops",
"//tensorflow/python:nn_ops",
+ ],
+)
+
+cuda_py_test(
+ name = "dense_layer_test",
+ size = "small",
+ srcs = ["dense_layer_test.py"],
+ additional_deps = [
+ "//tensorflow/contrib/compiler:compiler_py",
+ "//tensorflow/core:protos_all_py",
+ "//tensorflow/python:array_ops",
+ "//tensorflow/python:client_testlib",
+ "//tensorflow/python:layers",
"//tensorflow/python:variables",
],
)
diff --git a/tensorflow/compiler/tests/dense_layer_test.py b/tensorflow/compiler/tests/dense_layer_test.py
new file mode 100644
index 0000000000..b0bf1b79d6
--- /dev/null
+++ b/tensorflow/compiler/tests/dense_layer_test.py
@@ -0,0 +1,135 @@
+# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ==============================================================================
+"""Tests for DenseLayer JIT compilation on the CPU and GPU devices."""
+
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+import os
+import numpy as np
+
+from tensorflow.contrib.compiler import jit
+from tensorflow.core.protobuf import config_pb2
+from tensorflow.python.layers import layers
+from tensorflow.python.ops import array_ops
+from tensorflow.python.ops import variables
+from tensorflow.python.platform import test
+
+jit_scope = jit.experimental_jit_scope
+
+
+def GetRunMetadataLabels(run_metadata):
+ """Returns all labels in run_metadata."""
+ labels = []
+ for dev_stats in run_metadata.step_stats.dev_stats:
+ for node_stats in dev_stats.node_stats:
+ labels.append(node_stats.timeline_label)
+ return labels
+
+
+def InLabels(labels, substr):
+ """Returns true iff one of the labels contains substr."""
+  return any(substr in x for x in labels)
+
+
+def XlaLaunchOpCount(labels):
+ """Count how many _XlaLaunch labels are present."""
+ return sum("_XlaLaunch(" in x for x in labels)
+
+
+class DenseLayerTest(test.TestCase):
+
+ def testDenseLayerAutoJit(self):
+ """Tests dense layer compilation in auto-jit mode.
+
+    The dense layer should be compiled into a single _XlaLaunch op.
+ """
+
+ os.environ["TF_XLA_FLAGS"] = ("--tf_xla_cpu_global_jit")
+ config = config_pb2.ConfigProto()
+ config.graph_options.optimizer_options.global_jit_level = (
+ config_pb2.OptimizerOptions.ON_1)
+
+ with self.test_session(config=config) as sess:
+ x = array_ops.placeholder(shape=[None, None, 3], dtype=np.float32)
+ y = layers.dense(x, 3)
+
+ sess.run(variables.initialize_all_variables())
+ run_metadata = config_pb2.RunMetadata()
+ sess.run(
+ y, {x: np.array([[[1, 2, 3], [4, 5, 6]], [[1, 2, 3], [4, 5, 6]]])},
+ run_metadata=run_metadata,
+ options=config_pb2.RunOptions(
+ trace_level=config_pb2.RunOptions.FULL_TRACE))
+
+ labels = GetRunMetadataLabels(run_metadata)
+ self.assertEqual(1, XlaLaunchOpCount(labels))
+ self.assertFalse(InLabels(labels, "ListDiff"))
+
+ def testDenseLayerJitScopeDefinedShape(self):
+ """Tests that the dense layer node is properly compiled in jit scope.
+
+    A dense layer with a statically shaped input tensor should be compiled
+    into a single _XlaLaunch op by XLA.
+ """
+
+ with self.test_session() as sess:
+ x = array_ops.placeholder(shape=[2, 2, 3], dtype=np.float32)
+ with jit_scope():
+ y = layers.dense(x, 3)
+
+ sess.run(variables.initialize_all_variables())
+ run_metadata = config_pb2.RunMetadata()
+ sess.run(
+ y, {x: np.array([[[1, 2, 3], [4, 5, 6]], [[1, 2, 3], [4, 5, 6]]])},
+ run_metadata=run_metadata,
+ options=config_pb2.RunOptions(
+ trace_level=config_pb2.RunOptions.FULL_TRACE))
+
+ labels = GetRunMetadataLabels(run_metadata)
+ self.assertEqual(1, XlaLaunchOpCount(labels))
+    # There is no need to check whether ListDiff is compiled, because the
+    # ListDiff op is not used when the input tensor shape is fully defined.
+
+ def testDenseLayerJitScopeUndefinedShape(self):
+ """Tests that the dense layer node is properly compiled in jit scope.
+
+    The dense layer uses a Shape op to read its input's shape when that shape
+    is not fully defined. XLA does not cluster the Shape op with other ops,
+    but under experimental_jit_scope it is forced into its own cluster,
+    causing the dense layer to be split into TWO _XlaLaunch ops.
+ """
+
+ with self.test_session() as sess:
+ x = array_ops.placeholder(shape=[None, None, 3], dtype=np.float32)
+ with jit_scope():
+ y = layers.dense(x, 3)
+
+ sess.run(variables.initialize_all_variables())
+ run_metadata = config_pb2.RunMetadata()
+ sess.run(
+ y, {x: np.array([[[1, 2, 3], [4, 5, 6]], [[1, 2, 3], [4, 5, 6]]])},
+ run_metadata=run_metadata,
+ options=config_pb2.RunOptions(
+ trace_level=config_pb2.RunOptions.FULL_TRACE))
+
+ labels = GetRunMetadataLabels(run_metadata)
+ self.assertEqual(2, XlaLaunchOpCount(labels))
+ self.assertFalse(InLabels(labels, "ListDiff"))
+
+
+if __name__ == "__main__":
+ test.main()
diff --git a/tensorflow/compiler/tests/jit_test.py b/tensorflow/compiler/tests/jit_test.py
index 1ad83d8040..0310cdde66 100644
--- a/tensorflow/compiler/tests/jit_test.py
+++ b/tensorflow/compiler/tests/jit_test.py
@@ -29,13 +29,11 @@ from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import function
from tensorflow.python.framework import ops
-from tensorflow.python.layers import layers
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import gradients_impl
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_ops
-from tensorflow.python.ops import variables
from tensorflow.python.platform import test
jit_scope = jit.experimental_jit_scope
@@ -452,23 +450,6 @@ class XlaCompilationTest(test.TestCase):
self.assertFalse(InLabels(labels, "Mul"))
self.assertTrue(InLabels(labels, "_XlaLaunch"))
- def testDenseLayer(self):
- """Tests that the dense layer node is properly compiled."""
-
- with self.test_session(config=NoRewriteSessionConfig()) as sess:
- x = array_ops.placeholder(shape=[2, 3], dtype=np.float32)
- with jit_scope():
- y = layers.dense(x, 3)
-
- sess.run(variables.initialize_all_variables())
- run_metadata = config_pb2.RunMetadata()
- sess.run(y, {x: np.array([[1, 2, 3], [4, 5, 6]])},
- run_metadata=run_metadata,
- options=config_pb2.RunOptions(
- trace_level=config_pb2.RunOptions.FULL_TRACE))
-
- self.assert_(MetadataHasXlaLaunch(run_metadata))
-
class ElementWiseFusionTest(test.TestCase):