path: root/tensorflow/tools/test
author     Vijay Vasudevan <vrv@google.com>                  2016-03-29 18:23:11 -0800
committer  TensorFlower Gardener <gardener@tensorflow.org>   2016-03-29 19:33:33 -0700
commit     80a5a3e653f3b10e2680fe2ea9bc511e8801e273 (patch)
tree       6d205c779cde774c46e6aa328a8f7ef0f85a1461 /tensorflow/tools/test
parent     e3a0d6fb61cbb1dd9864684c20e49ef3fa385bb6 (diff)
Merge changes from github.
Change: 118532471
Diffstat (limited to 'tensorflow/tools/test')
-rw-r--r--  tensorflow/tools/test/BUILD                       | 16
-rw-r--r--  tensorflow/tools/test/performance.bzl             | 56
-rw-r--r--  tensorflow/tools/test/run_and_gather_logs.py      |  3
-rw-r--r--  tensorflow/tools/test/run_and_gather_logs_lib.py  | 34
4 files changed, 106 insertions, 3 deletions
diff --git a/tensorflow/tools/test/BUILD b/tensorflow/tools/test/BUILD
index 45112833cd..23d9cc6444 100644
--- a/tensorflow/tools/test/BUILD
+++ b/tensorflow/tools/test/BUILD
@@ -3,7 +3,11 @@
package(default_visibility = ["//tensorflow:internal"])
-load("//tensorflow:tensorflow.bzl", "cuda_py_test")
+load(
+ "//tensorflow/tools/test:performance.bzl",
+ "tf_cc_logged_benchmark",
+ "tf_py_logged_benchmark",
+)
licenses(["notice"]) # Apache 2.0
@@ -69,6 +73,16 @@ py_binary(
# main = "run_and_gather_logs.py",
#)
+tf_cc_logged_benchmark(
+ name = "cast_op_benchmark",
+ target = "//tensorflow/core/kernels:cast_op_test",
+)
+
+tf_py_logged_benchmark(
+ name = "rnn_op_benchmark",
+ target = "//tensorflow/python:rnn_test",
+)
+
filegroup(
name = "all_files",
srcs = glob(
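The two targets added above wrap existing tests in logged-benchmark rules. As a rough sketch (not the literal generated rule), tf_cc_logged_benchmark(name = "cast_op_benchmark", ...) expands via the performance.bzl macro introduced below into a tf_py_test that runs run_and_gather_logs.py against the wrapped test:

# Sketch of roughly what the macro generates for "cast_op_benchmark";
# exact attribute ordering and tag order may differ.
tf_py_test(
    name = "cast_op_benchmark",
    srcs = ["//tensorflow/tools/test:run_and_gather_logs.py"],
    main = "run_and_gather_logs.py",
    args = ["--test_name=//tensorflow/core/kernels:cast_op_test"],
    data = ["//tensorflow/core/kernels:cast_op_test"],
    # Union of caller-supplied tags and the macro's defaults.
    tags = ["benchmark-test", "local", "regression-test"],
    additional_deps = ["//tensorflow/tools/test:run_and_gather_logs"],
)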
diff --git a/tensorflow/tools/test/performance.bzl b/tensorflow/tools/test/performance.bzl
new file mode 100644
index 0000000000..750d20fdca
--- /dev/null
+++ b/tensorflow/tools/test/performance.bzl
@@ -0,0 +1,56 @@
+# -*- Python -*-
+
+load("//tensorflow:tensorflow.bzl", "tf_py_test")
+
+# Create a benchmark test target of a TensorFlow C++ test (tf_cc_*_test)
+def tf_cc_logged_benchmark(
+ name=None,
+ target=None,
+ benchmarks="..",
+ tags=[],
+ test_log_output_prefix=""):
+ if not name:
+ fail("Must provide a name")
+ if not target:
+ fail("Must provide a target")
+ if (not ":" in target
+ or not target.startswith("//")
+ or target.endswith(":all")
+ or target.endswith(".")):
+ fail(" ".join(("Target must be a single well-defined test, e.g.,",
+ "//path/to:test. Received: %s" % target)))
+
+ all_tags = list(set(tags) + \
+ set(["benchmark-test", "local", "regression-test"]))
+
+ tf_py_test(
+ name = name,
+ tags = all_tags,
+ srcs = ["//tensorflow/tools/test:run_and_gather_logs.py"],
+ args = [
+ "--test_name=" + target
+ ],
+ data = [
+ target,
+ ],
+ main = "run_and_gather_logs.py",
+ additional_deps = [
+ "//tensorflow/tools/test:run_and_gather_logs"
+ ])
+
+# Create a benchmark test target of a TensorFlow python test (*py_tests)
+def tf_py_logged_benchmark(
+ name=None,
+ target=None,
+ benchmarks="..",
+ tags=[],
+ test_log_output_prefix=""):
+ # For now generating a py benchmark is the same as generating a C++
+ # benchmark target. In the future this may change, so we have
+ # two macros just in case
+ tf_cc_logged_benchmark(
+ name=name,
+ target=target,
+ benchmarks=benchmarks,
+ tags=tags,
+ test_log_output_prefix=test_log_output_prefix)
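The macro rejects anything that is not a single, absolute test label. A plain-Python restatement of that check, for illustration only (_is_single_test_label is a hypothetical helper, not part of this change):

# Plain-Python restatement of the label validation in performance.bzl.
def _is_single_test_label(target):
    # Must look like "//path/to:test"; wildcard labels such as ":all"
    # or "/..." are rejected.
    return (":" in target
            and target.startswith("//")
            and not target.endswith(":all")
            and not target.endswith("."))

assert _is_single_test_label("//tensorflow/core/kernels:cast_op_test")
assert not _is_single_test_label("//tensorflow/core/kernels:all")
assert not _is_single_test_label("//tensorflow/python/...")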
diff --git a/tensorflow/tools/test/run_and_gather_logs.py b/tensorflow/tools/test/run_and_gather_logs.py
index 40a8542a46..9c50138a7b 100644
--- a/tensorflow/tools/test/run_and_gather_logs.py
+++ b/tensorflow/tools/test/run_and_gather_logs.py
@@ -44,6 +44,7 @@ from google.protobuf import text_format
from tensorflow.core.util import test_log_pb2
from tensorflow.tools.test import run_and_gather_logs_lib
+
FLAGS = tf.app.flags.FLAGS
tf.app.flags.DEFINE_string("test_name", "", """Test target to run.""")
@@ -92,7 +93,7 @@ def main(unused_args):
else:
output_path = os.path.abspath(FLAGS.test_log_output)
tf.gfile.GFile(output_path, "w").write(serialized_test_results)
- print("Test results written to: %s" % output_path)
+ tf.logging.info("Test results written to: %s" % output_path)
if __name__ == "__main__":
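For context, FLAGS.test_name above is how this script receives the label that performance.bzl passes via args = ["--test_name=" + target]. A minimal sketch of that flag plumbing under the TF 0.x flags API; the body of main below is illustrative, not the script's real logic:

# Minimal sketch of the --test_name flag plumbing (TF 0.x-era APIs).
import tensorflow as tf

FLAGS = tf.app.flags.FLAGS
tf.app.flags.DEFINE_string("test_name", "", """Test target to run.""")

def main(unused_args):
  # performance.bzl invokes the binary with --test_name=<wrapped target>,
  # e.g. "//tensorflow/core/kernels:cast_op_test".
  tf.logging.info("Benchmark target: %s", FLAGS.test_name)

if __name__ == "__main__":
  tf.app.run()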
diff --git a/tensorflow/tools/test/run_and_gather_logs_lib.py b/tensorflow/tools/test/run_and_gather_logs_lib.py
index afe8f210cc..d6bc10dec9 100644
--- a/tensorflow/tools/test/run_and_gather_logs_lib.py
+++ b/tensorflow/tools/test/run_and_gather_logs_lib.py
@@ -28,16 +28,48 @@ import time
import tensorflow as tf
from google.protobuf import text_format
-
from tensorflow.core.util import test_log_pb2
from tensorflow.tools.test import system_info_lib
+def get_git_commit_sha():
+ """Get git commit SHA for this build.
+
+ Attempt to get the SHA from environment variable GIT_COMMIT, which should
+ be available on Jenkins build agents.
+
+ Returns:
+ SHA hash of the git commit used for the build, if available
+ """
+
+ return os.getenv("GIT_COMMIT")
+
+
def process_test_logs(test_name, test_args, start_time, run_time, log_files):
+ """Gather test information and put it in a TestResults proto.
+
+ Args:
+ test_name: A unique bazel target, e.g. "//path/to:test"
+ test_args: A string containing all arguments to run the target with.
+
+ start_time: Test starting time (epoch)
+ run_time: Wall time that the test ran for
+ log_files: Paths to the log files
+
+ Returns:
+ A TestResults proto
+ """
+
results = test_log_pb2.TestResults()
results.target = test_name
results.start_time = start_time
results.run_time = run_time
+
+ # Gather source code information
+ git_sha = get_git_commit_sha()
+ if git_sha:
+ results.commit_id.hash = git_sha
+
results.entries.CopyFrom(process_benchmarks(log_files))
results.run_configuration.argument.extend(test_args)
results.machine_configuration.CopyFrom(
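Taken together, the additions above mean that when GIT_COMMIT is set in the environment (as on Jenkins build agents), the SHA lands in TestResults.commit_id.hash. A rough, self-contained sketch of that flow using the same test_log_pb2 fields; build_results_stub and the literal argument values are illustrative only:

# Rough sketch of the proto population added above; build_results_stub is a
# hypothetical helper, not part of this change.
import os
from tensorflow.core.util import test_log_pb2

def build_results_stub(test_name, start_time, run_time):
  results = test_log_pb2.TestResults()
  results.target = test_name
  results.start_time = start_time
  results.run_time = run_time
  git_sha = os.getenv("GIT_COMMIT")  # provided by Jenkins, may be None locally
  if git_sha:
    results.commit_id.hash = git_sha
  return results

results = build_results_stub("//tensorflow/core/kernels:cast_op_test",
                             start_time=1459300000.0, run_time=12.5)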