author    Jan Tattermusch <jtattermusch@users.noreply.github.com>  2018-08-30 23:44:10 +0200
committer GitHub <noreply@github.com>  2018-08-30 23:44:10 +0200
commit    d766b9a2a40fd3002d9b93c97525a868c9d92dc4 (patch)
tree      7ced498743c2e45c20812e85b9e717271872a69d
parent    4230aa4855951bcf2480f637dc1dd81fba2b36da (diff)
parent    af1966aa037bf3515f553aee87c886ac829efc02 (diff)
Merge pull request #16461 from jtattermusch/refactor_uploading_to_bq
Refactor upload_test_results.py
-rwxr-xr-x  tools/interop_matrix/run_interop_matrix_tests.py      2
-rw-r--r--  tools/run_tests/python_utils/upload_test_results.py  17
-rwxr-xr-x  tools/run_tests/run_interop_tests.py                   2
-rwxr-xr-x  tools/run_tests/run_tests.py                          12
4 files changed, 17 insertions, 16 deletions
diff --git a/tools/interop_matrix/run_interop_matrix_tests.py b/tools/interop_matrix/run_interop_matrix_tests.py
index 9d442346a7..6cd6f43167 100755
--- a/tools/interop_matrix/run_interop_matrix_tests.py
+++ b/tools/interop_matrix/run_interop_matrix_tests.py
@@ -235,7 +235,7 @@ def run_tests_for_lang(lang, runtime, images):
             maxjobs=args.jobs)
         if args.bq_result_table and resultset:
             upload_test_results.upload_interop_results_to_bq(
-                resultset, args.bq_result_table, args)
+                resultset, args.bq_result_table)
         if num_failures:
             jobset.message('FAILED', 'Some tests failed', do_newline=True)
             total_num_failures += num_failures
diff --git a/tools/run_tests/python_utils/upload_test_results.py b/tools/run_tests/python_utils/upload_test_results.py
index 9d99703725..0ca23f56cf 100644
--- a/tools/run_tests/python_utils/upload_test_results.py
+++ b/tools/run_tests/python_utils/upload_test_results.py
@@ -104,14 +104,13 @@ def _insert_rows_with_retries(bq, bq_table, bq_rows):
                     sys.exit(1)
 
 
-def upload_results_to_bq(resultset, bq_table, args, platform):
+def upload_results_to_bq(resultset, bq_table, extra_fields):
     """Upload test results to a BQ table.
 
     Args:
         resultset: dictionary generated by jobset.run
         bq_table: string name of table to create/upload results to in BQ
-        args: args in run_tests.py, generated by argparse
-        platform: string name of platform tests were run on
+        extra_fields: dict with extra values that will be uploaded along with the results
     """
     bq = big_query_utils.create_big_query()
     big_query_utils.create_partitioned_table(
@@ -129,32 +128,26 @@ def upload_results_to_bq(resultset, bq_table, args, platform):
         for result in results:
             test_results = {}
             _get_build_metadata(test_results)
-            test_results['compiler'] = args.compiler
-            test_results['config'] = args.config
             test_results['cpu_estimated'] = result.cpu_estimated
             test_results['cpu_measured'] = result.cpu_measured
             test_results['elapsed_time'] = '%.2f' % result.elapsed_time
-            test_results['iomgr_platform'] = args.iomgr_platform
-            # args.language is a list, but will always have one element in the contexts
-            # this function is used.
-            test_results['language'] = args.language[0]
-            test_results['platform'] = platform
             test_results['result'] = result.state
             test_results['return_code'] = result.returncode
             test_results['test_name'] = shortname
             test_results['timestamp'] = time.strftime('%Y-%m-%d %H:%M:%S')
+            for field_name, field_value in six.iteritems(extra_fields):
+                test_results[field_name] = field_value
             row = big_query_utils.make_row(str(uuid.uuid4()), test_results)
             bq_rows.append(row)
     _insert_rows_with_retries(bq, bq_table, bq_rows)
 
 
-def upload_interop_results_to_bq(resultset, bq_table, args):
+def upload_interop_results_to_bq(resultset, bq_table):
     """Upload interop test results to a BQ table.
 
     Args:
         resultset: dictionary generated by jobset.run
         bq_table: string name of table to create/upload results to in BQ
-        args: args in run_interop_tests.py, generated by argparse
     """
     bq = big_query_utils.create_big_query()
     big_query_utils.create_partitioned_table(
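
With the args and platform parameters gone, upload_results_to_bq treats caller metadata generically: whatever key/value pairs arrive in extra_fields are merged into each row before it is handed to big_query_utils.make_row. Below is a standalone sketch of that merge step; the helper name and the sample field values are invented for illustration and are not part of the patch.

import six

def merge_extra_fields(test_results, extra_fields):
    # Same merge the patched upload_results_to_bq performs: every
    # caller-supplied key/value pair becomes a column in the uploaded row.
    for field_name, field_value in six.iteritems(extra_fields):
        test_results[field_name] = field_value
    return test_results

# Hypothetical inputs: the per-result fields and metadata values are made up.
row = merge_extra_fields(
    {'test_name': 'h2_full_test/bad_ping', 'result': 'PASSED'},
    {'platform': 'linux', 'language': 'c++'})
print(row)  # test_name/result come from the result, platform/language from extra_fields
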
diff --git a/tools/run_tests/run_interop_tests.py b/tools/run_tests/run_interop_tests.py
index 22055d58e8..2936bdfa80 100755
--- a/tools/run_tests/run_interop_tests.py
+++ b/tools/run_tests/run_interop_tests.py
@@ -1494,7 +1494,7 @@ try:
         maxjobs=args.jobs,
         skip_jobs=args.manual_run)
     if args.bq_result_table and resultset:
-        upload_interop_results_to_bq(resultset, args.bq_result_table, args)
+        upload_interop_results_to_bq(resultset, args.bq_result_table)
     if num_failures:
         jobset.message('FAILED', 'Some tests failed', do_newline=True)
     else:
diff --git a/tools/run_tests/run_tests.py b/tools/run_tests/run_tests.py
index e04b13b24c..ecb5e1d899 100755
--- a/tools/run_tests/run_tests.py
+++ b/tools/run_tests/run_tests.py
@@ -1821,8 +1821,16 @@ def _build_and_run(check_cancelled,
         for antagonist in antagonists:
             antagonist.kill()
         if args.bq_result_table and resultset:
-            upload_results_to_bq(resultset, args.bq_result_table, args,
-                                 platform_string())
+            upload_extra_fields = {
+                'compiler': args.compiler,
+                'config': args.config,
+                'iomgr_platform': args.iomgr_platform,
+                'language': args.language[
+                    0],  # args.language is a list but will always have one element when uploading to BQ is enabled.
+                'platform': platform_string()
+            }
+            upload_results_to_bq(resultset, args.bq_result_table,
+                                 upload_extra_fields)
         if xml_report and resultset:
             report_utils.render_junit_xml_report(
                 resultset, xml_report, suite_name=args.report_suite_name)
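
On the caller side, run_tests.py now assembles the metadata itself, so adding a new uploaded field becomes a one-line change in the caller rather than a signature change in the upload helper. The snippet below is a runnable sketch of that assembly step only: build_upload_extra_fields is a hypothetical helper, the argparse values are invented, and platform.system().lower() stands in for run_tests.py's own platform_string().

import argparse
import platform

def build_upload_extra_fields(args):
    # Mirrors the dict run_tests.py builds before calling upload_results_to_bq;
    # the helper itself is ours, not part of the patch.
    return {
        'compiler': args.compiler,
        'config': args.config,
        'iomgr_platform': args.iomgr_platform,
        # args.language is a list but has one element when uploading to BQ is enabled.
        'language': args.language[0],
        'platform': platform.system().lower(),
    }

# Invented argparse values, purely for illustration.
args = argparse.Namespace(
    compiler='default', config='opt', iomgr_platform='native', language=['c++'])
print(build_upload_extra_fields(args))
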