diff options
author | 2018-07-11 13:03:55 -0700 | |
---|---|---|
committer | 2018-07-11 13:03:55 -0700 | |
commit | f2cd616329ba5973ac1834f60bab65de7e7fdeaa (patch) | |
tree | a88d258fb6d388336742be0c4bab6a9ed94b3bd2 | |
parent | a789ac30673f6e56960329e802bc199d5a9ac760 (diff) | |
parent | b2670fceb70a9d8f9d00c74eeba5404ea2defb85 (diff) |
Merge pull request #15918 from matt-kwong/rbe-unknown
Enable uploading UNKNOWN results for RBE
-rw-r--r-- | tools/run_tests/python_utils/upload_rbe_results.py | 86 |
1 file changed, 65 insertions, 21 deletions
diff --git a/tools/run_tests/python_utils/upload_rbe_results.py b/tools/run_tests/python_utils/upload_rbe_results.py index cbeb1ad941..d29ebc6219 100644 --- a/tools/run_tests/python_utils/upload_rbe_results.py +++ b/tools/run_tests/python_utils/upload_rbe_results.py @@ -136,7 +136,7 @@ if __name__ == "__main__": resultstore_actions = _get_resultstore_data(api_key, invocation_id) bq_rows = [] - for action in resultstore_actions: + for index, action in enumerate(resultstore_actions): # Filter out non-test related data, such as build results. if 'testAction' not in action: continue @@ -157,6 +157,23 @@ if __name__ == "__main__": 'timedOut': True } }] + # When RBE believes its infrastructure is failing, it will abort and + # mark running tests as UNKNOWN. These infrastructure failures may be + # related to our tests, so we should investigate if specific tests are + # repeatedly being marked as UNKNOWN. + elif action['statusAttributes']['status'] == 'UNKNOWN': + test_cases = [{ + 'testCase': { + 'caseName': str(action['id']['actionId']), + 'unknown': True + } + }] + # Take the timestamp from the previous action, which should be + # a close approximation. 
+ action['timing'] = { + 'startTime': + resultstore_actions[index - 1]['timing']['startTime'] + } else: test_cases = action['testAction']['testSuite']['tests'][0][ 'testSuite']['tests'] @@ -165,28 +182,55 @@ if __name__ == "__main__": result = 'FAILED' elif 'timedOut' in test_case['testCase']: result = 'TIMEOUT' + elif 'unknown' in test_case['testCase']: + result = 'UNKNOWN' else: result = 'PASSED' - bq_rows.append({ - 'insertId': str(uuid.uuid4()), - 'json': { - 'job_name': - os.getenv('KOKORO_JOB_NAME'), - 'build_id': - os.getenv('KOKORO_BUILD_NUMBER'), - 'build_url': - 'https://source.cloud.google.com/results/invocations/%s' % - invocation_id, - 'test_target': - action['id']['targetId'], - 'test_case': - test_case['testCase']['caseName'], - 'result': - result, - 'timestamp': - action['timing']['startTime'], - } - }) + try: + bq_rows.append({ + 'insertId': str(uuid.uuid4()), + 'json': { + 'job_name': + os.getenv('KOKORO_JOB_NAME'), + 'build_id': + os.getenv('KOKORO_BUILD_NUMBER'), + 'build_url': + 'https://source.cloud.google.com/results/invocations/%s' + % invocation_id, + 'test_target': + action['id']['targetId'], + 'test_case': + test_case['testCase']['caseName'], + 'result': + result, + 'timestamp': + action['timing']['startTime'], + } + }) + except Exception as e: + print('Failed to parse test result. Error: %s' % str(e)) + print(json.dumps(test_case, indent=4)) + bq_rows.append({ + 'insertId': str(uuid.uuid4()), + 'json': { + 'job_name': + os.getenv('KOKORO_JOB_NAME'), + 'build_id': + os.getenv('KOKORO_BUILD_NUMBER'), + 'build_url': + 'https://source.cloud.google.com/results/invocations/%s' + % invocation_id, + 'test_target': + action['id']['targetId'], + 'test_case': + 'N/A', + 'result': + 'UNPARSEABLE', + 'timestamp': + 'N/A', + } + }) + # BigQuery sometimes fails with large uploads, so batch 1,000 rows at a time. for i in range((len(bq_rows) / 1000) + 1): _upload_results_to_bq(bq_rows[i * 1000:(i + 1) * 1000]) |