aboutsummaryrefslogtreecommitdiffhomepage
path: root/infra/bots/recipe_modules/core
diff options
context:
space:
mode:
Diffstat (limited to 'infra/bots/recipe_modules/core')
-rw-r--r--infra/bots/recipe_modules/core/__init__.py17
-rwxr-xr-xinfra/bots/recipe_modules/core/android_devices.py99
-rw-r--r--infra/bots/recipe_modules/core/api.py498
-rw-r--r--infra/bots/recipe_modules/core/fake_specs.py1523
-rw-r--r--infra/bots/recipe_modules/core/resources/binary_size_utils.py67
-rw-r--r--infra/bots/recipe_modules/core/resources/elf_symbolizer.py477
-rwxr-xr-xinfra/bots/recipe_modules/core/resources/generate_and_upload_doxygen.py75
-rwxr-xr-xinfra/bots/recipe_modules/core/resources/run_binary_size_analysis.py817
-rwxr-xr-xinfra/bots/recipe_modules/core/resources/upload_bench_results.py75
-rwxr-xr-xinfra/bots/recipe_modules/core/resources/upload_dm_results.py98
-rwxr-xr-xinfra/bots/recipe_modules/core/ssh_devices.py30
11 files changed, 3776 insertions, 0 deletions
diff --git a/infra/bots/recipe_modules/core/__init__.py b/infra/bots/recipe_modules/core/__init__.py
new file mode 100644
index 0000000000..df4078b271
--- /dev/null
+++ b/infra/bots/recipe_modules/core/__init__.py
@@ -0,0 +1,17 @@
+# Copyright 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+DEPS = [
+ 'build/file',
+ 'depot_tools/gclient',
+ 'depot_tools/tryserver',
+ 'flavor',
+ 'recipe_engine/path',
+ 'recipe_engine/platform',
+ 'recipe_engine/properties',
+ 'recipe_engine/python',
+ 'recipe_engine/step',
+ 'run',
+ 'vars',
+]
diff --git a/infra/bots/recipe_modules/core/android_devices.py b/infra/bots/recipe_modules/core/android_devices.py
new file mode 100755
index 0000000000..1a59c77333
--- /dev/null
+++ b/infra/bots/recipe_modules/core/android_devices.py
@@ -0,0 +1,99 @@
+#!/usr/bin/env python
+# Copyright 2015 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+
+import collections
+import json
+
+
+DEFAULT_SDK_ROOT = '/home/chrome-bot/android-sdk-linux'
+MAC_SDK_ROOT = '/Users/chrome-bot/adt-bundle-mac-x86_64-20140702/sdk'
+MACMINI_SDK_ROOT = '/Users/chrome-bot/android-sdk-macosx'
+
+SlaveInfo = collections.namedtuple('SlaveInfo',
+ 'serial android_sdk_root has_root')
+
+SLAVE_INFO = {
+ 'skiabot-mac-10_8-compile-000':
+ SlaveInfo('noserial', MAC_SDK_ROOT, True),
+ 'skiabot-mac-10_8-compile-001':
+ SlaveInfo('noserial', MAC_SDK_ROOT, True),
+ 'skiabot-mac-10_8-compile-002':
+ SlaveInfo('noserial', MAC_SDK_ROOT, True),
+ 'skiabot-mac-10_8-compile-003':
+ SlaveInfo('noserial', MAC_SDK_ROOT, True),
+ 'skiabot-mac-10_8-compile-004':
+ SlaveInfo('noserial', MAC_SDK_ROOT, True),
+ 'skiabot-mac-10_8-compile-005':
+ SlaveInfo('noserial', MAC_SDK_ROOT, True),
+ 'skiabot-mac-10_8-compile-006':
+ SlaveInfo('noserial', MAC_SDK_ROOT, True),
+ 'skiabot-mac-10_8-compile-007':
+ SlaveInfo('noserial', MAC_SDK_ROOT, True),
+ 'skiabot-mac-10_8-compile-008':
+ SlaveInfo('noserial', MAC_SDK_ROOT, True),
+ 'skiabot-mac-10_8-compile-009':
+ SlaveInfo('noserial', MAC_SDK_ROOT, True),
+ 'skiabot-shuttle-ubuntu15-androidone-001':
+ SlaveInfo('AG86044202A04GC', DEFAULT_SDK_ROOT, True),
+ 'skiabot-shuttle-ubuntu15-androidone-002':
+ SlaveInfo('AG8404EC06G02GC', DEFAULT_SDK_ROOT, True),
+ 'skiabot-shuttle-ubuntu15-androidone-003':
+ SlaveInfo('AG8404EC0688EGC', DEFAULT_SDK_ROOT, True),
+ 'skiabot-shuttle-ubuntu12-galaxys3-001':
+ SlaveInfo('4df713b8244a21cf', DEFAULT_SDK_ROOT, False),
+ 'skiabot-shuttle-ubuntu12-galaxys3-002':
+ SlaveInfo('32309a56e9b3a09f', DEFAULT_SDK_ROOT, False),
+ 'skiabot-shuttle-ubuntu12-galaxys4-001':
+ SlaveInfo('4d0032a5d8cb6125', MACMINI_SDK_ROOT, False),
+ 'skiabot-shuttle-ubuntu12-galaxys4-002':
+ SlaveInfo('4d00353cd8ed61c3', MACMINI_SDK_ROOT, False),
+ 'skiabot-shuttle-ubuntu12-nexus5-001':
+ SlaveInfo('03f61449437cc47b', DEFAULT_SDK_ROOT, True),
+ 'skiabot-shuttle-ubuntu12-nexus5-002':
+ SlaveInfo('018dff3520c970f6', DEFAULT_SDK_ROOT, True),
+ 'skiabot-shuttle-ubuntu15-nexus6-001':
+ SlaveInfo('ZX1G22JJWS', DEFAULT_SDK_ROOT, True),
+ 'skiabot-shuttle-ubuntu15-nexus6-002':
+ SlaveInfo('ZX1G22JN35', DEFAULT_SDK_ROOT, True),
+ 'skiabot-shuttle-ubuntu15-nexus6-003':
+ SlaveInfo('ZX1G22JXXM', DEFAULT_SDK_ROOT, True),
+ 'skiabot-shuttle-ubuntu12-nexus7-001':
+ SlaveInfo('015d210a13480604', DEFAULT_SDK_ROOT, True),
+ 'skiabot-shuttle-ubuntu12-nexus7-002':
+ SlaveInfo('015d18848c280217', DEFAULT_SDK_ROOT, True),
+ 'skiabot-shuttle-ubuntu12-nexus7-003':
+ SlaveInfo('015d16897c401e17', DEFAULT_SDK_ROOT, True),
+ 'skiabot-shuttle-ubuntu12-nexus9-001':
+ SlaveInfo('HT43RJT00022', DEFAULT_SDK_ROOT, True),
+ 'skiabot-shuttle-ubuntu12-nexus9-002':
+ SlaveInfo('HT4AEJT03112', DEFAULT_SDK_ROOT, True),
+ 'skiabot-shuttle-ubuntu12-nexus9-003':
+ SlaveInfo('HT4ADJT03339', DEFAULT_SDK_ROOT, True),
+ 'skiabot-shuttle-ubuntu12-nexus10-001':
+ SlaveInfo('R32C801B5LH', DEFAULT_SDK_ROOT, True),
+ 'skiabot-shuttle-ubuntu12-nexus10-003':
+ SlaveInfo('R32CB017X2L', DEFAULT_SDK_ROOT, True),
+ 'skiabot-shuttle-ubuntu12-nexusplayer-001':
+ SlaveInfo('D76C708B', DEFAULT_SDK_ROOT, True),
+ 'skiabot-shuttle-ubuntu12-nexusplayer-002':
+ SlaveInfo('8AB5139A', DEFAULT_SDK_ROOT, True),
+ 'skiabot-shuttle-ubuntu15-nvidia-shield-001':
+ SlaveInfo('04217150066510000078', MACMINI_SDK_ROOT, False),
+ 'skiabot-linux-housekeeper-003':
+ SlaveInfo('noserial', DEFAULT_SDK_ROOT, False),
+ 'vm690-m3': SlaveInfo('noserial', MACMINI_SDK_ROOT, False),
+ 'vm691-m3': SlaveInfo('noserial', MACMINI_SDK_ROOT, False),
+ 'vm692-m3': SlaveInfo('noserial', MACMINI_SDK_ROOT, False),
+ 'vm693-m3': SlaveInfo('noserial', MACMINI_SDK_ROOT, False),
+ 'skiabot-linux-swarm-000': SlaveInfo('noserial', DEFAULT_SDK_ROOT, True),
+ 'default':
+ SlaveInfo('noserial', DEFAULT_SDK_ROOT, False),
+}
+
+
+if __name__ == '__main__':
+ print json.dumps(SLAVE_INFO) # pragma: no cover
+
diff --git a/infra/bots/recipe_modules/core/api.py b/infra/bots/recipe_modules/core/api.py
new file mode 100644
index 0000000000..6d477efd06
--- /dev/null
+++ b/infra/bots/recipe_modules/core/api.py
@@ -0,0 +1,498 @@
+# Copyright 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+
+# pylint: disable=W0201
+
+
+import json
+import os
+import re
+import sys
+
+from recipe_engine import recipe_api
+from recipe_engine import config_types
+
+from . import fake_specs
+
+
+TEST_EXPECTED_SKP_VERSION = '42'
+TEST_EXPECTED_SK_IMAGE_VERSION = '42'
+
+VERSION_FILE_SK_IMAGE = 'SK_IMAGE_VERSION'
+VERSION_FILE_SKP = 'SKP_VERSION'
+
+VERSION_NONE = -1
+
+
+class SkiaApi(recipe_api.RecipeApi):
+
+ def get_builder_spec(self, skia_dir, builder_name):
+ """Obtain the buildbot spec for the given builder."""
+ fake_spec = None
+ if self._test_data.enabled:
+ fake_spec = fake_specs.FAKE_SPECS[builder_name]
+ builder_spec = self.m.run.json_from_file(
+ skia_dir.join('tools', 'buildbot_spec.py'),
+ skia_dir,
+ builder_name,
+ fake_spec)
+ return builder_spec
+
+ def setup(self):
+ """Prepare the bot to run."""
+ # Setup dependencies.
+ self.m.vars.setup()
+
+ # Check out the Skia code.
+ self.checkout_steps()
+
+ # Obtain the spec for this builder from the Skia repo. Use it to set more
+ # properties.
+ builder_spec = self.get_builder_spec(self.m.vars.skia_dir,
+ self.m.vars.builder_name)
+
+ # Continue setting up vars with the builder_spec.
+ self.m.vars.update_with_builder_spec(builder_spec)
+
+
+ if not self.m.path.exists(self.m.vars.tmp_dir):
+ self.m.run.run_once(self.m.file.makedirs,
+ 'tmp_dir',
+ self.m.vars.tmp_dir,
+ infra_step=True)
+
+ self.m.flavor.setup()
+
+ def update_repo(self, parent_dir, repo):
+ """Update an existing repo. This is safe to call without gen_steps."""
+ repo_path = parent_dir.join(repo.name)
+ if self.m.path.exists(repo_path): # pragma: nocover
+ if self.m.platform.is_win:
+ git = 'git.bat'
+ else:
+ git = 'git'
+ self.m.step('git remote set-url',
+ cmd=[git, 'remote', 'set-url', 'origin', repo.url],
+ cwd=repo_path,
+ infra_step=True)
+ self.m.step('git fetch',
+ cmd=[git, 'fetch'],
+ cwd=repo_path,
+ infra_step=True)
+ self.m.step('git reset',
+ cmd=[git, 'reset', '--hard', repo.revision],
+ cwd=repo_path,
+ infra_step=True)
+ self.m.step('git clean',
+ cmd=[git, 'clean', '-d', '-f'],
+ cwd=repo_path,
+ infra_step=True)
+
+ def checkout_steps(self):
+ """Run the steps to obtain a checkout of Skia."""
+ cfg_kwargs = {}
+ if not self.m.vars.persistent_checkout:
+ # We should've obtained the Skia checkout through isolates, so we don't
+ # need to perform the checkout ourselves.
+ return
+
+ # Use a persistent gclient cache for Swarming.
+ cfg_kwargs['CACHE_DIR'] = self.m.vars.gclient_cache
+
+ # Create the checkout path if necessary.
+ if not self.m.path.exists(self.m.vars.checkout_root):
+ self.m.file.makedirs('checkout_path',
+ self.m.vars.checkout_root,
+ infra_step=True)
+
+ # Initial cleanup.
+ gclient_cfg = self.m.gclient.make_config(**cfg_kwargs)
+ skia = gclient_cfg.solutions.add()
+ skia.name = 'skia'
+ skia.managed = False
+ skia.url = 'https://skia.googlesource.com/skia.git'
+ skia.revision = self.m.properties.get('revision') or 'origin/master'
+ self.update_repo(self.m.vars.checkout_root, skia)
+
+ # TODO(rmistry): Remove the below block after there is a solution for
+ # crbug.com/616443
+ entries_file = self.m.vars.checkout_root.join('.gclient_entries')
+ if self.m.path.exists(entries_file):
+ self.m.file.remove('remove %s' % entries_file,
+ entries_file,
+ infra_step=True) # pragma: no cover
+
+ if self.m.vars.need_chromium_checkout:
+ chromium = gclient_cfg.solutions.add()
+ chromium.name = 'src'
+ chromium.managed = False
+ chromium.url = 'https://chromium.googlesource.com/chromium/src.git'
+ chromium.revision = 'origin/lkgr'
+ self.update_repo(self.m.vars.checkout_root, chromium)
+
+ if self.m.vars.need_pdfium_checkout:
+ pdfium = gclient_cfg.solutions.add()
+ pdfium.name = 'pdfium'
+ pdfium.managed = False
+ pdfium.url = 'https://pdfium.googlesource.com/pdfium.git'
+ pdfium.revision = 'origin/master'
+ self.update_repo(self.m.vars.checkout_root, pdfium)
+
+ # Run 'gclient sync'.
+ gclient_cfg.got_revision_mapping['skia'] = 'got_revision'
+ gclient_cfg.target_os.add('llvm')
+ checkout_kwargs = {}
+ checkout_kwargs['env'] = self.m.vars.default_env
+
+ # api.gclient.revert() assumes things about the layout of the code, so it
+ # fails for us. Run an appropriate revert sequence for trybots instead.
+ gclient_file = self.m.vars.checkout_root.join('.gclient')
+ if (self.m.tryserver.is_tryserver and
+ self.m.path.exists(gclient_file)): # pragma: no cover
+ # These steps taken from:
+ # https://chromium.googlesource.com/chromium/tools/build/+/
+ # 81a696760ab7c25f6606c54fc781b90b8af9fdd2/scripts/slave/
+ # gclient_safe_revert.py
+ if self.m.path.exists(entries_file):
+ self.m.gclient('recurse', [
+ 'recurse', '-i', 'sh', '-c',
+ 'if [ -e .git ]; then git remote update; fi'])
+ self.m.gclient(
+ 'revert',
+ ['revert', '-v', '-v', '-v', '--nohooks', '--upstream'],
+ cwd=self.m.vars.checkout_root)
+
+ update_step = self.m.gclient.checkout(gclient_config=gclient_cfg,
+ cwd=self.m.vars.checkout_root,
+ revert=False,
+ **checkout_kwargs)
+
+ self.m.vars.got_revision = (
+ update_step.presentation.properties['got_revision'])
+ self.m.tryserver.maybe_apply_issue()
+
+ if self.m.vars.need_chromium_checkout:
+ self.m.gclient.runhooks(cwd=self.m.vars.checkout_root,
+ env=self.m.vars.gclient_env)
+
+ def copy_dir(self, host_version, version_file, tmp_dir,
+ host_path, device_path, test_expected_version,
+ test_actual_version):
+ actual_version_file = self.m.path.join(tmp_dir, version_file)
+ # Copy to device.
+ device_version_file = self.m.flavor.device_path_join(
+ self.m.flavor.device_dirs.tmp_dir, version_file)
+ if str(actual_version_file) != str(device_version_file):
+ try:
+ device_version = (
+ self.m.flavor.read_file_on_device(device_version_file))
+ except self.m.step.StepFailure:
+ device_version = VERSION_NONE
+ if device_version != host_version:
+ self.m.flavor.remove_file_on_device(device_version_file)
+ self.m.flavor.create_clean_device_dir(device_path)
+ self.m.flavor.copy_directory_contents_to_device(
+ host_path, device_path)
+
+ # Copy the new version file.
+ self.m.flavor.copy_file_to_device(actual_version_file,
+ device_version_file)
+
+ def _copy_images(self):
+ """Download and copy test images if needed."""
+ version_file = self.m.vars.infrabots_dir.join(
+ 'assets', 'skimage', 'VERSION')
+ test_data = self.m.properties.get(
+ 'test_downloaded_sk_image_version', TEST_EXPECTED_SK_IMAGE_VERSION)
+ version = self.m.run.readfile(
+ version_file,
+ name='Get downloaded skimage VERSION',
+ test_data=test_data).rstrip()
+ self.m.run.writefile(
+ self.m.path.join(self.m.vars.tmp_dir, VERSION_FILE_SK_IMAGE),
+ version)
+ self.copy_dir(
+ version,
+ VERSION_FILE_SK_IMAGE,
+ self.m.vars.tmp_dir,
+ self.m.vars.images_dir,
+ self.m.flavor.device_dirs.images_dir,
+ test_expected_version=self.m.properties.get(
+ 'test_downloaded_sk_image_version',
+ TEST_EXPECTED_SK_IMAGE_VERSION),
+ test_actual_version=self.m.properties.get(
+ 'test_downloaded_sk_image_version',
+ TEST_EXPECTED_SK_IMAGE_VERSION))
+ return version
+
+ def _copy_skps(self):
+ """Download and copy the SKPs if needed."""
+ version_file = self.m.vars.infrabots_dir.join(
+ 'assets', 'skp', 'VERSION')
+ test_data = self.m.properties.get(
+ 'test_downloaded_skp_version', TEST_EXPECTED_SKP_VERSION)
+ version = self.m.run.readfile(
+ version_file,
+ name='Get downloaded SKP VERSION',
+ test_data=test_data).rstrip()
+ self.m.run.writefile(
+ self.m.path.join(self.m.vars.tmp_dir, VERSION_FILE_SKP),
+ version)
+ self.copy_dir(
+ version,
+ VERSION_FILE_SKP,
+ self.m.vars.tmp_dir,
+ self.m.vars.local_skp_dir,
+ self.m.flavor.device_dirs.skp_dir,
+ test_expected_version=self.m.properties.get(
+ 'test_downloaded_skp_version', TEST_EXPECTED_SKP_VERSION),
+ test_actual_version=self.m.properties.get(
+ 'test_downloaded_skp_version', TEST_EXPECTED_SKP_VERSION))
+ return version
+
+ def install(self):
+ """Copy the required executables and files to the device."""
+ # Run any device-specific installation.
+ self.m.flavor.install()
+
+ # TODO(borenet): Only copy files which have changed.
+ # Resources
+ self.m.flavor.copy_directory_contents_to_device(
+ self.m.vars.resource_dir,
+ self.m.flavor.device_dirs.resource_dir)
+
+ def test_steps(self):
+ """Run the DM test."""
+ self.m.run.run_once(self.install)
+ self.m.run.run_once(self._copy_skps)
+ self.m.run.run_once(self._copy_images)
+
+ use_hash_file = False
+ if self.m.vars.upload_dm_results:
+ # This must run before we write anything into
+ # self.m.flavor.device_dirs.dm_dir or we may end up deleting our
+ # output on machines where they're the same.
+ self.m.flavor.create_clean_host_dir(self.m.vars.dm_dir)
+ host_dm_dir = str(self.m.vars.dm_dir)
+ device_dm_dir = str(self.m.flavor.device_dirs.dm_dir)
+ if host_dm_dir != device_dm_dir:
+ self.m.flavor.create_clean_device_dir(device_dm_dir)
+
+ # Obtain the list of already-generated hashes.
+ hash_filename = 'uninteresting_hashes.txt'
+
+ # Ensure that the tmp_dir exists.
+ self.m.run.run_once(self.m.file.makedirs,
+ 'tmp_dir',
+ self.m.vars.tmp_dir,
+ infra_step=True)
+
+ host_hashes_file = self.m.vars.tmp_dir.join(hash_filename)
+ hashes_file = self.m.flavor.device_path_join(
+ self.m.flavor.device_dirs.tmp_dir, hash_filename)
+ self.m.run(
+ self.m.python.inline,
+ 'get uninteresting hashes',
+ program="""
+ import contextlib
+ import math
+ import socket
+ import sys
+ import time
+ import urllib2
+
+ HASHES_URL = 'https://gold.skia.org/_/hashes'
+ RETRIES = 5
+ TIMEOUT = 60
+ WAIT_BASE = 15
+
+ socket.setdefaulttimeout(TIMEOUT)
+ for retry in range(RETRIES):
+ try:
+ with contextlib.closing(
+ urllib2.urlopen(HASHES_URL, timeout=TIMEOUT)) as w:
+ hashes = w.read()
+ with open(sys.argv[1], 'w') as f:
+ f.write(hashes)
+ break
+ except Exception as e:
+ print 'Failed to get uninteresting hashes from %s:' % HASHES_URL
+ print e
+ if retry == RETRIES:
+ raise
+ waittime = WAIT_BASE * math.pow(2, retry)
+ print 'Retry in %d seconds.' % waittime
+ time.sleep(waittime)
+ """,
+ args=[host_hashes_file],
+ cwd=self.m.vars.skia_dir,
+ abort_on_failure=False,
+ fail_build_on_failure=False,
+ infra_step=True)
+
+ if self.m.path.exists(host_hashes_file):
+ self.m.flavor.copy_file_to_device(host_hashes_file, hashes_file)
+ use_hash_file = True
+
+ # Run DM.
+ properties = [
+ 'gitHash', self.m.vars.got_revision,
+ 'master', self.m.vars.master_name,
+ 'builder', self.m.vars.builder_name,
+ 'build_number', self.m.vars.build_number,
+ ]
+ if self.m.vars.is_trybot:
+ properties.extend([
+ 'issue', self.m.vars.issue,
+ 'patchset', self.m.vars.patchset,
+ ])
+
+ args = [
+ 'dm',
+ '--undefok', # This helps branches that may not know new flags.
+ '--resourcePath', self.m.flavor.device_dirs.resource_dir,
+ '--skps', self.m.flavor.device_dirs.skp_dir,
+ '--images', self.m.flavor.device_path_join(
+ self.m.flavor.device_dirs.images_dir, 'dm'),
+ '--colorImages', self.m.flavor.device_path_join(
+ self.m.flavor.device_dirs.images_dir, 'colorspace'),
+ '--nameByHash',
+ '--properties'
+ ] + properties
+
+ args.append('--key')
+ args.extend(self._KeyParams())
+ if use_hash_file:
+ args.extend(['--uninterestingHashesFile', hashes_file])
+ if self.m.vars.upload_dm_results:
+ args.extend(['--writePath', self.m.flavor.device_dirs.dm_dir])
+
+ skip_flag = None
+ if self.m.vars.builder_cfg.get('cpu_or_gpu') == 'CPU':
+ skip_flag = '--nogpu'
+ elif self.m.vars.builder_cfg.get('cpu_or_gpu') == 'GPU':
+ skip_flag = '--nocpu'
+ if skip_flag:
+ args.append(skip_flag)
+ args.extend(self.m.vars.dm_flags)
+
+ self.m.run(self.m.flavor.step, 'dm', cmd=args,
+ abort_on_failure=False,
+ env=self.m.vars.default_env)
+
+ if self.m.vars.upload_dm_results:
+ # Copy images and JSON to host machine if needed.
+ self.m.flavor.copy_directory_contents_to_host(
+ self.m.flavor.device_dirs.dm_dir, self.m.vars.dm_dir)
+
+ # See skia:2789.
+ if ('Valgrind' in self.m.vars.builder_name and
+ self.m.vars.builder_cfg.get('cpu_or_gpu') == 'GPU'):
+ abandonGpuContext = list(args)
+ abandonGpuContext.append('--abandonGpuContext')
+ self.m.run(self.m.flavor.step, 'dm --abandonGpuContext',
+ cmd=abandonGpuContext, abort_on_failure=False)
+ preAbandonGpuContext = list(args)
+ preAbandonGpuContext.append('--preAbandonGpuContext')
+ self.m.run(self.m.flavor.step, 'dm --preAbandonGpuContext',
+ cmd=preAbandonGpuContext, abort_on_failure=False,
+ env=self.m.vars.default_env)
+
+ def perf_steps(self):
+ """Run Skia benchmarks."""
+ self.m.run.run_once(self.install)
+ self.m.run.run_once(self._copy_skps)
+ self.m.run.run_once(self._copy_images)
+
+ if self.m.vars.upload_perf_results:
+ self.m.flavor.create_clean_device_dir(
+ self.m.flavor.device_dirs.perf_data_dir)
+
+ # Run nanobench.
+ properties = [
+ '--properties',
+ 'gitHash', self.m.vars.got_revision,
+ 'build_number', self.m.vars.build_number,
+ ]
+ if self.m.vars.is_trybot:
+ properties.extend([
+ 'issue', self.m.vars.issue,
+ 'patchset', self.m.vars.patchset,
+ ])
+
+ target = 'nanobench'
+ if 'VisualBench' in self.m.vars.builder_name:
+ target = 'visualbench'
+ args = [
+ target,
+ '--undefok', # This helps branches that may not know new flags.
+ '-i', self.m.flavor.device_dirs.resource_dir,
+ '--skps', self.m.flavor.device_dirs.skp_dir,
+ '--images', self.m.flavor.device_path_join(
+ self.m.flavor.device_dirs.images_dir, 'nanobench'),
+ ]
+
+ skip_flag = None
+ if self.m.vars.builder_cfg.get('cpu_or_gpu') == 'CPU':
+ skip_flag = '--nogpu'
+ elif self.m.vars.builder_cfg.get('cpu_or_gpu') == 'GPU':
+ skip_flag = '--nocpu'
+ if skip_flag:
+ args.append(skip_flag)
+ args.extend(self.m.vars.nanobench_flags)
+
+ if self.m.vars.upload_perf_results:
+ json_path = self.m.flavor.device_path_join(
+ self.m.flavor.device_dirs.perf_data_dir,
+ 'nanobench_%s.json' % self.m.vars.got_revision)
+ args.extend(['--outResultsFile', json_path])
+ args.extend(properties)
+
+ keys_blacklist = ['configuration', 'role', 'is_trybot']
+ args.append('--key')
+ for k in sorted(self.m.vars.builder_cfg.keys()):
+ if not k in keys_blacklist:
+ args.extend([k, self.m.vars.builder_cfg[k]])
+
+ self.m.run(self.m.flavor.step, target, cmd=args,
+ abort_on_failure=False,
+ env=self.m.vars.default_env)
+
+ # See skia:2789.
+ if ('Valgrind' in self.m.vars.builder_name and
+ self.m.vars.builder_cfg.get('cpu_or_gpu') == 'GPU'):
+ abandonGpuContext = list(args)
+ abandonGpuContext.extend(['--abandonGpuContext', '--nocpu'])
+ self.m.run(self.m.flavor.step,
+ '%s --abandonGpuContext' % target,
+ cmd=abandonGpuContext, abort_on_failure=False,
+ env=self.m.vars.default_env)
+
+ # Upload results.
+ if self.m.vars.upload_perf_results:
+ self.m.file.makedirs('perf_dir', self.m.vars.perf_data_dir)
+ self.m.flavor.copy_directory_contents_to_host(
+ self.m.flavor.device_dirs.perf_data_dir,
+ self.m.vars.perf_data_dir)
+
+ def cleanup_steps(self):
+ """Run any cleanup steps."""
+ self.m.flavor.cleanup_steps()
+
+ def _KeyParams(self):
+ """Build a unique key from the builder name (as a list).
+
+ E.g. arch x86 gpu GeForce320M mode MacMini4.1 os Mac10.6
+ """
+ # Don't bother to include role, which is always Test.
+ # TryBots are uploaded elsewhere so they can use the same key.
+ blacklist = ['role', 'is_trybot']
+
+ flat = []
+ for k in sorted(self.m.vars.builder_cfg.keys()):
+ if k not in blacklist:
+ flat.append(k)
+ flat.append(self.m.vars.builder_cfg[k])
+ return flat
diff --git a/infra/bots/recipe_modules/core/fake_specs.py b/infra/bots/recipe_modules/core/fake_specs.py
new file mode 100644
index 0000000000..4b922b52e7
--- /dev/null
+++ b/infra/bots/recipe_modules/core/fake_specs.py
@@ -0,0 +1,1523 @@
+# Copyright 2016 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+# This file is generated by the infra/bots/gen_buildbot_specs.py script.
+
+FAKE_SPECS = {
+ 'Build-Mac-Clang-Arm7-Debug-Android': {
+ 'build_targets': [
+ 'most',
+ ],
+ 'builder_cfg': {
+ 'compiler': 'Clang',
+ 'configuration': 'Debug',
+ 'extra_config': 'Android',
+ 'is_trybot': False,
+ 'os': 'Mac',
+ 'role': 'Build',
+ 'target_arch': 'Arm7',
+ },
+ 'configuration': 'Debug',
+ 'device_cfg': 'arm_v7_neon',
+ 'dm_flags': [
+ '--dummy-flags',
+ ],
+ 'do_perf_steps': False,
+ 'do_test_steps': False,
+ 'env': {
+ 'CC': '/usr/bin/clang',
+ 'CXX': '/usr/bin/clang++',
+ 'GYP_DEFINES':
+ 'skia_arch_type=arm skia_clang_build=1 skia_warnings_as_errors=0',
+ },
+ 'nanobench_flags': [
+ '--dummy-flags',
+ ],
+ 'upload_dm_results': True,
+ 'upload_perf_results': False,
+ },
+ 'Build-Mac-Clang-Arm7-Release-iOS': {
+ 'build_targets': [
+ 'most',
+ ],
+ 'builder_cfg': {
+ 'compiler': 'Clang',
+ 'configuration': 'Release',
+ 'extra_config': 'iOS',
+ 'is_trybot': False,
+ 'os': 'Mac',
+ 'role': 'Build',
+ 'target_arch': 'Arm7',
+ },
+ 'configuration': 'Release',
+ 'dm_flags': [
+ '--dummy-flags',
+ ],
+ 'do_perf_steps': False,
+ 'do_test_steps': False,
+ 'env': {
+ 'CC': '/usr/bin/clang',
+ 'CXX': '/usr/bin/clang++',
+ 'GYP_DEFINES':
+ ('skia_arch_type=arm skia_clang_build=1 skia_os=ios skia_warnings_a'
+ 's_errors=1'),
+ },
+ 'nanobench_flags': [
+ '--dummy-flags',
+ ],
+ 'upload_dm_results': True,
+ 'upload_perf_results': False,
+ },
+ 'Build-Mac-Clang-x86_64-Debug-CommandBuffer': {
+ 'build_targets': [
+ 'most',
+ ],
+ 'builder_cfg': {
+ 'compiler': 'Clang',
+ 'configuration': 'Debug',
+ 'extra_config': 'CommandBuffer',
+ 'is_trybot': False,
+ 'os': 'Mac',
+ 'role': 'Build',
+ 'target_arch': 'x86_64',
+ },
+ 'configuration': 'Debug',
+ 'dm_flags': [
+ '--dummy-flags',
+ ],
+ 'do_perf_steps': False,
+ 'do_test_steps': False,
+ 'env': {
+ 'CC': '/usr/bin/clang',
+ 'CXX': '/usr/bin/clang++',
+ 'GYP_DEFINES':
+ ('skia_arch_type=x86_64 skia_clang_build=1 skia_command_buffer=1 sk'
+ 'ia_warnings_as_errors=1'),
+ },
+ 'nanobench_flags': [
+ '--dummy-flags',
+ ],
+ 'upload_dm_results': True,
+ 'upload_perf_results': False,
+ },
+ 'Build-Mac-Clang-x86_64-Release': {
+ 'build_targets': [
+ 'most',
+ ],
+ 'builder_cfg': {
+ 'compiler': 'Clang',
+ 'configuration': 'Release',
+ 'is_trybot': False,
+ 'os': 'Mac',
+ 'role': 'Build',
+ 'target_arch': 'x86_64',
+ },
+ 'configuration': 'Release',
+ 'dm_flags': [
+ '--dummy-flags',
+ ],
+ 'do_perf_steps': False,
+ 'do_test_steps': False,
+ 'env': {
+ 'CC': '/usr/bin/clang',
+ 'CXX': '/usr/bin/clang++',
+ 'GYP_DEFINES':
+ 'skia_arch_type=x86_64 skia_clang_build=1 skia_warnings_as_errors=1',
+ },
+ 'nanobench_flags': [
+ '--dummy-flags',
+ ],
+ 'upload_dm_results': True,
+ 'upload_perf_results': False,
+ },
+ 'Build-Mac-Clang-x86_64-Release-CMake': {
+ 'build_targets': [
+ 'most',
+ ],
+ 'builder_cfg': {
+ 'compiler': 'Clang',
+ 'configuration': 'Release',
+ 'extra_config': 'CMake',
+ 'is_trybot': False,
+ 'os': 'Mac',
+ 'role': 'Build',
+ 'target_arch': 'x86_64',
+ },
+ 'configuration': 'Release',
+ 'dm_flags': [
+ '--dummy-flags',
+ ],
+ 'do_perf_steps': False,
+ 'do_test_steps': False,
+ 'env': {
+ 'CC': '/usr/bin/clang',
+ 'CXX': '/usr/bin/clang++',
+ 'GYP_DEFINES':
+ 'skia_arch_type=x86_64 skia_clang_build=1 skia_warnings_as_errors=1',
+ },
+ 'nanobench_flags': [
+ '--dummy-flags',
+ ],
+ 'upload_dm_results': True,
+ 'upload_perf_results': False,
+ },
+ 'Build-Ubuntu-GCC-Arm64-Debug-Android_Vulkan': {
+ 'build_targets': [
+ 'most',
+ ],
+ 'builder_cfg': {
+ 'compiler': 'GCC',
+ 'configuration': 'Debug',
+ 'extra_config': 'Android_Vulkan',
+ 'is_trybot': False,
+ 'os': 'Ubuntu',
+ 'role': 'Build',
+ 'target_arch': 'Arm64',
+ },
+ 'configuration': 'Debug',
+ 'device_cfg': 'arm64',
+ 'dm_flags': [
+ '--dummy-flags',
+ ],
+ 'do_perf_steps': False,
+ 'do_test_steps': False,
+ 'env': {
+ 'GYP_DEFINES': 'skia_arch_type=arm64 skia_warnings_as_errors=1',
+ },
+ 'nanobench_flags': [
+ '--dummy-flags',
+ ],
+ 'upload_dm_results': True,
+ 'upload_perf_results': False,
+ },
+ 'Build-Ubuntu-GCC-Arm7-Debug-Android-Trybot': {
+ 'build_targets': [
+ 'most',
+ ],
+ 'builder_cfg': {
+ 'compiler': 'GCC',
+ 'configuration': 'Debug',
+ 'extra_config': 'Android',
+ 'is_trybot': True,
+ 'os': 'Ubuntu',
+ 'role': 'Build',
+ 'target_arch': 'Arm7',
+ },
+ 'configuration': 'Debug',
+ 'device_cfg': 'arm_v7_neon',
+ 'dm_flags': [
+ '--dummy-flags',
+ ],
+ 'do_perf_steps': False,
+ 'do_test_steps': False,
+ 'env': {
+ 'GYP_DEFINES': 'skia_arch_type=arm skia_warnings_as_errors=1',
+ },
+ 'nanobench_flags': [
+ '--dummy-flags',
+ ],
+ 'upload_dm_results': True,
+ 'upload_perf_results': False,
+ },
+ 'Build-Ubuntu-GCC-Arm7-Release-Android': {
+ 'build_targets': [
+ 'most',
+ ],
+ 'builder_cfg': {
+ 'compiler': 'GCC',
+ 'configuration': 'Release',
+ 'extra_config': 'Android',
+ 'is_trybot': False,
+ 'os': 'Ubuntu',
+ 'role': 'Build',
+ 'target_arch': 'Arm7',
+ },
+ 'configuration': 'Release',
+ 'device_cfg': 'arm_v7_neon',
+ 'dm_flags': [
+ '--dummy-flags',
+ ],
+ 'do_perf_steps': False,
+ 'do_test_steps': False,
+ 'env': {
+ 'GYP_DEFINES': 'skia_arch_type=arm skia_warnings_as_errors=1',
+ },
+ 'nanobench_flags': [
+ '--dummy-flags',
+ ],
+ 'upload_dm_results': True,
+ 'upload_perf_results': False,
+ },
+ 'Build-Ubuntu-GCC-Arm7-Release-Android_Vulkan': {
+ 'build_targets': [
+ 'most',
+ ],
+ 'builder_cfg': {
+ 'compiler': 'GCC',
+ 'configuration': 'Release',
+ 'extra_config': 'Android_Vulkan',
+ 'is_trybot': False,
+ 'os': 'Ubuntu',
+ 'role': 'Build',
+ 'target_arch': 'Arm7',
+ },
+ 'configuration': 'Release',
+ 'device_cfg': 'arm_v7_neon',
+ 'dm_flags': [
+ '--dummy-flags',
+ ],
+ 'do_perf_steps': False,
+ 'do_test_steps': False,
+ 'env': {
+ 'GYP_DEFINES': 'skia_arch_type=arm skia_warnings_as_errors=1',
+ },
+ 'nanobench_flags': [
+ '--dummy-flags',
+ ],
+ 'upload_dm_results': True,
+ 'upload_perf_results': False,
+ },
+ 'Build-Ubuntu-GCC-x86-Debug': {
+ 'build_targets': [
+ 'most',
+ ],
+ 'builder_cfg': {
+ 'compiler': 'GCC',
+ 'configuration': 'Debug',
+ 'is_trybot': False,
+ 'os': 'Ubuntu',
+ 'role': 'Build',
+ 'target_arch': 'x86',
+ },
+ 'configuration': 'Debug',
+ 'dm_flags': [
+ '--dummy-flags',
+ ],
+ 'do_perf_steps': False,
+ 'do_test_steps': False,
+ 'env': {
+ 'GYP_DEFINES': 'skia_arch_type=x86 skia_warnings_as_errors=1',
+ },
+ 'nanobench_flags': [
+ '--dummy-flags',
+ ],
+ 'upload_dm_results': True,
+ 'upload_perf_results': False,
+ },
+ 'Build-Ubuntu-GCC-x86_64-Debug': {
+ 'build_targets': [
+ 'most',
+ ],
+ 'builder_cfg': {
+ 'compiler': 'GCC',
+ 'configuration': 'Debug',
+ 'is_trybot': False,
+ 'os': 'Ubuntu',
+ 'role': 'Build',
+ 'target_arch': 'x86_64',
+ },
+ 'configuration': 'Debug',
+ 'dm_flags': [
+ '--dummy-flags',
+ ],
+ 'do_perf_steps': False,
+ 'do_test_steps': False,
+ 'env': {
+ 'GYP_DEFINES': 'skia_arch_type=x86_64 skia_warnings_as_errors=1',
+ },
+ 'nanobench_flags': [
+ '--dummy-flags',
+ ],
+ 'upload_dm_results': True,
+ 'upload_perf_results': False,
+ },
+ 'Build-Ubuntu-Clang-x86_64-Debug-GN': {
+ 'build_targets': [
+ 'most',
+ ],
+ 'builder_cfg': {
+ 'compiler': 'Clang',
+ 'configuration': 'Debug',
+ 'extra_config': 'GN',
+ 'is_trybot': False,
+ 'os': 'Ubuntu',
+ 'role': 'Build',
+ 'target_arch': 'x86_64',
+ },
+ 'configuration': 'Debug',
+ 'dm_flags': [
+ '--dummy-flags',
+ ],
+ 'do_perf_steps': False,
+ 'do_test_steps': False,
+ 'env': {
+ 'GYP_DEFINES': 'skia_arch_type=x86_64 skia_warnings_as_errors=1',
+ },
+ 'nanobench_flags': [
+ '--dummy-flags',
+ ],
+ 'upload_dm_results': True,
+ 'upload_perf_results': False,
+ },
+ 'Build-Ubuntu-GCC-x86_64-Debug-GN': {
+ 'build_targets': [
+ 'most',
+ ],
+ 'builder_cfg': {
+ 'compiler': 'GCC',
+ 'configuration': 'Debug',
+ 'extra_config': 'GN',
+ 'is_trybot': False,
+ 'os': 'Ubuntu',
+ 'role': 'Build',
+ 'target_arch': 'x86_64',
+ },
+ 'configuration': 'Debug',
+ 'dm_flags': [
+ '--dummy-flags',
+ ],
+ 'do_perf_steps': False,
+ 'do_test_steps': False,
+ 'env': {
+ 'GYP_DEFINES': 'skia_arch_type=x86_64 skia_warnings_as_errors=1',
+ },
+ 'nanobench_flags': [
+ '--dummy-flags',
+ ],
+ 'upload_dm_results': True,
+ 'upload_perf_results': False,
+ },
+ 'Build-Ubuntu-GCC-x86_64-Debug-MSAN': {
+ 'build_targets': [
+ 'dm',
+ 'nanobench',
+ ],
+ 'builder_cfg': {
+ 'compiler': 'GCC',
+ 'configuration': 'Debug',
+ 'extra_config': 'MSAN',
+ 'is_trybot': False,
+ 'os': 'Ubuntu',
+ 'role': 'Build',
+ 'target_arch': 'x86_64',
+ },
+ 'configuration': 'Debug',
+ 'dm_flags': [
+ '--dummy-flags',
+ ],
+ 'do_perf_steps': False,
+ 'do_test_steps': False,
+ 'env': {
+ 'GYP_DEFINES': 'skia_arch_type=x86_64 skia_warnings_as_errors=1',
+ },
+ 'nanobench_flags': [
+ '--dummy-flags',
+ ],
+ 'upload_dm_results': False,
+ 'upload_perf_results': False,
+ },
+ 'Build-Ubuntu-GCC-x86_64-Release-CMake': {
+ 'build_targets': [
+ 'most',
+ ],
+ 'builder_cfg': {
+ 'compiler': 'GCC',
+ 'configuration': 'Release',
+ 'extra_config': 'CMake',
+ 'is_trybot': False,
+ 'os': 'Ubuntu',
+ 'role': 'Build',
+ 'target_arch': 'x86_64',
+ },
+ 'configuration': 'Release',
+ 'dm_flags': [
+ '--dummy-flags',
+ ],
+ 'do_perf_steps': False,
+ 'do_test_steps': False,
+ 'env': {
+ 'GYP_DEFINES': 'skia_arch_type=x86_64 skia_warnings_as_errors=1',
+ },
+ 'nanobench_flags': [
+ '--dummy-flags',
+ ],
+ 'upload_dm_results': True,
+ 'upload_perf_results': False,
+ },
+ 'Build-Ubuntu-GCC-x86_64-Release-PDFium': {
+ 'build_targets': [
+ 'most',
+ ],
+ 'builder_cfg': {
+ 'compiler': 'GCC',
+ 'configuration': 'Release',
+ 'extra_config': 'PDFium',
+ 'is_trybot': False,
+ 'os': 'Ubuntu',
+ 'role': 'Build',
+ 'target_arch': 'x86_64',
+ },
+ 'configuration': 'Release',
+ 'dm_flags': [
+ '--dummy-flags',
+ ],
+ 'do_perf_steps': False,
+ 'do_test_steps': False,
+ 'env': {
+ 'GYP_DEFINES': 'skia_arch_type=x86_64 skia_warnings_as_errors=1',
+ },
+ 'nanobench_flags': [
+ '--dummy-flags',
+ ],
+ 'upload_dm_results': True,
+ 'upload_perf_results': False,
+ },
+ 'Build-Ubuntu-GCC-x86_64-Release-RemoteRun': {
+ 'build_targets': [
+ 'most',
+ ],
+ 'builder_cfg': {
+ 'compiler': 'GCC',
+ 'configuration': 'Release',
+ 'extra_config': 'RemoteRun',
+ 'is_trybot': False,
+ 'os': 'Ubuntu',
+ 'role': 'Build',
+ 'target_arch': 'x86_64',
+ },
+ 'configuration': 'Release',
+ 'dm_flags': [
+ '--dummy-flags',
+ ],
+ 'do_perf_steps': False,
+ 'do_test_steps': False,
+ 'env': {
+ 'GYP_DEFINES': 'skia_arch_type=x86_64 skia_warnings_as_errors=1',
+ },
+ 'nanobench_flags': [
+ '--dummy-flags',
+ ],
+ 'upload_dm_results': True,
+ 'upload_perf_results': False,
+ },
+ 'Build-Ubuntu-GCC-x86_64-Release-Shared': {
+ 'build_targets': [
+ 'most',
+ ],
+ 'builder_cfg': {
+ 'compiler': 'GCC',
+ 'configuration': 'Release',
+ 'extra_config': 'Shared',
+ 'is_trybot': False,
+ 'os': 'Ubuntu',
+ 'role': 'Build',
+ 'target_arch': 'x86_64',
+ },
+ 'configuration': 'Release',
+ 'dm_flags': [
+ '--dummy-flags',
+ ],
+ 'do_perf_steps': False,
+ 'do_test_steps': False,
+ 'env': {
+ 'GYP_DEFINES':
+ 'skia_arch_type=x86_64 skia_shared_lib=1 skia_warnings_as_errors=1',
+ },
+ 'nanobench_flags': [
+ '--dummy-flags',
+ ],
+ 'upload_dm_results': True,
+ 'upload_perf_results': False,
+ },
+ 'Build-Ubuntu-GCC-x86_64-Release-Trybot': {
+ 'build_targets': [
+ 'most',
+ ],
+ 'builder_cfg': {
+ 'compiler': 'GCC',
+ 'configuration': 'Release',
+ 'is_trybot': True,
+ 'os': 'Ubuntu',
+ 'role': 'Build',
+ 'target_arch': 'x86_64',
+ },
+ 'configuration': 'Release',
+ 'dm_flags': [
+ '--dummy-flags',
+ ],
+ 'do_perf_steps': False,
+ 'do_test_steps': False,
+ 'env': {
+ 'GYP_DEFINES': 'skia_arch_type=x86_64 skia_warnings_as_errors=1',
+ },
+ 'nanobench_flags': [
+ '--dummy-flags',
+ ],
+ 'upload_dm_results': True,
+ 'upload_perf_results': False,
+ },
+ 'Build-Ubuntu-GCC-x86_64-Release-Valgrind': {
+ 'build_targets': [
+ 'most',
+ ],
+ 'builder_cfg': {
+ 'compiler': 'GCC',
+ 'configuration': 'Release',
+ 'extra_config': 'Valgrind',
+ 'is_trybot': False,
+ 'os': 'Ubuntu',
+ 'role': 'Build',
+ 'target_arch': 'x86_64',
+ },
+ 'configuration': 'Release',
+ 'dm_flags': [
+ '--dummy-flags',
+ ],
+ 'do_perf_steps': False,
+ 'do_test_steps': False,
+ 'env': {
+ 'GYP_DEFINES':
+ ('skia_arch_type=x86_64 skia_release_optimization_level=1 skia_warn'
+ 'ings_as_errors=1'),
+ },
+ 'nanobench_flags': [
+ '--dummy-flags',
+ ],
+ 'upload_dm_results': False,
+ 'upload_perf_results': False,
+ },
+ 'Build-Win-MSVC-x86-Debug': {
+ 'build_targets': [
+ 'most',
+ ],
+ 'builder_cfg': {
+ 'compiler': 'MSVC',
+ 'configuration': 'Debug',
+ 'is_trybot': False,
+ 'os': 'Win',
+ 'role': 'Build',
+ 'target_arch': 'x86',
+ },
+ 'configuration': 'Debug',
+ 'dm_flags': [
+ '--dummy-flags',
+ ],
+ 'do_perf_steps': False,
+ 'do_test_steps': False,
+ 'env': {
+ 'GYP_DEFINES':
+ ('qt_sdk=C:/Qt/4.8.5/ skia_arch_type=x86 skia_warnings_as_errors=1 '
+ 'skia_win_debuggers_path=c:/DbgHelp skia_win_ltcg=0'),
+ },
+ 'nanobench_flags': [
+ '--dummy-flags',
+ ],
+ 'upload_dm_results': True,
+ 'upload_perf_results': False,
+ },
+ 'Build-Win-MSVC-x86-Release-GN': {
+ 'build_targets': [
+ 'most',
+ ],
+ 'builder_cfg': {
+ 'compiler': 'MSVC',
+ 'configuration': 'Release',
+ 'extra_config': 'GN',
+ 'is_trybot': False,
+ 'os': 'Win',
+ 'role': 'Build',
+ 'target_arch': 'x86',
+ },
+ 'configuration': 'Release',
+ 'dm_flags': [
+ '--dummy-flags',
+ ],
+ 'do_perf_steps': False,
+ 'do_test_steps': False,
+ 'env': {
+ 'GYP_DEFINES':
+ ('qt_sdk=C:/Qt/4.8.5/ skia_arch_type=x86 skia_warnings_as_errors=1 '
+ 'skia_win_debuggers_path=c:/DbgHelp skia_win_ltcg=0'),
+ },
+ 'nanobench_flags': [
+ '--dummy-flags',
+ ],
+ 'upload_dm_results': True,
+ 'upload_perf_results': False,
+ },
+ 'Build-Win-MSVC-x86_64-Release': {
+ 'build_targets': [
+ 'most',
+ ],
+ 'builder_cfg': {
+ 'compiler': 'MSVC',
+ 'configuration': 'Release',
+ 'is_trybot': False,
+ 'os': 'Win',
+ 'role': 'Build',
+ 'target_arch': 'x86_64',
+ },
+ 'configuration': 'Release_x64',
+ 'dm_flags': [
+ '--dummy-flags',
+ ],
+ 'do_perf_steps': False,
+ 'do_test_steps': False,
+ 'env': {
+ 'GYP_DEFINES':
+ ('qt_sdk=C:/Qt/4.8.5/ skia_arch_type=x86_64 skia_warnings_as_errors'
+ '=1 skia_win_debuggers_path=c:/DbgHelp skia_win_ltcg=0'),
+ },
+ 'nanobench_flags': [
+ '--dummy-flags',
+ ],
+ 'upload_dm_results': True,
+ 'upload_perf_results': False,
+ },
+ 'Build-Win-MSVC-x86_64-Release-Vulkan': {
+ 'build_targets': [
+ 'most',
+ ],
+ 'builder_cfg': {
+ 'compiler': 'MSVC',
+ 'configuration': 'Release',
+ 'extra_config': 'Vulkan',
+ 'is_trybot': False,
+ 'os': 'Win',
+ 'role': 'Build',
+ 'target_arch': 'x86_64',
+ },
+ 'configuration': 'Release_x64',
+ 'dm_flags': [
+ '--dummy-flags',
+ ],
+ 'do_perf_steps': False,
+ 'do_test_steps': False,
+ 'env': {
+ 'GYP_DEFINES':
+ ('qt_sdk=C:/Qt/4.8.5/ skia_arch_type=x86_64 skia_vulkan=1 skia_vulk'
+ 'an_debug_layers=0 skia_warnings_as_errors=1 skia_win_debuggers_pa'
+ 'th=c:/DbgHelp skia_win_ltcg=0'),
+ },
+ 'nanobench_flags': [
+ '--dummy-flags',
+ ],
+ 'upload_dm_results': True,
+ 'upload_perf_results': False,
+ },
+ 'Housekeeper-Nightly-RecreateSKPs_Canary': {
+ 'build_targets': [
+ 'most',
+ ],
+ 'builder_cfg': {
+ 'extra_config': 'RecreateSKPs_Canary',
+ 'frequency': 'Nightly',
+ 'is_trybot': False,
+ 'role': 'Housekeeper',
+ },
+ 'configuration': 'Release',
+ 'dm_flags': [
+ '--dummy-flags',
+ ],
+ 'do_perf_steps': False,
+ 'do_test_steps': False,
+ 'env': {
+ 'GYP_DEFINES': 'skia_shared_lib=1 skia_warnings_as_errors=0',
+ },
+ 'nanobench_flags': [
+ '--dummy-flags',
+ ],
+ 'upload_dm_results': True,
+ 'upload_perf_results': False,
+ },
+ 'Housekeeper-PerCommit': {
+ 'build_targets': [
+ 'most',
+ ],
+ 'builder_cfg': {
+ 'frequency': 'PerCommit',
+ 'is_trybot': False,
+ 'role': 'Housekeeper',
+ },
+ 'configuration': 'Release',
+ 'dm_flags': [
+ '--dummy-flags',
+ ],
+ 'do_perf_steps': False,
+ 'do_test_steps': False,
+ 'env': {
+ 'GYP_DEFINES': 'skia_shared_lib=1 skia_warnings_as_errors=0',
+ },
+ 'nanobench_flags': [
+ '--dummy-flags',
+ ],
+ 'upload_dm_results': True,
+ 'upload_perf_results': False,
+ },
+ 'Housekeeper-PerCommit-Trybot': {
+ 'build_targets': [
+ 'most',
+ ],
+ 'builder_cfg': {
+ 'frequency': 'PerCommit',
+ 'is_trybot': True,
+ 'role': 'Housekeeper',
+ },
+ 'configuration': 'Release',
+ 'dm_flags': [
+ '--dummy-flags',
+ ],
+ 'do_perf_steps': False,
+ 'do_test_steps': False,
+ 'env': {
+ 'GYP_DEFINES': 'skia_shared_lib=1 skia_warnings_as_errors=0',
+ },
+ 'nanobench_flags': [
+ '--dummy-flags',
+ ],
+ 'upload_dm_results': True,
+ 'upload_perf_results': False,
+ },
+ 'Housekeeper-Weekly-RecreateSKPs': {
+ 'build_targets': [
+ 'most',
+ ],
+ 'builder_cfg': {
+ 'extra_config': 'RecreateSKPs',
+ 'frequency': 'Weekly',
+ 'is_trybot': False,
+ 'role': 'Housekeeper',
+ },
+ 'configuration': 'Release',
+ 'dm_flags': [
+ '--dummy-flags',
+ ],
+ 'do_perf_steps': False,
+ 'do_test_steps': False,
+ 'env': {
+ 'GYP_DEFINES': 'skia_shared_lib=1 skia_warnings_as_errors=0',
+ },
+ 'nanobench_flags': [
+ '--dummy-flags',
+ ],
+ 'upload_dm_results': True,
+ 'upload_perf_results': False,
+ },
+ 'Infra-PerCommit': {
+ 'build_targets': [
+ 'most',
+ ],
+ 'builder_cfg': {
+ 'frequency': 'PerCommit',
+ 'is_trybot': False,
+ 'role': 'Infra',
+ },
+ 'configuration': 'Debug',
+ 'dm_flags': [
+ '--dummy-flags',
+ ],
+ 'do_perf_steps': False,
+ 'do_test_steps': False,
+ 'env': {
+ 'GYP_DEFINES': 'skia_warnings_as_errors=0',
+ },
+ 'nanobench_flags': [
+ '--dummy-flags',
+ ],
+ 'upload_dm_results': True,
+ 'upload_perf_results': False,
+ },
+ 'Perf-Android-GCC-Nexus7-GPU-Tegra3-Arm7-Release': {
+ 'build_targets': [
+ 'nanobench',
+ ],
+ 'builder_cfg': {
+ 'arch': 'Arm7',
+ 'compiler': 'GCC',
+ 'configuration': 'Release',
+ 'cpu_or_gpu': 'GPU',
+ 'cpu_or_gpu_value': 'Tegra3',
+ 'is_trybot': False,
+ 'model': 'Nexus7',
+ 'os': 'Android',
+ 'role': 'Perf',
+ },
+ 'configuration': 'Release',
+ 'device_cfg': 'arm_v7_neon',
+ 'dm_flags': [
+ '--dummy-flags',
+ ],
+ 'do_perf_steps': True,
+ 'do_test_steps': False,
+ 'env': {
+ 'GYP_DEFINES':
+ 'skia_arch_type=arm skia_dump_stats=1 skia_warnings_as_errors=0',
+ },
+ 'nanobench_flags': [
+ '--dummy-flags',
+ ],
+ 'product.board': 'grouper',
+ 'upload_dm_results': True,
+ 'upload_perf_results': True,
+ },
+ 'Perf-Ubuntu-GCC-GCE-CPU-AVX2-x86_64-Release-Trybot': {
+ 'build_targets': [
+ 'nanobench',
+ ],
+ 'builder_cfg': {
+ 'arch': 'x86_64',
+ 'compiler': 'GCC',
+ 'configuration': 'Release',
+ 'cpu_or_gpu': 'CPU',
+ 'cpu_or_gpu_value': 'AVX2',
+ 'is_trybot': True,
+ 'model': 'GCE',
+ 'os': 'Ubuntu',
+ 'role': 'Perf',
+ },
+ 'configuration': 'Release',
+ 'dm_flags': [
+ '--dummy-flags',
+ ],
+ 'do_perf_steps': True,
+ 'do_test_steps': False,
+ 'env': {
+ 'GYP_DEFINES':
+ 'skia_arch_type=x86_64 skia_gpu=0 skia_warnings_as_errors=0',
+ },
+ 'nanobench_flags': [
+ '--dummy-flags',
+ ],
+ 'upload_dm_results': True,
+ 'upload_perf_results': True,
+ },
+ 'Perf-Ubuntu-GCC-ShuttleA-GPU-GTX550Ti-x86_64-Release-Valgrind': {
+ 'build_targets': [
+ 'nanobench',
+ ],
+ 'builder_cfg': {
+ 'arch': 'x86_64',
+ 'compiler': 'GCC',
+ 'configuration': 'Release',
+ 'cpu_or_gpu': 'GPU',
+ 'cpu_or_gpu_value': 'GTX550Ti',
+ 'extra_config': 'Valgrind',
+ 'is_trybot': False,
+ 'model': 'ShuttleA',
+ 'os': 'Ubuntu',
+ 'role': 'Perf',
+ },
+ 'configuration': 'Release',
+ 'dm_flags': [
+ '--dummy-flags',
+ ],
+ 'do_perf_steps': True,
+ 'do_test_steps': False,
+ 'env': {
+ 'GYP_DEFINES':
+ ('skia_arch_type=x86_64 skia_dump_stats=1 skia_release_optimization'
+ '_level=1 skia_warnings_as_errors=0'),
+ },
+ 'nanobench_flags': [
+ '--dummy-flags',
+ ],
+ 'upload_dm_results': False,
+ 'upload_perf_results': True,
+ },
+ 'Perf-Ubuntu-GCC-ShuttleA-GPU-GTX550Ti-x86_64-Release-VisualBench': {
+ 'build_targets': [
+ 'visualbench',
+ ],
+ 'builder_cfg': {
+ 'arch': 'x86_64',
+ 'compiler': 'GCC',
+ 'configuration': 'Release',
+ 'cpu_or_gpu': 'GPU',
+ 'cpu_or_gpu_value': 'GTX550Ti',
+ 'extra_config': 'VisualBench',
+ 'is_trybot': False,
+ 'model': 'ShuttleA',
+ 'os': 'Ubuntu',
+ 'role': 'Perf',
+ },
+ 'configuration': 'Release',
+ 'dm_flags': [
+ '--dummy-flags',
+ ],
+ 'do_perf_steps': True,
+ 'do_test_steps': False,
+ 'env': {
+ 'GYP_DEFINES':
+ ('skia_arch_type=x86_64 skia_dump_stats=1 skia_use_sdl=1 skia_warni'
+ 'ngs_as_errors=0'),
+ },
+ 'nanobench_flags': [
+ '--dummy-flags',
+ ],
+ 'upload_dm_results': True,
+ 'upload_perf_results': True,
+ },
+ 'Perf-Win-MSVC-GCE-CPU-AVX2-x86_64-Debug': {
+ 'build_targets': [
+ 'nanobench',
+ ],
+ 'builder_cfg': {
+ 'arch': 'x86_64',
+ 'compiler': 'MSVC',
+ 'configuration': 'Debug',
+ 'cpu_or_gpu': 'CPU',
+ 'cpu_or_gpu_value': 'AVX2',
+ 'is_trybot': False,
+ 'model': 'GCE',
+ 'os': 'Win',
+ 'role': 'Perf',
+ },
+ 'configuration': 'Debug_x64',
+ 'dm_flags': [
+ '--dummy-flags',
+ ],
+ 'do_perf_steps': True,
+ 'do_test_steps': False,
+ 'env': {
+ 'GYP_DEFINES':
+ ('qt_sdk=C:/Qt/4.8.5/ skia_arch_type=x86_64 skia_gpu=0 skia_warning'
+ 's_as_errors=0 skia_win_debuggers_path=c:/DbgHelp'),
+ },
+ 'nanobench_flags': [
+ '--dummy-flags',
+ ],
+ 'upload_dm_results': True,
+ 'upload_perf_results': False,
+ },
+ 'Perf-Win-MSVC-GCE-CPU-AVX2-x86_64-Release': {
+ 'build_targets': [
+ 'nanobench',
+ ],
+ 'builder_cfg': {
+ 'arch': 'x86_64',
+ 'compiler': 'MSVC',
+ 'configuration': 'Release',
+ 'cpu_or_gpu': 'CPU',
+ 'cpu_or_gpu_value': 'AVX2',
+ 'is_trybot': False,
+ 'model': 'GCE',
+ 'os': 'Win',
+ 'role': 'Perf',
+ },
+ 'configuration': 'Release_x64',
+ 'dm_flags': [
+ '--dummy-flags',
+ ],
+ 'do_perf_steps': True,
+ 'do_test_steps': False,
+ 'env': {
+ 'GYP_DEFINES':
+ ('qt_sdk=C:/Qt/4.8.5/ skia_arch_type=x86_64 skia_gpu=0 skia_warning'
+ 's_as_errors=0 skia_win_debuggers_path=c:/DbgHelp'),
+ },
+ 'nanobench_flags': [
+ '--dummy-flags',
+ ],
+ 'upload_dm_results': True,
+ 'upload_perf_results': True,
+ },
+ 'Perf-Win8-MSVC-ShuttleB-GPU-HD4600-x86_64-Release-Trybot': {
+ 'build_targets': [
+ 'nanobench',
+ ],
+ 'builder_cfg': {
+ 'arch': 'x86_64',
+ 'compiler': 'MSVC',
+ 'configuration': 'Release',
+ 'cpu_or_gpu': 'GPU',
+ 'cpu_or_gpu_value': 'HD4600',
+ 'is_trybot': True,
+ 'model': 'ShuttleB',
+ 'os': 'Win8',
+ 'role': 'Perf',
+ },
+ 'configuration': 'Release_x64',
+ 'dm_flags': [
+ '--dummy-flags',
+ ],
+ 'do_perf_steps': True,
+ 'do_test_steps': False,
+ 'env': {
+ 'GYP_DEFINES':
+ ('qt_sdk=C:/Qt/Qt5.1.0/5.1.0/msvc2012_64/ skia_arch_type=x86_64 ski'
+ 'a_dump_stats=1 skia_warnings_as_errors=0 skia_win_debuggers_path='
+ 'c:/DbgHelp'),
+ },
+ 'nanobench_flags': [
+ '--dummy-flags',
+ ],
+ 'upload_dm_results': True,
+ 'upload_perf_results': True,
+ },
+ 'Test-Android-GCC-GalaxyS3-GPU-Mali400-Arm7-Debug': {
+ 'build_targets': [
+ 'dm',
+ ],
+ 'builder_cfg': {
+ 'arch': 'Arm7',
+ 'compiler': 'GCC',
+ 'configuration': 'Debug',
+ 'cpu_or_gpu': 'GPU',
+ 'cpu_or_gpu_value': 'Mali400',
+ 'is_trybot': False,
+ 'model': 'GalaxyS3',
+ 'os': 'Android',
+ 'role': 'Test',
+ },
+ 'configuration': 'Debug',
+ 'device_cfg': 'arm_v7_neon',
+ 'dm_flags': [
+ '--dummy-flags',
+ ],
+ 'do_perf_steps': False,
+ 'do_test_steps': True,
+ 'env': {
+ 'GYP_DEFINES': 'skia_arch_type=arm skia_warnings_as_errors=0',
+ },
+ 'nanobench_flags': [
+ '--dummy-flags',
+ ],
+ 'product.board': 'm0',
+ 'upload_dm_results': True,
+ 'upload_perf_results': False,
+ },
+ 'Test-Android-GCC-NVIDIA_Shield-GPU-TegraX1-Arm64-Debug-Vulkan': {
+ 'build_targets': [
+ 'dm',
+ ],
+ 'builder_cfg': {
+ 'arch': 'Arm64',
+ 'compiler': 'GCC',
+ 'configuration': 'Debug',
+ 'cpu_or_gpu': 'GPU',
+ 'cpu_or_gpu_value': 'TegraX1',
+ 'extra_config': 'Vulkan',
+ 'is_trybot': False,
+ 'model': 'NVIDIA_Shield',
+ 'os': 'Android',
+ 'role': 'Test',
+ },
+ 'configuration': 'Debug',
+ 'device_cfg': 'arm64',
+ 'dm_flags': [
+ '--dummy-flags',
+ ],
+ 'do_perf_steps': False,
+ 'do_test_steps': True,
+ 'env': {
+ 'GYP_DEFINES':
+ ('skia_arch_type=arm64 skia_vulkan=1 skia_vulkan_debug_layers=0 ski'
+ 'a_warnings_as_errors=0'),
+ },
+ 'nanobench_flags': [
+ '--dummy-flags',
+ ],
+ 'product.board': 'foster',
+ 'upload_dm_results': True,
+ 'upload_perf_results': False,
+ },
+ 'Test-Android-GCC-Nexus7-GPU-Tegra3-Arm7-Debug': {
+ 'build_targets': [
+ 'dm',
+ ],
+ 'builder_cfg': {
+ 'arch': 'Arm7',
+ 'compiler': 'GCC',
+ 'configuration': 'Debug',
+ 'cpu_or_gpu': 'GPU',
+ 'cpu_or_gpu_value': 'Tegra3',
+ 'is_trybot': False,
+ 'model': 'Nexus7',
+ 'os': 'Android',
+ 'role': 'Test',
+ },
+ 'configuration': 'Debug',
+ 'device_cfg': 'arm_v7_neon',
+ 'dm_flags': [
+ '--dummy-flags',
+ ],
+ 'do_perf_steps': False,
+ 'do_test_steps': True,
+ 'env': {
+ 'GYP_DEFINES': 'skia_arch_type=arm skia_warnings_as_errors=0',
+ },
+ 'nanobench_flags': [
+ '--dummy-flags',
+ ],
+ 'product.board': 'grouper',
+ 'upload_dm_results': True,
+ 'upload_perf_results': False,
+ },
+ 'Test-Android-GCC-Nexus7v2-GPU-Tegra3-Arm7-Release': {
+ 'build_targets': [
+ 'dm',
+ ],
+ 'builder_cfg': {
+ 'arch': 'Arm7',
+ 'compiler': 'GCC',
+ 'configuration': 'Release',
+ 'cpu_or_gpu': 'GPU',
+ 'cpu_or_gpu_value': 'Tegra3',
+ 'is_trybot': False,
+ 'model': 'Nexus7v2',
+ 'os': 'Android',
+ 'role': 'Test',
+ },
+ 'configuration': 'Release',
+ 'device_cfg': 'arm_v7_neon',
+ 'dm_flags': [
+ '--dummy-flags',
+ ],
+ 'do_perf_steps': False,
+ 'do_test_steps': True,
+ 'env': {
+ 'GYP_DEFINES': 'skia_arch_type=arm skia_warnings_as_errors=0',
+ },
+ 'nanobench_flags': [
+ '--dummy-flags',
+ ],
+ 'product.board': 'flo',
+ 'upload_dm_results': True,
+ 'upload_perf_results': False,
+ },
+ 'Test-Mac-Clang-MacMini6.2-CPU-AVX-x86_64-Release': {
+ 'build_targets': [
+ 'dm',
+ ],
+ 'builder_cfg': {
+ 'arch': 'x86_64',
+ 'compiler': 'Clang',
+ 'configuration': 'Release',
+ 'cpu_or_gpu': 'CPU',
+ 'cpu_or_gpu_value': 'AVX',
+ 'is_trybot': False,
+ 'model': 'MacMini6.2',
+ 'os': 'Mac',
+ 'role': 'Test',
+ },
+ 'configuration': 'Release',
+ 'dm_flags': [
+ '--dummy-flags',
+ ],
+ 'do_perf_steps': False,
+ 'do_test_steps': True,
+ 'env': {
+ 'CC': '/usr/bin/clang',
+ 'CXX': '/usr/bin/clang++',
+ 'GYP_DEFINES':
+ ('skia_arch_type=x86_64 skia_clang_build=1 skia_gpu=0 skia_warnings'
+ '_as_errors=0'),
+ },
+ 'nanobench_flags': [
+ '--dummy-flags',
+ ],
+ 'upload_dm_results': True,
+ 'upload_perf_results': False,
+ },
+ 'Test-Ubuntu-Clang-GCE-CPU-AVX2-x86_64-Coverage-Trybot': {
+ 'build_targets': [
+ 'dm',
+ ],
+ 'builder_cfg': {
+ 'arch': 'x86_64',
+ 'compiler': 'Clang',
+ 'configuration': 'Coverage',
+ 'cpu_or_gpu': 'CPU',
+ 'cpu_or_gpu_value': 'AVX2',
+ 'is_trybot': True,
+ 'model': 'GCE',
+ 'os': 'Ubuntu',
+ 'role': 'Test',
+ },
+ 'configuration': 'Coverage',
+ 'dm_flags': [
+ '--dummy-flags',
+ ],
+ 'do_compile_steps': False,
+ 'do_perf_steps': False,
+ 'do_test_steps': True,
+ 'env': {
+ 'CC': '/usr/bin/clang-3.6',
+ 'CXX': '/usr/bin/clang++-3.6',
+ 'GYP_DEFINES':
+ ('skia_arch_type=x86_64 skia_clang_build=1 skia_gpu=0 skia_warnings'
+ '_as_errors=0'),
+ },
+ 'nanobench_flags': [
+ '--dummy-flags',
+ ],
+ 'upload_dm_results': False,
+ 'upload_perf_results': False,
+ },
+ 'Test-Ubuntu-GCC-GCE-CPU-AVX2-x86_64-Debug': {
+ 'build_targets': [
+ 'dm',
+ ],
+ 'builder_cfg': {
+ 'arch': 'x86_64',
+ 'compiler': 'GCC',
+ 'configuration': 'Debug',
+ 'cpu_or_gpu': 'CPU',
+ 'cpu_or_gpu_value': 'AVX2',
+ 'is_trybot': False,
+ 'model': 'GCE',
+ 'os': 'Ubuntu',
+ 'role': 'Test',
+ },
+ 'configuration': 'Debug',
+ 'dm_flags': [
+ '--dummy-flags',
+ ],
+ 'do_perf_steps': False,
+ 'do_test_steps': True,
+ 'env': {
+ 'GYP_DEFINES':
+ 'skia_arch_type=x86_64 skia_gpu=0 skia_warnings_as_errors=0',
+ },
+ 'nanobench_flags': [
+ '--dummy-flags',
+ ],
+ 'upload_dm_results': True,
+ 'upload_perf_results': False,
+ },
+ 'Test-Ubuntu-GCC-GCE-CPU-AVX2-x86_64-Debug-MSAN': {
+ 'build_targets': [
+ 'dm',
+ 'nanobench',
+ ],
+ 'builder_cfg': {
+ 'arch': 'x86_64',
+ 'compiler': 'GCC',
+ 'configuration': 'Debug',
+ 'cpu_or_gpu': 'CPU',
+ 'cpu_or_gpu_value': 'AVX2',
+ 'extra_config': 'MSAN',
+ 'is_trybot': False,
+ 'model': 'GCE',
+ 'os': 'Ubuntu',
+ 'role': 'Test',
+ },
+ 'configuration': 'Debug',
+ 'dm_flags': [
+ '--dummy-flags',
+ ],
+ 'do_perf_steps': False,
+ 'do_test_steps': True,
+ 'env': {
+ 'GYP_DEFINES':
+ 'skia_arch_type=x86_64 skia_gpu=0 skia_warnings_as_errors=0',
+ },
+ 'nanobench_flags': [
+ '--dummy-flags',
+ ],
+ 'upload_dm_results': False,
+ 'upload_perf_results': False,
+ },
+ 'Test-Ubuntu-GCC-ShuttleA-GPU-GTX550Ti-x86_64-Release-Valgrind': {
+ 'build_targets': [
+ 'dm',
+ ],
+ 'builder_cfg': {
+ 'arch': 'x86_64',
+ 'compiler': 'GCC',
+ 'configuration': 'Release',
+ 'cpu_or_gpu': 'GPU',
+ 'cpu_or_gpu_value': 'GTX550Ti',
+ 'extra_config': 'Valgrind',
+ 'is_trybot': False,
+ 'model': 'ShuttleA',
+ 'os': 'Ubuntu',
+ 'role': 'Test',
+ },
+ 'configuration': 'Release',
+ 'dm_flags': [
+ '--dummy-flags',
+ ],
+ 'do_perf_steps': False,
+ 'do_test_steps': True,
+ 'env': {
+ 'GYP_DEFINES':
+ ('skia_arch_type=x86_64 skia_release_optimization_level=1 skia_warn'
+ 'ings_as_errors=0'),
+ },
+ 'nanobench_flags': [
+ '--dummy-flags',
+ ],
+ 'upload_dm_results': False,
+ 'upload_perf_results': False,
+ },
+ 'Test-Win8-MSVC-ShuttleA-GPU-HD7770-x86_64-Release': {
+ 'build_targets': [
+ 'dm',
+ ],
+ 'builder_cfg': {
+ 'arch': 'x86_64',
+ 'compiler': 'MSVC',
+ 'configuration': 'Release',
+ 'cpu_or_gpu': 'GPU',
+ 'cpu_or_gpu_value': 'HD7770',
+ 'is_trybot': False,
+ 'model': 'ShuttleA',
+ 'os': 'Win8',
+ 'role': 'Test',
+ },
+ 'configuration': 'Release_x64',
+ 'dm_flags': [
+ '--dummy-flags',
+ ],
+ 'do_perf_steps': False,
+ 'do_test_steps': True,
+ 'env': {
+ 'GYP_DEFINES':
+ ('qt_sdk=C:/Qt/Qt5.1.0/5.1.0/msvc2012_64/ skia_arch_type=x86_64 ski'
+ 'a_warnings_as_errors=0 skia_win_debuggers_path=c:/DbgHelp'),
+ },
+ 'nanobench_flags': [
+ '--dummy-flags',
+ ],
+ 'upload_dm_results': True,
+ 'upload_perf_results': False,
+ },
+ 'Test-Win8-MSVC-ShuttleB-CPU-AVX2-x86_64-Release': {
+ 'build_targets': [
+ 'dm',
+ ],
+ 'builder_cfg': {
+ 'arch': 'x86_64',
+ 'compiler': 'MSVC',
+ 'configuration': 'Release',
+ 'cpu_or_gpu': 'CPU',
+ 'cpu_or_gpu_value': 'AVX2',
+ 'is_trybot': False,
+ 'model': 'ShuttleB',
+ 'os': 'Win8',
+ 'role': 'Test',
+ },
+ 'configuration': 'Release_x64',
+ 'dm_flags': [
+ '--dummy-flags',
+ ],
+ 'do_perf_steps': False,
+ 'do_test_steps': True,
+ 'env': {
+ 'GYP_DEFINES':
+ ('qt_sdk=C:/Qt/Qt5.1.0/5.1.0/msvc2012_64/ skia_arch_type=x86_64 ski'
+ 'a_gpu=0 skia_warnings_as_errors=0 skia_win_debuggers_path=c:/DbgH'
+ 'elp'),
+ },
+ 'nanobench_flags': [
+ '--dummy-flags',
+ ],
+ 'upload_dm_results': True,
+ 'upload_perf_results': False,
+ },
+ 'Test-Win8-MSVC-ShuttleB-CPU-AVX2-x86_64-Release-Trybot': {
+ 'build_targets': [
+ 'dm',
+ ],
+ 'builder_cfg': {
+ 'arch': 'x86_64',
+ 'compiler': 'MSVC',
+ 'configuration': 'Release',
+ 'cpu_or_gpu': 'CPU',
+ 'cpu_or_gpu_value': 'AVX2',
+ 'is_trybot': True,
+ 'model': 'ShuttleB',
+ 'os': 'Win8',
+ 'role': 'Test',
+ },
+ 'configuration': 'Release_x64',
+ 'dm_flags': [
+ '--dummy-flags',
+ ],
+ 'do_perf_steps': False,
+ 'do_test_steps': True,
+ 'env': {
+ 'GYP_DEFINES':
+ ('qt_sdk=C:/Qt/Qt5.1.0/5.1.0/msvc2012_64/ skia_arch_type=x86_64 ski'
+ 'a_gpu=0 skia_warnings_as_errors=0 skia_win_debuggers_path=c:/DbgH'
+ 'elp'),
+ },
+ 'nanobench_flags': [
+ '--dummy-flags',
+ ],
+ 'upload_dm_results': True,
+ 'upload_perf_results': False,
+ },
+ 'Test-iOS-Clang-iPad4-GPU-SGX554-Arm7-Debug': {
+ 'build_targets': [
+ 'iOSShell',
+ ],
+ 'builder_cfg': {
+ 'arch': 'Arm7',
+ 'compiler': 'Clang',
+ 'configuration': 'Debug',
+ 'cpu_or_gpu': 'GPU',
+ 'cpu_or_gpu_value': 'SGX554',
+ 'is_trybot': False,
+ 'model': 'iPad4',
+ 'os': 'iOS',
+ 'role': 'Test',
+ },
+ 'configuration': 'Debug',
+ 'device_cfg': 'iPad4,1',
+ 'dm_flags': [
+ '--dummy-flags',
+ ],
+ 'do_perf_steps': False,
+ 'do_test_steps': True,
+ 'env': {
+ 'CC': '/usr/bin/clang',
+ 'CXX': '/usr/bin/clang++',
+ 'GYP_DEFINES':
+ ('skia_arch_type=arm skia_clang_build=1 skia_os=ios skia_warnings_a'
+ 's_errors=0'),
+ },
+ 'nanobench_flags': [
+ '--dummy-flags',
+ ],
+ 'upload_dm_results': True,
+ 'upload_perf_results': False,
+ },
+ 'Test-iOS-Clang-iPad4-GPU-SGX554-Arm7-Release': {
+ 'build_targets': [
+ 'iOSShell',
+ ],
+ 'builder_cfg': {
+ 'arch': 'Arm7',
+ 'compiler': 'Clang',
+ 'configuration': 'Release',
+ 'cpu_or_gpu': 'GPU',
+ 'cpu_or_gpu_value': 'SGX554',
+ 'is_trybot': False,
+ 'model': 'iPad4',
+ 'os': 'iOS',
+ 'role': 'Test',
+ },
+ 'configuration': 'Release',
+ 'device_cfg': 'iPad4,1',
+ 'dm_flags': [
+ '--dummy-flags',
+ ],
+ 'do_perf_steps': False,
+ 'do_test_steps': True,
+ 'env': {
+ 'CC': '/usr/bin/clang',
+ 'CXX': '/usr/bin/clang++',
+ 'GYP_DEFINES':
+ ('skia_arch_type=arm skia_clang_build=1 skia_os=ios skia_warnings_a'
+ 's_errors=0'),
+ },
+ 'nanobench_flags': [
+ '--dummy-flags',
+ ],
+ 'upload_dm_results': True,
+ 'upload_perf_results': False,
+ },
+}
diff --git a/infra/bots/recipe_modules/core/resources/binary_size_utils.py b/infra/bots/recipe_modules/core/resources/binary_size_utils.py
new file mode 100644
index 0000000000..c09a65dccd
--- /dev/null
+++ b/infra/bots/recipe_modules/core/resources/binary_size_utils.py
@@ -0,0 +1,67 @@
+# Copyright 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""Common utilities for tools that deal with binary size information.
+
+Copied from chromium/src/build/android/pylib/symbols/binary_size_tools.py.
+"""
+
+import logging
+import re
+
+
def ParseNm(nm_lines):
  """Parse nm output, returning data for all relevant (to binary size)
  symbols and ignoring the rest.

  Args:
    nm_lines: an iterable over lines of nm output.

  Yields:
    (symbol name, symbol type, symbol size, source file path).

    Path may be None if nm couldn't figure out the source file.
  """

  # Match lines with size, symbol, optional location, optional discriminator.
  # All fragments are raw strings so that \t, \d and \? reach the regex engine
  # as regex escapes rather than (invalid) Python string escapes, which emit
  # DeprecationWarning/SyntaxWarning on modern interpreters.
  sym_re = re.compile(r'^[0-9a-f]{8,} '   # address (8+ hex digits)
                      r'([0-9a-f]{8,}) '  # size (8+ hex digits)
                      r'(.) '             # symbol type, one character
                      r'([^\t]+)'         # symbol name, separated from next by tab
                      r'(?:\t(.*):[\d\?]+)?.*$')  # location
  # Match lines with addr but no size.
  addr_re = re.compile(r'^[0-9a-f]{8,} (.) ([^\t]+)(?:\t.*)?$')
  # Match lines that don't have an address at all -- typically external symbols.
  noaddr_re = re.compile(r'^ {8,} (.) (.*)$')
  # Match lines with no symbol name, only addr and type.
  addr_only_re = re.compile(r'^[0-9a-f]{8,} (.)$')

  for line in nm_lines:
    line = line.rstrip()
    match = sym_re.match(line)
    if match:
      size, sym_type, sym = match.groups()[0:3]
      size = int(size, 16)
      if sym_type in ('B', 'b'):
        continue  # skip all BSS for now.
      path = match.group(4)
      yield sym, sym_type, size, path
      continue
    match = addr_re.match(line)
    if match:
      # Symbol has an address but no size: nothing to measure.
      continue  # No size == we don't care.
    match = noaddr_re.match(line)
    if match:
      sym_type, sym = match.groups()
      if sym_type in ('U', 'w'):
        continue  # external or weak symbol
    match = addr_only_re.match(line)
    if match:
      continue  # Nothing to do.


    # If we reach this part of the loop, there was something in the
    # line that we didn't expect or recognize.
    logging.warning('nm output parser failed to parse: %s', repr(line))
diff --git a/infra/bots/recipe_modules/core/resources/elf_symbolizer.py b/infra/bots/recipe_modules/core/resources/elf_symbolizer.py
new file mode 100644
index 0000000000..de9c141219
--- /dev/null
+++ b/infra/bots/recipe_modules/core/resources/elf_symbolizer.py
@@ -0,0 +1,477 @@
+# Copyright 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""The ElfSymbolizer class for symbolizing Executable and Linkable Files.
+
+Adapted for Skia's use from
+chromium/src/build/android/pylib/symbols/elf_symbolizer.py.
+
+Main changes:
+-- Added prefix_to_remove param to remove path prefix from tree data.
+"""
+
+import collections
+import datetime
+import logging
+import multiprocessing
+import os
+import posixpath
+import Queue
+import re
+import subprocess
+import sys
+import threading
+
+
# addr2line builds a possibly infinite memory cache that can exhaust
# the computer's memory if allowed to grow for too long. This constant
# controls how many lookups we do before restarting the process. 4000
# gives near peak performance without extreme memory usage.
# (Consumed by ELFSymbolizer.Addr2Line.RecycleIfNecessary below.)
ADDR2LINE_RECYCLE_LIMIT = 4000
+
+
class ELFSymbolizer(object):
  """An uber-fast (multiprocessing, pipelined and asynchronous) ELF symbolizer.

  This class is a frontend for addr2line (part of GNU binutils), designed to
  symbolize batches of large numbers of symbols for a given ELF file. It
  supports sharding symbolization against many addr2line instances and
  pipelining of multiple requests per each instance (in order to hide addr2line
  internals and OS pipe latencies).

  The interface exhibited by this class is a very simple asynchronous interface,
  which is based on the following three methods:
  - SymbolizeAsync(): used to request (enqueue) resolution of a given address.
  - The |callback| method: used to communicated back the symbol information.
  - Join(): called to conclude the batch to gather the last outstanding results.
  In essence, before the Join method returns, this class will have issued as
  many callbacks as the number of SymbolizeAsync() calls. In this regard, note
  that due to multiprocess sharding, callbacks can be delivered out of order.

  Some background about addr2line:
  - it is invoked passing the elf path in the cmdline, piping the addresses in
    its stdin and getting results on its stdout.
  - it has pretty large response times for the first requests, but it
    works very well in streaming mode once it has been warmed up.
  - it doesn't scale by itself (on more cores). However, spawning multiple
    instances at the same time on the same file is pretty efficient as they
    keep hitting the pagecache and become mostly CPU bound.
  - it might hang or crash, mostly for OOM. This class deals with both of these
    problems.

  Despite the "scary" imports and the multi* words above, (almost) no multi-
  threading/processing is involved from the python viewpoint. Concurrency
  here is achieved by spawning several addr2line subprocesses and handling their
  output pipes asynchronously. Therefore, all the code here (with the exception
  of the Queue instance in Addr2Line) should be free from mind-blowing
  thread-safety concerns.

  The multiprocess sharding works as follows:
  The symbolizer tries to use the lowest number of addr2line instances as
  possible (with respect of |max_concurrent_jobs|) and enqueue all the requests
  in a single addr2line instance. For few symbols (i.e. dozens) sharding isn't
  worth the startup cost.
  The multiprocess logic kicks in as soon as the queues for the existing
  instances grow. Specifically, once all the existing instances reach the
  |max_queue_size| bound, a new addr2line instance is kicked in.
  In the case of a very eager producer (i.e. all |max_concurrent_jobs| instances
  have a backlog of |max_queue_size|), back-pressure is applied on the caller by
  blocking the SymbolizeAsync method.

  This module has been deliberately designed to be dependency free (w.r.t. of
  other modules in this project), to allow easy reuse in external projects.
  """

  def __init__(self, elf_file_path, addr2line_path, callback, inlines=False,
               max_concurrent_jobs=None, addr2line_timeout=30,
               max_queue_size=50, source_root_path=None, strip_base_path=None,
               prefix_to_remove=None):
    """Args:
      elf_file_path: path of the elf file to be symbolized.
      addr2line_path: path of the toolchain's addr2line binary.
      callback: a callback which will be invoked for each resolved symbol with
          the two args (sym_info, callback_arg). The former is an instance of
          |ELFSymbolInfo| and contains the symbol information. The latter is an
          embedder-provided argument which is passed to SymbolizeAsync().
      inlines: when True, the ELFSymbolInfo will contain also the details about
          the outer inlining functions. When False, only the innermost function
          will be provided.
      max_concurrent_jobs: Max number of addr2line instances spawned.
          Parallelize responsibly, addr2line is a memory and I/O monster.
      max_queue_size: Max number of outstanding requests per addr2line instance.
      addr2line_timeout: Max time (in seconds) to wait for a addr2line response.
          After the timeout, the instance will be considered hung and respawned.
      source_root_path: In some toolchains only the name of the source file is
          is output, without any path information; disambiguation searches
          through the source directory specified by |source_root_path| argument
          for files whose name matches, adding the full path information to the
          output. For example, if the toolchain outputs "unicode.cc" and there
          is a file called "unicode.cc" located under |source_root_path|/foo,
          the tool will replace "unicode.cc" with
          "|source_root_path|/foo/unicode.cc". If there are multiple files with
          the same name, disambiguation will fail because the tool cannot
          determine which of the files was the source of the symbol.
      strip_base_path: Rebases the symbols source paths onto |source_root_path|
          (i.e replace |strip_base_path| with |source_root_path|).
      prefix_to_remove: Removes the prefix from ElfSymbolInfo output. Skia added
    """
    assert(os.path.isfile(addr2line_path)), 'Cannot find ' + addr2line_path
    self.elf_file_path = elf_file_path
    self.addr2line_path = addr2line_path
    self.callback = callback
    self.inlines = inlines
    self.max_concurrent_jobs = (max_concurrent_jobs or
                                min(multiprocessing.cpu_count(), 4))
    self.max_queue_size = max_queue_size
    self.addr2line_timeout = addr2line_timeout
    self.requests_counter = 0  # For generating monotonic request IDs.
    self._a2l_instances = []  # Up to |max_concurrent_jobs| _Addr2Line inst.

    # Skia addition: remove the given prefix from tree paths.
    self.prefix_to_remove = prefix_to_remove

    # If necessary, create disambiguation lookup table
    self.disambiguate = source_root_path is not None
    self.disambiguation_table = {}
    self.strip_base_path = strip_base_path
    if(self.disambiguate):
      self.source_root_path = os.path.abspath(source_root_path)
      self._CreateDisambiguationTable()

    # Create one addr2line instance. More instances will be created on demand
    # (up to |max_concurrent_jobs|) depending on the rate of the requests.
    self._CreateNewA2LInstance()

  def SymbolizeAsync(self, addr, callback_arg=None):
    """Requests symbolization of a given address.

    This method is not guaranteed to return immediately. It generally does, but
    in some scenarios (e.g. all addr2line instances have full queues) it can
    block to create back-pressure.

    Args:
      addr: address to symbolize.
      callback_arg: optional argument which will be passed to the |callback|."""
    assert(isinstance(addr, int))

    # Process all the symbols that have been resolved in the meanwhile.
    # Essentially, this drains all the addr2line(s) out queues.
    for a2l_to_purge in self._a2l_instances:
      a2l_to_purge.ProcessAllResolvedSymbolsInQueue()
      a2l_to_purge.RecycleIfNecessary()

    # Find the best instance according to this logic:
    # 1. Find an existing instance with the shortest queue.
    # 2. If all of instances' queues are full, but there is room in the pool,
    #    (i.e. < |max_concurrent_jobs|) create a new instance.
    # 3. If there were already |max_concurrent_jobs| instances and all of them
    #    had full queues, make back-pressure.

    # 1.
    def _SortByQueueSizeAndReqID(a2l):
      return (a2l.queue_size, a2l.first_request_id)
    a2l = min(self._a2l_instances, key=_SortByQueueSizeAndReqID)

    # 2.
    if (a2l.queue_size >= self.max_queue_size and
        len(self._a2l_instances) < self.max_concurrent_jobs):
      a2l = self._CreateNewA2LInstance()

    # 3.
    if a2l.queue_size >= self.max_queue_size:
      a2l.WaitForNextSymbolInQueue()

    a2l.EnqueueRequest(addr, callback_arg)

  def Join(self):
    """Waits for all the outstanding requests to complete and terminates."""
    for a2l in self._a2l_instances:
      a2l.WaitForIdle()
      a2l.Terminate()

  def _CreateNewA2LInstance(self):
    # Pool growth is bounded by |max_concurrent_jobs|; callers check before
    # calling (see SymbolizeAsync step 2).
    assert(len(self._a2l_instances) < self.max_concurrent_jobs)
    a2l = ELFSymbolizer.Addr2Line(self)
    self._a2l_instances.append(a2l)
    return a2l

  def _CreateDisambiguationTable(self):
    """ Non-unique file names will result in None entries"""
    self.disambiguation_table = {}

    # Walk |source_root_path| once; a file name seen twice maps to None so
    # lookups can tell "ambiguous" apart from "unknown".
    for root, _, filenames in os.walk(self.source_root_path):
      for f in filenames:
        self.disambiguation_table[f] = os.path.join(root, f) if (f not in
                                       self.disambiguation_table) else None


  class Addr2Line(object):
    """A python wrapper around an addr2line instance.

    The communication with the addr2line process looks as follows:
      [STDIN]         [STDOUT]  (from addr2line's viewpoint)
    > f001111
    > f002222
                    < Symbol::Name(foo, bar) for f001111
                    < /path/to/source/file.c:line_number
    > f003333
                    < Symbol::Name2() for f002222
                    < /path/to/source/file.c:line_number
                    < Symbol::Name3() for f003333
                    < /path/to/source/file.c:line_number
    """

    # Parses the location line ("file.c:42" / "??:?"); group 1 is the path,
    # group 2 the line number or '?'.
    SYM_ADDR_RE = re.compile(r'([^:]+):(\?|\d+).*')

    def __init__(self, symbolizer):
      self._symbolizer = symbolizer
      self._lib_file_name = posixpath.basename(symbolizer.elf_file_path)

      # The request queue (i.e. addresses pushed to addr2line's stdin and not
      # yet retrieved on stdout)
      self._request_queue = collections.deque()

      # This is essentially len(self._request_queue). It has been optimized to a
      # separate field because turned out to be a perf hot-spot.
      self.queue_size = 0

      # Keep track of the number of symbols a process has processed to
      # avoid a single process growing too big and using all the memory.
      self._processed_symbols_count = 0

      # Objects required to handle the addr2line subprocess.
      self._proc = None  # Subprocess.Popen(...) instance.
      self._thread = None  # Threading.thread instance.
      self._out_queue = None  # Queue.Queue instance (for buffering a2l stdout).
      self._RestartAddr2LineProcess()

    def EnqueueRequest(self, addr, callback_arg):
      """Pushes an address to addr2line's stdin (and keeps track of it)."""
      self._symbolizer.requests_counter += 1  # For global "age" of requests.
      req_idx = self._symbolizer.requests_counter
      self._request_queue.append((addr, callback_arg, req_idx))
      self.queue_size += 1
      self._WriteToA2lStdin(addr)

    def WaitForIdle(self):
      """Waits until all the pending requests have been symbolized."""
      while self.queue_size > 0:
        self.WaitForNextSymbolInQueue()

    def WaitForNextSymbolInQueue(self):
      """Waits for the next pending request to be symbolized."""
      if not self.queue_size:
        return

      # This outer loop guards against a2l hanging (detecting stdout timeout).
      while True:
        start_time = datetime.datetime.now()
        timeout = datetime.timedelta(seconds=self._symbolizer.addr2line_timeout)

        # The inner loop guards against a2l crashing (checking if it exited).
        while (datetime.datetime.now() - start_time < timeout):
          # poll() returns !None if the process exited. a2l should never exit.
          # NOTE(review): this truthiness test misses a process that exited
          # with status 0 (poll() returns a falsy 0) -- confirm whether
          # `is not None` was intended.
          if self._proc.poll():
            logging.warning('addr2line crashed, respawning (lib: %s).' %
                            self._lib_file_name)
            self._RestartAddr2LineProcess()
            # TODO(primiano): the best thing to do in this case would be
            # shrinking the pool size as, very likely, addr2line is crashed
            # due to low memory (and the respawned one will die again soon).

          try:
            lines = self._out_queue.get(block=True, timeout=0.25)
          except Queue.Empty:
            # On timeout (1/4 s.) repeat the inner loop and check if either the
            # addr2line process did crash or we waited its output for too long.
            continue

          # In nominal conditions, we get straight to this point.
          self._ProcessSymbolOutput(lines)
          return

        # If this point is reached, we waited more than |addr2line_timeout|.
        logging.warning('Hung addr2line process, respawning (lib: %s).' %
                        self._lib_file_name)
        self._RestartAddr2LineProcess()

    def ProcessAllResolvedSymbolsInQueue(self):
      """Consumes all the addr2line output lines produced (without blocking)."""
      if not self.queue_size:
        return
      while True:
        try:
          lines = self._out_queue.get_nowait()
        except Queue.Empty:
          break
        self._ProcessSymbolOutput(lines)

    def RecycleIfNecessary(self):
      """Restarts the process if it has been used for too long.

      A long running addr2line process will consume excessive amounts
      of memory without any gain in performance."""
      if self._processed_symbols_count >= ADDR2LINE_RECYCLE_LIMIT:
        self._RestartAddr2LineProcess()


    def Terminate(self):
      """Kills the underlying addr2line process.

      The poller |_thread| will terminate as well due to the broken pipe."""
      try:
        self._proc.kill()
        self._proc.communicate()  # Essentially wait() without risking deadlock.
      except Exception:  # An exception while terminating? How interesting.
        pass
      self._proc = None

    def _WriteToA2lStdin(self, addr):
      self._proc.stdin.write('%s\n' % hex(addr))
      if self._symbolizer.inlines:
        # In the case of inlines we output an extra blank line, which causes
        # addr2line to emit a (??,??:0) tuple that we use as a boundary marker.
        self._proc.stdin.write('\n')
      self._proc.stdin.flush()

    def _ProcessSymbolOutput(self, lines):
      """Parses an addr2line symbol output and triggers the client callback."""
      # Requests are answered in FIFO order, so the front of |_request_queue|
      # is the one these |lines| belong to.
      (_, callback_arg, _) = self._request_queue.popleft()
      self.queue_size -= 1

      innermost_sym_info = None
      sym_info = None
      for (line1, line2) in lines:
        prev_sym_info = sym_info
        name = line1 if not line1.startswith('?') else None
        source_path = None
        source_line = None
        m = ELFSymbolizer.Addr2Line.SYM_ADDR_RE.match(line2)
        if m:
          if not m.group(1).startswith('?'):
            source_path = m.group(1)
          if not m.group(2).startswith('?'):
            source_line = int(m.group(2))
        else:
          logging.warning('Got invalid symbol path from addr2line: %s' % line2)

        # In case disambiguation is on, and needed
        was_ambiguous = False
        disambiguated = False
        if self._symbolizer.disambiguate:
          if source_path and not posixpath.isabs(source_path):
            path = self._symbolizer.disambiguation_table.get(source_path)
            was_ambiguous = True
            disambiguated = path is not None
            source_path = path if disambiguated else source_path

          # Use absolute paths (so that paths are consistent, as disambiguation
          # uses absolute paths)
          if source_path and not was_ambiguous:
            source_path = os.path.abspath(source_path)

        if source_path and self._symbolizer.strip_base_path:
          # Strip the base path
          # NOTE(review): |strip_base_path| is interpolated into the pattern
          # unescaped; regex metacharacters in the path would misbehave --
          # confirm callers only pass literal paths (re.escape would be safer).
          source_path = re.sub('^' + self._symbolizer.strip_base_path,
              self._symbolizer.source_root_path or '', source_path)

        sym_info = ELFSymbolInfo(name, source_path, source_line, was_ambiguous,
                                 disambiguated,
                                 self._symbolizer.prefix_to_remove)
        if prev_sym_info:
          prev_sym_info.inlined_by = sym_info
        if not innermost_sym_info:
          innermost_sym_info = sym_info

      self._processed_symbols_count += 1
      self._symbolizer.callback(innermost_sym_info, callback_arg)

    def _RestartAddr2LineProcess(self):
      if self._proc:
        self.Terminate()

      # The only reason of existence of this Queue (and the corresponding
      # Thread below) is the lack of a subprocess.stdout.poll_avail_lines().
      # Essentially this is a pipe able to extract a couple of lines atomically.
      self._out_queue = Queue.Queue()

      # Start the underlying addr2line process in line buffered mode.

      cmd = [self._symbolizer.addr2line_path, '--functions', '--demangle',
          '--exe=' + self._symbolizer.elf_file_path]
      if self._symbolizer.inlines:
        cmd += ['--inlines']
      self._proc = subprocess.Popen(cmd, bufsize=1, stdout=subprocess.PIPE,
          stdin=subprocess.PIPE, stderr=sys.stderr, close_fds=True)

      # Start the poller thread, which simply moves atomically the lines read
      # from the addr2line's stdout to the |_out_queue|.
      self._thread = threading.Thread(
          target=ELFSymbolizer.Addr2Line.StdoutReaderThread,
          args=(self._proc.stdout, self._out_queue, self._symbolizer.inlines))
      self._thread.daemon = True  # Don't prevent early process exit.
      self._thread.start()

      self._processed_symbols_count = 0

      # Replay the pending requests on the new process (only for the case
      # of a hung addr2line timing out during the game).
      for (addr, _, _) in self._request_queue:
        self._WriteToA2lStdin(addr)

    @staticmethod
    def StdoutReaderThread(process_pipe, queue, inlines):
      """The poller thread fn, which moves the addr2line stdout to the |queue|.

      This is the only piece of code not running on the main thread. It merely
      writes to a Queue, which is thread-safe. In the case of inlines, it
      detects the ??,??:0 marker and sends the lines atomically, such that the
      main thread always receives all the lines corresponding to one symbol in
      one shot."""
      try:
        lines_for_one_symbol = []
        while True:
          line1 = process_pipe.readline().rstrip('\r\n')
          line2 = process_pipe.readline().rstrip('\r\n')
          if not line1 or not line2:
            break
          inline_has_more_lines = inlines and (len(lines_for_one_symbol) == 0 or
                                  (line1 != '??' and line2 != '??:0'))
          if not inlines or inline_has_more_lines:
            lines_for_one_symbol += [(line1, line2)]
          if inline_has_more_lines:
            continue
          queue.put(lines_for_one_symbol)
          lines_for_one_symbol = []
        process_pipe.close()

      # Every addr2line processes will die at some point, please die silently.
      except (IOError, OSError):
        pass

    @property
    def first_request_id(self):
      """Returns the request_id of the oldest pending request in the queue."""
      return self._request_queue[0][2] if self._request_queue else 0
+
+
class ELFSymbolInfo(object):
  """The result of the symbolization passed as first arg. of each callback."""

  def __init__(self, name, source_path, source_line, was_ambiguous=False,
               disambiguated=False, prefix_to_remove=None):
    """All the fields here can be None (if addr2line replies with '??').

    Args:
      name: demangled symbol name, or None.
      source_path: source file path reported by addr2line, or None.
      source_line: line number within |source_path|, or None.
      was_ambiguous: True if the path required disambiguation.
      disambiguated: True if the disambiguation succeeded.
      prefix_to_remove: optional prefix stripped off |source_path| (Skia added).
    """
    self.name = name
    # Bug fix: also check |prefix_to_remove| for truthiness. It defaults to
    # None, and str.startswith(None) raises TypeError, so previously any
    # caller that omitted the prefix crashed whenever a source path was
    # present.
    if (source_path and prefix_to_remove and
        source_path.startswith(prefix_to_remove)):
      source_path = source_path[len(prefix_to_remove):]
    self.source_path = source_path
    self.source_line = source_line
    # In the case of |inlines|=True, the |inlined_by| points to the outer
    # function inlining the current one (and so on, to form a chain).
    self.inlined_by = None
    self.disambiguated = disambiguated
    self.was_ambiguous = was_ambiguous

  def __str__(self):
    return '%s [%s:%d]' % (
        self.name or '??', self.source_path or '??', self.source_line or 0)
diff --git a/infra/bots/recipe_modules/core/resources/generate_and_upload_doxygen.py b/infra/bots/recipe_modules/core/resources/generate_and_upload_doxygen.py
new file mode 100755
index 0000000000..f06ea96bc7
--- /dev/null
+++ b/infra/bots/recipe_modules/core/resources/generate_and_upload_doxygen.py
@@ -0,0 +1,75 @@
+#!/usr/bin/env python
+# Copyright 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+
+"""Generate Doxygen documentation."""
+
+
+import datetime
+import os
+import shutil
+import subprocess
+import sys
+
+
DOXYFILE_BASENAME = 'Doxyfile'  # must match name of Doxyfile in skia root
DOXYGEN_BINARY = 'doxygen'
# All generated artifacts live under a sibling of the current directory.
WORKDIR = os.path.join(os.pardir, 'doxygen_workdir')
DOXYGEN_CONFIG_DIR = os.path.join(WORKDIR, 'doxygen-config')
DOXYGEN_WORKING_DIR = os.path.join(WORKDIR, 'doxygen')
# Destination bucket/prefix for the uploaded documentation.
DOXYGEN_GS_PATH = '/'.join(['gs://chromium-skia-gm', 'doxygen'])

# Filled in with (generation timestamp, doxygen version); written out as
# iframe_footer.html next to the generated docs.
IFRAME_FOOTER_TEMPLATE = """
<html><body><address style="text-align: right;"><small>
Generated at %s for skia
by <a href="http://www.doxygen.org/index.html">doxygen</a>
%s </small></address></body></html>
"""
+
+
def recreate_dir(path):
  """Make |path| an empty directory, discarding any previous contents."""
  removal_error = None
  try:
    shutil.rmtree(path)
  except OSError as e:
    # rmtree fails when the path never existed; that is fine. Anything
    # still on disk after the attempt is a real failure.
    removal_error = e
  if removal_error is not None and os.path.exists(path):
    raise Exception('Could not remove %s' % path)
  os.makedirs(path)
+
+
def generate_and_upload_doxygen(gsutil_path):
  """Run doxygen over the Skia tree and upload the result with gsutil.

  Args:
    gsutil_path: path to the gsutil binary used for the upload.
  """
  # Fresh output dir, seeded with the static footer doxygen references.
  recreate_dir(DOXYGEN_WORKING_DIR)
  static_footer_path = os.path.join(DOXYGEN_WORKING_DIR, 'static_footer.txt')
  shutil.copyfile(os.path.join('tools', 'doxygen_footer.txt'),
                  static_footer_path)

  # Copy the checked-in Doxyfile, appending overrides for the output dir and
  # footer (later settings win in doxygen config files), then run doxygen.
  recreate_dir(DOXYGEN_CONFIG_DIR)
  modified_doxyfile = os.path.join(DOXYGEN_CONFIG_DIR, DOXYFILE_BASENAME)
  with open(DOXYFILE_BASENAME, 'r') as src:
    with open(modified_doxyfile, 'w') as dst:
      shutil.copyfileobj(src, dst)
      dst.write('OUTPUT_DIRECTORY = %s\n' % DOXYGEN_WORKING_DIR)
      dst.write('HTML_FOOTER = %s\n' % static_footer_path)
  subprocess.check_call([DOXYGEN_BINARY, modified_doxyfile])

  # Emit iframe_footer.html recording when and with which doxygen version
  # the docs were generated.
  footer_path = os.path.join(DOXYGEN_WORKING_DIR, 'iframe_footer.html')
  with open(footer_path, 'w') as footer_file:
    timestamp = datetime.datetime.now().isoformat(' ')
    version = subprocess.check_output([DOXYGEN_BINARY, '--version']).rstrip()
    footer_file.write(IFRAME_FOOTER_TEMPLATE % (timestamp, version))

  # Recursively upload everything, world-readable.
  subprocess.check_call([gsutil_path, 'cp', '-a', 'public-read', '-R',
                         DOXYGEN_WORKING_DIR, DOXYGEN_GS_PATH])
+
+
if __name__ == '__main__':
  # Positional CLI args map straight onto the function's parameters.
  generate_and_upload_doxygen(*sys.argv[1:])
+
diff --git a/infra/bots/recipe_modules/core/resources/run_binary_size_analysis.py b/infra/bots/recipe_modules/core/resources/run_binary_size_analysis.py
new file mode 100755
index 0000000000..5cb24d967a
--- /dev/null
+++ b/infra/bots/recipe_modules/core/resources/run_binary_size_analysis.py
@@ -0,0 +1,817 @@
+#!/usr/bin/env python
+# Copyright 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""Generate a spatial analysis against an arbitrary library.
+
+Adapted for Skia's use case from
+chromium/src/tools/binary_size/run_binary_size_analysis.py. Main changes:
+
+-- Cleans up some deprecated codes.
+-- Always use relative code path so the tree root is Skia repo's root.
+-- Instead of outputting the standalone HTML/CSS/JS filesets, writes the
+ TreeMap JSON data into a Google Storage bucket.
+-- Adds githash and total_size to the JSON data.
+-- Outputs another summary data in JSON Bench format for skiaperf ingestion.
+
+The output JSON data for visualization is in the following format:
+
+{
+ "githash": 123abc,
+ "commit_ts": 1234567890,
+ "total_size": 1234567,
+ "key": {"source_type": "binary_size"},
+ "tree_data": {
+ "maxDepth": 9,
+ "k": "p", "children":[
+ {"k":"p","children":[
+ {"k":"p","children":[
+ {"k":"p","lastPathElement":true,"children":[
+ {"k":"b","t":"t","children":[
+ {"k":"s", "t":"t", "value":4029,
+ "n":"etc_encode_subblock_helper(unsigned char const*, ...)"
+ },
+ ......
+ }
+}
+
+Another JSON file is generated for size summaries to be used in skiaperf. The
+JSON format details can be found at:
+ https://github.com/google/skia/blob/master/bench/ResultsWriter.h#L54
+and:
+ https://skia.googlesource.com/buildbot/+/master/perf/go/ingester/nanobench.go
+
+In the binary size case, outputs look like:
+
+{
+ "gitHash": "123abc",
+ "key": {
+ "source_type": "binarysize"
+ }
+ "results: {
+ "src_lazy_global_weak_symbol": {
+ "memory": {
+ "bytes": 41,
+ "options": {
+ "path": "src_lazy",
+ "symbol": "global_weak_symbol"
+ }
+ }
+ },
+ "src_lazy_global_read_only_data": {
+ "memory": {
+ "bytes": 13476,
+ "options": {
+ "path": "src_lazy",
+ "symbol": "global_read_only_data"
+ }
+ }
+ },
+ ...
+ }
+}
+
+"""
+
+import collections
+import datetime
+import json
+import logging
+import multiprocessing
+import optparse
+import os
+import re
+import shutil
+import struct
+import subprocess
+import sys
+import tempfile
+import time
+import urllib2
+
+import binary_size_utils
+import elf_symbolizer
+
+from recipe_engine.types import freeze
+
# Node dictionary keys. These are output in json read by the webapp so
# keep them short to save file size.
# Note: If these change, the webapp must also change.
NODE_TYPE_KEY = 'k'
NODE_NAME_KEY = 'n'
NODE_CHILDREN_KEY = 'children'
NODE_SYMBOL_TYPE_KEY = 't'
NODE_SYMBOL_SIZE_KEY = 'value'
NODE_MAX_DEPTH_KEY = 'maxDepth'
NODE_LAST_PATH_ELEMENT_KEY = 'lastPathElement'

# The display name of the bucket where we put symbols without path.
NAME_NO_PATH_BUCKET = '(No Path)'

# Try to keep data buckets smaller than this to avoid killing the
# graphing lib.
BIG_BUCKET_LIMIT = 3000

# Skia addition: relative dir for libskia.so from code base.
LIBSKIA_RELATIVE_PATH = os.path.join('out', 'Release', 'lib')

# Skia addition: dictionary mapping symbol type code to symbol name.
# The keys are nm(1) symbol type codes; the values end up in uploaded JSON
# and in skiaperf series names, so changing a value breaks data continuity.
# See
# https://code.google.com/p/chromium/codesearch#chromium/src/tools/binary_size/template/D3SymbolTreeMap.js&l=74
SYMBOL_MAP = freeze({
  'A': 'global_absolute',
  'B': 'global_uninitialized_data',
  'b': 'local_uninitialized_data',
  'C': 'global_uninitialized_common',
  'D': 'global_initialized_data',
  'd': 'local_initialized_data',
  # NOTE(review): the space in the next value looks like a typo for '_',
  # but the string is emitted data -- confirm downstream before changing.
  'G': 'global_small initialized_data',
  'g': 'local_small_initialized_data',
  'i': 'indirect_function',
  'N': 'debugging',
  'p': 'stack_unwind',
  'R': 'global_read_only_data',
  'r': 'local_read_only_data',
  'S': 'global_small_uninitialized_data',
  's': 'local_small_uninitialized_data',
  'T': 'global_code',
  't': 'local_code',
  'U': 'undefined',
  'u': 'unique',
  'V': 'global_weak_object',
  'v': 'local_weak_object',
  'W': 'global_weak_symbol',
  'w': 'local_weak_symbol',
  '@': 'vtable_entry',
  '-': 'stabs_debugging',
  '?': 'unrecognized',
})
+
+
def _MkChild(node, name):
  """Returns |node|'s child named |name|, creating an empty one if missing."""
  children = node[NODE_CHILDREN_KEY]
  if name not in children:
    children[name] = {NODE_NAME_KEY: name,
                      NODE_CHILDREN_KEY: {}}
  return children[name]
+
+
def SplitNoPathBucket(node):
  """NAME_NO_PATH_BUCKET can be too large for the graphing lib to
  handle. Split it into sub-buckets in that case.

  Mutates |node| (the tree root) in place; no return value."""
  root_children = node[NODE_CHILDREN_KEY]
  if NAME_NO_PATH_BUCKET in root_children:
    no_path_bucket = root_children[NAME_NO_PATH_BUCKET]
    old_children = no_path_bucket[NODE_CHILDREN_KEY]
    # First pass: count the leaves to decide whether splitting is needed.
    count = 0
    for symbol_type, symbol_bucket in old_children.iteritems():
      count += len(symbol_bucket[NODE_CHILDREN_KEY])
    if count > BIG_BUCKET_LIMIT:
      # Second pass: redistribute leaves into "(No Path) subgroup N" children
      # of at most BIG_BUCKET_LIMIT symbols each.
      new_children = {}
      no_path_bucket[NODE_CHILDREN_KEY] = new_children
      current_bucket = None
      index = 0
      for symbol_type, symbol_bucket in old_children.iteritems():
        for symbol_name, value in symbol_bucket[NODE_CHILDREN_KEY].iteritems():
          if index % BIG_BUCKET_LIMIT == 0:
            # Python 2 integer division.
            group_no = (index / BIG_BUCKET_LIMIT) + 1
            current_bucket = _MkChild(no_path_bucket,
                                      '%s subgroup %d' % (NAME_NO_PATH_BUCKET,
                                                          group_no))
            # NOTE(review): the next two statements operate on the root
            # |node|, not on |current_bucket| -- confirm that tagging the
            # root (rather than the new subgroup) is intended.
            assert not NODE_TYPE_KEY in node or node[NODE_TYPE_KEY] == 'p'
            node[NODE_TYPE_KEY] = 'p'  # p for path
          index += 1
          symbol_size = value[NODE_SYMBOL_SIZE_KEY]
          AddSymbolIntoFileNode(current_bucket, symbol_type,
                                symbol_name, symbol_size)
+
+
def MakeChildrenDictsIntoLists(node):
  """Recursively converts every NODE_CHILDREN_KEY dict into a plain list.

  Returns the size of the largest child list found anywhere in the subtree
  (the caller uses this to warn about overly wide nodes)."""
  largest = 0
  if NODE_CHILDREN_KEY in node:
    largest = len(node[NODE_CHILDREN_KEY])
    converted = []
    for child in node[NODE_CHILDREN_KEY].itervalues():
      largest = max(largest, MakeChildrenDictsIntoLists(child))
      converted.append(child)
    node[NODE_CHILDREN_KEY] = converted
  return largest
+
+
def AddSymbolIntoFileNode(node, symbol_type, symbol_name, symbol_size):
  """Inserts one symbol under the file-path node |node|.

  Two levels are created below |node| -- a symbol-type bucket and the symbol
  leaf itself -- so the added subtree depth, 2, is returned."""
  node[NODE_LAST_PATH_ELEMENT_KEY] = True

  # Level 1: the symbol-type bucket under the file node.
  bucket = _MkChild(node, symbol_type)
  assert NODE_TYPE_KEY not in bucket or bucket[NODE_TYPE_KEY] == 'b'
  bucket[NODE_SYMBOL_TYPE_KEY] = symbol_type
  bucket[NODE_TYPE_KEY] = 'b'  # b for bucket

  # Level 2: the symbol leaf inside the bucket.
  leaf = _MkChild(bucket, symbol_name)
  if NODE_CHILDREN_KEY in leaf:
    if leaf[NODE_CHILDREN_KEY]:
      logging.warning('A container node used as symbol for %s.' % symbol_name)
    # Leaves carry no child list.
    del leaf[NODE_CHILDREN_KEY]
  leaf[NODE_SYMBOL_SIZE_KEY] = symbol_size
  leaf[NODE_SYMBOL_TYPE_KEY] = symbol_type
  leaf[NODE_TYPE_KEY] = 's'  # s for symbol

  return 2  # Depth of the added subtree.
+
+
def MakeCompactTree(symbols, symbol_path_origin_dir):
  """Builds the nested path/bucket/symbol tree from nm symbol tuples.

  Args:
    symbols: iterable of (name, type code, size, file path) tuples; the path
        may be None or "??" for symbols without source information.
    symbol_path_origin_dir: NOTE(review): unused in this function body --
        presumably kept for interface parity with the upstream Chromium
        version; confirm before removing.

  Returns:
    The root node dict, with NODE_MAX_DEPTH_KEY set to the deepest path."""
  result = {NODE_NAME_KEY: '/',
            NODE_CHILDREN_KEY: {},
            NODE_TYPE_KEY: 'p',
            NODE_MAX_DEPTH_KEY: 0}
  seen_symbol_with_path = False
  for symbol_name, symbol_type, symbol_size, file_path in symbols:

    if 'vtable for ' in symbol_name:
      symbol_type = '@'  # hack to categorize these separately
    if file_path and file_path != "??":
      seen_symbol_with_path = True
    else:
      file_path = NAME_NO_PATH_BUCKET

    path_parts = file_path.split('/')

    # Find pre-existing node in tree, or update if it already exists
    node = result
    depth = 0
    while len(path_parts) > 0:
      path_part = path_parts.pop(0)
      if len(path_part) == 0:
        continue
      depth += 1
      node = _MkChild(node, path_part)
      assert not NODE_TYPE_KEY in node or node[NODE_TYPE_KEY] == 'p'
      node[NODE_TYPE_KEY] = 'p'  # p for path

    depth += AddSymbolIntoFileNode(node, symbol_type, symbol_name, symbol_size)
    result[NODE_MAX_DEPTH_KEY] = max(result[NODE_MAX_DEPTH_KEY], depth)

  if not seen_symbol_with_path:
    logging.warning('Symbols lack paths. Data will not be structured.')

  # The (no path) bucket can be extremely large if we failed to get
  # path information. Split it into subgroups if needed.
  SplitNoPathBucket(result)

  largest_list_len = MakeChildrenDictsIntoLists(result)

  if largest_list_len > BIG_BUCKET_LIMIT:
    logging.warning('There are sections with %d nodes. '
                    'Results might be unusable.' % largest_list_len)
  return result
+
+
def GetTreeSizes(node):
  """Skia added: sums subtree sizes keyed by symbol type code.

  Returns a dict mapping symbol type to the type's overall size under
  |node|, e.g. {"t": 12345, "W": 543}."""
  children = node.get('children')
  if not children:
    # Leaf symbol node: a single (type, size) contribution.
    return {node['t']: node['value']}
  totals = {}
  for child in children:
    for sym_type, size in GetTreeSizes(child).items():
      totals[sym_type] = totals.get(sym_type, 0) + size
  return totals
+
+
def GetBenchDict(githash, tree_root):
  """Skia added: converts the size tree into a bench-format dict.

  See the module docstring for the JSON structure this produces."""
  def _Entry(path, size, symbol):
    # One skiaperf measurement record.
    return {'memory': {'bytes': size,
                       'options': {'path': path,
                                   'symbol': symbol}}}

  results = {}
  for top in tree_root['children']:
    if top['n'] == '(No Path)':  # Already at symbol summary level.
      for code, size in GetTreeSizes(top).items():
        results['no_path_' + SYMBOL_MAP[code]] = _Entry('no_path', size,
                                                        SYMBOL_MAP[code])
    else:  # We need to go deeper.
      for child in top['children']:
        path = top['n'] + '_' + child['n']
        for code, size in GetTreeSizes(child).items():
          results[path + '_' + SYMBOL_MAP[code]] = _Entry(path, size,
                                                          SYMBOL_MAP[code])

  return {'gitHash': githash,
          'key': {'source_type': 'binarysize'},
          'results': results}
+
+
def GetGsCopyCommandList(gsutil, src, dst):
  """Skia added: builds the 'gsutil cp' argv for a public-read JSON upload."""
  return [gsutil, '-h', 'Content-Type:application/json',
          'cp', '-a', 'public-read', src, dst]
+
+
def DumpCompactTree(symbols, symbol_path_origin_dir, ha, ts, issue, gsutil):
  """Builds the size tree and uploads both JSON outputs to Google Storage.

  Args:
    symbols: iterable of (name, type, size, path) tuples from nm.
    symbol_path_origin_dir: passed through to MakeCompactTree.
    ha: git hash of the commit being analyzed.
    ts: commit timestamp for the JSON payload.
    issue: trybot issue identifier; falsy for waterfall runs.
    gsutil: path to the gsutil binary.
  """
  tree_root = MakeCompactTree(symbols, symbol_path_origin_dir)
  json_data = {'tree_data': tree_root,
               'githash': ha,
               'commit_ts': ts,
               'key': {'source_type': 'binary_size'},
               'total_size': sum(GetTreeSizes(tree_root).values()),}
  # delete=False: the path is reused for several uploads below. The file is
  # intentionally left behind for the bot's scratch dir cleanup.
  tmpfile = tempfile.NamedTemporaryFile(delete=False).name
  with open(tmpfile, 'w') as out:
    # Use separators without whitespace to get a smaller file.
    json.dump(json_data, out, separators=(',', ':'))

  GS_PREFIX = 'gs://chromium-skia-gm/'
  # Writes to Google Storage for visualization.
  subprocess.check_call(GetGsCopyCommandList(
      gsutil, tmpfile, GS_PREFIX + 'size/' + ha + '.json'))
  # Updates the latest data.
  if not issue:
    subprocess.check_call(GetGsCopyCommandList(gsutil, tmpfile,
        GS_PREFIX + 'size/latest.json'))
  # Writes an extra copy using year/month/day/hour path for easy ingestion.
  with open(tmpfile, 'w') as out:
    json.dump(GetBenchDict(ha, tree_root), out, separators=(',', ':'))
  now = datetime.datetime.utcnow()
  ingest_path = '/'.join(('nano-json-v1', str(now.year).zfill(4),
                          str(now.month).zfill(2), str(now.day).zfill(2),
                          str(now.hour).zfill(2)))
  if issue:
    # Bug fix: str.join takes a single iterable, but the original passed
    # three positional arguments ('trybot', ingest_path, issue), which
    # raised TypeError on every trybot run. Wrap them in a tuple, matching
    # the join for |ingest_path| above.
    ingest_path = '/'.join(('trybot', ingest_path, issue))
  subprocess.check_call(GetGsCopyCommandList(gsutil, tmpfile,
      GS_PREFIX + ingest_path + '/binarysize_' + ha + '.json'))
+
+
def MakeSourceMap(symbols):
  """Groups symbols by normalized source path.

  Returns a dict mapping path key -> {'path', 'symbol_count', 'size'};
  symbols without a path fall under the '[no path]' key.
  """
  sources = {}
  for _sym, _symbol_type, size, path in symbols:
    key = os.path.normpath(path) if path else '[no path]'
    # First occurrence creates the record (keeping that occurrence's path);
    # later ones just accumulate into it.
    record = sources.setdefault(
        key, {'path': path, 'symbol_count': 0, 'size': 0})
    record['size'] += size
    record['symbol_count'] += 1
  return sources
+
+
# Regex for parsing "nm" output. A sample line looks like this:
# 0167b39c 00000018 t ACCESS_DESCRIPTION_free /path/file.c:95
#
# The fields are: address, size, type, name, source location
# Regular expression explained ( see also: https://xkcd.com/208 ):
# ([0-9a-f]{8,})   The address: at least 8 hex digits.
# [\s]+            Whitespace separator
# ([0-9a-f]{8,})   The size, in hex. From here on out it's all optional.
# [\s]*            Optional whitespace separator
# (\S?)            The symbol type, which is any non-whitespace char
# [\s*]            Separator: one whitespace char (or a literal '*')
# ([^\t]*)         Symbol name, any non-tab character (spaces ok!)
# [\t]?            Tab separator
# (.*)             The location (filename[:linennum|?][ (discriminator n)]
sNmPattern = re.compile(
    r'([0-9a-f]{8,})[\s]+([0-9a-f]{8,})[\s]*(\S?)[\s*]([^\t]*)[\t]?(.*)')
+
class Progress():
  """Mutable counters used to report symbol-lookup progress."""

  def __init__(self):
    # Symbols processed so far, and nm lines skipped (unparseable).
    self.count = 0
    self.skip_count = 0
    # Addresses that resolved to more than one symbol.
    self.collisions = 0
    # Disambiguation bookkeeping: successes vs. total ambiguous lookups.
    self.disambiguations = 0
    self.was_ambiguous = 0
    # Throttling state for console status updates.
    self.time_last_output = time.time()
    self.count_last_output = 0
+
+
def RunElfSymbolizer(outfile, library, addr2line_binary, nm_binary, jobs,
                     disambiguate, src_path):
  """Symbolizes nm output for |library| and writes annotated lines to |outfile|.

  Runs nm over the library, then feeds addresses lacking a source location
  through a pool of addr2line workers (elf_symbolizer.ELFSymbolizer).  Each
  output line is the original nm line, with "\tpath:line" appended when a
  location was resolved; unparseable lines pass through unchanged.
  """
  nm_output = RunNm(library, nm_binary)
  nm_output_lines = nm_output.splitlines()
  nm_output_lines_len = len(nm_output_lines)
  address_symbol = {}  # address (int) -> resolved symbol object
  progress = Progress()

  def map_address_symbol(symbol, addr):
    # Callback invoked by the symbolizer for each resolved address.
    progress.count += 1
    if addr in address_symbol:
      # 'Collision between %s and %s.' % (str(symbol.name),
      #                                   str(address_symbol[addr].name))
      progress.collisions += 1
    else:
      if symbol.disambiguated:
        progress.disambiguations += 1
      if symbol.was_ambiguous:
        progress.was_ambiguous += 1

      address_symbol[addr] = symbol

    progress_output()

  def progress_output():
    # Writes a single-line status update to stdout, throttled below.
    progress_chunk = 100
    if progress.count % progress_chunk == 0:
      time_now = time.time()
      time_spent = time_now - progress.time_last_output
      if time_spent > 1.0:
        # Only output at most once per second.
        progress.time_last_output = time_now
        chunk_size = progress.count - progress.count_last_output
        progress.count_last_output = progress.count
        if time_spent > 0:
          speed = chunk_size / time_spent
        else:
          speed = 0
        progress_percent = (100.0 * (progress.count + progress.skip_count) /
                            nm_output_lines_len)
        disambiguation_percent = 0
        if progress.disambiguations != 0:
          disambiguation_percent = (100.0 * progress.disambiguations /
                                    progress.was_ambiguous)

        sys.stdout.write('\r%.1f%%: Looked up %d symbols (%d collisions, '
                         '%d disambiguations where %.1f%% succeeded)'
                         ' - %.1f lookups/s.' %
                         (progress_percent, progress.count, progress.collisions,
                          progress.disambiguations, disambiguation_percent,
                          speed))

  # In case disambiguation was disabled, we remove the source path (which upon
  # being set signals the symbolizer to enable disambiguation)
  if not disambiguate:
    src_path = None
  symbol_path_origin_dir = os.path.dirname(library)
  # Skia specific: strip the library's directory (minus the libskia-relative
  # suffix) from symbol source paths.
  symbol_path_prefix = symbol_path_origin_dir.replace(LIBSKIA_RELATIVE_PATH, '')
  symbolizer = elf_symbolizer.ELFSymbolizer(library, addr2line_binary,
                                            map_address_symbol,
                                            max_concurrent_jobs=jobs,
                                            source_root_path=src_path,
                                            prefix_to_remove=symbol_path_prefix)
  user_interrupted = False
  try:
    for line in nm_output_lines:
      match = sNmPattern.match(line)
      if match:
        location = match.group(5)
        if not location:
          addr = int(match.group(1), 16)
          size = int(match.group(2), 16)
          if addr in address_symbol:  # Already looked up, shortcut
                                      # ELFSymbolizer.
            map_address_symbol(address_symbol[addr], addr)
            continue
          elif size == 0:
            # Save time by not looking up empty symbols (do they even exist?)
            print('Empty symbol: ' + line)
          else:
            symbolizer.SymbolizeAsync(addr, addr)
            continue

      # Falls through for unparseable lines, empty symbols, and lines that
      # already carry a location.
      progress.skip_count += 1
  except KeyboardInterrupt:
    user_interrupted = True
    print('Interrupting - killing subprocesses. Please wait.')

  try:
    # Wait for the in-flight addr2line lookups to drain.
    symbolizer.Join()
  except KeyboardInterrupt:
    # Don't want to abort here since we will be finished in a few seconds.
    user_interrupted = True
    print('Patience you must have my young padawan.')

  # Terminate the \r-style progress line before normal printing resumes.
  print ''

  if user_interrupted:
    print('Skipping the rest of the file mapping. '
          'Output will not be fully classified.')

  symbol_path_origin_dir = os.path.dirname(library)
  # Skia specific: path prefix to strip.
  symbol_path_prefix = symbol_path_origin_dir.replace(LIBSKIA_RELATIVE_PATH, '')

  # Second pass: write each nm line out, appending "path:line" for addresses
  # that were resolved above.
  with open(outfile, 'w') as out:
    for line in nm_output_lines:
      match = sNmPattern.match(line)
      if match:
        location = match.group(5)
        if not location:
          addr = int(match.group(1), 16)
          symbol = address_symbol.get(addr)
          if symbol is not None:
            path = '??'
            if symbol.source_path is not None:
              path = symbol.source_path.replace(symbol_path_prefix, '')
            line_number = 0
            if symbol.source_line is not None:
              line_number = symbol.source_line
            out.write('%s\t%s:%d\n' % (line, path, line_number))
            continue

      out.write('%s\n' % line)

  print('%d symbols in the results.' % len(address_symbol))
+
+
def RunNm(binary, nm_binary):
  """Runs nm over |binary| and returns its stdout.

  Symbols are demangled (-C) and sorted by size, largest first.

  Raises:
    Exception: if nm exits non-zero; the message carries nm's stderr
        (or stdout when stderr is empty).
  """
  cmd = [nm_binary, '-C', '--print-size', '--size-sort', '--reverse-sort',
         binary]
  nm_process = subprocess.Popen(cmd,
                                stdout=subprocess.PIPE,
                                stderr=subprocess.PIPE)
  (process_output, err_output) = nm_process.communicate()

  if nm_process.returncode != 0:
    # BUG FIX: the Python-2-only 'raise Exception, msg' statement form was
    # replaced with the callable form, valid in both Python 2 and 3.
    if err_output:
      raise Exception(err_output)
    else:
      raise Exception(process_output)

  return process_output
+
+
def GetNmSymbols(nm_infile, outfile, library, jobs, verbose,
                 addr2line_binary, nm_binary, disambiguate, src_path):
  """Returns the parsed symbol list, generating the nm dump if needed.

  If nm_infile is None, runs the parallel symbolizer over |library| and
  dumps its output to |outfile| (a kept tempfile when outfile is None);
  otherwise reads the pre-existing dump at |nm_infile|.
  """
  if nm_infile is None:
    if outfile is None:
      outfile = tempfile.NamedTemporaryFile(delete=False).name

    if verbose:
      print('Running parallel addr2line, dumping symbols to ' + outfile)
    RunElfSymbolizer(outfile, library, addr2line_binary, nm_binary, jobs,
                     disambiguate, src_path)

    nm_infile = outfile

  elif verbose:
    print('Using nm input from ' + nm_infile)
  # BUG FIX: open() replaces the Python-2-only file() builtin.
  with open(nm_infile, 'r') as infile:
    return list(binary_size_utils.ParseNm(infile))
+
+
# Maps numeric pak resource ids to grit symbol names; 'inited' records
# whether the lazy header scan has run yet.
PAK_RESOURCE_ID_TO_STRING = { "inited": False }

def LoadPakIdsFromResourceFile(filename):
  """Given a file name, it loads everything that looks like a resource id
  into PAK_RESOURCE_ID_TO_STRING."""
  with open(filename) as resource_header:
    for line in resource_header:
      if not line.startswith("#define "):
        continue
      parts = line.split()
      if len(parts) != 3:
        continue
      try:
        PAK_RESOURCE_ID_TO_STRING[int(parts[2])] = parts[1]
      except ValueError:
        # Third token was not a plain integer; not a resource-id line.
        pass
+
def GetReadablePakResourceName(pak_file, resource_id):
  """Pak resources have a numeric identifier. It is not helpful when
  trying to locate where footprint is generated. This does its best to
  map the number to a usable string."""
  if not PAK_RESOURCE_ID_TO_STRING['inited']:
    # Lazily (and only once) scan for resource header files generated by
    # grit when building the pak file: files named *resources.h containing
    # lines like "#define MY_RESOURCE_JS 1234".
    PAK_RESOURCE_ID_TO_STRING['inited'] = True
    gen_dir = os.path.join(os.path.dirname(pak_file), 'gen')
    if os.path.isdir(gen_dir):
      for dirname, _dirs, files in os.walk(gen_dir):
        headers = [f for f in files if f.endswith('resources.h')]
        for header in headers:
          LoadPakIdsFromResourceFile(os.path.join(dirname, header))
  # Fall back to a generic name when the id was never defined.
  return PAK_RESOURCE_ID_TO_STRING.get(resource_id,
                                       'Pak Resource %d' % resource_id)
+
def AddPakData(symbols, pak_file):
  """Adds pseudo-symbols from a pak file."""
  pak_file = os.path.abspath(pak_file)
  with open(pak_file, 'rb') as pak:
    data = pak.read()

  PAK_FILE_VERSION = 4
  # Header: two uint32s (file version, number of entries) and one uint8
  # (encoding of text resources).
  HEADER_LENGTH = 2 * 4 + 1
  INDEX_ENTRY_SIZE = 2 + 4  # Each entry is a uint16 and a uint32.
  version, num_entries, _encoding = struct.unpack('<IIB', data[:HEADER_LENGTH])
  assert version == PAK_FILE_VERSION, ('Unsupported pak file '
                                       'version (%d) in %s. Only '
                                       'support version %d' %
                                       (version, pak_file, PAK_FILE_VERSION))
  if num_entries <= 0:
    return
  # Walk the index; each entry's size is the gap to the following entry's
  # offset, so peek one entry ahead without consuming it.
  index = data[HEADER_LENGTH:]
  for _ in range(num_entries):
    resource_id, offset = struct.unpack('<HI', index[:INDEX_ENTRY_SIZE])
    index = index[INDEX_ENTRY_SIZE:]
    _next_id, next_offset = struct.unpack('<HI', index[:INDEX_ENTRY_SIZE])
    symbols.append((GetReadablePakResourceName(pak_file, resource_id),
                    'd',  # Data. Approximation.
                    next_offset - offset,
                    pak_file))
+
+def _find_in_system_path(binary):
+ """Locate the full path to binary in the system path or return None
+ if not found."""
+ system_path = os.environ["PATH"].split(os.pathsep)
+ for path in system_path:
+ binary_path = os.path.join(path, binary)
+ if os.path.isfile(binary_path):
+ return binary_path
+ return None
+
def CheckDebugFormatSupport(library, addr2line_binary):
  """Kills the program if debug data is in an unsupported format.

  There are two common versions of the DWARF debug formats and
  since we are right now transitioning from DWARF2 to newer formats,
  it's possible to have a mix of tools that are not compatible. Detect
  that and abort rather than produce meaningless output."""
  tool_output = subprocess.check_output([addr2line_binary, '--version'])
  # Pull <major>.<minor> out of a "GNU <tool> ... X.Y..." version banner.
  version_re = re.compile(r'^GNU [^ ]+ .* (\d+).(\d+).*?$', re.M)
  parsed_output = version_re.match(tool_output)
  major = int(parsed_output.group(1))
  minor = int(parsed_output.group(2))
  # Per the check below, binutils newer than 2.22 are treated as
  # DWARF4-capable, so any debug format is acceptable.
  supports_dwarf4 = major > 2 or major == 2 and minor > 22

  if supports_dwarf4:
    return

  # Old addr2line: inspect the binary's actual DWARF version with readelf
  # and abort if the debug info is newer than DWARF2.
  print('Checking version of debug information in %s.' % library)
  debug_info = subprocess.check_output(['readelf', '--debug-dump=info',
                                        '--dwarf-depth=1', library])
  dwarf_version_re = re.compile(r'^\s+Version:\s+(\d+)$', re.M)
  parsed_dwarf_format_output = dwarf_version_re.search(debug_info)
  version = int(parsed_dwarf_format_output.group(1))
  if version > 2:
    print('The supplied tools only support DWARF2 debug data but the binary\n' +
          'uses DWARF%d. Update the tools or compile the binary\n' % version +
          'with -gdwarf-2.')
    sys.exit(1)
+
+
+def main():
+ usage = """%prog [options]
+
+ Runs a spatial analysis on a given library, looking up the source locations
+ of its symbols and calculating how much space each directory, source file,
+ and so on is taking. The result is a report that can be used to pinpoint
+ sources of large portions of the binary, etceteras.
+
+ Under normal circumstances, you only need to pass two arguments, thusly:
+
+ %prog --library /path/to/library --destdir /path/to/output
+
+ In this mode, the program will dump the symbols from the specified library
+ and map those symbols back to source locations, producing a web-based
+ report in the specified output directory.
+
+ Other options are available via '--help'.
+ """
+ parser = optparse.OptionParser(usage=usage)
+ parser.add_option('--nm-in', metavar='PATH',
+ help='if specified, use nm input from <path> instead of '
+ 'generating it. Note that source locations should be '
+ 'present in the file; i.e., no addr2line symbol lookups '
+ 'will be performed when this option is specified. '
+ 'Mutually exclusive with --library.')
+ parser.add_option('--destdir', metavar='PATH',
+ help='write output to the specified directory. An HTML '
+ 'report is generated here along with supporting files; '
+ 'any existing report will be overwritten. Not used in '
+ 'Skia.')
+ parser.add_option('--library', metavar='PATH',
+ help='if specified, process symbols in the library at '
+ 'the specified path. Mutually exclusive with --nm-in.')
+ parser.add_option('--pak', metavar='PATH',
+ help='if specified, includes the contents of the '
+ 'specified *.pak file in the output.')
+ parser.add_option('--nm-binary',
+ help='use the specified nm binary to analyze library. '
+ 'This is to be used when the nm in the path is not for '
+ 'the right architecture or of the right version.')
+ parser.add_option('--addr2line-binary',
+ help='use the specified addr2line binary to analyze '
+ 'library. This is to be used when the addr2line in '
+ 'the path is not for the right architecture or '
+ 'of the right version.')
+ parser.add_option('--jobs', type='int',
+ help='number of jobs to use for the parallel '
+ 'addr2line processing pool; defaults to 1. More '
+ 'jobs greatly improve throughput but eat RAM like '
+ 'popcorn, and take several gigabytes each. Start low '
+ 'and ramp this number up until your machine begins to '
+ 'struggle with RAM. '
+ 'This argument is only valid when using --library.')
+ parser.add_option('-v', dest='verbose', action='store_true',
+ help='be verbose, printing lots of status information.')
+ parser.add_option('--nm-out', metavar='PATH',
+ help='keep the nm output file, and store it at the '
+ 'specified path. This is useful if you want to see the '
+ 'fully processed nm output after the symbols have been '
+ 'mapped to source locations. By default, a tempfile is '
+ 'used and is deleted when the program terminates.'
+ 'This argument is only valid when using --library.')
+ parser.add_option('--legacy', action='store_true',
+ help='emit legacy binary size report instead of modern')
+ parser.add_option('--disable-disambiguation', action='store_true',
+ help='disables the disambiguation process altogether,'
+ ' NOTE: this may, depending on your toolchain, produce'
+ ' output with some symbols at the top layer if addr2line'
+ ' could not get the entire source path.')
+ parser.add_option('--source-path', default='./',
+ help='the path to the source code of the output binary, '
+ 'default set to current directory. Used in the'
+ ' disambiguation process.')
+ parser.add_option('--githash', default='latest',
+ help='Git hash for the binary version. Added by Skia.')
+ parser.add_option('--commit_ts', type='int', default=-1,
+ help='Timestamp for the commit. Added by Skia.')
+ parser.add_option('--issue_number', default='',
+ help='The trybot issue number in string. Added by Skia.')
+ parser.add_option('--gsutil_path', default='gsutil',
+ help='Path to gsutil binary. Added by Skia.')
+ opts, _args = parser.parse_args()
+
+ if ((not opts.library) and (not opts.nm_in)) or (opts.library and opts.nm_in):
+ parser.error('exactly one of --library or --nm-in is required')
+ if (opts.nm_in):
+ if opts.jobs:
+ print >> sys.stderr, ('WARNING: --jobs has no effect '
+ 'when used with --nm-in')
+ if not opts.jobs:
+ # Use the number of processors but cap between 2 and 4 since raw
+ # CPU power isn't the limiting factor. It's I/O limited, memory
+ # bus limited and available-memory-limited. Too many processes and
+ # the computer will run out of memory and it will be slow.
+ opts.jobs = max(2, min(4, str(multiprocessing.cpu_count())))
+
+ if opts.addr2line_binary:
+ assert os.path.isfile(opts.addr2line_binary)
+ addr2line_binary = opts.addr2line_binary
+ else:
+ addr2line_binary = _find_in_system_path('addr2line')
+ assert addr2line_binary, 'Unable to find addr2line in the path. '\
+ 'Use --addr2line-binary to specify location.'
+
+ if opts.nm_binary:
+ assert os.path.isfile(opts.nm_binary)
+ nm_binary = opts.nm_binary
+ else:
+ nm_binary = _find_in_system_path('nm')
+ assert nm_binary, 'Unable to find nm in the path. Use --nm-binary '\
+ 'to specify location.'
+
+ if opts.pak:
+ assert os.path.isfile(opts.pak), 'Could not find ' % opts.pak
+
+ print('addr2line: %s' % addr2line_binary)
+ print('nm: %s' % nm_binary)
+
+ if opts.library:
+ CheckDebugFormatSupport(opts.library, addr2line_binary)
+
+ symbols = GetNmSymbols(opts.nm_in, opts.nm_out, opts.library,
+ opts.jobs, opts.verbose is True,
+ addr2line_binary, nm_binary,
+ opts.disable_disambiguation is None,
+ opts.source_path)
+
+ if opts.pak:
+ AddPakData(symbols, opts.pak)
+
+ if opts.legacy: # legacy report
+ print 'Do Not set legacy flag.'
+
+ else: # modern report
+ if opts.library:
+ symbol_path_origin_dir = os.path.dirname(os.path.abspath(opts.library))
+ else:
+ # Just a guess. Hopefully all paths in the input file are absolute.
+ symbol_path_origin_dir = os.path.abspath(os.getcwd())
+ DumpCompactTree(symbols, symbol_path_origin_dir, opts.githash,
+ opts.commit_ts, opts.issue_number, opts.gsutil_path)
+ print 'Report data uploaded to GS.'
+
+
+if __name__ == '__main__':
+ sys.exit(main())
diff --git a/infra/bots/recipe_modules/core/resources/upload_bench_results.py b/infra/bots/recipe_modules/core/resources/upload_bench_results.py
new file mode 100755
index 0000000000..25cfcc2631
--- /dev/null
+++ b/infra/bots/recipe_modules/core/resources/upload_bench_results.py
@@ -0,0 +1,75 @@
+#!/usr/bin/env python
+# Copyright 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+""" Upload benchmark performance data results. """
+
+import gzip
+import os
+import os.path
+import re
+import subprocess
+import sys
+import tempfile
+
+from datetime import datetime
+
+
def _UploadJSONResults(builder_name, build_number, dest_gsbase, gs_subdir,
                       full_json_path, gzipped=True, gsutil_path='gsutil',
                       issue_number=None):
  """Copies one JSON result file (optionally gzipped) into Google Storage.

  The destination path is siloed by UTC year/month/day/hour and builder,
  with trybot results further siloed by build number and issue.
  """
  now = datetime.utcnow()
  time_path = '/'.join((str(now.year).zfill(4), str(now.month).zfill(2),
                        str(now.day).zfill(2), str(now.hour).zfill(2)))
  gs_dir = '/'.join((gs_subdir, time_path, builder_name))
  if builder_name.endswith('-Trybot'):
    if not issue_number:
      raise Exception('issue_number build property is missing!')
    gs_dir = '/'.join(('trybot', gs_dir, build_number, issue_number))

  upload_path = full_json_path
  file_to_upload = os.path.basename(upload_path)
  headers = ['Content-Type:application/json']
  if gzipped:
    headers.append('Content-Encoding:gzip')
    # Compress into the temp dir and upload the gzipped copy instead.
    gzipped_file = os.path.join(tempfile.gettempdir(), file_to_upload)
    with open(upload_path, 'rb') as f_in:
      with gzip.open(gzipped_file, 'wb') as f_out:
        f_out.writelines(f_in)
    upload_path = gzipped_file

  cmd = ['python', gsutil_path]
  for header in headers:
    cmd.extend(['-h', header])
  cmd.extend(['cp', '-a', 'public-read', upload_path,
              '/'.join((dest_gsbase, gs_dir, file_to_upload))])
  print(' '.join(cmd))
  subprocess.check_call(cmd)
+
+
def main(builder_name, build_number, perf_data_dir, got_revision, gsutil_path,
         issue_number=None):
  """Uploads gzipped nanobench JSON data."""
  # Find the nanobench JSON produced for this revision; do nothing if absent.
  pattern = re.compile('nanobench_({})_[0-9]+\.json'.format(got_revision))
  nanobench_name = next(
      (name for name in os.listdir(perf_data_dir) if pattern.search(name)),
      None)
  if not nanobench_name:
    return

  _UploadJSONResults(builder_name, build_number, 'gs://skia-perf',
                     'nano-json-v1',
                     os.path.join(perf_data_dir, nanobench_name),
                     gsutil_path=gsutil_path,
                     issue_number=issue_number)
+
+
+if __name__ == '__main__':
+ main(*sys.argv[1:])
+
diff --git a/infra/bots/recipe_modules/core/resources/upload_dm_results.py b/infra/bots/recipe_modules/core/resources/upload_dm_results.py
new file mode 100755
index 0000000000..1bee64fb78
--- /dev/null
+++ b/infra/bots/recipe_modules/core/resources/upload_dm_results.py
@@ -0,0 +1,98 @@
+#!/usr/bin/env python
+# Copyright 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""Upload DM output PNG files and JSON summary to Google Storage."""
+
+import datetime
+import json
+import os
+import shutil
+import sys
+import tempfile
+
def main(dm_dir, git_hash, builder_name, build_number, try_issue, import_path):
  """Upload DM output PNG files and JSON summary to Google Storage.

  dm_dir: path to PNG files and JSON summary (str)
  git_hash: this build's Git hash (str)
  builder_name: name of this builder (str)
  build_number: nth build on this builder (str or int)
  try_issue: Rietveld issue if this is a try job (str, int, or None)
  import_path: Path to import the gs_utils package (str)
  """
  # import gs_utils
  sys.path.insert(0, import_path)
  import gs_utils

  # Private, but Google-readable.
  ACL = gs_utils.GSUtils.PredefinedACL.PRIVATE
  FINE_ACLS = [(
    gs_utils.GSUtils.IdType.GROUP_BY_DOMAIN,
    'google.com',
    gs_utils.GSUtils.Permission.READ
  )]

  # Move dm.json and verbose.log to their own directory for easy upload.
  tmp = tempfile.mkdtemp()
  shutil.move(os.path.join(dm_dir, 'dm.json'),
              os.path.join(tmp, 'dm.json'))
  shutil.move(os.path.join(dm_dir, 'verbose.log'),
              os.path.join(tmp, 'verbose.log'))

  # Make sure the JSON file parses correctly.
  json_file_name = os.path.join(tmp, 'dm.json')
  with open(json_file_name) as jsonFile:
    try:
      json.load(jsonFile)
    except ValueError:
      # Dump the offending file contents to stderr before re-raising so the
      # bad JSON is visible in the bot logs.
      json_content = open(json_file_name).read()
      print >> sys.stderr, "Invalid JSON: \n\n%s\n" % json_content
      raise

  # Only images are left in dm_dir. Upload any new ones.
  gs = gs_utils.GSUtils()
  bucket, image_dest_dir = 'chromium-skia-gm', 'dm-images-v1'
  print 'Uploading images to gs://' + bucket + '/' + image_dest_dir
  gs.upload_dir_contents(dm_dir,
                         bucket,
                         image_dest_dir,
                         upload_if = gs.UploadIf.ALWAYS,
                         predefined_acl = ACL,
                         fine_grained_acl_list = FINE_ACLS)


  # Summaries are siloed by UTC timestamp:
  # /dm-json-v1/year/month/day/hour/git-hash/builder/build-number/dm.json
  now = datetime.datetime.utcnow()
  summary_dest_dir = '/'.join(['dm-json-v1',
                               str(now.year ).zfill(4),
                               str(now.month).zfill(2),
                               str(now.day  ).zfill(2),
                               str(now.hour ).zfill(2),
                               git_hash,
                               builder_name,
                               str(build_number)])

  # Trybot results are further siloed by CL.
  if try_issue:
    summary_dest_dir = '/'.join(['trybot', summary_dest_dir, str(try_issue)])

  # Upload the JSON summary and verbose.log.
  # NOTE(review): unlike the image upload above, no upload_if is passed here,
  # so the gs_utils default policy applies — confirm that is intended.
  print 'Uploading logs to gs://' + bucket + '/' + summary_dest_dir
  gs.upload_dir_contents(tmp,
                         bucket,
                         summary_dest_dir,
                         predefined_acl = ACL,
                         fine_grained_acl_list = FINE_ACLS)


  # Just for hygiene, put dm.json and verbose.log back.
  shutil.move(os.path.join(tmp, 'dm.json'),
              os.path.join(dm_dir, 'dm.json'))
  shutil.move(os.path.join(tmp, 'verbose.log'),
              os.path.join(dm_dir, 'verbose.log'))
  os.rmdir(tmp)
+
+if '__main__' == __name__:
+ main(*sys.argv[1:])
diff --git a/infra/bots/recipe_modules/core/ssh_devices.py b/infra/bots/recipe_modules/core/ssh_devices.py
new file mode 100755
index 0000000000..d8ce937572
--- /dev/null
+++ b/infra/bots/recipe_modules/core/ssh_devices.py
@@ -0,0 +1,30 @@
+#!/usr/bin/env python
+# Copyright 2015 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+
+import collections
+import json
+
+
# Default SSH connection parameters for Skia buildbot slaves.
DEFAULT_PORT = '22'
DEFAULT_USER = 'chrome-bot'


# Connection info for one SSH-accessible device.
SlaveInfo = collections.namedtuple('SlaveInfo',
                                   'ssh_user ssh_host ssh_port')

# Maps slave (bot) hostname -> SlaveInfo. The 'default' entry carries
# placeholder values ('nouser'/'noip'/'noport'); presumably callers use it
# as a "no real device" sentinel — confirm against callers.
SLAVE_INFO = {
  'skiabot-shuttle-ubuntu12-003':
      SlaveInfo('root', '192.168.1.123', DEFAULT_PORT),
  'skiabot-shuttle-ubuntu12-004':
      SlaveInfo('root', '192.168.1.134', DEFAULT_PORT),
  'default':
      SlaveInfo('nouser', 'noip', 'noport'),
}
+
+
+if __name__ == '__main__':
+ print json.dumps(SLAVE_INFO) # pragma: no cover
+