From 90f050387a5851943910f91dce2c01babf0d3896 Mon Sep 17 00:00:00 2001 From: Eric Boren Date: Thu, 24 May 2018 09:14:18 -0400 Subject: [recipes] Rename some modules and files - Move doxygen and binary size out of core and into their own modules. - Rename core -> checkout since that's all it does, shorten method names. - Rename flavors: - Everything is GN, so remove GN/gn_ everywhere. - Merge gn_flavor into default. - Shorten file / module names. Bug: skia:6473 Change-Id: I8ac9ff9c9a267f366206b9991adfa5eb37126ca7 Reviewed-on: https://skia-review.googlesource.com/129176 Commit-Queue: Eric Boren Reviewed-by: Ben Wagner --- infra/bots/recipe_modules/binary_size/__init__.py | 11 + infra/bots/recipe_modules/binary_size/api.py | 22 + .../examples/full.expected/binary_size.json | 26 + .../recipe_modules/binary_size/examples/full.py | 33 + .../binary_size/resources/binary_size_utils.py | 67 ++ .../binary_size/resources/elf_symbolizer.py | 477 +++++++++++++ .../resources/run_binary_size_analysis.py | 788 +++++++++++++++++++++ infra/bots/recipe_modules/checkout/__init__.py | 19 + infra/bots/recipe_modules/checkout/api.py | 150 ++++ .../Build-Debian9-Clang-x86_64-Release-NoDEPS.json | 118 +++ ...Build-Mac-Clang-x86_64-Debug-CommandBuffer.json | 134 ++++ ...ld-Win-Clang-x86_64-Release-ParentRevision.json | 116 +++ .../Housekeeper-Weekly-RecreateSKPs.json | 134 ++++ .../examples/full.expected/cross_repo_trybot.json | 120 ++++ .../examples/full.expected/flutter_trybot.json | 151 ++++ .../full.expected/parent_revision_trybot.json | 116 +++ .../checkout/examples/full.expected/test.json | 120 ++++ .../bots/recipe_modules/checkout/examples/full.py | 143 ++++ infra/bots/recipe_modules/core/__init__.py | 19 - infra/bots/recipe_modules/core/api.py | 155 ---- .../Build-Debian9-Clang-x86_64-Release-NoDEPS.json | 118 --- ...Build-Mac-Clang-x86_64-Debug-CommandBuffer.json | 134 ---- ...ld-Win-Clang-x86_64-Release-ParentRevision.json | 116 --- .../Housekeeper-Weekly-RecreateSKPs.json | 134 ---- .../examples/full.expected/cross_repo_trybot.json | 120 ---- .../examples/full.expected/flutter_trybot.json | 151 ---- .../full.expected/parent_revision_trybot.json | 116 --- .../core/examples/full.expected/test.json | 120 ---- infra/bots/recipe_modules/core/examples/full.py | 143 ---- .../core/resources/binary_size_utils.py | 67 -- .../core/resources/elf_symbolizer.py | 477 ------------- .../core/resources/generate_and_upload_doxygen.py | 75 -- .../core/resources/run_binary_size_analysis.py | 788 --------------------- infra/bots/recipe_modules/doxygen/__init__.py | 9 + infra/bots/recipe_modules/doxygen/api.py | 17 + .../doxygen/examples/full.expected/doxygen.json | 18 + infra/bots/recipe_modules/doxygen/examples/full.py | 27 + .../resources/generate_and_upload_doxygen.py | 75 ++ infra/bots/recipe_modules/flavor/__init__.py | 1 + infra/bots/recipe_modules/flavor/android.py | 542 ++++++++++++++ infra/bots/recipe_modules/flavor/api.py | 37 +- infra/bots/recipe_modules/flavor/chromebook.py | 131 ++++ infra/bots/recipe_modules/flavor/chromecast.py | 154 ++++ infra/bots/recipe_modules/flavor/default.py | 251 +++++++ infra/bots/recipe_modules/flavor/default_flavor.py | 157 ---- .../recipe_modules/flavor/gn_android_flavor.py | 540 -------------- .../recipe_modules/flavor/gn_chromebook_flavor.py | 134 ---- .../recipe_modules/flavor/gn_chromecast_flavor.py | 153 ---- infra/bots/recipe_modules/flavor/gn_flavor.py | 116 --- infra/bots/recipe_modules/flavor/ios.py | 90 +++ infra/bots/recipe_modules/flavor/ios_flavor.py | 87 --- 
infra/bots/recipe_modules/flavor/valgrind.py | 30 + .../bots/recipe_modules/flavor/valgrind_flavor.py | 30 - infra/bots/recipe_modules/infra/examples/full.py | 1 - infra/bots/recipes/bookmaker.py | 6 +- infra/bots/recipes/calmbench.py | 1 - infra/bots/recipes/check_generated_files.py | 6 +- infra/bots/recipes/compile.py | 8 +- infra/bots/recipes/ct_skps.py | 6 +- .../Housekeeper-PerCommit-Trybot.json | 3 +- .../Housekeeper-PerCommit.json | 5 +- infra/bots/recipes/housekeeper.py | 44 +- infra/bots/recipes/infra.py | 10 +- infra/bots/recipes/perf.py | 1 - infra/bots/recipes/recreate_skps.py | 6 +- infra/bots/recipes/skqp_test.py | 1 - infra/bots/recipes/test.py | 1 - infra/bots/recipes/upload_calmbench_results.py | 1 - infra/bots/recipes/upload_skiaserve.py | 1 - 69 files changed, 4152 insertions(+), 4026 deletions(-) create mode 100644 infra/bots/recipe_modules/binary_size/__init__.py create mode 100644 infra/bots/recipe_modules/binary_size/api.py create mode 100644 infra/bots/recipe_modules/binary_size/examples/full.expected/binary_size.json create mode 100644 infra/bots/recipe_modules/binary_size/examples/full.py create mode 100644 infra/bots/recipe_modules/binary_size/resources/binary_size_utils.py create mode 100644 infra/bots/recipe_modules/binary_size/resources/elf_symbolizer.py create mode 100755 infra/bots/recipe_modules/binary_size/resources/run_binary_size_analysis.py create mode 100644 infra/bots/recipe_modules/checkout/__init__.py create mode 100644 infra/bots/recipe_modules/checkout/api.py create mode 100644 infra/bots/recipe_modules/checkout/examples/full.expected/Build-Debian9-Clang-x86_64-Release-NoDEPS.json create mode 100644 infra/bots/recipe_modules/checkout/examples/full.expected/Build-Mac-Clang-x86_64-Debug-CommandBuffer.json create mode 100644 infra/bots/recipe_modules/checkout/examples/full.expected/Build-Win-Clang-x86_64-Release-ParentRevision.json create mode 100644 infra/bots/recipe_modules/checkout/examples/full.expected/Housekeeper-Weekly-RecreateSKPs.json create mode 100644 infra/bots/recipe_modules/checkout/examples/full.expected/cross_repo_trybot.json create mode 100644 infra/bots/recipe_modules/checkout/examples/full.expected/flutter_trybot.json create mode 100644 infra/bots/recipe_modules/checkout/examples/full.expected/parent_revision_trybot.json create mode 100644 infra/bots/recipe_modules/checkout/examples/full.expected/test.json create mode 100644 infra/bots/recipe_modules/checkout/examples/full.py delete mode 100644 infra/bots/recipe_modules/core/__init__.py delete mode 100644 infra/bots/recipe_modules/core/api.py delete mode 100644 infra/bots/recipe_modules/core/examples/full.expected/Build-Debian9-Clang-x86_64-Release-NoDEPS.json delete mode 100644 infra/bots/recipe_modules/core/examples/full.expected/Build-Mac-Clang-x86_64-Debug-CommandBuffer.json delete mode 100644 infra/bots/recipe_modules/core/examples/full.expected/Build-Win-Clang-x86_64-Release-ParentRevision.json delete mode 100644 infra/bots/recipe_modules/core/examples/full.expected/Housekeeper-Weekly-RecreateSKPs.json delete mode 100644 infra/bots/recipe_modules/core/examples/full.expected/cross_repo_trybot.json delete mode 100644 infra/bots/recipe_modules/core/examples/full.expected/flutter_trybot.json delete mode 100644 infra/bots/recipe_modules/core/examples/full.expected/parent_revision_trybot.json delete mode 100644 infra/bots/recipe_modules/core/examples/full.expected/test.json delete mode 100644 infra/bots/recipe_modules/core/examples/full.py delete mode 100644 
infra/bots/recipe_modules/core/resources/binary_size_utils.py delete mode 100644 infra/bots/recipe_modules/core/resources/elf_symbolizer.py delete mode 100755 infra/bots/recipe_modules/core/resources/generate_and_upload_doxygen.py delete mode 100755 infra/bots/recipe_modules/core/resources/run_binary_size_analysis.py create mode 100644 infra/bots/recipe_modules/doxygen/__init__.py create mode 100644 infra/bots/recipe_modules/doxygen/api.py create mode 100644 infra/bots/recipe_modules/doxygen/examples/full.expected/doxygen.json create mode 100644 infra/bots/recipe_modules/doxygen/examples/full.py create mode 100755 infra/bots/recipe_modules/doxygen/resources/generate_and_upload_doxygen.py create mode 100644 infra/bots/recipe_modules/flavor/android.py create mode 100644 infra/bots/recipe_modules/flavor/chromebook.py create mode 100644 infra/bots/recipe_modules/flavor/chromecast.py create mode 100644 infra/bots/recipe_modules/flavor/default.py delete mode 100644 infra/bots/recipe_modules/flavor/default_flavor.py delete mode 100644 infra/bots/recipe_modules/flavor/gn_android_flavor.py delete mode 100644 infra/bots/recipe_modules/flavor/gn_chromebook_flavor.py delete mode 100644 infra/bots/recipe_modules/flavor/gn_chromecast_flavor.py delete mode 100644 infra/bots/recipe_modules/flavor/gn_flavor.py create mode 100644 infra/bots/recipe_modules/flavor/ios.py delete mode 100644 infra/bots/recipe_modules/flavor/ios_flavor.py create mode 100644 infra/bots/recipe_modules/flavor/valgrind.py delete mode 100644 infra/bots/recipe_modules/flavor/valgrind_flavor.py diff --git a/infra/bots/recipe_modules/binary_size/__init__.py b/infra/bots/recipe_modules/binary_size/__init__.py new file mode 100644 index 0000000000..4e3b671627 --- /dev/null +++ b/infra/bots/recipe_modules/binary_size/__init__.py @@ -0,0 +1,11 @@ +# Copyright 2018 The Chromium Authors. All rights reserved. +# Use of this source code is governed by a BSD-style license that can be +# found in the LICENSE file. + +DEPS = [ + 'recipe_engine/context', + 'recipe_engine/properties', + 'recipe_engine/step', + 'run', + 'vars', +] diff --git a/infra/bots/recipe_modules/binary_size/api.py b/infra/bots/recipe_modules/binary_size/api.py new file mode 100644 index 0000000000..db8ee9d3a9 --- /dev/null +++ b/infra/bots/recipe_modules/binary_size/api.py @@ -0,0 +1,22 @@ +# Copyright 2018 The Chromium Authors. All rights reserved. +# Use of this source code is governed by a BSD-style license that can be +# found in the LICENSE file.
+ + +from recipe_engine import recipe_api +from recipe_engine import config_types + + +class BinarySizeApi(recipe_api.RecipeApi): + def run_analysis(self, skia_dir, dest_file): + cmd = ['python', self.resource('run_binary_size_analysis.py'), + '--library', self.m.vars.skia_out.join('libskia.so'), + '--githash', self.m.properties['revision'], + '--dest', dest_file] + if self.m.vars.is_trybot: + cmd.extend(['--issue_number', str(self.m.properties['patch_issue'])]) + with self.m.context(cwd=skia_dir): + self.m.run( + self.m.step, + 'generate binary size data', + cmd=cmd) diff --git a/infra/bots/recipe_modules/binary_size/examples/full.expected/binary_size.json b/infra/bots/recipe_modules/binary_size/examples/full.expected/binary_size.json new file mode 100644 index 0000000000..a5dd9413a5 --- /dev/null +++ b/infra/bots/recipe_modules/binary_size/examples/full.expected/binary_size.json @@ -0,0 +1,26 @@ +[ + { + "cmd": [ + "python", + "RECIPE_MODULE[skia::binary_size]/resources/run_binary_size_analysis.py", + "--library", + "[START_DIR]/build/out/Release/libskia.so", + "--githash", + "abc123", + "--dest", + "[START_DIR]/binary_size", + "--issue_number", + "456789" + ], + "env": { + "CHROME_HEADLESS": "1", + "PATH": "<PATH>:RECIPE_PACKAGE_REPO[depot_tools]" + }, + "name": "generate binary size data" + }, + { + "name": "$result", + "recipe_result": null, + "status_code": 0 + } +] \ No newline at end of file diff --git a/infra/bots/recipe_modules/binary_size/examples/full.py b/infra/bots/recipe_modules/binary_size/examples/full.py new file mode 100644 index 0000000000..135fedc8e4 --- /dev/null +++ b/infra/bots/recipe_modules/binary_size/examples/full.py @@ -0,0 +1,33 @@ +# Copyright 2018 The Chromium Authors. All rights reserved. +# Use of this source code is governed by a BSD-style license that can be +# found in the LICENSE file. + + +DEPS = [ + 'binary_size', + 'recipe_engine/path', + 'recipe_engine/properties', + 'vars', +] + + +def RunSteps(api): + api.vars.setup() + dest_file = api.path['start_dir'].join('binary_size') + api.binary_size.run_analysis(api.path['start_dir'], dest_file) + + +def GenTests(api): + yield ( + api.test('binary_size') + + api.properties(buildername='Housekeeper-PerCommit', + repository='https://skia.googlesource.com/skia.git', + revision='abc123', + path_config='kitchen', + swarm_out_dir='[SWARM_OUT_DIR]') + + api.properties.tryserver( + buildername='Housekeeper-PerCommit', + gerrit_project='skia', + gerrit_url='https://skia-review.googlesource.com/', + ) + ) diff --git a/infra/bots/recipe_modules/binary_size/resources/binary_size_utils.py b/infra/bots/recipe_modules/binary_size/resources/binary_size_utils.py new file mode 100644 index 0000000000..c09a65dccd --- /dev/null +++ b/infra/bots/recipe_modules/binary_size/resources/binary_size_utils.py @@ -0,0 +1,67 @@ +# Copyright 2014 The Chromium Authors. All rights reserved. +# Use of this source code is governed by a BSD-style license that can be +# found in the LICENSE file. + +"""Common utilities for tools that deal with binary size information. + +Copied from chromium/src/build/android/pylib/symbols/binary_size_tools.py. +""" + +import logging +import re + + +def ParseNm(nm_lines): + """Parse nm output, returning data for all relevant (to binary size) + symbols and ignoring the rest. + + Args: + nm_lines: an iterable over lines of nm output. + + Yields: + (symbol name, symbol type, symbol size, source file path). + + Path may be None if nm couldn't figure out the source file.
+ """ + + # Match lines with size, symbol, optional location, optional discriminator + sym_re = re.compile(r'^[0-9a-f]{8,} ' # address (8+ hex digits) + '([0-9a-f]{8,}) ' # size (8+ hex digits) + '(.) ' # symbol type, one character + '([^\t]+)' # symbol name, separated from next by tab + '(?:\t(.*):[\d\?]+)?.*$') # location + # Match lines with addr but no size. + addr_re = re.compile(r'^[0-9a-f]{8,} (.) ([^\t]+)(?:\t.*)?$') + # Match lines that don't have an address at all -- typically external symbols. + noaddr_re = re.compile(r'^ {8,} (.) (.*)$') + # Match lines with no symbol name, only addr and type + addr_only_re = re.compile(r'^[0-9a-f]{8,} (.)$') + + for line in nm_lines: + line = line.rstrip() + match = sym_re.match(line) + if match: + size, sym_type, sym = match.groups()[0:3] + size = int(size, 16) + if sym_type in ('B', 'b'): + continue # skip all BSS for now. + path = match.group(4) + yield sym, sym_type, size, path + continue + match = addr_re.match(line) + if match: + # sym_type, sym = match.groups()[0:2] + continue # No size == we don't care. + match = noaddr_re.match(line) + if match: + sym_type, sym = match.groups() + if sym_type in ('U', 'w'): + continue # external or weak symbol + match = addr_only_re.match(line) + if match: + continue # Nothing to do. + + + # If we reach this part of the loop, there was something in the + # line that we didn't expect or recognize. + logging.warning('nm output parser failed to parse: %s', repr(line)) diff --git a/infra/bots/recipe_modules/binary_size/resources/elf_symbolizer.py b/infra/bots/recipe_modules/binary_size/resources/elf_symbolizer.py new file mode 100644 index 0000000000..de9c141219 --- /dev/null +++ b/infra/bots/recipe_modules/binary_size/resources/elf_symbolizer.py @@ -0,0 +1,477 @@ +# Copyright 2014 The Chromium Authors. All rights reserved. +# Use of this source code is governed by a BSD-style license that can be +# found in the LICENSE file. + +"""The ElfSymbolizer class for symbolizing Executable and Linkable Files. + +Adapted for Skia's use from +chromium/src/build/android/pylib/symbols/elf_symbolizer.py. + +Main changes: +-- Added prefix_to_remove param to remove path prefix from tree data. +""" + +import collections +import datetime +import logging +import multiprocessing +import os +import posixpath +import Queue +import re +import subprocess +import sys +import threading + + +# addr2line builds a possibly infinite memory cache that can exhaust +# the computer's memory if allowed to grow for too long. This constant +# controls how many lookups we do before restarting the process. 4000 +# gives near peak performance without extreme memory usage. +ADDR2LINE_RECYCLE_LIMIT = 4000 + + +class ELFSymbolizer(object): + """An uber-fast (multiprocessing, pipelined and asynchronous) ELF symbolizer. + + This class is a frontend for addr2line (part of GNU binutils), designed to + symbolize batches of large numbers of symbols for a given ELF file. It + supports sharding symbolization against many addr2line instances and + pipelining of multiple requests per each instance (in order to hide addr2line + internals and OS pipe latencies). + + The interface exhibited by this class is a very simple asynchronous interface, + which is based on the following three methods: + - SymbolizeAsync(): used to request (enqueue) resolution of a given address. + - The |callback| method: used to communicated back the symbol information. + - Join(): called to conclude the batch to gather the last outstanding results. 
+ In essence, before the Join method returns, this class will have issued as + many callbacks as the number of SymbolizeAsync() calls. In this regard, note + that due to multiprocess sharding, callbacks can be delivered out of order. + + Some background about addr2line: + - it is invoked passing the elf path in the cmdline, piping the addresses in + its stdin and getting results on its stdout. + - it has pretty large response times for the first requests, but it + works very well in streaming mode once it has been warmed up. + - it doesn't scale by itself (on more cores). However, spawning multiple + instances at the same time on the same file is pretty efficient as they + keep hitting the pagecache and become mostly CPU bound. + - it might hang or crash, mostly for OOM. This class deals with both of these + problems. + + Despite the "scary" imports and the multi* words above, (almost) no multi- + threading/processing is involved from the python viewpoint. Concurrency + here is achieved by spawning several addr2line subprocesses and handling their + output pipes asynchronously. Therefore, all the code here (with the exception + of the Queue instance in Addr2Line) should be free from mind-blowing + thread-safety concerns. + + The multiprocess sharding works as follows: + The symbolizer tries to use the lowest number of addr2line instances as + possible (with respect of |max_concurrent_jobs|) and enqueue all the requests + in a single addr2line instance. For few symbols (i.e. dozens) sharding isn't + worth the startup cost. + The multiprocess logic kicks in as soon as the queues for the existing + instances grow. Specifically, once all the existing instances reach the + |max_queue_size| bound, a new addr2line instance is kicked in. + In the case of a very eager producer (i.e. all |max_concurrent_jobs| instances + have a backlog of |max_queue_size|), back-pressure is applied on the caller by + blocking the SymbolizeAsync method. + + This module has been deliberately designed to be dependency free (w.r.t. of + other modules in this project), to allow easy reuse in external projects. + """ + + def __init__(self, elf_file_path, addr2line_path, callback, inlines=False, + max_concurrent_jobs=None, addr2line_timeout=30, max_queue_size=50, + source_root_path=None, strip_base_path=None, prefix_to_remove=None): + """Args: + elf_file_path: path of the elf file to be symbolized. + addr2line_path: path of the toolchain's addr2line binary. + callback: a callback which will be invoked for each resolved symbol with + the two args (sym_info, callback_arg). The former is an instance of + |ELFSymbolInfo| and contains the symbol information. The latter is an + embedder-provided argument which is passed to SymbolizeAsync(). + inlines: when True, the ELFSymbolInfo will contain also the details about + the outer inlining functions. When False, only the innermost function + will be provided. + max_concurrent_jobs: Max number of addr2line instances spawned. + Parallelize responsibly, addr2line is a memory and I/O monster. + max_queue_size: Max number of outstanding requests per addr2line instance. + addr2line_timeout: Max time (in seconds) to wait for a addr2line response. + After the timeout, the instance will be considered hung and respawned. 
+ source_root_path: In some toolchains only the name of the source file is + is output, without any path information; disambiguation searches + through the source directory specified by |source_root_path| argument + for files whose name matches, adding the full path information to the + output. For example, if the toolchain outputs "unicode.cc" and there + is a file called "unicode.cc" located under |source_root_path|/foo, + the tool will replace "unicode.cc" with + "|source_root_path|/foo/unicode.cc". If there are multiple files with + the same name, disambiguation will fail because the tool cannot + determine which of the files was the source of the symbol. + strip_base_path: Rebases the symbols source paths onto |source_root_path| + (i.e replace |strip_base_path| with |source_root_path). + prefix_to_remove: Removes the prefix from ElfSymbolInfo output. Skia added + """ + assert(os.path.isfile(addr2line_path)), 'Cannot find ' + addr2line_path + self.elf_file_path = elf_file_path + self.addr2line_path = addr2line_path + self.callback = callback + self.inlines = inlines + self.max_concurrent_jobs = (max_concurrent_jobs or + min(multiprocessing.cpu_count(), 4)) + self.max_queue_size = max_queue_size + self.addr2line_timeout = addr2line_timeout + self.requests_counter = 0 # For generating monotonic request IDs. + self._a2l_instances = [] # Up to |max_concurrent_jobs| _Addr2Line inst. + + # Skia addition: remove the given prefix from tree paths. + self.prefix_to_remove = prefix_to_remove + + # If necessary, create disambiguation lookup table + self.disambiguate = source_root_path is not None + self.disambiguation_table = {} + self.strip_base_path = strip_base_path + if(self.disambiguate): + self.source_root_path = os.path.abspath(source_root_path) + self._CreateDisambiguationTable() + + # Create one addr2line instance. More instances will be created on demand + # (up to |max_concurrent_jobs|) depending on the rate of the requests. + self._CreateNewA2LInstance() + + def SymbolizeAsync(self, addr, callback_arg=None): + """Requests symbolization of a given address. + + This method is not guaranteed to return immediately. It generally does, but + in some scenarios (e.g. all addr2line instances have full queues) it can + block to create back-pressure. + + Args: + addr: address to symbolize. + callback_arg: optional argument which will be passed to the |callback|.""" + assert(isinstance(addr, int)) + + # Process all the symbols that have been resolved in the meanwhile. + # Essentially, this drains all the addr2line(s) out queues. + for a2l_to_purge in self._a2l_instances: + a2l_to_purge.ProcessAllResolvedSymbolsInQueue() + a2l_to_purge.RecycleIfNecessary() + + # Find the best instance according to this logic: + # 1. Find an existing instance with the shortest queue. + # 2. If all of instances' queues are full, but there is room in the pool, + # (i.e. < |max_concurrent_jobs|) create a new instance. + # 3. If there were already |max_concurrent_jobs| instances and all of them + # had full queues, make back-pressure. + + # 1. + def _SortByQueueSizeAndReqID(a2l): + return (a2l.queue_size, a2l.first_request_id) + a2l = min(self._a2l_instances, key=_SortByQueueSizeAndReqID) + + # 2. + if (a2l.queue_size >= self.max_queue_size and + len(self._a2l_instances) < self.max_concurrent_jobs): + a2l = self._CreateNewA2LInstance() + + # 3. 
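+    # If even the least-loaded instance (chosen in step 1) is full and no
+    # new instance could be added in step 2, the wait below applies
+    # back-pressure by blocking the caller until that instance drains one
+    # pending request.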
+ if a2l.queue_size >= self.max_queue_size: + a2l.WaitForNextSymbolInQueue() + + a2l.EnqueueRequest(addr, callback_arg) + + def Join(self): + """Waits for all the outstanding requests to complete and terminates.""" + for a2l in self._a2l_instances: + a2l.WaitForIdle() + a2l.Terminate() + + def _CreateNewA2LInstance(self): + assert(len(self._a2l_instances) < self.max_concurrent_jobs) + a2l = ELFSymbolizer.Addr2Line(self) + self._a2l_instances.append(a2l) + return a2l + + def _CreateDisambiguationTable(self): + """ Non-unique file names will result in None entries""" + self.disambiguation_table = {} + + for root, _, filenames in os.walk(self.source_root_path): + for f in filenames: + self.disambiguation_table[f] = os.path.join(root, f) if (f not in + self.disambiguation_table) else None + + + class Addr2Line(object): + """A python wrapper around an addr2line instance. + + The communication with the addr2line process looks as follows: + [STDIN] [STDOUT] (from addr2line's viewpoint) + > f001111 + > f002222 + < Symbol::Name(foo, bar) for f001111 + < /path/to/source/file.c:line_number + > f003333 + < Symbol::Name2() for f002222 + < /path/to/source/file.c:line_number + < Symbol::Name3() for f003333 + < /path/to/source/file.c:line_number + """ + + SYM_ADDR_RE = re.compile(r'([^:]+):(\?|\d+).*') + + def __init__(self, symbolizer): + self._symbolizer = symbolizer + self._lib_file_name = posixpath.basename(symbolizer.elf_file_path) + + # The request queue (i.e. addresses pushed to addr2line's stdin and not + # yet retrieved on stdout) + self._request_queue = collections.deque() + + # This is essentially len(self._request_queue). It has been optimized to a + # separate field because turned out to be a perf hot-spot. + self.queue_size = 0 + + # Keep track of the number of symbols a process has processed to + # avoid a single process growing too big and using all the memory. + self._processed_symbols_count = 0 + + # Objects required to handle the addr2line subprocess. + self._proc = None # Subprocess.Popen(...) instance. + self._thread = None # Threading.thread instance. + self._out_queue = None # Queue.Queue instance (for buffering a2l stdout). + self._RestartAddr2LineProcess() + + def EnqueueRequest(self, addr, callback_arg): + """Pushes an address to addr2line's stdin (and keeps track of it).""" + self._symbolizer.requests_counter += 1 # For global "age" of requests. + req_idx = self._symbolizer.requests_counter + self._request_queue.append((addr, callback_arg, req_idx)) + self.queue_size += 1 + self._WriteToA2lStdin(addr) + + def WaitForIdle(self): + """Waits until all the pending requests have been symbolized.""" + while self.queue_size > 0: + self.WaitForNextSymbolInQueue() + + def WaitForNextSymbolInQueue(self): + """Waits for the next pending request to be symbolized.""" + if not self.queue_size: + return + + # This outer loop guards against a2l hanging (detecting stdout timeout). + while True: + start_time = datetime.datetime.now() + timeout = datetime.timedelta(seconds=self._symbolizer.addr2line_timeout) + + # The inner loop guards against a2l crashing (checking if it exited). + while (datetime.datetime.now() - start_time < timeout): + # poll() returns !None if the process exited. a2l should never exit. + if self._proc.poll(): + logging.warning('addr2line crashed, respawning (lib: %s).' 
% + self._lib_file_name) + self._RestartAddr2LineProcess() + # TODO(primiano): the best thing to do in this case would be + # shrinking the pool size as, very likely, addr2line is crashed + # due to low memory (and the respawned one will die again soon). + + try: + lines = self._out_queue.get(block=True, timeout=0.25) + except Queue.Empty: + # On timeout (1/4 s.) repeat the inner loop and check if either the + # addr2line process did crash or we waited its output for too long. + continue + + # In nominal conditions, we get straight to this point. + self._ProcessSymbolOutput(lines) + return + + # If this point is reached, we waited more than |addr2line_timeout|. + logging.warning('Hung addr2line process, respawning (lib: %s).' % + self._lib_file_name) + self._RestartAddr2LineProcess() + + def ProcessAllResolvedSymbolsInQueue(self): + """Consumes all the addr2line output lines produced (without blocking).""" + if not self.queue_size: + return + while True: + try: + lines = self._out_queue.get_nowait() + except Queue.Empty: + break + self._ProcessSymbolOutput(lines) + + def RecycleIfNecessary(self): + """Restarts the process if it has been used for too long. + + A long running addr2line process will consume excessive amounts + of memory without any gain in performance.""" + if self._processed_symbols_count >= ADDR2LINE_RECYCLE_LIMIT: + self._RestartAddr2LineProcess() + + + def Terminate(self): + """Kills the underlying addr2line process. + + The poller |_thread| will terminate as well due to the broken pipe.""" + try: + self._proc.kill() + self._proc.communicate() # Essentially wait() without risking deadlock. + except Exception: # An exception while terminating? How interesting. + pass + self._proc = None + + def _WriteToA2lStdin(self, addr): + self._proc.stdin.write('%s\n' % hex(addr)) + if self._symbolizer.inlines: + # In the case of inlines we output an extra blank line, which causes + # addr2line to emit a (??,??:0) tuple that we use as a boundary marker. 
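+      # Illustrative stdout for one address when two levels of inlining are
+      # involved (function and file names here are hypothetical):
+      #   InlinedFn()
+      #   /path/to/inner.cc:42
+      #   OuterFn()
+      #   /path/to/outer.cc:10
+      #   ??
+      #   ??:0
+      # The trailing (??, ??:0) pair comes from the blank line written below
+      # and tells the reader thread that this symbol's lines are complete.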
+ self._proc.stdin.write('\n') + self._proc.stdin.flush() + + def _ProcessSymbolOutput(self, lines): + """Parses an addr2line symbol output and triggers the client callback.""" + (_, callback_arg, _) = self._request_queue.popleft() + self.queue_size -= 1 + + innermost_sym_info = None + sym_info = None + for (line1, line2) in lines: + prev_sym_info = sym_info + name = line1 if not line1.startswith('?') else None + source_path = None + source_line = None + m = ELFSymbolizer.Addr2Line.SYM_ADDR_RE.match(line2) + if m: + if not m.group(1).startswith('?'): + source_path = m.group(1) + if not m.group(2).startswith('?'): + source_line = int(m.group(2)) + else: + logging.warning('Got invalid symbol path from addr2line: %s' % line2) + + # In case disambiguation is on, and needed + was_ambiguous = False + disambiguated = False + if self._symbolizer.disambiguate: + if source_path and not posixpath.isabs(source_path): + path = self._symbolizer.disambiguation_table.get(source_path) + was_ambiguous = True + disambiguated = path is not None + source_path = path if disambiguated else source_path + + # Use absolute paths (so that paths are consistent, as disambiguation + # uses absolute paths) + if source_path and not was_ambiguous: + source_path = os.path.abspath(source_path) + + if source_path and self._symbolizer.strip_base_path: + # Strip the base path + source_path = re.sub('^' + self._symbolizer.strip_base_path, + self._symbolizer.source_root_path or '', source_path) + + sym_info = ELFSymbolInfo(name, source_path, source_line, was_ambiguous, + disambiguated, + self._symbolizer.prefix_to_remove) + if prev_sym_info: + prev_sym_info.inlined_by = sym_info + if not innermost_sym_info: + innermost_sym_info = sym_info + + self._processed_symbols_count += 1 + self._symbolizer.callback(innermost_sym_info, callback_arg) + + def _RestartAddr2LineProcess(self): + if self._proc: + self.Terminate() + + # The only reason of existence of this Queue (and the corresponding + # Thread below) is the lack of a subprocess.stdout.poll_avail_lines(). + # Essentially this is a pipe able to extract a couple of lines atomically. + self._out_queue = Queue.Queue() + + # Start the underlying addr2line process in line buffered mode. + + cmd = [self._symbolizer.addr2line_path, '--functions', '--demangle', + '--exe=' + self._symbolizer.elf_file_path] + if self._symbolizer.inlines: + cmd += ['--inlines'] + self._proc = subprocess.Popen(cmd, bufsize=1, stdout=subprocess.PIPE, + stdin=subprocess.PIPE, stderr=sys.stderr, close_fds=True) + + # Start the poller thread, which simply moves atomically the lines read + # from the addr2line's stdout to the |_out_queue|. + self._thread = threading.Thread( + target=ELFSymbolizer.Addr2Line.StdoutReaderThread, + args=(self._proc.stdout, self._out_queue, self._symbolizer.inlines)) + self._thread.daemon = True # Don't prevent early process exit. + self._thread.start() + + self._processed_symbols_count = 0 + + # Replay the pending requests on the new process (only for the case + # of a hung addr2line timing out during the game). + for (addr, _, _) in self._request_queue: + self._WriteToA2lStdin(addr) + + @staticmethod + def StdoutReaderThread(process_pipe, queue, inlines): + """The poller thread fn, which moves the addr2line stdout to the |queue|. + + This is the only piece of code not running on the main thread. It merely + writes to a Queue, which is thread-safe. 
In the case of inlines, it + detects the ??,??:0 marker and sends the lines atomically, such that the + main thread always receives all the lines corresponding to one symbol in + one shot.""" + try: + lines_for_one_symbol = [] + while True: + line1 = process_pipe.readline().rstrip('\r\n') + line2 = process_pipe.readline().rstrip('\r\n') + if not line1 or not line2: + break + inline_has_more_lines = inlines and (len(lines_for_one_symbol) == 0 or + (line1 != '??' and line2 != '??:0')) + if not inlines or inline_has_more_lines: + lines_for_one_symbol += [(line1, line2)] + if inline_has_more_lines: + continue + queue.put(lines_for_one_symbol) + lines_for_one_symbol = [] + process_pipe.close() + + # Every addr2line processes will die at some point, please die silently. + except (IOError, OSError): + pass + + @property + def first_request_id(self): + """Returns the request_id of the oldest pending request in the queue.""" + return self._request_queue[0][2] if self._request_queue else 0 + + +class ELFSymbolInfo(object): + """The result of the symbolization passed as first arg. of each callback.""" + + def __init__(self, name, source_path, source_line, was_ambiguous=False, + disambiguated=False, prefix_to_remove=None): + """All the fields here can be None (if addr2line replies with '??').""" + self.name = name + if source_path and source_path.startswith(prefix_to_remove): + source_path = source_path[len(prefix_to_remove) : ] + self.source_path = source_path + self.source_line = source_line + # In the case of |inlines|=True, the |inlined_by| points to the outer + # function inlining the current one (and so on, to form a chain). + self.inlined_by = None + self.disambiguated = disambiguated + self.was_ambiguous = was_ambiguous + + def __str__(self): + return '%s [%s:%d]' % ( + self.name or '??', self.source_path or '??', self.source_line or 0) diff --git a/infra/bots/recipe_modules/binary_size/resources/run_binary_size_analysis.py b/infra/bots/recipe_modules/binary_size/resources/run_binary_size_analysis.py new file mode 100755 index 0000000000..822b366614 --- /dev/null +++ b/infra/bots/recipe_modules/binary_size/resources/run_binary_size_analysis.py @@ -0,0 +1,788 @@ +#!/usr/bin/env python +# Copyright 2014 The Chromium Authors. All rights reserved. +# Use of this source code is governed by a BSD-style license that can be +# found in the LICENSE file. + +"""Generate a spatial analysis against an arbitrary library. + +Adapted for Skia's use case from +chromium/src/tools/binary_size/run_binary_size_analysis.py. Main changes: + +-- Cleans up some deprecated codes. +-- Always use relative code path so the tree root is Skia repo's root. +-- Instead of outputting the standalone HTML/CSS/JS filesets, writes the + TreeMap JSON data into a Google Storage bucket. +-- Adds githash and total_size to the JSON data. +-- Outputs another summary data in JSON Bench format for skiaperf ingestion. + +The output JSON data for visualization is in the following format: + +{ + "githash": 123abc, + "commit_ts": 1234567890, + "total_size": 1234567, + "key": {"source_type": "binary_size"}, + "tree_data": { + "maxDepth": 9, + "k": "p", "children":[ + {"k":"p","children":[ + {"k":"p","children":[ + {"k":"p","lastPathElement":true,"children":[ + {"k":"b","t":"t","children":[ + {"k":"s", "t":"t", "value":4029, + "n":"etc_encode_subblock_helper(unsigned char const*, ...)" + }, + ...... + } +} + +Another JSON file is generated for size summaries to be used in skiaperf. 
The +JSON format details can be found at: + https://github.com/google/skia/blob/master/bench/ResultsWriter.h#L54 +and: + https://skia.googlesource.com/buildbot/+/master/perf/go/ingester/nanobench.go + +In the binary size case, outputs look like: + +{ + "gitHash": "123abc", + "key": { + "source_type": "binarysize" + } + "results: { + "src_lazy_global_weak_symbol": { + "memory": { + "bytes": 41, + "options": { + "path": "src_lazy", + "symbol": "global_weak_symbol" + } + } + }, + "src_lazy_global_read_only_data": { + "memory": { + "bytes": 13476, + "options": { + "path": "src_lazy", + "symbol": "global_read_only_data" + } + } + }, + ... + } +} + +""" + +import collections +import datetime +import json +import logging +import multiprocessing +import optparse +import os +import re +import shutil +import struct +import subprocess +import sys +import tempfile +import time +import urllib2 + +import binary_size_utils +import elf_symbolizer + +# Node dictionary keys. These are output in json read by the webapp so +# keep them short to save file size. +# Note: If these change, the webapp must also change. +NODE_TYPE_KEY = 'k' +NODE_NAME_KEY = 'n' +NODE_CHILDREN_KEY = 'children' +NODE_SYMBOL_TYPE_KEY = 't' +NODE_SYMBOL_SIZE_KEY = 'value' +NODE_MAX_DEPTH_KEY = 'maxDepth' +NODE_LAST_PATH_ELEMENT_KEY = 'lastPathElement' + +# The display name of the bucket where we put symbols without path. +NAME_NO_PATH_BUCKET = '(No Path)' + +# Try to keep data buckets smaller than this to avoid killing the +# graphing lib. +BIG_BUCKET_LIMIT = 3000 + +# Skia addition: relative dir for libskia.so from code base. +LIBSKIA_RELATIVE_PATH = os.path.join('out', 'Release') + +# Skia addition: dictionary mapping symbol type code to symbol name. +# See +# https://code.google.com/p/chromium/codesearch#chromium/src/tools/binary_size/template/D3SymbolTreeMap.js&l=74 +SYMBOL_MAP = { + 'A': 'global_absolute', + 'B': 'global_uninitialized_data', + 'b': 'local_uninitialized_data', + 'C': 'global_uninitialized_common', + 'D': 'global_initialized_data', + 'd': 'local_initialized_data', + 'G': 'global_small initialized_data', + 'g': 'local_small_initialized_data', + 'i': 'indirect_function', + 'N': 'debugging', + 'p': 'stack_unwind', + 'R': 'global_read_only_data', + 'r': 'local_read_only_data', + 'S': 'global_small_uninitialized_data', + 's': 'local_small_uninitialized_data', + 'T': 'global_code', + 't': 'local_code', + 'U': 'undefined', + 'u': 'unique', + 'V': 'global_weak_object', + 'v': 'local_weak_object', + 'W': 'global_weak_symbol', + 'w': 'local_weak_symbol', + '@': 'vtable_entry', + '-': 'stabs_debugging', + '?': 'unrecognized', +} + + +def _MkChild(node, name): + child = node[NODE_CHILDREN_KEY].get(name) + if child is None: + child = {NODE_NAME_KEY: name, + NODE_CHILDREN_KEY: {}} + node[NODE_CHILDREN_KEY][name] = child + return child + + +def SplitNoPathBucket(node): + """NAME_NO_PATH_BUCKET can be too large for the graphing lib to + handle. 
Split it into sub-buckets in that case.""" + root_children = node[NODE_CHILDREN_KEY] + if NAME_NO_PATH_BUCKET in root_children: + no_path_bucket = root_children[NAME_NO_PATH_BUCKET] + old_children = no_path_bucket[NODE_CHILDREN_KEY] + count = 0 + for symbol_type, symbol_bucket in old_children.iteritems(): + count += len(symbol_bucket[NODE_CHILDREN_KEY]) + if count > BIG_BUCKET_LIMIT: + new_children = {} + no_path_bucket[NODE_CHILDREN_KEY] = new_children + current_bucket = None + index = 0 + for symbol_type, symbol_bucket in old_children.iteritems(): + for symbol_name, value in symbol_bucket[NODE_CHILDREN_KEY].iteritems(): + if index % BIG_BUCKET_LIMIT == 0: + group_no = (index / BIG_BUCKET_LIMIT) + 1 + current_bucket = _MkChild(no_path_bucket, + '%s subgroup %d' % (NAME_NO_PATH_BUCKET, + group_no)) + assert not NODE_TYPE_KEY in node or node[NODE_TYPE_KEY] == 'p' + node[NODE_TYPE_KEY] = 'p' # p for path + index += 1 + symbol_size = value[NODE_SYMBOL_SIZE_KEY] + AddSymbolIntoFileNode(current_bucket, symbol_type, + symbol_name, symbol_size) + + +def MakeChildrenDictsIntoLists(node): + largest_list_len = 0 + if NODE_CHILDREN_KEY in node: + largest_list_len = len(node[NODE_CHILDREN_KEY]) + child_list = [] + for child in node[NODE_CHILDREN_KEY].itervalues(): + child_largest_list_len = MakeChildrenDictsIntoLists(child) + if child_largest_list_len > largest_list_len: + largest_list_len = child_largest_list_len + child_list.append(child) + node[NODE_CHILDREN_KEY] = child_list + + return largest_list_len + + +def AddSymbolIntoFileNode(node, symbol_type, symbol_name, symbol_size): + """Puts symbol into the file path node |node|. + Returns the number of added levels in tree. I.e. returns 2.""" + + # 'node' is the file node and first step is to find its symbol-type bucket. + node[NODE_LAST_PATH_ELEMENT_KEY] = True + node = _MkChild(node, symbol_type) + assert not NODE_TYPE_KEY in node or node[NODE_TYPE_KEY] == 'b' + node[NODE_SYMBOL_TYPE_KEY] = symbol_type + node[NODE_TYPE_KEY] = 'b' # b for bucket + + # 'node' is now the symbol-type bucket. Make the child entry. + node = _MkChild(node, symbol_name) + if NODE_CHILDREN_KEY in node: + if node[NODE_CHILDREN_KEY]: + logging.warning('A container node used as symbol for %s.' % symbol_name) + # This is going to be used as a leaf so no use for child list. + del node[NODE_CHILDREN_KEY] + node[NODE_SYMBOL_SIZE_KEY] = symbol_size + node[NODE_SYMBOL_TYPE_KEY] = symbol_type + node[NODE_TYPE_KEY] = 's' # s for symbol + + return 2 # Depth of the added subtree. 
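+# Illustrative example (hypothetical symbol, not part of the original tool):
+# running MakeCompactTree below on the single input
+#   ('SkCanvas::drawRect(...)', 't', 4029, 'src/core/SkCanvas.cpp')
+# yields, once MakeChildrenDictsIntoLists has converted child dicts to
+# lists, roughly:
+#   {"n":"/","k":"p","maxDepth":5,"children":[
+#     {"n":"src","k":"p","children":[
+#       {"n":"core","k":"p","children":[
+#         {"n":"SkCanvas.cpp","k":"p","lastPathElement":true,"children":[
+#           {"n":"t","k":"b","t":"t","children":[
+#             {"n":"SkCanvas::drawRect(...)","k":"s","t":"t","value":4029}
+#   ]}]}]}]}]}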
+ + +def MakeCompactTree(symbols, symbol_path_origin_dir): + result = {NODE_NAME_KEY: '/', + NODE_CHILDREN_KEY: {}, + NODE_TYPE_KEY: 'p', + NODE_MAX_DEPTH_KEY: 0} + seen_symbol_with_path = False + for symbol_name, symbol_type, symbol_size, file_path in symbols: + + if 'vtable for ' in symbol_name: + symbol_type = '@' # hack to categorize these separately + if file_path and file_path != "??": + seen_symbol_with_path = True + else: + file_path = NAME_NO_PATH_BUCKET + + path_parts = file_path.split('/') + + # Find pre-existing node in tree, or update if it already exists + node = result + depth = 0 + while len(path_parts) > 0: + path_part = path_parts.pop(0) + if len(path_part) == 0: + continue + depth += 1 + node = _MkChild(node, path_part) + assert not NODE_TYPE_KEY in node or node[NODE_TYPE_KEY] == 'p' + node[NODE_TYPE_KEY] = 'p' # p for path + + depth += AddSymbolIntoFileNode(node, symbol_type, symbol_name, symbol_size) + result[NODE_MAX_DEPTH_KEY] = max(result[NODE_MAX_DEPTH_KEY], depth) + + if not seen_symbol_with_path: + logging.warning('Symbols lack paths. Data will not be structured.') + + # The (no path) bucket can be extremely large if we failed to get + # path information. Split it into subgroups if needed. + SplitNoPathBucket(result) + + largest_list_len = MakeChildrenDictsIntoLists(result) + + if largest_list_len > BIG_BUCKET_LIMIT: + logging.warning('There are sections with %d nodes. ' + 'Results might be unusable.' % largest_list_len) + return result + + +# Skia added: summarizes tree size by symbol type for the given root node. +# Returns a dict keyed by symbol type, and value the type's overall size. +# e.g., {"t": 12345, "W": 543}. +def GetTreeSizes(node): + if 'children' not in node or not node['children']: + return {node['t']: node['value']} + dic = {} + for i in node['children']: + for k, v in GetTreeSizes(i).items(): + dic.setdefault(k, 0) + dic[k] += v + + return dic + + +# Skia added: creates dict to be converted to JSON in bench format. +# See top of file for the structure description. +def GetBenchDict(githash, tree_root): + dic = {'gitHash': githash, + 'key': {'source_type': 'binarysize'}, + 'results': {},} + for i in tree_root['children']: + if '(No Path)' == i['n']: # Already at symbol summary level. + for k, v in GetTreeSizes(i).items(): + dic['results']['no_path_' + SYMBOL_MAP[k]] = { + 'memory': { + 'bytes': v, + 'options': {'path': 'no_path', + 'symbol': SYMBOL_MAP[k],},}} + else: # We need to go deeper. + for c in i['children']: + path = i['n'] + '_' + c['n'] + for k, v in GetTreeSizes(c).items(): + dic['results'][path + '_' + SYMBOL_MAP[k]] = { + 'memory': { + 'bytes': v, + 'options': {'path': path, + 'symbol': SYMBOL_MAP[k],}}} + + return dic + + +def DumpCompactTree(symbols, symbol_path_origin_dir, ha, ts, issue, dest): + tree_root = MakeCompactTree(symbols, symbol_path_origin_dir) + json_data = {'tree_data': tree_root, + 'githash': ha, + 'commit_ts': ts, + 'key': {'source_type': 'binary_size'}, + 'total_size': sum(GetTreeSizes(tree_root).values()),} + with open(dest, 'w') as out: + # Use separators without whitespace to get a smaller file. 
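+    # For instance, json.dumps({'a': 1}, separators=(',', ':')) yields
+    # '{"a":1}' rather than the default '{"a": 1}'.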
+ json.dump(json_data, out, separators=(',', ':')) + + +def MakeSourceMap(symbols): + sources = {} + for _sym, _symbol_type, size, path in symbols: + key = None + if path: + key = os.path.normpath(path) + else: + key = '[no path]' + if key not in sources: + sources[key] = {'path': path, 'symbol_count': 0, 'size': 0} + record = sources[key] + record['size'] += size + record['symbol_count'] += 1 + return sources + + +# Regex for parsing "nm" output. A sample line looks like this: +# 0167b39c 00000018 t ACCESS_DESCRIPTION_free /path/file.c:95 +# +# The fields are: address, size, type, name, source location +# Regular expression explained ( see also: https://xkcd.com/208 ): +# ([0-9a-f]{8,}+) The address +# [\s]+ Whitespace separator +# ([0-9a-f]{8,}+) The size. From here on out it's all optional. +# [\s]+ Whitespace separator +# (\S?) The symbol type, which is any non-whitespace char +# [\s*] Whitespace separator +# ([^\t]*) Symbol name, any non-tab character (spaces ok!) +# [\t]? Tab separator +# (.*) The location (filename[:linennum|?][ (discriminator n)] +sNmPattern = re.compile( + r'([0-9a-f]{8,})[\s]+([0-9a-f]{8,})[\s]*(\S?)[\s*]([^\t]*)[\t]?(.*)') + +class Progress(): + def __init__(self): + self.count = 0 + self.skip_count = 0 + self.collisions = 0 + self.time_last_output = time.time() + self.count_last_output = 0 + self.disambiguations = 0 + self.was_ambiguous = 0 + + +def RunElfSymbolizer(outfile, library, addr2line_binary, nm_binary, jobs, + disambiguate, src_path): + nm_output = RunNm(library, nm_binary) + nm_output_lines = nm_output.splitlines() + nm_output_lines_len = len(nm_output_lines) + address_symbol = {} + progress = Progress() + def map_address_symbol(symbol, addr): + progress.count += 1 + if addr in address_symbol: + # 'Collision between %s and %s.' % (str(symbol.name), + # str(address_symbol[addr].name)) + progress.collisions += 1 + else: + if symbol.disambiguated: + progress.disambiguations += 1 + if symbol.was_ambiguous: + progress.was_ambiguous += 1 + + address_symbol[addr] = symbol + + progress_output() + + def progress_output(): + progress_chunk = 100 + if progress.count % progress_chunk == 0: + time_now = time.time() + time_spent = time_now - progress.time_last_output + if time_spent > 1.0: + # Only output at most once per second. + progress.time_last_output = time_now + chunk_size = progress.count - progress.count_last_output + progress.count_last_output = progress.count + if time_spent > 0: + speed = chunk_size / time_spent + else: + speed = 0 + progress_percent = (100.0 * (progress.count + progress.skip_count) / + nm_output_lines_len) + disambiguation_percent = 0 + if progress.disambiguations != 0: + disambiguation_percent = (100.0 * progress.disambiguations / + progress.was_ambiguous) + + sys.stdout.write('\r%.1f%%: Looked up %d symbols (%d collisions, ' + '%d disambiguations where %.1f%% succeeded)' + ' - %.1f lookups/s.' % + (progress_percent, progress.count, progress.collisions, + progress.disambiguations, disambiguation_percent, speed)) + + # In case disambiguation was disabled, we remove the source path (which upon + # being set signals the symbolizer to enable disambiguation) + if not disambiguate: + src_path = None + symbol_path_origin_dir = os.path.dirname(library) + # Skia specific. 
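+  # Stripping LIBSKIA_RELATIVE_PATH ('out/Release') from the library dir
+  # leaves the checkout root: e.g. a hypothetical '/b/skia/out/Release'
+  # becomes '/b/skia/', so the symbolizer reports tree paths relative to
+  # the Skia repo.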
+ symbol_path_prefix = symbol_path_origin_dir.replace(LIBSKIA_RELATIVE_PATH, '') + symbolizer = elf_symbolizer.ELFSymbolizer(library, addr2line_binary, + map_address_symbol, + max_concurrent_jobs=jobs, + source_root_path=src_path, + prefix_to_remove=symbol_path_prefix) + user_interrupted = False + try: + for line in nm_output_lines: + match = sNmPattern.match(line) + if match: + location = match.group(5) + if not location: + addr = int(match.group(1), 16) + size = int(match.group(2), 16) + if addr in address_symbol: # Already looked up, shortcut + # ELFSymbolizer. + map_address_symbol(address_symbol[addr], addr) + continue + elif size == 0: + # Save time by not looking up empty symbols (do they even exist?) + print('Empty symbol: ' + line) + else: + symbolizer.SymbolizeAsync(addr, addr) + continue + + progress.skip_count += 1 + except KeyboardInterrupt: + user_interrupted = True + print('Interrupting - killing subprocesses. Please wait.') + + try: + symbolizer.Join() + except KeyboardInterrupt: + # Don't want to abort here since we will be finished in a few seconds. + user_interrupted = True + print('Patience you must have my young padawan.') + + print '' + + if user_interrupted: + print('Skipping the rest of the file mapping. ' + 'Output will not be fully classified.') + + symbol_path_origin_dir = os.path.dirname(library) + # Skia specific: path prefix to strip. + symbol_path_prefix = symbol_path_origin_dir.replace(LIBSKIA_RELATIVE_PATH, '') + + with open(outfile, 'w') as out: + for line in nm_output_lines: + match = sNmPattern.match(line) + if match: + location = match.group(5) + if not location: + addr = int(match.group(1), 16) + symbol = address_symbol.get(addr) + if symbol is not None: + path = '??' + if symbol.source_path is not None: + path = symbol.source_path.replace(symbol_path_prefix, '') + line_number = 0 + if symbol.source_line is not None: + line_number = symbol.source_line + out.write('%s\t%s:%d\n' % (line, path, line_number)) + continue + + out.write('%s\n' % line) + + print('%d symbols in the results.' 
% len(address_symbol)) + + +def RunNm(binary, nm_binary): + cmd = [nm_binary, '-C', '--print-size', '--size-sort', '--reverse-sort', + binary] + nm_process = subprocess.Popen(cmd, + stdout=subprocess.PIPE, + stderr=subprocess.PIPE) + (process_output, err_output) = nm_process.communicate() + + if nm_process.returncode != 0: + if err_output: + raise Exception, err_output + else: + raise Exception, process_output + + return process_output + + +def GetNmSymbols(nm_infile, outfile, library, jobs, verbose, + addr2line_binary, nm_binary, disambiguate, src_path): + if nm_infile is None: + if outfile is None: + outfile = tempfile.NamedTemporaryFile(delete=False).name + + if verbose: + print 'Running parallel addr2line, dumping symbols to ' + outfile + RunElfSymbolizer(outfile, library, addr2line_binary, nm_binary, jobs, + disambiguate, src_path) + + nm_infile = outfile + + elif verbose: + print 'Using nm input from ' + nm_infile + with file(nm_infile, 'r') as infile: + return list(binary_size_utils.ParseNm(infile)) + + +PAK_RESOURCE_ID_TO_STRING = { "inited": False } + +def LoadPakIdsFromResourceFile(filename): + """Given a file name, it loads everything that looks like a resource id + into PAK_RESOURCE_ID_TO_STRING.""" + with open(filename) as resource_header: + for line in resource_header: + if line.startswith("#define "): + line_data = line.split() + if len(line_data) == 3: + try: + resource_number = int(line_data[2]) + resource_name = line_data[1] + PAK_RESOURCE_ID_TO_STRING[resource_number] = resource_name + except ValueError: + pass + +def GetReadablePakResourceName(pak_file, resource_id): + """Pak resources have a numeric identifier. It is not helpful when + trying to locate where footprint is generated. This does its best to + map the number to a usable string.""" + if not PAK_RESOURCE_ID_TO_STRING['inited']: + # Try to find resource header files generated by grit when + # building the pak file. We'll look for files named *resources.h" + # and lines of the type: + # #define MY_RESOURCE_JS 1234 + PAK_RESOURCE_ID_TO_STRING['inited'] = True + gen_dir = os.path.join(os.path.dirname(pak_file), 'gen') + if os.path.isdir(gen_dir): + for dirname, _dirs, files in os.walk(gen_dir): + for filename in files: + if filename.endswith('resources.h'): + LoadPakIdsFromResourceFile(os.path.join(dirname, filename)) + return PAK_RESOURCE_ID_TO_STRING.get(resource_id, + 'Pak Resource %d' % resource_id) + +def AddPakData(symbols, pak_file): + """Adds pseudo-symbols from a pak file.""" + pak_file = os.path.abspath(pak_file) + with open(pak_file, 'rb') as pak: + data = pak.read() + + PAK_FILE_VERSION = 4 + HEADER_LENGTH = 2 * 4 + 1 # Two uint32s. (file version, number of entries) + # and one uint8 (encoding of text resources) + INDEX_ENTRY_SIZE = 2 + 4 # Each entry is a uint16 and a uint32. + version, num_entries, _encoding = struct.unpack('<IIB', data[:HEADER_LENGTH]) + assert version == PAK_FILE_VERSION, ('Unsupported pak file ' + 'version (%d) in %s. Only ' + 'support version %d' % + (version, pak_file, PAK_FILE_VERSION)) + if num_entries > 0: + # Read the index and data. + data = data[HEADER_LENGTH:] + for _ in range(num_entries): + resource_id, offset = struct.unpack('<HI', data[:INDEX_ENTRY_SIZE]) + data = data[INDEX_ENTRY_SIZE:] + _next_id, next_offset = struct.unpack('<HI', data[:INDEX_ENTRY_SIZE]) + resource_size = next_offset - offset + + symbol_name = GetReadablePakResourceName(pak_file, resource_id) + symbol_path = 'opaque pak' + symbol_type = 'd' # Data. Approximation. + symbol_size = resource_size + symbols.append((symbol_name, symbol_type, symbol_size, symbol_path)) + + +def _find_in_system_path(binary): + """Locate the full path to binary in the system path or return None + if not found.""" + system_path = os.environ["PATH"].split(os.pathsep) + for path in system_path: + binary_path = os.path.join(path, binary) + if os.path.isfile(binary_path): + return binary_path + return None + + +def CheckDebugFormatSupport(library, addr2line_binary): + """Kills the program if debug data is in an unsupported format. + + There are two common versions of the DWARF debug formats and + since we are right now transitioning from DWARF2 to newer formats, + it's possible to have a mix of tools that are not compatible. Detect + that and abort rather than produce meaningless output.""" + tool_output = subprocess.check_output([addr2line_binary, '--version']) + version_re = re.compile(r'^GNU [^ ]+ .* (\d+).(\d+).*?$', re.M) + parsed_output = version_re.match(tool_output) + major = int(parsed_output.group(1)) + minor = int(parsed_output.group(2)) + supports_dwarf4 = major > 2 or major == 2 and minor > 22 + + if supports_dwarf4: + return + + print('Checking version of debug information in %s.' % library) + debug_info = subprocess.check_output(['readelf', '--debug-dump=info', + '--dwarf-depth=1', library]) + dwarf_version_re = re.compile(r'^\s+Version:\s+(\d+)$', re.M) + parsed_dwarf_format_output = dwarf_version_re.search(debug_info) + version = int(parsed_dwarf_format_output.group(1)) + if version > 2: + print('The supplied tools only support DWARF2 debug data but the binary\n' +
Update the tools or compile the binary\n' % version + + 'with -gdwarf-2.') + sys.exit(1) + + +def main(): + usage = """%prog [options] + + Runs a spatial analysis on a given library, looking up the source locations + of its symbols and calculating how much space each directory, source file, + and so on is taking. The result is a report that can be used to pinpoint + sources of large portions of the binary, etceteras. + + Under normal circumstances, you only need to pass two arguments, thusly: + + %prog --library /path/to/library --destdir /path/to/output + + In this mode, the program will dump the symbols from the specified library + and map those symbols back to source locations, producing a web-based + report in the specified output directory. + + Other options are available via '--help'. + """ + parser = optparse.OptionParser(usage=usage) + parser.add_option('--nm-in', metavar='PATH', + help='if specified, use nm input from instead of ' + 'generating it. Note that source locations should be ' + 'present in the file; i.e., no addr2line symbol lookups ' + 'will be performed when this option is specified. ' + 'Mutually exclusive with --library.') + parser.add_option('--destdir', metavar='PATH', + help='write output to the specified directory. An HTML ' + 'report is generated here along with supporting files; ' + 'any existing report will be overwritten. Not used in ' + 'Skia.') + parser.add_option('--library', metavar='PATH', + help='if specified, process symbols in the library at ' + 'the specified path. Mutually exclusive with --nm-in.') + parser.add_option('--pak', metavar='PATH', + help='if specified, includes the contents of the ' + 'specified *.pak file in the output.') + parser.add_option('--nm-binary', + help='use the specified nm binary to analyze library. ' + 'This is to be used when the nm in the path is not for ' + 'the right architecture or of the right version.') + parser.add_option('--addr2line-binary', + help='use the specified addr2line binary to analyze ' + 'library. This is to be used when the addr2line in ' + 'the path is not for the right architecture or ' + 'of the right version.') + parser.add_option('--jobs', type='int', + help='number of jobs to use for the parallel ' + 'addr2line processing pool; defaults to 1. More ' + 'jobs greatly improve throughput but eat RAM like ' + 'popcorn, and take several gigabytes each. Start low ' + 'and ramp this number up until your machine begins to ' + 'struggle with RAM. ' + 'This argument is only valid when using --library.') + parser.add_option('-v', dest='verbose', action='store_true', + help='be verbose, printing lots of status information.') + parser.add_option('--nm-out', metavar='PATH', + help='keep the nm output file, and store it at the ' + 'specified path. This is useful if you want to see the ' + 'fully processed nm output after the symbols have been ' + 'mapped to source locations. By default, a tempfile is ' + 'used and is deleted when the program terminates.' 
+ 'This argument is only valid when using --library.') + parser.add_option('--legacy', action='store_true', + help='emit legacy binary size report instead of modern') + parser.add_option('--disable-disambiguation', action='store_true', + help='disables the disambiguation process altogether,' + ' NOTE: this may, depending on your toolchain, produce' + ' output with some symbols at the top layer if addr2line' + ' could not get the entire source path.') + parser.add_option('--source-path', default='./', + help='the path to the source code of the output binary, ' + 'default set to current directory. Used in the' + ' disambiguation process.') + parser.add_option('--githash', default='latest', + help='Git hash for the binary version. Added by Skia.') + parser.add_option('--commit_ts', type='int', default=-1, + help='Timestamp for the commit. Added by Skia.') + parser.add_option('--issue_number', default='', + help='The trybot issue number in string. Added by Skia.') + parser.add_option('--dest', default=None, + help='Destination file to write results.') + opts, _args = parser.parse_args() + + if ((not opts.library) and (not opts.nm_in)) or (opts.library and opts.nm_in): + parser.error('exactly one of --library or --nm-in is required') + if (opts.nm_in): + if opts.jobs: + print >> sys.stderr, ('WARNING: --jobs has no effect ' + 'when used with --nm-in') + if not opts.jobs: + # Use the number of processors but cap between 2 and 4 since raw + # CPU power isn't the limiting factor. It's I/O limited, memory + # bus limited and available-memory-limited. Too many processes and + # the computer will run out of memory and it will be slow. + opts.jobs = max(2, min(4, str(multiprocessing.cpu_count()))) + + if opts.addr2line_binary: + assert os.path.isfile(opts.addr2line_binary) + addr2line_binary = opts.addr2line_binary + else: + addr2line_binary = _find_in_system_path('addr2line') + assert addr2line_binary, 'Unable to find addr2line in the path. '\ + 'Use --addr2line-binary to specify location.' + + if opts.nm_binary: + assert os.path.isfile(opts.nm_binary) + nm_binary = opts.nm_binary + else: + nm_binary = _find_in_system_path('nm') + assert nm_binary, 'Unable to find nm in the path. Use --nm-binary '\ + 'to specify location.' + + if opts.pak: + assert os.path.isfile(opts.pak), 'Could not find ' % opts.pak + + print('addr2line: %s' % addr2line_binary) + print('nm: %s' % nm_binary) + + if opts.library: + CheckDebugFormatSupport(opts.library, addr2line_binary) + + symbols = GetNmSymbols(opts.nm_in, opts.nm_out, opts.library, + opts.jobs, opts.verbose is True, + addr2line_binary, nm_binary, + opts.disable_disambiguation is None, + opts.source_path) + + if opts.pak: + AddPakData(symbols, opts.pak) + + if opts.legacy: # legacy report + print 'Do Not set legacy flag.' + + else: # modern report + if opts.library: + symbol_path_origin_dir = os.path.dirname(os.path.abspath(opts.library)) + else: + # Just a guess. Hopefully all paths in the input file are absolute. + symbol_path_origin_dir = os.path.abspath(os.getcwd()) + DumpCompactTree(symbols, symbol_path_origin_dir, opts.githash, + opts.commit_ts, opts.issue_number, opts.dest) + print 'Report data uploaded to GS.' + + +if __name__ == '__main__': + sys.exit(main()) diff --git a/infra/bots/recipe_modules/checkout/__init__.py b/infra/bots/recipe_modules/checkout/__init__.py new file mode 100644 index 0000000000..29f438e3a6 --- /dev/null +++ b/infra/bots/recipe_modules/checkout/__init__.py @@ -0,0 +1,19 @@ +# Copyright 2014 The Chromium Authors. 
All rights reserved. +# Use of this source code is governed by a BSD-style license that can be +# found in the LICENSE file. + +DEPS = [ + 'depot_tools/bot_update', + 'depot_tools/gclient', + 'depot_tools/git', + 'depot_tools/tryserver', + 'flavor', + 'recipe_engine/context', + 'recipe_engine/file', + 'recipe_engine/path', + 'recipe_engine/properties', + 'recipe_engine/python', + 'recipe_engine/step', + 'run', + 'vars', +] diff --git a/infra/bots/recipe_modules/checkout/api.py b/infra/bots/recipe_modules/checkout/api.py new file mode 100644 index 0000000000..67c27b1aeb --- /dev/null +++ b/infra/bots/recipe_modules/checkout/api.py @@ -0,0 +1,150 @@ +# Copyright 2014 The Chromium Authors. All rights reserved. +# Use of this source code is governed by a BSD-style license that can be +# found in the LICENSE file. + + +# pylint: disable=W0201 + + +from recipe_engine import recipe_api +from recipe_engine import config_types + + +class CheckoutApi(recipe_api.RecipeApi): + + @property + def default_checkout_root(self): + """The default location for cached persistent checkouts.""" + return self.m.vars.cache_dir.join('work') + + def patch_ref(self, issue, patchset): + """Build a ref for the given issue and patchset.""" + return 'refs/changes/%s/%s/%s' % (issue[-2:], issue, patchset) + + def git(self, checkout_root): + """Run the steps to perform a pure-git checkout without DEPS.""" + skia_dir = checkout_root.join('skia') + self.m.git.checkout( + self.m.properties['repository'], dir_path=skia_dir, + ref=self.m.properties['revision'], submodules=False) + if self.m.vars.is_trybot: + ref = self.patch_ref(str(self.m.vars.issue), str(self.m.vars.patchset)) + self.m.git('fetch', 'origin', ref) + self.m.git('checkout', 'FETCH_HEAD') + self.m.git('rebase', self.m.properties['revision']) + return self.m.properties['revision'] + + def bot_update(self, checkout_root, gclient_cache=None): + """Run the steps to obtain a checkout using bot_update.""" + if not gclient_cache: + gclient_cache = self.m.vars.cache_dir.join('git') + + cfg_kwargs = {} + is_parent_revision = 'ParentRevision' in self.m.vars.extra_tokens + + # Use a persistent gclient cache for Swarming. + cfg_kwargs['CACHE_DIR'] = gclient_cache + + # Create the checkout path if necessary. + if not self.m.path.exists(checkout_root): + self.m.file.ensure_directory('makedirs checkout_path', checkout_root) + + # Initial cleanup. + gclient_cfg = self.m.gclient.make_config(**cfg_kwargs) + + # Some bots also require a checkout of chromium. + need_chromium_checkout = False + gclient_env = {'DEPOT_TOOLS_UPDATE': '0'} + if 'CommandBuffer' in self.m.properties['buildername']: + need_chromium_checkout = True + gclient_env['GYP_CHROMIUM_NO_ACTION'] = '0' + if 'RecreateSKPs' in self.m.properties['buildername']: + need_chromium_checkout = True + gclient_env['CPPFLAGS'] = '-DSK_ALLOW_CROSSPROCESS_PICTUREIMAGEFILTERS=1' + + # Add chromium first because of skbug.com/7917. 
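A minimal, self-contained sketch (illustrative only, not part of the patch) of the Gerrit ref format that patch_ref() above produces; the expected values below match the refs/changes/00/500/1 and refs/changes/89/456789/12 refs appearing in the expected JSON later in this change:

def patch_ref(issue, patchset):
    # The last two digits of the issue number select the Gerrit ref "bucket".
    return 'refs/changes/%s/%s/%s' % (issue[-2:], issue, patchset)

assert patch_ref('500', '1') == 'refs/changes/00/500/1'
assert patch_ref('456789', '12') == 'refs/changes/89/456789/12'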
+ if need_chromium_checkout: + chromium = gclient_cfg.solutions.add() + chromium.name = 'src' + chromium.managed = False + chromium.url = 'https://chromium.googlesource.com/chromium/src.git' + chromium.revision = 'origin/master' + + main_repo = self.m.properties['repository'] + + need_flutter_checkout = 'Flutter' in self.m.properties['buildername'] + if need_flutter_checkout: + main_repo = 'https://github.com/flutter/engine.git' + main_name = self.m.path.basename(main_repo) + if main_name.endswith('.git'): + main_name = main_name[:-len('.git')] + # Special case for flutter because it seems to need a very specific + # directory structure to successfully build. + if need_flutter_checkout and main_name == 'engine': + main_name = 'src/flutter' + main = gclient_cfg.solutions.add() + main.name = main_name + main.managed = False + main.url = main_repo + main.revision = self.m.properties.get('revision') or 'origin/master' + m = gclient_cfg.got_revision_mapping + m[main_name] = 'got_revision' + patch_root = main_name + patch_repo = main.url + if self.m.properties.get('patch_repo'): + patch_repo = self.m.properties['patch_repo'] + patch_root = patch_repo.split('/')[-1] + if patch_root.endswith('.git'): + patch_root = patch_root[:-4] + + if need_flutter_checkout: + # Skia is a DEP of Flutter; the 'revision' property is a Skia revision, + # and any patch should be applied to Skia, not Flutter. + main.revision = 'origin/master' + main.managed = True + m[main_name] = 'got_flutter_revision' + if 'Android' in self.m.vars.extra_tokens: + gclient_cfg.target_os.add('android') + + skia_dep_path = 'src/third_party/skia' + gclient_cfg.patch_projects['skia'] = (skia_dep_path, 'HEAD') + gclient_cfg.revisions[skia_dep_path] = self.m.properties['revision'] + m[skia_dep_path] = 'got_revision' + patch_repo = 'https://skia.googlesource.com/skia.git' + patch_root = skia_dep_path + + # TODO(rmistry): Remove the below block after there is a solution for + # crbug.com/616443 + entries_file = checkout_root.join('.gclient_entries') + if self.m.path.exists(entries_file) or self._test_data.enabled: + self.m.file.remove('remove %s' % entries_file, + entries_file) + + # Run bot_update. + if patch_repo != self.m.properties['repository']: + # TODO(borenet): bot_update uses the 'repository' property to determine + # which repo the patch should come from. This conflicts with our usage of + # the same property to determine which root repo to check out, which may + # not be the same as the repository the patch comes from, for which we use + # the patch_repo property. Remove this hack by refactoring our checkout + # code and properties to agree with bot_update. + self.m.bot_update._repository = patch_repo + + if not self.m.vars.is_trybot and is_parent_revision: + main.revision = main.revision + '^' + + self.m.gclient.c = gclient_cfg + with self.m.context(cwd=checkout_root): + update_step = self.m.bot_update.ensure_checkout( + patch_root=patch_root, + # The logic in ensure_checkout for this arg is fairly naive, so if + # patch=False, we'll see "... (without patch)" in the step names, even + # for non-trybot runs, which is misleading and confusing. Therefore, + # always specify patch=True for non-trybot runs. 
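A quick truth table (a sketch assuming plain boolean logic; not part of the patch) for the patch argument passed to ensure_checkout just below:

def patch_arg(is_trybot, is_parent_revision):
    # Only ParentRevision trybots check out without the patch.
    return not (is_trybot and is_parent_revision)

assert patch_arg(True, True) is False   # ParentRevision trybot: no patch
assert patch_arg(True, False) is True   # ordinary trybot: apply the patch
assert patch_arg(False, True) is True   # non-trybot: patch=True avoids the
                                        # misleading "(without patch)" name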
+ patch=not (self.m.vars.is_trybot and is_parent_revision) + ) + + if need_chromium_checkout or need_flutter_checkout: + with self.m.context(cwd=checkout_root, env=gclient_env): + self.m.gclient.runhooks() + return update_step.presentation.properties['got_revision'] diff --git a/infra/bots/recipe_modules/checkout/examples/full.expected/Build-Debian9-Clang-x86_64-Release-NoDEPS.json b/infra/bots/recipe_modules/checkout/examples/full.expected/Build-Debian9-Clang-x86_64-Release-NoDEPS.json new file mode 100644 index 0000000000..ca21fe033f --- /dev/null +++ b/infra/bots/recipe_modules/checkout/examples/full.expected/Build-Debian9-Clang-x86_64-Release-NoDEPS.json @@ -0,0 +1,118 @@ +[ + { + "cmd": [ + "python", + "-u", + "RECIPE_MODULE[depot_tools::git]/resources/git_setup.py", + "--path", + "[START_DIR]/skia", + "--url", + "https://skia.googlesource.com/skia.git" + ], + "name": "git setup" + }, + { + "cmd": [ + "git", + "retry", + "fetch", + "origin", + "abc123", + "--progress" + ], + "cwd": "[START_DIR]/skia", + "env": { + "PATH": "RECIPE_PACKAGE_REPO[depot_tools]:" + }, + "infra_step": true, + "name": "git fetch" + }, + { + "cmd": [ + "git", + "checkout", + "-f", + "FETCH_HEAD" + ], + "cwd": "[START_DIR]/skia", + "infra_step": true, + "name": "git checkout" + }, + { + "cmd": [ + "git", + "rev-parse", + "HEAD" + ], + "cwd": "[START_DIR]/skia", + "infra_step": true, + "name": "read revision", + "stdout": "/path/to/tmp/", + "~followup_annotations": [ + "@@@STEP_TEXT@
checked out 'deadbeef'
@@@" + ] + }, + { + "cmd": [ + "git", + "clean", + "-f", + "-d", + "-x" + ], + "cwd": "[START_DIR]/skia", + "infra_step": true, + "name": "git clean" + }, + { + "cmd": [ + "git", + "fetch", + "origin", + "refs/changes/00/500/1" + ], + "cwd": "[START_DIR]/skia", + "infra_step": true, + "name": "git fetch (2)" + }, + { + "cmd": [ + "git", + "checkout", + "FETCH_HEAD" + ], + "cwd": "[START_DIR]/skia", + "infra_step": true, + "name": "git checkout (2)" + }, + { + "cmd": [ + "git", + "rebase", + "abc123" + ], + "cwd": "[START_DIR]/skia", + "infra_step": true, + "name": "git rebase" + }, + { + "cmd": [ + "python", + "-u", + "RECIPE_MODULE[recipe_engine::file]/resources/fileutil.py", + "--json-output", + "/path/to/tmp/json", + "ensure-directory", + "--mode", + "0777", + "[START_DIR]/tmp" + ], + "infra_step": true, + "name": "makedirs tmp_dir" + }, + { + "name": "$result", + "recipe_result": null, + "status_code": 0 + } +] \ No newline at end of file diff --git a/infra/bots/recipe_modules/checkout/examples/full.expected/Build-Mac-Clang-x86_64-Debug-CommandBuffer.json b/infra/bots/recipe_modules/checkout/examples/full.expected/Build-Mac-Clang-x86_64-Debug-CommandBuffer.json new file mode 100644 index 0000000000..d9db2bbf14 --- /dev/null +++ b/infra/bots/recipe_modules/checkout/examples/full.expected/Build-Mac-Clang-x86_64-Debug-CommandBuffer.json @@ -0,0 +1,134 @@ +[ + { + "cmd": [ + "python", + "-u", + "RECIPE_MODULE[recipe_engine::file]/resources/fileutil.py", + "--json-output", + "/path/to/tmp/json", + "ensure-directory", + "--mode", + "0777", + "[START_DIR]/cache/work" + ], + "infra_step": true, + "name": "makedirs checkout_path" + }, + { + "cmd": [ + "python", + "-u", + "RECIPE_MODULE[recipe_engine::file]/resources/fileutil.py", + "--json-output", + "/path/to/tmp/json", + "remove", + "[START_DIR]/cache/work/.gclient_entries" + ], + "infra_step": true, + "name": "remove [START_DIR]/cache/work/.gclient_entries" + }, + { + "cmd": [ + "python", + "-u", + "RECIPE_MODULE[depot_tools::bot_update]/resources/bot_update.py", + "--spec-path", + "cache_dir = '[START_DIR]/cache/git'\nsolutions = [{'deps_file': '.DEPS.git', 'managed': False, 'name': 'src', 'url': 'https://chromium.googlesource.com/chromium/src.git'}, {'deps_file': '.DEPS.git', 'managed': False, 'name': 'skia', 'url': 'https://skia.googlesource.com/skia.git'}]", + "--patch_root", + "skia", + "--revision_mapping_file", + "{\"got_revision\": \"skia\"}", + "--git-cache-dir", + "[START_DIR]/cache/git", + "--cleanup-dir", + "[CLEANUP]/bot_update", + "--output_json", + "/path/to/tmp/json", + "--revision", + "skia@abc123", + "--revision", + "src@origin/master" + ], + "cwd": "[START_DIR]/cache/work", + "env_prefixes": { + "PATH": [ + "RECIPE_PACKAGE_REPO[depot_tools]" + ] + }, + "infra_step": true, + "name": "bot_update", + "~followup_annotations": [ + "@@@STEP_TEXT@Some step text@@@", + "@@@STEP_LOG_LINE@json.output@{@@@", + "@@@STEP_LOG_LINE@json.output@ \"did_run\": true, @@@", + "@@@STEP_LOG_LINE@json.output@ \"fixed_revisions\": {@@@", + "@@@STEP_LOG_LINE@json.output@ \"skia\": \"abc123\", @@@", + "@@@STEP_LOG_LINE@json.output@ \"src\": \"origin/master\"@@@", + "@@@STEP_LOG_LINE@json.output@ }, @@@", + "@@@STEP_LOG_LINE@json.output@ \"manifest\": {@@@", + "@@@STEP_LOG_LINE@json.output@ \"skia\": {@@@", + "@@@STEP_LOG_LINE@json.output@ \"repository\": \"https://fake.org/skia.git\", @@@", + "@@@STEP_LOG_LINE@json.output@ \"revision\": \"9046e2e693bb92a76e972b694580e5d17ad10748\"@@@", + "@@@STEP_LOG_LINE@json.output@ }@@@", + 
"@@@STEP_LOG_LINE@json.output@ }, @@@", + "@@@STEP_LOG_LINE@json.output@ \"patch_failure\": false, @@@", + "@@@STEP_LOG_LINE@json.output@ \"patch_root\": \"skia\", @@@", + "@@@STEP_LOG_LINE@json.output@ \"properties\": {@@@", + "@@@STEP_LOG_LINE@json.output@ \"got_revision\": \"9046e2e693bb92a76e972b694580e5d17ad10748\", @@@", + "@@@STEP_LOG_LINE@json.output@ \"got_revision_cp\": \"refs/heads/master@{#164710}\"@@@", + "@@@STEP_LOG_LINE@json.output@ }, @@@", + "@@@STEP_LOG_LINE@json.output@ \"root\": \"src\", @@@", + "@@@STEP_LOG_LINE@json.output@ \"source_manifest\": {@@@", + "@@@STEP_LOG_LINE@json.output@ \"directories\": {@@@", + "@@@STEP_LOG_LINE@json.output@ \"skia\": {@@@", + "@@@STEP_LOG_LINE@json.output@ \"git_checkout\": {@@@", + "@@@STEP_LOG_LINE@json.output@ \"repo_url\": \"https://fake.org/skia.git\", @@@", + "@@@STEP_LOG_LINE@json.output@ \"revision\": \"9046e2e693bb92a76e972b694580e5d17ad10748\"@@@", + "@@@STEP_LOG_LINE@json.output@ }@@@", + "@@@STEP_LOG_LINE@json.output@ }@@@", + "@@@STEP_LOG_LINE@json.output@ }, @@@", + "@@@STEP_LOG_LINE@json.output@ \"version\": 0@@@", + "@@@STEP_LOG_LINE@json.output@ }, @@@", + "@@@STEP_LOG_LINE@json.output@ \"step_text\": \"Some step text\"@@@", + "@@@STEP_LOG_LINE@json.output@}@@@", + "@@@STEP_LOG_END@json.output@@@", + "@@@SET_BUILD_PROPERTY@got_revision@\"9046e2e693bb92a76e972b694580e5d17ad10748\"@@@", + "@@@SET_BUILD_PROPERTY@got_revision_cp@\"refs/heads/master@{#164710}\"@@@" + ] + }, + { + "cmd": [ + "python", + "-u", + "RECIPE_PACKAGE_REPO[depot_tools]/gclient.py", + "runhooks" + ], + "cwd": "[START_DIR]/cache/work", + "env": { + "DEPOT_TOOLS_UPDATE": "0", + "GYP_CHROMIUM_NO_ACTION": "0", + "PATH": ":RECIPE_PACKAGE_REPO[depot_tools]" + }, + "name": "gclient runhooks" + }, + { + "cmd": [ + "python", + "-u", + "RECIPE_MODULE[recipe_engine::file]/resources/fileutil.py", + "--json-output", + "/path/to/tmp/json", + "ensure-directory", + "--mode", + "0777", + "[START_DIR]/tmp" + ], + "infra_step": true, + "name": "makedirs tmp_dir" + }, + { + "name": "$result", + "recipe_result": null, + "status_code": 0 + } +] \ No newline at end of file diff --git a/infra/bots/recipe_modules/checkout/examples/full.expected/Build-Win-Clang-x86_64-Release-ParentRevision.json b/infra/bots/recipe_modules/checkout/examples/full.expected/Build-Win-Clang-x86_64-Release-ParentRevision.json new file mode 100644 index 0000000000..ffce3b2ee9 --- /dev/null +++ b/infra/bots/recipe_modules/checkout/examples/full.expected/Build-Win-Clang-x86_64-Release-ParentRevision.json @@ -0,0 +1,116 @@ +[ + { + "cmd": [ + "python", + "-u", + "RECIPE_MODULE[recipe_engine::file]/resources/fileutil.py", + "--json-output", + "/path/to/tmp/json", + "ensure-directory", + "--mode", + "0777", + "[START_DIR]/cache/work" + ], + "infra_step": true, + "name": "makedirs checkout_path" + }, + { + "cmd": [ + "python", + "-u", + "RECIPE_MODULE[recipe_engine::file]/resources/fileutil.py", + "--json-output", + "/path/to/tmp/json", + "remove", + "[START_DIR]/cache/work/.gclient_entries" + ], + "infra_step": true, + "name": "remove [START_DIR]/cache/work/.gclient_entries" + }, + { + "cmd": [ + "python", + "-u", + "RECIPE_MODULE[depot_tools::bot_update]/resources/bot_update.py", + "--spec-path", + "cache_dir = '[START_DIR]/cache/git'\nsolutions = [{'deps_file': '.DEPS.git', 'managed': False, 'name': 'skia', 'url': 'https://skia.googlesource.com/skia.git'}]", + "--patch_root", + "skia", + "--revision_mapping_file", + "{\"got_revision\": \"skia\"}", + "--git-cache-dir", + "[START_DIR]/cache/git", + 
"--cleanup-dir", + "[CLEANUP]/bot_update", + "--output_json", + "/path/to/tmp/json", + "--revision", + "skia@abc123^" + ], + "cwd": "[START_DIR]/cache/work", + "env_prefixes": { + "PATH": [ + "RECIPE_PACKAGE_REPO[depot_tools]" + ] + }, + "infra_step": true, + "name": "bot_update", + "~followup_annotations": [ + "@@@STEP_TEXT@Some step text@@@", + "@@@STEP_LOG_LINE@json.output@{@@@", + "@@@STEP_LOG_LINE@json.output@ \"did_run\": true, @@@", + "@@@STEP_LOG_LINE@json.output@ \"fixed_revisions\": {@@@", + "@@@STEP_LOG_LINE@json.output@ \"skia\": \"abc123^\"@@@", + "@@@STEP_LOG_LINE@json.output@ }, @@@", + "@@@STEP_LOG_LINE@json.output@ \"manifest\": {@@@", + "@@@STEP_LOG_LINE@json.output@ \"skia\": {@@@", + "@@@STEP_LOG_LINE@json.output@ \"repository\": \"https://fake.org/skia.git\", @@@", + "@@@STEP_LOG_LINE@json.output@ \"revision\": \"9046e2e693bb92a76e972b694580e5d17ad10748\"@@@", + "@@@STEP_LOG_LINE@json.output@ }@@@", + "@@@STEP_LOG_LINE@json.output@ }, @@@", + "@@@STEP_LOG_LINE@json.output@ \"patch_failure\": false, @@@", + "@@@STEP_LOG_LINE@json.output@ \"patch_root\": \"skia\", @@@", + "@@@STEP_LOG_LINE@json.output@ \"properties\": {@@@", + "@@@STEP_LOG_LINE@json.output@ \"got_revision\": \"9046e2e693bb92a76e972b694580e5d17ad10748\", @@@", + "@@@STEP_LOG_LINE@json.output@ \"got_revision_cp\": \"refs/heads/master@{#164710}\"@@@", + "@@@STEP_LOG_LINE@json.output@ }, @@@", + "@@@STEP_LOG_LINE@json.output@ \"root\": \"skia\", @@@", + "@@@STEP_LOG_LINE@json.output@ \"source_manifest\": {@@@", + "@@@STEP_LOG_LINE@json.output@ \"directories\": {@@@", + "@@@STEP_LOG_LINE@json.output@ \"skia\": {@@@", + "@@@STEP_LOG_LINE@json.output@ \"git_checkout\": {@@@", + "@@@STEP_LOG_LINE@json.output@ \"repo_url\": \"https://fake.org/skia.git\", @@@", + "@@@STEP_LOG_LINE@json.output@ \"revision\": \"9046e2e693bb92a76e972b694580e5d17ad10748\"@@@", + "@@@STEP_LOG_LINE@json.output@ }@@@", + "@@@STEP_LOG_LINE@json.output@ }@@@", + "@@@STEP_LOG_LINE@json.output@ }, @@@", + "@@@STEP_LOG_LINE@json.output@ \"version\": 0@@@", + "@@@STEP_LOG_LINE@json.output@ }, @@@", + "@@@STEP_LOG_LINE@json.output@ \"step_text\": \"Some step text\"@@@", + "@@@STEP_LOG_LINE@json.output@}@@@", + "@@@STEP_LOG_END@json.output@@@", + "@@@SET_BUILD_PROPERTY@got_revision@\"9046e2e693bb92a76e972b694580e5d17ad10748\"@@@", + "@@@SET_BUILD_PROPERTY@got_revision_cp@\"refs/heads/master@{#164710}\"@@@" + ] + }, + { + "cmd": [ + "python", + "-u", + "RECIPE_MODULE[recipe_engine::file]/resources/fileutil.py", + "--json-output", + "/path/to/tmp/json", + "ensure-directory", + "--mode", + "0777", + "[START_DIR]/tmp" + ], + "infra_step": true, + "name": "makedirs tmp_dir" + }, + { + "name": "$result", + "recipe_result": null, + "status_code": 0 + } +] \ No newline at end of file diff --git a/infra/bots/recipe_modules/checkout/examples/full.expected/Housekeeper-Weekly-RecreateSKPs.json b/infra/bots/recipe_modules/checkout/examples/full.expected/Housekeeper-Weekly-RecreateSKPs.json new file mode 100644 index 0000000000..f3980c9d00 --- /dev/null +++ b/infra/bots/recipe_modules/checkout/examples/full.expected/Housekeeper-Weekly-RecreateSKPs.json @@ -0,0 +1,134 @@ +[ + { + "cmd": [ + "python", + "-u", + "RECIPE_MODULE[recipe_engine::file]/resources/fileutil.py", + "--json-output", + "/path/to/tmp/json", + "ensure-directory", + "--mode", + "0777", + "[START_DIR]/cache/work" + ], + "infra_step": true, + "name": "makedirs checkout_path" + }, + { + "cmd": [ + "python", + "-u", + "RECIPE_MODULE[recipe_engine::file]/resources/fileutil.py", + "--json-output", 
+ "/path/to/tmp/json", + "remove", + "[START_DIR]/cache/work/.gclient_entries" + ], + "infra_step": true, + "name": "remove [START_DIR]/cache/work/.gclient_entries" + }, + { + "cmd": [ + "python", + "-u", + "RECIPE_MODULE[depot_tools::bot_update]/resources/bot_update.py", + "--spec-path", + "cache_dir = '[START_DIR]/cache/git'\nsolutions = [{'deps_file': '.DEPS.git', 'managed': False, 'name': 'src', 'url': 'https://chromium.googlesource.com/chromium/src.git'}, {'deps_file': '.DEPS.git', 'managed': False, 'name': 'skia', 'url': 'https://skia.googlesource.com/skia.git'}]", + "--patch_root", + "skia", + "--revision_mapping_file", + "{\"got_revision\": \"skia\"}", + "--git-cache-dir", + "[START_DIR]/cache/git", + "--cleanup-dir", + "[CLEANUP]/bot_update", + "--output_json", + "/path/to/tmp/json", + "--revision", + "skia@abc123", + "--revision", + "src@origin/master" + ], + "cwd": "[START_DIR]/cache/work", + "env_prefixes": { + "PATH": [ + "RECIPE_PACKAGE_REPO[depot_tools]" + ] + }, + "infra_step": true, + "name": "bot_update", + "~followup_annotations": [ + "@@@STEP_TEXT@Some step text@@@", + "@@@STEP_LOG_LINE@json.output@{@@@", + "@@@STEP_LOG_LINE@json.output@ \"did_run\": true, @@@", + "@@@STEP_LOG_LINE@json.output@ \"fixed_revisions\": {@@@", + "@@@STEP_LOG_LINE@json.output@ \"skia\": \"abc123\", @@@", + "@@@STEP_LOG_LINE@json.output@ \"src\": \"origin/master\"@@@", + "@@@STEP_LOG_LINE@json.output@ }, @@@", + "@@@STEP_LOG_LINE@json.output@ \"manifest\": {@@@", + "@@@STEP_LOG_LINE@json.output@ \"skia\": {@@@", + "@@@STEP_LOG_LINE@json.output@ \"repository\": \"https://fake.org/skia.git\", @@@", + "@@@STEP_LOG_LINE@json.output@ \"revision\": \"9046e2e693bb92a76e972b694580e5d17ad10748\"@@@", + "@@@STEP_LOG_LINE@json.output@ }@@@", + "@@@STEP_LOG_LINE@json.output@ }, @@@", + "@@@STEP_LOG_LINE@json.output@ \"patch_failure\": false, @@@", + "@@@STEP_LOG_LINE@json.output@ \"patch_root\": \"skia\", @@@", + "@@@STEP_LOG_LINE@json.output@ \"properties\": {@@@", + "@@@STEP_LOG_LINE@json.output@ \"got_revision\": \"9046e2e693bb92a76e972b694580e5d17ad10748\", @@@", + "@@@STEP_LOG_LINE@json.output@ \"got_revision_cp\": \"refs/heads/master@{#164710}\"@@@", + "@@@STEP_LOG_LINE@json.output@ }, @@@", + "@@@STEP_LOG_LINE@json.output@ \"root\": \"src\", @@@", + "@@@STEP_LOG_LINE@json.output@ \"source_manifest\": {@@@", + "@@@STEP_LOG_LINE@json.output@ \"directories\": {@@@", + "@@@STEP_LOG_LINE@json.output@ \"skia\": {@@@", + "@@@STEP_LOG_LINE@json.output@ \"git_checkout\": {@@@", + "@@@STEP_LOG_LINE@json.output@ \"repo_url\": \"https://fake.org/skia.git\", @@@", + "@@@STEP_LOG_LINE@json.output@ \"revision\": \"9046e2e693bb92a76e972b694580e5d17ad10748\"@@@", + "@@@STEP_LOG_LINE@json.output@ }@@@", + "@@@STEP_LOG_LINE@json.output@ }@@@", + "@@@STEP_LOG_LINE@json.output@ }, @@@", + "@@@STEP_LOG_LINE@json.output@ \"version\": 0@@@", + "@@@STEP_LOG_LINE@json.output@ }, @@@", + "@@@STEP_LOG_LINE@json.output@ \"step_text\": \"Some step text\"@@@", + "@@@STEP_LOG_LINE@json.output@}@@@", + "@@@STEP_LOG_END@json.output@@@", + "@@@SET_BUILD_PROPERTY@got_revision@\"9046e2e693bb92a76e972b694580e5d17ad10748\"@@@", + "@@@SET_BUILD_PROPERTY@got_revision_cp@\"refs/heads/master@{#164710}\"@@@" + ] + }, + { + "cmd": [ + "python", + "-u", + "RECIPE_PACKAGE_REPO[depot_tools]/gclient.py", + "runhooks" + ], + "cwd": "[START_DIR]/cache/work", + "env": { + "CPPFLAGS": "-DSK_ALLOW_CROSSPROCESS_PICTUREIMAGEFILTERS=1", + "DEPOT_TOOLS_UPDATE": "0", + "PATH": ":RECIPE_PACKAGE_REPO[depot_tools]" + }, + "name": "gclient runhooks" + }, + { + 
"cmd": [ + "python", + "-u", + "RECIPE_MODULE[recipe_engine::file]/resources/fileutil.py", + "--json-output", + "/path/to/tmp/json", + "ensure-directory", + "--mode", + "0777", + "[START_DIR]/tmp" + ], + "infra_step": true, + "name": "makedirs tmp_dir" + }, + { + "name": "$result", + "recipe_result": null, + "status_code": 0 + } +] \ No newline at end of file diff --git a/infra/bots/recipe_modules/checkout/examples/full.expected/cross_repo_trybot.json b/infra/bots/recipe_modules/checkout/examples/full.expected/cross_repo_trybot.json new file mode 100644 index 0000000000..30f707cec4 --- /dev/null +++ b/infra/bots/recipe_modules/checkout/examples/full.expected/cross_repo_trybot.json @@ -0,0 +1,120 @@ +[ + { + "cmd": [ + "python", + "-u", + "RECIPE_MODULE[recipe_engine::file]/resources/fileutil.py", + "--json-output", + "/path/to/tmp/json", + "ensure-directory", + "--mode", + "0777", + "[START_DIR]/cache/work" + ], + "infra_step": true, + "name": "makedirs checkout_path" + }, + { + "cmd": [ + "python", + "-u", + "RECIPE_MODULE[recipe_engine::file]/resources/fileutil.py", + "--json-output", + "/path/to/tmp/json", + "remove", + "[START_DIR]/cache/work/.gclient_entries" + ], + "infra_step": true, + "name": "remove [START_DIR]/cache/work/.gclient_entries" + }, + { + "cmd": [ + "python", + "-u", + "RECIPE_MODULE[depot_tools::bot_update]/resources/bot_update.py", + "--spec-path", + "cache_dir = '[START_DIR]/cache/git'\nsolutions = [{'deps_file': '.DEPS.git', 'managed': False, 'name': 'parent_repo', 'url': 'https://skia.googlesource.com/parent_repo.git'}]", + "--patch_root", + "skia", + "--revision_mapping_file", + "{\"got_revision\": \"parent_repo\"}", + "--git-cache-dir", + "[START_DIR]/cache/git", + "--cleanup-dir", + "[CLEANUP]/bot_update", + "--gerrit_repo", + "https://skia.googlesource.com/skia.git", + "--gerrit_ref", + "refs/changes/89/456789/12", + "--output_json", + "/path/to/tmp/json", + "--revision", + "parent_repo@abc123" + ], + "cwd": "[START_DIR]/cache/work", + "env_prefixes": { + "PATH": [ + "RECIPE_PACKAGE_REPO[depot_tools]" + ] + }, + "infra_step": true, + "name": "bot_update", + "~followup_annotations": [ + "@@@STEP_TEXT@Some step text@@@", + "@@@STEP_LOG_LINE@json.output@{@@@", + "@@@STEP_LOG_LINE@json.output@ \"did_run\": true, @@@", + "@@@STEP_LOG_LINE@json.output@ \"fixed_revisions\": {@@@", + "@@@STEP_LOG_LINE@json.output@ \"parent_repo\": \"abc123\"@@@", + "@@@STEP_LOG_LINE@json.output@ }, @@@", + "@@@STEP_LOG_LINE@json.output@ \"manifest\": {@@@", + "@@@STEP_LOG_LINE@json.output@ \"parent_repo\": {@@@", + "@@@STEP_LOG_LINE@json.output@ \"repository\": \"https://fake.org/parent_repo.git\", @@@", + "@@@STEP_LOG_LINE@json.output@ \"revision\": \"354f9075936db3e1e855a48538d2f8555b37ac5a\"@@@", + "@@@STEP_LOG_LINE@json.output@ }@@@", + "@@@STEP_LOG_LINE@json.output@ }, @@@", + "@@@STEP_LOG_LINE@json.output@ \"patch_failure\": false, @@@", + "@@@STEP_LOG_LINE@json.output@ \"patch_root\": \"skia\", @@@", + "@@@STEP_LOG_LINE@json.output@ \"properties\": {@@@", + "@@@STEP_LOG_LINE@json.output@ \"got_revision\": \"354f9075936db3e1e855a48538d2f8555b37ac5a\", @@@", + "@@@STEP_LOG_LINE@json.output@ \"got_revision_cp\": \"refs/heads/master@{#106773}\"@@@", + "@@@STEP_LOG_LINE@json.output@ }, @@@", + "@@@STEP_LOG_LINE@json.output@ \"root\": \"parent_repo\", @@@", + "@@@STEP_LOG_LINE@json.output@ \"source_manifest\": {@@@", + "@@@STEP_LOG_LINE@json.output@ \"directories\": {@@@", + "@@@STEP_LOG_LINE@json.output@ \"parent_repo\": {@@@", + "@@@STEP_LOG_LINE@json.output@ \"git_checkout\": 
{@@@", + "@@@STEP_LOG_LINE@json.output@ \"repo_url\": \"https://fake.org/parent_repo.git\", @@@", + "@@@STEP_LOG_LINE@json.output@ \"revision\": \"354f9075936db3e1e855a48538d2f8555b37ac5a\"@@@", + "@@@STEP_LOG_LINE@json.output@ }@@@", + "@@@STEP_LOG_LINE@json.output@ }@@@", + "@@@STEP_LOG_LINE@json.output@ }, @@@", + "@@@STEP_LOG_LINE@json.output@ \"version\": 0@@@", + "@@@STEP_LOG_LINE@json.output@ }, @@@", + "@@@STEP_LOG_LINE@json.output@ \"step_text\": \"Some step text\"@@@", + "@@@STEP_LOG_LINE@json.output@}@@@", + "@@@STEP_LOG_END@json.output@@@", + "@@@SET_BUILD_PROPERTY@got_revision@\"354f9075936db3e1e855a48538d2f8555b37ac5a\"@@@", + "@@@SET_BUILD_PROPERTY@got_revision_cp@\"refs/heads/master@{#106773}\"@@@" + ] + }, + { + "cmd": [ + "python", + "-u", + "RECIPE_MODULE[recipe_engine::file]/resources/fileutil.py", + "--json-output", + "/path/to/tmp/json", + "ensure-directory", + "--mode", + "0777", + "[START_DIR]/tmp" + ], + "infra_step": true, + "name": "makedirs tmp_dir" + }, + { + "name": "$result", + "recipe_result": null, + "status_code": 0 + } +] \ No newline at end of file diff --git a/infra/bots/recipe_modules/checkout/examples/full.expected/flutter_trybot.json b/infra/bots/recipe_modules/checkout/examples/full.expected/flutter_trybot.json new file mode 100644 index 0000000000..85cbb107ab --- /dev/null +++ b/infra/bots/recipe_modules/checkout/examples/full.expected/flutter_trybot.json @@ -0,0 +1,151 @@ +[ + { + "cmd": [ + "python", + "-u", + "RECIPE_MODULE[recipe_engine::file]/resources/fileutil.py", + "--json-output", + "/path/to/tmp/json", + "ensure-directory", + "--mode", + "0777", + "[START_DIR]/cache/work/flutter" + ], + "infra_step": true, + "name": "makedirs checkout_path" + }, + { + "cmd": [ + "python", + "-u", + "RECIPE_MODULE[recipe_engine::file]/resources/fileutil.py", + "--json-output", + "/path/to/tmp/json", + "remove", + "[START_DIR]/cache/work/flutter/.gclient_entries" + ], + "infra_step": true, + "name": "remove [START_DIR]/cache/work/flutter/.gclient_entries" + }, + { + "cmd": [ + "python", + "-u", + "RECIPE_MODULE[depot_tools::bot_update]/resources/bot_update.py", + "--spec-path", + "cache_dir = '[START_DIR]/cache/git'\nsolutions = [{'deps_file': '.DEPS.git', 'managed': True, 'name': 'src/flutter', 'url': 'https://github.com/flutter/engine.git'}]\ntarget_os = ['android']", + "--patch_root", + "src/third_party/skia", + "--revision_mapping_file", + "{\"got_flutter_revision\": \"src/flutter\", \"got_revision\": \"src/third_party/skia\"}", + "--git-cache-dir", + "[START_DIR]/cache/git", + "--cleanup-dir", + "[CLEANUP]/bot_update", + "--gerrit_repo", + "https://skia.googlesource.com/skia.git", + "--gerrit_ref", + "refs/changes/89/456789/12", + "--output_json", + "/path/to/tmp/json", + "--revision", + "src/flutter@origin/master", + "--revision", + "src/third_party/skia@abc123" + ], + "cwd": "[START_DIR]/cache/work/flutter", + "env_prefixes": { + "PATH": [ + "RECIPE_PACKAGE_REPO[depot_tools]" + ] + }, + "infra_step": true, + "name": "bot_update", + "~followup_annotations": [ + "@@@STEP_TEXT@Some step text@@@", + "@@@STEP_LOG_LINE@json.output@{@@@", + "@@@STEP_LOG_LINE@json.output@ \"did_run\": true, @@@", + "@@@STEP_LOG_LINE@json.output@ \"fixed_revisions\": {@@@", + "@@@STEP_LOG_LINE@json.output@ \"src/flutter\": \"origin/master\", @@@", + "@@@STEP_LOG_LINE@json.output@ \"src/third_party/skia\": \"abc123\"@@@", + "@@@STEP_LOG_LINE@json.output@ }, @@@", + "@@@STEP_LOG_LINE@json.output@ \"manifest\": {@@@", + "@@@STEP_LOG_LINE@json.output@ \"src/flutter\": {@@@", + 
"@@@STEP_LOG_LINE@json.output@ \"repository\": \"https://fake.org/src/flutter.git\", @@@", + "@@@STEP_LOG_LINE@json.output@ \"revision\": \"9221bca00ddbd888260084def81f09543281b952\"@@@", + "@@@STEP_LOG_LINE@json.output@ }, @@@", + "@@@STEP_LOG_LINE@json.output@ \"src/third_party/skia\": {@@@", + "@@@STEP_LOG_LINE@json.output@ \"repository\": \"https://fake.org/src/third_party/skia.git\", @@@", + "@@@STEP_LOG_LINE@json.output@ \"revision\": \"d9c4a4d173a97ef2832b65636b4200bb93ea8ee1\"@@@", + "@@@STEP_LOG_LINE@json.output@ }@@@", + "@@@STEP_LOG_LINE@json.output@ }, @@@", + "@@@STEP_LOG_LINE@json.output@ \"patch_failure\": false, @@@", + "@@@STEP_LOG_LINE@json.output@ \"patch_root\": \"src/third_party/skia\", @@@", + "@@@STEP_LOG_LINE@json.output@ \"properties\": {@@@", + "@@@STEP_LOG_LINE@json.output@ \"got_flutter_revision\": \"9221bca00ddbd888260084def81f09543281b952\", @@@", + "@@@STEP_LOG_LINE@json.output@ \"got_flutter_revision_cp\": \"refs/heads/master@{#84512}\", @@@", + "@@@STEP_LOG_LINE@json.output@ \"got_revision\": \"d9c4a4d173a97ef2832b65636b4200bb93ea8ee1\", @@@", + "@@@STEP_LOG_LINE@json.output@ \"got_revision_cp\": \"refs/heads/master@{#143121}\"@@@", + "@@@STEP_LOG_LINE@json.output@ }, @@@", + "@@@STEP_LOG_LINE@json.output@ \"root\": \"src/flutter\", @@@", + "@@@STEP_LOG_LINE@json.output@ \"source_manifest\": {@@@", + "@@@STEP_LOG_LINE@json.output@ \"directories\": {@@@", + "@@@STEP_LOG_LINE@json.output@ \"src/flutter\": {@@@", + "@@@STEP_LOG_LINE@json.output@ \"git_checkout\": {@@@", + "@@@STEP_LOG_LINE@json.output@ \"repo_url\": \"https://fake.org/src/flutter.git\", @@@", + "@@@STEP_LOG_LINE@json.output@ \"revision\": \"9221bca00ddbd888260084def81f09543281b952\"@@@", + "@@@STEP_LOG_LINE@json.output@ }@@@", + "@@@STEP_LOG_LINE@json.output@ }, @@@", + "@@@STEP_LOG_LINE@json.output@ \"src/third_party/skia\": {@@@", + "@@@STEP_LOG_LINE@json.output@ \"git_checkout\": {@@@", + "@@@STEP_LOG_LINE@json.output@ \"repo_url\": \"https://fake.org/src/third_party/skia.git\", @@@", + "@@@STEP_LOG_LINE@json.output@ \"revision\": \"d9c4a4d173a97ef2832b65636b4200bb93ea8ee1\"@@@", + "@@@STEP_LOG_LINE@json.output@ }@@@", + "@@@STEP_LOG_LINE@json.output@ }@@@", + "@@@STEP_LOG_LINE@json.output@ }, @@@", + "@@@STEP_LOG_LINE@json.output@ \"version\": 0@@@", + "@@@STEP_LOG_LINE@json.output@ }, @@@", + "@@@STEP_LOG_LINE@json.output@ \"step_text\": \"Some step text\"@@@", + "@@@STEP_LOG_LINE@json.output@}@@@", + "@@@STEP_LOG_END@json.output@@@", + "@@@SET_BUILD_PROPERTY@got_flutter_revision@\"9221bca00ddbd888260084def81f09543281b952\"@@@", + "@@@SET_BUILD_PROPERTY@got_flutter_revision_cp@\"refs/heads/master@{#84512}\"@@@", + "@@@SET_BUILD_PROPERTY@got_revision@\"d9c4a4d173a97ef2832b65636b4200bb93ea8ee1\"@@@", + "@@@SET_BUILD_PROPERTY@got_revision_cp@\"refs/heads/master@{#143121}\"@@@" + ] + }, + { + "cmd": [ + "python", + "-u", + "RECIPE_PACKAGE_REPO[depot_tools]/gclient.py", + "runhooks" + ], + "cwd": "[START_DIR]/cache/work/flutter", + "env": { + "DEPOT_TOOLS_UPDATE": "0", + "PATH": ":RECIPE_PACKAGE_REPO[depot_tools]" + }, + "name": "gclient runhooks" + }, + { + "cmd": [ + "python", + "-u", + "RECIPE_MODULE[recipe_engine::file]/resources/fileutil.py", + "--json-output", + "/path/to/tmp/json", + "ensure-directory", + "--mode", + "0777", + "[START_DIR]/tmp" + ], + "infra_step": true, + "name": "makedirs tmp_dir" + }, + { + "name": "$result", + "recipe_result": null, + "status_code": 0 + } +] \ No newline at end of file diff --git 
a/infra/bots/recipe_modules/checkout/examples/full.expected/parent_revision_trybot.json b/infra/bots/recipe_modules/checkout/examples/full.expected/parent_revision_trybot.json new file mode 100644 index 0000000000..2a1eb4cb64 --- /dev/null +++ b/infra/bots/recipe_modules/checkout/examples/full.expected/parent_revision_trybot.json @@ -0,0 +1,116 @@ +[ + { + "cmd": [ + "python", + "-u", + "RECIPE_MODULE[recipe_engine::file]/resources/fileutil.py", + "--json-output", + "/path/to/tmp/json", + "ensure-directory", + "--mode", + "0777", + "[START_DIR]/cache/work" + ], + "infra_step": true, + "name": "makedirs checkout_path" + }, + { + "cmd": [ + "python", + "-u", + "RECIPE_MODULE[recipe_engine::file]/resources/fileutil.py", + "--json-output", + "/path/to/tmp/json", + "remove", + "[START_DIR]/cache/work/.gclient_entries" + ], + "infra_step": true, + "name": "remove [START_DIR]/cache/work/.gclient_entries" + }, + { + "cmd": [ + "python", + "-u", + "RECIPE_MODULE[depot_tools::bot_update]/resources/bot_update.py", + "--spec-path", + "cache_dir = '[START_DIR]/cache/git'\nsolutions = [{'deps_file': '.DEPS.git', 'managed': False, 'name': 'skia', 'url': 'https://skia.googlesource.com/skia.git'}]", + "--patch_root", + "skia", + "--revision_mapping_file", + "{\"got_revision\": \"skia\"}", + "--git-cache-dir", + "[START_DIR]/cache/git", + "--cleanup-dir", + "[CLEANUP]/bot_update", + "--output_json", + "/path/to/tmp/json", + "--revision", + "skia@abc123" + ], + "cwd": "[START_DIR]/cache/work", + "env_prefixes": { + "PATH": [ + "RECIPE_PACKAGE_REPO[depot_tools]" + ] + }, + "infra_step": true, + "name": "bot_update (without patch)", + "~followup_annotations": [ + "@@@STEP_TEXT@Some step text@@@", + "@@@STEP_LOG_LINE@json.output@{@@@", + "@@@STEP_LOG_LINE@json.output@ \"did_run\": true, @@@", + "@@@STEP_LOG_LINE@json.output@ \"fixed_revisions\": {@@@", + "@@@STEP_LOG_LINE@json.output@ \"skia\": \"abc123\"@@@", + "@@@STEP_LOG_LINE@json.output@ }, @@@", + "@@@STEP_LOG_LINE@json.output@ \"manifest\": {@@@", + "@@@STEP_LOG_LINE@json.output@ \"skia\": {@@@", + "@@@STEP_LOG_LINE@json.output@ \"repository\": \"https://fake.org/skia.git\", @@@", + "@@@STEP_LOG_LINE@json.output@ \"revision\": \"9046e2e693bb92a76e972b694580e5d17ad10748\"@@@", + "@@@STEP_LOG_LINE@json.output@ }@@@", + "@@@STEP_LOG_LINE@json.output@ }, @@@", + "@@@STEP_LOG_LINE@json.output@ \"patch_failure\": false, @@@", + "@@@STEP_LOG_LINE@json.output@ \"patch_root\": \"skia\", @@@", + "@@@STEP_LOG_LINE@json.output@ \"properties\": {@@@", + "@@@STEP_LOG_LINE@json.output@ \"got_revision\": \"9046e2e693bb92a76e972b694580e5d17ad10748\", @@@", + "@@@STEP_LOG_LINE@json.output@ \"got_revision_cp\": \"refs/heads/master@{#164710}\"@@@", + "@@@STEP_LOG_LINE@json.output@ }, @@@", + "@@@STEP_LOG_LINE@json.output@ \"root\": \"skia\", @@@", + "@@@STEP_LOG_LINE@json.output@ \"source_manifest\": {@@@", + "@@@STEP_LOG_LINE@json.output@ \"directories\": {@@@", + "@@@STEP_LOG_LINE@json.output@ \"skia\": {@@@", + "@@@STEP_LOG_LINE@json.output@ \"git_checkout\": {@@@", + "@@@STEP_LOG_LINE@json.output@ \"repo_url\": \"https://fake.org/skia.git\", @@@", + "@@@STEP_LOG_LINE@json.output@ \"revision\": \"9046e2e693bb92a76e972b694580e5d17ad10748\"@@@", + "@@@STEP_LOG_LINE@json.output@ }@@@", + "@@@STEP_LOG_LINE@json.output@ }@@@", + "@@@STEP_LOG_LINE@json.output@ }, @@@", + "@@@STEP_LOG_LINE@json.output@ \"version\": 0@@@", + "@@@STEP_LOG_LINE@json.output@ }, @@@", + "@@@STEP_LOG_LINE@json.output@ \"step_text\": \"Some step text\"@@@", + "@@@STEP_LOG_LINE@json.output@}@@@", + 
"@@@STEP_LOG_END@json.output@@@", + "@@@SET_BUILD_PROPERTY@got_revision@\"9046e2e693bb92a76e972b694580e5d17ad10748\"@@@", + "@@@SET_BUILD_PROPERTY@got_revision_cp@\"refs/heads/master@{#164710}\"@@@" + ] + }, + { + "cmd": [ + "python", + "-u", + "RECIPE_MODULE[recipe_engine::file]/resources/fileutil.py", + "--json-output", + "/path/to/tmp/json", + "ensure-directory", + "--mode", + "0777", + "[START_DIR]/tmp" + ], + "infra_step": true, + "name": "makedirs tmp_dir" + }, + { + "name": "$result", + "recipe_result": null, + "status_code": 0 + } +] \ No newline at end of file diff --git a/infra/bots/recipe_modules/checkout/examples/full.expected/test.json b/infra/bots/recipe_modules/checkout/examples/full.expected/test.json new file mode 100644 index 0000000000..ef0d4965bc --- /dev/null +++ b/infra/bots/recipe_modules/checkout/examples/full.expected/test.json @@ -0,0 +1,120 @@ +[ + { + "cmd": [ + "python", + "-u", + "RECIPE_MODULE[recipe_engine::file]/resources/fileutil.py", + "--json-output", + "/path/to/tmp/json", + "ensure-directory", + "--mode", + "0777", + "[START_DIR]/cache/work" + ], + "infra_step": true, + "name": "makedirs checkout_path" + }, + { + "cmd": [ + "python", + "-u", + "RECIPE_MODULE[recipe_engine::file]/resources/fileutil.py", + "--json-output", + "/path/to/tmp/json", + "remove", + "[START_DIR]/cache/work/.gclient_entries" + ], + "infra_step": true, + "name": "remove [START_DIR]/cache/work/.gclient_entries" + }, + { + "cmd": [ + "python", + "-u", + "RECIPE_MODULE[depot_tools::bot_update]/resources/bot_update.py", + "--spec-path", + "cache_dir = '[START_DIR]/cache/git'\nsolutions = [{'deps_file': '.DEPS.git', 'managed': False, 'name': 'skia', 'url': 'https://skia.googlesource.com/skia.git'}]", + "--patch_root", + "skia", + "--revision_mapping_file", + "{\"got_revision\": \"skia\"}", + "--git-cache-dir", + "[START_DIR]/cache/git", + "--cleanup-dir", + "[CLEANUP]/bot_update", + "--gerrit_repo", + "https://skia.googlesource.com/skia.git", + "--gerrit_ref", + "refs/changes/89/456789/12", + "--output_json", + "/path/to/tmp/json", + "--revision", + "skia@abc123" + ], + "cwd": "[START_DIR]/cache/work", + "env_prefixes": { + "PATH": [ + "RECIPE_PACKAGE_REPO[depot_tools]" + ] + }, + "infra_step": true, + "name": "bot_update", + "~followup_annotations": [ + "@@@STEP_TEXT@Some step text@@@", + "@@@STEP_LOG_LINE@json.output@{@@@", + "@@@STEP_LOG_LINE@json.output@ \"did_run\": true, @@@", + "@@@STEP_LOG_LINE@json.output@ \"fixed_revisions\": {@@@", + "@@@STEP_LOG_LINE@json.output@ \"skia\": \"abc123\"@@@", + "@@@STEP_LOG_LINE@json.output@ }, @@@", + "@@@STEP_LOG_LINE@json.output@ \"manifest\": {@@@", + "@@@STEP_LOG_LINE@json.output@ \"skia\": {@@@", + "@@@STEP_LOG_LINE@json.output@ \"repository\": \"https://fake.org/skia.git\", @@@", + "@@@STEP_LOG_LINE@json.output@ \"revision\": \"9046e2e693bb92a76e972b694580e5d17ad10748\"@@@", + "@@@STEP_LOG_LINE@json.output@ }@@@", + "@@@STEP_LOG_LINE@json.output@ }, @@@", + "@@@STEP_LOG_LINE@json.output@ \"patch_failure\": false, @@@", + "@@@STEP_LOG_LINE@json.output@ \"patch_root\": \"skia\", @@@", + "@@@STEP_LOG_LINE@json.output@ \"properties\": {@@@", + "@@@STEP_LOG_LINE@json.output@ \"got_revision\": \"9046e2e693bb92a76e972b694580e5d17ad10748\", @@@", + "@@@STEP_LOG_LINE@json.output@ \"got_revision_cp\": \"refs/heads/master@{#164710}\"@@@", + "@@@STEP_LOG_LINE@json.output@ }, @@@", + "@@@STEP_LOG_LINE@json.output@ \"root\": \"skia\", @@@", + "@@@STEP_LOG_LINE@json.output@ \"source_manifest\": {@@@", + "@@@STEP_LOG_LINE@json.output@ \"directories\": 
{@@@", + "@@@STEP_LOG_LINE@json.output@ \"skia\": {@@@", + "@@@STEP_LOG_LINE@json.output@ \"git_checkout\": {@@@", + "@@@STEP_LOG_LINE@json.output@ \"repo_url\": \"https://fake.org/skia.git\", @@@", + "@@@STEP_LOG_LINE@json.output@ \"revision\": \"9046e2e693bb92a76e972b694580e5d17ad10748\"@@@", + "@@@STEP_LOG_LINE@json.output@ }@@@", + "@@@STEP_LOG_LINE@json.output@ }@@@", + "@@@STEP_LOG_LINE@json.output@ }, @@@", + "@@@STEP_LOG_LINE@json.output@ \"version\": 0@@@", + "@@@STEP_LOG_LINE@json.output@ }, @@@", + "@@@STEP_LOG_LINE@json.output@ \"step_text\": \"Some step text\"@@@", + "@@@STEP_LOG_LINE@json.output@}@@@", + "@@@STEP_LOG_END@json.output@@@", + "@@@SET_BUILD_PROPERTY@got_revision@\"9046e2e693bb92a76e972b694580e5d17ad10748\"@@@", + "@@@SET_BUILD_PROPERTY@got_revision_cp@\"refs/heads/master@{#164710}\"@@@" + ] + }, + { + "cmd": [ + "python", + "-u", + "RECIPE_MODULE[recipe_engine::file]/resources/fileutil.py", + "--json-output", + "/path/to/tmp/json", + "ensure-directory", + "--mode", + "0777", + "[START_DIR]/tmp" + ], + "infra_step": true, + "name": "makedirs tmp_dir" + }, + { + "name": "$result", + "recipe_result": null, + "status_code": 0 + } +] \ No newline at end of file diff --git a/infra/bots/recipe_modules/checkout/examples/full.py b/infra/bots/recipe_modules/checkout/examples/full.py new file mode 100644 index 0000000000..7e7f436783 --- /dev/null +++ b/infra/bots/recipe_modules/checkout/examples/full.py @@ -0,0 +1,143 @@ +# Copyright 2017 The Chromium Authors. All rights reserved. +# Use of this source code is governed by a BSD-style license that can be +# found in the LICENSE file. + + +DEPS = [ + 'checkout', + 'recipe_engine/file', + 'recipe_engine/path', + 'recipe_engine/properties', + 'run', + 'vars', +] + + +def RunSteps(api): + api.vars.setup() + + bot_update = True + if 'NoDEPS' in api.properties['buildername']: + bot_update = False + + if bot_update: + checkout_root = api.checkout.default_checkout_root + if 'Flutter' in api.vars.builder_name: + checkout_root = checkout_root.join('flutter') + api.checkout.bot_update(checkout_root=checkout_root) + else: + api.checkout.git(checkout_root=api.path['start_dir']) + api.file.ensure_directory('makedirs tmp_dir', api.vars.tmp_dir) + + +TEST_BUILDERS = [ + 'Build-Win-Clang-x86_64-Release-ParentRevision', + 'Build-Mac-Clang-x86_64-Debug-CommandBuffer', + 'Housekeeper-Weekly-RecreateSKPs', +] + + +def GenTests(api): + for buildername in TEST_BUILDERS: + yield ( + api.test(buildername) + + api.properties(buildername=buildername, + repository='https://skia.googlesource.com/skia.git', + revision='abc123', + path_config='kitchen', + swarm_out_dir='[SWARM_OUT_DIR]') + ) + + buildername = 'Build-Win-Clang-x86_64-Release-Vulkan' + yield ( + api.test('test') + + api.properties(buildername=buildername, + repository='https://skia.googlesource.com/skia.git', + revision='abc123', + path_config='kitchen', + swarm_out_dir='[SWARM_OUT_DIR]') + + api.properties(patch_storage='gerrit') + + api.properties.tryserver( + buildername=buildername, + gerrit_project='skia', + gerrit_url='https://skia-review.googlesource.com/', + ) + ) + + buildername = 'Build-Win-Clang-x86_64-Release-ParentRevision' + yield ( + api.test('parent_revision_trybot') + + api.properties(buildername=buildername, + repository='https://skia.googlesource.com/skia.git', + revision='abc123', + path_config='kitchen', + swarm_out_dir='[SWARM_OUT_DIR]', + patch_issue=500, + patch_set=1, + patch_storage='gerrit') + + api.properties.tryserver( + buildername=buildername, + 
gerrit_project='skia', + gerrit_url='https://skia-review.googlesource.com/', + ) + ) + + buildername = 'Build-Debian9-GCC-x86_64-Release-Flutter_Android' + yield ( + api.test('flutter_trybot') + + api.properties( + repository='https://skia.googlesource.com/skia.git', + buildername=buildername, + path_config='kitchen', + swarm_out_dir='[SWARM_OUT_DIR]', + revision='abc123', + patch_issue=500, + patch_set=1, + patch_storage='gerrit') + + api.properties.tryserver( + buildername=buildername, + gerrit_project='skia', + gerrit_url='https://skia-review.googlesource.com/', + ) + + api.path.exists( + api.path['start_dir'].join('tmp', 'uninteresting_hashes.txt') + ) + ) + + builder = 'Build-Debian9-Clang-x86_64-Release-NoDEPS' + yield ( + api.test(builder) + + api.properties(buildername=builder, + repository='https://skia.googlesource.com/skia.git', + revision='abc123', + path_config='kitchen', + swarm_out_dir='[SWARM_OUT_DIR]', + patch_issue=500, + patch_repo='https://skia.googlesource.com/skia.git', + patch_set=1, + patch_storage='gerrit') + + api.path.exists(api.path['start_dir'].join('skp_output')) + ) + + buildername = 'Build-Debian9-GCC-x86_64-Release' + yield ( + api.test('cross_repo_trybot') + + api.properties( + repository='https://skia.googlesource.com/parent_repo.git', + buildername=buildername, + path_config='kitchen', + swarm_out_dir='[SWARM_OUT_DIR]', + revision='abc123', + patch_issue=500, + patch_repo='https://skia.googlesource.com/skia.git', + patch_set=1, + patch_storage='gerrit') + + api.properties.tryserver( + buildername=buildername, + gerrit_project='skia', + gerrit_url='https://skia-review.googlesource.com/', + ) + + api.path.exists( + api.path['start_dir'].join('tmp', 'uninteresting_hashes.txt') + ) + ) diff --git a/infra/bots/recipe_modules/core/__init__.py b/infra/bots/recipe_modules/core/__init__.py deleted file mode 100644 index 29f438e3a6..0000000000 --- a/infra/bots/recipe_modules/core/__init__.py +++ /dev/null @@ -1,19 +0,0 @@ -# Copyright 2014 The Chromium Authors. All rights reserved. -# Use of this source code is governed by a BSD-style license that can be -# found in the LICENSE file. - -DEPS = [ - 'depot_tools/bot_update', - 'depot_tools/gclient', - 'depot_tools/git', - 'depot_tools/tryserver', - 'flavor', - 'recipe_engine/context', - 'recipe_engine/file', - 'recipe_engine/path', - 'recipe_engine/properties', - 'recipe_engine/python', - 'recipe_engine/step', - 'run', - 'vars', -] diff --git a/infra/bots/recipe_modules/core/api.py b/infra/bots/recipe_modules/core/api.py deleted file mode 100644 index f943b865b2..0000000000 --- a/infra/bots/recipe_modules/core/api.py +++ /dev/null @@ -1,155 +0,0 @@ -# Copyright 2014 The Chromium Authors. All rights reserved. -# Use of this source code is governed by a BSD-style license that can be -# found in the LICENSE file. 
- - -# pylint: disable=W0201 - - -import json -import os -import re -import sys - -from recipe_engine import recipe_api -from recipe_engine import config_types - - -class SkiaApi(recipe_api.RecipeApi): - - @property - def default_checkout_root(self): - """The default location for cached persistent checkouts.""" - return self.m.vars.cache_dir.join('work') - - def patch_ref(self, issue, patchset): - """Build a ref for the given issue and patchset.""" - return 'refs/changes/%s/%s/%s' % (issue[-2:], issue, patchset) - - def checkout_git(self, checkout_root): - """Run the steps to perform a pure-git checkout without DEPS.""" - skia_dir = checkout_root.join('skia') - self.m.git.checkout( - self.m.properties['repository'], dir_path=skia_dir, - ref=self.m.properties['revision'], submodules=False) - if self.m.vars.is_trybot: - ref = self.patch_ref(str(self.m.vars.issue), str(self.m.vars.patchset)) - self.m.git('fetch', 'origin', ref) - self.m.git('checkout', 'FETCH_HEAD') - self.m.git('rebase', self.m.properties['revision']) - return self.m.properties['revision'] - - def checkout_bot_update(self, checkout_root, gclient_cache=None): - """Run the steps to obtain a checkout using bot_update.""" - if not gclient_cache: - gclient_cache = self.m.vars.cache_dir.join('git') - - cfg_kwargs = {} - is_parent_revision = 'ParentRevision' in self.m.vars.extra_tokens - - # Use a persistent gclient cache for Swarming. - cfg_kwargs['CACHE_DIR'] = gclient_cache - - # Create the checkout path if necessary. - if not self.m.path.exists(checkout_root): - self.m.file.ensure_directory('makedirs checkout_path', checkout_root) - - # Initial cleanup. - gclient_cfg = self.m.gclient.make_config(**cfg_kwargs) - - # Some bots also require a checkout of chromium. - need_chromium_checkout = False - gclient_env = {'DEPOT_TOOLS_UPDATE': '0'} - if 'CommandBuffer' in self.m.properties['buildername']: - need_chromium_checkout = True - gclient_env['GYP_CHROMIUM_NO_ACTION'] = '0' - if 'RecreateSKPs' in self.m.properties['buildername']: - need_chromium_checkout = True - gclient_env['CPPFLAGS'] = '-DSK_ALLOW_CROSSPROCESS_PICTUREIMAGEFILTERS=1' - - # Add chromium first because of skbug.com/7917. - if need_chromium_checkout: - chromium = gclient_cfg.solutions.add() - chromium.name = 'src' - chromium.managed = False - chromium.url = 'https://chromium.googlesource.com/chromium/src.git' - chromium.revision = 'origin/master' - - main_repo = self.m.properties['repository'] - - need_flutter_checkout = 'Flutter' in self.m.properties['buildername'] - if need_flutter_checkout: - main_repo = 'https://github.com/flutter/engine.git' - main_name = self.m.path.basename(main_repo) - if main_name.endswith('.git'): - main_name = main_name[:-len('.git')] - # Special case for flutter because it seems to need a very specific - # directory structure to successfully build. 
- if need_flutter_checkout and main_name == 'engine': - main_name = 'src/flutter' - main = gclient_cfg.solutions.add() - main.name = main_name - main.managed = False - main.url = main_repo - main.revision = self.m.properties.get('revision') or 'origin/master' - m = gclient_cfg.got_revision_mapping - m[main_name] = 'got_revision' - patch_root = main_name - patch_repo = main.url - if self.m.properties.get('patch_repo'): - patch_repo = self.m.properties['patch_repo'] - patch_root = patch_repo.split('/')[-1] - if patch_root.endswith('.git'): - patch_root = patch_root[:-4] - - if need_flutter_checkout: - # Skia is a DEP of Flutter; the 'revision' property is a Skia revision, - # and any patch should be applied to Skia, not Flutter. - main.revision = 'origin/master' - main.managed = True - m[main_name] = 'got_flutter_revision' - if 'Android' in self.m.vars.extra_tokens: - gclient_cfg.target_os.add('android') - - skia_dep_path = 'src/third_party/skia' - gclient_cfg.patch_projects['skia'] = (skia_dep_path, 'HEAD') - gclient_cfg.revisions[skia_dep_path] = self.m.properties['revision'] - m[skia_dep_path] = 'got_revision' - patch_repo = 'https://skia.googlesource.com/skia.git' - patch_root = skia_dep_path - - # TODO(rmistry): Remove the below block after there is a solution for - # crbug.com/616443 - entries_file = checkout_root.join('.gclient_entries') - if self.m.path.exists(entries_file) or self._test_data.enabled: - self.m.file.remove('remove %s' % entries_file, - entries_file) - - # Run bot_update. - if patch_repo != self.m.properties['repository']: - # TODO(borenet): bot_update uses the 'repository' property to determine - # which repo the patch should come from. This conflicts with our usage of - # the same property to determine which root repo to check out, which may - # not be the same as the repository the patch comes from, for which we use - # the patch_repo property. Remove this hack by refactoring our checkout - # code and properties to agree with bot_update. - self.m.bot_update._repository = patch_repo - - if not self.m.vars.is_trybot and is_parent_revision: - main.revision = main.revision + '^' - - self.m.gclient.c = gclient_cfg - with self.m.context(cwd=checkout_root): - update_step = self.m.bot_update.ensure_checkout( - patch_root=patch_root, - # The logic in ensure_checkout for this arg is fairly naive, so if - # patch=False, we'll see "... (without patch)" in the step names, even - # for non-trybot runs, which is misleading and confusing. Therefore, - # always specify patch=True for non-trybot runs. 
- patch=not (self.m.vars.is_trybot and is_parent_revision) - ) - - if need_chromium_checkout or need_flutter_checkout: - with self.m.context(cwd=checkout_root, env=gclient_env): - self.m.gclient.runhooks() - return update_step.presentation.properties['got_revision'] diff --git a/infra/bots/recipe_modules/core/examples/full.expected/Build-Debian9-Clang-x86_64-Release-NoDEPS.json b/infra/bots/recipe_modules/core/examples/full.expected/Build-Debian9-Clang-x86_64-Release-NoDEPS.json deleted file mode 100644 index ca21fe033f..0000000000 --- a/infra/bots/recipe_modules/core/examples/full.expected/Build-Debian9-Clang-x86_64-Release-NoDEPS.json +++ /dev/null @@ -1,118 +0,0 @@ -[ - { - "cmd": [ - "python", - "-u", - "RECIPE_MODULE[depot_tools::git]/resources/git_setup.py", - "--path", - "[START_DIR]/skia", - "--url", - "https://skia.googlesource.com/skia.git" - ], - "name": "git setup" - }, - { - "cmd": [ - "git", - "retry", - "fetch", - "origin", - "abc123", - "--progress" - ], - "cwd": "[START_DIR]/skia", - "env": { - "PATH": "RECIPE_PACKAGE_REPO[depot_tools]:" - }, - "infra_step": true, - "name": "git fetch" - }, - { - "cmd": [ - "git", - "checkout", - "-f", - "FETCH_HEAD" - ], - "cwd": "[START_DIR]/skia", - "infra_step": true, - "name": "git checkout" - }, - { - "cmd": [ - "git", - "rev-parse", - "HEAD" - ], - "cwd": "[START_DIR]/skia", - "infra_step": true, - "name": "read revision", - "stdout": "/path/to/tmp/", - "~followup_annotations": [ - "@@@STEP_TEXT@
checked out 'deadbeef'
@@@" - ] - }, - { - "cmd": [ - "git", - "clean", - "-f", - "-d", - "-x" - ], - "cwd": "[START_DIR]/skia", - "infra_step": true, - "name": "git clean" - }, - { - "cmd": [ - "git", - "fetch", - "origin", - "refs/changes/00/500/1" - ], - "cwd": "[START_DIR]/skia", - "infra_step": true, - "name": "git fetch (2)" - }, - { - "cmd": [ - "git", - "checkout", - "FETCH_HEAD" - ], - "cwd": "[START_DIR]/skia", - "infra_step": true, - "name": "git checkout (2)" - }, - { - "cmd": [ - "git", - "rebase", - "abc123" - ], - "cwd": "[START_DIR]/skia", - "infra_step": true, - "name": "git rebase" - }, - { - "cmd": [ - "python", - "-u", - "RECIPE_MODULE[recipe_engine::file]/resources/fileutil.py", - "--json-output", - "/path/to/tmp/json", - "ensure-directory", - "--mode", - "0777", - "[START_DIR]/tmp" - ], - "infra_step": true, - "name": "makedirs tmp_dir" - }, - { - "name": "$result", - "recipe_result": null, - "status_code": 0 - } -] \ No newline at end of file diff --git a/infra/bots/recipe_modules/core/examples/full.expected/Build-Mac-Clang-x86_64-Debug-CommandBuffer.json b/infra/bots/recipe_modules/core/examples/full.expected/Build-Mac-Clang-x86_64-Debug-CommandBuffer.json deleted file mode 100644 index d9db2bbf14..0000000000 --- a/infra/bots/recipe_modules/core/examples/full.expected/Build-Mac-Clang-x86_64-Debug-CommandBuffer.json +++ /dev/null @@ -1,134 +0,0 @@ -[ - { - "cmd": [ - "python", - "-u", - "RECIPE_MODULE[recipe_engine::file]/resources/fileutil.py", - "--json-output", - "/path/to/tmp/json", - "ensure-directory", - "--mode", - "0777", - "[START_DIR]/cache/work" - ], - "infra_step": true, - "name": "makedirs checkout_path" - }, - { - "cmd": [ - "python", - "-u", - "RECIPE_MODULE[recipe_engine::file]/resources/fileutil.py", - "--json-output", - "/path/to/tmp/json", - "remove", - "[START_DIR]/cache/work/.gclient_entries" - ], - "infra_step": true, - "name": "remove [START_DIR]/cache/work/.gclient_entries" - }, - { - "cmd": [ - "python", - "-u", - "RECIPE_MODULE[depot_tools::bot_update]/resources/bot_update.py", - "--spec-path", - "cache_dir = '[START_DIR]/cache/git'\nsolutions = [{'deps_file': '.DEPS.git', 'managed': False, 'name': 'src', 'url': 'https://chromium.googlesource.com/chromium/src.git'}, {'deps_file': '.DEPS.git', 'managed': False, 'name': 'skia', 'url': 'https://skia.googlesource.com/skia.git'}]", - "--patch_root", - "skia", - "--revision_mapping_file", - "{\"got_revision\": \"skia\"}", - "--git-cache-dir", - "[START_DIR]/cache/git", - "--cleanup-dir", - "[CLEANUP]/bot_update", - "--output_json", - "/path/to/tmp/json", - "--revision", - "skia@abc123", - "--revision", - "src@origin/master" - ], - "cwd": "[START_DIR]/cache/work", - "env_prefixes": { - "PATH": [ - "RECIPE_PACKAGE_REPO[depot_tools]" - ] - }, - "infra_step": true, - "name": "bot_update", - "~followup_annotations": [ - "@@@STEP_TEXT@Some step text@@@", - "@@@STEP_LOG_LINE@json.output@{@@@", - "@@@STEP_LOG_LINE@json.output@ \"did_run\": true, @@@", - "@@@STEP_LOG_LINE@json.output@ \"fixed_revisions\": {@@@", - "@@@STEP_LOG_LINE@json.output@ \"skia\": \"abc123\", @@@", - "@@@STEP_LOG_LINE@json.output@ \"src\": \"origin/master\"@@@", - "@@@STEP_LOG_LINE@json.output@ }, @@@", - "@@@STEP_LOG_LINE@json.output@ \"manifest\": {@@@", - "@@@STEP_LOG_LINE@json.output@ \"skia\": {@@@", - "@@@STEP_LOG_LINE@json.output@ \"repository\": \"https://fake.org/skia.git\", @@@", - "@@@STEP_LOG_LINE@json.output@ \"revision\": \"9046e2e693bb92a76e972b694580e5d17ad10748\"@@@", - "@@@STEP_LOG_LINE@json.output@ }@@@", - 
"@@@STEP_LOG_LINE@json.output@ }, @@@", - "@@@STEP_LOG_LINE@json.output@ \"patch_failure\": false, @@@", - "@@@STEP_LOG_LINE@json.output@ \"patch_root\": \"skia\", @@@", - "@@@STEP_LOG_LINE@json.output@ \"properties\": {@@@", - "@@@STEP_LOG_LINE@json.output@ \"got_revision\": \"9046e2e693bb92a76e972b694580e5d17ad10748\", @@@", - "@@@STEP_LOG_LINE@json.output@ \"got_revision_cp\": \"refs/heads/master@{#164710}\"@@@", - "@@@STEP_LOG_LINE@json.output@ }, @@@", - "@@@STEP_LOG_LINE@json.output@ \"root\": \"src\", @@@", - "@@@STEP_LOG_LINE@json.output@ \"source_manifest\": {@@@", - "@@@STEP_LOG_LINE@json.output@ \"directories\": {@@@", - "@@@STEP_LOG_LINE@json.output@ \"skia\": {@@@", - "@@@STEP_LOG_LINE@json.output@ \"git_checkout\": {@@@", - "@@@STEP_LOG_LINE@json.output@ \"repo_url\": \"https://fake.org/skia.git\", @@@", - "@@@STEP_LOG_LINE@json.output@ \"revision\": \"9046e2e693bb92a76e972b694580e5d17ad10748\"@@@", - "@@@STEP_LOG_LINE@json.output@ }@@@", - "@@@STEP_LOG_LINE@json.output@ }@@@", - "@@@STEP_LOG_LINE@json.output@ }, @@@", - "@@@STEP_LOG_LINE@json.output@ \"version\": 0@@@", - "@@@STEP_LOG_LINE@json.output@ }, @@@", - "@@@STEP_LOG_LINE@json.output@ \"step_text\": \"Some step text\"@@@", - "@@@STEP_LOG_LINE@json.output@}@@@", - "@@@STEP_LOG_END@json.output@@@", - "@@@SET_BUILD_PROPERTY@got_revision@\"9046e2e693bb92a76e972b694580e5d17ad10748\"@@@", - "@@@SET_BUILD_PROPERTY@got_revision_cp@\"refs/heads/master@{#164710}\"@@@" - ] - }, - { - "cmd": [ - "python", - "-u", - "RECIPE_PACKAGE_REPO[depot_tools]/gclient.py", - "runhooks" - ], - "cwd": "[START_DIR]/cache/work", - "env": { - "DEPOT_TOOLS_UPDATE": "0", - "GYP_CHROMIUM_NO_ACTION": "0", - "PATH": ":RECIPE_PACKAGE_REPO[depot_tools]" - }, - "name": "gclient runhooks" - }, - { - "cmd": [ - "python", - "-u", - "RECIPE_MODULE[recipe_engine::file]/resources/fileutil.py", - "--json-output", - "/path/to/tmp/json", - "ensure-directory", - "--mode", - "0777", - "[START_DIR]/tmp" - ], - "infra_step": true, - "name": "makedirs tmp_dir" - }, - { - "name": "$result", - "recipe_result": null, - "status_code": 0 - } -] \ No newline at end of file diff --git a/infra/bots/recipe_modules/core/examples/full.expected/Build-Win-Clang-x86_64-Release-ParentRevision.json b/infra/bots/recipe_modules/core/examples/full.expected/Build-Win-Clang-x86_64-Release-ParentRevision.json deleted file mode 100644 index ffce3b2ee9..0000000000 --- a/infra/bots/recipe_modules/core/examples/full.expected/Build-Win-Clang-x86_64-Release-ParentRevision.json +++ /dev/null @@ -1,116 +0,0 @@ -[ - { - "cmd": [ - "python", - "-u", - "RECIPE_MODULE[recipe_engine::file]/resources/fileutil.py", - "--json-output", - "/path/to/tmp/json", - "ensure-directory", - "--mode", - "0777", - "[START_DIR]/cache/work" - ], - "infra_step": true, - "name": "makedirs checkout_path" - }, - { - "cmd": [ - "python", - "-u", - "RECIPE_MODULE[recipe_engine::file]/resources/fileutil.py", - "--json-output", - "/path/to/tmp/json", - "remove", - "[START_DIR]/cache/work/.gclient_entries" - ], - "infra_step": true, - "name": "remove [START_DIR]/cache/work/.gclient_entries" - }, - { - "cmd": [ - "python", - "-u", - "RECIPE_MODULE[depot_tools::bot_update]/resources/bot_update.py", - "--spec-path", - "cache_dir = '[START_DIR]/cache/git'\nsolutions = [{'deps_file': '.DEPS.git', 'managed': False, 'name': 'skia', 'url': 'https://skia.googlesource.com/skia.git'}]", - "--patch_root", - "skia", - "--revision_mapping_file", - "{\"got_revision\": \"skia\"}", - "--git-cache-dir", - "[START_DIR]/cache/git", - 
"--cleanup-dir", - "[CLEANUP]/bot_update", - "--output_json", - "/path/to/tmp/json", - "--revision", - "skia@abc123^" - ], - "cwd": "[START_DIR]/cache/work", - "env_prefixes": { - "PATH": [ - "RECIPE_PACKAGE_REPO[depot_tools]" - ] - }, - "infra_step": true, - "name": "bot_update", - "~followup_annotations": [ - "@@@STEP_TEXT@Some step text@@@", - "@@@STEP_LOG_LINE@json.output@{@@@", - "@@@STEP_LOG_LINE@json.output@ \"did_run\": true, @@@", - "@@@STEP_LOG_LINE@json.output@ \"fixed_revisions\": {@@@", - "@@@STEP_LOG_LINE@json.output@ \"skia\": \"abc123^\"@@@", - "@@@STEP_LOG_LINE@json.output@ }, @@@", - "@@@STEP_LOG_LINE@json.output@ \"manifest\": {@@@", - "@@@STEP_LOG_LINE@json.output@ \"skia\": {@@@", - "@@@STEP_LOG_LINE@json.output@ \"repository\": \"https://fake.org/skia.git\", @@@", - "@@@STEP_LOG_LINE@json.output@ \"revision\": \"9046e2e693bb92a76e972b694580e5d17ad10748\"@@@", - "@@@STEP_LOG_LINE@json.output@ }@@@", - "@@@STEP_LOG_LINE@json.output@ }, @@@", - "@@@STEP_LOG_LINE@json.output@ \"patch_failure\": false, @@@", - "@@@STEP_LOG_LINE@json.output@ \"patch_root\": \"skia\", @@@", - "@@@STEP_LOG_LINE@json.output@ \"properties\": {@@@", - "@@@STEP_LOG_LINE@json.output@ \"got_revision\": \"9046e2e693bb92a76e972b694580e5d17ad10748\", @@@", - "@@@STEP_LOG_LINE@json.output@ \"got_revision_cp\": \"refs/heads/master@{#164710}\"@@@", - "@@@STEP_LOG_LINE@json.output@ }, @@@", - "@@@STEP_LOG_LINE@json.output@ \"root\": \"skia\", @@@", - "@@@STEP_LOG_LINE@json.output@ \"source_manifest\": {@@@", - "@@@STEP_LOG_LINE@json.output@ \"directories\": {@@@", - "@@@STEP_LOG_LINE@json.output@ \"skia\": {@@@", - "@@@STEP_LOG_LINE@json.output@ \"git_checkout\": {@@@", - "@@@STEP_LOG_LINE@json.output@ \"repo_url\": \"https://fake.org/skia.git\", @@@", - "@@@STEP_LOG_LINE@json.output@ \"revision\": \"9046e2e693bb92a76e972b694580e5d17ad10748\"@@@", - "@@@STEP_LOG_LINE@json.output@ }@@@", - "@@@STEP_LOG_LINE@json.output@ }@@@", - "@@@STEP_LOG_LINE@json.output@ }, @@@", - "@@@STEP_LOG_LINE@json.output@ \"version\": 0@@@", - "@@@STEP_LOG_LINE@json.output@ }, @@@", - "@@@STEP_LOG_LINE@json.output@ \"step_text\": \"Some step text\"@@@", - "@@@STEP_LOG_LINE@json.output@}@@@", - "@@@STEP_LOG_END@json.output@@@", - "@@@SET_BUILD_PROPERTY@got_revision@\"9046e2e693bb92a76e972b694580e5d17ad10748\"@@@", - "@@@SET_BUILD_PROPERTY@got_revision_cp@\"refs/heads/master@{#164710}\"@@@" - ] - }, - { - "cmd": [ - "python", - "-u", - "RECIPE_MODULE[recipe_engine::file]/resources/fileutil.py", - "--json-output", - "/path/to/tmp/json", - "ensure-directory", - "--mode", - "0777", - "[START_DIR]/tmp" - ], - "infra_step": true, - "name": "makedirs tmp_dir" - }, - { - "name": "$result", - "recipe_result": null, - "status_code": 0 - } -] \ No newline at end of file diff --git a/infra/bots/recipe_modules/core/examples/full.expected/Housekeeper-Weekly-RecreateSKPs.json b/infra/bots/recipe_modules/core/examples/full.expected/Housekeeper-Weekly-RecreateSKPs.json deleted file mode 100644 index f3980c9d00..0000000000 --- a/infra/bots/recipe_modules/core/examples/full.expected/Housekeeper-Weekly-RecreateSKPs.json +++ /dev/null @@ -1,134 +0,0 @@ -[ - { - "cmd": [ - "python", - "-u", - "RECIPE_MODULE[recipe_engine::file]/resources/fileutil.py", - "--json-output", - "/path/to/tmp/json", - "ensure-directory", - "--mode", - "0777", - "[START_DIR]/cache/work" - ], - "infra_step": true, - "name": "makedirs checkout_path" - }, - { - "cmd": [ - "python", - "-u", - "RECIPE_MODULE[recipe_engine::file]/resources/fileutil.py", - "--json-output", - 
"/path/to/tmp/json", - "remove", - "[START_DIR]/cache/work/.gclient_entries" - ], - "infra_step": true, - "name": "remove [START_DIR]/cache/work/.gclient_entries" - }, - { - "cmd": [ - "python", - "-u", - "RECIPE_MODULE[depot_tools::bot_update]/resources/bot_update.py", - "--spec-path", - "cache_dir = '[START_DIR]/cache/git'\nsolutions = [{'deps_file': '.DEPS.git', 'managed': False, 'name': 'src', 'url': 'https://chromium.googlesource.com/chromium/src.git'}, {'deps_file': '.DEPS.git', 'managed': False, 'name': 'skia', 'url': 'https://skia.googlesource.com/skia.git'}]", - "--patch_root", - "skia", - "--revision_mapping_file", - "{\"got_revision\": \"skia\"}", - "--git-cache-dir", - "[START_DIR]/cache/git", - "--cleanup-dir", - "[CLEANUP]/bot_update", - "--output_json", - "/path/to/tmp/json", - "--revision", - "skia@abc123", - "--revision", - "src@origin/master" - ], - "cwd": "[START_DIR]/cache/work", - "env_prefixes": { - "PATH": [ - "RECIPE_PACKAGE_REPO[depot_tools]" - ] - }, - "infra_step": true, - "name": "bot_update", - "~followup_annotations": [ - "@@@STEP_TEXT@Some step text@@@", - "@@@STEP_LOG_LINE@json.output@{@@@", - "@@@STEP_LOG_LINE@json.output@ \"did_run\": true, @@@", - "@@@STEP_LOG_LINE@json.output@ \"fixed_revisions\": {@@@", - "@@@STEP_LOG_LINE@json.output@ \"skia\": \"abc123\", @@@", - "@@@STEP_LOG_LINE@json.output@ \"src\": \"origin/master\"@@@", - "@@@STEP_LOG_LINE@json.output@ }, @@@", - "@@@STEP_LOG_LINE@json.output@ \"manifest\": {@@@", - "@@@STEP_LOG_LINE@json.output@ \"skia\": {@@@", - "@@@STEP_LOG_LINE@json.output@ \"repository\": \"https://fake.org/skia.git\", @@@", - "@@@STEP_LOG_LINE@json.output@ \"revision\": \"9046e2e693bb92a76e972b694580e5d17ad10748\"@@@", - "@@@STEP_LOG_LINE@json.output@ }@@@", - "@@@STEP_LOG_LINE@json.output@ }, @@@", - "@@@STEP_LOG_LINE@json.output@ \"patch_failure\": false, @@@", - "@@@STEP_LOG_LINE@json.output@ \"patch_root\": \"skia\", @@@", - "@@@STEP_LOG_LINE@json.output@ \"properties\": {@@@", - "@@@STEP_LOG_LINE@json.output@ \"got_revision\": \"9046e2e693bb92a76e972b694580e5d17ad10748\", @@@", - "@@@STEP_LOG_LINE@json.output@ \"got_revision_cp\": \"refs/heads/master@{#164710}\"@@@", - "@@@STEP_LOG_LINE@json.output@ }, @@@", - "@@@STEP_LOG_LINE@json.output@ \"root\": \"src\", @@@", - "@@@STEP_LOG_LINE@json.output@ \"source_manifest\": {@@@", - "@@@STEP_LOG_LINE@json.output@ \"directories\": {@@@", - "@@@STEP_LOG_LINE@json.output@ \"skia\": {@@@", - "@@@STEP_LOG_LINE@json.output@ \"git_checkout\": {@@@", - "@@@STEP_LOG_LINE@json.output@ \"repo_url\": \"https://fake.org/skia.git\", @@@", - "@@@STEP_LOG_LINE@json.output@ \"revision\": \"9046e2e693bb92a76e972b694580e5d17ad10748\"@@@", - "@@@STEP_LOG_LINE@json.output@ }@@@", - "@@@STEP_LOG_LINE@json.output@ }@@@", - "@@@STEP_LOG_LINE@json.output@ }, @@@", - "@@@STEP_LOG_LINE@json.output@ \"version\": 0@@@", - "@@@STEP_LOG_LINE@json.output@ }, @@@", - "@@@STEP_LOG_LINE@json.output@ \"step_text\": \"Some step text\"@@@", - "@@@STEP_LOG_LINE@json.output@}@@@", - "@@@STEP_LOG_END@json.output@@@", - "@@@SET_BUILD_PROPERTY@got_revision@\"9046e2e693bb92a76e972b694580e5d17ad10748\"@@@", - "@@@SET_BUILD_PROPERTY@got_revision_cp@\"refs/heads/master@{#164710}\"@@@" - ] - }, - { - "cmd": [ - "python", - "-u", - "RECIPE_PACKAGE_REPO[depot_tools]/gclient.py", - "runhooks" - ], - "cwd": "[START_DIR]/cache/work", - "env": { - "CPPFLAGS": "-DSK_ALLOW_CROSSPROCESS_PICTUREIMAGEFILTERS=1", - "DEPOT_TOOLS_UPDATE": "0", - "PATH": ":RECIPE_PACKAGE_REPO[depot_tools]" - }, - "name": "gclient runhooks" - }, - { - 
"cmd": [ - "python", - "-u", - "RECIPE_MODULE[recipe_engine::file]/resources/fileutil.py", - "--json-output", - "/path/to/tmp/json", - "ensure-directory", - "--mode", - "0777", - "[START_DIR]/tmp" - ], - "infra_step": true, - "name": "makedirs tmp_dir" - }, - { - "name": "$result", - "recipe_result": null, - "status_code": 0 - } -] \ No newline at end of file diff --git a/infra/bots/recipe_modules/core/examples/full.expected/cross_repo_trybot.json b/infra/bots/recipe_modules/core/examples/full.expected/cross_repo_trybot.json deleted file mode 100644 index 30f707cec4..0000000000 --- a/infra/bots/recipe_modules/core/examples/full.expected/cross_repo_trybot.json +++ /dev/null @@ -1,120 +0,0 @@ -[ - { - "cmd": [ - "python", - "-u", - "RECIPE_MODULE[recipe_engine::file]/resources/fileutil.py", - "--json-output", - "/path/to/tmp/json", - "ensure-directory", - "--mode", - "0777", - "[START_DIR]/cache/work" - ], - "infra_step": true, - "name": "makedirs checkout_path" - }, - { - "cmd": [ - "python", - "-u", - "RECIPE_MODULE[recipe_engine::file]/resources/fileutil.py", - "--json-output", - "/path/to/tmp/json", - "remove", - "[START_DIR]/cache/work/.gclient_entries" - ], - "infra_step": true, - "name": "remove [START_DIR]/cache/work/.gclient_entries" - }, - { - "cmd": [ - "python", - "-u", - "RECIPE_MODULE[depot_tools::bot_update]/resources/bot_update.py", - "--spec-path", - "cache_dir = '[START_DIR]/cache/git'\nsolutions = [{'deps_file': '.DEPS.git', 'managed': False, 'name': 'parent_repo', 'url': 'https://skia.googlesource.com/parent_repo.git'}]", - "--patch_root", - "skia", - "--revision_mapping_file", - "{\"got_revision\": \"parent_repo\"}", - "--git-cache-dir", - "[START_DIR]/cache/git", - "--cleanup-dir", - "[CLEANUP]/bot_update", - "--gerrit_repo", - "https://skia.googlesource.com/skia.git", - "--gerrit_ref", - "refs/changes/89/456789/12", - "--output_json", - "/path/to/tmp/json", - "--revision", - "parent_repo@abc123" - ], - "cwd": "[START_DIR]/cache/work", - "env_prefixes": { - "PATH": [ - "RECIPE_PACKAGE_REPO[depot_tools]" - ] - }, - "infra_step": true, - "name": "bot_update", - "~followup_annotations": [ - "@@@STEP_TEXT@Some step text@@@", - "@@@STEP_LOG_LINE@json.output@{@@@", - "@@@STEP_LOG_LINE@json.output@ \"did_run\": true, @@@", - "@@@STEP_LOG_LINE@json.output@ \"fixed_revisions\": {@@@", - "@@@STEP_LOG_LINE@json.output@ \"parent_repo\": \"abc123\"@@@", - "@@@STEP_LOG_LINE@json.output@ }, @@@", - "@@@STEP_LOG_LINE@json.output@ \"manifest\": {@@@", - "@@@STEP_LOG_LINE@json.output@ \"parent_repo\": {@@@", - "@@@STEP_LOG_LINE@json.output@ \"repository\": \"https://fake.org/parent_repo.git\", @@@", - "@@@STEP_LOG_LINE@json.output@ \"revision\": \"354f9075936db3e1e855a48538d2f8555b37ac5a\"@@@", - "@@@STEP_LOG_LINE@json.output@ }@@@", - "@@@STEP_LOG_LINE@json.output@ }, @@@", - "@@@STEP_LOG_LINE@json.output@ \"patch_failure\": false, @@@", - "@@@STEP_LOG_LINE@json.output@ \"patch_root\": \"skia\", @@@", - "@@@STEP_LOG_LINE@json.output@ \"properties\": {@@@", - "@@@STEP_LOG_LINE@json.output@ \"got_revision\": \"354f9075936db3e1e855a48538d2f8555b37ac5a\", @@@", - "@@@STEP_LOG_LINE@json.output@ \"got_revision_cp\": \"refs/heads/master@{#106773}\"@@@", - "@@@STEP_LOG_LINE@json.output@ }, @@@", - "@@@STEP_LOG_LINE@json.output@ \"root\": \"parent_repo\", @@@", - "@@@STEP_LOG_LINE@json.output@ \"source_manifest\": {@@@", - "@@@STEP_LOG_LINE@json.output@ \"directories\": {@@@", - "@@@STEP_LOG_LINE@json.output@ \"parent_repo\": {@@@", - "@@@STEP_LOG_LINE@json.output@ \"git_checkout\": {@@@", - 
"@@@STEP_LOG_LINE@json.output@ \"repo_url\": \"https://fake.org/parent_repo.git\", @@@", - "@@@STEP_LOG_LINE@json.output@ \"revision\": \"354f9075936db3e1e855a48538d2f8555b37ac5a\"@@@", - "@@@STEP_LOG_LINE@json.output@ }@@@", - "@@@STEP_LOG_LINE@json.output@ }@@@", - "@@@STEP_LOG_LINE@json.output@ }, @@@", - "@@@STEP_LOG_LINE@json.output@ \"version\": 0@@@", - "@@@STEP_LOG_LINE@json.output@ }, @@@", - "@@@STEP_LOG_LINE@json.output@ \"step_text\": \"Some step text\"@@@", - "@@@STEP_LOG_LINE@json.output@}@@@", - "@@@STEP_LOG_END@json.output@@@", - "@@@SET_BUILD_PROPERTY@got_revision@\"354f9075936db3e1e855a48538d2f8555b37ac5a\"@@@", - "@@@SET_BUILD_PROPERTY@got_revision_cp@\"refs/heads/master@{#106773}\"@@@" - ] - }, - { - "cmd": [ - "python", - "-u", - "RECIPE_MODULE[recipe_engine::file]/resources/fileutil.py", - "--json-output", - "/path/to/tmp/json", - "ensure-directory", - "--mode", - "0777", - "[START_DIR]/tmp" - ], - "infra_step": true, - "name": "makedirs tmp_dir" - }, - { - "name": "$result", - "recipe_result": null, - "status_code": 0 - } -] \ No newline at end of file diff --git a/infra/bots/recipe_modules/core/examples/full.expected/flutter_trybot.json b/infra/bots/recipe_modules/core/examples/full.expected/flutter_trybot.json deleted file mode 100644 index 85cbb107ab..0000000000 --- a/infra/bots/recipe_modules/core/examples/full.expected/flutter_trybot.json +++ /dev/null @@ -1,151 +0,0 @@ -[ - { - "cmd": [ - "python", - "-u", - "RECIPE_MODULE[recipe_engine::file]/resources/fileutil.py", - "--json-output", - "/path/to/tmp/json", - "ensure-directory", - "--mode", - "0777", - "[START_DIR]/cache/work/flutter" - ], - "infra_step": true, - "name": "makedirs checkout_path" - }, - { - "cmd": [ - "python", - "-u", - "RECIPE_MODULE[recipe_engine::file]/resources/fileutil.py", - "--json-output", - "/path/to/tmp/json", - "remove", - "[START_DIR]/cache/work/flutter/.gclient_entries" - ], - "infra_step": true, - "name": "remove [START_DIR]/cache/work/flutter/.gclient_entries" - }, - { - "cmd": [ - "python", - "-u", - "RECIPE_MODULE[depot_tools::bot_update]/resources/bot_update.py", - "--spec-path", - "cache_dir = '[START_DIR]/cache/git'\nsolutions = [{'deps_file': '.DEPS.git', 'managed': True, 'name': 'src/flutter', 'url': 'https://github.com/flutter/engine.git'}]\ntarget_os = ['android']", - "--patch_root", - "src/third_party/skia", - "--revision_mapping_file", - "{\"got_flutter_revision\": \"src/flutter\", \"got_revision\": \"src/third_party/skia\"}", - "--git-cache-dir", - "[START_DIR]/cache/git", - "--cleanup-dir", - "[CLEANUP]/bot_update", - "--gerrit_repo", - "https://skia.googlesource.com/skia.git", - "--gerrit_ref", - "refs/changes/89/456789/12", - "--output_json", - "/path/to/tmp/json", - "--revision", - "src/flutter@origin/master", - "--revision", - "src/third_party/skia@abc123" - ], - "cwd": "[START_DIR]/cache/work/flutter", - "env_prefixes": { - "PATH": [ - "RECIPE_PACKAGE_REPO[depot_tools]" - ] - }, - "infra_step": true, - "name": "bot_update", - "~followup_annotations": [ - "@@@STEP_TEXT@Some step text@@@", - "@@@STEP_LOG_LINE@json.output@{@@@", - "@@@STEP_LOG_LINE@json.output@ \"did_run\": true, @@@", - "@@@STEP_LOG_LINE@json.output@ \"fixed_revisions\": {@@@", - "@@@STEP_LOG_LINE@json.output@ \"src/flutter\": \"origin/master\", @@@", - "@@@STEP_LOG_LINE@json.output@ \"src/third_party/skia\": \"abc123\"@@@", - "@@@STEP_LOG_LINE@json.output@ }, @@@", - "@@@STEP_LOG_LINE@json.output@ \"manifest\": {@@@", - "@@@STEP_LOG_LINE@json.output@ \"src/flutter\": {@@@", - 
"@@@STEP_LOG_LINE@json.output@ \"repository\": \"https://fake.org/src/flutter.git\", @@@", - "@@@STEP_LOG_LINE@json.output@ \"revision\": \"9221bca00ddbd888260084def81f09543281b952\"@@@", - "@@@STEP_LOG_LINE@json.output@ }, @@@", - "@@@STEP_LOG_LINE@json.output@ \"src/third_party/skia\": {@@@", - "@@@STEP_LOG_LINE@json.output@ \"repository\": \"https://fake.org/src/third_party/skia.git\", @@@", - "@@@STEP_LOG_LINE@json.output@ \"revision\": \"d9c4a4d173a97ef2832b65636b4200bb93ea8ee1\"@@@", - "@@@STEP_LOG_LINE@json.output@ }@@@", - "@@@STEP_LOG_LINE@json.output@ }, @@@", - "@@@STEP_LOG_LINE@json.output@ \"patch_failure\": false, @@@", - "@@@STEP_LOG_LINE@json.output@ \"patch_root\": \"src/third_party/skia\", @@@", - "@@@STEP_LOG_LINE@json.output@ \"properties\": {@@@", - "@@@STEP_LOG_LINE@json.output@ \"got_flutter_revision\": \"9221bca00ddbd888260084def81f09543281b952\", @@@", - "@@@STEP_LOG_LINE@json.output@ \"got_flutter_revision_cp\": \"refs/heads/master@{#84512}\", @@@", - "@@@STEP_LOG_LINE@json.output@ \"got_revision\": \"d9c4a4d173a97ef2832b65636b4200bb93ea8ee1\", @@@", - "@@@STEP_LOG_LINE@json.output@ \"got_revision_cp\": \"refs/heads/master@{#143121}\"@@@", - "@@@STEP_LOG_LINE@json.output@ }, @@@", - "@@@STEP_LOG_LINE@json.output@ \"root\": \"src/flutter\", @@@", - "@@@STEP_LOG_LINE@json.output@ \"source_manifest\": {@@@", - "@@@STEP_LOG_LINE@json.output@ \"directories\": {@@@", - "@@@STEP_LOG_LINE@json.output@ \"src/flutter\": {@@@", - "@@@STEP_LOG_LINE@json.output@ \"git_checkout\": {@@@", - "@@@STEP_LOG_LINE@json.output@ \"repo_url\": \"https://fake.org/src/flutter.git\", @@@", - "@@@STEP_LOG_LINE@json.output@ \"revision\": \"9221bca00ddbd888260084def81f09543281b952\"@@@", - "@@@STEP_LOG_LINE@json.output@ }@@@", - "@@@STEP_LOG_LINE@json.output@ }, @@@", - "@@@STEP_LOG_LINE@json.output@ \"src/third_party/skia\": {@@@", - "@@@STEP_LOG_LINE@json.output@ \"git_checkout\": {@@@", - "@@@STEP_LOG_LINE@json.output@ \"repo_url\": \"https://fake.org/src/third_party/skia.git\", @@@", - "@@@STEP_LOG_LINE@json.output@ \"revision\": \"d9c4a4d173a97ef2832b65636b4200bb93ea8ee1\"@@@", - "@@@STEP_LOG_LINE@json.output@ }@@@", - "@@@STEP_LOG_LINE@json.output@ }@@@", - "@@@STEP_LOG_LINE@json.output@ }, @@@", - "@@@STEP_LOG_LINE@json.output@ \"version\": 0@@@", - "@@@STEP_LOG_LINE@json.output@ }, @@@", - "@@@STEP_LOG_LINE@json.output@ \"step_text\": \"Some step text\"@@@", - "@@@STEP_LOG_LINE@json.output@}@@@", - "@@@STEP_LOG_END@json.output@@@", - "@@@SET_BUILD_PROPERTY@got_flutter_revision@\"9221bca00ddbd888260084def81f09543281b952\"@@@", - "@@@SET_BUILD_PROPERTY@got_flutter_revision_cp@\"refs/heads/master@{#84512}\"@@@", - "@@@SET_BUILD_PROPERTY@got_revision@\"d9c4a4d173a97ef2832b65636b4200bb93ea8ee1\"@@@", - "@@@SET_BUILD_PROPERTY@got_revision_cp@\"refs/heads/master@{#143121}\"@@@" - ] - }, - { - "cmd": [ - "python", - "-u", - "RECIPE_PACKAGE_REPO[depot_tools]/gclient.py", - "runhooks" - ], - "cwd": "[START_DIR]/cache/work/flutter", - "env": { - "DEPOT_TOOLS_UPDATE": "0", - "PATH": ":RECIPE_PACKAGE_REPO[depot_tools]" - }, - "name": "gclient runhooks" - }, - { - "cmd": [ - "python", - "-u", - "RECIPE_MODULE[recipe_engine::file]/resources/fileutil.py", - "--json-output", - "/path/to/tmp/json", - "ensure-directory", - "--mode", - "0777", - "[START_DIR]/tmp" - ], - "infra_step": true, - "name": "makedirs tmp_dir" - }, - { - "name": "$result", - "recipe_result": null, - "status_code": 0 - } -] \ No newline at end of file diff --git 
a/infra/bots/recipe_modules/core/examples/full.expected/parent_revision_trybot.json b/infra/bots/recipe_modules/core/examples/full.expected/parent_revision_trybot.json deleted file mode 100644 index 2a1eb4cb64..0000000000 --- a/infra/bots/recipe_modules/core/examples/full.expected/parent_revision_trybot.json +++ /dev/null @@ -1,116 +0,0 @@ -[ - { - "cmd": [ - "python", - "-u", - "RECIPE_MODULE[recipe_engine::file]/resources/fileutil.py", - "--json-output", - "/path/to/tmp/json", - "ensure-directory", - "--mode", - "0777", - "[START_DIR]/cache/work" - ], - "infra_step": true, - "name": "makedirs checkout_path" - }, - { - "cmd": [ - "python", - "-u", - "RECIPE_MODULE[recipe_engine::file]/resources/fileutil.py", - "--json-output", - "/path/to/tmp/json", - "remove", - "[START_DIR]/cache/work/.gclient_entries" - ], - "infra_step": true, - "name": "remove [START_DIR]/cache/work/.gclient_entries" - }, - { - "cmd": [ - "python", - "-u", - "RECIPE_MODULE[depot_tools::bot_update]/resources/bot_update.py", - "--spec-path", - "cache_dir = '[START_DIR]/cache/git'\nsolutions = [{'deps_file': '.DEPS.git', 'managed': False, 'name': 'skia', 'url': 'https://skia.googlesource.com/skia.git'}]", - "--patch_root", - "skia", - "--revision_mapping_file", - "{\"got_revision\": \"skia\"}", - "--git-cache-dir", - "[START_DIR]/cache/git", - "--cleanup-dir", - "[CLEANUP]/bot_update", - "--output_json", - "/path/to/tmp/json", - "--revision", - "skia@abc123" - ], - "cwd": "[START_DIR]/cache/work", - "env_prefixes": { - "PATH": [ - "RECIPE_PACKAGE_REPO[depot_tools]" - ] - }, - "infra_step": true, - "name": "bot_update (without patch)", - "~followup_annotations": [ - "@@@STEP_TEXT@Some step text@@@", - "@@@STEP_LOG_LINE@json.output@{@@@", - "@@@STEP_LOG_LINE@json.output@ \"did_run\": true, @@@", - "@@@STEP_LOG_LINE@json.output@ \"fixed_revisions\": {@@@", - "@@@STEP_LOG_LINE@json.output@ \"skia\": \"abc123\"@@@", - "@@@STEP_LOG_LINE@json.output@ }, @@@", - "@@@STEP_LOG_LINE@json.output@ \"manifest\": {@@@", - "@@@STEP_LOG_LINE@json.output@ \"skia\": {@@@", - "@@@STEP_LOG_LINE@json.output@ \"repository\": \"https://fake.org/skia.git\", @@@", - "@@@STEP_LOG_LINE@json.output@ \"revision\": \"9046e2e693bb92a76e972b694580e5d17ad10748\"@@@", - "@@@STEP_LOG_LINE@json.output@ }@@@", - "@@@STEP_LOG_LINE@json.output@ }, @@@", - "@@@STEP_LOG_LINE@json.output@ \"patch_failure\": false, @@@", - "@@@STEP_LOG_LINE@json.output@ \"patch_root\": \"skia\", @@@", - "@@@STEP_LOG_LINE@json.output@ \"properties\": {@@@", - "@@@STEP_LOG_LINE@json.output@ \"got_revision\": \"9046e2e693bb92a76e972b694580e5d17ad10748\", @@@", - "@@@STEP_LOG_LINE@json.output@ \"got_revision_cp\": \"refs/heads/master@{#164710}\"@@@", - "@@@STEP_LOG_LINE@json.output@ }, @@@", - "@@@STEP_LOG_LINE@json.output@ \"root\": \"skia\", @@@", - "@@@STEP_LOG_LINE@json.output@ \"source_manifest\": {@@@", - "@@@STEP_LOG_LINE@json.output@ \"directories\": {@@@", - "@@@STEP_LOG_LINE@json.output@ \"skia\": {@@@", - "@@@STEP_LOG_LINE@json.output@ \"git_checkout\": {@@@", - "@@@STEP_LOG_LINE@json.output@ \"repo_url\": \"https://fake.org/skia.git\", @@@", - "@@@STEP_LOG_LINE@json.output@ \"revision\": \"9046e2e693bb92a76e972b694580e5d17ad10748\"@@@", - "@@@STEP_LOG_LINE@json.output@ }@@@", - "@@@STEP_LOG_LINE@json.output@ }@@@", - "@@@STEP_LOG_LINE@json.output@ }, @@@", - "@@@STEP_LOG_LINE@json.output@ \"version\": 0@@@", - "@@@STEP_LOG_LINE@json.output@ }, @@@", - "@@@STEP_LOG_LINE@json.output@ \"step_text\": \"Some step text\"@@@", - "@@@STEP_LOG_LINE@json.output@}@@@", - 
"@@@STEP_LOG_END@json.output@@@", - "@@@SET_BUILD_PROPERTY@got_revision@\"9046e2e693bb92a76e972b694580e5d17ad10748\"@@@", - "@@@SET_BUILD_PROPERTY@got_revision_cp@\"refs/heads/master@{#164710}\"@@@" - ] - }, - { - "cmd": [ - "python", - "-u", - "RECIPE_MODULE[recipe_engine::file]/resources/fileutil.py", - "--json-output", - "/path/to/tmp/json", - "ensure-directory", - "--mode", - "0777", - "[START_DIR]/tmp" - ], - "infra_step": true, - "name": "makedirs tmp_dir" - }, - { - "name": "$result", - "recipe_result": null, - "status_code": 0 - } -] \ No newline at end of file diff --git a/infra/bots/recipe_modules/core/examples/full.expected/test.json b/infra/bots/recipe_modules/core/examples/full.expected/test.json deleted file mode 100644 index ef0d4965bc..0000000000 --- a/infra/bots/recipe_modules/core/examples/full.expected/test.json +++ /dev/null @@ -1,120 +0,0 @@ -[ - { - "cmd": [ - "python", - "-u", - "RECIPE_MODULE[recipe_engine::file]/resources/fileutil.py", - "--json-output", - "/path/to/tmp/json", - "ensure-directory", - "--mode", - "0777", - "[START_DIR]/cache/work" - ], - "infra_step": true, - "name": "makedirs checkout_path" - }, - { - "cmd": [ - "python", - "-u", - "RECIPE_MODULE[recipe_engine::file]/resources/fileutil.py", - "--json-output", - "/path/to/tmp/json", - "remove", - "[START_DIR]/cache/work/.gclient_entries" - ], - "infra_step": true, - "name": "remove [START_DIR]/cache/work/.gclient_entries" - }, - { - "cmd": [ - "python", - "-u", - "RECIPE_MODULE[depot_tools::bot_update]/resources/bot_update.py", - "--spec-path", - "cache_dir = '[START_DIR]/cache/git'\nsolutions = [{'deps_file': '.DEPS.git', 'managed': False, 'name': 'skia', 'url': 'https://skia.googlesource.com/skia.git'}]", - "--patch_root", - "skia", - "--revision_mapping_file", - "{\"got_revision\": \"skia\"}", - "--git-cache-dir", - "[START_DIR]/cache/git", - "--cleanup-dir", - "[CLEANUP]/bot_update", - "--gerrit_repo", - "https://skia.googlesource.com/skia.git", - "--gerrit_ref", - "refs/changes/89/456789/12", - "--output_json", - "/path/to/tmp/json", - "--revision", - "skia@abc123" - ], - "cwd": "[START_DIR]/cache/work", - "env_prefixes": { - "PATH": [ - "RECIPE_PACKAGE_REPO[depot_tools]" - ] - }, - "infra_step": true, - "name": "bot_update", - "~followup_annotations": [ - "@@@STEP_TEXT@Some step text@@@", - "@@@STEP_LOG_LINE@json.output@{@@@", - "@@@STEP_LOG_LINE@json.output@ \"did_run\": true, @@@", - "@@@STEP_LOG_LINE@json.output@ \"fixed_revisions\": {@@@", - "@@@STEP_LOG_LINE@json.output@ \"skia\": \"abc123\"@@@", - "@@@STEP_LOG_LINE@json.output@ }, @@@", - "@@@STEP_LOG_LINE@json.output@ \"manifest\": {@@@", - "@@@STEP_LOG_LINE@json.output@ \"skia\": {@@@", - "@@@STEP_LOG_LINE@json.output@ \"repository\": \"https://fake.org/skia.git\", @@@", - "@@@STEP_LOG_LINE@json.output@ \"revision\": \"9046e2e693bb92a76e972b694580e5d17ad10748\"@@@", - "@@@STEP_LOG_LINE@json.output@ }@@@", - "@@@STEP_LOG_LINE@json.output@ }, @@@", - "@@@STEP_LOG_LINE@json.output@ \"patch_failure\": false, @@@", - "@@@STEP_LOG_LINE@json.output@ \"patch_root\": \"skia\", @@@", - "@@@STEP_LOG_LINE@json.output@ \"properties\": {@@@", - "@@@STEP_LOG_LINE@json.output@ \"got_revision\": \"9046e2e693bb92a76e972b694580e5d17ad10748\", @@@", - "@@@STEP_LOG_LINE@json.output@ \"got_revision_cp\": \"refs/heads/master@{#164710}\"@@@", - "@@@STEP_LOG_LINE@json.output@ }, @@@", - "@@@STEP_LOG_LINE@json.output@ \"root\": \"skia\", @@@", - "@@@STEP_LOG_LINE@json.output@ \"source_manifest\": {@@@", - "@@@STEP_LOG_LINE@json.output@ \"directories\": {@@@", - 
"@@@STEP_LOG_LINE@json.output@ \"skia\": {@@@", - "@@@STEP_LOG_LINE@json.output@ \"git_checkout\": {@@@", - "@@@STEP_LOG_LINE@json.output@ \"repo_url\": \"https://fake.org/skia.git\", @@@", - "@@@STEP_LOG_LINE@json.output@ \"revision\": \"9046e2e693bb92a76e972b694580e5d17ad10748\"@@@", - "@@@STEP_LOG_LINE@json.output@ }@@@", - "@@@STEP_LOG_LINE@json.output@ }@@@", - "@@@STEP_LOG_LINE@json.output@ }, @@@", - "@@@STEP_LOG_LINE@json.output@ \"version\": 0@@@", - "@@@STEP_LOG_LINE@json.output@ }, @@@", - "@@@STEP_LOG_LINE@json.output@ \"step_text\": \"Some step text\"@@@", - "@@@STEP_LOG_LINE@json.output@}@@@", - "@@@STEP_LOG_END@json.output@@@", - "@@@SET_BUILD_PROPERTY@got_revision@\"9046e2e693bb92a76e972b694580e5d17ad10748\"@@@", - "@@@SET_BUILD_PROPERTY@got_revision_cp@\"refs/heads/master@{#164710}\"@@@" - ] - }, - { - "cmd": [ - "python", - "-u", - "RECIPE_MODULE[recipe_engine::file]/resources/fileutil.py", - "--json-output", - "/path/to/tmp/json", - "ensure-directory", - "--mode", - "0777", - "[START_DIR]/tmp" - ], - "infra_step": true, - "name": "makedirs tmp_dir" - }, - { - "name": "$result", - "recipe_result": null, - "status_code": 0 - } -] \ No newline at end of file diff --git a/infra/bots/recipe_modules/core/examples/full.py b/infra/bots/recipe_modules/core/examples/full.py deleted file mode 100644 index 2d1279f547..0000000000 --- a/infra/bots/recipe_modules/core/examples/full.py +++ /dev/null @@ -1,143 +0,0 @@ -# Copyright 2017 The Chromium Authors. All rights reserved. -# Use of this source code is governed by a BSD-style license that can be -# found in the LICENSE file. - - -DEPS = [ - 'core', - 'recipe_engine/file', - 'recipe_engine/path', - 'recipe_engine/properties', - 'run', - 'vars', -] - - -def RunSteps(api): - api.vars.setup() - - bot_update = True - if 'NoDEPS' in api.properties['buildername']: - bot_update = False - - if bot_update: - checkout_root = api.core.default_checkout_root - if 'Flutter' in api.vars.builder_name: - checkout_root = checkout_root.join('flutter') - api.core.checkout_bot_update(checkout_root=checkout_root) - else: - api.core.checkout_git(checkout_root=api.path['start_dir']) - api.file.ensure_directory('makedirs tmp_dir', api.vars.tmp_dir) - - -TEST_BUILDERS = [ - 'Build-Win-Clang-x86_64-Release-ParentRevision', - 'Build-Mac-Clang-x86_64-Debug-CommandBuffer', - 'Housekeeper-Weekly-RecreateSKPs', -] - - -def GenTests(api): - for buildername in TEST_BUILDERS: - yield ( - api.test(buildername) + - api.properties(buildername=buildername, - repository='https://skia.googlesource.com/skia.git', - revision='abc123', - path_config='kitchen', - swarm_out_dir='[SWARM_OUT_DIR]') - ) - - buildername = 'Build-Win-Clang-x86_64-Release-Vulkan' - yield ( - api.test('test') + - api.properties(buildername=buildername, - repository='https://skia.googlesource.com/skia.git', - revision='abc123', - path_config='kitchen', - swarm_out_dir='[SWARM_OUT_DIR]') + - api.properties(patch_storage='gerrit') + - api.properties.tryserver( - buildername=buildername, - gerrit_project='skia', - gerrit_url='https://skia-review.googlesource.com/', - ) - ) - - buildername = 'Build-Win-Clang-x86_64-Release-ParentRevision' - yield ( - api.test('parent_revision_trybot') + - api.properties(buildername=buildername, - repository='https://skia.googlesource.com/skia.git', - revision='abc123', - path_config='kitchen', - swarm_out_dir='[SWARM_OUT_DIR]', - patch_issue=500, - patch_set=1, - patch_storage='gerrit') + - api.properties.tryserver( - buildername=buildername, - gerrit_project='skia', - 
gerrit_url='https://skia-review.googlesource.com/', - ) - ) - - buildername = 'Build-Debian9-GCC-x86_64-Release-Flutter_Android' - yield ( - api.test('flutter_trybot') + - api.properties( - repository='https://skia.googlesource.com/skia.git', - buildername=buildername, - path_config='kitchen', - swarm_out_dir='[SWARM_OUT_DIR]', - revision='abc123', - patch_issue=500, - patch_set=1, - patch_storage='gerrit') + - api.properties.tryserver( - buildername=buildername, - gerrit_project='skia', - gerrit_url='https://skia-review.googlesource.com/', - ) + - api.path.exists( - api.path['start_dir'].join('tmp', 'uninteresting_hashes.txt') - ) - ) - - builder = 'Build-Debian9-Clang-x86_64-Release-NoDEPS' - yield ( - api.test(builder) + - api.properties(buildername=builder, - repository='https://skia.googlesource.com/skia.git', - revision='abc123', - path_config='kitchen', - swarm_out_dir='[SWARM_OUT_DIR]', - patch_issue=500, - patch_repo='https://skia.googlesource.com/skia.git', - patch_set=1, - patch_storage='gerrit') + - api.path.exists(api.path['start_dir'].join('skp_output')) - ) - - buildername = 'Build-Debian9-GCC-x86_64-Release' - yield ( - api.test('cross_repo_trybot') + - api.properties( - repository='https://skia.googlesource.com/parent_repo.git', - buildername=buildername, - path_config='kitchen', - swarm_out_dir='[SWARM_OUT_DIR]', - revision='abc123', - patch_issue=500, - patch_repo='https://skia.googlesource.com/skia.git', - patch_set=1, - patch_storage='gerrit') + - api.properties.tryserver( - buildername=buildername, - gerrit_project='skia', - gerrit_url='https://skia-review.googlesource.com/', - ) + - api.path.exists( - api.path['start_dir'].join('tmp', 'uninteresting_hashes.txt') - ) - ) diff --git a/infra/bots/recipe_modules/core/resources/binary_size_utils.py b/infra/bots/recipe_modules/core/resources/binary_size_utils.py deleted file mode 100644 index c09a65dccd..0000000000 --- a/infra/bots/recipe_modules/core/resources/binary_size_utils.py +++ /dev/null @@ -1,67 +0,0 @@ -# Copyright 2014 The Chromium Authors. All rights reserved. -# Use of this source code is governed by a BSD-style license that can be -# found in the LICENSE file. - -"""Common utilities for tools that deal with binary size information. - -Copied from chromium/src/build/android/pylib/symbols/binary_size_tools.py. -""" - -import logging -import re - - -def ParseNm(nm_lines): - """Parse nm output, returning data for all relevant (to binary size) - symbols and ignoring the rest. - - Args: - nm_lines: an iterable over lines of nm output. - - Yields: - (symbol name, symbol type, symbol size, source file path). - - Path may be None if nm couldn't figure out the source file. - """ - - # Match lines with size, symbol, optional location, optional discriminator - sym_re = re.compile(r'^[0-9a-f]{8,} ' # address (8+ hex digits) - '([0-9a-f]{8,}) ' # size (8+ hex digits) - '(.) ' # symbol type, one character - '([^\t]+)' # symbol name, separated from next by tab - '(?:\t(.*):[\d\?]+)?.*$') # location - # Match lines with addr but no size. - addr_re = re.compile(r'^[0-9a-f]{8,} (.) ([^\t]+)(?:\t.*)?$') - # Match lines that don't have an address at all -- typically external symbols. - noaddr_re = re.compile(r'^ {8,} (.) 
(.*)$') - # Match lines with no symbol name, only addr and type - addr_only_re = re.compile(r'^[0-9a-f]{8,} (.)$') - - for line in nm_lines: - line = line.rstrip() - match = sym_re.match(line) - if match: - size, sym_type, sym = match.groups()[0:3] - size = int(size, 16) - if sym_type in ('B', 'b'): - continue # skip all BSS for now. - path = match.group(4) - yield sym, sym_type, size, path - continue - match = addr_re.match(line) - if match: - # sym_type, sym = match.groups()[0:2] - continue # No size == we don't care. - match = noaddr_re.match(line) - if match: - sym_type, sym = match.groups() - if sym_type in ('U', 'w'): - continue # external or weak symbol - match = addr_only_re.match(line) - if match: - continue # Nothing to do. - - - # If we reach this part of the loop, there was something in the - # line that we didn't expect or recognize. - logging.warning('nm output parser failed to parse: %s', repr(line)) diff --git a/infra/bots/recipe_modules/core/resources/elf_symbolizer.py b/infra/bots/recipe_modules/core/resources/elf_symbolizer.py deleted file mode 100644 index de9c141219..0000000000 --- a/infra/bots/recipe_modules/core/resources/elf_symbolizer.py +++ /dev/null @@ -1,477 +0,0 @@ -# Copyright 2014 The Chromium Authors. All rights reserved. -# Use of this source code is governed by a BSD-style license that can be -# found in the LICENSE file. - -"""The ElfSymbolizer class for symbolizing Executable and Linkable Files. - -Adapted for Skia's use from -chromium/src/build/android/pylib/symbols/elf_symbolizer.py. - -Main changes: --- Added prefix_to_remove param to remove path prefix from tree data. -""" - -import collections -import datetime -import logging -import multiprocessing -import os -import posixpath -import Queue -import re -import subprocess -import sys -import threading - - -# addr2line builds a possibly infinite memory cache that can exhaust -# the computer's memory if allowed to grow for too long. This constant -# controls how many lookups we do before restarting the process. 4000 -# gives near peak performance without extreme memory usage. -ADDR2LINE_RECYCLE_LIMIT = 4000 - - -class ELFSymbolizer(object): - """An uber-fast (multiprocessing, pipelined and asynchronous) ELF symbolizer. - - This class is a frontend for addr2line (part of GNU binutils), designed to - symbolize batches of large numbers of symbols for a given ELF file. It - supports sharding symbolization against many addr2line instances and - pipelining of multiple requests per each instance (in order to hide addr2line - internals and OS pipe latencies). - - The interface exhibited by this class is a very simple asynchronous interface, - which is based on the following three methods: - - SymbolizeAsync(): used to request (enqueue) resolution of a given address. - - The |callback| method: used to communicated back the symbol information. - - Join(): called to conclude the batch to gather the last outstanding results. - In essence, before the Join method returns, this class will have issued as - many callbacks as the number of SymbolizeAsync() calls. In this regard, note - that due to multiprocess sharding, callbacks can be delivered out of order. - - Some background about addr2line: - - it is invoked passing the elf path in the cmdline, piping the addresses in - its stdin and getting results on its stdout. - - it has pretty large response times for the first requests, but it - works very well in streaming mode once it has been warmed up. - - it doesn't scale by itself (on more cores). 
However, spawning multiple - instances at the same time on the same file is pretty efficient as they - keep hitting the pagecache and become mostly CPU bound. - - it might hang or crash, mostly due to OOM. This class deals with both of these - problems. - - Despite the "scary" imports and the multi* words above, (almost) no multi- - threading/processing is involved from the Python viewpoint. Concurrency - here is achieved by spawning several addr2line subprocesses and handling their - output pipes asynchronously. Therefore, all the code here (with the exception - of the Queue instance in Addr2Line) should be free from mind-blowing - thread-safety concerns. - - The multiprocess sharding works as follows: - The symbolizer tries to use the lowest possible number of addr2line instances - (with respect to |max_concurrent_jobs|) and enqueues all the requests - in a single addr2line instance. For few symbols (i.e. dozens) sharding isn't - worth the startup cost. - The multiprocess logic kicks in as soon as the queues for the existing - instances grow. Specifically, once all the existing instances reach the - |max_queue_size| bound, a new addr2line instance is started. - In the case of a very eager producer (i.e. all |max_concurrent_jobs| instances - have a backlog of |max_queue_size|), back-pressure is applied on the caller by - blocking the SymbolizeAsync method. - - This module has been deliberately designed to be dependency free (w.r.t. - other modules in this project), to allow easy reuse in external projects. - """ - - def __init__(self, elf_file_path, addr2line_path, callback, inlines=False, - max_concurrent_jobs=None, addr2line_timeout=30, max_queue_size=50, - source_root_path=None, strip_base_path=None, prefix_to_remove=None): - """Args: - elf_file_path: path of the elf file to be symbolized. - addr2line_path: path of the toolchain's addr2line binary. - callback: a callback which will be invoked for each resolved symbol with - the two args (sym_info, callback_arg). The former is an instance of - |ELFSymbolInfo| and contains the symbol information. The latter is an - embedder-provided argument which is passed to SymbolizeAsync(). - inlines: when True, the ELFSymbolInfo will also contain the details about - the outer inlining functions. When False, only the innermost function - will be provided. - max_concurrent_jobs: Max number of addr2line instances spawned. - Parallelize responsibly, addr2line is a memory and I/O monster. - max_queue_size: Max number of outstanding requests per addr2line instance. - addr2line_timeout: Max time (in seconds) to wait for an addr2line response. - After the timeout, the instance will be considered hung and respawned. - source_root_path: In some toolchains only the name of the source file - is output, without any path information; disambiguation searches - through the source directory specified by the |source_root_path| argument - for files whose name matches, adding the full path information to the - output. For example, if the toolchain outputs "unicode.cc" and there - is a file called "unicode.cc" located under |source_root_path|/foo, - the tool will replace "unicode.cc" with - "|source_root_path|/foo/unicode.cc". If there are multiple files with - the same name, disambiguation will fail because the tool cannot - determine which of the files was the source of the symbol. - strip_base_path: Rebases the symbols' source paths onto |source_root_path| - (i.e. replace |strip_base_path| with |source_root_path|).
- prefix_to_remove: Removes the prefix from ElfSymbolInfo output. Skia added - """ - assert(os.path.isfile(addr2line_path)), 'Cannot find ' + addr2line_path - self.elf_file_path = elf_file_path - self.addr2line_path = addr2line_path - self.callback = callback - self.inlines = inlines - self.max_concurrent_jobs = (max_concurrent_jobs or - min(multiprocessing.cpu_count(), 4)) - self.max_queue_size = max_queue_size - self.addr2line_timeout = addr2line_timeout - self.requests_counter = 0 # For generating monotonic request IDs. - self._a2l_instances = [] # Up to |max_concurrent_jobs| _Addr2Line inst. - - # Skia addition: remove the given prefix from tree paths. - self.prefix_to_remove = prefix_to_remove - - # If necessary, create disambiguation lookup table - self.disambiguate = source_root_path is not None - self.disambiguation_table = {} - self.strip_base_path = strip_base_path - if(self.disambiguate): - self.source_root_path = os.path.abspath(source_root_path) - self._CreateDisambiguationTable() - - # Create one addr2line instance. More instances will be created on demand - # (up to |max_concurrent_jobs|) depending on the rate of the requests. - self._CreateNewA2LInstance() - - def SymbolizeAsync(self, addr, callback_arg=None): - """Requests symbolization of a given address. - - This method is not guaranteed to return immediately. It generally does, but - in some scenarios (e.g. all addr2line instances have full queues) it can - block to create back-pressure. - - Args: - addr: address to symbolize. - callback_arg: optional argument which will be passed to the |callback|.""" - assert(isinstance(addr, int)) - - # Process all the symbols that have been resolved in the meanwhile. - # Essentially, this drains all the addr2line(s) out queues. - for a2l_to_purge in self._a2l_instances: - a2l_to_purge.ProcessAllResolvedSymbolsInQueue() - a2l_to_purge.RecycleIfNecessary() - - # Find the best instance according to this logic: - # 1. Find an existing instance with the shortest queue. - # 2. If all of instances' queues are full, but there is room in the pool, - # (i.e. < |max_concurrent_jobs|) create a new instance. - # 3. If there were already |max_concurrent_jobs| instances and all of them - # had full queues, make back-pressure. - - # 1. - def _SortByQueueSizeAndReqID(a2l): - return (a2l.queue_size, a2l.first_request_id) - a2l = min(self._a2l_instances, key=_SortByQueueSizeAndReqID) - - # 2. - if (a2l.queue_size >= self.max_queue_size and - len(self._a2l_instances) < self.max_concurrent_jobs): - a2l = self._CreateNewA2LInstance() - - # 3. - if a2l.queue_size >= self.max_queue_size: - a2l.WaitForNextSymbolInQueue() - - a2l.EnqueueRequest(addr, callback_arg) - - def Join(self): - """Waits for all the outstanding requests to complete and terminates.""" - for a2l in self._a2l_instances: - a2l.WaitForIdle() - a2l.Terminate() - - def _CreateNewA2LInstance(self): - assert(len(self._a2l_instances) < self.max_concurrent_jobs) - a2l = ELFSymbolizer.Addr2Line(self) - self._a2l_instances.append(a2l) - return a2l - - def _CreateDisambiguationTable(self): - """ Non-unique file names will result in None entries""" - self.disambiguation_table = {} - - for root, _, filenames in os.walk(self.source_root_path): - for f in filenames: - self.disambiguation_table[f] = os.path.join(root, f) if (f not in - self.disambiguation_table) else None - - - class Addr2Line(object): - """A python wrapper around an addr2line instance. 
- - The communication with the addr2line process looks as follows: - [STDIN] [STDOUT] (from addr2line's viewpoint) - > f001111 - > f002222 - < Symbol::Name(foo, bar) for f001111 - < /path/to/source/file.c:line_number - > f003333 - < Symbol::Name2() for f002222 - < /path/to/source/file.c:line_number - < Symbol::Name3() for f003333 - < /path/to/source/file.c:line_number - """ - - SYM_ADDR_RE = re.compile(r'([^:]+):(\?|\d+).*') - - def __init__(self, symbolizer): - self._symbolizer = symbolizer - self._lib_file_name = posixpath.basename(symbolizer.elf_file_path) - - # The request queue (i.e. addresses pushed to addr2line's stdin and not - # yet retrieved on stdout) - self._request_queue = collections.deque() - - # This is essentially len(self._request_queue). It has been optimized to a - # separate field because turned out to be a perf hot-spot. - self.queue_size = 0 - - # Keep track of the number of symbols a process has processed to - # avoid a single process growing too big and using all the memory. - self._processed_symbols_count = 0 - - # Objects required to handle the addr2line subprocess. - self._proc = None # Subprocess.Popen(...) instance. - self._thread = None # Threading.thread instance. - self._out_queue = None # Queue.Queue instance (for buffering a2l stdout). - self._RestartAddr2LineProcess() - - def EnqueueRequest(self, addr, callback_arg): - """Pushes an address to addr2line's stdin (and keeps track of it).""" - self._symbolizer.requests_counter += 1 # For global "age" of requests. - req_idx = self._symbolizer.requests_counter - self._request_queue.append((addr, callback_arg, req_idx)) - self.queue_size += 1 - self._WriteToA2lStdin(addr) - - def WaitForIdle(self): - """Waits until all the pending requests have been symbolized.""" - while self.queue_size > 0: - self.WaitForNextSymbolInQueue() - - def WaitForNextSymbolInQueue(self): - """Waits for the next pending request to be symbolized.""" - if not self.queue_size: - return - - # This outer loop guards against a2l hanging (detecting stdout timeout). - while True: - start_time = datetime.datetime.now() - timeout = datetime.timedelta(seconds=self._symbolizer.addr2line_timeout) - - # The inner loop guards against a2l crashing (checking if it exited). - while (datetime.datetime.now() - start_time < timeout): - # poll() returns !None if the process exited. a2l should never exit. - if self._proc.poll(): - logging.warning('addr2line crashed, respawning (lib: %s).' % - self._lib_file_name) - self._RestartAddr2LineProcess() - # TODO(primiano): the best thing to do in this case would be - # shrinking the pool size as, very likely, addr2line is crashed - # due to low memory (and the respawned one will die again soon). - - try: - lines = self._out_queue.get(block=True, timeout=0.25) - except Queue.Empty: - # On timeout (1/4 s.) repeat the inner loop and check if either the - # addr2line process did crash or we waited its output for too long. - continue - - # In nominal conditions, we get straight to this point. - self._ProcessSymbolOutput(lines) - return - - # If this point is reached, we waited more than |addr2line_timeout|. - logging.warning('Hung addr2line process, respawning (lib: %s).' 
% - self._lib_file_name) - self._RestartAddr2LineProcess() - - def ProcessAllResolvedSymbolsInQueue(self): - """Consumes all the addr2line output lines produced (without blocking).""" - if not self.queue_size: - return - while True: - try: - lines = self._out_queue.get_nowait() - except Queue.Empty: - break - self._ProcessSymbolOutput(lines) - - def RecycleIfNecessary(self): - """Restarts the process if it has been used for too long. - - A long running addr2line process will consume excessive amounts - of memory without any gain in performance.""" - if self._processed_symbols_count >= ADDR2LINE_RECYCLE_LIMIT: - self._RestartAddr2LineProcess() - - - def Terminate(self): - """Kills the underlying addr2line process. - - The poller |_thread| will terminate as well due to the broken pipe.""" - try: - self._proc.kill() - self._proc.communicate() # Essentially wait() without risking deadlock. - except Exception: # An exception while terminating? How interesting. - pass - self._proc = None - - def _WriteToA2lStdin(self, addr): - self._proc.stdin.write('%s\n' % hex(addr)) - if self._symbolizer.inlines: - # In the case of inlines we output an extra blank line, which causes - # addr2line to emit a (??,??:0) tuple that we use as a boundary marker. - self._proc.stdin.write('\n') - self._proc.stdin.flush() - - def _ProcessSymbolOutput(self, lines): - """Parses an addr2line symbol output and triggers the client callback.""" - (_, callback_arg, _) = self._request_queue.popleft() - self.queue_size -= 1 - - innermost_sym_info = None - sym_info = None - for (line1, line2) in lines: - prev_sym_info = sym_info - name = line1 if not line1.startswith('?') else None - source_path = None - source_line = None - m = ELFSymbolizer.Addr2Line.SYM_ADDR_RE.match(line2) - if m: - if not m.group(1).startswith('?'): - source_path = m.group(1) - if not m.group(2).startswith('?'): - source_line = int(m.group(2)) - else: - logging.warning('Got invalid symbol path from addr2line: %s' % line2) - - # In case disambiguation is on, and needed - was_ambiguous = False - disambiguated = False - if self._symbolizer.disambiguate: - if source_path and not posixpath.isabs(source_path): - path = self._symbolizer.disambiguation_table.get(source_path) - was_ambiguous = True - disambiguated = path is not None - source_path = path if disambiguated else source_path - - # Use absolute paths (so that paths are consistent, as disambiguation - # uses absolute paths) - if source_path and not was_ambiguous: - source_path = os.path.abspath(source_path) - - if source_path and self._symbolizer.strip_base_path: - # Strip the base path - source_path = re.sub('^' + self._symbolizer.strip_base_path, - self._symbolizer.source_root_path or '', source_path) - - sym_info = ELFSymbolInfo(name, source_path, source_line, was_ambiguous, - disambiguated, - self._symbolizer.prefix_to_remove) - if prev_sym_info: - prev_sym_info.inlined_by = sym_info - if not innermost_sym_info: - innermost_sym_info = sym_info - - self._processed_symbols_count += 1 - self._symbolizer.callback(innermost_sym_info, callback_arg) - - def _RestartAddr2LineProcess(self): - if self._proc: - self.Terminate() - - # The only reason of existence of this Queue (and the corresponding - # Thread below) is the lack of a subprocess.stdout.poll_avail_lines(). - # Essentially this is a pipe able to extract a couple of lines atomically. - self._out_queue = Queue.Queue() - - # Start the underlying addr2line process in line buffered mode. 
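# Illustrative sketch (assumes the ELF file is Skia's out/Release/libskia.so,
# as passed in by run_binary_size_analysis.py): the command assembled below
# comes out roughly as
#     addr2line --functions --demangle --exe=out/Release/libskia.so [--inlines]
# _WriteToA2lStdin() then feeds addresses to its stdin one hex value per line,
# and (function name, source:line) pairs stream back on stdout in the format
# shown in the Addr2Line class docstring.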
- - cmd = [self._symbolizer.addr2line_path, '--functions', '--demangle', - '--exe=' + self._symbolizer.elf_file_path] - if self._symbolizer.inlines: - cmd += ['--inlines'] - self._proc = subprocess.Popen(cmd, bufsize=1, stdout=subprocess.PIPE, - stdin=subprocess.PIPE, stderr=sys.stderr, close_fds=True) - - # Start the poller thread, which simply moves atomically the lines read - # from the addr2line's stdout to the |_out_queue|. - self._thread = threading.Thread( - target=ELFSymbolizer.Addr2Line.StdoutReaderThread, - args=(self._proc.stdout, self._out_queue, self._symbolizer.inlines)) - self._thread.daemon = True # Don't prevent early process exit. - self._thread.start() - - self._processed_symbols_count = 0 - - # Replay the pending requests on the new process (only for the case - # of a hung addr2line timing out during the game). - for (addr, _, _) in self._request_queue: - self._WriteToA2lStdin(addr) - - @staticmethod - def StdoutReaderThread(process_pipe, queue, inlines): - """The poller thread fn, which moves the addr2line stdout to the |queue|. - - This is the only piece of code not running on the main thread. It merely - writes to a Queue, which is thread-safe. In the case of inlines, it - detects the ??,??:0 marker and sends the lines atomically, such that the - main thread always receives all the lines corresponding to one symbol in - one shot.""" - try: - lines_for_one_symbol = [] - while True: - line1 = process_pipe.readline().rstrip('\r\n') - line2 = process_pipe.readline().rstrip('\r\n') - if not line1 or not line2: - break - inline_has_more_lines = inlines and (len(lines_for_one_symbol) == 0 or - (line1 != '??' and line2 != '??:0')) - if not inlines or inline_has_more_lines: - lines_for_one_symbol += [(line1, line2)] - if inline_has_more_lines: - continue - queue.put(lines_for_one_symbol) - lines_for_one_symbol = [] - process_pipe.close() - - # Every addr2line processes will die at some point, please die silently. - except (IOError, OSError): - pass - - @property - def first_request_id(self): - """Returns the request_id of the oldest pending request in the queue.""" - return self._request_queue[0][2] if self._request_queue else 0 - - -class ELFSymbolInfo(object): - """The result of the symbolization passed as first arg. of each callback.""" - - def __init__(self, name, source_path, source_line, was_ambiguous=False, - disambiguated=False, prefix_to_remove=None): - """All the fields here can be None (if addr2line replies with '??').""" - self.name = name - if source_path and source_path.startswith(prefix_to_remove): - source_path = source_path[len(prefix_to_remove) : ] - self.source_path = source_path - self.source_line = source_line - # In the case of |inlines|=True, the |inlined_by| points to the outer - # function inlining the current one (and so on, to form a chain). - self.inlined_by = None - self.disambiguated = disambiguated - self.was_ambiguous = was_ambiguous - - def __str__(self): - return '%s [%s:%d]' % ( - self.name or '??', self.source_path or '??', self.source_line or 0) diff --git a/infra/bots/recipe_modules/core/resources/generate_and_upload_doxygen.py b/infra/bots/recipe_modules/core/resources/generate_and_upload_doxygen.py deleted file mode 100755 index 968f80debf..0000000000 --- a/infra/bots/recipe_modules/core/resources/generate_and_upload_doxygen.py +++ /dev/null @@ -1,75 +0,0 @@ -#!/usr/bin/env python -# Copyright 2014 The Chromium Authors. All rights reserved. 
-# Use of this source code is governed by a BSD-style license that can be -# found in the LICENSE file. - - -"""Generate Doxygen documentation.""" - - -import datetime -import os -import shutil -import subprocess -import sys - - -DOXYFILE_BASENAME = 'Doxyfile' # must match name of Doxyfile in skia root -DOXYGEN_BINARY = 'doxygen' -WORKDIR = os.path.join(os.pardir, 'doxygen_workdir') -DOXYGEN_CONFIG_DIR = os.path.join(WORKDIR, 'doxygen-config') -DOXYGEN_WORKING_DIR = os.path.join(WORKDIR, 'doxygen') -DOXYGEN_GS_PATH = '/'.join(['gs://skia-doc', 'doxygen']) - -IFRAME_FOOTER_TEMPLATE = """ -
-<html><body><address style="text-align: right;"><small> -Generated at %s for skia -by <a href="http://www.doxygen.org/index.html">doxygen</a> -%s -</small></address></body></html>
-""" - - -def recreate_dir(path): - """Delete and recreate the directory.""" - try: - shutil.rmtree(path) - except OSError: - if os.path.exists(path): - raise Exception('Could not remove %s' % path) - os.makedirs(path) - - -def generate_and_upload_doxygen(): - """Generate Doxygen.""" - # Create empty dir and add static_footer.txt - recreate_dir(DOXYGEN_WORKING_DIR) - static_footer_path = os.path.join(DOXYGEN_WORKING_DIR, 'static_footer.txt') - shutil.copyfile(os.path.join('tools', 'doxygen_footer.txt'), - static_footer_path) - - # Make copy of doxygen config file, overriding any necessary configs, - # and run doxygen. - recreate_dir(DOXYGEN_CONFIG_DIR) - modified_doxyfile = os.path.join(DOXYGEN_CONFIG_DIR, DOXYFILE_BASENAME) - with open(DOXYFILE_BASENAME, 'r') as reader: - with open(modified_doxyfile, 'w') as writer: - shutil.copyfileobj(reader, writer) - writer.write('OUTPUT_DIRECTORY = %s\n' % DOXYGEN_WORKING_DIR) - writer.write('HTML_FOOTER = %s\n' % static_footer_path) - subprocess.check_call([DOXYGEN_BINARY, modified_doxyfile]) - - # Create iframe_footer.html - with open(os.path.join(DOXYGEN_WORKING_DIR, 'iframe_footer.html'), 'w') as f: - f.write(IFRAME_FOOTER_TEMPLATE % ( - datetime.datetime.now().isoformat(' '), - subprocess.check_output([DOXYGEN_BINARY, '--version']).rstrip())) - - # Upload. - cmd = ['gsutil', 'cp', '-a', 'public-read', '-R', - DOXYGEN_WORKING_DIR, DOXYGEN_GS_PATH] - subprocess.check_call(cmd) - - -if '__main__' == __name__: - generate_and_upload_doxygen() - diff --git a/infra/bots/recipe_modules/core/resources/run_binary_size_analysis.py b/infra/bots/recipe_modules/core/resources/run_binary_size_analysis.py deleted file mode 100755 index 822b366614..0000000000 --- a/infra/bots/recipe_modules/core/resources/run_binary_size_analysis.py +++ /dev/null @@ -1,788 +0,0 @@ -#!/usr/bin/env python -# Copyright 2014 The Chromium Authors. All rights reserved. -# Use of this source code is governed by a BSD-style license that can be -# found in the LICENSE file. - -"""Generate a spatial analysis against an arbitrary library. - -Adapted for Skia's use case from -chromium/src/tools/binary_size/run_binary_size_analysis.py. Main changes: - --- Cleans up some deprecated codes. --- Always use relative code path so the tree root is Skia repo's root. --- Instead of outputting the standalone HTML/CSS/JS filesets, writes the - TreeMap JSON data into a Google Storage bucket. --- Adds githash and total_size to the JSON data. --- Outputs another summary data in JSON Bench format for skiaperf ingestion. - -The output JSON data for visualization is in the following format: - -{ - "githash": 123abc, - "commit_ts": 1234567890, - "total_size": 1234567, - "key": {"source_type": "binary_size"}, - "tree_data": { - "maxDepth": 9, - "k": "p", "children":[ - {"k":"p","children":[ - {"k":"p","children":[ - {"k":"p","lastPathElement":true,"children":[ - {"k":"b","t":"t","children":[ - {"k":"s", "t":"t", "value":4029, - "n":"etc_encode_subblock_helper(unsigned char const*, ...)" - }, - ...... - } -} - -Another JSON file is generated for size summaries to be used in skiaperf. 
The -JSON format details can be found at: - https://github.com/google/skia/blob/master/bench/ResultsWriter.h#L54 -and: - https://skia.googlesource.com/buildbot/+/master/perf/go/ingester/nanobench.go - -In the binary size case, outputs look like: - -{ - "gitHash": "123abc", - "key": { - "source_type": "binarysize" - } - "results: { - "src_lazy_global_weak_symbol": { - "memory": { - "bytes": 41, - "options": { - "path": "src_lazy", - "symbol": "global_weak_symbol" - } - } - }, - "src_lazy_global_read_only_data": { - "memory": { - "bytes": 13476, - "options": { - "path": "src_lazy", - "symbol": "global_read_only_data" - } - } - }, - ... - } -} - -""" - -import collections -import datetime -import json -import logging -import multiprocessing -import optparse -import os -import re -import shutil -import struct -import subprocess -import sys -import tempfile -import time -import urllib2 - -import binary_size_utils -import elf_symbolizer - -# Node dictionary keys. These are output in json read by the webapp so -# keep them short to save file size. -# Note: If these change, the webapp must also change. -NODE_TYPE_KEY = 'k' -NODE_NAME_KEY = 'n' -NODE_CHILDREN_KEY = 'children' -NODE_SYMBOL_TYPE_KEY = 't' -NODE_SYMBOL_SIZE_KEY = 'value' -NODE_MAX_DEPTH_KEY = 'maxDepth' -NODE_LAST_PATH_ELEMENT_KEY = 'lastPathElement' - -# The display name of the bucket where we put symbols without path. -NAME_NO_PATH_BUCKET = '(No Path)' - -# Try to keep data buckets smaller than this to avoid killing the -# graphing lib. -BIG_BUCKET_LIMIT = 3000 - -# Skia addition: relative dir for libskia.so from code base. -LIBSKIA_RELATIVE_PATH = os.path.join('out', 'Release') - -# Skia addition: dictionary mapping symbol type code to symbol name. -# See -# https://code.google.com/p/chromium/codesearch#chromium/src/tools/binary_size/template/D3SymbolTreeMap.js&l=74 -SYMBOL_MAP = { - 'A': 'global_absolute', - 'B': 'global_uninitialized_data', - 'b': 'local_uninitialized_data', - 'C': 'global_uninitialized_common', - 'D': 'global_initialized_data', - 'd': 'local_initialized_data', - 'G': 'global_small initialized_data', - 'g': 'local_small_initialized_data', - 'i': 'indirect_function', - 'N': 'debugging', - 'p': 'stack_unwind', - 'R': 'global_read_only_data', - 'r': 'local_read_only_data', - 'S': 'global_small_uninitialized_data', - 's': 'local_small_uninitialized_data', - 'T': 'global_code', - 't': 'local_code', - 'U': 'undefined', - 'u': 'unique', - 'V': 'global_weak_object', - 'v': 'local_weak_object', - 'W': 'global_weak_symbol', - 'w': 'local_weak_symbol', - '@': 'vtable_entry', - '-': 'stabs_debugging', - '?': 'unrecognized', -} - - -def _MkChild(node, name): - child = node[NODE_CHILDREN_KEY].get(name) - if child is None: - child = {NODE_NAME_KEY: name, - NODE_CHILDREN_KEY: {}} - node[NODE_CHILDREN_KEY][name] = child - return child - - -def SplitNoPathBucket(node): - """NAME_NO_PATH_BUCKET can be too large for the graphing lib to - handle. 
Split it into sub-buckets in that case.""" - root_children = node[NODE_CHILDREN_KEY] - if NAME_NO_PATH_BUCKET in root_children: - no_path_bucket = root_children[NAME_NO_PATH_BUCKET] - old_children = no_path_bucket[NODE_CHILDREN_KEY] - count = 0 - for symbol_type, symbol_bucket in old_children.iteritems(): - count += len(symbol_bucket[NODE_CHILDREN_KEY]) - if count > BIG_BUCKET_LIMIT: - new_children = {} - no_path_bucket[NODE_CHILDREN_KEY] = new_children - current_bucket = None - index = 0 - for symbol_type, symbol_bucket in old_children.iteritems(): - for symbol_name, value in symbol_bucket[NODE_CHILDREN_KEY].iteritems(): - if index % BIG_BUCKET_LIMIT == 0: - group_no = (index / BIG_BUCKET_LIMIT) + 1 - current_bucket = _MkChild(no_path_bucket, - '%s subgroup %d' % (NAME_NO_PATH_BUCKET, - group_no)) - assert not NODE_TYPE_KEY in node or node[NODE_TYPE_KEY] == 'p' - node[NODE_TYPE_KEY] = 'p' # p for path - index += 1 - symbol_size = value[NODE_SYMBOL_SIZE_KEY] - AddSymbolIntoFileNode(current_bucket, symbol_type, - symbol_name, symbol_size) - - -def MakeChildrenDictsIntoLists(node): - largest_list_len = 0 - if NODE_CHILDREN_KEY in node: - largest_list_len = len(node[NODE_CHILDREN_KEY]) - child_list = [] - for child in node[NODE_CHILDREN_KEY].itervalues(): - child_largest_list_len = MakeChildrenDictsIntoLists(child) - if child_largest_list_len > largest_list_len: - largest_list_len = child_largest_list_len - child_list.append(child) - node[NODE_CHILDREN_KEY] = child_list - - return largest_list_len - - -def AddSymbolIntoFileNode(node, symbol_type, symbol_name, symbol_size): - """Puts symbol into the file path node |node|. - Returns the number of added levels in tree. I.e. returns 2.""" - - # 'node' is the file node and first step is to find its symbol-type bucket. - node[NODE_LAST_PATH_ELEMENT_KEY] = True - node = _MkChild(node, symbol_type) - assert not NODE_TYPE_KEY in node or node[NODE_TYPE_KEY] == 'b' - node[NODE_SYMBOL_TYPE_KEY] = symbol_type - node[NODE_TYPE_KEY] = 'b' # b for bucket - - # 'node' is now the symbol-type bucket. Make the child entry. - node = _MkChild(node, symbol_name) - if NODE_CHILDREN_KEY in node: - if node[NODE_CHILDREN_KEY]: - logging.warning('A container node used as symbol for %s.' % symbol_name) - # This is going to be used as a leaf so no use for child list. - del node[NODE_CHILDREN_KEY] - node[NODE_SYMBOL_SIZE_KEY] = symbol_size - node[NODE_SYMBOL_TYPE_KEY] = symbol_type - node[NODE_TYPE_KEY] = 's' # s for symbol - - return 2 # Depth of the added subtree. 
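To make the node conventions above concrete, here is a small standalone sketch (not part of the patch; the path, symbol name and size are invented) of the dict shape that _MkChild and AddSymbolIntoFileNode build up, path node -> symbol-type bucket -> symbol leaf:

# Standalone sketch of the compact-tree node layout; illustrative values only.
NODE_TYPE_KEY, NODE_NAME_KEY, NODE_CHILDREN_KEY = 'k', 'n', 'children'
NODE_SYMBOL_TYPE_KEY, NODE_SYMBOL_SIZE_KEY = 't', 'value'

def mk_child(node, name):
    # Get-or-create a named child dict, as _MkChild does.
    child = node[NODE_CHILDREN_KEY].get(name)
    if child is None:
        child = {NODE_NAME_KEY: name, NODE_CHILDREN_KEY: {}}
        node[NODE_CHILDREN_KEY][name] = child
    return child

root = {NODE_NAME_KEY: '/', NODE_CHILDREN_KEY: {}, NODE_TYPE_KEY: 'p'}
path_node = mk_child(root, 'src')            # 'p': a path component
path_node[NODE_TYPE_KEY] = 'p'
bucket = mk_child(path_node, 't')            # 'b': per-symbol-type bucket
bucket[NODE_TYPE_KEY] = 'b'
bucket[NODE_SYMBOL_TYPE_KEY] = 't'
leaf = mk_child(bucket, 'some_local_func')   # 's': the symbol itself
del leaf[NODE_CHILDREN_KEY]                  # leaves carry a size, not children
leaf[NODE_TYPE_KEY] = 's'
leaf[NODE_SYMBOL_TYPE_KEY] = 't'
leaf[NODE_SYMBOL_SIZE_KEY] = 4029
print(root)

Running this prints a nested dict whose JSON serialization matches the tree_data fragment shown in the module docstring; MakeChildrenDictsIntoLists later converts each children dict into a list before the JSON is written.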
- - -def MakeCompactTree(symbols, symbol_path_origin_dir): - result = {NODE_NAME_KEY: '/', - NODE_CHILDREN_KEY: {}, - NODE_TYPE_KEY: 'p', - NODE_MAX_DEPTH_KEY: 0} - seen_symbol_with_path = False - for symbol_name, symbol_type, symbol_size, file_path in symbols: - - if 'vtable for ' in symbol_name: - symbol_type = '@' # hack to categorize these separately - if file_path and file_path != "??": - seen_symbol_with_path = True - else: - file_path = NAME_NO_PATH_BUCKET - - path_parts = file_path.split('/') - - # Find pre-existing node in tree, or update if it already exists - node = result - depth = 0 - while len(path_parts) > 0: - path_part = path_parts.pop(0) - if len(path_part) == 0: - continue - depth += 1 - node = _MkChild(node, path_part) - assert not NODE_TYPE_KEY in node or node[NODE_TYPE_KEY] == 'p' - node[NODE_TYPE_KEY] = 'p' # p for path - - depth += AddSymbolIntoFileNode(node, symbol_type, symbol_name, symbol_size) - result[NODE_MAX_DEPTH_KEY] = max(result[NODE_MAX_DEPTH_KEY], depth) - - if not seen_symbol_with_path: - logging.warning('Symbols lack paths. Data will not be structured.') - - # The (no path) bucket can be extremely large if we failed to get - # path information. Split it into subgroups if needed. - SplitNoPathBucket(result) - - largest_list_len = MakeChildrenDictsIntoLists(result) - - if largest_list_len > BIG_BUCKET_LIMIT: - logging.warning('There are sections with %d nodes. ' - 'Results might be unusable.' % largest_list_len) - return result - - -# Skia added: summarizes tree size by symbol type for the given root node. -# Returns a dict keyed by symbol type, and value the type's overall size. -# e.g., {"t": 12345, "W": 543}. -def GetTreeSizes(node): - if 'children' not in node or not node['children']: - return {node['t']: node['value']} - dic = {} - for i in node['children']: - for k, v in GetTreeSizes(i).items(): - dic.setdefault(k, 0) - dic[k] += v - - return dic - - -# Skia added: creates dict to be converted to JSON in bench format. -# See top of file for the structure description. -def GetBenchDict(githash, tree_root): - dic = {'gitHash': githash, - 'key': {'source_type': 'binarysize'}, - 'results': {},} - for i in tree_root['children']: - if '(No Path)' == i['n']: # Already at symbol summary level. - for k, v in GetTreeSizes(i).items(): - dic['results']['no_path_' + SYMBOL_MAP[k]] = { - 'memory': { - 'bytes': v, - 'options': {'path': 'no_path', - 'symbol': SYMBOL_MAP[k],},}} - else: # We need to go deeper. - for c in i['children']: - path = i['n'] + '_' + c['n'] - for k, v in GetTreeSizes(c).items(): - dic['results'][path + '_' + SYMBOL_MAP[k]] = { - 'memory': { - 'bytes': v, - 'options': {'path': path, - 'symbol': SYMBOL_MAP[k],}}} - - return dic - - -def DumpCompactTree(symbols, symbol_path_origin_dir, ha, ts, issue, dest): - tree_root = MakeCompactTree(symbols, symbol_path_origin_dir) - json_data = {'tree_data': tree_root, - 'githash': ha, - 'commit_ts': ts, - 'key': {'source_type': 'binary_size'}, - 'total_size': sum(GetTreeSizes(tree_root).values()),} - with open(dest, 'w') as out: - # Use separators without whitespace to get a smaller file. 
-    json.dump(json_data, out, separators=(',', ':'))
-
-
-def MakeSourceMap(symbols):
-  sources = {}
-  for _sym, _symbol_type, size, path in symbols:
-    key = None
-    if path:
-      key = os.path.normpath(path)
-    else:
-      key = '[no path]'
-    if key not in sources:
-      sources[key] = {'path': path, 'symbol_count': 0, 'size': 0}
-    record = sources[key]
-    record['size'] += size
-    record['symbol_count'] += 1
-  return sources
-
-
-# Regex for parsing "nm" output. A sample line looks like this:
-# 0167b39c 00000018 t ACCESS_DESCRIPTION_free /path/file.c:95
-#
-# The fields are: address, size, type, name, source location
-# Regular expression explained ( see also: https://xkcd.com/208 ):
-# ([0-9a-f]{8,})   The address
-# [\s]+            Whitespace separator
-# ([0-9a-f]{8,})   The size. From here on out it's all optional.
-# [\s]+            Whitespace separator
-# (\S?)            The symbol type, which is any non-whitespace char
-# [\s*]            Whitespace separator
-# ([^\t]*)         Symbol name, any non-tab character (spaces ok!)
-# [\t]?            Tab separator
-# (.*)             The location (filename[:linenum|?][ (discriminator n)])
-sNmPattern = re.compile(
-    r'([0-9a-f]{8,})[\s]+([0-9a-f]{8,})[\s]*(\S?)[\s*]([^\t]*)[\t]?(.*)')
-
-class Progress(object):
-  def __init__(self):
-    self.count = 0
-    self.skip_count = 0
-    self.collisions = 0
-    self.time_last_output = time.time()
-    self.count_last_output = 0
-    self.disambiguations = 0
-    self.was_ambiguous = 0
-
-
-def RunElfSymbolizer(outfile, library, addr2line_binary, nm_binary, jobs,
-                     disambiguate, src_path):
-  nm_output = RunNm(library, nm_binary)
-  nm_output_lines = nm_output.splitlines()
-  nm_output_lines_len = len(nm_output_lines)
-  address_symbol = {}
-  progress = Progress()
-  def map_address_symbol(symbol, addr):
-    progress.count += 1
-    if addr in address_symbol:
-      # 'Collision between %s and %s.' % (str(symbol.name),
-      #                                   str(address_symbol[addr].name))
-      progress.collisions += 1
-    else:
-      if symbol.disambiguated:
-        progress.disambiguations += 1
-      if symbol.was_ambiguous:
-        progress.was_ambiguous += 1
-
-      address_symbol[addr] = symbol
-
-    progress_output()
-
-  def progress_output():
-    progress_chunk = 100
-    if progress.count % progress_chunk == 0:
-      time_now = time.time()
-      time_spent = time_now - progress.time_last_output
-      if time_spent > 1.0:
-        # Only output at most once per second.
-        progress.time_last_output = time_now
-        chunk_size = progress.count - progress.count_last_output
-        progress.count_last_output = progress.count
-        if time_spent > 0:
-          speed = chunk_size / time_spent
-        else:
-          speed = 0
-        progress_percent = (100.0 * (progress.count + progress.skip_count) /
-                            nm_output_lines_len)
-        disambiguation_percent = 0
-        if progress.disambiguations != 0:
-          disambiguation_percent = (100.0 * progress.disambiguations /
-                                    progress.was_ambiguous)
-
-        sys.stdout.write('\r%.1f%%: Looked up %d symbols (%d collisions, '
-                         '%d disambiguations where %.1f%% succeeded)'
-                         ' - %.1f lookups/s.' %
-                         (progress_percent, progress.count, progress.collisions,
-                          progress.disambiguations, disambiguation_percent,
-                          speed))
-
-  # In case disambiguation was disabled, we remove the source path (which upon
-  # being set signals the symbolizer to enable disambiguation)
-  if not disambiguate:
-    src_path = None
-  symbol_path_origin_dir = os.path.dirname(library)
-  # Skia specific. 
- symbol_path_prefix = symbol_path_origin_dir.replace(LIBSKIA_RELATIVE_PATH, '') - symbolizer = elf_symbolizer.ELFSymbolizer(library, addr2line_binary, - map_address_symbol, - max_concurrent_jobs=jobs, - source_root_path=src_path, - prefix_to_remove=symbol_path_prefix) - user_interrupted = False - try: - for line in nm_output_lines: - match = sNmPattern.match(line) - if match: - location = match.group(5) - if not location: - addr = int(match.group(1), 16) - size = int(match.group(2), 16) - if addr in address_symbol: # Already looked up, shortcut - # ELFSymbolizer. - map_address_symbol(address_symbol[addr], addr) - continue - elif size == 0: - # Save time by not looking up empty symbols (do they even exist?) - print('Empty symbol: ' + line) - else: - symbolizer.SymbolizeAsync(addr, addr) - continue - - progress.skip_count += 1 - except KeyboardInterrupt: - user_interrupted = True - print('Interrupting - killing subprocesses. Please wait.') - - try: - symbolizer.Join() - except KeyboardInterrupt: - # Don't want to abort here since we will be finished in a few seconds. - user_interrupted = True - print('Patience you must have my young padawan.') - - print '' - - if user_interrupted: - print('Skipping the rest of the file mapping. ' - 'Output will not be fully classified.') - - symbol_path_origin_dir = os.path.dirname(library) - # Skia specific: path prefix to strip. - symbol_path_prefix = symbol_path_origin_dir.replace(LIBSKIA_RELATIVE_PATH, '') - - with open(outfile, 'w') as out: - for line in nm_output_lines: - match = sNmPattern.match(line) - if match: - location = match.group(5) - if not location: - addr = int(match.group(1), 16) - symbol = address_symbol.get(addr) - if symbol is not None: - path = '??' - if symbol.source_path is not None: - path = symbol.source_path.replace(symbol_path_prefix, '') - line_number = 0 - if symbol.source_line is not None: - line_number = symbol.source_line - out.write('%s\t%s:%d\n' % (line, path, line_number)) - continue - - out.write('%s\n' % line) - - print('%d symbols in the results.' 
% len(address_symbol))
-
-
-def RunNm(binary, nm_binary):
-  cmd = [nm_binary, '-C', '--print-size', '--size-sort', '--reverse-sort',
-         binary]
-  nm_process = subprocess.Popen(cmd,
-                                stdout=subprocess.PIPE,
-                                stderr=subprocess.PIPE)
-  (process_output, err_output) = nm_process.communicate()
-
-  if nm_process.returncode != 0:
-    if err_output:
-      raise Exception(err_output)
-    else:
-      raise Exception(process_output)
-
-  return process_output
-
-
-def GetNmSymbols(nm_infile, outfile, library, jobs, verbose,
-                 addr2line_binary, nm_binary, disambiguate, src_path):
-  if nm_infile is None:
-    if outfile is None:
-      outfile = tempfile.NamedTemporaryFile(delete=False).name
-
-    if verbose:
-      print 'Running parallel addr2line, dumping symbols to ' + outfile
-    RunElfSymbolizer(outfile, library, addr2line_binary, nm_binary, jobs,
-                     disambiguate, src_path)
-
-    nm_infile = outfile
-
-  elif verbose:
-    print 'Using nm input from ' + nm_infile
-  with open(nm_infile, 'r') as infile:
-    return list(binary_size_utils.ParseNm(infile))
-
-
-PAK_RESOURCE_ID_TO_STRING = { "inited": False }
-
-def LoadPakIdsFromResourceFile(filename):
-  """Given a file name, it loads everything that looks like a resource id
-  into PAK_RESOURCE_ID_TO_STRING."""
-  with open(filename) as resource_header:
-    for line in resource_header:
-      if line.startswith("#define "):
-        line_data = line.split()
-        if len(line_data) == 3:
-          try:
-            resource_number = int(line_data[2])
-            resource_name = line_data[1]
-            PAK_RESOURCE_ID_TO_STRING[resource_number] = resource_name
-          except ValueError:
-            pass
-
-def GetReadablePakResourceName(pak_file, resource_id):
-  """Pak resources have a numeric identifier, which is not helpful when
-  trying to locate where footprint is generated. This does its best to
-  map the number to a usable string."""
-  if not PAK_RESOURCE_ID_TO_STRING['inited']:
-    # Try to find resource header files generated by grit when
-    # building the pak file. We'll look for files named *resources.h
-    # and lines of the type:
-    #    #define MY_RESOURCE_JS 1234
-    PAK_RESOURCE_ID_TO_STRING['inited'] = True
-    gen_dir = os.path.join(os.path.dirname(pak_file), 'gen')
-    if os.path.isdir(gen_dir):
-      for dirname, _dirs, files in os.walk(gen_dir):
-        for filename in files:
-          if filename.endswith('resources.h'):
-            LoadPakIdsFromResourceFile(os.path.join(dirname, filename))
-  return PAK_RESOURCE_ID_TO_STRING.get(resource_id,
-                                       'Pak Resource %d' % resource_id)
-
-def AddPakData(symbols, pak_file):
-  """Adds pseudo-symbols from a pak file."""
-  pak_file = os.path.abspath(pak_file)
-  with open(pak_file, 'rb') as pak:
-    data = pak.read()
-
-  PAK_FILE_VERSION = 4
-  HEADER_LENGTH = 2 * 4 + 1  # Two uint32s. (file version, number of entries)
-                             # and one uint8 (encoding of text resources)
-  INDEX_ENTRY_SIZE = 2 + 4  # Each entry is a uint16 and a uint32.
-  version, num_entries, _encoding = struct.unpack('<IIB',
-                                                  data[:HEADER_LENGTH])
-  assert version == PAK_FILE_VERSION, ('Unsupported pak file '
-                                       'version (%d) in %s. Only '
-                                       'support version %d' %
-                                       (version, pak_file, PAK_FILE_VERSION))
-  if num_entries > 0:
-    # Read the index and data.
-    data = data[HEADER_LENGTH:]
-    for _ in range(num_entries):
-      resource_id, offset = struct.unpack('<HI', data[:INDEX_ENTRY_SIZE])
-      data = data[INDEX_ENTRY_SIZE:]
-      _next_id, next_offset = struct.unpack('<HI', data[:INDEX_ENTRY_SIZE])
-      resource_size = next_offset - offset
-
-      symbol_name = GetReadablePakResourceName(pak_file, resource_id)
-      symbol_path = 'opt/pak/'
-      symbol_type = 'd'  # Data. Approximation.
-      symbol_size = resource_size
-      symbols.append((symbol_name, symbol_type, symbol_size, symbol_path))
-
-
-def _find_in_system_path(binary):
-  """Locate the full path to binary in the system path or return None
-  if not found."""
-  system_path = os.environ["PATH"].split(os.pathsep)
-  for path in system_path:
-    binary_path = os.path.join(path, binary)
-    if os.path.isfile(binary_path):
-      return binary_path
-  return None
-
-
-def CheckDebugFormatSupport(library, addr2line_binary):
-  """Kills the program if debug data is in an unsupported format.
-
-  There are two common versions of the DWARF debug formats and
-  since we are right now transitioning from DWARF2 to newer formats,
-  it's possible to have a mix of tools that are not compatible. Detect
-  that and abort rather than produce meaningless output."""
-  tool_output = subprocess.check_output([addr2line_binary, '--version'])
-  version_re = re.compile(r'^GNU [^ ]+ .* (\d+).(\d+).*?$', re.M)
-  parsed_output = version_re.match(tool_output)
-  major = int(parsed_output.group(1))
-  minor = int(parsed_output.group(2))
-  supports_dwarf4 = major > 2 or major == 2 and minor > 22
-
-  if supports_dwarf4:
-    return
-
-  print('Checking version of debug information in %s.' % library)
-  debug_info = subprocess.check_output(['readelf', '--debug-dump=info',
-                                        '--dwarf-depth=1', library])
-  dwarf_version_re = re.compile(r'^\s+Version:\s+(\d+)$', re.M)
-  parsed_dwarf_format_output = dwarf_version_re.search(debug_info)
-  version = int(parsed_dwarf_format_output.group(1))
-  if version > 2:
-    print('The supplied tools only support DWARF2 debug data but the binary\n' +
-          'uses DWARF%d. Update the tools or compile the binary\n' % version +
-          'with -gdwarf-2.')
-    sys.exit(1)
-
-
-def main():
-  usage = """%prog [options]
-
-  Runs a spatial analysis on a given library, looking up the source locations
-  of its symbols and calculating how much space each directory, source file,
-  and so on is taking. The result is a report that can be used to pinpoint
-  sources of large portions of the binary, et cetera.
-
-  Under normal circumstances, you only need to pass two arguments, like this:
-
-  %prog --library /path/to/library --destdir /path/to/output
-
-  In this mode, the program will dump the symbols from the specified library
-  and map those symbols back to source locations, producing a web-based
-  report in the specified output directory.
-
-  Other options are available via '--help'.
-  """
-  parser = optparse.OptionParser(usage=usage)
-  parser.add_option('--nm-in', metavar='PATH',
-                    help='if specified, use nm input from <path> instead of '
-                    'generating it. Note that source locations should be '
-                    'present in the file; i.e., no addr2line symbol lookups '
-                    'will be performed when this option is specified. '
-                    'Mutually exclusive with --library.')
-  parser.add_option('--destdir', metavar='PATH',
-                    help='write output to the specified directory. An HTML '
-                    'report is generated here along with supporting files; '
-                    'any existing report will be overwritten. Not used in '
-                    'Skia.')
-  parser.add_option('--library', metavar='PATH',
-                    help='if specified, process symbols in the library at '
-                    'the specified path. Mutually exclusive with --nm-in.')
-  parser.add_option('--pak', metavar='PATH',
-                    help='if specified, includes the contents of the '
-                    'specified *.pak file in the output.')
-  parser.add_option('--nm-binary',
-                    help='use the specified nm binary to analyze library. '
-                    'This is to be used when the nm in the path is not for '
-                    'the right architecture or of the right version.')
-  parser.add_option('--addr2line-binary',
-                    help='use the specified addr2line binary to analyze '
-                    'library. This is to be used when the addr2line in '
-                    'the path is not for the right architecture or '
-                    'of the right version.')
-  parser.add_option('--jobs', type='int',
-                    help='number of jobs to use for the parallel '
-                    'addr2line processing pool; defaults to 1. More '
-                    'jobs greatly improve throughput but eat RAM like '
-                    'popcorn, and take several gigabytes each. Start low '
-                    'and ramp this number up until your machine begins to '
-                    'struggle with RAM. '
-                    'This argument is only valid when using --library.')
-  parser.add_option('-v', dest='verbose', action='store_true',
-                    help='be verbose, printing lots of status information.')
-  parser.add_option('--nm-out', metavar='PATH',
-                    help='keep the nm output file, and store it at the '
-                    'specified path. This is useful if you want to see the '
-                    'fully processed nm output after the symbols have been '
-                    'mapped to source locations. By default, a tempfile is '
-                    'used and is deleted when the program terminates. '
-                    'This argument is only valid when using --library.')
-  parser.add_option('--legacy', action='store_true',
-                    help='emit legacy binary size report instead of modern')
-  parser.add_option('--disable-disambiguation', action='store_true',
-                    help='disables the disambiguation process altogether. '
-                    'NOTE: this may, depending on your toolchain, produce '
-                    'output with some symbols at the top layer if addr2line '
-                    'could not get the entire source path.')
-  parser.add_option('--source-path', default='./',
-                    help='the path to the source code of the output binary, '
-                    'default set to current directory. Used in the '
-                    'disambiguation process.')
-  parser.add_option('--githash', default='latest',
-                    help='Git hash for the binary version. Added by Skia.')
-  parser.add_option('--commit_ts', type='int', default=-1,
-                    help='Timestamp for the commit. Added by Skia.')
-  parser.add_option('--issue_number', default='',
-                    help='The trybot issue number, as a string. Added by Skia.')
-  parser.add_option('--dest', default=None,
-                    help='Destination file to write results.')
-  opts, _args = parser.parse_args()
-
-  if ((not opts.library) and (not opts.nm_in)) or (opts.library and opts.nm_in):
-    parser.error('exactly one of --library or --nm-in is required')
-  if opts.nm_in:
-    if opts.jobs:
-      print >> sys.stderr, ('WARNING: --jobs has no effect '
-                            'when used with --nm-in')
-  if not opts.jobs:
-    # Use the number of processors but cap between 2 and 4 since raw
-    # CPU power isn't the limiting factor. It's I/O limited, memory
-    # bus limited and available-memory-limited. Too many processes and
-    # the computer will run out of memory and it will be slow.
-    opts.jobs = max(2, min(4, multiprocessing.cpu_count()))
-
-  if opts.addr2line_binary:
-    assert os.path.isfile(opts.addr2line_binary)
-    addr2line_binary = opts.addr2line_binary
-  else:
-    addr2line_binary = _find_in_system_path('addr2line')
-    assert addr2line_binary, 'Unable to find addr2line in the path. '\
-        'Use --addr2line-binary to specify location.'
-
-  if opts.nm_binary:
-    assert os.path.isfile(opts.nm_binary)
-    nm_binary = opts.nm_binary
-  else:
-    nm_binary = _find_in_system_path('nm')
-    assert nm_binary, 'Unable to find nm in the path. Use --nm-binary '\
-        'to specify location.'
-
-  if opts.pak:
-    assert os.path.isfile(opts.pak), 'Could not find %s' % opts.pak
-
-  print('addr2line: %s' % addr2line_binary)
-  print('nm: %s' % nm_binary)
-
-  if opts.library:
-    CheckDebugFormatSupport(opts.library, addr2line_binary)
-
-  symbols = GetNmSymbols(opts.nm_in, opts.nm_out, opts.library,
-                         opts.jobs, opts.verbose is True,
-                         addr2line_binary, nm_binary,
-                         opts.disable_disambiguation is None,
-                         opts.source_path)
-
-  if opts.pak:
-    AddPakData(symbols, opts.pak)
-
-  if opts.legacy: # legacy report
-    print 'Do not set the legacy flag.'
-
-  else: # modern report
-    if opts.library:
-      symbol_path_origin_dir = os.path.dirname(os.path.abspath(opts.library))
-    else:
-      # Just a guess. Hopefully all paths in the input file are absolute.
-      symbol_path_origin_dir = os.path.abspath(os.getcwd())
-    DumpCompactTree(symbols, symbol_path_origin_dir, opts.githash,
-                    opts.commit_ts, opts.issue_number, opts.dest)
-    print 'Report data uploaded to GS.'
-
-
-if __name__ == '__main__':
-  sys.exit(main())
diff --git a/infra/bots/recipe_modules/doxygen/__init__.py b/infra/bots/recipe_modules/doxygen/__init__.py
new file mode 100644
index 0000000000..1825f6116a
--- /dev/null
+++ b/infra/bots/recipe_modules/doxygen/__init__.py
@@ -0,0 +1,9 @@
+# Copyright 2018 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be +# found in the LICENSE file. + +DEPS = [ + 'recipe_engine/context', + 'recipe_engine/step', + 'run', +] diff --git a/infra/bots/recipe_modules/doxygen/api.py b/infra/bots/recipe_modules/doxygen/api.py new file mode 100644 index 0000000000..6141556fe0 --- /dev/null +++ b/infra/bots/recipe_modules/doxygen/api.py @@ -0,0 +1,17 @@ +# Copyright 2018 The Chromium Authors. All rights reserved. +# Use of this source code is governed by a BSD-style license that can be +# found in the LICENSE file. + + +from recipe_engine import recipe_api +from recipe_engine import config_types + + +class DoxygenApi(recipe_api.RecipeApi): + def generate_and_upload(self, skia_dir): + with self.m.context(cwd=skia_dir): + self.m.run( + self.m.step, + 'generate and upload doxygen', + cmd=['python', self.resource('generate_and_upload_doxygen.py')], + abort_on_failure=False) diff --git a/infra/bots/recipe_modules/doxygen/examples/full.expected/doxygen.json b/infra/bots/recipe_modules/doxygen/examples/full.expected/doxygen.json new file mode 100644 index 0000000000..bef199f5a1 --- /dev/null +++ b/infra/bots/recipe_modules/doxygen/examples/full.expected/doxygen.json @@ -0,0 +1,18 @@ +[ + { + "cmd": [ + "python", + "RECIPE_MODULE[skia::doxygen]/resources/generate_and_upload_doxygen.py" + ], + "env": { + "CHROME_HEADLESS": "1", + "PATH": ":RECIPE_PACKAGE_REPO[depot_tools]" + }, + "name": "generate and upload doxygen" + }, + { + "name": "$result", + "recipe_result": null, + "status_code": 0 + } +] \ No newline at end of file diff --git a/infra/bots/recipe_modules/doxygen/examples/full.py b/infra/bots/recipe_modules/doxygen/examples/full.py new file mode 100644 index 0000000000..c7335fa41c --- /dev/null +++ b/infra/bots/recipe_modules/doxygen/examples/full.py @@ -0,0 +1,27 @@ +# Copyright 2018 The Chromium Authors. All rights reserved. +# Use of this source code is governed by a BSD-style license that can be +# found in the LICENSE file. + + +DEPS = [ + 'doxygen', + 'recipe_engine/path', + 'recipe_engine/properties', + 'vars', +] + + +def RunSteps(api): + api.vars.setup() + api.doxygen.generate_and_upload(api.path['start_dir']) + + +def GenTests(api): + yield ( + api.test('doxygen') + + api.properties(buildername='Housekeeper-PerCommit', + repository='https://skia.googlesource.com/skia.git', + revision='abc123', + path_config='kitchen', + swarm_out_dir='[SWARM_OUT_DIR]') + ) diff --git a/infra/bots/recipe_modules/doxygen/resources/generate_and_upload_doxygen.py b/infra/bots/recipe_modules/doxygen/resources/generate_and_upload_doxygen.py new file mode 100755 index 0000000000..968f80debf --- /dev/null +++ b/infra/bots/recipe_modules/doxygen/resources/generate_and_upload_doxygen.py @@ -0,0 +1,75 @@ +#!/usr/bin/env python +# Copyright 2014 The Chromium Authors. All rights reserved. +# Use of this source code is governed by a BSD-style license that can be +# found in the LICENSE file. + + +"""Generate Doxygen documentation.""" + + +import datetime +import os +import shutil +import subprocess +import sys + + +DOXYFILE_BASENAME = 'Doxyfile' # must match name of Doxyfile in skia root +DOXYGEN_BINARY = 'doxygen' +WORKDIR = os.path.join(os.pardir, 'doxygen_workdir') +DOXYGEN_CONFIG_DIR = os.path.join(WORKDIR, 'doxygen-config') +DOXYGEN_WORKING_DIR = os.path.join(WORKDIR, 'doxygen') +DOXYGEN_GS_PATH = '/'.join(['gs://skia-doc', 'doxygen']) + +IFRAME_FOOTER_TEMPLATE = """ +
<html><body><address style="text-align: right;"><small>
+Generated at %s for skia
+by <a href="http://www.doxygen.org/index.html">doxygen</a>
+%s </small></address></body></html>
+""" + + +def recreate_dir(path): + """Delete and recreate the directory.""" + try: + shutil.rmtree(path) + except OSError: + if os.path.exists(path): + raise Exception('Could not remove %s' % path) + os.makedirs(path) + + +def generate_and_upload_doxygen(): + """Generate Doxygen.""" + # Create empty dir and add static_footer.txt + recreate_dir(DOXYGEN_WORKING_DIR) + static_footer_path = os.path.join(DOXYGEN_WORKING_DIR, 'static_footer.txt') + shutil.copyfile(os.path.join('tools', 'doxygen_footer.txt'), + static_footer_path) + + # Make copy of doxygen config file, overriding any necessary configs, + # and run doxygen. + recreate_dir(DOXYGEN_CONFIG_DIR) + modified_doxyfile = os.path.join(DOXYGEN_CONFIG_DIR, DOXYFILE_BASENAME) + with open(DOXYFILE_BASENAME, 'r') as reader: + with open(modified_doxyfile, 'w') as writer: + shutil.copyfileobj(reader, writer) + writer.write('OUTPUT_DIRECTORY = %s\n' % DOXYGEN_WORKING_DIR) + writer.write('HTML_FOOTER = %s\n' % static_footer_path) + subprocess.check_call([DOXYGEN_BINARY, modified_doxyfile]) + + # Create iframe_footer.html + with open(os.path.join(DOXYGEN_WORKING_DIR, 'iframe_footer.html'), 'w') as f: + f.write(IFRAME_FOOTER_TEMPLATE % ( + datetime.datetime.now().isoformat(' '), + subprocess.check_output([DOXYGEN_BINARY, '--version']).rstrip())) + + # Upload. + cmd = ['gsutil', 'cp', '-a', 'public-read', '-R', + DOXYGEN_WORKING_DIR, DOXYGEN_GS_PATH] + subprocess.check_call(cmd) + + +if '__main__' == __name__: + generate_and_upload_doxygen() + diff --git a/infra/bots/recipe_modules/flavor/__init__.py b/infra/bots/recipe_modules/flavor/__init__.py index 37321e559a..28b3d9bacb 100644 --- a/infra/bots/recipe_modules/flavor/__init__.py +++ b/infra/bots/recipe_modules/flavor/__init__.py @@ -12,6 +12,7 @@ DEPS = [ 'infra', 'recipe_engine/context', 'recipe_engine/file', + 'recipe_engine/json', 'recipe_engine/path', 'recipe_engine/platform', 'recipe_engine/python', diff --git a/infra/bots/recipe_modules/flavor/android.py b/infra/bots/recipe_modules/flavor/android.py new file mode 100644 index 0000000000..ed8d9af2d4 --- /dev/null +++ b/infra/bots/recipe_modules/flavor/android.py @@ -0,0 +1,542 @@ +# Copyright 2016 The Chromium Authors. All rights reserved. +# Use of this source code is governed by a BSD-style license that can be +# found in the LICENSE file. + + +from recipe_engine import recipe_api + +from . import default +import subprocess # TODO(borenet): No! Remove this. + + +"""Android flavor, used for running code on Android.""" + + +class AndroidFlavor(default.DefaultFlavor): + def __init__(self, m): + super(AndroidFlavor, self).__init__(m) + self._ever_ran_adb = False + self.ADB_BINARY = '/usr/bin/adb.1.0.35' + self.ADB_PUB_KEY = '/home/chrome-bot/.android/adbkey' + self._golo_devices = ['Nexus5x'] + if self.m.vars.builder_cfg.get('model') in self._golo_devices: + self.ADB_BINARY = '/opt/infra-android/tools/adb' + self.ADB_PUB_KEY = ('/home/chrome-bot/.android/' + 'chrome_infrastructure_adbkey') + + # Data should go in android_data_dir, which may be preserved across runs. + android_data_dir = '/sdcard/revenge_of_the_skiabot/' + self.device_dirs = default.DeviceDirs( + bin_dir = '/data/local/tmp/', + dm_dir = android_data_dir + 'dm_out', + perf_data_dir = android_data_dir + 'perf', + resource_dir = android_data_dir + 'resources', + images_dir = android_data_dir + 'images', + skp_dir = android_data_dir + 'skps', + svg_dir = android_data_dir + 'svgs', + tmp_dir = android_data_dir) + + # A list of devices we can't root. 
If rooting fails and a device is not
+    # on the list, we fail the task to avoid perf inconsistencies.
+    self.rootable_blacklist = ['GalaxyS6', 'GalaxyS7_G930A', 'GalaxyS7_G930FD',
+                               'MotoG4', 'NVIDIA_Shield']
+
+    # Maps device type -> CPU ids that should be scaled for nanobench.
+    # Many devices have two (or more) different CPUs (e.g. big.LITTLE
+    # on Nexus5x). The CPUs listed are the biggest cpus on the device.
+    # The CPUs are grouped together, so we only need to scale one of them
+    # (the one listed) in order to scale them all.
+    # E.g. Nexus5x has cpu0-3 as one chip and cpu4-5 as the other. Thus,
+    # if one wants to run a single-threaded application (e.g. nanobench), one
+    # can disable cpu0-3 and scale cpu 4 to have only cpu4 and 5 at the same
+    # frequency. See also disable_for_nanobench.
+    self.cpus_to_scale = {
+        'Nexus5x': [4],
+        'NexusPlayer': [0, 2],  # has 2 identical chips, so scale them both.
+        'Pixel': [2],
+        'Pixel2XL': [4]
+    }
+
+    # Maps device type -> CPU ids that should be turned off when running
+    # single-threaded applications like nanobench. The devices listed have
+    # multiple, different CPUs. We notice a lot of noise that seems to be
+    # caused by nanobench running on the slow CPU, then the big CPU. By
+    # disabling the slow CPUs, we see less of that noise, since the same CPU
+    # is used for the performance testing every time.
+    self.disable_for_nanobench = {
+        'Nexus5x': range(0, 4),
+        'Pixel': range(0, 2),
+        'Pixel2XL': range(0, 4),
+        'PixelC': range(0, 2)
+    }
+
+    self.gpu_scaling = {
+        "Nexus5": 450000000,
+        "Nexus5x": 600000000,
+    }
+
+  def _run(self, title, *cmd, **kwargs):
+    with self.m.context(cwd=self.m.path['start_dir'].join('skia')):
+      return self.m.run(self.m.step, title, cmd=list(cmd), **kwargs)
+
+  def _adb(self, title, *cmd, **kwargs):
+    # The only non-infra adb steps (dm / nanobench) happen to not use _adb().
+    if 'infra_step' not in kwargs:
+      kwargs['infra_step'] = True
+
+    self._ever_ran_adb = True
+    attempts = 1
+    flaky_devices = ['NexusPlayer', 'PixelC']
+    if self.m.vars.builder_cfg.get('model') in flaky_devices:
+      attempts = 3
+
+    def wait_for_device(attempt):
+      self.m.run(self.m.step,
+                 'kill adb server after failure of \'%s\' (attempt %d)' % (
+                     title, attempt),
+                 cmd=[self.ADB_BINARY, 'kill-server'],
+                 infra_step=True, timeout=30, abort_on_failure=False,
+                 fail_build_on_failure=False)
+      self.m.run(self.m.step,
+                 'wait for device after failure of \'%s\' (attempt %d)' % (
+                     title, attempt),
+                 cmd=[self.ADB_BINARY, 'wait-for-device'], infra_step=True,
+                 timeout=180, abort_on_failure=False,
+                 fail_build_on_failure=False)
+
+    with self.m.context(cwd=self.m.path['start_dir'].join('skia')):
+      with self.m.env({'ADB_VENDOR_KEYS': self.ADB_PUB_KEY}):
+        return self.m.run.with_retry(self.m.step, title, attempts,
+                                     cmd=[self.ADB_BINARY]+list(cmd),
+                                     between_attempts_fn=wait_for_device,
+                                     **kwargs)
+
+  def _scale_for_dm(self):
+    device = self.m.vars.builder_cfg.get('model')
+    if (device in self.rootable_blacklist or
+        self.m.vars.internal_hardware_label):
+      return
+
+    # This is paranoia... any CPUs we disabled while running nanobench
+    # ought to be back online now that we've restarted the device.
+    for i in self.disable_for_nanobench.get(device, []):
+      self._set_cpu_online(i, 1)  # enable
+
+    scale_up = self.cpus_to_scale.get(device, [0])
+    # For big.LITTLE devices, make sure we scale the LITTLE cores up;
+    # there is a chance they are still in powersave mode from when
+    # swarming slows things down for cooling down and charging. 
+ if 0 not in scale_up: + scale_up.append(0) + for i in scale_up: + # AndroidOne doesn't support ondemand governor. hotplug is similar. + if device == 'AndroidOne': + self._set_governor(i, 'hotplug') + else: + self._set_governor(i, 'ondemand') + + def _scale_for_nanobench(self): + device = self.m.vars.builder_cfg.get('model') + if (device in self.rootable_blacklist or + self.m.vars.internal_hardware_label): + return + + for i in self.cpus_to_scale.get(device, [0]): + self._set_governor(i, 'userspace') + self._scale_cpu(i, 0.6) + + for i in self.disable_for_nanobench.get(device, []): + self._set_cpu_online(i, 0) # disable + + if device in self.gpu_scaling: + #https://developer.qualcomm.com/qfile/28823/lm80-p0436-11_adb_commands.pdf + # Section 3.2.1 Commands to put the GPU in performance mode + # Nexus 5 is 320000000 by default + # Nexus 5x is 180000000 by default + gpu_freq = self.gpu_scaling[device] + self.m.run.with_retry(self.m.python.inline, + "Lock GPU to %d (and other perf tweaks)" % gpu_freq, + 3, # attempts + program=""" +import os +import subprocess +import sys +import time +ADB = sys.argv[1] +freq = sys.argv[2] +idle_timer = "10000" + +log = subprocess.check_output([ADB, 'root']) +# check for message like 'adbd cannot run as root in production builds' +print log +if 'cannot' in log: + raise Exception('adb root failed') + +subprocess.check_output([ADB, 'shell', 'stop', 'thermald']) + +subprocess.check_output([ADB, 'shell', 'echo "%s" > ' + '/sys/class/kgsl/kgsl-3d0/gpuclk' % freq]) + +actual_freq = subprocess.check_output([ADB, 'shell', 'cat ' + '/sys/class/kgsl/kgsl-3d0/gpuclk']).strip() +if actual_freq != freq: + raise Exception('Frequency (actual, expected) (%s, %s)' + % (actual_freq, freq)) + +subprocess.check_output([ADB, 'shell', 'echo "%s" > ' + '/sys/class/kgsl/kgsl-3d0/idle_timer' % idle_timer]) + +actual_timer = subprocess.check_output([ADB, 'shell', 'cat ' + '/sys/class/kgsl/kgsl-3d0/idle_timer']).strip() +if actual_timer != idle_timer: + raise Exception('idle_timer (actual, expected) (%s, %s)' + % (actual_timer, idle_timer)) + +for s in ['force_bus_on', 'force_rail_on', 'force_clk_on']: + subprocess.check_output([ADB, 'shell', 'echo "1" > ' + '/sys/class/kgsl/kgsl-3d0/%s' % s]) + actual_set = subprocess.check_output([ADB, 'shell', 'cat ' + '/sys/class/kgsl/kgsl-3d0/%s' % s]).strip() + if actual_set != "1": + raise Exception('%s (actual, expected) (%s, 1)' + % (s, actual_set)) +""", + args = [self.ADB_BINARY, gpu_freq], + infra_step=True, + timeout=30) + + def _set_governor(self, cpu, gov): + self._ever_ran_adb = True + self.m.run.with_retry(self.m.python.inline, + "Set CPU %d's governor to %s" % (cpu, gov), + 3, # attempts + program=""" +import os +import subprocess +import sys +import time +ADB = sys.argv[1] +cpu = int(sys.argv[2]) +gov = sys.argv[3] + +log = subprocess.check_output([ADB, 'root']) +# check for message like 'adbd cannot run as root in production builds' +print log +if 'cannot' in log: + raise Exception('adb root failed') + +subprocess.check_output([ADB, 'shell', 'echo "%s" > ' + '/sys/devices/system/cpu/cpu%d/cpufreq/scaling_governor' % (gov, cpu)]) +actual_gov = subprocess.check_output([ADB, 'shell', 'cat ' + '/sys/devices/system/cpu/cpu%d/cpufreq/scaling_governor' % cpu]).strip() +if actual_gov != gov: + raise Exception('(actual, expected) (%s, %s)' + % (actual_gov, gov)) +""", + args = [self.ADB_BINARY, cpu, gov], + infra_step=True, + timeout=30) + + + def _set_cpu_online(self, cpu, value): + """Set /sys/devices/system/cpu/cpu{N}/online to value (0 
or 1).""" + self._ever_ran_adb = True + msg = 'Disabling' + if value: + msg = 'Enabling' + self.m.run.with_retry(self.m.python.inline, + '%s CPU %d' % (msg, cpu), + 3, # attempts + program=""" +import os +import subprocess +import sys +import time +ADB = sys.argv[1] +cpu = int(sys.argv[2]) +value = int(sys.argv[3]) + +log = subprocess.check_output([ADB, 'root']) +# check for message like 'adbd cannot run as root in production builds' +print log +if 'cannot' in log: + raise Exception('adb root failed') + +# If we try to echo 1 to an already online cpu, adb returns exit code 1. +# So, check the value before trying to write it. +prior_status = subprocess.check_output([ADB, 'shell', 'cat ' + '/sys/devices/system/cpu/cpu%d/online' % cpu]).strip() +if prior_status == str(value): + print 'CPU %d online already %d' % (cpu, value) + sys.exit() + +subprocess.check_output([ADB, 'shell', 'echo %s > ' + '/sys/devices/system/cpu/cpu%d/online' % (value, cpu)]) +actual_status = subprocess.check_output([ADB, 'shell', 'cat ' + '/sys/devices/system/cpu/cpu%d/online' % cpu]).strip() +if actual_status != str(value): + raise Exception('(actual, expected) (%s, %d)' + % (actual_status, value)) +""", + args = [self.ADB_BINARY, cpu, value], + infra_step=True, + timeout=30) + + + def _scale_cpu(self, cpu, target_percent): + self._ever_ran_adb = True + self.m.run.with_retry(self.m.python.inline, + 'Scale CPU %d to %f' % (cpu, target_percent), + 3, # attempts + program=""" +import os +import subprocess +import sys +import time +ADB = sys.argv[1] +target_percent = float(sys.argv[2]) +cpu = int(sys.argv[3]) +log = subprocess.check_output([ADB, 'root']) +# check for message like 'adbd cannot run as root in production builds' +print log +if 'cannot' in log: + raise Exception('adb root failed') + +root = '/sys/devices/system/cpu/cpu%d/cpufreq' %cpu + +# All devices we test on give a list of their available frequencies. +available_freqs = subprocess.check_output([ADB, 'shell', + 'cat %s/scaling_available_frequencies' % root]) + +# Check for message like '/system/bin/sh: file not found' +if available_freqs and '/system/bin/sh' not in available_freqs: + available_freqs = sorted( + int(i) for i in available_freqs.strip().split()) +else: + raise Exception('Could not get list of available frequencies: %s' % + available_freqs) + +maxfreq = available_freqs[-1] +target = int(round(maxfreq * target_percent)) +freq = maxfreq +for f in reversed(available_freqs): + if f <= target: + freq = f + break + +print 'Setting frequency to %d' % freq + +# If scaling_max_freq is lower than our attempted setting, it won't take. +# We must set min first, because if we try to set max to be less than min +# (which sometimes happens after certain devices reboot) it returns a +# perplexing permissions error. 
+subprocess.check_output([ADB, 'shell', 'echo 0 > ' + '%s/scaling_min_freq' % root]) +subprocess.check_output([ADB, 'shell', 'echo %d > ' + '%s/scaling_max_freq' % (freq, root)]) +subprocess.check_output([ADB, 'shell', 'echo %d > ' + '%s/scaling_setspeed' % (freq, root)]) +time.sleep(5) +actual_freq = subprocess.check_output([ADB, 'shell', 'cat ' + '%s/scaling_cur_freq' % root]).strip() +if actual_freq != str(freq): + raise Exception('(actual, expected) (%s, %d)' + % (actual_freq, freq)) +""", + args = [self.ADB_BINARY, str(target_percent), cpu], + infra_step=True, + timeout=30) + + def install(self): + self._adb('mkdir ' + self.device_dirs.resource_dir, + 'shell', 'mkdir', '-p', self.device_dirs.resource_dir) + if 'ASAN' in self.m.vars.extra_tokens: + asan_setup = self.m.vars.slave_dir.join( + 'android_ndk_linux', 'toolchains', 'llvm', 'prebuilt', + 'linux-x86_64', 'lib64', 'clang', '6.0.2', 'bin', + 'asan_device_setup') + self.m.run(self.m.python.inline, 'Setting up device to run ASAN', + program=""" +import os +import subprocess +import sys +import time +ADB = sys.argv[1] +ASAN_SETUP = sys.argv[2] + +def wait_for_device(): + while True: + time.sleep(5) + print 'Waiting for device' + subprocess.check_output([ADB, 'wait-for-device']) + bit1 = subprocess.check_output([ADB, 'shell', 'getprop', + 'dev.bootcomplete']) + bit2 = subprocess.check_output([ADB, 'shell', 'getprop', + 'sys.boot_completed']) + if '1' in bit1 and '1' in bit2: + print 'Device detected' + break + +log = subprocess.check_output([ADB, 'root']) +# check for message like 'adbd cannot run as root in production builds' +print log +if 'cannot' in log: + raise Exception('adb root failed') + +output = subprocess.check_output([ADB, 'disable-verity']) +print output + +if 'already disabled' not in output: + print 'Rebooting device' + subprocess.check_output([ADB, 'reboot']) + wait_for_device() + +def installASAN(revert=False): + # ASAN setup script is idempotent, either it installs it or + # says it's installed. Returns True on success, false otherwise. 
+  out = subprocess.check_output([ADB, 'wait-for-device'])
+  print out
+  cmd = [ASAN_SETUP]
+  if revert:
+    cmd = [ASAN_SETUP, '--revert']
+  process = subprocess.Popen(cmd, env={'ADB': ADB},
+                             stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+
+  # this also blocks until command finishes
+  (stdout, stderr) = process.communicate()
+  print stdout
+  print 'Stderr: %s' % stderr
+  return process.returncode == 0
+
+if not installASAN():
+  print 'Trying to revert the ASAN install and then re-install'
+  # The ASAN script sometimes has issues if it was interrupted or partially
+  # applied. Try reverting it, then re-enabling it.
+  if not installASAN(revert=True):
+    raise Exception('reverting ASAN install failed')
+
+  # Sleep because device does not reboot instantly
+  time.sleep(10)
+
+  if not installASAN():
+    raise Exception('Tried twice to setup ASAN and failed.')
+
+# Sleep because device does not reboot instantly
+time.sleep(10)
+wait_for_device()
+""",
+               args = [self.ADB_BINARY, asan_setup],
+               infra_step=True,
+               timeout=300,
+               abort_on_failure=True)
+
+  def cleanup_steps(self):
+    if self._ever_ran_adb:
+      self.m.run(self.m.python.inline, 'dump log', program="""
+          import os
+          import subprocess
+          import sys
+          out = sys.argv[1]
+          log = subprocess.check_output(['%s', 'logcat', '-d'])
+          for line in log.split('\\n'):
+            tokens = line.split()
+            if len(tokens) == 11 and tokens[-7] == 'F' and tokens[-3] == 'pc':
+              addr, path = tokens[-2:]
+              local = os.path.join(out, os.path.basename(path))
+              if os.path.exists(local):
+                sym = subprocess.check_output(['addr2line', '-Cfpe', local, addr])
+                line = line.replace(addr, addr + ' ' + sym.strip())
+            print line
+          """ % self.ADB_BINARY,
+                 args=[self.m.vars.skia_out],
+                 infra_step=True,
+                 timeout=300,
+                 abort_on_failure=False)
+
+    # Only quarantine the bot if the first failed step is an infra step.
+    # If, instead, we did this for any infra failure, we would do it too
+    # often. For example, if a Nexus 10 died during dm, the following pull
+    # step would also fail with "device not found", causing us to run the
+    # shutdown command even though the device was probably not in a broken
+    # state; it was just rebooting.
+    if (self.m.run.failed_steps and
+        isinstance(self.m.run.failed_steps[0], recipe_api.InfraFailure)):
+      bot_id = self.m.vars.swarming_bot_id
+      self.m.file.write_text('Quarantining Bot',
+                             '/home/chrome-bot/%s.force_quarantine' % bot_id,
+                             ' ')
+
+    if self._ever_ran_adb:
+      self._adb('kill adb server', 'kill-server')
+
+  def step(self, name, cmd, **kwargs):
+    if (cmd[0] == 'nanobench'):
+      self._scale_for_nanobench()
+    else:
+      self._scale_for_dm()
+    app = self.m.vars.skia_out.join(cmd[0])
+    self._adb('push %s' % cmd[0],
+              'push', app, self.device_dirs.bin_dir)
+
+    sh = '%s.sh' % cmd[0]
+    self.m.run.writefile(self.m.vars.tmp_dir.join(sh),
+        'set -x; %s%s; echo $? >%src' % (
+            self.device_dirs.bin_dir, subprocess.list2cmdline(map(str, cmd)),
+            self.device_dirs.bin_dir))
+    self._adb('push %s' % sh,
+              'push', self.m.vars.tmp_dir.join(sh), self.device_dirs.bin_dir)
+
+    self._adb('clear log', 'logcat', '-c')
+    self.m.python.inline('%s' % cmd[0], """
+    import subprocess
+    import sys
+    bin_dir = sys.argv[1]
+    sh = sys.argv[2]
+    subprocess.check_call(['%s', 'shell', 'sh', bin_dir + sh])
+    try:
+      sys.exit(int(subprocess.check_output(['%s', 'shell', 'cat',
+                                            bin_dir + 'rc'])))
+    except ValueError:
+      print "Couldn't read the return code. Probably killed for OOM." 
+ sys.exit(1) + """ % (self.ADB_BINARY, self.ADB_BINARY), + args=[self.device_dirs.bin_dir, sh]) + + def copy_file_to_device(self, host, device): + self._adb('push %s %s' % (host, device), 'push', host, device) + + def copy_directory_contents_to_device(self, host, device): + # Copy the tree, avoiding hidden directories and resolving symlinks. + self.m.run(self.m.python.inline, 'push %s/* %s' % (host, device), + program=""" + import os + import subprocess + import sys + host = sys.argv[1] + device = sys.argv[2] + for d, _, fs in os.walk(host): + p = os.path.relpath(d, host) + if p != '.' and p.startswith('.'): + continue + for f in fs: + print os.path.join(p,f) + subprocess.check_call(['%s', 'push', + os.path.realpath(os.path.join(host, p, f)), + os.path.join(device, p, f)]) + """ % self.ADB_BINARY, args=[host, device], infra_step=True) + + def copy_directory_contents_to_host(self, device, host): + self._adb('pull %s %s' % (device, host), 'pull', device, host) + + def read_file_on_device(self, path, **kwargs): + rv = self._adb('read %s' % path, + 'shell', 'cat', path, stdout=self.m.raw_io.output(), + **kwargs) + return rv.stdout.rstrip() if rv and rv.stdout else None + + def remove_file_on_device(self, path): + self._adb('rm %s' % path, 'shell', 'rm', '-f', path) + + def create_clean_device_dir(self, path): + self._adb('rm %s' % path, 'shell', 'rm', '-rf', path) + self._adb('mkdir %s' % path, 'shell', 'mkdir', '-p', path) diff --git a/infra/bots/recipe_modules/flavor/api.py b/infra/bots/recipe_modules/flavor/api.py index c5a1be8030..9dee6f77ee 100644 --- a/infra/bots/recipe_modules/flavor/api.py +++ b/infra/bots/recipe_modules/flavor/api.py @@ -8,13 +8,24 @@ from recipe_engine import recipe_api -from . import default_flavor -from . import gn_android_flavor -from . import gn_chromebook_flavor -from . import gn_chromecast_flavor -from . import gn_flavor -from . import ios_flavor -from . import valgrind_flavor +from . import android +from . import chromebook +from . import chromecast +from . import default +from . import ios +from . import valgrind + + +"""Abstractions for running code on various platforms. + +The methods in this module define how certain high-level functions should work. +Each flavor should correspond to a subclass of DefaultFlavor which may override +any of these functions as appropriate for that flavor. + +For example, the AndroidFlavor will override the functions for copying files +between the host and Android device, as well as the 'step' function, so that +commands may be run through ADB. 
+""" VERSION_FILE_SK_IMAGE = 'SK_IMAGE_VERSION' @@ -50,17 +61,17 @@ class SkiaFlavorApi(recipe_api.RecipeApi): def get_flavor(self, vars_api): """Return a flavor utils object specific to the given builder.""" if is_chromecast(vars_api): - return gn_chromecast_flavor.GNChromecastFlavorUtils(self) + return chromecast.ChromecastFlavor(self) if is_chromebook(vars_api): - return gn_chromebook_flavor.GNChromebookFlavorUtils(self) + return chromebook.ChromebookFlavor(self) if is_android(vars_api) and not is_test_skqp(vars_api): - return gn_android_flavor.GNAndroidFlavorUtils(self) + return android.AndroidFlavor(self) elif is_ios(vars_api): - return ios_flavor.iOSFlavorUtils(self) + return ios.iOSFlavor(self) elif is_valgrind(vars_api): - return valgrind_flavor.ValgrindFlavorUtils(self) + return valgrind.ValgrindFlavor(self) else: - return gn_flavor.GNFlavorUtils(self) + return default.DefaultFlavor(self) def setup(self): self._f = self.get_flavor(self.m.vars) diff --git a/infra/bots/recipe_modules/flavor/chromebook.py b/infra/bots/recipe_modules/flavor/chromebook.py new file mode 100644 index 0000000000..eb745d3972 --- /dev/null +++ b/infra/bots/recipe_modules/flavor/chromebook.py @@ -0,0 +1,131 @@ +# Copyright 2016 The Chromium Authors. All rights reserved. +# Use of this source code is governed by a BSD-style license that can be +# found in the LICENSE file. + + +from recipe_engine import recipe_api + +import default +import json # TODO(borenet): No! Remove this. + + +"""Chromebook flavor, used for running code on Chromebooks.""" + + +class ChromebookFlavor(default.DefaultFlavor): + + def __init__(self, m): + super(ChromebookFlavor, self).__init__(m) + self._user_ip = '' + + self.chromeos_homedir = '/home/chronos/user/' + self.device_dirs = default.DeviceDirs( + bin_dir = self.chromeos_homedir + 'bin', + dm_dir = self.chromeos_homedir + 'dm_out', + perf_data_dir = self.chromeos_homedir + 'perf', + resource_dir = self.chromeos_homedir + 'resources', + images_dir = self.chromeos_homedir + 'images', + skp_dir = self.chromeos_homedir + 'skps', + svg_dir = self.chromeos_homedir + 'svgs', + tmp_dir = self.chromeos_homedir) + + @property + def user_ip(self): + if not self._user_ip: + ssh_info = self.m.run(self.m.python.inline, 'read chromeos ip', + program=""" + import os + SSH_MACHINE_FILE = os.path.expanduser('~/ssh_machine.json') + with open(SSH_MACHINE_FILE, 'r') as f: + print f.read() + """, + stdout=self.m.raw_io.output(), + infra_step=True).stdout + + self._user_ip = json.loads(ssh_info).get(u'user_ip', 'ERROR') + return self._user_ip + + def _ssh(self, title, *cmd, **kwargs): + if 'infra_step' not in kwargs: + kwargs['infra_step'] = True + + ssh_cmd = ['ssh', '-oConnectTimeout=15', '-oBatchMode=yes', + '-t', '-t', self.user_ip] + list(cmd) + + return self._run(title, ssh_cmd, **kwargs) + + def install(self): + self._ssh('mkdir %s' % self.device_dirs.resource_dir, 'mkdir', '-p', + self.device_dirs.resource_dir) + + # Ensure the home dir is marked executable + self._ssh('remount %s as exec' % self.chromeos_homedir, + 'sudo', 'mount', '-i', '-o', 'remount,exec', '/home/chronos') + + self.create_clean_device_dir(self.device_dirs.bin_dir) + + def create_clean_device_dir(self, path): + # use -f to silently return if path doesn't exist + self._ssh('rm %s' % path, 'rm', '-rf', path) + self._ssh('mkdir %s' % path, 'mkdir', '-p', path) + + def read_file_on_device(self, path, **kwargs): + rv = self._ssh('read %s' % path, + 'cat', path, stdout=self.m.raw_io.output(), + **kwargs) + return 
rv.stdout.rstrip() if rv and rv.stdout else None
+
+  def remove_file_on_device(self, path):
+    # use -f to silently return if path doesn't exist
+    self._ssh('rm %s' % path, 'rm', '-f', path)
+
+  def _prefix_device_path(self, device_path):
+    return '%s:%s' % (self.user_ip, device_path)
+
+  def copy_file_to_device(self, host_path, device_path):
+    device_path = self._prefix_device_path(device_path)
+    # The recipe engine has no native scp support, so shell out to scp.
+    self.m.python.inline(str('scp %s %s' % (host_path, device_path)),
+    """
+    import subprocess
+    import sys
+    host = sys.argv[1]
+    device = sys.argv[2]
+    print subprocess.check_output(['scp', host, device])
+    """, args=[host_path, device_path], infra_step=True)
+
+  def _copy_dir(self, src, dest):
+    # We can't use rsync to communicate with the chromebooks because the
+    # chromebooks don't have rsync installed on them.
+    self.m.python.inline(str('scp -r %s %s' % (src, dest)),
+    """
+    import subprocess
+    import sys
+    src = sys.argv[1] + '/*'
+    dest = sys.argv[2]
+    print subprocess.check_output('scp -r %s %s' % (src, dest), shell=True)
+    """, args=[src, dest], infra_step=True)
+
+  def copy_directory_contents_to_device(self, host_path, device_path):
+    self._copy_dir(host_path, self._prefix_device_path(device_path))
+
+  def copy_directory_contents_to_host(self, device_path, host_path):
+    self._copy_dir(self._prefix_device_path(device_path), host_path)
+
+  def step(self, name, cmd, **kwargs):
+    # Push and run either dm or nanobench
+
+    name = cmd[0]
+
+    if name == 'dm':
+      self.create_clean_host_dir(self.host_dirs.dm_dir)
+    if name == 'nanobench':
+      self.create_clean_host_dir(self.host_dirs.perf_data_dir)
+
+    app = self.m.vars.skia_out.join(cmd[0])
+
+    cmd[0] = '%s/%s' % (self.device_dirs.bin_dir, cmd[0])
+    self.copy_file_to_device(app, cmd[0])
+
+    self._ssh('chmod %s' % name, 'chmod', '+x', cmd[0])
+    self._ssh(str(name), *cmd)
diff --git a/infra/bots/recipe_modules/flavor/chromecast.py b/infra/bots/recipe_modules/flavor/chromecast.py
new file mode 100644
index 0000000000..c897aa93ab
--- /dev/null
+++ b/infra/bots/recipe_modules/flavor/chromecast.py
@@ -0,0 +1,154 @@
+# Copyright 2016 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+from recipe_engine import recipe_api
+
+from . import android
+from . import default
+
+
+"""Chromecast flavor, used for running code on Chromecast."""
+
+
+class ChromecastFlavor(android.AndroidFlavor):
+  def __init__(self, m):
+    super(ChromecastFlavor, self).__init__(m)
+    self._ever_ran_adb = False
+    self._user_ip = ''
+
+    # Disk space is extremely tight on the Chromecasts (~100M). There is not
+    # enough space in the android_data_dir (/cache/skia) to fit the images,
+    # resources, executable and the dm output images. So, we put dm_out on
+    # the tmpfs (i.e. RAM) /dev/shm (which is about 140M).
+    data_dir = '/cache/skia/'
+    self.device_dirs = default.DeviceDirs(
+        bin_dir       = '/cache/skia/bin',
+        dm_dir        = '/dev/shm/skia/dm_out',
+        perf_data_dir = data_dir + 'perf',
+        resource_dir  = data_dir + 'resources',
+        images_dir    = data_dir + 'images',
+        skp_dir       = data_dir + 'skps',
+        svg_dir       = data_dir + 'svgs',
+        tmp_dir       = data_dir)
+
+  @property
+  def user_ip_host(self):
+    if not self._user_ip:
+      self._user_ip = self.m.run(self.m.python.inline, 'read chromecast ip',
+                                 program="""
+      import os
+      CHROMECAST_IP_FILE = os.path.expanduser('~/chromecast.txt')
+      with open(CHROMECAST_IP_FILE, 'r') as f:
+        print f.read()
+      """,
+      stdout=self.m.raw_io.output(),
+      infra_step=True).stdout
+
+    return self._user_ip
+
+  @property
+  def user_ip(self):
+    return self.user_ip_host.split(':')[0]
+
+  def install(self):
+    super(ChromecastFlavor, self).install()
+    self._adb('mkdir ' + self.device_dirs.bin_dir,
+              'shell', 'mkdir', '-p', self.device_dirs.bin_dir)
+
+  def _adb(self, title, *cmd, **kwargs):
+    if not self._ever_ran_adb:
+      self._connect_to_remote()
+
+    self._ever_ran_adb = True
+    # The only non-infra adb steps (dm / nanobench) happen to not use _adb().
+    if 'infra_step' not in kwargs:
+      kwargs['infra_step'] = True
+    return self._run(title, 'adb', *cmd, **kwargs)
+
+  def _connect_to_remote(self):
+    self.m.run(self.m.step, 'adb connect %s' % self.user_ip_host, cmd=['adb',
+               'connect', self.user_ip_host], infra_step=True)
+
+  def create_clean_device_dir(self, path):
+    # Note: Chromecast does not support -rf
+    self._adb('rm %s' % path, 'shell', 'rm', '-r', path)
+    self._adb('mkdir %s' % path, 'shell', 'mkdir', '-p', path)
+
+  def copy_directory_contents_to_device(self, host, device):
+    # Copy the tree, avoiding hidden directories and resolving symlinks.
+    # Additionally, due to space constraints, we don't push files larger
+    # than 1.5 MB (see the size check below), which cuts down the size of
+    # the SKP asset to be around 50 MB as of version 41.
+    self.m.run(self.m.python.inline, 'push %s/* %s' % (host, device),
+               program="""
+    import os
+    import subprocess
+    import sys
+    host = sys.argv[1]
+    device = sys.argv[2]
+    for d, _, fs in os.walk(host):
+      p = os.path.relpath(d, host)
+      if p != '.' and p.startswith('.'):
+        continue
+      for f in fs:
+        print os.path.join(p,f)
+        hp = os.path.realpath(os.path.join(host, p, f))
+        if os.stat(hp).st_size > (1.5 * 1024 * 1024):
+          print "Skipping because it is too big"
+        else:
+          subprocess.check_call(['adb', 'push',
+                                 hp, os.path.join(device, p, f)])
+    """, args=[host, device], infra_step=True)
+
+  def cleanup_steps(self):
+    if self._ever_ran_adb:
+      # To clean up disk space for next time
+      self._ssh('Delete executables', 'rm', '-r', self.device_dirs.bin_dir,
+                abort_on_failure=False, infra_step=True)
+      # Reconnect if we were disconnected
+      self._adb('disconnect', 'disconnect')
+      self._connect_to_remote()
+      self.m.run(self.m.python.inline, 'dump log', program="""
+      import os
+      import subprocess
+      import sys
+      out = sys.argv[1]
+      log = subprocess.check_output(['adb', 'logcat', '-d'])
+      for line in log.split('\\n'):
+        tokens = line.split()
+        if len(tokens) == 11 and tokens[-7] == 'F' and tokens[-3] == 'pc':
+          addr, path = tokens[-2:]
+          local = os.path.join(out, os.path.basename(path))
+          if os.path.exists(local):
+            sym = subprocess.check_output(['addr2line', '-Cfpe', local, addr])
+            line = line.replace(addr, addr + ' ' + sym.strip())
+        print line
+      """,
+                 args=[self.m.vars.skia_out],
+                 infra_step=True,
+                 abort_on_failure=False)
+
+      self._adb('disconnect', 'disconnect')
+      self._adb('kill adb server', 'kill-server')
+
+  def _ssh(self, title, *cmd, **kwargs):
+    # Don't use -t -t (force pseudo-tty allocation) like in the ChromeOS
+    # version, because the pseudo-tty allocation seems to fail
+    # instantly when talking to a Chromecast.
+    # This was exacerbated when we migrated to kitchen and was marked by
+    # all the ssh commands instantly failing (even after connecting and
+    # authenticating) with exit code -1 (255).
+    ssh_cmd = ['ssh', '-oConnectTimeout=15', '-oBatchMode=yes',
+               '-T', 'root@%s' % self.user_ip] + list(cmd)
+
+    return self.m.run(self.m.step, title, cmd=ssh_cmd, **kwargs)
+
+  def step(self, name, cmd, **kwargs):
+    app = self.m.vars.skia_out.join(cmd[0])
+
+    self._adb('push %s' % cmd[0],
+              'push', app, self.device_dirs.bin_dir)
+
+    cmd[0] = '%s/%s' % (self.device_dirs.bin_dir, cmd[0])
+    self._ssh(str(name), *cmd, infra_step=False)
diff --git a/infra/bots/recipe_modules/flavor/default.py b/infra/bots/recipe_modules/flavor/default.py
new file mode 100644
index 0000000000..501c6bead5
--- /dev/null
+++ b/infra/bots/recipe_modules/flavor/default.py
@@ -0,0 +1,251 @@
+# Copyright 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
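The default.py that follows is the base of the flavor hierarchy used by get_flavor() in api.py: it implements the whole copy/install/step surface for machines with no attached device, and each device flavor (Android, Chromebook, Chromecast above) overrides only the pieces whose transport differs. A minimal sketch of that shape, under stated assumptions (these class names are illustrative and not part of the patch):

# Illustrative sketch only: the base class handles the no-device case, where
# "device" paths are host paths; a remote flavor swaps in a real transport.
import subprocess

class BaseFlavor(object):
    def copy_file_to_device(self, host_path, device_path):
        # No attached device: copying is only a consistency check.
        if str(host_path) != str(device_path):
            raise ValueError('no attached device; paths must match')

class SSHFlavor(BaseFlavor):
    # Remote case (compare ChromebookFlavor above): same interface, scp transport.
    def __init__(self, user_ip):
        self.user_ip = user_ip

    def copy_file_to_device(self, host_path, device_path):
        subprocess.check_call(
            ['scp', host_path, '%s:%s' % (self.user_ip, device_path)])

Because callers only ever see the shared interface, test and perf recipes stay identical across desktops, phones, and Chromecasts; only the flavor object chosen at setup time changes.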
+ + +# pylint: disable=W0201 + + +"""Default flavor, used for running code on desktop machines.""" + + +WIN_TOOLCHAIN_DIR = 't' + + +class DeviceDirs(object): + def __init__(self, + bin_dir, + dm_dir, + perf_data_dir, + resource_dir, + images_dir, + skp_dir, + svg_dir, + tmp_dir): + self._bin_dir = bin_dir + self._dm_dir = dm_dir + self._perf_data_dir = perf_data_dir + self._resource_dir = resource_dir + self._images_dir = images_dir + self._skp_dir = skp_dir + self._svg_dir = svg_dir + self._tmp_dir = tmp_dir + + @property + def bin_dir(self): + return self._bin_dir + + @property + def dm_dir(self): + """Where DM writes.""" + return self._dm_dir + + @property + def perf_data_dir(self): + return self._perf_data_dir + + @property + def resource_dir(self): + return self._resource_dir + + @property + def images_dir(self): + return self._images_dir + + @property + def skp_dir(self): + """Holds SKP files that are consumed by RenderSKPs and BenchPictures.""" + return self._skp_dir + + @property + def svg_dir(self): + return self._svg_dir + + @property + def tmp_dir(self): + return self._tmp_dir + + +class DefaultFlavor(object): + def __init__(self, module): + # Store a pointer to the parent recipe module (SkiaFlavorApi) so that + # Flavor objects can do recipe module-like things, like run steps or + # access module-level resources. + self.module = module + + # self.m is just a shortcut so that Flavor objects can use the same + # syntax as regular recipe modules to run steps, e.g. self.m.step(...) + self.m = module.m + self._chrome_path = None + self.device_dirs = DeviceDirs( + bin_dir=self.m.vars.build_dir.join('out', self.m.vars.configuration), + dm_dir=self.m.path.join(self.m.vars.swarming_out_dir, 'dm'), + perf_data_dir=self.m.path.join( + self.m.vars.swarming_out_dir, + 'perfdata', self.m.vars.builder_name, 'data'), + resource_dir=self.m.path['start_dir'].join('skia', 'resources'), + images_dir=self.m.path['start_dir'].join('skimage'), + skp_dir=self.m.path['start_dir'].join('skp'), + svg_dir=self.m.path['start_dir'].join('svg'), + tmp_dir=self.m.vars.tmp_dir) + self.host_dirs = self.device_dirs + + def device_path_join(self, *args): + """Like os.path.join(), but for paths on a connected device.""" + return self.m.path.join(*args) + + def copy_directory_contents_to_device(self, host_dir, device_dir): + """Like shutil.copytree(), but for copying to a connected device.""" + # For "normal" builders who don't have an attached device, we expect + # host_dir and device_dir to be the same. + if str(host_dir) != str(device_dir): + raise ValueError('For builders who do not have attached devices, copying ' + 'from host to device is undefined and only allowed if ' + 'host_dir and device_dir are the same (%s vs %s).' % ( + str(host_dir), str(device_dir))) + + def copy_directory_contents_to_host(self, device_dir, host_dir): + """Like shutil.copytree(), but for copying from a connected device.""" + # For "normal" builders who don't have an attached device, we expect + # host_dir and device_dir to be the same. + if str(host_dir) != str(device_dir): + raise ValueError('For builders who do not have attached devices, copying ' + 'from device to host is undefined and only allowed if ' + 'host_dir and device_dir are the same (%s vs %s).'
% ( + str(host_dir), str(device_dir))) + + def copy_file_to_device(self, host_path, device_path): + """Like shutil.copyfile, but for copying to a connected device.""" + # For "normal" builders who don't have an attached device, we expect + # host_dir and device_dir to be the same. + if str(host_path) != str(device_path): + raise ValueError('For builders who do not have attached devices, copying ' + 'from host to device is undefined and only allowed if ' + 'host_path and device_path are the same (%s vs %s).' % ( + str(host_path), str(device_path))) + + def create_clean_device_dir(self, path): + """Like shutil.rmtree() + os.makedirs(), but on a connected device.""" + self.create_clean_host_dir(path) + + def create_clean_host_dir(self, path): + """Convenience function for creating a clean directory.""" + self.m.run.rmtree(path) + self.m.file.ensure_directory( + 'makedirs %s' % self.m.path.basename(path), path) + + def install(self): + """Run device-specific installation steps.""" + pass + + def cleanup_steps(self): + """Run any device-specific cleanup steps.""" + pass + + def _run(self, title, cmd, infra_step=False, **kwargs): + return self.m.run(self.m.step, title, cmd=cmd, + infra_step=infra_step, **kwargs) + + def _py(self, title, script, infra_step=True, args=()): + return self.m.run(self.m.python, title, script=script, args=args, + infra_step=infra_step) + + def step(self, name, cmd): + app = self.device_dirs.bin_dir.join(cmd[0]) + cmd = [app] + cmd[1:] + env = self.m.context.env + path = [] + ld_library_path = [] + + slave_dir = self.m.vars.slave_dir + clang_linux = str(slave_dir.join('clang_linux')) + extra_tokens = self.m.vars.extra_tokens + + if self.m.vars.is_linux: + if (self.m.vars.builder_cfg.get('cpu_or_gpu', '') == 'GPU' + and 'Intel' in self.m.vars.builder_cfg.get('cpu_or_gpu_value', '')): + # The vulkan in this asset name simply means that the graphics driver + # supports Vulkan. It is also the driver used for GL code. + dri_path = slave_dir.join('linux_vulkan_intel_driver_release') + if self.m.vars.builder_cfg.get('configuration', '') == 'Debug': + dri_path = slave_dir.join('linux_vulkan_intel_driver_debug') + ld_library_path.append(dri_path) + env['LIBGL_DRIVERS_PATH'] = str(dri_path) + env['VK_ICD_FILENAMES'] = str(dri_path.join('intel_icd.x86_64.json')) + + if 'Vulkan' in extra_tokens: + path.append(slave_dir.join('linux_vulkan_sdk', 'bin')) + ld_library_path.append(slave_dir.join('linux_vulkan_sdk', 'lib')) + + if 'SwiftShader' in extra_tokens: + ld_library_path.append( + self.m.vars.build_dir.join('out', 'swiftshader_out')) + + if 'MSAN' in extra_tokens: + # Find the MSAN-built libc++. + ld_library_path.append(clang_linux + '/msan') + + if any('SAN' in t for t in extra_tokens): + # Sanitized binaries may want to run clang_linux/bin/llvm-symbolizer. + path.append(clang_linux + '/bin') + # We find that testing sanitizer builds with libc++ uncovers more issues + # than with the system-provided C++ standard library, which is usually + # libstdc++. libc++ proactively hooks into sanitizers to help their + # analyses. We ship a copy of libc++ with our Linux toolchain in /lib. 
+ ld_library_path.append(clang_linux + '/lib') + elif self.m.vars.is_linux: + cmd = ['catchsegv'] + cmd + elif 'ProcDump' in extra_tokens: + dumps_dir = self.m.path.join(self.m.vars.swarming_out_dir, 'dumps') + self.m.file.ensure_directory('makedirs dumps', dumps_dir) + procdump = str(self.m.vars.slave_dir.join('procdump_win', + 'procdump64.exe')) + # Full docs for ProcDump here: + # https://docs.microsoft.com/en-us/sysinternals/downloads/procdump + # -accepteula automatically accepts the license agreement + # -mp saves a packed minidump to save space + # -e 1 tells procdump to dump once + # -x <dir> <exe> launches exe and writes dumps to the + # specified dir + cmd = [procdump, '-accepteula', '-mp', '-e', '1', '-x', dumps_dir] + cmd + + if 'ASAN' in extra_tokens or 'UBSAN' in extra_tokens: + if 'Mac' in self.m.vars.builder_cfg.get('os', ''): + env['ASAN_OPTIONS'] = 'symbolize=1' # Mac doesn't support detect_leaks. + else: + env['ASAN_OPTIONS'] = 'symbolize=1 detect_leaks=1' + env['LSAN_OPTIONS'] = 'symbolize=1 print_suppressions=1' + env['UBSAN_OPTIONS'] = 'symbolize=1 print_stacktrace=1' + + if 'TSAN' in extra_tokens: + # We don't care about malloc(), fprintf, etc. used in signal handlers. + # If we're in a signal handler, we're already crashing... + env['TSAN_OPTIONS'] = 'report_signal_unsafe=0' + + if 'Coverage' in extra_tokens: + # This is the output file for the coverage data. Just running the binary + # will produce the output. The output_file is in the swarming_out_dir and + # thus will be an isolated output of the Test step. + profname = '%s.profraw' % self.m.vars.builder_cfg.get('test_filter', 'o') + env['LLVM_PROFILE_FILE'] = self.m.path.join(self.m.vars.swarming_out_dir, + profname) + + if path: + env['PATH'] = '%%(PATH)s:%s' % ':'.join('%s' % p for p in path) + if ld_library_path: + env['LD_LIBRARY_PATH'] = ':'.join('%s' % p for p in ld_library_path) + + to_symbolize = ['dm', 'nanobench'] + if name in to_symbolize and self.m.vars.is_linux: + # Convert path objects or placeholders into strings such that they can + # be passed to symbolize_stack_trace.py + args = [slave_dir] + [str(x) for x in cmd] + with self.m.context(cwd=self.m.path['start_dir'].join('skia'), env=env): + self._py('symbolized %s' % name, + self.module.resource('symbolize_stack_trace.py'), + args=args, + infra_step=False) + + else: + with self.m.context(env=env): + self._run(name, cmd) diff --git a/infra/bots/recipe_modules/flavor/default_flavor.py b/infra/bots/recipe_modules/flavor/default_flavor.py deleted file mode 100644 index ec684efd1f..0000000000 --- a/infra/bots/recipe_modules/flavor/default_flavor.py +++ /dev/null @@ -1,157 +0,0 @@ -# Copyright 2014 The Chromium Authors. All rights reserved. -# Use of this source code is governed by a BSD-style license that can be -# found in the LICENSE file.
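The new default.py above folds the old DefaultFlavorUtils/GNFlavorUtils split (deleted below) into a single DefaultFlavor base class. A hypothetical subclass (ExampleFlavor is an illustrative name, not part of this change) only needs to override the hooks that differ on its platform:

    from . import default

    class ExampleFlavor(default.DefaultFlavor):
      def install(self):
        # Device-specific setup; the base implementation is a no-op.
        pass

      def step(self, name, cmd):
        # Run cmd however this platform requires; the base implementation
        # runs it on the host with sanitizer/coverage env vars applied.
        super(ExampleFlavor, self).step(name, cmd)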
- - -# pylint: disable=W0201 - - -"""Default flavor utils class, used for desktop builders.""" - - -import json - - -WIN_TOOLCHAIN_DIR = 't' - - -class DeviceDirs(object): - def __init__(self, - bin_dir, - dm_dir, - perf_data_dir, - resource_dir, - images_dir, - skp_dir, - svg_dir, - tmp_dir): - self._bin_dir = bin_dir - self._dm_dir = dm_dir - self._perf_data_dir = perf_data_dir - self._resource_dir = resource_dir - self._images_dir = images_dir - self._skp_dir = skp_dir - self._svg_dir = svg_dir - self._tmp_dir = tmp_dir - - @property - def bin_dir(self): - return self._bin_dir - - @property - def dm_dir(self): - """Where DM writes.""" - return self._dm_dir - - @property - def perf_data_dir(self): - return self._perf_data_dir - - @property - def resource_dir(self): - return self._resource_dir - - @property - def images_dir(self): - return self._images_dir - - @property - def skp_dir(self): - """Holds SKP files that are consumed by RenderSKPs and BenchPictures.""" - return self._skp_dir - - @property - def svg_dir(self): - return self._svg_dir - - @property - def tmp_dir(self): - return self._tmp_dir - - -class DefaultFlavorUtils(object): - """Utilities to be used by build steps. - - The methods in this class define how certain high-level functions should - work. Each build step flavor should correspond to a subclass of - DefaultFlavorUtils which may override any of these functions as appropriate - for that flavor. - - For example, the AndroidFlavorUtils will override the functions for - copying files between the host and Android device, as well as the - 'step' function, so that commands may be run through ADB. - """ - def __init__(self, module): - # Store a pointer to the parent recipe module (SkiaFlavorApi) so that - # FlavorUtils objects can do recipe module-like things, like run steps or - # access module-level resources. - self.module = module - - # self.m is just a shortcut so that FlavorUtils objects can use the same - # syntax as regular recipe modules to run steps, eg: self.m.step(...) - self.m = module.m - self._chrome_path = None - self.device_dirs = DeviceDirs( - bin_dir=self.m.vars.build_dir.join('out', self.m.vars.configuration), - dm_dir=self.m.path.join(self.m.vars.swarming_out_dir, 'dm'), - perf_data_dir=self.m.path.join( - self.m.vars.swarming_out_dir, - 'perfdata', self.m.vars.builder_name, 'data'), - resource_dir=self.m.path['start_dir'].join('skia', 'resources'), - images_dir=self.m.path['start_dir'].join('skimage'), - skp_dir=self.m.path['start_dir'].join('skp'), - svg_dir=self.m.path['start_dir'].join('svg'), - tmp_dir=self.m.vars.tmp_dir) - self.host_dirs = self.device_dirs - - def device_path_join(self, *args): - """Like os.path.join(), but for paths on a connected device.""" - return self.m.path.join(*args) - - def copy_directory_contents_to_device(self, host_dir, device_dir): - """Like shutil.copytree(), but for copying to a connected device.""" - # For "normal" builders who don't have an attached device, we expect - # host_dir and device_dir to be the same. - if str(host_dir) != str(device_dir): - raise ValueError('For builders who do not have attached devices, copying ' - 'from host to device is undefined and only allowed if ' - 'host_path and device_path are the same (%s vs %s).' 
% ( - str(host_dir), str(device_dir))) - - def copy_directory_contents_to_host(self, device_dir, host_dir): - """Like shutil.copytree(), but for copying from a connected device.""" - # For "normal" builders who don't have an attached device, we expect - # host_dir and device_dir to be the same. - if str(host_dir) != str(device_dir): - raise ValueError('For builders who do not have attached devices, copying ' - 'from device to host is undefined and only allowed if ' - 'host_path and device_path are the same (%s vs %s).' % ( - str(host_dir), str(device_dir))) - - def copy_file_to_device(self, host_path, device_path): - """Like shutil.copyfile, but for copying to a connected device.""" - # For "normal" builders who don't have an attached device, we expect - # host_dir and device_dir to be the same. - if str(host_path) != str(device_path): - raise ValueError('For builders who do not have attached devices, copying ' - 'from host to device is undefined and only allowed if ' - 'host_path and device_path are the same (%s vs %s).' % ( - str(host_path), str(device_path))) - - def create_clean_device_dir(self, path): - """Like shutil.rmtree() + os.makedirs(), but on a connected device.""" - self.create_clean_host_dir(path) - - def create_clean_host_dir(self, path): - """Convenience function for creating a clean directory.""" - self.m.run.rmtree(path) - self.m.file.ensure_directory( - 'makedirs %s' % self.m.path.basename(path), path) - - def install(self): - """Run device-specific installation steps.""" - pass - - def cleanup_steps(self): - """Run any device-specific cleanup steps.""" - pass diff --git a/infra/bots/recipe_modules/flavor/gn_android_flavor.py b/infra/bots/recipe_modules/flavor/gn_android_flavor.py deleted file mode 100644 index 1c352afc28..0000000000 --- a/infra/bots/recipe_modules/flavor/gn_android_flavor.py +++ /dev/null @@ -1,540 +0,0 @@ -# Copyright 2016 The Chromium Authors. All rights reserved. -# Use of this source code is governed by a BSD-style license that can be -# found in the LICENSE file. - -from recipe_engine import recipe_api - -import default_flavor -import re -import subprocess - - -"""GN Android flavor utils, used for building Skia for Android with GN.""" -class GNAndroidFlavorUtils(default_flavor.DefaultFlavorUtils): - def __init__(self, m): - super(GNAndroidFlavorUtils, self).__init__(m) - self._ever_ran_adb = False - self.ADB_BINARY = '/usr/bin/adb.1.0.35' - self.ADB_PUB_KEY = '/home/chrome-bot/.android/adbkey' - self._golo_devices = ['Nexus5x'] - if self.m.vars.builder_cfg.get('model') in self._golo_devices: - self.ADB_BINARY = '/opt/infra-android/tools/adb' - self.ADB_PUB_KEY = ('/home/chrome-bot/.android/' - 'chrome_infrastructure_adbkey') - - # Data should go in android_data_dir, which may be preserved across runs. - android_data_dir = '/sdcard/revenge_of_the_skiabot/' - self.device_dirs = default_flavor.DeviceDirs( - bin_dir = '/data/local/tmp/', - dm_dir = android_data_dir + 'dm_out', - perf_data_dir = android_data_dir + 'perf', - resource_dir = android_data_dir + 'resources', - images_dir = android_data_dir + 'images', - skp_dir = android_data_dir + 'skps', - svg_dir = android_data_dir + 'svgs', - tmp_dir = android_data_dir) - - # A list of devices we can't root. If rooting fails and a device is not - # on the list, we fail the task to avoid perf inconsistencies. - self.rootable_blacklist = ['GalaxyS6', 'GalaxyS7_G930A', 'GalaxyS7_G930FD', - 'MotoG4', 'NVIDIA_Shield'] - - # Maps device type -> CPU ids that should be scaled for nanobench. 
- # Many devices have two (or more) different CPUs (e.g. big.LITTLE - # on Nexus5x). The CPUs listed are the biggest cpus on the device. - # The CPUs are grouped together, so we only need to scale one of them - # (the one listed) in order to scale them all. - # E.g. Nexus5x has cpu0-3 as one chip and cpu4-5 as the other. Thus, - # if one wants to run a single-threaded application (e.g. nanobench), one - # can disable cpu0-3 and scale cpu 4 to have only cpu4 and 5 at the same - # frequency. See also disable_for_nanobench. - self.cpus_to_scale = { - 'Nexus5x': [4], - 'NexusPlayer': [0, 2], # has 2 identical chips, so scale them both. - 'Pixel': [2], - 'Pixel2XL': [4] - } - - # Maps device type -> CPU ids that should be turned off when running - # single-threaded applications like nanobench. The devices listed have - # multiple, differnt CPUs. We notice a lot of noise that seems to be - # caused by nanobench running on the slow CPU, then the big CPU. By - # disabling this, we see less of that noise by forcing the same CPU - # to be used for the performance testing every time. - self.disable_for_nanobench = { - 'Nexus5x': range(0, 4), - 'Pixel': range(0, 2), - 'Pixel2XL': range(0, 4), - 'PixelC': range(0, 2) - } - - self.gpu_scaling = { - "Nexus5": 450000000, - "Nexus5x": 600000000, - } - - def _run(self, title, *cmd, **kwargs): - with self.m.context(cwd=self.m.path['start_dir'].join('skia')): - return self.m.run(self.m.step, title, cmd=list(cmd), **kwargs) - - def _adb(self, title, *cmd, **kwargs): - # The only non-infra adb steps (dm / nanobench) happen to not use _adb(). - if 'infra_step' not in kwargs: - kwargs['infra_step'] = True - - self._ever_ran_adb = True - attempts = 1 - flaky_devices = ['NexusPlayer', 'PixelC'] - if self.m.vars.builder_cfg.get('model') in flaky_devices: - attempts = 3 - - def wait_for_device(attempt): - self.m.run(self.m.step, - 'kill adb server after failure of \'%s\' (attempt %d)' % ( - title, attempt), - cmd=[self.ADB_BINARY, 'kill-server'], - infra_step=True, timeout=30, abort_on_failure=False, - fail_build_on_failure=False) - self.m.run(self.m.step, - 'wait for device after failure of \'%s\' (attempt %d)' % ( - title, attempt), - cmd=[self.ADB_BINARY, 'wait-for-device'], infra_step=True, - timeout=180, abort_on_failure=False, - fail_build_on_failure=False) - - with self.m.context(cwd=self.m.path['start_dir'].join('skia')): - with self.m.env({'ADB_VENDOR_KEYS': self.ADB_PUB_KEY}): - return self.m.run.with_retry(self.m.step, title, attempts, - cmd=[self.ADB_BINARY]+list(cmd), - between_attempts_fn=wait_for_device, - **kwargs) - - def _scale_for_dm(self): - device = self.m.vars.builder_cfg.get('model') - if (device in self.rootable_blacklist or - self.m.vars.internal_hardware_label): - return - - # This is paranoia... any CPUs we disabled while running nanobench - # ought to be back online now that we've restarted the device. - for i in self.disable_for_nanobench.get(device, []): - self._set_cpu_online(i, 1) # enable - - scale_up = self.cpus_to_scale.get(device, [0]) - # For big.LITTLE devices, make sure we scale the LITTLE cores up; - # there is a chance they are still in powersave mode from when - # swarming slows things down for cooling down and charging. - if 0 not in scale_up: - scale_up.append(0) - for i in scale_up: - # AndroidOne doesn't support ondemand governor. hotplug is similar. 
- if device == 'AndroidOne': - self._set_governor(i, 'hotplug') - else: - self._set_governor(i, 'ondemand') - - def _scale_for_nanobench(self): - device = self.m.vars.builder_cfg.get('model') - if (device in self.rootable_blacklist or - self.m.vars.internal_hardware_label): - return - - for i in self.cpus_to_scale.get(device, [0]): - self._set_governor(i, 'userspace') - self._scale_cpu(i, 0.6) - - for i in self.disable_for_nanobench.get(device, []): - self._set_cpu_online(i, 0) # disable - - if device in self.gpu_scaling: - #https://developer.qualcomm.com/qfile/28823/lm80-p0436-11_adb_commands.pdf - # Section 3.2.1 Commands to put the GPU in performance mode - # Nexus 5 is 320000000 by default - # Nexus 5x is 180000000 by default - gpu_freq = self.gpu_scaling[device] - self.m.run.with_retry(self.m.python.inline, - "Lock GPU to %d (and other perf tweaks)" % gpu_freq, - 3, # attempts - program=""" -import os -import subprocess -import sys -import time -ADB = sys.argv[1] -freq = sys.argv[2] -idle_timer = "10000" - -log = subprocess.check_output([ADB, 'root']) -# check for message like 'adbd cannot run as root in production builds' -print log -if 'cannot' in log: - raise Exception('adb root failed') - -subprocess.check_output([ADB, 'shell', 'stop', 'thermald']) - -subprocess.check_output([ADB, 'shell', 'echo "%s" > ' - '/sys/class/kgsl/kgsl-3d0/gpuclk' % freq]) - -actual_freq = subprocess.check_output([ADB, 'shell', 'cat ' - '/sys/class/kgsl/kgsl-3d0/gpuclk']).strip() -if actual_freq != freq: - raise Exception('Frequency (actual, expected) (%s, %s)' - % (actual_freq, freq)) - -subprocess.check_output([ADB, 'shell', 'echo "%s" > ' - '/sys/class/kgsl/kgsl-3d0/idle_timer' % idle_timer]) - -actual_timer = subprocess.check_output([ADB, 'shell', 'cat ' - '/sys/class/kgsl/kgsl-3d0/idle_timer']).strip() -if actual_timer != idle_timer: - raise Exception('idle_timer (actual, expected) (%s, %s)' - % (actual_timer, idle_timer)) - -for s in ['force_bus_on', 'force_rail_on', 'force_clk_on']: - subprocess.check_output([ADB, 'shell', 'echo "1" > ' - '/sys/class/kgsl/kgsl-3d0/%s' % s]) - actual_set = subprocess.check_output([ADB, 'shell', 'cat ' - '/sys/class/kgsl/kgsl-3d0/%s' % s]).strip() - if actual_set != "1": - raise Exception('%s (actual, expected) (%s, 1)' - % (s, actual_set)) -""", - args = [self.ADB_BINARY, gpu_freq], - infra_step=True, - timeout=30) - - def _set_governor(self, cpu, gov): - self._ever_ran_adb = True - self.m.run.with_retry(self.m.python.inline, - "Set CPU %d's governor to %s" % (cpu, gov), - 3, # attempts - program=""" -import os -import subprocess -import sys -import time -ADB = sys.argv[1] -cpu = int(sys.argv[2]) -gov = sys.argv[3] - -log = subprocess.check_output([ADB, 'root']) -# check for message like 'adbd cannot run as root in production builds' -print log -if 'cannot' in log: - raise Exception('adb root failed') - -subprocess.check_output([ADB, 'shell', 'echo "%s" > ' - '/sys/devices/system/cpu/cpu%d/cpufreq/scaling_governor' % (gov, cpu)]) -actual_gov = subprocess.check_output([ADB, 'shell', 'cat ' - '/sys/devices/system/cpu/cpu%d/cpufreq/scaling_governor' % cpu]).strip() -if actual_gov != gov: - raise Exception('(actual, expected) (%s, %s)' - % (actual_gov, gov)) -""", - args = [self.ADB_BINARY, cpu, gov], - infra_step=True, - timeout=30) - - - def _set_cpu_online(self, cpu, value): - """Set /sys/devices/system/cpu/cpu{N}/online to value (0 or 1).""" - self._ever_ran_adb = True - msg = 'Disabling' - if value: - msg = 'Enabling' - self.m.run.with_retry(self.m.python.inline, - 
'%s CPU %d' % (msg, cpu), - 3, # attempts - program=""" -import os -import subprocess -import sys -import time -ADB = sys.argv[1] -cpu = int(sys.argv[2]) -value = int(sys.argv[3]) - -log = subprocess.check_output([ADB, 'root']) -# check for message like 'adbd cannot run as root in production builds' -print log -if 'cannot' in log: - raise Exception('adb root failed') - -# If we try to echo 1 to an already online cpu, adb returns exit code 1. -# So, check the value before trying to write it. -prior_status = subprocess.check_output([ADB, 'shell', 'cat ' - '/sys/devices/system/cpu/cpu%d/online' % cpu]).strip() -if prior_status == str(value): - print 'CPU %d online already %d' % (cpu, value) - sys.exit() - -subprocess.check_output([ADB, 'shell', 'echo %s > ' - '/sys/devices/system/cpu/cpu%d/online' % (value, cpu)]) -actual_status = subprocess.check_output([ADB, 'shell', 'cat ' - '/sys/devices/system/cpu/cpu%d/online' % cpu]).strip() -if actual_status != str(value): - raise Exception('(actual, expected) (%s, %d)' - % (actual_status, value)) -""", - args = [self.ADB_BINARY, cpu, value], - infra_step=True, - timeout=30) - - - def _scale_cpu(self, cpu, target_percent): - self._ever_ran_adb = True - self.m.run.with_retry(self.m.python.inline, - 'Scale CPU %d to %f' % (cpu, target_percent), - 3, # attempts - program=""" -import os -import subprocess -import sys -import time -ADB = sys.argv[1] -target_percent = float(sys.argv[2]) -cpu = int(sys.argv[3]) -log = subprocess.check_output([ADB, 'root']) -# check for message like 'adbd cannot run as root in production builds' -print log -if 'cannot' in log: - raise Exception('adb root failed') - -root = '/sys/devices/system/cpu/cpu%d/cpufreq' %cpu - -# All devices we test on give a list of their available frequencies. -available_freqs = subprocess.check_output([ADB, 'shell', - 'cat %s/scaling_available_frequencies' % root]) - -# Check for message like '/system/bin/sh: file not found' -if available_freqs and '/system/bin/sh' not in available_freqs: - available_freqs = sorted( - int(i) for i in available_freqs.strip().split()) -else: - raise Exception('Could not get list of available frequencies: %s' % - available_freqs) - -maxfreq = available_freqs[-1] -target = int(round(maxfreq * target_percent)) -freq = maxfreq -for f in reversed(available_freqs): - if f <= target: - freq = f - break - -print 'Setting frequency to %d' % freq - -# If scaling_max_freq is lower than our attempted setting, it won't take. -# We must set min first, because if we try to set max to be less than min -# (which sometimes happens after certain devices reboot) it returns a -# perplexing permissions error. 
-subprocess.check_output([ADB, 'shell', 'echo 0 > ' - '%s/scaling_min_freq' % root]) -subprocess.check_output([ADB, 'shell', 'echo %d > ' - '%s/scaling_max_freq' % (freq, root)]) -subprocess.check_output([ADB, 'shell', 'echo %d > ' - '%s/scaling_setspeed' % (freq, root)]) -time.sleep(5) -actual_freq = subprocess.check_output([ADB, 'shell', 'cat ' - '%s/scaling_cur_freq' % root]).strip() -if actual_freq != str(freq): - raise Exception('(actual, expected) (%s, %d)' - % (actual_freq, freq)) -""", - args = [self.ADB_BINARY, str(target_percent), cpu], - infra_step=True, - timeout=30) - - def install(self): - self._adb('mkdir ' + self.device_dirs.resource_dir, - 'shell', 'mkdir', '-p', self.device_dirs.resource_dir) - if 'ASAN' in self.m.vars.extra_tokens: - asan_setup = self.m.vars.slave_dir.join( - 'android_ndk_linux', 'toolchains', 'llvm', 'prebuilt', - 'linux-x86_64', 'lib64', 'clang', '6.0.2', 'bin', - 'asan_device_setup') - self.m.run(self.m.python.inline, 'Setting up device to run ASAN', - program=""" -import os -import subprocess -import sys -import time -ADB = sys.argv[1] -ASAN_SETUP = sys.argv[2] - -def wait_for_device(): - while True: - time.sleep(5) - print 'Waiting for device' - subprocess.check_output([ADB, 'wait-for-device']) - bit1 = subprocess.check_output([ADB, 'shell', 'getprop', - 'dev.bootcomplete']) - bit2 = subprocess.check_output([ADB, 'shell', 'getprop', - 'sys.boot_completed']) - if '1' in bit1 and '1' in bit2: - print 'Device detected' - break - -log = subprocess.check_output([ADB, 'root']) -# check for message like 'adbd cannot run as root in production builds' -print log -if 'cannot' in log: - raise Exception('adb root failed') - -output = subprocess.check_output([ADB, 'disable-verity']) -print output - -if 'already disabled' not in output: - print 'Rebooting device' - subprocess.check_output([ADB, 'reboot']) - wait_for_device() - -def installASAN(revert=False): - # ASAN setup script is idempotent, either it installs it or - # says it's installed. Returns True on success, false otherwise. 
- out = subprocess.check_output([ADB, 'wait-for-device']) - print out - cmd = [ASAN_SETUP] - if revert: - cmd = [ASAN_SETUP, '--revert'] - process = subprocess.Popen(cmd, env={'ADB': ADB}, - stdout=subprocess.PIPE, stderr=subprocess.PIPE) - - # this also blocks until command finishes - (stdout, stderr) = process.communicate() - print stdout - print 'Stderr: %s' % stderr - return process.returncode == 0 - -if not installASAN(): - print 'Trying to revert the ASAN install and then re-install' - # ASAN script sometimes has issues if it was interrupted or partially applied - # Try reverting it, then re-enabling it - if not installASAN(revert=True): - raise Exception('reverting ASAN install failed') - - # Sleep because device does not reboot instantly - time.sleep(10) - - if not installASAN(): - raise Exception('Tried twice to setup ASAN and failed.') - -# Sleep because device does not reboot instantly -time.sleep(10) -wait_for_device() -""", - args = [self.ADB_BINARY, asan_setup], - infra_step=True, - timeout=300, - abort_on_failure=True) - - def cleanup_steps(self): - if self._ever_ran_adb: - self.m.run(self.m.python.inline, 'dump log', program=""" - import os - import subprocess - import sys - out = sys.argv[1] - log = subprocess.check_output(['%s', 'logcat', '-d']) - for line in log.split('\\n'): - tokens = line.split() - if len(tokens) == 11 and tokens[-7] == 'F' and tokens[-3] == 'pc': - addr, path = tokens[-2:] - local = os.path.join(out, os.path.basename(path)) - if os.path.exists(local): - sym = subprocess.check_output(['addr2line', '-Cfpe', local, addr]) - line = line.replace(addr, addr + ' ' + sym.strip()) - print line - """ % self.ADB_BINARY, - args=[self.m.vars.skia_out], - infra_step=True, - timeout=300, - abort_on_failure=False) - - # Only quarantine the bot if the first failed step - # is an infra step. If, instead, we did this for any infra failures, we - # would do this too much. For example, if a Nexus 10 died during dm - # and the following pull step would also fail "device not found" - causing - # us to run the shutdown command when the device was probably not in a - # broken state; it was just rebooting. - if (self.m.run.failed_steps and - isinstance(self.m.run.failed_steps[0], recipe_api.InfraFailure)): - bot_id = self.m.vars.swarming_bot_id - self.m.file.write_text('Quarantining Bot', - '/home/chrome-bot/%s.force_quarantine' % bot_id, - ' ') - - if self._ever_ran_adb: - self._adb('kill adb server', 'kill-server') - - def step(self, name, cmd, **kwargs): - if (cmd[0] == 'nanobench'): - self._scale_for_nanobench() - else: - self._scale_for_dm() - app = self.m.vars.skia_out.join(cmd[0]) - self._adb('push %s' % cmd[0], - 'push', app, self.device_dirs.bin_dir) - - sh = '%s.sh' % cmd[0] - self.m.run.writefile(self.m.vars.tmp_dir.join(sh), - 'set -x; %s%s; echo $? >%src' % ( - self.device_dirs.bin_dir, subprocess.list2cmdline(map(str, cmd)), - self.device_dirs.bin_dir)) - self._adb('push %s' % sh, - 'push', self.m.vars.tmp_dir.join(sh), self.device_dirs.bin_dir) - - self._adb('clear log', 'logcat', '-c') - self.m.python.inline('%s' % cmd[0], """ - import subprocess - import sys - bin_dir = sys.argv[1] - sh = sys.argv[2] - subprocess.check_call(['%s', 'shell', 'sh', bin_dir + sh]) - try: - sys.exit(int(subprocess.check_output(['%s', 'shell', 'cat', - bin_dir + 'rc']))) - except ValueError: - print "Couldn't read the return code. Probably killed for OOM." 
- sys.exit(1) - """ % (self.ADB_BINARY, self.ADB_BINARY), - args=[self.device_dirs.bin_dir, sh]) - - def copy_file_to_device(self, host, device): - self._adb('push %s %s' % (host, device), 'push', host, device) - - def copy_directory_contents_to_device(self, host, device): - # Copy the tree, avoiding hidden directories and resolving symlinks. - self.m.run(self.m.python.inline, 'push %s/* %s' % (host, device), - program=""" - import os - import subprocess - import sys - host = sys.argv[1] - device = sys.argv[2] - for d, _, fs in os.walk(host): - p = os.path.relpath(d, host) - if p != '.' and p.startswith('.'): - continue - for f in fs: - print os.path.join(p,f) - subprocess.check_call(['%s', 'push', - os.path.realpath(os.path.join(host, p, f)), - os.path.join(device, p, f)]) - """ % self.ADB_BINARY, args=[host, device], infra_step=True) - - def copy_directory_contents_to_host(self, device, host): - self._adb('pull %s %s' % (device, host), 'pull', device, host) - - def read_file_on_device(self, path, **kwargs): - rv = self._adb('read %s' % path, - 'shell', 'cat', path, stdout=self.m.raw_io.output(), - **kwargs) - return rv.stdout.rstrip() if rv and rv.stdout else None - - def remove_file_on_device(self, path): - self._adb('rm %s' % path, 'shell', 'rm', '-f', path) - - def create_clean_device_dir(self, path): - self._adb('rm %s' % path, 'shell', 'rm', '-rf', path) - self._adb('mkdir %s' % path, 'shell', 'mkdir', '-p', path) diff --git a/infra/bots/recipe_modules/flavor/gn_chromebook_flavor.py b/infra/bots/recipe_modules/flavor/gn_chromebook_flavor.py deleted file mode 100644 index a133f2bb90..0000000000 --- a/infra/bots/recipe_modules/flavor/gn_chromebook_flavor.py +++ /dev/null @@ -1,134 +0,0 @@ -# Copyright 2016 The Chromium Authors. All rights reserved. -# Use of this source code is governed by a BSD-style license that can be -# found in the LICENSE file. 
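The frequency selection inside _scale_cpu above boils down to choosing the largest available frequency at or below the requested fraction of the maximum. A sketch of just that selection, assuming the kernel's frequency list is sorted ascending as the inline program ensures (pick_freq is an illustrative name):

    def pick_freq(available_freqs, target_percent):
      maxfreq = available_freqs[-1]
      target = int(round(maxfreq * target_percent))
      for f in reversed(available_freqs):
        if f <= target:
          return f
      return maxfreq  # nothing at or below target; fall back to max

    # e.g. pick_freq([300000, 600000, 1200000], 0.6) == 600000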
- - -from recipe_engine import recipe_api - -import default_flavor -import gn_flavor -import json -import subprocess - - -""" - GN Chromebook flavor utils, used for building and testing Skia for ARM - Chromebooks with GN -""" -class GNChromebookFlavorUtils(gn_flavor.GNFlavorUtils): - - def __init__(self, m): - super(GNChromebookFlavorUtils, self).__init__(m) - self._user_ip = '' - - self.chromeos_homedir = '/home/chronos/user/' - self.device_dirs = default_flavor.DeviceDirs( - bin_dir = self.chromeos_homedir + 'bin', - dm_dir = self.chromeos_homedir + 'dm_out', - perf_data_dir = self.chromeos_homedir + 'perf', - resource_dir = self.chromeos_homedir + 'resources', - images_dir = self.chromeos_homedir + 'images', - skp_dir = self.chromeos_homedir + 'skps', - svg_dir = self.chromeos_homedir + 'svgs', - tmp_dir = self.chromeos_homedir) - - @property - def user_ip(self): - if not self._user_ip: - ssh_info = self.m.run(self.m.python.inline, 'read chromeos ip', - program=""" - import os - SSH_MACHINE_FILE = os.path.expanduser('~/ssh_machine.json') - with open(SSH_MACHINE_FILE, 'r') as f: - print f.read() - """, - stdout=self.m.raw_io.output(), - infra_step=True).stdout - - self._user_ip = json.loads(ssh_info).get(u'user_ip', 'ERROR') - return self._user_ip - - def _ssh(self, title, *cmd, **kwargs): - if 'infra_step' not in kwargs: - kwargs['infra_step'] = True - - ssh_cmd = ['ssh', '-oConnectTimeout=15', '-oBatchMode=yes', - '-t', '-t', self.user_ip] + list(cmd) - - return self._run(title, ssh_cmd, **kwargs) - - def install(self): - self._ssh('mkdir %s' % self.device_dirs.resource_dir, 'mkdir', '-p', - self.device_dirs.resource_dir) - - # Ensure the home dir is marked executable - self._ssh('remount %s as exec' % self.chromeos_homedir, - 'sudo', 'mount', '-i', '-o', 'remount,exec', '/home/chronos') - - self.create_clean_device_dir(self.device_dirs.bin_dir) - - def create_clean_device_dir(self, path): - # use -f to silently return if path doesn't exist - self._ssh('rm %s' % path, 'rm', '-rf', path) - self._ssh('mkdir %s' % path, 'mkdir', '-p', path) - - def read_file_on_device(self, path, **kwargs): - rv = self._ssh('read %s' % path, - 'cat', path, stdout=self.m.raw_io.output(), - **kwargs) - return rv.stdout.rstrip() if rv and rv.stdout else None - - def remove_file_on_device(self, path): - # use -f to silently return if path doesn't exist - self._ssh('rm %s' % path, 'rm', '-f', path) - - def _prefix_device_path(self, device_path): - return '%s:%s' % (self.user_ip, device_path) - - def copy_file_to_device(self, host_path, device_path): - device_path = self._prefix_device_path(device_path) - # Recipe - self.m.python.inline(str('scp %s %s' % (host_path, device_path)), - """ - import subprocess - import sys - host = sys.argv[1] - device = sys.argv[2] - print subprocess.check_output(['scp', host, device]) - """, args=[host_path, device_path], infra_step=True) - - def _copy_dir(self, src, dest): - # We can't use rsync to communicate with the chromebooks because the - # chromebooks don't have rsync installed on them. 
- self.m.python.inline(str('scp -r %s %s' % (src, dest)), - """ - import subprocess - import sys - src = sys.argv[1] + '/*' - dest = sys.argv[2] - print subprocess.check_output('scp -r %s %s' % (src, dest), shell=True) - """, args=[src, dest], infra_step=True) - - def copy_directory_contents_to_device(self, host_path, device_path): - self._copy_dir(host_path, self._prefix_device_path(device_path)) - - def copy_directory_contents_to_host(self, device_path, host_path): - self._copy_dir(self._prefix_device_path(device_path), host_path) - - def step(self, name, cmd, **kwargs): - # Push and run either dm or nanobench - - name = cmd[0] - - if name == 'dm': - self.create_clean_host_dir(self.host_dirs.dm_dir) - if name == 'nanobench': - self.create_clean_host_dir(self.host_dirs.perf_data_dir) - - app = self.m.vars.skia_out.join(cmd[0]) - - cmd[0] = '%s/%s' % (self.device_dirs.bin_dir, cmd[0]) - self.copy_file_to_device(app, cmd[0]) - - self._ssh('chmod %s' % name, 'chmod', '+x', cmd[0]) - self._ssh(str(name), *cmd) diff --git a/infra/bots/recipe_modules/flavor/gn_chromecast_flavor.py b/infra/bots/recipe_modules/flavor/gn_chromecast_flavor.py deleted file mode 100644 index bd65e01516..0000000000 --- a/infra/bots/recipe_modules/flavor/gn_chromecast_flavor.py +++ /dev/null @@ -1,153 +0,0 @@ -# Copyright 2016 The Chromium Authors. All rights reserved. -# Use of this source code is governed by a BSD-style license that can be -# found in the LICENSE file. - -from recipe_engine import recipe_api - -import default_flavor -import gn_android_flavor -import subprocess - - -"""GN Chromecast flavor utils, used for building Skia for Chromecast with GN""" -class GNChromecastFlavorUtils(gn_android_flavor.GNAndroidFlavorUtils): - def __init__(self, m): - super(GNChromecastFlavorUtils, self).__init__(m) - self._ever_ran_adb = False - self._user_ip = '' - - # Disk space is extremely tight on the Chromecasts (~100M) There is not - # enough space on the android_data_dir (/cache/skia) to fit the images, - # resources, executable and output the dm images. So, we have dm_out be - # on the tempfs (i.e. RAM) /dev/shm. (which is about 140M) - data_dir = '/cache/skia/' - self.device_dirs = default_flavor.DeviceDirs( - bin_dir = '/cache/skia/bin', - dm_dir = '/dev/shm/skia/dm_out', - perf_data_dir = data_dir + 'perf', - resource_dir = data_dir + 'resources', - images_dir = data_dir + 'images', - skp_dir = data_dir + 'skps', - svg_dir = data_dir + 'svgs', - tmp_dir = data_dir) - - @property - def user_ip_host(self): - if not self._user_ip: - self._user_ip = self.m.run(self.m.python.inline, 'read chromecast ip', - program=""" - import os - CHROMECAST_IP_FILE = os.path.expanduser('~/chromecast.txt') - with open(CHROMECAST_IP_FILE, 'r') as f: - print f.read() - """, - stdout=self.m.raw_io.output(), - infra_step=True).stdout - - return self._user_ip - - @property - def user_ip(self): - return self.user_ip_host.split(':')[0] - - def install(self): - super(GNChromecastFlavorUtils, self).install() - self._adb('mkdir ' + self.device_dirs.bin_dir, - 'shell', 'mkdir', '-p', self.device_dirs.bin_dir) - - def _adb(self, title, *cmd, **kwargs): - if not self._ever_ran_adb: - self._connect_to_remote() - - self._ever_ran_adb = True - # The only non-infra adb steps (dm / nanobench) happen to not use _adb(). 
- if 'infra_step' not in kwargs: - kwargs['infra_step'] = True - return self._run(title, 'adb', *cmd, **kwargs) - - def _connect_to_remote(self): - self.m.run(self.m.step, 'adb connect %s' % self.user_ip_host, cmd=['adb', - 'connect', self.user_ip_host], infra_step=True) - - def create_clean_device_dir(self, path): - # Note: Chromecast does not support -rf - self._adb('rm %s' % path, 'shell', 'rm', '-r', path) - self._adb('mkdir %s' % path, 'shell', 'mkdir', '-p', path) - - def copy_directory_contents_to_device(self, host, device): - # Copy the tree, avoiding hidden directories and resolving symlinks. - # Additionally, due to space restraints, we don't push files > 3 MB - # which cuts down the size of the SKP asset to be around 50 MB as of - # version 41. - self.m.run(self.m.python.inline, 'push %s/* %s' % (host, device), - program=""" - import os - import subprocess - import sys - host = sys.argv[1] - device = sys.argv[2] - for d, _, fs in os.walk(host): - p = os.path.relpath(d, host) - if p != '.' and p.startswith('.'): - continue - for f in fs: - print os.path.join(p,f) - hp = os.path.realpath(os.path.join(host, p, f)) - if os.stat(hp).st_size > (1.5 * 1024 * 1024): - print "Skipping because it is too big" - else: - subprocess.check_call(['adb', 'push', - hp, os.path.join(device, p, f)]) - """, args=[host, device], infra_step=True) - - def cleanup_steps(self): - if self._ever_ran_adb: - # To clean up disk space for next time - self._ssh('Delete executables', 'rm', '-r', self.device_dirs.bin_dir, - abort_on_failure=False, infra_step=True) - # Reconnect if was disconnected - self._adb('disconnect', 'disconnect') - self._connect_to_remote() - self.m.run(self.m.python.inline, 'dump log', program=""" - import os - import subprocess - import sys - out = sys.argv[1] - log = subprocess.check_output(['adb', 'logcat', '-d']) - for line in log.split('\\n'): - tokens = line.split() - if len(tokens) == 11 and tokens[-7] == 'F' and tokens[-3] == 'pc': - addr, path = tokens[-2:] - local = os.path.join(out, os.path.basename(path)) - if os.path.exists(local): - sym = subprocess.check_output(['addr2line', '-Cfpe', local, addr]) - line = line.replace(addr, addr + ' ' + sym.strip()) - print line - """, - args=[self.m.vars.skia_out], - infra_step=True, - abort_on_failure=False) - - self._adb('disconnect', 'disconnect') - self._adb('kill adb server', 'kill-server') - - def _ssh(self, title, *cmd, **kwargs): - # Don't use -t -t (Force psuedo-tty allocation) like in the ChromeOS - # version because the pseudo-tty allocation seems to fail - # instantly when talking to a Chromecast. - # This was excacerbated when we migrated to kitchen and was marked by - # the symptoms of all the ssh commands instantly failing (even after - # connecting and authenticating) with exit code -1 (255) - ssh_cmd = ['ssh', '-oConnectTimeout=15', '-oBatchMode=yes', - '-T', 'root@%s' % self.user_ip] + list(cmd) - - return self.m.run(self.m.step, title, cmd=ssh_cmd, **kwargs) - - def step(self, name, cmd, **kwargs): - app = self.m.vars.skia_out.join(cmd[0]) - - self._adb('push %s' % cmd[0], - 'push', app, self.device_dirs.bin_dir) - - cmd[0] = '%s/%s' % (self.device_dirs.bin_dir, cmd[0]) - self._ssh(str(name), *cmd, infra_step=False) diff --git a/infra/bots/recipe_modules/flavor/gn_flavor.py b/infra/bots/recipe_modules/flavor/gn_flavor.py deleted file mode 100644 index 284f20b365..0000000000 --- a/infra/bots/recipe_modules/flavor/gn_flavor.py +++ /dev/null @@ -1,116 +0,0 @@ -# Copyright 2016 The Chromium Authors. All rights reserved. 
-# Use of this source code is governed by a BSD-style license that can be -# found in the LICENSE file. - -import default_flavor - -"""GN flavor utils, used for building Skia with GN.""" -class GNFlavorUtils(default_flavor.DefaultFlavorUtils): - # TODO(borenet): Delete this file. - def _run(self, title, cmd, infra_step=False, **kwargs): - return self.m.run(self.m.step, title, cmd=cmd, - infra_step=infra_step, **kwargs) - - def _py(self, title, script, infra_step=True, args=()): - return self.m.run(self.m.python, title, script=script, args=args, - infra_step=infra_step) - - def step(self, name, cmd): - app = self.device_dirs.bin_dir.join(cmd[0]) - cmd = [app] + cmd[1:] - env = self.m.context.env - path = [] - ld_library_path = [] - - slave_dir = self.m.vars.slave_dir - clang_linux = str(slave_dir.join('clang_linux')) - extra_tokens = self.m.vars.extra_tokens - - if self.m.vars.is_linux: - if (self.m.vars.builder_cfg.get('cpu_or_gpu', '') == 'GPU' - and 'Intel' in self.m.vars.builder_cfg.get('cpu_or_gpu_value', '')): - # The vulkan in this asset name simply means that the graphics driver - # supports Vulkan. It is also the driver used for GL code. - dri_path = slave_dir.join('linux_vulkan_intel_driver_release') - if self.m.vars.builder_cfg.get('configuration', '') == 'Debug': - dri_path = slave_dir.join('linux_vulkan_intel_driver_debug') - ld_library_path.append(dri_path) - env['LIBGL_DRIVERS_PATH'] = str(dri_path) - env['VK_ICD_FILENAMES'] = str(dri_path.join('intel_icd.x86_64.json')) - - if 'Vulkan' in extra_tokens: - path.append(slave_dir.join('linux_vulkan_sdk', 'bin')) - ld_library_path.append(slave_dir.join('linux_vulkan_sdk', 'lib')) - - if 'SwiftShader' in extra_tokens: - ld_library_path.append( - self.m.vars.build_dir.join('out', 'swiftshader_out')) - - if 'MSAN' in extra_tokens: - # Find the MSAN-built libc++. - ld_library_path.append(clang_linux + '/msan') - - if any('SAN' in t for t in extra_tokens): - # Sanitized binaries may want to run clang_linux/bin/llvm-symbolizer. - path.append(clang_linux + '/bin') - # We find that testing sanitizer builds with libc++ uncovers more issues - # than with the system-provided C++ standard library, which is usually - # libstdc++. libc++ proactively hooks into sanitizers to help their - # analyses. We ship a copy of libc++ with our Linux toolchain in /lib. - ld_library_path.append(clang_linux + '/lib') - elif self.m.vars.is_linux: - cmd = ['catchsegv'] + cmd - elif 'ProcDump' in extra_tokens: - dumps_dir = self.m.path.join(self.m.vars.swarming_out_dir, 'dumps') - self.m.file.ensure_directory('makedirs dumps', dumps_dir) - procdump = str(self.m.vars.slave_dir.join('procdump_win', - 'procdump64.exe')) - # Full docs for ProcDump here: - # https://docs.microsoft.com/en-us/sysinternals/downloads/procdump - # -accepteula automatically accepts the license agreement - # -mp saves a packed minidump to save space - # -e 1 tells procdump to dump once - # -x launches exe and writes dumps to the - # specified dir - cmd = [procdump, '-accepteula', '-mp', '-e', '1', '-x', dumps_dir] + cmd - - if 'ASAN' in extra_tokens or 'UBSAN' in extra_tokens: - if 'Mac' in self.m.vars.builder_cfg.get('os', ''): - env['ASAN_OPTIONS'] = 'symbolize=1' # Mac doesn't support detect_leaks. - else: - env['ASAN_OPTIONS'] = 'symbolize=1 detect_leaks=1' - env[ 'LSAN_OPTIONS'] = 'symbolize=1 print_suppressions=1' - env['UBSAN_OPTIONS'] = 'symbolize=1 print_stacktrace=1' - - if 'TSAN' in extra_tokens: - # We don't care about malloc(), fprintf, etc. used in signal handlers. 
- # If we're in a signal handler, we're already crashing... - env['TSAN_OPTIONS'] = 'report_signal_unsafe=0' - - if 'Coverage' in extra_tokens: - # This is the output file for the coverage data. Just running the binary - # will produce the output. The output_file is in the swarming_out_dir and - # thus will be an isolated output of the Test step. - profname = '%s.profraw' % self.m.vars.builder_cfg.get('test_filter','o') - env['LLVM_PROFILE_FILE'] = self.m.path.join(self.m.vars.swarming_out_dir, - profname) - - if path: - env['PATH'] = '%%(PATH)s:%s' % ':'.join('%s' % p for p in path) - if ld_library_path: - env['LD_LIBRARY_PATH'] = ':'.join('%s' % p for p in ld_library_path) - - to_symbolize = ['dm', 'nanobench'] - if name in to_symbolize and self.m.vars.is_linux: - # Convert path objects or placeholders into strings such that they can - # be passed to symbolize_stack_trace.py - args = [slave_dir] + [str(x) for x in cmd] - with self.m.context(cwd=self.m.path['start_dir'].join('skia'), env=env): - self._py('symbolized %s' % name, - self.module.resource('symbolize_stack_trace.py'), - args=args, - infra_step=False) - - else: - with self.m.context(env=env): - self._run(name, cmd) diff --git a/infra/bots/recipe_modules/flavor/ios.py b/infra/bots/recipe_modules/flavor/ios.py new file mode 100644 index 0000000000..d78cd4ca39 --- /dev/null +++ b/infra/bots/recipe_modules/flavor/ios.py @@ -0,0 +1,90 @@ +# Copyright 2017 The Chromium Authors. All rights reserved. +# Use of this source code is governed by a BSD-style license that can be +# found in the LICENSE file. + +# Disable warning about setting self.device_dirs in install(); we need to. +# pylint: disable=W0201 + + +from . import default + + +"""iOS flavor, used for running code on iOS.""" + + +class iOSFlavor(default.DefaultFlavor): + def __init__(self, m): + super(iOSFlavor, self).__init__(m) + self.device_dirs = default.DeviceDirs( + bin_dir='[unused]', + dm_dir='dm', + perf_data_dir='perf', + resource_dir='resources', + images_dir='images', + skp_dir='skps', + svg_dir='svgs', + tmp_dir='tmp') + + def install(self): + # Set up the device + self.m.run(self.m.step, 'setup_device', cmd=['ios.py'], infra_step=True) + + # Install the app. + for app_name in ['dm', 'nanobench']: + app_package = self.m.vars.skia_out.join('%s.app' % app_name) + + def uninstall_app(attempt): + # If app ID changes, upgrade will fail, so try uninstalling. + self.m.run(self.m.step, + 'uninstall_' + app_name, + cmd=['ideviceinstaller', '-U', 'com.google.%s' % app_name], + infra_step=True, + # App may not be installed. 
+ abort_on_failure=False, fail_build_on_failure=False) + + num_attempts = 2 + self.m.run.with_retry(self.m.step, 'install_' + app_name, num_attempts, + cmd=['ideviceinstaller', '-i', app_package], + between_attempts_fn=uninstall_app, + infra_step=True) + + def step(self, name, cmd, env=None, **kwargs): + bundle_id = 'com.google.%s' % cmd[0] + self.m.run(self.m.step, name, + cmd=['idevice-app-runner', '-s', bundle_id, '--args'] + + map(str, cmd[1:])) + + def _run_ios_script(self, script, first, *rest): + full = self.m.path['start_dir'].join( + 'skia', 'platform_tools', 'ios', 'bin', 'ios_' + script) + self.m.run(self.m.step, + name = '%s %s' % (script, first), + cmd = [full, first] + list(rest), + infra_step=True) + + def copy_file_to_device(self, host, device): + self._run_ios_script('push_file', host, device) + + def copy_directory_contents_to_device(self, host, device): + self._run_ios_script('push_if_needed', host, device) + + def copy_directory_contents_to_host(self, device, host): + self._run_ios_script('pull_if_needed', device, host) + + def remove_file_on_device(self, path): + self._run_ios_script('rm', path) + + def create_clean_device_dir(self, path): + self._run_ios_script('rm', path) + self._run_ios_script('mkdir', path) + + def read_file_on_device(self, path, **kwargs): + full = self.m.path['start_dir'].join( + 'skia', 'platform_tools', 'ios', 'bin', 'ios_cat_file') + rv = self.m.run(self.m.step, + name = 'cat_file %s' % path, + cmd = [full, path], + stdout=self.m.raw_io.output(), + infra_step=True, + **kwargs) + return rv.stdout.rstrip() if rv and rv.stdout else None diff --git a/infra/bots/recipe_modules/flavor/ios_flavor.py b/infra/bots/recipe_modules/flavor/ios_flavor.py deleted file mode 100644 index 3df07451e9..0000000000 --- a/infra/bots/recipe_modules/flavor/ios_flavor.py +++ /dev/null @@ -1,87 +0,0 @@ -# Copyright 2017 The Chromium Authors. All rights reserved. -# Use of this source code is governed by a BSD-style license that can be -# found in the LICENSE file. - -# Disable warning about setting self.device_dirs in install(); we need to. -# pylint: disable=W0201 - -import default_flavor -import gn_flavor -import os - -class iOSFlavorUtils(gn_flavor.GNFlavorUtils): - def __init__(self, m): - super(iOSFlavorUtils, self).__init__(m) - self.device_dirs = default_flavor.DeviceDirs( - bin_dir='[unused]', - dm_dir='dm', - perf_data_dir='perf', - resource_dir='resources', - images_dir='images', - skp_dir='skps', - svg_dir='svgs', - tmp_dir='tmp') - - def install(self): - # Set up the device - self.m.run(self.m.step, 'setup_device', cmd=['ios.py'], infra_step=True) - - # Install the app. - for app_name in ['dm', 'nanobench']: - app_package = self.m.vars.skia_out.join('%s.app' % app_name) - - def uninstall_app(attempt): - # If app ID changes, upgrade will fail, so try uninstalling. - self.m.run(self.m.step, - 'uninstall_' + app_name, - cmd=['ideviceinstaller', '-U', 'com.google.%s' % app_name], - infra_step=True, - # App may not be installed. 
- abort_on_failure=False, fail_build_on_failure=False) - - num_attempts = 2 - self.m.run.with_retry(self.m.step, 'install_' + app_name, num_attempts, - cmd=['ideviceinstaller', '-i', app_package], - between_attempts_fn=uninstall_app, - infra_step=True) - - def step(self, name, cmd, env=None, **kwargs): - bundle_id = 'com.google.%s' % cmd[0] - self.m.run(self.m.step, name, - cmd=['idevice-app-runner', '-s', bundle_id, '--args'] + - map(str, cmd[1:])) - - def _run_ios_script(self, script, first, *rest): - full = self.m.path['start_dir'].join( - 'skia', 'platform_tools', 'ios', 'bin', 'ios_' + script) - self.m.run(self.m.step, - name = '%s %s' % (script, first), - cmd = [full, first] + list(rest), - infra_step=True) - - def copy_file_to_device(self, host, device): - self._run_ios_script('push_file', host, device) - - def copy_directory_contents_to_device(self, host, device): - self._run_ios_script('push_if_needed', host, device) - - def copy_directory_contents_to_host(self, device, host): - self._run_ios_script('pull_if_needed', device, host) - - def remove_file_on_device(self, path): - self._run_ios_script('rm', path) - - def create_clean_device_dir(self, path): - self._run_ios_script('rm', path) - self._run_ios_script('mkdir', path) - - def read_file_on_device(self, path, **kwargs): - full = self.m.path['start_dir'].join( - 'skia', 'platform_tools', 'ios', 'bin', 'ios_cat_file') - rv = self.m.run(self.m.step, - name = 'cat_file %s' % path, - cmd = [full, path], - stdout=self.m.raw_io.output(), - infra_step=True, - **kwargs) - return rv.stdout.rstrip() if rv and rv.stdout else None diff --git a/infra/bots/recipe_modules/flavor/valgrind.py b/infra/bots/recipe_modules/flavor/valgrind.py new file mode 100644 index 0000000000..774dbd461a --- /dev/null +++ b/infra/bots/recipe_modules/flavor/valgrind.py @@ -0,0 +1,30 @@ +# Copyright 2014 The Chromium Authors. All rights reserved. +# Use of this source code is governed by a BSD-style license that can be +# found in the LICENSE file. + + +from . import default + + +"""Valgrind flavor, used for running code through Valgrind.""" + + +class ValgrindFlavor(default.DefaultFlavor): + def __init__(self, m): + super(ValgrindFlavor, self).__init__(m) + self._suppressions_file = self.m.path['start_dir'].join( + 'skia', 'tools', 'valgrind.supp') + self._valgrind_cipd_dir = self.m.vars.slave_dir.join('valgrind') + self._valgrind_fake_dir = self._valgrind_cipd_dir + self._valgrind = self._valgrind_fake_dir.join('bin', 'valgrind') + self._lib_dir = self._valgrind_fake_dir.join('lib', 'valgrind') + + def step(self, name, cmd, **kwargs): + new_cmd = [self._valgrind, '--gen-suppressions=all', '--leak-check=full', + '--track-origins=yes', '--error-exitcode=1', '--num-callers=40', + '--suppressions=%s' % self._suppressions_file] + path_to_app = self.m.vars.skia_out.join(cmd[0]) + new_cmd.append(path_to_app) + new_cmd.extend(cmd[1:]) + with self.m.env({'VALGRIND_LIB': self._lib_dir}): + return self.m.run(self.m.step, name, cmd=new_cmd, **kwargs) diff --git a/infra/bots/recipe_modules/flavor/valgrind_flavor.py b/infra/bots/recipe_modules/flavor/valgrind_flavor.py deleted file mode 100644 index b54fcc36dd..0000000000 --- a/infra/bots/recipe_modules/flavor/valgrind_flavor.py +++ /dev/null @@ -1,30 +0,0 @@ -# Copyright 2014 The Chromium Authors. All rights reserved. -# Use of this source code is governed by a BSD-style license that can be -# found in the LICENSE file. 
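The command ValgrindFlavor.step assembles above has a fixed shape: the valgrind binary and its flags first, then the app path, then the app's own arguments. A sketch of that assembly (valgrind_cmd and the example paths are illustrative):

    def valgrind_cmd(valgrind, suppressions, app, app_args):
      return [valgrind, '--gen-suppressions=all', '--leak-check=full',
              '--track-origins=yes', '--error-exitcode=1',
              '--num-callers=40', '--suppressions=%s' % suppressions,
              app] + list(app_args)

    # e.g. valgrind_cmd('valgrind', 'skia/tools/valgrind.supp',
    #                   'out/Release/dm', ['--config', '8888'])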
- - -import gn_flavor - - -"""Utils for running under Valgrind.""" - - -class ValgrindFlavorUtils(gn_flavor.GNFlavorUtils): - def __init__(self, m): - super(ValgrindFlavorUtils, self).__init__(m) - self._suppressions_file = self.m.path['start_dir'].join( - 'skia', 'tools', 'valgrind.supp') - self._valgrind_cipd_dir = self.m.vars.slave_dir.join('valgrind') - self._valgrind_fake_dir = self._valgrind_cipd_dir - self._valgrind = self._valgrind_fake_dir.join('bin', 'valgrind') - self._lib_dir = self._valgrind_fake_dir.join('lib', 'valgrind') - - def step(self, name, cmd, **kwargs): - new_cmd = [self._valgrind, '--gen-suppressions=all', '--leak-check=full', - '--track-origins=yes', '--error-exitcode=1', '--num-callers=40', - '--suppressions=%s' % self._suppressions_file] - path_to_app = self.m.vars.skia_out.join(cmd[0]) - new_cmd.append(path_to_app) - new_cmd.extend(cmd[1:]) - with self.m.env({'VALGRIND_LIB': self._lib_dir}): - return self.m.run(self.m.step, name, cmd=new_cmd, **kwargs) diff --git a/infra/bots/recipe_modules/infra/examples/full.py b/infra/bots/recipe_modules/infra/examples/full.py index 84b2e2c442..3db5de26cb 100644 --- a/infra/bots/recipe_modules/infra/examples/full.py +++ b/infra/bots/recipe_modules/infra/examples/full.py @@ -7,7 +7,6 @@ DEPS = [ - 'core', 'infra', 'recipe_engine/file', 'recipe_engine/path', diff --git a/infra/bots/recipes/bookmaker.py b/infra/bots/recipes/bookmaker.py index f499422516..3a568ec9bf 100644 --- a/infra/bots/recipes/bookmaker.py +++ b/infra/bots/recipes/bookmaker.py @@ -18,7 +18,7 @@ DEPS = [ 'recipe_engine/path', 'recipe_engine/properties', 'recipe_engine/step', - 'core', + 'checkout', 'infra', 'run', 'vars', @@ -39,8 +39,8 @@ def go_get_fiddlecli(api): def RunSteps(api): api.vars.setup() - checkout_root = api.core.default_checkout_root - api.core.checkout_bot_update(checkout_root=checkout_root) + checkout_root = api.checkout.default_checkout_root + api.checkout.bot_update(checkout_root=checkout_root) api.infra.go_version() go_get_fiddlecli(api) diff --git a/infra/bots/recipes/calmbench.py b/infra/bots/recipes/calmbench.py index f75cd35d72..659b3a4f02 100644 --- a/infra/bots/recipes/calmbench.py +++ b/infra/bots/recipes/calmbench.py @@ -6,7 +6,6 @@ # Recipe module for Skia Swarming calmbench. DEPS = [ - 'core', 'flavor', 'recipe_engine/context', 'recipe_engine/file', diff --git a/infra/bots/recipes/check_generated_files.py b/infra/bots/recipes/check_generated_files.py index 61a859f26a..537d7758da 100644 --- a/infra/bots/recipes/check_generated_files.py +++ b/infra/bots/recipes/check_generated_files.py @@ -14,7 +14,7 @@ DEPS = [ 'recipe_engine/python', 'recipe_engine/raw_io', 'recipe_engine/step', - 'core', + 'checkout', 'flavor', 'run', 'vars', @@ -24,8 +24,8 @@ DEPS = [ def RunSteps(api): # Checkout, compile, etc. api.vars.setup() - checkout_root = api.core.default_checkout_root - api.core.checkout_bot_update(checkout_root=checkout_root) + checkout_root = api.checkout.default_checkout_root + api.checkout.bot_update(checkout_root=checkout_root) api.file.ensure_directory('makedirs tmp_dir', api.vars.tmp_dir) api.flavor.setup() diff --git a/infra/bots/recipes/compile.py b/infra/bots/recipes/compile.py index 96ed7a9cbc..e5e553cd16 100644 --- a/infra/bots/recipes/compile.py +++ b/infra/bots/recipes/compile.py @@ -8,7 +8,7 @@ DEPS = [ 'build', - 'core', + 'checkout', 'recipe_engine/context', 'recipe_engine/file', 'recipe_engine/json', @@ -28,12 +28,12 @@ def RunSteps(api): # Check out code. 
if 'NoDEPS' in api.properties['buildername']: checkout_root = api.path['start_dir'] - api.core.checkout_git(checkout_root=checkout_root) + api.checkout.git(checkout_root=checkout_root) else: - checkout_root = api.core.default_checkout_root + checkout_root = api.checkout.default_checkout_root if 'Flutter' in api.vars.builder_name: checkout_root = checkout_root.join('flutter') - api.core.checkout_bot_update(checkout_root=checkout_root) + api.checkout.bot_update(checkout_root=checkout_root) api.file.ensure_directory('makedirs tmp_dir', api.vars.tmp_dir) diff --git a/infra/bots/recipes/ct_skps.py b/infra/bots/recipes/ct_skps.py index 5a679d30b5..9e85a2ecba 100644 --- a/infra/bots/recipes/ct_skps.py +++ b/infra/bots/recipes/ct_skps.py @@ -8,7 +8,7 @@ import math DEPS = [ 'build', - 'core', + 'checkout', 'ct', 'recipe_engine/context', 'recipe_engine/file', @@ -78,8 +78,8 @@ def RunSteps(api): api.vars.setup() checkout_root = make_path(api, '/', 'b', 'work') gclient_cache = make_path(api, '/', 'b', 'cache') - got_revision = api.core.checkout_bot_update(checkout_root=checkout_root, - gclient_cache=gclient_cache) + got_revision = api.checkout.bot_update(checkout_root=checkout_root, + gclient_cache=gclient_cache) api.file.ensure_directory('makedirs tmp_dir', api.vars.tmp_dir) out_dir = api.vars.build_dir.join('out', api.vars.configuration) diff --git a/infra/bots/recipes/housekeeper.expected/Housekeeper-PerCommit-Trybot.json b/infra/bots/recipes/housekeeper.expected/Housekeeper-PerCommit-Trybot.json index 2863e42c9b..8d7f7912fb 100644 --- a/infra/bots/recipes/housekeeper.expected/Housekeeper-PerCommit-Trybot.json +++ b/infra/bots/recipes/housekeeper.expected/Housekeeper-PerCommit-Trybot.json @@ -124,14 +124,13 @@ "0777", "[START_DIR]/[SWARM_OUT_DIR]/perfdata/Housekeeper-PerCommit/data" ], - "cwd": "[START_DIR]/cache/work/skia", "infra_step": true, "name": "makedirs perf_dir" }, { "cmd": [ "python", - "RECIPE_MODULE[skia::core]/resources/run_binary_size_analysis.py", + "RECIPE_MODULE[skia::binary_size]/resources/run_binary_size_analysis.py", "--library", "[START_DIR]/build/out/Release/libskia.so", "--githash", diff --git a/infra/bots/recipes/housekeeper.expected/Housekeeper-PerCommit.json b/infra/bots/recipes/housekeeper.expected/Housekeeper-PerCommit.json index ccc1ff38b5..d152ff9c82 100644 --- a/infra/bots/recipes/housekeeper.expected/Housekeeper-PerCommit.json +++ b/infra/bots/recipes/housekeeper.expected/Housekeeper-PerCommit.json @@ -111,7 +111,7 @@ { "cmd": [ "python", - "RECIPE_MODULE[skia::core]/resources/generate_and_upload_doxygen.py" + "RECIPE_MODULE[skia::doxygen]/resources/generate_and_upload_doxygen.py" ], "cwd": "[START_DIR]/cache/work/skia", "env": { @@ -132,14 +132,13 @@ "0777", "[START_DIR]/[SWARM_OUT_DIR]/perfdata/Housekeeper-PerCommit/data" ], - "cwd": "[START_DIR]/cache/work/skia", "infra_step": true, "name": "makedirs perf_dir" }, { "cmd": [ "python", - "RECIPE_MODULE[skia::core]/resources/run_binary_size_analysis.py", + "RECIPE_MODULE[skia::binary_size]/resources/run_binary_size_analysis.py", "--library", "[START_DIR]/build/out/Release/libskia.so", "--githash", diff --git a/infra/bots/recipes/housekeeper.py b/infra/bots/recipes/housekeeper.py index 3cc76c26e0..e565216bd4 100644 --- a/infra/bots/recipes/housekeeper.py +++ b/infra/bots/recipes/housekeeper.py @@ -10,15 +10,14 @@ import calendar DEPS = [ - 'core', - 'depot_tools/bot_update', + 'binary_size', + 'checkout', + 'doxygen', 'flavor', 'recipe_engine/context', 'recipe_engine/file', 'recipe_engine/path', 
'recipe_engine/properties', - 'recipe_engine/python', - 'recipe_engine/step', 'recipe_engine/time', 'run', 'vars', @@ -28,37 +27,24 @@ DEPS = [ def RunSteps(api): # Checkout, compile, etc. api.vars.setup() - checkout_root = api.core.default_checkout_root - got_revision = api.core.checkout_bot_update(checkout_root=checkout_root) + checkout_root = api.checkout.default_checkout_root + got_revision = api.checkout.bot_update(checkout_root=checkout_root) api.file.ensure_directory('makedirs tmp_dir', api.vars.tmp_dir) api.flavor.setup() # TODO(borenet): Detect static initializers? - with api.context(cwd=checkout_root.join('skia')): - if not api.vars.is_trybot: - api.run( - api.step, - 'generate and upload doxygen', - cmd=['python', api.core.resource('generate_and_upload_doxygen.py')], - abort_on_failure=False) + skia_dir = checkout_root.join('skia') + if not api.vars.is_trybot: + api.doxygen.generate_and_upload(skia_dir) - now = api.time.utcnow() - ts = int(calendar.timegm(now.utctimetuple())) - filename = 'nanobench_%s_%d.json' % (got_revision, ts) - dest_dir = api.flavor.host_dirs.perf_data_dir - dest_file = dest_dir + '/' + filename - api.file.ensure_directory('makedirs perf_dir', dest_dir) - cmd = ['python', api.core.resource('run_binary_size_analysis.py'), - '--library', api.vars.skia_out.join('libskia.so'), - '--githash', api.properties['revision'], - '--dest', dest_file] - if api.vars.is_trybot: - cmd.extend(['--issue_number', str(api.properties['patch_issue'])]) - api.run( - api.step, - 'generate binary size data', - cmd=cmd) + now = api.time.utcnow() + ts = int(calendar.timegm(now.utctimetuple())) + filename = 'nanobench_%s_%d.json' % (got_revision, ts) + dest_dir = api.flavor.host_dirs.perf_data_dir + dest_file = dest_dir + '/' + filename + api.file.ensure_directory('makedirs perf_dir', dest_dir) + api.binary_size.run_analysis(skia_dir, dest_file) def GenTests(api): diff --git a/infra/bots/recipes/infra.py b/infra/bots/recipes/infra.py index 902bf98833..c299d17c47 100644 --- a/infra/bots/recipes/infra.py +++ b/infra/bots/recipes/infra.py @@ -7,21 +7,19 @@ DEPS = [ + 'checkout', + 'infra', 'recipe_engine/context', - 'recipe_engine/path', 'recipe_engine/properties', 'recipe_engine/step', - 'core', - 'infra', - 'run', 'vars', ] def RunSteps(api): api.vars.setup() - checkout_root = api.core.default_checkout_root - api.core.checkout_bot_update(checkout_root=checkout_root) + checkout_root = api.checkout.default_checkout_root + api.checkout.bot_update(checkout_root=checkout_root) api.infra.update_go_deps() # Run the infra tests. diff --git a/infra/bots/recipes/perf.py b/infra/bots/recipes/perf.py index a3524adecb..7880b48273 100644 --- a/infra/bots/recipes/perf.py +++ b/infra/bots/recipes/perf.py @@ -11,7 +11,6 @@ import os DEPS = [ - 'core', 'env', 'flavor', 'recipe_engine/file', diff --git a/infra/bots/recipes/recreate_skps.py b/infra/bots/recipes/recreate_skps.py index b41d56f3f6..80f4dcf0df 100644 --- a/infra/bots/recipes/recreate_skps.py +++ b/infra/bots/recipes/recreate_skps.py @@ -7,7 +7,7 @@ DEPS = [ - 'core', + 'checkout', 'depot_tools/gclient', 'flavor', 'infra', @@ -36,8 +36,8 @@ TEST_BUILDERS = { def RunSteps(api): # Check out Chrome. 
api.vars.setup() - checkout_root = api.core.default_checkout_root - api.core.checkout_bot_update(checkout_root=checkout_root) + checkout_root = api.checkout.default_checkout_root + api.checkout.bot_update(checkout_root=checkout_root) api.file.ensure_directory('makedirs tmp_dir', api.vars.tmp_dir) api.flavor.setup() diff --git a/infra/bots/recipes/skqp_test.py b/infra/bots/recipes/skqp_test.py index 6162c3741a..56c27f7fe9 100644 --- a/infra/bots/recipes/skqp_test.py +++ b/infra/bots/recipes/skqp_test.py @@ -5,7 +5,6 @@ # Recipe module for Skia Swarming SKQP testing. DEPS = [ - 'core', 'flavor', 'recipe_engine/file', 'recipe_engine/path', diff --git a/infra/bots/recipes/test.py b/infra/bots/recipes/test.py index 33a1b983b1..8145f368da 100644 --- a/infra/bots/recipes/test.py +++ b/infra/bots/recipes/test.py @@ -7,7 +7,6 @@ DEPS = [ - 'core', 'env', 'flavor', 'recipe_engine/context', diff --git a/infra/bots/recipes/upload_calmbench_results.py b/infra/bots/recipes/upload_calmbench_results.py index 8aa6c01644..ef71575b2d 100644 --- a/infra/bots/recipes/upload_calmbench_results.py +++ b/infra/bots/recipes/upload_calmbench_results.py @@ -10,7 +10,6 @@ import calendar DEPS = [ - 'core', 'flavor', 'recipe_engine/context', 'recipe_engine/file', diff --git a/infra/bots/recipes/upload_skiaserve.py b/infra/bots/recipes/upload_skiaserve.py index cee9584c77..251802b943 100644 --- a/infra/bots/recipes/upload_skiaserve.py +++ b/infra/bots/recipes/upload_skiaserve.py @@ -7,7 +7,6 @@ DEPS = [ - 'core', 'flavor', 'gsutil', 'recipe_engine/context', -- cgit v1.2.3
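
Reviewer note (not part of the patch): the override in ValgrindFlavor.step is
the whole mechanism of this flavor. It rewrites each test command so that the
binary named by cmd[0] runs under Valgrind with a fixed flag set and the
checked-in suppressions file. Below is a standalone sketch of that wrapping,
runnable outside the recipe engine. The wrap_with_valgrind helper and the
placeholder paths are illustrative assumptions; only the Valgrind flags are
taken from the code in this CL.

  #!/usr/bin/env python
  # Sketch only: mirrors how ValgrindFlavor.step builds its command line.
  # SUPPRESSIONS and the out_dir value are made-up placeholders, not values
  # from the recipes.

  SUPPRESSIONS = 'skia/tools/valgrind.supp'

  def wrap_with_valgrind(cmd, out_dir):
    """Return cmd rewritten so the app in cmd[0] runs under Valgrind."""
    return (['valgrind', '--gen-suppressions=all', '--leak-check=full',
             '--track-origins=yes', '--error-exitcode=1', '--num-callers=40',
             '--suppressions=%s' % SUPPRESSIONS,
             '%s/%s' % (out_dir, cmd[0])]  # path_to_app, as in step()
            + list(cmd[1:]))

  if __name__ == '__main__':
    print(wrap_with_valgrind(['dm', '--config', '8888'], 'out/Release'))

Because the wrapping lives in step(), flavor-agnostic recipes such as test.py
and perf.py pick up the Valgrind treatment without changes once a builder is
mapped to this flavor.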