author     Craig Tiller <ctiller@google.com>  2016-12-09 08:37:48 -0800
committer  Craig Tiller <ctiller@google.com>  2016-12-09 08:37:48 -0800
commit     7ae081b9f02667a22797d313c56b1f592a568dd0 (patch)
tree       8e1d1329587a1d83fe3598f95147b90f3f3e8413 /tools
parent     ab7b2d82e989840f299c136f7062d8c6548ec5e1 (diff)
parent     c204647295437b01337ad8e6c17c4296609c7a13 (diff)
Merge github.com:grpc/grpc into slice_with_exec_ctx
Diffstat (limited to 'tools')
-rw-r--r--  tools/buildgen/generate_build_additions.sh                      |   2
-rwxr-xr-x  tools/gce/linux_performance_worker_init.sh                      |  16
-rwxr-xr-x  tools/run_tests/build_python.sh                                 |   3
-rwxr-xr-x  tools/run_tests/performance/process_local_perf_flamegraphs.sh   |  40
-rwxr-xr-x  tools/run_tests/performance/process_remote_perf_flamegraphs.sh  |  44
-rw-r--r--  tools/run_tests/report_utils.py                                 |   7
-rwxr-xr-x  tools/run_tests/run_performance_tests.py                        | 148
-rwxr-xr-x  tools/run_tests/sanity/check_submodules.sh                      |   2
-rw-r--r--  tools/run_tests/sources_and_headers.json                        |  52
9 files changed, 266 insertions(+), 48 deletions(-)
diff --git a/tools/buildgen/generate_build_additions.sh b/tools/buildgen/generate_build_additions.sh
index 1ea47042f4..53c30c7609 100644
--- a/tools/buildgen/generate_build_additions.sh
+++ b/tools/buildgen/generate_build_additions.sh
@@ -30,7 +30,7 @@
gen_build_yaml_dirs=" \
src/boringssl \
- src/google_benchmark \
+ src/benchmark \
src/proto \
src/zlib \
test/core/bad_client \
diff --git a/tools/gce/linux_performance_worker_init.sh b/tools/gce/linux_performance_worker_init.sh
index 523749ee81..ab29e015e0 100755
--- a/tools/gce/linux_performance_worker_init.sh
+++ b/tools/gce/linux_performance_worker_init.sh
@@ -150,3 +150,19 @@ sudo tar -C /usr/local -xzf go$GO_VERSION.$OS-$ARCH.tar.gz
# Put go on the PATH, keep the usual installation dir
sudo ln -s /usr/local/go/bin/go /usr/bin/go
rm go$GO_VERSION.$OS-$ARCH.tar.gz
+
+# Install perf for profiling benchmarks (we need the linux-tools-<> package matching the kernel version)
+sudo apt-get install -y linux-tools-common linux-tools-generic linux-tools-`uname -r`
+# see http://unix.stackexchange.com/questions/14227/do-i-need-root-admin-permissions-to-run-userspace-perf-tool-perf-events-ar
+echo 0 | sudo tee /proc/sys/kernel/perf_event_paranoid
+# see http://stackoverflow.com/questions/21284906/perf-couldnt-record-kernel-reference-relocation-symbol
+echo 0 | sudo tee /proc/sys/kernel/kptr_restrict
+
+# qps workers under perf appear to need a lot of mmap pages under certain scenarios and perf args in
+# order not to lose perf events or time out
+echo 4096 | sudo tee /proc/sys/kernel/perf_event_mlock_kb
+
+# Fetch scripts to generate flame graphs from perf data collected
+# on benchmarks
+git clone -v https://github.com/brendangregg/FlameGraph ~/FlameGraph
+
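
With the sysctls above relaxed and FlameGraph cloned, a worker can be profiled end to end. A minimal sketch of the pipeline these settings enable (the qps_worker binary path is illustrative):

  # record with call graphs: -F 99 samples at 99 Hz, -g captures stack traces
  perf record -F 99 -g -o qps_worker-perf.data bins/opt/qps_worker --driver_port=10000
  # dump samples to text, then fold the stacks and render an interactive SVG
  perf script -i qps_worker-perf.data > qps_worker-out.perf
  ~/FlameGraph/stackcollapse-perf.pl qps_worker-out.perf | ~/FlameGraph/flamegraph.pl > qps_worker.svg
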
diff --git a/tools/run_tests/build_python.sh b/tools/run_tests/build_python.sh
index fb884ad166..7cac394960 100755
--- a/tools/run_tests/build_python.sh
+++ b/tools/run_tests/build_python.sh
@@ -171,8 +171,7 @@ pip_install_dir() {
}
$VENV_PYTHON -m pip install --upgrade pip
-# TODO(https://github.com/pypa/setuptools/issues/709) get the latest setuptools
-$VENV_PYTHON -m pip install setuptools==25.1.1
+$VENV_PYTHON -m pip install setuptools
$VENV_PYTHON -m pip install cython
pip_install_dir $ROOT
$VENV_PYTHON $ROOT/tools/distrib/python/make_grpcio_tools.py
diff --git a/tools/run_tests/performance/process_local_perf_flamegraphs.sh b/tools/run_tests/performance/process_local_perf_flamegraphs.sh
new file mode 100755
index 0000000000..d15610f137
--- /dev/null
+++ b/tools/run_tests/performance/process_local_perf_flamegraphs.sh
@@ -0,0 +1,40 @@
+#!/bin/bash
+# Copyright 2015, Google Inc.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+mkdir -p $OUTPUT_DIR
+
+PERF_DATA_FILE=${PERF_BASE_NAME}-perf.data
+PERF_SCRIPT_OUTPUT=${PERF_BASE_NAME}-out.perf
+
+# Generate Flame graphs
+echo "running perf script on $PERF_DATA_FILE"
+perf script -i $PERF_DATA_FILE > $PERF_SCRIPT_OUTPUT
+
+~/FlameGraph/stackcollapse-perf.pl $PERF_SCRIPT_OUTPUT | ~/FlameGraph/flamegraph.pl > ${OUTPUT_DIR}/${OUTPUT_FILENAME}.svg
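
The script above is driven entirely by environment variables rather than arguments. A hypothetical local invocation (the variable names come from the script; the values are chosen for illustration):

  PERF_BASE_NAME=localhost:10000-qps_worker_cxx_0 \
  OUTPUT_DIR=perf_reports \
  OUTPUT_FILENAME=cpp_ping_pong-localhost:10000-qps_worker_cxx_0 \
  tools/run_tests/performance/process_local_perf_flamegraphs.sh
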
diff --git a/tools/run_tests/performance/process_remote_perf_flamegraphs.sh b/tools/run_tests/performance/process_remote_perf_flamegraphs.sh
new file mode 100755
index 0000000000..cc075354cc
--- /dev/null
+++ b/tools/run_tests/performance/process_remote_perf_flamegraphs.sh
@@ -0,0 +1,44 @@
+#!/bin/bash
+# Copyright 2015, Google Inc.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+mkdir -p $OUTPUT_DIR
+
+PERF_DATA_FILE=${PERF_BASE_NAME}-perf.data
+PERF_SCRIPT_OUTPUT=${PERF_BASE_NAME}-out.perf
+
+# Generate Flame graphs
+echo "running perf script on $USER_AT_HOST with perf.data"
+ssh $USER_AT_HOST "cd ~/performance_workspace/grpc && perf script -i $PERF_DATA_FILE | gzip > ${PERF_SCRIPT_OUTPUT}.gz"
+
+scp $USER_AT_HOST:~/performance_workspace/grpc/$PERF_SCRIPT_OUTPUT.gz .
+
+gzip -d -f $PERF_SCRIPT_OUTPUT.gz
+
+~/FlameGraph/stackcollapse-perf.pl --kernel $PERF_SCRIPT_OUTPUT | ~/FlameGraph/flamegraph.pl --color=java --hash > ${OUTPUT_DIR}/${OUTPUT_FILENAME}.svg
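
The remote variant additionally needs USER_AT_HOST and assumes the perf data sits under ~/performance_workspace/grpc on the worker machine. A sketch with illustrative values:

  USER_AT_HOST=jenkins@perf-worker-1 \
  PERF_BASE_NAME=perf-worker-1:10000-qps_worker_cxx_0 \
  OUTPUT_DIR=perf_reports \
  OUTPUT_FILENAME=cpp_ping_pong-perf-worker-1:10000-qps_worker_cxx_0 \
  tools/run_tests/performance/process_remote_perf_flamegraphs.sh
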
diff --git a/tools/run_tests/report_utils.py b/tools/run_tests/report_utils.py
index 90055e3530..5ce2a87cfa 100644
--- a/tools/run_tests/report_utils.py
+++ b/tools/run_tests/report_utils.py
@@ -122,3 +122,10 @@ def render_interop_html_report(
except:
print(exceptions.text_error_template().render())
raise
+
+def render_perf_profiling_results(output_filepath, profile_names):
+ with open(output_filepath, 'w') as output_file:
+ output_file.write('<ul>\n')
+ for name in profile_names:
+ output_file.write('<li><a href="%s">%s</a></li>\n' % (name, name))
+ output_file.write('</ul>\n')
diff --git a/tools/run_tests/run_performance_tests.py b/tools/run_tests/run_performance_tests.py
index 1d0c98fb69..69ccff85cf 100755
--- a/tools/run_tests/run_performance_tests.py
+++ b/tools/run_tests/run_performance_tests.py
@@ -49,6 +49,7 @@ import tempfile
import time
import traceback
import uuid
+import report_utils
_ROOT = os.path.abspath(os.path.join(os.path.dirname(sys.argv[0]), '../..'))
@@ -57,15 +58,18 @@ os.chdir(_ROOT)
_REMOTE_HOST_USERNAME = 'jenkins'
+_PERF_REPORT_OUTPUT_DIR = 'perf_reports'
+
class QpsWorkerJob:
"""Encapsulates a qps worker server job."""
- def __init__(self, spec, language, host_and_port):
+ def __init__(self, spec, language, host_and_port, perf_file_base_name=None):
self._spec = spec
self.language = language
self.host_and_port = host_and_port
self._job = None
+ self.perf_file_base_name = perf_file_base_name
def start(self):
self._job = jobset.Job(self._spec, newline_on_success=True, travis=True, add_env={})
@@ -80,24 +84,32 @@ class QpsWorkerJob:
self._job = None
-def create_qpsworker_job(language, shortname=None,
- port=10000, remote_host=None):
- cmdline = language.worker_cmdline() + ['--driver_port=%s' % port]
+def create_qpsworker_job(language, shortname=None, port=10000, remote_host=None, perf_cmd=None):
+ cmdline = (language.worker_cmdline() + ['--driver_port=%s' % port])
+
if remote_host:
- user_at_host = '%s@%s' % (_REMOTE_HOST_USERNAME, remote_host)
- cmdline = ['ssh',
- str(user_at_host),
- 'cd ~/performance_workspace/grpc/ && %s' % ' '.join(cmdline)]
host_and_port='%s:%s' % (remote_host, port)
else:
host_and_port='localhost:%s' % port
+ perf_file_base_name = None
+ if perf_cmd:
+ perf_file_base_name = '%s-%s' % (host_and_port, shortname)
+ # specify -o output file so perf.data gets collected when the worker is stopped
+ cmdline = perf_cmd + ['-o', '%s-perf.data' % perf_file_base_name] + cmdline
+
+ if remote_host:
+ user_at_host = '%s@%s' % (_REMOTE_HOST_USERNAME, remote_host)
+ ssh_cmd = ['ssh']
+ ssh_cmd.extend([str(user_at_host), 'cd ~/performance_workspace/grpc/ && %s' % ' '.join(cmdline)])
+ cmdline = ssh_cmd
+
jobspec = jobset.JobSpec(
cmdline=cmdline,
shortname=shortname,
timeout_seconds=5*60, # workers get restarted after each scenario
verbose_success=True)
- return QpsWorkerJob(jobspec, language, host_and_port)
+ return QpsWorkerJob(jobspec, language, host_and_port, perf_file_base_name)
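
With perf_cmd set, the worker command is wrapped so that stopping the worker flushes a per-worker perf.data file. For a local worker started with --perf_args="record -F 99 -g", the assembled cmdline comes out roughly as follows (worker binary path illustrative):

  /usr/bin/perf record -F 99 -g -o localhost:10000-qps_worker_cxx_0-perf.data \
    bins/opt/qps_worker --driver_port=10000
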
def create_scenario_jobspec(scenario_json, workers, remote_host=None,
@@ -259,7 +271,7 @@ def build_on_remote_hosts(hosts, languages=scenario_config.LANGUAGES.keys(), bui
sys.exit(1)
-def create_qpsworkers(languages, worker_hosts):
+def create_qpsworkers(languages, worker_hosts, perf_cmd=None):
"""Creates QPS workers (but does not start them)."""
if not worker_hosts:
# run two workers locally (for each language)
@@ -275,11 +287,32 @@ def create_qpsworkers(languages, worker_hosts):
shortname= 'qps_worker_%s_%s' % (language,
worker_idx),
port=worker[1] + language.worker_port_offset(),
- remote_host=worker[0])
+ remote_host=worker[0],
+ perf_cmd=perf_cmd)
for language in languages
for worker_idx, worker in enumerate(workers)]
+def perf_report_processor_job(worker_host, perf_base_name, output_filename):
+ print('Creating perf report collection job for %s' % worker_host)
+ cmd = ''
+ if worker_host != 'localhost':
+ user_at_host = "%s@%s" % (_REMOTE_HOST_USERNAME, worker_host)
+ cmd = "USER_AT_HOST=%s OUTPUT_FILENAME=%s OUTPUT_DIR=%s PERF_BASE_NAME=%s\
+ tools/run_tests/performance/process_remote_perf_flamegraphs.sh" \
+ % (user_at_host, output_filename, _PERF_REPORT_OUTPUT_DIR, perf_base_name)
+ else:
+ cmd = "OUTPUT_FILENAME=%s OUTPUT_DIR=%s PERF_BASE_NAME=%s\
+ tools/run_tests/performance/process_local_perf_flamegraphs.sh" \
+ % (output_filename, _PERF_REPORT_OUTPUT_DIR, perf_base_name)
+
+ return jobset.JobSpec(cmdline=cmd,
+ timeout_seconds=3*60,
+ shell=True,
+ verbose_success=True,
+ shortname='process perf report')
+
+
Scenario = collections.namedtuple('Scenario', 'jobspec workers name')
@@ -372,6 +405,31 @@ def finish_qps_workers(jobs):
print('All QPS workers finished.')
return num_killed
+profile_output_files = []
+
+# Collect perf text reports and flame graphs if perf_cmd was used.
+# Note that the base names of perf text reports are used when creating and processing
+# perf data. The scenario name makes the output name unique in the final
+# perf reports directory.
+# Also, the perf profiles need to be fetched and processed after each scenario
+# in order to avoid clobbering the output files.
+def run_collect_perf_profile_jobs(hosts_and_base_names, scenario_name):
+ perf_report_jobs = []
+ global profile_output_files
+ for host_and_port in hosts_and_base_names:
+ perf_base_name = hosts_and_base_names[host_and_port]
+ output_filename = '%s-%s' % (scenario_name, perf_base_name)
+ # from the base filename, create .svg output filename
+ host = host_and_port.split(':')[0]
+ profile_output_files.append('%s.svg' % output_filename)
+ perf_report_jobs.append(perf_report_processor_job(host, perf_base_name, output_filename))
+
+ jobset.message('START', 'Collecting perf reports from qps workers', do_newline=True)
+ failures, _ = jobset.run(perf_report_jobs, newline_on_success=True, maxjobs=1)
+ jobset.message('END', 'Collecting perf reports from qps workers', do_newline=True)
+ return failures
+
+
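
The file names chain the scenario name onto each worker's perf base name, which is what keeps one scenario's profiles from clobbering the next. A small sketch of the naming (values illustrative):

  scenario=cpp_generic_async_streaming_ping_pong   # scenario.name
  perf_base_name=localhost:10010-qps_worker_cxx_0  # host_and_port-shortname
  echo "perf_reports/${scenario}-${perf_base_name}.svg"  # final flame graph path
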
argp = argparse.ArgumentParser(description='Run performance tests.')
argp.add_argument('-l', '--language',
choices=['all'] + sorted(scenario_config.LANGUAGES.keys()),
@@ -405,6 +463,33 @@ argp.add_argument('--netperf',
help='Run netperf benchmark as one of the scenarios.')
argp.add_argument('-x', '--xml_report', default='report.xml', type=str,
help='Name of XML report file to generate.')
+argp.add_argument('--perf_args',
+ help=('Example usage: "--perf_args=record -F 99 -g". '
+ 'Wrap QPS workers in a perf command '
+ 'with the arguments to perf specified here. '
+ '".svg" flame graph profiles will be '
+ 'created for each QPS worker on each scenario. '
+ 'Files will be output to the "<repo_root>/perf_reports" '
+ 'directory. Output files from running the worker '
+ 'under perf are saved in the repo root where it is run. '
+ 'Note that the perf "-g" flag is necessary for '
+ 'flame graph generation to work (assuming the binary '
+ 'being profiled uses frame pointers; check out the '
+ '"--call-graph dwarf" option, which uses libunwind, otherwise). '
+ 'Also note that the entire "--perf_args=<arg(s)>" must '
+ 'be wrapped in quotes as in the example usage. '
+ 'If "--perf_args" is unspecified, "perf" will '
+ 'not be used at all. '
+ 'See http://www.brendangregg.com/perf.html '
+ 'for more general perf examples.'))
+argp.add_argument('--skip_generate_flamegraphs',
+ default=False,
+ action='store_const',
+ const=True,
+ help=('Turn flame graph generation off. '
+ 'May be useful if "perf_args" arguments do not make sense for '
+ 'generating flamegraphs (e.g., "--perf_args=stat ...")'))
+
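
Putting the new flags together, a typical profiling run might look like this (language choice illustrative; all flags appear above):

  tools/run_tests/run_performance_tests.py -l c++ --perf_args="record -F 99 -g"
  # afterwards, perf_reports/index.html links to the per-worker .svg flame graphs
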
args = argp.parse_args()
@@ -435,7 +520,13 @@ if not args.remote_driver_host:
if not args.dry_run:
build_on_remote_hosts(remote_hosts, languages=[str(l) for l in languages], build_local=build_local)
-qpsworker_jobs = create_qpsworkers(languages, args.remote_worker_host)
+perf_cmd = None
+if args.perf_args:
+ # Expect /usr/bin/perf to be installed here, as is usual
+ perf_cmd = ['/usr/bin/perf']
+ perf_cmd.extend(re.split(r'\s+', args.perf_args))
+
+qpsworker_jobs = create_qpsworkers(languages, args.remote_worker_host, perf_cmd=perf_cmd)
# get list of worker addresses for each language.
workers_by_lang = dict([(str(language), []) for language in languages])
@@ -457,16 +548,20 @@ if not scenarios:
total_scenario_failures = 0
qps_workers_killed = 0
merged_resultset = {}
+perf_report_failures = 0
+
for scenario in scenarios:
if args.dry_run:
print(scenario.name)
else:
+ scenario_failures = 0
try:
for worker in scenario.workers:
worker.start()
- scenario_failures, resultset = jobset.run([scenario.jobspec,
- create_quit_jobspec(scenario.workers, remote_host=args.remote_driver_host)],
- newline_on_success=True, maxjobs=1)
+ jobs = [scenario.jobspec]
+ if scenario.workers:
+ jobs.append(create_quit_jobspec(scenario.workers, remote_host=args.remote_driver_host))
+ scenario_failures, resultset = jobset.run(jobs, newline_on_success=True, maxjobs=1)
total_scenario_failures += scenario_failures
merged_resultset = dict(itertools.chain(merged_resultset.iteritems(),
resultset.iteritems()))
@@ -474,10 +569,27 @@ for scenario in scenarios:
# Consider qps workers that need to be killed as failures
qps_workers_killed += finish_qps_workers(scenario.workers)
+ if perf_cmd and scenario_failures == 0 and not args.skip_generate_flamegraphs:
+ workers_and_base_names = {}
+ for worker in scenario.workers:
+ if not worker.perf_file_base_name:
+ raise Exception('using perf but perf report filename is unspecified')
+ workers_and_base_names[worker.host_and_port] = worker.perf_file_base_name
+ perf_report_failures += run_collect_perf_profile_jobs(workers_and_base_names, scenario.name)
+
-report_utils.render_junit_xml_report(merged_resultset, args.xml_report,
- suite_name='benchmarks')
+# Still write the index.html even if some scenarios failed.
+# 'profile_output_files' will only have names for scenarios that passed
+if perf_cmd and not args.skip_generate_flamegraphs:
+# write the index file to the output dir, with all profiles from all scenarios/workers
+ report_utils.render_perf_profiling_results('%s/index.html' % _PERF_REPORT_OUTPUT_DIR, profile_output_files)
if total_scenario_failures > 0 or qps_workers_killed > 0:
- print ("%s scenarios failed and %s qps worker jobs killed" % (total_scenario_failures, qps_workers_killed))
+ print('%s scenarios failed and %s qps worker jobs killed' % (total_scenario_failures, qps_workers_killed))
+ sys.exit(1)
+
+report_utils.render_junit_xml_report(merged_resultset, args.xml_report,
+ suite_name='benchmarks')
+if perf_report_failures > 0:
+ print('%s perf profile collection jobs failed' % perf_report_failures)
sys.exit(1)
diff --git a/tools/run_tests/sanity/check_submodules.sh b/tools/run_tests/sanity/check_submodules.sh
index 6ec0786c96..be12f968d2 100755
--- a/tools/run_tests/sanity/check_submodules.sh
+++ b/tools/run_tests/sanity/check_submodules.sh
@@ -43,7 +43,7 @@ git submodule | awk '{ print $1 }' | sort > $submodules
cat << EOF | awk '{ print $1 }' | sort > $want_submodules
c880e42ba1c8032d4cdde2aba0541d8a9d9fa2e9 third_party/boringssl (version_for_cocoapods_2.0-100-gc880e42)
05b155ff59114735ec8cd089f669c4c3d8f59029 third_party/gflags (v2.1.0-45-g05b155f)
- 44c25c892a6229b20db7cd9dc05584ea865896de third_party/google_benchmark (v0.1.0-343-g44c25c8)
+ 44c25c892a6229b20db7cd9dc05584ea865896de third_party/benchmark (v0.1.0-343-g44c25c8)
c99458533a9b4c743ed51537e25989ea55944908 third_party/googletest (release-1.7.0)
a428e42072765993ff674fda72863c9f1aa2d268 third_party/protobuf (v3.1.0)
50893291621658f355bc5b4d450a8d06a563053d third_party/zlib (v1.2.8)
diff --git a/tools/run_tests/sources_and_headers.json b/tools/run_tests/sources_and_headers.json
index 6fedc9e68d..14700a07fd 100644
--- a/tools/run_tests/sources_and_headers.json
+++ b/tools/run_tests/sources_and_headers.json
@@ -2263,7 +2263,7 @@
},
{
"deps": [
- "google_benchmark",
+ "benchmark",
"gpr",
"gpr_test_util",
"grpc",
@@ -2913,7 +2913,7 @@
},
{
"deps": [
- "google_benchmark"
+ "benchmark"
],
"headers": [],
"is_filegroup": false,
@@ -6207,30 +6207,30 @@
{
"deps": [],
"headers": [
- "third_party/google_benchmark/include/benchmark/benchmark.h",
- "third_party/google_benchmark/include/benchmark/benchmark_api.h",
- "third_party/google_benchmark/include/benchmark/macros.h",
- "third_party/google_benchmark/include/benchmark/reporter.h",
- "third_party/google_benchmark/src/arraysize.h",
- "third_party/google_benchmark/src/benchmark_api_internal.h",
- "third_party/google_benchmark/src/check.h",
- "third_party/google_benchmark/src/colorprint.h",
- "third_party/google_benchmark/src/commandlineflags.h",
- "third_party/google_benchmark/src/complexity.h",
- "third_party/google_benchmark/src/cycleclock.h",
- "third_party/google_benchmark/src/internal_macros.h",
- "third_party/google_benchmark/src/log.h",
- "third_party/google_benchmark/src/mutex.h",
- "third_party/google_benchmark/src/re.h",
- "third_party/google_benchmark/src/sleep.h",
- "third_party/google_benchmark/src/stat.h",
- "third_party/google_benchmark/src/string_util.h",
- "third_party/google_benchmark/src/sysinfo.h",
- "third_party/google_benchmark/src/timers.h"
- ],
- "is_filegroup": false,
- "language": "c++",
- "name": "google_benchmark",
+ "third_party/benchmark/include/benchmark/benchmark.h",
+ "third_party/benchmark/include/benchmark/benchmark_api.h",
+ "third_party/benchmark/include/benchmark/macros.h",
+ "third_party/benchmark/include/benchmark/reporter.h",
+ "third_party/benchmark/src/arraysize.h",
+ "third_party/benchmark/src/benchmark_api_internal.h",
+ "third_party/benchmark/src/check.h",
+ "third_party/benchmark/src/colorprint.h",
+ "third_party/benchmark/src/commandlineflags.h",
+ "third_party/benchmark/src/complexity.h",
+ "third_party/benchmark/src/cycleclock.h",
+ "third_party/benchmark/src/internal_macros.h",
+ "third_party/benchmark/src/log.h",
+ "third_party/benchmark/src/mutex.h",
+ "third_party/benchmark/src/re.h",
+ "third_party/benchmark/src/sleep.h",
+ "third_party/benchmark/src/stat.h",
+ "third_party/benchmark/src/string_util.h",
+ "third_party/benchmark/src/sysinfo.h",
+ "third_party/benchmark/src/timers.h"
+ ],
+ "is_filegroup": false,
+ "language": "c++",
+ "name": "benchmark",
"src": [],
"third_party": false,
"type": "lib"