path: root/tools/run_tests/run_microbenchmark.py
author Craig Tiller <ctiller@google.com> 2017-02-24 17:00:19 -0800
committer Craig Tiller <ctiller@google.com> 2017-02-24 17:00:19 -0800
commit 510f38a711a15aa0e4c87f4677dbdfe2da32cb81 (patch)
tree 1e054e67e65de98da1037ec1832d89a759ea6e33 /tools/run_tests/run_microbenchmark.py
parent b8e2bca4eb71c109943ea3ed59dca74dd30cbd68 (diff)
parent b325a1dd4af3f3d3443da3e0ac0f007162c63772 (diff)
Merge branch 'bm_chttp2' into bm_meta
Diffstat (limited to 'tools/run_tests/run_microbenchmark.py')
-rwxr-xr-x  tools/run_tests/run_microbenchmark.py  21
1 file changed, 16 insertions, 5 deletions
diff --git a/tools/run_tests/run_microbenchmark.py b/tools/run_tests/run_microbenchmark.py
index 2bf4bc1d4d..7f871bf0ad 100755
--- a/tools/run_tests/run_microbenchmark.py
+++ b/tools/run_tests/run_microbenchmark.py
@@ -28,6 +28,7 @@
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+import cgi
import multiprocessing
import os
import subprocess
@@ -71,11 +72,12 @@ def heading(name):
def link(txt, tgt):
global index_html
- index_html += "<p><a href=\"%s\">%s</a></p>\n" % (tgt, txt)
+ index_html += "<p><a href=\"%s\">%s</a></p>\n" % (
+ cgi.escape(tgt, quote=True), cgi.escape(txt))
def text(txt):
global index_html
- index_html += "<p><pre>%s</pre></p>\n" % txt
+ index_html += "<p><pre>%s</pre></p>\n" % cgi.escape(txt)
def collect_latency(bm_name, args):
"""generate latency profiles"""
@@ -91,7 +93,9 @@ def collect_latency(bm_name, args):
'--benchmark_list_tests']).splitlines():
link(line, '%s.txt' % fnize(line))
benchmarks.append(
- jobset.JobSpec(['bins/basicprof/%s' % bm_name, '--benchmark_filter=^%s$' % line],
+ jobset.JobSpec(['bins/basicprof/%s' % bm_name,
+ '--benchmark_filter=^%s$' % line,
+ '--benchmark_min_time=0.05'],
environ={'LATENCY_TRACE': '%s.trace' % fnize(line)}))
profile_analysis.append(
jobset.JobSpec([sys.executable,
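
Two things change in the JobSpec above: the --benchmark_filter regex is now anchored, and --benchmark_min_time=0.05 (a standard Google Benchmark flag) shortens each profiled run, presumably to keep each basicprof trace small. The anchoring matters because one benchmark's name can be a prefix of another's; a small illustrative sketch with hypothetical names (Python's re stands in here for Benchmark's own regex matching):

    import re

    pattern = '^%s$' % 'BM_Metadata'
    print(bool(re.match(pattern, 'BM_Metadata')))          # True: the exact name still matches
    print(bool(re.match(pattern, 'BM_MetadataFromWire')))  # False: a mere prefix no longer does
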
@@ -103,7 +107,7 @@ def collect_latency(bm_name, args):
# consume upwards of five gigabytes of ram in some cases, and so analysing
# hundreds of them at once is impractical -- but we want at least some
# concurrency or the work takes too long
- if len(benchmarks) >= min(4, multiprocessing.cpu_count()):
+ if len(benchmarks) >= min(16, multiprocessing.cpu_count()):
# run up to half the cpu count: each benchmark can use up to two cores
# (one for the microbenchmark, one for the data flush)
jobset.run(benchmarks, maxjobs=max(1, multiprocessing.cpu_count()/2),
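
The change above only raises the point at which a queued batch of profile runs is flushed; the concurrency cap is untouched. A worked example, assuming a 32-core machine:

    import multiprocessing

    cores = 32                      # assume multiprocessing.cpu_count() == 32
    batch_trigger = min(16, cores)  # flush after 16 queued runs (was min(4, cores) == 4)
    maxjobs = max(1, cores // 2)    # still 16 concurrent: one core per benchmark,
                                    # one spare for its data flush
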
@@ -195,7 +199,14 @@ argp.add_argument('-c', '--collect',
default=sorted(collectors.keys()),
help='Which collectors should be run against each benchmark')
argp.add_argument('-b', '--benchmarks',
- default=['bm_fullstack', 'bm_closure', 'bm_cq', 'bm_call_create', 'bm_error', 'bm_metadata'],
+ default=['bm_fullstack',
+ 'bm_closure',
+ 'bm_cq',
+ 'bm_call_create',
+ 'bm_error',
+ 'bm_chttp2_hpack',
+ 'bm_metadata'],
nargs='+',
type=str,
help='Which microbenchmarks should be run')
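
With bm_chttp2_hpack in the default list, a plain invocation now profiles it alongside the other microbenchmarks; to run it alone, pass it explicitly (a usage sketch, assuming the script is run from the repository root and the benchmark binaries are already built):

    tools/run_tests/run_microbenchmark.py -b bm_chttp2_hpack

The -c flag can additionally narrow which collectors (such as the latency collector defined above) run against it.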