author kpayson64 <kpayson@google.com> 2018-05-11 12:20:11 -0700
committer kpayson64 <kpayson@google.com> 2018-05-11 12:20:11 -0700
commit 4fad281ce8affe27fb7428f264d2c3b9dfc45f2f (patch)
tree ca96c9efd69afec56aa2e5fe072a9f758247d0a3 /tools/profiling
parent ec445cc2bb270ed4acb1c710c3533fca14a50019 (diff)
parent 61fdb46ac456027c79841949272ec540f66d2317 (diff)
Merge remote-tracking branch 'upstream/master' into fork_exec_ctx_check
Diffstat (limited to 'tools/profiling')
-rwxr-xr-x  tools/profiling/latency_profile/profile_analyzer.py     | 21
-rwxr-xr-x  tools/profiling/microbenchmarks/bm2bq.py                 |  8
-rw-r--r--  tools/profiling/microbenchmarks/bm_diff/bm_constants.py  |  9
-rw-r--r--  tools/profiling/microbenchmarks/bm_json.py               |  8
4 files changed, 23 insertions(+), 23 deletions(-)
diff --git a/tools/profiling/latency_profile/profile_analyzer.py b/tools/profiling/latency_profile/profile_analyzer.py
index d4d14ef8c7..cdc2f1cbab 100755
--- a/tools/profiling/latency_profile/profile_analyzer.py
+++ b/tools/profiling/latency_profile/profile_analyzer.py
@@ -184,24 +184,23 @@ for cs in call_stacks:
def percentile(N, percent, key=lambda x: x):
"""
- Find the percentile of a list of values.
+ Find the percentile of an already sorted list of values.
- @parameter N - is a list of values. Note N MUST BE already sorted.
- @parameter percent - a float value from 0.0 to 1.0.
+ @parameter N - is a list of values. MUST be already sorted.
+ @parameter percent - a float value in the range [0.0, 1.0].
@parameter key - optional key function to compute value from each element of N.
@return - the percentile of the values
"""
if not N:
return None
- k = (len(N) - 1) * percent
- f = math.floor(k)
- c = math.ceil(k)
- if f == c:
- return key(N[int(k)])
- d0 = key(N[int(f)]) * (c - k)
- d1 = key(N[int(c)]) * (k - f)
- return d0 + d1
+ float_idx = (len(N) - 1) * percent
+ idx = int(float_idx)
+ result = key(N[idx])
+ if idx < len(N) - 1:
+ # interpolate with the next element's value
+ result += (float_idx - idx) * (key(N[idx + 1]) - key(N[idx]))
+ return result
def tidy_tag(tag):
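
The rewritten body computes the same linear interpolation as the old floor/ceil version, just more readably. A self-contained sketch of the new logic, with illustrative asserts (the function and variable names match the hunk above):

def percentile(N, percent, key=lambda x: x):
    # N must already be sorted; percent is in [0.0, 1.0].
    if not N:
        return None
    float_idx = (len(N) - 1) * percent
    idx = int(float_idx)
    result = key(N[idx])
    if idx < len(N) - 1:
        # interpolate with the next element's value
        result += (float_idx - idx) * (key(N[idx + 1]) - key(N[idx]))
    return result

assert percentile([10, 20, 30], 0.5) == 20
assert percentile([10, 20], 0.25) == 12.5  # 10 + 0.25 * (20 - 10)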
diff --git a/tools/profiling/microbenchmarks/bm2bq.py b/tools/profiling/microbenchmarks/bm2bq.py
index e084e28dcf..c5307c52bd 100755
--- a/tools/profiling/microbenchmarks/bm2bq.py
+++ b/tools/profiling/microbenchmarks/bm2bq.py
@@ -1,9 +1,5 @@
#!/usr/bin/env python2.7
#
-# Convert google-benchmark json output to something that can be uploaded to
-# BigQuery
-#
-#
# Copyright 2017 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
@@ -18,6 +14,9 @@
# See the License for the specific language governing permissions and
# limitations under the License.
+# Convert google-benchmark json output to something that can be uploaded to
+# BigQuery
+
import sys
import json
import csv
@@ -54,6 +53,7 @@ if len(sys.argv) > 2:
else:
js2 = None
+# TODO(jtattermusch): write directly to a file instead of stdout
writer = csv.DictWriter(sys.stdout, [c for c, t in columns])
for row in bm_json.expand_json(js, js2):
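
The TODO above notes that the writer should target a file rather than stdout. A minimal sketch of what that might look like, assuming a hypothetical output path argument (not part of this diff); 'columns' is the same list of (name, type) pairs the script already builds:

import csv

def write_csv(out_path, columns, rows):
    # Hypothetical helper: write rows to a named file instead of sys.stdout.
    with open(out_path, 'w') as f:
        writer = csv.DictWriter(f, [c for c, t in columns])
        for row in rows:
            writer.writerow(row)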
diff --git a/tools/profiling/microbenchmarks/bm_diff/bm_constants.py b/tools/profiling/microbenchmarks/bm_diff/bm_constants.py
index 5719e42620..c8b6c1ebd0 100644
--- a/tools/profiling/microbenchmarks/bm_diff/bm_constants.py
+++ b/tools/profiling/microbenchmarks/bm_diff/bm_constants.py
@@ -22,11 +22,10 @@ _AVAILABLE_BENCHMARK_TESTS = [
'bm_metadata', 'bm_fullstack_trickle'
]
-_INTERESTING = ('cpu_time', 'real_time', 'call_initial_size-median',
- 'locks_per_iteration', 'allocs_per_iteration',
- 'writes_per_iteration', 'atm_cas_per_iteration',
- 'atm_add_per_iteration', 'nows_per_iteration',
- 'cli_transport_stalls_per_iteration',
+_INTERESTING = ('cpu_time', 'real_time', 'locks_per_iteration',
+ 'allocs_per_iteration', 'writes_per_iteration',
+ 'atm_cas_per_iteration', 'atm_add_per_iteration',
+ 'nows_per_iteration', 'cli_transport_stalls_per_iteration',
'cli_stream_stalls_per_iteration',
'svr_transport_stalls_per_iteration',
'svr_stream_stalls_per_iteration',
diff --git a/tools/profiling/microbenchmarks/bm_json.py b/tools/profiling/microbenchmarks/bm_json.py
index 497d7ca813..2f5eb708de 100644
--- a/tools/profiling/microbenchmarks/bm_json.py
+++ b/tools/profiling/microbenchmarks/bm_json.py
@@ -12,8 +12,12 @@
# See the License for the specific language governing permissions and
# limitations under the License.
+# Utilities for manipulating JSON data that represents microbenchmark results.
+
import os
+# template arguments and dynamic arguments of individual benchmark types
+# Example benchmark name: "BM_UnaryPingPong<TCP, NoOpMutator, NoOpMutator>/0/0"
_BM_SPECS = {
'BM_UnaryPingPong': {
'tpl': ['fixture', 'client_mutator', 'server_mutator'],
@@ -115,6 +119,7 @@ _BM_SPECS = {
def numericalize(s):
+ """Convert abbreviations like '100M' or '10k' to a number."""
if not s: return ''
if s[-1] == 'k':
return float(s[:-1]) * 1024
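
For example, per the 'k' branch shown above (a binary multiplier; the other suffix branches fall outside this hunk):

assert numericalize('10k') == 10 * 1024  # -> 10240.0
assert numericalize('') == ''            # empty input passes through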
@@ -159,9 +164,6 @@ def parse_name(name):
rest = s[0]
dyn_args = s[1:]
name = rest
- print(name)
- print(dyn_args, _BM_SPECS[name]['dyn'])
- print(tpl_args, _BM_SPECS[name]['tpl'])
assert name in _BM_SPECS, '_BM_SPECS needs to be expanded for %s' % name
assert len(dyn_args) == len(_BM_SPECS[name]['dyn'])
assert len(tpl_args) == len(_BM_SPECS[name]['tpl'])
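
Putting the pieces together, an illustrative sketch of the dict parse_name would yield for the example name in the _BM_SPECS comment above. The tpl keys come from the BM_UnaryPingPong spec shown earlier; the dyn key names are assumptions, since that part of the spec is not visible in this diff:

# Hypothetical output shape for
# parse_name('BM_UnaryPingPong<TCP, NoOpMutator, NoOpMutator>/0/0'):
{
    'name': 'BM_UnaryPingPong',
    'fixture': 'TCP',
    'client_mutator': 'NoOpMutator',
    'server_mutator': 'NoOpMutator',
    'request_size': 0,   # assumed dyn arg name, not shown in this diff
    'response_size': 0,  # assumed dyn arg name, not shown in this diff
}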