aboutsummaryrefslogtreecommitdiffhomepage
path: root/tools/profiling/latency_profile/run_latency_profile.sh
blob: 64c3e58fcbdb4acafc5270d401073babf9507204 (plain)
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
#!/bin/bash

set -ex

# Run from the repository root (script lives in tools/profiling/latency_profile).
# Quoted so paths containing spaces don't word-split.
cd "$(dirname "$0")/../../.."

BINS="sync_unary_ping_pong_test sync_streaming_ping_pong_test"
# print(...) works under both Python 2 and 3; the previous Python-2-only
# 'print expr' statement breaks when 'python' resolves to Python 3.
CPUS=$(python -c 'import multiprocessing; print(multiprocessing.cpu_count())')

# $BINS is intentionally unquoted: it is a space-separated list of make targets.
make CONFIG=basicprof -j"$CPUS" $BINS

mkdir -p reports

# try to use pypy for generating reports
# each trace dumps 7-8gig of text to disk, and processing this into a report is
# heavyweight - so any speed boost is worthwhile
# TODO(ctiller): consider rewriting report generation in C++ for performance
# 'command -v' is the portable replacement for 'which'.
if command -v pypy >/dev/null; then
  PYTHON=pypy
else
  PYTHON=python2.7
fi

# start processes, interleaving report index generation
echo '<html><head></head><body>' > reports/index.html
# $BINS is intentionally unquoted: it is a space-separated list of binaries.
for bin in $BINS
do
  # Run the instrumented binary; it writes its trace to ./latency_trace.txt.
  bins/basicprof/"$bin"
  mv latency_trace.txt "$bin.trace"
  # index.html lives in reports/, so the relative link "$bin.txt" resolves to
  # reports/$bin.txt, which is generated below.
  echo "<a href='$bin.txt'>$bin</a><br/>" >> reports/index.html
done
pids=""
# generate report pages... this will take some time
# run them in parallel: they take 1 cpu each
# $BINS is intentionally unquoted: it is a space-separated list of binaries.
for bin in $BINS
do
  $PYTHON tools/profiling/latency_profile/profile_analyzer.py \
    --source="$bin.trace" --fmt=simple > "reports/$bin.txt" &
  # Accumulate background PIDs so the trap/wait section below can manage them.
  pids+=" $!"
done
echo '</body></html>' >> reports/index.html

# make sure we kill the report generation if something goes wrong
# $pids is deliberately expanded now (double quotes): the list is complete and
# will not change. '|| true' covers already-reaped PIDs on a clean exit.
trap "kill $pids || true" EXIT

# finally, wait for the background report generation to finish
for pid in $pids
do
  if wait "$pid"
  then
    echo "Finished $pid"
  else
    # A failed analyzer aborts the script; the EXIT trap reaps the rest.
    exit 1
  fi
done