diff options
author | 2014-07-16 19:59:32 -0400 | |
---|---|---|
committer | 2014-07-16 19:59:32 -0400 | |
commit | 912947737a973421f4c58682b6171cb5ee00ad3a (patch) | |
tree | 87a3caef4916a894403f8d02edc0d64a9a945728 /bench | |
parent | 7ef21622b2ed6b9c5fc4c149cb62944fc191f054 (diff) |
Use __rdtsc on Windows.
This seems to be ~100x higher resolution than QueryPerformanceCounter. AFAIK, all our Windows perf bots have constant_tsc, so we can be a bit more direct about using rdtsc directly: it'll always tick at the max CPU frequency.
Now, the question remains, what is the max CPU frequency to divide through by? It looks like QueryPerformanceFrequency actually gives the CPU frequency in kHz, suspiciously exactly what we need to divide through to get elapsed milliseconds. That was a freebie.
I did some before/after comparison on slow benchmarks. Timings look the same. Going to land this without review tonight to see what happens on the bots; happy to review carefully tomorrow.
R=mtklein@google.com
TBR=bungeman
BUG=skia:
Review URL: https://codereview.chromium.org/394363003
Diffstat (limited to 'bench')
-rw-r--r-- | bench/nanobench.cpp | 9 |
1 file changed, 2 insertions(+), 7 deletions(-)
diff --git a/bench/nanobench.cpp b/bench/nanobench.cpp
index 4453707416..a9862c6ef0 100644
--- a/bench/nanobench.cpp
+++ b/bench/nanobench.cpp
@@ -50,8 +50,6 @@
 DEFINE_bool(gpu, true, "Master switch for GPU-bound work.");
 DEFINE_string(outResultsFile, "", "If given, write results here as JSON.");
 DEFINE_bool(resetGpuContext, true, "Reset the GrContext before running each bench.");
-DEFINE_int32(maxCalibrationAttempts, 3,
-             "Try up to this many times to guess loops for a bench, or skip the bench.");

 static SkString humanize(double ms) {
@@ -95,13 +93,8 @@ static double estimate_timer_overhead() {
 static int cpu_bench(const double overhead, Benchmark* bench, SkCanvas* canvas, double* samples) {
     // First figure out approximately how many loops of bench it takes to make overhead negligible.
     double bench_plus_overhead;
-    int round = 0;
     do {
         bench_plus_overhead = time(1, bench, canvas, NULL);
-        if (++round == FLAGS_maxCalibrationAttempts) {
-            // At some point we have to just give up.
-            return 0;
-        }
     } while (bench_plus_overhead < overhead);

     // Later we'll just start and stop the timer once but loop N times.
@@ -288,6 +281,8 @@ int tool_main(int argc, char** argv) {
     fill_static_options(&log);

     const double overhead = estimate_timer_overhead();
+    SkDebugf("Timer overhead: %s\n", humanize(overhead).c_str());
+
     SkAutoTMalloc<double> samples(FLAGS_samples);

     if (FLAGS_runOnce) {