author    michajlo <michajlo@google.com>  2018-05-02 13:27:20 -0700
committer Copybara-Service <copybara-piper@google.com>  2018-05-02 13:29:18 -0700
commit    53af74ad4ba4004eb24117368d2de5e622e7ddef (patch)
tree      7e2c9cd7c8c413e17f0834650980db41179c38a4
parent    7e40847f7418dadb0f7b478f408b56f6d7404569 (diff)
Harden Profiler slow tasks capping test
Internally, slow task aggregators are sharded by thread id, so we need to throw some threads at the profiler to get an interesting test case; otherwise everything comes from one thread, lands in one shard, and the shard combination logic is never exercised.

Of course it's possible that all the threads created in the test wind up landing in the same shard; let's just pretend that's not going to happen, since the odds are pretty low.

PiperOrigin-RevId: 195141874
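For context, here is a minimal sketch of the sharding scheme the message describes, assuming a hypothetical shard count and class name (the real aggregator is internal to Profiler and does not appear in this diff):

import java.util.ArrayList;
import java.util.List;

// Hypothetical sketch, not the actual Profiler internals.
final class ShardedAggregatorSketch {
  // Assumed shard count; the real value is an implementation detail.
  private static final int NUM_SHARDS = 16;

  // One duration list per shard, standing in for the per-shard aggregators.
  private final List<List<Long>> shards = new ArrayList<>();

  ShardedAggregatorSketch() {
    for (int i = 0; i < NUM_SHARDS; i++) {
      shards.add(new ArrayList<>());
    }
  }

  void logTask(long durationNanos) {
    // Shard selection depends only on the calling thread's id, so a
    // single-threaded test only ever touches one shard.
    int shard = (int) (Thread.currentThread().getId() % NUM_SHARDS);
    shards.get(shard).add(durationNanos);
  }
}

Under a scheme like this, a test that logs every task from the test runner's own thread exercises exactly one shard, which is why the change below fans the fast tasks out across 32 threads.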
-rw-r--r--  src/test/java/com/google/devtools/build/lib/profiler/ProfilerTest.java  55
1 file changed, 50 insertions(+), 5 deletions(-)
diff --git a/src/test/java/com/google/devtools/build/lib/profiler/ProfilerTest.java b/src/test/java/com/google/devtools/build/lib/profiler/ProfilerTest.java
index a3e7993481..5840294c59 100644
--- a/src/test/java/com/google/devtools/build/lib/profiler/ProfilerTest.java
+++ b/src/test/java/com/google/devtools/build/lib/profiler/ProfilerTest.java
@@ -28,6 +28,7 @@ import com.google.devtools.build.lib.testutil.FoundationTestCase;
import com.google.devtools.build.lib.testutil.ManualClock;
import com.google.devtools.build.lib.testutil.Suite;
import com.google.devtools.build.lib.testutil.TestSpec;
+import com.google.devtools.build.lib.testutil.TestUtils;
import com.google.devtools.build.lib.vfs.FileSystemUtils;
import com.google.devtools.build.lib.vfs.Path;
import java.io.InputStream;
@@ -215,16 +216,60 @@ public class ProfilerTest extends FoundationTestCase {
        BlazeClock.instance(),
        BlazeClock.instance().nanoTime());
+    // Add some fast tasks - these shouldn't show up in the slowest.
+    for (int i = 0; i < ProfilerTask.VFS_STAT.slowestInstancesCount; i++) {
+      profiler.logSimpleTask(
+          /*startTimeNanos=*/ 1,
+          /*stopTimeNanos=*/ ProfilerTask.VFS_STAT.minDuration + 10,
+          ProfilerTask.VFS_STAT,
+          "stat");
+    }
+
+    // Add some slow tasks we expect to show up in the slowest.
    List<Long> expectedSlowestDurations = new ArrayList<>();
    for (int i = 0; i < ProfilerTask.VFS_STAT.slowestInstancesCount; i++) {
-      long fakeDuration = i + 1000;
-      profiler.logSimpleTask(1, fakeDuration + 1, ProfilerTask.VFS_STAT, "stat");
+      long fakeDuration = ProfilerTask.VFS_STAT.minDuration + i + 10_000;
+      profiler.logSimpleTask(
+          /*startTimeNanos=*/ 1,
+          /*stopTimeNanos=*/ fakeDuration + 1,
+          ProfilerTask.VFS_STAT,
+          "stat");
      expectedSlowestDurations.add(fakeDuration);
    }
-    // Sprinkle in a whole bunch of fast tasks.
-    for (int i = 0; i < 100; i++) {
-      profiler.logSimpleTask(1, i + 1, ProfilerTask.VFS_STAT, "stat" + i);
+    // Sprinkle in a whole bunch of fast tasks from different thread ids; this is necessary
+    // because aggregation is internally sharded across several aggregators by thread id.
+    // It's possible all these threads wind up in the same shard; we'll take our chances.
+    ImmutableList.Builder<Thread> threadsBuilder = ImmutableList.builder();
+    try {
+      for (int i = 0; i < 32; i++) {
+        Thread thread = new Thread() {
+          @Override
+          public void run() {
+            for (int j = 0; j < 100; j++) {
+              profiler.logSimpleTask(
+                  /*startTimeNanos=*/ 1,
+                  /*stopTimeNanos=*/ ProfilerTask.VFS_STAT.minDuration + j + 1,
+                  ProfilerTask.VFS_STAT,
+                  "stat");
+            }
+          }
+        };
+        threadsBuilder.add(thread);
+        thread.start();
+      }
+    } finally {
+      threadsBuilder.build().forEach(
+          t -> {
+            try {
+              t.join(TestUtils.WAIT_TIMEOUT_MILLISECONDS);
+            } catch (InterruptedException e) {
+              t.interrupt();
+              // This'll go ahead and interrupt all the others. The thread we just interrupted
+              // is lightweight enough that it's reasonable to assume it'll exit.
+              Thread.currentThread().interrupt();
+            }
+          });
+    }
    ImmutableList<SlowTask> slowTasks = ImmutableList.copyOf(profiler.getSlowestTasks());
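As a closing note, the "shard combination logic" the test now exercises presumably merges each shard's slowest-instances list into one global top-k. A hedged sketch of such a merge follows; SlowTaskMerge and its signature are illustrative, not the Profiler API:

import java.util.ArrayList;
import java.util.Comparator;
import java.util.List;
import java.util.PriorityQueue;

final class SlowTaskMerge {
  // Merges per-shard duration lists into the k largest durations overall.
  // A min-heap of size k holds the current candidates; any duration larger
  // than the heap's minimum evicts it.
  static List<Long> slowestAcrossShards(List<List<Long>> shards, int k) {
    PriorityQueue<Long> minHeap = new PriorityQueue<>();
    for (List<Long> shard : shards) {
      for (long duration : shard) {
        if (minHeap.size() < k) {
          minHeap.add(duration);
        } else if (duration > minHeap.peek()) {
          minHeap.poll();
          minHeap.add(duration);
        }
      }
    }
    List<Long> slowest = new ArrayList<>(minHeap);
    slowest.sort(Comparator.reverseOrder());
    return slowest;
  }
}

With k equal to ProfilerTask.VFS_STAT.slowestInstancesCount, a merge along these lines would be expected to surface exactly the durations the test stores in expectedSlowestDurations, regardless of which shards the worker threads happened to land in.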