path: root/tests/RTreeTest.cpp
author    mtklein <mtklein@chromium.org>  2014-10-27 10:27:10 -0700
committer Commit bot <commit-bot@chromium.org>  2014-10-27 10:27:10 -0700
commit    4477c3c0e6eb064772aefe8737425cd1c2ce557f (patch)
tree      e841aeb0174e7ddc621f0b5dca88592e8c37d975 /tests/RTreeTest.cpp
parent    5e44b00392e791088b693a0b462b107b0b5a91ba (diff)
Cut down SkBBH API more.
- The expected case is now a single bulk-load insert() call instead of N;
- reserve() and flushDeferredInserts() can fold into insert() now;
- SkBBH subclasses may take ownership of the bounds

This appears to be a performance no-op on both my Mac and N5. I guess even the
simplest indirect branch predictor ("same as last time") can predict the
repeated virtual calls to SkBBH::insert() perfectly.

BUG=skia:
Review URL: https://codereview.chromium.org/670213002
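To illustrate the shape of the change, here is a minimal sketch of the old versus new calling pattern. The types and names below (BBoxHierarchy, Rect, a std::vector-based insert) are illustrative stand-ins, not the actual SkBBH/SkRTree signatures; the real API use is visible in the diff further down.

// Sketch only: hypothetical bounding-box-hierarchy interface, assumed for illustration.
#include <utility>
#include <vector>

struct Rect { float left, top, right, bottom; };

class BBoxHierarchy {
public:
    virtual ~BBoxHierarchy() {}
    // New-style bulk load: one virtual call carrying all the bounds.
    // The implementation is free to take ownership of the array.
    virtual void insert(std::vector<Rect> bounds) = 0;
};

class FlatList : public BBoxHierarchy {
public:
    void insert(std::vector<Rect> bounds) override {
        fBounds = std::move(bounds);  // take ownership; no per-rect virtual call
    }
private:
    std::vector<Rect> fBounds;
};

// Old pattern the commit removes (roughly):
//   bbh->reserve(N);
//   for (int i = 0; i < N; ++i) { bbh->insert(i, bounds[i], /*defer=*/true); }
//   bbh->flushDeferredInserts();
//
// New pattern:
//   bbh->insert(std::move(allBounds));  // single bulk-load call

With the bulk call there is one virtual dispatch per load rather than N, and the hierarchy can build itself directly from the caller's array.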
Diffstat (limited to 'tests/RTreeTest.cpp')
-rw-r--r--  tests/RTreeTest.cpp  43
1 file changed, 8 insertions(+), 35 deletions(-)
diff --git a/tests/RTreeTest.cpp b/tests/RTreeTest.cpp
index 6e0622c2fb..5d5c1cb873 100644
--- a/tests/RTreeTest.cpp
+++ b/tests/RTreeTest.cpp
@@ -29,12 +29,6 @@ static SkRect random_rect(SkRandom& rand) {
return rect;
}
-static void random_data_rects(SkRandom& rand, SkRect out[], int n) {
- for (int i = 0; i < n; ++i) {
- out[i] = random_rect(rand);
- }
-}
-
static bool verify_query(SkRect query, SkRect rects[], SkTDArray<unsigned>& found) {
// TODO(mtklein): no need to do this after everything's SkRects
query.roundOut();
@@ -73,9 +67,7 @@ static void run_queries(skiatest::Reporter* reporter, SkRandom& rand, SkRect rec
}
static void rtree_test_main(SkRTree* rtree, skiatest::Reporter* reporter) {
- SkRect rects[NUM_RECTS];
- SkRandom rand;
- REPORTER_ASSERT(reporter, rtree);
+ SkASSERT(rtree);
int expectedDepthMin = -1;
int expectedDepthMax = -1;
@@ -94,42 +86,23 @@ static void rtree_test_main(SkRTree* rtree, skiatest::Reporter* reporter) {
++expectedDepthMax;
}
+ SkRandom rand;
+ SkAutoTMalloc<SkRect> rects(NUM_RECTS);
for (size_t i = 0; i < NUM_ITERATIONS; ++i) {
- random_data_rects(rand, rects, NUM_RECTS);
-
- // First try bulk-loaded inserts
- for (int i = 0; i < NUM_RECTS; ++i) {
- rtree->insert(i, rects[i], true);
- }
- rtree->flushDeferredInserts();
- run_queries(reporter, rand, rects, *rtree);
- REPORTER_ASSERT(reporter, NUM_RECTS == rtree->getCount());
- REPORTER_ASSERT(reporter, expectedDepthMin <= rtree->getDepth() &&
- expectedDepthMax >= rtree->getDepth());
rtree->clear();
REPORTER_ASSERT(reporter, 0 == rtree->getCount());
- // Then try immediate inserts
- for (int i = 0; i < NUM_RECTS; ++i) {
- rtree->insert(i, rects[i]);
+ for (int j = 0; j < NUM_RECTS; j++) {
+ rects[j] = random_rect(rand);
}
- run_queries(reporter, rand, rects, *rtree);
- REPORTER_ASSERT(reporter, NUM_RECTS == rtree->getCount());
- REPORTER_ASSERT(reporter, expectedDepthMin <= rtree->getDepth() &&
- expectedDepthMax >= rtree->getDepth());
- rtree->clear();
- REPORTER_ASSERT(reporter, 0 == rtree->getCount());
- // And for good measure try immediate inserts, but in reversed order
- for (int i = NUM_RECTS - 1; i >= 0; --i) {
- rtree->insert(i, rects[i]);
- }
+ rtree->insert(&rects, NUM_RECTS);
+ SkASSERT(rects); // SkRTree doesn't take ownership of rects.
+
run_queries(reporter, rand, rects, *rtree);
REPORTER_ASSERT(reporter, NUM_RECTS == rtree->getCount());
REPORTER_ASSERT(reporter, expectedDepthMin <= rtree->getDepth() &&
expectedDepthMax >= rtree->getDepth());
- rtree->clear();
- REPORTER_ASSERT(reporter, 0 == rtree->getCount());
}
}