diff options
author | 2014-10-27 10:27:10 -0700 | |
---|---|---|
committer | 2014-10-27 10:27:10 -0700 | |
commit | 4477c3c0e6eb064772aefe8737425cd1c2ce557f (patch) | |
tree | e841aeb0174e7ddc621f0b5dca88592e8c37d975 /tests/RecordDrawTest.cpp | |
parent | 5e44b00392e791088b693a0b462b107b0b5a91ba (diff) |
Cut down SkBBH API more.
- The expected case is now a single bulk-load insert() call instead of N;
- reserve() and flushDeferredInserts() can fold into insert() now;
- SkBBH subclasses may take ownership of the bounds
This appears to be a performance no-op on both my Mac and N5. I guess
even the simplest indirect branch predictor ("same as last time") can predict
the repeated virtual calls to SkBBH::insert() perfectly.
BUG=skia:
Review URL: https://codereview.chromium.org/670213002
Diffstat (limited to 'tests/RecordDrawTest.cpp')
-rw-r--r-- | tests/RecordDrawTest.cpp | 9 |
1 file changed, 6 insertions, 3 deletions
diff --git a/tests/RecordDrawTest.cpp b/tests/RecordDrawTest.cpp index 43d4f6068b..6105c00f7f 100644 --- a/tests/RecordDrawTest.cpp +++ b/tests/RecordDrawTest.cpp @@ -100,9 +100,12 @@ DEF_TEST(RecordDraw_SetMatrixClobber, r) { } struct TestBBH : public SkBBoxHierarchy { - virtual void insert(unsigned opIndex, const SkRect& bounds, bool defer) SK_OVERRIDE { - Entry e = { opIndex, bounds }; - fEntries.push(e); + virtual void insert(SkAutoTMalloc<SkRect>* boundsArray, int N) SK_OVERRIDE { + fEntries.setCount(N); + for (int i = 0; i < N; i++) { + Entry e = { (unsigned)i, (*boundsArray)[i] }; + fEntries[i] = e; + } } virtual void search(const SkRect& query, SkTDArray<unsigned>* results) const SK_OVERRIDE {} |