diff options
author | Yuqian Li <liyuqian@google.com> | 2017-11-06 19:55:25 +0000 |
---|---|---|
committer | Skia Commit-Bot <skia-commit-bot@chromium.org> | 2017-11-06 19:55:30 +0000 |
commit | c8f2d19ea5af24cdbdfd16f2f92455f1799e7714 (patch) | |
tree | 93b3387481e8faf2a60463c4d237012a329877db /src/core/SkScan_DAAPath.cpp | |
parent | f4a95bc22b3c102525672c6ec3922289702f0626 (diff) |
Revert "Simplify fill path call by removing do_fill_path"
This reverts commit 12f322b9d4e98619bd128f39b02d3a6f3b78ba79.
Reason for revert: Unexpectedly breaks Chrome layout tests. Will check why.
Original change's description:
> Simplify fill path call by removing do_fill_path
>
> The git diff is not very informative for this CL.
> Here's a better diff:
>
> 1. do_fill_path is removed and its content is copied to AntiFillPath
>
> 2. Any call to do_fill_path is removed.
>
> 3. std::function FillPathFunc is removed (and replaced by direct
> AAAFillPath, DAAFillPath, and SAAFillPath call).
>
> 4. The old call chain is:
> AntiFillPath -> (AAAFillPath/DAAFillPath/...)
> -> do_fill_path
> -> specific FillPathFunc
> The new call chain is:
> AntiFillPath -> AAAFillPath/DAAFillPath/SAAFillPath
>
> This is made possible by the removal of SK_SUPPORT_LEGACY_AA_CHOICE
> which makes sure that AntiFillPath is the only function that makes
> the choice of AAA/DAA/SAA.
>
> In the next CL, I'll improve the structure of SkScan::AntiFillPath
> to prepare for Threaded Backend's init-once change.
>
> Bug: skia:
> Change-Id: If6ebbdab207cadb7bfe2cb3fcf33ea3d180c3896
> Reviewed-on: https://skia-review.googlesource.com/67340
> Reviewed-by: Mike Reed <reed@google.com>
> Reviewed-by: Cary Clark <caryclark@google.com>
> Commit-Queue: Yuqian Li <liyuqian@google.com>
TBR=caryclark@google.com,liyuqian@google.com,reed@google.com,caryclark@skia.org
Change-Id: I7d9517574265db5bc372a5749e6480df8e938f2e
No-Presubmit: true
No-Tree-Checks: true
No-Try: true
Bug: skia:
Reviewed-on: https://skia-review.googlesource.com/67855
Reviewed-by: Yuqian Li <liyuqian@google.com>
Commit-Queue: Yuqian Li <liyuqian@google.com>
Diffstat (limited to 'src/core/SkScan_DAAPath.cpp')
-rw-r--r-- | src/core/SkScan_DAAPath.cpp | 75 |
1 file changed, 40 insertions, 35 deletions
diff --git a/src/core/SkScan_DAAPath.cpp b/src/core/SkScan_DAAPath.cpp index f3c7cbc2f4..e213e34fa6 100644 --- a/src/core/SkScan_DAAPath.cpp +++ b/src/core/SkScan_DAAPath.cpp @@ -315,43 +315,48 @@ void gen_alpha_deltas(const SkPath& path, const SkIRect& clipBounds, Deltas& res } } -// For threaded backend with out-of-order init-once, we probably have to take care of the -// blitRegion, sk_blit_above, sk_blit_below in SkScan::AntiFillPath to maintain the draw order. If -// we do that, be caureful that blitRect may throw exception if the rect is empty. -void SkScan::DAAFillPath(const SkPath& path, SkBlitter* blitter, const SkIRect& ir, - const SkIRect& clipBounds, bool forceRLE) { - bool isEvenOdd = path.getFillType() & 1; - bool isConvex = path.isConvex(); - bool isInverse = path.isInverseFillType(); - bool skipRect = isConvex && !isInverse; - bool containedInClip = clipBounds.contains(ir); - - SkIRect clippedIR = ir; - clippedIR.intersect(clipBounds); - - // The overhead of even constructing SkCoverageDeltaList/Mask is too big. - // So TryBlitFatAntiRect and return if it's successful. - if (!isInverse && TryBlitFatAntiRect(blitter, path, clipBounds)) { - return; - } +void SkScan::DAAFillPath(const SkPath& path, const SkRegion& origClip, SkBlitter* blitter, + bool forceRLE) { + + FillPathFunc fillPathFunc = [](const SkPath& path, SkBlitter* blitter, bool isInverse, + const SkIRect& ir, const SkIRect& clipBounds, bool containedInClip, bool forceRLE){ + bool isEvenOdd = path.getFillType() & 1; + bool isConvex = path.isConvex(); + bool skipRect = isConvex && !isInverse; + + SkIRect clippedIR = ir; + clippedIR.intersect(clipBounds); + + // The overhead of even constructing SkCoverageDeltaList/Mask is too big. + // So TryBlitFatAntiRect and return if it's successful. + if (!isInverse && TryBlitFatAntiRect(blitter, path, clipBounds)) { + return; + } #ifdef GOOGLE3 - constexpr int STACK_SIZE = 12 << 10; // 12K stack size alloc; Google3 has 16K limit. 
+ constexpr int STACK_SIZE = 12 << 10; // 12K stack size alloc; Google3 has 16K limit. #else - constexpr int STACK_SIZE = 64 << 10; // 64k stack size to avoid heap allocation + constexpr int STACK_SIZE = 64 << 10; // 64k stack size to avoid heap allocation #endif - SkSTArenaAlloc<STACK_SIZE> alloc; // avoid heap allocation with SkSTArenaAlloc - - // Only blitter->blitXXX needs to be done in order in the threaded backend. - // Everything before can be done out of order in the threaded backend. - if (!forceRLE && !isInverse && SkCoverageDeltaMask::Suitable(clippedIR)) { - SkCoverageDeltaMask deltaMask(&alloc, clippedIR); - gen_alpha_deltas(path, clipBounds, deltaMask, blitter, skipRect, containedInClip); - deltaMask.convertCoverageToAlpha(isEvenOdd, isInverse, isConvex); - blitter->blitMask(deltaMask.prepareSkMask(), clippedIR); - } else { - SkCoverageDeltaList deltaList(&alloc, clippedIR.fTop, clippedIR.fBottom, forceRLE); - gen_alpha_deltas(path, clipBounds, deltaList, blitter, skipRect, containedInClip); - blitter->blitCoverageDeltas(&deltaList, clipBounds, isEvenOdd, isInverse, isConvex); - } + SkSTArenaAlloc<STACK_SIZE> alloc; // avoid heap allocation with SkSTArenaAlloc + + // Only blitter->blitXXX need to be done in order in the threaded backend. + // Everything before can be done out of order in the threaded backend. 
+        if (!forceRLE && !isInverse && SkCoverageDeltaMask::Suitable(clippedIR)) { +            SkCoverageDeltaMask deltaMask(&alloc, clippedIR); +            gen_alpha_deltas(path, clipBounds, deltaMask, blitter, skipRect, containedInClip); +            deltaMask.convertCoverageToAlpha(isEvenOdd, isInverse, isConvex); +            blitter->blitMask(deltaMask.prepareSkMask(), clippedIR); +        } else { +            SkCoverageDeltaList deltaList(&alloc, clippedIR.fTop, clippedIR.fBottom, forceRLE); +            gen_alpha_deltas(path, clipBounds, deltaList, blitter, skipRect, containedInClip); +            blitter->blitCoverageDeltas(&deltaList, clipBounds, isEvenOdd, isInverse, isConvex); +        } +    }; + +    // For threaded backend with out-of-order init-once (and therefore out-of-order do_fill_path), +    // we probably have to take care of the blitRegion, sk_blit_above, sk_blit_below in do_fill_path +    // to maintain the draw order. If we do that, be careful that blitRect may throw exception if +    // the rect is empty. +    do_fill_path(path, origClip, blitter, forceRLE, 2, std::move(fillPathFunc)); } |