aboutsummaryrefslogtreecommitdiffhomepage
path: root/src/core/SkScan_DAAPath.cpp
diff options
context:
space:
mode:
authorGravatar Yuqian Li <liyuqian@google.com>2017-11-06 10:56:30 -0500
committerGravatar Skia Commit-Bot <skia-commit-bot@chromium.org>2017-11-06 18:40:49 +0000
commit12f322b9d4e98619bd128f39b02d3a6f3b78ba79 (patch)
treef08ca36cfb6d55ab1a8e990804764a3fc5b74fbb /src/core/SkScan_DAAPath.cpp
parent2650bfa089968462a3627c7d60ae3ed637d82d06 (diff)
Simplify fill path call by removing do_fill_path
The git diff is not very informative for this CL. Here's a better diff: 1. do_fill_path is removed and its content is copied to AntiFillPath 2. Any call to do_fill_path is removed. 3. std::function FillPathFunc is removed (and replaced by direct AAAFillPath, DAAFillPath, and SAAFillPath call). 4. The old call chain is: AntiFillPath -> (AAAFillPath/DAAFillPath/...) -> do_fill_path -> specific FillPathFunc The new call chain is: AntiFillPath -> AAAFillPath/DAAFillPath/SAAFillPath This is made possible by the removal of SK_SUPPORT_LEGACY_AA_CHOICE which makes sure that AntiFillPath is the only function that makes the choice of AAA/DAA/SAA. In the next CL, I'll improve the structure of SkScan::AntiFillPath to prepare for Threaded Backend's init-once change. Bug: skia: Change-Id: If6ebbdab207cadb7bfe2cb3fcf33ea3d180c3896 Reviewed-on: https://skia-review.googlesource.com/67340 Reviewed-by: Mike Reed <reed@google.com> Reviewed-by: Cary Clark <caryclark@google.com> Commit-Queue: Yuqian Li <liyuqian@google.com>
Diffstat (limited to 'src/core/SkScan_DAAPath.cpp')
-rw-r--r--src/core/SkScan_DAAPath.cpp75
1 files changed, 35 insertions, 40 deletions
diff --git a/src/core/SkScan_DAAPath.cpp b/src/core/SkScan_DAAPath.cpp
index e213e34fa6..f3c7cbc2f4 100644
--- a/src/core/SkScan_DAAPath.cpp
+++ b/src/core/SkScan_DAAPath.cpp
@@ -315,48 +315,43 @@ void gen_alpha_deltas(const SkPath& path, const SkIRect& clipBounds, Deltas& res
}
}
-void SkScan::DAAFillPath(const SkPath& path, const SkRegion& origClip, SkBlitter* blitter,
- bool forceRLE) {
-
- FillPathFunc fillPathFunc = [](const SkPath& path, SkBlitter* blitter, bool isInverse,
- const SkIRect& ir, const SkIRect& clipBounds, bool containedInClip, bool forceRLE){
- bool isEvenOdd = path.getFillType() & 1;
- bool isConvex = path.isConvex();
- bool skipRect = isConvex && !isInverse;
-
- SkIRect clippedIR = ir;
- clippedIR.intersect(clipBounds);
-
- // The overhead of even constructing SkCoverageDeltaList/Mask is too big.
- // So TryBlitFatAntiRect and return if it's successful.
- if (!isInverse && TryBlitFatAntiRect(blitter, path, clipBounds)) {
- return;
- }
+// For threaded backend with out-of-order init-once, we probably have to take care of the
+// blitRegion, sk_blit_above, sk_blit_below in SkScan::AntiFillPath to maintain the draw order. If
+// we do that, be careful that blitRect may throw an exception if the rect is empty.
+void SkScan::DAAFillPath(const SkPath& path, SkBlitter* blitter, const SkIRect& ir,
+ const SkIRect& clipBounds, bool forceRLE) {
+ bool isEvenOdd = path.getFillType() & 1;
+ bool isConvex = path.isConvex();
+ bool isInverse = path.isInverseFillType();
+ bool skipRect = isConvex && !isInverse;
+ bool containedInClip = clipBounds.contains(ir);
+
+ SkIRect clippedIR = ir;
+ clippedIR.intersect(clipBounds);
+
+ // The overhead of even constructing SkCoverageDeltaList/Mask is too big.
+ // So TryBlitFatAntiRect and return if it's successful.
+ if (!isInverse && TryBlitFatAntiRect(blitter, path, clipBounds)) {
+ return;
+ }
#ifdef GOOGLE3
- constexpr int STACK_SIZE = 12 << 10; // 12K stack size alloc; Google3 has 16K limit.
+ constexpr int STACK_SIZE = 12 << 10; // 12K stack size alloc; Google3 has 16K limit.
#else
- constexpr int STACK_SIZE = 64 << 10; // 64k stack size to avoid heap allocation
+ constexpr int STACK_SIZE = 64 << 10; // 64k stack size to avoid heap allocation
#endif
- SkSTArenaAlloc<STACK_SIZE> alloc; // avoid heap allocation with SkSTArenaAlloc
-
- // Only blitter->blitXXX need to be done in order in the threaded backend.
- // Everything before can be done out of order in the threaded backend.
- if (!forceRLE && !isInverse && SkCoverageDeltaMask::Suitable(clippedIR)) {
- SkCoverageDeltaMask deltaMask(&alloc, clippedIR);
- gen_alpha_deltas(path, clipBounds, deltaMask, blitter, skipRect, containedInClip);
- deltaMask.convertCoverageToAlpha(isEvenOdd, isInverse, isConvex);
- blitter->blitMask(deltaMask.prepareSkMask(), clippedIR);
- } else {
- SkCoverageDeltaList deltaList(&alloc, clippedIR.fTop, clippedIR.fBottom, forceRLE);
- gen_alpha_deltas(path, clipBounds, deltaList, blitter, skipRect, containedInClip);
- blitter->blitCoverageDeltas(&deltaList, clipBounds, isEvenOdd, isInverse, isConvex);
- }
- };
-
- // For threaded backend with out-of-order init-once (and therefore out-of-order do_fill_path),
- // we probably have to take care of the blitRegion, sk_blit_above, sk_blit_below in do_fill_path
- // to maintain the draw order. If we do that, be caureful that blitRect may throw exception is
- // the rect is empty.
- do_fill_path(path, origClip, blitter, forceRLE, 2, std::move(fillPathFunc));
+ SkSTArenaAlloc<STACK_SIZE> alloc; // avoid heap allocation with SkSTArenaAlloc
+
+ // Only blitter->blitXXX needs to be done in order in the threaded backend.
+ // Everything before can be done out of order in the threaded backend.
+ if (!forceRLE && !isInverse && SkCoverageDeltaMask::Suitable(clippedIR)) {
+ SkCoverageDeltaMask deltaMask(&alloc, clippedIR);
+ gen_alpha_deltas(path, clipBounds, deltaMask, blitter, skipRect, containedInClip);
+ deltaMask.convertCoverageToAlpha(isEvenOdd, isInverse, isConvex);
+ blitter->blitMask(deltaMask.prepareSkMask(), clippedIR);
+ } else {
+ SkCoverageDeltaList deltaList(&alloc, clippedIR.fTop, clippedIR.fBottom, forceRLE);
+ gen_alpha_deltas(path, clipBounds, deltaList, blitter, skipRect, containedInClip);
+ blitter->blitCoverageDeltas(&deltaList, clipBounds, isEvenOdd, isInverse, isConvex);
+ }
}