author     Yuqian Li <liyuqian@google.com>                  2017-11-06 10:56:30 -0500
committer  Skia Commit-Bot <skia-commit-bot@chromium.org>   2017-11-06 18:40:49 +0000
commit     12f322b9d4e98619bd128f39b02d3a6f3b78ba79 (patch)
tree       f08ca36cfb6d55ab1a8e990804764a3fc5b74fbb /src/core/SkScan_AAAPath.cpp
parent     2650bfa089968462a3627c7d60ae3ed637d82d06 (diff)
Simplify fill path call by removing do_fill_path
The git diff is not very informative for this CL.
Here's a clearer summary of the change:
1. do_fill_path is removed and its content is copied to AntiFillPath
2. All calls to do_fill_path are removed.
3. std::function FillPathFunc is removed (and replaced by direct
   AAAFillPath, DAAFillPath, and SAAFillPath calls).
4. The old call chain is:
AntiFillPath -> (AAAFillPath/DAAFillPath/...)
-> do_fill_path
-> specific FillPathFunc
The new call chain is:
AntiFillPath -> AAAFillPath/DAAFillPath/SAAFillPath
This is made possible by the removal of SK_SUPPORT_LEGACY_AA_CHOICE,
which ensures that AntiFillPath is the only function that chooses
among AAA, DAA, and SAA (sketched below).
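
To make the new shape concrete, here is a minimal sketch of the single
choice point. This is not the actual Skia code: ShouldUseDAA and
ShouldUseAAA are hypothetical stand-ins for the real heuristics, the
shared setup computing ir and clipBounds is elided, and DAAFillPath and
SAAFillPath are assumed to share the signature the diff below gives
AAAFillPath.

    // Sketch only. The real AntiFillPath derives ir (the path's rounded
    // bounds) and clipBounds from origClip before dispatching.
    void SkScan::AntiFillPath(const SkPath& path, const SkRegion& origClip,
                              SkBlitter* blitter, bool forceRLE) {
        SkIRect ir, clipBounds;
        // ... shared setup: fill in ir and clipBounds from path/origClip ...
        if (ShouldUseDAA(path)) {           // hypothetical predicate
            DAAFillPath(path, blitter, ir, clipBounds, forceRLE);
        } else if (ShouldUseAAA(path)) {    // hypothetical predicate
            AAAFillPath(path, blitter, ir, clipBounds, forceRLE);
        } else {
            SAAFillPath(path, blitter, ir, clipBounds, forceRLE);
        }
    }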
In the next CL, I'll improve the structure of SkScan::AntiFillPath
to prepare for the Threaded Backend's init-once change.
Bug: skia:
Change-Id: If6ebbdab207cadb7bfe2cb3fcf33ea3d180c3896
Reviewed-on: https://skia-review.googlesource.com/67340
Reviewed-by: Mike Reed <reed@google.com>
Reviewed-by: Cary Clark <caryclark@google.com>
Commit-Queue: Yuqian Li <liyuqian@google.com>
Diffstat (limited to 'src/core/SkScan_AAAPath.cpp')
-rw-r--r--  src/core/SkScan_AAAPath.cpp | 76
1 file changed, 37 insertions, 39 deletions
diff --git a/src/core/SkScan_AAAPath.cpp b/src/core/SkScan_AAAPath.cpp
index d62b151e59..c1f4f41893 100644
--- a/src/core/SkScan_AAAPath.cpp
+++ b/src/core/SkScan_AAAPath.cpp
@@ -1672,48 +1672,46 @@ static SK_ALWAYS_INLINE void aaa_fill_path(const SkPath& path, const SkIRect& cl
 ///////////////////////////////////////////////////////////////////////////////
 
-void SkScan::AAAFillPath(const SkPath& path, const SkRegion& origClip, SkBlitter* blitter,
-                         bool forceRLE) {
-    FillPathFunc fillPathFunc = [](const SkPath& path, SkBlitter* blitter, bool isInverse,
-            const SkIRect& ir, const SkIRect& clipBounds, bool containedInClip, bool forceRLE){
-        // The mask blitter (where we store intermediate alpha values directly in a mask, and then
-        // call the real blitter once in the end to blit the whole mask) is faster than the RLE
-        // blitter when the blit region is small enough (i.e., canHandleRect(ir)).
-        // When isInverse is true, the blit region is no longer ir so we won't use the mask blitter.
-        // The caller may also use the forceRLE flag to force not using the mask blitter.
-        // Also, when the path is a simple rect, preparing a mask and blitting it might have too
-        // much overhead. Hence we'll use blitFatAntiRect to avoid the mask and its overhead.
-        if (MaskAdditiveBlitter::canHandleRect(ir) && !isInverse && !forceRLE) {
+void SkScan::AAAFillPath(const SkPath& path, SkBlitter* blitter, const SkIRect& ir,
+                         const SkIRect& clipBounds, bool forceRLE) {
+    bool isInverse = path.isInverseFillType();
+    bool containedInClip = clipBounds.contains(ir);
+
+    // The mask blitter (where we store intermediate alpha values directly in a mask, and then call
+    // the real blitter once in the end to blit the whole mask) is faster than the RLE blitter when
+    // the blit region is small enough (i.e., canHandleRect(ir)). When isInverse is true, the blit
+    // region is no longer the rectangle ir so we won't use the mask blitter. The caller may also
+    // use the forceRLE flag to force not using the mask blitter. Also, when the path is a simple
+    // rect, preparing a mask and blitting it might have too much overhead. Hence we'll use
+    // blitFatAntiRect to avoid the mask and its overhead.
+    if (MaskAdditiveBlitter::canHandleRect(ir) && !isInverse && !forceRLE) {
 #ifdef SK_SUPPORT_LEGACY_SMALLRECT_AA
+        MaskAdditiveBlitter additiveBlitter(blitter, ir, clipBounds, isInverse);
+        aaa_fill_path(path, clipBounds, &additiveBlitter, ir.fTop, ir.fBottom,
+                      containedInClip, true, forceRLE);
+#else
+        // blitFatAntiRect is slower than the normal AAA flow without MaskAdditiveBlitter.
+        // Hence only tryBlitFatAntiRect when MaskAdditiveBlitter would have been used.
+        if (!TryBlitFatAntiRect(blitter, path, clipBounds)) {
             MaskAdditiveBlitter additiveBlitter(blitter, ir, clipBounds, isInverse);
             aaa_fill_path(path, clipBounds, &additiveBlitter, ir.fTop, ir.fBottom,
                           containedInClip, true, forceRLE);
-#else
-            // blitFatAntiRect is slower than the normal AAA flow without MaskAdditiveBlitter.
-            // Hence only tryBlitFatAntiRect when MaskAdditiveBlitter would have been used.
-            if (!TryBlitFatAntiRect(blitter, path, clipBounds)) {
-                MaskAdditiveBlitter additiveBlitter(blitter, ir, clipBounds, isInverse);
-                aaa_fill_path(path, clipBounds, &additiveBlitter, ir.fTop, ir.fBottom,
-                              containedInClip, true, forceRLE);
-            }
-#endif
-        } else if (!isInverse && path.isConvex()) {
-            // If the filling area is convex (i.e., path.isConvex && !isInverse), our simpler
-            // aaa_walk_convex_edges won't generate alphas above 255. Hence we don't need
-            // SafeRLEAdditiveBlitter (which is slow due to clamping). The basic RLE blitter
-            // RunBasedAdditiveBlitter would suffice.
-            RunBasedAdditiveBlitter additiveBlitter(blitter, ir, clipBounds, isInverse);
-            aaa_fill_path(path, clipBounds, &additiveBlitter, ir.fTop, ir.fBottom,
-                          containedInClip, false, forceRLE);
-        } else {
-            // If the filling area might not be convex, the more involved aaa_walk_edges would
-            // be called and we have to clamp the alpha downto 255. The SafeRLEAdditiveBlitter
-            // does that at a cost of performance.
-            SafeRLEAdditiveBlitter additiveBlitter(blitter, ir, clipBounds, isInverse);
-            aaa_fill_path(path, clipBounds, &additiveBlitter, ir.fTop, ir.fBottom,
-                          containedInClip, false, forceRLE);
         }
-    };
-
-    do_fill_path(path, origClip, blitter, forceRLE, 2, std::move(fillPathFunc));
+#endif
+    } else if (!isInverse && path.isConvex()) {
+        // If the filling area is convex (i.e., path.isConvex && !isInverse), our simpler
+        // aaa_walk_convex_edges won't generate alphas above 255. Hence we don't need
+        // SafeRLEAdditiveBlitter (which is slow due to clamping). The basic RLE blitter
+        // RunBasedAdditiveBlitter would suffice.
+        RunBasedAdditiveBlitter additiveBlitter(blitter, ir, clipBounds, isInverse);
+        aaa_fill_path(path, clipBounds, &additiveBlitter, ir.fTop, ir.fBottom,
+                      containedInClip, false, forceRLE);
+    } else {
+        // If the filling area might not be convex, the more involved aaa_walk_edges would
+        // be called and we have to clamp the alpha downto 255. The SafeRLEAdditiveBlitter
+        // does that at a cost of performance.
+        SafeRLEAdditiveBlitter additiveBlitter(blitter, ir, clipBounds, isInverse);
+        aaa_fill_path(path, clipBounds, &additiveBlitter, ir.fTop, ir.fBottom,
+                      containedInClip, false, forceRLE);
+    }
 }
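
Condensed from the new AAAFillPath above, the blitter choice is a
three-way decision. The following is a paraphrase of the diff, with the
blitter construction and aaa_fill_path calls elided, not verbatim code:

    // Paraphrased summary of the new AAAFillPath body.
    if (MaskAdditiveBlitter::canHandleRect(ir) && !isInverse && !forceRLE) {
        // Small, non-inverse blit region: the mask blitter beats RLE. Without
        // SK_SUPPORT_LEGACY_SMALLRECT_AA, TryBlitFatAntiRect runs first so a
        // simple rect skips the mask's overhead entirely.
    } else if (!isInverse && path.isConvex()) {
        // Convex fill: aaa_walk_convex_edges never produces alpha above 255,
        // so the cheaper RunBasedAdditiveBlitter suffices.
    } else {
        // Possibly non-convex fill: aaa_walk_edges may overshoot, so
        // SafeRLEAdditiveBlitter clamps alpha at some performance cost.
    }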