Diffstat (limited to 'src/core/SkScan_AAAPath.cpp')
-rw-r--r--  src/core/SkScan_AAAPath.cpp  76
1 file changed, 39 insertions(+), 37 deletions(-)
diff --git a/src/core/SkScan_AAAPath.cpp b/src/core/SkScan_AAAPath.cpp
index c1f4f41893..d62b151e59 100644
--- a/src/core/SkScan_AAAPath.cpp
+++ b/src/core/SkScan_AAAPath.cpp
@@ -1672,46 +1672,48 @@ static SK_ALWAYS_INLINE void aaa_fill_path(const SkPath& path, const SkIRect& cl
///////////////////////////////////////////////////////////////////////////////
-void SkScan::AAAFillPath(const SkPath& path, SkBlitter* blitter, const SkIRect& ir,
- const SkIRect& clipBounds, bool forceRLE) {
- bool isInverse = path.isInverseFillType();
- bool containedInClip = clipBounds.contains(ir);
-
- // The mask blitter (where we store intermediate alpha values directly in a mask, and then call
- // the real blitter once in the end to blit the whole mask) is faster than the RLE blitter when
- // the blit region is small enough (i.e., canHandleRect(ir)). When isInverse is true, the blit
- // region is no longer the rectangle ir so we won't use the mask blitter. The caller may also
- // use the forceRLE flag to force not using the mask blitter. Also, when the path is a simple
- // rect, preparing a mask and blitting it might have too much overhead. Hence we'll use
- // blitFatAntiRect to avoid the mask and its overhead.
- if (MaskAdditiveBlitter::canHandleRect(ir) && !isInverse && !forceRLE) {
+void SkScan::AAAFillPath(const SkPath& path, const SkRegion& origClip, SkBlitter* blitter,
+ bool forceRLE) {
+ FillPathFunc fillPathFunc = [](const SkPath& path, SkBlitter* blitter, bool isInverse,
+ const SkIRect& ir, const SkIRect& clipBounds, bool containedInClip, bool forceRLE){
+ // The mask blitter (where we store intermediate alpha values directly in a mask, and then
+ // call the real blitter once in the end to blit the whole mask) is faster than the RLE
+ // blitter when the blit region is small enough (i.e., canHandleRect(ir)).
+        // When isInverse is true, the blit region is no longer ir, so we won't use the mask blitter.
+ // The caller may also use the forceRLE flag to force not using the mask blitter.
+ // Also, when the path is a simple rect, preparing a mask and blitting it might have too
+ // much overhead. Hence we'll use blitFatAntiRect to avoid the mask and its overhead.
+ if (MaskAdditiveBlitter::canHandleRect(ir) && !isInverse && !forceRLE) {
#ifdef SK_SUPPORT_LEGACY_SMALLRECT_AA
- MaskAdditiveBlitter additiveBlitter(blitter, ir, clipBounds, isInverse);
- aaa_fill_path(path, clipBounds, &additiveBlitter, ir.fTop, ir.fBottom,
- containedInClip, true, forceRLE);
-#else
- // blitFatAntiRect is slower than the normal AAA flow without MaskAdditiveBlitter.
- // Hence only tryBlitFatAntiRect when MaskAdditiveBlitter would have been used.
- if (!TryBlitFatAntiRect(blitter, path, clipBounds)) {
MaskAdditiveBlitter additiveBlitter(blitter, ir, clipBounds, isInverse);
aaa_fill_path(path, clipBounds, &additiveBlitter, ir.fTop, ir.fBottom,
containedInClip, true, forceRLE);
- }
+#else
+ // blitFatAntiRect is slower than the normal AAA flow without MaskAdditiveBlitter.
+            // Hence we only try blitFatAntiRect when MaskAdditiveBlitter would have been used.
+ if (!TryBlitFatAntiRect(blitter, path, clipBounds)) {
+ MaskAdditiveBlitter additiveBlitter(blitter, ir, clipBounds, isInverse);
+ aaa_fill_path(path, clipBounds, &additiveBlitter, ir.fTop, ir.fBottom,
+ containedInClip, true, forceRLE);
+ }
#endif
- } else if (!isInverse && path.isConvex()) {
- // If the filling area is convex (i.e., path.isConvex && !isInverse), our simpler
- // aaa_walk_convex_edges won't generate alphas above 255. Hence we don't need
- // SafeRLEAdditiveBlitter (which is slow due to clamping). The basic RLE blitter
- // RunBasedAdditiveBlitter would suffice.
- RunBasedAdditiveBlitter additiveBlitter(blitter, ir, clipBounds, isInverse);
- aaa_fill_path(path, clipBounds, &additiveBlitter, ir.fTop, ir.fBottom,
- containedInClip, false, forceRLE);
- } else {
- // If the filling area might not be convex, the more involved aaa_walk_edges would
- // be called and we have to clamp the alpha downto 255. The SafeRLEAdditiveBlitter
- // does that at a cost of performance.
- SafeRLEAdditiveBlitter additiveBlitter(blitter, ir, clipBounds, isInverse);
- aaa_fill_path(path, clipBounds, &additiveBlitter, ir.fTop, ir.fBottom,
- containedInClip, false, forceRLE);
- }
+ } else if (!isInverse && path.isConvex()) {
+ // If the filling area is convex (i.e., path.isConvex && !isInverse), our simpler
+ // aaa_walk_convex_edges won't generate alphas above 255. Hence we don't need
+ // SafeRLEAdditiveBlitter (which is slow due to clamping). The basic RLE blitter
+ // RunBasedAdditiveBlitter would suffice.
+ RunBasedAdditiveBlitter additiveBlitter(blitter, ir, clipBounds, isInverse);
+ aaa_fill_path(path, clipBounds, &additiveBlitter, ir.fTop, ir.fBottom,
+ containedInClip, false, forceRLE);
+ } else {
+ // If the filling area might not be convex, the more involved aaa_walk_edges would
+            // be called and we have to clamp the alpha down to 255. The SafeRLEAdditiveBlitter
+ // does that at a cost of performance.
+ SafeRLEAdditiveBlitter additiveBlitter(blitter, ir, clipBounds, isInverse);
+ aaa_fill_path(path, clipBounds, &additiveBlitter, ir.fTop, ir.fBottom,
+ containedInClip, false, forceRLE);
+ }
+ };
+
+ do_fill_path(path, origClip, blitter, forceRLE, 2, std::move(fillPathFunc));
}
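
For context, the refactor moves the prologue that the old AAAFillPath computed inline (the integer bounds ir, clipBounds, isInverse, and containedInClip) into the shared do_fill_path driver, so the function now contributes only the blitter-selection policy via the FillPathFunc lambda. The sketch below is a hedged reconstruction of what such a driver plausibly looks like: the callback signature is taken from the diff above, but the body, the early-out handling, and the use of the shift argument (2 at the call site) are illustrative assumptions, not the verbatim SkScanPriv.h code.

// Hedged sketch of the shared driver consumed above. The FillPathFunc
// signature matches the lambda in this diff; everything else is an
// assumption for illustration only.
#include <functional>
#include "SkBlitter.h"
#include "SkPath.h"
#include "SkRegion.h"

using FillPathFunc = std::function<void(const SkPath& path, SkBlitter* blitter, bool isInverse,
                                        const SkIRect& ir, const SkIRect& clipBounds,
                                        bool containedInClip, bool forceRLE)>;

static void do_fill_path(const SkPath& path, const SkRegion& origClip, SkBlitter* blitter,
                         bool forceRLE, const int shiftEdgesUp, FillPathFunc fillPathFunc) {
    bool isInverse = path.isInverseFillType();
    SkIRect ir;
    path.getBounds().roundOut(&ir);        // integer device bounds of the path
    if (ir.isEmpty()) {
        if (isInverse) {
            blitter->blitRegion(origClip); // an empty inverse path fills the whole clip
        }
        return;                            // nothing to scan-convert
    }
    (void)shiftEdgesUp; // assumption: the real driver uses the shift (2 at the
                        // AAA call site) to guard the supersampled coordinate range
    const SkIRect& clipBounds = origClip.getBounds();
    bool containedInClip = clipBounds.contains(ir);
    // All per-caller policy (mask vs. RLE blitter, convex fast path) lives in
    // the callback; the driver only derives the shared geometry facts.
    fillPathFunc(path, blitter, isInverse, ir, clipBounds, containedInClip, forceRLE);
}

The apparent payoff of this shape is that every scan converter sharing do_fill_path gets the clipping and inverse-fill corner cases handled in one place, while each call site supplies only the lambda that picks an additive blitter.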