path: root/src/core/SkScan_AAAPath.cpp
author    Yuqian Li <liyuqian@google.com>  2017-09-28 10:58:38 -0400
committer Skia Commit-Bot <skia-commit-bot@chromium.org>  2017-09-28 15:22:28 +0000
commit    3a5e1fee080cb82c29254c53b2c67b86a7713760 (patch)
tree      cb28c8a6f8508092e515520768a2f2908830bc6e /src/core/SkScan_AAAPath.cpp
parent    837c6c7c0cc76bdb9d61a05244ca5f31e7573c37 (diff)
Simplify scan converter's args
All scan converters need an SkIRect clipBounds and a bool containedInClip. However, we previously passed in an SkRegion and an SkIRect*, and converted them into clipBounds and containedInClip all over the place. This CL performs that conversion only once, inside do_fill_path, and changes all args to SkIRect and bool.

Bug: skia:
Change-Id: I05f1d76322942d8817860fd33991f7f7ce918e7c
Reviewed-on: https://skia-review.googlesource.com/52741
Reviewed-by: Cary Clark <caryclark@google.com>
Commit-Queue: Yuqian Li <liyuqian@google.com>
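For context, a minimal sketch of the one-time conversion described above. The wrapper below is illustrative only (the real do_fill_path carries more parameters), but the derivation of clipBounds and containedInClip matches the new FillPathFunc signature in the diff:

// Illustrative sketch, not the actual Skia source: reduce the clip to an
// SkIRect plus a bool exactly once, then pass plain values to the callback.
static void do_fill_path_sketch(const SkPath& path, const SkRegion& origClip,
                                const SkIRect* clipRect, SkBlitter* blitter,
                                bool forceRLE, FillPathFunc fillPathFunc) {
    const SkIRect& clipBounds = origClip.getBounds();
    // A null clipRect previously signaled that the target rect is already
    // fully contained in the clip; each blitter re-derived this on its own.
    const bool containedInClip = (clipRect == nullptr);

    SkIRect ir;  // integer bounds of the path
    path.getBounds().roundOut(&ir);

    fillPathFunc(path, blitter, path.isInverseFillType(), ir,
                 clipBounds, containedInClip, forceRLE);
}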
Diffstat (limited to 'src/core/SkScan_AAAPath.cpp')
-rw-r--r--  src/core/SkScan_AAAPath.cpp | 38
1 file changed, 18 insertions(+), 20 deletions(-)
diff --git a/src/core/SkScan_AAAPath.cpp b/src/core/SkScan_AAAPath.cpp
index b6377edbce..d62b151e59 100644
--- a/src/core/SkScan_AAAPath.cpp
+++ b/src/core/SkScan_AAAPath.cpp
@@ -134,7 +134,7 @@ public:
// We need this mask blitter because it significantly accelerates small path filling.
class MaskAdditiveBlitter : public AdditiveBlitter {
public:
- MaskAdditiveBlitter(SkBlitter* realBlitter, const SkIRect& ir, const SkRegion& clip,
+ MaskAdditiveBlitter(SkBlitter* realBlitter, const SkIRect& ir, const SkIRect& clipBounds,
bool isInverse);
~MaskAdditiveBlitter() override {
fRealBlitter->blitMask(fMask, fClipRect);
@@ -202,7 +202,7 @@ private:
};
MaskAdditiveBlitter::MaskAdditiveBlitter(
- SkBlitter* realBlitter, const SkIRect& ir, const SkRegion& clip, bool isInverse) {
+ SkBlitter* realBlitter, const SkIRect& ir, const SkIRect& clipBounds, bool isInverse) {
SkASSERT(canHandleRect(ir));
SkASSERT(!isInverse);
@@ -217,7 +217,7 @@ MaskAdditiveBlitter::MaskAdditiveBlitter(
fRow = nullptr;
fClipRect = ir;
- if (!fClipRect.intersect(clip.getBounds())) {
+ if (!fClipRect.intersect(clipBounds)) {
SkASSERT(0);
fClipRect.setEmpty();
}
@@ -276,7 +276,7 @@ void MaskAdditiveBlitter::blitAntiRect(int x, int y, int width, int height,
class RunBasedAdditiveBlitter : public AdditiveBlitter {
public:
- RunBasedAdditiveBlitter(SkBlitter* realBlitter, const SkIRect& ir, const SkRegion& clip,
+ RunBasedAdditiveBlitter(SkBlitter* realBlitter, const SkIRect& ir, const SkIRect& clipBounds,
bool isInverse);
~RunBasedAdditiveBlitter() override;
@@ -372,16 +372,16 @@ protected:
};
RunBasedAdditiveBlitter::RunBasedAdditiveBlitter(
- SkBlitter* realBlitter, const SkIRect& ir, const SkRegion& clip, bool isInverse) {
+ SkBlitter* realBlitter, const SkIRect& ir, const SkIRect& clipBounds, bool isInverse) {
fRealBlitter = realBlitter;
SkIRect sectBounds;
if (isInverse) {
// We use the clip bounds instead of the ir, since we may be asked to
// draw outside of the rect when we're an inverse filltype
- sectBounds = clip.getBounds();
+ sectBounds = clipBounds;
} else {
- if (!sectBounds.intersect(ir, clip.getBounds())) {
+ if (!sectBounds.intersect(ir, clipBounds)) {
sectBounds.setEmpty();
}
}
@@ -471,8 +471,8 @@ int RunBasedAdditiveBlitter::getWidth() { return fWidth; }
// In those cases, we can easily accumulate alpha greater than 0xFF.
class SafeRLEAdditiveBlitter : public RunBasedAdditiveBlitter {
public:
- SafeRLEAdditiveBlitter(SkBlitter* realBlitter, const SkIRect& ir, const SkRegion& clip,
- bool isInverse) : RunBasedAdditiveBlitter(realBlitter, ir, clip, isInverse) {}
+ SafeRLEAdditiveBlitter(SkBlitter* realBlitter, const SkIRect& ir, const SkIRect& clipBounds,
+ bool isInverse) : RunBasedAdditiveBlitter(realBlitter, ir, clipBounds, isInverse) {}
void blitAntiH(int x, int y, const SkAlpha antialias[], int len) override;
void blitAntiH(int x, int y, const SkAlpha alpha) override;
@@ -1675,9 +1675,7 @@ static SK_ALWAYS_INLINE void aaa_fill_path(const SkPath& path, const SkIRect& cl
void SkScan::AAAFillPath(const SkPath& path, const SkRegion& origClip, SkBlitter* blitter,
bool forceRLE) {
FillPathFunc fillPathFunc = [](const SkPath& path, SkBlitter* blitter, bool isInverse,
- const SkIRect& ir, const SkRegion* clipRgn, const SkIRect* clipRect, bool forceRLE){
- const SkIRect& clipBounds = clipRgn->getBounds();
-
+ const SkIRect& ir, const SkIRect& clipBounds, bool containedInClip, bool forceRLE){
// The mask blitter (where we store intermediate alpha values directly in a mask, and then
// call the real blitter once in the end to blit the whole mask) is faster than the RLE
// blitter when the blit region is small enough (i.e., canHandleRect(ir)).
@@ -1687,16 +1685,16 @@ void SkScan::AAAFillPath(const SkPath& path, const SkRegion& origClip, SkBlitter
// much overhead. Hence we'll use blitFatAntiRect to avoid the mask and its overhead.
if (MaskAdditiveBlitter::canHandleRect(ir) && !isInverse && !forceRLE) {
#ifdef SK_SUPPORT_LEGACY_SMALLRECT_AA
- MaskAdditiveBlitter additiveBlitter(blitter, ir, *clipRgn, isInverse);
+ MaskAdditiveBlitter additiveBlitter(blitter, ir, clipBounds, isInverse);
aaa_fill_path(path, clipBounds, &additiveBlitter, ir.fTop, ir.fBottom,
- clipRect == nullptr, true, forceRLE);
+ containedInClip, true, forceRLE);
#else
// blitFatAntiRect is slower than the normal AAA flow without MaskAdditiveBlitter.
// Hence we only try TryBlitFatAntiRect when MaskAdditiveBlitter would have been used.
if (!TryBlitFatAntiRect(blitter, path, clipBounds)) {
- MaskAdditiveBlitter additiveBlitter(blitter, ir, *clipRgn, isInverse);
+ MaskAdditiveBlitter additiveBlitter(blitter, ir, clipBounds, isInverse);
aaa_fill_path(path, clipBounds, &additiveBlitter, ir.fTop, ir.fBottom,
- clipRect == nullptr, true, forceRLE);
+ containedInClip, true, forceRLE);
}
#endif
} else if (!isInverse && path.isConvex()) {
@@ -1704,16 +1702,16 @@ void SkScan::AAAFillPath(const SkPath& path, const SkRegion& origClip, SkBlitter
// aaa_walk_convex_edges won't generate alphas above 255. Hence we don't need
// SafeRLEAdditiveBlitter (which is slow due to clamping). The basic RLE blitter
// RunBasedAdditiveBlitter would suffice.
- RunBasedAdditiveBlitter additiveBlitter(blitter, ir, *clipRgn, isInverse);
+ RunBasedAdditiveBlitter additiveBlitter(blitter, ir, clipBounds, isInverse);
aaa_fill_path(path, clipBounds, &additiveBlitter, ir.fTop, ir.fBottom,
- clipRect == nullptr, false, forceRLE);
+ containedInClip, false, forceRLE);
} else {
// If the filling area might not be convex, the more involved aaa_walk_edges would
be called and we have to clamp the alpha down to 255. The SafeRLEAdditiveBlitter
// does that at a cost of performance.
- SafeRLEAdditiveBlitter additiveBlitter(blitter, ir, *clipRgn, isInverse);
+ SafeRLEAdditiveBlitter additiveBlitter(blitter, ir, clipBounds, isInverse);
aaa_fill_path(path, clipBounds, &additiveBlitter, ir.fTop, ir.fBottom,
- clipRect == nullptr, false, forceRLE);
+ containedInClip, false, forceRLE);
}
};
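For illustration, here is the blitter selection that the lambda above implements, condensed into one hypothetical function. aaa_dispatch_sketch is not real Skia code, the legacy SK_SUPPORT_LEGACY_SMALLRECT_AA / TryBlitFatAntiRect branch is omitted, and the "mask?" label is a guess at the meaning of aaa_fill_path's boolean, but the constructor calls and argument order mirror the diff:

// Hypothetical condensation of SkScan::AAAFillPath's blitter selection,
// showing the simplified (SkIRect, bool) plumbing end to end.
static void aaa_dispatch_sketch(const SkPath& path, SkBlitter* blitter,
                                const SkIRect& ir, const SkIRect& clipBounds,
                                bool containedInClip, bool isInverse,
                                bool forceRLE) {
    if (MaskAdditiveBlitter::canHandleRect(ir) && !isInverse && !forceRLE) {
        // Small blit region: accumulate alpha in a mask, blit once at the end.
        MaskAdditiveBlitter additiveBlitter(blitter, ir, clipBounds, isInverse);
        aaa_fill_path(path, clipBounds, &additiveBlitter, ir.fTop, ir.fBottom,
                      containedInClip, true /* mask? */, forceRLE);
    } else if (!isInverse && path.isConvex()) {
        // Convex walk never accumulates alpha above 255, so no clamping needed.
        RunBasedAdditiveBlitter additiveBlitter(blitter, ir, clipBounds, isInverse);
        aaa_fill_path(path, clipBounds, &additiveBlitter, ir.fTop, ir.fBottom,
                      containedInClip, false /* mask? */, forceRLE);
    } else {
        // General walk may exceed 255; SafeRLEAdditiveBlitter clamps it.
        SafeRLEAdditiveBlitter additiveBlitter(blitter, ir, clipBounds, isInverse);
        aaa_fill_path(path, clipBounds, &additiveBlitter, ir.fTop, ir.fBottom,
                      containedInClip, false /* mask? */, forceRLE);
    }
}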