path: root/src/core/SkScan_AAAPath.cpp
author    Yuqian Li <liyuqian@google.com>    2016-11-16 11:54:48 -0500
committer Yuqian Li <liyuqian@google.com>    2016-11-16 18:05:06 +0000
commit    721625b25e3e99d23b7765c75ba0b2ae5a351f7e (patch)
tree      278f2478a11303e9c2bc3044e4cec926af97d542 /src/core/SkScan_AAAPath.cpp
parent    58bf693ffd9faab97eff69bc21203c138f142564 (diff)
Drop forceRLE to simplify aaa_walk_convex_edges
The virtual flush function doesn't seem to affect the performance much. There may be a 1% drop in nanobench for fill_big_triangle and fill_big_circle, but that's too small a change for nanobench to reliably differentiate.

The smooth jump (ignore fractional y if edges don't change their directions significantly) no longer needs to be guarded against SkAAClip because our recent CL (https://skia-review.googlesource.com/c/4636/) makes left/rightBound much tighter.

BUG=skia:
GOLD_TRYBOT_URL= https://gold.skia.org/search?issue=4899

Change-Id: If323013b810cc1ff5f6dbb868a8981354ee6f9b5
Reviewed-on: https://skia-review.googlesource.com/4899
Reviewed-by: Mike Reed <reed@google.com>
Reviewed-by: Stan Iliev <stani@google.com>
Commit-Queue: Yuqian Li <liyuqian@google.com>
Commit-Queue: Stan Iliev <stani@google.com>
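The patch below replaces the forceRLE flag and the explicit RunBasedAdditiveBlitter cast with a virtual hook on AdditiveBlitter. As a minimal self-contained sketch of that dispatch (the types here are trimmed stand-ins for the Skia classes in this patch, not the real implementations):

    #include <cstdint>
    #include <cstdio>

    using SkFixed = int32_t;                                   // 16.16 fixed point, as in Skia
    static inline int SkFixedFloorToInt(SkFixed x) { return x >> 16; }

    struct AdditiveBlitter {
        virtual ~AdditiveBlitter() = default;
        // Flush the additive alpha cache if floor(y) and floor(nextY) are different
        // (i.e., we'll start working on a new pixel row).
        virtual void flush_if_y_changed(SkFixed y, SkFixed nextY) = 0;
    };

    struct MaskAdditiveBlitter : AdditiveBlitter {
        // The mask blitter accumulates into a full mask, so no per-row flush is needed.
        void flush_if_y_changed(SkFixed, SkFixed) override {}
    };

    struct RunBasedAdditiveBlitter : AdditiveBlitter {
        void flush() { std::printf("flush row\n"); }           // stand-in for the real RLE flush
        void flush_if_y_changed(SkFixed y, SkFixed nextY) override {
            if (SkFixedFloorToInt(y) != SkFixedFloorToInt(nextY)) {
                this->flush();                                 // emit the finished scan line in order
            }
        }
    };

    int main() {
        RunBasedAdditiveBlitter rle;
        MaskAdditiveBlitter mask;
        AdditiveBlitter* b = &rle;
        b->flush_if_y_changed(0x18000, 0x20000);               // y goes 1.5 -> 2.0: new row, so the RLE blitter flushes
        b = &mask;
        b->flush_if_y_changed(0x18000, 0x20000);               // no-op for the mask blitter
    }

Only the run-based (RLE) blitter needs to emit each scan line in order before moving on, which is why the mask blitter's override can be empty and the walker can call the hook unconditionally.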
Diffstat (limited to 'src/core/SkScan_AAAPath.cpp')
-rw-r--r--  src/core/SkScan_AAAPath.cpp | 47
1 file changed, 16 insertions(+), 31 deletions(-)
diff --git a/src/core/SkScan_AAAPath.cpp b/src/core/SkScan_AAAPath.cpp
index 1c989f86fd..8bfe7bbb1b 100644
--- a/src/core/SkScan_AAAPath.cpp
+++ b/src/core/SkScan_AAAPath.cpp
@@ -120,6 +120,10 @@ public:
}
virtual int getWidth() = 0;
+
+ // Flush the additive alpha cache if floor(y) and floor(nextY) are different
+ // (i.e., we'll start working on a new pixel row).
+ virtual void flush_if_y_changed(SkFixed y, SkFixed nextY) = 0;
};
// We need this mask blitter because it significantly accelerates small path filling.
@@ -150,6 +154,9 @@ public:
void blitAntiRect(int x, int y, int width, int height,
SkAlpha leftAlpha, SkAlpha rightAlpha) override;
+ // The flush is only needed for RLE (RunBasedAdditiveBlitter)
+ void flush_if_y_changed(SkFixed y, SkFixed nextY) override {}
+
int getWidth() override { return fClipRect.width(); }
static bool canHandleRect(const SkIRect& bounds) {
@@ -276,14 +283,7 @@ public:
int getWidth() override;
- // This should only be called when forceRLE = true which implies that SkAAClip
- // is calling us.
- // SkAAClip requires that we blit in scan-line order so we have to flush
- // for each row in order. Without this, we may have the first row unflushed,
- // then blit the 2nd and the 3rd row with full alpha (so we won't flush the first row);
- // finally when we blit the fourth row, we trigger the first row to flush, and this
- // would cause SkAAClip to crash.
- inline void flush_if_y_changed(SkFixed y, SkFixed nextY) {
+ void flush_if_y_changed(SkFixed y, SkFixed nextY) override {
if (SkFixedFloorToInt(y) != SkFixedFloorToInt(nextY)) {
this->flush();
}
@@ -900,7 +900,7 @@ static inline bool isSmoothEnough(SkAnalyticEdge* leftE, SkAnalyticEdge* riteE,
static inline void aaa_walk_convex_edges(SkAnalyticEdge* prevHead, AdditiveBlitter* blitter,
int start_y, int stop_y, SkFixed leftBound, SkFixed riteBound,
- bool isUsingMask, bool forceRLE) {
+ bool isUsingMask) {
validate_sort((SkAnalyticEdge*)prevHead->fNext);
SkAnalyticEdge* leftE = (SkAnalyticEdge*) prevHead->fNext;
@@ -956,11 +956,7 @@ static inline void aaa_walk_convex_edges(SkAnalyticEdge* prevHead, AdditiveBlitt
}
SkFixed local_bot_fixed = SkMin32(leftE->fLowerY, riteE->fLowerY);
- // Skip the fractional y if edges are changing smoothly.
- // If forceRLE is true, we won't skip the fractional y because it
- // implies that SkAAClip is calling us and there are strict
- // assertions inside SkAAClip.
- if (isSmoothEnough(leftE, riteE, currE, stop_y) && !forceRLE) {
+ if (isSmoothEnough(leftE, riteE, currE, stop_y)) {
local_bot_fixed = SkFixedCeilToFixed(local_bot_fixed);
}
local_bot_fixed = SkMin32(local_bot_fixed, SkIntToFixed(stop_y));
@@ -995,9 +991,7 @@ static inline void aaa_walk_convex_edges(SkAnalyticEdge* prevHead, AdditiveBlitt
blitter->blitAntiH(fullRite, fullTop - 1,
f2a(SkFixedMul_lowprec(partialTop, partialRite)));
}
- if (forceRLE) {
- ((RunBasedAdditiveBlitter*)blitter)->flush_if_y_changed(y, y + partialTop);
- }
+ blitter->flush_if_y_changed(y, y + partialTop);
}
// Blit all full-height rows from fullTop to fullBot
@@ -1025,9 +1019,7 @@ static inline void aaa_walk_convex_edges(SkAnalyticEdge* prevHead, AdditiveBlitt
if (partialTop > 0) {
blitter->getRealBlitter()->blitV(fullLeft - 1, fullTop - 1, 1,
f2a(SkFixedMul_lowprec(partialTop, rite - left)));
- if (forceRLE) {
- ((RunBasedAdditiveBlitter*)blitter)->flush_if_y_changed(y, y + partialTop);
- }
+ blitter->flush_if_y_changed(y, y + partialTop);
}
if (fullBot > fullTop) {
blitter->getRealBlitter()->blitV(fullLeft - 1, fullTop, fullBot - fullTop,
@@ -1083,9 +1075,7 @@ static inline void aaa_walk_convex_edges(SkAnalyticEdge* prevHead, AdditiveBlitt
blit_trapezoid_row(blitter, y >> 16, left & kSnapMask, rite & kSnapMask,
nextLeft & kSnapMask, nextRite & kSnapMask, leftE->fDY, riteE->fDY,
getPartialAlpha(0xFF, dY), maskRow, isUsingMask);
- if (forceRLE) {
- ((RunBasedAdditiveBlitter*)blitter)->flush_if_y_changed(y, nextY);
- }
+ blitter->flush_if_y_changed(y, nextY);
left = nextLeft; rite = nextRite; y = nextY;
}
@@ -1101,9 +1091,7 @@ static inline void aaa_walk_convex_edges(SkAnalyticEdge* prevHead, AdditiveBlitt
blit_trapezoid_row(blitter, y >> 16, left & kSnapMask, rite & kSnapMask,
nextLeft & kSnapMask, nextRite & kSnapMask,
leftE->fDY, riteE->fDY, 0xFF, maskRow, isUsingMask);
- if (forceRLE) {
- ((RunBasedAdditiveBlitter*)blitter)->flush_if_y_changed(y, nextY);
- }
+ blitter->flush_if_y_changed(y, nextY);
left = nextLeft; rite = nextRite; y = nextY;
}
}
@@ -1124,9 +1112,7 @@ static inline void aaa_walk_convex_edges(SkAnalyticEdge* prevHead, AdditiveBlitt
blit_trapezoid_row(blitter, y >> 16, left & kSnapMask, rite & kSnapMask,
nextLeft & kSnapMask, nextRite & kSnapMask, leftE->fDY, riteE->fDY,
getPartialAlpha(0xFF, dY), maskRow, isUsingMask);
- if (forceRLE) {
- ((RunBasedAdditiveBlitter*)blitter)->flush_if_y_changed(y, local_bot_fixed);
- }
+ blitter->flush_if_y_changed(y, local_bot_fixed);
left = nextLeft; rite = nextRite; y = local_bot_fixed;
left -= kSnapHalf; rite -= kSnapHalf;
}
@@ -1219,8 +1205,7 @@ void aaa_fill_path(const SkPath& path, const SkIRect& clipRect, AdditiveBlitter*
if (!path.isInverseFillType() && path.isConvex()) {
SkASSERT(count >= 2); // convex walker does not handle missing right edges
aaa_walk_convex_edges(&headEdge, blitter, start_y, stop_y,
- rect.fLeft << 16, rect.fRight << 16, isUsingMask,
- forceRLE);
+ rect.fLeft << 16, rect.fRight << 16, isUsingMask);
} else {
SkFAIL("Concave AAA is not yet implemented!");
}
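As a side note, the "smooth jump" that no longer needs the forceRLE guard simply rounds the local bottom up to the next whole scanline so the walker can blit full-height rows. A tiny illustration, using a local stand-in for the 16.16 fixed-point ceiling macro rather than the real SkFixed.h definition:

    #include <cstdint>
    #include <cassert>

    using SkFixed = int32_t;                                        // 16.16 fixed point
    static inline SkFixed SkFixedCeilToFixed(SkFixed x) { return (x + 0xFFFF) & ~0xFFFF; }

    int main() {
        SkFixed local_bot_fixed = 0x00053C00;                       // y = 5.234375
        local_bot_fixed = SkFixedCeilToFixed(local_bot_fixed);      // snap up to a whole pixel row
        assert(local_bot_fixed == 0x00060000);                      // y = 6.0
        return 0;
    }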