aboutsummaryrefslogtreecommitdiffhomepage
path: root/src/core/SkScan_AAAPath.cpp
diff options
context:
space:
mode:
authorGravatar Yuqian Li <liyuqian@google.com>2017-01-12 15:35:15 -0500
committerGravatar Skia Commit-Bot <skia-commit-bot@chromium.org>2017-01-12 21:13:20 +0000
commitb46fff60bc82fe6f0c64b2241d854a121f7cb5f9 (patch)
tree02dd344b40691317a7847f77200f050622555d9e /src/core/SkScan_AAAPath.cpp
parent25b60833e7c3dd25f2317b3f0e7af07f04b5beba (diff)
The only difference is that we now put the guard flag SK_SUPPORT_LEGACY_AAA in SkUserConfig.h instead of SkScan.h. Previously, SkAnalyticEdge.cpp doesn't get that flag from SkScan.h and that caused many problems. BUG=skia: TBR=reed@google.com,caryclark@google.com Change-Id: I7b89d3cb64ad71715101d2a5e8e77be3a8a6fa16 Reviewed-on: https://skia-review.googlesource.com/6972 Reviewed-by: Yuqian Li <liyuqian@google.com> Commit-Queue: Yuqian Li <liyuqian@google.com>
Diffstat (limited to 'src/core/SkScan_AAAPath.cpp')
-rw-r--r--src/core/SkScan_AAAPath.cpp669
1 files changed, 589 insertions, 80 deletions
diff --git a/src/core/SkScan_AAAPath.cpp b/src/core/SkScan_AAAPath.cpp
index 8c0a748bdf..126322820f 100644
--- a/src/core/SkScan_AAAPath.cpp
+++ b/src/core/SkScan_AAAPath.cpp
@@ -84,9 +84,13 @@ number of scan lines in our algorithm is only about 3 + H while the
///////////////////////////////////////////////////////////////////////////////
-static inline void addAlpha(SkAlpha& alpha, SkAlpha delta) {
- SkASSERT(alpha + (int)delta <= 256);
- alpha = SkAlphaRuns::CatchOverflow(alpha + (int)delta);
+static inline void addAlpha(SkAlpha* alpha, SkAlpha delta) {
+ SkASSERT(*alpha + (int)delta <= 256);
+ *alpha = SkAlphaRuns::CatchOverflow(*alpha + (int)delta);
+}
+
+static inline void safelyAddAlpha(SkAlpha* alpha, SkAlpha delta) {
+ *alpha = SkTMin(0xFF, *alpha + (int)delta);
}
class AdditiveBlitter : public SkBlitter {
@@ -147,7 +151,7 @@ public:
void blitAntiH(int x, int y, const SkAlpha antialias[], int len) override;
// Allowing following methods are used to blit rectangles during aaa_walk_convex_edges
- // Since there aren't many rectangles, we can still break the slow speed of virtual functions.
+ // Since there aren't many rectangles, we can still bear the slow speed of virtual functions.
void blitAntiH(int x, int y, const SkAlpha alpha) override;
void blitAntiH(int x, int y, int width, const SkAlpha alpha) override;
void blitV(int x, int y, int height, SkAlpha alpha) override;
@@ -227,14 +231,14 @@ void MaskAdditiveBlitter::blitAntiH(int x, int y, const SkAlpha antialias[], int
void MaskAdditiveBlitter::blitAntiH(int x, int y, const SkAlpha alpha) {
SkASSERT(x >= fMask.fBounds.fLeft -1);
- addAlpha(this->getRow(y)[x], alpha);
+ addAlpha(&this->getRow(y)[x], alpha);
}
void MaskAdditiveBlitter::blitAntiH(int x, int y, int width, const SkAlpha alpha) {
SkASSERT(x >= fMask.fBounds.fLeft -1);
uint8_t* row = this->getRow(y);
- for (int i=0; i<width; i++) {
- addAlpha(row[x + i], alpha);
+ for (int i = 0; i < width; ++i) {
+ addAlpha(&row[x + i], alpha);
}
}
@@ -246,7 +250,7 @@ void MaskAdditiveBlitter::blitV(int x, int y, int height, SkAlpha alpha) {
// This must be called as if this is a real blitter.
// So we directly set alpha rather than adding it.
uint8_t* row = this->getRow(y);
- for (int i=0; i<height; i++) {
+ for (int i = 0; i < height; ++i) {
row[x] = alpha;
row += fMask.fRowBytes;
}
@@ -257,7 +261,7 @@ void MaskAdditiveBlitter::blitRect(int x, int y, int width, int height) {
// This must be called as if this is a real blitter.
// So we directly set alpha rather than adding it.
uint8_t* row = this->getRow(y);
- for (int i=0; i<height; i++) {
+ for (int i = 0; i < height; ++i) {
memset(row + x, 0xFF, width);
row += fMask.fRowBytes;
}
@@ -290,7 +294,7 @@ public:
}
}
-private:
+protected:
SkBlitter* fRealBlitter;
/// Current y coordinate
@@ -313,7 +317,7 @@ private:
int fOffsetX;
- inline bool check(int x, int width) {
+ inline bool check(int x, int width) const {
#ifdef SK_DEBUG
if (x < 0 || x + width > fWidth) {
// SkDebugf("Ignore x = %d, width = %d\n", x, width);
@@ -431,8 +435,8 @@ void RunBasedAdditiveBlitter::blitAntiH(int x, int y, const SkAlpha antialias[],
}
fRuns.fRuns[x + i] = 1;
}
- for (int i=0; i<len; i++) {
- addAlpha(fRuns.fAlpha[x + i], antialias[i]);
+ for (int i = 0; i < len; ++i) {
+ addAlpha(&fRuns.fAlpha[x + i], antialias[i]);
}
}
void RunBasedAdditiveBlitter::blitAntiH(int x, int y, const SkAlpha alpha) {
@@ -463,12 +467,85 @@ void RunBasedAdditiveBlitter::blitAntiH(int x, int y, int width, const SkAlpha a
int RunBasedAdditiveBlitter::getWidth() { return fWidth; }
+// This exists specifically for concave path filling.
+// In those cases, we can easily accumulate alpha greater than 0xFF.
+class SafeRLEAdditiveBlitter : public RunBasedAdditiveBlitter {
+public:
+ SafeRLEAdditiveBlitter(SkBlitter* realBlitter, const SkIRect& ir, const SkRegion& clip,
+ bool isInverse) : RunBasedAdditiveBlitter(realBlitter, ir, clip, isInverse) {}
+
+ void blitAntiH(int x, int y, const SkAlpha antialias[], int len) override;
+ void blitAntiH(int x, int y, const SkAlpha alpha) override;
+ void blitAntiH(int x, int y, int width, const SkAlpha alpha) override;
+};
+
+void SafeRLEAdditiveBlitter::blitAntiH(int x, int y, const SkAlpha antialias[], int len) {
+ checkY(y);
+ x -= fLeft;
+
+ if (x < 0) {
+ len += x;
+ antialias -= x;
+ x = 0;
+ }
+ len = SkTMin(len, fWidth - x);
+ SkASSERT(check(x, len));
+
+ if (x < fOffsetX) {
+ fOffsetX = 0;
+ }
+
+ fOffsetX = fRuns.add(x, 0, len, 0, 0, fOffsetX); // Break the run
+ for (int i = 0; i < len; i += fRuns.fRuns[x + i]) {
+ for (int j = 1; j < fRuns.fRuns[x + i]; j++) {
+ fRuns.fRuns[x + i + j] = 1;
+ fRuns.fAlpha[x + i + j] = fRuns.fAlpha[x + i];
+ }
+ fRuns.fRuns[x + i] = 1;
+ }
+ for (int i = 0; i < len; ++i) {
+ safelyAddAlpha(&fRuns.fAlpha[x + i], antialias[i]);
+ }
+}
+
+void SafeRLEAdditiveBlitter::blitAntiH(int x, int y, const SkAlpha alpha) {
+ checkY(y);
+ x -= fLeft;
+
+ if (x < fOffsetX) {
+ fOffsetX = 0;
+ }
+
+ if (check(x, 1)) {
+ // Break the run
+ fOffsetX = fRuns.add(x, 0, 1, 0, 0, fOffsetX);
+ safelyAddAlpha(&fRuns.fAlpha[x], alpha);
+ }
+}
+
+void SafeRLEAdditiveBlitter::blitAntiH(int x, int y, int width, const SkAlpha alpha) {
+ checkY(y);
+ x -= fLeft;
+
+ if (x < fOffsetX) {
+ fOffsetX = 0;
+ }
+
+ if (check(x, width)) {
+ // Break the run
+ fOffsetX = fRuns.add(x, 0, width, 0, 0, fOffsetX);
+ for(int i = x; i < x + width; i += fRuns.fRuns[i]) {
+ safelyAddAlpha(&fRuns.fAlpha[i], alpha);
+ }
+ }
+}
+
///////////////////////////////////////////////////////////////////////////////
// Return the alpha of a trapezoid whose height is 1
static inline SkAlpha trapezoidToAlpha(SkFixed l1, SkFixed l2) {
SkASSERT(l1 >= 0 && l2 >= 0);
- return ((l1 + l2) >> 9);
+ return (l1 + l2) >> 9;
}
// The alpha of right-triangle (a, a*b), in 16 bits
@@ -524,7 +601,7 @@ static inline void computeAlphaAboveLine(SkAlpha* alphas, SkFixed l, SkFixed r,
SkFixed firstH = SkFixedMul(first, dY); // vertical edge of the left-most triangle
alphas[0] = SkFixedMul(first, firstH) >> 9; // triangle alpha
SkFixed alpha16 = firstH + (dY >> 1); // rectangle plus triangle
- for (int i = 1; i < R - 1; i++) {
+ for (int i = 1; i < R - 1; ++i) {
alphas[i] = alpha16 >> 8;
alpha16 += dY;
}
@@ -557,17 +634,19 @@ static inline void computeAlphaBelowLine(
}
// Note that if fullAlpha != 0xFF, we'll multiply alpha by fullAlpha
-static inline void blit_single_alpha(AdditiveBlitter* blitter, int y, int x,
+static SK_ALWAYS_INLINE void blit_single_alpha(AdditiveBlitter* blitter, int y, int x,
SkAlpha alpha, SkAlpha fullAlpha, SkAlpha* maskRow,
- bool isUsingMask) {
+ bool isUsingMask, bool noRealBlitter, bool needSafeCheck) {
if (isUsingMask) {
- if (fullAlpha == 0xFF) {
+ if (fullAlpha == 0xFF && !noRealBlitter) { // noRealBlitter is needed for concave paths
maskRow[x] = alpha;
+ } else if (needSafeCheck) {
+ safelyAddAlpha(&maskRow[x], getPartialAlpha(alpha, fullAlpha));
} else {
- addAlpha(maskRow[x], getPartialAlpha(alpha, fullAlpha));
+ addAlpha(&maskRow[x], getPartialAlpha(alpha, fullAlpha));
}
} else {
- if (fullAlpha == 0xFF) {
+ if (fullAlpha == 0xFF && !noRealBlitter) {
blitter->getRealBlitter()->blitV(x, y, 1, alpha);
} else {
blitter->blitAntiH(x, y, getPartialAlpha(alpha, fullAlpha));
@@ -575,14 +654,19 @@ static inline void blit_single_alpha(AdditiveBlitter* blitter, int y, int x,
}
}
-static inline void blit_two_alphas(AdditiveBlitter* blitter, int y, int x,
+static SK_ALWAYS_INLINE void blit_two_alphas(AdditiveBlitter* blitter, int y, int x,
SkAlpha a1, SkAlpha a2, SkAlpha fullAlpha, SkAlpha* maskRow,
- bool isUsingMask) {
+ bool isUsingMask, bool noRealBlitter, bool needSafeCheck) {
if (isUsingMask) {
- addAlpha(maskRow[x], a1);
- addAlpha(maskRow[x + 1], a2);
+ if (needSafeCheck) {
+ safelyAddAlpha(&maskRow[x], a1);
+ safelyAddAlpha(&maskRow[x + 1], a2);
+ } else {
+ addAlpha(&maskRow[x], a1);
+ addAlpha(&maskRow[x + 1], a2);
+ }
} else {
- if (fullAlpha == 0xFF) {
+ if (fullAlpha == 0xFF && !noRealBlitter) {
blitter->getRealBlitter()->blitAntiH2(x, y, a1, a2);
} else {
blitter->blitAntiH(x, y, a1);
@@ -593,13 +677,18 @@ static inline void blit_two_alphas(AdditiveBlitter* blitter, int y, int x,
// It's important that this is inline. Otherwise it'll be much slower.
static SK_ALWAYS_INLINE void blit_full_alpha(AdditiveBlitter* blitter, int y, int x, int len,
- SkAlpha fullAlpha, SkAlpha* maskRow, bool isUsingMask) {
+ SkAlpha fullAlpha, SkAlpha* maskRow, bool isUsingMask,
+ bool noRealBlitter, bool needSafeCheck) {
if (isUsingMask) {
- for (int i=0; i<len; i++) {
- addAlpha(maskRow[x + i], fullAlpha);
+ for (int i = 0; i < len; ++i) {
+ if (needSafeCheck) {
+ safelyAddAlpha(&maskRow[x + i], fullAlpha);
+ } else {
+ addAlpha(&maskRow[x + i], fullAlpha);
+ }
}
} else {
- if (fullAlpha == 0xFF) {
+ if (fullAlpha == 0xFF && !noRealBlitter) {
blitter->getRealBlitter()->blitH(x, y, len);
} else {
blitter->blitAntiH(x, y, len, fullAlpha);
@@ -610,13 +699,14 @@ static SK_ALWAYS_INLINE void blit_full_alpha(AdditiveBlitter* blitter, int y, in
static void blit_aaa_trapezoid_row(AdditiveBlitter* blitter, int y,
SkFixed ul, SkFixed ur, SkFixed ll, SkFixed lr,
SkFixed lDY, SkFixed rDY, SkAlpha fullAlpha, SkAlpha* maskRow,
- bool isUsingMask) {
+ bool isUsingMask, bool noRealBlitter, bool needSafeCheck) {
int L = SkFixedFloorToInt(ul), R = SkFixedCeilToInt(lr);
int len = R - L;
if (len == 1) {
SkAlpha alpha = trapezoidToAlpha(ur - ul, lr - ll);
- blit_single_alpha(blitter, y, L, alpha, fullAlpha, maskRow, isUsingMask);
+ blit_single_alpha(blitter, y, L, alpha, fullAlpha, maskRow, isUsingMask, noRealBlitter,
+ needSafeCheck);
return;
}
@@ -637,7 +727,7 @@ static void blit_aaa_trapezoid_row(AdditiveBlitter* blitter, int y,
SkAlpha* tempAlphas = alphas + len + 1;
int16_t* runs = (int16_t*)(alphas + (len + 1) * 2);
- for (int i = 0; i < len; i++) {
+ for (int i = 0; i < len; ++i) {
runs[i] = 1;
alphas[i] = fullAlpha;
}
@@ -655,7 +745,7 @@ static void blit_aaa_trapezoid_row(AdditiveBlitter* blitter, int y,
} else {
computeAlphaBelowLine(tempAlphas + uL - L, ul - (uL << 16), ll - (uL << 16),
lDY, fullAlpha);
- for (int i = uL; i < lL; i++) {
+ for (int i = uL; i < lL; ++i) {
if (alphas[i - L] > tempAlphas[i - L]) {
alphas[i - L] -= tempAlphas[i - L];
} else {
@@ -676,7 +766,7 @@ static void blit_aaa_trapezoid_row(AdditiveBlitter* blitter, int y,
} else {
computeAlphaAboveLine(tempAlphas + uR - L, ur - (uR << 16), lr - (uR << 16),
rDY, fullAlpha);
- for (int i = uR; i < lR; i++) {
+ for (int i = uR; i < lR; ++i) {
if (alphas[i - L] > tempAlphas[i - L]) {
alphas[i - L] -= tempAlphas[i - L];
} else {
@@ -686,11 +776,16 @@ static void blit_aaa_trapezoid_row(AdditiveBlitter* blitter, int y,
}
if (isUsingMask) {
- for (int i=0; i<len; i++) {
- addAlpha(maskRow[L + i], alphas[i]);
+ for (int i = 0; i < len; ++i) {
+ if (needSafeCheck) {
+ safelyAddAlpha(&maskRow[L + i], alphas[i]);
+ } else {
+ addAlpha(&maskRow[L + i], alphas[i]);
+ }
}
} else {
- if (fullAlpha == 0xFF) { // Real blitter is faster than RunBasedAdditiveBlitter
+ if (fullAlpha == 0xFF && !noRealBlitter) {
+ // Real blitter is faster than RunBasedAdditiveBlitter
blitter->getRealBlitter()->blitAntiH(L, y, alphas, runs);
} else {
blitter->blitAntiH(L, y, alphas, len);
@@ -702,10 +797,11 @@ static void blit_aaa_trapezoid_row(AdditiveBlitter* blitter, int y,
}
}
-static inline void blit_trapezoid_row(AdditiveBlitter* blitter, int y,
+static SK_ALWAYS_INLINE void blit_trapezoid_row(AdditiveBlitter* blitter, int y,
SkFixed ul, SkFixed ur, SkFixed ll, SkFixed lr,
SkFixed lDY, SkFixed rDY, SkAlpha fullAlpha,
- SkAlpha* maskRow, bool isUsingMask) {
+ SkAlpha* maskRow, bool isUsingMask, bool noRealBlitter = false,
+ bool needSafeCheck = false) {
SkASSERT(lDY >= 0 && rDY >= 0); // We should only send in the absolute value
if (ul > ur) {
@@ -743,16 +839,19 @@ static inline void blit_trapezoid_row(AdditiveBlitter* blitter, int y,
int len = SkFixedCeilToInt(joinLeft - ul);
if (len == 1) {
SkAlpha alpha = trapezoidToAlpha(joinLeft - ul, joinLeft - ll);
- blit_single_alpha(blitter, y, ul >> 16, alpha, fullAlpha, maskRow, isUsingMask);
+ blit_single_alpha(blitter, y, ul >> 16, alpha, fullAlpha, maskRow, isUsingMask,
+ noRealBlitter, needSafeCheck);
} else if (len == 2) {
SkFixed first = joinLeft - SK_Fixed1 - ul;
SkFixed second = ll - ul - first;
SkAlpha a1 = partialTriangleToAlpha(first, lDY);
SkAlpha a2 = fullAlpha - partialTriangleToAlpha(second, lDY);
- blit_two_alphas(blitter, y, ul >> 16, a1, a2, fullAlpha, maskRow, isUsingMask);
+ blit_two_alphas(blitter, y, ul >> 16, a1, a2, fullAlpha, maskRow, isUsingMask,
+ noRealBlitter, needSafeCheck);
} else {
blit_aaa_trapezoid_row(blitter, y, ul, joinLeft, ll, joinLeft, lDY, SK_MaxS32,
- fullAlpha, maskRow, isUsingMask);
+ fullAlpha, maskRow, isUsingMask, noRealBlitter,
+ needSafeCheck);
}
}
// SkAAClip requires that we blit from left to right.
@@ -760,29 +859,30 @@ static inline void blit_trapezoid_row(AdditiveBlitter* blitter, int y,
if (joinLeft < joinRite) {
blit_full_alpha(blitter, y, SkFixedFloorToInt(joinLeft),
SkFixedFloorToInt(joinRite - joinLeft),
- fullAlpha, maskRow, isUsingMask);
+ fullAlpha, maskRow, isUsingMask, noRealBlitter, needSafeCheck);
}
if (lr > joinRite) {
int len = SkFixedCeilToInt(lr - joinRite);
if (len == 1) {
SkAlpha alpha = trapezoidToAlpha(ur - joinRite, lr - joinRite);
blit_single_alpha(blitter, y, joinRite >> 16, alpha, fullAlpha, maskRow,
- isUsingMask);
+ isUsingMask, noRealBlitter, needSafeCheck);
} else if (len == 2) {
SkFixed first = joinRite + SK_Fixed1 - ur;
SkFixed second = lr - ur - first;
SkAlpha a1 = fullAlpha - partialTriangleToAlpha(first, rDY);
SkAlpha a2 = partialTriangleToAlpha(second, rDY);
blit_two_alphas(blitter, y, joinRite >> 16, a1, a2, fullAlpha, maskRow,
- isUsingMask);
+ isUsingMask, noRealBlitter, needSafeCheck);
} else {
blit_aaa_trapezoid_row(blitter, y, joinRite, ur, joinRite, lr, SK_MaxS32, rDY,
- fullAlpha, maskRow, isUsingMask);
+ fullAlpha, maskRow, isUsingMask, noRealBlitter,
+ needSafeCheck);
}
}
} else {
blit_aaa_trapezoid_row(blitter, y, ul, ur, ll, lr, lDY, rDY, fullAlpha, maskRow,
- isUsingMask);
+ isUsingMask, noRealBlitter, needSafeCheck);
}
}
@@ -809,7 +909,7 @@ static SkAnalyticEdge* sort_edges(SkAnalyticEdge* list[], int count, SkAnalyticE
SkTQSort(list, list + count - 1);
// now make the edges linked in sorted order
- for (int i = 1; i < count; i++) {
+ for (int i = 1; i < count; ++i) {
list[i - 1]->fNext = list[i];
list[i]->fPrev = list[i - 1];
}
@@ -899,9 +999,9 @@ static inline bool isSmoothEnough(SkAnalyticEdge* leftE, SkAnalyticEdge* riteE,
return isSmoothEnough(leftE, currE, stop_y) && isSmoothEnough(riteE, nextCurrE, stop_y);
}
-static inline void aaa_walk_convex_edges(SkAnalyticEdge* prevHead, AdditiveBlitter* blitter,
- int start_y, int stop_y, SkFixed leftBound, SkFixed riteBound,
- bool isUsingMask) {
+static inline void aaa_walk_convex_edges(SkAnalyticEdge* prevHead,
+ AdditiveBlitter* blitter, int start_y, int stop_y, SkFixed leftBound, SkFixed riteBound,
+ bool isUsingMask) {
validate_sort((SkAnalyticEdge*)prevHead->fNext);
SkAnalyticEdge* leftE = (SkAnalyticEdge*) prevHead->fNext;
@@ -1130,13 +1230,407 @@ END_WALK:
#endif
}
-void aaa_fill_path(const SkPath& path, const SkIRect& clipRect, AdditiveBlitter* blitter,
- int start_y, int stop_y, bool pathContainedInClip, bool isUsingMask,
- bool forceRLE) { // forceRLE implies that SkAAClip is calling us
- SkASSERT(blitter);
+///////////////////////////////////////////////////////////////////////////////
+
+static inline void remove_edge(SkAnalyticEdge* edge) {
+ edge->fPrev->fNext = edge->fNext;
+ edge->fNext->fPrev = edge->fPrev;
+}
+
+static inline void insert_edge_after(SkAnalyticEdge* edge, SkAnalyticEdge* afterMe) {
+ edge->fPrev = afterMe;
+ edge->fNext = afterMe->fNext;
+ afterMe->fNext->fPrev = edge;
+ afterMe->fNext = edge;
+}
+
+static void backward_insert_edge_based_on_x(SkAnalyticEdge* edge) {
+ SkFixed x = edge->fX;
+ SkAnalyticEdge* prev = edge->fPrev;
+ while (prev->fPrev && prev->fX > x) {
+ prev = prev->fPrev;
+ }
+ if (prev->fNext != edge) {
+ remove_edge(edge);
+ insert_edge_after(edge, prev);
+ }
+}
+
+static SkAnalyticEdge* backward_insert_start(SkAnalyticEdge* prev, SkFixed x) {
+ while (prev->fPrev && prev->fX > x) {
+ prev = prev->fPrev;
+ }
+ return prev;
+}
+
+static inline void updateNextNextY(SkFixed y, SkFixed nextY, SkFixed* nextNextY) {
+ *nextNextY = y > nextY && y < *nextNextY ? y : *nextNextY;
+}
+
+static inline void checkIntersection(const SkAnalyticEdge* edge, SkFixed nextY, SkFixed* nextNextY)
+{
+ if (edge->fPrev->fPrev && edge->fPrev->fX + edge->fPrev->fDX > edge->fX + edge->fDX) {
+ *nextNextY = nextY + (SK_Fixed1 >> SkAnalyticEdge::kDefaultAccuracy);
+ }
+}
+
+static void insert_new_edges(SkAnalyticEdge* newEdge, SkFixed y, SkFixed* nextNextY) {
+ if (newEdge->fUpperY > y) {
+ updateNextNextY(newEdge->fUpperY, y, nextNextY);
+ return;
+ }
+ SkAnalyticEdge* prev = newEdge->fPrev;
+ if (prev->fX <= newEdge->fX) {
+ while (newEdge->fUpperY <= y) {
+ checkIntersection(newEdge, y, nextNextY);
+ updateNextNextY(newEdge->fLowerY, y, nextNextY);
+ newEdge = newEdge->fNext;
+ }
+ updateNextNextY(newEdge->fUpperY, y, nextNextY);
+ return;
+ }
+ // find first x pos to insert
+ SkAnalyticEdge* start = backward_insert_start(prev, newEdge->fX);
+ //insert the lot, fixing up the links as we go
+ do {
+ SkAnalyticEdge* next = newEdge->fNext;
+ do {
+ if (start->fNext == newEdge) {
+ goto nextEdge;
+ }
+ SkAnalyticEdge* after = start->fNext;
+ if (after->fX >= newEdge->fX) {
+ break;
+ }
+ SkASSERT(start != after);
+ start = after;
+ } while (true);
+ remove_edge(newEdge);
+ insert_edge_after(newEdge, start);
+nextEdge:
+ checkIntersection(newEdge, y, nextNextY);
+ updateNextNextY(newEdge->fLowerY, y, nextNextY);
+ start = newEdge;
+ newEdge = next;
+ } while (newEdge->fUpperY <= y);
+ updateNextNextY(newEdge->fUpperY, y, nextNextY);
+}
+
+static void validate_edges_for_y(const SkAnalyticEdge* edge, SkFixed y) {
+#ifdef SK_DEBUG
+ while (edge->fUpperY <= y) {
+ SkASSERT(edge->fPrev && edge->fNext);
+ SkASSERT(edge->fPrev->fNext == edge);
+ SkASSERT(edge->fNext->fPrev == edge);
+ SkASSERT(edge->fUpperY <= edge->fLowerY);
+ SkASSERT(edge->fPrev->fPrev == nullptr || edge->fPrev->fX <= edge->fX);
+ edge = edge->fNext;
+ }
+#endif
+}
+
+// Return true if prev->fX, next->fX are too close in the current pixel row.
+static inline bool edges_too_close(SkAnalyticEdge* prev, SkAnalyticEdge* next, SkFixed lowerY) {
+ // Note that even if the following test failed, the edges might still be very close to each
+ // other at some point within the current pixel row because of prev->fDX and next->fDX.
+ // However, to handle that case, we have to sacrifice more performance.
+ // I think the current quality is good enough (mainly by looking at Nebraska-StateSeal.svg)
+ // so I'll ignore fDX for performance tradeoff.
+ return next && prev && next->fUpperY < lowerY && prev->fX >= next->fX - SkAbs32(next->fDX);
+ // The following is more accurate but also slower.
+ // return (prev && prev->fPrev && next && next->fNext != nullptr && next->fUpperY < lowerY &&
+ // prev->fX + SkAbs32(prev->fDX) >= next->fX - SkAbs32(next->fDX));
+}
+
+// This function exists for the case where the previous rite edge is removed because
+// its fLowerY <= nextY
+static inline bool edges_too_close(int prevRite, SkFixed ul, SkFixed ll) {
+ return prevRite > SkFixedFloorToInt(ul) || prevRite > SkFixedFloorToInt(ll);
+}
+
+static inline void blit_saved_trapezoid(SkAnalyticEdge* leftE, SkFixed lowerY,
+ SkFixed lowerLeft, SkFixed lowerRite,
+ AdditiveBlitter* blitter, SkAlpha* maskRow, bool isUsingMask, bool noRealBlitter,
+ SkFixed leftClip, SkFixed rightClip) {
+ SkAnalyticEdge* riteE = leftE->fRiteE;
+ SkASSERT(riteE);
+ SkASSERT(riteE->fNext == nullptr || leftE->fSavedY == riteE->fSavedY);
+ SkASSERT(SkFixedFloorToInt(lowerY - 1) == SkFixedFloorToInt(leftE->fSavedY));
+ int y = SkFixedFloorToInt(leftE->fSavedY);
+ // Instead of using f2a(lowerY - leftE->fSavedY), we use the following fullAlpha
+ // to eliminate cumulative error: if there are many fractional y scan lines within the
+ // same row, the former may accumulate the rounding error while the latter won't.
+ SkAlpha fullAlpha = f2a(lowerY - SkIntToFixed(y)) - f2a(leftE->fSavedY - SkIntToFixed(y));
+ // We need fSavedDY because the (quad or cubic) edge might be updated
+ blit_trapezoid_row(blitter, y,
+ SkTMax(leftE->fSavedX, leftClip), SkTMin(riteE->fSavedX, rightClip),
+ SkTMax(lowerLeft, leftClip), SkTMin(lowerRite, rightClip),
+ leftE->fSavedDY, riteE->fSavedDY, fullAlpha, maskRow, isUsingMask,
+ noRealBlitter ||
+ (fullAlpha == 0xFF && (edges_too_close(leftE->fPrev, leftE, lowerY)
+ || edges_too_close(riteE, riteE->fNext, lowerY))),
+ true);
+ leftE->fRiteE = nullptr;
+}
+
+static inline void deferred_blit(SkAnalyticEdge* leftE, SkAnalyticEdge* riteE,
+ SkFixed left, SkFixed leftDY, // don't save leftE->fX/fDY as they may have been updated
+ SkFixed y, SkFixed nextY, bool isIntegralNextY, bool leftEnds, bool riteEnds,
+ AdditiveBlitter* blitter, SkAlpha* maskRow, bool isUsingMask, bool noRealBlitter,
+ SkFixed leftClip, SkFixed rightClip, int yShift) {
+ if (leftE->fRiteE && leftE->fRiteE != riteE) {
+ // leftE's right edge changed. Blit the saved trapezoid.
+ SkASSERT(leftE->fRiteE->fNext == nullptr || leftE->fRiteE->fY == y);
+ blit_saved_trapezoid(leftE, y, left, leftE->fRiteE->fX,
+ blitter, maskRow, isUsingMask, noRealBlitter, leftClip, rightClip);
+ }
+ if (!leftE->fRiteE) {
+ // Save and defer blitting the trapezoid
+ SkASSERT(riteE->fRiteE == nullptr);
+ SkASSERT(leftE->fPrev == nullptr || leftE->fY == nextY);
+ SkASSERT(riteE->fNext == nullptr || riteE->fY == y);
+ leftE->saveXY(left, y, leftDY);
+ riteE->saveXY(riteE->fX, y, riteE->fDY);
+ leftE->fRiteE = riteE;
+ }
+ SkASSERT(leftE->fPrev == nullptr || leftE->fY == nextY);
+ riteE->goY(nextY, yShift);
+ // Always blit when edges end or nextY is integral
+ if (isIntegralNextY || leftEnds || riteEnds) {
+ blit_saved_trapezoid(leftE, nextY, leftE->fX, riteE->fX,
+ blitter, maskRow, isUsingMask, noRealBlitter, leftClip, rightClip);
+ }
+}
+
+static void aaa_walk_edges(SkAnalyticEdge* prevHead, SkAnalyticEdge* nextTail,
+ SkPath::FillType fillType, AdditiveBlitter* blitter, int start_y, int stop_y,
+ SkFixed leftClip, SkFixed rightClip, bool isUsingMask, bool forceRLE, bool useDeferred,
+ bool skipIntersect) {
+ prevHead->fX = prevHead->fUpperX = leftClip;
+ nextTail->fX = nextTail->fUpperX = rightClip;
+ SkFixed y = SkTMax(prevHead->fNext->fUpperY, SkIntToFixed(start_y));
+ SkFixed nextNextY = SK_MaxS32;
+
+ {
+ SkAnalyticEdge* edge;
+ for(edge = prevHead->fNext; edge->fUpperY <= y; edge = edge->fNext) {
+ edge->goY(y);
+ updateNextNextY(edge->fLowerY, y, &nextNextY);
+ }
+ updateNextNextY(edge->fUpperY, y, &nextNextY);
+ }
+
+ // returns 1 for evenodd, -1 for winding, regardless of inverse-ness
+ int windingMask = (fillType & 1) ? 1 : -1;
+
+ bool isInverse = SkPath::IsInverseFillType(fillType);
+
+ if (isInverse && SkIntToFixed(start_y) != y) {
+ int width = SkFixedFloorToInt(rightClip - leftClip);
+ if (SkFixedFloorToInt(y) != start_y) {
+ blitter->getRealBlitter()->blitRect(SkFixedFloorToInt(leftClip), start_y,
+ width, SkFixedFloorToInt(y) - start_y);
+ start_y = SkFixedFloorToInt(y);
+ }
+ SkAlpha* maskRow = isUsingMask ? static_cast<MaskAdditiveBlitter*>(blitter)->getRow(start_y)
+ : nullptr;
+ blit_full_alpha(blitter, start_y, SkFixedFloorToInt(leftClip), width,
+ f2a(y - SkIntToFixed(start_y)), maskRow, isUsingMask, false, false);
+ }
+
+ while (true) {
+ int w = 0;
+ bool in_interval = isInverse;
+ SkFixed prevX = prevHead->fX;
+ SkFixed nextY = SkTMin(nextNextY, SkFixedCeilToFixed(y + 1));
+ bool isIntegralNextY = (nextY & (SK_Fixed1 - 1)) == 0;
+ SkAnalyticEdge* currE = prevHead->fNext;
+ SkAnalyticEdge* leftE = prevHead;
+ SkFixed left = leftClip;
+ SkFixed leftDY = 0;
+ bool leftEnds = false;
+ int prevRite = SkFixedFloorToInt(leftClip);
+
+ nextNextY = SK_MaxS32;
+
+ SkASSERT((nextY & ((SK_Fixed1 >> 2) - 1)) == 0);
+ int yShift = 0;
+ if ((nextY - y) & (SK_Fixed1 >> 2)) {
+ yShift = 2;
+ nextY = y + (SK_Fixed1 >> 2);
+ } else if ((nextY - y) & (SK_Fixed1 >> 1)) {
+ yShift = 1;
+ SkASSERT(nextY == y + (SK_Fixed1 >> 1));
+ }
+
+ SkAlpha fullAlpha = f2a(nextY - y);
+
+ // If we're using mask blitter, we advance the mask row in this function
+ // to save some "if" condition checks.
+ SkAlpha* maskRow = nullptr;
+ if (isUsingMask) {
+ maskRow = static_cast<MaskAdditiveBlitter*>(blitter)->getRow(SkFixedFloorToInt(y));
+ }
+
+ SkASSERT(currE->fPrev == prevHead);
+ validate_edges_for_y(currE, y);
+
+ // Even if next - y == SK_Fixed1, we can still break the left-to-right order requirement
+ // of the SKAAClip: |\| (two trapezoids with overlapping middle wedges)
+ bool noRealBlitter = forceRLE; // forceRLE && (nextY - y != SK_Fixed1);
+
+ while (currE->fUpperY <= y) {
+ SkASSERT(currE->fLowerY >= nextY);
+ SkASSERT(currE->fY == y);
+
+ w += currE->fWinding;
+ bool prev_in_interval = in_interval;
+ in_interval = !(w & windingMask) == isInverse;
+
+ bool isLeft = in_interval && !prev_in_interval;
+ bool isRite = !in_interval && prev_in_interval;
+ bool currEnds = currE->fLowerY == nextY;
+
+ if (useDeferred) {
+ if (currE->fRiteE && !isLeft) {
+ // currE is a left edge previously, but now it's not.
+ // Blit the trapezoid between fSavedY and y.
+ SkASSERT(currE->fRiteE->fY == y);
+ blit_saved_trapezoid(currE, y, currE->fX, currE->fRiteE->fX,
+ blitter, maskRow, isUsingMask, noRealBlitter, leftClip, rightClip);
+ }
+ if (leftE->fRiteE == currE && !isRite) {
+ // currE is a right edge previously, but now it's not.
+ // Moreover, its corresponding leftE doesn't change (otherwise we'll handle it
+ // in the previous if clause). Hence we blit the trapezoid.
+ blit_saved_trapezoid(leftE, y, left, currE->fX,
+ blitter, maskRow, isUsingMask, noRealBlitter, leftClip, rightClip);
+ }
+ }
+
+ if (isRite) {
+ if (useDeferred) {
+ deferred_blit(leftE, currE, left, leftDY, y, nextY, isIntegralNextY,
+ leftEnds, currEnds, blitter, maskRow, isUsingMask, noRealBlitter,
+ leftClip, rightClip, yShift);
+ } else {
+ SkFixed rite = currE->fX;
+ currE->goY(nextY, yShift);
+ if (leftE->fDX < 0) {
+ left = SkTMax(leftClip, left);
+ leftE->fX = SkTMax(leftClip, leftE->fX);
+ } else {
+ left = SkTMin(rightClip, left);
+ leftE->fX = SkTMin(rightClip, leftE->fX);
+ }
+ if (currE->fDX < 0) {
+ rite = SkTMax(leftClip, rite);
+ currE->fX = SkTMax(leftClip, currE->fX);
+ } else {
+ rite = SkTMin(rightClip, rite);
+ currE->fX = SkTMin(rightClip, currE->fX);
+ }
+ blit_trapezoid_row(blitter, y >> 16, left, rite, leftE->fX, currE->fX,
+ leftDY, currE->fDY, fullAlpha, maskRow, isUsingMask,
+ noRealBlitter || (fullAlpha == 0xFF && (
+ edges_too_close(prevRite, left, leftE->fX) ||
+ edges_too_close(currE, currE->fNext, nextY)
+ )),
+ true);
+ prevRite = SkFixedCeilToInt(SkTMax(rite, currE->fX));
+ }
+ } else {
+ if (isLeft) {
+ left = currE->fX;
+ leftDY = currE->fDY;
+ leftE = currE;
+ leftEnds = leftE->fLowerY == nextY;
+ }
+ currE->goY(nextY, yShift);
+ }
+
+
+ SkAnalyticEdge* next = currE->fNext;
+ SkFixed newX;
+
+ while (currE->fLowerY <= nextY) {
+ if (currE->fCurveCount < 0) {
+ SkAnalyticCubicEdge* cubicEdge = (SkAnalyticCubicEdge*)currE;
+ cubicEdge->keepContinuous();
+ if (!cubicEdge->updateCubic()) {
+ break;
+ }
+ } else if (currE->fCurveCount > 0) {
+ SkAnalyticQuadraticEdge* quadEdge = (SkAnalyticQuadraticEdge*)currE;
+ quadEdge->keepContinuous();
+ if (!quadEdge->updateQuadratic()) {
+ break;
+ }
+ } else {
+ break;
+ }
+ }
+ SkASSERT(currE->fY == nextY);
+
+ if (currE->fLowerY <= nextY) {
+ remove_edge(currE);
+ } else {
+ updateNextNextY(currE->fLowerY, nextY, &nextNextY);
+ newX = currE->fX;
+ SkASSERT(currE->fLowerY > nextY);
+ if (newX < prevX) { // ripple currE backwards until it is x-sorted
+ // If the crossing edge is a right edge, blit the saved trapezoid.
+ if (leftE->fRiteE == currE && useDeferred) {
+ SkASSERT(leftE->fY == nextY && currE->fY == nextY);
+ blit_saved_trapezoid(leftE, nextY, leftE->fX, currE->fX,
+ blitter, maskRow, isUsingMask, noRealBlitter, leftClip, rightClip);
+ }
+ backward_insert_edge_based_on_x(currE);
+ } else {
+ prevX = newX;
+ }
+ if (!skipIntersect) {
+ checkIntersection(currE, nextY, &nextNextY);
+ }
+ }
+
+ currE = next;
+ SkASSERT(currE);
+ }
+
+ // was our right-edge culled away?
+ if (in_interval) {
+ if (useDeferred) {
+ deferred_blit(leftE, nextTail, left, leftDY, y, nextY, isIntegralNextY,
+ leftEnds, false, blitter, maskRow, isUsingMask, noRealBlitter,
+ leftClip, rightClip, yShift);
+ } else {
+ blit_trapezoid_row(blitter, y >> 16, left, rightClip, leftE->fX, rightClip,
+ leftDY, 0, fullAlpha, maskRow, isUsingMask,
+ noRealBlitter ||
+ (fullAlpha == 0xFF && edges_too_close(leftE->fPrev, leftE, nextY)),
+ true);
+ }
+ }
+
+ if (forceRLE) {
+ ((RunBasedAdditiveBlitter*)blitter)->flush_if_y_changed(y, nextY);
+ }
+
+ y = nextY;
+ if (y >= SkIntToFixed(stop_y)) {
+ break;
+ }
- // we only implemented the convex shapes yet
- SkASSERT(!path.isInverseFillType() && path.isConvex());
+ // now currE points to the first edge with a fUpperY larger than the previous y
+ insert_new_edges(currE, y, &nextNextY);
+ }
+}
+
+static void aaa_fill_path(const SkPath& path, const SkIRect& clipRect,
+ AdditiveBlitter* blitter, int start_y, int stop_y, bool pathContainedInClip,
+ bool isUsingMask, bool forceRLE) { // forceRLE implies that SkAAClip is calling us
+ SkASSERT(blitter);
SkEdgeBuilder builder;
@@ -1166,7 +1660,8 @@ void aaa_fill_path(const SkPath& path, const SkIRect& clipRect, AdditiveBlitter*
rect.fBottom = stop_y;
}
if (!rect.isEmpty()) {
- blitter->blitRect(rect.fLeft, rect.fTop, rect.width(), rect.height());
+ blitter->getRealBlitter()->blitRect(rect.fLeft, rect.fTop,
+ rect.width(), rect.height());
}
}
return;
@@ -1176,6 +1671,7 @@ void aaa_fill_path(const SkPath& path, const SkIRect& clipRect, AdditiveBlitter*
// this returns the first and last edge after they're sorted into a dlink list
SkAnalyticEdge* edge = sort_edges(list, count, &last);
+ headEdge.fRiteE = nullptr;
headEdge.fPrev = nullptr;
headEdge.fNext = edge;
headEdge.fUpperY = headEdge.fLowerY = SK_MinS32;
@@ -1185,13 +1681,14 @@ void aaa_fill_path(const SkPath& path, const SkIRect& clipRect, AdditiveBlitter*
headEdge.fUpperX = SK_MinS32;
edge->fPrev = &headEdge;
+ tailEdge.fRiteE = nullptr;
tailEdge.fPrev = last;
tailEdge.fNext = nullptr;
tailEdge.fUpperY = tailEdge.fLowerY = SK_MaxS32;
- headEdge.fX = SK_MaxS32;
- headEdge.fDX = 0;
- headEdge.fDY = SK_MaxS32;
- headEdge.fUpperX = SK_MaxS32;
+ tailEdge.fX = SK_MaxS32;
+ tailEdge.fDX = 0;
+ tailEdge.fDY = SK_MaxS32;
+ tailEdge.fUpperX = SK_MaxS32;
last->fNext = &tailEdge;
// now edge is the head of the sorted linklist
@@ -1203,22 +1700,32 @@ void aaa_fill_path(const SkPath& path, const SkIRect& clipRect, AdditiveBlitter*
stop_y = clipRect.fBottom;
}
+ SkFixed leftBound = SkIntToFixed(rect.fLeft);
+ SkFixed rightBound = SkIntToFixed(rect.fRight);
+ if (isUsingMask) {
+ // If we're using mask, then we have to limit the bound within the path bounds.
+ // Otherwise, the edge drift may access an invalid address inside the mask.
+ SkIRect ir;
+ path.getBounds().roundOut(&ir);
+ leftBound = SkTMax(leftBound, SkIntToFixed(ir.fLeft));
+ rightBound = SkTMin(rightBound, SkIntToFixed(ir.fRight));
+ }
+
if (!path.isInverseFillType() && path.isConvex()) {
SkASSERT(count >= 2); // convex walker does not handle missing right edges
- SkFixed leftBound = SkIntToFixed(rect.fLeft);
- SkFixed rightBound = SkIntToFixed(rect.fRight);
- if (isUsingMask) {
- // If we're using mask, then we have to limit the bound within the path bounds.
- // Otherwise, the edge drift may access an invalid address inside the mask.
- SkIRect ir;
- path.getBounds().roundOut(&ir);
- leftBound = SkTMax(leftBound, SkIntToFixed(ir.fLeft));
- rightBound = SkTMin(rightBound, SkIntToFixed(ir.fRight));
- }
aaa_walk_convex_edges(&headEdge, blitter, start_y, stop_y,
leftBound, rightBound, isUsingMask);
} else {
- SkFAIL("Concave AAA is not yet implemented!");
+ // Only use deferred blitting if there are many edges.
+ bool useDeferred = count >
+ (SkFixedFloorToInt(tailEdge.fPrev->fLowerY - headEdge.fNext->fUpperY) + 1) * 4;
+
+ // We skip intersection computation if there are many points which probably already
+ // give us enough fractional scan lines.
+ bool skipIntersect = path.countPoints() > (stop_y - start_y) / 2;
+
+ aaa_walk_edges(&headEdge, &tailEdge, path.getFillType(), blitter, start_y, stop_y,
+ leftBound, rightBound, isUsingMask, forceRLE, useDeferred, skipIntersect);
}
}
@@ -1268,11 +1775,13 @@ void SkScan::AAAFillPath(const SkPath& path, const SkRegion& origClip, SkBlitter
if (origClip.isEmpty()) {
return;
}
+ #ifdef SK_SUPPORT_LEGACY_AAA
if (path.isInverseFillType() || !path.isConvex()) {
// Fall back as we only implemented the algorithm for convex shapes yet.
SkScan::AntiFillPath(path, origClip, blitter, forceRLE);
return;
}
+ #endif
const bool isInverse = path.isInverseFillType();
SkIRect ir;
@@ -1337,9 +1846,7 @@ void SkScan::AAAFillPath(const SkPath& path, const SkRegion& origClip, SkBlitter
blitter = clipper.getBlitter();
if (isInverse) {
- // Currently, we use the old path to render the inverse path,
- // so we don't need this.
- // sk_blit_above(blitter, ir, *clipRgn);
+ sk_blit_above(blitter, ir, *clipRgn);
}
SkASSERT(SkIntToScalar(ir.fTop) <= path.getBounds().fTop);
@@ -1348,16 +1855,18 @@ void SkScan::AAAFillPath(const SkPath& path, const SkRegion& origClip, SkBlitter
MaskAdditiveBlitter additiveBlitter(blitter, ir, *clipRgn, isInverse);
aaa_fill_path(path, clipRgn->getBounds(), &additiveBlitter, ir.fTop, ir.fBottom,
clipRect == nullptr, true, forceRLE);
- } else {
+ } else if (!isInverse && path.isConvex()) {
RunBasedAdditiveBlitter additiveBlitter(blitter, ir, *clipRgn, isInverse);
aaa_fill_path(path, clipRgn->getBounds(), &additiveBlitter, ir.fTop, ir.fBottom,
clipRect == nullptr, false, forceRLE);
+ } else {
+ SafeRLEAdditiveBlitter additiveBlitter(blitter, ir, *clipRgn, isInverse);
+ aaa_fill_path(path, clipRgn->getBounds(), &additiveBlitter, ir.fTop, ir.fBottom,
+ clipRect == nullptr, false, forceRLE);
}
if (isInverse) {
- // Currently, we use the old path to render the inverse path,
- // so we don't need this.
- // sk_blit_below(blitter, ir, *clipRgn);
+ sk_blit_below(blitter, ir, *clipRgn);
}
}