author    Yuqian Li <liyuqian@google.com>                 2018-02-12 17:02:30 +0800
committer Skia Commit-Bot <skia-commit-bot@chromium.org>  2018-02-14 13:20:17 +0000
commit    36fa0ac9b5716cad25d60dcab98e3f127fb2a000 (patch)
tree      3f304aa8ba8062c0f6a2d3a7f3d657b6e013a949 /src
parent    bc8bb02f3fb94eeb0a66871d68284dc903a1f685 (diff)
Add SkDAARecord to prepare for init-once
This is just refactoring our code so we can easily bring DAA to init-once.
No GMs are expected to change.

Bug: skia:
Change-Id: I05dd1bdfb68bb40b5393ee854de51795b55ed426
Reviewed-on: https://skia-review.googlesource.com/106480
Reviewed-by: Cary Clark <caryclark@google.com>
Commit-Queue: Yuqian Li <liyuqian@google.com>
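For context, the init-once flow this record enables looks roughly like the sketch below: one pass computes coverage into the record, and a later pass replays it through a real blitter. This is a minimal sketch, assuming the top-level AntiFillPath tolerates a null blitter during the compute pass (the new comment in SkScan_DAAPath.cpp describes this pattern at the DAAFillPath level); path, rasterClip, and blitter are assumed to exist, and only SkDAARecord and the SkDAARecord* parameter come from this change:

    SkSTArenaAlloc<4096> alloc;   // must outlive both phases
    SkDAARecord record(&alloc);   // starts as Type::kToBeComputed

    // Phase 1 (may run out of order, e.g. on a worker thread): compute the
    // coverage mask or delta list into the record without touching a blitter.
    SkScan::AntiFillPath(path, rasterClip, /*blitter=*/nullptr, &record);

    // Phase 2 (must respect draw order): replay the precomputed coverage.
    SkScan::AntiFillPath(path, rasterClip, blitter, &record);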
Diffstat (limited to 'src')
-rw-r--r--  src/core/SkCoverageDelta.h   | 14
-rw-r--r--  src/core/SkScan.h            | 10
-rw-r--r--  src/core/SkScan_AntiPath.cpp | 12
-rw-r--r--  src/core/SkScan_DAAPath.cpp  | 58
4 files changed, 67 insertions, 27 deletions
diff --git a/src/core/SkCoverageDelta.h b/src/core/SkCoverageDelta.h
index d7af66ee8b..cfbee5ed3e 100644
--- a/src/core/SkCoverageDelta.h
+++ b/src/core/SkCoverageDelta.h
@@ -182,6 +182,20 @@ static SK_ALWAYS_INLINE SkAlpha CoverageToAlpha(SkFixed coverage, bool isEvenOdd
return isInverse ? 255 - result : result;
}
+struct SkDAARecord {
+ enum class Type {
+ kToBeComputed,
+ kMask,
+ kList
+ } fType;
+
+ SkMask fMask;
+ SkCoverageDeltaList* fList;
+ SkArenaAlloc* fAlloc;
+
+ SkDAARecord(SkArenaAlloc* alloc) : fType(Type::kToBeComputed), fAlloc(alloc) {}
+};
+
template<typename T>
static SK_ALWAYS_INLINE T CoverageToAlpha(const T& coverage, bool isEvenOdd, bool isInverse) {
T t0(0), t255(255);
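The SkDAARecord added above is a small tagged result slot: fType says whether the computed coverage landed in fMask or fList, and fAlloc is the arena that backing storage is drawn from so the result can outlive the init call. A minimal construction sketch (the arena size is arbitrary):

    SkSTArenaAlloc<4096> alloc;   // backs the eventual mask or delta list
    SkDAARecord record(&alloc);   // fType starts as Type::kToBeComputed
    SkASSERT(record.fType == SkDAARecord::Type::kToBeComputed);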
diff --git a/src/core/SkScan.h b/src/core/SkScan.h
index c04f09051f..75fd2ac3c6 100644
--- a/src/core/SkScan.h
+++ b/src/core/SkScan.h
@@ -9,6 +9,7 @@
#ifndef SkScan_DEFINED
#define SkScan_DEFINED
+#include "SkCoverageDelta.h"
#include "SkFixed.h"
#include "SkRect.h"
#include <atomic>
@@ -52,7 +53,7 @@ public:
static void AntiFillRect(const SkRect&, const SkRasterClip&, SkBlitter*);
static void AntiFillXRect(const SkXRect&, const SkRasterClip&, SkBlitter*);
static void FillPath(const SkPath&, const SkRasterClip&, SkBlitter*);
- static void AntiFillPath(const SkPath&, const SkRasterClip&, SkBlitter*, bool forceDAA);
+ static void AntiFillPath(const SkPath&, const SkRasterClip&, SkBlitter*, SkDAARecord*);
static void FrameRect(const SkRect&, const SkPoint& strokeSize,
const SkRasterClip&, SkBlitter*);
static void AntiFrameRect(const SkRect&, const SkPoint& strokeSize,
@@ -72,8 +73,9 @@ public:
// Needed by do_fill_path in SkScanPriv.h
static void FillPath(const SkPath&, const SkRegion& clip, SkBlitter*);
+ // We have this wrapper instead of a default nullptr parameter because default arguments don't participate in function-pointer signature matching.
static void AntiFillPath(const SkPath& path, const SkRasterClip& rc, SkBlitter* blitter) {
- AntiFillPath(path, rc, blitter, false);
+ AntiFillPath(path, rc, blitter, nullptr);
}
private:
friend class SkAAClip;
@@ -85,7 +87,7 @@ private:
static void AntiFillRect(const SkRect&, const SkRegion* clip, SkBlitter*);
static void AntiFillXRect(const SkXRect&, const SkRegion*, SkBlitter*);
static void AntiFillPath(const SkPath&, const SkRegion& clip, SkBlitter*,
- bool forceRLE = false, bool forceDAA = false);
+ bool forceRLE = false, SkDAARecord* daaRecord = nullptr);
static void FillTriangle(const SkPoint pts[], const SkRegion*, SkBlitter*);
static void AntiFrameRect(const SkRect&, const SkPoint& strokeSize,
@@ -95,7 +97,7 @@ private:
static void AAAFillPath(const SkPath& path, SkBlitter* blitter, const SkIRect& pathIR,
const SkIRect& clipBounds, bool forceRLE);
static void DAAFillPath(const SkPath& path, SkBlitter* blitter, const SkIRect& pathIR,
- const SkIRect& clipBounds, bool forceRLE);
+ const SkIRect& clipBounds, bool forceRLE, SkDAARecord* daaRecord);
static void SAAFillPath(const SkPath& path, SkBlitter* blitter, const SkIRect& pathIR,
const SkIRect& clipBounds, bool forceRLE);
};
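The "function pointer" comment in the hunk above is standard C++ overload mechanics: default arguments are not part of a function's type, so a three-argument function pointer cannot bind a four-parameter function even when the fourth parameter defaults to nullptr. A hedged sketch (the FillPathProc alias is illustrative, not necessarily Skia's actual typedef):

    // The three-argument shape that callers store in a function pointer.
    using FillPathProc = void (*)(const SkPath&, const SkRasterClip&, SkBlitter*);

    FillPathProc proc = &SkScan::AntiFillPath;  // resolves to the 3-arg wrapper

    // A single function declared as
    //   AntiFillPath(const SkPath&, const SkRasterClip&, SkBlitter*, SkDAARecord* = nullptr);
    // would NOT be assignable to proc: its type has four parameters
    // regardless of the default argument.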
diff --git a/src/core/SkScan_AntiPath.cpp b/src/core/SkScan_AntiPath.cpp
index db1527f650..43ef09abbf 100644
--- a/src/core/SkScan_AntiPath.cpp
+++ b/src/core/SkScan_AntiPath.cpp
@@ -667,7 +667,7 @@ static SkIRect safeRoundOut(const SkRect& src) {
}
void SkScan::AntiFillPath(const SkPath& path, const SkRegion& origClip,
- SkBlitter* blitter, bool forceRLE, bool forceDAA) {
+ SkBlitter* blitter, bool forceRLE, SkDAARecord* daaRecord) {
if (origClip.isEmpty()) {
return;
}
@@ -737,8 +737,8 @@ void SkScan::AntiFillPath(const SkPath& path, const SkRegion& origClip,
sk_blit_above(blitter, ir, *clipRgn);
}
- if (forceDAA || ShouldUseDAA(path)) {
- SkScan::DAAFillPath(path, blitter, ir, clipRgn->getBounds(), forceRLE);
+ if (daaRecord || ShouldUseDAA(path)) {
+ SkScan::DAAFillPath(path, blitter, ir, clipRgn->getBounds(), forceRLE, daaRecord);
} else if (ShouldUseAAA(path)) {
// Do not use AAA if path is too complicated:
// there won't be any speedup or significant visual improvement.
@@ -774,19 +774,19 @@ void SkScan::FillPath(const SkPath& path, const SkRasterClip& clip, SkBlitter* b
}
void SkScan::AntiFillPath(const SkPath& path, const SkRasterClip& clip,
- SkBlitter* blitter, bool forceDAA) {
+ SkBlitter* blitter, SkDAARecord* daaRecord) {
if (clip.isEmpty() || !path.isFinite()) {
return;
}
if (clip.isBW()) {
- AntiFillPath(path, clip.bwRgn(), blitter, false, forceDAA);
+ AntiFillPath(path, clip.bwRgn(), blitter, false, daaRecord);
} else {
SkRegion tmp;
SkAAClipBlitter aaBlitter;
tmp.setRect(clip.getBounds());
aaBlitter.init(blitter, &clip.aaRgn());
- AntiFillPath(path, tmp, &aaBlitter, true, forceDAA); // SkAAClipBlitter can blitMask, why forceRLE?
+ AntiFillPath(path, tmp, &aaBlitter, true, daaRecord); // SkAAClipBlitter can blitMask, why forceRLE?
}
}
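Note the new dispatch above: a non-null daaRecord now plays the role of the old forceDAA flag, and additionally receives the computed coverage. A hedged before/after sketch of a call site (variable names are illustrative):

    // Before this change, forcing DAA took a flag:
    //   SkScan::AntiFillPath(path, rasterClip, blitter, /*forceDAA=*/true);

    // After it, any non-null record both forces DAA and captures the result:
    SkSTArenaAlloc<4096> alloc;
    SkDAARecord record(&alloc);
    SkScan::AntiFillPath(path, rasterClip, blitter, &record);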
diff --git a/src/core/SkScan_DAAPath.cpp b/src/core/SkScan_DAAPath.cpp
index dca2bd2249..9eb7c14cfa 100644
--- a/src/core/SkScan_DAAPath.cpp
+++ b/src/core/SkScan_DAAPath.cpp
@@ -315,16 +315,14 @@ void gen_alpha_deltas(const SkPath& path, const SkIRect& clipBounds, Deltas& res
}
}
-// For threaded backend with out-of-order init-once, we probably have to take care of the
-// blitRegion, sk_blit_above, sk_blit_below in SkScan::AntiFillPath to maintain the draw order. If
-// we do that, be careful that blitRect may throw an exception if the rect is empty.
void SkScan::DAAFillPath(const SkPath& path, SkBlitter* blitter, const SkIRect& ir,
- const SkIRect& clipBounds, bool forceRLE) {
+ const SkIRect& clipBounds, bool forceRLE, SkDAARecord* record) {
bool containedInClip = clipBounds.contains(ir);
bool isEvenOdd = path.getFillType() & 1;
bool isConvex = path.isConvex();
bool isInverse = path.isInverseFillType();
bool skipRect = isConvex && !isInverse;
+ bool isInitOnce = record && record->fType == SkDAARecord::Type::kToBeComputed;
SkIRect clippedIR = ir;
clippedIR.intersect(clipBounds);
@@ -340,18 +338,44 @@ void SkScan::DAAFillPath(const SkPath& path, SkBlitter* blitter, const SkIRect&
#else
constexpr int STACK_SIZE = 64 << 10; // 64k stack size to avoid heap allocation
#endif
- SkSTArenaAlloc<STACK_SIZE> alloc; // avoid heap allocation with SkSTArenaAlloc
-
- // Only blitter->blitXXX needs to be done in order in the threaded backend.
- // Everything before can be done out of order in the threaded backend.
- if (!forceRLE && !isInverse && SkCoverageDeltaMask::Suitable(clippedIR)) {
- SkCoverageDeltaMask deltaMask(&alloc, clippedIR);
- gen_alpha_deltas(path, clipBounds, deltaMask, blitter, skipRect, containedInClip);
- deltaMask.convertCoverageToAlpha(isEvenOdd, isInverse, isConvex);
- blitter->blitMask(deltaMask.prepareSkMask(), clippedIR);
- } else {
- SkCoverageDeltaList deltaList(&alloc, clippedIR.fTop, clippedIR.fBottom, forceRLE);
- gen_alpha_deltas(path, clipBounds, deltaList, blitter, skipRect, containedInClip);
- blitter->blitCoverageDeltas(&deltaList, clipBounds, isEvenOdd, isInverse, isConvex, &alloc);
+ SkSTArenaAlloc<STACK_SIZE> stackAlloc; // avoid heap allocation with SkSTArenaAlloc
+
+ // Use the record's alloc if and only if we're in the init-once phase: the mask or list must
+ // outlive this call, so it can't come from our stack arena. We can't use the record's alloc
+ // during the blit phase because the same record could be accessed by multiple threads
+ // simultaneously.
+ SkArenaAlloc* alloc = isInitOnce ? record->fAlloc : &stackAlloc;
+
+ if (record == nullptr) {
+ record = alloc->make<SkDAARecord>(alloc);
+ }
+
+ // Only blitter->blitXXX needs to be done in order in the threaded backend. Everything else can
+ // be done out of order in the init-once phase. We achieve that by calling DAAFillPath twice:
+ // first with a null blitter, and then again with the real blitter and the
+ // SkMask/SkCoverageDeltaList generated in the first call.
+ if (record->fType == SkDAARecord::Type::kToBeComputed) {
+ if (!forceRLE && !isInverse && SkCoverageDeltaMask::Suitable(clippedIR)) {
+ record->fType = SkDAARecord::Type::kMask;
+ SkCoverageDeltaMask deltaMask(alloc, clippedIR);
+ gen_alpha_deltas(path, clipBounds, deltaMask, blitter, skipRect, containedInClip);
+ deltaMask.convertCoverageToAlpha(isEvenOdd, isInverse, isConvex);
+ record->fMask = deltaMask.prepareSkMask();
+ } else {
+ record->fType = SkDAARecord::Type::kList;
+ SkCoverageDeltaList* deltaList = alloc->make<SkCoverageDeltaList>(
+ alloc, clippedIR.fTop, clippedIR.fBottom, forceRLE);
+ gen_alpha_deltas(path, clipBounds, *deltaList, blitter, skipRect, containedInClip);
+ record->fList = deltaList;
+ }
+ }
+
+ if (!isInitOnce) {
+ SkASSERT(record->fType != SkDAARecord::Type::kToBeComputed);
+ if (record->fType == SkDAARecord::Type::kMask) {
+ blitter->blitMask(record->fMask, clippedIR);
+ } else {
+ blitter->blitCoverageDeltas(record->fList,
+ clipBounds, isEvenOdd, isInverse, isConvex, alloc);
+ }
}
}
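Putting it together, the threaded backend the comments anticipate could compute records out of order and then blit strictly in draw order. A hypothetical driver sketch; pool (with add()/wait()), paths, clip, blitter, and alloc are all assumed, and nothing below is part of this patch:

    std::vector<SkDAARecord*> records(paths.size());
    for (size_t i = 0; i < paths.size(); ++i) {
        records[i] = alloc->make<SkDAARecord>(alloc);
        pool.add([&, i] {  // out of order is fine: no blitter->blitXXX happens here
            SkScan::AntiFillPath(paths[i], clip, /*blitter=*/nullptr, records[i]);
        });
    }
    pool.wait();           // every record now holds kMask or kList

    for (size_t i = 0; i < paths.size(); ++i) {
        SkScan::AntiFillPath(paths[i], clip, blitter, records[i]);  // strict draw order
    }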