about · summary · refs · log · tree · commit · diff · homepage
path: root/src
diff options
context:
space:
mode:
Diffstat (limited to 'src')
-rw-r--r--src/gpu/GrPathRendererChain.cpp6
-rw-r--r--src/gpu/ccpr/GrCCDrawPathsOp.cpp13
-rw-r--r--src/gpu/ccpr/GrCoverageCountingPathRenderer.cpp47
-rw-r--r--src/gpu/ccpr/GrCoverageCountingPathRenderer.h40
-rw-r--r--src/gpu/gl/GrGLCaps.cpp8
5 files changed, 72 insertions, 42 deletions
diff --git a/src/gpu/GrPathRendererChain.cpp b/src/gpu/GrPathRendererChain.cpp
index 4dc814e59a..6301045cb7 100644
--- a/src/gpu/GrPathRendererChain.cpp
+++ b/src/gpu/GrPathRendererChain.cpp
@@ -39,9 +39,9 @@ GrPathRendererChain::GrPathRendererChain(GrContext* context, const Options& opti
fChain.push_back(sk_make_sp<GrAAHairLinePathRenderer>());
if (options.fGpuPathRenderers & GpuPathRenderers::kCoverageCounting) {
- bool drawCachablePaths = !options.fAllowPathMaskCaching;
- if (auto ccpr =
- GrCoverageCountingPathRenderer::CreateIfSupported(caps, drawCachablePaths)) {
+ using AllowCaching = GrCoverageCountingPathRenderer::AllowCaching;
+ if (auto ccpr = GrCoverageCountingPathRenderer::CreateIfSupported(
+ caps, AllowCaching(options.fAllowPathMaskCaching))) {
fCoverageCountingPathRenderer = ccpr.get();
context->contextPriv().addOnFlushCallbackObject(fCoverageCountingPathRenderer);
fChain.push_back(std::move(ccpr));
diff --git a/src/gpu/ccpr/GrCCDrawPathsOp.cpp b/src/gpu/ccpr/GrCCDrawPathsOp.cpp
index 612cc15e02..db5ea355aa 100644
--- a/src/gpu/ccpr/GrCCDrawPathsOp.cpp
+++ b/src/gpu/ccpr/GrCCDrawPathsOp.cpp
@@ -9,6 +9,7 @@
#include "GrMemoryPool.h"
#include "GrOpFlushState.h"
+#include "ccpr/GrCCPathCache.h"
#include "ccpr/GrCCPerFlushResources.h"
#include "ccpr/GrCoverageCountingPathRenderer.h"
@@ -152,14 +153,16 @@ void GrCCDrawPathsOp::accountForOwnPaths(GrCCPathCache* pathCache,
using MaskTransform = GrCCPathCache::MaskTransform;
for (SingleDraw& draw : fDraws) {
- SkASSERT(!draw.fCacheEntry);
-
SkPath path;
draw.fShape.asPath(&path);
- MaskTransform m(draw.fMatrix, &draw.fCachedMaskShift);
- bool canStashPathMask = draw.fMaskVisibility >= Visibility::kMostlyComplete;
- draw.fCacheEntry = pathCache->find(draw.fShape, m, CreateIfAbsent(canStashPathMask));
+ SkASSERT(!draw.fCacheEntry);
+
+ if (pathCache) {
+ MaskTransform m(draw.fMatrix, &draw.fCachedMaskShift);
+ bool canStashPathMask = draw.fMaskVisibility >= Visibility::kMostlyComplete;
+ draw.fCacheEntry = pathCache->find(draw.fShape, m, CreateIfAbsent(canStashPathMask));
+ }
if (auto cacheEntry = draw.fCacheEntry.get()) {
SkASSERT(!cacheEntry->currFlushAtlas()); // Shouldn't be set until setupResources().
diff --git a/src/gpu/ccpr/GrCoverageCountingPathRenderer.cpp b/src/gpu/ccpr/GrCoverageCountingPathRenderer.cpp
index 122569c072..d68d4c2d98 100644
--- a/src/gpu/ccpr/GrCoverageCountingPathRenderer.cpp
+++ b/src/gpu/ccpr/GrCoverageCountingPathRenderer.cpp
@@ -14,6 +14,7 @@
#include "SkPathOps.h"
#include "ccpr/GrCCClipProcessor.h"
#include "ccpr/GrCCDrawPathsOp.h"
+#include "ccpr/GrCCPathCache.h"
#include "ccpr/GrCCPathParser.h"
using PathInstance = GrCCPathProcessor::Instance;
@@ -56,9 +57,21 @@ bool GrCoverageCountingPathRenderer::IsSupported(const GrCaps& caps) {
}
sk_sp<GrCoverageCountingPathRenderer> GrCoverageCountingPathRenderer::CreateIfSupported(
- const GrCaps& caps, bool drawCachablePaths) {
- auto ccpr = IsSupported(caps) ? new GrCoverageCountingPathRenderer(drawCachablePaths) : nullptr;
- return sk_sp<GrCoverageCountingPathRenderer>(ccpr);
+ const GrCaps& caps, AllowCaching allowCaching) {
+ return sk_sp<GrCoverageCountingPathRenderer>(
+ IsSupported(caps) ? new GrCoverageCountingPathRenderer(allowCaching) : nullptr);
+}
+
+GrCoverageCountingPathRenderer::GrCoverageCountingPathRenderer(AllowCaching allowCaching) {
+ if (AllowCaching::kYes == allowCaching) {
+ fPathCache = skstd::make_unique<GrCCPathCache>();
+ }
+}
+
+GrCoverageCountingPathRenderer::~GrCoverageCountingPathRenderer() {
+ // Ensure callers are actually flushing paths they record, not causing us to leak memory.
+ SkASSERT(fPendingPaths.empty());
+ SkASSERT(!fFlushing);
}
GrCCPerOpListPaths* GrCoverageCountingPathRenderer::lookupPendingPaths(uint32_t opListID) {
@@ -72,10 +85,6 @@ GrCCPerOpListPaths* GrCoverageCountingPathRenderer::lookupPendingPaths(uint32_t
GrPathRenderer::CanDrawPath GrCoverageCountingPathRenderer::onCanDrawPath(
const CanDrawPathArgs& args) const {
- if (args.fShape->hasUnstyledKey() && !fDrawCachablePaths) {
- return CanDrawPath::kNo;
- }
-
if (!args.fShape->style().isSimpleFill() || args.fShape->inverseFilled() ||
args.fViewMatrix->hasPerspective() || GrAAType::kCoverage != args.fAAType) {
return CanDrawPath::kNo;
@@ -83,17 +92,27 @@ GrPathRenderer::CanDrawPath GrCoverageCountingPathRenderer::onCanDrawPath(
SkPath path;
args.fShape->asPath(&path);
+
SkRect devBounds;
- SkIRect devIBounds;
args.fViewMatrix->mapRect(&devBounds, path.getBounds());
- devBounds.roundOut(&devIBounds);
- if (!devIBounds.intersect(*args.fClipConservativeBounds)) {
+
+ SkIRect clippedIBounds;
+ devBounds.roundOut(&clippedIBounds);
+ if (!clippedIBounds.intersect(*args.fClipConservativeBounds)) {
// Path is completely clipped away. Our code will eventually notice this before doing any
// real work.
return CanDrawPath::kYes;
}
- if (devIBounds.height() * devIBounds.width() > 256 * 256) {
+ int64_t numPixels = sk_64_mul(clippedIBounds.height(), clippedIBounds.width());
+ if (path.countVerbs() > 1000 && path.countPoints() > numPixels) {
+ // This is a complicated path that has more vertices than pixels! Let's let the SW renderer
+ // have this one: It will probably be faster and a bitmap will require less total memory on
+ // the GPU than CCPR instance buffers would for the raw path data.
+ return CanDrawPath::kNo;
+ }
+
+ if (numPixels > 256 * 256) {
// Large paths can blow up the atlas fast. And they are not ideal for a two-pass rendering
// algorithm. Give the simpler direct renderers a chance before we commit to drawing it.
return CanDrawPath::kAsBackup;
@@ -209,7 +228,7 @@ void GrCoverageCountingPathRenderer::preFlush(GrOnFlushResourceProvider* onFlush
specs.fCopyAtlasSpecs.fMaxPreferredTextureSize = SkTMin(2048, maxPreferredRTSize);
SkASSERT(0 == specs.fCopyAtlasSpecs.fMinTextureSize);
specs.fRenderedAtlasSpecs.fMaxPreferredTextureSize = maxPreferredRTSize;
- specs.fRenderedAtlasSpecs.fMinTextureSize = SkTMin(1024, maxPreferredRTSize);
+ specs.fRenderedAtlasSpecs.fMinTextureSize = SkTMin(512, maxPreferredRTSize);
// Move the per-opList paths that are about to be flushed from fPendingPaths to fFlushingPaths,
// and count them up so we can preallocate buffers.
@@ -224,7 +243,7 @@ void GrCoverageCountingPathRenderer::preFlush(GrOnFlushResourceProvider* onFlush
fPendingPaths.erase(iter);
for (GrCCDrawPathsOp* op : fFlushingPaths.back()->fDrawOps) {
- op->accountForOwnPaths(&fPathCache, onFlushRP, fStashedAtlasKey, &specs);
+ op->accountForOwnPaths(fPathCache.get(), onFlushRP, fStashedAtlasKey, &specs);
}
for (const auto& clipsIter : fFlushingPaths.back()->fClipPaths) {
clipsIter.second.accountForOwnPath(&specs);
@@ -239,7 +258,7 @@ void GrCoverageCountingPathRenderer::preFlush(GrOnFlushResourceProvider* onFlush
// Determine if there are enough reusable paths from last flush for it to be worth our time to
// copy them to cached atlas(es).
DoCopiesToCache doCopies = DoCopiesToCache(specs.fNumCopiedPaths > 100 ||
- specs.fCopyAtlasSpecs.fApproxNumPixels > 512 * 256);
+ specs.fCopyAtlasSpecs.fApproxNumPixels > 256 * 256);
if (specs.fNumCopiedPaths && DoCopiesToCache::kNo == doCopies) {
specs.convertCopiesToRenders();
SkASSERT(!specs.fNumCopiedPaths);
diff --git a/src/gpu/ccpr/GrCoverageCountingPathRenderer.h b/src/gpu/ccpr/GrCoverageCountingPathRenderer.h
index 0007319b55..d92d915958 100644
--- a/src/gpu/ccpr/GrCoverageCountingPathRenderer.h
+++ b/src/gpu/ccpr/GrCoverageCountingPathRenderer.h
@@ -11,11 +11,11 @@
#include "GrCCPerOpListPaths.h"
#include "GrPathRenderer.h"
#include "GrRenderTargetOpList.h"
-#include "ccpr/GrCCPathCache.h"
#include "ccpr/GrCCPerFlushResources.h"
#include <map>
class GrCCDrawPathsOp;
+class GrCCPathCache;
/**
* This is a path renderer that draws antialiased paths by counting coverage in an offscreen
@@ -27,13 +27,15 @@ class GrCCDrawPathsOp;
class GrCoverageCountingPathRenderer : public GrPathRenderer, public GrOnFlushCallbackObject {
public:
static bool IsSupported(const GrCaps&);
- static sk_sp<GrCoverageCountingPathRenderer> CreateIfSupported(const GrCaps&,
- bool drawCachablePaths);
- ~GrCoverageCountingPathRenderer() override {
- // Ensure callers are actually flushing paths they record, not causing us to leak memory.
- SkASSERT(fPendingPaths.empty());
- SkASSERT(!fFlushing);
- }
+
+ enum class AllowCaching : bool {
+ kNo = false,
+ kYes = true
+ };
+
+ static sk_sp<GrCoverageCountingPathRenderer> CreateIfSupported(const GrCaps&, AllowCaching);
+
+ ~GrCoverageCountingPathRenderer() override;
using PendingPathsMap = std::map<uint32_t, sk_sp<GrCCPerOpListPaths>>;
@@ -54,13 +56,6 @@ public:
fPendingPaths.insert(paths.begin(), paths.end());
}
- // GrPathRenderer overrides.
- StencilSupport onGetStencilSupport(const GrShape&) const override {
- return GrPathRenderer::kNoSupport_StencilSupport;
- }
- CanDrawPath onCanDrawPath(const CanDrawPathArgs& args) const override;
- bool onDrawPath(const DrawPathArgs&) override;
-
std::unique_ptr<GrFragmentProcessor> makeClipProcessor(uint32_t oplistID,
const SkPath& deviceSpacePath,
const SkIRect& accessRect, int rtWidth,
@@ -71,11 +66,18 @@ public:
SkTArray<sk_sp<GrRenderTargetContext>>* out) override;
void postFlush(GrDeferredUploadToken, const uint32_t* opListIDs, int numOpListIDs) override;
+ void testingOnly_drawPathDirectly(const DrawPathArgs&);
const GrUniqueKey& testingOnly_getStashedAtlasKey() const;
private:
- GrCoverageCountingPathRenderer(bool drawCachablePaths)
- : fDrawCachablePaths(drawCachablePaths) {}
+ GrCoverageCountingPathRenderer(AllowCaching);
+
+ // GrPathRenderer overrides.
+ StencilSupport onGetStencilSupport(const GrShape&) const override {
+ return GrPathRenderer::kNoSupport_StencilSupport;
+ }
+ CanDrawPath onCanDrawPath(const CanDrawPathArgs&) const override;
+ bool onDrawPath(const DrawPathArgs&) override;
GrCCPerOpListPaths* lookupPendingPaths(uint32_t opListID);
void recordOp(std::unique_ptr<GrCCDrawPathsOp>, const DrawPathArgs&);
@@ -89,12 +91,10 @@ private:
// (It will only contain elements when fFlushing is true.)
SkSTArray<4, sk_sp<GrCCPerOpListPaths>> fFlushingPaths;
- GrCCPathCache fPathCache;
+ std::unique_ptr<GrCCPathCache> fPathCache;
GrUniqueKey fStashedAtlasKey;
SkDEBUGCODE(bool fFlushing = false);
-
- const bool fDrawCachablePaths;
};
#endif
diff --git a/src/gpu/gl/GrGLCaps.cpp b/src/gpu/gl/GrGLCaps.cpp
index 4f33ef4ce4..3705e5c98c 100644
--- a/src/gpu/gl/GrGLCaps.cpp
+++ b/src/gpu/gl/GrGLCaps.cpp
@@ -2688,6 +2688,14 @@ void GrGLCaps::applyDriverCorrectnessWorkarounds(const GrGLContextInfo& ctxInfo,
// https://bugreport.apple.com/web/?problemID=39948888
fUnpackRowLengthSupport = false;
#endif
+
+#ifdef SK_BUILD_FOR_MAC
+ // Radeon MacBooks hit a crash in glReadPixels() when using CCPR.
+ // http://skbug.com/8097
+ if (kATI_GrGLVendor == ctxInfo.vendor()) {
+ fBlacklistCoverageCounting = true;
+ }
+#endif
}
void GrGLCaps::onApplyOptionsOverrides(const GrContextOptions& options) {