-rw-r--r--  src/gpu/GrPathRendererChain.cpp                    6
-rw-r--r--  src/gpu/ccpr/GrCCDrawPathsOp.cpp                  13
-rw-r--r--  src/gpu/ccpr/GrCoverageCountingPathRenderer.cpp   47
-rw-r--r--  src/gpu/ccpr/GrCoverageCountingPathRenderer.h     40
-rw-r--r--  src/gpu/gl/GrGLCaps.cpp                            8
-rw-r--r--  tests/GrCCPRTest.cpp                              31
-rw-r--r--  tools/gpu/GrTest.cpp                               8
-rw-r--r--  tools/skpbench/skpbench.cpp                       22
8 files changed, 109 insertions, 66 deletions
diff --git a/src/gpu/GrPathRendererChain.cpp b/src/gpu/GrPathRendererChain.cpp
index 4dc814e59a..6301045cb7 100644
--- a/src/gpu/GrPathRendererChain.cpp
+++ b/src/gpu/GrPathRendererChain.cpp
@@ -39,9 +39,9 @@ GrPathRendererChain::GrPathRendererChain(GrContext* context, const Options& opti
fChain.push_back(sk_make_sp<GrAAHairLinePathRenderer>());
if (options.fGpuPathRenderers & GpuPathRenderers::kCoverageCounting) {
- bool drawCachablePaths = !options.fAllowPathMaskCaching;
- if (auto ccpr =
- GrCoverageCountingPathRenderer::CreateIfSupported(caps, drawCachablePaths)) {
+ using AllowCaching = GrCoverageCountingPathRenderer::AllowCaching;
+ if (auto ccpr = GrCoverageCountingPathRenderer::CreateIfSupported(
+ caps, AllowCaching(options.fAllowPathMaskCaching))) {
fCoverageCountingPathRenderer = ccpr.get();
context->contextPriv().addOnFlushCallbackObject(fCoverageCountingPathRenderer);
fChain.push_back(std::move(ccpr));
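
Note: besides changing the parameter type, this hunk removes a polarity inversion. The old code computed drawCachablePaths = !options.fAllowPathMaskCaching; the new code forwards fAllowPathMaskCaching directly, since the parameter's meaning changes from "draw cachable paths" to "enable the path mask cache". A minimal sketch of the bool-to-enum-class pattern introduced here (the wrapper function is hypothetical):

    enum class AllowCaching : bool { kNo = false, kYes = true };

    // Explicit construction converts the runtime bool; call sites then read
    // CreateIfSupported(caps, AllowCaching::kYes) rather than an opaque "true"
    // that could be confused with some other bool parameter.
    AllowCaching toAllowCaching(bool allowPathMaskCaching) {
        return AllowCaching(allowPathMaskCaching);
    }
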
diff --git a/src/gpu/ccpr/GrCCDrawPathsOp.cpp b/src/gpu/ccpr/GrCCDrawPathsOp.cpp
index 612cc15e02..db5ea355aa 100644
--- a/src/gpu/ccpr/GrCCDrawPathsOp.cpp
+++ b/src/gpu/ccpr/GrCCDrawPathsOp.cpp
@@ -9,6 +9,7 @@
#include "GrMemoryPool.h"
#include "GrOpFlushState.h"
+#include "ccpr/GrCCPathCache.h"
#include "ccpr/GrCCPerFlushResources.h"
#include "ccpr/GrCoverageCountingPathRenderer.h"
@@ -152,14 +153,16 @@ void GrCCDrawPathsOp::accountForOwnPaths(GrCCPathCache* pathCache,
using MaskTransform = GrCCPathCache::MaskTransform;
for (SingleDraw& draw : fDraws) {
- SkASSERT(!draw.fCacheEntry);
-
SkPath path;
draw.fShape.asPath(&path);
- MaskTransform m(draw.fMatrix, &draw.fCachedMaskShift);
- bool canStashPathMask = draw.fMaskVisibility >= Visibility::kMostlyComplete;
- draw.fCacheEntry = pathCache->find(draw.fShape, m, CreateIfAbsent(canStashPathMask));
+ SkASSERT(!draw.fCacheEntry);
+
+ if (pathCache) {
+ MaskTransform m(draw.fMatrix, &draw.fCachedMaskShift);
+ bool canStashPathMask = draw.fMaskVisibility >= Visibility::kMostlyComplete;
+ draw.fCacheEntry = pathCache->find(draw.fShape, m, CreateIfAbsent(canStashPathMask));
+ }
if (auto cacheEntry = draw.fCacheEntry.get()) {
SkASSERT(!cacheEntry->currFlushAtlas()); // Shouldn't be set until setupResources().
diff --git a/src/gpu/ccpr/GrCoverageCountingPathRenderer.cpp b/src/gpu/ccpr/GrCoverageCountingPathRenderer.cpp
index 122569c072..d68d4c2d98 100644
--- a/src/gpu/ccpr/GrCoverageCountingPathRenderer.cpp
+++ b/src/gpu/ccpr/GrCoverageCountingPathRenderer.cpp
@@ -14,6 +14,7 @@
#include "SkPathOps.h"
#include "ccpr/GrCCClipProcessor.h"
#include "ccpr/GrCCDrawPathsOp.h"
+#include "ccpr/GrCCPathCache.h"
#include "ccpr/GrCCPathParser.h"
using PathInstance = GrCCPathProcessor::Instance;
@@ -56,9 +57,21 @@ bool GrCoverageCountingPathRenderer::IsSupported(const GrCaps& caps) {
}
sk_sp<GrCoverageCountingPathRenderer> GrCoverageCountingPathRenderer::CreateIfSupported(
- const GrCaps& caps, bool drawCachablePaths) {
- auto ccpr = IsSupported(caps) ? new GrCoverageCountingPathRenderer(drawCachablePaths) : nullptr;
- return sk_sp<GrCoverageCountingPathRenderer>(ccpr);
+ const GrCaps& caps, AllowCaching allowCaching) {
+ return sk_sp<GrCoverageCountingPathRenderer>(
+ IsSupported(caps) ? new GrCoverageCountingPathRenderer(allowCaching) : nullptr);
+}
+
+GrCoverageCountingPathRenderer::GrCoverageCountingPathRenderer(AllowCaching allowCaching) {
+ if (AllowCaching::kYes == allowCaching) {
+ fPathCache = skstd::make_unique<GrCCPathCache>();
+ }
+}
+
+GrCoverageCountingPathRenderer::~GrCoverageCountingPathRenderer() {
+ // Ensure callers are actually flushing paths they record, not causing us to leak memory.
+ SkASSERT(fPendingPaths.empty());
+ SkASSERT(!fFlushing);
}
GrCCPerOpListPaths* GrCoverageCountingPathRenderer::lookupPendingPaths(uint32_t opListID) {
@@ -72,10 +85,6 @@ GrCCPerOpListPaths* GrCoverageCountingPathRenderer::lookupPendingPaths(uint32_t
GrPathRenderer::CanDrawPath GrCoverageCountingPathRenderer::onCanDrawPath(
const CanDrawPathArgs& args) const {
- if (args.fShape->hasUnstyledKey() && !fDrawCachablePaths) {
- return CanDrawPath::kNo;
- }
-
if (!args.fShape->style().isSimpleFill() || args.fShape->inverseFilled() ||
args.fViewMatrix->hasPerspective() || GrAAType::kCoverage != args.fAAType) {
return CanDrawPath::kNo;
@@ -83,17 +92,27 @@ GrPathRenderer::CanDrawPath GrCoverageCountingPathRenderer::onCanDrawPath(
SkPath path;
args.fShape->asPath(&path);
+
SkRect devBounds;
- SkIRect devIBounds;
args.fViewMatrix->mapRect(&devBounds, path.getBounds());
- devBounds.roundOut(&devIBounds);
- if (!devIBounds.intersect(*args.fClipConservativeBounds)) {
+
+ SkIRect clippedIBounds;
+ devBounds.roundOut(&clippedIBounds);
+ if (!clippedIBounds.intersect(*args.fClipConservativeBounds)) {
// Path is completely clipped away. Our code will eventually notice this before doing any
// real work.
return CanDrawPath::kYes;
}
- if (devIBounds.height() * devIBounds.width() > 256 * 256) {
+ int64_t numPixels = sk_64_mul(clippedIBounds.height(), clippedIBounds.width());
+ if (path.countVerbs() > 1000 && path.countPoints() > numPixels) {
+ // This is a complicated path that has more vertices than pixels! Let's let the SW renderer
+ // have this one: It will probably be faster and a bitmap will require less total memory on
+ // the GPU than CCPR instance buffers would for the raw path data.
+ return CanDrawPath::kNo;
+ }
+
+ if (numPixels > 256 * 256) {
// Large paths can blow up the atlas fast. And they are not ideal for a two-pass rendering
// algorithm. Give the simpler direct renderers a chance before we commit to drawing it.
return CanDrawPath::kAsBackup;
@@ -209,7 +228,7 @@ void GrCoverageCountingPathRenderer::preFlush(GrOnFlushResourceProvider* onFlush
specs.fCopyAtlasSpecs.fMaxPreferredTextureSize = SkTMin(2048, maxPreferredRTSize);
SkASSERT(0 == specs.fCopyAtlasSpecs.fMinTextureSize);
specs.fRenderedAtlasSpecs.fMaxPreferredTextureSize = maxPreferredRTSize;
- specs.fRenderedAtlasSpecs.fMinTextureSize = SkTMin(1024, maxPreferredRTSize);
+ specs.fRenderedAtlasSpecs.fMinTextureSize = SkTMin(512, maxPreferredRTSize);
// Move the per-opList paths that are about to be flushed from fPendingPaths to fFlushingPaths,
// and count them up so we can preallocate buffers.
@@ -224,7 +243,7 @@ void GrCoverageCountingPathRenderer::preFlush(GrOnFlushResourceProvider* onFlush
fPendingPaths.erase(iter);
for (GrCCDrawPathsOp* op : fFlushingPaths.back()->fDrawOps) {
- op->accountForOwnPaths(&fPathCache, onFlushRP, fStashedAtlasKey, &specs);
+ op->accountForOwnPaths(fPathCache.get(), onFlushRP, fStashedAtlasKey, &specs);
}
for (const auto& clipsIter : fFlushingPaths.back()->fClipPaths) {
clipsIter.second.accountForOwnPath(&specs);
@@ -239,7 +258,7 @@ void GrCoverageCountingPathRenderer::preFlush(GrOnFlushResourceProvider* onFlush
// Determine if there are enough reusable paths from last flush for it to be worth our time to
// copy them to cached atlas(es).
DoCopiesToCache doCopies = DoCopiesToCache(specs.fNumCopiedPaths > 100 ||
- specs.fCopyAtlasSpecs.fApproxNumPixels > 512 * 256);
+ specs.fCopyAtlasSpecs.fApproxNumPixels > 256 * 256);
if (specs.fNumCopiedPaths && DoCopiesToCache::kNo == doCopies) {
specs.convertCopiesToRenders();
SkASSERT(!specs.fNumCopiedPaths);
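
Note on the new heuristics: the area math is done in 64 bits because width * height of a large clipped rect can overflow a 32-bit int; sk_64_mul() multiplies two 32-bit values into an int64_t. A self-contained sketch of the same triage, using plain int64_t arithmetic in place of the Skia helper:

    #include <cstdint>

    enum class CanDrawPath { kNo, kAsBackup, kYes };

    // Hypothetical standalone version of the check above.
    CanDrawPath triagePath(int clippedWidth, int clippedHeight, int verbCnt, int ptCnt) {
        int64_t numPixels = int64_t(clippedHeight) * clippedWidth;
        if (verbCnt > 1000 && ptCnt > numPixels) {
            return CanDrawPath::kNo;        // more path data than pixels: prefer SW raster
        }
        if (numPixels > 256 * 256) {
            return CanDrawPath::kAsBackup;  // large: draw only if no simpler renderer claims it
        }
        return CanDrawPath::kYes;
    }

The same CL also lowers the rendered-atlas minimum size from 1024 to 512 and the copy-to-cache threshold from 512 * 256 to 256 * 256 pixels, which should let small flushes allocate smaller atlases and make copying reusable paths to a cached atlas worthwhile sooner.
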
diff --git a/src/gpu/ccpr/GrCoverageCountingPathRenderer.h b/src/gpu/ccpr/GrCoverageCountingPathRenderer.h
index 0007319b55..d92d915958 100644
--- a/src/gpu/ccpr/GrCoverageCountingPathRenderer.h
+++ b/src/gpu/ccpr/GrCoverageCountingPathRenderer.h
@@ -11,11 +11,11 @@
#include "GrCCPerOpListPaths.h"
#include "GrPathRenderer.h"
#include "GrRenderTargetOpList.h"
-#include "ccpr/GrCCPathCache.h"
#include "ccpr/GrCCPerFlushResources.h"
#include <map>
class GrCCDrawPathsOp;
+class GrCCPathCache;
/**
* This is a path renderer that draws antialiased paths by counting coverage in an offscreen
@@ -27,13 +27,15 @@ class GrCCDrawPathsOp;
class GrCoverageCountingPathRenderer : public GrPathRenderer, public GrOnFlushCallbackObject {
public:
static bool IsSupported(const GrCaps&);
- static sk_sp<GrCoverageCountingPathRenderer> CreateIfSupported(const GrCaps&,
- bool drawCachablePaths);
- ~GrCoverageCountingPathRenderer() override {
- // Ensure callers are actually flushing paths they record, not causing us to leak memory.
- SkASSERT(fPendingPaths.empty());
- SkASSERT(!fFlushing);
- }
+
+ enum class AllowCaching : bool {
+ kNo = false,
+ kYes = true
+ };
+
+ static sk_sp<GrCoverageCountingPathRenderer> CreateIfSupported(const GrCaps&, AllowCaching);
+
+ ~GrCoverageCountingPathRenderer() override;
using PendingPathsMap = std::map<uint32_t, sk_sp<GrCCPerOpListPaths>>;
@@ -54,13 +56,6 @@ public:
fPendingPaths.insert(paths.begin(), paths.end());
}
- // GrPathRenderer overrides.
- StencilSupport onGetStencilSupport(const GrShape&) const override {
- return GrPathRenderer::kNoSupport_StencilSupport;
- }
- CanDrawPath onCanDrawPath(const CanDrawPathArgs& args) const override;
- bool onDrawPath(const DrawPathArgs&) override;
-
std::unique_ptr<GrFragmentProcessor> makeClipProcessor(uint32_t oplistID,
const SkPath& deviceSpacePath,
const SkIRect& accessRect, int rtWidth,
@@ -71,11 +66,18 @@ public:
SkTArray<sk_sp<GrRenderTargetContext>>* out) override;
void postFlush(GrDeferredUploadToken, const uint32_t* opListIDs, int numOpListIDs) override;
+ void testingOnly_drawPathDirectly(const DrawPathArgs&);
const GrUniqueKey& testingOnly_getStashedAtlasKey() const;
private:
- GrCoverageCountingPathRenderer(bool drawCachablePaths)
- : fDrawCachablePaths(drawCachablePaths) {}
+ GrCoverageCountingPathRenderer(AllowCaching);
+
+ // GrPathRenderer overrides.
+ StencilSupport onGetStencilSupport(const GrShape&) const override {
+ return GrPathRenderer::kNoSupport_StencilSupport;
+ }
+ CanDrawPath onCanDrawPath(const CanDrawPathArgs&) const override;
+ bool onDrawPath(const DrawPathArgs&) override;
GrCCPerOpListPaths* lookupPendingPaths(uint32_t opListID);
void recordOp(std::unique_ptr<GrCCDrawPathsOp>, const DrawPathArgs&);
@@ -89,12 +91,10 @@ private:
// (It will only contain elements when fFlushing is true.)
SkSTArray<4, sk_sp<GrCCPerOpListPaths>> fFlushingPaths;
- GrCCPathCache fPathCache;
+ std::unique_ptr<GrCCPathCache> fPathCache;
GrUniqueKey fStashedAtlasKey;
SkDEBUGCODE(bool fFlushing = false);
-
- const bool fDrawCachablePaths;
};
#endif
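
Note: replacing the #include with a forward declaration is what forces the destructor out of line. std::unique_ptr<GrCCPathCache> needs a complete type wherever its deleter is instantiated, so ~GrCoverageCountingPathRenderer() can no longer be generated inline in this header and must be defined in the .cpp, where GrCCPathCache is complete. The general idiom, assuming that is indeed the motivation here:

    // widget.h
    #include <memory>

    class Impl;  // forward declaration only

    class Widget {
    public:
        Widget();
        ~Widget();  // must be user-declared; defining it here would need Impl complete
    private:
        std::unique_ptr<Impl> fImpl;  // OK: no deleter instantiated yet
    };

    // widget.cpp
    // class Impl { ... };
    // Widget::~Widget() = default;  // deleter instantiates here, where Impl is complete
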
diff --git a/src/gpu/gl/GrGLCaps.cpp b/src/gpu/gl/GrGLCaps.cpp
index 4f33ef4ce4..3705e5c98c 100644
--- a/src/gpu/gl/GrGLCaps.cpp
+++ b/src/gpu/gl/GrGLCaps.cpp
@@ -2688,6 +2688,14 @@ void GrGLCaps::applyDriverCorrectnessWorkarounds(const GrGLContextInfo& ctxInfo,
// https://bugreport.apple.com/web/?problemID=39948888
fUnpackRowLengthSupport = false;
#endif
+
+#ifdef SK_BUILD_FOR_MAC
+ // Radeon MacBooks hit a crash in glReadPixels() when using CCPR.
+ // http://skbug.com/8097
+ if (kATI_GrGLVendor == ctxInfo.vendor()) {
+ fBlacklistCoverageCounting = true;
+ }
+#endif
}
void GrGLCaps::onApplyOptionsOverrides(const GrContextOptions& options) {
diff --git a/tests/GrCCPRTest.cpp b/tests/GrCCPRTest.cpp
index 2cc456946c..9ac425c987 100644
--- a/tests/GrCCPRTest.cpp
+++ b/tests/GrCCPRTest.cpp
@@ -53,11 +53,6 @@ private:
const SkPath fPath;
};
-enum class MarkVolatile : bool {
- kNo = false,
- kYes = true
-};
-
class CCPRPathDrawer {
public:
CCPRPathDrawer(GrContext* ctx, skiatest::Reporter* reporter)
@@ -82,8 +77,7 @@ public:
void clear() const { fRTC->clear(nullptr, 0, GrRenderTargetContext::CanClearFullscreen::kYes); }
void abandonGrContext() { fCtx = nullptr; fCCPR = nullptr; fRTC = nullptr; }
- void drawPath(SkPath path, const SkMatrix& matrix = SkMatrix::I(),
- MarkVolatile markVolatile = MarkVolatile::kYes) const {
+ void drawPath(const SkPath& path, const SkMatrix& matrix = SkMatrix::I()) const {
SkASSERT(this->valid());
GrPaint paint;
@@ -92,13 +86,11 @@ public:
GrNoClip noClip;
SkIRect clipBounds = SkIRect::MakeWH(kCanvasSize, kCanvasSize);
- if (MarkVolatile::kYes == markVolatile) {
- path.setIsVolatile(true);
- }
GrShape shape(path);
- fCCPR->drawPath({fCtx, std::move(paint), &GrUserStencilSettings::kUnused, fRTC.get(),
- &noClip, &clipBounds, &matrix, &shape, GrAAType::kCoverage, false});
+ fCCPR->testingOnly_drawPathDirectly({
+ fCtx, std::move(paint), &GrUserStencilSettings::kUnused, fRTC.get(), &noClip,
+ &clipBounds, &matrix, &shape, GrAAType::kCoverage, false});
}
void clipFullscreenRect(SkPath clipPath, GrColor4f color = GrColor4f(0, 1, 0, 1)) {
@@ -137,12 +129,13 @@ public:
mockOptions.fGeometryShaderSupport = true;
mockOptions.fIntegerSupport = true;
mockOptions.fFlatInterpolationSupport = true;
- this->customizeMockOptions(&mockOptions);
GrContextOptions ctxOptions;
ctxOptions.fAllowPathMaskCaching = false;
ctxOptions.fGpuPathRenderers = GpuPathRenderers::kCoverageCounting;
+ this->customizeOptions(&mockOptions, &ctxOptions);
+
fMockContext = GrContext::MakeMock(&mockOptions, ctxOptions);
if (!fMockContext) {
ERRORF(reporter, "could not create mock context");
@@ -166,7 +159,7 @@ public:
virtual ~CCPRTest() {}
protected:
- virtual void customizeMockOptions(GrMockOptions*) {}
+ virtual void customizeOptions(GrMockOptions*, GrContextOptions*) {}
virtual void onRun(skiatest::Reporter* reporter, CCPRPathDrawer& ccpr) = 0;
sk_sp<GrContext> fMockContext;
@@ -213,8 +206,8 @@ class GrCCPRTest_cleanup : public CCPRTest {
DEF_CCPR_TEST(GrCCPRTest_cleanup)
class GrCCPRTest_cleanupWithTexAllocFail : public GrCCPRTest_cleanup {
- void customizeMockOptions(GrMockOptions* options) override {
- options->fFailTextureAllocations = true;
+ void customizeOptions(GrMockOptions* mockOptions, GrContextOptions*) override {
+ mockOptions->fFailTextureAllocations = true;
}
};
DEF_CCPR_TEST(GrCCPRTest_cleanupWithTexAllocFail)
@@ -281,6 +274,10 @@ DEF_CCPR_TEST(GrCCPRTest_parseEmptyPath)
// transformation matrices. We then vary the matrices independently by whole and partial pixels,
// and verify the caching behaved as expected.
class GrCCPRTest_cache : public CCPRTest {
+ void customizeOptions(GrMockOptions*, GrContextOptions* ctxOptions) override {
+ ctxOptions->fAllowPathMaskCaching = true;
+ }
+
void onRun(skiatest::Reporter* reporter, CCPRPathDrawer& ccpr) override {
static constexpr int kPathSize = 20;
SkRandom rand;
@@ -312,7 +309,7 @@ class GrCCPRTest_cache : public CCPRTest {
REPORTER_ASSERT(reporter, !stashedAtlasKey->isValid());
for (size_t i = 0; i < SK_ARRAY_COUNT(paths); ++i) {
- ccpr.drawPath(paths[i], matrices[i % 2], MarkVolatile::kNo);
+ ccpr.drawPath(paths[i], matrices[i % 2]);
}
ccpr.flush();
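
Note: customizeMockOptions() is generalized into customizeOptions() so a test can adjust the GrContextOptions as well; that is what lets GrCCPRTest_cache opt back into fAllowPathMaskCaching now that the harness defaults it to false. The MarkVolatile machinery seemingly existed to keep test paths out of the previously always-present path cache; with caching now opt-in per test, drawPath() can take the path by const reference and draw through the new testing-only entry point. The hook itself is the classic template method, reduced to a sketch (names hypothetical):

    struct TestHarness {
        virtual ~TestHarness() = default;

        void init() {
            GrMockOptions mockOptions;    // simulated GPU capabilities
            GrContextOptions ctxOptions;  // context-level feature flags
            this->customizeOptions(&mockOptions, &ctxOptions);
            // ... GrContext::MakeMock(&mockOptions, ctxOptions) ...
        }

        // No-op by default; each test overrides only the knobs it cares about.
        virtual void customizeOptions(GrMockOptions*, GrContextOptions*) {}
    };
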
diff --git a/tools/gpu/GrTest.cpp b/tools/gpu/GrTest.cpp
index a827442cd0..7ec66bbd68 100644
--- a/tools/gpu/GrTest.cpp
+++ b/tools/gpu/GrTest.cpp
@@ -304,6 +304,14 @@ GrPixelConfig GrBackendRenderTarget::testingOnly_getPixelConfig() const {
//////////////////////////////////////////////////////////////////////////////
+
+void GrCoverageCountingPathRenderer::testingOnly_drawPathDirectly(const DrawPathArgs& args) {
+ // Call onDrawPath() directly: We want to test paths that might fail onCanDrawPath() simply for
+ // performance reasons, and GrPathRenderer::drawPath() asserts that onCanDrawPath() succeeds.
+ // The test is responsible for not drawing any paths that CCPR cannot actually handle.
+ this->onDrawPath(args);
+}
+
const GrUniqueKey& GrCoverageCountingPathRenderer::testingOnly_getStashedAtlasKey() const {
return fStashedAtlasKey;
}
diff --git a/tools/skpbench/skpbench.cpp b/tools/skpbench/skpbench.cpp
index 25acb1c9f1..399bf6cdb2 100644
--- a/tools/skpbench/skpbench.cpp
+++ b/tools/skpbench/skpbench.cpp
@@ -68,6 +68,8 @@ static const char* header =
static const char* resultFormat =
"%8.4g %8.4g %8.4g %8.4g %6.3g%% %7li %9i %-5s %-6s %-9s %s";
+static constexpr int kNumFlushesToPrimeCache = 3;
+
struct Sample {
using duration = std::chrono::nanoseconds;
@@ -190,11 +192,14 @@ static void run_benchmark(const sk_gpu_test::FenceSync* fenceSync, SkCanvas* can
const Sample::duration sampleDuration = std::chrono::milliseconds(FLAGS_sampleMs);
const clock::duration benchDuration = std::chrono::milliseconds(FLAGS_duration);
- draw_skp_and_flush(canvas, skp); // draw1
+ draw_skp_and_flush(canvas, skp); // draw 1
GpuSync gpuSync(fenceSync);
- draw_skp_and_flush(canvas, skp); // draw2
- gpuSync.syncToPreviousFrame(); // waits for draw1 to finish (after draw2's cpu work is done).
+ for (int i = 1; i < kNumFlushesToPrimeCache; ++i) {
+ draw_skp_and_flush(canvas, skp); // draw N
+ // Waits for draw N-1 to finish (after draw N's cpu work is done).
+ gpuSync.syncToPreviousFrame();
+ }
clock::time_point now = clock::now();
const clock::time_point endTime = now + benchDuration;
@@ -231,10 +236,13 @@ static void run_gpu_time_benchmark(sk_gpu_test::GpuTimer* gpuTimer,
draw_skp_and_flush(canvas, skp);
GpuSync gpuSync(fenceSync);
- gpuTimer->queueStart();
- draw_skp_and_flush(canvas, skp);
- PlatformTimerQuery previousTime = gpuTimer->queueStop();
- gpuSync.syncToPreviousFrame();
+ PlatformTimerQuery previousTime = 0;
+ for (int i = 1; i < kNumFlushesToPrimeCache; ++i) {
+ gpuTimer->queueStart();
+ draw_skp_and_flush(canvas, skp);
+ previousTime = gpuTimer->queueStop();
+ gpuSync.syncToPreviousFrame();
+ }
clock::time_point now = clock::now();
const clock::time_point endTime = now + benchDuration;
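
Note: both benchmark loops now prime with kNumFlushesToPrimeCache flushes instead of exactly two, presumably because a cached path mask only becomes fully useful a couple of flushes in (one flush renders and stashes the atlas; a later flush can copy reusable paths out of it). The loop structure keeps the CPU one frame ahead of the GPU; a self-contained sketch of that pipelining idiom, with a hypothetical fence API:

    // CPU records frame N while the GPU is still executing frame N-1.
    struct Fence {
        void insert();  // place a fence after the current submission
        void wait();    // block until the fenced submission completes
    };

    void drawAndFlush();  // hypothetical stand-in for draw_skp_and_flush()

    void primeCaches(int numFlushes) {
        Fence fence[2];
        for (int i = 0; i < numFlushes; ++i) {
            drawAndFlush();                 // CPU work + submission for frame i
            fence[i % 2].insert();
            if (i > 0) {
                fence[(i - 1) % 2].wait();  // throttle: wait only on frame i-1
            }
        }
    }
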