author     Chris Dalton <csmartdalton@google.com>          2018-06-16 17:22:59 -0600
committer  Skia Commit-Bot <skia-commit-bot@chromium.org>  2018-06-18 15:32:48 +0000
commit     4c458b12f68d8704a297c1ec252127c77bdee595 (patch)
tree       588efa8ae1870e6d20107c371cc1077662c5ff15
parent     3567c14a41cd55860fcc836af32d8748c1e3c856 (diff)
ccpr: Use lazy proxies with GrCCAtlas
Bug: skia:
Change-Id: I576d9303d451352778de0425e3ecbc561331cd09
Reviewed-on: https://skia-review.googlesource.com/135362
Reviewed-by: Greg Daniel <egdaniel@google.com>
Commit-Queue: Chris Dalton <csmartdalton@google.com>
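For context, this is the shape of the fully-lazy-proxy pattern the CL moves GrCCAtlas to: the atlas creates its texture proxy up front with unspecified dimensions, and a real GrTexture is only allocated at flush time, once the atlas's final size is known. The sketch below is condensed from the GrCCAtlas constructor change in this diff; it is illustrative only, not a drop-in excerpt (surrounding class code and error handling are omitted).

    // Condensed from the new GrCCAtlas constructor in this CL (illustrative only).
    fTextureProxy = GrProxyProvider::MakeFullyLazyProxy(
            [this, pixelConfig](GrResourceProvider* resourceProvider) {
                if (!resourceProvider) {
                    return sk_sp<GrTexture>();  // Pre-instantiation pass: nothing to allocate yet.
                }
                // The atlas's final dimensions are only known at flush time, which is why
                // the proxy is "fully lazy" (no width/height specified up front).
                GrSurfaceDesc desc;
                desc.fFlags = kRenderTarget_GrSurfaceFlag;
                desc.fWidth = fWidth;
                desc.fHeight = fHeight;
                desc.fConfig = pixelConfig;
                return resourceProvider->createTexture(desc, SkBudgeted::kYes);
            },
            GrProxyProvider::Renderable::kYes, kTextureOrigin, pixelConfig, caps);

Ops can therefore hold a reference to the atlas proxy as soon as the atlas is created; the backing texture is only allocated when GrOnFlushResourceProvider::makeRenderTargetContext() instantiates the proxy right before the atlas content is rendered.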
-rw-r--r--  gm/windowrectangles.cpp                          |  3
-rw-r--r--  include/private/GrCCClipPath.h                   |  5
-rw-r--r--  src/gpu/GrClipStackClip.cpp                      |  9
-rw-r--r--  src/gpu/GrOnFlushResourceProvider.cpp            | 16
-rw-r--r--  src/gpu/GrProxyProvider.cpp                      | 23
-rw-r--r--  src/gpu/GrProxyProvider.h                        | 15
-rw-r--r--  src/gpu/GrReducedClip.cpp                        | 12
-rw-r--r--  src/gpu/GrReducedClip.h                          |  9
-rw-r--r--  src/gpu/ccpr/GrCCAtlas.cpp                       | 57
-rw-r--r--  src/gpu/ccpr/GrCCAtlas.h                         | 23
-rw-r--r--  src/gpu/ccpr/GrCCClipPath.cpp                    | 10
-rw-r--r--  src/gpu/ccpr/GrCCDrawPathsOp.cpp                 | 21
-rw-r--r--  src/gpu/ccpr/GrCCDrawPathsOp.h                   | 14
-rw-r--r--  src/gpu/ccpr/GrCCPerFlushResources.cpp           |  4
-rw-r--r--  src/gpu/ccpr/GrCoverageCountingPathRenderer.cpp  | 11
-rw-r--r--  src/gpu/ccpr/GrCoverageCountingPathRenderer.h    |  6
-rw-r--r--  tests/ClipStackTest.cpp                          | 12
-rw-r--r--  tests/GrCCPRTest.cpp                             |  7
-rw-r--r--  tests/LazyProxyTest.cpp                          | 48
-rw-r--r--  tests/OnFlushCallbackTest.cpp                    |  5
20 files changed, 164 insertions(+), 146 deletions(-)
diff --git a/gm/windowrectangles.cpp b/gm/windowrectangles.cpp
index 9e1b64e08d..14d10e251b 100644
--- a/gm/windowrectangles.cpp
+++ b/gm/windowrectangles.cpp
@@ -179,8 +179,7 @@ void WindowRectanglesMaskGM::onCoverClipStack(const SkClipStack& stack, SkCanvas
return;
}
- const GrReducedClip reducedClip(stack, SkRect::Make(kCoverRect), rtc->caps()->shaderCaps(),
- kNumWindows);
+ const GrReducedClip reducedClip(stack, SkRect::Make(kCoverRect), rtc->caps(), kNumWindows);
GrPaint paint;
if (GrFSAAType::kNone == rtc->fsaaType()) {
diff --git a/include/private/GrCCClipPath.h b/include/private/GrCCClipPath.h
index 7c39c45d90..3a4ea565fa 100644
--- a/include/private/GrCCClipPath.h
+++ b/include/private/GrCCClipPath.h
@@ -35,9 +35,8 @@ public:
}
bool isInitialized() const { return fAtlasLazyProxy != nullptr; }
- void init(GrProxyProvider* proxyProvider,
- const SkPath& deviceSpacePath, const SkIRect& accessRect,
- int rtWidth, int rtHeight);
+ void init(const SkPath& deviceSpacePath, const SkIRect& accessRect, int rtWidth, int rtHeight,
+ const GrCaps&);
void addAccess(const SkIRect& accessRect) {
SkASSERT(this->isInitialized());
diff --git a/src/gpu/GrClipStackClip.cpp b/src/gpu/GrClipStackClip.cpp
index 212aa364b5..04b8b8155e 100644
--- a/src/gpu/GrClipStackClip.cpp
+++ b/src/gpu/GrClipStackClip.cpp
@@ -192,8 +192,6 @@ bool GrClipStackClip::apply(GrContext* context, GrRenderTargetContext* renderTar
return true;
}
- GrProxyProvider* proxyProvider = context->contextPriv().proxyProvider();
- const auto* caps = context->contextPriv().caps()->shaderCaps();
int maxWindowRectangles = renderTargetContext->priv().maxWindowRectangles();
int maxAnalyticFPs = context->contextPriv().caps()->maxClipAnalyticFPs();
if (GrFSAAType::kNone != renderTargetContext->fsaaType()) {
@@ -208,8 +206,8 @@ bool GrClipStackClip::apply(GrContext* context, GrRenderTargetContext* renderTar
}
auto* ccpr = context->contextPriv().drawingManager()->getCoverageCountingPathRenderer();
- GrReducedClip reducedClip(*fStack, devBounds, caps, maxWindowRectangles, maxAnalyticFPs,
- ccpr ? maxAnalyticFPs : 0);
+ GrReducedClip reducedClip(*fStack, devBounds, context->contextPriv().caps(),
+ maxWindowRectangles, maxAnalyticFPs, ccpr ? maxAnalyticFPs : 0);
if (InitialState::kAllOut == reducedClip.initialState() &&
reducedClip.maskElements().isEmpty()) {
return false;
@@ -235,8 +233,7 @@ bool GrClipStackClip::apply(GrContext* context, GrRenderTargetContext* renderTar
// can cause a flush or otherwise change which opList our draw is going into.
uint32_t opListID = renderTargetContext->getOpList()->uniqueID();
int rtWidth = renderTargetContext->width(), rtHeight = renderTargetContext->height();
- if (auto clipFPs = reducedClip.finishAndDetachAnalyticFPs(ccpr, proxyProvider, opListID,
- rtWidth, rtHeight)) {
+ if (auto clipFPs = reducedClip.finishAndDetachAnalyticFPs(ccpr, opListID, rtWidth, rtHeight)) {
out->addCoverageFP(std::move(clipFPs));
}
diff --git a/src/gpu/GrOnFlushResourceProvider.cpp b/src/gpu/GrOnFlushResourceProvider.cpp
index ff56c33c12..535f825e57 100644
--- a/src/gpu/GrOnFlushResourceProvider.cpp
+++ b/src/gpu/GrOnFlushResourceProvider.cpp
@@ -58,6 +58,13 @@ sk_sp<GrRenderTargetContext> GrOnFlushResourceProvider::makeRenderTargetContext(
sk_sp<GrSurfaceProxy> proxy,
sk_sp<SkColorSpace> colorSpace,
const SkSurfaceProps* props) {
+ // Since this is at flush time and these won't be allocated for us by the GrResourceAllocator
+ // we have to manually ensure it is allocated here. The proxy had best have been created
+ // with the kNoPendingIO flag!
+ if (!this->instatiateProxy(proxy.get())) {
+ return nullptr;
+ }
+
sk_sp<GrRenderTargetContext> renderTargetContext(
fDrawingMgr->makeRenderTargetContext(std::move(proxy),
std::move(colorSpace),
@@ -67,15 +74,6 @@ sk_sp<GrRenderTargetContext> GrOnFlushResourceProvider::makeRenderTargetContext(
return nullptr;
}
- auto resourceProvider = fDrawingMgr->getContext()->contextPriv().resourceProvider();
-
- // Since this is at flush time and these won't be allocated for us by the GrResourceAllocator
- // we have to manually ensure it is allocated here. The proxy had best have been created
- // with the kNoPendingIO flag!
- if (!renderTargetContext->asSurfaceProxy()->instantiate(resourceProvider)) {
- return nullptr;
- }
-
renderTargetContext->discard();
return renderTargetContext;
diff --git a/src/gpu/GrProxyProvider.cpp b/src/gpu/GrProxyProvider.cpp
index a1f14df3e9..5814c7627a 100644
--- a/src/gpu/GrProxyProvider.cpp
+++ b/src/gpu/GrProxyProvider.cpp
@@ -603,15 +603,16 @@ sk_sp<GrRenderTargetProxy> GrProxyProvider::createLazyRenderTargetProxy(
std::move(callback), lazyType, desc, origin, fit, budgeted, surfaceFlags));
}
-sk_sp<GrTextureProxy> GrProxyProvider::createFullyLazyProxy(LazyInstantiateCallback&& callback,
- Renderable renderable,
- GrSurfaceOrigin origin,
- GrPixelConfig config) {
+sk_sp<GrTextureProxy> GrProxyProvider::MakeFullyLazyProxy(LazyInstantiateCallback&& callback,
+ Renderable renderable,
+ GrSurfaceOrigin origin,
+ GrPixelConfig config,
+ const GrCaps& caps) {
GrSurfaceDesc desc;
GrInternalSurfaceFlags surfaceFlags = GrInternalSurfaceFlags::kNoPendingIO;
if (Renderable::kYes == renderable) {
desc.fFlags = kRenderTarget_GrSurfaceFlag;
- if (fCaps->maxWindowRectangles() > 0) {
+ if (caps.maxWindowRectangles() > 0) {
surfaceFlags |= GrInternalSurfaceFlags::kWindowRectsSupport;
}
}
@@ -620,8 +621,16 @@ sk_sp<GrTextureProxy> GrProxyProvider::createFullyLazyProxy(LazyInstantiateCallb
desc.fConfig = config;
desc.fSampleCnt = 1;
- return this->createLazyProxy(std::move(callback), desc, origin, GrMipMapped::kNo,
- surfaceFlags, SkBackingFit::kApprox, SkBudgeted::kYes);
+ return sk_sp<GrTextureProxy>(
+ (Renderable::kYes == renderable)
+ ? new GrTextureRenderTargetProxy(std::move(callback),
+ LazyInstantiationType::kSingleUse, desc,
+ origin, GrMipMapped::kNo,
+ SkBackingFit::kApprox, SkBudgeted::kYes,
+ surfaceFlags)
+ : new GrTextureProxy(std::move(callback), LazyInstantiationType::kSingleUse,
+ desc, origin, GrMipMapped::kNo, SkBackingFit::kApprox,
+ SkBudgeted::kYes, surfaceFlags));
}
bool GrProxyProvider::IsFunctionallyExact(GrSurfaceProxy* proxy) {
diff --git a/src/gpu/GrProxyProvider.h b/src/gpu/GrProxyProvider.h
index c466c98475..64b9ac4355 100644
--- a/src/gpu/GrProxyProvider.h
+++ b/src/gpu/GrProxyProvider.h
@@ -178,20 +178,19 @@ public:
sk_sp<GrTextureProxy> createLazyProxy(LazyInstantiateCallback&&, const GrSurfaceDesc&,
GrSurfaceOrigin, GrMipMapped, SkBackingFit, SkBudgeted);
-
- /**
- * Fully lazy proxies have unspecified width and height. Methods that rely on those values
- * (e.g., width, height, getBoundsRect) should be avoided.
- */
- sk_sp<GrTextureProxy> createFullyLazyProxy(LazyInstantiateCallback&&,
- Renderable, GrSurfaceOrigin, GrPixelConfig);
-
sk_sp<GrRenderTargetProxy> createLazyRenderTargetProxy(LazyInstantiateCallback&&,
const GrSurfaceDesc&,
GrSurfaceOrigin origin,
GrInternalSurfaceFlags, Textureable,
GrMipMapped, SkBackingFit, SkBudgeted);
+ /**
+ * Fully lazy proxies have unspecified width and height. Methods that rely on those values
+ * (e.g., width, height, getBoundsRect) should be avoided.
+ */
+ static sk_sp<GrTextureProxy> MakeFullyLazyProxy(LazyInstantiateCallback&&, Renderable,
+ GrSurfaceOrigin, GrPixelConfig, const GrCaps&);
+
// 'proxy' is about to be used as a texture src or drawn to. This query can be used to
// determine if it is going to need a texture domain or a full clear.
static bool IsFunctionallyExact(GrSurfaceProxy* proxy);
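A rough caller-side sketch of the now-static factory, condensed from the tests/LazyProxyTest.cpp changes further down in this diff (illustrative only; `caps` stands for whatever GrCaps reference the caller already has, e.g. proxyProvider->caps()):

    // Illustrative caller of the static factory; mirrors tests/LazyProxyTest.cpp below.
    sk_sp<GrTextureProxy> proxy = GrProxyProvider::MakeFullyLazyProxy(
            [](GrResourceProvider* rp) {
                if (!rp) {
                    return sk_sp<GrTexture>();  // Pre-instantiation query pass.
                }
                GrSurfaceDesc desc;             // Dimensions chosen at instantiation time.
                desc.fWidth = 1234;
                desc.fHeight = 567;
                desc.fConfig = kRGB_565_GrPixelConfig;
                return rp->createTexture(desc, SkBudgeted::kYes);
            },
            GrProxyProvider::Renderable::kNo, kTopLeft_GrSurfaceOrigin,
            kRGB_565_GrPixelConfig, *caps);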
diff --git a/src/gpu/GrReducedClip.cpp b/src/gpu/GrReducedClip.cpp
index e6cd6b36fe..2377846153 100644
--- a/src/gpu/GrReducedClip.cpp
+++ b/src/gpu/GrReducedClip.cpp
@@ -34,7 +34,7 @@
* take a rect in case the caller knows a bound on what is to be drawn through this clip.
*/
GrReducedClip::GrReducedClip(const SkClipStack& stack, const SkRect& queryBounds,
- const GrShaderCaps* caps, int maxWindowRectangles, int maxAnalyticFPs,
+ const GrCaps* caps, int maxWindowRectangles, int maxAnalyticFPs,
int maxCCPRClipPaths)
: fCaps(caps)
, fMaxWindowRectangles(maxWindowRectangles)
@@ -630,7 +630,8 @@ GrReducedClip::ClipResult GrReducedClip::addAnalyticFP(const SkRRect& deviceSpac
return ClipResult::kNotClipped;
}
- if (auto fp = GrRRectEffect::Make(GetClipEdgeType(invert, aa), deviceSpaceRRect, *fCaps)) {
+ if (auto fp = GrRRectEffect::Make(GetClipEdgeType(invert, aa), deviceSpaceRRect,
+ *fCaps->shaderCaps())) {
fAnalyticFPs.push_back(std::move(fp));
return ClipResult::kClipped;
}
@@ -956,8 +957,7 @@ bool GrReducedClip::drawStencilClipMask(GrContext* context,
}
std::unique_ptr<GrFragmentProcessor> GrReducedClip::finishAndDetachAnalyticFPs(
- GrCoverageCountingPathRenderer* ccpr, GrProxyProvider* proxyProvider, uint32_t opListID,
- int rtWidth, int rtHeight) {
+ GrCoverageCountingPathRenderer* ccpr, uint32_t opListID, int rtWidth, int rtHeight) {
// Make sure finishAndDetachAnalyticFPs hasn't been called already.
SkDEBUGCODE(for (const auto& fp : fAnalyticFPs) { SkASSERT(fp); })
@@ -966,8 +966,8 @@ std::unique_ptr<GrFragmentProcessor> GrReducedClip::finishAndDetachAnalyticFPs(
for (const SkPath& ccprClipPath : fCCPRClipPaths) {
SkASSERT(ccpr);
SkASSERT(fHasScissor);
- auto fp = ccpr->makeClipProcessor(proxyProvider, opListID, ccprClipPath, fScissor,
- rtWidth, rtHeight);
+ auto fp = ccpr->makeClipProcessor(opListID, ccprClipPath, fScissor, rtWidth, rtHeight,
+ *fCaps);
fAnalyticFPs.push_back(std::move(fp));
}
fCCPRClipPaths.reset();
diff --git a/src/gpu/GrReducedClip.h b/src/gpu/GrReducedClip.h
index 87ef3461e5..fcdc66c843 100644
--- a/src/gpu/GrReducedClip.h
+++ b/src/gpu/GrReducedClip.h
@@ -26,7 +26,7 @@ public:
using Element = SkClipStack::Element;
using ElementList = SkTLList<SkClipStack::Element, 16>;
- GrReducedClip(const SkClipStack&, const SkRect& queryBounds, const GrShaderCaps* caps,
+ GrReducedClip(const SkClipStack&, const SkRect& queryBounds, const GrCaps* caps,
int maxWindowRectangles = 0, int maxAnalyticFPs = 0, int maxCCPRClipPaths = 0);
enum class InitialState : bool {
@@ -97,9 +97,8 @@ public:
* may cause flushes or otherwise change which opList the actual draw is going into.
*/
std::unique_ptr<GrFragmentProcessor> finishAndDetachAnalyticFPs(GrCoverageCountingPathRenderer*,
- GrProxyProvider*,
- uint32_t opListID,
- int rtWidth, int rtHeight);
+ uint32_t opListID, int rtWidth,
+ int rtHeight);
private:
void walkStack(const SkClipStack&, const SkRect& queryBounds);
@@ -132,7 +131,7 @@ private:
void makeEmpty();
- const GrShaderCaps* fCaps;
+ const GrCaps* fCaps;
const int fMaxWindowRectangles;
const int fMaxAnalyticFPs;
const int fMaxCCPRClipPaths;
diff --git a/src/gpu/ccpr/GrCCAtlas.cpp b/src/gpu/ccpr/GrCCAtlas.cpp
index cbf6993f04..94a37e810f 100644
--- a/src/gpu/ccpr/GrCCAtlas.cpp
+++ b/src/gpu/ccpr/GrCCAtlas.cpp
@@ -9,8 +9,10 @@
#include "GrCaps.h"
#include "GrOnFlushResourceProvider.h"
+#include "GrProxyProvider.h"
#include "GrRectanizer_skyline.h"
#include "GrRenderTargetContext.h"
+#include "GrTexture.h"
#include "GrTextureProxy.h"
#include "SkMakeUnique.h"
#include "SkMathPriv.h"
@@ -44,9 +46,12 @@ private:
GrRectanizerSkyline fRectanizer;
};
-GrCCAtlas::GrCCAtlas(const Specs& specs)
+GrCCAtlas::GrCCAtlas(GrPixelConfig pixelConfig, const Specs& specs, const GrCaps& caps)
: fMaxTextureSize(SkTMax(SkTMax(specs.fMinHeight, specs.fMinWidth),
specs.fMaxPreferredTextureSize)) {
+ // Caller should have cropped any paths to the destination render target instead of asking for
+ // an atlas larger than maxRenderTargetSize.
+ SkASSERT(fMaxTextureSize <= caps.maxTextureSize());
SkASSERT(specs.fMaxPreferredTextureSize > 0);
// Begin with the first pow2 dimensions whose area is theoretically large enough to contain the
@@ -66,14 +71,28 @@ GrCCAtlas::GrCCAtlas(const Specs& specs)
}
fTopNode = skstd::make_unique<Node>(nullptr, 0, 0, fWidth, fHeight);
+
+ fTextureProxy = GrProxyProvider::MakeFullyLazyProxy(
+ [this, pixelConfig](GrResourceProvider* resourceProvider) {
+ if (!resourceProvider) {
+ return sk_sp<GrTexture>();
+ }
+ GrSurfaceDesc desc;
+ desc.fFlags = kRenderTarget_GrSurfaceFlag;
+ desc.fWidth = fWidth;
+ desc.fHeight = fHeight;
+ desc.fConfig = pixelConfig;
+ return resourceProvider->createTexture(desc, SkBudgeted::kYes);
+ },
+ GrProxyProvider::Renderable::kYes, kTextureOrigin, pixelConfig, caps);
}
GrCCAtlas::~GrCCAtlas() {
}
bool GrCCAtlas::addRect(const SkIRect& devIBounds, SkIVector* offset) {
- // This can't be called anymore once makeClearedTextureProxy() has been called.
- SkASSERT(!fTextureProxy);
+ // This can't be called anymore once makeRenderTargetContext() has been called.
+ SkASSERT(!fTextureProxy->priv().isInstantiated());
SkIPoint16 location;
if (!this->internalPlaceRect(devIBounds.width(), devIBounds.height(), &location)) {
@@ -112,21 +131,19 @@ bool GrCCAtlas::internalPlaceRect(int w, int h, SkIPoint16* loc) {
return true;
}
-sk_sp<GrRenderTargetContext> GrCCAtlas::initInternalTextureProxy(
- GrOnFlushResourceProvider* onFlushRP, GrPixelConfig config) {
- SkASSERT(!fTextureProxy);
- // Caller should have cropped any paths to the destination render target instead of asking for
- // an atlas larger than maxRenderTargetSize.
- SkASSERT(SkTMax(fHeight, fWidth) <= fMaxTextureSize);
+void GrCCAtlas::setUserBatchID(int id) {
+ // This can't be called anymore once makeRenderTargetContext() has been called.
+ SkASSERT(!fTextureProxy->priv().isInstantiated());
+ fUserBatchID = id;
+}
+
+sk_sp<GrRenderTargetContext> GrCCAtlas::makeRenderTargetContext(
+ GrOnFlushResourceProvider* onFlushRP) {
+ SkASSERT(!fTextureProxy->priv().isInstantiated()); // This method should only be called once.
SkASSERT(fMaxTextureSize <= onFlushRP->caps()->maxRenderTargetSize());
- GrSurfaceDesc desc;
- desc.fFlags = kRenderTarget_GrSurfaceFlag;
- desc.fWidth = fWidth;
- desc.fHeight = fHeight;
- desc.fConfig = config;
sk_sp<GrRenderTargetContext> rtc =
- onFlushRP->makeRenderTargetContext(desc, kTextureOrigin, nullptr, nullptr);
+ onFlushRP->makeRenderTargetContext(fTextureProxy, nullptr, nullptr);
if (!rtc) {
SkDebugf("WARNING: failed to allocate a %ix%i atlas. Some paths will not be drawn.\n",
fWidth, fHeight);
@@ -135,20 +152,18 @@ sk_sp<GrRenderTargetContext> GrCCAtlas::initInternalTextureProxy(
SkIRect clearRect = SkIRect::MakeSize(fDrawBounds);
rtc->clear(&clearRect, 0, GrRenderTargetContext::CanClearFullscreen::kYes);
-
- fTextureProxy = sk_ref_sp(rtc->asTextureProxy());
return rtc;
}
-GrCCAtlas* GrCCAtlasStack::addRect(const SkIRect& devIBounds, SkIVector* offset) {
+GrCCAtlas* GrCCAtlasStack::addRect(const SkIRect& devIBounds, SkIVector* devToAtlasOffset) {
GrCCAtlas* retiredAtlas = nullptr;
- if (fAtlases.empty() || !fAtlases.back().addRect(devIBounds, offset)) {
+ if (fAtlases.empty() || !fAtlases.back().addRect(devIBounds, devToAtlasOffset)) {
// The retired atlas is out of room and can't grow any bigger.
retiredAtlas = !fAtlases.empty() ? &fAtlases.back() : nullptr;
- fAtlases.emplace_back(fSpecs);
+ fAtlases.emplace_back(fPixelConfig, fSpecs, *fCaps);
SkASSERT(devIBounds.width() <= fSpecs.fMinWidth);
SkASSERT(devIBounds.height() <= fSpecs.fMinHeight);
- SkAssertResult(fAtlases.back().addRect(devIBounds, offset));
+ SkAssertResult(fAtlases.back().addRect(devIBounds, devToAtlasOffset));
}
return retiredAtlas;
}
diff --git a/src/gpu/ccpr/GrCCAtlas.h b/src/gpu/ccpr/GrCCAtlas.h
index d4a07f54a7..6d82728050 100644
--- a/src/gpu/ccpr/GrCCAtlas.h
+++ b/src/gpu/ccpr/GrCCAtlas.h
@@ -43,9 +43,11 @@ public:
void accountForSpace(int width, int height);
};
- GrCCAtlas(const Specs&);
+ GrCCAtlas(GrPixelConfig, const Specs&, const GrCaps&);
~GrCCAtlas();
+ GrTextureProxy* textureProxy() const { return fTextureProxy.get(); }
+
// Attempts to add a rect to the atlas. If successful, returns the integer offset from
// device-space pixels where the path will be drawn, to atlas pixels where its mask resides.
bool addRect(const SkIRect& devIBounds, SkIVector* atlasOffset);
@@ -53,15 +55,13 @@ public:
// This is an optional space for the caller to jot down which user-defined batch to use when
// they render the content of this atlas.
- void setUserBatchID(int id) { SkASSERT(!fTextureProxy); fUserBatchID = id; }
+ void setUserBatchID(int id);
int getUserBatchID() const { return fUserBatchID; }
- // Creates our texture proxy for the atlas and returns a pre-cleared GrRenderTargetContext that
- // the caller may use to render the content. After this call, it is no longer valid to call
- // addRect() or setUserBatchID().
- sk_sp<GrRenderTargetContext> initInternalTextureProxy(GrOnFlushResourceProvider*,
- GrPixelConfig);
- GrTextureProxy* textureProxy() const { return fTextureProxy.get(); }
+ // Instantiates our texture proxy for the atlas and returns a pre-cleared GrRenderTargetContext
+ // that the caller may use to render the content. After this call, it is no longer valid to call
+ // addRect(), setUserBatchID(), or this method again.
+ sk_sp<GrRenderTargetContext> makeRenderTargetContext(GrOnFlushResourceProvider*);
private:
class Node;
@@ -83,7 +83,8 @@ private:
*/
class GrCCAtlasStack {
public:
- GrCCAtlasStack(const GrCCAtlas::Specs& specs) : fSpecs(specs) {}
+ GrCCAtlasStack(GrPixelConfig pixelConfig, const GrCCAtlas::Specs& specs, const GrCaps* caps)
+ : fPixelConfig(pixelConfig), fSpecs(specs), fCaps(caps) {}
bool empty() const { return fAtlases.empty(); }
const GrCCAtlas& front() const { SkASSERT(!this->empty()); return fAtlases.front(); }
@@ -106,10 +107,12 @@ public:
// atlas, so it was retired and a new one was added to the stack. The return value is the
// newly-retired atlas. The caller should call setUserBatchID() on the retired atlas before
// moving on.
- GrCCAtlas* addRect(const SkIRect& devIBounds, SkIVector* offset);
+ GrCCAtlas* addRect(const SkIRect& devIBounds, SkIVector* devToAtlasOffset);
private:
+ const GrPixelConfig fPixelConfig;
const GrCCAtlas::Specs fSpecs;
+ const GrCaps* const fCaps;
GrSTAllocator<4, GrCCAtlas> fAtlases;
};
diff --git a/src/gpu/ccpr/GrCCClipPath.cpp b/src/gpu/ccpr/GrCCClipPath.cpp
index 77674e4539..61d58d4980 100644
--- a/src/gpu/ccpr/GrCCClipPath.cpp
+++ b/src/gpu/ccpr/GrCCClipPath.cpp
@@ -12,12 +12,11 @@
#include "GrTexture.h"
#include "ccpr/GrCCPerFlushResources.h"
-void GrCCClipPath::init(GrProxyProvider* proxyProvider,
- const SkPath& deviceSpacePath, const SkIRect& accessRect,
- int rtWidth, int rtHeight) {
+void GrCCClipPath::init(const SkPath& deviceSpacePath, const SkIRect& accessRect, int rtWidth,
+ int rtHeight, const GrCaps& caps) {
SkASSERT(!this->isInitialized());
- fAtlasLazyProxy = proxyProvider->createFullyLazyProxy(
+ fAtlasLazyProxy = GrProxyProvider::MakeFullyLazyProxy(
[this](GrResourceProvider* resourceProvider) {
if (!resourceProvider) {
return sk_sp<GrTexture>();
@@ -41,7 +40,8 @@ void GrCCClipPath::init(GrProxyProvider* proxyProvider,
return sk_ref_sp(textureProxy->priv().peekTexture());
},
- GrProxyProvider::Renderable::kYes, kTopLeft_GrSurfaceOrigin, kAlpha_half_GrPixelConfig);
+ GrProxyProvider::Renderable::kYes, kTopLeft_GrSurfaceOrigin, kAlpha_half_GrPixelConfig,
+ caps);
fDeviceSpacePath = deviceSpacePath;
fDeviceSpacePath.getBounds().roundOut(&fPathDevIBounds);
diff --git a/src/gpu/ccpr/GrCCDrawPathsOp.cpp b/src/gpu/ccpr/GrCCDrawPathsOp.cpp
index 16a2c663a5..c38dd76209 100644
--- a/src/gpu/ccpr/GrCCDrawPathsOp.cpp
+++ b/src/gpu/ccpr/GrCCDrawPathsOp.cpp
@@ -124,7 +124,8 @@ void GrCCDrawPathsOp::setupResources(GrCCPerFlushResources* resources,
}
if (currentAtlas != atlas) {
if (currentAtlas) {
- this->addAtlasBatch(currentAtlas, resources->nextPathInstanceIdx());
+ this->recordInstanceRange(currentAtlas->textureProxy(),
+ resources->nextPathInstanceIdx());
}
currentAtlas = atlas;
}
@@ -135,7 +136,7 @@ void GrCCDrawPathsOp::setupResources(GrCCPerFlushResources* resources,
SkASSERT(resources->nextPathInstanceIdx() == fBaseInstance + fNumDraws - fNumSkippedInstances);
if (currentAtlas) {
- this->addAtlasBatch(currentAtlas, resources->nextPathInstanceIdx());
+ this->recordInstanceRange(currentAtlas->textureProxy(), resources->nextPathInstanceIdx());
}
}
@@ -159,20 +160,16 @@ void GrCCDrawPathsOp::onExecute(GrOpFlushState* flushState) {
int baseInstance = fBaseInstance;
- for (int i = 0; i < fAtlasBatches.count(); baseInstance = fAtlasBatches[i++].fEndInstanceIdx) {
- const AtlasBatch& batch = fAtlasBatches[i];
- SkASSERT(batch.fEndInstanceIdx > baseInstance);
+ for (const InstanceRange& range : fInstanceRanges) {
+ SkASSERT(range.fEndInstanceIdx > baseInstance);
- if (!batch.fAtlas->textureProxy()) {
- continue; // Atlas failed to allocate.
- }
-
- GrCCPathProcessor pathProc(flushState->resourceProvider(),
- sk_ref_sp(batch.fAtlas->textureProxy()),
+ GrCCPathProcessor pathProc(flushState->resourceProvider(), sk_ref_sp(range.fAtlasProxy),
fViewMatrixIfUsingLocalCoords);
pathProc.drawPaths(flushState, pipeline, resources->indexBuffer(),
resources->vertexBuffer(), resources->instanceBuffer(),
- baseInstance, batch.fEndInstanceIdx, this->bounds());
+ baseInstance, range.fEndInstanceIdx, this->bounds());
+
+ baseInstance = range.fEndInstanceIdx;
}
SkASSERT(baseInstance == fBaseInstance + fNumDraws - fNumSkippedInstances);
diff --git a/src/gpu/ccpr/GrCCDrawPathsOp.h b/src/gpu/ccpr/GrCCDrawPathsOp.h
index 87cd50e1e0..071a21a614 100644
--- a/src/gpu/ccpr/GrCCDrawPathsOp.h
+++ b/src/gpu/ccpr/GrCCDrawPathsOp.h
@@ -54,16 +54,16 @@ private:
GrCCDrawPathsOp(const SkIRect& looseClippedIBounds, const SkMatrix&, const SkPath&,
const SkRect& devBounds, GrPaint&&);
- struct AtlasBatch {
- const GrCCAtlas* fAtlas;
+ struct InstanceRange {
+ const GrTextureProxy* fAtlasProxy;
int fEndInstanceIdx;
};
- void addAtlasBatch(const GrCCAtlas* atlas, int endInstanceIdx) {
+ void recordInstanceRange(const GrTextureProxy* atlasProxy, int endInstanceIdx) {
SkASSERT(endInstanceIdx > fBaseInstance);
- SkASSERT(fAtlasBatches.empty() ||
- endInstanceIdx > fAtlasBatches.back().fEndInstanceIdx);
- fAtlasBatches.push_back() = {atlas, endInstanceIdx};
+ SkASSERT(fInstanceRanges.empty() ||
+ endInstanceIdx > fInstanceRanges.back().fEndInstanceIdx);
+ fInstanceRanges.push_back() = {atlasProxy, endInstanceIdx};
}
const SkMatrix fViewMatrixIfUsingLocalCoords;
@@ -84,7 +84,7 @@ private:
GrProcessorSet fProcessors;
int fBaseInstance;
- SkSTArray<1, AtlasBatch, true> fAtlasBatches;
+ SkSTArray<1, InstanceRange, true> fInstanceRanges;
SkDEBUGCODE(int fNumSkippedInstances = 0);
};
diff --git a/src/gpu/ccpr/GrCCPerFlushResources.cpp b/src/gpu/ccpr/GrCCPerFlushResources.cpp
index 5b3dec0622..4738a257fe 100644
--- a/src/gpu/ccpr/GrCCPerFlushResources.cpp
+++ b/src/gpu/ccpr/GrCCPerFlushResources.cpp
@@ -69,7 +69,7 @@ private:
GrCCPerFlushResources::GrCCPerFlushResources(GrOnFlushResourceProvider* onFlushRP,
const GrCCPerFlushResourceSpecs& specs)
: fPathParser(specs.fNumRenderedPaths + specs.fNumClipPaths, specs.fParsingPathStats)
- , fAtlasStack(specs.fAtlasSpecs)
+ , fAtlasStack(kAlpha_half_GrPixelConfig, specs.fAtlasSpecs, onFlushRP->caps())
, fIndexBuffer(GrCCPathProcessor::FindIndexBuffer(onFlushRP))
, fVertexBuffer(GrCCPathProcessor::FindVertexBuffer(onFlushRP))
, fInstanceBuffer(onFlushRP->makeBuffer(kVertex_GrBufferType,
@@ -170,7 +170,7 @@ bool GrCCPerFlushResources::finalize(GrOnFlushResourceProvider* onFlushRP,
// Render the atlas(es).
for (GrCCAtlasStack::Iter atlas(fAtlasStack); atlas.next();) {
- if (auto rtc = atlas->initInternalTextureProxy(onFlushRP, kAlpha_half_GrPixelConfig)) {
+ if (auto rtc = atlas->makeRenderTargetContext(onFlushRP)) {
auto op = RenderAtlasOp::Make(rtc->surfPriv().getContext(), sk_ref_sp(this),
atlas->getUserBatchID(), atlas->drawBounds());
rtc->addDrawOp(GrNoClip(), std::move(op));
diff --git a/src/gpu/ccpr/GrCoverageCountingPathRenderer.cpp b/src/gpu/ccpr/GrCoverageCountingPathRenderer.cpp
index 727649bdd9..eaab184ffd 100644
--- a/src/gpu/ccpr/GrCoverageCountingPathRenderer.cpp
+++ b/src/gpu/ccpr/GrCoverageCountingPathRenderer.cpp
@@ -147,9 +147,8 @@ void GrCoverageCountingPathRenderer::recordOp(std::unique_ptr<GrCCDrawPathsOp> o
}
std::unique_ptr<GrFragmentProcessor> GrCoverageCountingPathRenderer::makeClipProcessor(
- GrProxyProvider* proxyProvider,
- uint32_t opListID, const SkPath& deviceSpacePath, const SkIRect& accessRect,
- int rtWidth, int rtHeight) {
+ uint32_t opListID, const SkPath& deviceSpacePath, const SkIRect& accessRect, int rtWidth,
+ int rtHeight, const GrCaps& caps) {
using MustCheckBounds = GrCCClipProcessor::MustCheckBounds;
SkASSERT(!fFlushing);
@@ -162,11 +161,11 @@ std::unique_ptr<GrFragmentProcessor> GrCoverageCountingPathRenderer::makeClipPro
if (SkTMax(pathDevBounds.height(), pathDevBounds.width()) > kPathCropThreshold) {
// The path is too large. Crop it or analytic AA can run out of fp32 precision.
SkPath croppedPath;
- int maxRTSize = proxyProvider->caps()->maxRenderTargetSize();
+ int maxRTSize = caps.maxRenderTargetSize();
crop_path(deviceSpacePath, SkIRect::MakeWH(maxRTSize, maxRTSize), &croppedPath);
- clipPath.init(proxyProvider, croppedPath, accessRect, rtWidth, rtHeight);
+ clipPath.init(croppedPath, accessRect, rtWidth, rtHeight, caps);
} else {
- clipPath.init(proxyProvider, deviceSpacePath, accessRect, rtWidth, rtHeight);
+ clipPath.init(deviceSpacePath, accessRect, rtWidth, rtHeight, caps);
}
} else {
clipPath.addAccess(accessRect);
diff --git a/src/gpu/ccpr/GrCoverageCountingPathRenderer.h b/src/gpu/ccpr/GrCoverageCountingPathRenderer.h
index abc824b552..8cb9713bcb 100644
--- a/src/gpu/ccpr/GrCoverageCountingPathRenderer.h
+++ b/src/gpu/ccpr/GrCoverageCountingPathRenderer.h
@@ -60,10 +60,10 @@ public:
CanDrawPath onCanDrawPath(const CanDrawPathArgs& args) const override;
bool onDrawPath(const DrawPathArgs&) override;
- std::unique_ptr<GrFragmentProcessor> makeClipProcessor(GrProxyProvider*, uint32_t oplistID,
+ std::unique_ptr<GrFragmentProcessor> makeClipProcessor(uint32_t oplistID,
const SkPath& deviceSpacePath,
- const SkIRect& accessRect,
- int rtWidth, int rtHeight);
+ const SkIRect& accessRect, int rtWidth,
+ int rtHeight, const GrCaps&);
// GrOnFlushCallbackObject overrides.
void preFlush(GrOnFlushResourceProvider*, const uint32_t* opListIDs, int numOpListIDs,
diff --git a/tests/ClipStackTest.cpp b/tests/ClipStackTest.cpp
index 70d7dcacee..05d705f5c9 100644
--- a/tests/ClipStackTest.cpp
+++ b/tests/ClipStackTest.cpp
@@ -1042,7 +1042,7 @@ static void test_reduced_clip_stack(skiatest::Reporter* reporter) {
}
auto context = GrContext::MakeMock(nullptr);
- const auto* caps = context->contextPriv().caps()->shaderCaps();
+ const GrCaps* caps = context->contextPriv().caps();
// Zero the memory we will new the GrReducedClip into. This ensures the elements gen ID
// will be kInvalidGenID if left uninitialized.
@@ -1113,7 +1113,7 @@ static void test_reduced_clip_stack_genid(skiatest::Reporter* reporter) {
SkRect bounds = SkRect::MakeXYWH(0, 0, 100, 100);
auto context = GrContext::MakeMock(nullptr);
- const auto* caps = context->contextPriv().caps()->shaderCaps();
+ const GrCaps* caps = context->contextPriv().caps();
SkAlignedSTStorage<1, GrReducedClip> storage;
memset(storage.get(), 0, sizeof(GrReducedClip));
@@ -1202,7 +1202,7 @@ static void test_reduced_clip_stack_genid(skiatest::Reporter* reporter) {
#undef XYWH
#undef IXYWH
auto context = GrContext::MakeMock(nullptr);
- const auto* caps = context->contextPriv().caps()->shaderCaps();
+ const GrCaps* caps = context->contextPriv().caps();
for (size_t i = 0; i < SK_ARRAY_COUNT(testCases); ++i) {
const GrReducedClip reduced(stack, testCases[i].testBounds, caps);
@@ -1230,7 +1230,7 @@ static void test_reduced_clip_stack_no_aa_crash(skiatest::Reporter* reporter) {
SkRect bounds = SkRect::MakeXYWH(0, 0, 100, 100);
auto context = GrContext::MakeMock(nullptr);
- const auto* caps = context->contextPriv().caps()->shaderCaps();
+ const GrCaps* caps = context->contextPriv().caps();
// At the time, this would crash.
const GrReducedClip reduced(stack, bounds, caps);
@@ -1249,7 +1249,7 @@ static void test_aa_query(skiatest::Reporter* reporter, const SkString& testName
const SkRect& preXformQuery, ClipMethod expectedMethod,
int numExpectedElems = 0) {
auto context = GrContext::MakeMock(nullptr);
- const auto* caps = context->contextPriv().caps()->shaderCaps();
+ const GrCaps* caps = context->contextPriv().caps();
SkRect queryBounds;
queryXform.mapRect(&queryBounds, preXformQuery);
@@ -1411,7 +1411,7 @@ static void test_tiny_query_bounds_assertion_bug(skiatest::Reporter* reporter) {
pathStack.clipPath(clipPath, SkMatrix::I(), kIntersect_SkClipOp, true);
auto context = GrContext::MakeMock(nullptr);
- const auto* caps = context->contextPriv().caps()->shaderCaps();
+ const GrCaps* caps = context->contextPriv().caps();
for (const SkClipStack& stack : {rectStack, pathStack}) {
for (SkRect queryBounds : {SkRect::MakeXYWH(53, 60, GrClip::kBoundsTolerance, 1000),
diff --git a/tests/GrCCPRTest.cpp b/tests/GrCCPRTest.cpp
index 35e3a1fde7..bbcf6706a7 100644
--- a/tests/GrCCPRTest.cpp
+++ b/tests/GrCCPRTest.cpp
@@ -33,11 +33,10 @@ public:
private:
bool apply(GrContext* context, GrRenderTargetContext* rtc, bool, bool, GrAppliedClip* out,
SkRect* bounds) const override {
- GrProxyProvider* proxyProvider = context->contextPriv().proxyProvider();
- out->addCoverageFP(fCCPR->makeClipProcessor(proxyProvider,
- rtc->priv().testingOnly_getOpListID(), fPath,
+ out->addCoverageFP(fCCPR->makeClipProcessor(rtc->priv().testingOnly_getOpListID(), fPath,
SkIRect::MakeWH(rtc->width(), rtc->height()),
- rtc->width(), rtc->height()));
+ rtc->width(), rtc->height(),
+ *context->contextPriv().caps()));
return true;
}
bool quickContains(const SkRect&) const final { return false; }
diff --git a/tests/LazyProxyTest.cpp b/tests/LazyProxyTest.cpp
index c0dcbf9647..9349457be4 100644
--- a/tests/LazyProxyTest.cpp
+++ b/tests/LazyProxyTest.cpp
@@ -76,26 +76,30 @@ public:
Op(GrProxyProvider* proxyProvider, LazyProxyTest* test, bool nullTexture)
: GrDrawOp(ClassID()), fTest(test) {
- fProxy = proxyProvider->createFullyLazyProxy([this, nullTexture](
- GrResourceProvider* rp) {
- if (!rp) {
- return sk_sp<GrTexture>();
- }
- REPORTER_ASSERT(fTest->fReporter, !fTest->fHasOpTexture);
- fTest->fHasOpTexture = true;
- if (nullTexture) {
- return sk_sp<GrTexture>();
- } else {
- GrSurfaceDesc desc;
- desc.fWidth = 1234;
- desc.fHeight = 567;
- desc.fConfig = kRGB_565_GrPixelConfig;
- sk_sp<GrTexture> texture = rp->createTexture(desc, SkBudgeted::kYes);
- REPORTER_ASSERT(fTest->fReporter, texture);
- return texture;
- }
- }, GrProxyProvider::Renderable::kNo, kTopLeft_GrSurfaceOrigin, kRGB_565_GrPixelConfig);
- this->setBounds(SkRectPriv::MakeLargest(), GrOp::HasAABloat::kNo, GrOp::IsZeroArea::kNo);
+ fProxy = GrProxyProvider::MakeFullyLazyProxy(
+ [this, nullTexture](GrResourceProvider* rp) {
+ if (!rp) {
+ return sk_sp<GrTexture>();
+ }
+ REPORTER_ASSERT(fTest->fReporter, !fTest->fHasOpTexture);
+ fTest->fHasOpTexture = true;
+ if (nullTexture) {
+ return sk_sp<GrTexture>();
+ } else {
+ GrSurfaceDesc desc;
+ desc.fWidth = 1234;
+ desc.fHeight = 567;
+ desc.fConfig = kRGB_565_GrPixelConfig;
+ sk_sp<GrTexture> texture = rp->createTexture(desc, SkBudgeted::kYes);
+ REPORTER_ASSERT(fTest->fReporter, texture);
+ return texture;
+ }
+ },
+ GrProxyProvider::Renderable::kNo, kTopLeft_GrSurfaceOrigin,
+ kRGB_565_GrPixelConfig, *proxyProvider->caps());
+
+ this->setBounds(SkRectPriv::MakeLargest(), GrOp::HasAABloat::kNo,
+ GrOp::IsZeroArea::kNo);
}
const char* name() const override { return "LazyProxyTest::Op"; }
@@ -118,7 +122,7 @@ public:
, fProxyProvider(proxyProvider)
, fTest(test)
, fAtlas(atlas) {
- fLazyProxy = proxyProvider->createFullyLazyProxy(
+ fLazyProxy = GrProxyProvider::MakeFullyLazyProxy(
[this](GrResourceProvider* rp) {
if (!rp) {
return sk_sp<GrTexture>();
@@ -130,7 +134,7 @@ public:
},
GrProxyProvider::Renderable::kYes,
kBottomLeft_GrSurfaceOrigin,
- kAlpha_half_GrPixelConfig);
+ kAlpha_half_GrPixelConfig, *proxyProvider->caps());
fAccess.reset(fLazyProxy, GrSamplerState::Filter::kNearest,
GrSamplerState::WrapMode::kClamp, kFragment_GrShaderFlag);
this->addTextureSampler(&fAccess);
diff --git a/tests/OnFlushCallbackTest.cpp b/tests/OnFlushCallbackTest.cpp
index 7932fdc458..e2f6fbf9e1 100644
--- a/tests/OnFlushCallbackTest.cpp
+++ b/tests/OnFlushCallbackTest.cpp
@@ -300,7 +300,7 @@ public:
return fAtlasProxy;
}
- fAtlasProxy = proxyProvider->createFullyLazyProxy(
+ fAtlasProxy = GrProxyProvider::MakeFullyLazyProxy(
[](GrResourceProvider* resourceProvider) {
if (!resourceProvider) {
return sk_sp<GrTexture>();
@@ -319,7 +319,8 @@ public:
},
GrProxyProvider::Renderable::kYes,
kBottomLeft_GrSurfaceOrigin,
- kRGBA_8888_GrPixelConfig);
+ kRGBA_8888_GrPixelConfig,
+ *proxyProvider->caps());
return fAtlasProxy;
}