aboutsummaryrefslogtreecommitdiffhomepage
path: root/src/gpu
diff options
context:
space:
mode:
Diffstat (limited to 'src/gpu')
-rw-r--r--src/gpu/GrDrawingManager.cpp9
-rw-r--r--src/gpu/GrDrawingManager.h6
-rw-r--r--src/gpu/GrGpu.cpp5
-rw-r--r--src/gpu/GrGpu.h13
-rw-r--r--src/gpu/GrRenderTargetContext.cpp28
-rw-r--r--src/gpu/GrRenderTargetOpList.cpp2
-rw-r--r--src/gpu/gl/GrGLGpu.cpp8
-rw-r--r--src/gpu/gl/GrGLGpu.h1
-rw-r--r--src/gpu/instanced/GLInstancedRendering.cpp35
-rw-r--r--src/gpu/instanced/GLInstancedRendering.h17
-rw-r--r--src/gpu/instanced/InstanceProcessor.h2
-rw-r--r--src/gpu/instanced/InstancedOp.cpp456
-rw-r--r--src/gpu/instanced/InstancedOp.h160
-rw-r--r--src/gpu/instanced/InstancedRendering.cpp450
-rw-r--r--src/gpu/instanced/InstancedRendering.h170
-rw-r--r--src/gpu/ops/GrOp.h3
16 files changed, 765 insertions, 600 deletions
diff --git a/src/gpu/GrDrawingManager.cpp b/src/gpu/GrDrawingManager.cpp
index 6b2c5a20b4..282226c748 100644
--- a/src/gpu/GrDrawingManager.cpp
+++ b/src/gpu/GrDrawingManager.cpp
@@ -69,6 +69,15 @@ void GrDrawingManager::reset() {
fFlushState.reset();
}
+gr_instanced::OpAllocator* GrDrawingManager::instancingAllocator() {
+ if (fInstancingAllocator) {
+ return fInstancingAllocator.get();
+ }
+
+ fInstancingAllocator = fContext->getGpu()->createInstancedRenderingAllocator();
+ return fInstancingAllocator.get();
+}
+
// MDB TODO: make use of the 'proxy' parameter.
void GrDrawingManager::internalFlush(GrSurfaceProxy*, GrResourceCache::FlushType type) {
if (fFlushing || this->wasAbandoned()) {
diff --git a/src/gpu/GrDrawingManager.h b/src/gpu/GrDrawingManager.h
index d68430ecd2..e96fd3a5f8 100644
--- a/src/gpu/GrDrawingManager.h
+++ b/src/gpu/GrDrawingManager.h
@@ -15,6 +15,7 @@
#include "GrRenderTargetOpList.h"
#include "GrResourceCache.h"
#include "SkTArray.h"
+#include "instanced/InstancedRendering.h"
#include "text/GrAtlasTextContext.h"
class GrContext;
@@ -37,6 +38,8 @@ public:
bool wasAbandoned() const { return fAbandoned; }
void freeGpuResources();
+ gr_instanced::OpAllocator* instancingAllocator();
+
sk_sp<GrRenderTargetContext> makeRenderTargetContext(sk_sp<GrSurfaceProxy>,
sk_sp<SkColorSpace>,
const SkSurfaceProps*);
@@ -124,6 +127,9 @@ private:
bool fIsImmediateMode;
SkTArray<sk_sp<GrPreFlushCallbackObject>> fPreFlushCBObjects;
+
+ // Lazily allocated
+ std::unique_ptr<gr_instanced::OpAllocator> fInstancingAllocator;
};
#endif
diff --git a/src/gpu/GrGpu.cpp b/src/gpu/GrGpu.cpp
index 940bfea194..4bf0c87bf1 100644
--- a/src/gpu/GrGpu.cpp
+++ b/src/gpu/GrGpu.cpp
@@ -255,6 +255,11 @@ GrBuffer* GrGpu::createBuffer(size_t size, GrBufferType intendedType,
return buffer;
}
+std::unique_ptr<gr_instanced::OpAllocator> GrGpu::createInstancedRenderingAllocator() {
+ SkASSERT(GrCaps::InstancedSupport::kNone != this->caps()->instancedSupport());
+ return this->onCreateInstancedRenderingAllocator();
+}
+
gr_instanced::InstancedRendering* GrGpu::createInstancedRendering() {
SkASSERT(GrCaps::InstancedSupport::kNone != this->caps()->instancedSupport());
return this->onCreateInstancedRendering();
diff --git a/src/gpu/GrGpu.h b/src/gpu/GrGpu.h
index ad1b199e83..523add5cb7 100644
--- a/src/gpu/GrGpu.h
+++ b/src/gpu/GrGpu.h
@@ -15,6 +15,7 @@
#include "GrTextureProducer.h"
#include "GrTypes.h"
#include "GrXferProcessor.h"
+#include "instanced/InstancedRendering.h"
#include "SkPath.h"
#include "SkTArray.h"
#include <map>
@@ -39,7 +40,11 @@ class GrStencilSettings;
class GrSurface;
class GrTexture;
-namespace gr_instanced { class InstancedRendering; }
+namespace gr_instanced {
+ class InstancedOp;
+ class InstancedRendering;
+ class OpAllocator;
+}
class GrGpu : public SkRefCnt {
public:
@@ -155,6 +160,7 @@ public:
/**
* Creates an instanced rendering object if it is supported on this platform.
*/
+ std::unique_ptr<gr_instanced::OpAllocator> createInstancedRenderingAllocator();
gr_instanced::InstancedRendering* createInstancedRendering();
/**
@@ -558,6 +564,9 @@ private:
const void* data) = 0;
virtual gr_instanced::InstancedRendering* onCreateInstancedRendering() = 0;
+ virtual std::unique_ptr<gr_instanced::OpAllocator> onCreateInstancedRenderingAllocator() {
+ return nullptr;
+ }
virtual bool onIsACopyNeededForTextureParams(GrTextureProxy* proxy, const GrSamplerParams&,
GrTextureProducer::CopyParams*,
@@ -625,7 +634,7 @@ private:
GrContext* fContext;
friend class GrPathRendering;
- friend class gr_instanced::InstancedRendering;
+ friend class gr_instanced::InstancedOp; // for xferBarrier
typedef SkRefCnt INHERITED;
};
diff --git a/src/gpu/GrRenderTargetContext.cpp b/src/gpu/GrRenderTargetContext.cpp
index 34812ddfbe..41fa6dce0d 100644
--- a/src/gpu/GrRenderTargetContext.cpp
+++ b/src/gpu/GrRenderTargetContext.cpp
@@ -52,8 +52,6 @@
#define RETURN_FALSE_IF_ABANDONED_PRIV if (fRenderTargetContext->drawingManager()->wasAbandoned()) { return false; }
#define RETURN_NULL_IF_ABANDONED if (this->drawingManager()->wasAbandoned()) { return nullptr; }
-using gr_instanced::InstancedRendering;
-
class AutoCheckFlush {
public:
AutoCheckFlush(GrDrawingManager* drawingManager) : fDrawingManager(drawingManager) {
@@ -437,9 +435,9 @@ bool GrRenderTargetContext::drawFilledRect(const GrClip& clip,
if (GrCaps::InstancedSupport::kNone != fContext->caps()->instancedSupport() &&
(!ss || ss->isDisabled(false))) {
- InstancedRendering* ir = this->getOpList()->instancedRendering();
- std::unique_ptr<GrDrawOp> op = ir->recordRect(croppedRect, viewMatrix, std::move(paint), aa,
- fInstancedPipelineInfo);
+ gr_instanced::OpAllocator* oa = this->drawingManager()->instancingAllocator();
+ std::unique_ptr<GrDrawOp> op = oa->recordRect(croppedRect, viewMatrix, std::move(paint),
+ aa, fInstancedPipelineInfo);
if (op) {
this->addDrawOp(clip, std::move(op));
return true;
@@ -740,8 +738,8 @@ void GrRenderTargetContext::fillRectToRect(const GrClip& clip,
AutoCheckFlush acf(this->drawingManager());
if (GrCaps::InstancedSupport::kNone != fContext->caps()->instancedSupport()) {
- InstancedRendering* ir = this->getOpList()->instancedRendering();
- std::unique_ptr<GrDrawOp> op(ir->recordRect(croppedRect, viewMatrix, std::move(paint),
+ gr_instanced::OpAllocator* oa = this->drawingManager()->instancingAllocator();
+ std::unique_ptr<GrDrawOp> op(oa->recordRect(croppedRect, viewMatrix, std::move(paint),
croppedLocalRect, aa, fInstancedPipelineInfo));
if (op) {
this->addDrawOp(clip, std::move(op));
@@ -796,8 +794,8 @@ void GrRenderTargetContext::fillRectWithLocalMatrix(const GrClip& clip,
AutoCheckFlush acf(this->drawingManager());
if (GrCaps::InstancedSupport::kNone != fContext->caps()->instancedSupport()) {
- InstancedRendering* ir = this->getOpList()->instancedRendering();
- std::unique_ptr<GrDrawOp> op(ir->recordRect(croppedRect, viewMatrix, std::move(paint),
+ gr_instanced::OpAllocator* oa = this->drawingManager()->instancingAllocator();
+ std::unique_ptr<GrDrawOp> op(oa->recordRect(croppedRect, viewMatrix, std::move(paint),
localMatrix, aa, fInstancedPipelineInfo));
if (op) {
this->addDrawOp(clip, std::move(op));
@@ -948,9 +946,9 @@ void GrRenderTargetContext::drawRRect(const GrClip& origClip,
if (GrCaps::InstancedSupport::kNone != fContext->caps()->instancedSupport() &&
stroke.isFillStyle()) {
- InstancedRendering* ir = this->getOpList()->instancedRendering();
+ gr_instanced::OpAllocator* oa = this->drawingManager()->instancingAllocator();
std::unique_ptr<GrDrawOp> op(
- ir->recordRRect(rrect, viewMatrix, std::move(paint), aa, fInstancedPipelineInfo));
+ oa->recordRRect(rrect, viewMatrix, std::move(paint), aa, fInstancedPipelineInfo));
if (op) {
this->addDrawOp(*clip, std::move(op));
return;
@@ -1024,8 +1022,8 @@ bool GrRenderTargetContext::drawFilledDRRect(const GrClip& clip,
SkASSERT(!origOuter.isEmpty());
if (GrCaps::InstancedSupport::kNone != fContext->caps()->instancedSupport()) {
- InstancedRendering* ir = this->getOpList()->instancedRendering();
- std::unique_ptr<GrDrawOp> op(ir->recordDRRect(
+ gr_instanced::OpAllocator* oa = this->drawingManager()->instancingAllocator();
+ std::unique_ptr<GrDrawOp> op(oa->recordDRRect(
origOuter, origInner, viewMatrix, std::move(paint), aa, fInstancedPipelineInfo));
if (op) {
this->addDrawOp(clip, std::move(op));
@@ -1173,9 +1171,9 @@ void GrRenderTargetContext::drawOval(const GrClip& clip,
if (GrCaps::InstancedSupport::kNone != fContext->caps()->instancedSupport() &&
stroke.isFillStyle()) {
- InstancedRendering* ir = this->getOpList()->instancedRendering();
+ gr_instanced::OpAllocator* oa = this->drawingManager()->instancingAllocator();
std::unique_ptr<GrDrawOp> op(
- ir->recordOval(oval, viewMatrix, std::move(paint), aa, fInstancedPipelineInfo));
+ oa->recordOval(oval, viewMatrix, std::move(paint), aa, fInstancedPipelineInfo));
if (op) {
this->addDrawOp(clip, std::move(op));
return;
diff --git a/src/gpu/GrRenderTargetOpList.cpp b/src/gpu/GrRenderTargetOpList.cpp
index 4d309f407e..0bc6708f3a 100644
--- a/src/gpu/GrRenderTargetOpList.cpp
+++ b/src/gpu/GrRenderTargetOpList.cpp
@@ -370,7 +370,7 @@ GrOp* GrRenderTargetOpList::recordOp(std::unique_ptr<GrOp> op,
clip = fClipAllocator.make<GrAppliedClip>(std::move(*clip));
}
fRecordedOps.emplace_back(std::move(op), renderTarget, clip, dstTexture);
- fRecordedOps.back().fOp->wasRecorded();
+ fRecordedOps.back().fOp->wasRecorded(this);
fLastFullClearOp = nullptr;
fLastFullClearResourceID.makeInvalid();
fLastFullClearProxyID.makeInvalid();
diff --git a/src/gpu/gl/GrGLGpu.cpp b/src/gpu/gl/GrGLGpu.cpp
index 10fbabbe60..da3ec49a29 100644
--- a/src/gpu/gl/GrGLGpu.cpp
+++ b/src/gpu/gl/GrGLGpu.cpp
@@ -57,6 +57,9 @@
using gr_instanced::InstancedRendering;
using gr_instanced::GLInstancedRendering;
+using gr_instanced::OpAllocator;
+using gr_instanced::GLOpAllocator;
+
static const GrGLenum gXfermodeEquation2Blend[] = {
// Basic OpenGL blend equations.
GR_GL_FUNC_ADD,
@@ -1836,6 +1839,11 @@ GrBuffer* GrGLGpu::onCreateBuffer(size_t size, GrBufferType intendedType,
return GrGLBuffer::Create(this, size, intendedType, accessPattern, data);
}
+
+std::unique_ptr<OpAllocator> GrGLGpu::onCreateInstancedRenderingAllocator() {
+ return std::unique_ptr<OpAllocator>(new GLOpAllocator(this->caps()));
+}
+
InstancedRendering* GrGLGpu::onCreateInstancedRendering() {
return new GLInstancedRendering(this);
}
diff --git a/src/gpu/gl/GrGLGpu.h b/src/gpu/gl/GrGLGpu.h
index 9bfe099d0d..f7e75cc2e7 100644
--- a/src/gpu/gl/GrGLGpu.h
+++ b/src/gpu/gl/GrGLGpu.h
@@ -181,6 +181,7 @@ private:
GrSurfaceOrigin,
int sampleCnt) override;
+ std::unique_ptr<gr_instanced::OpAllocator> onCreateInstancedRenderingAllocator() override;
gr_instanced::InstancedRendering* onCreateInstancedRendering() override;
// Given a GrPixelConfig return the index into the stencil format array on GrGLCaps to a
diff --git a/src/gpu/instanced/GLInstancedRendering.cpp b/src/gpu/instanced/GLInstancedRendering.cpp
index bfdb9601f9..b2eb9ebb7a 100644
--- a/src/gpu/instanced/GLInstancedRendering.cpp
+++ b/src/gpu/instanced/GLInstancedRendering.cpp
@@ -15,12 +15,13 @@
namespace gr_instanced {
-class GLInstancedRendering::GLOp final : public InstancedRendering::Op {
+class GLInstancedOp final : public InstancedOp {
public:
DEFINE_OP_CLASS_ID
- GLOp(GLInstancedRendering* instRendering, GrPaint&& paint)
- : INHERITED(ClassID(), std::move(paint), instRendering) {}
+ GLInstancedOp(GLOpAllocator* alloc, GrPaint&& paint)
+ : INHERITED(ClassID(), std::move(paint), alloc) {
+ }
int numGLCommands() const { return 1 + fNumChangesInGeometry; }
private:
@@ -29,7 +30,7 @@ private:
friend class GLInstancedRendering;
- typedef Op INHERITED;
+ typedef InstancedOp INHERITED;
};
GrCaps::InstancedSupport GLInstancedRendering::CheckSupport(const GrGLCaps& glCaps) {
@@ -43,10 +44,10 @@ GrCaps::InstancedSupport GLInstancedRendering::CheckSupport(const GrGLCaps& glCa
}
GLInstancedRendering::GLInstancedRendering(GrGLGpu* gpu)
- : INHERITED(gpu),
- fVertexArrayID(0),
- fGLDrawCmdsInfo(0),
- fInstanceAttribsBufferUniqueId(SK_InvalidUniqueID) {
+ : INHERITED(gpu)
+ , fVertexArrayID(0)
+ , fGLDrawCmdsInfo(0)
+ , fInstanceAttribsBufferUniqueId(SK_InvalidUniqueID) {
SkASSERT(GrCaps::InstancedSupport::kNone != this->gpu()->caps()->instancedSupport());
}
@@ -61,8 +62,8 @@ inline GrGLGpu* GLInstancedRendering::glGpu() const {
return static_cast<GrGLGpu*>(this->gpu());
}
-std::unique_ptr<InstancedRendering::Op> GLInstancedRendering::makeOp(GrPaint&& paint) {
- return std::unique_ptr<Op>(new GLOp(this, std::move(paint)));
+std::unique_ptr<InstancedOp> GLOpAllocator::makeOp(GrPaint&& paint) {
+ return std::unique_ptr<InstancedOp>(new GLInstancedOp(this, std::move(paint)));
}
void GLInstancedRendering::onBeginFlush(GrResourceProvider* rp) {
@@ -71,8 +72,8 @@ void GLInstancedRendering::onBeginFlush(GrResourceProvider* rp) {
iter.init(this->trackedOps(), OpList::Iter::kHead_IterStart);
int numGLInstances = 0;
int numGLDrawCmds = 0;
- while (Op* o = iter.get()) {
- GLOp* op = static_cast<GLOp*>(o);
+ while (InstancedOp* o = iter.get()) {
+ GLInstancedOp* op = (GLInstancedOp*) o;
iter.next();
numGLInstances += op->fNumDraws;
@@ -152,14 +153,14 @@ void GLInstancedRendering::onBeginFlush(GrResourceProvider* rp) {
// Generate the instance and draw-indirect buffer contents based on the tracked ops.
iter.init(this->trackedOps(), OpList::Iter::kHead_IterStart);
- while (Op* o = iter.get()) {
- GLOp* op = static_cast<GLOp*>(o);
+ while (InstancedOp* o = iter.get()) {
+ GLInstancedOp* op = static_cast<GLInstancedOp*>(o);
iter.next();
op->fEmulatedBaseInstance = baseInstanceSupport ? 0 : glInstancesIdx;
op->fGLDrawCmdsIdx = glDrawCmdsIdx;
- const Op::Draw* draw = op->fHeadDraw;
+ const InstancedOp::Draw* draw = op->fHeadDraw;
SkASSERT(draw);
do {
int instanceCount = 0;
@@ -201,7 +202,7 @@ void GLInstancedRendering::onBeginFlush(GrResourceProvider* rp) {
}
void GLInstancedRendering::onDraw(const GrPipeline& pipeline, const InstanceProcessor& instProc,
- const Op* baseOp) {
+ const InstancedOp* baseOp) {
if (!fDrawIndirectBuffer && !fGLDrawCmdsInfo) {
return; // beginFlush was not successful.
}
@@ -214,7 +215,7 @@ void GLInstancedRendering::onDraw(const GrPipeline& pipeline, const InstanceProc
}
const GrGLCaps& glCaps = this->glGpu()->glCaps();
- const GLOp* op = static_cast<const GLOp*>(baseOp);
+ const GLInstancedOp* op = static_cast<const GLInstancedOp*>(baseOp);
int numCommands = op->numGLCommands();
#if GR_GL_LOG_INSTANCED_OPS
diff --git a/src/gpu/instanced/GLInstancedRendering.h b/src/gpu/instanced/GLInstancedRendering.h
index d1affba2bd..d68bc86cde 100644
--- a/src/gpu/instanced/GLInstancedRendering.h
+++ b/src/gpu/instanced/GLInstancedRendering.h
@@ -10,6 +10,7 @@
#include "GrCaps.h"
#include "gl/GrGLBuffer.h"
+#include "instanced/InstancedOp.h"
#include "instanced/InstancedRendering.h"
class GrGLCaps;
@@ -19,6 +20,16 @@ class GrGLGpu;
namespace gr_instanced {
+class GLOpAllocator final : public OpAllocator {
+public:
+ GLOpAllocator(const GrCaps* caps) : INHERITED(caps) {}
+
+private:
+ std::unique_ptr<InstancedOp> makeOp(GrPaint&& paint) override;
+
+ typedef OpAllocator INHERITED;
+};
+
class GLInstancedRendering final : public InstancedRendering {
public:
GLInstancedRendering(GrGLGpu*);
@@ -33,10 +44,8 @@ private:
GrGLGpu* glGpu() const;
- std::unique_ptr<Op> makeOp(GrPaint&& paint) override;
-
void onBeginFlush(GrResourceProvider*) override;
- void onDraw(const GrPipeline&, const InstanceProcessor&, const Op*) override;
+ void onDraw(const GrPipeline&, const InstanceProcessor&, const InstancedOp*) override;
void onEndFlush() override;
void onResetGpuResources(ResetType) override;
@@ -54,8 +63,6 @@ private:
GrGpuResource::UniqueID fInstanceAttribsBufferUniqueId;
int fInstanceAttribsBaseInstance;
- class GLOp;
-
friend class ::GrGLCaps; // For CheckSupport.
typedef InstancedRendering INHERITED;
diff --git a/src/gpu/instanced/InstanceProcessor.h b/src/gpu/instanced/InstanceProcessor.h
index 8cde30f73f..84d75a9f73 100644
--- a/src/gpu/instanced/InstanceProcessor.h
+++ b/src/gpu/instanced/InstanceProcessor.h
@@ -56,7 +56,7 @@ private:
*/
static GrCaps::InstancedSupport CheckSupport(const GrShaderCaps&, const GrCaps&);
- OpInfo fOpInfo;
+ const OpInfo fOpInfo;
BufferAccess fParamsAccess;
friend class GLInstancedRendering; // For CheckSupport.
diff --git a/src/gpu/instanced/InstancedOp.cpp b/src/gpu/instanced/InstancedOp.cpp
new file mode 100644
index 0000000000..58807212f1
--- /dev/null
+++ b/src/gpu/instanced/InstancedOp.cpp
@@ -0,0 +1,456 @@
+/*
+ * Copyright 2017 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "InstancedOp.h"
+#include "InstanceProcessor.h"
+#include "InstancedRendering.h"
+
+#include "GrOpFlushState.h"
+#include "GrRenderTargetOpList.h"
+
+
+namespace gr_instanced {
+
+InstancedOp::InstancedOp(uint32_t classID, GrPaint&& paint, OpAllocator* alloc)
+ : INHERITED(classID)
+ , fAllocator(alloc)
+ , fInstancedRendering(nullptr)
+ , fProcessors(std::move(paint))
+ , fIsTracked(false)
+ , fRequiresBarrierOnOverlap(false)
+ , fNumDraws(1)
+ , fNumChangesInGeometry(0) {
+ fHeadDraw = fTailDraw = alloc->allocateDraw();
+#ifdef SK_DEBUG
+ fHeadDraw->fGeometry = {-1, 0};
+#endif
+ fHeadDraw->fNext = nullptr;
+}
+
+InstancedOp::~InstancedOp() {
+ if (fIsTracked) {
+ fInstancedRendering->removeOp(this);
+ }
+
+ Draw* draw = fHeadDraw;
+ while (draw) {
+ Draw* next = draw->fNext;
+ fAllocator->releaseDraw(draw);
+ draw = next;
+ }
+}
+
+void InstancedOp::appendRRectParams(const SkRRect& rrect) {
+ SkASSERT(!fIsTracked);
+ switch (rrect.getType()) {
+ case SkRRect::kSimple_Type: {
+ const SkVector& radii = rrect.getSimpleRadii();
+ this->appendParamsTexel(radii.x(), radii.y(), rrect.width(), rrect.height());
+ return;
+ }
+ case SkRRect::kNinePatch_Type: {
+ float twoOverW = 2 / rrect.width();
+ float twoOverH = 2 / rrect.height();
+ const SkVector& radiiTL = rrect.radii(SkRRect::kUpperLeft_Corner);
+ const SkVector& radiiBR = rrect.radii(SkRRect::kLowerRight_Corner);
+ this->appendParamsTexel(radiiTL.x() * twoOverW, radiiBR.x() * twoOverW,
+ radiiTL.y() * twoOverH, radiiBR.y() * twoOverH);
+ return;
+ }
+ case SkRRect::kComplex_Type: {
+ /**
+ * The x and y radii of each arc are stored in separate vectors,
+ * in the following order:
+ *
+ * __x1 _ _ _ x3__
+ * y1 | | y2
+ *
+ * | |
+ *
+ * y3 |__ _ _ _ __| y4
+ * x2 x4
+ *
+ */
+ float twoOverW = 2 / rrect.width();
+ float twoOverH = 2 / rrect.height();
+ const SkVector& radiiTL = rrect.radii(SkRRect::kUpperLeft_Corner);
+ const SkVector& radiiTR = rrect.radii(SkRRect::kUpperRight_Corner);
+ const SkVector& radiiBR = rrect.radii(SkRRect::kLowerRight_Corner);
+ const SkVector& radiiBL = rrect.radii(SkRRect::kLowerLeft_Corner);
+ this->appendParamsTexel(radiiTL.x() * twoOverW, radiiBL.x() * twoOverW,
+ radiiTR.x() * twoOverW, radiiBR.x() * twoOverW);
+ this->appendParamsTexel(radiiTL.y() * twoOverH, radiiTR.y() * twoOverH,
+ radiiBL.y() * twoOverH, radiiBR.y() * twoOverH);
+ return;
+ }
+ default: return;
+ }
+}
+
+void InstancedOp::appendParamsTexel(const SkScalar* vals, int count) {
+ SkASSERT(!fIsTracked);
+ SkASSERT(count <= 4 && count >= 0);
+ const float* valsAsFloats = vals; // Ensure SkScalar == float.
+ memcpy(&fParams.push_back(), valsAsFloats, count * sizeof(float));
+ fInfo.fHasParams = true;
+}
+
+void InstancedOp::appendParamsTexel(SkScalar x, SkScalar y, SkScalar z, SkScalar w) {
+ SkASSERT(!fIsTracked);
+ ParamsTexel& texel = fParams.push_back();
+ texel.fX = SkScalarToFloat(x);
+ texel.fY = SkScalarToFloat(y);
+ texel.fZ = SkScalarToFloat(z);
+ texel.fW = SkScalarToFloat(w);
+ fInfo.fHasParams = true;
+}
+
+void InstancedOp::appendParamsTexel(SkScalar x, SkScalar y, SkScalar z) {
+ SkASSERT(!fIsTracked);
+ ParamsTexel& texel = fParams.push_back();
+ texel.fX = SkScalarToFloat(x);
+ texel.fY = SkScalarToFloat(y);
+ texel.fZ = SkScalarToFloat(z);
+ fInfo.fHasParams = true;
+}
+
+bool InstancedOp::xpRequiresDstTexture(const GrCaps& caps, const GrAppliedClip* clip) {
+ GrProcessorAnalysisCoverage coverageInput;
+ bool isMixedSamples = false;
+ if (GrAAType::kCoverage == fInfo.aaType() ||
+ (GrAAType::kNone == fInfo.aaType() && !fInfo.isSimpleRects() && fInfo.fCannotDiscard)) {
+ coverageInput = GrProcessorAnalysisCoverage::kSingleChannel;
+ } else {
+ coverageInput = GrProcessorAnalysisCoverage::kNone;
+ isMixedSamples = GrAAType::kMixedSamples == fInfo.aaType();
+ }
+ GrProcessorSet::Analysis analysis =
+ fProcessors.finalize(this->getSingleInstance().fColor, coverageInput, clip,
+ isMixedSamples, caps, &this->getSingleDraw().fInstance.fColor);
+
+ Draw& draw = this->getSingleDraw(); // This will assert if we have > 1 command.
+ SkASSERT(draw.fGeometry.isEmpty());
+ SkASSERT(SkIsPow2(fInfo.fShapeTypes));
+ SkASSERT(!fIsTracked);
+
+ if (kRect_ShapeFlag == fInfo.fShapeTypes) {
+ draw.fGeometry = InstanceProcessor::GetIndexRangeForRect(fInfo.aaType());
+ } else if (kOval_ShapeFlag == fInfo.fShapeTypes) {
+ draw.fGeometry = InstanceProcessor::GetIndexRangeForOval(fInfo.aaType(), this->bounds());
+ } else {
+ draw.fGeometry = InstanceProcessor::GetIndexRangeForRRect(fInfo.aaType());
+ }
+
+ fInfo.fCannotTweakAlphaForCoverage = !analysis.isCompatibleWithCoverageAsAlpha();
+
+ fInfo.fUsesLocalCoords = analysis.usesLocalCoords();
+ fRequiresBarrierOnOverlap = analysis.requiresBarrierBetweenOverlappingDraws();
+ return analysis.requiresDstTexture();
+}
+
+void InstancedOp::wasRecorded(GrRenderTargetOpList* opList) {
+ SkASSERT(!fInstancedRendering);
+ SkASSERT(!fIsTracked);
+
+ fInstancedRendering = opList->instancedRendering();
+
+ this->getSingleInstance().fInfo |= fInstancedRendering->addOpParams(this);
+ fInstancedRendering->addOp(this);
+ fIsTracked = true;
+}
+
+bool InstancedOp::onCombineIfPossible(GrOp* other, const GrCaps& caps) {
+ InstancedOp* that = static_cast<InstancedOp*>(other);
+ SkASSERT(!that->fInstancedRendering || (fInstancedRendering == that->fInstancedRendering));
+ SkASSERT(fTailDraw);
+ SkASSERT(that->fTailDraw);
+
+ if (!OpInfo::CanCombine(fInfo, that->fInfo) || fProcessors != that->fProcessors) {
+ return false;
+ }
+
+ SkASSERT(fRequiresBarrierOnOverlap == that->fRequiresBarrierOnOverlap);
+ if (fRequiresBarrierOnOverlap && this->bounds().intersects(that->bounds())) {
+ return false;
+ }
+ OpInfo combinedInfo = fInfo | that->fInfo;
+ if (!combinedInfo.isSimpleRects()) {
+ // This threshold was chosen with the "shapes_mixed" bench on a MacBook with Intel graphics.
+ // There seems to be a wide range where it doesn't matter if we combine or not. What matters
+ // is that the itty bitty rects combine with other shapes and the giant ones don't.
+ constexpr SkScalar kMaxPixelsToGeneralizeRects = 256 * 256;
+ if (fInfo.isSimpleRects() && fPixelLoad > kMaxPixelsToGeneralizeRects) {
+ return false;
+ }
+ if (that->fInfo.isSimpleRects() && that->fPixelLoad > kMaxPixelsToGeneralizeRects) {
+ return false;
+ }
+ }
+
+ if (!that->fInstancedRendering) {
+ that->fInstancedRendering = fInstancedRendering;
+ that->getSingleInstance().fInfo |= fInstancedRendering->addOpParams(that);
+ }
+
+ this->joinBounds(*that);
+ fInfo = combinedInfo;
+ fPixelLoad += that->fPixelLoad;
+ // Adopt the other op's draws.
+ fNumDraws += that->fNumDraws;
+ fNumChangesInGeometry += that->fNumChangesInGeometry;
+ if (fTailDraw->fGeometry != that->fHeadDraw->fGeometry) {
+ ++fNumChangesInGeometry;
+ }
+ fTailDraw->fNext = that->fHeadDraw;
+ fTailDraw = that->fTailDraw;
+
+ that->fHeadDraw = that->fTailDraw = nullptr;
+
+ return true;
+}
+
+void InstancedOp::onExecute(GrOpFlushState* state) {
+ SkASSERT(fInstancedRendering->isFlushing());
+ SkASSERT(state->gpu() == fInstancedRendering->gpu());
+
+ state->gpu()->handleDirtyContext();
+
+ GrPipeline pipeline;
+ GrPipeline::InitArgs args;
+ args.fAppliedClip = state->drawOpArgs().fAppliedClip;
+ args.fCaps = &state->caps();
+ args.fProcessors = &fProcessors;
+ args.fFlags = GrAATypeIsHW(fInfo.aaType()) ? GrPipeline::kHWAntialias_Flag : 0;
+ args.fRenderTarget = state->drawOpArgs().fRenderTarget;
+ args.fDstTexture = state->drawOpArgs().fDstTexture;
+ pipeline.init(args);
+
+ if (GrXferBarrierType barrierType = pipeline.xferBarrierType(*state->gpu()->caps())) {
+ state->gpu()->xferBarrier(pipeline.getRenderTarget(), barrierType);
+ }
+ fInstancedRendering->draw(pipeline, fInfo, this);
+}
+
+////////////////////////////////////////////////////////////////////////////////////////////////////
+
+OpAllocator::OpAllocator(const GrCaps* caps)
+ : fDrawPool(1024, 1024)
+ , fCaps(sk_ref_sp(caps)) {
+}
+
+OpAllocator::~OpAllocator() {}
+
+std::unique_ptr<GrDrawOp> OpAllocator::recordRect(const SkRect& rect,
+ const SkMatrix& viewMatrix,
+ GrPaint&& paint, GrAA aa,
+ const GrInstancedPipelineInfo& info) {
+ return this->recordShape(ShapeType::kRect, rect, viewMatrix, std::move(paint), rect, aa, info);
+}
+
+std::unique_ptr<GrDrawOp> OpAllocator::recordRect(const SkRect& rect,
+ const SkMatrix& viewMatrix,
+ GrPaint&& paint, const SkRect& localRect,
+ GrAA aa,
+ const GrInstancedPipelineInfo& info) {
+ return this->recordShape(ShapeType::kRect, rect, viewMatrix, std::move(paint), localRect, aa,
+ info);
+}
+
+std::unique_ptr<GrDrawOp> OpAllocator::recordRect(const SkRect& rect,
+ const SkMatrix& viewMatrix,
+ GrPaint&& paint,
+ const SkMatrix& localMatrix, GrAA aa,
+ const GrInstancedPipelineInfo& info) {
+ if (localMatrix.hasPerspective()) {
+ return nullptr; // Perspective is not yet supported in the local matrix.
+ }
+ if (std::unique_ptr<InstancedOp> op = this->recordShape(ShapeType::kRect, rect, viewMatrix,
+ std::move(paint), rect, aa, info)) {
+ op->getSingleInstance().fInfo |= kLocalMatrix_InfoFlag;
+ op->appendParamsTexel(localMatrix.getScaleX(), localMatrix.getSkewX(),
+ localMatrix.getTranslateX());
+ op->appendParamsTexel(localMatrix.getSkewY(), localMatrix.getScaleY(),
+ localMatrix.getTranslateY());
+ op->fInfo.fHasLocalMatrix = true;
+ return std::move(op);
+ }
+ return nullptr;
+}
+
+std::unique_ptr<GrDrawOp> OpAllocator::recordOval(const SkRect& oval,
+ const SkMatrix& viewMatrix,
+ GrPaint&& paint, GrAA aa,
+ const GrInstancedPipelineInfo& info) {
+ return this->recordShape(ShapeType::kOval, oval, viewMatrix, std::move(paint), oval, aa, info);
+}
+
+std::unique_ptr<GrDrawOp> OpAllocator::recordRRect(const SkRRect& rrect,
+ const SkMatrix& viewMatrix,
+ GrPaint&& paint, GrAA aa,
+ const GrInstancedPipelineInfo& info) {
+ if (std::unique_ptr<InstancedOp> op =
+ this->recordShape(GetRRectShapeType(rrect), rrect.rect(), viewMatrix,
+ std::move(paint), rrect.rect(), aa, info)) {
+ op->appendRRectParams(rrect);
+ return std::move(op);
+ }
+ return nullptr;
+}
+
+std::unique_ptr<GrDrawOp> OpAllocator::recordDRRect(const SkRRect& outer,
+ const SkRRect& inner,
+ const SkMatrix& viewMatrix,
+ GrPaint&& paint, GrAA aa,
+ const GrInstancedPipelineInfo& info) {
+ if (inner.getType() > SkRRect::kSimple_Type) {
+ return nullptr; // Complex inner round rects are not yet supported.
+ }
+ if (SkRRect::kEmpty_Type == inner.getType()) {
+ return this->recordRRect(outer, viewMatrix, std::move(paint), aa, info);
+ }
+ if (std::unique_ptr<InstancedOp> op =
+ this->recordShape(GetRRectShapeType(outer), outer.rect(), viewMatrix,
+ std::move(paint), outer.rect(), aa, info)) {
+ op->appendRRectParams(outer);
+ ShapeType innerShapeType = GetRRectShapeType(inner);
+ op->fInfo.fInnerShapeTypes |= GetShapeFlag(innerShapeType);
+ op->getSingleInstance().fInfo |= ((int)innerShapeType << kInnerShapeType_InfoBit);
+ op->appendParamsTexel(inner.rect().asScalars(), 4);
+ op->appendRRectParams(inner);
+ return std::move(op);
+ }
+ return nullptr;
+}
+
+std::unique_ptr<InstancedOp> OpAllocator::recordShape(
+ ShapeType type, const SkRect& bounds, const SkMatrix& viewMatrix, GrPaint&& paint,
+ const SkRect& localRect, GrAA aa, const GrInstancedPipelineInfo& info) {
+
+ if (info.fIsRenderingToFloat && fCaps->avoidInstancedDrawsToFPTargets()) {
+ return nullptr;
+ }
+
+ GrAAType aaType;
+ if (!this->selectAntialiasMode(viewMatrix, aa, info, &aaType)) {
+ return nullptr;
+ }
+
+ GrColor color = paint.getColor();
+ std::unique_ptr<InstancedOp> op = this->makeOp(std::move(paint));
+ op->fInfo.setAAType(aaType);
+ op->fInfo.fShapeTypes = GetShapeFlag(type);
+ op->fInfo.fCannotDiscard = true;
+ Instance& instance = op->getSingleInstance();
+ instance.fInfo = (int)type << kShapeType_InfoBit;
+
+ InstancedOp::HasAABloat aaBloat =
+ (aaType == GrAAType::kCoverage) ? InstancedOp::HasAABloat::kYes
+ : InstancedOp::HasAABloat::kNo;
+ InstancedOp::IsZeroArea zeroArea = bounds.isEmpty() ? InstancedOp::IsZeroArea::kYes
+ : InstancedOp::IsZeroArea::kNo;
+
+ // The instanced shape renderer draws rectangles of [-1, -1, +1, +1], so we find the matrix that
+ // will map this rectangle to the same device coordinates as "viewMatrix * bounds".
+ float sx = 0.5f * bounds.width();
+ float sy = 0.5f * bounds.height();
+ float tx = sx + bounds.fLeft;
+ float ty = sy + bounds.fTop;
+ if (!viewMatrix.hasPerspective()) {
+ float* m = instance.fShapeMatrix2x3;
+ m[0] = viewMatrix.getScaleX() * sx;
+ m[1] = viewMatrix.getSkewX() * sy;
+ m[2] = viewMatrix.getTranslateX() +
+ viewMatrix.getScaleX() * tx + viewMatrix.getSkewX() * ty;
+
+ m[3] = viewMatrix.getSkewY() * sx;
+ m[4] = viewMatrix.getScaleY() * sy;
+ m[5] = viewMatrix.getTranslateY() +
+ viewMatrix.getSkewY() * tx + viewMatrix.getScaleY() * ty;
+
+ // Since 'm' is a 2x3 matrix that maps the rect [-1, +1] into the shape's device-space quad,
+ // it's quite simple to find the bounding rectangle:
+ float devBoundsHalfWidth = fabsf(m[0]) + fabsf(m[1]);
+ float devBoundsHalfHeight = fabsf(m[3]) + fabsf(m[4]);
+ SkRect opBounds;
+ opBounds.fLeft = m[2] - devBoundsHalfWidth;
+ opBounds.fRight = m[2] + devBoundsHalfWidth;
+ opBounds.fTop = m[5] - devBoundsHalfHeight;
+ opBounds.fBottom = m[5] + devBoundsHalfHeight;
+ op->setBounds(opBounds, aaBloat, zeroArea);
+
+ // TODO: Is this worth the CPU overhead?
+ op->fInfo.fNonSquare =
+ fabsf(devBoundsHalfHeight - devBoundsHalfWidth) > 0.5f || // Early out.
+ fabs(m[0] * m[3] + m[1] * m[4]) > 1e-3f || // Skew?
+ fabs(m[0] * m[0] + m[1] * m[1] - m[3] * m[3] - m[4] * m[4]) >
+ 1e-2f; // Diff. lengths?
+ } else {
+ SkMatrix shapeMatrix(viewMatrix);
+ shapeMatrix.preTranslate(tx, ty);
+ shapeMatrix.preScale(sx, sy);
+ instance.fInfo |= kPerspective_InfoFlag;
+
+ float* m = instance.fShapeMatrix2x3;
+ m[0] = SkScalarToFloat(shapeMatrix.getScaleX());
+ m[1] = SkScalarToFloat(shapeMatrix.getSkewX());
+ m[2] = SkScalarToFloat(shapeMatrix.getTranslateX());
+ m[3] = SkScalarToFloat(shapeMatrix.getSkewY());
+ m[4] = SkScalarToFloat(shapeMatrix.getScaleY());
+ m[5] = SkScalarToFloat(shapeMatrix.getTranslateY());
+
+ // Send the perspective column as a param.
+ op->appendParamsTexel(shapeMatrix[SkMatrix::kMPersp0], shapeMatrix[SkMatrix::kMPersp1],
+ shapeMatrix[SkMatrix::kMPersp2]);
+ op->fInfo.fHasPerspective = true;
+
+ op->setBounds(bounds, aaBloat, zeroArea);
+ op->fInfo.fNonSquare = true;
+ }
+
+ instance.fColor = color;
+
+ const float* rectAsFloats = localRect.asScalars(); // Ensure SkScalar == float.
+ memcpy(&instance.fLocalRect, rectAsFloats, 4 * sizeof(float));
+
+ op->fPixelLoad = op->bounds().height() * op->bounds().width();
+ return op;
+}
+
+inline bool OpAllocator::selectAntialiasMode(const SkMatrix& viewMatrix, GrAA aa,
+ const GrInstancedPipelineInfo& info,
+ GrAAType* aaType) {
+ SkASSERT(!info.fIsMixedSampled || info.fIsMultisampled);
+ SkASSERT(GrCaps::InstancedSupport::kNone != fCaps->instancedSupport());
+
+ if (!info.fIsMultisampled || fCaps->multisampleDisableSupport()) {
+ if (GrAA::kNo == aa) {
+ *aaType = GrAAType::kNone;
+ return true;
+ }
+
+ if (info.canUseCoverageAA() && viewMatrix.preservesRightAngles()) {
+ *aaType = GrAAType::kCoverage;
+ return true;
+ }
+ }
+
+ if (info.fIsMultisampled &&
+ fCaps->instancedSupport() >= GrCaps::InstancedSupport::kMultisampled) {
+ if (!info.fIsMixedSampled) {
+ *aaType = GrAAType::kMSAA;
+ return true;
+ }
+ if (fCaps->instancedSupport() >= GrCaps::InstancedSupport::kMixedSampled) {
+ *aaType = GrAAType::kMixedSamples;
+ return true;
+ }
+ }
+
+ return false;
+}
+
+}
diff --git a/src/gpu/instanced/InstancedOp.h b/src/gpu/instanced/InstancedOp.h
new file mode 100644
index 0000000000..55cb82f284
--- /dev/null
+++ b/src/gpu/instanced/InstancedOp.h
@@ -0,0 +1,160 @@
+/*
+ * Copyright 2017 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef gr_instanced_InstancedOp_DEFINED
+#define gr_instanced_InstancedOp_DEFINED
+
+
+#include "../private/GrInstancedPipelineInfo.h"
+#include "GrMemoryPool.h"
+#include "ops/GrDrawOp.h"
+#include "instanced/InstancedRenderingTypes.h"
+
+#include "SkTInternalLList.h"
+
+namespace gr_instanced {
+
+class InstancedRendering;
+class OpAllocator;
+
+class InstancedOp : public GrDrawOp {
+public:
+ SK_DECLARE_INTERNAL_LLIST_INTERFACE(InstancedOp);
+
+ ~InstancedOp() override;
+ const char* name() const override { return "InstancedOp"; }
+
+ SkString dumpInfo() const override {
+ SkString string;
+ string.printf(
+ "AA: %d, ShapeTypes: 0x%02x, IShapeTypes: 0x%02x, Persp %d, "
+ "NonSquare: %d, PLoad: %0.2f, Tracked: %d, NumDraws: %d, "
+ "GeomChanges: %d\n",
+ (unsigned)fInfo.fAAType,
+ fInfo.fShapeTypes,
+ fInfo.fInnerShapeTypes,
+ fInfo.fHasPerspective,
+ fInfo.fNonSquare,
+ fPixelLoad,
+ fIsTracked,
+ fNumDraws,
+ fNumChangesInGeometry);
+ string.append(INHERITED::dumpInfo());
+ return string;
+ }
+
+ struct Draw {
+ Instance fInstance;
+ IndexRange fGeometry;
+ Draw* fNext;
+ };
+
+ Draw& getSingleDraw() const { SkASSERT(fHeadDraw && !fHeadDraw->fNext); return *fHeadDraw; }
+ Instance& getSingleInstance() const { return this->getSingleDraw().fInstance; }
+
+ void appendRRectParams(const SkRRect&);
+ void appendParamsTexel(const SkScalar* vals, int count);
+ void appendParamsTexel(SkScalar x, SkScalar y, SkScalar z, SkScalar w);
+ void appendParamsTexel(SkScalar x, SkScalar y, SkScalar z);
+ FixedFunctionFlags fixedFunctionFlags() const override {
+ return GrAATypeIsHW(fInfo.aaType()) ? FixedFunctionFlags::kUsesHWAA
+ : FixedFunctionFlags::kNone;
+ }
+ bool xpRequiresDstTexture(const GrCaps&, const GrAppliedClip*) override;
+
+ // Registers the op with the InstancedRendering list of tracked ops.
+ void wasRecorded(GrRenderTargetOpList*) override;
+
+protected:
+ InstancedOp(uint32_t classID, GrPaint&&, OpAllocator*);
+
+ OpAllocator* fAllocator;
+ InstancedRendering* fInstancedRendering;
+ OpInfo fInfo;
+ SkScalar fPixelLoad;
+ GrProcessorSet fProcessors;
+ SkSTArray<5, ParamsTexel, true> fParams;
+ bool fIsTracked : 1;
+ bool fRequiresBarrierOnOverlap : 1;
+ int fNumDraws;
+ int fNumChangesInGeometry;
+ Draw* fHeadDraw;
+ Draw* fTailDraw;
+
+private:
+ bool onCombineIfPossible(GrOp* other, const GrCaps& caps) override;
+ void onPrepare(GrOpFlushState*) override {}
+ void onExecute(GrOpFlushState*) override;
+
+ typedef GrDrawOp INHERITED;
+
+ friend class InstancedRendering;
+ friend class OpAllocator;
+};
+
+class OpAllocator : public SkNoncopyable {
+public:
+ virtual ~OpAllocator();
+
+ /**
+ * These methods make a new record internally for an instanced draw, and return an op that is
+     * effectively just an index to that record. The returned op is not self-contained, but
+     * rather relies on the InstancedRendering class to handle the rendering. The client must call
+     * beginFlush() on the InstancedRendering before attempting to flush ops returned by it. It is
+     * invalid to record new draws between beginFlush() and endFlush().
+ */
+ std::unique_ptr<GrDrawOp> SK_WARN_UNUSED_RESULT recordRect(const SkRect&, const SkMatrix&,
+ GrPaint&&, GrAA,
+ const GrInstancedPipelineInfo&);
+
+ std::unique_ptr<GrDrawOp> SK_WARN_UNUSED_RESULT recordRect(const SkRect&, const SkMatrix&,
+ GrPaint&&, const SkRect& localRect,
+ GrAA,
+ const GrInstancedPipelineInfo&);
+
+ std::unique_ptr<GrDrawOp> SK_WARN_UNUSED_RESULT recordRect(const SkRect&, const SkMatrix&,
+ GrPaint&&,
+ const SkMatrix& localMatrix, GrAA,
+ const GrInstancedPipelineInfo&);
+
+ std::unique_ptr<GrDrawOp> SK_WARN_UNUSED_RESULT recordOval(const SkRect&, const SkMatrix&,
+ GrPaint&&, GrAA,
+ const GrInstancedPipelineInfo&);
+
+ std::unique_ptr<GrDrawOp> SK_WARN_UNUSED_RESULT recordRRect(const SkRRect&, const SkMatrix&,
+ GrPaint&&, GrAA,
+ const GrInstancedPipelineInfo&);
+
+ std::unique_ptr<GrDrawOp> SK_WARN_UNUSED_RESULT recordDRRect(const SkRRect& outer,
+ const SkRRect& inner,
+ const SkMatrix&, GrPaint&&, GrAA,
+ const GrInstancedPipelineInfo&);
+
+ InstancedOp::Draw* allocateDraw() { return fDrawPool.allocate(); }
+ void releaseDraw(InstancedOp::Draw* draw) { fDrawPool.release(draw); }
+
+protected:
+ OpAllocator(const GrCaps*);
+
+private:
+ bool selectAntialiasMode(const SkMatrix& viewMatrix, GrAA aa, const GrInstancedPipelineInfo&,
+ GrAAType*);
+ virtual std::unique_ptr<InstancedOp> makeOp(GrPaint&&) = 0;
+
+ std::unique_ptr<InstancedOp> SK_WARN_UNUSED_RESULT recordShape(
+ ShapeType, const SkRect& bounds,
+ const SkMatrix& viewMatrix, GrPaint&&,
+ const SkRect& localRect, GrAA aa,
+ const GrInstancedPipelineInfo&);
+
+ GrObjectMemoryPool<InstancedOp::Draw> fDrawPool;
+ sk_sp<const GrCaps> fCaps;
+};
+
+}
+
+#endif
diff --git a/src/gpu/instanced/InstancedRendering.cpp b/src/gpu/instanced/InstancedRendering.cpp
index 9d17a82fc1..7458437cd7 100644
--- a/src/gpu/instanced/InstancedRendering.cpp
+++ b/src/gpu/instanced/InstancedRendering.cpp
@@ -6,426 +6,29 @@
*/
#include "InstancedRendering.h"
+
+#include "InstancedOp.h"
#include "GrAppliedClip.h"
#include "GrCaps.h"
-#include "GrOpFlushState.h"
#include "GrPipeline.h"
#include "GrResourceProvider.h"
+
#include "instanced/InstanceProcessor.h"
namespace gr_instanced {
-InstancedRendering::InstancedRendering(GrGpu* gpu)
- : fGpu(SkRef(gpu)),
- fState(State::kRecordingDraws),
- fDrawPool(1024, 1024) {
-}
-
-std::unique_ptr<GrDrawOp> InstancedRendering::recordRect(const SkRect& rect,
- const SkMatrix& viewMatrix,
- GrPaint&& paint, GrAA aa,
- const GrInstancedPipelineInfo& info) {
- return this->recordShape(ShapeType::kRect, rect, viewMatrix, std::move(paint), rect, aa, info);
-}
-
-std::unique_ptr<GrDrawOp> InstancedRendering::recordRect(const SkRect& rect,
- const SkMatrix& viewMatrix,
- GrPaint&& paint, const SkRect& localRect,
- GrAA aa,
- const GrInstancedPipelineInfo& info) {
- return this->recordShape(ShapeType::kRect, rect, viewMatrix, std::move(paint), localRect, aa,
- info);
-}
-
-std::unique_ptr<GrDrawOp> InstancedRendering::recordRect(const SkRect& rect,
- const SkMatrix& viewMatrix,
- GrPaint&& paint,
- const SkMatrix& localMatrix, GrAA aa,
- const GrInstancedPipelineInfo& info) {
- if (localMatrix.hasPerspective()) {
- return nullptr; // Perspective is not yet supported in the local matrix.
- }
- if (std::unique_ptr<Op> op = this->recordShape(ShapeType::kRect, rect, viewMatrix,
- std::move(paint), rect, aa, info)) {
- op->getSingleInstance().fInfo |= kLocalMatrix_InfoFlag;
- op->appendParamsTexel(localMatrix.getScaleX(), localMatrix.getSkewX(),
- localMatrix.getTranslateX());
- op->appendParamsTexel(localMatrix.getSkewY(), localMatrix.getScaleY(),
- localMatrix.getTranslateY());
- op->fInfo.fHasLocalMatrix = true;
- return std::move(op);
- }
- return nullptr;
-}
-
-std::unique_ptr<GrDrawOp> InstancedRendering::recordOval(const SkRect& oval,
- const SkMatrix& viewMatrix,
- GrPaint&& paint, GrAA aa,
- const GrInstancedPipelineInfo& info) {
- return this->recordShape(ShapeType::kOval, oval, viewMatrix, std::move(paint), oval, aa, info);
-}
-
-std::unique_ptr<GrDrawOp> InstancedRendering::recordRRect(const SkRRect& rrect,
- const SkMatrix& viewMatrix,
- GrPaint&& paint, GrAA aa,
- const GrInstancedPipelineInfo& info) {
- if (std::unique_ptr<Op> op =
- this->recordShape(GetRRectShapeType(rrect), rrect.rect(), viewMatrix,
- std::move(paint), rrect.rect(), aa, info)) {
- op->appendRRectParams(rrect);
- return std::move(op);
- }
- return nullptr;
-}
-
-std::unique_ptr<GrDrawOp> InstancedRendering::recordDRRect(const SkRRect& outer,
- const SkRRect& inner,
- const SkMatrix& viewMatrix,
- GrPaint&& paint, GrAA aa,
- const GrInstancedPipelineInfo& info) {
- if (inner.getType() > SkRRect::kSimple_Type) {
- return nullptr; // Complex inner round rects are not yet supported.
- }
- if (SkRRect::kEmpty_Type == inner.getType()) {
- return this->recordRRect(outer, viewMatrix, std::move(paint), aa, info);
- }
- if (std::unique_ptr<Op> op =
- this->recordShape(GetRRectShapeType(outer), outer.rect(), viewMatrix,
- std::move(paint), outer.rect(), aa, info)) {
- op->appendRRectParams(outer);
- ShapeType innerShapeType = GetRRectShapeType(inner);
- op->fInfo.fInnerShapeTypes |= GetShapeFlag(innerShapeType);
- op->getSingleInstance().fInfo |= ((int)innerShapeType << kInnerShapeType_InfoBit);
- op->appendParamsTexel(inner.rect().asScalars(), 4);
- op->appendRRectParams(inner);
- return std::move(op);
- }
- return nullptr;
-}
-
-std::unique_ptr<InstancedRendering::Op> InstancedRendering::recordShape(
- ShapeType type, const SkRect& bounds, const SkMatrix& viewMatrix, GrPaint&& paint,
- const SkRect& localRect, GrAA aa, const GrInstancedPipelineInfo& info) {
- SkASSERT(State::kRecordingDraws == fState);
-
- if (info.fIsRenderingToFloat && fGpu->caps()->avoidInstancedDrawsToFPTargets()) {
- return nullptr;
- }
-
- GrAAType aaType;
- if (!this->selectAntialiasMode(viewMatrix, aa, info, &aaType)) {
- return nullptr;
- }
-
- GrColor color = paint.getColor();
- std::unique_ptr<Op> op = this->makeOp(std::move(paint));
- op->fInfo.setAAType(aaType);
- op->fInfo.fShapeTypes = GetShapeFlag(type);
- op->fInfo.fCannotDiscard = true;
- Instance& instance = op->getSingleInstance();
- instance.fInfo = (int)type << kShapeType_InfoBit;
-
- Op::HasAABloat aaBloat =
- (aaType == GrAAType::kCoverage) ? Op::HasAABloat::kYes : Op::HasAABloat::kNo;
- Op::IsZeroArea zeroArea = (bounds.isEmpty()) ? Op::IsZeroArea::kYes : Op::IsZeroArea::kNo;
-
- // The instanced shape renderer draws rectangles of [-1, -1, +1, +1], so we find the matrix that
- // will map this rectangle to the same device coordinates as "viewMatrix * bounds".
- float sx = 0.5f * bounds.width();
- float sy = 0.5f * bounds.height();
- float tx = sx + bounds.fLeft;
- float ty = sy + bounds.fTop;
- if (!viewMatrix.hasPerspective()) {
- float* m = instance.fShapeMatrix2x3;
- m[0] = viewMatrix.getScaleX() * sx;
- m[1] = viewMatrix.getSkewX() * sy;
- m[2] = viewMatrix.getTranslateX() +
- viewMatrix.getScaleX() * tx + viewMatrix.getSkewX() * ty;
-
- m[3] = viewMatrix.getSkewY() * sx;
- m[4] = viewMatrix.getScaleY() * sy;
- m[5] = viewMatrix.getTranslateY() +
- viewMatrix.getSkewY() * tx + viewMatrix.getScaleY() * ty;
-
- // Since 'm' is a 2x3 matrix that maps the rect [-1, +1] into the shape's device-space quad,
- // it's quite simple to find the bounding rectangle:
- float devBoundsHalfWidth = fabsf(m[0]) + fabsf(m[1]);
- float devBoundsHalfHeight = fabsf(m[3]) + fabsf(m[4]);
- SkRect opBounds;
- opBounds.fLeft = m[2] - devBoundsHalfWidth;
- opBounds.fRight = m[2] + devBoundsHalfWidth;
- opBounds.fTop = m[5] - devBoundsHalfHeight;
- opBounds.fBottom = m[5] + devBoundsHalfHeight;
- op->setBounds(opBounds, aaBloat, zeroArea);
-
- // TODO: Is this worth the CPU overhead?
- op->fInfo.fNonSquare =
- fabsf(devBoundsHalfHeight - devBoundsHalfWidth) > 0.5f || // Early out.
- fabs(m[0] * m[3] + m[1] * m[4]) > 1e-3f || // Skew?
- fabs(m[0] * m[0] + m[1] * m[1] - m[3] * m[3] - m[4] * m[4]) >
- 1e-2f; // Diff. lengths?
- } else {
- SkMatrix shapeMatrix(viewMatrix);
- shapeMatrix.preTranslate(tx, ty);
- shapeMatrix.preScale(sx, sy);
- instance.fInfo |= kPerspective_InfoFlag;
-
- float* m = instance.fShapeMatrix2x3;
- m[0] = SkScalarToFloat(shapeMatrix.getScaleX());
- m[1] = SkScalarToFloat(shapeMatrix.getSkewX());
- m[2] = SkScalarToFloat(shapeMatrix.getTranslateX());
- m[3] = SkScalarToFloat(shapeMatrix.getSkewY());
- m[4] = SkScalarToFloat(shapeMatrix.getScaleY());
- m[5] = SkScalarToFloat(shapeMatrix.getTranslateY());
-
- // Send the perspective column as a param.
- op->appendParamsTexel(shapeMatrix[SkMatrix::kMPersp0], shapeMatrix[SkMatrix::kMPersp1],
- shapeMatrix[SkMatrix::kMPersp2]);
- op->fInfo.fHasPerspective = true;
-
- op->setBounds(bounds, aaBloat, zeroArea);
- op->fInfo.fNonSquare = true;
- }
-
- instance.fColor = color;
-
- const float* rectAsFloats = localRect.asScalars(); // Ensure SkScalar == float.
- memcpy(&instance.fLocalRect, rectAsFloats, 4 * sizeof(float));
-
- op->fPixelLoad = op->bounds().height() * op->bounds().width();
- return op;
-}
-
-inline bool InstancedRendering::selectAntialiasMode(const SkMatrix& viewMatrix, GrAA aa,
- const GrInstancedPipelineInfo& info,
- GrAAType* aaType) {
- SkASSERT(!info.fIsMixedSampled || info.fIsMultisampled);
- SkASSERT(GrCaps::InstancedSupport::kNone != fGpu->caps()->instancedSupport());
-
- if (!info.fIsMultisampled || fGpu->caps()->multisampleDisableSupport()) {
- if (GrAA::kNo == aa) {
- *aaType = GrAAType::kNone;
- return true;
- }
-
- if (info.canUseCoverageAA() && viewMatrix.preservesRightAngles()) {
- *aaType = GrAAType::kCoverage;
- return true;
- }
- }
-
- if (info.fIsMultisampled &&
- fGpu->caps()->instancedSupport() >= GrCaps::InstancedSupport::kMultisampled) {
- if (!info.fIsMixedSampled) {
- *aaType = GrAAType::kMSAA;
- return true;
- }
- if (fGpu->caps()->instancedSupport() >= GrCaps::InstancedSupport::kMixedSampled) {
- *aaType = GrAAType::kMixedSamples;
- return true;
- }
- }
-
- return false;
-}
-
-InstancedRendering::Op::Op(uint32_t classID, GrPaint&& paint, InstancedRendering* ir)
- : INHERITED(classID)
- , fInstancedRendering(ir)
- , fProcessors(std::move(paint))
- , fIsTracked(false)
- , fRequiresBarrierOnOverlap(false)
- , fNumDraws(1)
- , fNumChangesInGeometry(0) {
- fHeadDraw = fTailDraw = fInstancedRendering->fDrawPool.allocate();
-#ifdef SK_DEBUG
- fHeadDraw->fGeometry = {-1, 0};
-#endif
- fHeadDraw->fNext = nullptr;
-}
-
-InstancedRendering::Op::~Op() {
- if (fIsTracked) {
- fInstancedRendering->fTrackedOps.remove(this);
- }
-
- Draw* draw = fHeadDraw;
- while (draw) {
- Draw* next = draw->fNext;
- fInstancedRendering->fDrawPool.release(draw);
- draw = next;
- }
-}
-
-void InstancedRendering::Op::appendRRectParams(const SkRRect& rrect) {
- SkASSERT(!fIsTracked);
- switch (rrect.getType()) {
- case SkRRect::kSimple_Type: {
- const SkVector& radii = rrect.getSimpleRadii();
- this->appendParamsTexel(radii.x(), radii.y(), rrect.width(), rrect.height());
- return;
- }
- case SkRRect::kNinePatch_Type: {
- float twoOverW = 2 / rrect.width();
- float twoOverH = 2 / rrect.height();
- const SkVector& radiiTL = rrect.radii(SkRRect::kUpperLeft_Corner);
- const SkVector& radiiBR = rrect.radii(SkRRect::kLowerRight_Corner);
- this->appendParamsTexel(radiiTL.x() * twoOverW, radiiBR.x() * twoOverW,
- radiiTL.y() * twoOverH, radiiBR.y() * twoOverH);
- return;
- }
- case SkRRect::kComplex_Type: {
- /**
- * The x and y radii of each arc are stored in separate vectors,
- * in the following order:
- *
- * __x1 _ _ _ x3__
- * y1 | | y2
- *
- * | |
- *
- * y3 |__ _ _ _ __| y4
- * x2 x4
- *
- */
- float twoOverW = 2 / rrect.width();
- float twoOverH = 2 / rrect.height();
- const SkVector& radiiTL = rrect.radii(SkRRect::kUpperLeft_Corner);
- const SkVector& radiiTR = rrect.radii(SkRRect::kUpperRight_Corner);
- const SkVector& radiiBR = rrect.radii(SkRRect::kLowerRight_Corner);
- const SkVector& radiiBL = rrect.radii(SkRRect::kLowerLeft_Corner);
- this->appendParamsTexel(radiiTL.x() * twoOverW, radiiBL.x() * twoOverW,
- radiiTR.x() * twoOverW, radiiBR.x() * twoOverW);
- this->appendParamsTexel(radiiTL.y() * twoOverH, radiiTR.y() * twoOverH,
- radiiBL.y() * twoOverH, radiiBR.y() * twoOverH);
- return;
- }
- default: return;
- }
-}
-
-void InstancedRendering::Op::appendParamsTexel(const SkScalar* vals, int count) {
- SkASSERT(!fIsTracked);
- SkASSERT(count <= 4 && count >= 0);
- const float* valsAsFloats = vals; // Ensure SkScalar == float.
- memcpy(&fParams.push_back(), valsAsFloats, count * sizeof(float));
- fInfo.fHasParams = true;
-}
-
-void InstancedRendering::Op::appendParamsTexel(SkScalar x, SkScalar y, SkScalar z, SkScalar w) {
- SkASSERT(!fIsTracked);
- ParamsTexel& texel = fParams.push_back();
- texel.fX = SkScalarToFloat(x);
- texel.fY = SkScalarToFloat(y);
- texel.fZ = SkScalarToFloat(z);
- texel.fW = SkScalarToFloat(w);
- fInfo.fHasParams = true;
-}
-
-void InstancedRendering::Op::appendParamsTexel(SkScalar x, SkScalar y, SkScalar z) {
- SkASSERT(!fIsTracked);
- ParamsTexel& texel = fParams.push_back();
- texel.fX = SkScalarToFloat(x);
- texel.fY = SkScalarToFloat(y);
- texel.fZ = SkScalarToFloat(z);
- fInfo.fHasParams = true;
-}
-
-bool InstancedRendering::Op::xpRequiresDstTexture(const GrCaps& caps, const GrAppliedClip* clip) {
- SkASSERT(State::kRecordingDraws == fInstancedRendering->fState);
- GrProcessorAnalysisCoverage coverageInput;
- bool isMixedSamples = false;
- if (GrAAType::kCoverage == fInfo.aaType() ||
- (GrAAType::kNone == fInfo.aaType() && !fInfo.isSimpleRects() && fInfo.fCannotDiscard)) {
- coverageInput = GrProcessorAnalysisCoverage::kSingleChannel;
- } else {
- coverageInput = GrProcessorAnalysisCoverage::kNone;
- isMixedSamples = GrAAType::kMixedSamples == fInfo.aaType();
- }
- GrProcessorSet::Analysis analysis =
- fProcessors.finalize(this->getSingleInstance().fColor, coverageInput, clip,
- isMixedSamples, caps, &this->getSingleDraw().fInstance.fColor);
-
- Draw& draw = this->getSingleDraw(); // This will assert if we have > 1 command.
- SkASSERT(draw.fGeometry.isEmpty());
- SkASSERT(SkIsPow2(fInfo.fShapeTypes));
- SkASSERT(!fIsTracked);
-
- if (kRect_ShapeFlag == fInfo.fShapeTypes) {
- draw.fGeometry = InstanceProcessor::GetIndexRangeForRect(fInfo.aaType());
- } else if (kOval_ShapeFlag == fInfo.fShapeTypes) {
- draw.fGeometry = InstanceProcessor::GetIndexRangeForOval(fInfo.aaType(), this->bounds());
- } else {
- draw.fGeometry = InstanceProcessor::GetIndexRangeForRRect(fInfo.aaType());
- }
- if (!fParams.empty()) {
- SkASSERT(fInstancedRendering->fParams.count() < (int)kParamsIdx_InfoMask); // TODO: cleaner.
- this->getSingleInstance().fInfo |= fInstancedRendering->fParams.count();
- fInstancedRendering->fParams.push_back_n(fParams.count(), fParams.begin());
- }
-
- fInfo.fCannotTweakAlphaForCoverage = !analysis.isCompatibleWithCoverageAsAlpha();
-
- fInfo.fUsesLocalCoords = analysis.usesLocalCoords();
- fRequiresBarrierOnOverlap = analysis.requiresBarrierBetweenOverlappingDraws();
- return analysis.requiresDstTexture();
-}
-
-void InstancedRendering::Op::wasRecorded() {
- SkASSERT(!fIsTracked);
- fInstancedRendering->fTrackedOps.addToTail(this);
- fIsTracked = true;
+InstancedRendering::InstancedRendering(GrGpu* gpu)
+ : fGpu(SkRef(gpu))
+ SkDEBUGCODE(, fState(State::kRecordingDraws)) {
}
-bool InstancedRendering::Op::onCombineIfPossible(GrOp* other, const GrCaps& caps) {
- Op* that = static_cast<Op*>(other);
- SkASSERT(fInstancedRendering == that->fInstancedRendering);
- SkASSERT(fTailDraw);
- SkASSERT(that->fTailDraw);
-
- if (!OpInfo::CanCombine(fInfo, that->fInfo) || fProcessors != that->fProcessors) {
- return false;
- }
-
- SkASSERT(fRequiresBarrierOnOverlap == that->fRequiresBarrierOnOverlap);
- if (fRequiresBarrierOnOverlap && this->bounds().intersects(that->bounds())) {
- return false;
- }
- OpInfo combinedInfo = fInfo | that->fInfo;
- if (!combinedInfo.isSimpleRects()) {
- // This threshold was chosen with the "shapes_mixed" bench on a MacBook with Intel graphics.
- // There seems to be a wide range where it doesn't matter if we combine or not. What matters
- // is that the itty bitty rects combine with other shapes and the giant ones don't.
- constexpr SkScalar kMaxPixelsToGeneralizeRects = 256 * 256;
- if (fInfo.isSimpleRects() && fPixelLoad > kMaxPixelsToGeneralizeRects) {
- return false;
- }
- if (that->fInfo.isSimpleRects() && that->fPixelLoad > kMaxPixelsToGeneralizeRects) {
- return false;
- }
- }
-
- this->joinBounds(*that);
- fInfo = combinedInfo;
- fPixelLoad += that->fPixelLoad;
- // Adopt the other op's draws.
- fNumDraws += that->fNumDraws;
- fNumChangesInGeometry += that->fNumChangesInGeometry;
- if (fTailDraw->fGeometry != that->fHeadDraw->fGeometry) {
- ++fNumChangesInGeometry;
- }
- fTailDraw->fNext = that->fHeadDraw;
- fTailDraw = that->fTailDraw;
-
- that->fHeadDraw = that->fTailDraw = nullptr;
-
- return true;
-}
void InstancedRendering::beginFlush(GrResourceProvider* rp) {
+#ifdef SK_DEBUG
SkASSERT(State::kRecordingDraws == fState);
fState = State::kFlushing;
+#endif
if (fTrackedOps.isEmpty()) {
return;
@@ -459,27 +62,12 @@ void InstancedRendering::beginFlush(GrResourceProvider* rp) {
this->onBeginFlush(rp);
}
-void InstancedRendering::Op::onExecute(GrOpFlushState* state) {
- SkASSERT(State::kFlushing == fInstancedRendering->fState);
- SkASSERT(state->gpu() == fInstancedRendering->gpu());
+void InstancedRendering::draw(const GrPipeline& pipeline,
+ OpInfo info,
+ const InstancedOp* baseOp) {
+ InstanceProcessor instProc(info, fParamsBuffer.get());
- state->gpu()->handleDirtyContext();
-
- GrPipeline pipeline;
- GrPipeline::InitArgs args;
- args.fAppliedClip = state->drawOpArgs().fAppliedClip;
- args.fCaps = &state->caps();
- args.fProcessors = &fProcessors;
- args.fFlags = GrAATypeIsHW(fInfo.aaType()) ? GrPipeline::kHWAntialias_Flag : 0;
- args.fRenderTarget = state->drawOpArgs().fRenderTarget;
- args.fDstTexture = state->drawOpArgs().fDstTexture;
- pipeline.init(args);
-
- if (GrXferBarrierType barrierType = pipeline.xferBarrierType(*state->gpu()->caps())) {
- state->gpu()->xferBarrier(pipeline.getRenderTarget(), barrierType);
- }
- InstanceProcessor instProc(fInfo, fInstancedRendering->fParamsBuffer.get());
- fInstancedRendering->onDraw(pipeline, instProc, this);
+ this->onDraw(pipeline, instProc, baseOp);
}
void InstancedRendering::endFlush() {
@@ -489,7 +77,7 @@ void InstancedRendering::endFlush() {
fParams.reset();
fParamsBuffer.reset();
this->onEndFlush();
- fState = State::kRecordingDraws;
+ SkDEBUGCODE(fState = State::kRecordingDraws;)
// Hold on to the shape coords and index buffers.
}
@@ -500,4 +88,14 @@ void InstancedRendering::resetGpuResources(ResetType resetType) {
this->onResetGpuResources(resetType);
}
+int InstancedRendering::addOpParams(InstancedOp* op) {
+ if (op->fParams.empty()) {
+ return 0;
+ }
+
+ SkASSERT(fParams.count() < (int)kParamsIdx_InfoMask); // TODO: cleaner.
+ int count = fParams.count();
+ fParams.push_back_n(op->fParams.count(), op->fParams.begin());
+ return count;
+}
}
diff --git a/src/gpu/instanced/InstancedRendering.h b/src/gpu/instanced/InstancedRendering.h
index a8b9530228..3c5976df72 100644
--- a/src/gpu/instanced/InstancedRendering.h
+++ b/src/gpu/instanced/InstancedRendering.h
@@ -8,29 +8,41 @@
#ifndef gr_instanced_InstancedRendering_DEFINED
#define gr_instanced_InstancedRendering_DEFINED
-#include "../private/GrInstancedPipelineInfo.h"
#include "GrGpu.h"
-#include "GrMemoryPool.h"
-#include "SkTInternalLList.h"
+#include "instanced/InstancedOp.h"
#include "instanced/InstancedRenderingTypes.h"
-#include "ops/GrDrawOp.h"
+
+#include "SkTInternalLList.h"
class GrResourceProvider;
namespace gr_instanced {
+class InstancedOp;
class InstanceProcessor;
+
/**
- * This class serves as a centralized clearinghouse for instanced rendering. It accumulates data for
- * instanced draws into one location, and creates special ops that pull from this data. The
- * nature of instanced rendering allows these ops to combine well and render efficiently.
+ * Instanced Rendering occurs through the interaction of four classes:
+ * InstancedOp
+ * OpAllocator
+ * InstancedRendering
+ * InstanceProcessor
+ *
+ * The InstancedOp is a GrDrawOp but is more of a proxy than a normal operation. It accumulates a
+ * linked list of Draw objects that are allocated all together by the OpAllocator.
+ *
+ * There is only one OpAllocator which encapsulates the creation of InstancedOps and the pool
+ * of memory used for their Draw objects.
*
- * During a flush, this class assembles the accumulated draw data into a single vertex and texel
- * buffer, and its subclass draws the ops using backend-specific instanced rendering APIs.
+ * The InstancedRendering class tracks a list of InstancedOps that will all be drawn during
+ * the same flush. There is currently one per opList. The nature of instanced
+ * rendering allows these ops to combine well and render efficiently.
+ * During a flush, it assembles the accumulated draw data into a single vertex and texel
+ * buffer per opList, and its subclasses draw the ops using backend-specific instanced
+ * rendering APIs.
*
- * This class is responsible for the CPU side of instanced rendering. Shaders are implemented by
- * InstanceProcessor.
+ * InstanceProcessors implement the shaders required for instanced rendering.
*/
class InstancedRendering : public SkNoncopyable {
public:
@@ -39,45 +51,13 @@ public:
GrGpu* gpu() const { return fGpu.get(); }
/**
- * These methods make a new record internally for an instanced draw, and return an op that is
- * effectively just an index to that record. The returned op is not self-contained, but
- * rather relies on this class to handle the rendering. The client must call beginFlush() on
- * this class before attempting to flush ops returned by it. It is invalid to record new
- * draws between beginFlush() and endFlush().
- */
- std::unique_ptr<GrDrawOp> SK_WARN_UNUSED_RESULT recordRect(const SkRect&, const SkMatrix&,
- GrPaint&&, GrAA,
- const GrInstancedPipelineInfo&);
-
- std::unique_ptr<GrDrawOp> SK_WARN_UNUSED_RESULT recordRect(const SkRect&, const SkMatrix&,
- GrPaint&&, const SkRect& localRect,
- GrAA,
- const GrInstancedPipelineInfo&);
-
- std::unique_ptr<GrDrawOp> SK_WARN_UNUSED_RESULT recordRect(const SkRect&, const SkMatrix&,
- GrPaint&&,
- const SkMatrix& localMatrix, GrAA,
- const GrInstancedPipelineInfo&);
-
- std::unique_ptr<GrDrawOp> SK_WARN_UNUSED_RESULT recordOval(const SkRect&, const SkMatrix&,
- GrPaint&&, GrAA,
- const GrInstancedPipelineInfo&);
-
- std::unique_ptr<GrDrawOp> SK_WARN_UNUSED_RESULT recordRRect(const SkRRect&, const SkMatrix&,
- GrPaint&&, GrAA,
- const GrInstancedPipelineInfo&);
-
- std::unique_ptr<GrDrawOp> SK_WARN_UNUSED_RESULT recordDRRect(const SkRRect& outer,
- const SkRRect& inner,
- const SkMatrix&, GrPaint&&, GrAA,
- const GrInstancedPipelineInfo&);
-
- /**
* Compiles all recorded draws into GPU buffers and allows the client to begin flushing the
* ops created by this class.
*/
void beginFlush(GrResourceProvider*);
+ void draw(const GrPipeline& pipeline, OpInfo info, const InstancedOp* baseOp);
+
/**
* Called once the ops created previously by this class have all been released. Allows the
* client to begin recording draws again.
@@ -95,81 +75,16 @@ public:
*/
void resetGpuResources(ResetType);
-protected:
- class Op : public GrDrawOp {
- public:
- SK_DECLARE_INTERNAL_LLIST_INTERFACE(Op);
-
- ~Op() override;
- const char* name() const override { return "InstancedRendering::Op"; }
-
- SkString dumpInfo() const override {
- SkString string;
- string.printf(
- "AA: %d, ShapeTypes: 0x%02x, IShapeTypes: 0x%02x, Persp %d, "
- "NonSquare: %d, PLoad: %0.2f, Tracked: %d, NumDraws: %d, "
- "GeomChanges: %d\n",
- (unsigned)fInfo.fAAType,
- fInfo.fShapeTypes,
- fInfo.fInnerShapeTypes,
- fInfo.fHasPerspective,
- fInfo.fNonSquare,
- fPixelLoad,
- fIsTracked,
- fNumDraws,
- fNumChangesInGeometry);
- string.append(INHERITED::dumpInfo());
- return string;
- }
-
- struct Draw {
- Instance fInstance;
- IndexRange fGeometry;
- Draw* fNext;
- };
-
- Draw& getSingleDraw() const { SkASSERT(fHeadDraw && !fHeadDraw->fNext); return *fHeadDraw; }
- Instance& getSingleInstance() const { return this->getSingleDraw().fInstance; }
-
- void appendRRectParams(const SkRRect&);
- void appendParamsTexel(const SkScalar* vals, int count);
- void appendParamsTexel(SkScalar x, SkScalar y, SkScalar z, SkScalar w);
- void appendParamsTexel(SkScalar x, SkScalar y, SkScalar z);
- FixedFunctionFlags fixedFunctionFlags() const override {
- return GrAATypeIsHW(fInfo.aaType()) ? FixedFunctionFlags::kUsesHWAA
- : FixedFunctionFlags::kNone;
- }
- bool xpRequiresDstTexture(const GrCaps&, const GrAppliedClip*) override;
-
- // Registers the op with the InstancedRendering list of tracked ops.
- void wasRecorded() override;
-
- protected:
- Op(uint32_t classID, GrPaint&&, InstancedRendering*);
-
- InstancedRendering* const fInstancedRendering;
- OpInfo fInfo;
- SkScalar fPixelLoad;
- GrProcessorSet fProcessors;
- SkSTArray<5, ParamsTexel, true> fParams;
- bool fIsTracked : 1;
- bool fRequiresBarrierOnOverlap : 1;
- int fNumDraws;
- int fNumChangesInGeometry;
- Draw* fHeadDraw;
- Draw* fTailDraw;
-
- private:
- bool onCombineIfPossible(GrOp* other, const GrCaps& caps) override;
- void onPrepare(GrOpFlushState*) override {}
- void onExecute(GrOpFlushState*) override;
-
- typedef GrDrawOp INHERITED;
-
- friend class InstancedRendering;
- };
+ void addOp(InstancedOp* op) { fTrackedOps.addToTail(op); }
+ void removeOp(InstancedOp* op) { fTrackedOps.remove(op); }
+ int addOpParams(InstancedOp* op);
+
+#ifdef SK_DEBUG
+ bool isFlushing() const { return InstancedRendering::State::kFlushing == fState; }
+#endif
- typedef SkTInternalLList<Op> OpList;
+protected:
+ typedef SkTInternalLList<InstancedOp> OpList;
InstancedRendering(GrGpu* gpu);
@@ -178,29 +93,20 @@ protected:
const GrBuffer* indexBuffer() const { SkASSERT(fIndexBuffer); return fIndexBuffer.get(); }
virtual void onBeginFlush(GrResourceProvider*) = 0;
- virtual void onDraw(const GrPipeline&, const InstanceProcessor&, const Op*) = 0;
+ virtual void onDraw(const GrPipeline&, const InstanceProcessor&, const InstancedOp*) = 0;
virtual void onEndFlush() = 0;
virtual void onResetGpuResources(ResetType) = 0;
private:
+#ifdef SK_DEBUG
enum class State : bool {
kRecordingDraws,
kFlushing
};
-
- std::unique_ptr<Op> SK_WARN_UNUSED_RESULT recordShape(ShapeType, const SkRect& bounds,
- const SkMatrix& viewMatrix, GrPaint&&,
- const SkRect& localRect, GrAA aa,
- const GrInstancedPipelineInfo&);
-
- bool selectAntialiasMode(const SkMatrix& viewMatrix, GrAA aa, const GrInstancedPipelineInfo&,
- GrAAType*);
-
- virtual std::unique_ptr<Op> makeOp(GrPaint&&) = 0;
+#endif
const sk_sp<GrGpu> fGpu;
- State fState;
- GrObjectMemoryPool<Op::Draw> fDrawPool;
+ SkDEBUGCODE(State fState;)
SkSTArray<1024, ParamsTexel, true> fParams;
OpList fTrackedOps;
sk_sp<const GrBuffer> fVertexBuffer;
diff --git a/src/gpu/ops/GrOp.h b/src/gpu/ops/GrOp.h
index 7f8f8e7f64..953a9147aa 100644
--- a/src/gpu/ops/GrOp.h
+++ b/src/gpu/ops/GrOp.h
@@ -21,6 +21,7 @@
class GrCaps;
class GrGpuCommandBuffer;
class GrOpFlushState;
+class GrRenderTargetOpList;
/**
* GrOp is the base class for all Ganesh deferred GPU operations. To facilitate reordering and to
@@ -132,7 +133,7 @@ public:
* combined into another op or have another op combined into it via combineIfPossible() after
* this call is made.
*/
- virtual void wasRecorded() {}
+ virtual void wasRecorded(GrRenderTargetOpList*) {}
/**
* Called prior to executing. The op should perform any resource creation or data transfers