author      2016-12-16 09:50:45 -0500
committer   2016-12-16 15:19:01 +0000
commit      99ad164886ba39f688ebabecd5fe20dd5d923ba0 (patch)
tree        67cf3194adf0c91dd60d4962b0b7835eb99c6882 /src/gpu
parent      a6aa590efc8201747c069f9da2285250957df01b (diff)
Rename batch->op and use sk_sp in instanced rendering
Change-Id: I9df450f2bce51d4f803d419bef863ca3a3314f09
Reviewed-on: https://skia-review.googlesource.com/6162
Commit-Queue: Brian Salomon <bsalomon@google.com>
Reviewed-by: Brian Osman <brianosman@google.com>
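
The change is mechanical but does two things at once: every "batch" name in the instanced-rendering code becomes "op" (GLBatch -> GLOp, BatchInfo -> OpInfo, trackedBatches -> trackedOps, and so on), and the record*/makeOp factories now return sk_sp<GrDrawOp> / sk_sp<Op> instead of raw pointers the caller had to adopt, as seen in the GrRenderTargetContext hunk below where op.reset(ir->recordRect(...)) becomes op = ir->recordRect(...). The following is a minimal standalone sketch of that ownership pattern only; DemoOp, makeOpRaw, and makeOp are hypothetical names, and std::shared_ptr stands in for Skia's intrusive sk_sp.

#include <cstdio>
#include <memory>

class DemoOp {
public:
    explicit DemoOp(int id) : fId(id) {}
    int id() const { return fId; }
private:
    int fId;
};

// Before: the factory returns a raw pointer, so every caller must remember to
// adopt it into a smart pointer (op.reset(makeOpRaw(...))).
DemoOp* makeOpRaw(int id) { return new DemoOp(id); }

// After: the factory returns the smart pointer itself, so ownership transfer is
// part of the signature and callers simply write op = makeOp(...).
std::shared_ptr<DemoOp> makeOp(int id) { return std::make_shared<DemoOp>(id); }

int main() {
    std::shared_ptr<DemoOp> a;
    a.reset(makeOpRaw(1));                   // old style: manual adoption

    std::shared_ptr<DemoOp> b = makeOp(2);   // new style: ownership is explicit

    std::printf("op ids: %d %d\n", a->id(), b->id());
    return 0;
}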
Diffstat (limited to 'src/gpu')
-rw-r--r--   src/gpu/GrRenderTargetContext.cpp              4
-rw-r--r--   src/gpu/instanced/GLInstancedRendering.cpp    60
-rw-r--r--   src/gpu/instanced/GLInstancedRendering.h       8
-rw-r--r--   src/gpu/instanced/InstanceProcessor.cpp      296
-rw-r--r--   src/gpu/instanced/InstanceProcessor.h         10
-rw-r--r--   src/gpu/instanced/InstancedRendering.cpp     200
-rw-r--r--   src/gpu/instanced/InstancedRendering.h       107
-rw-r--r--   src/gpu/instanced/InstancedRenderingTypes.h   22
8 files changed, 351 insertions(+), 356 deletions(-)
diff --git a/src/gpu/GrRenderTargetContext.cpp b/src/gpu/GrRenderTargetContext.cpp index ee87dae13b..7b15c31c40 100644 --- a/src/gpu/GrRenderTargetContext.cpp +++ b/src/gpu/GrRenderTargetContext.cpp @@ -469,8 +469,8 @@ bool GrRenderTargetContext::drawFilledRect(const GrClip& clip, if (GrCaps::InstancedSupport::kNone != fContext->caps()->instancedSupport()) { InstancedRendering* ir = this->getOpList()->instancedRendering(); - op.reset(ir->recordRect(croppedRect, viewMatrix, paint.getColor(), aa, - fInstancedPipelineInfo, &aaType)); + op = ir->recordRect(croppedRect, viewMatrix, paint.getColor(), aa, fInstancedPipelineInfo, + &aaType); if (op) { GrPipelineBuilder pipelineBuilder(paint, aaType); if (ss) { diff --git a/src/gpu/instanced/GLInstancedRendering.cpp b/src/gpu/instanced/GLInstancedRendering.cpp index b61b701edd..da764b2d2e 100644 --- a/src/gpu/instanced/GLInstancedRendering.cpp +++ b/src/gpu/instanced/GLInstancedRendering.cpp @@ -15,11 +15,11 @@ namespace gr_instanced { -class GLInstancedRendering::GLBatch final : public InstancedRendering::Batch { +class GLInstancedRendering::GLOp final : public InstancedRendering::Op { public: DEFINE_OP_CLASS_ID - GLBatch(GLInstancedRendering* instRendering) : INHERITED(ClassID(), instRendering) {} + GLOp(GLInstancedRendering* instRendering) : INHERITED(ClassID(), instRendering) {} int numGLCommands() const { return 1 + fNumChangesInGeometry; } private: @@ -28,7 +28,7 @@ private: friend class GLInstancedRendering; - typedef Batch INHERITED; + typedef Op INHERITED; }; GrCaps::InstancedSupport GLInstancedRendering::CheckSupport(const GrGLCaps& glCaps) { @@ -60,22 +60,20 @@ inline GrGLGpu* GLInstancedRendering::glGpu() const { return static_cast<GrGLGpu*>(this->gpu()); } -InstancedRendering::Batch* GLInstancedRendering::createBatch() { - return new GLBatch(this); -} +sk_sp<InstancedRendering::Op> GLInstancedRendering::makeOp() { return sk_sp<Op>(new GLOp(this)); } void GLInstancedRendering::onBeginFlush(GrResourceProvider* rp) { // Count what there is to draw. - BatchList::Iter iter; - iter.init(this->trackedBatches(), BatchList::Iter::kHead_IterStart); + OpList::Iter iter; + iter.init(this->trackedOps(), OpList::Iter::kHead_IterStart); int numGLInstances = 0; int numGLDrawCmds = 0; - while (Batch* b = iter.get()) { - GLBatch* batch = static_cast<GLBatch*>(b); + while (Op* o = iter.get()) { + GLOp* op = static_cast<GLOp*>(o); iter.next(); - numGLInstances += batch->fNumDraws; - numGLDrawCmds += batch->numGLCommands(); + numGLInstances += op->fNumDraws; + numGLDrawCmds += op->numGLCommands(); } if (!numGLDrawCmds) { return; @@ -145,20 +143,20 @@ void GLInstancedRendering::onBeginFlush(GrResourceProvider* rp) { SkASSERT(!baseInstanceSupport || fDrawIndirectBuffer); SkASSERT(!fGLDrawCmdsInfo); - if (GR_GL_LOG_INSTANCED_BATCHES || !baseInstanceSupport) { + if (GR_GL_LOG_INSTANCED_OPS || !baseInstanceSupport) { fGLDrawCmdsInfo.reset(numGLDrawCmds); } - // Generate the instance and draw-indirect buffer contents based on the tracked batches. - iter.init(this->trackedBatches(), BatchList::Iter::kHead_IterStart); - while (Batch* b = iter.get()) { - GLBatch* batch = static_cast<GLBatch*>(b); + // Generate the instance and draw-indirect buffer contents based on the tracked ops. + iter.init(this->trackedOps(), OpList::Iter::kHead_IterStart); + while (Op* o = iter.get()) { + GLOp* op = static_cast<GLOp*>(o); iter.next(); - batch->fEmulatedBaseInstance = baseInstanceSupport ? 
0 : glInstancesIdx; - batch->fGLDrawCmdsIdx = glDrawCmdsIdx; + op->fEmulatedBaseInstance = baseInstanceSupport ? 0 : glInstancesIdx; + op->fGLDrawCmdsIdx = glDrawCmdsIdx; - const Batch::Draw* draw = batch->fHeadDraw; + const Op::Draw* draw = op->fHeadDraw; SkASSERT(draw); do { int instanceCount = 0; @@ -179,7 +177,7 @@ void GLInstancedRendering::onBeginFlush(GrResourceProvider* rp) { glCmd.fBaseInstance = baseInstanceSupport ? glInstancesIdx : 0; } - if (GR_GL_LOG_INSTANCED_BATCHES || !baseInstanceSupport) { + if (GR_GL_LOG_INSTANCED_OPS || !baseInstanceSupport) { GLDrawCmdInfo& cmdInfo = fGLDrawCmdsInfo[glDrawCmdsIdx]; cmdInfo.fGeometry = geometry; cmdInfo.fInstanceCount = instanceCount; @@ -200,7 +198,7 @@ void GLInstancedRendering::onBeginFlush(GrResourceProvider* rp) { } void GLInstancedRendering::onDraw(const GrPipeline& pipeline, const InstanceProcessor& instProc, - const Batch* baseBatch) { + const Op* baseOp) { if (!fDrawIndirectBuffer && !fGLDrawCmdsInfo) { return; // beginFlush was not successful. } @@ -213,14 +211,14 @@ void GLInstancedRendering::onDraw(const GrPipeline& pipeline, const InstanceProc } const GrGLCaps& glCaps = this->glGpu()->glCaps(); - const GLBatch* batch = static_cast<const GLBatch*>(baseBatch); - int numCommands = batch->numGLCommands(); + const GLOp* op = static_cast<const GLOp*>(baseOp); + int numCommands = op->numGLCommands(); -#if GR_GL_LOG_INSTANCED_BATCHES +#if GR_GL_LOG_INSTANCED_OPS SkASSERT(fGLDrawCmdsInfo); - SkDebugf("Instanced batch: ["); + SkDebugf("Instanced op: ["); for (int i = 0; i < numCommands; ++i) { - int glCmdIdx = batch->fGLDrawCmdsIdx + i; + int glCmdIdx = op->fGLDrawCmdsIdx + i; SkDebugf("%s%i * %s", (i ? ", " : ""), fGLDrawCmdsInfo[glCmdIdx].fInstanceCount, InstanceProcessor::GetNameOfIndexRange(fGLDrawCmdsInfo[glCmdIdx].fGeometry)); } @@ -231,17 +229,17 @@ void GLInstancedRendering::onDraw(const GrPipeline& pipeline, const InstanceProc if (numCommands > 1 && glCaps.multiDrawIndirectSupport() && glCaps.baseInstanceSupport()) { SkASSERT(fDrawIndirectBuffer); - int glCmdsIdx = batch->fGLDrawCmdsIdx; - this->flushInstanceAttribs(batch->fEmulatedBaseInstance); + int glCmdsIdx = op->fGLDrawCmdsIdx; + this->flushInstanceAttribs(op->fEmulatedBaseInstance); GL_CALL(MultiDrawElementsIndirect(GR_GL_TRIANGLES, GR_GL_UNSIGNED_BYTE, (GrGLDrawElementsIndirectCommand*) nullptr + glCmdsIdx, numCommands, 0)); return; } - int emulatedBaseInstance = batch->fEmulatedBaseInstance; + int emulatedBaseInstance = op->fEmulatedBaseInstance; for (int i = 0; i < numCommands; ++i) { - int glCmdIdx = batch->fGLDrawCmdsIdx + i; + int glCmdIdx = op->fGLDrawCmdsIdx + i; this->flushInstanceAttribs(emulatedBaseInstance); if (fDrawIndirectBuffer) { GL_CALL(DrawElementsIndirect(GR_GL_TRIANGLES, GR_GL_UNSIGNED_BYTE, diff --git a/src/gpu/instanced/GLInstancedRendering.h b/src/gpu/instanced/GLInstancedRendering.h index c9f0d8f8bb..c77b61db7e 100644 --- a/src/gpu/instanced/GLInstancedRendering.h +++ b/src/gpu/instanced/GLInstancedRendering.h @@ -15,7 +15,7 @@ class GrGLCaps; class GrGLGpu; -#define GR_GL_LOG_INSTANCED_BATCHES 0 +#define GR_GL_LOG_INSTANCED_OPS 0 namespace gr_instanced { @@ -33,10 +33,10 @@ private: GrGLGpu* glGpu() const; - Batch* createBatch() override; + sk_sp<Op> makeOp() override; void onBeginFlush(GrResourceProvider*) override; - void onDraw(const GrPipeline&, const InstanceProcessor&, const Batch*) override; + void onDraw(const GrPipeline&, const InstanceProcessor&, const Op*) override; void onEndFlush() override; void onResetGpuResources(ResetType) 
override; @@ -54,7 +54,7 @@ private: GrGpuResource::UniqueID fInstanceAttribsBufferUniqueId; int fInstanceAttribsBaseInstance; - class GLBatch; + class GLOp; friend class ::GrGLCaps; // For CheckSupport. diff --git a/src/gpu/instanced/InstanceProcessor.cpp b/src/gpu/instanced/InstanceProcessor.cpp index 4560d2bbdc..e890247bc9 100644 --- a/src/gpu/instanced/InstanceProcessor.cpp +++ b/src/gpu/instanced/InstanceProcessor.cpp @@ -41,8 +41,7 @@ GrCaps::InstancedSupport InstanceProcessor::CheckSupport(const GrShaderCaps& sha return GrCaps::InstancedSupport::kMixedSampled; } -InstanceProcessor::InstanceProcessor(BatchInfo batchInfo, GrBuffer* paramsBuffer) - : fBatchInfo(batchInfo) { +InstanceProcessor::InstanceProcessor(OpInfo opInfo, GrBuffer* paramsBuffer) : fOpInfo(opInfo) { this->initClassID<InstanceProcessor>(); this->addVertexAttrib("shapeCoords", kVec2f_GrVertexAttribType, kHigh_GrSLPrecision); @@ -62,15 +61,14 @@ InstanceProcessor::InstanceProcessor(BatchInfo batchInfo, GrBuffer* paramsBuffer GR_STATIC_ASSERT(6 == (int)Attrib::kLocalRect); GR_STATIC_ASSERT(7 == kNumAttribs); - if (fBatchInfo.fHasParams) { + if (fOpInfo.fHasParams) { SkASSERT(paramsBuffer); fParamsAccess.reset(kRGBA_float_GrPixelConfig, paramsBuffer, kVertex_GrShaderFlag); this->addBufferAccess(&fParamsAccess); } - if (fBatchInfo.fAntialiasMode >= AntialiasMode::kMSAA) { - if (!fBatchInfo.isSimpleRects() || - AntialiasMode::kMixedSamples == fBatchInfo.fAntialiasMode) { + if (fOpInfo.fAntialiasMode >= AntialiasMode::kMSAA) { + if (!fOpInfo.isSimpleRects() || AntialiasMode::kMixedSamples == fOpInfo.fAntialiasMode) { this->setWillUseSampleLocations(); } } @@ -139,7 +137,7 @@ private: class GLSLInstanceProcessor::Backend { public: - static Backend* SK_WARN_UNUSED_RESULT Create(const GrPipeline&, BatchInfo, const VertexInputs&); + static Backend* SK_WARN_UNUSED_RESULT Create(const GrPipeline&, OpInfo, const VertexInputs&); virtual ~Backend() {} void init(GrGLSLVaryingHandler*, GrGLSLVertexBuilder*); @@ -160,19 +158,19 @@ public: const char* outColor); protected: - Backend(BatchInfo batchInfo, const VertexInputs& inputs) - : fBatchInfo(batchInfo), - fInputs(inputs), - fModifiesCoverage(false), - fModifiesColor(false), - fNeedsNeighborRadii(false), - fColor(kVec4f_GrSLType), - fTriangleIsArc(kInt_GrSLType), - fArcCoords(kVec2f_GrSLType), - fInnerShapeCoords(kVec2f_GrSLType), - fInnerRRect(kVec4f_GrSLType), - fModifiedShapeCoords(nullptr) { - if (fBatchInfo.fShapeTypes & kRRect_ShapesMask) { + Backend(OpInfo opInfo, const VertexInputs& inputs) + : fOpInfo(opInfo) + , fInputs(inputs) + , fModifiesCoverage(false) + , fModifiesColor(false) + , fNeedsNeighborRadii(false) + , fColor(kVec4f_GrSLType) + , fTriangleIsArc(kInt_GrSLType) + , fArcCoords(kVec2f_GrSLType) + , fInnerShapeCoords(kVec2f_GrSLType) + , fInnerRRect(kVec4f_GrSLType) + , fModifiedShapeCoords(nullptr) { + if (fOpInfo.fShapeTypes & kRRect_ShapesMask) { fModifiedShapeCoords = "adjustedShapeCoords"; } } @@ -191,17 +189,17 @@ protected: void setupNinePatchRadii(GrGLSLVertexBuilder*); void setupComplexRadii(GrGLSLVertexBuilder*); - const BatchInfo fBatchInfo; - const VertexInputs& fInputs; - bool fModifiesCoverage; - bool fModifiesColor; - bool fNeedsNeighborRadii; - GrGLSLVertToFrag fColor; - GrGLSLVertToFrag fTriangleIsArc; - GrGLSLVertToFrag fArcCoords; - GrGLSLVertToFrag fInnerShapeCoords; - GrGLSLVertToFrag fInnerRRect; - const char* fModifiedShapeCoords; + const OpInfo fOpInfo; + const VertexInputs& fInputs; + bool fModifiesCoverage; + bool fModifiesColor; + bool 
fNeedsNeighborRadii; + GrGLSLVertToFrag fColor; + GrGLSLVertToFrag fTriangleIsArc; + GrGLSLVertToFrag fArcCoords; + GrGLSLVertToFrag fInnerShapeCoords; + GrGLSLVertToFrag fInnerRRect; + const char* fModifiedShapeCoords; }; void GLSLInstanceProcessor::onEmitCode(EmitArgs& args, GrGPArgs* gpArgs) { @@ -215,12 +213,12 @@ void GLSLInstanceProcessor::onEmitCode(EmitArgs& args, GrGPArgs* gpArgs) { varyingHandler->emitAttributes(ip); VertexInputs inputs(ip, v); - if (ip.batchInfo().fHasParams) { + if (ip.opInfo().fHasParams) { SkASSERT(1 == ip.numBuffers()); inputs.initParams(args.fBufferSamplers[0]); } - if (!ip.batchInfo().fHasPerspective) { + if (!ip.opInfo().fHasPerspective) { v->codeAppendf("mat2x3 shapeMatrix = mat2x3(%s, %s);", inputs.attr(Attrib::kShapeMatrixX), inputs.attr(Attrib::kShapeMatrixY)); } else { @@ -235,36 +233,36 @@ void GLSLInstanceProcessor::onEmitCode(EmitArgs& args, GrGPArgs* gpArgs) { v->codeAppend ("}"); } - bool hasSingleShapeType = SkIsPow2(ip.batchInfo().fShapeTypes); + bool hasSingleShapeType = SkIsPow2(ip.opInfo().fShapeTypes); if (!hasSingleShapeType) { v->defineConstant("SHAPE_TYPE_BIT", kShapeType_InfoBit); v->codeAppendf("uint shapeType = %s >> SHAPE_TYPE_BIT;", inputs.attr(Attrib::kInstanceInfo)); } - std::unique_ptr<Backend> backend(Backend::Create(pipeline, ip.batchInfo(), inputs)); + std::unique_ptr<Backend> backend(Backend::Create(pipeline, ip.opInfo(), inputs)); backend->init(varyingHandler, v); int usedShapeDefinitions = 0; - if (hasSingleShapeType || !(ip.batchInfo().fShapeTypes & ~kRRect_ShapesMask)) { - if (kRect_ShapeFlag == ip.batchInfo().fShapeTypes) { + if (hasSingleShapeType || !(ip.opInfo().fShapeTypes & ~kRRect_ShapesMask)) { + if (kRect_ShapeFlag == ip.opInfo().fShapeTypes) { backend->setupRect(v); - } else if (kOval_ShapeFlag == ip.batchInfo().fShapeTypes) { + } else if (kOval_ShapeFlag == ip.opInfo().fShapeTypes) { backend->setupOval(v); } else { backend->setupRRect(v, &usedShapeDefinitions); } } else { - if (ip.batchInfo().fShapeTypes & kRRect_ShapesMask) { + if (ip.opInfo().fShapeTypes & kRRect_ShapesMask) { v->codeAppend ("if (shapeType >= SIMPLE_R_RECT_SHAPE_TYPE) {"); backend->setupRRect(v, &usedShapeDefinitions); v->codeAppend ("}"); usedShapeDefinitions |= kSimpleRRect_ShapeFlag; } - if (ip.batchInfo().fShapeTypes & kOval_ShapeFlag) { - if (ip.batchInfo().fShapeTypes & kRect_ShapeFlag) { - if (ip.batchInfo().fShapeTypes & kRRect_ShapesMask) { + if (ip.opInfo().fShapeTypes & kOval_ShapeFlag) { + if (ip.opInfo().fShapeTypes & kRect_ShapeFlag) { + if (ip.opInfo().fShapeTypes & kRRect_ShapesMask) { v->codeAppend ("else "); } v->codeAppend ("if (OVAL_SHAPE_TYPE == shapeType) {"); @@ -275,15 +273,15 @@ void GLSLInstanceProcessor::onEmitCode(EmitArgs& args, GrGPArgs* gpArgs) { backend->setupOval(v); v->codeAppend ("}"); } - if (ip.batchInfo().fShapeTypes & kRect_ShapeFlag) { + if (ip.opInfo().fShapeTypes & kRect_ShapeFlag) { v->codeAppend ("else {"); backend->setupRect(v); v->codeAppend ("}"); } } - if (ip.batchInfo().fInnerShapeTypes) { - bool hasSingleInnerShapeType = SkIsPow2(ip.batchInfo().fInnerShapeTypes); + if (ip.opInfo().fInnerShapeTypes) { + bool hasSingleInnerShapeType = SkIsPow2(ip.opInfo().fInnerShapeTypes); if (!hasSingleInnerShapeType) { v->defineConstantf("int", "INNER_SHAPE_TYPE_MASK", "0x%x", kInnerShapeType_InfoMask); v->defineConstant("INNER_SHAPE_TYPE_BIT", kInnerShapeType_InfoBit); @@ -306,27 +304,27 @@ void GLSLInstanceProcessor::onEmitCode(EmitArgs& args, GrGPArgs* gpArgs) { backend->initInnerShape(varyingHandler, v); 
- SkASSERT(0 == (ip.batchInfo().fInnerShapeTypes & kRRect_ShapesMask) || - kSimpleRRect_ShapeFlag == (ip.batchInfo().fInnerShapeTypes & kRRect_ShapesMask)); + SkASSERT(0 == (ip.opInfo().fInnerShapeTypes & kRRect_ShapesMask) || + kSimpleRRect_ShapeFlag == (ip.opInfo().fInnerShapeTypes & kRRect_ShapesMask)); if (hasSingleInnerShapeType) { - if (kRect_ShapeFlag == ip.batchInfo().fInnerShapeTypes) { + if (kRect_ShapeFlag == ip.opInfo().fInnerShapeTypes) { backend->setupInnerRect(v); - } else if (kOval_ShapeFlag == ip.batchInfo().fInnerShapeTypes) { + } else if (kOval_ShapeFlag == ip.opInfo().fInnerShapeTypes) { backend->setupInnerOval(v); } else { backend->setupInnerSimpleRRect(v); } } else { - if (ip.batchInfo().fInnerShapeTypes & kSimpleRRect_ShapeFlag) { + if (ip.opInfo().fInnerShapeTypes & kSimpleRRect_ShapeFlag) { v->codeAppend ("if (SIMPLE_R_RECT_SHAPE_TYPE == innerShapeType) {"); backend->setupInnerSimpleRRect(v); v->codeAppend("}"); usedShapeDefinitions |= kSimpleRRect_ShapeFlag; } - if (ip.batchInfo().fInnerShapeTypes & kOval_ShapeFlag) { - if (ip.batchInfo().fInnerShapeTypes & kRect_ShapeFlag) { - if (ip.batchInfo().fInnerShapeTypes & kSimpleRRect_ShapeFlag) { + if (ip.opInfo().fInnerShapeTypes & kOval_ShapeFlag) { + if (ip.opInfo().fInnerShapeTypes & kRect_ShapeFlag) { + if (ip.opInfo().fInnerShapeTypes & kSimpleRRect_ShapeFlag) { v->codeAppend ("else "); } v->codeAppend ("if (OVAL_SHAPE_TYPE == innerShapeType) {"); @@ -337,7 +335,7 @@ void GLSLInstanceProcessor::onEmitCode(EmitArgs& args, GrGPArgs* gpArgs) { backend->setupInnerOval(v); v->codeAppend("}"); } - if (ip.batchInfo().fInnerShapeTypes & kRect_ShapeFlag) { + if (ip.opInfo().fInnerShapeTypes & kRect_ShapeFlag) { v->codeAppend("else {"); backend->setupInnerRect(v); v->codeAppend("}"); @@ -360,17 +358,17 @@ void GLSLInstanceProcessor::onEmitCode(EmitArgs& args, GrGPArgs* gpArgs) { args.fOutputColor); const char* localCoords = nullptr; - if (ip.batchInfo().fUsesLocalCoords) { + if (ip.opInfo().fUsesLocalCoords) { localCoords = "localCoords"; v->codeAppendf("vec2 t = 0.5 * (%s + vec2(1));", backend->outShapeCoords()); v->codeAppendf("vec2 localCoords = (1.0 - t) * %s.xy + t * %s.zw;", inputs.attr(Attrib::kLocalRect), inputs.attr(Attrib::kLocalRect)); } - if (ip.batchInfo().fHasLocalMatrix && ip.batchInfo().fHasParams) { + if (ip.opInfo().fHasLocalMatrix && ip.opInfo().fHasParams) { v->defineConstantf("int", "LOCAL_MATRIX_FLAG", "0x%x", kLocalMatrix_InfoFlag); v->codeAppendf("if (0 != (%s & LOCAL_MATRIX_FLAG)) {", inputs.attr(Attrib::kInstanceInfo)); - if (!ip.batchInfo().fUsesLocalCoords) { + if (!ip.opInfo().fUsesLocalCoords) { inputs.skipParams(2); } else { v->codeAppendf( "mat2x3 localMatrix;"); @@ -385,7 +383,7 @@ void GLSLInstanceProcessor::onEmitCode(EmitArgs& args, GrGPArgs* gpArgs) { v->codeAppend("}"); } - GrSLType positionType = ip.batchInfo().fHasPerspective ? kVec3f_GrSLType : kVec2f_GrSLType; + GrSLType positionType = ip.opInfo().fHasPerspective ? 
kVec3f_GrSLType : kVec2f_GrSLType; v->codeAppendf("%s deviceCoords = vec3(%s, 1) * shapeMatrix;", GrGLSLTypeString(positionType), backend->outShapeCoords()); gpArgs->fPositionVar.set(positionType, "deviceCoords"); @@ -418,7 +416,7 @@ void GLSLInstanceProcessor::Backend::setupRRect(GrGLSLVertexBuilder* v, int* use v->codeAppend ("mat2 p = "); fInputs.fetchNextParam(kMat22f_GrSLType); v->codeAppend (";"); - uint8_t types = fBatchInfo.fShapeTypes & kRRect_ShapesMask; + uint8_t types = fOpInfo.fShapeTypes & kRRect_ShapesMask; if (0 == (types & (types - 1))) { if (kSimpleRRect_ShapeFlag == types) { this->setupSimpleRadii(v); @@ -520,7 +518,7 @@ void GLSLInstanceProcessor::Backend::adjustRRectVertices(GrGLSLVertexBuilder* v) void GLSLInstanceProcessor::Backend::initInnerShape(GrGLSLVaryingHandler* varyingHandler, GrGLSLVertexBuilder* v) { - SkASSERT(!(fBatchInfo.fInnerShapeTypes & (kNinePatch_ShapeFlag | kComplexRRect_ShapeFlag))); + SkASSERT(!(fOpInfo.fInnerShapeTypes & (kNinePatch_ShapeFlag | kComplexRRect_ShapeFlag))); this->onInitInnerShape(varyingHandler, v); @@ -556,10 +554,9 @@ void GLSLInstanceProcessor::Backend::emitCode(GrGLSLVertexBuilder* v, GrGLSLPPFr class GLSLInstanceProcessor::BackendNonAA : public Backend { public: - BackendNonAA(BatchInfo batchInfo, const VertexInputs& inputs) - : INHERITED(batchInfo, inputs) { - if (fBatchInfo.fCannotDiscard && !fBatchInfo.isSimpleRects()) { - fModifiesColor = !fBatchInfo.fCannotTweakAlphaForCoverage; + BackendNonAA(OpInfo opInfo, const VertexInputs& inputs) : INHERITED(opInfo, inputs) { + if (fOpInfo.fCannotDiscard && !fOpInfo.isSimpleRects()) { + fModifiesColor = !fOpInfo.fCannotTweakAlphaForCoverage; fModifiesCoverage = !fModifiesColor; } } @@ -582,7 +579,7 @@ private: void GLSLInstanceProcessor::BackendNonAA::onInit(GrGLSLVaryingHandler* varyingHandler, GrGLSLVertexBuilder*) { - if (kRect_ShapeFlag != fBatchInfo.fShapeTypes) { + if (kRect_ShapeFlag != fOpInfo.fShapeTypes) { varyingHandler->addFlatVarying("triangleIsArc", &fTriangleIsArc, kLow_GrSLPrecision); varyingHandler->addVarying("arcCoords", &fArcCoords, kMedium_GrSLPrecision); } @@ -604,8 +601,8 @@ void GLSLInstanceProcessor::BackendNonAA::setupOval(GrGLSLVertexBuilder* v) { void GLSLInstanceProcessor::BackendNonAA::onInitInnerShape(GrGLSLVaryingHandler* varyingHandler, GrGLSLVertexBuilder*) { varyingHandler->addVarying("innerShapeCoords", &fInnerShapeCoords, kMedium_GrSLPrecision); - if (kRect_ShapeFlag != fBatchInfo.fInnerShapeTypes && - kOval_ShapeFlag != fBatchInfo.fInnerShapeTypes) { + if (kRect_ShapeFlag != fOpInfo.fInnerShapeTypes && + kOval_ShapeFlag != fOpInfo.fInnerShapeTypes) { varyingHandler->addFlatVarying("innerRRect", &fInnerRRect, kMedium_GrSLPrecision); } } @@ -631,7 +628,7 @@ void GLSLInstanceProcessor::BackendNonAA::onEmitCode(GrGLSLVertexBuilder*, const char* outCoverage, const char* outColor) { const char* dropFragment = nullptr; - if (!fBatchInfo.fCannotDiscard) { + if (!fOpInfo.fCannotDiscard) { dropFragment = "discard"; } else if (fModifiesCoverage) { f->codeAppend ("lowp float covered = 1.0;"); @@ -645,13 +642,13 @@ void GLSLInstanceProcessor::BackendNonAA::onEmitCode(GrGLSLVertexBuilder*, f->codeAppendf("if (%s != 0 && dot(%s, %s) > 1.0) %s;", fTriangleIsArc.fsIn(), fArcCoords.fsIn(), fArcCoords.fsIn(), dropFragment); } - if (fBatchInfo.fInnerShapeTypes) { + if (fOpInfo.fInnerShapeTypes) { SkASSERT(dropFragment); f->codeAppendf("// Inner shape.\n"); - if (kRect_ShapeFlag == fBatchInfo.fInnerShapeTypes) { + if (kRect_ShapeFlag == fOpInfo.fInnerShapeTypes) { 
f->codeAppendf("if (all(lessThanEqual(abs(%s), vec2(1)))) %s;", fInnerShapeCoords.fsIn(), dropFragment); - } else if (kOval_ShapeFlag == fBatchInfo.fInnerShapeTypes) { + } else if (kOval_ShapeFlag == fOpInfo.fInnerShapeTypes) { f->codeAppendf("if ((dot(%s, %s) <= 1.0)) %s;", fInnerShapeCoords.fsIn(), fInnerShapeCoords.fsIn(), dropFragment); } else { @@ -681,20 +678,19 @@ void GLSLInstanceProcessor::BackendNonAA::onEmitCode(GrGLSLVertexBuilder*, class GLSLInstanceProcessor::BackendCoverage : public Backend { public: - BackendCoverage(BatchInfo batchInfo, const VertexInputs& inputs) - : INHERITED(batchInfo, inputs), - fColorTimesRectCoverage(kVec4f_GrSLType), - fRectCoverage(kFloat_GrSLType), - fEllipseCoords(kVec2f_GrSLType), - fEllipseName(kVec2f_GrSLType), - fBloatedRadius(kFloat_GrSLType), - fDistanceToInnerEdge(kVec2f_GrSLType), - fInnerShapeBloatedHalfSize(kVec2f_GrSLType), - fInnerEllipseCoords(kVec2f_GrSLType), - fInnerEllipseName(kVec2f_GrSLType) { - fShapeIsCircle = !fBatchInfo.fNonSquare && !(fBatchInfo.fShapeTypes & kRRect_ShapesMask); - fTweakAlphaForCoverage = !fBatchInfo.fCannotTweakAlphaForCoverage && - !fBatchInfo.fInnerShapeTypes; + BackendCoverage(OpInfo opInfo, const VertexInputs& inputs) + : INHERITED(opInfo, inputs) + , fColorTimesRectCoverage(kVec4f_GrSLType) + , fRectCoverage(kFloat_GrSLType) + , fEllipseCoords(kVec2f_GrSLType) + , fEllipseName(kVec2f_GrSLType) + , fBloatedRadius(kFloat_GrSLType) + , fDistanceToInnerEdge(kVec2f_GrSLType) + , fInnerShapeBloatedHalfSize(kVec2f_GrSLType) + , fInnerEllipseCoords(kVec2f_GrSLType) + , fInnerEllipseName(kVec2f_GrSLType) { + fShapeIsCircle = !fOpInfo.fNonSquare && !(fOpInfo.fShapeTypes & kRRect_ShapesMask); + fTweakAlphaForCoverage = !fOpInfo.fCannotTweakAlphaForCoverage && !fOpInfo.fInnerShapeTypes; fModifiesCoverage = !fTweakAlphaForCoverage; fModifiesColor = fTweakAlphaForCoverage; fModifiedShapeCoords = "bloatedShapeCoords"; @@ -745,11 +741,11 @@ void GLSLInstanceProcessor::BackendCoverage::onInit(GrGLSLVaryingHandler* varyin v->codeAppend ("vec2 bloat = 0.5 / shapeHalfSize;"); v->codeAppendf("bloatedShapeCoords = %s * (1.0 + bloat);", fInputs.attr(Attrib::kShapeCoords)); - if (kOval_ShapeFlag != fBatchInfo.fShapeTypes) { + if (kOval_ShapeFlag != fOpInfo.fShapeTypes) { if (fTweakAlphaForCoverage) { varyingHandler->addVarying("colorTimesRectCoverage", &fColorTimesRectCoverage, kLow_GrSLPrecision); - if (kRect_ShapeFlag == fBatchInfo.fShapeTypes) { + if (kRect_ShapeFlag == fOpInfo.fShapeTypes) { fColor = fColorTimesRectCoverage; } } else { @@ -757,7 +753,7 @@ void GLSLInstanceProcessor::BackendCoverage::onInit(GrGLSLVaryingHandler* varyin } v->codeAppend("float rectCoverage = 0.0;"); } - if (kRect_ShapeFlag != fBatchInfo.fShapeTypes) { + if (kRect_ShapeFlag != fOpInfo.fShapeTypes) { varyingHandler->addFlatVarying("triangleIsArc", &fTriangleIsArc, kLow_GrSLPrecision); if (!fShapeIsCircle) { varyingHandler->addVarying("ellipseCoords", &fEllipseCoords, kMedium_GrSLPrecision); @@ -862,7 +858,7 @@ void GLSLInstanceProcessor::BackendCoverage::onInitInnerShape(GrGLSLVaryingHandl GrGLSLVertexBuilder* v) { v->codeAppend("vec2 innerShapeHalfSize = shapeHalfSize / outer2Inner.xy;"); - if (kOval_ShapeFlag == fBatchInfo.fInnerShapeTypes) { + if (kOval_ShapeFlag == fOpInfo.fInnerShapeTypes) { varyingHandler->addVarying("innerEllipseCoords", &fInnerEllipseCoords, kMedium_GrSLPrecision); varyingHandler->addFlatVarying("innerEllipseName", &fInnerEllipseName, kHigh_GrSLPrecision); @@ -871,7 +867,7 @@ void 
GLSLInstanceProcessor::BackendCoverage::onInitInnerShape(GrGLSLVaryingHandl kMedium_GrSLPrecision); varyingHandler->addFlatVarying("innerShapeBloatedHalfSize", &fInnerShapeBloatedHalfSize, kMedium_GrSLPrecision); - if (kRect_ShapeFlag != fBatchInfo.fInnerShapeTypes) { + if (kRect_ShapeFlag != fOpInfo.fInnerShapeTypes) { varyingHandler->addVarying("innerShapeCoords", &fInnerShapeCoords, kMedium_GrSLPrecision); varyingHandler->addFlatVarying("innerEllipseName", &fInnerEllipseName, @@ -926,7 +922,7 @@ void GLSLInstanceProcessor::BackendCoverage::onEmitCode(GrGLSLVertexBuilder* v, } SkString coverage("lowp float coverage"); - if (fBatchInfo.fInnerShapeTypes || (!fTweakAlphaForCoverage && fTriangleIsArc.fsIn())) { + if (fOpInfo.fInnerShapeTypes || (!fTweakAlphaForCoverage && fTriangleIsArc.fsIn())) { f->codeAppendf("%s;", coverage.c_str()); coverage = "coverage"; } @@ -937,7 +933,7 @@ void GLSLInstanceProcessor::BackendCoverage::onEmitCode(GrGLSLVertexBuilder* v, if (fShapeIsCircle) { this->emitCircle(f, coverage.c_str()); } else { - bool ellipseCoordsMayBeNegative = SkToBool(fBatchInfo.fShapeTypes & kOval_ShapeFlag); + bool ellipseCoordsMayBeNegative = SkToBool(fOpInfo.fShapeTypes & kOval_ShapeFlag); this->emitArc(f, fEllipseCoords.fsIn(), fEllipseName.fsIn(), true /*ellipseCoordsNeedClamp*/, ellipseCoordsMayBeNegative, coverage.c_str()); @@ -950,10 +946,10 @@ void GLSLInstanceProcessor::BackendCoverage::onEmitCode(GrGLSLVertexBuilder* v, this->emitRect(f, coverage.c_str(), outColor); } - if (fBatchInfo.fInnerShapeTypes) { + if (fOpInfo.fInnerShapeTypes) { f->codeAppendf("// Inner shape.\n"); SkString innerCoverageDecl("lowp float innerCoverage"); - if (kOval_ShapeFlag == fBatchInfo.fInnerShapeTypes) { + if (kOval_ShapeFlag == fOpInfo.fInnerShapeTypes) { this->emitArc(f, fInnerEllipseCoords.fsIn(), fInnerEllipseName.fsIn(), true /*ellipseCoordsNeedClamp*/, true /*ellipseCoordsMayBeNegative*/, innerCoverageDecl.c_str()); @@ -962,7 +958,7 @@ void GLSLInstanceProcessor::BackendCoverage::onEmitCode(GrGLSLVertexBuilder* v, fDistanceToInnerEdge.vsOut()); v->codeAppendf("%s = innerShapeHalfSize + 0.5;", fInnerShapeBloatedHalfSize.vsOut()); - if (kRect_ShapeFlag == fBatchInfo.fInnerShapeTypes) { + if (kRect_ShapeFlag == fOpInfo.fInnerShapeTypes) { this->emitInnerRect(f, innerCoverageDecl.c_str()); } else { f->codeAppendf("%s = 0.0;", innerCoverageDecl.c_str()); @@ -1003,7 +999,7 @@ void GLSLInstanceProcessor::BackendCoverage::emitRect(GrGLSLPPFragmentBuilder* f void GLSLInstanceProcessor::BackendCoverage::emitCircle(GrGLSLPPFragmentBuilder* f, const char* outCoverage) { // TODO: circleCoords = max(circleCoords, 0) if we decide to do this optimization on rrects. 
- SkASSERT(!(kRRect_ShapesMask & fBatchInfo.fShapeTypes)); + SkASSERT(!(kRRect_ShapesMask & fOpInfo.fShapeTypes)); f->codeAppendf("mediump float distanceToEdge = %s - length(%s);", fBloatedRadius.fsIn(), fEllipseCoords.fsIn()); f->codeAppendf("%s = clamp(distanceToEdge, 0.0, 1.0);", outCoverage); @@ -1050,24 +1046,24 @@ void GLSLInstanceProcessor::BackendCoverage::emitInnerRect(GrGLSLPPFragmentBuild class GLSLInstanceProcessor::BackendMultisample : public Backend { public: - BackendMultisample(BatchInfo batchInfo, const VertexInputs& inputs, int effectiveSampleCnt) - : INHERITED(batchInfo, inputs), - fEffectiveSampleCnt(effectiveSampleCnt), - fShapeCoords(kVec2f_GrSLType), - fShapeInverseMatrix(kMat22f_GrSLType), - fFragShapeHalfSpan(kVec2f_GrSLType), - fArcTest(kVec2f_GrSLType), - fArcInverseMatrix(kMat22f_GrSLType), - fFragArcHalfSpan(kVec2f_GrSLType), - fEarlyAccept(kInt_GrSLType), - fInnerShapeInverseMatrix(kMat22f_GrSLType), - fFragInnerShapeHalfSpan(kVec2f_GrSLType) { - fRectTrianglesMaySplit = fBatchInfo.fHasPerspective; - fNeedsNeighborRadii = this->isMixedSampled() && !fBatchInfo.fHasPerspective; + BackendMultisample(OpInfo opInfo, const VertexInputs& inputs, int effectiveSampleCnt) + : INHERITED(opInfo, inputs) + , fEffectiveSampleCnt(effectiveSampleCnt) + , fShapeCoords(kVec2f_GrSLType) + , fShapeInverseMatrix(kMat22f_GrSLType) + , fFragShapeHalfSpan(kVec2f_GrSLType) + , fArcTest(kVec2f_GrSLType) + , fArcInverseMatrix(kMat22f_GrSLType) + , fFragArcHalfSpan(kVec2f_GrSLType) + , fEarlyAccept(kInt_GrSLType) + , fInnerShapeInverseMatrix(kMat22f_GrSLType) + , fFragInnerShapeHalfSpan(kVec2f_GrSLType) { + fRectTrianglesMaySplit = fOpInfo.fHasPerspective; + fNeedsNeighborRadii = this->isMixedSampled() && !fOpInfo.fHasPerspective; } private: - bool isMixedSampled() const { return AntialiasMode::kMixedSamples == fBatchInfo.fAntialiasMode; } + bool isMixedSampled() const { return AntialiasMode::kMixedSamples == fOpInfo.fAntialiasMode; } void onInit(GrGLSLVaryingHandler*, GrGLSLVertexBuilder*) override; void setupRect(GrGLSLVertexBuilder*) override; @@ -1125,50 +1121,50 @@ private: void GLSLInstanceProcessor::BackendMultisample::onInit(GrGLSLVaryingHandler* varyingHandler, GrGLSLVertexBuilder* v) { if (!this->isMixedSampled()) { - if (kRect_ShapeFlag != fBatchInfo.fShapeTypes) { + if (kRect_ShapeFlag != fOpInfo.fShapeTypes) { varyingHandler->addFlatVarying("triangleIsArc", &fTriangleIsArc, kLow_GrSLPrecision); varyingHandler->addVarying("arcCoords", &fArcCoords, kHigh_GrSLPrecision); - if (!fBatchInfo.fHasPerspective) { + if (!fOpInfo.fHasPerspective) { varyingHandler->addFlatVarying("arcInverseMatrix", &fArcInverseMatrix, kHigh_GrSLPrecision); varyingHandler->addFlatVarying("fragArcHalfSpan", &fFragArcHalfSpan, kHigh_GrSLPrecision); } - } else if (!fBatchInfo.fInnerShapeTypes) { + } else if (!fOpInfo.fInnerShapeTypes) { return; } } else { varyingHandler->addVarying("shapeCoords", &fShapeCoords, kHigh_GrSLPrecision); - if (!fBatchInfo.fHasPerspective) { + if (!fOpInfo.fHasPerspective) { varyingHandler->addFlatVarying("shapeInverseMatrix", &fShapeInverseMatrix, kHigh_GrSLPrecision); varyingHandler->addFlatVarying("fragShapeHalfSpan", &fFragShapeHalfSpan, kHigh_GrSLPrecision); } - if (fBatchInfo.fShapeTypes & kRRect_ShapesMask) { + if (fOpInfo.fShapeTypes & kRRect_ShapesMask) { varyingHandler->addVarying("arcCoords", &fArcCoords, kHigh_GrSLPrecision); varyingHandler->addVarying("arcTest", &fArcTest, kHigh_GrSLPrecision); - if (!fBatchInfo.fHasPerspective) { + if (!fOpInfo.fHasPerspective) { 
varyingHandler->addFlatVarying("arcInverseMatrix", &fArcInverseMatrix, kHigh_GrSLPrecision); varyingHandler->addFlatVarying("fragArcHalfSpan", &fFragArcHalfSpan, kHigh_GrSLPrecision); } - } else if (fBatchInfo.fShapeTypes & kOval_ShapeFlag) { + } else if (fOpInfo.fShapeTypes & kOval_ShapeFlag) { fArcCoords = fShapeCoords; fArcInverseMatrix = fShapeInverseMatrix; fFragArcHalfSpan = fFragShapeHalfSpan; - if (fBatchInfo.fShapeTypes & kRect_ShapeFlag) { + if (fOpInfo.fShapeTypes & kRect_ShapeFlag) { varyingHandler->addFlatVarying("triangleIsArc", &fTriangleIsArc, kLow_GrSLPrecision); } } - if (kRect_ShapeFlag != fBatchInfo.fShapeTypes) { - v->defineConstantf("int", "SAMPLE_MASK_ALL", "0x%x", (1 << fEffectiveSampleCnt) - 1); - varyingHandler->addFlatVarying("earlyAccept", &fEarlyAccept, kHigh_GrSLPrecision); + if (kRect_ShapeFlag != fOpInfo.fShapeTypes) { + v->defineConstantf("int", "SAMPLE_MASK_ALL", "0x%x", (1 << fEffectiveSampleCnt) - 1); + varyingHandler->addFlatVarying("earlyAccept", &fEarlyAccept, kHigh_GrSLPrecision); } } - if (!fBatchInfo.fHasPerspective) { + if (!fOpInfo.fHasPerspective) { v->codeAppend("mat2 shapeInverseMatrix = inverse(mat2(shapeMatrix));"); v->codeAppend("vec2 fragShapeSpan = abs(vec4(shapeInverseMatrix).xz) + " "abs(vec4(shapeInverseMatrix).yw);"); @@ -1231,7 +1227,7 @@ void GLSLInstanceProcessor::BackendMultisample::adjustRRectVertices(GrGLSLVertex return; } - if (!fBatchInfo.fHasPerspective) { + if (!fOpInfo.fHasPerspective) { // For the mixed samples algorithm it's best to bloat the corner triangles a bit so that // more of the pixels that cross into the arc region are completely inside the shared edges. // We also snap to a regular rect if the radii shrink smaller than a pixel. @@ -1280,7 +1276,7 @@ void GLSLInstanceProcessor::BackendMultisample::onSetupRRect(GrGLSLVertexBuilder v->codeAppendf("%s = (cornerSize == vec2(0)) ? vec2(0) : " "cornerSign * %s * mat2(1, cornerSize.x - 1.0, cornerSize.y - 1.0, 1);", fArcTest.vsOut(), fModifiedShapeCoords); - if (!fBatchInfo.fHasPerspective) { + if (!fOpInfo.fHasPerspective) { // Shift the point at which distances to edges are measured from the center of the pixel // to the corner. This way the sign of fArcTest will quickly tell us whether a pixel // is completely inside the shared edge. 
Perspective mode will accomplish this same task @@ -1300,11 +1296,11 @@ void GLSLInstanceProcessor::BackendMultisample::onInitInnerShape(GrGLSLVaryingHandler* varyingHandler, GrGLSLVertexBuilder* v) { varyingHandler->addVarying("innerShapeCoords", &fInnerShapeCoords, kHigh_GrSLPrecision); - if (kOval_ShapeFlag != fBatchInfo.fInnerShapeTypes && - kRect_ShapeFlag != fBatchInfo.fInnerShapeTypes) { + if (kOval_ShapeFlag != fOpInfo.fInnerShapeTypes && + kRect_ShapeFlag != fOpInfo.fInnerShapeTypes) { varyingHandler->addFlatVarying("innerRRect", &fInnerRRect, kHigh_GrSLPrecision); } - if (!fBatchInfo.fHasPerspective) { + if (!fOpInfo.fHasPerspective) { varyingHandler->addFlatVarying("innerShapeInverseMatrix", &fInnerShapeInverseMatrix, kHigh_GrSLPrecision); v->codeAppendf("%s = shapeInverseMatrix * mat2(outer2Inner.x, 0, 0, outer2Inner.y);", @@ -1351,7 +1347,7 @@ void GLSLInstanceProcessor::BackendMultisample::onEmitCode(GrGLSLVertexBuilder*, f->defineConstantf("int", "SAMPLE_MASK_MSB", "0x%x", 1 << (fEffectiveSampleCnt - 1)); } - if (kRect_ShapeFlag != (fBatchInfo.fShapeTypes | fBatchInfo.fInnerShapeTypes)) { + if (kRect_ShapeFlag != (fOpInfo.fShapeTypes | fOpInfo.fInnerShapeTypes)) { GrShaderVar x("x", kVec2f_GrSLType, GrShaderVar::kNonArray, kHigh_GrSLPrecision); f->emitFunction(kFloat_GrSLType, "square", 1, &x, "return dot(x, x);", &fSquareFun); } @@ -1365,14 +1361,14 @@ void GLSLInstanceProcessor::BackendMultisample::onEmitCode(GrGLSLVertexBuilder*, arcCoords.fVarying = &fArcCoords; arcCoords.fInverseMatrix = fArcInverseMatrix.fsIn(); arcCoords.fFragHalfSpan = fFragArcHalfSpan.fsIn(); - bool clampArcCoords = this->isMixedSampled() && (fBatchInfo.fShapeTypes & kRRect_ShapesMask); + bool clampArcCoords = this->isMixedSampled() && (fOpInfo.fShapeTypes & kRRect_ShapesMask); EmitShapeOpts opts; opts.fIsTightGeometry = true; opts.fResolveMixedSamples = this->isMixedSampled(); opts.fInvertCoverage = false; - if (fBatchInfo.fHasPerspective && fBatchInfo.fInnerShapeTypes) { + if (fOpInfo.fHasPerspective && fOpInfo.fInnerShapeTypes) { // This determines if the fragment should consider the inner shape in its sample mask. // We take the derivative early in case discards may occur before we get to the inner shape. f->codeAppendf("highp vec2 fragInnerShapeApproxHalfSpan = 0.5 * fwidth(%s);", @@ -1389,7 +1385,7 @@ void GLSLInstanceProcessor::BackendMultisample::onEmitCode(GrGLSLVertexBuilder*, } } else { const char* arcTest = fArcTest.fsIn(); - if (arcTest && fBatchInfo.fHasPerspective) { + if (arcTest && fOpInfo.fHasPerspective) { // The non-perspective version accounts for fwidth() in the vertex shader. // We make sure to take the derivative here, before a neighbor pixel may early accept. 
f->codeAppendf("highp vec2 arcTest = %s - 0.5 * fwidth(%s);", @@ -1414,21 +1410,21 @@ void GLSLInstanceProcessor::BackendMultisample::onEmitCode(GrGLSLVertexBuilder*, f->codeAppend ("} else {"); this->emitArc(f, arcCoords, false, clampArcCoords, opts); f->codeAppend ("}"); - } else if (fBatchInfo.fShapeTypes == kOval_ShapeFlag) { + } else if (fOpInfo.fShapeTypes == kOval_ShapeFlag) { this->emitArc(f, arcCoords, false, clampArcCoords, opts); } else { - SkASSERT(fBatchInfo.fShapeTypes == kRect_ShapeFlag); + SkASSERT(fOpInfo.fShapeTypes == kRect_ShapeFlag); this->emitRect(f, shapeCoords, opts); } f->codeAppend ("}"); } - if (fBatchInfo.fInnerShapeTypes) { + if (fOpInfo.fInnerShapeTypes) { f->codeAppendf("// Inner shape.\n"); EmitShapeCoords innerShapeCoords; innerShapeCoords.fVarying = &fInnerShapeCoords; - if (!fBatchInfo.fHasPerspective) { + if (!fOpInfo.fHasPerspective) { innerShapeCoords.fInverseMatrix = fInnerShapeInverseMatrix.fsIn(); innerShapeCoords.fFragHalfSpan = fFragInnerShapeHalfSpan.fsIn(); } @@ -1438,13 +1434,13 @@ void GLSLInstanceProcessor::BackendMultisample::onEmitCode(GrGLSLVertexBuilder*, innerOpts.fResolveMixedSamples = false; // Mixed samples are resolved in the outer shape. innerOpts.fInvertCoverage = true; - if (kOval_ShapeFlag == fBatchInfo.fInnerShapeTypes) { + if (kOval_ShapeFlag == fOpInfo.fInnerShapeTypes) { this->emitArc(f, innerShapeCoords, true, false, innerOpts); } else { f->codeAppendf("if (all(lessThan(abs(%s), 1.0 + %s))) {", fInnerShapeCoords.fsIn(), - !fBatchInfo.fHasPerspective ? innerShapeCoords.fFragHalfSpan - : "fragInnerShapeApproxHalfSpan"); // Above. - if (kRect_ShapeFlag == fBatchInfo.fInnerShapeTypes) { + !fOpInfo.fHasPerspective ? innerShapeCoords.fFragHalfSpan + : "fragInnerShapeApproxHalfSpan"); // Above. + if (kRect_ShapeFlag == fOpInfo.fInnerShapeTypes) { this->emitRect(f, innerShapeCoords, innerOpts); } else { this->emitSimpleRRect(f, innerShapeCoords, fInnerRRect.fsIn(), innerOpts); @@ -1606,7 +1602,7 @@ GLSLInstanceProcessor::BackendMultisample::acceptOrRejectWholeFragment(GrGLSLPPF // fragment. f->codeAppend("if ((gl_SampleMaskIn[0] & SAMPLE_MASK_MSB) == 0) {"); // Drop this fragment. - if (!fBatchInfo.fCannotDiscard) { + if (!fOpInfo.fCannotDiscard) { f->codeAppend("discard;"); } else { f->overrideSampleCoverage("0"); @@ -1617,7 +1613,7 @@ GLSLInstanceProcessor::BackendMultisample::acceptOrRejectWholeFragment(GrGLSLPPF f->codeAppend("}"); } } else { // Reject the entire fragment. - if (!fBatchInfo.fCannotDiscard) { + if (!fOpInfo.fCannotDiscard) { f->codeAppend("discard;"); } else if (opts.fResolveMixedSamples) { f->overrideSampleCoverage("0"); @@ -1642,7 +1638,7 @@ void GLSLInstanceProcessor::BackendMultisample::acceptCoverageMask(GrGLSLPPFragm SkASSERT(!opts.fInvertCoverage); f->codeAppendf("if ((gl_SampleMaskIn[0] & (1 << findMSB(%s))) == 0) {", shapeMask); // Drop this fragment. 
- if (!fBatchInfo.fCannotDiscard) { + if (!fOpInfo.fCannotDiscard) { f->codeAppend ("discard;"); } else { f->overrideSampleCoverage("0"); @@ -1661,21 +1657,21 @@ void GLSLInstanceProcessor::BackendMultisample::acceptCoverageMask(GrGLSLPPFragm //////////////////////////////////////////////////////////////////////////////////////////////////// -GLSLInstanceProcessor::Backend* -GLSLInstanceProcessor::Backend::Create(const GrPipeline& pipeline, BatchInfo batchInfo, - const VertexInputs& inputs) { - switch (batchInfo.fAntialiasMode) { +GLSLInstanceProcessor::Backend* GLSLInstanceProcessor::Backend::Create(const GrPipeline& pipeline, + OpInfo opInfo, + const VertexInputs& inputs) { + switch (opInfo.fAntialiasMode) { default: SkFAIL("Unexpected antialias mode."); case AntialiasMode::kNone: - return new BackendNonAA(batchInfo, inputs); + return new BackendNonAA(opInfo, inputs); case AntialiasMode::kCoverage: - return new BackendCoverage(batchInfo, inputs); + return new BackendCoverage(opInfo, inputs); case AntialiasMode::kMSAA: case AntialiasMode::kMixedSamples: { const GrRenderTargetPriv& rtp = pipeline.getRenderTarget()->renderTargetPriv(); const GrGpu::MultisampleSpecs& specs = rtp.getMultisampleSpecs(pipeline); - return new BackendMultisample(batchInfo, inputs, specs.fEffectiveSampleCnt); + return new BackendMultisample(opInfo, inputs, specs.fEffectiveSampleCnt); } } } diff --git a/src/gpu/instanced/InstanceProcessor.h b/src/gpu/instanced/InstanceProcessor.h index df96ce3933..bfe503e5bc 100644 --- a/src/gpu/instanced/InstanceProcessor.h +++ b/src/gpu/instanced/InstanceProcessor.h @@ -22,13 +22,13 @@ namespace gr_instanced { */ class InstanceProcessor : public GrGeometryProcessor { public: - InstanceProcessor(BatchInfo, GrBuffer* paramsBuffer); + InstanceProcessor(OpInfo, GrBuffer* paramsBuffer); const char* name() const override { return "Instance Processor"; } - BatchInfo batchInfo() const { return fBatchInfo; } + OpInfo opInfo() const { return fOpInfo; } void getGLSLProcessorKey(const GrShaderCaps&, GrProcessorKeyBuilder* b) const override { - b->add32(fBatchInfo.fData); + b->add32(fOpInfo.fData); } GrGLSLPrimitiveProcessor* createGLSLInstance(const GrShaderCaps&) const override; @@ -56,8 +56,8 @@ private: */ static GrCaps::InstancedSupport CheckSupport(const GrShaderCaps&, const GrCaps&); - const BatchInfo fBatchInfo; - BufferAccess fParamsAccess; + OpInfo fOpInfo; + BufferAccess fParamsAccess; friend class GLInstancedRendering; // For CheckSupport. 
diff --git a/src/gpu/instanced/InstancedRendering.cpp b/src/gpu/instanced/InstancedRendering.cpp index faecfe5eed..2a248a55af 100644 --- a/src/gpu/instanced/InstancedRendering.cpp +++ b/src/gpu/instanced/InstancedRendering.cpp @@ -21,85 +21,86 @@ InstancedRendering::InstancedRendering(GrGpu* gpu) fDrawPool(1024, 1024) { } -GrDrawOp* InstancedRendering::recordRect(const SkRect& rect, const SkMatrix& viewMatrix, - GrColor color, GrAA aa, - const GrInstancedPipelineInfo& info, GrAAType* aaType) { +sk_sp<GrDrawOp> InstancedRendering::recordRect(const SkRect& rect, const SkMatrix& viewMatrix, + GrColor color, GrAA aa, + const GrInstancedPipelineInfo& info, + GrAAType* aaType) { return this->recordShape(ShapeType::kRect, rect, viewMatrix, color, rect, aa, info, aaType); } -GrDrawOp* InstancedRendering::recordRect(const SkRect& rect, const SkMatrix& viewMatrix, - GrColor color, const SkRect& localRect, GrAA aa, - const GrInstancedPipelineInfo& info, GrAAType* aaType) { +sk_sp<GrDrawOp> InstancedRendering::recordRect(const SkRect& rect, const SkMatrix& viewMatrix, + GrColor color, const SkRect& localRect, GrAA aa, + const GrInstancedPipelineInfo& info, + GrAAType* aaType) { return this->recordShape(ShapeType::kRect, rect, viewMatrix, color, localRect, aa, info, aaType); } -GrDrawOp* InstancedRendering::recordRect(const SkRect& rect, const SkMatrix& viewMatrix, - GrColor color, const SkMatrix& localMatrix, - GrAA aa, const GrInstancedPipelineInfo& info, - GrAAType* aaType) { +sk_sp<GrDrawOp> InstancedRendering::recordRect(const SkRect& rect, const SkMatrix& viewMatrix, + GrColor color, const SkMatrix& localMatrix, GrAA aa, + const GrInstancedPipelineInfo& info, + GrAAType* aaType) { if (localMatrix.hasPerspective()) { return nullptr; // Perspective is not yet supported in the local matrix. 
} - if (Batch* batch = this->recordShape(ShapeType::kRect, rect, viewMatrix, color, rect, aa, - info, aaType)) { - batch->getSingleInstance().fInfo |= kLocalMatrix_InfoFlag; - batch->appendParamsTexel(localMatrix.getScaleX(), localMatrix.getSkewX(), - localMatrix.getTranslateX()); - batch->appendParamsTexel(localMatrix.getSkewY(), localMatrix.getScaleY(), - localMatrix.getTranslateY()); - batch->fInfo.fHasLocalMatrix = true; - return batch; + if (sk_sp<Op> op = this->recordShape(ShapeType::kRect, rect, viewMatrix, color, rect, aa, info, + aaType)) { + op->getSingleInstance().fInfo |= kLocalMatrix_InfoFlag; + op->appendParamsTexel(localMatrix.getScaleX(), localMatrix.getSkewX(), + localMatrix.getTranslateX()); + op->appendParamsTexel(localMatrix.getSkewY(), localMatrix.getScaleY(), + localMatrix.getTranslateY()); + op->fInfo.fHasLocalMatrix = true; + return std::move(op); } return nullptr; } -GrDrawOp* InstancedRendering::recordOval(const SkRect& oval, const SkMatrix& viewMatrix, - GrColor color, GrAA aa, - const GrInstancedPipelineInfo& info, GrAAType* aaType) { +sk_sp<GrDrawOp> InstancedRendering::recordOval(const SkRect& oval, const SkMatrix& viewMatrix, + GrColor color, GrAA aa, + const GrInstancedPipelineInfo& info, + GrAAType* aaType) { return this->recordShape(ShapeType::kOval, oval, viewMatrix, color, oval, aa, info, aaType); } -GrDrawOp* InstancedRendering::recordRRect(const SkRRect& rrect, const SkMatrix& viewMatrix, - GrColor color, GrAA aa, - const GrInstancedPipelineInfo& info, GrAAType* aaType) { - if (Batch* batch = this->recordShape(GetRRectShapeType(rrect), rrect.rect(), viewMatrix, color, +sk_sp<GrDrawOp> InstancedRendering::recordRRect(const SkRRect& rrect, const SkMatrix& viewMatrix, + GrColor color, GrAA aa, + const GrInstancedPipelineInfo& info, + GrAAType* aaType) { + if (sk_sp<Op> op = this->recordShape(GetRRectShapeType(rrect), rrect.rect(), viewMatrix, color, rrect.rect(), aa, info, aaType)) { - batch->appendRRectParams(rrect); - return batch; + op->appendRRectParams(rrect); + return std::move(op); } return nullptr; } -GrDrawOp* InstancedRendering::recordDRRect(const SkRRect& outer, const SkRRect& inner, - const SkMatrix& viewMatrix, GrColor color, - GrAA aa, const GrInstancedPipelineInfo& info, - GrAAType* aaType) { +sk_sp<GrDrawOp> InstancedRendering::recordDRRect(const SkRRect& outer, const SkRRect& inner, + const SkMatrix& viewMatrix, GrColor color, GrAA aa, + const GrInstancedPipelineInfo& info, + GrAAType* aaType) { if (inner.getType() > SkRRect::kSimple_Type) { return nullptr; // Complex inner round rects are not yet supported. 
} if (SkRRect::kEmpty_Type == inner.getType()) { return this->recordRRect(outer, viewMatrix, color, aa, info, aaType); } - if (Batch* batch = this->recordShape(GetRRectShapeType(outer), outer.rect(), viewMatrix, color, + if (sk_sp<Op> op = this->recordShape(GetRRectShapeType(outer), outer.rect(), viewMatrix, color, outer.rect(), aa, info, aaType)) { - batch->appendRRectParams(outer); + op->appendRRectParams(outer); ShapeType innerShapeType = GetRRectShapeType(inner); - batch->fInfo.fInnerShapeTypes |= GetShapeFlag(innerShapeType); - batch->getSingleInstance().fInfo |= ((int)innerShapeType << kInnerShapeType_InfoBit); - batch->appendParamsTexel(inner.rect().asScalars(), 4); - batch->appendRRectParams(inner); - return batch; + op->fInfo.fInnerShapeTypes |= GetShapeFlag(innerShapeType); + op->getSingleInstance().fInfo |= ((int)innerShapeType << kInnerShapeType_InfoBit); + op->appendParamsTexel(inner.rect().asScalars(), 4); + op->appendRRectParams(inner); + return std::move(op); } return nullptr; } -InstancedRendering::Batch* InstancedRendering::recordShape(ShapeType type, const SkRect& bounds, - const SkMatrix& viewMatrix, - GrColor color, const SkRect& localRect, - GrAA aa, - const GrInstancedPipelineInfo& info, - GrAAType* aaType) { +sk_sp<InstancedRendering::Op> InstancedRendering::recordShape( + ShapeType type, const SkRect& bounds, const SkMatrix& viewMatrix, GrColor color, + const SkRect& localRect, GrAA aa, const GrInstancedPipelineInfo& info, GrAAType* aaType) { SkASSERT(State::kRecordingDraws == fState); if (info.fIsRenderingToFloat && fGpu->caps()->avoidInstancedDrawsToFPTargets()) { @@ -111,19 +112,17 @@ InstancedRendering::Batch* InstancedRendering::recordShape(ShapeType type, const return nullptr; } - Batch* batch = this->createBatch(); - batch->fInfo.fAntialiasMode = antialiasMode; - batch->fInfo.fShapeTypes = GetShapeFlag(type); - batch->fInfo.fCannotDiscard = !info.fCanDiscard; + sk_sp<Op> op = this->makeOp(); + op->fInfo.fAntialiasMode = antialiasMode; + op->fInfo.fShapeTypes = GetShapeFlag(type); + op->fInfo.fCannotDiscard = !info.fCanDiscard; - Instance& instance = batch->getSingleInstance(); + Instance& instance = op->getSingleInstance(); instance.fInfo = (int)type << kShapeType_InfoBit; - Batch::HasAABloat aaBloat = (antialiasMode == AntialiasMode::kCoverage) - ? Batch::HasAABloat::kYes - : Batch::HasAABloat::kNo; - Batch::IsZeroArea zeroArea = (bounds.isEmpty()) ? Batch::IsZeroArea::kYes - : Batch::IsZeroArea::kNo; + Op::HasAABloat aaBloat = (antialiasMode == AntialiasMode::kCoverage) ? Op::HasAABloat::kYes + : Op::HasAABloat::kNo; + Op::IsZeroArea zeroArea = (bounds.isEmpty()) ? Op::IsZeroArea::kYes : Op::IsZeroArea::kNo; // The instanced shape renderer draws rectangles of [-1, -1, +1, +1], so we find the matrix that // will map this rectangle to the same device coordinates as "viewMatrix * bounds". 
@@ -147,18 +146,19 @@ InstancedRendering::Batch* InstancedRendering::recordShape(ShapeType type, const // it's quite simple to find the bounding rectangle: float devBoundsHalfWidth = fabsf(m[0]) + fabsf(m[1]); float devBoundsHalfHeight = fabsf(m[3]) + fabsf(m[4]); - SkRect batchBounds; - batchBounds.fLeft = m[2] - devBoundsHalfWidth; - batchBounds.fRight = m[2] + devBoundsHalfWidth; - batchBounds.fTop = m[5] - devBoundsHalfHeight; - batchBounds.fBottom = m[5] + devBoundsHalfHeight; - batch->setBounds(batchBounds, aaBloat, zeroArea); + SkRect opBounds; + opBounds.fLeft = m[2] - devBoundsHalfWidth; + opBounds.fRight = m[2] + devBoundsHalfWidth; + opBounds.fTop = m[5] - devBoundsHalfHeight; + opBounds.fBottom = m[5] + devBoundsHalfHeight; + op->setBounds(opBounds, aaBloat, zeroArea); // TODO: Is this worth the CPU overhead? - batch->fInfo.fNonSquare = - fabsf(devBoundsHalfHeight - devBoundsHalfWidth) > 0.5f || // Early out. - fabs(m[0] * m[3] + m[1] * m[4]) > 1e-3f || // Skew? - fabs(m[0] * m[0] + m[1] * m[1] - m[3] * m[3] - m[4] * m[4]) > 1e-2f; // Diff. lengths? + op->fInfo.fNonSquare = + fabsf(devBoundsHalfHeight - devBoundsHalfWidth) > 0.5f || // Early out. + fabs(m[0] * m[3] + m[1] * m[4]) > 1e-3f || // Skew? + fabs(m[0] * m[0] + m[1] * m[1] - m[3] * m[3] - m[4] * m[4]) > + 1e-2f; // Diff. lengths? } else { SkMatrix shapeMatrix(viewMatrix); shapeMatrix.preTranslate(tx, ty); @@ -174,12 +174,12 @@ InstancedRendering::Batch* InstancedRendering::recordShape(ShapeType type, const m[5] = SkScalarToFloat(shapeMatrix.getTranslateY()); // Send the perspective column as a param. - batch->appendParamsTexel(shapeMatrix[SkMatrix::kMPersp0], shapeMatrix[SkMatrix::kMPersp1], - shapeMatrix[SkMatrix::kMPersp2]); - batch->fInfo.fHasPerspective = true; + op->appendParamsTexel(shapeMatrix[SkMatrix::kMPersp0], shapeMatrix[SkMatrix::kMPersp1], + shapeMatrix[SkMatrix::kMPersp2]); + op->fInfo.fHasPerspective = true; - batch->setBounds(bounds, aaBloat, zeroArea); - batch->fInfo.fNonSquare = true; + op->setBounds(bounds, aaBloat, zeroArea); + op->fInfo.fNonSquare = true; } instance.fColor = color; @@ -187,8 +187,8 @@ InstancedRendering::Batch* InstancedRendering::recordShape(ShapeType type, const const float* rectAsFloats = localRect.asScalars(); // Ensure SkScalar == float. 
         memcpy(&instance.fLocalRect, rectAsFloats, 4 * sizeof(float));
-    batch->fPixelLoad = batch->bounds().height() * batch->bounds().width();
-    return batch;
+    op->fPixelLoad = op->bounds().height() * op->bounds().width();
+    return op;
 }
 
 inline bool InstancedRendering::selectAntialiasMode(const SkMatrix& viewMatrix, GrAA aa,
@@ -234,12 +234,12 @@ inline bool InstancedRendering::selectAntialiasMode(const SkMatrix& viewMatrix,
     return false;
 }
 
-InstancedRendering::Batch::Batch(uint32_t classID, InstancedRendering* ir)
-    : INHERITED(classID),
-      fInstancedRendering(ir),
-      fIsTracked(false),
-      fNumDraws(1),
-      fNumChangesInGeometry(0) {
+InstancedRendering::Op::Op(uint32_t classID, InstancedRendering* ir)
+        : INHERITED(classID)
+        , fInstancedRendering(ir)
+        , fIsTracked(false)
+        , fNumDraws(1)
+        , fNumChangesInGeometry(0) {
     fHeadDraw = fTailDraw = fInstancedRendering->fDrawPool.allocate();
 #ifdef SK_DEBUG
     fHeadDraw->fGeometry = {-1, 0};
@@ -247,9 +247,9 @@ InstancedRendering::Batch::Batch(uint32_t classID, InstancedRendering* ir)
     fHeadDraw->fNext = nullptr;
 }
 
-InstancedRendering::Batch::~Batch() {
+InstancedRendering::Op::~Op() {
     if (fIsTracked) {
-        fInstancedRendering->fTrackedBatches.remove(this);
+        fInstancedRendering->fTrackedOps.remove(this);
     }
 
     Draw* draw = fHeadDraw;
@@ -260,7 +260,7 @@ InstancedRendering::Batch::~Batch() {
     }
 }
 
-void InstancedRendering::Batch::appendRRectParams(const SkRRect& rrect) {
+void InstancedRendering::Op::appendRRectParams(const SkRRect& rrect) {
     SkASSERT(!fIsTracked);
     switch (rrect.getType()) {
         case SkRRect::kSimple_Type: {
@@ -307,7 +307,7 @@ void InstancedRendering::Batch::appendRRectParams(const SkRRect& rrect) {
     }
 }
 
-void InstancedRendering::Batch::appendParamsTexel(const SkScalar* vals, int count) {
+void InstancedRendering::Op::appendParamsTexel(const SkScalar* vals, int count) {
     SkASSERT(!fIsTracked);
     SkASSERT(count <= 4 && count >= 0);
     const float* valsAsFloats = vals; // Ensure SkScalar == float.
@@ -315,7 +315,7 @@ void InstancedRendering::Batch::appendParamsTexel(const SkScalar* vals, int coun
     fInfo.fHasParams = true;
 }
 
-void InstancedRendering::Batch::appendParamsTexel(SkScalar x, SkScalar y, SkScalar z, SkScalar w) {
+void InstancedRendering::Op::appendParamsTexel(SkScalar x, SkScalar y, SkScalar z, SkScalar w) {
     SkASSERT(!fIsTracked);
     ParamsTexel& texel = fParams.push_back();
     texel.fX = SkScalarToFloat(x);
@@ -325,7 +325,7 @@ void InstancedRendering::Batch::appendParamsTexel(SkScalar x, SkScalar y, SkScal
     fInfo.fHasParams = true;
 }
 
-void InstancedRendering::Batch::appendParamsTexel(SkScalar x, SkScalar y, SkScalar z) {
+void InstancedRendering::Op::appendParamsTexel(SkScalar x, SkScalar y, SkScalar z) {
     SkASSERT(!fIsTracked);
     ParamsTexel& texel = fParams.push_back();
     texel.fX = SkScalarToFloat(x);
@@ -334,9 +334,9 @@ void InstancedRendering::Batch::appendParamsTexel(SkScalar x, SkScalar y, SkScal
     fInfo.fHasParams = true;
 }
 
-void InstancedRendering::Batch::computePipelineOptimizations(GrInitInvariantOutput* color,
-                                                             GrInitInvariantOutput* coverage,
-                                                             GrBatchToXPOverrides* overrides) const {
+void InstancedRendering::Op::computePipelineOptimizations(GrInitInvariantOutput* color,
+                                                          GrInitInvariantOutput* coverage,
+                                                          GrBatchToXPOverrides* overrides) const {
     color->setKnownFourComponents(this->getSingleInstance().fColor);
 
     if (AntialiasMode::kCoverage == fInfo.fAntialiasMode ||
@@ -348,7 +348,7 @@ void InstancedRendering::Batch::computePipelineOptimizations(GrInitInvariantOutp
     }
 }
 
-void InstancedRendering::Batch::initBatchTracker(const GrXPOverridesForBatch& overrides) {
+void InstancedRendering::Op::initBatchTracker(const GrXPOverridesForBatch& overrides) {
     Draw& draw = this->getSingleDraw(); // This will assert if we have > 1 command.
     SkASSERT(draw.fGeometry.isEmpty());
     SkASSERT(SkIsPow2(fInfo.fShapeTypes));
@@ -377,23 +377,23 @@ void InstancedRendering::Batch::initBatchTracker(const GrXPOverridesForBatch& ov
     fInfo.fUsesLocalCoords = overrides.readsLocalCoords();
     fInfo.fCannotTweakAlphaForCoverage = !overrides.canTweakAlphaForCoverage();
 
-    fInstancedRendering->fTrackedBatches.addToTail(this);
+    fInstancedRendering->fTrackedOps.addToTail(this);
     fIsTracked = true;
 }
 
-bool InstancedRendering::Batch::onCombineIfPossible(GrOp* other, const GrCaps& caps) {
-    Batch* that = static_cast<Batch*>(other);
+bool InstancedRendering::Op::onCombineIfPossible(GrOp* other, const GrCaps& caps) {
+    Op* that = static_cast<Op*>(other);
     SkASSERT(fInstancedRendering == that->fInstancedRendering);
     SkASSERT(fTailDraw);
     SkASSERT(that->fTailDraw);
 
-    if (!BatchInfo::CanCombine(fInfo, that->fInfo) ||
-        !GrPipeline::CanCombine(*this->pipeline(), this->bounds(),
-                                *that->pipeline(), that->bounds(), caps)) {
+    if (!OpInfo::CanCombine(fInfo, that->fInfo) ||
+        !GrPipeline::CanCombine(*this->pipeline(), this->bounds(), *that->pipeline(),
+                                that->bounds(), caps)) {
         return false;
     }
 
-    BatchInfo combinedInfo = fInfo | that->fInfo;
+    OpInfo combinedInfo = fInfo | that->fInfo;
     if (!combinedInfo.isSimpleRects()) {
         // This threshold was chosen with the "shapes_mixed" bench on a MacBook with Intel graphics.
         // There seems to be a wide range where it doesn't matter if we combine or not. What matters
@@ -411,7 +411,7 @@ bool InstancedRendering::Batch::onCombineIfPossible(GrOp* other, const GrCaps& c
     fInfo = combinedInfo;
     fPixelLoad += that->fPixelLoad;
 
-    // Adopt the other batch's draws.
+    // Adopt the other op's draws.
     fNumDraws += that->fNumDraws;
     fNumChangesInGeometry += that->fNumChangesInGeometry;
     if (fTailDraw->fGeometry != that->fHeadDraw->fGeometry) {
@@ -429,7 +429,7 @@ void InstancedRendering::beginFlush(GrResourceProvider* rp) {
     SkASSERT(State::kRecordingDraws == fState);
     fState = State::kFlushing;
 
-    if (fTrackedBatches.isEmpty()) {
+    if (fTrackedOps.isEmpty()) {
         return;
     }
 
@@ -461,7 +461,7 @@ void InstancedRendering::beginFlush(GrResourceProvider* rp) {
     this->onBeginFlush(rp);
 }
 
-void InstancedRendering::Batch::onDraw(GrOpFlushState* state, const SkRect& bounds) {
+void InstancedRendering::Op::onDraw(GrOpFlushState* state, const SkRect& bounds) {
     SkASSERT(State::kFlushing == fInstancedRendering->fState);
     SkASSERT(state->gpu() == fInstancedRendering->gpu());
 
@@ -475,9 +475,9 @@ void InstancedRendering::Batch::onDraw(GrOpFlushState* state, const SkRect& boun
 }
 
 void InstancedRendering::endFlush() {
-    // The caller is expected to delete all tracked batches (i.e. batches whose initBatchTracker
+    // The caller is expected to delete all tracked ops (i.e. ops whose initBatchTracker
     // method has been called) before ending the flush.
-    SkASSERT(fTrackedBatches.isEmpty());
+    SkASSERT(fTrackedOps.isEmpty());
     fParams.reset();
     fParamsBuffer.reset();
     this->onEndFlush();
diff --git a/src/gpu/instanced/InstancedRendering.h b/src/gpu/instanced/InstancedRendering.h
index 903697436c..078e52cee3 100644
--- a/src/gpu/instanced/InstancedRendering.h
+++ b/src/gpu/instanced/InstancedRendering.h
@@ -23,11 +23,11 @@ class InstanceProcessor;
 
 /**
  * This class serves as a centralized clearinghouse for instanced rendering. It accumulates data for
- * instanced draws into one location, and creates special batches that pull from this data. The
- * nature of instanced rendering allows these batches to combine well and render efficiently.
+ * instanced draws into one location, and creates special ops that pull from this data. The nature
+ * of instanced rendering allows these ops to combine well and render efficiently.
  *
  * During a flush, this class assembles the accumulated draw data into a single vertex and texel
- * buffer, and its subclass draws the batches using backend-specific instanced rendering APIs.
+ * buffer, and its subclass draws the ops using backend-specific instanced rendering APIs.
  *
  * This class is responsible for the CPU side of instanced rendering. Shaders are implemented by
  * InstanceProcessor.
@@ -39,41 +39,42 @@ public:
     GrGpu* gpu() const { return fGpu.get(); }
 
     /**
-     * These methods make a new record internally for an instanced draw, and return a batch that is
-     * effectively just an index to that record. The returned batch is not self-contained, but
+     * These methods make a new record internally for an instanced draw, and return an op that is
+     * effectively just an index to that record. The returned op is not self-contained, but
      * rather relies on this class to handle the rendering. The client must call beginFlush() on
-     * this class before attempting to flush batches returned by it. It is invalid to record new
+     * this class before attempting to flush ops returned by it. It is invalid to record new
      * draws between beginFlush() and endFlush().
      */
-    GrDrawOp* SK_WARN_UNUSED_RESULT recordRect(const SkRect&, const SkMatrix&, GrColor,
-                                               GrAA, const GrInstancedPipelineInfo&, GrAAType*);
+    sk_sp<GrDrawOp> SK_WARN_UNUSED_RESULT recordRect(const SkRect&, const SkMatrix&, GrColor, GrAA,
+                                                     const GrInstancedPipelineInfo&, GrAAType*);
 
-    GrDrawOp* SK_WARN_UNUSED_RESULT recordRect(const SkRect&, const SkMatrix&, GrColor,
-                                               const SkRect& localRect, GrAA,
-                                               const GrInstancedPipelineInfo&, GrAAType*);
+    sk_sp<GrDrawOp> SK_WARN_UNUSED_RESULT recordRect(const SkRect&, const SkMatrix&, GrColor,
+                                                     const SkRect& localRect, GrAA,
+                                                     const GrInstancedPipelineInfo&, GrAAType*);
 
-    GrDrawOp* SK_WARN_UNUSED_RESULT recordRect(const SkRect&, const SkMatrix&, GrColor,
-                                               const SkMatrix& localMatrix, GrAA,
-                                               const GrInstancedPipelineInfo&, GrAAType*);
+    sk_sp<GrDrawOp> SK_WARN_UNUSED_RESULT recordRect(const SkRect&, const SkMatrix&, GrColor,
+                                                     const SkMatrix& localMatrix, GrAA,
+                                                     const GrInstancedPipelineInfo&, GrAAType*);
 
-    GrDrawOp* SK_WARN_UNUSED_RESULT recordOval(const SkRect&, const SkMatrix&, GrColor,
-                                               GrAA, const GrInstancedPipelineInfo&, GrAAType*);
+    sk_sp<GrDrawOp> SK_WARN_UNUSED_RESULT recordOval(const SkRect&, const SkMatrix&, GrColor, GrAA,
+                                                     const GrInstancedPipelineInfo&, GrAAType*);
 
-    GrDrawOp* SK_WARN_UNUSED_RESULT recordRRect(const SkRRect&, const SkMatrix&, GrColor,
-                                                GrAA, const GrInstancedPipelineInfo&, GrAAType*);
+    sk_sp<GrDrawOp> SK_WARN_UNUSED_RESULT recordRRect(const SkRRect&, const SkMatrix&, GrColor,
+                                                      GrAA, const GrInstancedPipelineInfo&,
+                                                      GrAAType*);
 
-    GrDrawOp* SK_WARN_UNUSED_RESULT recordDRRect(const SkRRect& outer, const SkRRect& inner,
-                                                 const SkMatrix&, GrColor, GrAA,
-                                                 const GrInstancedPipelineInfo&, GrAAType*);
+    sk_sp<GrDrawOp> SK_WARN_UNUSED_RESULT recordDRRect(const SkRRect& outer, const SkRRect& inner,
+                                                       const SkMatrix&, GrColor, GrAA,
+                                                       const GrInstancedPipelineInfo&, GrAAType*);
 
     /**
      * Compiles all recorded draws into GPU buffers and allows the client to begin flushing the
-     * batches created by this class.
+     * ops created by this class.
      */
     void beginFlush(GrResourceProvider*);
 
     /**
-     * Called once the batches created previously by this class have all been released. Allows the
+     * Called once the ops created previously by this class have all been released. Allows the
      * client to begin recording draws again.
      */
     void endFlush();
@@ -90,12 +91,12 @@ public:
     void resetGpuResources(ResetType);
 
 protected:
-    class Batch : public GrDrawOp {
+    class Op : public GrDrawOp {
     public:
-        SK_DECLARE_INTERNAL_LLIST_INTERFACE(Batch);
+        SK_DECLARE_INTERNAL_LLIST_INTERFACE(Op);
 
-        ~Batch() override;
-        const char* name() const override { return "Instanced Batch"; }
+        ~Op() override;
+        const char* name() const override { return "InstancedRendering::Op"; }
 
         SkString dumpInfo() const override {
             SkString string;
@@ -131,7 +132,7 @@ protected:
         void appendParamsTexel(SkScalar x, SkScalar y, SkScalar z);
 
     protected:
-        Batch(uint32_t classID, InstancedRendering* ir);
+        Op(uint32_t classID, InstancedRendering* ir);
 
         void initBatchTracker(const GrXPOverridesForBatch&) override;
         bool onCombineIfPossible(GrOp* other, const GrCaps& caps) override;
@@ -143,31 +144,31 @@ protected:
        void onPrepare(GrOpFlushState*) override {}
        void onDraw(GrOpFlushState*, const SkRect& bounds) override;
 
-        InstancedRendering* const          fInstancedRendering;
-        BatchInfo                          fInfo;
-        SkScalar                           fPixelLoad;
-        SkSTArray<5, ParamsTexel, true>    fParams;
-        bool                               fIsTracked;
-        int                                fNumDraws;
-        int                                fNumChangesInGeometry;
-        Draw*                              fHeadDraw;
-        Draw*                              fTailDraw;
+        InstancedRendering* const fInstancedRendering;
+        OpInfo fInfo;
+        SkScalar fPixelLoad;
+        SkSTArray<5, ParamsTexel, true> fParams;
+        bool fIsTracked;
+        int fNumDraws;
+        int fNumChangesInGeometry;
+        Draw* fHeadDraw;
+        Draw* fTailDraw;
 
         typedef GrDrawOp INHERITED;
 
         friend class InstancedRendering;
     };
 
-    typedef SkTInternalLList<Batch> BatchList;
+    typedef SkTInternalLList<Op> OpList;
 
     InstancedRendering(GrGpu* gpu);
 
-    const BatchList& trackedBatches() const { return fTrackedBatches; }
+    const OpList& trackedOps() const { return fTrackedOps; }
     const GrBuffer* vertexBuffer() const { SkASSERT(fVertexBuffer); return fVertexBuffer.get(); }
     const GrBuffer* indexBuffer() const { SkASSERT(fIndexBuffer); return fIndexBuffer.get(); }
 
     virtual void onBeginFlush(GrResourceProvider*) = 0;
-    virtual void onDraw(const GrPipeline&, const InstanceProcessor&, const Batch*) = 0;
+    virtual void onDraw(const GrPipeline&, const InstanceProcessor&, const Op*) = 0;
     virtual void onEndFlush() = 0;
     virtual void onResetGpuResources(ResetType) = 0;
 
@@ -177,24 +178,24 @@ private:
         kFlushing
     };
 
-    Batch* SK_WARN_UNUSED_RESULT recordShape(ShapeType, const SkRect& bounds,
-                                             const SkMatrix& viewMatrix, GrColor,
-                                             const SkRect& localRect, GrAA aa,
-                                             const GrInstancedPipelineInfo&, GrAAType*);
+    sk_sp<Op> SK_WARN_UNUSED_RESULT recordShape(ShapeType, const SkRect& bounds,
+                                                const SkMatrix& viewMatrix, GrColor,
+                                                const SkRect& localRect, GrAA aa,
+                                                const GrInstancedPipelineInfo&, GrAAType*);
 
     bool selectAntialiasMode(const SkMatrix& viewMatrix, GrAA aa, const GrInstancedPipelineInfo&,
                              GrAAType*, AntialiasMode*);
 
-    virtual Batch* createBatch() = 0;
+    virtual sk_sp<Op> makeOp() = 0;
 
-    const sk_sp<GrGpu>                   fGpu;
-    State                                fState;
-    GrObjectMemoryPool<Batch::Draw>      fDrawPool;
-    SkSTArray<1024, ParamsTexel, true>   fParams;
-    BatchList                            fTrackedBatches;
-    sk_sp<const GrBuffer>                fVertexBuffer;
-    sk_sp<const GrBuffer>                fIndexBuffer;
-    sk_sp<GrBuffer>                      fParamsBuffer;
+    const sk_sp<GrGpu> fGpu;
+    State fState;
+    GrObjectMemoryPool<Op::Draw> fDrawPool;
+    SkSTArray<1024, ParamsTexel, true> fParams;
+    OpList fTrackedOps;
+    sk_sp<const GrBuffer> fVertexBuffer;
+    sk_sp<const GrBuffer> fIndexBuffer;
+    sk_sp<GrBuffer> fParamsBuffer;
 };
 
 }
diff --git a/src/gpu/instanced/InstancedRenderingTypes.h b/src/gpu/instanced/InstancedRenderingTypes.h
index 2ed269a8cf..4e0c3d3d95 100644
--- a/src/gpu/instanced/InstancedRenderingTypes.h
+++ b/src/gpu/instanced/InstancedRenderingTypes.h
@@ -120,14 +120,14 @@ GR_STATIC_ASSERT(0 == offsetof(ParamsTexel, fX));
 GR_STATIC_ASSERT(4 * 4 == sizeof(ParamsTexel));
 
 /**
- * Tracks all information needed in order to draw a batch of instances. This struct also serves
- * as an all-in-one shader key for the batch.
+ * Tracks all information needed in order to draw a op of instances. This struct also serves
+ * as an all-in-one shader key for the op.
  */
-struct BatchInfo {
-    BatchInfo() : fData(0) {}
-    explicit BatchInfo(uint32_t data) : fData(data) {}
+struct OpInfo {
+    OpInfo() : fData(0) {}
+    explicit OpInfo(uint32_t data) : fData(data) {}
 
-    static bool CanCombine(const BatchInfo& a, const BatchInfo& b);
+    static bool CanCombine(const OpInfo& a, const OpInfo& b);
 
     bool isSimpleRects() const {
         return !((fShapeTypes & ~kRect_ShapeFlag) | fInnerShapeTypes);
@@ -150,7 +150,7 @@ struct BatchInfo {
     };
 };
 
-inline bool BatchInfo::CanCombine(const BatchInfo& a, const BatchInfo& b) {
+inline bool OpInfo::CanCombine(const OpInfo& a, const OpInfo& b) {
     if (a.fAntialiasMode != b.fAntialiasMode) {
         return false;
     }
@@ -165,13 +165,13 @@ inline bool BatchInfo::CanCombine(const BatchInfo& a, const BatchInfo& b) {
     return true;
 }
 
-inline BatchInfo operator|(const BatchInfo& a, const BatchInfo& b) {
-    SkASSERT(BatchInfo::CanCombine(a, b));
-    return BatchInfo(a.fData | b.fData);
+inline OpInfo operator|(const OpInfo& a, const OpInfo& b) {
+    SkASSERT(OpInfo::CanCombine(a, b));
+    return OpInfo(a.fData | b.fData);
 }
 
 // This is required since all the data must fit into 32 bits of a shader key.
-GR_STATIC_ASSERT(sizeof(uint32_t) == sizeof(BatchInfo));
+GR_STATIC_ASSERT(sizeof(uint32_t) == sizeof(OpInfo));
 GR_STATIC_ASSERT(kNumShapeTypes <= 8);
 
 struct IndexRange {
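
Editor's note: the sketch below is illustrative only and is not part of the change. It shows how a caller is expected to drive the sk_sp-returning record*/beginFlush()/endFlush() API declared in the InstancedRendering.h hunks above; the function name and surrounding plumbing (op-list handling, pipeline builder, resource provider) are placeholders, and the snippet will not compile outside a Skia checkout from around this revision.

// Hypothetical caller-side sketch; elides the GrRenderTargetContext/GrOpList
// plumbing that normally owns these objects.
void sketchRecordAndFlush(InstancedRendering* ir, const SkRect& rect,
                          const SkMatrix& viewMatrix, GrColor color, GrAA aa,
                          const GrInstancedPipelineInfo& pipelineInfo,
                          GrResourceProvider* resourceProvider) {
    GrAAType aaType;
    // recordRect() now returns shared ownership (sk_sp) instead of a raw pointer,
    // so the caller assigns the smart pointer rather than reset()-ing one.
    sk_sp<GrDrawOp> op = ir->recordRect(rect, viewMatrix, color, aa, pipelineInfo, &aaType);
    if (!op) {
        return;  // instanced rendering could not handle this draw
    }

    // ... hand the op to the op list / pipeline builder here ...

    // Bake every recorded draw into GPU buffers. No new draws may be recorded
    // until endFlush() is called.
    ir->beginFlush(resourceProvider);
    // ... the backend executes the tracked ops here ...
    ir->endFlush();
}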
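
Editor's note: the OpInfo hunk above keeps the pre-existing trick of packing the entire shader key into 32 bits so that two ops can be merged by OR-ing their keys once CanCombine() has checked the fields that must match exactly. A stand-alone illustration of that idea (simplified names, plain C++, not Skia code):

#include <cassert>
#include <cstdint>

// Simplified stand-in for OpInfo: one 32-bit value doubles as the shader key.
struct KeyInfo {
    KeyInfo() : fData(0) {}
    explicit KeyInfo(uint32_t data) : fData(data) {}

    // Merging is only legal when the fields that pick a shader variant agree;
    // here one low byte stands in for fAntialiasMode and similar enum-like fields.
    static bool CanCombine(const KeyInfo& a, const KeyInfo& b) {
        return (a.fData & 0xff) == (b.fData & 0xff);
    }

    uint32_t fData;
};

// Combining is a plain bitwise OR: the enum-like bits were already required to
// match, and the remaining bits are flags (e.g. shape types) that should union.
inline KeyInfo operator|(const KeyInfo& a, const KeyInfo& b) {
    assert(KeyInfo::CanCombine(a, b));
    return KeyInfo(a.fData | b.fData);
}

int main() {
    KeyInfo rectDraw(0x00000101);  // mode 0x01, "rect" shape flag set
    KeyInfo ovalDraw(0x00000201);  // mode 0x01, "oval" shape flag set
    KeyInfo combined = rectDraw | ovalDraw;
    assert(combined.fData == 0x00000301);  // both shape flags survive the merge
    return 0;
}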