diff options
-rw-r--r-- | gm/beziereffects.cpp                 |  2
-rw-r--r-- | gm/convexpolyeffect.cpp              |  1
-rw-r--r-- | src/gpu/GrBatch.h                    | 21
-rw-r--r-- | src/gpu/GrInOrderCommandBuilder.cpp  |  1
-rw-r--r-- | src/gpu/GrPipeline.h                 |  5
-rw-r--r-- | src/gpu/GrReorderCommandBuilder.cpp  |  4
6 files changed, 21 insertions(+), 13 deletions(-)
diff --git a/gm/beziereffects.cpp b/gm/beziereffects.cpp index 5e00608fa7..6e586196f4 100644 --- a/gm/beziereffects.cpp +++ b/gm/beziereffects.cpp @@ -46,6 +46,7 @@ private: BezierCubicOrConicTestBatch(const GrGeometryProcessor* gp, const Geometry& geo, const SkScalar klmEqs[9], SkScalar sign) : INHERITED(gp, geo.fBounds) { + this->initClassID<BezierCubicOrConicTestBatch>(); for (int i = 0; i < 9; i++) { fKlmEqs[i] = klmEqs[i]; } @@ -447,6 +448,7 @@ private: : INHERITED(gp, geo.fBounds) , fGeometry(geo) , fDevToUV(devToUV) { + this->initClassID<BezierQuadTestBatch>(); } struct Vertex { diff --git a/gm/convexpolyeffect.cpp b/gm/convexpolyeffect.cpp index 2c77bceb0a..9891f87215 100644 --- a/gm/convexpolyeffect.cpp +++ b/gm/convexpolyeffect.cpp @@ -43,6 +43,7 @@ private: ConvexPolyTestBatch(const GrGeometryProcessor* gp, const Geometry& geo) : INHERITED(gp, geo.fBounds) , fGeometry(geo) { + this->initClassID<ConvexPolyTestBatch>(); } Geometry* geoData(int index) override { diff --git a/src/gpu/GrBatch.h b/src/gpu/GrBatch.h index 21dc3d21c8..b77011268b 100644 --- a/src/gpu/GrBatch.h +++ b/src/gpu/GrBatch.h @@ -11,9 +11,9 @@ #include <new> #include "GrBatchTarget.h" #include "GrGeometryProcessor.h" +#include "GrNonAtomicRef.h" #include "GrVertices.h" #include "SkAtomics.h" -#include "SkRefCnt.h" #include "SkTypes.h" class GrGpu; @@ -37,9 +37,8 @@ struct GrInitInvariantOutput; * information will be communicated to the GrBatch prior to geometry generation. 
*/ -class GrBatch : public SkRefCnt { +class GrBatch : public GrNonAtomicRef { public: - GrBatch() : fClassID(kIllegalBatchClassID), fNumberOfDraws(0) { SkDEBUGCODE(fUsed = false;) } virtual ~GrBatch() {} @@ -58,6 +57,10 @@ public: return false; } + if (!this->pipeline()->isEqual(*that->pipeline())) { + return false; + } + return this->onCombineIfPossible(that); } @@ -94,14 +97,14 @@ public: SkDEBUGCODE(bool isUsed() const { return fUsed; }) + void setPipeline(const GrPipeline* pipeline) { fPipeline.reset(SkRef(pipeline)); } + protected: template <typename PROC_SUBCLASS> void initClassID() { static uint32_t kClassID = GenClassID(); fClassID = kClassID; } - uint32_t fClassID; - // NOTE, compute some bounds, even if extremely conservative. Do *NOT* setLargest on the bounds // rect because we outset it for dst copy textures void setBounds(const SkRect& newBounds) { fBounds = newBounds; } @@ -110,6 +113,8 @@ protected: return fBounds.joinPossiblyEmptyRect(otherBounds); } + const GrPipeline* pipeline() const { return fPipeline; } + /** Helper for rendering instances using an instanced index index buffer. 
This class creates the space for the vertices and flushes the draws to the batch target.*/ class InstancedHelper { @@ -148,6 +153,7 @@ protected: typedef InstancedHelper INHERITED; }; + uint32_t fClassID; SkRect fBounds; private: @@ -166,11 +172,10 @@ private: enum { kIllegalBatchClassID = 0, }; + SkAutoTUnref<const GrPipeline> fPipeline; static int32_t gCurrBatchClassID; - - SkDEBUGCODE(bool fUsed;) - int fNumberOfDraws; + SkDEBUGCODE(bool fUsed;) typedef SkRefCnt INHERITED; }; diff --git a/src/gpu/GrInOrderCommandBuilder.cpp b/src/gpu/GrInOrderCommandBuilder.cpp index 6d942449ee..5083e58d5a 100644 --- a/src/gpu/GrInOrderCommandBuilder.cpp +++ b/src/gpu/GrInOrderCommandBuilder.cpp @@ -27,6 +27,7 @@ static bool path_fill_type_is_winding(const GrStencilSettings& pathStencilSettin GrTargetCommands::Cmd* GrInOrderCommandBuilder::recordDrawBatch(State* state, GrBatch* batch) { // Check if there is a Batch Draw we can batch with + batch->setPipeline(state->getPipeline()); if (!this->cmdBuffer()->empty() && Cmd::kDrawBatch_CmdType == this->cmdBuffer()->back().type()) { DrawBatch* previous = static_cast<DrawBatch*>(&this->cmdBuffer()->back()); diff --git a/src/gpu/GrPipeline.h b/src/gpu/GrPipeline.h index bf8ca8a725..3b5181831a 100644 --- a/src/gpu/GrPipeline.h +++ b/src/gpu/GrPipeline.h @@ -10,6 +10,7 @@ #include "GrColor.h" #include "GrGpu.h" +#include "GrNonAtomicRef.h" #include "GrPendingFragmentStage.h" #include "GrPrimitiveProcessor.h" #include "GrProgramDesc.h" @@ -26,10 +27,8 @@ class GrPipelineBuilder; * Class that holds an optimized version of a GrPipelineBuilder. It is meant to be an immutable * class, and contains all data needed to set the state for a gpu draw. 
*/ -class GrPipeline { +class GrPipeline : public GrNonAtomicRef { public: - - GrPipeline(const GrPipelineBuilder&, const GrProcOptInfo& colorPOI, const GrProcOptInfo& coveragePOI, diff --git a/src/gpu/GrReorderCommandBuilder.cpp b/src/gpu/GrReorderCommandBuilder.cpp index e373381cb2..8c318fd0d0 100644 --- a/src/gpu/GrReorderCommandBuilder.cpp +++ b/src/gpu/GrReorderCommandBuilder.cpp @@ -23,6 +23,7 @@ GrTargetCommands::Cmd* GrReorderCommandBuilder::recordDrawBatch(State* state, Gr // Experimentally we have found that most batching occurs within the first 10 comparisons. static const int kMaxLookback = 10; int i = 0; + batch->setPipeline(state->getPipeline()); if (!this->cmdBuffer()->empty()) { GrTargetCommands::CmdBuffer::ReverseIter reverseIter(*this->cmdBuffer()); @@ -30,8 +31,7 @@ GrTargetCommands::Cmd* GrReorderCommandBuilder::recordDrawBatch(State* state, Gr if (Cmd::kDrawBatch_CmdType == reverseIter->type()) { DrawBatch* previous = static_cast<DrawBatch*>(reverseIter.get()); - if (previous->fState->getPipeline()->isEqual(*state->getPipeline()) && - previous->fBatch->combineIfPossible(batch)) { + if (previous->fBatch->combineIfPossible(batch)) { return NULL; } |