author    | bsalomon <bsalomon@google.com>       | 2015-08-10 13:03:50 -0700
committer | Commit bot <commit-bot@chromium.org> | 2015-08-10 13:03:51 -0700
commit    | 5ac42ea8079c9368430971943192772984da5fce (patch)
tree      | ba62ea9a747df79b380c6c8a235ac30c9eef29eb /src
parent    | 4977983510028712528743aa877f6da83781b381 (diff)
Make initBatchTracker private, move towards pipeline on batch
Review URL: https://codereview.chromium.org/1282893002
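
The GrBatch.h hunk below is the heart of the change: setPipeline() now receives the GrPipelineOptimizations and calls initBatchTracker() itself, so the hook becomes a private detail of attaching a pipeline rather than a separate step each draw target had to perform. The following is a minimal standalone sketch of that pattern, using made-up stand-in types (Pipeline, PipelineOptimizations, RectBatch) rather than the real Skia classes:

```cpp
#include <cstdio>

// Stand-in types for illustration only; the real classes are GrPipeline and
// GrPipelineOptimizations.
struct Pipeline {};
struct PipelineOptimizations { bool readsColor = true; };

class Batch {
public:
    virtual ~Batch() {}

    // The only way to attach a pipeline; it forwards the optimizations to the
    // private hook, so the tracker is always initialized with the pipeline
    // that will actually be used to draw.
    void setPipeline(const Pipeline* pipeline, const PipelineOptimizations& opts) {
        fPipeline = pipeline;
        this->initBatchTracker(opts);
    }

    const Pipeline* pipeline() const { return fPipeline; }

private:
    // Previously a public method the draw targets called separately; now only
    // reachable through setPipeline().
    virtual void initBatchTracker(const PipelineOptimizations&) = 0;

    const Pipeline* fPipeline = nullptr;
};

class RectBatch : public Batch {
private:
    void initBatchTracker(const PipelineOptimizations& opts) override {
        fIgnoresColor = !opts.readsColor;
        std::printf("tracker initialized, ignoresColor=%d\n", fIgnoresColor);
    }

    bool fIgnoresColor = false;
};

int main() {
    Pipeline pipeline;
    PipelineOptimizations opts;
    RectBatch batch;
    // Callers no longer invoke initBatchTracker() themselves.
    batch.setPipeline(&pipeline, opts);
    return 0;
}
```

Because the hook is private and only reachable through setPipeline(), a batch's tracker is always initialized against the pipeline it will actually draw with; that is why GrBufferedDrawTarget and GrImmediateDrawTarget can drop their explicit initBatchTracker() calls in the hunks below, while the command builders simply forward the optimizations to setPipeline().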
Diffstat (limited to 'src')
-rw-r--r-- | src/gpu/GrBufferedDrawTarget.cpp    |  4
-rw-r--r-- | src/gpu/GrCommandBuilder.h          |  2
-rw-r--r-- | src/gpu/GrImmediateDrawTarget.cpp   |  2
-rw-r--r-- | src/gpu/GrInOrderCommandBuilder.cpp |  6
-rw-r--r-- | src/gpu/GrInOrderCommandBuilder.h   |  2
-rw-r--r-- | src/gpu/GrReorderCommandBuilder.cpp | 26
-rw-r--r-- | src/gpu/GrReorderCommandBuilder.h   |  2
-rw-r--r-- | src/gpu/GrTargetCommands.h          |  6
-rw-r--r-- | src/gpu/batches/GrBatch.h           | 19
9 files changed, 37 insertions, 32 deletions
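
One of the files in the diff below, GrReorderCommandBuilder.cpp, is the reordering builder whose recordDrawBatch() signature changes; its own comments describe the strategy: search the command buffer backwards until every recent draw has been checked (bounded by kMaxLookback), the new draw intersects an earlier one, or a draw to combine with is found. The sketch below is a rough standalone illustration of that bounded-lookback idea with simplified Draw/Rect stand-ins; it is not the Skia implementation, which also requires matching batch class IDs and equal pipelines before combining:

```cpp
#include <algorithm>
#include <cstdio>
#include <vector>

// Simplified stand-ins for draw bounds and recorded draws.
struct Rect { float fLeft, fTop, fRight, fBottom; };

static bool intersect(const Rect& a, const Rect& b) {
    return a.fLeft < b.fRight && b.fLeft < a.fRight &&
           a.fTop < b.fBottom && b.fTop < a.fBottom;
}

struct Draw {
    Rect bounds;
    int  pipelineID;  // in this sketch, draws may only combine when this matches
};

// Append a draw, first trying to combine it with a recent compatible draw.
// Mirrors the strategy described in the diff: search backwards until we
// 1) run out of draws to check (kMaxLookback),
// 2) intersect an earlier draw we cannot be reordered past, or
// 3) find a draw we can combine with.
void recordDraw(std::vector<Draw>& buffer, const Draw& draw) {
    static const int kMaxLookback = 10;
    int i = 0;
    for (auto it = buffer.rbegin(); it != buffer.rend() && i < kMaxLookback; ++it, ++i) {
        if (it->pipelineID == draw.pipelineID) {
            // "Combine": union the bounds; the real code merges the geometry too.
            it->bounds.fLeft   = std::min(it->bounds.fLeft,   draw.bounds.fLeft);
            it->bounds.fTop    = std::min(it->bounds.fTop,    draw.bounds.fTop);
            it->bounds.fRight  = std::max(it->bounds.fRight,  draw.bounds.fRight);
            it->bounds.fBottom = std::max(it->bounds.fBottom, draw.bounds.fBottom);
            return;
        }
        if (intersect(it->bounds, draw.bounds)) {
            break;  // cannot be reordered past an overlapping draw
        }
    }
    buffer.push_back(draw);
}

int main() {
    std::vector<Draw> buffer;
    recordDraw(buffer, {{0, 0, 10, 10}, 1});
    recordDraw(buffer, {{20, 0, 30, 10}, 1});  // combines with the first draw
    std::printf("recorded draws: %zu\n", buffer.size());
    return 0;
}
```

Capping the lookback keeps recording cheap per draw while still catching most combines, matching the comment in the diff that most batching occurs within the first 10 comparisons.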
diff --git a/src/gpu/GrBufferedDrawTarget.cpp b/src/gpu/GrBufferedDrawTarget.cpp
index 1abf813801..32111f5326 100644
--- a/src/gpu/GrBufferedDrawTarget.cpp
+++ b/src/gpu/GrBufferedDrawTarget.cpp
@@ -34,7 +34,7 @@ void GrBufferedDrawTarget::onDrawBatch(GrBatch* batch,
         return;
     }
 
-    GrTargetCommands::Cmd* cmd = fCommands->recordDrawBatch(state, batch);
+    GrTargetCommands::Cmd* cmd = fCommands->recordDrawBatch(state, opts, batch);
     this->recordTraceMarkersIfNecessary(cmd);
 }
 
@@ -190,8 +190,6 @@ GrBufferedDrawTarget::setupPipelineAndShouldDraw(GrBatch* batch,
         return NULL;
     }
 
-    batch->initBatchTracker(*opts);
-
     if (fPrevState && !fPrevState->fPrimitiveProcessor.get() &&
         fPrevState->getPipeline()->isEqual(*state->getPipeline())) {
         this->unallocState(state);
diff --git a/src/gpu/GrCommandBuilder.h b/src/gpu/GrCommandBuilder.h
index f0672490bb..72c83e58dc 100644
--- a/src/gpu/GrCommandBuilder.h
+++ b/src/gpu/GrCommandBuilder.h
@@ -28,7 +28,7 @@ public:
                               bool insideClip,
                               GrRenderTarget* renderTarget);
     virtual Cmd* recordDiscard(GrRenderTarget*);
-    virtual Cmd* recordDrawBatch(State*, GrBatch*) = 0;
+    virtual Cmd* recordDrawBatch(const State*, const GrPipelineOptimizations&, GrBatch*) = 0;
     virtual Cmd* recordStencilPath(const GrPipelineBuilder&,
                                    const GrPathProcessor*,
                                    const GrPath*,
diff --git a/src/gpu/GrImmediateDrawTarget.cpp b/src/gpu/GrImmediateDrawTarget.cpp
index 5b6a1a5241..bf13e94414 100644
--- a/src/gpu/GrImmediateDrawTarget.cpp
+++ b/src/gpu/GrImmediateDrawTarget.cpp
@@ -37,8 +37,6 @@ void GrImmediateDrawTarget::onDrawBatch(GrBatch* batch,
         return;
     }
 
-    batch->initBatchTracker(opts);
-
     fBatchTarget.resetNumberOfDraws();
 
     batch->generateGeometry(&fBatchTarget);
diff --git a/src/gpu/GrInOrderCommandBuilder.cpp b/src/gpu/GrInOrderCommandBuilder.cpp
index b679b6f3b8..912f0b5dbd 100644
--- a/src/gpu/GrInOrderCommandBuilder.cpp
+++ b/src/gpu/GrInOrderCommandBuilder.cpp
@@ -25,9 +25,11 @@ static bool path_fill_type_is_winding(const GrStencilSettings& pathStencilSettin
     return isWinding;
 }
 
-GrTargetCommands::Cmd* GrInOrderCommandBuilder::recordDrawBatch(State* state, GrBatch* batch) {
+GrTargetCommands::Cmd* GrInOrderCommandBuilder::recordDrawBatch(const State* state,
+                                                                const GrPipelineOptimizations& opts,
+                                                                GrBatch* batch) {
     // Check if there is a Batch Draw we can batch with
-    batch->setPipeline(state->getPipeline());
+    batch->setPipeline(state->getPipeline(), opts);
     GrBATCH_INFO("In-Recording (%s, %u)\n", batch->name(), batch->uniqueID());
     if (!this->cmdBuffer()->empty() &&
         Cmd::kDrawBatch_CmdType == this->cmdBuffer()->back().type()) {
diff --git a/src/gpu/GrInOrderCommandBuilder.h b/src/gpu/GrInOrderCommandBuilder.h
index f992cc51d8..bb3d23823a 100644
--- a/src/gpu/GrInOrderCommandBuilder.h
+++ b/src/gpu/GrInOrderCommandBuilder.h
@@ -17,7 +17,7 @@ public:
 
     GrInOrderCommandBuilder(GrGpu* gpu) : INHERITED(gpu) { }
 
-    Cmd* recordDrawBatch(State*, GrBatch*) override;
+    Cmd* recordDrawBatch(const State*, const GrPipelineOptimizations&, GrBatch*) override;
     Cmd* recordStencilPath(const GrPipelineBuilder&,
                            const GrPathProcessor*,
                            const GrPath*,
diff --git a/src/gpu/GrReorderCommandBuilder.cpp b/src/gpu/GrReorderCommandBuilder.cpp
index c7b5f97da4..4f2685532b 100644
--- a/src/gpu/GrReorderCommandBuilder.cpp
+++ b/src/gpu/GrReorderCommandBuilder.cpp
@@ -15,7 +15,9 @@ static bool intersect(const Left& a, const Right& b) {
            a.fTop < b.fBottom && b.fTop < a.fBottom;
 }
 
-GrTargetCommands::Cmd* GrReorderCommandBuilder::recordDrawBatch(State* state, GrBatch* batch) {
+GrTargetCommands::Cmd* GrReorderCommandBuilder::recordDrawBatch(const State* state,
+                                                                const GrPipelineOptimizations& opts,
+                                                                GrBatch* batch) {
     // Check if there is a Batch Draw we can batch with by linearly searching back until we either
     // 1) check every draw
     // 2) intersect with something
@@ -23,7 +25,7 @@ GrTargetCommands::Cmd* GrReorderCommandBuilder::recordDrawBatch(State* state, Gr
     // Experimentally we have found that most batching occurs within the first 10 comparisons.
     static const int kMaxLookback = 10;
     int i = 0;
-    batch->setPipeline(state->getPipeline());
+    batch->setPipeline(state->getPipeline(), opts);
     GrRenderTarget* rt = state->getPipeline()->getRenderTarget();
 
     GrBATCH_INFO("Re-Recording (%s, B%u)\n"
@@ -33,17 +35,17 @@ GrTargetCommands::Cmd* GrReorderCommandBuilder::recordDrawBatch(State* state, Gr
                  batch->uniqueID(), rt,
                  batch->bounds().fLeft, batch->bounds().fRight,
                  batch->bounds().fTop, batch->bounds().fBottom);
-#if GR_BATCH_SPEW
-    SkDebugf("\tColorStages:\n");
-    for (int i = 0; i < state->getPipeline()->numColorFragmentStages(); i++) {
-        SkDebugf("\t\t%s\n", state->getPipeline()->getColorStage(i).processor()->name());
-    }
-    SkDebugf("\tCoverageStages:\n");
-    for (int i = 0; i < state->getPipeline()->numCoverageFragmentStages(); i++) {
-        SkDebugf("\t\t%s\n", state->getPipeline()->getCoverageStage(i).processor()->name());
+    if (GR_BATCH_SPEW) {
+        SkDebugf("\tColorStages:\n");
+        for (int i = 0; i < state->getPipeline()->numColorFragmentStages(); i++) {
+            SkDebugf("\t\t%s\n", state->getPipeline()->getColorStage(i).processor()->name());
+        }
+        SkDebugf("\tCoverageStages:\n");
+        for (int i = 0; i < state->getPipeline()->numCoverageFragmentStages(); i++) {
+            SkDebugf("\t\t%s\n", state->getPipeline()->getCoverageStage(i).processor()->name());
+        }
+        SkDebugf("\tXP: %s\n", state->getPipeline()->getXferProcessor()->name());
     }
-    SkDebugf("\tXP: %s\n", state->getPipeline()->getXferProcessor()->name());
-#endif
     GrBATCH_INFO("\tOutcome:\n");
     if (!this->cmdBuffer()->empty()) {
         GrTargetCommands::CmdBuffer::ReverseIter reverseIter(*this->cmdBuffer());
diff --git a/src/gpu/GrReorderCommandBuilder.h b/src/gpu/GrReorderCommandBuilder.h
index 6ee11f9f2e..c3fc66129e 100644
--- a/src/gpu/GrReorderCommandBuilder.h
+++ b/src/gpu/GrReorderCommandBuilder.h
@@ -17,7 +17,7 @@ public:
 
     GrReorderCommandBuilder(GrGpu* gpu) : INHERITED(gpu) {}
 
-    Cmd* recordDrawBatch(State*, GrBatch*) override;
+    Cmd* recordDrawBatch(const State*, const GrPipelineOptimizations&, GrBatch*) override;
     Cmd* recordStencilPath(const GrPipelineBuilder&,
                            const GrPathProcessor*,
                            const GrPath*,
diff --git a/src/gpu/GrTargetCommands.h b/src/gpu/GrTargetCommands.h
index 4de75897fc..f5f0004d07 100644
--- a/src/gpu/GrTargetCommands.h
+++ b/src/gpu/GrTargetCommands.h
@@ -241,7 +241,7 @@ private:
     };
 
     struct DrawBatch : public Cmd {
-        DrawBatch(State* state, GrBatch* batch, GrBatchTarget* batchTarget)
+        DrawBatch(const State* state, GrBatch* batch, GrBatchTarget* batchTarget)
             : Cmd(kDrawBatch_CmdType)
             , fState(SkRef(state))
             , fBatch(SkRef(batch))
@@ -251,8 +251,8 @@ private:
 
         void execute(GrGpu*) override;
 
-        SkAutoTUnref<State>   fState;
-        SkAutoTUnref<GrBatch> fBatch;
+        SkAutoTUnref<const State> fState;
+        SkAutoTUnref<GrBatch>     fBatch;
 
     private:
         GrBatchTarget* fBatchTarget;
diff --git a/src/gpu/batches/GrBatch.h b/src/gpu/batches/GrBatch.h
index 281a60e98e..7d91b1c1e1 100644
--- a/src/gpu/batches/GrBatch.h
+++ b/src/gpu/batches/GrBatch.h
@@ -60,12 +54,6 @@ public:
     virtual void getInvariantOutputColor(GrInitInvariantOutput* out) const = 0;
     virtual void getInvariantOutputCoverage(GrInitInvariantOutput* out) const = 0;
-    /*
-     * initBatchTracker is a hook for the some additional overrides / optimization possibilities
-     * from the GrXferProcessor.
-     */
-    virtual void initBatchTracker(const GrPipelineOptimizations&) = 0;
-
     bool combineIfPossible(GrBatch* that) {
         if (this->classID() != that->classID()) {
             return false;
         }
@@ -108,7 +102,11 @@ public:
     SkDEBUGCODE(bool isUsed() const { return fUsed; })
 
     const GrPipeline* pipeline() const { return fPipeline; }
-    void setPipeline(const GrPipeline* pipeline) { fPipeline.reset(SkRef(pipeline)); }
+
+    void setPipeline(const GrPipeline* pipeline, const GrPipelineOptimizations& optimizations) {
+        fPipeline.reset(SkRef(pipeline));
+        this->initBatchTracker(optimizations);
+    }
 
 #if GR_BATCH_SPEW
     uint32_t uniqueID() const { return fUniqueID; }
@@ -170,6 +168,13 @@ protected:
     SkRect fBounds;
 
 private:
+    /*
+     * initBatchTracker is a hook for the some additional overrides / optimization possibilities
+     * from the GrXferProcessor.
+     */
+    virtual void initBatchTracker(const GrPipelineOptimizations&) = 0;
+
+
     static uint32_t GenID(int32_t* idCounter) {
         // fCurrProcessorClassID has been initialized to kIllegalProcessorClassID. The
         // atomic inc returns the old value not the incremented value. So we add