about summary refs log tree commit diff homepage
path: root/src/gpu/batches
diff options
context:
space:
mode:
Diffstat (limited to 'src/gpu/batches')
-rw-r--r--  src/gpu/batches/GrBatch.h                 4
-rw-r--r--  src/gpu/batches/GrClearBatch.h            2
-rw-r--r--  src/gpu/batches/GrClearStencilClipBatch.h 2
-rw-r--r--  src/gpu/batches/GrCopySurfaceBatch.h      2
-rw-r--r--  src/gpu/batches/GrDiscardBatch.h          2
-rw-r--r--  src/gpu/batches/GrDrawPathBatch.cpp       4
-rw-r--r--  src/gpu/batches/GrDrawPathBatch.h         4
-rw-r--r--  src/gpu/batches/GrStencilPathBatch.h      2
-rw-r--r--  src/gpu/batches/GrVertexBatch.cpp         4
-rw-r--r--  src/gpu/batches/GrVertexBatch.h           2
10 files changed, 14 insertions, 14 deletions
diff --git a/src/gpu/batches/GrBatch.h b/src/gpu/batches/GrBatch.h
index 8dafe9fba3..bef01b77c4 100644
--- a/src/gpu/batches/GrBatch.h
+++ b/src/gpu/batches/GrBatch.h
@@ -125,7 +125,7 @@ public:
void prepare(GrBatchFlushState* state) { this->onPrepare(state); }
/** Issues the batches commands to GrGpu. */
- void draw(GrBatchFlushState* state) { this->onDraw(state); }
+ void draw(GrBatchFlushState* state, const SkRect& bounds) { this->onDraw(state, bounds); }
/** Used to block batching across render target changes. Remove this once we store
GrBatches for different RTs in different targets. */
@@ -191,7 +191,7 @@ private:
virtual bool onCombineIfPossible(GrBatch*, const GrCaps& caps) = 0;
virtual void onPrepare(GrBatchFlushState*) = 0;
- virtual void onDraw(GrBatchFlushState*) = 0;
+ virtual void onDraw(GrBatchFlushState*, const SkRect& bounds) = 0;
static uint32_t GenID(int32_t* idCounter) {
// The atomic inc returns the old value not the incremented value. So we add
diff --git a/src/gpu/batches/GrClearBatch.h b/src/gpu/batches/GrClearBatch.h
index c4bbadc6c9..f11d485536 100644
--- a/src/gpu/batches/GrClearBatch.h
+++ b/src/gpu/batches/GrClearBatch.h
@@ -94,7 +94,7 @@ private:
void onPrepare(GrBatchFlushState*) override {}
- void onDraw(GrBatchFlushState* state) override {
+ void onDraw(GrBatchFlushState* state, const SkRect& /*bounds*/) override {
state->commandBuffer()->clear(fClip, fColor);
}
diff --git a/src/gpu/batches/GrClearStencilClipBatch.h b/src/gpu/batches/GrClearStencilClipBatch.h
index d9d5b2e2ec..42d7c444db 100644
--- a/src/gpu/batches/GrClearStencilClipBatch.h
+++ b/src/gpu/batches/GrClearStencilClipBatch.h
@@ -50,7 +50,7 @@ private:
void onPrepare(GrBatchFlushState*) override {}
- void onDraw(GrBatchFlushState* state) override {
+ void onDraw(GrBatchFlushState* state, const SkRect& /*bounds*/) override {
state->commandBuffer()->clearStencilClip(fClip, fInsideStencilMask);
}
diff --git a/src/gpu/batches/GrCopySurfaceBatch.h b/src/gpu/batches/GrCopySurfaceBatch.h
index fea8aae2f3..c987d0d5df 100644
--- a/src/gpu/batches/GrCopySurfaceBatch.h
+++ b/src/gpu/batches/GrCopySurfaceBatch.h
@@ -66,7 +66,7 @@ private:
void onPrepare(GrBatchFlushState*) override {}
- void onDraw(GrBatchFlushState* state) override {
+ void onDraw(GrBatchFlushState* state, const SkRect& /*bounds*/) override {
if (!state->commandBuffer()) {
state->gpu()->copySurface(fDst.get(), fSrc.get(), fSrcRect, fDstPoint);
} else {
diff --git a/src/gpu/batches/GrDiscardBatch.h b/src/gpu/batches/GrDiscardBatch.h
index c9aa7d68b3..8a3be0e5f3 100644
--- a/src/gpu/batches/GrDiscardBatch.h
+++ b/src/gpu/batches/GrDiscardBatch.h
@@ -43,7 +43,7 @@ private:
void onPrepare(GrBatchFlushState*) override {}
- void onDraw(GrBatchFlushState* state) override {
+ void onDraw(GrBatchFlushState* state, const SkRect& /*bounds*/) override {
state->commandBuffer()->discard();
}
diff --git a/src/gpu/batches/GrDrawPathBatch.cpp b/src/gpu/batches/GrDrawPathBatch.cpp
index 815fe74636..4080d2f0a1 100644
--- a/src/gpu/batches/GrDrawPathBatch.cpp
+++ b/src/gpu/batches/GrDrawPathBatch.cpp
@@ -26,7 +26,7 @@ SkString GrDrawPathBatch::dumpInfo() const {
return string;
}
-void GrDrawPathBatch::onDraw(GrBatchFlushState* state) {
+void GrDrawPathBatch::onDraw(GrBatchFlushState* state, const SkRect& bounds) {
GrProgramDesc desc;
SkAutoTUnref<GrPathProcessor> pathProc(GrPathProcessor::Create(this->color(),
@@ -116,7 +116,7 @@ bool GrDrawPathRangeBatch::onCombineIfPossible(GrBatch* t, const GrCaps& caps) {
return true;
}
-void GrDrawPathRangeBatch::onDraw(GrBatchFlushState* state) {
+void GrDrawPathRangeBatch::onDraw(GrBatchFlushState* state, const SkRect& bounds) {
const Draw& head = *fDraws.head();
SkMatrix drawMatrix(this->viewMatrix());
diff --git a/src/gpu/batches/GrDrawPathBatch.h b/src/gpu/batches/GrDrawPathBatch.h
index 33bf678eb5..6adc94356f 100644
--- a/src/gpu/batches/GrDrawPathBatch.h
+++ b/src/gpu/batches/GrDrawPathBatch.h
@@ -81,7 +81,7 @@ private:
bool onCombineIfPossible(GrBatch* t, const GrCaps& caps) override { return false; }
- void onDraw(GrBatchFlushState* state) override;
+ void onDraw(GrBatchFlushState* state, const SkRect& bounds) override;
GrPendingIOResource<const GrPath, kRead_GrIOType> fPath;
@@ -174,7 +174,7 @@ private:
bool onCombineIfPossible(GrBatch* t, const GrCaps& caps) override;
- void onDraw(GrBatchFlushState* state) override;
+ void onDraw(GrBatchFlushState* state, const SkRect& bounds) override;
struct Draw {
void set(const InstanceData* instanceData, SkScalar x, SkScalar y) {
diff --git a/src/gpu/batches/GrStencilPathBatch.h b/src/gpu/batches/GrStencilPathBatch.h
index f505a531d4..73bec195d0 100644
--- a/src/gpu/batches/GrStencilPathBatch.h
+++ b/src/gpu/batches/GrStencilPathBatch.h
@@ -66,7 +66,7 @@ private:
void onPrepare(GrBatchFlushState*) override {}
- void onDraw(GrBatchFlushState* state) override {
+ void onDraw(GrBatchFlushState* state, const SkRect& bounds) override {
GrPathRendering::StencilPathArgs args(fUseHWAA, fRenderTarget.get(), &fViewMatrix,
&fScissor, &fStencil);
state->gpu()->pathRendering()->stencilPath(args, fPath.get());
diff --git a/src/gpu/batches/GrVertexBatch.cpp b/src/gpu/batches/GrVertexBatch.cpp
index af3a186e13..32413b81cb 100644
--- a/src/gpu/batches/GrVertexBatch.cpp
+++ b/src/gpu/batches/GrVertexBatch.cpp
@@ -62,7 +62,7 @@ void* GrVertexBatch::QuadHelper::init(Target* target, size_t vertexStride,
quadIndexBuffer, kVerticesPerQuad, kIndicesPerQuad, quadsToDraw);
}
-void GrVertexBatch::onDraw(GrBatchFlushState* state) {
+void GrVertexBatch::onDraw(GrBatchFlushState* state, const SkRect& bounds) {
int currUploadIdx = 0;
int currMeshIdx = 0;
@@ -76,7 +76,7 @@ void GrVertexBatch::onDraw(GrBatchFlushState* state) {
}
const QueuedDraw &draw = fQueuedDraws[currDrawIdx];
state->commandBuffer()->draw(*this->pipeline(), *draw.fGeometryProcessor.get(),
- fMeshes.begin() + currMeshIdx, draw.fMeshCnt);
+ fMeshes.begin() + currMeshIdx, draw.fMeshCnt, bounds);
currMeshIdx += draw.fMeshCnt;
state->flushToken();
}
diff --git a/src/gpu/batches/GrVertexBatch.h b/src/gpu/batches/GrVertexBatch.h
index 19475a7afd..1159e5ed2a 100644
--- a/src/gpu/batches/GrVertexBatch.h
+++ b/src/gpu/batches/GrVertexBatch.h
@@ -63,7 +63,7 @@ protected:
private:
void onPrepare(GrBatchFlushState* state) final;
- void onDraw(GrBatchFlushState* state) final;
+ void onDraw(GrBatchFlushState* state, const SkRect& bounds) final;
virtual void onPrepareDraws(Target*) const = 0;