aboutsummaryrefslogtreecommitdiffhomepage
diff options
context:
space:
mode:
-rw-r--r--dm/DMSrcSink.cpp6
-rw-r--r--gm/beziereffects.cpp4
-rw-r--r--gm/convexpolyeffect.cpp2
-rw-r--r--gn/gpu.gni4
-rw-r--r--include/gpu/GrContextOptions.h4
-rw-r--r--include/private/GrAuditTrail.h6
-rw-r--r--src/gpu/GrAppliedClip.h2
-rw-r--r--src/gpu/GrAuditTrail.cpp6
-rw-r--r--src/gpu/GrBatchTest.h2
-rw-r--r--src/gpu/GrMesh.h2
-rw-r--r--src/gpu/GrOvalRenderer.cpp22
-rw-r--r--src/gpu/GrPipeline.cpp2
-rw-r--r--src/gpu/GrPipeline.h6
-rw-r--r--src/gpu/GrPipelineBuilder.cpp2
-rw-r--r--src/gpu/GrPrimitiveProcessor.h2
-rw-r--r--src/gpu/GrRenderTargetContext.cpp4
-rw-r--r--src/gpu/GrRenderTargetOpList.cpp88
-rw-r--r--src/gpu/GrRenderTargetOpList.h8
-rw-r--r--src/gpu/GrResourceProvider.h4
-rw-r--r--src/gpu/GrTextureOpList.cpp8
-rw-r--r--src/gpu/GrTextureOpList.h8
-rw-r--r--src/gpu/SkGrPriv.h2
-rw-r--r--src/gpu/batches/GrAAConvexPathRenderer.cpp4
-rw-r--r--src/gpu/batches/GrAADistanceFieldPathRenderer.cpp4
-rw-r--r--src/gpu/batches/GrAAFillRectBatch.cpp4
-rw-r--r--src/gpu/batches/GrAAFillRectBatch.h2
-rw-r--r--src/gpu/batches/GrAAHairLinePathRenderer.cpp4
-rw-r--r--src/gpu/batches/GrAALinearizingConvexPathRenderer.cpp4
-rw-r--r--src/gpu/batches/GrAAStrokeRectBatch.cpp6
-rw-r--r--src/gpu/batches/GrAAStrokeRectBatch.h1
-rw-r--r--src/gpu/batches/GrAnalyticRectBatch.cpp4
-rw-r--r--src/gpu/batches/GrAtlasTextBatch.cpp2
-rw-r--r--src/gpu/batches/GrAtlasTextBatch.h4
-rw-r--r--src/gpu/batches/GrClearBatch.h10
-rw-r--r--src/gpu/batches/GrClearStencilClipBatch.h10
-rw-r--r--src/gpu/batches/GrCopySurfaceBatch.cpp2
-rw-r--r--src/gpu/batches/GrCopySurfaceBatch.h12
-rw-r--r--src/gpu/batches/GrDefaultPathRenderer.cpp4
-rw-r--r--src/gpu/batches/GrDiscardBatch.h10
-rw-r--r--src/gpu/batches/GrDrawAtlasBatch.cpp2
-rw-r--r--src/gpu/batches/GrDrawAtlasBatch.h4
-rw-r--r--src/gpu/batches/GrDrawBatch.h6
-rw-r--r--src/gpu/batches/GrDrawPathBatch.cpp2
-rw-r--r--src/gpu/batches/GrDrawPathBatch.h8
-rw-r--r--src/gpu/batches/GrDrawVerticesBatch.cpp2
-rw-r--r--src/gpu/batches/GrDrawVerticesBatch.h4
-rw-r--r--src/gpu/batches/GrMSAAPathRenderer.cpp4
-rw-r--r--src/gpu/batches/GrNinePatch.cpp4
-rw-r--r--src/gpu/batches/GrNonAAFillRectBatch.cpp4
-rw-r--r--src/gpu/batches/GrNonAAFillRectPerspectiveBatch.cpp4
-rw-r--r--src/gpu/batches/GrNonAAStrokeRectBatch.cpp4
-rw-r--r--src/gpu/batches/GrOp.cpp (renamed from src/gpu/batches/GrBatch.cpp)28
-rw-r--r--src/gpu/batches/GrOp.h (renamed from src/gpu/batches/GrBatch.h)87
-rw-r--r--src/gpu/batches/GrPLSPathRenderer.cpp4
-rw-r--r--src/gpu/batches/GrRectBatchFactory.h2
-rw-r--r--src/gpu/batches/GrRegionBatch.cpp4
-rwxr-xr-xsrc/gpu/batches/GrShadowRRectBatch.cpp8
-rw-r--r--src/gpu/batches/GrStencilPathBatch.h12
-rw-r--r--src/gpu/batches/GrTessellatingPathRenderer.cpp4
-rw-r--r--src/gpu/batches/GrTestBatch.h4
-rw-r--r--src/gpu/effects/GrDashingEffect.cpp4
-rw-r--r--src/gpu/instanced/GLInstancedRendering.cpp2
-rw-r--r--src/gpu/instanced/InstancedRendering.cpp2
-rw-r--r--src/gpu/instanced/InstancedRendering.h2
-rw-r--r--tests/GrPorterDuffTest.cpp4
-rw-r--r--tests/PrimitiveProcessorTest.cpp4
66 files changed, 248 insertions, 252 deletions
diff --git a/dm/DMSrcSink.cpp b/dm/DMSrcSink.cpp
index eae0d2463d..0a54702d08 100644
--- a/dm/DMSrcSink.cpp
+++ b/dm/DMSrcSink.cpp
@@ -1215,9 +1215,9 @@ GPUSink::GPUSink(GrContextFactory::ContextType ct,
, fThreaded(threaded) {}
DEFINE_bool(imm, false, "Run gpu configs in immediate mode.");
-DEFINE_bool(batchClip, false, "Clip each GrBatch to its device bounds for testing.");
-DEFINE_int32(batchLookback, -1, "Maximum GrBatch lookback for combining, negative means default.");
-DEFINE_int32(batchLookahead, -1, "Maximum GrBatch lookahead for combining, negative means "
+DEFINE_bool(batchClip, false, "Clip each GrOp to its device bounds for testing.");
+DEFINE_int32(batchLookback, -1, "Maximum GrOp lookback for combining, negative means default.");
+DEFINE_int32(batchLookahead, -1, "Maximum GrOp lookahead for combining, negative means "
"default.");
Error GPUSink::draw(const Src& src, SkBitmap* dst, SkWStream*, SkString* log) const {
diff --git a/gm/beziereffects.cpp b/gm/beziereffects.cpp
index 3447df4106..d58e5910b0 100644
--- a/gm/beziereffects.cpp
+++ b/gm/beziereffects.cpp
@@ -30,7 +30,7 @@ namespace skiagm {
class BezierCubicOrConicTestBatch : public GrTestBatch {
public:
- DEFINE_BATCH_CLASS_ID
+ DEFINE_OP_CLASS_ID
const char* name() const override { return "BezierCubicOrConicTestBatch"; }
@@ -386,7 +386,7 @@ private:
class BezierQuadTestBatch : public GrTestBatch {
public:
- DEFINE_BATCH_CLASS_ID
+ DEFINE_OP_CLASS_ID
const char* name() const override { return "BezierQuadTestBatch"; }
BezierQuadTestBatch(sk_sp<GrGeometryProcessor> gp, const SkRect& bounds, GrColor color,
diff --git a/gm/convexpolyeffect.cpp b/gm/convexpolyeffect.cpp
index fdeef299dd..880574217d 100644
--- a/gm/convexpolyeffect.cpp
+++ b/gm/convexpolyeffect.cpp
@@ -42,7 +42,7 @@ static SkRect sorted_rect(const SkRect& unsorted) {
namespace skiagm {
class PolyBoundsBatch : public GrTestBatch {
public:
- DEFINE_BATCH_CLASS_ID
+ DEFINE_OP_CLASS_ID
const char* name() const override { return "PolyBoundsBatch"; }
diff --git a/gn/gpu.gni b/gn/gpu.gni
index 439443eee6..0fb0b53754 100644
--- a/gn/gpu.gni
+++ b/gn/gpu.gni
@@ -244,8 +244,6 @@ skia_gpu_sources = [
"$_src/gpu/batches/GrAnalyticRectBatch.h",
"$_src/gpu/batches/GrAtlasTextBatch.cpp",
"$_src/gpu/batches/GrAtlasTextBatch.h",
- "$_src/gpu/batches/GrBatch.cpp",
- "$_src/gpu/batches/GrBatch.h",
"$_src/gpu/batches/GrClearBatch.h",
"$_src/gpu/batches/GrClearStencilClipBatch.h",
"$_src/gpu/batches/GrCopySurfaceBatch.cpp",
@@ -272,6 +270,8 @@ skia_gpu_sources = [
"$_src/gpu/batches/GrNonAAStrokeRectBatch.h",
"$_src/gpu/batches/GrNinePatch.cpp",
"$_src/gpu/batches/GrNinePatch.h",
+ "$_src/gpu/batches/GrOp.cpp",
+ "$_src/gpu/batches/GrOp.h",
"$_src/gpu/batches/GrPLSPathRenderer.cpp",
"$_src/gpu/batches/GrPLSPathRenderer.h",
"$_src/gpu/batches/GrRectBatchFactory.h",
diff --git a/include/gpu/GrContextOptions.h b/include/gpu/GrContextOptions.h
index e69b18ce62..6a1b51b5a6 100644
--- a/include/gpu/GrContextOptions.h
+++ b/include/gpu/GrContextOptions.h
@@ -39,11 +39,11 @@ struct GrContextOptions {
immediately. Intended to ease debugging. */
bool fImmediateMode = false;
- /** For debugging purposes turn each GrBatch's bounds into a clip rect. This is used to
+ /** For debugging purposes turn each GrOp's bounds into a clip rect. This is used to
verify that the clip bounds are conservative. */
bool fClipBatchToBounds = false;
- /** For debugging, override the default maximum look-back or look-ahead window for GrBatch
+ /** For debugging, override the default maximum look-back or look-ahead window for GrOp
combining. */
int fMaxBatchLookback = -1;
int fMaxBatchLookahead = -1;
diff --git a/include/private/GrAuditTrail.h b/include/private/GrAuditTrail.h
index 6e8c92ddb6..ac1f850d47 100644
--- a/include/private/GrAuditTrail.h
+++ b/include/private/GrAuditTrail.h
@@ -15,7 +15,7 @@
#include "SkTArray.h"
#include "SkTHash.h"
-class GrBatch;
+class GrOp;
/*
* GrAuditTrail collects a list of draw ops, detailed information about those ops, and can dump them
@@ -85,9 +85,9 @@ public:
fCurrentStackTrace.push_back(SkString(framename));
}
- void addBatch(const GrBatch* batch);
+ void addBatch(const GrOp* batch);
- void batchingResultCombined(const GrBatch* consumer, const GrBatch* consumed);
+ void batchingResultCombined(const GrOp* consumer, const GrOp* consumed);
// Because batching is heavily dependent on sequence of draw calls, these calls will only
// produce valid information for the given draw sequence which preceeded them.
diff --git a/src/gpu/GrAppliedClip.h b/src/gpu/GrAppliedClip.h
index 3e98c6cb05..27fbde0b29 100644
--- a/src/gpu/GrAppliedClip.h
+++ b/src/gpu/GrAppliedClip.h
@@ -15,7 +15,7 @@ class GrFragmentProcessor;
/**
* Produced by GrClip. It provides a set of modifications to the drawing state that are used to
- * create the final GrPipeline for a GrBatch.
+ * create the final GrPipeline for a GrOp.
*/
class GrAppliedClip : public SkNoncopyable {
public:
diff --git a/src/gpu/GrAuditTrail.cpp b/src/gpu/GrAuditTrail.cpp
index 93694cbe10..8f6f566456 100644
--- a/src/gpu/GrAuditTrail.cpp
+++ b/src/gpu/GrAuditTrail.cpp
@@ -6,11 +6,11 @@
*/
#include "GrAuditTrail.h"
-#include "batches/GrBatch.h"
+#include "batches/GrOp.h"
const int GrAuditTrail::kGrAuditTrailInvalidID = -1;
-void GrAuditTrail::addBatch(const GrBatch* batch) {
+void GrAuditTrail::addBatch(const GrOp* batch) {
SkASSERT(fEnabled);
Batch* auditBatch = new Batch;
fBatchPool.emplace_back(auditBatch);
@@ -51,7 +51,7 @@ void GrAuditTrail::addBatch(const GrBatch* batch) {
fBatchList.emplace_back(batchNode);
}
-void GrAuditTrail::batchingResultCombined(const GrBatch* consumer, const GrBatch* consumed) {
+void GrAuditTrail::batchingResultCombined(const GrOp* consumer, const GrOp* consumed) {
// Look up the batch we are going to glom onto
int* indexPtr = fIDLookup.find(consumer->uniqueID());
SkASSERT(indexPtr);
diff --git a/src/gpu/GrBatchTest.h b/src/gpu/GrBatchTest.h
index 32e8e28930..c6cfa0d9c7 100644
--- a/src/gpu/GrBatchTest.h
+++ b/src/gpu/GrBatchTest.h
@@ -18,7 +18,7 @@ class SkRandom;
/*
* This file defines some macros for testing batches, and also declares functions / objects which
- * are generally useful for GrBatch testing
+ * are generally useful for GrOp testing
*/
// Batches should define test functions using DRAW_BATCH_TEST_DEFINE. The other macros defined
diff --git a/src/gpu/GrMesh.h b/src/gpu/GrMesh.h
index 964e0b4a8e..5d1ce6f3e4 100644
--- a/src/gpu/GrMesh.h
+++ b/src/gpu/GrMesh.h
@@ -35,7 +35,7 @@ protected:
};
/**
- * Used to communicate index and vertex buffers, counts, and offsets for a draw from GrBatch to
+ * Used to communicate index and vertex buffers, counts, and offsets for a draw from GrOp to
* GrGpu. It also holds the primitive type for the draw. TODO: Consider moving ownership of this
* and draw-issuing responsibility to GrPrimitiveProcessor. The rest of the vertex info lives there
* already (stride, attribute mappings).
diff --git a/src/gpu/GrOvalRenderer.cpp b/src/gpu/GrOvalRenderer.cpp
index ea126b1262..b5fa7fd36c 100644
--- a/src/gpu/GrOvalRenderer.cpp
+++ b/src/gpu/GrOvalRenderer.cpp
@@ -26,7 +26,7 @@
#include "glsl/GrGLSLUniformHandler.h"
#include "glsl/GrGLSLUtil.h"
-// TODO(joshualitt) - Break this file up during GrBatch post implementation cleanup
+// TODO(joshualitt) - Break this file up during GrOp post implementation cleanup
namespace {
@@ -599,7 +599,7 @@ static const uint16_t* circle_type_to_indices(bool stroked) {
class CircleBatch : public GrVertexBatch {
public:
- DEFINE_BATCH_CLASS_ID
+ DEFINE_OP_CLASS_ID
/** Optional extra params to render a partial arc rather than a full circle. */
struct ArcParams {
@@ -1098,7 +1098,7 @@ private:
target->draw(gp.get(), mesh);
}
- bool onCombineIfPossible(GrBatch* t, const GrCaps& caps) override {
+ bool onCombineIfPossible(GrOp* t, const GrCaps& caps) override {
CircleBatch* that = t->cast<CircleBatch>();
if (!GrPipeline::CanCombine(*this->pipeline(), this->bounds(), *that->pipeline(),
that->bounds(), caps)) {
@@ -1150,7 +1150,7 @@ private:
class EllipseBatch : public GrVertexBatch {
public:
- DEFINE_BATCH_CLASS_ID
+ DEFINE_OP_CLASS_ID
static GrDrawBatch* Create(GrColor color, const SkMatrix& viewMatrix, const SkRect& ellipse,
const SkStrokeRec& stroke) {
SkASSERT(viewMatrix.rectStaysRect());
@@ -1340,7 +1340,7 @@ private:
helper.recordDraw(target, gp.get());
}
- bool onCombineIfPossible(GrBatch* t, const GrCaps& caps) override {
+ bool onCombineIfPossible(GrOp* t, const GrCaps& caps) override {
EllipseBatch* that = t->cast<EllipseBatch>();
if (!GrPipeline::CanCombine(*this->pipeline(), this->bounds(), *that->pipeline(),
@@ -1381,7 +1381,7 @@ private:
class DIEllipseBatch : public GrVertexBatch {
public:
- DEFINE_BATCH_CLASS_ID
+ DEFINE_OP_CLASS_ID
static GrDrawBatch* Create(GrColor color,
const SkMatrix& viewMatrix,
@@ -1558,7 +1558,7 @@ private:
helper.recordDraw(target, gp.get());
}
- bool onCombineIfPossible(GrBatch* t, const GrCaps& caps) override {
+ bool onCombineIfPossible(GrOp* t, const GrCaps& caps) override {
DIEllipseBatch* that = t->cast<DIEllipseBatch>();
if (!GrPipeline::CanCombine(*this->pipeline(), this->bounds(), *that->pipeline(),
that->bounds(), caps)) {
@@ -1723,7 +1723,7 @@ static const uint16_t* rrect_type_to_indices(RRectType type) {
class RRectCircleRendererBatch : public GrVertexBatch {
public:
- DEFINE_BATCH_CLASS_ID
+ DEFINE_OP_CLASS_ID
// A devStrokeWidth <= 0 indicates a fill only. If devStrokeWidth > 0 then strokeOnly indicates
// whether the rrect is only stroked or stroked and filled.
@@ -2022,7 +2022,7 @@ private:
target->draw(gp.get(), mesh);
}
- bool onCombineIfPossible(GrBatch* t, const GrCaps& caps) override {
+ bool onCombineIfPossible(GrOp* t, const GrCaps& caps) override {
RRectCircleRendererBatch* that = t->cast<RRectCircleRendererBatch>();
if (!GrPipeline::CanCombine(*this->pipeline(), this->bounds(), *that->pipeline(),
that->bounds(), caps)) {
@@ -2083,7 +2083,7 @@ static const GrBuffer* ref_rrect_index_buffer(RRectType type,
class RRectEllipseRendererBatch : public GrVertexBatch {
public:
- DEFINE_BATCH_CLASS_ID
+ DEFINE_OP_CLASS_ID
// If devStrokeWidths values are <= 0 indicates then fill only. Otherwise, strokeOnly indicates
// whether the rrect is only stroked or stroked and filled.
@@ -2275,7 +2275,7 @@ private:
helper.recordDraw(target, gp.get());
}
- bool onCombineIfPossible(GrBatch* t, const GrCaps& caps) override {
+ bool onCombineIfPossible(GrOp* t, const GrCaps& caps) override {
RRectEllipseRendererBatch* that = t->cast<RRectEllipseRendererBatch>();
if (!GrPipeline::CanCombine(*this->pipeline(), this->bounds(), *that->pipeline(),
diff --git a/src/gpu/GrPipeline.cpp b/src/gpu/GrPipeline.cpp
index c58e0ee449..0e4cac5ac1 100644
--- a/src/gpu/GrPipeline.cpp
+++ b/src/gpu/GrPipeline.cpp
@@ -16,7 +16,7 @@
#include "GrRenderTargetPriv.h"
#include "GrXferProcessor.h"
-#include "batches/GrBatch.h"
+#include "batches/GrOp.h"
GrPipeline* GrPipeline::CreateAt(void* memory, const CreateArgs& args,
GrXPOverridesForBatch* overrides) {
diff --git a/src/gpu/GrPipeline.h b/src/gpu/GrPipeline.h
index 2c7c779bd9..0b0b898425 100644
--- a/src/gpu/GrPipeline.h
+++ b/src/gpu/GrPipeline.h
@@ -26,10 +26,10 @@
#include "effects/GrPorterDuffXferProcessor.h"
#include "effects/GrSimpleTextureEffect.h"
-class GrBatch;
-class GrRenderTargetContext;
class GrDeviceCoordTexture;
+class GrOp;
class GrPipelineBuilder;
+class GrRenderTargetContext;
struct GrBatchToXPOverrides {
GrBatchToXPOverrides()
@@ -81,7 +81,7 @@ public:
static bool AreEqual(const GrPipeline& a, const GrPipeline& b);
/**
- * Allows a GrBatch subclass to determine whether two GrBatches can combine. This is a stricter
+ * Allows a GrOp subclass to determine whether two GrBatches can combine. This is a stricter
* test than isEqual because it also considers blend barriers when the two batches' bounds
* overlap
*/
diff --git a/src/gpu/GrPipelineBuilder.cpp b/src/gpu/GrPipelineBuilder.cpp
index 864d6f1bc6..fac5752f70 100644
--- a/src/gpu/GrPipelineBuilder.cpp
+++ b/src/gpu/GrPipelineBuilder.cpp
@@ -12,7 +12,7 @@
#include "GrPipeline.h"
#include "GrProcOptInfo.h"
#include "GrXferProcessor.h"
-#include "batches/GrBatch.h"
+#include "batches/GrOp.h"
#include "effects/GrPorterDuffXferProcessor.h"
GrPipelineBuilder::GrPipelineBuilder()
diff --git a/src/gpu/GrPrimitiveProcessor.h b/src/gpu/GrPrimitiveProcessor.h
index cb23897fb6..addc1c1d79 100644
--- a/src/gpu/GrPrimitiveProcessor.h
+++ b/src/gpu/GrPrimitiveProcessor.h
@@ -58,7 +58,7 @@ enum GrPixelLocalStorageState {
/*
* This class allows the GrPipeline to communicate information about the pipeline to a
- * GrBatch which should be forwarded to the GrPrimitiveProcessor(s) created by the batch.
+ * GrOp which should be forwarded to the GrPrimitiveProcessor(s) created by the batch.
* These are not properly part of the pipeline because they assume the specific inputs
* that the batch provided when it created the pipeline. Identical pipelines may be
* created by different batches with different input assumptions and therefore different
diff --git a/src/gpu/GrRenderTargetContext.cpp b/src/gpu/GrRenderTargetContext.cpp
index 4cba6b9a70..f6645e329b 100644
--- a/src/gpu/GrRenderTargetContext.cpp
+++ b/src/gpu/GrRenderTargetContext.cpp
@@ -20,7 +20,7 @@
#include "GrResourceProvider.h"
#include "SkSurfacePriv.h"
-#include "batches/GrBatch.h"
+#include "batches/GrOp.h"
#include "batches/GrClearBatch.h"
#include "batches/GrDrawAtlasBatch.h"
#include "batches/GrDrawVerticesBatch.h"
@@ -274,7 +274,7 @@ void GrRenderTargetContext::internalClear(const GrFixedClip& clip,
if (!this->accessRenderTarget()) {
return;
}
- sk_sp<GrBatch> batch(GrClearBatch::Make(clip, color, this->accessRenderTarget()));
+ sk_sp<GrOp> batch(GrClearBatch::Make(clip, color, this->accessRenderTarget()));
if (!batch) {
return;
}
diff --git a/src/gpu/GrRenderTargetOpList.cpp b/src/gpu/GrRenderTargetOpList.cpp
index 7b16a55c9d..ad20771ccd 100644
--- a/src/gpu/GrRenderTargetOpList.cpp
+++ b/src/gpu/GrRenderTargetOpList.cpp
@@ -177,7 +177,7 @@ void GrRenderTargetOpList::prepareBatches(GrBatchFlushState* flushState) {
}
}
-// TODO: this is where GrBatch::renderTarget is used (which is fine since it
+// TODO: this is where GrOp::renderTarget is used (which is fine since it
// is at flush time). However, we need to store the RenderTargetProxy in the
// Batches and instantiate them here.
bool GrRenderTargetOpList::drawBatches(GrBatchFlushState* flushState) {
@@ -242,7 +242,7 @@ void GrRenderTargetOpList::freeGpuResources() {
}
}
-static void batch_bounds(SkRect* bounds, const GrBatch* batch) {
+static void batch_bounds(SkRect* bounds, const GrOp* batch) {
*bounds = batch->bounds();
if (batch->hasZeroArea()) {
if (batch->hasAABloat()) {
@@ -388,19 +388,19 @@ void GrRenderTargetOpList::stencilPath(GrRenderTargetContext* renderTargetContex
return;
}
- GrBatch* batch = GrStencilPathBatch::Create(viewMatrix,
- useHWAA,
- path->getFillType(),
- appliedClip.hasStencilClip(),
- stencilAttachment->bits(),
- appliedClip.scissorState(),
- renderTargetContext->accessRenderTarget(),
- path);
+ GrOp* batch = GrStencilPathBatch::Create(viewMatrix,
+ useHWAA,
+ path->getFillType(),
+ appliedClip.hasStencilClip(),
+ stencilAttachment->bits(),
+ appliedClip.scissorState(),
+ renderTargetContext->accessRenderTarget(),
+ path);
this->recordBatch(batch, appliedClip.clippedDrawBounds());
batch->unref();
}
-void GrRenderTargetOpList::addBatch(sk_sp<GrBatch> batch) {
+void GrRenderTargetOpList::addBatch(sk_sp<GrOp> batch) {
this->recordBatch(batch.get(), batch->bounds());
}
@@ -427,7 +427,7 @@ void GrRenderTargetOpList::discard(GrRenderTarget* renderTarget) {
// Currently this just inserts a discard batch. However, once in MDB this can remove all the
// previously recorded batches and change the load op to discard.
if (this->caps()->discardRenderTargetSupport()) {
- GrBatch* batch = new GrDiscardBatch(renderTarget);
+ GrOp* batch = new GrDiscardBatch(renderTarget);
this->recordBatch(batch, batch->bounds());
batch->unref();
}
@@ -439,7 +439,7 @@ bool GrRenderTargetOpList::copySurface(GrSurface* dst,
GrSurface* src,
const SkIRect& srcRect,
const SkIPoint& dstPoint) {
- GrBatch* batch = GrCopySurfaceBatch::Create(dst, src, srcRect, dstPoint);
+ GrOp* batch = GrCopySurfaceBatch::Create(dst, src, srcRect, dstPoint);
if (!batch) {
return false;
}
@@ -466,7 +466,7 @@ static void join(SkRect* out, const SkRect& a, const SkRect& b) {
out->fBottom = SkTMax(a.fBottom, b.fBottom);
}
-GrBatch* GrRenderTargetOpList::recordBatch(GrBatch* batch, const SkRect& clippedBounds) {
+GrOp* GrRenderTargetOpList::recordBatch(GrOp* batch, const SkRect& clippedBounds) {
// A closed GrOpList should never receive new/more batches
SkASSERT(!this->isClosed());
@@ -475,31 +475,31 @@ GrBatch* GrRenderTargetOpList::recordBatch(GrBatch* batch, const SkRect& clipped
// 2) intersect with something
// 3) find a 'blocker'
GR_AUDIT_TRAIL_ADDBATCH(fAuditTrail, batch);
- GrBATCH_INFO("Re-Recording (%s, B%u)\n"
- "\tBounds LRTB (%f, %f, %f, %f)\n",
- batch->name(),
- batch->uniqueID(),
- batch->bounds().fLeft, batch->bounds().fRight,
- batch->bounds().fTop, batch->bounds().fBottom);
- GrBATCH_INFO(SkTabString(batch->dumpInfo(), 1).c_str());
- GrBATCH_INFO("\tClipped Bounds: [L: %.2f, T: %.2f, R: %.2f, B: %.2f]\n",
- clippedBounds.fLeft, clippedBounds.fTop, clippedBounds.fRight,
- clippedBounds.fBottom);
- GrBATCH_INFO("\tOutcome:\n");
+ GrOP_INFO("Re-Recording (%s, B%u)\n"
+ "\tBounds LRTB (%f, %f, %f, %f)\n",
+ batch->name(),
+ batch->uniqueID(),
+ batch->bounds().fLeft, batch->bounds().fRight,
+ batch->bounds().fTop, batch->bounds().fBottom);
+ GrOP_INFO(SkTabString(batch->dumpInfo(), 1).c_str());
+ GrOP_INFO("\tClipped Bounds: [L: %.2f, T: %.2f, R: %.2f, B: %.2f]\n",
+ clippedBounds.fLeft, clippedBounds.fTop, clippedBounds.fRight,
+ clippedBounds.fBottom);
+ GrOP_INFO("\tOutcome:\n");
int maxCandidates = SkTMin(fMaxBatchLookback, fRecordedBatches.count());
if (maxCandidates) {
int i = 0;
while (true) {
- GrBatch* candidate = fRecordedBatches.fromBack(i).fBatch.get();
+ GrOp* candidate = fRecordedBatches.fromBack(i).fBatch.get();
// We cannot continue to search backwards if the render target changes
if (candidate->renderTargetUniqueID() != batch->renderTargetUniqueID()) {
- GrBATCH_INFO("\t\tBreaking because of (%s, B%u) Rendertarget\n",
- candidate->name(), candidate->uniqueID());
+ GrOP_INFO("\t\tBreaking because of (%s, B%u) Rendertarget\n",
+ candidate->name(), candidate->uniqueID());
break;
}
if (candidate->combineIfPossible(batch, *this->caps())) {
- GrBATCH_INFO("\t\tCombining with (%s, B%u)\n", candidate->name(),
- candidate->uniqueID());
+ GrOP_INFO("\t\tCombining with (%s, B%u)\n", candidate->name(),
+ candidate->uniqueID());
GR_AUDIT_TRAIL_BATCHING_RESULT_COMBINED(fAuditTrail, candidate, batch);
join(&fRecordedBatches.fromBack(i).fClippedBounds,
fRecordedBatches.fromBack(i).fClippedBounds, clippedBounds);
@@ -508,18 +508,18 @@ GrBatch* GrRenderTargetOpList::recordBatch(GrBatch* batch, const SkRect& clipped
// Stop going backwards if we would cause a painter's order violation.
const SkRect& candidateBounds = fRecordedBatches.fromBack(i).fClippedBounds;
if (!can_reorder(candidateBounds, clippedBounds)) {
- GrBATCH_INFO("\t\tIntersects with (%s, B%u)\n", candidate->name(),
- candidate->uniqueID());
+ GrOP_INFO("\t\tIntersects with (%s, B%u)\n", candidate->name(),
+ candidate->uniqueID());
break;
}
++i;
if (i == maxCandidates) {
- GrBATCH_INFO("\t\tReached max lookback or beginning of batch array %d\n", i);
+ GrOP_INFO("\t\tReached max lookback or beginning of batch array %d\n", i);
break;
}
}
} else {
- GrBATCH_INFO("\t\tFirstBatch\n");
+ GrOP_INFO("\t\tFirstBatch\n");
}
GR_AUDIT_TRAIL_BATCHING_RESULT_NEW(fAuditTrail, batch);
fRecordedBatches.emplace_back(RecordedBatch{sk_ref_sp(batch), clippedBounds});
@@ -532,16 +532,16 @@ void GrRenderTargetOpList::forwardCombine() {
return;
}
for (int i = 0; i < fRecordedBatches.count() - 2; ++i) {
- GrBatch* batch = fRecordedBatches[i].fBatch.get();
+ GrOp* batch = fRecordedBatches[i].fBatch.get();
const SkRect& batchBounds = fRecordedBatches[i].fClippedBounds;
int maxCandidateIdx = SkTMin(i + fMaxBatchLookahead, fRecordedBatches.count() - 1);
int j = i + 1;
while (true) {
- GrBatch* candidate = fRecordedBatches[j].fBatch.get();
+ GrOp* candidate = fRecordedBatches[j].fBatch.get();
// We cannot continue to search if the render target changes
if (candidate->renderTargetUniqueID() != batch->renderTargetUniqueID()) {
- GrBATCH_INFO("\t\tBreaking because of (%s, B%u) Rendertarget\n",
- candidate->name(), candidate->uniqueID());
+ GrOP_INFO("\t\tBreaking because of (%s, B%u) Rendertarget\n",
+ candidate->name(), candidate->uniqueID());
break;
}
if (j == i +1) {
@@ -549,8 +549,8 @@ void GrRenderTargetOpList::forwardCombine() {
// via backwards combining in recordBatch.
SkASSERT(!batch->combineIfPossible(candidate, *this->caps()));
} else if (batch->combineIfPossible(candidate, *this->caps())) {
- GrBATCH_INFO("\t\tCombining with (%s, B%u)\n", candidate->name(),
- candidate->uniqueID());
+ GrOP_INFO("\t\tCombining with (%s, B%u)\n", candidate->name(),
+ candidate->uniqueID());
GR_AUDIT_TRAIL_BATCHING_RESULT_COMBINED(fAuditTrail, batch, candidate);
fRecordedBatches[j].fBatch = std::move(fRecordedBatches[i].fBatch);
join(&fRecordedBatches[j].fClippedBounds, fRecordedBatches[j].fClippedBounds,
@@ -560,13 +560,13 @@ void GrRenderTargetOpList::forwardCombine() {
// Stop going traversing if we would cause a painter's order violation.
const SkRect& candidateBounds = fRecordedBatches[j].fClippedBounds;
if (!can_reorder(candidateBounds, batchBounds)) {
- GrBATCH_INFO("\t\tIntersects with (%s, B%u)\n", candidate->name(),
- candidate->uniqueID());
+ GrOP_INFO("\t\tIntersects with (%s, B%u)\n", candidate->name(),
+ candidate->uniqueID());
break;
}
++j;
if (j > maxCandidateIdx) {
- GrBATCH_INFO("\t\tReached max lookahead or end of batch array %d\n", i);
+ GrOP_INFO("\t\tReached max lookahead or end of batch array %d\n", i);
break;
}
}
@@ -578,7 +578,7 @@ void GrRenderTargetOpList::forwardCombine() {
void GrRenderTargetOpList::clearStencilClip(const GrFixedClip& clip,
bool insideStencilMask,
GrRenderTarget* rt) {
- GrBatch* batch = new GrClearStencilClipBatch(clip, insideStencilMask, rt);
+ GrOp* batch = new GrClearStencilClipBatch(clip, insideStencilMask, rt);
this->recordBatch(batch, batch->bounds());
batch->unref();
}
diff --git a/src/gpu/GrRenderTargetOpList.h b/src/gpu/GrRenderTargetOpList.h
index 2f4a3475c3..aad141731d 100644
--- a/src/gpu/GrRenderTargetOpList.h
+++ b/src/gpu/GrRenderTargetOpList.h
@@ -28,12 +28,12 @@
#include "SkTypes.h"
class GrAuditTrail;
-class GrBatch;
class GrClearBatch;
class GrClip;
class GrCaps;
class GrPath;
class GrDrawPathBatchBase;
+class GrOp;
class GrPipelineBuilder;
class GrRenderTargetProxy;
@@ -84,7 +84,7 @@ public:
void drawBatch(const GrPipelineBuilder&, GrRenderTargetContext*, const GrClip&, GrDrawBatch*);
- void addBatch(sk_sp<GrBatch>);
+ void addBatch(sk_sp<GrOp>);
/**
* Draws the path into user stencil bits. Upon return, all user stencil values
@@ -134,7 +134,7 @@ private:
// Returns the batch that the input batch was combined with or the input batch if it wasn't
// combined.
- GrBatch* recordBatch(GrBatch*, const SkRect& clippedBounds);
+ GrOp* recordBatch(GrOp*, const SkRect& clippedBounds);
void forwardCombine();
// Makes a copy of the dst if it is necessary for the draw. Returns false if a copy is required
@@ -151,7 +151,7 @@ private:
void clearStencilClip(const GrFixedClip&, bool insideStencilMask, GrRenderTarget*);
struct RecordedBatch {
- sk_sp<GrBatch> fBatch;
+ sk_sp<GrOp> fBatch;
SkRect fClippedBounds;
};
SkSTArray<256, RecordedBatch, true> fRecordedBatches;
diff --git a/src/gpu/GrResourceProvider.h b/src/gpu/GrResourceProvider.h
index abcd699957..70535e2344 100644
--- a/src/gpu/GrResourceProvider.h
+++ b/src/gpu/GrResourceProvider.h
@@ -97,7 +97,7 @@ public:
enum Flags {
/** If the caller intends to do direct reads/writes to/from the CPU then this flag must be
* set when accessing resources during a GrOpList flush. This includes the execution of
- * GrBatch objects. The reason is that these memory operations are done immediately and
+ * GrOp objects. The reason is that these memory operations are done immediately and
* will occur out of order WRT the operations being flushed.
* Make this automatic: https://bug.skia.org/4156
*/
@@ -129,7 +129,7 @@ public:
}
/** Returns a GrBatchAtlas. This function can be called anywhere, but the returned atlas should
- * only be used inside of GrBatch::generateGeometry
+ * only be used inside of GrOp::generateGeometry
* @param GrPixelConfig The pixel config which this atlas will store
* @param width width in pixels of the atlas
* @param height height in pixels of the atlas
diff --git a/src/gpu/GrTextureOpList.cpp b/src/gpu/GrTextureOpList.cpp
index 974ee9bc75..ffffa0b031 100644
--- a/src/gpu/GrTextureOpList.cpp
+++ b/src/gpu/GrTextureOpList.cpp
@@ -82,7 +82,7 @@ bool GrTextureOpList::copySurface(GrSurface* dst,
GrSurface* src,
const SkIRect& srcRect,
const SkIPoint& dstPoint) {
- GrBatch* batch = GrCopySurfaceBatch::Create(dst, src, srcRect, dstPoint);
+ GrOp* batch = GrCopySurfaceBatch::Create(dst, src, srcRect, dstPoint);
if (!batch) {
return false;
}
@@ -95,18 +95,18 @@ bool GrTextureOpList::copySurface(GrSurface* dst,
return true;
}
-void GrTextureOpList::recordBatch(GrBatch* batch) {
+void GrTextureOpList::recordBatch(GrOp* batch) {
// A closed GrOpList should never receive new/more batches
SkASSERT(!this->isClosed());
GR_AUDIT_TRAIL_ADDBATCH(fAuditTrail, batch);
- GrBATCH_INFO("Re-Recording (%s, B%u)\n"
+ GrOP_INFO("Re-Recording (%s, B%u)\n"
"\tBounds LRTB (%f, %f, %f, %f)\n",
batch->name(),
batch->uniqueID(),
batch->bounds().fLeft, batch->bounds().fRight,
batch->bounds().fTop, batch->bounds().fBottom);
- GrBATCH_INFO(SkTabString(batch->dumpInfo(), 1).c_str());
+ GrOP_INFO(SkTabString(batch->dumpInfo(), 1).c_str());
GR_AUDIT_TRAIL_BATCHING_RESULT_NEW(fAuditTrail, batch);
fRecordedBatches.emplace_back(sk_ref_sp(batch));
diff --git a/src/gpu/GrTextureOpList.h b/src/gpu/GrTextureOpList.h
index 7674184890..934c4b0211 100644
--- a/src/gpu/GrTextureOpList.h
+++ b/src/gpu/GrTextureOpList.h
@@ -13,8 +13,8 @@
#include "SkTArray.h"
class GrAuditTrail;
-class GrBatch;
class GrGpu;
+class GrOp;
class GrTextureProxy;
struct SkIPoint;
struct SkIRect;
@@ -60,10 +60,10 @@ public:
SkDEBUGCODE(void dump() const override;)
private:
- void recordBatch(GrBatch*);
+ void recordBatch(GrOp*);
- SkSTArray<2, sk_sp<GrBatch>, true> fRecordedBatches;
- GrGpu* fGpu;
+ SkSTArray<2, sk_sp<GrOp>, true> fRecordedBatches;
+ GrGpu* fGpu;
typedef GrOpList INHERITED;
};
diff --git a/src/gpu/SkGrPriv.h b/src/gpu/SkGrPriv.h
index bb41aad87e..44ff8e7e25 100644
--- a/src/gpu/SkGrPriv.h
+++ b/src/gpu/SkGrPriv.h
@@ -67,7 +67,7 @@ bool SkPaintToGrPaintReplaceShader(GrContext*,
GrPaint* grPaint);
/** Blends the SkPaint's shader (or color if no shader) with the color which specified via a
- GrBatch's GrPrimitiveProcesssor. Currently there is a bool param to indicate whether the
+ GrOp's GrPrimitiveProcesssor. Currently there is a bool param to indicate whether the
primitive color is the dst or src color to the blend in order to work around differences between
drawVertices and drawAtlas. */
bool SkPaintToGrPaintWithXfermode(GrContext* context,
diff --git a/src/gpu/batches/GrAAConvexPathRenderer.cpp b/src/gpu/batches/GrAAConvexPathRenderer.cpp
index 1be84c7ca0..8f100503e6 100644
--- a/src/gpu/batches/GrAAConvexPathRenderer.cpp
+++ b/src/gpu/batches/GrAAConvexPathRenderer.cpp
@@ -734,7 +734,7 @@ static sk_sp<GrGeometryProcessor> create_fill_gp(bool tweakAlphaForCoverage,
class AAConvexPathBatch : public GrVertexBatch {
public:
- DEFINE_BATCH_CLASS_ID
+ DEFINE_OP_CLASS_ID
AAConvexPathBatch(GrColor color, const SkMatrix& viewMatrix, const SkPath& path)
: INHERITED(ClassID()) {
fGeoData.emplace_back(Geometry{color, viewMatrix, path});
@@ -932,7 +932,7 @@ private:
}
}
- bool onCombineIfPossible(GrBatch* t, const GrCaps& caps) override {
+ bool onCombineIfPossible(GrOp* t, const GrCaps& caps) override {
AAConvexPathBatch* that = t->cast<AAConvexPathBatch>();
if (!GrPipeline::CanCombine(*this->pipeline(), this->bounds(), *that->pipeline(),
that->bounds(), caps)) {
diff --git a/src/gpu/batches/GrAADistanceFieldPathRenderer.cpp b/src/gpu/batches/GrAADistanceFieldPathRenderer.cpp
index e91192733c..8ece0c9db6 100644
--- a/src/gpu/batches/GrAADistanceFieldPathRenderer.cpp
+++ b/src/gpu/batches/GrAADistanceFieldPathRenderer.cpp
@@ -120,7 +120,7 @@ static const SkScalar kAntiAliasPad = 1.0f;
class AADistanceFieldPathBatch : public GrVertexBatch {
public:
- DEFINE_BATCH_CLASS_ID
+ DEFINE_OP_CLASS_ID
typedef GrAADistanceFieldPathRenderer::ShapeData ShapeData;
typedef SkTDynamicHash<ShapeData, ShapeData::Key> ShapeCache;
@@ -480,7 +480,7 @@ private:
const SkMatrix& viewMatrix() const { return fBatch.fViewMatrix; }
bool usesLocalCoords() const { return fBatch.fUsesLocalCoords; }
- bool onCombineIfPossible(GrBatch* t, const GrCaps& caps) override {
+ bool onCombineIfPossible(GrOp* t, const GrCaps& caps) override {
AADistanceFieldPathBatch* that = t->cast<AADistanceFieldPathBatch>();
if (!GrPipeline::CanCombine(*this->pipeline(), this->bounds(), *that->pipeline(),
that->bounds(), caps)) {
diff --git a/src/gpu/batches/GrAAFillRectBatch.cpp b/src/gpu/batches/GrAAFillRectBatch.cpp
index 46a5d50955..c8804daacf 100644
--- a/src/gpu/batches/GrAAFillRectBatch.cpp
+++ b/src/gpu/batches/GrAAFillRectBatch.cpp
@@ -159,7 +159,7 @@ static void generate_aa_fill_rect_geometry(intptr_t verts,
}
class AAFillRectBatch : public GrVertexBatch {
public:
- DEFINE_BATCH_CLASS_ID
+ DEFINE_OP_CLASS_ID
AAFillRectBatch(GrColor color,
const SkMatrix& viewMatrix,
@@ -266,7 +266,7 @@ private:
helper.recordDraw(target, gp.get());
}
- bool onCombineIfPossible(GrBatch* t, const GrCaps& caps) override {
+ bool onCombineIfPossible(GrOp* t, const GrCaps& caps) override {
AAFillRectBatch* that = t->cast<AAFillRectBatch>();
if (!GrPipeline::CanCombine(*this->pipeline(), this->bounds(), *that->pipeline(),
that->bounds(), caps)) {
diff --git a/src/gpu/batches/GrAAFillRectBatch.h b/src/gpu/batches/GrAAFillRectBatch.h
index 1dbec995f7..b4fa2328aa 100644
--- a/src/gpu/batches/GrAAFillRectBatch.h
+++ b/src/gpu/batches/GrAAFillRectBatch.h
@@ -10,9 +10,9 @@
#include "GrColor.h"
-class GrBatch;
class GrDrawBatch;
class SkMatrix;
+class GrOp;
struct SkRect;
namespace GrAAFillRectBatch {
diff --git a/src/gpu/batches/GrAAHairLinePathRenderer.cpp b/src/gpu/batches/GrAAHairLinePathRenderer.cpp
index b2ad9ba28e..d7b3d69664 100644
--- a/src/gpu/batches/GrAAHairLinePathRenderer.cpp
+++ b/src/gpu/batches/GrAAHairLinePathRenderer.cpp
@@ -677,7 +677,7 @@ bool check_bounds(const SkMatrix& viewMatrix, const SkRect& devBounds, void* ver
class AAHairlineBatch : public GrVertexBatch {
public:
- DEFINE_BATCH_CLASS_ID
+ DEFINE_OP_CLASS_ID
AAHairlineBatch(GrColor color,
uint8_t coverage,
@@ -732,7 +732,7 @@ private:
typedef SkTArray<int, true> IntArray;
typedef SkTArray<float, true> FloatArray;
- bool onCombineIfPossible(GrBatch* t, const GrCaps& caps) override {
+ bool onCombineIfPossible(GrOp* t, const GrCaps& caps) override {
AAHairlineBatch* that = t->cast<AAHairlineBatch>();
if (!GrPipeline::CanCombine(*this->pipeline(), this->bounds(), *that->pipeline(),
diff --git a/src/gpu/batches/GrAALinearizingConvexPathRenderer.cpp b/src/gpu/batches/GrAALinearizingConvexPathRenderer.cpp
index f8516b9df2..10f1d72e9c 100644
--- a/src/gpu/batches/GrAALinearizingConvexPathRenderer.cpp
+++ b/src/gpu/batches/GrAALinearizingConvexPathRenderer.cpp
@@ -125,7 +125,7 @@ static sk_sp<GrGeometryProcessor> create_fill_gp(bool tweakAlphaForCoverage,
class AAFlatteningConvexPathBatch : public GrVertexBatch {
public:
- DEFINE_BATCH_CLASS_ID
+ DEFINE_OP_CLASS_ID
AAFlatteningConvexPathBatch(GrColor color,
const SkMatrix& viewMatrix,
@@ -285,7 +285,7 @@ private:
sk_free(indices);
}
- bool onCombineIfPossible(GrBatch* t, const GrCaps& caps) override {
+ bool onCombineIfPossible(GrOp* t, const GrCaps& caps) override {
AAFlatteningConvexPathBatch* that = t->cast<AAFlatteningConvexPathBatch>();
if (!GrPipeline::CanCombine(*this->pipeline(), this->bounds(), *that->pipeline(),
that->bounds(), caps)) {
diff --git a/src/gpu/batches/GrAAStrokeRectBatch.cpp b/src/gpu/batches/GrAAStrokeRectBatch.cpp
index 7f87ad6222..8f13adc752 100644
--- a/src/gpu/batches/GrAAStrokeRectBatch.cpp
+++ b/src/gpu/batches/GrAAStrokeRectBatch.cpp
@@ -118,7 +118,7 @@ static sk_sp<GrGeometryProcessor> create_stroke_rect_gp(bool tweakAlphaForCovera
class AAStrokeRectBatch : public GrVertexBatch {
public:
- DEFINE_BATCH_CLASS_ID
+ DEFINE_OP_CLASS_ID
AAStrokeRectBatch(GrColor color, const SkMatrix& viewMatrix,
const SkRect& devOutside, const SkRect& devInside)
@@ -204,7 +204,7 @@ private:
const SkMatrix& viewMatrix() const { return fViewMatrix; }
bool miterStroke() const { return fMiterStroke; }
- bool onCombineIfPossible(GrBatch* t, const GrCaps&) override;
+ bool onCombineIfPossible(GrOp* t, const GrCaps&) override;
void generateAAStrokeRectGeometry(void* vertices,
size_t offset,
@@ -402,7 +402,7 @@ const GrBuffer* AAStrokeRectBatch::GetIndexBuffer(GrResourceProvider* resourcePr
}
}
-bool AAStrokeRectBatch::onCombineIfPossible(GrBatch* t, const GrCaps& caps) {
+bool AAStrokeRectBatch::onCombineIfPossible(GrOp* t, const GrCaps& caps) {
AAStrokeRectBatch* that = t->cast<AAStrokeRectBatch>();
if (!GrPipeline::CanCombine(*this->pipeline(), this->bounds(), *that->pipeline(),
diff --git a/src/gpu/batches/GrAAStrokeRectBatch.h b/src/gpu/batches/GrAAStrokeRectBatch.h
index 964cc5b4b9..73020d5e45 100644
--- a/src/gpu/batches/GrAAStrokeRectBatch.h
+++ b/src/gpu/batches/GrAAStrokeRectBatch.h
@@ -10,7 +10,6 @@
#include "GrColor.h"
-class GrBatch;
class GrDrawBatch;
class GrResourceProvider;
class SkMatrix;
diff --git a/src/gpu/batches/GrAnalyticRectBatch.cpp b/src/gpu/batches/GrAnalyticRectBatch.cpp
index 655644f048..5e196cc674 100644
--- a/src/gpu/batches/GrAnalyticRectBatch.cpp
+++ b/src/gpu/batches/GrAnalyticRectBatch.cpp
@@ -236,7 +236,7 @@ sk_sp<GrGeometryProcessor> RectGeometryProcessor::TestCreate(GrProcessorTestData
class AnalyticRectBatch : public GrVertexBatch {
public:
- DEFINE_BATCH_CLASS_ID
+ DEFINE_OP_CLASS_ID
AnalyticRectBatch(GrColor color, const SkMatrix& viewMatrix, const SkRect& rect,
const SkRect& croppedRect, const SkRect& bounds)
@@ -357,7 +357,7 @@ private:
helper.recordDraw(target, gp.get());
}
- bool onCombineIfPossible(GrBatch* t, const GrCaps& caps) override {
+ bool onCombineIfPossible(GrOp* t, const GrCaps& caps) override {
AnalyticRectBatch* that = t->cast<AnalyticRectBatch>();
if (!GrPipeline::CanCombine(*this->pipeline(), this->bounds(), *that->pipeline(),
that->bounds(), caps)) {
diff --git a/src/gpu/batches/GrAtlasTextBatch.cpp b/src/gpu/batches/GrAtlasTextBatch.cpp
index cf4ca24aa8..df81e2cb26 100644
--- a/src/gpu/batches/GrAtlasTextBatch.cpp
+++ b/src/gpu/batches/GrAtlasTextBatch.cpp
@@ -183,7 +183,7 @@ void GrAtlasTextBatch::flush(GrVertexBatch::Target* target, FlushInfo* flushInfo
flushInfo->fGlyphsToFlush = 0;
}
-bool GrAtlasTextBatch::onCombineIfPossible(GrBatch* t, const GrCaps& caps) {
+bool GrAtlasTextBatch::onCombineIfPossible(GrOp* t, const GrCaps& caps) {
GrAtlasTextBatch* that = t->cast<GrAtlasTextBatch>();
if (!GrPipeline::CanCombine(*this->pipeline(), this->bounds(), *that->pipeline(),
that->bounds(), caps)) {
diff --git a/src/gpu/batches/GrAtlasTextBatch.h b/src/gpu/batches/GrAtlasTextBatch.h
index b3b88dfc23..ca715a744c 100644
--- a/src/gpu/batches/GrAtlasTextBatch.h
+++ b/src/gpu/batches/GrAtlasTextBatch.h
@@ -15,7 +15,7 @@
class GrAtlasTextBatch : public GrVertexBatch {
public:
- DEFINE_BATCH_CLASS_ID
+ DEFINE_OP_CLASS_ID
static const int kVerticesPerGlyph = GrAtlasTextBlob::kVerticesPerGlyph;
static const int kIndicesPerGlyph = 6;
@@ -152,7 +152,7 @@ private:
bool usesLocalCoords() const { return fBatch.fUsesLocalCoords; }
int numGlyphs() const { return fBatch.fNumGlyphs; }
- bool onCombineIfPossible(GrBatch* t, const GrCaps& caps) override;
+ bool onCombineIfPossible(GrOp* t, const GrCaps& caps) override;
// TODO just use class params
// TODO trying to figure out why lcd is so whack
diff --git a/src/gpu/batches/GrClearBatch.h b/src/gpu/batches/GrClearBatch.h
index 24905d3648..f1870486df 100644
--- a/src/gpu/batches/GrClearBatch.h
+++ b/src/gpu/batches/GrClearBatch.h
@@ -8,16 +8,16 @@
#ifndef GrClearBatch_DEFINED
#define GrClearBatch_DEFINED
-#include "GrBatch.h"
#include "GrBatchFlushState.h"
#include "GrFixedClip.h"
#include "GrGpu.h"
#include "GrGpuCommandBuffer.h"
+#include "GrOp.h"
#include "GrRenderTarget.h"
-class GrClearBatch final : public GrBatch {
+class GrClearBatch final : public GrOp {
public:
- DEFINE_BATCH_CLASS_ID
+ DEFINE_OP_CLASS_ID
static sk_sp<GrClearBatch> Make(const GrFixedClip& clip, GrColor color, GrRenderTarget* rt) {
sk_sp<GrClearBatch> batch(new GrClearBatch(clip, color, rt));
@@ -68,7 +68,7 @@ private:
fRenderTarget.reset(rt);
}
- bool onCombineIfPossible(GrBatch* t, const GrCaps& caps) override {
+ bool onCombineIfPossible(GrOp* t, const GrCaps& caps) override {
// This could be much more complicated. Currently we look at cases where the new clear
// contains the old clear, or when the new clear is a subset of the old clear and is the
// same color.
@@ -105,7 +105,7 @@ private:
GrColor fColor;
GrPendingIOResource<GrRenderTarget, kWrite_GrIOType> fRenderTarget;
- typedef GrBatch INHERITED;
+ typedef GrOp INHERITED;
};
#endif
diff --git a/src/gpu/batches/GrClearStencilClipBatch.h b/src/gpu/batches/GrClearStencilClipBatch.h
index 0f2617355d..87c12f0a26 100644
--- a/src/gpu/batches/GrClearStencilClipBatch.h
+++ b/src/gpu/batches/GrClearStencilClipBatch.h
@@ -8,16 +8,16 @@
#ifndef GrClearStencilClipBatch_DEFINED
#define GrClearStencilClipBatch_DEFINED
-#include "GrBatch.h"
#include "GrBatchFlushState.h"
#include "GrFixedClip.h"
#include "GrGpu.h"
#include "GrGpuCommandBuffer.h"
+#include "GrOp.h"
#include "GrRenderTarget.h"
-class GrClearStencilClipBatch final : public GrBatch {
+class GrClearStencilClipBatch final : public GrOp {
public:
- DEFINE_BATCH_CLASS_ID
+ DEFINE_OP_CLASS_ID
GrClearStencilClipBatch(const GrFixedClip& clip, bool insideStencilMask, GrRenderTarget* rt)
: INHERITED(ClassID())
@@ -49,7 +49,7 @@ public:
}
private:
- bool onCombineIfPossible(GrBatch* t, const GrCaps& caps) override { return false; }
+ bool onCombineIfPossible(GrOp* t, const GrCaps& caps) override { return false; }
void onPrepare(GrBatchFlushState*) override {}
@@ -61,7 +61,7 @@ private:
const bool fInsideStencilMask;
GrPendingIOResource<GrRenderTarget, kWrite_GrIOType> fRenderTarget;
- typedef GrBatch INHERITED;
+ typedef GrOp INHERITED;
};
#endif
diff --git a/src/gpu/batches/GrCopySurfaceBatch.cpp b/src/gpu/batches/GrCopySurfaceBatch.cpp
index 724609871d..9aa46e69fe 100644
--- a/src/gpu/batches/GrCopySurfaceBatch.cpp
+++ b/src/gpu/batches/GrCopySurfaceBatch.cpp
@@ -59,7 +59,7 @@ bool GrCopySurfaceBatch::ClipSrcRectAndDstPoint(const GrSurface* dst,
return !clippedSrcRect->isEmpty();
}
-GrBatch* GrCopySurfaceBatch::Create(GrSurface* dst, GrSurface* src, const SkIRect& srcRect,
+GrOp* GrCopySurfaceBatch::Create(GrSurface* dst, GrSurface* src, const SkIRect& srcRect,
const SkIPoint& dstPoint) {
SkASSERT(dst);
SkASSERT(src);
diff --git a/src/gpu/batches/GrCopySurfaceBatch.h b/src/gpu/batches/GrCopySurfaceBatch.h
index 3d9fc78ac3..a808b2f181 100644
--- a/src/gpu/batches/GrCopySurfaceBatch.h
+++ b/src/gpu/batches/GrCopySurfaceBatch.h
@@ -8,14 +8,14 @@
#ifndef GrCopySurfaceBatch_DEFINED
#define GrCopySurfaceBatch_DEFINED
-#include "GrBatch.h"
#include "GrBatchFlushState.h"
#include "GrGpu.h"
+#include "GrOp.h"
#include "GrRenderTarget.h"
-class GrCopySurfaceBatch final : public GrBatch {
+class GrCopySurfaceBatch final : public GrOp {
public:
- DEFINE_BATCH_CLASS_ID
+ DEFINE_OP_CLASS_ID
/** This should not really be exposed as Create() will apply this clipping, but there is
* currently a workaround in GrContext::copySurface() for non-render target dsts that relies
@@ -27,7 +27,7 @@ public:
SkIRect* clippedSrcRect,
SkIPoint* clippedDstPoint);
- static GrBatch* Create(GrSurface* dst, GrSurface* src, const SkIRect& srcRect,
+ static GrOp* Create(GrSurface* dst, GrSurface* src, const SkIRect& srcRect,
const SkIPoint& dstPoint);
const char* name() const override { return "CopySurface"; }
@@ -64,7 +64,7 @@ private:
this->setBounds(bounds, HasAABloat::kNo, IsZeroArea::kNo);
}
- bool onCombineIfPossible(GrBatch* that, const GrCaps& caps) override { return false; }
+ bool onCombineIfPossible(GrOp* that, const GrCaps& caps) override { return false; }
void onPrepare(GrBatchFlushState*) override {}
@@ -83,7 +83,7 @@ private:
SkIRect fSrcRect;
SkIPoint fDstPoint;
- typedef GrBatch INHERITED;
+ typedef GrOp INHERITED;
};
#endif
diff --git a/src/gpu/batches/GrDefaultPathRenderer.cpp b/src/gpu/batches/GrDefaultPathRenderer.cpp
index 21e2289df3..5faf81a0d1 100644
--- a/src/gpu/batches/GrDefaultPathRenderer.cpp
+++ b/src/gpu/batches/GrDefaultPathRenderer.cpp
@@ -96,7 +96,7 @@ static inline void add_quad(SkPoint** vert, const SkPoint* base, const SkPoint p
class DefaultPathBatch : public GrVertexBatch {
public:
- DEFINE_BATCH_CLASS_ID
+ DEFINE_OP_CLASS_ID
DefaultPathBatch(GrColor color, const SkPath& path, SkScalar tolerance,
uint8_t coverage, const SkMatrix& viewMatrix, bool isHairline,
@@ -268,7 +268,7 @@ private:
target->putBackVertices((size_t)(maxVertices - vertexOffset), (size_t)vertexStride);
}
- bool onCombineIfPossible(GrBatch* t, const GrCaps& caps) override {
+ bool onCombineIfPossible(GrOp* t, const GrCaps& caps) override {
DefaultPathBatch* that = t->cast<DefaultPathBatch>();
if (!GrPipeline::CanCombine(*this->pipeline(), this->bounds(), *that->pipeline(),
that->bounds(), caps)) {
diff --git a/src/gpu/batches/GrDiscardBatch.h b/src/gpu/batches/GrDiscardBatch.h
index d2ebb4662b..a53feffd9c 100644
--- a/src/gpu/batches/GrDiscardBatch.h
+++ b/src/gpu/batches/GrDiscardBatch.h
@@ -8,14 +8,14 @@
#ifndef GrDiscardBatch_DEFINED
#define GrDiscardBatch_DEFINED
-#include "GrBatch.h"
#include "GrBatchFlushState.h"
#include "GrGpu.h"
+#include "GrOp.h"
#include "GrRenderTarget.h"
-class GrDiscardBatch final : public GrBatch {
+class GrDiscardBatch final : public GrOp {
public:
- DEFINE_BATCH_CLASS_ID
+ DEFINE_OP_CLASS_ID
GrDiscardBatch(GrRenderTarget* rt)
: INHERITED(ClassID())
@@ -39,7 +39,7 @@ public:
}
private:
- bool onCombineIfPossible(GrBatch* that, const GrCaps& caps) override {
+ bool onCombineIfPossible(GrOp* that, const GrCaps& caps) override {
return this->renderTargetUniqueID() == that->renderTargetUniqueID();
}
@@ -51,7 +51,7 @@ private:
GrPendingIOResource<GrRenderTarget, kWrite_GrIOType> fRenderTarget;
- typedef GrBatch INHERITED;
+ typedef GrOp INHERITED;
};
#endif
diff --git a/src/gpu/batches/GrDrawAtlasBatch.cpp b/src/gpu/batches/GrDrawAtlasBatch.cpp
index 6f1bfedfec..95c8f2cf32 100644
--- a/src/gpu/batches/GrDrawAtlasBatch.cpp
+++ b/src/gpu/batches/GrDrawAtlasBatch.cpp
@@ -162,7 +162,7 @@ GrDrawAtlasBatch::GrDrawAtlasBatch(GrColor color, const SkMatrix& viewMatrix, in
this->setTransformedBounds(bounds, viewMatrix, HasAABloat::kNo, IsZeroArea::kNo);
}
-bool GrDrawAtlasBatch::onCombineIfPossible(GrBatch* t, const GrCaps& caps) {
+bool GrDrawAtlasBatch::onCombineIfPossible(GrOp* t, const GrCaps& caps) {
GrDrawAtlasBatch* that = t->cast<GrDrawAtlasBatch>();
if (!GrPipeline::CanCombine(*this->pipeline(), this->bounds(), *that->pipeline(),
diff --git a/src/gpu/batches/GrDrawAtlasBatch.h b/src/gpu/batches/GrDrawAtlasBatch.h
index f8d88e0c20..449882c356 100644
--- a/src/gpu/batches/GrDrawAtlasBatch.h
+++ b/src/gpu/batches/GrDrawAtlasBatch.h
@@ -14,7 +14,7 @@
class GrDrawAtlasBatch : public GrVertexBatch {
public:
- DEFINE_BATCH_CLASS_ID
+ DEFINE_OP_CLASS_ID
GrDrawAtlasBatch(GrColor color, const SkMatrix& viewMatrix, int spriteCount,
const SkRSXform* xforms, const SkRect* rects, const SkColor* colors);
@@ -55,7 +55,7 @@ private:
int quadCount() const { return fQuadCount; }
bool coverageIgnored() const { return fCoverageIgnored; }
- bool onCombineIfPossible(GrBatch* t, const GrCaps&) override;
+ bool onCombineIfPossible(GrOp* t, const GrCaps&) override;
struct Geometry {
GrColor fColor;
diff --git a/src/gpu/batches/GrDrawBatch.h b/src/gpu/batches/GrDrawBatch.h
index c92dca346a..2184552d58 100644
--- a/src/gpu/batches/GrDrawBatch.h
+++ b/src/gpu/batches/GrDrawBatch.h
@@ -9,7 +9,7 @@
#define GrDrawBatch_DEFINED
#include <functional>
-#include "GrBatch.h"
+#include "GrOp.h"
#include "GrPipeline.h"
struct GrInitInvariantOutput;
@@ -44,7 +44,7 @@ private:
/**
* Base class for GrBatches that draw. These batches have a GrPipeline installed by GrOpList.
*/
-class GrDrawBatch : public GrBatch {
+class GrDrawBatch : public GrOp {
public:
/** Method that performs an upload on behalf of a DeferredUploadFn. */
using WritePixelsFn = std::function<bool(GrSurface* texture,
@@ -136,7 +136,7 @@ protected:
private:
SkAlignedSTStorage<1, GrPipeline> fPipelineStorage;
bool fPipelineInstalled;
- typedef GrBatch INHERITED;
+ typedef GrOp INHERITED;
};
#endif
diff --git a/src/gpu/batches/GrDrawPathBatch.cpp b/src/gpu/batches/GrDrawPathBatch.cpp
index fb458ec7cc..8d0bc42261 100644
--- a/src/gpu/batches/GrDrawPathBatch.cpp
+++ b/src/gpu/batches/GrDrawPathBatch.cpp
@@ -62,7 +62,7 @@ GrDrawPathRangeBatch::GrDrawPathRangeBatch(const SkMatrix& viewMatrix, SkScalar
this->setBounds(bounds, HasAABloat::kNo, IsZeroArea::kNo);
}
-bool GrDrawPathRangeBatch::onCombineIfPossible(GrBatch* t, const GrCaps& caps) {
+bool GrDrawPathRangeBatch::onCombineIfPossible(GrOp* t, const GrCaps& caps) {
GrDrawPathRangeBatch* that = t->cast<GrDrawPathRangeBatch>();
if (this->fPathRange.get() != that->fPathRange.get() ||
this->transformType() != that->transformType() ||
diff --git a/src/gpu/batches/GrDrawPathBatch.h b/src/gpu/batches/GrDrawPathBatch.h
index 3a46f46b2d..9de29e2171 100644
--- a/src/gpu/batches/GrDrawPathBatch.h
+++ b/src/gpu/batches/GrDrawPathBatch.h
@@ -63,7 +63,7 @@ private:
class GrDrawPathBatch final : public GrDrawPathBatchBase {
public:
- DEFINE_BATCH_CLASS_ID
+ DEFINE_OP_CLASS_ID
static GrDrawBatch* Create(const SkMatrix& viewMatrix, GrColor color, const GrPath* path) {
return new GrDrawPathBatch(viewMatrix, color, path);
@@ -80,7 +80,7 @@ private:
this->setTransformedBounds(path->getBounds(), viewMatrix, HasAABloat::kNo, IsZeroArea::kNo);
}
- bool onCombineIfPossible(GrBatch* t, const GrCaps& caps) override { return false; }
+ bool onCombineIfPossible(GrOp* t, const GrCaps& caps) override { return false; }
void onDraw(GrBatchFlushState* state, const SkRect& bounds) override;
@@ -94,7 +94,7 @@ class GrDrawPathRangeBatch final : public GrDrawPathBatchBase {
public:
typedef GrPathRendering::PathTransformType TransformType;
- DEFINE_BATCH_CLASS_ID
+ DEFINE_OP_CLASS_ID
struct InstanceData : public SkNoncopyable {
public:
@@ -173,7 +173,7 @@ private:
TransformType transformType() const { return fDraws.head()->fInstanceData->transformType(); }
- bool onCombineIfPossible(GrBatch* t, const GrCaps& caps) override;
+ bool onCombineIfPossible(GrOp* t, const GrCaps& caps) override;
void onDraw(GrBatchFlushState* state, const SkRect& bounds) override;
diff --git a/src/gpu/batches/GrDrawVerticesBatch.cpp b/src/gpu/batches/GrDrawVerticesBatch.cpp
index e565022055..1d023200c2 100644
--- a/src/gpu/batches/GrDrawVerticesBatch.cpp
+++ b/src/gpu/batches/GrDrawVerticesBatch.cpp
@@ -171,7 +171,7 @@ void GrDrawVerticesBatch::onPrepareDraws(Target* target) const {
target->draw(gp.get(), mesh);
}
-bool GrDrawVerticesBatch::onCombineIfPossible(GrBatch* t, const GrCaps& caps) {
+bool GrDrawVerticesBatch::onCombineIfPossible(GrOp* t, const GrCaps& caps) {
GrDrawVerticesBatch* that = t->cast<GrDrawVerticesBatch>();
if (!GrPipeline::CanCombine(*this->pipeline(), this->bounds(), *that->pipeline(),
diff --git a/src/gpu/batches/GrDrawVerticesBatch.h b/src/gpu/batches/GrDrawVerticesBatch.h
index d71fb0daba..821321a771 100644
--- a/src/gpu/batches/GrDrawVerticesBatch.h
+++ b/src/gpu/batches/GrDrawVerticesBatch.h
@@ -20,7 +20,7 @@ struct GrInitInvariantOutput;
class GrDrawVerticesBatch : public GrVertexBatch {
public:
- DEFINE_BATCH_CLASS_ID
+ DEFINE_OP_CLASS_ID
GrDrawVerticesBatch(GrColor color, GrPrimitiveType primitiveType,
@@ -55,7 +55,7 @@ private:
kPoints_GrPrimitiveType == fPrimitiveType;
}
- bool onCombineIfPossible(GrBatch* t, const GrCaps&) override;
+ bool onCombineIfPossible(GrOp* t, const GrCaps&) override;
struct Mesh {
GrColor fColor; // Only used if there are no per-vertex colors
diff --git a/src/gpu/batches/GrMSAAPathRenderer.cpp b/src/gpu/batches/GrMSAAPathRenderer.cpp
index ab98a15055..77d4e6b0bf 100644
--- a/src/gpu/batches/GrMSAAPathRenderer.cpp
+++ b/src/gpu/batches/GrMSAAPathRenderer.cpp
@@ -216,7 +216,7 @@ private:
class MSAAPathBatch : public GrVertexBatch {
public:
- DEFINE_BATCH_CLASS_ID
+ DEFINE_OP_CLASS_ID
MSAAPathBatch(GrColor color, const SkPath& path, const SkMatrix& viewMatrix,
const SkRect& devBounds)
@@ -447,7 +447,7 @@ private:
}
}
- bool onCombineIfPossible(GrBatch* t, const GrCaps& caps) override {
+ bool onCombineIfPossible(GrOp* t, const GrCaps& caps) override {
MSAAPathBatch* that = t->cast<MSAAPathBatch>();
if (!GrPipeline::CanCombine(*this->pipeline(), this->bounds(), *that->pipeline(),
that->bounds(), caps)) {
diff --git a/src/gpu/batches/GrNinePatch.cpp b/src/gpu/batches/GrNinePatch.cpp
index 2e1809d754..7ca8d6840a 100644
--- a/src/gpu/batches/GrNinePatch.cpp
+++ b/src/gpu/batches/GrNinePatch.cpp
@@ -25,7 +25,7 @@ static sk_sp<GrGeometryProcessor> create_gp(bool readsCoverage) {
class GrNonAANinePatchBatch : public GrVertexBatch {
public:
- DEFINE_BATCH_CLASS_ID
+ DEFINE_OP_CLASS_ID
static const int kVertsPerRect = 4;
static const int kIndicesPerRect = 6;
@@ -143,7 +143,7 @@ private:
fOverrides = overrides;
}
- bool onCombineIfPossible(GrBatch* t, const GrCaps& caps) override {
+ bool onCombineIfPossible(GrOp* t, const GrCaps& caps) override {
GrNonAANinePatchBatch* that = t->cast<GrNonAANinePatchBatch>();
if (!GrPipeline::CanCombine(*this->pipeline(), this->bounds(), *that->pipeline(),
that->bounds(), caps)) {
diff --git a/src/gpu/batches/GrNonAAFillRectBatch.cpp b/src/gpu/batches/GrNonAAFillRectBatch.cpp
index 0e66134795..8f45a8f93e 100644
--- a/src/gpu/batches/GrNonAAFillRectBatch.cpp
+++ b/src/gpu/batches/GrNonAAFillRectBatch.cpp
@@ -73,7 +73,7 @@ static void tesselate(intptr_t vertices,
class NonAAFillRectBatch : public GrVertexBatch {
public:
- DEFINE_BATCH_CLASS_ID
+ DEFINE_OP_CLASS_ID
NonAAFillRectBatch(GrColor color, const SkMatrix& viewMatrix, const SkRect& rect,
const SkRect* localRect, const SkMatrix* localMatrix)
@@ -159,7 +159,7 @@ private:
helper.recordDraw(target, gp.get());
}
- bool onCombineIfPossible(GrBatch* t, const GrCaps& caps) override {
+ bool onCombineIfPossible(GrOp* t, const GrCaps& caps) override {
NonAAFillRectBatch* that = t->cast<NonAAFillRectBatch>();
if (!GrPipeline::CanCombine(*this->pipeline(), this->bounds(), *that->pipeline(),
that->bounds(), caps)) {
diff --git a/src/gpu/batches/GrNonAAFillRectPerspectiveBatch.cpp b/src/gpu/batches/GrNonAAFillRectPerspectiveBatch.cpp
index 9a946c80f6..2dcd3e21b4 100644
--- a/src/gpu/batches/GrNonAAFillRectPerspectiveBatch.cpp
+++ b/src/gpu/batches/GrNonAAFillRectPerspectiveBatch.cpp
@@ -92,7 +92,7 @@ static void tesselate(intptr_t vertices,
// We handle perspective in the local matrix or viewmatrix with special batches
class GrNonAAFillRectPerspectiveBatch : public GrVertexBatch {
public:
- DEFINE_BATCH_CLASS_ID
+ DEFINE_OP_CLASS_ID
GrNonAAFillRectPerspectiveBatch(GrColor color, const SkMatrix& viewMatrix, const SkRect& rect,
const SkRect* localRect, const SkMatrix* localMatrix)
@@ -187,7 +187,7 @@ private:
helper.recordDraw(target, gp.get());
}
- bool onCombineIfPossible(GrBatch* t, const GrCaps& caps) override {
+ bool onCombineIfPossible(GrOp* t, const GrCaps& caps) override {
GrNonAAFillRectPerspectiveBatch* that = t->cast<GrNonAAFillRectPerspectiveBatch>();
if (!GrPipeline::CanCombine(*this->pipeline(), this->bounds(), *that->pipeline(),
that->bounds(), caps)) {
diff --git a/src/gpu/batches/GrNonAAStrokeRectBatch.cpp b/src/gpu/batches/GrNonAAStrokeRectBatch.cpp
index 9139dab9fe..2cc1a7d22c 100644
--- a/src/gpu/batches/GrNonAAStrokeRectBatch.cpp
+++ b/src/gpu/batches/GrNonAAStrokeRectBatch.cpp
@@ -47,7 +47,7 @@ inline static bool allowed_stroke(const SkStrokeRec& stroke) {
class NonAAStrokeRectBatch : public GrVertexBatch {
public:
- DEFINE_BATCH_CLASS_ID
+ DEFINE_OP_CLASS_ID
const char* name() const override { return "NonAAStrokeRectBatch"; }
@@ -167,7 +167,7 @@ private:
fOverrides = overrides;
}
- bool onCombineIfPossible(GrBatch* t, const GrCaps&) override {
+ bool onCombineIfPossible(GrOp* t, const GrCaps&) override {
// NonAA stroke rects cannot batch right now
// TODO make these batchable
return false;
diff --git a/src/gpu/batches/GrBatch.cpp b/src/gpu/batches/GrOp.cpp
index 6755cf94f7..1d86419c77 100644
--- a/src/gpu/batches/GrBatch.cpp
+++ b/src/gpu/batches/GrOp.cpp
@@ -5,14 +5,14 @@
* found in the LICENSE file.
*/
-#include "GrBatch.h"
+#include "GrOp.h"
#include "GrMemoryPool.h"
#include "SkSpinlock.h"
-// TODO I noticed a small benefit to using a larger exclusive pool for batches. Its very small,
-// but seems to be mostly consistent. There is a lot in flux right now, but we should really
-// revisit this when batch is everywhere
+// TODO I noticed a small benefit to using a larger exclusive pool for ops. It's very small, but
+// seems to be mostly consistent. There is a lot in flux right now, but we should really revisit
+// this.
// We use a global pool protected by a mutex(spinlock). Chrome may use the same GrContext on
@@ -20,7 +20,7 @@
// memory barrier between accesses of a context on different threads. Also, there may be multiple
// GrContexts and those contexts may be in use concurrently on different threads.
namespace {
-static SkSpinlock gBatchSpinlock;
+static SkSpinlock gOpPoolSpinLock;
class MemoryPoolAccessor {
public:
@@ -29,8 +29,8 @@ public:
MemoryPoolAccessor() {}
~MemoryPoolAccessor() {}
#else
- MemoryPoolAccessor() { gBatchSpinlock.acquire(); }
- ~MemoryPoolAccessor() { gBatchSpinlock.release(); }
+ MemoryPoolAccessor() { gOpPoolSpinLock.acquire(); }
+ ~MemoryPoolAccessor() { gOpPoolSpinLock.release(); }
#endif
GrMemoryPool* pool() const {
@@ -40,24 +40,24 @@ public:
};
}
-int32_t GrBatch::gCurrBatchClassID = GrBatch::kIllegalBatchID;
+int32_t GrOp::gCurrOpClassID = GrOp::kIllegalOpID;
-int32_t GrBatch::gCurrBatchUniqueID = GrBatch::kIllegalBatchID;
+int32_t GrOp::gCurrOpUniqueID = GrOp::kIllegalOpID;
-void* GrBatch::operator new(size_t size) {
+void* GrOp::operator new(size_t size) {
return MemoryPoolAccessor().pool()->allocate(size);
}
-void GrBatch::operator delete(void* target) {
+void GrOp::operator delete(void* target) {
return MemoryPoolAccessor().pool()->release(target);
}
-GrBatch::GrBatch(uint32_t classID)
+GrOp::GrOp(uint32_t classID)
: fClassID(classID)
- , fUniqueID(kIllegalBatchID) {
+ , fUniqueID(kIllegalOpID) {
SkASSERT(classID == SkToU32(fClassID));
SkDEBUGCODE(fUsed = false;)
SkDEBUGCODE(fBoundsFlags = kUninitialized_BoundsFlag);
}
-GrBatch::~GrBatch() {}
+GrOp::~GrOp() {}
diff --git a/src/gpu/batches/GrBatch.h b/src/gpu/batches/GrOp.h
index e4065ecace..79fbfa4b90 100644
--- a/src/gpu/batches/GrBatch.h
+++ b/src/gpu/batches/GrOp.h
@@ -22,48 +22,47 @@ class GrGpuCommandBuffer;
class GrBatchFlushState;
/**
- * GrBatch is the base class for all Ganesh deferred geometry generators. To facilitate
- * reorderable batching, Ganesh does not generate geometry inline with draw calls. Instead, it
- * captures the arguments to the draw and then generates the geometry on demand. This gives GrBatch
- * subclasses complete freedom to decide how / what they can batch.
+ * GrOp is the base class for all Ganesh deferred GPU operations. To facilitate reorderable
+ * batching, Ganesh does not generate geometry inline with draw calls. Instead, it captures the
+ * arguments to the draw and then generates the geometry on demand. This gives GrOp subclasses
+ * complete freedom to decide how/what they can batch.
*
- * Batches are created when GrContext processes a draw call. Batches of the same subclass may be
- * merged using combineIfPossible. When two batches merge, one takes on the union of the data
- * and the other is left empty. The merged batch becomes responsible for drawing the data from both
- * the original batches.
+ * Ops of the same subclass may be merged using combineIfPossible. When two ops merge, one
+ * takes on the union of the data and the other is left empty. The merged op becomes responsible
+ * for drawing the data from both the original ops.
*
* If there are any possible optimizations which might require knowing more about the full state of
- * the draw, ie whether or not the GrBatch is allowed to tweak alpha for coverage, then this
- * information will be communicated to the GrBatch prior to geometry generation.
+ * the draw, e.g. whether or not the GrOp is allowed to tweak alpha for coverage, then this
+ * information will be communicated to the GrOp prior to geometry generation.
*
- * The bounds of the batch must contain all the vertices in device space *irrespective* of the clip.
+ * The bounds of the op must contain all the vertices in device space *irrespective* of the clip.
* The bounds are used in determining which clip elements must be applied and thus the bounds cannot
* in turn depend upon the clip.
*/
-#define GR_BATCH_SPEW 0
-#if GR_BATCH_SPEW
- #define GrBATCH_INFO(...) SkDebugf(__VA_ARGS__)
- #define GrBATCH_SPEW(code) code
+#define GR_OP_SPEW 0
+#if GR_OP_SPEW
+ #define GrOP_SPEW(code) code
+ #define GrOP_INFO(...) SkDebugf(__VA_ARGS__)
#else
- #define GrBATCH_SPEW(code)
- #define GrBATCH_INFO(...)
+ #define GrOP_SPEW(code)
+ #define GrOP_INFO(...)
#endif
// A helper macro to generate a class static id
-#define DEFINE_BATCH_CLASS_ID \
+#define DEFINE_OP_CLASS_ID \
static uint32_t ClassID() { \
- static uint32_t kClassID = GenBatchClassID(); \
+ static uint32_t kClassID = GenOpClassID(); \
return kClassID; \
}
-class GrBatch : public GrNonAtomicRef<GrBatch> {
+class GrOp : public GrNonAtomicRef<GrOp> {
public:
- GrBatch(uint32_t classID);
- virtual ~GrBatch();
+ GrOp(uint32_t classID);
+ virtual ~GrOp();
virtual const char* name() const = 0;
- bool combineIfPossible(GrBatch* that, const GrCaps& caps) {
+ bool combineIfPossible(GrOp* that, const GrCaps& caps) {
if (this->classID() != that->classID()) {
return false;
}
@@ -97,7 +96,7 @@ public:
}
/**
- * Helper for safely down-casting to a GrBatch subclass
+ * Helper for safely down-casting to a GrOp subclass
*/
template <typename T> const T& cast() const {
SkASSERT(T::ClassID() == this->classID());
@@ -109,40 +108,40 @@ public:
return static_cast<T*>(this);
}
- uint32_t classID() const { SkASSERT(kIllegalBatchID != fClassID); return fClassID; }
+ uint32_t classID() const { SkASSERT(kIllegalOpID != fClassID); return fClassID; }
// We lazily initialize the uniqueID because currently the only user is GrAuditTrail
uint32_t uniqueID() const {
- if (kIllegalBatchID == fUniqueID) {
- fUniqueID = GenBatchID();
+ if (kIllegalOpID == fUniqueID) {
+ fUniqueID = GenOpID();
}
return fUniqueID;
}
SkDEBUGCODE(bool isUsed() const { return fUsed; })
- /** Called prior to drawing. The batch should perform any resource creation necessary to
+ /** Called prior to drawing. The op should perform any resource creation necessary to
to quickly issue its draw when draw is called. */
void prepare(GrBatchFlushState* state) { this->onPrepare(state); }
- /** Issues the batches commands to GrGpu. */
+ /** Issues the op's commands to GrGpu. */
void draw(GrBatchFlushState* state, const SkRect& bounds) { this->onDraw(state, bounds); }
/** Used to block batching across render target changes. Remove this once we store
- GrBatches for different RTs in different targets. */
+ GrOps for different RTs in different targets. */
// TODO: this needs to be updated to return GrSurfaceProxy::UniqueID
virtual GrGpuResource::UniqueID renderTargetUniqueID() const = 0;
- /** Used for spewing information about batches when debugging. */
+ /** Used for spewing information about ops when debugging. */
virtual SkString dumpInfo() const {
SkString string;
- string.appendf("BatchBounds: [L: %.2f, T: %.2f, R: %.2f, B: %.2f]\n",
+ string.appendf("OpBounds: [L: %.2f, T: %.2f, R: %.2f, B: %.2f]\n",
fBounds.fLeft, fBounds.fTop, fBounds.fRight, fBounds.fBottom);
return string;
}
protected:
/**
- * Indicates that the batch will produce geometry that extends beyond its bounds for the
+ * Indicates that the op will produce geometry that extends beyond its bounds for the
* purpose of ensuring that the fragment shader runs on partially covered pixels for
* non-MSAA antialiasing.
*/
@@ -151,8 +150,8 @@ protected:
kNo
};
/**
- * Indicates that the geometry represented by the batch has zero area (i.e. it is hairline
- * or points).
+ * Indicates that the geometry represented by the op has zero area (e.g. it is hairline or
+ * points).
*/
enum class IsZeroArea {
kYes,
@@ -168,7 +167,7 @@ protected:
this->setBoundsFlags(aabloat, zeroArea);
}
- void joinBounds(const GrBatch& that) {
+ void joinBounds(const GrOp& that) {
if (that.hasAABloat()) {
fBoundsFlags |= kAABloat_BoundsFlag;
}
@@ -178,15 +177,15 @@ protected:
return fBounds.joinPossiblyEmptyRect(that.fBounds);
}
- void replaceBounds(const GrBatch& that) {
+ void replaceBounds(const GrOp& that) {
fBounds = that.fBounds;
fBoundsFlags = that.fBoundsFlags;
}
- static uint32_t GenBatchClassID() { return GenID(&gCurrBatchClassID); }
+ static uint32_t GenOpClassID() { return GenID(&gCurrOpClassID); }
private:
- virtual bool onCombineIfPossible(GrBatch*, const GrCaps& caps) = 0;
+ virtual bool onCombineIfPossible(GrOp*, const GrCaps& caps) = 0;
virtual void onPrepare(GrBatchFlushState*) = 0;
virtual void onDraw(GrBatchFlushState*, const SkRect& bounds) = 0;
@@ -196,7 +195,7 @@ private:
// 1 to the returned value.
uint32_t id = static_cast<uint32_t>(sk_atomic_inc(idCounter)) + 1;
if (!id) {
- SkFAIL("This should never wrap as it should only be called once for each GrBatch "
+ SkFAIL("This should never wrap as it should only be called once for each GrOp "
"subclass.");
}
return id;
@@ -209,7 +208,7 @@ private:
}
enum {
- kIllegalBatchID = 0,
+ kIllegalOpID = 0,
};
enum BoundsFlags {
@@ -222,12 +221,12 @@ private:
const uint16_t fClassID;
uint16_t fBoundsFlags;
- static uint32_t GenBatchID() { return GenID(&gCurrBatchUniqueID); }
+ static uint32_t GenOpID() { return GenID(&gCurrOpUniqueID); }
mutable uint32_t fUniqueID;
SkRect fBounds;
- static int32_t gCurrBatchUniqueID;
- static int32_t gCurrBatchClassID;
+ static int32_t gCurrOpUniqueID;
+ static int32_t gCurrOpClassID;
};
#endif
diff --git a/src/gpu/batches/GrPLSPathRenderer.cpp b/src/gpu/batches/GrPLSPathRenderer.cpp
index f31c323581..924e2a8bc6 100644
--- a/src/gpu/batches/GrPLSPathRenderer.cpp
+++ b/src/gpu/batches/GrPLSPathRenderer.cpp
@@ -765,7 +765,7 @@ bool GrPLSPathRenderer::onCanDrawPath(const CanDrawPathArgs& args) const {
class PLSPathBatch : public GrVertexBatch {
public:
- DEFINE_BATCH_CLASS_ID
+ DEFINE_OP_CLASS_ID
PLSPathBatch(GrColor color, const SkPath& path, const SkMatrix& viewMatrix)
: INHERITED(ClassID())
, fColor(color)
@@ -915,7 +915,7 @@ public:
}
private:
- bool onCombineIfPossible(GrBatch* t, const GrCaps& caps) override {
+ bool onCombineIfPossible(GrOp* t, const GrCaps& caps) override {
return false;
}
diff --git a/src/gpu/batches/GrRectBatchFactory.h b/src/gpu/batches/GrRectBatchFactory.h
index c9b6843596..5ae1934d49 100644
--- a/src/gpu/batches/GrRectBatchFactory.h
+++ b/src/gpu/batches/GrRectBatchFactory.h
@@ -17,7 +17,7 @@
#include "GrPaint.h"
#include "SkMatrix.h"
-class GrBatch;
+class GrOp;
struct SkRect;
class SkStrokeRec;
diff --git a/src/gpu/batches/GrRegionBatch.cpp b/src/gpu/batches/GrRegionBatch.cpp
index ae09a5f290..e1eaf2802e 100644
--- a/src/gpu/batches/GrRegionBatch.cpp
+++ b/src/gpu/batches/GrRegionBatch.cpp
@@ -52,7 +52,7 @@ static void tesselate_region(intptr_t vertices,
class RegionBatch : public GrVertexBatch {
public:
- DEFINE_BATCH_CLASS_ID
+ DEFINE_OP_CLASS_ID
RegionBatch(GrColor color, const SkMatrix& viewMatrix, const SkRegion& region)
: INHERITED(ClassID())
@@ -130,7 +130,7 @@ private:
helper.recordDraw(target, gp.get());
}
- bool onCombineIfPossible(GrBatch* t, const GrCaps& caps) override {
+ bool onCombineIfPossible(GrOp* t, const GrCaps& caps) override {
RegionBatch* that = t->cast<RegionBatch>();
if (!GrPipeline::CanCombine(*this->pipeline(), this->bounds(), *that->pipeline(),
that->bounds(), caps)) {
diff --git a/src/gpu/batches/GrShadowRRectBatch.cpp b/src/gpu/batches/GrShadowRRectBatch.cpp
index c4b56b8e18..6ae6e62dc1 100755
--- a/src/gpu/batches/GrShadowRRectBatch.cpp
+++ b/src/gpu/batches/GrShadowRRectBatch.cpp
@@ -61,7 +61,7 @@ static const uint16_t* circle_type_to_indices(bool stroked) {
class ShadowCircleBatch : public GrVertexBatch {
public:
- DEFINE_BATCH_CLASS_ID
+ DEFINE_OP_CLASS_ID
static GrDrawBatch* Create(GrColor color, const SkMatrix& viewMatrix, SkPoint center,
SkScalar radius, SkScalar blurRadius, const GrStyle& style) {
@@ -367,7 +367,7 @@ private:
target->draw(gp.get(), mesh);
}
- bool onCombineIfPossible(GrBatch* t, const GrCaps& caps) override {
+ bool onCombineIfPossible(GrOp* t, const GrCaps& caps) override {
ShadowCircleBatch* that = t->cast<ShadowCircleBatch>();
if (!GrPipeline::CanCombine(*this->pipeline(), this->bounds(), *that->pipeline(),
that->bounds(), caps)) {
@@ -506,7 +506,7 @@ static const uint16_t* rrect_type_to_indices(RRectType type) {
class ShadowCircularRRectBatch : public GrVertexBatch {
public:
- DEFINE_BATCH_CLASS_ID
+ DEFINE_OP_CLASS_ID
// A devStrokeWidth <= 0 indicates a fill only. If devStrokeWidth > 0 then strokeOnly indicates
// whether the rrect is only stroked or stroked and filled.
@@ -796,7 +796,7 @@ private:
target->draw(gp.get(), mesh);
}
- bool onCombineIfPossible(GrBatch* t, const GrCaps& caps) override {
+ bool onCombineIfPossible(GrOp* t, const GrCaps& caps) override {
ShadowCircularRRectBatch* that = t->cast<ShadowCircularRRectBatch>();
if (!GrPipeline::CanCombine(*this->pipeline(), this->bounds(), *that->pipeline(),
that->bounds(), caps)) {
diff --git a/src/gpu/batches/GrStencilPathBatch.h b/src/gpu/batches/GrStencilPathBatch.h
index b95c75a5ce..293da124ad 100644
--- a/src/gpu/batches/GrStencilPathBatch.h
+++ b/src/gpu/batches/GrStencilPathBatch.h
@@ -8,18 +8,18 @@
#ifndef GrStencilPathBatch_DEFINED
#define GrStencilPathBatch_DEFINED
-#include "GrBatch.h"
#include "GrBatchFlushState.h"
#include "GrGpu.h"
+#include "GrOp.h"
#include "GrPath.h"
#include "GrPathRendering.h"
#include "GrRenderTarget.h"
-class GrStencilPathBatch final : public GrBatch {
+class GrStencilPathBatch final : public GrOp {
public:
- DEFINE_BATCH_CLASS_ID
+ DEFINE_OP_CLASS_ID
- static GrBatch* Create(const SkMatrix& viewMatrix,
+ static GrOp* Create(const SkMatrix& viewMatrix,
bool useHWAA,
GrPathRendering::FillType fillType,
bool hasStencilClip,
@@ -64,7 +64,7 @@ private:
this->setBounds(path->getBounds(), HasAABloat::kNo, IsZeroArea::kNo);
}
- bool onCombineIfPossible(GrBatch* t, const GrCaps& caps) override { return false; }
+ bool onCombineIfPossible(GrOp* t, const GrCaps& caps) override { return false; }
void onPrepare(GrBatchFlushState*) override {}
@@ -81,7 +81,7 @@ private:
GrPendingIOResource<GrRenderTarget, kWrite_GrIOType> fRenderTarget;
GrPendingIOResource<const GrPath, kRead_GrIOType> fPath;
- typedef GrBatch INHERITED;
+ typedef GrOp INHERITED;
};
#endif
diff --git a/src/gpu/batches/GrTessellatingPathRenderer.cpp b/src/gpu/batches/GrTessellatingPathRenderer.cpp
index 970af5cd5c..46dd2f751e 100644
--- a/src/gpu/batches/GrTessellatingPathRenderer.cpp
+++ b/src/gpu/batches/GrTessellatingPathRenderer.cpp
@@ -162,7 +162,7 @@ bool GrTessellatingPathRenderer::onCanDrawPath(const CanDrawPathArgs& args) cons
class TessellatingPathBatch : public GrVertexBatch {
public:
- DEFINE_BATCH_CLASS_ID
+ DEFINE_OP_CLASS_ID
static GrDrawBatch* Create(const GrColor& color,
const GrShape& shape,
@@ -324,7 +324,7 @@ private:
target->draw(gp, mesh);
}
- bool onCombineIfPossible(GrBatch*, const GrCaps&) override { return false; }
+ bool onCombineIfPossible(GrOp*, const GrCaps&) override { return false; }
TessellatingPathBatch(const GrColor& color,
const GrShape& shape,
diff --git a/src/gpu/batches/GrTestBatch.h b/src/gpu/batches/GrTestBatch.h
index 5bac48ac01..9d76c439ec 100644
--- a/src/gpu/batches/GrTestBatch.h
+++ b/src/gpu/batches/GrTestBatch.h
@@ -55,9 +55,7 @@ protected:
const Optimizations optimizations() const { return fOptimizations; }
private:
- bool onCombineIfPossible(GrBatch* t, const GrCaps&) override {
- return false;
- }
+ bool onCombineIfPossible(GrOp*, const GrCaps&) override { return false; }
GrColor fColor;
Optimizations fOptimizations;
diff --git a/src/gpu/effects/GrDashingEffect.cpp b/src/gpu/effects/GrDashingEffect.cpp
index d9cb72f192..43a28ecc05 100644
--- a/src/gpu/effects/GrDashingEffect.cpp
+++ b/src/gpu/effects/GrDashingEffect.cpp
@@ -240,7 +240,7 @@ static sk_sp<GrGeometryProcessor> make_dash_gp(GrColor,
class DashBatch : public GrVertexBatch {
public:
- DEFINE_BATCH_CLASS_ID
+ DEFINE_OP_CLASS_ID
struct Geometry {
SkMatrix fViewMatrix;
SkMatrix fSrcRotInv;
@@ -646,7 +646,7 @@ private:
helper.recordDraw(target, gp.get());
}
- bool onCombineIfPossible(GrBatch* t, const GrCaps& caps) override {
+ bool onCombineIfPossible(GrOp* t, const GrCaps& caps) override {
DashBatch* that = t->cast<DashBatch>();
if (!GrPipeline::CanCombine(*this->pipeline(), this->bounds(), *that->pipeline(),
that->bounds(), caps)) {
diff --git a/src/gpu/instanced/GLInstancedRendering.cpp b/src/gpu/instanced/GLInstancedRendering.cpp
index 49b059d3af..d680189dc0 100644
--- a/src/gpu/instanced/GLInstancedRendering.cpp
+++ b/src/gpu/instanced/GLInstancedRendering.cpp
@@ -17,7 +17,7 @@ namespace gr_instanced {
class GLInstancedRendering::GLBatch : public InstancedRendering::Batch {
public:
- DEFINE_BATCH_CLASS_ID
+ DEFINE_OP_CLASS_ID
GLBatch(GLInstancedRendering* instRendering) : INHERITED(ClassID(), instRendering) {}
int numGLCommands() const { return 1 + fNumChangesInGeometry; }
diff --git a/src/gpu/instanced/InstancedRendering.cpp b/src/gpu/instanced/InstancedRendering.cpp
index 99a749ad04..a46f24bfd8 100644
--- a/src/gpu/instanced/InstancedRendering.cpp
+++ b/src/gpu/instanced/InstancedRendering.cpp
@@ -382,7 +382,7 @@ void InstancedRendering::Batch::initBatchTracker(const GrXPOverridesForBatch& ov
fIsTracked = true;
}
-bool InstancedRendering::Batch::onCombineIfPossible(GrBatch* other, const GrCaps& caps) {
+bool InstancedRendering::Batch::onCombineIfPossible(GrOp* other, const GrCaps& caps) {
Batch* that = static_cast<Batch*>(other);
SkASSERT(fInstancedRendering == that->fInstancedRendering);
SkASSERT(fTailDraw);
diff --git a/src/gpu/instanced/InstancedRendering.h b/src/gpu/instanced/InstancedRendering.h
index faa5471f44..c94ca8aec7 100644
--- a/src/gpu/instanced/InstancedRendering.h
+++ b/src/gpu/instanced/InstancedRendering.h
@@ -137,7 +137,7 @@ protected:
Batch(uint32_t classID, InstancedRendering* ir);
void initBatchTracker(const GrXPOverridesForBatch&) override;
- bool onCombineIfPossible(GrBatch* other, const GrCaps& caps) override;
+ bool onCombineIfPossible(GrOp* other, const GrCaps& caps) override;
void computePipelineOptimizations(GrInitInvariantOutput* color,
GrInitInvariantOutput* coverage,
diff --git a/tests/GrPorterDuffTest.cpp b/tests/GrPorterDuffTest.cpp
index 53aeb9a1ca..f471937f96 100644
--- a/tests/GrPorterDuffTest.cpp
+++ b/tests/GrPorterDuffTest.cpp
@@ -1092,7 +1092,7 @@ static void test_color_opaque_no_coverage(skiatest::Reporter* reporter, const Gr
static void test_lcd_coverage_fallback_case(skiatest::Reporter* reporter, const GrCaps& caps) {
class TestLCDCoverageBatch: public GrVertexBatch {
public:
- DEFINE_BATCH_CLASS_ID
+ DEFINE_OP_CLASS_ID
TestLCDCoverageBatch() : INHERITED(ClassID()) {}
@@ -1106,7 +1106,7 @@ static void test_lcd_coverage_fallback_case(skiatest::Reporter* reporter, const
const char* name() const override { return "Test LCD Text Batch"; }
void initBatchTracker(const GrXPOverridesForBatch&) override {}
- bool onCombineIfPossible(GrBatch*, const GrCaps&) override { return false; }
+ bool onCombineIfPossible(GrOp*, const GrCaps&) override { return false; }
void onPrepareDraws(Target*) const override {}
typedef GrVertexBatch INHERITED;
diff --git a/tests/PrimitiveProcessorTest.cpp b/tests/PrimitiveProcessorTest.cpp
index 9a338a8af5..68234e6304 100644
--- a/tests/PrimitiveProcessorTest.cpp
+++ b/tests/PrimitiveProcessorTest.cpp
@@ -27,7 +27,7 @@
namespace {
class Batch : public GrVertexBatch {
public:
- DEFINE_BATCH_CLASS_ID
+ DEFINE_OP_CLASS_ID
const char* name() const override { return "Dummy Batch"; }
void computePipelineOptimizations(GrInitInvariantOutput* color,
@@ -46,7 +46,7 @@ public:
}
private:
- bool onCombineIfPossible(GrBatch*, const GrCaps&) override { return false; }
+ bool onCombineIfPossible(GrOp*, const GrCaps&) override { return false; }
void onPrepareDraws(Target* target) const override {
class GP : public GrGeometryProcessor {
public: