From 25a880960a9a689a745a01071ecba3fe494b5940 Mon Sep 17 00:00:00 2001
From: Brian Salomon
Date: Thu, 1 Dec 2016 09:36:50 -0500
Subject: Rename GrBatch to GrOp

Change-Id: I27b6324f8040899fafeda23ca524bc54a4dbf090
Reviewed-on: https://skia-review.googlesource.com/5392
Commit-Queue: Brian Salomon
Reviewed-by: Brian Osman
---
 src/gpu/batches/GrAAConvexPathRenderer.cpp         |   4 +-
 src/gpu/batches/GrAADistanceFieldPathRenderer.cpp  |   4 +-
 src/gpu/batches/GrAAFillRectBatch.cpp              |   4 +-
 src/gpu/batches/GrAAFillRectBatch.h                |   2 +-
 src/gpu/batches/GrAAHairLinePathRenderer.cpp       |   4 +-
 .../batches/GrAALinearizingConvexPathRenderer.cpp  |   4 +-
 src/gpu/batches/GrAAStrokeRectBatch.cpp            |   6 +-
 src/gpu/batches/GrAAStrokeRectBatch.h              |   1 -
 src/gpu/batches/GrAnalyticRectBatch.cpp            |   4 +-
 src/gpu/batches/GrAtlasTextBatch.cpp               |   2 +-
 src/gpu/batches/GrAtlasTextBatch.h                 |   4 +-
 src/gpu/batches/GrBatch.cpp                        |  63 ------
 src/gpu/batches/GrBatch.h                          | 233 ---------------------
 src/gpu/batches/GrClearBatch.h                     |  10 +-
 src/gpu/batches/GrClearStencilClipBatch.h          |  10 +-
 src/gpu/batches/GrCopySurfaceBatch.cpp             |   2 +-
 src/gpu/batches/GrCopySurfaceBatch.h               |  12 +-
 src/gpu/batches/GrDefaultPathRenderer.cpp          |   4 +-
 src/gpu/batches/GrDiscardBatch.h                   |  10 +-
 src/gpu/batches/GrDrawAtlasBatch.cpp               |   2 +-
 src/gpu/batches/GrDrawAtlasBatch.h                 |   4 +-
 src/gpu/batches/GrDrawBatch.h                      |   6 +-
 src/gpu/batches/GrDrawPathBatch.cpp                |   2 +-
 src/gpu/batches/GrDrawPathBatch.h                  |   8 +-
 src/gpu/batches/GrDrawVerticesBatch.cpp            |   2 +-
 src/gpu/batches/GrDrawVerticesBatch.h              |   4 +-
 src/gpu/batches/GrMSAAPathRenderer.cpp             |   4 +-
 src/gpu/batches/GrNinePatch.cpp                    |   4 +-
 src/gpu/batches/GrNonAAFillRectBatch.cpp           |   4 +-
 .../batches/GrNonAAFillRectPerspectiveBatch.cpp    |   4 +-
 src/gpu/batches/GrNonAAStrokeRectBatch.cpp         |   4 +-
 src/gpu/batches/GrOp.cpp                           |  63 ++++++
 src/gpu/batches/GrOp.h                             | 232 ++++++++++++++++++++
 src/gpu/batches/GrPLSPathRenderer.cpp              |   4 +-
 src/gpu/batches/GrRectBatchFactory.h               |   2 +-
 src/gpu/batches/GrRegionBatch.cpp                  |   4 +-
 src/gpu/batches/GrShadowRRectBatch.cpp             |   8 +-
 src/gpu/batches/GrStencilPathBatch.h               |  12 +-
 src/gpu/batches/GrTessellatingPathRenderer.cpp     |   4 +-
 src/gpu/batches/GrTestBatch.h                      |   4 +-
 40 files changed, 380 insertions(+), 384 deletions(-)
 delete mode 100644 src/gpu/batches/GrBatch.cpp
 delete mode 100644 src/gpu/batches/GrBatch.h
 create mode 100644 src/gpu/batches/GrOp.cpp
 create mode 100644 src/gpu/batches/GrOp.h
(limited to 'src/gpu/batches')

diff --git a/src/gpu/batches/GrAAConvexPathRenderer.cpp b/src/gpu/batches/GrAAConvexPathRenderer.cpp
index 1be84c7ca0..8f100503e6 100644
--- a/src/gpu/batches/GrAAConvexPathRenderer.cpp
+++ b/src/gpu/batches/GrAAConvexPathRenderer.cpp
@@ -734,7 +734,7 @@ static sk_sp<GrGeometryProcessor> create_fill_gp(bool tweakAlphaForCoverage,
 
 class AAConvexPathBatch : public GrVertexBatch {
 public:
-    DEFINE_BATCH_CLASS_ID
+    DEFINE_OP_CLASS_ID
 
     AAConvexPathBatch(GrColor color, const SkMatrix& viewMatrix, const SkPath& path)
             : INHERITED(ClassID()) {
         fGeoData.emplace_back(Geometry{color, viewMatrix, path});
@@ -932,7 +932,7 @@ private:
         }
     }
 
-    bool onCombineIfPossible(GrBatch* t, const GrCaps& caps) override {
+    bool onCombineIfPossible(GrOp* t, const GrCaps& caps) override {
         AAConvexPathBatch* that = t->cast<AAConvexPathBatch>();
         if (!GrPipeline::CanCombine(*this->pipeline(), this->bounds(),
                                     *that->pipeline(), that->bounds(), caps)) {
diff --git a/src/gpu/batches/GrAADistanceFieldPathRenderer.cpp b/src/gpu/batches/GrAADistanceFieldPathRenderer.cpp
index e91192733c..8ece0c9db6 100644
--- a/src/gpu/batches/GrAADistanceFieldPathRenderer.cpp
+++ b/src/gpu/batches/GrAADistanceFieldPathRenderer.cpp
@@ -120,7 +120,7 @@ static const SkScalar kAntiAliasPad = 1.0f;
 
 class AADistanceFieldPathBatch : public GrVertexBatch {
 public:
-    DEFINE_BATCH_CLASS_ID
+    DEFINE_OP_CLASS_ID
 
     typedef GrAADistanceFieldPathRenderer::ShapeData ShapeData;
     typedef SkTDynamicHash<ShapeData, ShapeData::Key> ShapeCache;
@@ -480,7 +480,7 @@ private:
     const SkMatrix& viewMatrix() const { return fBatch.fViewMatrix; }
     bool usesLocalCoords() const { return fBatch.fUsesLocalCoords; }
 
-    bool onCombineIfPossible(GrBatch* t, const GrCaps& caps) override {
+    bool onCombineIfPossible(GrOp* t, const GrCaps& caps) override {
         AADistanceFieldPathBatch* that = t->cast<AADistanceFieldPathBatch>();
         if (!GrPipeline::CanCombine(*this->pipeline(), this->bounds(),
                                     *that->pipeline(), that->bounds(), caps)) {
diff --git a/src/gpu/batches/GrAAFillRectBatch.cpp b/src/gpu/batches/GrAAFillRectBatch.cpp
index 46a5d50955..c8804daacf 100644
--- a/src/gpu/batches/GrAAFillRectBatch.cpp
+++ b/src/gpu/batches/GrAAFillRectBatch.cpp
@@ -159,7 +159,7 @@ static void generate_aa_fill_rect_geometry(intptr_t verts,
 
 class AAFillRectBatch : public GrVertexBatch {
 public:
-    DEFINE_BATCH_CLASS_ID
+    DEFINE_OP_CLASS_ID
 
     AAFillRectBatch(GrColor color,
                     const SkMatrix& viewMatrix,
@@ -266,7 +266,7 @@ private:
         helper.recordDraw(target, gp.get());
     }
 
-    bool onCombineIfPossible(GrBatch* t, const GrCaps& caps) override {
+    bool onCombineIfPossible(GrOp* t, const GrCaps& caps) override {
         AAFillRectBatch* that = t->cast<AAFillRectBatch>();
         if (!GrPipeline::CanCombine(*this->pipeline(), this->bounds(),
                                     *that->pipeline(), that->bounds(), caps)) {
diff --git a/src/gpu/batches/GrAAFillRectBatch.h b/src/gpu/batches/GrAAFillRectBatch.h
index 1dbec995f7..b4fa2328aa 100644
--- a/src/gpu/batches/GrAAFillRectBatch.h
+++ b/src/gpu/batches/GrAAFillRectBatch.h
@@ -10,9 +10,9 @@
 
 #include "GrColor.h"
 
-class GrBatch;
 class GrDrawBatch;
 class SkMatrix;
+class GrOp;
 struct SkRect;
 
 namespace GrAAFillRectBatch {
diff --git a/src/gpu/batches/GrAAHairLinePathRenderer.cpp b/src/gpu/batches/GrAAHairLinePathRenderer.cpp
index b2ad9ba28e..d7b3d69664 100644
--- a/src/gpu/batches/GrAAHairLinePathRenderer.cpp
+++ b/src/gpu/batches/GrAAHairLinePathRenderer.cpp
@@ -677,7 +677,7 @@ bool check_bounds(const SkMatrix& viewMatrix, const SkRect& devBounds, void* ver
 
 class AAHairlineBatch : public GrVertexBatch {
 public:
-    DEFINE_BATCH_CLASS_ID
+    DEFINE_OP_CLASS_ID
 
     AAHairlineBatch(GrColor color,
                     uint8_t coverage,
@@ -732,7 +732,7 @@ private:
    typedef SkTArray<int, true> IntArray;
    typedef SkTArray<float, true> FloatArray;
 
-    bool onCombineIfPossible(GrBatch* t, const GrCaps& caps) override {
+    bool onCombineIfPossible(GrOp* t, const GrCaps& caps) override {
        AAHairlineBatch* that = t->cast<AAHairlineBatch>();
 
        if (!GrPipeline::CanCombine(*this->pipeline(), this->bounds(), *that->pipeline(),
diff --git a/src/gpu/batches/GrAALinearizingConvexPathRenderer.cpp b/src/gpu/batches/GrAALinearizingConvexPathRenderer.cpp
index f8516b9df2..10f1d72e9c 100644
--- a/src/gpu/batches/GrAALinearizingConvexPathRenderer.cpp
+++ b/src/gpu/batches/GrAALinearizingConvexPathRenderer.cpp
@@ -125,7 +125,7 @@ static sk_sp<GrGeometryProcessor> create_fill_gp(bool tweakAlphaForCoverage,
 
 class AAFlatteningConvexPathBatch : public GrVertexBatch {
 public:
-    DEFINE_BATCH_CLASS_ID
+    DEFINE_OP_CLASS_ID
 
     AAFlatteningConvexPathBatch(GrColor color,
                                 const SkMatrix& viewMatrix,
@@ -285,7 +285,7 @@ private:
         sk_free(indices);
     }
 
-    bool onCombineIfPossible(GrBatch* t, const GrCaps& caps) override {
+    bool onCombineIfPossible(GrOp* t, const GrCaps& caps) override {
         AAFlatteningConvexPathBatch* that = t->cast<AAFlatteningConvexPathBatch>();
         if (!GrPipeline::CanCombine(*this->pipeline(), this->bounds(),
                                     *that->pipeline(), that->bounds(), caps)) {
diff --git a/src/gpu/batches/GrAAStrokeRectBatch.cpp b/src/gpu/batches/GrAAStrokeRectBatch.cpp
index 7f87ad6222..8f13adc752 100644
--- a/src/gpu/batches/GrAAStrokeRectBatch.cpp
+++ b/src/gpu/batches/GrAAStrokeRectBatch.cpp
@@ -118,7 +118,7 @@ static sk_sp<GrGeometryProcessor> create_stroke_rect_gp(bool tweakAlphaForCovera
 
 class AAStrokeRectBatch : public GrVertexBatch {
 public:
-    DEFINE_BATCH_CLASS_ID
+    DEFINE_OP_CLASS_ID
 
     AAStrokeRectBatch(GrColor color, const SkMatrix& viewMatrix,
                       const SkRect& devOutside, const SkRect& devInside)
@@ -204,7 +204,7 @@ private:
     const SkMatrix& viewMatrix() const { return fViewMatrix; }
     bool miterStroke() const { return fMiterStroke; }
 
-    bool onCombineIfPossible(GrBatch* t, const GrCaps&) override;
+    bool onCombineIfPossible(GrOp* t, const GrCaps&) override;
 
     void generateAAStrokeRectGeometry(void* vertices,
                                       size_t offset,
@@ -402,7 +402,7 @@ const GrBuffer* AAStrokeRectBatch::GetIndexBuffer(GrResourceProvider* resourcePr
     }
 }
 
-bool AAStrokeRectBatch::onCombineIfPossible(GrBatch* t, const GrCaps& caps) {
+bool AAStrokeRectBatch::onCombineIfPossible(GrOp* t, const GrCaps& caps) {
     AAStrokeRectBatch* that = t->cast<AAStrokeRectBatch>();
 
     if (!GrPipeline::CanCombine(*this->pipeline(), this->bounds(), *that->pipeline(),
diff --git a/src/gpu/batches/GrAAStrokeRectBatch.h b/src/gpu/batches/GrAAStrokeRectBatch.h
index 964cc5b4b9..73020d5e45 100644
--- a/src/gpu/batches/GrAAStrokeRectBatch.h
+++ b/src/gpu/batches/GrAAStrokeRectBatch.h
@@ -10,7 +10,6 @@
 
 #include "GrColor.h"
 
-class GrBatch;
 class GrDrawBatch;
 class GrResourceProvider;
 class SkMatrix;
diff --git a/src/gpu/batches/GrAnalyticRectBatch.cpp b/src/gpu/batches/GrAnalyticRectBatch.cpp
index 655644f048..5e196cc674 100644
--- a/src/gpu/batches/GrAnalyticRectBatch.cpp
+++ b/src/gpu/batches/GrAnalyticRectBatch.cpp
@@ -236,7 +236,7 @@ sk_sp<GrGeometryProcessor> RectGeometryProcessor::TestCreate(GrProcessorTestData
 
 class AnalyticRectBatch : public GrVertexBatch {
 public:
-    DEFINE_BATCH_CLASS_ID
+    DEFINE_OP_CLASS_ID
 
     AnalyticRectBatch(GrColor color, const SkMatrix& viewMatrix, const SkRect& rect,
                       const SkRect& croppedRect, const SkRect& bounds)
@@ -357,7 +357,7 @@ private:
         helper.recordDraw(target, gp.get());
     }
 
-    bool onCombineIfPossible(GrBatch* t, const GrCaps& caps) override {
+    bool onCombineIfPossible(GrOp* t, const GrCaps& caps) override {
         AnalyticRectBatch* that = t->cast<AnalyticRectBatch>();
         if (!GrPipeline::CanCombine(*this->pipeline(), this->bounds(),
                                     *that->pipeline(), that->bounds(), caps)) {
diff --git a/src/gpu/batches/GrAtlasTextBatch.cpp b/src/gpu/batches/GrAtlasTextBatch.cpp
index cf4ca24aa8..df81e2cb26 100644
--- a/src/gpu/batches/GrAtlasTextBatch.cpp
+++ b/src/gpu/batches/GrAtlasTextBatch.cpp
@@ -183,7 +183,7 @@ void GrAtlasTextBatch::flush(GrVertexBatch::Target* target, FlushInfo* flushInfo
     flushInfo->fGlyphsToFlush = 0;
 }
 
-bool GrAtlasTextBatch::onCombineIfPossible(GrBatch* t, const GrCaps& caps) {
+bool GrAtlasTextBatch::onCombineIfPossible(GrOp* t, const GrCaps& caps) {
     GrAtlasTextBatch* that = t->cast<GrAtlasTextBatch>();
     if (!GrPipeline::CanCombine(*this->pipeline(), this->bounds(),
                                 *that->pipeline(), that->bounds(), caps)) {
diff --git a/src/gpu/batches/GrAtlasTextBatch.h b/src/gpu/batches/GrAtlasTextBatch.h
index b3b88dfc23..ca715a744c 100644
--- a/src/gpu/batches/GrAtlasTextBatch.h
+++ b/src/gpu/batches/GrAtlasTextBatch.h
@@ -15,7 +15,7 @@
 
 class GrAtlasTextBatch : public GrVertexBatch {
 public:
-    DEFINE_BATCH_CLASS_ID
+    DEFINE_OP_CLASS_ID
 
     static const int kVerticesPerGlyph = GrAtlasTextBlob::kVerticesPerGlyph;
     static const int kIndicesPerGlyph = 6;
@@ -152,7 +152,7 @@ private:
     bool usesLocalCoords() const { return fBatch.fUsesLocalCoords; }
     int numGlyphs() const { return fBatch.fNumGlyphs; }
 
-    bool onCombineIfPossible(GrBatch* t, const GrCaps& caps) override;
+    bool onCombineIfPossible(GrOp* t, const GrCaps& caps) override;
 
     // TODO just use class params
     // TODO trying to figure out why lcd is so whack
diff --git a/src/gpu/batches/GrBatch.cpp b/src/gpu/batches/GrBatch.cpp
deleted file mode 100644
index 6755cf94f7..0000000000
--- a/src/gpu/batches/GrBatch.cpp
+++ /dev/null
@@ -1,63 +0,0 @@
-/*
- * Copyright 2015 Google Inc.
- *
- * Use of this source code is governed by a BSD-style license that can be
- * found in the LICENSE file.
- */
-
-#include "GrBatch.h"
-
-#include "GrMemoryPool.h"
-#include "SkSpinlock.h"
-
-// TODO I noticed a small benefit to using a larger exclusive pool for batches. Its very small,
-// but seems to be mostly consistent. There is a lot in flux right now, but we should really
-// revisit this when batch is everywhere
-
-
-// We use a global pool protected by a mutex(spinlock). Chrome may use the same GrContext on
-// different threads. The GrContext is not used concurrently on different threads and there is a
-// memory barrier between accesses of a context on different threads. Also, there may be multiple
-// GrContexts and those contexts may be in use concurrently on different threads.
-namespace {
-static SkSpinlock gBatchSpinlock;
-class MemoryPoolAccessor {
-public:
-
-// We know in the Android framework there is only one GrContext.
-#if defined(SK_BUILD_FOR_ANDROID_FRAMEWORK)
-    MemoryPoolAccessor() {}
-    ~MemoryPoolAccessor() {}
-#else
-    MemoryPoolAccessor() { gBatchSpinlock.acquire(); }
-    ~MemoryPoolAccessor() { gBatchSpinlock.release(); }
-#endif
-
-    GrMemoryPool* pool() const {
-        static GrMemoryPool gPool(16384, 16384);
-        return &gPool;
-    }
-};
-}
-
-int32_t GrBatch::gCurrBatchClassID = GrBatch::kIllegalBatchID;
-
-int32_t GrBatch::gCurrBatchUniqueID = GrBatch::kIllegalBatchID;
-
-void* GrBatch::operator new(size_t size) {
-    return MemoryPoolAccessor().pool()->allocate(size);
-}
-
-void GrBatch::operator delete(void* target) {
-    return MemoryPoolAccessor().pool()->release(target);
-}
-
-GrBatch::GrBatch(uint32_t classID)
-    : fClassID(classID)
-    , fUniqueID(kIllegalBatchID) {
-    SkASSERT(classID == SkToU32(fClassID));
-    SkDEBUGCODE(fUsed = false;)
-    SkDEBUGCODE(fBoundsFlags = kUninitialized_BoundsFlag);
-}
-
-GrBatch::~GrBatch() {}
diff --git a/src/gpu/batches/GrBatch.h b/src/gpu/batches/GrBatch.h
deleted file mode 100644
index e4065ecace..0000000000
--- a/src/gpu/batches/GrBatch.h
+++ /dev/null
@@ -1,233 +0,0 @@
-/*
- * Copyright 2015 Google Inc.
- *
- * Use of this source code is governed by a BSD-style license that can be
- * found in the LICENSE file.
- */
-
-#ifndef GrBatch_DEFINED
-#define GrBatch_DEFINED
-
-#include "../private/SkAtomics.h"
-#include "GrGpuResource.h"
-#include "GrNonAtomicRef.h"
-#include "SkMatrix.h"
-#include "SkRect.h"
-#include "SkString.h"
-
-#include <new>
-
-class GrCaps;
-class GrGpuCommandBuffer;
-class GrBatchFlushState;
-
-/**
- * GrBatch is the base class for all Ganesh deferred geometry generators. To facilitate
- * reorderable batching, Ganesh does not generate geometry inline with draw calls. Instead, it
- * captures the arguments to the draw and then generates the geometry on demand. This gives GrBatch
- * subclasses complete freedom to decide how / what they can batch.
- *
- * Batches are created when GrContext processes a draw call. Batches of the same subclass may be
- * merged using combineIfPossible. When two batches merge, one takes on the union of the data
- * and the other is left empty. The merged batch becomes responsible for drawing the data from both
- * the original batches.
- *
- * If there are any possible optimizations which might require knowing more about the full state of
- * the draw, ie whether or not the GrBatch is allowed to tweak alpha for coverage, then this
- * information will be communicated to the GrBatch prior to geometry generation.
- *
- * The bounds of the batch must contain all the vertices in device space *irrespective* of the clip.
- * The bounds are used in determining which clip elements must be applied and thus the bounds cannot
- * in turn depend upon the clip.
- */
-#define GR_BATCH_SPEW 0
-#if GR_BATCH_SPEW
-    #define GrBATCH_INFO(...) SkDebugf(__VA_ARGS__)
-    #define GrBATCH_SPEW(code) code
-#else
-    #define GrBATCH_SPEW(code)
-    #define GrBATCH_INFO(...)
-#endif
-
-// A helper macro to generate a class static id
-#define DEFINE_BATCH_CLASS_ID \
-    static uint32_t ClassID() { \
-        static uint32_t kClassID = GenBatchClassID(); \
-        return kClassID; \
-    }
-
-class GrBatch : public GrNonAtomicRef<GrBatch> {
-public:
-    GrBatch(uint32_t classID);
-    virtual ~GrBatch();
-
-    virtual const char* name() const = 0;
-
-    bool combineIfPossible(GrBatch* that, const GrCaps& caps) {
-        if (this->classID() != that->classID()) {
-            return false;
-        }
-
-        return this->onCombineIfPossible(that, caps);
-    }
-
-    const SkRect& bounds() const {
-        SkASSERT(kUninitialized_BoundsFlag != fBoundsFlags);
-        return fBounds;
-    }
-
-    bool hasAABloat() const {
-        SkASSERT(fBoundsFlags != kUninitialized_BoundsFlag);
-        return SkToBool(fBoundsFlags & kAABloat_BoundsFlag);
-    }
-
-    bool hasZeroArea() const {
-        SkASSERT(fBoundsFlags != kUninitialized_BoundsFlag);
-        return SkToBool(fBoundsFlags & kZeroArea_BoundsFlag);
-    }
-
-    void* operator new(size_t size);
-    void operator delete(void* target);
-
-    void* operator new(size_t size, void* placement) {
-        return ::operator new(size, placement);
-    }
-    void operator delete(void* target, void* placement) {
-        ::operator delete(target, placement);
-    }
-
-    /**
-     * Helper for safely down-casting to a GrBatch subclass
-     */
-    template <typename T> const T& cast() const {
-        SkASSERT(T::ClassID() == this->classID());
-        return *static_cast<const T*>(this);
-    }
-
-    template <typename T> T* cast() {
-        SkASSERT(T::ClassID() == this->classID());
-        return static_cast<T*>(this);
-    }
-
-    uint32_t classID() const { SkASSERT(kIllegalBatchID != fClassID); return fClassID; }
-
-    // We lazily initialize the uniqueID because currently the only user is GrAuditTrail
-    uint32_t uniqueID() const {
-        if (kIllegalBatchID == fUniqueID) {
-            fUniqueID = GenBatchID();
-        }
-        return fUniqueID;
-    }
-    SkDEBUGCODE(bool isUsed() const { return fUsed; })
-
-    /** Called prior to drawing. The batch should perform any resource creation necessary to
-        to quickly issue its draw when draw is called. */
-    void prepare(GrBatchFlushState* state) { this->onPrepare(state); }
-
-    /** Issues the batches commands to GrGpu. */
-    void draw(GrBatchFlushState* state, const SkRect& bounds) { this->onDraw(state, bounds); }
-
-    /** Used to block batching across render target changes. Remove this once we store
-        GrBatches for different RTs in different targets. */
-    // TODO: this needs to be updated to return GrSurfaceProxy::UniqueID
-    virtual GrGpuResource::UniqueID renderTargetUniqueID() const = 0;
-
-    /** Used for spewing information about batches when debugging. */
-    virtual SkString dumpInfo() const {
-        SkString string;
-        string.appendf("BatchBounds: [L: %.2f, T: %.2f, R: %.2f, B: %.2f]\n",
-                       fBounds.fLeft, fBounds.fTop, fBounds.fRight, fBounds.fBottom);
-        return string;
-    }
-
-protected:
-    /**
-     * Indicates that the batch will produce geometry that extends beyond its bounds for the
-     * purpose of ensuring that the fragment shader runs on partially covered pixels for
-     * non-MSAA antialiasing.
-     */
-    enum class HasAABloat {
-        kYes,
-        kNo
-    };
-    /**
-     * Indicates that the geometry represented by the batch has zero area (i.e. it is hairline
-     * or points).
-     */
-    enum class IsZeroArea {
-        kYes,
-        kNo
-    };
-    void setBounds(const SkRect& newBounds, HasAABloat aabloat, IsZeroArea zeroArea) {
-        fBounds = newBounds;
-        this->setBoundsFlags(aabloat, zeroArea);
-    }
-    void setTransformedBounds(const SkRect& srcBounds, const SkMatrix& m,
-                              HasAABloat aabloat, IsZeroArea zeroArea) {
-        m.mapRect(&fBounds, srcBounds);
-        this->setBoundsFlags(aabloat, zeroArea);
-    }
-
-    void joinBounds(const GrBatch& that) {
-        if (that.hasAABloat()) {
-            fBoundsFlags |= kAABloat_BoundsFlag;
-        }
-        if (that.hasZeroArea()) {
-            fBoundsFlags |= kZeroArea_BoundsFlag;
-        }
-        return fBounds.joinPossiblyEmptyRect(that.fBounds);
-    }
-
-    void replaceBounds(const GrBatch& that) {
-        fBounds = that.fBounds;
-        fBoundsFlags = that.fBoundsFlags;
-    }
-
-    static uint32_t GenBatchClassID() { return GenID(&gCurrBatchClassID); }
-
-private:
-    virtual bool onCombineIfPossible(GrBatch*, const GrCaps& caps) = 0;
-
-    virtual void onPrepare(GrBatchFlushState*) = 0;
-    virtual void onDraw(GrBatchFlushState*, const SkRect& bounds) = 0;
-
-    static uint32_t GenID(int32_t* idCounter) {
-        // The atomic inc returns the old value not the incremented value. So we add
-        // 1 to the returned value.
-        uint32_t id = static_cast<uint32_t>(sk_atomic_inc(idCounter)) + 1;
-        if (!id) {
-            SkFAIL("This should never wrap as it should only be called once for each GrBatch "
-                   "subclass.");
-        }
-        return id;
-    }
-
-    void setBoundsFlags(HasAABloat aabloat, IsZeroArea zeroArea) {
-        fBoundsFlags = 0;
-        fBoundsFlags |= (HasAABloat::kYes == aabloat) ? kAABloat_BoundsFlag : 0;
-        fBoundsFlags |= (IsZeroArea ::kYes == zeroArea) ? kZeroArea_BoundsFlag : 0;
-    }
-
-    enum {
-        kIllegalBatchID = 0,
-    };
-
-    enum BoundsFlags {
-        kAABloat_BoundsFlag = 0x1,
-        kZeroArea_BoundsFlag = 0x2,
-        SkDEBUGCODE(kUninitialized_BoundsFlag = 0x4)
-    };
-
-    SkDEBUGCODE(bool fUsed;)
-    const uint16_t fClassID;
-    uint16_t fBoundsFlags;
-
-    static uint32_t GenBatchID() { return GenID(&gCurrBatchUniqueID); }
-    mutable uint32_t fUniqueID;
-    SkRect fBounds;
-
-    static int32_t gCurrBatchUniqueID;
-    static int32_t gCurrBatchClassID;
-};
-
-#endif
diff --git a/src/gpu/batches/GrClearBatch.h b/src/gpu/batches/GrClearBatch.h
index 24905d3648..f1870486df 100644
--- a/src/gpu/batches/GrClearBatch.h
+++ b/src/gpu/batches/GrClearBatch.h
@@ -8,16 +8,16 @@
 #ifndef GrClearBatch_DEFINED
 #define GrClearBatch_DEFINED
 
-#include "GrBatch.h"
 #include "GrBatchFlushState.h"
 #include "GrFixedClip.h"
 #include "GrGpu.h"
 #include "GrGpuCommandBuffer.h"
+#include "GrOp.h"
 #include "GrRenderTarget.h"
 
-class GrClearBatch final : public GrBatch {
+class GrClearBatch final : public GrOp {
 public:
-    DEFINE_BATCH_CLASS_ID
+    DEFINE_OP_CLASS_ID
 
     static sk_sp<GrClearBatch> Make(const GrFixedClip& clip, GrColor color, GrRenderTarget* rt) {
         sk_sp<GrClearBatch> batch(new GrClearBatch(clip, color, rt));
@@ -68,7 +68,7 @@ private:
         fRenderTarget.reset(rt);
     }
 
-    bool onCombineIfPossible(GrBatch* t, const GrCaps& caps) override {
+    bool onCombineIfPossible(GrOp* t, const GrCaps& caps) override {
         // This could be much more complicated. Currently we look at cases where the new clear
         // contains the old clear, or when the new clear is a subset of the old clear and is the
         // same color.
@@ -105,7 +105,7 @@ private:
     GrColor fColor;
     GrPendingIOResource<GrRenderTarget, kWrite_GrIOType> fRenderTarget;
 
-    typedef GrBatch INHERITED;
+    typedef GrOp INHERITED;
 };
 
 #endif
diff --git a/src/gpu/batches/GrClearStencilClipBatch.h b/src/gpu/batches/GrClearStencilClipBatch.h
index 0f2617355d..87c12f0a26 100644
--- a/src/gpu/batches/GrClearStencilClipBatch.h
+++ b/src/gpu/batches/GrClearStencilClipBatch.h
@@ -8,16 +8,16 @@
 #ifndef GrClearStencilClipBatch_DEFINED
 #define GrClearStencilClipBatch_DEFINED
 
-#include "GrBatch.h"
 #include "GrBatchFlushState.h"
 #include "GrFixedClip.h"
 #include "GrGpu.h"
 #include "GrGpuCommandBuffer.h"
+#include "GrOp.h"
 #include "GrRenderTarget.h"
 
-class GrClearStencilClipBatch final : public GrBatch {
+class GrClearStencilClipBatch final : public GrOp {
 public:
-    DEFINE_BATCH_CLASS_ID
+    DEFINE_OP_CLASS_ID
 
     GrClearStencilClipBatch(const GrFixedClip& clip, bool insideStencilMask, GrRenderTarget* rt)
         : INHERITED(ClassID())
@@ -49,7 +49,7 @@ public:
     }
 
 private:
-    bool onCombineIfPossible(GrBatch* t, const GrCaps& caps) override { return false; }
+    bool onCombineIfPossible(GrOp* t, const GrCaps& caps) override { return false; }
 
     void onPrepare(GrBatchFlushState*) override {}
 
@@ -61,7 +61,7 @@ private:
     const bool fInsideStencilMask;
     GrPendingIOResource<GrRenderTarget, kWrite_GrIOType> fRenderTarget;
 
-    typedef GrBatch INHERITED;
+    typedef GrOp INHERITED;
 };
 
 #endif
diff --git a/src/gpu/batches/GrCopySurfaceBatch.cpp b/src/gpu/batches/GrCopySurfaceBatch.cpp
index 724609871d..9aa46e69fe 100644
--- a/src/gpu/batches/GrCopySurfaceBatch.cpp
+++ b/src/gpu/batches/GrCopySurfaceBatch.cpp
@@ -59,7 +59,7 @@ bool GrCopySurfaceBatch::ClipSrcRectAndDstPoint(const GrSurface* dst,
     return !clippedSrcRect->isEmpty();
 }
 
-GrBatch* GrCopySurfaceBatch::Create(GrSurface* dst, GrSurface* src, const SkIRect& srcRect,
+GrOp* GrCopySurfaceBatch::Create(GrSurface* dst, GrSurface* src, const SkIRect& srcRect,
                                     const SkIPoint& dstPoint) {
     SkASSERT(dst);
     SkASSERT(src);
diff --git a/src/gpu/batches/GrCopySurfaceBatch.h b/src/gpu/batches/GrCopySurfaceBatch.h
index 3d9fc78ac3..a808b2f181 100644
--- a/src/gpu/batches/GrCopySurfaceBatch.h
+++ b/src/gpu/batches/GrCopySurfaceBatch.h
@@ -8,14 +8,14 @@
 #ifndef GrCopySurfaceBatch_DEFINED
 #define GrCopySurfaceBatch_DEFINED
 
-#include "GrBatch.h"
 #include "GrBatchFlushState.h"
 #include "GrGpu.h"
+#include "GrOp.h"
 #include "GrRenderTarget.h"
 
-class GrCopySurfaceBatch final : public GrBatch {
+class GrCopySurfaceBatch final : public GrOp {
 public:
-    DEFINE_BATCH_CLASS_ID
+    DEFINE_OP_CLASS_ID
 
     /** This should not really be exposed as Create() will apply this clipping, but there is
      *  currently a workaround in GrContext::copySurface() for non-render target dsts that relies
@@ -27,7 +27,7 @@ public:
                                         SkIRect* clippedSrcRect,
                                         SkIPoint* clippedDstPoint);
 
-    static GrBatch* Create(GrSurface* dst, GrSurface* src, const SkIRect& srcRect,
+    static GrOp* Create(GrSurface* dst, GrSurface* src, const SkIRect& srcRect,
                            const SkIPoint& dstPoint);
 
     const char* name() const override { return "CopySurface"; }
@@ -64,7 +64,7 @@ private:
         this->setBounds(bounds, HasAABloat::kNo, IsZeroArea::kNo);
     }
 
-    bool onCombineIfPossible(GrBatch* that, const GrCaps& caps) override { return false; }
+    bool onCombineIfPossible(GrOp* that, const GrCaps& caps) override { return false; }
 
     void onPrepare(GrBatchFlushState*) override {}
 
@@ -83,7 +83,7 @@ private:
     SkIRect fSrcRect;
     SkIPoint fDstPoint;
 
-    typedef GrBatch INHERITED;
+    typedef GrOp INHERITED;
 };
 
 #endif
diff --git a/src/gpu/batches/GrDefaultPathRenderer.cpp b/src/gpu/batches/GrDefaultPathRenderer.cpp
index 21e2289df3..5faf81a0d1 100644
--- a/src/gpu/batches/GrDefaultPathRenderer.cpp
+++ b/src/gpu/batches/GrDefaultPathRenderer.cpp
@@ -96,7 +96,7 @@ static inline void add_quad(SkPoint** vert, const SkPoint* base, const SkPoint p
 
 class DefaultPathBatch : public GrVertexBatch {
 public:
-    DEFINE_BATCH_CLASS_ID
+    DEFINE_OP_CLASS_ID
 
     DefaultPathBatch(GrColor color, const SkPath& path, SkScalar tolerance,
                      uint8_t coverage, const SkMatrix& viewMatrix, bool isHairline,
@@ -268,7 +268,7 @@ private:
         target->putBackVertices((size_t)(maxVertices - vertexOffset), (size_t)vertexStride);
     }
 
-    bool onCombineIfPossible(GrBatch* t, const GrCaps& caps) override {
+    bool onCombineIfPossible(GrOp* t, const GrCaps& caps) override {
         DefaultPathBatch* that = t->cast<DefaultPathBatch>();
         if (!GrPipeline::CanCombine(*this->pipeline(), this->bounds(),
                                     *that->pipeline(), that->bounds(), caps)) {
diff --git a/src/gpu/batches/GrDiscardBatch.h b/src/gpu/batches/GrDiscardBatch.h
index d2ebb4662b..a53feffd9c 100644
--- a/src/gpu/batches/GrDiscardBatch.h
+++ b/src/gpu/batches/GrDiscardBatch.h
@@ -8,14 +8,14 @@
 #ifndef GrDiscardBatch_DEFINED
 #define GrDiscardBatch_DEFINED
 
-#include "GrBatch.h"
 #include "GrBatchFlushState.h"
 #include "GrGpu.h"
+#include "GrOp.h"
 #include "GrRenderTarget.h"
 
-class GrDiscardBatch final : public GrBatch {
+class GrDiscardBatch final : public GrOp {
 public:
-    DEFINE_BATCH_CLASS_ID
+    DEFINE_OP_CLASS_ID
 
     GrDiscardBatch(GrRenderTarget* rt)
         : INHERITED(ClassID())
@@ -39,7 +39,7 @@ public:
     }
 
 private:
-    bool onCombineIfPossible(GrBatch* that, const GrCaps& caps) override {
+    bool onCombineIfPossible(GrOp* that, const GrCaps& caps) override {
         return this->renderTargetUniqueID() == that->renderTargetUniqueID();
     }
 
@@ -51,7 +51,7 @@ private:
 
     GrPendingIOResource<GrRenderTarget, kWrite_GrIOType> fRenderTarget;
 
-    typedef GrBatch INHERITED;
+    typedef GrOp INHERITED;
 };
 
 #endif
diff --git a/src/gpu/batches/GrDrawAtlasBatch.cpp b/src/gpu/batches/GrDrawAtlasBatch.cpp
index 6f1bfedfec..95c8f2cf32 100644
--- a/src/gpu/batches/GrDrawAtlasBatch.cpp
+++ b/src/gpu/batches/GrDrawAtlasBatch.cpp
@@ -162,7 +162,7 @@ GrDrawAtlasBatch::GrDrawAtlasBatch(GrColor color, const SkMatrix& viewMatrix, in
     this->setTransformedBounds(bounds, viewMatrix, HasAABloat::kNo, IsZeroArea::kNo);
 }
 
-bool GrDrawAtlasBatch::onCombineIfPossible(GrBatch* t, const GrCaps& caps) {
+bool GrDrawAtlasBatch::onCombineIfPossible(GrOp* t, const GrCaps& caps) {
     GrDrawAtlasBatch* that = t->cast<GrDrawAtlasBatch>();
 
     if (!GrPipeline::CanCombine(*this->pipeline(), this->bounds(), *that->pipeline(),
diff --git a/src/gpu/batches/GrDrawAtlasBatch.h b/src/gpu/batches/GrDrawAtlasBatch.h
index f8d88e0c20..449882c356 100644
--- a/src/gpu/batches/GrDrawAtlasBatch.h
+++ b/src/gpu/batches/GrDrawAtlasBatch.h
@@ -14,7 +14,7 @@
 
 class GrDrawAtlasBatch : public GrVertexBatch {
 public:
-    DEFINE_BATCH_CLASS_ID
+    DEFINE_OP_CLASS_ID
 
     GrDrawAtlasBatch(GrColor color, const SkMatrix& viewMatrix, int spriteCount,
                      const SkRSXform* xforms, const SkRect* rects, const SkColor* colors);
@@ -55,7 +55,7 @@ private:
     int quadCount() const { return fQuadCount; }
     bool coverageIgnored() const { return fCoverageIgnored; }
 
-    bool onCombineIfPossible(GrBatch* t, const GrCaps&) override;
+    bool onCombineIfPossible(GrOp* t, const GrCaps&) override;
 
     struct Geometry {
         GrColor fColor;
diff --git a/src/gpu/batches/GrDrawBatch.h b/src/gpu/batches/GrDrawBatch.h
index c92dca346a..2184552d58 100644
--- a/src/gpu/batches/GrDrawBatch.h
+++ b/src/gpu/batches/GrDrawBatch.h
@@ -9,7 +9,7 @@
 #define GrDrawBatch_DEFINED
 
 #include <functional>
-#include "GrBatch.h"
+#include "GrOp.h"
 #include "GrPipeline.h"
 
 struct GrInitInvariantOutput;
@@ -44,7 +44,7 @@ private:
 /**
  * Base class for GrBatches that draw. These batches have a GrPipeline installed by GrOpList.
  */
-class GrDrawBatch : public GrBatch {
+class GrDrawBatch : public GrOp {
 public:
     /** Method that performs an upload on behalf of a DeferredUploadFn. */
     using WritePixelsFn = std::function<bool(GrSurface* texture, int left, int top, int width,
                                              int height, GrPixelConfig config, const void* buffer,
                                              size_t rowBytes)>;
 
     SkAlignedSTStorage<1, GrPipeline> fPipelineStorage;
     bool fPipelineInstalled;
 
-    typedef GrBatch INHERITED;
+    typedef GrOp INHERITED;
 };
 
 #endif
diff --git a/src/gpu/batches/GrDrawPathBatch.cpp b/src/gpu/batches/GrDrawPathBatch.cpp
index fb458ec7cc..8d0bc42261 100644
--- a/src/gpu/batches/GrDrawPathBatch.cpp
+++ b/src/gpu/batches/GrDrawPathBatch.cpp
@@ -62,7 +62,7 @@ GrDrawPathRangeBatch::GrDrawPathRangeBatch(const SkMatrix& viewMatrix, SkScalar
     this->setBounds(bounds, HasAABloat::kNo, IsZeroArea::kNo);
 }
 
-bool GrDrawPathRangeBatch::onCombineIfPossible(GrBatch* t, const GrCaps& caps) {
+bool GrDrawPathRangeBatch::onCombineIfPossible(GrOp* t, const GrCaps& caps) {
     GrDrawPathRangeBatch* that = t->cast<GrDrawPathRangeBatch>();
     if (this->fPathRange.get() != that->fPathRange.get() ||
         this->transformType() != that->transformType() ||
diff --git a/src/gpu/batches/GrDrawPathBatch.h b/src/gpu/batches/GrDrawPathBatch.h
index 3a46f46b2d..9de29e2171 100644
--- a/src/gpu/batches/GrDrawPathBatch.h
+++ b/src/gpu/batches/GrDrawPathBatch.h
@@ -63,7 +63,7 @@ private:
 
 class GrDrawPathBatch final : public GrDrawPathBatchBase {
 public:
-    DEFINE_BATCH_CLASS_ID
+    DEFINE_OP_CLASS_ID
 
     static GrDrawBatch* Create(const SkMatrix& viewMatrix, GrColor color, const GrPath* path) {
         return new GrDrawPathBatch(viewMatrix, color, path);
@@ -80,7 +80,7 @@
         this->setTransformedBounds(path->getBounds(), viewMatrix, HasAABloat::kNo, IsZeroArea::kNo);
     }
 
-    bool onCombineIfPossible(GrBatch* t, const GrCaps& caps) override { return false; }
+    bool onCombineIfPossible(GrOp* t, const GrCaps& caps) override { return false; }
 
     void onDraw(GrBatchFlushState* state, const SkRect& bounds) override;
 
@@ -94,7 +94,7 @@
 class GrDrawPathRangeBatch final : public GrDrawPathBatchBase {
 public:
     typedef GrPathRendering::PathTransformType TransformType;
 
-    DEFINE_BATCH_CLASS_ID
+    DEFINE_OP_CLASS_ID
 
     struct InstanceData : public SkNoncopyable {
     public:
@@ -173,7 +173,7 @@ private:
     TransformType transformType() const { return fDraws.head()->fInstanceData->transformType(); }
 
-    bool onCombineIfPossible(GrBatch* t, const GrCaps& caps) override;
+    bool onCombineIfPossible(GrOp* t, const GrCaps& caps) override;
 
     void onDraw(GrBatchFlushState* state, const SkRect& bounds) override;
diff --git a/src/gpu/batches/GrDrawVerticesBatch.cpp b/src/gpu/batches/GrDrawVerticesBatch.cpp
index e565022055..1d023200c2 100644
--- a/src/gpu/batches/GrDrawVerticesBatch.cpp
+++ b/src/gpu/batches/GrDrawVerticesBatch.cpp
@@ -171,7 +171,7 @@ void GrDrawVerticesBatch::onPrepareDraws(Target* target) const {
     target->draw(gp.get(), mesh);
 }
 
-bool GrDrawVerticesBatch::onCombineIfPossible(GrBatch* t, const GrCaps& caps) {
+bool GrDrawVerticesBatch::onCombineIfPossible(GrOp* t, const GrCaps& caps) {
     GrDrawVerticesBatch* that = t->cast<GrDrawVerticesBatch>();
 
     if (!GrPipeline::CanCombine(*this->pipeline(), this->bounds(), *that->pipeline(),
diff --git a/src/gpu/batches/GrDrawVerticesBatch.h b/src/gpu/batches/GrDrawVerticesBatch.h
index d71fb0daba..821321a771 100644
--- a/src/gpu/batches/GrDrawVerticesBatch.h
+++ b/src/gpu/batches/GrDrawVerticesBatch.h
@@ -20,7 +20,7 @@ struct GrInitInvariantOutput;
 
 class GrDrawVerticesBatch : public GrVertexBatch {
 public:
-    DEFINE_BATCH_CLASS_ID
+    DEFINE_OP_CLASS_ID
 
     GrDrawVerticesBatch(GrColor color,
                         GrPrimitiveType primitiveType,
@@ -55,7 +55,7 @@ private:
                kPoints_GrPrimitiveType == fPrimitiveType;
     }
 
-    bool onCombineIfPossible(GrBatch* t, const GrCaps&) override;
+    bool onCombineIfPossible(GrOp* t, const GrCaps&) override;
 
     struct Mesh {
         GrColor fColor;  // Only used if there are no per-vertex colors
diff --git a/src/gpu/batches/GrMSAAPathRenderer.cpp b/src/gpu/batches/GrMSAAPathRenderer.cpp
index ab98a15055..77d4e6b0bf 100644
--- a/src/gpu/batches/GrMSAAPathRenderer.cpp
+++ b/src/gpu/batches/GrMSAAPathRenderer.cpp
@@ -216,7 +216,7 @@ private:
 
 class MSAAPathBatch : public GrVertexBatch {
 public:
-    DEFINE_BATCH_CLASS_ID
+    DEFINE_OP_CLASS_ID
 
     MSAAPathBatch(GrColor color, const SkPath& path, const SkMatrix& viewMatrix,
                   const SkRect& devBounds)
@@ -447,7 +447,7 @@ private:
         }
     }
 
-    bool onCombineIfPossible(GrBatch* t, const GrCaps& caps) override {
+    bool onCombineIfPossible(GrOp* t, const GrCaps& caps) override {
         MSAAPathBatch* that = t->cast<MSAAPathBatch>();
         if (!GrPipeline::CanCombine(*this->pipeline(), this->bounds(),
                                     *that->pipeline(), that->bounds(), caps)) {
diff --git a/src/gpu/batches/GrNinePatch.cpp b/src/gpu/batches/GrNinePatch.cpp
index 2e1809d754..7ca8d6840a 100644
--- a/src/gpu/batches/GrNinePatch.cpp
+++ b/src/gpu/batches/GrNinePatch.cpp
@@ -25,7 +25,7 @@ static sk_sp<GrGeometryProcessor> create_gp(bool readsCoverage) {
 
 class GrNonAANinePatchBatch : public GrVertexBatch {
 public:
-    DEFINE_BATCH_CLASS_ID
+    DEFINE_OP_CLASS_ID
 
     static const int kVertsPerRect = 4;
     static const int kIndicesPerRect = 6;
@@ -143,7 +143,7 @@ private:
         fOverrides = overrides;
     }
 
-    bool onCombineIfPossible(GrBatch* t, const GrCaps& caps) override {
+    bool onCombineIfPossible(GrOp* t, const GrCaps& caps) override {
         GrNonAANinePatchBatch* that = t->cast<GrNonAANinePatchBatch>();
         if (!GrPipeline::CanCombine(*this->pipeline(), this->bounds(),
                                     *that->pipeline(), that->bounds(), caps)) {
diff --git a/src/gpu/batches/GrNonAAFillRectBatch.cpp b/src/gpu/batches/GrNonAAFillRectBatch.cpp
index 0e66134795..8f45a8f93e 100644
--- a/src/gpu/batches/GrNonAAFillRectBatch.cpp
+++ b/src/gpu/batches/GrNonAAFillRectBatch.cpp
@@ -73,7 +73,7 @@ static void tesselate(intptr_t vertices,
 
 class NonAAFillRectBatch : public GrVertexBatch {
 public:
-    DEFINE_BATCH_CLASS_ID
+    DEFINE_OP_CLASS_ID
 
     NonAAFillRectBatch(GrColor color, const SkMatrix& viewMatrix, const SkRect& rect,
                        const SkRect* localRect, const SkMatrix* localMatrix)
@@ -159,7 +159,7 @@ private:
         helper.recordDraw(target, gp.get());
     }
 
-    bool onCombineIfPossible(GrBatch* t, const GrCaps& caps) override {
+    bool onCombineIfPossible(GrOp* t, const GrCaps& caps) override {
         NonAAFillRectBatch* that = t->cast<NonAAFillRectBatch>();
         if (!GrPipeline::CanCombine(*this->pipeline(), this->bounds(),
                                     *that->pipeline(), that->bounds(), caps)) {
diff --git a/src/gpu/batches/GrNonAAFillRectPerspectiveBatch.cpp b/src/gpu/batches/GrNonAAFillRectPerspectiveBatch.cpp
index 9a946c80f6..2dcd3e21b4 100644
--- a/src/gpu/batches/GrNonAAFillRectPerspectiveBatch.cpp
+++ b/src/gpu/batches/GrNonAAFillRectPerspectiveBatch.cpp
@@ -92,7 +92,7 @@ static void tesselate(intptr_t vertices,
 
 // We handle perspective in the local matrix or viewmatrix with special batches
 class GrNonAAFillRectPerspectiveBatch : public GrVertexBatch {
 public:
-    DEFINE_BATCH_CLASS_ID
+    DEFINE_OP_CLASS_ID
 
     GrNonAAFillRectPerspectiveBatch(GrColor color, const SkMatrix& viewMatrix, const SkRect& rect,
                                     const SkRect* localRect, const SkMatrix* localMatrix)
@@ -187,7 +187,7 @@ private:
         helper.recordDraw(target, gp.get());
     }
 
-    bool onCombineIfPossible(GrBatch* t, const GrCaps& caps) override {
+    bool onCombineIfPossible(GrOp* t, const GrCaps& caps) override {
         GrNonAAFillRectPerspectiveBatch* that = t->cast<GrNonAAFillRectPerspectiveBatch>();
         if (!GrPipeline::CanCombine(*this->pipeline(), this->bounds(),
                                     *that->pipeline(), that->bounds(), caps)) {
diff --git a/src/gpu/batches/GrNonAAStrokeRectBatch.cpp b/src/gpu/batches/GrNonAAStrokeRectBatch.cpp
index 9139dab9fe..2cc1a7d22c 100644
--- a/src/gpu/batches/GrNonAAStrokeRectBatch.cpp
+++ b/src/gpu/batches/GrNonAAStrokeRectBatch.cpp
@@ -47,7 +47,7 @@ inline static bool allowed_stroke(const SkStrokeRec& stroke) {
 
 class NonAAStrokeRectBatch : public GrVertexBatch {
 public:
-    DEFINE_BATCH_CLASS_ID
+    DEFINE_OP_CLASS_ID
 
     const char* name() const override { return "NonAAStrokeRectBatch"; }
 
@@ -167,7 +167,7 @@ private:
         fOverrides = overrides;
     }
 
-    bool onCombineIfPossible(GrBatch* t, const GrCaps&) override {
+    bool onCombineIfPossible(GrOp* t, const GrCaps&) override {
         // NonAA stroke rects cannot batch right now
         // TODO make these batchable
         return false;
diff --git a/src/gpu/batches/GrOp.cpp b/src/gpu/batches/GrOp.cpp
new file mode 100644
index 0000000000..1d86419c77
--- /dev/null
+++ b/src/gpu/batches/GrOp.cpp
@@ -0,0 +1,63 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "GrOp.h"
+
+#include "GrMemoryPool.h"
+#include "SkSpinlock.h"
+
+// TODO I noticed a small benefit to using a larger exclusive pool for ops. Its very small, but
+// seems to be mostly consistent. There is a lot in flux right now, but we should really revisit
+// this.
+
+
+// We use a global pool protected by a mutex(spinlock). Chrome may use the same GrContext on
+// different threads. The GrContext is not used concurrently on different threads and there is a
+// memory barrier between accesses of a context on different threads. Also, there may be multiple
+// GrContexts and those contexts may be in use concurrently on different threads.
+namespace {
+static SkSpinlock gOpPoolSpinLock;
+class MemoryPoolAccessor {
+public:
+
+// We know in the Android framework there is only one GrContext.
+#if defined(SK_BUILD_FOR_ANDROID_FRAMEWORK)
+    MemoryPoolAccessor() {}
+    ~MemoryPoolAccessor() {}
+#else
+    MemoryPoolAccessor() { gOpPoolSpinLock.acquire(); }
+    ~MemoryPoolAccessor() { gOpPoolSpinLock.release(); }
+#endif
+
+    GrMemoryPool* pool() const {
+        static GrMemoryPool gPool(16384, 16384);
+        return &gPool;
+    }
+};
+}
+
+int32_t GrOp::gCurrOpClassID = GrOp::kIllegalOpID;
+
+int32_t GrOp::gCurrOpUniqueID = GrOp::kIllegalOpID;
+
+void* GrOp::operator new(size_t size) {
+    return MemoryPoolAccessor().pool()->allocate(size);
+}
+
+void GrOp::operator delete(void* target) {
+    return MemoryPoolAccessor().pool()->release(target);
+}
+
+GrOp::GrOp(uint32_t classID)
+    : fClassID(classID)
+    , fUniqueID(kIllegalOpID) {
+    SkASSERT(classID == SkToU32(fClassID));
+    SkDEBUGCODE(fUsed = false;)
+    SkDEBUGCODE(fBoundsFlags = kUninitialized_BoundsFlag);
+}
+
+GrOp::~GrOp() {}
diff --git a/src/gpu/batches/GrOp.h b/src/gpu/batches/GrOp.h
new file mode 100644
index 0000000000..79fbfa4b90
--- /dev/null
+++ b/src/gpu/batches/GrOp.h
@@ -0,0 +1,232 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrBatch_DEFINED
+#define GrBatch_DEFINED
+
+#include "../private/SkAtomics.h"
+#include "GrGpuResource.h"
+#include "GrNonAtomicRef.h"
+#include "SkMatrix.h"
+#include "SkRect.h"
+#include "SkString.h"
+
+#include <new>
+
+class GrCaps;
+class GrGpuCommandBuffer;
+class GrBatchFlushState;
+
+/**
+ * GrOp is the base class for all Ganesh deferred GPU operations. To facilitate reorderable
+ * batching, Ganesh does not generate geometry inline with draw calls. Instead, it captures the
+ * arguments to the draw and then generates the geometry on demand. This gives GrOp subclasses
+ * complete freedom to decide how/what they can batch.
+ *
+ * Ops of the same subclass may be merged using combineIfPossible. When two ops merge, one
+ * takes on the union of the data and the other is left empty. The merged op becomes responsible
+ * for drawing the data from both the original ops.
+ *
+ * If there are any possible optimizations which might require knowing more about the full state of
+ * the draw, e.g. whether or not the GrOp is allowed to tweak alpha for coverage, then this
+ * information will be communicated to the GrOp prior to geometry generation.
+ *
+ * The bounds of the op must contain all the vertices in device space *irrespective* of the clip.
+ * The bounds are used in determining which clip elements must be applied and thus the bounds cannot
+ * in turn depend upon the clip.
+ */
+#define GR_OP_SPEW 0
+#if GR_OP_SPEW
+    #define GrOP_SPEW(code) code
+    #define GrOP_INFO(...) SkDebugf(__VA_ARGS__)
+#else
+    #define GrOP_SPEW(code)
+    #define GrOP_INFO(...)
+#endif
+
+// A helper macro to generate a class static id
+#define DEFINE_OP_CLASS_ID \
+    static uint32_t ClassID() { \
+        static uint32_t kClassID = GenOpClassID(); \
+        return kClassID; \
+    }
+
+class GrOp : public GrNonAtomicRef<GrOp> {
+public:
+    GrOp(uint32_t classID);
+    virtual ~GrOp();
+
+    virtual const char* name() const = 0;
+
+    bool combineIfPossible(GrOp* that, const GrCaps& caps) {
+        if (this->classID() != that->classID()) {
+            return false;
+        }
+
+        return this->onCombineIfPossible(that, caps);
+    }
+
+    const SkRect& bounds() const {
+        SkASSERT(kUninitialized_BoundsFlag != fBoundsFlags);
+        return fBounds;
+    }
+
+    bool hasAABloat() const {
+        SkASSERT(fBoundsFlags != kUninitialized_BoundsFlag);
+        return SkToBool(fBoundsFlags & kAABloat_BoundsFlag);
+    }
+
+    bool hasZeroArea() const {
+        SkASSERT(fBoundsFlags != kUninitialized_BoundsFlag);
+        return SkToBool(fBoundsFlags & kZeroArea_BoundsFlag);
+    }
+
+    void* operator new(size_t size);
+    void operator delete(void* target);
+
+    void* operator new(size_t size, void* placement) {
+        return ::operator new(size, placement);
+    }
+    void operator delete(void* target, void* placement) {
+        ::operator delete(target, placement);
+    }
+
+    /**
+     * Helper for safely down-casting to a GrOp subclass
+     */
+    template <typename T> const T& cast() const {
+        SkASSERT(T::ClassID() == this->classID());
+        return *static_cast<const T*>(this);
+    }
+
+    template <typename T> T* cast() {
+        SkASSERT(T::ClassID() == this->classID());
+        return static_cast<T*>(this);
+    }
+
+    uint32_t classID() const { SkASSERT(kIllegalOpID != fClassID); return fClassID; }
+
+    // We lazily initialize the uniqueID because currently the only user is GrAuditTrail
+    uint32_t uniqueID() const {
+        if (kIllegalOpID == fUniqueID) {
+            fUniqueID = GenOpID();
+        }
+        return fUniqueID;
+    }
+    SkDEBUGCODE(bool isUsed() const { return fUsed; })
+
+    /** Called prior to drawing. The op should perform any resource creation necessary to
+        to quickly issue its draw when draw is called. */
+    void prepare(GrBatchFlushState* state) { this->onPrepare(state); }
+
+    /** Issues the op's commands to GrGpu. */
+    void draw(GrBatchFlushState* state, const SkRect& bounds) { this->onDraw(state, bounds); }
+
+    /** Used to block batching across render target changes. Remove this once we store
+        GrOps for different RTs in different targets. */
+    // TODO: this needs to be updated to return GrSurfaceProxy::UniqueID
+    virtual GrGpuResource::UniqueID renderTargetUniqueID() const = 0;
+
+    /** Used for spewing information about ops when debugging. */
+    virtual SkString dumpInfo() const {
+        SkString string;
+        string.appendf("OpBounds: [L: %.2f, T: %.2f, R: %.2f, B: %.2f]\n",
+                       fBounds.fLeft, fBounds.fTop, fBounds.fRight, fBounds.fBottom);
+        return string;
+    }
+
+protected:
+    /**
+     * Indicates that the op will produce geometry that extends beyond its bounds for the
+     * purpose of ensuring that the fragment shader runs on partially covered pixels for
+     * non-MSAA antialiasing.
+     */
+    enum class HasAABloat {
+        kYes,
+        kNo
+    };
+    /**
+     * Indicates that the geometry represented by the op has zero area (e.g. it is hairline or
+     * points).
+     */
+    enum class IsZeroArea {
+        kYes,
+        kNo
+    };
+    void setBounds(const SkRect& newBounds, HasAABloat aabloat, IsZeroArea zeroArea) {
+        fBounds = newBounds;
+        this->setBoundsFlags(aabloat, zeroArea);
+    }
+    void setTransformedBounds(const SkRect& srcBounds, const SkMatrix& m,
+                              HasAABloat aabloat, IsZeroArea zeroArea) {
+        m.mapRect(&fBounds, srcBounds);
+        this->setBoundsFlags(aabloat, zeroArea);
+    }
+
+    void joinBounds(const GrOp& that) {
+        if (that.hasAABloat()) {
+            fBoundsFlags |= kAABloat_BoundsFlag;
+        }
+        if (that.hasZeroArea()) {
+            fBoundsFlags |= kZeroArea_BoundsFlag;
+        }
+        return fBounds.joinPossiblyEmptyRect(that.fBounds);
+    }
+
+    void replaceBounds(const GrOp& that) {
+        fBounds = that.fBounds;
+        fBoundsFlags = that.fBoundsFlags;
+    }
+
+    static uint32_t GenOpClassID() { return GenID(&gCurrOpClassID); }
+
+private:
+    virtual bool onCombineIfPossible(GrOp*, const GrCaps& caps) = 0;
+
+    virtual void onPrepare(GrBatchFlushState*) = 0;
+    virtual void onDraw(GrBatchFlushState*, const SkRect& bounds) = 0;
+
+    static uint32_t GenID(int32_t* idCounter) {
+        // The atomic inc returns the old value not the incremented value. So we add
+        // 1 to the returned value.
+        uint32_t id = static_cast<uint32_t>(sk_atomic_inc(idCounter)) + 1;
+        if (!id) {
+            SkFAIL("This should never wrap as it should only be called once for each GrOp "
+                   "subclass.");
+        }
+        return id;
+    }
+
+    void setBoundsFlags(HasAABloat aabloat, IsZeroArea zeroArea) {
+        fBoundsFlags = 0;
+        fBoundsFlags |= (HasAABloat::kYes == aabloat) ? kAABloat_BoundsFlag : 0;
+        fBoundsFlags |= (IsZeroArea ::kYes == zeroArea) ? kZeroArea_BoundsFlag : 0;
+    }
+
+    enum {
+        kIllegalOpID = 0,
+    };
+
+    enum BoundsFlags {
+        kAABloat_BoundsFlag = 0x1,
+        kZeroArea_BoundsFlag = 0x2,
+        SkDEBUGCODE(kUninitialized_BoundsFlag = 0x4)
+    };
+
+    SkDEBUGCODE(bool fUsed;)
+    const uint16_t fClassID;
+    uint16_t fBoundsFlags;
+
+    static uint32_t GenOpID() { return GenID(&gCurrOpUniqueID); }
+    mutable uint32_t fUniqueID;
+    SkRect fBounds;
+
+    static int32_t gCurrOpUniqueID;
+    static int32_t gCurrOpClassID;
+};
+
+#endif
diff --git a/src/gpu/batches/GrPLSPathRenderer.cpp b/src/gpu/batches/GrPLSPathRenderer.cpp
index f31c323581..924e2a8bc6 100644
--- a/src/gpu/batches/GrPLSPathRenderer.cpp
+++ b/src/gpu/batches/GrPLSPathRenderer.cpp
@@ -765,7 +765,7 @@ bool GrPLSPathRenderer::onCanDrawPath(const CanDrawPathArgs& args) const {
 
 class PLSPathBatch : public GrVertexBatch {
 public:
-    DEFINE_BATCH_CLASS_ID
+    DEFINE_OP_CLASS_ID
 
     PLSPathBatch(GrColor color, const SkPath& path, const SkMatrix& viewMatrix)
         : INHERITED(ClassID())
         , fColor(color)
@@ -915,7 +915,7 @@ public:
     }
 
 private:
-    bool onCombineIfPossible(GrBatch* t, const GrCaps& caps) override {
+    bool onCombineIfPossible(GrOp* t, const GrCaps& caps) override {
         return false;
     }
diff --git a/src/gpu/batches/GrRectBatchFactory.h b/src/gpu/batches/GrRectBatchFactory.h
index c9b6843596..5ae1934d49 100644
--- a/src/gpu/batches/GrRectBatchFactory.h
+++ b/src/gpu/batches/GrRectBatchFactory.h
@@ -17,7 +17,7 @@
 #include "GrPaint.h"
 #include "SkMatrix.h"
 
-class GrBatch;
+class GrOp;
 struct SkRect;
 class SkStrokeRec;
diff --git a/src/gpu/batches/GrRegionBatch.cpp b/src/gpu/batches/GrRegionBatch.cpp
index ae09a5f290..e1eaf2802e 100644
--- a/src/gpu/batches/GrRegionBatch.cpp
+++ b/src/gpu/batches/GrRegionBatch.cpp
@@ -52,7 +52,7 @@ static void tesselate_region(intptr_t vertices,
 
 class RegionBatch : public GrVertexBatch {
 public:
-    DEFINE_BATCH_CLASS_ID
+    DEFINE_OP_CLASS_ID
 
     RegionBatch(GrColor color, const SkMatrix& viewMatrix, const SkRegion& region)
         : INHERITED(ClassID())
@@ -130,7 +130,7 @@ private:
         helper.recordDraw(target, gp.get());
     }
 
-    bool onCombineIfPossible(GrBatch* t, const GrCaps& caps) override {
+    bool onCombineIfPossible(GrOp* t, const GrCaps& caps) override {
         RegionBatch* that = t->cast<RegionBatch>();
         if (!GrPipeline::CanCombine(*this->pipeline(), this->bounds(),
                                     *that->pipeline(), that->bounds(), caps)) {
diff --git a/src/gpu/batches/GrShadowRRectBatch.cpp b/src/gpu/batches/GrShadowRRectBatch.cpp
index c4b56b8e18..6ae6e62dc1 100755
--- a/src/gpu/batches/GrShadowRRectBatch.cpp
+++ b/src/gpu/batches/GrShadowRRectBatch.cpp
@@ -61,7 +61,7 @@ static const uint16_t* circle_type_to_indices(bool stroked) {
 
 class ShadowCircleBatch : public GrVertexBatch {
 public:
-    DEFINE_BATCH_CLASS_ID
+    DEFINE_OP_CLASS_ID
 
     static GrDrawBatch* Create(GrColor color, const SkMatrix& viewMatrix, SkPoint center,
                                SkScalar radius, SkScalar blurRadius, const GrStyle& style) {
@@ -367,7 +367,7 @@ private:
         target->draw(gp.get(), mesh);
     }
 
-    bool onCombineIfPossible(GrBatch* t, const GrCaps& caps) override {
+    bool onCombineIfPossible(GrOp* t, const GrCaps& caps) override {
         ShadowCircleBatch* that = t->cast<ShadowCircleBatch>();
         if (!GrPipeline::CanCombine(*this->pipeline(), this->bounds(),
                                     *that->pipeline(), that->bounds(), caps)) {
@@ -506,7 +506,7 @@ static const uint16_t* rrect_type_to_indices(RRectType type) {
 
 class ShadowCircularRRectBatch : public GrVertexBatch {
 public:
-    DEFINE_BATCH_CLASS_ID
+    DEFINE_OP_CLASS_ID
 
     // A devStrokeWidth <= 0 indicates a fill only. If devStrokeWidth > 0 then strokeOnly indicates
     // whether the rrect is only stroked or stroked and filled.
@@ -796,7 +796,7 @@ private:
         target->draw(gp.get(), mesh);
     }
 
-    bool onCombineIfPossible(GrBatch* t, const GrCaps& caps) override {
+    bool onCombineIfPossible(GrOp* t, const GrCaps& caps) override {
         ShadowCircularRRectBatch* that = t->cast<ShadowCircularRRectBatch>();
         if (!GrPipeline::CanCombine(*this->pipeline(), this->bounds(),
                                     *that->pipeline(), that->bounds(), caps)) {
diff --git a/src/gpu/batches/GrStencilPathBatch.h b/src/gpu/batches/GrStencilPathBatch.h
index b95c75a5ce..293da124ad 100644
--- a/src/gpu/batches/GrStencilPathBatch.h
+++ b/src/gpu/batches/GrStencilPathBatch.h
@@ -8,18 +8,18 @@
 #ifndef GrStencilPathBatch_DEFINED
 #define GrStencilPathBatch_DEFINED
 
-#include "GrBatch.h"
 #include "GrBatchFlushState.h"
 #include "GrGpu.h"
+#include "GrOp.h"
 #include "GrPath.h"
 #include "GrPathRendering.h"
 #include "GrRenderTarget.h"
 
-class GrStencilPathBatch final : public GrBatch {
+class GrStencilPathBatch final : public GrOp {
 public:
-    DEFINE_BATCH_CLASS_ID
+    DEFINE_OP_CLASS_ID
 
-    static GrBatch* Create(const SkMatrix& viewMatrix,
+    static GrOp* Create(const SkMatrix& viewMatrix,
                            bool useHWAA,
                            GrPathRendering::FillType fillType,
                            bool hasStencilClip,
@@ -64,7 +64,7 @@ private:
         this->setBounds(path->getBounds(), HasAABloat::kNo, IsZeroArea::kNo);
     }
 
-    bool onCombineIfPossible(GrBatch* t, const GrCaps& caps) override { return false; }
+    bool onCombineIfPossible(GrOp* t, const GrCaps& caps) override { return false; }
 
     void onPrepare(GrBatchFlushState*) override {}
 
@@ -81,7 +81,7 @@ private:
     GrPendingIOResource<GrRenderTarget, kWrite_GrIOType> fRenderTarget;
     GrPendingIOResource<const GrPath, kRead_GrIOType> fPath;
 
-    typedef GrBatch INHERITED;
+    typedef GrOp INHERITED;
 };
 
 #endif
diff --git a/src/gpu/batches/GrTessellatingPathRenderer.cpp b/src/gpu/batches/GrTessellatingPathRenderer.cpp
index 970af5cd5c..46dd2f751e 100644
--- a/src/gpu/batches/GrTessellatingPathRenderer.cpp
+++ b/src/gpu/batches/GrTessellatingPathRenderer.cpp
@@ -162,7 +162,7 @@ bool GrTessellatingPathRenderer::onCanDrawPath(const CanDrawPathArgs& args) cons
 
 class TessellatingPathBatch : public GrVertexBatch {
 public:
-    DEFINE_BATCH_CLASS_ID
+    DEFINE_OP_CLASS_ID
 
     static GrDrawBatch* Create(const GrColor& color,
                                const GrShape& shape,
@@ -324,7 +324,7 @@ private:
         target->draw(gp, mesh);
     }
 
-    bool onCombineIfPossible(GrBatch*, const GrCaps&) override { return false; }
+    bool onCombineIfPossible(GrOp*, const GrCaps&) override { return false; }
 
     TessellatingPathBatch(const GrColor& color,
                           const GrShape& shape,
diff --git a/src/gpu/batches/GrTestBatch.h b/src/gpu/batches/GrTestBatch.h
index 5bac48ac01..9d76c439ec 100644
--- a/src/gpu/batches/GrTestBatch.h
+++ b/src/gpu/batches/GrTestBatch.h
@@ -55,9 +55,7 @@ protected:
     const Optimizations optimizations() const { return fOptimizations; }
 
 private:
-    bool onCombineIfPossible(GrBatch* t, const GrCaps&) override {
-        return false;
-    }
+    bool onCombineIfPossible(GrOp*, const GrCaps&) override { return false; }
 
     GrColor fColor;
     Optimizations fOptimizations;
--
cgit v1.2.3
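
For orientation, below is a minimal sketch of what a GrOp subclass looks like against the interface introduced above. It is an illustration, not part of the commit: MyNoopOp and its trivial method bodies are invented, and the use of GrGpuResource::UniqueID::InvalidID() is an assumption; GrOp, DEFINE_OP_CLASS_ID, GrBatchFlushState, and GrCaps come from the files in this patch.

#include "GrOp.h"

// Hypothetical GrOp subclass, for illustration only -- not part of this commit.
class MyNoopOp final : public GrOp {
public:
    DEFINE_OP_CLASS_ID  // expands to a static ClassID() backed by GenOpClassID()

    MyNoopOp(const SkRect& deviceBounds) : INHERITED(ClassID()) {
        // Bounds must cover all geometry in device space, irrespective of the clip.
        this->setBounds(deviceBounds, HasAABloat::kNo, IsZeroArea::kNo);
    }

    const char* name() const override { return "MyNoopOp"; }

    // Assumed helper on GrGpuResource::UniqueID; GrOp.h only requires that some ID be returned.
    GrGpuResource::UniqueID renderTargetUniqueID() const override {
        return GrGpuResource::UniqueID::InvalidID();
    }

private:
    // GrOp::combineIfPossible() has already rejected ops with a different
    // ClassID(), so the checked down-cast below cannot fail.
    bool onCombineIfPossible(GrOp* that, const GrCaps&) override {
        this->joinBounds(*that->cast<MyNoopOp>());  // merged op now covers both
        return true;
    }

    void onPrepare(GrBatchFlushState*) override {}              // create GPU resources here
    void onDraw(GrBatchFlushState*, const SkRect&) override {}  // issue commands to GrGpu here

    typedef GrOp INHERITED;
};

Because GrOp overrides operator new and operator delete, a plain new MyNoopOp(bounds) allocates from the shared, spinlock-guarded GrMemoryPool defined in GrOp.cpp rather than from the general heap.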