author    bsalomon <bsalomon@google.com>  2016-07-08 06:40:56 -0700
committer Commit bot <commit-bot@chromium.org>  2016-07-08 06:40:57 -0700
commit    88cf17d099085b8085ab11571b5094163dbb2c84 (patch)
tree      0737705697caa8998453d5519a19a6f12b888104 /src/gpu
parent    ac41bac40f5a80d2bc5ccec584c23478a6900179 (diff)
Consolidate handling of infinitely thin primitives and aa bloat handling WRT batch bounds.
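In rough terms: instead of each batch outsetting its own bounds for AA bloat or hairline/point slop, a batch now records two properties next to its device-space bounds (whether the geometry bloats for non-MSAA antialiasing, and whether it is notionally zero-area), and GrDrawTarget applies the conservative expansion in one place when it consumes those bounds. A minimal sketch of the intended call pattern, using the names introduced below; the batch class itself is hypothetical and leans on Skia machinery (GrVertexBatch, DEFINE_BATCH_CLASS_ID), so it is illustration only, not part of this change:

class HypotheticalHairlineBatch : public GrVertexBatch {
public:
    DEFINE_BATCH_CLASS_ID

    HypotheticalHairlineBatch(GrColor color, const SkMatrix& viewMatrix, const SkPath& path)
        : INHERITED(ClassID()) {
        // Hairlines are infinitely thin and, when antialiased, bloat by half a pixel.
        // Both facts are recorded with the bounds rather than baked into an outset rect.
        this->setTransformedBounds(path.getBounds(), viewMatrix,
                                   HasAABloat::kYes, IsZeroArea::kYes);
    }

private:
    bool onCombineIfPossible(GrBatch* t, const GrCaps&) override {
        // joinBounds() now takes the other batch so the bounds flags are unioned too.
        this->joinBounds(*t);
        return true;
    }

    typedef GrVertexBatch INHERITED;
};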
Diffstat (limited to 'src/gpu')
-rw-r--r--  src/gpu/GrDrawTarget.cpp  49
-rw-r--r--  src/gpu/GrOvalRenderer.cpp  37
-rw-r--r--  src/gpu/batches/GrAAConvexPathRenderer.cpp  7
-rw-r--r--  src/gpu/batches/GrAADistanceFieldPathRenderer.cpp  5
-rw-r--r--  src/gpu/batches/GrAAFillRectBatch.cpp  6
-rw-r--r--  src/gpu/batches/GrAAHairLinePathRenderer.cpp  11
-rw-r--r--  src/gpu/batches/GrAALinearizingConvexPathRenderer.cpp  8
-rw-r--r--  src/gpu/batches/GrAAStrokeRectBatch.cpp  7
-rw-r--r--  src/gpu/batches/GrAtlasTextBatch.cpp  4
-rw-r--r--  src/gpu/batches/GrAtlasTextBatch.h  7
-rw-r--r--  src/gpu/batches/GrBatch.cpp  2
-rw-r--r--  src/gpu/batches/GrBatch.h  80
-rw-r--r--  src/gpu/batches/GrClearBatch.h  6
-rw-r--r--  src/gpu/batches/GrCopySurfaceBatch.h  6
-rw-r--r--  src/gpu/batches/GrDefaultPathRenderer.cpp  11
-rw-r--r--  src/gpu/batches/GrDiscardBatch.h  3
-rw-r--r--  src/gpu/batches/GrDrawAtlasBatch.cpp  7
-rw-r--r--  src/gpu/batches/GrDrawPathBatch.cpp  4
-rw-r--r--  src/gpu/batches/GrDrawPathBatch.h  3
-rw-r--r--  src/gpu/batches/GrDrawVerticesBatch.cpp  10
-rw-r--r--  src/gpu/batches/GrMSAAPathRenderer.cpp  11
-rw-r--r--  src/gpu/batches/GrNinePatch.cpp  4
-rw-r--r--  src/gpu/batches/GrNonAAFillRectBatch.cpp  4
-rw-r--r--  src/gpu/batches/GrNonAAFillRectPerspectiveBatch.cpp  4
-rw-r--r--  src/gpu/batches/GrNonAAStrokeRectBatch.cpp  23
-rw-r--r--  src/gpu/batches/GrPLSPathRenderer.cpp  4
-rw-r--r--  src/gpu/batches/GrStencilPathBatch.h  2
-rw-r--r--  src/gpu/batches/GrTessellatingPathRenderer.cpp  8
-rw-r--r--  src/gpu/batches/GrTestBatch.h  3
-rw-r--r--  src/gpu/effects/GrDashingEffect.cpp  12
-rw-r--r--  src/gpu/instanced/InstancedRendering.cpp  26
31 files changed, 242 insertions, 132 deletions
diff --git a/src/gpu/GrDrawTarget.cpp b/src/gpu/GrDrawTarget.cpp
index 86e0c82f22..356c480da8 100644
--- a/src/gpu/GrDrawTarget.cpp
+++ b/src/gpu/GrDrawTarget.cpp
@@ -281,13 +281,41 @@ void GrDrawTarget::reset() {
}
}
+static void batch_bounds(SkRect* bounds, const GrBatch* batch) {
+ *bounds = batch->bounds();
+ if (batch->hasZeroArea()) {
+ if (batch->hasAABloat()) {
+ bounds->outset(0.5f, 0.5f);
+ } else {
+ // We don't know which way the particular GPU will snap lines or points at integer
+ // coords. So we ensure that the bounds is large enough for either snap.
+ SkRect before = *bounds;
+ bounds->roundOut(bounds);
+ if (bounds->fLeft == before.fLeft) {
+ bounds->fLeft -= 1;
+ }
+ if (bounds->fTop == before.fTop) {
+ bounds->fTop -= 1;
+ }
+ if (bounds->fRight == before.fRight) {
+ bounds->fRight += 1;
+ }
+ if (bounds->fBottom == before.fBottom) {
+ bounds->fBottom += 1;
+ }
+ }
+ }
+}
+
void GrDrawTarget::drawBatch(const GrPipelineBuilder& pipelineBuilder,
GrDrawContext* drawContext,
const GrClip& clip,
GrDrawBatch* batch) {
// Setup clip
GrAppliedClip appliedClip;
- if (!clip.apply(fContext, pipelineBuilder, drawContext, &batch->bounds(), &appliedClip)) {
+ SkRect bounds;
+ batch_bounds(&bounds, batch);
+ if (!clip.apply(fContext, pipelineBuilder, drawContext, &bounds, &appliedClip)) {
return;
}
@@ -469,10 +497,17 @@ bool GrDrawTarget::copySurface(GrSurface* dst,
return true;
}
-template <class Left, class Right> static bool intersect(const Left& a, const Right& b) {
- SkASSERT(a.fLeft <= a.fRight && a.fTop <= a.fBottom &&
- b.fLeft <= b.fRight && b.fTop <= b.fBottom);
- return a.fLeft < b.fRight && b.fLeft < a.fRight && a.fTop < b.fBottom && b.fTop < a.fBottom;
+static inline bool exclusive_no_intersection(const SkRect& a, const SkRect& b) {
+ return a.fRight <= b.fLeft || a.fBottom <= b.fTop ||
+ b.fRight <= a.fLeft || b.fBottom <= a.fTop;
+}
+
+static inline bool can_reorder(const GrBatch* a, const GrBatch* b) {
+ SkRect ra;
+ SkRect rb;
+ batch_bounds(&ra, a);
+ batch_bounds(&rb, b);
+ return exclusive_no_intersection(ra, rb);
}
void GrDrawTarget::recordBatch(GrBatch* batch) {
@@ -512,7 +547,7 @@ void GrDrawTarget::recordBatch(GrBatch* batch) {
// Stop going backwards if we would cause a painter's order violation.
// TODO: The bounds used here do not fully consider the clip. It may be advantageous
// to clip each batch's bounds to the clip.
- if (intersect(candidate->bounds(), batch->bounds())) {
+ if (!can_reorder(candidate, batch)) {
GrBATCH_INFO("\t\tIntersects with (%s, B%u)\n", candidate->name(),
candidate->uniqueID());
break;
@@ -558,7 +593,7 @@ void GrDrawTarget::forwardCombine() {
// Stop traversing if we would cause a painter's order violation.
// TODO: The bounds used here do not fully consider the clip. It may be advantageous
// to clip each batch's bounds to the clip.
- if (intersect(candidate->bounds(), batch->bounds())) {
+ if (!can_reorder(candidate, batch)) {
GrBATCH_INFO("\t\tIntersects with (%s, B%u)\n", candidate->name(),
candidate->uniqueID());
break;
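The expansion rule in batch_bounds() above is easiest to see with concrete numbers. The snippet below is a standalone mirror of the zero-area, no-AA branch (plain C++, no Skia types; names are invented for illustration): a hairline whose edges sit exactly on integer coordinates still gains a full pixel of slop on every side, so two such lines that might snap onto the same pixels can never be reordered past each other by can_reorder().

#include <cmath>
#include <cstdio>

struct Rect { float l, t, r, b; };

// Mirrors the zero-area / no-AA branch of batch_bounds() in the hunk above.
static Rect expand_zero_area_bounds(Rect rc) {
    Rect before = rc;
    rc.l = std::floor(rc.l);
    rc.t = std::floor(rc.t);
    rc.r = std::ceil(rc.r);
    rc.b = std::ceil(rc.b);
    // If rounding out did not move an edge, push it a full pixel anyway so the
    // bounds cover either direction the GPU might snap the primitive.
    if (rc.l == before.l) { rc.l -= 1; }
    if (rc.t == before.t) { rc.t -= 1; }
    if (rc.r == before.r) { rc.r += 1; }
    if (rc.b == before.b) { rc.b += 1; }
    return rc;
}

int main() {
    // A horizontal hairline from (2, 5) to (10, 5): zero height, edges on integers.
    Rect line = {2.0f, 5.0f, 10.0f, 5.0f};
    Rect e = expand_zero_area_bounds(line);
    // Prints 1 4 11 6: one pixel of slop on every side.
    std::printf("%g %g %g %g\n", e.l, e.t, e.r, e.b);
    return 0;
}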
diff --git a/src/gpu/GrOvalRenderer.cpp b/src/gpu/GrOvalRenderer.cpp
index f5dcfcb0f5..aec9b76ba8 100644
--- a/src/gpu/GrOvalRenderer.cpp
+++ b/src/gpu/GrOvalRenderer.cpp
@@ -572,7 +572,12 @@ public:
SkRect::MakeLTRB(center.fX - outerRadius, center.fY - outerRadius,
center.fX + outerRadius, center.fY + outerRadius)
});
- this->setBounds(fGeoData.back().fDevBounds);
+ // Use the original radius and stroke radius for the bounds so that it does not include the
+ // AA bloat.
+ radius += halfWidth;
+ this->setBounds({center.fX - radius, center.fY - radius,
+ center.fX + radius, center.fY + radius},
+ HasAABloat::kYes, IsZeroArea::kNo);
fStroked = isStrokeOnly && innerRadius > 0;
}
@@ -685,7 +690,7 @@ private:
}
fGeoData.push_back_n(that->fGeoData.count(), that->fGeoData.begin());
- this->joinBounds(that->bounds());
+ this->joinBounds(*that);
return true;
}
@@ -777,12 +782,13 @@ public:
center.fX + xRadius, center.fY + yRadius)
});
+ batch->setBounds(batch->fGeoData.back().fDevBounds, HasAABloat::kYes, IsZeroArea::kNo);
+
// Outset bounds to include half-pixel width antialiasing.
batch->fGeoData[0].fDevBounds.outset(SK_ScalarHalf, SK_ScalarHalf);
batch->fStroked = isStrokeOnly && innerXRadius > 0 && innerYRadius > 0;
batch->fViewMatrixIfUsingLocalCoords = viewMatrix;
- batch->setBounds(batch->fGeoData.back().fDevBounds);
return batch;
}
@@ -894,7 +900,7 @@ private:
}
fGeoData.push_back_n(that->fGeoData.count(), that->fGeoData.begin());
- this->joinBounds(that->bounds());
+ this->joinBounds(*that);
return true;
}
@@ -993,9 +999,8 @@ public:
SkRect::MakeLTRB(center.fX - xRadius - geoDx, center.fY - yRadius - geoDy,
center.fX + xRadius + geoDx, center.fY + yRadius + geoDy)
});
- SkRect devBounds = batch->fGeoData.back().fBounds;
- viewMatrix.mapRect(&devBounds);
- batch->setBounds(devBounds);
+ batch->setTransformedBounds(batch->fGeoData[0].fBounds, viewMatrix, HasAABloat::kYes,
+ IsZeroArea::kNo);
return batch;
}
@@ -1092,7 +1097,7 @@ private:
}
fGeoData.push_back_n(that->fGeoData.count(), that->fGeoData.begin());
- this->joinBounds(that->bounds());
+ this->joinBounds(*that);
return true;
}
@@ -1202,11 +1207,12 @@ public:
outerRadius += SK_ScalarHalf;
innerRadius -= SK_ScalarHalf;
- // Expand the rect so all the pixels will be captured.
+ this->setBounds(bounds, HasAABloat::kYes, IsZeroArea::kNo);
+
+ // Expand the rect for aa to generate correct vertices.
bounds.outset(SK_ScalarHalf, SK_ScalarHalf);
fGeoData.emplace_back(Geometry { color, innerRadius, outerRadius, bounds });
- this->setBounds(bounds);
}
const char* name() const override { return "RRectCircleBatch"; }
@@ -1324,7 +1330,7 @@ private:
}
fGeoData.push_back_n(that->fGeoData.count(), that->fGeoData.begin());
- this->joinBounds(that->bounds());
+ this->joinBounds(*that);
return true;
}
@@ -1394,15 +1400,14 @@ public:
bounds.outset(devStrokeWidths.fX, devStrokeWidths.fY);
}
- // Expand the rect so all the pixels will be captured.
- bounds.outset(SK_ScalarHalf, SK_ScalarHalf);
-
RRectEllipseRendererBatch* batch = new RRectEllipseRendererBatch();
batch->fStroked = stroked;
batch->fViewMatrixIfUsingLocalCoords = viewMatrix;
+ batch->setBounds(bounds, HasAABloat::kYes, IsZeroArea::kNo);
+ // Expand the rect for aa in order to generate the correct vertices.
+ bounds.outset(SK_ScalarHalf, SK_ScalarHalf);
batch->fGeoData.emplace_back(
Geometry {color, devXRadius, devYRadius, innerXRadius, innerYRadius, bounds});
- batch->setBounds(bounds);
return batch;
}
@@ -1534,7 +1539,7 @@ private:
}
fGeoData.push_back_n(that->fGeoData.count(), that->fGeoData.begin());
- this->joinBounds(that->bounds());
+ this->joinBounds(*that);
return true;
}
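The GrOvalRenderer hunks above all follow the same ordering, which is easy to get backwards: the batch records its pre-outset geometry as the bounds (tagged HasAABloat::kYes so the draw target adds the half-pixel slop itself), and only afterwards outsets the rect it will actually tessellate. A condensed, hypothetical illustration of that ordering (not code from this change; computeDeviceBounds and the Geometry fields are stand-ins):

    // Hypothetical constructor body following the pattern used in the hunks above.
    SkRect bounds = computeDeviceBounds();                // tight, geometry-only bounds
    this->setBounds(bounds, HasAABloat::kYes, IsZeroArea::kNo);
    bounds.outset(SK_ScalarHalf, SK_ScalarHalf);          // AA expansion only for the vertices
    fGeoData.emplace_back(Geometry{color, bounds});       // vertices cover the bloated rect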
diff --git a/src/gpu/batches/GrAAConvexPathRenderer.cpp b/src/gpu/batches/GrAAConvexPathRenderer.cpp
index 2c5b4fef5f..b0ddaeb65c 100644
--- a/src/gpu/batches/GrAAConvexPathRenderer.cpp
+++ b/src/gpu/batches/GrAAConvexPathRenderer.cpp
@@ -746,9 +746,8 @@ public:
AAConvexPathBatch(GrColor color, const SkMatrix& viewMatrix, const SkPath& path)
: INHERITED(ClassID()) {
fGeoData.emplace_back(Geometry{color, viewMatrix, path});
- // compute bounds
- fBounds = path.getBounds();
- viewMatrix.mapRect(&fBounds);
+ this->setTransformedBounds(path.getBounds(), viewMatrix, HasAABloat::kYes,
+ IsZeroArea::kNo);
}
const char* name() const override { return "AAConvexBatch"; }
@@ -958,7 +957,7 @@ private:
}
fGeoData.push_back_n(that->fGeoData.count(), that->fGeoData.begin());
- this->joinBounds(that->bounds());
+ this->joinBounds(*that);
return true;
}
diff --git a/src/gpu/batches/GrAADistanceFieldPathRenderer.cpp b/src/gpu/batches/GrAADistanceFieldPathRenderer.cpp
index 6f42a494a3..ea802b70dd 100644
--- a/src/gpu/batches/GrAADistanceFieldPathRenderer.cpp
+++ b/src/gpu/batches/GrAADistanceFieldPathRenderer.cpp
@@ -146,8 +146,7 @@ public:
fGammaCorrect = gammaCorrect;
// Compute bounds
- fBounds = shape.bounds();
- viewMatrix.mapRect(&fBounds);
+ this->setTransformedBounds(shape.bounds(), viewMatrix, HasAABloat::kYes, IsZeroArea::kNo);
}
const char* name() const override { return "AADistanceFieldPathBatch"; }
@@ -484,7 +483,7 @@ private:
}
fGeoData.push_back_n(that->fGeoData.count(), that->fGeoData.begin());
- this->joinBounds(that->bounds());
+ this->joinBounds(*that);
return true;
}
diff --git a/src/gpu/batches/GrAAFillRectBatch.cpp b/src/gpu/batches/GrAAFillRectBatch.cpp
index fedde70398..4f93adf074 100644
--- a/src/gpu/batches/GrAAFillRectBatch.cpp
+++ b/src/gpu/batches/GrAAFillRectBatch.cpp
@@ -173,7 +173,9 @@ public:
void* mem = fRectData.push_back_n(sizeof(RectInfo));
new (mem) RectInfo(color, viewMatrix, rect, devRect);
}
- fBounds = devRect;
+ IsZeroArea zeroArea = (!rect.width() || !rect.height()) ? IsZeroArea::kYes
+ : IsZeroArea::kNo;
+ this->setBounds(devRect, HasAABloat::kYes, zeroArea);
fRectCnt = 1;
}
@@ -278,7 +280,7 @@ private:
fRectData.push_back_n(that->fRectData.count(), that->fRectData.begin());
fRectCnt += that->fRectCnt;
- this->joinBounds(that->bounds());
+ this->joinBounds(*that);
return true;
}
diff --git a/src/gpu/batches/GrAAHairLinePathRenderer.cpp b/src/gpu/batches/GrAAHairLinePathRenderer.cpp
index 206a72899f..194c79e41e 100644
--- a/src/gpu/batches/GrAAHairLinePathRenderer.cpp
+++ b/src/gpu/batches/GrAAHairLinePathRenderer.cpp
@@ -683,13 +683,8 @@ public:
SkIRect devClipBounds) : INHERITED(ClassID()) {
fGeoData.emplace_back(Geometry{color, coverage, viewMatrix, path, devClipBounds});
- // compute bounds
- fBounds = path.getBounds();
- viewMatrix.mapRect(&fBounds);
-
- // This is b.c. hairlines are notionally infinitely thin so without expansion
- // two overlapping lines could be reordered even though they hit the same pixels.
- fBounds.outset(0.5f, 0.5f);
+ this->setTransformedBounds(path.getBounds(), viewMatrix, HasAABloat::kYes,
+ IsZeroArea::kYes);
}
const char* name() const override { return "AAHairlineBatch"; }
@@ -759,7 +754,7 @@ private:
}
fGeoData.push_back_n(that->fGeoData.count(), that->fGeoData.begin());
- this->joinBounds(that->bounds());
+ this->joinBounds(*that);
return true;
}
diff --git a/src/gpu/batches/GrAALinearizingConvexPathRenderer.cpp b/src/gpu/batches/GrAALinearizingConvexPathRenderer.cpp
index 4629619860..20d93d8e90 100644
--- a/src/gpu/batches/GrAALinearizingConvexPathRenderer.cpp
+++ b/src/gpu/batches/GrAALinearizingConvexPathRenderer.cpp
@@ -131,7 +131,7 @@ public:
fGeoData.emplace_back(Geometry{color, viewMatrix, path, strokeWidth, join, miterLimit});
// compute bounds
- fBounds = path.getBounds();
+ SkRect bounds = path.getBounds();
SkScalar w = strokeWidth;
if (w > 0) {
w /= 2;
@@ -139,9 +139,9 @@ public:
if (SkPaint::kMiter_Join == join && w > 1.f) {
w *= miterLimit;
}
- fBounds.outset(w, w);
+ bounds.outset(w, w);
}
- viewMatrix.mapRect(&fBounds);
+ this->setTransformedBounds(bounds, viewMatrix, HasAABloat::kYes, IsZeroArea::kNo);
}
const char* name() const override { return "AAConvexBatch"; }
@@ -284,7 +284,7 @@ private:
}
fGeoData.push_back_n(that->fGeoData.count(), that->fGeoData.begin());
- this->joinBounds(that->bounds());
+ this->joinBounds(*that);
return true;
}
diff --git a/src/gpu/batches/GrAAStrokeRectBatch.cpp b/src/gpu/batches/GrAAStrokeRectBatch.cpp
index 8188f7a979..8c42c9a039 100644
--- a/src/gpu/batches/GrAAStrokeRectBatch.cpp
+++ b/src/gpu/batches/GrAAStrokeRectBatch.cpp
@@ -128,7 +128,7 @@ public:
SkASSERT(!devInside.isEmpty());
fGeoData.emplace_back(Geometry{color, devOutside, devOutside, devInside, false});
- fBounds = devOutside;
+ this->setBounds(devOutside, HasAABloat::kYes, IsZeroArea::kNo);
fMiterStroke = true;
}
@@ -145,8 +145,7 @@ public:
compute_rects(&geo.fDevOutside, &geo.fDevOutsideAssist, &geo.fDevInside, &geo.fDegenerate,
viewMatrix, rect, stroke.getWidth(), isMiter);
geo.fColor = color;
- batch->fBounds = geo.fDevOutside;
- batch->fBounds.join(geo.fDevOutsideAssist);
+ batch->setBounds(geo.fDevOutside, HasAABloat::kYes, IsZeroArea::kNo);
batch->fViewMatrix = viewMatrix;
return batch;
}
@@ -413,7 +412,7 @@ bool AAStrokeRectBatch::onCombineIfPossible(GrBatch* t, const GrCaps& caps) {
fBatch.fColor = GrColor_ILLEGAL;
}
fGeoData.push_back_n(that->fGeoData.count(), that->fGeoData.begin());
- this->joinBounds(that->bounds());
+ this->joinBounds(*that);
return true;
}
diff --git a/src/gpu/batches/GrAtlasTextBatch.cpp b/src/gpu/batches/GrAtlasTextBatch.cpp
index 3c4b4a5135..6427bc0844 100644
--- a/src/gpu/batches/GrAtlasTextBatch.cpp
+++ b/src/gpu/batches/GrAtlasTextBatch.cpp
@@ -158,7 +158,7 @@ void GrAtlasTextBatch::onPrepareDraws(Target* target) const {
args.fViewMatrix.mapRect(&rect);
}
// Allow for small numerical error in the bounds.
- SkRect bounds = fBounds;
+ SkRect bounds = this->bounds();
bounds.outset(0.001f, 0.001f);
SkASSERT(bounds.contains(rect));
#endif
@@ -239,7 +239,7 @@ bool GrAtlasTextBatch::onCombineIfPossible(GrBatch* t, const GrCaps& caps) {
that->fGeoCount = 0;
fGeoCount = newGeoCount;
- this->joinBounds(that->bounds());
+ this->joinBounds(*that);
return true;
}
diff --git a/src/gpu/batches/GrAtlasTextBatch.h b/src/gpu/batches/GrAtlasTextBatch.h
index 89a2f4e753..32771832aa 100644
--- a/src/gpu/batches/GrAtlasTextBatch.h
+++ b/src/gpu/batches/GrAtlasTextBatch.h
@@ -82,9 +82,12 @@ public:
void init() {
const Geometry& geo = fGeoData[0];
fBatch.fColor = geo.fColor;
-
- geo.fBlob->computeSubRunBounds(&fBounds, geo.fRun, geo.fSubRun, geo.fViewMatrix, geo.fX,
+ SkRect bounds;
+ geo.fBlob->computeSubRunBounds(&bounds, geo.fRun, geo.fSubRun, geo.fViewMatrix, geo.fX,
geo.fY);
+ // We don't have tight bounds on the glyph paths in device space. For the purposes of bounds
+ // we treat this as a set of non-AA rects rendered with a texture.
+ this->setBounds(bounds, HasAABloat::kNo, IsZeroArea::kNo);
}
const char* name() const override { return "TextBatch"; }
diff --git a/src/gpu/batches/GrBatch.cpp b/src/gpu/batches/GrBatch.cpp
index 900f889ff8..50f94b3902 100644
--- a/src/gpu/batches/GrBatch.cpp
+++ b/src/gpu/batches/GrBatch.cpp
@@ -49,7 +49,9 @@ void GrBatch::operator delete(void* target) {
GrBatch::GrBatch(uint32_t classID)
: fClassID(classID)
, fUniqueID(kIllegalBatchID) {
+ SkASSERT(classID == SkToU32(fClassID));
SkDEBUGCODE(fUsed = false;)
+ SkDEBUGCODE(fBoundsFlags = kUninitialized_BoundsFlag);
}
GrBatch::~GrBatch() {}
diff --git a/src/gpu/batches/GrBatch.h b/src/gpu/batches/GrBatch.h
index b0906ab690..8dafe9fba3 100644
--- a/src/gpu/batches/GrBatch.h
+++ b/src/gpu/batches/GrBatch.h
@@ -10,6 +10,7 @@
#include "../private/SkAtomics.h"
#include "GrNonAtomicRef.h"
+#include "SkMatrix.h"
#include "SkRect.h"
#include "SkString.h"
@@ -70,7 +71,20 @@ public:
return this->onCombineIfPossible(that, caps);
}
- const SkRect& bounds() const { return fBounds; }
+ const SkRect& bounds() const {
+ SkASSERT(kUninitialized_BoundsFlag != fBoundsFlags);
+ return fBounds;
+ }
+
+ bool hasAABloat() const {
+ SkASSERT(fBoundsFlags != kUninitialized_BoundsFlag);
+ return SkToBool(fBoundsFlags & kAABloat_BoundsFlag);
+ }
+
+ bool hasZeroArea() const {
+ SkASSERT(fBoundsFlags != kUninitialized_BoundsFlag);
+ return SkToBool(fBoundsFlags & kZeroArea_BoundsFlag);
+ }
void* operator new(size_t size);
void operator delete(void* target);
@@ -129,17 +143,49 @@ public:
virtual GrRenderTarget* renderTarget() const = 0;
protected:
- // NOTE, compute some bounds, even if extremely conservative. Do *NOT* setLargest on the bounds
- // rect because we outset it for dst copy textures
- void setBounds(const SkRect& newBounds) { fBounds = newBounds; }
+ /**
+ * Indicates that the batch will produce geometry that extends beyond its bounds for the
+ * purpose of ensuring that the fragment shader runs on partially covered pixels for
+ * non-MSAA antialiasing.
+ */
+ enum class HasAABloat {
+ kYes,
+ kNo
+ };
+ /**
+ * Indicates that the geometry represented by the batch has zero area (i.e. it is hairline
+ * or points).
+ */
+ enum class IsZeroArea {
+ kYes,
+ kNo
+ };
+ void setBounds(const SkRect& newBounds, HasAABloat aabloat, IsZeroArea zeroArea) {
+ fBounds = newBounds;
+ this->setBoundsFlags(aabloat, zeroArea);
+ }
+ void setTransformedBounds(const SkRect& srcBounds, const SkMatrix& m,
+ HasAABloat aabloat, IsZeroArea zeroArea) {
+ m.mapRect(&fBounds, srcBounds);
+ this->setBoundsFlags(aabloat, zeroArea);
+ }
- void joinBounds(const SkRect& otherBounds) {
- return fBounds.joinPossiblyEmptyRect(otherBounds);
+ void joinBounds(const GrBatch& that) {
+ if (that.hasAABloat()) {
+ fBoundsFlags |= kAABloat_BoundsFlag;
+ }
+ if (that.hasZeroArea()) {
+ fBoundsFlags |= kZeroArea_BoundsFlag;
+ }
+ return fBounds.joinPossiblyEmptyRect(that.fBounds);
}
- static uint32_t GenBatchClassID() { return GenID(&gCurrBatchClassID); }
+ void replaceBounds(const GrBatch& that) {
+ fBounds = that.fBounds;
+ fBoundsFlags = that.fBoundsFlags;
+ }
- SkRect fBounds;
+ static uint32_t GenBatchClassID() { return GenID(&gCurrBatchClassID); }
private:
virtual bool onCombineIfPossible(GrBatch*, const GrCaps& caps) = 0;
@@ -158,14 +204,30 @@ private:
return id;
}
+ void setBoundsFlags(HasAABloat aabloat, IsZeroArea zeroArea) {
+ fBoundsFlags = 0;
+ fBoundsFlags |= (HasAABloat::kYes == aabloat) ? kAABloat_BoundsFlag : 0;
+ fBoundsFlags |= (IsZeroArea ::kYes == zeroArea) ? kZeroArea_BoundsFlag : 0;
+ }
+
enum {
kIllegalBatchID = 0,
};
+ enum BoundsFlags {
+ kAABloat_BoundsFlag = 0x1,
+ kZeroArea_BoundsFlag = 0x2,
+ SkDEBUGCODE(kUninitialized_BoundsFlag = 0x4)
+ };
+
SkDEBUGCODE(bool fUsed;)
- const uint32_t fClassID;
+ const uint16_t fClassID;
+ uint16_t fBoundsFlags;
+
static uint32_t GenBatchID() { return GenID(&gCurrBatchUniqueID); }
mutable uint32_t fUniqueID;
+ SkRect fBounds;
+
static int32_t gCurrBatchUniqueID;
static int32_t gCurrBatchClassID;
};
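A small, self-contained mimic of the new bookkeeping in GrBatch.h (again plain C++ with no Skia dependencies; the names are invented for illustration) shows the intent of joinBounds(): when two batches are merged, the union keeps the AA-bloat and zero-area properties of either input, so the central expansion in GrDrawTarget stays conservative for the combined batch.

#include <algorithm>
#include <cstdint>
#include <cstdio>

// Invented stand-in for the GrBatch bounds bookkeeping above.
struct BoundsInfo {
    enum Flags : uint16_t { kAABloat = 0x1, kZeroArea = 0x2 };
    float l = 0, t = 0, r = 0, b = 0;
    uint16_t flags = 0;

    void set(float L, float T, float R, float B, bool aaBloat, bool zeroArea) {
        l = L; t = T; r = R; b = B;
        flags = (aaBloat ? kAABloat : 0) | (zeroArea ? kZeroArea : 0);
    }
    // Equivalent of GrBatch::joinBounds(): union the rects and OR the flags.
    void join(const BoundsInfo& that) {
        l = std::min(l, that.l); t = std::min(t, that.t);
        r = std::max(r, that.r); b = std::max(b, that.b);
        flags |= that.flags;
    }
};

int main() {
    BoundsInfo aaRect, hairline;
    aaRect.set(0, 0, 10, 10, /*aaBloat=*/true,  /*zeroArea=*/false);
    hairline.set(4, 12, 4, 20, /*aaBloat=*/false, /*zeroArea=*/true);
    aaRect.join(hairline);
    // The merged batch reports both properties, so it gets both the half-pixel
    // AA outset and the zero-area snap slop when its bounds are consumed.
    std::printf("flags=0x%x\n", aaRect.flags);  // prints flags=0x3
    return 0;
}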
diff --git a/src/gpu/batches/GrClearBatch.h b/src/gpu/batches/GrClearBatch.h
index 79e10ca167..9a653a3962 100644
--- a/src/gpu/batches/GrClearBatch.h
+++ b/src/gpu/batches/GrClearBatch.h
@@ -23,7 +23,7 @@ public:
, fRect(rect)
, fColor(color)
, fRenderTarget(rt) {
- fBounds = SkRect::Make(rect);
+ this->setBounds(SkRect::Make(rect), HasAABloat::kNo, IsZeroArea::kNo);
}
const char* name() const override { return "Clear"; }
@@ -49,7 +49,7 @@ private:
SkASSERT(cb->fRenderTarget == fRenderTarget);
if (cb->fRect.contains(fRect)) {
fRect = cb->fRect;
- fBounds = cb->fBounds;
+ this->replaceBounds(*t);
fColor = cb->fColor;
return true;
} else if (cb->fColor == fColor && fRect.contains(cb->fRect)) {
@@ -80,7 +80,7 @@ public:
, fRect(rect)
, fInsideClip(insideClip)
, fRenderTarget(rt) {
- fBounds = SkRect::Make(rect);
+ this->setBounds(SkRect::Make(rect), HasAABloat::kNo, IsZeroArea::kNo);
}
const char* name() const override { return "ClearStencilClip"; }
diff --git a/src/gpu/batches/GrCopySurfaceBatch.h b/src/gpu/batches/GrCopySurfaceBatch.h
index e0da431587..05b30b24b0 100644
--- a/src/gpu/batches/GrCopySurfaceBatch.h
+++ b/src/gpu/batches/GrCopySurfaceBatch.h
@@ -56,8 +56,10 @@ private:
, fSrc(src)
, fSrcRect(srcRect)
, fDstPoint(dstPoint) {
- fBounds = SkRect::MakeXYWH(SkIntToScalar(dstPoint.fX), SkIntToScalar(dstPoint.fY),
- SkIntToScalar(srcRect.width()), SkIntToScalar(srcRect.height()));
+ SkRect bounds =
+ SkRect::MakeXYWH(SkIntToScalar(dstPoint.fX), SkIntToScalar(dstPoint.fY),
+ SkIntToScalar(srcRect.width()), SkIntToScalar(srcRect.height()));
+ this->setBounds(bounds, HasAABloat::kNo, IsZeroArea::kNo);
}
bool onCombineIfPossible(GrBatch* that, const GrCaps& caps) override { return false; }
diff --git a/src/gpu/batches/GrDefaultPathRenderer.cpp b/src/gpu/batches/GrDefaultPathRenderer.cpp
index a1fbc09e5f..335f3b60fa 100644
--- a/src/gpu/batches/GrDefaultPathRenderer.cpp
+++ b/src/gpu/batches/GrDefaultPathRenderer.cpp
@@ -106,13 +106,8 @@ public:
fBatch.fViewMatrix = viewMatrix;
fGeoData.emplace_back(Geometry{color, path, tolerance});
- this->setBounds(devBounds);
-
- // This is b.c. hairlines are notionally infinitely thin so without expansion
- // two overlapping lines could be reordered even though they hit the same pixels.
- if (isHairline) {
- fBounds.outset(0.5f, 0.5f);
- }
+ this->setBounds(devBounds, HasAABloat::kNo,
+ isHairline ? IsZeroArea::kYes : IsZeroArea::kNo);
}
const char* name() const override { return "DefaultPathBatch"; }
@@ -286,7 +281,7 @@ private:
}
fGeoData.push_back_n(that->fGeoData.count(), that->fGeoData.begin());
- this->joinBounds(that->bounds());
+ this->joinBounds(*that);
return true;
}
diff --git a/src/gpu/batches/GrDiscardBatch.h b/src/gpu/batches/GrDiscardBatch.h
index a739f23b7d..5dafe54f11 100644
--- a/src/gpu/batches/GrDiscardBatch.h
+++ b/src/gpu/batches/GrDiscardBatch.h
@@ -20,7 +20,8 @@ public:
GrDiscardBatch(GrRenderTarget* rt)
: INHERITED(ClassID())
, fRenderTarget(rt) {
- fBounds = SkRect::MakeWH(SkIntToScalar(rt->width()), SkIntToScalar(rt->height()));
+ this->setBounds(SkRect::MakeIWH(rt->width(), rt->height()), HasAABloat::kNo,
+ IsZeroArea::kNo);
}
const char* name() const override { return "Discard"; }
diff --git a/src/gpu/batches/GrDrawAtlasBatch.cpp b/src/gpu/batches/GrDrawAtlasBatch.cpp
index 9a8952fa3a..6f1bfedfec 100644
--- a/src/gpu/batches/GrDrawAtlasBatch.cpp
+++ b/src/gpu/batches/GrDrawAtlasBatch.cpp
@@ -159,10 +159,7 @@ GrDrawAtlasBatch::GrDrawAtlasBatch(GrColor color, const SkMatrix& viewMatrix, in
currVertex += vertexStride;
}
- viewMatrix.mapRect(&bounds);
- // Outset for a half pixel in each direction to account for snapping in non-AA case
- bounds.outset(0.5f, 0.5f);
- this->setBounds(bounds);
+ this->setTransformedBounds(bounds, viewMatrix, HasAABloat::kNo, IsZeroArea::kNo);
}
bool GrDrawAtlasBatch::onCombineIfPossible(GrBatch* t, const GrCaps& caps) {
@@ -192,7 +189,7 @@ bool GrDrawAtlasBatch::onCombineIfPossible(GrBatch* t, const GrCaps& caps) {
fGeoData.push_back_n(that->fGeoData.count(), that->fGeoData.begin());
fQuadCount += that->quadCount();
- this->joinBounds(that->bounds());
+ this->joinBounds(*that);
return true;
}
diff --git a/src/gpu/batches/GrDrawPathBatch.cpp b/src/gpu/batches/GrDrawPathBatch.cpp
index b0f7d5e89c..ffe012f74b 100644
--- a/src/gpu/batches/GrDrawPathBatch.cpp
+++ b/src/gpu/batches/GrDrawPathBatch.cpp
@@ -57,7 +57,7 @@ GrDrawPathRangeBatch::GrDrawPathRangeBatch(const SkMatrix& viewMatrix, SkScalar
, fTotalPathCount(instanceData->count())
, fScale(scale) {
fDraws.addToHead()->set(instanceData, x, y);
- fBounds = bounds;
+ this->setBounds(bounds, HasAABloat::kNo, IsZeroArea::kNo);
}
bool GrDrawPathRangeBatch::onCombineIfPossible(GrBatch* t, const GrCaps& caps) {
@@ -112,7 +112,7 @@ bool GrDrawPathRangeBatch::onCombineIfPossible(GrBatch* t, const GrCaps& caps) {
draw->fY = head->fY;
that->fDraws.popHead();
}
- this->joinBounds(that->fBounds);
+ this->joinBounds(*that);
return true;
}
diff --git a/src/gpu/batches/GrDrawPathBatch.h b/src/gpu/batches/GrDrawPathBatch.h
index 7e695d83df..cfdbc02c3d 100644
--- a/src/gpu/batches/GrDrawPathBatch.h
+++ b/src/gpu/batches/GrDrawPathBatch.h
@@ -78,8 +78,7 @@ private:
const GrPath* path)
: INHERITED(ClassID(), viewMatrix, color, fill)
, fPath(path) {
- fBounds = path->getBounds();
- viewMatrix.mapRect(&fBounds);
+ this->setTransformedBounds(path->getBounds(), viewMatrix, HasAABloat::kNo, IsZeroArea::kNo);
}
bool onCombineIfPossible(GrBatch* t, const GrCaps& caps) override { return false; }
diff --git a/src/gpu/batches/GrDrawVerticesBatch.cpp b/src/gpu/batches/GrDrawVerticesBatch.cpp
index 81f418f2db..e565022055 100644
--- a/src/gpu/batches/GrDrawVerticesBatch.cpp
+++ b/src/gpu/batches/GrDrawVerticesBatch.cpp
@@ -63,7 +63,13 @@ GrDrawVerticesBatch::GrDrawVerticesBatch(GrColor color, GrPrimitiveType primitiv
fIndexCount = indexCount;
fPrimitiveType = primitiveType;
- this->setBounds(bounds);
+ IsZeroArea zeroArea;
+ if (GrIsPrimTypeLines(primitiveType) || kPoints_GrPrimitiveType == primitiveType) {
+ zeroArea = IsZeroArea::kYes;
+ } else {
+ zeroArea = IsZeroArea::kNo;
+ }
+ this->setBounds(bounds, HasAABloat::kNo, zeroArea);
}
void GrDrawVerticesBatch::computePipelineOptimizations(GrInitInvariantOutput* color,
@@ -200,7 +206,7 @@ bool GrDrawVerticesBatch::onCombineIfPossible(GrBatch* t, const GrCaps& caps) {
fVertexCount += that->fVertexCount;
fIndexCount += that->fIndexCount;
- this->joinBounds(that->bounds());
+ this->joinBounds(*that);
return true;
}
diff --git a/src/gpu/batches/GrMSAAPathRenderer.cpp b/src/gpu/batches/GrMSAAPathRenderer.cpp
index f0137c6ce9..51389ecddb 100644
--- a/src/gpu/batches/GrMSAAPathRenderer.cpp
+++ b/src/gpu/batches/GrMSAAPathRenderer.cpp
@@ -225,12 +225,11 @@ class MSAAPathBatch : public GrVertexBatch {
public:
DEFINE_BATCH_CLASS_ID
- MSAAPathBatch(GrColor color, const SkPath& path, const SkMatrix& viewMatrix,
- const SkRect& devBounds)
+ MSAAPathBatch(GrColor color, const SkPath& path, const SkMatrix& viewMatrix)
: INHERITED(ClassID())
, fViewMatrix(viewMatrix) {
fPaths.emplace_back(PathInfo{color, path});
- this->setBounds(devBounds);
+ this->setTransformedBounds(path.getBounds(), viewMatrix, HasAABloat::kNo, IsZeroArea::kNo);
int contourCount;
this->computeWorstCasePointCount(path, &contourCount, &fMaxLineVertices, &fMaxQuadVertices);
fMaxLineIndices = fMaxLineVertices * 3;
@@ -460,7 +459,7 @@ private:
}
fPaths.push_back_n(that->fPaths.count(), that->fPaths.begin());
- this->joinBounds(that->bounds());
+ this->joinBounds(*that);
fIsIndexed = true;
fMaxLineVertices += that->fMaxLineVertices;
fMaxQuadVertices += that->fMaxQuadVertices;
@@ -668,8 +667,8 @@ bool GrMSAAPathRenderer::internalDrawPath(GrDrawContext* drawContext,
drawContext->drawBatch(pipelineBuilder, clip, batch);
} else {
- SkAutoTUnref<MSAAPathBatch> batch(new MSAAPathBatch(paint.getColor(), path, viewMatrix,
- devBounds));
+ SkAutoTUnref<MSAAPathBatch> batch(new MSAAPathBatch(paint.getColor(), path,
+ viewMatrix));
if (!batch->isValid()) {
return false;
}
diff --git a/src/gpu/batches/GrNinePatch.cpp b/src/gpu/batches/GrNinePatch.cpp
index 1c90f83dbc..62af20b2cf 100644
--- a/src/gpu/batches/GrNinePatch.cpp
+++ b/src/gpu/batches/GrNinePatch.cpp
@@ -44,7 +44,7 @@ public:
fImageHeight = imageHeight;
// setup bounds
- patch.fViewMatrix.mapRect(&fBounds, patch.fDst);
+ this->setTransformedBounds(patch.fDst, viewMatrix, HasAABloat::kNo, IsZeroArea::kNo);
}
const char* name() const override { return "NonAANinePatchBatch"; }
@@ -152,7 +152,7 @@ private:
}
fPatches.push_back_n(that->fPatches.count(), that->fPatches.begin());
- this->joinBounds(that->bounds());
+ this->joinBounds(*that);
return true;
}
diff --git a/src/gpu/batches/GrNonAAFillRectBatch.cpp b/src/gpu/batches/GrNonAAFillRectBatch.cpp
index aaeabdaeb7..e2fff2adad 100644
--- a/src/gpu/batches/GrNonAAFillRectBatch.cpp
+++ b/src/gpu/batches/GrNonAAFillRectBatch.cpp
@@ -91,7 +91,7 @@ public:
} else {
info.fLocalQuad.set(rect);
}
- viewMatrix.mapRect(&fBounds, fRects[0].fRect);
+ this->setTransformedBounds(fRects[0].fRect, viewMatrix, HasAABloat::kNo, IsZeroArea::kNo);
}
const char* name() const override { return "NonAAFillRectBatch"; }
@@ -170,7 +170,7 @@ private:
}
fRects.push_back_n(that->fRects.count(), that->fRects.begin());
- this->joinBounds(that->bounds());
+ this->joinBounds(*that);
return true;
}
diff --git a/src/gpu/batches/GrNonAAFillRectPerspectiveBatch.cpp b/src/gpu/batches/GrNonAAFillRectPerspectiveBatch.cpp
index 3cff209840..aa5a4203cf 100644
--- a/src/gpu/batches/GrNonAAFillRectPerspectiveBatch.cpp
+++ b/src/gpu/batches/GrNonAAFillRectPerspectiveBatch.cpp
@@ -111,7 +111,7 @@ public:
if (fHasLocalRect) {
info.fLocalRect = *localRect;
}
- viewMatrix.mapRect(&fBounds, rect);
+ this->setTransformedBounds(rect, viewMatrix, HasAABloat::kNo, IsZeroArea::kNo);
}
const char* name() const override { return "NonAAFillRectPerspectiveBatch"; }
@@ -211,7 +211,7 @@ private:
}
fRects.push_back_n(that->fRects.count(), that->fRects.begin());
- this->joinBounds(that->bounds());
+ this->joinBounds(*that);
return true;
}
diff --git a/src/gpu/batches/GrNonAAStrokeRectBatch.cpp b/src/gpu/batches/GrNonAAStrokeRectBatch.cpp
index 299d995fc7..f443b32f19 100644
--- a/src/gpu/batches/GrNonAAStrokeRectBatch.cpp
+++ b/src/gpu/batches/GrNonAAStrokeRectBatch.cpp
@@ -72,24 +72,25 @@ public:
batch->fRect.sort();
batch->fStrokeWidth = stroke.getWidth();
- batch->fBounds = batch->fRect;
SkScalar rad = SkScalarHalf(batch->fStrokeWidth);
- batch->fBounds.outset(rad, rad);
- batch->fViewMatrix.mapRect(&batch->fBounds);
+ SkRect bounds = rect;
+ bounds.outset(rad, rad);
// If our caller snaps to pixel centers then we have to round out the bounds
if (snapToPixelCenters) {
+ viewMatrix.mapRect(&bounds);
// We want to be consistent with how we snap non-aa lines. To match what we do in
// GrGLSLVertexShaderBuilder, we first floor all the vertex values and then add half a
// pixel to force us to pixel centers.
- batch->fBounds.set(SkScalarFloorToScalar(batch->fBounds.fLeft),
- SkScalarFloorToScalar(batch->fBounds.fTop),
- SkScalarFloorToScalar(batch->fBounds.fRight),
- SkScalarFloorToScalar(batch->fBounds.fBottom));
- batch->fBounds.offset(0.5f, 0.5f);
-
- // Round out the bounds to integer values
- batch->fBounds.roundOut();
+ bounds.set(SkScalarFloorToScalar(bounds.fLeft),
+ SkScalarFloorToScalar(bounds.fTop),
+ SkScalarFloorToScalar(bounds.fRight),
+ SkScalarFloorToScalar(bounds.fBottom));
+ bounds.offset(0.5f, 0.5f);
+ batch->setBounds(bounds, HasAABloat::kNo, IsZeroArea::kNo);
+ } else {
+ batch->setTransformedBounds(bounds, batch->fViewMatrix, HasAABloat ::kNo,
+ IsZeroArea::kNo);
}
return batch;
}
diff --git a/src/gpu/batches/GrPLSPathRenderer.cpp b/src/gpu/batches/GrPLSPathRenderer.cpp
index 6ec5a3af98..ad9bde1cfb 100644
--- a/src/gpu/batches/GrPLSPathRenderer.cpp
+++ b/src/gpu/batches/GrPLSPathRenderer.cpp
@@ -793,8 +793,8 @@ public:
, fPath(path)
, fViewMatrix(viewMatrix) {
// compute bounds
- fBounds = path.getBounds();
- fViewMatrix.mapRect(&fBounds);
+ this->setTransformedBounds(path.getBounds(), fViewMatrix, HasAABloat::kYes,
+ IsZeroArea::kNo);
}
const char* name() const override { return "PLSBatch"; }
diff --git a/src/gpu/batches/GrStencilPathBatch.h b/src/gpu/batches/GrStencilPathBatch.h
index 42cd3e9f93..cc55659a34 100644
--- a/src/gpu/batches/GrStencilPathBatch.h
+++ b/src/gpu/batches/GrStencilPathBatch.h
@@ -59,7 +59,7 @@ private:
, fScissor(scissor)
, fRenderTarget(renderTarget)
, fPath(path) {
- fBounds = path->getBounds();
+ this->setBounds(path->getBounds(), HasAABloat::kNo, IsZeroArea::kNo);
}
bool onCombineIfPossible(GrBatch* t, const GrCaps& caps) override { return false; }
diff --git a/src/gpu/batches/GrTessellatingPathRenderer.cpp b/src/gpu/batches/GrTessellatingPathRenderer.cpp
index 992d7735f2..b022e40330 100644
--- a/src/gpu/batches/GrTessellatingPathRenderer.cpp
+++ b/src/gpu/batches/GrTessellatingPathRenderer.cpp
@@ -238,12 +238,8 @@ private:
// Because the clip bounds are used to add a contour for inverse fills, they must also
// include the path bounds.
fClipBounds.join(pathBounds);
- if (shape.inverseFilled()) {
- fBounds = fClipBounds;
- } else {
- fBounds = pathBounds;
- }
- viewMatrix.mapRect(&fBounds);
+ const SkRect& srcBounds = shape.inverseFilled() ? fClipBounds : pathBounds;
+ this->setTransformedBounds(srcBounds, viewMatrix, HasAABloat::kNo, IsZeroArea::kNo);
}
GrColor fColor;
diff --git a/src/gpu/batches/GrTestBatch.h b/src/gpu/batches/GrTestBatch.h
index 273baaebfd..5bac48ac01 100644
--- a/src/gpu/batches/GrTestBatch.h
+++ b/src/gpu/batches/GrTestBatch.h
@@ -41,7 +41,8 @@ protected:
GrTestBatch(uint32_t classID, const SkRect& bounds, GrColor color)
: INHERITED(classID)
, fColor(color) {
- this->setBounds(bounds);
+ // Choose some conservative values for aa bloat and zero area.
+ this->setBounds(bounds, HasAABloat::kYes, IsZeroArea::kYes);
}
struct Optimizations {
diff --git a/src/gpu/effects/GrDashingEffect.cpp b/src/gpu/effects/GrDashingEffect.cpp
index 4e82a62195..5949f1f8f2 100644
--- a/src/gpu/effects/GrDashingEffect.cpp
+++ b/src/gpu/effects/GrDashingEffect.cpp
@@ -282,13 +282,17 @@ private:
// compute bounds
SkScalar halfStrokeWidth = 0.5f * geometry.fSrcStrokeWidth;
SkScalar xBloat = SkPaint::kButt_Cap == cap ? 0 : halfStrokeWidth;
- fBounds.set(geometry.fPtsRot[0], geometry.fPtsRot[1]);
- fBounds.outset(xBloat, halfStrokeWidth);
+ SkRect bounds;
+ bounds.set(geometry.fPtsRot[0], geometry.fPtsRot[1]);
+ bounds.outset(xBloat, halfStrokeWidth);
// Note, we actually create the combined matrix here, and save the work
SkMatrix& combinedMatrix = fGeoData[0].fSrcRotInv;
combinedMatrix.postConcat(geometry.fViewMatrix);
- combinedMatrix.mapRect(&fBounds);
+
+ IsZeroArea zeroArea = geometry.fSrcStrokeWidth ? IsZeroArea::kNo : IsZeroArea::kYes;
+ HasAABloat aaBloat = (aaMode == AAMode::kNone) ? HasAABloat ::kNo : HasAABloat::kYes;
+ this->setTransformedBounds(bounds, combinedMatrix, aaBloat, zeroArea);
}
void initBatchTracker(const GrXPOverridesForBatch& overrides) override {
@@ -655,7 +659,7 @@ private:
}
fGeoData.push_back_n(that->geoData()->count(), that->geoData()->begin());
- this->joinBounds(that->bounds());
+ this->joinBounds(*that);
return true;
}
diff --git a/src/gpu/instanced/InstancedRendering.cpp b/src/gpu/instanced/InstancedRendering.cpp
index d96bb39786..fd9df6efe9 100644
--- a/src/gpu/instanced/InstancedRendering.cpp
+++ b/src/gpu/instanced/InstancedRendering.cpp
@@ -123,6 +123,12 @@ InstancedRendering::Batch* InstancedRendering::recordShape(ShapeType type, const
Instance& instance = batch->getSingleInstance();
instance.fInfo = (int)type << kShapeType_InfoBit;
+ Batch::HasAABloat aaBloat = (antialiasMode == AntialiasMode::kCoverage)
+ ? Batch::HasAABloat::kYes
+ : Batch::HasAABloat::kNo;
+ Batch::IsZeroArea zeroArea = (bounds.isEmpty()) ? Batch::IsZeroArea::kYes
+ : Batch::IsZeroArea::kNo;
+
// The instanced shape renderer draws rectangles of [-1, -1, +1, +1], so we find the matrix that
// will map this rectangle to the same device coordinates as "viewMatrix * bounds".
float sx = 0.5f * bounds.width();
@@ -145,10 +151,12 @@ InstancedRendering::Batch* InstancedRendering::recordShape(ShapeType type, const
// it's quite simple to find the bounding rectangle:
float devBoundsHalfWidth = fabsf(m[0]) + fabsf(m[1]);
float devBoundsHalfHeight = fabsf(m[3]) + fabsf(m[4]);
- batch->fBounds.fLeft = m[2] - devBoundsHalfWidth;
- batch->fBounds.fRight = m[2] + devBoundsHalfWidth;
- batch->fBounds.fTop = m[5] - devBoundsHalfHeight;
- batch->fBounds.fBottom = m[5] + devBoundsHalfHeight;
+ SkRect batchBounds;
+ batchBounds.fLeft = m[2] - devBoundsHalfWidth;
+ batchBounds.fRight = m[2] + devBoundsHalfWidth;
+ batchBounds.fTop = m[5] - devBoundsHalfHeight;
+ batchBounds.fBottom = m[5] + devBoundsHalfHeight;
+ batch->setBounds(batchBounds, aaBloat, zeroArea);
// TODO: Is this worth the CPU overhead?
batch->fInfo.fNonSquare =
@@ -174,8 +182,7 @@ InstancedRendering::Batch* InstancedRendering::recordShape(ShapeType type, const
shapeMatrix[SkMatrix::kMPersp2]);
batch->fInfo.fHasPerspective = true;
- viewMatrix.mapRect(&batch->fBounds, bounds);
-
+ batch->setBounds(bounds, aaBloat, zeroArea);
batch->fInfo.fNonSquare = true;
}
@@ -184,7 +191,7 @@ InstancedRendering::Batch* InstancedRendering::recordShape(ShapeType type, const
const float* rectAsFloats = localRect.asScalars(); // Ensure SkScalar == float.
memcpy(&instance.fLocalRect, rectAsFloats, 4 * sizeof(float));
- batch->fPixelLoad = batch->fBounds.height() * batch->fBounds.width();
+ batch->fPixelLoad = batch->bounds().height() * batch->bounds().width();
return batch;
}
@@ -352,7 +359,8 @@ void InstancedRendering::Batch::initBatchTracker(const GrXPOverridesForBatch& ov
if (kRect_ShapeFlag == fInfo.fShapeTypes) {
draw.fGeometry = InstanceProcessor::GetIndexRangeForRect(fInfo.fAntialiasMode);
} else if (kOval_ShapeFlag == fInfo.fShapeTypes) {
- draw.fGeometry = InstanceProcessor::GetIndexRangeForOval(fInfo.fAntialiasMode, fBounds);
+ draw.fGeometry = InstanceProcessor::GetIndexRangeForOval(fInfo.fAntialiasMode,
+ this->bounds());
} else {
draw.fGeometry = InstanceProcessor::GetIndexRangeForRRect(fInfo.fAntialiasMode);
}
@@ -401,7 +409,7 @@ bool InstancedRendering::Batch::onCombineIfPossible(GrBatch* other, const GrCaps
}
}
- fBounds.join(that->fBounds);
+ this->joinBounds(*that);
fInfo = combinedInfo;
fPixelLoad += that->fPixelLoad;