about summary refs log tree commit diff homepage
path: root/src/gpu/batches
diff options
context:
space:
mode:
Diffstat (limited to 'src/gpu/batches')
-rw-r--r--src/gpu/batches/GrAAConvexPathRenderer.cpp1022
-rw-r--r--src/gpu/batches/GrAAConvexPathRenderer.h24
-rw-r--r--src/gpu/batches/GrAAConvexTessellator.cpp1027
-rw-r--r--src/gpu/batches/GrAAConvexTessellator.h270
-rw-r--r--src/gpu/batches/GrAADistanceFieldPathRenderer.cpp626
-rwxr-xr-xsrc/gpu/batches/GrAADistanceFieldPathRenderer.h75
-rw-r--r--src/gpu/batches/GrAAHairLinePathRenderer.cpp998
-rw-r--r--src/gpu/batches/GrAAHairLinePathRenderer.h33
-rw-r--r--src/gpu/batches/GrAALinearizingConvexPathRenderer.cpp345
-rw-r--r--src/gpu/batches/GrAALinearizingConvexPathRenderer.h24
-rw-r--r--src/gpu/batches/GrDashLinePathRenderer.cpp26
-rw-r--r--src/gpu/batches/GrDashLinePathRenderer.h29
-rw-r--r--src/gpu/batches/GrStencilAndCoverPathRenderer.cpp158
-rw-r--r--src/gpu/batches/GrStencilAndCoverPathRenderer.h45
-rw-r--r--src/gpu/batches/GrTessellatingPathRenderer.cpp1660
-rw-r--r--src/gpu/batches/GrTessellatingPathRenderer.h33
16 files changed, 6395 insertions, 0 deletions
diff --git a/src/gpu/batches/GrAAConvexPathRenderer.cpp b/src/gpu/batches/GrAAConvexPathRenderer.cpp
new file mode 100644
index 0000000000..6023f188df
--- /dev/null
+++ b/src/gpu/batches/GrAAConvexPathRenderer.cpp
@@ -0,0 +1,1022 @@
+
+/*
+ * Copyright 2012 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "GrAAConvexPathRenderer.h"
+
+#include "GrAAConvexTessellator.h"
+#include "GrBatchFlushState.h"
+#include "GrBatchTest.h"
+#include "GrCaps.h"
+#include "GrContext.h"
+#include "GrDefaultGeoProcFactory.h"
+#include "GrGeometryProcessor.h"
+#include "GrInvariantOutput.h"
+#include "GrPathUtils.h"
+#include "GrProcessor.h"
+#include "GrPipelineBuilder.h"
+#include "GrStrokeInfo.h"
+#include "SkGeometry.h"
+#include "SkPathPriv.h"
+#include "SkString.h"
+#include "SkTraceEvent.h"
+#include "batches/GrVertexBatch.h"
+#include "gl/GrGLProcessor.h"
+#include "gl/GrGLGeometryProcessor.h"
+#include "gl/builders/GrGLProgramBuilder.h"
+
+GrAAConvexPathRenderer::GrAAConvexPathRenderer() {
+}
+
+struct Segment {
+ enum {
+ // These enum values are assumed in member functions below.
+ kLine = 0,
+ kQuad = 1,
+ } fType;
+
+ // line uses one pt, quad uses 2 pts
+ SkPoint fPts[2];
+ // normal to edge ending at each pt
+ SkVector fNorms[2];
+ // is the corner where the previous segment meets this segment
+ // sharp. If so, fMid is a normalized bisector facing outward.
+ SkVector fMid;
+
+ int countPoints() {
+ GR_STATIC_ASSERT(0 == kLine && 1 == kQuad);
+ return fType + 1;
+ }
+ const SkPoint& endPt() const {
+ GR_STATIC_ASSERT(0 == kLine && 1 == kQuad);
+ return fPts[fType];
+ };
+ const SkPoint& endNorm() const {
+ GR_STATIC_ASSERT(0 == kLine && 1 == kQuad);
+ return fNorms[fType];
+ };
+};
+
+typedef SkTArray<Segment, true> SegmentArray;
+
+static void center_of_mass(const SegmentArray& segments, SkPoint* c) {
+ SkScalar area = 0;
+ SkPoint center = {0, 0};
+ int count = segments.count();
+ SkPoint p0 = {0, 0};
+ if (count > 2) {
+ // We translate the polygon so that the first point is at the origin.
+ // This avoids some precision issues with small area polygons far away
+ // from the origin.
+ p0 = segments[0].endPt();
+ SkPoint pi;
+ SkPoint pj;
+ // the first and last iteration of the below loop would compute
+ // zeros since the starting / ending point is (0,0). So instead we start
+ // at i=1 and make the last iteration i=count-2.
+ pj = segments[1].endPt() - p0;
+ for (int i = 1; i < count - 1; ++i) {
+ pi = pj;
+ const SkPoint pj = segments[i + 1].endPt() - p0;
+
+ SkScalar t = SkScalarMul(pi.fX, pj.fY) - SkScalarMul(pj.fX, pi.fY);
+ area += t;
+ center.fX += (pi.fX + pj.fX) * t;
+ center.fY += (pi.fY + pj.fY) * t;
+
+ }
+ }
+ // If the poly has no area then we instead return the average of
+ // its points.
+ if (SkScalarNearlyZero(area)) {
+ SkPoint avg;
+ avg.set(0, 0);
+ for (int i = 0; i < count; ++i) {
+ const SkPoint& pt = segments[i].endPt();
+ avg.fX += pt.fX;
+ avg.fY += pt.fY;
+ }
+ SkScalar denom = SK_Scalar1 / count;
+ avg.scale(denom);
+ *c = avg;
+ } else {
+ area *= 3;
+ area = SkScalarInvert(area);
+ center.fX = SkScalarMul(center.fX, area);
+ center.fY = SkScalarMul(center.fY, area);
+ // undo the translate of p0 to the origin.
+ *c = center + p0;
+ }
+ SkASSERT(!SkScalarIsNaN(c->fX) && !SkScalarIsNaN(c->fY));
+}
+
+static void compute_vectors(SegmentArray* segments,
+ SkPoint* fanPt,
+ SkPathPriv::FirstDirection dir,
+ int* vCount,
+ int* iCount) {
+ center_of_mass(*segments, fanPt);
+ int count = segments->count();
+
+ // Make the normals point towards the outside
+ SkPoint::Side normSide;
+ if (dir == SkPathPriv::kCCW_FirstDirection) {
+ normSide = SkPoint::kRight_Side;
+ } else {
+ normSide = SkPoint::kLeft_Side;
+ }
+
+ *vCount = 0;
+ *iCount = 0;
+ // compute normals at all points
+ for (int a = 0; a < count; ++a) {
+ Segment& sega = (*segments)[a];
+ int b = (a + 1) % count;
+ Segment& segb = (*segments)[b];
+
+ const SkPoint* prevPt = &sega.endPt();
+ int n = segb.countPoints();
+ for (int p = 0; p < n; ++p) {
+ segb.fNorms[p] = segb.fPts[p] - *prevPt;
+ segb.fNorms[p].normalize();
+ segb.fNorms[p].setOrthog(segb.fNorms[p], normSide);
+ prevPt = &segb.fPts[p];
+ }
+ if (Segment::kLine == segb.fType) {
+ *vCount += 5;
+ *iCount += 9;
+ } else {
+ *vCount += 6;
+ *iCount += 12;
+ }
+ }
+
+ // compute mid-vectors where segments meet. TODO: Detect shallow corners
+ // and leave out the wedges and close gaps by stitching segments together.
+ for (int a = 0; a < count; ++a) {
+ const Segment& sega = (*segments)[a];
+ int b = (a + 1) % count;
+ Segment& segb = (*segments)[b];
+ segb.fMid = segb.fNorms[0] + sega.endNorm();
+ segb.fMid.normalize();
+ // corner wedges
+ *vCount += 4;
+ *iCount += 6;
+ }
+}
+
+struct DegenerateTestData {
+ DegenerateTestData() { fStage = kInitial; }
+ bool isDegenerate() const { return kNonDegenerate != fStage; }
+ enum {
+ kInitial,
+ kPoint,
+ kLine,
+ kNonDegenerate
+ } fStage;
+ SkPoint fFirstPoint;
+ SkVector fLineNormal;
+ SkScalar fLineC;
+};
+
+static const SkScalar kClose = (SK_Scalar1 / 16);
+static const SkScalar kCloseSqd = SkScalarMul(kClose, kClose);
+
+static void update_degenerate_test(DegenerateTestData* data, const SkPoint& pt) {
+ switch (data->fStage) {
+ case DegenerateTestData::kInitial:
+ data->fFirstPoint = pt;
+ data->fStage = DegenerateTestData::kPoint;
+ break;
+ case DegenerateTestData::kPoint:
+ if (pt.distanceToSqd(data->fFirstPoint) > kCloseSqd) {
+ data->fLineNormal = pt - data->fFirstPoint;
+ data->fLineNormal.normalize();
+ data->fLineNormal.setOrthog(data->fLineNormal);
+ data->fLineC = -data->fLineNormal.dot(data->fFirstPoint);
+ data->fStage = DegenerateTestData::kLine;
+ }
+ break;
+ case DegenerateTestData::kLine:
+ if (SkScalarAbs(data->fLineNormal.dot(pt) + data->fLineC) > kClose) {
+ data->fStage = DegenerateTestData::kNonDegenerate;
+ }
+ case DegenerateTestData::kNonDegenerate:
+ break;
+ default:
+ SkFAIL("Unexpected degenerate test stage.");
+ }
+}
+
+static inline bool get_direction(const SkPath& path, const SkMatrix& m,
+ SkPathPriv::FirstDirection* dir) {
+ if (!SkPathPriv::CheapComputeFirstDirection(path, dir)) {
+ return false;
+ }
+ // check whether m reverses the orientation
+ SkASSERT(!m.hasPerspective());
+ SkScalar det2x2 = SkScalarMul(m.get(SkMatrix::kMScaleX), m.get(SkMatrix::kMScaleY)) -
+ SkScalarMul(m.get(SkMatrix::kMSkewX), m.get(SkMatrix::kMSkewY));
+ if (det2x2 < 0) {
+ *dir = SkPathPriv::OppositeFirstDirection(*dir);
+ }
+ return true;
+}
+
+static inline void add_line_to_segment(const SkPoint& pt,
+ SegmentArray* segments) {
+ segments->push_back();
+ segments->back().fType = Segment::kLine;
+ segments->back().fPts[0] = pt;
+}
+
+static inline void add_quad_segment(const SkPoint pts[3],
+ SegmentArray* segments) {
+ if (pts[0].distanceToSqd(pts[1]) < kCloseSqd || pts[1].distanceToSqd(pts[2]) < kCloseSqd) {
+ if (pts[0] != pts[2]) {
+ add_line_to_segment(pts[2], segments);
+ }
+ } else {
+ segments->push_back();
+ segments->back().fType = Segment::kQuad;
+ segments->back().fPts[0] = pts[1];
+ segments->back().fPts[1] = pts[2];
+ }
+}
+
+static inline void add_cubic_segments(const SkPoint pts[4],
+ SkPathPriv::FirstDirection dir,
+ SegmentArray* segments) {
+ SkSTArray<15, SkPoint, true> quads;
+ GrPathUtils::convertCubicToQuads(pts, SK_Scalar1, true, dir, &quads);
+ int count = quads.count();
+ for (int q = 0; q < count; q += 3) {
+ add_quad_segment(&quads[q], segments);
+ }
+}
+
+static bool get_segments(const SkPath& path,
+ const SkMatrix& m,
+ SegmentArray* segments,
+ SkPoint* fanPt,
+ int* vCount,
+ int* iCount) {
+ SkPath::Iter iter(path, true);
+ // This renderer over-emphasizes very thin path regions. We use the distance
+ // to the path from the sample to compute coverage. Every pixel intersected
+ // by the path will be hit and the maximum distance is sqrt(2)/2. We don't
+ // notice that the sample may be close to a very thin area of the path and
+ // thus should be very light. This is particularly egregious for degenerate
+ // line paths. We detect paths that are very close to a line (zero area) and
+ // draw nothing.
+ DegenerateTestData degenerateData;
+ SkPathPriv::FirstDirection dir;
+ // get_direction can fail for some degenerate paths.
+ if (!get_direction(path, m, &dir)) {
+ return false;
+ }
+
+ for (;;) {
+ SkPoint pts[4];
+ SkPath::Verb verb = iter.next(pts);
+ switch (verb) {
+ case SkPath::kMove_Verb:
+ m.mapPoints(pts, 1);
+ update_degenerate_test(&degenerateData, pts[0]);
+ break;
+ case SkPath::kLine_Verb: {
+ m.mapPoints(&pts[1], 1);
+ update_degenerate_test(&degenerateData, pts[1]);
+ add_line_to_segment(pts[1], segments);
+ break;
+ }
+ case SkPath::kQuad_Verb:
+ m.mapPoints(pts, 3);
+ update_degenerate_test(&degenerateData, pts[1]);
+ update_degenerate_test(&degenerateData, pts[2]);
+ add_quad_segment(pts, segments);
+ break;
+ case SkPath::kConic_Verb: {
+ m.mapPoints(pts, 3);
+ SkScalar weight = iter.conicWeight();
+ SkAutoConicToQuads converter;
+ const SkPoint* quadPts = converter.computeQuads(pts, weight, 0.5f);
+ for (int i = 0; i < converter.countQuads(); ++i) {
+ update_degenerate_test(&degenerateData, quadPts[2*i + 1]);
+ update_degenerate_test(&degenerateData, quadPts[2*i + 2]);
+ add_quad_segment(quadPts + 2*i, segments);
+ }
+ break;
+ }
+ case SkPath::kCubic_Verb: {
+ m.mapPoints(pts, 4);
+ update_degenerate_test(&degenerateData, pts[1]);
+ update_degenerate_test(&degenerateData, pts[2]);
+ update_degenerate_test(&degenerateData, pts[3]);
+ add_cubic_segments(pts, dir, segments);
+ break;
+ };
+ case SkPath::kDone_Verb:
+ if (degenerateData.isDegenerate()) {
+ return false;
+ } else {
+ compute_vectors(segments, fanPt, dir, vCount, iCount);
+ return true;
+ }
+ default:
+ break;
+ }
+ }
+}
+
+struct QuadVertex {
+ SkPoint fPos;
+ SkPoint fUV;
+ SkScalar fD0;
+ SkScalar fD1;
+};
+
+struct Draw {
+ Draw() : fVertexCnt(0), fIndexCnt(0) {}
+ int fVertexCnt;
+ int fIndexCnt;
+};
+
+typedef SkTArray<Draw, true> DrawArray;
+
+static void create_vertices(const SegmentArray& segments,
+ const SkPoint& fanPt,
+ DrawArray* draws,
+ QuadVertex* verts,
+ uint16_t* idxs) {
+ Draw* draw = &draws->push_back();
+ // alias just to make vert/index assignments easier to read.
+ int* v = &draw->fVertexCnt;
+ int* i = &draw->fIndexCnt;
+
+ int count = segments.count();
+ for (int a = 0; a < count; ++a) {
+ const Segment& sega = segments[a];
+ int b = (a + 1) % count;
+ const Segment& segb = segments[b];
+
+ // Check whether adding the verts for this segment to the current draw would cause index
+ // values to overflow.
+ int vCount = 4;
+ if (Segment::kLine == segb.fType) {
+ vCount += 5;
+ } else {
+ vCount += 6;
+ }
+ if (draw->fVertexCnt + vCount > (1 << 16)) {
+ verts += *v;
+ idxs += *i;
+ draw = &draws->push_back();
+ v = &draw->fVertexCnt;
+ i = &draw->fIndexCnt;
+ }
+
+ // FIXME: These tris are inset in the 1 unit arc around the corner
+ verts[*v + 0].fPos = sega.endPt();
+ verts[*v + 1].fPos = verts[*v + 0].fPos + sega.endNorm();
+ verts[*v + 2].fPos = verts[*v + 0].fPos + segb.fMid;
+ verts[*v + 3].fPos = verts[*v + 0].fPos + segb.fNorms[0];
+ verts[*v + 0].fUV.set(0,0);
+ verts[*v + 1].fUV.set(0,-SK_Scalar1);
+ verts[*v + 2].fUV.set(0,-SK_Scalar1);
+ verts[*v + 3].fUV.set(0,-SK_Scalar1);
+ verts[*v + 0].fD0 = verts[*v + 0].fD1 = -SK_Scalar1;
+ verts[*v + 1].fD0 = verts[*v + 1].fD1 = -SK_Scalar1;
+ verts[*v + 2].fD0 = verts[*v + 2].fD1 = -SK_Scalar1;
+ verts[*v + 3].fD0 = verts[*v + 3].fD1 = -SK_Scalar1;
+
+ idxs[*i + 0] = *v + 0;
+ idxs[*i + 1] = *v + 2;
+ idxs[*i + 2] = *v + 1;
+ idxs[*i + 3] = *v + 0;
+ idxs[*i + 4] = *v + 3;
+ idxs[*i + 5] = *v + 2;
+
+ *v += 4;
+ *i += 6;
+
+ if (Segment::kLine == segb.fType) {
+ verts[*v + 0].fPos = fanPt;
+ verts[*v + 1].fPos = sega.endPt();
+ verts[*v + 2].fPos = segb.fPts[0];
+
+ verts[*v + 3].fPos = verts[*v + 1].fPos + segb.fNorms[0];
+ verts[*v + 4].fPos = verts[*v + 2].fPos + segb.fNorms[0];
+
+ // we draw the line edge as a degenerate quad (u is 0, v is the
+ // signed distance to the edge)
+ SkScalar dist = fanPt.distanceToLineBetween(verts[*v + 1].fPos,
+ verts[*v + 2].fPos);
+ verts[*v + 0].fUV.set(0, dist);
+ verts[*v + 1].fUV.set(0, 0);
+ verts[*v + 2].fUV.set(0, 0);
+ verts[*v + 3].fUV.set(0, -SK_Scalar1);
+ verts[*v + 4].fUV.set(0, -SK_Scalar1);
+
+ verts[*v + 0].fD0 = verts[*v + 0].fD1 = -SK_Scalar1;
+ verts[*v + 1].fD0 = verts[*v + 1].fD1 = -SK_Scalar1;
+ verts[*v + 2].fD0 = verts[*v + 2].fD1 = -SK_Scalar1;
+ verts[*v + 3].fD0 = verts[*v + 3].fD1 = -SK_Scalar1;
+ verts[*v + 4].fD0 = verts[*v + 4].fD1 = -SK_Scalar1;
+
+ idxs[*i + 0] = *v + 3;
+ idxs[*i + 1] = *v + 1;
+ idxs[*i + 2] = *v + 2;
+
+ idxs[*i + 3] = *v + 4;
+ idxs[*i + 4] = *v + 3;
+ idxs[*i + 5] = *v + 2;
+
+ *i += 6;
+
+ // Draw the interior fan if it exists.
+            // TODO: Detect and combine collinear segments. This will ensure we catch every case
+ // with no interior, and that the resulting shared edge uses the same endpoints.
+ if (count >= 3) {
+ idxs[*i + 0] = *v + 0;
+ idxs[*i + 1] = *v + 2;
+ idxs[*i + 2] = *v + 1;
+
+ *i += 3;
+ }
+
+ *v += 5;
+ } else {
+ SkPoint qpts[] = {sega.endPt(), segb.fPts[0], segb.fPts[1]};
+
+ SkVector midVec = segb.fNorms[0] + segb.fNorms[1];
+ midVec.normalize();
+
+ verts[*v + 0].fPos = fanPt;
+ verts[*v + 1].fPos = qpts[0];
+ verts[*v + 2].fPos = qpts[2];
+ verts[*v + 3].fPos = qpts[0] + segb.fNorms[0];
+ verts[*v + 4].fPos = qpts[2] + segb.fNorms[1];
+ verts[*v + 5].fPos = qpts[1] + midVec;
+
+ SkScalar c = segb.fNorms[0].dot(qpts[0]);
+ verts[*v + 0].fD0 = -segb.fNorms[0].dot(fanPt) + c;
+ verts[*v + 1].fD0 = 0.f;
+ verts[*v + 2].fD0 = -segb.fNorms[0].dot(qpts[2]) + c;
+ verts[*v + 3].fD0 = -SK_ScalarMax/100;
+ verts[*v + 4].fD0 = -SK_ScalarMax/100;
+ verts[*v + 5].fD0 = -SK_ScalarMax/100;
+
+ c = segb.fNorms[1].dot(qpts[2]);
+ verts[*v + 0].fD1 = -segb.fNorms[1].dot(fanPt) + c;
+ verts[*v + 1].fD1 = -segb.fNorms[1].dot(qpts[0]) + c;
+ verts[*v + 2].fD1 = 0.f;
+ verts[*v + 3].fD1 = -SK_ScalarMax/100;
+ verts[*v + 4].fD1 = -SK_ScalarMax/100;
+ verts[*v + 5].fD1 = -SK_ScalarMax/100;
+
+ GrPathUtils::QuadUVMatrix toUV(qpts);
+ toUV.apply<6, sizeof(QuadVertex), sizeof(SkPoint)>(verts + *v);
+
+ idxs[*i + 0] = *v + 3;
+ idxs[*i + 1] = *v + 1;
+ idxs[*i + 2] = *v + 2;
+ idxs[*i + 3] = *v + 4;
+ idxs[*i + 4] = *v + 3;
+ idxs[*i + 5] = *v + 2;
+
+ idxs[*i + 6] = *v + 5;
+ idxs[*i + 7] = *v + 3;
+ idxs[*i + 8] = *v + 4;
+
+ *i += 9;
+
+ // Draw the interior fan if it exists.
+            // TODO: Detect and combine collinear segments. This will ensure we catch every case
+ // with no interior, and that the resulting shared edge uses the same endpoints.
+ if (count >= 3) {
+ idxs[*i + 0] = *v + 0;
+ idxs[*i + 1] = *v + 2;
+ idxs[*i + 2] = *v + 1;
+
+ *i += 3;
+ }
+
+ *v += 6;
+ }
+ }
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+/*
+ * Quadratic specified by 0=u^2-v canonical coords. u and v are the first
+ * two components of the vertex attribute. Coverage is based on signed
+ * distance with negative being inside, positive outside. The edge is specified in
+ * window space (y-down). If either the third or fourth component of the interpolated
+ * vertex coord is > 0 then the pixel is considered outside the edge. This is used to
+ * attempt to trim to a portion of the infinite quad.
+ * Requires shader derivative instruction support.
+ */
+
+class QuadEdgeEffect : public GrGeometryProcessor {
+public:
+
+ static GrGeometryProcessor* Create(GrColor color, const SkMatrix& localMatrix,
+ bool usesLocalCoords) {
+ return new QuadEdgeEffect(color, localMatrix, usesLocalCoords);
+ }
+
+ virtual ~QuadEdgeEffect() {}
+
+ const char* name() const override { return "QuadEdge"; }
+
+ const Attribute* inPosition() const { return fInPosition; }
+ const Attribute* inQuadEdge() const { return fInQuadEdge; }
+ GrColor color() const { return fColor; }
+ bool colorIgnored() const { return GrColor_ILLEGAL == fColor; }
+ const SkMatrix& localMatrix() const { return fLocalMatrix; }
+ bool usesLocalCoords() const { return fUsesLocalCoords; }
+
+ class GLProcessor : public GrGLGeometryProcessor {
+ public:
+ GLProcessor(const GrGeometryProcessor&,
+ const GrBatchTracker&)
+ : fColor(GrColor_ILLEGAL) {}
+
+ void onEmitCode(EmitArgs& args, GrGPArgs* gpArgs) override {
+ const QuadEdgeEffect& qe = args.fGP.cast<QuadEdgeEffect>();
+ GrGLGPBuilder* pb = args.fPB;
+ GrGLVertexBuilder* vsBuilder = pb->getVertexShaderBuilder();
+
+ // emit attributes
+ vsBuilder->emitAttributes(qe);
+
+ GrGLVertToFrag v(kVec4f_GrSLType);
+ args.fPB->addVarying("QuadEdge", &v);
+ vsBuilder->codeAppendf("%s = %s;", v.vsOut(), qe.inQuadEdge()->fName);
+
+ // Setup pass through color
+ if (!qe.colorIgnored()) {
+ this->setupUniformColor(pb, args.fOutputColor, &fColorUniform);
+ }
+
+ // Setup position
+ this->setupPosition(pb, gpArgs, qe.inPosition()->fName);
+
+ // emit transforms
+ this->emitTransforms(args.fPB, gpArgs->fPositionVar, qe.inPosition()->fName,
+ qe.localMatrix(), args.fTransformsIn, args.fTransformsOut);
+
+ GrGLFragmentBuilder* fsBuilder = args.fPB->getFragmentShaderBuilder();
+
+ SkAssertResult(fsBuilder->enableFeature(
+ GrGLFragmentShaderBuilder::kStandardDerivatives_GLSLFeature));
+ fsBuilder->codeAppendf("float edgeAlpha;");
+
+ // keep the derivative instructions outside the conditional
+ fsBuilder->codeAppendf("vec2 duvdx = dFdx(%s.xy);", v.fsIn());
+ fsBuilder->codeAppendf("vec2 duvdy = dFdy(%s.xy);", v.fsIn());
+ fsBuilder->codeAppendf("if (%s.z > 0.0 && %s.w > 0.0) {", v.fsIn(), v.fsIn());
+ // today we know z and w are in device space. We could use derivatives
+ fsBuilder->codeAppendf("edgeAlpha = min(min(%s.z, %s.w) + 0.5, 1.0);", v.fsIn(),
+ v.fsIn());
+ fsBuilder->codeAppendf ("} else {");
+ fsBuilder->codeAppendf("vec2 gF = vec2(2.0*%s.x*duvdx.x - duvdx.y,"
+ " 2.0*%s.x*duvdy.x - duvdy.y);",
+ v.fsIn(), v.fsIn());
+ fsBuilder->codeAppendf("edgeAlpha = (%s.x*%s.x - %s.y);", v.fsIn(), v.fsIn(),
+ v.fsIn());
+ fsBuilder->codeAppendf("edgeAlpha = "
+ "clamp(0.5 - edgeAlpha / length(gF), 0.0, 1.0);}");
+
+ fsBuilder->codeAppendf("%s = vec4(edgeAlpha);", args.fOutputCoverage);
+ }
+
+ static inline void GenKey(const GrGeometryProcessor& gp,
+ const GrBatchTracker& bt,
+ const GrGLSLCaps&,
+ GrProcessorKeyBuilder* b) {
+ const QuadEdgeEffect& qee = gp.cast<QuadEdgeEffect>();
+ uint32_t key = 0;
+ key |= qee.usesLocalCoords() && qee.localMatrix().hasPerspective() ? 0x1 : 0x0;
+ key |= qee.colorIgnored() ? 0x2 : 0x0;
+ b->add32(key);
+ }
+
+ virtual void setData(const GrGLProgramDataManager& pdman,
+ const GrPrimitiveProcessor& gp,
+ const GrBatchTracker& bt) override {
+ const QuadEdgeEffect& qe = gp.cast<QuadEdgeEffect>();
+ if (qe.color() != fColor) {
+ GrGLfloat c[4];
+ GrColorToRGBAFloat(qe.color(), c);
+ pdman.set4fv(fColorUniform, 1, c);
+ fColor = qe.color();
+ }
+ }
+
+ void setTransformData(const GrPrimitiveProcessor& primProc,
+ const GrGLProgramDataManager& pdman,
+ int index,
+ const SkTArray<const GrCoordTransform*, true>& transforms) override {
+ this->setTransformDataHelper<QuadEdgeEffect>(primProc, pdman, index, transforms);
+ }
+
+ private:
+ GrColor fColor;
+ UniformHandle fColorUniform;
+
+ typedef GrGLGeometryProcessor INHERITED;
+ };
+
+ virtual void getGLProcessorKey(const GrBatchTracker& bt,
+ const GrGLSLCaps& caps,
+ GrProcessorKeyBuilder* b) const override {
+ GLProcessor::GenKey(*this, bt, caps, b);
+ }
+
+ virtual GrGLPrimitiveProcessor* createGLInstance(const GrBatchTracker& bt,
+ const GrGLSLCaps&) const override {
+ return new GLProcessor(*this, bt);
+ }
+
+private:
+ QuadEdgeEffect(GrColor color, const SkMatrix& localMatrix, bool usesLocalCoords)
+ : fColor(color)
+ , fLocalMatrix(localMatrix)
+ , fUsesLocalCoords(usesLocalCoords) {
+ this->initClassID<QuadEdgeEffect>();
+ fInPosition = &this->addVertexAttrib(Attribute("inPosition", kVec2f_GrVertexAttribType));
+ fInQuadEdge = &this->addVertexAttrib(Attribute("inQuadEdge", kVec4f_GrVertexAttribType));
+ }
+
+ const Attribute* fInPosition;
+ const Attribute* fInQuadEdge;
+ GrColor fColor;
+ SkMatrix fLocalMatrix;
+ bool fUsesLocalCoords;
+
+ GR_DECLARE_GEOMETRY_PROCESSOR_TEST;
+
+ typedef GrGeometryProcessor INHERITED;
+};
+
+GR_DEFINE_GEOMETRY_PROCESSOR_TEST(QuadEdgeEffect);
+
+const GrGeometryProcessor* QuadEdgeEffect::TestCreate(GrProcessorTestData* d) {
+ // Doesn't work without derivative instructions.
+ return d->fCaps->shaderCaps()->shaderDerivativeSupport() ?
+ QuadEdgeEffect::Create(GrRandomColor(d->fRandom),
+ GrTest::TestMatrix(d->fRandom),
+ d->fRandom->nextBool()) : nullptr;
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+bool GrAAConvexPathRenderer::onCanDrawPath(const CanDrawPathArgs& args) const {
+ return (args.fShaderCaps->shaderDerivativeSupport() && args.fAntiAlias &&
+ args.fStroke->isFillStyle() && !args.fPath->isInverseFillType() &&
+ args.fPath->isConvex());
+}
+
+// extract the result vertices and indices from the GrAAConvexTessellator
+static void extract_verts(const GrAAConvexTessellator& tess,
+ void* vertices,
+ size_t vertexStride,
+ GrColor color,
+ uint16_t* idxs,
+ bool tweakAlphaForCoverage) {
+ intptr_t verts = reinterpret_cast<intptr_t>(vertices);
+
+ for (int i = 0; i < tess.numPts(); ++i) {
+ *((SkPoint*)((intptr_t)verts + i * vertexStride)) = tess.point(i);
+ }
+
+ // Make 'verts' point to the colors
+ verts += sizeof(SkPoint);
+ for (int i = 0; i < tess.numPts(); ++i) {
+ if (tweakAlphaForCoverage) {
+ SkASSERT(SkScalarRoundToInt(255.0f * tess.coverage(i)) <= 255);
+ unsigned scale = SkScalarRoundToInt(255.0f * tess.coverage(i));
+ GrColor scaledColor = (0xff == scale) ? color : SkAlphaMulQ(color, scale);
+ *reinterpret_cast<GrColor*>(verts + i * vertexStride) = scaledColor;
+ } else {
+ *reinterpret_cast<GrColor*>(verts + i * vertexStride) = color;
+ *reinterpret_cast<float*>(verts + i * vertexStride + sizeof(GrColor)) =
+ tess.coverage(i);
+ }
+ }
+
+ for (int i = 0; i < tess.numIndices(); ++i) {
+ idxs[i] = tess.index(i);
+ }
+}
+
+static const GrGeometryProcessor* create_fill_gp(bool tweakAlphaForCoverage,
+ const SkMatrix& viewMatrix,
+ bool usesLocalCoords,
+ bool coverageIgnored) {
+ using namespace GrDefaultGeoProcFactory;
+
+ Color color(Color::kAttribute_Type);
+ Coverage::Type coverageType;
+ // TODO remove coverage if coverage is ignored
+ /*if (coverageIgnored) {
+ coverageType = Coverage::kNone_Type;
+ } else*/ if (tweakAlphaForCoverage) {
+ coverageType = Coverage::kSolid_Type;
+ } else {
+ coverageType = Coverage::kAttribute_Type;
+ }
+ Coverage coverage(coverageType);
+ LocalCoords localCoords(usesLocalCoords ? LocalCoords::kUsePosition_Type :
+ LocalCoords::kUnused_Type);
+ return CreateForDeviceSpace(color, coverage, localCoords, viewMatrix);
+}
+
+class AAConvexPathBatch : public GrVertexBatch {
+public:
+ struct Geometry {
+ GrColor fColor;
+ SkMatrix fViewMatrix;
+ SkPath fPath;
+ };
+
+ static GrDrawBatch* Create(const Geometry& geometry) { return new AAConvexPathBatch(geometry); }
+
+ const char* name() const override { return "AAConvexBatch"; }
+
+ void getInvariantOutputColor(GrInitInvariantOutput* out) const override {
+ // When this is called on a batch, there is only one geometry bundle
+ out->setKnownFourComponents(fGeoData[0].fColor);
+ }
+ void getInvariantOutputCoverage(GrInitInvariantOutput* out) const override {
+ out->setUnknownSingleComponent();
+ }
+
+private:
+
+ void initBatchTracker(const GrPipelineOptimizations& opt) override {
+ // Handle any color overrides
+ if (!opt.readsColor()) {
+ fGeoData[0].fColor = GrColor_ILLEGAL;
+ }
+ opt.getOverrideColorIfSet(&fGeoData[0].fColor);
+
+ // setup batch properties
+ fBatch.fColorIgnored = !opt.readsColor();
+ fBatch.fColor = fGeoData[0].fColor;
+ fBatch.fUsesLocalCoords = opt.readsLocalCoords();
+ fBatch.fCoverageIgnored = !opt.readsCoverage();
+ fBatch.fLinesOnly = SkPath::kLine_SegmentMask == fGeoData[0].fPath.getSegmentMasks();
+ fBatch.fCanTweakAlphaForCoverage = opt.canTweakAlphaForCoverage();
+ }
+
+ void prepareLinesOnlyDraws(Target* target) {
+ bool canTweakAlphaForCoverage = this->canTweakAlphaForCoverage();
+
+ // Setup GrGeometryProcessor
+ SkAutoTUnref<const GrGeometryProcessor> gp(create_fill_gp(canTweakAlphaForCoverage,
+ this->viewMatrix(),
+ this->usesLocalCoords(),
+ this->coverageIgnored()));
+ if (!gp) {
+ SkDebugf("Could not create GrGeometryProcessor\n");
+ return;
+ }
+
+ target->initDraw(gp, this->pipeline());
+
+ size_t vertexStride = gp->getVertexStride();
+
+ SkASSERT(canTweakAlphaForCoverage ?
+ vertexStride == sizeof(GrDefaultGeoProcFactory::PositionColorAttr) :
+ vertexStride == sizeof(GrDefaultGeoProcFactory::PositionColorCoverageAttr));
+
+ GrAAConvexTessellator tess;
+
+ int instanceCount = fGeoData.count();
+
+ for (int i = 0; i < instanceCount; i++) {
+ tess.rewind();
+
+ Geometry& args = fGeoData[i];
+
+ if (!tess.tessellate(args.fViewMatrix, args.fPath)) {
+ continue;
+ }
+
+ const GrVertexBuffer* vertexBuffer;
+ int firstVertex;
+
+ void* verts = target->makeVertexSpace(vertexStride, tess.numPts(), &vertexBuffer,
+ &firstVertex);
+ if (!verts) {
+ SkDebugf("Could not allocate vertices\n");
+ return;
+ }
+
+ const GrIndexBuffer* indexBuffer;
+ int firstIndex;
+
+ uint16_t* idxs = target->makeIndexSpace(tess.numIndices(), &indexBuffer, &firstIndex);
+ if (!idxs) {
+ SkDebugf("Could not allocate indices\n");
+ return;
+ }
+
+ extract_verts(tess, verts, vertexStride, args.fColor, idxs, canTweakAlphaForCoverage);
+
+ GrVertices info;
+ info.initIndexed(kTriangles_GrPrimitiveType,
+ vertexBuffer, indexBuffer,
+ firstVertex, firstIndex,
+ tess.numPts(), tess.numIndices());
+ target->draw(info);
+ }
+ }
+
+ void onPrepareDraws(Target* target) override {
+#ifndef SK_IGNORE_LINEONLY_AA_CONVEX_PATH_OPTS
+ if (this->linesOnly()) {
+ this->prepareLinesOnlyDraws(target);
+ return;
+ }
+#endif
+
+ int instanceCount = fGeoData.count();
+
+ SkMatrix invert;
+ if (this->usesLocalCoords() && !this->viewMatrix().invert(&invert)) {
+ SkDebugf("Could not invert viewmatrix\n");
+ return;
+ }
+
+ // Setup GrGeometryProcessor
+ SkAutoTUnref<GrGeometryProcessor> quadProcessor(
+ QuadEdgeEffect::Create(this->color(), invert, this->usesLocalCoords()));
+
+ target->initDraw(quadProcessor, this->pipeline());
+
+ // TODO generate all segments for all paths and use one vertex buffer
+ for (int i = 0; i < instanceCount; i++) {
+ Geometry& args = fGeoData[i];
+
+ // We use the fact that SkPath::transform path does subdivision based on
+ // perspective. Otherwise, we apply the view matrix when copying to the
+ // segment representation.
+ const SkMatrix* viewMatrix = &args.fViewMatrix;
+ if (viewMatrix->hasPerspective()) {
+ args.fPath.transform(*viewMatrix);
+ viewMatrix = &SkMatrix::I();
+ }
+
+ int vertexCount;
+ int indexCount;
+ enum {
+ kPreallocSegmentCnt = 512 / sizeof(Segment),
+ kPreallocDrawCnt = 4,
+ };
+ SkSTArray<kPreallocSegmentCnt, Segment, true> segments;
+ SkPoint fanPt;
+
+ if (!get_segments(args.fPath, *viewMatrix, &segments, &fanPt, &vertexCount,
+ &indexCount)) {
+ continue;
+ }
+
+ const GrVertexBuffer* vertexBuffer;
+ int firstVertex;
+
+ size_t vertexStride = quadProcessor->getVertexStride();
+ QuadVertex* verts = reinterpret_cast<QuadVertex*>(target->makeVertexSpace(
+ vertexStride, vertexCount, &vertexBuffer, &firstVertex));
+
+ if (!verts) {
+ SkDebugf("Could not allocate vertices\n");
+ return;
+ }
+
+ const GrIndexBuffer* indexBuffer;
+ int firstIndex;
+
+ uint16_t *idxs = target->makeIndexSpace(indexCount, &indexBuffer, &firstIndex);
+ if (!idxs) {
+ SkDebugf("Could not allocate indices\n");
+ return;
+ }
+
+ SkSTArray<kPreallocDrawCnt, Draw, true> draws;
+ create_vertices(segments, fanPt, &draws, verts, idxs);
+
+ GrVertices vertices;
+
+ for (int i = 0; i < draws.count(); ++i) {
+ const Draw& draw = draws[i];
+ vertices.initIndexed(kTriangles_GrPrimitiveType, vertexBuffer, indexBuffer,
+ firstVertex, firstIndex, draw.fVertexCnt, draw.fIndexCnt);
+ target->draw(vertices);
+ firstVertex += draw.fVertexCnt;
+ firstIndex += draw.fIndexCnt;
+ }
+ }
+ }
+
+ SkSTArray<1, Geometry, true>* geoData() { return &fGeoData; }
+
+ AAConvexPathBatch(const Geometry& geometry) {
+ this->initClassID<AAConvexPathBatch>();
+ fGeoData.push_back(geometry);
+
+ // compute bounds
+ fBounds = geometry.fPath.getBounds();
+ geometry.fViewMatrix.mapRect(&fBounds);
+ }
+
+ bool onCombineIfPossible(GrBatch* t, const GrCaps& caps) override {
+ AAConvexPathBatch* that = t->cast<AAConvexPathBatch>();
+ if (!GrPipeline::CanCombine(*this->pipeline(), this->bounds(), *that->pipeline(),
+ that->bounds(), caps)) {
+ return false;
+ }
+
+ if (this->color() != that->color()) {
+ return false;
+ }
+
+ SkASSERT(this->usesLocalCoords() == that->usesLocalCoords());
+ if (this->usesLocalCoords() && !this->viewMatrix().cheapEqualTo(that->viewMatrix())) {
+ return false;
+ }
+
+ if (this->linesOnly() != that->linesOnly()) {
+ return false;
+ }
+
+ // In the event of two batches, one who can tweak, one who cannot, we just fall back to
+ // not tweaking
+ if (this->canTweakAlphaForCoverage() != that->canTweakAlphaForCoverage()) {
+ fBatch.fCanTweakAlphaForCoverage = false;
+ }
+
+ fGeoData.push_back_n(that->geoData()->count(), that->geoData()->begin());
+ this->joinBounds(that->bounds());
+ return true;
+ }
+
+ GrColor color() const { return fBatch.fColor; }
+ bool linesOnly() const { return fBatch.fLinesOnly; }
+ bool usesLocalCoords() const { return fBatch.fUsesLocalCoords; }
+ bool canTweakAlphaForCoverage() const { return fBatch.fCanTweakAlphaForCoverage; }
+ const SkMatrix& viewMatrix() const { return fGeoData[0].fViewMatrix; }
+ bool coverageIgnored() const { return fBatch.fCoverageIgnored; }
+
+ struct BatchTracker {
+ GrColor fColor;
+ bool fUsesLocalCoords;
+ bool fColorIgnored;
+ bool fCoverageIgnored;
+ bool fLinesOnly;
+ bool fCanTweakAlphaForCoverage;
+ };
+
+ BatchTracker fBatch;
+ SkSTArray<1, Geometry, true> fGeoData;
+};
+
+bool GrAAConvexPathRenderer::onDrawPath(const DrawPathArgs& args) {
+ if (args.fPath->isEmpty()) {
+ return true;
+ }
+
+ AAConvexPathBatch::Geometry geometry;
+ geometry.fColor = args.fColor;
+ geometry.fViewMatrix = *args.fViewMatrix;
+ geometry.fPath = *args.fPath;
+
+ SkAutoTUnref<GrDrawBatch> batch(AAConvexPathBatch::Create(geometry));
+ args.fTarget->drawBatch(*args.fPipelineBuilder, batch);
+
+ return true;
+
+}
+
+///////////////////////////////////////////////////////////////////////////////////////////////////
+
+#ifdef GR_TEST_UTILS
+
+DRAW_BATCH_TEST_DEFINE(AAConvexPathBatch) {
+ AAConvexPathBatch::Geometry geometry;
+ geometry.fColor = GrRandomColor(random);
+ geometry.fViewMatrix = GrTest::TestMatrixInvertible(random);
+ geometry.fPath = GrTest::TestPathConvex(random);
+
+ return AAConvexPathBatch::Create(geometry);
+}
+
+#endif
diff --git a/src/gpu/batches/GrAAConvexPathRenderer.h b/src/gpu/batches/GrAAConvexPathRenderer.h
new file mode 100644
index 0000000000..5d51d6c98a
--- /dev/null
+++ b/src/gpu/batches/GrAAConvexPathRenderer.h
@@ -0,0 +1,24 @@
+
+/*
+ * Copyright 2012 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrAAConvexPathRenderer_DEFINED
+#define GrAAConvexPathRenderer_DEFINED
+
+#include "GrPathRenderer.h"
+
+// Path renderer for antialiased convex paths (implementation in
+// GrAAConvexPathRenderer.cpp).
+class GrAAConvexPathRenderer : public GrPathRenderer {
+public:
+    GrAAConvexPathRenderer();
+
+private:
+    // Reports whether this renderer can handle the given path/stroke/matrix.
+    bool onCanDrawPath(const CanDrawPathArgs&) const override;
+
+    // Draws the path by creating and submitting an AAConvexPathBatch.
+    bool onDrawPath(const DrawPathArgs&) override;
+};
+
+#endif
diff --git a/src/gpu/batches/GrAAConvexTessellator.cpp b/src/gpu/batches/GrAAConvexTessellator.cpp
new file mode 100644
index 0000000000..c111b8b562
--- /dev/null
+++ b/src/gpu/batches/GrAAConvexTessellator.cpp
@@ -0,0 +1,1027 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "GrAAConvexTessellator.h"
+#include "SkCanvas.h"
+#include "SkPath.h"
+#include "SkPoint.h"
+#include "SkString.h"
+#include "GrPathUtils.h"
+
+// Next steps:
+// add an interactive sample app slide
+// add debug check that all points are suitably far apart
+// test more degenerate cases
+
+// The tolerance for fusing vertices and eliminating colinear lines (It is in device space).
+static const SkScalar kClose = (SK_Scalar1 / 16);
+static const SkScalar kCloseSqd = SkScalarMul(kClose, kClose);
+
+// tesselation tolerance values, in device space pixels
+static const SkScalar kQuadTolerance = 0.2f;
+static const SkScalar kCubicTolerance = 0.2f;
+static const SkScalar kConicTolerance = 0.5f;
+
+// dot product below which we use a round cap between curve segments
+static const SkScalar kRoundCapThreshold = 0.8f;
+
+// Returns the parameter t such that p0 + t * n0 lies on the line through p1
+// with direction n1 (intersection of the two parametric lines).
+// NOTE(review): no guard against parallel directions (perpDot == 0) --
+// callers are expected to pass non-parallel lines.
+static SkScalar intersect(const SkPoint& p0, const SkPoint& n0,
+                          const SkPoint& p1, const SkPoint& n1) {
+    const SkPoint v = p1 - p0;
+    SkScalar perpDot = n0.fX * n1.fY - n0.fY * n1.fX; // 2D cross product of the directions
+    return (v.fX * n1.fY - v.fY * n1.fX) / perpDot;
+}
+
+// This is a special case version of intersect where we have the vector
+// perpendicular to the second line rather than the vector parallel to it.
+// Returns t such that p0 + t * n0 lies on the line through p1 whose normal
+// is 'perp'.
+static SkScalar perp_intersect(const SkPoint& p0, const SkPoint& n0,
+                               const SkPoint& p1, const SkPoint& perp) {
+    const SkPoint v = p1 - p0;
+    SkScalar perpDot = n0.dot(perp);
+    return v.dot(perp) / perpDot;
+}
+
+// True when the two points are within kClose (device space) of each other
+// and should be fused into a single vertex.
+static bool duplicate_pt(const SkPoint& p0, const SkPoint& p1) {
+    SkScalar distSq = p0.distanceToSqd(p1);
+    return distSq < kCloseSqd;
+}
+
+// Perpendicular distance from 'test' to the line through p0 with direction v.
+// 'v' is expected to be unit length (callers pass normalized edge vectors),
+// so the un-normalized cross product is the actual distance.
+static SkScalar abs_dist_from_line(const SkPoint& p0, const SkVector& v, const SkPoint& test) {
+    SkPoint testV = test - p0;
+    SkScalar dist = testV.fX * v.fY - testV.fY * v.fX;
+    return SkScalarAbs(dist);
+}
+
+// Appends a point to the global pool and returns its index. 'movable' marks
+// points that later inset rings may slide; 'isCurve' tags points produced by
+// curve flattening (used to choose the join style in createOuterRing).
+// NOTE(review): 'depth' is accepted but not stored in this revision --
+// confirm whether it is intentionally unused.
+int GrAAConvexTessellator::addPt(const SkPoint& pt,
+                                 SkScalar depth,
+                                 SkScalar coverage,
+                                 bool movable,
+                                 bool isCurve) {
+    this->validate();
+
+    int index = fPts.count();
+    *fPts.push() = pt;
+    *fCoverages.push() = coverage;
+    *fMovable.push() = movable;
+    *fIsCurve.push() = isCurve;
+
+    this->validate();
+    return index;
+}
+
+// Removes the most recently added point from the pool.
+// NOTE(review): fIsCurve is NOT popped here even though addPt() pushes onto
+// it, so fIsCurve can grow out of sync with fPts. lineTo() compensates by
+// popping fIsCurve itself, but other callers (e.g. extractFromPath) do not
+// -- confirm this is intentional.
+void GrAAConvexTessellator::popLastPt() {
+    this->validate();
+
+    fPts.pop();
+    fCoverages.pop();
+    fMovable.pop();
+
+    this->validate();
+}
+
+// Removes the first point from the pool. removeShuffle moves the last
+// element into slot 0, so ordering of the remaining points changes.
+// NOTE(review): like popLastPt(), this does not touch fIsCurve -- it can
+// drift out of sync with fPts; confirm intentional.
+void GrAAConvexTessellator::popFirstPtShuffle() {
+    this->validate();
+
+    fPts.removeShuffle(0);
+    fCoverages.removeShuffle(0);
+    fMovable.removeShuffle(0);
+
+    this->validate();
+}
+
+// Moves an existing (movable) point and updates its coverage in place.
+// NOTE(review): 'depth' is accepted but unused in this revision.
+void GrAAConvexTessellator::updatePt(int index,
+                                     const SkPoint& pt,
+                                     SkScalar depth,
+                                     SkScalar coverage) {
+    this->validate();
+    SkASSERT(fMovable[index]);
+
+    fPts[index] = pt;
+    fCoverages[index] = coverage;
+}
+
+// Appends triangle (i0, i1, i2) to the index list, silently dropping
+// degenerate triangles that reference the same vertex twice.
+void GrAAConvexTessellator::addTri(int i0, int i1, int i2) {
+    if (i0 == i1 || i1 == i2 || i2 == i0) {
+        return;
+    }
+
+    *fIndices.push() = i0;
+    *fIndices.push() = i1;
+    *fIndices.push() = i2;
+}
+
+// Resets all accumulated state so the tessellator can be reused for another
+// path. Note fIsCurve is not rewound here -- presumably reset elsewhere or a
+// latent leak of state; TODO confirm.
+void GrAAConvexTessellator::rewind() {
+    fPts.rewind();
+    fCoverages.rewind();
+    fMovable.rewind();
+    fIndices.rewind();
+    fNorms.rewind();
+    fInitialRing.rewind();
+    fCandidateVerts.rewind();
+#if GR_AA_CONVEX_TESSELLATOR_VIZ
+    fRings.rewind();        // TODO: leak in this case!
+#else
+    fRings[0].rewind();
+    fRings[1].rewind();
+#endif
+}
+
+// Computes the inward-facing bisector at each original vertex from the two
+// adjacent outward edge normals.
+void GrAAConvexTessellator::computeBisectors() {
+    fBisectors.setCount(fNorms.count());
+
+    int prev = fBisectors.count() - 1;
+    for (int cur = 0; cur < fBisectors.count(); prev = cur, ++cur) {
+        fBisectors[cur] = fNorms[cur] + fNorms[prev];
+        if (!fBisectors[cur].normalize()) {
+            // The adjacent normals exactly oppose each other (180 degree
+            // vertex): rebuild the bisector from the edge tangents instead.
+            SkASSERT(SkPoint::kLeft_Side == fSide || SkPoint::kRight_Side == fSide);
+            fBisectors[cur].setOrthog(fNorms[cur], (SkPoint::Side)-fSide);
+            SkVector other;
+            other.setOrthog(fNorms[prev], fSide);
+            fBisectors[cur] += other;
+            SkAssertResult(fBisectors[cur].normalize());
+        } else {
+            fBisectors[cur].negate();      // make the bisector face in
+        }
+
+        SkASSERT(SkScalarNearlyEqual(1.0f, fBisectors[cur].length()));
+    }
+}
+
+// Create as many rings as we need to (up to a predefined limit) to reach the specified target
+// depth. If we are in fill mode, the final ring will automatically be fanned.
+// Returns true (and sets *finalRing) when the target depth was reached within
+// kMaxNumRings insets; returns false if the input ring was degenerate or the
+// iteration limit was hit (in which case terminate() handles cleanup).
+bool GrAAConvexTessellator::createInsetRings(Ring& previousRing, SkScalar initialDepth,
+                                             SkScalar initialCoverage, SkScalar targetDepth,
+                                             SkScalar targetCoverage, Ring** finalRing) {
+    static const int kMaxNumRings = 8;
+
+    if (previousRing.numPts() < 3) {
+        return false;
+    }
+    Ring* currentRing = &previousRing;
+    int i;
+    for (i = 0; i < kMaxNumRings; ++i) {
+        Ring* nextRing = this->getNextRing(currentRing);
+        SkASSERT(nextRing != currentRing);
+
+        // 'done' means the target depth was reached (or the ring collapsed).
+        bool done = this->createInsetRing(*currentRing, nextRing, initialDepth, initialCoverage,
+                                          targetDepth, targetCoverage, i == 0);
+        currentRing = nextRing;
+        if (done) {
+            break;
+        }
+        currentRing->init(*this);
+    }
+
+    if (kMaxNumRings == i) {
+        // Bail if we've exceeded the amount of time we want to throw at this.
+        this->terminate(*currentRing);
+        return false;
+    }
+    bool done = currentRing->numPts() >= 3;
+    if (done) {
+        currentRing->init(*this);
+    }
+    *finalRing = currentRing;
+    return done;
+}
+
+// The general idea here is to, conceptually, start with the original polygon and slide
+// the vertices along the bisectors until the first intersection. At that
+// point two of the edges collapse and the process repeats on the new polygon.
+// The polygon state is captured in the Ring class while the GrAAConvexTessellator
+// controls the iteration. The CandidateVerts holds the formative points for the
+// next ring.
+bool GrAAConvexTessellator::tessellate(const SkMatrix& m, const SkPath& path) {
+    if (!this->extractFromPath(m, path)) {
+        return false;
+    }
+
+    SkScalar coverage = 1.0f;
+    SkScalar scaleFactor = 0.0f;
+    if (fStrokeWidth >= 0.0f) {
+        // Stroke mode: outset to half the device-space stroke width minus the
+        // AA radius at full coverage, then an AA fringe fading to zero.
+        SkASSERT(m.isSimilarity());
+        scaleFactor = m.getMaxScale(); // x and y scale are the same
+        SkScalar effectiveStrokeWidth = scaleFactor * fStrokeWidth;
+        Ring outerStrokeRing;
+        this->createOuterRing(fInitialRing, effectiveStrokeWidth / 2 - kAntialiasingRadius,
+                              coverage, &outerStrokeRing);
+        outerStrokeRing.init(*this);
+        Ring outerAARing;
+        this->createOuterRing(outerStrokeRing, kAntialiasingRadius * 2, 0.0f, &outerAARing);
+    } else {
+        // Fill mode: a single AA fringe outset by the AA radius.
+        Ring outerAARing;
+        this->createOuterRing(fInitialRing, kAntialiasingRadius, 0.0f, &outerAARing);
+    }
+
+    // the bisectors are only needed for the computation of the outer ring
+    fBisectors.rewind();
+    if (fStrokeWidth >= 0.0f && fInitialRing.numPts() > 2) {
+        // Inset to the stroke's inner edge, then fade coverage to zero over
+        // the inner AA fringe.
+        SkScalar effectiveStrokeWidth = scaleFactor * fStrokeWidth;
+        Ring* insetStrokeRing;
+        SkScalar strokeDepth = effectiveStrokeWidth / 2 - kAntialiasingRadius;
+        if (this->createInsetRings(fInitialRing, 0.0f, coverage, strokeDepth, coverage,
+                                   &insetStrokeRing)) {
+            Ring* insetAARing;
+            this->createInsetRings(*insetStrokeRing, strokeDepth, coverage, strokeDepth +
+                                   kAntialiasingRadius * 2, 0.0f, &insetAARing);
+        }
+    } else {
+        // Fill: ramp coverage from 0.5 at the boundary to 1.0 at the AA radius.
+        Ring* insetAARing;
+        this->createInsetRings(fInitialRing, 0.0f, 0.5f, kAntialiasingRadius, 1.0f, &insetAARing);
+    }
+
+    SkDEBUGCODE(this->validate();)
+    return true;
+}
+
+// Signed depth of point 'p' relative to the 'edgeIdx'-th original edge.
+// Since the stored normals face outward, a positive depth means 'p' lies
+// inside the polygon relative to that edge.
+SkScalar GrAAConvexTessellator::computeDepthFromEdge(int edgeIdx, const SkPoint& p) const {
+    SkASSERT(edgeIdx < fNorms.count());
+
+    SkPoint v = p - fPts[edgeIdx];
+    SkScalar depth = -fNorms[edgeIdx].dot(v);
+    return depth;
+}
+
+// Find a point that is 'desiredDepth' away from the 'edgeIdx'-th edge and lies
+// along the 'bisector' from the 'startIdx'-th point.
+// Returns false when the bisector points away from the edge (t > 0), i.e.
+// no valid intersection exists in the inset direction.
+bool GrAAConvexTessellator::computePtAlongBisector(int startIdx,
+                                                   const SkVector& bisector,
+                                                   int edgeIdx,
+                                                   SkScalar desiredDepth,
+                                                   SkPoint* result) const {
+    const SkPoint& norm = fNorms[edgeIdx];
+
+    // First find the point where the edge and the bisector intersect
+    SkPoint newP;
+
+    SkScalar t = perp_intersect(fPts[startIdx], bisector, fPts[edgeIdx], norm);
+    if (SkScalarNearlyEqual(t, 0.0f)) {
+        // the start point was one of the original ring points
+        SkASSERT(startIdx < fPts.count());
+        newP = fPts[startIdx];
+    } else if (t < 0.0f) {
+        newP = bisector;
+        newP.scale(t);
+        newP += fPts[startIdx];
+    } else {
+        return false;
+    }
+
+    // Then offset along the bisector from that point the correct distance
+    SkScalar dot = bisector.dot(norm);
+    t = -desiredDepth / dot;
+    *result = bisector;
+    result->scale(t);
+    *result += newP;
+
+    return true;
+}
+
+// Flattens 'path' (transformed by 'm') into the device-space point pool,
+// de-duplicating and de-colinearizing vertices, then computes the outward
+// edge normals, the winding side, the per-vertex bisectors and the initial
+// ring. Returns false for degenerate inputs that cannot be tessellated.
+bool GrAAConvexTessellator::extractFromPath(const SkMatrix& m, const SkPath& path) {
+    SkASSERT(SkPath::kConvex_Convexity == path.getConvexity());
+
+    // Outer ring: 3*numPts
+    // Middle ring: numPts
+    // Presumptive inner ring: numPts
+    this->reservePts(5*path.countPoints());
+    // Outer ring: 12*numPts
+    // Middle ring: 0
+    // Presumptive inner ring: 6*numPts + 6
+    fIndices.setReserve(18*path.countPoints() + 6);
+
+    fNorms.setReserve(path.countPoints());
+
+    // TODO: is there a faster way to extract the points from the path? Perhaps
+    // get all the points via a new entry point, transform them all in bulk
+    // and then walk them to find duplicates?
+    SkPath::Iter iter(path, true);
+    SkPoint pts[4];
+    SkPath::Verb verb;
+    while ((verb = iter.next(pts)) != SkPath::kDone_Verb) {
+        switch (verb) {
+            case SkPath::kLine_Verb:
+                this->lineTo(m, pts[1], false);
+                break;
+            case SkPath::kQuad_Verb:
+                this->quadTo(m, pts);
+                break;
+            case SkPath::kCubic_Verb:
+                this->cubicTo(m, pts);
+                break;
+            case SkPath::kConic_Verb:
+                this->conicTo(m, pts, iter.conicWeight());
+                break;
+            case SkPath::kMove_Verb:
+            case SkPath::kClose_Verb:
+            case SkPath::kDone_Verb:
+                break;
+        }
+    }
+
+    if (this->numPts() < 2) {
+        return false;
+    }
+
+    // check if last point is a duplicate of the first point. If so, remove it.
+    if (duplicate_pt(fPts[this->numPts()-1], fPts[0])) {
+        this->popLastPt();
+        fNorms.pop();
+    }
+
+    // At this point the normals lag the points by one: the closing edge's
+    // normal has not been computed yet.
+    SkASSERT(fPts.count() == fNorms.count()+1);
+    if (this->numPts() >= 3) {
+        if (abs_dist_from_line(fPts.top(), fNorms.top(), fPts[0]) < kClose) {
+            // The last point is on the line from the second to last to the first point.
+            this->popLastPt();
+            fNorms.pop();
+        }
+
+        // Add the closing edge's normal (last point back to the first).
+        *fNorms.push() = fPts[0] - fPts.top();
+        SkDEBUGCODE(SkScalar len =) SkPoint::Normalize(&fNorms.top());
+        SkASSERT(len > 0.0f);
+        SkASSERT(fPts.count() == fNorms.count());
+    }
+
+    if (this->numPts() >= 3 && abs_dist_from_line(fPts[0], fNorms.top(), fPts[1]) < kClose) {
+        // The first point is on the line from the last to the second.
+        this->popFirstPtShuffle();
+        fNorms.removeShuffle(0);
+        fNorms[0] = fPts[1] - fPts[0];
+        SkDEBUGCODE(SkScalar len =) SkPoint::Normalize(&fNorms[0]);
+        SkASSERT(len > 0.0f);
+        SkASSERT(SkScalarNearlyEqual(1.0f, fNorms[0].length()));
+    }
+
+    if (this->numPts() >= 3) {
+        // Check the cross product of the final trio
+        SkScalar cross = SkPoint::CrossProduct(fNorms[0], fNorms.top());
+        if (cross > 0.0f) {
+            fSide = SkPoint::kRight_Side;
+        } else {
+            fSide = SkPoint::kLeft_Side;
+        }
+
+        // Make all the normals face outwards rather than along the edge
+        for (int cur = 0; cur < fNorms.count(); ++cur) {
+            fNorms[cur].setOrthog(fNorms[cur], fSide);
+            SkASSERT(SkScalarNearlyEqual(1.0f, fNorms[cur].length()));
+        }
+
+        this->computeBisectors();
+    } else if (this->numPts() == 2) {
+        // We've got two points, so we're degenerate.
+        if (fStrokeWidth < 0.0f) {
+            // it's a fill, so we don't need to worry about degenerate paths
+            return false;
+        }
+        // For stroking, we still need to process the degenerate path, so fix it up
+        fSide = SkPoint::kLeft_Side;
+
+        // Make all the normals face outwards rather than along the edge
+        for (int cur = 0; cur < fNorms.count(); ++cur) {
+            fNorms[cur].setOrthog(fNorms[cur], fSide);
+            SkASSERT(SkScalarNearlyEqual(1.0f, fNorms[cur].length()));
+        }
+
+        // Mirror the single edge normal so the "polygon" has two opposed edges.
+        fNorms.push(SkPoint::Make(-fNorms[0].fX, -fNorms[0].fY));
+        // we won't actually use the bisectors, so just push zeroes
+        fBisectors.push(SkPoint::Make(0.0, 0.0));
+        fBisectors.push(SkPoint::Make(0.0, 0.0));
+    } else {
+        return false;
+    }
+
+    fCandidateVerts.setReserve(this->numPts());
+    fInitialRing.setReserve(this->numPts());
+    for (int i = 0; i < this->numPts(); ++i) {
+        fInitialRing.addIdx(i, i);
+    }
+    fInitialRing.init(fNorms, fBisectors);
+
+    this->validate();
+    return true;
+}
+
+// Returns an empty ring to build the next inset into. In VIZ builds every
+// ring is kept (heap-allocated) for later drawing; otherwise two rings are
+// ping-ponged so only the current and previous ring are ever live.
+GrAAConvexTessellator::Ring* GrAAConvexTessellator::getNextRing(Ring* lastRing) {
+#if GR_AA_CONVEX_TESSELLATOR_VIZ
+    Ring* ring = *fRings.push() = new Ring;
+    ring->setReserve(fInitialRing.numPts());
+    ring->rewind();
+    return ring;
+#else
+    // Flip flop back and forth between fRings[0] & fRings[1]
+    int nextRing = (lastRing == &fRings[0]) ? 1 : 0;
+    fRings[nextRing].setReserve(fInitialRing.numPts());
+    fRings[nextRing].rewind();
+    return &fRings[nextRing];
+#endif
+}
+
+// Triangulates the interior of 'ring' as a triangle fan anchored at its
+// first point (valid because the ring is convex).
+void GrAAConvexTessellator::fanRing(const Ring& ring) {
+    // fan out from point 0
+    int startIdx = ring.index(0);
+    for (int cur = ring.numPts() - 2; cur >= 0; --cur) {
+        this->addTri(startIdx, ring.index(cur), ring.index(cur + 1));
+    }
+}
+
+// Builds 'nextRing' by outsetting 'previousRing' by 'outset' and emits the
+// triangles connecting the two rings. Each source vertex contributes two
+// perpendicular offset points (one per incident edge); the gap between them
+// is closed with a bevel, a miter (subject to fMiterLimit) or a one-point
+// pseudo-round join depending on fJoin and whether the vertex came from a
+// flattened curve.
+void GrAAConvexTessellator::createOuterRing(const Ring& previousRing, SkScalar outset,
+                                            SkScalar coverage, Ring* nextRing) {
+    const int numPts = previousRing.numPts();
+    if (numPts == 0) {
+        return;
+    }
+
+    int prev = numPts - 1;
+    int lastPerpIdx = -1, firstPerpIdx = -1;
+
+    const SkScalar outsetSq = SkScalarMul(outset, outset);
+    SkScalar miterLimitSq = SkScalarMul(outset, fMiterLimit);
+    miterLimitSq = SkScalarMul(miterLimitSq, miterLimitSq);
+    for (int cur = 0; cur < numPts; ++cur) {
+        int originalIdx = previousRing.index(cur);
+        // For each vertex of the original polygon we add at least two points to the
+        // outset polygon - one extending perpendicular to each impinging edge. Connecting these
+        // two points yields a bevel join. We need one additional point for a mitered join, and
+        // a round join requires one or more points depending upon curvature.
+
+        // The perpendicular point for the last edge
+        SkPoint normal1 = previousRing.norm(prev);
+        SkPoint perp1 = normal1;
+        perp1.scale(outset);
+        perp1 += this->point(originalIdx);
+
+        // The perpendicular point for the next edge.
+        SkPoint normal2 = previousRing.norm(cur);
+        SkPoint perp2 = normal2;
+        perp2.scale(outset);
+        perp2 += fPts[originalIdx];
+
+        bool isCurve = fIsCurve[originalIdx];
+
+        // We know it isn't a duplicate of the prior point (since it and this
+        // one are just perpendicular offsets from the non-merged polygon points)
+        int perp1Idx = this->addPt(perp1, -outset, coverage, false, isCurve);
+        nextRing->addIdx(perp1Idx, originalIdx);
+
+        int perp2Idx;
+        // For very shallow angles all the corner points could fuse.
+        if (duplicate_pt(perp2, this->point(perp1Idx))) {
+            perp2Idx = perp1Idx;
+        } else {
+            perp2Idx = this->addPt(perp2, -outset, coverage, false, isCurve);
+        }
+
+        if (perp2Idx != perp1Idx) {
+            if (isCurve) {
+                // bevel or round depending upon curvature
+                SkScalar dotProd = normal1.dot(normal2);
+                if (dotProd < kRoundCapThreshold) {
+                    // Currently we "round" by creating a single extra point, which produces
+                    // good results for common cases. For thick strokes with high curvature, we will
+                    // need to add more points; for the time being we simply fall back to software
+                    // rendering for thick strokes.
+                    SkPoint miter = previousRing.bisector(cur);
+                    miter.setLength(-outset);
+                    miter += fPts[originalIdx];
+
+                    // For very shallow angles all the corner points could fuse
+                    if (!duplicate_pt(miter, this->point(perp1Idx))) {
+                        int miterIdx;
+                        miterIdx = this->addPt(miter, -outset, coverage, false, false);
+                        nextRing->addIdx(miterIdx, originalIdx);
+                        // The two triangles for the corner
+                        this->addTri(originalIdx, perp1Idx, miterIdx);
+                        this->addTri(originalIdx, miterIdx, perp2Idx);
+                    }
+                } else {
+                    this->addTri(originalIdx, perp1Idx, perp2Idx);
+                }
+            } else {
+                switch (fJoin) {
+                    case SkPaint::Join::kMiter_Join: {
+                        // The bisector outset point
+                        SkPoint miter = previousRing.bisector(cur);
+                        SkScalar dotProd = normal1.dot(normal2);
+                        // sin^2(theta/2) via the half-angle identity; the
+                        // miter length is outset / sin(theta/2).
+                        SkScalar sinHalfAngleSq = SkScalarHalf(SK_Scalar1 + dotProd);
+                        SkScalar lengthSq = outsetSq / sinHalfAngleSq;
+                        if (lengthSq > miterLimitSq) {
+                            // just bevel it
+                            this->addTri(originalIdx, perp1Idx, perp2Idx);
+                            break;
+                        }
+                        miter.setLength(-SkScalarSqrt(lengthSq));
+                        miter += fPts[originalIdx];
+
+                        // For very shallow angles all the corner points could fuse
+                        if (!duplicate_pt(miter, this->point(perp1Idx))) {
+                            int miterIdx;
+                            miterIdx = this->addPt(miter, -outset, coverage, false, false);
+                            nextRing->addIdx(miterIdx, originalIdx);
+                            // The two triangles for the corner
+                            this->addTri(originalIdx, perp1Idx, miterIdx);
+                            this->addTri(originalIdx, miterIdx, perp2Idx);
+                        }
+                        break;
+                    }
+                    case SkPaint::Join::kBevel_Join:
+                        this->addTri(originalIdx, perp1Idx, perp2Idx);
+                        break;
+                    default:
+                        // kRound_Join is unsupported for now. GrAALinearizingConvexPathRenderer is
+                        // only willing to draw mitered or beveled, so we should never get here.
+                        SkASSERT(false);
+                }
+            }
+
+            nextRing->addIdx(perp2Idx, originalIdx);
+        }
+
+        if (0 == cur) {
+            // Store the index of the first perpendicular point to finish up
+            firstPerpIdx = perp1Idx;
+            SkASSERT(-1 == lastPerpIdx);
+        } else {
+            // The triangles for the previous edge
+            int prevIdx = previousRing.index(prev);
+            this->addTri(prevIdx, perp1Idx, originalIdx);
+            this->addTri(prevIdx, lastPerpIdx, perp1Idx);
+        }
+
+        // Track the last perpendicular outset point so we can construct the
+        // trailing edge triangles.
+        lastPerpIdx = perp2Idx;
+        prev = cur;
+    }
+
+    // pick up the final edge rect
+    int lastIdx = previousRing.index(numPts - 1);
+    this->addTri(lastIdx, firstPerpIdx, previousRing.index(0));
+    this->addTri(lastIdx, lastPerpIdx, firstPerpIdx);
+
+    this->validate();
+}
+
+// Something went wrong in the creation of the next ring. If we're filling the shape, just go ahead
+// and fan it. (For strokes the partial geometry is simply left as-is.)
+void GrAAConvexTessellator::terminate(const Ring& ring) {
+    if (fStrokeWidth < 0.0f) {
+        this->fanRing(ring);
+    }
+}
+
+// Linearly interpolates coverage for a point at 'depth' between
+// (initialDepth, initialCoverage) and (targetDepth, targetCoverage),
+// clamped to at most 1. When the two depths coincide the target coverage is
+// returned directly to avoid dividing by zero.
+static SkScalar compute_coverage(SkScalar depth, SkScalar initialDepth, SkScalar initialCoverage,
+                                 SkScalar targetDepth, SkScalar targetCoverage) {
+    if (SkScalarNearlyEqual(initialDepth, targetDepth)) {
+        return targetCoverage;
+    }
+    SkScalar result = (depth - initialDepth) / (targetDepth - initialDepth) *
+            (targetCoverage - initialCoverage) + initialCoverage;
+    return SkScalarClampMax(result, 1.0f);
+}
+
+// return true when processing is complete
+// Insets 'lastRing' by sliding each point inward along its bisector until
+// either 'targetDepth' is reached or the first pair of bisectors intersect
+// (which collapses an edge), fusing points that land on top of each other,
+// and emits the triangles connecting the two rings.
+bool GrAAConvexTessellator::createInsetRing(const Ring& lastRing, Ring* nextRing,
+                                            SkScalar initialDepth, SkScalar initialCoverage,
+                                            SkScalar targetDepth, SkScalar targetCoverage,
+                                            bool forceNew) {
+    bool done = false;
+
+    fCandidateVerts.rewind();
+
+    // Loop through all the points in the ring and find the intersection with the smallest depth
+    SkScalar minDist = SK_ScalarMax, minT = 0.0f;
+    int minEdgeIdx = -1;
+
+    for (int cur = 0; cur < lastRing.numPts(); ++cur) {
+        int next = (cur + 1) % lastRing.numPts();
+        SkScalar t = intersect(this->point(lastRing.index(cur)),  lastRing.bisector(cur),
+                               this->point(lastRing.index(next)), lastRing.bisector(next));
+        SkScalar dist = -t * lastRing.norm(cur).dot(lastRing.bisector(cur));
+
+        if (minDist > dist) {
+            minDist = dist;
+            minT = t;
+            minEdgeIdx = cur;
+        }
+    }
+
+    if (minEdgeIdx == -1) {
+        return false;
+    }
+    SkPoint newPt = lastRing.bisector(minEdgeIdx);
+    newPt.scale(minT);
+    newPt += this->point(lastRing.index(minEdgeIdx));
+
+    SkScalar depth = this->computeDepthFromEdge(lastRing.origEdgeID(minEdgeIdx), newPt);
+    if (depth >= targetDepth) {
+        // None of the bisectors intersect before reaching the desired depth.
+        // Just step them all to the desired depth
+        depth = targetDepth;
+        done = true;
+    }
+
+    // 'dst' stores where each point in the last ring maps to/transforms into
+    // in the next ring.
+    SkTDArray<int> dst;
+    dst.setCount(lastRing.numPts());
+
+    // Create the first point (who compares with no one)
+    if (!this->computePtAlongBisector(lastRing.index(0),
+                                      lastRing.bisector(0),
+                                      lastRing.origEdgeID(0),
+                                      depth, &newPt)) {
+        this->terminate(lastRing);
+        return true;
+    }
+    dst[0] = fCandidateVerts.addNewPt(newPt,
+                                      lastRing.index(0), lastRing.origEdgeID(0),
+                                      !this->movable(lastRing.index(0)));
+
+    // Handle the middle points (who only compare with the prior point)
+    for (int cur = 1; cur < lastRing.numPts()-1; ++cur) {
+        if (!this->computePtAlongBisector(lastRing.index(cur),
+                                          lastRing.bisector(cur),
+                                          lastRing.origEdgeID(cur),
+                                          depth, &newPt)) {
+            this->terminate(lastRing);
+            return true;
+        }
+        if (!duplicate_pt(newPt, fCandidateVerts.lastPoint())) {
+            dst[cur] = fCandidateVerts.addNewPt(newPt,
+                                                lastRing.index(cur), lastRing.origEdgeID(cur),
+                                                !this->movable(lastRing.index(cur)));
+        } else {
+            dst[cur] = fCandidateVerts.fuseWithPrior(lastRing.origEdgeID(cur));
+        }
+    }
+
+    // Check on the last point (handling the wrap around)
+    int cur = lastRing.numPts()-1;
+    if (!this->computePtAlongBisector(lastRing.index(cur),
+                                      lastRing.bisector(cur),
+                                      lastRing.origEdgeID(cur),
+                                      depth, &newPt)) {
+        this->terminate(lastRing);
+        return true;
+    }
+    bool dupPrev = duplicate_pt(newPt, fCandidateVerts.lastPoint());
+    bool dupNext = duplicate_pt(newPt, fCandidateVerts.firstPoint());
+
+    if (!dupPrev && !dupNext) {
+        dst[cur] = fCandidateVerts.addNewPt(newPt,
+                                            lastRing.index(cur), lastRing.origEdgeID(cur),
+                                            !this->movable(lastRing.index(cur)));
+    } else if (dupPrev && !dupNext) {
+        dst[cur] = fCandidateVerts.fuseWithPrior(lastRing.origEdgeID(cur));
+    } else if (!dupPrev && dupNext) {
+        dst[cur] = fCandidateVerts.fuseWithNext();
+    } else {
+        // The last point fuses with both neighbors; if those neighbors also
+        // coincide the whole run collapses into a single vertex.
+        bool dupPrevVsNext = duplicate_pt(fCandidateVerts.firstPoint(), fCandidateVerts.lastPoint());
+
+        if (!dupPrevVsNext) {
+            dst[cur] = fCandidateVerts.fuseWithPrior(lastRing.origEdgeID(cur));
+        } else {
+            const int fused = fCandidateVerts.fuseWithBoth();
+            dst[cur] = fused;
+            // Remap every trailing entry that pointed at the now-fused vertex.
+            const int targetIdx = dst[cur - 1];
+            for (int i = cur - 1; i >= 0 && dst[i] == targetIdx; i--) {
+                dst[i] = fused;
+            }
+        }
+    }
+
+    // Fold the new ring's points into the global pool
+    for (int i = 0; i < fCandidateVerts.numPts(); ++i) {
+        int newIdx;
+        if (fCandidateVerts.needsToBeNew(i) || forceNew) {
+            // if the originating index is still valid then this point wasn't
+            // fused (and is thus movable)
+            SkScalar coverage = compute_coverage(depth, initialDepth, initialCoverage,
+                                                 targetDepth, targetCoverage);
+            newIdx = this->addPt(fCandidateVerts.point(i), depth, coverage,
+                                 fCandidateVerts.originatingIdx(i) != -1, false);
+        } else {
+            SkASSERT(fCandidateVerts.originatingIdx(i) != -1);
+            this->updatePt(fCandidateVerts.originatingIdx(i), fCandidateVerts.point(i), depth,
+                           targetCoverage);
+            newIdx = fCandidateVerts.originatingIdx(i);
+        }
+
+        nextRing->addIdx(newIdx, fCandidateVerts.origEdge(i));
+    }
+
+    // 'dst' currently has indices into the ring. Remap these to be indices
+    // into the global pool since the triangulation operates in that space.
+    for (int i = 0; i < dst.count(); ++i) {
+        dst[i] = nextRing->index(dst[i]);
+    }
+
+    for (int cur = 0; cur < lastRing.numPts(); ++cur) {
+        int next = (cur + 1) % lastRing.numPts();
+
+        this->addTri(lastRing.index(cur), lastRing.index(next), dst[next]);
+        this->addTri(lastRing.index(cur), dst[next], dst[cur]);
+    }
+
+    if (done && fStrokeWidth < 0.0f) {
+        // fill
+        this->fanRing(*nextRing);
+    }
+
+    if (nextRing->numPts() < 3) {
+        done = true;
+    }
+    return done;
+}
+
+// Debug invariant check: the movability array tracks the point pool and the
+// index list holds whole triangles. (fCoverages and fIsCurve are not
+// checked here -- see the NOTE on popLastPt regarding fIsCurve drift.)
+void GrAAConvexTessellator::validate() const {
+    SkASSERT(fPts.count() == fMovable.count());
+    SkASSERT(0 == (fIndices.count() % 3));
+}
+
+//////////////////////////////////////////////////////////////////////////////
+// Derives this ring's edge normals and vertex bisectors from its current
+// point positions in the tessellator's pool.
+void GrAAConvexTessellator::Ring::init(const GrAAConvexTessellator& tess) {
+    this->computeNormals(tess);
+    this->computeBisectors(tess);
+}
+
+// Initializes the ring from precomputed normals/bisectors (used for the
+// initial ring, whose values were already computed in extractFromPath).
+void GrAAConvexTessellator::Ring::init(const SkTDArray<SkVector>& norms,
+                                       const SkTDArray<SkVector>& bisectors) {
+    for (int i = 0; i < fPts.count(); ++i) {
+        fPts[i].fNorm = norms[i];
+        fPts[i].fBisector = bisectors[i];
+    }
+}
+
+// Compute the outward facing normal at each vertex.
+// Each entry is the unit normal of the edge leaving that vertex, rotated to
+// face outward according to the tessellator's winding side.
+void GrAAConvexTessellator::Ring::computeNormals(const GrAAConvexTessellator& tess) {
+    for (int cur = 0; cur < fPts.count(); ++cur) {
+        int next = (cur + 1) % fPts.count();
+
+        fPts[cur].fNorm = tess.point(fPts[next].fIndex) - tess.point(fPts[cur].fIndex);
+        SkPoint::Normalize(&fPts[cur].fNorm);
+        fPts[cur].fNorm.setOrthog(fPts[cur].fNorm, tess.side());
+    }
+}
+
+// Computes each vertex's inward-facing bisector from its two adjacent edge
+// normals (mirrors GrAAConvexTessellator::computeBisectors, including the
+// 180-degree-vertex fallback that rebuilds the bisector from tangents).
+void GrAAConvexTessellator::Ring::computeBisectors(const GrAAConvexTessellator& tess) {
+    int prev = fPts.count() - 1;
+    for (int cur = 0; cur < fPts.count(); prev = cur, ++cur) {
+        fPts[cur].fBisector = fPts[cur].fNorm + fPts[prev].fNorm;
+        if (!fPts[cur].fBisector.normalize()) {
+            SkASSERT(SkPoint::kLeft_Side == tess.side() || SkPoint::kRight_Side == tess.side());
+            fPts[cur].fBisector.setOrthog(fPts[cur].fNorm, (SkPoint::Side)-tess.side());
+            SkVector other;
+            other.setOrthog(fPts[prev].fNorm, tess.side());
+            fPts[cur].fBisector += other;
+            SkAssertResult(fPts[cur].fBisector.normalize());
+        } else {
+            fPts[cur].fBisector.negate();      // make the bisector face in
+        }
+    }
+}
+
+//////////////////////////////////////////////////////////////////////////////
+#ifdef SK_DEBUG
+// Is this ring convex?
+// Walks consecutive edge pairs and checks that all their cross products
+// share a sign (with a small tolerance for near-zero turns).
+// NOTE(review): the locals are named minDot/maxDot but actually hold 2D
+// cross products, not dot products.
+bool GrAAConvexTessellator::Ring::isConvex(const GrAAConvexTessellator& tess) const {
+    if (fPts.count() < 3) {
+        return true;
+    }
+
+    SkPoint prev = tess.point(fPts[0].fIndex) - tess.point(fPts.top().fIndex);
+    SkPoint cur  = tess.point(fPts[1].fIndex) - tess.point(fPts[0].fIndex);
+    SkScalar minDot = prev.fX * cur.fY - prev.fY * cur.fX;
+    SkScalar maxDot = minDot;
+
+    prev = cur;
+    for (int i = 1; i < fPts.count(); ++i) {
+        int next = (i + 1) % fPts.count();
+
+        cur = tess.point(fPts[next].fIndex) - tess.point(fPts[i].fIndex);
+        SkScalar dot = prev.fX * cur.fY - prev.fY * cur.fX;
+
+        minDot = SkMinScalar(minDot, dot);
+        maxDot = SkMaxScalar(maxDot, dot);
+
+        prev = cur;
+    }
+
+    // Snap near-zero turns to exactly zero so colinear edges don't flip the sign test.
+    if (SkScalarNearlyEqual(maxDot, 0.0f, 0.005f)) {
+        maxDot = 0;
+    }
+    if (SkScalarNearlyEqual(minDot, 0.0f, 0.005f)) {
+        minDot = 0;
+    }
+    return (maxDot >= 0.0f) == (minDot >= 0.0f);
+}
+
+#endif
+
+// Appends a device-space point, skipping duplicates and removing a prior
+// point it renders colinear; also maintains the lagging edge-normal array.
+void GrAAConvexTessellator::lineTo(SkPoint p, bool isCurve) {
+    if (this->numPts() > 0 && duplicate_pt(p, this->lastPoint())) {
+        return;
+    }
+
+    SkASSERT(fPts.count() <= 1 || fPts.count() == fNorms.count()+1);
+    if (this->numPts() >= 2 &&
+        abs_dist_from_line(fPts.top(), fNorms.top(), p) < kClose) {
+        // The old last point is on the line from the second to last to the new point
+        this->popLastPt();
+        fNorms.pop();
+        // popLastPt() does not pop fIsCurve, so do it here (see NOTE there).
+        fIsCurve.pop();
+    }
+    // Fills start the boundary at half coverage; strokes at full coverage.
+    SkScalar initialRingCoverage = fStrokeWidth < 0.0f ? 0.5f : 1.0f;
+    this->addPt(p, 0.0f, initialRingCoverage, false, isCurve);
+    if (this->numPts() > 1) {
+        *fNorms.push() = fPts.top() - fPts[fPts.count()-2];
+        SkDEBUGCODE(SkScalar len =) SkPoint::Normalize(&fNorms.top());
+        SkASSERT(len > 0.0f);
+        SkASSERT(SkScalarNearlyEqual(1.0f, fNorms.top().length()));
+    }
+}
+
+// Transforms 'p' to device space, then forwards to the plain lineTo.
+void GrAAConvexTessellator::lineTo(const SkMatrix& m, SkPoint p, bool isCurve) {
+    m.mapPoints(&p, 1);
+    this->lineTo(p, isCurve);
+}
+
+// Flattens a (device-space) quadratic into line segments at kQuadTolerance
+// and feeds them through lineTo with the curve flag set.
+void GrAAConvexTessellator::quadTo(SkPoint pts[3]) {
+    int maxCount = GrPathUtils::quadraticPointCount(pts, kQuadTolerance);
+    fPointBuffer.setReserve(maxCount);
+    SkPoint* target = fPointBuffer.begin();
+    int count = GrPathUtils::generateQuadraticPoints(pts[0], pts[1], pts[2],
+            kQuadTolerance, &target, maxCount);
+    fPointBuffer.setCount(count);
+    for (int i = 0; i < count; i++) {
+        lineTo(fPointBuffer[i], true);
+    }
+}
+
+// Maps the control points to device space (on a copy, leaving the caller's
+// array untouched) and flattens the quad.
+void GrAAConvexTessellator::quadTo(const SkMatrix& m, SkPoint pts[3]) {
+    SkPoint transformed[3];
+    transformed[0] = pts[0];
+    transformed[1] = pts[1];
+    transformed[2] = pts[2];
+    m.mapPoints(transformed, 3);
+    quadTo(transformed);
+}
+
+// Maps the cubic's control points to device space (in place -- note this
+// mutates the caller's array, unlike quadTo) and flattens it into line
+// segments at kCubicTolerance.
+void GrAAConvexTessellator::cubicTo(const SkMatrix& m, SkPoint pts[4]) {
+    m.mapPoints(pts, 4);
+    int maxCount = GrPathUtils::cubicPointCount(pts, kCubicTolerance);
+    fPointBuffer.setReserve(maxCount);
+    SkPoint* target = fPointBuffer.begin();
+    int count = GrPathUtils::generateCubicPoints(pts[0], pts[1], pts[2], pts[3],
+            kCubicTolerance, &target, maxCount);
+    fPointBuffer.setCount(count);
+    for (int i = 0; i < count; i++) {
+        lineTo(fPointBuffer[i], true);
+    }
+}
+
+// include down here to avoid compilation errors caused by "-" overload in SkGeometry.h
+#include "SkGeometry.h"
+
+// Maps the conic's control points to device space (in place), converts the
+// conic to a chain of quadratics at kConicTolerance, and flattens each quad.
+// The last quad's endpoint is forced to the conic's true endpoint to avoid
+// approximation drift.
+void GrAAConvexTessellator::conicTo(const SkMatrix& m, SkPoint pts[3], SkScalar w) {
+    m.mapPoints(pts, 3);
+    SkAutoConicToQuads quadder;
+    const SkPoint* quads = quadder.computeQuads(pts, w, kConicTolerance);
+    SkPoint lastPoint = *(quads++);
+    int count = quadder.countQuads();
+    for (int i = 0; i < count; ++i) {
+        SkPoint quadPts[3];
+        quadPts[0] = lastPoint;
+        quadPts[1] = quads[0];
+        quadPts[2] = i == count - 1 ? pts[2] : quads[1];
+        quadTo(quadPts);
+        lastPoint = quadPts[2];
+        quads += 2;
+    }
+}
+
+//////////////////////////////////////////////////////////////////////////////
+#if GR_AA_CONVEX_TESSELLATOR_VIZ
+static const SkScalar kPointRadius = 0.02f;
+static const SkScalar kArrowStrokeWidth = 0.0f;
+static const SkScalar kArrowLength = 0.2f;
+static const SkScalar kEdgeTextSize = 0.1f;
+static const SkScalar kPointTextSize = 0.02f;
+
+// Debug-viz helper: draws a filled circle whose gray level encodes
+// 'paramValue' (0 = black, 1 = white), optionally ringed in yellow.
+static void draw_point(SkCanvas* canvas, const SkPoint& p, SkScalar paramValue, bool stroke) {
+    SkPaint paint;
+    SkASSERT(paramValue <= 1.0f);
+    int gs = int(255*paramValue);
+    paint.setARGB(255, gs, gs, gs);
+
+    canvas->drawCircle(p.fX, p.fY, kPointRadius, paint);
+
+    if (stroke) {
+        SkPaint stroke;
+        stroke.setColor(SK_ColorYELLOW);
+        stroke.setStyle(SkPaint::kStroke_Style);
+        stroke.setStrokeWidth(kPointRadius/3.0f);
+        canvas->drawCircle(p.fX, p.fY, kPointRadius, stroke);
+    }
+}
+
+// Debug-viz helper: draws a single colored line segment.
+static void draw_line(SkCanvas* canvas, const SkPoint& p0, const SkPoint& p1, SkColor color) {
+    SkPaint p;
+    p.setColor(color);
+
+    canvas->drawLine(p0.fX, p0.fY, p1.fX, p1.fY, p);
+}
+
+// Debug-viz helper: draws a stroked segment of length 'len' from 'p' in
+// direction 'n' (no arrowhead despite the name).
+static void draw_arrow(SkCanvas*canvas, const SkPoint& p, const SkPoint &n,
+                       SkScalar len, SkColor color) {
+    SkPaint paint;
+    paint.setColor(color);
+    paint.setStrokeWidth(kArrowStrokeWidth);
+    paint.setStyle(SkPaint::kStroke_Style);
+
+    canvas->drawLine(p.fX, p.fY,
+                     p.fX + len * n.fX, p.fY + len * n.fY,
+                     paint);
+}
+
+// Debug-viz: draws the ring's edges (green), per-edge normals (red),
+// per-vertex bisectors (blue) and each edge's originating-edge id.
+void GrAAConvexTessellator::Ring::draw(SkCanvas* canvas, const GrAAConvexTessellator& tess) const {
+    SkPaint paint;
+    paint.setTextSize(kEdgeTextSize);
+
+    for (int cur = 0; cur < fPts.count(); ++cur) {
+        int next = (cur + 1) % fPts.count();
+
+        draw_line(canvas,
+                  tess.point(fPts[cur].fIndex),
+                  tess.point(fPts[next].fIndex),
+                  SK_ColorGREEN);
+
+        SkPoint mid = tess.point(fPts[cur].fIndex) + tess.point(fPts[next].fIndex);
+        mid.scale(0.5f);
+
+        if (fPts.count()) {
+            draw_arrow(canvas, mid, fPts[cur].fNorm, kArrowLength, SK_ColorRED);
+            mid.fX += (kArrowLength/2) * fPts[cur].fNorm.fX;
+            mid.fY += (kArrowLength/2) * fPts[cur].fNorm.fY;
+        }
+
+        SkString num;
+        num.printf("%d", this->origEdgeID(cur));
+        canvas->drawText(num.c_str(), num.size(), mid.fX, mid.fY, paint);
+
+        if (fPts.count()) {
+            draw_arrow(canvas, tess.point(fPts[cur].fIndex), fPts[cur].fBisector,
+                       kArrowLength, SK_ColorBLUE);
+        }
+    }
+}
+
+// Debug-viz: draws the full triangulation wireframe, every ring, and every
+// pooled point (shaded by depth, labeled with its pool index).
+void GrAAConvexTessellator::draw(SkCanvas* canvas) const {
+    for (int i = 0; i < fIndices.count(); i += 3) {
+        SkASSERT(fIndices[i] < this->numPts()) ;
+        SkASSERT(fIndices[i+1] < this->numPts()) ;
+        SkASSERT(fIndices[i+2] < this->numPts()) ;
+
+        draw_line(canvas,
+                  this->point(this->fIndices[i]), this->point(this->fIndices[i+1]),
+                  SK_ColorBLACK);
+        draw_line(canvas,
+                  this->point(this->fIndices[i+1]), this->point(this->fIndices[i+2]),
+                  SK_ColorBLACK);
+        draw_line(canvas,
+                  this->point(this->fIndices[i+2]), this->point(this->fIndices[i]),
+                  SK_ColorBLACK);
+    }
+
+    fInitialRing.draw(canvas, *this);
+    for (int i = 0; i < fRings.count(); ++i) {
+        fRings[i]->draw(canvas, *this);
+    }
+
+    for (int i = 0; i < this->numPts(); ++i) {
+        draw_point(canvas,
+                   this->point(i), 0.5f + (this->depth(i)/(2 * kAntialiasingRadius)),
+                   !this->movable(i));
+
+        SkPaint paint;
+        paint.setTextSize(kPointTextSize);
+        paint.setTextAlign(SkPaint::kCenter_Align);
+        // Label deep (fully inside) points in white so they stay readable
+        // against their dark fill.
+        if (this->depth(i) <= -kAntialiasingRadius) {
+            paint.setColor(SK_ColorWHITE);
+        }
+
+        SkString num;
+        num.printf("%d", i);
+        canvas->drawText(num.c_str(), num.size(),
+                         this->point(i).fX, this->point(i).fY+(kPointRadius/2.0f),
+                         paint);
+    }
+}
+
+#endif
+
diff --git a/src/gpu/batches/GrAAConvexTessellator.h b/src/gpu/batches/GrAAConvexTessellator.h
new file mode 100644
index 0000000000..f3d84dc8ad
--- /dev/null
+++ b/src/gpu/batches/GrAAConvexTessellator.h
@@ -0,0 +1,270 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrAAConvexTessellator_DEFINED
+#define GrAAConvexTessellator_DEFINED
+
+#include "SkColor.h"
+#include "SkPaint.h"
+#include "SkPoint.h"
+#include "SkScalar.h"
+#include "SkTDArray.h"
+
+class SkCanvas;
+class SkMatrix;
+class SkPath;
+
+//#define GR_AA_CONVEX_TESSELLATOR_VIZ 1
+
+// device space distance which we inset / outset points in order to create the soft antialiased edge
+static const SkScalar kAntialiasingRadius = 0.5f;
+
+class GrAAConvexTessellator;
+
+// The AAConvexTessellator holds the global pool of points and the triangulation
+// that connects them. It also drives the tessellation process.
+// The outward facing normals of the original polygon are stored (in 'fNorms') to service
+// computeDepthFromEdge requests.
+class GrAAConvexTessellator {
+public:
+    GrAAConvexTessellator(SkScalar strokeWidth = -1.0f, // < 0 selects fill; >= 0 selects stroke of that width
+                          SkPaint::Join join = SkPaint::Join::kBevel_Join,
+                          SkScalar miterLimit = 0.0f)
+        : fSide(SkPoint::kOn_Side) // winding is determined later, during tessellate()
+        , fStrokeWidth(strokeWidth)
+        , fJoin(join)
+        , fMiterLimit(miterLimit) {
+    }
+
+    SkPoint::Side side() const { return fSide; } // winding of the original polygon
+
+    bool tessellate(const SkMatrix& m, const SkPath& path); // returns false on failure/degenerate path
+
+    // The next five should only be called after tessellate to extract the result
+    int numPts() const { return fPts.count(); }
+    int numIndices() const { return fIndices.count(); }
+
+    const SkPoint& lastPoint() const { return fPts.top(); }
+    const SkPoint& point(int index) const { return fPts[index]; }
+    int index(int index) const { return fIndices[index]; }
+    SkScalar coverage(int index) const { return fCoverages[index]; } // per-point AA coverage, parallel to fPts
+
+#if GR_AA_CONVEX_TESSELLATOR_VIZ
+    void draw(SkCanvas* canvas) const; // debug rendering of points/rings/triangles
+#endif
+
+    // The tessellator can be reused for multiple paths by rewinding in between
+    void rewind();
+
+private:
+    // CandidateVerts holds the vertices for the next ring while they are
+    // being generated. Its main function is to de-dup the points.
+    class CandidateVerts {
+    public:
+        void setReserve(int numPts) { fPts.setReserve(numPts); }
+        void rewind() { fPts.rewind(); }
+
+        int numPts() const { return fPts.count(); }
+
+        const SkPoint& lastPoint() const { return fPts.top().fPt; }
+        const SkPoint& firstPoint() const { return fPts[0].fPt; }
+        const SkPoint& point(int index) const { return fPts[index].fPt; }
+
+        int originatingIdx(int index) const { return fPts[index].fOriginatingIdx; }
+        int origEdge(int index) const { return fPts[index].fOrigEdgeId; }
+        bool needsToBeNew(int index) const { return fPts[index].fNeedsToBeNew; }
+
+        int addNewPt(const SkPoint& newPt, int originatingIdx, int origEdge, bool needsToBeNew) { // returns index of the appended candidate
+            struct PointData* pt = fPts.push();
+            pt->fPt = newPt;
+            pt->fOrigEdgeId = origEdge;
+            pt->fOriginatingIdx = originatingIdx;
+            pt->fNeedsToBeNew = needsToBeNew;
+            return fPts.count() - 1;
+        }
+
+        int fuseWithPrior(int origEdgeId) { // merge the new point into the last candidate; returns its index
+            fPts.top().fOrigEdgeId = origEdgeId;
+            fPts.top().fOriginatingIdx = -1; // -1 marks a fused (no single originating) point
+            fPts.top().fNeedsToBeNew = true;
+            return fPts.count() - 1;
+        }
+
+        int fuseWithNext() { // merge the new point into the first candidate; returns its index (0)
+            fPts[0].fOriginatingIdx = -1;
+            fPts[0].fNeedsToBeNew = true;
+            return 0;
+        }
+
+        int fuseWithBoth() { // collapse last and first candidates into one; returns its index (0)
+            if (fPts.count() > 1) {
+                fPts.pop();
+            }
+
+            fPts[0].fOriginatingIdx = -1;
+            fPts[0].fNeedsToBeNew = true;
+            return 0;
+        }
+
+    private:
+        struct PointData {
+            SkPoint fPt;
+            int fOriginatingIdx;  // index of the source point in the prior ring, or -1 if fused
+            int fOrigEdgeId;      // id of the original polygon edge this point derives from
+            bool fNeedsToBeNew;   // true if this point must become a new pool entry
+        };
+
+        SkTDArray<struct PointData> fPts;
+    };
+
+    // The Ring holds a set of indices into the global pool that together define
+    // a single polygon inset.
+    class Ring {
+    public:
+        void setReserve(int numPts) { fPts.setReserve(numPts); }
+        void rewind() { fPts.rewind(); }
+
+        int numPts() const { return fPts.count(); }
+
+        void addIdx(int index, int origEdgeId) { // append a pool index tagged with its source edge
+            struct PointData* pt = fPts.push();
+            pt->fIndex = index;
+            pt->fOrigEdgeId = origEdgeId;
+        }
+
+        // init should be called after all the indices have been added (via addIdx)
+        void init(const GrAAConvexTessellator& tess);
+        void init(const SkTDArray<SkVector>& norms, const SkTDArray<SkVector>& bisectors);
+
+        const SkPoint& norm(int index) const { return fPts[index].fNorm; }
+        const SkPoint& bisector(int index) const { return fPts[index].fBisector; }
+        int index(int index) const { return fPts[index].fIndex; }
+        int origEdgeID(int index) const { return fPts[index].fOrigEdgeId; }
+        void setOrigEdgeId(int index, int id) { fPts[index].fOrigEdgeId = id; }
+
+    #if GR_AA_CONVEX_TESSELLATOR_VIZ
+        void draw(SkCanvas* canvas, const GrAAConvexTessellator& tess) const;
+    #endif
+
+    private:
+        void computeNormals(const GrAAConvexTessellator& result);
+        void computeBisectors(const GrAAConvexTessellator& tess);
+
+        SkDEBUGCODE(bool isConvex(const GrAAConvexTessellator& tess) const;) // debug-only convexity check
+
+        struct PointData {
+            SkPoint fNorm;      // edge normal at this ring point
+            SkPoint fBisector;  // bisector direction at this ring point
+            int fIndex;         // index into the tessellator's global point pool
+            int fOrigEdgeId;    // id of the original polygon edge
+        };
+
+        SkTDArray<PointData> fPts;
+    };
+
+    bool movable(int index) const { return fMovable[index]; }
+
+    // Movable points are those that can be slid along their bisector.
+    // Basically, a point is immovable if it is part of the original
+    // polygon or it results from the fusing of two bisectors.
+    int addPt(const SkPoint& pt, SkScalar depth, SkScalar coverage, bool movable, bool isCurve);
+    void popLastPt();
+    void popFirstPtShuffle();
+
+    void updatePt(int index, const SkPoint& pt, SkScalar depth, SkScalar coverage);
+
+    void addTri(int i0, int i1, int i2); // append one triangle to fIndices
+
+    void reservePts(int count) { // reserve all three parallel point arrays at once
+        fPts.setReserve(count);
+        fCoverages.setReserve(count);
+        fMovable.setReserve(count);
+    }
+
+    SkScalar computeDepthFromEdge(int edgeIdx, const SkPoint& p) const;
+
+    bool computePtAlongBisector(int startIdx, const SkPoint& bisector,
+                                int edgeIdx, SkScalar desiredDepth,
+                                SkPoint* result) const;
+
+    void lineTo(SkPoint p, bool isCurve);
+
+    void lineTo(const SkMatrix& m, SkPoint p, bool isCurve); // maps p through m, then lineTo
+
+    void quadTo(SkPoint pts[3]);
+
+    void quadTo(const SkMatrix& m, SkPoint pts[3]); // maps pts through m, then quadTo
+
+    void cubicTo(const SkMatrix& m, SkPoint pts[4]);
+
+    void conicTo(const SkMatrix& m, SkPoint pts[3], SkScalar w);
+
+    void terminate(const Ring& lastRing);
+
+    // return false on failure/degenerate path
+    bool extractFromPath(const SkMatrix& m, const SkPath& path);
+    void computeBisectors();
+
+    void fanRing(const Ring& ring); // triangulate a ring's interior as a fan
+
+    Ring* getNextRing(Ring* lastRing);
+
+    void createOuterRing(const Ring& previousRing, SkScalar outset, SkScalar coverage,
+                         Ring* nextRing);
+
+    bool createInsetRings(Ring& previousRing, SkScalar initialDepth, SkScalar initialCoverage,
+                          SkScalar targetDepth, SkScalar targetCoverage, Ring** finalRing);
+
+    bool createInsetRing(const Ring& lastRing, Ring* nextRing,
+                         SkScalar initialDepth, SkScalar initialCoverage, SkScalar targetDepth,
+                         SkScalar targetCoverage, bool forceNew);
+
+    void validate() const;
+
+    // fPts, fCoverages & fMovable should always have the same # of elements
+    SkTDArray<SkPoint> fPts;
+    SkTDArray<SkScalar> fCoverages;
+    // movable points are those that can be slid further along their bisector
+    SkTDArray<bool> fMovable;
+
+    // The outward facing normals for the original polygon
+    SkTDArray<SkVector> fNorms;
+    // The inward facing bisector at each point in the original polygon. Only
+    // needed for exterior ring creation and then handed off to the initial ring.
+    SkTDArray<SkVector> fBisectors;
+
+    // Tracks whether a given point is interior to a curve. Such points are
+    // assumed to have shallow curvature.
+    SkTDArray<bool> fIsCurve;
+
+    SkPoint::Side fSide; // winding of the original polygon
+
+    // The triangulation of the points
+    SkTDArray<int> fIndices;
+
+    Ring fInitialRing;
+#if GR_AA_CONVEX_TESSELLATOR_VIZ
+    // When visualizing save all the rings
+    SkTDArray<Ring*> fRings;
+#else
+    Ring fRings[2]; // ping-pong buffer: only the last two rings are needed
+#endif
+    CandidateVerts fCandidateVerts;
+
+    // < 0 means filling rather than stroking
+    SkScalar fStrokeWidth;
+
+    SkPaint::Join fJoin;
+
+    SkScalar fMiterLimit;
+
+    SkTDArray<SkPoint> fPointBuffer; // scratch storage for curve flattening
+};
+
+
+#endif
+
diff --git a/src/gpu/batches/GrAADistanceFieldPathRenderer.cpp b/src/gpu/batches/GrAADistanceFieldPathRenderer.cpp
new file mode 100644
index 0000000000..45a3d65179
--- /dev/null
+++ b/src/gpu/batches/GrAADistanceFieldPathRenderer.cpp
@@ -0,0 +1,626 @@
+
+/*
+ * Copyright 2014 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "GrAADistanceFieldPathRenderer.h"
+
+#include "GrBatchFlushState.h"
+#include "GrBatchTest.h"
+#include "GrContext.h"
+#include "GrPipelineBuilder.h"
+#include "GrResourceProvider.h"
+#include "GrSurfacePriv.h"
+#include "GrSWMaskHelper.h"
+#include "GrTexturePriv.h"
+#include "GrVertexBuffer.h"
+#include "batches/GrVertexBatch.h"
+#include "effects/GrDistanceFieldGeoProc.h"
+
+#include "SkDistanceFieldGen.h"
+#include "SkRTConf.h"
+
+#define ATLAS_TEXTURE_WIDTH 1024
+#define ATLAS_TEXTURE_HEIGHT 2048
+#define PLOT_WIDTH 256
+#define PLOT_HEIGHT 256
+
+#define NUM_PLOTS_X (ATLAS_TEXTURE_WIDTH / PLOT_WIDTH)   // 4 plots across
+#define NUM_PLOTS_Y (ATLAS_TEXTURE_HEIGHT / PLOT_HEIGHT) // 8 plots down
+
+#ifdef DF_PATH_TRACKING
+static int g_NumCachedPaths = 0; // cache-churn counters, reported in the renderer's destructor
+static int g_NumFreedPaths = 0;
+#endif
+
+// mip levels
+static const int kSmallMIP = 32;   // thresholds (in device-space pixels) selecting the stored DF resolution
+static const int kMediumMIP = 78;
+static const int kLargeMIP = 192;
+
+// Callback to clear out internal path cache when eviction occurs
+void GrAADistanceFieldPathRenderer::HandleEviction(GrBatchAtlas::AtlasID id, void* pr) {
+    GrAADistanceFieldPathRenderer* dfpr = (GrAADistanceFieldPathRenderer*)pr; // 'pr' is the renderer registered with the atlas
+    // remove any paths that use this plot
+    PathDataList::Iter iter;
+    iter.init(dfpr->fPathList, PathDataList::Iter::kHead_IterStart);
+    PathData* pathData;
+    while ((pathData = iter.get())) {
+        iter.next(); // advance before removal so the iterator stays valid
+        if (id == pathData->fID) {
+            dfpr->fPathCache.remove(pathData->fKey); // drop from both the hash and the list
+            dfpr->fPathList.remove(pathData);
+            delete pathData;
+#ifdef DF_PATH_TRACKING
+            ++g_NumFreedPaths;
+#endif
+        }
+    }
+}
+
+////////////////////////////////////////////////////////////////////////////////
+GrAADistanceFieldPathRenderer::GrAADistanceFieldPathRenderer() : fAtlas(nullptr) {} // atlas is created lazily in onDrawPath
+
+GrAADistanceFieldPathRenderer::~GrAADistanceFieldPathRenderer() { // free every cached PathData, then the atlas
+    PathDataList::Iter iter;
+    iter.init(fPathList, PathDataList::Iter::kHead_IterStart);
+    PathData* pathData;
+    while ((pathData = iter.get())) {
+        iter.next(); // advance before removal so the iterator stays valid
+        fPathList.remove(pathData);
+        delete pathData;
+    }
+    delete fAtlas;
+
+#ifdef DF_PATH_TRACKING
+    SkDebugf("Cached paths: %d, freed paths: %d\n", g_NumCachedPaths, g_NumFreedPaths);
+#endif
+}
+
+////////////////////////////////////////////////////////////////////////////////
+bool GrAADistanceFieldPathRenderer::onCanDrawPath(const CanDrawPathArgs& args) const { // accept only small, AA, filled, non-perspective paths
+
+    // TODO: Support inverse fill
+    // TODO: Support strokes
+    if (!args.fShaderCaps->shaderDerivativeSupport() || !args.fAntiAlias || // DF shading needs derivatives
+        args.fPath->isInverseFillType() || args.fPath->isVolatile() || // volatile paths aren't worth caching
+        !args.fStroke->isFillStyle()) {
+        return false;
+    }
+
+    // currently don't support perspective
+    if (args.fViewMatrix->hasPerspective()) {
+        return false;
+    }
+
+    // only support paths smaller than 64x64, scaled to less than 256x256
+    // the goal is to accelerate rendering of lots of small paths that may be scaling
+    SkScalar maxScale = args.fViewMatrix->getMaxScale();
+    const SkRect& bounds = args.fPath->getBounds();
+    SkScalar maxDim = SkMaxScalar(bounds.width(), bounds.height());
+    return maxDim < 64.f && maxDim * maxScale < 256.f;
+}
+
+////////////////////////////////////////////////////////////////////////////////
+
+// padding around path bounds to allow for antialiased pixels
+static const SkScalar kAntiAliasPad = 1.0f;
+
+class AADistanceFieldPathBatch : public GrVertexBatch { // Draws paths as textured quads sampling a cached signed-distance-field atlas
+public:
+    typedef GrAADistanceFieldPathRenderer::PathData PathData;
+    typedef SkTDynamicHash<PathData, PathData::Key> PathCache;
+    typedef GrAADistanceFieldPathRenderer::PathDataList PathDataList;
+
+    struct Geometry { // per-path draw record accumulated into the batch
+        Geometry(const SkStrokeRec& stroke) : fStroke(stroke) {}
+        SkPath fPath;
+        SkStrokeRec fStroke;
+        bool fAntiAlias;
+        PathData* fPathData; // cache entry, resolved during onPrepareDraws
+    };
+
+    static GrDrawBatch* Create(const Geometry& geometry, GrColor color, const SkMatrix& viewMatrix,
+                               GrBatchAtlas* atlas, PathCache* pathCache, PathDataList* pathList) {
+        return new AADistanceFieldPathBatch(geometry, color, viewMatrix, atlas, pathCache,
+                                            pathList);
+    }
+
+    const char* name() const override { return "AADistanceFieldPathBatch"; }
+
+    void getInvariantOutputColor(GrInitInvariantOutput* out) const override {
+        out->setKnownFourComponents(fBatch.fColor);
+    }
+
+    void getInvariantOutputCoverage(GrInitInvariantOutput* out) const override {
+        out->setUnknownSingleComponent(); // coverage comes from the distance field texture
+    }
+
+private:
+    void initBatchTracker(const GrPipelineOptimizations& opt) override {
+        // Handle any color overrides
+        if (!opt.readsColor()) {
+            fBatch.fColor = GrColor_ILLEGAL;
+        }
+        opt.getOverrideColorIfSet(&fBatch.fColor);
+
+        // setup batch properties
+        fBatch.fColorIgnored = !opt.readsColor();
+        fBatch.fUsesLocalCoords = opt.readsLocalCoords();
+        fBatch.fCoverageIgnored = !opt.readsCoverage();
+    }
+
+    struct FlushInfo { // state shared between onPrepareDraws and flush()
+        SkAutoTUnref<const GrVertexBuffer> fVertexBuffer;
+        SkAutoTUnref<const GrIndexBuffer> fIndexBuffer;
+        int fVertexOffset;
+        int fInstancesToFlush; // quads written since the last flush
+    };
+
+    void onPrepareDraws(Target* target) override { // resolve atlas entries and emit one quad per path
+        int instanceCount = fGeoData.count();
+
+        SkMatrix invert;
+        if (this->usesLocalCoords() && !this->viewMatrix().invert(&invert)) {
+            SkDebugf("Could not invert viewmatrix\n");
+            return;
+        }
+
+        uint32_t flags = 0;
+        flags |= this->viewMatrix().isSimilarity() ? kSimilarity_DistanceFieldEffectFlag : 0;
+
+        GrTextureParams params(SkShader::kRepeat_TileMode, GrTextureParams::kBilerp_FilterMode);
+
+        // Setup GrGeometryProcessor
+        GrBatchAtlas* atlas = fAtlas;
+        SkAutoTUnref<GrGeometryProcessor> dfProcessor(
+                GrDistanceFieldPathGeoProc::Create(this->color(),
+                                                   this->viewMatrix(),
+                                                   atlas->getTexture(),
+                                                   params,
+                                                   flags,
+                                                   this->usesLocalCoords()));
+
+        target->initDraw(dfProcessor, this->pipeline());
+
+        FlushInfo flushInfo;
+
+        // allocate vertices
+        size_t vertexStride = dfProcessor->getVertexStride();
+        SkASSERT(vertexStride == 2 * sizeof(SkPoint)); // position + texture coord per vertex
+
+        const GrVertexBuffer* vertexBuffer;
+        void* vertices = target->makeVertexSpace(vertexStride,
+                                                 kVerticesPerQuad * instanceCount,
+                                                 &vertexBuffer,
+                                                 &flushInfo.fVertexOffset);
+        flushInfo.fVertexBuffer.reset(SkRef(vertexBuffer));
+        flushInfo.fIndexBuffer.reset(target->resourceProvider()->refQuadIndexBuffer());
+        if (!vertices || !flushInfo.fIndexBuffer) {
+            SkDebugf("Could not allocate vertices\n");
+            return;
+        }
+
+        flushInfo.fInstancesToFlush = 0;
+        for (int i = 0; i < instanceCount; i++) {
+            Geometry& args = fGeoData[i];
+
+            // get mip level
+            SkScalar maxScale = this->viewMatrix().getMaxScale();
+            const SkRect& bounds = args.fPath.getBounds();
+            SkScalar maxDim = SkMaxScalar(bounds.width(), bounds.height());
+            SkScalar size = maxScale * maxDim; // approximate device-space size of the path
+            uint32_t desiredDimension;
+            if (size <= kSmallMIP) {
+                desiredDimension = kSmallMIP;
+            } else if (size <= kMediumMIP) {
+                desiredDimension = kMediumMIP;
+            } else {
+                desiredDimension = kLargeMIP;
+            }
+
+            // check to see if path is cached
+            // TODO: handle stroked vs. filled version of same path
+            PathData::Key key = { args.fPath.getGenerationID(), desiredDimension };
+            args.fPathData = fPathCache->find(key);
+            if (nullptr == args.fPathData || !atlas->hasID(args.fPathData->fID)) { // miss, or entry was evicted from the atlas
+                // Remove the stale cache entry
+                if (args.fPathData) {
+                    fPathCache->remove(args.fPathData->fKey);
+                    fPathList->remove(args.fPathData);
+                    delete args.fPathData;
+                }
+                SkScalar scale = desiredDimension/maxDim; // uint32_t/SkScalar promotes to SkScalar
+                args.fPathData = new PathData;
+                if (!this->addPathToAtlas(target,
+                                          dfProcessor,
+                                          this->pipeline(),
+                                          &flushInfo,
+                                          atlas,
+                                          args.fPathData,
+                                          args.fPath,
+                                          args.fStroke,
+                                          args.fAntiAlias,
+                                          desiredDimension,
+                                          scale)) {
+                    SkDebugf("Can't rasterize path\n");
+                    return;
+                }
+            }
+
+            atlas->setLastUseToken(args.fPathData->fID, target->currentToken()); // keep the plot alive for this draw
+
+            // Now set vertices
+            intptr_t offset = reinterpret_cast<intptr_t>(vertices);
+            offset += i * kVerticesPerQuad * vertexStride;
+            SkPoint* positions = reinterpret_cast<SkPoint*>(offset);
+            this->writePathVertices(target,
+                                    atlas,
+                                    this->pipeline(),
+                                    dfProcessor,
+                                    positions,
+                                    vertexStride,
+                                    this->viewMatrix(),
+                                    args.fPath,
+                                    args.fPathData);
+            flushInfo.fInstancesToFlush++;
+        }
+
+        this->flush(target, &flushInfo); // emit whatever quads remain
+    }
+
+    SkSTArray<1, Geometry, true>* geoData() { return &fGeoData; }
+
+    AADistanceFieldPathBatch(const Geometry& geometry, GrColor color, const SkMatrix& viewMatrix,
+                             GrBatchAtlas* atlas,
+                             PathCache* pathCache, PathDataList* pathList) {
+        this->initClassID<AADistanceFieldPathBatch>();
+        fBatch.fColor = color;
+        fBatch.fViewMatrix = viewMatrix;
+        fGeoData.push_back(geometry);
+        fGeoData.back().fPathData = nullptr; // resolved lazily in onPrepareDraws
+
+        fAtlas = atlas;
+        fPathCache = pathCache;
+        fPathList = pathList;
+
+        // Compute bounds
+        fBounds = geometry.fPath.getBounds();
+        viewMatrix.mapRect(&fBounds);
+    }
+
+    bool addPathToAtlas(GrVertexBatch::Target* target, // rasterizes the path, converts to a DF, uploads it, and fills in pathData; false on alloc failure
+                        const GrGeometryProcessor* dfProcessor,
+                        const GrPipeline* pipeline,
+                        FlushInfo* flushInfo,
+                        GrBatchAtlas* atlas,
+                        PathData* pathData,
+                        const SkPath& path,
+                        const SkStrokeRec&
+                        stroke, bool antiAlias,
+                        uint32_t dimension,
+                        SkScalar scale) {
+        const SkRect& bounds = path.getBounds();
+
+        // generate bounding rect for bitmap draw
+        SkRect scaledBounds = bounds;
+        // scale to mip level size
+        scaledBounds.fLeft *= scale;
+        scaledBounds.fTop *= scale;
+        scaledBounds.fRight *= scale;
+        scaledBounds.fBottom *= scale;
+        // move the origin to an integer boundary (gives better results)
+        SkScalar dx = SkScalarFraction(scaledBounds.fLeft);
+        SkScalar dy = SkScalarFraction(scaledBounds.fTop);
+        scaledBounds.offset(-dx, -dy);
+        // get integer boundary
+        SkIRect devPathBounds;
+        scaledBounds.roundOut(&devPathBounds);
+        // pad to allow room for antialiasing
+        devPathBounds.outset(SkScalarCeilToInt(kAntiAliasPad), SkScalarCeilToInt(kAntiAliasPad));
+        // move origin to upper left corner
+        devPathBounds.offsetTo(0,0);
+
+        // draw path to bitmap
+        SkMatrix drawMatrix;
+        drawMatrix.setTranslate(-bounds.left(), -bounds.top());
+        drawMatrix.postScale(scale, scale);
+        drawMatrix.postTranslate(kAntiAliasPad, kAntiAliasPad);
+
+        // setup bitmap backing
+        // Now translate so the bound's UL corner is at the origin
+        drawMatrix.postTranslate(-devPathBounds.fLeft * SK_Scalar1,
+                                 -devPathBounds.fTop * SK_Scalar1);
+        SkIRect pathBounds = SkIRect::MakeWH(devPathBounds.width(),
+                                             devPathBounds.height());
+
+        SkAutoPixmapStorage dst;
+        if (!dst.tryAlloc(SkImageInfo::MakeA8(pathBounds.width(), // A8 coverage mask, zero-filled below
+                                              pathBounds.height()))) {
+            return false;
+        }
+        sk_bzero(dst.writable_addr(), dst.getSafeSize());
+
+        // rasterize path
+        SkPaint paint;
+        if (stroke.isHairlineStyle()) {
+            paint.setStyle(SkPaint::kStroke_Style);
+            paint.setStrokeWidth(SK_Scalar1); // hairline rendered as a 1-pixel stroke
+        } else {
+            if (stroke.isFillStyle()) {
+                paint.setStyle(SkPaint::kFill_Style);
+            } else {
+                paint.setStyle(SkPaint::kStroke_Style);
+                paint.setStrokeJoin(stroke.getJoin());
+                paint.setStrokeCap(stroke.getCap());
+                paint.setStrokeWidth(stroke.getWidth());
+            }
+        }
+        paint.setAntiAlias(antiAlias);
+
+        SkDraw draw;
+        sk_bzero(&draw, sizeof(draw));
+
+        SkRasterClip rasterClip;
+        rasterClip.setRect(pathBounds);
+        draw.fRC = &rasterClip;
+        draw.fClip = &rasterClip.bwRgn();
+        draw.fMatrix = &drawMatrix;
+        draw.fDst = dst;
+
+        draw.drawPathCoverage(path, paint); // software-rasterize the path's coverage into dst
+
+        // generate signed distance field
+        devPathBounds.outset(SK_DistanceFieldPad, SK_DistanceFieldPad);
+        int width = devPathBounds.width();
+        int height = devPathBounds.height();
+        // TODO We should really generate this directly into the plot somehow
+        SkAutoSMalloc<1024> dfStorage(width * height * sizeof(unsigned char));
+
+        // Generate signed distance field
+        SkGenerateDistanceFieldFromA8Image((unsigned char*)dfStorage.get(),
+                                           (const unsigned char*)dst.addr(),
+                                           dst.width(), dst.height(), dst.rowBytes());
+
+        // add to atlas
+        SkIPoint16 atlasLocation;
+        GrBatchAtlas::AtlasID id;
+        bool success = atlas->addToAtlas(&id, target, width, height, dfStorage.get(),
+                                         &atlasLocation);
+        if (!success) { // atlas full: flush pending draws to free plots, then retry
+            this->flush(target, flushInfo);
+            target->initDraw(dfProcessor, pipeline);
+
+            SkDEBUGCODE(success =) atlas->addToAtlas(&id, target, width, height,
+                                                     dfStorage.get(), &atlasLocation);
+            SkASSERT(success); // retry after a flush is expected to succeed
+
+        }
+
+        // add to cache
+        pathData->fKey.fGenID = path.getGenerationID();
+        pathData->fKey.fDimension = dimension;
+        pathData->fScale = scale;
+        pathData->fID = id;
+        // change the scaled rect to match the size of the inset distance field
+        scaledBounds.fRight = scaledBounds.fLeft +
+            SkIntToScalar(devPathBounds.width() - 2*SK_DistanceFieldInset);
+        scaledBounds.fBottom = scaledBounds.fTop +
+            SkIntToScalar(devPathBounds.height() - 2*SK_DistanceFieldInset);
+        // shift the origin to the correct place relative to the distance field
+        // need to also restore the fractional translation
+        scaledBounds.offset(-SkIntToScalar(SK_DistanceFieldInset) - kAntiAliasPad + dx,
+                            -SkIntToScalar(SK_DistanceFieldInset) - kAntiAliasPad + dy);
+        pathData->fBounds = scaledBounds;
+        // origin we render from is inset from distance field edge
+        atlasLocation.fX += SK_DistanceFieldInset;
+        atlasLocation.fY += SK_DistanceFieldInset;
+        pathData->fAtlasLocation = atlasLocation;
+
+        fPathCache->add(pathData);
+        fPathList->addToTail(pathData);
+#ifdef DF_PATH_TRACKING
+        ++g_NumCachedPaths;
+#endif
+        return true;
+    }
+
+    void writePathVertices(GrDrawBatch::Target* target, // writes the 4 position + 4 texcoord vertices for one path's quad
+                           GrBatchAtlas* atlas,
+                           const GrPipeline* pipeline,
+                           const GrGeometryProcessor* gp,
+                           SkPoint* positions,
+                           size_t vertexStride,
+                           const SkMatrix& viewMatrix,
+                           const SkPath& path,
+                           const PathData* pathData) {
+        GrTexture* texture = atlas->getTexture();
+
+        SkScalar dx = pathData->fBounds.fLeft;
+        SkScalar dy = pathData->fBounds.fTop;
+        SkScalar width = pathData->fBounds.width();
+        SkScalar height = pathData->fBounds.height();
+
+        SkScalar invScale = 1.0f / pathData->fScale; // undo the mip-level scale to get source-space geometry
+        dx *= invScale;
+        dy *= invScale;
+        width *= invScale;
+        height *= invScale;
+
+        SkFixed tx = SkIntToFixed(pathData->fAtlasLocation.fX);
+        SkFixed ty = SkIntToFixed(pathData->fAtlasLocation.fY);
+        SkFixed tw = SkScalarToFixed(pathData->fBounds.width());
+        SkFixed th = SkScalarToFixed(pathData->fBounds.height());
+
+        // vertex positions
+        // TODO make the vertex attributes a struct
+        SkRect r = SkRect::MakeXYWH(dx, dy, width, height);
+        positions->setRectFan(r.left(), r.top(), r.right(), r.bottom(), vertexStride);
+
+        // vertex texture coords
+        SkPoint* textureCoords = positions + 1; // texcoord is the second attribute in each vertex
+        textureCoords->setRectFan(SkFixedToFloat(texture->texturePriv().normalizeFixedX(tx)),
+                                  SkFixedToFloat(texture->texturePriv().normalizeFixedY(ty)),
+                                  SkFixedToFloat(texture->texturePriv().normalizeFixedX(tx + tw)),
+                                  SkFixedToFloat(texture->texturePriv().normalizeFixedY(ty + th)),
+                                  vertexStride);
+    }
+
+    void flush(GrVertexBatch::Target* target, FlushInfo* flushInfo) { // issue an instanced draw for the quads accumulated so far
+        GrVertices vertices;
+        int maxInstancesPerDraw = flushInfo->fIndexBuffer->maxQuads();
+        vertices.initInstanced(kTriangles_GrPrimitiveType, flushInfo->fVertexBuffer,
+                               flushInfo->fIndexBuffer, flushInfo->fVertexOffset, kVerticesPerQuad,
+                               kIndicesPerQuad, flushInfo->fInstancesToFlush, maxInstancesPerDraw);
+        target->draw(vertices);
+        flushInfo->fVertexOffset += kVerticesPerQuad * flushInfo->fInstancesToFlush; // advance past what was drawn
+        flushInfo->fInstancesToFlush = 0;
+    }
+
+    GrColor color() const { return fBatch.fColor; }
+    const SkMatrix& viewMatrix() const { return fBatch.fViewMatrix; }
+    bool usesLocalCoords() const { return fBatch.fUsesLocalCoords; }
+
+    bool onCombineIfPossible(GrBatch* t, const GrCaps& caps) override { // batches merge only with matching pipeline, color and view matrix
+        AADistanceFieldPathBatch* that = t->cast<AADistanceFieldPathBatch>();
+        if (!GrPipeline::CanCombine(*this->pipeline(), this->bounds(), *that->pipeline(),
+                                    that->bounds(), caps)) {
+            return false;
+        }
+
+        // TODO we could actually probably do a bunch of this work on the CPU, ie map viewMatrix,
+        // maybe upload color via attribute
+        if (this->color() != that->color()) {
+            return false;
+        }
+
+        if (!this->viewMatrix().cheapEqualTo(that->viewMatrix())) {
+            return false;
+        }
+
+        fGeoData.push_back_n(that->geoData()->count(), that->geoData()->begin()); // steal the other batch's geometries
+        this->joinBounds(that->bounds());
+        return true;
+    }
+
+    struct BatchTracker {
+        GrColor fColor;
+        SkMatrix fViewMatrix;
+        bool fUsesLocalCoords;
+        bool fColorIgnored;
+        bool fCoverageIgnored;
+    };
+
+    BatchTracker fBatch;
+    SkSTArray<1, Geometry, true> fGeoData;
+    GrBatchAtlas* fAtlas;       // shared atlas owned by the path renderer
+    PathCache* fPathCache;      // shared cache owned by the path renderer
+    PathDataList* fPathList;
+};
+
+bool GrAADistanceFieldPathRenderer::onDrawPath(const DrawPathArgs& args) { // wrap the path in a batch and hand it to the draw target
+    // we've already bailed on inverse filled paths, so this is safe
+    if (args.fPath->isEmpty()) {
+        return true;
+    }
+
+    if (!fAtlas) { // lazily create the shared A8 atlas on first use
+        fAtlas = args.fResourceProvider->createAtlas(kAlpha_8_GrPixelConfig,
+                                                     ATLAS_TEXTURE_WIDTH, ATLAS_TEXTURE_HEIGHT,
+                                                     NUM_PLOTS_X, NUM_PLOTS_Y,
+                                                     &GrAADistanceFieldPathRenderer::HandleEviction,
+                                                     (void*)this);
+        if (!fAtlas) {
+            return false;
+        }
+    }
+
+    AADistanceFieldPathBatch::Geometry geometry(*args.fStroke);
+    geometry.fPath = *args.fPath;
+    geometry.fAntiAlias = args.fAntiAlias;
+
+    SkAutoTUnref<GrDrawBatch> batch(AADistanceFieldPathBatch::Create(geometry, args.fColor,
+                                                                     *args.fViewMatrix, fAtlas,
+                                                                     &fPathCache, &fPathList));
+    args.fTarget->drawBatch(*args.fPipelineBuilder, batch);
+
+    return true;
+}
+
+///////////////////////////////////////////////////////////////////////////////////////////////////
+
+#ifdef GR_TEST_UTILS
+
+struct PathTestStruct { // test-only stand-in for the renderer's atlas/cache state
+    typedef GrAADistanceFieldPathRenderer::PathCache PathCache;
+    typedef GrAADistanceFieldPathRenderer::PathData PathData;
+    typedef GrAADistanceFieldPathRenderer::PathDataList PathDataList;
+    PathTestStruct() : fContextID(SK_InvalidGenID), fAtlas(nullptr) {}
+    ~PathTestStruct() { this->reset(); }
+
+    void reset() { // free every cached PathData, the atlas, and the hash entries
+        PathDataList::Iter iter;
+        iter.init(fPathList, PathDataList::Iter::kHead_IterStart);
+        PathData* pathData;
+        while ((pathData = iter.get())) {
+            iter.next(); // advance before removal so the iterator stays valid
+            fPathList.remove(pathData);
+            delete pathData;
+        }
+        delete fAtlas;
+        fPathCache.reset();
+    }
+
+    static void HandleEviction(GrBatchAtlas::AtlasID id, void* pr) { // mirrors GrAADistanceFieldPathRenderer::HandleEviction
+        PathTestStruct* dfpr = (PathTestStruct*)pr;
+        // remove any paths that use this plot
+        PathDataList::Iter iter;
+        iter.init(dfpr->fPathList, PathDataList::Iter::kHead_IterStart);
+        PathData* pathData;
+        while ((pathData = iter.get())) {
+            iter.next();
+            if (id == pathData->fID) {
+                dfpr->fPathCache.remove(pathData->fKey);
+                dfpr->fPathList.remove(pathData);
+                delete pathData;
+            }
+        }
+    }
+
+    uint32_t fContextID; // id of the GrContext these resources were built for
+    GrBatchAtlas* fAtlas;
+    PathCache fPathCache;
+    PathDataList fPathList;
+};
+
+DRAW_BATCH_TEST_DEFINE(AADistanceFieldPathBatch) { // fuzz-test factory: builds a batch with random path/matrix/color
+    static PathTestStruct gTestStruct; // persists across invocations; rebuilt when the context changes
+
+    if (context->uniqueID() != gTestStruct.fContextID) {
+        gTestStruct.fContextID = context->uniqueID();
+        gTestStruct.reset();
+        gTestStruct.fAtlas =
+                context->resourceProvider()->createAtlas(kAlpha_8_GrPixelConfig,
+                                                         ATLAS_TEXTURE_WIDTH, ATLAS_TEXTURE_HEIGHT,
+                                                         NUM_PLOTS_X, NUM_PLOTS_Y,
+                                                         &PathTestStruct::HandleEviction,
+                                                         (void*)&gTestStruct);
+    }
+
+    SkMatrix viewMatrix = GrTest::TestMatrix(random);
+    GrColor color = GrRandomColor(random);
+
+    AADistanceFieldPathBatch::Geometry geometry(GrTest::TestStrokeRec(random));
+    geometry.fPath = GrTest::TestPath(random);
+    geometry.fAntiAlias = random->nextBool();
+
+    return AADistanceFieldPathBatch::Create(geometry, color, viewMatrix,
+                                            gTestStruct.fAtlas,
+                                            &gTestStruct.fPathCache,
+                                            &gTestStruct.fPathList);
+}
+
+#endif
diff --git a/src/gpu/batches/GrAADistanceFieldPathRenderer.h b/src/gpu/batches/GrAADistanceFieldPathRenderer.h
new file mode 100755
index 0000000000..469aeeb981
--- /dev/null
+++ b/src/gpu/batches/GrAADistanceFieldPathRenderer.h
@@ -0,0 +1,75 @@
+
+/*
+ * Copyright 2014 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrAADistanceFieldPathRenderer_DEFINED
+#define GrAADistanceFieldPathRenderer_DEFINED
+
+#include "GrBatchAtlas.h"
+#include "GrPathRenderer.h"
+#include "GrRect.h"
+
+#include "SkChecksum.h"
+#include "SkTDynamicHash.h"
+
+class GrContext;
+
+class GrAADistanceFieldPathRenderer : public GrPathRenderer { // renders small AA filled paths via cached signed distance fields
+public:
+    GrAADistanceFieldPathRenderer();
+    virtual ~GrAADistanceFieldPathRenderer();
+
+private:
+    StencilSupport onGetStencilSupport(const SkPath&, const GrStrokeInfo&) const override {
+        return GrPathRenderer::kNoSupport_StencilSupport; // DF rendering can't service stencil draws
+    }
+
+    bool onCanDrawPath(const CanDrawPathArgs&) const override;
+
+    bool onDrawPath(const DrawPathArgs&) override;
+
+    struct PathData { // one cached distance-field entry; lives in both fPathCache and fPathList
+        struct Key {
+            uint32_t fGenID; // path generation id
+            // rendered size for stored path (32x32 max, 64x64 max, 128x128 max)
+            uint32_t fDimension;
+            bool operator==(const Key& other) const {
+                return other.fGenID == fGenID && other.fDimension == fDimension;
+            }
+        };
+        Key fKey;
+        SkScalar fScale;              // scale from source space to the stored mip level
+        GrBatchAtlas::AtlasID fID;    // atlas plot holding the distance field
+        SkRect fBounds;
+        SkIPoint16 fAtlasLocation;    // upper-left of the DF within the atlas
+        SK_DECLARE_INTERNAL_LLIST_INTERFACE(PathData);
+
+        static inline const Key& GetKey(const PathData& data) {
+            return data.fKey;
+        }
+
+        static inline uint32_t Hash(Key key) {
+            return SkChecksum::Murmur3(reinterpret_cast<const uint32_t*>(&key), sizeof(key));
+        }
+    };
+
+    static void HandleEviction(GrBatchAtlas::AtlasID, void*); // atlas eviction callback; purges entries using the evicted plot
+
+    typedef SkTDynamicHash<PathData, PathData::Key> PathCache;
+    typedef SkTInternalLList<PathData> PathDataList;
+
+    GrBatchAtlas* fAtlas;    // lazily created in onDrawPath
+    PathCache fPathCache;    // key -> entry lookup
+    PathDataList fPathList;  // all entries, for iteration/eviction
+
+    typedef GrPathRenderer INHERITED;
+
+    friend class AADistanceFieldPathBatch;
+    friend struct PathTestStruct;
+};
+
+#endif
diff --git a/src/gpu/batches/GrAAHairLinePathRenderer.cpp b/src/gpu/batches/GrAAHairLinePathRenderer.cpp
new file mode 100644
index 0000000000..e102db27a0
--- /dev/null
+++ b/src/gpu/batches/GrAAHairLinePathRenderer.cpp
@@ -0,0 +1,998 @@
+/*
+ * Copyright 2011 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "GrAAHairLinePathRenderer.h"
+
+#include "GrBatchFlushState.h"
+#include "GrBatchTest.h"
+#include "GrCaps.h"
+#include "GrContext.h"
+#include "GrDefaultGeoProcFactory.h"
+#include "GrIndexBuffer.h"
+#include "GrPathUtils.h"
+#include "GrPipelineBuilder.h"
+#include "GrProcessor.h"
+#include "GrResourceProvider.h"
+#include "GrVertexBuffer.h"
+#include "SkGeometry.h"
+#include "SkStroke.h"
+#include "SkTemplates.h"
+
+#include "batches/GrVertexBatch.h"
+
+#include "effects/GrBezierEffect.h"
+
+#include <cstring>
+
+#define PREALLOC_PTARRAY(N) SkSTArray<(N),SkPoint, true>
+
+// quadratics are rendered as 5-sided polys in order to bound the
+// AA stroke around the center-curve. See comments in push_quad_index_buffer and
+// bloat_quad. Quadratics and conics share an index buffer
+
+// lines are rendered as:
+// *______________*
+// |\ -_______ /|
+// | \ \ / |
+// | *--------* |
+// | / ______/ \ |
+// */_-__________\*
+// For: 6 vertices and 18 indices (for 6 triangles)
+
+// Each quadratic is rendered as a five sided polygon. This poly bounds
+// the quadratic's bounding triangle but has been expanded so that the
+// 1-pixel wide area around the curve is inside the poly.
+// If a,b,c are the original control points then the poly a0,b0,c0,c1,a1
+// that is rendered would look like this:
+// b0
+// b
+//
+// a0 c0
+// a c
+// a1 c1
+// Each is drawn as three triangles ((a0,a1,b0), (b0,c1,c0), (a1,c1,b0))
+// specified by these 9 indices:
+static const uint16_t kQuadIdxBufPattern[] = {
+ 0, 1, 2,
+ 2, 4, 3,
+ 1, 4, 2
+};
+
+static const int kIdxsPerQuad = SK_ARRAY_COUNT(kQuadIdxBufPattern);
+static const int kQuadNumVertices = 5;
+static const int kQuadsNumInIdxBuffer = 256;
+GR_DECLARE_STATIC_UNIQUE_KEY(gQuadsIndexBufferKey);
+
+// Returns a ref'ed index buffer containing kQuadsNumInIdxBuffer repetitions
+// of the 5-vertex/9-index quad pattern above, shared process-wide through the
+// resource cache via a static unique key. Caller owns a ref on the result.
+static const GrIndexBuffer* ref_quads_index_buffer(GrResourceProvider* resourceProvider) {
+ GR_DEFINE_STATIC_UNIQUE_KEY(gQuadsIndexBufferKey);
+ return resourceProvider->findOrCreateInstancedIndexBuffer(
+ kQuadIdxBufPattern, kIdxsPerQuad, kQuadsNumInIdxBuffer, kQuadNumVertices,
+ gQuadsIndexBufferKey);
+}
+
+
+// Each line segment is rendered as two quads and two triangles.
+// p0 and p1 have alpha = 1 while all other points have alpha = 0.
+// The four external points are offset 1 pixel perpendicular to the
+// line and half a pixel parallel to the line.
+//
+// p4 p5
+// p0 p1
+// p2 p3
+//
+// Each is drawn as six triangles specified by these 18 indices:
+
+static const uint16_t kLineSegIdxBufPattern[] = {
+ 0, 1, 3,
+ 0, 3, 2,
+ 0, 4, 5,
+ 0, 5, 1,
+ 0, 2, 4,
+ 1, 5, 3
+};
+
+static const int kIdxsPerLineSeg = SK_ARRAY_COUNT(kLineSegIdxBufPattern);
+static const int kLineSegNumVertices = 6;
+static const int kLineSegsNumInIdxBuffer = 256;
+
+GR_DECLARE_STATIC_UNIQUE_KEY(gLinesIndexBufferKey);
+
+// Returns a ref'ed index buffer containing kLineSegsNumInIdxBuffer repetitions
+// of the 6-vertex/18-index line-segment pattern above, shared via the resource
+// cache under a static unique key. Caller owns a ref on the result.
+static const GrIndexBuffer* ref_lines_index_buffer(GrResourceProvider* resourceProvider) {
+ GR_DEFINE_STATIC_UNIQUE_KEY(gLinesIndexBufferKey);
+ return resourceProvider->findOrCreateInstancedIndexBuffer(
+ kLineSegIdxBufPattern, kIdxsPerLineSeg, kLineSegsNumInIdxBuffer, kLineSegNumVertices,
+ gLinesIndexBufferKey);
+}
+
+// Takes 178th time of logf on Z600 / VC2010
+// Returns the unbiased base-2 exponent of x by reading its IEEE-754 bits
+// directly. Assumes x is finite, positive, and not denormal.
+static int get_float_exp(float x) {
+    GR_STATIC_ASSERT(sizeof(int) == sizeof(float));
+#ifdef SK_DEBUG
+    static bool tested;
+    if (!tested) {
+        tested = true;
+        SkASSERT(get_float_exp(0.25f) == -2);
+        SkASSERT(get_float_exp(0.3f) == -2);
+        SkASSERT(get_float_exp(0.5f) == -1);
+        SkASSERT(get_float_exp(1.f) == 0);
+        SkASSERT(get_float_exp(2.f) == 1);
+        SkASSERT(get_float_exp(2.5f) == 1);
+        SkASSERT(get_float_exp(8.f) == 3);
+        SkASSERT(get_float_exp(100.f) == 6);
+        SkASSERT(get_float_exp(1000.f) == 9);
+        SkASSERT(get_float_exp(1024.f) == 10);
+        SkASSERT(get_float_exp(3000000.f) == 21);
+    }
+#endif
+    // Use memcpy to inspect the bits instead of casting float* to int*:
+    // dereferencing the cast pointer violates the strict-aliasing rule and is
+    // undefined behavior. memcpy compiles to a single register move on all
+    // major compilers, so this keeps the speed advantage over logf.
+    int bits;
+    memcpy(&bits, &x, sizeof(bits));
+    // Mask the 8 exponent bits, shift past the 23-bit mantissa, subtract bias.
+    return ((bits & 0x7f800000) >> 23) - 127;
+}
+
+// Uses the quad max-curvature function to estimate where to chop the conic.
+// If there is no interior max-curvature point, returns 1 with dst[0] set to
+// the original conic (when dst is non-null). Otherwise chops there and
+// returns 2 with the halves in dst[0] and dst[1].
+static int split_conic(const SkPoint src[3], SkConic dst[2], const SkScalar weight) {
+    const SkScalar maxCurvT = SkFindQuadMaxCurvature(src);
+    if (0 == maxCurvT) {
+        // Nothing to chop: hand back the input unchanged.
+        if (dst) {
+            dst[0].set(src, weight);
+        }
+        return 1;
+    }
+    if (dst) {
+        SkConic whole;
+        whole.set(src, weight);
+        whole.chopAt(maxCurvT, dst);
+    }
+    return 2;
+}
+
+// Calls split_conic on the whole conic and then once more on each resulting
+// half. Most cases yield either 1 conic (no interior chop point) or 3 conics
+// (one chop, then one of the halves chops again). Returns the count written
+// to dst.
+static int chop_conic(const SkPoint src[3], SkConic dst[4], const SkScalar weight) {
+    SkConic halves[2];
+    const int firstPassCnt = split_conic(src, halves, weight);
+    if (firstPassCnt != 2) {
+        dst[0] = halves[0];
+        return firstPassCnt;
+    }
+    // Re-split each half; results are packed consecutively into dst.
+    const int leftCnt = split_conic(halves[0].fPts, dst, halves[0].fW);
+    return leftCnt + split_conic(halves[1].fPts, &dst[leftCnt], halves[1].fW);
+}
+
+// Returns nonzero if the quad/conic is degenerate or close enough to a line
+// that it should be approximated with line segments instead; returns 0
+// otherwise. On the 0 path *dsqd holds the squared distance of the middle
+// control point from the chord p[0]..p[2] (it is left unset on the first
+// early-out, matching the original contract).
+static int is_degen_quad_or_conic(const SkPoint p[3], SkScalar* dsqd) {
+    static const SkScalar gDegenerateToLineTol = GrPathUtils::kDefaultTolerance;
+    static const SkScalar gDegenerateToLineTolSqd =
+        SkScalarMul(gDegenerateToLineTol, gDegenerateToLineTol);
+
+    const bool adjacentPtsClose = p[0].distanceToSqd(p[1]) < gDegenerateToLineTolSqd ||
+                                  p[1].distanceToSqd(p[2]) < gDegenerateToLineTolSqd;
+    if (adjacentPtsClose) {
+        return 1;
+    }
+
+    *dsqd = p[1].distanceToLineBetweenSqd(p[0], p[2]);
+    if (*dsqd < gDegenerateToLineTolSqd) {
+        return 1;
+    }
+
+    return p[2].distanceToLineBetweenSqd(p[1], p[0]) < gDegenerateToLineTolSqd ? 1 : 0;
+}
+
+// Convenience overload for callers that don't need the chord distance.
+static int is_degen_quad_or_conic(const SkPoint p[3]) {
+    SkScalar unusedDsqd;
+    return is_degen_quad_or_conic(p, &unusedDsqd);
+}
+
+// Returns how many times the quad should be halved to limit overfill of its
+// bounding geometry (0..kMaxSub), or -1 if the quad is (nearly) degenerate
+// and should be approximated with lines instead.
+static int num_quad_subdivs(const SkPoint p[3]) {
+    SkScalar chordDistSqd;
+    if (is_degen_quad_or_conic(p, &chordDistSqd)) {
+        return -1;
+    }
+
+    // tolerance of triangle height in pixels
+    // tuned on windows Quadro FX 380 / Z600
+    // trade off of fill vs cpu time on verts
+    // maybe different when do this using gpu (geo or tess shaders)
+    static const SkScalar gSubdivTol = 175 * SK_Scalar1;
+
+    if (chordDistSqd <= SkScalarMul(gSubdivTol, gSubdivTol)) {
+        return 0;
+    }
+
+    static const int kMaxSub = 4;
+    // subdividing the quad reduces d by 4. so we want x = log4(d/tol)
+    // = log4(d*d/tol*tol)/2
+    // = log2(d*d/tol*tol)
+    // +1 since we're ignoring the mantissa contribution.
+    const int level = get_float_exp(chordDistSqd / (gSubdivTol * gSubdivTol)) + 1;
+    return SkTMin(SkTMax(0, level), kMaxSub);
+}
+
+/**
+ * Generates the lines and quads to be rendered. Lines are always recorded in
+ * device space. We will do a device space bloat to account for the 1pixel
+ * thickness.
+ * Quads are recorded in device space unless m contains
+ * perspective, then in they are in src space. We do this because we will
+ * subdivide large quads to reduce over-fill. This subdivision has to be
+ * performed before applying the perspective matrix.
+ * Returns the total quad count after the per-quad subdivision counts
+ * recorded in quadSubdivCnts are applied (each entry n contributes 2^n).
+ */
+static int gather_lines_and_quads(const SkPath& path,
+ const SkMatrix& m,
+ const SkIRect& devClipBounds,
+ GrAAHairLinePathRenderer::PtArray* lines,
+ GrAAHairLinePathRenderer::PtArray* quads,
+ GrAAHairLinePathRenderer::PtArray* conics,
+ GrAAHairLinePathRenderer::IntArray* quadSubdivCnts,
+ GrAAHairLinePathRenderer::FloatArray* conicWeights) {
+ SkPath::Iter iter(path, false);
+
+ int totalQuadCount = 0;
+ SkRect bounds;
+ SkIRect ibounds;
+
+ bool persp = m.hasPerspective();
+
+ // Walk every verb. Each segment is mapped to device space, outset by one
+ // pixel for the AA bloat, and culled against the device clip bounds
+ // before being recorded.
+ for (;;) {
+ SkPoint pathPts[4];
+ SkPoint devPts[4];
+ SkPath::Verb verb = iter.next(pathPts);
+ switch (verb) {
+ case SkPath::kConic_Verb: {
+ SkConic dst[4];
+ // We chop the conics to create tighter clipping to hide error
+ // that appears near max curvature of very thin conics. Thin
+ // hyperbolas with high weight still show error.
+ int conicCnt = chop_conic(pathPts, dst, iter.conicWeight());
+ for (int i = 0; i < conicCnt; ++i) {
+ SkPoint* chopPnts = dst[i].fPts;
+ m.mapPoints(devPts, chopPnts, 3);
+ bounds.setBounds(devPts, 3);
+ bounds.outset(SK_Scalar1, SK_Scalar1);
+ bounds.roundOut(&ibounds);
+ if (SkIRect::Intersects(devClipBounds, ibounds)) {
+ if (is_degen_quad_or_conic(devPts)) {
+ // Nearly a line: record as two line segments.
+ SkPoint* pts = lines->push_back_n(4);
+ pts[0] = devPts[0];
+ pts[1] = devPts[1];
+ pts[2] = devPts[1];
+ pts[3] = devPts[2];
+ } else {
+ // when in perspective keep conics in src space
+ SkPoint* cPts = persp ? chopPnts : devPts;
+ SkPoint* pts = conics->push_back_n(3);
+ pts[0] = cPts[0];
+ pts[1] = cPts[1];
+ pts[2] = cPts[2];
+ conicWeights->push_back() = dst[i].fW;
+ }
+ }
+ }
+ break;
+ }
+ case SkPath::kMove_Verb:
+ break;
+ case SkPath::kLine_Verb:
+ m.mapPoints(devPts, pathPts, 2);
+ bounds.setBounds(devPts, 2);
+ bounds.outset(SK_Scalar1, SK_Scalar1);
+ bounds.roundOut(&ibounds);
+ if (SkIRect::Intersects(devClipBounds, ibounds)) {
+ SkPoint* pts = lines->push_back_n(2);
+ pts[0] = devPts[0];
+ pts[1] = devPts[1];
+ }
+ break;
+ case SkPath::kQuad_Verb: {
+ SkPoint choppedPts[5];
+ // Chopping the quad helps when the quad is either degenerate or nearly degenerate.
+ // When it is degenerate it allows the approximation with lines to work since the
+ // chop point (if there is one) will be at the parabola's vertex. In the nearly
+ // degenerate the QuadUVMatrix computed for the points is almost singular which
+ // can cause rendering artifacts.
+ int n = SkChopQuadAtMaxCurvature(pathPts, choppedPts);
+ for (int i = 0; i < n; ++i) {
+ SkPoint* quadPts = choppedPts + i * 2;
+ m.mapPoints(devPts, quadPts, 3);
+ bounds.setBounds(devPts, 3);
+ bounds.outset(SK_Scalar1, SK_Scalar1);
+ bounds.roundOut(&ibounds);
+
+ if (SkIRect::Intersects(devClipBounds, ibounds)) {
+ int subdiv = num_quad_subdivs(devPts);
+ SkASSERT(subdiv >= -1);
+ if (-1 == subdiv) {
+ // Degenerate: record as two line segments.
+ SkPoint* pts = lines->push_back_n(4);
+ pts[0] = devPts[0];
+ pts[1] = devPts[1];
+ pts[2] = devPts[1];
+ pts[3] = devPts[2];
+ } else {
+ // when in perspective keep quads in src space
+ SkPoint* qPts = persp ? quadPts : devPts;
+ SkPoint* pts = quads->push_back_n(3);
+ pts[0] = qPts[0];
+ pts[1] = qPts[1];
+ pts[2] = qPts[2];
+ quadSubdivCnts->push_back() = subdiv;
+ totalQuadCount += 1 << subdiv;
+ }
+ }
+ }
+ break;
+ }
+ case SkPath::kCubic_Verb:
+ m.mapPoints(devPts, pathPts, 4);
+ bounds.setBounds(devPts, 4);
+ bounds.outset(SK_Scalar1, SK_Scalar1);
+ bounds.roundOut(&ibounds);
+ if (SkIRect::Intersects(devClipBounds, ibounds)) {
+ PREALLOC_PTARRAY(32) q;
+ // we don't need a direction if we aren't constraining the subdivision
+ const SkPathPriv::FirstDirection kDummyDir = SkPathPriv::kCCW_FirstDirection;
+ // We convert cubics to quadratics (for now).
+ // In perspective have to do conversion in src space.
+ if (persp) {
+ SkScalar tolScale =
+ GrPathUtils::scaleToleranceToSrc(SK_Scalar1, m,
+ path.getBounds());
+ GrPathUtils::convertCubicToQuads(pathPts, tolScale, false, kDummyDir, &q);
+ } else {
+ GrPathUtils::convertCubicToQuads(devPts, SK_Scalar1, false, kDummyDir, &q);
+ }
+ // Cull and classify each generated quad individually.
+ for (int i = 0; i < q.count(); i += 3) {
+ SkPoint* qInDevSpace;
+ // bounds has to be calculated in device space, but q is
+ // in src space when there is perspective.
+ if (persp) {
+ m.mapPoints(devPts, &q[i], 3);
+ bounds.setBounds(devPts, 3);
+ qInDevSpace = devPts;
+ } else {
+ bounds.setBounds(&q[i], 3);
+ qInDevSpace = &q[i];
+ }
+ bounds.outset(SK_Scalar1, SK_Scalar1);
+ bounds.roundOut(&ibounds);
+ if (SkIRect::Intersects(devClipBounds, ibounds)) {
+ int subdiv = num_quad_subdivs(qInDevSpace);
+ SkASSERT(subdiv >= -1);
+ if (-1 == subdiv) {
+ SkPoint* pts = lines->push_back_n(4);
+ // lines should always be in device coords
+ pts[0] = qInDevSpace[0];
+ pts[1] = qInDevSpace[1];
+ pts[2] = qInDevSpace[1];
+ pts[3] = qInDevSpace[2];
+ } else {
+ SkPoint* pts = quads->push_back_n(3);
+ // q is already in src space when there is no
+ // perspective and dev coords otherwise.
+ pts[0] = q[0 + i];
+ pts[1] = q[1 + i];
+ pts[2] = q[2 + i];
+ quadSubdivCnts->push_back() = subdiv;
+ totalQuadCount += 1 << subdiv;
+ }
+ }
+ }
+ }
+ break;
+ case SkPath::kClose_Verb:
+ break;
+ case SkPath::kDone_Verb:
+ return totalQuadCount;
+ }
+ }
+}
+
+// Per-vertex data for hairline line segments: position plus a scalar
+// coverage (1 on the center spine, 0 on the outer bloated edge).
+struct LineVertex {
+ SkPoint fPos;
+ float fCoverage;
+};
+
+// Per-vertex data for quads and conics. The union holds either conic KLM
+// coefficients or quad UV coords; fBogus pads the union so the vertex is
+// always 3 * sizeof(SkPoint), as the static assert below checks.
+struct BezierVertex {
+ SkPoint fPos;
+ union {
+ struct {
+ SkScalar fK;
+ SkScalar fL;
+ SkScalar fM;
+ } fConic;
+ SkVector fQuadCoord;
+ struct {
+ SkScalar fBogus[4];
+ };
+ };
+};
+
+GR_STATIC_ASSERT(sizeof(BezierVertex) == 3 * sizeof(SkPoint));
+
+// Computes the intersection of two lines, each specified by a point on the
+// line and its normal, writing the result to *result. If the normals are
+// parallel the determinant is zero and the result is non-finite; callers are
+// expected to have screened out degenerate geometry beforehand.
+static void intersect_lines(const SkPoint& ptA, const SkVector& normA,
+                            const SkPoint& ptB, const SkVector& normB,
+                            SkPoint* result) {
+    // Implicit line form: dot(norm, P) + w = 0.
+    const SkScalar lineAW = -normA.dot(ptA);
+    const SkScalar lineBW = -normB.dot(ptB);
+
+    // Cramer's rule: divide by the 2x2 determinant of the two normals.
+    const SkScalar det = SkScalarMul(normA.fX, normB.fY) -
+                         SkScalarMul(normA.fY, normB.fX);
+    const SkScalar detInv = SkScalarInvert(det);
+
+    result->fX = SkScalarMul(SkScalarMul(normA.fY, lineBW) - SkScalarMul(lineAW, normB.fY),
+                             detInv);
+    result->fY = SkScalarMul(SkScalarMul(lineAW, normB.fX) - SkScalarMul(normA.fX, lineBW),
+                             detInv);
+}
+
+// Fills in the quad (u,v) coords of all five poly vertices by mapping their
+// positions through the quad's device->UV matrix.
+static void set_uv_quad(const SkPoint qpts[3], BezierVertex verts[kQuadNumVertices]) {
+ // this should be in the src space, not dev coords, when we have perspective
+ GrPathUtils::QuadUVMatrix DevToUV(qpts);
+ DevToUV.apply<kQuadNumVertices, sizeof(BezierVertex), sizeof(SkPoint)>(verts);
+}
+
+// Computes the positions of the 5-sided poly (a0,a1,b0,c0,c1) that bounds the
+// quad a,b,c expanded by one pixel; see the diagram below. Either both or
+// neither of toDevice/toSrc must be supplied: when present, the quad is
+// bloated in device space and the verts are mapped back to src space.
+static void bloat_quad(const SkPoint qpts[3], const SkMatrix* toDevice,
+ const SkMatrix* toSrc, BezierVertex verts[kQuadNumVertices]) {
+ SkASSERT(!toDevice == !toSrc);
+ // original quad is specified by tri a,b,c
+ SkPoint a = qpts[0];
+ SkPoint b = qpts[1];
+ SkPoint c = qpts[2];
+
+ if (toDevice) {
+ toDevice->mapPoints(&a, 1);
+ toDevice->mapPoints(&b, 1);
+ toDevice->mapPoints(&c, 1);
+ }
+ // make a new poly where we replace a and c by a 1-pixel wide edges orthog
+ // to edges ab and bc:
+ //
+ // before | after
+ // | b0
+ // b |
+ // |
+ // | a0 c0
+ // a c | a1 c1
+ //
+ // edges a0->b0 and b0->c0 are parallel to original edges a->b and b->c,
+ // respectively.
+ BezierVertex& a0 = verts[0];
+ BezierVertex& a1 = verts[1];
+ BezierVertex& b0 = verts[2];
+ BezierVertex& c0 = verts[3];
+ BezierVertex& c1 = verts[4];
+
+ SkVector ab = b;
+ ab -= a;
+ SkVector ac = c;
+ ac -= a;
+ SkVector cb = b;
+ cb -= c;
+
+ // We should have already handled degenerates
+ SkASSERT(ab.length() > 0 && cb.length() > 0);
+
+ // Unit normals to ab and cb, each flipped to point away from the chord ac
+ // so the poly expands outward.
+ ab.normalize();
+ SkVector abN;
+ abN.setOrthog(ab, SkVector::kLeft_Side);
+ if (abN.dot(ac) > 0) {
+ abN.negate();
+ }
+
+ cb.normalize();
+ SkVector cbN;
+ cbN.setOrthog(cb, SkVector::kLeft_Side);
+ if (cbN.dot(ac) < 0) {
+ cbN.negate();
+ }
+
+ // Offset the endpoints one pixel along each normal.
+ a0.fPos = a;
+ a0.fPos += abN;
+ a1.fPos = a;
+ a1.fPos -= abN;
+
+ c0.fPos = c;
+ c0.fPos += cbN;
+ c1.fPos = c;
+ c1.fPos -= cbN;
+
+ // Apex of the poly: intersection of the two outer offset edges.
+ intersect_lines(a0.fPos, abN, c0.fPos, cbN, &b0.fPos);
+
+ if (toSrc) {
+ toSrc->mapPointsWithStride(&verts[0].fPos, sizeof(BezierVertex), kQuadNumVertices);
+ }
+}
+
+// Equations based off of Loop-Blinn Quadratic GPU Rendering
+// Input Parametric:
+// P(t) = (P0*(1-t)^2 + 2*w*P1*t*(1-t) + P2*t^2) / ((1-t)^2 + 2*w*t*(1-t) + t^2)
+// Output Implicit:
+// f(x, y, w) = f(P) = K^2 - LM
+// K = dot(k, P), L = dot(l, P), M = dot(m, P)
+// k, l, m are calculated in function GrPathUtils::getConicKLM
+static void set_conic_coeffs(const SkPoint p[3], BezierVertex verts[kQuadNumVertices],
+                             const SkScalar weight) {
+    SkScalar klm[9];
+    GrPathUtils::getConicKLM(p, weight, klm);
+
+    // Evaluate each implicit linear functional (rows of klm) at every
+    // vertex position.
+    for (int i = 0; i < kQuadNumVertices; ++i) {
+        const SkPoint& pos = verts[i].fPos;
+        verts[i].fConic.fK = pos.fX * klm[0] + pos.fY * klm[1] + klm[2];
+        verts[i].fConic.fL = pos.fX * klm[3] + pos.fY * klm[4] + klm[5];
+        verts[i].fConic.fM = pos.fX * klm[6] + pos.fY * klm[7] + klm[8];
+    }
+}
+
+// Emits one bloated 5-vertex poly for a conic: positions come from
+// bloat_quad, KLM coefficients from set_conic_coeffs. Advances *vert past
+// the kQuadNumVertices vertices written.
+static void add_conics(const SkPoint p[3],
+ const SkScalar weight,
+ const SkMatrix* toDevice,
+ const SkMatrix* toSrc,
+ BezierVertex** vert) {
+ bloat_quad(p, toDevice, toSrc, *vert);
+ set_conic_coeffs(p, *vert, weight);
+ *vert += kQuadNumVertices;
+}
+
+// Recursively halves the quad 'subdiv' times and emits one bloated 5-vertex
+// poly (positions plus UV coords) per leaf quad, advancing *vert past each
+// group of kQuadNumVertices written.
+static void add_quads(const SkPoint p[3],
+                      int subdiv,
+                      const SkMatrix* toDevice,
+                      const SkMatrix* toSrc,
+                      BezierVertex** vert) {
+    SkASSERT(subdiv >= 0);
+    if (0 == subdiv) {
+        // Leaf: emit the bloated poly for this quad.
+        bloat_quad(p, toDevice, toSrc, *vert);
+        set_uv_quad(p, *vert);
+        *vert += kQuadNumVertices;
+        return;
+    }
+    SkPoint halves[5];
+    SkChopQuadAtHalf(p, halves);
+    add_quads(halves + 0, subdiv - 1, toDevice, toSrc, vert);
+    add_quads(halves + 2, subdiv - 1, toDevice, toSrc, vert);
+}
+
+// Emits the 6-vertex geometry for one AA line segment (see the diagram above
+// kLineSegIdxBufPattern): the two endpoints at full coverage plus four outer
+// points, offset one pixel perpendicular and half a pixel parallel to the
+// line, at zero coverage. Advances *vert past the vertices written.
+static void add_line(const SkPoint p[2],
+ const SkMatrix* toSrc,
+ uint8_t coverage,
+ LineVertex** vert) {
+ const SkPoint& a = p[0];
+ const SkPoint& b = p[1];
+
+ SkVector ortho, vec = b;
+ vec -= a;
+
+ // setLength fails for a zero-length segment; handled in the else below.
+ if (vec.setLength(SK_ScalarHalf)) {
+ // Create a vector orthogonal to 'vec' and of unit length
+ ortho.fX = 2.0f * vec.fY;
+ ortho.fY = -2.0f * vec.fX;
+
+ float floatCoverage = GrNormalizeByteToFloat(coverage);
+
+ (*vert)[0].fPos = a;
+ (*vert)[0].fCoverage = floatCoverage;
+ (*vert)[1].fPos = b;
+ (*vert)[1].fCoverage = floatCoverage;
+ (*vert)[2].fPos = a - vec + ortho;
+ (*vert)[2].fCoverage = 0;
+ (*vert)[3].fPos = b + vec + ortho;
+ (*vert)[3].fCoverage = 0;
+ (*vert)[4].fPos = a - vec - ortho;
+ (*vert)[4].fCoverage = 0;
+ (*vert)[5].fPos = b + vec - ortho;
+ (*vert)[5].fCoverage = 0;
+
+ if (toSrc) {
+ toSrc->mapPointsWithStride(&(*vert)->fPos,
+ sizeof(LineVertex),
+ kLineSegNumVertices);
+ }
+ } else {
+ // just make it degenerate and likely offscreen
+ for (int i = 0; i < kLineSegNumVertices; ++i) {
+ (*vert)[i].fPos.set(SK_ScalarMax, SK_ScalarMax);
+ }
+ }
+
+ *vert += kLineSegNumVertices;
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+// Accepts AA hairline (or hairline-equivalent) strokes. Quads and conics
+// need shader derivative support in the fragment shader; a path containing
+// only line segments never does.
+bool GrAAHairLinePathRenderer::onCanDrawPath(const CanDrawPathArgs& args) const {
+    if (!args.fAntiAlias) {
+        return false;
+    }
+    if (!IsStrokeHairlineOrEquivalent(*args.fStroke, *args.fViewMatrix, nullptr)) {
+        return false;
+    }
+    return SkPath::kLine_SegmentMask == args.fPath->getSegmentMasks() ||
+           args.fShaderCaps->shaderDerivativeSupport();
+}
+
+// Debug helper: verifies that every generated vertex, mapped back through
+// viewMatrix, lies inside devBounds (with a small tolerance when the matrix
+// has perspective). Returns true when the bounds contain all vertices or no
+// valid vertex was found.
+template <class VertexType>
+bool check_bounds(const SkMatrix& viewMatrix, const SkRect& devBounds, void* vertices, int vCount)
+{
+ SkRect tolDevBounds = devBounds;
+ // The bounds ought to be tight, but in perspective the below code runs the verts
+ // through the view matrix to get back to dev coords, which can introduce imprecision.
+ if (viewMatrix.hasPerspective()) {
+ tolDevBounds.outset(SK_Scalar1 / 1000, SK_Scalar1 / 1000);
+ } else {
+ // Non-persp matrices cause this path renderer to draw in device space.
+ SkASSERT(viewMatrix.isIdentity());
+ }
+ SkRect actualBounds;
+
+ VertexType* verts = reinterpret_cast<VertexType*>(vertices);
+ bool first = true;
+ for (int i = 0; i < vCount; ++i) {
+ SkPoint pos = verts[i].fPos;
+ // This is a hack to workaround the fact that we move some degenerate segments offscreen.
+ if (SK_ScalarMax == pos.fX) {
+ continue;
+ }
+ viewMatrix.mapPoints(&pos, 1);
+ if (first) {
+ actualBounds.set(pos.fX, pos.fY, pos.fX, pos.fY);
+ first = false;
+ } else {
+ actualBounds.growToInclude(pos.fX, pos.fY);
+ }
+ }
+ // 'first' still true means every vertex was a moved-offscreen degenerate;
+ // nothing meaningful to check in that case.
+ if (!first) {
+ return tolDevBounds.contains(actualBounds);
+ }
+
+ return true;
+}
+
+// Vertex batch that accumulates one or more hairline paths (each with its
+// own view matrix, color, coverage, and device clip) and renders them as
+// AA lines/quads/conics in onPrepareDraws.
+class AAHairlineBatch : public GrVertexBatch {
+public:
+ struct Geometry {
+ GrColor fColor;
+ uint8_t fCoverage;
+ SkMatrix fViewMatrix;
+ SkPath fPath;
+ SkIRect fDevClipBounds;
+ };
+
+ static GrDrawBatch* Create(const Geometry& geometry) { return new AAHairlineBatch(geometry); }
+
+ const char* name() const override { return "AAHairlineBatch"; }
+
+ void getInvariantOutputColor(GrInitInvariantOutput* out) const override {
+ // When this is called on a batch, there is only one geometry bundle
+ out->setKnownFourComponents(fGeoData[0].fColor);
+ }
+ void getInvariantOutputCoverage(GrInitInvariantOutput* out) const override {
+ out->setUnknownSingleComponent();
+ }
+
+private:
+ // Caches the pipeline's color/coverage/local-coord decisions so they can
+ // be consulted during batching and draw preparation.
+ void initBatchTracker(const GrPipelineOptimizations& opt) override {
+ // Handle any color overrides
+ if (!opt.readsColor()) {
+ fGeoData[0].fColor = GrColor_ILLEGAL;
+ }
+ opt.getOverrideColorIfSet(&fGeoData[0].fColor);
+
+ // setup batch properties
+ fBatch.fColorIgnored = !opt.readsColor();
+ fBatch.fColor = fGeoData[0].fColor;
+ fBatch.fUsesLocalCoords = opt.readsLocalCoords();
+ fBatch.fCoverageIgnored = !opt.readsCoverage();
+ fBatch.fCoverage = fGeoData[0].fCoverage;
+ }
+
+ SkSTArray<1, Geometry, true>* geoData() { return &fGeoData; }
+
+ void onPrepareDraws(Target*) override;
+
+ typedef SkTArray<SkPoint, true> PtArray;
+ typedef SkTArray<int, true> IntArray;
+ typedef SkTArray<float, true> FloatArray;
+
+ AAHairlineBatch(const Geometry& geometry) {
+ this->initClassID<AAHairlineBatch>();
+ fGeoData.push_back(geometry);
+
+ // compute bounds
+ fBounds = geometry.fPath.getBounds();
+ geometry.fViewMatrix.mapRect(&fBounds);
+
+ // This is b.c. hairlines are notionally infinitely thin so without expansion
+ // two overlapping lines could be reordered even though they hit the same pixels.
+ fBounds.outset(0.5f, 0.5f);
+ }
+
+ // Two hairline batches can merge only when pipelines are compatible and
+ // their perspective-ness, color, coverage, and (when local coords are
+ // read) view matrices agree.
+ bool onCombineIfPossible(GrBatch* t, const GrCaps& caps) override {
+ AAHairlineBatch* that = t->cast<AAHairlineBatch>();
+
+ if (!GrPipeline::CanCombine(*this->pipeline(), this->bounds(), *that->pipeline(),
+ that->bounds(), caps)) {
+ return false;
+ }
+
+ if (this->viewMatrix().hasPerspective() != that->viewMatrix().hasPerspective()) {
+ return false;
+ }
+
+ // We go to identity if we don't have perspective
+ if (this->viewMatrix().hasPerspective() &&
+ !this->viewMatrix().cheapEqualTo(that->viewMatrix())) {
+ return false;
+ }
+
+ // TODO we can actually batch hairlines if they are the same color in a kind of bulk method
+ // but we haven't implemented this yet
+ // TODO investigate going to vertex color and coverage?
+ if (this->coverage() != that->coverage()) {
+ return false;
+ }
+
+ if (this->color() != that->color()) {
+ return false;
+ }
+
+ SkASSERT(this->usesLocalCoords() == that->usesLocalCoords());
+ if (this->usesLocalCoords() && !this->viewMatrix().cheapEqualTo(that->viewMatrix())) {
+ return false;
+ }
+
+ fGeoData.push_back_n(that->geoData()->count(), that->geoData()->begin());
+ this->joinBounds(that->bounds());
+ return true;
+ }
+
+ GrColor color() const { return fBatch.fColor; }
+ uint8_t coverage() const { return fBatch.fCoverage; }
+ bool usesLocalCoords() const { return fBatch.fUsesLocalCoords; }
+ const SkMatrix& viewMatrix() const { return fGeoData[0].fViewMatrix; }
+ bool coverageIgnored() const { return fBatch.fCoverageIgnored; }
+
+ struct BatchTracker {
+ GrColor fColor;
+ uint8_t fCoverage;
+ SkRect fDevBounds;
+ bool fUsesLocalCoords;
+ bool fColorIgnored;
+ bool fCoverageIgnored;
+ };
+
+ BatchTracker fBatch;
+ SkSTArray<1, Geometry, true> fGeoData;
+};
+
+// Builds and issues the actual draws: gathers lines/quads/conics from every
+// batched path, fills instanced vertex data, and emits up to three draw
+// calls (lines, then quads, then conics), each with its own geometry
+// processor.
+void AAHairlineBatch::onPrepareDraws(Target* target) {
+ // Setup the viewmatrix and localmatrix for the GrGeometryProcessor.
+ SkMatrix invert;
+ if (!this->viewMatrix().invert(&invert)) {
+ return;
+ }
+
+ // we will transform to identity space if the viewmatrix does not have perspective
+ bool hasPerspective = this->viewMatrix().hasPerspective();
+ const SkMatrix* geometryProcessorViewM = &SkMatrix::I();
+ const SkMatrix* geometryProcessorLocalM = &invert;
+ const SkMatrix* toDevice = nullptr;
+ const SkMatrix* toSrc = nullptr;
+ if (hasPerspective) {
+ geometryProcessorViewM = &this->viewMatrix();
+ geometryProcessorLocalM = &SkMatrix::I();
+ toDevice = &this->viewMatrix();
+ toSrc = &invert;
+ }
+
+ SkAutoTUnref<const GrGeometryProcessor> lineGP;
+ {
+ using namespace GrDefaultGeoProcFactory;
+
+ Color color(this->color());
+ Coverage coverage(Coverage::kAttribute_Type);
+ LocalCoords localCoords(this->usesLocalCoords() ? LocalCoords::kUsePosition_Type :
+ LocalCoords::kUnused_Type);
+ localCoords.fMatrix = geometryProcessorLocalM;
+ lineGP.reset(GrDefaultGeoProcFactory::Create(color, coverage, localCoords,
+ *geometryProcessorViewM));
+ }
+
+ SkAutoTUnref<const GrGeometryProcessor> quadGP(
+ GrQuadEffect::Create(this->color(),
+ *geometryProcessorViewM,
+ kHairlineAA_GrProcessorEdgeType,
+ target->caps(),
+ *geometryProcessorLocalM,
+ this->usesLocalCoords(),
+ this->coverage()));
+
+ SkAutoTUnref<const GrGeometryProcessor> conicGP(
+ GrConicEffect::Create(this->color(),
+ *geometryProcessorViewM,
+ kHairlineAA_GrProcessorEdgeType,
+ target->caps(),
+ *geometryProcessorLocalM,
+ this->usesLocalCoords(),
+ this->coverage()));
+
+ // This is hand inlined for maximum performance.
+ PREALLOC_PTARRAY(128) lines;
+ PREALLOC_PTARRAY(128) quads;
+ PREALLOC_PTARRAY(128) conics;
+ IntArray qSubdivs;
+ FloatArray cWeights;
+ int quadCount = 0;
+
+ // Accumulate the geometry of every path in the batch.
+ int instanceCount = fGeoData.count();
+ for (int i = 0; i < instanceCount; i++) {
+ const Geometry& args = fGeoData[i];
+ quadCount += gather_lines_and_quads(args.fPath, args.fViewMatrix, args.fDevClipBounds,
+ &lines, &quads, &conics, &qSubdivs, &cWeights);
+ }
+
+ int lineCount = lines.count() / 2;
+ int conicCount = conics.count() / 3;
+
+ // do lines first
+ if (lineCount) {
+ SkAutoTUnref<const GrIndexBuffer> linesIndexBuffer(
+ ref_lines_index_buffer(target->resourceProvider()));
+ target->initDraw(lineGP, this->pipeline());
+
+ const GrVertexBuffer* vertexBuffer;
+ int firstVertex;
+
+ size_t vertexStride = lineGP->getVertexStride();
+ int vertexCount = kLineSegNumVertices * lineCount;
+ LineVertex* verts = reinterpret_cast<LineVertex*>(
+ target->makeVertexSpace(vertexStride, vertexCount, &vertexBuffer, &firstVertex));
+
+ if (!verts|| !linesIndexBuffer) {
+ SkDebugf("Could not allocate vertices\n");
+ return;
+ }
+
+ SkASSERT(lineGP->getVertexStride() == sizeof(LineVertex));
+
+ for (int i = 0; i < lineCount; ++i) {
+ add_line(&lines[2*i], toSrc, this->coverage(), &verts);
+ }
+
+ {
+ GrVertices vertices;
+ vertices.initInstanced(kTriangles_GrPrimitiveType, vertexBuffer, linesIndexBuffer,
+ firstVertex, kLineSegNumVertices, kIdxsPerLineSeg, lineCount,
+ kLineSegsNumInIdxBuffer);
+ target->draw(vertices);
+ }
+ }
+
+ if (quadCount || conicCount) {
+ const GrVertexBuffer* vertexBuffer;
+ int firstVertex;
+
+ SkAutoTUnref<const GrIndexBuffer> quadsIndexBuffer(
+ ref_quads_index_buffer(target->resourceProvider()));
+
+ // Quads and conics share one vertex allocation and the same index
+ // buffer; conic vertices follow the quad vertices.
+ size_t vertexStride = sizeof(BezierVertex);
+ int vertexCount = kQuadNumVertices * quadCount + kQuadNumVertices * conicCount;
+ void *vertices = target->makeVertexSpace(vertexStride, vertexCount,
+ &vertexBuffer, &firstVertex);
+
+ if (!vertices || !quadsIndexBuffer) {
+ SkDebugf("Could not allocate vertices\n");
+ return;
+ }
+
+ // Setup vertices
+ BezierVertex* verts = reinterpret_cast<BezierVertex*>(vertices);
+
+ int unsubdivQuadCnt = quads.count() / 3;
+ for (int i = 0; i < unsubdivQuadCnt; ++i) {
+ SkASSERT(qSubdivs[i] >= 0);
+ add_quads(&quads[3*i], qSubdivs[i], toDevice, toSrc, &verts);
+ }
+
+ // Start Conics
+ for (int i = 0; i < conicCount; ++i) {
+ add_conics(&conics[3*i], cWeights[i], toDevice, toSrc, &verts);
+ }
+
+ if (quadCount > 0) {
+ target->initDraw(quadGP, this->pipeline());
+
+ {
+ GrVertices verts;
+ verts.initInstanced(kTriangles_GrPrimitiveType, vertexBuffer, quadsIndexBuffer,
+ firstVertex, kQuadNumVertices, kIdxsPerQuad, quadCount,
+ kQuadsNumInIdxBuffer);
+ target->draw(verts);
+ // Advance past the quad vertices to where the conics begin.
+ firstVertex += quadCount * kQuadNumVertices;
+ }
+ }
+
+ if (conicCount > 0) {
+ target->initDraw(conicGP, this->pipeline());
+
+ {
+ GrVertices verts;
+ verts.initInstanced(kTriangles_GrPrimitiveType, vertexBuffer, quadsIndexBuffer,
+ firstVertex, kQuadNumVertices, kIdxsPerQuad, conicCount,
+ kQuadsNumInIdxBuffer);
+ target->draw(verts);
+ }
+ }
+ }
+}
+
+// Builds an AAHairlineBatch geometry record for the given path. When the
+// stroke is a thin non-hairline that is treated as a hairline, the reduced
+// width is folded into the per-batch coverage value.
+static GrDrawBatch* create_hairline_batch(GrColor color,
+                                          const SkMatrix& viewMatrix,
+                                          const SkPath& path,
+                                          const GrStrokeInfo& stroke,
+                                          const SkIRect& devClipBounds) {
+    uint8_t coverage = 0xff;
+    SkScalar hairlineCoverage;
+    if (GrPathRenderer::IsStrokeHairlineOrEquivalent(stroke, viewMatrix, &hairlineCoverage)) {
+        coverage = SkScalarRoundToInt(hairlineCoverage * 0xff);
+    }
+
+    AAHairlineBatch::Geometry geometry;
+    geometry.fColor = color;
+    geometry.fCoverage = coverage;
+    geometry.fViewMatrix = viewMatrix;
+    geometry.fPath = path;
+    geometry.fDevClipBounds = devClipBounds;
+    return AAHairlineBatch::Create(geometry);
+}
+
+// Creates a hairline batch for the path, clipped to the render target's
+// conservative clip bounds, and hands it to the draw target.
+bool GrAAHairLinePathRenderer::onDrawPath(const DrawPathArgs& args) {
+ SkIRect devClipBounds;
+ args.fPipelineBuilder->clip().getConservativeBounds(args.fPipelineBuilder->getRenderTarget(),
+ &devClipBounds);
+
+ SkAutoTUnref<GrDrawBatch> batch(create_hairline_batch(args.fColor, *args.fViewMatrix, *args.fPath,
+ *args.fStroke, devClipBounds));
+ args.fTarget->drawBatch(*args.fPipelineBuilder, batch);
+
+ return true;
+}
+
+///////////////////////////////////////////////////////////////////////////////////////////////////
+
+#ifdef GR_TEST_UTILS
+
+// Test factory: builds a hairline batch from a random path, matrix, and
+// color, with an empty device clip.
+DRAW_BATCH_TEST_DEFINE(AAHairlineBatch) {
+ GrColor color = GrRandomColor(random);
+ SkMatrix viewMatrix = GrTest::TestMatrix(random);
+ GrStrokeInfo stroke(SkStrokeRec::kHairline_InitStyle);
+ SkPath path = GrTest::TestPath(random);
+ SkIRect devClipBounds;
+ devClipBounds.setEmpty();
+ return create_hairline_batch(color, viewMatrix, path, stroke, devClipBounds);
+}
+
+#endif
diff --git a/src/gpu/batches/GrAAHairLinePathRenderer.h b/src/gpu/batches/GrAAHairLinePathRenderer.h
new file mode 100644
index 0000000000..61c06067d9
--- /dev/null
+++ b/src/gpu/batches/GrAAHairLinePathRenderer.h
@@ -0,0 +1,33 @@
+
+/*
+ * Copyright 2011 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrAAHairLinePathRenderer_DEFINED
+#define GrAAHairLinePathRenderer_DEFINED
+
+#include "GrPathRenderer.h"
+
+// Path renderer for anti-aliased hairlines: lines, quads, and conics are
+// drawn as bloated polygons with analytically evaluated edge coverage, and
+// cubics are approximated with quads.
+class GrAAHairLinePathRenderer : public GrPathRenderer {
+public:
+ static GrPathRenderer* Create() { return new GrAAHairLinePathRenderer; }
+
+ // Scratch-array aliases shared with the free functions in the .cpp.
+ typedef SkTArray<SkPoint, true> PtArray;
+ typedef SkTArray<int, true> IntArray;
+ typedef SkTArray<float, true> FloatArray;
+
+private:
+ bool onCanDrawPath(const CanDrawPathArgs&) const override;
+
+ bool onDrawPath(const DrawPathArgs&) override;
+
+ GrAAHairLinePathRenderer() {}
+
+ typedef GrPathRenderer INHERITED;
+};
+
+
+#endif
diff --git a/src/gpu/batches/GrAALinearizingConvexPathRenderer.cpp b/src/gpu/batches/GrAALinearizingConvexPathRenderer.cpp
new file mode 100644
index 0000000000..d05fe4e908
--- /dev/null
+++ b/src/gpu/batches/GrAALinearizingConvexPathRenderer.cpp
@@ -0,0 +1,345 @@
+
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "GrAALinearizingConvexPathRenderer.h"
+
+#include "GrAAConvexTessellator.h"
+#include "GrBatchFlushState.h"
+#include "GrBatchTest.h"
+#include "GrContext.h"
+#include "GrDefaultGeoProcFactory.h"
+#include "GrGeometryProcessor.h"
+#include "GrInvariantOutput.h"
+#include "GrPathUtils.h"
+#include "GrProcessor.h"
+#include "GrPipelineBuilder.h"
+#include "GrStrokeInfo.h"
+#include "SkGeometry.h"
+#include "SkString.h"
+#include "SkTraceEvent.h"
+#include "SkPathPriv.h"
+#include "batches/GrVertexBatch.h"
+#include "gl/GrGLProcessor.h"
+#include "gl/GrGLGeometryProcessor.h"
+#include "gl/builders/GrGLProgramBuilder.h"
+
+static const int DEFAULT_BUFFER_SIZE = 100;
+
+// The thicker the stroke, the harder it is to produce high-quality results using tessellation. For
+// the time being, we simply drop back to software rendering above this stroke width.
+static const SkScalar kMaxStrokeWidth = 20.0;
+
+// Default constructor; the renderer is stateless, so there is nothing to initialize.
+GrAALinearizingConvexPathRenderer::GrAALinearizingConvexPathRenderer() {
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+// Accepts only antialiased, non-inverse, convex paths. Fills are always accepted;
+// strokes are accepted only under a similarity transform, within a width range the
+// tessellator handles well, and with no dashing or round joins.
+bool GrAALinearizingConvexPathRenderer::onCanDrawPath(const CanDrawPathArgs& args) const {
+    if (!args.fAntiAlias) {
+        return false;
+    }
+    if (args.fPath->isInverseFillType()) {
+        return false;
+    }
+    if (!args.fPath->isConvex()) {
+        return false;
+    }
+    if (args.fStroke->getStyle() == SkStrokeRec::kStroke_Style) {
+        // Stroke width is only uniform across the path under a similarity transform.
+        if (!args.fViewMatrix->isSimilarity()) {
+            return false;
+        }
+        // Device-space stroke width; see kMaxStrokeWidth for the quality cutoff.
+        SkScalar strokeWidth = args.fViewMatrix->getMaxScale() * args.fStroke->getWidth();
+        return strokeWidth >= 1.0f && strokeWidth <= kMaxStrokeWidth && !args.fStroke->isDashed() &&
+                SkPathPriv::LastVerbIsClose(*args.fPath) &&
+                args.fStroke->getJoin() != SkPaint::Join::kRound_Join;
+    }
+    return args.fStroke->getStyle() == SkStrokeRec::kFill_Style;
+}
+
+// extract the result vertices and indices from the GrAAConvexTessellator
+static void extract_verts(const GrAAConvexTessellator& tess,
+ void* vertices,
+ size_t vertexStride,
+ GrColor color,
+ uint16_t firstIndex,
+ uint16_t* idxs,
+ bool tweakAlphaForCoverage) {
+ intptr_t verts = reinterpret_cast<intptr_t>(vertices);
+
+ for (int i = 0; i < tess.numPts(); ++i) {
+ *((SkPoint*)((intptr_t)verts + i * vertexStride)) = tess.point(i);
+ }
+
+ // Make 'verts' point to the colors
+ verts += sizeof(SkPoint);
+ for (int i = 0; i < tess.numPts(); ++i) {
+ if (tweakAlphaForCoverage) {
+ SkASSERT(SkScalarRoundToInt(255.0f * tess.coverage(i)) <= 255);
+ unsigned scale = SkScalarRoundToInt(255.0f * tess.coverage(i));
+ GrColor scaledColor = (0xff == scale) ? color : SkAlphaMulQ(color, scale);
+ *reinterpret_cast<GrColor*>(verts + i * vertexStride) = scaledColor;
+ } else {
+ *reinterpret_cast<GrColor*>(verts + i * vertexStride) = color;
+ *reinterpret_cast<float*>(verts + i * vertexStride + sizeof(GrColor)) =
+ tess.coverage(i);
+ }
+ }
+
+ for (int i = 0; i < tess.numIndices(); ++i) {
+ idxs[i] = tess.index(i) + firstIndex;
+ }
+}
+
+// Builds the geometry processor for the fill: per-vertex color, coverage either
+// folded into alpha (kSolid) or as its own attribute, and local coords derived
+// from position when needed. Caller takes ownership of the returned ref.
+static const GrGeometryProcessor* create_fill_gp(bool tweakAlphaForCoverage,
+                                                 const SkMatrix& viewMatrix,
+                                                 bool usesLocalCoords,
+                                                 bool coverageIgnored) {
+    using namespace GrDefaultGeoProcFactory;
+
+    Color color(Color::kAttribute_Type);
+    Coverage::Type coverageType;
+    // TODO remove coverage if coverage is ignored
+    /*if (coverageIgnored) {
+        coverageType = Coverage::kNone_Type;
+    } else*/ if (tweakAlphaForCoverage) {
+        coverageType = Coverage::kSolid_Type;
+    } else {
+        coverageType = Coverage::kAttribute_Type;
+    }
+    Coverage coverage(coverageType);
+    LocalCoords localCoords(usesLocalCoords ? LocalCoords::kUsePosition_Type :
+                                              LocalCoords::kUnused_Type);
+    return CreateForDeviceSpace(color, coverage, localCoords, viewMatrix);
+}
+
+/**
+ * Vertex batch that flattens convex paths with GrAAConvexTessellator and draws the
+ * resulting triangles. Multiple Geometry instances may be merged into one batch
+ * (see onCombineIfPossible); onPrepareDraws accumulates their tessellations into
+ * shared CPU-side buffers and flushes whenever the 16-bit index space would overflow.
+ */
+class AAFlatteningConvexPathBatch : public GrVertexBatch {
+public:
+    // Per-path draw parameters; fStrokeWidth < 0 denotes a fill (see onDrawPath).
+    struct Geometry {
+        GrColor fColor;
+        SkMatrix fViewMatrix;
+        SkPath fPath;
+        SkScalar fStrokeWidth;
+        SkPaint::Join fJoin;
+        SkScalar fMiterLimit;
+    };
+
+    static GrDrawBatch* Create(const Geometry& geometry) {
+        return new AAFlatteningConvexPathBatch(geometry);
+    }
+
+    const char* name() const override { return "AAConvexBatch"; }
+
+    void getInvariantOutputColor(GrInitInvariantOutput* out) const override {
+        // When this is called on a batch, there is only one geometry bundle
+        out->setKnownFourComponents(fGeoData[0].fColor);
+    }
+    void getInvariantOutputCoverage(GrInitInvariantOutput* out) const override {
+        out->setUnknownSingleComponent();
+    }
+
+private:
+    // Captures pipeline optimization results (color overrides, local-coord and
+    // coverage usage) into fBatch before draw preparation.
+    void initBatchTracker(const GrPipelineOptimizations& opt) override {
+        // Handle any color overrides
+        if (!opt.readsColor()) {
+            fGeoData[0].fColor = GrColor_ILLEGAL;
+        }
+        opt.getOverrideColorIfSet(&fGeoData[0].fColor);
+
+        // setup batch properties
+        fBatch.fColorIgnored = !opt.readsColor();
+        fBatch.fColor = fGeoData[0].fColor;
+        fBatch.fUsesLocalCoords = opt.readsLocalCoords();
+        fBatch.fCoverageIgnored = !opt.readsCoverage();
+        fBatch.fLinesOnly = SkPath::kLine_SegmentMask == fGeoData[0].fPath.getSegmentMasks();
+        fBatch.fCanTweakAlphaForCoverage = opt.canTweakAlphaForCoverage();
+    }
+
+    // Copies the accumulated CPU-side vertex/index data into GPU space and issues
+    // one indexed triangle draw. Silently skips the draw (with a debug message) if
+    // GPU space cannot be allocated.
+    void draw(GrVertexBatch::Target* target, const GrPipeline* pipeline, int vertexCount,
+            size_t vertexStride, void* vertices, int indexCount, uint16_t* indices) {
+        if (vertexCount == 0 || indexCount == 0) {
+            return;
+        }
+        const GrVertexBuffer* vertexBuffer;
+        GrVertices info;
+        int firstVertex;
+        void* verts = target->makeVertexSpace(vertexStride, vertexCount, &vertexBuffer,
+                &firstVertex);
+        if (!verts) {
+            SkDebugf("Could not allocate vertices\n");
+            return;
+        }
+        memcpy(verts, vertices, vertexCount * vertexStride);
+
+        const GrIndexBuffer* indexBuffer;
+        int firstIndex;
+        uint16_t* idxs = target->makeIndexSpace(indexCount, &indexBuffer, &firstIndex);
+        if (!idxs) {
+            SkDebugf("Could not allocate indices\n");
+            return;
+        }
+        memcpy(idxs, indices, indexCount * sizeof(uint16_t));
+        info.initIndexed(kTriangles_GrPrimitiveType, vertexBuffer, indexBuffer, firstVertex,
+                firstIndex, vertexCount, indexCount);
+        target->draw(info);
+    }
+
+    // Tessellates every Geometry, packing results into growable heap buffers; flushes
+    // mid-loop whenever adding an instance would exceed the uint16_t index range.
+    void onPrepareDraws(Target* target) override {
+        bool canTweakAlphaForCoverage = this->canTweakAlphaForCoverage();
+
+        // Setup GrGeometryProcessor
+        SkAutoTUnref<const GrGeometryProcessor> gp(create_fill_gp(canTweakAlphaForCoverage,
+                                                                  this->viewMatrix(),
+                                                                  this->usesLocalCoords(),
+                                                                  this->coverageIgnored()));
+        if (!gp) {
+            SkDebugf("Couldn't create a GrGeometryProcessor\n");
+            return;
+        }
+
+        target->initDraw(gp, this->pipeline());
+
+        size_t vertexStride = gp->getVertexStride();
+
+        // Stride must match the layout extract_verts() writes for this coverage mode.
+        SkASSERT(canTweakAlphaForCoverage ?
+                 vertexStride == sizeof(GrDefaultGeoProcFactory::PositionColorAttr) :
+                 vertexStride == sizeof(GrDefaultGeoProcFactory::PositionColorCoverageAttr));
+
+        int instanceCount = fGeoData.count();
+
+        int vertexCount = 0;
+        int indexCount = 0;
+        int maxVertices = DEFAULT_BUFFER_SIZE;
+        int maxIndices = DEFAULT_BUFFER_SIZE;
+        uint8_t* vertices = (uint8_t*) sk_malloc_throw(maxVertices * vertexStride);
+        uint16_t* indices = (uint16_t*) sk_malloc_throw(maxIndices * sizeof(uint16_t));
+        for (int i = 0; i < instanceCount; i++) {
+            Geometry& args = fGeoData[i];
+            GrAAConvexTessellator tess(args.fStrokeWidth, args.fJoin, args.fMiterLimit);
+
+            // Skip instances the tessellator cannot handle.
+            if (!tess.tessellate(args.fViewMatrix, args.fPath)) {
+                continue;
+            }
+
+            int currentIndices = tess.numIndices();
+            SkASSERT(currentIndices <= UINT16_MAX);
+            if (indexCount + currentIndices > UINT16_MAX) {
+                // if we added the current instance, we would overflow the indices we can store in a
+                // uint16_t. Draw what we've got so far and reset.
+                draw(target, this->pipeline(), vertexCount, vertexStride, vertices, indexCount,
+                        indices);
+                vertexCount = 0;
+                indexCount = 0;
+            }
+            // Grow the CPU buffers geometrically as needed.
+            int currentVertices = tess.numPts();
+            if (vertexCount + currentVertices > maxVertices) {
+                maxVertices = SkTMax(vertexCount + currentVertices, maxVertices * 2);
+                vertices = (uint8_t*) sk_realloc_throw(vertices, maxVertices * vertexStride);
+            }
+            if (indexCount + currentIndices > maxIndices) {
+                maxIndices = SkTMax(indexCount + currentIndices, maxIndices * 2);
+                indices = (uint16_t*) sk_realloc_throw(indices, maxIndices * sizeof(uint16_t));
+            }
+
+            // 'vertexCount' doubles as the index rebase for this instance.
+            extract_verts(tess, vertices + vertexStride * vertexCount, vertexStride, args.fColor,
+                    vertexCount, indices + indexCount, canTweakAlphaForCoverage);
+            vertexCount += currentVertices;
+            indexCount += currentIndices;
+        }
+        // Flush whatever remains after the loop.
+        draw(target, this->pipeline(), vertexCount, vertexStride, vertices, indexCount,
+                indices);
+        sk_free(vertices);
+        sk_free(indices);
+    }
+
+    SkSTArray<1, Geometry, true>* geoData() { return &fGeoData; }
+
+    AAFlatteningConvexPathBatch(const Geometry& geometry) {
+        this->initClassID<AAFlatteningConvexPathBatch>();
+        fGeoData.push_back(geometry);
+
+        // compute bounds
+        fBounds = geometry.fPath.getBounds();
+        geometry.fViewMatrix.mapRect(&fBounds);
+    }
+
+    // Merges 'that' into this batch when the pipelines are compatible; the merged
+    // batch adopts the weaker of the two alpha-tweak capabilities.
+    bool onCombineIfPossible(GrBatch* t, const GrCaps& caps) override {
+        AAFlatteningConvexPathBatch* that = t->cast<AAFlatteningConvexPathBatch>();
+        if (!GrPipeline::CanCombine(*this->pipeline(), this->bounds(), *that->pipeline(),
+                                    that->bounds(), caps)) {
+            return false;
+        }
+
+        // Local coords are derived from position, so merged geometries must share a view matrix.
+        SkASSERT(this->usesLocalCoords() == that->usesLocalCoords());
+        if (this->usesLocalCoords() && !this->viewMatrix().cheapEqualTo(that->viewMatrix())) {
+            return false;
+        }
+
+        // In the event of two batches, one who can tweak, one who cannot, we just fall back to
+        // not tweaking
+        if (this->canTweakAlphaForCoverage() != that->canTweakAlphaForCoverage()) {
+            fBatch.fCanTweakAlphaForCoverage = false;
+        }
+
+        fGeoData.push_back_n(that->geoData()->count(), that->geoData()->begin());
+        this->joinBounds(that->bounds());
+        return true;
+    }
+
+    GrColor color() const { return fBatch.fColor; }
+    bool linesOnly() const { return fBatch.fLinesOnly; }
+    bool usesLocalCoords() const { return fBatch.fUsesLocalCoords; }
+    bool canTweakAlphaForCoverage() const { return fBatch.fCanTweakAlphaForCoverage; }
+    const SkMatrix& viewMatrix() const { return fGeoData[0].fViewMatrix; }
+    bool coverageIgnored() const { return fBatch.fCoverageIgnored; }
+
+    // Snapshot of pipeline-optimization state shared by all geometries in the batch.
+    struct BatchTracker {
+        GrColor fColor;
+        bool fUsesLocalCoords;
+        bool fColorIgnored;
+        bool fCoverageIgnored;
+        bool fLinesOnly;
+        bool fCanTweakAlphaForCoverage;
+    };
+
+    BatchTracker fBatch;
+    SkSTArray<1, Geometry, true> fGeoData;
+};
+
+// Packages the draw arguments into an AAFlatteningConvexPathBatch and submits it.
+// Empty paths are trivially "drawn". Fills are encoded as a negative stroke width
+// with a miter join placeholder.
+bool GrAALinearizingConvexPathRenderer::onDrawPath(const DrawPathArgs& args) {
+    if (args.fPath->isEmpty()) {
+        return true;
+    }
+    AAFlatteningConvexPathBatch::Geometry geometry;
+    geometry.fColor = args.fColor;
+    geometry.fViewMatrix = *args.fViewMatrix;
+    geometry.fPath = *args.fPath;
+    // -1 stroke width is the fill sentinel understood by GrAAConvexTessellator.
+    geometry.fStrokeWidth = args.fStroke->isFillStyle() ? -1.0f : args.fStroke->getWidth();
+    geometry.fJoin = args.fStroke->isFillStyle() ? SkPaint::Join::kMiter_Join :
+                                                   args.fStroke->getJoin();
+    geometry.fMiterLimit = args.fStroke->getMiter();
+
+    SkAutoTUnref<GrDrawBatch> batch(AAFlatteningConvexPathBatch::Create(geometry));
+    args.fTarget->drawBatch(*args.fPipelineBuilder, batch);
+
+    return true;
+}
+
+///////////////////////////////////////////////////////////////////////////////////////////////////
+
+#ifdef GR_TEST_UTILS
+
+// Test-harness factory: builds a batch from a random convex path, color, and
+// invertible matrix. Stroke fields are left default-initialized here.
+DRAW_BATCH_TEST_DEFINE(AAFlatteningConvexPathBatch) {
+    AAFlatteningConvexPathBatch::Geometry geometry;
+    geometry.fColor = GrRandomColor(random);
+    geometry.fViewMatrix = GrTest::TestMatrixInvertible(random);
+    geometry.fPath = GrTest::TestPathConvex(random);
+
+    return AAFlatteningConvexPathBatch::Create(geometry);
+}
+
+#endif
diff --git a/src/gpu/batches/GrAALinearizingConvexPathRenderer.h b/src/gpu/batches/GrAALinearizingConvexPathRenderer.h
new file mode 100644
index 0000000000..57d21e0c1e
--- /dev/null
+++ b/src/gpu/batches/GrAALinearizingConvexPathRenderer.h
@@ -0,0 +1,24 @@
+
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrAALinearizingConvexPathRenderer_DEFINED
+#define GrAALinearizingConvexPathRenderer_DEFINED
+
+#include "GrPathRenderer.h"
+
+/**
+ * Path renderer that draws antialiased convex paths by linearizing them into
+ * triangle fans with per-vertex coverage (see GrAAConvexTessellator).
+ */
+class GrAALinearizingConvexPathRenderer : public GrPathRenderer {
+public:
+    GrAALinearizingConvexPathRenderer();
+
+private:
+    bool onCanDrawPath(const CanDrawPathArgs&) const override;
+
+    bool onDrawPath(const DrawPathArgs&) override;
+};
+
+#endif
diff --git a/src/gpu/batches/GrDashLinePathRenderer.cpp b/src/gpu/batches/GrDashLinePathRenderer.cpp
new file mode 100644
index 0000000000..e26f5d7627
--- /dev/null
+++ b/src/gpu/batches/GrDashLinePathRenderer.cpp
@@ -0,0 +1,26 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "GrDashLinePathRenderer.h"
+
+#include "GrGpu.h"
+#include "effects/GrDashingEffect.h"
+
+// Handles only dashed single-line-segment paths that the dashing effect can draw
+// under the given view matrix.
+bool GrDashLinePathRenderer::onCanDrawPath(const CanDrawPathArgs& args) const {
+    SkPoint pts[2];
+    if (args.fStroke->isDashed() && args.fPath->isLine(pts)) {
+        return GrDashingEffect::CanDrawDashLine(pts, *args.fStroke, *args.fViewMatrix);
+    }
+    return false;
+}
+
+// Delegates the actual drawing to GrDashingEffect. onCanDrawPath already verified
+// the path is a line, hence the assert rather than a runtime check.
+bool GrDashLinePathRenderer::onDrawPath(const DrawPathArgs& args) {
+    SkPoint pts[2];
+    SkAssertResult(args.fPath->isLine(pts));
+    return GrDashingEffect::DrawDashLine(args.fTarget, *args.fPipelineBuilder, args.fColor,
+                                         *args.fViewMatrix, pts, args.fAntiAlias, *args.fStroke);
+}
diff --git a/src/gpu/batches/GrDashLinePathRenderer.h b/src/gpu/batches/GrDashLinePathRenderer.h
new file mode 100644
index 0000000000..f21c73b688
--- /dev/null
+++ b/src/gpu/batches/GrDashLinePathRenderer.h
@@ -0,0 +1,29 @@
+
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrDashLinePathRenderer_DEFINED
+#define GrDashLinePathRenderer_DEFINED
+
+#include "GrPathRenderer.h"
+
+/**
+ * Path renderer for dashed line segments, implemented on top of GrDashingEffect.
+ * Does not support stenciling.
+ */
+class GrDashLinePathRenderer : public GrPathRenderer {
+private:
+    bool onCanDrawPath(const CanDrawPathArgs&) const override;
+
+    StencilSupport onGetStencilSupport(const SkPath&, const GrStrokeInfo&) const override {
+        return kNoSupport_StencilSupport;
+    }
+
+    bool onDrawPath(const DrawPathArgs&) override;
+
+    // NOTE(review): fGpu appears unused by the methods visible in this file — confirm
+    // whether it is still needed before relying on it.
+    SkAutoTUnref<GrGpu> fGpu;
+    typedef GrPathRenderer INHERITED;
+};
+
+
+#endif
diff --git a/src/gpu/batches/GrStencilAndCoverPathRenderer.cpp b/src/gpu/batches/GrStencilAndCoverPathRenderer.cpp
new file mode 100644
index 0000000000..1a32e3fb75
--- /dev/null
+++ b/src/gpu/batches/GrStencilAndCoverPathRenderer.cpp
@@ -0,0 +1,158 @@
+
+/*
+ * Copyright 2012 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+#include "GrStencilAndCoverPathRenderer.h"
+#include "GrCaps.h"
+#include "GrContext.h"
+#include "GrGpu.h"
+#include "GrPath.h"
+#include "GrRenderTarget.h"
+#include "GrResourceProvider.h"
+#include "GrStrokeInfo.h"
+
+/*
+ * For now paths only natively support winding and even odd fill types
+ */
+// Maps an SkPath fill type to the GPU path-rendering fill type. Inverse variants
+// map to the same GPU fill as their non-inverse counterparts; inversion is handled
+// separately by the caller (see onDrawPath's stencil-and-cover of a fullscreen rect).
+static GrPathRendering::FillType convert_skpath_filltype(SkPath::FillType fill) {
+    switch (fill) {
+        default:
+            SkFAIL("Incomplete Switch\n");
+        case SkPath::kWinding_FillType:
+        case SkPath::kInverseWinding_FillType:
+            return GrPathRendering::kWinding_FillType;
+        case SkPath::kEvenOdd_FillType:
+        case SkPath::kInverseEvenOdd_FillType:
+            return GrPathRendering::kEvenOdd_FillType;
+    }
+}
+
+// Factory: returns a renderer only when the GPU advertises path-rendering support
+// (e.g. NV_path_rendering); otherwise nullptr so callers fall back to other renderers.
+GrPathRenderer* GrStencilAndCoverPathRenderer::Create(GrResourceProvider* resourceProvider,
+                                                      const GrCaps& caps) {
+    if (caps.shaderCaps()->pathRenderingSupport()) {
+        return new GrStencilAndCoverPathRenderer(resourceProvider);
+    } else {
+        return nullptr;
+    }
+}
+
+// Stores the (unowned) resource provider used to create and cache GrPath objects.
+GrStencilAndCoverPathRenderer::GrStencilAndCoverPathRenderer(GrResourceProvider* resourceProvider)
+    : fResourceProvider(resourceProvider) {
+}
+
+// Rejects hairlines and draws that already use the stencil buffer. AA draws are
+// only accepted when the target's stencil buffer is multisampled, since this
+// renderer has no per-path AA of its own.
+bool GrStencilAndCoverPathRenderer::onCanDrawPath(const CanDrawPathArgs& args) const {
+    if (args.fStroke->isHairlineStyle()) {
+        return false;
+    }
+    if (!args.fPipelineBuilder->getStencil().isDisabled()) {
+        return false;
+    }
+    if (args.fAntiAlias) {
+        return args.fPipelineBuilder->getRenderTarget()->isStencilBufferMultisampled();
+    } else {
+        return true; // doesn't do per-path AA, relies on the target having MSAA
+    }
+}
+
+// Returns a ref'ed GrPath for (skPath, stroke), consulting the resource cache by
+// unique key first. Non-volatile paths are inserted into the cache on miss;
+// volatile ones are created fresh and left uncached. Caller owns the returned ref.
+static GrPath* get_gr_path(GrResourceProvider* resourceProvider, const SkPath& skPath,
+                           const GrStrokeInfo& stroke) {
+    GrUniqueKey key;
+    bool isVolatile;
+    GrPath::ComputeKey(skPath, stroke, &key, &isVolatile);
+    SkAutoTUnref<GrPath> path(
+        static_cast<GrPath*>(resourceProvider->findAndRefResourceByUniqueKey(key)));
+    if (!path) {
+        path.reset(resourceProvider->createPath(skPath, stroke));
+        if (!isVolatile) {
+            resourceProvider->assignUniqueKeyToResource(key, path);
+        }
+    } else {
+        // Cache hit: the cached GrPath must match what we would have built.
+        SkASSERT(path->isEqualTo(skPath, stroke));
+    }
+    return path.detach();
+}
+
+// Stencils the path directly via the GPU path API. Inverse fills are not supported
+// here (asserted); the color is irrelevant for a stencil-only pass, so white is used.
+void GrStencilAndCoverPathRenderer::onStencilPath(const StencilPathArgs& args) {
+    SkASSERT(!args.fPath->isInverseFillType());
+    SkAutoTUnref<GrPathProcessor> pp(GrPathProcessor::Create(GrColor_WHITE, *args.fViewMatrix));
+    SkAutoTUnref<GrPath> p(get_gr_path(fResourceProvider, *args.fPath, *args.fStroke));
+    args.fTarget->stencilPath(*args.fPipelineBuilder, pp, p,
+                              convert_skpath_filltype(args.fPath->getFillType()));
+}
+
+// Draws the path with GPU stencil-then-cover. Regular fills stencil the path and
+// cover it with GrDrawTarget::drawPath; inverse fills stencil the path, then cover
+// the whole render target with a rect whose stencil test passes only *outside*
+// the path (and inside the clip). Restores the builder's stencil state on exit.
+bool GrStencilAndCoverPathRenderer::onDrawPath(const DrawPathArgs& args) {
+    SkASSERT(!args.fStroke->isHairlineStyle());
+    const SkPath& path = *args.fPath;
+    GrPipelineBuilder* pipelineBuilder = args.fPipelineBuilder;
+    const SkMatrix& viewMatrix = *args.fViewMatrix;
+
+    SkASSERT(pipelineBuilder->getStencil().isDisabled());
+
+    if (args.fAntiAlias) {
+        // onCanDrawPath guaranteed a multisampled stencil buffer for the AA case.
+        SkASSERT(pipelineBuilder->getRenderTarget()->isStencilBufferMultisampled());
+        pipelineBuilder->enableState(GrPipelineBuilder::kHWAntialias_Flag);
+    }
+
+    SkAutoTUnref<GrPath> p(get_gr_path(fResourceProvider, path, *args.fStroke));
+
+    if (path.isInverseFillType()) {
+        GR_STATIC_CONST_SAME_STENCIL(kInvertedStencilPass,
+            kKeep_StencilOp,
+            kZero_StencilOp,
+            // We know our rect will hit pixels outside the clip and the user bits will be 0
+            // outside the clip. So we can't just fill where the user bits are 0. We also need to
+            // check that the clip bit is set.
+            kEqualIfInClip_StencilFunc,
+            0xffff,
+            0x0000,
+            0xffff);
+
+        pipelineBuilder->setStencil(kInvertedStencilPass);
+
+        // fake inverse with a stencil and cover
+        SkAutoTUnref<GrPathProcessor> pp(GrPathProcessor::Create(GrColor_WHITE, viewMatrix));
+        args.fTarget->stencilPath(*pipelineBuilder, pp, p,
+                                  convert_skpath_filltype(path.getFillType()));
+
+        SkMatrix invert = SkMatrix::I();
+        SkRect bounds =
+            SkRect::MakeLTRB(0, 0, SkIntToScalar(pipelineBuilder->getRenderTarget()->width()),
+                             SkIntToScalar(pipelineBuilder->getRenderTarget()->height()));
+        SkMatrix vmi;
+        // mapRect through persp matrix may not be correct
+        if (!viewMatrix.hasPerspective() && viewMatrix.invert(&vmi)) {
+            // Pre-map the cover rect into src space so it can be drawn with the view matrix.
+            vmi.mapRect(&bounds);
+            // theoretically could set bloat = 0, instead leave it because of matrix inversion
+            // precision.
+            SkScalar bloat = viewMatrix.getMaxScale() * SK_ScalarHalf;
+            bounds.outset(bloat, bloat);
+        } else {
+            // Perspective: draw the rect in device space and invert local coords instead.
+            if (!viewMatrix.invert(&invert)) {
+                return false;
+            }
+        }
+        const SkMatrix& viewM = viewMatrix.hasPerspective() ? SkMatrix::I() : viewMatrix;
+        args.fTarget->drawNonAARect(*pipelineBuilder, args.fColor, viewM, bounds, invert);
+    } else {
+        // Normal fill: pass where the stencil value is non-zero, zeroing as we cover.
+        GR_STATIC_CONST_SAME_STENCIL(kStencilPass,
+            kZero_StencilOp,
+            kKeep_StencilOp,
+            kNotEqual_StencilFunc,
+            0xffff,
+            0x0000,
+            0xffff);
+
+        pipelineBuilder->setStencil(kStencilPass);
+        SkAutoTUnref<GrPathProcessor> pp(GrPathProcessor::Create(args.fColor, viewMatrix));
+        args.fTarget->drawPath(*pipelineBuilder, pp, p,
+                               convert_skpath_filltype(path.getFillType()));
+    }
+
+    // Leave the pipeline builder as we found it (stencil disabled).
+    pipelineBuilder->stencil()->setDisabled();
+    return true;
+}
diff --git a/src/gpu/batches/GrStencilAndCoverPathRenderer.h b/src/gpu/batches/GrStencilAndCoverPathRenderer.h
new file mode 100644
index 0000000000..bb8cdb0dd7
--- /dev/null
+++ b/src/gpu/batches/GrStencilAndCoverPathRenderer.h
@@ -0,0 +1,45 @@
+
+/*
+ * Copyright 2012 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrBuiltInPathRenderer_DEFINED
+#define GrBuiltInPathRenderer_DEFINED
+
+#include "GrPathRenderer.h"
+
+class GrContext;
+class GrGpu;
+
+/**
+ * Uses GrGpu::stencilPath followed by a cover rectangle. This subclass doesn't apply AA; it relies
+ * on the target having MSAA if AA is desired.
+ */
+class GrStencilAndCoverPathRenderer : public GrPathRenderer {
+public:
+
+    // Returns nullptr when the GPU lacks path-rendering support.
+    static GrPathRenderer* Create(GrResourceProvider*, const GrCaps&);
+
+
+private:
+    // This renderer can stencil paths natively; it never needs a fallback fill pass.
+    StencilSupport onGetStencilSupport(const SkPath&, const GrStrokeInfo&) const override {
+        return GrPathRenderer::kStencilOnly_StencilSupport;
+    }
+
+    bool onCanDrawPath(const CanDrawPathArgs&) const override;
+
+    bool onDrawPath(const DrawPathArgs&) override;
+
+    void onStencilPath(const StencilPathArgs&) override;
+
+    GrStencilAndCoverPathRenderer(GrResourceProvider*);
+
+    // Unowned; used to create/cache GrPath objects.
+    GrResourceProvider* fResourceProvider;
+
+    typedef GrPathRenderer INHERITED;
+};
+
+#endif
diff --git a/src/gpu/batches/GrTessellatingPathRenderer.cpp b/src/gpu/batches/GrTessellatingPathRenderer.cpp
new file mode 100644
index 0000000000..bc9a6b5d04
--- /dev/null
+++ b/src/gpu/batches/GrTessellatingPathRenderer.cpp
@@ -0,0 +1,1660 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "GrTessellatingPathRenderer.h"
+
+#include "GrBatchFlushState.h"
+#include "GrBatchTest.h"
+#include "GrDefaultGeoProcFactory.h"
+#include "GrPathUtils.h"
+#include "GrVertices.h"
+#include "GrResourceCache.h"
+#include "GrResourceProvider.h"
+#include "SkChunkAlloc.h"
+#include "SkGeometry.h"
+
+#include "batches/GrVertexBatch.h"
+
+#include <stdio.h>
+
+/*
+ * This path renderer tessellates the path into triangles, uploads the triangles to a
+ * vertex buffer, and renders them with a single draw call. It does not currently do
+ * antialiasing, so it must be used in conjunction with multisampling.
+ *
+ * There are six stages to the algorithm:
+ *
+ * 1) Linearize the path contours into piecewise linear segments (path_to_contours()).
+ * 2) Build a mesh of edges connecting the vertices (build_edges()).
+ * 3) Sort the vertices in Y (and secondarily in X) (merge_sort()).
+ * 4) Simplify the mesh by inserting new vertices at intersecting edges (simplify()).
+ * 5) Tessellate the simplified mesh into monotone polygons (tessellate()).
+ * 6) Triangulate the monotone polygons directly into a vertex buffer (polys_to_triangles()).
+ *
+ * The vertex sorting in step (3) is a merge sort, since it plays well with the linked list
+ * of vertices (and the necessity of inserting new vertices on intersection).
+ *
+ * Stages (4) and (5) use an active edge list, which is a list of all edges for which the
+ * sweep line has crossed the top vertex, but not the bottom vertex. It's sorted
+ * left-to-right based on the point where both edges are active (when both top vertices
+ * have been seen, so the "lower" top vertex of the two). If the top vertices are equal
+ * (shared), it's sorted based on the last point where both edges are active, so the
+ * "upper" bottom vertex.
+ *
+ * The most complex step is the simplification (4). It's based on the Bentley-Ottman
+ * line-sweep algorithm, but due to floating point inaccuracy, the intersection points are
+ * not exact and may violate the mesh topology or active edge list ordering. We
+ * accommodate this by adjusting the topology of the mesh and AEL to match the intersection
+ * points. This occurs in three ways:
+ *
+ * A) Intersections may cause a shortened edge to no longer be ordered with respect to its
+ * neighbouring edges at the top or bottom vertex. This is handled by merging the
+ * edges (merge_collinear_edges()).
+ * B) Intersections may cause an edge to violate the left-to-right ordering of the
+ * active edge list. This is handled by splitting the neighbour edge on the
+ * intersected vertex (cleanup_active_edges()).
+ * C) Shortening an edge may cause an active edge to become inactive or an inactive edge
+ * to become active. This is handled by removing or inserting the edge in the active
+ * edge list (fix_active_state()).
+ *
+ * The tessellation steps (5) and (6) are based on "Triangulating Simple Polygons and
+ * Equivalent Problems" (Fournier and Montuno); also a line-sweep algorithm. Note that it
+ * currently uses a linked list for the active edge list, rather than a 2-3 tree as the
+ * paper describes. The 2-3 tree gives O(lg N) lookups, but insertion and removal also
+ * become O(lg N). In all the test cases, it was found that the cost of frequent O(lg N)
+ * insertions and removals was greater than the cost of infrequent O(N) lookups with the
+ * linked list implementation. With the latter, all removals are O(1), and most insertions
+ * are O(1), since we know the adjacent edge in the active edge list based on the topology.
+ * Only type 2 vertices (see paper) require the O(N) lookups, and these are much less
+ * frequent. There may be other data structures worth investigating, however.
+ *
+ * Note that the orientation of the line sweep algorithms is determined by the aspect ratio of the
+ * path bounds. When the path is taller than it is wide, we sort vertices based on increasing Y
+ * coordinate, and secondarily by increasing X coordinate. When the path is wider than it is tall,
+ * we sort by increasing X coordinate, but secondarily by *decreasing* Y coordinate. This is so
+ * that the "left" and "right" orientation in the code remains correct (edges to the left are
+ * increasing in Y; edges to the right are decreasing in Y). That is, the setting rotates 90
+ * degrees counterclockwise, rather than transposing.
+ */
+#define LOGGING_ENABLED 0
+#define WIREFRAME 0
+
+#if LOGGING_ENABLED
+#define LOG printf
+#else
+#define LOG(...)
+#endif
+
+#define ALLOC_NEW(Type, args, alloc) new (alloc.allocThrow(sizeof(Type))) Type args
+
+namespace {
+
+struct Vertex;
+struct Edge;
+struct Poly;
+
+// Inserts 't' between 'prev' and 'next' in a doubly-linked list threaded through
+// the member pointers Prev/Next, updating *head/*tail when inserting at an end
+// (either may be null if the caller does not track that end).
+template <class T, T* T::*Prev, T* T::*Next>
+void insert(T* t, T* prev, T* next, T** head, T** tail) {
+    t->*Prev = prev;
+    t->*Next = next;
+    if (prev) {
+        prev->*Next = t;
+    } else if (head) {
+        *head = t;
+    }
+    if (next) {
+        next->*Prev = t;
+    } else if (tail) {
+        *tail = t;
+    }
+}
+
+// Unlinks 't' from a doubly-linked list threaded through Prev/Next, updating
+// *head/*tail when removing an end node, and clears t's own links.
+template <class T, T* T::*Prev, T* T::*Next>
+void remove(T* t, T** head, T** tail) {
+    if (t->*Prev) {
+        t->*Prev->*Next = t->*Next;
+    } else if (head) {
+        *head = t->*Next;
+    }
+    if (t->*Next) {
+        t->*Next->*Prev = t->*Prev;
+    } else if (tail) {
+        *tail = t->*Prev;
+    }
+    t->*Prev = t->*Next = nullptr;
+}
+
+/**
+ * Vertices are used in three ways: first, the path contours are converted into a
+ * circularly-linked list of Vertices for each contour. After edge construction, the same Vertices
+ * are re-ordered by the merge sort according to the sweep_lt comparator (usually, increasing
+ * in Y) using the same fPrev/fNext pointers that were used for the contours, to avoid
+ * reallocation. Finally, MonotonePolys are built containing a circularly-linked list of
+ * Vertices. (Currently, those Vertices are newly-allocated for the MonotonePolys, since
+ * an individual Vertex from the path mesh may belong to multiple
+ * MonotonePolys, so the original Vertices cannot be re-used.)
+ */
+
+// Mesh vertex; see the block comment above for the three ways Vertices are used.
+struct Vertex {
+  Vertex(const SkPoint& point)
+    : fPoint(point), fPrev(nullptr), fNext(nullptr)
+    , fFirstEdgeAbove(nullptr), fLastEdgeAbove(nullptr)
+    , fFirstEdgeBelow(nullptr), fLastEdgeBelow(nullptr)
+    , fProcessed(false)
+#if LOGGING_ENABLED
+    , fID (-1.0f)
+#endif
+    {}
+    SkPoint fPoint;           // Vertex position
+    Vertex* fPrev;            // Linked list of contours, then Y-sorted vertices.
+    Vertex* fNext;            // "
+    Edge*   fFirstEdgeAbove;  // Linked list of edges above this vertex.
+    Edge*   fLastEdgeAbove;   // "
+    Edge*   fFirstEdgeBelow;  // Linked list of edges below this vertex.
+    Edge*   fLastEdgeBelow;   // "
+    bool    fProcessed;       // Has this vertex been seen in simplify()?
+#if LOGGING_ENABLED
+    float   fID;              // Identifier used for logging.
+#endif
+};
+
+/***************************************************************************************/
+
+// Point ordering predicate; which pair is used depends on the sweep orientation
+// (see the note about aspect ratio in the file header comment).
+typedef bool (*CompareFunc)(const SkPoint& a, const SkPoint& b);
+
+// Bundles the "less than" and "greater than" predicates for the chosen sweep axis.
+struct Comparator {
+    CompareFunc sweep_lt;
+    CompareFunc sweep_gt;
+};
+
+// Horizontal sweep: primary key increasing X, secondary key *decreasing* Y
+// (the 90-degree rotation described in the file header comment).
+bool sweep_lt_horiz(const SkPoint& a, const SkPoint& b) {
+    return a.fX == b.fX ? a.fY > b.fY : a.fX < b.fX;
+}
+
+// Vertical sweep: primary key increasing Y, secondary key increasing X.
+bool sweep_lt_vert(const SkPoint& a, const SkPoint& b) {
+    return a.fY == b.fY ? a.fX < b.fX : a.fY < b.fY;
+}
+
+// Exact strict inverse of sweep_lt_horiz.
+bool sweep_gt_horiz(const SkPoint& a, const SkPoint& b) {
+    return a.fX == b.fX ? a.fY < b.fY : a.fX > b.fX;
+}
+
+// Exact strict inverse of sweep_lt_vert.
+bool sweep_gt_vert(const SkPoint& a, const SkPoint& b) {
+    return a.fY == b.fY ? a.fX > b.fX : a.fY > b.fY;
+}
+}
+
+// Appends v's position to the output buffer and returns the advanced write pointer.
+inline SkPoint* emit_vertex(Vertex* v, SkPoint* data) {
+    *data++ = v->fPoint;
+    return data;
+}
+
+// Writes one triangle to the output buffer: three vertices normally, or the three
+// edges as line-pairs (six vertices) when WIREFRAME debugging is enabled.
+SkPoint* emit_triangle(Vertex* v0, Vertex* v1, Vertex* v2, SkPoint* data) {
+#if WIREFRAME
+    data = emit_vertex(v0, data);
+    data = emit_vertex(v1, data);
+    data = emit_vertex(v1, data);
+    data = emit_vertex(v2, data);
+    data = emit_vertex(v2, data);
+    data = emit_vertex(v0, data);
+#else
+    data = emit_vertex(v0, data);
+    data = emit_vertex(v1, data);
+    data = emit_vertex(v2, data);
+#endif
+    return data;
+}
+
+// Head/tail of the active edge list (edges crossed by the current sweep position).
+struct EdgeList {
+    EdgeList() : fHead(nullptr), fTail(nullptr) {}
+    Edge* fHead;
+    Edge* fTail;
+};
+
+/**
+ * An Edge joins a top Vertex to a bottom Vertex. Edge ordering for the list of "edges above" and
+ * "edge below" a vertex as well as for the active edge list is handled by isLeftOf()/isRightOf().
+ * Note that an Edge will give occasionally dist() != 0 for its own endpoints (because floating
+ * point). For speed, that case is only tested by the callers which require it (e.g.,
+ * cleanup_active_edges()). Edges also handle checking for intersection with other edges.
+ * Currently, this converts the edges to the parametric form, in order to avoid doing a division
+ * until an intersection has been confirmed. This is slightly slower in the "found" case, but
+ * a lot faster in the "not found" case.
+ *
+ * The coefficients of the line equation are stored in double precision to avoid catastrophic
+ * cancellation in the isLeftOf() and isRightOf() checks. Using doubles ensures that the result is
+ * correct in float, since it's a polynomial of degree 2. The intersect() function, being
+ * degree 5, is still subject to catastrophic cancellation. We deal with that by assuming its
+ * output may be incorrect, and adjusting the mesh topology to match (see comment at the top of
+ * this file).
+ */
+
+struct Edge {
+    Edge(Vertex* top, Vertex* bottom, int winding)
+        : fWinding(winding)
+        , fTop(top)
+        , fBottom(bottom)
+        , fLeft(nullptr)
+        , fRight(nullptr)
+        , fPrevEdgeAbove(nullptr)
+        , fNextEdgeAbove(nullptr)
+        , fPrevEdgeBelow(nullptr)
+        , fNextEdgeBelow(nullptr)
+        , fLeftPoly(nullptr)
+        , fRightPoly(nullptr) {
+            recompute();
+        }
+    int      fWinding;          // 1 == edge goes downward; -1 = edge goes upward.
+    Vertex*  fTop;              // The top vertex in vertex-sort-order (sweep_lt).
+    Vertex*  fBottom;           // The bottom vertex in vertex-sort-order.
+    Edge*    fLeft;             // The linked list of edges in the active edge list.
+    Edge*    fRight;            // "
+    Edge*    fPrevEdgeAbove;    // The linked list of edges in the bottom Vertex's "edges above".
+    Edge*    fNextEdgeAbove;    // "
+    Edge*    fPrevEdgeBelow;    // The linked list of edges in the top Vertex's "edges below".
+    Edge*    fNextEdgeBelow;    // "
+    Poly*    fLeftPoly;         // The Poly to the left of this edge, if any.
+    Poly*    fRightPoly;        // The Poly to the right of this edge, if any.
+    double   fDX;               // The line equation for this edge, in implicit form.
+    double   fDY;               // fDY * x + fDX * y + fC = 0, for point (x, y) on the line.
+    double   fC;
+    // Signed distance of p from the line; sign gives sidedness, see isLeftOf/isRightOf.
+    double dist(const SkPoint& p) const {
+        return fDY * p.fX - fDX * p.fY + fC;
+    }
+    bool isRightOf(Vertex* v) const {
+        return dist(v->fPoint) < 0.0;
+    }
+    bool isLeftOf(Vertex* v) const {
+        return dist(v->fPoint) > 0.0;
+    }
+    // Rebuilds the implicit line equation from the endpoints; call after either
+    // endpoint moves (e.g. when an edge is shortened at an intersection).
+    void recompute() {
+        fDX = static_cast<double>(fBottom->fPoint.fX) - fTop->fPoint.fX;
+        fDY = static_cast<double>(fBottom->fPoint.fY) - fTop->fPoint.fY;
+        fC = static_cast<double>(fTop->fPoint.fY) * fBottom->fPoint.fX -
+             static_cast<double>(fTop->fPoint.fX) * fBottom->fPoint.fY;
+    }
+    // Computes the intersection of this edge with 'other', writing it to *p and
+    // returning true only when it lies within both segments. Edges sharing an
+    // endpoint are treated as non-intersecting.
+    bool intersect(const Edge& other, SkPoint* p) {
+        LOG("intersecting %g -> %g with %g -> %g\n",
+            fTop->fID, fBottom->fID,
+            other.fTop->fID, other.fBottom->fID);
+        if (fTop == other.fTop || fBottom == other.fBottom) {
+            return false;
+        }
+        double denom = fDX * other.fDY - fDY * other.fDX;
+        if (denom == 0.0) {
+            return false;  // Parallel (or degenerate) edges never intersect.
+        }
+        double dx = static_cast<double>(fTop->fPoint.fX) - other.fTop->fPoint.fX;
+        double dy = static_cast<double>(fTop->fPoint.fY) - other.fTop->fPoint.fY;
+        double sNumer = dy * other.fDX - dx * other.fDY;
+        double tNumer = dy * fDX - dx * fDY;
+        // If (sNumer / denom) or (tNumer / denom) is not in [0..1], exit early.
+        // This saves us doing the divide below unless absolutely necessary.
+        if (denom > 0.0 ? (sNumer < 0.0 || sNumer > denom || tNumer < 0.0 || tNumer > denom)
+                        : (sNumer > 0.0 || sNumer < denom || tNumer > 0.0 || tNumer < denom)) {
+            return false;
+        }
+        double s = sNumer / denom;
+        SkASSERT(s >= 0.0 && s <= 1.0);
+        p->fX = SkDoubleToScalar(fTop->fPoint.fX + s * fDX);
+        p->fY = SkDoubleToScalar(fTop->fPoint.fY + s * fDY);
+        return true;
+    }
+    // An edge is active if it is linked into the AEL (or is its sole head element).
+    bool isActive(EdgeList* activeEdges) const {
+        return activeEdges && (fLeft || fRight || activeEdges->fHead == this);
+    }
+};
+
+/***************************************************************************************/
+
+struct Poly {
+ Poly(int winding)
+ : fWinding(winding)
+ , fHead(nullptr)
+ , fTail(nullptr)
+ , fActive(nullptr)
+ , fNext(nullptr)
+ , fPartner(nullptr)
+ , fCount(0)
+ {
+#if LOGGING_ENABLED
+ static int gID = 0;
+ fID = gID++;
+ LOG("*** created Poly %d\n", fID);
+#endif
+ }
+ typedef enum { kNeither_Side, kLeft_Side, kRight_Side } Side;
+ struct MonotonePoly {
+ MonotonePoly()
+ : fSide(kNeither_Side)
+ , fHead(nullptr)
+ , fTail(nullptr)
+ , fPrev(nullptr)
+ , fNext(nullptr) {}
+ Side fSide;
+ Vertex* fHead;
+ Vertex* fTail;
+ MonotonePoly* fPrev;
+ MonotonePoly* fNext;
+ bool addVertex(Vertex* v, Side side, SkChunkAlloc& alloc) {
+ Vertex* newV = ALLOC_NEW(Vertex, (v->fPoint), alloc);
+ bool done = false;
+ if (fSide == kNeither_Side) {
+ fSide = side;
+ } else {
+ done = side != fSide;
+ }
+ if (fHead == nullptr) {
+ fHead = fTail = newV;
+ } else if (fSide == kRight_Side) {
+ newV->fPrev = fTail;
+ fTail->fNext = newV;
+ fTail = newV;
+ } else {
+ newV->fNext = fHead;
+ fHead->fPrev = newV;
+ fHead = newV;
+ }
+ return done;
+ }
+
+ SkPoint* emit(SkPoint* data) {
+ Vertex* first = fHead;
+ Vertex* v = first->fNext;
+ while (v != fTail) {
+ SkASSERT(v && v->fPrev && v->fNext);
+ Vertex* prev = v->fPrev;
+ Vertex* curr = v;
+ Vertex* next = v->fNext;
+ double ax = static_cast<double>(curr->fPoint.fX) - prev->fPoint.fX;
+ double ay = static_cast<double>(curr->fPoint.fY) - prev->fPoint.fY;
+ double bx = static_cast<double>(next->fPoint.fX) - curr->fPoint.fX;
+ double by = static_cast<double>(next->fPoint.fY) - curr->fPoint.fY;
+ if (ax * by - ay * bx >= 0.0) {
+ data = emit_triangle(prev, curr, next, data);
+ v->fPrev->fNext = v->fNext;
+ v->fNext->fPrev = v->fPrev;
+ if (v->fPrev == first) {
+ v = v->fNext;
+ } else {
+ v = v->fPrev;
+ }
+ } else {
+ v = v->fNext;
+ }
+ }
+ return data;
+ }
+ };
+ Poly* addVertex(Vertex* v, Side side, SkChunkAlloc& alloc) {
+ LOG("addVertex() to %d at %g (%g, %g), %s side\n", fID, v->fID, v->fPoint.fX, v->fPoint.fY,
+ side == kLeft_Side ? "left" : side == kRight_Side ? "right" : "neither");
+ Poly* partner = fPartner;
+ Poly* poly = this;
+ if (partner) {
+ fPartner = partner->fPartner = nullptr;
+ }
+ if (!fActive) {
+ fActive = ALLOC_NEW(MonotonePoly, (), alloc);
+ }
+ if (fActive->addVertex(v, side, alloc)) {
+ if (fTail) {
+ fActive->fPrev = fTail;
+ fTail->fNext = fActive;
+ fTail = fActive;
+ } else {
+ fHead = fTail = fActive;
+ }
+ if (partner) {
+ partner->addVertex(v, side, alloc);
+ poly = partner;
+ } else {
+ Vertex* prev = fActive->fSide == Poly::kLeft_Side ?
+ fActive->fHead->fNext : fActive->fTail->fPrev;
+ fActive = ALLOC_NEW(MonotonePoly, , alloc);
+ fActive->addVertex(prev, Poly::kNeither_Side, alloc);
+ fActive->addVertex(v, side, alloc);
+ }
+ }
+ fCount++;
+ return poly;
+ }
+ void end(Vertex* v, SkChunkAlloc& alloc) {
+ LOG("end() %d at %g, %g\n", fID, v->fPoint.fX, v->fPoint.fY);
+ if (fPartner) {
+ fPartner = fPartner->fPartner = nullptr;
+ }
+ addVertex(v, fActive->fSide == kLeft_Side ? kRight_Side : kLeft_Side, alloc);
+ }
+ SkPoint* emit(SkPoint *data) {
+ if (fCount < 3) {
+ return data;
+ }
+ LOG("emit() %d, size %d\n", fID, fCount);
+ for (MonotonePoly* m = fHead; m != nullptr; m = m->fNext) {
+ data = m->emit(data);
+ }
+ return data;
+ }
+ int fWinding;
+ MonotonePoly* fHead;
+ MonotonePoly* fTail;
+ MonotonePoly* fActive;
+ Poly* fNext;
+ Poly* fPartner;
+ int fCount;
+#if LOGGING_ENABLED
+ int fID;
+#endif
+};
+
+/***************************************************************************************/
+
+bool coincident(const SkPoint& a, const SkPoint& b) {
+ return a == b;
+}
+
+Poly* new_poly(Poly** head, Vertex* v, int winding, SkChunkAlloc& alloc) {
+ Poly* poly = ALLOC_NEW(Poly, (winding), alloc);
+ poly->addVertex(v, Poly::kNeither_Side, alloc);
+ poly->fNext = *head;
+ *head = poly;
+ return poly;
+}
+
+Vertex* append_point_to_contour(const SkPoint& p, Vertex* prev, Vertex** head,
+ SkChunkAlloc& alloc) {
+ Vertex* v = ALLOC_NEW(Vertex, (p), alloc);
+#if LOGGING_ENABLED
+ static float gID = 0.0f;
+ v->fID = gID++;
+#endif
+ if (prev) {
+ prev->fNext = v;
+ v->fPrev = prev;
+ } else {
+ *head = v;
+ }
+ return v;
+}
+
+Vertex* generate_quadratic_points(const SkPoint& p0,
+ const SkPoint& p1,
+ const SkPoint& p2,
+ SkScalar tolSqd,
+ Vertex* prev,
+ Vertex** head,
+ int pointsLeft,
+ SkChunkAlloc& alloc) {
+ SkScalar d = p1.distanceToLineSegmentBetweenSqd(p0, p2);
+ if (pointsLeft < 2 || d < tolSqd || !SkScalarIsFinite(d)) {
+ return append_point_to_contour(p2, prev, head, alloc);
+ }
+
+ const SkPoint q[] = {
+ { SkScalarAve(p0.fX, p1.fX), SkScalarAve(p0.fY, p1.fY) },
+ { SkScalarAve(p1.fX, p2.fX), SkScalarAve(p1.fY, p2.fY) },
+ };
+ const SkPoint r = { SkScalarAve(q[0].fX, q[1].fX), SkScalarAve(q[0].fY, q[1].fY) };
+
+ pointsLeft >>= 1;
+ prev = generate_quadratic_points(p0, q[0], r, tolSqd, prev, head, pointsLeft, alloc);
+ prev = generate_quadratic_points(r, q[1], p2, tolSqd, prev, head, pointsLeft, alloc);
+ return prev;
+}
+
+Vertex* generate_cubic_points(const SkPoint& p0,
+ const SkPoint& p1,
+ const SkPoint& p2,
+ const SkPoint& p3,
+ SkScalar tolSqd,
+ Vertex* prev,
+ Vertex** head,
+ int pointsLeft,
+ SkChunkAlloc& alloc) {
+ SkScalar d1 = p1.distanceToLineSegmentBetweenSqd(p0, p3);
+ SkScalar d2 = p2.distanceToLineSegmentBetweenSqd(p0, p3);
+ if (pointsLeft < 2 || (d1 < tolSqd && d2 < tolSqd) ||
+ !SkScalarIsFinite(d1) || !SkScalarIsFinite(d2)) {
+ return append_point_to_contour(p3, prev, head, alloc);
+ }
+ const SkPoint q[] = {
+ { SkScalarAve(p0.fX, p1.fX), SkScalarAve(p0.fY, p1.fY) },
+ { SkScalarAve(p1.fX, p2.fX), SkScalarAve(p1.fY, p2.fY) },
+ { SkScalarAve(p2.fX, p3.fX), SkScalarAve(p2.fY, p3.fY) }
+ };
+ const SkPoint r[] = {
+ { SkScalarAve(q[0].fX, q[1].fX), SkScalarAve(q[0].fY, q[1].fY) },
+ { SkScalarAve(q[1].fX, q[2].fX), SkScalarAve(q[1].fY, q[2].fY) }
+ };
+ const SkPoint s = { SkScalarAve(r[0].fX, r[1].fX), SkScalarAve(r[0].fY, r[1].fY) };
+ pointsLeft >>= 1;
+ prev = generate_cubic_points(p0, q[0], r[0], s, tolSqd, prev, head, pointsLeft, alloc);
+ prev = generate_cubic_points(s, r[1], q[2], p3, tolSqd, prev, head, pointsLeft, alloc);
+ return prev;
+}
+
+// Stage 1: convert the input path to a set of linear contours (linked list of Vertices).
+
+void path_to_contours(const SkPath& path, SkScalar tolerance, const SkRect& clipBounds,
+ Vertex** contours, SkChunkAlloc& alloc, bool *isLinear) {
+
+ SkScalar toleranceSqd = tolerance * tolerance;
+
+ SkPoint pts[4];
+ bool done = false;
+ *isLinear = true;
+ SkPath::Iter iter(path, false);
+ Vertex* prev = nullptr;
+ Vertex* head = nullptr;
+ if (path.isInverseFillType()) {
+ SkPoint quad[4];
+ clipBounds.toQuad(quad);
+ for (int i = 3; i >= 0; i--) {
+ prev = append_point_to_contour(quad[i], prev, &head, alloc);
+ }
+ head->fPrev = prev;
+ prev->fNext = head;
+ *contours++ = head;
+ head = prev = nullptr;
+ }
+ SkAutoConicToQuads converter;
+ while (!done) {
+ SkPath::Verb verb = iter.next(pts);
+ switch (verb) {
+ case SkPath::kConic_Verb: {
+ SkScalar weight = iter.conicWeight();
+ const SkPoint* quadPts = converter.computeQuads(pts, weight, toleranceSqd);
+ for (int i = 0; i < converter.countQuads(); ++i) {
+ int pointsLeft = GrPathUtils::quadraticPointCount(quadPts, tolerance);
+ prev = generate_quadratic_points(quadPts[0], quadPts[1], quadPts[2],
+ toleranceSqd, prev, &head, pointsLeft, alloc);
+ quadPts += 2;
+ }
+ *isLinear = false;
+ break;
+ }
+ case SkPath::kMove_Verb:
+ if (head) {
+ head->fPrev = prev;
+ prev->fNext = head;
+ *contours++ = head;
+ }
+ head = prev = nullptr;
+ prev = append_point_to_contour(pts[0], prev, &head, alloc);
+ break;
+ case SkPath::kLine_Verb: {
+ prev = append_point_to_contour(pts[1], prev, &head, alloc);
+ break;
+ }
+ case SkPath::kQuad_Verb: {
+ int pointsLeft = GrPathUtils::quadraticPointCount(pts, tolerance);
+ prev = generate_quadratic_points(pts[0], pts[1], pts[2], toleranceSqd, prev,
+ &head, pointsLeft, alloc);
+ *isLinear = false;
+ break;
+ }
+ case SkPath::kCubic_Verb: {
+ int pointsLeft = GrPathUtils::cubicPointCount(pts, tolerance);
+ prev = generate_cubic_points(pts[0], pts[1], pts[2], pts[3],
+ toleranceSqd, prev, &head, pointsLeft, alloc);
+ *isLinear = false;
+ break;
+ }
+ case SkPath::kClose_Verb:
+ if (head) {
+ head->fPrev = prev;
+ prev->fNext = head;
+ *contours++ = head;
+ }
+ head = prev = nullptr;
+ break;
+ case SkPath::kDone_Verb:
+ if (head) {
+ head->fPrev = prev;
+ prev->fNext = head;
+ *contours++ = head;
+ }
+ done = true;
+ break;
+ }
+ }
+}
+
+inline bool apply_fill_type(SkPath::FillType fillType, int winding) {
+ switch (fillType) {
+ case SkPath::kWinding_FillType:
+ return winding != 0;
+ case SkPath::kEvenOdd_FillType:
+ return (winding & 1) != 0;
+ case SkPath::kInverseWinding_FillType:
+ return winding == 1;
+ case SkPath::kInverseEvenOdd_FillType:
+ return (winding & 1) == 1;
+ default:
+ SkASSERT(false);
+ return false;
+ }
+}
+
+Edge* new_edge(Vertex* prev, Vertex* next, SkChunkAlloc& alloc, Comparator& c) {
+ int winding = c.sweep_lt(prev->fPoint, next->fPoint) ? 1 : -1;
+ Vertex* top = winding < 0 ? next : prev;
+ Vertex* bottom = winding < 0 ? prev : next;
+ return ALLOC_NEW(Edge, (top, bottom, winding), alloc);
+}
+
// Unlinks `edge` from the active edge list. The edge must currently be active.
void remove_edge(Edge* edge, EdgeList* edges) {
    LOG("removing edge %g -> %g\n", edge->fTop->fID, edge->fBottom->fID);
    SkASSERT(edge->isActive(edges));
    remove<Edge, &Edge::fLeft, &Edge::fRight>(edge, &edges->fHead, &edges->fTail);
}
+
+void insert_edge(Edge* edge, Edge* prev, EdgeList* edges) {
+ LOG("inserting edge %g -> %g\n", edge->fTop->fID, edge->fBottom->fID);
+ SkASSERT(!edge->isActive(edges));
+ Edge* next = prev ? prev->fRight : edges->fHead;
+ insert<Edge, &Edge::fLeft, &Edge::fRight>(edge, prev, next, &edges->fHead, &edges->fTail);
+}
+
+void find_enclosing_edges(Vertex* v, EdgeList* edges, Edge** left, Edge** right) {
+ if (v->fFirstEdgeAbove) {
+ *left = v->fFirstEdgeAbove->fLeft;
+ *right = v->fLastEdgeAbove->fRight;
+ return;
+ }
+ Edge* next = nullptr;
+ Edge* prev;
+ for (prev = edges->fTail; prev != nullptr; prev = prev->fLeft) {
+ if (prev->isLeftOf(v)) {
+ break;
+ }
+ next = prev;
+ }
+ *left = prev;
+ *right = next;
+ return;
+}
+
// Finds the pair of active edges between which `edge` should be inserted,
// writing them to *left and *right. Scans the active list left-to-right and
// stops at the first edge that lies to the right of `edge`; since the two
// edges may not overlap in sweep extent, the ordering is decided at whichever
// endpoint of one edge falls inside the other's span.
void find_enclosing_edges(Edge* edge, EdgeList* edges, Comparator& c, Edge** left, Edge** right) {
    Edge* prev = nullptr;
    Edge* next;
    for (next = edges->fHead; next != nullptr; next = next->fRight) {
        if ((c.sweep_gt(edge->fTop->fPoint, next->fTop->fPoint) && next->isRightOf(edge->fTop)) ||
            (c.sweep_gt(next->fTop->fPoint, edge->fTop->fPoint) && edge->isLeftOf(next->fTop)) ||
            (c.sweep_lt(edge->fBottom->fPoint, next->fBottom->fPoint) &&
             next->isRightOf(edge->fBottom)) ||
            (c.sweep_lt(next->fBottom->fPoint, edge->fBottom->fPoint) &&
             edge->isLeftOf(next->fBottom))) {
            break;
        }
        prev = next;
    }
    *left = prev;
    *right = next;
    return;
}
+
// Reconciles an edge's membership in the active edge list with the processed
// state of its endpoints: an edge belongs in the list exactly while its top
// vertex has been processed and its bottom vertex has not. Called after an
// edge's endpoints change (set_top/set_bottom/split).
void fix_active_state(Edge* edge, EdgeList* activeEdges, Comparator& c) {
    if (edge->isActive(activeEdges)) {
        if (edge->fBottom->fProcessed || !edge->fTop->fProcessed) {
            remove_edge(edge, activeEdges);
        }
    } else if (edge->fTop->fProcessed && !edge->fBottom->fProcessed) {
        Edge* left;
        Edge* right;
        find_enclosing_edges(edge, activeEdges, c, &left, &right);
        insert_edge(edge, left, activeEdges);
    }
}
+
// Inserts `edge` into vertex v's "edges above" list (v is the edge's bottom
// vertex), keeping the list ordered left-to-right. Degenerate edges -- zero
// length, or with endpoints out of sweep order -- are silently dropped.
void insert_edge_above(Edge* edge, Vertex* v, Comparator& c) {
    if (edge->fTop->fPoint == edge->fBottom->fPoint ||
        c.sweep_gt(edge->fTop->fPoint, edge->fBottom->fPoint)) {
        return;
    }
    LOG("insert edge (%g -> %g) above vertex %g\n", edge->fTop->fID, edge->fBottom->fID, v->fID);
    Edge* prev = nullptr;
    Edge* next;
    // Find the first existing edge lying to the right of this edge's top.
    for (next = v->fFirstEdgeAbove; next; next = next->fNextEdgeAbove) {
        if (next->isRightOf(edge->fTop)) {
            break;
        }
        prev = next;
    }
    insert<Edge, &Edge::fPrevEdgeAbove, &Edge::fNextEdgeAbove>(
        edge, prev, next, &v->fFirstEdgeAbove, &v->fLastEdgeAbove);
}
+
// Inserts `edge` into vertex v's "edges below" list (v is the edge's top
// vertex), keeping the list ordered left-to-right. Degenerate edges -- zero
// length, or with endpoints out of sweep order -- are silently dropped.
void insert_edge_below(Edge* edge, Vertex* v, Comparator& c) {
    if (edge->fTop->fPoint == edge->fBottom->fPoint ||
        c.sweep_gt(edge->fTop->fPoint, edge->fBottom->fPoint)) {
        return;
    }
    LOG("insert edge (%g -> %g) below vertex %g\n", edge->fTop->fID, edge->fBottom->fID, v->fID);
    Edge* prev = nullptr;
    Edge* next;
    // Find the first existing edge lying to the right of this edge's bottom.
    for (next = v->fFirstEdgeBelow; next; next = next->fNextEdgeBelow) {
        if (next->isRightOf(edge->fBottom)) {
            break;
        }
        prev = next;
    }
    insert<Edge, &Edge::fPrevEdgeBelow, &Edge::fNextEdgeBelow>(
        edge, prev, next, &v->fFirstEdgeBelow, &v->fLastEdgeBelow);
}
+
// Unlinks `edge` from its bottom vertex's "edges above" list.
void remove_edge_above(Edge* edge) {
    LOG("removing edge (%g -> %g) above vertex %g\n", edge->fTop->fID, edge->fBottom->fID,
        edge->fBottom->fID);
    remove<Edge, &Edge::fPrevEdgeAbove, &Edge::fNextEdgeAbove>(
        edge, &edge->fBottom->fFirstEdgeAbove, &edge->fBottom->fLastEdgeAbove);
}
+
// Unlinks `edge` from its top vertex's "edges below" list.
void remove_edge_below(Edge* edge) {
    LOG("removing edge (%g -> %g) below vertex %g\n", edge->fTop->fID, edge->fBottom->fID,
        edge->fTop->fID);
    remove<Edge, &Edge::fPrevEdgeBelow, &Edge::fNextEdgeBelow>(
        edge, &edge->fTop->fFirstEdgeBelow, &edge->fTop->fLastEdgeBelow);
}
+
// If merging has cancelled `edge`'s winding to zero, fully disconnects it:
// from both endpoints' above/below lists and (if present) from the active
// edge list. No-op for edges with nonzero winding.
void erase_edge_if_zero_winding(Edge* edge, EdgeList* edges) {
    if (edge->fWinding != 0) {
        return;
    }
    LOG("erasing edge (%g -> %g)\n", edge->fTop->fID, edge->fBottom->fID);
    remove_edge_above(edge);
    remove_edge_below(edge);
    if (edge->isActive(edges)) {
        remove_edge(edge, edges);
    }
}
+
+void merge_collinear_edges(Edge* edge, EdgeList* activeEdges, Comparator& c);
+
// Re-points `edge`'s top endpoint at v: re-links it into v's "edges below"
// list, recomputes its cached line equation, then restores the active-list
// and collinearity invariants that the move may have broken.
void set_top(Edge* edge, Vertex* v, EdgeList* activeEdges, Comparator& c) {
    remove_edge_below(edge);
    edge->fTop = v;
    edge->recompute();
    insert_edge_below(edge, v, c);
    fix_active_state(edge, activeEdges, c);
    merge_collinear_edges(edge, activeEdges, c);
}
+
// Re-points `edge`'s bottom endpoint at v: re-links it into v's "edges above"
// list, recomputes its cached line equation, then restores the active-list
// and collinearity invariants that the move may have broken.
void set_bottom(Edge* edge, Vertex* v, EdgeList* activeEdges, Comparator& c) {
    remove_edge_above(edge);
    edge->fBottom = v;
    edge->recompute();
    insert_edge_above(edge, v, c);
    fix_active_state(edge, activeEdges, c);
    merge_collinear_edges(edge, activeEdges, c);
}
+
// Merges two collinear edges that meet at their shared bottom vertex. If their
// tops also coincide, `other` absorbs `edge`'s winding and both are erased if
// cancelled to zero. Otherwise the edge whose top comes first in sweep order
// keeps only its non-overlapping upper span (its bottom moves up to the other
// top), and the shared span's winding accumulates on the other edge.
void merge_edges_above(Edge* edge, Edge* other, EdgeList* activeEdges, Comparator& c) {
    if (coincident(edge->fTop->fPoint, other->fTop->fPoint)) {
        LOG("merging coincident above edges (%g, %g) -> (%g, %g)\n",
            edge->fTop->fPoint.fX, edge->fTop->fPoint.fY,
            edge->fBottom->fPoint.fX, edge->fBottom->fPoint.fY);
        other->fWinding += edge->fWinding;
        erase_edge_if_zero_winding(other, activeEdges);
        edge->fWinding = 0;
        erase_edge_if_zero_winding(edge, activeEdges);
    } else if (c.sweep_lt(edge->fTop->fPoint, other->fTop->fPoint)) {
        other->fWinding += edge->fWinding;
        erase_edge_if_zero_winding(other, activeEdges);
        set_bottom(edge, other->fTop, activeEdges, c);
    } else {
        edge->fWinding += other->fWinding;
        erase_edge_if_zero_winding(edge, activeEdges);
        set_bottom(other, edge->fTop, activeEdges, c);
    }
}
+
// Merges two collinear edges that meet at their shared top vertex. If their
// bottoms also coincide, `other` absorbs `edge`'s winding and both are erased
// if cancelled to zero. Otherwise the edge whose bottom comes last in sweep
// order keeps only its non-overlapping lower span (its top moves down to the
// other bottom), and the shared span's winding accumulates on the other edge.
void merge_edges_below(Edge* edge, Edge* other, EdgeList* activeEdges, Comparator& c) {
    if (coincident(edge->fBottom->fPoint, other->fBottom->fPoint)) {
        LOG("merging coincident below edges (%g, %g) -> (%g, %g)\n",
            edge->fTop->fPoint.fX, edge->fTop->fPoint.fY,
            edge->fBottom->fPoint.fX, edge->fBottom->fPoint.fY);
        other->fWinding += edge->fWinding;
        erase_edge_if_zero_winding(other, activeEdges);
        edge->fWinding = 0;
        erase_edge_if_zero_winding(edge, activeEdges);
    } else if (c.sweep_lt(edge->fBottom->fPoint, other->fBottom->fPoint)) {
        edge->fWinding += other->fWinding;
        erase_edge_if_zero_winding(edge, activeEdges);
        set_top(other, edge->fBottom, activeEdges, c);
    } else {
        other->fWinding += edge->fWinding;
        erase_edge_if_zero_winding(other, activeEdges);
        set_top(edge, other->fBottom, activeEdges, c);
    }
}
+
// Detects and merges edges collinear with `edge` among its immediate neighbors
// in the above/below lists. Collinearity is inferred when a neighbor shares
// the relevant endpoint or fails the strict left-of ordering that distinct
// neighboring edges would otherwise satisfy.
void merge_collinear_edges(Edge* edge, EdgeList* activeEdges, Comparator& c) {
    if (edge->fPrevEdgeAbove && (edge->fTop == edge->fPrevEdgeAbove->fTop ||
                                 !edge->fPrevEdgeAbove->isLeftOf(edge->fTop))) {
        merge_edges_above(edge, edge->fPrevEdgeAbove, activeEdges, c);
    } else if (edge->fNextEdgeAbove && (edge->fTop == edge->fNextEdgeAbove->fTop ||
                                        !edge->isLeftOf(edge->fNextEdgeAbove->fTop))) {
        merge_edges_above(edge, edge->fNextEdgeAbove, activeEdges, c);
    }
    if (edge->fPrevEdgeBelow && (edge->fBottom == edge->fPrevEdgeBelow->fBottom ||
                                 !edge->fPrevEdgeBelow->isLeftOf(edge->fBottom))) {
        merge_edges_below(edge, edge->fPrevEdgeBelow, activeEdges, c);
    } else if (edge->fNextEdgeBelow && (edge->fBottom == edge->fNextEdgeBelow->fBottom ||
                                        !edge->isLeftOf(edge->fNextEdgeBelow->fBottom))) {
        merge_edges_below(edge, edge->fNextEdgeBelow, activeEdges, c);
    }
}
+
+void split_edge(Edge* edge, Vertex* v, EdgeList* activeEdges, Comparator& c, SkChunkAlloc& alloc);
+
// Enforces the left-to-right ordering invariant between `edge` and its
// immediate active-list neighbors. Whenever one edge's endpoint lies strictly
// inside the other edge's sweep span but on the wrong side of its line (which
// can happen after an imprecise intersection split -- see file comment), the
// spanning edge is split at the offending endpoint.
void cleanup_active_edges(Edge* edge, EdgeList* activeEdges, Comparator& c, SkChunkAlloc& alloc) {
    Vertex* top = edge->fTop;
    Vertex* bottom = edge->fBottom;
    if (edge->fLeft) {
        Vertex* leftTop = edge->fLeft->fTop;
        Vertex* leftBottom = edge->fLeft->fBottom;
        if (c.sweep_gt(top->fPoint, leftTop->fPoint) && !edge->fLeft->isLeftOf(top)) {
            split_edge(edge->fLeft, edge->fTop, activeEdges, c, alloc);
        } else if (c.sweep_gt(leftTop->fPoint, top->fPoint) && !edge->isRightOf(leftTop)) {
            split_edge(edge, leftTop, activeEdges, c, alloc);
        } else if (c.sweep_lt(bottom->fPoint, leftBottom->fPoint) &&
                   !edge->fLeft->isLeftOf(bottom)) {
            split_edge(edge->fLeft, bottom, activeEdges, c, alloc);
        } else if (c.sweep_lt(leftBottom->fPoint, bottom->fPoint) && !edge->isRightOf(leftBottom)) {
            split_edge(edge, leftBottom, activeEdges, c, alloc);
        }
    }
    if (edge->fRight) {
        Vertex* rightTop = edge->fRight->fTop;
        Vertex* rightBottom = edge->fRight->fBottom;
        if (c.sweep_gt(top->fPoint, rightTop->fPoint) && !edge->fRight->isRightOf(top)) {
            split_edge(edge->fRight, top, activeEdges, c, alloc);
        } else if (c.sweep_gt(rightTop->fPoint, top->fPoint) && !edge->isLeftOf(rightTop)) {
            split_edge(edge, rightTop, activeEdges, c, alloc);
        } else if (c.sweep_lt(bottom->fPoint, rightBottom->fPoint) &&
                   !edge->fRight->isRightOf(bottom)) {
            split_edge(edge->fRight, bottom, activeEdges, c, alloc);
        } else if (c.sweep_lt(rightBottom->fPoint, bottom->fPoint) &&
                   !edge->isLeftOf(rightBottom)) {
            split_edge(edge, rightBottom, activeEdges, c, alloc);
        }
    }
}
+
// Splits `edge` at vertex v. If v actually sorts above the edge's top (or
// below its bottom) -- possible when v came from an imprecise intersection --
// the edge is instead extended by moving that endpoint to v. Otherwise `edge`
// is clipped to end at v and a new edge from v to the old bottom is created,
// after which the active-list and collinearity invariants are restored.
void split_edge(Edge* edge, Vertex* v, EdgeList* activeEdges, Comparator& c, SkChunkAlloc& alloc) {
    LOG("splitting edge (%g -> %g) at vertex %g (%g, %g)\n",
        edge->fTop->fID, edge->fBottom->fID,
        v->fID, v->fPoint.fX, v->fPoint.fY);
    if (c.sweep_lt(v->fPoint, edge->fTop->fPoint)) {
        set_top(edge, v, activeEdges, c);
    } else if (c.sweep_gt(v->fPoint, edge->fBottom->fPoint)) {
        set_bottom(edge, v, activeEdges, c);
    } else {
        Edge* newEdge = ALLOC_NEW(Edge, (v, edge->fBottom, edge->fWinding), alloc);
        insert_edge_below(newEdge, v, c);
        insert_edge_above(newEdge, edge->fBottom, c);
        set_bottom(edge, v, activeEdges, c);
        cleanup_active_edges(edge, activeEdges, c, alloc);
        fix_active_state(newEdge, activeEdges, c);
        merge_collinear_edges(newEdge, activeEdges, c);
    }
}
+
// Merges coincident vertex `src` into `dst`: re-points every edge incident to
// src at dst (via set_bottom/set_top, which re-establish list invariants),
// then removes src from the sorted vertex list at *head.
void merge_vertices(Vertex* src, Vertex* dst, Vertex** head, Comparator& c, SkChunkAlloc& alloc) {
    LOG("found coincident verts at %g, %g; merging %g into %g\n", src->fPoint.fX, src->fPoint.fY,
        src->fID, dst->fID);
    for (Edge* edge = src->fFirstEdgeAbove; edge;) {
        Edge* next = edge->fNextEdgeAbove;
        set_bottom(edge, dst, nullptr, c);
        edge = next;
    }
    for (Edge* edge = src->fFirstEdgeBelow; edge;) {
        Edge* next = edge->fNextEdgeBelow;
        set_top(edge, dst, nullptr, c);
        edge = next;
    }
    remove<Vertex, &Vertex::fPrev, &Vertex::fNext>(src, head, nullptr);
}
+
// Tests `edge` and `other` for intersection; on a hit, splits both edges at
// the intersection vertex and returns it, otherwise returns nullptr. Because
// the computed point may be imprecise (see file comment), a point sorting at
// or beyond an edge endpoint snaps to that endpoint rather than creating a
// new vertex; otherwise a vertex is found or created at the correct position
// in the sorted vertex list.
Vertex* check_for_intersection(Edge* edge, Edge* other, EdgeList* activeEdges, Comparator& c,
                               SkChunkAlloc& alloc) {
    SkPoint p;
    if (!edge || !other) {
        return nullptr;
    }
    if (edge->intersect(*other, &p)) {
        Vertex* v;
        LOG("found intersection, pt is %g, %g\n", p.fX, p.fY);
        if (p == edge->fTop->fPoint || c.sweep_lt(p, edge->fTop->fPoint)) {
            // Snap to edge's top; only `other` needs splitting.
            split_edge(other, edge->fTop, activeEdges, c, alloc);
            v = edge->fTop;
        } else if (p == edge->fBottom->fPoint || c.sweep_gt(p, edge->fBottom->fPoint)) {
            split_edge(other, edge->fBottom, activeEdges, c, alloc);
            v = edge->fBottom;
        } else if (p == other->fTop->fPoint || c.sweep_lt(p, other->fTop->fPoint)) {
            split_edge(edge, other->fTop, activeEdges, c, alloc);
            v = other->fTop;
        } else if (p == other->fBottom->fPoint || c.sweep_gt(p, other->fBottom->fPoint)) {
            split_edge(edge, other->fBottom, activeEdges, c, alloc);
            v = other->fBottom;
        } else {
            // Interior intersection: walk the sorted vertex list to find the
            // insertion position for p, reusing an existing coincident vertex
            // when one is already there.
            Vertex* nextV = edge->fTop;
            while (c.sweep_lt(p, nextV->fPoint)) {
                nextV = nextV->fPrev;
            }
            while (c.sweep_lt(nextV->fPoint, p)) {
                nextV = nextV->fNext;
            }
            Vertex* prevV = nextV->fPrev;
            if (coincident(prevV->fPoint, p)) {
                v = prevV;
            } else if (coincident(nextV->fPoint, p)) {
                v = nextV;
            } else {
                v = ALLOC_NEW(Vertex, (p), alloc);
                LOG("inserting between %g (%g, %g) and %g (%g, %g)\n",
                    prevV->fID, prevV->fPoint.fX, prevV->fPoint.fY,
                    nextV->fID, nextV->fPoint.fX, nextV->fPoint.fY);
#if LOGGING_ENABLED
                v->fID = (nextV->fID + prevV->fID) * 0.5f;
#endif
                v->fPrev = prevV;
                v->fNext = nextV;
                prevV->fNext = v;
                nextV->fPrev = v;
            }
            split_edge(edge, v, activeEdges, c, alloc);
            split_edge(other, v, activeEdges, c, alloc);
        }
        return v;
    }
    return nullptr;
}
+
// Removes points coincident with their predecessor from each circular contour.
// After a removal the walk backs up one vertex so newly-adjacent duplicates
// are also caught. A contour that collapses to nothing is set to nullptr.
void sanitize_contours(Vertex** contours, int contourCnt) {
    for (int i = 0; i < contourCnt; ++i) {
        SkASSERT(contours[i]);
        for (Vertex* v = contours[i];;) {
            if (coincident(v->fPrev->fPoint, v->fPoint)) {
                LOG("vertex %g,%g coincident; removing\n", v->fPoint.fX, v->fPoint.fY);
                if (v->fPrev == v) {
                    // Last remaining vertex; the whole contour degenerates.
                    contours[i] = nullptr;
                    break;
                }
                v->fPrev->fNext = v->fNext;
                v->fNext->fPrev = v->fPrev;
                if (contours[i] == v) {
                    contours[i] = v->fNext;
                }
                v = v->fPrev;
            } else {
                v = v->fNext;
                if (v == contours[i]) break;
            }
        }
    }
}
+
// Walks the sorted vertex list merging runs of coincident vertices. A vertex
// that sorts before its predecessor is first snapped onto the predecessor's
// position, which then triggers the merge on the coincidence check below.
void merge_coincident_vertices(Vertex** vertices, Comparator& c, SkChunkAlloc& alloc) {
    for (Vertex* v = (*vertices)->fNext; v != nullptr; v = v->fNext) {
        if (c.sweep_lt(v->fPoint, v->fPrev->fPoint)) {
            v->fPoint = v->fPrev->fPoint;
        }
        if (coincident(v->fPrev->fPoint, v->fPoint)) {
            merge_vertices(v->fPrev, v, vertices, c, alloc);
        }
    }
}
+
// Stage 2: convert the contours to a mesh of edges connecting the vertices.
//
// For each contour, creates an Edge between every pair of adjacent vertices
// (threaded into the endpoints' above/below lists, with collinear neighbors
// merged as they are added), and flattens all contours into one null-
// terminated doubly-linked vertex list, which is returned.
Vertex* build_edges(Vertex** contours, int contourCnt, Comparator& c, SkChunkAlloc& alloc) {
    Vertex* vertices = nullptr;
    Vertex* prev = nullptr;
    for (int i = 0; i < contourCnt; ++i) {
        for (Vertex* v = contours[i]; v != nullptr;) {
            Vertex* vNext = v->fNext;
            Edge* edge = new_edge(v->fPrev, v, alloc, c);
            // fWinding encodes the original direction; link the edge into its
            // endpoints' lists accordingly (top gets "below", bottom "above").
            if (edge->fWinding > 0) {
                insert_edge_below(edge, v->fPrev, c);
                insert_edge_above(edge, v, c);
            } else {
                insert_edge_below(edge, v, c);
                insert_edge_above(edge, v->fPrev, c);
            }
            merge_collinear_edges(edge, nullptr, c);
            // Re-thread v from its circular contour into the flat output list.
            if (prev) {
                prev->fNext = v;
                v->fPrev = prev;
            } else {
                vertices = v;
            }
            prev = v;
            v = vNext;
            if (v == contours[i]) break;
        }
    }
    if (prev) {
        prev->fNext = vertices->fPrev = nullptr;
    }
    return vertices;
}
+
+// Stage 3: sort the vertices by increasing sweep direction.
+
+Vertex* sorted_merge(Vertex* a, Vertex* b, Comparator& c);
+
+void front_back_split(Vertex* v, Vertex** pFront, Vertex** pBack) {
+ Vertex* fast;
+ Vertex* slow;
+ if (!v || !v->fNext) {
+ *pFront = v;
+ *pBack = nullptr;
+ } else {
+ slow = v;
+ fast = v->fNext;
+
+ while (fast != nullptr) {
+ fast = fast->fNext;
+ if (fast != nullptr) {
+ slow = slow->fNext;
+ fast = fast->fNext;
+ }
+ }
+
+ *pFront = v;
+ *pBack = slow->fNext;
+ slow->fNext->fPrev = nullptr;
+ slow->fNext = nullptr;
+ }
+}
+
+void merge_sort(Vertex** head, Comparator& c) {
+ if (!*head || !(*head)->fNext) {
+ return;
+ }
+
+ Vertex* a;
+ Vertex* b;
+ front_back_split(*head, &a, &b);
+
+ merge_sort(&a, c);
+ merge_sort(&b, c);
+
+ *head = sorted_merge(a, b, c);
+}
+
// Appends a single vertex at the tail of the list at *head/*tail.
inline void append_vertex(Vertex* v, Vertex** head, Vertex** tail) {
    insert<Vertex, &Vertex::fPrev, &Vertex::fNext>(v, *tail, nullptr, head, tail);
}
+
// Appends the entire already-linked list starting at v to the tail of the
// list at *head/*tail.
inline void append_vertex_list(Vertex* v, Vertex** head, Vertex** tail) {
    insert<Vertex, &Vertex::fPrev, &Vertex::fNext>(v, *tail, v->fNext, head, tail);
}
+
+Vertex* sorted_merge(Vertex* a, Vertex* b, Comparator& c) {
+ Vertex* head = nullptr;
+ Vertex* tail = nullptr;
+
+ while (a && b) {
+ if (c.sweep_lt(a->fPoint, b->fPoint)) {
+ Vertex* next = a->fNext;
+ append_vertex(a, &head, &tail);
+ a = next;
+ } else {
+ Vertex* next = b->fNext;
+ append_vertex(b, &head, &tail);
+ b = next;
+ }
+ }
+ if (a) {
+ append_vertex_list(a, &head, &tail);
+ }
+ if (b) {
+ append_vertex_list(b, &head, &tail);
+ }
+ return head;
+}
+
// Stage 4: Simplify the mesh by inserting new vertices at intersecting edges.
//
// A sweep over the sorted vertices, maintaining the active edge list. At each
// vertex, newly-starting edges are checked for intersection against the
// enclosing active edges; any split restarts the checks, possibly rewinding
// the sweep to an intersection vertex that sorts earlier than v.
void simplify(Vertex* vertices, Comparator& c, SkChunkAlloc& alloc) {
    LOG("simplifying complex polygons\n");
    EdgeList activeEdges;
    for (Vertex* v = vertices; v != nullptr; v = v->fNext) {
        if (!v->fFirstEdgeAbove && !v->fFirstEdgeBelow) {
            continue;  // Isolated vertex; nothing to do.
        }
#if LOGGING_ENABLED
        LOG("\nvertex %g: (%g,%g)\n", v->fID, v->fPoint.fX, v->fPoint.fY);
#endif
        Edge* leftEnclosingEdge = nullptr;
        Edge* rightEnclosingEdge = nullptr;
        bool restartChecks;
        do {
            restartChecks = false;
            find_enclosing_edges(v, &activeEdges, &leftEnclosingEdge, &rightEnclosingEdge);
            if (v->fFirstEdgeBelow) {
                // Check each edge leaving v against both enclosing edges.
                for (Edge* edge = v->fFirstEdgeBelow; edge != nullptr; edge = edge->fNextEdgeBelow) {
                    if (check_for_intersection(edge, leftEnclosingEdge, &activeEdges, c, alloc)) {
                        restartChecks = true;
                        break;
                    }
                    if (check_for_intersection(edge, rightEnclosingEdge, &activeEdges, c, alloc)) {
                        restartChecks = true;
                        break;
                    }
                }
            } else {
                // v terminates its edges, so its two enclosing edges become
                // adjacent and may intersect each other. If the intersection
                // vertex sorts before v, rewind the sweep to it.
                if (Vertex* pv = check_for_intersection(leftEnclosingEdge, rightEnclosingEdge,
                                                        &activeEdges, c, alloc)) {
                    if (c.sweep_lt(pv->fPoint, v->fPoint)) {
                        v = pv;
                    }
                    restartChecks = true;
                }

            }
        } while (restartChecks);
        // v is now fully processed: retire its edges-above from the active
        // list and activate its edges-below in their place.
        for (Edge* e = v->fFirstEdgeAbove; e; e = e->fNextEdgeAbove) {
            remove_edge(e, &activeEdges);
        }
        Edge* leftEdge = leftEnclosingEdge;
        for (Edge* e = v->fFirstEdgeBelow; e; e = e->fNextEdgeBelow) {
            insert_edge(e, leftEdge, &activeEdges);
            leftEdge = e;
        }
        v->fProcessed = true;
    }
}
+
// Stage 5: Tessellate the simplified mesh into monotone polygons.
//
// A second sweep over the now intersection-free mesh. At each vertex the polys
// bounded by its edges-above are continued or closed, and new polys are opened
// between adjacent edges-below wherever the accumulated winding is nonzero.
// Returns the linked list of all polys created.
Poly* tessellate(Vertex* vertices, SkChunkAlloc& alloc) {
    LOG("tessellating simple polygons\n");
    EdgeList activeEdges;
    Poly* polys = nullptr;
    for (Vertex* v = vertices; v != nullptr; v = v->fNext) {
        if (!v->fFirstEdgeAbove && !v->fFirstEdgeBelow) {
            continue;  // Isolated vertex; nothing to do.
        }
#if LOGGING_ENABLED
        LOG("\nvertex %g: (%g,%g)\n", v->fID, v->fPoint.fX, v->fPoint.fY);
#endif
        Edge* leftEnclosingEdge = nullptr;
        Edge* rightEnclosingEdge = nullptr;
        find_enclosing_edges(v, &activeEdges, &leftEnclosingEdge, &rightEnclosingEdge);
        // The polys immediately to the left and right of v.
        Poly* leftPoly = nullptr;
        Poly* rightPoly = nullptr;
        if (v->fFirstEdgeAbove) {
            leftPoly = v->fFirstEdgeAbove->fLeftPoly;
            rightPoly = v->fLastEdgeAbove->fRightPoly;
        } else {
            leftPoly = leftEnclosingEdge ? leftEnclosingEdge->fRightPoly : nullptr;
            rightPoly = rightEnclosingEdge ? rightEnclosingEdge->fLeftPoly : nullptr;
        }
#if LOGGING_ENABLED
        LOG("edges above:\n");
        for (Edge* e = v->fFirstEdgeAbove; e; e = e->fNextEdgeAbove) {
            LOG("%g -> %g, lpoly %d, rpoly %d\n", e->fTop->fID, e->fBottom->fID,
                e->fLeftPoly ? e->fLeftPoly->fID : -1, e->fRightPoly ? e->fRightPoly->fID : -1);
        }
        LOG("edges below:\n");
        for (Edge* e = v->fFirstEdgeBelow; e; e = e->fNextEdgeBelow) {
            LOG("%g -> %g, lpoly %d, rpoly %d\n", e->fTop->fID, e->fBottom->fID,
                e->fLeftPoly ? e->fLeftPoly->fID : -1, e->fRightPoly ? e->fRightPoly->fID : -1);
        }
#endif
        if (v->fFirstEdgeAbove) {
            // Continue the outer bounding polys down through v...
            if (leftPoly) {
                leftPoly = leftPoly->addVertex(v, Poly::kRight_Side, alloc);
            }
            if (rightPoly) {
                rightPoly = rightPoly->addVertex(v, Poly::kLeft_Side, alloc);
            }
            // ...and close every poly pinched between adjacent edges-above,
            // retiring those edges from the active list.
            for (Edge* e = v->fFirstEdgeAbove; e != v->fLastEdgeAbove; e = e->fNextEdgeAbove) {
                Edge* leftEdge = e;
                Edge* rightEdge = e->fNextEdgeAbove;
                SkASSERT(rightEdge->isRightOf(leftEdge->fTop));
                remove_edge(leftEdge, &activeEdges);
                if (leftEdge->fRightPoly) {
                    leftEdge->fRightPoly->end(v, alloc);
                }
                if (rightEdge->fLeftPoly && rightEdge->fLeftPoly != leftEdge->fRightPoly) {
                    rightEdge->fLeftPoly->end(v, alloc);
                }
            }
            remove_edge(v->fLastEdgeAbove, &activeEdges);
            if (!v->fFirstEdgeBelow) {
                // v only ends edges: the outer left and right polys meet here
                // and must continue as one. Record them as partners so later
                // vertices append to both.
                if (leftPoly && rightPoly && leftPoly != rightPoly) {
                    SkASSERT(leftPoly->fPartner == nullptr && rightPoly->fPartner == nullptr);
                    rightPoly->fPartner = leftPoly;
                    leftPoly->fPartner = rightPoly;
                }
            }
        }
        if (v->fFirstEdgeBelow) {
            if (!v->fFirstEdgeAbove) {
                // v only starts edges; if it lies inside a single poly, that
                // poly must be split into two around v.
                if (leftPoly && leftPoly == rightPoly) {
                    // Split the poly.
                    if (leftPoly->fActive->fSide == Poly::kLeft_Side) {
                        leftPoly = new_poly(&polys, leftEnclosingEdge->fTop, leftPoly->fWinding,
                                            alloc);
                        leftPoly->addVertex(v, Poly::kRight_Side, alloc);
                        rightPoly->addVertex(v, Poly::kLeft_Side, alloc);
                        leftEnclosingEdge->fRightPoly = leftPoly;
                    } else {
                        rightPoly = new_poly(&polys, rightEnclosingEdge->fTop, rightPoly->fWinding,
                                             alloc);
                        rightPoly->addVertex(v, Poly::kLeft_Side, alloc);
                        leftPoly->addVertex(v, Poly::kRight_Side, alloc);
                        rightEnclosingEdge->fLeftPoly = rightPoly;
                    }
                } else {
                    if (leftPoly) {
                        leftPoly = leftPoly->addVertex(v, Poly::kRight_Side, alloc);
                    }
                    if (rightPoly) {
                        rightPoly = rightPoly->addVertex(v, Poly::kLeft_Side, alloc);
                    }
                }
            }
            // Activate the edges below v, opening a new poly between each
            // adjacent pair whose accumulated winding is nonzero.
            Edge* leftEdge = v->fFirstEdgeBelow;
            leftEdge->fLeftPoly = leftPoly;
            insert_edge(leftEdge, leftEnclosingEdge, &activeEdges);
            for (Edge* rightEdge = leftEdge->fNextEdgeBelow; rightEdge;
                 rightEdge = rightEdge->fNextEdgeBelow) {
                insert_edge(rightEdge, leftEdge, &activeEdges);
                int winding = leftEdge->fLeftPoly ? leftEdge->fLeftPoly->fWinding : 0;
                winding += leftEdge->fWinding;
                if (winding != 0) {
                    Poly* poly = new_poly(&polys, v, winding, alloc);
                    leftEdge->fRightPoly = rightEdge->fLeftPoly = poly;
                }
                leftEdge = rightEdge;
            }
            v->fLastEdgeBelow->fRightPoly = rightPoly;
        }
#if LOGGING_ENABLED
        LOG("\nactive edges:\n");
        for (Edge* e = activeEdges.fHead; e != nullptr; e = e->fRight) {
            LOG("%g -> %g, lpoly %d, rpoly %d\n", e->fTop->fID, e->fBottom->fID,
                e->fLeftPoly ? e->fLeftPoly->fID : -1, e->fRightPoly ? e->fRightPoly->fID : -1);
        }
#endif
    }
    return polys;
}
+
+// This is a driver function which calls stages 2-5 in turn.
+
+// Driver for stages 2-5: sanitizes the contours, builds and sorts the edge
+// mesh, simplifies intersections, then sweeps it into monotone polys.
+// Returns nullptr if no edges could be built.
+Poly* contours_to_polys(Vertex** contours, int contourCnt, Comparator& c, SkChunkAlloc& alloc) {
+#if LOGGING_ENABLED
+    for (int i = 0; i < contourCnt; ++i) {
+        Vertex* head = contours[i];
+        SkASSERT(head);
+        LOG("path.moveTo(%20.20g, %20.20g);\n", head->fPoint.fX, head->fPoint.fY);
+        for (Vertex* cur = head->fNext; cur != head; cur = cur->fNext) {
+            LOG("path.lineTo(%20.20g, %20.20g);\n", cur->fPoint.fX, cur->fPoint.fY);
+        }
+    }
+#endif
+    sanitize_contours(contours, contourCnt);
+    Vertex* mesh = build_edges(contours, contourCnt, c, alloc);
+    if (nullptr == mesh) {
+        return nullptr;
+    }
+
+    // Sort the vertex list in Y (secondarily in X) before the sweep.
+    merge_sort(&mesh, c);
+    merge_coincident_vertices(&mesh, c, alloc);
+#if LOGGING_ENABLED
+    for (Vertex* cur = mesh; cur != nullptr; cur = cur->fNext) {
+        static float gID = 0.0f;
+        cur->fID = gID++;
+    }
+#endif
+    simplify(mesh, c, alloc);
+    return tessellate(mesh, alloc);
+}
+
+// Stage 6: Triangulate the monotone polygons into a vertex buffer.
+
+// Emits triangle vertices for every poly whose winding survives the fill
+// rule; returns one past the last SkPoint written.
+SkPoint* polys_to_triangles(Poly* polys, SkPath::FillType fillType, SkPoint* data) {
+    SkPoint* out = data;
+    Poly* poly = polys;
+    while (poly) {
+        if (apply_fill_type(fillType, poly->fWinding)) {
+            out = poly->emit(out);
+        }
+        poly = poly->fNext;
+    }
+    return out;
+}
+
+// Metadata attached (via SkData) to the cached vertex buffer's unique key;
+// consumed by cache_match() to decide whether a cached tessellation is still
+// usable at a new tolerance. NOTE: this struct is memcpy'd into SkData, so
+// its layout must not change.
+struct TessInfo {
+    SkScalar fTolerance;  // tolerance the tessellation was generated at; 0 => path was linear
+    int fVertexCount;     // number of SkPoint vertices in the cached buffer
+};
+
+// Returns true (and writes *vertexCount) if the tessellation cached on
+// vertexBuffer is acceptable at tolerance 'tol'.
+bool cache_match(GrVertexBuffer* vertexBuffer, SkScalar tol, int* vertexCount) {
+    if (nullptr == vertexBuffer) {
+        return false;
+    }
+    const SkData* customData = vertexBuffer->getUniqueKey().getCustomData();
+    SkASSERT(customData);
+    const TessInfo* info = static_cast<const TessInfo*>(customData->data());
+    // A stored tolerance of zero marks a purely-linear path (exact at any
+    // tolerance); otherwise accept if the cached tolerance is within a 3x
+    // factor of the requested one.
+    bool usable = (0 == info->fTolerance) || (info->fTolerance < 3.0f * tol);
+    if (!usable) {
+        return false;
+    }
+    *vertexCount = info->fVertexCount;
+    return true;
+}
+
+};
+
+GrTessellatingPathRenderer::GrTessellatingPathRenderer() {}
+
+namespace {
+
+// When the SkPathRef genID changes, invalidate a corresponding GrResource described by key.
+class PathInvalidator : public SkPathRef::GenIDChangeListener {
+public:
+    explicit PathInvalidator(const GrUniqueKey& key) : fMsg(key) {}
+private:
+    GrUniqueKeyInvalidatedMessage fMsg;
+
+    // Fired when the listened-to path's generation ID changes; posts the
+    // invalidation message so the vertex buffer cached under this key is purged.
+    void onChange() override {
+        SkMessageBus<GrUniqueKeyInvalidatedMessage>::Post(fMsg);
+    }
+};
+
+} // namespace
+
+bool GrTessellatingPathRenderer::onCanDrawPath(const CanDrawPathArgs& args) const {
+    // This path renderer handles all fill styles and all stroke styles except
+    // hairlines, but does no antialiasing. Convex paths are left to the
+    // simpler, faster renderers.
+    if (args.fAntiAlias) {
+        return false;
+    }
+    if (args.fPath->isConvex()) {
+        return false;
+    }
+    return !IsStrokeHairlineOrEquivalent(*args.fStroke, *args.fViewMatrix, nullptr);
+}
+
+// Batch that tessellates one path into triangles on the CPU, uploads them to
+// a vertex buffer, and (for non-volatile paths) caches that buffer under a
+// key derived from the path's generation ID.
+class TessellatingPathBatch : public GrVertexBatch {
+public:
+
+    static GrDrawBatch* Create(const GrColor& color,
+                               const SkPath& path,
+                               const GrStrokeInfo& stroke,
+                               const SkMatrix& viewMatrix,
+                               SkRect clipBounds) {
+        return new TessellatingPathBatch(color, path, stroke, viewMatrix, clipBounds);
+    }
+
+    const char* name() const override { return "TessellatingPathBatch"; }
+
+    void getInvariantOutputColor(GrInitInvariantOutput* out) const override {
+        out->setKnownFourComponents(fColor);
+    }
+
+    void getInvariantOutputCoverage(GrInitInvariantOutput* out) const override {
+        out->setUnknownSingleComponent();
+    }
+
+private:
+    void initBatchTracker(const GrPipelineOptimizations& opt) override {
+        // Handle any color overrides
+        if (!opt.readsColor()) {
+            fColor = GrColor_ILLEGAL;
+        }
+        opt.getOverrideColorIfSet(&fColor);
+        fPipelineInfo = opt;
+    }
+
+    // Applies any dash/stroke to fPath, tessellates the result into triangles
+    // and uploads them into vertexBuffer (mapping it when canMapVB is true).
+    // Returns the number of vertices written, or 0 if the path produced no
+    // geometry or allocation failed. On success for a non-volatile path, also
+    // attaches TessInfo to 'key' and registers the buffer (plus a genID-change
+    // invalidator) with the resource provider.
+    int tessellate(GrUniqueKey* key,
+                   GrResourceProvider* resourceProvider,
+                   SkAutoTUnref<GrVertexBuffer>& vertexBuffer,
+                   bool canMapVB) {
+        SkPath path;
+        GrStrokeInfo stroke(fStroke);
+        if (stroke.isDashed()) {
+            if (!stroke.applyDashToPath(&path, &stroke, fPath)) {
+                return 0;
+            }
+        } else {
+            path = fPath;
+        }
+        if (!stroke.isFillStyle()) {
+            // Convert the stroke to a filled outline so only fills remain below.
+            stroke.setResScale(SkScalarAbs(fViewMatrix.getMaxScale()));
+            if (!stroke.applyToPath(&path, path)) {
+                return 0;
+            }
+            stroke.setFillStyle();
+        }
+        SkRect pathBounds = path.getBounds();
+        // Sweep along the path's longer axis.
+        Comparator c;
+        if (pathBounds.width() > pathBounds.height()) {
+            c.sweep_lt = sweep_lt_horiz;
+            c.sweep_gt = sweep_gt_horiz;
+        } else {
+            c.sweep_lt = sweep_lt_vert;
+            c.sweep_gt = sweep_gt_vert;
+        }
+        SkScalar screenSpaceTol = GrPathUtils::kDefaultTolerance;
+        SkScalar tol = GrPathUtils::scaleToleranceToSrc(screenSpaceTol, fViewMatrix, pathBounds);
+        int contourCnt;
+        int maxPts = GrPathUtils::worstCasePointCount(path, &contourCnt, tol);
+        if (maxPts <= 0) {
+            return 0;
+        }
+        if (maxPts > ((int)SK_MaxU16 + 1)) {
+            SkDebugf("Path not rendered, too many verts (%d)\n", maxPts);
+            return 0;
+        }
+        SkPath::FillType fillType = path.getFillType();
+        if (SkPath::IsInverseFillType(fillType)) {
+            // Inverse fills contribute one extra contour (the clip-bounds rect).
+            contourCnt++;
+        }
+
+        LOG("got %d pts, %d contours\n", maxPts, contourCnt);
+        SkAutoTDeleteArray<Vertex*> contours(new Vertex* [contourCnt]);
+
+        // For the initial size of the chunk allocator, estimate based on the point count:
+        // one vertex per point for the initial passes, plus two for the vertices in the
+        // resulting Polys, since the same point may end up in two Polys. Assume minimal
+        // connectivity of one Edge per Vertex (will grow for intersections).
+        SkChunkAlloc alloc(maxPts * (3 * sizeof(Vertex) + sizeof(Edge)));
+        bool isLinear;
+        path_to_contours(path, tol, fClipBounds, contours.get(), alloc, &isLinear);
+        Poly* polys;
+        polys = contours_to_polys(contours.get(), contourCnt, c, alloc);
+        // Count output vertices: (n - 2) triangles per n-gon, 3 vertices each
+        // (6 when emitting wireframe line pairs).
+        int vertexCount = 0;
+        for (Poly* poly = polys; poly; poly = poly->fNext) {
+            if (apply_fill_type(fillType, poly->fWinding) && poly->fCount >= 3) {
+                vertexCount += (poly->fCount - 2) * (WIREFRAME ? 6 : 3);
+            }
+        }
+        if (0 == vertexCount) {
+            return 0;
+        }
+
+        size_t size = vertexCount * sizeof(SkPoint);
+        // Reuse the existing buffer when it is large enough; else reallocate.
+        if (!vertexBuffer.get() || vertexBuffer->gpuMemorySize() < size) {
+            vertexBuffer.reset(resourceProvider->createVertexBuffer(
+                size, GrResourceProvider::kStatic_BufferUsage, 0));
+        }
+        if (!vertexBuffer.get()) {
+            SkDebugf("Could not allocate vertices\n");
+            return 0;
+        }
+        SkPoint* verts;
+        if (canMapVB) {
+            verts = static_cast<SkPoint*>(vertexBuffer->map());
+        } else {
+            // Buffer cannot be mapped: stage into a heap array, then updateData().
+            verts = new SkPoint[vertexCount];
+        }
+        SkDEBUGCODE(SkPoint* end = ) polys_to_triangles(polys, fillType, verts);
+        SkASSERT(static_cast<int>(end - verts) == vertexCount);
+        LOG("vertex count: %d\n", vertexCount);
+        if (canMapVB) {
+            vertexBuffer->unmap();
+        } else {
+            vertexBuffer->updateData(verts, vertexCount * sizeof(SkPoint));
+            delete[] verts;
+        }
+
+
+        if (!fPath.isVolatile()) {
+            // Cache the buffer: record the generation tolerance (0 for linear
+            // paths, which are exact at any tolerance) and the vertex count on
+            // the key, and purge the entry if the path's genID ever changes.
+            TessInfo info;
+            info.fTolerance = isLinear ? 0 : tol;
+            info.fVertexCount = vertexCount;
+            SkAutoTUnref<SkData> data(SkData::NewWithCopy(&info, sizeof(info)));
+            key->setCustomData(data.get());
+            resourceProvider->assignUniqueKeyToResource(*key, vertexBuffer.get());
+            SkPathPriv::AddGenIDChangeListener(fPath, new PathInvalidator(*key));
+        }
+        return vertexCount;
+    }
+
+    void onPrepareDraws(Target* target) override {
+        // Construct a cache key from the path's genID, fill type, stroke data
+        // and (for inverse fills) the clip bounds. NOTE(review): the view
+        // matrix is NOT part of the key; cache_match() relies on the stored
+        // tolerance to decide whether a cached tessellation is still usable
+        // under the current matrix.
+        static const GrUniqueKey::Domain kDomain = GrUniqueKey::GenerateDomain();
+        GrUniqueKey key;
+        int clipBoundsSize32 =
+            fPath.isInverseFillType() ? sizeof(fClipBounds) / sizeof(uint32_t) : 0;
+        int strokeDataSize32 = fStroke.computeUniqueKeyFragmentData32Cnt();
+        GrUniqueKey::Builder builder(&key, kDomain, 2 + clipBoundsSize32 + strokeDataSize32);
+        builder[0] = fPath.getGenerationID();
+        builder[1] = fPath.getFillType();
+        // For inverse fills, the tessellation is dependent on clip bounds.
+        if (fPath.isInverseFillType()) {
+            memcpy(&builder[2], &fClipBounds, sizeof(fClipBounds));
+        }
+        fStroke.asUniqueKeyFragment(&builder[2 + clipBoundsSize32]);
+        builder.finish();
+        GrResourceProvider* rp = target->resourceProvider();
+        SkAutoTUnref<GrVertexBuffer> vertexBuffer(rp->findAndRefTByUniqueKey<GrVertexBuffer>(key));
+        int vertexCount;
+        SkScalar screenSpaceTol = GrPathUtils::kDefaultTolerance;
+        SkScalar tol = GrPathUtils::scaleToleranceToSrc(
+            screenSpaceTol, fViewMatrix, fPath.getBounds());
+        if (!cache_match(vertexBuffer.get(), tol, &vertexCount)) {
+            // Cache miss or stale tolerance: re-tessellate and re-upload.
+            bool canMapVB = GrCaps::kNone_MapFlags != target->caps().mapBufferFlags();
+            vertexCount = tessellate(&key, rp, vertexBuffer, canMapVB);
+        }
+
+        if (vertexCount == 0) {
+            return;
+        }
+
+        SkAutoTUnref<const GrGeometryProcessor> gp;
+        {
+            using namespace GrDefaultGeoProcFactory;
+
+            Color color(fColor);
+            LocalCoords localCoords(fPipelineInfo.readsLocalCoords() ?
+                                    LocalCoords::kUsePosition_Type :
+                                    LocalCoords::kUnused_Type);
+            Coverage::Type coverageType;
+            if (fPipelineInfo.readsCoverage()) {
+                coverageType = Coverage::kSolid_Type;
+            } else {
+                coverageType = Coverage::kNone_Type;
+            }
+            Coverage coverage(coverageType);
+            gp.reset(GrDefaultGeoProcFactory::Create(color, coverage, localCoords,
+                                                     fViewMatrix));
+        }
+
+        target->initDraw(gp, this->pipeline());
+        SkASSERT(gp->getVertexStride() == sizeof(SkPoint));
+
+        GrPrimitiveType primitiveType = WIREFRAME ? kLines_GrPrimitiveType
+                                                  : kTriangles_GrPrimitiveType;
+        GrVertices vertices;
+        vertices.init(primitiveType, vertexBuffer.get(), 0, vertexCount);
+        target->draw(vertices);
+    }
+
+    // Each batch draws exactly one path; batches are never merged.
+    bool onCombineIfPossible(GrBatch*, const GrCaps&) override { return false; }
+
+    TessellatingPathBatch(const GrColor& color,
+                          const SkPath& path,
+                          const GrStrokeInfo& stroke,
+                          const SkMatrix& viewMatrix,
+                          const SkRect& clipBounds)
+        : fColor(color)
+        , fPath(path)
+        , fStroke(stroke)
+        , fViewMatrix(viewMatrix)
+        , fClipBounds(clipBounds) {
+        this->initClassID<TessellatingPathBatch>();
+
+        // Device-space bounds: path bounds, outset by the stroke radius
+        // (scaled by the miter limit for miter joins), mapped through the
+        // view matrix.
+        fBounds = path.getBounds();
+        if (!stroke.isFillStyle()) {
+            SkScalar radius = SkScalarHalf(stroke.getWidth());
+            if (stroke.getJoin() == SkPaint::kMiter_Join) {
+                SkScalar scale = stroke.getMiter();
+                if (scale > SK_Scalar1) {
+                    radius = SkScalarMul(radius, scale);
+                }
+            }
+            fBounds.outset(radius, radius);
+        }
+        viewMatrix.mapRect(&fBounds);
+    }
+
+    GrColor fColor;
+    SkPath fPath;
+    GrStrokeInfo fStroke;
+    SkMatrix fViewMatrix;
+    SkRect fClipBounds; // in source space
+    GrPipelineOptimizations fPipelineInfo;
+};
+
+bool GrTessellatingPathRenderer::onDrawPath(const DrawPathArgs& args) {
+    SkASSERT(!args.fAntiAlias);
+
+    const GrRenderTarget* renderTarget = args.fPipelineBuilder->getRenderTarget();
+    if (!renderTarget) {
+        return false;
+    }
+
+    // The clip bounds are conservative device-space bounds; map them back
+    // into the path's source space for the tessellator.
+    SkMatrix inverse;
+    if (!args.fViewMatrix->invert(&inverse)) {
+        return false;
+    }
+    SkIRect deviceClipBounds;
+    args.fPipelineBuilder->clip().getConservativeBounds(renderTarget, &deviceClipBounds);
+    SkRect localClipBounds = SkRect::Make(deviceClipBounds);
+    inverse.mapRect(&localClipBounds);
+
+    SkAutoTUnref<GrDrawBatch> batch(TessellatingPathBatch::Create(args.fColor, *args.fPath,
+                                                                  *args.fStroke, *args.fViewMatrix,
+                                                                  localClipBounds));
+    args.fTarget->drawBatch(*args.fPipelineBuilder, batch);
+    return true;
+}
+
+///////////////////////////////////////////////////////////////////////////////////////////////////
+
+#ifdef GR_TEST_UTILS
+
+DRAW_BATCH_TEST_DEFINE(TesselatingPathBatch) {
+    GrColor color = GrRandomColor(random);
+    SkMatrix viewMatrix = GrTest::TestMatrixInvertible(random);
+    SkPath path = GrTest::TestPath(random);
+    SkRect clipBounds = GrTest::TestRect(random);
+    // Map the random clip rect back into the path's source space.
+    SkMatrix inverse;
+    if (!viewMatrix.invert(&inverse)) {
+        SkFAIL("Cannot invert matrix\n");
+    }
+    inverse.mapRect(&clipBounds);
+    GrStrokeInfo strokeInfo = GrTest::TestStrokeInfo(random);
+    return TessellatingPathBatch::Create(color, path, strokeInfo, viewMatrix, clipBounds);
+}
+
+#endif
diff --git a/src/gpu/batches/GrTessellatingPathRenderer.h b/src/gpu/batches/GrTessellatingPathRenderer.h
new file mode 100644
index 0000000000..7598ceb065
--- /dev/null
+++ b/src/gpu/batches/GrTessellatingPathRenderer.h
@@ -0,0 +1,33 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrTessellatingPathRenderer_DEFINED
+#define GrTessellatingPathRenderer_DEFINED
+
+#include "GrPathRenderer.h"
+
+/**
+ * Path renderer which tessellates paths into triangles on the CPU (no
+ * antialiasing) and caches the resulting vertex data, keyed on the path's
+ * generation ID.
+ */
+class SK_API GrTessellatingPathRenderer : public GrPathRenderer {
+public:
+    GrTessellatingPathRenderer();
+
+private:
+    // Accepts concave, non-antialiased paths with any fill style and any
+    // non-hairline stroke; convex paths are left to simpler renderers.
+    bool onCanDrawPath(const CanDrawPathArgs&) const override;
+
+    // Tessellated triangle geometry is not usable for stenciling the clip.
+    StencilSupport onGetStencilSupport(const SkPath&, const GrStrokeInfo&) const override {
+        return GrPathRenderer::kNoSupport_StencilSupport;
+    }
+
+    bool onDrawPath(const DrawPathArgs&) override;
+
+    typedef GrPathRenderer INHERITED;
+};
+
+#endif