aboutsummaryrefslogtreecommitdiffhomepage
path: root/src/gpu
diff options
context:
space:
mode:
authorGravatar bsalomon@google.com <bsalomon@google.com@2bbb7eff-a529-9590-31e7-b0007b416f81>2011-10-12 19:53:16 +0000
committerGravatar bsalomon@google.com <bsalomon@google.com@2bbb7eff-a529-9590-31e7-b0007b416f81>2011-10-12 19:53:16 +0000
commitd38f137e9b813f8193675ebd3dfbfe8bc42639e9 (patch)
tree1e670c378d7b31a4538fde3c2b3e4e29b72c05b5 /src/gpu
parent4d5cb45f3e3e62633304b4911d131cdd02dfd541 (diff)
Move gpu/include/* to include/gpu and gpu/src/* to src/gpu
Review URL: http://codereview.appspot.com/5250070/ git-svn-id: http://skia.googlecode.com/svn/trunk@2471 2bbb7eff-a529-9590-31e7-b0007b416f81
Diffstat (limited to 'src/gpu')
-rw-r--r--src/gpu/FlingState.cpp127
-rw-r--r--src/gpu/GrAAHairLinePathRenderer.cpp732
-rw-r--r--src/gpu/GrAAHairLinePathRenderer.h61
-rw-r--r--src/gpu/GrAddPathRenderers_aahairline.cpp20
-rw-r--r--src/gpu/GrAddPathRenderers_none.cpp15
-rw-r--r--src/gpu/GrAddPathRenderers_tesselated.cpp17
-rw-r--r--src/gpu/GrAllocPool.cpp120
-rw-r--r--src/gpu/GrAtlas.cpp200
-rw-r--r--src/gpu/GrBinHashKey.h94
-rw-r--r--src/gpu/GrBufferAllocPool.cpp459
-rw-r--r--src/gpu/GrBufferAllocPool.h350
-rw-r--r--src/gpu/GrClip.cpp145
-rw-r--r--src/gpu/GrContext.cpp1930
-rw-r--r--src/gpu/GrDefaultPathRenderer.cpp560
-rw-r--r--src/gpu/GrDefaultPathRenderer.h59
-rw-r--r--src/gpu/GrDrawMesh.cpp147
-rw-r--r--src/gpu/GrDrawTarget.cpp1262
-rw-r--r--src/gpu/GrDrawTarget.h1476
-rw-r--r--src/gpu/GrGLDefaultInterface_none.cpp13
-rw-r--r--src/gpu/GrGLIRect.h74
-rw-r--r--src/gpu/GrGLIndexBuffer.cpp131
-rw-r--r--src/gpu/GrGLIndexBuffer.h55
-rw-r--r--src/gpu/GrGLInterface.cpp495
-rw-r--r--src/gpu/GrGLProgram.cpp1623
-rw-r--r--src/gpu/GrGLProgram.h347
-rw-r--r--src/gpu/GrGLRenderTarget.cpp96
-rw-r--r--src/gpu/GrGLRenderTarget.h108
-rw-r--r--src/gpu/GrGLShaderVar.h217
-rw-r--r--src/gpu/GrGLStencilBuffer.cpp40
-rw-r--r--src/gpu/GrGLStencilBuffer.h60
-rw-r--r--src/gpu/GrGLTexture.cpp187
-rw-r--r--src/gpu/GrGLTexture.h154
-rw-r--r--src/gpu/GrGLUtil.cpp48
-rw-r--r--src/gpu/GrGLVertexBuffer.cpp126
-rw-r--r--src/gpu/GrGLVertexBuffer.h52
-rw-r--r--src/gpu/GrGeometryBuffer.h90
-rw-r--r--src/gpu/GrGpu.cpp937
-rw-r--r--src/gpu/GrGpu.h423
-rw-r--r--src/gpu/GrGpuFactory.cpp69
-rw-r--r--src/gpu/GrGpuGL.cpp2260
-rw-r--r--src/gpu/GrGpuGL.h264
-rw-r--r--src/gpu/GrGpuGLFixed.cpp382
-rw-r--r--src/gpu/GrGpuGLFixed.h65
-rw-r--r--src/gpu/GrGpuGLShaders.cpp1059
-rw-r--r--src/gpu/GrGpuGLShaders.h93
-rw-r--r--src/gpu/GrInOrderDrawBuffer.cpp618
-rw-r--r--src/gpu/GrInOrderDrawBuffer.h188
-rw-r--r--src/gpu/GrIndexBuffer.h33
-rw-r--r--src/gpu/GrMatrix.cpp713
-rw-r--r--src/gpu/GrMemory.cpp27
-rw-r--r--src/gpu/GrPathRenderer.cpp41
-rw-r--r--src/gpu/GrPathRenderer.h229
-rw-r--r--src/gpu/GrPathRendererChain.cpp64
-rw-r--r--src/gpu/GrPathRendererChain.h63
-rw-r--r--src/gpu/GrPathUtils.cpp188
-rw-r--r--src/gpu/GrPathUtils.h50
-rw-r--r--src/gpu/GrPrintf_printf.cpp29
-rw-r--r--src/gpu/GrRectanizer.cpp123
-rw-r--r--src/gpu/GrRectanizer_fifo.cpp123
-rw-r--r--src/gpu/GrRedBlackTree.h1118
-rw-r--r--src/gpu/GrRenderTarget.cpp75
-rw-r--r--src/gpu/GrResource.cpp34
-rw-r--r--src/gpu/GrResourceCache.cpp376
-rw-r--r--src/gpu/GrResourceCache.h312
-rw-r--r--src/gpu/GrStencil.cpp376
-rw-r--r--src/gpu/GrStencilBuffer.cpp55
-rw-r--r--src/gpu/GrStencilBuffer.h106
-rw-r--r--src/gpu/GrTesselatedPathRenderer.cpp607
-rw-r--r--src/gpu/GrTesselatedPathRenderer.h33
-rw-r--r--src/gpu/GrTextContext.cpp314
-rw-r--r--src/gpu/GrTextStrike.cpp205
-rw-r--r--src/gpu/GrTextStrike_impl.h106
-rw-r--r--src/gpu/GrTexture.cpp41
-rw-r--r--src/gpu/GrVertexBuffer.h24
-rw-r--r--src/gpu/android/GrGLDefaultInterface_android.cpp120
-rw-r--r--src/gpu/app-android.cpp394
-rw-r--r--src/gpu/gr_hello_world.cpp37
-rw-r--r--src/gpu/gr_unittests.cpp231
-rw-r--r--src/gpu/ios/GrGLDefaultInterface_iOS.cpp152
-rw-r--r--src/gpu/ios/SkUIView.mm827
-rw-r--r--src/gpu/mac/GrGLDefaultInterface_mac.cpp170
-rw-r--r--src/gpu/mesa/GrGLDefaultInterface_mesa.cpp188
-rw-r--r--src/gpu/unix/GrGLDefaultInterface_unix.cpp191
-rw-r--r--src/gpu/win/GrGLDefaultInterface_win.cpp197
84 files changed, 26020 insertions, 0 deletions
diff --git a/src/gpu/FlingState.cpp b/src/gpu/FlingState.cpp
new file mode 100644
index 0000000000..050a810499
--- /dev/null
+++ b/src/gpu/FlingState.cpp
@@ -0,0 +1,127 @@
+
+/*
+ * Copyright 2010 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+
+#include "FlingState.h"
+#include "SkMatrix.h"
+#include "SkTime.h"
+
+#define DISCRETIZE_TRANSLATE_TO_AVOID_FLICKER true
+
+static const float MAX_FLING_SPEED = 1500;
+
+static float pin_max_fling(float speed) {
+ if (speed > MAX_FLING_SPEED) {
+ speed = MAX_FLING_SPEED;
+ }
+ return speed;
+}
+
+static double getseconds() {
+ return SkTime::GetMSecs() * 0.001;
+}
+
+// returns +1 or -1, depending on the sign of x
+// returns +1 if x is zero
+static SkScalar SkScalarSign(SkScalar x) {
+ SkScalar sign = SK_Scalar1;
+ if (x < 0) {
+ sign = -sign;
+ }
+ return sign;
+}
+
+static void unit_axis_align(SkVector* unit) {
+ const SkScalar TOLERANCE = SkDoubleToScalar(0.15);
+ if (SkScalarAbs(unit->fX) < TOLERANCE) {
+ unit->fX = 0;
+ unit->fY = SkScalarSign(unit->fY);
+ } else if (SkScalarAbs(unit->fY) < TOLERANCE) {
+ unit->fX = SkScalarSign(unit->fX);
+ unit->fY = 0;
+ }
+}
+
+void FlingState::reset(float sx, float sy) {
+ fActive = true;
+ fDirection.set(sx, sy);
+ fSpeed0 = SkPoint::Normalize(&fDirection);
+ fSpeed0 = pin_max_fling(fSpeed0);
+ fTime0 = getseconds();
+
+ unit_axis_align(&fDirection);
+// printf("---- speed %g dir %g %g\n", fSpeed0, fDirection.fX, fDirection.fY);
+}
+
+bool FlingState::evaluateMatrix(SkMatrix* matrix) {
+ if (!fActive) {
+ return false;
+ }
+
+ const float t = getseconds() - fTime0;
+ const float MIN_SPEED = 2;
+ const float K0 = 5.0;
+ const float K1 = 0.02;
+ const float speed = fSpeed0 * (sk_float_exp(- K0 * t) - K1);
+ if (speed <= MIN_SPEED) {
+ fActive = false;
+ return false;
+ }
+ float dist = (fSpeed0 - speed) / K0;
+
+// printf("---- time %g speed %g dist %g\n", t, speed, dist);
+ float tx = fDirection.fX * dist;
+ float ty = fDirection.fY * dist;
+ if (DISCRETIZE_TRANSLATE_TO_AVOID_FLICKER) {
+ tx = sk_float_round2int(tx);
+ ty = sk_float_round2int(ty);
+ }
+ matrix->setTranslate(tx, ty);
+// printf("---- evaluate (%g %g)\n", tx, ty);
+
+ return true;
+}
+
+////////////////////////////////////////
+
+GrAnimateFloat::GrAnimateFloat() : fTime0(0) {}
+
+void GrAnimateFloat::start(float v0, float v1, float duration) {
+ fValue0 = v0;
+ fValue1 = v1;
+ fDuration = duration;
+ if (duration > 0) {
+ fTime0 = SkTime::GetMSecs();
+ if (!fTime0) {
+ fTime0 = 1; // time0 is our sentinel
+ }
+ } else {
+ fTime0 = 0;
+ }
+}
+
+float GrAnimateFloat::evaluate() {
+ if (!fTime0) {
+ return fValue1;
+ }
+
+ double elapsed = (SkTime::GetMSecs() - fTime0) * 0.001;
+ if (elapsed >= fDuration) {
+ fTime0 = 0;
+ return fValue1;
+ }
+
+ double t = elapsed / fDuration;
+ if (true) {
+ t = (3 - 2 * t) * t * t;
+ }
+ return fValue0 + t * (fValue1 - fValue0);
+}
+
+
diff --git a/src/gpu/GrAAHairLinePathRenderer.cpp b/src/gpu/GrAAHairLinePathRenderer.cpp
new file mode 100644
index 0000000000..f16c8efa63
--- /dev/null
+++ b/src/gpu/GrAAHairLinePathRenderer.cpp
@@ -0,0 +1,732 @@
+
+/*
+ * Copyright 2011 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "GrAAHairLinePathRenderer.h"
+
+#include "GrContext.h"
+#include "GrGpu.h"
+#include "GrIndexBuffer.h"
+#include "GrPathUtils.h"
+#include "SkGeometry.h"
+#include "SkTemplates.h"
+
+namespace {
+// quadratics are rendered as 5-sided polys in order to bound the
+// AA stroke around the center-curve. See comments in push_quad_index_buffer and
+// bloat_quad.
+static const int kVertsPerQuad = 5;
+static const int kIdxsPerQuad = 9;
+
+static const int kVertsPerLineSeg = 4;
+static const int kIdxsPerLineSeg = 6;
+
+static const int kNumQuadsInIdxBuffer = 256;
+static const size_t kQuadIdxSBufize = kIdxsPerQuad *
+ sizeof(uint16_t) *
+ kNumQuadsInIdxBuffer;
+
+bool push_quad_index_data(GrIndexBuffer* qIdxBuffer) {
+ uint16_t* data = (uint16_t*) qIdxBuffer->lock();
+ bool tempData = NULL == data;
+ if (tempData) {
+ data = new uint16_t[kNumQuadsInIdxBuffer * kIdxsPerQuad];
+ }
+ for (int i = 0; i < kNumQuadsInIdxBuffer; ++i) {
+
+ // Each quadratic is rendered as a five sided polygon. This poly bounds
+ // the quadratic's bounding triangle but has been expanded so that the
+ // 1-pixel wide area around the curve is inside the poly.
+ // If a,b,c are the original control points then the poly a0,b0,c0,c1,a1
+ // that is rendered would look like this:
+ // b0
+ // b
+ //
+ // a0 c0
+ // a c
+ // a1 c1
+ // Each is drawn as three triagnles specified by these 9 indices:
+ int baseIdx = i * kIdxsPerQuad;
+ uint16_t baseVert = (uint16_t)(i * kVertsPerQuad);
+ data[0 + baseIdx] = baseVert + 0; // a0
+ data[1 + baseIdx] = baseVert + 1; // a1
+ data[2 + baseIdx] = baseVert + 2; // b0
+ data[3 + baseIdx] = baseVert + 2; // b0
+ data[4 + baseIdx] = baseVert + 4; // c1
+ data[5 + baseIdx] = baseVert + 3; // c0
+ data[6 + baseIdx] = baseVert + 1; // a1
+ data[7 + baseIdx] = baseVert + 4; // c1
+ data[8 + baseIdx] = baseVert + 2; // b0
+ }
+ if (tempData) {
+ bool ret = qIdxBuffer->updateData(data, kQuadIdxSBufize);
+ delete[] data;
+ return ret;
+ } else {
+ qIdxBuffer->unlock();
+ return true;
+ }
+}
+}
+
+GrPathRenderer* GrAAHairLinePathRenderer::Create(GrContext* context) {
+ const GrIndexBuffer* lIdxBuffer = context->getQuadIndexBuffer();
+ if (NULL == lIdxBuffer) {
+ return NULL;
+ }
+ GrGpu* gpu = context->getGpu();
+ GrIndexBuffer* qIdxBuf = gpu->createIndexBuffer(kQuadIdxSBufize, false);
+ SkAutoTUnref<GrIndexBuffer> qIdxBuffer(qIdxBuf);
+ if (NULL == qIdxBuf ||
+ !push_quad_index_data(qIdxBuf)) {
+ return NULL;
+ }
+ return new GrAAHairLinePathRenderer(context,
+ lIdxBuffer,
+ qIdxBuf);
+}
+
+GrAAHairLinePathRenderer::GrAAHairLinePathRenderer(
+ const GrContext* context,
+ const GrIndexBuffer* linesIndexBuffer,
+ const GrIndexBuffer* quadsIndexBuffer) {
+ fLinesIndexBuffer = linesIndexBuffer;
+ linesIndexBuffer->ref();
+ fQuadsIndexBuffer = quadsIndexBuffer;
+ quadsIndexBuffer->ref();
+ this->resetGeom();
+}
+
+GrAAHairLinePathRenderer::~GrAAHairLinePathRenderer() {
+ fLinesIndexBuffer->unref();
+ fQuadsIndexBuffer->unref();
+}
+
+bool GrAAHairLinePathRenderer::supportsAA(const GrDrawTarget* target,
+ const SkPath& path,
+ GrPathFill fill) const {
+ return kHairLine_PathFill == fill;
+}
+
+bool GrAAHairLinePathRenderer::canDrawPath(const GrDrawTarget* target,
+ const SkPath& path,
+ GrPathFill fill) const {
+ static const uint32_t gReqDerivMask = SkPath::kCubic_SegmentMask |
+ SkPath::kQuad_SegmentMask;
+ return (kHairLine_PathFill == fill &&
+ target->isAntialiasState() &&
+ (target->getCaps().fShaderDerivativeSupport ||
+ !(gReqDerivMask & path.getSegmentMasks())));
+}
+
+void GrAAHairLinePathRenderer::pathWillClear() {
+ this->resetGeom();
+}
+
+void GrAAHairLinePathRenderer::resetGeom() {
+ fPreviousStages = ~0;
+ fPreviousRTHeight = ~0;
+ fPreviousViewMatrix = GrMatrix::InvalidMatrix();
+ fLineSegmentCnt = 0;
+ fQuadCnt = 0;
+ if ((fQuadCnt || fLineSegmentCnt) && NULL != fTarget) {
+ fTarget->resetVertexSource();
+ }
+}
+
+namespace {
+
+typedef SkTArray<SkPoint, true> PtArray;
+#define PREALLOC_PTARRAY(N) SkSTArray<(N),SkPoint, true>
+typedef SkTArray<int, true> IntArray;
+
+/**
+ * We convert cubics to quadratics (for now).
+ */
+void convert_noninflect_cubic_to_quads(const SkPoint p[4],
+ SkScalar tolScale,
+ PtArray* quads,
+ int sublevel = 0) {
+ SkVector ab = p[1];
+ ab -= p[0];
+ SkVector dc = p[2];
+ dc -= p[3];
+
+ static const SkScalar gLengthScale = 3 * SK_Scalar1 / 2;
+ // base tolerance is 2 pixels in dev coords.
+ const SkScalar distanceSqdTol = SkScalarMul(tolScale, 2 * SK_Scalar1);
+ static const int kMaxSubdivs = 10;
+
+ ab.scale(gLengthScale);
+ dc.scale(gLengthScale);
+
+ SkVector c0 = p[0];
+ c0 += ab;
+ SkVector c1 = p[3];
+ c1 += dc;
+
+ SkScalar dSqd = c0.distanceToSqd(c1);
+ if (sublevel > kMaxSubdivs || dSqd <= distanceSqdTol) {
+ SkPoint cAvg = c0;
+ cAvg += c1;
+ cAvg.scale(SK_ScalarHalf);
+
+ SkPoint* pts = quads->push_back_n(3);
+ pts[0] = p[0];
+ pts[1] = cAvg;
+ pts[2] = p[3];
+
+ return;
+ } else {
+ SkPoint choppedPts[7];
+ SkChopCubicAtHalf(p, choppedPts);
+ convert_noninflect_cubic_to_quads(choppedPts + 0, tolScale,
+ quads, sublevel + 1);
+ convert_noninflect_cubic_to_quads(choppedPts + 3, tolScale,
+ quads, sublevel + 1);
+ }
+}
+
+void convert_cubic_to_quads(const SkPoint p[4],
+ SkScalar tolScale,
+ PtArray* quads) {
+ SkPoint chopped[13];
+ int count = SkChopCubicAtInflections(p, chopped);
+
+ for (int i = 0; i < count; ++i) {
+ SkPoint* cubic = chopped + 3*i;
+ convert_noninflect_cubic_to_quads(cubic, tolScale, quads);
+ }
+}
+
+// Takes 178th time of logf on Z600 / VC2010
+int get_float_exp(float x) {
+ GR_STATIC_ASSERT(sizeof(int) == sizeof(float));
+#if GR_DEBUG
+ static bool tested;
+ if (!tested) {
+ tested = true;
+ GrAssert(get_float_exp(0.25f) == -2);
+ GrAssert(get_float_exp(0.3f) == -2);
+ GrAssert(get_float_exp(0.5f) == -1);
+ GrAssert(get_float_exp(1.f) == 0);
+ GrAssert(get_float_exp(2.f) == 1);
+ GrAssert(get_float_exp(2.5f) == 1);
+ GrAssert(get_float_exp(8.f) == 3);
+ GrAssert(get_float_exp(100.f) == 6);
+ GrAssert(get_float_exp(1000.f) == 9);
+ GrAssert(get_float_exp(1024.f) == 10);
+ GrAssert(get_float_exp(3000000.f) == 21);
+ }
+#endif
+ const int* iptr = (const int*)&x;
+ return (((*iptr) & 0x7f800000) >> 23) - 127;
+}
+
+// we subdivide the quads to avoid huge overfill
+// if it returns -1 then should be drawn as lines
+int num_quad_subdivs(const SkPoint p[3]) {
+ static const SkScalar gDegenerateToLineTol = SK_Scalar1;
+ static const SkScalar gDegenerateToLineTolSqd =
+ SkScalarMul(gDegenerateToLineTol, gDegenerateToLineTol);
+
+ if (p[0].distanceToSqd(p[1]) < gDegenerateToLineTolSqd ||
+ p[1].distanceToSqd(p[2]) < gDegenerateToLineTolSqd) {
+ return -1;
+ }
+
+ GrScalar dsqd = p[1].distanceToLineBetweenSqd(p[0], p[2]);
+ if (dsqd < gDegenerateToLineTolSqd) {
+ return -1;
+ }
+
+ if (p[2].distanceToLineBetweenSqd(p[1], p[0]) < gDegenerateToLineTolSqd) {
+ return -1;
+ }
+
+ static const int kMaxSub = 4;
+ // tolerance of triangle height in pixels
+ // tuned on windows Quadro FX 380 / Z600
+ // trade off of fill vs cpu time on verts
+ // maybe different when do this using gpu (geo or tess shaders)
+ static const SkScalar gSubdivTol = 175 * SK_Scalar1;
+
+ if (dsqd <= gSubdivTol*gSubdivTol) {
+ return 0;
+ } else {
+ // subdividing the quad reduces d by 4. so we want x = log4(d/tol)
+ // = log4(d*d/tol*tol)/2
+ // = log2(d*d/tol*tol)
+
+#ifdef SK_SCALAR_IS_FLOAT
+ // +1 since we're ignoring the mantissa contribution.
+ int log = get_float_exp(dsqd/(gSubdivTol*gSubdivTol)) + 1;
+ log = GrMin(GrMax(0, log), kMaxSub);
+ return log;
+#else
+ SkScalar log = SkScalarLog(SkScalarDiv(dsqd,gSubdivTol*gSubdivTol));
+ static const SkScalar conv = SkScalarInvert(SkScalarLog(2));
+ log = SkScalarMul(log, conv);
+ return GrMin(GrMax(0, SkScalarCeilToInt(log)),kMaxSub);
+#endif
+ }
+}
+
+/**
+ * Generates the lines and quads to be rendered. Lines are always recorded in
+ * device space. We will do a device space bloat to account for the 1pixel
+ * thickness.
+ * Quads are recorded in device space unless m contains
+ * perspective, then in they are in src space. We do this because we will
+ * subdivide large quads to reduce over-fill. This subdivision has to be
+ * performed before applying the perspective matrix.
+ */
+int generate_lines_and_quads(const SkPath& path,
+ const SkMatrix& m,
+ const SkVector& translate,
+ GrIRect clip,
+ PtArray* lines,
+ PtArray* quads,
+ IntArray* quadSubdivCnts) {
+ SkPath::Iter iter(path, false);
+
+ int totalQuadCount = 0;
+ GrRect bounds;
+ GrIRect ibounds;
+
+ bool persp = m.hasPerspective();
+
+ for (;;) {
+ GrPoint pts[4];
+ GrPoint devPts[4];
+ GrPathCmd cmd = (GrPathCmd)iter.next(pts);
+ switch (cmd) {
+ case kMove_PathCmd:
+ break;
+ case kLine_PathCmd:
+ SkPoint::Offset(pts, 2, translate);
+ m.mapPoints(devPts, pts, 2);
+ bounds.setBounds(devPts, 2);
+ bounds.outset(SK_Scalar1, SK_Scalar1);
+ bounds.roundOut(&ibounds);
+ if (SkIRect::Intersects(clip, ibounds)) {
+ SkPoint* pts = lines->push_back_n(2);
+ pts[0] = devPts[0];
+ pts[1] = devPts[1];
+ }
+ break;
+ case kQuadratic_PathCmd:
+ SkPoint::Offset(pts, 3, translate);
+ m.mapPoints(devPts, pts, 3);
+ bounds.setBounds(devPts, 3);
+ bounds.outset(SK_Scalar1, SK_Scalar1);
+ bounds.roundOut(&ibounds);
+ if (SkIRect::Intersects(clip, ibounds)) {
+ int subdiv = num_quad_subdivs(devPts);
+ GrAssert(subdiv >= -1);
+ if (-1 == subdiv) {
+ SkPoint* pts = lines->push_back_n(4);
+ pts[0] = devPts[0];
+ pts[1] = devPts[1];
+ pts[2] = devPts[1];
+ pts[3] = devPts[2];
+ } else {
+ // when in perspective keep quads in src space
+ SkPoint* qPts = persp ? pts : devPts;
+ SkPoint* pts = quads->push_back_n(3);
+ pts[0] = qPts[0];
+ pts[1] = qPts[1];
+ pts[2] = qPts[2];
+ quadSubdivCnts->push_back() = subdiv;
+ totalQuadCount += 1 << subdiv;
+ }
+ }
+ break;
+ case kCubic_PathCmd:
+ SkPoint::Offset(pts, 4, translate);
+ m.mapPoints(devPts, pts, 4);
+ bounds.setBounds(devPts, 4);
+ bounds.outset(SK_Scalar1, SK_Scalar1);
+ bounds.roundOut(&ibounds);
+ if (SkIRect::Intersects(clip, ibounds)) {
+ PREALLOC_PTARRAY(32) q;
+ // in perspective have to do conversion in src space
+ if (persp) {
+ SkScalar tolScale =
+ GrPathUtils::scaleToleranceToSrc(SK_Scalar1, m,
+ path.getBounds());
+ convert_cubic_to_quads(pts, tolScale, &q);
+ } else {
+ convert_cubic_to_quads(devPts, SK_Scalar1, &q);
+ }
+ for (int i = 0; i < q.count(); i += 3) {
+ SkPoint* qInDevSpace;
+ // bounds has to be calculated in device space, but q is
+ // in src space when there is perspective.
+ if (persp) {
+ m.mapPoints(devPts, &q[i], 3);
+ bounds.setBounds(devPts, 3);
+ qInDevSpace = devPts;
+ } else {
+ bounds.setBounds(&q[i], 3);
+ qInDevSpace = &q[i];
+ }
+ bounds.outset(SK_Scalar1, SK_Scalar1);
+ bounds.roundOut(&ibounds);
+ if (SkIRect::Intersects(clip, ibounds)) {
+ int subdiv = num_quad_subdivs(qInDevSpace);
+ GrAssert(subdiv >= -1);
+ if (-1 == subdiv) {
+ SkPoint* pts = lines->push_back_n(4);
+ // lines should always be in device coords
+ pts[0] = qInDevSpace[0];
+ pts[1] = qInDevSpace[1];
+ pts[2] = qInDevSpace[1];
+ pts[3] = qInDevSpace[2];
+ } else {
+ SkPoint* pts = quads->push_back_n(3);
+ // q is already in src space when there is no
+ // perspective and dev coords otherwise.
+ pts[0] = q[0 + i];
+ pts[1] = q[1 + i];
+ pts[2] = q[2 + i];
+ quadSubdivCnts->push_back() = subdiv;
+ totalQuadCount += 1 << subdiv;
+ }
+ }
+ }
+ }
+ break;
+ case kClose_PathCmd:
+ break;
+ case kEnd_PathCmd:
+ return totalQuadCount;
+ }
+ }
+}
+
+struct Vertex {
+ GrPoint fPos;
+ union {
+ struct {
+ GrScalar fA;
+ GrScalar fB;
+ GrScalar fC;
+ } fLine;
+ GrVec fQuadCoord;
+ struct {
+ GrScalar fBogus[4];
+ };
+ };
+};
+GR_STATIC_ASSERT(sizeof(Vertex) == 3 * sizeof(GrPoint));
+
+void intersect_lines(const SkPoint& ptA, const SkVector& normA,
+ const SkPoint& ptB, const SkVector& normB,
+ SkPoint* result) {
+
+ SkScalar lineAW = -normA.dot(ptA);
+ SkScalar lineBW = -normB.dot(ptB);
+
+ SkScalar wInv = SkScalarMul(normA.fX, normB.fY) -
+ SkScalarMul(normA.fY, normB.fX);
+ wInv = SkScalarInvert(wInv);
+
+ result->fX = SkScalarMul(normA.fY, lineBW) - SkScalarMul(lineAW, normB.fY);
+ result->fX = SkScalarMul(result->fX, wInv);
+
+ result->fY = SkScalarMul(lineAW, normB.fX) - SkScalarMul(normA.fX, lineBW);
+ result->fY = SkScalarMul(result->fY, wInv);
+}
+
+void bloat_quad(const SkPoint qpts[3], const GrMatrix* toDevice,
+ const GrMatrix* toSrc, Vertex verts[kVertsPerQuad]) {
+ GrAssert(!toDevice == !toSrc);
+ // original quad is specified by tri a,b,c
+ SkPoint a = qpts[0];
+ SkPoint b = qpts[1];
+ SkPoint c = qpts[2];
+
+ // compute a matrix that goes from device coords to U,V quad params
+ // this should be in the src space, not dev coords, when we have perspective
+ SkMatrix DevToUV;
+ DevToUV.setAll(a.fX, b.fX, c.fX,
+ a.fY, b.fY, c.fY,
+ SK_Scalar1, SK_Scalar1, SK_Scalar1);
+ DevToUV.invert(&DevToUV);
+ // can't make this static, no cons :(
+ SkMatrix UVpts;
+ UVpts.setAll(0, SK_ScalarHalf, SK_Scalar1,
+ 0, 0, SK_Scalar1,
+ SK_Scalar1, SK_Scalar1, SK_Scalar1);
+ DevToUV.postConcat(UVpts);
+
+ // We really want to avoid perspective matrix muls.
+ // These may wind up really close to zero
+ DevToUV.setPerspX(0);
+ DevToUV.setPerspY(0);
+
+ if (toDevice) {
+ toDevice->mapPoints(&a, 1);
+ toDevice->mapPoints(&b, 1);
+ toDevice->mapPoints(&c, 1);
+ }
+ // make a new poly where we replace a and c by a 1-pixel wide edges orthog
+ // to edges ab and bc:
+ //
+ // before | after
+ // | b0
+ // b |
+ // |
+ // | a0 c0
+ // a c | a1 c1
+ //
+ // edges a0->b0 and b0->c0 are parallel to original edges a->b and b->c,
+ // respectively.
+ Vertex& a0 = verts[0];
+ Vertex& a1 = verts[1];
+ Vertex& b0 = verts[2];
+ Vertex& c0 = verts[3];
+ Vertex& c1 = verts[4];
+
+ SkVector ab = b;
+ ab -= a;
+ SkVector ac = c;
+ ac -= a;
+ SkVector cb = b;
+ cb -= c;
+
+ // We should have already handled degenerates
+ GrAssert(ab.length() > 0 && cb.length() > 0);
+
+ ab.normalize();
+ SkVector abN;
+ abN.setOrthog(ab, SkVector::kLeft_Side);
+ if (abN.dot(ac) > 0) {
+ abN.negate();
+ }
+
+ cb.normalize();
+ SkVector cbN;
+ cbN.setOrthog(cb, SkVector::kLeft_Side);
+ if (cbN.dot(ac) < 0) {
+ cbN.negate();
+ }
+
+ a0.fPos = a;
+ a0.fPos += abN;
+ a1.fPos = a;
+ a1.fPos -= abN;
+
+ c0.fPos = c;
+ c0.fPos += cbN;
+ c1.fPos = c;
+ c1.fPos -= cbN;
+
+ intersect_lines(a0.fPos, abN, c0.fPos, cbN, &b0.fPos);
+
+ if (toSrc) {
+ toSrc->mapPointsWithStride(&verts[0].fPos, sizeof(Vertex), kVertsPerQuad);
+ }
+ DevToUV.mapPointsWithStride(&verts[0].fQuadCoord,
+ &verts[0].fPos, sizeof(Vertex), kVertsPerQuad);
+}
+
+void add_quads(const SkPoint p[3],
+ int subdiv,
+ const GrMatrix* toDevice,
+ const GrMatrix* toSrc,
+ Vertex** vert) {
+ GrAssert(subdiv >= 0);
+ if (subdiv) {
+ SkPoint newP[5];
+ SkChopQuadAtHalf(p, newP);
+ add_quads(newP + 0, subdiv-1, toDevice, toSrc, vert);
+ add_quads(newP + 2, subdiv-1, toDevice, toSrc, vert);
+ } else {
+ bloat_quad(p, toDevice, toSrc, *vert);
+ *vert += kVertsPerQuad;
+ }
+}
+
+void add_line(const SkPoint p[2],
+ int rtHeight,
+ const SkMatrix* toSrc,
+ Vertex** vert) {
+ const SkPoint& a = p[0];
+ const SkPoint& b = p[1];
+
+ SkVector orthVec = b;
+ orthVec -= a;
+
+ if (orthVec.setLength(SK_Scalar1)) {
+ orthVec.setOrthog(orthVec);
+
+ // the values we pass down to the frag shader
+ // have to be in y-points-up space;
+ SkVector normal;
+ normal.fX = orthVec.fX;
+ normal.fY = -orthVec.fY;
+ SkPoint aYDown;
+ aYDown.fX = a.fX;
+ aYDown.fY = rtHeight - a.fY;
+
+ SkScalar lineC = -(aYDown.dot(normal));
+ for (int i = 0; i < kVertsPerLineSeg; ++i) {
+ (*vert)[i].fPos = (i < 2) ? a : b;
+ if (0 == i || 3 == i) {
+ (*vert)[i].fPos -= orthVec;
+ } else {
+ (*vert)[i].fPos += orthVec;
+ }
+ (*vert)[i].fLine.fA = normal.fX;
+ (*vert)[i].fLine.fB = normal.fY;
+ (*vert)[i].fLine.fC = lineC;
+ }
+ if (NULL != toSrc) {
+ toSrc->mapPointsWithStride(&(*vert)->fPos,
+ sizeof(Vertex),
+ kVertsPerLineSeg);
+ }
+ } else {
+ // just make it degenerate and likely offscreen
+ (*vert)[0].fPos.set(SK_ScalarMax, SK_ScalarMax);
+ (*vert)[1].fPos.set(SK_ScalarMax, SK_ScalarMax);
+ (*vert)[2].fPos.set(SK_ScalarMax, SK_ScalarMax);
+ (*vert)[3].fPos.set(SK_ScalarMax, SK_ScalarMax);
+ }
+
+ *vert += kVertsPerLineSeg;
+}
+
+}
+
+bool GrAAHairLinePathRenderer::createGeom(GrDrawTarget::StageBitfield stages) {
+
+ int rtHeight = fTarget->getRenderTarget()->height();
+
+ GrIRect clip;
+ if (fTarget->getClip().hasConservativeBounds()) {
+ GrRect clipRect = fTarget->getClip().getConservativeBounds();
+ clipRect.roundOut(&clip);
+ } else {
+ clip.setLargest();
+ }
+
+ // If none of the inputs that affect generation of path geometry have
+ // have changed since last previous path draw then we can reuse the
+ // previous geoemtry.
+ if (stages == fPreviousStages &&
+ fPreviousViewMatrix == fTarget->getViewMatrix() &&
+ fPreviousTranslate == fTranslate &&
+ rtHeight == fPreviousRTHeight &&
+ fClipRect == clip) {
+ return true;
+ }
+
+ GrVertexLayout layout = GrDrawTarget::kEdge_VertexLayoutBit;
+ for (int s = 0; s < GrDrawTarget::kNumStages; ++s) {
+ if ((1 << s) & stages) {
+ layout |= GrDrawTarget::StagePosAsTexCoordVertexLayoutBit(s);
+ }
+ }
+
+ GrMatrix viewM = fTarget->getViewMatrix();
+
+ PREALLOC_PTARRAY(128) lines;
+ PREALLOC_PTARRAY(128) quads;
+ IntArray qSubdivs;
+ fQuadCnt = generate_lines_and_quads(*fPath, viewM, fTranslate, clip,
+ &lines, &quads, &qSubdivs);
+
+ fLineSegmentCnt = lines.count() / 2;
+ int vertCnt = kVertsPerLineSeg * fLineSegmentCnt + kVertsPerQuad * fQuadCnt;
+
+ GrAssert(sizeof(Vertex) == GrDrawTarget::VertexSize(layout));
+
+ Vertex* verts;
+ if (!fTarget->reserveVertexSpace(layout, vertCnt, (void**)&verts)) {
+ return false;
+ }
+ Vertex* base = verts;
+
+ const GrMatrix* toDevice = NULL;
+ const GrMatrix* toSrc = NULL;
+ GrMatrix ivm;
+
+ if (viewM.hasPerspective()) {
+ if (viewM.invert(&ivm)) {
+ toDevice = &viewM;
+ toSrc = &ivm;
+ }
+ }
+
+ for (int i = 0; i < fLineSegmentCnt; ++i) {
+ add_line(&lines[2*i], rtHeight, toSrc, &verts);
+ }
+
+ int unsubdivQuadCnt = quads.count() / 3;
+ for (int i = 0; i < unsubdivQuadCnt; ++i) {
+ GrAssert(qSubdivs[i] >= 0);
+ add_quads(&quads[3*i], qSubdivs[i], toDevice, toSrc, &verts);
+ }
+
+ fPreviousStages = stages;
+ fPreviousViewMatrix = fTarget->getViewMatrix();
+ fPreviousRTHeight = rtHeight;
+ fClipRect = clip;
+ fPreviousTranslate = fTranslate;
+ return true;
+}
+
+void GrAAHairLinePathRenderer::drawPath(GrDrawTarget::StageBitfield stages) {
+
+ if (!this->createGeom(stages)) {
+ return;
+ }
+
+ GrDrawTarget::AutoStateRestore asr;
+ if (!fTarget->getViewMatrix().hasPerspective()) {
+ asr.set(fTarget);
+ GrMatrix ivm;
+ if (fTarget->getViewInverse(&ivm)) {
+ fTarget->preConcatSamplerMatrices(stages, ivm);
+ }
+ fTarget->setViewMatrix(GrMatrix::I());
+ }
+
+ // TODO: See whether rendering lines as degenerate quads improves perf
+ // when we have a mix
+ fTarget->setIndexSourceToBuffer(fLinesIndexBuffer);
+ int lines = 0;
+ int nBufLines = fLinesIndexBuffer->maxQuads();
+ while (lines < fLineSegmentCnt) {
+ int n = GrMin(fLineSegmentCnt-lines, nBufLines);
+ fTarget->setVertexEdgeType(GrDrawTarget::kHairLine_EdgeType);
+ fTarget->drawIndexed(kTriangles_PrimitiveType,
+ kVertsPerLineSeg*lines, // startV
+ 0, // startI
+ kVertsPerLineSeg*n, // vCount
+ kIdxsPerLineSeg*n); // iCount
+ lines += n;
+ }
+
+ fTarget->setIndexSourceToBuffer(fQuadsIndexBuffer);
+ int quads = 0;
+ while (quads < fQuadCnt) {
+ int n = GrMin(fQuadCnt-quads, kNumQuadsInIdxBuffer);
+ fTarget->setVertexEdgeType(GrDrawTarget::kHairQuad_EdgeType);
+ fTarget->drawIndexed(kTriangles_PrimitiveType,
+ 4*fLineSegmentCnt + kVertsPerQuad*quads, // startV
+ 0, // startI
+ kVertsPerQuad*n, // vCount
+ kIdxsPerQuad*n); // iCount
+ quads += n;
+ }
+
+}
+
diff --git a/src/gpu/GrAAHairLinePathRenderer.h b/src/gpu/GrAAHairLinePathRenderer.h
new file mode 100644
index 0000000000..81cecf2c68
--- /dev/null
+++ b/src/gpu/GrAAHairLinePathRenderer.h
@@ -0,0 +1,61 @@
+
+/*
+ * Copyright 2011 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrAAHairLinePathRenderer_DEFINED
+#define GrAAHairLinePathRenderer_DEFINED
+
+#include "GrPathRenderer.h"
+
+class GrAAHairLinePathRenderer : public GrPathRenderer {
+public:
+ virtual ~GrAAHairLinePathRenderer();
+
+ static GrPathRenderer* Create(GrContext* context);
+ // GrPathRenderer overrides
+ virtual bool supportsAA(const GrDrawTarget* target,
+ const SkPath& path,
+ GrPathFill fill) const;
+ virtual bool canDrawPath(const GrDrawTarget* target,
+ const SkPath& path,
+ GrPathFill fill) const;
+ virtual void drawPath(GrDrawTarget::StageBitfield stages);
+
+protected:
+
+ // GrPathRenderer overrides
+ virtual void pathWillClear();
+
+private:
+ void resetGeom();
+
+ GrAAHairLinePathRenderer(const GrContext* context,
+ const GrIndexBuffer* fLinesIndexBuffer,
+ const GrIndexBuffer* fQuadsIndexBuffer);
+
+ bool createGeom(GrDrawTarget::StageBitfield stages);
+
+ const GrIndexBuffer* fLinesIndexBuffer;
+ const GrIndexBuffer* fQuadsIndexBuffer;
+
+ // have to recreate geometry if stages in use changes :(
+ GrDrawTarget::StageBitfield fPreviousStages;
+ int fPreviousRTHeight;
+ SkVector fPreviousTranslate;
+ GrIRect fClipRect;
+
+ // this path renderer draws everything in device coordinates
+ GrMatrix fPreviousViewMatrix;
+ int fLineSegmentCnt;
+ int fQuadCnt;
+
+ typedef GrPathRenderer INHERITED;
+};
+
+
+#endif
+
diff --git a/src/gpu/GrAddPathRenderers_aahairline.cpp b/src/gpu/GrAddPathRenderers_aahairline.cpp
new file mode 100644
index 0000000000..a7df66e980
--- /dev/null
+++ b/src/gpu/GrAddPathRenderers_aahairline.cpp
@@ -0,0 +1,20 @@
+
+/*
+ * Copyright 2011 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+#include "GrAAHairLinePathRenderer.h"
+
+void GrPathRenderer::AddPathRenderers(GrContext* ctx,
+ GrPathRendererChain::UsageFlags flags,
+ GrPathRendererChain* chain) {
+ if (!(GrPathRendererChain::kNonAAOnly_UsageFlag & flags)) {
+ if (GrPathRenderer* pr = GrAAHairLinePathRenderer::Create(ctx)) {
+ chain->addPathRenderer(pr)->unref();
+ }
+ }
+}
diff --git a/src/gpu/GrAddPathRenderers_none.cpp b/src/gpu/GrAddPathRenderers_none.cpp
new file mode 100644
index 0000000000..46855db7ff
--- /dev/null
+++ b/src/gpu/GrAddPathRenderers_none.cpp
@@ -0,0 +1,15 @@
+
+/*
+ * Copyright 2011 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+#include "GrPathRenderer.h"
+
+
+void GrPathRenderer::AddPathRenderers(GrContext*,
+ GrPathRendererChain::UsageFlags,
+ GrPathRendererChain*) {}
diff --git a/src/gpu/GrAddPathRenderers_tesselated.cpp b/src/gpu/GrAddPathRenderers_tesselated.cpp
new file mode 100644
index 0000000000..a1cde13155
--- /dev/null
+++ b/src/gpu/GrAddPathRenderers_tesselated.cpp
@@ -0,0 +1,17 @@
+
+/*
+ * Copyright 2011 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+#include "GrTesselatedPathRenderer.h"
+
+
+void GrPathRenderer::AddPathRenderers(GrContext*,
+ GrPathRendererChain::UsageFlags flags,
+ GrPathRendererChain* chain) {
+ chain->addPathRenderer(new GrTesselatedPathRenderer())->unref();
+}
diff --git a/src/gpu/GrAllocPool.cpp b/src/gpu/GrAllocPool.cpp
new file mode 100644
index 0000000000..ecd2acfa05
--- /dev/null
+++ b/src/gpu/GrAllocPool.cpp
@@ -0,0 +1,120 @@
+
+/*
+ * Copyright 2010 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+
+#include "GrAllocPool.h"
+
+#define GrAllocPool_MIN_BLOCK_SIZE ((size_t)128)
+
// A block is a header immediately followed by its storage: Create() mallocs
// sizeof(Block) + size bytes and points fPtr at the first byte past the
// header. alloc() bumps fPtr forward; release() backs it up in LIFO order.
struct GrAllocPool::Block {
    Block* fNext;        // next (older) block in the pool's chain
    char* fPtr;          // cursor: start of the unallocated tail
    size_t fBytesFree;   // bytes remaining between fPtr and the block's end
    size_t fBytesTotal;  // capacity of this block's storage

    // Malloc a block with `size` bytes of storage, chained in front of `next`.
    static Block* Create(size_t size, Block* next) {
        GrAssert(size >= GrAllocPool_MIN_BLOCK_SIZE);

        Block* block = (Block*)GrMalloc(sizeof(Block) + size);
        block->fNext = next;
        block->fPtr = (char*)block + sizeof(Block);
        block->fBytesFree = size;
        block->fBytesTotal = size;
        return block;
    }

    bool canAlloc(size_t bytes) const {
        return bytes <= fBytesFree;
    }

    // Caller must check canAlloc() first. No alignment padding is applied.
    void* alloc(size_t bytes) {
        GrAssert(bytes <= fBytesFree);
        fBytesFree -= bytes;
        void* ptr = fPtr;
        fPtr += bytes;
        return ptr;
    }

    // Gives back up to `bytes` of the most recent allocations from this
    // block; returns how many bytes remain to be released from older blocks.
    size_t release(size_t bytes) {
        GrAssert(bytes > 0);
        size_t free = GrMin(bytes, fBytesTotal - fBytesFree);
        fBytesFree += free;
        fPtr -= free;
        return bytes - free;
    }

    // True when nothing is currently allocated from this block.
    bool empty() const { return fBytesTotal == fBytesFree; }
};
+
+///////////////////////////////////////////////////////////////////////////////
+
+GrAllocPool::GrAllocPool(size_t blockSize) {
+ fBlock = NULL;
+ fMinBlockSize = GrMax(blockSize, GrAllocPool_MIN_BLOCK_SIZE);
+ GR_DEBUGCODE(fBlocksAllocated = 0;)
+}
+
// Frees every outstanding block.
GrAllocPool::~GrAllocPool() {
    this->reset();
}
+
+void GrAllocPool::reset() {
+ this->validate();
+
+ Block* block = fBlock;
+ while (block) {
+ Block* next = block->fNext;
+ GrFree(block);
+ block = next;
+ }
+ fBlock = NULL;
+ GR_DEBUGCODE(fBlocksAllocated = 0;)
+}
+
+void* GrAllocPool::alloc(size_t size) {
+ this->validate();
+
+ if (!fBlock || !fBlock->canAlloc(size)) {
+ size_t blockSize = GrMax(fMinBlockSize, size);
+ fBlock = Block::Create(blockSize, fBlock);
+ GR_DEBUGCODE(fBlocksAllocated += 1;)
+ }
+ return fBlock->alloc(size);
+}
+
// Returns the most recently allocated `bytes` bytes to the pool (LIFO),
// freeing any block that becomes completely empty along the way.
void GrAllocPool::release(size_t bytes) {
    this->validate();

    while (bytes && NULL != fBlock) {
        // fBlock->release returns the bytes it could not satisfy.
        bytes = fBlock->release(bytes);
        if (fBlock->empty()) {
            Block* next = fBlock->fNext;
            GrFree(fBlock);
            fBlock = next;
            GR_DEBUGCODE(fBlocksAllocated -= 1;)
        }
    }
}
+
+
#if GR_DEBUG

// Debug-only: walks the block chain and checks that its length matches the
// bookkeeping count of blocks allocated.
void GrAllocPool::validate() const {
    Block* block = fBlock;
    int count = 0;
    while (block) {
        count += 1;
        block = block->fNext;
    }
    GrAssert(fBlocksAllocated == count);
}

#endif
+
+
diff --git a/src/gpu/GrAtlas.cpp b/src/gpu/GrAtlas.cpp
new file mode 100644
index 0000000000..0838895a13
--- /dev/null
+++ b/src/gpu/GrAtlas.cpp
@@ -0,0 +1,200 @@
+
+/*
+ * Copyright 2010 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+
+#include "GrAtlas.h"
+#include "GrGpu.h"
+#include "GrRectanizer.h"
+#include "GrPlotMgr.h"
+
+#if 0
+#define GR_PLOT_WIDTH 8
+#define GR_PLOT_HEIGHT 4
+#define GR_ATLAS_WIDTH 256
+#define GR_ATLAS_HEIGHT 256
+
+#define GR_ATLAS_TEXTURE_WIDTH (GR_PLOT_WIDTH * GR_ATLAS_WIDTH)
+#define GR_ATLAS_TEXTURE_HEIGHT (GR_PLOT_HEIGHT * GR_ATLAS_HEIGHT)
+
+#else
+
+#define GR_ATLAS_TEXTURE_WIDTH 1024
+#define GR_ATLAS_TEXTURE_HEIGHT 2048
+
+#define GR_ATLAS_WIDTH 341
+#define GR_ATLAS_HEIGHT 341
+
+#define GR_PLOT_WIDTH (GR_ATLAS_TEXTURE_WIDTH / GR_ATLAS_WIDTH)
+#define GR_PLOT_HEIGHT (GR_ATLAS_TEXTURE_HEIGHT / GR_ATLAS_HEIGHT)
+
+#endif
+
+///////////////////////////////////////////////////////////////////////////////
+
+#define BORDER 1
+
+#if GR_DEBUG
+ static int gCounter;
+#endif
+
// One GrAtlas manages a single plot (plotX, plotY) of the shared per-format
// atlas texture. Both the manager and the texture are borrowed pointers.
GrAtlas::GrAtlas(GrAtlasMgr* mgr, int plotX, int plotY, GrMaskFormat format) {
    fAtlasMgr = mgr; // just a pointer, not an owner
    fNext = NULL;
    fTexture = mgr->getTexture(format); // we're not an owner, just a pointer
    fPlot.set(plotX, plotY);

    // The rectanizer packs sub-images within this plot; one BORDER texel is
    // reserved (addSubImage pads each entry with a border).
    fRects = GrRectanizer::Factory(GR_ATLAS_WIDTH - BORDER,
                                   GR_ATLAS_HEIGHT - BORDER);

    fMaskFormat = format;

#if GR_DEBUG
// GrPrintf(" GrAtlas %p [%d %d] %d\n", this, plotX, plotY, gCounter);
    gCounter += 1;   // debug-only count of live GrAtlas instances
#endif
}
+
// Returns this plot to the manager and destroys the rectanizer. The atlas
// texture is owned by the manager and is not touched here.
GrAtlas::~GrAtlas() {
    fAtlasMgr->freePlot(fPlot.fX, fPlot.fY);

    delete fRects;

#if GR_DEBUG
    --gCounter;
// GrPrintf("~GrAtlas %p [%d %d] %d\n", this, fPlot.fX, fPlot.fY, gCounter);
#endif
}
+
+static void adjustForPlot(GrIPoint16* loc, const GrIPoint16& plot) {
+ loc->fX += plot.fX * GR_ATLAS_WIDTH;
+ loc->fY += plot.fY * GR_ATLAS_HEIGHT;
+}
+
// Writes `count` zero bytes starting at `ptr`; returns the address just past
// the last byte written.
static uint8_t* zerofill(uint8_t* ptr, int count) {
    for (int i = 0; i < count; ++i) {
        ptr[i] = 0;
    }
    return ptr + count;
}
+
// Copies a width x height image into this atlas's plot, surrounded by a
// BORDER-texel zeroed frame, and returns (in *loc) the texture position of
// the image's top-left texel (inside the border). Returns false when the
// plot has no room left.
bool GrAtlas::addSubImage(int width, int height, const void* image,
                          GrIPoint16* loc) {
    // Reserve width+BORDER x height+BORDER: adjacent entries each carry
    // their own leading border, so only one border per axis is reserved.
    // NOTE(review): the upload below is width+2*BORDER wide/tall — assumes
    // the rectanizer's spacing leaves the trailing texel writable; confirm.
    if (!fRects->addRect(width + BORDER, height + BORDER, loc)) {
        return false;
    }

    SkAutoSMalloc<1024> storage;
    int dstW = width + 2*BORDER;
    int dstH = height + 2*BORDER;
    if (BORDER) {
        // Build a zero-bordered copy of the source image in temp storage.
        const int bpp = GrMaskFormatBytesPerPixel(fMaskFormat);
        const size_t dstRB = dstW * bpp;
        uint8_t* dst = (uint8_t*)storage.reset(dstH * dstRB);
        Gr_bzero(dst, dstRB); // zero top row
        dst += dstRB;
        for (int y = 0; y < height; y++) {
            dst = zerofill(dst, bpp); // zero left edge
            memcpy(dst, image, width * bpp);
            dst += width * bpp;
            dst = zerofill(dst, bpp); // zero right edge
            image = (const void*)((const char*)image + width * bpp);
        }
        Gr_bzero(dst, dstRB); // zero bottom row
        image = storage.get();
    }
    // Convert the plot-local location to texture coordinates and upload.
    adjustForPlot(loc, fPlot);
    fTexture->uploadTextureData(loc->fX, loc->fY, dstW, dstH, image, 0);

    // now tell the caller to skip the top/left BORDER
    loc->fX += BORDER;
    loc->fY += BORDER;
    return true;
}
+
+///////////////////////////////////////////////////////////////////////////////
+
// The manager refs the GrGpu it allocates from. One atlas texture per mask
// format is created lazily in addToAtlas(); the array starts zeroed.
GrAtlasMgr::GrAtlasMgr(GrGpu* gpu) {
    fGpu = gpu;
    gpu->ref();
    Gr_bzero(fTexture, sizeof(fTexture));
    fPlotMgr = new GrPlotMgr(GR_PLOT_WIDTH, GR_PLOT_HEIGHT);
}
+
// Unrefs the per-format atlas textures, the plot manager, and the gpu.
// (GrAtlas instances return their plots via freePlot() from their dtor.)
GrAtlasMgr::~GrAtlasMgr() {
    for (size_t i = 0; i < GR_ARRAY_COUNT(fTexture); i++) {
        GrSafeUnref(fTexture[i]);   // entries may still be NULL
    }
    delete fPlotMgr;
    fGpu->unref();
}
+
+static GrPixelConfig maskformat2pixelconfig(GrMaskFormat format) {
+ switch (format) {
+ case kA8_GrMaskFormat:
+ return kAlpha_8_GrPixelConfig;
+ case kA565_GrMaskFormat:
+ return kRGB_565_GrPixelConfig;
+ case kA888_GrMaskFormat:
+ return kRGBA_8888_GrPixelConfig;
+ default:
+ GrAssert(!"unknown maskformat");
+ }
+ return kUnknown_GrPixelConfig;
+}
+
// Adds a sub-image to `atlas` if it has room; otherwise claims a new plot
// (lazily creating the format's backing texture on first use), builds a new
// GrAtlas for it chained in front of `atlas`, and adds the image there.
// Returns the atlas that received the image, or NULL when no plot, texture,
// or space is available.
GrAtlas* GrAtlasMgr::addToAtlas(GrAtlas* atlas,
                                int width, int height, const void* image,
                                GrMaskFormat format,
                                GrIPoint16* loc) {
    GrAssert(NULL == atlas || atlas->getMaskFormat() == format);

    if (atlas && atlas->addSubImage(width, height, image, loc)) {
        return atlas;
    }

    // If the above fails, then either we have no starting atlas, or the current
    // one is full. Either way we need to allocate a new atlas

    GrIPoint16 plot;
    if (!fPlotMgr->newPlot(&plot)) {
        return NULL;
    }

    // fTexture is indexed directly by the mask-format enum values.
    GrAssert(0 == kA8_GrMaskFormat);
    GrAssert(1 == kA565_GrMaskFormat);
    if (NULL == fTexture[format]) {
        GrTextureDesc desc = {
            kDynamicUpdate_GrTextureFlagBit,
            kNone_GrAALevel,
            GR_ATLAS_TEXTURE_WIDTH,
            GR_ATLAS_TEXTURE_HEIGHT,
            maskformat2pixelconfig(format)
        };
        fTexture[format] = fGpu->createTexture(desc, NULL, 0);
        if (NULL == fTexture[format]) {
            // NOTE(review): the plot claimed by newPlot() above is not
            // returned to fPlotMgr on this path — looks leaked; confirm.
            return NULL;
        }
    }

    GrAtlas* newAtlas = new GrAtlas(this, plot.fX, plot.fY, format);
    if (!newAtlas->addSubImage(width, height, image, loc)) {
        // ~GrAtlas returns the plot via freePlot().
        delete newAtlas;
        return NULL;
    }

    // The new atlas becomes the head of the caller's chain.
    newAtlas->fNext = atlas;
    return newAtlas;
}
+
// Returns a plot to the free pool; called from ~GrAtlas.
void GrAtlasMgr::freePlot(int x, int y) {
    GrAssert(fPlotMgr->isBusy(x, y));
    fPlotMgr->freePlot(x, y);
}
+
+
diff --git a/src/gpu/GrBinHashKey.h b/src/gpu/GrBinHashKey.h
new file mode 100644
index 0000000000..ceaef7aa6f
--- /dev/null
+++ b/src/gpu/GrBinHashKey.h
@@ -0,0 +1,94 @@
+
+/*
+ * Copyright 2011 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+#ifndef GrBinHashKey_DEFINED
+#define GrBinHashKey_DEFINED
+
+#include "GrTypes.h"
+
/**
 * Hash function class that can take a data chunk of any predetermined
 * length. The hash function used is the One-at-a-Time Hash
 * (http://burtleburtle.net/bob/hash/doobs.html).
 *
 * KeySize is in bytes and must be a multiple of 4 (asserted in
 * setKeyData()). Entry is the hash-table entry type used by the static
 * EQ()/LT() helpers; it must provide a compare(key) method.
 */
template<typename Entry, size_t KeySize>
class GrBinHashKey {
public:
    GrBinHashKey()
        : fHash(0)
#if GR_DEBUG
        , fIsValid(false)
#endif
    {}

    GrBinHashKey(const GrBinHashKey<Entry, KeySize>& other) {
        *this = other;
    }
    // Bitwise copy is used: the object holds only trivially-copyable data
    // (hash, raw key bytes, and the debug flag).
    GrBinHashKey<Entry, KeySize>& operator=(const GrBinHashKey<Entry,
                                            KeySize>& other) {
        memcpy(this, &other, sizeof(*this));
        return *this;
    }

    ~GrBinHashKey() {
    }

    // Copies KeySize bytes from `data` and computes the one-at-a-time hash
    // over them. Must be called before compare()/getHash() (debug-checked
    // via fIsValid).
    void setKeyData(const uint32_t *data) {
        GrAssert(GrIsALIGN4(KeySize));
        memcpy(&fData, data, KeySize);

        fHash = 0;
        size_t len = KeySize;
        while (len >= 4) {
            fHash += *data++;
            fHash += (fHash << 10);
            fHash ^= (fHash >> 6);
            len -= 4;
        }
        // final avalanche of the one-at-a-time hash
        fHash += (fHash << 3);
        fHash ^= (fHash >> 11);
        fHash += (fHash << 15);
#if GR_DEBUG
        fIsValid = true;
#endif
    }

    // memcmp-style ordering over the raw key bytes.
    int compare(const GrBinHashKey<Entry, KeySize>& key) const {
        GrAssert(fIsValid && key.fIsValid);
        return memcmp(fData, key.fData, KeySize);
    }

    static bool
    EQ(const Entry& entry, const GrBinHashKey<Entry, KeySize>& key) {
        GrAssert(key.fIsValid);
        return 0 == entry.compare(key);
    }

    static bool
    LT(const Entry& entry, const GrBinHashKey<Entry, KeySize>& key) {
        GrAssert(key.fIsValid);
        return entry.compare(key) < 0;
    }

    uint32_t getHash() const {
        GrAssert(fIsValid);
        return fHash;
    }

private:
    uint32_t fHash;
    uint8_t fData[KeySize]; // Buffer for key storage

#if GR_DEBUG
public:
    bool fIsValid;          // set once setKeyData() has run
#endif
};
+
+#endif
diff --git a/src/gpu/GrBufferAllocPool.cpp b/src/gpu/GrBufferAllocPool.cpp
new file mode 100644
index 0000000000..c01192db70
--- /dev/null
+++ b/src/gpu/GrBufferAllocPool.cpp
@@ -0,0 +1,459 @@
+
+/*
+ * Copyright 2010 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+#include "GrBufferAllocPool.h"
+#include "GrTypes.h"
+#include "GrVertexBuffer.h"
+#include "GrIndexBuffer.h"
+#include "GrGpu.h"
+
+#if GR_DEBUG
+ #define VALIDATE validate
+#else
+ static void VALIDATE(bool x = false) {}
+#endif
+
+// page size
+#define GrBufferAllocPool_MIN_BLOCK_SIZE ((size_t)1 << 12)
+
// Refs the gpu and pre-creates `preallocBufferCnt` buffers of the (clamped)
// minimum block size; these are recycled across reset() calls.
GrBufferAllocPool::GrBufferAllocPool(GrGpu* gpu,
                                     BufferType bufferType,
                                     bool frequentResetHint,
                                     size_t blockSize,
                                     int preallocBufferCnt) :
        fBlocks(GrMax(8, 2*preallocBufferCnt)) {

    GrAssert(NULL != gpu);
    fGpu = gpu;
    fGpu->ref();
    fGpuIsReffed = true;

    fBufferType = bufferType;
    fFrequentResetHint = frequentResetHint;
    fBufferPtr = NULL;
    fMinBlockSize = GrMax(GrBufferAllocPool_MIN_BLOCK_SIZE, blockSize);

    fBytesInUse = 0;

    fPreallocBuffersInUse = 0;
    fFirstPreallocBuffer = 0;
    for (int i = 0; i < preallocBufferCnt; ++i) {
        GrGeometryBuffer* buffer = this->createBuffer(fMinBlockSize);
        if (NULL != buffer) {
            *fPreallocBuffers.append() = buffer;
            // NOTE(review): createBuffer() hands back an owning ref and this
            // adds another, while ~GrBufferAllocPool only unrefAll()s once —
            // looks like an extra ref; confirm against GrGpu's buffer
            // ownership rules.
            buffer->ref();
        }
    }
}
+
// Unlocks the active block's buffer (if locked), destroys all blocks, then
// drops the refs on the preallocated buffers and the GrGpu.
GrBufferAllocPool::~GrBufferAllocPool() {
    VALIDATE();
    if (fBlocks.count()) {
        GrGeometryBuffer* buffer = fBlocks.back().fBuffer;
        if (buffer->isLocked()) {
            buffer->unlock();
        }
    }
    while (!fBlocks.empty()) {
        destroyBlock();
    }
    fPreallocBuffers.unrefAll();
    releaseGpuRef();
}
+
// Drops this pool's ref on the GrGpu (idempotent). Called by the dtor, and
// early by GrGpu itself for pools it owns as members (see friend decl).
void GrBufferAllocPool::releaseGpuRef() {
    if (fGpuIsReffed) {
        fGpu->unref();
        fGpuIsReffed = false;
    }
}
+
// Discards all data in the pool: unlocks the active buffer, destroys every
// block (recycling preallocated buffers), and resizes the CPU staging area.
void GrBufferAllocPool::reset() {
    VALIDATE();
    fBytesInUse = 0;
    if (fBlocks.count()) {
        GrGeometryBuffer* buffer = fBlocks.back().fBuffer;
        if (buffer->isLocked()) {
            buffer->unlock();
        }
    }
    while (!fBlocks.empty()) {
        destroyBlock();
    }
    if (fPreallocBuffers.count()) {
        // must set this after above loop.
        fFirstPreallocBuffer = (fFirstPreallocBuffer + fPreallocBuffersInUse) %
                               fPreallocBuffers.count();
    }
    // When the gpu can't lock buffers, all writes are staged through
    // fCpuData, so keep a min-block-size staging allocation around.
    fCpuData.reset(fGpu->getCaps().fBufferLockSupport ? 0 : fMinBlockSize);
    GrAssert(0 == fPreallocBuffersInUse);
    VALIDATE();
}
+
// Ensures the active block's buffer holds all written data and is unlocked,
// so it can be used as a draw source.
void GrBufferAllocPool::unlock() {
    VALIDATE();

    if (NULL != fBufferPtr) {
        BufferBlock& block = fBlocks.back();
        if (block.fBuffer->isLocked()) {
            block.fBuffer->unlock();
        } else {
            // Writes were staged in fCpuData; push the used portion now.
            size_t flushSize = block.fBuffer->sizeInBytes() - block.fBytesFree;
            flushCpuData(fBlocks.back().fBuffer, flushSize);
        }
        fBufferPtr = NULL;
    }
    VALIDATE();
}
+
#if GR_DEBUG
// Debug-only consistency check: the write pointer matches the active
// block's lock ptr (or the CPU staging buffer), only the last block may be
// locked, and per-block used bytes sum to fBytesInUse.
void GrBufferAllocPool::validate(bool unusedBlockAllowed) const {
    if (NULL != fBufferPtr) {
        GrAssert(!fBlocks.empty());
        if (fBlocks.back().fBuffer->isLocked()) {
            GrGeometryBuffer* buf = fBlocks.back().fBuffer;
            GrAssert(buf->lockPtr() == fBufferPtr);
        } else {
            GrAssert(fCpuData.get() == fBufferPtr);
        }
    } else {
        GrAssert(fBlocks.empty() || !fBlocks.back().fBuffer->isLocked());
    }
    size_t bytesInUse = 0;
    for (int i = 0; i < fBlocks.count() - 1; ++i) {
        GrAssert(!fBlocks[i].fBuffer->isLocked());
    }
    for (int i = 0; i < fBlocks.count(); ++i) {
        size_t bytes = fBlocks[i].fBuffer->sizeInBytes() - fBlocks[i].fBytesFree;
        bytesInUse += bytes;
        GrAssert(bytes || unusedBlockAllowed);
    }

    GrAssert(bytesInUse == fBytesInUse);
    if (unusedBlockAllowed) {
        // at most one (the newest) block may be empty
        GrAssert((fBytesInUse && !fBlocks.empty()) ||
                 (!fBytesInUse && (fBlocks.count() < 2)));
    } else {
        GrAssert((0 == fBytesInUse) == fBlocks.empty());
    }
}
#endif
+
+void* GrBufferAllocPool::makeSpace(size_t size,
+ size_t alignment,
+ const GrGeometryBuffer** buffer,
+ size_t* offset) {
+ VALIDATE();
+
+ GrAssert(NULL != buffer);
+ GrAssert(NULL != offset);
+
+ if (NULL != fBufferPtr) {
+ BufferBlock& back = fBlocks.back();
+ size_t usedBytes = back.fBuffer->sizeInBytes() - back.fBytesFree;
+ size_t pad = GrSizeAlignUpPad(usedBytes,
+ alignment);
+ if ((size + pad) <= back.fBytesFree) {
+ usedBytes += pad;
+ *offset = usedBytes;
+ *buffer = back.fBuffer;
+ back.fBytesFree -= size + pad;
+ fBytesInUse += size;
+ return (void*)(reinterpret_cast<intptr_t>(fBufferPtr) + usedBytes);
+ }
+ }
+
+ // We could honor the space request using by a partial update of the current
+ // VB (if there is room). But we don't currently use draw calls to GL that
+ // allow the driver to know that previously issued draws won't read from
+ // the part of the buffer we update. Also, the GL buffer implementation
+ // may be cheating on the actual buffer size by shrinking the buffer on
+ // updateData() if the amount of data passed is less than the full buffer
+ // size.
+
+ if (!createBlock(size)) {
+ return NULL;
+ }
+ GrAssert(NULL != fBufferPtr);
+
+ *offset = 0;
+ BufferBlock& back = fBlocks.back();
+ *buffer = back.fBuffer;
+ back.fBytesFree -= size;
+ fBytesInUse += size;
+ VALIDATE();
+ return fBufferPtr;
+}
+
// Number of itemSize-byte, itemSize-aligned items that fit in the active
// buffer's remaining space — or in a fresh preallocated buffer when no
// block is active and one is still available.
int GrBufferAllocPool::currentBufferItems(size_t itemSize) const {
    VALIDATE();
    if (NULL != fBufferPtr) {
        const BufferBlock& back = fBlocks.back();
        size_t usedBytes = back.fBuffer->sizeInBytes() - back.fBytesFree;
        size_t pad = GrSizeAlignUpPad(usedBytes, itemSize);
        return (back.fBytesFree - pad) / itemSize;
    } else if (fPreallocBuffersInUse < fPreallocBuffers.count()) {
        return fMinBlockSize / itemSize;
    }
    return 0;
}
+
// Preallocated buffers not yet handed out since the last reset().
int GrBufferAllocPool::preallocatedBuffersRemaining() const {
    return fPreallocBuffers.count() - fPreallocBuffersInUse;
}
+
// Total number of buffers created up front in the constructor.
int GrBufferAllocPool::preallocatedBufferCount() const {
    return fPreallocBuffers.count();
}
+
// Returns the most recently makeSpace()'d `bytes` to the pool (LIFO),
// destroying any block that becomes completely unused.
void GrBufferAllocPool::putBack(size_t bytes) {
    VALIDATE();

    while (bytes) {
        // caller shouldnt try to put back more than they've taken
        GrAssert(!fBlocks.empty());
        BufferBlock& block = fBlocks.back();
        size_t bytesUsed = block.fBuffer->sizeInBytes() - block.fBytesFree;
        if (bytes >= bytesUsed) {
            // the whole block is being given back
            bytes -= bytesUsed;
            fBytesInUse -= bytesUsed;
            // if we locked a vb to satisfy the make space and we're releasing
            // beyond it, then unlock it.
            if (block.fBuffer->isLocked()) {
                block.fBuffer->unlock();
            }
            this->destroyBlock();
        } else {
            // partial give-back within the active block
            block.fBytesFree += bytes;
            fBytesInUse -= bytes;
            bytes = 0;
            break;
        }
    }
    VALIDATE();
}
+
// Makes a new active block of at least requestSize bytes, preferring an
// unused preallocated buffer when the default size suffices. Flushes and
// retires the previous active block's write pointer first.
bool GrBufferAllocPool::createBlock(size_t requestSize) {

    size_t size = GrMax(requestSize, fMinBlockSize);
    GrAssert(size >= GrBufferAllocPool_MIN_BLOCK_SIZE);

    VALIDATE();

    BufferBlock& block = fBlocks.push_back();

    if (size == fMinBlockSize &&
        fPreallocBuffersInUse < fPreallocBuffers.count()) {

        // recycle the next unused preallocated buffer (rotating start point
        // is advanced by reset())
        uint32_t nextBuffer = (fPreallocBuffersInUse + fFirstPreallocBuffer) %
                              fPreallocBuffers.count();
        block.fBuffer = fPreallocBuffers[nextBuffer];
        block.fBuffer->ref();
        ++fPreallocBuffersInUse;
    } else {
        block.fBuffer = this->createBuffer(size);
        if (NULL == block.fBuffer) {
            fBlocks.pop_back();
            return false;
        }
    }

    block.fBytesFree = size;
    if (NULL != fBufferPtr) {
        // finish off the previous active block before switching
        GrAssert(fBlocks.count() > 1);
        BufferBlock& prev = fBlocks.fromBack(1);
        if (prev.fBuffer->isLocked()) {
            prev.fBuffer->unlock();
        } else {
            flushCpuData(prev.fBuffer,
                         prev.fBuffer->sizeInBytes() - prev.fBytesFree);
        }
        fBufferPtr = NULL;
    }

    GrAssert(NULL == fBufferPtr);

    // Lock the new buffer directly when the gpu supports it and the block is
    // big enough to be worthwhile; otherwise stage writes through fCpuData.
    if (fGpu->getCaps().fBufferLockSupport &&
        size > GR_GEOM_BUFFER_LOCK_THRESHOLD &&
        (!fFrequentResetHint || requestSize > GR_GEOM_BUFFER_LOCK_THRESHOLD)) {
        fBufferPtr = block.fBuffer->lock();
    }

    if (NULL == fBufferPtr) {
        fBufferPtr = fCpuData.reset(size);
    }

    VALIDATE(true);

    return true;
}
+
// Pops the newest block. If its buffer is the most recently handed-out
// preallocated buffer, that buffer is marked available again.
void GrBufferAllocPool::destroyBlock() {
    GrAssert(!fBlocks.empty());

    BufferBlock& block = fBlocks.back();
    if (fPreallocBuffersInUse > 0) {
        // index (mod count) of the last preallocated buffer handed out
        uint32_t prevPreallocBuffer = (fPreallocBuffersInUse +
                                       fFirstPreallocBuffer +
                                       (fPreallocBuffers.count() - 1)) %
                                      fPreallocBuffers.count();
        if (block.fBuffer == fPreallocBuffers[prevPreallocBuffer]) {
            --fPreallocBuffersInUse;
        }
    }
    GrAssert(!block.fBuffer->isLocked());
    block.fBuffer->unref();
    fBlocks.pop_back();
    fBufferPtr = NULL;
}
+
+void GrBufferAllocPool::flushCpuData(GrGeometryBuffer* buffer,
+ size_t flushSize) {
+ GrAssert(NULL != buffer);
+ GrAssert(!buffer->isLocked());
+ GrAssert(fCpuData.get() == fBufferPtr);
+ GrAssert(flushSize <= buffer->sizeInBytes());
+
+ bool updated = false;
+ if (fGpu->getCaps().fBufferLockSupport &&
+ flushSize > GR_GEOM_BUFFER_LOCK_THRESHOLD) {
+ void* data = buffer->lock();
+ if (NULL != data) {
+ memcpy(data, fBufferPtr, flushSize);
+ buffer->unlock();
+ updated = true;
+ }
+ }
+ buffer->updateData(fBufferPtr, flushSize);
+}
+
+GrGeometryBuffer* GrBufferAllocPool::createBuffer(size_t size) {
+ if (kIndex_BufferType == fBufferType) {
+ return fGpu->createIndexBuffer(size, true);
+ } else {
+ GrAssert(kVertex_BufferType == fBufferType);
+ return fGpu->createVertexBuffer(size, true);
+ }
+}
+
+////////////////////////////////////////////////////////////////////////////////
+
// Thin wrapper: configures the shared pool implementation for vertex buffers.
GrVertexBufferAllocPool::GrVertexBufferAllocPool(GrGpu* gpu,
                                                 bool frequentResetHint,
                                                 size_t bufferSize,
                                                 int preallocBufferCnt)
: GrBufferAllocPool(gpu,
                    kVertex_BufferType,
                    frequentResetHint,
                    bufferSize,
                    preallocBufferCnt) {
}
+
// See GrBufferAllocPool::makeSpace: requests vertexCount vertices of
// `layout`, converting the byte offset into a starting vertex index.
void* GrVertexBufferAllocPool::makeSpace(GrVertexLayout layout,
                                         int vertexCount,
                                         const GrVertexBuffer** buffer,
                                         int* startVertex) {

    GrAssert(vertexCount >= 0);
    GrAssert(NULL != buffer);
    GrAssert(NULL != startVertex);

    size_t vSize = GrDrawTarget::VertexSize(layout);
    size_t offset = 0; // assign to suppress warning
    const GrGeometryBuffer* geomBuffer = NULL; // assign to suppress warning
    void* ptr = INHERITED::makeSpace(vSize * vertexCount,
                                     vSize,
                                     &geomBuffer,
                                     &offset);

    *buffer = (const GrVertexBuffer*) geomBuffer;
    GrAssert(0 == offset % vSize);  // offset is vertex-size aligned
    *startVertex = offset / vSize;
    return ptr;
}
+
+bool GrVertexBufferAllocPool::appendVertices(GrVertexLayout layout,
+ int vertexCount,
+ const void* vertices,
+ const GrVertexBuffer** buffer,
+ int* startVertex) {
+ void* space = makeSpace(layout, vertexCount, buffer, startVertex);
+ if (NULL != space) {
+ memcpy(space,
+ vertices,
+ GrDrawTarget::VertexSize(layout) * vertexCount);
+ return true;
+ } else {
+ return false;
+ }
+}
+
// Vertices of `layout` that fit in one preallocated buffer (0 if none).
int GrVertexBufferAllocPool::preallocatedBufferVertices(GrVertexLayout layout) const {
    return INHERITED::preallocatedBufferSize() /
           GrDrawTarget::VertexSize(layout);
}
+
// Vertices of `layout` that fit in the current buffer without spilling.
int GrVertexBufferAllocPool::currentBufferVertices(GrVertexLayout layout) const {
    return currentBufferItems(GrDrawTarget::VertexSize(layout));
}
+
+////////////////////////////////////////////////////////////////////////////////
+
// Thin wrapper: configures the shared pool implementation for index buffers.
GrIndexBufferAllocPool::GrIndexBufferAllocPool(GrGpu* gpu,
                                               bool frequentResetHint,
                                               size_t bufferSize,
                                               int preallocBufferCnt)
: GrBufferAllocPool(gpu,
                    kIndex_BufferType,
                    frequentResetHint,
                    bufferSize,
                    preallocBufferCnt) {
}
+
// See GrBufferAllocPool::makeSpace: requests indexCount 16-bit indices,
// converting the byte offset into a starting index.
void* GrIndexBufferAllocPool::makeSpace(int indexCount,
                                        const GrIndexBuffer** buffer,
                                        int* startIndex) {

    GrAssert(indexCount >= 0);
    GrAssert(NULL != buffer);
    GrAssert(NULL != startIndex);

    size_t offset = 0; // assign to suppress warning
    const GrGeometryBuffer* geomBuffer = NULL; // assign to suppress warning
    void* ptr = INHERITED::makeSpace(indexCount * sizeof(uint16_t),
                                     sizeof(uint16_t),
                                     &geomBuffer,
                                     &offset);

    *buffer = (const GrIndexBuffer*) geomBuffer;
    GrAssert(0 == offset % sizeof(uint16_t));  // offset is index aligned
    *startIndex = offset / sizeof(uint16_t);
    return ptr;
}
+
+bool GrIndexBufferAllocPool::appendIndices(int indexCount,
+ const void* indices,
+ const GrIndexBuffer** buffer,
+ int* startIndex) {
+ void* space = makeSpace(indexCount, buffer, startIndex);
+ if (NULL != space) {
+ memcpy(space, indices, sizeof(uint16_t) * indexCount);
+ return true;
+ } else {
+ return false;
+ }
+}
+
// 16-bit indices that fit in one preallocated buffer (0 if none).
int GrIndexBufferAllocPool::preallocatedBufferIndices() const {
    return INHERITED::preallocatedBufferSize() / sizeof(uint16_t);
}
+
// 16-bit indices that fit in the current buffer without spilling.
int GrIndexBufferAllocPool::currentBufferIndices() const {
    return currentBufferItems(sizeof(uint16_t));
}
diff --git a/src/gpu/GrBufferAllocPool.h b/src/gpu/GrBufferAllocPool.h
new file mode 100644
index 0000000000..acf0289582
--- /dev/null
+++ b/src/gpu/GrBufferAllocPool.h
@@ -0,0 +1,350 @@
+
+/*
+ * Copyright 2010 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+
+#ifndef GrBufferAllocPool_DEFINED
+#define GrBufferAllocPool_DEFINED
+
+#include "GrNoncopyable.h"
+#include "GrTDArray.h"
+
+#include "SkTArray.h"
+
+class GrGeometryBuffer;
+class GrGpu;
+
+/**
+ * A pool of geometry buffers tied to a GrGpu.
+ *
+ * The pool allows a client to make space for geometry and then put back excess
+ * space if it over allocated. When a client is ready to draw from the pool
+ * it calls unlock on the pool ensure buffers are ready for drawing. The pool
+ * can be reset after drawing is completed to recycle space.
+ *
+ * At creation time a minimum per-buffer size can be specified. Additionally,
+ * a number of buffers to preallocate can be specified. These will
+ * be allocated at the min size and kept around until the pool is destroyed.
+ */
class GrBufferAllocPool : GrNoncopyable {

public:
    /**
     * Ensures all buffers are unlocked and have all data written to them.
     * Call before drawing using buffers from the pool.
     */
    void unlock();

    /**
     * Invalidates all the data in the pool, unrefs non-preallocated buffers.
     */
    void reset();

    /**
     * Gets the number of preallocated buffers that are yet to be used.
     */
    int preallocatedBuffersRemaining() const;

    /**
     * gets the number of preallocated buffers
     */
    int preallocatedBufferCount() const;

    /**
     * Frees data from makeSpaces in LIFO order.
     */
    void putBack(size_t bytes);

    /**
     * Gets the GrGpu that this pool is associated with.
     */
    GrGpu* getGpu() { return fGpu; }

protected:
    /**
     * Used to determine what type of buffers to create. We could make the
     * createBuffer a virtual except that we want to use it in the cons for
     * pre-allocated buffers.
     */
    enum BufferType {
        kVertex_BufferType,
        kIndex_BufferType,
    };

    /**
     * Constructor
     *
     * @param gpu The GrGpu used to create the buffers.
     * @param bufferType The type of buffers to create.
     * @param frequentResetHint A hint that indicates that the pool
     *                          should expect frequent unlock() calls
     *                          (as opposed to many makeSpace / acquires
     *                          between resets).
     * @param bufferSize The minimum size of created buffers.
     *                   This value will be clamped to some
     *                   reasonable minimum.
     * @param preallocBufferCnt The pool will allocate this number of
     *                          buffers at bufferSize and keep them until it
     *                          is destroyed.
     */
    GrBufferAllocPool(GrGpu* gpu,
                      BufferType bufferType,
                      bool frequentResetHint,
                      size_t bufferSize = 0,
                      int preallocBufferCnt = 0);

    virtual ~GrBufferAllocPool();

    /**
     * Gets the size of the preallocated buffers.
     *
     * @return the size of preallocated buffers.
     */
    size_t preallocatedBufferSize() const {
        return fPreallocBuffers.count() ? fMinBlockSize : 0;
    }

    /**
     * Returns a block of memory to hold data. A buffer designated to hold the
     * data is given to the caller. The buffer may or may not be locked. The
     * returned ptr remains valid until any of the following:
     *      *makeSpace is called again.
     *      *unlock is called.
     *      *reset is called.
     *      *this object is destroyed.
     *
     * Once unlock on the pool is called the data is guaranteed to be in the
     * buffer at the offset indicated by offset. Until that time it may be
     * in temporary storage and/or the buffer may be locked.
     *
     * @param size         the amount of data to make space for
     * @param alignment    alignment constraint from start of buffer
     * @param buffer       returns the buffer that will hold the data.
     * @param offset       returns the offset into buffer of the data.
     * @return pointer to where the client should write the data.
     */
    void* makeSpace(size_t size,
                    size_t alignment,
                    const GrGeometryBuffer** buffer,
                    size_t* offset);

    /**
     * Gets the number of items of a size that can be added to the current
     * buffer without spilling to another buffer. If the pool has been reset, or
     * the previous makeSpace completely exhausted a buffer then the returned
     * size will be the size of the next available preallocated buffer, or zero
     * if no preallocated buffer remains available. It is assumed that items
     * should be itemSize-aligned from the start of a buffer.
     *
     * @return the number of items that would fit in the current buffer.
     */
    int currentBufferItems(size_t itemSize) const;

    // Creates a vertex or index buffer (per fBufferType) of `size` bytes.
    GrGeometryBuffer* createBuffer(size_t size);

private:

    // The GrGpu must be able to clear the ref of pools it creates as members
    friend class GrGpu;
    void releaseGpuRef();

    // A buffer together with how much of its tail is still unallocated.
    struct BufferBlock {
        size_t fBytesFree;          // unallocated bytes at the buffer's end
        GrGeometryBuffer* fBuffer;  // ref'd by the block
    };

    bool createBlock(size_t requestSize);
    void destroyBlock();
    void flushCpuData(GrGeometryBuffer* buffer, size_t flushSize);
#if GR_DEBUG
    void validate(bool unusedBlockAllowed = false) const;
#endif

    size_t fBytesInUse;                 // bytes handed out by makeSpace, not yet putBack/reset

    GrGpu* fGpu;
    bool fGpuIsReffed;                  // false once releaseGpuRef() has run
    bool fFrequentResetHint;
    GrTDArray<GrGeometryBuffer*> fPreallocBuffers;
    size_t fMinBlockSize;
    BufferType fBufferType;

    SkTArray<BufferBlock> fBlocks;      // fBlocks.back() is the active block
    int fPreallocBuffersInUse;
    int fFirstPreallocBuffer;           // index where prealloc reuse starts
    SkAutoMalloc fCpuData;              // CPU staging when locking is unavailable
    void* fBufferPtr;                   // write ptr for the active block (lock ptr or fCpuData)
};
+
+class GrVertexBuffer;
+
+/**
+ * A GrBufferAllocPool of vertex buffers
+ */
class GrVertexBufferAllocPool : public GrBufferAllocPool {
public:
    /**
     * Constructor
     *
     * @param gpu The GrGpu used to create the vertex buffers.
     * @param frequentResetHint A hint that indicates that the pool
     *                          should expect frequent unlock() calls
     *                          (as opposed to many makeSpace / acquires
     *                          between resets).
     * @param bufferSize The minimum size of created VBs This value
     *                   will be clamped to some reasonable minimum.
     * @param preallocBufferCnt The pool will allocate this number of VBs at
     *                          bufferSize and keep them until it is
     *                          destroyed.
     */
    GrVertexBufferAllocPool(GrGpu* gpu,
                            bool frequentResetHint,
                            size_t bufferSize = 0,
                            int preallocBufferCnt = 0);

    /**
     * Returns a block of memory to hold vertices. A buffer designated to hold
     * the vertices given to the caller. The buffer may or may not be locked.
     * The returned ptr remains valid until any of the following:
     *      *makeSpace is called again.
     *      *unlock is called.
     *      *reset is called.
     *      *this object is destroyed.
     *
     * Once unlock on the pool is called the vertices are guaranteed to be in
     * the buffer at the offset indicated by startVertex. Until that time they
     * may be in temporary storage and/or the buffer may be locked.
     *
     * @param layout       specifies type of vertices to allocate space for
     * @param vertexCount  number of vertices to allocate space for
     * @param buffer       returns the vertex buffer that will hold the
     *                     vertices.
     * @param startVertex  returns the offset into buffer of the first vertex.
     *                     In units of the size of a vertex from layout param.
     * @return pointer to first vertex.
     */
    void* makeSpace(GrVertexLayout layout,
                    int vertexCount,
                    const GrVertexBuffer** buffer,
                    int* startVertex);

    /**
     * Shortcut to make space and then write verts into the made space.
     */
    bool appendVertices(GrVertexLayout layout,
                        int vertexCount,
                        const void* vertices,
                        const GrVertexBuffer** buffer,
                        int* startVertex);

    /**
     * Gets the number of vertices that can be added to the current VB without
     * spilling to another VB. If the pool has been reset, or the previous
     * makeSpace completely exhausted a VB then the returned number of vertices
     * would fit in the next available preallocated buffer. If any makeSpace
     * would force a new VB to be created the return value will be zero.
     *
     * @param layout the format of vertices to compute space for.
     * @return the number of vertices that would fit in the current buffer.
     */
    int currentBufferVertices(GrVertexLayout layout) const;

    /**
     * Gets the number of vertices that can fit in a preallocated vertex buffer.
     * Zero if no preallocated buffers.
     *
     * @param layout the format of vertices to compute space for.
     *
     * @return number of vertices that fit in one of the preallocated vertex
     *         buffers.
     */
    int preallocatedBufferVertices(GrVertexLayout layout) const;

private:
    typedef GrBufferAllocPool INHERITED;
};
+
+class GrIndexBuffer;
+
+/**
+ * A GrBufferAllocPool of index buffers
+ */
class GrIndexBufferAllocPool : public GrBufferAllocPool {
public:
    /**
     * Constructor
     *
     * @param gpu The GrGpu used to create the index buffers.
     * @param frequentResetHint A hint that indicates that the pool
     *                          should expect frequent unlock() calls
     *                          (as opposed to many makeSpace / acquires
     *                          between resets).
     * @param bufferSize The minimum size of created IBs This value
     *                   will be clamped to some reasonable minimum.
     * @param preallocBufferCnt The pool will allocate this number of IBs at
     *                          bufferSize and keep them until it is
     *                          destroyed.
     */
    GrIndexBufferAllocPool(GrGpu* gpu,
                           bool frequentResetHint,
                           size_t bufferSize = 0,
                           int preallocBufferCnt = 0);

    /**
     * Returns a block of memory to hold indices. A buffer designated to hold
     * the indices is given to the caller. The buffer may or may not be locked.
     * The returned ptr remains valid until any of the following:
     *      *makeSpace is called again.
     *      *unlock is called.
     *      *reset is called.
     *      *this object is destroyed.
     *
     * Once unlock on the pool is called the indices are guaranteed to be in the
     * buffer at the offset indicated by startIndex. Until that time they may be
     * in temporary storage and/or the buffer may be locked.
     *
     * @param indexCount   number of indices to allocate space for
     * @param buffer       returns the index buffer that will hold the indices.
     * @param startIndex   returns the offset into buffer of the first index.
     * @return pointer to first index.
     */
    void* makeSpace(int indexCount,
                    const GrIndexBuffer** buffer,
                    int* startIndex);

    /**
     * Shortcut to make space and then write indices into the made space.
     */
    bool appendIndices(int indexCount,
                       const void* indices,
                       const GrIndexBuffer** buffer,
                       int* startIndex);

    /**
     * Gets the number of indices that can be added to the current IB without
     * spilling to another IB. If the pool has been reset, or the previous
     * makeSpace completely exhausted an IB then the returned number of indices
     * would fit in the next available preallocated buffer. If any makeSpace
     * would force a new IB to be created the return value will be zero.
     */
    int currentBufferIndices() const;

    /**
     * Gets the number of indices that can fit in a preallocated index buffer.
     * Zero if no preallocated buffers.
     *
     * @return number of indices that fit in one of the preallocated index
     *         buffers.
     */
    int preallocatedBufferIndices() const;

private:
    typedef GrBufferAllocPool INHERITED;
};
+
+#endif
diff --git a/src/gpu/GrClip.cpp b/src/gpu/GrClip.cpp
new file mode 100644
index 0000000000..a02d9f4504
--- /dev/null
+++ b/src/gpu/GrClip.cpp
@@ -0,0 +1,145 @@
+
+/*
+ * Copyright 2010 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+
+#include "GrClip.h"
+
+// Default clip: no elements, empty conservative bounds (bounds valid).
+GrClip::GrClip() {
+    fConservativeBounds.setEmpty();
+    fConservativeBoundsValid = true;
+}
+
+// Copy construction delegates to operator= so the element list and cached
+// bounds are copied in one place.
+GrClip::GrClip(const GrClip& src) {
+    *this = src;
+}
+
+// Single-rect clip from an integer device-space rect.
+GrClip::GrClip(const GrIRect& rect) {
+    this->setFromIRect(rect);
+}
+
+// Single-rect clip from a scalar rect.
+GrClip::GrClip(const GrRect& rect) {
+    this->setFromRect(rect);
+}
+
+// Builds the clip from all elements of iter, translated by (tx, ty).
+// bounds, if non-NULL, supplies a fallback conservative bound.
+GrClip::GrClip(GrClipIterator* iter, GrScalar tx, GrScalar ty,
+               const GrRect* bounds) {
+    this->setFromIterator(iter, tx, ty, bounds);
+}
+
+GrClip::~GrClip() {}
+
+GrClip& GrClip::operator=(const GrClip& src) {
+    fList = src.fList;
+    fConservativeBounds = src.fConservativeBounds;
+    fConservativeBoundsValid = src.fConservativeBoundsValid;
+    return *this;
+}
+
+// Drops all elements; this is the canonical empty state so that any two
+// empty clips compare equal.
+void GrClip::setEmpty() {
+    fList.reset();
+    fConservativeBounds.setEmpty();
+    fConservativeBoundsValid = true;
+}
+
+// Replaces the clip with a single scalar-rect element (kReplace op).
+void GrClip::setFromRect(const GrRect& r) {
+    fList.reset();
+    if (r.isEmpty()) {
+        // use a canonical empty rect for == testing.
+        setEmpty();
+    } else {
+        fList.push_back();
+        fList.back().fRect = r;
+        fList.back().fType = kRect_ClipType;
+        fList.back().fOp = kReplace_SetOp;
+        fConservativeBounds = r;
+        fConservativeBoundsValid = true;
+    }
+}
+
+// Replaces the clip with a single integer-rect element (kReplace op).
+void GrClip::setFromIRect(const GrIRect& r) {
+    fList.reset();
+    if (r.isEmpty()) {
+        // use a canonical empty rect for == testing.
+        setEmpty();
+    } else {
+        fList.push_back();
+        fList.back().fRect.set(r);
+        fList.back().fType = kRect_ClipType;
+        fList.back().fOp = kReplace_SetOp;
+        fConservativeBounds.set(r);
+        fConservativeBoundsValid = true;
+    }
+}
+
+// Intersects *dst with src in place; disjoint rects leave *dst empty
+// rather than undefined (SkRect::intersect does not modify on failure).
+static void intersectWith(SkRect* dst, const SkRect& src) {
+    if (!dst->intersect(src)) {
+        dst->setEmpty();
+    }
+}
+
+// Rebuilds the clip from iter's elements, offset by (tx, ty). While the
+// element stream is purely a series of intersected rects, consecutive
+// rect-intersects are collapsed into a single element and the bounds stay
+// exact; once a path or non-intersect op appears we fall back to
+// conservativeBounds (if supplied) or mark the bounds invalid.
+void GrClip::setFromIterator(GrClipIterator* iter, GrScalar tx, GrScalar ty,
+                             const GrRect* conservativeBounds) {
+    fList.reset();
+
+    int rectCount = 0;
+
+    // compute bounds for common case of series of intersecting rects.
+    bool isectRectValid = true;
+
+    if (iter) {
+        for (iter->rewind(); !iter->isDone(); iter->next()) {
+            Element& e = fList.push_back();
+            e.fType = iter->getType();
+            e.fOp = iter->getOp();
+            // iterators should not emit replace
+            GrAssert(kReplace_SetOp != e.fOp);
+            switch (e.fType) {
+                case kRect_ClipType:
+                    iter->getRect(&e.fRect);
+                    if (tx || ty) {
+                        e.fRect.offset(tx, ty);
+                    }
+                    ++rectCount;
+                    if (isectRectValid) {
+                        if (kIntersect_SetOp == e.fOp) {
+                            GrAssert(fList.count() <= 2);
+                            if (fList.count() > 1) {
+                                // fold this rect into the previous one
+                                // instead of keeping both elements.
+                                GrAssert(2 == rectCount);
+                                rectCount = 1;
+                                fList.pop_back();
+                                GrAssert(kRect_ClipType == fList.back().fType);
+                                // NOTE(review): e refers to the element just
+                                // popped; this relies on pop_back() leaving
+                                // the storage intact -- confirm list semantics.
+                                intersectWith(&fList.back().fRect, e.fRect);
+                            }
+                        } else {
+                            isectRectValid = false;
+                        }
+                    }
+                    break;
+                case kPath_ClipType:
+                    e.fPath = *iter->getPath();
+                    if (tx || ty) {
+                        e.fPath.offset(tx, ty);
+                    }
+                    e.fPathFill = iter->getPathFill();
+                    isectRectValid = false;
+                    break;
+                default:
+                    GrCrash("Unknown clip element type.");
+            }
+        }
+    }
+    fConservativeBoundsValid = false;
+    if (isectRectValid && rectCount) {
+        fConservativeBounds = fList[0].fRect;
+        fConservativeBoundsValid = true;
+    } else if (NULL != conservativeBounds) {
+        fConservativeBounds = *conservativeBounds;
+        fConservativeBoundsValid = true;
+    }
+}
diff --git a/src/gpu/GrContext.cpp b/src/gpu/GrContext.cpp
new file mode 100644
index 0000000000..59218386fb
--- /dev/null
+++ b/src/gpu/GrContext.cpp
@@ -0,0 +1,1930 @@
+
+/*
+ * Copyright 2011 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+#include "GrBufferAllocPool.h"
+#include "GrClipIterator.h"
+#include "GrContext.h"
+#include "GrGpu.h"
+#include "GrIndexBuffer.h"
+#include "GrInOrderDrawBuffer.h"
+#include "GrPathRenderer.h"
+#include "GrPathUtils.h"
+#include "GrResourceCache.h"
+#include "GrStencilBuffer.h"
+#include "GrTextStrike.h"
+#include "SkTLazy.h"
+#include "SkTrace.h"
+
+// Using MSAA seems to be slower for some yet unknown reason.
+#define PREFER_MSAA_OFFSCREEN_AA 0
+#define OFFSCREEN_SSAA_SCALE 4 // super sample at 4x4
+
+#define DEFER_TEXT_RENDERING 1
+
+#define BATCH_RECT_TO_RECT (1 && !GR_STATIC_RECT_VB)
+
+// When we're using coverage AA but the blend is incompatible (given gpu
+// limitations) should we disable AA or draw wrong?
+#define DISABLE_COVERAGE_AA_FOR_BLEND 1
+
+static const size_t MAX_TEXTURE_CACHE_COUNT = 256;
+static const size_t MAX_TEXTURE_CACHE_BYTES = 16 * 1024 * 1024;
+
+static const size_t DRAW_BUFFER_VBPOOL_BUFFER_SIZE = 1 << 18;
+static const int DRAW_BUFFER_VBPOOL_PREALLOC_BUFFERS = 4;
+
+// We are currently only batching Text and drawRectToRect, both
+// of which use the quad index buffer.
+static const size_t DRAW_BUFFER_IBPOOL_BUFFER_SIZE = 0;
+static const int DRAW_BUFFER_IBPOOL_PREALLOC_BUFFERS = 0;
+
+/**
+ *  Creates a GrContext for the given engine / platform 3D context.
+ *  Returns NULL if the GrGpu could not be created; caller owns the
+ *  returned context.
+ */
+GrContext* GrContext::Create(GrEngine engine,
+                             GrPlatform3DContext context3D) {
+    GrContext* ctx = NULL;
+    // Renamed from 'fGpu': the 'f' prefix is reserved for members and was
+    // misleading on a local.
+    GrGpu* gpu = GrGpu::Create(engine, context3D);
+    if (NULL != gpu) {
+        ctx = new GrContext(gpu);
+        // presumably the GrContext ctor takes its own ref; this releases
+        // the creation ref -- TODO confirm in the ctor.
+        gpu->unref();
+    }
+    return ctx;
+}
+
+// Convenience factory: GL-with-shaders engine, default (0) platform handle.
+GrContext* GrContext::CreateGLShaderContext() {
+    return GrContext::Create(kOpenGL_Shaders_GrEngine, 0);
+}
+
+// Flushes pending work, then tears down caches, the deferred draw buffer
+// and its pools before releasing cached index buffers, the gpu and any
+// path-renderer chain.
+GrContext::~GrContext() {
+    this->flush();
+    delete fTextureCache;
+    delete fFontCache;
+    delete fDrawBuffer;
+    delete fDrawBufferVBAllocPool;
+    delete fDrawBufferIBAllocPool;
+
+    GrSafeUnref(fAAFillRectIndexBuffer);
+    GrSafeUnref(fAAStrokeRectIndexBuffer);
+    fGpu->unref();
+    GrSafeUnref(fPathRendererChain);
+}
+
+// The 3D API context was lost: free everything without touching the dead
+// API, then rebuild the draw buffer so the context stays usable.
+void GrContext::contextLost() {
+    contextDestroyed();
+    this->setupDrawBuffer();
+}
+
+// Releases all CPU-side objects after the API context is irretrievably
+// gone; resources are abandoned (not freed through the API).
+void GrContext::contextDestroyed() {
+    // abandon first so that destructors
+    // don't try to free the resources in the API.
+    fGpu->abandonResources();
+
+    // a path renderer may be holding onto resources that
+    // are now unusable
+    GrSafeSetNull(fPathRendererChain);
+
+    delete fDrawBuffer;
+    fDrawBuffer = NULL;
+
+    delete fDrawBufferVBAllocPool;
+    fDrawBufferVBAllocPool = NULL;
+
+    delete fDrawBufferIBAllocPool;
+    fDrawBufferIBAllocPool = NULL;
+
+    GrSafeSetNull(fAAFillRectIndexBuffer);
+    GrSafeSetNull(fAAStrokeRectIndexBuffer);
+
+    fTextureCache->removeAll();
+    fFontCache->freeAll();
+    fGpu->markContextDirty();
+}
+
+// Forces gpu state to be re-sent before the next draw.
+void GrContext::resetContext() {
+    fGpu->markContextDirty();
+}
+
+// Frees gpu resources while keeping the context usable; flushes first so
+// pending draws don't reference freed objects.
+void GrContext::freeGpuResources() {
+    this->flush();
+    fTextureCache->removeAll();
+    fFontCache->freeAll();
+    // a path renderer may be holding onto resources
+    GrSafeSetNull(fPathRendererChain);
+}
+
+////////////////////////////////////////////////////////////////////////////////
+
+// Computes vertex layout bits for paint's active stages: a stage with its
+// own tex coords gets a per-stage coordinate set (index i -> set i), the
+// rest use the position as their texture coordinate.
+int GrContext::PaintStageVertexLayoutBits(
+                            const GrPaint& paint,
+                            const bool hasTexCoords[GrPaint::kTotalStages]) {
+    int stageMask = paint.getActiveStageMask();
+    int layout = 0;
+    for (int i = 0; i < GrPaint::kTotalStages; ++i) {
+        if ((1 << i) & stageMask) {
+            if (NULL != hasTexCoords && hasTexCoords[i]) {
+                layout |= GrDrawTarget::StageTexCoordVertexLayoutBit(i, i);
+            } else {
+                layout |= GrDrawTarget::StagePosAsTexCoordVertexLayoutBit(i);
+            }
+        }
+    }
+    return layout;
+}
+
+
+////////////////////////////////////////////////////////////////////////////////
+
+// Bits packed into word 3 of resource cache keys (see the key generators
+// below): texture attribute flags plus a resource-type discriminator.
+enum {
+    // flags for textures
+    kNPOTBit = 0x1,
+    kFilterBit = 0x2,
+    kScratchBit = 0x4,
+
+    // resource type
+    kTextureBit = 0x8,
+    kStencilBufferBit = 0x10
+};
+
+// Resolves a cache entry to its texture; NULL for an empty entry.
+GrTexture* GrContext::TextureCacheEntry::texture() const {
+    if (NULL == fEntry) {
+        return NULL;
+    } else {
+        return (GrTexture*) fEntry->resource();
+    }
+}
+
+namespace {
+// returns true if this is a "special" texture because of gpu NPOT limitations.
+// Fills v[0..3]: v[0]/v[1] hold the 64-bit client key, v[2] packs
+// width | (height << 16), v[3] holds the flag/type bits declared above.
+bool gen_texture_key_values(const GrGpu* gpu,
+                            const GrSamplerState& sampler,
+                            GrContext::TextureKey clientKey,
+                            int width,
+                            int height,
+                            bool scratch,
+                            uint32_t v[4]) {
+    GR_STATIC_ASSERT(sizeof(GrContext::TextureKey) == sizeof(uint64_t));
+    // we assume we only need 16 bits of width and height
+    // assert that texture creation will fail anyway if this assumption
+    // would cause key collisions.
+    GrAssert(gpu->getCaps().fMaxTextureSize <= SK_MaxU16);
+    v[0] = clientKey & 0xffffffffUL;
+    v[1] = (clientKey >> 32) & 0xffffffffUL;
+    v[2] = width | (height << 16);
+
+    v[3] = 0;
+    if (!gpu->getCaps().fNPOTTextureTileSupport) {
+        bool isPow2 = GrIsPow2(width) && GrIsPow2(height);
+
+        bool tiled = (sampler.getWrapX() != GrSamplerState::kClamp_WrapMode) ||
+                     (sampler.getWrapY() != GrSamplerState::kClamp_WrapMode);
+
+        // NPOT + tiling can't be done natively: mark the key so a stretched
+        // POT copy is cached under a distinct key (filtered copies differ too).
+        if (tiled && !isPow2) {
+            v[3] |= kNPOTBit;
+            if (GrSamplerState::kNearest_Filter != sampler.getFilter()) {
+                v[3] |= kFilterBit;
+            }
+        }
+    }
+
+    if (scratch) {
+        v[3] |= kScratchBit;
+    }
+
+    v[3] |= kTextureBit;
+
+    return v[3] & kNPOTBit;
+}
+
+// we should never have more than one stencil buffer with same combo of
+// (width,height,samplecount)
+void gen_stencil_key_values(int width, int height,
+                            int sampleCnt, uint32_t v[4]) {
+    v[0] = width;
+    v[1] = height;
+    v[2] = sampleCnt;
+    v[3] = kStencilBufferBit;
+}
+
+void gen_stencil_key_values(const GrStencilBuffer* sb,
+                            uint32_t v[4]) {
+    gen_stencil_key_values(sb->width(), sb->height(),
+                           sb->numSamples(), v);
+}
+}  // namespace
+
+// Looks up (and nest-locks) a cached texture for key/size/sampler; returns
+// an empty entry on miss.
+GrContext::TextureCacheEntry GrContext::findAndLockTexture(TextureKey key,
+                                                           int width,
+                                                           int height,
+                                                const GrSamplerState& sampler) {
+    uint32_t v[4];
+    gen_texture_key_values(fGpu, sampler, key, width, height, false, v);
+    GrResourceKey resourceKey(v);
+    return TextureCacheEntry(fTextureCache->findAndLock(resourceKey,
+                                        GrResourceCache::kNested_LockType));
+}
+
+// Stencil buffers share the texture cache; keyed by (w, h, sample count).
+GrResourceEntry* GrContext::addAndLockStencilBuffer(GrStencilBuffer* sb) {
+    uint32_t v[4];
+    gen_stencil_key_values(sb, v);
+    GrResourceKey resourceKey(v);
+    return fTextureCache->createAndLock(resourceKey, sb);
+}
+
+// Finds and single-locks a cached stencil buffer; NULL on miss.
+GrStencilBuffer* GrContext::findStencilBuffer(int width, int height,
+                                              int sampleCnt) {
+    uint32_t v[4];
+    gen_stencil_key_values(width, height, sampleCnt, v);
+    GrResourceKey resourceKey(v);
+    GrResourceEntry* entry = fTextureCache->findAndLock(resourceKey,
+                                        GrResourceCache::kSingle_LockType);
+    if (NULL != entry) {
+        GrStencilBuffer* sb = (GrStencilBuffer*) entry->resource();
+        return sb;
+    } else {
+        return NULL;
+    }
+}
+
+void GrContext::unlockStencilBuffer(GrResourceEntry* sbEntry) {
+    fTextureCache->unlock(sbEntry);
+}
+
+// Nearest-neighbor stretch of an srcW x srcH pixel block into dstW x dstH.
+// bpp is bytes per pixel; source stepping is 16.16 fixed point. No filtering.
+static void stretchImage(void* dst,
+                         int dstW,
+                         int dstH,
+                         void* src,
+                         int srcW,
+                         int srcH,
+                         int bpp) {
+    GrFixed dx = (srcW << 16) / dstW;
+    GrFixed dy = (srcH << 16) / dstH;
+
+    // start sampling at the center of the first source pixel
+    GrFixed y = dy >> 1;
+
+    int dstXLimit = dstW*bpp;
+    for (int j = 0; j < dstH; ++j) {
+        GrFixed x = dx >> 1;
+        void* srcRow = (uint8_t*)src + (y>>16)*srcW*bpp;
+        void* dstRow = (uint8_t*)dst + j*dstW*bpp;
+        for (int i = 0; i < dstXLimit; i += bpp) {
+            memcpy((uint8_t*) dstRow + i,
+                   (uint8_t*) srcRow + (x>>16)*bpp,
+                   bpp);
+            x += dx;
+        }
+        y += dy;
+    }
+}
+
+/**
+ *  Creates a texture for key/desc, adds it to the cache and returns it
+ *  locked. When NPOT restrictions make this sampler/size combination
+ *  "special", a power-of-two up-stretched copy is created and cached
+ *  instead: via a GPU blit when a render target can be made, otherwise via
+ *  a CPU nearest-neighbor stretch.
+ */
+GrContext::TextureCacheEntry GrContext::createAndLockTexture(TextureKey key,
+                                                const GrSamplerState& sampler,
+                                                const GrTextureDesc& desc,
+                                                void* srcData, size_t rowBytes) {
+    SK_TRACE_EVENT0("GrContext::createAndLockTexture");
+
+#if GR_DUMP_TEXTURE_UPLOAD
+    GrPrintf("GrContext::createAndLockTexture [%d %d]\n", desc.fWidth, desc.fHeight);
+#endif
+
+    TextureCacheEntry entry;
+    uint32_t v[4];
+    bool special = gen_texture_key_values(fGpu, sampler, key,
+                                          desc.fWidth, desc.fHeight, false, v);
+    GrResourceKey resourceKey(v);
+
+    if (special) {
+        // Ensure a clamp/no-filter version of the original data is cached;
+        // the stretched copy is generated from it.
+        TextureCacheEntry clampEntry =
+            findAndLockTexture(key, desc.fWidth, desc.fHeight,
+                               GrSamplerState::ClampNoFilter());
+
+        if (NULL == clampEntry.texture()) {
+            clampEntry = createAndLockTexture(key,
+                                              GrSamplerState::ClampNoFilter(),
+                                              desc, srcData, rowBytes);
+            GrAssert(NULL != clampEntry.texture());
+            if (NULL == clampEntry.texture()) {
+                return entry;
+            }
+        }
+        GrTextureDesc rtDesc = desc;
+        rtDesc.fFlags =  rtDesc.fFlags |
+                         kRenderTarget_GrTextureFlagBit |
+                         kNoStencil_GrTextureFlagBit;
+        rtDesc.fWidth =
+            GrNextPow2(GrMax<int>(desc.fWidth,
+                                  fGpu->getCaps().fMinRenderTargetWidth));
+        rtDesc.fHeight =
+            GrNextPow2(GrMax<int>(desc.fHeight,
+                                  fGpu->getCaps().fMinRenderTargetHeight));
+
+        GrTexture* texture = fGpu->createTexture(rtDesc, NULL, 0);
+
+        if (NULL != texture) {
+            // GPU path: draw the clamp texture into the POT render target.
+            GrDrawTarget::AutoStateRestore asr(fGpu);
+            fGpu->setRenderTarget(texture->asRenderTarget());
+            fGpu->setTexture(0, clampEntry.texture());
+            fGpu->disableStencil();
+            fGpu->setViewMatrix(GrMatrix::I());
+            fGpu->setAlpha(0xff);
+            fGpu->setBlendFunc(kOne_BlendCoeff, kZero_BlendCoeff);
+            fGpu->disableState(GrDrawTarget::kDither_StateBit |
+                               GrDrawTarget::kClip_StateBit   |
+                               GrDrawTarget::kAntialias_StateBit);
+            GrSamplerState::Filter filter;
+            // if filtering is not desired then we want to ensure all
+            // texels in the resampled image are copies of texels from
+            // the original.
+            if (GrSamplerState::kNearest_Filter == sampler.getFilter()) {
+                filter = GrSamplerState::kNearest_Filter;
+            } else {
+                filter = GrSamplerState::kBilinear_Filter;
+            }
+            GrSamplerState stretchSampler(GrSamplerState::kClamp_WrapMode,
+                                          GrSamplerState::kClamp_WrapMode,
+                                          filter);
+            fGpu->setSamplerState(0, stretchSampler);
+
+            static const GrVertexLayout layout =
+                                GrDrawTarget::StageTexCoordVertexLayoutBit(0,0);
+            GrDrawTarget::AutoReleaseGeometry arg(fGpu, layout, 4, 0);
+
+            if (arg.succeeded()) {
+                GrPoint* verts = (GrPoint*) arg.vertices();
+                verts[0].setIRectFan(0, 0,
+                                     texture->width(),
+                                     texture->height(),
+                                     2*sizeof(GrPoint));
+                verts[1].setIRectFan(0, 0, 1, 1, 2*sizeof(GrPoint));
+                fGpu->drawNonIndexed(kTriangleFan_PrimitiveType,
+                                     0, 4);
+                entry.set(fTextureCache->createAndLock(resourceKey, texture));
+            }
+            // NOTE(review): if arg.succeeded() is false, texture is neither
+            // cached nor unref'd here -- looks like a leak; confirm.
+            texture->releaseRenderTarget();
+        } else {
+            // TODO: Our CPU stretch doesn't filter. But we create separate
+            // stretched textures when the sampler state is either filtered or
+            // not. Either implement filtered stretch blit on CPU or just create
+            // one when FBO case fails.
+
+            rtDesc.fFlags = kNone_GrTextureFlags;
+            // no longer need to clamp at min RT size.
+            rtDesc.fWidth  = GrNextPow2(desc.fWidth);
+            rtDesc.fHeight = GrNextPow2(desc.fHeight);
+            int bpp = GrBytesPerPixel(desc.fFormat);
+            SkAutoSMalloc<128*128*4> stretchedPixels(bpp *
+                                                     rtDesc.fWidth *
+                                                     rtDesc.fHeight);
+            stretchImage(stretchedPixels.get(), rtDesc.fWidth, rtDesc.fHeight,
+                         srcData, desc.fWidth, desc.fHeight, bpp);
+
+            size_t stretchedRowBytes = rtDesc.fWidth * bpp;
+
+            GrTexture* texture = fGpu->createTexture(rtDesc,
+                                                     stretchedPixels.get(),
+                                                     stretchedRowBytes);
+            // NOTE(review): debug-only assert; in release a NULL texture is
+            // passed straight into createAndLock -- confirm that is safe.
+            GrAssert(NULL != texture);
+            entry.set(fTextureCache->createAndLock(resourceKey, texture));
+        }
+        fTextureCache->unlock(clampEntry.cacheEntry());
+
+    } else {
+        GrTexture* texture = fGpu->createTexture(desc, srcData, rowBytes);
+        if (NULL != texture) {
+            entry.set(fTextureCache->createAndLock(resourceKey, texture));
+        }
+    }
+    return entry;
+}
+
+namespace {
+// Generates cache key words for a scratch texture: the key is derived from
+// the descriptor itself rather than from texture contents.
+inline void gen_scratch_tex_key_values(const GrGpu* gpu,
+                                       const GrTextureDesc& desc,
+                                       uint32_t v[4]) {
+    // Instead of a client-provided key of the texture contents
+    // we create a key from the descriptor.
+    // NOTE(review): fAALevel | (fFlags << 8) is computed before widening to
+    // 64 bits -- flags above bit 23 would collide with fFormat; confirm.
+    GrContext::TextureKey descKey = desc.fAALevel |
+                                    (desc.fFlags << 8) |
+                                    ((uint64_t) desc.fFormat << 32);
+    // this code path isn't friendly to tiling with NPOT restrictions
+    // We just pass ClampNoFilter()
+    gen_texture_key_values(gpu, GrSamplerState::ClampNoFilter(), descKey,
+                           desc.fWidth, desc.fHeight, true, v);
+}
+}
+
+/**
+ *  Returns a scratch texture matching inDesc, locked and detached from the
+ *  cache so concurrent callers never receive the same texture. For
+ *  non-exact matches the size is binned to pow2 (min 256) and the search
+ *  progressively relaxes RT/stencil flags, then doubles width, then height,
+ *  before finally creating a new texture at the (binned) requested size.
+ */
+GrContext::TextureCacheEntry GrContext::lockScratchTexture(
+                                                const GrTextureDesc& inDesc,
+                                                ScratchTexMatch match) {
+
+    GrTextureDesc desc = inDesc;
+    if (kExact_ScratchTexMatch != match) {
+        // bin by pow2 with a reasonable min
+        static const int MIN_SIZE = 256;
+        desc.fWidth = GrMax(MIN_SIZE, GrNextPow2(desc.fWidth));
+        desc.fHeight = GrMax(MIN_SIZE, GrNextPow2(desc.fHeight));
+    }
+
+    // (removed two unused locals p0/p1 that were computed but never read)
+
+    GrResourceEntry* entry;
+    int origWidth = desc.fWidth;
+    int origHeight = desc.fHeight;
+    bool doubledW = false;
+    bool doubledH = false;
+
+    do {
+        uint32_t v[4];
+        gen_scratch_tex_key_values(fGpu, desc, v);
+        GrResourceKey key(v);
+        entry = fTextureCache->findAndLock(key,
+                                           GrResourceCache::kNested_LockType);
+        // if we miss, relax the fit of the flags...
+        // then try doubling width... then height.
+        if (NULL != entry || kExact_ScratchTexMatch == match) {
+            break;
+        }
+        if (!(desc.fFlags & kRenderTarget_GrTextureFlagBit)) {
+            desc.fFlags = desc.fFlags | kRenderTarget_GrTextureFlagBit;
+        } else if (desc.fFlags & kNoStencil_GrTextureFlagBit) {
+            desc.fFlags = desc.fFlags & ~kNoStencil_GrTextureFlagBit;
+        } else if (!doubledW) {
+            desc.fFlags = inDesc.fFlags;
+            desc.fWidth *= 2;
+            doubledW = true;
+        } else if (!doubledH) {
+            desc.fFlags = inDesc.fFlags;
+            desc.fWidth = origWidth;
+            desc.fHeight *= 2;
+            doubledH = true;
+        } else {
+            break;
+        }
+
+    } while (true);
+
+    if (NULL == entry) {
+        // nothing cached: create at the originally requested (binned) size.
+        desc.fFlags = inDesc.fFlags;
+        desc.fWidth = origWidth;
+        desc.fHeight = origHeight;
+        GrTexture* texture = fGpu->createTexture(desc, NULL, 0);
+        if (NULL != texture) {
+            uint32_t v[4];
+            gen_scratch_tex_key_values(fGpu, desc, v);
+            GrResourceKey key(v);
+            entry = fTextureCache->createAndLock(key, texture);
+        }
+    }
+
+    // If the caller gives us the same desc/sampler twice we don't want
+    // to return the same texture the second time (unless it was previously
+    // released). So we detach the entry from the cache and reattach at release.
+    if (NULL != entry) {
+        fTextureCache->detach(entry);
+    }
+    return TextureCacheEntry(entry);
+}
+
+// Returns a texture to the cache. Scratch textures were detached at lock
+// time and must be reattached; others are simply unlocked.
+void GrContext::unlockTexture(TextureCacheEntry entry) {
+    // If this is a scratch texture we detached it from the cache
+    // while it was locked (to avoid two callers simultaneously getting
+    // the same texture).
+    if (kScratchBit & entry.cacheEntry()->key().getValue32(3)) {
+        fTextureCache->reattachAndUnlock(entry.cacheEntry());
+    } else {
+        fTextureCache->unlock(entry.cacheEntry());
+    }
+}
+
+// Creates a texture that bypasses the resource cache; caller owns the ref.
+GrTexture* GrContext::createUncachedTexture(const GrTextureDesc& desc,
+                                            void* srcData,
+                                            size_t rowBytes) {
+    return fGpu->createTexture(desc, srcData, rowBytes);
+}
+
+void GrContext::getTextureCacheLimits(int* maxTextures,
+                                      size_t* maxTextureBytes) const {
+    fTextureCache->getLimits(maxTextures, maxTextureBytes);
+}
+
+void GrContext::setTextureCacheLimits(int maxTextures, size_t maxTextureBytes) {
+    fTextureCache->setLimits(maxTextures, maxTextureBytes);
+}
+
+int GrContext::getMaxTextureSize() const {
+    return fGpu->getCaps().fMaxTextureSize;
+}
+
+int GrContext::getMaxRenderTargetSize() const {
+    return fGpu->getCaps().fMaxRenderTargetSize;
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+// Wraps a client-created (platform) surface. Invalid flag combinations are
+// rejected here so GrGpu subclasses don't have to re-check them.
+GrResource* GrContext::createPlatformSurface(const GrPlatformSurfaceDesc& desc) {
+    // validate flags here so that GrGpu subclasses don't have to check
+    if (kTexture_GrPlatformSurfaceType == desc.fSurfaceType &&
+        0 != desc.fRenderTargetFlags) {
+        return NULL;
+    }
+    // NOTE(review): this check rejects sampleCnt with kGrCanResolve while
+    // the one below requires kGrCanResolve for sampled texture-RTs -- the
+    // two together reject every sampled texture-RT; confirm this isn't
+    // meant to be !desc.fSampleCnt here.
+    if (desc.fSampleCnt &&
+        (kGrCanResolve_GrPlatformRenderTargetFlagBit & desc.fRenderTargetFlags)) {
+        return NULL;
+    }
+    if (kTextureRenderTarget_GrPlatformSurfaceType == desc.fSurfaceType &&
+        desc.fSampleCnt &&
+        !(kGrCanResolve_GrPlatformRenderTargetFlagBit & desc.fRenderTargetFlags)) {
+        return NULL;
+    }
+    return fGpu->createPlatformSurface(desc);
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+// Reports whether an Index8 (paletted) texture of the given size can be
+// used with sampler on this gpu, accounting for NPOT capabilities.
+bool GrContext::supportsIndex8PixelConfig(const GrSamplerState& sampler,
+                                          int width, int height) const {
+    const GrDrawTarget::Caps& caps = fGpu->getCaps();
+    if (!caps.f8BitPaletteSupport) {
+        return false;
+    }
+
+    bool isPow2 = GrIsPow2(width) && GrIsPow2(height);
+
+    if (!isPow2) {
+        if (!caps.fNPOTTextureSupport) {
+            return false;
+        }
+
+        // NPOT is fine as long as tiling (non-clamp wrap) isn't requested
+        // on hardware that can't tile NPOT.
+        bool tiled = sampler.getWrapX() != GrSamplerState::kClamp_WrapMode ||
+                     sampler.getWrapY() != GrSamplerState::kClamp_WrapMode;
+        if (tiled && !caps.fNPOTTextureTileSupport) {
+            return false;
+        }
+    }
+    return true;
+}
+
+////////////////////////////////////////////////////////////////////////////////
+
+const GrClip& GrContext::getClip() const { return fGpu->getClip(); }
+
+// Sets the clip and enables clipping on the gpu draw target.
+void GrContext::setClip(const GrClip& clip) {
+    fGpu->setClip(clip);
+    fGpu->enableState(GrDrawTarget::kClip_StateBit);
+}
+
+// Rect convenience overload.
+// NOTE(review): unlike the GrClip overload this does not enable
+// kClip_StateBit -- confirm callers rely on that difference.
+void GrContext::setClip(const GrIRect& rect) {
+    GrClip clip;
+    clip.setFromIRect(rect);
+    fGpu->setClip(clip);
+}
+
+// Flushes pending deferred draws, then clears rect (whole target if NULL).
+void GrContext::clear(const GrIRect* rect, const GrColor color) {
+    this->flush();
+    fGpu->clear(rect, color);
+}
+
+// Fills the entire current clip with paint by drawing a target-covering
+// rect mapped through the inverse of the view matrix.
+void GrContext::drawPaint(const GrPaint& paint) {
+    // set rect to be big enough to fill the space, but not super-huge, so we
+    // don't overflow fixed-point implementations
+    GrRect r;
+    r.setLTRB(0, 0,
+              GrIntToScalar(getRenderTarget()->width()),
+              GrIntToScalar(getRenderTarget()->height()));
+    GrAutoMatrix am;
+    GrMatrix inverse;
+    SkTLazy<GrPaint> tmpPaint;
+    const GrPaint* p = &paint;
+    // We attempt to map r by the inverse matrix and draw that. mapRect will
+    // map the four corners and bound them with a new rect. This will not
+    // produce a correct result for some perspective matrices.
+    if (!this->getMatrix().hasPerspective()) {
+        if (!fGpu->getViewInverse(&inverse)) {
+            GrPrintf("Could not invert matrix");
+            return;
+        }
+        inverse.mapRect(&r);
+    } else {
+        // Perspective: draw with identity view and push the inverse into
+        // the active sampler matrices instead.
+        if (paint.getActiveMaskStageMask() || paint.getActiveStageMask()) {
+            if (!fGpu->getViewInverse(&inverse)) {
+                GrPrintf("Could not invert matrix");
+                return;
+            }
+            tmpPaint.set(paint);
+            tmpPaint.get()->preConcatActiveSamplerMatrices(inverse);
+            p = tmpPaint.get();
+        }
+        am.set(this, GrMatrix::I());
+    }
+    // by definition this fills the entire clip, no need for AA
+    if (paint.fAntiAlias) {
+        if (!tmpPaint.isValid()) {
+            tmpPaint.set(paint);
+            p = tmpPaint.get();
+        }
+        GrAssert(p == tmpPaint.get());
+        tmpPaint.get()->fAntiAlias = false;
+    }
+    this->drawRect(*p, r);
+}
+
+////////////////////////////////////////////////////////////////////////////////
+
+namespace {
+// True when coverage-based AA must be disabled because the current blend
+// cannot incorporate a coverage value.
+inline bool disable_coverage_aa_for_blend(GrDrawTarget* target) {
+    return DISABLE_COVERAGE_AA_FOR_BLEND && !target->canApplyCoverage();
+}
+}
+
+// Bookkeeping for one offscreen-AA render: the downsample technique, the
+// tiling of the bound rect, the offscreen scratch texture(s), and the
+// caller's saved draw state and clip.
+struct GrContext::OffscreenRecord {
+    enum Downsample {
+        k4x4TwoPass_Downsample,
+        k4x4SinglePass_Downsample,
+        kFSAA_Downsample
+    } fDownsample;
+    int fTileSizeX;
+    int fTileSizeY;
+    int fTileCountX;
+    int fTileCountY;
+    int fScale;
+    GrAutoScratchTexture fOffscreen0;
+    GrAutoScratchTexture fOffscreen1;
+    GrDrawTarget::SavedDrawState fSavedState;
+    GrClip fClip;
+};
+
+// Decides whether AA via an offscreen supersampled/MSAA render should be
+// used for the current target state.
+bool GrContext::doOffscreenAA(GrDrawTarget* target,
+                              bool isHairLines) const {
+#if !GR_USE_OFFSCREEN_AA
+    return false;
+#else
+    if (!target->isAntialiasState()) {
+        return false;
+    }
+    // Line primitives are always rasterized as 1 pixel wide.
+    // Super-sampling would make them too thin but MSAA would be OK.
+    if (isHairLines &&
+        (!PREFER_MSAA_OFFSCREEN_AA || !fGpu->getCaps().fFSAASupport)) {
+        return false;
+    }
+    if (target->getRenderTarget()->isMultisampled()) {
+        return false;
+    }
+    if (disable_coverage_aa_for_blend(target)) {
+#if GR_DEBUG
+        GrPrintf("Turning off AA to correctly apply blend.\n");
+#endif
+        return false;
+    }
+    return true;
+#endif
+}
+
+/**
+ *  Allocates offscreen scratch texture(s) for an AA render of boundRect,
+ *  picks the downsample technique (MSAA, single-pass 4x4 shader filter, or
+ *  two-pass 2x2 blits), computes the tiling of boundRect, and saves the
+ *  target's draw state/clip for restoration in pass 2. Returns false if a
+ *  needed scratch texture could not be obtained.
+ */
+bool GrContext::prepareForOffscreenAA(GrDrawTarget* target,
+                                      bool requireStencil,
+                                      const GrIRect& boundRect,
+                                      GrPathRenderer* pr,
+                                      OffscreenRecord* record) {
+
+    GrAssert(GR_USE_OFFSCREEN_AA);
+
+    GrAssert(NULL == record->fOffscreen0.texture());
+    GrAssert(NULL == record->fOffscreen1.texture());
+    GrAssert(!boundRect.isEmpty());
+
+    int boundW = boundRect.width();
+    int boundH = boundRect.height();
+
+    GrTextureDesc desc;
+
+    desc.fWidth = GrMin(fMaxOffscreenAASize, boundW);
+    desc.fHeight = GrMin(fMaxOffscreenAASize, boundH);
+
+    if (requireStencil) {
+        desc.fFlags = kRenderTarget_GrTextureFlagBit;
+    } else {
+        desc.fFlags = kRenderTarget_GrTextureFlagBit |
+                      kNoStencil_GrTextureFlagBit;
+    }
+
+    desc.fFormat = kRGBA_8888_GrPixelConfig;
+
+    if (PREFER_MSAA_OFFSCREEN_AA && fGpu->getCaps().fFSAASupport) {
+        record->fDownsample = OffscreenRecord::kFSAA_Downsample;
+        record->fScale = 1;
+        desc.fAALevel = kMed_GrAALevel;
+    } else {
+        record->fDownsample = fGpu->getCaps().fShaderSupport ?
+                                OffscreenRecord::k4x4SinglePass_Downsample :
+                                OffscreenRecord::k4x4TwoPass_Downsample;
+        record->fScale = OFFSCREEN_SSAA_SCALE;
+        // both downsample paths assume this
+        GR_STATIC_ASSERT(4 == OFFSCREEN_SSAA_SCALE);
+        desc.fAALevel = kNone_GrAALevel;
+    }
+
+    desc.fWidth *= record->fScale;
+    desc.fHeight *= record->fScale;
+    record->fOffscreen0.set(this, desc);
+    if (NULL == record->fOffscreen0.texture()) {
+        return false;
+    }
+    // the approximate lookup might have given us some slop space, might as well
+    // use it when computing the tiles size.
+    // these are scale values, will adjust after considering
+    // the possible second offscreen.
+    record->fTileSizeX = record->fOffscreen0.texture()->width();
+    record->fTileSizeY = record->fOffscreen0.texture()->height();
+
+    if (OffscreenRecord::k4x4TwoPass_Downsample == record->fDownsample) {
+        // second, quarter-area offscreen for the intermediate 2x2 blit.
+        desc.fWidth /= 2;
+        desc.fHeight /= 2;
+        record->fOffscreen1.set(this, desc);
+        if (NULL == record->fOffscreen1.texture()) {
+            return false;
+        }
+        record->fTileSizeX = GrMin(record->fTileSizeX,
+                                   2 * record->fOffscreen0.texture()->width());
+        record->fTileSizeY = GrMin(record->fTileSizeY,
+                                   2 * record->fOffscreen0.texture()->height());
+    }
+    record->fTileSizeX /= record->fScale;
+    record->fTileSizeY /= record->fScale;
+
+    record->fTileCountX = GrIDivRoundUp(boundW, record->fTileSizeX);
+    record->fTileCountY = GrIDivRoundUp(boundH, record->fTileSizeY);
+
+    record->fClip = target->getClip();
+
+    target->saveCurrentDrawState(&record->fSavedState);
+    return true;
+}
+
+// First-pass setup for one tile: points the target at offscreen0, shifts
+// and scales the view so this tile fills the supersampled target, sets the
+// clip to the (scaled) tile and clears it to transparent black.
+void GrContext::setupOffscreenAAPass1(GrDrawTarget* target,
+                                      const GrIRect& boundRect,
+                                      int tileX, int tileY,
+                                      OffscreenRecord* record) {
+
+    GrRenderTarget* offRT0 = record->fOffscreen0.texture()->asRenderTarget();
+    GrAssert(NULL != offRT0);
+
+    GrPaint tempPaint;
+    tempPaint.reset();
+    SetPaint(tempPaint, target);
+    target->setRenderTarget(offRT0);
+
+    GrMatrix transM;
+    int left = boundRect.fLeft + tileX * record->fTileSizeX;
+    int top = boundRect.fTop + tileY * record->fTileSizeY;
+    transM.setTranslate(-left * GR_Scalar1, -top * GR_Scalar1);
+    target->postConcatViewMatrix(transM);
+    GrMatrix scaleM;
+    scaleM.setScale(record->fScale * GR_Scalar1, record->fScale * GR_Scalar1);
+    target->postConcatViewMatrix(scaleM);
+
+    // edge tiles may be partial.
+    int w = (tileX == record->fTileCountX-1) ? boundRect.fRight - left :
+                                               record->fTileSizeX;
+    int h = (tileY == record->fTileCountY-1) ? boundRect.fBottom - top :
+                                               record->fTileSizeY;
+    GrIRect clear = SkIRect::MakeWH(record->fScale * w,
+                                    record->fScale * h);
+    target->setClip(GrClip(clear));
+#if 0
+    // visualize tile boundaries by setting edges of offscreen to white
+    // and interior to transparent black.
+    target->clear(&clear, 0xffffffff);
+
+    static const int gOffset = 2;
+    GrIRect clear2 = SkIRect::MakeLTRB(gOffset, gOffset,
+                                       record->fScale * w - gOffset,
+                                       record->fScale * h - gOffset);
+    target->clear(&clear2, 0x0);
+#else
+    target->clear(&clear, 0x0);
+#endif
+}
+
+/**
+ *  Second pass for one tile: downsamples the supersampled offscreen (or
+ *  resolves the MSAA target), then draws the result back into the caller's
+ *  render target through an additional coverage stage, after restoring the
+ *  caller's saved draw state and clip.
+ */
+void GrContext::doOffscreenAAPass2(GrDrawTarget* target,
+                                   const GrPaint& paint,
+                                   const GrIRect& boundRect,
+                                   int tileX, int tileY,
+                                   OffscreenRecord* record) {
+    SK_TRACE_EVENT0("GrContext::doOffscreenAAPass2");
+    GrAssert(NULL != record->fOffscreen0.texture());
+    GrDrawTarget::AutoGeometryPush agp(target);
+    GrIRect tileRect;
+    tileRect.fLeft = boundRect.fLeft + tileX * record->fTileSizeX;
+    // Fixed: this statement previously ended with ',' (comma operator)
+    // instead of ';'. Behavior was identical but accidental.
+    tileRect.fTop = boundRect.fTop + tileY * record->fTileSizeY;
+    tileRect.fRight = (tileX == record->fTileCountX-1) ?
+                        boundRect.fRight :
+                        tileRect.fLeft + record->fTileSizeX;
+    tileRect.fBottom = (tileY == record->fTileCountY-1) ?
+                        boundRect.fBottom :
+                        tileRect.fTop + record->fTileSizeY;
+
+    GrSamplerState::Filter filter;
+    if (OffscreenRecord::k4x4SinglePass_Downsample == record->fDownsample) {
+        filter = GrSamplerState::k4x4Downsample_Filter;
+    } else {
+        filter = GrSamplerState::kBilinear_Filter;
+    }
+
+    GrMatrix sampleM;
+    GrSamplerState sampler(GrSamplerState::kClamp_WrapMode,
+                           GrSamplerState::kClamp_WrapMode, filter);
+
+    GrTexture* src = record->fOffscreen0.texture();
+    int scale;
+
+    enum {
+        kOffscreenStage = GrPaint::kTotalStages,
+    };
+
+    if (OffscreenRecord::k4x4TwoPass_Downsample == record->fDownsample) {
+        GrAssert(NULL != record->fOffscreen1.texture());
+        scale = 2;
+        GrRenderTarget* dst = record->fOffscreen1.texture()->asRenderTarget();
+
+        // Do 2x2 downsample from first to second
+        target->setTexture(kOffscreenStage, src);
+        target->setRenderTarget(dst);
+        target->setViewMatrix(GrMatrix::I());
+        sampleM.setScale(scale * GR_Scalar1 / src->width(),
+                         scale * GR_Scalar1 / src->height());
+        sampler.setMatrix(sampleM);
+        target->setSamplerState(kOffscreenStage, sampler);
+        GrRect rect = SkRect::MakeWH(SkIntToScalar(scale * tileRect.width()),
+                                     SkIntToScalar(scale * tileRect.height()));
+        target->drawSimpleRect(rect, NULL, 1 << kOffscreenStage);
+
+        src = record->fOffscreen1.texture();
+    } else if (OffscreenRecord::kFSAA_Downsample == record->fDownsample) {
+        scale = 1;
+        GrIRect rect = SkIRect::MakeWH(tileRect.width(), tileRect.height());
+        src->asRenderTarget()->overrideResolveRect(rect);
+    } else {
+        GrAssert(OffscreenRecord::k4x4SinglePass_Downsample ==
+                 record->fDownsample);
+        scale = 4;
+    }
+
+    // setup for draw back to main RT, we use the original
+    // draw state setup by the caller plus an additional coverage
+    // stage to handle the AA resolve. Also, we use an identity
+    // view matrix and so pre-concat sampler matrices with view inv.
+    int stageMask = paint.getActiveStageMask();
+
+    target->restoreDrawState(record->fSavedState);
+    target->setClip(record->fClip);
+
+    if (stageMask) {
+        GrMatrix invVM;
+        if (target->getViewInverse(&invVM)) {
+            target->preConcatSamplerMatrices(stageMask, invVM);
+        }
+    }
+    // This is important when tiling, otherwise second tile's
+    // pass 1 view matrix will be incorrect.
+    GrDrawTarget::AutoViewMatrixRestore avmr(target);
+
+    target->setViewMatrix(GrMatrix::I());
+
+    target->setTexture(kOffscreenStage, src);
+    sampleM.setScale(scale * GR_Scalar1 / src->width(),
+                     scale * GR_Scalar1 / src->height());
+    sampler.setMatrix(sampleM);
+    sampleM.setTranslate(SkIntToScalar(-tileRect.fLeft),
+                         SkIntToScalar(-tileRect.fTop));
+    sampler.preConcatMatrix(sampleM);
+    target->setSamplerState(kOffscreenStage, sampler);
+
+    GrRect dstRect;
+    int stages = (1 << kOffscreenStage) | stageMask;
+    dstRect.set(tileRect);
+    target->drawSimpleRect(dstRect, NULL, stages);
+}
+
+// Restores the draw state saved by prepareForOffscreenAA. (The clip is
+// restored per-tile in doOffscreenAAPass2.)
+void GrContext::cleanupOffscreenAA(GrDrawTarget* target,
+                                   GrPathRenderer* pr,
+                                   OffscreenRecord* record) {
+    target->restoreDrawState(record->fSavedState);
+}
+
+////////////////////////////////////////////////////////////////////////////////
+
+/* create a triangle strip that strokes the specified rect. There are 8
+   unique vertices, but we repeat the last 2 to close up. Alternatively we
+   could use an indices array, and then only send 8 verts, but not sure that
+   would be faster.
+ */
+static void setStrokeRectStrip(GrPoint verts[10], GrRect rect,
+                               GrScalar width) {
+    const GrScalar rad = GrScalarHalf(width);
+    rect.sort();
+
+    verts[0].set(rect.fLeft + rad, rect.fTop + rad);
+    verts[1].set(rect.fLeft - rad, rect.fTop - rad);
+    verts[2].set(rect.fRight - rad, rect.fTop + rad);
+    verts[3].set(rect.fRight + rad, rect.fTop - rad);
+    verts[4].set(rect.fRight - rad, rect.fBottom - rad);
+    verts[5].set(rect.fRight + rad, rect.fBottom + rad);
+    verts[6].set(rect.fLeft + rad, rect.fBottom - rad);
+    verts[7].set(rect.fLeft - rad, rect.fBottom + rad);
+    verts[8] = verts[0];
+    verts[9] = verts[1];
+}
+
+// Writes a 4-vertex fan for r inset by (dx, dy) into pts with the given
+// byte stride.
+static void setInsetFan(GrPoint* pts, size_t stride,
+                        const GrRect& r, GrScalar dx, GrScalar dy) {
+    pts->setRectFan(r.fLeft + dx, r.fTop + dy, r.fRight - dx, r.fBottom - dy, stride);
+}
+
+// Index pattern over 8 verts (outer rect fan 0-3, inner rect fan 4-7):
+// four bevel quads around the edge plus the interior quad (last 6 indices).
+static const uint16_t gFillAARectIdx[] = {
+    0, 1, 5, 5, 4, 0,
+    1, 2, 6, 6, 5, 1,
+    2, 3, 7, 7, 6, 2,
+    3, 0, 4, 4, 7, 3,
+    4, 5, 6, 6, 7, 4,
+};
+
+int GrContext::aaFillRectIndexCount() const {
+    return GR_ARRAY_COUNT(gFillAARectIdx);
+}
+
+// Lazily creates and uploads the shared AA fill-rect index buffer; result
+// is cached on the context (freed in the destructor).
+GrIndexBuffer* GrContext::aaFillRectIndexBuffer() {
+    if (NULL == fAAFillRectIndexBuffer) {
+        fAAFillRectIndexBuffer = fGpu->createIndexBuffer(sizeof(gFillAARectIdx),
+                                                         false);
+        if (NULL != fAAFillRectIndexBuffer) {
+    #if GR_DEBUG
+            bool updated =
+    #endif
+            fAAFillRectIndexBuffer->updateData(gFillAARectIdx,
+                                               sizeof(gFillAARectIdx));
+            GR_DEBUGASSERT(updated);
+        }
+    }
+    return fAAFillRectIndexBuffer;
+}
+
// Index list for AA stroke rects: three nested rings of 4 vertices each
// (offsets +0, +4, +8 select the ring). Each group of four quads stitches
// one pair of adjacent rings — outer AA ramp, solid stroke body, inner AA
// ramp (see strokeAARect).
static const uint16_t gStrokeAARectIdx[] = {
    0 + 0, 1 + 0, 5 + 0, 5 + 0, 4 + 0, 0 + 0,
    1 + 0, 2 + 0, 6 + 0, 6 + 0, 5 + 0, 1 + 0,
    2 + 0, 3 + 0, 7 + 0, 7 + 0, 6 + 0, 2 + 0,
    3 + 0, 0 + 0, 4 + 0, 4 + 0, 7 + 0, 3 + 0,

    0 + 4, 1 + 4, 5 + 4, 5 + 4, 4 + 4, 0 + 4,
    1 + 4, 2 + 4, 6 + 4, 6 + 4, 5 + 4, 1 + 4,
    2 + 4, 3 + 4, 7 + 4, 7 + 4, 6 + 4, 2 + 4,
    3 + 4, 0 + 4, 4 + 4, 4 + 4, 7 + 4, 3 + 4,

    0 + 8, 1 + 8, 5 + 8, 5 + 8, 4 + 8, 0 + 8,
    1 + 8, 2 + 8, 6 + 8, 6 + 8, 5 + 8, 1 + 8,
    2 + 8, 3 + 8, 7 + 8, 7 + 8, 6 + 8, 2 + 8,
    3 + 8, 0 + 8, 4 + 8, 4 + 8, 7 + 8, 3 + 8,
};

// Number of indices consumed by one AA stroke-rect draw.
int GrContext::aaStrokeRectIndexCount() const {
    return GR_ARRAY_COUNT(gStrokeAARectIdx);
}

// Lazily creates and caches the shared index buffer for AA stroke rects.
// Returns NULL if buffer creation failed; callers must check.
GrIndexBuffer* GrContext::aaStrokeRectIndexBuffer() {
    if (NULL == fAAStrokeRectIndexBuffer) {
        fAAStrokeRectIndexBuffer = fGpu->createIndexBuffer(sizeof(gStrokeAARectIdx),
                                                           false);
        if (NULL != fAAStrokeRectIndexBuffer) {
    #if GR_DEBUG
            // 'updated' only exists in debug builds, purely for the assert.
            bool updated =
    #endif
            fAAStrokeRectIndexBuffer->updateData(gStrokeAARectIdx,
                                                 sizeof(gStrokeAARectIdx));
            GR_DEBUGASSERT(updated);
        }
    }
    return fAAStrokeRectIndexBuffer;
}
+
// Builds the vertex layout for AA rect draws: every stage with a bound
// texture samples using the vertex position as its tex coord, and the AA
// ramp value rides in either a per-vertex coverage channel or the color
// channel depending on 'useCoverage'.
static GrVertexLayout aa_rect_layout(const GrDrawTarget* target,
                                     bool useCoverage) {
    GrVertexLayout layout = 0;
    for (int s = 0; s < GrDrawTarget::kNumStages; ++s) {
        if (NULL != target->getTexture(s)) {
            layout |= GrDrawTarget::StagePosAsTexCoordVertexLayoutBit(s);
        }
    }
    if (useCoverage) {
        layout |= GrDrawTarget::kCoverage_VertexLayoutBit;
    } else {
        layout |= GrDrawTarget::kColor_VertexLayoutBit;
    }
    return layout;
}
+
// Draws an antialiased filled rect in device space using two concentric
// 4-vertex fans: the outer fan is outset by half a pixel and carries zero
// color/coverage, the inner fan is inset by half a pixel and carries full
// coverage, giving a one-pixel alpha ramp at the edges.
void GrContext::fillAARect(GrDrawTarget* target,
                           const GrRect& devRect,
                           bool useVertexCoverage) {
    GrVertexLayout layout = aa_rect_layout(target, useVertexCoverage);

    size_t vsize = GrDrawTarget::VertexSize(layout);

    GrDrawTarget::AutoReleaseGeometry geo(target, layout, 8, 0);
    if (!geo.succeeded()) {
        GrPrintf("Failed to get space for vertices!\n");
        return;
    }
    GrIndexBuffer* indexBuffer = this->aaFillRectIndexBuffer();
    if (NULL == indexBuffer) {
        GrPrintf("Failed to create index buffer!\n");
        return;
    }

    // Raw byte offsets into the interleaved vertex data; position is the
    // first attribute of each vertex.
    intptr_t verts = reinterpret_cast<intptr_t>(geo.vertices());

    GrPoint* fan0Pos = reinterpret_cast<GrPoint*>(verts);
    GrPoint* fan1Pos = reinterpret_cast<GrPoint*>(verts + 4 * vsize);

    setInsetFan(fan0Pos, vsize, devRect, -GR_ScalarHalf, -GR_ScalarHalf);
    setInsetFan(fan1Pos, vsize, devRect, GR_ScalarHalf, GR_ScalarHalf);

    // Color/coverage immediately follows the position in this layout, hence
    // the sizeof(GrPoint) skip. Outer fan (verts 0-3) gets zero == fully
    // transparent edge.
    verts += sizeof(GrPoint);
    for (int i = 0; i < 4; ++i) {
        *reinterpret_cast<GrColor*>(verts + i * vsize) = 0;
    }

    GrColor innerColor;
    if (useVertexCoverage) {
        innerColor = 0xffffffff;  // full coverage; draw color comes from state
    } else {
        innerColor = target->getColor();
    }

    // Inner fan (verts 4-7) gets full color/coverage.
    verts += 4 * vsize;
    for (int i = 0; i < 4; ++i) {
        *reinterpret_cast<GrColor*>(verts + i * vsize) = innerColor;
    }

    target->setIndexSourceToBuffer(indexBuffer);

    target->drawIndexed(kTriangles_PrimitiveType, 0,
                        0, 8, this->aaFillRectIndexCount());
}
+
// Draws an antialiased stroked rect in device space using four nested
// 4-vertex fans: fan0/fan1 bracket the outer stroke edge and fan2/fan3
// bracket the inner edge, each pair half a pixel apart to form the AA ramps.
// Degenerates to a filled AA rect when the stroke would overlap itself.
void GrContext::strokeAARect(GrDrawTarget* target,
                             const GrRect& devRect,
                             const GrVec& devStrokeSize,
                             bool useVertexCoverage) {
    const GrScalar& dx = devStrokeSize.fX;
    const GrScalar& dy = devStrokeSize.fY;
    const GrScalar rx = GrMul(dx, GR_ScalarHalf);
    const GrScalar ry = GrMul(dy, GR_ScalarHalf);

    // 'spare' is the gap left between opposing stroke edges; if it closes
    // up (<= 0) the stroke covers the whole rect, so fill instead.
    GrScalar spare;
    {
        GrScalar w = devRect.width() - dx;
        GrScalar h = devRect.height() - dy;
        spare = GrMin(w, h);
    }

    if (spare <= 0) {
        GrRect r(devRect);
        r.inset(-rx, -ry);  // expand to the stroke's outer boundary
        fillAARect(target, r, useVertexCoverage);
        return;
    }
    GrVertexLayout layout = aa_rect_layout(target, useVertexCoverage);
    size_t vsize = GrDrawTarget::VertexSize(layout);

    GrDrawTarget::AutoReleaseGeometry geo(target, layout, 16, 0);
    if (!geo.succeeded()) {
        GrPrintf("Failed to get space for vertices!\n");
        return;
    }
    GrIndexBuffer* indexBuffer = this->aaStrokeRectIndexBuffer();
    if (NULL == indexBuffer) {
        GrPrintf("Failed to create index buffer!\n");
        return;
    }

    intptr_t verts = reinterpret_cast<intptr_t>(geo.vertices());

    GrPoint* fan0Pos = reinterpret_cast<GrPoint*>(verts);
    GrPoint* fan1Pos = reinterpret_cast<GrPoint*>(verts + 4 * vsize);
    GrPoint* fan2Pos = reinterpret_cast<GrPoint*>(verts + 8 * vsize);
    GrPoint* fan3Pos = reinterpret_cast<GrPoint*>(verts + 12 * vsize);

    // Outermost to innermost ring, each offset +/- half a pixel around the
    // stroke's outer and inner edges.
    setInsetFan(fan0Pos, vsize, devRect, -rx - GR_ScalarHalf, -ry - GR_ScalarHalf);
    setInsetFan(fan1Pos, vsize, devRect, -rx + GR_ScalarHalf, -ry + GR_ScalarHalf);
    setInsetFan(fan2Pos, vsize, devRect, rx - GR_ScalarHalf, ry - GR_ScalarHalf);
    setInsetFan(fan3Pos, vsize, devRect, rx + GR_ScalarHalf, ry + GR_ScalarHalf);

    // Color/coverage follows the position in this layout. Ring 0 (outermost)
    // is transparent.
    verts += sizeof(GrPoint);
    for (int i = 0; i < 4; ++i) {
        *reinterpret_cast<GrColor*>(verts + i * vsize) = 0;
    }

    GrColor innerColor;
    if (useVertexCoverage) {
        innerColor = 0xffffffff;  // full coverage; draw color comes from state
    } else {
        innerColor = target->getColor();
    }
    // Rings 1 and 2 (the solid stroke body) get full color/coverage.
    verts += 4 * vsize;
    for (int i = 0; i < 8; ++i) {
        *reinterpret_cast<GrColor*>(verts + i * vsize) = innerColor;
    }

    // Ring 3 (innermost) is transparent again.
    verts += 8 * vsize;
    for (int i = 0; i < 8; ++i) {
        *reinterpret_cast<GrColor*>(verts + i * vsize) = 0;
    }

    target->setIndexSourceToBuffer(indexBuffer);
    target->drawIndexed(kTriangles_PrimitiveType,
                        0, 0, 16, aaStrokeRectIndexCount());
}
+
/**
 * Returns true if the rects edges are integer-aligned.
 */
static bool isIRect(const GrRect& r) {
    return GrScalarIsInt(r.fLeft) && GrScalarIsInt(r.fTop) &&
           GrScalarIsInt(r.fRight) && GrScalarIsInt(r.fBottom);
}
+
// Decides whether a rect draw should take the alpha-ramp AA path.
// On success returns true and fills out:
//   *combinedMatrix    - view matrix pre-concatenated with 'matrix' (if any)
//   *devRect           - 'rect' mapped into device space and sorted
//   *useVertexCoverage - true if the AA ramp must ride in a per-vertex
//                        coverage channel rather than tweaked alpha
// Returns false when AA is off, MSAA will handle it, HW AA lines will handle
// a hairline, the mapped rect wouldn't stay axis-aligned, or coverage can't
// be blended correctly.
static bool apply_aa_to_rect(GrDrawTarget* target,
                             const GrRect& rect,
                             GrScalar width,
                             const GrMatrix* matrix,
                             GrMatrix* combinedMatrix,
                             GrRect* devRect,
                             bool* useVertexCoverage) {
    // we use a simple alpha ramp to do aa on axis-aligned rects
    // do AA with alpha ramp if the caller requested AA, the rect
    // will be axis-aligned,the render target is not
    // multisampled, and the rect won't land on integer coords.

    if (!target->isAntialiasState()) {
        return false;
    }

    // we are keeping around the "tweak the alpha" trick because
    // it is our only hope for the fixed-pipe implementation.
    // In a shader implementation we can give a separate coverage input
    *useVertexCoverage = false;
    if (!target->canTweakAlphaForCoverage()) {
        if (target->getCaps().fSupportPerVertexCoverage) {
            if (disable_coverage_aa_for_blend(target)) {
#if GR_DEBUG
                GrPrintf("Turning off AA to correctly apply blend.\n");
#endif
                return false;
            } else {
                *useVertexCoverage = true;
            }
        } else {
            GrPrintf("Rect AA dropped because no support for coverage.\n");
            return false;
        }
    }

    // MSAA targets get their AA from the hardware.
    if (target->getRenderTarget()->isMultisampled()) {
        return false;
    }

    // width == 0 means hairline; prefer HW AA lines when available.
    if (0 == width && target->willUseHWAALines()) {
        return false;
    }

    if (!target->getViewMatrix().preservesAxisAlignment()) {
        return false;
    }

    if (NULL != matrix &&
        !matrix->preservesAxisAlignment()) {
        return false;
    }

    *combinedMatrix = target->getViewMatrix();
    if (NULL != matrix) {
        combinedMatrix->preConcat(*matrix);
        GrAssert(combinedMatrix->preservesAxisAlignment());
    }

    combinedMatrix->mapRect(devRect, rect);
    devRect->sort();

    // For fills (width < 0) a pixel-aligned rect needs no AA at all.
    if (width < 0) {
        return !isIRect(*devRect);
    } else {
        return true;
    }
}
+
+void GrContext::drawRect(const GrPaint& paint,
+ const GrRect& rect,
+ GrScalar width,
+ const GrMatrix* matrix) {
+ SK_TRACE_EVENT0("GrContext::drawRect");
+
+ GrDrawTarget* target = this->prepareToDraw(paint, kUnbuffered_DrawCategory);
+ int stageMask = paint.getActiveStageMask();
+
+ GrRect devRect = rect;
+ GrMatrix combinedMatrix;
+ bool useVertexCoverage;
+ bool doAA = apply_aa_to_rect(target, rect, width, matrix,
+ &combinedMatrix, &devRect, &useVertexCoverage);
+
+ if (doAA) {
+ GrDrawTarget::AutoViewMatrixRestore avm(target);
+ if (stageMask) {
+ GrMatrix inv;
+ if (combinedMatrix.invert(&inv)) {
+ target->preConcatSamplerMatrices(stageMask, inv);
+ }
+ }
+ target->setViewMatrix(GrMatrix::I());
+ if (width >= 0) {
+ GrVec strokeSize;;
+ if (width > 0) {
+ strokeSize.set(width, width);
+ combinedMatrix.mapVectors(&strokeSize, 1);
+ strokeSize.setAbs(strokeSize);
+ } else {
+ strokeSize.set(GR_Scalar1, GR_Scalar1);
+ }
+ strokeAARect(target, devRect, strokeSize, useVertexCoverage);
+ } else {
+ fillAARect(target, devRect, useVertexCoverage);
+ }
+ return;
+ }
+
+ if (width >= 0) {
+ // TODO: consider making static vertex buffers for these cases.
+ // Hairline could be done by just adding closing vertex to
+ // unitSquareVertexBuffer()
+ GrVertexLayout layout = PaintStageVertexLayoutBits(paint, NULL);
+
+ static const int worstCaseVertCount = 10;
+ GrDrawTarget::AutoReleaseGeometry geo(target, layout, worstCaseVertCount, 0);
+
+ if (!geo.succeeded()) {
+ GrPrintf("Failed to get space for vertices!\n");
+ return;
+ }
+
+ GrPrimitiveType primType;
+ int vertCount;
+ GrPoint* vertex = geo.positions();
+
+ if (width > 0) {
+ vertCount = 10;
+ primType = kTriangleStrip_PrimitiveType;
+ setStrokeRectStrip(vertex, rect, width);
+ } else {
+ // hairline
+ vertCount = 5;
+ primType = kLineStrip_PrimitiveType;
+ vertex[0].set(rect.fLeft, rect.fTop);
+ vertex[1].set(rect.fRight, rect.fTop);
+ vertex[2].set(rect.fRight, rect.fBottom);
+ vertex[3].set(rect.fLeft, rect.fBottom);
+ vertex[4].set(rect.fLeft, rect.fTop);
+ }
+
+ GrDrawTarget::AutoViewMatrixRestore avmr;
+ if (NULL != matrix) {
+ avmr.set(target);
+ target->preConcatViewMatrix(*matrix);
+ target->preConcatSamplerMatrices(stageMask, *matrix);
+ }
+
+ target->drawNonIndexed(primType, 0, vertCount);
+ } else {
+ #if GR_STATIC_RECT_VB
+ GrVertexLayout layout = PaintStageVertexLayoutBits(paint, NULL);
+ const GrVertexBuffer* sqVB = fGpu->getUnitSquareVertexBuffer();
+ if (NULL == sqVB) {
+ GrPrintf("Failed to create static rect vb.\n");
+ return;
+ }
+ target->setVertexSourceToBuffer(layout, sqVB);
+ GrDrawTarget::AutoViewMatrixRestore avmr(target);
+ GrMatrix m;
+ m.setAll(rect.width(), 0, rect.fLeft,
+ 0, rect.height(), rect.fTop,
+ 0, 0, GrMatrix::I()[8]);
+
+ if (NULL != matrix) {
+ m.postConcat(*matrix);
+ }
+
+ target->preConcatViewMatrix(m);
+ target->preConcatSamplerMatrices(stageMask, m);
+
+ target->drawNonIndexed(kTriangleFan_PrimitiveType, 0, 4);
+ #else
+ target->drawSimpleRect(rect, matrix, stageMask);
+ #endif
+ }
+}
+
// Draws dstRect mapping srcRect (in the paint's first texture's coords)
// across it. Falls back to a plain drawRect when the paint has no texture at
// stage 0 (srcRect would be meaningless).
void GrContext::drawRectToRect(const GrPaint& paint,
                               const GrRect& dstRect,
                               const GrRect& srcRect,
                               const GrMatrix* dstMatrix,
                               const GrMatrix* srcMatrix) {
    SK_TRACE_EVENT0("GrContext::drawRectToRect");

    // srcRect refers to paint's first texture
    if (NULL == paint.getTexture(0)) {
        drawRect(paint, dstRect, -1, dstMatrix);
        return;
    }

    // The two compile paths below are mutually exclusive.
    GR_STATIC_ASSERT(!BATCH_RECT_TO_RECT || !GR_STATIC_RECT_VB);

#if GR_STATIC_RECT_VB
    GrDrawTarget* target = this->prepareToDraw(paint, kUnbuffered_DrawCategory);

    GrVertexLayout layout = PaintStageVertexLayoutBits(paint, NULL);
    GrDrawTarget::AutoViewMatrixRestore avmr(target);

    GrMatrix m;

    // Map the unit square onto dstRect (optionally through dstMatrix).
    m.setAll(dstRect.width(), 0,                dstRect.fLeft,
             0,               dstRect.height(), dstRect.fTop,
             0,               0,                GrMatrix::I()[8]);
    if (NULL != dstMatrix) {
        m.postConcat(*dstMatrix);
    }
    target->preConcatViewMatrix(m);

    // srcRect refers to first stage
    int otherStageMask = paint.getActiveStageMask() &
                         (~(1 << GrPaint::kFirstTextureStage));
    if (otherStageMask) {
        target->preConcatSamplerMatrices(otherStageMask, m);
    }

    // Stage 0 instead maps the unit square onto srcRect.
    m.setAll(srcRect.width(), 0,                srcRect.fLeft,
             0,               srcRect.height(), srcRect.fTop,
             0,               0,                GrMatrix::I()[8]);
    if (NULL != srcMatrix) {
        m.postConcat(*srcMatrix);
    }
    target->preConcatSamplerMatrix(GrPaint::kFirstTextureStage, m);

    const GrVertexBuffer* sqVB = fGpu->getUnitSquareVertexBuffer();
    if (NULL == sqVB) {
        GrPrintf("Failed to create static rect vb.\n");
        return;
    }
    target->setVertexSourceToBuffer(layout, sqVB);
    target->drawNonIndexed(kTriangleFan_PrimitiveType, 0, 4);
#else

    GrDrawTarget* target;
#if BATCH_RECT_TO_RECT
    target = this->prepareToDraw(paint, kBuffered_DrawCategory);
#else
    target = this->prepareToDraw(paint, kUnbuffered_DrawCategory);
#endif

    // Only stage 0 gets an explicit src rect/matrix; other stages use NULL.
    const GrRect* srcRects[GrDrawTarget::kNumStages] = {NULL};
    const GrMatrix* srcMatrices[GrDrawTarget::kNumStages] = {NULL};
    srcRects[0] = &srcRect;
    srcMatrices[0] = srcMatrix;

    target->drawRect(dstRect, dstMatrix, 1, srcRects, srcMatrices);
#endif
}
+
// Draws user-supplied geometry. Optional per-vertex tex coords (stage 0
// only) and colors are interleaved into a scratch vertex array when present;
// otherwise the positions array is used directly. Optional 'indices' selects
// indexed drawing.
void GrContext::drawVertices(const GrPaint& paint,
                             GrPrimitiveType primitiveType,
                             int vertexCount,
                             const GrPoint positions[],
                             const GrPoint texCoords[],
                             const GrColor colors[],
                             const uint16_t indices[],
                             int indexCount) {
    SK_TRACE_EVENT0("GrContext::drawVertices");

    GrDrawTarget::AutoReleaseGeometry geo;

    GrDrawTarget* target = this->prepareToDraw(paint, kUnbuffered_DrawCategory);

    bool hasTexCoords[GrPaint::kTotalStages] = {
        NULL != texCoords,   // texCoordSrc provides explicit stage 0 coords
        0                    // remaining stages use positions
    };

    GrVertexLayout layout = PaintStageVertexLayoutBits(paint, hasTexCoords);

    if (NULL != colors) {
        layout |= GrDrawTarget::kColor_VertexLayoutBit;
    }
    int vertexSize = GrDrawTarget::VertexSize(layout);

    if (sizeof(GrPoint) != vertexSize) {
        // Extra attributes present: interleave positions/texCoords/colors
        // into reserved geometry.
        if (!geo.set(target, layout, vertexCount, 0)) {
            GrPrintf("Failed to get space for vertices!\n");
            return;
        }
        int texOffsets[GrDrawTarget::kMaxTexCoords];
        int colorOffset;
        GrDrawTarget::VertexSizeAndOffsetsByIdx(layout,
                                                texOffsets,
                                                &colorOffset,
                                                NULL,
                                                NULL);
        void* curVertex = geo.vertices();

        for (int i = 0; i < vertexCount; ++i) {
            *((GrPoint*)curVertex) = positions[i];

            // Offsets are > 0 when the attribute exists (position is at 0).
            if (texOffsets[0] > 0) {
                *(GrPoint*)((intptr_t)curVertex + texOffsets[0]) = texCoords[i];
            }
            if (colorOffset > 0) {
                *(GrColor*)((intptr_t)curVertex + colorOffset) = colors[i];
            }
            curVertex = (void*)((intptr_t)curVertex + vertexSize);
        }
    } else {
        // Position-only layout: no copy needed.
        target->setVertexSourceToArray(layout, positions, vertexCount);
    }

    // we don't currently apply offscreen AA to this path. Need improved
    // management of GrDrawTarget's geometry to avoid copying points per-tile.

    if (NULL != indices) {
        target->setIndexSourceToArray(indices, indexCount);
        target->drawIndexed(primitiveType, 0, 0, vertexCount, indexCount);
    } else {
        target->drawNonIndexed(primitiveType, 0, vertexCount);
    }
}
+
+///////////////////////////////////////////////////////////////////////////////
+
+void GrContext::drawPath(const GrPaint& paint, const GrPath& path,
+ GrPathFill fill, const GrPoint* translate) {
+
+ if (path.isEmpty()) {
+#if GR_DEBUG
+ GrPrintf("Empty path should have been caught by canvas.\n");
+#endif
+ if (GrIsFillInverted(fill)) {
+ this->drawPaint(paint);
+ }
+ return;
+ }
+
+ GrDrawTarget* target = this->prepareToDraw(paint, kUnbuffered_DrawCategory);
+
+ // An Assumption here is that path renderer would use some form of tweaking
+ // the src color (either the input alpha or in the frag shader) to implement
+ // aa. If we have some future driver-mojo path AA that can do the right
+ // thing WRT to the blend then we'll need some query on the PR.
+ if (disable_coverage_aa_for_blend(target)) {
+#if GR_DEBUG
+ GrPrintf("Turning off AA to correctly apply blend.\n");
+#endif
+ target->disableState(GrDrawTarget::kAntialias_StateBit);
+ }
+
+ GrPathRenderer* pr = this->getPathRenderer(target, path, fill);
+ if (NULL == pr) {
+#if GR_DEBUG
+ GrPrintf("Unable to find path renderer compatible with path.\n");
+#endif
+ return;
+ }
+
+ GrPathRenderer::AutoClearPath arp(pr, target, &path, fill, translate);
+ GrDrawTarget::StageBitfield stageMask = paint.getActiveStageMask();
+
+ if (!pr->supportsAA(target, path, fill) &&
+ this->doOffscreenAA(target, kHairLine_PathFill == fill)) {
+
+ bool needsStencil = pr->requiresStencilPass(target, path, fill);
+
+ // compute bounds as intersection of rt size, clip, and path
+ GrIRect bound = SkIRect::MakeWH(target->getRenderTarget()->width(),
+ target->getRenderTarget()->height());
+ GrIRect clipIBounds;
+ if (target->getClip().hasConservativeBounds()) {
+ target->getClip().getConservativeBounds().roundOut(&clipIBounds);
+ if (!bound.intersect(clipIBounds)) {
+ return;
+ }
+ }
+
+ GrRect pathBounds = path.getBounds();
+ if (!pathBounds.isEmpty()) {
+ if (NULL != translate) {
+ pathBounds.offset(*translate);
+ }
+ target->getViewMatrix().mapRect(&pathBounds, pathBounds);
+ GrIRect pathIBounds;
+ pathBounds.roundOut(&pathIBounds);
+ if (!bound.intersect(pathIBounds)) {
+ return;
+ }
+ }
+ OffscreenRecord record;
+ if (this->prepareForOffscreenAA(target, needsStencil, bound,
+ pr, &record)) {
+ for (int tx = 0; tx < record.fTileCountX; ++tx) {
+ for (int ty = 0; ty < record.fTileCountY; ++ty) {
+ this->setupOffscreenAAPass1(target, bound, tx, ty, &record);
+ pr->drawPath(0);
+ this->doOffscreenAAPass2(target, paint, bound, tx, ty, &record);
+ }
+ }
+ this->cleanupOffscreenAA(target, pr, &record);
+ if (GrIsFillInverted(fill) && bound != clipIBounds) {
+ GrDrawTarget::AutoDeviceCoordDraw adcd(target, stageMask);
+ GrRect rect;
+ if (clipIBounds.fTop < bound.fTop) {
+ rect.iset(clipIBounds.fLeft, clipIBounds.fTop,
+ clipIBounds.fRight, bound.fTop);
+ target->drawSimpleRect(rect, NULL, stageMask);
+ }
+ if (clipIBounds.fLeft < bound.fLeft) {
+ rect.iset(clipIBounds.fLeft, bound.fTop,
+ bound.fLeft, bound.fBottom);
+ target->drawSimpleRect(rect, NULL, stageMask);
+ }
+ if (clipIBounds.fRight > bound.fRight) {
+ rect.iset(bound.fRight, bound.fTop,
+ clipIBounds.fRight, bound.fBottom);
+ target->drawSimpleRect(rect, NULL, stageMask);
+ }
+ if (clipIBounds.fBottom > bound.fBottom) {
+ rect.iset(clipIBounds.fLeft, bound.fBottom,
+ clipIBounds.fRight, clipIBounds.fBottom);
+ target->drawSimpleRect(rect, NULL, stageMask);
+ }
+ }
+ return;
+ }
+ }
+ pr->drawPath(stageMask);
+}
+
+////////////////////////////////////////////////////////////////////////////////
+
// True if the GPU backend supports programmable shaders.
bool GrContext::supportsShaders() const {
    return fGpu->getCaps().fShaderSupport;
}

// Flushes (or discards, with kDiscard_FlushBit) any buffered drawing, and
// optionally forces the GPU to flush the current render target.
void GrContext::flush(int flagsBitfield) {
    if (kDiscard_FlushBit & flagsBitfield) {
        fDrawBuffer->reset();
    } else {
        flushDrawBuffer();
    }

    if (kForceCurrentRenderTarget_FlushBit & flagsBitfield) {
        fGpu->forceRenderTargetFlush();
    }
}

// Flushes only if the last draws were (deferred) text draws.
void GrContext::flushText() {
    if (kText_DrawCategory == fLastDrawCategory) {
        flushDrawBuffer();
    }
}

// Plays back and resets the in-order draw buffer, if buffering is compiled
// in and the buffer exists.
void GrContext::flushDrawBuffer() {
#if BATCH_RECT_TO_RECT || DEFER_TEXT_RENDERING
    if (fDrawBuffer) {
        fDrawBuffer->playback(fGpu);
        fDrawBuffer->reset();
    }
#endif
}
+
// Reads back a rectangle of pixels from a texture. Currently only works for
// textures that are also render targets; returns false otherwise.
bool GrContext::readTexturePixels(GrTexture* texture,
                                  int left, int top, int width, int height,
                                  GrPixelConfig config, void* buffer) {
    SK_TRACE_EVENT0("GrContext::readTexturePixels");

    // TODO: code read pixels for textures that aren't rendertargets

    this->flush();  // make sure pending draws to the texture have landed
    GrRenderTarget* target = texture->asRenderTarget();
    if (NULL != target) {
        return fGpu->readPixels(target,
                                left, top, width, height,
                                config, buffer);
    } else {
        return false;
    }
}
+
// Reads back a rectangle of pixels from a render target. A NULL target means
// the currently bound one, in which case the GPU is asked to flush it first.
bool GrContext::readRenderTargetPixels(GrRenderTarget* target,
                                       int left, int top, int width, int height,
                                       GrPixelConfig config, void* buffer) {
    SK_TRACE_EVENT0("GrContext::readRenderTargetPixels");
    uint32_t flushFlags = 0;
    if (NULL == target) {
        flushFlags |= GrContext::kForceCurrentRenderTarget_FlushBit;
    }

    this->flush(flushFlags);
    return fGpu->readPixels(target,
                            left, top, width, height,
                            config, buffer);
}
+
// Writes a block of pixels into the current render target by uploading them
// to a scratch texture and drawing that texture as a quad at (left, top).
void GrContext::writePixels(int left, int top, int width, int height,
                            GrPixelConfig config, const void* buffer,
                            size_t stride) {
    SK_TRACE_EVENT0("GrContext::writePixels");

    // TODO: when underlying api has a direct way to do this we should use it
    // (e.g. glDrawPixels on desktop GL).

    // NOTE(review): 'true' converts to flag value 1 here; if that bit is
    // kDiscard_FlushBit this discards buffered draws rather than flushing
    // them — confirm intent against the flush-bit enum.
    this->flush(true);

    const GrTextureDesc desc = {
        kNone_GrTextureFlags, kNone_GrAALevel, width, height, config
    };
    GrAutoScratchTexture ast(this, desc);
    GrTexture* texture = ast.texture();
    if (NULL == texture) {
        return;
    }
    texture->uploadTextureData(0, 0, width, height, buffer, stride);

    // Draw the upload as an opaque, unclipped, untransformed quad.
    GrDrawTarget::AutoStateRestore asr(fGpu);

    GrMatrix matrix;
    matrix.setTranslate(GrIntToScalar(left), GrIntToScalar(top));
    fGpu->setViewMatrix(matrix);

    fGpu->setColorFilter(0, SkXfermode::kDst_Mode);
    fGpu->disableState(GrDrawTarget::kClip_StateBit);
    fGpu->setAlpha(0xFF);
    fGpu->setBlendFunc(kOne_BlendCoeff,
                       kZero_BlendCoeff);
    fGpu->setTexture(0, texture);

    GrSamplerState sampler;
    sampler.setClampNoFilter();
    // Normalize texture coords to the texture's dimensions.
    matrix.setIDiv(texture->width(), texture->height());
    sampler.setMatrix(matrix);
    fGpu->setSamplerState(0, sampler);

    GrVertexLayout layout = GrDrawTarget::StagePosAsTexCoordVertexLayoutBit(0);
    static const int VCOUNT = 4;
    // TODO: Use GrGpu::drawRect here
    GrDrawTarget::AutoReleaseGeometry geo(fGpu, layout, VCOUNT, 0);
    if (!geo.succeeded()) {
        GrPrintf("Failed to get space for vertices!\n");
        return;
    }
    ((GrPoint*)geo.vertices())->setIRectFan(0, 0, width, height);
    fGpu->drawNonIndexed(kTriangleFan_PrimitiveType, 0, VCOUNT);
}
+////////////////////////////////////////////////////////////////////////////////
+
// Copies all state carried by a GrPaint (textures, samplers, masks, color,
// dither/AA flags, blend and color filter) onto the draw target.
void GrContext::SetPaint(const GrPaint& paint, GrDrawTarget* target) {

    // Texture stages come first...
    for (int i = 0; i < GrPaint::kMaxTextures; ++i) {
        int s = i + GrPaint::kFirstTextureStage;
        target->setTexture(s, paint.getTexture(i));
        target->setSamplerState(s, *paint.getTextureSampler(i));
    }

    // ...then mask stages, which contribute coverage rather than color.
    target->setFirstCoverageStage(GrPaint::kFirstMaskStage);

    for (int i = 0; i < GrPaint::kMaxMasks; ++i) {
        int s = i + GrPaint::kFirstMaskStage;
        target->setTexture(s, paint.getMask(i));
        target->setSamplerState(s, *paint.getMaskSampler(i));
    }

    target->setColor(paint.fColor);

    if (paint.fDither) {
        target->enableState(GrDrawTarget::kDither_StateBit);
    } else {
        target->disableState(GrDrawTarget::kDither_StateBit);
    }
    if (paint.fAntiAlias) {
        target->enableState(GrDrawTarget::kAntialias_StateBit);
    } else {
        target->disableState(GrDrawTarget::kAntialias_StateBit);
    }
    target->setBlendFunc(paint.fSrcBlendCoeff, paint.fDstBlendCoeff);
    target->setColorFilter(paint.fColorFilterColor, paint.fColorFilterXfermode);

    // Warn (but still draw) when masks are present but the target can't
    // blend coverage correctly.
    if (paint.getActiveMaskStageMask() && !target->canApplyCoverage()) {
        GrPrintf("Partial pixel coverage will be incorrectly blended.\n");
    }
}
+
// Applies the paint to the GPU and selects the draw target for the given
// category. Switching categories flushes the draw buffer first so draws of
// different categories are never interleaved in the buffer.
GrDrawTarget* GrContext::prepareToDraw(const GrPaint& paint,
                                       DrawCategory category) {
    if (category != fLastDrawCategory) {
        flushDrawBuffer();
        fLastDrawCategory = category;
    }
    SetPaint(paint, fGpu);
    GrDrawTarget* target = fGpu;
    switch (category) {
    case kText_DrawCategory:
#if DEFER_TEXT_RENDERING
        // Text draws are recorded into the buffer, seeded with the GPU's
        // current state and clip.
        target = fDrawBuffer;
        fDrawBuffer->initializeDrawStateAndClip(*fGpu);
#else
        target = fGpu;
#endif
        break;
    case kUnbuffered_DrawCategory:
        target = fGpu;
        break;
    case kBuffered_DrawCategory:
        target = fDrawBuffer;
        fDrawBuffer->initializeDrawStateAndClip(*fGpu);
        break;
    }
    return target;
}
+
// Returns a path renderer able to draw (path, fill) on 'target', lazily
// building the renderer chain on first use. May return NULL if none apply.
GrPathRenderer* GrContext::getPathRenderer(const GrDrawTarget* target,
                                           const GrPath& path,
                                           GrPathFill fill) {
    if (NULL == fPathRendererChain) {
        fPathRendererChain =
            new GrPathRendererChain(this, GrPathRendererChain::kNone_UsageFlag);
    }
    return fPathRendererChain->getPathRenderer(target, path, fill);
}
+
+////////////////////////////////////////////////////////////////////////////////
+
// Switches the GPU's render target, flushing buffered drawing to the old
// target first (flag value 0: a plain flush).
void GrContext::setRenderTarget(GrRenderTarget* target) {
    this->flush(false);
    fGpu->setRenderTarget(target);
}

GrRenderTarget* GrContext::getRenderTarget() {
    return fGpu->getRenderTarget();
}

const GrRenderTarget* GrContext::getRenderTarget() const {
    return fGpu->getRenderTarget();
}

// The context's view matrix lives on the GPU object.
const GrMatrix& GrContext::getMatrix() const {
    return fGpu->getViewMatrix();
}

void GrContext::setMatrix(const GrMatrix& m) {
    fGpu->setViewMatrix(m);
}

// NOTE(review): declared const yet it mutates the GPU's view matrix —
// confirm the const qualifier is intentional in the header.
void GrContext::concatMatrix(const GrMatrix& m) const {
    fGpu->preConcatViewMatrix(m);
}
+
// Returns 'bits' with bit 'shift' set when 'pred' is nonzero, cleared
// otherwise.
static inline intptr_t setOrClear(intptr_t bits, int shift, intptr_t pred) {
    const intptr_t mask = (intptr_t)1 << shift;
    return pred ? (bits | mask) : (bits & ~mask);
}
+
// Stats accessors: thin pass-throughs to the GPU object's counters.
void GrContext::resetStats() {
    fGpu->resetStats();
}

const GrGpuStats& GrContext::getStats() const {
    return fGpu->getStats();
}

void GrContext::printStats() const {
    fGpu->printStats();
}
+
// Constructs a context around a GPU backend. Takes a ref on 'gpu', creates
// the texture and font caches, computes the offscreen-AA size limit from the
// GPU's caps, and sets up the (optional) deferred draw buffer.
GrContext::GrContext(GrGpu* gpu) {
    fGpu = gpu;
    fGpu->ref();
    fGpu->setContext(this);

    fPathRendererChain = NULL;  // created lazily in getPathRenderer

    fTextureCache = new GrResourceCache(MAX_TEXTURE_CACHE_COUNT,
                                        MAX_TEXTURE_CACHE_BYTES);
    fFontCache = new GrFontCache(fGpu);

    fLastDrawCategory = kUnbuffered_DrawCategory;

    fDrawBuffer = NULL;
    fDrawBufferVBAllocPool = NULL;
    fDrawBufferIBAllocPool = NULL;

    fAAFillRectIndexBuffer = NULL;
    fAAStrokeRectIndexBuffer = NULL;

    // Without MSAA support (or when MSAA isn't preferred) offscreen AA
    // supersamples, so the usable offscreen size shrinks by the SSAA scale.
    int gpuMaxOffscreen = gpu->getCaps().fMaxRenderTargetSize;
    if (!PREFER_MSAA_OFFSCREEN_AA || !gpu->getCaps().fFSAASupport) {
        gpuMaxOffscreen /= OFFSCREEN_SSAA_SCALE;
    }
    fMaxOffscreenAASize = GrMin(GR_MAX_OFFSCREEN_AA_SIZE, gpuMaxOffscreen);

    this->setupDrawBuffer();
}
+
// Creates the in-order draw buffer and its vertex/index alloc pools. Only
// compiled in when some form of draw deferral is enabled; the members stay
// NULL otherwise.
void GrContext::setupDrawBuffer() {

    GrAssert(NULL == fDrawBuffer);
    GrAssert(NULL == fDrawBufferVBAllocPool);
    GrAssert(NULL == fDrawBufferIBAllocPool);

#if DEFER_TEXT_RENDERING || BATCH_RECT_TO_RECT
    fDrawBufferVBAllocPool =
        new GrVertexBufferAllocPool(fGpu, false,
                                    DRAW_BUFFER_VBPOOL_BUFFER_SIZE,
                                    DRAW_BUFFER_VBPOOL_PREALLOC_BUFFERS);
    fDrawBufferIBAllocPool =
        new GrIndexBufferAllocPool(fGpu, false,
                                   DRAW_BUFFER_IBPOOL_BUFFER_SIZE,
                                   DRAW_BUFFER_IBPOOL_PREALLOC_BUFFERS);

    fDrawBuffer = new GrInOrderDrawBuffer(fGpu,
                                          fDrawBufferVBAllocPool,
                                          fDrawBufferIBAllocPool);
#endif

// Safe to dereference fDrawBuffer here: BATCH_RECT_TO_RECT implies the
// buffer was created in the block above.
#if BATCH_RECT_TO_RECT
    fDrawBuffer->setQuadIndexBuffer(this->getQuadIndexBuffer());
#endif
}
+
// Returns the draw target text rendering should use: the deferred buffer
// when DEFER_TEXT_RENDERING is on, else the GPU directly.
GrDrawTarget* GrContext::getTextTarget(const GrPaint& paint) {
    GrDrawTarget* target;
#if DEFER_TEXT_RENDERING
    target = prepareToDraw(paint, kText_DrawCategory);
#else
    target = prepareToDraw(paint, kUnbuffered_DrawCategory);
#endif
    // prepareToDraw applied the paint to fGpu; apply it again on the actual
    // target, which may be the draw buffer rather than fGpu.
    SetPaint(paint, target);
    return target;
}

// Shared quad index buffer owned by the GPU object.
const GrIndexBuffer* GrContext::getQuadIndexBuffer() const {
    return fGpu->getQuadIndexBuffer();
}
+
// Applies a 1-D convolution horizontally: the image increment steps one
// texel in x (1/width in normalized coords).
void GrContext::convolveInX(GrTexture* texture,
                            const SkRect& rect,
                            const float* kernel,
                            int kernelWidth) {
    float imageIncrement[2] = {1.0f / texture->width(), 0.0f};
    convolve(texture, rect, imageIncrement, kernel, kernelWidth);
}

// Applies a 1-D convolution vertically: the image increment steps one texel
// in y (1/height in normalized coords).
void GrContext::convolveInY(GrTexture* texture,
                            const SkRect& rect,
                            const float* kernel,
                            int kernelWidth) {
    float imageIncrement[2] = {0.0f, 1.0f / texture->height()};
    convolve(texture, rect, imageIncrement, kernel, kernelWidth);
}
+
// Draws 'rect' sampling 'texture' through a convolution filter. The sampler
// matrix normalizes device coords to texture coords; drawing uses identity
// view matrix and replace-style (one, zero) blending. State is restored on
// return via AutoStateRestore.
void GrContext::convolve(GrTexture* texture,
                         const SkRect& rect,
                         float imageIncrement[2],
                         const float* kernel,
                         int kernelWidth) {
    GrDrawTarget::AutoStateRestore asr(fGpu);
    GrMatrix sampleM;
    GrSamplerState sampler(GrSamplerState::kClamp_WrapMode,
                           GrSamplerState::kClamp_WrapMode,
                           GrSamplerState::kConvolution_Filter);
    sampler.setConvolutionParams(kernelWidth, kernel, imageIncrement);
    sampleM.setScale(GR_Scalar1 / texture->width(),
                     GR_Scalar1 / texture->height());
    sampler.setMatrix(sampleM);
    fGpu->setSamplerState(0, sampler);
    fGpu->setViewMatrix(GrMatrix::I());
    fGpu->setTexture(0, texture);
    fGpu->setBlendFunc(kOne_BlendCoeff, kZero_BlendCoeff);
    fGpu->drawSimpleRect(rect, NULL, 1 << 0);  // stage 0 only
}
diff --git a/src/gpu/GrDefaultPathRenderer.cpp b/src/gpu/GrDefaultPathRenderer.cpp
new file mode 100644
index 0000000000..14c032b937
--- /dev/null
+++ b/src/gpu/GrDefaultPathRenderer.cpp
@@ -0,0 +1,560 @@
+
+/*
+ * Copyright 2011 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "GrDefaultPathRenderer.h"
+
+#include "GrContext.h"
+#include "GrPathUtils.h"
+#include "SkString.h"
+#include "SkTrace.h"
+
+
+// separateStencilSupport: HW can apply different stencil ops to front and
+// back faces. stencilWrapOpsSupport: wrapping increment/decrement stencil
+// ops are available. fPreviousSrcTol / fPreviousStages are sentinels meaning
+// "no cached tessellation yet" (see createGeom).
+GrDefaultPathRenderer::GrDefaultPathRenderer(bool separateStencilSupport,
+                                             bool stencilWrapOpsSupport)
+    : fSeparateStencil(separateStencilSupport)
+    , fStencilWrapOps(stencilWrapOpsSupport)
+    , fSubpathCount(0)
+    , fSubpathVertCount(0)
+    , fPreviousSrcTol(-GR_Scalar1)
+    , fPreviousStages(-1) {
+    fTarget = NULL;
+}
+
+////////////////////////////////////////////////////////////////////////////////
+// Stencil rules for paths
+
+////// Even/Odd
+
+// NOTE(review): each initializer appears to list, per front/back face pair:
+// pass op, fail op, stencil func, then func mask, func ref, and write mask
+// -- confirm against GrStencilSettings' declared field order.
+static const GrStencilSettings gEOStencilPass = {
+    kInvert_StencilOp,           kInvert_StencilOp,
+    kKeep_StencilOp,             kKeep_StencilOp,
+    kAlwaysIfInClip_StencilFunc, kAlwaysIfInClip_StencilFunc,
+    0xffffffff,                  0xffffffff,
+    0xffffffff,                  0xffffffff,
+    0xffffffff,                  0xffffffff
+};
+
+// ok not to check clip b/c stencil pass only wrote inside clip
+static const GrStencilSettings gEOColorPass = {
+    kZero_StencilOp,            kZero_StencilOp,
+    kZero_StencilOp,            kZero_StencilOp,
+    kNotEqual_StencilFunc,      kNotEqual_StencilFunc,
+    0xffffffff,                 0xffffffff,
+    0x0,                        0x0,
+    0xffffffff,                 0xffffffff
+};
+
+// have to check clip b/c outside clip will always be zero.
+static const GrStencilSettings gInvEOColorPass = {
+    kZero_StencilOp,            kZero_StencilOp,
+    kZero_StencilOp,            kZero_StencilOp,
+    kEqualIfInClip_StencilFunc, kEqualIfInClip_StencilFunc,
+    0xffffffff,                 0xffffffff,
+    0x0,                        0x0,
+    0xffffffff,                 0xffffffff
+};
+
+////// Winding
+
+// when we have separate stencil we increment front faces / decrement back faces
+// when we don't have wrap incr and decr we use the stencil test to simulate
+// them.
+
+static const GrStencilSettings gWindStencilSeparateWithWrap = {
+    kIncWrap_StencilOp,             kDecWrap_StencilOp,
+    kKeep_StencilOp,                kKeep_StencilOp,
+    kAlwaysIfInClip_StencilFunc,    kAlwaysIfInClip_StencilFunc,
+    0xffffffff,                     0xffffffff,
+    0xffffffff,                     0xffffffff,
+    0xffffffff,                     0xffffffff
+};
+
+// if inc'ing the max value, invert to make 0
+// if dec'ing zero invert to make all ones.
+// we can't avoid touching the stencil on both passing and
+// failing, so we can't resctrict ourselves to the clip.
+static const GrStencilSettings gWindStencilSeparateNoWrap = {
+    kInvert_StencilOp,              kInvert_StencilOp,
+    kIncClamp_StencilOp,            kDecClamp_StencilOp,
+    kEqual_StencilFunc,             kEqual_StencilFunc,
+    0xffffffff,                     0xffffffff,
+    0xffffffff,                     0x0,
+    0xffffffff,                     0xffffffff
+};
+
+// When there are no separate faces we do two passes to setup the winding rule
+// stencil. First we draw the front faces and inc, then we draw the back faces
+// and dec. These are same as the above two split into the incrementing and
+// decrementing passes.
+static const GrStencilSettings gWindSingleStencilWithWrapInc = {
+    kIncWrap_StencilOp,             kIncWrap_StencilOp,
+    kKeep_StencilOp,                kKeep_StencilOp,
+    kAlwaysIfInClip_StencilFunc,    kAlwaysIfInClip_StencilFunc,
+    0xffffffff,                     0xffffffff,
+    0xffffffff,                     0xffffffff,
+    0xffffffff,                     0xffffffff
+};
+static const GrStencilSettings gWindSingleStencilWithWrapDec = {
+    kDecWrap_StencilOp,             kDecWrap_StencilOp,
+    kKeep_StencilOp,                kKeep_StencilOp,
+    kAlwaysIfInClip_StencilFunc,    kAlwaysIfInClip_StencilFunc,
+    0xffffffff,                     0xffffffff,
+    0xffffffff,                     0xffffffff,
+    0xffffffff,                     0xffffffff
+};
+static const GrStencilSettings gWindSingleStencilNoWrapInc = {
+    kInvert_StencilOp,              kInvert_StencilOp,
+    kIncClamp_StencilOp,            kIncClamp_StencilOp,
+    kEqual_StencilFunc,             kEqual_StencilFunc,
+    0xffffffff,                     0xffffffff,
+    0xffffffff,                     0xffffffff,
+    0xffffffff,                     0xffffffff
+};
+static const GrStencilSettings gWindSingleStencilNoWrapDec = {
+    kInvert_StencilOp,              kInvert_StencilOp,
+    kDecClamp_StencilOp,            kDecClamp_StencilOp,
+    kEqual_StencilFunc,             kEqual_StencilFunc,
+    0xffffffff,                     0xffffffff,
+    0x0,                            0x0,
+    0xffffffff,                     0xffffffff
+};
+
+static const GrStencilSettings gWindColorPass = {
+    kZero_StencilOp,                kZero_StencilOp,
+    kZero_StencilOp,                kZero_StencilOp,
+    kNonZeroIfInClip_StencilFunc,   kNonZeroIfInClip_StencilFunc,
+    0xffffffff,                     0xffffffff,
+    0x0,                            0x0,
+    0xffffffff,                     0xffffffff
+};
+
+static const GrStencilSettings gInvWindColorPass = {
+    kZero_StencilOp,                kZero_StencilOp,
+    kZero_StencilOp,                kZero_StencilOp,
+    kEqualIfInClip_StencilFunc,     kEqualIfInClip_StencilFunc,
+    0xffffffff,                     0xffffffff,
+    0x0,                            0x0,
+    0xffffffff,                     0xffffffff
+};
+
+////// Normal render to stencil
+
+// Sometimes the default path renderer can draw a path directly to the stencil
+// buffer without having to first resolve the interior / exterior.
+static const GrStencilSettings gDirectToStencil = {
+    kZero_StencilOp,                kZero_StencilOp,
+    kIncClamp_StencilOp,            kIncClamp_StencilOp,
+    kAlwaysIfInClip_StencilFunc,    kAlwaysIfInClip_StencilFunc,
+    0xffffffff,                     0xffffffff,
+    0x0,                            0x0,
+    0xffffffff,                     0xffffffff
+};
+
+////////////////////////////////////////////////////////////////////////////////
+// Helpers for drawPath
+
+// Maps SkPath's boolean convexity query onto Gr's convexity-hint enum.
+static GrConvexHint getConvexHint(const SkPath& path) {
+    return path.isConvex() ? kConvex_ConvexHint : kConcave_ConvexHint;
+}
+
+#define STENCIL_OFF     0   // Always disable stencil (even when needed)
+
+// Returns true when the path's fill can be rendered in a single color pass
+// without a stencil resolve pass (e.g. convex geometry, or non-overlapping
+// convex pieces where double-hits can't change the result).
+static inline bool single_pass_path(const GrDrawTarget& target,
+                                    const GrPath& path,
+                                    GrPathFill fill) {
+#if STENCIL_OFF
+    return true;
+#else
+    if (kEvenOdd_PathFill == fill) {
+        GrConvexHint hint = getConvexHint(path);
+        return hint == kConvex_ConvexHint ||
+               hint == kNonOverlappingConvexPieces_ConvexHint;
+    } else if (kWinding_PathFill == fill) {
+        GrConvexHint hint = getConvexHint(path);
+        // Same-winding pieces only work when overdraw is harmless: no
+        // dst-read blending and no dithering.
+        return hint == kConvex_ConvexHint ||
+               hint == kNonOverlappingConvexPieces_ConvexHint ||
+               (hint == kSameWindingConvexPieces_ConvexHint &&
+                !target.drawWillReadDst() && !target.isDitherState());
+
+    }
+    return false;
+#endif
+}
+
+// A stencil pass is needed exactly when the path can't be drawn in one pass.
+bool GrDefaultPathRenderer::requiresStencilPass(const GrDrawTarget* target,
+                                                const GrPath& path,
+                                                GrPathFill fill) const {
+    return !single_pass_path(*target, path, fill);
+}
+
+// Called when the current path is being cleared: release reserved geometry
+// and invalidate the tessellation cache so the next drawPath re-creates it.
+void GrDefaultPathRenderer::pathWillClear() {
+    fSubpathVertCount.reset(0);
+    fTarget->resetVertexSource();
+    if (fUseIndexedDraw) {
+        fTarget->resetIndexSource();
+    }
+    // Sentinels: force createGeom on the next draw.
+    fPreviousSrcTol = -GR_Scalar1;
+    fPreviousStages = -1;
+}
+
+// Appends the indices for one contour edge. For hairline fills this is the
+// bare line segment (edgeV0Idx, edgeV0Idx + 1); for all other fills it is a
+// triangle of the fan centered at fanCenterIdx. Advances *indices past the
+// written entries.
+static inline void append_countour_edge_indices(GrPathFill fillType,
+                                                uint16_t fanCenterIdx,
+                                                uint16_t edgeV0Idx,
+                                                uint16_t** indices) {
+    uint16_t* out = *indices;
+    if (kHairLine_PathFill != fillType) {
+        *out++ = fanCenterIdx;
+    }
+    *out++ = edgeV0Idx;
+    *out++ = edgeV0Idx + 1;
+    *indices = out;
+}
+
+// Tessellates fPath (within srcSpaceTol) into the target's reserved vertex
+// (and, for multi-subpath paths, index) storage, recording per-subpath vertex
+// counts, the primitive type, and total vertex/index counts for the draw in
+// onDrawPath. Returns false and reserves nothing on empty/oversized paths or
+// reservation failure. On success caches (srcSpaceTol, stages) so repeated
+// draws in a setPath block can reuse the geometry.
+bool GrDefaultPathRenderer::createGeom(GrScalar srcSpaceTol,
+                                       GrDrawTarget::StageBitfield stages) {
+    {
+    SK_TRACE_EVENT0("GrDefaultPathRenderer::createGeom");
+
+    GrScalar srcSpaceTolSqd = GrMul(srcSpaceTol, srcSpaceTol);
+    int maxPts = GrPathUtils::worstCasePointCount(*fPath, &fSubpathCount,
+                                                  srcSpaceTol);
+
+    if (maxPts <= 0) {
+        return false;
+    }
+    // Indices are uint16_t, so the vertex count must fit in 16 bits.
+    if (maxPts > ((int)SK_MaxU16 + 1)) {
+        GrPrintf("Path not rendered, too many verts (%d)\n", maxPts);
+        return false;
+    }
+
+    // Every enabled stage samples using the position as its tex coord.
+    GrVertexLayout layout = 0;
+    for (int s = 0; s < GrDrawTarget::kNumStages; ++s) {
+        if ((1 << s) & stages) {
+            layout |= GrDrawTarget::StagePosAsTexCoordVertexLayoutBit(s);
+        }
+    }
+
+    // Single subpath can be drawn non-indexed (line strip / triangle fan);
+    // multiple subpaths are stitched together with indices.
+    fUseIndexedDraw = fSubpathCount > 1;
+
+    int maxIdxs = 0;
+    if (kHairLine_PathFill == fFill) {
+        if (fUseIndexedDraw) {
+            maxIdxs = 2 * maxPts;
+            fPrimitiveType = kLines_PrimitiveType;
+        } else {
+            fPrimitiveType = kLineStrip_PrimitiveType;
+        }
+    } else {
+        if (fUseIndexedDraw) {
+            maxIdxs = 3 * maxPts;
+            fPrimitiveType = kTriangles_PrimitiveType;
+        } else {
+            fPrimitiveType = kTriangleFan_PrimitiveType;
+        }
+    }
+
+    GrPoint* base;
+    if (!fTarget->reserveVertexSpace(layout, maxPts, (void**)&base)) {
+        return false;
+    }
+    GrAssert(NULL != base);
+    GrPoint* vert = base;
+
+    uint16_t* idxBase = NULL;
+    uint16_t* idx = NULL;
+    uint16_t subpathIdxStart = 0;
+    if (fUseIndexedDraw) {
+        if (!fTarget->reserveIndexSpace(maxIdxs, (void**)&idxBase)) {
+            // Release the vertex reservation made above before bailing.
+            fTarget->resetVertexSource();
+            return false;
+        }
+        GrAssert(NULL != idxBase);
+        idx = idxBase;
+    }
+
+    fSubpathVertCount.reset(fSubpathCount);
+
+    GrPoint pts[4];
+
+    bool first = true;
+    int subpath = 0;
+
+    SkPath::Iter iter(*fPath, false);
+
+    for (;;) {
+        GrPathCmd cmd = (GrPathCmd)iter.next(pts);
+        switch (cmd) {
+            case kMove_PathCmd:
+                if (!first) {
+                    // Close out the previous subpath's vertex count.
+                    uint16_t currIdx = (uint16_t) (vert - base);
+                    fSubpathVertCount[subpath] = currIdx - subpathIdxStart;
+                    subpathIdxStart = currIdx;
+                    ++subpath;
+                }
+                *vert = pts[0];
+                vert++;
+                break;
+            case kLine_PathCmd:
+                if (fUseIndexedDraw) {
+                    uint16_t prevIdx = (uint16_t)(vert - base) - 1;
+                    append_countour_edge_indices(fFill, subpathIdxStart,
+                                                 prevIdx, &idx);
+                }
+                *(vert++) = pts[1];
+                break;
+            case kQuadratic_PathCmd: {
+                // first pt of quad is the pt we ended on in previous step
+                uint16_t firstQPtIdx = (uint16_t)(vert - base) - 1;
+                uint16_t numPts = (uint16_t)
+                    GrPathUtils::generateQuadraticPoints(
+                            pts[0], pts[1], pts[2],
+                            srcSpaceTolSqd, &vert,
+                            GrPathUtils::quadraticPointCount(pts, srcSpaceTol));
+                if (fUseIndexedDraw) {
+                    for (uint16_t i = 0; i < numPts; ++i) {
+                        append_countour_edge_indices(fFill, subpathIdxStart,
+                                                     firstQPtIdx + i, &idx);
+                    }
+                }
+                break;
+            }
+            case kCubic_PathCmd: {
+                // first pt of cubic is the pt we ended on in previous step
+                uint16_t firstCPtIdx = (uint16_t)(vert - base) - 1;
+                uint16_t numPts = (uint16_t) GrPathUtils::generateCubicPoints(
+                                pts[0], pts[1], pts[2], pts[3],
+                                srcSpaceTolSqd, &vert,
+                                GrPathUtils::cubicPointCount(pts, srcSpaceTol));
+                if (fUseIndexedDraw) {
+                    for (uint16_t i = 0; i < numPts; ++i) {
+                        append_countour_edge_indices(fFill, subpathIdxStart,
+                                                     firstCPtIdx + i, &idx);
+                    }
+                }
+                break;
+            }
+            case kClose_PathCmd:
+                break;
+            case kEnd_PathCmd:
+                uint16_t currIdx = (uint16_t) (vert - base);
+                fSubpathVertCount[subpath] = currIdx - subpathIdxStart;
+                goto FINISHED;
+        }
+        first = false;
+    }
+FINISHED:
+    GrAssert((vert - base) <= maxPts);
+    // In the non-indexed case idx and idxBase are both NULL, so these
+    // differences are zero.
+    GrAssert((idx - idxBase) <= maxIdxs);
+
+    fVertexCnt = vert - base;
+    fIndexCnt = idx - idxBase;
+
+    // Bake the path translation directly into the vertex positions.
+    if (fTranslate.fX || fTranslate.fY) {
+        int count = vert - base;
+        for (int i = 0; i < count; i++) {
+            base[i].offset(fTranslate.fX, fTranslate.fY);
+        }
+    }
+    }
+    // set these at the end so if we failed on first drawPath inside a
+    // setPath/clearPath block we won't assume geom was created on a subsequent
+    // drawPath in the same block.
+    fPreviousSrcTol = srcSpaceTol;
+    fPreviousStages = stages;
+    return true;
+}
+
+// Draws fPath (or just its stencil, when stencilOnly) using up to three
+// passes: optional stencil-setup pass(es) chosen from the tables above by
+// fill rule and HW capabilities, then a final color pass that is either the
+// tessellated geometry itself or a bounds rect tested against the stencil.
+void GrDefaultPathRenderer::onDrawPath(GrDrawTarget::StageBitfield stages,
+                                       bool stencilOnly) {
+
+    GrMatrix viewM = fTarget->getViewMatrix();
+    GrScalar tol = GR_Scalar1;
+    tol = GrPathUtils::scaleToleranceToSrc(tol, viewM, fPath->getBounds());
+
+    // FIXME: It's really dumb that we recreate the verts for a new vertex
+    // layout. We only do that because the GrDrawTarget API doesn't allow
+    // us to change the vertex layout after reserveVertexSpace(). We won't
+    // actually change the vertex data when the layout changes since all the
+    // stages reference the positions (rather than having separate tex coords)
+    // and we don't ever have per-vert colors. In practice our call sites
+    // won't change the stages in use inside a setPath / removePath pair. But
+    // it is a silly limitation of the GrDrawTarget design that should be fixed.
+    if (tol != fPreviousSrcTol ||
+        stages != fPreviousStages) {
+        if (!this->createGeom(tol, stages)) {
+            return;
+        }
+    }
+
+    GrAssert(NULL != fTarget);
+    GrDrawTarget::AutoStateRestore asr(fTarget);
+    bool colorWritesWereDisabled = fTarget->isColorWriteDisabled();
+    // face culling doesn't make sense here
+    GrAssert(GrDrawTarget::kBoth_DrawFace == fTarget->getDrawFace());
+
+    int passCount = 0;
+    const GrStencilSettings* passes[3];
+    GrDrawTarget::DrawFace drawFace[3];
+    bool reverse = false;
+    bool lastPassIsBounds;
+
+    if (kHairLine_PathFill == fFill) {
+        // Hairlines never need a stencil resolve: one direct pass.
+        passCount = 1;
+        if (stencilOnly) {
+            passes[0] = &gDirectToStencil;
+        } else {
+            passes[0] = NULL;
+        }
+        lastPassIsBounds = false;
+        drawFace[0] = GrDrawTarget::kBoth_DrawFace;
+    } else {
+        if (single_pass_path(*fTarget, *fPath, fFill)) {
+            passCount = 1;
+            if (stencilOnly) {
+                passes[0] = &gDirectToStencil;
+            } else {
+                passes[0] = NULL;
+            }
+            drawFace[0] = GrDrawTarget::kBoth_DrawFace;
+            lastPassIsBounds = false;
+        } else {
+            switch (fFill) {
+                case kInverseEvenOdd_PathFill:
+                    reverse = true;
+                    // fallthrough
+                case kEvenOdd_PathFill:
+                    passes[0] = &gEOStencilPass;
+                    if (stencilOnly) {
+                        passCount = 1;
+                        lastPassIsBounds = false;
+                    } else {
+                        passCount = 2;
+                        lastPassIsBounds = true;
+                        if (reverse) {
+                            passes[1] = &gInvEOColorPass;
+                        } else {
+                            passes[1] = &gEOColorPass;
+                        }
+                    }
+                    drawFace[0] = drawFace[1] = GrDrawTarget::kBoth_DrawFace;
+                    break;
+
+                case kInverseWinding_PathFill:
+                    reverse = true;
+                    // fallthrough
+                case kWinding_PathFill:
+                    if (fSeparateStencil) {
+                        if (fStencilWrapOps) {
+                            passes[0] = &gWindStencilSeparateWithWrap;
+                        } else {
+                            passes[0] = &gWindStencilSeparateNoWrap;
+                        }
+                        passCount = 2;
+                        drawFace[0] = GrDrawTarget::kBoth_DrawFace;
+                    } else {
+                        // No two-sided stencil: one inc pass for one facing,
+                        // one dec pass for the other, then the color pass.
+                        if (fStencilWrapOps) {
+                            passes[0] = &gWindSingleStencilWithWrapInc;
+                            passes[1] = &gWindSingleStencilWithWrapDec;
+                        } else {
+                            passes[0] = &gWindSingleStencilNoWrapInc;
+                            passes[1] = &gWindSingleStencilNoWrapDec;
+                        }
+                        // which is cw and which is ccw is arbitrary.
+                        drawFace[0] = GrDrawTarget::kCW_DrawFace;
+                        drawFace[1] = GrDrawTarget::kCCW_DrawFace;
+                        passCount = 3;
+                    }
+                    if (stencilOnly) {
+                        lastPassIsBounds = false;
+                        --passCount;
+                    } else {
+                        lastPassIsBounds = true;
+                        drawFace[passCount-1] = GrDrawTarget::kBoth_DrawFace;
+                        if (reverse) {
+                            passes[passCount-1] = &gInvWindColorPass;
+                        } else {
+                            passes[passCount-1] = &gWindColorPass;
+                        }
+                    }
+                    break;
+                default:
+                    GrAssert(!"Unknown path fFill!");
+                    return;
+            }
+        }
+    }
+
+    {
+    for (int p = 0; p < passCount; ++p) {
+        fTarget->setDrawFace(drawFace[p]);
+        if (NULL != passes[p]) {
+            fTarget->setStencil(*passes[p]);
+        }
+
+        if (lastPassIsBounds && (p == passCount-1)) {
+            // Final pass: fill a rect covering the path (or, for inverse
+            // fills, the whole render target) through the stencil.
+            if (!colorWritesWereDisabled) {
+                fTarget->disableState(GrDrawTarget::kNoColorWrites_StateBit);
+            }
+            GrRect bounds;
+            if (reverse) {
+                GrAssert(NULL != fTarget->getRenderTarget());
+                // draw over the whole world.
+                bounds.setLTRB(0, 0,
+                               GrIntToScalar(fTarget->getRenderTarget()->width()),
+                               GrIntToScalar(fTarget->getRenderTarget()->height()));
+                GrMatrix vmi;
+                // mapRect through persp matrix may not be correct
+                if (!fTarget->getViewMatrix().hasPerspective() &&
+                    fTarget->getViewInverse(&vmi)) {
+                    vmi.mapRect(&bounds);
+                } else {
+                    if (stages) {
+                        if (!fTarget->getViewInverse(&vmi)) {
+                            GrPrintf("Could not invert matrix.");
+                            return;
+                        }
+                        fTarget->preConcatSamplerMatrices(stages, vmi);
+                    }
+                    fTarget->setViewMatrix(GrMatrix::I());
+                }
+            } else {
+                bounds = fPath->getBounds();
+                bounds.offset(fTranslate);
+            }
+            GrDrawTarget::AutoGeometryPush agp(fTarget);
+            fTarget->drawSimpleRect(bounds, NULL, stages);
+        } else {
+            // Stencil-setup passes must not touch the color buffer.
+            if (passCount > 1) {
+                fTarget->enableState(GrDrawTarget::kNoColorWrites_StateBit);
+            }
+            if (fUseIndexedDraw) {
+                fTarget->drawIndexed(fPrimitiveType, 0, 0,
+                                     fVertexCnt, fIndexCnt);
+            } else {
+                int baseVertex = 0;
+                for (int sp = 0; sp < fSubpathCount; ++sp) {
+                    fTarget->drawNonIndexed(fPrimitiveType, baseVertex,
+                                            fSubpathVertCount[sp]);
+                    baseVertex += fSubpathVertCount[sp];
+                }
+            }
+        }
+    }
+    }
+}
+
+// Public entry point: full draw (stencil setup + color).
+void GrDefaultPathRenderer::drawPath(GrDrawTarget::StageBitfield stages) {
+    this->onDrawPath(stages, false);
+}
+
+// Public entry point: write only the stencil buffer. Inverse fills are
+// disallowed since "everything outside the path" has no stencil meaning here.
+void GrDefaultPathRenderer::drawPathToStencil() {
+    GrAssert(kInverseEvenOdd_PathFill != fFill);
+    GrAssert(kInverseWinding_PathFill != fFill);
+    this->onDrawPath(0, true);
+}
diff --git a/src/gpu/GrDefaultPathRenderer.h b/src/gpu/GrDefaultPathRenderer.h
new file mode 100644
index 0000000000..dd5964122d
--- /dev/null
+++ b/src/gpu/GrDefaultPathRenderer.h
@@ -0,0 +1,59 @@
+/*
+ * Copyright 2011 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrDefaultPathRenderer_DEFINED
+#define GrDefaultPathRenderer_DEFINED
+
+#include "GrPathRenderer.h"
+#include "SkTemplates.h"
+
+/**
+ * Subclass that renders the path using the stencil buffer to resolve fill
+ * rules (e.g. winding, even-odd)
+ */
+class GR_API GrDefaultPathRenderer : public GrPathRenderer {
+public:
+    // separateStencilSupport: HW supports per-face stencil ops.
+    // stencilWrapOpsSupport: HW supports wrapping stencil inc/dec.
+    GrDefaultPathRenderer(bool separateStencilSupport,
+                          bool stencilWrapOpsSupport);
+
+    // This renderer handles every path (it is the fallback).
+    virtual bool canDrawPath(const GrDrawTarget* target,
+                             const SkPath& path,
+                             GrPathFill fill) const { return true; }
+
+    virtual bool requiresStencilPass(const GrDrawTarget* target,
+                                     const SkPath& path,
+                                     GrPathFill fill) const;
+
+    virtual void drawPath(GrDrawTarget::StageBitfield stages);
+    virtual void drawPathToStencil();
+
+protected:
+    // Invalidates cached tessellation when the current path is cleared.
+    virtual void pathWillClear();
+
+private:
+
+    void onDrawPath(GrDrawTarget::StageBitfield stages, bool stencilOnly);
+
+    // Tessellates the current path into the target's reserved geometry.
+    bool createGeom(GrScalar srcSpaceTol,
+                    GrDrawTarget::StageBitfield stages);
+
+    bool    fSeparateStencil;
+    bool    fStencilWrapOps;
+
+    int                         fSubpathCount;
+    SkAutoSTMalloc<8, uint16_t> fSubpathVertCount;  // verts per subpath
+    int                         fIndexCnt;
+    int                         fVertexCnt;
+    // Cache keys for the last successful createGeom (-GR_Scalar1 / -1 when
+    // no geometry is cached).
+    GrScalar                    fPreviousSrcTol;
+    GrDrawTarget::StageBitfield fPreviousStages;
+    GrPrimitiveType             fPrimitiveType;
+    bool                        fUseIndexedDraw;
+
+    typedef GrPathRenderer INHERITED;
+};
+
+#endif
diff --git a/src/gpu/GrDrawMesh.cpp b/src/gpu/GrDrawMesh.cpp
new file mode 100644
index 0000000000..75960e4f92
--- /dev/null
+++ b/src/gpu/GrDrawMesh.cpp
@@ -0,0 +1,147 @@
+
+/*
+ * Copyright 2011 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+#include "GrMesh.h"
+#include "SkCanvas.h"
+
+// Empty mesh; arrays are allocated later by init() or operator=.
+GrMesh::GrMesh() : fPts(NULL), fCount(0), fIndices(NULL), fIndexCount(0) {}
+
+// fTex aliases into the fPts allocation, so only fPts and fIndices are freed.
+GrMesh::~GrMesh() {
+    delete[] fPts;
+    delete[] fIndices;
+}
+
+// Deep-copies src's geometry (positions + tex coords share one allocation,
+// mirroring init(); fTex aliases the second half) and its index array.
+GrMesh& GrMesh::operator=(const GrMesh& src) {
+    // Self-assignment guard: without it we would free our own storage and
+    // then copy from the freed memory.
+    if (this == &src) {
+        return *this;
+    }
+
+    delete[] fPts;
+    delete[] fIndices;
+
+    fBounds = src.fBounds;
+    fRows = src.fRows;
+    fCols = src.fCols;
+
+    fCount = src.fCount;
+    fPts = new SkPoint[fCount * 2];
+    fTex = fPts + fCount;
+    memcpy(fPts, src.fPts, fCount * 2 * sizeof(SkPoint));
+
+    // Note: fIndices was already deleted above; the original code deleted it
+    // a second time here, which is a double free.
+    fIndexCount = src.fIndexCount;
+    fIndices = new uint16_t[fIndexCount];
+    memcpy(fIndices, src.fIndices, fIndexCount * sizeof(uint16_t));
+
+    return *this;
+}
+
+// Builds a (rows x cols)-cell grid over 'bounds' with matching texture
+// coordinates from 'texture'. Allocates (rows+1)*(cols+1) vertices -- the
+// position array immediately followed by the tex-coord array in one
+// allocation -- plus 6 triangle indices per cell.
+void GrMesh::init(const SkRect& bounds, int rows, int cols,
+                  const SkRect& texture) {
+    SkASSERT(rows > 0 && cols > 0);
+
+    fBounds = bounds;
+    fRows = rows;
+    fCols = cols;
+
+    delete[] fPts;
+    fCount = (rows + 1) * (cols + 1);
+    fPts = new SkPoint[fCount * 2];
+    fTex = fPts + fCount;
+
+    delete[] fIndices;
+    fIndexCount = rows * cols * 6;
+    fIndices = new uint16_t[fIndexCount];
+
+    // 'rows' counts steps along x and 'cols' counts steps along y below.
+    SkPoint* pts = fPts;
+    const SkScalar dx = bounds.width() / rows;
+    const SkScalar dy = bounds.height() / cols;
+    SkPoint* tex = fTex;
+    const SkScalar dtx = texture.width() / rows;
+    const SkScalar dty = texture.height() / cols;
+    uint16_t* idx = fIndices;
+    int index = 0;
+    for (int y = 0; y <= cols; y++) {
+        for (int x = 0; x <= rows; x++) {
+            pts->set(bounds.fLeft + x*dx, bounds.fTop + y*dy);
+            pts += 1;
+            tex->set(texture.fLeft + x*dtx, texture.fTop + y*dty);
+            tex += 1;
+
+            // Two triangles per interior cell, wound consistently.
+            if (y < cols && x < rows) {
+                *idx++ = index;
+                *idx++ = index + rows + 1;
+                *idx++ = index + 1;
+
+                *idx++ = index + 1;
+                *idx++ = index + rows + 1;
+                *idx++ = index + rows + 2;
+
+                index += 1;
+            }
+        }
+        // Skip the last vertex of the row so 'index' tracks cell origins.
+        index += 1;
+    }
+}
+
+// Renders the mesh as indexed triangles through SkCanvas::drawVertices.
+void GrMesh::draw(SkCanvas* canvas, const SkPaint& paint) {
+    canvas->drawVertices(SkCanvas::kTriangles_VertexMode, fCount,
+                         fPts, fTex, NULL, NULL, fIndices, fIndexCount,
+                         paint);
+}
+
+/////////////////////////////////////////////
+
+#include "SkBoundaryPatch.h"
+#include "SkMeshUtils.h"
+
+// Component-wise linear interpolation between a and b at parameter t.
+static SkPoint SkPointInterp(const SkPoint& a, const SkPoint& b, SkScalar t) {
+    SkPoint result;
+    result.set(SkScalarInterp(a.fX, b.fX, t),
+               SkScalarInterp(a.fY, b.fY, t));
+    return result;
+}
+
+// Fills pts[0..3] with a cubic from (x0,y0) to (x3,y3) whose two control
+// points are pushed perpendicular to the chord (rotated clockwise, scaled by
+// 'scale') at the 1/3 and 2/3 chord positions, bowing the edge outward.
+static void set_cubic(SkPoint pts[4], SkScalar x0, SkScalar y0,
+                      SkScalar x3, SkScalar y3, SkScalar scale = 1) {
+    SkPoint tmp, tmp2;
+
+    pts[0].set(x0, y0);
+    pts[3].set(x3, y3);
+
+    tmp = SkPointInterp(pts[0], pts[3], SK_Scalar1/3);
+    tmp2 = pts[0] - tmp;
+    tmp2.rotateCW();
+    tmp2.scale(scale);
+    pts[1] = tmp + tmp2;
+
+    tmp = SkPointInterp(pts[0], pts[3], 2*SK_Scalar1/3);
+    tmp2 = pts[3] - tmp;
+    tmp2.rotateCW();
+    tmp2.scale(scale);
+    pts[2] = tmp + tmp2;
+}
+
+// Demo/test helper: warps 'bm' through a cubic boundary patch whose four
+// edges bow by 'scale', evaluated on a 16x16 grid and drawn via SkMeshUtils.
+void test_patch(SkCanvas* canvas, const SkBitmap& bm, SkScalar scale) {
+    const float w = bm.width();
+    const float h = bm.height();
+    SkCubicBoundary cubic;
+    set_cubic(cubic.fPts + 0, 0, 0, w, 0, scale);
+    set_cubic(cubic.fPts + 3, w, 0, w, h, scale);
+    set_cubic(cubic.fPts + 6, w, h, 0, h, -scale);
+    set_cubic(cubic.fPts + 9, 0, h, 0, 0, scale);
+
+    SkBoundaryPatch patch;
+    patch.setBoundary(&cubic);
+
+    const int Rows = 16;
+    const int Cols = 16;
+    SkPoint pts[Rows * Cols];
+    patch.evalPatch(pts, Rows, Cols);
+
+    SkPaint paint;
+    paint.setAntiAlias(true);
+    paint.setFilterBitmap(true);
+
+    SkMeshUtils::Draw(canvas, bm, Rows, Cols, pts, NULL, paint);
+}
+
+
diff --git a/src/gpu/GrDrawTarget.cpp b/src/gpu/GrDrawTarget.cpp
new file mode 100644
index 0000000000..0ba7ead077
--- /dev/null
+++ b/src/gpu/GrDrawTarget.cpp
@@ -0,0 +1,1262 @@
+
+/*
+ * Copyright 2010 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+
+#include "GrDrawTarget.h"
+#include "GrGpuVertex.h"
+#include "GrTexture.h"
+#include "GrVertexBuffer.h"
+#include "GrIndexBuffer.h"
+
+namespace {
+
+// recursive helper for creating mask with all the tex coord bits set for
+// one stage
+template <int N>
+int stage_mask_recur(int stage) {
+    return GrDrawTarget::StageTexCoordVertexLayoutBit(stage, N) |
+           stage_mask_recur<N+1>(stage);
+}
+// Base case: recursion stops past the last stage.
+template<>
+int stage_mask_recur<GrDrawTarget::kNumStages>(int) { return 0; }
+
+// mask of all tex coord indices for one stage
+int stage_tex_coord_mask(int stage) {
+    return stage_mask_recur<0>(stage);
+}
+
+// mask of all bits relevant to one stage
+int stage_mask(int stage) {
+    return stage_tex_coord_mask(stage) |
+           GrDrawTarget::StagePosAsTexCoordVertexLayoutBit(stage);
+}
+
+// recursive helper for creating mask of with all bits set relevant to one
+// texture coordinate index
+template <int N>
+int tex_coord_mask_recur(int texCoordIdx) {
+    return GrDrawTarget::StageTexCoordVertexLayoutBit(N, texCoordIdx) |
+           tex_coord_mask_recur<N+1>(texCoordIdx);
+}
+// Base case: recursion stops past the last tex coord index.
+template<>
+int tex_coord_mask_recur<GrDrawTarget::kMaxTexCoords>(int) { return 0; }
+
+// mask of all bits relevant to one texture coordinate index
+int tex_coord_idx_mask(int texCoordIdx) {
+    return tex_coord_mask_recur<0>(texCoordIdx);
+}
+
+// A layout is well-formed when each stage either references nothing or
+// exactly one source (position-as-texcoord or a single tex coord set).
+bool check_layout(GrVertexLayout layout) {
+    // can only have 1 or 0 bits set for each stage.
+    for (int s = 0; s < GrDrawTarget::kNumStages; ++s) {
+        int stageBits = layout & stage_mask(s);
+        if (stageBits && !GrIsPow2(stageBits)) {
+            return false;
+        }
+    }
+    return true;
+}
+
+int num_tex_coords(GrVertexLayout layout) {
+    int cnt = 0;
+    // figure out how many tex coordinates are present
+    for (int t = 0; t < GrDrawTarget::kMaxTexCoords; ++t) {
+        if (tex_coord_idx_mask(t) & layout) {
+            ++cnt;
+        }
+    }
+    return cnt;
+}
+
+} //unnamed namespace
+
+// Total byte size of one vertex for the given layout: position, then one
+// vector per referenced tex coord set, then optional color, coverage, and
+// a 4-scalar edge equation.
+size_t GrDrawTarget::VertexSize(GrVertexLayout vertexLayout) {
+    GrAssert(check_layout(vertexLayout));
+
+    size_t vecSize = (vertexLayout & kTextFormat_VertexLayoutBit) ?
+                        sizeof(GrGpuTextVertex) :
+                        sizeof(GrPoint);
+
+    size_t size = vecSize; // position
+    size += num_tex_coords(vertexLayout) * vecSize;
+    if (vertexLayout & kColor_VertexLayoutBit) {
+        size += sizeof(GrColor);
+    }
+    if (vertexLayout & kCoverage_VertexLayoutBit) {
+        size += sizeof(GrColor);
+    }
+    if (vertexLayout & kEdge_VertexLayoutBit) {
+        size += 4 * sizeof(GrScalar);
+    }
+    return size;
+}
+
+////////////////////////////////////////////////////////////////////////////////
+
+/**
+ * Functions for computing offsets of various components from the layout
+ * bitfield.
+ *
+ * Order of vertex components:
+ * Position
+ * Tex Coord 0
+ * ...
+ * Tex Coord kMaxTexCoords-1
+ * Color
+ * Coverage
+ */
+
+// Byte offset of the tex coords read by 'stage', or 0 when the stage uses
+// the position as its tex coord, or -1 when the stage is unused.
+int GrDrawTarget::VertexStageCoordOffset(int stage, GrVertexLayout vertexLayout) {
+    GrAssert(check_layout(vertexLayout));
+    if (StagePosAsTexCoordVertexLayoutBit(stage) & vertexLayout) {
+        return 0;
+    }
+    int tcIdx = VertexTexCoordsForStage(stage, vertexLayout);
+    if (tcIdx >= 0) {
+
+        int vecSize = (vertexLayout & kTextFormat_VertexLayoutBit) ?
+                                    sizeof(GrGpuTextVertex) :
+                                    sizeof(GrPoint);
+        int offset = vecSize; // position
+        // figure out how many tex coordinates are present and precede this one.
+        for (int t = 0; t < tcIdx; ++t) {
+            if (tex_coord_idx_mask(t) & vertexLayout) {
+                offset += vecSize;
+            }
+        }
+        return offset;
+    }
+
+    return -1;
+}
+
+// Byte offset of the per-vertex color (follows position + tex coords), or -1
+// when the layout has no color.
+int GrDrawTarget::VertexColorOffset(GrVertexLayout vertexLayout) {
+    GrAssert(check_layout(vertexLayout));
+
+    if (vertexLayout & kColor_VertexLayoutBit) {
+        int vecSize = (vertexLayout & kTextFormat_VertexLayoutBit) ?
+                                    sizeof(GrGpuTextVertex) :
+                                    sizeof(GrPoint);
+        return vecSize * (num_tex_coords(vertexLayout) + 1); //+1 for pos
+    }
+    return -1;
+}
+
+// Byte offset of the per-vertex coverage (follows position, tex coords, and
+// color if present), or -1 when the layout has no coverage.
+int GrDrawTarget::VertexCoverageOffset(GrVertexLayout vertexLayout) {
+    GrAssert(check_layout(vertexLayout));
+
+    if (vertexLayout & kCoverage_VertexLayoutBit) {
+        int vecSize = (vertexLayout & kTextFormat_VertexLayoutBit) ?
+                                    sizeof(GrGpuTextVertex) :
+                                    sizeof(GrPoint);
+
+        int offset = vecSize * (num_tex_coords(vertexLayout) + 1);
+        if (vertexLayout & kColor_VertexLayoutBit) {
+            offset += sizeof(GrColor);
+        }
+        return offset;
+    }
+    return -1;
+}
+
+// Byte offset of the 4-scalar edge equation (last component in the vertex),
+// or -1 when the layout has no edge data.
+int GrDrawTarget::VertexEdgeOffset(GrVertexLayout vertexLayout) {
+    GrAssert(check_layout(vertexLayout));
+
+    // edge pts are after the pos, tex coords, and color
+    if (vertexLayout & kEdge_VertexLayoutBit) {
+        int vecSize = (vertexLayout & kTextFormat_VertexLayoutBit) ?
+                                    sizeof(GrGpuTextVertex) :
+                                    sizeof(GrPoint);
+        int offset = vecSize * (num_tex_coords(vertexLayout) + 1); //+1 for pos
+        if (vertexLayout & kColor_VertexLayoutBit) {
+            offset += sizeof(GrColor);
+        }
+        if (vertexLayout & kCoverage_VertexLayoutBit) {
+            offset += sizeof(GrColor);
+        }
+        return offset;
+    }
+    return -1;
+}
+
+// Computes all component offsets in one pass, keyed by tex coord index.
+// Each out-param may be NULL; absent components report offset -1.
+// Returns the total vertex size in bytes.
+int GrDrawTarget::VertexSizeAndOffsetsByIdx(GrVertexLayout vertexLayout,
+                                            int texCoordOffsetsByIdx[kMaxTexCoords],
+                                            int* colorOffset,
+                                            int* coverageOffset,
+                                            int* edgeOffset) {
+    GrAssert(check_layout(vertexLayout));
+
+    int vecSize = (vertexLayout & kTextFormat_VertexLayoutBit) ?
+                                                    sizeof(GrGpuTextVertex) :
+                                                    sizeof(GrPoint);
+    int size = vecSize; // position
+
+    for (int t = 0; t < kMaxTexCoords; ++t) {
+        if (tex_coord_idx_mask(t) & vertexLayout) {
+            if (NULL != texCoordOffsetsByIdx) {
+                texCoordOffsetsByIdx[t] = size;
+            }
+            size += vecSize;
+        } else {
+            if (NULL != texCoordOffsetsByIdx) {
+                texCoordOffsetsByIdx[t] = -1;
+            }
+        }
+    }
+    if (kColor_VertexLayoutBit & vertexLayout) {
+        if (NULL != colorOffset) {
+            *colorOffset = size;
+        }
+        size += sizeof(GrColor);
+    } else {
+        if (NULL != colorOffset) {
+            *colorOffset = -1;
+        }
+    }
+    if (kCoverage_VertexLayoutBit & vertexLayout) {
+        if (NULL != coverageOffset) {
+            *coverageOffset = size;
+        }
+        size += sizeof(GrColor);
+    } else {
+        if (NULL != coverageOffset) {
+            *coverageOffset = -1;
+        }
+    }
+    if (kEdge_VertexLayoutBit & vertexLayout) {
+        if (NULL != edgeOffset) {
+            *edgeOffset = size;
+        }
+        size += 4 * sizeof(GrScalar);
+    } else {
+        if (NULL != edgeOffset) {
+            *edgeOffset = -1;
+        }
+    }
+    return size;
+}
+
+// Same as VertexSizeAndOffsetsByIdx, but reports tex coord offsets per stage
+// (0 for stages sampling the position, -1 for unused stages).
+int GrDrawTarget::VertexSizeAndOffsetsByStage(GrVertexLayout vertexLayout,
+                                              int texCoordOffsetsByStage[kNumStages],
+                                              int* colorOffset,
+                                              int* coverageOffset,
+                                              int* edgeOffset) {
+    GrAssert(check_layout(vertexLayout));
+
+    int texCoordOffsetsByIdx[kMaxTexCoords];
+    int size = VertexSizeAndOffsetsByIdx(vertexLayout,
+                                         (NULL == texCoordOffsetsByStage) ?
+                                               NULL :
+                                               texCoordOffsetsByIdx,
+                                         colorOffset,
+                                         coverageOffset,
+                                         edgeOffset);
+    if (NULL != texCoordOffsetsByStage) {
+        for (int s = 0; s < kNumStages; ++s) {
+            int tcIdx;
+            if (StagePosAsTexCoordVertexLayoutBit(s) & vertexLayout) {
+                texCoordOffsetsByStage[s] = 0;
+            } else if ((tcIdx = VertexTexCoordsForStage(s, vertexLayout)) >= 0) {
+                texCoordOffsetsByStage[s] = texCoordOffsetsByIdx[tcIdx];
+            } else {
+                texCoordOffsetsByStage[s] = -1;
+            }
+        }
+    }
+    return size;
+}
+
+////////////////////////////////////////////////////////////////////////////////
+
+// True when the layout references 'stage' at all (any tex coord source).
+bool GrDrawTarget::VertexUsesStage(int stage, GrVertexLayout vertexLayout) {
+    GrAssert(stage < kNumStages);
+    GrAssert(check_layout(vertexLayout));
+    return !!(stage_mask(stage) & vertexLayout);
+}
+
+// True when any stage reads tex coord set 'coordIndex'.
+bool GrDrawTarget::VertexUsesTexCoordIdx(int coordIndex,
+                                         GrVertexLayout vertexLayout) {
+    GrAssert(coordIndex < kMaxTexCoords);
+    GrAssert(check_layout(vertexLayout));
+    return !!(tex_coord_idx_mask(coordIndex) & vertexLayout);
+}
+
+// Returns the tex coord index referenced by 'stage' (decoded from the single
+// set bit's position), or -1 when the stage uses none.
+int GrDrawTarget::VertexTexCoordsForStage(int stage, GrVertexLayout vertexLayout) {
+    GrAssert(stage < kNumStages);
+    GrAssert(check_layout(vertexLayout));
+    int bit = vertexLayout & stage_tex_coord_mask(stage);
+    if (bit) {
+        // figure out which set of texture coordates is used
+        // bits are ordered T0S0, T0S1, T0S2, ..., T1S0, T1S1, ...
+        // and start at bit 0.
+        GR_STATIC_ASSERT(sizeof(GrVertexLayout) <= sizeof(uint32_t));
+        return (32 - Gr_clz(bit) - 1) / kNumStages;
+    }
+    return -1;
+}
+
+////////////////////////////////////////////////////////////////////////////////
+
+// Self-test of the layout size/offset functions, run once per process (the
+// 'run' guard is not thread-safe, but this is invoked from the GrDrawTarget
+// ctor in debug builds only).
+void GrDrawTarget::VertexLayoutUnitTest() {
+    // not necessarily exhaustive
+    static bool run;
+    if (!run) {
+        run = true;
+        for (int s = 0; s < kNumStages; ++s) {
+
+            GrAssert(!VertexUsesStage(s, 0));
+            GrAssert(-1 == VertexStageCoordOffset(s, 0));
+            GrVertexLayout stageMask = 0;
+            for (int t = 0; t < kMaxTexCoords; ++t) {
+                stageMask |= StageTexCoordVertexLayoutBit(s,t);
+            }
+            GrAssert(1 == kMaxTexCoords || !check_layout(stageMask));
+            GrAssert(stage_tex_coord_mask(s) == stageMask);
+            stageMask |= StagePosAsTexCoordVertexLayoutBit(s);
+            GrAssert(stage_mask(s) == stageMask);
+            GrAssert(!check_layout(stageMask));
+        }
+        for (int t = 0; t < kMaxTexCoords; ++t) {
+            GrVertexLayout tcMask = 0;
+            GrAssert(!VertexUsesTexCoordIdx(t, 0));
+            for (int s = 0; s < kNumStages; ++s) {
+                tcMask |= StageTexCoordVertexLayoutBit(s,t);
+                GrAssert(VertexUsesStage(s, tcMask));
+                GrAssert(sizeof(GrPoint) == VertexStageCoordOffset(s, tcMask));
+                GrAssert(VertexUsesTexCoordIdx(t, tcMask));
+                GrAssert(2*sizeof(GrPoint) == VertexSize(tcMask));
+                GrAssert(t == VertexTexCoordsForStage(s, tcMask));
+                for (int s2 = s + 1; s2 < kNumStages; ++s2) {
+                    GrAssert(-1 == VertexStageCoordOffset(s2, tcMask));
+                    GrAssert(!VertexUsesStage(s2, tcMask));
+                    GrAssert(-1 == VertexTexCoordsForStage(s2, tcMask));
+
+                #if GR_DEBUG
+                    GrVertexLayout posAsTex = tcMask | StagePosAsTexCoordVertexLayoutBit(s2);
+                #endif
+                    GrAssert(0 == VertexStageCoordOffset(s2, posAsTex));
+                    GrAssert(VertexUsesStage(s2, posAsTex));
+                    GrAssert(2*sizeof(GrPoint) == VertexSize(posAsTex));
+                    GrAssert(-1 == VertexTexCoordsForStage(s2, posAsTex));
+                    GrAssert(-1 == VertexEdgeOffset(posAsTex));
+                }
+                GrAssert(-1 == VertexEdgeOffset(tcMask));
+                GrAssert(-1 == VertexColorOffset(tcMask));
+                GrAssert(-1 == VertexCoverageOffset(tcMask));
+            #if GR_DEBUG
+                GrVertexLayout withColor = tcMask | kColor_VertexLayoutBit;
+            #endif
+                GrAssert(-1 == VertexCoverageOffset(withColor));
+                GrAssert(2*sizeof(GrPoint) == VertexColorOffset(withColor));
+                GrAssert(2*sizeof(GrPoint) + sizeof(GrColor) == VertexSize(withColor));
+            #if GR_DEBUG
+                GrVertexLayout withEdge = tcMask | kEdge_VertexLayoutBit;
+            #endif
+                GrAssert(-1 == VertexColorOffset(withEdge));
+                GrAssert(2*sizeof(GrPoint) == VertexEdgeOffset(withEdge));
+                GrAssert(4*sizeof(GrPoint) == VertexSize(withEdge));
+            #if GR_DEBUG
+                GrVertexLayout withColorAndEdge = withColor | kEdge_VertexLayoutBit;
+            #endif
+                GrAssert(2*sizeof(GrPoint) == VertexColorOffset(withColorAndEdge));
+                GrAssert(2*sizeof(GrPoint) + sizeof(GrColor) == VertexEdgeOffset(withColorAndEdge));
+                GrAssert(4*sizeof(GrPoint) + sizeof(GrColor) == VertexSize(withColorAndEdge));
+            #if GR_DEBUG
+                GrVertexLayout withCoverage = tcMask | kCoverage_VertexLayoutBit;
+            #endif
+                GrAssert(-1 == VertexColorOffset(withCoverage));
+                GrAssert(2*sizeof(GrPoint) == VertexCoverageOffset(withCoverage));
+                GrAssert(2*sizeof(GrPoint) + sizeof(GrColor) == VertexSize(withCoverage));
+            #if GR_DEBUG
+                GrVertexLayout withCoverageAndColor = tcMask | kCoverage_VertexLayoutBit |
+                                                      kColor_VertexLayoutBit;
+            #endif
+                GrAssert(2*sizeof(GrPoint) == VertexColorOffset(withCoverageAndColor));
+                GrAssert(2*sizeof(GrPoint) + sizeof(GrColor) == VertexCoverageOffset(withCoverageAndColor));
+                GrAssert(2*sizeof(GrPoint) + 2 * sizeof(GrColor) == VertexSize(withCoverageAndColor));
+            }
+            GrAssert(tex_coord_idx_mask(t) == tcMask);
+            GrAssert(check_layout(tcMask));
+
+            int stageOffsets[kNumStages];
+            int colorOffset;
+            int edgeOffset;
+            int coverageOffset;
+            int size;
+            size = VertexSizeAndOffsetsByStage(tcMask, stageOffsets, &colorOffset,
+                                               &coverageOffset, &edgeOffset);
+            GrAssert(2*sizeof(GrPoint) == size);
+            GrAssert(-1 == colorOffset);
+            GrAssert(-1 == coverageOffset);
+            GrAssert(-1 == edgeOffset);
+            for (int s = 0; s < kNumStages; ++s) {
+                GrAssert(VertexUsesStage(s, tcMask));
+                GrAssert(sizeof(GrPoint) == stageOffsets[s]);
+                GrAssert(sizeof(GrPoint) == VertexStageCoordOffset(s, tcMask));
+            }
+        }
+    }
+}
+
+////////////////////////////////////////////////////////////////////////////////
+
+// Sentinel values used in debug builds to poison unset geometry-source
+// fields so that use-before-set is immediately visible in a debugger.
+#define DEBUG_INVAL_BUFFER    0xdeadcafe
+#define DEBUG_INVAL_START_IDX -1
+
+// Constructor: runs the vertex-layout self-test (debug only) and seeds the
+// geometry-source stack with one entry whose vertex/index sources are "none".
+GrDrawTarget::GrDrawTarget() {
+#if GR_DEBUG
+    VertexLayoutUnitTest();
+#endif
+    GeometrySrcState& geoSrc = fGeoSrcStateStack.push_back();
+#if GR_DEBUG
+    // Poison the debug-only fields; they are meaningless until a source is set.
+    geoSrc.fVertexCount = DEBUG_INVAL_START_IDX;
+    geoSrc.fVertexBuffer = (GrVertexBuffer*)DEBUG_INVAL_BUFFER;
+    geoSrc.fIndexCount = DEBUG_INVAL_START_IDX;
+    geoSrc.fIndexBuffer = (GrIndexBuffer*)DEBUG_INVAL_BUFFER;
+#endif
+    geoSrc.fVertexSrc = kNone_GeometrySrcType;
+    geoSrc.fIndexSrc = kNone_GeometrySrcType;
+}
+
+// Destructor: pops any geometry-source states left on the stack (all but the
+// base entry pushed by the constructor), then releases whatever vertex/index
+// sources the base entry still holds (e.g. unrefs retained buffers).
+GrDrawTarget::~GrDrawTarget() {
+    int popCnt = fGeoSrcStateStack.count() - 1;
+    while (popCnt) {
+        this->popGeometrySource();
+        --popCnt;
+    }
+    this->releasePreviousVertexSource();
+    this->releasePreviousIndexSource();
+}
+
+// Sets the clip; notifies the subclass first via clipWillBeSet() so it can
+// observe both the old (fClip) and new clip.
+void GrDrawTarget::setClip(const GrClip& clip) {
+    clipWillBeSet(clip);
+    fClip = clip;
+}
+
+// Returns the current clip.
+const GrClip& GrDrawTarget::getClip() const {
+    return fClip;
+}
+
+// Binds a texture to the given stage. NOTE(review): no ref/unref is done
+// here — caller appears to own the texture's lifetime; confirm against
+// GrDrawTarget's ownership conventions.
+void GrDrawTarget::setTexture(int stage, GrTexture* tex) {
+    GrAssert(stage >= 0 && stage < kNumStages);
+    fCurrDrawState.fTextures[stage] = tex;
+}
+
+// Returns the texture bound to a stage (const overload).
+const GrTexture* GrDrawTarget::getTexture(int stage) const {
+    GrAssert(stage >= 0 && stage < kNumStages);
+    return fCurrDrawState.fTextures[stage];
+}
+
+// Returns the texture bound to a stage (mutable overload).
+GrTexture* GrDrawTarget::getTexture(int stage) {
+    GrAssert(stage >= 0 && stage < kNumStages);
+    return fCurrDrawState.fTextures[stage];
+}
+
+// Sets the render target subsequent draws will render into.
+void GrDrawTarget::setRenderTarget(GrRenderTarget* target) {
+    fCurrDrawState.fRenderTarget = target;
+}
+
+// Returns the current render target (const overload).
+const GrRenderTarget* GrDrawTarget::getRenderTarget() const {
+    return fCurrDrawState.fRenderTarget;
+}
+
+// Returns the current render target (mutable overload).
+GrRenderTarget* GrDrawTarget::getRenderTarget() {
+    return fCurrDrawState.fRenderTarget;
+}
+
+// Replaces the view matrix applied to all positions at draw time.
+void GrDrawTarget::setViewMatrix(const GrMatrix& m) {
+    fCurrDrawState.fViewMatrix = m;
+}
+
+// view' = view * matrix (matrix applied before the current view transform).
+void GrDrawTarget::preConcatViewMatrix(const GrMatrix& matrix) {
+    fCurrDrawState.fViewMatrix.preConcat(matrix);
+}
+
+// view' = matrix * view (matrix applied after the current view transform).
+void GrDrawTarget::postConcatViewMatrix(const GrMatrix& matrix) {
+    fCurrDrawState.fViewMatrix.postConcat(matrix);
+}
+
+// Returns the current view matrix.
+const GrMatrix& GrDrawTarget::getViewMatrix() const {
+    return fCurrDrawState.fViewMatrix;
+}
+
+// Computes the inverse of the view matrix. Returns false (leaving *matrix
+// untouched) if the view matrix is not invertible; matrix may be NULL when
+// the caller only wants the invertibility check.
+bool GrDrawTarget::getViewInverse(GrMatrix* matrix) const {
+    // Mike: Can we cache this somewhere?
+    // Brian: Sure, do we use it often?
+
+    GrMatrix inverse;
+    if (fCurrDrawState.fViewMatrix.invert(&inverse)) {
+        if (matrix) {
+            *matrix = inverse;
+        }
+        return true;
+    }
+    return false;
+}
+
+// Copies the sampler state (filter, wrap, matrix, ...) for the given stage.
+void GrDrawTarget::setSamplerState(int stage, const GrSamplerState& state) {
+    GrAssert(stage >= 0 && stage < kNumStages);
+    fCurrDrawState.fSamplerStates[stage] = state;
+}
+
+// Turns on the given state flag bits (e.g. kAntialias_StateBit).
+void GrDrawTarget::enableState(uint32_t bits) {
+    fCurrDrawState.fFlagBits |= bits;
+}
+
+// Turns off the given state flag bits.
+void GrDrawTarget::disableState(uint32_t bits) {
+    fCurrDrawState.fFlagBits &= ~(bits);
+}
+
+// Sets the source and destination blend coefficients used at draw time.
+// In debug builds, warns when a coefficient references the src or dst color,
+// since such coefficients defeat the coverage-stage blend optimizations
+// (see getBlendOpts, which asserts these are never actually used).
+// Fix: the two-part string literals previously concatenated with no space
+// ("...with" "coverage..."), printing "withcoverage".
+void GrDrawTarget::setBlendFunc(GrBlendCoeff srcCoeff,
+                                GrBlendCoeff dstCoeff) {
+    fCurrDrawState.fSrcBlend = srcCoeff;
+    fCurrDrawState.fDstBlend = dstCoeff;
+#if GR_DEBUG
+    switch (dstCoeff) {
+    case kDC_BlendCoeff:
+    case kIDC_BlendCoeff:
+    case kDA_BlendCoeff:
+    case kIDA_BlendCoeff:
+        GrPrintf("Unexpected dst blend coeff. Won't work correctly with "
+                 "coverage stages.\n");
+        break;
+    default:
+        break;
+    }
+    switch (srcCoeff) {
+    case kSC_BlendCoeff:
+    case kISC_BlendCoeff:
+    case kSA_BlendCoeff:
+    case kISA_BlendCoeff:
+        GrPrintf("Unexpected src blend coeff. Won't work correctly with "
+                 "coverage stages.\n");
+        break;
+    default:
+        break;
+    }
+#endif
+}
+
+// Sets the constant draw color used when no per-vertex color is provided.
+void GrDrawTarget::setColor(GrColor c) {
+    fCurrDrawState.fColor = c;
+}
+
+// Sets the color-filter color and the transfer mode applied with it.
+void GrDrawTarget::setColorFilter(GrColor c, SkXfermode::Mode mode) {
+    fCurrDrawState.fColorFilterColor = c;
+    fCurrDrawState.fColorFilterXfermode = mode;
+}
+
+// Convenience: sets a premultiplied gray of alpha a (all four channels = a).
+void GrDrawTarget::setAlpha(uint8_t a) {
+    this->setColor((a << 24) | (a << 16) | (a << 8) | a);
+}
+
+// Snapshots the entire current draw state into *state.
+void GrDrawTarget::saveCurrentDrawState(SavedDrawState* state) const {
+    state->fState = fCurrDrawState;
+}
+
+// Restores a draw state previously captured by saveCurrentDrawState.
+void GrDrawTarget::restoreDrawState(const SavedDrawState& state) {
+    fCurrDrawState = state.fState;
+}
+
+// Copies the full draw state from another draw target.
+void GrDrawTarget::copyDrawState(const GrDrawTarget& srcTarget) {
+    fCurrDrawState = srcTarget.fCurrDrawState;
+}
+
+// Reserves space for vertexCount vertices of the given layout and returns a
+// pointer to the space in *vertices. On success the geometry source becomes
+// kReserved; on failure (or vertexCount <= 0) *vertices is set to NULL and
+// false is returned. The previous vertex source is released first, and the
+// source is temporarily set to kNone so that a failed/odd onReserve
+// implementation cannot observe a stale source.
+bool GrDrawTarget::reserveVertexSpace(GrVertexLayout vertexLayout,
+                                      int vertexCount,
+                                      void** vertices) {
+    GeometrySrcState& geoSrc = fGeoSrcStateStack.back();
+    bool acquired = false;
+    if (vertexCount > 0) {
+        GrAssert(NULL != vertices);
+        this->releasePreviousVertexSource();
+        geoSrc.fVertexSrc = kNone_GeometrySrcType;
+
+        acquired = this->onReserveVertexSpace(vertexLayout,
+                                              vertexCount,
+                                              vertices);
+    }
+    if (acquired) {
+        geoSrc.fVertexSrc = kReserved_GeometrySrcType;
+        geoSrc.fVertexCount = vertexCount;
+        geoSrc.fVertexLayout = vertexLayout;
+    } else if (NULL != vertices) {
+        *vertices = NULL;
+    }
+    return acquired;
+}
+
+// Index-side twin of reserveVertexSpace: reserves indexCount 16-bit indices,
+// returning the space in *indices. On failure (or indexCount <= 0) *indices
+// is NULLed and false is returned.
+bool GrDrawTarget::reserveIndexSpace(int indexCount,
+                                     void** indices) {
+    GeometrySrcState& geoSrc = fGeoSrcStateStack.back();
+    bool acquired = false;
+    if (indexCount > 0) {
+        GrAssert(NULL != indices);
+        this->releasePreviousIndexSource();
+        geoSrc.fIndexSrc = kNone_GeometrySrcType;
+
+        acquired = this->onReserveIndexSpace(indexCount, indices);
+    }
+    if (acquired) {
+        geoSrc.fIndexSrc = kReserved_GeometrySrcType;
+        geoSrc.fIndexCount = indexCount;
+    } else if (NULL != indices) {
+        *indices = NULL;
+    }
+    return acquired;
+
+}
+
+// Base-class implementation of the geometry-size hint query: reports "no
+// hint" by writing -1 into each non-NULL out-param and returning false.
+// Subclasses with real limits override this.
+bool GrDrawTarget::geometryHints(GrVertexLayout vertexLayout,
+                                 int32_t* vertexCount,
+                                 int32_t* indexCount) const {
+    if (NULL != vertexCount) {
+        *vertexCount = -1;
+    }
+    if (NULL != indexCount) {
+        *indexCount = -1;
+    }
+    return false;
+}
+
+// Releases whatever vertex source the top geometry-source state holds:
+// arrays and reserved space are released via the subclass hooks, buffer
+// sources are unreffed (the ref was taken in setVertexSourceToBuffer).
+// Does not change fVertexSrc; callers set it after this returns.
+void GrDrawTarget::releasePreviousVertexSource() {
+    GeometrySrcState& geoSrc = fGeoSrcStateStack.back();
+    switch (geoSrc.fVertexSrc) {
+        case kNone_GeometrySrcType:
+            break;
+        case kArray_GeometrySrcType:
+            this->releaseVertexArray();
+            break;
+        case kReserved_GeometrySrcType:
+            this->releaseReservedVertexSpace();
+            break;
+        case kBuffer_GeometrySrcType:
+            geoSrc.fVertexBuffer->unref();
+#if GR_DEBUG
+            // Poison the pointer so a stale use is obvious in debug builds.
+            geoSrc.fVertexBuffer = (GrVertexBuffer*)DEBUG_INVAL_BUFFER;
+#endif
+            break;
+        default:
+            GrCrash("Unknown Vertex Source Type.");
+            break;
+    }
+}
+
+// Index-side twin of releasePreviousVertexSource: releases the top state's
+// index source (array / reserved space / buffer ref) without changing
+// fIndexSrc itself.
+void GrDrawTarget::releasePreviousIndexSource() {
+    GeometrySrcState& geoSrc = fGeoSrcStateStack.back();
+    switch (geoSrc.fIndexSrc) {
+        case kNone_GeometrySrcType:   // these two don't require
+            break;
+        case kArray_GeometrySrcType:
+            this->releaseIndexArray();
+            break;
+        case kReserved_GeometrySrcType:
+            this->releaseReservedIndexSpace();
+            break;
+        case kBuffer_GeometrySrcType:
+            geoSrc.fIndexBuffer->unref();
+#if GR_DEBUG
+            // Poison the pointer so a stale use is obvious in debug builds.
+            geoSrc.fIndexBuffer = (GrIndexBuffer*)DEBUG_INVAL_BUFFER;
+#endif
+            break;
+        default:
+            GrCrash("Unknown Index Source Type.");
+            break;
+    }
+}
+
+// Points the vertex source at a caller-owned array. The previous source is
+// released first; the subclass hook gets a chance to copy/stage the data.
+// The array must remain valid until the source is changed or released.
+void GrDrawTarget::setVertexSourceToArray(GrVertexLayout vertexLayout,
+                                          const void* vertexArray,
+                                          int vertexCount) {
+    this->releasePreviousVertexSource();
+    GeometrySrcState& geoSrc = fGeoSrcStateStack.back();
+    geoSrc.fVertexSrc = kArray_GeometrySrcType;
+    geoSrc.fVertexLayout = vertexLayout;
+    geoSrc.fVertexCount = vertexCount;
+    this->onSetVertexSourceToArray(vertexArray, vertexCount);
+}
+
+// Points the index source at a caller-owned array of 16-bit indices;
+// mirrors setVertexSourceToArray.
+void GrDrawTarget::setIndexSourceToArray(const void* indexArray,
+                                         int indexCount) {
+    this->releasePreviousIndexSource();
+    GeometrySrcState& geoSrc = fGeoSrcStateStack.back();
+    geoSrc.fIndexSrc = kArray_GeometrySrcType;
+    geoSrc.fIndexCount = indexCount;
+    this->onSetIndexSourceToArray(indexArray, indexCount);
+}
+
+// Points the vertex source at a vertex buffer and takes a ref on it (the
+// matching unref happens in releasePreviousVertexSource).
+void GrDrawTarget::setVertexSourceToBuffer(GrVertexLayout vertexLayout,
+                                           const GrVertexBuffer* buffer) {
+    this->releasePreviousVertexSource();
+    GeometrySrcState& geoSrc = fGeoSrcStateStack.back();
+    geoSrc.fVertexSrc = kBuffer_GeometrySrcType;
+    geoSrc.fVertexBuffer = buffer;
+    buffer->ref();
+    geoSrc.fVertexLayout = vertexLayout;
+}
+
+// Points the index source at an index buffer and takes a ref on it.
+void GrDrawTarget::setIndexSourceToBuffer(const GrIndexBuffer* buffer) {
+    this->releasePreviousIndexSource();
+    GeometrySrcState& geoSrc = fGeoSrcStateStack.back();
+    geoSrc.fIndexSrc = kBuffer_GeometrySrcType;
+    geoSrc.fIndexBuffer = buffer;
+    buffer->ref();
+}
+
+// Releases the current vertex source and marks the source as "none".
+void GrDrawTarget::resetVertexSource() {
+    this->releasePreviousVertexSource();
+    GeometrySrcState& geoSrc = fGeoSrcStateStack.back();
+    geoSrc.fVertexSrc = kNone_GeometrySrcType;
+}
+
+// Releases the current index source and marks the source as "none".
+void GrDrawTarget::resetIndexSource() {
+    this->releasePreviousIndexSource();
+    GeometrySrcState& geoSrc = fGeoSrcStateStack.back();
+    geoSrc.fIndexSrc = kNone_GeometrySrcType;
+}
+
+// Pushes a fresh geometry-source state (both sources "none") so the current
+// sources can be restored later by popGeometrySource. The subclass is
+// notified first via geometrySourceWillPush.
+void GrDrawTarget::pushGeometrySource() {
+    this->geometrySourceWillPush();
+    GeometrySrcState& newState = fGeoSrcStateStack.push_back();
+    newState.fIndexSrc = kNone_GeometrySrcType;
+    newState.fVertexSrc = kNone_GeometrySrcType;
+#if GR_DEBUG
+    // Poison the unused fields in debug builds.
+    newState.fVertexCount  = ~0;
+    newState.fVertexBuffer = (GrVertexBuffer*)~0;
+    newState.fIndexCount   = ~0;
+    newState.fIndexBuffer  = (GrIndexBuffer*)~0;
+#endif
+}
+
+// Pops the top geometry-source state, releasing its sources first. The
+// subclass is told which state is about to become current (fromBack(1)).
+void GrDrawTarget::popGeometrySource() {
+    const GeometrySrcState& geoSrc = this->getGeomSrc();
+    // if popping last element then pops are unbalanced with pushes
+    GrAssert(fGeoSrcStateStack.count() > 1);
+
+    this->geometrySourceWillPop(fGeoSrcStateStack.fromBack(1));
+    this->releasePreviousVertexSource();
+    this->releasePreviousIndexSource();
+    fGeoSrcStateStack.pop_back();
+}
+
+////////////////////////////////////////////////////////////////////////////////
+
+// Issues an indexed draw. In debug builds, validates that the referenced
+// vertex and index ranges fit within the current sources before forwarding
+// to onDrawIndexed. Draws with indexCount <= 0 are silently dropped.
+void GrDrawTarget::drawIndexed(GrPrimitiveType type, int startVertex,
+                               int startIndex, int vertexCount,
+                               int indexCount) {
+#if GR_DEBUG
+    GeometrySrcState& geoSrc = fGeoSrcStateStack.back();
+    int maxVertex = startVertex + vertexCount;
+    int maxValidVertex;
+    switch (geoSrc.fVertexSrc) {
+        case kNone_GeometrySrcType:
+            // No break: GrCrash presumably does not return — TODO confirm.
+            GrCrash("Attempting to draw indexed geom without vertex src.");
+        case kReserved_GeometrySrcType: // fallthrough
+        case kArray_GeometrySrcType:
+            maxValidVertex = geoSrc.fVertexCount;
+            break;
+        case kBuffer_GeometrySrcType:
+            // Buffer capacity in vertices = byte size / stride of the layout.
+            maxValidVertex = geoSrc.fVertexBuffer->sizeInBytes() /
+                             VertexSize(geoSrc.fVertexLayout);
+            break;
+    }
+    if (maxVertex > maxValidVertex) {
+        GrCrash("Indexed drawing outside valid vertex range.");
+    }
+    int maxIndex = startIndex + indexCount;
+    int maxValidIndex;
+    switch (geoSrc.fIndexSrc) {
+        case kNone_GeometrySrcType:
+            GrCrash("Attempting to draw indexed geom without index src.");
+        case kReserved_GeometrySrcType: // fallthrough
+        case kArray_GeometrySrcType:
+            maxValidIndex = geoSrc.fIndexCount;
+            break;
+        case kBuffer_GeometrySrcType:
+            // Indices are 16-bit.
+            maxValidIndex = geoSrc.fIndexBuffer->sizeInBytes() / sizeof(uint16_t);
+            break;
+    }
+    if (maxIndex > maxValidIndex) {
+        GrCrash("Indexed drawing outside valid index range.");
+    }
+#endif
+    if (indexCount > 0) {
+        this->onDrawIndexed(type, startVertex, startIndex,
+                            vertexCount, indexCount);
+    }
+}
+
+
+// Issues a non-indexed draw. Debug builds validate the vertex range against
+// the current vertex source; draws with vertexCount <= 0 are dropped.
+void GrDrawTarget::drawNonIndexed(GrPrimitiveType type,
+                                  int startVertex,
+                                  int vertexCount) {
+#if GR_DEBUG
+    GeometrySrcState& geoSrc = fGeoSrcStateStack.back();
+    int maxVertex = startVertex + vertexCount;
+    int maxValidVertex;
+    switch (geoSrc.fVertexSrc) {
+        case kNone_GeometrySrcType:
+            // No break: GrCrash presumably does not return — TODO confirm.
+            GrCrash("Attempting to draw non-indexed geom without vertex src.");
+        case kReserved_GeometrySrcType: // fallthrough
+        case kArray_GeometrySrcType:
+            maxValidVertex = geoSrc.fVertexCount;
+            break;
+        case kBuffer_GeometrySrcType:
+            // Buffer capacity in vertices = byte size / stride of the layout.
+            maxValidVertex = geoSrc.fVertexBuffer->sizeInBytes() /
+                             VertexSize(geoSrc.fVertexLayout);
+            break;
+    }
+    if (maxVertex > maxValidVertex) {
+        GrCrash("Non-indexed drawing outside valid vertex range.");
+    }
+#endif
+    if (vertexCount > 0) {
+        this->onDrawNonIndexed(type, startVertex, vertexCount);
+    }
+}
+
+////////////////////////////////////////////////////////////////////////////////
+
+// Some blend modes allow folding a partial coverage value into the color's
+// alpha channel, while others will blend incorrectly.
+bool GrDrawTarget::canTweakAlphaForCoverage() const {
+    /**
+     * The fractional coverage is f
+     * The src and dst coeffs are Cs and Cd
+     * The dst and src colors are S and D
+     * We want the blend to compute: f*Cs*S + (f*Cd + (1-f))D
+     * By tweaking the source color's alpha we're replacing S with S'=fS. It's
+     * obvious that that first term will always be ok. The second term can be
+     * rearranged as [1-(1-Cd)f]D. By substituting in the various possibilities
+     * for Cd we find that only 1, ISA, and ISC produce the correct dst
+     * coefficient in terms of S' and D.
+     */
+    return kOne_BlendCoeff == fCurrDrawState.fDstBlend||
+           kISA_BlendCoeff == fCurrDrawState.fDstBlend ||
+           kISC_BlendCoeff == fCurrDrawState.fDstBlend;
+}
+
+
+// Conservatively determines whether the alpha produced by the color stages
+// is guaranteed to be 1.0: requires an opaque constant color, no per-vertex
+// colors, a pass-through color filter, and opaque textures on every enabled
+// color stage. Used by getBlendOpts to strengthen coefficient analysis.
+bool GrDrawTarget::srcAlphaWillBeOne() const {
+    const GrVertexLayout& layout = this->getGeomSrc().fVertexLayout;
+
+    // Check if per-vertex or constant color may have partial alpha
+    if ((layout & kColor_VertexLayoutBit) ||
+        0xff != GrColorUnpackA(fCurrDrawState.fColor)) {
+        return false;
+    }
+    // Check if color filter could introduce an alpha
+    // (TODO: Consider being more aggressive with regards to detecting 0xff
+    // final alpha from color filter).
+    if (SkXfermode::kDst_Mode != fCurrDrawState.fColorFilterXfermode) {
+        return false;
+    }
+    // Check if a color stage could create a partial alpha
+    for (int s = 0; s < fCurrDrawState.fFirstCoverageStage; ++s) {
+        if (StageWillBeUsed(s, layout, fCurrDrawState)) {
+            GrAssert(NULL != fCurrDrawState.fTextures[s]);
+            GrPixelConfig config = fCurrDrawState.fTextures[s]->config();
+            if (!GrPixelConfigIsOpaque(config)) {
+                return false;
+            }
+        }
+    }
+    return true;
+}
+
+// Analyzes the current blend state (and coverage sources) and returns flags
+// describing which blend optimizations apply: skip the draw entirely,
+// disable HW blending, emit transparent black, fold coverage into alpha, or
+// emit coverage as the color. May rewrite *srcCoeff/*dstCoeff to the
+// coefficients the caller should actually program. forceCoverage makes the
+// analysis assume partial coverage even if none is detected.
+GrDrawTarget::BlendOptFlags
+GrDrawTarget::getBlendOpts(bool forceCoverage,
+                           GrBlendCoeff* srcCoeff,
+                           GrBlendCoeff* dstCoeff) const {
+
+    const GrVertexLayout& layout = this->getGeomSrc().fVertexLayout;
+
+    // Out-params are optional; point them at locals when NULL.
+    GrBlendCoeff bogusSrcCoeff, bogusDstCoeff;
+    if (NULL == srcCoeff) {
+        srcCoeff = &bogusSrcCoeff;
+    }
+    *srcCoeff = fCurrDrawState.fSrcBlend;
+
+    if (NULL == dstCoeff) {
+        dstCoeff = &bogusDstCoeff;
+    }
+    *dstCoeff = fCurrDrawState.fDstBlend;
+
+    // We don't ever expect source coefficients to reference the source
+    GrAssert(kSA_BlendCoeff != *srcCoeff &&
+             kISA_BlendCoeff != *srcCoeff &&
+             kSC_BlendCoeff != *srcCoeff &&
+             kISC_BlendCoeff != *srcCoeff);
+    // same for dst
+    GrAssert(kDA_BlendCoeff != *dstCoeff &&
+             kIDA_BlendCoeff != *dstCoeff &&
+             kDC_BlendCoeff != *dstCoeff &&
+             kIDC_BlendCoeff != *dstCoeff);
+
+    // Disabled color writes are modeled as the (0,1) blend.
+    if (SkToBool(kNoColorWrites_StateBit & fCurrDrawState.fFlagBits)) {
+        *srcCoeff = kZero_BlendCoeff;
+        *dstCoeff = kOne_BlendCoeff;
+    }
+
+    bool srcAIsOne = this->srcAlphaWillBeOne();
+    bool dstCoeffIsOne = kOne_BlendCoeff == *dstCoeff ||
+                         (kSA_BlendCoeff == *dstCoeff && srcAIsOne);
+    bool dstCoeffIsZero = kZero_BlendCoeff == *dstCoeff ||
+                         (kISA_BlendCoeff == *dstCoeff && srcAIsOne);
+
+
+    // When coeffs are (0,1) there is no reason to draw at all, unless
+    // stenciling is enabled. Having color writes disabled is effectively
+    // (0,1).
+    if ((kZero_BlendCoeff == *srcCoeff && dstCoeffIsOne)) {
+        if (fCurrDrawState.fStencilSettings.doesWrite()) {
+            if (fCaps.fShaderSupport) {
+                return kDisableBlend_BlendOptFlag |
+                       kEmitTransBlack_BlendOptFlag;
+            } else {
+                return kDisableBlend_BlendOptFlag;
+            }
+        } else {
+            return kSkipDraw_BlendOptFlag;
+        }
+    }
+
+    // check for coverage due to edge aa or coverage texture stage
+    bool hasCoverage = forceCoverage ||
+                       fCurrDrawState.fEdgeAANumEdges > 0 ||
+                       (layout & kCoverage_VertexLayoutBit) ||
+                       (layout & kEdge_VertexLayoutBit);
+    for (int s = fCurrDrawState.fFirstCoverageStage;
+         !hasCoverage && s < kNumStages;
+         ++s) {
+        if (StageWillBeUsed(s, layout, fCurrDrawState)) {
+            hasCoverage = true;
+        }
+    }
+
+    // if we don't have coverage we can check whether the dst
+    // has to read at all. If not, we'll disable blending.
+    if (!hasCoverage) {
+        if (dstCoeffIsZero) {
+            if (kOne_BlendCoeff == *srcCoeff) {
+                // if there is no coverage and coeffs are (1,0) then we
+                // won't need to read the dst at all, it gets replaced by src
+                return kDisableBlend_BlendOptFlag;
+            } else if (kZero_BlendCoeff == *srcCoeff &&
+                       fCaps.fShaderSupport) {
+                // if the op is "clear" then we don't need to emit a color
+                // or blend, just write transparent black into the dst.
+                *srcCoeff = kOne_BlendCoeff;
+                *dstCoeff = kZero_BlendCoeff;
+                return kDisableBlend_BlendOptFlag |
+                       kEmitTransBlack_BlendOptFlag;
+            }
+        }
+    } else {
+        // check whether coverage can be safely rolled into alpha
+        // or if we can skip color computation and just emit coverage
+        if (this->canTweakAlphaForCoverage()) {
+            return kCoverageAsAlpha_BlendOptFlag;
+        }
+        // We haven't implemented support for these optimizations in the
+        // fixed pipe (which is on its deathbed)
+        if (fCaps.fShaderSupport) {
+            if (dstCoeffIsZero) {
+                if (kZero_BlendCoeff == *srcCoeff) {
+                    // the source color is not included in the blend
+                    // the dst coeff is effectively zero so blend works out to:
+                    // (c)(0)D + (1-c)D = (1-c)D.
+                    *dstCoeff = kISA_BlendCoeff;
+                    return  kEmitCoverage_BlendOptFlag;
+                } else if (srcAIsOne) {
+                    // the dst coeff is effectively zero so blend works out to:
+                    // cS + (c)(0)D + (1-c)D = cS + (1-c)D.
+                    // If Sa is 1 then we can replace Sa with c
+                    // and set dst coeff to 1-Sa.
+                    *dstCoeff = kISA_BlendCoeff;
+                    return kCoverageAsAlpha_BlendOptFlag;
+                }
+            } else if (dstCoeffIsOne) {
+                // the dst coeff is effectively one so blend works out to:
+                // cS + (c)(1)D + (1-c)D = cS + D.
+                *dstCoeff = kOne_BlendCoeff;
+                return kCoverageAsAlpha_BlendOptFlag;
+            }
+        }
+    }
+    return kNone_BlendOpt;
+}
+
+// Returns true if hardware smooth lines will be used for this draw.
+bool GrDrawTarget::willUseHWAALines() const {
+    // there is a conflict between using smooth lines and our use of
+    // premultiplied alpha. Smooth lines tweak the incoming alpha value
+    // but not in a premul-alpha way. So we only use them when our alpha
+    // is 0xff and tweaking the color for partial coverage is OK
+    if (!fCaps.fHWAALineSupport ||
+        !(kAntialias_StateBit & fCurrDrawState.fFlagBits)) {
+        return false;
+    }
+    // Require both: blending can be disabled AND coverage can fold into alpha.
+    BlendOptFlags opts = this->getBlendOpts();
+    return (kDisableBlend_BlendOptFlag & opts) &&
+           (kCoverageAsAlpha_BlendOptFlag & opts);
+}
+
+// Returns true if partial coverage can be blended correctly under the
+// current state: either dual-source blending is available or some blend
+// optimization applies (getBlendOpts is queried with forceCoverage=true).
+bool GrDrawTarget::canApplyCoverage() const {
+    // we can correctly apply coverage if a) we have dual source blending
+    // or b) one of our blend optimizations applies.
+    return this->getCaps().fDualSourceBlendingSupport ||
+           kNone_BlendOpt != this->getBlendOpts(true);
+}
+
+// Returns whether the next draw will need to read the destination.
+// NOTE(review): the name suggests "will read dst" but the expression is true
+// when blending is disabled or the draw is skipped — i.e. when the dst is
+// NOT read. Confirm intended polarity against callers.
+bool GrDrawTarget::drawWillReadDst() const {
+    return SkToBool((kDisableBlend_BlendOptFlag | kSkipDraw_BlendOptFlag) &
+                    this->getBlendOpts());
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+// Copies up to kMaxEdges edge equations into the draw state for edge
+// antialiasing; numEdges == 0 disables edge AA.
+void GrDrawTarget::setEdgeAAData(const Edge* edges, int numEdges) {
+    GrAssert(numEdges <= kMaxEdges);
+    memcpy(fCurrDrawState.fEdgeAAEdges, edges, numEdges * sizeof(Edge));
+    fCurrDrawState.fEdgeAANumEdges = numEdges;
+}
+
+
+////////////////////////////////////////////////////////////////////////////////
+
+// Draws a rectangle as a 4-vertex triangle fan. Per-stage texture coords are
+// taken either from srcRects[i] (with optional srcMatrices[i]) or from the
+// position itself. matrix, srcRects, and srcMatrices may all be NULL.
+void GrDrawTarget::drawRect(const GrRect& rect,
+                            const GrMatrix* matrix,
+                            StageBitfield stageEnableBitfield,
+                            const GrRect* srcRects[],
+                            const GrMatrix* srcMatrices[]) {
+    GrVertexLayout layout = GetRectVertexLayout(stageEnableBitfield, srcRects);
+
+    // Reserve space for 4 vertices, released when geo goes out of scope.
+    AutoReleaseGeometry geo(this, layout, 4, 0);
+    if (!geo.succeeded()) {
+        GrPrintf("Failed to get space for vertices!\n");
+        return;
+    }
+
+    SetRectVertices(rect, matrix, srcRects,
+                    srcMatrices, layout, geo.vertices());
+
+    drawNonIndexed(kTriangleFan_PrimitiveType, 0, 4);
+}
+
+// Builds the vertex layout for drawRect: each enabled stage gets either an
+// explicit tex-coord set (when it has a srcRect) or position-as-texcoord.
+GrVertexLayout GrDrawTarget::GetRectVertexLayout(StageBitfield stageEnableBitfield,
+                                                 const GrRect* srcRects[]) {
+    GrVertexLayout layout = 0;
+
+    for (int i = 0; i < kNumStages; ++i) {
+        // NOTE(review): numTC is re-declared (reset to 0) on every iteration,
+        // so the ++numTC below is dead and every stage with a srcRect uses
+        // tex-coord index 0. If distinct srcRects per stage are meant to get
+        // distinct coord sets, numTC should live outside the loop — confirm.
+        int numTC = 0;
+        if (stageEnableBitfield & (1 << i)) {
+            if (NULL != srcRects && NULL != srcRects[i]) {
+                layout |= StageTexCoordVertexLayoutBit(i, numTC);
+                ++numTC;
+            } else {
+                layout |= StagePosAsTexCoordVertexLayoutBit(i);
+            }
+        }
+    }
+    return layout;
+}
+
+// Default no-op hook; subclasses override to observe clip changes before
+// fClip is overwritten by setClip.
+void GrDrawTarget::clipWillBeSet(const GrClip& clip) {
+}
+
+// Fills the 4 vertices for drawRect: writes the position fan (optionally
+// transformed by matrix) and, for each stage with an explicit tex-coord
+// offset in the layout, a fan from srcRects[i] (optionally transformed by
+// srcMatrices[i]). vertices must hold 4 vertices of the given layout.
+void GrDrawTarget::SetRectVertices(const GrRect& rect,
+                                   const GrMatrix* matrix,
+                                   const GrRect* srcRects[],
+                                   const GrMatrix* srcMatrices[],
+                                   GrVertexLayout layout,
+                                   void* vertices) {
+#if GR_DEBUG
+    // check that the layout and srcRects agree
+    for (int i = 0; i < kNumStages; ++i) {
+        if (VertexTexCoordsForStage(i, layout) >= 0) {
+            GR_DEBUGASSERT(NULL != srcRects && NULL != srcRects[i]);
+        } else {
+            GR_DEBUGASSERT(NULL == srcRects || NULL == srcRects[i]);
+        }
+    }
+#endif
+
+    int stageOffsets[kNumStages];
+    int vsize = VertexSizeAndOffsetsByStage(layout, stageOffsets,
+                                            NULL, NULL, NULL);
+
+    // Position fan; vsize is the stride between vertices.
+    GrTCast<GrPoint*>(vertices)->setRectFan(rect.fLeft, rect.fTop,
+                                            rect.fRight, rect.fBottom,
+                                            vsize);
+    if (NULL != matrix) {
+        matrix->mapPointsWithStride(GrTCast<GrPoint*>(vertices), vsize, 4);
+    }
+
+    for (int i = 0; i < kNumStages; ++i) {
+        // Offset 0 is the position itself (pos-as-texcoord), so only write
+        // stages with a strictly positive tex-coord offset.
+        if (stageOffsets[i] > 0) {
+            GrPoint* coords = GrTCast<GrPoint*>(GrTCast<intptr_t>(vertices) +
+                                                stageOffsets[i]);
+            coords->setRectFan(srcRects[i]->fLeft, srcRects[i]->fTop,
+                               srcRects[i]->fRight, srcRects[i]->fBottom,
+                               vsize);
+            if (NULL != srcMatrices && NULL != srcMatrices[i]) {
+                srcMatrices[i]->mapPointsWithStride(coords, vsize, 4);
+            }
+        }
+    }
+}
+
+////////////////////////////////////////////////////////////////////////////////
+
+// RAII helper: saves a target's draw state on construction/set() and
+// restores it on destruction or re-targeting.
+GrDrawTarget::AutoStateRestore::AutoStateRestore() {
+    fDrawTarget = NULL;
+}
+
+// Saves target's current draw state (target may be NULL for a no-op).
+GrDrawTarget::AutoStateRestore::AutoStateRestore(GrDrawTarget* target) {
+    fDrawTarget = target;
+    if (NULL != fDrawTarget) {
+        fDrawTarget->saveCurrentDrawState(&fDrawState);
+    }
+}
+
+// Restores the saved state to the tracked target, if any.
+GrDrawTarget::AutoStateRestore::~AutoStateRestore() {
+    if (NULL != fDrawTarget) {
+        fDrawTarget->restoreDrawState(fDrawState);
+    }
+}
+
+// Switches targets: restores the old target's state, then saves the new
+// target's state. Setting the same target again is a no-op.
+void GrDrawTarget::AutoStateRestore::set(GrDrawTarget* target) {
+    if (target != fDrawTarget) {
+        if (NULL != fDrawTarget) {
+            fDrawTarget->restoreDrawState(fDrawState);
+        }
+        if (NULL != target) {
+            target->saveCurrentDrawState(&fDrawState);
+        }
+        fDrawTarget = target;
+    }
+}
+
+////////////////////////////////////////////////////////////////////////////////
+
+// RAII helper for drawing in device coordinates: sets the view matrix to
+// identity and pre-concats the inverse view matrix onto the sampler matrix
+// of each stage in stageMask so texture mapping is unchanged. Everything is
+// restored by the destructor. If the view matrix is not invertible the
+// sampler matrices are left alone (stageMask is cleared).
+GrDrawTarget::AutoDeviceCoordDraw::AutoDeviceCoordDraw(GrDrawTarget* target,
+                                                       int stageMask) {
+    GrAssert(NULL != target);
+
+    fDrawTarget = target;
+    fViewMatrix = target->getViewMatrix();
+    fStageMask = stageMask;
+    if (fStageMask) {
+        GrMatrix invVM;
+        if (fViewMatrix.invert(&invVM)) {
+            // Save each masked stage's sampler matrix for restoration.
+            for (int s = 0; s < kNumStages; ++s) {
+                if (fStageMask & (1 << s)) {
+                    fSamplerMatrices[s] = target->getSamplerMatrix(s);
+                }
+            }
+            target->preConcatSamplerMatrices(fStageMask, invVM);
+        } else {
+            // sad trombone sound
+            fStageMask = 0;
+        }
+    }
+    target->setViewMatrix(GrMatrix::I());
+}
+
+// Restores the saved view matrix and the sampler matrices of every stage
+// that was adjusted in the constructor.
+GrDrawTarget::AutoDeviceCoordDraw::~AutoDeviceCoordDraw() {
+    fDrawTarget->setViewMatrix(fViewMatrix);
+    for (int s = 0; s < kNumStages; ++s) {
+        if (fStageMask & (1 << s)) {
+            fDrawTarget->setSamplerMatrix(s, fSamplerMatrices[s]);
+        }
+    }
+}
+
+////////////////////////////////////////////////////////////////////////////////
+
+// RAII helper that reserves vertex/index space on construction (or set())
+// and resets the sources when destroyed or re-set.
+GrDrawTarget::AutoReleaseGeometry::AutoReleaseGeometry(
+                                        GrDrawTarget*  target,
+                                        GrVertexLayout vertexLayout,
+                                        int            vertexCount,
+                                        int            indexCount) {
+    fTarget = NULL;
+    this->set(target, vertexLayout, vertexCount, indexCount);
+}
+
+// Default ctor: holds nothing until set() is called.
+GrDrawTarget::AutoReleaseGeometry::AutoReleaseGeometry() {
+    fTarget = NULL;
+}
+
+// Releases any reserved geometry.
+GrDrawTarget::AutoReleaseGeometry::~AutoReleaseGeometry() {
+    this->reset();
+}
+
+// (Re)targets the helper: releases anything previously held, then reserves
+// vertexCount vertices of vertexLayout and indexCount indices from target.
+// A count of 0 skips that reservation. On any failure the helper is fully
+// reset (fTarget == NULL) and false is returned, so succeeded() stays
+// consistent with the return value. target may be NULL (trivial success).
+// Fix: removed a redundant second `fTarget = target;` inside the non-NULL
+// branch — the member is already assigned just above.
+bool GrDrawTarget::AutoReleaseGeometry::set(GrDrawTarget*  target,
+                                            GrVertexLayout vertexLayout,
+                                            int            vertexCount,
+                                            int            indexCount) {
+    this->reset();
+    fTarget = target;
+    bool success = true;
+    if (NULL != fTarget) {
+        if (vertexCount > 0) {
+            success = target->reserveVertexSpace(vertexLayout,
+                                                 vertexCount,
+                                                 &fVertices);
+            if (!success) {
+                this->reset();
+            }
+        }
+        if (success && indexCount > 0) {
+            success = target->reserveIndexSpace(indexCount, &fIndices);
+            if (!success) {
+                this->reset();
+            }
+        }
+    }
+    // reset() nulls fTarget on failure, so success and fTarget must agree.
+    GrAssert(success == (NULL != fTarget));
+    return success;
+}
+
+// Releases any reserved vertex/index sources back to the target and clears
+// all members. Safe to call repeatedly or when nothing is held.
+void GrDrawTarget::AutoReleaseGeometry::reset() {
+    if (NULL != fTarget) {
+        if (NULL != fVertices) {
+            fTarget->resetVertexSource();
+        }
+        if (NULL != fIndices) {
+            fTarget->resetIndexSource();
+        }
+        fTarget = NULL;
+    }
+    fVertices = NULL;
+    fIndices = NULL;
+}
+
+// Dumps every capability field for debugging.
+// Fix: fSupportPerVertexCoverage is declared in Caps but was missing from
+// the report; it is now printed with the other flags.
+void GrDrawTarget::Caps::print() const {
+    static const char* gNY[] = {"NO", "YES"};
+    GrPrintf("8 Bit Palette Support       : %s\n", gNY[f8BitPaletteSupport]);
+    GrPrintf("NPOT Texture Support        : %s\n", gNY[fNPOTTextureSupport]);
+    GrPrintf("NPOT Texture Tile Support   : %s\n", gNY[fNPOTTextureTileSupport]);
+    GrPrintf("NPOT Render Target Support  : %s\n", gNY[fNPOTRenderTargetSupport]);
+    GrPrintf("Two Sided Stencil Support   : %s\n", gNY[fTwoSidedStencilSupport]);
+    GrPrintf("Stencil Wrap Ops  Support   : %s\n", gNY[fStencilWrapOpsSupport]);
+    GrPrintf("HW AA Lines Support         : %s\n", gNY[fHWAALineSupport]);
+    GrPrintf("Shader Support              : %s\n", gNY[fShaderSupport]);
+    GrPrintf("Shader Derivative Support   : %s\n", gNY[fShaderDerivativeSupport]);
+    GrPrintf("Geometry Shader Support     : %s\n", gNY[fGeometryShaderSupport]);
+    GrPrintf("FSAA Support                : %s\n", gNY[fFSAASupport]);
+    GrPrintf("Dual Source Blending Support: %s\n", gNY[fDualSourceBlendingSupport]);
+    GrPrintf("Buffer Lock Support         : %s\n", gNY[fBufferLockSupport]);
+    GrPrintf("Per-Vertex Coverage Support : %s\n", gNY[fSupportPerVertexCoverage]);
+    GrPrintf("Min Render Target Width     : %d\n", fMinRenderTargetWidth);
+    GrPrintf("Min Render Target Height    : %d\n", fMinRenderTargetHeight);
+    GrPrintf("Max Texture Size            : %d\n", fMaxTextureSize);
+    GrPrintf("Max Render Target Size      : %d\n", fMaxRenderTargetSize);
+}
+
diff --git a/src/gpu/GrDrawTarget.h b/src/gpu/GrDrawTarget.h
new file mode 100644
index 0000000000..10633f5ed3
--- /dev/null
+++ b/src/gpu/GrDrawTarget.h
@@ -0,0 +1,1476 @@
+
+/*
+ * Copyright 2010 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+
+#ifndef GrDrawTarget_DEFINED
+#define GrDrawTarget_DEFINED
+
+#include "GrClip.h"
+#include "GrColor.h"
+#include "GrMatrix.h"
+#include "GrRefCnt.h"
+#include "GrRenderTarget.h"
+#include "GrSamplerState.h"
+#include "GrStencil.h"
+#include "GrTexture.h"
+
+#include "SkXfermode.h"
+
+class GrTexture;
+class GrClipIterator;
+class GrVertexBuffer;
+class GrIndexBuffer;
+
+class GrDrawTarget : public GrRefCnt {
+public:
+ /**
+ * Represents the draw target capabilities.
+ */
+ struct Caps {
+ // Capabilities and limits of a draw target. All members are bitfield
+ // bools or plain ints, so the zero-fill and raw memcpy below are safe.
+ Caps() { memset(this, 0, sizeof(Caps)); }
+ Caps(const Caps& c) { *this = c; }
+ Caps& operator= (const Caps& c) {
+ memcpy(this, &c, sizeof(Caps));
+ return *this;
+ }
+ // Debug dump of every field (implemented in GrDrawTarget.cpp).
+ // NOTE(review): keep print() in sync when adding members here.
+ void print() const;
+ bool f8BitPaletteSupport : 1;
+ bool fNPOTTextureSupport : 1;
+ bool fNPOTTextureTileSupport : 1;
+ bool fNPOTRenderTargetSupport : 1;
+ bool fTwoSidedStencilSupport : 1;
+ bool fStencilWrapOpsSupport : 1;
+ bool fHWAALineSupport : 1;
+ bool fShaderSupport : 1;
+ bool fShaderDerivativeSupport : 1;
+ bool fGeometryShaderSupport : 1;
+ bool fFSAASupport : 1;
+ bool fDualSourceBlendingSupport : 1;
+ bool fBufferLockSupport : 1;
+ bool fSupportPerVertexCoverage : 1;
+ int fMinRenderTargetWidth;
+ int fMinRenderTargetHeight;
+ int fMaxRenderTargetSize;
+ int fMaxTextureSize;
+ };
+
+ /**
+ * Number of texture stages. Each stage takes as input a color and
+ * 2D texture coordinates. The color input to the first enabled stage is the
+ * per-vertex color or the constant color (setColor/setAlpha) if there are
+ * no per-vertex colors. For subsequent stages the input color is the output
+ * color from the previous enabled stage. The output color of each stage is
+ * the input color modulated with the result of a texture lookup. Texture
+ * lookups are specified by a texture and a sampler (setSamplerState). Texture
+ * coordinates for each stage come from the vertices based on a
+ * GrVertexLayout bitfield. The output fragment color is the output color of
+ * the last enabled stage. The presence or absence of texture coordinates
+ * for each stage in the vertex layout indicates whether a stage is enabled
+ * or not.
+ */
+ enum {
+ kNumStages = 3,
+ kMaxTexCoords = kNumStages
+ };
+
+ /**
+ * The absolute maximum number of edges that may be specified for
+ * a single draw call when performing edge antialiasing. This is used for
+ * the size of several static buffers, so implementations of getMaxEdges()
+ * (below) should clamp to this value.
+ */
+ enum {
+ kMaxEdges = 32
+ };
+
+ /**
+ * When specifying edges as vertex data this enum specifies what type of
+ * edges are in use. The edges are always 4 GrScalars in memory, even when
+ * the edge type requires fewer than 4.
+ */
+ enum VertexEdgeType {
+ /* 1-pixel wide line
+ 2D implicit line eq (a*x + b*y +c = 0). 4th component unused */
+ kHairLine_EdgeType,
+ /* 1-pixel wide quadratic
+ u^2-v canonical coords (only 2 components used) */
+ kHairQuad_EdgeType
+ };
+
+ /**
+ * Bitfield used to indicate which stages are in use.
+ */
+ typedef int StageBitfield;
+ GR_STATIC_ASSERT(sizeof(StageBitfield)*8 >= kNumStages);
+
+ /**
+ * Flags that affect rendering. Controlled using enable/disableState(). All
+ * default to disabled.
+ */
+ enum StateBits {
+ kDither_StateBit = 0x01, //<! Perform color dithering
+ kAntialias_StateBit = 0x02, //<! Perform anti-aliasing. The render-
+ // target must support some form of AA
+ // (msaa, coverage sampling, etc). For
+ // GrGpu-created rendertarget/textures
+ // this is controlled by parameters
+ // passed to createTexture.
+ kClip_StateBit = 0x04, //<! Controls whether drawing is clipped
+ // against the region specified by
+ // setClip.
+ kNoColorWrites_StateBit = 0x08, //<! If set it disables writing colors.
+ // Useful while performing stencil
+ // ops.
+ kEdgeAAConcave_StateBit = 0x10,//<! If set, edge AA will test edge
+ // pairs for convexity while
+ // rasterizing. Set this if the
+ // source polygon is non-convex.
+ // subclass may use additional bits internally
+ kDummyStateBit,
+ kLastPublicStateBit = kDummyStateBit-1
+ };
+
+ enum DrawFace {
+ kBoth_DrawFace,
+ kCCW_DrawFace,
+ kCW_DrawFace,
+ };
+
+ /**
+ * Sets the stencil settings to use for the next draw.
+ * Changing the clip has the side-effect of possibly zeroing
+ * out the client settable stencil bits. So multipass algorithms
+ * using stencil should not change the clip between passes.
+ * @param settings the stencil settings to use.
+ */
+ void setStencil(const GrStencilSettings& settings) {
+ // Copied by value into the current draw state; takes effect at the
+ // next draw (see the Doxygen comment above).
+ fCurrDrawState.fStencilSettings = settings;
+ }
+
+ /**
+ * Shortcut to disable stencil testing and ops.
+ */
+ void disableStencil() {
+ // Shortcut: marks the current stencil settings as disabled.
+ fCurrDrawState.fStencilSettings.setDisabled();
+ }
+
+ class Edge {
+ public:
+ Edge() {}
+ Edge(float x, float y, float z) : fX(x), fY(y), fZ(z) {}
+ // Returns the intersection point of this line and 'other', where each
+ // Edge encodes the implicit line equation fX*x + fY*y + fZ = 0 (see
+ // VertexEdgeType above). Uses the standard determinant (cross-product)
+ // formulas. NOTE(review): divides by zero when the two lines are
+ // parallel — callers presumably guarantee non-parallel edges; verify.
+ GrPoint intersect(const Edge& other) {
+ return GrPoint::Make(
+ (fY * other.fZ - other.fY * fZ) /
+ (fX * other.fY - other.fX * fY),
+ (fX * other.fZ - other.fX * fZ) /
+ (other.fX * fY - fX * other.fY));
+ }
+ // Coefficients of the implicit line equation fX*x + fY*y + fZ = 0.
+ float fX, fY, fZ;
+ };
+
+protected:
+
+    struct DrState {
+        // Aggregates all state that affects the next draw. The constructor
+        // zero-fills the entire struct so that operator== can be a raw
+        // memcmp (padding bytes must compare equal too); members whose
+        // valid default is non-zero are then set explicitly.
+        DrState() {
+            // make sure any pad is zero for memcmp
+            // all DrState members should default to something
+            // valid by the memset
+            memset(this, 0, sizeof(DrState));
+
+            // memset exceptions
+            fColorFilterXfermode = SkXfermode::kDstIn_Mode;
+            // all stages compute color until told otherwise
+            fFirstCoverageStage = kNumStages;
+
+            // pedantic assertion that our ptrs will
+            // be NULL (0 ptr is mem addr 0)
+            GrAssert((intptr_t)(void*)NULL == 0LL);
+
+            // default stencil setting should be disabled
+            GrAssert(fStencilSettings.isDisabled());
+            // (removed a duplicate fFirstCoverageStage assignment that
+            // repeated the one above)
+        }
+        uint32_t fFlagBits;
+        GrBlendCoeff fSrcBlend;
+        GrBlendCoeff fDstBlend;
+        GrColor fBlendConstant;
+        GrTexture* fTextures[kNumStages];
+        GrSamplerState fSamplerStates[kNumStages];
+        int fFirstCoverageStage;
+        GrRenderTarget* fRenderTarget;
+        GrColor fColor;
+        DrawFace fDrawFace;
+        GrColor fColorFilterColor;
+        SkXfermode::Mode fColorFilterXfermode;
+
+        GrStencilSettings fStencilSettings;
+        GrMatrix fViewMatrix;
+        VertexEdgeType fVertexEdgeType;
+        Edge fEdgeAAEdges[kMaxEdges];
+        int fEdgeAANumEdges;
+        // Bitwise equality; relies on the zero-fill in the constructor so
+        // that padding bytes are deterministic.
+        bool operator ==(const DrState& s) const {
+            return 0 == memcmp(this, &s, sizeof(DrState));
+        }
+        bool operator !=(const DrState& s) const { return !(*this == s); }
+    };
+
+public:
+ ///////////////////////////////////////////////////////////////////////////
+
+ GrDrawTarget();
+ virtual ~GrDrawTarget();
+
+ /**
+ * Gets the capabilities of the draw target.
+ */
+ const Caps& getCaps() const { return fCaps; }
+
+ /**
+ * Sets the current clip to the region specified by clip. All draws will be
+ * clipped against this clip if kClip_StateBit is enabled.
+ *
+ * Setting the clip may (or may not) zero out the client's stencil bits.
+ *
+ * @param description of the clipping region
+ */
+ void setClip(const GrClip& clip);
+
+ /**
+ * Gets the current clip.
+ *
+ * @return the clip.
+ */
+ const GrClip& getClip() const;
+
+ /**
+ * Sets the texture used at the next drawing call
+ *
+ * @param stage The texture stage for which the texture will be set
+ *
+ * @param texture The texture to set. Can be NULL though there is no advantage
+ * to setting a NULL texture if doing non-textured drawing
+ */
+ void setTexture(int stage, GrTexture* texture);
+
+ /**
+ * Retrieves the currently set texture.
+ *
+ * @return The currently set texture. The return value will be NULL if no
+ * texture has been set, NULL was most recently passed to
+ * setTexture, or the last setTexture was destroyed.
+ */
+ const GrTexture* getTexture(int stage) const;
+ GrTexture* getTexture(int stage);
+
+ /**
+ * Sets the rendertarget used at the next drawing call
+ *
+ * @param target The render target to set.
+ */
+ void setRenderTarget(GrRenderTarget* target);
+
+ /**
+ * Retrieves the currently set rendertarget.
+ *
+ * @return The currently set render target.
+ */
+ const GrRenderTarget* getRenderTarget() const;
+ GrRenderTarget* getRenderTarget();
+
+ /**
+ * Sets the sampler state for a stage used in subsequent draws.
+ *
+ * The sampler state determines how texture coordinates are
+ * interpreted and used to sample the texture.
+ *
+ * @param stage the stage of the sampler to set
+ * @param samplerState Specifies the sampler state.
+ */
+ void setSamplerState(int stage, const GrSamplerState& samplerState);
+
+ /**
+ * Concats the matrix of a stage's sampler.
+ *
+ * @param stage the stage of the sampler to set
+ * @param matrix the matrix to concat
+ */
+ void preConcatSamplerMatrix(int stage, const GrMatrix& matrix) {
+ // Bounds-check the stage index before touching the sampler array.
+ GrAssert(stage >= 0 && stage < kNumStages);
+ fCurrDrawState.fSamplerStates[stage].preConcatMatrix(matrix);
+ }
+
+ /**
+ * Shortcut for preConcatSamplerMatrix on all stages in mask with same
+ * matrix
+ */
+    void preConcatSamplerMatrices(int stageMask, const GrMatrix& matrix) {
+        // Walk all stages and pre-concat the matrix onto each stage whose
+        // bit is set in the mask.
+        for (int stage = 0; stage < kNumStages; ++stage) {
+            bool selected = 0 != (stageMask & (1 << stage));
+            if (selected) {
+                this->preConcatSamplerMatrix(stage, matrix);
+            }
+        }
+    }
+
+ /**
+ * Shortcut for preConcatSamplerMatrix on all enabled stages in mask with
+ * same matrix
+ *
+ * @param stage the stage of the sampler to set
+ * @param matrix the matrix to concat
+ */
+ void preConcatEnabledSamplerMatrices(const GrMatrix& matrix) {
+ // Forwards to preConcatSamplerMatrices with the bitfield of enabled
+ // stages. NOTE(review): enabledStages() is defined elsewhere in this
+ // class — presumably derived from the current vertex layout; verify.
+ StageBitfield stageMask = this->enabledStages();
+ this->preConcatSamplerMatrices(stageMask, matrix);
+ }
+
+ /**
+ * Gets the matrix of a stage's sampler
+ *
+ * @param stage the stage to of sampler to get
+ * @return the sampler state's matrix
+ */
+ const GrMatrix& getSamplerMatrix(int stage) const {
+ return fCurrDrawState.fSamplerStates[stage].getMatrix();
+ }
+
+ /**
+ * Sets the matrix of a stage's sampler
+ *
+ * @param stage the stage of sampler set
+ * @param matrix the matrix to set
+ */
+ void setSamplerMatrix(int stage, const GrMatrix& matrix) {
+ fCurrDrawState.fSamplerStates[stage].setMatrix(matrix);
+ }
+
+ /**
+ * Sets the matrix applied to vertex positions.
+ *
+ * In the post-view-matrix space the rectangle [0,w]x[0,h]
+ * fully covers the render target. (w and h are the width and height of the
+ * the rendertarget.)
+ *
+ * @param m the matrix used to transform the vertex positions.
+ */
+ void setViewMatrix(const GrMatrix& m);
+
+ /**
+ * Multiplies the current view matrix by a matrix
+ *
+ * After this call V' = V*m where V is the old view matrix,
+ * m is the parameter to this function, and V' is the new view matrix.
+ * (We consider positions to be column vectors so position vector p is
+ * transformed by matrix X as p' = X*p.)
+ *
+ * @param m the matrix used to modify the view matrix.
+ */
+ void preConcatViewMatrix(const GrMatrix& m);
+
+ /**
+ * Multiplies the current view matrix by a matrix
+ *
+ * After this call V' = m*V where V is the old view matrix,
+ * m is the parameter to this function, and V' is the new view matrix.
+ * (We consider positions to be column vectors so position vector p is
+ * transformed by matrix X as p' = X*p.)
+ *
+ * @param m the matrix used to modify the view matrix.
+ */
+ void postConcatViewMatrix(const GrMatrix& m);
+
+ /**
+ * Retrieves the current view matrix
+ * @return the current view matrix.
+ */
+ const GrMatrix& getViewMatrix() const;
+
+ /**
+ * Retrieves the inverse of the current view matrix.
+ *
+ * If the current view matrix is invertible, return true, and if matrix
+ * is non-null, copy the inverse into it. If the current view matrix is
+ * non-invertible, return false and ignore the matrix parameter.
+ *
+ * @param matrix if not null, will receive a copy of the current inverse.
+ */
+ bool getViewInverse(GrMatrix* matrix) const;
+
+ /**
+ * Sets color for next draw to a premultiplied-alpha color.
+ *
+ * @param the color to set.
+ */
+ void setColor(GrColor);
+
+ /**
+ * Gets the currently set color.
+ * @return the current color.
+ */
+ GrColor getColor() const { return fCurrDrawState.fColor; }
+
+ /**
+ * Add a color filter that can be represented by a color and a mode.
+ */
+ void setColorFilter(GrColor, SkXfermode::Mode);
+
+ /**
+ * Sets the color to be used for the next draw to be
+ * (r,g,b,a) = (alpha, alpha, alpha, alpha).
+ *
+ * @param alpha The alpha value to set as the color.
+ */
+ void setAlpha(uint8_t alpha);
+
+ /**
+ * Controls whether clockwise, counterclockwise, or both faces are drawn.
+ * @param face the face(s) to draw.
+ */
+ void setDrawFace(DrawFace face) { fCurrDrawState.fDrawFace = face; }
+
+ /**
+ * A common pattern is to compute a color with the initial stages and then
+ * modulate that color by a coverage value in later stage(s) (AA, mask-
+ * filters, glyph mask, etc). Color-filters, xfermodes, etc should be
+ * computed based on the pre-coverage-modulated color. The division of
+ * stages between color-computing and coverage-computing is specified by
+ * this method. Initially this is kNumStages (all stages are color-
+ * computing).
+ */
+ void setFirstCoverageStage(int firstCoverageStage) {
+ fCurrDrawState.fFirstCoverageStage = firstCoverageStage;
+ }
+
+ /**
+ * Gets the index of the first coverage-computing stage.
+ */
+ int getFirstCoverageStage() const {
+ return fCurrDrawState.fFirstCoverageStage;
+ }
+
+ /**
+ * Gets whether the target is drawing clockwise, counterclockwise,
+ * or both faces.
+ * @return the current draw face(s).
+ */
+ DrawFace getDrawFace() const { return fCurrDrawState.fDrawFace; }
+
+ /**
+ * Enable render state settings.
+ *
+ * @param stateBits bitfield of StateBits specifying the states to enable
+ */
+ void enableState(uint32_t stateBits);
+
+ /**
+ * Disable render state settings.
+ *
+ * @param stateBits bitfield of StateBits specifying the states to disable
+ */
+ void disableState(uint32_t stateBits);
+
+ bool isDitherState() const {
+ return 0 != (fCurrDrawState.fFlagBits & kDither_StateBit);
+ }
+
+ bool isAntialiasState() const {
+ return 0 != (fCurrDrawState.fFlagBits & kAntialias_StateBit);
+ }
+
+ bool isClipState() const {
+ return 0 != (fCurrDrawState.fFlagBits & kClip_StateBit);
+ }
+
+ bool isColorWriteDisabled() const {
+ return 0 != (fCurrDrawState.fFlagBits & kNoColorWrites_StateBit);
+ }
+
+ /**
+ * Sets the blending function coeffecients.
+ *
+ * The blend function will be:
+ * D' = sat(S*srcCoef + D*dstCoef)
+ *
+ * where D is the existing destination color, S is the incoming source
+ * color, and D' is the new destination color that will be written. sat()
+ * is the saturation function.
+ *
+ * @param srcCoef coeffecient applied to the src color.
+ * @param dstCoef coeffecient applied to the dst color.
+ */
+ void setBlendFunc(GrBlendCoeff srcCoeff, GrBlendCoeff dstCoeff);
+
+ /**
+ * Sets the blending function constant referenced by the following blending
+ * coeffecients:
+ * kConstC_BlendCoeff
+ * kIConstC_BlendCoeff
+ * kConstA_BlendCoeff
+ * kIConstA_BlendCoeff
+ *
+ * @param constant the constant to set
+ */
+ void setBlendConstant(GrColor constant) { fCurrDrawState.fBlendConstant = constant; }
+
+ /**
+ * Retrieves the last value set by setBlendConstant()
+ * @return the blending constant value
+ */
+ GrColor getBlendConstant() const { return fCurrDrawState.fBlendConstant; }
+
+ /**
+ * Determines if blending will require a read of a dst given the current
+ * state set on the draw target
+ *
+ * @return true if the dst surface will be read at each pixel hit by the
+ * a draw operation.
+ */
+ bool drawWillReadDst() const;
+
+ /**
+ * Color alpha and coverage are two inputs to the drawing pipeline. For some
+ * blend modes it is safe to fold the coverage into constant or per-vertex
+ * color alpha value. For other blend modes they must be handled separately.
+ * Depending on features available in the underlying 3D API this may or may
+ * not be possible.
+ *
+ * This function looks at the current blend on the draw target and the draw
+ * target's capabilities to determine whether coverage can be handled
+ * correctly.
+ */
+ bool canApplyCoverage() const;
+
+ /**
+ * Determines whether incorporating partial pixel coverage into the constant
+ * color specified by setColor or per-vertex colors will give the right
+ * blending result.
+ */
+ bool canTweakAlphaForCoverage() const;
+
+ /**
+ * Determines the interpretation per-vertex edge data when the
+ * kEdge_VertexLayoutBit is set (see below). When per-vertex edges are not
+ * specified the value of this setting has no effect.
+ */
+ void setVertexEdgeType(VertexEdgeType type) {
+ fCurrDrawState.fVertexEdgeType = type;
+ }
+
+ /**
+ * Given the current draw state, vertex layout, and hw support, will HW AA
+ * lines be used (if line primitive type is drawn)? (Note that lines are
+ * always 1 pixel wide)
+ */
+ bool willUseHWAALines() const;
+
+ /**
+ * Sets the edge data required for edge antialiasing.
+ *
+ * @param edges 3 * 6 float values, representing the edge
+ * equations in Ax + By + C form
+ */
+ void setEdgeAAData(const Edge* edges, int numEdges);
+
+ /**
+ * Used to save and restore the GrGpu's drawing state
+ */
+ struct SavedDrawState {
+ private:
+ DrState fState;
+ friend class GrDrawTarget;
+ };
+
+ /**
+ * Saves the current draw state. The state can be restored at a later time
+ * with restoreDrawState.
+ *
+ * See also AutoStateRestore class.
+ *
+ * @param state will hold the state after the function returns.
+ */
+ void saveCurrentDrawState(SavedDrawState* state) const;
+
+ /**
+ * Restores previously saved draw state. The client guarantees that state
+ * was previously passed to saveCurrentDrawState and that the rendertarget
+ * and texture set at save are still valid.
+ *
+ * See also AutoStateRestore class.
+ *
+ * @param state the previously saved state to restore.
+ */
+ void restoreDrawState(const SavedDrawState& state);
+
+ /**
+ * Copies the draw state from another target to this target.
+ *
+ * @param srcTarget draw target used as src of the draw state.
+ */
+ void copyDrawState(const GrDrawTarget& srcTarget);
+
+ /**
+ * The format of vertices is represented as a bitfield of flags.
+ * Flags that indicate the layout of vertex data. Vertices always contain
+ * positions and may also contain up to kMaxTexCoords sets of 2D texture
+ * coordinates, per-vertex colors, and per-vertex coverage. Each stage can
+ * use any of the texture coordinates as its input texture coordinates or it
+ * may use the positions as texture coordinates.
+ *
+ * If no texture coordinates are specified for a stage then the stage is
+ * disabled.
+ *
+ * Only one type of texture coord can be specified per stage. For
+ * example StageTexCoordVertexLayoutBit(0, 2) and
+ * StagePosAsTexCoordVertexLayoutBit(0) cannot both be specified.
+ *
+ * The order in memory is always (position, texture coord 0, ..., color,
+ * coverage) with any unused fields omitted. Note that this means that if
+ * only texture coordinates 1 is referenced then there is no texture
+ * coordinates 0 and the order would be (position, texture coordinate 1
+ * [, color][, coverage]).
+ */
+
+ /**
+ * Generates a bit indicating that a texture stage uses texture coordinates
+ *
+ * @param stage the stage that will use texture coordinates.
+ * @param texCoordIdx the index of the texture coordinates to use
+ *
+ * @return the bit to add to a GrVertexLayout bitfield.
+ */
+ static int StageTexCoordVertexLayoutBit(int stage, int texCoordIdx) {
+ GrAssert(stage < kNumStages);
+ GrAssert(texCoordIdx < kMaxTexCoords);
+ // Bit layout: each texture-coordinate set owns a group of kNumStages
+ // bits; 'stage' selects the bit within that group.
+ return 1 << (stage + (texCoordIdx * kNumStages));
+ }
+
+private:
+ static const int TEX_COORD_BIT_CNT = kNumStages*kMaxTexCoords;
+
+public:
+ /**
+ * Generates a bit indicating that a texture stage uses the position
+ * as its texture coordinate.
+ *
+ * @param stage the stage that will use position as texture
+ * coordinates.
+ *
+ * @return the bit to add to a GrVertexLayout bitfield.
+ */
+ static int StagePosAsTexCoordVertexLayoutBit(int stage) {
+ GrAssert(stage < kNumStages);
+ // These per-stage bits sit immediately above the explicit
+ // tex-coord bits (TEX_COORD_BIT_CNT of them).
+ return (1 << (TEX_COORD_BIT_CNT + stage));
+ }
+
+private:
+ static const int STAGE_BIT_CNT = TEX_COORD_BIT_CNT + kNumStages;
+
+public:
+
+ /**
+ * Additional Bits that can be specified in GrVertexLayout.
+ */
+ enum VertexLayoutBits {
+ /* vertices have colors (GrColor) */
+ kColor_VertexLayoutBit = 1 << (STAGE_BIT_CNT + 0),
+ /* vertices have coverage (GrColor where all channels should have the
+ * same value)
+ */
+ kCoverage_VertexLayoutBit = 1 << (STAGE_BIT_CNT + 1),
+ /* Use text vertices. (Pos and tex coords may be a different type for
+ * text [GrGpuTextVertex vs GrPoint].)
+ */
+ kTextFormat_VertexLayoutBit = 1 << (STAGE_BIT_CNT + 2),
+
+ /* Each vertex specifies an edge. Distance to the edge is used to
+ * compute a coverage. See setVertexEdgeType().
+ */
+ kEdge_VertexLayoutBit = 1 << (STAGE_BIT_CNT + 3),
+ // for below assert
+ kDummyVertexLayoutBit,
+ kHighVertexLayoutBit = kDummyVertexLayoutBit - 1
+ };
+ // make sure we haven't exceeded the number of bits in GrVertexLayout.
+ GR_STATIC_ASSERT(kHighVertexLayoutBit < ((uint64_t)1 << 8*sizeof(GrVertexLayout)));
+
+ /**
+ * There are three methods for specifying geometry (vertices and optionally
+ * indices) to the draw target. When indexed drawing the indices and vertices
+ * can use a different method. Once geometry is specified it can be used for
+ * multiple drawIndexed and drawNonIndexed calls.
+ *
+ * Sometimes it is necessary to perform a draw while upstack code has
+ * already specified geometry that it isn't finished with. There are push
+ * pop methods
+ *
+ * 1. Provide a cpu array (set*SourceToArray). This is useful when the
+ * caller's client has already provided vertex data in a format
+ * the time compatible with a GrVertexLayout. The array must contain the
+ * data at set*SourceToArray is called. The source stays in effect for
+ * drawIndexed & drawNonIndexed calls until set*SourceToArray is called
+ * again or one of the other two paths is chosen.
+ *
+ * 2. Reserve. This is most useful when the caller has data it must
+ * transform before drawing and is not long-lived. The caller requests
+ * that the draw target make room for some amount of vertex and/or index
+ * data. The target provides ptrs to hold the vertex and/or index data.
+ *
+ * The data is writable up until the next drawIndexed, drawNonIndexed,
+ * or pushGeometrySource At this point the data is frozen and the ptrs
+ * are no longer valid.
+ *
+ * 3. Vertex and Index Buffers. This is most useful for geometry that will
+ * is long-lived. SetVertexSourceToBuffer and SetIndexSourceToBuffer are
+ * used to set the buffer and subsequent drawIndexed and drawNonIndexed
+ * calls use this source until another source is set.
+ */
+
+ /**
+ * Reserves space for vertices. Draw target will use reserved vertices at
+ * at the next draw.
+ *
+ * If succeeds:
+ * if vertexCount > 0, *vertices will be the array
+ * of vertices to be filled by caller. The next draw will read
+ * these vertices.
+ *
+ * If a client does not already have a vertex buffer then this is the
+ * preferred way to allocate vertex data. It allows the subclass of
+ * GrDrawTarget to decide whether to put data in buffers, to group vertex
+ * data that uses the same state (e.g. for deferred rendering), etc.
+ *
+ * After the next draw or pushGeometrySource the vertices ptr is no longer
+ * valid and the geometry data cannot be further modified. The contents
+ * that were put in the reserved space can be drawn by multiple draws,
+ * however.
+ *
+ * @param vertexLayout the format of vertices (ignored if vertexCount == 0).
+ * @param vertexCount the number of vertices to reserve space for. Can be 0.
+ * @param vertices will point to reserved vertex space if vertexCount is
+ * non-zero. Illegal to pass NULL if vertexCount > 0.
+ *
+ * @return true if succeeded in allocating space for the vertices and false
+ * if not.
+ */
+ bool reserveVertexSpace(GrVertexLayout vertexLayout,
+ int vertexCount,
+ void** vertices);
+ /**
+ * Reserves space for indices. Draw target will use the reserved indices at
+ * the next indexed draw.
+ *
+ * If succeeds:
+ * if indexCount > 0, *indices will be the array
+ * of indices to be filled by caller. The next draw will read
+ * these indices.
+ *
+ * If a client does not already have a index buffer then this is the
+ * preferred way to allocate index data. It allows the subclass of
+ * GrDrawTarget to decide whether to put data in buffers, to group index
+ * data that uses the same state (e.g. for deferred rendering), etc.
+ *
+ * After the next indexed draw or pushGeometrySource the indices ptr is no
+ * longer valid and the geometry data cannot be further modified. The
+ * contents that were put in the reserved space can be drawn by multiple
+ * draws, however.
+ *
+ * @param indexCount the number of indices to reserve space for. Can be 0.
+ * @param indices will point to reserved index space if indexCount is
+ * non-zero. Illegal to pass NULL if indexCount > 0.
+ */
+
+ bool reserveIndexSpace(int indexCount, void** indices);
+ /**
+ * Provides hints to caller about the number of vertices and indices
+ * that can be allocated cheaply. This can be useful if caller is reserving
+ * space but doesn't know exactly how much geometry is needed.
+ *
+ * Also may hint whether the draw target should be flushed first. This is
+ * useful for deferred targets.
+ *
+ * @param vertexLayout layout of vertices caller would like to reserve
+ * @param vertexCount in: hint about how many vertices the caller would
+ * like to allocate.
+ * out: a hint about the number of vertices that can be
+ * allocated cheaply. Negative means no hint.
+ * Ignored if NULL.
+ * @param indexCount in: hint about how many indices the caller would
+ * like to allocate.
+ * out: a hint about the number of indices that can be
+ * allocated cheaply. Negative means no hint.
+ * Ignored if NULL.
+ *
+ * @return true if target should be flushed based on the input values.
+ */
+ virtual bool geometryHints(GrVertexLayout vertexLayout,
+ int* vertexCount,
+ int* indexCount) const;
+
+ /**
+ * Sets source of vertex data for the next draw. Array must contain
+ * the vertex data when this is called.
+ *
+ * @param array cpu array containing vertex data.
+ * @param size size of the vertex data.
+ * @param vertexCount the number of vertices in the array.
+ */
+ void setVertexSourceToArray(GrVertexLayout vertexLayout,
+ const void* vertexArray,
+ int vertexCount);
+
+ /**
+ * Sets source of index data for the next indexed draw. Array must contain
+ * the indices when this is called.
+ *
+ * @param array cpu array containing index data.
+ * @param indexCount the number of indices in the array.
+ */
+ void setIndexSourceToArray(const void* indexArray, int indexCount);
+
+ /**
+ * Sets source of vertex data for the next draw. Data does not have to be
+ * in the buffer until drawIndexed or drawNonIndexed.
+ *
+ * @param buffer vertex buffer containing vertex data. Must be
+ * unlocked before draw call.
+ * @param vertexLayout layout of the vertex data in the buffer.
+ */
+ void setVertexSourceToBuffer(GrVertexLayout vertexLayout,
+ const GrVertexBuffer* buffer);
+
+ /**
+ * Sets source of index data for the next indexed draw. Data does not have
+ * to be in the buffer until drawIndexed or drawNonIndexed.
+ *
+ * @param buffer index buffer containing indices. Must be unlocked
+ * before indexed draw call.
+ */
+ void setIndexSourceToBuffer(const GrIndexBuffer* buffer);
+
+ /**
+ * Resets vertex source. Drawing from reset vertices is illegal. Set vertex
+ * source to reserved, array, or buffer before next draw. May be able to free
+ * up temporary storage allocated by setVertexSourceToArray or
+ * reserveVertexSpace.
+ */
+ void resetVertexSource();
+
+ /**
+ * Resets index source. Indexed Drawing from reset indices is illegal. Set
+ * index source to reserved, array, or buffer before next indexed draw. May
+ * be able to free up temporary storage allocated by setIndexSourceToArray
+ * or reserveIndexSpace.
+ */
+ void resetIndexSource();
+
+ /**
+ * Pushes and resets the vertex/index sources. Any reserved vertex / index
+ * data is finalized (i.e. cannot be updated after the matching pop but can
+ * be drawn from). Must be balanced by a pop.
+ */
+ void pushGeometrySource();
+
+ /**
+ * Pops the vertex / index sources from the matching push.
+ */
+ void popGeometrySource();
+
+ /**
+ * Draws indexed geometry using the current state and current vertex / index
+ * sources.
+ *
+ * @param type The type of primitives to draw.
+ * @param startVertex the vertex in the vertex array/buffer corresponding
+ * to index 0
+ * @param startIndex first index to read from index src.
+ * @param vertexCount one greater than the max index.
+ * @param indexCount the number of index elements to read. The index count
+ * is effectively trimmed to the last completely
+ * specified primitive.
+ */
+ void drawIndexed(GrPrimitiveType type,
+ int startVertex,
+ int startIndex,
+ int vertexCount,
+ int indexCount);
+
+ /**
+ * Draws non-indexed geometry using the current state and current vertex
+ * sources.
+ *
+ * @param type The type of primitives to draw.
+ * @param startVertex the vertex in the vertex array/buffer corresponding
+ * to index 0
+ * @param vertexCount one greater than the max index.
+ */
+ void drawNonIndexed(GrPrimitiveType type,
+ int startVertex,
+ int vertexCount);
+
+ /**
+ * Helper function for drawing rects. This does not use the current index
+ * and vertex sources. After returning, the vertex and index sources may
+ * have changed. They should be reestablished before the next drawIndexed
+ * or drawNonIndexed. This cannot be called between reserving and releasing
+ * geometry. The GrDrawTarget subclass may be able to perform additional
+ * optimizations if drawRect is used rather than drawIndexed or
+ * drawNonIndexed.
+ * @param rect the rect to draw
+ * @param matrix optional matrix applied to rect (before viewMatrix)
+ * @param stageEnableBitfield bitmask indicating which stages are enabled.
+ * Bit i indicates whether stage i is enabled.
+ * @param srcRects specifies rects for stages enabled by stageEnableMask.
+ * if stageEnableMask bit i is 1, srcRects is not NULL,
+ * and srcRects[i] is not NULL, then srcRects[i] will be
+ * used as coordinates for stage i. Otherwise, if stage i
+ * is enabled then rect is used as the coordinates.
+ * @param srcMatrices optional matrices applied to srcRects. If
+ * srcRect[i] is non-NULL and srcMatrices[i] is
+ * non-NULL then srcRect[i] will be transformed by
+ * srcMatrix[i]. srcMatrices can be NULL when no
+ * srcMatrices are desired.
+ */
+ virtual void drawRect(const GrRect& rect,
+ const GrMatrix* matrix,
+ StageBitfield stageEnableBitfield,
+ const GrRect* srcRects[],
+ const GrMatrix* srcMatrices[]);
+
+ /**
+ * Helper for drawRect when the caller doesn't need separate src rects or
+ * matrices.
+ */
+    void drawSimpleRect(const GrRect& rect,
+                        const GrMatrix* matrix,
+                        StageBitfield stageEnableBitfield) {
+        // Forward to the general drawRect with no per-stage source rects or
+        // matrices; enabled stages then use 'rect' for their coordinates
+        // (see drawRect's srcRects documentation).
+        this->drawRect(rect, matrix, stageEnableBitfield, NULL, NULL);
+    }
+
+ /**
+ * Clear the render target. Ignores the clip and all other draw state
+ * (blend mode, stages, etc). Clears the whole thing if rect is NULL,
+ * otherwise just the rect.
+ */
+ virtual void clear(const GrIRect* rect, GrColor color) = 0;
+
+ /**
+ * Returns the maximum number of edges that may be specified in a single
+ * draw call when performing edge antialiasing. This is usually limited
+ * by the number of fragment uniforms which may be uploaded. Must be a
+ * minimum of six, since a triangle's vertices each belong to two boundary
+ * edges which may be distinct.
+ */
+ virtual int getMaxEdges() const { return 6; }
+
+ ////////////////////////////////////////////////////////////////////////////
+
+ class AutoStateRestore : ::GrNoncopyable {
+ public:
+ AutoStateRestore();
+ AutoStateRestore(GrDrawTarget* target);
+ ~AutoStateRestore();
+
+ /**
+ * if this object is already saving state for param target then
+ * this does nothing. Otherwise, it restores previously saved state on
+ * previous target (if any) and saves current state on param target.
+ */
+ void set(GrDrawTarget* target);
+
+ private:
+ GrDrawTarget* fDrawTarget;
+ SavedDrawState fDrawState;
+ };
+
+ ////////////////////////////////////////////////////////////////////////////
+
+ class AutoViewMatrixRestore : ::GrNoncopyable {
+ public:
+ AutoViewMatrixRestore() {
+ fDrawTarget = NULL;
+ }
+
+ AutoViewMatrixRestore(GrDrawTarget* target)
+ : fDrawTarget(target), fMatrix(fDrawTarget->getViewMatrix()) {
+ GrAssert(NULL != target);
+ }
+
+ void set(GrDrawTarget* target) {
+ GrAssert(NULL != target);
+ if (NULL != fDrawTarget) {
+ fDrawTarget->setViewMatrix(fMatrix);
+ }
+ fDrawTarget = target;
+ fMatrix = target->getViewMatrix();
+ }
+
+ ~AutoViewMatrixRestore() {
+ if (NULL != fDrawTarget) {
+ fDrawTarget->setViewMatrix(fMatrix);
+ }
+ }
+
+ private:
+ GrDrawTarget* fDrawTarget;
+ GrMatrix fMatrix;
+ };
+
+ ////////////////////////////////////////////////////////////////////////////
+
+ /**
+ * Sets the view matrix to I and preconcats all stage matrices enabled in
+ * mask by the view inverse. Destructor undoes these changes.
+ */
+ class AutoDeviceCoordDraw : ::GrNoncopyable {
+ public:
+ AutoDeviceCoordDraw(GrDrawTarget* target, int stageMask);
+ ~AutoDeviceCoordDraw();
+ private:
+ GrDrawTarget* fDrawTarget;
+ GrMatrix fViewMatrix;
+ GrMatrix fSamplerMatrices[kNumStages];
+ int fStageMask;
+ };
+
+ ////////////////////////////////////////////////////////////////////////////
+
+    /**
+     * Reserves vertex and index space on a target at construction (or via
+     * set()) and releases it when destroyed.
+     */
+    class AutoReleaseGeometry : ::GrNoncopyable {
+    public:
+        AutoReleaseGeometry(GrDrawTarget*  target,
+                            GrVertexLayout vertexLayout,
+                            int            vertexCount,
+                            int            indexCount);
+        AutoReleaseGeometry();
+        ~AutoReleaseGeometry();
+        bool set(GrDrawTarget*  target,
+                 GrVertexLayout vertexLayout,
+                 int            vertexCount,
+                 int            indexCount);
+        // True if the last construction/set() reserved space (fTarget is set).
+        bool succeeded() const { return NULL != fTarget; }
+        // Reserved vertex storage; only valid when succeeded().
+        void* vertices() const { GrAssert(this->succeeded()); return fVertices; }
+        // Reserved index storage; only valid when succeeded().
+        void* indices() const { GrAssert(this->succeeded()); return fIndices; }
+        // View of vertices() as positions (position lives at offset 0).
+        GrPoint* positions() const {
+            return static_cast<GrPoint*>(this->vertices());
+        }
+
+    private:
+        void reset();
+
+        GrDrawTarget* fTarget;
+        void*         fVertices;
+        void*         fIndices;
+    };
+
+ ////////////////////////////////////////////////////////////////////////////
+
+ class AutoClipRestore : ::GrNoncopyable {
+ public:
+ AutoClipRestore(GrDrawTarget* target) {
+ fTarget = target;
+ fClip = fTarget->getClip();
+ }
+
+ ~AutoClipRestore() {
+ fTarget->setClip(fClip);
+ }
+ private:
+ GrDrawTarget* fTarget;
+ GrClip fClip;
+ };
+
+ ////////////////////////////////////////////////////////////////////////////
+
+ class AutoGeometryPush : ::GrNoncopyable {
+ public:
+ AutoGeometryPush(GrDrawTarget* target) {
+ GrAssert(NULL != target);
+ fTarget = target;
+ target->pushGeometrySource();
+ }
+ ~AutoGeometryPush() {
+ fTarget->popGeometrySource();
+ }
+ private:
+ GrDrawTarget* fTarget;
+ };
+
+ ////////////////////////////////////////////////////////////////////////////
+ // Helpers for picking apart vertex layouts
+
+ /**
+ * Helper function to compute the size of a vertex from a vertex layout
+ * @return size of a single vertex.
+ */
+ static size_t VertexSize(GrVertexLayout vertexLayout);
+
+ /**
+ * Helper function for determining the index of texture coordinates that
+ * is input for a texture stage. Note that a stage may instead use positions
+ * as texture coordinates, in which case the result of the function is
+ * indistinguishable from the case when the stage is disabled.
+ *
+ * @param stage the stage to query
+ * @param vertexLayout layout to query
+ *
+ * @return the texture coordinate index or -1 if the stage doesn't use
+ * separate (non-position) texture coordinates.
+ */
+ static int VertexTexCoordsForStage(int stage, GrVertexLayout vertexLayout);
+
+ /**
+ * Helper function to compute the offset of texture coordinates in a vertex
+ * @return offset of texture coordinates in vertex layout or -1 if the
+ * layout has no texture coordinates. Will be 0 if positions are
+ * used as texture coordinates for the stage.
+ */
+ static int VertexStageCoordOffset(int stage, GrVertexLayout vertexLayout);
+
+ /**
+ * Helper function to compute the offset of the color in a vertex
+ * @return offset of color in vertex layout or -1 if the
+ * layout has no color.
+ */
+ static int VertexColorOffset(GrVertexLayout vertexLayout);
+
+ /**
+ * Helper function to compute the offset of the coverage in a vertex
+ * @return offset of coverage in vertex layout or -1 if the
+ * layout has no coverage.
+ */
+ static int VertexCoverageOffset(GrVertexLayout vertexLayout);
+
+ /**
+ * Helper function to compute the offset of the edge pts in a vertex
+ * @return offset of edge in vertex layout or -1 if the
+ * layout has no edge.
+ */
+ static int VertexEdgeOffset(GrVertexLayout vertexLayout);
+
+ /**
+ * Helper function to determine if vertex layout contains explicit texture
+ * coordinates of some index.
+ *
+ * @param coordIndex the tex coord index to query
+ * @param vertexLayout layout to query
+ *
+ * @return true if vertex specifies texture coordinates for the index,
+ * false otherwise.
+ */
+ static bool VertexUsesTexCoordIdx(int coordIndex,
+ GrVertexLayout vertexLayout);
+
+ /**
+ * Helper function to determine if vertex layout contains either explicit or
+ * implicit texture coordinates for a stage.
+ *
+ * @param stage the stage to query
+ * @param vertexLayout layout to query
+ *
+ * @return true if vertex specifies texture coordinates for the stage,
+ * false otherwise.
+ */
+ static bool VertexUsesStage(int stage, GrVertexLayout vertexLayout);
+
+    /**
+     * Helper function to compute the size of each vertex and the offsets of
+     * texture coordinates and color. Determines tex coord offsets by tex coord
+     * index rather than by stage. (Each stage can be mapped to any t.c. index
+     * by StageTexCoordVertexLayoutBit.)
+     *
+     * @param vertexLayout          the layout to query
+     * @param texCoordOffsetsByIdx  after return it is the offset of each
+     *                              tex coord index in the vertex or -1 if
+     *                              index isn't used. (optional)
+     * @param colorOffset           after return it is the offset of the
+     *                              color field in each vertex, or -1 if
+     *                              there aren't per-vertex colors. (optional)
+     * @param coverageOffset        after return it is the offset of the
+     *                              coverage field in each vertex, or -1 if
+     *                              there aren't per-vertex coverages.
+     *                              (optional)
+     * @param edgeOffset            after return it is the offset of the
+     *                              edge eq field in each vertex, or -1 if
+     *                              there aren't per-vertex edge equations.
+     *                              (optional)
+     * @return size of a single vertex
+     */
+    static int VertexSizeAndOffsetsByIdx(GrVertexLayout vertexLayout,
+                                         int texCoordOffsetsByIdx[kMaxTexCoords],
+                                         int *colorOffset,
+                                         int *coverageOffset,
+                                         int* edgeOffset);
+
+    /**
+     * Helper function to compute the size of each vertex and the offsets of
+     * texture coordinates and color. Determines tex coord offsets by stage
+     * rather than by index. (Each stage can be mapped to any t.c. index
+     * by StageTexCoordVertexLayoutBit.) If a stage uses positions for
+     * tex coords then that stage's offset will be 0 (positions are always at 0).
+     *
+     * @param vertexLayout              the layout to query
+     * @param texCoordOffsetsByStage    after return it is the offset of each
+     *                                  tex coord index in the vertex or -1 if
+     *                                  index isn't used. (optional)
+     * @param colorOffset               after return it is the offset of the
+     *                                  color field in each vertex, or -1 if
+     *                                  there aren't per-vertex colors.
+     *                                  (optional)
+     * @param coverageOffset            after return it is the offset of the
+     *                                  coverage field in each vertex, or -1 if
+     *                                  there aren't per-vertex coverages.
+     *                                  (optional)
+     * @param edgeOffset                after return it is the offset of the
+     *                                  edge eq field in each vertex, or -1 if
+     *                                  there aren't per-vertex edge equations.
+     *                                  (optional)
+     * @return size of a single vertex
+     */
+    static int VertexSizeAndOffsetsByStage(GrVertexLayout vertexLayout,
+                                           int texCoordOffsetsByStage[kNumStages],
+                                           int *colorOffset,
+                                           int *coverageOffset,
+                                           int* edgeOffset);
+
+ /**
+ * Accessing positions, texture coords, or colors, of a vertex within an
+ * array is a hassle involving casts and simple math. These helpers exist
+ * to keep GrDrawTarget clients' code a bit nicer looking.
+ */
+
+    /**
+     * Gets a pointer to a GrPoint of a vertex's position or texture
+     * coordinate.
+     * @param vertices      the vertex array
+     * @param vertexIndex   the index of the vertex in the array
+     * @param vertexSize    the size of each vertex in the array
+     * @param offset        the offset in bytes of the vertex component.
+     *                      Defaults to zero (corresponding to vertex position)
+     * @return pointer to the vertex component as a GrPoint
+     */
+    static GrPoint* GetVertexPoint(void* vertices,
+                                   int vertexIndex,
+                                   int vertexSize,
+                                   int offset = 0) {
+        intptr_t start = GrTCast<intptr_t>(vertices);
+        return GrTCast<GrPoint*>(start + offset +
+                                 vertexIndex * vertexSize);
+    }
+    static const GrPoint* GetVertexPoint(const void* vertices,
+                                         int vertexIndex,
+                                         int vertexSize,
+                                         int offset = 0) {
+        intptr_t start = GrTCast<intptr_t>(vertices);
+        return GrTCast<const GrPoint*>(start + offset +
+                                       vertexIndex * vertexSize);
+    }
+
+    /**
+     * Gets a pointer to a GrColor inside a vertex within a vertex array.
+     * @param vertices      the vertex array
+     * @param vertexIndex   the index of the vertex in the array
+     * @param vertexSize    the size of each vertex in the array
+     * @param offset        the offset in bytes of the vertex color
+     * @return pointer to the vertex component as a GrColor
+     */
+    static GrColor* GetVertexColor(void* vertices,
+                                   int vertexIndex,
+                                   int vertexSize,
+                                   int offset) {
+        intptr_t start = GrTCast<intptr_t>(vertices);
+        return GrTCast<GrColor*>(start + offset +
+                                 vertexIndex * vertexSize);
+    }
+    static const GrColor* GetVertexColor(const void* vertices,
+                                         int vertexIndex,
+                                         int vertexSize,
+                                         int offset) {
+        const intptr_t start = GrTCast<intptr_t>(vertices);
+        return GrTCast<const GrColor*>(start + offset +
+                                       vertexIndex * vertexSize);
+    }
+
+    // Runs self-checks over the vertex layout helper functions above.
+    static void VertexLayoutUnitTest();
+
+protected:
+
+    /**
+     * Optimizations for blending / coverage to be applied based on the current
+     * state.
+     * Subclasses that actually draw (as opposed to those that just buffer for
+     * playback) must implement the flags that replace the output color.
+     */
+    enum BlendOptFlags {
+        /**
+         * No optimization
+         */
+        kNone_BlendOpt = 0,
+        /**
+         * Don't draw at all
+         */
+        kSkipDraw_BlendOptFlag = 0x2,
+        /**
+         * Emit the src color, disable HW blending (replace dst with src)
+         */
+        kDisableBlend_BlendOptFlag = 0x4,
+        /**
+         * The coverage value does not have to be computed separately from
+         * alpha, then the output color can be the modulation of the two.
+         */
+        kCoverageAsAlpha_BlendOptFlag = 0x1,
+        /**
+         * Instead of emitting a src color, emit coverage in the alpha channel
+         * and r,g,b are "don't cares".
+         */
+        kEmitCoverage_BlendOptFlag = 0x10,
+        /**
+         * Emit transparent black instead of the src color, no need to compute
+         * coverage.
+         */
+        kEmitTransBlack_BlendOptFlag = 0x8,
+    };
+    GR_DECL_BITFIELD_OPS_FRIENDS(BlendOptFlags);
+
+ // Determines what optimizations can be applied based on the blend.
+ // The coeffecients may have to be tweaked in order for the optimization
+ // to work. srcCoeff and dstCoeff are optional params that receive the
+ // tweaked coeffecients.
+ // Normally the function looks at the current state to see if coverage
+ // is enabled. By setting forceCoverage the caller can speculatively
+ // determine the blend optimizations that would be used if there was
+ // partial pixel coverage
+ BlendOptFlags getBlendOpts(bool forceCoverage = false,
+ GrBlendCoeff* srcCoeff = NULL,
+ GrBlendCoeff* dstCoeff = NULL) const;
+
+ // determine if src alpha is guaranteed to be one for all src pixels
+ bool srcAlphaWillBeOne() const;
+
+    enum GeometrySrcType {
+        kNone_GeometrySrcType,     //!< src has not been specified
+        kReserved_GeometrySrcType, //!< src was set using reserve*Space
+        kArray_GeometrySrcType,    //!< src was set using set*SourceToArray
+        kBuffer_GeometrySrcType    //!< src was set using set*SourceToBuffer
+    };
+
+    // Records where the current vertex and index data come from; each union
+    // is discriminated by the GeometrySrcType field that precedes it.
+    struct GeometrySrcState {
+        GeometrySrcType fVertexSrc;
+        union {
+            // valid if src type is buffer
+            const GrVertexBuffer* fVertexBuffer;
+            // valid if src type is reserved or array
+            int fVertexCount;
+        };
+
+        GeometrySrcType fIndexSrc;
+        union {
+            // valid if src type is buffer
+            const GrIndexBuffer* fIndexBuffer;
+            // valid if src type is reserved or array
+            int fIndexCount;
+        };
+
+        GrVertexLayout fVertexLayout;
+    };
+
+    // given a vertex layout and a draw state, will a stage be used?
+    static bool StageWillBeUsed(int stage, GrVertexLayout layout,
+                                const DrState& state) {
+        // a stage is used when it has a texture bound AND the layout supplies
+        // (explicit or implicit) tex coords for it (see VertexUsesStage).
+        return NULL != state.fTextures[stage] && VertexUsesStage(stage, layout);
+    }
+
+    // will stage apply to this target's current geometry source and state?
+    bool isStageEnabled(int stage) const {
+        return StageWillBeUsed(stage, this->getGeomSrc().fVertexLayout,
+                               fCurrDrawState);
+    }
+
+ StageBitfield enabledStages() const {
+ StageBitfield mask = 0;
+ for (int s = 0; s < kNumStages; ++s) {
+ mask |= this->isStageEnabled(s) ? 1 : 0;
+ }
+ return mask;
+ }
+
+ // Helpers for GrDrawTarget subclasses that won't have private access to
+ // SavedDrawState but need to peek at the state values.
+ static DrState& accessSavedDrawState(SavedDrawState& sds)
+ { return sds.fState; }
+ static const DrState& accessSavedDrawState(const SavedDrawState& sds)
+ { return sds.fState; }
+
+ // implemented by subclass to allocate space for reserved geom
+ virtual bool onReserveVertexSpace(GrVertexLayout vertexLayout,
+ int vertexCount,
+ void** vertices) = 0;
+ virtual bool onReserveIndexSpace(int indexCount, void** indices) = 0;
+ // implemented by subclass to handle release of reserved geom space
+ virtual void releaseReservedVertexSpace() = 0;
+ virtual void releaseReservedIndexSpace() = 0;
+ // subclass must consume array contents when set
+ virtual void onSetVertexSourceToArray(const void* vertexArray,
+ int vertexCount) = 0;
+ virtual void onSetIndexSourceToArray(const void* indexArray,
+ int indexCount) = 0;
+ // subclass is notified that geom source will be set away from an array
+ virtual void releaseVertexArray() = 0;
+ virtual void releaseIndexArray() = 0;
+ // subclass overrides to be notified just before geo src state
+ // is pushed/popped.
+ virtual void geometrySourceWillPush() = 0;
+ virtual void geometrySourceWillPop(const GeometrySrcState& restoredState) = 0;
+ // subclass called to perform drawing
+ virtual void onDrawIndexed(GrPrimitiveType type,
+ int startVertex,
+ int startIndex,
+ int vertexCount,
+ int indexCount) = 0;
+ virtual void onDrawNonIndexed(GrPrimitiveType type,
+ int startVertex,
+ int vertexCount) = 0;
+ // subclass overrides to be notified when clip is set. Must call
+ // INHERITED::clipwillBeSet
+ virtual void clipWillBeSet(const GrClip& clip);
+
+ // Helpers for drawRect, protected so subclasses that override drawRect
+ // can use them.
+ static GrVertexLayout GetRectVertexLayout(StageBitfield stageEnableBitfield,
+ const GrRect* srcRects[]);
+
+ static void SetRectVertices(const GrRect& rect,
+ const GrMatrix* matrix,
+ const GrRect* srcRects[],
+ const GrMatrix* srcMatrices[],
+ GrVertexLayout layout,
+ void* vertices);
+
+ // accessor for derived classes
+ const GeometrySrcState& getGeomSrc() const {
+ return fGeoSrcStateStack.back();
+ }
+
+ GrClip fClip;
+
+ DrState fCurrDrawState;
+
+ Caps fCaps;
+
+private:
+ // called when setting a new vert/idx source to unref prev vb/ib
+ void releasePreviousVertexSource();
+ void releasePreviousIndexSource();
+
+ enum {
+ kPreallocGeoSrcStateStackCnt = 4,
+ };
+ SkSTArray<kPreallocGeoSrcStateStackCnt,
+ GeometrySrcState, true> fGeoSrcStateStack;
+
+};
+
+GR_MAKE_BITFIELD_OPS(GrDrawTarget::BlendOptFlags);
+
+#endif
diff --git a/src/gpu/GrGLDefaultInterface_none.cpp b/src/gpu/GrGLDefaultInterface_none.cpp
new file mode 100644
index 0000000000..2cca135c0a
--- /dev/null
+++ b/src/gpu/GrGLDefaultInterface_none.cpp
@@ -0,0 +1,13 @@
+
+/*
+ * Copyright 2011 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "GrGLInterface.h"
+
+// "None" build of the default interface factory: no platform GL
+// implementation is linked in, so there is no interface to hand out.
+const GrGLInterface* GrGLDefaultInterface() {
+    return NULL;
+}
diff --git a/src/gpu/GrGLIRect.h b/src/gpu/GrGLIRect.h
new file mode 100644
index 0000000000..e94fa21a5a
--- /dev/null
+++ b/src/gpu/GrGLIRect.h
@@ -0,0 +1,74 @@
+
+/*
+ * Copyright 2011 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+
+#ifndef GrGLIRect_DEFINED
+#define GrGLIRect_DEFINED
+
+#include "GrGLInterface.h"
+
+/**
+ * Helper struct for dealing with the fact that Ganesh and GL use different
+ * window coordinate systems (top-down vs bottom-up)
+ */
+struct GrGLIRect {
+    // Field order mirrors GL's viewport parameter order (x, y, w, h);
+    // setFromGLViewport() reads this struct as a GrGLint[4].
+    GrGLint   fLeft;
+    GrGLint   fBottom;
+    GrGLsizei fWidth;
+    GrGLsizei fHeight;
+
+    // Applies this rect as the GL viewport.
+    void pushToGLViewport(const GrGLInterface* gl) const {
+        GR_GL_CALL(gl, Viewport(fLeft, fBottom, fWidth, fHeight));
+    }
+
+    // Applies this rect as the GL scissor rect.
+    void pushToGLScissor(const GrGLInterface* gl) const {
+        GR_GL_CALL(gl, Scissor(fLeft, fBottom, fWidth, fHeight));
+    }
+
+    // Reads GL's current viewport into this rect.
+    void setFromGLViewport(const GrGLInterface* gl) {
+        // relies on the layout asserted here: four contiguous GrGLints
+        GR_STATIC_ASSERT(sizeof(GrGLIRect) == 4*sizeof(GrGLint));
+        GR_GL_GetIntegerv(gl, GR_GL_VIEWPORT, (GrGLint*) this);
+    }
+
+    // sometimes we have a GrIRect from the client that we
+    // want to simultaneously make relative to GL's viewport
+    // and convert from top-down to bottom-up.
+    void setRelativeTo(const GrGLIRect& glRect,
+                       int leftOffset,
+                       int topOffset,
+                       int width,
+                       int height) {
+        fLeft = glRect.fLeft + leftOffset;
+        fWidth = width;
+        // flip: topOffset is measured from the top, fBottom from the bottom
+        fBottom = glRect.fBottom + (glRect.fHeight - topOffset - height);
+        fHeight = height;
+
+        GrAssert(fLeft >= 0);
+        GrAssert(fWidth >= 0);
+        GrAssert(fBottom >= 0);
+        GrAssert(fHeight >= 0);
+    }
+
+    // true if glRect lies entirely within this rect
+    bool contains(const GrGLIRect& glRect) const {
+        return fLeft <= glRect.fLeft &&
+               fBottom <= glRect.fBottom &&
+               fLeft + fWidth >= glRect.fLeft + glRect.fWidth &&
+               fBottom + fHeight >= glRect.fBottom + glRect.fHeight;
+    }
+
+    // marks the rect as holding no valid viewport/scissor state
+    void invalidate() {fLeft = fWidth = fBottom = fHeight = -1;}
+
+    // bytewise compare is safe: the struct is exactly four GrGLints (see the
+    // static assert in setFromGLViewport), so there is no padding
+    bool operator ==(const GrGLIRect& glRect) const {
+        return 0 == memcmp(this, &glRect, sizeof(GrGLIRect));
+    }
+
+    bool operator !=(const GrGLIRect& glRect) const {return !(*this == glRect);}
+};
+
+#endif
diff --git a/src/gpu/GrGLIndexBuffer.cpp b/src/gpu/GrGLIndexBuffer.cpp
new file mode 100644
index 0000000000..b64668ede2
--- /dev/null
+++ b/src/gpu/GrGLIndexBuffer.cpp
@@ -0,0 +1,131 @@
+
+/*
+ * Copyright 2011 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+
+#include "GrGLIndexBuffer.h"
+#include "GrGpuGL.h"
+
+#define GPUGL static_cast<GrGpuGL*>(getGpu())
+
+#define GL_CALL(X) GR_GL_CALL(GPUGL->glInterface(), X)
+
+// Wraps an existing GL buffer object name. The id's lifetime is managed by
+// this object through onRelease() (deletes it) and onAbandon() (forgets it).
+GrGLIndexBuffer::GrGLIndexBuffer(GrGpuGL* gpu,
+                                 GrGLuint id,
+                                 size_t sizeInBytes,
+                                 bool dynamic)
+    : INHERITED(gpu, sizeInBytes, dynamic)
+    , fBufferID(id)
+    , fLockPtr(NULL) {
+
+}
+
+void GrGLIndexBuffer::onRelease() {
+ // make sure we've not been abandoned
+ if (fBufferID) {
+ GPUGL->notifyIndexBufferDelete(this);
+ GL_CALL(DeleteBuffers(1, &fBufferID));
+ fBufferID = 0;
+ }
+}
+
+void GrGLIndexBuffer::onAbandon() {
+ fBufferID = 0;
+ fLockPtr = NULL;
+}
+
+// Binds the buffer to GR_GL_ELEMENT_ARRAY_BUFFER and notifies the gpu so it
+// can track the currently bound index buffer.
+void GrGLIndexBuffer::bind() const {
+    GL_CALL(BindBuffer(GR_GL_ELEMENT_ARRAY_BUFFER, fBufferID));
+    GPUGL->notifyIndexBufferBind(this);
+}
+
+// Returns the GL buffer object name (0 after release or abandon).
+GrGLuint GrGLIndexBuffer::bufferID() const {
+    return fBufferID;
+}
+
+// Maps the buffer for writing. Returns NULL when the driver has no buffer
+// mapping support (caller must then fall back to updateData()).
+void* GrGLIndexBuffer::lock() {
+    GrAssert(fBufferID);
+    GrAssert(!isLocked());
+    if (this->getGpu()->getCaps().fBufferLockSupport) {
+        this->bind();
+        // Let driver know it can discard the old data
+        GL_CALL(BufferData(GR_GL_ELEMENT_ARRAY_BUFFER,
+                           this->sizeInBytes(),
+                           NULL,
+                           this->dynamic() ? GR_GL_DYNAMIC_DRAW :
+                                             GR_GL_STATIC_DRAW));
+        GR_GL_CALL_RET(GPUGL->glInterface(),
+                       fLockPtr,
+                       MapBuffer(GR_GL_ELEMENT_ARRAY_BUFFER,
+                                 GR_GL_WRITE_ONLY));
+
+        return fLockPtr;
+    }
+    return NULL;
+}
+
+// Returns the pointer handed out by the most recent lock(); NULL if unlocked.
+void* GrGLIndexBuffer::lockPtr() const {
+    return fLockPtr;
+}
+
+// Unmaps a buffer previously mapped with lock().
+void GrGLIndexBuffer::unlock() {
+    GrAssert(fBufferID);
+    GrAssert(isLocked());
+    GrAssert(this->getGpu()->getCaps().fBufferLockSupport);
+
+    this->bind();
+    GL_CALL(UnmapBuffer(GR_GL_ELEMENT_ARRAY_BUFFER));
+    fLockPtr = NULL;
+}
+
+// The cached lock pointer is the source of truth; debug builds additionally
+// cross-check it against GL's GR_GL_BUFFER_MAPPED state.
+bool GrGLIndexBuffer::isLocked() const {
+#if GR_DEBUG
+    if (this->isValid() && this->getGpu()->getCaps().fBufferLockSupport) {
+        this->bind();
+        GrGLint mapped;
+        GL_CALL(GetBufferParameteriv(GR_GL_ELEMENT_ARRAY_BUFFER,
+                                     GR_GL_BUFFER_MAPPED, &mapped));
+        GrAssert(!!mapped == !!fLockPtr);
+    }
+#endif
+    return NULL != fLockPtr;
+}
+
+// Replaces the buffer's contents with src. Returns false (without touching
+// GL) if srcSizeInBytes exceeds the buffer's allocated size.
+bool GrGLIndexBuffer::updateData(const void* src, size_t srcSizeInBytes) {
+    GrAssert(fBufferID);
+    GrAssert(!isLocked());
+    if (srcSizeInBytes > this->sizeInBytes()) {
+        return false;
+    }
+    this->bind();
+    GrGLenum usage = dynamic() ? GR_GL_DYNAMIC_DRAW : GR_GL_STATIC_DRAW;
+#if !GR_GL_USE_BUFFER_DATA_NULL_HINT
+    // Note that we're cheating on the size here. Currently no methods
+    // allow a partial update that preserves contents of non-updated
+    // portions of the buffer (and lock() does a glBufferData(..size, NULL..))
+    GL_CALL(BufferData(GR_GL_ELEMENT_ARRAY_BUFFER, srcSizeInBytes, src, usage));
+#else
+    if (this->sizeInBytes() == srcSizeInBytes) {
+        GL_CALL(BufferData(GR_GL_ELEMENT_ARRAY_BUFFER,
+                           srcSizeInBytes, src, usage));
+    } else {
+        // Before we call glBufferSubData we give the driver a hint using
+        // glBufferData with NULL. This makes the old buffer contents
+        // inaccessible to future draws. The GPU may still be processing draws
+        // that reference the old contents. With this hint it can assign a
+        // different allocation for the new contents to avoid flushing the gpu
+        // past draws consuming the old contents.
+        GL_CALL(BufferData(GR_GL_ELEMENT_ARRAY_BUFFER,
+                           this->sizeInBytes(), NULL, usage));
+        GL_CALL(BufferSubData(GR_GL_ELEMENT_ARRAY_BUFFER,
+                              0, srcSizeInBytes, src));
+    }
+#endif
+    return true;
+}
+
diff --git a/src/gpu/GrGLIndexBuffer.h b/src/gpu/GrGLIndexBuffer.h
new file mode 100644
index 0000000000..c3e2287260
--- /dev/null
+++ b/src/gpu/GrGLIndexBuffer.h
@@ -0,0 +1,55 @@
+
+/*
+ * Copyright 2011 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+
+#ifndef GrGLIndexBuffer_DEFINED
+#define GrGLIndexBuffer_DEFINED
+
+#include "GrIndexBuffer.h"
+#include "GrGLInterface.h"
+
+class GrGpuGL;
+
+// GL-backed implementation of GrIndexBuffer. Construction is restricted to
+// GrGpuGL (a friend), which supplies the wrapped GL buffer object name.
+class GrGLIndexBuffer : public GrIndexBuffer {
+
+public:
+
+    virtual ~GrGLIndexBuffer() { this->release(); }
+
+    // GL buffer object name; 0 after release or abandon
+    GrGLuint bufferID() const;
+
+    // overrides of GrIndexBuffer
+    virtual void* lock();
+    virtual void* lockPtr() const;
+    virtual void unlock();
+    virtual bool isLocked() const;
+    virtual bool updateData(const void* src, size_t srcSizeInBytes);
+
+protected:
+    GrGLIndexBuffer(GrGpuGL* gpu,
+                    GrGLuint id,
+                    size_t sizeInBytes,
+                    bool dynamic);
+
+    // overrides of GrResource
+    virtual void onAbandon();
+    virtual void onRelease();
+
+private:
+    // binds to GR_GL_ELEMENT_ARRAY_BUFFER and notifies the gpu
+    void bind() const;
+
+    GrGLuint fBufferID; // GL buffer object name
+    void* fLockPtr;     // non-NULL while mapped via lock()
+
+    friend class GrGpuGL;
+
+    typedef GrIndexBuffer INHERITED;
+};
+
+#endif
diff --git a/src/gpu/GrGLInterface.cpp b/src/gpu/GrGLInterface.cpp
new file mode 100644
index 0000000000..70dd019f83
--- /dev/null
+++ b/src/gpu/GrGLInterface.cpp
@@ -0,0 +1,495 @@
+
+/*
+ * Copyright 2011 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+#include "GrTypes.h"
+#include "GrGLInterface.h"
+#include "GrGLDefines.h"
+
+#include <stdio.h>
+
+#if GR_GL_PER_GL_FUNC_CALLBACK
+namespace {
+void GrGLDefaultInterfaceCallback(const GrGLInterface*) {}
+}
+#endif
+
+GrGLVersion GrGLGetVersionFromString(const char* versionString) {
+ if (NULL == versionString) {
+ GrAssert(!"NULL GL version string.");
+ return 0;
+ }
+
+ int major, minor;
+
+ int n = sscanf(versionString, "%d.%d", &major, &minor);
+ if (2 == n) {
+ return GR_GL_VER(major, minor);
+ }
+
+ char profile[2];
+ n = sscanf(versionString, "OpenGL ES-%c%c %d.%d", profile, profile+1,
+ &major, &minor);
+ if (4 == n) {
+ return GR_GL_VER(major, minor);
+ }
+
+ n = sscanf(versionString, "OpenGL ES %d.%d", &major, &minor);
+ if (2 == n) {
+ return GR_GL_VER(major, minor);
+ }
+
+ return 0;
+}
+
+GrGLSLVersion GrGLGetGLSLVersionFromString(const char* versionString) {
+ if (NULL == versionString) {
+ GrAssert(!"NULL GLSL version string.");
+ return 0;
+ }
+
+ int major, minor;
+
+ int n = sscanf(versionString, "%d.%d", &major, &minor);
+ if (2 == n) {
+ return GR_GLSL_VER(major, minor);
+ }
+
+ n = sscanf(versionString, "OpenGL ES GLSL ES %d.%d", &major, &minor);
+ if (2 == n) {
+ return GR_GLSL_VER(major, minor);
+ }
+ return 0;
+}
+
/**
 * Checks whether ext appears as a complete, space-delimited token in
 * extensionString (the format returned by glGetString(GL_EXTENSIONS)).
 *
 * Fixes over the previous version: the length is kept as size_t (strlen and
 * strcspn return size_t; storing it in an int risked a signed/unsigned
 * comparison), an empty ext no longer spuriously matches a leading or doubled
 * space, and the unreachable return after the infinite loop is gone.
 *
 * @param ext              extension name, e.g. "GL_ARB_texture_compression"
 * @param extensionString  space-separated list of extension names
 * @return true if ext is present as a whole token
 */
bool GrGLHasExtensionFromString(const char* ext, const char* extensionString) {
    size_t extLength = strlen(ext);
    if (0 == extLength) {
        // an empty name would otherwise match a zero-length token
        return false;
    }

    while (true) {
        // length of the next space-delimited token
        size_t n = strcspn(extensionString, " ");
        if (n == extLength && 0 == strncmp(ext, extensionString, n)) {
            return true;
        }
        if (0 == extensionString[n]) {
            // reached the terminating NUL without a match
            return false;
        }
        extensionString += n + 1;
    }
}
+
+// Queries GL for its extension string and searches it for ext.
+bool GrGLHasExtension(const GrGLInterface* gl, const char* ext) {
+    const GrGLubyte* glstr;
+    GR_GL_CALL_RET(gl, glstr, GetString(GR_GL_EXTENSIONS));
+    return GrGLHasExtensionFromString(ext, (const char*) glstr);
+}
+
+// Queries GL_VERSION and parses it into a GrGLVersion.
+GrGLVersion GrGLGetVersion(const GrGLInterface* gl) {
+    const GrGLubyte* v;
+    GR_GL_CALL_RET(gl, v, GetString(GR_GL_VERSION));
+    return GrGLGetVersionFromString((const char*) v);
+}
+
+// Queries GL_SHADING_LANGUAGE_VERSION and parses it into a GrGLSLVersion.
+GrGLSLVersion GrGLGetGLSLVersion(const GrGLInterface* gl) {
+    const GrGLubyte* v;
+    GR_GL_CALL_RET(gl, v, GetString(GR_GL_SHADING_LANGUAGE_VERSION));
+    return GrGLGetGLSLVersionFromString((const char*) v);
+}
+
+GrGLInterface::GrGLInterface() {
+ fBindingsExported = (GrGLBinding)0;
+ fNPOTRenderTargetSupport = kProbe_GrGLCapability;
+ fMinRenderTargetHeight = kProbe_GrGLCapability;
+ fMinRenderTargetWidth = kProbe_GrGLCapability;
+
+ fActiveTexture = NULL;
+ fAttachShader = NULL;
+ fBindAttribLocation = NULL;
+ fBindBuffer = NULL;
+ fBindFragDataLocation = NULL;
+ fBindTexture = NULL;
+ fBlendColor = NULL;
+ fBlendFunc = NULL;
+ fBufferData = NULL;
+ fBufferSubData = NULL;
+ fClear = NULL;
+ fClearColor = NULL;
+ fClearStencil = NULL;
+ fClientActiveTexture = NULL;
+ fColor4ub = NULL;
+ fColorMask = NULL;
+ fColorPointer = NULL;
+ fCompileShader = NULL;
+ fCompressedTexImage2D = NULL;
+ fCreateProgram = NULL;
+ fCreateShader = NULL;
+ fCullFace = NULL;
+ fDeleteBuffers = NULL;
+ fDeleteProgram = NULL;
+ fDeleteShader = NULL;
+ fDeleteTextures = NULL;
+ fDepthMask = NULL;
+ fDisable = NULL;
+ fDisableClientState = NULL;
+ fDisableVertexAttribArray = NULL;
+ fDrawArrays = NULL;
+ fDrawBuffer = NULL;
+ fDrawBuffers = NULL;
+ fDrawElements = NULL;
+ fEnable = NULL;
+ fEnableClientState = NULL;
+ fEnableVertexAttribArray = NULL;
+ fFrontFace = NULL;
+ fGenBuffers = NULL;
+ fGenTextures = NULL;
+ fGetBufferParameteriv = NULL;
+ fGetError = NULL;
+ fGetIntegerv = NULL;
+ fGetProgramInfoLog = NULL;
+ fGetProgramiv = NULL;
+ fGetShaderInfoLog = NULL;
+ fGetShaderiv = NULL;
+ fGetString = NULL;
+ fGetTexLevelParameteriv = NULL;
+ fGetUniformLocation = NULL;
+ fLineWidth = NULL;
+ fLinkProgram = NULL;
+ fLoadMatrixf = NULL;
+ fMatrixMode = NULL;
+ fPixelStorei = NULL;
+ fPointSize = NULL;
+ fReadBuffer = NULL;
+ fReadPixels = NULL;
+ fScissor = NULL;
+ fShadeModel = NULL;
+ fShaderSource = NULL;
+ fStencilFunc = NULL;
+ fStencilFuncSeparate = NULL;
+ fStencilMask = NULL;
+ fStencilMaskSeparate = NULL;
+ fStencilOp = NULL;
+ fStencilOpSeparate = NULL;
+ fTexCoordPointer = NULL;
+ fTexEnvi = NULL;
+ fTexImage2D = NULL;
+ fTexParameteri = NULL;
+ fTexSubImage2D = NULL;
+ fUniform1f = NULL;
+ fUniform1i = NULL;
+ fUniform1fv = NULL;
+ fUniform1iv = NULL;
+ fUniform2f = NULL;
+ fUniform2i = NULL;
+ fUniform2fv = NULL;
+ fUniform2iv = NULL;
+ fUniform3f = NULL;
+ fUniform3i = NULL;
+ fUniform3fv = NULL;
+ fUniform3iv = NULL;
+ fUniform4f = NULL;
+ fUniform4i = NULL;
+ fUniform4fv = NULL;
+ fUniform4iv = NULL;
+ fUniformMatrix2fv = NULL;
+ fUniformMatrix3fv = NULL;
+ fUniformMatrix4fv = NULL;
+ fUseProgram = NULL;
+ fVertexAttrib4fv = NULL;
+ fVertexAttribPointer = NULL;
+ fVertexPointer = NULL;
+ fViewport = NULL;
+ fBindFramebuffer = NULL;
+ fBindRenderbuffer = NULL;
+ fCheckFramebufferStatus = NULL;
+ fDeleteFramebuffers = NULL;
+ fDeleteRenderbuffers = NULL;
+ fFramebufferRenderbuffer = NULL;
+ fFramebufferTexture2D = NULL;
+ fGenFramebuffers = NULL;
+ fGenRenderbuffers = NULL;
+ fGetFramebufferAttachmentParameteriv = NULL;
+ fGetRenderbufferParameteriv = NULL;
+ fRenderbufferStorage = NULL;
+ fRenderbufferStorageMultisample = NULL;
+ fBlitFramebuffer = NULL;
+ fResolveMultisampleFramebuffer = NULL;
+ fMapBuffer = NULL;
+ fUnmapBuffer = NULL;
+ fBindFragDataLocationIndexed = NULL;
+
+#if GR_GL_PER_GL_FUNC_CALLBACK
+ fCallback = GrGLDefaultInterfaceCallback;
+ fCallbackData = 0;
+#endif
+}
+
+
+bool GrGLInterface::validateShaderFunctions() const {
+ // required for GrGpuGLShaders
+ if (NULL == fAttachShader ||
+ NULL == fBindAttribLocation ||
+ NULL == fCompileShader ||
+ NULL == fCreateProgram ||
+ NULL == fCreateShader ||
+ NULL == fDeleteProgram ||
+ NULL == fDeleteShader ||
+ NULL == fDisableVertexAttribArray ||
+ NULL == fEnableVertexAttribArray ||
+ NULL == fGetProgramInfoLog ||
+ NULL == fGetProgramiv ||
+ NULL == fGetShaderInfoLog ||
+ NULL == fGetShaderiv ||
+ NULL == fGetUniformLocation ||
+ NULL == fLinkProgram ||
+ NULL == fShaderSource ||
+ NULL == fUniform1f ||
+ NULL == fUniform1i ||
+ NULL == fUniform1fv ||
+ NULL == fUniform1iv ||
+ NULL == fUniform2f ||
+ NULL == fUniform2i ||
+ NULL == fUniform2fv ||
+ NULL == fUniform2iv ||
+ NULL == fUniform3f ||
+ NULL == fUniform3i ||
+ NULL == fUniform3fv ||
+ NULL == fUniform3iv ||
+ NULL == fUniform4f ||
+ NULL == fUniform4i ||
+ NULL == fUniform4fv ||
+ NULL == fUniform4iv ||
+ NULL == fUniformMatrix2fv ||
+ NULL == fUniformMatrix3fv ||
+ NULL == fUniformMatrix4fv ||
+ NULL == fUseProgram ||
+ NULL == fVertexAttrib4fv ||
+ NULL == fVertexAttribPointer) {
+ return false;
+ }
+ return true;
+}
+
+bool GrGLInterface::validateFixedFunctions() const {
+ if (NULL == fClientActiveTexture ||
+ NULL == fColor4ub ||
+ NULL == fColorPointer ||
+ NULL == fDisableClientState ||
+ NULL == fEnableClientState ||
+ NULL == fLoadMatrixf ||
+ NULL == fMatrixMode ||
+ NULL == fPointSize ||
+ NULL == fShadeModel ||
+ NULL == fTexCoordPointer ||
+ NULL == fTexEnvi ||
+ NULL == fVertexPointer) {
+ return false;
+ }
+ return true;
+}
+
+// Validates that this interface exports a coherent set of GL function
+// pointers for the requested engine: exactly one of desktop/ES must be
+// claimed, the always-required core + FBO entry points must be present,
+// and any entry point implied by the advertised GL version or extension
+// string must also be non-NULL. Returns false on the first inconsistency.
+bool GrGLInterface::validate(GrEngine engine) const {
+
+    bool isDesktop = this->supportsDesktop();
+
+    bool isES = this->supportsES();
+
+    if (isDesktop == isES) {
+        // must have one, don't support both in same interface
+        return false;
+    }
+
+    // functions that are always required
+    if (NULL == fActiveTexture ||
+        NULL == fBindBuffer ||
+        NULL == fBindTexture ||
+        NULL == fBlendFunc ||
+        NULL == fBufferData ||
+        NULL == fBufferSubData ||
+        NULL == fClear ||
+        NULL == fClearColor ||
+        NULL == fClearStencil ||
+        NULL == fColorMask ||
+        NULL == fCullFace ||
+        NULL == fDeleteBuffers ||
+        NULL == fDeleteTextures ||
+        NULL == fDepthMask ||
+        NULL == fDisable ||
+        NULL == fDrawArrays ||
+        NULL == fDrawElements ||
+        NULL == fEnable ||
+        NULL == fFrontFace ||
+        NULL == fGenBuffers ||
+        NULL == fGenTextures ||
+        NULL == fGetBufferParameteriv ||
+        NULL == fGetError ||
+        NULL == fGetIntegerv ||
+        NULL == fGetString ||
+        NULL == fPixelStorei ||
+        NULL == fReadPixels ||
+        NULL == fScissor ||
+        NULL == fStencilFunc ||
+        NULL == fStencilMask ||
+        NULL == fStencilOp ||
+        NULL == fTexImage2D ||
+        NULL == fTexParameteri ||
+        NULL == fTexSubImage2D ||
+        NULL == fViewport ||
+        NULL == fBindFramebuffer ||
+        NULL == fBindRenderbuffer ||
+        NULL == fCheckFramebufferStatus ||
+        NULL == fDeleteFramebuffers ||
+        NULL == fDeleteRenderbuffers ||
+        NULL == fFramebufferRenderbuffer ||
+        NULL == fFramebufferTexture2D ||
+        NULL == fGetFramebufferAttachmentParameteriv ||
+        NULL == fGetRenderbufferParameteriv ||
+        NULL == fGenFramebuffers ||
+        NULL == fGenRenderbuffers ||
+        NULL == fRenderbufferStorage) {
+        return false;
+    }
+
+    // ES1 (fixed-function-only bindings) cannot drive either engine here.
+    switch (engine) {
+        case kOpenGL_Shaders_GrEngine:
+            if (kES1_GrGLBinding == fBindingsExported) {
+                return false;
+            }
+            if (!this->validateShaderFunctions()) {
+                return false;
+            }
+            break;
+        case kOpenGL_Fixed_GrEngine:
+            if (kES1_GrGLBinding == fBindingsExported) {
+                return false;
+            }
+            if (!this->validateFixedFunctions()) {
+                return false;
+            }
+            break;
+        default:
+            return false;
+    }
+
+    // NOTE(review): fGetString's result is used below without a NULL check;
+    // presumably a context is current whenever validate() runs — confirm.
+    const char* ext;
+    GrGLVersion glVer = GrGLGetVersion(this);
+    ext = (const char*)fGetString(GR_GL_EXTENSIONS);
+
+    // Now check that baseline ES/Desktop fns not covered above are present
+    // and that we have fn pointers for any advertised extensions that we will
+    // try to use.
+
+    // these functions are part of ES2, we assume they are available
+    // On the desktop we assume they are available if the extension
+    // is present or GL version is high enough.
+    if ((kES2_GrGLBinding & fBindingsExported)) {
+        if (NULL == fBlendColor ||
+            NULL == fStencilFuncSeparate ||
+            NULL == fStencilMaskSeparate ||
+            NULL == fStencilOpSeparate) {
+            return false;
+        }
+    } else if (kDesktop_GrGLBinding == fBindingsExported) {
+        if (glVer >= GR_GL_VER(2,0)) {
+            if (NULL == fStencilFuncSeparate ||
+                NULL == fStencilMaskSeparate ||
+                NULL == fStencilOpSeparate) {
+                return false;
+            }
+        }
+        if (glVer >= GR_GL_VER(3,0) && NULL == fBindFragDataLocation) {
+            return false;
+        }
+        if (glVer >= GR_GL_VER(2,0) ||
+            GrGLHasExtensionFromString("GL_ARB_draw_buffers", ext)) {
+            if (NULL == fDrawBuffers) {
+                return false;
+            }
+        }
+        if (glVer >= GR_GL_VER(1,4) ||
+            GrGLHasExtensionFromString("GL_EXT_blend_color", ext)) {
+            if (NULL == fBlendColor) {
+                return false;
+            }
+        }
+    }
+
+    // optional function on desktop before 1.3
+    // i.e. required on ES always; on desktop only when GL >= 1.3 or
+    // GL_ARB_texture_compression is advertised.
+    if (kDesktop_GrGLBinding != fBindingsExported ||
+        (glVer >= GR_GL_VER(1,3) ||
+         GrGLHasExtensionFromString("GL_ARB_texture_compression", ext))) {
+        if (NULL == fCompressedTexImage2D) {
+            return false;
+        }
+    }
+
+    // part of desktop GL, but not ES
+    if (kDesktop_GrGLBinding == fBindingsExported &&
+        (NULL == fLineWidth ||
+         NULL == fGetTexLevelParameteriv ||
+         NULL == fDrawBuffer ||
+         NULL == fReadBuffer)) {
+        return false;
+    }
+
+    // FBO MSAA
+    if (kDesktop_GrGLBinding == fBindingsExported) {
+        // GL 3.0 and the ARB extension have multisample + blit
+        if (glVer >= GR_GL_VER(3,0) || GrGLHasExtensionFromString("GL_ARB_framebuffer_object", ext)) {
+            if (NULL == fRenderbufferStorageMultisample ||
+                NULL == fBlitFramebuffer) {
+                return false;
+            }
+        } else {
+            // pre-3.0 the blit and multisample capabilities come from
+            // separate EXT extensions, so check each independently.
+            if (GrGLHasExtensionFromString("GL_EXT_framebuffer_blit", ext) &&
+                NULL == fBlitFramebuffer) {
+                return false;
+            }
+            if (GrGLHasExtensionFromString("GL_EXT_framebuffer_multisample", ext) &&
+                NULL == fRenderbufferStorageMultisample) {
+                return false;
+            }
+        }
+    } else {
+        // ES multisample paths: Chromium's extension uses the blit model,
+        // Apple's uses an explicit resolve entry point.
+        if (GrGLHasExtensionFromString("GL_CHROMIUM_framebuffer_multisample", ext)) {
+            if (NULL == fRenderbufferStorageMultisample ||
+                NULL == fBlitFramebuffer) {
+                return false;
+            }
+        }
+        if (GrGLHasExtensionFromString("GL_APPLE_framebuffer_multisample", ext)) {
+            if (NULL == fRenderbufferStorageMultisample ||
+                NULL == fResolveMultisampleFramebuffer) {
+                return false;
+            }
+        }
+    }
+
+    // On ES buffer mapping is an extension. On Desktop
+    // buffer mapping was part of original VBO extension
+    // which we require.
+    if (kDesktop_GrGLBinding == fBindingsExported ||
+        GrGLHasExtensionFromString("GL_OES_mapbuffer", ext)) {
+        if (NULL == fMapBuffer ||
+            NULL == fUnmapBuffer) {
+            return false;
+        }
+    }
+
+    // Dual source blending
+    if (kDesktop_GrGLBinding == fBindingsExported &&
+        (glVer >= GR_GL_VER(3,3) ||
+         GrGLHasExtensionFromString("GL_ARB_blend_func_extended", ext))) {
+        if (NULL == fBindFragDataLocationIndexed) {
+            return false;
+        }
+    }
+
+    return true;
+}
+
+
diff --git a/src/gpu/GrGLProgram.cpp b/src/gpu/GrGLProgram.cpp
new file mode 100644
index 0000000000..4b4140d206
--- /dev/null
+++ b/src/gpu/GrGLProgram.cpp
@@ -0,0 +1,1623 @@
+
+/*
+ * Copyright 2011 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+#include "GrGLProgram.h"
+
+#include "GrAllocator.h"
+#include "GrGLShaderVar.h"
+#include "SkTrace.h"
+#include "SkXfermode.h"
+
+namespace {
+
+// Precision qualifier spliced into individual declarations: "mediump" on ES,
+// a bare space on desktop (desktop GLSL 1.20 has no precision qualifiers,
+// and the space lets callers concatenate unconditionally).
+const char* GrPrecision(const GrGLInterface* gl) {
+    if (gl->supportsES()) {
+        return "mediump";
+    } else {
+        return " ";
+    }
+}
+
+// Whole-shader default float precision statement; empty on desktop where
+// no default precision declaration is required (or allowed pre-1.30).
+const char* GrShaderPrecision(const GrGLInterface* gl) {
+    if (gl->supportsES()) {
+        return "precision mediump float;\n";
+    } else {
+        return "";
+    }
+}
+
+}  // namespace
+
+#define PRINT_SHADERS 0
+
+typedef GrTAllocator<GrGLShaderVar> VarArray;
+
+// number of each input/output type in a single allocation block
+static const int gVarsPerBlock = 8;
+// except FS outputs where we expect 2 at most.
+static const int gMaxFSOutputs = 2;
+
+// Accumulates the pieces of the VS/GS/FS source as they are generated:
+// variable declarations (uniforms, attributes, varyings, outputs) live in
+// the VarArrays; the bodies and headers are built up in the string members.
+// CompileShaders() later expands these into the final shader strings.
+struct ShaderCodeSegments {
+    ShaderCodeSegments()
+    : fVSUnis(gVarsPerBlock)
+    , fVSAttrs(gVarsPerBlock)
+    , fVSOutputs(gVarsPerBlock)
+    , fGSInputs(gVarsPerBlock)
+    , fGSOutputs(gVarsPerBlock)
+    , fFSInputs(gVarsPerBlock)
+    , fFSUnis(gVarsPerBlock)
+    , fFSOutputs(gMaxFSOutputs)
+    , fUsesGS(false) {}
+    GrStringBuilder fHeader; // VS+FS, GLSL version, etc
+    VarArray        fVSUnis;
+    VarArray        fVSAttrs;
+    VarArray        fVSOutputs;
+    VarArray        fGSInputs;
+    VarArray        fGSOutputs;
+    VarArray        fFSInputs;
+    GrStringBuilder fGSHeader; // layout qualifiers specific to GS
+    VarArray        fFSUnis;
+    VarArray        fFSOutputs;
+    GrStringBuilder fFSFunctions;
+    GrStringBuilder fVSCode;
+    GrStringBuilder fGSCode;
+    GrStringBuilder fFSCode;
+
+    // true when an (experimental) geometry shader sits between VS and FS,
+    // in which case varyings are routed through fGSInputs/fGSOutputs.
+    bool fUsesGS;
+};
+
+
+#if GR_GL_ATTRIBUTE_MATRICES
+ #define VIEW_MATRIX_NAME "aViewM"
+#else
+ #define VIEW_MATRIX_NAME "uViewM"
+#endif
+
+#define POS_ATTR_NAME "aPosition"
+#define COL_ATTR_NAME "aColor"
+#define COV_ATTR_NAME "aCoverage"
+#define EDGE_ATTR_NAME "aEdge"
+#define COL_UNI_NAME "uColor"
+#define EDGES_UNI_NAME "uEdges"
+#define COL_FILTER_UNI_NAME "uColorFilter"
+
+// Helpers that build the conventional names for per-stage shader variables
+// and map component counts onto GLSL vector types / swizzles.
+namespace {
+// "aTexCoordN" attribute name for tex coord set N.
+inline void tex_attr_name(int coordIdx, GrStringBuilder* s) {
+    *s = "aTexCoord";
+    s->appendS32(coordIdx);
+}
+
+// Maps 1..4 components to kFloat/kVec2f/kVec3f/kVec4f. Relies on the enum
+// values being 0..3 in order, which the static asserts pin down.
+inline GrGLShaderVar::Type float_vector_type(int count) {
+    GR_STATIC_ASSERT(GrGLShaderVar::kFloat_Type == 0);
+    GR_STATIC_ASSERT(GrGLShaderVar::kVec2f_Type == 1);
+    GR_STATIC_ASSERT(GrGLShaderVar::kVec3f_Type == 2);
+    GR_STATIC_ASSERT(GrGLShaderVar::kVec4f_Type == 3);
+    GrAssert(count > 0 && count <= 4);
+    return (GrGLShaderVar::Type)(count - 1);
+}
+
+inline const char* float_vector_type_str(int count) {
+    return GrGLShaderVar::TypeString(float_vector_type(count));
+}
+
+// Swizzle selecting only the homogeneous (last) coordinate; "" for count==1.
+inline const char* vector_homog_coord(int count) {
+    static const char* HOMOGS[] = {"ERROR", "", ".y", ".z", ".w"};
+    GrAssert(count >= 1 && count < (int)GR_ARRAY_COUNT(HOMOGS));
+    return HOMOGS[count];
+}
+
+// Swizzle selecting everything but the homogeneous coordinate.
+inline const char* vector_nonhomog_coords(int count) {
+    static const char* NONHOMOGS[] = {"ERROR", "", ".x", ".xy", ".xyz"};
+    GrAssert(count >= 1 && count < (int)GR_ARRAY_COUNT(NONHOMOGS));
+    return NONHOMOGS[count];
+}
+
+// Swizzle selecting all components; "" for count==1 (scalar needs none).
+inline const char* vector_all_coords(int count) {
+    static const char* ALL[] = {"ERROR", "", ".xy", ".xyz", ".xyzw"};
+    GrAssert(count >= 1 && count < (int)GR_ARRAY_COUNT(ALL));
+    return ALL[count];
+}
+
+// Literal all-ones vector of the given arity, e.g. "vec4(1,1,1,1)".
+inline const char* all_ones_vec(int count) {
+    static const char* ONESVEC[] = {"ERROR", "1.0", "vec2(1,1)",
+                                    "vec3(1,1,1)", "vec4(1,1,1,1)"};
+    GrAssert(count >= 1 && count < (int)GR_ARRAY_COUNT(ONESVEC));
+    return ONESVEC[count];
+}
+
+// Literal all-zeros vector of the given arity, e.g. "vec4(0,0,0,0)".
+inline const char* all_zeros_vec(int count) {
+    static const char* ZEROSVEC[] = {"ERROR", "0.0", "vec2(0,0)",
+                                     "vec3(0,0,0)", "vec4(0,0,0,0)"};
+    GrAssert(count >= 1 && count < (int)GR_ARRAY_COUNT(ZEROSVEC));
+    return ZEROSVEC[count];
+}
+
+// Names of the explicitly declared FS outputs (GLSL >= 1.30 / dual source).
+inline const char* declared_color_output_name() { return "fsColorOut"; }
+inline const char* dual_source_output_name() { return "dualSourceOut"; }
+
+// Per-stage texture matrix name: attribute or uniform depending on build.
+inline void tex_matrix_name(int stage, GrStringBuilder* s) {
+#if GR_GL_ATTRIBUTE_MATRICES
+    *s = "aTexM";
+#else
+    *s = "uTexM";
+#endif
+    s->appendS32(stage);
+}
+
+inline void normalized_texel_size_name(int stage, GrStringBuilder* s) {
+    *s = "uTexelSize";
+    s->appendS32(stage);
+}
+
+inline void sampler_name(int stage, GrStringBuilder* s) {
+    *s = "uSampler";
+    s->appendS32(stage);
+}
+
+inline void radial2_param_name(int stage, GrStringBuilder* s) {
+    *s = "uRadial2Params";
+    s->appendS32(stage);
+}
+
+// Two names for the convolution stage: kernel weights and texel increment.
+inline void convolve_param_names(int stage, GrStringBuilder* k, GrStringBuilder* i) {
+    *k = "uKernel";
+    k->appendS32(stage);
+    *i = "uImageIncrement";
+    i->appendS32(stage);
+}
+
+inline void tex_domain_name(int stage, GrStringBuilder* s) {
+    *s = "uTexDom";
+    s->appendS32(stage);
+}
+}
+
+// Trivial ctor/dtor: the program's state is plain data (fProgramDesc) and
+// GL objects are owned by the CachedData the caller manages, so there is
+// nothing to acquire or release here.
+GrGLProgram::GrGLProgram() {
+}
+
+GrGLProgram::~GrGLProgram() {
+}
+
+// Adjusts the caller's HW blend coefficients for this program's dual-source
+// output mode. All coverage-based dual-source modes force the dst coeff to
+// "one minus second source color" (kIS2C).
+// NOTE: srcCoeff is accepted for symmetry but not modified by any case here.
+void GrGLProgram::overrideBlend(GrBlendCoeff* srcCoeff,
+                                GrBlendCoeff* dstCoeff) const {
+    switch (fProgramDesc.fDualSrcOutput) {
+        case ProgramDesc::kNone_DualSrcOutput:
+            break;
+        // the prog will write a coverage value to the secondary
+        // output and the dst is blended by one minus that value.
+        case ProgramDesc::kCoverage_DualSrcOutput:
+        case ProgramDesc::kCoverageISA_DualSrcOutput:
+        case ProgramDesc::kCoverageISC_DualSrcOutput:
+            *dstCoeff = (GrBlendCoeff)GrGpu::kIS2C_BlendCoeff;
+            break;
+        default:
+            GrCrash("Unexpected dual source blend output");
+            break;
+    }
+}
+
+// assigns modulation of two vars to an output var
+// vars can be vec4s or floats (or one of each)
+// result is always vec4
+// if either var is "" then assign to the other var
+// if both are "" then assign all ones
+static inline void modulate_helper(const char* outputVar,
+                                   const char* var0,
+                                   const char* var1,
+                                   GrStringBuilder* code) {
+    GrAssert(NULL != outputVar);
+    GrAssert(NULL != var0);
+    GrAssert(NULL != var1);
+    GrAssert(NULL != code);
+
+    // empty string means "identity" (multiplicative 1) for that operand
+    bool has0 = '\0' != *var0;
+    bool has1 = '\0' != *var1;
+
+    if (!has0 && !has1) {
+        code->appendf("\t%s = %s;\n", outputVar, all_ones_vec(4));
+    } else if (!has0) {
+        // vec4() constructor also widens a float operand to vec4
+        code->appendf("\t%s = vec4(%s);\n", outputVar, var1);
+    } else if (!has1) {
+        code->appendf("\t%s = vec4(%s);\n", outputVar, var0);
+    } else {
+        code->appendf("\t%s = vec4(%s * %s);\n", outputVar, var0, var1);
+    }
+}
+
+// assigns addition of two vars to an output var
+// vars can be vec4s or floats (or one of each)
+// result is always vec4
+// if either var is "" then assign to the other var
+// if both are "" then assign all zeros
+static inline void add_helper(const char* outputVar,
+                              const char* var0,
+                              const char* var1,
+                              GrStringBuilder* code) {
+    GrAssert(NULL != outputVar);
+    GrAssert(NULL != var0);
+    GrAssert(NULL != var1);
+    GrAssert(NULL != code);
+
+    // empty string means "identity" (additive 0) for that operand
+    bool has0 = '\0' != *var0;
+    bool has1 = '\0' != *var1;
+
+    if (!has0 && !has1) {
+        code->appendf("\t%s = %s;\n", outputVar, all_zeros_vec(4));
+    } else if (!has0) {
+        // vec4() constructor also widens a float operand to vec4
+        code->appendf("\t%s = vec4(%s);\n", outputVar, var1);
+    } else if (!has1) {
+        code->appendf("\t%s = vec4(%s);\n", outputVar, var0);
+    } else {
+        code->appendf("\t%s = vec4(%s + %s);\n", outputVar, var0, var1);
+    }
+}
+
+// given two blend coeffecients determine whether the src
+// and/or dst computation can be omitted.
+// The src value is unneeded only when its own coeff is zero AND the dst
+// coeff does not read the src color/alpha (and symmetrically for dst).
+static inline void needBlendInputs(SkXfermode::Coeff srcCoeff,
+                                   SkXfermode::Coeff dstCoeff,
+                                   bool* needSrcValue,
+                                   bool* needDstValue) {
+    if (SkXfermode::kZero_Coeff == srcCoeff) {
+        switch (dstCoeff) {
+            // these all read the src
+            case SkXfermode::kSC_Coeff:
+            case SkXfermode::kISC_Coeff:
+            case SkXfermode::kSA_Coeff:
+            case SkXfermode::kISA_Coeff:
+                *needSrcValue = true;
+                break;
+            default:
+                *needSrcValue = false;
+                break;
+        }
+    } else {
+        *needSrcValue = true;
+    }
+    if (SkXfermode::kZero_Coeff == dstCoeff) {
+        switch (srcCoeff) {
+            // these all read the dst
+            case SkXfermode::kDC_Coeff:
+            case SkXfermode::kIDC_Coeff:
+            case SkXfermode::kDA_Coeff:
+            case SkXfermode::kIDA_Coeff:
+                *needDstValue = true;
+                break;
+            default:
+                *needDstValue = false;
+                break;
+        }
+    } else {
+        *needDstValue = true;
+    }
+}
+
+/**
+ * Create a blend_coeff * value string to be used in shader code. Sets empty
+ * string if result is trivially zero.
+ *
+ * 'src' and 'dst' are GLSL expressions for the source and destination
+ * colors; 'value' is the expression being scaled by the coefficient.
+ */
+static void blendTermString(GrStringBuilder* str, SkXfermode::Coeff coeff,
+                            const char* src, const char* dst,
+                            const char* value) {
+    switch (coeff) {
+    case SkXfermode::kZero_Coeff:    /** 0 */
+        *str = "";
+        break;
+    case SkXfermode::kOne_Coeff:     /** 1 */
+        *str = value;
+        break;
+    case SkXfermode::kSC_Coeff:
+        str->printf("(%s * %s)", src, value);
+        break;
+    case SkXfermode::kISC_Coeff:
+        str->printf("((%s - %s) * %s)", all_ones_vec(4), src, value);
+        break;
+    case SkXfermode::kDC_Coeff:
+        str->printf("(%s * %s)", dst, value);
+        break;
+    case SkXfermode::kIDC_Coeff:
+        str->printf("((%s - %s) * %s)", all_ones_vec(4), dst, value);
+        break;
+    case SkXfermode::kSA_Coeff:      /** src alpha */
+        str->printf("(%s.a * %s)", src, value);
+        break;
+    case SkXfermode::kISA_Coeff:     /** inverse src alpha (i.e. 1 - sa) */
+        str->printf("((1.0 - %s.a) * %s)", src, value);
+        break;
+    case SkXfermode::kDA_Coeff:      /** dst alpha */
+        str->printf("(%s.a * %s)", dst, value);
+        break;
+    case SkXfermode::kIDA_Coeff:     /** inverse dst alpha (i.e. 1 - da) */
+        str->printf("((1.0 - %s.a) * %s)", dst, value);
+        break;
+    default:
+        GrCrash("Unexpected xfer coeff.");
+        break;
+    }
+}
+/**
+ * Adds a line to the fragment shader code which modifies the color by
+ * the specified color filter.
+ *
+ * Emits outputVar = uniformCoeff*filterColor + colorCoeff*inColor, where
+ * the filter color lives in the COL_FILTER_UNI_NAME uniform. Either term
+ * may collapse to nothing via blendTermString's empty-string convention.
+ */
+static void addColorFilter(GrStringBuilder* fsCode, const char * outputVar,
+                           SkXfermode::Coeff uniformCoeff,
+                           SkXfermode::Coeff colorCoeff,
+                           const char* inColor) {
+    GrStringBuilder colorStr, constStr;
+    blendTermString(&colorStr, colorCoeff, COL_FILTER_UNI_NAME,
+                    inColor, inColor);
+    blendTermString(&constStr, uniformCoeff, COL_FILTER_UNI_NAME,
+                    inColor, COL_FILTER_UNI_NAME);
+
+    add_helper(outputVar, colorStr.c_str(), constStr.c_str(), fsCode);
+}
+
+namespace {
+
+// Returns the "#version" directive matching the requested GLSL dialect.
+// On ES the 1.20-level dialect maps to "#version 100" (GLSL ES 1.00).
+const char* glsl_version_string(const GrGLInterface* gl,
+                                GrGLProgram::GLSLVersion v) {
+    switch (v) {
+        case GrGLProgram::k120_GLSLVersion:
+            if (gl->supportsES()) {
+                // ES2s shader language is based on version 1.20 but is version
+                // 1.00 of the ES language.
+                return "#version 100\n";
+            } else {
+                return "#version 120\n";
+            }
+        case GrGLProgram::k130_GLSLVersion:
+            GrAssert(!gl->supportsES());
+            return "#version 130\n";
+        case GrGLProgram::k150_GLSLVersion:
+            GrAssert(!gl->supportsES());
+            return "#version 150\n";
+        default:
+            GrCrash("Unknown GL version.");
+            return ""; // suppress warning
+    }
+}
+
+// Adds a var that is computed in the VS and read in FS.
+// If there is a GS it will just pass it through.
+// The VS output is named "v<name>"; when a GS is present the FS reads
+// "g<name>" instead. Optionally returns both names to the caller.
+void append_varying(GrGLShaderVar::Type type,
+                    const char* name,
+                    ShaderCodeSegments* segments,
+                    const char** vsOutName = NULL,
+                    const char** fsInName = NULL) {
+    segments->fVSOutputs.push_back();
+    segments->fVSOutputs.back().setType(type);
+    segments->fVSOutputs.back().accessName()->printf("v%s", name);
+    if (vsOutName) {
+        *vsOutName = segments->fVSOutputs.back().getName().c_str();
+    }
+    // input to FS comes either from VS or GS
+    const GrStringBuilder* fsName;
+    if (segments->fUsesGS) {
+        // if we have a GS take each varying in as an array
+        // and output as non-array.
+        segments->fGSInputs.push_back();
+        segments->fGSInputs.back().setType(type);
+        segments->fGSInputs.back().setUnsizedArray();
+        *segments->fGSInputs.back().accessName() =
+            segments->fVSOutputs.back().getName();
+        segments->fGSOutputs.push_back();
+        segments->fGSOutputs.back().setType(type);
+        segments->fGSOutputs.back().accessName()->printf("g%s", name);
+        fsName = segments->fGSOutputs.back().accessName();
+    } else {
+        fsName = segments->fVSOutputs.back().accessName();
+    }
+    segments->fFSInputs.push_back();
+    segments->fFSInputs.back().setType(type);
+    segments->fFSInputs.back().setName(*fsName);
+    if (fsInName) {
+        *fsInName = fsName->c_str();
+    }
+}
+
+// version of above that adds a stage number to the
+// the var name (for uniqueness)
+void append_varying(GrGLShaderVar::Type type,
+                    const char* name,
+                    int stageNum,
+                    ShaderCodeSegments* segments,
+                    const char** vsOutName = NULL,
+                    const char** fsInName = NULL) {
+    GrStringBuilder nameWithStage(name);
+    nameWithStage.appendS32(stageNum);
+    append_varying(type, nameWithStage.c_str(), segments, vsOutName, fsInName);
+}
+}
+
+// Emits FS code computing an anti-aliasing coverage value and returns the
+// name of the resulting variable in *coverageVar (empty if no edge AA).
+// Three cases: (1) edge-equation AA driven by the uEdges uniform array,
+// (2) per-vertex edge data (hairline / hair-quad), (3) none.
+void GrGLProgram::genEdgeCoverage(const GrGLInterface* gl,
+                                  GrVertexLayout layout,
+                                  CachedData* programData,
+                                  GrStringBuilder* coverageVar,
+                                  ShaderCodeSegments* segments) const {
+    if (fProgramDesc.fEdgeAANumEdges > 0) {
+        segments->fFSUnis.push_back().set(GrGLShaderVar::kVec3f_Type,
+                                          EDGES_UNI_NAME,
+                                          fProgramDesc.fEdgeAANumEdges);
+        programData->fUniLocations.fEdgesUni = kUseUniform;
+        int count = fProgramDesc.fEdgeAANumEdges;
+        segments->fFSCode.append(
+                "\tvec3 pos = vec3(gl_FragCoord.xy, 1);\n");
+        // a<i> = clamped signed distance of this fragment to edge i
+        for (int i = 0; i < count; i++) {
+            segments->fFSCode.append("\tfloat a");
+            segments->fFSCode.appendS32(i);
+            segments->fFSCode.append(" = clamp(dot(" EDGES_UNI_NAME "[");
+            segments->fFSCode.appendS32(i);
+            segments->fFSCode.append("], pos), 0.0, 1.0);\n");
+        }
+        if (fProgramDesc.fEdgeAAConcave && (count & 0x01) == 0) {
+            // For concave polys, we consider the edges in pairs.
+            segments->fFSFunctions.append("float cross2(vec2 a, vec2 b) {\n");
+            segments->fFSFunctions.append("\treturn dot(a, vec2(b.y, -b.x));\n");
+            segments->fFSFunctions.append("}\n");
+            // eb<k> combines a pair: intersection (a*b) for convex corners,
+            // union (a+b-a*b) for reflex ones, chosen by the edges' cross sign.
+            for (int i = 0; i < count; i += 2) {
+                segments->fFSCode.appendf("\tfloat eb%d;\n", i / 2);
+                segments->fFSCode.appendf("\tif (cross2(" EDGES_UNI_NAME "[%d].xy, " EDGES_UNI_NAME "[%d].xy) < 0.0) {\n", i, i + 1);
+                segments->fFSCode.appendf("\t\teb%d = a%d * a%d;\n", i / 2, i, i + 1);
+                segments->fFSCode.append("\t} else {\n");
+                segments->fFSCode.appendf("\t\teb%d = a%d + a%d - a%d * a%d;\n", i / 2, i, i + 1, i, i + 1);
+                segments->fFSCode.append("\t}\n");
+            }
+            // edgeAlpha = min over all pair terms
+            segments->fFSCode.append("\tfloat edgeAlpha = ");
+            for (int i = 0; i < count / 2 - 1; i++) {
+                segments->fFSCode.appendf("min(eb%d, ", i);
+            }
+            segments->fFSCode.appendf("eb%d", count / 2 - 1);
+            for (int i = 0; i < count / 2 - 1; i++) {
+                segments->fFSCode.append(")");
+            }
+            segments->fFSCode.append(";\n");
+        } else {
+            // convex: edgeAlpha = min over products of adjacent edge distances
+            segments->fFSCode.append("\tfloat edgeAlpha = ");
+            for (int i = 0; i < count - 1; i++) {
+                segments->fFSCode.appendf("min(a%d * a%d, ", i, i + 1);
+            }
+            segments->fFSCode.appendf("a%d * a0", count - 1);
+            for (int i = 0; i < count - 1; i++) {
+                segments->fFSCode.append(")");
+            }
+            segments->fFSCode.append(";\n");
+        }
+        *coverageVar = "edgeAlpha";
+    } else if (layout & GrDrawTarget::kEdge_VertexLayoutBit) {
+        const char *vsName, *fsName;
+        append_varying(GrGLShaderVar::kVec4f_Type, "Edge", segments, &vsName, &fsName);
+        segments->fVSAttrs.push_back().set(GrGLShaderVar::kVec4f_Type, EDGE_ATTR_NAME);
+        segments->fVSCode.appendf("\t%s = " EDGE_ATTR_NAME ";\n", vsName);
+        if (GrDrawTarget::kHairLine_EdgeType == fProgramDesc.fVertexEdgeType) {
+            // distance of fragment from the line stored in the edge varying
+            segments->fFSCode.appendf("\tfloat edgeAlpha = abs(dot(vec3(gl_FragCoord.xy,1), %s.xyz));\n", fsName);
+        } else {
+            GrAssert(GrDrawTarget::kHairQuad_EdgeType == fProgramDesc.fVertexEdgeType);
+            // for now we know we're not in perspective, so we could compute this
+            // per-quadratic rather than per pixel
+            segments->fFSCode.appendf("\tvec2 duvdx = dFdx(%s.xy);\n", fsName);
+            segments->fFSCode.appendf("\tvec2 duvdy = dFdy(%s.xy);\n", fsName);
+            segments->fFSCode.appendf("\tfloat dfdx = 2.0*%s.x*duvdx.x - duvdx.y;\n", fsName);
+            segments->fFSCode.appendf("\tfloat dfdy = 2.0*%s.x*duvdy.x - duvdy.y;\n", fsName);
+            segments->fFSCode.appendf("\tfloat edgeAlpha = (%s.x*%s.x - %s.y);\n", fsName, fsName, fsName);
+            segments->fFSCode.append("\tedgeAlpha = sqrt(edgeAlpha*edgeAlpha / (dfdx*dfdx + dfdy*dfdy));\n");
+            if (gl->supportsES()) {
+                // NOTE(review): printf REPLACES fHeader's contents; by the time
+                // this runs genProgram has already put the #version line there,
+                // so this looks like it clobbers it — presumably append was
+                // intended. Verify against GrStringBuilder semantics.
+                segments->fHeader.printf("#extension GL_OES_standard_derivatives: enable\n");
+            }
+        }
+        segments->fFSCode.append("\tedgeAlpha = max(1.0 - edgeAlpha, 0.0);\n");
+        *coverageVar = "edgeAlpha";
+    } else {
+        coverageVar->reset();
+    }
+}
+
+namespace {
+
+// returns true if the color output was explicitly declared or not.
+// GLSL 1.20 uses the built-in gl_FragColor; 1.30+ requires an explicitly
+// declared "out vec4", which is appended to fsOutputs.
+bool decl_and_get_fs_color_output(GrGLProgram::GLSLVersion v,
+                                  VarArray* fsOutputs,
+                                  const char** name) {
+    switch (v) {
+        case GrGLProgram::k120_GLSLVersion:
+            *name = "gl_FragColor";
+            return false;
+            break;
+        case GrGLProgram::k130_GLSLVersion: // fallthru
+        case GrGLProgram::k150_GLSLVersion:
+            *name = declared_color_output_name();
+            fsOutputs->push_back().set(GrGLShaderVar::kVec4f_Type,
+                                       declared_color_output_name());
+            return true;
+            break;
+        default:
+            GrCrash("Unknown GLSL version.");
+            return false; // suppress warning
+    }
+}
+
+}
+
+// Emits a pass-through geometry shader (triangles in, triangle strip out)
+// that forwards position, optional point size, and every varying from the
+// GS input arrays to the GS outputs. Only active when the experimental GS
+// feature is compiled in AND enabled in the program desc; otherwise a no-op.
+void GrGLProgram::genGeometryShader(const GrGLInterface* gl,
+                                    GLSLVersion glslVersion,
+                                    ShaderCodeSegments* segments) const {
+#if GR_GL_EXPERIMENTAL_GS
+    if (fProgramDesc.fExperimentalGS) {
+        // GS layout qualifiers require GLSL 1.50+
+        GrAssert(glslVersion >= k150_GLSLVersion);
+        segments->fGSHeader.append("layout(triangles) in;\n"
+                                   "layout(triangle_strip, max_vertices = 6) out;\n");
+        segments->fGSCode.append("void main() {\n"
+                                 "\tfor (int i = 0; i < 3; ++i) {\n"
+                                 "\t\tgl_Position = gl_in[i].gl_Position;\n");
+        if (this->fProgramDesc.fEmitsPointSize) {
+            segments->fGSCode.append("\t\tgl_PointSize = 1.0;\n");
+        }
+        GrAssert(segments->fGSInputs.count() == segments->fGSOutputs.count());
+        int count = segments->fGSInputs.count();
+        for (int i = 0; i < count; ++i) {
+            segments->fGSCode.appendf("\t\t%s = %s[i];\n",
+                                      segments->fGSOutputs[i].getName().c_str(),
+                                      segments->fGSInputs[i].getName().c_str());
+        }
+        segments->fGSCode.append("\t\tEmitVertex();\n"
+                                 "\t}\n"
+                                 "\tEndPrimitive();\n"
+                                 "}\n");
+    }
+#endif
+}
+
+// Generates the GLSL for this program desc (vertex, optional geometry, and
+// fragment shaders), compiles and links them, and records attribute/uniform
+// locations in programData. Returns false if compilation or linking fails.
+// The FS is assembled as: computed color -> color filter -> coverage
+// (edge AA, per-vertex, coverage stages) -> optional dual-source output ->
+// final modulate of color by coverage.
+bool GrGLProgram::genProgram(const GrGLInterface* gl,
+                             GLSLVersion glslVersion,
+                             GrGLProgram::CachedData* programData) const {
+
+    ShaderCodeSegments segments;
+    const uint32_t& layout = fProgramDesc.fVertexLayout;
+
+    programData->fUniLocations.reset();
+
+#if GR_GL_EXPERIMENTAL_GS
+    segments.fUsesGS = fProgramDesc.fExperimentalGS;
+#endif
+
+    SkXfermode::Coeff colorCoeff, uniformCoeff;
+    // The rest of transfer mode color filters have not been implemented
+    if (fProgramDesc.fColorFilterXfermode < SkXfermode::kCoeffModesCnt) {
+        GR_DEBUGCODE(bool success =)
+            SkXfermode::ModeAsCoeff(static_cast<SkXfermode::Mode>
+                                    (fProgramDesc.fColorFilterXfermode),
+                                    &uniformCoeff, &colorCoeff);
+        GR_DEBUGASSERT(success);
+    } else {
+        // non-coeff modes fall back to "pass the color through unfiltered"
+        colorCoeff = SkXfermode::kOne_Coeff;
+        uniformCoeff = SkXfermode::kZero_Coeff;
+    }
+
+    // If we know the final color is going to be all zeros then we can
+    // simplify the color filter coeffecients. needComputedColor will then
+    // come out false below.
+    if (ProgramDesc::kTransBlack_ColorType == fProgramDesc.fColorType) {
+        colorCoeff = SkXfermode::kZero_Coeff;
+        if (SkXfermode::kDC_Coeff == uniformCoeff ||
+            SkXfermode::kDA_Coeff == uniformCoeff) {
+            uniformCoeff = SkXfermode::kZero_Coeff;
+        } else if (SkXfermode::kIDC_Coeff == uniformCoeff ||
+                   SkXfermode::kIDA_Coeff == uniformCoeff) {
+            uniformCoeff = SkXfermode::kOne_Coeff;
+        }
+    }
+
+    bool needColorFilterUniform;
+    bool needComputedColor;
+    needBlendInputs(uniformCoeff, colorCoeff,
+                    &needColorFilterUniform, &needComputedColor);
+
+    // the dual source output has no canonical var name, have to
+    // declare an output, which is incompatible with gl_FragColor/gl_FragData.
+    const char* fsColorOutput = NULL;
+    bool dualSourceOutputWritten = false;
+    // NOTE(review): the returned version string is used as a printf FORMAT
+    // string; safe only while no "#version" string contains '%'.
+    segments.fHeader.printf(glsl_version_string(gl, glslVersion));
+    bool isColorDeclared = decl_and_get_fs_color_output(glslVersion,
+                                                        &segments.fFSOutputs,
+                                                        &fsColorOutput);
+
+#if GR_GL_ATTRIBUTE_MATRICES
+    segments.fVSAttrs.push_back().set(GrGLShaderVar::kMat33f_Type, VIEW_MATRIX_NAME);
+    programData->fUniLocations.fViewMatrixUni = kSetAsAttribute;
+#else
+    segments.fVSUnis.push_back().set(GrGLShaderVar::kMat33f_Type, VIEW_MATRIX_NAME);
+    programData->fUniLocations.fViewMatrixUni = kUseUniform;
+#endif
+    segments.fVSAttrs.push_back().set(GrGLShaderVar::kVec2f_Type, POS_ATTR_NAME);
+
+    // 2D position through a 3x3 (projective) view matrix; z carries w
+    segments.fVSCode.append(
+        "void main() {\n"
+        "\tvec3 pos3 = " VIEW_MATRIX_NAME " * vec3("POS_ATTR_NAME", 1);\n"
+        "\tgl_Position = vec4(pos3.xy, 0, pos3.z);\n");
+
+    // incoming color to current stage being processed.
+    GrStringBuilder inColor;
+
+    if (needComputedColor) {
+        switch (fProgramDesc.fColorType) {
+            case ProgramDesc::kAttribute_ColorType: {
+                segments.fVSAttrs.push_back().set(GrGLShaderVar::kVec4f_Type,
+                                                  COL_ATTR_NAME);
+                const char *vsName, *fsName;
+                append_varying(GrGLShaderVar::kVec4f_Type, "Color", &segments, &vsName, &fsName);
+                segments.fVSCode.appendf("\t%s = " COL_ATTR_NAME ";\n", vsName);
+                inColor = fsName;
+                } break;
+            case ProgramDesc::kUniform_ColorType:
+                segments.fFSUnis.push_back().set(GrGLShaderVar::kVec4f_Type,
+                                                 COL_UNI_NAME);
+                programData->fUniLocations.fColorUni = kUseUniform;
+                inColor = COL_UNI_NAME;
+                break;
+            case ProgramDesc::kTransBlack_ColorType:
+                GrAssert(!"needComputedColor should be false.");
+                break;
+            case ProgramDesc::kSolidWhite_ColorType:
+                break;
+            default:
+                GrCrash("Unknown color type.");
+                break;
+        }
+    }
+
+    // we output point size in the GS if present
+    if (fProgramDesc.fEmitsPointSize && !segments.fUsesGS){
+        segments.fVSCode.append("\tgl_PointSize = 1.0;\n");
+    }
+
+    segments.fFSCode.append("void main() {\n");
+
+    // add texture coordinates that are used to the list of vertex attr decls
+    GrStringBuilder texCoordAttrs[GrDrawTarget::kMaxTexCoords];
+    for (int t = 0; t < GrDrawTarget::kMaxTexCoords; ++t) {
+        if (GrDrawTarget::VertexUsesTexCoordIdx(t, layout)) {
+            tex_attr_name(t, texCoordAttrs + t);
+            segments.fVSAttrs.push_back().set(GrGLShaderVar::kVec2f_Type,
+                texCoordAttrs[t].c_str());
+        }
+    }
+
+    ///////////////////////////////////////////////////////////////////////////
+    // compute the final color
+
+    // if we have color stages string them together, feeding the output color
+    // of each to the next and generating code for each stage.
+    if (needComputedColor) {
+        GrStringBuilder outColor;
+        for (int s = 0; s < fProgramDesc.fFirstCoverageStage; ++s) {
+            if (fProgramDesc.fStages[s].isEnabled()) {
+                // create var to hold stage result
+                outColor = "color";
+                outColor.appendS32(s);
+                segments.fFSCode.appendf("\tvec4 %s;\n", outColor.c_str());
+
+                const char* inCoords;
+                // figure out what our input coords are
+                if (GrDrawTarget::StagePosAsTexCoordVertexLayoutBit(s) &
+                    layout) {
+                    inCoords = POS_ATTR_NAME;
+                } else {
+                    int tcIdx = GrDrawTarget::VertexTexCoordsForStage(s, layout);
+                    // we better have input tex coordinates if stage is enabled.
+                    GrAssert(tcIdx >= 0);
+                    GrAssert(texCoordAttrs[tcIdx].size());
+                    inCoords = texCoordAttrs[tcIdx].c_str();
+                }
+
+                genStageCode(gl,
+                             s,
+                             fProgramDesc.fStages[s],
+                             inColor.size() ? inColor.c_str() : NULL,
+                             outColor.c_str(),
+                             inCoords,
+                             &segments,
+                             &programData->fUniLocations.fStages[s]);
+                inColor = outColor;
+            }
+        }
+    }
+
+    // if have all ones or zeros for the "dst" input to the color filter then we
+    // may be able to make additional optimizations.
+    if (needColorFilterUniform && needComputedColor && !inColor.size()) {
+        GrAssert(ProgramDesc::kSolidWhite_ColorType == fProgramDesc.fColorType);
+        bool uniformCoeffIsZero = SkXfermode::kIDC_Coeff == uniformCoeff ||
+                                  SkXfermode::kIDA_Coeff == uniformCoeff;
+        if (uniformCoeffIsZero) {
+            uniformCoeff = SkXfermode::kZero_Coeff;
+            bool bogus;
+            needBlendInputs(SkXfermode::kZero_Coeff, colorCoeff,
+                            &needColorFilterUniform, &bogus);
+        }
+    }
+    if (needColorFilterUniform) {
+        segments.fFSUnis.push_back().set(GrGLShaderVar::kVec4f_Type,
+                                         COL_FILTER_UNI_NAME);
+        programData->fUniLocations.fColorFilterUni = kUseUniform;
+    }
+
+    bool wroteFragColorZero = false;
+    if (SkXfermode::kZero_Coeff == uniformCoeff &&
+        SkXfermode::kZero_Coeff == colorCoeff) {
+        // filtered color is statically zero; emit it directly
+        segments.fFSCode.appendf("\t%s = %s;\n",
+                                 fsColorOutput,
+                                 all_zeros_vec(4));
+        wroteFragColorZero = true;
+    } else if (SkXfermode::kDst_Mode != fProgramDesc.fColorFilterXfermode) {
+        segments.fFSCode.appendf("\tvec4 filteredColor;\n");
+        const char* color;
+        if (inColor.size()) {
+            color = inColor.c_str();
+        } else {
+            if (ProgramDesc::kSolidWhite_ColorType == fProgramDesc.fColorType) {
+                color = all_ones_vec(4);
+            } else {
+                color = all_zeros_vec(4);
+            }
+        }
+        addColorFilter(&segments.fFSCode, "filteredColor", uniformCoeff,
+                       colorCoeff, color);
+        inColor = "filteredColor";
+    }
+
+    ///////////////////////////////////////////////////////////////////////////
+    // compute the partial coverage (coverage stages and edge aa)
+
+    GrStringBuilder inCoverage;
+
+    // we don't need to compute coverage at all if we know the final shader
+    // output will be zero and we don't have a dual src blend output.
+    if (!wroteFragColorZero ||
+        ProgramDesc::kNone_DualSrcOutput != fProgramDesc.fDualSrcOutput) {
+
+        // get edge AA coverage and use it as inCoverage to first coverage stage
+        this->genEdgeCoverage(gl, layout, programData, &inCoverage, &segments);
+
+        // include explicit per-vertex coverage if we have it
+        if (GrDrawTarget::kCoverage_VertexLayoutBit & layout) {
+            segments.fVSAttrs.push_back().set(GrGLShaderVar::kFloat_Type,
+                                              COV_ATTR_NAME);
+            const char *vsName, *fsName;
+            append_varying(GrGLShaderVar::kFloat_Type, "Coverage",
+                           &segments, &vsName, &fsName);
+            segments.fVSCode.appendf("\t%s = " COV_ATTR_NAME ";\n", vsName);
+            if (inCoverage.size()) {
+                segments.fFSCode.appendf("\tfloat edgeAndAttrCov = %s * %s;\n",
+                                         fsName, inCoverage.c_str());
+                inCoverage = "edgeAndAttrCov";
+            } else {
+                inCoverage = fsName;
+            }
+        }
+
+        // chain the coverage stages the same way the color stages were chained
+        GrStringBuilder outCoverage;
+        const int& startStage = fProgramDesc.fFirstCoverageStage;
+        for (int s = startStage; s < GrDrawTarget::kNumStages; ++s) {
+            if (fProgramDesc.fStages[s].isEnabled()) {
+                // create var to hold stage output
+                outCoverage = "coverage";
+                outCoverage.appendS32(s);
+                segments.fFSCode.appendf("\tvec4 %s;\n", outCoverage.c_str());
+
+                const char* inCoords;
+                // figure out what our input coords are
+                if (GrDrawTarget::StagePosAsTexCoordVertexLayoutBit(s) & layout) {
+                    inCoords = POS_ATTR_NAME;
+                } else {
+                    int tcIdx = GrDrawTarget::VertexTexCoordsForStage(s, layout);
+                    // we better have input tex coordinates if stage is enabled.
+                    GrAssert(tcIdx >= 0);
+                    GrAssert(texCoordAttrs[tcIdx].size());
+                    inCoords = texCoordAttrs[tcIdx].c_str();
+                }
+
+                genStageCode(gl, s,
+                             fProgramDesc.fStages[s],
+                             inCoverage.size() ? inCoverage.c_str() : NULL,
+                             outCoverage.c_str(),
+                             inCoords,
+                             &segments,
+                             &programData->fUniLocations.fStages[s]);
+                inCoverage = outCoverage;
+            }
+        }
+        if (ProgramDesc::kNone_DualSrcOutput != fProgramDesc.fDualSrcOutput) {
+            segments.fFSOutputs.push_back().set(GrGLShaderVar::kVec4f_Type,
+                                                dual_source_output_name());
+            bool outputIsZero = false;
+            GrStringBuilder coeff;
+            if (ProgramDesc::kCoverage_DualSrcOutput !=
+                fProgramDesc.fDualSrcOutput && !wroteFragColorZero) {
+                if (!inColor.size()) {
+                    outputIsZero = true;
+                } else {
+                    // ISA scales by (1 - srcAlpha); ISC by (1 - srcColor)
+                    if (fProgramDesc.fDualSrcOutput ==
+                        ProgramDesc::kCoverageISA_DualSrcOutput) {
+                        coeff.printf("(1 - %s.a)", inColor.c_str());
+                    } else {
+                        coeff.printf("(vec4(1,1,1,1) - %s)", inColor.c_str());
+                    }
+                }
+            }
+            if (outputIsZero) {
+                segments.fFSCode.appendf("\t%s = %s;\n",
+                                         dual_source_output_name(),
+                                         all_zeros_vec(4));
+            } else {
+                modulate_helper(dual_source_output_name(),
+                                coeff.c_str(),
+                                inCoverage.c_str(),
+                                &segments.fFSCode);
+            }
+            dualSourceOutputWritten = true;
+        }
+    }
+
+    ///////////////////////////////////////////////////////////////////////////
+    // combine color and coverage as frag color
+
+    if (!wroteFragColorZero) {
+        modulate_helper(fsColorOutput,
+                        inColor.c_str(),
+                        inCoverage.c_str(),
+                        &segments.fFSCode);
+    }
+
+    segments.fVSCode.append("}\n");
+    segments.fFSCode.append("}\n");
+
+    ///////////////////////////////////////////////////////////////////////////
+    // insert GS
+    // NOTE(review): GS generation is compiled only under GR_DEBUG here, so
+    // release builds never insert the (experimental) GS — confirm this
+    // gating is intentional and not a leftover debug guard.
+#if GR_DEBUG
+    this->genGeometryShader(gl, glslVersion, &segments);
+#endif
+
+    ///////////////////////////////////////////////////////////////////////////
+    // compile and setup attribs and unis
+
+    if (!CompileShaders(gl, glslVersion, segments, programData)) {
+        return false;
+    }
+
+    if (!this->bindOutputsAttribsAndLinkProgram(gl, texCoordAttrs,
+                                                isColorDeclared,
+                                                dualSourceOutputWritten,
+                                                programData)) {
+        return false;
+    }
+
+    this->getUniformLocationsAndInitCache(gl, programData);
+
+    return true;
+}
+
+namespace {
+
+// Emits one GLSL declaration per element of vars, each written as
+// "<prefix> <declaration>;\n", appending the text to string.
+inline void expand_decls(const VarArray& vars,
+                         const GrGLInterface* gl,
+                         const char* prefix,
+                         GrStringBuilder* string) {
+    for (int idx = 0; idx < vars.count(); ++idx) {
+        string->append(prefix);
+        string->append(" ");
+        vars[idx].appendDecl(gl, string);
+        string->append(";\n");
+    }
+}
+
+// Prints the shader source strings previously handed to glShaderSource.
+// A NULL stringLengths array or a negative entry means the corresponding
+// string is NUL-terminated (same convention as glShaderSource).
+inline void print_shader(int stringCnt,
+                         const char** strings,
+                         int* stringLengths) {
+    for (int i = 0; i < stringCnt; ++i) {
+        if (NULL == stringLengths || stringLengths[i] < 0) {
+            // Print via an explicit "%s" so '%' characters inside the
+            // shader source are not interpreted as format specifiers.
+            GrPrintf("%s", strings[i]);
+        } else {
+            GrPrintf("%.*s", stringLengths[i], strings[i]);
+        }
+    }
+}
+
+// Parallel arrays of C-string pointers and lengths, in the exact form
+// expected by glShaderSource.
+typedef SkTArray<const char*, true> StrArray;
+#define PREALLOC_STR_ARRAY(N) SkSTArray<(N), const char*, true>
+
+typedef SkTArray<int, true> LengthArray;
+#define PREALLOC_LENGTH_ARRAY(N) SkSTArray<(N), int, true>
+
+// Backing storage for strings built on the fly. These must not relocate
+// when the allocator grows, because StrArray holds raw pointers into them
+// (GrTAllocator guarantees stable addresses; a plain array would not).
+typedef GrTAllocator<GrStringBuilder> TempArray;
+#define PREALLOC_TEMP_ARRAY(N) GrSTAllocator<(N), GrStringBuilder>
+
+// Records str's character pointer and length in the parallel arrays that
+// feed glShaderSource. Empty strings are skipped so the driver never sees
+// a zero-length source fragment.
+inline void append_string(const GrStringBuilder& str,
+                          StrArray* strings,
+                          LengthArray* lengths) {
+    const int len = (int) str.size();
+    if (0 != len) {
+        strings->push_back(str.c_str());
+        lengths->push_back(len);
+    }
+    GrAssert(strings->count() == lengths->count());
+}
+
+// Expands vars into GLSL declaration text and records that text in the
+// strs/lengths arrays. The expanded text is stored in a freshly pushed
+// element of temp, which must keep a stable address (see TempArray note
+// above) because append_string captures a raw pointer into it.
+inline void append_decls(const VarArray& vars,
+                         const GrGLInterface* gl,
+                         const char* prefix,
+                         StrArray* strings,
+                         LengthArray* lengths,
+                         TempArray* temp) {
+    expand_decls(vars, gl, prefix, &temp->push_back());
+    append_string(temp->back(), strings, lengths);
+}
+
+}
+
+// Assembles the vertex, (optional) geometry, and fragment shader sources
+// from the code segments and compiles each one, storing the resulting
+// shader IDs in programData. Declaration qualifiers ("varying"/"attribute"
+// vs. "in"/"out") are selected based on the GLSL version. Returns false if
+// the vertex or fragment shader fails to compile; a geometry shader
+// compile failure leaves fGShaderID == 0.
+bool GrGLProgram::CompileShaders(const GrGLInterface* gl,
+                                 GLSLVersion glslVersion,
+                                 const ShaderCodeSegments& segments,
+                                 CachedData* programData) {
+    enum { kPreAllocStringCnt = 8 };
+
+    PREALLOC_STR_ARRAY(kPreAllocStringCnt) strs;
+    PREALLOC_LENGTH_ARRAY(kPreAllocStringCnt) lengths;
+    PREALLOC_TEMP_ARRAY(kPreAllocStringCnt) temps;
+
+    // GLSL 1.20 uses "varying" for both the VS output and the FS input;
+    // newer versions use "out" in the VS and "in" in the FS.
+    static const char* gVaryingPrefixes[2][2] = {{"varying", "varying"},
+                                                 {"out", "in"}};
+    const char** varyingPrefixes = k120_GLSLVersion == glslVersion ?
+                                                    gVaryingPrefixes[0] :
+                                                    gVaryingPrefixes[1];
+    const char* attributePrefix = k120_GLSLVersion == glslVersion ?
+                                                    "attribute" :
+                                                    "in";
+
+    // vertex shader
+    append_string(segments.fHeader, &strs, &lengths);
+    append_decls(segments.fVSUnis, gl, "uniform", &strs, &lengths, &temps);
+    append_decls(segments.fVSAttrs, gl, attributePrefix, &strs, &lengths, &temps);
+    append_decls(segments.fVSOutputs, gl, varyingPrefixes[0], &strs, &lengths, &temps);
+    append_string(segments.fVSCode, &strs, &lengths);
+
+#if PRINT_SHADERS
+    print_shader(strs.count(), &strs[0], &lengths[0]);
+    GrPrintf("\n");
+#endif
+
+    programData->fVShaderID =
+        CompileShader(gl, GR_GL_VERTEX_SHADER, strs.count(),
+                      &strs[0], &lengths[0]);
+
+    if (!programData->fVShaderID) {
+        return false;
+    }
+
+    // geometry shader (experimental, only emitted when fUsesGS is set)
+    if (segments.fUsesGS) {
+        strs.reset();
+        lengths.reset();
+        temps.reset();
+        append_string(segments.fHeader, &strs, &lengths);
+        append_string(segments.fGSHeader, &strs, &lengths);
+        append_decls(segments.fGSInputs, gl, "in", &strs, &lengths, &temps);
+        append_decls(segments.fGSOutputs, gl, "out", &strs, &lengths, &temps);
+        append_string(segments.fGSCode, &strs, &lengths);
+#if PRINT_SHADERS
+        print_shader(strs.count(), &strs[0], &lengths[0]);
+        GrPrintf("\n");
+#endif
+        programData->fGShaderID =
+            CompileShader(gl, GR_GL_GEOMETRY_SHADER, strs.count(),
+                          &strs[0], &lengths[0]);
+    } else {
+        programData->fGShaderID = 0;
+    }
+
+    // fragment shader
+    strs.reset();
+    lengths.reset();
+    temps.reset();
+
+    append_string(segments.fHeader, &strs, &lengths);
+    // ES requires a default float precision declaration in the FS.
+    GrStringBuilder precisionStr(GrShaderPrecision(gl));
+    append_string(precisionStr, &strs, &lengths);
+    append_decls(segments.fFSUnis, gl, "uniform", &strs, &lengths, &temps);
+    append_decls(segments.fFSInputs, gl, varyingPrefixes[1], &strs, &lengths, &temps);
+    // We shouldn't have declared outputs on 1.2
+    GrAssert(k120_GLSLVersion != glslVersion || segments.fFSOutputs.empty());
+    append_decls(segments.fFSOutputs, gl, "out", &strs, &lengths, &temps);
+    append_string(segments.fFSFunctions, &strs, &lengths);
+    append_string(segments.fFSCode, &strs, &lengths);
+
+#if PRINT_SHADERS
+    print_shader(strs.count(), &strs[0], &lengths[0]);
+    GrPrintf("\n");
+#endif
+
+    programData->fFShaderID =
+        CompileShader(gl, GR_GL_FRAGMENT_SHADER, strs.count(),
+                      &strs[0], &lengths[0]);
+
+    if (!programData->fFShaderID) {
+        return false;
+    }
+
+    return true;
+}
+
+// Compiles one GL shader object of the given type from the provided source
+// strings (same string/length convention as glShaderSource). Returns the
+// shader ID, or 0 on creation or compilation failure; on compile failure
+// the info log and the offending source are printed and the shader object
+// is deleted.
+GrGLuint GrGLProgram::CompileShader(const GrGLInterface* gl,
+                                    GrGLenum type,
+                                    int stringCnt,
+                                    const char** strings,
+                                    int* stringLengths) {
+    SK_TRACE_EVENT1("GrGLProgram::CompileShader",
+                    "stringCount", SkStringPrintf("%i", stringCnt).c_str());
+
+    GrGLuint shader;
+    GR_GL_CALL_RET(gl, shader, CreateShader(type));
+    if (0 == shader) {
+        return 0;
+    }
+
+    GrGLint compiled = GR_GL_INIT_ZERO;
+    GR_GL_CALL(gl, ShaderSource(shader, stringCnt, strings, stringLengths));
+    GR_GL_CALL(gl, CompileShader(shader));
+    GR_GL_CALL(gl, GetShaderiv(shader, GR_GL_COMPILE_STATUS, &compiled));
+
+    if (!compiled) {
+        GrGLint infoLen = GR_GL_INIT_ZERO;
+        GR_GL_CALL(gl, GetShaderiv(shader, GR_GL_INFO_LOG_LENGTH, &infoLen));
+        SkAutoMalloc log(sizeof(char)*(infoLen+1)); // outside if for debugger
+        if (infoLen > 0) {
+            // retrieve length even though we don't need it to workaround
+            // bug in chrome cmd buffer param validation.
+            GrGLsizei length = GR_GL_INIT_ZERO;
+            GR_GL_CALL(gl, GetShaderInfoLog(shader, infoLen+1,
+                                            &length, (char*)log.get()));
+            print_shader(stringCnt, strings, stringLengths);
+            GrPrintf("\n%s", log.get());
+        }
+        GrAssert(!"Shader compilation failed!");
+        GR_GL_CALL(gl, DeleteShader(shader));
+        return 0;
+    }
+    return shader;
+}
+
+// Creates the GL program object, attaches the compiled shaders, binds the
+// fragment outputs (primary and optional dual-source secondary) and all
+// vertex attribute locations to the fixed indices this class defines, then
+// links. Attribute binding must happen before LinkProgram for the bindings
+// to take effect. Returns false (and zeroes fProgramID) on link failure.
+bool GrGLProgram::bindOutputsAttribsAndLinkProgram(
+                                        const GrGLInterface* gl,
+                                        GrStringBuilder texCoordAttrNames[],
+                                        bool bindColorOut,
+                                        bool bindDualSrcOut,
+                                        CachedData* programData) const {
+    GR_GL_CALL_RET(gl, programData->fProgramID, CreateProgram());
+    if (!programData->fProgramID) {
+        return false;
+    }
+    const GrGLint& progID = programData->fProgramID;
+
+    GR_GL_CALL(gl, AttachShader(progID, programData->fVShaderID));
+    if (programData->fGShaderID) {
+        GR_GL_CALL(gl, AttachShader(progID, programData->fGShaderID));
+    }
+    GR_GL_CALL(gl, AttachShader(progID, programData->fFShaderID));
+
+    if (bindColorOut) {
+        GR_GL_CALL(gl, BindFragDataLocation(programData->fProgramID,
+                                          0, declared_color_output_name()));
+    }
+    if (bindDualSrcOut) {
+        GR_GL_CALL(gl, BindFragDataLocationIndexed(programData->fProgramID,
+                                          0, 1, dual_source_output_name()));
+    }
+
+    // Bind the attrib locations to same values for all shaders
+    GR_GL_CALL(gl, BindAttribLocation(progID, PositionAttributeIdx(),
+                                      POS_ATTR_NAME));
+    for (int t = 0; t < GrDrawTarget::kMaxTexCoords; ++t) {
+        if (texCoordAttrNames[t].size()) {
+            GR_GL_CALL(gl, BindAttribLocation(progID,
+                                              TexCoordAttributeIdx(t),
+                                              texCoordAttrNames[t].c_str()));
+        }
+    }
+
+    if (kSetAsAttribute == programData->fUniLocations.fViewMatrixUni) {
+        GR_GL_CALL(gl, BindAttribLocation(progID,
+                                          ViewMatrixAttributeIdx(),
+                                          VIEW_MATRIX_NAME));
+    }
+
+    for (int s = 0; s < GrDrawTarget::kNumStages; ++s) {
+        const StageUniLocations& unis = programData->fUniLocations.fStages[s];
+        if (kSetAsAttribute == unis.fTextureMatrixUni) {
+            GrStringBuilder matName;
+            tex_matrix_name(s, &matName);
+            GR_GL_CALL(gl, BindAttribLocation(progID,
+                                              TextureMatrixAttributeIdx(s),
+                                              matName.c_str()));
+        }
+    }
+
+    GR_GL_CALL(gl, BindAttribLocation(progID, ColorAttributeIdx(),
+                                      COL_ATTR_NAME));
+    GR_GL_CALL(gl, BindAttribLocation(progID, CoverageAttributeIdx(),
+                                      COV_ATTR_NAME));
+    GR_GL_CALL(gl, BindAttribLocation(progID, EdgeAttributeIdx(),
+                                      EDGE_ATTR_NAME));
+
+    GR_GL_CALL(gl, LinkProgram(progID));
+
+    GrGLint linked = GR_GL_INIT_ZERO;
+    GR_GL_CALL(gl, GetProgramiv(progID, GR_GL_LINK_STATUS, &linked));
+    if (!linked) {
+        GrGLint infoLen = GR_GL_INIT_ZERO;
+        GR_GL_CALL(gl, GetProgramiv(progID, GR_GL_INFO_LOG_LENGTH, &infoLen));
+        SkAutoMalloc log(sizeof(char)*(infoLen+1)); // outside if for debugger
+        if (infoLen > 0) {
+            // retrieve length even though we don't need it to workaround
+            // bug in chrome cmd buffer param validation.
+            GrGLsizei length = GR_GL_INIT_ZERO;
+            GR_GL_CALL(gl, GetProgramInfoLog(progID, infoLen+1,
+                                             &length, (char*)log.get()));
+            // Print through "%s" -- the driver-supplied log must never be
+            // used as a printf format string.
+            GrPrintf("%s", (char*)log.get());
+        }
+        GrAssert(!"Error linking program");
+        GR_GL_CALL(gl, DeleteProgram(progID));
+        programData->fProgramID = 0;
+        return false;
+    }
+    return true;
+}
+
+// Queries the GL location of every uniform that codegen flagged with
+// kUseUniform, then makes the program current, assigns each stage's sampler
+// uniform its fixed texture unit, and seeds the cached uniform values with
+// invalid sentinels so the first real draw is forced to upload fresh state.
+void GrGLProgram::getUniformLocationsAndInitCache(const GrGLInterface* gl,
+                                                  CachedData* programData) const {
+    const GrGLint& progID = programData->fProgramID;
+
+    if (kUseUniform == programData->fUniLocations.fViewMatrixUni) {
+        GR_GL_CALL_RET(gl, programData->fUniLocations.fViewMatrixUni,
+                       GetUniformLocation(progID, VIEW_MATRIX_NAME));
+        GrAssert(kUnusedUniform != programData->fUniLocations.fViewMatrixUni);
+    }
+    if (kUseUniform == programData->fUniLocations.fColorUni) {
+        GR_GL_CALL_RET(gl, programData->fUniLocations.fColorUni,
+                       GetUniformLocation(progID, COL_UNI_NAME));
+        GrAssert(kUnusedUniform != programData->fUniLocations.fColorUni);
+    }
+    if (kUseUniform == programData->fUniLocations.fColorFilterUni) {
+        GR_GL_CALL_RET(gl, programData->fUniLocations.fColorFilterUni,
+                       GetUniformLocation(progID, COL_FILTER_UNI_NAME));
+        GrAssert(kUnusedUniform != programData->fUniLocations.fColorFilterUni);
+    }
+
+    if (kUseUniform == programData->fUniLocations.fEdgesUni) {
+        GR_GL_CALL_RET(gl, programData->fUniLocations.fEdgesUni,
+                       GetUniformLocation(progID, EDGES_UNI_NAME));
+        GrAssert(kUnusedUniform != programData->fUniLocations.fEdgesUni);
+    } else {
+        programData->fUniLocations.fEdgesUni = kUnusedUniform;
+    }
+
+    // Per-stage uniforms are only queried for enabled stages.
+    for (int s = 0; s < GrDrawTarget::kNumStages; ++s) {
+        StageUniLocations& locations = programData->fUniLocations.fStages[s];
+        if (fProgramDesc.fStages[s].isEnabled()) {
+            if (kUseUniform == locations.fTextureMatrixUni) {
+                GrStringBuilder texMName;
+                tex_matrix_name(s, &texMName);
+                GR_GL_CALL_RET(gl, locations.fTextureMatrixUni,
+                               GetUniformLocation(progID, texMName.c_str()));
+                GrAssert(kUnusedUniform != locations.fTextureMatrixUni);
+            }
+
+            if (kUseUniform == locations.fSamplerUni) {
+                GrStringBuilder samplerName;
+                sampler_name(s, &samplerName);
+                GR_GL_CALL_RET(gl, locations.fSamplerUni,
+                               GetUniformLocation(progID,samplerName.c_str()));
+                GrAssert(kUnusedUniform != locations.fSamplerUni);
+            }
+
+            if (kUseUniform == locations.fNormalizedTexelSizeUni) {
+                GrStringBuilder texelSizeName;
+                normalized_texel_size_name(s, &texelSizeName);
+                GR_GL_CALL_RET(gl, locations.fNormalizedTexelSizeUni,
+                               GetUniformLocation(progID, texelSizeName.c_str()));
+                GrAssert(kUnusedUniform != locations.fNormalizedTexelSizeUni);
+            }
+
+            if (kUseUniform == locations.fRadial2Uni) {
+                GrStringBuilder radial2ParamName;
+                radial2_param_name(s, &radial2ParamName);
+                GR_GL_CALL_RET(gl, locations.fRadial2Uni,
+                               GetUniformLocation(progID, radial2ParamName.c_str()));
+                GrAssert(kUnusedUniform != locations.fRadial2Uni);
+            }
+
+            if (kUseUniform == locations.fTexDomUni) {
+                GrStringBuilder texDomName;
+                tex_domain_name(s, &texDomName);
+                GR_GL_CALL_RET(gl, locations.fTexDomUni,
+                               GetUniformLocation(progID, texDomName.c_str()));
+                GrAssert(kUnusedUniform != locations.fTexDomUni);
+            }
+
+            GrStringBuilder kernelName, imageIncrementName;
+            convolve_param_names(s, &kernelName, &imageIncrementName);
+            if (kUseUniform == locations.fKernelUni) {
+                GR_GL_CALL_RET(gl, locations.fKernelUni,
+                               GetUniformLocation(progID, kernelName.c_str()));
+                GrAssert(kUnusedUniform != locations.fKernelUni);
+            }
+
+            if (kUseUniform == locations.fImageIncrementUni) {
+                GR_GL_CALL_RET(gl, locations.fImageIncrementUni,
+                               GetUniformLocation(progID,
+                                                  imageIncrementName.c_str()));
+                GrAssert(kUnusedUniform != locations.fImageIncrementUni);
+            }
+        }
+    }
+    GR_GL_CALL(gl, UseProgram(progID));
+
+    // init sampler unis and set bogus values for state tracking
+    for (int s = 0; s < GrDrawTarget::kNumStages; ++s) {
+        if (kUnusedUniform != programData->fUniLocations.fStages[s].fSamplerUni) {
+            GR_GL_CALL(gl, Uniform1i(programData->fUniLocations.fStages[s].fSamplerUni, s));
+        }
+        programData->fTextureMatrices[s] = GrMatrix::InvalidMatrix();
+        programData->fRadial2CenterX1[s] = GR_ScalarMax;
+        programData->fRadial2Radius0[s] = -GR_ScalarMax;
+        programData->fTextureWidth[s] = -1;
+        programData->fTextureHeight[s] = -1;
+    }
+    programData->fViewMatrix = GrMatrix::InvalidMatrix();
+    programData->fColor = GrColor_ILLEGAL;
+    programData->fColorFilterColor = GrColor_ILLEGAL;
+}
+
+//============================================================================
+// Stage code generation
+//============================================================================
+
+// Generates the VS and FS code for one texture stage: the texcoord varying
+// (optionally transformed by a matrix), the sampler/uniform declarations,
+// the coordinate-mapping math (identity, radial, sweep, two-point radial),
+// the fetch (single, 2x2 box, or convolution), and the final modulation of
++// the incoming color. Records every uniform it requires in locations by
+// setting the corresponding field to kUseUniform (or kSetAsAttribute).
+void GrGLProgram::genStageCode(const GrGLInterface* gl,
+                           int stageNum,
+                           const GrGLProgram::StageDesc& desc,
+                           const char* fsInColor, // NULL means no incoming color
+                           const char* fsOutColor,
+                           const char* vsInCoord,
+                           ShaderCodeSegments* segments,
+                           StageUniLocations* locations) const {
+
+    // stage names embed a single decimal digit
+    GrAssert(stageNum >= 0 && stageNum <= 9);
+
+    // First decide how many coords are needed to access the texture
+    // Right now it's always 2 but we could start using 1D textures for
+    // gradients.
+    static const int coordDims = 2;
+    int varyingDims;
+    /// Vertex Shader Stuff
+
+    // decide whether we need a matrix to transform texture coords
+    // and whether the varying needs a perspective coord.
+    const char* matName = NULL;
+    if (desc.fOptFlags & StageDesc::kIdentityMatrix_OptFlagBit) {
+        varyingDims = coordDims;
+    } else {
+        GrGLShaderVar* mat;
+    #if GR_GL_ATTRIBUTE_MATRICES
+        mat = &segments->fVSAttrs.push_back();
+        locations->fTextureMatrixUni = kSetAsAttribute;
+    #else
+        mat = &segments->fVSUnis.push_back();
+        locations->fTextureMatrixUni = kUseUniform;
+    #endif
+        tex_matrix_name(stageNum, mat->accessName());
+        mat->setType(GrGLShaderVar::kMat33f_Type);
+        matName = mat->getName().c_str();
+
+        if (desc.fOptFlags & StageDesc::kNoPerspective_OptFlagBit) {
+            varyingDims = coordDims;
+        } else {
+            // extra component carries the homogeneous w for the divide
+            varyingDims = coordDims + 1;
+        }
+    }
+
+    segments->fFSUnis.push_back().setType(GrGLShaderVar::kSampler2D_Type);
+    sampler_name(stageNum, segments->fFSUnis.back().accessName());
+    locations->fSamplerUni = kUseUniform;
+    const char* samplerName = segments->fFSUnis.back().getName().c_str();
+
+    const char* texelSizeName = NULL;
+    if (StageDesc::k2x2_FetchMode == desc.fFetchMode) {
+        segments->fFSUnis.push_back().setType(GrGLShaderVar::kVec2f_Type);
+        normalized_texel_size_name(stageNum, segments->fFSUnis.back().accessName());
+        texelSizeName = segments->fFSUnis.back().getName().c_str();
+    }
+
+    const char *varyingVSName, *varyingFSName;
+    append_varying(float_vector_type(varyingDims),
+                   "Stage",
+                   stageNum,
+                   segments,
+                   &varyingVSName,
+                   &varyingFSName);
+
+    if (!matName) {
+        GrAssert(varyingDims == coordDims);
+        segments->fVSCode.appendf("\t%s = %s;\n", varyingVSName, vsInCoord);
+    } else {
+        // varying = texMatrix * texCoord
+        segments->fVSCode.appendf("\t%s = (%s * vec3(%s, 1))%s;\n",
+                                  varyingVSName, matName, vsInCoord,
+                                  vector_all_coords(varyingDims));
+    }
+
+    const char* radial2ParamsName = NULL;
+    const char *radial2VaryingVSName = NULL;
+    const char *radial2VaryingFSName = NULL;
+
+    if (StageDesc::kRadial2Gradient_CoordMapping == desc.fCoordMapping ||
+        StageDesc::kRadial2GradientDegenerate_CoordMapping == desc.fCoordMapping) {
+
+        GrGLShaderVar* radial2FSParams = &segments->fFSUnis.push_back();
+        radial2FSParams->setType(GrGLShaderVar::kFloat_Type);
+        radial2FSParams->setArrayCount(6);
+        radial2_param_name(stageNum, radial2FSParams->accessName());
+        segments->fVSUnis.push_back(*radial2FSParams).setEmitPrecision(true);
+        radial2ParamsName = radial2FSParams->getName().c_str();
+
+        locations->fRadial2Uni = kUseUniform;
+
+        // for radial grads without perspective we can pass the linear
+        // part of the quadratic as a varying.
+        if (varyingDims == coordDims) {
+            GrAssert(2 == coordDims);
+            append_varying(GrGLShaderVar::kFloat_Type,
+                           "Radial2BCoeff",
+                           stageNum,
+                           segments,
+                           &radial2VaryingVSName,
+                           &radial2VaryingFSName);
+
+            // r2Var = 2 * (r2Parm[2] * varCoord.x - r2Param[3])
+            const char* r2ParamName = radial2FSParams->getName().c_str();
+            segments->fVSCode.appendf("\t%s = 2.0 *(%s[2] * %s.x - %s[3]);\n",
+                                      radial2VaryingVSName, r2ParamName,
+                                      varyingVSName, r2ParamName);
+        }
+    }
+
+    const char* kernelName = NULL;
+    const char* imageIncrementName = NULL;
+    if (ProgramDesc::StageDesc::kConvolution_FetchMode == desc.fFetchMode) {
+
+        GrGLShaderVar* kernel = &segments->fFSUnis.push_back();
+        kernel->setType(GrGLShaderVar::kFloat_Type);
+        kernel->setArrayCount(desc.fKernelWidth);
+        GrGLShaderVar* imgInc = &segments->fFSUnis.push_back();
+        imgInc->setType(GrGLShaderVar::kVec2f_Type);
+
+        convolve_param_names(stageNum,
+                             kernel->accessName(),
+                             imgInc->accessName());
+        kernelName = kernel->getName().c_str();
+        imageIncrementName = imgInc->getName().c_str();
+
+        // need image increment in both VS and FS
+        segments->fVSUnis.push_back(*imgInc).setEmitPrecision(true);
+
+        locations->fKernelUni = kUseUniform;
+        locations->fImageIncrementUni = kUseUniform;
+        // shift the start coord back by half the kernel width so the
+        // convolution loop is centered on the sample point
+        float scale = (desc.fKernelWidth - 1) * 0.5f;
+        segments->fVSCode.appendf("\t%s -= vec2(%g, %g) * %s;\n",
+                                  varyingVSName, scale, scale,
+                                  imageIncrementName);
+    }
+
+    /// Fragment Shader Stuff
+    GrStringBuilder fsCoordName;
+    // function used to access the shader, may be made projective
+    GrStringBuilder texFunc("texture2D");
+    if (desc.fOptFlags & (StageDesc::kIdentityMatrix_OptFlagBit |
+                          StageDesc::kNoPerspective_OptFlagBit)) {
+        GrAssert(varyingDims == coordDims);
+        fsCoordName = varyingFSName;
+    } else {
+        // if we have to do some special op on the varyings to get
+        // our final tex coords then when in perspective we have to
+        // do an explicit divide. Otherwise, we can use a Proj func.
+        if  (StageDesc::kIdentity_CoordMapping == desc.fCoordMapping &&
+             StageDesc::kSingle_FetchMode == desc.fFetchMode) {
+            texFunc.append("Proj");
+            fsCoordName = varyingFSName;
+        } else {
+            fsCoordName = "inCoord";
+            fsCoordName.appendS32(stageNum);
+            segments->fFSCode.appendf("\t%s %s = %s%s / %s%s;\n",
+                                GrGLShaderVar::TypeString(float_vector_type(coordDims)),
+                                fsCoordName.c_str(),
+                                varyingFSName,
+                                vector_nonhomog_coords(varyingDims),
+                                varyingFSName,
+                                vector_homog_coord(varyingDims));
+        }
+    }
+
+    GrStringBuilder sampleCoords;
+    bool complexCoord = false;
+    switch (desc.fCoordMapping) {
+    case StageDesc::kIdentity_CoordMapping:
+        sampleCoords = fsCoordName;
+        break;
+    case StageDesc::kSweepGradient_CoordMapping:
+        // angle mapped to [0,1]; 0.1591549430918 == 1/(2*pi)
+        sampleCoords.printf("vec2(atan(- %s.y, - %s.x) * 0.1591549430918 + 0.5, 0.5)", fsCoordName.c_str(), fsCoordName.c_str());
+        complexCoord = true;
+        break;
+    case StageDesc::kRadialGradient_CoordMapping:
+        sampleCoords.printf("vec2(length(%s.xy), 0.5)", fsCoordName.c_str());
+        complexCoord = true;
+        break;
+    case StageDesc::kRadial2Gradient_CoordMapping: {
+        GrStringBuilder cName("c");
+        GrStringBuilder ac4Name("ac4");
+        GrStringBuilder rootName("root");
+
+        cName.appendS32(stageNum);
+        ac4Name.appendS32(stageNum);
+        rootName.appendS32(stageNum);
+
+        // if we were able to interpolate the linear component bVar is the varying
+        // otherwise compute it
+        GrStringBuilder bVar;
+        if (coordDims == varyingDims) {
+            bVar = radial2VaryingFSName;
+            GrAssert(2 == varyingDims);
+        } else {
+            GrAssert(3 == varyingDims);
+            bVar = "b";
+            bVar.appendS32(stageNum);
+            segments->fFSCode.appendf("\tfloat %s = 2.0 * (%s[2] * %s.x - %s[3]);\n",
+                                        bVar.c_str(), radial2ParamsName,
+                                        fsCoordName.c_str(), radial2ParamsName);
+        }
+
+        // c = (x^2)+(y^2) - params[4]
+        segments->fFSCode.appendf("\tfloat %s = dot(%s, %s) - %s[4];\n",
+                                  cName.c_str(), fsCoordName.c_str(),
+                                  fsCoordName.c_str(),
+                                  radial2ParamsName);
+        // ac4 = 4.0 * params[0] * c
+        segments->fFSCode.appendf("\tfloat %s = %s[0] * 4.0 * %s;\n",
+                                  ac4Name.c_str(), radial2ParamsName,
+                                  cName.c_str());
+
+        // root = sqrt(b^2-4ac)
+        // (abs to avoid exception due to fp precision)
+        segments->fFSCode.appendf("\tfloat %s = sqrt(abs(%s*%s - %s));\n",
+                                  rootName.c_str(), bVar.c_str(), bVar.c_str(),
+                                  ac4Name.c_str());
+
+        // x coord is: (-b + params[5] * sqrt(b^2-4ac)) * params[1]
+        // y coord is 0.5 (texture is effectively 1D)
+        sampleCoords.printf("vec2((-%s + %s[5] * %s) * %s[1], 0.5)",
+                            bVar.c_str(), radial2ParamsName,
+                            rootName.c_str(), radial2ParamsName);
+        complexCoord = true;
+        break;}
+    case StageDesc::kRadial2GradientDegenerate_CoordMapping: {
+        GrStringBuilder cName("c");
+
+        cName.appendS32(stageNum);
+
+        // if we were able to interpolate the linear component bVar is the varying
+        // otherwise compute it
+        GrStringBuilder bVar;
+        if (coordDims == varyingDims) {
+            bVar = radial2VaryingFSName;
+            GrAssert(2 == varyingDims);
+        } else {
+            GrAssert(3 == varyingDims);
+            bVar = "b";
+            bVar.appendS32(stageNum);
+            segments->fFSCode.appendf("\tfloat %s = 2.0 * (%s[2] * %s.x - %s[3]);\n",
+                                        bVar.c_str(), radial2ParamsName,
+                                        fsCoordName.c_str(), radial2ParamsName);
+        }
+
+        // c = (x^2)+(y^2) - params[4]
+        segments->fFSCode.appendf("\tfloat %s = dot(%s, %s) - %s[4];\n",
+                                  cName.c_str(), fsCoordName.c_str(),
+                                  fsCoordName.c_str(),
+                                  radial2ParamsName);
+
+        // x coord is: -c/b
+        // y coord is 0.5 (texture is effectively 1D)
+        sampleCoords.printf("vec2((-%s / %s), 0.5)", cName.c_str(), bVar.c_str());
+        complexCoord = true;
+        break;}
+    };
+
+    // ".aaaa" smears alpha across all channels for alpha-only textures
+    const char* smear;
+    if (desc.fModulation == StageDesc::kAlpha_Modulation) {
+        smear = ".aaaa";
+    } else {
+        smear = "";
+    }
+    GrStringBuilder modulate;
+    if (NULL != fsInColor) {
+        modulate.printf(" * %s", fsInColor);
+    }
+
+    if (desc.fOptFlags &
+        StageDesc::kCustomTextureDomain_OptFlagBit) {
+        GrStringBuilder texDomainName;
+        tex_domain_name(stageNum, &texDomainName);
+        segments->fFSUnis.push_back().set(GrGLShaderVar::kVec4f_Type, texDomainName);
+        GrStringBuilder coordVar("clampCoord");
+        segments->fFSCode.appendf("\t%s %s = clamp(%s, %s.xy, %s.zw);\n",
+                                  float_vector_type_str(coordDims),
+                                  coordVar.c_str(),
+                                  sampleCoords.c_str(),
+                                  texDomainName.c_str(),
+                                  texDomainName.c_str());
+        sampleCoords = coordVar;
+        locations->fTexDomUni = kUseUniform;
+    }
+
+    if (StageDesc::k2x2_FetchMode == desc.fFetchMode) {
+        locations->fNormalizedTexelSizeUni = kUseUniform;
+        if (complexCoord) {
+            // assign the coord to a var rather than compute 4x.
+            GrStringBuilder coordVar("tCoord");
+            coordVar.appendS32(stageNum);
+            segments->fFSCode.appendf("\t%s %s = %s;\n",
+                                      float_vector_type_str(coordDims),
+                                      coordVar.c_str(), sampleCoords.c_str());
+            sampleCoords = coordVar;
+        }
+        GrAssert(2 == coordDims);
+        GrStringBuilder accumVar("accum");
+        accumVar.appendS32(stageNum);
+        segments->fFSCode.appendf("\tvec4 %s  = %s(%s, %s + vec2(-%s.x,-%s.y))%s;\n", accumVar.c_str(), texFunc.c_str(), samplerName, sampleCoords.c_str(), texelSizeName, texelSizeName, smear);
+        segments->fFSCode.appendf("\t%s += %s(%s, %s + vec2(+%s.x,-%s.y))%s;\n", accumVar.c_str(), texFunc.c_str(), samplerName, sampleCoords.c_str(), texelSizeName, texelSizeName, smear);
+        segments->fFSCode.appendf("\t%s += %s(%s, %s + vec2(-%s.x,+%s.y))%s;\n", accumVar.c_str(), texFunc.c_str(), samplerName, sampleCoords.c_str(), texelSizeName, texelSizeName, smear);
+        segments->fFSCode.appendf("\t%s += %s(%s, %s + vec2(+%s.x,+%s.y))%s;\n", accumVar.c_str(), texFunc.c_str(), samplerName, sampleCoords.c_str(), texelSizeName, texelSizeName, smear);
+        segments->fFSCode.appendf("\t%s = .25 * %s%s;\n", fsOutColor, accumVar.c_str(), modulate.c_str());
+    } else if (ProgramDesc::StageDesc::kConvolution_FetchMode == desc.fFetchMode) {
+        GrStringBuilder sumVar("sum");
+        sumVar.appendS32(stageNum);
+        GrStringBuilder coordVar("coord");
+        coordVar.appendS32(stageNum);
+
+        segments->fFSCode.appendf("\tvec4 %s = vec4(0, 0, 0, 0);\n",
+                                  sumVar.c_str());
+        segments->fFSCode.appendf("\tvec2 %s = %s;\n",
+                                  coordVar.c_str(),
+                                  sampleCoords.c_str());
+        segments->fFSCode.appendf("\tfor (int i = 0; i < %d; i++) {\n",
+                                  desc.fKernelWidth);
+        segments->fFSCode.appendf("\t\t%s += %s(%s, %s)%s * %s[i];\n",
+                                  sumVar.c_str(), texFunc.c_str(),
+                                  samplerName, coordVar.c_str(), smear,
+                                  kernelName);
+        segments->fFSCode.appendf("\t\t%s += %s;\n",
+                                  coordVar.c_str(),
+                                  imageIncrementName);
+        segments->fFSCode.appendf("\t}\n");
+        segments->fFSCode.appendf("\t%s = %s%s;\n", fsOutColor,
+                                  sumVar.c_str(), modulate.c_str());
+    } else {
+        segments->fFSCode.appendf("\t%s = %s(%s, %s)%s%s;\n",
+                                  fsOutColor, texFunc.c_str(),
+                                  samplerName, sampleCoords.c_str(),
+                                  smear, modulate.c_str());
+    }
+}
diff --git a/src/gpu/GrGLProgram.h b/src/gpu/GrGLProgram.h
new file mode 100644
index 0000000000..dafa79dc29
--- /dev/null
+++ b/src/gpu/GrGLProgram.h
@@ -0,0 +1,347 @@
+
+/*
+ * Copyright 2011 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+#ifndef GrGLProgram_DEFINED
+#define GrGLProgram_DEFINED
+
+#include "GrGLInterface.h"
+#include "GrStringBuilder.h"
+#include "GrGpu.h"
+
+#include "SkXfermode.h"
+
+class GrBinHashKeyBuilder;
+
+struct ShaderCodeSegments;
+
+// optionally compile the experimental GS code. Set to GR_DEBUG
+// so that debug build bots will execute the code.
+#define GR_GL_EXPERIMENTAL_GS GR_DEBUG
+
+/**
+ * This class manages a GPU program and records per-program information.
+ * We can specify the attribute locations so that they are constant
+ * across our shaders. But the driver determines the uniform locations
+ * at link time. We don't need to remember the sampler uniform location
+ * because we will bind a texture slot to it and never change it
+ * Uniforms are program-local so we can't rely on fHWState to hold the
+ * previous uniform state after a program change.
+ */
+class GrGLProgram {
+public:
+    enum GLSLVersion {
+        k120_GLSLVersion, // Desktop GLSL 1.20 and ES2 shading lang
+        k130_GLSLVersion, // Desktop GLSL 1.30
+        k150_GLSLVersion  // Desktop GLSL 1.50
+    };
+
+    class CachedData;
+
+    GrGLProgram();
+    ~GrGLProgram();
+
+    /**
+     *  This is the heavy initialization routine for building a GLProgram.
+     *  The result of heavy init is not stored in data members of GrGLProgram,
+     *  but in a separate cacheable container.
+     */
+    bool genProgram(const GrGLInterface* gl,
+                    GLSLVersion glslVersion,
+                    CachedData* programData) const;
+
+     /**
+      *  The shader may modify the blend coefficients. Params are in/out
+      */
+    void overrideBlend(GrBlendCoeff* srcCoeff, GrBlendCoeff* dstCoeff) const;
+
+    /**
+     * Attribute indices. These are fixed so they stay constant across all
+     * generated programs (the uniform locations, by contrast, are assigned
+     * by the driver at link time).
+     */
+    static int PositionAttributeIdx() { return 0; }
+    static int TexCoordAttributeIdx(int tcIdx) { return 1 + tcIdx; }
+    static int ColorAttributeIdx() { return 1 + GrDrawTarget::kMaxTexCoords; }
+    static int CoverageAttributeIdx() {
+        return 2 + GrDrawTarget::kMaxTexCoords;
+    }
+    static int EdgeAttributeIdx() { return 3 + GrDrawTarget::kMaxTexCoords; }
+
+    // NOTE(review): this returns the same index as CoverageAttributeIdx();
+    // presumably a view-matrix-as-attribute program never also uses the
+    // coverage attribute -- confirm against GrGpuGLShaders before reusing.
+    static int ViewMatrixAttributeIdx() {
+        return 2 + GrDrawTarget::kMaxTexCoords;
+    }
+    // each mat3 texture matrix occupies 3 consecutive attribute slots
+    static int TextureMatrixAttributeIdx(int stage) {
+        return 5 + GrDrawTarget::kMaxTexCoords + 3 * stage;
+    }
+
+private:
+
+    // Parameters that affect code generation
+    // These structs should be kept compact; they are the input to an
+    // expensive hash key generator.
+    struct ProgramDesc {
+        ProgramDesc() {
+            // since we use this as part of a key we can't have any uninitialized
+            // padding
+            memset(this, 0, sizeof(ProgramDesc));
+        }
+
+        struct StageDesc {
+            enum OptFlagBits {
+                kNoPerspective_OptFlagBit       = 1 << 0,
+                kIdentityMatrix_OptFlagBit      = 1 << 1,
+                kCustomTextureDomain_OptFlagBit = 1 << 2,
+                kIsEnabled_OptFlagBit           = 1 << 7
+            };
+            enum Modulation {
+                kColor_Modulation,
+                kAlpha_Modulation,
+
+                kModulationCnt
+            };
+            enum FetchMode {
+                kSingle_FetchMode,
+                k2x2_FetchMode,
+                kConvolution_FetchMode,
+
+                kFetchModeCnt,
+            };
+            enum CoordMapping {
+                kIdentity_CoordMapping,
+                kRadialGradient_CoordMapping,
+                kSweepGradient_CoordMapping,
+                kRadial2Gradient_CoordMapping,
+                // need different shader computation when quadratic
+                // eq describing the gradient degenerates to a linear eq.
+                kRadial2GradientDegenerate_CoordMapping,
+                kCoordMappingCnt
+            };
+
+            uint8_t fOptFlags;
+            uint8_t fModulation;  // casts to enum Modulation
+            uint8_t fFetchMode;  // casts to enum FetchMode
+            uint8_t fCoordMapping;  // casts to enum CoordMapping
+            uint8_t fKernelWidth;
+
+            inline bool isEnabled() const {
+                return SkToBool(fOptFlags & kIsEnabled_OptFlagBit);
+            }
+            inline void setEnabled(bool newValue) {
+                if (newValue) {
+                    fOptFlags |= kIsEnabled_OptFlagBit;
+                } else {
+                    fOptFlags &= ~kIsEnabled_OptFlagBit;
+                }
+            }
+        };
+
+        // Specifies where the initial color comes from before the stages are
+        // applied.
+        enum ColorType {
+            kSolidWhite_ColorType,
+            kTransBlack_ColorType,
+            kAttribute_ColorType,
+            kUniform_ColorType,
+
+            kColorTypeCnt
+        };
+        // Dual-src blending makes use of a secondary output color that can be
+        // used as a per-pixel blend coefficient. This controls whether a
+        // secondary source is output and what value it holds.
+        enum DualSrcOutput {
+            kNone_DualSrcOutput,
+            kCoverage_DualSrcOutput,
+            kCoverageISA_DualSrcOutput,
+            kCoverageISC_DualSrcOutput,
+
+            kDualSrcOutputCnt
+        };
+
+        GrDrawTarget::VertexEdgeType fVertexEdgeType;
+
+        // stripped of bits that don't affect prog generation
+        GrVertexLayout fVertexLayout;
+
+        StageDesc fStages[GrDrawTarget::kNumStages];
+
+        // To enable experimental geometry shader code (not for use in
+        // production)
+#if GR_GL_EXPERIMENTAL_GS
+        bool fExperimentalGS;
+#endif
+
+        uint8_t fColorType;  // casts to enum ColorType
+        uint8_t fDualSrcOutput;  // casts to enum DualSrcOutput
+        int8_t fFirstCoverageStage;
+        SkBool8 fEmitsPointSize;
+        SkBool8 fEdgeAAConcave;
+
+        int8_t fEdgeAANumEdges;
+        uint8_t fColorFilterXfermode;  // casts to enum SkXfermode::Mode
+
+        // pads the struct so its size is a multiple of 4 (required for use
+        // as a 32-bit-word hash key; see GR_STATIC_ASSERT below)
+        uint8_t fPadTo32bLengthMultiple [1];
+
+    } fProgramDesc;
+    GR_STATIC_ASSERT(!(sizeof(ProgramDesc) % 4));
+
+    const ProgramDesc& getDesc() { return fProgramDesc; }
+
+    // for code readability
+    typedef ProgramDesc::StageDesc StageDesc;
+
+public:
+    enum {
+        kUnusedUniform = -1,
+        kSetAsAttribute = 1000,
+    };
+
+    // Per-stage uniform locations; each field is kUnusedUniform, an actual
+    // GL location, or kSetAsAttribute (matrix passed as vertex attributes).
+    struct StageUniLocations {
+        GrGLint fTextureMatrixUni;
+        GrGLint fNormalizedTexelSizeUni;
+        GrGLint fSamplerUni;
+        GrGLint fRadial2Uni;
+        GrGLint fTexDomUni;
+        GrGLint fKernelUni;
+        GrGLint fImageIncrementUni;
+        void reset() {
+            fTextureMatrixUni = kUnusedUniform;
+            fNormalizedTexelSizeUni = kUnusedUniform;
+            fSamplerUni = kUnusedUniform;
+            fRadial2Uni = kUnusedUniform;
+            fTexDomUni = kUnusedUniform;
+            fKernelUni = kUnusedUniform;
+            fImageIncrementUni = kUnusedUniform;
+        }
+    };
+
+    struct UniLocations {
+        GrGLint fViewMatrixUni;
+        GrGLint fColorUni;
+        GrGLint fEdgesUni;
+        GrGLint fColorFilterUni;
+        StageUniLocations fStages[GrDrawTarget::kNumStages];
+        void reset() {
+            fViewMatrixUni = kUnusedUniform;
+            fColorUni = kUnusedUniform;
+            fEdgesUni = kUnusedUniform;
+            fColorFilterUni = kUnusedUniform;
+            for (int s = 0; s < GrDrawTarget::kNumStages; ++s) {
+                fStages[s].reset();
+            }
+        }
+    };
+
+    class CachedData : public ::GrNoncopyable {
+    public:
+        CachedData() {
+        }
+
+        ~CachedData() {
+        }
+
+        void copyAndTakeOwnership(CachedData& other) {
+            memcpy(this, &other, sizeof(*this));
+        }
+
+    public:
+
+        // IDs
+        GrGLuint    fVShaderID;
+        GrGLuint    fGShaderID;
+        GrGLuint    fFShaderID;
+        GrGLuint    fProgramID;
+        // shader uniform locations (-1 if shader doesn't use them)
+        UniLocations fUniLocations;
+
+        GrMatrix  fViewMatrix;
+
+        // these reflect the current values of uniforms
+        // (GL uniform values travel with program)
+        GrColor                     fColor;
+        GrColor                     fColorFilterColor;
+        GrMatrix                    fTextureMatrices[GrDrawTarget::kNumStages];
+        // width and height used for normalized texel size
+        int                         fTextureWidth[GrDrawTarget::kNumStages];
+        int                         fTextureHeight[GrDrawTarget::kNumStages]; 
+        GrScalar                    fRadial2CenterX1[GrDrawTarget::kNumStages];
+        GrScalar                    fRadial2Radius0[GrDrawTarget::kNumStages];
+        bool                        fRadial2PosRoot[GrDrawTarget::kNumStages];
+        GrRect                      fTextureDomain[GrDrawTarget::kNumStages];
+
+    private:
+        enum Constants {
+            kUniLocationPreAllocSize = 8
+        };
+
+    }; // CachedData
+
+    enum Constants {
+        kProgramKeySize = sizeof(ProgramDesc)
+    };
+
+    // Provide an opaque ProgramDesc
+    const uint32_t* keyData() const{
+        return reinterpret_cast<const uint32_t*>(&fProgramDesc);
+    }
+
+private:
+    enum {
+        kUseUniform = 2000
+    };
+
+    // should set all fields in locations var to kUseUniform if the
+    // corresponding uniform is required for the program.
+    void genStageCode(const GrGLInterface* gl,
+                      int stageNum,
+                      const ProgramDesc::StageDesc& desc,
+                      const char* fsInColor, // NULL means no incoming color
+                      const char* fsOutColor,
+                      const char* vsInCoord,
+                      ShaderCodeSegments* segments,
+                      StageUniLocations* locations) const;
+
+    void genGeometryShader(const GrGLInterface* gl,
+                           GLSLVersion glslVersion,
+                           ShaderCodeSegments* segments) const;
+
+    // generates code to compute coverage based on edge AA.
+    void genEdgeCoverage(const GrGLInterface* gl,
+                         GrVertexLayout layout,
+                         CachedData* programData,
+                         GrStringBuilder* coverageVar,
+                         ShaderCodeSegments* segments) const;
+
+    static bool CompileShaders(const GrGLInterface* gl,
+                               GLSLVersion glslVersion,
+                               const ShaderCodeSegments& segments,
+                               CachedData* programData);
+
+    // Compiles a GL shader, returns shader ID or 0 if failed
+    // params have same meaning as glShaderSource
+    static GrGLuint CompileShader(const GrGLInterface* gl,
+                                  GrGLenum type, int stringCnt,
+                                  const char** strings,
+                                  int* stringLengths);
+
+    // Creates a GL program ID, binds shader attributes to GL vertex attrs, and
+    // links the program
+    bool bindOutputsAttribsAndLinkProgram(
+                const GrGLInterface* gl,
+                GrStringBuilder texCoordAttrNames[GrDrawTarget::kMaxTexCoords],
+                bool bindColorOut,
+                bool bindDualSrcOut,
+                CachedData* programData) const;
+
+    // Gets locations for all uniforms set to kUseUniform and initializes cache
+    // to invalid values.
+    void getUniformLocationsAndInitCache(const GrGLInterface* gl,
+                                         CachedData* programData) const;
+
+    friend class GrGpuGLShaders;
+};
+
+#endif
diff --git a/src/gpu/GrGLRenderTarget.cpp b/src/gpu/GrGLRenderTarget.cpp
new file mode 100644
index 0000000000..290ae2e47d
--- /dev/null
+++ b/src/gpu/GrGLRenderTarget.cpp
@@ -0,0 +1,96 @@
+
+/*
+ * Copyright 2011 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+#include "GrGLRenderTarget.h"
+
+#include "GrGpuGL.h"
+
+#define GPUGL static_cast<GrGpuGL*>(getGpu())
+
+#define GL_CALL(X) GR_GL_CALL(GPUGL->glInterface(), X)
+
+void GrGLRenderTarget::init(const Desc& desc,
+ const GrGLIRect& viewport,
+ GrGLTexID* texID) {
+ fRTFBOID = desc.fRTFBOID;
+ fTexFBOID = desc.fTexFBOID;
+ fMSColorRenderbufferID = desc.fMSColorRenderbufferID;
+ fViewport = viewport;
+ fOwnIDs = desc.fOwnIDs;
+ fTexIDObj = texID;
+ GrSafeRef(fTexIDObj);
+}
+
+GrGLRenderTarget::GrGLRenderTarget(GrGpuGL* gpu,
+ const Desc& desc,
+ const GrGLIRect& viewport,
+ GrGLTexID* texID,
+ GrGLTexture* texture)
+ : INHERITED(gpu,
+ texture,
+ viewport.fWidth,
+ viewport.fHeight,
+ texture->allocatedWidth(),
+ texture->allocatedHeight(),
+ desc.fConfig,
+ desc.fSampleCnt) {
+ GrAssert(NULL != texID);
+ GrAssert(NULL != texture);
+ // FBO 0 can't also be a texture, right?
+ GrAssert(0 != desc.fRTFBOID);
+ GrAssert(0 != desc.fTexFBOID);
+ this->init(desc, viewport, texID);
+}
+
+GrGLRenderTarget::GrGLRenderTarget(GrGpuGL* gpu,
+ const Desc& desc,
+ const GrGLIRect& viewport)
+ : INHERITED(gpu,
+ NULL,
+ viewport.fWidth,
+ viewport.fHeight,
+ viewport.fWidth, // don't really need separate alloc w/h for
+ viewport.fHeight, // non-texture RTs, repeat viewport values
+ desc.fConfig,
+ desc.fSampleCnt) {
+ this->init(desc, viewport, NULL);
+}
+
+void GrGLRenderTarget::onRelease() {
+ GPUGL->notifyRenderTargetDelete(this);
+ if (fOwnIDs) {
+ if (fTexFBOID) {
+ GL_CALL(DeleteFramebuffers(1, &fTexFBOID));
+ }
+ if (fRTFBOID && fRTFBOID != fTexFBOID) {
+ GL_CALL(DeleteFramebuffers(1, &fRTFBOID));
+ }
+ if (fMSColorRenderbufferID) {
+ GL_CALL(DeleteRenderbuffers(1, &fMSColorRenderbufferID));
+ }
+ }
+ fRTFBOID = 0;
+ fTexFBOID = 0;
+ fMSColorRenderbufferID = 0;
+ GrSafeUnref(fTexIDObj);
+ fTexIDObj = NULL;
+ this->setStencilBuffer(NULL);
+}
+
+void GrGLRenderTarget::onAbandon() {
+ fRTFBOID = 0;
+ fTexFBOID = 0;
+ fMSColorRenderbufferID = 0;
+ if (NULL != fTexIDObj) {
+ fTexIDObj->abandon();
+ fTexIDObj = NULL;
+ }
+ this->setStencilBuffer(NULL);
+}
+
diff --git a/src/gpu/GrGLRenderTarget.h b/src/gpu/GrGLRenderTarget.h
new file mode 100644
index 0000000000..5aeb36dc65
--- /dev/null
+++ b/src/gpu/GrGLRenderTarget.h
@@ -0,0 +1,108 @@
+
+/*
+ * Copyright 2011 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+#ifndef GrGLRenderTarget_DEFINED
+#define GrGLRenderTarget_DEFINED
+
+#include "GrGLIRect.h"
+#include "GrRenderTarget.h"
+#include "GrScalar.h"
+
+class GrGpuGL;
+class GrGLTexture;
+class GrGLTexID;
+
+class GrGLRenderTarget : public GrRenderTarget {
+
+public:
+ // set fTexFBOID to this value to indicate that it is multisampled but
+ // Gr doesn't know how to resolve it.
+ enum { kUnresolvableFBOID = 0 };
+
+ struct Desc {
+ GrGLuint fRTFBOID;
+ GrGLuint fTexFBOID;
+ GrGLuint fMSColorRenderbufferID;
+ bool fOwnIDs;
+ GrPixelConfig fConfig;
+ int fSampleCnt;
+ };
+
+ // creates a GrGLRenderTarget associated with a texture
+ GrGLRenderTarget(GrGpuGL* gpu,
+ const Desc& desc,
+ const GrGLIRect& viewport,
+ GrGLTexID* texID,
+ GrGLTexture* texture);
+
+ // creates an independent GrGLRenderTarget
+ GrGLRenderTarget(GrGpuGL* gpu,
+ const Desc& desc,
+ const GrGLIRect& viewport);
+
+ virtual ~GrGLRenderTarget() { this->release(); }
+
+ void setViewport(const GrGLIRect& rect) { fViewport = rect; }
+ const GrGLIRect& getViewport() const { return fViewport; }
+
+    // The following two functions return the same ID when a
+    // texture-rendertarget is not multisampled, and different IDs when
+    // it is.
+ // FBO ID used to render into
+ GrGLuint renderFBOID() const { return fRTFBOID; }
+ // FBO ID that has texture ID attached.
+ GrGLuint textureFBOID() const { return fTexFBOID; }
+
+ // override of GrRenderTarget
+ virtual intptr_t getRenderTargetHandle() const {
+ return this->renderFBOID();
+ }
+ virtual intptr_t getRenderTargetResolvedHandle() const {
+ return this->textureFBOID();
+ }
+ virtual ResolveType getResolveType() const {
+ if (fRTFBOID == fTexFBOID) {
+ // catches FBO 0 and non MSAA case
+ return kAutoResolves_ResolveType;
+ } else if (kUnresolvableFBOID == fTexFBOID) {
+ return kCantResolve_ResolveType;
+ } else {
+ return kCanResolve_ResolveType;
+ }
+ }
+
+protected:
+ // override of GrResource
+ virtual void onAbandon();
+ virtual void onRelease();
+
+private:
+ GrGLuint fRTFBOID;
+ GrGLuint fTexFBOID;
+
+ GrGLuint fMSColorRenderbufferID;
+
+ // Should this object delete IDs when it is destroyed or does someone
+ // else own them.
+ bool fOwnIDs;
+
+ // when we switch to this rendertarget we want to set the viewport to
+    // only render to the content area (as opposed to the whole allocation) and
+ // we want the rendering to be at top left (GL has origin in bottom left)
+ GrGLIRect fViewport;
+
+ // non-NULL if this RT was created by Gr with an associated GrGLTexture.
+ GrGLTexID* fTexIDObj;
+
+ void init(const Desc& desc, const GrGLIRect& viewport, GrGLTexID* texID);
+
+ typedef GrRenderTarget INHERITED;
+};
+
+#endif
diff --git a/src/gpu/GrGLShaderVar.h b/src/gpu/GrGLShaderVar.h
new file mode 100644
index 0000000000..5c50079558
--- /dev/null
+++ b/src/gpu/GrGLShaderVar.h
@@ -0,0 +1,217 @@
+
+/*
+ * Copyright 2011 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrGLShaderVar_DEFINED
+#define GrGLShaderVar_DEFINED
+
+#include "GrGLInterface.h"
+
+/**
+ * Represents a variable in a shader
+ */
+class GrGLShaderVar {
+public:
+
+ enum Type {
+ kFloat_Type,
+ kVec2f_Type,
+ kVec3f_Type,
+ kVec4f_Type,
+ kMat33f_Type,
+ kSampler2D_Type,
+ };
+
+ /**
+ * Defaults to a float with no precision specifier
+ */
+ GrGLShaderVar() {
+ fType = kFloat_Type;
+ fCount = kNonArray;
+ fEmitPrecision = false;
+ }
+
+ GrGLShaderVar(const GrGLShaderVar& var)
+ : fType(var.fType)
+ , fName(var.fName)
+ , fCount(var.fCount)
+ , fEmitPrecision(var.fEmitPrecision) {}
+
+ /**
+ * Values for array count that have special meaning. We allow 1-sized arrays.
+ */
+ enum {
+ kNonArray = 0, // not an array
+ kUnsizedArray = -1, // an unsized array (declared with [])
+ };
+
+ /**
+ * Sets as a non-array.
+ */
+ void set(Type type,
+ const GrStringBuilder& name,
+ bool emitPrecision = false) {
+ fType = type;
+ fName = name;
+ fCount = kNonArray;
+ fEmitPrecision = emitPrecision;
+ }
+
+ /**
+ * Sets as a non-array.
+ */
+ void set(Type type,
+ const char* name,
+ bool specifyPrecision = false) {
+ fType = type;
+ fName = name;
+ fCount = kNonArray;
+ fEmitPrecision = specifyPrecision;
+ }
+
+ /**
+ * Set all var options
+ */
+ void set(Type type,
+ const GrStringBuilder& name,
+ int count,
+ bool specifyPrecision = false) {
+ fType = type;
+ fName = name;
+ fCount = count;
+ fEmitPrecision = specifyPrecision;
+ }
+
+ /**
+ * Set all var options
+ */
+ void set(Type type,
+ const char* name,
+ int count,
+ bool specifyPrecision = false) {
+ fType = type;
+ fName = name;
+ fCount = count;
+ fEmitPrecision = specifyPrecision;
+ }
+
+ /**
+ * Is the var an array.
+ */
+ bool isArray() const { return kNonArray != fCount; }
+ /**
+ * Is this an unsized array, (i.e. declared with []).
+ */
+ bool isUnsizedArray() const { return kUnsizedArray == fCount; }
+ /**
+ * Get the array length of the var.
+ */
+ int getArrayCount() const { return fCount; }
+ /**
+ * Set the array length of the var
+ */
+ void setArrayCount(int count) { fCount = count; }
+ /**
+ * Set to be a non-array.
+ */
+ void setNonArray() { fCount = kNonArray; }
+ /**
+ * Set to be an unsized array.
+ */
+ void setUnsizedArray() { fCount = kUnsizedArray; }
+
+ /**
+ * Access the var name as a writable string
+ */
+ GrStringBuilder* accessName() { return &fName; }
+ /**
+ * Set the var name
+ */
+ void setName(const GrStringBuilder& n) { fName = n; }
+ void setName(const char* n) { fName = n; }
+ /**
+ * Get the var name.
+ */
+ const GrStringBuilder& getName() const { return fName; }
+
+ /**
+ * Get the type of the var
+ */
+ Type getType() const { return fType; }
+ /**
+ * Set the type of the var
+ */
+ void setType(Type type) { fType = type; }
+
+ /**
+ * Must the variable declaration emit a precision specifier
+ */
+ bool emitsPrecision() const { return fEmitPrecision; }
+ /**
+ * Specify whether the declaration should specify precision
+ */
+ void setEmitPrecision(bool p) { fEmitPrecision = p; }
+
+ /**
+ * Write a declaration of this variable to out.
+ */
+ void appendDecl(const GrGLInterface* gl, GrStringBuilder* out) const {
+ if (this->emitsPrecision()) {
+ out->append(PrecisionString(gl));
+ out->append(" ");
+ }
+ if (this->isArray()) {
+ if (this->isUnsizedArray()) {
+ out->appendf("%s %s[]",
+ TypeString(this->getType()),
+ this->getName().c_str());
+ } else {
+ GrAssert(this->getArrayCount() > 0);
+ out->appendf("%s %s[%d]",
+ TypeString(this->getType()),
+ this->getName().c_str(),
+ this->getArrayCount());
+ }
+ } else {
+ out->appendf("%s %s",
+ TypeString(this->getType()),
+ this->getName().c_str());
+ }
+ }
+
+ static const char* TypeString(Type t) {
+ switch (t) {
+ case kFloat_Type:
+ return "float";
+ case kVec2f_Type:
+ return "vec2";
+ case kVec3f_Type:
+ return "vec3";
+ case kVec4f_Type:
+ return "vec4";
+ case kMat33f_Type:
+ return "mat3";
+ case kSampler2D_Type:
+ return "sampler2D";
+ default:
+ GrCrash("Unknown shader var type.");
+ return ""; // suppress warning
+ }
+ }
+
+private:
+ static const char* PrecisionString(const GrGLInterface* gl) {
+ return gl->supportsDesktop() ? "" : "mediump";
+ }
+
+ Type fType;
+ GrStringBuilder fName;
+ int fCount;
+ bool fEmitPrecision;
+};
+
+#endif
diff --git a/src/gpu/GrGLStencilBuffer.cpp b/src/gpu/GrGLStencilBuffer.cpp
new file mode 100644
index 0000000000..bc0bb364e8
--- /dev/null
+++ b/src/gpu/GrGLStencilBuffer.cpp
@@ -0,0 +1,40 @@
+
+/*
+ * Copyright 2011 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+#include "GrGLStencilBuffer.h"
+#include "GrGpuGL.h"
+
+GrGLStencilBuffer::~GrGLStencilBuffer() {
+ this->release();
+}
+
+size_t GrGLStencilBuffer::sizeInBytes() const {
+ uint64_t size = this->width();
+ size *= this->height();
+ size *= fFormat.fTotalBits;
+ size *= GrMax(1,this->numSamples());
+ return static_cast<size_t>(size / 8);
+}
+
+void GrGLStencilBuffer::onRelease() {
+ if (0 != fRenderbufferID) {
+ GrGpuGL* gpuGL = (GrGpuGL*) this->getGpu();
+ const GrGLInterface* gl = gpuGL->glInterface();
+ GR_GL_CALL(gl, DeleteRenderbuffers(1, &fRenderbufferID));
+ fRenderbufferID = 0;
+ }
+ INHERITED::onRelease();
+}
+
+void GrGLStencilBuffer::onAbandon() {
+ fRenderbufferID = 0;
+ INHERITED::onAbandon();
+}
+
+
diff --git a/src/gpu/GrGLStencilBuffer.h b/src/gpu/GrGLStencilBuffer.h
new file mode 100644
index 0000000000..eaf7942118
--- /dev/null
+++ b/src/gpu/GrGLStencilBuffer.h
@@ -0,0 +1,60 @@
+
+/*
+ * Copyright 2011 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+#ifndef GrGLStencilBuffer_DEFINED
+#define GrGLStencilBuffer_DEFINED
+
+#include "GrGLInterface.h"
+#include "GrStencilBuffer.h"
+
+class GrGLStencilBuffer : public GrStencilBuffer {
+public:
+ static const GrGLenum kUnknownInternalFormat = ~0;
+ struct Format {
+ GrGLenum fInternalFormat;
+ GrGLuint fStencilBits;
+ GrGLuint fTotalBits;
+ bool fPacked;
+ };
+
+ GrGLStencilBuffer(GrGpu* gpu, GrGLint rbid,
+ int width, int height,
+ int sampleCnt,
+ const Format& format)
+ : GrStencilBuffer(gpu, width, height, format.fStencilBits, sampleCnt)
+ , fFormat(format)
+ , fRenderbufferID(rbid) {
+ }
+
+ virtual ~GrGLStencilBuffer();
+
+ virtual size_t sizeInBytes() const;
+
+ GrGLuint renderbufferID() const {
+ return fRenderbufferID;
+ }
+
+ const Format& format() const { return fFormat; }
+
+protected:
+ virtual void onRelease();
+
+ virtual void onAbandon();
+
+private:
+ Format fFormat;
+ // may be zero for external SBs associated with external RTs
+ // (we don't require the client to give us the id, just tell
+ // us how many bits of stencil there are).
+ GrGLuint fRenderbufferID;
+
+ typedef GrStencilBuffer INHERITED;
+};
+
+#endif
diff --git a/src/gpu/GrGLTexture.cpp b/src/gpu/GrGLTexture.cpp
new file mode 100644
index 0000000000..e302694043
--- /dev/null
+++ b/src/gpu/GrGLTexture.cpp
@@ -0,0 +1,187 @@
+
+/*
+ * Copyright 2011 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+#include "GrGLTexture.h"
+
+#include "GrGpuGL.h"
+
+#define GPUGL static_cast<GrGpuGL*>(getGpu())
+
+#define GL_CALL(X) GR_GL_CALL(GPUGL->glInterface(), X)
+
+const GrGLenum* GrGLTexture::WrapMode2GLWrap(GrGLBinding binding) {
+ static const GrGLenum mirrorRepeatModes[] = {
+ GR_GL_CLAMP_TO_EDGE,
+ GR_GL_REPEAT,
+ GR_GL_MIRRORED_REPEAT
+ };
+
+ static const GrGLenum repeatModes[] = {
+ GR_GL_CLAMP_TO_EDGE,
+ GR_GL_REPEAT,
+ GR_GL_REPEAT
+ };
+
+ if (kES1_GrGLBinding == binding) {
+ return repeatModes; // GL_MIRRORED_REPEAT not supported.
+ } else {
+ return mirrorRepeatModes;
+ }
+};
+
+void GrGLTexture::init(GrGpuGL* gpu,
+ const Desc& textureDesc,
+ const GrGLRenderTarget::Desc* rtDesc,
+ const TexParams& initialTexParams) {
+
+ GrAssert(0 != textureDesc.fTextureID);
+
+ fTexParams = initialTexParams;
+ fTexIDObj = new GrGLTexID(GPUGL->glInterface(),
+ textureDesc.fTextureID,
+ textureDesc.fOwnsID);
+ fUploadFormat = textureDesc.fUploadFormat;
+ fUploadByteCount = textureDesc.fUploadByteCount;
+ fUploadType = textureDesc.fUploadType;
+ fOrientation = textureDesc.fOrientation;
+ fScaleX = GrIntToScalar(textureDesc.fContentWidth) /
+ textureDesc.fAllocWidth;
+ fScaleY = GrIntToScalar(textureDesc.fContentHeight) /
+ textureDesc.fAllocHeight;
+
+ if (NULL != rtDesc) {
+ // we render to the top left
+ GrGLIRect vp;
+ vp.fLeft = 0;
+ vp.fWidth = textureDesc.fContentWidth;
+ vp.fHeight = textureDesc.fContentHeight;
+ vp.fBottom = textureDesc.fAllocHeight - textureDesc.fContentHeight;
+
+ fRenderTarget = new GrGLRenderTarget(gpu, *rtDesc, vp, fTexIDObj, this);
+ }
+}
+
+GrGLTexture::GrGLTexture(GrGpuGL* gpu,
+ const Desc& textureDesc,
+ const TexParams& initialTexParams)
+ : INHERITED(gpu,
+ textureDesc.fContentWidth,
+ textureDesc.fContentHeight,
+ textureDesc.fAllocWidth,
+ textureDesc.fAllocHeight,
+ textureDesc.fFormat) {
+ this->init(gpu, textureDesc, NULL, initialTexParams);
+}
+
+GrGLTexture::GrGLTexture(GrGpuGL* gpu,
+ const Desc& textureDesc,
+ const GrGLRenderTarget::Desc& rtDesc,
+ const TexParams& initialTexParams)
+ : INHERITED(gpu,
+ textureDesc.fContentWidth,
+ textureDesc.fContentHeight,
+ textureDesc.fAllocWidth,
+ textureDesc.fAllocHeight,
+ textureDesc.fFormat) {
+ this->init(gpu, textureDesc, &rtDesc, initialTexParams);
+}
+
+void GrGLTexture::onRelease() {
+ INHERITED::onRelease();
+ GPUGL->notifyTextureDelete(this);
+ if (NULL != fTexIDObj) {
+ fTexIDObj->unref();
+ fTexIDObj = NULL;
+ }
+}
+
+void GrGLTexture::onAbandon() {
+ INHERITED::onAbandon();
+ if (NULL != fTexIDObj) {
+ fTexIDObj->abandon();
+ }
+}
+
+void GrGLTexture::uploadTextureData(int x,
+ int y,
+ int width,
+ int height,
+ const void* srcData,
+ size_t rowBytes) {
+
+ GPUGL->setSpareTextureUnit();
+
+ // ES2 glCompressedTexSubImage2D doesn't support any formats
+ // (at least without extensions)
+ GrAssert(fUploadFormat != GR_GL_PALETTE8_RGBA8);
+
+ // in case we need a temporary, trimmed copy of the src pixels
+ SkAutoSMalloc<128 * 128> tempStorage;
+
+ if (!rowBytes) {
+ rowBytes = fUploadByteCount * width;
+ }
+ /*
+ * check whether to allocate a temporary buffer for flipping y or
+ * because our srcData has extra bytes past each row. If so, we need
+ * to trim those off here, since GL ES doesn't let us specify
+ * GL_UNPACK_ROW_LENGTH.
+ */
+ bool restoreGLRowLength = false;
+ bool flipY = kBottomUp_Orientation == fOrientation;
+ if (kDesktop_GrGLBinding == GPUGL->glBinding() && !flipY) {
+ // can't use this for flipping, only non-neg values allowed. :(
+ if (srcData && rowBytes) {
+ GL_CALL(PixelStorei(GR_GL_UNPACK_ROW_LENGTH,
+ rowBytes / fUploadByteCount));
+ restoreGLRowLength = true;
+ }
+ } else {
+ size_t trimRowBytes = width * fUploadByteCount;
+ if (srcData && (trimRowBytes < rowBytes || flipY)) {
+ // copy the data into our new storage, skipping the trailing bytes
+ size_t trimSize = height * trimRowBytes;
+ const char* src = (const char*)srcData;
+ if (flipY) {
+ src += (height - 1) * rowBytes;
+ }
+ char* dst = (char*)tempStorage.reset(trimSize);
+ for (int y = 0; y < height; y++) {
+ memcpy(dst, src, trimRowBytes);
+ if (flipY) {
+ src -= rowBytes;
+ } else {
+ src += rowBytes;
+ }
+ dst += trimRowBytes;
+ }
+ // now point srcData to our copied version
+ srcData = tempStorage.get();
+ }
+ }
+
+ if (flipY) {
+ y = this->height() - (y + height);
+ }
+ GL_CALL(BindTexture(GR_GL_TEXTURE_2D, fTexIDObj->id()));
+ GL_CALL(PixelStorei(GR_GL_UNPACK_ALIGNMENT, fUploadByteCount));
+ GL_CALL(TexSubImage2D(GR_GL_TEXTURE_2D, 0, x, y, width, height,
+ fUploadFormat, fUploadType, srcData));
+
+ if (kDesktop_GrGLBinding == GPUGL->glBinding()) {
+ if (restoreGLRowLength) {
+ GL_CALL(PixelStorei(GR_GL_UNPACK_ROW_LENGTH, 0));
+ }
+ }
+}
+
+intptr_t GrGLTexture::getTextureHandle() const {
+ return fTexIDObj->id();
+}
+
diff --git a/src/gpu/GrGLTexture.h b/src/gpu/GrGLTexture.h
new file mode 100644
index 0000000000..b0dc368adf
--- /dev/null
+++ b/src/gpu/GrGLTexture.h
@@ -0,0 +1,154 @@
+
+/*
+ * Copyright 2011 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+#ifndef GrGLTexture_DEFINED
+#define GrGLTexture_DEFINED
+
+#include "GrGLRenderTarget.h"
+#include "GrScalar.h"
+#include "GrTexture.h"
+
+/**
+ * A ref counted tex id that deletes the texture in its destructor.
+ */
+class GrGLTexID : public GrRefCnt {
+
+public:
+ GrGLTexID(const GrGLInterface* gl, GrGLuint texID, bool ownsID)
+ : fGL(gl)
+ , fTexID(texID)
+ , fOwnsID(ownsID) {
+ }
+
+ virtual ~GrGLTexID() {
+ if (0 != fTexID && fOwnsID) {
+ GR_GL_CALL(fGL, DeleteTextures(1, &fTexID));
+ }
+ }
+
+ void abandon() { fTexID = 0; }
+ GrGLuint id() const { return fTexID; }
+
+private:
+ const GrGLInterface* fGL;
+ GrGLuint fTexID;
+ bool fOwnsID;
+};
+
+////////////////////////////////////////////////////////////////////////////////
+
+
+class GrGLTexture : public GrTexture {
+
+public:
+ enum Orientation {
+ kBottomUp_Orientation,
+ kTopDown_Orientation,
+ };
+
+ struct TexParams {
+ GrGLenum fFilter;
+ GrGLenum fWrapS;
+ GrGLenum fWrapT;
+ void invalidate() { memset(this, 0xff, sizeof(TexParams)); }
+ };
+
+ struct Desc {
+ int fContentWidth;
+ int fContentHeight;
+ int fAllocWidth;
+ int fAllocHeight;
+ GrPixelConfig fFormat;
+ GrGLuint fTextureID;
+ bool fOwnsID;
+ GrGLenum fUploadFormat;
+ GrGLenum fUploadByteCount;
+ GrGLenum fUploadType;
+ Orientation fOrientation;
+ };
+
+ // creates a texture that is also an RT
+ GrGLTexture(GrGpuGL* gpu,
+ const Desc& textureDesc,
+ const GrGLRenderTarget::Desc& rtDesc,
+ const TexParams& initialTexParams);
+
+ // creates a non-RT texture
+ GrGLTexture(GrGpuGL* gpu,
+ const Desc& textureDesc,
+ const TexParams& initialTexParams);
+
+
+ virtual ~GrGLTexture() { this->release(); }
+
+ // overrides of GrTexture
+ virtual void uploadTextureData(int x,
+ int y,
+ int width,
+ int height,
+ const void* srcData,
+ size_t rowBytes);
+ virtual intptr_t getTextureHandle() const;
+
+ const TexParams& getTexParams() const { return fTexParams; }
+ void setTexParams(const TexParams& texParams) { fTexParams = texParams; }
+ GrGLuint textureID() const { return fTexIDObj->id(); }
+
+ GrGLenum uploadFormat() const { return fUploadFormat; }
+ GrGLenum uploadByteCount() const { return fUploadByteCount; }
+ GrGLenum uploadType() const { return fUploadType; }
+
+ /**
+ * @return width() / allocWidth()
+ */
+ GrScalar contentScaleX() const { return fScaleX; }
+
+ /**
+ * @return height() / allocHeight()
+ */
+ GrScalar contentScaleY() const { return fScaleY; }
+
+ // Ganesh assumes texture coordinates have their origin
+ // in the top-left corner of the image. OpenGL, however,
+ // has the origin in the lower-left corner. For content that
+ // is loaded by Ganesh we just push the content "upside down"
+    // (by GL's understanding of the world) in glTex*Image and the
+    // addressing just works out. However, content generated by GL
+    // (FBO or externally imported texture) will be upside down
+    // and it is up to the GrGpuGL derivative to handle y-mirroring.
+ Orientation orientation() const { return fOrientation; }
+
+ static const GrGLenum* WrapMode2GLWrap(GrGLBinding binding);
+
+protected:
+
+ // overrides of GrTexture
+ virtual void onAbandon();
+ virtual void onRelease();
+
+private:
+ TexParams fTexParams;
+ GrGLTexID* fTexIDObj;
+ GrGLenum fUploadFormat;
+ GrGLenum fUploadByteCount;
+ GrGLenum fUploadType;
+ // precomputed content / alloc ratios
+ GrScalar fScaleX;
+ GrScalar fScaleY;
+ Orientation fOrientation;
+
+ void init(GrGpuGL* gpu,
+ const Desc& textureDesc,
+ const GrGLRenderTarget::Desc* rtDesc,
+ const TexParams& initialTexParams);
+
+ typedef GrTexture INHERITED;
+};
+
+#endif
diff --git a/src/gpu/GrGLUtil.cpp b/src/gpu/GrGLUtil.cpp
new file mode 100644
index 0000000000..8056e66216
--- /dev/null
+++ b/src/gpu/GrGLUtil.cpp
@@ -0,0 +1,48 @@
+
+/*
+ * Copyright 2011 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+#include "GrGLConfig.h"
+#include "GrGLInterface.h"
+
+void GrGLClearErr(const GrGLInterface* gl) {
+ while (GR_GL_NO_ERROR != gl->fGetError()) {}
+}
+
+void GrGLCheckErr(const GrGLInterface* gl,
+ const char* location,
+ const char* call) {
+ uint32_t err = GR_GL_GET_ERROR(gl);
+ if (GR_GL_NO_ERROR != err) {
+ GrPrintf("---- glGetError %x", err);
+ if (NULL != location) {
+ GrPrintf(" at\n\t%s", location);
+ }
+ if (NULL != call) {
+ GrPrintf("\n\t\t%s", call);
+ }
+ GrPrintf("\n");
+ }
+}
+
+void GrGLResetRowLength(const GrGLInterface* gl) {
+ if (gl->supportsDesktop()) {
+ GR_GL_CALL(gl, PixelStorei(GR_GL_UNPACK_ROW_LENGTH, 0));
+ }
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+#if GR_GL_LOG_CALLS
+ bool gLogCallsGL = !!(GR_GL_LOG_CALLS_START);
+#endif
+
+#if GR_GL_CHECK_ERROR
+ bool gCheckErrorGL = !!(GR_GL_CHECK_ERROR_START);
+#endif
+
diff --git a/src/gpu/GrGLVertexBuffer.cpp b/src/gpu/GrGLVertexBuffer.cpp
new file mode 100644
index 0000000000..33c1e7e3e0
--- /dev/null
+++ b/src/gpu/GrGLVertexBuffer.cpp
@@ -0,0 +1,126 @@
+
+/*
+ * Copyright 2011 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+
+#include "GrGLVertexBuffer.h"
+#include "GrGpuGL.h"
+
+#define GPUGL static_cast<GrGpuGL*>(getGpu())
+
+#define GL_CALL(X) GR_GL_CALL(GPUGL->glInterface(), X)
+
+GrGLVertexBuffer::GrGLVertexBuffer(GrGpuGL* gpu,
+ GrGLuint id,
+ size_t sizeInBytes,
+ bool dynamic)
+ : INHERITED(gpu, sizeInBytes, dynamic)
+ , fBufferID(id)
+ , fLockPtr(NULL) {
+}
+
+void GrGLVertexBuffer::onRelease() {
+ // make sure we've not been abandoned
+ if (fBufferID) {
+ GPUGL->notifyVertexBufferDelete(this);
+ GL_CALL(DeleteBuffers(1, &fBufferID));
+ fBufferID = 0;
+ }
+}
+
+void GrGLVertexBuffer::onAbandon() {
+ fBufferID = 0;
+ fLockPtr = NULL;
+}
+
+void GrGLVertexBuffer::bind() const {
+ GL_CALL(BindBuffer(GR_GL_ARRAY_BUFFER, fBufferID));
+ GPUGL->notifyVertexBufferBind(this);
+}
+
+GrGLuint GrGLVertexBuffer::bufferID() const {
+ return fBufferID;
+}
+
+void* GrGLVertexBuffer::lock() {
+ GrAssert(fBufferID);
+ GrAssert(!isLocked());
+ if (this->getGpu()->getCaps().fBufferLockSupport) {
+ this->bind();
+ // Let driver know it can discard the old data
+ GL_CALL(BufferData(GR_GL_ARRAY_BUFFER, this->sizeInBytes(), NULL,
+ this->dynamic() ? GR_GL_DYNAMIC_DRAW :
+ GR_GL_STATIC_DRAW));
+ GR_GL_CALL_RET(GPUGL->glInterface(),
+ fLockPtr,
+ MapBuffer(GR_GL_ARRAY_BUFFER, GR_GL_WRITE_ONLY));
+ return fLockPtr;
+ }
+ return NULL;
+}
+
+void* GrGLVertexBuffer::lockPtr() const {
+ return fLockPtr;
+}
+
+void GrGLVertexBuffer::unlock() {
+
+ GrAssert(fBufferID);
+ GrAssert(isLocked());
+ GrAssert(this->getGpu()->getCaps().fBufferLockSupport);
+
+ this->bind();
+ GL_CALL(UnmapBuffer(GR_GL_ARRAY_BUFFER));
+ fLockPtr = NULL;
+}
+
+bool GrGLVertexBuffer::isLocked() const {
+ GrAssert(!this->isValid() || fBufferID);
+#if GR_DEBUG
+ if (this->isValid() && this->getGpu()->getCaps().fBufferLockSupport) {
+ GrGLint mapped;
+ this->bind();
+ GL_CALL(GetBufferParameteriv(GR_GL_ARRAY_BUFFER,
+ GR_GL_BUFFER_MAPPED, &mapped));
+ GrAssert(!!mapped == !!fLockPtr);
+ }
+#endif
+ return NULL != fLockPtr;
+}
+
+bool GrGLVertexBuffer::updateData(const void* src, size_t srcSizeInBytes) {
+ GrAssert(fBufferID);
+ GrAssert(!isLocked());
+ if (srcSizeInBytes > this->sizeInBytes()) {
+ return false;
+ }
+ this->bind();
+ GrGLenum usage = dynamic() ? GR_GL_DYNAMIC_DRAW : GR_GL_STATIC_DRAW;
+#if !GR_GL_USE_BUFFER_DATA_NULL_HINT
+ // Note that we're cheating on the size here. Currently no methods
+ // allow a partial update that preserves contents of non-updated
+ // portions of the buffer (and lock() does a glBufferData(..size, NULL..))
+ GL_CALL(BufferData(GR_GL_ARRAY_BUFFER, srcSizeInBytes, src, usage));
+#else
+ if (this->sizeInBytes() == srcSizeInBytes) {
+ GL_CALL(BufferData(GR_GL_ARRAY_BUFFER, srcSizeInBytes, src, usage));
+ } else {
+ // Before we call glBufferSubData we give the driver a hint using
+ // glBufferData with NULL. This makes the old buffer contents
+ // inaccessible to future draws. The GPU may still be processing draws
+ // that reference the old contents. With this hint it can assign a
+ // different allocation for the new contents to avoid flushing the gpu
+ // past draws consuming the old contents.
+ GL_CALL(BufferData(GR_GL_ARRAY_BUFFER,
+ this->sizeInBytes(), NULL, usage));
+ GL_CALL(BufferSubData(GR_GL_ARRAY_BUFFER, 0, srcSizeInBytes, src));
+ }
+#endif
+ return true;
+}
+
diff --git a/src/gpu/GrGLVertexBuffer.h b/src/gpu/GrGLVertexBuffer.h
new file mode 100644
index 0000000000..15fc54a983
--- /dev/null
+++ b/src/gpu/GrGLVertexBuffer.h
@@ -0,0 +1,52 @@
+
+/*
+ * Copyright 2011 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+
+#ifndef GrGLVertexBuffer_DEFINED
+#define GrGLVertexBuffer_DEFINED
+
+#include "GrVertexBuffer.h"
+#include "GrGLInterface.h"
+
+class GrGpuGL;
+
+class GrGLVertexBuffer : public GrVertexBuffer {
+
+public:
+ virtual ~GrGLVertexBuffer() { this->release(); }
+ // overrides of GrVertexBuffer
+ virtual void* lock();
+ virtual void* lockPtr() const;
+ virtual void unlock();
+ virtual bool isLocked() const;
+ virtual bool updateData(const void* src, size_t srcSizeInBytes);
+ GrGLuint bufferID() const;
+
+protected:
+ GrGLVertexBuffer(GrGpuGL* gpu,
+ GrGLuint id,
+ size_t sizeInBytes,
+ bool dynamic);
+
+ // overrides of GrResource
+ virtual void onAbandon();
+ virtual void onRelease();
+
+private:
+ void bind() const;
+
+ GrGLuint fBufferID;
+ void* fLockPtr;
+
+ friend class GrGpuGL;
+
+ typedef GrVertexBuffer INHERITED;
+};
+
+#endif
diff --git a/src/gpu/GrGeometryBuffer.h b/src/gpu/GrGeometryBuffer.h
new file mode 100644
index 0000000000..c74b25487d
--- /dev/null
+++ b/src/gpu/GrGeometryBuffer.h
@@ -0,0 +1,90 @@
+
+/*
+ * Copyright 2011 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+#ifndef GrGeometryBuffer_DEFINED
+#define GrGeometryBuffer_DEFINED
+
+#include "GrResource.h"
+
+class GrGpu;
+
+/**
+ * Parent class for vertex and index buffers
+ */
+class GrGeometryBuffer : public GrResource {
+public:
+
+ /**
+     * Retrieves whether the buffer was created with the dynamic flag
+ *
+ * @return true if the buffer was created with the dynamic flag
+ */
+ bool dynamic() const { return fDynamic; }
+
+ /**
+ * Locks the buffer to be written by the CPU.
+ *
+ * The previous content of the buffer is invalidated. It is an error
+ * to draw from the buffer while it is locked. It is an error to call lock
+ * on an already locked buffer.
+ *
+ * @return a pointer to the data or NULL if the lock fails.
+ */
+ virtual void* lock() = 0;
+
+ /**
+     * Returns the same ptr that lock() returned at time of lock or NULL if
+     * the buffer is not locked.
+ *
+ * @return ptr to locked buffer data or undefined if buffer is not locked.
+ */
+ virtual void* lockPtr() const = 0;
+
+ /**
+ * Unlocks the buffer.
+ *
+ * The pointer returned by the previous lock call will no longer be valid.
+ */
+ virtual void unlock() = 0;
+
+ /**
+ Queries whether the buffer has been locked.
+
+ @return true if the buffer is locked, false otherwise.
+ */
+ virtual bool isLocked() const = 0;
+
+ /**
+ * Updates the buffer data.
+ *
+ * The size of the buffer will be preserved. The src data will be
+     * placed at the beginning of the buffer and any remaining contents will
+ * be undefined.
+ *
+ * @return returns true if the update succeeds, false otherwise.
+ */
+ virtual bool updateData(const void* src, size_t srcSizeInBytes) = 0;
+
+ // GrResource overrides
+ virtual size_t sizeInBytes() const { return fSizeInBytes; }
+
+protected:
+ GrGeometryBuffer(GrGpu* gpu, size_t sizeInBytes, bool dynamic)
+ : INHERITED(gpu)
+ , fSizeInBytes(sizeInBytes)
+ , fDynamic(dynamic) {}
+
+private:
+ size_t fSizeInBytes;
+ bool fDynamic;
+
+ typedef GrResource INHERITED;
+};
+
+#endif
diff --git a/src/gpu/GrGpu.cpp b/src/gpu/GrGpu.cpp
new file mode 100644
index 0000000000..b05f2c0b1d
--- /dev/null
+++ b/src/gpu/GrGpu.cpp
@@ -0,0 +1,937 @@
+
+/*
+ * Copyright 2010 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+#include "GrGpu.h"
+
+#include "GrBufferAllocPool.h"
+#include "GrClipIterator.h"
+#include "GrContext.h"
+#include "GrIndexBuffer.h"
+#include "GrPathRenderer.h"
+#include "GrGLStencilBuffer.h"
+#include "GrVertexBuffer.h"
+
+// probably makes no sense for this to be less than a page
// Sizing for the geometry staging pools created lazily in
// prepareVertexPool()/prepareIndexPool().
// probably makes no sense for this to be less than a page
static const size_t VERTEX_POOL_VB_SIZE = 1 << 18;
static const int VERTEX_POOL_VB_COUNT = 4;
static const size_t INDEX_POOL_IB_SIZE = 1 << 16;
static const int INDEX_POOL_IB_COUNT = 4;
+
+////////////////////////////////////////////////////////////////////////////////
+
extern void gr_run_unittests();

// Poison values written into GeometryPoolState in debug builds so that any
// use of pool state before a source is set fails conspicuously.
#define DEBUG_INVAL_BUFFER 0xdeadcafe
#define DEBUG_INVAL_START_IDX -1
+
// Constructs a GrGpu with no context attached and no resources allocated.
// Pools, cached buffers, and the path-renderer chain are all created lazily.
GrGpu::GrGpu()
    : fContext(NULL)
    , fVertexPool(NULL)
    , fIndexPool(NULL)
    , fVertexPoolUseCnt(0)
    , fIndexPoolUseCnt(0)
    , fQuadIndexBuffer(NULL)
    , fUnitSquareVertexBuffer(NULL)
    , fPathRendererChain(NULL)
    , fContextIsDirty(true)
    , fResourceHead(NULL) {

#if GR_DEBUG
    //gr_run_unittests();
#endif

    // Seed the geometry pool-state stack with one entry; in debug builds its
    // fields are poisoned until a geometry source is actually set.
    fGeomPoolStateStack.push_back();
#if GR_DEBUG
    GeometryPoolState& poolState = fGeomPoolStateStack.back();
    poolState.fPoolVertexBuffer = (GrVertexBuffer*)DEBUG_INVAL_BUFFER;
    poolState.fPoolStartVertex = DEBUG_INVAL_START_IDX;
    poolState.fPoolIndexBuffer = (GrIndexBuffer*)DEBUG_INVAL_BUFFER;
    poolState.fPoolStartIndex = DEBUG_INVAL_START_IDX;
#endif
    resetStats();
}
+
GrGpu::~GrGpu() {
    // Tear down every resource this GrGpu still tracks before destruction.
    this->releaseResources();
}
+
// Forgets all tracked resources without freeing their 3D API objects
// (used when the underlying context has been lost). Each abandon() call
// unlinks the resource from the list, so the loop drains the list head.
// NOTE(review): intentionally mirrors releaseResources() except for
// abandon() vs. release().
void GrGpu::abandonResources() {

    while (NULL != fResourceHead) {
        fResourceHead->abandon();
    }

    GrAssert(NULL == fQuadIndexBuffer || !fQuadIndexBuffer->isValid());
    GrAssert(NULL == fUnitSquareVertexBuffer ||
             !fUnitSquareVertexBuffer->isValid());
    GrSafeSetNull(fQuadIndexBuffer);
    GrSafeSetNull(fUnitSquareVertexBuffer);
    delete fVertexPool;
    fVertexPool = NULL;
    delete fIndexPool;
    fIndexPool = NULL;
    // in case path renderer has any GrResources, start from scratch
    GrSafeSetNull(fPathRendererChain);
}
+
// Frees all tracked resources (3D API objects included). Each release()
// call unlinks the resource from the list, so the loop drains the list head.
// NOTE(review): intentionally mirrors abandonResources() except for
// release() vs. abandon().
void GrGpu::releaseResources() {

    while (NULL != fResourceHead) {
        fResourceHead->release();
    }

    GrAssert(NULL == fQuadIndexBuffer || !fQuadIndexBuffer->isValid());
    GrAssert(NULL == fUnitSquareVertexBuffer ||
             !fUnitSquareVertexBuffer->isValid());
    GrSafeSetNull(fQuadIndexBuffer);
    GrSafeSetNull(fUnitSquareVertexBuffer);
    delete fVertexPool;
    fVertexPool = NULL;
    delete fIndexPool;
    fIndexPool = NULL;
    // in case path renderer has any GrResources, start from scratch
    GrSafeSetNull(fPathRendererChain);
}
+
+void GrGpu::insertResource(GrResource* resource) {
+ GrAssert(NULL != resource);
+ GrAssert(this == resource->getGpu());
+ GrAssert(NULL == resource->fNext);
+ GrAssert(NULL == resource->fPrevious);
+
+ resource->fNext = fResourceHead;
+ if (NULL != fResourceHead) {
+ GrAssert(NULL == fResourceHead->fPrevious);
+ fResourceHead->fPrevious = resource;
+ }
+ fResourceHead = resource;
+}
+
// Unlinks 'resource' from the tracked-resource list in O(1). Called when a
// resource is released or abandoned.
void GrGpu::removeResource(GrResource* resource) {
    GrAssert(NULL != resource);
    GrAssert(NULL != fResourceHead);

    if (fResourceHead == resource) {
        GrAssert(NULL == resource->fPrevious);
        fResourceHead = resource->fNext;
    } else {
        GrAssert(NULL != fResourceHead);  // redundant with assert above
        resource->fPrevious->fNext = resource->fNext;
    }
    if (NULL != resource->fNext) {
        resource->fNext->fPrevious = resource->fPrevious;
    }
    // Leave the node fully unlinked so a later insert passes its asserts.
    resource->fNext = NULL;
    resource->fPrevious = NULL;
}
+
+
// Debug-only notice that a GrGpu entry point has no implementation in the
// active backend; a no-op in release builds.
void GrGpu::unimpl(const char msg[]) {
#if GR_DEBUG
    GrPrintf("--- GrGpu unimplemented(\"%s\")\n", msg);
#endif
}
+
+////////////////////////////////////////////////////////////////////////////////
+
// Creates a texture via the backend hook. If the texture is a render target
// and a stencil buffer was not explicitly declined, a stencil buffer is
// attached immediately; failure to attach releases the texture and returns
// NULL.
GrTexture* GrGpu::createTexture(const GrTextureDesc& desc,
                                const void* srcData, size_t rowBytes) {
    this->handleDirtyContext();
    GrTexture* tex = this->onCreateTexture(desc, srcData, rowBytes);
    if (NULL != tex &&
        (kRenderTarget_GrTextureFlagBit & desc.fFlags) &&
        !(kNoStencil_GrTextureFlagBit & desc.fFlags)) {
        GrAssert(NULL != tex->asRenderTarget());
        // TODO: defer this and attach dynamically
        if (!this->attachStencilBufferToRenderTarget(tex->asRenderTarget())) {
            tex->unref();
            return NULL;
        }
    }
    return tex;
}
+
// Attaches a stencil buffer to 'rt', preferring a cached buffer of matching
// dimensions/sample count from the context before creating a new one. A
// freshly created stencil buffer is cleared once here while temporarily
// bound as the current render target. Returns false on failure.
bool GrGpu::attachStencilBufferToRenderTarget(GrRenderTarget* rt) {
    GrAssert(NULL == rt->getStencilBuffer());
    GrStencilBuffer* sb =
        this->getContext()->findStencilBuffer(rt->allocatedWidth(),
                                              rt->allocatedHeight(),
                                              rt->numSamples());
    if (NULL != sb) {
        rt->setStencilBuffer(sb);
        bool attached = this->attachStencilBufferToRenderTarget(sb, rt);
        if (!attached) {
            rt->setStencilBuffer(NULL);
        }
        return attached;
    }
    if (this->createStencilBufferForRenderTarget(rt, rt->allocatedWidth(),
                                                 rt->allocatedHeight())) {
        rt->getStencilBuffer()->ref();
        rt->getStencilBuffer()->transferToCacheAndLock();

        // Right now we're clearing the stencil buffer here after it is
        // attached to an RT for the first time. When we start matching
        // stencil buffers with smaller color targets this will no longer
        // be correct because it won't be guaranteed to clear the entire
        // sb.
        // We used to clear down in the GL subclass using a special purpose
        // FBO. But iOS doesn't allow a stencil-only FBO. It reports unsupported
        // FBO status.
        GrRenderTarget* oldRT = fCurrDrawState.fRenderTarget;
        fCurrDrawState.fRenderTarget = rt;
        this->clearStencil();
        fCurrDrawState.fRenderTarget = oldRT;
        return true;
    } else {
        return false;
    }
}
+
// Wraps an externally created platform surface; flushes any pending 3D API
// state first, then defers to the backend hook.
GrResource* GrGpu::createPlatformSurface(const GrPlatformSurfaceDesc& desc) {
    this->handleDirtyContext();
    return this->onCreatePlatformSurface(desc);
}
+
// Creates a vertex buffer of 'size' bytes via the backend hook; 'dynamic'
// hints that the contents will be updated frequently.
GrVertexBuffer* GrGpu::createVertexBuffer(uint32_t size, bool dynamic) {
    this->handleDirtyContext();
    return this->onCreateVertexBuffer(size, dynamic);
}
+
// Creates an index buffer of 'size' bytes via the backend hook; 'dynamic'
// hints that the contents will be updated frequently.
GrIndexBuffer* GrGpu::createIndexBuffer(uint32_t size, bool dynamic) {
    this->handleDirtyContext();
    return this->onCreateIndexBuffer(size, dynamic);
}
+
// Clears the current render target (or the subrect 'rect' if non-NULL) to
// 'color' via the backend hook.
void GrGpu::clear(const GrIRect* rect, GrColor color) {
    this->handleDirtyContext();
    this->onClear(rect, color);
}
+
// Forces pending rendering to the current render target to be issued to the
// 3D API via the backend hook.
void GrGpu::forceRenderTargetFlush() {
    this->handleDirtyContext();
    this->onForceRenderTargetFlush();
}
+
// Reads back a rectangle of pixels from 'target' into 'buffer' in the given
// pixel config; returns the backend hook's success/failure.
bool GrGpu::readPixels(GrRenderTarget* target,
                       int left, int top, int width, int height,
                       GrPixelConfig config, void* buffer) {

    this->handleDirtyContext();
    return this->onReadPixels(target, left, top, width, height, config, buffer);
}
+
+////////////////////////////////////////////////////////////////////////////////
+
// Quad capacity of the shared quad index buffer. Bounded so that the largest
// vertex index (4 * MAX_QUADS - 1) still fits in a uint16_t.
static const int MAX_QUADS = 1 << 12; // max possible: (1 << 14) - 1;

GR_STATIC_ASSERT(4 * MAX_QUADS <= 65535);
+
// Writes index data for 'quadCount' quads into 'indices' (which must hold at
// least 6 * quadCount entries). Quad q uses vertices [4q, 4q+3] and is
// emitted as the two triangles (0,1,2) and (0,2,3) relative to its base.
static inline void fill_indices(uint16_t* indices, int quadCount) {
    uint16_t* out = indices;
    for (int q = 0; q < quadCount; ++q) {
        uint16_t base = (uint16_t)(4 * q);
        *out++ = base;
        *out++ = base + 1;
        *out++ = base + 2;
        *out++ = base;
        *out++ = base + 2;
        *out++ = base + 3;
    }
}
+
// Lazily creates and caches an index buffer describing MAX_QUADS quads as
// triangle pairs. Prefers filling via lock(); falls back to updateData()
// with a temporary heap copy when locking is unsupported. Crashes if neither
// path can populate the buffer. Returns NULL if buffer creation fails.
const GrIndexBuffer* GrGpu::getQuadIndexBuffer() const {
    if (NULL == fQuadIndexBuffer) {
        static const int SIZE = sizeof(uint16_t) * 6 * MAX_QUADS;
        // caching on a const GrGpu is logically const; cast away to mutate
        GrGpu* me = const_cast<GrGpu*>(this);
        fQuadIndexBuffer = me->createIndexBuffer(SIZE, false);
        if (NULL != fQuadIndexBuffer) {
            uint16_t* indices = (uint16_t*)fQuadIndexBuffer->lock();
            if (NULL != indices) {
                fill_indices(indices, MAX_QUADS);
                fQuadIndexBuffer->unlock();
            } else {
                // lock failed/unsupported: stage in CPU memory and upload
                indices = (uint16_t*)GrMalloc(SIZE);
                fill_indices(indices, MAX_QUADS);
                if (!fQuadIndexBuffer->updateData(indices, SIZE)) {
                    fQuadIndexBuffer->unref();
                    fQuadIndexBuffer = NULL;
                    GrCrash("Can't get indices into buffer!");
                }
                GrFree(indices);
            }
        }
    }

    return fQuadIndexBuffer;
}
+
+const GrVertexBuffer* GrGpu::getUnitSquareVertexBuffer() const {
+ if (NULL == fUnitSquareVertexBuffer) {
+
+ static const GrPoint DATA[] = {
+ { 0, 0 },
+ { GR_Scalar1, 0 },
+ { GR_Scalar1, GR_Scalar1 },
+ { 0, GR_Scalar1 }
+#if 0
+ GrPoint(0, 0),
+ GrPoint(GR_Scalar1,0),
+ GrPoint(GR_Scalar1,GR_Scalar1),
+ GrPoint(0, GR_Scalar1)
+#endif
+ };
+ static const size_t SIZE = sizeof(DATA);
+
+ GrGpu* me = const_cast<GrGpu*>(this);
+ fUnitSquareVertexBuffer = me->createVertexBuffer(SIZE, false);
+ if (NULL != fUnitSquareVertexBuffer) {
+ if (!fUnitSquareVertexBuffer->updateData(DATA, SIZE)) {
+ fUnitSquareVertexBuffer->unref();
+ fUnitSquareVertexBuffer = NULL;
+ GrCrash("Can't get vertices into buffer!");
+ }
+ }
+ }
+
+ return fUnitSquareVertexBuffer;
+}
+
+////////////////////////////////////////////////////////////////////////////////
+
+// stencil settings to use when clip is in stencil
+const GrStencilSettings GrGpu::gClipStencilSettings = {
+ kKeep_StencilOp, kKeep_StencilOp,
+ kKeep_StencilOp, kKeep_StencilOp,
+ kAlwaysIfInClip_StencilFunc, kAlwaysIfInClip_StencilFunc,
+ 0, 0,
+ 0, 0,
+ 0, 0
+};
+
// mapping of clip-respecting stencil funcs to normal stencil funcs
// mapping depends on whether stencil-clipping is in effect.
// Indexed by [stencil-clipping enabled][func - kBasicStencilFuncCount]
// (see ConvertStencilFunc below).
static const GrStencilFunc gGrClipToNormalStencilFunc[2][kClipStencilFuncCount] = {
    {// Stencil-Clipping is DISABLED, effectively always inside the clip
     // In the Clip Funcs
        kAlways_StencilFunc,          // kAlwaysIfInClip_StencilFunc
        kEqual_StencilFunc,           // kEqualIfInClip_StencilFunc
        kLess_StencilFunc,            // kLessIfInClip_StencilFunc
        kLEqual_StencilFunc,          // kLEqualIfInClip_StencilFunc
     // Special in the clip func that forces user's ref to be 0.
        kNotEqual_StencilFunc,        // kNonZeroIfInClip_StencilFunc
                                      // make ref 0 and do normal nequal.
    },
    {// Stencil-Clipping is ENABLED
     // In the Clip Funcs
        kEqual_StencilFunc,           // kAlwaysIfInClip_StencilFunc
                                      // eq stencil clip bit, mask
                                      // out user bits.

        kEqual_StencilFunc,           // kEqualIfInClip_StencilFunc
                                      // add stencil bit to mask and ref

        kLess_StencilFunc,            // kLessIfInClip_StencilFunc
        kLEqual_StencilFunc,          // kLEqualIfInClip_StencilFunc
                                      // for both of these we can add
                                      // the clip bit to the mask and
                                      // ref and compare as normal
     // Special in the clip func that forces user's ref to be 0.
        kLess_StencilFunc,            // kNonZeroIfInClip_StencilFunc
                                      // make ref have only the clip bit set
                                      // and make comparison be less
                                      // 10..0 < 1..user_bits..
    }
};
+
// Maps a clip-aware stencil func to the plain 3D API func to use, based on
// whether the clip currently lives in the stencil buffer. Basic (non-clip)
// funcs pass through unchanged.
GrStencilFunc GrGpu::ConvertStencilFunc(bool stencilInClip, GrStencilFunc func) {
    GrAssert(func >= 0);
    if (func >= kBasicStencilFuncCount) {
        GrAssert(func < kStencilFuncCount);
        func = gGrClipToNormalStencilFunc[stencilInClip ? 1 : 0][func - kBasicStencilFuncCount];
        GrAssert(func >= 0 && func < kBasicStencilFuncCount);
    }
    return func;
}
+
// Adjusts the stencil ref and mask in-place to account for the clip bit.
// For basic funcs (or when the clip is not in the stencil) the ref/mask are
// simply restricted to the user-controlled bits; for clip-aware funcs the
// clip bit is folded into ref/mask per the mapping in
// gGrClipToNormalStencilFunc.
void GrGpu::ConvertStencilFuncAndMask(GrStencilFunc func,
                                      bool clipInStencil,
                                      unsigned int clipBit,
                                      unsigned int userBits,
                                      unsigned int* ref,
                                      unsigned int* mask) {
    if (func < kBasicStencilFuncCount) {
        *mask &= userBits;
        *ref &= userBits;
    } else {
        if (clipInStencil) {
            switch (func) {
                case kAlwaysIfInClip_StencilFunc:
                    // test only the clip bit
                    *mask = clipBit;
                    *ref = clipBit;
                    break;
                case kEqualIfInClip_StencilFunc:
                case kLessIfInClip_StencilFunc:
                case kLEqualIfInClip_StencilFunc:
                    // include the clip bit in both mask and ref
                    *mask = (*mask & userBits) | clipBit;
                    *ref = (*ref & userBits) | clipBit;
                    break;
                case kNonZeroIfInClip_StencilFunc:
                    // ref is only the clip bit; compare is 'less', so any
                    // non-zero user bits inside the clip pass
                    *mask = (*mask & userBits) | clipBit;
                    *ref = clipBit;
                    break;
                default:
                    GrCrash("Unknown stencil func");
            }
        } else {
            *mask &= userBits;
            *ref &= userBits;
        }
    }
}
+
+////////////////////////////////////////////////////////////////////////////////
+
+#define VISUALIZE_COMPLEX_CLIP 0
+
+#if VISUALIZE_COMPLEX_CLIP
+ #include "GrRandom.h"
+ GrRandom gRandom;
+ #define SET_RANDOM_COLOR this->setColor(0xff000000 | gRandom.nextU());
+#else
+ #define SET_RANDOM_COLOR
+#endif
+
namespace {
// determines how many elements at the head of the clip can be skipped and
// whether the initial clear should be to the inside- or outside-the-clip value,
// and what op should be used to draw the first element that isn't skipped.
// Returns the index of the first element that must be drawn, or the element
// count if every element can be skipped.
int process_initial_clip_elements(const GrClip& clip,
                                  bool* clearToInside,
                                  GrSetOp* startOp) {

    // logically before the first element of the clip stack is
    // processed the clip is entirely open. However, depending on the
    // first set op we may prefer to clear to 0 for performance. We may
    // also be able to skip the initial clip paths/rects. We loop until
    // we cannot skip an element.
    int curr;
    bool done = false;
    *clearToInside = true;
    int count = clip.getElementCount();

    for (curr = 0; curr < count && !done; ++curr) {
        switch (clip.getOp(curr)) {
            case kReplace_SetOp:
                // replace ignores everything previous
                *startOp = kReplace_SetOp;
                *clearToInside = false;
                done = true;
                break;
            case kIntersect_SetOp:
                // if everything is initially clearToInside then intersect is
                // same as clear to 0 and treat as a replace. Otherwise,
                // set stays empty.
                if (*clearToInside) {
                    *startOp = kReplace_SetOp;
                    *clearToInside = false;
                    done = true;
                }
                break;
                // we can skip a leading union.
            case kUnion_SetOp:
                // if everything is initially outside then union is
                // same as replace. Otherwise, every pixel is still
                // clearToInside
                if (!*clearToInside) {
                    *startOp = kReplace_SetOp;
                    done = true;
                }
                break;
            case kXor_SetOp:
                // xor is same as difference or replace both of which
                // can be 1-pass instead of 2 for xor.
                if (*clearToInside) {
                    *startOp = kDifference_SetOp;
                } else {
                    *startOp = kReplace_SetOp;
                }
                done = true;
                break;
            case kDifference_SetOp:
                // if all pixels are clearToInside then we have to process the
                // difference, otherwise it has no effect and all pixels
                // remain outside.
                if (*clearToInside) {
                    *startOp = kDifference_SetOp;
                    done = true;
                }
                break;
            case kReverseDifference_SetOp:
                // if all pixels are clearToInside then reverse difference
                // produces empty set. Otherwise it is same as replace
                if (*clearToInside) {
                    *clearToInside = false;
                } else {
                    *startOp = kReplace_SetOp;
                    done = true;
                }
                break;
            default:
                GrCrash("Unknown set op.");
        }
    }
    // 'curr' was incremented past the element that ended the loop; back up
    // by one so the caller starts at the element that set 'done'.
    return done ? curr-1 : count;
}
}
+
// Prepares clipping (scissor and, if necessary, the stencil clip) and then
// flushes graphics state for a draw of the given primitive type. Returns
// false if the draw should be skipped (no render target, no stencil buffer
// available for a complex clip, or state flush failure). When the clip
// cannot be represented as a rectangle it is rendered into a dedicated
// stencil bit, element by element, using the set-op lowering from
// process_initial_clip_elements.
bool GrGpu::setupClipAndFlushState(GrPrimitiveType type) {
    const GrIRect* r = NULL;
    GrIRect clipRect;

    // we check this early because we need a valid
    // render target to setup stencil clipping
    // before even going into flushGraphicsState
    if (NULL == fCurrDrawState.fRenderTarget) {
        GrAssert(!"No render target bound.");
        return false;
    }

    if (fCurrDrawState.fFlagBits & kClip_StateBit) {
        GrRenderTarget& rt = *fCurrDrawState.fRenderTarget;

        GrRect bounds;
        GrRect rtRect;
        rtRect.setLTRB(0, 0,
                       GrIntToScalar(rt.width()), GrIntToScalar(rt.height()));
        if (fClip.hasConservativeBounds()) {
            bounds = fClip.getConservativeBounds();
            if (!bounds.intersect(rtRect)) {
                bounds.setEmpty();
            }
        } else {
            bounds = rtRect;
        }

        bounds.roundOut(&clipRect);
        if (clipRect.isEmpty()) {
            clipRect.setLTRB(0,0,0,0);
        }
        r = &clipRect;

        // use the stencil clip if we can't represent the clip as a rectangle.
        fClipInStencil = !fClip.isRect() && !fClip.isEmpty() &&
                         !bounds.isEmpty();

        // TODO: dynamically attach a SB when needed.
        GrStencilBuffer* stencilBuffer = rt.getStencilBuffer();
        if (fClipInStencil && NULL == stencilBuffer) {
            return false;
        }

        // only re-render the stencil clip if the stashed copy on the stencil
        // buffer doesn't already match this clip at this size
        if (fClipInStencil &&
            stencilBuffer->mustRenderClip(fClip, rt.width(), rt.height())) {

            stencilBuffer->setLastClip(fClip, rt.width(), rt.height());

            // we set the current clip to the bounds so that our recursive
            // draws are scissored to them. We use the copy of the complex clip
            // we just stashed on the SB to render from. We set it back after
            // we finish drawing it into the stencil.
            const GrClip& clip = stencilBuffer->getLastClip();
            fClip.setFromRect(bounds);

            AutoStateRestore asr(this);
            AutoGeometryPush agp(this);

            this->setViewMatrix(GrMatrix::I());
            this->flushScissor(NULL);
#if !VISUALIZE_COMPLEX_CLIP
            this->enableState(kNoColorWrites_StateBit);
#else
            this->disableState(kNoColorWrites_StateBit);
#endif
            int count = clip.getElementCount();
            // the clip bit is the highest bit of the stencil buffer
            int clipBit = stencilBuffer->bits();
            clipBit = (1 << (clipBit-1));

            bool clearToInside;
            GrSetOp startOp = kReplace_SetOp; // suppress warning
            int start = process_initial_clip_elements(clip, &clearToInside,
                                                      &startOp);

            this->clearStencilClip(clipRect, clearToInside);

            // walk through each clip element and perform its set op
            // with the existing clip.
            for (int c = start; c < count; ++c) {
                GrPathFill fill;
                bool fillInverted;
                // enabled at bottom of loop
                this->disableState(kModifyStencilClip_StateBit);

                bool canRenderDirectToStencil; // can the clip element be drawn
                                               // directly to the stencil buffer
                                               // with a non-inverted fill rule
                                               // without extra passes to
                                               // resolve in/out status.

                GrPathRenderer* pr = NULL;
                const GrPath* clipPath = NULL;
                GrPathRenderer::AutoClearPath arp;
                if (kRect_ClipType == clip.getElementType(c)) {
                    canRenderDirectToStencil = true;
                    fill = kEvenOdd_PathFill;
                    fillInverted = false;
                } else {
                    fill = clip.getPathFill(c);
                    fillInverted = GrIsFillInverted(fill);
                    fill = GrNonInvertedFill(fill);
                    clipPath = &clip.getPath(c);
                    pr = this->getClipPathRenderer(*clipPath, fill);
                    if (NULL == pr) {
                        // no renderer can handle this path: give up on the
                        // stencil clip, restore the real clip, and fail
                        fClipInStencil = false;
                        fClip = clip;
                        return false;
                    }
                    canRenderDirectToStencil =
                        !pr->requiresStencilPass(this, *clipPath, fill);
                    arp.set(pr, this, clipPath, fill, NULL);
                }

                GrSetOp op = (c == start) ? startOp : clip.getOp(c);
                int passes;
                GrStencilSettings stencilSettings[GrStencilSettings::kMaxStencilClipPasses];

                bool canDrawDirectToClip; // Given the renderer, the element,
                                          // fill rule, and set operation can
                                          // we render the element directly to
                                          // stencil bit used for clipping.
                canDrawDirectToClip =
                    GrStencilSettings::GetClipPasses(op,
                                                     canRenderDirectToStencil,
                                                     clipBit,
                                                     fillInverted,
                                                     &passes, stencilSettings);

                // draw the element to the client stencil bits if necessary
                if (!canDrawDirectToClip) {
                    static const GrStencilSettings gDrawToStencil = {
                        kIncClamp_StencilOp, kIncClamp_StencilOp,
                        kIncClamp_StencilOp, kIncClamp_StencilOp,
                        kAlways_StencilFunc, kAlways_StencilFunc,
                        0xffffffff,          0xffffffff,
                        0x00000000,          0x00000000,
                        0xffffffff,          0xffffffff,
                    };
                    SET_RANDOM_COLOR
                    if (kRect_ClipType == clip.getElementType(c)) {
                        this->setStencil(gDrawToStencil);
                        this->drawSimpleRect(clip.getRect(c), NULL, 0);
                    } else {
                        if (canRenderDirectToStencil) {
                            this->setStencil(gDrawToStencil);
                            pr->drawPath(0);
                        } else {
                            pr->drawPathToStencil();
                        }
                    }
                }

                // now we modify the clip bit by rendering either the clip
                // element directly or a bounding rect of the entire clip.
                this->enableState(kModifyStencilClip_StateBit);
                for (int p = 0; p < passes; ++p) {
                    this->setStencil(stencilSettings[p]);
                    if (canDrawDirectToClip) {
                        if (kRect_ClipType == clip.getElementType(c)) {
                            SET_RANDOM_COLOR
                            this->drawSimpleRect(clip.getRect(c), NULL, 0);
                        } else {
                            SET_RANDOM_COLOR
                            pr->drawPath(0);
                        }
                    } else {
                        SET_RANDOM_COLOR
                        this->drawSimpleRect(bounds, NULL, 0);
                    }
                }
            }
            // restore clip
            fClip = clip;
            // recursive draws would have disabled this since they drew with
            // the clip bounds as clip.
            fClipInStencil = true;
        }
    }

    // Must flush the scissor after graphics state
    if (!this->flushGraphicsState(type)) {
        return false;
    }
    this->flushScissor(r);
    return true;
}
+
// Returns a path renderer able to draw 'path' with 'fill' into the stencil
// clip, lazily creating a non-AA-only renderer chain on first use. May
// return NULL if no renderer in the chain can handle the path.
GrPathRenderer* GrGpu::getClipPathRenderer(const GrPath& path,
                                           GrPathFill fill) {
    if (NULL == fPathRendererChain) {
        fPathRendererChain =
            new GrPathRendererChain(this->getContext(),
                                    GrPathRendererChain::kNonAAOnly_UsageFlag);
    }
    return fPathRendererChain->getPathRenderer(this, path, fill);
}
+
+
+////////////////////////////////////////////////////////////////////////////////
+
// Called before the geometry source is pushed: finalizes any pool-backed
// (reserved or array) vertex/index data for the current source, then pushes
// a fresh pool-state entry (debug-poisoned until a new source is set).
void GrGpu::geometrySourceWillPush() {
    const GeometrySrcState& geoSrc = this->getGeomSrc();
    if (kArray_GeometrySrcType == geoSrc.fVertexSrc ||
        kReserved_GeometrySrcType == geoSrc.fVertexSrc) {
        this->finalizeReservedVertices();
    }
    if (kArray_GeometrySrcType == geoSrc.fIndexSrc ||
        kReserved_GeometrySrcType == geoSrc.fIndexSrc) {
        this->finalizeReservedIndices();
    }
    GeometryPoolState& newState = fGeomPoolStateStack.push_back();
#if GR_DEBUG
    newState.fPoolVertexBuffer = (GrVertexBuffer*)DEBUG_INVAL_BUFFER;
    newState.fPoolStartVertex = DEBUG_INVAL_START_IDX;
    newState.fPoolIndexBuffer = (GrIndexBuffer*)DEBUG_INVAL_BUFFER;
    newState.fPoolStartIndex = DEBUG_INVAL_START_IDX;
#endif
}
+
// Called before the geometry source is popped; discards the pool state for
// the source being popped. 'restoredState' is unused here (the base class
// restores the source itself).
void GrGpu::geometrySourceWillPop(const GeometrySrcState& restoredState) {
    // if popping last entry then pops are unbalanced with pushes
    GrAssert(fGeomPoolStateStack.count() > 1);
    fGeomPoolStateStack.pop_back();
}
+
// Issues an indexed draw: flushes clip/graphics state, updates stats, maps
// the client-relative start vertex/index to device buffer offsets via
// setupGeometry, then calls the backend draw hook. Silently skips the draw
// if state setup fails.
void GrGpu::onDrawIndexed(GrPrimitiveType type,
                          int startVertex,
                          int startIndex,
                          int vertexCount,
                          int indexCount) {

    this->handleDirtyContext();

    if (!this->setupClipAndFlushState(type)) {
        return;
    }

#if GR_COLLECT_STATS
    fStats.fVertexCnt += vertexCount;
    fStats.fIndexCnt += indexCount;
    fStats.fDrawCnt += 1;
#endif

    int sVertex = startVertex;
    int sIndex = startIndex;
    setupGeometry(&sVertex, &sIndex, vertexCount, indexCount);

    this->onGpuDrawIndexed(type, sVertex, sIndex,
                           vertexCount, indexCount);
}
+
// Issues a non-indexed draw: flushes clip/graphics state, updates stats,
// maps the client-relative start vertex to a device buffer offset via
// setupGeometry, then calls the backend draw hook. Silently skips the draw
// if state setup fails.
void GrGpu::onDrawNonIndexed(GrPrimitiveType type,
                             int startVertex,
                             int vertexCount) {
    this->handleDirtyContext();

    if (!this->setupClipAndFlushState(type)) {
        return;
    }
#if GR_COLLECT_STATS
    fStats.fVertexCnt += vertexCount;
    fStats.fDrawCnt += 1;
#endif

    int sVertex = startVertex;
    setupGeometry(&sVertex, NULL, vertexCount, 0);

    this->onGpuDrawNonIndexed(type, sVertex, vertexCount);
}
+
// Unlocks the vertex pool so reserved vertex data becomes usable for draws.
void GrGpu::finalizeReservedVertices() {
    GrAssert(NULL != fVertexPool);
    fVertexPool->unlock();
}
+
// Unlocks the index pool so reserved index data becomes usable for draws.
void GrGpu::finalizeReservedIndices() {
    GrAssert(NULL != fIndexPool);
    fIndexPool->unlock();
}
+
// Ensures the vertex staging pool exists; creates it on first use (dropping
// its gpu ref so the pool doesn't keep this GrGpu alive), or resets it when
// no client data is currently outstanding in the pool.
void GrGpu::prepareVertexPool() {
    if (NULL == fVertexPool) {
        GrAssert(0 == fVertexPoolUseCnt);
        fVertexPool = new GrVertexBufferAllocPool(this, true,
                                                  VERTEX_POOL_VB_SIZE,
                                                  VERTEX_POOL_VB_COUNT);
        fVertexPool->releaseGpuRef();
    } else if (!fVertexPoolUseCnt) {
        // the client doesn't have valid data in the pool
        fVertexPool->reset();
    }
}
+
// Ensures the index staging pool exists; creates it on first use (dropping
// its gpu ref so the pool doesn't keep this GrGpu alive), or resets it when
// no client data is currently outstanding in the pool.
void GrGpu::prepareIndexPool() {
    if (NULL == fIndexPool) {
        GrAssert(0 == fIndexPoolUseCnt);
        fIndexPool = new GrIndexBufferAllocPool(this, true,
                                                INDEX_POOL_IB_SIZE,
                                                INDEX_POOL_IB_COUNT);
        fIndexPool->releaseGpuRef();
    } else if (!fIndexPoolUseCnt) {
        // the client doesn't have valid data in the pool
        fIndexPool->reset();
    }
}
+
// Reserves CPU-writable space for 'vertexCount' vertices in the vertex pool
// and records the backing buffer/start in the current pool state. Returns
// false (without bumping the use count) if the pool can't satisfy the
// request.
bool GrGpu::onReserveVertexSpace(GrVertexLayout vertexLayout,
                                 int vertexCount,
                                 void** vertices) {
    GeometryPoolState& geomPoolState = fGeomPoolStateStack.back();

    GrAssert(vertexCount > 0);
    GrAssert(NULL != vertices);

    this->prepareVertexPool();

    *vertices = fVertexPool->makeSpace(vertexLayout,
                                       vertexCount,
                                       &geomPoolState.fPoolVertexBuffer,
                                       &geomPoolState.fPoolStartVertex);
    if (NULL == *vertices) {
        return false;
    }
    ++fVertexPoolUseCnt;
    return true;
}
+
// Reserves CPU-writable space for 'indexCount' indices in the index pool
// and records the backing buffer/start in the current pool state. Returns
// false (without bumping the use count) if the pool can't satisfy the
// request.
bool GrGpu::onReserveIndexSpace(int indexCount, void** indices) {
    GeometryPoolState& geomPoolState = fGeomPoolStateStack.back();

    GrAssert(indexCount > 0);
    GrAssert(NULL != indices);

    this->prepareIndexPool();

    *indices = fIndexPool->makeSpace(indexCount,
                                     &geomPoolState.fPoolIndexBuffer,
                                     &geomPoolState.fPoolStartIndex);
    if (NULL == *indices) {
        return false;
    }
    ++fIndexPoolUseCnt;
    return true;
}
+
// Returns the reserved vertex space to the pool and drops the use count.
void GrGpu::releaseReservedVertexSpace() {
    const GeometrySrcState& geoSrc = this->getGeomSrc();
    GrAssert(kReserved_GeometrySrcType == geoSrc.fVertexSrc);
    size_t bytes = geoSrc.fVertexCount * VertexSize(geoSrc.fVertexLayout);
    fVertexPool->putBack(bytes);
    --fVertexPoolUseCnt;
}
+
// Returns the reserved index space to the pool and drops the use count.
void GrGpu::releaseReservedIndexSpace() {
    const GeometrySrcState& geoSrc = this->getGeomSrc();
    GrAssert(kReserved_GeometrySrcType == geoSrc.fIndexSrc);
    size_t bytes = geoSrc.fIndexCount * sizeof(uint16_t);
    fIndexPool->putBack(bytes);
    --fIndexPoolUseCnt;
}
+
// Copies a client vertex array into the vertex pool and records the backing
// buffer/start in the current pool state. The append is asserted (debug
// only) to succeed; the #if trick makes 'success' exist only when asserted.
void GrGpu::onSetVertexSourceToArray(const void* vertexArray, int vertexCount) {
    this->prepareVertexPool();
    GeometryPoolState& geomPoolState = fGeomPoolStateStack.back();
#if GR_DEBUG
    bool success =
#endif
    fVertexPool->appendVertices(this->getGeomSrc().fVertexLayout,
                                vertexCount,
                                vertexArray,
                                &geomPoolState.fPoolVertexBuffer,
                                &geomPoolState.fPoolStartVertex);
    ++fVertexPoolUseCnt;
    GR_DEBUGASSERT(success);
}
+
// Copies a client index array into the index pool and records the backing
// buffer/start in the current pool state. The append is asserted (debug
// only) to succeed; the #if trick makes 'success' exist only when asserted.
void GrGpu::onSetIndexSourceToArray(const void* indexArray, int indexCount) {
    this->prepareIndexPool();
    GeometryPoolState& geomPoolState = fGeomPoolStateStack.back();
#if GR_DEBUG
    bool success =
#endif
    fIndexPool->appendIndices(indexCount,
                              indexArray,
                              &geomPoolState.fPoolIndexBuffer,
                              &geomPoolState.fPoolStartIndex);
    ++fIndexPoolUseCnt;
    GR_DEBUGASSERT(success);
}
+
void GrGpu::releaseVertexArray() {
    // if vertex source was array, we stowed data in the pool
    const GeometrySrcState& geoSrc = this->getGeomSrc();
    GrAssert(kArray_GeometrySrcType == geoSrc.fVertexSrc);
    size_t bytes = geoSrc.fVertexCount * VertexSize(geoSrc.fVertexLayout);
    fVertexPool->putBack(bytes);
    --fVertexPoolUseCnt;
}
+
void GrGpu::releaseIndexArray() {
    // if index source was array, we stowed data in the pool
    const GeometrySrcState& geoSrc = this->getGeomSrc();
    GrAssert(kArray_GeometrySrcType == geoSrc.fIndexSrc);
    size_t bytes = geoSrc.fIndexCount * sizeof(uint16_t);
    fIndexPool->putBack(bytes);
    --fIndexPoolUseCnt;
}
+
+////////////////////////////////////////////////////////////////////////////////
+
// Read-only access to the accumulated draw statistics.
const GrGpuStats& GrGpu::getStats() const {
    return fStats;
}
+
// Zeroes all counters in fStats (plain-old-data, so memset is safe).
void GrGpu::resetStats() {
    memset(&fStats, 0, sizeof(fStats));
}
+
// Dumps the accumulated statistics; compiled to a no-op body when stat
// collection is disabled (the if folds away at compile time).
void GrGpu::printStats() const {
    if (GR_COLLECT_STATS) {
     GrPrintf(
     "-v-------------------------GPU STATS----------------------------v-\n"
     "Stats collection is: %s\n"
     "Draws: %04d, Verts: %04d, Indices: %04d\n"
     "ProgChanges: %04d, TexChanges: %04d, RTChanges: %04d\n"
     "TexCreates: %04d, RTCreates:%04d\n"
     "-^--------------------------------------------------------------^-\n",
     (GR_COLLECT_STATS ? "ON" : "OFF"),
     fStats.fDrawCnt, fStats.fVertexCnt, fStats.fIndexCnt,
     fStats.fProgChngCnt, fStats.fTextureChngCnt, fStats.fRenderTargetChngCnt,
     fStats.fTextureCreateCnt, fStats.fRenderTargetCreateCnt);
    }
}
+
+////////////////////////////////////////////////////////////////////////////////
// Shared default sampler: clamp in both dimensions, normal sampling,
// identity matrix, nearest-neighbor filtering.
const GrSamplerState GrSamplerState::gClampNoFilter(
    GrSamplerState::kClamp_WrapMode,
    GrSamplerState::kClamp_WrapMode,
    GrSamplerState::kNormal_SampleMode,
    GrMatrix::I(),
    GrSamplerState::kNearest_Filter);
+
+
+
+
diff --git a/src/gpu/GrGpu.h b/src/gpu/GrGpu.h
new file mode 100644
index 0000000000..9107554cd6
--- /dev/null
+++ b/src/gpu/GrGpu.h
@@ -0,0 +1,423 @@
+
+/*
+ * Copyright 2011 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+#ifndef GrGpu_DEFINED
+#define GrGpu_DEFINED
+
+#include "GrDrawTarget.h"
+#include "GrRect.h"
+#include "GrRefCnt.h"
+#include "GrTexture.h"
+
+class GrContext;
+class GrIndexBufferAllocPool;
+class GrPathRenderer;
+class GrPathRendererChain;
+class GrResource;
+class GrStencilBuffer;
+class GrVertexBufferAllocPool;
+
+/**
+ * Gpu usage statistics.
+ */
+struct GrGpuStats {
+ uint32_t fVertexCnt; //!< Number of vertices drawn
+ uint32_t fIndexCnt; //!< Number of indices drawn
+ uint32_t fDrawCnt; //!< Number of draws
+
+ uint32_t fProgChngCnt;//!< Number of program changes (N/A for fixed)
+
+ /**
+ * Number of times the texture is set in 3D API
+ */
+ uint32_t fTextureChngCnt;
+ /**
+ * Number of times the render target is set in 3D API
+ */
+ uint32_t fRenderTargetChngCnt;
+ /**
+ * Number of textures created (includes textures that are rendertargets).
+ */
+ uint32_t fTextureCreateCnt;
+ /**
+ * Number of rendertargets created.
+ */
+ uint32_t fRenderTargetCreateCnt;
+};
+
+class GrGpu : public GrDrawTarget {
+
+public:
+
+ /**
+ * Additional blend coefficients for dual source blending, not exposed
+ * through GrPaint/GrContext.
+ */
+ enum ExtendedBlendCoeffs {
+ // source 2 refers to second output color when
+ // using dual source blending.
+ kS2C_BlendCoeff = kPublicBlendCoeffCount,
+ kIS2C_BlendCoeff,
+ kS2A_BlendCoeff,
+ kIS2A_BlendCoeff,
+
+ kTotalBlendCoeffCount
+ };
+
+ /**
+ * Create an instance of GrGpu that matches the specified Engine backend.
+ * If the requested engine is not supported (at compile-time or run-time)
+ * this returns NULL.
+ */
+ static GrGpu* Create(GrEngine, GrPlatform3DContext context3D);
+
+ ////////////////////////////////////////////////////////////////////////////
+
+ GrGpu();
+ virtual ~GrGpu();
+
+ // The GrContext sets itself as the owner of this Gpu object
+ void setContext(GrContext* context) {
+ GrAssert(NULL == fContext); // may only be set once
+ fContext = context;
+ }
+ GrContext* getContext() { return fContext; }
+ const GrContext* getContext() const { return fContext; }
+
+ /**
+ * The GrGpu object normally assumes that no outsider is setting state
+ * within the underlying 3D API's context/device/whatever. This call informs
+ * the GrGpu that the state was modified and it shouldn't make assumptions
+ * about the state.
+ */
+ void markContextDirty() { fContextIsDirty = true; }
+
+ void unimpl(const char[]); // NOTE(review): appears to report an unimplemented feature — confirm behavior in GrGpu.cpp
+
+ /**
+ * Creates a texture object. If desc width or height is not a power of
+ * two but underlying API requires a power of two texture then srcData
+ * will be embedded in a power of two texture. The extra width and height
+ * is filled as though srcData were rendered clamped into the texture.
+ *
+ * If kRenderTarget_TextureFlag is specified the GrRenderTarget is
+ * accessible via GrTexture::asRenderTarget(). The texture will hold a ref
+ * on the render target until its releaseRenderTarget() is called or it is
+ * destroyed.
+ *
+ * @param desc describes the texture to be created.
+ * @param srcData texel data to load texture. Begins with full-size
+ * palette data for paletted textures. Contains width*
+ * height texels. If NULL texture data is uninitialized.
+ *
+ * @return The texture object if successful, otherwise NULL.
+ */
+ GrTexture* createTexture(const GrTextureDesc& desc,
+ const void* srcData, size_t rowBytes);
+
+ GrResource* createPlatformSurface(const GrPlatformSurfaceDesc& desc); // wraps a client-created 3D API object
+
+ /**
+ * Creates a vertex buffer.
+ *
+ * @param size size in bytes of the vertex buffer
+ * @param dynamic hints whether the data will be frequently changed
+ * by either GrVertexBuffer::lock or
+ * GrVertexBuffer::updateData.
+ *
+ * @return The vertex buffer if successful, otherwise NULL.
+ */
+ GrVertexBuffer* createVertexBuffer(uint32_t size, bool dynamic);
+
+ /**
+ * Creates an index buffer.
+ *
+ * @param size size in bytes of the index buffer
+ * @param dynamic hints whether the data will be frequently changed
+ * by either GrIndexBuffer::lock or
+ * GrIndexBuffer::updateData.
+ *
+ * @return The index buffer if successful, otherwise NULL.
+ */
+ GrIndexBuffer* createIndexBuffer(uint32_t size, bool dynamic);
+
+ /**
+ * Returns an index buffer that can be used to render quads.
+ * Six indices per quad: 0, 1, 2, 0, 2, 3, etc.
+ * The max number of quads can be queried using GrIndexBuffer::maxQuads().
+ * Draw with kTriangles_PrimitiveType
+ * @ return the quad index buffer
+ */
+ const GrIndexBuffer* getQuadIndexBuffer() const;
+
+ /**
+ * Returns a vertex buffer with four position-only vertices [(0,0), (1,0),
+ * (1,1), (0,1)].
+ * @ return unit square vertex buffer
+ */
+ const GrVertexBuffer* getUnitSquareVertexBuffer() const;
+
+ /**
+ * Ensures that the current render target is actually set in the
+ * underlying 3D API. Used when client wants to use 3D API to directly
+ * render to the RT.
+ */
+ void forceRenderTargetFlush();
+
+ /**
+ * Reads a rectangle of pixels from a render target.
+ * @param renderTarget the render target to read from. NULL means the
+ * current render target.
+ * @param left left edge of the rectangle to read (inclusive)
+ * @param top top edge of the rectangle to read (inclusive)
+ * @param width width of rectangle to read in pixels.
+ * @param height height of rectangle to read in pixels.
+ * @param config the pixel config of the destination buffer
+ * @param buffer memory to read the rectangle into.
+ *
+ * @return true if the read succeeded, false if not. The read can fail
+ * because of a unsupported pixel config or because no render
+ * target is currently set.
+ */
+ bool readPixels(GrRenderTarget* renderTarget,
+ int left, int top, int width, int height,
+ GrPixelConfig config, void* buffer);
+
+ const GrGpuStats& getStats() const; // accumulated draw/resource counters
+ void resetStats(); // zeroes all counters
+ void printStats() const; // dumps counters (only when GR_COLLECT_STATS)
+
+ /**
+ * Called to tell Gpu object that all GrResources have been lost and should
+ * be abandoned. Overrides must call INHERITED::abandonResources().
+ */
+ virtual void abandonResources();
+
+ /**
+ * Called to tell Gpu object to release all GrResources. Overrides must call
+ * INHERITED::releaseResources().
+ */
+ void releaseResources();
+
+ /**
+ * Add resource to list of resources. Should only be called by GrResource.
+ * @param resource the resource to add.
+ */
+ void insertResource(GrResource* resource);
+
+ /**
+ * Remove resource from list of resources. Should only be called by
+ * GrResource.
+ * @param resource the resource to remove.
+ */
+ void removeResource(GrResource* resource);
+
+ // GrDrawTarget overrides
+ virtual void clear(const GrIRect* rect, GrColor color);
+
+protected:
+ enum PrivateStateBits {
+ kFirstBit = (kLastPublicStateBit << 1),
+
+ kModifyStencilClip_StateBit = kFirstBit, // allows draws to modify
+ // stencil bits used for
+ // clipping.
+ };
+
+ // keep track of whether we are using stencil clipping (as opposed to
+ // scissor).
+ bool fClipInStencil;
+
+ // prepares clip flushes gpu state before a draw
+ bool setupClipAndFlushState(GrPrimitiveType type);
+
+ // Functions used to map clip-respecting stencil tests into normal
+ // stencil funcs supported by GPUs.
+ static GrStencilFunc ConvertStencilFunc(bool stencilInClip,
+ GrStencilFunc func);
+ static void ConvertStencilFuncAndMask(GrStencilFunc func,
+ bool clipInStencil,
+ unsigned int clipBit,
+ unsigned int userBits,
+ unsigned int* ref,
+ unsigned int* mask);
+
+ // stencil settings to clip drawing when stencil clipping is in effect
+ // and the client isn't using the stencil test.
+ static const GrStencilSettings gClipStencilSettings;
+
+
+ GrGpuStats fStats; // counters exposed via getStats()/resetStats()
+
+ struct GeometryPoolState {
+ const GrVertexBuffer* fPoolVertexBuffer;
+ int fPoolStartVertex;
+
+ const GrIndexBuffer* fPoolIndexBuffer;
+ int fPoolStartIndex;
+ };
+ const GeometryPoolState& getGeomPoolState() { // pool state at the top of the geometry-source stack
+ return fGeomPoolStateStack.back();
+ }
+
+ // GrDrawTarget overrides
+ virtual bool onReserveVertexSpace(GrVertexLayout vertexLayout,
+ int vertexCount,
+ void** vertices);
+ virtual bool onReserveIndexSpace(int indexCount, void** indices);
+ virtual void releaseReservedVertexSpace();
+ virtual void releaseReservedIndexSpace();
+ virtual void onSetVertexSourceToArray(const void* vertexArray,
+ int vertexCount);
+ virtual void onSetIndexSourceToArray(const void* indexArray,
+ int indexCount);
+ virtual void releaseVertexArray();
+ virtual void releaseIndexArray();
+ virtual void geometrySourceWillPush();
+ virtual void geometrySourceWillPop(const GeometrySrcState& restoredState);
+
+ // Helpers for setting up geometry state
+ void finalizeReservedVertices();
+ void finalizeReservedIndices();
+
+ // overridden by API-specific derived class to handle re-emitting 3D API
+ // preamble and dirtying state cache.
+ virtual void resetContext() = 0;
+
+ // overridden by API-specific derived class to create objects.
+ virtual GrTexture* onCreateTexture(const GrTextureDesc& desc,
+ const void* srcData,
+ size_t rowBytes) = 0;
+ virtual GrResource* onCreatePlatformSurface(const GrPlatformSurfaceDesc& desc) = 0;
+ virtual GrVertexBuffer* onCreateVertexBuffer(uint32_t size,
+ bool dynamic) = 0;
+ virtual GrIndexBuffer* onCreateIndexBuffer(uint32_t size,
+ bool dynamic) = 0;
+
+ // overridden by API-specific derived class to perform the clear and
+ // clearRect. NULL rect means clear whole target.
+ virtual void onClear(const GrIRect* rect, GrColor color) = 0;
+
+ // overridden by API-specific derived class to perform the draw call.
+ virtual void onGpuDrawIndexed(GrPrimitiveType type,
+ uint32_t startVertex,
+ uint32_t startIndex,
+ uint32_t vertexCount,
+ uint32_t indexCount) = 0;
+
+ virtual void onGpuDrawNonIndexed(GrPrimitiveType type,
+ uint32_t vertexCount,
+ uint32_t numVertices) = 0;
+
+ // overridden by API-specific derived class to perform flush
+ virtual void onForceRenderTargetFlush() = 0;
+
+ // overridden by API-specific derived class to perform the read pixels.
+ virtual bool onReadPixels(GrRenderTarget* target,
+ int left, int top, int width, int height,
+ GrPixelConfig, void* buffer) = 0;
+
+ // called to program the vertex data, indexCount will be 0 if drawing non-
+ // indexed geometry. The subclass may adjust the startVertex and/or
+ // startIndex since it may have already accounted for these in the setup.
+ virtual void setupGeometry(int* startVertex,
+ int* startIndex,
+ int vertexCount,
+ int indexCount) = 0;
+
+ // width and height may be larger than rt (if underlying API allows it).
+ // Should attach the SB to the RT. Returns false if compatible sb could
+ // not be created.
+ virtual bool createStencilBufferForRenderTarget(GrRenderTarget* rt,
+ int width,
+ int height) = 0;
+
+ // attaches an existing SB to an existing RT.
+ virtual bool attachStencilBufferToRenderTarget(GrStencilBuffer* sb,
+ GrRenderTarget* rt) = 0;
+
+ // The GrGpu typically records the clients requested state and then flushes
+ // deltas from previous state at draw time. This function does the
+ // API-specific flush of the state
+ // returns false if current state is unsupported.
+ virtual bool flushGraphicsState(GrPrimitiveType type) = 0;
+
+ // Sets the scissor rect, or disables if rect is NULL.
+ virtual void flushScissor(const GrIRect* rect) = 0;
+
+ // GrGpu subclass sets clip bit in the stencil buffer. The subclass is
+ // free to clear the remaining bits to zero if masked clears are more
+ // expensive than clearing all bits.
+ virtual void clearStencilClip(const GrIRect& rect, bool insideClip) = 0;
+
+ // clears the entire stencil buffer to 0
+ virtual void clearStencil() = 0;
+
+private:
+ GrContext* fContext; // not reffed (context refs gpu)
+
+ GrVertexBuffer* fVertexPool; // NOTE(review): declared type is GrVertexBufferAllocPool below — see original
+
+ GrIndexBufferAllocPool* fIndexPool; // pool backing reserved/array index sources
+
+ // counts number of uses of vertex/index pool in the geometry stack
+ int fVertexPoolUseCnt;
+ int fIndexPoolUseCnt;
+
+ enum {
+ kPreallocGeomPoolStateStackCnt = 4,
+ };
+ SkSTArray<kPreallocGeomPoolStateStackCnt,
+ GeometryPoolState, true> fGeomPoolStateStack; // parallels GrDrawTarget's geometry-source stack
+
+ mutable GrIndexBuffer* fQuadIndexBuffer; // mutable so it can be
+ // created on-demand
+
+ mutable GrVertexBuffer* fUnitSquareVertexBuffer; // mutable so it can be
+ // created on-demand
+
+ // must be instantiated after GrGpu object has been given its owning
+ // GrContext ptr. (GrGpu is constructed first then handed off to GrContext).
+ GrPathRendererChain* fPathRendererChain;
+
+ bool fContextIsDirty; // set by markContextDirty(); consumed by handleDirtyContext()
+
+ GrResource* fResourceHead; // presumably head of list maintained by insert/removeResource — confirm in .cpp
+
+ // Given a rt, find or create a stencil buffer and attach it
+ bool attachStencilBufferToRenderTarget(GrRenderTarget* target);
+
+ // GrDrawTarget overrides
+ virtual void onDrawIndexed(GrPrimitiveType type,
+ int startVertex,
+ int startIndex,
+ int vertexCount,
+ int indexCount);
+ virtual void onDrawNonIndexed(GrPrimitiveType type,
+ int startVertex,
+ int vertexCount);
+
+ // readies the pools to provide vertex/index data.
+ void prepareVertexPool();
+ void prepareIndexPool();
+
+ // determines the path renderer used to draw a clip path element.
+ GrPathRenderer* getClipPathRenderer(const SkPath& path, GrPathFill fill);
+
+ // lazily re-emits the 3D API preamble if an outsider touched API state
+ void handleDirtyContext() {
+ if (fContextIsDirty) {
+ this->resetContext();
+ fContextIsDirty = false;
+ }
+ }
+
+ typedef GrDrawTarget INHERITED;
+};
+
+#endif
diff --git a/src/gpu/GrGpuFactory.cpp b/src/gpu/GrGpuFactory.cpp
new file mode 100644
index 0000000000..9498da21c3
--- /dev/null
+++ b/src/gpu/GrGpuFactory.cpp
@@ -0,0 +1,69 @@
+
+/*
+ * Copyright 2011 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+#include "GrTypes.h"
+
+// must be before GrGLConfig.h
+#if GR_WIN32_BUILD
+// #include "GrGpuD3D9.h"
+#endif
+
+#include "GrGLConfig.h"
+
+#include "GrGpu.h"
+#include "GrGpuGLFixed.h"
+#include "GrGpuGLShaders.h"
+
+GrGpu* GrGpu::Create(GrEngine engine, GrPlatform3DContext context3D) { // factory: returns NULL if the engine is unsupported
+
+ const GrGLInterface* glInterface = NULL;
+ SkAutoTUnref<const GrGLInterface> glInterfaceUnref;
+
+ if (kOpenGL_Shaders_GrEngine == engine ||
+ kOpenGL_Fixed_GrEngine == engine) {
+ glInterface = reinterpret_cast<const GrGLInterface*>(context3D); // context3D doubles as a GrGLInterface* for GL engines
+ if (NULL == glInterface) {
+ glInterface = GrGLDefaultInterface();
+ // By calling GrGLDefaultInterface we've taken a ref on the
+ // returned object. We only want to hold that ref until after
+ // the GrGpu is constructed and has taken ownership.
+ glInterfaceUnref.reset(glInterface);
+ }
+ if (NULL == glInterface) {
+#if GR_DEBUG
+ GrPrintf("No GL interface provided!\n");
+#endif
+ return NULL;
+ }
+ if (!glInterface->validate(engine)) { // checks required function ptrs are present for this engine
+#if GR_DEBUG
+ GrPrintf("Failed GL interface validation!\n");
+#endif
+ return NULL;
+ }
+ }
+
+ GrGpu* gpu = NULL;
+
+ switch (engine) {
+ case kOpenGL_Shaders_GrEngine:
+ GrAssert(NULL != glInterface);
+ gpu = new GrGpuGLShaders(glInterface); // ctor refs the interface
+ break;
+ case kOpenGL_Fixed_GrEngine:
+ GrAssert(NULL != glInterface);
+ gpu = new GrGpuGLFixed(glInterface);
+ break;
+ default:
+ GrAssert(!"unknown engine");
+ break;
+ }
+
+ return gpu;
+}
diff --git a/src/gpu/GrGpuGL.cpp b/src/gpu/GrGpuGL.cpp
new file mode 100644
index 0000000000..b27815b6d4
--- /dev/null
+++ b/src/gpu/GrGpuGL.cpp
@@ -0,0 +1,2260 @@
+
+/*
+ * Copyright 2011 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+#include "GrGpuGL.h"
+#include "GrGLStencilBuffer.h"
+#include "GrTypes.h"
+#include "SkTemplates.h"
+
+static const GrGLuint GR_MAX_GLUINT = ~0; // all bits set
+static const GrGLint GR_INVAL_GLINT = ~0; // sentinel used by the probe functions below
+
+#define GL_CALL(X) GR_GL_CALL(this->glInterface(), X)
+#define GL_CALL_RET(RET, X) GR_GL_CALL_RET(this->glInterface(), RET, X)
+
+// we use a spare texture unit to avoid
+// mucking with the state of any of the stages.
+static const int SPARE_TEX_UNIT = GrGpuGL::kNumStages;
+
+#define SKIP_CACHE_CHECK true
+
+// Maps GrBlendCoeff enum values (in declaration order; see the static
+// asserts in BlendCoeffReferencesConstant) to GL blend-factor enums.
+static const GrGLenum gXfermodeCoeff2Blend[] = {
+ GR_GL_ZERO,
+ GR_GL_ONE,
+ GR_GL_SRC_COLOR,
+ GR_GL_ONE_MINUS_SRC_COLOR,
+ GR_GL_DST_COLOR,
+ GR_GL_ONE_MINUS_DST_COLOR,
+ GR_GL_SRC_ALPHA,
+ GR_GL_ONE_MINUS_SRC_ALPHA,
+ GR_GL_DST_ALPHA,
+ GR_GL_ONE_MINUS_DST_ALPHA,
+ GR_GL_CONSTANT_COLOR,
+ GR_GL_ONE_MINUS_CONSTANT_COLOR,
+ GR_GL_CONSTANT_ALPHA,
+ GR_GL_ONE_MINUS_CONSTANT_ALPHA,
+
+ // extended blend coeffs
+ GR_GL_SRC1_COLOR,
+ GR_GL_ONE_MINUS_SRC1_COLOR,
+ GR_GL_SRC1_ALPHA,
+ GR_GL_ONE_MINUS_SRC1_ALPHA,
+};
+
+bool GrGpuGL::BlendCoeffReferencesConstant(GrBlendCoeff coeff) { // true iff the coeff reads the GL constant blend color
+ static const bool gCoeffReferencesBlendConst[] = {
+ false,
+ false,
+ false,
+ false,
+ false,
+ false,
+ false,
+ false,
+ false,
+ false,
+ true, // kConstC_BlendCoeff
+ true, // kIConstC_BlendCoeff
+ true, // kConstA_BlendCoeff
+ true, // kIConstA_BlendCoeff
+
+ // extended blend coeffs
+ false,
+ false,
+ false,
+ false,
+ };
+ return gCoeffReferencesBlendConst[coeff]; // asserts below are compile-time only; unreachable at runtime by design
+ GR_STATIC_ASSERT(kTotalBlendCoeffCount == GR_ARRAY_COUNT(gCoeffReferencesBlendConst));
+
+ GR_STATIC_ASSERT(0 == kZero_BlendCoeff);
+ GR_STATIC_ASSERT(1 == kOne_BlendCoeff);
+ GR_STATIC_ASSERT(2 == kSC_BlendCoeff);
+ GR_STATIC_ASSERT(3 == kISC_BlendCoeff);
+ GR_STATIC_ASSERT(4 == kDC_BlendCoeff);
+ GR_STATIC_ASSERT(5 == kIDC_BlendCoeff);
+ GR_STATIC_ASSERT(6 == kSA_BlendCoeff);
+ GR_STATIC_ASSERT(7 == kISA_BlendCoeff);
+ GR_STATIC_ASSERT(8 == kDA_BlendCoeff);
+ GR_STATIC_ASSERT(9 == kIDA_BlendCoeff);
+ GR_STATIC_ASSERT(10 == kConstC_BlendCoeff);
+ GR_STATIC_ASSERT(11 == kIConstC_BlendCoeff);
+ GR_STATIC_ASSERT(12 == kConstA_BlendCoeff);
+ GR_STATIC_ASSERT(13 == kIConstA_BlendCoeff);
+
+ GR_STATIC_ASSERT(14 == kS2C_BlendCoeff);
+ GR_STATIC_ASSERT(15 == kIS2C_BlendCoeff);
+ GR_STATIC_ASSERT(16 == kS2A_BlendCoeff);
+ GR_STATIC_ASSERT(17 == kIS2A_BlendCoeff);
+
+ // assertion for gXfermodeCoeff2Blend have to be in GrGpu scope
+ GR_STATIC_ASSERT(kTotalBlendCoeffCount == GR_ARRAY_COUNT(gXfermodeCoeff2Blend));
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+// Compensates the sampler matrix for NPOT content embedded in a larger POT
+// texture (content scale) and for bottom-up texture orientation.
+void GrGpuGL::AdjustTextureMatrix(const GrGLTexture* texture,
+ GrSamplerState::SampleMode mode,
+ GrMatrix* matrix) {
+ GrAssert(NULL != texture);
+ GrAssert(NULL != matrix);
+ if (GR_Scalar1 != texture->contentScaleX() ||
+ GR_Scalar1 != texture->contentScaleY()) {
+ if (GrSamplerState::kRadial_SampleMode == mode) {
+ GrMatrix scale;
+ scale.setScale(texture->contentScaleX(), texture->contentScaleX()); // X used for both axes — radial presumably needs a uniform scale; confirm intent
+ matrix->postConcat(scale);
+ } else if (GrSamplerState::kNormal_SampleMode == mode) {
+ GrMatrix scale;
+ scale.setScale(texture->contentScaleX(), texture->contentScaleY());
+ matrix->postConcat(scale);
+ } else {
+ GrPrintf("We haven't handled NPOT adjustment for other sample modes!");
+ }
+ }
+ GrGLTexture::Orientation orientation = texture->orientation();
+ if (GrGLTexture::kBottomUp_Orientation == orientation) {
+ GrMatrix invY; // flips the Y axis: y' = 1 - y
+ invY.setAll(GR_Scalar1, 0, 0,
+ 0, -GR_Scalar1, GR_Scalar1,
+ 0, 0, GrMatrix::I()[8]);
+ matrix->postConcat(invY);
+ } else {
+ GrAssert(GrGLTexture::kTopDown_Orientation == orientation);
+ }
+}
+
+// True iff AdjustTextureMatrix would leave the sampler matrix as identity:
+// identity sampler matrix, no NPOT content scale, top-down orientation.
+bool GrGpuGL::TextureMatrixIsIdentity(const GrGLTexture* texture,
+ const GrSamplerState& sampler) {
+ GrAssert(NULL != texture);
+ if (!sampler.getMatrix().isIdentity()) {
+ return false;
+ }
+ if (GR_Scalar1 != texture->contentScaleX() ||
+ GR_Scalar1 != texture->contentScaleY()) {
+ return false;
+ }
+ GrGLTexture::Orientation orientation = texture->orientation();
+ if (GrGLTexture::kBottomUp_Orientation == orientation) {
+ return false; // would require the Y-flip concat
+ } else {
+ GrAssert(GrGLTexture::kTopDown_Orientation == orientation);
+ }
+ return true;
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+static bool gPrintStartupSpew;
+
+// Creates a throwaway w x h texture-backed FBO and reports whether the
+// driver considers it framebuffer-complete. All temp objects are deleted.
+static bool fbo_test(const GrGLInterface* gl, int w, int h) {
+
+ GR_GL_CALL(gl, ActiveTexture(GR_GL_TEXTURE0 + SPARE_TEX_UNIT)); // avoid disturbing stage texture bindings
+
+ GrGLuint testFBO;
+ GR_GL_CALL(gl, GenFramebuffers(1, &testFBO));
+ GR_GL_CALL(gl, BindFramebuffer(GR_GL_FRAMEBUFFER, testFBO));
+ GrGLuint testRTTex;
+ GR_GL_CALL(gl, GenTextures(1, &testRTTex));
+ GR_GL_CALL(gl, BindTexture(GR_GL_TEXTURE_2D, testRTTex));
+ // some implementations require texture to be mip-map complete before
+ // FBO with level 0 bound as color attachment will be framebuffer complete.
+ GR_GL_CALL(gl, TexParameteri(GR_GL_TEXTURE_2D,
+ GR_GL_TEXTURE_MIN_FILTER,
+ GR_GL_NEAREST));
+ GR_GL_CALL(gl, TexImage2D(GR_GL_TEXTURE_2D, 0, GR_GL_RGBA, w, h,
+ 0, GR_GL_RGBA, GR_GL_UNSIGNED_BYTE, NULL)); // no pixel data needed, just allocation
+ GR_GL_CALL(gl, BindTexture(GR_GL_TEXTURE_2D, 0));
+ GR_GL_CALL(gl, FramebufferTexture2D(GR_GL_FRAMEBUFFER,
+ GR_GL_COLOR_ATTACHMENT0,
+ GR_GL_TEXTURE_2D, testRTTex, 0));
+ GrGLenum status;
+ GR_GL_CALL_RET(gl, status, CheckFramebufferStatus(GR_GL_FRAMEBUFFER));
+ GR_GL_CALL(gl, DeleteFramebuffers(1, &testFBO));
+ GR_GL_CALL(gl, DeleteTextures(1, &testRTTex));
+
+ return status == GR_GL_FRAMEBUFFER_COMPLETE;
+}
+
+static bool probe_for_npot_render_target_support(const GrGLInterface* gl,
+ bool hasNPOTTextureSupport) {
+
+ /* Experimentation has found that some GLs that support NPOT textures
+ do not support FBOs with a NPOT texture. They report "unsupported" FBO
+ status. I don't know how to explicitly query for this. Do an
+ experiment. Note they may support NPOT with a renderbuffer but not a
+ texture. Presumably, the implementation bloats the renderbuffer
+ internally to the next POT.
+ */
+ if (hasNPOTTextureSupport) {
+ return fbo_test(gl, 200, 200); // 200 is NPOT; completeness implies NPOT RT support
+ }
+ return false; // NPOT RTs impossible without NPOT texture support
+}
+
+static int probe_for_min_render_target_height(const GrGLInterface* gl,
+ bool hasNPOTRenderTargetSupport,
+ int maxRenderTargetSize) {
+ /* The iPhone 4 has a restriction that for an FBO with texture color
+ attachment with height <= 8 then the width must be <= height. Here
+ we look for such a limitation.
+ */
+ if (gPrintStartupSpew) {
+ GrPrintf("Small height FBO texture experiments\n");
+ }
+ int minRenderTargetHeight = GR_INVAL_GLINT;
+ for (GrGLuint i = 1; i <= 256; hasNPOTRenderTargetSupport ? ++i : i *= 2) { // every height if NPOT RTs work, else POT only
+ GrGLuint w = maxRenderTargetSize; // widest possible target exposes the restriction
+ GrGLuint h = i;
+ if (fbo_test(gl, w, h)) {
+ if (gPrintStartupSpew) {
+ GrPrintf("\t[%d, %d]: PASSED\n", w, h);
+ }
+ minRenderTargetHeight = i; // first height that works is the minimum
+ break;
+ } else {
+ if (gPrintStartupSpew) {
+ GrPrintf("\t[%d, %d]: FAILED\n", w, h);
+ }
+ }
+ }
+ GrAssert(GR_INVAL_GLINT != minRenderTargetHeight); // something <= 256 must have passed
+
+ return minRenderTargetHeight;
+}
+
+// Mirror of probe_for_min_render_target_height with width/height swapped:
+// finds the smallest FBO texture width usable at maximum height.
+static int probe_for_min_render_target_width(const GrGLInterface* gl,
+ bool hasNPOTRenderTargetSupport,
+ int maxRenderTargetSize) {
+
+ if (gPrintStartupSpew) {
+ GrPrintf("Small width FBO texture experiments\n");
+ }
+ int minRenderTargetWidth = GR_INVAL_GLINT;
+ for (GrGLuint i = 1; i <= 256; hasNPOTRenderTargetSupport ? ++i : i *= 2) { // fixed: arms were swapped vs the height probe — without NPOT RT support only POT widths may be tested
+ GrGLuint w = i;
+ GrGLuint h = maxRenderTargetSize; // tallest possible target exposes the restriction
+ if (fbo_test(gl, w, h)) {
+ if (gPrintStartupSpew) {
+ GrPrintf("\t[%d, %d]: PASSED\n", w, h);
+ }
+ minRenderTargetWidth = i; // first width that works is the minimum
+ break;
+ } else {
+ if (gPrintStartupSpew) {
+ GrPrintf("\t[%d, %d]: FAILED\n", w, h);
+ }
+ }
+ }
+ GrAssert(GR_INVAL_GLINT != minRenderTargetWidth); // something <= 256 must have passed
+
+ return minRenderTargetWidth;
+}
+
+GrGpuGL::GrGpuGL(const GrGLInterface* gl, GrGLBinding glBinding) { // takes its own ref on gl; released in the dtor
+
+ fPrintedCaps = false;
+
+ gl->ref();
+ fGL = gl;
+ fGLBinding = glBinding;
+ switch (glBinding) { // sanity-check the interface actually supports the claimed binding
+ case kDesktop_GrGLBinding:
+ GrAssert(gl->supportsDesktop());
+ break;
+ case kES1_GrGLBinding:
+ GrAssert(gl->supportsES1());
+ break;
+ case kES2_GrGLBinding:
+ GrAssert(gl->supportsES2());
+ break;
+ default:
+ GrCrash("Expect exactly one valid GL binding bit to be in use.");
+ }
+
+ GrGLClearErr(fGL); // start with a clean GL error state
+
+ const GrGLubyte* ext;
+ GL_CALL_RET(ext, GetString(GR_GL_EXTENSIONS));
+ if (gPrintStartupSpew) {
+ const GrGLubyte* vendor;
+ const GrGLubyte* renderer;
+ const GrGLubyte* version;
+ GL_CALL_RET(vendor, GetString(GR_GL_VENDOR));
+ GL_CALL_RET(renderer, GetString(GR_GL_RENDERER));
+ GL_CALL_RET(version, GetString(GR_GL_VERSION));
+ GrPrintf("------------------------- create GrGpuGL %p --------------\n",
+ this);
+ GrPrintf("------ VENDOR %s\n", vendor);
+ GrPrintf("------ RENDERER %s\n", renderer);
+ GrPrintf("------ VERSION %s\n", version);
+ GrPrintf("------ EXTENSIONS\n %s \n", ext);
+ }
+
+ fGLVersion = GrGLGetVersion(gl);
+ GrAssert(0 != fGLVersion);
+ fExtensionString = (const char*) ext; // copied into a string; used by hasExtension()
+
+ this->resetDirtyFlags();
+
+ this->initCaps(); // queries/probes driver capabilities
+
+ fLastSuccessfulStencilFmtIdx = 0;
+}
+
+GrGpuGL::~GrGpuGL() {
+ // This subclass must do this before the base class destructor runs
+ // since we will unref the GrGLInterface.
+ this->releaseResources();
+ fGL->unref(); // balances the ref taken in the ctor
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+static const GrGLuint kUnknownBitCount = ~0;
+
+// Populates fCaps/fGLCaps by querying GL limits, checking extensions, and
+// (where no query exists) running FBO probes. Called once from the ctor.
+void GrGpuGL::initCaps() {
+ GrGLint maxTextureUnits;
+ // check FS and fixed-function texture unit limits
+ // we only use textures in the fragment stage currently.
+ // checks are > to make sure we have a spare unit.
+ if (kES1_GrGLBinding != this->glBinding()) {
+ GR_GL_GetIntegerv(fGL, GR_GL_MAX_TEXTURE_IMAGE_UNITS, &maxTextureUnits);
+ GrAssert(maxTextureUnits > kNumStages);
+ }
+ if (kES2_GrGLBinding != this->glBinding()) {
+ GR_GL_GetIntegerv(fGL, GR_GL_MAX_TEXTURE_UNITS, &maxTextureUnits);
+ GrAssert(maxTextureUnits > kNumStages);
+ }
+ if (kES2_GrGLBinding == this->glBinding()) {
+ GR_GL_GetIntegerv(fGL, GR_GL_MAX_FRAGMENT_UNIFORM_VECTORS,
+ &fGLCaps.fMaxFragmentUniformVectors);
+ } else if (kDesktop_GrGLBinding == this->glBinding()) { // fixed: was '!=', which sent desktop GL to the ES1 fallback and queried this desktop-only enum on ES1
+ GrGLint max;
+ GR_GL_GetIntegerv(fGL, GR_GL_MAX_FRAGMENT_UNIFORM_COMPONENTS, &max);
+ fGLCaps.fMaxFragmentUniformVectors = max / 4; // components are scalars; 4 per vector
+ } else {
+ fGLCaps.fMaxFragmentUniformVectors = 16; // ES1: no queryable limit; conservative default
+ }
+
+ GrGLint numFormats;
+ GR_GL_GetIntegerv(fGL, GR_GL_NUM_COMPRESSED_TEXTURE_FORMATS, &numFormats);
+ SkAutoSTMalloc<10, GrGLint> formats(numFormats);
+ GR_GL_GetIntegerv(fGL, GR_GL_COMPRESSED_TEXTURE_FORMATS, formats);
+ for (int i = 0; i < numFormats; ++i) {
+ if (formats[i] == GR_GL_PALETTE8_RGBA8) {
+ fCaps.f8BitPaletteSupport = true;
+ break;
+ }
+ }
+
+ if (kDesktop_GrGLBinding == this->glBinding()) { // NOTE(review): this whole if/else duplicates the stencil-wrap logic in the next if/else, which also sets fTwoSidedStencilSupport; candidate for removal
+ fCaps.fStencilWrapOpsSupport = (fGLVersion >= GR_GL_VER(1,4)) ||
+ this->hasExtension("GL_EXT_stencil_wrap");
+ } else {
+ fCaps.fStencilWrapOpsSupport = (fGLVersion >= GR_GL_VER(2,0)) ||
+ this->hasExtension("GL_OES_stencil_wrap");
+ }
+
+ if (kDesktop_GrGLBinding == this->glBinding()) {
+ // we could also look for GL_ATI_separate_stencil extension or
+ // GL_EXT_stencil_two_side but they use different function signatures
+ // than GL2.0+ (and than each other).
+ fCaps.fTwoSidedStencilSupport = (fGLVersion >= GR_GL_VER(2,0));
+ // supported on GL 1.4 and higher or by extension
+ fCaps.fStencilWrapOpsSupport = (fGLVersion >= GR_GL_VER(1,4)) ||
+ this->hasExtension("GL_EXT_stencil_wrap");
+ } else {
+ // ES 2 has two sided stencil but 1.1 doesn't. There doesn't seem to be
+ // an ES1 extension.
+ fCaps.fTwoSidedStencilSupport = (fGLVersion >= GR_GL_VER(2,0));
+ // stencil wrap support is in ES2, ES1 requires extension.
+ fCaps.fStencilWrapOpsSupport = (fGLVersion >= GR_GL_VER(2,0)) ||
+ this->hasExtension("GL_OES_stencil_wrap");
+ }
+
+ if (kDesktop_GrGLBinding == this->glBinding()) {
+ fGLCaps.fRGBA8Renderbuffer = true; // core on desktop GL
+ } else {
+ fGLCaps.fRGBA8Renderbuffer = this->hasExtension("GL_OES_rgb8_rgba8");
+ }
+
+
+ if (kDesktop_GrGLBinding != this->glBinding()) {
+ if (GR_GL_32BPP_COLOR_FORMAT == GR_GL_BGRA) {
+ GrAssert(this->hasExtension("GL_EXT_texture_format_BGRA8888")); // BGRA needs an extension on ES
+ }
+ }
+
+ if (kDesktop_GrGLBinding == this->glBinding()) {
+ fCaps.fBufferLockSupport = true; // we require VBO support and the desktop VBO
+ // extension includes glMapBuffer.
+ } else {
+ fCaps.fBufferLockSupport = this->hasExtension("GL_OES_mapbuffer");
+ }
+
+ if (kDesktop_GrGLBinding == this->glBinding()) {
+ if (fGLVersion >= GR_GL_VER(2,0) ||
+ this->hasExtension("GL_ARB_texture_non_power_of_two")) {
+ fCaps.fNPOTTextureTileSupport = true;
+ fCaps.fNPOTTextureSupport = true;
+ } else {
+ fCaps.fNPOTTextureTileSupport = false;
+ fCaps.fNPOTTextureSupport = false;
+ }
+ } else {
+ if (fGLVersion >= GR_GL_VER(2,0)) {
+ fCaps.fNPOTTextureSupport = true; // ES2 core: NPOT with clamp + no mips
+ fCaps.fNPOTTextureTileSupport = this->hasExtension("GL_OES_texture_npot");
+ } else {
+ fCaps.fNPOTTextureSupport =
+ this->hasExtension("GL_APPLE_texture_2D_limited_npot");
+ fCaps.fNPOTTextureTileSupport = false;
+ }
+ }
+
+ fCaps.fHWAALineSupport = (kDesktop_GrGLBinding == this->glBinding());
+
+ ////////////////////////////////////////////////////////////////////////////
+ // Experiments to determine limitations that can't be queried.
+ // TODO: Make these a preprocess that generate some compile time constants.
+ // TODO: probe once at startup, rather than once per context creation.
+
+ int expectNPOTTargets = fGL->fNPOTRenderTargetSupport;
+ if (expectNPOTTargets == kProbe_GrGLCapability) {
+ fCaps.fNPOTRenderTargetSupport =
+ probe_for_npot_render_target_support(fGL, fCaps.fNPOTTextureSupport);
+ } else {
+ GrAssert(expectNPOTTargets == 0 || expectNPOTTargets == 1);
+ fCaps.fNPOTRenderTargetSupport = (0 != expectNPOTTargets);
+ }
+
+ GR_GL_GetIntegerv(fGL, GR_GL_MAX_TEXTURE_SIZE, &fCaps.fMaxTextureSize);
+ GR_GL_GetIntegerv(fGL, GR_GL_MAX_RENDERBUFFER_SIZE, &fCaps.fMaxRenderTargetSize);
+ // Our render targets are always created with textures as the color
+ // attachment, hence this min:
+ fCaps.fMaxRenderTargetSize = GrMin(fCaps.fMaxTextureSize, fCaps.fMaxRenderTargetSize);
+
+ fCaps.fMinRenderTargetHeight = fGL->fMinRenderTargetHeight;
+ if (fCaps.fMinRenderTargetHeight == kProbe_GrGLCapability) {
+ fCaps.fMinRenderTargetHeight =
+ probe_for_min_render_target_height(fGL, fCaps.fNPOTRenderTargetSupport,
+ fCaps.fMaxRenderTargetSize);
+ }
+
+ fCaps.fMinRenderTargetWidth = fGL->fMinRenderTargetWidth;
+ if (fCaps.fMinRenderTargetWidth == kProbe_GrGLCapability) {
+ fCaps.fMinRenderTargetWidth =
+ probe_for_min_render_target_width(fGL, fCaps.fNPOTRenderTargetSupport,
+ fCaps.fMaxRenderTargetSize);
+ }
+
+ this->initFSAASupport();
+ this->initStencilFormats();
+}
+
+// Detects which multisampled-FBO flavor the driver offers and derives the
+// sample count to use for each GrAALevel.
+void GrGpuGL::initFSAASupport() {
+ // TODO: Get rid of GrAALevel and use # samples directly.
+ GR_STATIC_ASSERT(0 == kNone_GrAALevel);
+ GR_STATIC_ASSERT(1 == kLow_GrAALevel);
+ GR_STATIC_ASSERT(2 == kMed_GrAALevel);
+ GR_STATIC_ASSERT(3 == kHigh_GrAALevel);
+ memset(fGLCaps.fAASamples, 0, sizeof(fGLCaps.fAASamples));
+
+ fGLCaps.fMSFBOType = GLCaps::kNone_MSFBO;
+ if (kDesktop_GrGLBinding != this->glBinding()) {
+ if (this->hasExtension("GL_CHROMIUM_framebuffer_multisample")) {
+ // chrome's extension is equivalent to the EXT msaa
+ // and fbo_blit extensions.
+ fGLCaps.fMSFBOType = GLCaps::kDesktopEXT_MSFBO;
+ } else if (this->hasExtension("GL_APPLE_framebuffer_multisample")) {
+ fGLCaps.fMSFBOType = GLCaps::kAppleES_MSFBO;
+ }
+ } else {
+ if ((fGLVersion >= GR_GL_VER(3,0)) || this->hasExtension("GL_ARB_framebuffer_object")) {
+ fGLCaps.fMSFBOType = GLCaps::kDesktopARB_MSFBO;
+ } else if (this->hasExtension("GL_EXT_framebuffer_multisample") &&
+ this->hasExtension("GL_EXT_framebuffer_blit")) {
+ fGLCaps.fMSFBOType = GLCaps::kDesktopEXT_MSFBO;
+ }
+ }
+
+ if (GLCaps::kNone_MSFBO != fGLCaps.fMSFBOType) {
+ GrGLint maxSamples;
+ GR_GL_GetIntegerv(fGL, GR_GL_MAX_SAMPLES, &maxSamples);
+ if (maxSamples > 1 ) {
+ fGLCaps.fAASamples[kNone_GrAALevel] = 0;
+ fGLCaps.fAASamples[kLow_GrAALevel] =
+ GrMax(2, GrFixedFloorToInt((GR_FixedHalf) * maxSamples)); // ~half the max, at least 2
+ fGLCaps.fAASamples[kMed_GrAALevel] =
+ GrMax(2, GrFixedFloorToInt(((GR_Fixed1*3)/4) * maxSamples)); // ~3/4 of the max, at least 2
+ fGLCaps.fAASamples[kHigh_GrAALevel] = maxSamples;
+ }
+ }
+ fCaps.fFSAASupport = fGLCaps.fAASamples[kHigh_GrAALevel] > 0;
+}
+
+void GrGpuGL::initStencilFormats() {
+
+ // Build up list of legal stencil formats (though perhaps not supported on
+ // the particular gpu/driver) from most preferred to least.
+
+ // these consts are in order of most preferred to least preferred
+ // we don't bother with GL_STENCIL_INDEX1 or GL_DEPTH32F_STENCIL8
+ static const GrGLStencilBuffer::Format
+ // internal Format stencil bits total bits packed?
+ gS8 = {GR_GL_STENCIL_INDEX8, 8, 8, false},
+ gS16 = {GR_GL_STENCIL_INDEX16, 16, 16, false},
+ gD24S8 = {GR_GL_DEPTH24_STENCIL8, 8, 32, true },
+ gS4 = {GR_GL_STENCIL_INDEX4, 4, 4, false},
+ gS = {GR_GL_STENCIL_INDEX, kUnknownBitCount, kUnknownBitCount, false},
+ gDS = {GR_GL_DEPTH_STENCIL, kUnknownBitCount, kUnknownBitCount, true };
+
+ if (kDesktop_GrGLBinding == this->glBinding()) {
+ bool supportsPackedDS = fGLVersion >= GR_GL_VER(3,0) ||
+ this->hasExtension("GL_EXT_packed_depth_stencil") ||
+ this->hasExtension("GL_ARB_framebuffer_object");
+
+ // S1 thru S16 formats are in GL 3.0+, EXT_FBO, and ARB_FBO since we
+ // require FBO support we can expect these are legal formats and don't
+ // check. These also all support the unsized GL_STENCIL_INDEX.
+ fGLCaps.fStencilFormats.push_back() = gS8;
+ fGLCaps.fStencilFormats.push_back() = gS16;
+ if (supportsPackedDS) {
+ fGLCaps.fStencilFormats.push_back() = gD24S8;
+ }
+ fGLCaps.fStencilFormats.push_back() = gS4;
+ if (supportsPackedDS) {
+ fGLCaps.fStencilFormats.push_back() = gDS; // unsized depth-stencil: last resort
+ }
+ } else {
+ // ES2 has STENCIL_INDEX8 without extensions.
+ // ES1 with GL_OES_framebuffer_object (which we require for ES1)
+ // introduces tokens for S1 thu S8 but there are separate extensions
+ // that make them legal (GL_OES_stencil1, ...).
+ // GL_OES_packed_depth_stencil adds DEPTH24_STENCIL8
+ // ES doesn't support using the unsized formats.
+
+ if (fGLVersion >= GR_GL_VER(2,0) ||
+ this->hasExtension("GL_OES_stencil8")) {
+ fGLCaps.fStencilFormats.push_back() = gS8;
+ }
+ //fStencilFormats.push_back() = gS16;
+ if (this->hasExtension("GL_OES_packed_depth_stencil")) {
+ fGLCaps.fStencilFormats.push_back() = gD24S8;
+ }
+ if (this->hasExtension("GL_OES_stencil4")) {
+ fGLCaps.fStencilFormats.push_back() = gS4;
+ }
+ // we require some stencil format.
+ GrAssert(fGLCaps.fStencilFormats.count() > 0); // ES path can end up empty without extensions; desktop path always pushes formats
+ }
+}
+
+// Forces GL state to known values and invalidates all of GrGpuGL's cached
+// ("fHW*") state tracking. Presumably called when the GL context may have
+// been modified outside of this object's knowledge — TODO(review): confirm
+// the caller contract.
+void GrGpuGL::resetContext() {
+    if (gPrintStartupSpew && !fPrintedCaps) {
+        fPrintedCaps = true;
+        this->getCaps().print();
+        fGLCaps.print();
+    }
+
+    // We detect cases when blending is effectively off
+    fHWBlendDisabled = false;
+    GL_CALL(Enable(GR_GL_BLEND));
+
+    // we don't use the zb at all
+    GL_CALL(Disable(GR_GL_DEPTH_TEST));
+    GL_CALL(DepthMask(GR_GL_FALSE));
+
+    GL_CALL(Disable(GR_GL_CULL_FACE));
+    GL_CALL(FrontFace(GR_GL_CCW));
+    fHWDrawState.fDrawFace = kBoth_DrawFace;
+
+    GL_CALL(Disable(GR_GL_DITHER));
+    // these state enums only exist on desktop GL, not ES
+    if (kDesktop_GrGLBinding == this->glBinding()) {
+        GL_CALL(Disable(GR_GL_LINE_SMOOTH));
+        GL_CALL(Disable(GR_GL_POINT_SMOOTH));
+        GL_CALL(Disable(GR_GL_MULTISAMPLE));
+        fHWAAState.fMSAAEnabled = false;
+        fHWAAState.fSmoothLineEnabled = false;
+    }
+
+    GL_CALL(ColorMask(GR_GL_TRUE, GR_GL_TRUE, GR_GL_TRUE, GR_GL_TRUE));
+    fHWDrawState.fFlagBits = 0;
+
+    // we only ever use lines in hairline mode
+    GL_CALL(LineWidth(1));
+
+    // invalid
+    fActiveTextureUnitIdx = -1;
+
+    // illegal values so the first flush of blend state won't match the cache
+    fHWDrawState.fSrcBlend = (GrBlendCoeff)-1;
+    fHWDrawState.fDstBlend = (GrBlendCoeff)-1;
+
+    fHWDrawState.fBlendConstant = 0x00000000;
+    GL_CALL(BlendColor(0,0,0,0));
+
+    fHWDrawState.fColor = GrColor_ILLEGAL;
+
+    fHWDrawState.fViewMatrix = GrMatrix::InvalidMatrix();
+
+    // invalidate per-stage texture/sampler caches so the next draw re-flushes
+    for (int s = 0; s < kNumStages; ++s) {
+        fHWDrawState.fTextures[s] = NULL;
+        fHWDrawState.fSamplerStates[s].setRadial2Params(-GR_ScalarMax,
+                                                        -GR_ScalarMax,
+                                                        true);
+        fHWDrawState.fSamplerStates[s].setMatrix(GrMatrix::InvalidMatrix());
+        fHWDrawState.fSamplerStates[s].setConvolutionParams(0, NULL, NULL);
+    }
+
+    fHWBounds.fScissorRect.invalidate();
+    fHWBounds.fScissorEnabled = false;
+    GL_CALL(Disable(GR_GL_SCISSOR_TEST));
+    fHWBounds.fViewportRect.invalidate();
+
+    fHWDrawState.fStencilSettings.invalidate();
+    fHWStencilClip = false;
+    fClipInStencil = false;
+
+    fHWGeometryState.fIndexBuffer = NULL;
+    fHWGeometryState.fVertexBuffer = NULL;
+
+    fHWGeometryState.fArrayPtrsDirty = true;
+
+    GL_CALL(ColorMask(GR_GL_TRUE, GR_GL_TRUE, GR_GL_TRUE, GR_GL_TRUE));
+    fHWDrawState.fRenderTarget = NULL;
+}
+
+// Wraps client-provided GL object IDs (texture and/or FBO) in GrGLTexture /
+// GrGLRenderTarget objects. The created objects do not own the GL IDs
+// (fOwnIDs / fOwnsID are false), so destroying them does not delete the
+// client's GL objects. Returns NULL if the config cannot be a texture.
+GrResource* GrGpuGL::onCreatePlatformSurface(const GrPlatformSurfaceDesc& desc) {
+
+    bool isTexture = kTexture_GrPlatformSurfaceType == desc.fSurfaceType ||
+                     kTextureRenderTarget_GrPlatformSurfaceType == desc.fSurfaceType;
+    bool isRenderTarget = kRenderTarget_GrPlatformSurfaceType == desc.fSurfaceType ||
+                          kTextureRenderTarget_GrPlatformSurfaceType == desc.fSurfaceType;
+
+    GrGLRenderTarget::Desc rtDesc;
+    SkAutoTUnref<GrGLStencilBuffer> sb;
+
+    if (isRenderTarget) {
+        rtDesc.fRTFBOID = desc.fPlatformRenderTarget;
+        rtDesc.fConfig = desc.fConfig;
+        if (desc.fSampleCnt) {
+            // multisampled: the resolve destination is a separate FBO when
+            // the client says the RT can be resolved.
+            if (kGrCanResolve_GrPlatformRenderTargetFlagBit & desc.fRenderTargetFlags) {
+                rtDesc.fTexFBOID = desc.fPlatformResolveDestination;
+            } else {
+                GrAssert(!isTexture); // this should have been filtered by GrContext
+                rtDesc.fTexFBOID = GrGLRenderTarget::kUnresolvableFBOID;
+            }
+        } else {
+            rtDesc.fTexFBOID = desc.fPlatformRenderTarget;
+        }
+        // we don't know what the RB ids are without glGets and we don't care
+        // since we aren't responsible for deleting them.
+        rtDesc.fMSColorRenderbufferID = 0;
+        rtDesc.fSampleCnt = desc.fSampleCnt;
+        if (desc.fStencilBits) {
+            // wrap the client's (unknown-format) stencil buffer; renderbuffer
+            // ID 0 because we never manipulate or delete it.
+            GrGLStencilBuffer::Format format;
+            format.fInternalFormat = GrGLStencilBuffer::kUnknownInternalFormat;
+            format.fPacked = false;
+            format.fStencilBits = desc.fStencilBits;
+            format.fTotalBits = desc.fStencilBits;
+            sb.reset(new GrGLStencilBuffer(this, 0, desc.fWidth, desc.fHeight,
+                                           rtDesc.fSampleCnt, format));
+        }
+        rtDesc.fOwnIDs = false;
+    }
+
+    if (isTexture) {
+        GrGLTexture::Desc texDesc;
+        GrGLenum dontCare;
+        if (!canBeTexture(desc.fConfig, &dontCare,
+                         &texDesc.fUploadFormat,
+                         &texDesc.fUploadType)) {
+            return NULL;
+        }
+
+        GrGLTexture::TexParams params;
+
+        texDesc.fAllocWidth  = texDesc.fContentWidth  = desc.fWidth;
+        texDesc.fAllocHeight = texDesc.fContentHeight = desc.fHeight;
+
+        texDesc.fFormat             = desc.fConfig;
+        texDesc.fOrientation        = GrGLTexture::kBottomUp_Orientation;
+        texDesc.fTextureID          = desc.fPlatformTexture;
+        texDesc.fUploadByteCount    = GrBytesPerPixel(desc.fConfig);
+        texDesc.fOwnsID             = false;
+
+        params.invalidate(); // rather than do glGets.
+        if (isRenderTarget) {
+            GrTexture* tex = new GrGLTexture(this, texDesc, rtDesc, params);
+            tex->asRenderTarget()->setStencilBuffer(sb.get());
+            return tex;
+        } else {
+            return new GrGLTexture(this, texDesc, params);
+        }
+    } else {
+        GrGLIRect viewport;
+        viewport.fLeft   = 0;
+        viewport.fBottom = 0;
+        viewport.fWidth  = desc.fWidth;
+        viewport.fHeight = desc.fHeight;
+
+        GrGLRenderTarget* rt = new GrGLRenderTarget(this, rtDesc, viewport);
+        rt->setStencilBuffer(sb.get());
+        return rt;
+    }
+}
+
+
+////////////////////////////////////////////////////////////////////////////////
+
+// Allocates GL storage for the (already bound) texture and uploads the pixel
+// data. Handles three complications:
+//   1. row padding / y-flip of the source (trimmed into a temp buffer when
+//      GL_UNPACK_ROW_LENGTH isn't available, i.e. on ES or when flipping),
+//   2. the paletted-texture path which must use CompressedTexImage2D,
+//   3. alloc dims larger than content dims, in which case the content's edge
+//      texels are replicated into the padding so clamped sampling at the
+//      content border looks correct.
+// data may be NULL, in which case storage is allocated but nothing uploaded.
+void GrGpuGL::allocateAndUploadTexData(const GrGLTexture::Desc& desc,
+                                       GrGLenum internalFormat,
+                                       const void* data,
+                                       size_t rowBytes) {
+    // we assume the texture is bound
+    if (!rowBytes) {
+        // tightly packed
+        rowBytes = desc.fUploadByteCount * desc.fContentWidth;
+    }
+
+    // in case we need a temporary, trimmed copy of the src pixels
+    SkAutoSMalloc<128 * 128> tempStorage;
+
+    /*
+     *  check whether to allocate a temporary buffer for flipping y or
+     *  because our data has extra bytes past each row. If so, we need
+     *  to trim those off here, since GL ES doesn't let us specify
+     *  GL_UNPACK_ROW_LENGTH.
+     */
+    bool flipY = GrGLTexture::kBottomUp_Orientation == desc.fOrientation;
+    if (kDesktop_GrGLBinding == this->glBinding() && !flipY) {
+        // desktop GL can skip row padding in the driver via ROW_LENGTH
+        if (data && rowBytes != desc.fContentWidth * desc.fUploadByteCount) {
+            GL_CALL(PixelStorei(GR_GL_UNPACK_ROW_LENGTH,
+                                rowBytes / desc.fUploadByteCount));
+        }
+    } else {
+        size_t trimRowBytes = desc.fContentWidth * desc.fUploadByteCount;
+        if (data && (trimRowBytes < rowBytes || flipY)) {
+            // copy the data into our new storage, skipping the trailing bytes
+            size_t trimSize = desc.fContentHeight * trimRowBytes;
+            const char* src = (const char*)data;
+            if (flipY) {
+                // start at the last row and walk backwards to flip
+                src += (desc.fContentHeight - 1) * rowBytes;
+            }
+            char* dst = (char*)tempStorage.reset(trimSize);
+            for (int y = 0; y < desc.fContentHeight; y++) {
+                memcpy(dst, src, trimRowBytes);
+                if (flipY) {
+                    src -= rowBytes;
+                } else {
+                    src += rowBytes;
+                }
+                dst += trimRowBytes;
+            }
+            // now point data to our trimmed version
+            data = tempStorage.get();
+            rowBytes = trimRowBytes;
+        }
+    }
+
+    GL_CALL(PixelStorei(GR_GL_UNPACK_ALIGNMENT, desc.fUploadByteCount));
+    if (kIndex_8_GrPixelConfig == desc.fFormat &&
+        this->getCaps().f8BitPaletteSupport) {
+        // ES only supports CompressedTexImage2D, not CompressedTexSubimage2D
+        GrAssert(desc.fContentWidth == desc.fAllocWidth);
+        GrAssert(desc.fContentHeight == desc.fAllocHeight);
+        // payload = 8-bit indices followed by the color table
+        GrGLsizei imageSize = desc.fAllocWidth * desc.fAllocHeight +
+                              kGrColorTableSize;
+        GL_CALL(CompressedTexImage2D(GR_GL_TEXTURE_2D, 0, desc.fUploadFormat,
+                                     desc.fAllocWidth, desc.fAllocHeight,
+                                     0, imageSize, data));
+        GrGLResetRowLength(this->glInterface());
+    } else {
+        if (NULL != data && (desc.fAllocWidth != desc.fContentWidth ||
+                             desc.fAllocHeight != desc.fContentHeight)) {
+            // allocate the full padded size, then upload only the content
+            GL_CALL(TexImage2D(GR_GL_TEXTURE_2D, 0, internalFormat,
+                               desc.fAllocWidth, desc.fAllocHeight,
+                               0, desc.fUploadFormat, desc.fUploadType, NULL));
+            GL_CALL(TexSubImage2D(GR_GL_TEXTURE_2D, 0, 0, 0, desc.fContentWidth,
+                                  desc.fContentHeight, desc.fUploadFormat,
+                                  desc.fUploadType, data));
+            GrGLResetRowLength(this->glInterface());
+
+            // replicate the content's border texels into the pad area so
+            // clamped bilerp at the content edge doesn't bleed garbage.
+            int extraW = desc.fAllocWidth - desc.fContentWidth;
+            int extraH = desc.fAllocHeight - desc.fContentHeight;
+            int maxTexels = extraW * extraH;
+            maxTexels = GrMax(extraW * desc.fContentHeight, maxTexels);
+            maxTexels = GrMax(desc.fContentWidth * extraH, maxTexels);
+
+            SkAutoSMalloc<128*128> texels(desc.fUploadByteCount * maxTexels);
+
+            // rowBytes is actual stride between rows in data
+            // rowDataBytes is the actual amount of non-pad data in a row
+            // and the stride used for uploading extraH rows.
+            uint32_t rowDataBytes = desc.fContentWidth * desc.fUploadByteCount;
+            if (extraH) {
+                // repeat the bottom content row into the rows below it
+                uint8_t* lastRowStart = (uint8_t*) data +
+                                        (desc.fContentHeight - 1) * rowBytes;
+                uint8_t* extraRowStart = (uint8_t*)texels.get();
+
+                for (int i = 0; i < extraH; ++i) {
+                    memcpy(extraRowStart, lastRowStart, rowDataBytes);
+                    extraRowStart += rowDataBytes;
+                }
+                GL_CALL(TexSubImage2D(GR_GL_TEXTURE_2D, 0, 0,
+                                      desc.fContentHeight, desc.fContentWidth,
+                                      extraH, desc.fUploadFormat,
+                                      desc.fUploadType, texels.get()));
+            }
+            if (extraW) {
+                // repeat each row's rightmost content texel to its right
+                uint8_t* edgeTexel = (uint8_t*)data +
+                                     rowDataBytes - desc.fUploadByteCount;
+                uint8_t* extraTexel = (uint8_t*)texels.get();
+                for (int j = 0; j < desc.fContentHeight; ++j) {
+                    for (int i = 0; i < extraW; ++i) {
+                        memcpy(extraTexel, edgeTexel, desc.fUploadByteCount);
+                        extraTexel += desc.fUploadByteCount;
+                    }
+                    edgeTexel += rowBytes;
+                }
+                GL_CALL(TexSubImage2D(GR_GL_TEXTURE_2D, 0, desc.fContentWidth,
+                                      0, extraW, desc.fContentHeight,
+                                      desc.fUploadFormat, desc.fUploadType,
+                                      texels.get()));
+            }
+            if (extraW && extraH) {
+                // fill the bottom-right pad corner with the corner texel
+                uint8_t* cornerTexel = (uint8_t*)data +
+                                       desc.fContentHeight * rowBytes -
+                                       desc.fUploadByteCount;
+                uint8_t* extraTexel = (uint8_t*)texels.get();
+                for (int i = 0; i < extraW*extraH; ++i) {
+                    memcpy(extraTexel, cornerTexel, desc.fUploadByteCount);
+                    extraTexel += desc.fUploadByteCount;
+                }
+                GL_CALL(TexSubImage2D(GR_GL_TEXTURE_2D, 0, desc.fContentWidth,
+                                      desc.fContentHeight, extraW, extraH,
+                                      desc.fUploadFormat, desc.fUploadType,
+                                      texels.get()));
+            }
+
+        } else {
+            // alloc size == content size: single full upload
+            GL_CALL(TexImage2D(GR_GL_TEXTURE_2D, 0, internalFormat,
+                               desc.fAllocWidth, desc.fAllocHeight, 0,
+                               desc.fUploadFormat, desc.fUploadType, data));
+            GrGLResetRowLength(this->glInterface());
+        }
+    }
+}
+
+// Creates the FBO(s) (and, for MSAA, the color renderbuffer) backing a
+// texture render target and fills in *desc. For multisampled targets two
+// FBOs are created: fRTFBOID (rendered to) and fTexFBOID (resolved into,
+// with texID attached); otherwise both IDs are the same FBO. Returns false
+// and cleans up all created GL objects on any failure. Note: leaves
+// framebuffer bindings modified; the fHW* cache is not updated here.
+bool GrGpuGL::createRenderTargetObjects(int width, int height,
+                                        GrGLuint texID,
+                                        GrGLRenderTarget::Desc* desc) {
+    desc->fMSColorRenderbufferID = 0;
+    desc->fRTFBOID = 0;
+    desc->fTexFBOID = 0;
+    desc->fOwnIDs = true;
+
+    GrGLenum status;
+    GrGLint err;
+
+    GrGLenum msColorFormat = 0; // suppress warning
+
+    GL_CALL(GenFramebuffers(1, &desc->fTexFBOID));
+    if (!desc->fTexFBOID) {
+        goto FAILED;
+    }
+
+
+    // If we are using multisampling we will create two FBOS. We render
+    // to one and then resolve to the texture bound to the other.
+    if (desc->fSampleCnt > 1 && GLCaps::kNone_MSFBO != fGLCaps.fMSFBOType) {
+        GL_CALL(GenFramebuffers(1, &desc->fRTFBOID));
+        GL_CALL(GenRenderbuffers(1, &desc->fMSColorRenderbufferID));
+        if (!desc->fRTFBOID ||
+            !desc->fMSColorRenderbufferID ||
+            !this->fboInternalFormat(desc->fConfig, &msColorFormat)) {
+            goto FAILED;
+        }
+    } else {
+        // non-MSAA: render directly into the texture's FBO
+        desc->fRTFBOID = desc->fTexFBOID;
+    }
+
+    if (desc->fRTFBOID != desc->fTexFBOID) {
+        GrAssert(desc->fSampleCnt > 1);
+        GL_CALL(BindRenderbuffer(GR_GL_RENDERBUFFER,
+                               desc->fMSColorRenderbufferID));
+        // NOERRCHECK + explicit GetError: allocation of the MSAA storage is
+        // allowed to fail (e.g. out of memory) without asserting.
+        GR_GL_CALL_NOERRCHECK(this->glInterface(),
+                              RenderbufferStorageMultisample(GR_GL_RENDERBUFFER,
+                                                             desc->fSampleCnt,
+                                                             msColorFormat,
+                                                             width, height));
+        err = GR_GL_GET_ERROR(this->glInterface());
+        if (err != GR_GL_NO_ERROR) {
+            goto FAILED;
+        }
+        GL_CALL(BindFramebuffer(GR_GL_FRAMEBUFFER, desc->fRTFBOID));
+        GL_CALL(FramebufferRenderbuffer(GR_GL_FRAMEBUFFER,
+                                      GR_GL_COLOR_ATTACHMENT0,
+                                      GR_GL_RENDERBUFFER,
+                                      desc->fMSColorRenderbufferID));
+        GL_CALL_RET(status, CheckFramebufferStatus(GR_GL_FRAMEBUFFER));
+        if (status != GR_GL_FRAMEBUFFER_COMPLETE) {
+            goto FAILED;
+        }
+    }
+    GL_CALL(BindFramebuffer(GR_GL_FRAMEBUFFER, desc->fTexFBOID));
+
+    GL_CALL(FramebufferTexture2D(GR_GL_FRAMEBUFFER,
+                                 GR_GL_COLOR_ATTACHMENT0,
+                                 GR_GL_TEXTURE_2D,
+                                 texID, 0));
+    GL_CALL_RET(status, CheckFramebufferStatus(GR_GL_FRAMEBUFFER));
+    if (status != GR_GL_FRAMEBUFFER_COMPLETE) {
+        goto FAILED;
+    }
+
+    return true;
+
+FAILED:
+    // delete whatever we managed to create; fRTFBOID may alias fTexFBOID
+    if (desc->fMSColorRenderbufferID) {
+        GL_CALL(DeleteRenderbuffers(1, &desc->fMSColorRenderbufferID));
+    }
+    if (desc->fRTFBOID != desc->fTexFBOID) {
+        GL_CALL(DeleteFramebuffers(1, &desc->fRTFBOID));
+    }
+    if (desc->fTexFBOID) {
+        GL_CALL(DeleteFramebuffers(1, &desc->fTexFBOID));
+    }
+    return false;
+}
+
+// Single funnel for all texture-creation failures so a debugger breakpoint
+// here catches every failing path in onCreateTexture.
+// good to set a break-point here to know when createTexture fails
+static GrTexture* return_null_texture() {
+//    GrAssert(!"null texture");
+    return NULL;
+}
+
+#if GR_DEBUG
+// Debug-only helper: converts to size_t so a GrAssert comparison against an
+// array count is unsigned (also rejects negative values by wrapping huge).
+static size_t as_size_t(int x) {
+    return x;
+}
+#endif
+
+// Creates a GL texture (and, when kRenderTarget_GrTextureFlagBit is set, its
+// backing FBO objects). Alloc dimensions may be rounded up to powers of two
+// or minimum-RT size depending on caps; srcData (may be NULL) is uploaded
+// via allocateAndUploadTexData. Returns NULL on any failure.
+GrTexture* GrGpuGL::onCreateTexture(const GrTextureDesc& desc,
+                                    const void* srcData,
+                                    size_t rowBytes) {
+
+#if GR_COLLECT_STATS
+    ++fStats.fTextureCreateCnt;
+#endif
+
+    // initial filter/wrap state programmed into the new texture below
+    static const GrGLTexture::TexParams DEFAULT_PARAMS = {
+        GR_GL_NEAREST,
+        GR_GL_CLAMP_TO_EDGE,
+        GR_GL_CLAMP_TO_EDGE
+    };
+
+    GrGLTexture::Desc glTexDesc;
+    GrGLRenderTarget::Desc  glRTDesc;
+    GrGLenum internalFormat;
+
+    glTexDesc.fContentWidth  = desc.fWidth;
+    glTexDesc.fContentHeight = desc.fHeight;
+    glTexDesc.fAllocWidth    = desc.fWidth;
+    glTexDesc.fAllocHeight   = desc.fHeight;
+    glTexDesc.fFormat        = desc.fFormat;
+    glTexDesc.fOwnsID        = true;
+
+    glRTDesc.fMSColorRenderbufferID = 0;
+    glRTDesc.fRTFBOID = 0;
+    glRTDesc.fTexFBOID = 0;
+    glRTDesc.fOwnIDs = true;
+    glRTDesc.fConfig = glTexDesc.fFormat;
+
+    bool renderTarget = 0 != (desc.fFlags & kRenderTarget_GrTextureFlagBit);
+    if (!canBeTexture(desc.fFormat,
+                      &internalFormat,
+                      &glTexDesc.fUploadFormat,
+                      &glTexDesc.fUploadType)) {
+        return return_null_texture();
+    }
+
+    const Caps& caps = this->getCaps();
+
+    // We keep GrRenderTargets in GL's normal orientation so that they
+    // can be drawn to by the outside world without the client having
+    // to render upside down.
+    glTexDesc.fOrientation = renderTarget ? GrGLTexture::kBottomUp_Orientation :
+                                            GrGLTexture::kTopDown_Orientation;
+
+    GrAssert(as_size_t(desc.fAALevel) < GR_ARRAY_COUNT(fGLCaps.fAASamples));
+    glRTDesc.fSampleCnt = fGLCaps.fAASamples[desc.fAALevel];
+    if (GLCaps::kNone_MSFBO == fGLCaps.fMSFBOType &&
+        desc.fAALevel != kNone_GrAALevel) {
+        // warn but proceed with a non-AA target
+        GrPrintf("AA RT requested but not supported on this platform.");
+    }
+
+    glTexDesc.fUploadByteCount = GrBytesPerPixel(desc.fFormat);
+
+    if (renderTarget) {
+        // RTs may have stricter size constraints than plain textures
+        if (!caps.fNPOTRenderTargetSupport) {
+            glTexDesc.fAllocWidth  = GrNextPow2(desc.fWidth);
+            glTexDesc.fAllocHeight = GrNextPow2(desc.fHeight);
+        }
+
+        glTexDesc.fAllocWidth = GrMax(caps.fMinRenderTargetWidth,
+                                      glTexDesc.fAllocWidth);
+        glTexDesc.fAllocHeight = GrMax(caps.fMinRenderTargetHeight,
+                                       glTexDesc.fAllocHeight);
+        if (glTexDesc.fAllocWidth > caps.fMaxRenderTargetSize ||
+            glTexDesc.fAllocHeight > caps.fMaxRenderTargetSize) {
+            return return_null_texture();
+        }
+    } else if (!caps.fNPOTTextureSupport) {
+        glTexDesc.fAllocWidth  = GrNextPow2(desc.fWidth);
+        glTexDesc.fAllocHeight = GrNextPow2(desc.fHeight);
+        if (glTexDesc.fAllocWidth > caps.fMaxTextureSize ||
+            glTexDesc.fAllocHeight > caps.fMaxTextureSize) {
+            return return_null_texture();
+        }
+    }
+
+    GL_CALL(GenTextures(1, &glTexDesc.fTextureID));
+    if (!glTexDesc.fTextureID) {
+        return return_null_texture();
+    }
+
+    // bind on the spare unit so we don't disturb cached stage bindings
+    this->setSpareTextureUnit();
+    GL_CALL(BindTexture(GR_GL_TEXTURE_2D, glTexDesc.fTextureID));
+    GL_CALL(TexParameteri(GR_GL_TEXTURE_2D,
+                          GR_GL_TEXTURE_MAG_FILTER,
+                          DEFAULT_PARAMS.fFilter));
+    GL_CALL(TexParameteri(GR_GL_TEXTURE_2D,
+                          GR_GL_TEXTURE_MIN_FILTER,
+                          DEFAULT_PARAMS.fFilter));
+    GL_CALL(TexParameteri(GR_GL_TEXTURE_2D,
+                          GR_GL_TEXTURE_WRAP_S,
+                          DEFAULT_PARAMS.fWrapS));
+    GL_CALL(TexParameteri(GR_GL_TEXTURE_2D,
+                          GR_GL_TEXTURE_WRAP_T,
+                          DEFAULT_PARAMS.fWrapT));
+
+    this->allocateAndUploadTexData(glTexDesc, internalFormat,srcData, rowBytes);
+
+    GrGLTexture* tex;
+    if (renderTarget) {
+        GrGLenum msColorRenderbufferFormat = -1;
+#if GR_COLLECT_STATS
+        ++fStats.fRenderTargetCreateCnt;
+#endif
+        if (!this->createRenderTargetObjects(glTexDesc.fAllocWidth,
+                                             glTexDesc.fAllocHeight,
+                                             glTexDesc.fTextureID,
+                                             &glRTDesc)) {
+            GL_CALL(DeleteTextures(1, &glTexDesc.fTextureID));
+            return return_null_texture();
+        }
+        tex = new GrGLTexture(this, glTexDesc, glRTDesc, DEFAULT_PARAMS);
+    } else {
+        tex = new GrGLTexture(this, glTexDesc, DEFAULT_PARAMS);
+    }
+#ifdef TRACE_TEXTURE_CREATION
+    GrPrintf("--- new texture [%d] size=(%d %d) bpp=%d\n",
+             tex->fTextureID, width, height, tex->fUploadByteCount);
+#endif
+    return tex;
+}
+
+namespace {
+// Queries GL for the actual stencil (and, for packed formats, depth) bit
+// sizes of renderbuffer `rb` when the format's sizes are unknown (i.e. an
+// unsized internal format was used). No-op when the sizes are already known.
+// Assumes rb is the currently bound GR_GL_RENDERBUFFER — TODO(review):
+// confirm; `rb` itself is not bound here.
+void inline get_stencil_rb_sizes(const GrGLInterface* gl,
+                                 GrGLuint rb, 
+                                 GrGLStencilBuffer::Format* format) {
+    // we shouldn't ever know one size and not the other
+    GrAssert((kUnknownBitCount == format->fStencilBits) ==
+             (kUnknownBitCount == format->fTotalBits));
+    if (kUnknownBitCount == format->fStencilBits) {
+        GR_GL_GetRenderbufferParameteriv(gl, GR_GL_RENDERBUFFER,
+                                         GR_GL_RENDERBUFFER_STENCIL_SIZE,
+                                         (GrGLint*)&format->fStencilBits);
+        if (format->fPacked) {
+            GR_GL_GetRenderbufferParameteriv(gl, GR_GL_RENDERBUFFER,
+                                             GR_GL_RENDERBUFFER_DEPTH_SIZE,
+                                             (GrGLint*)&format->fTotalBits);
+            format->fTotalBits += format->fStencilBits;
+        } else {
+            format->fTotalBits = format->fStencilBits;
+        }
+    }
+}
+}
+
+// Creates a stencil renderbuffer for rt, trying each format from
+// fGLCaps.fStencilFormats (starting at the last one that worked) until one
+// both allocates and attaches successfully. On success the stencil buffer is
+// installed on rt and true is returned; otherwise the renderbuffer is
+// deleted and false is returned.
+bool GrGpuGL::createStencilBufferForRenderTarget(GrRenderTarget* rt,
+                                                 int width, int height) {
+
+    // All internally created RTs are also textures. We don't create
+    // SBs for a client's standalone RT (that is RT that isn't also a texture).
+    GrAssert(rt->asTexture());
+    GrAssert(width >= rt->allocatedWidth());
+    GrAssert(height >= rt->allocatedHeight());
+
+    int samples = rt->numSamples();
+    GrGLuint sbID;
+    GL_CALL(GenRenderbuffers(1, &sbID));
+    if (!sbID) {
+        return false;
+    }
+
+    GrGLStencilBuffer* sb = NULL;
+
+    int stencilFmtCnt = fGLCaps.fStencilFormats.count();
+    for (int i = 0; i < stencilFmtCnt; ++i) {
+        GL_CALL(BindRenderbuffer(GR_GL_RENDERBUFFER, sbID));
+        // we start with the last stencil format that succeeded in hopes
+        // that we won't go through this loop more than once after the
+        // first (painful) stencil creation.
+        int sIdx = (i + fLastSuccessfulStencilFmtIdx) % stencilFmtCnt;
+        const GrGLStencilBuffer::Format& sFmt = fGLCaps.fStencilFormats[sIdx];
+        // we do this "if" so that we don't call the multisample
+        // version on a GL that doesn't have an MSAA extension.
+        if (samples > 1) {
+            GR_GL_CALL_NOERRCHECK(this->glInterface(),
+                                  RenderbufferStorageMultisample(
+                                        GR_GL_RENDERBUFFER,
+                                        samples,
+                                        sFmt.fInternalFormat,
+                                        width,
+                                        height));
+        } else {
+            GR_GL_CALL_NOERRCHECK(this->glInterface(),
+                                  RenderbufferStorage(GR_GL_RENDERBUFFER,
+                                                      sFmt.fInternalFormat,
+                                                      width, height));
+        }
+
+        GrGLenum err = GR_GL_GET_ERROR(this->glInterface());
+        if (err == GR_GL_NO_ERROR) {
+            // After sized formats we attempt an unsized format and take whatever
+            // sizes GL gives us. In that case we query for the size.
+            GrGLStencilBuffer::Format format = sFmt;
+            get_stencil_rb_sizes(this->glInterface(), sbID, &format);
+            sb = new GrGLStencilBuffer(this, sbID, width, height,
+                                       samples, format);
+            if (this->attachStencilBufferToRenderTarget(sb, rt)) {
+                fLastSuccessfulStencilFmtIdx = sIdx;
+                rt->setStencilBuffer(sb);
+                sb->unref();
+                return true;
+            }
+            // attach failed: keep sbID alive for the next format attempt
+            sb->abandon(); // otherwise we lose sbID
+            sb->unref();
+        }
+    }
+    GL_CALL(DeleteRenderbuffers(1, &sbID));
+    return false;
+}
+
+// Attaches (or, when sb is NULL, detaches) a stencil renderbuffer to rt's
+// render FBO. Packed depth-stencil formats are attached to both the stencil
+// and depth attachment points. Returns false — after detaching again — if
+// the FBO is incomplete with the attachment; true otherwise.
+bool GrGpuGL::attachStencilBufferToRenderTarget(GrStencilBuffer* sb,
+                                                GrRenderTarget* rt) {
+    GrGLRenderTarget* glrt = (GrGLRenderTarget*) rt;
+
+    GrGLuint fbo = glrt->renderFBOID();
+
+    if (NULL == sb) {
+        if (NULL != rt->getStencilBuffer()) {
+            // detach from both attachment points (covers packed formats)
+            GL_CALL(FramebufferRenderbuffer(GR_GL_FRAMEBUFFER,
+                                            GR_GL_STENCIL_ATTACHMENT,
+                                            GR_GL_RENDERBUFFER, 0));
+            GL_CALL(FramebufferRenderbuffer(GR_GL_FRAMEBUFFER,
+                                            GR_GL_DEPTH_ATTACHMENT,
+                                            GR_GL_RENDERBUFFER, 0));
+#if GR_DEBUG
+            GrGLenum status;
+            GL_CALL_RET(status, CheckFramebufferStatus(GR_GL_FRAMEBUFFER));
+            GrAssert(GR_GL_FRAMEBUFFER_COMPLETE == status);
+#endif
+        }
+        return true;
+    } else {
+        GrGLStencilBuffer* glsb = (GrGLStencilBuffer*) sb;
+        GrGLuint rb = glsb->renderbufferID();
+
+        // we bind the FBO directly, so invalidate the RT cache entry
+        fHWDrawState.fRenderTarget = NULL;
+        GL_CALL(BindFramebuffer(GR_GL_FRAMEBUFFER, fbo));
+        GL_CALL(FramebufferRenderbuffer(GR_GL_FRAMEBUFFER,
+                                        GR_GL_STENCIL_ATTACHMENT,
+                                        GR_GL_RENDERBUFFER, rb));
+        if (glsb->format().fPacked) {
+            GL_CALL(FramebufferRenderbuffer(GR_GL_FRAMEBUFFER,
+                                            GR_GL_DEPTH_ATTACHMENT,
+                                            GR_GL_RENDERBUFFER, rb));
+        } else {
+            GL_CALL(FramebufferRenderbuffer(GR_GL_FRAMEBUFFER,
+                                            GR_GL_DEPTH_ATTACHMENT,
+                                            GR_GL_RENDERBUFFER, 0));
+        }
+
+        GrGLenum status;
+        GL_CALL_RET(status, CheckFramebufferStatus(GR_GL_FRAMEBUFFER));
+        if (status != GR_GL_FRAMEBUFFER_COMPLETE) {
+            // roll back the attachment before reporting failure
+            GL_CALL(FramebufferRenderbuffer(GR_GL_FRAMEBUFFER,
+                                          GR_GL_STENCIL_ATTACHMENT,
+                                          GR_GL_RENDERBUFFER, 0));
+            if (glsb->format().fPacked) {
+                GL_CALL(FramebufferRenderbuffer(GR_GL_FRAMEBUFFER,
+                                              GR_GL_DEPTH_ATTACHMENT,
+                                              GR_GL_RENDERBUFFER, 0));
+            }
+            return false;
+        } else {
+            return true;
+        }
+    }
+}
+
+////////////////////////////////////////////////////////////////////////////////
+
+// Creates a GL vertex buffer of `size` bytes with the requested usage hint.
+// BufferData is issued eagerly so an out-of-memory driver failure is caught
+// here (returning NULL) instead of at first use. Updates the cached bound-
+// vertex-buffer state.
+GrVertexBuffer* GrGpuGL::onCreateVertexBuffer(uint32_t size, bool dynamic) {
+    GrGLuint id;
+    GL_CALL(GenBuffers(1, &id));
+    if (id) {
+        GL_CALL(BindBuffer(GR_GL_ARRAY_BUFFER, id));
+        fHWGeometryState.fArrayPtrsDirty = true;
+        GrGLClearErr(this->glInterface());
+        // make sure driver can allocate memory for this buffer
+        GR_GL_CALL_NOERRCHECK(this->glInterface(),
+                              BufferData(GR_GL_ARRAY_BUFFER, size, NULL,
+                              dynamic ? GR_GL_DYNAMIC_DRAW : GR_GL_STATIC_DRAW));
+        if (this->glInterface()->fGetError() != GR_GL_NO_ERROR) {
+            GL_CALL(DeleteBuffers(1, &id));
+            // deleting bound buffer does implicit bind to 0
+            fHWGeometryState.fVertexBuffer = NULL;
+            return NULL;
+        }
+        GrGLVertexBuffer* vertexBuffer = new GrGLVertexBuffer(this, id,
+                                                              size, dynamic);
+        fHWGeometryState.fVertexBuffer = vertexBuffer;
+        return vertexBuffer;
+    }
+    return NULL;
+}
+
+// Creates a GL index (element) buffer of `size` bytes; mirrors
+// onCreateVertexBuffer, including the eager BufferData allocation check and
+// bound-index-buffer cache update. Returns NULL on GL failure.
+GrIndexBuffer* GrGpuGL::onCreateIndexBuffer(uint32_t size, bool dynamic) {
+    GrGLuint id;
+    GL_CALL(GenBuffers(1, &id));
+    if (id) {
+        GL_CALL(BindBuffer(GR_GL_ELEMENT_ARRAY_BUFFER, id));
+        GrGLClearErr(this->glInterface());
+        // make sure driver can allocate memory for this buffer
+        GR_GL_CALL_NOERRCHECK(this->glInterface(),
+                              BufferData(GR_GL_ELEMENT_ARRAY_BUFFER, size, NULL,
+                              dynamic ? GR_GL_DYNAMIC_DRAW : GR_GL_STATIC_DRAW));
+        if (this->glInterface()->fGetError() != GR_GL_NO_ERROR) {
+            GL_CALL(DeleteBuffers(1, &id));
+            // deleting bound buffer does implicit bind to 0
+            fHWGeometryState.fIndexBuffer = NULL;
+            return NULL;
+        }
+        GrIndexBuffer* indexBuffer = new GrGLIndexBuffer(this, id,
+                                                         size, dynamic);
+        fHWGeometryState.fIndexBuffer = indexBuffer;
+        return indexBuffer;
+    }
+    return NULL;
+}
+
+// Syncs the GL scissor with `rect` (RT-relative, y-down; NULL means scissor
+// off). A rect covering the whole viewport is treated as NULL. Redundant GL
+// calls are skipped via the fHWBounds cache.
+void GrGpuGL::flushScissor(const GrIRect* rect) {
+    GrAssert(NULL != fCurrDrawState.fRenderTarget);
+    const GrGLIRect& vp =
+            ((GrGLRenderTarget*)fCurrDrawState.fRenderTarget)->getViewport();
+
+    GrGLIRect scissor;
+    if (NULL != rect) {
+        // convert to GL's bottom-up, viewport-relative coordinates
+        scissor.setRelativeTo(vp, rect->fLeft, rect->fTop,
+                              rect->width(), rect->height());
+        if (scissor.contains(vp)) {
+            // scissoring the full target is a no-op; disable instead
+            rect = NULL;
+        }
+    }
+
+    if (NULL != rect) {
+        if (fHWBounds.fScissorRect != scissor) {
+            scissor.pushToGLScissor(this->glInterface());
+            fHWBounds.fScissorRect = scissor;
+        }
+        if (!fHWBounds.fScissorEnabled) {
+            GL_CALL(Enable(GR_GL_SCISSOR_TEST));
+            fHWBounds.fScissorEnabled = true;
+        }
+    } else {
+        if (fHWBounds.fScissorEnabled) {
+            GL_CALL(Disable(GR_GL_SCISSOR_TEST));
+            fHWBounds.fScissorEnabled = false;
+        }
+    }
+}
+
+// Clears `rect` (NULL = whole target) of the current render target to
+// `color` using glClear, scissored to the rect. Re-enables color writes
+// since glClear respects the color mask.
+void GrGpuGL::onClear(const GrIRect* rect, GrColor color) {
+    if (NULL == fCurrDrawState.fRenderTarget) {
+        return;
+    }
+    GrIRect r;
+    if (NULL != rect) {
+        // flushScissor expects rect to be clipped to the target.
+        r = *rect;
+        GrIRect rtRect = SkIRect::MakeWH(fCurrDrawState.fRenderTarget->width(),
+                                         fCurrDrawState.fRenderTarget->height());
+        if (r.intersect(rtRect)) {
+            rect = &r;
+        } else {
+            // clear rect is entirely outside the target: nothing to do
+            return;
+        }
+    }
+    this->flushRenderTarget(rect);
+    this->flushScissor(rect);
+    GL_CALL(ColorMask(GR_GL_TRUE,GR_GL_TRUE,GR_GL_TRUE,GR_GL_TRUE));
+    fHWDrawState.fFlagBits &= ~kNoColorWrites_StateBit;
+    // GrColor is 8-bit per channel; convert to GL's [0,1] floats
+    GL_CALL(ClearColor(GrColorUnpackR(color)/255.f,
+                       GrColorUnpackG(color)/255.f,
+                       GrColorUnpackB(color)/255.f,
+                       GrColorUnpackA(color)/255.f));
+    GL_CALL(Clear(GR_GL_COLOR_BUFFER_BIT));
+}
+
+// Clears the entire stencil buffer of the current render target to 0,
+// disabling the scissor first so the whole buffer is affected. Invalidates
+// the cached stencil settings since the mask was changed.
+void GrGpuGL::clearStencil() {
+    if (NULL == fCurrDrawState.fRenderTarget) {
+        return;
+    }
+
+    this->flushRenderTarget(&GrIRect::EmptyIRect());
+
+    if (fHWBounds.fScissorEnabled) {
+        GL_CALL(Disable(GR_GL_SCISSOR_TEST));
+        fHWBounds.fScissorEnabled = false;
+    }
+    GL_CALL(StencilMask(0xffffffff));
+    GL_CALL(ClearStencil(0));
+    GL_CALL(Clear(GR_GL_STENCIL_BUFFER_BIT));
+    fHWDrawState.fStencilSettings.invalidate();
+}
+
+// Clears the clip bit (the high bit of the stencil buffer) within `rect`,
+// setting it when insideClip is true and clearing it otherwise. See the
+// inline comment for why the full stencil mask is used rather than just the
+// clip bit.
+void GrGpuGL::clearStencilClip(const GrIRect& rect, bool insideClip) {
+    GrAssert(NULL != fCurrDrawState.fRenderTarget);
+
+    // this should only be called internally when we know we have a
+    // stencil buffer.
+    GrAssert(NULL != fCurrDrawState.fRenderTarget->getStencilBuffer());
+    GrGLint stencilBitCount = 
+        fCurrDrawState.fRenderTarget->getStencilBuffer()->bits();
+#if 0
+    GrAssert(stencilBitCount > 0);
+    GrGLint clipStencilMask  = (1 << (stencilBitCount - 1));
+#else
+    // we could just clear the clip bit but when we go through
+    // ANGLE a partial stencil mask will cause clears to be
+    // turned into draws. Our contract on GrDrawTarget says that
+    // changing the clip between stencil passes may or may not
+    // zero the client's clip bits. So we just clear the whole thing.
+    static const GrGLint clipStencilMask  = ~0;
+#endif
+    GrGLint value;
+    if (insideClip) {
+        // clip bit lives in the stencil buffer's most significant bit
+        value = (1 << (stencilBitCount - 1));
+    } else {
+        value = 0;
+    }
+    this->flushRenderTarget(&GrIRect::EmptyIRect());
+    this->flushScissor(&rect);
+    GL_CALL(StencilMask(clipStencilMask));
+    GL_CALL(ClearStencil(value));
+    GL_CALL(Clear(GR_GL_STENCIL_BUFFER_BIT));
+    fHWDrawState.fStencilSettings.invalidate();
+}
+
+// Forces the current render target to be bound/flushed; the empty rect means
+// no region is newly flagged as needing resolve.
+void GrGpuGL::onForceRenderTargetFlush() {
+    this->flushRenderTarget(&GrIRect::EmptyIRect());
+}
+
+// Reads back a rect of pixels from `target` into `buffer` (top-to-bottom row
+// order). Multisampled targets are resolved first; unresolvable targets
+// return false. GL returns rows bottom-to-top, so the rows are reversed in
+// place at the end.
+bool GrGpuGL::onReadPixels(GrRenderTarget* target,
+                           int left, int top, int width, int height,
+                           GrPixelConfig config, void* buffer) {
+    GrGLenum internalFormat;  // we don't use this for glReadPixels
+    GrGLenum format;
+    GrGLenum type;
+    if (!this->canBeTexture(config, &internalFormat, &format, &type)) {
+        return false;
+    }    
+    GrGLRenderTarget* tgt = static_cast<GrGLRenderTarget*>(target);
+    GrAutoTPtrValueRestore<GrRenderTarget*> autoTargetRestore;
+    switch (tgt->getResolveType()) {
+        case GrGLRenderTarget::kCantResolve_ResolveType:
+            return false;
+        case GrGLRenderTarget::kAutoResolves_ResolveType:
+            // temporarily make tgt current so flushRenderTarget binds it;
+            // autoTargetRestore puts the previous RT back on scope exit.
+            autoTargetRestore.save(&fCurrDrawState.fRenderTarget);
+            fCurrDrawState.fRenderTarget = target;
+            this->flushRenderTarget(&GrIRect::EmptyIRect());
+            break;
+        case GrGLRenderTarget::kCanResolve_ResolveType:
+            this->resolveRenderTarget(tgt);
+            // we don't track the state of the READ FBO ID.
+            GL_CALL(BindFramebuffer(GR_GL_READ_FRAMEBUFFER, 
+                                    tgt->textureFBOID()));
+            break;
+        default:
+            GrCrash("Unknown resolve type");
+    }
+
+    const GrGLIRect& glvp = tgt->getViewport();
+
+    // the read rect is viewport-relative
+    GrGLIRect readRect;
+    readRect.setRelativeTo(glvp, left, top, width, height);
+    GL_CALL(ReadPixels(readRect.fLeft, readRect.fBottom,
+                       readRect.fWidth, readRect.fHeight,
+                       format, type, buffer));
+
+    // now reverse the order of the rows, since GL's are bottom-to-top, but our
+    // API presents top-to-bottom
+    {
+        size_t stride = width * GrBytesPerPixel(config);
+        SkAutoMalloc rowStorage(stride);
+        void* tmp = rowStorage.get();
+
+        // swap row i with row (height-1-i), meeting in the middle
+        const int halfY = height >> 1;
+        char* top = reinterpret_cast<char*>(buffer);
+        char* bottom = top + (height - 1) * stride;
+        for (int y = 0; y < halfY; y++) {
+            memcpy(tmp, top, stride);
+            memcpy(top, bottom, stride);
+            memcpy(bottom, tmp, stride);
+            top += stride;
+            bottom -= stride;
+        }
+    }
+    return true;
+}
+
+// Binds the current draw state's render target FBO (if it differs from the
+// cached one) and syncs the viewport. `bound` is the region about to be
+// rendered: NULL means "unknown, flag the whole target dirty"; an empty rect
+// means no new dirty region.
+void GrGpuGL::flushRenderTarget(const GrIRect* bound) {
+
+    GrAssert(NULL != fCurrDrawState.fRenderTarget);
+
+    GrGLRenderTarget* rt = (GrGLRenderTarget*)fCurrDrawState.fRenderTarget;
+    if (fHWDrawState.fRenderTarget != fCurrDrawState.fRenderTarget) {
+        GL_CALL(BindFramebuffer(GR_GL_FRAMEBUFFER, rt->renderFBOID()));
+    #if GR_COLLECT_STATS
+        ++fStats.fRenderTargetChngCnt;
+    #endif
+    #if GR_DEBUG
+        GrGLenum status;
+        GL_CALL_RET(status, CheckFramebufferStatus(GR_GL_FRAMEBUFFER));
+        if (status != GR_GL_FRAMEBUFFER_COMPLETE) {
+            GrPrintf("GrGpuGL::flushRenderTarget glCheckFramebufferStatus %x\n", status);
+        }
+    #endif
+        fDirtyFlags.fRenderTargetChanged = true;
+        fHWDrawState.fRenderTarget = fCurrDrawState.fRenderTarget;
+        const GrGLIRect& vp = rt->getViewport();
+        if (fHWBounds.fViewportRect != vp) {
+            vp.pushToGLViewport(this->glInterface());
+            fHWBounds.fViewportRect = vp;
+        }
+    }
+    // record the dirty region so a later resolve covers it
+    if (NULL == bound || !bound->isEmpty()) {
+        rt->flagAsNeedingResolve(bound);
+    }
+}
+
+// Maps GrPrimitiveType values (used as indices) to GL primitive modes.
+// Entry order must match the GrPrimitiveType enum order.
+GrGLenum gPrimitiveType2GLMode[] = {
+    GR_GL_TRIANGLES,
+    GR_GL_TRIANGLE_STRIP,
+    GR_GL_TRIANGLE_FAN,
+    GR_GL_POINTS,
+    GR_GL_LINES,
+    GR_GL_LINE_STRIP
+};
+
+// Debug aid: when SWAP_PER_DRAW is 1, every draw call swaps the window's
+// buffers (twice, with a breakpoint spot between) so each draw can be
+// inspected on screen. Off by default.
+#define SWAP_PER_DRAW 0
+
+#if SWAP_PER_DRAW
+    #if GR_MAC_BUILD
+        #include <AGL/agl.h>
+    #elif GR_WIN32_BUILD
+        // swaps the buffers of every top-level window owned by this process
+        void SwapBuf() {
+            DWORD procID = GetCurrentProcessId();
+            HWND hwnd = GetTopWindow(GetDesktopWindow());
+            while(hwnd) {
+                DWORD wndProcID = 0;
+                GetWindowThreadProcessId(hwnd, &wndProcID);
+                if(wndProcID == procID) {
+                    SwapBuffers(GetDC(hwnd));
+                }
+                hwnd = GetNextWindow(hwnd, GW_HWNDNEXT);
+            }
+        }
+    #endif
+#endif
+
+// Issues an indexed draw with glDrawElements (16-bit indices). startVertex
+// must already be 0: setupGeometry folds the vertex offset into the array
+// pointers since DrawElements has no base-vertex parameter here.
+void GrGpuGL::onGpuDrawIndexed(GrPrimitiveType type,
+                               uint32_t startVertex,
+                               uint32_t startIndex,
+                               uint32_t vertexCount,
+                               uint32_t indexCount) {
+    GrAssert((size_t)type < GR_ARRAY_COUNT(gPrimitiveType2GLMode));
+
+    // byte offset into the bound index buffer
+    GrGLvoid* indices = (GrGLvoid*)(sizeof(uint16_t) * startIndex);
+
+    GrAssert(NULL != fHWGeometryState.fIndexBuffer);
+    GrAssert(NULL != fHWGeometryState.fVertexBuffer);
+
+    // our setupGeometry better have adjusted this to zero since
+    // DrawElements always draws from the beginning of the arrays for idx 0.
+    GrAssert(0 == startVertex);
+
+    GL_CALL(DrawElements(gPrimitiveType2GLMode[type], indexCount,
+                         GR_GL_UNSIGNED_SHORT, indices));
+#if SWAP_PER_DRAW
+    glFlush();
+    #if GR_MAC_BUILD
+        aglSwapBuffers(aglGetCurrentContext());
+        int set_a_break_pt_here = 9;
+        aglSwapBuffers(aglGetCurrentContext());
+    #elif GR_WIN32_BUILD
+        SwapBuf();
+        int set_a_break_pt_here = 9;
+        SwapBuf();
+    #endif
+#endif
+}
+
+// Issues a non-indexed draw with glDrawArrays. As with the indexed path,
+// startVertex must already be 0 because setupGeometry accounts for it in the
+// array pointers.
+void GrGpuGL::onGpuDrawNonIndexed(GrPrimitiveType type,
+                                  uint32_t startVertex,
+                                  uint32_t vertexCount) {
+    GrAssert((size_t)type < GR_ARRAY_COUNT(gPrimitiveType2GLMode));
+
+    GrAssert(NULL != fHWGeometryState.fVertexBuffer);
+
+    // our setupGeometry better have adjusted this to zero.
+    // DrawElements doesn't take an offset so we always adjust the startVertex.
+    GrAssert(0 == startVertex);
+
+    // pass 0 for parameter first. We have to adjust gl*Pointer() to
+    // account for startVertex in the DrawElements case. So we always
+    // rely on setupGeometry to have accounted for startVertex.
+    GL_CALL(DrawArrays(gPrimitiveType2GLMode[type], 0, vertexCount));
+#if SWAP_PER_DRAW
+    glFlush();
+    #if GR_MAC_BUILD
+        aglSwapBuffers(aglGetCurrentContext());
+        int set_a_break_pt_here = 9;
+        aglSwapBuffers(aglGetCurrentContext());
+    #elif GR_WIN32_BUILD
+        SwapBuf();
+        int set_a_break_pt_here = 9;
+        SwapBuf();
+    #endif
+#endif
+}
+
+// Resolves a multisampled render target's MSAA renderbuffer into its
+// texture FBO so the texture can be sampled. No-op if the target has no
+// pending resolve. Only the target's dirty (resolve) rect is blitted.
+void GrGpuGL::resolveRenderTarget(GrGLRenderTarget* rt) {
+
+    if (rt->needsResolve()) {
+        GrAssert(GLCaps::kNone_MSFBO != fGLCaps.fMSFBOType);
+        // a resolve is only needed when render FBO and texture FBO differ
+        GrAssert(rt->textureFBOID() != rt->renderFBOID());
+        GL_CALL(BindFramebuffer(GR_GL_READ_FRAMEBUFFER,
+                                rt->renderFBOID()));
+        GL_CALL(BindFramebuffer(GR_GL_DRAW_FRAMEBUFFER,
+                                rt->textureFBOID()));
+    #if GR_COLLECT_STATS
+        ++fStats.fRenderTargetChngCnt;
+    #endif
+        // make sure we go through flushRenderTarget() since we've modified
+        // the bound DRAW FBO ID.
+        fHWDrawState.fRenderTarget = NULL;
+        const GrGLIRect& vp = rt->getViewport();
+        const GrIRect dirtyRect = rt->getResolveRect();
+        GrGLIRect r;
+        // convert the dirty rect from Gr's top-left space to GL's
+        // bottom-left viewport-relative space
+        r.setRelativeTo(vp, dirtyRect.fLeft, dirtyRect.fTop,
+                        dirtyRect.width(), dirtyRect.height());
+
+        if (GLCaps::kAppleES_MSFBO == fGLCaps.fMSFBOType) {
+            // Apple's extension uses the scissor as the blit bounds.
+            GL_CALL(Enable(GR_GL_SCISSOR_TEST));
+            GL_CALL(Scissor(r.fLeft, r.fBottom,
+                            r.fWidth, r.fHeight));
+            GL_CALL(ResolveMultisampleFramebuffer());
+            // we changed the scissor directly; invalidate the cached state
+            fHWBounds.fScissorRect.invalidate();
+            fHWBounds.fScissorEnabled = true;
+        } else {
+            if (GLCaps::kDesktopARB_MSFBO != fGLCaps.fMSFBOType) {
+                // this respects the scissor during the blit, so disable it.
+                GrAssert(GLCaps::kDesktopEXT_MSFBO == fGLCaps.fMSFBOType);
+                this->flushScissor(NULL);
+            }
+            int right = r.fLeft + r.fWidth;
+            int top = r.fBottom + r.fHeight;
+            // 1:1 blit (no scaling): src and dst rects are identical
+            GL_CALL(BlitFramebuffer(r.fLeft, r.fBottom, right, top,
+                                    r.fLeft, r.fBottom, right, top,
+                                    GR_GL_COLOR_BUFFER_BIT, GR_GL_NEAREST));
+        }
+        rt->flagAsResolved();
+    }
+}
+
+// Maps the basic Gr stencil funcs (array index) to GL stencil func enums.
+// The static asserts below pin the enum values the table ordering relies on.
+static const GrGLenum grToGLStencilFunc[] = {
+    GR_GL_ALWAYS,           // kAlways_StencilFunc
+    GR_GL_NEVER,            // kNever_StencilFunc
+    GR_GL_GREATER,          // kGreater_StencilFunc
+    GR_GL_GEQUAL,           // kGEqual_StencilFunc
+    GR_GL_LESS,             // kLess_StencilFunc
+    GR_GL_LEQUAL,           // kLEqual_StencilFunc,
+    GR_GL_EQUAL,            // kEqual_StencilFunc,
+    GR_GL_NOTEQUAL,         // kNotEqual_StencilFunc,
+};
+GR_STATIC_ASSERT(GR_ARRAY_COUNT(grToGLStencilFunc) == kBasicStencilFuncCount);
+GR_STATIC_ASSERT(0 == kAlways_StencilFunc);
+GR_STATIC_ASSERT(1 == kNever_StencilFunc);
+GR_STATIC_ASSERT(2 == kGreater_StencilFunc);
+GR_STATIC_ASSERT(3 == kGEqual_StencilFunc);
+GR_STATIC_ASSERT(4 == kLess_StencilFunc);
+GR_STATIC_ASSERT(5 == kLEqual_StencilFunc);
+GR_STATIC_ASSERT(6 == kEqual_StencilFunc);
+GR_STATIC_ASSERT(7 == kNotEqual_StencilFunc);
+
+// Maps Gr stencil ops (array index) to GL stencil op enums. The static
+// asserts below pin the enum values the table ordering relies on.
+static const GrGLenum grToGLStencilOp[] = {
+    GR_GL_KEEP,        // kKeep_StencilOp
+    GR_GL_REPLACE,     // kReplace_StencilOp
+    GR_GL_INCR_WRAP,   // kIncWrap_StencilOp
+    GR_GL_INCR,        // kIncClamp_StencilOp
+    GR_GL_DECR_WRAP,   // kDecWrap_StencilOp
+    GR_GL_DECR,        // kDecClamp_StencilOp
+    GR_GL_ZERO,        // kZero_StencilOp
+    GR_GL_INVERT,      // kInvert_StencilOp
+};
+GR_STATIC_ASSERT(GR_ARRAY_COUNT(grToGLStencilOp) == kStencilOpCount);
+GR_STATIC_ASSERT(0 == kKeep_StencilOp);
+GR_STATIC_ASSERT(1 == kReplace_StencilOp);
+GR_STATIC_ASSERT(2 == kIncWrap_StencilOp);
+GR_STATIC_ASSERT(3 == kIncClamp_StencilOp);
+GR_STATIC_ASSERT(4 == kDecWrap_StencilOp);
+GR_STATIC_ASSERT(5 == kDecClamp_StencilOp);
+GR_STATIC_ASSERT(6 == kZero_StencilOp);
+GR_STATIC_ASSERT(7 == kInvert_StencilOp);
+
+// Flushes the stencil state to GL if it differs from what the GL already
+// has. Handles three interacting concerns: the user's stencil settings, the
+// clip mask stored in the high stencil bit, and draws that themselves
+// modify the clip (kModifyStencilClip_StateBit).
+void GrGpuGL::flushStencil() {
+    const GrStencilSettings* settings = &fCurrDrawState.fStencilSettings;
+
+    // use stencil for clipping if clipping is enabled and the clip
+    // has been written into the stencil.
+    bool stencilClip = fClipInStencil &&
+                       (kClip_StateBit & fCurrDrawState.fFlagBits);
+    bool stencilChange = fHWStencilClip != stencilClip ||
+                         fHWDrawState.fStencilSettings != *settings ||
+                         ((fHWDrawState.fFlagBits & kModifyStencilClip_StateBit) !=
+                          (fCurrDrawState.fFlagBits & kModifyStencilClip_StateBit));
+
+    if (stencilChange) {
+
+        // we can't simultaneously perform stencil-clipping and modify the stencil clip
+        GrAssert(!stencilClip || !(fCurrDrawState.fFlagBits & kModifyStencilClip_StateBit));
+
+        // a disabled user setting still needs the clip-test settings when
+        // stencil clipping is active
+        if (settings->isDisabled()) {
+            if (stencilClip) {
+                settings = &gClipStencilSettings;
+            }
+        }
+
+        if (settings->isDisabled()) {
+            GL_CALL(Disable(GR_GL_STENCIL_TEST));
+        } else {
+            GL_CALL(Enable(GR_GL_STENCIL_TEST));
+    #if GR_DEBUG
+            // wrap ops must not reach here if the HW can't do them
+            if (!this->getCaps().fStencilWrapOpsSupport) {
+                GrAssert(settings->fFrontPassOp != kIncWrap_StencilOp);
+                GrAssert(settings->fFrontPassOp != kDecWrap_StencilOp);
+                GrAssert(settings->fFrontFailOp != kIncWrap_StencilOp);
+                GrAssert(settings->fBackFailOp != kDecWrap_StencilOp);
+                GrAssert(settings->fBackPassOp != kIncWrap_StencilOp);
+                GrAssert(settings->fBackPassOp != kDecWrap_StencilOp);
+                GrAssert(settings->fBackFailOp != kIncWrap_StencilOp);
+                GrAssert(settings->fFrontFailOp != kDecWrap_StencilOp);
+            }
+    #endif
+            int stencilBits = 0;
+            GrStencilBuffer* stencilBuffer =
+                fCurrDrawState.fRenderTarget->getStencilBuffer();
+            if (NULL != stencilBuffer) {
+                stencilBits = stencilBuffer->bits();
+            }
+            // TODO: dynamically attach a stencil buffer
+            GrAssert(stencilBits ||
+                     (GrStencilSettings::gDisabled ==
+                      fCurrDrawState.fStencilSettings));
+            // the top stencil bit holds the clip; the rest are user bits
+            GrGLuint clipStencilMask = 1 << (stencilBits - 1);
+            GrGLuint userStencilMask = clipStencilMask - 1;
+
+            unsigned int frontRef  = settings->fFrontFuncRef;
+            unsigned int frontMask = settings->fFrontFuncMask;
+            unsigned int frontWriteMask = settings->fFrontWriteMask;
+            GrGLenum frontFunc;
+
+            if (fCurrDrawState.fFlagBits & kModifyStencilClip_StateBit) {
+                // clip-modifying draws use the func verbatim; only the basic
+                // (non-clip-aware) funcs are legal here
+                GrAssert(settings->fFrontFunc < kBasicStencilFuncCount);
+                frontFunc = grToGLStencilFunc[settings->fFrontFunc];
+            } else {
+                // rewrite func/ref/mask so the draw also respects the clip
+                // bit and never touches it
+                frontFunc = grToGLStencilFunc[ConvertStencilFunc(stencilClip, settings->fFrontFunc)];
+
+                ConvertStencilFuncAndMask(settings->fFrontFunc,
+                                          stencilClip,
+                                          clipStencilMask,
+                                          userStencilMask,
+                                          &frontRef,
+                                          &frontMask);
+                frontWriteMask &= userStencilMask;
+            }
+            GrAssert(settings->fFrontFailOp >= 0 &&
+                     (unsigned) settings->fFrontFailOp < GR_ARRAY_COUNT(grToGLStencilOp));
+            GrAssert(settings->fFrontPassOp >= 0 &&
+                     (unsigned) settings->fFrontPassOp < GR_ARRAY_COUNT(grToGLStencilOp));
+            GrAssert(settings->fBackFailOp >= 0 &&
+                     (unsigned) settings->fBackFailOp < GR_ARRAY_COUNT(grToGLStencilOp));
+            GrAssert(settings->fBackPassOp >= 0 &&
+                     (unsigned) settings->fBackPassOp < GR_ARRAY_COUNT(grToGLStencilOp));
+            if (this->getCaps().fTwoSidedStencilSupport) {
+                GrGLenum backFunc;
+
+                unsigned int backRef  = settings->fBackFuncRef;
+                unsigned int backMask = settings->fBackFuncMask;
+                unsigned int backWriteMask = settings->fBackWriteMask;
+
+
+                if (fCurrDrawState.fFlagBits & kModifyStencilClip_StateBit) {
+                    GrAssert(settings->fBackFunc < kBasicStencilFuncCount);
+                    backFunc = grToGLStencilFunc[settings->fBackFunc];
+                } else {
+                    backFunc = grToGLStencilFunc[ConvertStencilFunc(stencilClip, settings->fBackFunc)];
+                    ConvertStencilFuncAndMask(settings->fBackFunc,
+                                              stencilClip,
+                                              clipStencilMask,
+                                              userStencilMask,
+                                              &backRef,
+                                              &backMask);
+                    backWriteMask &= userStencilMask;
+                }
+
+                GL_CALL(StencilFuncSeparate(GR_GL_FRONT, frontFunc,
+                                            frontRef, frontMask));
+                GL_CALL(StencilMaskSeparate(GR_GL_FRONT, frontWriteMask));
+                GL_CALL(StencilFuncSeparate(GR_GL_BACK, backFunc,
+                                            backRef, backMask));
+                GL_CALL(StencilMaskSeparate(GR_GL_BACK, backWriteMask));
+                // Gr has a single pass op; it is used for both the z-fail
+                // and z-pass GL slots.
+                GL_CALL(StencilOpSeparate(GR_GL_FRONT,
+                                          grToGLStencilOp[settings->fFrontFailOp],
+                                          grToGLStencilOp[settings->fFrontPassOp],
+                                          grToGLStencilOp[settings->fFrontPassOp]));
+
+                GL_CALL(StencilOpSeparate(GR_GL_BACK,
+                                          grToGLStencilOp[settings->fBackFailOp],
+                                          grToGLStencilOp[settings->fBackPassOp],
+                                          grToGLStencilOp[settings->fBackPassOp]));
+            } else {
+                // no two-sided support: front settings apply to both faces
+                GL_CALL(StencilFunc(frontFunc, frontRef, frontMask));
+                GL_CALL(StencilMask(frontWriteMask));
+                GL_CALL(StencilOp(grToGLStencilOp[settings->fFrontFailOp],
+                                  grToGLStencilOp[settings->fFrontPassOp],
+                                  grToGLStencilOp[settings->fFrontPassOp]));
+            }
+        }
+        fHWDrawState.fStencilSettings = fCurrDrawState.fStencilSettings;
+        fHWStencilClip = stencilClip;
+    }
+}
+
+// Flushes line-smoothing and MSAA enable state. Desktop GL only: ES cannot
+// toggle GL_MULTISAMPLE and has no smooth lines, so this is a no-op there.
+void GrGpuGL::flushAAState(GrPrimitiveType type) {
+    if (kDesktop_GrGLBinding == this->glBinding()) {
+        // ES doesn't support toggling GL_MULTISAMPLE and doesn't have
+        // smooth lines.
+
+        // we prefer smooth lines over multisampled lines
+        // msaa should be disabled if drawing smooth lines.
+        if (GrIsPrimTypeLines(type)) {
+            bool smooth = this->willUseHWAALines();
+            if (!fHWAAState.fSmoothLineEnabled && smooth) {
+                GL_CALL(Enable(GR_GL_LINE_SMOOTH));
+                fHWAAState.fSmoothLineEnabled = true;
+            } else if (fHWAAState.fSmoothLineEnabled && !smooth) {
+                GL_CALL(Disable(GR_GL_LINE_SMOOTH));
+                fHWAAState.fSmoothLineEnabled = false;
+            }
+            // lines never use MSAA; force it off if it was left on
+            if (fCurrDrawState.fRenderTarget->isMultisampled() &&
+                fHWAAState.fMSAAEnabled) {
+                GL_CALL(Disable(GR_GL_MULTISAMPLE));
+                fHWAAState.fMSAAEnabled = false;
+            }
+        } else if (fCurrDrawState.fRenderTarget->isMultisampled() &&
+                   !!(kAntialias_StateBit & fCurrDrawState.fFlagBits) !=
+                   fHWAAState.fMSAAEnabled) {
+            // toggle MSAA to match the requested antialias state
+            if (fHWAAState.fMSAAEnabled) {
+                GL_CALL(Disable(GR_GL_MULTISAMPLE));
+                fHWAAState.fMSAAEnabled = false;
+            } else {
+                GL_CALL(Enable(GR_GL_MULTISAMPLE));
+                fHWAAState.fMSAAEnabled = true;
+            }
+        }
+    }
+}
+
+// Flushes blend enable, blend func, and blend constant. When drawing
+// HW-smoothed lines the coeffs are forced to (SA, ISA) since line smoothing
+// produces fractional coverage in alpha; otherwise the caller's final
+// (already-optimized) coefficients are used.
+void GrGpuGL::flushBlend(GrPrimitiveType type,
+                         GrBlendCoeff srcCoeff,
+                         GrBlendCoeff dstCoeff) {
+    if (GrIsPrimTypeLines(type) && this->willUseHWAALines()) {
+        // smooth lines require blending to be on
+        if (fHWBlendDisabled) {
+            GL_CALL(Enable(GR_GL_BLEND));
+            fHWBlendDisabled = false;
+        }
+        if (kSA_BlendCoeff != fHWDrawState.fSrcBlend ||
+            kISA_BlendCoeff != fHWDrawState.fDstBlend) {
+            GL_CALL(BlendFunc(gXfermodeCoeff2Blend[kSA_BlendCoeff],
+                              gXfermodeCoeff2Blend[kISA_BlendCoeff]));
+            fHWDrawState.fSrcBlend = kSA_BlendCoeff;
+            fHWDrawState.fDstBlend = kISA_BlendCoeff;
+        }
+    } else {
+        // any optimization to disable blending should
+        // have already been applied and tweaked the coeffs
+        // to (1, 0).
+        bool blendOff = kOne_BlendCoeff == srcCoeff &&
+                        kZero_BlendCoeff == dstCoeff;
+        if (fHWBlendDisabled != blendOff) {
+            if (blendOff) {
+                GL_CALL(Disable(GR_GL_BLEND));
+            } else {
+                GL_CALL(Enable(GR_GL_BLEND));
+            }
+            fHWBlendDisabled = blendOff;
+        }
+        if (!blendOff) {
+            if (fHWDrawState.fSrcBlend != srcCoeff ||
+                fHWDrawState.fDstBlend != dstCoeff) {
+                GL_CALL(BlendFunc(gXfermodeCoeff2Blend[srcCoeff],
+                                  gXfermodeCoeff2Blend[dstCoeff]));
+                fHWDrawState.fSrcBlend = srcCoeff;
+                fHWDrawState.fDstBlend = dstCoeff;
+            }
+            // only upload the blend constant when a coeff references it
+            if ((BlendCoeffReferencesConstant(srcCoeff) ||
+                 BlendCoeffReferencesConstant(dstCoeff)) &&
+                fHWDrawState.fBlendConstant != fCurrDrawState.fBlendConstant) {
+
+                // unpack 8-bit channels to normalized floats for GL
+                float c[] = {
+                    GrColorUnpackR(fCurrDrawState.fBlendConstant) / 255.f,
+                    GrColorUnpackG(fCurrDrawState.fBlendConstant) / 255.f,
+                    GrColorUnpackB(fCurrDrawState.fBlendConstant) / 255.f,
+                    GrColorUnpackA(fCurrDrawState.fBlendConstant) / 255.f
+                };
+                GL_CALL(BlendColor(c[0], c[1], c[2], c[3]));
+                fHWDrawState.fBlendConstant = fCurrDrawState.fBlendConstant;
+            }
+        }
+    }
+}
+
+// Maps a GrSamplerState filter mode to the GL min/mag filter enum.
+// The down-sample and convolution modes use the base GL filter they are
+// built on (LINEAR and NEAREST respectively).
+static unsigned grToGLFilter(GrSamplerState::Filter filter) {
+    if (GrSamplerState::kNearest_Filter == filter ||
+        GrSamplerState::kConvolution_Filter == filter) {
+        return GR_GL_NEAREST;
+    }
+    if (GrSamplerState::kBilinear_Filter == filter ||
+        GrSamplerState::k4x4Downsample_Filter == filter) {
+        return GR_GL_LINEAR;
+    }
+    GrAssert(!"Unknown filter type");
+    return GR_GL_LINEAR;
+}
+
+// Flushes GL state shared by the fixed-function and programmable pipelines:
+// per-stage texture bindings and sampler params, render target / viewport,
+// AA, dither, color writes, face culling, and stencil. Subclass flushes
+// pipeline-specific state afterwards and must call resetDirtyFlags().
+// Returns true (callers treat false as "abort the draw").
+bool GrGpuGL::flushGLStateCommon(GrPrimitiveType type) {
+
+    // GrGpu::setupClipAndFlushState should have already checked this
+    // and bailed if not true.
+    GrAssert(NULL != fCurrDrawState.fRenderTarget);
+
+    for (int s = 0; s < kNumStages; ++s) {
+        // bind texture and set sampler state
+        if (this->isStageEnabled(s)) {
+            GrGLTexture* nextTexture = (GrGLTexture*)fCurrDrawState.fTextures[s];
+
+            // true for now, but maybe not with GrEffect.
+            GrAssert(NULL != nextTexture);
+            // if we created a rt/tex and rendered to it without using a
+            // texture and now we're texturing from the rt it will still be
+            // the last bound texture, but it needs resolving. So keep this
+            // out of the "last != next" check.
+            GrGLRenderTarget* texRT =
+                static_cast<GrGLRenderTarget*>(nextTexture->asRenderTarget());
+            if (NULL != texRT) {
+                resolveRenderTarget(texRT);
+            }
+
+            if (fHWDrawState.fTextures[s] != nextTexture) {
+                setTextureUnit(s);
+                GL_CALL(BindTexture(GR_GL_TEXTURE_2D, nextTexture->textureID()));
+            #if GR_COLLECT_STATS
+                ++fStats.fTextureChngCnt;
+            #endif
+                //GrPrintf("---- bindtexture %d\n", nextTexture->textureID());
+                fHWDrawState.fTextures[s] = nextTexture;
+                // The texture matrix has to compensate for texture width/height
+                // and NPOT-embedded-in-POT
+                fDirtyFlags.fTextureChangedMask |= (1 << s);
+            }
+
+            // diff the desired sampler params against the params cached on
+            // the texture and only issue TexParameteri for what changed
+            const GrSamplerState& sampler = fCurrDrawState.fSamplerStates[s];
+            const GrGLTexture::TexParams& oldTexParams =
+                nextTexture->getTexParams();
+            GrGLTexture::TexParams newTexParams;
+
+            newTexParams.fFilter = grToGLFilter(sampler.getFilter());
+
+            const GrGLenum* wraps =
+                GrGLTexture::WrapMode2GLWrap(this->glBinding());
+            newTexParams.fWrapS = wraps[sampler.getWrapX()];
+            newTexParams.fWrapT = wraps[sampler.getWrapY()];
+
+            if (newTexParams.fFilter != oldTexParams.fFilter) {
+                setTextureUnit(s);
+                GL_CALL(TexParameteri(GR_GL_TEXTURE_2D,
+                                      GR_GL_TEXTURE_MAG_FILTER,
+                                      newTexParams.fFilter));
+                GL_CALL(TexParameteri(GR_GL_TEXTURE_2D,
+                                      GR_GL_TEXTURE_MIN_FILTER,
+                                      newTexParams.fFilter));
+            }
+            if (newTexParams.fWrapS != oldTexParams.fWrapS) {
+                setTextureUnit(s);
+                GL_CALL(TexParameteri(GR_GL_TEXTURE_2D,
+                                      GR_GL_TEXTURE_WRAP_S,
+                                      newTexParams.fWrapS));
+            }
+            if (newTexParams.fWrapT != oldTexParams.fWrapT) {
+                setTextureUnit(s);
+                GL_CALL(TexParameteri(GR_GL_TEXTURE_2D,
+                                      GR_GL_TEXTURE_WRAP_T,
+                                      newTexParams.fWrapT));
+            }
+            nextTexture->setTexParams(newTexParams);
+        }
+    }
+
+    // pass the conservative clip bounds (if any) to flushRenderTarget so it
+    // can limit the pending-resolve region
+    GrIRect* rect = NULL;
+    GrIRect clipBounds;
+    if ((fCurrDrawState.fFlagBits & kClip_StateBit) &&
+        fClip.hasConservativeBounds()) {
+        fClip.getConservativeBounds().roundOut(&clipBounds);
+        rect = &clipBounds;
+    }
+    this->flushRenderTarget(rect);
+    this->flushAAState(type);
+
+    if ((fCurrDrawState.fFlagBits & kDither_StateBit) !=
+        (fHWDrawState.fFlagBits & kDither_StateBit)) {
+        if (fCurrDrawState.fFlagBits & kDither_StateBit) {
+            GL_CALL(Enable(GR_GL_DITHER));
+        } else {
+            GL_CALL(Disable(GR_GL_DITHER));
+        }
+    }
+
+    if ((fCurrDrawState.fFlagBits & kNoColorWrites_StateBit) !=
+        (fHWDrawState.fFlagBits & kNoColorWrites_StateBit)) {
+        GrGLenum mask;
+        if (fCurrDrawState.fFlagBits & kNoColorWrites_StateBit) {
+            mask = GR_GL_FALSE;
+        } else {
+            mask = GR_GL_TRUE;
+        }
+        GL_CALL(ColorMask(mask, mask, mask, mask));
+    }
+
+    if (fHWDrawState.fDrawFace != fCurrDrawState.fDrawFace) {
+        switch (fCurrDrawState.fDrawFace) {
+            case kCCW_DrawFace:
+                GL_CALL(Enable(GR_GL_CULL_FACE));
+                GL_CALL(CullFace(GR_GL_BACK));
+                break;
+            case kCW_DrawFace:
+                GL_CALL(Enable(GR_GL_CULL_FACE));
+                GL_CALL(CullFace(GR_GL_FRONT));
+                break;
+            case kBoth_DrawFace:
+                GL_CALL(Disable(GR_GL_CULL_FACE));
+                break;
+            default:
+                GrCrash("Unknown draw face.");
+        }
+        fHWDrawState.fDrawFace = fCurrDrawState.fDrawFace;
+    }
+
+#if GR_DEBUG
+    // check for circular rendering
+    for (int s = 0; s < kNumStages; ++s) {
+        GrAssert(!this->isStageEnabled(s) ||
+                 NULL == fCurrDrawState.fRenderTarget ||
+                 NULL == fCurrDrawState.fTextures[s] ||
+                 fCurrDrawState.fTextures[s]->asRenderTarget() !=
+                     fCurrDrawState.fRenderTarget);
+    }
+#endif
+
+    flushStencil();
+
+    // flushStencil may look at the private state bits, so keep it before this.
+    fHWDrawState.fFlagBits = fCurrDrawState.fFlagBits;
+    return true;
+}
+
+// Records that a vertex buffer was bound to GL outside of setBuffers so the
+// cached binding stays in sync; a new binding also invalidates the cached
+// vertex array pointers.
+void GrGpuGL::notifyVertexBufferBind(const GrGLVertexBuffer* buffer) {
+    if (buffer == fHWGeometryState.fVertexBuffer) {
+        return; // already tracked as bound, nothing to update
+    }
+    fHWGeometryState.fVertexBuffer = buffer;
+    fHWGeometryState.fArrayPtrsDirty = true;
+}
+
+// Clears the cached vertex-buffer binding when that buffer is deleted;
+// GL implicitly rebinds 0 on delete, so the array pointers become stale.
+void GrGpuGL::notifyVertexBufferDelete(const GrGLVertexBuffer* buffer) {
+    if (buffer != fHWGeometryState.fVertexBuffer) {
+        return; // not the bound buffer; cache is unaffected
+    }
+    fHWGeometryState.fVertexBuffer = NULL;
+    fHWGeometryState.fArrayPtrsDirty = true;
+}
+
+// Records an index-buffer bind made outside setBuffers so the cached
+// element-array binding stays in sync with GL.
+void GrGpuGL::notifyIndexBufferBind(const GrGLIndexBuffer* buffer) {
+    fHWGeometryState.fIndexBuffer = buffer;
+}
+
+// Clears the cached index-buffer binding when that buffer is deleted;
+// deleting a bound GL buffer implicitly binds 0.
+void GrGpuGL::notifyIndexBufferDelete(const GrGLIndexBuffer* buffer) {
+    if (buffer != fHWGeometryState.fIndexBuffer) {
+        return; // a different buffer is bound; nothing to do
+    }
+    fHWGeometryState.fIndexBuffer = NULL;
+}
+
+// Drops any references the pending and hardware draw states hold to a
+// render target that is being deleted, so neither dangles.
+void GrGpuGL::notifyRenderTargetDelete(GrRenderTarget* renderTarget) {
+    GrAssert(NULL != renderTarget);
+    if (renderTarget == fCurrDrawState.fRenderTarget) {
+        fCurrDrawState.fRenderTarget = NULL;
+    }
+    if (renderTarget == fHWDrawState.fRenderTarget) {
+        // the GL FBO binding is now stale; force a rebind on next flush
+        fHWDrawState.fRenderTarget = NULL;
+    }
+}
+
+// Scrubs a texture being deleted from every stage of both the pending and
+// the hardware draw state. GL implicitly binds 0 when a bound texture is
+// deleted, so the cached binding must be dropped too.
+void GrGpuGL::notifyTextureDelete(GrGLTexture* texture) {
+    for (int stage = 0; stage < kNumStages; ++stage) {
+        if (texture == fCurrDrawState.fTextures[stage]) {
+            fCurrDrawState.fTextures[stage] = NULL;
+        }
+        if (texture == fHWDrawState.fTextures[stage]) {
+            fHWDrawState.fTextures[stage] = NULL;
+        }
+    }
+}
+
+// Translates a GrPixelConfig into the GL (internalFormat, format, type)
+// triple for glTexImage2D. Returns false if the config cannot be a texture
+// on this GL (all three out-params are written only on success paths).
+bool GrGpuGL::canBeTexture(GrPixelConfig config,
+                           GrGLenum* internalFormat,
+                           GrGLenum* format,
+                           GrGLenum* type) {
+    switch (config) {
+        case kRGBA_8888_GrPixelConfig:
+        case kRGBX_8888_GrPixelConfig: // todo: can we tell it our X?
+            *format = GR_GL_32BPP_COLOR_FORMAT;
+            if (kDesktop_GrGLBinding != this->glBinding()) {
+                // according to GL_EXT_texture_format_BGRA8888 the *internal*
+                // format for a BGRA is BGRA not RGBA (as on desktop)
+                *internalFormat = GR_GL_32BPP_COLOR_FORMAT;
+            } else {
+                *internalFormat = GR_GL_RGBA;
+            }
+            *type = GR_GL_UNSIGNED_BYTE;
+            break;
+        case kRGB_565_GrPixelConfig:
+            *format = GR_GL_RGB;
+            *internalFormat = GR_GL_RGB;
+            *type = GR_GL_UNSIGNED_SHORT_5_6_5;
+            break;
+        case kRGBA_4444_GrPixelConfig:
+            *format = GR_GL_RGBA;
+            *internalFormat = GR_GL_RGBA;
+            *type = GR_GL_UNSIGNED_SHORT_4_4_4_4;
+            break;
+        case kIndex_8_GrPixelConfig:
+            // paletted textures need an extension; reject if unsupported
+            if (this->getCaps().f8BitPaletteSupport) {
+                *format = GR_GL_PALETTE8_RGBA8;
+                *internalFormat = GR_GL_PALETTE8_RGBA8;
+                *type = GR_GL_UNSIGNED_BYTE;   // unused I think
+            } else {
+                return false;
+            }
+            break;
+        case kAlpha_8_GrPixelConfig:
+            *format = GR_GL_ALPHA;
+            *internalFormat = GR_GL_ALPHA;
+            *type = GR_GL_UNSIGNED_BYTE;
+            break;
+        default:
+            return false;
+    }
+    return true;
+}
+
+// Makes the given stage's texture unit active, skipping the GL call when it
+// already is. fActiveTextureUnitIdx caches the bare unit index (not the
+// GR_GL_TEXTURE0-based enum).
+void GrGpuGL::setTextureUnit(int unit) {
+    GrAssert(unit >= 0 && unit < kNumStages);
+    if (fActiveTextureUnitIdx != unit) {
+        GL_CALL(ActiveTexture(GR_GL_TEXTURE0 + unit));
+        fActiveTextureUnitIdx = unit;
+    }
+}
+
+// Makes the spare (non-stage) texture unit active, skipping the GL call when
+// it already is.
+// BUG FIX: the cached value fActiveTextureUnitIdx stores the bare unit index
+// (see setTextureUnit above), but the old guard compared it against the GL
+// enum GR_GL_TEXTURE0 + SPARE_TEX_UNIT. That comparison was always true, so
+// a redundant glActiveTexture call was issued on every invocation.
+void GrGpuGL::setSpareTextureUnit() {
+    if (fActiveTextureUnitIdx != SPARE_TEX_UNIT) {
+        GL_CALL(ActiveTexture(GR_GL_TEXTURE0 + SPARE_TEX_UNIT));
+        fActiveTextureUnitIdx = SPARE_TEX_UNIT;
+    }
+}
+
+/* On ES the internalFormat and format must match for TexImage and we use
+ GL_RGB, GL_RGBA for color formats. We also generally like having the driver
+ decide the internalFormat. However, on ES internalFormat for
+ RenderBufferStorage* has to be a specific format (not a base format like
+ GL_RGBA).
+ */
+// Maps a GrPixelConfig to the sized internal format required by
+// glRenderbufferStorage* (base formats like GL_RGBA are not legal there on
+// ES). Returns false if the config is not renderbuffer-capable here.
+bool GrGpuGL::fboInternalFormat(GrPixelConfig config, GrGLenum* format) {
+    switch (config) {
+        case kRGBA_8888_GrPixelConfig:
+        case kRGBX_8888_GrPixelConfig:
+            // RGBA8 renderbuffers need an extension on ES
+            if (fGLCaps.fRGBA8Renderbuffer) {
+                *format = GR_GL_RGBA8;
+                return true;
+            } else {
+                return false;
+            }
+        case kRGB_565_GrPixelConfig:
+            // ES2 supports 565. ES1 supports it
+            // with FBO extension desktop GL has
+            // no such internal format
+            GrAssert(kDesktop_GrGLBinding != this->glBinding());
+            *format = GR_GL_RGB565;
+            return true;
+        case kRGBA_4444_GrPixelConfig:
+            *format = GR_GL_RGBA4;
+            return true;
+        default:
+            return false;
+    }
+}
+
+// Zeroes all dirty flags; called by subclasses once their state flush has
+// consumed them (see fDirtyFlags in GrGpuGL.h).
+void GrGpuGL::resetDirtyFlags() {
+    Gr_bzero(&fDirtyFlags, sizeof(fDirtyFlags));
+}
+
+// Binds the vertex (and, if indexed, index) buffer that backs the current
+// geometry source, finalizing pool-reserved data first. Returns through the
+// out-params the extra vertex/index offsets the caller must add when the
+// data lives at a nonzero position inside a pool buffer.
+void GrGpuGL::setBuffers(bool indexed,
+                         int* extraVertexOffset,
+                         int* extraIndexOffset) {
+
+    GrAssert(NULL != extraVertexOffset);
+
+    const GeometryPoolState& geoPoolState = this->getGeomPoolState();
+
+    GrGLVertexBuffer* vbuf;
+    switch (this->getGeomSrc().fVertexSrc) {
+    case kBuffer_GeometrySrcType:
+        // caller-supplied buffer: data starts at offset 0
+        *extraVertexOffset = 0;
+        vbuf = (GrGLVertexBuffer*) this->getGeomSrc().fVertexBuffer;
+        break;
+    case kArray_GeometrySrcType:
+    case kReserved_GeometrySrcType:
+        // pool-backed data: flush it to the pool's buffer and offset into it
+        this->finalizeReservedVertices();
+        *extraVertexOffset = geoPoolState.fPoolStartVertex;
+        vbuf = (GrGLVertexBuffer*) geoPoolState.fPoolVertexBuffer;
+        break;
+    default:
+        vbuf = NULL; // suppress warning
+        GrCrash("Unknown geometry src type!");
+    }
+
+    GrAssert(NULL != vbuf);
+    GrAssert(!vbuf->isLocked());
+    if (fHWGeometryState.fVertexBuffer != vbuf) {
+        GL_CALL(BindBuffer(GR_GL_ARRAY_BUFFER, vbuf->bufferID()));
+        // new binding invalidates the cached gl*Pointer setup
+        fHWGeometryState.fArrayPtrsDirty = true;
+        fHWGeometryState.fVertexBuffer = vbuf;
+    }
+
+    if (indexed) {
+        GrAssert(NULL != extraIndexOffset);
+
+        GrGLIndexBuffer* ibuf;
+        switch (this->getGeomSrc().fIndexSrc) {
+        case kBuffer_GeometrySrcType:
+            *extraIndexOffset = 0;
+            ibuf = (GrGLIndexBuffer*)this->getGeomSrc().fIndexBuffer;
+            break;
+        case kArray_GeometrySrcType:
+        case kReserved_GeometrySrcType:
+            this->finalizeReservedIndices();
+            *extraIndexOffset = geoPoolState.fPoolStartIndex;
+            ibuf = (GrGLIndexBuffer*) geoPoolState.fPoolIndexBuffer;
+            break;
+        default:
+            ibuf = NULL; // suppress warning
+            GrCrash("Unknown geometry src type!");
+        }
+
+        GrAssert(NULL != ibuf);
+        GrAssert(!ibuf->isLocked());
+        if (fHWGeometryState.fIndexBuffer != ibuf) {
+            GL_CALL(BindBuffer(GR_GL_ELEMENT_ARRAY_BUFFER, ibuf->bufferID()));
+            fHWGeometryState.fIndexBuffer = ibuf;
+        }
+    }
+}
+
+// Returns how many clip edges can be passed as shader uniforms, capped by
+// kMaxEdges and by the remaining fragment-uniform budget.
+int GrGpuGL::getMaxEdges() const {
+    // FIXME: This is a pessimistic estimate based on how many other things
+    // want to add uniforms. This should be centralized somewhere.
+    return GR_CT_MIN(fGLCaps.fMaxFragmentUniformVectors - 8, kMaxEdges);
+}
+
+// Dumps the detected GL capabilities (stencil formats, MSAA FBO flavor, AA
+// sample counts, uniform budget, RGBA8 renderbuffer support) for debugging.
+void GrGpuGL::GLCaps::print() const {
+    for (int i = 0; i < fStencilFormats.count(); ++i) {
+        GrPrintf("Stencil Format %d, stencil bits: %02d, total bits: %02d\n",
+                 i,
+                 fStencilFormats[i].fStencilBits,
+                 fStencilFormats[i].fTotalBits);
+    }
+
+    // gMSFBOExtStr is indexed by fMSFBOType; the asserts pin the enum order
+    GR_STATIC_ASSERT(0 == kNone_MSFBO);
+    GR_STATIC_ASSERT(1 == kDesktopARB_MSFBO);
+    GR_STATIC_ASSERT(2 == kDesktopEXT_MSFBO);
+    GR_STATIC_ASSERT(3 == kAppleES_MSFBO);
+    static const char* gMSFBOExtStr[] = {
+        "None",
+        "ARB",
+        "EXT",
+        "Apple",
+    };
+    GrPrintf("MSAA Type: %s\n", gMSFBOExtStr[fMSFBOType]);
+    for (int i = 0; i < (int)GR_ARRAY_COUNT(fAASamples); ++i) {
+        GrPrintf("AA Level %d has %d samples\n", i, fAASamples[i]);
+    }
+    GrPrintf("Max FS Uniform Vectors: %d\n", fMaxFragmentUniformVectors);
+    GrPrintf("Support RGBA8 Render Buffer: %s\n",
+             (fRGBA8Renderbuffer ? "YES": "NO"));
+}
diff --git a/src/gpu/GrGpuGL.h b/src/gpu/GrGpuGL.h
new file mode 100644
index 0000000000..a2630c8b2c
--- /dev/null
+++ b/src/gpu/GrGpuGL.h
@@ -0,0 +1,264 @@
+
+/*
+ * Copyright 2011 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+
+#ifndef GrGpuGL_DEFINED
+#define GrGpuGL_DEFINED
+
+#include "GrGpu.h"
+#include "GrGLIndexBuffer.h"
+#include "GrGLIRect.h"
+#include "GrGLStencilBuffer.h"
+#include "GrGLTexture.h"
+#include "GrGLVertexBuffer.h"
+
+#include "SkString.h"
+
+/**
+ * Base class for the OpenGL backends of GrGpu. Wraps a GrGLInterface and
+ * shadows the GL state it has set (the fHW* members) so redundant GL calls
+ * can be skipped. The fixed-function and programmable-pipeline backends
+ * derive from this.
+ *
+ * FIX: the onGpuDrawNonIndexed declaration's parameter names were
+ * (vertexCount, numVertices); renamed to (startVertex, vertexCount) to match
+ * the definition in GrGpuGL.cpp. Other parameter names below were likewise
+ * aligned with their definitions; no signatures changed.
+ */
+class GrGpuGL : public GrGpu {
+public:
+    virtual ~GrGpuGL();
+
+    const GrGLInterface* glInterface() const { return fGL; }
+    GrGLBinding glBinding() const { return fGLBinding; }
+    GrGLVersion glVersion() const { return fGLVersion; }
+
+protected:
+    GrGpuGL(const GrGLInterface* glInterface, GrGLBinding glBinding);
+
+    // last vertex/index buffer binding and array-pointer state seen by GL
+    struct {
+        size_t fVertexOffset;
+        GrVertexLayout fVertexLayout;
+        const GrVertexBuffer* fVertexBuffer;
+        const GrIndexBuffer* fIndexBuffer;
+        bool fArrayPtrsDirty;
+    } fHWGeometryState;
+
+    // last AA enable state seen by GL
+    struct AAState {
+        bool fMSAAEnabled;
+        bool fSmoothLineEnabled;
+    } fHWAAState;
+
+    DrState fHWDrawState;
+    bool fHWStencilClip;
+
+    // As flush of GL state proceeds it updates fHWDrawState
+    // to reflect the new state. Later parts of the state flush
+    // may perform cascaded changes but cannot refer to fHWDrawState.
+    // These code paths can refer to the dirty flags. Subclass should
+    // call resetDirtyFlags after its flush is complete
+    struct {
+        bool fRenderTargetChanged : 1;
+        int fTextureChangedMask;
+    } fDirtyFlags;
+    GR_STATIC_ASSERT(8 * sizeof(int) >= kNumStages);
+
+    // clears the dirty flags
+    void resetDirtyFlags();
+
+    // last scissor / viewport scissor state seen by the GL.
+    struct {
+        bool fScissorEnabled;
+        GrGLIRect fScissorRect;
+        GrGLIRect fViewportRect;
+    } fHWBounds;
+
+    // GrGpu overrides
+    virtual void resetContext();
+
+    virtual GrTexture* onCreateTexture(const GrTextureDesc& desc,
+                                       const void* srcData,
+                                       size_t rowBytes);
+    virtual GrVertexBuffer* onCreateVertexBuffer(uint32_t size,
+                                                 bool dynamic);
+    virtual GrIndexBuffer* onCreateIndexBuffer(uint32_t size,
+                                               bool dynamic);
+    virtual GrResource* onCreatePlatformSurface(const GrPlatformSurfaceDesc& desc);
+    virtual bool createStencilBufferForRenderTarget(GrRenderTarget* rt,
+                                                    int width, int height);
+    virtual bool attachStencilBufferToRenderTarget(GrStencilBuffer* sb,
+                                                   GrRenderTarget* rt);
+
+    virtual void onClear(const GrIRect* rect, GrColor color);
+
+    virtual void onForceRenderTargetFlush();
+
+    virtual bool onReadPixels(GrRenderTarget* target,
+                              int left, int top, int width, int height,
+                              GrPixelConfig, void* buffer);
+
+    virtual void onGpuDrawIndexed(GrPrimitiveType type,
+                                  uint32_t startVertex,
+                                  uint32_t startIndex,
+                                  uint32_t vertexCount,
+                                  uint32_t indexCount);
+    virtual void onGpuDrawNonIndexed(GrPrimitiveType type,
+                                     uint32_t startVertex,
+                                     uint32_t vertexCount);
+    virtual void flushScissor(const GrIRect* rect);
+    virtual void clearStencil();
+    virtual void clearStencilClip(const GrIRect& rect, bool insideClip);
+    virtual int getMaxEdges() const;
+
+    // binds texture unit in GL
+    void setTextureUnit(int unit);
+
+    // binds appropriate vertex and index buffers, also returns any
+    // extra verts or indices to offset by.
+    void setBuffers(bool indexed,
+                    int* extraVertexOffset,
+                    int* extraIndexOffset);
+
+    // flushes state that is common to fixed and programmable GL
+    // dither
+    // line smoothing
+    // texture binding
+    // sampler state (filtering, tiling)
+    // FBO binding
+    // line width
+    bool flushGLStateCommon(GrPrimitiveType type);
+
+    // Subclasses should call this to flush the blend state.
+    // The params should be the final coefficients to apply
+    // (after any blending optimizations or dual source blending considerations
+    // have been accounted for).
+    void flushBlend(GrPrimitiveType type,
+                    GrBlendCoeff srcCoeff,
+                    GrBlendCoeff dstCoeff);
+
+    bool hasExtension(const char* ext) {
+        return GrGLHasExtensionFromString(ext, fExtensionString.c_str());
+    }
+
+    // adjusts texture matrix to account for orientation, size, and npotness
+    static void AdjustTextureMatrix(const GrGLTexture* texture,
+                                    GrSamplerState::SampleMode mode,
+                                    GrMatrix* matrix);
+
+    // subclass may try to take advantage of identity tex matrices.
+    // This helper determines if matrix will be identity after all
+    // adjustments are applied.
+    static bool TextureMatrixIsIdentity(const GrGLTexture* texture,
+                                        const GrSamplerState& sampler);
+
+    static bool BlendCoeffReferencesConstant(GrBlendCoeff coeff);
+
+private:
+    // Inits GrDrawTarget::Caps and GLCaps, subclass may enable
+    // additional caps.
+    void initCaps();
+
+    void initFSAASupport();
+
+    // determines valid stencil formats
+    void initStencilFormats();
+
+    // notify callbacks to update state tracking when related
+    // objects are bound to GL or deleted outside of the class
+    void notifyVertexBufferBind(const GrGLVertexBuffer* buffer);
+    void notifyVertexBufferDelete(const GrGLVertexBuffer* buffer);
+    void notifyIndexBufferBind(const GrGLIndexBuffer* buffer);
+    void notifyIndexBufferDelete(const GrGLIndexBuffer* buffer);
+    void notifyTextureDelete(GrGLTexture* texture);
+    void notifyRenderTargetDelete(GrRenderTarget* renderTarget);
+
+    void setSpareTextureUnit();
+
+    // bound is region that may be modified and therefore has to be resolved.
+    // NULL means whole target. Can be an empty rect.
+    void flushRenderTarget(const GrIRect* bound);
+    void flushStencil();
+    void flushAAState(GrPrimitiveType type);
+
+    void resolveRenderTarget(GrGLRenderTarget* rt);
+
+    bool canBeTexture(GrPixelConfig config,
+                      GrGLenum* internalFormat,
+                      GrGLenum* format,
+                      GrGLenum* type);
+    // helpers for onCreateTexture
+    void allocateAndUploadTexData(const GrGLTexture::Desc& desc,
+                                  GrGLenum internalFormat,
+                                  const void* data,
+                                  size_t rowBytes);
+
+    bool createRenderTargetObjects(int width, int height,
+                                   GrGLuint texID,
+                                   GrGLRenderTarget::Desc* desc);
+
+    bool fboInternalFormat(GrPixelConfig config, GrGLenum* format);
+
+    friend class GrGLVertexBuffer;
+    friend class GrGLIndexBuffer;
+    friend class GrGLTexture;
+    friend class GrGLRenderTarget;
+
+    // read these once at beginning and then never again
+    SkString fExtensionString;
+    GrGLVersion fGLVersion;
+
+    // capabilities probed from the GL at context init
+    struct GLCaps {
+        // prealloc space for 8 stencil formats
+        GLCaps() : fStencilFormats(8) {}
+        SkTArray<GrGLStencilBuffer::Format, true> fStencilFormats;
+
+        enum {
+            /**
+             * no support for MSAA FBOs
+             */
+            kNone_MSFBO = 0,
+            /**
+             * GL3.0-style MSAA FBO (GL_ARB_framebuffer_object)
+             */
+            kDesktopARB_MSFBO,
+            /**
+             * earlier GL_EXT_framebuffer* extensions
+             */
+            kDesktopEXT_MSFBO,
+            /**
+             * GL_APPLE_framebuffer_multisample ES extension
+             */
+            kAppleES_MSFBO,
+        } fMSFBOType;
+
+        // TODO: get rid of GrAALevel and use sample cnt directly
+        GrGLuint fAASamples[4];
+
+        // The maximum number of fragment uniform vectors (GLES has min. 16).
+        int fMaxFragmentUniformVectors;
+
+        // ES requires an extension to support RGBA8 in RenderBufferStorage
+        bool fRGBA8Renderbuffer;
+
+        void print() const;
+    } fGLCaps;
+
+
+    // we want to clear stencil buffers when they are created. We want to clear
+    // the entire buffer even if it is larger than the color attachment. We
+    // attach it to this fbo with no color attachment to do the initial clear.
+    GrGLuint fStencilClearFBO;
+
+    bool fHWBlendDisabled;
+
+    // bare index of the active texture unit (not GR_GL_TEXTURE0-based)
+    int fActiveTextureUnitIdx;
+
+    // we record what stencil format worked last time to hopefully exit early
+    // from our loop that tries stencil formats and calls check fb status.
+    int fLastSuccessfulStencilFmtIdx;
+
+    const GrGLInterface* fGL;
+    GrGLBinding fGLBinding;
+
+    bool fPrintedCaps;
+
+    typedef GrGpu INHERITED;
+};
+
+#endif
+
diff --git a/src/gpu/GrGpuGLFixed.cpp b/src/gpu/GrGpuGLFixed.cpp
new file mode 100644
index 0000000000..336b687be8
--- /dev/null
+++ b/src/gpu/GrGpuGLFixed.cpp
@@ -0,0 +1,382 @@
+
+/*
+ * Copyright 2010 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+
+#include "GrGLConfig.h"
+
+#include "GrGpuGLFixed.h"
+#include "GrGpuVertex.h"
+
+#define SKIP_CACHE_CHECK true
+
+struct GrGpuMatrix {
+ GrGLfloat fMat[16];
+
+ void reset() {
+ Gr_bzero(fMat, sizeof(fMat));
+ fMat[0] = fMat[5] = fMat[10] = fMat[15] = GR_Scalar1;
+ }
+
+ void set(const GrMatrix& m) {
+ Gr_bzero(fMat, sizeof(fMat));
+ fMat[0] = GrScalarToFloat(m[GrMatrix::kMScaleX]);
+ fMat[4] = GrScalarToFloat(m[GrMatrix::kMSkewX]);
+ fMat[12] = GrScalarToFloat(m[GrMatrix::kMTransX]);
+
+ fMat[1] = GrScalarToFloat(m[GrMatrix::kMSkewY]);
+ fMat[5] = GrScalarToFloat(m[GrMatrix::kMScaleY]);
+ fMat[13] = GrScalarToFloat(m[GrMatrix::kMTransY]);
+
+ fMat[3] = GrScalarToFloat(m[GrMatrix::kMPersp0]);
+ fMat[7] = GrScalarToFloat(m[GrMatrix::kMPersp1]);
+ fMat[15] = GrScalarToFloat(m[GrMatrix::kMPersp2]);
+
+ fMat[10] = 1.f; // z-scale
+ }
+};
+
+// these must match the order in the corresponding enum in GrGpu.h
+static const GrGLenum gMatrixMode2Enum[] = {
+ GR_GL_MODELVIEW, GR_GL_TEXTURE
+};
+
+#define GL_CALL(X) GR_GL_CALL(this->glInterface(), X)
+///////////////////////////////////////////////////////////////////////////////
+
+namespace {
+GrGLBinding get_binding_in_use(const GrGLInterface* gl) {
+ if (gl->supportsDesktop()) {
+ return kDesktop_GrGLBinding;
+ } else {
+ GrAssert(gl->supportsES1());
+ return kES1_GrGLBinding;
+ }
+}
+}
+
+GrGpuGLFixed::GrGpuGLFixed(const GrGLInterface* gl)
+ : GrGpuGL(gl, get_binding_in_use(gl)) {
+}
+
+GrGpuGLFixed::~GrGpuGLFixed() {
+}
+
+void GrGpuGLFixed::resetContext() {
+ INHERITED::resetContext();
+
+ GL_CALL(Disable(GR_GL_TEXTURE_2D));
+
+ for (int s = 0; s < kNumStages; ++s) {
+ setTextureUnit(s);
+ GL_CALL(EnableClientState(GR_GL_VERTEX_ARRAY));
+ GL_CALL(TexEnvi(GR_GL_TEXTURE_ENV,
+ GR_GL_TEXTURE_ENV_MODE,
+ GR_GL_COMBINE));
+ GL_CALL(TexEnvi(GR_GL_TEXTURE_ENV,
+ GR_GL_COMBINE_RGB,
+ GR_GL_MODULATE));
+ GL_CALL(TexEnvi(GR_GL_TEXTURE_ENV,
+ GR_GL_SRC0_RGB,
+ GR_GL_TEXTURE0+s));
+ GL_CALL(TexEnvi(GR_GL_TEXTURE_ENV,
+ GR_GL_SRC1_RGB,
+ GR_GL_PREVIOUS));
+ GL_CALL(TexEnvi(GR_GL_TEXTURE_ENV,
+ GR_GL_OPERAND1_RGB,
+ GR_GL_SRC_COLOR));
+
+ GL_CALL(TexEnvi(GR_GL_TEXTURE_ENV,
+ GR_GL_COMBINE_ALPHA,
+ GR_GL_MODULATE));
+ GL_CALL(TexEnvi(GR_GL_TEXTURE_ENV,
+ GR_GL_SRC0_ALPHA,
+ GR_GL_TEXTURE0+s));
+ GL_CALL(TexEnvi(GR_GL_TEXTURE_ENV,
+ GR_GL_OPERAND0_ALPHA,
+ GR_GL_SRC_ALPHA));
+ GL_CALL(TexEnvi(GR_GL_TEXTURE_ENV,
+ GR_GL_SRC1_ALPHA,
+ GR_GL_PREVIOUS));
+ GL_CALL(TexEnvi(GR_GL_TEXTURE_ENV,
+ GR_GL_OPERAND1_ALPHA,
+ GR_GL_SRC_ALPHA));
+
+ // color operand0 changes between GL_SRC_COLOR and GL_SRC_ALPHA depending
+ // upon whether we have a (premultiplied) RGBA texture or just an ALPHA
+ // texture, e.g.:
+ //glTexEnvi(GL_TEXTURE_ENV, GL_OPERAND0_RGB, GL_SRC_COLOR);
+ fHWRGBOperand0[s] = (TextureEnvRGBOperands) -1;
+ }
+
+ fHWGeometryState.fVertexLayout = 0;
+ fHWGeometryState.fVertexOffset = ~0;
+ GL_CALL(EnableClientState(GR_GL_VERTEX_ARRAY));
+ GL_CALL(DisableClientState(GR_GL_TEXTURE_COORD_ARRAY));
+ GL_CALL(ShadeModel(GR_GL_FLAT));
+ GL_CALL(DisableClientState(GR_GL_COLOR_ARRAY));
+
+ GL_CALL(PointSize(1.f));
+
+ GrGLClearErr(this->glInterface());
+ fTextVerts = false;
+
+ fBaseVertex = 0xffffffff;
+}
+
+
+void GrGpuGLFixed::flushProjectionMatrix() {
+ float mat[16];
+ Gr_bzero(mat, sizeof(mat));
+
+ GrAssert(NULL != fCurrDrawState.fRenderTarget);
+
+ mat[0] = 2.f / fCurrDrawState.fRenderTarget->width();
+ mat[5] = -2.f / fCurrDrawState.fRenderTarget->height();
+ mat[10] = -1.f;
+ mat[15] = 1;
+
+ mat[12] = -1.f;
+ mat[13] = 1.f;
+
+ GL_CALL(MatrixMode(GR_GL_PROJECTION));
+ GL_CALL(LoadMatrixf(mat));
+}
+
+bool GrGpuGLFixed::flushGraphicsState(GrPrimitiveType type) {
+
+ bool usingTextures[kNumStages];
+
+ for (int s = 0; s < kNumStages; ++s) {
+ usingTextures[s] = this->isStageEnabled(s);
+ if (usingTextures[s] && fCurrDrawState.fSamplerStates[s].isGradient()) {
+ unimpl("Fixed pipe doesn't support radial/sweep gradients");
+ return false;
+ }
+ }
+
+ if (kES1_GrGLBinding == this->glBinding()) {
+ if (BlendCoeffReferencesConstant(fCurrDrawState.fSrcBlend) ||
+ BlendCoeffReferencesConstant(fCurrDrawState.fDstBlend)) {
+ unimpl("ES1 doesn't support blend constant");
+ return false;
+ }
+ }
+
+ if (!flushGLStateCommon(type)) {
+ return false;
+ }
+
+ GrBlendCoeff srcCoeff, dstCoeff;
+ if (kSkipDraw_BlendOptFlag &
+ this->getBlendOpts(false, &srcCoeff, &dstCoeff)) {
+ return false;
+ }
+
+ this->flushBlend(type, srcCoeff, dstCoeff);
+
+ if (fDirtyFlags.fRenderTargetChanged) {
+ flushProjectionMatrix();
+ }
+
+ for (int s = 0; s < kNumStages; ++s) {
+ bool wasUsingTexture = StageWillBeUsed(s, fHWGeometryState.fVertexLayout, fHWDrawState);
+ if (usingTextures[s] != wasUsingTexture) {
+ setTextureUnit(s);
+ if (usingTextures[s]) {
+ GL_CALL(Enable(GR_GL_TEXTURE_2D));
+ } else {
+ GL_CALL(Disable(GR_GL_TEXTURE_2D));
+ }
+ }
+ }
+
+ uint32_t vertColor = (this->getGeomSrc().fVertexLayout & kColor_VertexLayoutBit);
+ uint32_t prevVertColor = (fHWGeometryState.fVertexLayout &
+ kColor_VertexLayoutBit);
+
+ if (vertColor != prevVertColor) {
+ if (vertColor) {
+ GL_CALL(ShadeModel(GR_GL_SMOOTH));
+ // invalidate the immediate mode color
+ fHWDrawState.fColor = GrColor_ILLEGAL;
+ } else {
+ GL_CALL(ShadeModel(GR_GL_FLAT));
+ }
+ }
+
+
+ if (!vertColor && fHWDrawState.fColor != fCurrDrawState.fColor) {
+ GL_CALL(Color4ub(GrColorUnpackR(fCurrDrawState.fColor),
+ GrColorUnpackG(fCurrDrawState.fColor),
+ GrColorUnpackB(fCurrDrawState.fColor),
+ GrColorUnpackA(fCurrDrawState.fColor)));
+ fHWDrawState.fColor = fCurrDrawState.fColor;
+ }
+
+ // set texture environment, decide whether we are modulating by RGB or A.
+ for (int s = 0; s < kNumStages; ++s) {
+ if (usingTextures[s]) {
+ GrGLTexture* texture = (GrGLTexture*)fCurrDrawState.fTextures[s];
+ if (NULL != texture) {
+ TextureEnvRGBOperands nextRGBOperand0 =
+ (GrPixelConfigIsAlphaOnly(texture->config())) ?
+ kAlpha_TextureEnvRGBOperand :
+ kColor_TextureEnvRGBOperand;
+ if (fHWRGBOperand0[s] != nextRGBOperand0) {
+ setTextureUnit(s);
+ GL_CALL(TexEnvi(GR_GL_TEXTURE_ENV,
+ GR_GL_OPERAND0_RGB,
+ (nextRGBOperand0==kAlpha_TextureEnvRGBOperand) ?
+ GR_GL_SRC_ALPHA :
+ GR_GL_SRC_COLOR));
+ fHWRGBOperand0[s] = nextRGBOperand0;
+ }
+
+ if (((1 << s) & fDirtyFlags.fTextureChangedMask) ||
+ (fHWDrawState.fSamplerStates[s].getMatrix() !=
+ getSamplerMatrix(s))) {
+
+ GrMatrix texMat = getSamplerMatrix(s);
+ AdjustTextureMatrix(texture,
+ GrSamplerState::kNormal_SampleMode,
+ &texMat);
+ GrGpuMatrix glm;
+ glm.set(texMat);
+ setTextureUnit(s);
+ GL_CALL(MatrixMode(GR_GL_TEXTURE));
+ GL_CALL(LoadMatrixf(glm.fMat));
+ recordHWSamplerMatrix(s, getSamplerMatrix(s));
+ }
+ } else {
+ GrAssert(!"Rendering with texture vert flag set but no bound texture");
+ return false;
+ }
+ }
+ }
+
+ if (fHWDrawState.fViewMatrix != fCurrDrawState.fViewMatrix) {
+ GrGpuMatrix glm;
+ glm.set(fCurrDrawState.fViewMatrix);
+ GL_CALL(MatrixMode(GR_GL_MODELVIEW));
+ GL_CALL(LoadMatrixf(glm.fMat));
+ fHWDrawState.fViewMatrix =
+ fCurrDrawState.fViewMatrix;
+ }
+ resetDirtyFlags();
+ return true;
+}
+
+void GrGpuGLFixed::setupGeometry(int* startVertex,
+ int* startIndex,
+ int vertexCount,
+ int indexCount) {
+
+ int newColorOffset;
+ int newCoverageOffset;
+ int newTexCoordOffsets[kNumStages];
+ int newEdgeOffset;
+
+ GrGLsizei newStride = VertexSizeAndOffsetsByStage(this->getGeomSrc().fVertexLayout,
+ newTexCoordOffsets,
+ &newColorOffset,
+ &newCoverageOffset,
+ &newEdgeOffset);
+ GrAssert(-1 == newEdgeOffset); // not supported by fixed pipe
+ GrAssert(-1 == newCoverageOffset); // not supported by fixed pipe
+
+ int oldColorOffset;
+ int oldCoverageOffset;
+ int oldTexCoordOffsets[kNumStages];
+ int oldEdgeOffset;
+ GrGLsizei oldStride = VertexSizeAndOffsetsByStage(fHWGeometryState.fVertexLayout,
+ oldTexCoordOffsets,
+ &oldColorOffset,
+ &oldCoverageOffset,
+ &oldEdgeOffset);
+ GrAssert(-1 == oldEdgeOffset);
+ GrAssert(-1 == oldCoverageOffset);
+
+ bool indexed = NULL != startIndex;
+
+ int extraVertexOffset;
+ int extraIndexOffset;
+ setBuffers(indexed, &extraVertexOffset, &extraIndexOffset);
+
+ GrGLenum scalarType;
+ if (this->getGeomSrc().fVertexLayout & kTextFormat_VertexLayoutBit) {
+ scalarType = GrGLTextType;
+ } else {
+ scalarType = GrGLType;
+ }
+
+ size_t vertexOffset = (*startVertex + extraVertexOffset) * newStride;
+ *startVertex = 0;
+ if (indexed) {
+ *startIndex += extraIndexOffset;
+ }
+
+ // all the Pointers must be set if any of these are true
+ bool allOffsetsChange = fHWGeometryState.fArrayPtrsDirty ||
+ vertexOffset != fHWGeometryState.fVertexOffset ||
+ newStride != oldStride;
+
+ // position and tex coord offsets change if above conditions are true
+ // or the type changed based on text vs nontext type coords.
+ bool posAndTexChange = allOffsetsChange ||
+ ((GrGLTextType != GrGLType) &&
+ (kTextFormat_VertexLayoutBit &
+ (fHWGeometryState.fVertexLayout ^
+ this->getGeomSrc().fVertexLayout)));
+
+ if (posAndTexChange) {
+ GL_CALL(VertexPointer(2, scalarType,
+ newStride, (GrGLvoid*)vertexOffset));
+ fHWGeometryState.fVertexOffset = vertexOffset;
+ }
+
+ for (int s = 0; s < kNumStages; ++s) {
+ // need to enable array if tex coord offset is 0
+ // (using positions as coords)
+ if (newTexCoordOffsets[s] >= 0) {
+ GrGLvoid* texCoordOffset = (GrGLvoid*)(vertexOffset +
+ newTexCoordOffsets[s]);
+ if (oldTexCoordOffsets[s] < 0) {
+ GL_CALL(ClientActiveTexture(GR_GL_TEXTURE0+s));
+ GL_CALL(EnableClientState(GR_GL_TEXTURE_COORD_ARRAY));
+ GL_CALL(TexCoordPointer(2, scalarType,
+ newStride, texCoordOffset));
+ } else if (posAndTexChange ||
+ newTexCoordOffsets[s] != oldTexCoordOffsets[s]) {
+ GL_CALL(ClientActiveTexture(GR_GL_TEXTURE0+s));
+ GL_CALL(TexCoordPointer(2, scalarType,
+ newStride, texCoordOffset));
+ }
+ } else if (oldTexCoordOffsets[s] >= 0) {
+ GL_CALL(ClientActiveTexture(GR_GL_TEXTURE0+s));
+ GL_CALL(DisableClientState(GR_GL_TEXTURE_COORD_ARRAY));
+ }
+ }
+
+ if (newColorOffset > 0) {
+ GrGLvoid* colorOffset = (GrGLvoid*)(vertexOffset + newColorOffset);
+ if (oldColorOffset <= 0) {
+ GL_CALL(EnableClientState(GR_GL_COLOR_ARRAY));
+ GL_CALL(ColorPointer(4, GR_GL_UNSIGNED_BYTE,
+ newStride, colorOffset));
+ } else if (allOffsetsChange || newColorOffset != oldColorOffset) {
+ GL_CALL(ColorPointer(4, GR_GL_UNSIGNED_BYTE,
+ newStride, colorOffset));
+ }
+ } else if (oldColorOffset > 0) {
+ GL_CALL(DisableClientState(GR_GL_COLOR_ARRAY));
+ }
+
+ fHWGeometryState.fVertexLayout = this->getGeomSrc().fVertexLayout;
+ fHWGeometryState.fArrayPtrsDirty = false;
+}
diff --git a/src/gpu/GrGpuGLFixed.h b/src/gpu/GrGpuGLFixed.h
new file mode 100644
index 0000000000..0e624b59db
--- /dev/null
+++ b/src/gpu/GrGpuGLFixed.h
@@ -0,0 +1,65 @@
+
+/*
+ * Copyright 2010 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+
+#ifndef GrGpuGLFixed_DEFINED
+#define GrGpuGLFixed_DEFINED
+
+#include "GrGpuGL.h"
+
+// Fixed Pipeline OpenGL or OpenGL ES 1.x
+class GrGpuGLFixed : public GrGpuGL {
+public:
+ GrGpuGLFixed(const GrGLInterface* glInterface);
+ virtual ~GrGpuGLFixed();
+
+protected:
+ // overrides from GrGpu
+ virtual bool flushGraphicsState(GrPrimitiveType type);
+ virtual void setupGeometry(int* startVertex,
+ int* startIndex,
+ int vertexCount,
+ int indexCount);
+
+private:
+ virtual void resetContext();
+
+ // Helpers to make code more readable
+ const GrMatrix& getHWSamplerMatrix(int stage) const {
+ return fHWDrawState.fSamplerStates[stage].getMatrix();
+ }
+ void recordHWSamplerMatrix(int stage, const GrMatrix& matrix) {
+ fHWDrawState.fSamplerStates[stage].setMatrix(matrix);
+ }
+
+ // when the texture is GL_RGBA we set the GL_COMBINE texture
+ // environment rgb operand 0 to be GL_COLOR to modulate each incoming
+ // R,G, & B by the texture's R, G, & B. When the texture is alpha-only we
+ // set the operand to GL_ALPHA so that the incoming frag's R, G, &B are all
+ // modulated by the texture's A.
+ enum TextureEnvRGBOperands {
+ kAlpha_TextureEnvRGBOperand,
+ kColor_TextureEnvRGBOperand,
+ };
+ TextureEnvRGBOperands fHWRGBOperand0[kNumStages];
+
+ void flushProjectionMatrix();
+
+ // are the currently bound vertex buffers/arrays laid
+ // out for text or other drawing.
+ bool fTextVerts;
+
+ // On GL we have to build the base vertex offset into the
+ // glVertexPointer/glTexCoordPointer/etc
+ int fBaseVertex;
+
+ typedef GrGpuGL INHERITED;
+};
+
+#endif
diff --git a/src/gpu/GrGpuGLShaders.cpp b/src/gpu/GrGpuGLShaders.cpp
new file mode 100644
index 0000000000..f1bc5ecae7
--- /dev/null
+++ b/src/gpu/GrGpuGLShaders.cpp
@@ -0,0 +1,1059 @@
+
+/*
+ * Copyright 2011 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+#include "GrBinHashKey.h"
+#include "GrGLProgram.h"
+#include "GrGpuGLShaders.h"
+#include "GrGpuVertex.h"
+#include "GrNoncopyable.h"
+#include "GrStringBuilder.h"
+#include "GrRandom.h"
+
+#define SKIP_CACHE_CHECK true
+#define GR_UINT32_MAX static_cast<uint32_t>(-1)
+
+#include "GrTHashCache.h"
+
+class GrGpuGLShaders::ProgramCache : public ::GrNoncopyable {
+private:
+ class Entry;
+
+ typedef GrBinHashKey<Entry, GrGLProgram::kProgramKeySize> ProgramHashKey;
+
+ class Entry : public ::GrNoncopyable {
+ public:
+ Entry() {}
+ void copyAndTakeOwnership(Entry& entry) {
+ fProgramData.copyAndTakeOwnership(entry.fProgramData);
+ fKey = entry.fKey; // ownership transfer
+ fLRUStamp = entry.fLRUStamp;
+ }
+
+ public:
+ int compare(const ProgramHashKey& key) const { return fKey.compare(key); }
+
+ public:
+ GrGLProgram::CachedData fProgramData;
+ ProgramHashKey fKey;
+ unsigned int fLRUStamp;
+ };
+
+ GrTHashTable<Entry, ProgramHashKey, 8> fHashCache;
+
+ // We may have kMaxEntries+1 shaders in the GL context because
+ // we create a new shader before evicting from the cache.
+ enum {
+ kMaxEntries = 32
+ };
+ Entry fEntries[kMaxEntries];
+ int fCount;
+ unsigned int fCurrLRUStamp;
+ const GrGLInterface* fGL;
+ GrGLProgram::GLSLVersion fGLSLVersion;
+
+public:
+ ProgramCache(const GrGLInterface* gl,
+ GrGLProgram::GLSLVersion glslVersion)
+ : fCount(0)
+ , fCurrLRUStamp(0)
+ , fGL(gl)
+ , fGLSLVersion(glslVersion) {
+ }
+
+ ~ProgramCache() {
+ for (int i = 0; i < fCount; ++i) {
+ GrGpuGLShaders::DeleteProgram(fGL, &fEntries[i].fProgramData);
+ }
+ }
+
+ void abandon() {
+ fCount = 0;
+ }
+
+ void invalidateViewMatrices() {
+ for (int i = 0; i < fCount; ++i) {
+ // set to illegal matrix
+ fEntries[i].fProgramData.fViewMatrix = GrMatrix::InvalidMatrix();
+ }
+ }
+
+ GrGLProgram::CachedData* getProgramData(const GrGLProgram& desc) {
+ Entry newEntry;
+ newEntry.fKey.setKeyData(desc.keyData());
+
+ Entry* entry = fHashCache.find(newEntry.fKey);
+ if (NULL == entry) {
+ if (!desc.genProgram(fGL, fGLSLVersion, &newEntry.fProgramData)) {
+ return NULL;
+ }
+ if (fCount < kMaxEntries) {
+ entry = fEntries + fCount;
+ ++fCount;
+ } else {
+ GrAssert(kMaxEntries == fCount);
+ entry = fEntries;
+ for (int i = 1; i < kMaxEntries; ++i) {
+ if (fEntries[i].fLRUStamp < entry->fLRUStamp) {
+ entry = fEntries + i;
+ }
+ }
+ fHashCache.remove(entry->fKey, entry);
+ GrGpuGLShaders::DeleteProgram(fGL, &entry->fProgramData);
+ }
+ entry->copyAndTakeOwnership(newEntry);
+ fHashCache.insert(entry->fKey, entry);
+ }
+
+ entry->fLRUStamp = fCurrLRUStamp;
+ if (GR_UINT32_MAX == fCurrLRUStamp) {
+ // wrap around! just trash our LRU, one time hit.
+ for (int i = 0; i < fCount; ++i) {
+ fEntries[i].fLRUStamp = 0;
+ }
+ }
+ ++fCurrLRUStamp;
+ return &entry->fProgramData;
+ }
+};
+
+void GrGpuGLShaders::abandonResources(){
+ INHERITED::abandonResources();
+ fProgramCache->abandon();
+}
+
+void GrGpuGLShaders::DeleteProgram(const GrGLInterface* gl,
+ CachedData* programData) {
+ GR_GL_CALL(gl, DeleteShader(programData->fVShaderID));
+ if (programData->fGShaderID) {
+ GR_GL_CALL(gl, DeleteShader(programData->fGShaderID));
+ }
+ GR_GL_CALL(gl, DeleteShader(programData->fFShaderID));
+ GR_GL_CALL(gl, DeleteProgram(programData->fProgramID));
+ GR_DEBUGCODE(memset(programData, 0, sizeof(*programData));)
+}
+
+////////////////////////////////////////////////////////////////////////////////
+
+#define GL_CALL(X) GR_GL_CALL(this->glInterface(), X)
+
+namespace {
+
+GrGLProgram::GLSLVersion get_glsl_version(GrGLBinding binding,
+ const GrGLInterface* gl) {
+ GrGLSLVersion ver = GrGLGetGLSLVersion(gl);
+ switch (binding) {
+ case kDesktop_GrGLBinding:
+ GrAssert(ver >= GR_GLSL_VER(1,20));
+ if (ver >= GR_GLSL_VER(1,50)) {
+ return GrGLProgram::k150_GLSLVersion;
+ } else if (ver >= GR_GLSL_VER(1,30)) {
+ return GrGLProgram::k130_GLSLVersion;
+ } else {
+ return GrGLProgram::k120_GLSLVersion;
+ }
+ case kES2_GrGLBinding:
+ // version 1.00 of ES GLSL based on ver 1.20 of desktop GLSL
+ GrAssert(ver >= GR_GL_VER(1,00));
+ return GrGLProgram::k120_GLSLVersion;
+ default:
+ GrCrash("Attempting to get GLSL version in unknown or fixed-"
+ "function GL binding.");
+ return GrGLProgram::k120_GLSLVersion; // suppress warning
+ }
+}
+
+template <typename T>
+T random_val(GrRandom* r, T count) {
+ return (T)(int)(r->nextF() * count);
+}
+
+}
+
+bool GrGpuGLShaders::programUnitTest() {
+
+ GrGLProgram::GLSLVersion glslVersion =
+ get_glsl_version(this->glBinding(), this->glInterface());
+ static const int STAGE_OPTS[] = {
+ 0,
+ StageDesc::kNoPerspective_OptFlagBit,
+ StageDesc::kIdentity_CoordMapping
+ };
+ GrGLProgram program;
+ ProgramDesc& pdesc = program.fProgramDesc;
+
+ static const int NUM_TESTS = 512;
+
+ // GrRandoms nextU() values have patterns in the low bits
+ // So using nextU() % array_count might never take some values.
+ GrRandom random;
+ for (int t = 0; t < NUM_TESTS; ++t) {
+
+#if 0
+ GrPrintf("\nTest Program %d\n-------------\n", t);
+ static const int stop = -1;
+ if (t == stop) {
+ int breakpointhere = 9;
+ }
+#endif
+
+ pdesc.fVertexLayout = 0;
+ pdesc.fEmitsPointSize = random.nextF() > .5f;
+ pdesc.fColorType = static_cast<int>(random.nextF() *
+ ProgramDesc::kColorTypeCnt);
+
+ int idx = (int)(random.nextF() * (SkXfermode::kCoeffModesCnt));
+ pdesc.fColorFilterXfermode = (SkXfermode::Mode)idx;
+
+ idx = (int)(random.nextF() * (kNumStages+1));
+ pdesc.fFirstCoverageStage = idx;
+
+ pdesc.fVertexLayout |= (random.nextF() > .5f) ?
+ GrDrawTarget::kCoverage_VertexLayoutBit :
+ 0;
+
+#if GR_GL_EXPERIMENTAL_GS
+ pdesc.fExperimentalGS = this->getCaps().fGeometryShaderSupport &&
+ random.nextF() > .5f;
+#endif
+
+ bool edgeAA = random.nextF() > .5f;
+ if (edgeAA) {
+ bool vertexEdgeAA = random.nextF() > .5f;
+ if (vertexEdgeAA) {
+ pdesc.fVertexLayout |= GrDrawTarget::kEdge_VertexLayoutBit;
+ if (this->getCaps().fShaderDerivativeSupport) {
+ pdesc.fVertexEdgeType = random.nextF() > 0.5f ?
+ kHairQuad_EdgeType :
+ kHairLine_EdgeType;
+ } else {
+ pdesc.fVertexEdgeType = kHairLine_EdgeType;
+ }
+ pdesc.fEdgeAANumEdges = 0;
+ } else {
+ pdesc.fEdgeAANumEdges = static_cast<int>(1 + random.nextF() *
+ this->getMaxEdges());
+ pdesc.fEdgeAAConcave = random.nextF() > .5f;
+ }
+ } else {
+ pdesc.fEdgeAANumEdges = 0;
+ }
+
+ if (this->getCaps().fDualSourceBlendingSupport) {
+ pdesc.fDualSrcOutput =
+ (ProgramDesc::DualSrcOutput)
+ (int)(random.nextF() * ProgramDesc::kDualSrcOutputCnt);
+ } else {
+ pdesc.fDualSrcOutput = ProgramDesc::kNone_DualSrcOutput;
+ }
+
+ for (int s = 0; s < kNumStages; ++s) {
+ // enable the stage?
+ if (random.nextF() > .5f) {
+ // use separate tex coords?
+ if (random.nextF() > .5f) {
+ int t = (int)(random.nextF() * kMaxTexCoords);
+ pdesc.fVertexLayout |= StageTexCoordVertexLayoutBit(s, t);
+ } else {
+ pdesc.fVertexLayout |= StagePosAsTexCoordVertexLayoutBit(s);
+ }
+ }
+ // use text-formatted verts?
+ if (random.nextF() > .5f) {
+ pdesc.fVertexLayout |= kTextFormat_VertexLayoutBit;
+ }
+ idx = (int)(random.nextF() * GR_ARRAY_COUNT(STAGE_OPTS));
+ StageDesc& stage = pdesc.fStages[s];
+ stage.fOptFlags = STAGE_OPTS[idx];
+ stage.fModulation = random_val(&random, StageDesc::kModulationCnt);
+ stage.fCoordMapping = random_val(&random, StageDesc::kCoordMappingCnt);
+ stage.fFetchMode = random_val(&random, StageDesc::kFetchModeCnt);
+ // convolution shaders don't work with persp tex matrix
+ if (stage.fFetchMode == StageDesc::kConvolution_FetchMode) {
+ stage.fOptFlags |= StageDesc::kNoPerspective_OptFlagBit;
+ }
+ stage.setEnabled(VertexUsesStage(s, pdesc.fVertexLayout));
+ stage.fKernelWidth = static_cast<int8_t>(4 * random.nextF() + 2);
+ }
+ CachedData cachedData;
+ if (!program.genProgram(this->glInterface(),
+ glslVersion,
+ &cachedData)) {
+ return false;
+ }
+ DeleteProgram(this->glInterface(), &cachedData);
+ }
+ return true;
+}
+
+namespace {
+GrGLBinding get_binding_in_use(const GrGLInterface* gl) {
+ if (gl->supportsDesktop()) {
+ return kDesktop_GrGLBinding;
+ } else {
+ GrAssert(gl->supportsES2());
+ return kES2_GrGLBinding;
+ }
+}
+}
+
+GrGpuGLShaders::GrGpuGLShaders(const GrGLInterface* gl)
+ : GrGpuGL(gl, get_binding_in_use(gl)) {
+
+ GrGLProgram::GLSLVersion glslVersion =
+ get_glsl_version(this->glBinding(), gl);
+
+ // Enable supported shader-related caps
+ fCaps.fShaderSupport = true;
+ fCaps.fSupportPerVertexCoverage = true;
+ if (kDesktop_GrGLBinding == this->glBinding()) {
+ fCaps.fDualSourceBlendingSupport =
+ this->glVersion() >= GR_GL_VER(3,3) ||
+ this->hasExtension("GL_ARB_blend_func_extended");
+ fCaps.fShaderDerivativeSupport = true;
+ // we don't support GL_ARB_geometry_shader4, just GL 3.2+ GS
+ fCaps.fGeometryShaderSupport =
+ this->glVersion() >= GR_GL_VER(3,2) &&
+ glslVersion >= GrGLProgram::k150_GLSLVersion;
+ } else {
+ fCaps.fShaderDerivativeSupport =
+ this->hasExtension("GL_OES_standard_derivatives");
+ }
+
+ fProgramData = NULL;
+ fProgramCache = new ProgramCache(gl, glslVersion);
+
+#if 0
+ this->programUnitTest();
+#endif
+}
+
+GrGpuGLShaders::~GrGpuGLShaders() {
+ delete fProgramCache;
+}
+
+const GrMatrix& GrGpuGLShaders::getHWSamplerMatrix(int stage) {
+ GrAssert(fProgramData);
+
+ if (GrGLProgram::kSetAsAttribute ==
+ fProgramData->fUniLocations.fStages[stage].fTextureMatrixUni) {
+ return fHWDrawState.fSamplerStates[stage].getMatrix();
+ } else {
+ return fProgramData->fTextureMatrices[stage];
+ }
+}
+
+void GrGpuGLShaders::recordHWSamplerMatrix(int stage, const GrMatrix& matrix) {
+ GrAssert(fProgramData);
+ if (GrGLProgram::kSetAsAttribute ==
+ fProgramData->fUniLocations.fStages[stage].fTextureMatrixUni) {
+ fHWDrawState.fSamplerStates[stage].setMatrix(matrix);
+ } else {
+ fProgramData->fTextureMatrices[stage] = matrix;
+ }
+}
+
+void GrGpuGLShaders::resetContext() {
+ INHERITED::resetContext();
+
+ fHWGeometryState.fVertexLayout = 0;
+ fHWGeometryState.fVertexOffset = ~0;
+ GL_CALL(DisableVertexAttribArray(GrGLProgram::ColorAttributeIdx()));
+ GL_CALL(DisableVertexAttribArray(GrGLProgram::EdgeAttributeIdx()));
+ for (int t = 0; t < kMaxTexCoords; ++t) {
+ GL_CALL(DisableVertexAttribArray(GrGLProgram::TexCoordAttributeIdx(t)));
+ }
+ GL_CALL(EnableVertexAttribArray(GrGLProgram::PositionAttributeIdx()));
+
+ fHWProgramID = 0;
+}
+
+void GrGpuGLShaders::flushViewMatrix() {
+ GrAssert(NULL != fCurrDrawState.fRenderTarget);
+ GrMatrix m;
+ m.setAll(
+ GrIntToScalar(2) / fCurrDrawState.fRenderTarget->width(), 0, -GR_Scalar1,
+ 0,-GrIntToScalar(2) / fCurrDrawState.fRenderTarget->height(), GR_Scalar1,
+ 0, 0, GrMatrix::I()[8]);
+ m.setConcat(m, fCurrDrawState.fViewMatrix);
+
+ // ES doesn't allow you to pass true to the transpose param,
+ // so do our own transpose
+ GrGLfloat mt[] = {
+ GrScalarToFloat(m[GrMatrix::kMScaleX]),
+ GrScalarToFloat(m[GrMatrix::kMSkewY]),
+ GrScalarToFloat(m[GrMatrix::kMPersp0]),
+ GrScalarToFloat(m[GrMatrix::kMSkewX]),
+ GrScalarToFloat(m[GrMatrix::kMScaleY]),
+ GrScalarToFloat(m[GrMatrix::kMPersp1]),
+ GrScalarToFloat(m[GrMatrix::kMTransX]),
+ GrScalarToFloat(m[GrMatrix::kMTransY]),
+ GrScalarToFloat(m[GrMatrix::kMPersp2])
+ };
+
+ if (GrGLProgram::kSetAsAttribute ==
+ fProgramData->fUniLocations.fViewMatrixUni) {
+ int baseIdx = GrGLProgram::ViewMatrixAttributeIdx();
+ GL_CALL(VertexAttrib4fv(baseIdx + 0, mt+0));
+ GL_CALL(VertexAttrib4fv(baseIdx + 1, mt+3));
+ GL_CALL(VertexAttrib4fv(baseIdx + 2, mt+6));
+ } else {
+ GrAssert(GrGLProgram::kUnusedUniform !=
+ fProgramData->fUniLocations.fViewMatrixUni);
+ GL_CALL(UniformMatrix3fv(fProgramData->fUniLocations.fViewMatrixUni,
+ 1, false, mt));
+ }
+}
+
+void GrGpuGLShaders::flushTextureDomain(int s) {
+ const GrGLint& uni = fProgramData->fUniLocations.fStages[s].fTexDomUni;
+ if (GrGLProgram::kUnusedUniform != uni) {
+ const GrRect &texDom =
+ fCurrDrawState.fSamplerStates[s].getTextureDomain();
+
+ if (((1 << s) & fDirtyFlags.fTextureChangedMask) ||
+ fProgramData->fTextureDomain[s] != texDom) {
+
+ fProgramData->fTextureDomain[s] = texDom;
+
+ float values[4] = {
+ GrScalarToFloat(texDom.left()),
+ GrScalarToFloat(texDom.top()),
+ GrScalarToFloat(texDom.right()),
+ GrScalarToFloat(texDom.bottom())
+ };
+
+ GrGLTexture* texture = (GrGLTexture*) fCurrDrawState.fTextures[s];
+ GrGLTexture::Orientation orientation = texture->orientation();
+
+ // vertical flip if necessary
+ if (GrGLTexture::kBottomUp_Orientation == orientation) {
+ values[1] = 1.0f - values[1];
+ values[3] = 1.0f - values[3];
+ // The top and bottom were just flipped, so correct the ordering
+ // of elements so that values = (l, t, r, b).
+ SkTSwap(values[1], values[3]);
+ }
+
+ values[0] *= SkScalarToFloat(texture->contentScaleX());
+ values[2] *= SkScalarToFloat(texture->contentScaleX());
+ values[1] *= SkScalarToFloat(texture->contentScaleY());
+ values[3] *= SkScalarToFloat(texture->contentScaleY());
+
+ GL_CALL(Uniform4fv(uni, 1, values));
+ }
+ }
+}
+
+void GrGpuGLShaders::flushTextureMatrix(int s) {
+ const GrGLint& uni = fProgramData->fUniLocations.fStages[s].fTextureMatrixUni;
+ GrGLTexture* texture = (GrGLTexture*) fCurrDrawState.fTextures[s];
+ if (NULL != texture) {
+ if (GrGLProgram::kUnusedUniform != uni &&
+ (((1 << s) & fDirtyFlags.fTextureChangedMask) ||
+ getHWSamplerMatrix(s) != getSamplerMatrix(s))) {
+
+ GrAssert(NULL != fCurrDrawState.fTextures[s]);
+
+ GrGLTexture* texture = (GrGLTexture*) fCurrDrawState.fTextures[s];
+
+ GrMatrix m = getSamplerMatrix(s);
+ GrSamplerState::SampleMode mode =
+ fCurrDrawState.fSamplerStates[s].getSampleMode();
+ AdjustTextureMatrix(texture, mode, &m);
+
+ // ES doesn't allow you to pass true to the transpose param,
+ // so do our own transpose
+ GrGLfloat mt[] = {
+ GrScalarToFloat(m[GrMatrix::kMScaleX]),
+ GrScalarToFloat(m[GrMatrix::kMSkewY]),
+ GrScalarToFloat(m[GrMatrix::kMPersp0]),
+ GrScalarToFloat(m[GrMatrix::kMSkewX]),
+ GrScalarToFloat(m[GrMatrix::kMScaleY]),
+ GrScalarToFloat(m[GrMatrix::kMPersp1]),
+ GrScalarToFloat(m[GrMatrix::kMTransX]),
+ GrScalarToFloat(m[GrMatrix::kMTransY]),
+ GrScalarToFloat(m[GrMatrix::kMPersp2])
+ };
+
+ if (GrGLProgram::kSetAsAttribute ==
+ fProgramData->fUniLocations.fStages[s].fTextureMatrixUni) {
+ int baseIdx = GrGLProgram::TextureMatrixAttributeIdx(s);
+ GL_CALL(VertexAttrib4fv(baseIdx + 0, mt+0));
+ GL_CALL(VertexAttrib4fv(baseIdx + 1, mt+3));
+ GL_CALL(VertexAttrib4fv(baseIdx + 2, mt+6));
+ } else {
+ GL_CALL(UniformMatrix3fv(uni, 1, false, mt));
+ }
+ recordHWSamplerMatrix(s, getSamplerMatrix(s));
+ }
+ }
+}
+
+void GrGpuGLShaders::flushRadial2(int s) {
+
+ const int &uni = fProgramData->fUniLocations.fStages[s].fRadial2Uni;
+ const GrSamplerState& sampler = fCurrDrawState.fSamplerStates[s];
+ if (GrGLProgram::kUnusedUniform != uni &&
+ (fProgramData->fRadial2CenterX1[s] != sampler.getRadial2CenterX1() ||
+ fProgramData->fRadial2Radius0[s] != sampler.getRadial2Radius0() ||
+ fProgramData->fRadial2PosRoot[s] != sampler.isRadial2PosRoot())) {
+
+ GrScalar centerX1 = sampler.getRadial2CenterX1();
+ GrScalar radius0 = sampler.getRadial2Radius0();
+
+ GrScalar a = GrMul(centerX1, centerX1) - GR_Scalar1;
+
+ // when we're in the degenerate (linear) case the second
+ // value will be INF but the program doesn't read it. (We
+ // use the same 6 uniforms even though we don't need them
+ // all in the linear case just to keep the code complexity
+ // down).
+ float values[6] = {
+ GrScalarToFloat(a),
+ 1 / (2.f * values[0]),
+ GrScalarToFloat(centerX1),
+ GrScalarToFloat(radius0),
+ GrScalarToFloat(GrMul(radius0, radius0)),
+ sampler.isRadial2PosRoot() ? 1.f : -1.f
+ };
+ GL_CALL(Uniform1fv(uni, 6, values));
+ fProgramData->fRadial2CenterX1[s] = sampler.getRadial2CenterX1();
+ fProgramData->fRadial2Radius0[s] = sampler.getRadial2Radius0();
+ fProgramData->fRadial2PosRoot[s] = sampler.isRadial2PosRoot();
+ }
+}
+
+void GrGpuGLShaders::flushConvolution(int s) {
+ const GrSamplerState& sampler = fCurrDrawState.fSamplerStates[s];
+ int kernelUni = fProgramData->fUniLocations.fStages[s].fKernelUni;
+ if (GrGLProgram::kUnusedUniform != kernelUni) {
+ GL_CALL(Uniform1fv(kernelUni, sampler.getKernelWidth(),
+ sampler.getKernel()));
+ }
+ int imageIncrementUni = fProgramData->fUniLocations.fStages[s].fImageIncrementUni;
+ if (GrGLProgram::kUnusedUniform != imageIncrementUni) {
+ GL_CALL(Uniform2fv(imageIncrementUni, 1, sampler.getImageIncrement()));
+ }
+}
+
+void GrGpuGLShaders::flushTexelSize(int s) {
+ const int& uni = fProgramData->fUniLocations.fStages[s].fNormalizedTexelSizeUni;
+ if (GrGLProgram::kUnusedUniform != uni) {
+ GrGLTexture* texture = (GrGLTexture*) fCurrDrawState.fTextures[s];
+ if (texture->allocatedWidth() != fProgramData->fTextureWidth[s] ||
+ texture->allocatedHeight() != fProgramData->fTextureWidth[s]) {
+
+ float texelSize[] = {1.f / texture->allocatedWidth(),
+ 1.f / texture->allocatedHeight()};
+ GL_CALL(Uniform2fv(uni, 1, texelSize));
+ }
+ }
+}
+
+void GrGpuGLShaders::flushEdgeAAData() {
+ const int& uni = fProgramData->fUniLocations.fEdgesUni;
+ if (GrGLProgram::kUnusedUniform != uni) {
+ int count = fCurrDrawState.fEdgeAANumEdges;
+ Edge edges[kMaxEdges];
+ // Flip the edges in Y
+ float height =
+ static_cast<float>(fCurrDrawState.fRenderTarget->height());
+ for (int i = 0; i < count; ++i) {
+ edges[i] = fCurrDrawState.fEdgeAAEdges[i];
+ float b = edges[i].fY;
+ edges[i].fY = -b;
+ edges[i].fZ += b * height;
+ }
+ GL_CALL(Uniform3fv(uni, count, &edges[0].fX));
+ }
+}
+
+static const float ONE_OVER_255 = 1.f / 255.f;
+
+#define GR_COLOR_TO_VEC4(color) {\
+ GrColorUnpackR(color) * ONE_OVER_255,\
+ GrColorUnpackG(color) * ONE_OVER_255,\
+ GrColorUnpackB(color) * ONE_OVER_255,\
+ GrColorUnpackA(color) * ONE_OVER_255 \
+}
+
+void GrGpuGLShaders::flushColor(GrColor color) {
+ const ProgramDesc& desc = fCurrentProgram.getDesc();
+ if (this->getGeomSrc().fVertexLayout & kColor_VertexLayoutBit) {
+ // color will be specified per-vertex as an attribute
+ // invalidate the const vertex attrib color
+ fHWDrawState.fColor = GrColor_ILLEGAL;
+ } else {
+ switch (desc.fColorType) {
+ case ProgramDesc::kAttribute_ColorType:
+ if (fHWDrawState.fColor != color) {
+ // OpenGL ES only supports the float varieties of glVertexAttrib
+ float c[] = GR_COLOR_TO_VEC4(color);
+ GL_CALL(VertexAttrib4fv(GrGLProgram::ColorAttributeIdx(),
+ c));
+ fHWDrawState.fColor = color;
+ }
+ break;
+ case ProgramDesc::kUniform_ColorType:
+ if (fProgramData->fColor != color) {
+ // OpenGL ES only supports the float varieties of glVertexAttrib
+ float c[] = GR_COLOR_TO_VEC4(color);
+ GrAssert(GrGLProgram::kUnusedUniform !=
+ fProgramData->fUniLocations.fColorUni);
+ GL_CALL(Uniform4fv(fProgramData->fUniLocations.fColorUni,
+ 1, c));
+ fProgramData->fColor = color;
+ }
+ break;
+ case ProgramDesc::kSolidWhite_ColorType:
+ case ProgramDesc::kTransBlack_ColorType:
+ break;
+ default:
+ GrCrash("Unknown color type.");
+ }
+ }
+ if (fProgramData->fUniLocations.fColorFilterUni
+ != GrGLProgram::kUnusedUniform
+ && fProgramData->fColorFilterColor
+ != fCurrDrawState.fColorFilterColor) {
+ float c[] = GR_COLOR_TO_VEC4(fCurrDrawState.fColorFilterColor);
+ GL_CALL(Uniform4fv(fProgramData->fUniLocations.fColorFilterUni, 1, c));
+ fProgramData->fColorFilterColor = fCurrDrawState.fColorFilterColor;
+ }
+}
+
+
+bool GrGpuGLShaders::flushGraphicsState(GrPrimitiveType type) {
+ if (!flushGLStateCommon(type)) {
+ return false;
+ }
+
+ if (fDirtyFlags.fRenderTargetChanged) {
+ // our coords are in pixel space and the GL matrices map to NDC
+ // so if the viewport changed, our matrix is now wrong.
+ fHWDrawState.fViewMatrix = GrMatrix::InvalidMatrix();
+ // we assume all shader matrices may be wrong after viewport changes
+ fProgramCache->invalidateViewMatrices();
+ }
+
+ GrBlendCoeff srcCoeff;
+ GrBlendCoeff dstCoeff;
+ BlendOptFlags blendOpts = this->getBlendOpts(false, &srcCoeff, &dstCoeff);
+ if (kSkipDraw_BlendOptFlag & blendOpts) {
+ return false;
+ }
+
+ this->buildProgram(type, blendOpts, dstCoeff);
+ fProgramData = fProgramCache->getProgramData(fCurrentProgram);
+ if (NULL == fProgramData) {
+ GrAssert(!"Failed to create program!");
+ return false;
+ }
+
+ if (fHWProgramID != fProgramData->fProgramID) {
+ GL_CALL(UseProgram(fProgramData->fProgramID));
+ fHWProgramID = fProgramData->fProgramID;
+ }
+ fCurrentProgram.overrideBlend(&srcCoeff, &dstCoeff);
+ this->flushBlend(type, srcCoeff, dstCoeff);
+
+ GrColor color;
+ if (blendOpts & kEmitTransBlack_BlendOptFlag) {
+ color = 0;
+ } else if (blendOpts & kEmitCoverage_BlendOptFlag) {
+ color = 0xffffffff;
+ } else {
+ color = fCurrDrawState.fColor;
+ }
+ this->flushColor(color);
+
+ GrMatrix* currViewMatrix;
+ if (GrGLProgram::kSetAsAttribute ==
+ fProgramData->fUniLocations.fViewMatrixUni) {
+ currViewMatrix = &fHWDrawState.fViewMatrix;
+ } else {
+ currViewMatrix = &fProgramData->fViewMatrix;
+ }
+
+ if (*currViewMatrix != fCurrDrawState.fViewMatrix) {
+ flushViewMatrix();
+ *currViewMatrix = fCurrDrawState.fViewMatrix;
+ }
+
+ for (int s = 0; s < kNumStages; ++s) {
+ this->flushTextureMatrix(s);
+
+ this->flushRadial2(s);
+
+ this->flushConvolution(s);
+
+ this->flushTexelSize(s);
+
+ this->flushTextureDomain(s);
+ }
+ this->flushEdgeAAData();
+ resetDirtyFlags();
+ return true;
+}
+
+void GrGpuGLShaders::postDraw() {
+}
+
+void GrGpuGLShaders::setupGeometry(int* startVertex,
+ int* startIndex,
+ int vertexCount,
+ int indexCount) {
+
+ int newColorOffset;
+ int newCoverageOffset;
+ int newTexCoordOffsets[kMaxTexCoords];
+ int newEdgeOffset;
+
+ GrGLsizei newStride = VertexSizeAndOffsetsByIdx(
+ this->getGeomSrc().fVertexLayout,
+ newTexCoordOffsets,
+ &newColorOffset,
+ &newCoverageOffset,
+ &newEdgeOffset);
+ int oldColorOffset;
+ int oldCoverageOffset;
+ int oldTexCoordOffsets[kMaxTexCoords];
+ int oldEdgeOffset;
+
+ GrGLsizei oldStride = VertexSizeAndOffsetsByIdx(
+ fHWGeometryState.fVertexLayout,
+ oldTexCoordOffsets,
+ &oldColorOffset,
+ &oldCoverageOffset,
+ &oldEdgeOffset);
+ bool indexed = NULL != startIndex;
+
+ int extraVertexOffset;
+ int extraIndexOffset;
+ this->setBuffers(indexed, &extraVertexOffset, &extraIndexOffset);
+
+ GrGLenum scalarType;
+ bool texCoordNorm;
+ if (this->getGeomSrc().fVertexLayout & kTextFormat_VertexLayoutBit) {
+ scalarType = GrGLTextType;
+ texCoordNorm = GR_GL_TEXT_TEXTURE_NORMALIZED;
+ } else {
+ scalarType = GrGLType;
+ texCoordNorm = false;
+ }
+
+ size_t vertexOffset = (*startVertex + extraVertexOffset) * newStride;
+ *startVertex = 0;
+ if (indexed) {
+ *startIndex += extraIndexOffset;
+ }
+
+ // all the Pointers must be set if any of these are true
+ bool allOffsetsChange = fHWGeometryState.fArrayPtrsDirty ||
+ vertexOffset != fHWGeometryState.fVertexOffset ||
+ newStride != oldStride;
+
+ // position and tex coord offsets change if above conditions are true
+ // or the type/normalization changed based on text vs nontext type coords.
+ bool posAndTexChange = allOffsetsChange ||
+ (((GrGLTextType != GrGLType) || GR_GL_TEXT_TEXTURE_NORMALIZED) &&
+ (kTextFormat_VertexLayoutBit &
+ (fHWGeometryState.fVertexLayout ^
+ this->getGeomSrc().fVertexLayout)));
+
+ if (posAndTexChange) {
+ int idx = GrGLProgram::PositionAttributeIdx();
+ GL_CALL(VertexAttribPointer(idx, 2, scalarType, false, newStride,
+ (GrGLvoid*)vertexOffset));
+ fHWGeometryState.fVertexOffset = vertexOffset;
+ }
+
+ for (int t = 0; t < kMaxTexCoords; ++t) {
+ if (newTexCoordOffsets[t] > 0) {
+ GrGLvoid* texCoordOffset = (GrGLvoid*)(vertexOffset + newTexCoordOffsets[t]);
+ int idx = GrGLProgram::TexCoordAttributeIdx(t);
+ if (oldTexCoordOffsets[t] <= 0) {
+ GL_CALL(EnableVertexAttribArray(idx));
+ GL_CALL(VertexAttribPointer(idx, 2, scalarType, texCoordNorm,
+ newStride, texCoordOffset));
+ } else if (posAndTexChange ||
+ newTexCoordOffsets[t] != oldTexCoordOffsets[t]) {
+ GL_CALL(VertexAttribPointer(idx, 2, scalarType, texCoordNorm,
+ newStride, texCoordOffset));
+ }
+ } else if (oldTexCoordOffsets[t] > 0) {
+ GL_CALL(DisableVertexAttribArray(GrGLProgram::TexCoordAttributeIdx(t)));
+ }
+ }
+
+ if (newColorOffset > 0) {
+ GrGLvoid* colorOffset = (int8_t*)(vertexOffset + newColorOffset);
+ int idx = GrGLProgram::ColorAttributeIdx();
+ if (oldColorOffset <= 0) {
+ GL_CALL(EnableVertexAttribArray(idx));
+ GL_CALL(VertexAttribPointer(idx, 4, GR_GL_UNSIGNED_BYTE,
+ true, newStride, colorOffset));
+ } else if (allOffsetsChange || newColorOffset != oldColorOffset) {
+ GL_CALL(VertexAttribPointer(idx, 4, GR_GL_UNSIGNED_BYTE,
+ true, newStride, colorOffset));
+ }
+ } else if (oldColorOffset > 0) {
+ GL_CALL(DisableVertexAttribArray(GrGLProgram::ColorAttributeIdx()));
+ }
+
+ if (newCoverageOffset > 0) {
+ // bind a single channel, they should all have the same value.
+ GrGLvoid* coverageOffset = (int8_t*)(vertexOffset + newCoverageOffset);
+ int idx = GrGLProgram::CoverageAttributeIdx();
+ if (oldCoverageOffset <= 0) {
+ GL_CALL(EnableVertexAttribArray(idx));
+ GL_CALL(VertexAttribPointer(idx, 1, GR_GL_UNSIGNED_BYTE,
+ true, newStride, coverageOffset));
+ } else if (allOffsetsChange || newCoverageOffset != oldCoverageOffset) {
+ GL_CALL(VertexAttribPointer(idx, 1, GR_GL_UNSIGNED_BYTE,
+ true, newStride, coverageOffset));
+ }
+ } else if (oldCoverageOffset > 0) {
+ GL_CALL(DisableVertexAttribArray(GrGLProgram::CoverageAttributeIdx()));
+ }
+
+ if (newEdgeOffset > 0) {
+ GrGLvoid* edgeOffset = (int8_t*)(vertexOffset + newEdgeOffset);
+ int idx = GrGLProgram::EdgeAttributeIdx();
+ if (oldEdgeOffset <= 0) {
+ GL_CALL(EnableVertexAttribArray(idx));
+ GL_CALL(VertexAttribPointer(idx, 4, scalarType,
+ false, newStride, edgeOffset));
+ } else if (allOffsetsChange || newEdgeOffset != oldEdgeOffset) {
+ GL_CALL(VertexAttribPointer(idx, 4, scalarType,
+ false, newStride, edgeOffset));
+ }
+ } else if (oldEdgeOffset > 0) {
+ GL_CALL(DisableVertexAttribArray(GrGLProgram::EdgeAttributeIdx()));
+ }
+
+ fHWGeometryState.fVertexLayout = this->getGeomSrc().fVertexLayout;
+ fHWGeometryState.fArrayPtrsDirty = false;
+}
+
+void GrGpuGLShaders::buildProgram(GrPrimitiveType type,
+ BlendOptFlags blendOpts,
+ GrBlendCoeff dstCoeff) {
+ ProgramDesc& desc = fCurrentProgram.fProgramDesc;
+
+ // This should already have been caught
+ GrAssert(!(kSkipDraw_BlendOptFlag & blendOpts));
+
+ bool skipCoverage = SkToBool(blendOpts & kEmitTransBlack_BlendOptFlag);
+
+ bool skipColor = SkToBool(blendOpts & (kEmitTransBlack_BlendOptFlag |
+ kEmitCoverage_BlendOptFlag));
+
+ // The descriptor is used as a cache key. Thus when a field of the
+ // descriptor will not affect program generation (because of the vertex
+ // layout in use or other descriptor field settings) it should be set
+ // to a canonical value to avoid duplicate programs with different keys.
+
+ // Must initialize all fields or cache will have false negatives!
+ desc.fVertexLayout = this->getGeomSrc().fVertexLayout;
+
+ desc.fEmitsPointSize = kPoints_PrimitiveType == type;
+
+ bool requiresAttributeColors =
+ !skipColor && SkToBool(desc.fVertexLayout & kColor_VertexLayoutBit);
+ // fColorType records how colors are specified for the program. Strip
+ // the bit from the layout to avoid false negatives when searching for an
+ // existing program in the cache.
+ desc.fVertexLayout &= ~(kColor_VertexLayoutBit);
+
+ desc.fColorFilterXfermode = skipColor ?
+ SkXfermode::kDst_Mode :
+ fCurrDrawState.fColorFilterXfermode;
+
+ // no reason to do edge aa or look at per-vertex coverage if coverage is
+ // ignored
+ if (skipCoverage) {
+ desc.fVertexLayout &= ~(kEdge_VertexLayoutBit |
+ kCoverage_VertexLayoutBit);
+ }
+
+ bool colorIsTransBlack = SkToBool(blendOpts & kEmitTransBlack_BlendOptFlag);
+ bool colorIsSolidWhite = (blendOpts & kEmitCoverage_BlendOptFlag) ||
+ (!requiresAttributeColors &&
+ 0xffffffff == fCurrDrawState.fColor);
+ if (GR_AGGRESSIVE_SHADER_OPTS && colorIsTransBlack) {
+ desc.fColorType = ProgramDesc::kTransBlack_ColorType;
+ } else if (GR_AGGRESSIVE_SHADER_OPTS && colorIsSolidWhite) {
+ desc.fColorType = ProgramDesc::kSolidWhite_ColorType;
+ } else if (GR_GL_NO_CONSTANT_ATTRIBUTES && !requiresAttributeColors) {
+ desc.fColorType = ProgramDesc::kUniform_ColorType;
+ } else {
+ desc.fColorType = ProgramDesc::kAttribute_ColorType;
+ }
+
+ desc.fEdgeAANumEdges = skipCoverage ? 0 : fCurrDrawState.fEdgeAANumEdges;
+ desc.fEdgeAAConcave = desc.fEdgeAANumEdges > 0 &&
+ SkToBool(fCurrDrawState.fFlagBits &
+ kEdgeAAConcave_StateBit);
+
+ int lastEnabledStage = -1;
+
+ if (!skipCoverage && (desc.fVertexLayout &
+ GrDrawTarget::kEdge_VertexLayoutBit)) {
+ desc.fVertexEdgeType = fCurrDrawState.fVertexEdgeType;
+ } else {
+ // use canonical value when not set to avoid cache misses
+ desc.fVertexEdgeType = GrDrawTarget::kHairLine_EdgeType;
+ }
+
+ for (int s = 0; s < kNumStages; ++s) {
+ StageDesc& stage = desc.fStages[s];
+
+ stage.fOptFlags = 0;
+ stage.setEnabled(this->isStageEnabled(s));
+
+ bool skip = s < fCurrDrawState.fFirstCoverageStage ? skipColor :
+ skipCoverage;
+
+ if (!skip && stage.isEnabled()) {
+ lastEnabledStage = s;
+ GrGLTexture* texture = (GrGLTexture*) fCurrDrawState.fTextures[s];
+ GrAssert(NULL != texture);
+ const GrSamplerState& sampler = fCurrDrawState.fSamplerStates[s];
+            // we have a matrix to invert when orientation is TopDown, so make
+            // sure we aren't in that case before flagging as identity.
+ if (TextureMatrixIsIdentity(texture, sampler)) {
+ stage.fOptFlags |= StageDesc::kIdentityMatrix_OptFlagBit;
+ } else if (!getSamplerMatrix(s).hasPerspective()) {
+ stage.fOptFlags |= StageDesc::kNoPerspective_OptFlagBit;
+ }
+ switch (sampler.getSampleMode()) {
+ case GrSamplerState::kNormal_SampleMode:
+ stage.fCoordMapping = StageDesc::kIdentity_CoordMapping;
+ break;
+ case GrSamplerState::kRadial_SampleMode:
+ stage.fCoordMapping = StageDesc::kRadialGradient_CoordMapping;
+ break;
+ case GrSamplerState::kRadial2_SampleMode:
+ if (sampler.radial2IsDegenerate()) {
+ stage.fCoordMapping =
+ StageDesc::kRadial2GradientDegenerate_CoordMapping;
+ } else {
+ stage.fCoordMapping =
+ StageDesc::kRadial2Gradient_CoordMapping;
+ }
+ break;
+ case GrSamplerState::kSweep_SampleMode:
+ stage.fCoordMapping = StageDesc::kSweepGradient_CoordMapping;
+ break;
+ default:
+ GrCrash("Unexpected sample mode!");
+ break;
+ }
+
+ switch (sampler.getFilter()) {
+ // these both can use a regular texture2D()
+ case GrSamplerState::kNearest_Filter:
+ case GrSamplerState::kBilinear_Filter:
+ stage.fFetchMode = StageDesc::kSingle_FetchMode;
+ break;
+ // performs 4 texture2D()s
+ case GrSamplerState::k4x4Downsample_Filter:
+ stage.fFetchMode = StageDesc::k2x2_FetchMode;
+ break;
+ // performs fKernelWidth texture2D()s
+ case GrSamplerState::kConvolution_Filter:
+ stage.fFetchMode = StageDesc::kConvolution_FetchMode;
+ break;
+ default:
+ GrCrash("Unexpected filter!");
+ break;
+ }
+
+ if (sampler.hasTextureDomain()) {
+ GrAssert(GrSamplerState::kClamp_WrapMode ==
+ sampler.getWrapX() &&
+ GrSamplerState::kClamp_WrapMode ==
+ sampler.getWrapY());
+ stage.fOptFlags |= StageDesc::kCustomTextureDomain_OptFlagBit;
+ }
+
+ if (GrPixelConfigIsAlphaOnly(texture->config())) {
+ stage.fModulation = StageDesc::kAlpha_Modulation;
+ } else {
+ stage.fModulation = StageDesc::kColor_Modulation;
+ }
+ if (sampler.getFilter() == GrSamplerState::kConvolution_Filter) {
+ stage.fKernelWidth = sampler.getKernelWidth();
+ } else {
+ stage.fKernelWidth = 0;
+ }
+ } else {
+ stage.fOptFlags = 0;
+ stage.fCoordMapping = (StageDesc::CoordMapping)0;
+ stage.fModulation = (StageDesc::Modulation)0;
+ }
+ }
+
+ desc.fDualSrcOutput = ProgramDesc::kNone_DualSrcOutput;
+
+ // currently the experimental GS will only work with triangle prims
+ // (and it doesn't do anything other than pass through values from
+ // the VS to the FS anyway).
+#if 0 && GR_GL_EXPERIMENTAL_GS
+ desc.fExperimentalGS = this->getCaps().fGeometryShaderSupport;
+#endif
+
+ // we want to avoid generating programs with different "first cov stage"
+ // values when they would compute the same result.
+ // We set field in the desc to kNumStages when either there are no
+ // coverage stages or the distinction between coverage and color is
+ // immaterial.
+ int firstCoverageStage = kNumStages;
+ desc.fFirstCoverageStage = kNumStages;
+ bool hasCoverage = fCurrDrawState.fFirstCoverageStage <= lastEnabledStage;
+ if (hasCoverage) {
+ firstCoverageStage = fCurrDrawState.fFirstCoverageStage;
+ }
+
+ // other coverage inputs
+ if (!hasCoverage) {
+ hasCoverage =
+ desc.fEdgeAANumEdges ||
+ (desc.fVertexLayout & GrDrawTarget::kCoverage_VertexLayoutBit) ||
+ (desc.fVertexLayout & GrDrawTarget::kEdge_VertexLayoutBit);
+ }
+
+ if (hasCoverage) {
+ // color filter is applied between color/coverage computation
+ if (SkXfermode::kDst_Mode != desc.fColorFilterXfermode) {
+ desc.fFirstCoverageStage = firstCoverageStage;
+ }
+
+ if (this->getCaps().fDualSourceBlendingSupport &&
+ !(blendOpts & (kEmitCoverage_BlendOptFlag |
+ kCoverageAsAlpha_BlendOptFlag))) {
+ if (kZero_BlendCoeff == dstCoeff) {
+ // write the coverage value to second color
+ desc.fDualSrcOutput = ProgramDesc::kCoverage_DualSrcOutput;
+ desc.fFirstCoverageStage = firstCoverageStage;
+ } else if (kSA_BlendCoeff == dstCoeff) {
+                // SA dst coeff becomes 1-(1-SA)*coverage when dst is partially
+                // covered
+ desc.fDualSrcOutput = ProgramDesc::kCoverageISA_DualSrcOutput;
+ desc.fFirstCoverageStage = firstCoverageStage;
+ } else if (kSC_BlendCoeff == dstCoeff) {
+                // SC dst coeff becomes 1-(1-SC)*coverage when dst is partially
+                // covered
+ desc.fDualSrcOutput = ProgramDesc::kCoverageISC_DualSrcOutput;
+ desc.fFirstCoverageStage = firstCoverageStage;
+ }
+ }
+ }
+}
diff --git a/src/gpu/GrGpuGLShaders.h b/src/gpu/GrGpuGLShaders.h
new file mode 100644
index 0000000000..0c8322b7b4
--- /dev/null
+++ b/src/gpu/GrGpuGLShaders.h
@@ -0,0 +1,93 @@
+
+/*
+ * Copyright 2011 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+
+#ifndef GrGpuGLShaders_DEFINED
+#define GrGpuGLShaders_DEFINED
+
+#include "GrGpuGL.h"
+#include "GrGLProgram.h"
+
+class GrGpuGLProgram;
+
+// Programmable OpenGL or OpenGL ES 2.0
+class GrGpuGLShaders : public GrGpuGL {
+public:
+ GrGpuGLShaders(const GrGLInterface* glInterface);
+ virtual ~GrGpuGLShaders();
+
+ virtual void resetContext();
+
+ virtual void abandonResources();
+
+ bool programUnitTest();
+
+protected:
+ // overrides from GrGpu
+ virtual bool flushGraphicsState(GrPrimitiveType type);
+ virtual void setupGeometry(int* startVertex,
+ int* startIndex,
+ int vertexCount,
+ int indexCount);
+ virtual void postDraw();
+
+private:
+
+ // for readability of function impls
+ typedef GrGLProgram::ProgramDesc ProgramDesc;
+ typedef ProgramDesc::StageDesc StageDesc;
+ typedef GrGLProgram::CachedData CachedData;
+
+ class ProgramCache;
+
+ // Helpers to make code more readable
+ const GrMatrix& getHWSamplerMatrix(int stage);
+ void recordHWSamplerMatrix(int stage, const GrMatrix& matrix);
+
+ // sets the texture matrix uniform for currently bound program
+ void flushTextureMatrix(int stage);
+
+ // sets the texture domain uniform for currently bound program
+ void flushTextureDomain(int stage);
+
+ // sets the color specified by GrDrawTarget::setColor()
+ void flushColor(GrColor color);
+
+ // sets the MVP matrix uniform for currently bound program
+ void flushViewMatrix();
+
+ // flushes the parameters to two point radial gradient
+ void flushRadial2(int stage);
+
+ // flushes the parameters for convolution
+ void flushConvolution(int stage);
+
+ // flushes the normalized texel size
+ void flushTexelSize(int stage);
+
+ // flushes the edges for edge AA
+ void flushEdgeAAData();
+
+ static void DeleteProgram(const GrGLInterface* gl,
+ CachedData* programData);
+
+ void buildProgram(GrPrimitiveType typeBlend,
+ BlendOptFlags blendOpts,
+ GrBlendCoeff dstCoeff);
+
+ ProgramCache* fProgramCache;
+ CachedData* fProgramData;
+ GrGLuint fHWProgramID;
+ GrGLProgram fCurrentProgram;
+
+ typedef GrGpuGL INHERITED;
+};
+
+#endif
+
diff --git a/src/gpu/GrInOrderDrawBuffer.cpp b/src/gpu/GrInOrderDrawBuffer.cpp
new file mode 100644
index 0000000000..5e3c769d32
--- /dev/null
+++ b/src/gpu/GrInOrderDrawBuffer.cpp
@@ -0,0 +1,618 @@
+
+/*
+ * Copyright 2011 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+
+#include "GrInOrderDrawBuffer.h"
+#include "GrTexture.h"
+#include "GrBufferAllocPool.h"
+#include "GrIndexBuffer.h"
+#include "GrVertexBuffer.h"
+#include "GrGpu.h"
+
+GrInOrderDrawBuffer::GrInOrderDrawBuffer(const GrGpu* gpu,
+ GrVertexBufferAllocPool* vertexPool,
+ GrIndexBufferAllocPool* indexPool)
+ : fClipSet(true)
+ , fLastRectVertexLayout(0)
+ , fQuadIndexBuffer(NULL)
+ , fMaxQuads(0)
+ , fCurrQuad(0)
+ , fVertexPool(*vertexPool)
+ , fIndexPool(*indexPool) {
+
+ fCaps = gpu->getCaps();
+
+ GrAssert(NULL != vertexPool);
+ GrAssert(NULL != indexPool);
+
+ GeometryPoolState& poolState = fGeoPoolStateStack.push_back();
+ poolState.fUsedPoolVertexBytes = 0;
+ poolState.fUsedPoolIndexBytes = 0;
+#if GR_DEBUG
+ poolState.fPoolVertexBuffer = (GrVertexBuffer*)~0;
+ poolState.fPoolStartVertex = ~0;
+ poolState.fPoolIndexBuffer = (GrIndexBuffer*)~0;
+ poolState.fPoolStartIndex = ~0;
+#endif
+}
+
+GrInOrderDrawBuffer::~GrInOrderDrawBuffer() {
+ this->reset();
+ GrSafeUnref(fQuadIndexBuffer);
+}
+
+void GrInOrderDrawBuffer::initializeDrawStateAndClip(const GrDrawTarget& target) {
+ this->copyDrawState(target);
+ this->setClip(target.getClip());
+}
+
+void GrInOrderDrawBuffer::setQuadIndexBuffer(const GrIndexBuffer* indexBuffer) {
+ bool newIdxBuffer = fQuadIndexBuffer != indexBuffer;
+ if (newIdxBuffer) {
+ GrSafeUnref(fQuadIndexBuffer);
+ fQuadIndexBuffer = indexBuffer;
+ GrSafeRef(fQuadIndexBuffer);
+ fCurrQuad = 0;
+ fMaxQuads = (NULL == indexBuffer) ? 0 : indexBuffer->maxQuads();
+ } else {
+ GrAssert((NULL == indexBuffer && 0 == fMaxQuads) ||
+ (indexBuffer->maxQuads() == fMaxQuads));
+ }
+}
+
+void GrInOrderDrawBuffer::drawRect(const GrRect& rect,
+ const GrMatrix* matrix,
+ StageBitfield stageEnableBitfield,
+ const GrRect* srcRects[],
+ const GrMatrix* srcMatrices[]) {
+
+ GrAssert(!(NULL == fQuadIndexBuffer && fCurrQuad));
+ GrAssert(!(fDraws.empty() && fCurrQuad));
+ GrAssert(!(0 != fMaxQuads && NULL == fQuadIndexBuffer));
+
+ // if we have a quad IB then either append to the previous run of
+ // rects or start a new run
+ if (fMaxQuads) {
+
+ bool appendToPreviousDraw = false;
+ GrVertexLayout layout = GetRectVertexLayout(stageEnableBitfield, srcRects);
+ AutoReleaseGeometry geo(this, layout, 4, 0);
+ if (!geo.succeeded()) {
+ GrPrintf("Failed to get space for vertices!\n");
+ return;
+ }
+ AutoViewMatrixRestore avmr(this);
+ GrMatrix combinedMatrix = this->getViewMatrix();
+ this->setViewMatrix(GrMatrix::I());
+ if (NULL != matrix) {
+ combinedMatrix.preConcat(*matrix);
+ }
+
+ SetRectVertices(rect, &combinedMatrix, srcRects, srcMatrices, layout, geo.vertices());
+
+ // we don't want to miss an opportunity to batch rects together
+ // simply because the clip has changed if the clip doesn't affect
+ // the rect.
+ bool disabledClip = false;
+ if (this->isClipState() && fClip.isRect()) {
+
+ GrRect clipRect = fClip.getRect(0);
+            // If the clip rect touches the edge of the viewport, extend it
+ // out (close) to infinity to avoid bogus intersections.
+ // We might consider a more exact clip to viewport if this
+ // conservative test fails.
+ const GrRenderTarget* target = this->getRenderTarget();
+ if (0 >= clipRect.fLeft) {
+ clipRect.fLeft = GR_ScalarMin;
+ }
+ if (target->width() <= clipRect.fRight) {
+ clipRect.fRight = GR_ScalarMax;
+ }
+ if (0 >= clipRect.top()) {
+ clipRect.fTop = GR_ScalarMin;
+ }
+ if (target->height() <= clipRect.fBottom) {
+ clipRect.fBottom = GR_ScalarMax;
+ }
+ int stride = VertexSize(layout);
+ bool insideClip = true;
+ for (int v = 0; v < 4; ++v) {
+ const GrPoint& p = *GetVertexPoint(geo.vertices(), v, stride);
+ if (!clipRect.contains(p)) {
+ insideClip = false;
+ break;
+ }
+ }
+ if (insideClip) {
+ this->disableState(kClip_StateBit);
+ disabledClip = true;
+ }
+ }
+ if (!needsNewClip() && !needsNewState() && fCurrQuad > 0 &&
+ fCurrQuad < fMaxQuads && layout == fLastRectVertexLayout) {
+
+ int vsize = VertexSize(layout);
+
+ Draw& lastDraw = fDraws.back();
+
+ GrAssert(lastDraw.fIndexBuffer == fQuadIndexBuffer);
+ GrAssert(kTriangles_PrimitiveType == lastDraw.fPrimitiveType);
+ GrAssert(0 == lastDraw.fVertexCount % 4);
+ GrAssert(0 == lastDraw.fIndexCount % 6);
+ GrAssert(0 == lastDraw.fStartIndex);
+
+ GeometryPoolState& poolState = fGeoPoolStateStack.back();
+ bool clearSinceLastDraw =
+ fClears.count() &&
+ fClears.back().fBeforeDrawIdx == fDraws.count();
+
+ appendToPreviousDraw =
+ !clearSinceLastDraw &&
+ lastDraw.fVertexBuffer == poolState.fPoolVertexBuffer &&
+ (fCurrQuad * 4 + lastDraw.fStartVertex) == poolState.fPoolStartVertex;
+
+ if (appendToPreviousDraw) {
+ lastDraw.fVertexCount += 4;
+ lastDraw.fIndexCount += 6;
+ fCurrQuad += 1;
+ // we reserved above, so we should be the first
+                // use of this vertex reservation.
+ GrAssert(0 == poolState.fUsedPoolVertexBytes);
+ poolState.fUsedPoolVertexBytes = 4 * vsize;
+ }
+ }
+ if (!appendToPreviousDraw) {
+ this->setIndexSourceToBuffer(fQuadIndexBuffer);
+ drawIndexed(kTriangles_PrimitiveType, 0, 0, 4, 6);
+ fCurrQuad = 1;
+ fLastRectVertexLayout = layout;
+ }
+ if (disabledClip) {
+ this->enableState(kClip_StateBit);
+ }
+ } else {
+ INHERITED::drawRect(rect, matrix, stageEnableBitfield, srcRects, srcMatrices);
+ }
+}
+
+void GrInOrderDrawBuffer::onDrawIndexed(GrPrimitiveType primitiveType,
+ int startVertex,
+ int startIndex,
+ int vertexCount,
+ int indexCount) {
+
+ if (!vertexCount || !indexCount) {
+ return;
+ }
+
+ fCurrQuad = 0;
+
+ GeometryPoolState& poolState = fGeoPoolStateStack.back();
+
+ Draw& draw = fDraws.push_back();
+ draw.fPrimitiveType = primitiveType;
+ draw.fStartVertex = startVertex;
+ draw.fStartIndex = startIndex;
+ draw.fVertexCount = vertexCount;
+ draw.fIndexCount = indexCount;
+
+ draw.fClipChanged = this->needsNewClip();
+ if (draw.fClipChanged) {
+ this->pushClip();
+ }
+
+ draw.fStateChanged = this->needsNewState();
+ if (draw.fStateChanged) {
+ this->pushState();
+ }
+
+ draw.fVertexLayout = this->getGeomSrc().fVertexLayout;
+ switch (this->getGeomSrc().fVertexSrc) {
+ case kBuffer_GeometrySrcType:
+ draw.fVertexBuffer = this->getGeomSrc().fVertexBuffer;
+ break;
+ case kReserved_GeometrySrcType: // fallthrough
+ case kArray_GeometrySrcType: {
+ size_t vertexBytes = (vertexCount + startVertex) *
+ VertexSize(this->getGeomSrc().fVertexLayout);
+ poolState.fUsedPoolVertexBytes =
+ GrMax(poolState.fUsedPoolVertexBytes, vertexBytes);
+ draw.fVertexBuffer = poolState.fPoolVertexBuffer;
+ draw.fStartVertex += poolState.fPoolStartVertex;
+ break;
+ }
+ default:
+ GrCrash("unknown geom src type");
+ }
+ draw.fVertexBuffer->ref();
+
+ switch (this->getGeomSrc().fIndexSrc) {
+ case kBuffer_GeometrySrcType:
+ draw.fIndexBuffer = this->getGeomSrc().fIndexBuffer;
+ break;
+ case kReserved_GeometrySrcType: // fallthrough
+ case kArray_GeometrySrcType: {
+ size_t indexBytes = (indexCount + startIndex) * sizeof(uint16_t);
+ poolState.fUsedPoolIndexBytes =
+ GrMax(poolState.fUsedPoolIndexBytes, indexBytes);
+ draw.fIndexBuffer = poolState.fPoolIndexBuffer;
+ draw.fStartIndex += poolState.fPoolStartVertex;
+ break;
+ }
+ default:
+ GrCrash("unknown geom src type");
+ }
+ draw.fIndexBuffer->ref();
+}
+
+void GrInOrderDrawBuffer::onDrawNonIndexed(GrPrimitiveType primitiveType,
+ int startVertex,
+ int vertexCount) {
+ if (!vertexCount) {
+ return;
+ }
+
+ fCurrQuad = 0;
+
+ GeometryPoolState& poolState = fGeoPoolStateStack.back();
+
+ Draw& draw = fDraws.push_back();
+ draw.fPrimitiveType = primitiveType;
+ draw.fStartVertex = startVertex;
+ draw.fStartIndex = 0;
+ draw.fVertexCount = vertexCount;
+ draw.fIndexCount = 0;
+
+ draw.fClipChanged = this->needsNewClip();
+ if (draw.fClipChanged) {
+ this->pushClip();
+ }
+
+ draw.fStateChanged = this->needsNewState();
+ if (draw.fStateChanged) {
+ this->pushState();
+ }
+
+ draw.fVertexLayout = this->getGeomSrc().fVertexLayout;
+ switch (this->getGeomSrc().fVertexSrc) {
+ case kBuffer_GeometrySrcType:
+ draw.fVertexBuffer = this->getGeomSrc().fVertexBuffer;
+ break;
+ case kReserved_GeometrySrcType: // fallthrough
+ case kArray_GeometrySrcType: {
+ size_t vertexBytes = (vertexCount + startVertex) *
+ VertexSize(this->getGeomSrc().fVertexLayout);
+ poolState.fUsedPoolVertexBytes =
+ GrMax(poolState.fUsedPoolVertexBytes, vertexBytes);
+ draw.fVertexBuffer = poolState.fPoolVertexBuffer;
+ draw.fStartVertex += poolState.fPoolStartVertex;
+ break;
+ }
+ default:
+ GrCrash("unknown geom src type");
+ }
+ draw.fVertexBuffer->ref();
+ draw.fIndexBuffer = NULL;
+}
+
+void GrInOrderDrawBuffer::clear(const GrIRect* rect, GrColor color) {
+ GrIRect r;
+ if (NULL == rect) {
+ // We could do something smart and remove previous draws and clears to
+ // the current render target. If we get that smart we have to make sure
+ // those draws aren't read before this clear (render-to-texture).
+ r.setLTRB(0, 0,
+ this->getRenderTarget()->width(),
+ this->getRenderTarget()->height());
+ rect = &r;
+ }
+ Clear& clr = fClears.push_back();
+ clr.fColor = color;
+ clr.fBeforeDrawIdx = fDraws.count();
+ clr.fRect = *rect;
+}
+
+void GrInOrderDrawBuffer::reset() {
+ GrAssert(1 == fGeoPoolStateStack.count());
+ this->resetVertexSource();
+ this->resetIndexSource();
+ uint32_t numStates = fStates.count();
+ for (uint32_t i = 0; i < numStates; ++i) {
+ const DrState& dstate = this->accessSavedDrawState(fStates[i]);
+ for (int s = 0; s < kNumStages; ++s) {
+ GrSafeUnref(dstate.fTextures[s]);
+ }
+ GrSafeUnref(dstate.fRenderTarget);
+ }
+ int numDraws = fDraws.count();
+ for (int d = 0; d < numDraws; ++d) {
+ // we always have a VB, but not always an IB
+ GrAssert(NULL != fDraws[d].fVertexBuffer);
+ fDraws[d].fVertexBuffer->unref();
+ GrSafeUnref(fDraws[d].fIndexBuffer);
+ }
+ fDraws.reset();
+ fStates.reset();
+
+ fClears.reset();
+
+ fVertexPool.reset();
+ fIndexPool.reset();
+
+ fClips.reset();
+
+ fCurrQuad = 0;
+}
+
+void GrInOrderDrawBuffer::playback(GrDrawTarget* target) {
+ GrAssert(kReserved_GeometrySrcType != this->getGeomSrc().fVertexSrc);
+ GrAssert(kReserved_GeometrySrcType != this->getGeomSrc().fIndexSrc);
+ GrAssert(NULL != target);
+    GrAssert(target != this); // playing a draw buffer back into itself is not supported
+
+ int numDraws = fDraws.count();
+ if (!numDraws) {
+ return;
+ }
+
+ fVertexPool.unlock();
+ fIndexPool.unlock();
+
+ GrDrawTarget::AutoStateRestore asr(target);
+ GrDrawTarget::AutoClipRestore acr(target);
+ AutoGeometryPush agp(target);
+
+ int currState = ~0;
+ int currClip = ~0;
+ int currClear = 0;
+
+ for (int i = 0; i < numDraws; ++i) {
+ while (currClear < fClears.count() &&
+ i == fClears[currClear].fBeforeDrawIdx) {
+ target->clear(&fClears[currClear].fRect, fClears[currClear].fColor);
+ ++currClear;
+ }
+
+ const Draw& draw = fDraws[i];
+ if (draw.fStateChanged) {
+ ++currState;
+ target->restoreDrawState(fStates[currState]);
+ }
+ if (draw.fClipChanged) {
+ ++currClip;
+ target->setClip(fClips[currClip]);
+ }
+
+ target->setVertexSourceToBuffer(draw.fVertexLayout, draw.fVertexBuffer);
+
+ if (draw.fIndexCount) {
+ target->setIndexSourceToBuffer(draw.fIndexBuffer);
+ }
+
+ if (draw.fIndexCount) {
+ target->drawIndexed(draw.fPrimitiveType,
+ draw.fStartVertex,
+ draw.fStartIndex,
+ draw.fVertexCount,
+ draw.fIndexCount);
+ } else {
+ target->drawNonIndexed(draw.fPrimitiveType,
+ draw.fStartVertex,
+ draw.fVertexCount);
+ }
+ }
+ while (currClear < fClears.count()) {
+ GrAssert(fDraws.count() == fClears[currClear].fBeforeDrawIdx);
+ target->clear(&fClears[currClear].fRect, fClears[currClear].fColor);
+ ++currClear;
+ }
+}
+
+bool GrInOrderDrawBuffer::geometryHints(GrVertexLayout vertexLayout,
+ int* vertexCount,
+ int* indexCount) const {
+ // we will recommend a flush if the data could fit in a single
+ // preallocated buffer but none are left and it can't fit
+ // in the current buffer (which may not be prealloced).
+ bool flush = false;
+ if (NULL != indexCount) {
+ int32_t currIndices = fIndexPool.currentBufferIndices();
+ if (*indexCount > currIndices &&
+ (!fIndexPool.preallocatedBuffersRemaining() &&
+ *indexCount <= fIndexPool.preallocatedBufferIndices())) {
+
+ flush = true;
+ }
+ *indexCount = currIndices;
+ }
+ if (NULL != vertexCount) {
+ int32_t currVertices = fVertexPool.currentBufferVertices(vertexLayout);
+ if (*vertexCount > currVertices &&
+ (!fVertexPool.preallocatedBuffersRemaining() &&
+ *vertexCount <= fVertexPool.preallocatedBufferVertices(vertexLayout))) {
+
+ flush = true;
+ }
+ *vertexCount = currVertices;
+ }
+ return flush;
+}
+
+bool GrInOrderDrawBuffer::onReserveVertexSpace(GrVertexLayout vertexLayout,
+ int vertexCount,
+ void** vertices) {
+ GeometryPoolState& poolState = fGeoPoolStateStack.back();
+ GrAssert(vertexCount > 0);
+ GrAssert(NULL != vertices);
+ GrAssert(0 == poolState.fUsedPoolVertexBytes);
+
+ *vertices = fVertexPool.makeSpace(vertexLayout,
+ vertexCount,
+ &poolState.fPoolVertexBuffer,
+ &poolState.fPoolStartVertex);
+ return NULL != *vertices;
+}
+
+bool GrInOrderDrawBuffer::onReserveIndexSpace(int indexCount, void** indices) {
+ GeometryPoolState& poolState = fGeoPoolStateStack.back();
+ GrAssert(indexCount > 0);
+ GrAssert(NULL != indices);
+ GrAssert(0 == poolState.fUsedPoolIndexBytes);
+
+ *indices = fIndexPool.makeSpace(indexCount,
+ &poolState.fPoolIndexBuffer,
+ &poolState.fPoolStartIndex);
+ return NULL != *indices;
+}
+
+void GrInOrderDrawBuffer::releaseReservedVertexSpace() {
+ GeometryPoolState& poolState = fGeoPoolStateStack.back();
+ const GeometrySrcState& geoSrc = this->getGeomSrc();
+
+ GrAssert(kReserved_GeometrySrcType == geoSrc.fVertexSrc);
+
+ size_t reservedVertexBytes = VertexSize(geoSrc.fVertexLayout) *
+ geoSrc.fVertexCount;
+ fVertexPool.putBack(reservedVertexBytes -
+ poolState.fUsedPoolVertexBytes);
+ poolState.fUsedPoolVertexBytes = 0;
+ poolState.fPoolVertexBuffer = 0;
+}
+
+void GrInOrderDrawBuffer::releaseReservedIndexSpace() {
+ GeometryPoolState& poolState = fGeoPoolStateStack.back();
+ const GeometrySrcState& geoSrc = this->getGeomSrc();
+
+ GrAssert(kReserved_GeometrySrcType == geoSrc.fIndexSrc);
+
+ size_t reservedIndexBytes = sizeof(uint16_t) * geoSrc.fIndexCount;
+ fIndexPool.putBack(reservedIndexBytes - poolState.fUsedPoolIndexBytes);
+ poolState.fUsedPoolIndexBytes = 0;
+ poolState.fPoolStartVertex = 0;
+}
+
+void GrInOrderDrawBuffer::onSetVertexSourceToArray(const void* vertexArray,
+ int vertexCount) {
+
+ GeometryPoolState& poolState = fGeoPoolStateStack.back();
+ GrAssert(0 == poolState.fUsedPoolVertexBytes);
+#if GR_DEBUG
+ bool success =
+#endif
+ fVertexPool.appendVertices(this->getGeomSrc().fVertexLayout,
+ vertexCount,
+ vertexArray,
+ &poolState.fPoolVertexBuffer,
+ &poolState.fPoolStartVertex);
+ GR_DEBUGASSERT(success);
+}
+
+void GrInOrderDrawBuffer::onSetIndexSourceToArray(const void* indexArray,
+ int indexCount) {
+ GeometryPoolState& poolState = fGeoPoolStateStack.back();
+ GrAssert(0 == poolState.fUsedPoolIndexBytes);
+#if GR_DEBUG
+ bool success =
+#endif
+ fIndexPool.appendIndices(indexCount,
+ indexArray,
+ &poolState.fPoolIndexBuffer,
+ &poolState.fPoolStartIndex);
+ GR_DEBUGASSERT(success);
+}
+
+void GrInOrderDrawBuffer::geometrySourceWillPush() {
+ GeometryPoolState& poolState = fGeoPoolStateStack.push_back();
+ poolState.fUsedPoolVertexBytes = 0;
+ poolState.fUsedPoolIndexBytes = 0;
+#if GR_DEBUG
+ poolState.fPoolVertexBuffer = (GrVertexBuffer*)~0;
+ poolState.fPoolStartVertex = ~0;
+ poolState.fPoolIndexBuffer = (GrIndexBuffer*)~0;
+ poolState.fPoolStartIndex = ~0;
+#endif
+}
+
+void GrInOrderDrawBuffer::releaseVertexArray() {
+ GeometryPoolState& poolState = fGeoPoolStateStack.back();
+ const GeometrySrcState& geoSrc = this->getGeomSrc();
+
+ size_t reservedVertexBytes = VertexSize(geoSrc.fVertexLayout) *
+ geoSrc.fVertexCount;
+ fVertexPool.putBack(reservedVertexBytes - poolState.fUsedPoolVertexBytes);
+
+ poolState.fUsedPoolVertexBytes = 0;
+}
+
+void GrInOrderDrawBuffer::releaseIndexArray() {
+ GeometryPoolState& poolState = fGeoPoolStateStack.back();
+ const GeometrySrcState& geoSrc = this->getGeomSrc();
+
+ size_t reservedIndexBytes = sizeof(uint16_t) * geoSrc.fIndexCount;
+ fIndexPool.putBack(reservedIndexBytes - poolState.fUsedPoolIndexBytes);
+
+ poolState.fUsedPoolIndexBytes = 0;
+}
+
+void GrInOrderDrawBuffer::geometrySourceWillPop(
+ const GeometrySrcState& restoredState) {
+ GrAssert(fGeoPoolStateStack.count() > 1);
+ fGeoPoolStateStack.pop_back();
+ GeometryPoolState& poolState = fGeoPoolStateStack.back();
+ // we have to assume that any slack we had in our vertex/index data
+ // is now unreleasable because data may have been appended later in the
+ // pool.
+ if (kReserved_GeometrySrcType == restoredState.fVertexSrc ||
+ kArray_GeometrySrcType == restoredState.fVertexSrc) {
+ poolState.fUsedPoolVertexBytes =
+ VertexSize(restoredState.fVertexLayout) *
+ restoredState.fVertexCount;
+ }
+ if (kReserved_GeometrySrcType == restoredState.fIndexSrc ||
+ kArray_GeometrySrcType == restoredState.fIndexSrc) {
+ poolState.fUsedPoolVertexBytes = sizeof(uint16_t) *
+ restoredState.fIndexCount;
+ }
+}
+
+bool GrInOrderDrawBuffer::needsNewState() const {
+ if (fStates.empty()) {
+ return true;
+ } else {
+ const DrState& old = this->accessSavedDrawState(fStates.back());
+ return old != fCurrDrawState;
+ }
+}
+
+void GrInOrderDrawBuffer::pushState() {
+ for (int s = 0; s < kNumStages; ++s) {
+ GrSafeRef(fCurrDrawState.fTextures[s]);
+ }
+ GrSafeRef(fCurrDrawState.fRenderTarget);
+ this->saveCurrentDrawState(&fStates.push_back());
+ }
+
+bool GrInOrderDrawBuffer::needsNewClip() const {
+ if (fCurrDrawState.fFlagBits & kClip_StateBit) {
+ if (fClips.empty() || (fClipSet && fClips.back() != fClip)) {
+ return true;
+ }
+ }
+ return false;
+}
+
+void GrInOrderDrawBuffer::pushClip() {
+ fClips.push_back() = fClip;
+ fClipSet = false;
+}
+
+void GrInOrderDrawBuffer::clipWillBeSet(const GrClip& newClip) {
+ INHERITED::clipWillBeSet(newClip);
+ fClipSet = true;
+}
diff --git a/src/gpu/GrInOrderDrawBuffer.h b/src/gpu/GrInOrderDrawBuffer.h
new file mode 100644
index 0000000000..327352539e
--- /dev/null
+++ b/src/gpu/GrInOrderDrawBuffer.h
@@ -0,0 +1,188 @@
+
+/*
+ * Copyright 2011 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+
+#ifndef GrInOrderDrawBuffer_DEFINED
+#define GrInOrderDrawBuffer_DEFINED
+
+#include "GrDrawTarget.h"
+#include "GrAllocPool.h"
+#include "GrAllocator.h"
+#include "GrClip.h"
+
+class GrGpu;
+class GrIndexBufferAllocPool;
+class GrVertexBufferAllocPool;
+
+/**
+ * GrInOrderDrawBuffer is an implementation of GrDrawTarget that queues up
+ * draws for eventual playback into a GrGpu. In theory one draw buffer could
+ * playback into another. When index or vertex buffers are used as geometry
+ * sources, the draw buffer only holds references to the
+ * buffers. It is the caller's responsibility to ensure that the data is still
+ * valid when the draw buffer is played back into a GrGpu. Similarly, it is the
+ * caller's responsibility to ensure that all referenced textures, buffers,
+ * and rendertargets are associated in the GrGpu object that the buffer is
+ * played back into. The buffer requires VB and IB pools to store geometry.
+ */
+
+class GrInOrderDrawBuffer : public GrDrawTarget {
+public:
+
+ /**
+ * Creates a GrInOrderDrawBuffer
+ *
+ * @param gpu the gpu object where this will be played back
+ * (possible indirectly). GrResources used with the draw
+ * buffer are created by this gpu object.
+ * @param vertexPool pool where vertices for queued draws will be saved when
+ * the vertex source is either reserved or array.
+ * @param indexPool pool where indices for queued draws will be saved when
+ * the index source is either reserved or array.
+ */
+ GrInOrderDrawBuffer(const GrGpu* gpu,
+ GrVertexBufferAllocPool* vertexPool,
+ GrIndexBufferAllocPool* indexPool);
+
+ virtual ~GrInOrderDrawBuffer();
+
+ /**
+ * Copies the draw state and clip from target to this draw buffer.
+ *
+ * @param target the target whose clip and state should be copied.
+ */
+ void initializeDrawStateAndClip(const GrDrawTarget& target);
+
+ /**
+ * Provides the buffer with an index buffer that can be used for quad rendering.
+ * The buffer may be able to batch consecutive drawRects if this is provided.
+ * @param indexBuffer index buffer with quad indices.
+ */
+ void setQuadIndexBuffer(const GrIndexBuffer* indexBuffer);
+
+ /**
+ * Empties the draw buffer of any queued up draws.
+ */
+ void reset();
+
+ /**
+ * plays the queued up draws to another target. Does not empty this buffer so
+ * that it can be played back multiple times.
+ * @param target the target to receive the playback
+ */
+ void playback(GrDrawTarget* target);
+
+ // overrides from GrDrawTarget
+ virtual void drawRect(const GrRect& rect,
+ const GrMatrix* matrix = NULL,
+ int stageEnableMask = 0,
+ const GrRect* srcRects[] = NULL,
+ const GrMatrix* srcMatrices[] = NULL);
+
+ virtual bool geometryHints(GrVertexLayout vertexLayout,
+ int* vertexCount,
+ int* indexCount) const;
+
+ virtual void clear(const GrIRect* rect, GrColor color);
+
+private:
+
+ struct Draw {
+ GrPrimitiveType fPrimitiveType;
+ int fStartVertex;
+ int fStartIndex;
+ int fVertexCount;
+ int fIndexCount;
+ bool fStateChanged;
+ bool fClipChanged;
+ GrVertexLayout fVertexLayout;
+ const GrVertexBuffer* fVertexBuffer;
+ const GrIndexBuffer* fIndexBuffer;
+ };
+
+ struct Clear {
+ int fBeforeDrawIdx;
+ GrIRect fRect;
+ GrColor fColor;
+ };
+
+ // overrides from GrDrawTarget
+ virtual void onDrawIndexed(GrPrimitiveType primitiveType,
+ int startVertex,
+ int startIndex,
+ int vertexCount,
+ int indexCount);
+ virtual void onDrawNonIndexed(GrPrimitiveType primitiveType,
+ int startVertex,
+ int vertexCount);
+ virtual bool onReserveVertexSpace(GrVertexLayout layout,
+ int vertexCount,
+ void** vertices);
+ virtual bool onReserveIndexSpace(int indexCount, void** indices);
+ virtual void releaseReservedVertexSpace();
+ virtual void releaseReservedIndexSpace();
+ virtual void onSetVertexSourceToArray(const void* vertexArray,
+ int vertexCount);
+ virtual void onSetIndexSourceToArray(const void* indexArray,
+ int indexCount);
+ virtual void releaseVertexArray();
+ virtual void releaseIndexArray();
+ virtual void geometrySourceWillPush();
+ virtual void geometrySourceWillPop(const GeometrySrcState& restoredState);
+ virtual void clipWillBeSet(const GrClip& newClip);
+
+ bool needsNewState() const;
+ bool needsNewClip() const;
+
+ void pushState();
+ void pushClip();
+
+ enum {
+ kDrawPreallocCnt = 8,
+ kStatePreallocCnt = 8,
+ kClipPreallocCnt = 8,
+ kClearPreallocCnt = 4,
+ kGeoPoolStatePreAllocCnt = 4,
+ };
+
+ const GrGpu* fGpu;
+
+ GrSTAllocator<kDrawPreallocCnt, Draw> fDraws;
+ GrSTAllocator<kStatePreallocCnt, SavedDrawState> fStates;
+ GrSTAllocator<kClearPreallocCnt, Clear> fClears;
+ GrSTAllocator<kClipPreallocCnt, GrClip> fClips;
+
+ bool fClipSet;
+
+ GrVertexLayout fLastRectVertexLayout;
+ const GrIndexBuffer* fQuadIndexBuffer;
+ int fMaxQuads;
+ int fCurrQuad;
+
+ GrVertexBufferAllocPool& fVertexPool;
+
+ GrIndexBufferAllocPool& fIndexPool;
+
+ struct GeometryPoolState {
+ const GrVertexBuffer* fPoolVertexBuffer;
+ int fPoolStartVertex;
+ const GrIndexBuffer* fPoolIndexBuffer;
+ int fPoolStartIndex;
+ // caller may conservatively over reserve vertices / indices.
+ // we release unused space back to allocator if possible
+ // can only do this if there isn't an intervening pushGeometrySource()
+ size_t fUsedPoolVertexBytes;
+ size_t fUsedPoolIndexBytes;
+ };
+ SkSTArray<kGeoPoolStatePreAllocCnt, GeometryPoolState> fGeoPoolStateStack;
+
+ typedef GrDrawTarget INHERITED;
+};
+
+#endif
diff --git a/src/gpu/GrIndexBuffer.h b/src/gpu/GrIndexBuffer.h
new file mode 100644
index 0000000000..faa5018d32
--- /dev/null
+++ b/src/gpu/GrIndexBuffer.h
@@ -0,0 +1,33 @@
+
+/*
+ * Copyright 2010 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+
+#ifndef GrIndexBuffer_DEFINED
+#define GrIndexBuffer_DEFINED
+
+#include "GrGeometryBuffer.h"
+
+class GrIndexBuffer : public GrGeometryBuffer {
+public:
+    /**
+     * Retrieves the maximum number of quads that could be rendered
+     * from the index buffer (using kTriangles_PrimitiveType).
+     * @return the maximum number of quads using full size of index buffer.
+     */
+    int maxQuads() const {
+        return this->sizeInBytes() / (sizeof(uint16_t) * 6);  // 6 indices per quad (two triangles)
+    }
+protected:
+    GrIndexBuffer(GrGpu* gpu, size_t sizeInBytes, bool dynamic)  // constructible by subclasses only
+        : INHERITED(gpu, sizeInBytes, dynamic) {}
+private:
+    typedef GrGeometryBuffer INHERITED;
+};
+
+#endif
diff --git a/src/gpu/GrMatrix.cpp b/src/gpu/GrMatrix.cpp
new file mode 100644
index 0000000000..e71636b366
--- /dev/null
+++ b/src/gpu/GrMatrix.cpp
@@ -0,0 +1,713 @@
+
+/*
+ * Copyright 2010 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+
+#include "GrMatrix.h"
+#include "GrRect.h"
+#include <stddef.h>
+
+#if 0
+#if GR_SCALAR_IS_FLOAT
+ const GrScalar GrMatrix::gRESCALE(GR_Scalar1);
+#else
+ GR_STATIC_ASSERT(GR_SCALAR_IS_FIXED);
+ // fixed point isn't supported right now
+ GR_STATIC_ASSERT(false);
+const GrScalar GrMatrix::gRESCALE(1 << 30);
+#endif
+
+const GrMatrix::MapProc GrMatrix::gMapProcs[] = {
+// Scales are not both zero
+ &GrMatrix::mapIdentity,
+ &GrMatrix::mapScale,
+ &GrMatrix::mapTranslate,
+ &GrMatrix::mapScaleAndTranslate,
+ &GrMatrix::mapSkew,
+ &GrMatrix::mapScaleAndSkew,
+ &GrMatrix::mapSkewAndTranslate,
+ &GrMatrix::mapNonPerspective,
+ // no optimizations for perspective matrices
+ &GrMatrix::mapPerspective,
+ &GrMatrix::mapPerspective,
+ &GrMatrix::mapPerspective,
+ &GrMatrix::mapPerspective,
+ &GrMatrix::mapPerspective,
+ &GrMatrix::mapPerspective,
+ &GrMatrix::mapPerspective,
+ &GrMatrix::mapPerspective,
+
+// Scales are zero (every other is invalid because kScale_TypeBit must be set if
+// kZeroScale_TypeBit is set)
+ &GrMatrix::mapInvalid,
+ &GrMatrix::mapZero,
+ &GrMatrix::mapInvalid,
+ &GrMatrix::mapSetToTranslate,
+ &GrMatrix::mapInvalid,
+ &GrMatrix::mapSwappedScale,
+ &GrMatrix::mapInvalid,
+ &GrMatrix::mapSwappedScaleAndTranslate,
+
+ // no optimizations for perspective matrices
+ &GrMatrix::mapInvalid,
+ &GrMatrix::mapZero,
+ &GrMatrix::mapInvalid,
+ &GrMatrix::mapPerspective,
+ &GrMatrix::mapInvalid,
+ &GrMatrix::mapPerspective,
+ &GrMatrix::mapInvalid,
+ &GrMatrix::mapPerspective,
+};
+
+void GrMatrix::setIdentity() {
+ fM[0] = GR_Scalar1; fM[1] = 0; fM[2] = 0;
+ fM[3] = 0; fM[4] = GR_Scalar1; fM[5] = 0;
+ fM[6] = 0; fM[7] = 0; fM[8] = gRESCALE;
+ fTypeMask = 0;
+}
+
+void GrMatrix::setTranslate(GrScalar dx, GrScalar dy) {
+ fM[0] = GR_Scalar1; fM[1] = 0; fM[2] = dx;
+ fM[3] = 0; fM[4] = GR_Scalar1; fM[5] = dy;
+ fM[6] = 0; fM[7] = 0; fM[8] = gRESCALE;
+ fTypeMask = (0 != dx || 0 != dy) ? kTranslate_TypeBit : 0;
+}
+
+void GrMatrix::setScale(GrScalar sx, GrScalar sy) {
+ fM[0] = sx; fM[1] = 0; fM[2] = 0;
+ fM[3] = 0; fM[4] = sy; fM[5] = 0;
+ fM[6] = 0; fM[7] = 0; fM[8] = gRESCALE;
+ fTypeMask = (GR_Scalar1 != sx || GR_Scalar1 != sy) ? kScale_TypeBit : 0;
+}
+
+void GrMatrix::setSkew(GrScalar skx, GrScalar sky) {
+ fM[0] = GR_Scalar1; fM[1] = skx; fM[2] = 0;
+ fM[3] = sky; fM[4] = GR_Scalar1; fM[5] = 0;
+ fM[6] = 0; fM[7] = 0; fM[8] = gRESCALE;
+ fTypeMask = (0 != skx || 0 != sky) ? kSkew_TypeBit : 0;
+}
+
+void GrMatrix::setConcat(const GrMatrix& a, const GrMatrix& b) {
+ if (a.isIdentity()) {
+ if (this != &b) {
+ for (int i = 0; i < 9; ++i) {
+ fM[i] = b.fM[i];
+ }
+ fTypeMask = b.fTypeMask;
+ }
+ return;
+ }
+
+ if (b.isIdentity()) {
+ GrAssert(!a.isIdentity());
+ if (this != &a) {
+ for (int i = 0; i < 9; ++i) {
+ fM[i] = a.fM[i];
+ }
+ fTypeMask = a.fTypeMask;
+ }
+ return;
+ }
+
+ // a and/or b could be this
+ GrMatrix tmp;
+
+ // could do more optimizations based on type bits. Hopefully this call is
+ // low frequency.
+ // TODO: make this work for fixed point
+ if (!((b.fTypeMask | a.fTypeMask) & kPerspective_TypeBit)) {
+ tmp.fM[0] = a.fM[0] * b.fM[0] + a.fM[1] * b.fM[3];
+ tmp.fM[1] = a.fM[0] * b.fM[1] + a.fM[1] * b.fM[4];
+ tmp.fM[2] = a.fM[0] * b.fM[2] + a.fM[1] * b.fM[5] + a.fM[2] * gRESCALE;
+
+ tmp.fM[3] = a.fM[3] * b.fM[0] + a.fM[4] * b.fM[3];
+ tmp.fM[4] = a.fM[3] * b.fM[1] + a.fM[4] * b.fM[4];
+ tmp.fM[5] = a.fM[3] * b.fM[2] + a.fM[4] * b.fM[5] + a.fM[5] * gRESCALE;
+
+ tmp.fM[6] = 0;
+ tmp.fM[7] = 0;
+ tmp.fM[8] = gRESCALE * gRESCALE;
+ } else {
+ tmp.fM[0] = a.fM[0] * b.fM[0] + a.fM[1] * b.fM[3] + a.fM[2] * b.fM[6];
+ tmp.fM[1] = a.fM[0] * b.fM[1] + a.fM[1] * b.fM[4] + a.fM[2] * b.fM[7];
+ tmp.fM[2] = a.fM[0] * b.fM[2] + a.fM[1] * b.fM[5] + a.fM[2] * b.fM[8];
+
+ tmp.fM[3] = a.fM[3] * b.fM[0] + a.fM[4] * b.fM[3] + a.fM[5] * b.fM[6];
+ tmp.fM[4] = a.fM[3] * b.fM[1] + a.fM[4] * b.fM[4] + a.fM[5] * b.fM[7];
+ tmp.fM[5] = a.fM[3] * b.fM[2] + a.fM[4] * b.fM[5] + a.fM[5] * b.fM[8];
+
+ tmp.fM[6] = a.fM[6] * b.fM[0] + a.fM[7] * b.fM[3] + a.fM[8] * b.fM[6];
+ tmp.fM[7] = a.fM[6] * b.fM[1] + a.fM[7] * b.fM[4] + a.fM[8] * b.fM[7];
+ tmp.fM[8] = a.fM[6] * b.fM[2] + a.fM[7] * b.fM[5] + a.fM[8] * b.fM[8];
+ }
+ *this = tmp;
+ this->computeTypeMask();
+}
+
+void GrMatrix::preConcat(const GrMatrix& m) {
+ setConcat(*this, m);
+}
+
+void GrMatrix::postConcat(const GrMatrix& m) {
+ setConcat(m, *this);
+}
+
+double GrMatrix::determinant() const {
+ if (fTypeMask & kPerspective_TypeBit) {
+ return fM[0]*((double)fM[4]*fM[8] - (double)fM[5]*fM[7]) +
+ fM[1]*((double)fM[5]*fM[6] - (double)fM[3]*fM[8]) +
+ fM[2]*((double)fM[3]*fM[7] - (double)fM[4]*fM[6]);
+ } else {
+ return (double)fM[0]*fM[4]*gRESCALE -
+ (double)fM[1]*fM[3]*gRESCALE;
+ }
+}
+
+bool GrMatrix::invert(GrMatrix* inverted) const {
+
+ if (isIdentity()) {
+ if (inverted != this) {
+ inverted->setIdentity();
+ }
+ return true;
+ }
+ static const double MIN_DETERMINANT_SQUARED = 1.e-16;
+
+ // could do more optimizations based on type bits. Hopefully this call is
+ // low frequency.
+
+ double det = determinant();
+
+ // check if we can't be inverted
+ if (det*det <= MIN_DETERMINANT_SQUARED) {
+ return false;
+ } else if (NULL == inverted) {
+ return true;
+ }
+
+ double t[9];
+
+ if (fTypeMask & kPerspective_TypeBit) {
+ t[0] = ((double)fM[4]*fM[8] - (double)fM[5]*fM[7]);
+ t[1] = ((double)fM[2]*fM[7] - (double)fM[1]*fM[8]);
+ t[2] = ((double)fM[1]*fM[5] - (double)fM[2]*fM[4]);
+ t[3] = ((double)fM[5]*fM[6] - (double)fM[3]*fM[8]);
+ t[4] = ((double)fM[0]*fM[8] - (double)fM[2]*fM[6]);
+ t[5] = ((double)fM[2]*fM[3] - (double)fM[0]*fM[5]);
+ t[6] = ((double)fM[3]*fM[7] - (double)fM[4]*fM[6]);
+ t[7] = ((double)fM[1]*fM[6] - (double)fM[0]*fM[7]);
+ t[8] = ((double)fM[0]*fM[4] - (double)fM[1]*fM[3]);
+ det = 1.0 / det;
+ for (int i = 0; i < 9; ++i) {
+ inverted->fM[i] = (GrScalar)(t[i] * det);
+ }
+ } else {
+ t[0] = (double)fM[4]*gRESCALE;
+ t[1] = -(double)fM[1]*gRESCALE;
+ t[2] = (double)fM[1]*fM[5] - (double)fM[2]*fM[4];
+ t[3] = -(double)fM[3]*gRESCALE;
+ t[4] = (double)fM[0]*gRESCALE;
+ t[5] = (double)fM[2]*fM[3] - (double)fM[0]*fM[5];
+ //t[6] = 0.0;
+ //t[7] = 0.0;
+ t[8] = (double)fM[0]*fM[4] - (double)fM[1]*fM[3];
+ det = 1.0 / det;
+ for (int i = 0; i < 6; ++i) {
+ inverted->fM[i] = (GrScalar)(t[i] * det);
+ }
+ inverted->fM[6] = 0;
+ inverted->fM[7] = 0;
+ inverted->fM[8] = (GrScalar)(t[8] * det);
+ }
+ inverted->computeTypeMask();
+ return true;
+}
+
+void GrMatrix::mapRect(GrRect* dst, const GrRect& src) const {
+ GrPoint srcPts[4], dstPts[4];
+ srcPts[0].set(src.fLeft, src.fTop);
+ srcPts[1].set(src.fRight, src.fTop);
+ srcPts[2].set(src.fRight, src.fBottom);
+ srcPts[3].set(src.fLeft, src.fBottom);
+ this->mapPoints(dstPts, srcPts, 4);
+ dst->setBounds(dstPts, 4);
+}
+
+bool GrMatrix::hasPerspective() const {
+ GrAssert(!!(kPerspective_TypeBit & fTypeMask) ==
+ (fM[kPersp0] != 0 || fM[kPersp1] != 0 || fM[kPersp2] != gRESCALE));
+ return 0 != (kPerspective_TypeBit & fTypeMask);
+}
+
+bool GrMatrix::isIdentity() const {
+ GrAssert((0 == fTypeMask) ==
+ (GR_Scalar1 == fM[kScaleX] && 0 == fM[kSkewX] && 0 == fM[kTransX] &&
+ 0 == fM[kSkewY] && GR_Scalar1 == fM[kScaleY] && 0 == fM[kTransY] &&
+ 0 == fM[kPersp0] && 0 == fM[kPersp1] && gRESCALE == fM[kPersp2]));
+ return (0 == fTypeMask);
+}
+
+
+bool GrMatrix::preservesAxisAlignment() const {
+
+ // check if matrix is trans and scale only
+ static const int gAllowedMask1 = kScale_TypeBit | kTranslate_TypeBit;
+
+ if (!(~gAllowedMask1 & fTypeMask)) {
+ return true;
+ }
+
+ // check matrix is trans and skew only (0 scale)
+ static const int gAllowedMask2 = kScale_TypeBit | kSkew_TypeBit |
+ kTranslate_TypeBit | kZeroScale_TypeBit;
+
+ if (!(~gAllowedMask2 & fTypeMask) && (kZeroScale_TypeBit & fTypeMask)) {
+ return true;
+ }
+
+ return false;
+}
+
+GrScalar GrMatrix::getMaxStretch() const {
+
+ if (fTypeMask & kPerspective_TypeBit) {
+ return -GR_Scalar1;
+ }
+
+ GrScalar stretch;
+
+ if (isIdentity()) {
+ stretch = GR_Scalar1;
+ } else if (!(fTypeMask & kSkew_TypeBit)) {
+ stretch = GrMax(GrScalarAbs(fM[kScaleX]), GrScalarAbs(fM[kScaleY]));
+ } else if (fTypeMask & kZeroScale_TypeBit) {
+ stretch = GrMax(GrScalarAbs(fM[kSkewX]), GrScalarAbs(fM[kSkewY]));
+ } else {
+ // ignore the translation part of the matrix, just look at 2x2 portion.
+ // compute singular values, take largest abs value.
+ // [a b; b c] = A^T*A
+ GrScalar a = GrMul(fM[kScaleX], fM[kScaleX]) + GrMul(fM[kSkewY], fM[kSkewY]);
+ GrScalar b = GrMul(fM[kScaleX], fM[kSkewX]) + GrMul(fM[kScaleY], fM[kSkewY]);
+ GrScalar c = GrMul(fM[kSkewX], fM[kSkewX]) + GrMul(fM[kScaleY], fM[kScaleY]);
+ // eigenvalues of A^T*A are the squared singular values of A.
+ // characteristic equation is det((A^T*A) - l*I) = 0
+ // l^2 - (a + c)l + (ac-b^2)
+ // solve using quadratic equation (divisor is non-zero since l^2 has 1 coeff
+ // and roots are guaraunteed to be pos and real).
+ GrScalar largerRoot;
+ GrScalar bSqd = GrMul(b,b);
+ // TODO: fixed point tolerance value.
+ if (bSqd < 1e-10) { // will be true if upper left 2x2 is orthogonal, which is common, so save some math
+ largerRoot = GrMax(a, c);
+ } else {
+ GrScalar aminusc = a - c;
+ GrScalar apluscdiv2 = (a + c) / 2;
+ GrScalar x = sqrtf(GrMul(aminusc,aminusc) + GrMul(4,(bSqd))) / 2;
+ largerRoot = apluscdiv2 + x;
+ }
+
+ stretch = sqrtf(largerRoot);
+ }
+#if GR_DEBUG && 0
+ // test a bunch of vectors. None should be scaled by more than stretch
+ // (modulo some error) and we should find a vector that is scaled by almost
+ // stretch.
+ GrPoint pt;
+ GrScalar max = 0;
+ for (int i = 0; i < 1000; ++i) {
+ GrScalar x = (float)rand() / RAND_MAX;
+ GrScalar y = sqrtf(1 - (x*x));
+ pt.fX = fM[kScaleX]*x + fM[kSkewX]*y;
+ pt.fY = fM[kSkewY]*x + fM[kScaleY]*y;
+ GrScalar d = pt.distanceToOrigin();
+ GrAssert(d <= (1.0001 * stretch));
+ max = GrMax(max, pt.distanceToOrigin());
+ }
+ GrAssert((stretch - max) < .05*stretch);
+#endif
+ return stretch;
+}
+
+bool GrMatrix::operator == (const GrMatrix& m) const {
+ if (fTypeMask != m.fTypeMask) {
+ return false;
+ }
+ if (!fTypeMask) {
+ return true;
+ }
+ for (int i = 0; i < 9; ++i) {
+ if (m.fM[i] != fM[i]) {
+ return false;
+ }
+ }
+ return true;
+}
+
+bool GrMatrix::operator != (const GrMatrix& m) const {
+ return !(*this == m);
+}
+
+////////////////////////////////////////////////////////////////////////////////
+// Matrix transformation procs
+//////
+
+void GrMatrix::mapIdentity(GrPoint* dst, const GrPoint* src, uint32_t count) const {
+ if (src != dst) {
+ for (uint32_t i = 0; i < count; ++i) {
+ dst[i] = src[i];
+ }
+ }
+}
+
+void GrMatrix::mapScale(GrPoint* dst, const GrPoint* src, uint32_t count) const {
+ for (uint32_t i = 0; i < count; ++i) {
+ dst[i].fX = GrMul(src[i].fX, fM[kScaleX]);
+ dst[i].fY = GrMul(src[i].fY, fM[kScaleY]);
+ }
+}
+
+
+void GrMatrix::mapTranslate(GrPoint* dst, const GrPoint* src, uint32_t count) const {
+ for (uint32_t i = 0; i < count; ++i) {
+ dst[i].fX = src[i].fX + fM[kTransX];
+ dst[i].fY = src[i].fY + fM[kTransY];
+ }
+}
+
+void GrMatrix::mapScaleAndTranslate(GrPoint* dst, const GrPoint* src, uint32_t count) const {
+ for (uint32_t i = 0; i < count; ++i) {
+ dst[i].fX = GrMul(src[i].fX, fM[kScaleX]) + fM[kTransX];
+ dst[i].fY = GrMul(src[i].fY, fM[kScaleY]) + fM[kTransY];
+ }
+}
+
+void GrMatrix::mapSkew(GrPoint* dst, const GrPoint* src, uint32_t count) const {
+ if (src != dst) {
+ for (uint32_t i = 0; i < count; ++i) {
+ dst[i].fX = src[i].fX + GrMul(src[i].fY, fM[kSkewX]);
+ dst[i].fY = src[i].fY + GrMul(src[i].fX, fM[kSkewY]);
+ }
+ } else {
+ for (uint32_t i = 0; i < count; ++i) {
+ GrScalar newX = src[i].fX + GrMul(src[i].fY, fM[kSkewX]);
+ dst[i].fY = src[i].fY + GrMul(src[i].fX, fM[kSkewY]);
+ dst[i].fX = newX;
+ }
+ }
+}
+
+void GrMatrix::mapScaleAndSkew(GrPoint* dst, const GrPoint* src, uint32_t count) const {
+ if (src != dst) {
+ for (uint32_t i = 0; i < count; ++i) {
+ dst[i].fX = GrMul(src[i].fX, fM[kScaleX]) + GrMul(src[i].fY, fM[kSkewX]);
+ dst[i].fY = GrMul(src[i].fY, fM[kScaleY]) + GrMul(src[i].fX, fM[kSkewY]);
+ }
+ } else {
+ for (uint32_t i = 0; i < count; ++i) {
+ GrScalar newX = GrMul(src[i].fX, fM[kScaleX]) + GrMul(src[i].fY, fM[kSkewX]);
+ dst[i].fY = GrMul(src[i].fY, fM[kScaleY]) + GrMul(src[i].fX, fM[kSkewY]);
+ dst[i].fX = newX;
+ }
+ }
+}
+
+void GrMatrix::mapSkewAndTranslate(GrPoint* dst, const GrPoint* src, uint32_t count) const {
+ if (src != dst) {
+ for (uint32_t i = 0; i < count; ++i) {
+ dst[i].fX = src[i].fX + GrMul(src[i].fY, fM[kSkewX]) + fM[kTransX];
+ dst[i].fY = src[i].fY + GrMul(src[i].fX, fM[kSkewY]) + fM[kTransY];
+ }
+ } else {
+ for (uint32_t i = 0; i < count; ++i) {
+ GrScalar newX = src[i].fX + GrMul(src[i].fY, fM[kSkewX]) + fM[kTransX];
+ dst[i].fY = src[i].fY + GrMul(src[i].fX, fM[kSkewY]) + fM[kTransY];
+ dst[i].fX = newX;
+ }
+ }
+}
+
+void GrMatrix::mapNonPerspective(GrPoint* dst, const GrPoint* src, uint32_t count) const {
+ if (src != dst) {
+ for (uint32_t i = 0; i < count; ++i) {
+ dst[i].fX = GrMul(fM[kScaleX], src[i].fX) + GrMul(fM[kSkewX], src[i].fY) + fM[kTransX];
+ dst[i].fY = GrMul(fM[kSkewY], src[i].fX) + GrMul(fM[kScaleY], src[i].fY) + fM[kTransY];
+ }
+ } else {
+ for (uint32_t i = 0; i < count; ++i) {
+ GrScalar newX = GrMul(fM[kScaleX], src[i].fX) + GrMul(fM[kSkewX], src[i].fY) + fM[kTransX];
+ dst[i].fY = GrMul(fM[kSkewY], src[i].fX) + GrMul(fM[kScaleY], src[i].fY) + fM[kTransY];
+ dst[i].fX = newX;
+ }
+ }
+}
+
+void GrMatrix::mapPerspective(GrPoint* dst, const GrPoint* src, uint32_t count) const {
+ for (uint32_t i = 0; i < count; ++i) {
+ GrScalar x, y, w;
+ x = GrMul(fM[kScaleX], src[i].fX) + GrMul(fM[kSkewX], src[i].fY) + fM[kTransX];
+ y = GrMul(fM[kSkewY], src[i].fX) + GrMul(fM[kScaleY], src[i].fY) + fM[kTransY];
+ w = GrMul(fM[kPersp0], src[i].fX) + GrMul(fM[kPersp1], src[i].fY) + fM[kPersp2];
+ // TODO need fixed point invert
+ if (w) {
+ w = 1 / w;
+ }
+ dst[i].fX = GrMul(x, w);
+ dst[i].fY = GrMul(y, w);
+ }
+}
+
+void GrMatrix::mapInvalid(GrPoint* dst, const GrPoint* src, uint32_t count) const {
+ GrAssert(0);
+}
+
+void GrMatrix::mapZero(GrPoint* dst, const GrPoint* src, uint32_t count) const {
+ memset(dst, 0, sizeof(GrPoint)*count);
+}
+
+void GrMatrix::mapSetToTranslate(GrPoint* dst, const GrPoint* src, uint32_t count) const {
+ for (uint32_t i = 0; i < count; ++i) {
+ dst[i].fX = fM[kTransX];
+ dst[i].fY = fM[kTransY];
+ }
+}
+
+void GrMatrix::mapSwappedScale(GrPoint* dst, const GrPoint* src, uint32_t count) const {
+ if (src != dst) {
+ for (uint32_t i = 0; i < count; ++i) {
+ dst[i].fX = GrMul(src[i].fY, fM[kSkewX]);
+ dst[i].fY = GrMul(src[i].fX, fM[kSkewY]);
+ }
+ } else {
+ for (uint32_t i = 0; i < count; ++i) {
+ GrScalar newX = GrMul(src[i].fY, fM[kSkewX]);
+ dst[i].fY = GrMul(src[i].fX, fM[kSkewY]);
+ dst[i].fX = newX;
+ }
+ }
+}
+
+void GrMatrix::mapSwappedScaleAndTranslate(GrPoint* dst, const GrPoint* src, uint32_t count) const {
+ if (src != dst) {
+ for (uint32_t i = 0; i < count; ++i) {
+ dst[i].fX = GrMul(src[i].fY, fM[kSkewX]) + fM[kTransX];
+ dst[i].fY = GrMul(src[i].fX, fM[kSkewY]) + fM[kTransY];
+ }
+ } else {
+ for (uint32_t i = 0; i < count; ++i) {
+ GrScalar newX = GrMul(src[i].fY, fM[kSkewX]) + fM[kTransX];
+ dst[i].fY = GrMul(src[i].fX, fM[kSkewY]) + fM[kTransY];
+ dst[i].fX = newX;
+ }
+ }
+}
+
+///////////////////////////////////////////////////////////////////////////////
+// Unit test
+//////
+
+#include "GrRandom.h"
+
+#if GR_DEBUG
+enum MatrixType {
+ kRotate_MatrixType,
+ kScaleX_MatrixType,
+ kScaleY_MatrixType,
+ kSkewX_MatrixType,
+ kSkewY_MatrixType,
+ kTranslateX_MatrixType,
+ kTranslateY_MatrixType,
+ kSwapScaleXY_MatrixType,
+ kPersp_MatrixType,
+
+ kMatrixTypeCount
+};
+
+static void create_matrix(GrMatrix* matrix, GrRandom& rand) {
+ MatrixType type = (MatrixType)(rand.nextU() % kMatrixTypeCount);
+ switch (type) {
+ case kRotate_MatrixType: {
+ float angle = rand.nextF() * 2 *3.14159265358979323846f;
+ GrScalar cosa = GrFloatToScalar(cosf(angle));
+ GrScalar sina = GrFloatToScalar(sinf(angle));
+ matrix->setAll(cosa, -sina, 0,
+ sina, cosa, 0,
+ 0, 0, GrMatrix::I()[8]);
+ } break;
+ case kScaleX_MatrixType: {
+ GrScalar scale = GrFloatToScalar(rand.nextF(-2, 2));
+ matrix->setAll(scale, 0, 0,
+ 0, GR_Scalar1, 0,
+ 0, 0, GrMatrix::I()[8]);
+ } break;
+ case kScaleY_MatrixType: {
+ GrScalar scale = GrFloatToScalar(rand.nextF(-2, 2));
+ matrix->setAll(GR_Scalar1, 0, 0,
+ 0, scale, 0,
+ 0, 0, GrMatrix::I()[8]);
+ } break;
+ case kSkewX_MatrixType: {
+ GrScalar skew = GrFloatToScalar(rand.nextF(-2, 2));
+ matrix->setAll(GR_Scalar1, skew, 0,
+ 0, GR_Scalar1, 0,
+ 0, 0, GrMatrix::I()[8]);
+ } break;
+ case kSkewY_MatrixType: {
+ GrScalar skew = GrFloatToScalar(rand.nextF(-2, 2));
+ matrix->setAll(GR_Scalar1, 0, 0,
+ skew, GR_Scalar1, 0,
+ 0, 0, GrMatrix::I()[8]);
+ } break;
+ case kTranslateX_MatrixType: {
+ GrScalar trans = GrFloatToScalar(rand.nextF(-10, 10));
+ matrix->setAll(GR_Scalar1, 0, trans,
+ 0, GR_Scalar1, 0,
+ 0, 0, GrMatrix::I()[8]);
+ } break;
+ case kTranslateY_MatrixType: {
+ GrScalar trans = GrFloatToScalar(rand.nextF(-10, 10));
+ matrix->setAll(GR_Scalar1, 0, 0,
+ 0, GR_Scalar1, trans,
+ 0, 0, GrMatrix::I()[8]);
+ } break;
+ case kSwapScaleXY_MatrixType: {
+ GrScalar xy = GrFloatToScalar(rand.nextF(-2, 2));
+ GrScalar yx = GrFloatToScalar(rand.nextF(-2, 2));
+ matrix->setAll(0, xy, 0,
+ yx, 0, 0,
+ 0, 0, GrMatrix::I()[8]);
+ } break;
+ case kPersp_MatrixType: {
+ GrScalar p0 = GrFloatToScalar(rand.nextF(-2, 2));
+ GrScalar p1 = GrFloatToScalar(rand.nextF(-2, 2));
+ GrScalar p2 = GrFloatToScalar(rand.nextF(-0.5f, 0.75f));
+ matrix->setAll(GR_Scalar1, 0, 0,
+ 0, GR_Scalar1, 0,
+ p0, p1, GrMul(p2,GrMatrix::I()[8]));
+ } break;
+ default:
+ GrAssert(0);
+ break;
+ }
+}
+#endif
+
+void GrMatrix::UnitTest() {
+ GrRandom rand;
+
+ // Create a bunch of matrices and test point mapping, max stretch calc,
+ // inversion and multiply-by-inverse.
+#if GR_DEBUG
+ for (int i = 0; i < 10000; ++i) {
+ GrMatrix a, b;
+ a.setIdentity();
+ int num = rand.nextU() % 6;
+ // force testing of I and swapXY
+ if (0 == i) {
+ num = 0;
+ GrAssert(a.isIdentity());
+ } else if (1 == i) {
+ num = 0;
+ a.setAll(0, GR_Scalar1, 0,
+ GR_Scalar1, 0, 0,
+ 0, 0, I()[8]);
+ }
+ for (int j = 0; j < num; ++j) {
+ create_matrix(&b, rand);
+ a.preConcat(b);
+ }
+
+ GrScalar maxStretch = a.getMaxStretch();
+ if (maxStretch > 0) {
+ maxStretch = GrMul(GR_Scalar1 + GR_Scalar1 / 100, maxStretch);
+ }
+ GrPoint origin = a.mapPoint(GrPoint::Make(0,0));
+
+ for (int j = 0; j < 9; ++j) {
+ int mask, origMask = a.fTypeMask;
+ GrScalar old = a[j];
+
+ a.set(j, GR_Scalar1);
+ mask = a.fTypeMask;
+ a.computeTypeMask();
+ GrAssert(mask == a.fTypeMask);
+
+ a.set(j, 0);
+ mask = a.fTypeMask;
+ a.computeTypeMask();
+ GrAssert(mask == a.fTypeMask);
+
+ a.set(j, 10 * GR_Scalar1);
+ mask = a.fTypeMask;
+ a.computeTypeMask();
+ GrAssert(mask == a.fTypeMask);
+
+ a.set(j, old);
+ GrAssert(a.fTypeMask == origMask);
+ }
+
+ for (int j = 0; j < 100; ++j) {
+ GrPoint pt;
+ pt.fX = GrFloatToScalar(rand.nextF(-10, 10));
+ pt.fY = GrFloatToScalar(rand.nextF(-10, 10));
+
+ GrPoint t0, t1, t2;
+ t0 = a.mapPoint(pt); // map to a new point
+ t1 = pt;
+ a.mapPoints(&t1, &t1, 1); // in place
+ a.mapPerspective(&t2, &pt, 1); // full mult
+ GrAssert(t0 == t1 && t1 == t2);
+ if (maxStretch >= 0.f) {
+ GrVec vec = origin - t0;
+// vec.setBetween(t0, origin);
+ GrScalar stretch = vec.length() / pt.distanceToOrigin();
+ GrAssert(stretch <= maxStretch);
+ }
+ }
+ double det = a.determinant();
+ if (fabs(det) > 1e-3 && a.invert(&b)) {
+ GrMatrix c;
+ c.setConcat(a,b);
+ for (int i = 0; i < 9; ++i) {
+ GrScalar diff = GrScalarAbs(c[i] - I()[i]);
+ GrAssert(diff < (5*GR_Scalar1 / 100));
+ }
+ }
+ }
+#endif
+}
+
+///////////////////////////////////////////////////////////////////////////////
+#endif
+
+int Gr_clz(uint32_t n) {  // count leading zero bits; returns 32 for n == 0
+    if (0 == n) {
+        return 32;
+    }
+
+    int count = 0;  // binary search: if the tested high bits are all zero, add their width and shift the rest up
+    if (0 == (n & 0xFFFF0000)) {
+        count += 16;
+        n <<= 16;
+    }
+    if (0 == (n & 0xFF000000)) {
+        count += 8;
+        n <<= 8;
+    }
+    if (0 == (n & 0xF0000000)) {
+        count += 4;
+        n <<= 4;
+    }
+    if (0 == (n & 0xC0000000)) {
+        count += 2;
+        n <<= 2;
+    }
+    if (0 == (n & 0x80000000)) {
+        count += 1;
+    }
+    return count;
+}
diff --git a/src/gpu/GrMemory.cpp b/src/gpu/GrMemory.cpp
new file mode 100644
index 0000000000..fa6ee2fda3
--- /dev/null
+++ b/src/gpu/GrMemory.cpp
@@ -0,0 +1,27 @@
+
+/*
+ * Copyright 2010 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+
+#include <stdlib.h>
+
+void* GrMalloc(size_t bytes) {  // malloc wrapper that never returns NULL
+    void* ptr = ::malloc(bytes);
+    if (NULL == ptr) {
+        ::exit(-1);  // allocation failure is treated as fatal; no recovery attempted
+    }
+    return ptr;
+}
+
+void GrFree(void* ptr) {  // free wrapper; safe to call with NULL
+    if (ptr) {
+        ::free(ptr);
+    }
+}
+
+
diff --git a/src/gpu/GrPathRenderer.cpp b/src/gpu/GrPathRenderer.cpp
new file mode 100644
index 0000000000..929941a47e
--- /dev/null
+++ b/src/gpu/GrPathRenderer.cpp
@@ -0,0 +1,41 @@
+
+/*
+ * Copyright 2011 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "GrPathRenderer.h"
+
+GrPathRenderer::GrPathRenderer()
+    : fPath(NULL)     // no path bound until setPath() is called
+    , fTarget(NULL) {
+}
+
+void GrPathRenderer::setPath(GrDrawTarget* target,
+                             const SkPath* path,
+                             GrPathFill fill,
+                             const GrPoint* translate) {
+    GrAssert(NULL == fPath);    // previous path must have been cleared via clearPath()
+    GrAssert(NULL == fTarget);
+    GrAssert(NULL != target);
+
+    fTarget = target;
+    fPath = path;
+    fFill = fill;
+    if (NULL != translate) {
+        fTranslate = *translate;
+    } else {
+        fTranslate.fX = fTranslate.fY = 0;  // default: no translation
+    }
+    this->pathWasSet();  // notify subclass hook
+}
+
+void GrPathRenderer::clearPath() {
+    this->pathWillClear();            // subclass hook fires before state is torn down
+    fTarget->resetVertexSource();     // release any geometry the renderer set on the target
+    fTarget->resetIndexSource();
+    fTarget = NULL;
+    fPath = NULL;
+}
diff --git a/src/gpu/GrPathRenderer.h b/src/gpu/GrPathRenderer.h
new file mode 100644
index 0000000000..d95cc85afc
--- /dev/null
+++ b/src/gpu/GrPathRenderer.h
@@ -0,0 +1,229 @@
+
+/*
+ * Copyright 2011 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+#ifndef GrPathRenderer_DEFINED
+#define GrPathRenderer_DEFINED
+
+#include "GrDrawTarget.h"
+#include "GrPathRendererChain.h"
+
+#include "SkTArray.h"
+
+class SkPath;
+
+struct GrPoint;
+
+/**
+ * Base class for drawing paths into a GrDrawTarget.
+ * Paths may be drawn multiple times as when tiling for supersampling. The
+ * calls on GrPathRenderer to draw a path will look like this:
+ *
+ * pr->setPath(target, path, fill, translate); // sets the path to draw
+ * pr->drawPath(...); // draw the path
+ * pr->drawPath(...);
+ * ...
+ * pr->clearPath(); // finished with the path
+ */
+class GR_API GrPathRenderer : public GrRefCnt {
+public:
+
+ /**
+ * This is called to install custom path renderers in every GrContext at
+ * create time. The default implementation in GrCreatePathRenderer_none.cpp
+ * does not add any additional renderers. Link against another
+ * implementation to install your own. The first added is the most preferred
+ * path renderer, second is second most preferred, etc.
+ *
+ * @param context the context that will use the path renderer
+ * @param flags flags indicating how path renderers will be used
+ * @param prChain the chain to add path renderers to.
+ */
+ static void AddPathRenderers(GrContext* context,
+ GrPathRendererChain::UsageFlags flags,
+ GrPathRendererChain* prChain);
+
+
+ GrPathRenderer(void);
+ /**
+ * Returns true if this path renderer is able to render the path.
+ * Returning false allows the caller to fallback to another path renderer.
+ * When searching for a path renderer capable of rendering a path this
+ * function is called. The path renderer can examine the path, fill rule,
+     * and draw settings that will be used (via the target parameter). If "true"
+ * is reported note that the caller is permitted to make modifications to
+ * the following settings of the target between the calls to canDrawPath and
+ * drawPath:
+     *    1. view matrix: The matrix at drawPath time may have additional
+     *    scale and translation applied
+ * 2. render target: The render target may change between canDrawPath
+ * and drawPath.
+ * The GrPathRenderer subclass's decision about whether to return true
+ * or false in its implementation of this function should consider these
+ * possible state changes.
+ *
+ * @param path The path to draw
+ * @param fill The fill rule to use
+ *
+ * @return true if the path can be drawn by this object, false otherwise.
+ */
+ virtual bool canDrawPath(const GrDrawTarget* target,
+ const SkPath& path,
+ GrPathFill fill) const = 0;
+
+ /**
+ * For complex clips Gr uses the stencil buffer. The path renderer must be
+ * able to render paths into the stencil buffer. However, the path renderer
+ * itself may require the stencil buffer to resolve the path fill rule.
+ * This function queries whether the path render needs its own stencil
+ * pass. If this returns false then drawPath() should not modify the
+     * target's stencil settings but use those already set on target. The
+ * target is passed as a param in case the answer depends upon draw state.
+ * The view matrix and render target set on the draw target may change
+ * before setPath/drawPath is called and so shouldn't be considered.
+ *
+ * @param target target that the path will be rendered to
+ * @param path the path that will be drawn
+ * @param fill the fill rule that will be used, will never be an inverse
+ * rule.
+ *
+ * @return false if this path renderer can generate interior-only fragments
+ * without changing the stencil settings on the target. If it
+ * returns true the drawPathToStencil will be used when rendering
+ * clips.
+ */
+ virtual bool requiresStencilPass(const GrDrawTarget* target,
+ const SkPath& path,
+ GrPathFill fill) const { return false; }
+
+ /**
+ * @return true if the path renderer can perform anti-aliasing (aside from
+ * having FSAA enabled for a render target). Target is provided to
+ * communicate the draw state (blend mode, stage settings, etc).
+ */
+ virtual bool supportsAA(const GrDrawTarget* target,
+ const SkPath& path,
+ GrPathFill fill) const { return false; }
+
+ /**
+ * Sets the path to render and target to render into. All calls to drawPath
+ * and drawPathToStencil must occur between setPath and clearPath. The
+ * path cannot be modified externally between setPath and clearPath. The
+ * path may be drawn several times (e.g. tiled supersampler). The target's
+ * state may change between setPath and drawPath* calls. However, if the
+ * path renderer specified vertices/indices during setPath or drawPath*
+ * they will still be set at subsequent drawPath* calls until the next
+ * clearPath. The target's draw state may change between drawPath* calls
+ * so if the subclass does any caching of tesselation, etc. then it must
+ * validate that target parameters that guided the decisions still hold.
+ *
+ * @param target the target to draw into.
+ * @param path the path to draw.
+ * @param fill the fill rule to apply.
+ * @param translate optional additional translation to apply to
+ * the path. NULL means (0,0).
+ */
+ void setPath(GrDrawTarget* target,
+ const SkPath* path,
+ GrPathFill fill,
+ const GrPoint* translate);
+
+ /**
+ * Notifies path renderer that path set in setPath is no longer in use.
+ */
+ void clearPath();
+
+ /**
+ * Draws the path into the draw target. If requiresStencilBuffer returned
+ * false then the target may be setup for stencil rendering (since the
+ * path renderer didn't claim that it needs to use the stencil internally).
+ *
+ * Only called between setPath / clearPath.
+ *
+ * @param stages bitfield that indicates which stages are
+ * in use. All enabled stages expect positions
+ * as texture coordinates. The path renderer
+     *                      may use the remaining stages for its path
+ * filling algorithm.
+ */
+ virtual void drawPath(GrDrawTarget::StageBitfield stages) = 0;
+
+ /**
+ * Draws the path to the stencil buffer. Assume the writable stencil bits
+ * are already initialized to zero. Fill will always be either
+ * kWinding_PathFill or kEvenOdd_PathFill.
+ *
+ * Only called if requiresStencilPass returns true for the same combo of
+ * target, path, and fill. Never called with an inverse fill.
+ *
+ * The default implementation assumes the path filling algorithm doesn't
+ * require a separate stencil pass and so crashes.
+ *
+ * Only called between setPath / clearPath.
+ */
+ virtual void drawPathToStencil() {
+ GrCrash("Unexpected call to drawPathToStencil.");
+ }
+
+ /**
+ * Helper that sets a path and automatically remove it in destructor.
+ */
+ class AutoClearPath {
+ public:
+ AutoClearPath() {
+ fPathRenderer = NULL;
+ }
+ AutoClearPath(GrPathRenderer* pr,
+ GrDrawTarget* target,
+ const SkPath* path,
+ GrPathFill fill,
+ const GrPoint* translate) {
+ GrAssert(NULL != pr);
+ pr->setPath(target, path, fill, translate);
+ fPathRenderer = pr;
+ }
+ void set(GrPathRenderer* pr,
+ GrDrawTarget* target,
+ const SkPath* path,
+ GrPathFill fill,
+ const GrPoint* translate) {
+ if (NULL != fPathRenderer) {
+ fPathRenderer->clearPath();
+ }
+ GrAssert(NULL != pr);
+ pr->setPath(target, path, fill, translate);
+ fPathRenderer = pr;
+ }
+ ~AutoClearPath() {
+ if (NULL != fPathRenderer) {
+ fPathRenderer->clearPath();
+ }
+ }
+ private:
+ GrPathRenderer* fPathRenderer;
+ };
+
+protected:
+
+ // subclass can override these to be notified just after a path is set
+ // and just before the path is cleared.
+ virtual void pathWasSet() {}
+ virtual void pathWillClear() {}
+
+ const SkPath* fPath;
+ GrDrawTarget* fTarget;
+ GrPathFill fFill;
+ GrPoint fTranslate;
+
+private:
+
+ typedef GrRefCnt INHERITED;
+};
+
+#endif
+
diff --git a/src/gpu/GrPathRendererChain.cpp b/src/gpu/GrPathRendererChain.cpp
new file mode 100644
index 0000000000..7a064e8c9c
--- /dev/null
+++ b/src/gpu/GrPathRendererChain.cpp
@@ -0,0 +1,64 @@
+
+/*
+ * Copyright 2011 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+#include "GrPathRendererChain.h"
+
+#include "GrContext.h"
+#include "GrDefaultPathRenderer.h"
+#include "GrGpu.h"
+
+GrPathRendererChain::GrPathRendererChain(GrContext* context, UsageFlags flags)
+ : fInit(false)
+ , fOwner(context)
+ , fFlags(flags) {
+ fInit = false;
+}
+
+GrPathRendererChain::~GrPathRendererChain() {
+ for (int i = 0; i < fChain.count(); ++i) {
+ fChain[i]->unref();
+ }
+}
+
+GrPathRenderer* GrPathRendererChain::addPathRenderer(GrPathRenderer* pr) {
+ fChain.push_back() = pr;
+ pr->ref();
+ return pr;
+}
+
+GrPathRenderer* GrPathRendererChain::getPathRenderer(const GrDrawTarget* target,
+ const GrPath& path,
+ GrPathFill fill) {
+ if (!fInit) {
+ this->init();
+ }
+ bool preferAA = target->isAntialiasState() &&
+ !target->getRenderTarget()->isMultisampled();
+ GrPathRenderer* nonAAPR = NULL;
+ for (int i = 0; i < fChain.count(); ++i) {
+ if (fChain[i]->canDrawPath(target, path, fill)) {
+ if (!preferAA || fChain[i]->supportsAA(target, path, fill)) {
+ return fChain[i];
+ } else {
+ nonAAPR = fChain[i];
+ }
+ }
+ }
+ return nonAAPR;
+}
+
+void GrPathRendererChain::init() {
+ GrAssert(!fInit);
+ GrGpu* gpu = fOwner->getGpu();
+ bool twoSided = gpu->getCaps().fTwoSidedStencilSupport;
+ bool wrapOp = gpu->getCaps().fStencilWrapOpsSupport;
+ GrPathRenderer::AddPathRenderers(fOwner, fFlags, this);
+ this->addPathRenderer(new GrDefaultPathRenderer(twoSided, wrapOp))->unref();
+ fInit = true;
+}
diff --git a/src/gpu/GrPathRendererChain.h b/src/gpu/GrPathRendererChain.h
new file mode 100644
index 0000000000..5719484921
--- /dev/null
+++ b/src/gpu/GrPathRendererChain.h
@@ -0,0 +1,63 @@
+
+/*
+ * Copyright 2011 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+#ifndef GrPathRendererChain_DEFINED
+#define GrPathRendererChain_DEFINED
+
+#include "GrRefCnt.h"
+#include "SkTArray.h"
+
+class GrContext;
+class GrDrawTarget;
+class SkPath;
+class GrPathRenderer;
+
+/**
+ * Keeps track of a ordered list of path renderers. When a path needs to be
+ * drawn this list is scanned to find the most preferred renderer. To add your
+ * path renderer to the list implement the GrPathRenderer::AddPathRenderers
+ * function.
+ */
+class GrPathRendererChain : public SkRefCnt {
+public:
+
+ enum UsageFlags {
+ kNone_UsageFlag = 0,
+ kNonAAOnly_UsageFlag = 1,
+ };
+
+ GrPathRendererChain(GrContext* context, UsageFlags flags);
+
+ ~GrPathRendererChain();
+
+ // takes a ref and unrefs in destructor
+ GrPathRenderer* addPathRenderer(GrPathRenderer* pr);
+
+ GrPathRenderer* getPathRenderer(const GrDrawTarget* target,
+ const SkPath& path,
+ GrPathFill fill);
+
+private:
+
+ GrPathRendererChain();
+
+ void init();
+
+ enum {
+ kPreAllocCount = 8,
+ };
+ bool fInit;
+ GrContext* fOwner;
+ UsageFlags fFlags;
+ SkSTArray<kPreAllocCount, GrPathRenderer*, true> fChain;
+};
+
+GR_MAKE_BITFIELD_OPS(GrPathRendererChain::UsageFlags)
+
+#endif
diff --git a/src/gpu/GrPathUtils.cpp b/src/gpu/GrPathUtils.cpp
new file mode 100644
index 0000000000..0a7759d8a1
--- /dev/null
+++ b/src/gpu/GrPathUtils.cpp
@@ -0,0 +1,188 @@
+
+/*
+ * Copyright 2011 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+#include "GrPathUtils.h"
+
+#include "GrPoint.h"
+
+GrScalar GrPathUtils::scaleToleranceToSrc(GrScalar devTol,
+ const GrMatrix& viewM,
+ const GrRect& pathBounds) {
+    // In order to tessellate the path we get a bound on how much the matrix can
+ // stretch when mapping to screen coordinates.
+ GrScalar stretch = viewM.getMaxStretch();
+ GrScalar srcTol = devTol;
+
+ if (stretch < 0) {
+        // take worst case mapRadius among four corners.
+ // (less than perfect)
+ for (int i = 0; i < 4; ++i) {
+ GrMatrix mat;
+ mat.setTranslate((i % 2) ? pathBounds.fLeft : pathBounds.fRight,
+ (i < 2) ? pathBounds.fTop : pathBounds.fBottom);
+ mat.postConcat(viewM);
+ stretch = SkMaxScalar(stretch, mat.mapRadius(SK_Scalar1));
+ }
+ }
+ srcTol = GrScalarDiv(srcTol, stretch);
+ return srcTol;
+}
+
+static const int MAX_POINTS_PER_CURVE = 1 << 10;
+static const GrScalar gMinCurveTol = GrFloatToScalar(0.0001f);
+
+uint32_t GrPathUtils::quadraticPointCount(const GrPoint points[],
+ GrScalar tol) {
+ if (tol < gMinCurveTol) {
+ tol = gMinCurveTol;
+ }
+ GrAssert(tol > 0);
+
+ GrScalar d = points[1].distanceToLineSegmentBetween(points[0], points[2]);
+ if (d <= tol) {
+ return 1;
+ } else {
+ // Each time we subdivide, d should be cut in 4. So we need to
+ // subdivide x = log4(d/tol) times. x subdivisions creates 2^(x)
+ // points.
+ // 2^(log4(x)) = sqrt(x);
+ int temp = SkScalarCeil(SkScalarSqrt(SkScalarDiv(d, tol)));
+ int pow2 = GrNextPow2(temp);
+ // Because of NaNs & INFs we can wind up with a degenerate temp
+ // such that pow2 comes out negative. Also, our point generator
+ // will always output at least one pt.
+ if (pow2 < 1) {
+ pow2 = 1;
+ }
+ return GrMin(pow2, MAX_POINTS_PER_CURVE);
+ }
+}
+
+uint32_t GrPathUtils::generateQuadraticPoints(const GrPoint& p0,
+ const GrPoint& p1,
+ const GrPoint& p2,
+ GrScalar tolSqd,
+ GrPoint** points,
+ uint32_t pointsLeft) {
+ if (pointsLeft < 2 ||
+ (p1.distanceToLineSegmentBetweenSqd(p0, p2)) < tolSqd) {
+ (*points)[0] = p2;
+ *points += 1;
+ return 1;
+ }
+
+ GrPoint q[] = {
+ { GrScalarAve(p0.fX, p1.fX), GrScalarAve(p0.fY, p1.fY) },
+ { GrScalarAve(p1.fX, p2.fX), GrScalarAve(p1.fY, p2.fY) },
+ };
+ GrPoint r = { GrScalarAve(q[0].fX, q[1].fX), GrScalarAve(q[0].fY, q[1].fY) };
+
+ pointsLeft >>= 1;
+ uint32_t a = generateQuadraticPoints(p0, q[0], r, tolSqd, points, pointsLeft);
+ uint32_t b = generateQuadraticPoints(r, q[1], p2, tolSqd, points, pointsLeft);
+ return a + b;
+}
+
+uint32_t GrPathUtils::cubicPointCount(const GrPoint points[],
+ GrScalar tol) {
+ if (tol < gMinCurveTol) {
+ tol = gMinCurveTol;
+ }
+ GrAssert(tol > 0);
+
+ GrScalar d = GrMax(
+ points[1].distanceToLineSegmentBetweenSqd(points[0], points[3]),
+ points[2].distanceToLineSegmentBetweenSqd(points[0], points[3]));
+ d = SkScalarSqrt(d);
+ if (d <= tol) {
+ return 1;
+ } else {
+ int temp = SkScalarCeil(SkScalarSqrt(SkScalarDiv(d, tol)));
+ int pow2 = GrNextPow2(temp);
+ // Because of NaNs & INFs we can wind up with a degenerate temp
+ // such that pow2 comes out negative. Also, our point generator
+ // will always output at least one pt.
+ if (pow2 < 1) {
+ pow2 = 1;
+ }
+ return GrMin(pow2, MAX_POINTS_PER_CURVE);
+ }
+}
+
+uint32_t GrPathUtils::generateCubicPoints(const GrPoint& p0,
+ const GrPoint& p1,
+ const GrPoint& p2,
+ const GrPoint& p3,
+ GrScalar tolSqd,
+ GrPoint** points,
+ uint32_t pointsLeft) {
+ if (pointsLeft < 2 ||
+ (p1.distanceToLineSegmentBetweenSqd(p0, p3) < tolSqd &&
+ p2.distanceToLineSegmentBetweenSqd(p0, p3) < tolSqd)) {
+ (*points)[0] = p3;
+ *points += 1;
+ return 1;
+ }
+ GrPoint q[] = {
+ { GrScalarAve(p0.fX, p1.fX), GrScalarAve(p0.fY, p1.fY) },
+ { GrScalarAve(p1.fX, p2.fX), GrScalarAve(p1.fY, p2.fY) },
+ { GrScalarAve(p2.fX, p3.fX), GrScalarAve(p2.fY, p3.fY) }
+ };
+ GrPoint r[] = {
+ { GrScalarAve(q[0].fX, q[1].fX), GrScalarAve(q[0].fY, q[1].fY) },
+ { GrScalarAve(q[1].fX, q[2].fX), GrScalarAve(q[1].fY, q[2].fY) }
+ };
+ GrPoint s = { GrScalarAve(r[0].fX, r[1].fX), GrScalarAve(r[0].fY, r[1].fY) };
+ pointsLeft >>= 1;
+ uint32_t a = generateCubicPoints(p0, q[0], r[0], s, tolSqd, points, pointsLeft);
+ uint32_t b = generateCubicPoints(s, r[1], q[2], p3, tolSqd, points, pointsLeft);
+ return a + b;
+}
+
+int GrPathUtils::worstCasePointCount(const GrPath& path, int* subpaths,
+ GrScalar tol) {
+ if (tol < gMinCurveTol) {
+ tol = gMinCurveTol;
+ }
+ GrAssert(tol > 0);
+
+ int pointCount = 0;
+ *subpaths = 1;
+
+ bool first = true;
+
+ SkPath::Iter iter(path, false);
+ GrPathCmd cmd;
+
+ GrPoint pts[4];
+ while ((cmd = (GrPathCmd)iter.next(pts)) != kEnd_PathCmd) {
+
+ switch (cmd) {
+ case kLine_PathCmd:
+ pointCount += 1;
+ break;
+ case kQuadratic_PathCmd:
+ pointCount += quadraticPointCount(pts, tol);
+ break;
+ case kCubic_PathCmd:
+ pointCount += cubicPointCount(pts, tol);
+ break;
+ case kMove_PathCmd:
+ pointCount += 1;
+ if (!first) {
+ ++(*subpaths);
+ }
+ break;
+ default:
+ break;
+ }
+ first = false;
+ }
+ return pointCount;
+}
diff --git a/src/gpu/GrPathUtils.h b/src/gpu/GrPathUtils.h
new file mode 100644
index 0000000000..5dc06aaf41
--- /dev/null
+++ b/src/gpu/GrPathUtils.h
@@ -0,0 +1,50 @@
+
+/*
+ * Copyright 2011 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+#ifndef GrPathUtils_DEFINED
+#define GrPathUtils_DEFINED
+
+#include "GrMatrix.h"
+#include "GrPath.h"
+
+/**
+ * Utilities for evaluating paths.
+ */
+namespace GrPathUtils {
+ GrScalar scaleToleranceToSrc(GrScalar devTol,
+ const GrMatrix& viewM,
+ const GrRect& pathBounds);
+
+ /// Since we divide by tol if we're computing exact worst-case bounds,
+ /// very small tolerances will be increased to gMinCurveTol.
+ int worstCasePointCount(const GrPath&,
+ int* subpaths,
+ GrScalar tol);
+ /// Since we divide by tol if we're computing exact worst-case bounds,
+ /// very small tolerances will be increased to gMinCurveTol.
+ uint32_t quadraticPointCount(const GrPoint points[], GrScalar tol);
+ uint32_t generateQuadraticPoints(const GrPoint& p0,
+ const GrPoint& p1,
+ const GrPoint& p2,
+ GrScalar tolSqd,
+ GrPoint** points,
+ uint32_t pointsLeft);
+ /// Since we divide by tol if we're computing exact worst-case bounds,
+ /// very small tolerances will be increased to gMinCurveTol.
+ uint32_t cubicPointCount(const GrPoint points[], GrScalar tol);
+ uint32_t generateCubicPoints(const GrPoint& p0,
+ const GrPoint& p1,
+ const GrPoint& p2,
+ const GrPoint& p3,
+ GrScalar tolSqd,
+ GrPoint** points,
+ uint32_t pointsLeft);
+
+};
+#endif
diff --git a/src/gpu/GrPrintf_printf.cpp b/src/gpu/GrPrintf_printf.cpp
new file mode 100644
index 0000000000..909a4f0a64
--- /dev/null
+++ b/src/gpu/GrPrintf_printf.cpp
@@ -0,0 +1,29 @@
+
+/*
+ * Copyright 2010 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+
+#include "GrTypes.h"
+
+#include <stdarg.h>
+#include <stdio.h>
+
+void GrPrintf(const char format[], ...) {
+ const size_t MAX_BUFFER_SIZE = 2048;
+
+ char buffer[MAX_BUFFER_SIZE + 1];
+ va_list args;
+
+ va_start(args, format);
+ vsnprintf(buffer, MAX_BUFFER_SIZE, format, args);
+ va_end(args);
+
+ printf("%s", buffer);
+}
+
+
diff --git a/src/gpu/GrRectanizer.cpp b/src/gpu/GrRectanizer.cpp
new file mode 100644
index 0000000000..628b89074c
--- /dev/null
+++ b/src/gpu/GrRectanizer.cpp
@@ -0,0 +1,123 @@
+
+/*
+ * Copyright 2010 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+
+#include "GrRectanizer.h"
+#include "GrTBSearch.h"
+
+#define MIN_HEIGHT_POW2 2
+
+class GrRectanizerPow2 : public GrRectanizer {
+public:
+ GrRectanizerPow2(int w, int h) : GrRectanizer(w, h) {
+ fNextStripY = 0;
+ fAreaSoFar = 0;
+ Gr_bzero(fRows, sizeof(fRows));
+ }
+
+ virtual ~GrRectanizerPow2() {
+ }
+
+ virtual bool addRect(int w, int h, GrIPoint16* loc);
+
+ virtual float percentFull() const {
+ return fAreaSoFar / ((float)this->width() * this->height());
+ }
+
+ virtual int stripToPurge(int height) const { return -1; }
+ virtual void purgeStripAtY(int yCoord) { }
+
+ ///////////////////////////////////////////////////////////////////////////
+
+ struct Row {
+ GrIPoint16 fLoc;
+ int fRowHeight;
+
+ bool canAddWidth(int width, int containerWidth) const {
+ return fLoc.fX + width <= containerWidth;
+ }
+ };
+
+ Row fRows[16];
+
+ static int HeightToRowIndex(int height) {
+ GrAssert(height >= MIN_HEIGHT_POW2);
+ return 32 - Gr_clz(height - 1);
+ }
+
+ int fNextStripY;
+ int32_t fAreaSoFar;
+
+ bool canAddStrip(int height) const {
+ return fNextStripY + height <= this->height();
+ }
+
+ void initRow(Row* row, int rowHeight) {
+ row->fLoc.set(0, fNextStripY);
+ row->fRowHeight = rowHeight;
+ fNextStripY += rowHeight;
+ }
+};
+
+bool GrRectanizerPow2::addRect(int width, int height, GrIPoint16* loc) {
+ if ((unsigned)width > (unsigned)this->width() ||
+ (unsigned)height > (unsigned)this->height()) {
+ return false;
+ }
+
+ int32_t area = width * height;
+
+ /*
+ We use bsearch, but there may be more than one row with the same height,
+ so we actually search for height-1, which can only be a pow2 itself if
+ height == 2. Thus we set a minimum height.
+ */
+ height = GrNextPow2(height);
+ if (height < MIN_HEIGHT_POW2) {
+ height = MIN_HEIGHT_POW2;
+ }
+
+ Row* row = &fRows[HeightToRowIndex(height)];
+ GrAssert(row->fRowHeight == 0 || row->fRowHeight == height);
+
+ if (0 == row->fRowHeight) {
+ if (!this->canAddStrip(height)) {
+ return false;
+ }
+ this->initRow(row, height);
+ } else {
+ if (!row->canAddWidth(width, this->width())) {
+ if (!this->canAddStrip(height)) {
+ return false;
+ }
+ // that row is now "full", so retarget our Row record for
+ // another one
+ this->initRow(row, height);
+ }
+ }
+
+ GrAssert(row->fRowHeight == height);
+ GrAssert(row->canAddWidth(width, this->width()));
+ *loc = row->fLoc;
+ row->fLoc.fX += width;
+
+ GrAssert(row->fLoc.fX <= this->width());
+ GrAssert(row->fLoc.fY <= this->height());
+ GrAssert(fNextStripY <= this->height());
+ fAreaSoFar += area;
+ return true;
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+GrRectanizer* GrRectanizer::Factory(int width, int height) {
+ return new GrRectanizerPow2(width, height);
+}
+
+
diff --git a/src/gpu/GrRectanizer_fifo.cpp b/src/gpu/GrRectanizer_fifo.cpp
new file mode 100644
index 0000000000..3bfc46f4a3
--- /dev/null
+++ b/src/gpu/GrRectanizer_fifo.cpp
@@ -0,0 +1,123 @@
+
+/*
+ * Copyright 2010 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+
+#include "GrRectanizer.h"
+#include "GrTBSearch.h"
+
+#define MIN_HEIGHT_POW2 2
+
+class GrRectanizerFIFO : public GrRectanizer {
+public:
+ GrRectanizerFIFO(int w, int h) : GrRectanizer(w, h) {
+ fNextStripY = 0;
+ fAreaSoFar = 0;
+ Gr_bzero(fRows, sizeof(fRows));
+ }
+
+ virtual ~GrRectanizerFIFO() {
+ }
+
+ virtual bool addRect(int w, int h, GrIPoint16* loc);
+
+ virtual float percentFull() const {
+ return fAreaSoFar / ((float)this->width() * this->height());
+ }
+
+ virtual int stripToPurge(int height) const { return -1; }
+ virtual void purgeStripAtY(int yCoord) { }
+
+ ///////////////////////////////////////////////////////////////////////////
+
+ struct Row {
+ GrIPoint16 fLoc;
+ int fRowHeight;
+
+ bool canAddWidth(int width, int containerWidth) const {
+ return fLoc.fX + width <= containerWidth;
+ }
+ };
+
+ Row fRows[16];
+
+ static int HeightToRowIndex(int height) {
+ GrAssert(height >= MIN_HEIGHT_POW2);
+ return 32 - Gr_clz(height - 1);
+ }
+
+ int fNextStripY;
+ int32_t fAreaSoFar;
+
+ bool canAddStrip(int height) const {
+ return fNextStripY + height <= this->height();
+ }
+
+ void initRow(Row* row, int rowHeight) {
+ row->fLoc.set(0, fNextStripY);
+ row->fRowHeight = rowHeight;
+ fNextStripY += rowHeight;
+ }
+};
+
+bool GrRectanizerFIFO::addRect(int width, int height, GrIPoint16* loc) {
+ if ((unsigned)width > (unsigned)this->width() ||
+ (unsigned)height > (unsigned)this->height()) {
+ return false;
+ }
+
+ int32_t area = width * height;
+
+ /*
+ We use bsearch, but there may be more than one row with the same height,
+ so we actually search for height-1, which can only be a pow2 itself if
+ height == 2. Thus we set a minimum height.
+ */
+ height = GrNextPow2(height);
+ if (height < MIN_HEIGHT_POW2) {
+ height = MIN_HEIGHT_POW2;
+ }
+
+ Row* row = &fRows[HeightToRowIndex(height)];
+ GrAssert(row->fRowHeight == 0 || row->fRowHeight == height);
+
+ if (0 == row->fRowHeight) {
+ if (!this->canAddStrip(height)) {
+ return false;
+ }
+ this->initRow(row, height);
+ } else {
+ if (!row->canAddWidth(width, this->width())) {
+ if (!this->canAddStrip(height)) {
+ return false;
+ }
+ // that row is now "full", so retarget our Row record for
+ // another one
+ this->initRow(row, height);
+ }
+ }
+
+ GrAssert(row->fRowHeight == height);
+ GrAssert(row->canAddWidth(width, this->width()));
+ *loc = row->fLoc;
+ row->fLoc.fX += width;
+
+ GrAssert(row->fLoc.fX <= this->width());
+ GrAssert(row->fLoc.fY <= this->height());
+ GrAssert(fNextStripY <= this->height());
+ fAreaSoFar += area;
+ return true;
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+GrRectanizer* GrRectanizer::Factory(int width, int height) {
+ return new GrRectanizerFIFO(width, height);
+}
+
+
diff --git a/src/gpu/GrRedBlackTree.h b/src/gpu/GrRedBlackTree.h
new file mode 100644
index 0000000000..da5ae3e3b2
--- /dev/null
+++ b/src/gpu/GrRedBlackTree.h
@@ -0,0 +1,1118 @@
+
+/*
+ * Copyright 2011 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+#ifndef GrRedBlackTree_DEFINED
+#define GrRedBlackTree_DEFINED
+
+#include "GrNoncopyable.h"
+
+template <typename T>
+class GrLess {
+public:
+ bool operator()(const T& a, const T& b) const { return a < b; }
+};
+
+template <typename T>
+class GrLess<T*> {
+public:
+ bool operator()(const T* a, const T* b) const { return *a < *b; }
+};
+
+/**
+ * In debug build this will cause full traversals of the tree when the validate
+ * is called on insert and remove. Useful for debugging but very slow.
+ */
+#define DEEP_VALIDATE 0
+
+/**
+ * A sorted tree that uses the red-black tree algorithm. Allows duplicate
+ * entries. Data is of type T and is compared using functor C. A single C object
+ * will be created and used for all comparisons.
+ */
+template <typename T, typename C = GrLess<T> >
+class GrRedBlackTree : public GrNoncopyable {
+public:
+ /**
+ * Creates an empty tree.
+ */
+ GrRedBlackTree();
+ virtual ~GrRedBlackTree();
+
+ /**
+     * Class used to iterate through the tree. The valid range of the tree
+ * is given by [begin(), end()). It is legal to dereference begin() but not
+ * end(). The iterator has preincrement and predecrement operators, it is
+     * legal to decrement end() if the tree is not empty to get the last
+ * element. However, a last() helper is provided.
+ */
+ class Iter;
+
+ /**
+ * Add an element to the tree. Duplicates are allowed.
+ * @param t the item to add.
+ * @return an iterator to the item.
+ */
+ Iter insert(const T& t);
+
+ /**
+ * Removes all items in the tree.
+ */
+ void reset();
+
+ /**
+ * @return true if there are no items in the tree, false otherwise.
+ */
+ bool empty() const {return 0 == fCount;}
+
+ /**
+ * @return the number of items in the tree.
+ */
+ int count() const {return fCount;}
+
+ /**
+ * @return an iterator to the first item in sorted order, or end() if empty
+ */
+ Iter begin();
+ /**
+     * Gets the last valid iterator. This is always valid, even on an empty tree.
+ * However, it can never be dereferenced. Useful as a loop terminator.
+ * @return an iterator that is just beyond the last item in sorted order.
+ */
+ Iter end();
+ /**
+ * @return an iterator that to the last item in sorted order, or end() if
+ * empty.
+ */
+ Iter last();
+
+ /**
+ * Finds an occurrence of an item.
+ * @param t the item to find.
+ * @return an iterator to a tree element equal to t or end() if none exists.
+ */
+ Iter find(const T& t);
+ /**
+ * Finds the first of an item in iterator order.
+ * @param t the item to find.
+ * @return an iterator to the first element equal to t or end() if
+ * none exists.
+ */
+ Iter findFirst(const T& t);
+ /**
+ * Finds the last of an item in iterator order.
+ * @param t the item to find.
+ * @return an iterator to the last element equal to t or end() if
+ * none exists.
+ */
+ Iter findLast(const T& t);
+ /**
+ * Gets the number of items in the tree equal to t.
+ * @param t the item to count.
+ * @return number of items equal to t in the tree
+ */
+ int countOf(const T& t) const;
+
+ /**
+ * Removes the item indicated by an iterator. The iterator will not be valid
+ * afterwards.
+ *
+ * @param iter iterator of item to remove. Must be valid (not end()).
+ */
+ void remove(const Iter& iter) { deleteAtNode(iter.fN); }
+
+ static void UnitTest();
+
+private:
+ enum Color {
+ kRed_Color,
+ kBlack_Color
+ };
+
+ enum Child {
+ kLeft_Child = 0,
+ kRight_Child = 1
+ };
+
+ struct Node {
+ T fItem;
+ Color fColor;
+
+ Node* fParent;
+ Node* fChildren[2];
+ };
+
+ void rotateRight(Node* n);
+ void rotateLeft(Node* n);
+
+ static Node* SuccessorNode(Node* x);
+ static Node* PredecessorNode(Node* x);
+
+ void deleteAtNode(Node* x);
+ static void RecursiveDelete(Node* x);
+
+ int onCountOf(const Node* n, const T& t) const;
+
+#if GR_DEBUG
+ void validate() const;
+ int checkNode(Node* n, int* blackHeight) const;
+ // checks relationship between a node and its children. allowRedRed means
+ // node may be in an intermediate state where a red parent has a red child.
+ bool validateChildRelations(const Node* n, bool allowRedRed) const;
+ // place to stick break point if validateChildRelations is failing.
+ bool validateChildRelationsFailed() const { return false; }
+#else
+ void validate() const {}
+#endif
+
+ int fCount;
+ Node* fRoot;
+ Node* fFirst;
+ Node* fLast;
+
+ const C fComp;
+};
+
+template <typename T, typename C>
+class GrRedBlackTree<T,C>::Iter {
+public:
+ Iter() {};
+ Iter(const Iter& i) {fN = i.fN; fTree = i.fTree;}
+ Iter& operator =(const Iter& i) {
+ fN = i.fN;
+ fTree = i.fTree;
+ return *this;
+ }
+ // altering the sort value of the item using this method will cause
+ // errors.
+ T& operator *() const { return fN->fItem; }
+ bool operator ==(const Iter& i) const {
+ return fN == i.fN && fTree == i.fTree;
+ }
+ bool operator !=(const Iter& i) const { return !(*this == i); }
+ Iter& operator ++() {
+ GrAssert(*this != fTree->end());
+ fN = SuccessorNode(fN);
+ return *this;
+ }
+ Iter& operator --() {
+ GrAssert(*this != fTree->begin());
+ if (NULL != fN) {
+ fN = PredecessorNode(fN);
+ } else {
+ *this = fTree->last();
+ }
+ return *this;
+ }
+
+private:
+ friend class GrRedBlackTree;
+ explicit Iter(Node* n, GrRedBlackTree* tree) {
+ fN = n;
+ fTree = tree;
+ }
+ Node* fN;
+ GrRedBlackTree* fTree;
+};
+
+template <typename T, typename C>
+GrRedBlackTree<T,C>::GrRedBlackTree() : fComp() {
+ fRoot = NULL;
+ fFirst = NULL;
+ fLast = NULL;
+ fCount = 0;
+ validate();
+}
+
+template <typename T, typename C>
+GrRedBlackTree<T,C>::~GrRedBlackTree() {
+ RecursiveDelete(fRoot);
+}
+
+template <typename T, typename C>
+typename GrRedBlackTree<T,C>::Iter GrRedBlackTree<T,C>::begin() {
+ return Iter(fFirst, this);
+}
+
+template <typename T, typename C>
+typename GrRedBlackTree<T,C>::Iter GrRedBlackTree<T,C>::end() {
+ return Iter(NULL, this);
+}
+
+template <typename T, typename C>
+typename GrRedBlackTree<T,C>::Iter GrRedBlackTree<T,C>::last() {
+ return Iter(fLast, this);
+}
+
+template <typename T, typename C>
+typename GrRedBlackTree<T,C>::Iter GrRedBlackTree<T,C>::find(const T& t) {
+ Node* n = fRoot;
+ while (NULL != n) {
+ if (fComp(t, n->fItem)) {
+ n = n->fChildren[kLeft_Child];
+ } else {
+ if (!fComp(n->fItem, t)) {
+ return Iter(n, this);
+ }
+ n = n->fChildren[kRight_Child];
+ }
+ }
+ return end();
+}
+
+template <typename T, typename C>
+typename GrRedBlackTree<T,C>::Iter GrRedBlackTree<T,C>::findFirst(const T& t) {
+ Node* n = fRoot;
+ Node* leftMost = NULL;
+ while (NULL != n) {
+ if (fComp(t, n->fItem)) {
+ n = n->fChildren[kLeft_Child];
+ } else {
+ if (!fComp(n->fItem, t)) {
+ // found one. check if another in left subtree.
+ leftMost = n;
+ n = n->fChildren[kLeft_Child];
+ } else {
+ n = n->fChildren[kRight_Child];
+ }
+ }
+ }
+ return Iter(leftMost, this);
+}
+
+template <typename T, typename C>
+typename GrRedBlackTree<T,C>::Iter GrRedBlackTree<T,C>::findLast(const T& t) {
+    // Mirror of findFirst: finds the right-most item equivalent to t by
+    // continuing into the right subtree after each match. Returns end() if
+    // no equivalent item exists.
+    Node* n = fRoot;
+    Node* rightMost = NULL;
+    while (NULL != n) {
+        if (fComp(t, n->fItem)) {
+            n = n->fChildren[kLeft_Child];
+        } else {
+            if (!fComp(n->fItem, t)) {
+                // found one. check if another in right subtree.
+                rightMost = n;
+            }
+            n = n->fChildren[kRight_Child];
+        }
+    }
+    return Iter(rightMost, this);
+}
+
+template <typename T, typename C>
+int GrRedBlackTree<T,C>::countOf(const T& t) const {
+    // Number of items equivalent to t (the tree permits duplicates).
+    return onCountOf(fRoot, t);
+}
+
+template <typename T, typename C>
+int GrRedBlackTree<T,C>::onCountOf(const Node* n, const T& t) const {
+    // Descends to the first match, then recursively counts matches in both
+    // of its subtrees (duplicates can land on either side of an equal node).
+    // this is count*log(n) :(
+    while (NULL != n) {
+        if (fComp(t, n->fItem)) {
+            n = n->fChildren[kLeft_Child];
+        } else {
+            if (!fComp(n->fItem, t)) {
+                int count = 1;
+                count += onCountOf(n->fChildren[kLeft_Child], t);
+                count += onCountOf(n->fChildren[kRight_Child], t);
+                return count;
+            }
+            n = n->fChildren[kRight_Child];
+        }
+    }
+    return 0;
+
+}
+
+template <typename T, typename C>
+void GrRedBlackTree<T,C>::reset() {
+ RecursiveDelete(fRoot);
+ fRoot = NULL;
+ fFirst = NULL;
+ fLast = NULL;
+ fCount = 0;
+}
+
+// Inserts t (duplicates are allowed) and rebalances using the classic
+// red-black insertion fixup: while the new node's uncle is red we recolor
+// and walk up; otherwise at most two rotations restore the invariants.
+// Also maintains the cached fFirst/fLast extremes. O(log n).
+template <typename T, typename C>
+typename GrRedBlackTree<T,C>::Iter GrRedBlackTree<T,C>::insert(const T& t) {
+    validate();
+
+    ++fCount;
+
+    Node* x = new Node;
+    x->fChildren[kLeft_Child] = NULL;
+    x->fChildren[kRight_Child] = NULL;
+    x->fItem = t;
+
+    Node* returnNode = x;
+
+    Node* gp = NULL;
+    Node* p = NULL;
+    Node* n = fRoot;
+    Child pc = kLeft_Child; // suppress uninit warning
+    Child gpc = kLeft_Child;
+
+    // descend to the leaf position; 'first'/'last' track whether we only
+    // ever went left/right, i.e. whether x becomes the new min/max.
+    bool first = true;
+    bool last = true;
+    while (NULL != n) {
+        gpc = pc;
+        pc = fComp(x->fItem, n->fItem) ? kLeft_Child : kRight_Child;
+        first = first && kLeft_Child == pc;
+        last = last && kRight_Child == pc;
+        gp = p;
+        p = n;
+        n = p->fChildren[pc];
+    }
+    if (last) {
+        fLast = x;
+    }
+    if (first) {
+        fFirst = x;
+    }
+
+    if (NULL == p) {
+        fRoot = x;
+        x->fColor = kBlack_Color;
+        x->fParent = NULL;
+        GrAssert(1 == fCount);
+        return Iter(returnNode, this);
+    }
+    p->fChildren[pc] = x;
+    x->fColor = kRed_Color;
+    x->fParent = p;
+
+    do {
+        // assumptions at loop start.
+        GrAssert(NULL != x);
+        GrAssert(kRed_Color == x->fColor);
+        // can't have a grandparent but no parent.
+        GrAssert(!(NULL != gp && NULL == p));
+        // make sure pc and gpc are correct
+        GrAssert(NULL == p || p->fChildren[pc] == x);
+        GrAssert(NULL == gp || gp->fChildren[gpc] == p);
+
+        // if x's parent is black then we didn't violate any of the
+        // red/black properties when we added x as red.
+        if (kBlack_Color == p->fColor) {
+            return Iter(returnNode, this);
+        }
+        // gp must be valid because if p was the root then it is black
+        GrAssert(NULL != gp);
+        // gp must be black since it's child, p, is red.
+        GrAssert(kBlack_Color == gp->fColor);
+
+
+        // x and its parent are red, violating red-black property.
+        Node* u = gp->fChildren[1-gpc];
+        // if x's uncle (p's sibling) is also red then we can flip
+        // p and u to black and make gp red. But then we have to recurse
+        // up to gp since it's parent may also be red.
+        if (NULL != u && kRed_Color == u->fColor) {
+            p->fColor = kBlack_Color;
+            u->fColor = kBlack_Color;
+            gp->fColor = kRed_Color;
+            x = gp;
+            p = x->fParent;
+            if (NULL == p) {
+                // x (prev gp) is the root, color it black and be done.
+                GrAssert(fRoot == x);
+                x->fColor = kBlack_Color;
+                validate();
+                return Iter(returnNode, this);
+            }
+            gp = p->fParent;
+            pc = (p->fChildren[kLeft_Child] == x) ? kLeft_Child :
+                                                    kRight_Child;
+            if (NULL != gp) {
+                gpc = (gp->fChildren[kLeft_Child] == p) ? kLeft_Child :
+                                                          kRight_Child;
+            }
+            continue;
+        } break;
+    } while (true);
+    // Here p is red but u is black and we still have to resolve the fact
+    // that x and p are both red.
+    GrAssert(NULL == gp->fChildren[1-gpc] || kBlack_Color == gp->fChildren[1-gpc]->fColor);
+    GrAssert(kRed_Color == x->fColor);
+    GrAssert(kRed_Color == p->fColor);
+    GrAssert(kBlack_Color == gp->fColor);
+
+    // make x be on the same side of p as p is of gp. If it isn't already
+    // the case then rotate x up to p and swap their labels.
+    if (pc != gpc) {
+        if (kRight_Child == pc) {
+            rotateLeft(p);
+            Node* temp = p;
+            p = x;
+            x = temp;
+            pc = kLeft_Child;
+        } else {
+            rotateRight(p);
+            Node* temp = p;
+            p = x;
+            x = temp;
+            pc = kRight_Child;
+        }
+    }
+    // we now rotate gp down, pulling up p to be it's new parent.
+    // gp's child, u, that is not affected we know to be black. gp's new
+    // child is p's previous child (x's pre-rotation sibling) which must be
+    // black since p is red.
+    GrAssert(NULL == p->fChildren[1-pc] ||
+             kBlack_Color == p->fChildren[1-pc]->fColor);
+    // Since gp's two children are black it can become red if p is made
+    // black. This leaves the black-height of both of p's new subtrees
+    // preserved and removes the red/red parent child relationship.
+    p->fColor = kBlack_Color;
+    gp->fColor = kRed_Color;
+    if (kLeft_Child == pc) {
+        rotateRight(gp);
+    } else {
+        rotateLeft(gp);
+    }
+    validate();
+    return Iter(returnNode, this);
+}
+
+
+// Right tree rotation about n: n's left child s is pulled up to take n's
+// place and n becomes s's right child. Colors are untouched; callers fix
+// colors themselves. See the diagram below.
+template <typename T, typename C>
+void GrRedBlackTree<T,C>::rotateRight(Node* n) {
+    /*            d?             d?
+     *           /              /
+     *          n              s
+     *         / \     --->   / \
+     *        s   a?        c?   n
+     *       / \                / \
+     *      c?  b?            b?   a?
+     */
+    Node* d = n->fParent;
+    Node* s = n->fChildren[kLeft_Child];
+    GrAssert(NULL != s);
+    Node* b = s->fChildren[kRight_Child];
+
+    if (NULL != d) {
+        Child c = d->fChildren[kLeft_Child] == n ? kLeft_Child :
+                                                   kRight_Child;
+        d->fChildren[c] = s;
+    } else {
+        // n was the root; s replaces it.
+        GrAssert(fRoot == n);
+        fRoot = s;
+    }
+    s->fParent = d;
+    s->fChildren[kRight_Child] = n;
+    n->fParent = s;
+    n->fChildren[kLeft_Child] = b;
+    if (NULL != b) {
+        b->fParent = n;
+    }
+
+    GR_DEBUGASSERT(validateChildRelations(d, true));
+    GR_DEBUGASSERT(validateChildRelations(s, true));
+    GR_DEBUGASSERT(validateChildRelations(n, false));
+    GR_DEBUGASSERT(validateChildRelations(n->fChildren[kRight_Child], true));
+    GR_DEBUGASSERT(validateChildRelations(b, true));
+    GR_DEBUGASSERT(validateChildRelations(s->fChildren[kLeft_Child], true));
+}
+
+// Mirror image of rotateRight: n's right child s is pulled up to take n's
+// place and n becomes s's left child. Colors are untouched.
+template <typename T, typename C>
+void GrRedBlackTree<T,C>::rotateLeft(Node* n) {
+
+    Node* d = n->fParent;
+    Node* s = n->fChildren[kRight_Child];
+    GrAssert(NULL != s);
+    Node* b = s->fChildren[kLeft_Child];
+
+    if (NULL != d) {
+        Child c = d->fChildren[kRight_Child] == n ? kRight_Child :
+                                                    kLeft_Child;
+        d->fChildren[c] = s;
+    } else {
+        // n was the root; s replaces it.
+        GrAssert(fRoot == n);
+        fRoot = s;
+    }
+    s->fParent = d;
+    s->fChildren[kLeft_Child] = n;
+    n->fParent = s;
+    n->fChildren[kRight_Child] = b;
+    if (NULL != b) {
+        b->fParent = n;
+    }
+
+    GR_DEBUGASSERT(validateChildRelations(d, true));
+    GR_DEBUGASSERT(validateChildRelations(s, true));
+    GR_DEBUGASSERT(validateChildRelations(n, true));
+    GR_DEBUGASSERT(validateChildRelations(n->fChildren[kLeft_Child], true));
+    GR_DEBUGASSERT(validateChildRelations(b, true));
+    GR_DEBUGASSERT(validateChildRelations(s->fChildren[kRight_Child], true));
+}
+
+template <typename T, typename C>
+typename GrRedBlackTree<T,C>::Node* GrRedBlackTree<T,C>::SuccessorNode(Node* x) {
+ GrAssert(NULL != x);
+ if (NULL != x->fChildren[kRight_Child]) {
+ x = x->fChildren[kRight_Child];
+ while (NULL != x->fChildren[kLeft_Child]) {
+ x = x->fChildren[kLeft_Child];
+ }
+ return x;
+ }
+ while (NULL != x->fParent && x == x->fParent->fChildren[kRight_Child]) {
+ x = x->fParent;
+ }
+ return x->fParent;
+}
+
+template <typename T, typename C>
+typename GrRedBlackTree<T,C>::Node* GrRedBlackTree<T,C>::PredecessorNode(Node* x) {
+ GrAssert(NULL != x);
+ if (NULL != x->fChildren[kLeft_Child]) {
+ x = x->fChildren[kLeft_Child];
+ while (NULL != x->fChildren[kRight_Child]) {
+ x = x->fChildren[kRight_Child];
+ }
+ return x;
+ }
+ while (NULL != x->fParent && x == x->fParent->fChildren[kLeft_Child]) {
+ x = x->fParent;
+ }
+ return x->fParent;
+}
+
+// Removes the node x from the tree and rebalances with the standard
+// red-black deletion fixup (sibling recoloring/rotation, walking up the
+// tree while black-heights disagree). An interior node with two children
+// is reduced to the one/zero-child case by copying its in-order
+// successor's item into it and deleting the successor instead. The cached
+// fFirst/fLast extremes are maintained. O(log n).
+template <typename T, typename C>
+void GrRedBlackTree<T,C>::deleteAtNode(Node* x) {
+    GrAssert(NULL != x);
+    validate();
+    --fCount;
+
+    bool hasLeft =  NULL != x->fChildren[kLeft_Child];
+    bool hasRight = NULL != x->fChildren[kRight_Child];
+    Child c = hasLeft ? kLeft_Child : kRight_Child;
+
+    if (hasLeft && hasRight) {
+        // first and last can't have two children.
+        GrAssert(fFirst != x);
+        GrAssert(fLast != x);
+        // if x is an interior node then we find it's successor
+        // and swap them.
+        Node* s = x->fChildren[kRight_Child];
+        while (NULL != s->fChildren[kLeft_Child]) {
+            s = s->fChildren[kLeft_Child];
+        }
+        GrAssert(NULL != s);
+        // this might be expensive relative to swapping node ptrs around.
+        // depends on T.
+        x->fItem = s->fItem;
+        x = s;
+        c = kRight_Child;
+    } else if (NULL == x->fParent) {
+        // if x was the root we just replace it with its child and make
+        // the new root (if the tree is not empty) black.
+        GrAssert(fRoot == x);
+        fRoot = x->fChildren[c];
+        if (NULL != fRoot) {
+            fRoot->fParent = NULL;
+            fRoot->fColor = kBlack_Color;
+            if (x == fLast) {
+                GrAssert(c == kLeft_Child);
+                fLast = fRoot;
+            } else if (x == fFirst) {
+                GrAssert(c == kRight_Child);
+                fFirst = fRoot;
+            }
+        } else {
+            GrAssert(fFirst == fLast && x == fFirst);
+            fFirst = NULL;
+            fLast = NULL;
+            GrAssert(0 == fCount);
+        }
+        delete x;
+        validate();
+        return;
+    }
+
+    Child pc;
+    Node* p = x->fParent;
+    pc = p->fChildren[kLeft_Child] == x ? kLeft_Child : kRight_Child;
+
+    if (NULL == x->fChildren[c]) {
+        if (fLast == x) {
+            fLast = p;
+            GrAssert(p == PredecessorNode(x));
+        } else if (fFirst == x) {
+            fFirst = p;
+            GrAssert(p == SuccessorNode(x));
+        }
+        // x has two implicit black children.
+        Color xcolor = x->fColor;
+        p->fChildren[pc] = NULL;
+        delete x;
+        x = NULL;
+        // when x is red it can be with an implicit black leaf without
+        // violating any of the red-black tree properties.
+        if (kRed_Color == xcolor) {
+            validate();
+            return;
+        }
+        // s is p's other child (x's sibling)
+        Node* s = p->fChildren[1-pc];
+
+        //s cannot be an implicit black node because the original
+        // black-height at x was >= 2 and s's black-height must equal the
+        // initial black height of x.
+        GrAssert(NULL != s);
+        GrAssert(p == s->fParent);
+
+        // assigned in loop
+        Node* sl;
+        Node* sr;
+        bool slRed;
+        bool srRed;
+
+        do {
+            // When we start this loop x may already be deleted it is/was
+            // p's child on its pc side. x's children are/were black. The
+            // first time through the loop they are implict children.
+            // On later passes we will be walking up the tree and they will
+            // be real nodes.
+            // The x side of p has a black-height that is one less than the
+            // s side. It must be rebalanced.
+            GrAssert(NULL != s);
+            GrAssert(p == s->fParent);
+            GrAssert(NULL == x || x->fParent == p);
+
+            //sl and sr are s's children, which may be implicit.
+            sl = s->fChildren[kLeft_Child];
+            sr = s->fChildren[kRight_Child];
+
+            // if the s is red we will rotate s and p, swap their colors so
+            // that x's new sibling is black
+            if (kRed_Color == s->fColor) {
+                // if s is red then it's parent must be black.
+                GrAssert(kBlack_Color == p->fColor);
+                // s's children must also be black since s is red. They can't
+                // be implicit since s is red and it's black-height is >= 2.
+                GrAssert(NULL != sl && kBlack_Color == sl->fColor);
+                GrAssert(NULL != sr && kBlack_Color == sr->fColor);
+                p->fColor = kRed_Color;
+                s->fColor = kBlack_Color;
+                if (kLeft_Child == pc) {
+                    rotateLeft(p);
+                    s = sl;
+                } else {
+                    rotateRight(p);
+                    s = sr;
+                }
+                sl = s->fChildren[kLeft_Child];
+                sr = s->fChildren[kRight_Child];
+            }
+            // x and s are now both black.
+            GrAssert(kBlack_Color == s->fColor);
+            GrAssert(NULL == x || kBlack_Color == x->fColor);
+            GrAssert(p == s->fParent);
+            GrAssert(NULL == x || p == x->fParent);
+
+            // when x is deleted its subtree will have reduced black-height.
+            slRed = (NULL != sl && kRed_Color == sl->fColor);
+            srRed = (NULL != sr && kRed_Color == sr->fColor);
+            if (!slRed && !srRed) {
+                // if s can be made red that will balance out x's removal
+                // to make both subtrees of p have the same black-height.
+                if (kBlack_Color == p->fColor) {
+                    s->fColor = kRed_Color;
+                    // now subtree at p has black-height of one less than
+                    // p's parent's other child's subtree. We move x up to
+                    // p and go through the loop again. At the top of loop
+                    // we assumed x and x's children are black, which holds
+                    // by above ifs.
+                    // if p is the root there is no other subtree to balance
+                    // against.
+                    x = p;
+                    p = x->fParent;
+                    if (NULL == p) {
+                        GrAssert(fRoot == x);
+                        validate();
+                        return;
+                    } else {
+                        pc = p->fChildren[kLeft_Child] == x ? kLeft_Child :
+                                                              kRight_Child;
+
+                    }
+                    s = p->fChildren[1-pc];
+                    GrAssert(NULL != s);
+                    GrAssert(p == s->fParent);
+                    continue;
+                } else if (kRed_Color == p->fColor) {
+                    // we can make p black and s red. This balance out p's
+                    // two subtrees and keep the same black-height as it was
+                    // before the delete.
+                    s->fColor = kRed_Color;
+                    p->fColor = kBlack_Color;
+                    validate();
+                    return;
+                }
+            }
+            break;
+        } while (true);
+        // if we made it here one or both of sl and sr is red.
+        // s and x are black. We make sure that a red child is on
+        // the same side of s as s is of p.
+        GrAssert(slRed || srRed);
+        if (kLeft_Child == pc && !srRed) {
+            s->fColor = kRed_Color;
+            sl->fColor = kBlack_Color;
+            rotateRight(s);
+            sr = s;
+            s = sl;
+            //sl = s->fChildren[kLeft_Child]; don't need this
+        } else if (kRight_Child == pc && !slRed) {
+            s->fColor = kRed_Color;
+            sr->fColor = kBlack_Color;
+            rotateLeft(s);
+            sl = s;
+            s = sr;
+            //sr = s->fChildren[kRight_Child]; don't need this
+        }
+        // now p is either red or black, x and s are red and s's 1-pc
+        // child is red.
+        // We rotate p towards x, pulling s up to replace p. We make
+        // p be black and s takes p's old color.
+        // Whether p was red or black, we've increased its pc subtree
+        // rooted at x by 1 (balancing the imbalance at the start) and
+        // we've also its subtree rooted at s's black-height by 1. This
+        // can be balanced by making s's red child be black.
+        s->fColor = p->fColor;
+        p->fColor = kBlack_Color;
+        if (kLeft_Child == pc) {
+            GrAssert(NULL != sr && kRed_Color == sr->fColor);
+            sr->fColor = kBlack_Color;
+            rotateLeft(p);
+        } else {
+            GrAssert(NULL != sl && kRed_Color == sl->fColor);
+            sl->fColor = kBlack_Color;
+            rotateRight(p);
+        }
+        validate();
+    }
+    else {
+        // x has exactly one implicit black child. x cannot be red.
+        // Proof by contradiction: Assume X is red. Let c0 be x's implicit
+        // child and c1 be its non-implicit child. c1 must be black because
+        // red nodes always have two black children. Then the two subtrees
+        // of x rooted at c0 and c1 will have different black-heights.
+        GrAssert(kBlack_Color == x->fColor);
+        // So we know x is black and has one implicit black child, c0. c1
+        // must be red, otherwise the subtree at c1 will have a different
+        // black-height than the subtree rooted at c0.
+        GrAssert(kRed_Color == x->fChildren[c]->fColor);
+        // replace x with c1, making c1 black, preserves all red-black tree
+        // props.
+        Node* c1 = x->fChildren[c];
+        if (x == fFirst) {
+            GrAssert(c == kRight_Child);
+            fFirst = c1;
+            while (NULL != fFirst->fChildren[kLeft_Child]) {
+                fFirst = fFirst->fChildren[kLeft_Child];
+            }
+            GrAssert(fFirst == SuccessorNode(x));
+        } else if (x == fLast) {
+            GrAssert(c == kLeft_Child);
+            fLast = c1;
+            while (NULL != fLast->fChildren[kRight_Child]) {
+                fLast = fLast->fChildren[kRight_Child];
+            }
+            GrAssert(fLast == PredecessorNode(x));
+        }
+        c1->fParent = p;
+        p->fChildren[pc] = c1;
+        c1->fColor = kBlack_Color;
+        delete x;
+        validate();
+    }
+    validate();
+}
+
+template <typename T, typename C>
+void GrRedBlackTree<T,C>::RecursiveDelete(Node* x) {
+ if (NULL != x) {
+ RecursiveDelete(x->fChildren[kLeft_Child]);
+ RecursiveDelete(x->fChildren[kRight_Child]);
+ delete x;
+ }
+}
+
+#if GR_DEBUG
+template <typename T, typename C>
+void GrRedBlackTree<T,C>::validate() const {
+ if (fCount) {
+ GrAssert(NULL == fRoot->fParent);
+ GrAssert(NULL != fFirst);
+ GrAssert(NULL != fLast);
+
+ GrAssert(kBlack_Color == fRoot->fColor);
+ if (1 == fCount) {
+ GrAssert(fFirst == fRoot);
+ GrAssert(fLast == fRoot);
+ GrAssert(0 == fRoot->fChildren[kLeft_Child]);
+ GrAssert(0 == fRoot->fChildren[kRight_Child]);
+ }
+ } else {
+ GrAssert(NULL == fRoot);
+ GrAssert(NULL == fFirst);
+ GrAssert(NULL == fLast);
+ }
+#if DEEP_VALIDATE
+ int bh;
+ int count = checkNode(fRoot, &bh);
+ GrAssert(count == fCount);
+#endif
+}
+
+// Recursively verifies the subtree at n: parent/child links, ordering
+// against the cached extremes, and equal black-heights of the two
+// subtrees. On input *bh is the accumulated black-height above n; on
+// output it includes n's subtree. Returns the number of nodes under n.
+template <typename T, typename C>
+int GrRedBlackTree<T,C>::checkNode(Node* n, int* bh) const {
+    if (NULL != n) {
+        GrAssert(validateChildRelations(n, false));
+        if (kBlack_Color == n->fColor) {
+            *bh += 1;
+        }
+        // every item must lie within [fFirst, fLast] per the comparator.
+        GrAssert(!fComp(n->fItem, fFirst->fItem));
+        GrAssert(!fComp(fLast->fItem, n->fItem));
+        int leftBh = *bh;
+        int rightBh = *bh;
+        int cl = checkNode(n->fChildren[kLeft_Child],  &leftBh);
+        int cr = checkNode(n->fChildren[kRight_Child], &rightBh);
+        GrAssert(leftBh == rightBh);
+        *bh = leftBh;
+        return 1 + cl + cr;
+    }
+    return 0;
+}
+
+// Checks the local invariants around node n: children are distinct, a
+// child is never also the parent, parent back-pointers are correct, the
+// left/right children order correctly relative to n, and (unless
+// allowRedRed, used mid-rotation) no red node has a red child. NULL n is
+// trivially valid. Returns false via validateChildRelationsFailed() on
+// any violation so the caller's GR_DEBUGASSERT fires.
+template <typename T, typename C>
+bool GrRedBlackTree<T,C>::validateChildRelations(const Node* n,
+                                                 bool allowRedRed) const {
+    if (NULL != n) {
+        if (NULL != n->fChildren[kLeft_Child] ||
+            NULL != n->fChildren[kRight_Child]) {
+            if (n->fChildren[kLeft_Child] == n->fChildren[kRight_Child]) {
+                return validateChildRelationsFailed();
+            }
+            if (n->fChildren[kLeft_Child] == n->fParent &&
+                NULL != n->fParent) {
+                return validateChildRelationsFailed();
+            }
+            if (n->fChildren[kRight_Child] == n->fParent &&
+                NULL != n->fParent) {
+                return validateChildRelationsFailed();
+            }
+            if (NULL != n->fChildren[kLeft_Child]) {
+                if (!allowRedRed &&
+                    kRed_Color == n->fChildren[kLeft_Child]->fColor &&
+                    kRed_Color == n->fColor) {
+                    return validateChildRelationsFailed();
+                }
+                if (n->fChildren[kLeft_Child]->fParent != n) {
+                    return validateChildRelationsFailed();
+                }
+                if (!(fComp(n->fChildren[kLeft_Child]->fItem, n->fItem) ||
+                      (!fComp(n->fChildren[kLeft_Child]->fItem, n->fItem) &&
+                       !fComp(n->fItem, n->fChildren[kLeft_Child]->fItem)))) {
+                    return validateChildRelationsFailed();
+                }
+            }
+            if (NULL != n->fChildren[kRight_Child]) {
+                if (!allowRedRed &&
+                    kRed_Color == n->fChildren[kRight_Child]->fColor &&
+                    kRed_Color == n->fColor) {
+                    return validateChildRelationsFailed();
+                }
+                if (n->fChildren[kRight_Child]->fParent != n) {
+                    return validateChildRelationsFailed();
+                }
+                if (!(fComp(n->fItem, n->fChildren[kRight_Child]->fItem) ||
+                      (!fComp(n->fChildren[kRight_Child]->fItem, n->fItem) &&
+                       !fComp(n->fItem, n->fChildren[kRight_Child]->fItem)))) {
+                    return validateChildRelationsFailed();
+                }
+            }
+        }
+    }
+    return true;
+}
+#endif
+
+#include "GrRandom.h"
+
+// Exhaustive debug self-test: inserts ~10K random ints, then verifies
+// ordering, iteration (forward and backward), countOf/find/findFirst/
+// findLast, and removal in several patterns (first/last/arbitrary match,
+// removing begin(), removing last()), re-verifying after each phase.
+template <typename T, typename C>
+void GrRedBlackTree<T,C>::UnitTest() {
+    GrRedBlackTree<int> tree;
+    // NOTE(review): this lowercase 'iter' typedef is never used; the body
+    // below uses the enclosing template's Iter type instead.
+    typedef GrRedBlackTree<int>::Iter iter;
+
+    GrRandom r;
+
+    int count[100] = {0};
+    // add 10K ints
+    for (int i = 0; i < 10000; ++i) {
+        int x = r.nextU()%100;
+        Iter xi = tree.insert(x);
+        GrAssert(*xi == x);
+        ++count[x];
+    }
+
+    tree.insert(0);
+    ++count[0];
+    tree.insert(99);
+    ++count[99];
+    GrAssert(*tree.begin() == 0);
+    GrAssert(*tree.last() == 99);
+    GrAssert(--(++tree.begin()) == tree.begin());
+    GrAssert(--tree.end() == tree.last());
+    GrAssert(tree.count() == 10002);
+
+    int c = 0;
+    // check that we iterate through the correct number of
+    // elements and they are properly sorted.
+    for (Iter a = tree.begin(); tree.end() != a; ++a) {
+        Iter b = a;
+        ++b;
+        ++c;
+        GrAssert(b == tree.end() || *a <= *b);
+    }
+    GrAssert(c == tree.count());
+
+    // check that the tree reports the correct number of each int
+    // and that we can iterate through them correctly both forward
+    // and backward.
+    for (int i = 0; i < 100; ++i) {
+        int c;
+        c = tree.countOf(i);
+        GrAssert(c == count[i]);
+        c = 0;
+        Iter iter = tree.findFirst(i);
+        while (iter != tree.end() && *iter == i) {
+            ++c;
+            ++iter;
+        }
+        GrAssert(count[i] == c);
+        c = 0;
+        iter = tree.findLast(i);
+        if (iter != tree.end()) {
+            do {
+                if (*iter == i) {
+                    ++c;
+                } else {
+                    break;
+                }
+                if (iter != tree.begin()) {
+                    --iter;
+                } else {
+                    break;
+                }
+            } while (true);
+        }
+        GrAssert(c == count[i]);
+    }
+    // remove all the ints between 25 and 74. Randomly chose to remove
+    // the first, last, or any entry for each.
+    for (int i = 25; i < 75; ++i) {
+        while (0 != tree.countOf(i)) {
+            --count[i];
+            int x = r.nextU() % 3;
+            Iter iter;
+            switch (x) {
+                case 0:
+                    iter = tree.findFirst(i);
+                    break;
+                case 1:
+                    iter = tree.findLast(i);
+                    break;
+                case 2:
+                default:
+                    iter = tree.find(i);
+                    break;
+            }
+            tree.remove(iter);
+        }
+        GrAssert(0 == count[i]);
+        GrAssert(tree.findFirst(i) == tree.end());
+        GrAssert(tree.findLast(i) == tree.end());
+        GrAssert(tree.find(i) == tree.end());
+    }
+    // remove all of the 0 entries. (tests removing begin())
+    GrAssert(*tree.begin() == 0);
+    GrAssert(*(--tree.end()) == 99);
+    while (0 != tree.countOf(0)) {
+        --count[0];
+        tree.remove(tree.find(0));
+    }
+    GrAssert(0 == count[0]);
+    GrAssert(tree.findFirst(0) == tree.end());
+    GrAssert(tree.findLast(0) == tree.end());
+    GrAssert(tree.find(0) == tree.end());
+    GrAssert(0 < *tree.begin());
+
+    // remove all the 99 entries (tests removing last()).
+    while (0 != tree.countOf(99)) {
+        --count[99];
+        tree.remove(tree.find(99));
+    }
+    GrAssert(0 == count[99]);
+    GrAssert(tree.findFirst(99) == tree.end());
+    GrAssert(tree.findLast(99) == tree.end());
+    GrAssert(tree.find(99) == tree.end());
+    GrAssert(99 > *(--tree.end()));
+    GrAssert(tree.last() == --tree.end());
+
+    // Make sure iteration still goes through correct number of entries
+    // and is still sorted correctly.
+    c = 0;
+    for (Iter a = tree.begin(); tree.end() != a; ++a) {
+        Iter b = a;
+        ++b;
+        ++c;
+        GrAssert(b == tree.end() || *a <= *b);
+    }
+    GrAssert(c == tree.count());
+
+    // repeat check that correct number of each entry is in the tree
+    // and iterates correctly both forward and backward.
+    for (int i = 0; i < 100; ++i) {
+        GrAssert(tree.countOf(i) == count[i]);
+        int c = 0;
+        Iter iter = tree.findFirst(i);
+        while (iter != tree.end() && *iter == i) {
+            ++c;
+            ++iter;
+        }
+        GrAssert(count[i] == c);
+        c = 0;
+        iter = tree.findLast(i);
+        if (iter != tree.end()) {
+            do {
+                if (*iter == i) {
+                    ++c;
+                } else {
+                    break;
+                }
+                if (iter != tree.begin()) {
+                    --iter;
+                } else {
+                    break;
+                }
+            } while (true);
+        }
+        GrAssert(count[i] == c);
+    }
+
+    // remove all entries
+    while (!tree.empty()) {
+        tree.remove(tree.begin());
+    }
+
+    // test reset on empty tree.
+    tree.reset();
+}
+
+#endif
diff --git a/src/gpu/GrRenderTarget.cpp b/src/gpu/GrRenderTarget.cpp
new file mode 100644
index 0000000000..a5f1216571
--- /dev/null
+++ b/src/gpu/GrRenderTarget.cpp
@@ -0,0 +1,75 @@
+
+/*
+ * Copyright 2011 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+#include "GrRenderTarget.h"
+
+#include "GrContext.h"
+#include "GrGpu.h"
+#include "GrStencilBuffer.h"
+
+// Reads back a (left, top, width, height) rectangle of pixels in 'config'
+// format into 'buffer'. Routed through the GrContext rather than the GrGpu
+// directly so any pending draws are flushed first. Returns the context's
+// success/failure result.
+bool GrRenderTarget::readPixels(int left, int top, int width, int height,
+                                GrPixelConfig config, void* buffer) {
+    // go through context so that all necessary flushing occurs
+    GrContext* context = this->getGpu()->getContext();
+    GrAssert(NULL != context);
+    return context->readRenderTargetPixels(this,
+                                           left, top,
+                                           width, height,
+                                           config, buffer);
+}
+
+size_t GrRenderTarget::sizeInBytes() const {
+ int colorBits;
+ if (kUnknown_GrPixelConfig == fConfig) {
+ colorBits = 32; // don't know, make a guess
+ } else {
+ colorBits = GrBytesPerPixel(fConfig);
+ }
+ uint64_t size = fAllocatedWidth;
+ size *= fAllocatedHeight;
+ size *= colorBits;
+ size *= GrMax(1,fSampleCnt);
+ return (size_t)(size / 8);
+}
+
+// Marks part of a multisampled target as needing an MSAA resolve. If rect
+// is NULL the whole target is flagged; otherwise rect is unioned into the
+// pending resolve rect and clipped to the target's bounds. No-op for
+// targets that can't be resolved.
+void GrRenderTarget::flagAsNeedingResolve(const GrIRect* rect) {
+    if (kCanResolve_ResolveType == getResolveType()) {
+        if (NULL != rect) {
+            fResolveRect.join(*rect);
+            if (!fResolveRect.intersect(0, 0, this->width(), this->height())) {
+                fResolveRect.setEmpty();
+            }
+        } else {
+            fResolveRect.setLTRB(0, 0, this->width(), this->height());
+        }
+    }
+}
+
+// Replaces (rather than accumulates) the pending resolve rect. An empty or
+// fully-clipped rect resets it to the "largest inverted" sentinel meaning
+// no resolve is pending.
+void GrRenderTarget::overrideResolveRect(const GrIRect rect) {
+    fResolveRect = rect;
+    if (fResolveRect.isEmpty()) {
+        fResolveRect.setLargestInverted();
+    } else {
+        if (!fResolveRect.intersect(0, 0, this->width(), this->height())) {
+            fResolveRect.setLargestInverted();
+        }
+    }
+}
+
+void GrRenderTarget::setStencilBuffer(GrStencilBuffer* stencilBuffer) {
+ if (NULL != fStencilBuffer) {
+ fStencilBuffer->wasDetachedFromRenderTarget(this);
+ fStencilBuffer->unref();
+ }
+ fStencilBuffer = stencilBuffer;
+ if (NULL != fStencilBuffer) {
+ fStencilBuffer->wasAttachedToRenderTarget(this);
+ fStencilBuffer->ref();
+ }
+}
diff --git a/src/gpu/GrResource.cpp b/src/gpu/GrResource.cpp
new file mode 100644
index 0000000000..5d7375ffb9
--- /dev/null
+++ b/src/gpu/GrResource.cpp
@@ -0,0 +1,34 @@
+
+/*
+ * Copyright 2011 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+#include "GrResource.h"
+#include "GrGpu.h"
+
+// A GrResource registers itself with its owning GrGpu on construction so
+// the gpu can release/abandon all live resources (e.g. on context loss).
+// fNext/fPrevious are the gpu's intrusive resource-list links.
+GrResource::GrResource(GrGpu* gpu) {
+    fGpu = gpu;
+    fNext = NULL;
+    fPrevious = NULL;
+    fGpu->insertResource(this);
+}
+
+void GrResource::release() {
+ if (NULL != fGpu) {
+ this->onRelease();
+ fGpu->removeResource(this);
+ fGpu = NULL;
+ }
+}
+
+void GrResource::abandon() {
+ if (NULL != fGpu) {
+ this->onAbandon();
+ fGpu->removeResource(this);
+ fGpu = NULL;
+ }
+}
diff --git a/src/gpu/GrResourceCache.cpp b/src/gpu/GrResourceCache.cpp
new file mode 100644
index 0000000000..3094721cee
--- /dev/null
+++ b/src/gpu/GrResourceCache.cpp
@@ -0,0 +1,376 @@
+
+/*
+ * Copyright 2010 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+
+#include "GrResourceCache.h"
+#include "GrResource.h"
+
+// A cache entry pairs a lookup key with the resource it owns. Entries are
+// created unlocked and outside the cache's intrusive list (fPrev/fNext).
+GrResourceEntry::GrResourceEntry(const GrResourceKey& key, GrResource* resource)
+        : fKey(key), fResource(resource) {
+    fLockCount = 0;
+    fPrev = fNext = NULL;
+
+    // we assume ownership of the resource, and will unref it when we die
+    GrAssert(resource);
+}
+
+// Drops the ownership ref taken in the constructor.
+GrResourceEntry::~GrResourceEntry() {
+    fResource->unref();
+}
+
+#if GR_DEBUG
+// Debug-only sanity check of one entry: non-negative lock count, a live
+// resource, and the resource's own invariants.
+void GrResourceEntry::validate() const {
+    GrAssert(fLockCount >= 0);
+    GrAssert(fResource);
+    fResource->validate();
+}
+#endif
+
+///////////////////////////////////////////////////////////////////////////////
+
+// Creates an empty cache with a budget of maxCount entries / maxBytes of
+// resource memory. fPurging guards against reentrant purges (see
+// purgeAsNeeded).
+GrResourceCache::GrResourceCache(int maxCount, size_t maxBytes) :
+        fMaxCount(maxCount),
+        fMaxBytes(maxBytes) {
+    fEntryCount          = 0;
+    fUnlockedEntryCount  = 0;
+    fEntryBytes          = 0;
+    fClientDetachedCount = 0;
+    fClientDetachedBytes = 0;
+
+    fHead = fTail = NULL;
+    fPurging = false;
+}
+
+// Validates, then purges every unlocked entry via removeAll().
+GrResourceCache::~GrResourceCache() {
+    GrAutoResourceCacheValidate atcv(this);
+
+    this->removeAll();
+}
+
+void GrResourceCache::getLimits(int* maxResources, size_t* maxResourceBytes) const{
+ if (maxResources) {
+ *maxResources = fMaxCount;
+ }
+ if (maxResourceBytes) {
+ *maxResourceBytes = fMaxBytes;
+ }
+}
+
+void GrResourceCache::setLimits(int maxResources, size_t maxResourceBytes) {
+ bool smaller = (maxResources < fMaxCount) || (maxResourceBytes < fMaxBytes);
+
+ fMaxCount = maxResources;
+ fMaxBytes = maxResourceBytes;
+
+ if (smaller) {
+ this->purgeAsNeeded();
+ }
+}
+
+// Unlinks entry from the doubly-linked LRU list and updates the counters.
+// clientDetach distinguishes a client taking the entry out of the cache
+// (tracked in the "detached" stats) from an internal move/purge (tracked
+// in the normal entry stats).
+void GrResourceCache::internalDetach(GrResourceEntry* entry,
+                                    bool clientDetach) {
+    GrResourceEntry* prev = entry->fPrev;
+    GrResourceEntry* next = entry->fNext;
+
+    if (prev) {
+        prev->fNext = next;
+    } else {
+        fHead = next;
+    }
+    if (next) {
+        next->fPrev = prev;
+    } else {
+        fTail = prev;
+    }
+    if (!entry->isLocked()) {
+        --fUnlockedEntryCount;
+    }
+
+    // update our stats
+    if (clientDetach) {
+        fClientDetachedCount += 1;
+        fClientDetachedBytes += entry->resource()->sizeInBytes();
+    } else {
+        fEntryCount -= 1;
+        fEntryBytes -= entry->resource()->sizeInBytes();
+    }
+}
+
+// Links entry at the head (most-recently-used end) of the LRU list and
+// updates the counters. clientReattach means a previously detached entry
+// is being returned by a client, so the "detached" stats shrink instead
+// of the normal entry stats growing.
+void GrResourceCache::attachToHead(GrResourceEntry* entry,
+                                   bool clientReattach) {
+    entry->fPrev = NULL;
+    entry->fNext = fHead;
+    if (fHead) {
+        fHead->fPrev = entry;
+    }
+    fHead = entry;
+    if (NULL == fTail) {
+        fTail = entry;
+    }
+    if (!entry->isLocked()) {
+        ++fUnlockedEntryCount;
+    }
+
+    // update our stats
+    if (clientReattach) {
+        fClientDetachedCount -= 1;
+        fClientDetachedBytes -= entry->resource()->sizeInBytes();
+    } else {
+        fEntryCount += 1;
+        fEntryBytes += entry->resource()->sizeInBytes();
+    }
+}
+
+// Search-key adapter for the GrTHashCache lookup: wraps a GrResourceKey
+// and supplies the hash/less-than/equality hooks the hash template
+// expects. The GR_DEBUG overloads let the hash table validate itself by
+// comparing entries directly.
+class GrResourceCache::Key {
+    typedef GrResourceEntry T;
+
+    const GrResourceKey& fKey;
+public:
+    Key(const GrResourceKey& key) : fKey(key) {}
+
+    uint32_t getHash() const { return fKey.hashIndex(); }
+
+    static bool LT(const T& entry, const Key& key) {
+        return entry.key() < key.fKey;
+    }
+    static bool EQ(const T& entry, const Key& key) {
+        return entry.key() == key.fKey;
+    }
+#if GR_DEBUG
+    static uint32_t GetHash(const T& entry) {
+        return entry.key().hashIndex();
+    }
+    static bool LT(const T& a, const T& b) {
+        return a.key() < b.key();
+    }
+    static bool EQ(const T& a, const T& b) {
+        return a.key() == b.key();
+    }
+#endif
+};
+
+// Looks up key in the cache. On a hit, moves the entry to the MRU end of
+// the list and locks it so it can't be purged; kNested_LockType allows
+// stacking additional locks on an already-locked entry. Returns NULL on
+// a miss.
+GrResourceEntry* GrResourceCache::findAndLock(const GrResourceKey& key,
+                                              LockType type) {
+    GrAutoResourceCacheValidate atcv(this);
+
+    GrResourceEntry* entry = fCache.find(key);
+    if (entry) {
+        this->internalDetach(entry, false);
+        // mark the entry as "busy" so it doesn't get purged
+        // do this between detach and attach for locked count tracking
+        if (kNested_LockType == type || !entry->isLocked()) {
+            entry->lock();
+        }
+        this->attachToHead(entry, false);
+    }
+    return entry;
+}
+
+// Wraps 'resource' (ownership transfers to the new entry) in a locked
+// cache entry under 'key', inserts it at the MRU end and into the hash,
+// then purges if the addition pushed the cache over budget.
+GrResourceEntry* GrResourceCache::createAndLock(const GrResourceKey& key,
+                                                GrResource* resource) {
+    // we don't expect to create new resources during a purge. In theory
+    // this could cause purgeAsNeeded() into an infinite loop (e.g.
+    // each resource destroyed creates and locks 2 resources and
+    // unlocks 1 thereby causing a new purge).
+    GrAssert(!fPurging);
+    GrAutoResourceCacheValidate atcv(this);
+
+    GrResourceEntry* entry = new GrResourceEntry(key, resource);
+
+    // mark the entry as "busy" so it doesn't get purged
+    // do this before attach for locked count tracking
+    entry->lock();
+
+    this->attachToHead(entry, false);
+    fCache.insert(key, entry);
+
+#if GR_DUMP_TEXTURE_UPLOAD
+    GrPrintf("--- add resource to cache %p, count=%d bytes= %d %d\n",
+             entry, fEntryCount, resource->sizeInBytes(), fEntryBytes);
+#endif
+
+    this->purgeAsNeeded();
+    return entry;
+}
+
+// Hands an entry to the client: removes it from the hash and from the LRU
+// list, moving its counts into the "client detached" stats.
+void GrResourceCache::detach(GrResourceEntry* entry) {
+    GrAutoResourceCacheValidate atcv(this);
+    internalDetach(entry, true);
+    fCache.remove(entry->fKey, entry);
+}
+
+// Returns a previously detach()ed entry to the cache and drops the
+// caller's lock. An entry whose resource went invalid while detached is
+// not re-cached; its detached stats are unwound so unlock()'s purge can
+// free it.
+void GrResourceCache::reattachAndUnlock(GrResourceEntry* entry) {
+    GrAutoResourceCacheValidate atcv(this);
+    if (entry->resource()->isValid()) {
+        attachToHead(entry, true);
+        fCache.insert(entry->key(), entry);
+    } else {
+        // If the resource went invalid while it was detached then purge it
+        // This can happen when a 3D context was lost,
+        // the client called GrContext::contextDestroyed() to notify Gr,
+        // and then later an SkGpuDevice's destructor releases its backing
+        // texture (which was invalidated at contextDestroyed time).
+        fClientDetachedCount -= 1;
+        fEntryCount -= 1;
+        size_t size = entry->resource()->sizeInBytes();
+        fClientDetachedBytes -= size;
+        fEntryBytes -= size;
+    }
+    this->unlock(entry);
+}
+
+// Drops one lock on entry. Once fully unlocked the entry becomes eligible
+// for purging, and a purge pass runs immediately in case the cache is
+// over budget.
+void GrResourceCache::unlock(GrResourceEntry* entry) {
+    GrAutoResourceCacheValidate atcv(this);
+
+    GrAssert(entry);
+    GrAssert(entry->isLocked());
+    GrAssert(fCache.find(entry->key()));
+
+    entry->unlock();
+    if (!entry->isLocked()) {
+        ++fUnlockedEntryCount;
+    }
+    this->purgeAsNeeded();
+}
+
+/**
+ *  Destroying a resource may potentially trigger the unlock of additional
+ *  resources which in turn will trigger a nested purge. We block the nested
+ *  purge using the fPurging variable. However, the initial purge will keep
+ *  looping until either all resources in the cache are unlocked or we've met
+ *  the budget. There is an assertion in createAndLock to check against a
+ *  resource's destructor inserting new resources into the cache. If these
+ *  new resources were unlocked before purgeAsNeeded completed it could
+ *  potentially make purgeAsNeeded loop infinitely.
+ */
+void GrResourceCache::purgeAsNeeded() {
+    if (!fPurging) {
+        fPurging = true;
+        bool withinBudget = false;
+        do {
+            // walk from the LRU (tail) end, evicting unlocked entries until
+            // we are back under budget or run out of unlocked entries.
+            GrResourceEntry* entry = fTail;
+            while (entry && fUnlockedEntryCount) {
+                GrAutoResourceCacheValidate atcv(this);
+                if (fEntryCount <= fMaxCount && fEntryBytes <= fMaxBytes) {
+                    withinBudget = true;
+                    break;
+                }
+
+                GrResourceEntry* prev = entry->fPrev;
+                if (!entry->isLocked()) {
+                    // remove from our cache
+                    fCache.remove(entry->fKey, entry);
+
+                    // remove from our llist
+                    this->internalDetach(entry, false);
+
+                #if GR_DUMP_TEXTURE_UPLOAD
+                    GrPrintf("--- ~resource from cache %p [%d %d]\n",
+                             entry->resource(),
+                             entry->resource()->width(),
+                             entry->resource()->height());
+                #endif
+                    delete entry;
+                }
+                entry = prev;
+            }
+        } while (!withinBudget && fUnlockedEntryCount);
+        fPurging = false;
+    }
+}
+
+void GrResourceCache::removeAll() {
+ GrAutoResourceCacheValidate atcv(this);
+
+ GrResourceEntry* entry = fHead;
+
+ // we can have one GrResource holding a lock on another
+ // so we don't want to just do a simple loop kicking each
+ // entry out. Instead change the budget and purge.
+
+ int savedMaxBytes = fMaxBytes;
+ int savedMaxCount = fMaxCount;
+ fMaxBytes = -1;
+ fMaxCount = 0;
+ this->purgeAsNeeded();
+
+ GrAssert(!fCache.count());
+ GrAssert(!fUnlockedEntryCount);
+ // Items may have been detached from the cache (such as the backing texture
+ // for an SkGpuDevice). The above purge would not have removed them.
+ GrAssert(fEntryCount == fClientDetachedCount);
+ GrAssert(fEntryBytes == fClientDetachedBytes);
+ GrAssert(NULL == fHead);
+ GrAssert(NULL == fTail);
+
+ fMaxBytes = savedMaxBytes;
+ fMaxCount = savedMaxCount;
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+#if GR_DEBUG
+static int countMatches(const GrResourceEntry* head, const GrResourceEntry* target) {
+ const GrResourceEntry* entry = head;
+ int count = 0;
+ while (entry) {
+ if (target == entry) {
+ count += 1;
+ }
+ entry = entry->next();
+ }
+ return count;
+}
+
+#if GR_DEBUG
// Debug helper: an entry count and its byte total must agree — both zero
// or both positive; anything else means the bookkeeping diverged.
static bool both_zero_or_nonzero(int count, size_t bytes) {
    if (0 == count) {
        return 0 == bytes;
    }
    return count > 0 && bytes > 0;
}
+#endif
+
+// Debug-only full consistency check: counters agree with each other, the
+// hash agrees with the LRU list in both directions, per-entry invariants
+// hold, and every listed entry appears in the list exactly once.
+void GrResourceCache::validate() const {
+    GrAssert(!fHead == !fTail);
+    GrAssert(both_zero_or_nonzero(fEntryCount, fEntryBytes));
+    GrAssert(both_zero_or_nonzero(fClientDetachedCount, fClientDetachedBytes));
+    GrAssert(fClientDetachedBytes <= fEntryBytes);
+    GrAssert(fClientDetachedCount <= fEntryCount);
+    GrAssert((fEntryCount - fClientDetachedCount) == fCache.count());
+
+    fCache.validate();
+
+    // forward walk: recompute the stats from scratch and compare.
+    GrResourceEntry* entry = fHead;
+    int count = 0;
+    int unlockCount = 0;
+    size_t bytes = 0;
+    while (entry) {
+        entry->validate();
+        GrAssert(fCache.find(entry->key()));
+        count += 1;
+        bytes += entry->resource()->sizeInBytes();
+        if (!entry->isLocked()) {
+            unlockCount += 1;
+        }
+        entry = entry->fNext;
+    }
+    GrAssert(count == fEntryCount - fClientDetachedCount);
+    GrAssert(bytes == fEntryBytes - fClientDetachedBytes);
+    GrAssert(unlockCount == fUnlockedEntryCount);
+
+    // backward walk must see the same number of entries.
+    count = 0;
+    for (entry = fTail; entry; entry = entry->fPrev) {
+        count += 1;
+    }
+    GrAssert(count == fEntryCount - fClientDetachedCount);
+
+    for (int i = 0; i < count; i++) {
+        int matches = countMatches(fHead, fCache.getArray()[i]);
+        GrAssert(1 == matches);
+    }
+}
diff --git a/src/gpu/GrResourceCache.h b/src/gpu/GrResourceCache.h
new file mode 100644
index 0000000000..d3a8f03138
--- /dev/null
+++ b/src/gpu/GrResourceCache.h
@@ -0,0 +1,312 @@
+
+/*
+ * Copyright 2011 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+
+#ifndef GrResourceCache_DEFINED
+#define GrResourceCache_DEFINED
+
+#include "GrTypes.h"
+#include "GrTHashCache.h"
+
+class GrResource;
+
+// return true if a<b, or false if b<a
+//
+#define RET_IF_LT_OR_GT(a, b) \
+ do { \
+ if ((a) < (b)) { \
+ return true; \
+ } \
+ if ((b) < (a)) { \
+ return false; \
+ } \
+ } while (0)
+
+/**
+ * Helper class for GrResourceCache, the Key is used to identify src data for
+ * a resource. It is identified by 2 32bit data fields which can hold any
+ * data (uninterpreted by the cache) and a width/height.
+ */
+class GrResourceKey {
+public:
+    enum {
+        kHashBits  = 7,                 // bits kept for the bucket index
+        kHashCount = 1 << kHashBits,
+        kHashMask  = kHashCount - 1
+    };
+
+    // Builds a key from four arbitrary (cache-uninterpreted) 32-bit words.
+    GrResourceKey(uint32_t p0, uint32_t p1, uint32_t p2, uint32_t p3) {
+        fP[0] = p0;
+        fP[1] = p1;
+        fP[2] = p2;
+        fP[3] = p3;
+        this->computeHashIndex();
+    }
+
+    // Builds a key from an array of four 32-bit words.
+    GrResourceKey(uint32_t v[4]) {
+        memcpy(fP, v, 4 * sizeof(uint32_t));
+        this->computeHashIndex();
+    }
+
+    GrResourceKey(const GrResourceKey& src) {
+        memcpy(fP, src.fP, 4 * sizeof(uint32_t));
+#if GR_DEBUG
+        // recomputing the index must reproduce the value being copied
+        this->computeHashIndex();
+        GrAssert(fHashIndex == src.fHashIndex);
+#endif
+        fHashIndex = src.fHashIndex;
+    }
+
+    //!< returns hash value [0..kHashMask] for the key
+    int hashIndex() const { return fHashIndex; }
+
+    // Equality compares only the four data words; the hash is derived data.
+    friend bool operator==(const GrResourceKey& a, const GrResourceKey& b) {
+        GR_DEBUGASSERT(-1 != a.fHashIndex && -1 != b.fHashIndex);
+        return 0 == memcmp(a.fP, b.fP, 4 * sizeof(uint32_t));
+    }
+
+    friend bool operator!=(const GrResourceKey& a, const GrResourceKey& b) {
+        GR_DEBUGASSERT(-1 != a.fHashIndex && -1 != b.fHashIndex);
+        return !(a == b);
+    }
+
+    // Lexicographic order over the four words; used by the cache's sorted
+    // array for bsearch fallback on hash collisions.
+    friend bool operator<(const GrResourceKey& a, const GrResourceKey& b) {
+        RET_IF_LT_OR_GT(a.fP[0], b.fP[0]);
+        RET_IF_LT_OR_GT(a.fP[1], b.fP[1]);
+        RET_IF_LT_OR_GT(a.fP[2], b.fP[2]);
+        return a.fP[3] < b.fP[3];
+    }
+
+    uint32_t getValue32(int i) const {
+        GrAssert(i >=0 && i < 4);
+        return fP[i];
+    }
+private:
+
+    // rotate left by 8 bits
+    static uint32_t rol(uint32_t x) {
+        return (x >> 24) | (x << 8);
+    }
+    // rotate right by 8 bits
+    static uint32_t ror(uint32_t x) {
+        return (x >> 8) | (x << 24);
+    }
+    // swap the 16-bit halves
+    static uint32_t rohalf(uint32_t x) {
+        return (x >> 16) | (x << 16);
+    }
+
+    // Mixes the four words (each rotated differently so identical words do
+    // not cancel in the xor) and folds the result down to kHashBits bits.
+    void computeHashIndex() {
+        uint32_t hash = fP[0] ^ rol(fP[1]) ^ ror(fP[2]) ^ rohalf(fP[3]);
+        // this way to mix and reduce hash to its index may have to change
+        // depending on how many bits we allocate to the index
+        hash ^= hash >> 16;
+        hash ^= hash >> 8;
+        fHashIndex = hash & kHashMask;
+    }
+
+    uint32_t fP[4];
+
+    // this is computed from the fP... fields
+    int fHashIndex;
+
+    friend class GrContext;
+};
+
+///////////////////////////////////////////////////////////////////////////////
+
+/**
+ * One cache entry: pairs a GrResourceKey with the GrResource it identifies,
+ * plus the lock count and the LRU-list links managed by GrResourceCache.
+ */
+class GrResourceEntry {
+public:
+    GrResource* resource() const { return fResource; }
+    const GrResourceKey& key() const { return fKey; }
+
+#if GR_DEBUG
+    // list links exposed only so validate() can walk the LRU list
+    GrResourceEntry* next() const { return fNext; }
+    GrResourceEntry* prev() const { return fPrev; }
+#endif
+
+#if GR_DEBUG
+    void validate() const;
+#else
+    void validate() const {}
+#endif
+
+private:
+    // only GrResourceCache may create, destroy, lock, or unlock entries
+    GrResourceEntry(const GrResourceKey& key, GrResource* resource);
+    ~GrResourceEntry();
+
+    bool isLocked() const { return fLockCount != 0; }
+    void lock() { ++fLockCount; }
+    void unlock() {
+        GrAssert(fLockCount > 0);
+        --fLockCount;
+    }
+
+    GrResourceKey fKey;
+    GrResource* fResource;
+
+    // track if we're in use, used when we need to purge
+    // we only purge unlocked entries
+    int fLockCount;
+
+    // we're a dlinklist
+    GrResourceEntry* fPrev;
+    GrResourceEntry* fNext;
+
+    friend class GrResourceCache;
+};
+
+///////////////////////////////////////////////////////////////////////////////
+
+#include "GrTHashCache.h"
+
+/**
+ * Cache of GrResource objects.
+ *
+ * These have a corresponding GrResourceKey, built from 128bits identifying the
+ * resource.
+ *
+ * The cache stores the entries in a double-linked list, which is its LRU.
+ * When an entry is "locked" (i.e. given to the caller), it is moved to the
+ * head of the list. If/when we must purge some of the entries, we walk the
+ * list backwards from the tail, since those are the least recently used.
+ *
+ * For fast searches, we maintain a sorted array (based on the GrResourceKey)
+ * which we can bsearch. When a new entry is added, it is inserted into this
+ * array.
+ *
+ * For even faster searches, a hash is computed from the Key. If there is
+ * a collision between two keys with the same hash, we fall back on the
+ * bsearch, and update the hash to reflect the most recent Key requested.
+ */
+class GrResourceCache {
+public:
+    GrResourceCache(int maxCount, size_t maxBytes);
+    ~GrResourceCache();
+
+    /**
+     * Return the current resource cache limits.
+     *
+     * @param maxResource If non-null, returns maximum number of resources
+     *                    that can be held in the cache.
+     * @param maxBytes    If non-null, returns maximum number of bytes of
+     *                    gpu memory that can be held in the cache.
+     */
+    void getLimits(int* maxResources, size_t* maxBytes) const;
+
+    /**
+     * Specify the resource cache limits. If the current cache exceeds either
+     * of these, it will be purged (LRU) to keep the cache within these limits.
+     *
+     * @param maxResources The maximum number of resources that can be held in
+     *                     the cache.
+     * @param maxBytes     The maximum number of bytes of resource memory that
+     *                     can be held in the cache.
+     */
+    void setLimits(int maxResource, size_t maxResourceBytes);
+
+    /**
+     * Controls whether locks should be nestable or not.
+     */
+    enum LockType {
+        kNested_LockType,
+        kSingle_LockType,
+    };
+
+    /**
+     * Search for an entry with the same Key. If found, "lock" it and return it.
+     * If not found, return null.
+     */
+    GrResourceEntry* findAndLock(const GrResourceKey&, LockType style);
+
+    /**
+     * Create a new entry, based on the specified key and resource, and return
+     * its "locked" entry.
+     *
+     * Ownership of the resource is transferred to the Entry, which will unref()
+     * it when we are purged or deleted.
+     */
+    GrResourceEntry* createAndLock(const GrResourceKey&, GrResource*);
+
+    /**
+     * Detach removes an entry from the cache. This prevents the entry from
+     * being found by a subsequent findAndLock() until it is reattached. The
+     * entry still counts against the cache's budget and should be reattached
+     * when exclusive access is no longer needed.
+     */
+    void detach(GrResourceEntry*);
+
+    /**
+     * Reattaches a resource to the cache and unlocks it. Allows it to be found
+     * by a subsequent findAndLock or be purged (provided its lock count is
+     * now 0.)
+     */
+    void reattachAndUnlock(GrResourceEntry*);
+
+    /**
+     * When done with an entry, call unlock(entry) on it, which returns it to
+     * a purgeable state.
+     */
+    void unlock(GrResourceEntry*);
+
+    // unlinks and deletes every entry (asserts that none are detached/locked)
+    void removeAll();
+
+#if GR_DEBUG
+    void validate() const;
+#else
+    void validate() const {}
+#endif
+
+private:
+    // unlink an entry from the LRU list and adjust the running totals
+    // NOTE(review): the bool's meaning is not visible in this header —
+    // presumably it selects client-detached accounting; confirm in the .cpp.
+    void internalDetach(GrResourceEntry*, bool);
+    // link an entry at the head (MRU end) of the list
+    void attachToHead(GrResourceEntry*, bool);
+    // purge LRU unlocked entries until the cache is back within its budget
+    void purgeAsNeeded();
+
+    class Key;
+    GrTHashTable<GrResourceEntry, Key, 8> fCache;
+
+    // manage the dlink list
+    GrResourceEntry* fHead;
+    GrResourceEntry* fTail;
+
+    // our budget, used in purgeAsNeeded()
+    int fMaxCount;
+    size_t fMaxBytes;
+
+    // our current stats, related to our budget
+    int fEntryCount;
+    int fUnlockedEntryCount;
+    size_t fEntryBytes;
+    int fClientDetachedCount;
+    size_t fClientDetachedBytes;
+
+    // prevents recursive purging
+    bool fPurging;
+};
+
+///////////////////////////////////////////////////////////////////////////////
+
+/**
+ * Debug-only RAII helper: validates the cache on construction and again when
+ * the scope exits. In release builds it compiles to nothing.
+ */
+#if GR_DEBUG
+    class GrAutoResourceCacheValidate {
+    public:
+        GrAutoResourceCacheValidate(GrResourceCache* cache) : fCache(cache) {
+            cache->validate();
+        }
+        ~GrAutoResourceCacheValidate() {
+            fCache->validate();
+        }
+    private:
+        GrResourceCache* fCache;
+    };
+#else
+    class GrAutoResourceCacheValidate {
+    public:
+        GrAutoResourceCacheValidate(GrResourceCache*) {}
+    };
+#endif
+
+#endif
+
diff --git a/src/gpu/GrStencil.cpp b/src/gpu/GrStencil.cpp
new file mode 100644
index 0000000000..376e057754
--- /dev/null
+++ b/src/gpu/GrStencil.cpp
@@ -0,0 +1,376 @@
+
+/*
+ * Copyright 2011 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+#include "GrStencil.h"
+
+// Disabled stencil state: all ops keep, both funcs always pass, all masks and
+// refs zero. The asserts below pin kKeep/kAlways to 0, so this initializer is
+// all-zero data and zeroed GrStencilSettings memory equals gDisabled.
+const GrStencilSettings GrStencilSettings::gDisabled = {
+    kKeep_StencilOp,     kKeep_StencilOp,
+    kKeep_StencilOp,     kKeep_StencilOp,
+    kAlways_StencilFunc, kAlways_StencilFunc,
+    0x0,                 0x0,
+    0x0,                 0x0,
+    0x0,                 0x0
+};
+GR_STATIC_ASSERT(0 == kKeep_StencilOp);
+GR_STATIC_ASSERT(0 == kAlways_StencilFunc);
+
+////////////////////////////////////////////////////////////////////////////////
+// Stencil Rules for Merging user stencil space into clip
+
+// We can't include the clip bit in the ref or mask values because the division
+// between user and clip bits in the stencil depends on the number of stencil
+// bits in the runtime. Comments below indicate what the code should do to
+// incorporate the clip bit into these settings.
+
+///////
+// Replace
+
+// set the ref to be the clip bit, but mask it out for the test
+static const GrStencilSettings gUserToClipReplace = {
+ kReplace_StencilOp, kReplace_StencilOp,
+ kZero_StencilOp, kZero_StencilOp,
+ kLess_StencilFunc, kLess_StencilFunc,
+ 0xffffffff, 0xffffffff, // unset clip bit
+ 0x0, 0x0, // set clip bit
+ 0xffffffff, 0xffffffff
+};
+static const GrStencilSettings gInvUserToClipReplace = {
+ kReplace_StencilOp, kReplace_StencilOp,
+ kZero_StencilOp, kZero_StencilOp,
+ kEqual_StencilFunc, kEqual_StencilFunc,
+ 0xffffffff, 0xffffffff, // unset clip bit
+ 0x0, 0x0, // set clip bit
+ 0xffffffff, 0xffffffff
+};
+
+///////
+// Intersect
+static const GrStencilSettings gUserToClipIsect = {
+ kReplace_StencilOp, kReplace_StencilOp,
+ kZero_StencilOp, kZero_StencilOp,
+ kLess_StencilFunc, kLess_StencilFunc,
+ 0xffffffff, 0xffffffff,
+ 0x0, 0x0, // set clip bit
+ 0xffffffff, 0xffffffff
+};
+static const GrStencilSettings gInvUserToClipIsect = {
+ kReplace_StencilOp, kReplace_StencilOp,
+ kZero_StencilOp, kZero_StencilOp,
+ kEqual_StencilFunc, kEqual_StencilFunc,
+ 0xffffffff, 0xffffffff,
+ 0x0, 0x0, // set clip bit
+ 0xffffffff, 0xffffffff
+};
+
+///////
+// Difference
+static const GrStencilSettings gUserToClipDiff = {
+ kReplace_StencilOp, kReplace_StencilOp,
+ kZero_StencilOp, kZero_StencilOp,
+ kEqual_StencilFunc, kEqual_StencilFunc,
+ 0xffffffff, 0xffffffff,
+ 0x0, 0x0, // set clip bit
+ 0xffffffff, 0xffffffff
+};
+static const GrStencilSettings gInvUserToClipDiff = {
+ kReplace_StencilOp, kReplace_StencilOp,
+ kZero_StencilOp, kZero_StencilOp,
+ kLess_StencilFunc, kLess_StencilFunc,
+ 0xffffffff, 0xffffffff,
+ 0x0, 0x0, // set clip bit
+ 0xffffffff, 0xffffffff
+};
+
+///////
+// Union
+
+// first pass makes all the passing cases >= just clip bit set.
+static const GrStencilSettings gUserToClipUnionPass0 = {
+ kReplace_StencilOp, kReplace_StencilOp,
+ kKeep_StencilOp, kKeep_StencilOp,
+ kLEqual_StencilFunc, kLEqual_StencilFunc,
+ 0xffffffff, 0xffffffff, // unset clip bit
+ 0x00000001, 0x00000001, // set clip bit
+ 0xffffffff, 0xffffffff
+};
+
+// second pass allows anything greater than just clip bit set to pass
+static const GrStencilSettings gUserToClipUnionPass1 = {
+ kReplace_StencilOp, kReplace_StencilOp,
+ kZero_StencilOp, kZero_StencilOp,
+ kLEqual_StencilFunc, kLEqual_StencilFunc,
+ 0xffffffff, 0xffffffff,
+ 0x00000000, 0x00000000, // set clip bit
+ 0xffffffff, 0xffffffff
+};
+
+// for the inverse case, the first pass finds non-zero user bits with the
+// clip bit set and converts them to just the clip bit set
+static const GrStencilSettings gInvUserToClipUnionPass0 = {
+ kReplace_StencilOp, kReplace_StencilOp,
+ kKeep_StencilOp, kKeep_StencilOp,
+ kLess_StencilFunc, kLess_StencilFunc,
+ 0xffffffff, 0xffffffff,
+ 0x00000000, 0x00000000, // set clip bit
+ 0xffffffff, 0xffffffff
+};
+
+// second pass lets anything through with a nonzero user portion
+// and writes a ref value with just the clip bit set to it.
+static const GrStencilSettings gInvUserToClipUnionPass1 = {
+ kReplace_StencilOp, kReplace_StencilOp,
+ kZero_StencilOp, kZero_StencilOp,
+ kLess_StencilFunc, kLess_StencilFunc,
+ 0xffffffff, 0xffffffff, // unset clip bit
+ 0x00000000, 0x00000000, // set clip bit
+ 0xffffffff, 0xffffffff
+};
+
+///////
+// Xor
+static const GrStencilSettings gUserToClipXorPass0 = {
+ kInvert_StencilOp, kInvert_StencilOp,
+ kKeep_StencilOp, kKeep_StencilOp,
+ kEqual_StencilFunc, kEqual_StencilFunc,
+ 0xffffffff, 0xffffffff, // unset clip bit
+ 0x00000000, 0x00000000,
+ 0xffffffff, 0xffffffff
+};
+
+static const GrStencilSettings gUserToClipXorPass1 = {
+ kReplace_StencilOp, kReplace_StencilOp,
+ kZero_StencilOp, kZero_StencilOp,
+ kGreater_StencilFunc, kGreater_StencilFunc,
+ 0xffffffff, 0xffffffff,
+ 0x00000000, 0x00000000, // set clip bit
+ 0xffffffff, 0xffffffff
+};
+
+static const GrStencilSettings gInvUserToClipXorPass0 = {
+ kInvert_StencilOp, kInvert_StencilOp,
+ kKeep_StencilOp, kKeep_StencilOp,
+ kEqual_StencilFunc, kEqual_StencilFunc,
+ 0xffffffff, 0xffffffff, // unset clip bit
+ 0x00000000, 0x00000000,
+ 0xffffffff, 0xffffffff
+};
+
+static const GrStencilSettings gInvUserToClipXorPass1 = {
+ kReplace_StencilOp, kReplace_StencilOp,
+ kZero_StencilOp, kZero_StencilOp,
+ kLess_StencilFunc, kLess_StencilFunc,
+ 0xffffffff, 0xffffffff,
+ 0x00000000, 0x00000000, // set clip bit
+ 0xffffffff, 0xffffffff
+};
+
+///////
+// Reverse Diff
+static const GrStencilSettings gUserToClipRDiffPass0 = {
+ kInvert_StencilOp, kInvert_StencilOp,
+ kZero_StencilOp, kZero_StencilOp,
+ kLess_StencilFunc, kLess_StencilFunc,
+ 0xffffffff, 0xffffffff, // unset clip bit
+ 0x00000000, 0x00000000, // set clip bit
+ 0xffffffff, 0xffffffff
+};
+
+static const GrStencilSettings gUserToClipRDiffPass1 = {
+ kReplace_StencilOp, kReplace_StencilOp,
+ kZero_StencilOp, kZero_StencilOp,
+ kEqual_StencilFunc, kEqual_StencilFunc,
+ 0x00000000, 0x00000000, // set clip bit
+ 0x00000000, 0x00000000, // set clip bit
+ 0xffffffff, 0xffffffff
+};
+
+static const GrStencilSettings gInvUserToClipRDiff = {
+ kInvert_StencilOp, kInvert_StencilOp,
+ kZero_StencilOp, kZero_StencilOp,
+ kEqual_StencilFunc, kEqual_StencilFunc,
+ 0xffffffff, 0xffffffff,
+ 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000 // set clip bit
+};
+///////
+// Direct to Stencil
+
+// We can render a clip element directly without first writing to the client
+// portion of the clip when the fill is not inverse and the set operation will
+// only modify the in/out status of samples covered by the clip element.
+
+// this one only works if used right after stencil clip was cleared.
+// Our GrClip doesn't allow midstream replace ops.
+static const GrStencilSettings gReplaceClip = {
+ kReplace_StencilOp, kReplace_StencilOp,
+ kReplace_StencilOp, kReplace_StencilOp,
+ kAlways_StencilFunc, kAlways_StencilFunc,
+ 0xffffffff, 0xffffffff,
+ 0x00000000, 0x00000000, // set clip bit
+ 0x00000000, 0x00000000 // set clipBit
+};
+
+static const GrStencilSettings gUnionClip = {
+ kReplace_StencilOp, kReplace_StencilOp,
+ kReplace_StencilOp, kReplace_StencilOp,
+ kAlways_StencilFunc, kAlways_StencilFunc,
+ 0xffffffff, 0xffffffff,
+ 0x00000000, 0x00000000, // set clip bit
+ 0x00000000, 0x00000000 // set clip bit
+};
+
+static const GrStencilSettings gXorClip = {
+ kInvert_StencilOp, kInvert_StencilOp,
+ kInvert_StencilOp, kInvert_StencilOp,
+ kAlways_StencilFunc, kAlways_StencilFunc,
+ 0xffffffff, 0xffffffff,
+ 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000 // set clip bit
+};
+
+static const GrStencilSettings gDiffClip = {
+ kZero_StencilOp, kZero_StencilOp,
+ kZero_StencilOp, kZero_StencilOp,
+ kAlways_StencilFunc, kAlways_StencilFunc,
+ 0xffffffff, 0xffffffff,
+ 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000 // set clip bit
+};
+
+/**
+ * Computes the stencil pass settings required to merge a clip element, drawn
+ * with the given set op, into the clip bit of the stencil buffer.
+ *
+ * @param op              set operation combining the element with the clip.
+ * @param canBeDirect     true if the element can be drawn straight into the
+ *                        clip bit without a user-bit pass.
+ * @param stencilClipMask bit mask with only the stencil-clip bit set.
+ * @param invertedFill    true if the element's path has an inverted fill.
+ * @param numPasses       out: number of passes written into settings.
+ * @param settings        out: stencil settings for each pass.
+ * @return true if the element is drawn directly to the clip bit (no user-bit
+ *         rendering needed), false if the user-bit merge passes are returned.
+ */
+bool GrStencilSettings::GetClipPasses(GrSetOp op,
+                                      bool canBeDirect,
+                                      unsigned int stencilClipMask,
+                                      bool invertedFill,
+                                      int* numPasses,
+                                      GrStencilSettings settings[kMaxStencilClipPasses]) {
+    if (canBeDirect && !invertedFill) {
+        *numPasses = 0;
+        switch (op) {
+            case kReplace_SetOp:
+                *numPasses = 1;
+                settings[0] = gReplaceClip;
+                break;
+            case kUnion_SetOp:
+                *numPasses = 1;
+                settings[0] = gUnionClip;
+                break;
+            case kXor_SetOp:
+                *numPasses = 1;
+                settings[0] = gXorClip;
+                break;
+            case kDifference_SetOp:
+                *numPasses = 1;
+                settings[0] = gDiffClip;
+                break;
+            default: // suppress warning
+                break;
+        }
+        if (1 == *numPasses) {
+            // fold the runtime clip bit into ref and write mask for both faces
+            settings[0].fFrontFuncRef |= stencilClipMask;
+            settings[0].fFrontWriteMask |= stencilClipMask;
+            settings[0].fBackFuncRef = settings[0].fFrontFuncRef;
+            settings[0].fBackWriteMask = settings[0].fFrontWriteMask;
+            return true;
+        }
+    }
+    switch (op) {
+        // if we make the path renderer go to stencil we always give it a
+        // non-inverted fill and we use the stencil rules on the client->clipbit
+        // pass to select either the zeros or nonzeros.
+        case kReplace_SetOp:
+            *numPasses = 1;
+            settings[0] = invertedFill ? gInvUserToClipReplace : gUserToClipReplace;
+            settings[0].fFrontFuncMask &= ~stencilClipMask;
+            settings[0].fFrontFuncRef |= stencilClipMask;
+            settings[0].fBackFuncMask = settings[0].fFrontFuncMask;
+            settings[0].fBackFuncRef = settings[0].fFrontFuncRef;
+            break;
+        case kIntersect_SetOp:
+            *numPasses = 1;
+            settings[0] = invertedFill ? gInvUserToClipIsect : gUserToClipIsect;
+            settings[0].fFrontFuncRef = stencilClipMask;
+            settings[0].fBackFuncRef = settings[0].fFrontFuncRef;
+            break;
+        case kUnion_SetOp:
+            *numPasses = 2;
+            if (invertedFill) {
+                settings[0] = gInvUserToClipUnionPass0;
+                settings[0].fFrontFuncRef |= stencilClipMask;
+                // bug fix: mirror the front *ref* into the back ref; this
+                // previously copied fFrontFuncMask, unlike every other branch.
+                settings[0].fBackFuncRef = settings[0].fFrontFuncRef;
+
+                settings[1] = gInvUserToClipUnionPass1;
+                settings[1].fFrontFuncMask &= ~stencilClipMask;
+                settings[1].fFrontFuncRef |= stencilClipMask;
+                settings[1].fBackFuncMask = settings[1].fFrontFuncMask;
+                settings[1].fBackFuncRef = settings[1].fFrontFuncRef;
+
+            } else {
+                settings[0] = gUserToClipUnionPass0;
+                settings[0].fFrontFuncMask &= ~stencilClipMask;
+                settings[0].fFrontFuncRef |= stencilClipMask;
+                settings[0].fBackFuncMask = settings[0].fFrontFuncMask;
+                settings[0].fBackFuncRef = settings[0].fFrontFuncRef;
+
+                settings[1] = gUserToClipUnionPass1;
+                settings[1].fFrontFuncRef |= stencilClipMask;
+                settings[1].fBackFuncRef = settings[1].fFrontFuncRef;
+            }
+            break;
+        case kXor_SetOp:
+            *numPasses = 2;
+            if (invertedFill) {
+                settings[0] = gInvUserToClipXorPass0;
+                settings[0].fFrontFuncMask &= ~stencilClipMask;
+                settings[0].fBackFuncMask = settings[0].fFrontFuncMask;
+
+                settings[1] = gInvUserToClipXorPass1;
+                settings[1].fFrontFuncRef |= stencilClipMask;
+                settings[1].fBackFuncRef = settings[1].fFrontFuncRef;
+            } else {
+                settings[0] = gUserToClipXorPass0;
+                settings[0].fFrontFuncMask &= ~stencilClipMask;
+                settings[0].fBackFuncMask = settings[0].fFrontFuncMask;
+
+                settings[1] = gUserToClipXorPass1;
+                settings[1].fFrontFuncRef |= stencilClipMask;
+                settings[1].fBackFuncRef = settings[1].fFrontFuncRef;
+            }
+            break;
+        case kDifference_SetOp:
+            *numPasses = 1;
+            settings[0] = invertedFill ? gInvUserToClipDiff : gUserToClipDiff;
+            settings[0].fFrontFuncRef |= stencilClipMask;
+            settings[0].fBackFuncRef = settings[0].fFrontFuncRef;
+            break;
+        case kReverseDifference_SetOp:
+            if (invertedFill) {
+                *numPasses = 1;
+                settings[0] = gInvUserToClipRDiff;
+                settings[0].fFrontWriteMask |= stencilClipMask;
+                settings[0].fBackWriteMask = settings[0].fFrontWriteMask;
+            } else {
+                *numPasses = 2;
+                settings[0] = gUserToClipRDiffPass0;
+                settings[0].fFrontFuncMask &= ~stencilClipMask;
+                settings[0].fBackFuncMask = settings[0].fFrontFuncMask;
+                settings[0].fFrontFuncRef |= stencilClipMask;
+                settings[0].fBackFuncRef = settings[0].fFrontFuncRef;
+
+                settings[1] = gUserToClipRDiffPass1;
+                settings[1].fFrontFuncMask |= stencilClipMask;
+                settings[1].fFrontFuncRef |= stencilClipMask;
+                settings[1].fBackFuncMask = settings[1].fFrontFuncMask;
+                settings[1].fBackFuncRef = settings[1].fFrontFuncRef;
+            }
+            break;
+        default:
+            GrCrash("Unknown set op");
+    }
+    // a user-bit merge is required; the element could not be drawn directly
+    return false;
+}
diff --git a/src/gpu/GrStencilBuffer.cpp b/src/gpu/GrStencilBuffer.cpp
new file mode 100644
index 0000000000..4b08e238ef
--- /dev/null
+++ b/src/gpu/GrStencilBuffer.cpp
@@ -0,0 +1,55 @@
+
+/*
+ * Copyright 2011 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "GrStencilBuffer.h"
+
+#include "GrContext.h"
+#include "GrGpu.h"
+
+// Called by a render target when it drops its attachment to this stencil
+// buffer. When the last attachment goes away the buffer unlocks itself in
+// the resource cache, which may free it.
+void GrStencilBuffer::wasDetachedFromRenderTarget(const GrRenderTarget* rt) {
+    GrAssert(fRTAttachmentCnt > 0);
+    if (0 == --fRTAttachmentCnt) {
+        this->unlockInCache();
+        // At this point we could be deleted!
+    }
+}
+
+// Places this stencil buffer in the context's resource cache (locked) and
+// remembers the cache entry so it can later unlock itself.
+void GrStencilBuffer::transferToCacheAndLock() {
+    GrAssert(NULL == fCacheEntry);
+    fCacheEntry =
+        this->getGpu()->getContext()->addAndLockStencilBuffer(this);
+}
+
+// GrResource override: invoked when the GPU releases its resources.
+void GrStencilBuffer::onRelease() {
+    // When the GrGpu rips through its list of resources and releases
+    // them it may release an SB before it releases its attached RTs.
+    // In that case when GrStencilBuffer sees its last detach it no
+    // longer has a gpu ptr (gets nulled in GrResource::release()) and can't
+    // access the cache to unlock itself. So if we're being released and still
+    // have attachments go ahead and unlock now.
+    if (fRTAttachmentCnt) {
+        this->unlockInCache();
+        // we shouldn't be deleted here because some RT still has a ref on us.
+    }
+    fCacheEntry = NULL;
+}
+
+// GrResource override: invoked when the underlying 3D API context is lost.
+void GrStencilBuffer::onAbandon() {
+    // we can use the same behavior as release.
+    this->onRelease();
+}
+
+// Unlocks this buffer's cache entry, if it has one and the gpu/context are
+// still alive (either may already be gone during teardown).
+void GrStencilBuffer::unlockInCache() {
+    if (NULL != fCacheEntry) {
+        GrGpu* gpu = this->getGpu();
+        if (NULL != gpu) {
+            GrAssert(NULL != gpu->getContext());
+            gpu->getContext()->unlockStencilBuffer(fCacheEntry);
+        }
+    }
+}
diff --git a/src/gpu/GrStencilBuffer.h b/src/gpu/GrStencilBuffer.h
new file mode 100644
index 0000000000..5249ce8466
--- /dev/null
+++ b/src/gpu/GrStencilBuffer.h
@@ -0,0 +1,106 @@
+
+/*
+ * Copyright 2011 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+#ifndef GrStencilBuffer_DEFINED
+#define GrStencilBuffer_DEFINED
+
+#include "GrClip.h"
+#include "GrResource.h"
+
+class GrRenderTarget;
+class GrResourceEntry;
+
+/**
+ * A stencil buffer that can be shared among render targets. It counts the
+ * render targets attached to it, remembers the last clip rendered into it,
+ * and keeps itself locked in the resource cache while attached.
+ */
+class GrStencilBuffer : public GrResource {
+public:
+    virtual ~GrStencilBuffer() {
+        // currently each rt that has attached this sb keeps a ref
+        // TODO: allow SB to be purged and detach itself from rts
+        GrAssert(0 == fRTAttachmentCnt);
+    }
+
+    int width() const { return fWidth; }
+    int height() const { return fHeight; }
+    int bits() const { return fBits; }
+    int numSamples() const { return fSampleCnt; }
+
+    // called to note the last clip drawn to this buffer.
+    void setLastClip(const GrClip& clip, int width, int height) {
+        fLastClip = clip;
+        fLastClipWidth = width;
+        fLastClipHeight = height;
+        GrAssert(width <= fWidth);
+        GrAssert(height <= fHeight);
+    }
+
+    // called to determine if we have to render the clip into SB.
+    bool mustRenderClip(const GrClip& clip, int width, int height) const {
+        // The clip is in device space. That is it doesn't scale to fit a
+        // smaller RT. It is just truncated on the right / bottom edges.
+        // Note that this assumes that the viewport origin never moves within
+        // the stencil buffer. This is valid today.
+        // (fLastClipWidth/Height start at -1, so a fresh buffer always
+        // reports that the clip must be rendered.)
+        return width > fLastClipWidth ||
+               height > fLastClipHeight ||
+               clip != fLastClip;
+    }
+
+    const GrClip& getLastClip() const {
+        return fLastClip;
+    }
+
+    // places the sb in the cache and locks it. Caller transfers
+    // a ref to the cache which will unref when purged.
+    void transferToCacheAndLock();
+
+    void wasAttachedToRenderTarget(const GrRenderTarget* rt) {
+        ++fRTAttachmentCnt;
+    }
+
+    // decrements the attachment count; when it hits zero the buffer unlocks
+    // itself in the cache (which may delete it)
+    void wasDetachedFromRenderTarget(const GrRenderTarget* rt);
+
+protected:
+    GrStencilBuffer(GrGpu* gpu, int width, int height, int bits, int sampleCnt)
+        : GrResource(gpu)
+        , fWidth(width)
+        , fHeight(height)
+        , fBits(bits)
+        , fSampleCnt(sampleCnt)
+        , fLastClip()
+        , fLastClipWidth(-1)
+        , fLastClipHeight(-1)
+        , fCacheEntry(NULL)
+        , fRTAttachmentCnt(0) {
+    }
+
+    // GrResource overrides
+
+    // subclass override must call INHERITED::onRelease
+    virtual void onRelease();
+    // subclass override must call INHERITED::onAbandon
+    virtual void onAbandon();
+
+private:
+
+    // unlocks fCacheEntry if present and the gpu/context still exist
+    void unlockInCache();
+
+    int fWidth;
+    int fHeight;
+    int fBits;       // stencil bit depth
+    int fSampleCnt;  // MSAA sample count
+
+    // last clip rendered into this buffer and the RT size it was drawn at
+    GrClip fLastClip;
+    int fLastClipWidth;
+    int fLastClipHeight;
+
+    GrResourceEntry* fCacheEntry;  // our resource cache entry, if any
+    int fRTAttachmentCnt;          // render targets currently attached
+
+    typedef GrResource INHERITED;
+};
+
+#endif
diff --git a/src/gpu/GrTesselatedPathRenderer.cpp b/src/gpu/GrTesselatedPathRenderer.cpp
new file mode 100644
index 0000000000..3c4bb0100c
--- /dev/null
+++ b/src/gpu/GrTesselatedPathRenderer.cpp
@@ -0,0 +1,607 @@
+
+/*
+ * Copyright 2011 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+#include "GrTesselatedPathRenderer.h"
+
+#include "GrPathUtils.h"
+#include "GrPoint.h"
+#include "GrTDArray.h"
+
+#include "SkTemplates.h"
+
+#include <limits.h>
+#include <sk_glu.h>
+
+typedef GrTDArray<GrDrawTarget::Edge> GrEdgeArray;
+typedef GrTDArray<GrPoint> GrPointArray;
+typedef GrTDArray<uint16_t> GrIndexArray;
+typedef void (*TESSCB)();
+
+// limit the allowable vertex range to approximately half of the representable
+// IEEE exponent in order to avoid overflow when doing multiplies between
+// vertex components.
+const float kMaxVertexValue = 1e18f;
+
+// Builds the implicit line equation (a, b, c) for the edge p->q, scaled so
+// that a*x + b*y + c is the signed distance from the line; 'sign' selects
+// which side of the edge is positive.
+static inline GrDrawTarget::Edge computeEdge(const GrPoint& p,
+                                             const GrPoint& q,
+                                             float sign) {
+    // tangent is actually the edge normal (perpendicular of q - p)
+    GrVec tangent = GrVec::Make(p.fY - q.fY, q.fX - p.fX);
+    float scale = sign / tangent.length();
+    float cross2 = p.fX * q.fY - q.fX * p.fY;
+    return GrDrawTarget::Edge(tangent.fX * scale,
+                              tangent.fY * scale,
+                              cross2 * scale);
+}
+
+// Clamps both coordinates into [-kMaxVertexValue, kMaxVertexValue] so that
+// later per-component multiplies cannot overflow single-precision floats.
+static inline GrPoint sanitizePoint(const GrPoint& pt) {
+    GrPoint clamped;
+    clamped.fX = SkScalarPin(pt.fX, -kMaxVertexValue, kMaxVertexValue);
+    clamped.fY = SkScalarPin(pt.fY, -kMaxVertexValue, kMaxVertexValue);
+    return clamped;
+}
+
+// Thin C++ wrapper around the SGI GLU tessellator. Subclasses receive the
+// tessellation output through the virtual begin/vertex/edgeFlag/end/combine
+// hooks; the static *CB functions are C trampolines registered with GLU that
+// forward to those hooks via the polygon-data pointer.
+// NOTE(review): no virtual destructor — safe only while instances are never
+// deleted through a GrTess*; confirm at call sites.
+class GrTess {
+public:
+    GrTess(int count, unsigned winding_rule) {
+        fTess = Sk_gluNewTess();
+        Sk_gluTessProperty(fTess, GLU_TESS_WINDING_RULE, winding_rule);
+        Sk_gluTessNormal(fTess, 0.0f, 0.0f, 1.0f);
+        Sk_gluTessCallback(fTess, GLU_TESS_BEGIN_DATA, (TESSCB) &beginCB);
+        Sk_gluTessCallback(fTess, GLU_TESS_VERTEX_DATA, (TESSCB) &vertexCB);
+        Sk_gluTessCallback(fTess, GLU_TESS_END_DATA, (TESSCB) &endCB);
+        Sk_gluTessCallback(fTess, GLU_TESS_EDGE_FLAG_DATA, (TESSCB) &edgeFlagCB);
+        Sk_gluTessCallback(fTess, GLU_TESS_COMBINE_DATA, (TESSCB) &combineCB);
+        // sized up front for 'count' vertices and never reallocated —
+        // presumably GLU holds pointers into it during tessellation
+        fInVertices = new double[count * 3];
+    }
+    ~GrTess() {
+        Sk_gluDeleteTess(fTess);
+        delete[] fInVertices;
+    }
+    // Feeds one point to the tessellator. The vertex index is smuggled
+    // through GLU's per-vertex void* slot (integer <-> pointer casts).
+    void addVertex(const GrPoint& pt, int index) {
+        // indices must fit a 16-bit index buffer; extras are silently dropped
+        if (index > USHRT_MAX) return;
+        double* inVertex = &fInVertices[index * 3];
+        inVertex[0] = pt.fX;
+        inVertex[1] = pt.fY;
+        inVertex[2] = 0.0;
+        *fVertices.append() = pt;
+        Sk_gluTessVertex(fTess, inVertex, reinterpret_cast<void*>(index));
+    }
+    // Tessellates a set of contours: contours[j] is the point count of the
+    // j-th contour; 'points' holds all contour points back to back.
+    void addVertices(const GrPoint* points, const uint16_t* contours, int numContours) {
+        Sk_gluTessBeginPolygon(fTess, this);
+        size_t i = 0;
+        for (int j = 0; j < numContours; ++j) {
+            Sk_gluTessBeginContour(fTess);
+            size_t end = i + contours[j];
+            for (; i < end; ++i) {
+                addVertex(points[i], i);
+            }
+            Sk_gluTessEndContour(fTess);
+        }
+        Sk_gluTessEndPolygon(fTess);
+    }
+    GLUtesselator* tess() { return fTess; }
+    const GrPointArray& vertices() const { return fVertices; }
+protected:
+    // output hooks implemented by subclasses
+    virtual void begin(GLenum type) = 0;
+    virtual void vertex(int index) = 0;
+    virtual void edgeFlag(bool flag) = 0;
+    virtual void end() = 0;
+    // called when GLU synthesizes a vertex at an intersection; returns the
+    // index assigned to the new vertex
+    virtual int combine(GLdouble coords[3], int vertexIndices[4],
+                        GLfloat weight[4]) = 0;
+    static void beginCB(GLenum type, void* data) {
+        static_cast<GrTess*>(data)->begin(type);
+    }
+    static void vertexCB(void* vertexData, void* data) {
+        static_cast<GrTess*>(data)->vertex(reinterpret_cast<long>(vertexData));
+    }
+    static void edgeFlagCB(GLboolean flag, void* data) {
+        static_cast<GrTess*>(data)->edgeFlag(flag != 0);
+    }
+    static void endCB(void* data) {
+        static_cast<GrTess*>(data)->end();
+    }
+    static void combineCB(GLdouble coords[3], void* vertexData[4],
+                          GLfloat weight[4], void **outData, void* data) {
+        // unpack the four source-vertex indices from the void* slots
+        int vertexIndex[4];
+        vertexIndex[0] = reinterpret_cast<long>(vertexData[0]);
+        vertexIndex[1] = reinterpret_cast<long>(vertexData[1]);
+        vertexIndex[2] = reinterpret_cast<long>(vertexData[2]);
+        vertexIndex[3] = reinterpret_cast<long>(vertexData[3]);
+        GrTess* tess = static_cast<GrTess*>(data);
+        int outIndex = tess->combine(coords, vertexIndex, weight);
+        *reinterpret_cast<long*>(outData) = outIndex;
+    }
+protected:
+    GLUtesselator* fTess;    // owned
+    GrPointArray fVertices;  // all vertices fed to or synthesized by GLU
+    double* fInVertices;     // owned; raw coordinates handed to GLU
+};
+
+// Tessellates the input contours into triangles, recording the triangle
+// vertex indices in fIndices.
+class GrPolygonTess : public GrTess {
+public:
+    GrPolygonTess(int count, unsigned winding_rule)
+      : GrTess(count, winding_rule) {
+    }
+    ~GrPolygonTess() {
+    }
+    const GrIndexArray& indices() const { return fIndices; }
+protected:
+    virtual void begin(GLenum type) {
+        // registering an edge-flag callback makes GLU emit only triangles
+        GR_DEBUGASSERT(type == GL_TRIANGLES);
+    }
+    virtual void vertex(int index) {
+        *fIndices.append() = index;
+    }
+    virtual void edgeFlag(bool flag) {}
+    virtual void end() {}
+    // self-intersection: append the synthesized point, return its index
+    virtual int combine(GLdouble coords[3], int vertexIndices[4],
+                        GLfloat weight[4]) {
+        int index = fVertices.count();
+        GrPoint p = GrPoint::Make(static_cast<float>(coords[0]),
+                                  static_cast<float>(coords[1]));
+        *fVertices.append() = p;
+        return index;
+    }
+protected:
+    GrIndexArray fIndices;  // triangle list indices into vertices()
+};
+
+// Like GrPolygonTess, but also derives an antialiasing edge equation for each
+// boundary edge of the tessellation. Every vertex gets two edge slots:
+// incoming edge at 2*i, outgoing edge at 2*i+1.
+class GrEdgePolygonTess : public GrPolygonTess {
+public:
+    GrEdgePolygonTess(int count, unsigned winding_rule, const SkMatrix& matrix)
+      : GrPolygonTess(count, winding_rule),
+        fMatrix(matrix),
+        fEdgeFlag(false),
+        fEdgeVertex(-1),
+        fTriStartVertex(-1),
+        fEdges(NULL) {
+    }
+    ~GrEdgePolygonTess() {
+        delete[] fEdges;
+    }
+    const GrDrawTarget::Edge* edges() const { return fEdges; }
+private:
+    // Computes the edge equation for vertices index0->index1 (after mapping
+    // through fMatrix and clamping) and stores it as index0's outgoing and
+    // index1's incoming edge.
+    void addEdge(int index0, int index1) {
+        GrPoint p = fVertices[index0];
+        GrPoint q = fVertices[index1];
+        fMatrix.mapPoints(&p, 1);
+        fMatrix.mapPoints(&q, 1);
+        p = sanitizePoint(p);
+        q = sanitizePoint(q);
+        // a degenerate edge has no well-defined normal
+        if (p == q) return;
+        GrDrawTarget::Edge edge = computeEdge(p, q, 1.0f);
+        fEdges[index0 * 2 + 1] = edge;
+        fEdges[index1 * 2] = edge;
+    }
+    virtual void begin(GLenum type) {
+        GR_DEBUGASSERT(type == GL_TRIANGLES);
+        // two edge slots per vertex, zero-filled (zero == invalid edge)
+        int count = fVertices.count() * 2;
+        fEdges = new GrDrawTarget::Edge[count];
+        memset(fEdges, 0, count * sizeof(GrDrawTarget::Edge));
+    }
+    virtual void edgeFlag(bool flag) {
+        fEdgeFlag = flag;
+    }
+    // Tracks boundary edges as triangles stream in: a boundary edge runs from
+    // the previous flagged vertex to the current one, wrapping back to the
+    // triangle's first vertex at each triangle start.
+    virtual void vertex(int index) {
+        bool triStart = fIndices.count() % 3 == 0;
+        GrPolygonTess::vertex(index);
+        if (fEdgeVertex != -1) {
+            if (triStart) {
+                addEdge(fEdgeVertex, fTriStartVertex);
+            } else {
+                addEdge(fEdgeVertex, index);
+            }
+        }
+        if (triStart) {
+            fTriStartVertex = index;
+        }
+        if (fEdgeFlag) {
+            fEdgeVertex = index;
+        } else {
+            fEdgeVertex = -1;
+        }
+    }
+    virtual void end() {
+        // close off the final triangle's boundary edge, if one is pending
+        if (fEdgeVertex != -1) {
+            addEdge(fEdgeVertex, fTriStartVertex);
+        }
+    }
+    GrMatrix fMatrix;                  // maps tess coords before edge math
+    bool fEdgeFlag;                    // last edge flag received from GLU
+    int fEdgeVertex, fTriStartVertex;  // pending edge start / triangle start
+    GrDrawTarget::Edge* fEdges;        // owned; two slots per vertex
+};
+
+// Extracts only the boundary contours of the tessellation
+// (GLU_TESS_BOUNDARY_ONLY): each begin()/end() pair delimits one contour.
+class GrBoundaryTess : public GrTess {
+public:
+    GrBoundaryTess(int count, unsigned winding_rule)
+      : GrTess(count, winding_rule),
+        fContourStart(0) {
+        Sk_gluTessProperty(fTess, GLU_TESS_BOUNDARY_ONLY, 1);
+    }
+    ~GrBoundaryTess() {
+    }
+    GrPointArray& contourPoints() { return fContourPoints; }
+    // contours()[j] is the number of points in the j-th contour
+    const GrIndexArray& contours() const { return fContours; }
+private:
+    virtual void begin(GLenum type) {
+        // remember where this contour starts so end() can size it
+        fContourStart = fContourPoints.count();
+    }
+    virtual void vertex(int index) {
+        *fContourPoints.append() = fVertices.at(index);
+    }
+    virtual void edgeFlag(bool flag) {}
+    virtual void end() {
+        // record the point count of the contour just finished
+        *fContours.append() = fContourPoints.count() - fContourStart;
+    }
+    virtual int combine(GLdouble coords[3], int vertexIndices[4],
+                        GLfloat weight[4]) {
+        int index = fVertices.count();
+        *fVertices.append() = GrPoint::Make(static_cast<float>(coords[0]),
+                                            static_cast<float>(coords[1]));
+        return index;
+    }
+    GrPointArray fContourPoints;  // all contour points, back to back
+    GrIndexArray fContours;       // per-contour point counts
+    size_t fContourStart;         // fContourPoints size when contour began
+};
+
+// Approximate float equality with a fixed absolute tolerance of 1e-4.
+// NOTE(review): absolute (not relative) epsilon — adequate for
+// pixel-space coordinates, not for arbitrary magnitudes.
+static bool nearlyEqual(float a, float b) {
+ return fabsf(a - b) < 0.0001f;
+}
+
+// Component-wise approximate equality of two points (see float overload).
+static bool nearlyEqual(const GrPoint& a, const GrPoint& b) {
+ return nearlyEqual(a.fX, b.fX) && nearlyEqual(a.fY, b.fY);
+}
+
+// True if two edge equations have (nearly) the same or exactly opposite
+// normals, i.e. the edges are parallel or anti-parallel. Assumes the
+// (fX, fY) normals are comparably normalized so the epsilon applies.
+static bool parallel(const GrDrawTarget::Edge& a, const GrDrawTarget::Edge& b) {
+ return (nearlyEqual(a.fX, b.fX) && nearlyEqual(a.fY, b.fY)) ||
+ (nearlyEqual(a.fX, -b.fX) && nearlyEqual(a.fY, -b.fY));
+}
+
+// Maps a GrPathFill to the GLU tessellator winding rule. Inverse fills
+// map to the same rule as their non-inverse counterparts; inversion is
+// handled separately by appending a screen-sized bounding quad.
+// NOTE(review): kInverseWinding maps to GLU_TESS_WINDING_POSITIVE while
+// kWinding maps to GLU_TESS_WINDING_NONZERO — asymmetric; confirm this
+// is intentional rather than a typo.
+static unsigned fill_type_to_glu_winding_rule(GrPathFill fill) {
+ switch (fill) {
+ case kWinding_PathFill:
+ return GLU_TESS_WINDING_NONZERO;
+ case kEvenOdd_PathFill:
+ return GLU_TESS_WINDING_ODD;
+ case kInverseWinding_PathFill:
+ return GLU_TESS_WINDING_POSITIVE;
+ case kInverseEvenOdd_PathFill:
+ return GLU_TESS_WINDING_ODD;
+ case kHairLine_PathFill:
+ return GLU_TESS_WINDING_NONZERO; // FIXME: handle this
+ default:
+ GrAssert(!"Unknown path fill!");
+ return 0;
+ }
+}
+
+// Default constructor; no state beyond the GrPathRenderer base.
+GrTesselatedPathRenderer::GrTesselatedPathRenderer() {
+}
+
+// Determines winding direction from the first non-degenerate vertex
+// triple: negative cross product of consecutive direction vectors means
+// counter-clockwise (in this y-down coordinate convention). Skips
+// leading vertices whose direction vectors are nearly identical.
+// NOTE(review): reads pts[0..2] unconditionally, so callers must pass
+// count >= 3 (computeEdgesAndIntersect guarantees this).
+static bool isCCW(const GrPoint* pts, int count) {
+ GrVec v1, v2;
+ do {
+ v1 = pts[1] - pts[0];
+ v2 = pts[2] - pts[1];
+ pts++;
+ count--;
+ } while (nearlyEqual(v1, v2) && count > 3);
+ return v1.cross(v2) < 0;
+}
+
+// An all-zero edge equation marks an unset slot (see GrEdgePolygonTess
+// ::begin, which zero-fills the edge array); anything else is valid.
+static bool validEdge(const GrDrawTarget::Edge& edge) {
+ return !(edge.fX == 0.0f && edge.fY == 0.0f && edge.fZ == 0.0f);
+}
+
+// Maps 'vertices' to device space, builds an edge equation per
+// non-degenerate polygon segment (offset outward by half a pixel for
+// AA coverage), then replaces each vertex with the intersection of its
+// two adjacent offset edges and maps it back to source space via
+// 'inverse'. 'sign' selects the offset direction; 0 means derive it
+// from the polygon's winding. Returns the number of edges produced
+// (0 if the polygon is too small or fully degenerate).
+// NOTE(review): after degenerate points are skipped, edges->count() can
+// be less than numVertices, yet the second loop indexes vertices[i] by
+// edge index — verify the intended correspondence when duplicates occur.
+static size_t computeEdgesAndIntersect(const GrMatrix& matrix,
+ const GrMatrix& inverse,
+ GrPoint* vertices,
+ size_t numVertices,
+ GrEdgeArray* edges,
+ float sign) {
+ if (numVertices < 3) {
+ return 0;
+ }
+ matrix.mapPoints(vertices, numVertices);
+ if (sign == 0.0f) {
+ sign = isCCW(vertices, numVertices) ? -1.0f : 1.0f;
+ }
+ // Walk the closed polygon, starting from the last vertex so every
+ // segment (prev -> current) is covered exactly once.
+ GrPoint p = sanitizePoint(vertices[numVertices - 1]);
+ for (size_t i = 0; i < numVertices; ++i) {
+ GrPoint q = sanitizePoint(vertices[i]);
+ if (p == q) {
+ continue;
+ }
+ GrDrawTarget::Edge edge = computeEdge(p, q, sign);
+ edge.fZ += 0.5f; // Offset by half a pixel along the tangent.
+ *edges->append() = edge;
+ p = q;
+ }
+ int count = edges->count();
+ if (count == 0) {
+ return 0;
+ }
+ // Push each vertex to the intersection of its two offset edges.
+ GrDrawTarget::Edge prev_edge = edges->at(0);
+ for (int i = 0; i < count; ++i) {
+ GrDrawTarget::Edge edge = edges->at(i < count - 1 ? i + 1 : 0);
+ if (parallel(edge, prev_edge)) {
+ // 3 points are collinear; offset by half the tangent instead
+ vertices[i].fX -= edge.fX * 0.5f;
+ vertices[i].fY -= edge.fY * 0.5f;
+ } else {
+ vertices[i] = prev_edge.intersect(edge);
+ }
+ inverse.mapPoints(&vertices[i], 1);
+ prev_edge = edge;
+ }
+ return edges->count();
+}
+
+// Renders fPath by flattening it to polygons and (depending on the
+// case) drawing it as a convex fan, as GLU-tessellated triangles, or as
+// per-triangle geometry with edge-AA data. Four paths through the code:
+// 1. convex, single contour, AA: edge-AA fan(s);
+// 2. convex, single contour, no AA: plain fan;
+// 3. concave/multi-contour, AA: boundary tess + inflate + re-tess,
+// drawn one triangle at a time with edge equations;
+// 4. concave/multi-contour, no AA: single indexed tessellation.
+void GrTesselatedPathRenderer::drawPath(GrDrawTarget::StageBitfield stages) {
+ GrDrawTarget::AutoStateRestore asr(fTarget);
+ // face culling doesn't make sense here
+ GrAssert(GrDrawTarget::kBoth_DrawFace == fTarget->getDrawFace());
+
+ GrMatrix viewM = fTarget->getViewMatrix();
+
+ // Flattening tolerance of one device pixel, converted to source space.
+ GrScalar tol = GR_Scalar1;
+ tol = GrPathUtils::scaleToleranceToSrc(tol, viewM, fPath->getBounds());
+ GrScalar tolSqd = GrMul(tol, tol);
+
+ int subpathCnt;
+ int maxPts = GrPathUtils::worstCasePointCount(*fPath, &subpathCnt, tol);
+
+ GrVertexLayout layout = 0;
+ for (int s = 0; s < GrDrawTarget::kNumStages; ++s) {
+ if ((1 << s) & stages) {
+ layout |= GrDrawTarget::StagePosAsTexCoordVertexLayoutBit(s);
+ }
+ }
+
+ // Inverse fills get one extra screen-sized quad contour (added below).
+ bool inverted = GrIsFillInverted(fFill);
+ if (inverted) {
+ maxPts += 4;
+ subpathCnt++;
+ }
+ // Vertex indices must fit in 16 bits.
+ if (maxPts > USHRT_MAX) {
+ return;
+ }
+ SkAutoSTMalloc<8, GrPoint> baseMem(maxPts);
+ GrPoint* base = baseMem;
+ GrPoint* vert = base;
+ GrPoint* subpathBase = base;
+
+ SkAutoSTMalloc<8, uint16_t> subpathVertCount(subpathCnt);
+
+ GrPoint pts[4];
+ SkPath::Iter iter(*fPath, false);
+
+ bool first = true;
+ int subpath = 0;
+
+ // Flatten the path: curves become line chains within tolerance.
+ for (;;) {
+ switch (iter.next(pts)) {
+ case kMove_PathCmd:
+ if (!first) {
+ subpathVertCount[subpath] = vert-subpathBase;
+ subpathBase = vert;
+ ++subpath;
+ }
+ *vert = pts[0];
+ vert++;
+ break;
+ case kLine_PathCmd:
+ *vert = pts[1];
+ vert++;
+ break;
+ case kQuadratic_PathCmd: {
+ GrPathUtils::generateQuadraticPoints(pts[0], pts[1], pts[2],
+ tolSqd, &vert,
+ GrPathUtils::quadraticPointCount(pts, tol));
+ break;
+ }
+ case kCubic_PathCmd: {
+ GrPathUtils::generateCubicPoints(pts[0], pts[1], pts[2], pts[3],
+ tolSqd, &vert,
+ GrPathUtils::cubicPointCount(pts, tol));
+ break;
+ }
+ case kClose_PathCmd:
+ break;
+ case kEnd_PathCmd:
+ subpathVertCount[subpath] = vert-subpathBase;
+ ++subpath; // this could be only in debug
+ goto FINISHED;
+ }
+ first = false;
+ }
+FINISHED:
+ if (0 != fTranslate.fX || 0 != fTranslate.fY) {
+ for (int i = 0; i < vert - base; i++) {
+ base[i].offset(fTranslate.fX, fTranslate.fY);
+ }
+ }
+
+ // For inverse fills, append the render target bounds (mapped back to
+ // source space) as an extra contour so the tessellator fills outside.
+ if (inverted) {
+ GrRect bounds;
+ GrAssert(NULL != fTarget->getRenderTarget());
+ bounds.setLTRB(0, 0,
+ GrIntToScalar(fTarget->getRenderTarget()->width()),
+ GrIntToScalar(fTarget->getRenderTarget()->height()));
+ GrMatrix vmi;
+ if (fTarget->getViewInverse(&vmi)) {
+ vmi.mapRect(&bounds);
+ }
+ *vert++ = GrPoint::Make(bounds.fLeft, bounds.fTop);
+ *vert++ = GrPoint::Make(bounds.fLeft, bounds.fBottom);
+ *vert++ = GrPoint::Make(bounds.fRight, bounds.fBottom);
+ *vert++ = GrPoint::Make(bounds.fRight, bounds.fTop);
+ subpathVertCount[subpath++] = 4;
+ }
+
+ GrAssert(subpath == subpathCnt);
+ GrAssert((vert - base) <= maxPts);
+
+ size_t count = vert - base;
+
+ if (count < 3) {
+ return;
+ }
+
+ // Fast path: a single convex contour can be drawn as a triangle fan
+ // without running the tessellator at all.
+ if (subpathCnt == 1 && !inverted && fPath->isConvex()) {
+ if (fTarget->isAntialiasState()) {
+ GrEdgeArray edges;
+ GrMatrix inverse, matrix = fTarget->getViewMatrix();
+ fTarget->getViewInverse(&inverse);
+
+ count = computeEdgesAndIntersect(matrix, inverse, base, count, &edges, 0.0f);
+ size_t maxEdges = fTarget->getMaxEdges();
+ if (count == 0) {
+ return;
+ }
+ if (count <= maxEdges) {
+ // All edges fit; upload all edges and draw all verts as a fan
+ fTarget->setVertexSourceToArray(layout, base, count);
+ fTarget->setEdgeAAData(&edges[0], count);
+ fTarget->drawNonIndexed(kTriangleFan_PrimitiveType, 0, count);
+ } else {
+ // Upload "maxEdges" edges and verts at a time, and draw as
+ // separate fans
+ for (size_t i = 0; i < count - 2; i += maxEdges - 2) {
+ edges[i] = edges[0];
+ base[i] = base[0];
+ int size = GR_CT_MIN(count - i, maxEdges);
+ fTarget->setVertexSourceToArray(layout, &base[i], size);
+ fTarget->setEdgeAAData(&edges[i], size);
+ fTarget->drawNonIndexed(kTriangleFan_PrimitiveType, 0, size);
+ }
+ }
+ fTarget->setEdgeAAData(NULL, 0);
+ } else {
+ fTarget->setVertexSourceToArray(layout, base, count);
+ fTarget->drawNonIndexed(kTriangleFan_PrimitiveType, 0, count);
+ }
+ return;
+ }
+
+ if (fTarget->isAntialiasState()) {
+ // Run the tesselator once to get the boundaries.
+ GrBoundaryTess btess(count, fill_type_to_glu_winding_rule(fFill));
+ btess.addVertices(base, subpathVertCount, subpathCnt);
+
+ GrMatrix inverse, matrix = fTarget->getViewMatrix();
+ if (!fTarget->getViewInverse(&inverse)) {
+ return;
+ }
+
+ if (btess.vertices().count() > USHRT_MAX) {
+ return;
+ }
+
+ // Inflate the boundary, and run the tesselator again to generate
+ // interior polys.
+ const GrPointArray& contourPoints = btess.contourPoints();
+ const GrIndexArray& contours = btess.contours();
+ GrEdgePolygonTess ptess(contourPoints.count(), GLU_TESS_WINDING_NONZERO, matrix);
+
+ size_t i = 0;
+ Sk_gluTessBeginPolygon(ptess.tess(), &ptess);
+ for (int contour = 0; contour < contours.count(); ++contour) {
+ int count = contours[contour];
+ GrEdgeArray edges;
+ int newCount = computeEdgesAndIntersect(matrix, inverse, &btess.contourPoints()[i], count, &edges, 1.0f);
+ Sk_gluTessBeginContour(ptess.tess());
+ for (int j = 0; j < newCount; j++) {
+ ptess.addVertex(contourPoints[i + j], ptess.vertices().count());
+ }
+ i += count;
+ Sk_gluTessEndContour(ptess.tess());
+ }
+
+ Sk_gluTessEndPolygon(ptess.tess());
+
+ if (ptess.vertices().count() > USHRT_MAX) {
+ return;
+ }
+
+ // Draw the resulting polys and upload their edge data.
+ fTarget->enableState(GrDrawTarget::kEdgeAAConcave_StateBit);
+ const GrPointArray& vertices = ptess.vertices();
+ const GrIndexArray& indices = ptess.indices();
+ const GrDrawTarget::Edge* edges = ptess.edges();
+ GR_DEBUGASSERT(indices.count() % 3 == 0);
+ // One draw per triangle: each vertex carries up to two valid
+ // boundary-edge equations gathered by GrEdgePolygonTess::addEdge.
+ for (int i = 0; i < indices.count(); i += 3) {
+ GrPoint tri_verts[3];
+ int index0 = indices[i];
+ int index1 = indices[i + 1];
+ int index2 = indices[i + 2];
+ tri_verts[0] = vertices[index0];
+ tri_verts[1] = vertices[index1];
+ tri_verts[2] = vertices[index2];
+ GrDrawTarget::Edge tri_edges[6];
+ int t = 0;
+ const GrDrawTarget::Edge& edge0 = edges[index0 * 2];
+ const GrDrawTarget::Edge& edge1 = edges[index0 * 2 + 1];
+ const GrDrawTarget::Edge& edge2 = edges[index1 * 2];
+ const GrDrawTarget::Edge& edge3 = edges[index1 * 2 + 1];
+ const GrDrawTarget::Edge& edge4 = edges[index2 * 2];
+ const GrDrawTarget::Edge& edge5 = edges[index2 * 2 + 1];
+ if (validEdge(edge0) && validEdge(edge1)) {
+ tri_edges[t++] = edge0;
+ tri_edges[t++] = edge1;
+ }
+ if (validEdge(edge2) && validEdge(edge3)) {
+ tri_edges[t++] = edge2;
+ tri_edges[t++] = edge3;
+ }
+ if (validEdge(edge4) && validEdge(edge5)) {
+ tri_edges[t++] = edge4;
+ tri_edges[t++] = edge5;
+ }
+ fTarget->setEdgeAAData(&tri_edges[0], t);
+ fTarget->setVertexSourceToArray(layout, &tri_verts[0], 3);
+ fTarget->drawNonIndexed(kTriangles_PrimitiveType, 0, 3);
+ }
+ fTarget->setEdgeAAData(NULL, 0);
+ fTarget->disableState(GrDrawTarget::kEdgeAAConcave_StateBit);
+ return;
+ }
+
+ // Non-AA concave/multi-contour case: one indexed tessellated draw.
+ GrPolygonTess ptess(count, fill_type_to_glu_winding_rule(fFill));
+ ptess.addVertices(base, subpathVertCount, subpathCnt);
+ const GrPointArray& vertices = ptess.vertices();
+ const GrIndexArray& indices = ptess.indices();
+ if (indices.count() > 0) {
+ fTarget->setVertexSourceToArray(layout, vertices.begin(), vertices.count());
+ fTarget->setIndexSourceToArray(indices.begin(), indices.count());
+ fTarget->drawIndexed(kTriangles_PrimitiveType,
+ 0,
+ 0,
+ vertices.count(),
+ indices.count());
+ }
+}
+
+// This renderer handles every fill type except hairlines (see the
+// FIXME in fill_type_to_glu_winding_rule).
+bool GrTesselatedPathRenderer::canDrawPath(const GrDrawTarget* target,
+ const SkPath& path,
+ GrPathFill fill) const {
+ return kHairLine_PathFill != fill;
+}
+
+// Never called: requiresStencilPass() returns false for this renderer,
+// so reaching here is a programming error.
+void GrTesselatedPathRenderer::drawPathToStencil() {
+ GrAlwaysAssert(!"multipass stencil should not be needed");
+}
+
+// Edge-AA is supported for all paths this renderer accepts.
+bool GrTesselatedPathRenderer::supportsAA(const GrDrawTarget* target,
+ const SkPath& path,
+ GrPathFill fill) const {
+ return true;
+}
diff --git a/src/gpu/GrTesselatedPathRenderer.h b/src/gpu/GrTesselatedPathRenderer.h
new file mode 100644
index 0000000000..d4f22438c6
--- /dev/null
+++ b/src/gpu/GrTesselatedPathRenderer.h
@@ -0,0 +1,33 @@
+
+/*
+ * Copyright 2011 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+#ifndef GrTesselatedPathRenderer_DEFINED
+#define GrTesselatedPathRenderer_DEFINED
+
+#include "GrPathRenderer.h"
+
+// Path renderer that fills paths on the CPU with a GLU-style
+// tessellator, optionally attaching per-edge antialiasing data.
+// NOTE(review): declarations here take const GrPath& while the .cpp
+// definitions use const SkPath& — presumably GrPath is a typedef of
+// SkPath; confirm, otherwise these would not be overrides.
+class GrTesselatedPathRenderer : public GrPathRenderer {
+public:
+ GrTesselatedPathRenderer();
+
+ virtual void drawPath(GrDrawTarget::StageBitfield stages);
+ virtual bool canDrawPath(const GrDrawTarget* target,
+ const GrPath& path,
+ GrPathFill fill) const;
+
+ // Tessellation never needs a separate stencil pass.
+ virtual bool requiresStencilPass(const GrDrawTarget* target,
+ const GrPath& path,
+ GrPathFill fill) const { return false; }
+ virtual void drawPathToStencil();
+ virtual bool supportsAA(const GrDrawTarget* target,
+ const GrPath& path,
+ GrPathFill fill) const;
+};
+
+#endif
diff --git a/src/gpu/GrTextContext.cpp b/src/gpu/GrTextContext.cpp
new file mode 100644
index 0000000000..06cc2a125f
--- /dev/null
+++ b/src/gpu/GrTextContext.cpp
@@ -0,0 +1,314 @@
+
+/*
+ * Copyright 2010 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+
+#include "GrTextContext.h"
+#include "GrAtlas.h"
+#include "GrContext.h"
+#include "GrTextStrike.h"
+#include "GrTextStrike_impl.h"
+#include "GrFontScaler.h"
+#include "GrIndexBuffer.h"
+#include "GrGpuVertex.h"
+#include "GrDrawTarget.h"
+
+enum {
+ kGlyphMaskStage = GrPaint::kTotalStages,
+};
+
+// Issues one draw for all glyph quads batched so far, then resets the
+// batch state (vertex source, current texture, vertex count).
+// Each glyph occupies 4 vertices / 6 indices from the quad index buffer.
+void GrTextContext::flushGlyphs() {
+ if (fCurrVertex > 0) {
+ GrDrawTarget::AutoStateRestore asr(fDrawTarget);
+
+ // setup our sampler state for our text texture/atlas
+ // Nearest filtering suffices when glyphs are blitted 1:1; any
+ // external matrix means resampling, so use bilinear.
+ GrSamplerState::Filter filter;
+ if (fExtMatrix.isIdentity()) {
+ filter = GrSamplerState::kNearest_Filter;
+ } else {
+ filter = GrSamplerState::kBilinear_Filter;
+ }
+ GrSamplerState sampler(GrSamplerState::kRepeat_WrapMode,
+ GrSamplerState::kRepeat_WrapMode,
+ filter);
+ fDrawTarget->setSamplerState(kGlyphMaskStage, sampler);
+
+ GrAssert(GrIsALIGN4(fCurrVertex));
+ // 6 indices per 4 vertices (quad): count + count/2.
+ int nIndices = fCurrVertex + (fCurrVertex >> 1);
+ GrAssert(fCurrTexture);
+ fDrawTarget->setTexture(kGlyphMaskStage, fCurrTexture);
+
+ // Non-alpha-only atlas config implies LCD (per-channel) coverage,
+ // which needs a constant-color blend instead of per-pixel modulate.
+ if (!GrPixelConfigIsAlphaOnly(fCurrTexture->config())) {
+ if (kOne_BlendCoeff != fPaint.fSrcBlendCoeff ||
+ kISA_BlendCoeff != fPaint.fDstBlendCoeff ||
+ fPaint.hasTexture()) {
+ GrPrintf("LCD Text will not draw correctly.\n");
+ }
+ // setup blend so that we get mask * paintColor + (1-mask)*dstColor
+ fDrawTarget->setBlendConstant(fPaint.fColor);
+ fDrawTarget->setBlendFunc(kConstC_BlendCoeff, kISC_BlendCoeff);
+ // don't modulate by the paint's color in the frag since we're
+ // already doing it via the blend const.
+ fDrawTarget->setColor(0xffffffff);
+ } else {
+ // set back to normal in case we took LCD path previously.
+ fDrawTarget->setBlendFunc(fPaint.fSrcBlendCoeff, fPaint.fDstBlendCoeff);
+ fDrawTarget->setColor(fPaint.fColor);
+ }
+
+ fDrawTarget->setIndexSourceToBuffer(fContext->getQuadIndexBuffer());
+
+ fDrawTarget->drawIndexed(kTriangles_PrimitiveType,
+ 0, 0, fCurrVertex, nIndices);
+ // Reset batch state so the next glyph starts a fresh reservation.
+ fDrawTarget->resetVertexSource();
+ fVertices = NULL;
+ fMaxVertices = 0;
+ fCurrVertex = 0;
+ fCurrTexture->unref();
+ fCurrTexture = NULL;
+ }
+}
+
+// Sets up a text-drawing session: computes the clip rect in glyph
+// space, swaps the context's view matrix for the external matrix
+// (restored in the destructor), compensates the paint's texture/mask
+// samplers for that swap, and configures the vertex layout.
+GrTextContext::GrTextContext(GrContext* context,
+ const GrPaint& paint,
+ const GrMatrix* extMatrix) : fPaint(paint) {
+ fContext = context;
+ fStrike = NULL;
+
+ fCurrTexture = NULL;
+ fCurrVertex = 0;
+
+ if (NULL != extMatrix) {
+ fExtMatrix = *extMatrix;
+ } else {
+ fExtMatrix = GrMatrix::I();
+ }
+ // Map the conservative clip bounds back through the external matrix
+ // so glyph-space quick-reject tests (drawPackedGlyph) are valid.
+ // NOTE(review): if fExtMatrix is non-identity but non-invertible,
+ // fClipRect is left uninitialized here — confirm that case is
+ // impossible upstream.
+ if (context->getClip().hasConservativeBounds()) {
+ if (!fExtMatrix.isIdentity()) {
+ GrMatrix inverse;
+ GrRect r = context->getClip().getConservativeBounds();
+ if (fExtMatrix.invert(&inverse)) {
+ inverse.mapRect(&r);
+ r.roundOut(&fClipRect);
+ }
+ } else {
+ context->getClip().getConservativeBounds().roundOut(&fClipRect);
+ }
+ } else {
+ fClipRect.setLargest();
+ }
+
+ // save the context's original matrix off and restore in destructor
+ // this must be done before getTextTarget.
+ fOrigViewMatrix = fContext->getMatrix();
+ fContext->setMatrix(fExtMatrix);
+
+ /*
+ We need to call preConcatMatrix with our viewmatrix's inverse, for each
+ texture and mask in the paint. However, computing the inverse can be
+ expensive, and its possible we may not have any textures or masks, so these
+ two loops are written such that we only compute the inverse (once) if we
+ need it. We do this on our copy of the paint rather than directly on the
+ draw target because we re-provide the paint to the context when we have
+ to flush our glyphs or draw a glyph as a path midstream.
+ */
+ bool invVMComputed = false;
+ GrMatrix invVM;
+ for (int t = 0; t < GrPaint::kMaxTextures; ++t) {
+ if (NULL != fPaint.getTexture(t)) {
+ if (invVMComputed || fOrigViewMatrix.invert(&invVM)) {
+ invVMComputed = true;
+ fPaint.getTextureSampler(t)->preConcatMatrix(invVM);
+ }
+ }
+ }
+ for (int m = 0; m < GrPaint::kMaxMasks; ++m) {
+ if (NULL != fPaint.getMask(m)) {
+ if (invVMComputed || fOrigViewMatrix.invert(&invVM)) {
+ invVMComputed = true;
+ fPaint.getMaskSampler(m)->preConcatMatrix(invVM);
+ }
+ }
+ }
+
+ fDrawTarget = fContext->getTextTarget(fPaint);
+
+ fVertices = NULL;
+ fMaxVertices = 0;
+
+ // Position + one texcoord set for the glyph mask stage.
+ fVertexLayout =
+ GrDrawTarget::kTextFormat_VertexLayoutBit |
+ GrDrawTarget::StageTexCoordVertexLayoutBit(kGlyphMaskStage, 0);
+
+ int stageMask = paint.getActiveStageMask();
+ if (stageMask) {
+ for (int i = 0; i < GrPaint::kTotalStages; ++i) {
+ if ((1 << i) & stageMask) {
+ fVertexLayout |=
+ GrDrawTarget::StagePosAsTexCoordVertexLayoutBit(i);
+ GrAssert(i != kGlyphMaskStage);
+ }
+ }
+ }
+}
+
+// Flushes any batched glyphs and restores the view matrix saved in the
+// constructor.
+GrTextContext::~GrTextContext() {
+ this->flushGlyphs();
+ fContext->setMatrix(fOrigViewMatrix);
+}
+
+// Public flush: forces the pending glyph batch to be drawn now.
+void GrTextContext::flush() {
+ this->flushGlyphs();
+}
+
+// Writes the four corners of rect (l,t,r,b) as an integer triangle fan
+// into v, stepping 'stride' vertices between corners (used to interleave
+// position and texcoord arrays).
+static inline void setRectFan(GrGpuTextVertex v[4], int l, int t, int r, int b,
+ int stride) {
+ v[0 * stride].setI(l, t);
+ v[1 * stride].setI(l, b);
+ v[2 * stride].setI(r, b);
+ v[3 * stride].setI(r, t);
+}
+
+// Appends one glyph quad to the current batch at fixed-point position
+// (vx, vy). Falls back in stages: use the glyph's atlas if present;
+// otherwise try to add it to the atlas (purging other strikes if
+// needed); otherwise render the glyph as a path.
+void GrTextContext::drawPackedGlyph(GrGlyph::PackedID packed,
+ GrFixed vx, GrFixed vy,
+ GrFontScaler* scaler) {
+ if (NULL == fStrike) {
+ fStrike = fContext->getFontCache()->getStrike(scaler);
+ }
+
+ GrGlyph* glyph = fStrike->getGlyph(packed, scaler);
+ if (NULL == glyph || glyph->fBounds.isEmpty()) {
+ return;
+ }
+
+ vx += GrIntToFixed(glyph->fBounds.fLeft);
+ vy += GrIntToFixed(glyph->fBounds.fTop);
+
+ // keep them as ints until we've done the clip-test
+ GrFixed width = glyph->fBounds.width();
+ GrFixed height = glyph->fBounds.height();
+
+ // check if we clipped out
+ // NOTE(review): 'true ||' forces the quick-reject test to run even
+ // when the glyph already has an atlas — looks like a deliberate
+ // always-clip choice, but confirm it isn't leftover debug code.
+ if (true || NULL == glyph->fAtlas) {
+ int x = vx >> 16;
+ int y = vy >> 16;
+ if (fClipRect.quickReject(x, y, x + width, y + height)) {
+// Gr_clz(3); // so we can set a break-point in the debugger
+ return;
+ }
+ }
+
+ if (NULL == glyph->fAtlas) {
+ if (fStrike->getGlyphAtlas(glyph, scaler)) {
+ goto HAS_ATLAS;
+ }
+
+ // before we purge the cache, we must flush any accumulated draws
+ this->flushGlyphs();
+ fContext->flushText();
+
+ // try to purge
+ fContext->getFontCache()->purgeExceptFor(fStrike);
+ if (fStrike->getGlyphAtlas(glyph, scaler)) {
+ goto HAS_ATLAS;
+ }
+
+ // Atlas is still full: draw this glyph as a filled path instead.
+ if (NULL == glyph->fPath) {
+ GrPath* path = new GrPath;
+ if (!scaler->getGlyphPath(glyph->glyphID(), path)) {
+ // flag the glyph as being dead?
+ delete path;
+ return;
+ }
+ glyph->fPath = path;
+ }
+
+ GrPoint translate;
+ translate.set(GrFixedToScalar(vx - GrIntToFixed(glyph->fBounds.fLeft)),
+ GrFixedToScalar(vy - GrIntToFixed(glyph->fBounds.fTop)));
+ fContext->drawPath(fPaint, *glyph->fPath, kWinding_PathFill,
+ &translate);
+ return;
+ }
+
+HAS_ATLAS:
+ GrAssert(glyph->fAtlas);
+
+ // now promote them to fixed
+ width = GrIntToFixed(width);
+ height = GrIntToFixed(height);
+
+ GrTexture* texture = glyph->fAtlas->texture();
+ GrAssert(texture);
+
+ // A texture change or a full vertex buffer forces a flush first.
+ if (fCurrTexture != texture || fCurrVertex + 4 > fMaxVertices) {
+ this->flushGlyphs();
+ fCurrTexture = texture;
+ fCurrTexture->ref();
+ }
+
+ if (NULL == fVertices) {
+ // If we need to reserve vertices allow the draw target to suggest
+ // a number of verts to reserve and whether to perform a flush.
+ fMaxVertices = kMinRequestedVerts;
+ bool flush = fDrawTarget->geometryHints(fVertexLayout,
+ &fMaxVertices,
+ NULL);
+ if (flush) {
+ this->flushGlyphs();
+ fContext->flushText();
+ fDrawTarget = fContext->getTextTarget(fPaint);
+ fMaxVertices = kDefaultRequestedVerts;
+ // ignore return, no point in flushing again.
+ fDrawTarget->geometryHints(fVertexLayout,
+ &fMaxVertices,
+ NULL);
+ }
+
+ int maxQuadVertices = 4 * fContext->getQuadIndexBuffer()->maxQuads();
+ if (fMaxVertices < kMinRequestedVerts) {
+ fMaxVertices = kDefaultRequestedVerts;
+ } else if (fMaxVertices > maxQuadVertices) {
+ // don't exceed the limit of the index buffer
+ fMaxVertices = maxQuadVertices;
+ }
+ bool success = fDrawTarget->reserveVertexSpace(fVertexLayout,
+ fMaxVertices,
+ GrTCast<void**>(&fVertices));
+ GrAlwaysAssert(success);
+ }
+
+ GrFixed tx = GrIntToFixed(glyph->fAtlasLocation.fX);
+ GrFixed ty = GrIntToFixed(glyph->fAtlasLocation.fY);
+
+ // Vertices are interleaved: position at 2*i, texcoord at 2*i+1.
+#if GR_GL_TEXT_TEXTURE_NORMALIZED
+ int x = vx >> 16;
+ int y = vy >> 16;
+ int w = width >> 16;
+ int h = height >> 16;
+
+ setRectFan(&fVertices[2*fCurrVertex], x, y, x + w, y + h, 2);
+ setRectFan(&fVertices[2*fCurrVertex+1],
+ texture->normalizeFixedX(tx),
+ texture->normalizeFixedY(ty),
+ texture->normalizeFixedX(tx + width),
+ texture->normalizeFixedY(ty + height),
+ 2);
+#else
+ fVertices[2*fCurrVertex].setXRectFan(vx, vy, vx + width, vy + height,
+ 2 * sizeof(GrGpuTextVertex));
+ fVertices[2*fCurrVertex+1].setXRectFan(texture->normalizeFixedX(tx),
+ texture->normalizeFixedY(ty),
+ texture->normalizeFixedX(tx + width),
+ texture->normalizeFixedY(ty + height),
+ 2 * sizeof(GrGpuTextVertex));
+#endif
+ fCurrVertex += 4;
+}
+
+
diff --git a/src/gpu/GrTextStrike.cpp b/src/gpu/GrTextStrike.cpp
new file mode 100644
index 0000000000..b8762adc11
--- /dev/null
+++ b/src/gpu/GrTextStrike.cpp
@@ -0,0 +1,205 @@
+
+/*
+ * Copyright 2010 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+
+#include "GrAtlas.h"
+#include "GrGpu.h"
+#include "GrRectanizer.h"
+#include "GrTextStrike.h"
+#include "GrTextStrike_impl.h"
+#include "GrRect.h"
+
+// Holds a ref on the GPU; the atlas manager is created lazily on first
+// strike generation. fHead/fTail form an MRU doubly-linked strike list.
+GrFontCache::GrFontCache(GrGpu* gpu) : fGpu(gpu) {
+ gpu->ref();
+ fAtlasMgr = NULL;
+
+ fHead = fTail = NULL;
+}
+
+// Deletes all strikes and the atlas manager, then drops the GPU ref.
+GrFontCache::~GrFontCache() {
+ fCache.deleteAll();
+ delete fAtlasMgr;
+ fGpu->unref();
+}
+
+// Creates a new strike for 'scaler', inserts it in the hash cache, and
+// pushes it onto the head (most-recently-used end) of the strike list.
+GrTextStrike* GrFontCache::generateStrike(GrFontScaler* scaler,
+ const Key& key) {
+ // Lazily create the atlas manager on first use.
+ if (NULL == fAtlasMgr) {
+ fAtlasMgr = new GrAtlasMgr(fGpu);
+ }
+ GrTextStrike* strike = new GrTextStrike(this, scaler->getKey(),
+ scaler->getMaskFormat(), fAtlasMgr);
+ fCache.insert(key, strike);
+
+ // Link at the head; if the list was empty this strike is also the tail.
+ if (fHead) {
+ fHead->fPrev = strike;
+ } else {
+ GrAssert(NULL == fTail);
+ fTail = strike;
+ }
+ strike->fPrev = NULL;
+ strike->fNext = fHead;
+ fHead = strike;
+
+ return strike;
+}
+
+// Releases every strike and the atlas manager (e.g. on context reset),
+// leaving the cache empty but reusable.
+void GrFontCache::freeAll() {
+ fCache.deleteAll();
+ delete fAtlasMgr;
+ fAtlasMgr = NULL;
+ fHead = NULL;
+ fTail = NULL;
+}
+
+// Purges strikes from the least-recently-used (tail) end to free atlas
+// space, skipping 'preserveStrike'. Purging stops after deleting a
+// strike that actually owned an atlas; strikes with no atlas are
+// deleted but don't end the walk since they free no atlas space.
+void GrFontCache::purgeExceptFor(GrTextStrike* preserveStrike) {
+ GrTextStrike* strike = fTail;
+ while (strike) {
+ if (strike == preserveStrike) {
+ strike = strike->fPrev;
+ continue;
+ }
+ GrTextStrike* strikeToPurge = strike;
+ // keep going if we won't free up any atlases with this strike.
+ strike = (NULL == strikeToPurge->fAtlas) ? strikeToPurge->fPrev : NULL;
+ int index = fCache.slowFindIndex(strikeToPurge);
+ GrAssert(index >= 0);
+ fCache.removeAt(index, strikeToPurge->fFontScalerKey->getHash());
+ this->detachStrikeFromList(strikeToPurge);
+ delete strikeToPurge;
+ }
+}
+
+#if GR_DEBUG
+// Debug-only invariant check: the hash cache and the doubly-linked MRU
+// list must contain the same number of strikes, traversed both ways.
+void GrFontCache::validate() const {
+ int count = fCache.count();
+ if (0 == count) {
+ GrAssert(!fHead);
+ GrAssert(!fTail);
+ } else if (1 == count) {
+ GrAssert(fHead == fTail);
+ } else {
+ GrAssert(fHead != fTail);
+ }
+
+ int count2 = 0;
+ const GrTextStrike* strike = fHead;
+ while (strike) {
+ count2 += 1;
+ strike = strike->fNext;
+ }
+ GrAssert(count == count2);
+
+ count2 = 0;
+ strike = fTail;
+ while (strike) {
+ count2 += 1;
+ strike = strike->fPrev;
+ }
+ GrAssert(count == count2);
+}
+#endif
+
+///////////////////////////////////////////////////////////////////////////////
+
+#if GR_DEBUG
+ static int gCounter;
+#endif
+
+/*
+ The text strike is specific to a given font/style/matrix setup, which is
+ represented by the GrHostFontScaler object we are given in getGlyph().
+
+ We map a 32bit glyphID to a GrGlyph record, which in turn points to a
+ atlas and a position within that texture.
+ */
+
+// A strike caches glyphs for one font/style/matrix combination. The
+// cache and atlas manager outlive the strike, so they are stored as
+// bare pointers; only the scaler key is ref-counted. fPool allocates
+// GrGlyph records in chunks of 64.
+GrTextStrike::GrTextStrike(GrFontCache* cache, const GrKey* key,
+ GrMaskFormat format,
+ GrAtlasMgr* atlasMgr) : fPool(64) {
+ fFontScalerKey = key;
+ fFontScalerKey->ref();
+
+ fFontCache = cache; // no need to ref, it won't go away before we do
+ fAtlasMgr = atlasMgr; // no need to ref, it won't go away before we do
+ fAtlas = NULL;
+
+ fMaskFormat = format;
+
+#if GR_DEBUG
+// GrPrintf(" GrTextStrike %p %d\n", this, gCounter);
+ gCounter += 1;
+#endif
+}
+
+// Visitor used by ~GrTextStrike to release each cached glyph.
+static void FreeGlyph(GrGlyph*& glyph) { glyph->free(); }
+
+// Frees the atlas chain, the scaler-key ref, and every cached glyph
+// (glyph memory itself lives in fPool and dies with it).
+GrTextStrike::~GrTextStrike() {
+ GrAtlas::FreeLList(fAtlas);
+ fFontScalerKey->unref();
+ fCache.getArray().visit(FreeGlyph);
+
+#if GR_DEBUG
+ gCounter -= 1;
+// GrPrintf("~GrTextStrike %p %d\n", this, gCounter);
+#endif
+}
+
+// Creates and caches a glyph record for 'packed'. Returns NULL if the
+// scaler cannot produce bounds (e.g. unknown glyph). The atlas upload
+// happens later, in getGlyphAtlas().
+GrGlyph* GrTextStrike::generateGlyph(GrGlyph::PackedID packed,
+ GrFontScaler* scaler) {
+ GrIRect bounds;
+ if (!scaler->getPackedGlyphBounds(packed, &bounds)) {
+ return NULL;
+ }
+
+ GrGlyph* glyph = fPool.alloc();
+ glyph->init(packed, bounds);
+ fCache.insert(packed, glyph);
+ return glyph;
+}
+
+// Ensures 'glyph' has a spot in the texture atlas: rasterizes the
+// glyph image via the scaler and uploads it. Returns false if the
+// scaler fails or the atlas is full (caller may purge and retry, or
+// fall back to path rendering).
+bool GrTextStrike::getGlyphAtlas(GrGlyph* glyph, GrFontScaler* scaler) {
+#if 0 // testing hack to force us to flush our cache often
+ static int gCounter;
+ if ((++gCounter % 10) == 0) return false;
+#endif
+
+ GrAssert(glyph);
+ GrAssert(scaler);
+ GrAssert(fCache.contains(glyph));
+ if (glyph->fAtlas) {
+ return true;
+ }
+
+ // Keep the scaler alive across the (potentially reentrant) upload.
+ GrAutoRef ar(scaler);
+
+ int bytesPerPixel = GrMaskFormatBytesPerPixel(fMaskFormat);
+ size_t size = glyph->fBounds.area() * bytesPerPixel;
+ SkAutoSMalloc<1024> storage(size);
+ if (!scaler->getPackedGlyphImage(glyph->fPackedID, glyph->width(),
+ glyph->height(),
+ glyph->width() * bytesPerPixel,
+ storage.get())) {
+ return false;
+ }
+
+ GrAtlas* atlas = fAtlasMgr->addToAtlas(fAtlas, glyph->width(),
+ glyph->height(), storage.get(),
+ fMaskFormat,
+ &glyph->fAtlasLocation);
+ if (NULL == atlas) {
+ return false;
+ }
+
+ // update fAtlas as well, since they may be chained in a linklist
+ glyph->fAtlas = fAtlas = atlas;
+ return true;
+}
+
+
diff --git a/src/gpu/GrTextStrike_impl.h b/src/gpu/GrTextStrike_impl.h
new file mode 100644
index 0000000000..1b6392c1fe
--- /dev/null
+++ b/src/gpu/GrTextStrike_impl.h
@@ -0,0 +1,106 @@
+
+/*
+ * Copyright 2010 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+
+#ifndef GrTextStrike_impl_DEFINED
+#define GrTextStrike_impl_DEFINED
+
+// Hash-table key adapter wrapping a font scaler's GrKey; provides the
+// getHash/LT/EQ protocol required by GrTHashTable.
+class GrFontCache::Key {
+public:
+ Key(GrFontScaler* scaler) {
+ fFontScalerKey = scaler->getKey();
+ }
+
+ uint32_t getHash() const { return fFontScalerKey->getHash(); }
+
+ static bool LT(const GrTextStrike& strike, const Key& key) {
+ return *strike.getFontScalerKey() < *key.fFontScalerKey;
+ }
+ static bool EQ(const GrTextStrike& strike, const Key& key) {
+ return *strike.getFontScalerKey() == *key.fFontScalerKey;
+ }
+
+private:
+ const GrKey* fFontScalerKey;
+};
+
+// Unlinks 'strike' from the MRU doubly-linked list, updating fHead or
+// fTail when the strike is at either end. (Defined in this _impl
+// header, which is included by exactly one .cpp.)
+void GrFontCache::detachStrikeFromList(GrTextStrike* strike) {
+ if (strike->fPrev) {
+ GrAssert(fHead != strike);
+ strike->fPrev->fNext = strike->fNext;
+ } else {
+ GrAssert(fHead == strike);
+ fHead = strike->fNext;
+ }
+
+ if (strike->fNext) {
+ GrAssert(fTail != strike);
+ strike->fNext->fPrev = strike->fPrev;
+ } else {
+ GrAssert(fTail == strike);
+ fTail = strike->fPrev;
+ }
+}
+
+// Finds (or creates) the strike for 'scaler' and moves it to the head
+// of the MRU list so purging (from the tail) evicts stale strikes first.
+GrTextStrike* GrFontCache::getStrike(GrFontScaler* scaler) {
+ this->validate();
+
+ Key key(scaler);
+ GrTextStrike* strike = fCache.find(key);
+ if (NULL == strike) {
+ strike = this->generateStrike(scaler, key);
+ } else if (strike->fPrev) {
+ // Need to put the strike at the head of its dllist, since that is how
+ // we age the strikes for purging (we purge from the back of the list
+ this->detachStrikeFromList(strike);
+ // attach at the head
+ fHead->fPrev = strike;
+ strike->fNext = fHead;
+ strike->fPrev = NULL;
+ fHead = strike;
+ }
+
+ this->validate();
+ return strike;
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+/**
+ * This Key just wraps a glyphID, and matches the protocol need for
+ * GrTHashTable
+ */
+/**
+ * This Key just wraps a glyphID, and matches the protocol need for
+ * GrTHashTable
+ */
+class GrTextStrike::Key {
+public:
+ Key(GrGlyph::PackedID id) : fPackedID(id) {}
+
+ // The packed glyph ID doubles as its own hash.
+ uint32_t getHash() const { return fPackedID; }
+
+ static bool LT(const GrGlyph& glyph, const Key& key) {
+ return glyph.fPackedID < key.fPackedID;
+ }
+ static bool EQ(const GrGlyph& glyph, const Key& key) {
+ return glyph.fPackedID == key.fPackedID;
+ }
+
+private:
+ GrGlyph::PackedID fPackedID;
+};
+
+// Cache lookup with lazy generation; may still return NULL if the
+// scaler cannot produce the glyph (see generateGlyph).
+GrGlyph* GrTextStrike::getGlyph(GrGlyph::PackedID packed,
+ GrFontScaler* scaler) {
+ GrGlyph* glyph = fCache.find(packed);
+ if (NULL == glyph) {
+ glyph = this->generateGlyph(packed, scaler);
+ }
+ return glyph;
+}
+
+#endif
+
diff --git a/src/gpu/GrTexture.cpp b/src/gpu/GrTexture.cpp
new file mode 100644
index 0000000000..77868dfc9f
--- /dev/null
+++ b/src/gpu/GrTexture.cpp
@@ -0,0 +1,41 @@
+
+/*
+ * Copyright 2011 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+#include "GrTexture.h"
+
+#include "GrContext.h"
+#include "GrGpu.h"
+#include "GrRenderTarget.h"
+
+// Reads back a rectangle of texels into 'buffer' in the requested
+// config. Routed through GrContext rather than the GPU directly so any
+// pending draws targeting this texture are flushed first.
+bool GrTexture::readPixels(int left, int top, int width, int height,
+ GrPixelConfig config, void* buffer) {
+ // go through context so that all necessary flushing occurs
+ GrContext* context = this->getGpu()->getContext();
+ GrAssert(NULL != context);
+ return context->readTexturePixels(this,
+ left, top,
+ width, height,
+ config, buffer);
+}
+
+// Detaches and unrefs the render target backed by this texture,
+// notifying it first so it can release its GPU-side attachment.
+void GrTexture::releaseRenderTarget() {
+ if (NULL != fRenderTarget) {
+ GrAssert(fRenderTarget->asTexture() == this);
+ fRenderTarget->onTextureReleaseRenderTarget();
+ fRenderTarget->unref();
+ fRenderTarget = NULL;
+ }
+}
+
+// When the underlying GPU context is lost, abandon the attached render
+// target too (no GPU resources are released, just forgotten).
+void GrTexture::onAbandon() {
+ if (NULL != fRenderTarget) {
+ fRenderTarget->abandon();
+ }
+}
+
diff --git a/src/gpu/GrVertexBuffer.h b/src/gpu/GrVertexBuffer.h
new file mode 100644
index 0000000000..bda235c7b9
--- /dev/null
+++ b/src/gpu/GrVertexBuffer.h
@@ -0,0 +1,24 @@
+
+/*
+ * Copyright 2010 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+
+#ifndef GrVertexBuffer_DEFINED
+#define GrVertexBuffer_DEFINED
+
+#include "GrGeometryBuffer.h"
+
+// Marker subclass of GrGeometryBuffer for vertex data; adds no state,
+// only the type distinction (cf. GrIndexBuffer). Constructible only by
+// GPU-backend subclasses via the protected constructor.
+class GrVertexBuffer : public GrGeometryBuffer {
+protected:
+ GrVertexBuffer(GrGpu* gpu, size_t sizeInBytes, bool dynamic)
+ : INHERITED(gpu, sizeInBytes, dynamic) {}
+private:
+ typedef GrGeometryBuffer INHERITED;
+};
+
+#endif
diff --git a/src/gpu/android/GrGLDefaultInterface_android.cpp b/src/gpu/android/GrGLDefaultInterface_android.cpp
new file mode 100644
index 0000000000..7de4bd1a65
--- /dev/null
+++ b/src/gpu/android/GrGLDefaultInterface_android.cpp
@@ -0,0 +1,120 @@
+// Modified from chromium/src/webkit/glue/gl_bindings_skia_cmd_buffer.cc
+
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "GrGLInterface.h"
+
+#ifndef GL_GLEXT_PROTOTYPES
+#define GL_GLEXT_PROTOTYPES
+#endif
+
+#include <GLES2/gl2.h>
+#include <GLES2/gl2ext.h>
+
+const GrGLInterface* GrGLDefaultInterface() {
+    // Lazily build a process-wide GrGLInterface bound to the statically
+    // linked GLES2 entry points. The caller receives a ref and must unref.
+    static SkAutoTUnref<GrGLInterface> glInterface;
+    if (!glInterface.get()) {
+        // Bug fix: the type name was misspelled "GrGLInteface", which
+        // would not compile.
+        GrGLInterface* interface = new GrGLInterface;
+        glInterface.reset(interface);
+        interface->fBindingsExported = kES2_GrGLBinding;
+        interface->fActiveTexture = glActiveTexture;
+        interface->fAttachShader = glAttachShader;
+        interface->fBindAttribLocation = glBindAttribLocation;
+        interface->fBindBuffer = glBindBuffer;
+        interface->fBindTexture = glBindTexture;
+        interface->fBlendColor = glBlendColor;
+        interface->fBlendFunc = glBlendFunc;
+        interface->fBufferData = glBufferData;
+        interface->fBufferSubData = glBufferSubData;
+        interface->fClear = glClear;
+        interface->fClearColor = glClearColor;
+        interface->fClearStencil = glClearStencil;
+        interface->fColorMask = glColorMask;
+        interface->fCompileShader = glCompileShader;
+        interface->fCompressedTexImage2D = glCompressedTexImage2D;
+        interface->fCreateProgram = glCreateProgram;
+        interface->fCreateShader = glCreateShader;
+        interface->fCullFace = glCullFace;
+        interface->fDeleteBuffers = glDeleteBuffers;
+        interface->fDeleteProgram = glDeleteProgram;
+        interface->fDeleteShader = glDeleteShader;
+        interface->fDeleteTextures = glDeleteTextures;
+        interface->fDepthMask = glDepthMask;
+        interface->fDisable = glDisable;
+        interface->fDisableVertexAttribArray = glDisableVertexAttribArray;
+        interface->fDrawArrays = glDrawArrays;
+        interface->fDrawElements = glDrawElements;
+        interface->fEnable = glEnable;
+        interface->fEnableVertexAttribArray = glEnableVertexAttribArray;
+        interface->fFrontFace = glFrontFace;
+        interface->fGenBuffers = glGenBuffers;
+        interface->fGenTextures = glGenTextures;
+        interface->fGetBufferParameteriv = glGetBufferParameteriv;
+        interface->fGetError = glGetError;
+        interface->fGetIntegerv = glGetIntegerv;
+        interface->fGetProgramInfoLog = glGetProgramInfoLog;
+        interface->fGetProgramiv = glGetProgramiv;
+        interface->fGetShaderInfoLog = glGetShaderInfoLog;
+        interface->fGetShaderiv = glGetShaderiv;
+        interface->fGetString = glGetString;
+        interface->fGetUniformLocation = glGetUniformLocation;
+        interface->fLineWidth = glLineWidth;
+        interface->fLinkProgram = glLinkProgram;
+        interface->fPixelStorei = glPixelStorei;
+        interface->fReadPixels = glReadPixels;
+        interface->fScissor = glScissor;
+        interface->fShaderSource = glShaderSource;
+        interface->fStencilFunc = glStencilFunc;
+        interface->fStencilFuncSeparate = glStencilFuncSeparate;
+        interface->fStencilMask = glStencilMask;
+        interface->fStencilMaskSeparate = glStencilMaskSeparate;
+        interface->fStencilOp = glStencilOp;
+        interface->fStencilOpSeparate = glStencilOpSeparate;
+        interface->fTexImage2D = glTexImage2D;
+        interface->fTexParameteri = glTexParameteri;
+        interface->fTexSubImage2D = glTexSubImage2D;
+        interface->fUniform1f = glUniform1f;
+        interface->fUniform1i = glUniform1i;
+        interface->fUniform1fv = glUniform1fv;
+        interface->fUniform1iv = glUniform1iv;
+        interface->fUniform2f = glUniform2f;
+        interface->fUniform2i = glUniform2i;
+        interface->fUniform2fv = glUniform2fv;
+        interface->fUniform2iv = glUniform2iv;
+        interface->fUniform3f = glUniform3f;
+        interface->fUniform3i = glUniform3i;
+        interface->fUniform3fv = glUniform3fv;
+        interface->fUniform3iv = glUniform3iv;
+        interface->fUniform4f = glUniform4f;
+        interface->fUniform4i = glUniform4i;
+        interface->fUniform4fv = glUniform4fv;
+        interface->fUniform4iv = glUniform4iv;
+        interface->fUniformMatrix2fv = glUniformMatrix2fv;
+        interface->fUniformMatrix3fv = glUniformMatrix3fv;
+        interface->fUniformMatrix4fv = glUniformMatrix4fv;
+        interface->fUseProgram = glUseProgram;
+        interface->fVertexAttrib4fv = glVertexAttrib4fv;
+        interface->fVertexAttribPointer = glVertexAttribPointer;
+        interface->fViewport = glViewport;
+        interface->fBindFramebuffer = glBindFramebuffer;
+        interface->fBindRenderbuffer = glBindRenderbuffer;
+        interface->fCheckFramebufferStatus = glCheckFramebufferStatus;
+        interface->fDeleteFramebuffers = glDeleteFramebuffers;
+        interface->fDeleteRenderbuffers = glDeleteRenderbuffers;
+        interface->fFramebufferRenderbuffer = glFramebufferRenderbuffer;
+        interface->fFramebufferTexture2D = glFramebufferTexture2D;
+        interface->fGenFramebuffers = glGenFramebuffers;
+        interface->fGenRenderbuffers = glGenRenderbuffers;
+        interface->fGetFramebufferAttachmentParameteriv = glGetFramebufferAttachmentParameteriv;
+        interface->fGetRenderbufferParameteriv = glGetRenderbufferParameteriv;
+        interface->fRenderbufferStorage = glRenderbufferStorage;
+    #if GL_OES_mapbuffer
+        // Buffer mapping is an ES2 extension; only hook it up when the
+        // headers advertise GL_OES_mapbuffer.
+        interface->fMapBuffer = glMapBufferOES;
+        interface->fUnmapBuffer = glUnmapBufferOES;
+    #endif
+    }
+    glInterface.get()->ref();
+    return glInterface.get();
+}
diff --git a/src/gpu/app-android.cpp b/src/gpu/app-android.cpp
new file mode 100644
index 0000000000..39cca6ce1a
--- /dev/null
+++ b/src/gpu/app-android.cpp
@@ -0,0 +1,394 @@
+
+/*
+ * Copyright 2011 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+#include <jni.h>
+#include <sys/time.h>
+#include <time.h>
+#include <android/log.h>
+#include <stdint.h>
+
+#include "GrContext.h"
+#include "SkGpuCanvas.h"
+#include "SkPaint.h"
+#include "SkString.h"
+#include "SkTime.h"
+
+#include "GrGLConfig.h"
+
+static GrContext* make_context() {
+ SkDebugf("---- before create\n");
+ GrContext* ctx = GrContext::Create(GrGpu::kOpenGL_Shaders_Engine, 0);
+// GrContext* ctx = GrContext::Create(GrGpu::kOpenGL_Fixed_Engine, 0);
+ SkDebugf("---- after create %p\n", ctx);
+ return ctx;
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+void gr_run_unittests() {}
+
+#include "FlingState.h"
+#include "SkTouchGesture.h"
+#include "SkView.h"
+
+typedef SkView* (*SkViewFactory)();
+
+// these values must match those in Ganesh.java
+enum TouchState {
+ kUnknown_TouchState,
+ kDown_TouchState,
+ kMoved_TouchState,
+ kUp_TouchState
+};
+
+struct State {
+ State();
+ ~State();
+
+ int countSlides() const { return fFactory.count(); }
+ const char* getSlideTitle(int index) const;
+ void chooseSlide(int index) {
+ SkDebugf("----- index %d\n", index);
+ if (index < fFactory.count()) {
+ this->setView(fFactory[index]());
+ }
+ }
+
+ void setViewport(int w, int h);
+ int getWidth() const { return fViewport.fX; }
+ int getHeight() const { return fViewport.fY; }
+
+ void handleTouch(void*, TouchState, float x, float y);
+ void applyMatrix(SkCanvas*);
+
+ SkView* getView() const { return fView; }
+
+private:
+ SkView* fView;
+ SkIPoint fViewport;
+
+ SkTouchGesture fGesture;
+
+ SkTDArray<SkViewFactory> fFactory;
+
+ void setView(SkView* view) {
+ SkSafeUnref(fView);
+ fView = view;
+
+ view->setVisibleP(true);
+ view->setClipToBounds(false);
+ view->setSize(SkIntToScalar(fViewport.fX),
+ SkIntToScalar(fViewport.fY));
+ }
+};
+
+///////////////////////////////////////////////////////////////////////////////
+
+#include "SampleCode.h"
+
+SkViewRegister* SkViewRegister::gHead;
+SkViewRegister::SkViewRegister(SkViewFactory fact) : fFact(fact) {
+ static bool gOnce;
+ if (!gOnce) {
+ gHead = NULL;
+ gOnce = true;
+ }
+
+ fChain = gHead;
+ gHead = this;
+}
+
+static const char gCharEvtName[] = "SampleCode_Char_Event";
+static const char gKeyEvtName[] = "SampleCode_Key_Event";
+static const char gTitleEvtName[] = "SampleCode_Title_Event";
+static const char gPrefSizeEvtName[] = "SampleCode_PrefSize_Event";
+static const char gFastTextEvtName[] = "SampleCode_FastText_Event";
+
+bool SampleCode::CharQ(const SkEvent& evt, SkUnichar* outUni) {
+ if (evt.isType(gCharEvtName, sizeof(gCharEvtName) - 1)) {
+ if (outUni) {
+ *outUni = evt.getFast32();
+ }
+ return true;
+ }
+ return false;
+}
+
+bool SampleCode::KeyQ(const SkEvent& evt, SkKey* outKey) {
+ if (evt.isType(gKeyEvtName, sizeof(gKeyEvtName) - 1)) {
+ if (outKey) {
+ *outKey = (SkKey)evt.getFast32();
+ }
+ return true;
+ }
+ return false;
+}
+
+bool SampleCode::TitleQ(const SkEvent& evt) {
+ return evt.isType(gTitleEvtName, sizeof(gTitleEvtName) - 1);
+}
+
+void SampleCode::TitleR(SkEvent* evt, const char title[]) {
+ GrAssert(evt && TitleQ(*evt));
+ evt->setString(gTitleEvtName, title);
+}
+
+bool SampleCode::PrefSizeQ(const SkEvent& evt) {
+ return evt.isType(gPrefSizeEvtName, sizeof(gPrefSizeEvtName) - 1);
+}
+
+void SampleCode::PrefSizeR(SkEvent* evt, SkScalar width, SkScalar height) {
+ GrAssert(evt && PrefSizeQ(*evt));
+ SkScalar size[2];
+ size[0] = width;
+ size[1] = height;
+ evt->setScalars(gPrefSizeEvtName, 2, size);
+}
+
+bool SampleCode::FastTextQ(const SkEvent& evt) {
+ return evt.isType(gFastTextEvtName, sizeof(gFastTextEvtName) - 1);
+}
+
+static SkMSec gAnimTime;
+static SkMSec gAnimTimePrev;
+
+SkMSec SampleCode::GetAnimTime() { return gAnimTime; }
+SkMSec SampleCode::GetAnimTimeDelta() { return gAnimTime - gAnimTimePrev; }
+SkScalar SampleCode::GetAnimSecondsDelta() {
+ return SkDoubleToScalar(GetAnimTimeDelta() / 1000.0);
+}
+
+SkScalar SampleCode::GetAnimScalar(SkScalar speed, SkScalar period) {
+ // since gAnimTime can be up to 32 bits, we can't convert it to a float
+ // or we'll lose the low bits. Hence we use doubles for the intermediate
+ // calculations
+ double seconds = (double)gAnimTime / 1000.0;
+ double value = SkScalarToDouble(speed) * seconds;
+ if (period) {
+ value = ::fmod(value, SkScalarToDouble(period));
+ }
+ return SkDoubleToScalar(value);
+}
+
+static void drawIntoCanvas(State* state, SkCanvas* canvas) {
+ gAnimTime = SkTime::GetMSecs();
+ SkView* view = state->getView();
+ view->draw(canvas);
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+static void resetGpuState();
+
+State::State() {
+ fViewport.set(0, 0);
+
+ const SkViewRegister* reg = SkViewRegister::Head();
+ while (reg) {
+ *fFactory.append() = reg->factory();
+ reg = reg->next();
+ }
+
+ SkDebugf("----- %d slides\n", fFactory.count());
+ fView = NULL;
+ this->chooseSlide(0);
+}
+
+State::~State() {
+ SkSafeUnref(fView);
+}
+
+void State::setViewport(int w, int h) {
+ fViewport.set(w, h);
+ if (fView) {
+ fView->setSize(SkIntToScalar(w), SkIntToScalar(h));
+ }
+ resetGpuState();
+}
+
+/**
+ * Builds the slide at 'index' just long enough to query its title string.
+ * NOTE(review): the returned const char* points into the stack-local
+ * SkEvent 'evt' and dangles as soon as this function returns — callers
+ * must copy it immediately (the JNI caller does via NewStringUTF, but
+ * the contract should return an SkString; confirm and fix upstream).
+ */
+const char* State::getSlideTitle(int index) const {
+    SkEvent evt(gTitleEvtName);
+    evt.setFast32(index);
+    {
+        // Instantiate the view only to answer the title query, then drop it.
+        SkView* view = fFactory[index]();
+        view->doQuery(&evt);
+        view->unref();
+    }
+    return evt.findString(gTitleEvtName);
+}
+
+void State::handleTouch(void* owner, TouchState state, float x, float y) {
+ switch (state) {
+ case kDown_TouchState:
+ fGesture.touchBegin(owner, x, y);
+ break;
+ case kMoved_TouchState:
+ fGesture.touchMoved(owner, x, y);
+ break;
+ case kUp_TouchState:
+ fGesture.touchEnd(owner);
+ break;
+ }
+}
+
+void State::applyMatrix(SkCanvas* canvas) {
+ const SkMatrix& localM = fGesture.localM();
+ if (localM.getType() & SkMatrix::kScale_Mask) {
+ canvas->setExternalMatrix(&localM);
+ }
+ canvas->concat(localM);
+ canvas->concat(fGesture.globalM());
+}
+
+static State* get_state() {
+ static State* gState;
+ if (NULL == gState) {
+ gState = new State;
+ }
+ return gState;
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+static GrContext* gContext;
+static int gWidth;
+static int gHeight;
+static float gX, gY;
+
+/**
+ * (Re)creates the global GrContext. Called on first use and whenever the
+ * GL surface is recreated (e.g. Android surface loss): textures cached in
+ * the old context are abandoned — their GL ids died with the old surface —
+ * before the context is unref'd and rebuilt.
+ */
+static void resetGpuState() {
+    if (NULL == gContext) {
+        SkDebugf("creating context for first time\n");
+        gContext = make_context();
+    } else {
+        SkDebugf("------ gContext refcnt=%d\n", gContext->refcnt());
+        gContext->abandonAllTextures();
+        gContext->unref();
+        gContext = make_context();
+    }
+}
+
+/**
+ * Renders one frame: draws the current slide into a GPU canvas sized to
+ * the State viewport, then overlays a simple FPS meter.
+ */
+static void doDraw() {
+    // Lazily create the context (resetGpuState() also rebuilds it when
+    // the GL surface is lost).
+    if (NULL == gContext) {
+        gContext = make_context();
+    }
+
+    State* state = get_state();
+    SkBitmap viewport;
+    viewport.setConfig(SkBitmap::kARGB_8888_Config,
+                       state->getWidth(), state->getHeight());
+
+    SkGpuCanvas canvas(gContext);
+
+    canvas.setBitmapDevice(viewport);
+    state->applyMatrix(&canvas);
+
+    drawIntoCanvas(state, &canvas);
+
+    GrGLCheckErr();
+    GrGLClearErr();
+//    gContext->checkError();
+//    gContext->clearError();
+
+    if (true) {
+        // FPS meter: every FRAME_COUNT frames, record the elapsed wall
+        // time and display the average frame rate over that window.
+        static const int FRAME_COUNT = 32;
+        static SkMSec gDuration;
+
+        static SkMSec gNow;
+        static int gFrameCounter;
+        if (++gFrameCounter == FRAME_COUNT) {
+            gFrameCounter = 0;
+            SkMSec now = SkTime::GetMSecs();
+
+            gDuration = now - gNow;
+            gNow = now;
+        }
+
+        // Bug fix: gDuration stays 0 until the first FRAME_COUNT frames
+        // have elapsed, so dividing unconditionally crashed on startup
+        // with a divide-by-zero. Report 0 FPS until a window completes.
+        int fps = (0 != gDuration) ? (FRAME_COUNT * 1000) / gDuration : 0;
+        SkString str;
+        str.printf("FPS=%3d MS=%3d", fps, gDuration / FRAME_COUNT);
+
+        SkGpuCanvas c(gContext);
+        c.setBitmapDevice(viewport);
+
+        SkPaint p;
+        p.setAntiAlias(true);
+        SkRect r = { 0, 0, 110, 16 };
+        p.setColor(SK_ColorWHITE);
+        c.drawRect(r, p);
+        p.setColor(SK_ColorBLACK);
+        c.drawText(str.c_str(), str.size(), 4, 12, p);
+    }
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+extern "C" {
+ JNIEXPORT void JNICALL Java_com_tetrark_ganesh_MyRenderer_nativeSurfaceCreated(
+ JNIEnv*, jobject);
+ JNIEXPORT void JNICALL Java_com_tetrark_ganesh_MyRenderer_nativeViewport(JNIEnv*, jobject,
+ jint w, jint h);
+ JNIEXPORT void JNICALL Java_com_tetrark_ganesh_MyRenderer_nativeDrawFrame(JNIEnv*, jobject);
+ JNIEXPORT void JNICALL Java_com_tetrark_ganesh_MyRenderer_nativeTouch(JNIEnv*, jobject,
+ jint id, jint type, jfloat x, jfloat y);
+
+ JNIEXPORT int JNICALL Java_com_tetrark_ganesh_MyRenderer_nativeCountSlides(JNIEnv*, jobject);
+ JNIEXPORT jobject JNICALL Java_com_tetrark_ganesh_MyRenderer_nativeGetSlideTitle(JNIEnv*, jobject, jint index);
+ JNIEXPORT void JNICALL Java_com_tetrark_ganesh_MyRenderer_nativeChooseSlide(JNIEnv*, jobject, jint index);
+}
+
+JNIEXPORT void JNICALL Java_com_tetrark_ganesh_MyRenderer_nativeSurfaceCreated(
+ JNIEnv*, jobject) {
+ SkDebugf("------ nativeSurfaceCreated\n");
+ resetGpuState();
+ SkDebugf("------ end nativeSurfaceCreated\n");
+}
+
+JNIEXPORT void JNICALL Java_com_tetrark_ganesh_MyRenderer_nativeViewport(JNIEnv*, jobject,
+ jint w, jint h) {
+ State* state = get_state();
+ SkDebugf("---- state.setviewport %p %d %d\n", state, w, h);
+ state->setViewport(w, h);
+ SkDebugf("---- end setviewport\n");
+}
+
+JNIEXPORT void JNICALL Java_com_tetrark_ganesh_MyRenderer_nativeDrawFrame(JNIEnv*, jobject) {
+ doDraw();
+}
+
+union IntPtr {
+ jint fInt;
+ void* fPtr;
+};
+static void* int2ptr(jint n) {
+ IntPtr data;
+ data.fInt = n;
+ return data.fPtr;
+}
+
+JNIEXPORT void JNICALL Java_com_tetrark_ganesh_MyRenderer_nativeTouch(JNIEnv*, jobject,
+ jint id, jint type, jfloat x, jfloat y) {
+ get_state()->handleTouch(int2ptr(id), (TouchState)type, x, y);
+}
+
+////////////
+
+JNIEXPORT int JNICALL Java_com_tetrark_ganesh_MyRenderer_nativeCountSlides(JNIEnv*, jobject) {
+ return get_state()->countSlides();
+}
+
+JNIEXPORT jobject JNICALL Java_com_tetrark_ganesh_MyRenderer_nativeGetSlideTitle(JNIEnv* env, jobject, jint index) {
+ return env->NewStringUTF(get_state()->getSlideTitle(index));
+}
+
+JNIEXPORT void JNICALL Java_com_tetrark_ganesh_MyRenderer_nativeChooseSlide(JNIEnv*, jobject, jint index) {
+ get_state()->chooseSlide(index);
+}
+
+
+
+
+
diff --git a/src/gpu/gr_hello_world.cpp b/src/gpu/gr_hello_world.cpp
new file mode 100644
index 0000000000..b475fb8de9
--- /dev/null
+++ b/src/gpu/gr_hello_world.cpp
@@ -0,0 +1,37 @@
+
+/*
+ * Copyright 2011 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+#include "SkGLCanvas.h"
+#include "SkBitmap.h"
+#include "SkPaint.h"
+#include "SkGpuGLShaders.h"
+
+extern "C" {
+ void gr_hello_world();
+}
+
+void gr_hello_world() {
+ static GrGpu* gGpu;
+ if (NULL == gGpu) {
+ gGpu = new SkGpuGLShaders;
+ }
+
+ SkGLCanvas canvas(gGpu);
+ SkBitmap bm;
+
+ bm.setConfig(SkBitmap::kARGB_8888_Config, WIDTH, HEIGHT);
+ canvas.setBitmapDevice(bm);
+
+ canvas.drawColor(SK_ColorWHITE);
+
+ SkPaint paint;
+ paint.setAntiAlias(true);
+ paint.setTextSize(30);
+ canvas.drawText("Hello Kno", 9, 40, 40, paint);
+}
+
+
diff --git a/src/gpu/gr_unittests.cpp b/src/gpu/gr_unittests.cpp
new file mode 100644
index 0000000000..6e51e19a7c
--- /dev/null
+++ b/src/gpu/gr_unittests.cpp
@@ -0,0 +1,231 @@
+
+/*
+ * Copyright 2010 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+
+#include "GrBinHashKey.h"
+#include "GrDrawTarget.h"
+#include "GrMatrix.h"
+#include "GrPath.h"
+#include "GrRedBlackTree.h"
+#include "GrTDArray.h"
+
+// If we aren't inheriting these as #defines from elsewhere,
+// clang demands they be declared before we #include the template
+// that relies on them.
+static bool LT(const int& elem, int value) {
+ return elem < value;
+}
+static bool EQ(const int& elem, int value) {
+ return elem == value;
+}
+#include "GrTBSearch.h"
+
+static void dump(const GrTDArray<int>& array) {
+#if 0
+ for (int i = 0; i < array.count(); i++) {
+ printf(" %d", array[i]);
+ }
+ printf("\n");
+#endif
+}
+
+static void test_tdarray() {
+ GrTDArray<int> array;
+
+ *array.append() = 0; dump(array);
+ *array.append() = 2; dump(array);
+ *array.append() = 4; dump(array);
+ *array.append() = 6; dump(array);
+ GrAssert(array.count() == 4);
+
+ *array.insert(0) = -1; dump(array);
+ *array.insert(2) = 1; dump(array);
+ *array.insert(4) = 3; dump(array);
+ *array.insert(7) = 7; dump(array);
+ GrAssert(array.count() == 8);
+ array.remove(3); dump(array);
+ array.remove(0); dump(array);
+ array.removeShuffle(4); dump(array);
+ array.removeShuffle(1); dump(array);
+ GrAssert(array.count() == 4);
+}
+
+
+static void test_bsearch() {
+ const int array[] = {
+ 1, 2, 3, 4, 5, 6, 7, 8, 9, 11, 22, 33, 44, 55, 66, 77, 88, 99
+ };
+
+ for (size_t n = 0; n < GR_ARRAY_COUNT(array); n++) {
+ for (size_t i = 0; i < n; i++) {
+ int index = GrTBSearch<int, int>(array, n, array[i]);
+ GrAssert(index == (int) i);
+ index = GrTBSearch<int, int>(array, n, -array[i]);
+ GrAssert(index < 0);
+ }
+ }
+}
+
+// bogus empty class for GrBinHashKey
+class BogusEntry {};
+
+static void test_binHashKey()
+{
+ const char* testStringA_ = "abcdABCD";
+ const char* testStringB_ = "abcdBBCD";
+ const uint32_t* testStringA = reinterpret_cast<const uint32_t*>(testStringA_);
+ const uint32_t* testStringB = reinterpret_cast<const uint32_t*>(testStringB_);
+ enum {
+ kDataLenUsedForKey = 8
+ };
+
+ GrBinHashKey<BogusEntry, kDataLenUsedForKey> keyA;
+ keyA.setKeyData(testStringA);
+ // test copy constructor and comparison
+ GrBinHashKey<BogusEntry, kDataLenUsedForKey> keyA2(keyA);
+ GrAssert(keyA.compare(keyA2) == 0);
+ GrAssert(keyA.getHash() == keyA2.getHash());
+ // test re-init
+ keyA2.setKeyData(testStringA);
+ GrAssert(keyA.compare(keyA2) == 0);
+ GrAssert(keyA.getHash() == keyA2.getHash());
+ // test sorting
+ GrBinHashKey<BogusEntry, kDataLenUsedForKey> keyB;
+ keyB.setKeyData(testStringB);
+ GrAssert(keyA.compare(keyB) < 0);
+ GrAssert(keyA.getHash() != keyB.getHash());
+}
+
+/**
+ * Exercises GrPath's convexity hint on a series of simple contours
+ * (point, line, triangles, squares, concave shapes). Currently disabled
+ * via #if 0 — left as documentation/scaffolding for the convexity work.
+ */
+static void test_convex() {
+#if 0
+    GrPath testPath;
+    GrPath::Iter testIter;
+
+    GrPath pt;
+    pt.moveTo(0, 0);
+    pt.close();
+
+    testIter.reset(pt);
+    testPath.resetFromIter(&testIter);
+    GrAssert(kConvex_ConvexHint == testPath.getConvexHint());
+
+    GrPath line;
+    line.moveTo(GrIntToScalar(12), GrIntToScalar(20));
+    line.lineTo(GrIntToScalar(-12), GrIntToScalar(-20));
+    line.close();
+
+    testIter.reset(line);
+    testPath.resetFromIter(&testIter);
+    GrAssert(kConvex_ConvexHint == testPath.getConvexHint());
+
+    GrPath triLeft;
+    triLeft.moveTo(0, 0);
+    triLeft.lineTo(1, 0);
+    triLeft.lineTo(1, 1);
+    triLeft.close();
+
+    testIter.reset(triLeft);
+    testPath.resetFromIter(&testIter);
+    GrAssert(kConvex_ConvexHint == testPath.getConvexHint());
+
+    GrPath triRight;
+    triRight.moveTo(0, 0);
+    triRight.lineTo(-1, 0);
+    triRight.lineTo(1, 1);
+    triRight.close();
+
+    testIter.reset(triRight);
+    testPath.resetFromIter(&testIter);
+    GrAssert(kConvex_ConvexHint == testPath.getConvexHint());
+
+    GrPath square;
+    square.moveTo(0, 0);
+    square.lineTo(1, 0);
+    square.lineTo(1, 1);
+    square.lineTo(0, 1);
+    square.close();
+
+    testIter.reset(square);
+    testPath.resetFromIter(&testIter);
+    GrAssert(kConvex_ConvexHint == testPath.getConvexHint());
+
+    // Bug fix: this block previously mutated 'square' (copy-paste error),
+    // appending a second contour to it and then iterating the still-empty
+    // 'redundantSquare'. Build the redundant-vertex square on the right path.
+    GrPath redundantSquare;
+    redundantSquare.moveTo(0, 0);
+    redundantSquare.lineTo(0, 0);
+    redundantSquare.lineTo(0, 0);
+    redundantSquare.lineTo(1, 0);
+    redundantSquare.lineTo(1, 0);
+    redundantSquare.lineTo(1, 0);
+    redundantSquare.lineTo(1, 1);
+    redundantSquare.lineTo(1, 1);
+    redundantSquare.lineTo(1, 1);
+    redundantSquare.lineTo(0, 1);
+    redundantSquare.lineTo(0, 1);
+    redundantSquare.lineTo(0, 1);
+    redundantSquare.close();
+
+    testIter.reset(redundantSquare);
+    testPath.resetFromIter(&testIter);
+    GrAssert(kConvex_ConvexHint == testPath.getConvexHint());
+
+    GrPath bowTie;
+    bowTie.moveTo(0, 0);
+    bowTie.lineTo(0, 0);
+    bowTie.lineTo(0, 0);
+    bowTie.lineTo(1, 1);
+    bowTie.lineTo(1, 1);
+    bowTie.lineTo(1, 1);
+    bowTie.lineTo(1, 0);
+    bowTie.lineTo(1, 0);
+    bowTie.lineTo(1, 0);
+    bowTie.lineTo(0, 1);
+    bowTie.lineTo(0, 1);
+    bowTie.lineTo(0, 1);
+    bowTie.close();
+
+    testIter.reset(bowTie);
+    testPath.resetFromIter(&testIter);
+    GrAssert(kConcave_ConvexHint == testPath.getConvexHint());
+
+    GrPath spiral;
+    spiral.moveTo(0, 0);
+    spiral.lineTo(1, 0);
+    spiral.lineTo(1, 1);
+    spiral.lineTo(0, 1);
+    spiral.lineTo(0,.5);
+    spiral.lineTo(.5,.5);
+    spiral.lineTo(.5,.75);
+    spiral.close();
+
+    testIter.reset(spiral);
+    testPath.resetFromIter(&testIter);
+    GrAssert(kConcave_ConvexHint == testPath.getConvexHint());
+
+    GrPath dent;
+    dent.moveTo(0, 0);
+    dent.lineTo(1, 1);
+    dent.lineTo(0, 1);
+    dent.lineTo(-.5,2);
+    dent.lineTo(-2, 1);
+    dent.close();
+
+    testIter.reset(dent);
+    testPath.resetFromIter(&testIter);
+    GrAssert(kConcave_ConvexHint == testPath.getConvexHint());
+#endif
+}
+
+void gr_run_unittests() {
+ test_tdarray();
+ test_bsearch();
+ test_binHashKey();
+ test_convex();
+ GrRedBlackTree<int>::UnitTest();
+ GrDrawTarget::VertexLayoutUnitTest();
+}
diff --git a/src/gpu/ios/GrGLDefaultInterface_iOS.cpp b/src/gpu/ios/GrGLDefaultInterface_iOS.cpp
new file mode 100644
index 0000000000..7be40db89a
--- /dev/null
+++ b/src/gpu/ios/GrGLDefaultInterface_iOS.cpp
@@ -0,0 +1,152 @@
+
+/*
+ * Copyright 2011 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+#include "GrGLInterface.h"
+
+#import <OpenGLES/ES1/gl.h>
+#import <OpenGLES/ES1/glext.h>
+
+#import <OpenGLES/ES2/gl.h>
+#import <OpenGLES/ES2/glext.h>
+
+const GrGLInterface* GrGLDefaultInterface() {
+    // Lazily build a process-wide GrGLInterface exporting both the ES1
+    // (fixed-function) and ES2 (shader) bindings available on iOS.
+    // The caller receives a ref and must unref.
+    static SkAutoTUnref<GrGLInterface> glInterface;
+    if (!glInterface.get()) {
+        // Bug fix: the type name was misspelled "GrGLInteface", which
+        // would not compile.
+        GrGLInterface* interface = new GrGLInterface;
+        glInterface.reset(interface);
+
+        interface->fNPOTRenderTargetSupport = kProbe_GrGLCapability;
+        interface->fMinRenderTargetHeight = kProbe_GrGLCapability;
+        interface->fMinRenderTargetWidth = kProbe_GrGLCapability;
+        interface->fActiveTexture = glActiveTexture;
+        interface->fAttachShader = glAttachShader;
+        interface->fBindAttribLocation = glBindAttribLocation;
+        interface->fBindBuffer = glBindBuffer;
+        interface->fBindTexture = glBindTexture;
+        interface->fBlendColor = glBlendColor;
+        interface->fBlendFunc = glBlendFunc;
+        interface->fBufferData = (GrGLBufferDataProc)glBufferData;
+        interface->fBufferSubData = (GrGLBufferSubDataProc)glBufferSubData;
+        interface->fClear = glClear;
+        interface->fClearColor = glClearColor;
+        interface->fClearStencil = glClearStencil;
+        interface->fClientActiveTexture = glClientActiveTexture;
+        interface->fColorMask = glColorMask;
+        interface->fColorPointer = glColorPointer;
+        interface->fColor4ub = glColor4ub;
+        interface->fCompileShader = glCompileShader;
+        interface->fCompressedTexImage2D = glCompressedTexImage2D;
+        interface->fCreateProgram = glCreateProgram;
+        interface->fCreateShader = glCreateShader;
+        interface->fCullFace = glCullFace;
+        interface->fDeleteBuffers = glDeleteBuffers;
+        interface->fDeleteProgram = glDeleteProgram;
+        interface->fDeleteShader = glDeleteShader;
+        interface->fDeleteTextures = glDeleteTextures;
+        interface->fDepthMask = glDepthMask;
+        interface->fDisable = glDisable;
+        interface->fDisableClientState = glDisableClientState;
+        interface->fDisableVertexAttribArray = glDisableVertexAttribArray;
+        interface->fDrawArrays = glDrawArrays;
+        interface->fDrawBuffer = NULL;
+        interface->fDrawBuffers = NULL;
+        interface->fDrawElements = glDrawElements;
+        interface->fEnable = glEnable;
+        interface->fEnableClientState = glEnableClientState;
+        interface->fEnableVertexAttribArray = glEnableVertexAttribArray;
+        interface->fFrontFace = glFrontFace;
+        interface->fGenBuffers = glGenBuffers;
+        interface->fGetBufferParameteriv = glGetBufferParameteriv;
+        interface->fGetError = glGetError;
+        interface->fGetIntegerv = glGetIntegerv;
+        interface->fGetProgramInfoLog = glGetProgramInfoLog;
+        interface->fGetProgramiv = glGetProgramiv;
+        interface->fGetShaderInfoLog = glGetShaderInfoLog;
+        interface->fGetShaderiv = glGetShaderiv;
+        interface->fGetString = glGetString;
+        interface->fGenTextures = glGenTextures;
+        interface->fGetUniformLocation = glGetUniformLocation;
+        interface->fLineWidth = glLineWidth;
+        interface->fLinkProgram = glLinkProgram;
+        interface->fLoadMatrixf = glLoadMatrixf;
+        interface->fMatrixMode = glMatrixMode;
+        interface->fPointSize = glPointSize;
+        interface->fPixelStorei = glPixelStorei;
+        interface->fReadBuffer = NULL;
+        interface->fReadPixels = glReadPixels;
+        interface->fScissor = glScissor;
+        interface->fShadeModel = glShadeModel;
+        interface->fShaderSource = glShaderSource;
+        interface->fStencilFunc = glStencilFunc;
+        interface->fStencilFuncSeparate = glStencilFuncSeparate;
+        interface->fStencilMask = glStencilMask;
+        interface->fStencilMaskSeparate = glStencilMaskSeparate;
+        interface->fStencilOp = glStencilOp;
+        interface->fStencilOpSeparate = glStencilOpSeparate;
+        interface->fTexCoordPointer = glTexCoordPointer;
+        interface->fTexEnvi = glTexEnvi;
+        // mac uses GLenum for internalFormat param (non-standard)
+        // amounts to int vs. uint.
+        interface->fTexImage2D = (GrGLTexImage2DProc)glTexImage2D;
+        interface->fTexParameteri = glTexParameteri;
+        interface->fTexSubImage2D = glTexSubImage2D;
+        interface->fUniform1f = glUniform1f;
+        interface->fUniform1i = glUniform1i;
+        interface->fUniform1fv = glUniform1fv;
+        interface->fUniform1iv = glUniform1iv;
+        interface->fUniform2f = glUniform2f;
+        interface->fUniform2i = glUniform2i;
+        interface->fUniform2fv = glUniform2fv;
+        interface->fUniform2iv = glUniform2iv;
+        interface->fUniform3f = glUniform3f;
+        interface->fUniform3i = glUniform3i;
+        interface->fUniform3fv = glUniform3fv;
+        interface->fUniform3iv = glUniform3iv;
+        interface->fUniform4f = glUniform4f;
+        interface->fUniform4i = glUniform4i;
+        // Cleanup: fUniform4fv was assigned twice; the redundant second
+        // assignment has been removed.
+        interface->fUniform4fv = glUniform4fv;
+        interface->fUniform4iv = glUniform4iv;
+        interface->fUniformMatrix2fv = glUniformMatrix2fv;
+        interface->fUniformMatrix3fv = glUniformMatrix3fv;
+        interface->fUniformMatrix4fv = glUniformMatrix4fv;
+        interface->fUseProgram = glUseProgram;
+        interface->fVertexAttrib4fv = glVertexAttrib4fv;
+        interface->fVertexAttribPointer = glVertexAttribPointer;
+        interface->fVertexPointer = glVertexPointer;
+        interface->fViewport = glViewport;
+        interface->fGenFramebuffers = glGenFramebuffers;
+        interface->fGetFramebufferAttachmentParameteriv = glGetFramebufferAttachmentParameteriv;
+        interface->fGetRenderbufferParameteriv = glGetRenderbufferParameteriv;
+        interface->fBindFramebuffer = glBindFramebuffer;
+        interface->fFramebufferTexture2D = glFramebufferTexture2D;
+        interface->fCheckFramebufferStatus = glCheckFramebufferStatus;
+        interface->fDeleteFramebuffers = glDeleteFramebuffers;
+        interface->fRenderbufferStorage = glRenderbufferStorage;
+        interface->fGenRenderbuffers = glGenRenderbuffers;
+        interface->fDeleteRenderbuffers = glDeleteRenderbuffers;
+        interface->fFramebufferRenderbuffer = glFramebufferRenderbuffer;
+        interface->fBindRenderbuffer = glBindRenderbuffer;
+
+    #if GL_OES_mapbuffer
+        interface->fMapBuffer = glMapBufferOES;
+        interface->fUnmapBuffer = glUnmapBufferOES;
+    #endif
+
+    #if GL_APPLE_framebuffer_multisample
+        interface->fRenderbufferStorageMultisample = glRenderbufferStorageMultisampleAPPLE;
+        interface->fResolveMultisampleFramebuffer = glResolveMultisampleFramebufferAPPLE;
+    #endif
+        interface->fBindFragDataLocationIndexed = NULL;
+
+        interface->fBindingsExported = (GrGLBinding)(kES2_GrGLBinding | kES1_GrGLBinding);
+    }
+    glInterface.get()->ref();
+    return glInterface.get();
+}
diff --git a/src/gpu/ios/SkUIView.mm b/src/gpu/ios/SkUIView.mm
new file mode 100644
index 0000000000..261ed9ca1f
--- /dev/null
+++ b/src/gpu/ios/SkUIView.mm
@@ -0,0 +1,827 @@
+#import "SkUIView.h"
+#include <QuartzCore/QuartzCore.h>
+
+//#include "SkGpuCanvas.h"
+#include "SkGpuDevice.h"
+#include "SkCGUtils.h"
+#include "GrContext.h"
+
+#define SKWIND_CONFIG SkBitmap::kRGB_565_Config
+//#define SKWIND_CONFIG SkBitmap::kARGB_8888_Config
+#define SKGL_CONFIG kEAGLColorFormatRGB565
+//#define SKGL_CONFIG kEAGLColorFormatRGBA8
+
+#define SHOW_FPS
+#define FORCE_REDRAW
+//#define DUMP_FPS_TO_PRINTF
+
+//#define USE_ACCEL_TO_ROTATE
+
+//#define SHOULD_COUNTER_INIT 334
+static int gShouldCounter;
+// Debug hook: when SHOULD_COUNTER_INIT is defined, this is installed as
+// gShouldDrawProc to limit how many frames actually draw. Currently it is
+// hard-wired to always draw.
+static bool should_draw() {
+ if (--gShouldCounter == 0) {
+ // printf("\n");
+ }
+ // NOTE(review): the unconditional return makes the counter check below
+ // unreachable; presumably left in on purpose as a debugging toggle.
+ return true;
+ return gShouldCounter >= 0;
+}
+#ifdef SHOULD_COUNTER_INIT
+ bool (*gShouldDrawProc)() = should_draw;
+#else
+ bool (*gShouldDrawProc)() = NULL;
+#endif
+
+//#define USE_GL_1
+//#define USE_GL_2
+
+#if defined(USE_GL_1) || defined(USE_GL_2)
+ #define USE_GL
+#endif
+
+@implementation SkUIView
+
+
+@synthesize fWind;
+@synthesize fTitle;
+@synthesize fBackend;
+@synthesize fComplexClip;
+@synthesize fUseWarp;
+
+#include "SkWindow.h"
+#include "SkEvent.h"
+
+static float gScreenScale = 1;
+
+extern SkOSWindow* create_sk_window(void* hwnd, int argc, char** argv);
+
+#define kREDRAW_UIVIEW_GL "sk_redraw_uiview_gl_iOS"
+
+#define TITLE_HEIGHT 0
+
+static const float SCALE_FOR_ZOOM_LENS = 4.0;
+#define Y_OFFSET_FOR_ZOOM_LENS 200
+#define SIZE_FOR_ZOOM_LENS 250
+
+static const float MAX_ZOOM_SCALE = 4.0;
+static const float MIN_ZOOM_SCALE = 2.0 / MAX_ZOOM_SCALE;
+
+extern bool gDoTraceDraw;
+#define DO_TRACE_DRAW_MAX 100
+
+#ifdef SHOW_FPS
+// Accumulates per-frame draw/present timings over a window of FRAME_COUNT
+// frames and periodically reports averaged ms and fps, either via SkDebugf
+// (DUMP_FPS_TO_PRINTF) or the window title. Only compiled under SHOW_FPS.
+struct FPSState {
+ static const int FRAME_COUNT = 60;
+
+ CFTimeInterval fNow0, fNow1;
+ CFTimeInterval fTime0, fTime1, fTotalTime;
+ int fFrameCounter;
+ int fDrawCounter;
+
+ FPSState() {
+ fTime0 = fTime1 = fTotalTime = 0;
+ fFrameCounter = 0;
+ // Fix: fDrawCounter was left uninitialized, yet startDraw()/endDraw()
+ // compare it against 0 and DO_TRACE_DRAW_MAX.
+ fDrawCounter = 0;
+ }
+
+ // Timestamp the start of a draw; the dead "&& false" keeps the
+ // trace-draw trigger disabled without deleting it.
+ void startDraw() {
+ fNow0 = CACurrentMediaTime();
+
+ if (0 == fDrawCounter && false) {
+ gDoTraceDraw = true;
+ SkDebugf("\n");
+ }
+ }
+
+ // Timestamp the end of a draw and advance the trace counter, wrapping
+ // at DO_TRACE_DRAW_MAX.
+ void endDraw() {
+ fNow1 = CACurrentMediaTime();
+
+ if (0 == fDrawCounter) {
+ gDoTraceDraw = true;
+ }
+ if (DO_TRACE_DRAW_MAX == ++fDrawCounter) {
+ fDrawCounter = 0;
+ }
+ }
+
+ // Fold this frame's draw (fTime0) and present (fTime1) intervals into the
+ // accumulators; every FRAME_COUNT frames emit the averages and reset.
+ void flush(SkOSWindow* wind) {
+ CFTimeInterval now2 = CACurrentMediaTime();
+
+ fTime0 += fNow1 - fNow0;
+ fTime1 += now2 - fNow1;
+
+ if (++fFrameCounter == FRAME_COUNT) {
+ CFTimeInterval totalNow = CACurrentMediaTime();
+ fTotalTime = totalNow - fTotalTime;
+
+ SkMSec ms0 = (int)(1000 * fTime0 / FRAME_COUNT);
+ SkMSec msTotal = (int)(1000 * fTotalTime / FRAME_COUNT);
+
+ SkString str;
+ str.printf("ms: %d [%d], fps: %3.1f", msTotal, ms0,
+ FRAME_COUNT / fTotalTime);
+#ifdef DUMP_FPS_TO_PRINTF
+ SkDebugf("%s\n", str.c_str());
+#else
+ wind->setTitle(str.c_str());
+#endif
+
+ fTotalTime = totalNow;
+ fTime0 = fTime1 = 0;
+ fFrameCounter = 0;
+ }
+ }
+};
+
+static FPSState gFPS;
+
+ #define FPS_StartDraw() gFPS.startDraw()
+ #define FPS_EndDraw() gFPS.endDraw()
+ #define FPS_Flush(wind) gFPS.flush(wind)
+#else
+ #define FPS_StartDraw()
+ #define FPS_EndDraw()
+ #define FPS_Flush(wind)
+#endif
+
+///////////////////////////////////////////////////////////////////////////////
+
+#ifdef USE_GL
+// Back this view with a CAEAGLLayer so GL can render into it (GL builds only).
++ (Class) layerClass
+{
+ return [CAEAGLLayer class];
+}
+#endif
+
+// Shared initializer invoked from both initWithCoder: and initWithFrame:.
+// Sets default state, installs gestures, and (GL builds) creates the EAGL
+// context plus the FBO/renderbuffer/stencil attachments. Returns nil (after
+// releasing self) if the GL context cannot be created or made current.
+- (id)initWithMyDefaults {
+ fBackend = kGL_Backend;
+ fUseWarp = false;
+ fRedrawRequestPending = false;
+ // FIXME: If iOS has argc & argv, pass them here.
+ //fWind = create_sk_window(self, 0, NULL);
+ //fWind->setConfig(SKWIND_CONFIG);
+ fMatrix.reset();
+ fLocalMatrix.reset();
+ fNeedGestureEnded = false;
+ fNeedFirstPinch = true;
+ fZoomAround = false;
+ fComplexClip = false;
+
+ [self initGestures];
+
+#ifdef USE_GL
+ CAEAGLLayer *eaglLayer = (CAEAGLLayer *)self.layer;
+ // Opaque layer + non-retained backing: cheaper compositing, but every
+ // frame must be fully redrawn (see forceInvalAll in drawInGL).
+ eaglLayer.opaque = TRUE;
+ eaglLayer.drawableProperties = [NSDictionary dictionaryWithObjectsAndKeys:
+ [NSNumber numberWithBool:NO],
+ kEAGLDrawablePropertyRetainedBacking,
+ SKGL_CONFIG,
+ kEAGLDrawablePropertyColorFormat,
+ nil];
+
+#ifdef USE_GL_1
+ fGL.fContext = [[EAGLContext alloc] initWithAPI:kEAGLRenderingAPIOpenGLES1];
+#else
+ fGL.fContext = [[EAGLContext alloc] initWithAPI:kEAGLRenderingAPIOpenGLES2];
+#endif
+
+ if (!fGL.fContext || ![EAGLContext setCurrentContext:fGL.fContext])
+ {
+ [self release];
+ return nil;
+ }
+
+ // Create default framebuffer object. The backing will be allocated for the current layer in -resizeFromLayer
+ glGenFramebuffersOES(1, &fGL.fFramebuffer);
+ glBindFramebufferOES(GL_FRAMEBUFFER_OES, fGL.fFramebuffer);
+
+ glGenRenderbuffersOES(1, &fGL.fRenderbuffer);
+ glGenRenderbuffersOES(1, &fGL.fStencilbuffer);
+
+ glBindRenderbufferOES(GL_RENDERBUFFER_OES, fGL.fRenderbuffer);
+ glFramebufferRenderbufferOES(GL_FRAMEBUFFER_OES, GL_COLOR_ATTACHMENT0_OES, GL_RENDERBUFFER_OES, fGL.fRenderbuffer);
+
+ glBindRenderbufferOES(GL_RENDERBUFFER_OES, fGL.fStencilbuffer);
+ glFramebufferRenderbufferOES(GL_FRAMEBUFFER_OES, GL_STENCIL_ATTACHMENT_OES, GL_RENDERBUFFER_OES, fGL.fStencilbuffer);
+#endif
+
+#ifdef USE_ACCEL_TO_ROTATE
+ fRotateMatrix.reset();
+ [UIAccelerometer sharedAccelerometer].delegate = self;
+ [UIAccelerometer sharedAccelerometer].updateInterval = 1 / 30.0;
+#endif
+ return self;
+}
+
+// Nib/storyboard entry point: defer to the shared initializer.
+- (id)initWithCoder:(NSCoder*)coder {
+ if ((self = [super initWithCoder:coder])) {
+ self = [self initWithMyDefaults];
+ }
+ return self;
+}
+
+// Programmatic entry point: defer to the shared initializer.
+- (id)initWithFrame:(CGRect)frame {
+ if (self = [super initWithFrame:frame]) {
+ self = [self initWithMyDefaults];
+ }
+ return self;
+}
+
+#include "SkImageDecoder.h"
+#include "SkStream_NSData.h"
+
+// Draw a framed "zoom lens" of fixed size (SIZE_FOR_ZOOM_LENS) centered on
+// (cx, cy), then clip the canvas to it and apply a zoom about that point.
+// The caller re-draws the scene afterwards so it appears magnified inside
+// the lens. Leaves the canvas clipped/transformed — caller manages save state.
+static void zoom_around(SkCanvas* canvas, float cx, float cy, float zoom) {
+ float clipW = SIZE_FOR_ZOOM_LENS;
+ float clipH = SIZE_FOR_ZOOM_LENS;
+
+ SkRect r;
+ r.set(0, 0, clipW, clipH);
+ r.offset(cx - clipW/2, cy - clipH/2);
+
+ SkPaint paint;
+ paint.setColor(0xFF66AAEE);
+ paint.setStyle(SkPaint::kStroke_Style);
+ paint.setStrokeWidth(10);
+
+ // draw our "frame" around the zoom lens
+ canvas->drawRect(r, paint);
+
+ // now clip and scale the lens
+ canvas->clipRect(r);
+ // scale about (cx, cy): translate to origin, scale, translate back
+ canvas->translate(cx, cy);
+ canvas->scale(zoom, zoom);
+ canvas->translate(-cx, -cy);
+}
+
+// Core draw routine shared by the GL and raster paths. Applies the optional
+// debug complex clip, advances any active fling animation, composes the
+// gesture-local matrix with the committed matrix, draws the window, and
+// finally draws the zoom lens pass if active.
+- (void)drawWithCanvas:(SkCanvas*)canvas {
+ if (fComplexClip) {
+ canvas->drawColor(SK_ColorBLUE);
+
+ SkPath path;
+ static const SkRect r[] = {
+ { 50, 50, 250, 250 },
+ { 150, 150, 500, 600 }
+ };
+ for (size_t i = 0; i < GR_ARRAY_COUNT(r); i++) {
+ path.addRect(r[i]);
+ }
+ canvas->clipPath(path);
+ }
+
+ // This is to consolidate multiple inval requests
+ fRedrawRequestPending = false;
+
+ // A fling keeps mutating fLocalMatrix each frame; when it reports done,
+ // fold the local matrix into the committed matrix.
+ if (fFlingState.isActive()) {
+ if (!fFlingState.evaluateMatrix(&fLocalMatrix)) {
+ [self flushLocalMatrix];
+ }
+ }
+
+ SkMatrix localMatrix = fLocalMatrix;
+#ifdef USE_ACCEL_TO_ROTATE
+ localMatrix.preConcat(fRotateMatrix);
+#endif
+
+ SkMatrix matrix;
+ matrix.setConcat(localMatrix, fMatrix);
+
+ // Only hand the canvas an external matrix when there is a live scale
+ // (or accelerometer rotation) in progress.
+ const SkMatrix* localM = NULL;
+ if (localMatrix.getType() & SkMatrix::kScale_Mask) {
+ localM = &localMatrix;
+ }
+#ifdef USE_ACCEL_TO_ROTATE
+ localM = &localMatrix;
+#endif
+ canvas->setExternalMatrix(localM);
+
+#ifdef SHOULD_COUNTER_INIT
+ gShouldCounter = SHOULD_COUNTER_INIT;
+#endif
+ {
+ int saveCount = canvas->save();
+ canvas->concat(matrix);
+ // SkRect r = { 10, 10, 500, 600 }; canvas->clipRect(r);
+ fWind->draw(canvas);
+ canvas->restoreToCount(saveCount);
+ }
+
+ // Second pass: clip to the lens and redraw magnified (see zoom_around).
+ if (fZoomAround) {
+ zoom_around(canvas, fZoomAroundX, fZoomAroundY, SCALE_FOR_ZOOM_LENS);
+ canvas->concat(matrix);
+ fWind->draw(canvas);
+ }
+
+#ifdef FORCE_REDRAW
+ fWind->inval(NULL);
+#endif
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+// Reallocate the GL color/stencil backing (or just measure the bounds in the
+// raster build) whenever the view is laid out, then resize and invalidate the
+// Skia window to match.
+- (void)layoutSubviews {
+ int W, H;
+
+ gScreenScale = [UIScreen mainScreen].scale;
+
+#ifdef USE_GL
+
+ CAEAGLLayer* eaglLayer = (CAEAGLLayer*)self.layer;
+ // Respect Retina scale where the OS supports it.
+ if ([self respondsToSelector:@selector(setContentScaleFactor:)]) {
+ self.contentScaleFactor = gScreenScale;
+ }
+
+ // Allocate color buffer backing based on the current layer size
+ glBindRenderbufferOES(GL_RENDERBUFFER_OES, fGL.fRenderbuffer);
+ [fGL.fContext renderbufferStorage:GL_RENDERBUFFER_OES fromDrawable:eaglLayer];
+
+ glGetRenderbufferParameterivOES(GL_RENDERBUFFER_OES, GL_RENDERBUFFER_WIDTH_OES, &fGL.fWidth);
+ glGetRenderbufferParameterivOES(GL_RENDERBUFFER_OES, GL_RENDERBUFFER_HEIGHT_OES, &fGL.fHeight);
+
+ // Stencil buffer must match the color buffer's dimensions.
+ glBindRenderbufferOES(GL_RENDERBUFFER_OES, fGL.fStencilbuffer);
+ glRenderbufferStorageOES(GL_RENDERBUFFER_OES, GL_STENCIL_INDEX8_OES, fGL.fWidth, fGL.fHeight);
+
+
+ if (glCheckFramebufferStatusOES(GL_FRAMEBUFFER_OES) != GL_FRAMEBUFFER_COMPLETE_OES)
+ {
+ NSLog(@"Failed to make complete framebuffer object %x", glCheckFramebufferStatusOES(GL_FRAMEBUFFER_OES));
+ }
+
+ W = fGL.fWidth;
+ H = fGL.fHeight;
+#else
+ CGRect rect = [self bounds];
+ W = (int)CGRectGetWidth(rect);
+ H = (int)CGRectGetHeight(rect) - TITLE_HEIGHT;
+#endif
+
+ printf("---- layoutSubviews %d %d\n", W, H);
+ fWind->resize(W, H);
+ fWind->inval(NULL);
+}
+
+#ifdef USE_GL
+
+static GrContext* gCtx;
+// Lazily create and return the process-wide GrContext (fixed-function or
+// shader pipeline depending on USE_GL_1). Never returns the context released.
+// NOTE(review): gCtx is a plain file static with no locking — safe only if
+// all callers are on the main thread, as the existing comment implies.
+static GrContext* get_global_grctx() {
+ // should be pthread-local at least
+ if (NULL == gCtx) {
+#ifdef USE_GL_1
+ gCtx = GrContext::Create(kOpenGL_Fixed_GrEngine, 0);
+#else
+ gCtx = GrContext::Create(kOpenGL_Shaders_GrEngine, 0);
+#endif
+ }
+ return gCtx;
+}
+
+#include "SkDevice.h"
+#include "SkShader.h"
+#include "SkGrTexturePixelRef.h"
+#include "GrMesh.h"
+#include "SkRandom.h"
+
+#include "GrAtlas.h"
+#include "GrTextStrike.h"
+
+// Debug visualization of the GPU font-atlas plots. The entire body is
+// compiled out (#if 0), so this is currently a no-op kept for future use.
+static void show_fontcache(GrContext* ctx, SkCanvas* canvas) {
+#if 0
+ SkPaint paint;
+ const int SIZE = 64;
+ GrAtlas* plot[64][64];
+
+ paint.setAntiAlias(true);
+ paint.setTextSize(24);
+ paint.setTextAlign(SkPaint::kCenter_Align);
+
+ Gr_bzero(plot, sizeof(plot));
+
+ GrFontCache* cache = ctx->getFontCache();
+ GrTextStrike* strike = cache->getHeadStrike();
+ int count = 0;
+ while (strike) {
+ GrAtlas* atlas = strike->getAtlas();
+ while (atlas) {
+ int x = atlas->getPlotX();
+ int y = atlas->getPlotY();
+
+ SkRandom rand((intptr_t)strike);
+ SkColor c = rand.nextU() | 0x80808080;
+ paint.setColor(c);
+ paint.setAlpha(0x80);
+
+ SkRect r;
+ r.set(x * SIZE, y * SIZE, (x + 1)*SIZE, (y+1)*SIZE);
+ r.inset(1, 1);
+ canvas->drawRect(r, paint);
+
+ paint.setColor(0xFF660000);
+ SkString label;
+ label.printf("%d", count);
+ canvas->drawText(label.c_str(), label.size(), r.centerX(),
+ r.fTop + r.height() * 2 / 3, paint);
+
+ atlas = atlas->nextAtlas();
+ }
+ strike = strike->fNext;
+ count += 1;
+ }
+#endif
+}
+
+void test_patch(SkCanvas* canvas, const SkBitmap& bm, SkScalar scale);
+
+// Tessellate the bitmap's bounds into a ~16px grid mesh and draw it textured
+// with the bitmap via a clamp-mode bitmap shader.
+static void draw_mesh(SkCanvas* canvas, const SkBitmap& bm) {
+ GrMesh fMesh;
+
+ SkRect r;
+ r.set(0, 0, SkIntToScalar(bm.width()), SkIntToScalar(bm.height()));
+
+ // fMesh.init(bounds, fBitmap.width() / 40, fBitmap.height() / 40, texture);
+ fMesh.init(r, bm.width()/16, bm.height()/16, r);
+
+ SkPaint paint;
+ SkShader* s = SkShader::CreateBitmapShader(bm, SkShader::kClamp_TileMode, SkShader::kClamp_TileMode);
+ // setShader refs the shader, so drop our creation ref immediately.
+ paint.setShader(s)->unref();
+ fMesh.draw(canvas, paint);
+}
+
+// Scale the canvas by (sx, sy) about the pivot point (px, py).
+static void scale_about(SkCanvas* canvas, float sx, float sy, float px, float py) {
+ canvas->translate(px, py);
+ canvas->scale(sx, sy);
+ canvas->translate(-px, -py);
+}
+
+// Linear interpolation: returns v0 at percent == 0, v1 at percent == 1.
+static float grInterp(float v0, float v1, float percent) {
+ return v0 + percent * (v1 - v0);
+}
+
+// Draw a device's backing bitmap through the "warp" patch effect, shrinking
+// it toward the center (scale 1 -> 0.8) as warp goes 0 -> 1.
+static void draw_device(SkCanvas* canvas, SkDevice* dev, float w, float h, float warp) {
+ canvas->save();
+ float s = grInterp(1, 0.8, warp);
+ scale_about(canvas, s, s, w/2, h/2);
+ test_patch(canvas, dev->accessBitmap(false), warp);
+ canvas->restore();
+}
+
+// GL-path frame driver: binds our FBO, clears, draws the window through a
+// SkGpuDevice-backed canvas, flushes the GrContext, and presents the
+// renderbuffer. Also drives the FPS HUD and optional GPU stats dumping.
+- (void)drawInGL {
+// printf("------ drawInGL\n");
+ // This application only creates a single context which is already set current at this point.
+ // This call is redundant, but needed if dealing with multiple contexts.
+ [EAGLContext setCurrentContext:fGL.fContext];
+
+ // This application only creates a single default framebuffer which is already bound at this point.
+ // This call is redundant, but needed if dealing with multiple framebuffers.
+ glBindFramebufferOES(GL_FRAMEBUFFER_OES, fGL.fFramebuffer);
+
+ // Clear the whole buffer, temporarily disabling scissor so the clear
+ // is not clipped; restore the scissor state afterwards.
+ GLint scissorEnable;
+ glGetIntegerv(GL_SCISSOR_TEST, &scissorEnable);
+ glDisable(GL_SCISSOR_TEST);
+ glClearColor(0,0,0,0);
+ glClear(GL_COLOR_BUFFER_BIT);
+ if (scissorEnable) {
+ glEnable(GL_SCISSOR_TEST);
+ }
+ glViewport(0, 0, fWind->width(), fWind->height());
+
+ GrContext* ctx = get_global_grctx();
+
+ //SkGpuCanvas origCanvas(ctx);
+ //origCanvas.setBitmapDevice(fWind->getBitmap());
+ //gl->reset();
+
+ SkCanvas glCanvas;
+ SkGpuDevice* dev = new SkGpuDevice(ctx, SkGpuDevice::Current3DApiRenderTarget());
+ // setDevice refs dev; drop our creation ref.
+ glCanvas.setDevice(dev)->unref();
+
+ SkCanvas rasterCanvas;
+
+ SkCanvas* canvas;
+ //SkDevice* dev = NULL;
+
+ // NOTE(review): no default case — if fBackend ever holds a value other
+ // than kRaster_Backend/kGL_Backend, `canvas` is read uninitialized
+ // below. Confirm the backend enum has only these two values.
+ switch (fBackend) {
+ case kRaster_Backend:
+ canvas = &rasterCanvas;
+ break;
+ case kGL_Backend:
+ canvas = &glCanvas;
+ break;
+ }
+
+// if (fUseWarp || fWarpState.isActive()) {
+// if (kGL_Backend == fBackend) {
+// dev = origCanvas.createDevice(fWind->getBitmap(), true);
+// canvas->setDevice(dev)->unref();
+// } else {
+// canvas->setBitmapDevice(fWind->getBitmap());
+// dev = canvas->getDevice();
+// }
+// } else {
+// canvas->setBitmapDevice(fWind->getBitmap());
+// dev = NULL;
+// }
+
+ canvas->translate(0, TITLE_HEIGHT);
+
+ // if we're not "retained", then we have to always redraw everything.
+ // This call forces us to ignore the fDirtyRgn, and draw everywhere.
+ // If we are "retained", we can skip this call (as the raster case does)
+ fWind->forceInvalAll();
+
+ FPS_StartDraw();
+ [self drawWithCanvas:canvas];
+ FPS_EndDraw();
+
+// if (dev) {
+// draw_device(&origCanvas, dev, fWind->width(), fWind->height(),
+// fWarpState.evaluate());
+// } else {
+// if (kRaster_Backend == fBackend) {
+// origCanvas.drawBitmap(fWind->getBitmap(), 0, 0, NULL);
+// }
+// // else GL - we're already on screen
+// }
+
+ show_fontcache(ctx, canvas);
+ ctx->flush(false);
+
+ // This application only creates a single color renderbuffer which is already bound at this point.
+ // This call is redundant, but needed if dealing with multiple renderbuffers.
+ glBindRenderbufferOES(GL_RENDERBUFFER_OES, fGL.fRenderbuffer);
+ [fGL.fContext presentRenderbuffer:GL_RENDERBUFFER_OES];
+
+#if GR_COLLECT_STATS
+ // Dump and reset GPU stats once every 100 frames.
+ static int frame = 0;
+ if (!(frame % 100)) {
+ get_global_grctx()->printStats();
+ }
+ get_global_grctx()->resetStats();
+ ++frame;
+#endif
+
+ FPS_Flush(fWind);
+
+#if 0
+ gCtx->deleteAllTextures(GrTextureCache::kAbandonTexture_DeleteMode);
+ gCtx->unref();
+ gCtx = NULL;
+#endif
+}
+
+#else // raster case
+
+// Raster-path frame driver (non-GL builds): draw the window into its bitmap
+// and blit it to the current CoreGraphics context.
+- (void)drawRect:(CGRect)rect {
+ SkCanvas canvas;
+ canvas.setBitmapDevice(fWind->getBitmap());
+ FPS_StartDraw();
+ [self drawWithCanvas:&canvas];
+ FPS_EndDraw();
+
+ CGContextRef cg = UIGraphicsGetCurrentContext();
+ SkCGDrawBitmap(cg, fWind->getBitmap(), 0, TITLE_HEIGHT);
+
+ FPS_Flush(fWind);
+
+}
+#endif
+
+// Toggle the warp effect, animating its strength 0<->1 over half a second.
+- (void)setWarpState:(bool)useWarp {
+ fWarpState.stop(); // we should reverse from where we are if active...
+
+ const float duration = 0.5;
+ fUseWarp = useWarp;
+ if (useWarp) {
+ fWarpState.start(0, 1, duration);
+ } else {
+ fWarpState.start(1, 0, duration);
+ }
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+// Commit the in-progress gesture transform: fold fLocalMatrix into the
+// persistent fMatrix, reset it, and cancel any running fling.
+- (void)flushLocalMatrix {
+ fMatrix.postConcat(fLocalMatrix);
+ fLocalMatrix.reset();
+ fFlingState.stop();
+ fNeedGestureEnded = false;
+ fNeedFirstPinch = true;
+}
+
+// Common gesture-state bookkeeping: commit the local matrix when the gesture
+// ends or is cancelled.
+- (void)localMatrixWithGesture:(UIGestureRecognizer*)gesture {
+ fNeedGestureEnded = true;
+
+ switch (gesture.state) {
+ case UIGestureRecognizerStateCancelled:
+ case UIGestureRecognizerStateEnded:
+ [self flushLocalMatrix];
+ break;
+ case UIGestureRecognizerStateChanged: {
+ // NOTE(review): this concat result is computed and discarded —
+ // looks like dead code, or a missing use of `matrix`; confirm.
+ SkMatrix matrix;
+ matrix.setConcat(fLocalMatrix, fMatrix);
+ } break;
+ default:
+ break;
+ }
+}
+
+// Shared prologue for every gesture handler: a new gesture (or one starting
+// during a fling) commits whatever local transform is pending.
+- (void)commonHandleGesture:(UIGestureRecognizer*)sender {
+ if (fFlingState.isActive()) {
+ [self flushLocalMatrix];
+ }
+
+ switch (sender.state) {
+ case UIGestureRecognizerStateBegan:
+ [self flushLocalMatrix];
+ break;
+ default:
+ break;
+ }
+}
+
+// Clamp an incremental scale factor so the cumulative zoom (current matrix
+// scale * scale) stays within [MIN_ZOOM_SCALE, MAX_ZOOM_SCALE].
+- (float)limitTotalZoom:(float)scale {
+ // this query works 'cause we know that we're square-scale w/ no skew/rotation
+ const float curr = fMatrix[0];
+
+ if (scale > 1 && curr * scale > MAX_ZOOM_SCALE) {
+ scale = MAX_ZOOM_SCALE / curr;
+ } else if (scale < 1 && curr * scale < MIN_ZOOM_SCALE) {
+ scale = MIN_ZOOM_SCALE / curr;
+ }
+ return scale;
+}
+
+// Long-press drives the zoom lens: while the press is active, track its
+// location (offset upward so the finger doesn't cover the lens); deactivate
+// when the press ends, is cancelled, or loses all touches.
+- (void)handleLongPressGesture:(UILongPressGestureRecognizer*)sender {
+ [self commonHandleGesture:sender];
+
+ if ([sender numberOfTouches] == 0) {
+ fZoomAround = false;
+ return;
+ }
+
+ CGPoint pt = [sender locationOfTouch:0 inView:self];
+ switch (sender.state) {
+ case UIGestureRecognizerStateBegan:
+ case UIGestureRecognizerStateChanged:
+ fZoomAround = true;
+ fZoomAroundX = pt.x;
+ fZoomAroundY = pt.y - Y_OFFSET_FOR_ZOOM_LENS;
+ break;
+ case UIGestureRecognizerStateEnded:
+ case UIGestureRecognizerStateCancelled:
+ fZoomAround = false;
+ break;
+ default:
+ break;
+ }
+}
+
+// Attach a recognizer to the view (which retains it) and release our ref.
+- (void)addAndReleaseGesture:(UIGestureRecognizer*)gesture {
+ [self addGestureRecognizer:gesture];
+ [gesture release];
+}
+
+
+
+//Gesture Handlers
+// Forward each new touch to the Skia window as a "down" click, using the
+// UITouch pointer as the click's owner token.
+- (void)touchesBegan:(NSSet *)touches withEvent:(UIEvent *)event {
+ for (UITouch *touch in touches) {
+ CGPoint loc = [touch locationInView:self];
+ fWind->handleClick(loc.x, loc.y, SkView::Click::kDown_State, touch);
+ }
+}
+
+// Forward touch movement to the Skia window as "moved" clicks.
+- (void)touchesMoved:(NSSet *)touches withEvent:(UIEvent *)event {
+ for (UITouch *touch in touches) {
+ CGPoint loc = [touch locationInView:self];
+ fWind->handleClick(loc.x, loc.y, SkView::Click::kMoved_State, touch);
+ }
+}
+
+// Forward touch lift-off to the Skia window as "up" clicks.
+- (void)touchesEnded:(NSSet *)touches withEvent:(UIEvent *)event {
+ for (UITouch *touch in touches) {
+ CGPoint loc = [touch locationInView:self];
+ fWind->handleClick(loc.x, loc.y, SkView::Click::kUp_State, touch);
+ }
+}
+
+// Cancelled touches are treated the same as ended ones ("up" clicks).
+- (void)touchesCancelled:(NSSet *)touches withEvent:(UIEvent *)event {
+ for (UITouch *touch in touches) {
+ CGPoint loc = [touch locationInView:self];
+ fWind->handleClick(loc.x, loc.y, SkView::Click::kUp_State, touch);
+ }
+}
+
+// Install the view's gesture recognizers (long-press drives the zoom lens;
+// see handleLongPressGesture:).
+- (void)initGestures {
+ // Fix: -init... may return a different object than +alloc returned, so
+ // its result must be captured — use the standard nested alloc/init idiom
+ // instead of calling init and discarding the return value.
+ UILongPressGestureRecognizer* longG = [[UILongPressGestureRecognizer alloc]
+ initWithTarget:self action:@selector(handleLongPressGesture:)];
+ [self addAndReleaseGesture:longG];
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+// File-local float absolute value (avoids int-only ::abs); used by normalize.
+static float abs(float x) { return x < 0 ? -x : x; }
+
+// Project the accelerometer vector onto the screen (x, y) plane and normalize
+// it into xy[]. Returns false when the reading is unusable: near-zero
+// magnitude, or the device is lying nearly flat (z dominates).
+static bool normalize(UIAcceleration* acc, float xy[]) {
+ float mag2 = acc.x*acc.x + acc.y*acc.y + acc.z*acc.z;
+ if (mag2 < 0.000001) {
+ return false;
+ }
+ // Reject readings where z accounts for >90% of the magnitude (flat device).
+ if (abs((float)acc.z) > 0.9 * sqrt(mag2)) {
+ return false;
+ }
+
+ mag2 = acc.x*acc.x + acc.y*acc.y;
+ if (mag2 < 0.000001) {
+ return false;
+ }
+ float scale = 1 / sqrt(mag2);
+ xy[0] = acc.x * scale;
+ xy[1] = acc.y * scale;
+ return true;
+}
+
+// Normalize a 2-vector in place.
+// NOTE(review): divides by the magnitude with no zero guard — callers must
+// never pass the zero vector (the caller below smooths non-zero inputs).
+static void normalize(float xy[]) {
+ float scale = 1 / sqrt(xy[0]*xy[0] + xy[1]*xy[1]);
+ xy[0] *= scale;
+ xy[1] *= scale;
+}
+
+// Exponential smoothing: blend 25% of the new sample with 75% of the old.
+static float weighted_average(float newv, float oldv) {
+ return newv * 0.25 + oldv * 0.75;
+}
+
+// UIAccelerometerDelegate: derive a screen-plane rotation from gravity,
+// smooth it against the current rotation matrix, and update fRotateMatrix
+// (rotation about the fixed pivot (400, 400)). Used with USE_ACCEL_TO_ROTATE.
+- (void)accelerometer:(UIAccelerometer *)accelerometer didAccelerate:(UIAcceleration *)acc {
+
+ float norm[2];
+ if (normalize(acc, norm)) {
+ float sinv = -norm[0];
+ float cosv = -norm[1];
+ // smooth
+ // fRotateMatrix[1] is -sin and [0] is cos of the current rotation,
+ // so these blend the new angle with the previous one.
+ norm[0] = weighted_average(sinv, -fRotateMatrix[1]);
+ norm[1] = weighted_average(cosv, fRotateMatrix[0]);
+ normalize(norm);
+ fRotateMatrix.setSinCos(norm[0], norm[1], 400, 400);
+ }
+#if 0
+ NSDate *now = [NSDate date];
+ NSTimeInterval intervalDate = [now timeIntervalSinceDate:now_prev];
+
+ velX += (acceleration.x * intervalDate);
+ distX += (velX * intervalDate);
+ //do other axis here too
+
+ // setup for next UIAccelerometer event
+ now_prev = now;
+#endif
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+// Mirror the Skia window's title onto the UIKit title object, if one is set.
+- (void)setSkTitle:(const char *)title {
+ if (fTitle) {
+ fTitle.title = [NSString stringWithUTF8String:title];
+ }
+}
+
+// Handle Skia events posted to this view; the redraw event triggers a GL
+// frame. Returns true only for events consumed here.
+- (BOOL)onHandleEvent:(const SkEvent&)evt {
+ if (evt.isType(kREDRAW_UIVIEW_GL)) {
+ [self drawInGL];
+ return true;
+ }
+ return false;
+}
+
+// Request a redraw. GL builds coalesce requests via fRedrawRequestPending
+// and schedule drawInGL on the main run loop; raster builds invalidate the
+// given rect (or the whole view when r is NULL) through UIKit.
+- (void)postInvalWithRect:(const SkIRect*)r {
+#ifdef USE_GL
+
+#if 1
+ if (!fRedrawRequestPending) {
+ fRedrawRequestPending = true;
+ /*
+ performSelectorOnMainThread seems to starve updating other views
+ (e.g. our FPS view in the titlebar), so we use the afterDelay
+ version
+ */
+ if (0) {
+ [self performSelectorOnMainThread:@selector(drawInGL) withObject:nil waitUntilDone:NO];
+ } else {
+ [self performSelector:@selector(drawInGL) withObject:nil afterDelay:0];
+ }
+ }
+#else
+ // Alternate (disabled) path: post a Skia event instead of a selector.
+ if (!fRedrawRequestPending) {
+ SkEvent* evt = new SkEvent(kREDRAW_UIVIEW_GL);
+ evt->post(fWind->getSinkID());
+ fRedrawRequestPending = true;
+ }
+#endif
+
+#else
+ if (r) {
+ [self setNeedsDisplayInRect:CGRectMake(r->fLeft, r->fTop,
+ r->width(), r->height())];
+ } else {
+ [self setNeedsDisplay];
+ }
+#endif
+}
+
+@end
diff --git a/src/gpu/mac/GrGLDefaultInterface_mac.cpp b/src/gpu/mac/GrGLDefaultInterface_mac.cpp
new file mode 100644
index 0000000000..ba43975406
--- /dev/null
+++ b/src/gpu/mac/GrGLDefaultInterface_mac.cpp
@@ -0,0 +1,170 @@
+
+/*
+ * Copyright 2011 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+#include "GrGLInterface.h"
+
+#include <OpenGL/gl.h>
+#include <OpenGL/glext.h>
+
+// Build (once) and return the desktop-GL function table for Mac, wiring each
+// GrGLInterface slot to the statically linked GL entry point. The interface
+// is cached in a file-static SkAutoTUnref; each call returns it with an
+// extra ref that the caller owns.
+const GrGLInterface* GrGLDefaultInterface() {
+ static SkAutoTUnref<GrGLInterface> glInterface;
+ if (!glInterface.get()) {
+ GrGLInterface* interface = new GrGLInterface;
+ glInterface.reset(interface);
+ interface->fBindingsExported = kDesktop_GrGLBinding;
+ interface->fActiveTexture = glActiveTexture;
+ interface->fAttachShader = glAttachShader;
+ interface->fBindAttribLocation = glBindAttribLocation;
+ interface->fBindBuffer = glBindBuffer;
+#if GL_VERSION_3_0
+ interface->fBindFragDataLocation = glBindFragDataLocation;
+#endif
+ interface->fBindTexture = glBindTexture;
+ interface->fBlendColor = glBlendColor;
+ interface->fBlendFunc = glBlendFunc;
+ interface->fBufferData = glBufferData;
+ interface->fBufferSubData = glBufferSubData;
+ interface->fClear = glClear;
+ interface->fClearColor = glClearColor;
+ interface->fClearStencil = glClearStencil;
+ interface->fClientActiveTexture = glClientActiveTexture;
+ interface->fColorMask = glColorMask;
+ interface->fColorPointer = glColorPointer;
+ interface->fColor4ub = glColor4ub;
+ interface->fCompileShader = glCompileShader;
+ interface->fCompressedTexImage2D = glCompressedTexImage2D;
+ interface->fCreateProgram = glCreateProgram;
+ interface->fCreateShader = glCreateShader;
+ interface->fCullFace = glCullFace;
+ interface->fDeleteBuffers = glDeleteBuffers;
+ interface->fDeleteProgram = glDeleteProgram;
+ interface->fDeleteShader = glDeleteShader;
+ interface->fDeleteTextures = glDeleteTextures;
+ interface->fDepthMask = glDepthMask;
+ interface->fDisable = glDisable;
+ interface->fDisableClientState = glDisableClientState;
+ interface->fDisableVertexAttribArray =
+ glDisableVertexAttribArray;
+ interface->fDrawArrays = glDrawArrays;
+ interface->fDrawBuffer = glDrawBuffer;
+ interface->fDrawBuffers = glDrawBuffers;
+ interface->fDrawElements = glDrawElements;
+ interface->fEnable = glEnable;
+ interface->fEnableClientState = glEnableClientState;
+ interface->fEnableVertexAttribArray = glEnableVertexAttribArray;
+ interface->fFrontFace = glFrontFace;
+ interface->fGenBuffers = glGenBuffers;
+ interface->fGetBufferParameteriv = glGetBufferParameteriv;
+ interface->fGetError = glGetError;
+ interface->fGetIntegerv = glGetIntegerv;
+ interface->fGetProgramInfoLog = glGetProgramInfoLog;
+ interface->fGetProgramiv = glGetProgramiv;
+ interface->fGetShaderInfoLog = glGetShaderInfoLog;
+ interface->fGetShaderiv = glGetShaderiv;
+ interface->fGetString = glGetString;
+ interface->fGetTexLevelParameteriv = glGetTexLevelParameteriv;
+ interface->fGenTextures = glGenTextures;
+ interface->fGetUniformLocation = glGetUniformLocation;
+ interface->fLineWidth = glLineWidth;
+ interface->fLinkProgram = glLinkProgram;
+ interface->fLoadMatrixf = glLoadMatrixf;
+ interface->fMapBuffer = glMapBuffer;
+ interface->fMatrixMode = glMatrixMode;
+ interface->fPointSize = glPointSize;
+ interface->fPixelStorei = glPixelStorei;
+ interface->fReadBuffer = glReadBuffer;
+ interface->fReadPixels = glReadPixels;
+ interface->fScissor = glScissor;
+ interface->fShadeModel = glShadeModel;
+ interface->fShaderSource = glShaderSource;
+ interface->fStencilFunc = glStencilFunc;
+ interface->fStencilFuncSeparate = glStencilFuncSeparate;
+ interface->fStencilMask = glStencilMask;
+ interface->fStencilMaskSeparate = glStencilMaskSeparate;
+ interface->fStencilOp = glStencilOp;
+ interface->fStencilOpSeparate = glStencilOpSeparate;
+ interface->fTexCoordPointer = glTexCoordPointer;
+ interface->fTexEnvi = glTexEnvi;
+ // mac uses GLenum for internalFormat param (non-standard)
+ // amounts to int vs. uint.
+ interface->fTexImage2D = (GrGLTexImage2DProc)glTexImage2D;
+ interface->fTexParameteri = glTexParameteri;
+ interface->fTexSubImage2D = glTexSubImage2D;
+ interface->fUniform1f = glUniform1f;
+ interface->fUniform1i = glUniform1i;
+ interface->fUniform1fv = glUniform1fv;
+ interface->fUniform1iv = glUniform1iv;
+ interface->fUniform2f = glUniform2f;
+ interface->fUniform2i = glUniform2i;
+ interface->fUniform2fv = glUniform2fv;
+ interface->fUniform2iv = glUniform2iv;
+ interface->fUniform3f = glUniform3f;
+ interface->fUniform3i = glUniform3i;
+ interface->fUniform3fv = glUniform3fv;
+ interface->fUniform3iv = glUniform3iv;
+ interface->fUniform4f = glUniform4f;
+ interface->fUniform4i = glUniform4i;
+ // Fix: fUniform4fv was assigned twice; the duplicate is removed.
+ interface->fUniform4fv = glUniform4fv;
+ interface->fUniform4iv = glUniform4iv;
+ interface->fUniformMatrix2fv = glUniformMatrix2fv;
+ interface->fUniformMatrix3fv = glUniformMatrix3fv;
+ interface->fUniformMatrix4fv = glUniformMatrix4fv;
+ interface->fUnmapBuffer = glUnmapBuffer;
+ interface->fUseProgram = glUseProgram;
+ interface->fVertexAttrib4fv = glVertexAttrib4fv;
+ interface->fVertexAttribPointer = glVertexAttribPointer;
+ interface->fVertexPointer = glVertexPointer;
+ interface->fViewport = glViewport;
+
+ // FBO entry points: prefer the core/ARB names, fall back to the EXT
+ // suffixed variants when only the extension is available at build time.
+ #if GL_ARB_framebuffer_object
+ interface->fGenFramebuffers = glGenFramebuffers;
+ interface->fGetFramebufferAttachmentParameteriv = glGetFramebufferAttachmentParameteriv;
+ interface->fGetRenderbufferParameteriv = glGetRenderbufferParameteriv;
+ interface->fBindFramebuffer = glBindFramebuffer;
+ interface->fFramebufferTexture2D = glFramebufferTexture2D;
+ interface->fCheckFramebufferStatus = glCheckFramebufferStatus;
+ interface->fDeleteFramebuffers = glDeleteFramebuffers;
+ interface->fRenderbufferStorage = glRenderbufferStorage;
+ interface->fGenRenderbuffers = glGenRenderbuffers;
+ interface->fDeleteRenderbuffers = glDeleteRenderbuffers;
+ interface->fFramebufferRenderbuffer = glFramebufferRenderbuffer;
+ interface->fBindRenderbuffer = glBindRenderbuffer;
+ interface->fRenderbufferStorageMultisample =
+ glRenderbufferStorageMultisample;
+ interface->fBlitFramebuffer = glBlitFramebuffer;
+ #elif GL_EXT_framebuffer_object
+ interface->fGenFramebuffers = glGenFramebuffersEXT;
+ interface->fGetFramebufferAttachmentParameteriv = glGetFramebufferAttachmentParameterivEXT;
+ interface->fGetRenderbufferParameteriv = glGetRenderbufferParameterivEXT;
+ interface->fBindFramebuffer = glBindFramebufferEXT;
+ interface->fFramebufferTexture2D = glFramebufferTexture2DEXT;
+ interface->fCheckFramebufferStatus = glCheckFramebufferStatusEXT;
+ interface->fDeleteFramebuffers = glDeleteFramebuffersEXT;
+ interface->fRenderbufferStorage = glRenderbufferStorageEXT;
+ interface->fGenRenderbuffers = glGenRenderbuffersEXT;
+ interface->fDeleteRenderbuffers = glDeleteRenderbuffersEXT;
+ interface->fFramebufferRenderbuffer =
+ glFramebufferRenderbufferEXT;
+ interface->fBindRenderbuffer = glBindRenderbufferEXT;
+ #if GL_EXT_framebuffer_multisample
+ interface->fRenderbufferStorageMultisample =
+ glRenderbufferStorageMultisampleEXT;
+ #endif
+ #if GL_EXT_framebuffer_blit
+ interface->fBlitFramebuffer = glBlitFramebufferEXT;
+ #endif
+ #endif
+ interface->fBindFragDataLocationIndexed = NULL;
+
+ interface->fBindingsExported = kDesktop_GrGLBinding;
+ }
+ // Hand the caller its own reference to the cached interface.
+ glInterface.get()->ref();
+ return glInterface.get();
+}
diff --git a/src/gpu/mesa/GrGLDefaultInterface_mesa.cpp b/src/gpu/mesa/GrGLDefaultInterface_mesa.cpp
new file mode 100644
index 0000000000..793e65c61e
--- /dev/null
+++ b/src/gpu/mesa/GrGLDefaultInterface_mesa.cpp
@@ -0,0 +1,188 @@
+
+/*
+ * Copyright 2011 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+#include "GrGLInterface.h"
+
+#include "GL/osmesa.h"
+#include <GL/glext.h>
+#include <GL/glu.h>
+
+#define GR_GL_GET_PROC(F) interface->f ## F = (GrGL ## F ## Proc) \
+ OSMesaGetProcAddress("gl" #F);
+#define GR_GL_GET_PROC_SUFFIX(F, S) interface->f ## F = (GrGL ## F ## Proc) \
+ OSMesaGetProcAddress("gl" #F #S);
+
+const GrGLInterface* GrGLDefaultInterface() {
+ if (NULL != OSMesaGetCurrentContext()) {
+ const char* versionString = (const char*) glGetString(GL_VERSION);
+ const char* extString = (const char*) glGetString(GL_EXTENSIONS);
+ GrGLVersion glVer = GrGLGetVersionFromString(versionString);
+
+ if (glVer < GR_GL_VER(1,5)) {
+ // We must have array and element_array buffer objects.
+ return NULL;
+ }
+ GrGLInterface* interface = new GrGLInterface();
+ interface->fNPOTRenderTargetSupport = kProbe_GrGLCapability;
+ interface->fMinRenderTargetHeight = kProbe_GrGLCapability;
+ interface->fMinRenderTargetWidth = kProbe_GrGLCapability;
+
+ interface->fActiveTexture = glActiveTexture;GrGLIn
+ GR_GL_GET_PROC(AttachShader);
+ GR_GL_GET_PROC(BindAttribLocation);
+ GR_GL_GET_PROC(BindBuffer);
+ GR_GL_GET_PROC(BindFragDataLocation);
+ interface->fBindTexture = glBindTexture;
+ interface->fBlendColor = glBlendColor;
+ interface->fBlendFunc = glBlendFunc;
+ GR_GL_GET_PROC(BufferData);
+ GR_GL_GET_PROC(BufferSubData);
+ interface->fClear = glClear;
+ interface->fClearColor = glClearColor;
+ interface->fClearStencil = glClearStencil;
+ interface->fClientActiveTexture = glClientActiveTexture;
+ interface->fColorMask = glColorMask;
+ interface->fColorPointer = glColorPointer;
+ interface->fColor4ub = glColor4ub;
+ GR_GL_GET_PROC(CompileShader);
+ interface->fCompressedTexImage2D = glCompressedTexImage2D;
+ GR_GL_GET_PROC(CreateProgram);
+ GR_GL_GET_PROC(CreateShader);
+ interface->fCullFace = glCullFace;
+ GR_GL_GET_PROC(DeleteBuffers);
+ GR_GL_GET_PROC(DeleteProgram);
+ GR_GL_GET_PROC(DeleteShader);
+ interface->fDeleteTextures = glDeleteTextures;
+ interface->fDepthMask = glDepthMask;
+ interface->fDisable = glDisable;
+ interface->fDisableClientState = glDisableClientState;
+ GR_GL_GET_PROC(DisableVertexAttribArray);
+ interface->fDrawArrays = glDrawArrays;
+ interface->fDrawBuffer = glDrawBuffer;
+ GR_GL_GET_PROC(DrawBuffers);
+ interface->fDrawElements = glDrawElements;
+ interface->fEnable = glEnable;
+ interface->fEnableClientState = glEnableClientState;
+ GR_GL_GET_PROC(EnableVertexAttribArray);
+ interface->fFrontFace = glFrontFace;
+ GR_GL_GET_PROC(GenBuffers);
+ GR_GL_GET_PROC(GetBufferParameteriv);
+ interface->fGetError = glGetError;
+ interface->fGetIntegerv = glGetIntegerv;
+ GR_GL_GET_PROC(GetProgramInfoLog);
+ GR_GL_GET_PROC(GetProgramiv);
+ GR_GL_GET_PROC(GetShaderInfoLog);
+ GR_GL_GET_PROC(GetShaderiv);
+ interface->fGetString = glGetString;
+ interface->fGetTexLevelParameteriv = glGetTexLevelParameteriv;
+ interface->fGenTextures = glGenTextures;
+ GR_GL_GET_PROC(GetUniformLocation);
+ interface->fLineWidth = glLineWidth;
+ GR_GL_GET_PROC(LinkProgram);
+ interface->fLoadMatrixf = glLoadMatrixf;
+ GR_GL_GET_PROC(MapBuffer);
+ interface->fMatrixMode = glMatrixMode;
+ interface->fPointSize = glPointSize;
+ interface->fPixelStorei = glPixelStorei;
+ interface->fReadBuffer = glReadBuffer;
+ interface->fReadPixels = glReadPixels;
+ interface->fScissor = glScissor;
+ interface->fShadeModel = glShadeModel;
+ GR_GL_GET_PROC(ShaderSource);
+ interface->fStencilFunc = glStencilFunc;
+ GR_GL_GET_PROC(StencilFuncSeparate);
+ interface->fStencilMask = glStencilMask;
+ GR_GL_GET_PROC(StencilMaskSeparate);
+ interface->fStencilOp = glStencilOp;
+ GR_GL_GET_PROC(StencilOpSeparate);
+ interface->fTexCoordPointer = glTexCoordPointer;
+ interface->fTexEnvi = glTexEnvi;
+ //OSMesa on Mac's glTexImage2D takes a GLenum for internalFormat rather than a GLint.
+ interface->fTexImage2D = reinterpret_cast<GrGLTexImage2DProc>(glTexImage2D);
+ interface->fTexParameteri = glTexParameteri;
+ interface->fTexSubImage2D = glTexSubImage2D;
+ GR_GL_GET_PROC(Uniform1f);
+ GR_GL_GET_PROC(Uniform1i);
+ GR_GL_GET_PROC(Uniform1fv);
+ GR_GL_GET_PROC(Uniform1iv);
+ GR_GL_GET_PROC(Uniform2f);
+ GR_GL_GET_PROC(Uniform2i);
+ GR_GL_GET_PROC(Uniform2fv);
+ GR_GL_GET_PROC(Uniform2iv);
+ GR_GL_GET_PROC(Uniform3f);
+ GR_GL_GET_PROC(Uniform3i);
+ GR_GL_GET_PROC(Uniform3fv);
+ GR_GL_GET_PROC(Uniform3iv);
+ GR_GL_GET_PROC(Uniform4f);
+ GR_GL_GET_PROC(Uniform4i);
+ GR_GL_GET_PROC(Uniform4fv);
+ GR_GL_GET_PROC(Uniform4iv);
+ GR_GL_GET_PROC(UniformMatrix2fv);
+ GR_GL_GET_PROC(UniformMatrix3fv);
+ GR_GL_GET_PROC(UniformMatrix4fv);
+ GR_GL_GET_PROC(UnmapBuffer);
+ GR_GL_GET_PROC(UseProgram);
+ GR_GL_GET_PROC(VertexAttrib4fv);
+ GR_GL_GET_PROC(VertexAttribPointer);
+ interface->fVertexPointer = glVertexPointer;
+ interface->fViewport = glViewport;
+
+ // First look for GL3.0 FBO or GL_ARB_framebuffer_object (same since
+ // GL_ARB_framebuffer_object doesn't use ARB suffix.)
+ if (glVer >= GR_GL_VER(3,0) ||
+ GrGLHasExtensionFromString("GL_ARB_framebuffer_object",
+ extString)) {
+ GR_GL_GET_PROC(GenFramebuffers);
+ GR_GL_GET_PROC(GetFramebufferAttachmentParameteriv);
+ GR_GL_GET_PROC(GetRenderbufferParameteriv);
+ GR_GL_GET_PROC(BindFramebuffer);
+ GR_GL_GET_PROC(FramebufferTexture2D);
+ GR_GL_GET_PROC(CheckFramebufferStatus);
+ GR_GL_GET_PROC(DeleteFramebuffers);
+ GR_GL_GET_PROC(RenderbufferStorage);
+ GR_GL_GET_PROC(GenRenderbuffers);
+ GR_GL_GET_PROC(DeleteRenderbuffers);
+ GR_GL_GET_PROC(FramebufferRenderbuffer);
+ GR_GL_GET_PROC(BindRenderbuffer);
+ GR_GL_GET_PROC(RenderbufferStorageMultisample);
+ GR_GL_GET_PROC(BlitFramebuffer);
+ } else if (GrGLHasExtensionFromString("GL_EXT_framebuffer_object",
+ extString)) {
+ GR_GL_GET_PROC_SUFFIX(GenFramebuffers, EXT);
+ GR_GL_GET_PROC_SUFFIX(GetFramebufferAttachmentParameteriv, EXT);
+ GR_GL_GET_PROC_SUFFIX(GetRenderbufferParameteriv, EXT);
+ GR_GL_GET_PROC_SUFFIX(BindFramebuffer, EXT);
+ GR_GL_GET_PROC_SUFFIX(FramebufferTexture2D, EXT);
+ GR_GL_GET_PROC_SUFFIX(CheckFramebufferStatus, EXT);
+ GR_GL_GET_PROC_SUFFIX(DeleteFramebuffers, EXT);
+ GR_GL_GET_PROC_SUFFIX(RenderbufferStorage, EXT);
+ GR_GL_GET_PROC_SUFFIX(GenRenderbuffers, EXT);
+ GR_GL_GET_PROC_SUFFIX(DeleteRenderbuffers, EXT);
+ GR_GL_GET_PROC_SUFFIX(FramebufferRenderbuffer, EXT);
+ GR_GL_GET_PROC_SUFFIX(BindRenderbuffer, EXT);
+ if (GrGLHasExtensionFromString("GL_EXT_framebuffer_multisample",
+ extString)) {
+ GR_GL_GET_PROC_SUFFIX(RenderbufferStorageMultisample, EXT);
+ }
+ if (GrGLHasExtensionFromString("GL_EXT_framebuffer_blit",
+ extString)) {
+ GR_GL_GET_PROC_SUFFIX(BlitFramebuffer, EXT);
+ }
+ } else {
+ // we must have FBOs
+ delete interface;
+ return NULL;
+ }
+ GR_GL_GET_PROC(BindFragDataLocationIndexed);
+ interface->fBindingsExported = kDesktop_GrGLBinding;
+ return interface;
+ } else {
+ return NULL;
+ }
+}
diff --git a/src/gpu/unix/GrGLDefaultInterface_unix.cpp b/src/gpu/unix/GrGLDefaultInterface_unix.cpp
new file mode 100644
index 0000000000..041caec1c5
--- /dev/null
+++ b/src/gpu/unix/GrGLDefaultInterface_unix.cpp
@@ -0,0 +1,191 @@
+
+/*
+ * Copyright 2011 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+#include "GrGLInterface.h"
+
+#include <GL/glx.h>
+#include <GL/gl.h>
+#include <GL/glext.h>
+#include <GL/glu.h>
+
+#define GR_GL_GET_PROC(F) interface->f ## F = (GrGL ## F ## Proc) \
+ glXGetProcAddress(reinterpret_cast<const GLubyte*>("gl" #F));
+#define GR_GL_GET_PROC_SUFFIX(F, S) interface->f ## F = (GrGL ## F ## Proc) \
+ glXGetProcAddress(reinterpret_cast<const GLubyte*>("gl" #F #S));
+
+const GrGLInterface* GrGLDefaultInterface() {
+ if (NULL != glXGetCurrentContext()) {
+ const char* versionString = (const char*) glGetString(GL_VERSION);
+ const char* extString = (const char*) glGetString(GL_EXTENSIONS);
+ GrGLVersion glVer = GrGLGetVersionFromString(versionString);
+
+ if (glVer < GR_GL_VER(1,5)) {
+ // We must have array and element_array buffer objects.
+ return NULL;
+ }
+
+ GrGLInterface* interface = new GrGLInterface();
+
+ interface->fNPOTRenderTargetSupport = kProbe_GrGLCapability;
+ interface->fMinRenderTargetHeight = kProbe_GrGLCapability;
+ interface->fMinRenderTargetWidth = kProbe_GrGLCapability;
+
+ interface->fActiveTexture = glActiveTexture;
+ GR_GL_GET_PROC(AttachShader);
+ GR_GL_GET_PROC(BindAttribLocation);
+ GR_GL_GET_PROC(BindBuffer);
+ GR_GL_GET_PROC(BindFragDataLocation);
+ interface->fBindTexture = glBindTexture;
+ interface->fBlendColor = glBlendColor;
+ interface->fBlendFunc = glBlendFunc;
+ GR_GL_GET_PROC(BufferData);
+ GR_GL_GET_PROC(BufferSubData);
+ interface->fClear = glClear;
+ interface->fClearColor = glClearColor;
+ interface->fClearStencil = glClearStencil;
+ interface->fClientActiveTexture = glClientActiveTexture;
+ interface->fColorMask = glColorMask;
+ interface->fColorPointer = glColorPointer;
+ interface->fColor4ub = glColor4ub;
+ GR_GL_GET_PROC(CompileShader);
+ interface->fCompressedTexImage2D = glCompressedTexImage2D;
+ GR_GL_GET_PROC(CreateProgram);
+ GR_GL_GET_PROC(CreateShader);
+ interface->fCullFace = glCullFace;
+ GR_GL_GET_PROC(DeleteBuffers);
+ GR_GL_GET_PROC(DeleteProgram);
+ GR_GL_GET_PROC(DeleteShader);
+ interface->fDeleteTextures = glDeleteTextures;
+ interface->fDepthMask = glDepthMask;
+ interface->fDisable = glDisable;
+ interface->fDisableClientState = glDisableClientState;
+ GR_GL_GET_PROC(DisableVertexAttribArray);
+ interface->fDrawArrays = glDrawArrays;
+ interface->fDrawBuffer = glDrawBuffer;
+ GR_GL_GET_PROC(DrawBuffers);
+ interface->fDrawElements = glDrawElements;
+ interface->fEnable = glEnable;
+ interface->fEnableClientState = glEnableClientState;
+ GR_GL_GET_PROC(EnableVertexAttribArray);
+ interface->fFrontFace = glFrontFace;
+ GR_GL_GET_PROC(GenBuffers);
+ GR_GL_GET_PROC(GetBufferParameteriv);
+ interface->fGetError = glGetError;
+ interface->fGetIntegerv = glGetIntegerv;
+ GR_GL_GET_PROC(GetProgramInfoLog);
+ GR_GL_GET_PROC(GetProgramiv);
+ GR_GL_GET_PROC(GetShaderInfoLog);
+ GR_GL_GET_PROC(GetShaderiv);
+ interface->fGetString = glGetString;
+ interface->fGetTexLevelParameteriv = glGetTexLevelParameteriv;
+ interface->fGenTextures = glGenTextures;
+ GR_GL_GET_PROC(GetUniformLocation);
+ interface->fLineWidth = glLineWidth;
+ GR_GL_GET_PROC(LinkProgram);
+ interface->fLoadMatrixf = glLoadMatrixf;
+ GR_GL_GET_PROC(MapBuffer);
+ interface->fMatrixMode = glMatrixMode;
+ interface->fPointSize = glPointSize;
+ interface->fPixelStorei = glPixelStorei;
+ interface->fReadBuffer = glReadBuffer;
+ interface->fReadPixels = glReadPixels;
+ interface->fScissor = glScissor;
+ interface->fShadeModel = glShadeModel;
+ GR_GL_GET_PROC(ShaderSource);
+ interface->fStencilFunc = glStencilFunc;
+ GR_GL_GET_PROC(StencilFuncSeparate);
+ interface->fStencilMask = glStencilMask;
+ GR_GL_GET_PROC(StencilMaskSeparate);
+ interface->fStencilOp = glStencilOp;
+ GR_GL_GET_PROC(StencilOpSeparate);
+ interface->fTexCoordPointer = glTexCoordPointer;
+ interface->fTexEnvi = glTexEnvi;
+ interface->fTexImage2D = glTexImage2D;
+ interface->fTexParameteri = glTexParameteri;
+ interface->fTexSubImage2D = glTexSubImage2D;
+ GR_GL_GET_PROC(Uniform1f);
+ GR_GL_GET_PROC(Uniform1i);
+ GR_GL_GET_PROC(Uniform1fv);
+ GR_GL_GET_PROC(Uniform1iv);
+ GR_GL_GET_PROC(Uniform2f);
+ GR_GL_GET_PROC(Uniform2i);
+ GR_GL_GET_PROC(Uniform2fv);
+ GR_GL_GET_PROC(Uniform2iv);
+ GR_GL_GET_PROC(Uniform3f);
+ GR_GL_GET_PROC(Uniform3i);
+ GR_GL_GET_PROC(Uniform3fv);
+ GR_GL_GET_PROC(Uniform3iv);
+ GR_GL_GET_PROC(Uniform4f);
+ GR_GL_GET_PROC(Uniform4i);
+ GR_GL_GET_PROC(Uniform4fv);
+ GR_GL_GET_PROC(Uniform4iv);
+ GR_GL_GET_PROC(UniformMatrix2fv);
+ GR_GL_GET_PROC(UniformMatrix3fv);
+ GR_GL_GET_PROC(UniformMatrix4fv);
+ GR_GL_GET_PROC(UnmapBuffer);
+ GR_GL_GET_PROC(UseProgram);
+ GR_GL_GET_PROC(VertexAttrib4fv);
+ GR_GL_GET_PROC(VertexAttribPointer);
+ interface->fVertexPointer = glVertexPointer;
+ interface->fViewport = glViewport;
+ GR_GL_GET_PROC(BindFragDataLocationIndexed);
+
+ // First look for GL3.0 FBO or GL_ARB_framebuffer_object (same since
+ // GL_ARB_framebuffer_object doesn't use ARB suffix.)
+ if (glVer >= GR_GL_VER(3,0) ||
+ GrGLHasExtensionFromString("GL_ARB_framebuffer_object",
+ extString)) {
+ GR_GL_GET_PROC(GenFramebuffers);
+ GR_GL_GET_PROC(GetFramebufferAttachmentParameteriv);
+ GR_GL_GET_PROC(GetRenderbufferParameteriv);
+ GR_GL_GET_PROC(BindFramebuffer);
+ GR_GL_GET_PROC(FramebufferTexture2D);
+ GR_GL_GET_PROC(CheckFramebufferStatus);
+ GR_GL_GET_PROC(DeleteFramebuffers);
+ GR_GL_GET_PROC(RenderbufferStorage);
+ GR_GL_GET_PROC(GenRenderbuffers);
+ GR_GL_GET_PROC(DeleteRenderbuffers);
+ GR_GL_GET_PROC(FramebufferRenderbuffer);
+ GR_GL_GET_PROC(BindRenderbuffer);
+ GR_GL_GET_PROC(RenderbufferStorageMultisample);
+ GR_GL_GET_PROC(BlitFramebuffer);
+ } else if (GrGLHasExtensionFromString("GL_EXT_framebuffer_object",
+ extString)) {
+ GR_GL_GET_PROC_SUFFIX(GenFramebuffers, EXT);
+ GR_GL_GET_PROC_SUFFIX(GetFramebufferAttachmentParameteriv, EXT);
+ GR_GL_GET_PROC_SUFFIX(GetRenderbufferParameteriv, EXT);
+ GR_GL_GET_PROC_SUFFIX(BindFramebuffer, EXT);
+ GR_GL_GET_PROC_SUFFIX(FramebufferTexture2D, EXT);
+ GR_GL_GET_PROC_SUFFIX(CheckFramebufferStatus, EXT);
+ GR_GL_GET_PROC_SUFFIX(DeleteFramebuffers, EXT);
+ GR_GL_GET_PROC_SUFFIX(RenderbufferStorage, EXT);
+ GR_GL_GET_PROC_SUFFIX(GenRenderbuffers, EXT);
+ GR_GL_GET_PROC_SUFFIX(DeleteRenderbuffers, EXT);
+ GR_GL_GET_PROC_SUFFIX(FramebufferRenderbuffer, EXT);
+ GR_GL_GET_PROC_SUFFIX(BindRenderbuffer, EXT);
+ if (GrGLHasExtensionFromString("GL_EXT_framebuffer_multisample",
+ extString)) {
+ GR_GL_GET_PROC_SUFFIX(RenderbufferStorageMultisample, EXT);
+ }
+ if (GrGLHasExtensionFromString("GL_EXT_framebuffer_blit",
+ extString)) {
+ GR_GL_GET_PROC_SUFFIX(BlitFramebuffer, EXT);
+ }
+ } else {
+ // we must have FBOs
+ delete interface;
+ return NULL;
+ }
+ interface->fBindingsExported = kDesktop_GrGLBinding;
+
+ return interface;
+ } else {
+ return NULL;
+ }
+}
diff --git a/src/gpu/win/GrGLDefaultInterface_win.cpp b/src/gpu/win/GrGLDefaultInterface_win.cpp
new file mode 100644
index 0000000000..609869f83d
--- /dev/null
+++ b/src/gpu/win/GrGLDefaultInterface_win.cpp
@@ -0,0 +1,197 @@
+
+/*
+ * Copyright 2011 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+#include "GrGLInterface.h"
+
+#include <Windows.h>
+#include <GL/GL.h>
+
+/*
+ * Windows makes the GL funcs all be __stdcall instead of __cdecl :(
+ * This implementation will only work if GR_GL_FUNCTION_TYPE is __stdcall.
+ * Otherwise, a springboard would be needed that hides the calling convention.
+ */
+
+#define GR_GL_GET_PROC(F) interface->f ## F = (GrGL ## F ## Proc) wglGetProcAddress("gl" #F);
+#define GR_GL_GET_PROC_SUFFIX(F, S) interface->f ## F = (GrGL ## F ## Proc) wglGetProcAddress("gl" #F #S);
+
+const GrGLInterface* GrGLDefaultInterface() {
+ // wglGetProcAddress requires a context.
+ // GL Function pointers retrieved in one context may not be valid in another
+ // context. For that reason we create a new GrGLInterface each time we're
+ // called.
+ if (NULL != wglGetCurrentContext()) {
+ const char* versionString = (const char*) glGetString(GL_VERSION);
+ const char* extString = (const char*) glGetString(GL_EXTENSIONS);
+ GrGLVersion glVer = GrGLGetVersionFromString(versionString);
+
+ if (glVer < GR_GL_VER(1,5)) {
+ // We must have array and element_array buffer objects.
+ return NULL;
+ }
+ GrGLInterface* interface = new GrGLInterface();
+
+ interface->fNPOTRenderTargetSupport = kProbe_GrGLCapability;
+ interface->fMinRenderTargetHeight = kProbe_GrGLCapability;
+ interface->fMinRenderTargetWidth = kProbe_GrGLCapability;
+
+ // Functions that are part of GL 1.1 will return NULL in
+ // wglGetProcAddress
+ interface->fBindTexture = glBindTexture;
+ interface->fBlendFunc = glBlendFunc;
+ interface->fClear = glClear;
+ interface->fClearColor = glClearColor;
+ interface->fClearStencil = glClearStencil;
+ interface->fColor4ub = glColor4ub;
+ interface->fColorMask = glColorMask;
+ interface->fColorPointer = glColorPointer;
+ interface->fCullFace = glCullFace;
+ interface->fDeleteTextures = glDeleteTextures;
+ interface->fDepthMask = glDepthMask;
+ interface->fDisable = glDisable;
+ interface->fDisableClientState = glDisableClientState;
+ interface->fDrawArrays = glDrawArrays;
+ interface->fDrawElements = glDrawElements;
+ interface->fDrawBuffer = glDrawBuffer;
+ interface->fEnable = glEnable;
+ interface->fEnableClientState = glEnableClientState;
+ interface->fFrontFace = glFrontFace;
+ interface->fGenTextures = glGenTextures;
+ interface->fGetError = glGetError;
+ interface->fGetIntegerv = glGetIntegerv;
+ interface->fGetString = glGetString;
+ interface->fGetTexLevelParameteriv = glGetTexLevelParameteriv;
+ interface->fLineWidth = glLineWidth;
+ interface->fLoadMatrixf = glLoadMatrixf;
+ interface->fMatrixMode = glMatrixMode;
+ interface->fPixelStorei = glPixelStorei;
+ interface->fPointSize = glPointSize;
+ interface->fReadBuffer = glReadBuffer;
+ interface->fReadPixels = glReadPixels;
+ interface->fScissor = glScissor;
+ interface->fShadeModel = glShadeModel;
+ interface->fStencilFunc = glStencilFunc;
+ interface->fStencilMask = glStencilMask;
+ interface->fStencilOp = glStencilOp;
+ interface->fTexImage2D = glTexImage2D;
+ interface->fTexParameteri = glTexParameteri;
+ interface->fTexCoordPointer = glTexCoordPointer;
+ interface->fTexEnvi = glTexEnvi;
+ interface->fTexSubImage2D = glTexSubImage2D;
+ interface->fViewport = glViewport;
+ interface->fVertexPointer = glVertexPointer;
+
+ GR_GL_GET_PROC(ActiveTexture);
+ GR_GL_GET_PROC(AttachShader);
+ GR_GL_GET_PROC(BindAttribLocation);
+ GR_GL_GET_PROC(BindBuffer);
+ GR_GL_GET_PROC(BindFragDataLocation);
+ GR_GL_GET_PROC(BlendColor);
+ GR_GL_GET_PROC(BufferData);
+ GR_GL_GET_PROC(BufferSubData);
+ GR_GL_GET_PROC(ClientActiveTexture);
+ GR_GL_GET_PROC(CompileShader);
+ GR_GL_GET_PROC(CompressedTexImage2D);
+ GR_GL_GET_PROC(CreateProgram);
+ GR_GL_GET_PROC(CreateShader);
+ GR_GL_GET_PROC(DeleteBuffers);
+ GR_GL_GET_PROC(DeleteProgram);
+ GR_GL_GET_PROC(DeleteShader);
+ GR_GL_GET_PROC(DisableVertexAttribArray);
+ GR_GL_GET_PROC(DrawBuffers);
+ GR_GL_GET_PROC(EnableVertexAttribArray);
+ GR_GL_GET_PROC(GenBuffers);
+ GR_GL_GET_PROC(GetBufferParameteriv);
+ GR_GL_GET_PROC(GetProgramInfoLog);
+ GR_GL_GET_PROC(GetProgramiv);
+ GR_GL_GET_PROC(GetShaderInfoLog);
+ GR_GL_GET_PROC(GetShaderiv);
+ GR_GL_GET_PROC(GetUniformLocation);
+ GR_GL_GET_PROC(LinkProgram);
+ GR_GL_GET_PROC(ShaderSource);
+ GR_GL_GET_PROC(StencilFuncSeparate);
+ GR_GL_GET_PROC(StencilMaskSeparate);
+ GR_GL_GET_PROC(StencilOpSeparate);
+ GR_GL_GET_PROC(Uniform1f);
+ GR_GL_GET_PROC(Uniform1i);
+ GR_GL_GET_PROC(Uniform1fv);
+ GR_GL_GET_PROC(Uniform1iv);
+ GR_GL_GET_PROC(Uniform2f);
+ GR_GL_GET_PROC(Uniform2i);
+ GR_GL_GET_PROC(Uniform2fv);
+ GR_GL_GET_PROC(Uniform2iv);
+ GR_GL_GET_PROC(Uniform3f);
+ GR_GL_GET_PROC(Uniform3i);
+ GR_GL_GET_PROC(Uniform3fv);
+ GR_GL_GET_PROC(Uniform3iv);
+ GR_GL_GET_PROC(Uniform4f);
+ GR_GL_GET_PROC(Uniform4i);
+ GR_GL_GET_PROC(Uniform4fv);
+ GR_GL_GET_PROC(Uniform4iv);
+ GR_GL_GET_PROC(UniformMatrix2fv);
+ GR_GL_GET_PROC(UniformMatrix3fv);
+ GR_GL_GET_PROC(UniformMatrix4fv);
+ GR_GL_GET_PROC(UseProgram);
+ GR_GL_GET_PROC(VertexAttrib4fv);
+ GR_GL_GET_PROC(VertexAttribPointer);
+ GR_GL_GET_PROC(BindFragDataLocationIndexed);
+
+ // First look for GL3.0 FBO or GL_ARB_framebuffer_object (same since
+ // GL_ARB_framebuffer_object doesn't use ARB suffix.)
+ if (glVer > GR_GL_VER(3,0) ||
+ GrGLHasExtensionFromString("GL_ARB_framebuffer_object", extString)) {
+ GR_GL_GET_PROC(GenFramebuffers);
+ GR_GL_GET_PROC(GetFramebufferAttachmentParameteriv);
+ GR_GL_GET_PROC(GetRenderbufferParameteriv);
+ GR_GL_GET_PROC(BindFramebuffer);
+ GR_GL_GET_PROC(FramebufferTexture2D);
+ GR_GL_GET_PROC(CheckFramebufferStatus);
+ GR_GL_GET_PROC(DeleteFramebuffers);
+ GR_GL_GET_PROC(RenderbufferStorage);
+ GR_GL_GET_PROC(GenRenderbuffers);
+ GR_GL_GET_PROC(DeleteRenderbuffers);
+ GR_GL_GET_PROC(FramebufferRenderbuffer);
+ GR_GL_GET_PROC(BindRenderbuffer);
+ GR_GL_GET_PROC(RenderbufferStorageMultisample);
+ GR_GL_GET_PROC(BlitFramebuffer);
+ } else if (GrGLHasExtensionFromString("GL_EXT_framebuffer_object",
+ extString)) {
+ GR_GL_GET_PROC_SUFFIX(GenFramebuffers, EXT);
+ GR_GL_GET_PROC_SUFFIX(GetFramebufferAttachmentParameteriv, EXT);
+ GR_GL_GET_PROC_SUFFIX(GetRenderbufferParameteriv, EXT);
+ GR_GL_GET_PROC_SUFFIX(BindFramebuffer, EXT);
+ GR_GL_GET_PROC_SUFFIX(FramebufferTexture2D, EXT);
+ GR_GL_GET_PROC_SUFFIX(CheckFramebufferStatus, EXT);
+ GR_GL_GET_PROC_SUFFIX(DeleteFramebuffers, EXT);
+ GR_GL_GET_PROC_SUFFIX(RenderbufferStorage, EXT);
+ GR_GL_GET_PROC_SUFFIX(GenRenderbuffers, EXT);
+ GR_GL_GET_PROC_SUFFIX(DeleteRenderbuffers, EXT);
+ GR_GL_GET_PROC_SUFFIX(FramebufferRenderbuffer, EXT);
+ GR_GL_GET_PROC_SUFFIX(BindRenderbuffer, EXT);
+ if (GrGLHasExtensionFromString("GL_EXT_framebuffer_multisample", extString)) {
+ GR_GL_GET_PROC_SUFFIX(RenderbufferStorageMultisample, EXT);
+ }
+ if (GrGLHasExtensionFromString("GL_EXT_framebuffer_blit", extString)) {
+ GR_GL_GET_PROC_SUFFIX(BlitFramebuffer, EXT);
+ }
+ } else {
+ // we must have FBOs
+ delete interface;
+ return NULL;
+ }
+ GR_GL_GET_PROC(MapBuffer);
+ GR_GL_GET_PROC(UnmapBuffer);
+
+ interface->fBindingsExported = kDesktop_GrGLBinding;
+
+ return interface;
+ } else {
+ return NULL;
+ }
+}