Diffstat (limited to 'gpu')
-rw-r--r--  gpu/include/FlingState.h  59
-rw-r--r--  gpu/include/GrAPI.h  37
-rw-r--r--  gpu/include/GrAllocPool.h  71
-rwxr-xr-x  gpu/include/GrAllocator.h  230
-rw-r--r--  gpu/include/GrAtlas.h  88
-rw-r--r--  gpu/include/GrClip.h  115
-rw-r--r--  gpu/include/GrClipIterator.h  81
-rw-r--r--  gpu/include/GrColor.h  72
-rw-r--r--  gpu/include/GrConfig.h  344
-rw-r--r--  gpu/include/GrContext.h  322
-rw-r--r--  gpu/include/GrDrawTarget.h  736
-rw-r--r--  gpu/include/GrFontScaler.h  43
-rw-r--r--  gpu/include/GrGLConfig.h  323
-rw-r--r--  gpu/include/GrGLIndexBuffer.h  53
-rw-r--r--  gpu/include/GrGLTexture.h  166
-rw-r--r--  gpu/include/GrGLVertexBuffer.h  55
-rw-r--r--  gpu/include/GrGlyph.h  89
-rw-r--r--  gpu/include/GrGpu.h  446
-rw-r--r--  gpu/include/GrGpuD3D9.h  259
-rw-r--r--  gpu/include/GrGpuVertex.h  104
-rw-r--r--  gpu/include/GrIPoint.h  35
-rw-r--r--  gpu/include/GrInOrderDrawBuffer.h  131
-rw-r--r--  gpu/include/GrIndexBuffer.h  92
-rw-r--r--  gpu/include/GrInstanceCounter.h  47
-rw-r--r--  gpu/include/GrKey.h  47
-rw-r--r--  gpu/include/GrMatrix.h  370
-rw-r--r--  gpu/include/GrMemory.h  151
-rw-r--r--  gpu/include/GrMesh.h  42
-rw-r--r--  gpu/include/GrNoncopyable.h  38
-rw-r--r--  gpu/include/GrPath.h  84
-rw-r--r--  gpu/include/GrPathIter.h  110
-rw-r--r--  gpu/include/GrPathSink.h  36
-rw-r--r--  gpu/include/GrPlotMgr.h  84
-rw-r--r--  gpu/include/GrPoint.h  287
-rw-r--r--  gpu/include/GrRandom.h  62
-rw-r--r--  gpu/include/GrRect.h  284
-rw-r--r--  gpu/include/GrRectanizer.h  64
-rw-r--r--  gpu/include/GrRefCnt.h  125
-rw-r--r--  gpu/include/GrSamplerState.h  130
-rw-r--r--  gpu/include/GrScalar.h  116
-rw-r--r--  gpu/include/GrStopwatch.h  135
-rw-r--r--  gpu/include/GrStringBuilder.h  182
-rw-r--r--  gpu/include/GrTArray.h  298
-rw-r--r--  gpu/include/GrTBSearch.h  53
-rw-r--r--  gpu/include/GrTDArray.h  222
-rw-r--r--  gpu/include/GrTHashCache.h  226
-rw-r--r--  gpu/include/GrTLList.h  61
-rw-r--r--  gpu/include/GrTextContext.h  67
-rw-r--r--  gpu/include/GrTextStrike.h  119
-rw-r--r--  gpu/include/GrTexture.h  213
-rw-r--r--  gpu/include/GrTextureCache.h  289
-rw-r--r--  gpu/include/GrTouchGesture.h  56
-rw-r--r--  gpu/include/GrTypes.h  142
-rw-r--r--  gpu/include/GrUserConfig.h  57
-rw-r--r--  gpu/include/GrVertexBuffer.h  92
-rw-r--r--  gpu/include/GrVertexBufferAllocPool.h  141
-rw-r--r--  gpu/include/SkGpuCanvas.h  72
-rw-r--r--  gpu/include/SkGr.h  238
-rw-r--r--  gpu/include/SkGrTexturePixelRef.h  50
-rw-r--r--  gpu/include/SkUIView.h  64
-rw-r--r--  gpu/src/FlingState.cpp  134
-rw-r--r--  gpu/src/GrAllocPool.cpp  127
-rw-r--r--  gpu/src/GrAtlas.cpp  187
-rw-r--r--  gpu/src/GrClip.cpp  136
-rw-r--r--  gpu/src/GrContext.cpp  1040
-rw-r--r--  gpu/src/GrDrawMesh.cpp  140
-rw-r--r--  gpu/src/GrDrawTarget.cpp  296
-rw-r--r--  gpu/src/GrGLIndexBuffer.cpp  106
-rw-r--r--  gpu/src/GrGLTexture.cpp  174
-rw-r--r--  gpu/src/GrGLVertexBuffer.cpp  103
-rw-r--r--  gpu/src/GrGpu.cpp  343
-rw-r--r--  gpu/src/GrGpuD3D9.cpp  1484
-rw-r--r--  gpu/src/GrGpuFactory.cpp  78
-rw-r--r--  gpu/src/GrGpuGL.cpp  1824
-rw-r--r--  gpu/src/GrGpuGL.h  188
-rw-r--r--  gpu/src/GrGpuGLFixed.cpp  342
-rw-r--r--  gpu/src/GrGpuGLFixed.h  68
-rw-r--r--  gpu/src/GrGpuGLShaders.cpp  937
-rw-r--r--  gpu/src/GrGpuGLShaders.h  153
-rw-r--r--  gpu/src/GrGpuGLShaders2.cpp  1388
-rw-r--r--  gpu/src/GrGpuGLShaders2.h  102
-rw-r--r--  gpu/src/GrInOrderDrawBuffer.cpp  345
-rw-r--r--  gpu/src/GrMatrix.cpp  767
-rw-r--r--  gpu/src/GrMemory.cpp  36
-rw-r--r--  gpu/src/GrPath.cpp  173
-rw-r--r--  gpu/src/GrPrintf_printf.cpp  36
-rw-r--r--  gpu/src/GrPrintf_skia.cpp  39
-rw-r--r--  gpu/src/GrQuadIndexTable.h  98
-rw-r--r--  gpu/src/GrRectanizer.cpp  130
-rw-r--r--  gpu/src/GrRectanizer_fifo.cpp  130
-rw-r--r--  gpu/src/GrTextContext.cpp  244
-rw-r--r--  gpu/src/GrTextStrike.cpp  204
-rw-r--r--  gpu/src/GrTextStrike_impl.h  113
-rw-r--r--  gpu/src/GrTextureCache.cpp  297
-rw-r--r--  gpu/src/GrTouchGesture.cpp  243
-rw-r--r--  gpu/src/GrVertexBufferAllocPool.cpp  220
-rw-r--r--  gpu/src/app-android.cpp  387
-rw-r--r--  gpu/src/gr_files.mk  23
-rw-r--r--  gpu/src/gr_hello_world.cpp  30
-rw-r--r--  gpu/src/gr_unittests.cpp  143
-rw-r--r--  gpu/src/skia/SkGpuCanvas.cpp  60
-rw-r--r--  gpu/src/skia/SkGpuDevice.cpp  1048
-rw-r--r--  gpu/src/skia/SkGpuDevice.h  176
-rw-r--r--  gpu/src/skia/SkGr.cpp  206
-rw-r--r--  gpu/src/skia/SkGrFontScaler.cpp  142
-rw-r--r--  gpu/src/skia/SkGrTexturePixelRef.cpp  30
-rw-r--r--  gpu/src/skia/SkUIView.mm  858
-rw-r--r--  gpu/src/skia/skgr_files.mk  7
108 files changed, 24340 insertions, 0 deletions
diff --git a/gpu/include/FlingState.h b/gpu/include/FlingState.h
new file mode 100644
index 0000000000..a1da4fbc86
--- /dev/null
+++ b/gpu/include/FlingState.h
@@ -0,0 +1,59 @@
+/*
+ Copyright 2010 Google Inc.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+ */
+
+
+#ifndef SkFlingState_DEFINED
+#define SkFlingState_DEFINED
+
+#include "SkScalar.h"
+#include "SkPoint.h"
+
+class SkMatrix;
+
+struct FlingState {
+ FlingState() : fActive(false) {}
+
+ bool isActive() const { return fActive; }
+ void stop() { fActive = false; }
+
+ void reset(float sx, float sy);
+ bool evaluateMatrix(SkMatrix* matrix);
+
+private:
+ SkPoint fDirection;
+ SkScalar fSpeed0;
+ double fTime0;
+ bool fActive;
+};
+
+class GrAnimateFloat {
+public:
+ GrAnimateFloat();
+
+ void start(float v0, float v1, float duration);
+ bool isActive() const { return fTime0 != 0; }
+ void stop() { fTime0 = 0; }
+
+ float evaluate();
+
+private:
+ float fValue0, fValue1, fDuration;
+ SkMSec fTime0;
+};
+
+#endif
+
+
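For reference, a minimal usage sketch of GrAnimateFloat as declared above (not part of the diff); the duration unit and the surrounding draw loop are assumptions:

    struct ZoomAnimator {
        GrAnimateFloat fZoomAnim;

        void beginZoom() {
            // animate a zoom factor from 1.0 to 2.0 over half a second
            fZoomAnim.start(1.0f, 2.0f, 0.5f);
        }

        void onDraw() {
            if (fZoomAnim.isActive()) {
                float zoom = fZoomAnim.evaluate();  // current interpolated value
                // ... apply zoom to the view matrix and schedule another frame ...
            }
        }
    };
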
diff --git a/gpu/include/GrAPI.h b/gpu/include/GrAPI.h
new file mode 100644
index 0000000000..b660e8d869
--- /dev/null
+++ b/gpu/include/GrAPI.h
@@ -0,0 +1,37 @@
+
+
+class GrAPI {
+public:
+
+ void setRenderTarget(GrRenderTarget* target);
+
+ void setMatrix(const GrMatrix&);
+
+ void setClip(rect, bool aa);
+ void setClip(rect[], bool aa);
+ void setClip(path, bool aa);
+ void setClip(rect, texture/key, state, matrix);
+ void setClip(path, texture/key, state, matrix);
+
+ void setColor(color);
+ void setTexture(texture/key, sampler, const GrMatrix* = NULL);
+ void setBlend(src, dst);
+
+ void drawRect(const GrRect&, stroke, join);
+ void drawOval(const GrRect&, stroke);
+ void drawRoundRect(const GrRect&, GrScalar ovalW, GrScalar ovalH, stroke);
+ void drawPath(const GrPathIter&, GrPathFill);
+ void drawVertices(...);
+ void drawGlyphs(const uint16_t[], int count, const GrPoint[], GrFontScaler*);
+
+///
+
+ void save();
+ void restore();
+ void concatMatrix(const GrMatrix&);
+ void concatClipRect(const GrRect&);
+ void concatClipPath(const GrPathIter&, bool aa);
+
+};
+
+
diff --git a/gpu/include/GrAllocPool.h b/gpu/include/GrAllocPool.h
new file mode 100644
index 0000000000..46359e5058
--- /dev/null
+++ b/gpu/include/GrAllocPool.h
@@ -0,0 +1,71 @@
+/*
+ Copyright 2010 Google Inc.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+ */
+
+
+#ifndef GrAllocPool_DEFINED
+#define GrAllocPool_DEFINED
+
+#include "GrNoncopyable.h"
+
+class GrAllocPool : GrNoncopyable {
+public:
+ GrAllocPool(size_t blockSize = 0);
+ ~GrAllocPool();
+
+ /**
+ * Frees all blocks that have been allocated with alloc().
+ */
+ void reset();
+
+ /**
+ * Returns a block of memory 'bytes' in size. This address must not be
+ * passed to realloc/free/delete or any other function that assumes the
+ * address was allocated by malloc or new (because it hasn't been).
+ */
+ void* alloc(size_t bytes);
+
+ /**
+ * Releases the most recently allocated 'bytes' bytes back to the pool.
+ */
+ void release(size_t bytes);
+
+private:
+ struct Block;
+
+ Block* fBlock;
+ size_t fMinBlockSize;
+
+#if GR_DEBUG
+ int fBlocksAllocated;
+ void validate() const;
+#else
+ void validate() const {}
+#endif
+};
+
+template <typename T> class GrTAllocPool {
+public:
+ GrTAllocPool(int count) : fPool(count * sizeof(T)) {}
+
+ void reset() { fPool.reset(); }
+ T* alloc() { return (T*)fPool.alloc(sizeof(T)); }
+
+private:
+ GrAllocPool fPool;
+};
+
+#endif
+
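A usage sketch of the pool API above (editorial, not part of the diff). The Edge struct is hypothetical; note that GrTAllocPool::alloc() returns raw storage and runs no constructor, so it suits plain structs:

    struct Edge { float fX0, fY0, fX1, fY1; };   // hypothetical POD payload

    void buildEdges() {
        GrTAllocPool<Edge> pool(64);     // room for 64 Edges per block
        Edge* e = pool.alloc();          // raw storage; no constructor is run
        e->fX0 = 0.f;
        // ... allocate and fill more edges ...
        pool.reset();                    // frees every block in one call
    }
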
diff --git a/gpu/include/GrAllocator.h b/gpu/include/GrAllocator.h
new file mode 100755
index 0000000000..da02ba40b0
--- /dev/null
+++ b/gpu/include/GrAllocator.h
@@ -0,0 +1,230 @@
+/*
+ Copyright 2010 Google Inc.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+ */
+
+
+#ifndef GrAllocator_DEFINED
+#define GrAllocator_DEFINED
+
+#include "GrConfig.h"
+#include "GrTArray.h"
+
+class GrAllocator {
+public:
+ virtual ~GrAllocator() {
+ reset();
+ }
+
+ /**
+ * Create an allocator
+ *
+ * @param itemSize the size of each item to allocate
+ * @param itemsPerBlock the number of items to allocate at once
+ * @param initialBlock optional memory to use for the first block.
+ * Must be at least itemSize*itemsPerBlock sized.
+ * Caller is responsible for freeing this memory.
+ */
+ GrAllocator(size_t itemSize, uint32_t itemsPerBlock, void* initialBlock) :
+ fBlocks(fBlockInitialStorage, NUM_INIT_BLOCK_PTRS),
+ fItemSize(itemSize),
+ fItemsPerBlock(itemsPerBlock),
+ fOwnFirstBlock(NULL == initialBlock),
+ fCount(0) {
+ fBlockSize = fItemSize * fItemsPerBlock;
+ fBlocks.push_back() = initialBlock;
+ GR_DEBUGCODE(if (!fOwnFirstBlock) {*((char*)initialBlock+fBlockSize-1)='a';} );
+ }
+
+ /**
+ * Adds an item and returns pointer to it.
+ *
+ * @return pointer to the added item.
+ */
+ void* push_back() {
+ uint32_t indexInBlock = fCount % fItemsPerBlock;
+ // we always have at least one block
+ if (0 == indexInBlock) {
+ if (0 != fCount) {
+ fBlocks.push_back() = GrMalloc(fBlockSize);
+ } else if (fOwnFirstBlock) {
+ fBlocks[0] = GrMalloc(fBlockSize);
+ }
+ }
+ void* ret = (char*)fBlocks[fCount/fItemsPerBlock] +
+ fItemSize * indexInBlock;
+ ++fCount;
+ return ret;
+ }
+
+ /**
+ * removes all added items
+ */
+ void reset() {
+ uint32_t blockCount = GrMax((unsigned)1,
+ GrUIDivRoundUp(fCount, fItemsPerBlock));
+ for (uint32_t i = 1; i < blockCount; ++i) {
+ GrFree(fBlocks[i]);
+ }
+ if (fOwnFirstBlock) {
+ GrFree(fBlocks[0]);
+ fBlocks[0] = NULL;
+ }
+ fBlocks.pop_back_n(blockCount-1);
+ fCount = 0;
+ }
+
+ /**
+ * count of items
+ */
+ uint32_t count() const {
+ return fCount;
+ }
+
+ /**
+ * is the count 0
+ */
+ bool empty() const { return fCount == 0; }
+
+ /**
+ * access last item, only call if count() != 0
+ */
+ void* back() {
+ GrAssert(fCount);
+ return (*this)[fCount-1];
+ }
+
+ /**
+ * access last item, only call if count() != 0
+ */
+ const void* back() const {
+ GrAssert(fCount);
+ return (*this)[fCount-1];
+ }
+
+ /**
+ * access item by index.
+ */
+ void* operator[] (uint32_t i) {
+ GrAssert(i < fCount);
+ return (char*)fBlocks[i / fItemsPerBlock] +
+ fItemSize * (i % fItemsPerBlock);
+ }
+
+ /**
+ * access item by index.
+ */
+ const void* operator[] (uint32_t i) const {
+ GrAssert(i < fCount);
+ return (const char*)fBlocks[i / fItemsPerBlock] +
+ fItemSize * (i % fItemsPerBlock);
+ }
+
+private:
+ static const uint32_t NUM_INIT_BLOCK_PTRS = 8;
+
+ GrTArray<void*> fBlocks;
+ size_t fBlockSize;
+ char fBlockInitialStorage[NUM_INIT_BLOCK_PTRS*sizeof(void*)];
+ size_t fItemSize;
+ uint32_t fItemsPerBlock;
+ bool fOwnFirstBlock;
+ uint32_t fCount;
+};
+
+template <typename T>
+class GrTAllocator {
+private:
+ GrAllocator fAllocator;
+
+public:
+ virtual ~GrTAllocator() {};
+
+ /**
+ * Create an allocator
+ *
+ * @param itemsPerBlock the number of items to allocate at once
+ * @param initialBlock optional memory to use for the first block.
+ * Must be at least sizeof(T)*itemsPerBlock sized.
+ * Caller is responsible for freeing this memory.
+ */
+ GrTAllocator(uint32_t itemsPerBlock, void* initialBlock) :
+ fAllocator(sizeof(T), itemsPerBlock, initialBlock)
+ {}
+
+ /**
+ * Adds an item and returns it.
+ *
+ * @return the added item.
+ */
+ T& push_back() {
+ void* item = fAllocator.push_back();
+ GrAssert(NULL != item);
+ new (item) T;
+ return *(T*)item;
+ }
+
+ /**
+ * removes all added items
+ */
+ void reset() {
+ uint32_t c = fAllocator.count();
+ for (uint32_t i = 0; i < c; ++i) {
+ ((T*)fAllocator[i])->~T();
+ }
+ fAllocator.reset();
+ }
+
+ /**
+ * count of items
+ */
+ uint32_t count() const {
+ return fAllocator.count();
+ }
+
+ /**
+ * is the count 0
+ */
+ bool empty() const { return fAllocator.empty(); }
+
+ /**
+ * access last item, only call if count() != 0
+ */
+ T& back() {
+ return *(T*)fAllocator.back();
+ }
+
+ /**
+ * access last item, only call if count() != 0
+ */
+ const T& back() const {
+ return *(const T*)fAllocator.back();
+ }
+
+ /**
+ * access item by index.
+ */
+ T& operator[] (uint32_t i) {
+ return *(T*)(fAllocator[i]);
+ }
+
+ /**
+ * access item by index.
+ */
+ const T& operator[] (uint32_t i) const {
+ return *(const T*)(fAllocator[i]);
+ }
+};
+
+#endif
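A usage sketch of GrTAllocator with caller-provided storage for the first block, as the constructor comment describes; the Span type is hypothetical:

    struct Span { int fStart; int fCount; };     // hypothetical item type

    void buildSpans() {
        char storage[32 * sizeof(Span)];         // first block lives on the stack
        GrTAllocator<Span> spans(32, storage);   // overflow blocks come from GrMalloc

        Span& s = spans.push_back();             // default-constructed in place
        s.fStart = 0;
        s.fCount = 10;

        spans.reset();                           // runs ~Span() on every item
    }
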
diff --git a/gpu/include/GrAtlas.h b/gpu/include/GrAtlas.h
new file mode 100644
index 0000000000..9526e0bb4c
--- /dev/null
+++ b/gpu/include/GrAtlas.h
@@ -0,0 +1,88 @@
+/*
+ Copyright 2010 Google Inc.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+ */
+
+
+#ifndef GrAtlas_DEFINED
+#define GrAtlas_DEFINED
+
+#include "GrPoint.h"
+#include "GrTexture.h"
+#include "GrTDArray.h"
+
+class GrGpu;
+class GrRectanizer;
+class GrAtlasMgr;
+
+class GrAtlas {
+public:
+ GrAtlas(GrAtlasMgr*, int plotX, int plotY);
+
+ int getPlotX() const { return fPlot.fX; }
+ int getPlotY() const { return fPlot.fY; }
+
+ GrTexture* texture() const { return fTexture; }
+
+ bool addSubImage(int width, int height, const void*, GrIPoint16*);
+
+ static void FreeLList(GrAtlas* atlas) {
+ while (atlas) {
+ GrAtlas* next = atlas->fNext;
+ delete atlas;
+ atlas = next;
+ }
+ }
+
+ // testing
+ GrAtlas* nextAtlas() const { return fNext; }
+
+private:
+ ~GrAtlas(); // does not try to delete the fNext field
+
+ GrAtlas* fNext;
+ GrTexture* fTexture;
+ GrRectanizer* fRects;
+ GrAtlasMgr* fAtlasMgr;
+ GrIPoint16 fPlot;
+
+ friend class GrAtlasMgr;
+};
+
+class GrPlotMgr;
+
+class GrAtlasMgr {
+public:
+ GrAtlasMgr(GrGpu*);
+ ~GrAtlasMgr();
+
+ GrAtlas* addToAtlas(GrAtlas*, int width, int height, const void*,
+ GrIPoint16*);
+
+ GrTexture* getTexture() const { return fTexture; }
+
+ // to be called by ~GrAtlas()
+ void freePlot(int x, int y);
+
+ void abandonAll();
+
+private:
+ GrGpu* fGpu;
+ GrTexture* fTexture;
+ GrPlotMgr* fPlotMgr;
+};
+
+#endif
+
+
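A sketch of how a client might feed sub-images into the atlas via GrAtlasMgr::addToAtlas(); the caller-side parameters and the list-update behavior shown are assumptions based on the declarations above:

    void uploadSubImage(GrAtlasMgr* atlasMgr, GrAtlas** atlasList,
                        int width, int height, const void* image) {
        GrIPoint16 loc;
        GrAtlas* updated = atlasMgr->addToAtlas(*atlasList, width, height,
                                                image, &loc);
        if (NULL != updated) {
            *atlasList = updated;   // head of the atlas list may have changed
            // loc is the sub-image's offset within updated->texture()
        }
    }
    // when the owner is torn down, release the whole list:
    //     GrAtlas::FreeLList(*atlasList);
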
diff --git a/gpu/include/GrClip.h b/gpu/include/GrClip.h
new file mode 100644
index 0000000000..8e3030c753
--- /dev/null
+++ b/gpu/include/GrClip.h
@@ -0,0 +1,115 @@
+/*
+ Copyright 2010 Google Inc.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+ */
+
+
+#ifndef GrClip_DEFINED
+#define GrClip_DEFINED
+
+#include "GrClipIterator.h"
+#include "GrRect.h"
+#include "GrTDArray.h"
+
+class GrClip {
+public:
+ GrClip();
+ GrClip(const GrClip& src);
+ GrClip(GrClipIterator* iter);
+ ~GrClip();
+
+ GrClip& operator=(const GrClip& src);
+
+ bool isEmpty() const { return fBounds.isEmpty(); }
+ bool isComplex() const { return fList.count() > 0; }
+ bool isRect() const {
+ return !this->isEmpty() && !this->isComplex();
+ }
+
+ const GrIRect& getBounds() const { return fBounds; }
+
+ /**
+ * Resets this clip to be empty (fBounds is empty, and fList is empty)
+ */
+ void setEmpty();
+
+ /**
+ * Resets this clip to have fBounds == rect, and fList is empty.
+ */
+ void setRect(const GrIRect& rect);
+
+ /**
+ * Append a rect to an existing clip. The caller must ensure that rect does
+ * not overlap with any previous rect in this clip (either from setRect
+ * or addRect). fBounds is automatically updated to reflect the union of
+ * all rects that have been added.
+ */
+ void addRect(const GrIRect&);
+
+ void setFromIterator(GrClipIterator* iter);
+
+ friend bool operator==(const GrClip& a, const GrClip& b) {
+ return a.fBounds == b.fBounds && a.fList == b.fList;
+ }
+ friend bool operator!=(const GrClip& a, const GrClip& b) {
+ return !(a == b);
+ }
+
+ /**
+ * Return the number of rects in this clip: 0 for empty, 1 for a rect,
+ * or N for a complex clip.
+ */
+ int countRects() const {
+ return this->isEmpty() ? 0 : GrMax<int>(1, fList.count());
+ }
+
+ /**
+ * Return an array of rects for this clip. Use countRects() to know the
+ * number of entries.
+ */
+ const GrIRect* getRects() const {
+ return fList.count() > 0 ? fList.begin() : &fBounds;
+ }
+
+#if GR_DEBUG
+ void validate() const;
+#else
+ void validate() const {}
+#endif
+
+private:
+ GrTDArray<GrIRect> fList;
+ GrIRect fBounds;
+};
+
+class GrClipIter : public GrClipIterator {
+public:
+ GrClipIter(const GrClip& clip) : fClip(&clip), fIndex(0) {}
+ GrClipIter() : fClip(NULL), fIndex(0) {}
+
+ void reset(const GrClip& clip);
+
+ virtual bool isDone();
+ virtual void rewind();
+ virtual void getRect(GrIRect* r);
+ virtual void next();
+ virtual void computeBounds(GrIRect* r);
+
+private:
+ const GrClip* fClip;
+ int fIndex;
+};
+
+#endif
+
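A sketch of walking a clip's rect list using the countRects()/getRects() contract documented above (editorial, not part of the diff):

    void forEachClipRect(const GrClip& clip) {
        int count = clip.countRects();           // 0 empty, 1 rect, N complex
        const GrIRect* rects = clip.getRects();
        for (int i = 0; i < count; ++i) {
            // process rects[i]; it never overlaps any other rect in the clip
        }
    }
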
diff --git a/gpu/include/GrClipIterator.h b/gpu/include/GrClipIterator.h
new file mode 100644
index 0000000000..d1fe4dde2f
--- /dev/null
+++ b/gpu/include/GrClipIterator.h
@@ -0,0 +1,81 @@
+/*
+ Copyright 2010 Google Inc.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+ */
+
+
+#ifndef GrClipIterator_DEFINED
+#define GrClipIterator_DEFINED
+
+#include "GrRect.h"
+
+class GrClipIterator {
+public:
+ GrClipIterator() : fNeedBounds(true) {}
+ virtual ~GrClipIterator() {}
+
+ /**
+ * Returns true if there are no more rects to process
+ */
+ virtual bool isDone() = 0;
+
+ /**
+ * Rewind the iterator to replay the set of rects again
+ */
+ virtual void rewind() = 0;
+
+ /**
+ * Return the current rect. It is an error to call this when isDone() is true
+ */
+ virtual void getRect(GrIRect*) = 0;
+
+ /**
+ * Call to move to the next rect in the set
+ */
+ virtual void next() = 0;
+
+ /**
+ * Set bounds to be the bounds of the clip.
+ */
+ virtual void computeBounds(GrIRect* bounds) = 0;
+
+ /**
+ * Subclasses should call this whenever their underlying bounds have changed.
+ */
+ void invalidateBoundsCache() { fNeedBounds = true; }
+
+ const GrIRect& getBounds() {
+ if (fNeedBounds) {
+ this->computeBounds(&fBounds);
+ fNeedBounds = false;
+ }
+ return fBounds;
+ }
+
+private:
+ GrIRect fBounds;
+ bool fNeedBounds;
+};
+
+/**
+ * Call to rewind iter, first checking to see if iter is NULL
+ */
+static inline void GrSafeRewind(GrClipIterator* iter) {
+ if (iter) {
+ iter->rewind();
+ }
+}
+
+#endif
+
diff --git a/gpu/include/GrColor.h b/gpu/include/GrColor.h
new file mode 100644
index 0000000000..8dc03d258c
--- /dev/null
+++ b/gpu/include/GrColor.h
@@ -0,0 +1,72 @@
+/*
+ Copyright 2010 Google Inc.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+ */
+
+
+#ifndef GrColor_DEFINED
+#define GrColor_DEFINED
+
+#include "GrTypes.h"
+
+/**
+ * GrColor is 4 bytes for R, G, B, A, in a compile-time specific order. The
+ * components are stored premultiplied.
+ */
+typedef uint32_t GrColor;
+
+// indices for addressing a GrColor as an array of bytes
+
+#define GrColor_INDEX_R 0
+#define GrColor_INDEX_G 1
+#define GrColor_INDEX_B 2
+#define GrColor_INDEX_A 3
+
+// shift amount to assign a component to a GrColor int
+
+#define GrColor_SHIFT_R 0
+#define GrColor_SHIFT_G 8
+#define GrColor_SHIFT_B 16
+#define GrColor_SHIFT_A 24
+
+/**
+ * Pack 4 components (RGBA) into a GrColor int
+ */
+static inline GrColor GrColorPackRGBA(unsigned r, unsigned g,
+ unsigned b, unsigned a) {
+ GrAssert((uint8_t)r == r);
+ GrAssert((uint8_t)g == g);
+ GrAssert((uint8_t)b == b);
+ GrAssert((uint8_t)a == a);
+ return (r << GrColor_SHIFT_R) |
+ (g << GrColor_SHIFT_G) |
+ (b << GrColor_SHIFT_B) |
+ (a << GrColor_SHIFT_A);
+}
+
+// extract a component (byte) from a GrColor int
+
+#define GrColorUnpackR(color) (((color) >> GrColor_SHIFT_R) & 0xFF)
+#define GrColorUnpackG(color) (((color) >> GrColor_SHIFT_G) & 0xFF)
+#define GrColorUnpackB(color) (((color) >> GrColor_SHIFT_B) & 0xFF)
+#define GrColorUnpackA(color) (((color) >> GrColor_SHIFT_A) & 0xFF)
+
+/**
+ * Since premultiplied means that alpha >= color, we construct a color with
+ * each component==255 and alpha == 0 to be "illegal"
+ */
+#define GrColor_ILLEGAL (~(0xFF << GrColor_SHIFT_A))
+
+#endif
+
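For illustration, packing and unpacking a premultiplied color with the macros above (a sketch, not part of the diff):

    void colorExample() {
        GrColor c = GrColorPackRGBA(0x80, 0x80, 0x80, 0x80);  // premultiplied 50% gray
        GrAssert(0x80 == GrColorUnpackR(c));
        GrAssert(0x80 == GrColorUnpackA(c));
        GrAssert(GrColor_ILLEGAL != c);   // sentinel is 0xFF components with 0 alpha
    }
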
diff --git a/gpu/include/GrConfig.h b/gpu/include/GrConfig.h
new file mode 100644
index 0000000000..9c18f60ef6
--- /dev/null
+++ b/gpu/include/GrConfig.h
@@ -0,0 +1,344 @@
+/*
+ Copyright 2010 Google Inc.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+ */
+
+
+#ifndef GrConfig_DEFINED
+#define GrConfig_DEFINED
+
+///////////////////////////////////////////////////////////////////////////////
+// preconfig section:
+//
+// All the work before including GrUserConfig.h should center around guessing
+// what platform we're on, and defining low-level symbols based on that.
+//
+// A build environment may have already defined symbols, so we first check
+// for that
+//
+
+// hack to ensure we know what sort of Apple platform we're on
+#if defined(__APPLE_CPP__) || defined(__APPLE_CC__)
+ #include <TargetConditionals.h>
+#endif
+
+/**
+ * Gr defines are set to 0 or 1, rather than being undefined or defined
+ */
+
+#if !defined(GR_ANDROID_BUILD)
+ #define GR_ANDROID_BUILD 0
+#endif
+#if !defined(GR_IOS_BUILD)
+ #define GR_IOS_BUILD 0
+#endif
+#if !defined(GR_LINUX_BUILD)
+ #define GR_LINUX_BUILD 0
+#endif
+#if !defined(GR_MAC_BUILD)
+ #define GR_MAC_BUILD 0
+#endif
+#if !defined(GR_WIN32_BUILD)
+ #define GR_WIN32_BUILD 0
+#endif
+#if !defined(GR_QNX_BUILD)
+ #define GR_QNX_BUILD 0
+#endif
+
+/**
+ * If no build target has been defined, attempt to infer.
+ */
+#if !GR_ANDROID_BUILD && !GR_IOS_BUILD && !GR_LINUX_BUILD && !GR_MAC_BUILD && !GR_WIN32_BUILD && !GR_QNX_BUILD
+ #if defined(_WIN32)
+ #undef GR_WIN32_BUILD
+ #define GR_WIN32_BUILD 1
+// #error "WIN"
+ #elif TARGET_OS_IPHONE || TARGET_IPHONE_SIMULATOR
+ #undef GR_IOS_BUILD
+ #define GR_IOS_BUILD 1
+// #error "IOS"
+ #elif ANDROID_NDK || defined(ANDROID)
+ #undef GR_ANDROID_BUILD
+ #define GR_ANDROID_BUILD 1
+// #error "ANDROID"
+ #elif TARGET_OS_MAC
+ #undef GR_MAC_BUILD
+ #define GR_MAC_BUILD 1
+// #error "MAC"
+ #elif TARGET_OS_QNX || defined(__QNXNTO__)
+ #undef GR_QNX_BUILD
+ #define GR_QNX_BUILD 1
+// #error "QNX"
+ #else
+ #undef GR_LINUX_BUILD
+ #define GR_LINUX_BUILD 1
+// #error "LINUX"
+ #endif
+#endif
+
+#if !defined(GR_DEBUG) && !defined(GR_RELEASE)
+ #ifdef NDEBUG
+ #define GR_DEBUG 0
+ #else
+ #define GR_DEBUG 1
+ #endif
+ #define GR_RELEASE !GR_DEBUG
+#endif
+
+///////////////////////////////////////////////////////////////////////////////
+///////////////////////////////////////////////////////////////////////////////
+
+/*
+ * Pull stdint.h in before user-config, to be sure our __STDC... macros are
+ * defined before anyone else might try to include stdint.h
+ */
+#define __STDC_LIMIT_MACROS
+#define __STDC_CONSTANT_MACROS
+#include <stdint.h>
+
+/*
+ * The "user config" file can be empty, and everything should work. It is
+ * meant to store a given platform/client's overrides of our guess-work.
+ *
+ * An alternate user config file can be specified by defining
+ * GR_USER_CONFIG_FILE. It should be defined relative to GrConfig.h
+ *
+ * e.g. it can specify GR_DEBUG/GR_RELEASE as it pleases, change the BUILD
+ * target, or supply its own defines for anything else (e.g. GR_SCALAR)
+ */
+#if !defined(GR_USER_CONFIG_FILE)
+ #include "GrUserConfig.h"
+#else
+ #include GR_USER_CONFIG_FILE
+#endif
+
+
+///////////////////////////////////////////////////////////////////////////////
+///////////////////////////////////////////////////////////////////////////////
+// postconfig section:
+//
+// By now we must have a GR_..._BUILD symbol set to 1, and a decision about
+// debug -vs- release
+//
+
+extern void GrPrintf(const char format[], ...);
+
+/**
+ * GR_STRING makes a string of X where X is expanded before conversion to a string
+ * if X itself contains macros.
+ */
+#define GR_STRING(X) GR_STRING_IMPL(X)
+#define GR_STRING_IMPL(X) #X
+
+/**
+ * GR_CONCAT concatenates X and Y where each is expanded before
+ * concatenation if either contains macros.
+ */
+#define GR_CONCAT(X,Y) GR_CONCAT_IMPL(X,Y)
+#define GR_CONCAT_IMPL(X,Y) X##Y
+
+/**
+ * Creates a string of the form "<filename>(<linenumber>) : "
+ */
+#define GR_FILE_AND_LINE_STR __FILE__ "(" GR_STRING(__LINE__) ") : "
+
+/**
+ * Compilers have different ways of issuing warnings. This macro
+ * attempts to abstract them, but may need to be specialized for your
+ * particular compiler.
+ * To insert compiler warnings use "#pragma message GR_WARN(<string>)"
+ */
+#if _MSC_VER
+ #define GR_WARN(MSG) (GR_FILE_AND_LINE_STR "WARNING: " MSG)
+#else//__GNUC__ - may need other defines for different compilers
+ #define GR_WARN(MSG) ("WARNING: " MSG)
+#endif
+
+/**
+ * GR_ALWAYSBREAK is an unconditional break in all builds.
+ */
+#if !defined(GR_ALWAYSBREAK)
+ #if GR_WIN32_BUILD
+ #define GR_ALWAYSBREAK __debugbreak()
+ #else
+ // TODO: do other platforms really not have continuable breakpoints?
+ // sign extend for 64bit architectures to be sure this is
+ // in the high address range
+ #define GR_ALWAYSBREAK *((int*)(int64_t)(int32_t)0xbeefcafe) = 0;
+ #endif
+#endif
+
+/**
+ * GR_DEBUGBREAK is an unconditional break in debug builds.
+ */
+#if !defined(GR_DEBUGBREAK)
+ #if GR_DEBUG
+ #define GR_DEBUGBREAK GR_ALWAYSBREAK
+ #else
+ #define GR_DEBUGBREAK
+ #endif
+#endif
+
+/**
+ * GR_ALWAYSASSERT is an assertion in all builds.
+ */
+#if !defined(GR_ALWAYSASSERT)
+ #define GR_ALWAYSASSERT(COND) \
+ do { \
+ if (!(COND)) { \
+ GrPrintf("%s %s failed\n", GR_FILE_AND_LINE_STR, #COND); \
+ GR_ALWAYSBREAK; \
+ } \
+ } while (false)
+#endif
+
+/**
+ * GR_DEBUGASSERT is an assertion in debug builds only.
+ */
+#if !defined(GR_DEBUGASSERT)
+ #if GR_DEBUG
+ #define GR_DEBUGASSERT(COND) GR_ALWAYSASSERT(COND)
+ #else
+ #define GR_DEBUGASSERT(COND)
+ #endif
+#endif
+
+/**
+ * Prettier forms of the above macros.
+ */
+#define GrAssert(COND) GR_DEBUGASSERT(COND)
+#define GrAlwaysAssert(COND) GR_ALWAYSASSERT(COND)
+
+/**
+ * GR_DEBUGCODE compiles the code X in debug builds only
+ */
+#if !defined(GR_DEBUGCODE)
+ #if GR_DEBUG
+ #define GR_DEBUGCODE(X) X
+ #else
+ #define GR_DEBUGCODE(X)
+ #endif
+#endif
+
+/**
+ * GR_STATIC_ASSERT is a compile time assertion. Depending on the platform
+ * it may print the message in the compiler log. Obviously, the condition must
+ * be evaluatable at compile time.
+ */
+// VS 2010 and GCC compiled with c++0x or gnu++0x support the new
+// static_assert.
+#if !defined(GR_STATIC_ASSERT)
+ #if (_MSC_VER >= 1600) || __GXX_EXPERIMENTAL_CXX0X__
+ #define GR_STATIC_ASSERT(CONDITION) static_assert(CONDITION, "bug")
+ #else
+ template <bool> class GR_STATIC_ASSERT_FAILURE;
+ template <> class GR_STATIC_ASSERT_FAILURE<true> {};
+ #define GR_STATIC_ASSERT(CONDITION) \
+ enum {GR_CONCAT(X,__LINE__) = \
+ sizeof(GR_STATIC_ASSERT_FAILURE<CONDITION>)}
+ #endif
+#endif
+
+#if !defined(GR_SCALAR_IS_FLOAT)
+ #define GR_SCALAR_IS_FLOAT 0
+#endif
+#if !defined(GR_SCALAR_IS_FIXED)
+ #define GR_SCALAR_IS_FIXED 0
+#endif
+
+#if !defined(GR_TEXT_SCALAR_TYPE_IS_USHORT)
+ #define GR_TEXT_SCALAR_TYPE_IS_USHORT 0
+#endif
+#if !defined(GR_TEXT_SCALAR_TYPE_IS_FLOAT)
+ #define GR_TEXT_SCALAR_TYPE_IS_FLOAT 0
+#endif
+#if !defined(GR_TEXT_SCALAR_TYPE_IS_FIXED)
+ #define GR_TEXT_SCALAR_TYPE_IS_FIXED 0
+#endif
+
+#ifndef GR_DUMP_TEXTURE_UPLOAD
+ #define GR_DUMP_TEXTURE_UPLOAD 0
+#endif
+
+/**
+ * GR_COLLECT_STATS controls whether the GrGpu class collects stats.
+ * If not already defined then collect in debug build but not release.
+ */
+#if !defined(GR_COLLECT_STATS)
+ #define GR_COLLECT_STATS GR_DEBUG
+#endif
+
+/**
+ * GR_GL_LOG_CALLS controls whether each GL call is logged.
+ */
+#if !defined(GR_GL_LOG_CALLS)
+ #define GR_GL_LOG_CALLS 0
+#endif
+
+///////////////////////////////////////////////////////////////////////////////
+// tail section:
+//
+// Now we just assert if we are missing some required define, or if we detect
+// an inconsistent combination of defines
+//
+
+
+/**
+ * Only one build target macro should be 1 and the rest should be 0.
+ */
+#define GR_BUILD_SUM (GR_WIN32_BUILD + GR_MAC_BUILD + GR_IOS_BUILD + GR_ANDROID_BUILD + GR_LINUX_BUILD + GR_QNX_BUILD)
+#if 0 == GR_BUILD_SUM
+ #error "Missing a GR_BUILD define"
+#elif 1 != GR_BUILD_SUM
+ #error "More than one GR_BUILD defined"
+#endif
+
+
+#if !GR_SCALAR_IS_FLOAT && !GR_SCALAR_IS_FIXED
+ #undef GR_SCALAR_IS_FLOAT
+ #define GR_SCALAR_IS_FLOAT 1
+ #pragma message GR_WARN("Scalar type not defined, defaulting to float")
+#endif
+
+#if !GR_TEXT_SCALAR_TYPE_IS_FLOAT && \
+ !GR_TEXT_SCALAR_TYPE_IS_FIXED && \
+ !GR_TEXT_SCALAR_TYPE_IS_USHORT
+ #undef GR_TEXT_SCALAR_TYPE_IS_FLOAT
+ #define GR_TEXT_SCALAR_TYPE_IS_FLOAT 1
+ #pragma message GR_WARN("Text scalar type not defined, defaulting to float")
+#endif
+
+#if 0
+#if GR_WIN32_BUILD
+// #pragma message GR_WARN("GR_WIN32_BUILD")
+#endif
+#if GR_MAC_BUILD
+// #pragma message GR_WARN("GR_MAC_BUILD")
+#endif
+#if GR_IOS_BUILD
+// #pragma message GR_WARN("GR_IOS_BUILD")
+#endif
+#if GR_ANDROID_BUILD
+// #pragma message GR_WARN("GR_ANDROID_BUILD")
+#endif
+#if GR_LINUX_BUILD
+// #pragma message GR_WARN("GR_LINUX_BUILD")
+#endif
+#if GR_QNX_BUILD
+// #pragma message GR_WARN("GR_QNX_BUILD")
+#endif
+#endif
+
+#endif
+
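For illustration, a sketch of how the GR_STRING, GR_WARN, and GR_STATIC_ASSERT macros above are meant to be used; MY_GR_VERSION and gVersionStr are hypothetical names:

    GR_STATIC_ASSERT(4 == sizeof(int32_t));    // checked at compile time

    #define MY_GR_VERSION 2                    // hypothetical define
    // MY_GR_VERSION is expanded before stringizing, so this yields "2"
    static const char* gVersionStr = GR_STRING(MY_GR_VERSION);

    // GR_WARN prepends "<file>(<line>) : " on MSVC; emit a warning with:
    //     #pragma message GR_WARN("fixed-point build is untested")
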
diff --git a/gpu/include/GrContext.h b/gpu/include/GrContext.h
new file mode 100644
index 0000000000..f9d5ed5090
--- /dev/null
+++ b/gpu/include/GrContext.h
@@ -0,0 +1,322 @@
+/*
+ Copyright 2010 Google Inc.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+ */
+
+#ifndef GrContext_DEFINED
+#define GrContext_DEFINED
+
+#include "GrClip.h"
+#include "GrGpu.h"
+#include "GrSamplerState.h"
+#include "GrTextureCache.h"
+#include "GrInOrderDrawBuffer.h"
+#include "GrVertexBufferAllocPool.h"
+
+class GrFontCache;
+class GrPathIter;
+
+//TODO: move GrGpu enums/nested types here
+
+class GrContext : public GrRefCnt {
+public:
+ /**
+ * Creates a GrContext from within a 3D context.
+ */
+ static GrContext* Create(GrGpu::Engine engine,
+ GrGpu::Platform3DContext context3D);
+
+ virtual ~GrContext();
+
+ /**
+ * The GrContext normally assumes that no outsider is setting state
+ * within the underlying 3D API's context/device/whatever. This call informs
+ * the context that the state was modified and it should resend. Shouldn't
+ * be called frequently for good performance.
+ */
+ void resetContext();
+
+ /**
+ * Abandons all textures. Call this if you have lost the associated GPU
+ * context, and thus internal texture references/IDs are now invalid.
+ */
+ void abandonAllTextures();
+
+ /**
+ * Search for an entry with the same Key. If found, "lock" it and return it.
+ * If not found, return null.
+ */
+ GrTextureEntry* findAndLockTexture(GrTextureKey*,
+ const GrSamplerState&);
+
+
+ /**
+ * Create a new entry, based on the specified key and texture, and return
+ * its "locked" entry.
+ *
+ * Ownership of the texture is transferred to the Entry, which will unref()
+ * it when we are purged or deleted.
+ */
+ GrTextureEntry* createAndLockTexture(GrTextureKey* key,
+ const GrSamplerState&,
+ const GrGpu::TextureDesc&,
+ void* srcData, size_t rowBytes);
+
+ /**
+ * When done with an entry, call unlockTexture(entry) on it, which returns
+ * it to the cache, where it may be purged.
+ */
+ void unlockTexture(GrTextureEntry* entry);
+
+ /**
+ * Removes a texture from the cache. This prevents the texture from
+ * being found by a subsequent findAndLockTexture() until it is
+ * reattached. The entry still counts against the cache's budget and should
+ * be reattached when exclusive access is no longer needed.
+ */
+ void detachCachedTexture(GrTextureEntry*);
+
+ /**
+ * Reattaches a texture to the cache and unlocks it. Allows it to be found
+ * by a subsequent findAndLock or be purged (provided its lock count is
+ * now 0.)
+ */
+ void reattachAndUnlockCachedTexture(GrTextureEntry*);
+
+ /**
+ * Creates a texture that is outside the cache. Does not count against
+ * cache's budget.
+ */
+ GrTexture* createUncachedTexture(const GrGpu::TextureDesc&,
+ void* srcData,
+ size_t rowBytes);
+
+ /**
+ * Wraps an externally-created rendertarget in a GrRenderTarget.
+ * e.g. in GL, platformRenderTarget is an FBO id.
+ */
+ GrRenderTarget* createPlatformRenderTarget(intptr_t platformRenderTarget,
+ int width, int height);
+
+ /**
+ * Returns true if the specified use of an indexed texture is supported.
+ */
+ bool supportsIndex8PixelConfig(const GrSamplerState&, int width, int height);
+
+ ///////////////////////////////////////////////////////////////////////////
+
+ GrRenderTarget* currentRenderTarget() const;
+ void getViewMatrix(GrMatrix* m) const;
+ const GrClip& getClip() const { return fGpu->getClip(); }
+
+ void setRenderTarget(GrRenderTarget* target);
+ void setDefaultRenderTargetSize(uint32_t width, uint32_t height);
+ GrRenderTarget* defaultRenderTarget() { return fGpu->defaultRenderTarget(); }
+
+ void setTexture(GrTexture* texture);
+ void setSamplerState(const GrSamplerState&);
+ void setTextureMatrix(const GrMatrix& m);
+
+ void setAntiAlias(bool);
+ void setDither(bool);
+ void setAlpha(uint8_t alpha);
+ void setColor(GrColor color);
+ void setPointSize(float size);
+ void setBlendFunc(GrGpu::BlendCoeff srcCoef, GrGpu::BlendCoeff dstCoef);
+ void setViewMatrix(const GrMatrix& m);
+ void setClip(const GrClip&);
+
+ /**
+ * Erase the entire render target, ignoring any clips/scissors.
+ */
+ void eraseColor(GrColor color);
+
+ /**
+ * Draw everywhere (respecting the clip) with the current color.
+ */
+ void drawFull(bool useTexture);
+
+ /**
+ * Draw the rect, respecting the current texture if useTexture is true.
+ * If strokeWidth < 0, then the rect is filled, else the rect is stroked
+ * based on strokeWidth. If strokeWidth == 0, then the stroke is always
+ * a single pixel thick.
+ */
+ void drawRect(const GrRect&, bool useTexture, GrScalar strokeWidth);
+
+ void fillRect(const GrRect& rect, bool useTexture) {
+ this->drawRect(rect, useTexture, -1);
+ }
+
+ /**
+ * Path filling rules
+ */
+ enum PathFills {
+ kWinding_PathFill,
+ kEvenOdd_PathFill,
+ kInverseWinding_PathFill,
+ kInverseEvenOdd_PathFill,
+ kHairLine_PathFill,
+
+ kPathFillCount
+ };
+
+ /**
+ * Tessellates and draws a path.
+ *
+ * @param path the path to draw
+ * @param paint the paint to set before drawing
+ * @param useTexture if true the path vertices will also be used as
+ * texture coorindates referencing last texture passed
+ * to setTexture.
+ */
+ void drawPath(GrPathIter* path,
+ PathFills fill,
+ bool useTexture,
+ const GrPoint* translate = NULL);
+
+ /**
+ * Call to ensure all drawing to the context has been issued to the
+ * underlying 3D API.
+ * If flushRenderTarget is true then after the call the last
+ * rendertarget set will be current in the underlying 3D API, otherwise
+ * it may not be. It is useful to set if the caller plans to use the 3D
+ * context outside of Ganesh to render into the current RT.
+ */
+ void flush(bool flushRenderTarget);
+
+ /**
+ * Return true on success, i.e. if we could copy the specified range of
+ * pixels from the current render-target into the buffer, converting into
+ * the specified pixel-config.
+ */
+ bool readPixels(int left, int top, int width, int height,
+ GrTexture::PixelConfig, void* buffer);
+
+ /**
+ * Copy the src pixels [buffer, stride, pixelconfig] into the current
+ * render-target at the specified rectangle.
+ */
+ void writePixels(int left, int top, int width, int height,
+ GrTexture::PixelConfig, const void* buffer, size_t stride);
+
+ /* -------------------------------------------------------
+ * Mimicking the GrGpu interface for now
+ * TODO: define appropriate higher-level API for context
+ */
+
+ GrVertexBuffer* createVertexBuffer(uint32_t size, bool dynamic);
+
+ GrIndexBuffer* createIndexBuffer(uint32_t size, bool dynamic);
+
+ bool reserveAndLockGeometry(GrVertexLayout vertexLayout,
+ uint32_t vertexCount,
+ uint32_t indexCount,
+ void** vertices,
+ void** indices);
+
+ void drawIndexed(GrGpu::PrimitiveType type,
+ uint32_t startVertex,
+ uint32_t startIndex,
+ uint32_t vertexCount,
+ uint32_t indexCount);
+
+ void drawNonIndexed(GrGpu::PrimitiveType type,
+ uint32_t startVertex,
+ uint32_t vertexCount);
+
+ void setVertexSourceToArray(const void* array,
+ GrVertexLayout vertexLayout);
+ void setIndexSourceToArray(const void* array);
+ void setVertexSourceToBuffer(GrVertexBuffer* buffer,
+ GrVertexLayout vertexLayout);
+ void setIndexSourceToBuffer(GrIndexBuffer* buffer);
+
+ void releaseReservedGeometry();
+
+ void resetStats();
+
+ const GrGpu::Stats& getStats() const;
+
+ void printStats() const;
+
+ class AutoRenderTarget : ::GrNoncopyable {
+ public:
+ AutoRenderTarget(GrContext* context, GrRenderTarget* target) {
+ fContext = NULL;
+ fPrevTarget = context->currentRenderTarget();
+ if (fPrevTarget != target) {
+ context->setRenderTarget(target);
+ fContext = context;
+ }
+ }
+ ~AutoRenderTarget() {
+ if (fContext) {
+ fContext->setRenderTarget(fPrevTarget);
+ }
+ }
+ private:
+ GrContext* fContext;
+ GrRenderTarget* fPrevTarget;
+ };
+
+ /* -------------------------------------------------------
+ */
+
+ // Intended only to be used within Ganesh:
+ GrGpu* getGpu() { return fGpu; }
+ GrFontCache* getFontCache() { return fFontCache; }
+ GrDrawTarget* getTextTarget();
+ void flushText();
+
+ const GrIndexBuffer* quadIndexBuffer() const;
+ int maxQuadsInIndexBuffer() const;
+
+private:
+ GrGpu* fGpu;
+ GrTextureCache* fTextureCache;
+ GrFontCache* fFontCache;
+
+ GrVertexBufferAllocPool fVBAllocPool;
+ GrInOrderDrawBuffer fTextDrawBuffer;
+
+ GrContext(GrGpu* gpu);
+ bool finalizeTextureKey(GrTextureKey*, const GrSamplerState&) const;
+
+ void drawClipIntoStencil();
+};
+
+/**
+ * Save/restore the view-matrix in the context.
+ */
+class GrAutoViewMatrix : GrNoncopyable {
+public:
+ GrAutoViewMatrix(GrContext* ctx) : fContext(ctx) {
+ ctx->getViewMatrix(&fMatrix);
+ }
+ GrAutoViewMatrix(GrContext* ctx, const GrMatrix& matrix) : fContext(ctx) {
+ ctx->getViewMatrix(&fMatrix);
+ ctx->setViewMatrix(matrix);
+ }
+ ~GrAutoViewMatrix() {
+ fContext->setViewMatrix(fMatrix);
+ }
+
+private:
+ GrContext* fContext;
+ GrMatrix fMatrix;
+};
+
+#endif
+
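A sketch of the find-or-create-then-unlock pattern implied by the texture-cache API above. GrTextureEntry is declared in GrTextureCache.h (not shown here), so the entry->texture() accessor and the key/desc setup are assumptions:

    void drawWithCachedTexture(GrContext* context,
                               GrTextureKey* key,              // assumed set up by the caller
                               const GrSamplerState& sampler,
                               const GrGpu::TextureDesc& desc,
                               void* srcPixels, size_t rowBytes) {
        GrTextureEntry* entry = context->findAndLockTexture(key, sampler);
        if (NULL == entry) {
            entry = context->createAndLockTexture(key, sampler, desc,
                                                  srcPixels, rowBytes);
        }
        if (NULL != entry) {
            context->setTexture(entry->texture());   // texture() accessor is assumed
            // ... issue draws that reference the texture ...
            context->unlockTexture(entry);           // return the entry to the cache
        }
    }
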
diff --git a/gpu/include/GrDrawTarget.h b/gpu/include/GrDrawTarget.h
new file mode 100644
index 0000000000..fee13a6c7e
--- /dev/null
+++ b/gpu/include/GrDrawTarget.h
@@ -0,0 +1,736 @@
+/*
+ Copyright 2010 Google Inc.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+ */
+
+
+#ifndef GrDrawTarget_DEFINED
+#define GrDrawTarget_DEFINED
+
+#include "GrScalar.h"
+#include "GrMatrix.h"
+#include "GrColor.h"
+#include "GrRefCnt.h"
+#include "GrSamplerState.h"
+#include "GrClip.h"
+
+class GrTexture;
+class GrRenderTarget;
+class GrClipIterator;
+class GrVertexBuffer;
+class GrIndexBuffer;
+
+class GrDrawTarget : public GrRefCnt {
+public:
+ /**
+ * Geometric primitives used for drawing.
+ */
+ enum PrimitiveType {
+ kTriangles_PrimitiveType,
+ kTriangleStrip_PrimitiveType,
+ kTriangleFan_PrimitiveType,
+ kPoints_PrimitiveType,
+ kLines_PrimitiveType,
+ kLineStrip_PrimitiveType
+ };
+
+ /**
+ * Flags that affect rendering. Controlled using enable/disableState(). All
+ * default to disabled.
+ */
+ enum StateBits {
+ kDither_StateBit = 0x1,//<! Perform color dithering
+ kAntialias_StateBit = 0x2,//<! Perform anti-aliasing. The render-
+ // target must support some form of AA
+ // (msaa, coverage sampling, etc). For
+ // GrGpu-created rendertarget/textures
+ // this is controlled by parameters
+ // passed to createTexture.
+ kClip_StateBit = 0x4,//<! Controls whether drawing is clipped
+ // against the region specified by
+ // setClip.
+ };
+
+ /**
+ * Coefficients for alpha-blending.
+ */
+ enum BlendCoeff {
+ kZero_BlendCoeff, //<! 0
+ kOne_BlendCoeff, //<! 1
+ kSC_BlendCoeff, //<! src color
+ kISC_BlendCoeff, //<! one minus src color
+ kDC_BlendCoeff, //<! dst color
+ kIDC_BlendCoeff, //<! one minus dst color
+ kSA_BlendCoeff, //<! src alpha
+ kISA_BlendCoeff, //<! one minus src alpha
+ kDA_BlendCoeff, //<! dst alpha
+ kIDA_BlendCoeff, //<! one minus dst alpha
+ };
+
+ /**
+ * StencilPass
+ *
+ * Sets the stencil state for subsequent draw calls. Used to fill paths.
+ *
+ * Winding requires two passes when the GPU/API doesn't support separate
+ * stencil.
+ *
+ * The color pass for path fill is used to zero out stencil bits used for
+ * path filling. Every pixel covered by a winding/EO stencil pass must get
+ * covered by the color pass in order to leave the stencil buffer in the correct
+ * state for the next path draw.
+ *
+ * NOTE: Stencil-based Winding fill has alias-to-zero problems. (e.g. A
+ * winding count of 128,256,512,etc with an 8 bit stencil buffer
+ * will be unfilled)
+ */
+ enum StencilPass {
+ kNone_StencilPass, //<! Not drawing a path or clip.
+ kEvenOddStencil_StencilPass, //<! records in/out in stencil buffer
+ // using the Even/Odd fill rule.
+ kEvenOddColor_StencilPass, //<! writes colors to color target in
+ // pixels marked inside the fill by
+ // kEvenOddStencil_StencilPass. Clears
+ // stencil in pixels covered by
+ // geometry.
+ kWindingStencil1_StencilPass, //<! records in/out in stencil buffer
+ // using the Winding fill rule.
+ kWindingStencil2_StencilPass, //<! records in/out in stencil buffer
+ // using the Winding fill rule.
+ // Run when single-stencil-pass winding
+ // not supported (i.e. no separate
+ // stencil support)
+ kWindingColor_StencilPass, //<! writes colors to color target in
+ // pixels marked inside the fill by
+ // the winding stencil passes. Clears
+ // stencil in pixels covered by
+ // geometry.
+ kDrawTargetCount_StencilPass //<! Subclass may extend this enum to use
+ // the stencil for other purposes (e.g.
+ // to do stencil-based clipping)
+ // This value is provided as basis for
+ // defining these extended enum values.
+ };
+
+protected:
+ enum MatrixMode {
+ kModelView_MatrixMode = 0,
+ kTexture_MatrixMode,
+
+ kMatrixModeCount
+ };
+
+ struct DrawState {
+ uint32_t fFlagBits;
+ BlendCoeff fSrcBlend;
+ BlendCoeff fDstBlend;
+ GrTexture* fTexture;
+ GrSamplerState fSamplerState;
+ GrRenderTarget* fRenderTarget;
+ GrColor fColor;
+ float fPointSize;
+ StencilPass fStencilPass;
+ bool fReverseFill;
+ GrMatrix fMatrixModeCache[kMatrixModeCount];
+ bool operator ==(const DrawState& s) const {
+ return 0 == memcmp(this, &s, sizeof(DrawState));
+ }
+ bool operator !=(const DrawState& s) const { return !(*this == s); }
+ };
+
+public:
+ ///////////////////////////////////////////////////////////////////////////
+
+ GrDrawTarget();
+
+ /**
+ * Sets the current clip to the region specified by clip. All draws will be
+ * clipped against this clip if kClip_StateBit is enabled.
+ *
+ * @param clip description of the clipping region
+ */
+ void setClip(const GrClip& clip);
+
+ /**
+ * Gets the current clip.
+ *
+ * @return the clip.
+ */
+ const GrClip& getClip() const;
+
+ /**
+ * Sets the texture used at the next drawing call
+ *
+ * @param texture The texture to set. Can be NULL though there is no advantage
+ * to setting a NULL texture when doing non-textured drawing
+ */
+ void setTexture(GrTexture* texture);
+
+ /**
+ * Retrieves the currently set texture.
+ *
+ * @return The currently set texture. The return value will be NULL if no
+ * texture has been set, NULL was most recently passed to
+ * setTexture, or the texture last passed to setTexture was destroyed.
+ */
+ GrTexture* currentTexture() const;
+
+ /**
+ * Sets the rendertarget used at the next drawing call
+ *
+ * @param target The render target to set. Must be a valid rendertarget.
+ * That is, it is a value that was returned by
+ * currentRenderTarget() or GrTexture::asRenderTarget().
+ */
+ void setRenderTarget(GrRenderTarget* target);
+
+ /**
+ * Retrieves the currently set rendertarget.
+ *
+ * @return The currently set render target.
+ */
+ GrRenderTarget* currentRenderTarget() const;
+
+ /**
+ * Sets the sampler state for the next draw.
+ *
+ * The sampler state determines the address wrap modes and
+ * filtering
+ *
+ * @param samplerState Specifies the sampler state.
+ */
+ void setSamplerState(const GrSamplerState& samplerState);
+
+ /**
+ * Sets the matrix applied to texture coordinates.
+ *
+ * The post-matrix texture coordinates in the square [0,1]^2 cover the
+ * entire area of the texture. This means the full POT width when a NPOT
+ * texture is embedded in a POT width texture to meet the 3D API
+ * requirements. The texture matrix is applied both when the texture
+ * coordinates are explicit and when vertex positions are used as texture
+ * coordinates. In the latter case the texture matrix is applied to the
+ * pre-modelview position values.
+ *
+ * @param m the matrix used to transform the texture coordinates.
+ */
+ void setTextureMatrix(const GrMatrix& m) {
+ this->loadMatrix(m, kTexture_MatrixMode);
+ }
+
+ /**
+ * Sets the matrix applied to vertex positions.
+ *
+ * In the post-view-matrix space the rectangle [0,w]x[0,h]
+ * fully covers the render target. (w and h are the width and height of the
+ * rendertarget.)
+ *
+ * @param m the matrix used to transform the vertex positions.
+ */
+ void setViewMatrix(const GrMatrix& m) {
+ this->loadMatrix(m, kModelView_MatrixMode);
+ }
+
+ /**
+ * Multiplies the current view matrix by a matrix
+ *
+ * After this call V' = V*m where V is the old view matrix,
+ * m is the parameter to this function, and V' is the new view matrix.
+ * (We consider positions to be column vectors so position vector p is
+ * transformed by matrix X as p' = X*p.)
+ *
+ * @param m the matrix used to modify the modelview matrix.
+ */
+ void concatViewMatrix(const GrMatrix& m);
+
+ /**
+ * Sets color for next draw to a premultiplied-alpha color.
+ *
+ * @param the color to set.
+ */
+ void setColor(GrColor);
+
+ /**
+ * Sets the color to be used for the next draw to be
+ * (r,g,b,a) = (alpha, alpha, alpha, alpha).
+ *
+ * @param alpha The alpha value to set as the color.
+ */
+ void setAlpha(uint8_t alpha);
+
+ /**
+ * Sets the stencil pass for path rendering.
+ *
+ * @param pass the stencil pass to use for path rendering
+ */
+ void setStencilPass(StencilPass pass);
+
+ /**
+ * Reverses the in/out decision of the fill rule for path rendering.
+ * Only affects kEvenOddColor_StencilPass and kWindingColor_StencilPass.
+ *
+ * @param reverse true to reverse, false otherwise
+ */
+ void setReverseFill(bool reverse);
+
+ /**
+ * Enable render state settings.
+ *
+ * @param stateBits bitfield of StateBits specifying the states to enable
+ */
+ void enableState(uint32_t stateBits);
+
+ /**
+ * Disable render state settings.
+ *
+ * @param stateBits bitfield of StateBits specifying the states to disable
+ */
+ void disableState(uint32_t stateBits);
+
+ bool isDitherState() const {
+ return fCurrDrawState.fFlagBits & kDither_StateBit;
+ }
+
+ /**
+ * Sets the size of points used the next time points are drawn.
+ *
+ * @param size the point size
+ */
+ void setPointSize(float size);
+
+ /**
+ * Sets the blending function coefficients.
+ *
+ * The blend function will be:
+ * D' = sat(S*srcCoef + D*dstCoef)
+ *
+ * where D is the existing destination color, S is the incoming source
+ * color, and D' is the new destination color that will be written. sat()
+ * is the saturation function.
+ *
+ * @param srcCoef coefficient applied to the src color.
+ * @param dstCoef coefficient applied to the dst color.
+ */
+ void setBlendFunc(BlendCoeff srcCoef, BlendCoeff dstCoef);
+
+ /**
+ * Retrieves the current view matrix
+ * @param matrix will be the current view matrix after return.
+ */
+ void getViewMatrix(GrMatrix* matrix) const;
+
+ /**
+ * Retrieves the inverse of the current view matrix.
+ *
+ * If the current view matrix is invertible, return true, and if matrix
+ * is non-null, copy the inverse into it. If the current view matrix is
+ * non-invertible, return false and ignore the matrix parameter.
+ *
+ * @param matrix if not null, will receive a copy of the current inverse.
+ */
+ bool getViewInverse(GrMatrix* matrix) const;
+
+ /**
+ * Used to save and restore the GrGpu's drawing state
+ */
+ struct SavedDrawState {
+ private:
+ DrawState fState;
+ friend class GrDrawTarget;
+ };
+
+ /**
+ * Saves the current draw state. The state can be restored at a later time
+ * with restoreDrawState.
+ *
+ * See also AutoStateRestore class.
+ *
+ * @param state will hold the state after the function returns.
+ */
+ void saveCurrentDrawState(SavedDrawState* state) const;
+
+ /**
+ * Restores previously saved draw state. The client guarantees that state
+ * was previously passed to saveCurrentDrawState and that the rendertarget
+ * and texture set at save are still valid.
+ *
+ * See also AutoStateRestore class.
+ *
+ * @param state the previously saved state to restore.
+ */
+ void restoreDrawState(const SavedDrawState& state);
+
+ /**
+ * Copies the draw state from another target to this target.
+ *
+ * @param srcTarget draw target used as src of the draw state.
+ */
+ void copyDrawState(const GrDrawTarget& srcTarget);
+
+ /**
+ * Flags that indicate the layout of vertex data.
+ *
+ * kSeparateTexCoord_VertexLayoutBit is incompatible with
+ * kPositionAsTexCoord_VertexLayoutBit. kTextFormat_VertexLayoutBit is
+ * incompatible with any other flags.
+ *
+ * When kTextFormat_VertexLayoutBit is set:
+ * Texture coordinates are separate.
+ * Positions and Texture coordinates are SkGpuTextVertex.
+ * For non-text vertices:
+ * Position and texture coordinates are GrPoints.
+ * Colors are GrColors.
+ *
+ * The order is always positions, texture coords, colors.
+ */
+ enum VertexLayoutBits {
+ kSeparateTexCoord_VertexLayoutBit = 0x1, //<! vertices have texture
+ // coords that are not
+ // inferred from the
+ // positions
+ kPositionAsTexCoord_VertexLayoutBit = 0x2, //<! vertices use positions
+ // as texture coords.
+ kColor_VertexLayoutBit = 0x4, //<! vertices have colors
+ kTextFormat_VertexLayoutBit = 0x8, //<! vertices represent glyphs
+ // and therefore contain
+ // two GrGpuTextVertexs.
+ // One for pos and one for
+ // text coords.
+ // for below assert
+ kDummy,
+ kHighVertexLayoutBit = kDummy - 1
+ };
+ GR_STATIC_ASSERT(kHighVertexLayoutBit < (1 << 8*sizeof(GrVertexLayout)));
+
+ /**
+ * Reserves space for vertices and/or indices. Draw target will use
+ * reserved vertices / indices at next draw.
+ *
+ * If succeeds:
+ * if vertexCount is nonzero, *vertices will be the array
+ * of vertices to be filled by caller. The next draw will read
+ * these vertices.
+ *
+ * if indexCount is nonzero, *indices will be the array of indices
+ * to be filled by caller. The next indexed draw will read from
+ * these indices.
+ *
+ * If a client does not already have a vertex buffer or cpu arrays then this
+ * is the preferred way to allocate vertex/index array. It allows the
+ * subclass of GrDrawTarget to decide whether to put data in buffers, to
+ * group vertex data that uses the same state (e.g. for deferred rendering),
+ * etc.
+ *
+ * This must be matched with a releaseReservedGeometry call after all
+ * draws that reference the reserved geometry data have been called.
+ *
+ * AutoGeometryRelease can be used to automatically call the release.
+ *
+ * @param vertexCount the number of vertices to reserve space for. Can be 0.
+ * @param indexCount the number of indices to reserve space for. Can be 0.
+ * @param vertexLayout the format of vertices (ignored if vertexCount == 0).
+ * @param vertices will point to reserved vertex space if vertexCount is
+ * non-zero. Illegal to pass NULL if vertexCount > 0.
+ * @param indices will point to reserved index space if indexCount is
+ * non-zero. Illegal to pass NULL if indexCount > 0.
+ *
+ * @return true if succeeded in allocating space for the vertices and false
+ * if not.
+ */
+ bool reserveAndLockGeometry(GrVertexLayout vertexLayout,
+ uint32_t vertexCount,
+ uint32_t indexCount,
+ void** vertices,
+ void** indices);
+ /**
+ * Provides hints to caller about the number of vertices and indices
+ * that can be allocated cheaply. This can be useful if caller is reserving
+ * space but doesn't know exactly how much geometry is needed.
+ *
+ * Also may hint whether the draw target should be flushed first. This is
+ * useful for deferred targets.
+ *
+ * @param vertexLayout layout of vertices caller would like to reserve
+ * @param vertexCount in: hint about how many vertices the caller would
+ * like to allocate.
+ * out: a hint about the number of vertices that can be
+ * allocated cheaply. Negative means no hint.
+ * Ignored if NULL.
+ * @param indexCount in: hint about how many indices the caller would
+ * like to allocate.
+ * out: a hint about the number of indices that can be
+ * allocated cheaply. Negative means no hint.
+ * Ignored if NULL.
+ *
+ * @return true if target should be flushed based on the input values.
+ */
+ virtual bool geometryHints(GrVertexLayout vertexLayout,
+ int32_t* vertexCount,
+ int32_t* indexCount) const;
+
+ /**
+ * Releases reserved vertex/index data from reserveAndLockGeometry().
+ */
+ void releaseReservedGeometry();
+
+ /**
+ * Sets source of vertex data for the next draw. Data does not have to be
+ * in the array until drawIndexed or drawNonIndexed.
+ *
+ * @param array cpu array containing vertex data.
+ * @param vertexLayout layout of the vertex data in the array.
+ */
+ void setVertexSourceToArray(const void* array, GrVertexLayout vertexLayout);
+
+ /**
+ * Sets source of index data for the next indexed draw. Data does not have
+ * to be in the array until drawIndexed or drawNonIndexed.
+ *
+ * @param array cpu array containing index data.
+ */
+ void setIndexSourceToArray(const void* array);
+
+ /**
+ * Sets source of vertex data for the next draw. Data does not have to be
+ * in the buffer until drawIndexed or drawNonIndexed.
+ *
+ * @param buffer vertex buffer containing vertex data. Must be
+ * unlocked before draw call.
+ * @param vertexLayout layout of the vertex data in the buffer.
+ */
+ void setVertexSourceToBuffer(const GrVertexBuffer* buffer,
+ GrVertexLayout vertexLayout);
+
+ /**
+ * Sets source of index data for the next indexed draw. Data does not have
+ * to be in the buffer until drawIndexed or drawNonIndexed.
+ *
+ * @param buffer index buffer containing indices. Must be unlocked
+ * before indexed draw call.
+ */
+ void setIndexSourceToBuffer(const GrIndexBuffer* buffer);
+
+ /**
+ * Draws indexed geometry using the current state and current vertex / index
+ * sources.
+ *
+ * @param type The type of primitives to draw.
+ * @param startVertex the vertex in the vertex array/buffer corresponding
+ * to index 0
+ * @param startIndex first index to read from index src.
+ * @param vertexCount one greater than the max index.
+ * @param indexCount the number of index elements to read. The index count
+ * is effectively trimmed to the last completely
+ * specified primitive.
+ */
+ virtual void drawIndexed(PrimitiveType type,
+ uint32_t startVertex,
+ uint32_t startIndex,
+ uint32_t vertexCount,
+ uint32_t indexCount) = 0;
+
+ /**
+ * Draws non-indexed geometry using the current state and current vertex
+ * sources.
+ *
+ * @param type The type of primitives to draw.
+ * @param startVertex the vertex in the vertex array/buffer corresponding
+ * to index 0
+ * @param vertexCount one greater than the max index.
+ */
+ virtual void drawNonIndexed(PrimitiveType type,
+ uint32_t startVertex,
+ uint32_t vertexCount) = 0;
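Putting the vertex-source setters and the draw entry points together, a minimal calling sequence looks like the sketch below. It assumes a valid GrDrawTarget* named target, that a vertex layout of 0 means position-only GrPoint vertices, and that GrPoint::set() and GrIntToScalar (from GrScalar.h) behave as elsewhere in Gr.

    #include "GrDrawTarget.h"
    #include "GrPoint.h"

    // Draw a single triangle from a CPU-side, position-only vertex array.
    static void drawTriangle(GrDrawTarget* target) {
        GrPoint verts[3];
        verts[0].set(GrIntToScalar(0),  GrIntToScalar(0));
        verts[1].set(GrIntToScalar(10), GrIntToScalar(0));
        verts[2].set(GrIntToScalar(0),  GrIntToScalar(10));

        // The array only has to remain valid until the draw is issued.
        target->setVertexSourceToArray(verts, 0 /* position-only layout */);
        target->drawNonIndexed(GrDrawTarget::kTriangles_PrimitiveType,
                               0 /* startVertex */, 3 /* vertexCount */);
    }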
+
+ ///////////////////////////////////////////////////////////////////////////
+
+ class AutoStateRestore : ::GrNoncopyable {
+ public:
+ AutoStateRestore(GrDrawTarget* target);
+ ~AutoStateRestore();
+
+ private:
+ GrDrawTarget* fDrawTarget;
+ SavedDrawState fDrawState;
+ };
+
+ ///////////////////////////////////////////////////////////////////////////
+
+ class AutoReleaseGeometry : ::GrNoncopyable {
+ public:
+ AutoReleaseGeometry(GrDrawTarget* target,
+ GrVertexLayout vertexLayout,
+ uint32_t vertexCount,
+ uint32_t indexCount) {
+ fTarget = target;
+ fSuccess = fTarget->reserveAndLockGeometry(vertexLayout,
+ vertexCount,
+ indexCount,
+ &fVertices,
+ &fIndices);
+ }
+ ~AutoReleaseGeometry() {
+ if (fSuccess) {
+ fTarget->releaseReservedGeometry();
+ }
+ }
+
+ bool succeeded() const { return fSuccess; }
+ void* vertices() const { return fVertices; }
+ void* indices() const { return fIndices; }
+
+ GrPoint* positions() const {
+ return static_cast<GrPoint*>(fVertices);
+ }
+
+ private:
+ GrDrawTarget* fTarget;
+ bool fSuccess;
+ void* fVertices;
+ void* fIndices;
+ };
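A sketch of the reserve/fill/draw/release cycle this helper is meant to wrap, assuming a valid target, a position-only layout of 0, 16-bit indices, and the GrPoint::set()/GrIntToScalar conventions used elsewhere in Gr:

    #include <string.h>
    #include "GrDrawTarget.h"
    #include "GrPoint.h"

    // Reserve 4 vertices and 6 indices, fill them, and draw a quad as two
    // triangles; the destructor releases the reservation afterwards.
    static void drawReservedQuad(GrDrawTarget* target) {
        GrDrawTarget::AutoReleaseGeometry geo(target, 0 /* positions only */,
                                              4 /* vertices */, 6 /* indices */);
        if (!geo.succeeded()) {
            return;
        }
        GrPoint* pos = geo.positions();
        pos[0].set(GrIntToScalar(0),  GrIntToScalar(0));
        pos[1].set(GrIntToScalar(10), GrIntToScalar(0));
        pos[2].set(GrIntToScalar(10), GrIntToScalar(10));
        pos[3].set(GrIntToScalar(0),  GrIntToScalar(10));

        static const uint16_t quadIndices[] = { 0, 1, 2, 0, 2, 3 };
        memcpy(geo.indices(), quadIndices, sizeof(quadIndices));

        // The next draw reads the reserved data; vertexCount (4) is one
        // greater than the max index, indexCount is 6.
        target->drawIndexed(GrDrawTarget::kTriangles_PrimitiveType, 0, 0, 4, 6);
    }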
+
+ ///////////////////////////////////////////////////////////////////////////
+
+ class AutoClipRestore : ::GrNoncopyable {
+ public:
+ AutoClipRestore(GrDrawTarget* target) {
+ fTarget = target;
+ fClip = fTarget->getClip();
+ }
+
+ ~AutoClipRestore() {
+ fTarget->setClip(fClip);
+ }
+ private:
+ GrDrawTarget* fTarget;
+ GrClip fClip;
+ };
+
+ ////////////////////////////////////////////////////////////////////////////
+
+ /**
+ * Helper function to compute the size of a vertex from a vertex layout
+ * @return size of a single vertex.
+ */
+ static size_t VertexSize(GrVertexLayout vertexLayout);
+
+ /**
+ * Helper function to compute the offset of texture coordinates in a vertex
+ * @return offset of texture coordinates in vertex layout or -1 if the
+ * layout has no texture coordinates.
+ */
+ static int VertexTexCoordOffset(GrVertexLayout vertexLayout);
+
+ /**
+ * Helper function to compute the offset of the color in a vertex
+ * @return offset of color in vertex layout or -1 if the
+ * layout has no color.
+ */
+ static int VertexColorOffset(GrVertexLayout vertexLayout);
+
+ /**
+ * Helper function to compute vertex size and component offsets.
+ * @param texCoordOffset after return it is the offset of texture coords
+ * in vertex layout or -1 if the layout has no
+ * texture coords.
+ * @param colorOffset after return it is the offset of color in vertex
+ * layout or -1 if the layout has no color.
+ * @return size of a single vertex.
+ */
+ static int VertexSizeAndOffsets(GrVertexLayout vertexLayout,
+ int* texCoordOffset,
+ int* colorOffset);
+ /**
+ * Helper function to determine if vertex layout contains either explicit or
+ * implicit texture coordinates.
+ *
+ * @return true if vertex specifies texture coordinates, false otherwise.
+ */
+ static bool VertexHasTexCoords(GrVertexLayout vertexLayout);
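These helpers let callers fill interleaved vertex data without hard-coding a layout. A small sketch that relies only on the documented ordering (positions, texture coords, colors) and the -1 "no component" convention:

    // Write position and (optional) color for vertex 'i' into a buffer that
    // follows 'layout'. A negative offset means the layout lacks that field.
    static void writeVertex(void* vertexData, GrVertexLayout layout, int i,
                            const GrPoint& pos, GrColor color) {
        const size_t stride = GrDrawTarget::VertexSize(layout);
        char* vertex = static_cast<char*>(vertexData) + i * stride;

        *reinterpret_cast<GrPoint*>(vertex) = pos;   // positions come first

        const int colorOffset = GrDrawTarget::VertexColorOffset(layout);
        if (colorOffset >= 0) {
            *reinterpret_cast<GrColor*>(vertex + colorOffset) = color;
        }
    }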
+
+protected:
+
+ // Helpers for GrDrawTarget subclasses that won't have private access to
+ // SavedDrawState but need to peek at the state values.
+ static DrawState& accessSavedDrawState(SavedDrawState& sds)
+ { return sds.fState; }
+ static const DrawState& accessSavedDrawState(const SavedDrawState& sds)
+ { return sds.fState; }
+
+ // implemented by subclass
+ virtual bool acquireGeometryHelper(GrVertexLayout vertexLayout,
+ void** vertices,
+ void** indices) = 0;
+
+ virtual void releaseGeometryHelper() = 0;
+
+ virtual void clipWillChange(const GrClip& clip) = 0;
+
+ enum GeometrySrcType {
+ kArray_GeometrySrcType,
+ kReserved_GeometrySrcType,
+ kBuffer_GeometrySrcType
+ };
+
+ struct {
+ bool fLocked;
+ uint32_t fVertexCount;
+ uint32_t fIndexCount;
+ } fReservedGeometry;
+
+ struct GeometrySrc {
+ GeometrySrcType fVertexSrc;
+ union {
+ const GrVertexBuffer* fVertexBuffer;
+ const void* fVertexArray;
+ };
+ GeometrySrcType fIndexSrc;
+ union {
+ const GrIndexBuffer* fIndexBuffer;
+ const void* fIndexArray;
+ };
+ GrVertexLayout fVertexLayout;
+ } fGeometrySrc;
+
+ GrClip fClip;
+
+ DrawState fCurrDrawState;
+
+ // set texture or modelview matrix
+ void loadMatrix(const GrMatrix&, MatrixMode);
+
+ // not meant for outside usage. Could cause problems if calls between
+ // the save and restore mess with reserved geometry state.
+ class AutoGeometrySrcRestore {
+ public:
+ AutoGeometrySrcRestore(GrDrawTarget* target) {
+ fTarget = target;
+ fGeometrySrc = fTarget->fGeometrySrc;
+ }
+ ~AutoGeometrySrcRestore() {
+ fTarget->fGeometrySrc = fGeometrySrc;
+ }
+ private:
+ GrDrawTarget *fTarget;
+ GeometrySrc fGeometrySrc;
+
+ AutoGeometrySrcRestore();
+ AutoGeometrySrcRestore(const AutoGeometrySrcRestore&);
+ AutoGeometrySrcRestore& operator =(AutoGeometrySrcRestore&);
+ };
+
+};
+
+#endif
diff --git a/gpu/include/GrFontScaler.h b/gpu/include/GrFontScaler.h
new file mode 100644
index 0000000000..6baa56fff3
--- /dev/null
+++ b/gpu/include/GrFontScaler.h
@@ -0,0 +1,43 @@
+/*
+ Copyright 2010 Google Inc.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+ */
+
+
+#ifndef GrFontScaler_DEFINED
+#define GrFontScaler_DEFINED
+
+#include "GrGlyph.h"
+#include "GrKey.h"
+
+class GrPath;
+
+/**
+ * This is a virtual base class that represents Gr's interface to the host
+ * platform's font scaler.
+ *
+ * The client is responsible for subclassing and instantiating this class.
+ * An instance is created for a specific font+size+matrix combination.
+ */
+class GrFontScaler : public GrRefCnt {
+public:
+ virtual const GrKey* getKey() = 0;
+ virtual bool getPackedGlyphBounds(GrGlyph::PackedID, GrIRect* bounds) = 0;
+ virtual bool getPackedGlyphImage(GrGlyph::PackedID, int width, int height,
+ int rowBytes, void* image) = 0;
+ virtual bool getGlyphPath(uint16_t glyphID, GrPath*) = 0;
+};
+
+#endif
+
diff --git a/gpu/include/GrGLConfig.h b/gpu/include/GrGLConfig.h
new file mode 100644
index 0000000000..90763abc5c
--- /dev/null
+++ b/gpu/include/GrGLConfig.h
@@ -0,0 +1,323 @@
+/*
+ Copyright 2010 Google Inc.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+ */
+
+
+#ifndef GrGLConfig_DEFINED
+#define GrGLConfig_DEFINED
+
+#include "GrTypes.h"
+
+#if GR_WIN32_BUILD
+ // glew has to be included before gl
+ #define GR_INCLUDE_GLDESKTOP <GL/glew.h>
+ #define GR_INCLUDE_GLDESKTOPext <GL/gl.h>
+ #define GR_GL_FUNC __stdcall
+ // undo stupid windows defines
+ #undef near
+ #undef far
+#elif GR_MAC_BUILD
+ #define GR_INCLUDE_GLDESKTOP <OpenGL/gl.h>
+ #define GR_INCLUDE_GLDESKTOPext <OpenGL/glext.h>
+ #define GR_GL_FUNC
+#elif GR_IOS_BUILD
+ #define GR_INCLUDE_GLES1 <OpenGLES/ES1/gl.h>
+ #define GR_INCLUDE_GLES1ext <OpenGLES/ES1/glext.h>
+ #define GR_INCLUDE_GLES2 <OpenGLES/ES2/gl.h>
+ #define GR_INCLUDE_GLES2ext <OpenGLES/ES2/glext.h>
+ #define GR_GL_FUNC
+#elif GR_ANDROID_BUILD
+ #ifndef GL_GLEXT_PROTOTYPES
+ #define GL_GLEXT_PROTOTYPES
+ #endif
+ #define GR_INCLUDE_GLES2 <GLES2/gl2.h>
+ #define GR_INCLUDE_GLES2ext <GLES2/gl2ext.h>
+ #define GR_GL_FUNC
+#elif GR_LINUX_BUILD
+ // need to distinguish between ES and Desktop versions for linux
+ #ifndef GL_GLEXT_PROTOTYPES
+ #define GL_GLEXT_PROTOTYPES
+ #endif
+ #define GR_INCLUDE_GLES1 <GLES/gl.h>
+ #define GR_INCLUDE_GLES1ext <GLES/glext.h>
+ #define GR_INCLUDE_GLES2 <GLES2/gl2.h>
+ #define GR_INCLUDE_GLES2ext <GLES2/gl2ext.h>
+ #define GR_GL_FUNC
+#elif GR_QNX_BUILD
+ #ifndef GL_GLEXT_PROTOTYPES
+ #define GL_GLEXT_PROTOTYPES
+ #endif
+ // This is needed by the QNX GLES2 headers
+ #define GL_API_EXT
+ #define GR_INCLUDE_GLES2 <GLES2/gl2.h>
+ #define GR_INCLUDE_GLES2ext <GLES2/gl2ext.h>
+ #define GR_INCLUDE_EGL <EGL/egl.h>
+ #define GR_GL_FUNC
+#else
+ #error "unsupported GR_???_BUILD"
+#endif
+
+// Ensure we're at least defined
+//
+
+#ifndef GR_SUPPORT_GLES1
+ #if defined(GR_INCLUDE_GLES1)
+ #define GR_SUPPORT_GLES1 1
+ #else
+ #define GR_SUPPORT_GLES1 0
+ #endif
+#endif
+
+#ifndef GR_SUPPORT_GLES2
+ #if defined(GR_INCLUDE_GLES2)
+ #define GR_SUPPORT_GLES2 1
+ #else
+ #define GR_SUPPORT_GLES2 0
+ #endif
+#endif
+
+#ifndef GR_SUPPORT_GLDESKTOP
+ #if defined(GR_INCLUDE_GLDESKTOP)
+ #define GR_SUPPORT_GLDESKTOP 1
+ #else
+ #define GR_SUPPORT_GLDESKTOP 0
+ #endif
+#endif
+
+#ifndef GR_SUPPORT_EGL
+ #if defined(GR_INCLUDE_EGL)
+ #define GR_SUPPORT_EGL 1
+ #else
+ #define GR_SUPPORT_EGL 0
+ #endif
+#endif
+// Filter the includes based on what we support
+//
+
+#if !GR_SUPPORT_GLES1
+ #undef GR_INCLUDE_GLES1
+ #undef GR_INCLUDE_GLES1ext
+#endif
+
+#if !GR_SUPPORT_GLES2
+ #undef GR_INCLUDE_GLES2
+ #undef GR_INCLUDE_GLES2ext
+#endif
+
+#if !GR_SUPPORT_GLDESKTOP
+ #undef GR_INCLUDE_GLDESKTOP
+ #undef GR_INCLUDE_GLDESKTOPext
+#endif
+
+#if !GR_SUPPORT_EGL
+ #undef GR_INCLUDE_EGL
+#endif
+
+// Begin including GL headers
+//
+
+#ifdef GR_INCLUDE_GLES1
+ #include GR_INCLUDE_GLES1
+#endif
+#ifdef GR_INCLUDE_GLES1ext
+ #include GR_INCLUDE_GLES1ext
+#endif
+#ifdef GR_INCLUDE_GLES2
+ #include GR_INCLUDE_GLES2
+#endif
+#ifdef GR_INCLUDE_GLES2ext
+ #include GR_INCLUDE_GLES2ext
+#endif
+#ifdef GR_INCLUDE_GLDESKTOP
+ #include GR_INCLUDE_GLDESKTOP
+#endif
+#ifdef GR_INCLUDE_GLDESKTOPext
+ #include GR_INCLUDE_GLDESKTOPext
+#endif
+#ifdef GR_INCLUDE_EGL
+ #include GR_INCLUDE_EGL
+#endif
+
+//
+// End including GL headers
+
+#if GL_VERSION_1_1
+ #define GR_GL_DESKTOP 1
+ #define GR_GL_ES 0
+#else
+ #ifndef GL_ES_VERSION_2_0
+ GR_STATIC_ASSERT(GL_VERSION_ES_CM_1_0 ||
+ GL_VERSION_ES_CL_1_0 ||
+ GL_VERSION_ES_CM_1_1 ||
+ GL_VERSION_ES_CL_1_1);
+ #endif
+ #define GR_GL_DESKTOP 0
+ #define GR_GL_ES 1
+#endif
+
+#if GR_SCALAR_IS_FIXED
+ #define GrGLType GL_FIXED
+#elif GR_SCALAR_IS_FLOAT
+ #define GrGLType GL_FLOAT
+#else
+ #error "unknown GR_SCALAR type"
+#endif
+
+#if GR_TEXT_SCALAR_IS_USHORT
+ #define GrGLTextType GL_UNSIGNED_SHORT
+ #define GR_GL_TEXT_TEXTURE_NORMALIZED 1
+#elif GR_TEXT_SCALAR_IS_FLOAT
+ #define GrGLTextType GL_FLOAT
+ #define GR_GL_TEXT_TEXTURE_NORMALIZED 0
+#elif GR_TEXT_SCALAR_IS_FIXED
+ #define GrGLTextType GL_FIXED
+ #define GR_GL_TEXT_TEXTURE_NORMALIZED 0
+#else
+ #error "unknown GR_TEXT_SCALAR type"
+#endif
+
+// Pick a pixel config for 32bit bitmaps. Our default is GL_RGBA
+#ifndef SK_GL_32BPP_COLOR_FORMAT
+ #define SK_GL_32BPP_COLOR_FORMAT GL_RGBA
+#endif
+
+////////////////////////////////////////////////////////////////////////////////
+// setup for opengl ES/desktop extensions
+// we make a struct of function pointers so that each GL context
+// can have its own struct. (Some environments may have different proc
+// addresses for different contexts).
+
+extern "C" {
+struct GrGLExts {
+// FBO
+ GLvoid (GR_GL_FUNC *GenFramebuffers)(GLsizei n, GLuint *framebuffers);
+ GLvoid (GR_GL_FUNC *BindFramebuffer)(GLenum target, GLuint framebuffer);
+ GLvoid (GR_GL_FUNC *FramebufferTexture2D)(GLenum target, GLenum attachment,
+ GLenum textarget, GLuint texture,
+ GLint level);
+ GLenum (GR_GL_FUNC *CheckFramebufferStatus)(GLenum target);
+ GLvoid (GR_GL_FUNC *DeleteFramebuffers)(GLsizei n, const
+ GLuint *framebuffers);
+ GLvoid (GR_GL_FUNC *RenderbufferStorage)(GLenum target,
+ GLenum internalformat,
+ GLsizei width, GLsizei height);
+ GLvoid (GR_GL_FUNC *GenRenderbuffers)(GLsizei n, GLuint *renderbuffers);
+ GLvoid (GR_GL_FUNC *DeleteRenderbuffers)(GLsizei n,
+ const GLuint *renderbuffers);
+ GLvoid (GR_GL_FUNC *FramebufferRenderbuffer)(GLenum target,
+ GLenum attachment,
+ GLenum renderbuffertarget,
+ GLuint renderbuffer);
+ GLvoid (GR_GL_FUNC *BindRenderbuffer)(GLenum target, GLuint renderbuffer);
+
+// Multisampling
+ // same prototype for ARB_FBO, EXT_FBO, GL 3.0, & Apple ES extension
+ GLvoid (GR_GL_FUNC *RenderbufferStorageMultisample)(GLenum target,
+ GLsizei samples,
+ GLenum internalformat,
+ GLsizei width,
+ GLsizei height);
+ // desktop: ext_fbo_blit, arb_fbo, gl 3.0
+ GLvoid (GR_GL_FUNC *BlitFramebuffer)(GLint srcX0, GLint srcY0,
+ GLint srcX1, GLint srcY1,
+ GLint dstX0, GLint dstY0,
+ GLint dstX1, GLint dstY1,
+ GLbitfield mask, GLenum filter);
+ // apple's es extension
+ GLvoid (GR_GL_FUNC *ResolveMultisampleFramebuffer)();
+
+ // IMG's ES extension
+ GLvoid (GR_GL_FUNC *FramebufferTexture2DMultisample)(GLenum target,
+ GLenum attachment,
+ GLenum textarget,
+ GLuint texture,
+ GLint level,
+ GLsizei samples);
+
+// Buffer mapping (extension in ES).
+ GLvoid* (GR_GL_FUNC *MapBuffer)(GLenum target, GLenum access);
+ GLboolean (GR_GL_FUNC *UnmapBuffer)(GLenum target);
+};
+}
+
+// FBO
+#define GR_FRAMEBUFFER 0x8D40
+#define GR_FRAMEBUFFER_COMPLETE 0x8CD5
+#define GR_COLOR_ATTACHMENT0 0x8CE0
+#define GR_FRAMEBUFFER_BINDING 0x8CA6
+#define GR_RENDERBUFFER 0x8D41
+#define GR_STENCIL_ATTACHMENT 0x8D20
+#define GR_STENCIL_INDEX8 0x8D48
+#define GR_STENCIL_INDEX16 0x8D49
+#define GR_MAX_RENDERBUFFER_SIZE 0x84E8
+#define GR_DEPTH_STENCIL_ATTACHMENT 0x821A
+#define GR_UNSIGNED_INT_24_8 0x84FA
+#define GR_DEPTH_STENCIL 0x84F9
+#define GR_RGBA8 0x8058
+#define GR_RGB565 0x8D62
+
+
+// Multisampling
+
+// IMG's MAX_SAMPLES uses a different value than the desktop and Apple ES extensions.
+#define GR_MAX_SAMPLES 0x8D57
+#define GR_MAX_SAMPLES_IMG 0x9135
+#define GR_READ_FRAMEBUFFER 0x8CA8
+#define GR_DRAW_FRAMEBUFFER 0x8CA9
+
+// Buffer mapping
+#define GR_WRITE_ONLY 0x88B9
+#define GR_BUFFER_MAPPED 0x88BC
+
+// Palette texture
+#define GR_PALETTE8_RGBA8 0x8B91
+
+extern void GrGLInitExtensions(GrGLExts* exts);
+////////////////////////////////////////////////////////////////////////////////
+
+extern void GrGLCheckErr(const char* location, const char* call);
+
+static inline void GrGLClearErr() {
+ while (GL_NO_ERROR != glGetError()) {}
+}
+
+// GR_FORCE_GLCHECKERR can be defined by GrUserConfig.h
+#if defined(GR_FORCE_GLCHECKERR)
+ #define GR_LOCAL_CALL_CHECKERR GR_FORCE_GLCHECKERR
+#else
+ #define GR_LOCAL_CALL_CHECKERR GR_DEBUG
+#endif
+static inline void GrDebugGLCheckErr(const char* location, const char* call) {
+#if GR_LOCAL_CALL_CHECKERR
+ GrGLCheckErr(location, call);
+#endif
+}
+#undef GR_LOCAL_CALL_CHECKERR
+
+#if GR_GL_LOG_CALLS
+ extern bool gPrintGL;
+ #define GR_GL(X) gl ## X; GrDebugGLCheckErr(GR_FILE_AND_LINE_STR, #X); if (gPrintGL) GrPrintf(GR_FILE_AND_LINE_STR "GL: " #X "\n")
+ #define GR_GL_NO_ERR(X) GrGLClearErr(); gl ## X; if (gPrintGL) GrPrintf(GR_FILE_AND_LINE_STR "GL: " #X "\n")
+ #define GR_GLEXT(exts, X) exts. X; GrDebugGLCheckErr(GR_FILE_AND_LINE_STR, #X); if (gPrintGL) GrPrintf(GR_FILE_AND_LINE_STR "GL: " #X "\n")
+ #define GR_GLEXT_NO_ERR(exts, X) GrGLClearErr(); exts. X; if (gPrintGL) GrPrintf(GR_FILE_AND_LINE_STR "GL: " #X "\n")
+#else
+ #define GR_GL(X) gl ## X; GrDebugGLCheckErr(GR_FILE_AND_LINE_STR, #X)
+ #define GR_GL_NO_ERR(X) GrGLClearErr(); gl ## X
+ #define GR_GLEXT(exts, X) exts. X; GrDebugGLCheckErr(GR_FILE_AND_LINE_STR, #X)
+ #define GR_GLEXT_NO_ERR(exts, X) GrGLClearErr(); exts. X
+#endif
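As a usage sketch, the wrappers above are invoked with the "gl" prefix (or the exts member access) folded into the macro; fboID here is a caller-supplied framebuffer name and GrGLInitExtensions(&exts) is assumed to have been run for the current context:

    static void bindAndClear(const GrGLExts& exts, GLuint fboID) {
        // Extension entry point called through the per-context function
        // pointer struct, followed by the debug error check in debug builds.
        GR_GLEXT(exts, BindFramebuffer(GR_FRAMEBUFFER, fboID));

        // Plain GL entry points drop the "gl" prefix inside the macro.
        GR_GL(ClearColor(0, 0, 0, 0));
        GR_GL(Clear(GL_COLOR_BUFFER_BIT));
    }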
+
+#endif
+
diff --git a/gpu/include/GrGLIndexBuffer.h b/gpu/include/GrGLIndexBuffer.h
new file mode 100644
index 0000000000..5177b4b7f5
--- /dev/null
+++ b/gpu/include/GrGLIndexBuffer.h
@@ -0,0 +1,53 @@
+/*
+ Copyright 2010 Google Inc.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+ */
+
+
+#ifndef GrGLIndexBuffer_DEFINED
+#define GrGLIndexBuffer_DEFINED
+
+#include "GrIndexBuffer.h"
+#include "GrGLConfig.h"
+
+class GrGpuGL;
+
+class GrGLIndexBuffer : public GrIndexBuffer {
+protected:
+ GrGLIndexBuffer(GLuint id,
+ GrGpuGL* gl,
+ uint32_t sizeInBytes,
+ bool dynamic);
+public:
+ virtual ~GrGLIndexBuffer();
+
+ GLuint bufferID() const;
+
+ // overrides of GrIndexBuffer
+ virtual void abandon();
+ virtual void* lock();
+ virtual void unlock();
+ virtual bool isLocked() const;
+ virtual bool updateData(const void* src, uint32_t srcSizeInBytes);
+private:
+ GrGpuGL* fGL;
+ GLuint fBufferID;
+ void* fLockPtr;
+
+ friend class GrGpuGL;
+
+ typedef GrIndexBuffer INHERITED;
+};
+
+#endif
diff --git a/gpu/include/GrGLTexture.h b/gpu/include/GrGLTexture.h
new file mode 100644
index 0000000000..ada31512f5
--- /dev/null
+++ b/gpu/include/GrGLTexture.h
@@ -0,0 +1,166 @@
+/*
+ Copyright 2010 Google Inc.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+ */
+
+
+#ifndef GrGLTexture_DEFINED
+#define GrGLTexture_DEFINED
+
+#include "GrGLConfig.h"
+#include "GrGpu.h"
+#include "GrTexture.h"
+#include "GrRect.h"
+
+class GrGpuGL;
+class GrGLTexture;
+
+class GrGLRenderTarget : public GrRenderTarget {
+protected:
+
+ struct GLRenderTargetIDs {
+ GLuint fRTFBOID;
+ GLuint fTexFBOID;
+ GLuint fStencilRenderbufferID;
+ GLuint fMSColorRenderbufferID;
+ bool fOwnIDs;
+ };
+
+ GrGLRenderTarget(const GLRenderTargetIDs& ids,
+ const GrIRect& fViewport,
+ GrGLTexture* texture,
+ GrGpuGL* gl);
+
+ void setViewport(const GrIRect& rect) { GrAssert(rect.height() <= 0);
+ fViewport = rect;}
+
+ virtual uint32_t width() const { return fViewport.width(); }
+ virtual uint32_t height() const { return -fViewport.height(); }
+
+public:
+ virtual ~GrGLRenderTarget();
+
+ bool resolveable() const { return fRTFBOID != fTexFBOID; }
+ bool needsResolve() const { return fNeedsResolve; }
+ void setDirty(bool dirty) { fNeedsResolve = resolveable() && dirty; }
+
+ GLuint renderFBOID() const { return fRTFBOID; }
+ GLuint textureFBOID() const { return fTexFBOID; }
+
+ const GrIRect& viewport() const { return fViewport; }
+ void abandon();
+
+private:
+ GrGpuGL* fGL;
+ GLuint fRTFBOID;
+ GLuint fTexFBOID;
+ GLuint fStencilRenderbufferID;
+ GLuint fMSColorRenderbufferID;
+
+ // Should this object delete the IDs when it is destroyed, or does someone
+ // else own them?
+ bool fOwnIDs;
+
+ // If there are separate Texture and RenderTarget FBO IDs then the rendertarget
+ // must be resolved to the texture FBO before it is used as a texture.
+ bool fNeedsResolve;
+
+ // when we switch to this rendertarget we want to set the viewport to
+ // only render to the content area (as opposed to the whole allocation) and
+ // we want the rendering to be at top left (GL has origin in bottom left)
+ GrIRect fViewport;
+
+ friend class GrGpuGL;
+ friend class GrGLTexture;
+
+ typedef GrRenderTarget INHERITED;
+};
+
+class GrGLTexture : public GrTexture {
+public:
+ enum Orientation {
+ kBottomUp_Orientation,
+ kTopDown_Orientation,
+ };
+
+protected:
+ struct GLTextureDesc {
+ uint32_t fContentWidth;
+ uint32_t fContentHeight;
+ uint32_t fAllocWidth;
+ uint32_t fAllocHeight;
+ PixelConfig fFormat;
+ GLuint fTextureID;
+ GLenum fUploadFormat;
+ GLenum fUploadByteCount;
+ GLenum fUploadType;
+ Orientation fOrientation;
+ };
+ typedef GrGLRenderTarget::GLRenderTargetIDs GLRenderTargetIDs;
+ GrGLTexture(const GLTextureDesc& textureDesc,
+ const GLRenderTargetIDs& rtIDs,
+ GrGpuGL* gl);
+
+public:
+ virtual ~GrGLTexture();
+
+ // overloads of GrTexture
+ virtual void abandon();
+ virtual bool isRenderTarget() const;
+ virtual GrRenderTarget* asRenderTarget();
+ virtual void removeRenderTarget();
+ virtual void uploadTextureData(uint32_t x,
+ uint32_t y,
+ uint32_t width,
+ uint32_t height,
+ const void* srcData);
+ virtual intptr_t getTextureHandle();
+
+ const GrSamplerState& samplerState() const { return fSamplerState; }
+ void setSamplerState(const GrSamplerState& state)
+ { fSamplerState = state; }
+ GLuint textureID() const { return fTextureID; }
+
+ GLenum uploadFormat() const { return fUploadFormat; }
+ GLenum uploadByteCount() const { return fUploadByteCount; }
+ GLenum uploadType() const { return fUploadType; }
+
+ // Ganesh assumes texture coordinates have their origin
+ // in the top-left corner of the image. OpenGL, however,
+ // has the origin in the lower-left corner. For content that
+ // is loaded by Ganesh we just push the content "upside down"
+ // (by GL's understanding of the world) in glTex*Image and the
+ // addressing just works out. However, content generated by GL
+ // (FBO or externally imported texture) will be upside down
+ // and it is up to the GrGpuGL derivative to handle y-mirroring.
+ Orientation orientation() const { return fOrientation; }
+
+private:
+ GrSamplerState fSamplerState;
+ GLuint fTextureID;
+ GLenum fUploadFormat;
+ GLenum fUploadByteCount;
+ GLenum fUploadType;
+ Orientation fOrientation;
+ GrGLRenderTarget* fRenderTarget;
+ GrGpuGL* fGpuGL;
+
+ static const GLenum gWrapMode2GLWrap[];
+
+ friend class GrGpuGL;
+
+ typedef GrTexture INHERITED;
+};
+
+#endif
diff --git a/gpu/include/GrGLVertexBuffer.h b/gpu/include/GrGLVertexBuffer.h
new file mode 100644
index 0000000000..6b99f57e5f
--- /dev/null
+++ b/gpu/include/GrGLVertexBuffer.h
@@ -0,0 +1,55 @@
+/*
+ Copyright 2010 Google Inc.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+ */
+
+
+#ifndef GrGLVertexBuffer_DEFINED
+#define GrGLVertexBuffer_DEFINED
+
+#include "GrVertexBuffer.h"
+#include "GrGLConfig.h"
+
+class GrGpuGL;
+
+class GrGLVertexBuffer : public GrVertexBuffer {
+protected:
+ GrGLVertexBuffer(GLuint id,
+ GrGpuGL* gl,
+ uint32_t sizeInBytes,
+ bool dynamic);
+
+public:
+ virtual ~GrGLVertexBuffer();
+
+ // overrides of GrVertexBuffer
+ virtual void abandon();
+ virtual void* lock();
+ virtual void unlock();
+ virtual bool isLocked() const;
+ virtual bool updateData(const void* src, uint32_t srcSizeInBytes);
+
+ GLuint bufferID() const;
+
+private:
+ GrGpuGL* fGL;
+ GLuint fBufferID;
+ void* fLockPtr;
+
+ friend class GrGpuGL;
+
+ typedef GrVertexBuffer INHERITED;
+};
+
+#endif
diff --git a/gpu/include/GrGlyph.h b/gpu/include/GrGlyph.h
new file mode 100644
index 0000000000..4a3b3072c7
--- /dev/null
+++ b/gpu/include/GrGlyph.h
@@ -0,0 +1,89 @@
+/*
+ Copyright 2010 Google Inc.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+ */
+
+
+#ifndef GrGlyph_DEFINED
+#define GrGlyph_DEFINED
+
+#include "GrPath.h"
+#include "GrRect.h"
+
+class GrAtlas;
+
+/* Need this to be quad-state:
+ - complete w/ image
+ - just metrics
+ - failed to get image, but has metrics
+ - failed to get metrics
+ */
+struct GrGlyph {
+ typedef uint32_t PackedID;
+
+ GrAtlas* fAtlas;
+ GrPath* fPath;
+ PackedID fPackedID;
+ GrIRect16 fBounds;
+ GrIPoint16 fAtlasLocation;
+
+ void init(GrGlyph::PackedID packed, const GrIRect& bounds) {
+ fAtlas = NULL;
+ fPath = NULL;
+ fPackedID = packed;
+ fBounds.set(bounds);
+ fAtlasLocation.set(0, 0);
+ }
+
+ void free() {
+ if (fPath) {
+ delete fPath;
+ fPath = NULL;
+ }
+ }
+
+ int width() const { return fBounds.width(); }
+ int height() const { return fBounds.height(); }
+ bool isEmpty() const { return fBounds.isEmpty(); }
+ uint16_t glyphID() const { return UnpackID(fPackedID); }
+
+ ///////////////////////////////////////////////////////////////////////////
+
+ static inline unsigned ExtractSubPixelBitsFromFixed(GrFixed pos) {
+ // two most significant fraction bits from fixed-point
+ return (pos >> 14) & 3;
+ }
+
+ static inline PackedID Pack(uint16_t glyphID, GrFixed x, GrFixed y) {
+ x = ExtractSubPixelBitsFromFixed(x);
+ y = ExtractSubPixelBitsFromFixed(y);
+ return (x << 18) | (y << 16) | glyphID;
+ }
+
+ static inline GrFixed UnpackFixedX(PackedID packed) {
+ return ((packed >> 18) & 3) << 14;
+ }
+
+ static inline GrFixed UnpackFixedY(PackedID packed) {
+ return ((packed >> 16) & 3) << 14;
+ }
+
+ static inline uint16_t UnpackID(PackedID packed) {
+ return (uint16_t)packed;
+ }
+};
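A round-trip sketch of the packing above: only the two most significant fraction bits of each fixed-point coordinate survive, i.e. subpixel positions are quantized to quarter pixels (glyphID, xFixed and yFixed are caller-supplied values):

    static void packRoundTrip(uint16_t glyphID, GrFixed xFixed, GrFixed yFixed) {
        GrGlyph::PackedID packed = GrGlyph::Pack(glyphID, xFixed, yFixed);

        uint16_t id = GrGlyph::UnpackID(packed);      // == glyphID
        GrFixed  qx = GrGlyph::UnpackFixedX(packed);  // x fraction in 1/4-pixel steps
        GrFixed  qy = GrGlyph::UnpackFixedY(packed);  // y fraction in 1/4-pixel steps
        GrAssert(id == glyphID);
        (void)qx; (void)qy;
    }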
+
+
+#endif
+
diff --git a/gpu/include/GrGpu.h b/gpu/include/GrGpu.h
new file mode 100644
index 0000000000..f1fdf01d50
--- /dev/null
+++ b/gpu/include/GrGpu.h
@@ -0,0 +1,446 @@
+/*
+ Copyright 2010 Google Inc.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+ */
+
+
+#ifndef GrGpu_DEFINED
+#define GrGpu_DEFINED
+
+#include "GrRect.h"
+#include "GrRefCnt.h"
+#include "GrDrawTarget.h"
+#include "GrGpuVertex.h"
+#include "GrTexture.h"
+#include "GrMemory.h"
+
+
+class GrGpu : public GrDrawTarget {
+
+public:
+ /**
+ * Possible 3D APIs that may be used by Ganesh.
+ */
+ enum Engine {
+ kOpenGL_Shaders_Engine,
+ kOpenGL_Fixed_Engine,
+ kDirect3D9_Engine
+ };
+
+ /**
+ * Platform specific 3D context.
+ * For
+ * kOpenGL_Shaders_Engine use NULL
+ * kOpenGL_Fixed_Engine use NULL
+ * kDirect3D9_Engine use an IDirect3DDevice9*
+ */
+ typedef void* Platform3DContext;
+
+ /**
+ * Create an instance of GrGpu that matches the specified Engine backend.
+ * If the requested engine is not supported (at compile-time or run-time)
+ * this returns NULL.
+ */
+ static GrGpu* Create(Engine, Platform3DContext context3D);
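A minimal bring-up sketch for the programmable GL backend; making a GL context current before the call is the caller's responsibility and is assumed here:

    GrGpu* gpu = GrGpu::Create(GrGpu::kOpenGL_Shaders_Engine, NULL);
    if (NULL == gpu) {
        // The engine is unsupported at compile time or by the runtime driver.
    }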
+
+ /**
+ * Describes levels of support for non-power-of-two textures.
+ */
+ enum NPOTTextureTypes {
+ /**
+ * no support for NPOT textures
+ */
+ kNone_NPOTTextureType,
+ /**
+ * only clamp is supported for textures
+ */
+ kNoRepeat_NPOTTextureType,
+ /**
+ * no texture restrictions at all, but rendertargets must be POW2
+ */
+ kNonRendertarget_NPOTTextureType,
+ /**
+ * no POW2 restrictions at all
+ */
+ kFull_NPOTTextureType
+ };
+
+ /**
+ * Used to control the level of antialiasing available for a rendertarget.
+ * Anti-alias quality levels depend on the underlying API/GPU capabilities.
+ */
+ enum AALevels {
+ kNone_AALevel, //!< No antialiasing available.
+ kLow_AALevel, //!< Low quality antialiased rendering. Actual
+ // interpretation is platform-dependent.
+ kMed_AALevel, //!< Medium quality antialiased rendering. Actual
+ // interpretation is platform-dependent.
+ kHigh_AALevel, //!< High quality antialiased rendering. Actual
+ // interpretation is platform-dependent.
+ };
+
+
+ /**
+ * Optional bitfield flags that can be passed to createTexture.
+ */
+ enum TextureFlags {
+ kRenderTarget_TextureFlag = 0x1, //!< Creates a texture that can be
+ // rendered to by calling
+ // GrGpu::setRenderTarget() with
+ // GrTexture::asRenderTarget().
+ kNoPathRendering_TextureFlag = 0x2, //!< If the texture is used as a
+ // rendertarget but paths will not
+ // be rendered to it.
+ kDynamicUpdate_TextureFlag = 0x4 //!< Hint that the CPU may modify
+ // this texture after creation
+ };
+
+ enum {
+ /**
+ * For Index8 pixel config, the colortable must be 256 entries
+ */
+ kColorTableSize = 256 * sizeof(GrColor)
+ };
+ /**
+ * Describes a texture to be created.
+ */
+ struct TextureDesc {
+ uint32_t fFlags; //!< bitfield of TextureFlags
+ GrGpu::AALevels fAALevel;//!< The level of antialiasing available
+ // for a rendertarget texture. Only
+ // relevant if fFlags contains
+ // kRenderTarget_TextureFlag.
+ uint32_t fWidth; //!< Width of the texture
+ uint32_t fHeight; //!< Height of the texture
+ GrTexture::PixelConfig fFormat; //!< Format of source data of the
+ // texture. Not guaranteed to be the
+ // same as internal format used by
+ // 3D API.
+ };
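A sketch of filling the descriptor for a render-target texture; kRGBA_8888_PixelConfig is assumed here as a representative GrTexture::PixelConfig value (the enum is defined in GrTexture.h, not shown), and gpu is a previously created GrGpu*:

    GrGpu::TextureDesc desc;
    desc.fFlags   = GrGpu::kRenderTarget_TextureFlag;
    desc.fAALevel = GrGpu::kNone_AALevel;   // only read for render targets
    desc.fWidth   = 256;
    desc.fHeight  = 256;
    desc.fFormat  = GrTexture::kRGBA_8888_PixelConfig;  // assumed enumerator

    // NULL srcData leaves the texture data uninitialized.
    GrTexture* texture = gpu->createTexture(desc, NULL, 0);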
+
+ /**
+ * Gpu usage statistics.
+ */
+ struct Stats {
+ uint32_t fVertexCnt; //<! Number of vertices drawn
+ uint32_t fIndexCnt; //<! Number of indices drawn
+ uint32_t fDrawCnt; //<! Number of draws
+
+ uint32_t fProgChngCnt;//<! Number of program changes (N/A for fixed)
+
+ /*
+ * Number of times the texture is set in 3D API
+ */
+ uint32_t fTextureChngCnt;
+ /*
+ * Number of times the render target is set in 3D API
+ */
+ uint32_t fRenderTargetChngCnt;
+ /*
+ * Number of textures created (includes textures that are rendertargets).
+ */
+ uint32_t fTextureCreateCnt;
+ /*
+ * Number of rendertargets created.
+ */
+ uint32_t fRenderTargetCreateCnt;
+ };
+
+ ////////////////////////////////////////////////////////////////////////////
+
+ GrGpu();
+ virtual ~GrGpu();
+
+ /**
+ * The GrGpu object normally assumes that no outsider is setting state
+ * within the underlying 3D API's context/device/whatever. This call informs
+ * the GrGpu that the state was modified and it should resend. Shouldn't
+ * be called frequently for good performance.
+ */
+ virtual void resetContext();
+
+ void unimpl(const char[]);
+
+ /**
+ * Creates a texture object
+ *
+ * @param desc describes the texture to be created.
+ * @param srcData texel data to load texture. Begins with full-size
+ * palette data for paletted textures. Contains width*
+ * height texels. If NULL texture data is uninitialized.
+ *
+ * @return The texture object if successful, otherwise NULL.
+ */
+ virtual GrTexture* createTexture(const TextureDesc& desc,
+ const void* srcData, size_t rowBytes) = 0;
+ /**
+ * Wraps an externally-created rendertarget in a GrRenderTarget.
+ * @param platformRenderTarget handle to the render target in the
+ * underlying 3D API. Interpretation depends on
+ * GrGpu subclass in use.
+ * @param width width of the render target
+ * @param height height of the render target
+ */
+ virtual GrRenderTarget* createPlatformRenderTarget(
+ intptr_t platformRenderTarget,
+ int width, int height) = 0;
+
+ /**
+ * Creates a vertex buffer.
+ *
+ * @param size size in bytes of the vertex buffer
+ * @param dynamic hints whether the data will be frequently changed
+ * by either GrVertexBuffer::lock or
+ * GrVertexBuffer::updateData.
+ *
+ * @return The vertex buffer if successful, otherwise NULL.
+ */
+ virtual GrVertexBuffer* createVertexBuffer(uint32_t size, bool dynamic) = 0;
+
+ /**
+ * Creates an index buffer.
+ *
+ * @param size size in bytes of the index buffer
+ * @param dynamic hints whether the data will be frequently changed
+ * by either GrIndexBuffer::lock or
+ * GrIndexBuffer::updateData.
+ *
+ * @return The index buffer if successful, otherwise NULL.
+ */
+ virtual GrIndexBuffer* createIndexBuffer(uint32_t size, bool dynamic) = 0;
+
+ /**
+ * Gets the default render target. This is the render target set in the
+ * 3D API at the time the GrGpu was created.
+ */
+ virtual GrRenderTarget* defaultRenderTarget() = 0;
+
+ /**
+ * At construction time the GrGpu infers the render target and viewport from
+ * the state of the underlying 3D API. However, a platform-specific resize
+ * event may occur.
+ * @param width new width of the default rendertarget
+ * @param height new height of the default rendertarget
+ */
+ virtual void setDefaultRenderTargetSize(uint32_t width, uint32_t height) = 0;
+
+ /**
+ * Erase the entire render target, ignoring any clips/scissors.
+ *
+ * This is issued to the GPU driver immediately.
+ */
+ virtual void eraseColor(GrColor color) = 0;
+
+ /**
+ * Checks whether 8-bit paletted textures are supported.
+ *
+ * @return true if 8bit palette textures are supported, false otherwise
+ */
+ bool supports8BitPalette() const { return f8bitPaletteSupport; }
+
+ /**
+ * If single stencil pass winding is supported then one stencil pass
+ * (kWindingStencil1_PathPass) is required to do winding rule path filling
+ * (or inverse winding rule). Otherwise, two passes are required
+ * (kWindingStencil1_PathPass followed by kWindingStencil2_PathPass).
+ *
+ * @return true if only a single stencil pass is needed.
+ */
+ bool supportsSingleStencilPassWinding() const
+ { return fSingleStencilPassForWinding; }
+
+ /**
+ * Checks whether locking vertex and index buffers is supported.
+ *
+ * @return true if locking is supported.
+ */
+ bool supportsBufferLocking() const { return fBufferLockSupport; }
+
+ /**
+ * Gets the minimum width of a render target. If a texture/rt is created
+ * with a width less than this size the GrGpu object will clamp it to this
+ * value.
+ */
+ int minRenderTargetWidth() const { return fMinRenderTargetWidth; }
+
+ /**
+ * Gets the minimum height of a render target. If a texture/rt is created
+ * with a height less than this size the GrGpu object will clamp it to this
+ * value.
+ */
+ int minRenderTargetHeight() const { return fMinRenderTargetHeight; }
+
+ /**
+ * Retrieves the level of NPOT texture support. Regardless of support level
+ * NPOT textures can always be created, but internally they may be embedded
+ * in a POT texture. An exception is paletted textures, which must be
+ * specified as a POT when npotTextureSupport() is not Full.
+ *
+ * @return the level of NPOT texture support.
+ */
+ NPOTTextureTypes npotTextureSupport() const { return fNPOTTextureSupport; }
+
+ // GrDrawTarget overrides
+ virtual void drawIndexed(PrimitiveType type,
+ uint32_t startVertex,
+ uint32_t startIndex,
+ uint32_t vertexCount,
+ uint32_t indexCount);
+
+ virtual void drawNonIndexed(PrimitiveType type,
+ uint32_t startVertex,
+ uint32_t vertexCount);
+
+ /**
+ * Determines if blend is effectively disabled.
+ *
+ * @return true if blend can be disabled without changing the rendering
+ * result given the current state including the vertex layout specified
+ * with the vertex source.
+ */
+ bool canDisableBlend() const;
+
+ /**
+ * Returns an index buffer that can be used to render quads.
+ * Indices are 0, 1, 2, 0, 2, 3, etc.
+ * Draw with kTriangles_PrimitiveType
+ */
+ const GrIndexBuffer* quadIndexBuffer() const;
+ /**
+ * Gets the number of quads that can be rendered using quadIndexBuffer.
+ */
+ int maxQuadsInIndexBuffer() const;
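A sketch of drawing with the shared quad index buffer, assuming position-only vertices supplied four per quad in fan order so they match the 0,1,2 0,2,3 index pattern:

    static void drawQuads(GrGpu* gpu, const GrPoint verts[], int quadCount) {
        GrAssert(quadCount <= gpu->maxQuadsInIndexBuffer());

        gpu->setVertexSourceToArray(verts, 0 /* position-only layout */);
        gpu->setIndexSourceToBuffer(gpu->quadIndexBuffer());
        gpu->drawIndexed(GrDrawTarget::kTriangles_PrimitiveType,
                         0,               // startVertex
                         0,               // startIndex
                         4 * quadCount,   // vertexCount: one greater than max index
                         6 * quadCount);  // indexCount
    }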
+
+ /**
+ * Ensures that the current render target is actually set in the
+ * underlying 3D API. Used when client wants to use 3D API to directly
+ * render to the RT.
+ */
+ virtual void forceRenderTargetFlush() = 0;
+
+ virtual bool readPixels(int left, int top, int width, int height,
+ GrTexture::PixelConfig, void* buffer) = 0;
+
+
+ const Stats& getStats() const;
+ void resetStats();
+ void printStats() const;
+
+protected:
+ /**
+ * Extensions to GrDrawTarget::StencilPass to implement stencil clipping
+ */
+ enum GpuStencilPass {
+ kSetClip_StencilPass = kDrawTargetCount_StencilPass,
+ /* rendering a hard clip to the stencil
+ buffer. Subsequent draws with other
+ StencilPass values will be clipped
+ if kStencilClip_StateBit is set. */
+ kGpuCount_StencilPass
+ };
+
+ /**
+ * Extensions to GrDrawTarget::StateBits to implement stencil clipping
+ */
+ struct ClipState {
+ bool fClipInStencil;
+ bool fClipIsDirty;
+ GrRenderTarget* fStencilClipTarget;
+ } fClipState;
+
+ virtual void clipWillChange(const GrClip& clip);
+ bool setupClipAndFlushState(PrimitiveType type);
+
+ struct BoundsState {
+ bool fScissorEnabled;
+ GrIRect fScissorRect;
+ GrIRect fViewportRect;
+ };
+
+ // defaults to false, subclass can set true to support paletted textures
+ bool f8bitPaletteSupport;
+
+ // defaults to false, subclass can set higher support level
+ NPOTTextureTypes fNPOTTextureSupport;
+
+ // True if only one stencil pass is required to implement the winding path
+ // fill rule. Subclass responsible for setting this value.
+ bool fSingleStencilPassForWinding;
+
+ // set by subclass to true if index and vertex buffers can be locked, false
+ // otherwise.
+ bool fBufferLockSupport;
+
+ // set by subclass
+ int fMinRenderTargetWidth;
+ int fMinRenderTargetHeight;
+
+ // overridden by API specific GrGpu-derived class to perform the draw call.
+ virtual void drawIndexedHelper(PrimitiveType type,
+ uint32_t startVertex,
+ uint32_t startIndex,
+ uint32_t vertexCount,
+ uint32_t indexCount) = 0;
+
+ virtual void drawNonIndexedHelper(PrimitiveType type,
+ uint32_t startVertex,
+ uint32_t vertexCount) = 0;
+
+ // called to program the vertex data, indexCount will be 0 if drawing non-
+ // indexed geometry.
+ virtual void setupGeometry(uint32_t startVertex,
+ uint32_t startIndex,
+ uint32_t vertexCount,
+ uint32_t indexCount) = 0;
+
+
+ // The GrGpu typically records the client's requested state and then flushes
+ // deltas from previous state at draw time. This function does the
+ // API-specific flush of the state
+ // returns false if current state is unsupported.
+ virtual bool flushGraphicsState(PrimitiveType type) = 0;
+
+ // Sets the scissor rect, or disables if rect is NULL.
+ virtual void flushScissor(const GrIRect* rect) = 0;
+
+ // GrGpu subclass removes the clip from the stencil buffer
+ virtual void eraseStencilClip() = 0;
+
+ // GrDrawTarget overrides
+ virtual bool acquireGeometryHelper(GrVertexLayout vertexLayout,
+ void** vertices,
+ void** indices);
+ virtual void releaseGeometryHelper();
+
+private:
+ mutable GrIndexBuffer* fQuadIndexBuffer; // mutable so it can be
+ // created on-demand
+
+ static const int MAX_VERTEX_SIZE = GR_CT_MAX(2*sizeof(GrPoint) + sizeof(GrColor),
+ 2*sizeof(GrGpuTextVertex));
+ static const int VERTEX_STORAGE = 16 * MAX_VERTEX_SIZE;
+ static const int INDEX_STORAGE = 32 * sizeof(uint16_t);
+
+protected:
+ GrAutoSMalloc<VERTEX_STORAGE> fVertices;
+ GrAutoSMalloc<INDEX_STORAGE> fIndices;
+
+ Stats fStats;
+
+private:
+ typedef GrRefCnt INHERITED;
+};
+
+#endif
+
diff --git a/gpu/include/GrGpuD3D9.h b/gpu/include/GrGpuD3D9.h
new file mode 100644
index 0000000000..c3d3e1a829
--- /dev/null
+++ b/gpu/include/GrGpuD3D9.h
@@ -0,0 +1,259 @@
+/*
+ Copyright 2010 Google Inc.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+ */
+
+
+#ifndef GrGpuD3D9_DEFINED
+#define GrGpuD3D9_DEFINED
+
+#include <Windows.h>
+#include <d3d9.h>
+
+#include "GrGpu.h"
+
+class GrD3D9VertexBuffer;
+class GrD3D9IndexBuffer;
+class GrD3D9Texture;
+
+// For D3D9 GrRenderTarget casts to a (GrD3D9RenderTarget*)
+struct GrD3D9RenderTarget {
+ IDirect3DSurface9* fColor;
+ IDirect3DSurface9* fStencil;
+ bool fClearStencil;
+};
+
+// GrGpu implementation for D3D9 fixed pipeline.
+// Known needed improvements:
+// vertex/index buffers need to be better managed:
+// use no_overwrite and walk down VB/IB until reach end and wrap
+// take advantage of the redrawHint and don't recopy vertex/idx data
+// User created vertex buffers must have position Z values
+// (required for fixed pipeline) but there is no way to communicate
+// this now
+// We create a temporary sysmem surface for each texture update.
+// split this out into fixed/shader subclasses (use vdecls for shaders)
+class GrGpuD3D9 : public GrGpu {
+public:
+ GrGpuD3D9(IDirect3DDevice9* device);
+ virtual ~GrGpuD3D9();
+
+ // overrides from GrGpu
+ virtual GrTexture* createTexture(const TextureDesc& desc,
+ const void* srcData);
+ virtual GrVertexBuffer* createVertexBuffer(uint32_t size, bool dynamic);
+ virtual GrIndexBuffer* createIndexBuffer(uint32_t size, bool dynamic);
+
+ virtual void eraseColor(GrColor color);
+ virtual void eraseStencil();
+
+protected:
+ // overrides from GrGpu
+ virtual bool flushGraphicsState(PrimitiveTypes type);
+ virtual void drawIndexArrayApi(PrimitiveTypes type,
+ int baseVertex,
+ int vertexCount,
+ int indexCount,
+ const uint16_t* indexArray,
+ bool redrawHint);
+ virtual void drawIndexBufferApi(PrimitiveTypes type,
+ int baseVertex,
+ int startIndex,
+ int vertexCount,
+ int indexCount,
+ GrIndexBuffer* indexBuffer,
+ bool redrawHint);
+ virtual void drawNonIndexedApi(PrimitiveTypes type,
+ int baseVertex,
+ int indexCount,
+ bool redrawHint);
+ virtual void flushScissor();
+
+private:
+
+ // baseVertex may be modified while setting up the stage
+ GrD3D9VertexBuffer* setupVBufferStage(int vsize, int* baseVertex,
+ int vertexCount, DrawModes mode);
+ GrD3D9IndexBuffer* setupIBufferStage(int* startIndex, int indexCount,
+ const uint16_t* indices);
+ static int vertexSize(int vertFlagBits, GrGpu::DrawModes mode);
+ static bool positionsOnly(int vertFlagBits);
+
+ // notify callbacks to update state tracking when related
+ // objects are bound to the device or deleted outside of the class
+ void notifyVertexBufferBind(GrD3D9VertexBuffer* buffer);
+ void notifyVertexBufferDelete(GrD3D9VertexBuffer* buffer);
+ void notifyIndexBufferBind(GrD3D9IndexBuffer* buffer);
+ void notifyIndexBufferDelete(GrD3D9IndexBuffer* buffer);
+ void notifyTextureDelete(GrD3D9Texture* texture);
+ void notifyTextureRemoveRenderTarget(GrD3D9Texture* texture);
+
+ IDirect3DSurface9* createStencil(uint32_t width,
+ uint32_t height,
+ D3DMULTISAMPLE_TYPE msType,
+ DWORD msQual);
+
+ void setRenderTargetImm();
+
+ friend class GrD3D9VertexBuffer;
+ friend class GrD3D9IndexBuffer;
+ friend class GrD3D9Texture;
+
+ GrIndexBuffer* fLastIndexBuffer;
+
+ // used to track the COLORARG1 value for tex stage 0
+ // needs to use ALPHAREPLICATE when using alpha-only textures
+ DWORD fLastColorArg1;
+
+ IDirect3DDevice9* fDevice;
+ // We may use Ex functionality if this is a Ex device
+ IDirect3DDevice9Ex* fDeviceEx;
+
+ enum VertDecls {
+ kInvalid_VertDecl = -1,
+ kPosOnly_VertDecl = 0,
+ kTex_VertDecl,
+ kColors_VertDecl,
+ kTexAndColors_VertDecl,
+ kPosAsTex_VertDecl,
+ kPosAsTexAndColors_VertDecl,
+ kVertDeclCount
+ };
+
+ static const VertDecls gVertFlags2VertDeclIdx[];
+ static const DWORD gDeclToFVFs[];
+ static const DWORD gTextFVF;
+
+ DWORD fLastVertFVF;
+
+ bool fLastBlendOff;
+
+ // D3D allows user pointers in place of buffers for vertex/index data
+ // but it doesn't allow:
+ // -multiple streams (non-interleaved) ~ this will be resolved when we
+ // go AoS with our verts
+ // -mixing user pointer verts with index buffer (or vice versa)
+ // So we use these staging buffers
+ GrD3D9VertexBuffer* fStageVBuffer;
+ GrD3D9IndexBuffer* fStageIBuffer;
+
+ // did we use texture coordinate generation at the last flush
+ bool fLastTexGen;
+
+ GrD3D9RenderTarget fDefaultRenderTarget;
+
+ // We use texture stage 0 to set a constant color
+ // D3D disables the stage if NULL is bound (even when the ops don't
+ // reference the texture). So we have a 1x1 dummy texture that
+ // gets set when drawing constant color with no texture
+ GrD3D9Texture* fDummyTexture;
+};
+
+class GrD3D9Texture : public GrTexture {
+protected:
+ GrD3D9Texture(uint32_t width,
+ uint32_t height,
+ PixelConfig config,
+ IDirect3DTexture9* texture,
+ IDirect3DSurface9* stencil,
+ bool clearStencil,
+ GrGpuD3D9* gpuD3D9);
+public:
+ virtual ~GrD3D9Texture();
+
+ // overloads of GrTexture
+ virtual void abandon();
+ virtual bool isRenderTarget();
+ virtual GrRenderTarget* asRenderTarget()
+ { return (GrRenderTarget*) &fRenderTarget; }
+ virtual void removeRenderTarget();
+ virtual void uploadTextureData(uint32_t x,
+ uint32_t y,
+ uint32_t width,
+ uint32_t height,
+ const void* srcData);
+ IDirect3DTexture9* texture() const { return fTexture; }
+ IDirect3DSurface9* stencil() const { return fStencil; }
+ D3DFORMAT format() const { return fDesc.Format; }
+private:
+ IDirect3DTexture9* fTexture;
+ GrD3D9RenderTarget fRenderTarget;
+ IDirect3DSurface9* fStencil;
+ D3DSURFACE_DESC fDesc;
+ GrGpuD3D9* fGpuD3D9;
+
+ friend class GrGpuD3D9;
+
+ typedef GrTexture INHERITED;
+};
+
+class GrD3D9VertexBuffer : public GrVertexBuffer {
+protected:
+ GrD3D9VertexBuffer(uint32_t size,
+ bool dynamic,
+ IDirect3DVertexBuffer9* vbuffer,
+ GrGpuD3D9* gpuD3D9);
+public:
+ virtual ~GrD3D9VertexBuffer();
+
+ IDirect3DVertexBuffer9* buffer() const { return fBuffer; }
+
+ // overrides of GrVertexBuffer
+ virtual void abandon();
+ virtual void* lock();
+ virtual void unlock();
+ virtual bool isLocked();
+ virtual bool updateData(const void* src, uint32_t srcSizeInBytes);
+
+private:
+ IDirect3DVertexBuffer9* fBuffer;
+ D3DVERTEXBUFFER_DESC fDesc;
+ bool fLocked;
+ GrGpuD3D9* fGpuD3D9;
+
+ friend class GrGpuD3D9;
+
+ typedef GrVertexBuffer INHERITED;
+};
+
+class GrD3D9IndexBuffer : public GrIndexBuffer {
+protected:
+ GrD3D9IndexBuffer(uint32_t size,
+ bool dynamic,
+ IDirect3DIndexBuffer9* vbuffer,
+ GrGpuD3D9* gpuD3D9);
+public:
+ virtual ~GrD3D9IndexBuffer();
+
+ IDirect3DIndexBuffer9* buffer() const { return fBuffer; }
+
+ // overrides of GrIndexBuffer
+ virtual void abandon();
+ virtual void* lock();
+ virtual void unlock();
+ virtual bool isLocked();
+ virtual bool updateData(const void* src, uint32_t srcSizeInBytes);
+private:
+ IDirect3DIndexBuffer9* fBuffer;
+ D3DINDEXBUFFER_DESC fDesc;
+ bool fLocked;
+ GrGpuD3D9* fGpuD3D9;
+
+ friend class GrGpuD3D9;
+
+ typedef GrIndexBuffer INHERITED;
+};
+
+#endif
+
diff --git a/gpu/include/GrGpuVertex.h b/gpu/include/GrGpuVertex.h
new file mode 100644
index 0000000000..1e3293a2ba
--- /dev/null
+++ b/gpu/include/GrGpuVertex.h
@@ -0,0 +1,104 @@
+/*
+ Copyright 2010 Google Inc.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+ */
+
+
+#ifndef GrGpuVertex_DEFINED
+#define GrGpuVertex_DEFINED
+
+#include "GrGLConfig.h"
+#include "GrPoint.h"
+
+#if GR_TEXT_SCALAR_IS_USHORT
+ typedef uint16_t GrTextScalar;
+ #define GrIntToTextScalar(x) ((uint16_t)x)
+ #define GrFixedToTextScalar(x) (x)
+#elif GR_TEXT_SCALAR_IS_FIXED
+ typedef GrFixed GrTextScalar;
+ #define GrIntToTextScalar(x) GrIntToFixed(x)
+ #define GrFixedToTextScalar(x) (x)
+#elif GR_TEXT_SCALAR_IS_FLOAT
+ typedef float GrTextScalar;
+ #define GrIntToTextScalar(x) ((GrTextScalar)x)
+ #define GrFixedToTextScalar(x) GrFixedToFloat(x)
+#else
+ #error "Text scalar type not defined"
+#endif
+
+// text has its own vertex class, since it may want to be in fixed point (given
+// that it starts with all integers) even when the default vertices are floats
+struct GrGpuTextVertex {
+ GrTextScalar fX;
+ GrTextScalar fY;
+
+ void set(GrTextScalar x, GrTextScalar y) {
+ fX = x;
+ fY = y;
+ }
+
+ void setI(int x, int y) {
+ fX = GrIntToTextScalar(x);
+ fY = GrIntToTextScalar(y);
+ }
+
+ void setX(GrFixed x, GrFixed y) {
+ fX = GrFixedToTextScalar(x);
+ fY = GrFixedToTextScalar(y);
+ }
+
+ // rect fan is counter-clockwise
+
+ void setRectFan(GrTextScalar l, GrTextScalar t, GrTextScalar r,
+ GrTextScalar b) {
+ GrGpuTextVertex* v = this;
+ v[0].set(l, t);
+ v[1].set(l, b);
+ v[2].set(r, b);
+ v[3].set(r, t);
+ }
+
+ void setIRectFan(int l, int t, int r, int b) {
+ this->setRectFan(GrIntToTextScalar(l), GrIntToTextScalar(t),
+ GrIntToTextScalar(r), GrIntToTextScalar(b));
+ }
+
+ void setIRectFan(int l, int t, int r, int b, size_t stride) {
+ GrAssert(stride > sizeof(GrGpuTextVertex));
+ char* v = (char*)this;
+ ((GrGpuTextVertex*)(v + 0*stride))->setI(l, t);
+ ((GrGpuTextVertex*)(v + 1*stride))->setI(l, b);
+ ((GrGpuTextVertex*)(v + 2*stride))->setI(r, b);
+ ((GrGpuTextVertex*)(v + 3*stride))->setI(r, t);
+ }
+
+ // counter-clockwise fan
+ void setXRectFan(GrFixed l, GrFixed t, GrFixed r, GrFixed b) {
+ this->setRectFan(GrFixedToTextScalar(l), GrFixedToTextScalar(t),
+ GrFixedToTextScalar(r), GrFixedToTextScalar(b));
+ }
+
+ void setXRectFan(GrFixed l, GrFixed t, GrFixed r, GrFixed b, size_t stride) {
+ GrAssert(stride > sizeof(GrGpuTextVertex));
+ char* v = (char*)this;
+ ((GrGpuTextVertex*)(v + 0*stride))->setX(l, t);
+ ((GrGpuTextVertex*)(v + 1*stride))->setX(l, b);
+ ((GrGpuTextVertex*)(v + 2*stride))->setX(r, b);
+ ((GrGpuTextVertex*)(v + 3*stride))->setX(r, t);
+ }
+
+};
+
+#endif
+
diff --git a/gpu/include/GrIPoint.h b/gpu/include/GrIPoint.h
new file mode 100644
index 0000000000..b979a09340
--- /dev/null
+++ b/gpu/include/GrIPoint.h
@@ -0,0 +1,35 @@
+/*
+ Copyright 2010 Google Inc.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+ */
+
+
+#ifndef GrIPoint_DEFINED
+#define GrIPoint_DEFINED
+
+#include "GrTypes.h"
+
+struct GrIPoint {
+public:
+ int32_t fX, fY;
+
+ GrIPoint(int32_t x, int32_t y) : fX(x), fY(y) {}
+
+ void set(int32_t x, int32_t y) {
+ fX = x;
+ fY = y;
+ }
+};
+
+#endif
diff --git a/gpu/include/GrInOrderDrawBuffer.h b/gpu/include/GrInOrderDrawBuffer.h
new file mode 100644
index 0000000000..805861ada1
--- /dev/null
+++ b/gpu/include/GrInOrderDrawBuffer.h
@@ -0,0 +1,131 @@
+/*
+ Copyright 2010 Google Inc.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+ */
+
+
+#ifndef GrInOrderDrawBuffer_DEFINED
+#define GrInOrderDrawBuffer_DEFINED
+
+#include "GrDrawTarget.h"
+#include "GrAllocPool.h"
+#include "GrAllocator.h"
+#include "GrClip.h"
+
+class GrVertexBufferAllocPool;
+
+// TODO: don't save clip per draw
+class GrInOrderDrawBuffer : public GrDrawTarget {
+public:
+
+ GrInOrderDrawBuffer(GrVertexBufferAllocPool* pool = NULL);
+
+ virtual ~GrInOrderDrawBuffer();
+
+ void initializeDrawStateAndClip(const GrDrawTarget& target);
+
+ virtual void drawIndexed(PrimitiveType type,
+ uint32_t startVertex,
+ uint32_t startIndex,
+ uint32_t vertexCount,
+ uint32_t indexCount);
+
+ virtual void drawNonIndexed(PrimitiveType type,
+ uint32_t startVertex,
+ uint32_t vertexCount);
+
+ virtual bool geometryHints(GrVertexLayout vertexLayout,
+ int32_t* vertexCount,
+ int32_t* indexCount) const;
+
+ void reset();
+
+ void playback(GrDrawTarget* target);
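A sketch of the record-then-replay flow this class implies, assuming gpu is the real GrGpu-backed target and that reset() discards the recorded draws (inferred from the name):

    GrInOrderDrawBuffer buffer;               // optionally pass a vertex pool
    buffer.initializeDrawStateAndClip(*gpu);  // copy current state and clip

    // ... issue draws against 'buffer' exactly as against any GrDrawTarget ...

    buffer.playback(gpu);                     // re-issue the recorded draws
    buffer.reset();                           // discard recorded geometry/state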
+
+private:
+
+ struct Draw {
+ PrimitiveType fType;
+ uint32_t fStartVertex;
+ uint32_t fStartIndex;
+ uint32_t fVertexCount;
+ uint32_t fIndexCount;
+ bool fStateChange;
+ GrVertexLayout fVertexLayout;
+ bool fUseVertexBuffer;
+ bool fClipChanged;
+ union {
+ const GrVertexBuffer* fVertexBuffer;
+ const void* fVertexArray;
+ };
+ bool fUseIndexBuffer;
+ union {
+ const GrIndexBuffer* fIndexBuffer;
+ const void* fIndexArray;
+ };
+ };
+
+ virtual bool acquireGeometryHelper(GrVertexLayout vertexLayout,
+ void** vertices,
+ void** indices);
+ virtual void releaseGeometryHelper();
+ virtual void clipWillChange(const GrClip& clip);
+
+
+ bool grabState();
+ bool grabClip();
+
+ GrTAllocator<Draw> fDraws;
+ // HACK: We hold refs on textures in saved state but not RTs, VBs, and IBs.
+ // a) RTs aren't ref counted (yet)
+ // b) we are only using this class for text which doesn't use VBs or IBs
+ // This should be fixed by either refcounting them all or having some
+ // notification occur if a cache is purging an object we have a ptr to.
+ GrTAllocator<SavedDrawState> fStates;
+
+ GrTAllocator<GrClip> fClips;
+ bool fClipChanged;
+
+ // vertices are either queued in cpu arrays or some vertex buffer pool
+ // that knows about a specific GrGpu object.
+ GrAllocPool fCPUVertices;
+ GrVertexBufferAllocPool* fBufferVertices;
+ GrAllocPool fIndices;
+ void* fCurrReservedVertices;
+ void* fCurrReservedIndices;
+ // valid if we're queueing vertices in fBufferVertices
+ GrVertexBuffer* fCurrVertexBuffer;
+ uint32_t fCurrStartVertex;
+
+ // caller may conservatively over allocate vertices / indices.
+ // we release unused space back to allocator if possible
+ size_t fReservedVertexBytes;
+ size_t fReservedIndexBytes;
+ size_t fUsedReservedVertexBytes;
+ size_t fUsedReservedIndexBytes;
+
+ static const uint32_t STATES_BLOCK_SIZE = 8;
+ static const uint32_t DRAWS_BLOCK_SIZE = 8;
+ static const uint32_t CLIPS_BLOCK_SIZE = 8;
+ static const uint32_t VERTEX_BLOCK_SIZE = 1 << 12;
+ static const uint32_t INDEX_BLOCK_SIZE = 1 << 10;
+ int8_t fDrawsStorage[sizeof(Draw) *
+ DRAWS_BLOCK_SIZE];
+ int8_t fStatesStorage[sizeof(SavedDrawState) *
+ STATES_BLOCK_SIZE];
+ int8_t fClipsStorage[sizeof(GrClip) *
+ CLIPS_BLOCK_SIZE];
+};
+
+#endif
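
A minimal sketch of the record-then-playback flow this class exposes, assuming `gpu` is some existing GrDrawTarget; the function and variable names are illustrative only:

    #include "GrInOrderDrawBuffer.h"

    // Buffer draws on the CPU, then replay them against the real target.
    void flushBufferedDraws(GrDrawTarget* gpu, GrInOrderDrawBuffer* buffer) {
        // Mirror the target's current draw state and clip before recording.
        buffer->initializeDrawStateAndClip(*gpu);

        // ... callers issue drawIndexed()/drawNonIndexed() on 'buffer' ...

        // Replay every recorded draw, including state and clip changes,
        // then recycle the buffer's storage.
        buffer->playback(gpu);
        buffer->reset();
    }
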
diff --git a/gpu/include/GrIndexBuffer.h b/gpu/include/GrIndexBuffer.h
new file mode 100644
index 0000000000..0f4c4d691f
--- /dev/null
+++ b/gpu/include/GrIndexBuffer.h
@@ -0,0 +1,92 @@
+/*
+ Copyright 2010 Google Inc.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+ */
+
+
+#ifndef GrIndexBuffer_DEFINED
+#define GrIndexBuffer_DEFINED
+
+#include "GrRefCnt.h"
+
+class GrIndexBuffer : public GrRefCnt {
+protected:
+ GrIndexBuffer(uint32_t sizeInBytes, bool dynamic) :
+ fSizeInBytes(sizeInBytes),
+ fDynamic(dynamic) {}
+public:
+ virtual ~GrIndexBuffer() {}
+
+ /**
+ Retrieves the size of the index buffer
+
+ @return the size of the index buffer in bytes
+ */
+ uint32_t size() const { return fSizeInBytes; }
+
+ /**
+ Retrieves whether the index buffer was created with the dynamic flag
+
+ @return true if the index buffer was created with the dynamic flag
+ */
+ bool dynamic() const { return fDynamic; }
+
+ /**
+ Indicates that the GPU context in which this index buffer was created has
+ been destroyed and that Ganesh should not attempt to free the buffer with
+ the underlying API.
+ */
+ virtual void abandon() = 0;
+
+ /**
+ Locks the index buffer to be written by the CPU.
+
+ The previous content of the index buffer is invalidated. It is an error to
+ draw while the buffer is locked. It is an error to call lock on an already
+ locked index buffer.
+
+ @return a pointer to the index data or NULL if the lock fails.
+ */
+ virtual void* lock() = 0;
+
+ /**
+ Unlocks the index buffer.
+
+ The pointer returned by the previous lock call will no longer be valid.
+ */
+ virtual void unlock() = 0;
+
+ /**
+ Queries whether the index buffer has been locked.
+
+ @return true if the index buffer is locked, false otherwise.
+ */
+ virtual bool isLocked() const = 0;
+
+ /**
+ Updates the index buffer data.
+
+ The size of the index buffer will be preserved. However, only the updated
+ region will have defined contents.
+
+ @return returns true if the update succeeds, false otherwise.
+ */
+ virtual bool updateData(const void* src, uint32_t srcSizeInBytes) = 0;
+
+private:
+ uint32_t fSizeInBytes;
+ bool fDynamic;
+};
+
+#endif
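
A sketch of the lock()/unlock() contract described above; `buffer` stands in for any concrete GrIndexBuffer subclass obtained elsewhere:

    #include <string.h>
    #include "GrIndexBuffer.h"

    // Fill an index buffer with CPU-generated 16-bit indices.
    bool fillIndices(GrIndexBuffer* buffer, const uint16_t* src, uint32_t count) {
        uint32_t bytes = count * sizeof(uint16_t);
        if (bytes > buffer->size()) {
            return false;                  // would overflow the buffer
        }
        if (void* dst = buffer->lock()) {  // invalidates previous contents
            memcpy(dst, src, bytes);
            buffer->unlock();              // 'dst' is invalid after this
            return true;
        }
        // lock() failed; fall back to the one-shot update path.
        return buffer->updateData(src, bytes);
    }
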
diff --git a/gpu/include/GrInstanceCounter.h b/gpu/include/GrInstanceCounter.h
new file mode 100644
index 0000000000..11cec2b2ec
--- /dev/null
+++ b/gpu/include/GrInstanceCounter.h
@@ -0,0 +1,47 @@
+/*
+ Copyright 2010 Google Inc.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+ */
+
+
+#ifndef GrInstanceCounter_DEFINED
+#define GrInstanceCounter_DEFINED
+
+#include "GrTypes.h"
+
+template <typename T> class GrInstanceCounter {
+public:
+ GrInstanceCounter() {
+ ++gCounter;
+ GrPrintf("+ %s %d\n", T::InstanceCounterClassName(), gCounter);
+ }
+
+ ~GrInstanceCounter() {
+ --gCounter;
+ GrPrintf("- %s %d\n", T::InstanceCounterClassName(), gCounter);
+ }
+
+private:
+ static int gCounter;
+};
+
+template <typename T> int GrInstanceCounter<T>::gCounter;
+
+#define DECLARE_INSTANCE_COUNTER(T) \
+ static const char* InstanceCounterClassName() { return #T; } \
+ friend class GrInstanceCounter<T>; \
+ GrInstanceCounter<T> fInstanceCounter
+
+#endif
+
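
A sketch of how DECLARE_INSTANCE_COUNTER is meant to be dropped into a class; GrWidget is a made-up type used only for illustration:

    #include "GrInstanceCounter.h"

    class GrWidget {
    public:
        GrWidget() {}    // construction logs "+ GrWidget <live count>"
        ~GrWidget() {}   // destruction logs "- GrWidget <live count>"

    private:
        // Declares InstanceCounterClassName() and adds a
        // GrInstanceCounter<GrWidget> member that does the logging.
        DECLARE_INSTANCE_COUNTER(GrWidget);
    };
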
diff --git a/gpu/include/GrKey.h b/gpu/include/GrKey.h
new file mode 100644
index 0000000000..19133aee7a
--- /dev/null
+++ b/gpu/include/GrKey.h
@@ -0,0 +1,47 @@
+/*
+ Copyright 2010 Google Inc.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+ */
+
+
+#ifndef GrKey_DEFINED
+#define GrKey_DEFINED
+
+#include "GrRefCnt.h"
+
+class GrKey : public GrRefCnt {
+public:
+ typedef intptr_t Hash;
+
+ explicit GrKey(Hash hash) : fHash(hash) {}
+
+ intptr_t getHash() const { return fHash; }
+
+ bool operator<(const GrKey& rh) const {
+ return fHash < rh.fHash || (fHash == rh.fHash && this->lt(rh));
+ }
+ bool operator==(const GrKey& rh) const {
+ return fHash == rh.fHash && this->eq(rh);
+ }
+
+protected:
+ virtual bool lt(const GrKey& rh) const = 0;
+ virtual bool eq(const GrKey& rh) const = 0;
+
+private:
+ const Hash fHash;
+};
+
+#endif
+
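
A sketch of a concrete key built on GrKey; the GrIdKey name and its uint32_t payload are illustrative only. lt()/eq() are only invoked once the hashes tie, and the casts assume keys of the same concrete type are compared:

    #include "GrKey.h"

    class GrIdKey : public GrKey {
    public:
        explicit GrIdKey(uint32_t id) : GrKey((Hash)id), fId(id) {}

    protected:
        virtual bool lt(const GrKey& rh) const {
            return fId < ((const GrIdKey&)rh).fId;
        }
        virtual bool eq(const GrKey& rh) const {
            return fId == ((const GrIdKey&)rh).fId;
        }

    private:
        uint32_t fId;
    };
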
diff --git a/gpu/include/GrMatrix.h b/gpu/include/GrMatrix.h
new file mode 100644
index 0000000000..43fd4a5ed1
--- /dev/null
+++ b/gpu/include/GrMatrix.h
@@ -0,0 +1,370 @@
+/*
+ Copyright 2010 Google Inc.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+ */
+
+
+#ifndef GrMatrix_DEFINED
+#define GrMatrix_DEFINED
+
+#include "GrPoint.h"
+
+struct GrRect;
+
+/*
+ * 3x3 matrix
+ */
+class GrMatrix {
+public:
+ static const GrMatrix& I();
+ static const GrScalar gRESCALE;
+ /**
+ * Handy index constants
+ */
+ enum {
+ kScaleX,
+ kSkewX,
+ kTransX,
+ kSkewY,
+ kScaleY,
+ kTransY,
+ kPersp0,
+ kPersp1,
+ kPersp2
+ };
+
+ /**
+ * Create an uninitialized matrix
+ */
+ GrMatrix() {
+ fTypeMask = 0;
+ }
+
+ /**
+ * Create a matrix from an array of values
+ * @param values row-major array of matrix components
+ */
+ explicit GrMatrix(GrScalar* values) {
+ setToArray(values);
+ }
+
+ /**
+ * Create a matrix from values
+ * @param scaleX (0,0) matrix element
+ * @param skewX (0,1) matrix element
+ * @param transX (0,2) matrix element
+ * @param skewY (1,0) matrix element
+ * @param scaleY (1,1) matrix element
+ * @param transY (1,2) matrix element
+ * @param persp0 (2,0) matrix element
+ * @param persp1 (2,1) matrix element
+ * @param persp2 (2,2) matrix element
+ */
+ GrMatrix(GrScalar scaleX,
+ GrScalar skewX,
+ GrScalar transX,
+ GrScalar skewY,
+ GrScalar scaleY,
+ GrScalar transY,
+ GrScalar persp0,
+ GrScalar persp1,
+ GrScalar persp2) {
+ setAll(scaleX, skewX, transX,
+ skewY, scaleY, transY,
+ persp0, persp1, persp2);
+ }
+
+ /**
+ * access matrix component
+ * @return matrix component value
+ */
+ const GrScalar& operator[] (int idx) const {
+ GrAssert((unsigned)idx < 9);
+ return fM[idx];
+ }
+
+ /**
+ * Set a matrix from an array of values
+ * @param values row-major array of matrix components
+ */
+ void setToArray(GrScalar* values) {
+ for (int i = 0; i < 9; ++i) {
+ fM[i] = values[i];
+ }
+ setTypeMask();
+ }
+
+ /**
+ * Create a matrix from values
+ * @param scaleX (0,0) matrix element
+ * @param skewX (0,1) matrix element
+ * @param transX (0,2) matrix element
+ * @param skewY (1,0) matrix element
+ * @param scaleY (1,1) matrix element
+ * @param transY (1,2) matrix element
+ * @param persp0 (2,0) matrix element
+ * @param persp1 (2,1) matrix element
+ * @param persp2 (2,2) matrix element
+ */
+ void setAll(GrScalar scaleX,
+ GrScalar skewX,
+ GrScalar transX,
+ GrScalar skewY,
+ GrScalar scaleY,
+ GrScalar transY,
+ GrScalar persp0,
+ GrScalar persp1,
+ GrScalar persp2) {
+ fM[kScaleX] = scaleX;
+ fM[kSkewX] = skewX;
+ fM[kTransX] = transX;
+ fM[kSkewY] = skewY;
+ fM[kScaleY] = scaleY;
+ fM[kTransY] = transY;
+ fM[kPersp0] = persp0;
+ fM[kPersp1] = persp1;
+ fM[kPersp2] = persp2;
+
+ setTypeMask();
+ }
+
+ /**
+ * set matrix component
+ * @param idx index of component to set
+ * @param value value to set component to
+ */
+ inline void set(int idx, GrScalar value);
+
+ /**
+ * make this matrix an identity matrix
+ */
+ void setIdentity();
+
+ /**
+ * overwrite entire matrix to be a translation matrix
+ * @param dx amount to translate by in x
+ * @param dy amount to translate by in y
+ */
+ void setTranslate(GrScalar dx, GrScalar dy);
+
+ /**
+ * overwrite entire matrix to be a scaling matrix
+ * @param sx x scale factor
+ * @param sy y scale factor
+ */
+ void setScale(GrScalar sx, GrScalar sy);
+
+ /**
+ * overwrite entire matrix to be a skew matrix
+ * @param skx x skew factor
+ * @param sky y skew factor
+ */
+ void setSkew(GrScalar skx, GrScalar sky);
+
+ /**
+ * set this matrix to be a concatenation of two
+ * matrices (a*b). Either a, b, or both can be this matrix.
+ * @param a first matrix to multiply
+ * @param b second matrix to multiply
+ */
+ void setConcat(const GrMatrix& a, const GrMatrix& b);
+
+ /**
+ * Set this matrix to this*m
+ * @param m matrix to concatenate
+ */
+ void preConcat(const GrMatrix& m);
+
+ /**
+ * Set this matrix to m*this
+ * @param m matrix to concatenate
+ */
+ void postConcat(const GrMatrix& m);
+
+ /**
+ * Compute the inverse of this matrix, and return true if it is invertible,
+ * or false if not.
+ *
+ * If inverted is not null, and the matrix is invertible, then the inverse
+ * is written into it. If the matrix is not invertible (this method returns
+ * false) then inverted is left unchanged.
+ */
+ bool invert(GrMatrix* inverted) const;
+
+ /**
+ * Transforms a point by the matrix
+ *
+ * @param src the point to transform
+ * @return the transformed point
+ */
+ GrPoint mapPoint(const GrPoint& src) const {
+ GrPoint result;
+ (this->*gMapProcs[fTypeMask])(&result, &src, 1);
+ return result;
+ }
+
+ /**
+ * Transforms an array of points by the matrix.
+ *
+ * @param dstPts the array to write transformed points into
+ * @param srcPts the array of points to transform
+ * @param count the number of points to transform
+ */
+ void mapPoints(GrPoint dstPts[],
+ const GrPoint srcPts[],
+ uint32_t count) const {
+ (this->*gMapProcs[fTypeMask])(dstPts, srcPts, count);
+ }
+
+ /**
+ * Transforms pts with arbitrary stride in place.
+ *
+ * @param start pointer to first point to transform
+ * @param stride distance in bytes between consecutive points
+ * @param count the number of points to transform
+ */
+ void mapPointsWithStride(GrPoint* start,
+ size_t stride,
+ uint32_t count) const {
+ for (uint32_t i = 0; i < count; ++i) {
+ this->mapPoints(start, start, 1);
+ start = (GrPoint*)((intptr_t)start + stride);
+ }
+ }
+
+ /**
+ * Transform the 4 corners of the src rect, and return the bounding rect
+ * in the dst rect. Note: src and dst may point to the same memory.
+ */
+ void mapRect(GrRect* dst, const GrRect& src) const;
+
+ /**
+ * Transform the 4 corners of the rect, and return their bounds in the rect
+ */
+ void mapRect(GrRect* rect) const {
+ this->mapRect(rect, *rect);
+ }
+
+ /**
+ * Checks if matrix is a perspective matrix.
+ * @return true if third row is not (0, 0, 1)
+ */
+ bool hasPerspective() const;
+
+ /**
+ * Checks whether matrix is identity
+ * @return true if matrix is identity
+ */
+ bool isIdentity() const;
+
+ /**
+ * Calculates the maximum stretching factor of the matrix. Only defined if
+ * the matrix does not have perspective.
+ *
+ * @return maximum stretching factor or negative if matrix has perspective.
+ */
+ GrScalar getMaxStretch() const;
+
+ /**
+ * Checks for matrix equality. Test is element-by-element equality,
+ * not a homogeneous test.
+ * @return true if matrices are equal, false otherwise
+ */
+ bool operator == (const GrMatrix& m) const;
+
+ /**
+ * Checks for matrix inequality. Test is element-by-element inequality,
+ * not a homogeneous test.
+ * @return true if matrices are not equal, false otherwise
+ */
+ bool operator != (const GrMatrix& m) const;
+
+ static void UnitTest();
+
+private:
+
+ void setTypeMask();
+
+ double determinant() const;
+
+ enum TypeBits {
+ kScale_TypeBit = 1 << 0, // set if scales are not both 1
+ kTranslate_TypeBit = 1 << 1, // set if translates are not both 0
+ kSkew_TypeBit = 1 << 2, // set if skews are not both 0
+ kPerspective_TypeBit = 1 << 3, // set if perspective
+ kZeroScale_TypeBit = 1 << 4, // set if scales are both zero
+ };
+
+ void mapIdentity(GrPoint* dst, const GrPoint* src, uint32_t count) const;
+ void mapScale(GrPoint* dst, const GrPoint* src, uint32_t count) const;
+ void mapTranslate(GrPoint* dst, const GrPoint* src, uint32_t count) const;
+ void mapScaleAndTranslate(GrPoint* dst, const GrPoint* src, uint32_t count) const;
+ void mapSkew(GrPoint* dst, const GrPoint* src, uint32_t count) const;
+ void mapScaleAndSkew(GrPoint* dst, const GrPoint* src, uint32_t count) const;
+ void mapSkewAndTranslate(GrPoint* dst, const GrPoint* src, uint32_t count) const;
+ void mapNonPerspective(GrPoint* dst, const GrPoint* src, uint32_t count) const;
+ void mapPerspective(GrPoint* dst, const GrPoint* src, uint32_t count) const;
+ void mapZero(GrPoint* dst, const GrPoint* src, uint32_t count) const;
+ void mapSetToTranslate(GrPoint* dst, const GrPoint* src, uint32_t count) const;
+ void mapSwappedScale(GrPoint* dst, const GrPoint* src, uint32_t count) const;
+ void mapSwappedScaleAndTranslate(GrPoint* dst, const GrPoint* src, uint32_t count) const;
+
+ void mapInvalid(GrPoint* dst, const GrPoint* src, uint32_t count) const;
+
+ typedef void (GrMatrix::*MapProc) (GrPoint* dst, const GrPoint* src, uint32_t count) const;
+ static const MapProc gMapProcs[];
+
+ int fTypeMask;
+
+ GrScalar fM[9];
+};
+
+void GrMatrix::set(int idx, GrScalar value) {
+ GrAssert((unsigned)idx < 9);
+ fM[idx] = value;
+ if (idx > 5) {
+ if (0 != fM[kPersp0] || 0 != fM[kPersp1] ||
+ gRESCALE != fM[kPersp2]) {
+ fTypeMask |= kPerspective_TypeBit;
+ } else {
+ fTypeMask &= ~kPerspective_TypeBit;
+ }
+ } else if (!(idx % 4)) {
+ if ((GR_Scalar1 == fM[kScaleX] && GR_Scalar1 == fM[kScaleY])) {
+ fTypeMask &= ~kScale_TypeBit;
+ fTypeMask &= ~kZeroScale_TypeBit;
+ } else {
+ fTypeMask |= kScale_TypeBit;
+ if ((0 == fM[kScaleX] && 0 == fM[kScaleY])) {
+ fTypeMask |= kZeroScale_TypeBit;
+ } else {
+ fTypeMask &= ~kZeroScale_TypeBit;
+ }
+ }
+ } else if (2 == (idx % 3)) {
+ if (0 != fM[kTransX] || 0 != fM[kTransY]) {
+ fTypeMask |= kTranslate_TypeBit;
+ } else {
+ fTypeMask &= ~kTranslate_TypeBit;
+ }
+ } else {
+ if (0 != fM[kSkewX] || 0 != fM[kSkewY]) {
+ fTypeMask |= kSkew_TypeBit;
+ } else {
+ fTypeMask &= ~kSkew_TypeBit;
+ }
+ }
+}
+
+#endif
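
A short sketch exercising the concat/map/invert API above; the values are arbitrary:

    #include "GrMatrix.h"
    #include "GrPoint.h"

    void roundTripUnitSquare() {
        GrMatrix scale, translate, combined;
        scale.setScale(2 * GR_Scalar1, 2 * GR_Scalar1);
        translate.setTranslate(GrIntToScalar(10), GrIntToScalar(5));

        // combined = translate * scale, per setConcat(a, b) above.
        combined.setConcat(translate, scale);

        GrPoint corners[4];
        corners->setRectFan(0, 0, GR_Scalar1, GR_Scalar1);
        combined.mapPoints(corners, corners, 4);    // transform in place

        GrMatrix inverse;
        if (combined.invert(&inverse)) {
            inverse.mapPoints(corners, corners, 4); // back to the unit square
        }
    }
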
diff --git a/gpu/include/GrMemory.h b/gpu/include/GrMemory.h
new file mode 100644
index 0000000000..673d0ab43d
--- /dev/null
+++ b/gpu/include/GrMemory.h
@@ -0,0 +1,151 @@
+/*
+ Copyright 2010 Google Inc.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+ */
+
+
+#ifndef GrMemory_DEFINED
+#define GrMemory_DEFINED
+
+#include "GrNoncopyable.h"
+
+class GrAutoMalloc : GrNoncopyable {
+public:
+ GrAutoMalloc(size_t bytes) : fPtr(GrMalloc(bytes)) {}
+ ~GrAutoMalloc() { GrFree(fPtr); }
+
+ /**
+ * Return the allocated memory, or NULL if it has already been freed or
+ * detached.
+ */
+ void* get() const { return fPtr; }
+
+ /**
+ * transfer ownership of the memory to the caller. It must be freed with
+ * a call to GrFree()
+ */
+ void* detach() {
+ void* ptr = fPtr;
+ fPtr = NULL; // we no longer own the block
+ return ptr;
+ }
+
+ /**
+ * free the block now. get() will now return NULL
+ */
+ void free() {
+ GrFree(fPtr);
+ fPtr = NULL;
+ }
+
+private:
+ void* fPtr;
+};
+
+/**
+ * Variant of GrAutoMalloc with a compile-time specified byte size that is
+ * pre-allocated in the class object, avoiding a call to GrMalloc if
+ * possible.
+ */
+template <size_t SIZE> class GrAutoSMalloc : GrNoncopyable {
+public:
+ GrAutoSMalloc() {
+ fPtr = fStorage;
+ fAllocatedBytes = SIZE;
+ }
+
+ explicit GrAutoSMalloc(size_t bytes) {
+ if (bytes > SIZE) {
+ fPtr = GrMalloc(bytes);
+ fAllocatedBytes = bytes;
+ } else {
+ fPtr = fStorage;
+ fAllocatedBytes = SIZE;
+ }
+ }
+
+ ~GrAutoSMalloc() {
+ if (fPtr != (void*)fStorage) {
+ GrFree(fPtr);
+ }
+ }
+
+ /**
+ * Return the allocated memory, or NULL if it has already been freed or
+ * detached.
+ */
+ void* get() const { return fPtr; }
+
+ /**
+ * Reallocates to a new size. May or may not call malloc. The contents
+ * are not preserved. If growOnly is true it will never reduce the
+ * allocated size.
+ */
+ void* realloc(size_t newSize, bool growOnly = false) {
+ if (newSize <= SIZE) {
+ if (NULL == fPtr) {
+ fPtr = fStorage;
+ fAllocatedBytes = SIZE;
+ } else if (!growOnly && fPtr != (void*)fStorage) {
+ GrFree(fPtr);
+ fPtr = fStorage;
+ fAllocatedBytes = SIZE;
+ }
+ } else if ((newSize > fAllocatedBytes) ||
+ (!growOnly && newSize < (fAllocatedBytes >> 1))) {
+ if (NULL != fPtr && fPtr != (void*)fStorage) {
+ GrFree(fPtr);
+ }
+ fPtr = GrMalloc(newSize);
+ fAllocatedBytes = newSize;
+ }
+ GrAssert(fAllocatedBytes >= newSize);
+ GrAssert((fPtr == fStorage) == (fAllocatedBytes == SIZE));
+ GR_DEBUGCODE(memset(fPtr, 0xEF, fAllocatedBytes));
+ return fPtr;
+ }
+
+ /**
+ * free the block now. get() will now return NULL
+ */
+ void free() {
+ if (fPtr != (void*)fStorage) {
+ GrFree(fPtr);
+ }
+ fAllocatedBytes = 0;
+ fPtr = NULL;
+ }
+
+private:
+ void* fPtr;
+ uint32_t fAllocatedBytes;
+ uint32_t fStorage[GrALIGN4(SIZE) >> 2];
+};
+
+/**
+ * Variant of GrAutoSMalloc with a compile-time specified element count and
+ * type. The storage is pre-allocated in the class object, avoiding a call
+ * to GrMalloc when the runtime count fits.
+ */
+template <int COUNT, typename T>
+class GrAutoSTMalloc : public GrAutoSMalloc<COUNT * sizeof(T)> {
+public:
+ GrAutoSTMalloc(int count) : GrAutoSMalloc<COUNT * sizeof(T)>(count * sizeof(T)) {}
+
+ operator T*() { return (T*)this->get(); }
+};
+
+
+#endif
+
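
A sketch of the intended use of the stack-backed allocators above; the 32-point threshold and the function itself are illustrative:

    #include "GrMemory.h"
    #include "GrPoint.h"

    void buildFans(int quadCount) {
        // Uses the in-object storage for up to 32 points and only calls
        // GrMalloc beyond that; the memory is released automatically when
        // 'storage' goes out of scope.
        GrAutoSTMalloc<32, GrPoint> storage(4 * quadCount);
        GrPoint* pts = storage;             // via operator T*()
        for (int i = 0; i < quadCount; ++i) {
            pts[4 * i].setIRectFan(0, 0, i + 1, i + 1);
        }
        // ... hand 'pts' off to a vertex buffer or draw target here ...
    }
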
diff --git a/gpu/include/GrMesh.h b/gpu/include/GrMesh.h
new file mode 100644
index 0000000000..4d904e41bd
--- /dev/null
+++ b/gpu/include/GrMesh.h
@@ -0,0 +1,42 @@
+#ifndef GrMesh_DEFINED
+#define GrMesh_DEFINED
+
+#include "SkRect.h"
+#include "SkPoint.h"
+
+class SkCanvas;
+class SkPaint;
+
+class GrMesh {
+public:
+ GrMesh();
+ ~GrMesh();
+
+ GrMesh& operator=(const GrMesh& src);
+
+ void init(const SkRect& bounds, int rows, int cols,
+ const SkRect& texture);
+
+ const SkRect& bounds() const { return fBounds; }
+
+ int rows() const { return fRows; }
+ int cols() const { return fCols; }
+ SkPoint& pt(int row, int col) {
+ return fPts[row * (fRows + 1) + col];
+ }
+
+ void draw(SkCanvas*, const SkPaint&);
+ void drawWireframe(SkCanvas* canvas, const SkPaint& paint);
+
+private:
+ SkRect fBounds;
+ int fRows, fCols;
+ SkPoint* fPts;
+ SkPoint* fTex; // just points into fPts, not separately allocated
+ int fCount;
+ uint16_t* fIndices;
+ int fIndexCount;
+};
+
+#endif
+
diff --git a/gpu/include/GrNoncopyable.h b/gpu/include/GrNoncopyable.h
new file mode 100644
index 0000000000..888e3b173b
--- /dev/null
+++ b/gpu/include/GrNoncopyable.h
@@ -0,0 +1,38 @@
+/*
+ Copyright 2010 Google Inc.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+ */
+
+
+#ifndef GrNoncopyable_DEFINED
+#define GrNoncopyable_DEFINED
+
+#include "GrTypes.h"
+
+/**
+ * Base for classes that want to disallow copying themselves. It makes its
+ * copy-constructor and assignment operators private (and unimplemented).
+ */
+class GrNoncopyable {
+public:
+ GrNoncopyable() {}
+
+private:
+ // illegal
+ GrNoncopyable(const GrNoncopyable&);
+ GrNoncopyable& operator=(const GrNoncopyable&);
+};
+
+#endif
+
diff --git a/gpu/include/GrPath.h b/gpu/include/GrPath.h
new file mode 100644
index 0000000000..a9b75665c3
--- /dev/null
+++ b/gpu/include/GrPath.h
@@ -0,0 +1,84 @@
+/*
+ Copyright 2010 Google Inc.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+ */
+
+
+#ifndef GrPath_DEFINED
+#define GrPath_DEFINED
+
+#include "GrPathSink.h"
+#include "GrPathIter.h"
+#include "GrTDArray.h"
+#include "GrPoint.h"
+
+class GrPath : public GrPathSink {
+public:
+ GrPath();
+ GrPath(const GrPath&);
+ explicit GrPath(GrPathIter&);
+ virtual ~GrPath();
+
+ GrPathIter::ConvexHint getConvexHint() const { return fConvexHint; }
+ void setConvexHint(GrPathIter::ConvexHint hint) { fConvexHint = hint; }
+
+ void resetFromIter(GrPathIter*);
+
+ // overrides from GrPathSink
+
+ virtual void moveTo(GrScalar x, GrScalar y);
+ virtual void lineTo(GrScalar x, GrScalar y);
+ virtual void quadTo(GrScalar x0, GrScalar y0, GrScalar x1, GrScalar y1);
+ virtual void cubicTo(GrScalar x0, GrScalar y0, GrScalar x1, GrScalar y1,
+ GrScalar x2, GrScalar y2);
+ virtual void close();
+
+ class Iter : public GrPathIter {
+ public:
+ Iter(const GrPath& path);
+
+ // overrides from GrPathIter
+ virtual Command next(GrPoint points[]);
+ virtual ConvexHint hint() const;
+ virtual Command next();
+ virtual void rewind();
+ private:
+ const GrPath& fPath;
+ GrPoint fLastPt;
+ int fVerbIndex;
+ int fPtIndex;
+ };
+
+private:
+ enum Verb {
+ kMove, kLine, kQuad, kCubic, kClose
+ };
+
+ GrTDArray<uint8_t> fVerbs;
+ GrTDArray<GrPoint> fPts;
+ GrPathIter::ConvexHint fConvexHint;
+
+ // this ensures we have a moveTo at the start of each contour
+ inline void ensureMoveTo();
+
+ bool wasLastVerb(Verb verb) const {
+ int count = fVerbs.count();
+ return count > 0 && verb == fVerbs[count - 1];
+ }
+
+ friend class Iter;
+};
+
+#endif
+
diff --git a/gpu/include/GrPathIter.h b/gpu/include/GrPathIter.h
new file mode 100644
index 0000000000..028faaa14d
--- /dev/null
+++ b/gpu/include/GrPathIter.h
@@ -0,0 +1,110 @@
+/*
+ Copyright 2010 Google Inc.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+ */
+
+
+#ifndef GrPathIter_DEFINED
+#define GrPathIter_DEFINED
+
+#include "GrTypes.h"
+
+struct GrPoint;
+
+/**
+ 2D Path iterator. Porting layer creates a subclass of this. It allows Ganesh to
+ parse the top-level API's 2D paths. Supports lines, quadratics, and cubic
+ pieces and moves (multi-part paths).
+ */
+class GrPathIter {
+public:
+ /**
+ Returned by next(). Indicates the next piece of the path.
+ */
+ enum Command {
+ kMove_Command, //!< next() returns 1 pt
+ // Starts a new subpath
+ // at the returned point
+ kLine_Command, //!< next() returns 2 pts
+ // Adds a line segment
+ kQuadratic_Command, //!< next() returns 3 pts
+ // Adds a quadratic segment
+ kCubic_Command, //!< next() returns 4 pts
+ // Adds a cubic segment
+ kClose_Command, //!< next() returns 0 pts
+ kEnd_Command //!< next() returns 0 pts
+ // Implicitly closes the last
+ // point
+ };
+
+ enum ConvexHint {
+ kNone_ConvexHint, //!< No hint about convexity
+ // of the path
+ kConvex_ConvexHint, //!< Path is one convex piece
+ kNonOverlappingConvexPieces_ConvexHint, //!< Multiple convex pieces,
+ // pieces are known to be
+ // disjoint
+ kSameWindingConvexPieces_ConvexHint, //!< Multiple convex pieces,
+ // may or may not intersect,
+ // either all wind cw or all
+ // wind ccw.
+ kConcave_ConvexHint //!< Path is known to be
+ // concave
+ };
+
+ static int NumCommandPoints(Command cmd) {
+ static const int numPoints[] = {
+ 1, 2, 3, 4, 0, 0
+ };
+ return numPoints[cmd];
+ }
+
+ virtual ~GrPathIter() {};
+
+ /**
+ Iterates through the path. Should not be called after
+ kEnd_Command has been returned once. This version retrieves the
+ points for the command.
+ @param points The points relevant to the returned command. See Command
+ enum for number of points valid for each command.
+ @return The next command of the path.
+ */
+ virtual Command next(GrPoint points[4]) = 0;
+
+ /**
+ * If the host API has knowledge of the convexity of the path
+ * it can be communicated by this hint. Ganesh can make these
+ * determinations itself. So it is not necessary to compute
+ * convexity status if it isn't already determined.
+ *
+ * @return a hint about the convexity of the path.
+ */
+ virtual ConvexHint hint() const { return kNone_ConvexHint; }
+
+ /**
+ Iterates through the path. Should not be called after
+ kEnd_Command has been returned once. This version does not retrieve the
+ points for the command.
+ @return The next command of the path.
+ */
+ virtual Command next() = 0;
+
+ /**
+ Restarts iteration from the beginning.
+ */
+ virtual void rewind() = 0;
+
+};
+
+#endif
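
A sketch of the iteration protocol described above, usable with any concrete GrPathIter (for example GrPath::Iter from GrPath.h):

    #include "GrPathIter.h"
    #include "GrPoint.h"

    // Count the line, quadratic, and cubic segments in a path.
    int countSegments(GrPathIter* iter) {
        iter->rewind();
        GrPoint pts[4];        // next() writes at most 4 points
        int segments = 0;
        for (;;) {
            GrPathIter::Command cmd = iter->next(pts);
            if (GrPathIter::kEnd_Command == cmd) {
                break;         // do not call next() again after this
            }
            if (GrPathIter::kLine_Command == cmd ||
                GrPathIter::kQuadratic_Command == cmd ||
                GrPathIter::kCubic_Command == cmd) {
                ++segments;
            }
        }
        return segments;
    }
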
diff --git a/gpu/include/GrPathSink.h b/gpu/include/GrPathSink.h
new file mode 100644
index 0000000000..4e8a0c2c43
--- /dev/null
+++ b/gpu/include/GrPathSink.h
@@ -0,0 +1,36 @@
+/*
+ Copyright 2010 Google Inc.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+ */
+
+
+#ifndef GrPathSink_DEFINED
+#define GrPathSink_DEFINED
+
+#include "GrScalar.h"
+
+class GrPathSink {
+public:
+ virtual ~GrPathSink() {}
+
+ virtual void moveTo(GrScalar x, GrScalar y) = 0;
+ virtual void lineTo(GrScalar x, GrScalar y) = 0;
+ virtual void quadTo(GrScalar x0, GrScalar y0, GrScalar x1, GrScalar y1) = 0;
+ virtual void cubicTo(GrScalar x0, GrScalar y0, GrScalar x1, GrScalar y1,
+ GrScalar x2, GrScalar y2) = 0;
+ virtual void close() = 0;
+};
+
+#endif
+
diff --git a/gpu/include/GrPlotMgr.h b/gpu/include/GrPlotMgr.h
new file mode 100644
index 0000000000..cd60bdebde
--- /dev/null
+++ b/gpu/include/GrPlotMgr.h
@@ -0,0 +1,84 @@
+/*
+ Copyright 2010 Google Inc.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+ */
+
+
+#ifndef GrPlotMgr_DEFINED
+#define GrPlotMgr_DEFINED
+
+#include "GrTypes.h"
+#include "GrPoint.h"
+
+class GrPlotMgr : GrNoncopyable {
+public:
+ GrPlotMgr(int width, int height) {
+ fDim.set(width, height);
+ size_t needed = width * height;
+ if (needed <= sizeof(fStorage)) {
+ fBusy = fStorage;
+ } else {
+ fBusy = new char[needed];
+ }
+ this->reset();
+ }
+
+ ~GrPlotMgr() {
+ if (fBusy != fStorage) {
+ delete[] fBusy;
+ }
+ }
+
+ void reset() {
+ Gr_bzero(fBusy, fDim.fX * fDim.fY);
+ }
+
+ bool newPlot(GrIPoint16* loc) {
+ char* busy = fBusy;
+ for (int y = 0; y < fDim.fY; y++) {
+ for (int x = 0; x < fDim.fX; x++) {
+ if (!*busy) {
+ *busy = true;
+ loc->set(x, y);
+ return true;
+ }
+ busy++;
+ }
+ }
+ return false;
+ }
+
+ bool isBusy(int x, int y) const {
+ GrAssert((unsigned)x < (unsigned)fDim.fX);
+ GrAssert((unsigned)y < (unsigned)fDim.fY);
+ return fBusy[y * fDim.fX + x] != 0;
+ }
+
+ void freePlot(int x, int y) {
+ GrAssert((unsigned)x < (unsigned)fDim.fX);
+ GrAssert((unsigned)y < (unsigned)fDim.fY);
+ fBusy[y * fDim.fX + x] = false;
+ }
+
+private:
+ enum {
+ STORAGE = 64
+ };
+ char fStorage[STORAGE];
+ char* fBusy;
+ GrIPoint16 fDim;
+};
+
+#endif
+
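
A sketch of the allocate/free cycle GrPlotMgr manages; the 4x4 grid is arbitrary:

    #include "GrPlotMgr.h"

    void exercisePlots() {
        GrPlotMgr mgr(4, 4);               // 4x4 grid of plots

        GrIPoint16 loc;
        while (mgr.newPlot(&loc)) {
            // 'loc' is the cell just marked busy; an atlas would now
            // hand that cell to a client.
        }
        // The grid is full here; freeing a cell makes it reusable.
        mgr.freePlot(0, 0);
        bool reused = mgr.newPlot(&loc);   // true again, loc is (0, 0)
        (void)reused;
    }
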
diff --git a/gpu/include/GrPoint.h b/gpu/include/GrPoint.h
new file mode 100644
index 0000000000..bb24959c34
--- /dev/null
+++ b/gpu/include/GrPoint.h
@@ -0,0 +1,287 @@
+/*
+ Copyright 2010 Google Inc.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+ */
+
+
+#ifndef GrPoint_DEFINED
+#define GrPoint_DEFINED
+
+#include "GrTypes.h"
+#include "GrScalar.h"
+
+/**
+ * 2D Point struct
+ */
+struct GrPoint {
+public:
+ GrScalar fX, fY;
+
+ GrPoint() {}
+ GrPoint(GrScalar x, GrScalar y) { fX = x; fY = y; }
+
+ GrScalar x() const { return fX; }
+ GrScalar y() const { return fY; }
+
+ void set(GrScalar x, GrScalar y) {
+ fX = x;
+ fY = y;
+ }
+
+ void setAsMidPoint(const GrPoint& a, const GrPoint& b) {
+ fX = GrScalarAve(a.fX, b.fX);
+ fY = GrScalarAve(a.fY, b.fY);
+ }
+
+ void offset(GrScalar dx, GrScalar dy) {
+ fX += dx;
+ fY += dy;
+ }
+
+ GrScalar distanceToSqd(const GrPoint& p) const {
+ GrScalar dx = (p.fX - fX);
+ GrScalar dy = (p.fY - fY);
+ return GrMul(dx, dx) + GrMul(dy, dy);
+ }
+
+ GrScalar distanceTo(const GrPoint& p) const {
+ // TODO: fixed point sqrt
+ return GrFloatToScalar(sqrtf(GrScalarToFloat(distanceToSqd(p))));
+ }
+
+ GrScalar distanceToOriginSqd() const {
+ return GrMul(fX, fX) + GrMul(fY, fY);
+ }
+
+ GrScalar distanceToOrigin() const {
+ return GrFloatToScalar(sqrtf(GrScalarToFloat(distanceToOriginSqd())));
+ }
+
+ inline GrScalar distanceToLineBetweenSqd(const GrPoint& a,
+ const GrPoint& b) const;
+
+ inline GrScalar distanceToLineBetween(const GrPoint& a,
+ const GrPoint& b) const;
+
+ inline GrScalar distanceToLineSegmentBetweenSqd(const GrPoint& a,
+ const GrPoint& b) const;
+
+ inline GrScalar distanceToLineSegmentBetween(const GrPoint& a,
+ const GrPoint& b) const;
+
+ // counter-clockwise fan
+ void setRectFan(GrScalar l, GrScalar t, GrScalar r, GrScalar b) {
+ GrPoint* v = this;
+ v[0].set(l, t);
+ v[1].set(l, b);
+ v[2].set(r, b);
+ v[3].set(r, t);
+ }
+
+ void setRectFan(GrScalar l, GrScalar t, GrScalar r, GrScalar b, size_t stride) {
+ GrAssert(stride >= sizeof(GrPoint));
+ ((GrPoint*)((intptr_t)this + 0 * stride))->set(l, t);
+ ((GrPoint*)((intptr_t)this + 1 * stride))->set(l, b);
+ ((GrPoint*)((intptr_t)this + 2 * stride))->set(r, b);
+ ((GrPoint*)((intptr_t)this + 3 * stride))->set(r, t);
+ }
+
+ // counter-clockwise fan
+ void setIRectFan(int l, int t, int r, int b) {
+ GrPoint* v = this;
+ v[0].set(GrIntToScalar(l), GrIntToScalar(t));
+ v[1].set(GrIntToScalar(l), GrIntToScalar(b));
+ v[2].set(GrIntToScalar(r), GrIntToScalar(b));
+ v[3].set(GrIntToScalar(r), GrIntToScalar(t));
+ }
+
+ void setIRectFan(int l, int t, int r, int b, size_t stride) {
+ GrAssert(stride >= sizeof(GrPoint));
+ ((GrPoint*)((intptr_t)this + 0 * stride))->set(GrIntToScalar(l),
+ GrIntToScalar(t));
+ ((GrPoint*)((intptr_t)this + 1 * stride))->set(GrIntToScalar(l),
+ GrIntToScalar(b));
+ ((GrPoint*)((intptr_t)this + 2 * stride))->set(GrIntToScalar(r),
+ GrIntToScalar(b));
+ ((GrPoint*)((intptr_t)this + 3 * stride))->set(GrIntToScalar(r),
+ GrIntToScalar(t));
+ }
+
+ bool operator ==(const GrPoint& p) const {
+ return fX == p.fX && fY == p.fY;
+ }
+
+ bool operator !=(const GrPoint& p) const {
+ return fX != p.fX || fY != p.fY;
+ }
+};
+
+struct GrIPoint16 {
+ int16_t fX, fY;
+
+ void set(intptr_t x, intptr_t y) {
+ fX = GrToS16(x);
+ fY = GrToS16(y);
+ }
+};
+
+struct GrVec {
+public:
+ GrScalar fX, fY;
+
+ GrVec() {}
+ GrVec(GrScalar x, GrScalar y) { fX = x; fY = y; }
+
+ GrScalar x() const { return fX; }
+ GrScalar y() const { return fY; }
+
+ /**
+ * set x and y length of the vector.
+ */
+ void set(GrScalar x, GrScalar y) {
+ fX = x;
+ fY = y;
+ }
+
+ /**
+ * set vector to point from a to b.
+ */
+ void setBetween(const GrPoint& a, const GrPoint& b) {
+ fX = b.fX - a.fX;
+ fY = b.fY - a.fY;
+ }
+
+ /**
+ * length of the vector squared.
+ */
+ GrScalar lengthSqd() const {
+ return GrMul(fX, fX) + GrMul(fY, fY);
+ }
+
+ /**
+ * length of the vector.
+ */
+ GrScalar length() const {
+ // TODO: fixed point sqrt
+ return GrFloatToScalar(sqrtf(GrScalarToFloat(lengthSqd())));
+ }
+
+ /**
+ * normalizes the vector if its length is not 0.
+ * @return true if normalized, otherwise false.
+ */
+ bool normalize() {
+ GrScalar l = lengthSqd();
+ if (l) {
+ // TODO: fixed point sqrt and invert
+ l = 1 / sqrtf(l);
+ fX *= l;
+ fY *= l;
+ return true;
+ }
+ return false;
+ }
+
+ /**
+ * Dot product of this with vec.
+ */
+ GrScalar dot(const GrVec& vec) const {
+ return GrMul(vec.fX, fX) + GrMul(vec.fY, fY);
+ }
+
+ /**
+ * z-value of this cross vec.
+ */
+ GrScalar cross(const GrVec& vec) const {
+ return GrMul(fX, vec.fY) - GrMul(fY, vec.fX);
+ }
+
+ bool operator ==(const GrPoint& p) const {
+ return fX == p.fX && fY == p.fY;
+ }
+
+ bool operator !=(const GrPoint& p) const {
+ return fX != p.fX || fY != p.fY;
+ }
+};
+
+GrScalar GrPoint::distanceToLineBetweenSqd(const GrPoint& a,
+ const GrPoint& b) const {
+ // Let d be the distance between c (this) and line ab.
+ // The area of the triangle defined by a, b, and c is
+ // A = |b-a|*d/2. Let u = b-a and v = c-a. The cross product of
+ // u and v is aligned with the z axis and its magnitude is 2A.
+ // So d = |u x v| / |u|.
+ GrVec u, v;
+ u.setBetween(a,b);
+ v.setBetween(a,*this);
+
+ GrScalar det = u.cross(v);
+ return (GrMul(det, det)) / u.lengthSqd();
+}
+
+GrScalar GrPoint::distanceToLineBetween(const GrPoint& a,
+ const GrPoint& b) const {
+ GrVec u, v;
+ u.setBetween(a,b);
+ v.setBetween(a,*this);
+
+ GrScalar det = u.cross(v);
+ return (GrScalarAbs(det)) / u.length();
+}
+
+GrScalar GrPoint::distanceToLineSegmentBetweenSqd(const GrPoint& a,
+ const GrPoint& b) const {
+ // See comments to distanceToLineBetweenSqd. If the projection of c onto
+ // u is between a and b then this returns the same result as that
+ // function. Otherwise, it returns the distance to the closer of a and
+ // b. Let the projection of v onto u be v'. There are three cases:
+ // 1. v' points opposite to u. c is not between a and b and is closer
+ // to a than b.
+ // 2. v' points along u and has magnitude less than |u|. c is between
+ // a and b and the distance to the segment is the same as distance
+ // to the line ab.
+ // 3. v' points along u and has magnitude greater than |u|. c is not
+ // between a and b and is closer to b than a.
+ // v' = (u dot v) * u / |u|. So if (u dot v)/|u| is less than zero we're
+ // in case 1. If (u dot v)/|u| is > |u| we are in case 3. Otherwise
+ // we're in case 2. We actually compare (u dot v) to 0 and |u|^2 to
+ // avoid a sqrt to compute |u|.
+
+ GrVec u, v;
+ u.setBetween(a,b);
+ v.setBetween(a,*this);
+
+ GrScalar uLengthSqd = u.lengthSqd();
+ GrScalar uDotV = u.dot(v);
+
+ if (uDotV <= 0) {
+ return v.lengthSqd();
+ } else if (uDotV > uLengthSqd) {
+ return b.distanceToSqd(*this);
+ } else {
+ GrScalar det = u.cross(v);
+ return (GrMul(det, det)) / uLengthSqd;
+ }
+}
+
+GrScalar GrPoint::distanceToLineSegmentBetween(const GrPoint& a,
+ const GrPoint& b) const {
+ // TODO: fixed point sqrt
+ return GrFloatToScalar(sqrtf(GrScalarToFloat(distanceToLineSegmentBetweenSqd(a,b))));
+}
+
+
+#endif
+
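
A small sketch using the vector helpers and the distance-to-segment routine above; the tolerance test is illustrative:

    #include "GrPoint.h"

    // Reject points that stray too far from the segment ab. Squared
    // distances are compared to avoid the sqrt in distanceTo*().
    bool nearSegment(const GrPoint& p, const GrPoint& a, const GrPoint& b,
                     GrScalar tol) {
        return p.distanceToLineSegmentBetweenSqd(a, b) <= GrMul(tol, tol);
    }

    // Sign of the z component of (b - a) x (c - a); the two signs
    // distinguish the two possible winding directions of a, b, c.
    GrScalar turn(const GrPoint& a, const GrPoint& b, const GrPoint& c) {
        GrVec ab, ac;
        ab.setBetween(a, b);
        ac.setBetween(a, c);
        return ab.cross(ac);
    }
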
diff --git a/gpu/include/GrRandom.h b/gpu/include/GrRandom.h
new file mode 100644
index 0000000000..408f61de7b
--- /dev/null
+++ b/gpu/include/GrRandom.h
@@ -0,0 +1,62 @@
+/*
+ Copyright 2010 Google Inc.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+ */
+
+
+#ifndef GrRandom_DEFINED
+#define GrRandom_DEFINED
+
+class GrRandom {
+public:
+ GrRandom() : fSeed(0) {}
+ GrRandom(uint32_t seed) : fSeed(seed) {}
+
+ uint32_t seed() const { return fSeed; }
+
+ uint32_t nextU() {
+ fSeed = fSeed * kMUL + kADD;
+ return fSeed;
+ }
+
+ int32_t nextS() { return (int32_t)this->nextU(); }
+
+ /**
+ * Returns value [0...1) as a float
+ */
+ float nextF() {
+ // const is 1 / (2^32 - 1)
+ return (float)(this->nextU() * 2.32830644e-10);
+ }
+
+ /**
+ * Returns value [min...max) as a float
+ */
+ float nextF(float min, float max) {
+ return min + this->nextF() * (max - min);
+ }
+
+private:
+ /*
+ * These constants taken from "Numerical Recipes in C", reprinted 1999
+ */
+ enum {
+ kMUL = 1664525,
+ kADD = 1013904223
+ };
+ uint32_t fSeed;
+};
+
+#endif
+
diff --git a/gpu/include/GrRect.h b/gpu/include/GrRect.h
new file mode 100644
index 0000000000..5192ebdf18
--- /dev/null
+++ b/gpu/include/GrRect.h
@@ -0,0 +1,284 @@
+/*
+ Copyright 2010 Google Inc.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+ */
+
+
+#ifndef GrRect_DEFINED
+#define GrRect_DEFINED
+
+#include "GrPoint.h"
+
+struct GrIRect {
+ int32_t fLeft, fTop, fRight, fBottom;
+
+ GrIRect() {}
+ GrIRect(int32_t left, int32_t top, int32_t right, int32_t bottom) {
+ fLeft = left;
+ fTop = top;
+ fRight = right;
+ fBottom = bottom;
+ }
+
+ int32_t x() const { return fLeft; }
+ int32_t y() const { return fTop; }
+ int32_t width() const { return fRight - fLeft; }
+ int32_t height() const { return fBottom - fTop; }
+
+ bool isEmpty() const { return fLeft >= fRight || fTop >= fBottom; }
+ bool isInverted() const { return fLeft > fRight || fTop > fBottom; }
+
+ void setEmpty() { fLeft = fTop = fRight = fBottom = 0; }
+
+ void setXYWH(int32_t x, int32_t y, int32_t w, int32_t h) {
+ fLeft = x;
+ fTop = y;
+ fRight = x + w;
+ fBottom = y + h;
+ }
+
+ void setLTRB(int32_t l, int32_t t, int32_t r, int32_t b) {
+ fLeft = l;
+ fTop = t;
+ fRight = r;
+ fBottom = b;
+ }
+
+ /**
+ * Make the largest representable rectangle.
+ */
+ void setLargest() {
+ fLeft = fTop = GR_Int32Min;
+ fRight = fBottom = GR_Int32Max;
+ }
+
+ bool quickReject(int l, int t, int r, int b) const {
+ return l >= fRight || fLeft >= r || t >= fBottom || fTop >= b;
+ }
+
+ void unionWith(const GrIRect& r) {
+ if (fLeft > r.fLeft) fLeft = r.fLeft;
+ if (fTop > r.fTop) fTop = r.fTop;
+ if (fRight < r.fRight) fRight = r.fRight;
+ if (fBottom < r.fBottom) fBottom = r.fBottom;
+ }
+
+ friend bool operator==(const GrIRect& a, const GrIRect& b) {
+ return 0 == memcmp(&a, &b, sizeof(a));
+ }
+
+ friend bool operator!=(const GrIRect& a, const GrIRect& b) {
+ return 0 != memcmp(&a, &b, sizeof(a));
+ }
+
+ bool equalsLTRB(int l, int t, int r, int b) const {
+ return fLeft == l && fTop == t &&
+ fRight == r && fBottom == b;
+ }
+ bool equalsXYWH(int x, int y, int w, int h) const {
+ return fLeft == x && fTop == y &&
+ this->width() == w && this->height() == h;
+ }
+
+ bool contains(const GrIRect& r) const {
+ return fLeft <= r.fLeft &&
+ fRight >= r.fRight &&
+ fTop <= r.fTop &&
+ fBottom >= r.fBottom;
+ }
+};
+
+struct GrIRect16 {
+ int16_t fLeft, fTop, fRight, fBottom;
+
+ int width() const { return fRight - fLeft; }
+ int height() const { return fBottom - fTop; }
+ int area() const { return this->width() * this->height(); }
+ bool isEmpty() const { return fLeft >= fRight || fTop >= fBottom; }
+
+ void set(const GrIRect& r) {
+ fLeft = GrToS16(r.fLeft);
+ fTop = GrToS16(r.fTop);
+ fRight = GrToS16(r.fRight);
+ fBottom = GrToS16(r.fBottom);
+ }
+};
+
+/**
+ * 2D Rect struct
+ */
+struct GrRect {
+ GrScalar fLeft, fTop, fRight, fBottom;
+
+ /**
+ * Uninitialized rectangle.
+ */
+ GrRect() {}
+
+ /**
+ * Initialize a rectangle to a point.
+ * @param pt the point used to initialize the rectangle.
+ */
+ GrRect(GrPoint pt) {
+ setToPoint(pt);
+ }
+
+ GrRect(GrScalar left, GrScalar top, GrScalar right, GrScalar bottom) {
+ fLeft = left;
+ fTop = top;
+ fRight = right;
+ fBottom = bottom;
+ }
+
+ explicit GrRect(const GrIRect& src) {
+ fLeft = GrIntToScalar(src.fLeft);
+ fTop = GrIntToScalar(src.fTop);
+ fRight = GrIntToScalar(src.fRight);
+ fBottom = GrIntToScalar(src.fBottom);
+ }
+
+ GrScalar x() const { return fLeft; }
+ GrScalar y() const { return fTop; }
+ GrScalar width() const { return fRight - fLeft; }
+ GrScalar height() const { return fBottom - fTop; }
+
+ GrScalar left() const { return fLeft; }
+ GrScalar top() const { return fTop; }
+ GrScalar right() const { return fRight; }
+ GrScalar bottom() const { return fBottom; }
+
+ GrScalar diagonalLengthSqd() const {
+ GrScalar w = width();
+ GrScalar h = height();
+ return GrMul(w, w) + GrMul(h, h);
+ }
+
+ GrScalar diagonalLength() const {
+ // TODO: fixed point sqrt
+ return GrFloatToScalar(sqrtf(GrScalarToFloat(diagonalLengthSqd())));
+ }
+
+ /**
+ * Returns true if the width or height is <= 0
+ */
+ bool isEmpty() const {
+ return fLeft >= fRight || fTop >= fBottom;
+ }
+
+ void setEmpty() {
+ fLeft = fTop = fRight = fBottom = 0;
+ }
+
+ /**
+ * returns true if the rectangle is inverted either in x or y
+ */
+ bool isInverted() const {
+ return (fLeft > fRight) || (fTop > fBottom);
+ }
+
+ /**
+ * Initialize a rectangle to a point.
+ * @param pt the point used to initialize the rectangle.
+ */
+ void setToPoint(const GrPoint& pt) {
+ fLeft = pt.fX;
+ fTop = pt.fY;
+ fRight = pt.fX;
+ fBottom = pt.fY;
+ }
+
+ void set(const GrIRect& r) {
+ fLeft = GrIntToScalar(r.fLeft);
+ fTop = GrIntToScalar(r.fTop);
+ fRight = GrIntToScalar(r.fRight);
+ fBottom = GrIntToScalar(r.fBottom);
+ }
+
+ void roundOut(GrIRect* r) const {
+ r->setLTRB(GrScalarFloorToInt(fLeft),
+ GrScalarFloorToInt(fTop),
+ GrScalarCeilToInt(fRight),
+ GrScalarCeilToInt(fBottom));
+ }
+
+ /**
+ * Set the rect to the union of the array of points. If the array is empty
+ * the rect will be empty [0,0,0,0]
+ */
+ void setBounds(const GrPoint pts[], int count);
+
+ /**
+ * Make the largest representable rectangle
+ * Set the rect to fLeft = fTop = GR_ScalarMin and
+ * fRight = fBottom = GR_ScalarMax.
+ */
+ void setLargest() {
+ fLeft = fTop = GR_ScalarMin;
+ fRight = fBottom = GR_ScalarMax;
+ }
+
+ /**
+ Set the rect to fLeft = fTop = GR_ScalarMax and
+ fRight = fBottom = GR_ScalarMin.
+ Useful for initializing a bounding rectangle.
+ */
+ void setLargestInverted() {
+ fLeft = fTop = GR_ScalarMax;
+ fRight = fBottom = GR_ScalarMin;
+ }
+
+ void setLTRB(GrScalar left,
+ GrScalar top,
+ GrScalar right,
+ GrScalar bottom) {
+ fLeft = left;
+ fTop = top;
+ fRight = right;
+ fBottom = bottom;
+ }
+
+ void setXYWH(GrScalar x, GrScalar y, GrScalar width, GrScalar height) {
+ fLeft = x;
+ fTop = y;
+ fRight = x + width;
+ fBottom = y + height;
+ }
+
+ /**
+ Expand the edges of the rectangle to include a point.
+ Useful for constructing a bounding rectangle.
+ @param pt the point used to grow the rectangle.
+ */
+ void growToInclude(const GrPoint& pt) {
+ fLeft = GrMin(pt.fX, fLeft);
+ fRight = GrMax(pt.fX, fRight);
+
+ fTop = GrMin(pt.fY, fTop);
+ fBottom = GrMax(pt.fY, fBottom);
+ }
+
+ /**
+ * Assigns 4 sequential points in order to construct a counter-clockwise
+ * triangle fan, given the corners of this rect. Returns the address of
+ * the next point, treating pts as an array.
+ */
+ GrPoint* setRectFan(GrPoint pts[4]) const {
+ pts->setRectFan(fLeft, fTop, fRight, fBottom);
+ return pts + 4;
+ }
+};
+
+#endif
+
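
A sketch of the bounding-box idiom that setLargestInverted() and growToInclude() are meant for:

    #include "GrRect.h"

    // Conservative integer bounds of a point array.
    GrIRect computeDevBounds(const GrPoint pts[], int count) {
        GrRect bounds;
        bounds.setLargestInverted();   // any point will shrink-wrap this
        for (int i = 0; i < count; ++i) {
            bounds.growToInclude(pts[i]);
        }
        if (0 == count) {
            bounds.setEmpty();         // keep the empty case well defined
        }
        GrIRect devBounds;
        bounds.roundOut(&devBounds);   // floor left/top, ceil right/bottom
        return devBounds;
    }
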
diff --git a/gpu/include/GrRectanizer.h b/gpu/include/GrRectanizer.h
new file mode 100644
index 0000000000..50bb8fed24
--- /dev/null
+++ b/gpu/include/GrRectanizer.h
@@ -0,0 +1,64 @@
+/*
+ Copyright 2010 Google Inc.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+ */
+
+
+#ifndef GrRectanizer_DEFINED
+#define GrRectanizer_DEFINED
+
+#include "GrRect.h"
+#include "GrTDArray.h"
+
+class GrRectanizerPurgeListener {
+public:
+ virtual ~GrRectanizerPurgeListener() {}
+
+ virtual void notifyPurgeStrip(void*, int yCoord) = 0;
+};
+
+class GrRectanizer {
+public:
+ GrRectanizer(int width, int height) : fWidth(width), fHeight(height) {
+ GrAssert(width >= 0);
+ GrAssert(height >= 0);
+ }
+
+ virtual ~GrRectanizer() {}
+
+ int width() const { return fWidth; }
+ int height() const { return fHeight; }
+
+ virtual bool addRect(int width, int height, GrIPoint16* loc) = 0;
+ virtual float percentFull() const = 0;
+
+ // return the Y-coordinate of a strip that should be purged, given height
+ // i.e. return the oldest such strip, or some other criteria. Return -1
+ // if there is no candidate
+ virtual int stripToPurge(int height) const = 0;
+ virtual void purgeStripAtY(int yCoord) = 0;
+
+ /**
+ * Our factory, which returns the subclass du jour
+ */
+ static GrRectanizer* Factory(int width, int height);
+
+private:
+ int fWidth;
+ int fHeight;
+};
+
+#endif
+
+
diff --git a/gpu/include/GrRefCnt.h b/gpu/include/GrRefCnt.h
new file mode 100644
index 0000000000..7204aff198
--- /dev/null
+++ b/gpu/include/GrRefCnt.h
@@ -0,0 +1,125 @@
+/*
+ Copyright 2010 Google Inc.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+ */
+
+
+#ifndef GrRefCnt_DEFINED
+#define GrRefCnt_DEFINED
+
+#include "GrTypes.h"
+#include "GrNoncopyable.h"
+
+/**
+ * Base class for reference counting. When an object is first instantiated,
+ * its reference count is 1. If the object may be null, use GrSafeRef() and
+ * GrSafeUnref().
+ *
+ * It is an error (though only checked for in the debug build) to call unref()
+ * such that the reference count becomes 0.
+ */
+class GrRefCnt : GrNoncopyable {
+public:
+ GrRefCnt() : fRefCnt(1) {}
+ virtual ~GrRefCnt() {
+ GrAssert(1 == fRefCnt);
+#if GR_DEBUG
+ fRefCnt = 0; // force validate() to trigger if called afterwards
+#endif
+ }
+
+ int32_t refcnt() const { return fRefCnt; }
+
+ void ref() const {
+ GrAssert(fRefCnt > 0);
+ ++fRefCnt;
+ }
+
+ void unref() const {
+ GrAssert(fRefCnt > 0);
+ if (1 == fRefCnt) {
+ delete this;
+ } else {
+ --fRefCnt;
+ }
+ }
+
+#if GR_DEBUG
+ void validate() const {
+ GrAssert(fRefCnt > 0);
+ }
+#else
+ void validate() const {}
+#endif
+
+private:
+ mutable int32_t fRefCnt;
+};
+
+///////////////////////////////////////////////////////////////////////////////
+
+/**
+ * Call with instance/subclass of GrRefCnt. This does nothing if obj is null,
+ * but otherwise it calls ref().
+ */
+static inline void GrSafeRef(const GrRefCnt* obj) {
+ if (obj) {
+ obj->ref();
+ }
+}
+
+/**
+ * Call with instance/subclass of GrRefCnt. This does nothing if obj is null,
+ * but otherwise it calls unref().
+ */
+static inline void GrSafeUnref(const GrRefCnt* obj) {
+ if (obj) {
+ obj->unref();
+ }
+}
+
+/**
+ * Assigns src to dst, checking for NULLs in each, and correctly incrementing
+ * the reference count of src, and decrementing the reference count of dst
+ */
+static inline void GrSafeAssign(GrRefCnt*& dst, GrRefCnt* src) {
+ if (src) {
+ src->ref();
+ }
+ if (dst) {
+ dst->unref();
+ }
+ dst = src;
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+class GrAutoRef : GrNoncopyable {
+public:
+ GrAutoRef(GrRefCnt* obj) : fObj(obj) { GrSafeRef(obj); }
+ ~GrAutoRef() { GrSafeUnref(fObj); }
+private:
+ GrRefCnt* fObj;
+};
+
+class GrAutoUnref : GrNoncopyable {
+public:
+ GrAutoUnref(GrRefCnt* obj) : fObj(obj) {}
+ ~GrAutoUnref() { GrSafeUnref(fObj); }
+private:
+ GrRefCnt* fObj;
+};
+
+#endif
+
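
A sketch of the ownership helpers above; GrObjectHolder is a made-up class used only to show the pattern:

    #include "GrRefCnt.h"

    // Holds an optional ref-counted object; NULL is allowed throughout.
    class GrObjectHolder {
    public:
        GrObjectHolder() : fObj(NULL) {}
        ~GrObjectHolder() { GrSafeUnref(fObj); }

        void setObject(GrRefCnt* obj) {
            // Refs 'obj' (if non-NULL) and unrefs the previous value.
            GrSafeAssign(fObj, obj);
        }

    private:
        GrRefCnt* fObj;
    };
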
diff --git a/gpu/include/GrSamplerState.h b/gpu/include/GrSamplerState.h
new file mode 100644
index 0000000000..06c2346d93
--- /dev/null
+++ b/gpu/include/GrSamplerState.h
@@ -0,0 +1,130 @@
+/*
+ Copyright 2010 Google Inc.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+ */
+
+
+#ifndef GrSamplerState_DEFINED
+#define GrSamplerState_DEFINED
+
+#include "GrTypes.h"
+
+class GrSamplerState {
+public:
+ enum SampleMode {
+ kNormal_SampleMode, //!< sample color directly
+ kAlphaMod_SampleMode, //!< modulate with alpha only
+ kRadial_SampleMode, //!< treat as radial gradient
+ kRadial2_SampleMode, //!< treat as 2-point radial gradient
+ kSweep_SampleMode, //!< treat as sweep gradient
+ };
+
+ /**
+ * Describes how a texture is sampled when coordinates are outside the
+ * texture border
+ */
+ enum WrapMode {
+ kClamp_WrapMode,
+ kRepeat_WrapMode,
+ kMirror_WrapMode
+ };
+
+ /**
+ * Default sampler state is set to kClamp and no-filter
+ */
+ GrSamplerState() {
+ this->setClampNoFilter();
+ }
+
+ GrSamplerState(bool filter) {
+ fWrapX = kClamp_WrapMode;
+ fWrapY = kClamp_WrapMode;
+ fSampleMode = kNormal_SampleMode;
+ fFilter = filter;
+ }
+
+ GrSamplerState(WrapMode wx, WrapMode wy, bool filter) {
+ fWrapX = wx;
+ fWrapY = wy;
+ fSampleMode = kNormal_SampleMode;
+ fFilter = filter;
+ }
+
+ GrSamplerState(WrapMode wx, WrapMode wy, SampleMode sample, bool filter) {
+ fWrapX = wx;
+ fWrapY = wy;
+ fSampleMode = sample;
+ fFilter = filter;
+ }
+
+ WrapMode getWrapX() const { return fWrapX; }
+ WrapMode getWrapY() const { return fWrapY; }
+ SampleMode getSampleMode() const { return fSampleMode; }
+ bool isFilter() const { return fFilter; }
+
+ bool isGradient() const {
+ return kRadial_SampleMode == fSampleMode ||
+ kRadial2_SampleMode == fSampleMode ||
+ kSweep_SampleMode == fSampleMode;
+ }
+
+ void setWrapX(WrapMode mode) { fWrapX = mode; }
+ void setWrapY(WrapMode mode) { fWrapY = mode; }
+ void setSampleMode(SampleMode mode) { fSampleMode = mode; }
+ void setFilter(bool filter) { fFilter = filter; }
+
+ void setClampNoFilter() {
+ fWrapX = kClamp_WrapMode;
+ fWrapY = kClamp_WrapMode;
+ fSampleMode = kNormal_SampleMode;
+ fFilter = false;
+ }
+
+ GrScalar getRadial2CenterX1() const { return fRadial2CenterX1; }
+ GrScalar getRadial2Radius0() const { return fRadial2Radius0; }
+ bool isRadial2PosRoot() const { return fRadial2PosRoot; }
+
+ /**
+ * Sets the parameters for kRadial2_SampleMode. The texture
+ * matrix must be set so that the first point is at (0,0) and the second
+ * point lies on the x-axis. The second radius minus the first is 1 unit.
+ * The additional parameters to define the gradient are specified by this
+ * function.
+ */
+ void setRadial2Params(GrScalar centerX1, GrScalar radius0, bool posRoot) {
+ fRadial2CenterX1 = centerX1;
+ fRadial2Radius0 = radius0;
+ fRadial2PosRoot = posRoot;
+ }
+
+ static const GrSamplerState& ClampNoFilter() {
+ return gClampNoFilter;
+ }
+
+private:
+ WrapMode fWrapX;
+ WrapMode fWrapY;
+ SampleMode fSampleMode;
+ bool fFilter;
+
+ // these are undefined unless fSampleMode == kRadial2_SampleMode
+ GrScalar fRadial2CenterX1;
+ GrScalar fRadial2Radius0;
+ bool fRadial2PosRoot;
+
+ static const GrSamplerState gClampNoFilter;
+};
+
+#endif
+
diff --git a/gpu/include/GrScalar.h b/gpu/include/GrScalar.h
new file mode 100644
index 0000000000..1353fb2148
--- /dev/null
+++ b/gpu/include/GrScalar.h
@@ -0,0 +1,116 @@
+/*
+ Copyright 2010 Google Inc.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+ */
+
+
+#ifndef GrScalar_DEFINED
+#define GrScalar_DEFINED
+
+#include "GrTypes.h"
+
+#include <float.h>
+#include <math.h>
+
+#define GR_Int32Max (0x7fffffff)
+#define GR_Int32Min (0x80000000)
+
+/**
+ * Convert an int to fixed point
+ */
+#if GR_DEBUG
+ inline GrFixed GrIntToFixed(int i) {
+ GrAssert(((i & 0xffff0000) == 0xffff0000) || ((i & 0xffff0000) == 0x0));
+ return i << 16;
+ }
+#else
+ #define GrIntToFixed(i) (GrFixed)((i) << 16)
+#endif
+
+#define GR_Fixed1 (1 << 16)
+#define GR_FixedHalf (1 << 15)
+#define GR_FixedMax GR_Int32Max
+#define GR_FixedMin GR_Int32Min
+
+#define GrFixedFloorToFixed(x) ((x) & ~0xFFFF)
+#define GrFixedFloorToInt(x) ((x) >> 16)
+
+/**
+ * Convert fixed point to floating point
+ */
+#define GrFixedToFloat(x) ((x) * 0.0000152587890625f)
+
+/**
+ * Convert floating point to fixed point
+ */
+#define GrFloatToFixed(x) ((GrFixed)((x) * GR_Fixed1))
+
+inline GrFixed GrFixedAbs(GrFixed x) {
+ int32_t s = (x & 0x80000000) >> 31;
+ return (GrFixed)(((int32_t)x ^ s) - s);
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+#if GR_SCALAR_IS_FIXED
+ typedef GrFixed GrScalar;
+ #define GrIntToScalar(x) GrIntToFixed(x)
+ #define GrFixedToScalar(x) (x)
+ #define GrScalarToFloat(x) GrFixedToFloat(x)
+ #define GrFloatToScalar(x) GrFloatToFixed(x)
+ #define GrScalarHalf(x) ((x) >> 1)
+ #define GrScalarAve(x,y) (((x)+(y)) >> 1)
+ #define GrScalarAbs(x) GrFixedAbs(x)
+ #define GR_Scalar1 GR_Fixed1
+ #define GR_ScalarHalf GR_FixedHalf
+ #define GR_ScalarMax GR_FixedMax
+ #define GR_ScalarMin GR_FixedMin
+#elif GR_SCALAR_IS_FLOAT
+ typedef float GrScalar;
+ #define GrIntToScalar(x) ((GrScalar)x)
+ #define GrFixedToScalar(x) GrFixedToFloat(x)
+ #define GrScalarToFloat(x) (x)
+ #define GrFloatToScalar(x) (x)
+ #define GrScalarHalf(x) ((x) * 0.5f)
+ #define GrScalarAbs(x) fabsf(x)
+ #define GrScalarAve(x,y) (((x) + (y)) * 0.5f)
+ #define GR_Scalar1 1.f
+ #define GR_ScalarHalf 0.5f
+ #define GR_ScalarMax (FLT_MAX)
+ #define GR_ScalarMin (-FLT_MAX)
+
+ static inline int32_t GrScalarFloorToInt(float x) {
+ return (int32_t)::floorf(x);
+ }
+ static inline int32_t GrScalarCeilToInt(float x) {
+ return (int32_t)::ceilf(x);
+ }
+#else
+ #error "Scalar type not defined"
+#endif
+
+/**
+ * Multiply two GrScalar values
+ */
+static inline GrScalar GrMul(GrScalar a, GrScalar b) {
+#if GR_SCALAR_IS_FLOAT
+ return a * b;
+#else
+ int64_t tmp = (int64_t)a * b;
+ return (tmp + GR_FixedHalf) >> 16;
+#endif
+}
+
+#endif
+
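The macros above let the same arithmetic compile under either scalar configuration; a small sketch (illustrative only), where the noted result holds in both the fixed and float builds:

    #include "GrScalar.h"

    // Average two scalars and halve the result, written only in terms of the
    // GrScalar macros so it builds whether GR_SCALAR_IS_FIXED or
    // GR_SCALAR_IS_FLOAT is selected.
    static GrScalar halfOfAverage(GrScalar a, GrScalar b) {
        GrScalar avg = GrScalarAve(a, b);
        return GrMul(avg, GR_ScalarHalf);
    }

    // halfOfAverage(GrIntToScalar(2), GrIntToScalar(6)) == GrIntToScalar(2)
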
diff --git a/gpu/include/GrStopwatch.h b/gpu/include/GrStopwatch.h
new file mode 100644
index 0000000000..4945897431
--- /dev/null
+++ b/gpu/include/GrStopwatch.h
@@ -0,0 +1,135 @@
+/*
+ Copyright 2010 Google Inc.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+ */
+
+
+#ifndef GrStopwatch_DEFINED
+#define GrStopwatch_DEFINED
+
+#include "GrTypes.h"
+
+template <typename PLATFORM_TIMER>
+/**
+ * Base class for stopwatch. Relies on PLATFORM_TIMER for platform-specific
+ * timer functions. PLATFORM_TIMER provides:
+ *    - typename Timestamp : a timestamp value that can be used with Elapsed()
+ *    - static Timestamp Now() : gets the current timestamp
+ *    - static double Elapsed(const Timestamp& begin, const Timestamp& end) :
+ *          computes the delta in seconds between two timestamps
+ */
+class GrStopwatchBase {
+public:
+ /**
+ * Constructor - implicitly calls reset()
+ */
+ GrStopwatchBase() {
+ fRunning = false;
+ fTotalElapsed = 0.0;
+ }
+
+ /**
+ * begins a new lap
+ */
+ void start() {
+ double lastLap = lapTime();
+ fTotalElapsed += lastLap;
+ fRunning = true;
+ fLastStart = PLATFORM_TIMER::Now();
+ }
+
+ /**
+ * ends the current lap (has no effect if no lap is running)
+ */
+ void stop() {
+ double lastLap = lapTime();
+ fTotalElapsed += lastLap;
+ fRunning = false;
+ }
+
+ /**
+ * ends current lap, resets total time
+ */
+ void reset() {
+ fRunning = false;
+ fTotalElapsed = 0.f;
+ }
+
+ /**
+ * Computes the time of all laps since last reset() including current lap
+ * if lap is still running.
+ *
+ * @return the sum time in seconds of all laps since last reset().
+ */
+ double totalTime() const {
+ return fTotalElapsed + lapTime();
+ }
+
+ /**
+ * Current lap time.
+ *
+ * @return time in seconds of current lap if one is running otherwise 0.
+ */
+ double lapTime() const {
+ if (fRunning) {
+ PLATFORM_TIMER::Timestamp now = PLATFORM_TIMER::Now();
+ return PLATFORM_TIMER::Elapsed(fLastStart, now);
+ }
+ return 0.0;
+ }
+
+private:
+ double fTotalElapsed;
+
+ typename PLATFORM_TIMER::Timestamp fLastStart;
+ bool fRunning;
+};
+
+#if GR_WIN32_BUILD
+
+ #include <Windows.h>
+
+ class GrWin32Timer {
+ public:
+ typedef LARGE_INTEGER Timestamp;
+
+ static Timestamp Now() {
+ LARGE_INTEGER now;
+ QueryPerformanceCounter(&now);
+ return now;
+ }
+
+ static double Elapsed(const Timestamp& begin, const Timestamp& end) {
+ double diff = (double)(end.QuadPart - begin.QuadPart);
+ return diff * Scale();
+ }
+ private:
+ static double Scale() {
+ static double scale;
+ if (0.0 == scale) {
+ LARGE_INTEGER freq;
+ QueryPerformanceFrequency(&freq);
+ GrAssert(0 != freq.QuadPart);
+ scale = 1 / (double) freq.QuadPart;
+ }
+ return scale;
+ }
+ };
+ typedef GrStopwatchBase<GrWin32Timer> GrStopwatch;
+#else
+ #error "Implement platform timer for stopwatch"
+#endif
+
+
+#endif
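Only the Win32 timer ships in this change; a port to another platform would add a class satisfying the Timestamp/Now()/Elapsed() contract along these lines (hypothetical sketch, not part of the patch):

    #include <sys/time.h>      // POSIX gettimeofday

    class GrPosixTimer {
    public:
        typedef timeval Timestamp;

        static Timestamp Now() {
            timeval now;
            gettimeofday(&now, NULL);
            return now;
        }

        static double Elapsed(const Timestamp& begin, const Timestamp& end) {
            // seconds between the two timestamps
            return (end.tv_sec - begin.tv_sec) +
                   (end.tv_usec - begin.tv_usec) * 1e-6;
        }
    };

    // typedef GrStopwatchBase<GrPosixTimer> GrStopwatch;
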
diff --git a/gpu/include/GrStringBuilder.h b/gpu/include/GrStringBuilder.h
new file mode 100644
index 0000000000..bcf124f337
--- /dev/null
+++ b/gpu/include/GrStringBuilder.h
@@ -0,0 +1,182 @@
+/*
+ Copyright 2010 Google Inc.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+ */
+
+
+#ifndef GrStringBuilder_DEFINED
+#define GrStringBuilder_DEFINED
+
+#include "GrTArray.h"
+#include <stdio.h>
+
+// Class used to concatenate strings into a single string.
+// See below for the GrSStringBuilder subclass, which keeps a block of
+// stack storage for short strings (to avoid malloc).
+class GrStringBuilder {
+public:
+ GrStringBuilder() :
+ fChars() {
+ fChars.push_back() = '\0';
+ }
+
+ GrStringBuilder(const GrStringBuilder& s) :
+ fChars(s.fChars) {
+ GrAssert('\0' == s.fChars.back());
+ }
+
+ GrStringBuilder(const char* s) :
+ fChars(s, strlen(s)+1) {
+ }
+
+ GrStringBuilder(const GrStringBuilder& a, const GrStringBuilder& b) {
+ GrAssert('\0' == a.fChars.back());
+ GrAssert('\0' == b.fChars.back());
+
+ fChars.push_back_n(a.fChars.count() + b.fChars.count() - 1);
+ char* s = &fChars.front();
+ memcpy(s, &a.fChars.front(), a.fChars.count() - 1);
+ s += a.fChars.count() - 1;
+ memcpy(s, &b.fChars.front(), b.fChars.count());
+ }
+
+ GrStringBuilder& operator =(const GrStringBuilder& s) {
+ fChars = s.fChars;
+ return *this;
+ }
+
+ GrStringBuilder& operator =(const char* s) {
+ GrAssert('\0' == fChars.back());
+
+ int l = strlen(s);
+ fChars.resize_back(l + 1);
+ memcpy(&fChars.front(), s, l + 1);
+ return *this;
+ }
+
+ GrStringBuilder& operator +=(const GrStringBuilder& s) {
+ GrAssert('\0' == fChars.back());
+ GrAssert('\0' == s.fChars.back());
+ fChars.push_back_n(s.length());
+ memcpy(&fChars.fromBack(s.length()), &s.fChars.front(), s.fChars.count());
+ return *this;
+ }
+
+ GrStringBuilder& operator +=(const char* s) {
+ GrAssert('\0' == fChars.back());
+ int l = strlen(s);
+ fChars.push_back_n(l);
+ memcpy(&fChars.fromBack(l), s, l + 1);
+ return *this;
+ }
+
+ GrStringBuilder& operator +=(char c) {
+ GrAssert('\0' == fChars.back());
+ fChars.back() = c;
+ fChars.push_back() = '\0';
+ return *this;
+ }
+
+ void appendInt(int x) {
+ GR_STATIC_ASSERT(4 == sizeof(int));
+ // -, 10 digits, null char
+ char temp[12];
+ sprintf(temp, "%d", x);
+ *this += temp;
+ }
+
+ char& operator [](int i) {
+ GrAssert(i < length());
+ return fChars[i];
+ }
+
+ char operator [](int i) const {
+ GrAssert(i < length());
+ return fChars[i];
+ }
+
+ const char* cstr() const { return &fChars.front(); }
+
+ int length() const { return fChars.count() - 1; }
+
+protected:
+ // helpers for GrSStringBuilder (with storage on the stack)
+
+ GrStringBuilder(void* stackChars, int stackCount) :
+ fChars(stackCount ? stackChars : NULL,
+ stackCount) {
+ fChars.push_back() = '\0';
+ }
+
+ GrStringBuilder(void* stackChars,
+ int stackCount,
+ const GrStringBuilder& s) :
+ fChars(s.fChars,
+ (stackCount ? stackChars : NULL),
+ stackCount) {
+ }
+
+ GrStringBuilder(void* stackChars,
+ int stackCount,
+ const char* s) :
+ fChars(s,
+ strlen(s)+1,
+ stackCount ? stackChars : NULL,
+ stackCount) {
+ }
+
+ GrStringBuilder(void* stackChars,
+ int stackCount,
+ const GrStringBuilder& a,
+ const GrStringBuilder& b) :
+ fChars(stackCount ? stackChars : NULL,
+ stackCount) {
+ GrAssert('\0' == a.fChars.back());
+ GrAssert('\0' == b.fChars.back());
+
+ fChars.push_back_n(a.fChars.count() + b.fChars.count() - 1);
+ char* s = &fChars.front();
+ memcpy(s, &a.fChars.front(), a.fChars.count() - 1);
+ s += a.fChars.count() - 1;
+ memcpy(s, &b.fChars.front(), b.fChars.count());
+ }
+
+private:
+ GrTArray<char, true> fChars;
+};
+
+template <int STACK_COUNT = 128>
+class GrSStringBuilder : public GrStringBuilder {
+public:
+ GrSStringBuilder() : GrStringBuilder(fStackChars, STACK_COUNT) {}
+
+ GrSStringBuilder(const GrStringBuilder& s) : GrStringBuilder(fStackChars,
+ STACK_COUNT,
+ s) {
+ }
+
+ GrSStringBuilder(const char* s) : GrStringBuilder(fStackChars,
+ STACK_COUNT,
+ s) {
+ }
+
+ GrSStringBuilder(const GrStringBuilder& a, const GrStringBuilder& b) :
+ GrStringBuilder(fStackChars, STACK_COUNT, a, b) {
+ }
+private:
+ char fStackChars[STACK_COUNT];
+};
+
+#endif
+
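A short usage sketch (illustrative only): GrSStringBuilder<N> keeps up to N characters in stack storage, so short strings never touch the heap.

    #include <stdio.h>
    #include "GrStringBuilder.h"

    static void printGlyphCount(int count) {
        GrSStringBuilder<64> msg("glyphs: ");   // chars live on the stack
        msg.appendInt(count);
        msg += '\n';
        printf("%s", msg.cstr());
    }
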
diff --git a/gpu/include/GrTArray.h b/gpu/include/GrTArray.h
new file mode 100644
index 0000000000..f0d94943a9
--- /dev/null
+++ b/gpu/include/GrTArray.h
@@ -0,0 +1,298 @@
+/*
+ Copyright 2010 Google Inc.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+ */
+
+
+#ifndef GrTArray_DEFINED
+#define GrTArray_DEFINED
+
+#include <new>
+#include "GrTypes.h"
+
+// TODO: convert from uint32_t to int.
+
+// DATA_TYPE indicates that T has a trivial constructor and destructor
+// and can be shallow-copied with memcpy
+template <typename T, bool DATA_TYPE = false> class GrTArray {
+public:
+ GrTArray() {
+ fCount = 0;
+ fReserveCount = MIN_ALLOC_COUNT;
+ fAllocCount = 0;
+ fMemArray = NULL;
+ fPreAllocMemArray = NULL;
+ }
+
+ GrTArray(uint32_t reserveCount) {
+ fCount = 0;
+ fReserveCount = GrMax(reserveCount, (uint32_t)MIN_ALLOC_COUNT);
+ fAllocCount = fReserveCount;
+ fMemArray = GrMalloc(sizeof(T) * fReserveCount);
+ fPreAllocMemArray = NULL;
+ }
+
+ GrTArray(void* preAllocStorage, uint32_t preAllocCount) {
+ // we allow NULL,0 args and revert to the default constructor behavior.
+ // This makes it possible for an owner object to use the same constructor
+ // line to get either prealloc or non-prealloc behavior.
+ GrAssert((NULL == preAllocStorage) == !preAllocCount);
+
+ fCount = 0;
+ fReserveCount = preAllocCount > 0 ? preAllocCount :
+ MIN_ALLOC_COUNT;
+ fAllocCount = preAllocCount;
+ fMemArray = preAllocStorage;
+ fPreAllocMemArray = preAllocStorage;
+ }
+
+ GrTArray(const GrTArray& array) {
+ fCount = array.count();
+ fReserveCount = MIN_ALLOC_COUNT;
+ fAllocCount = GrMax(fReserveCount, fCount);
+ fMemArray = GrMalloc(sizeof(T) * fAllocCount);
+ fPreAllocMemArray = NULL;
+ if (DATA_TYPE) {
+ memcpy(fMemArray, array.fMemArray, sizeof(T) * fCount);
+ } else {
+ for (uint32_t i = 0; i < fCount; ++i) {
+ new (fItemArray + i) T(array[i]);
+ }
+ }
+ }
+
+ GrTArray(const T* array, uint32_t count) {
+ fCount = count;
+ fReserveCount = MIN_ALLOC_COUNT;
+ fAllocCount = GrMax(fReserveCount, fCount);
+ fMemArray = GrMalloc(sizeof(T) * fAllocCount);
+ fPreAllocMemArray = NULL;
+ if (DATA_TYPE) {
+ memcpy(fMemArray, array, sizeof(T) * fCount);
+ } else {
+ for (uint32_t i = 0; i < fCount; ++i) {
+ new (fItemArray + i) T(array[i]);
+ }
+ }
+ }
+
+ GrTArray(const GrTArray& array,
+ void* preAllocStorage, uint32_t preAllocCount) {
+
+ // for the same reason as the non-copying constructor we allow NULL, 0 for prealloc
+ GrAssert((NULL == preAllocStorage) == !preAllocCount);
+
+ fCount = array.count();
+ fReserveCount = preAllocCount > 0 ? preAllocCount :
+ MIN_ALLOC_COUNT;
+ fPreAllocMemArray = preAllocStorage;
+
+ if (fReserveCount >= fCount && preAllocCount) {
+ fAllocCount = fReserveCount;
+ fMemArray = preAllocStorage;
+ } else {
+ fAllocCount = GrMax(fCount, fReserveCount);
+ fMemArray = GrMalloc(fAllocCount * sizeof(T));
+ }
+
+ if (DATA_TYPE) {
+ memcpy(fMemArray, array.fMemArray, sizeof(T) * fCount);
+ } else {
+ for (uint32_t i = 0; i < fCount; ++i) {
+ new (fItemArray + i) T(array[i]);
+ }
+ }
+ }
+
+ GrTArray(const T* array, uint32_t count,
+ void* preAllocStorage, uint32_t preAllocCount) {
+
+ // for the same reason as the non-copying constructor we allow NULL, 0 for prealloc
+ GrAssert((NULL == preAllocStorage) == !preAllocCount);
+
+ fCount = count;
+ fReserveCount = (preAllocCount > 0) ? preAllocCount :
+ MIN_ALLOC_COUNT;
+ fPreAllocMemArray = preAllocStorage;
+
+ if (fReserveCount >= fCount && preAllocCount) {
+ fAllocCount = fReserveCount;
+ fMemArray = preAllocStorage;
+ } else {
+ fAllocCount = GrMax(fCount, fReserveCount);
+ fMemArray = GrMalloc(fAllocCount * sizeof(T));
+ }
+
+ if (DATA_TYPE) {
+ memcpy(fMemArray, array, sizeof(T) * fCount);
+ } else {
+ for (uint32_t i = 0; i < fCount; ++i) {
+ new (fItemArray + i) T(array[i]);
+ }
+ }
+ }
+
+ GrTArray& operator =(const GrTArray& array) {
+ for (uint32_t i = 0; i < fCount; ++i) {
+ fItemArray[i].~T();
+ }
+ fCount = 0;
+ checkRealloc((int)array.count());
+ fCount = array.count();
+ if (DATA_TYPE) {
+ memcpy(fMemArray, array.fMemArray, sizeof(T) * fCount);
+ } else {
+ for (uint32_t i = 0; i < fCount; ++i) {
+ new (fItemArray + i) T(array[i]);
+ }
+ }
+ return *this;
+ }
+
+ ~GrTArray() {
+ for (uint32_t i = 0; i < fCount; ++i) {
+ fItemArray[i].~T();
+ }
+ if (fMemArray != fPreAllocMemArray) {
+ GrFree(fMemArray);
+ }
+ }
+
+ uint32_t count() const { return fCount; }
+
+ bool empty() const { return !fCount; }
+
+ T& push_back() {
+ checkRealloc(1);
+ new ((char*)fMemArray+sizeof(T)*fCount) T;
+ ++fCount;
+ return fItemArray[fCount-1];
+ }
+
+ void push_back_n(uint32_t n) {
+ checkRealloc(n);
+ for (uint32_t i = 0; i < n; ++i) {
+ new (fItemArray + fCount + i) T;
+ }
+ fCount += n;
+ }
+
+ void pop_back() {
+ GrAssert(0 != fCount);
+ --fCount;
+ fItemArray[fCount].~T();
+ checkRealloc(0);
+ }
+
+ void pop_back_n(uint32_t n) {
+ GrAssert(fCount >= n);
+ fCount -= n;
+ // destroy the popped elements at the (old) end of the array
+ for (uint32_t i = 0; i < n; ++i) {
+ fItemArray[fCount + i].~T();
+ }
+ checkRealloc(0);
+ }
+
+ // pushes or pops from the back to resize
+ void resize_back(uint32_t newCount) {
+ if (newCount > fCount) {
+ push_back_n(newCount - fCount);
+ } else if (newCount < fCount) {
+ pop_back_n(fCount - newCount);
+ }
+ }
+
+ T& operator[] (uint32_t i) {
+ GrAssert(i < fCount);
+ return fItemArray[i];
+ }
+
+ const T& operator[] (uint32_t i) const {
+ GrAssert(i < fCount);
+ return fItemArray[i];
+ }
+
+ T& front() { GrAssert(fCount); return fItemArray[0];}
+
+ const T& front() const { GrAssert(fCount); return fItemArray[0];}
+
+ T& back() { GrAssert(fCount); return fItemArray[fCount - 1];}
+
+ const T& back() const { GrAssert(fCount); return fItemArray[fCount - 1];}
+
+ T& fromBack(uint32_t i) {
+ GrAssert(i < fCount);
+ return fItemArray[fCount - i - 1];
+ }
+
+ const T& fromBack(uint32_t i) const {
+ GrAssert(i < fCount);
+ return fItemArray[fCount - i - 1];
+ }
+
+private:
+ static const uint32_t MIN_ALLOC_COUNT = 8;
+
+ inline void checkRealloc(int32_t delta) {
+ GrAssert(-delta <= (int32_t)fCount);
+
+ uint32_t newCount = fCount + delta;
+ uint32_t newAllocCount = fAllocCount;
+
+ if (newCount > fAllocCount) {
+ newAllocCount = GrMax(newCount + ((newCount + 1) >> 1),
+ fReserveCount);
+ } else if (newCount < fAllocCount / 3) {
+ newAllocCount = GrMax(fAllocCount / 2, fReserveCount);
+ }
+
+ if (newAllocCount != fAllocCount) {
+
+ fAllocCount = newAllocCount;
+ char* newMemArray;
+
+ if (fAllocCount == fReserveCount && NULL != fPreAllocMemArray) {
+ newMemArray = (char*) fPreAllocMemArray;
+ } else {
+ newMemArray = (char*) GrMalloc(fAllocCount*sizeof(T));
+ }
+
+ if (DATA_TYPE) {
+ memcpy(newMemArray, fMemArray, fCount * sizeof(T));
+ } else {
+ for (uint32_t i = 0; i < fCount; ++i) {
+ new (newMemArray + sizeof(T) * i) T(fItemArray[i]);
+ fItemArray[i].~T();
+ }
+ }
+
+ if (fMemArray != fPreAllocMemArray) {
+ GrFree(fMemArray);
+ }
+ fMemArray = newMemArray;
+ }
+ }
+
+ uint32_t fReserveCount;
+ uint32_t fCount;
+ uint32_t fAllocCount;
+ void* fPreAllocMemArray;
+ union {
+ T* fItemArray;
+ void* fMemArray;
+ };
+};
+
+#endif
+
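A sketch of the prealloc-storage pattern the constructors above are designed for (illustrative only; ExampleOwner is hypothetical):

    #include "GrTArray.h"

    class ExampleOwner {
    public:
        // the array uses fStorage until it outgrows 16 elements, and only
        // then falls back to GrMalloc
        ExampleOwner() : fPoints(fStorage, 16) {}

        void add(int value)    { fPoints.push_back() = value; }
        uint32_t count() const { return fPoints.count(); }

    private:
        int                 fStorage[16];
        GrTArray<int, true> fPoints;    // int is shallow-copyable (DATA_TYPE)
    };
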
diff --git a/gpu/include/GrTBSearch.h b/gpu/include/GrTBSearch.h
new file mode 100644
index 0000000000..264ccb0dbb
--- /dev/null
+++ b/gpu/include/GrTBSearch.h
@@ -0,0 +1,53 @@
+/*
+ Copyright 2010 Google Inc.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+ */
+
+
+#ifndef GrTBSearch_DEFINED
+#define GrTBSearch_DEFINED
+
+template <typename ELEM, typename KEY>
+int GrTBSearch(const ELEM array[], int count, KEY target) {
+ GrAssert(count >= 0);
+ if (0 == count) {
+ // we should insert it at 0
+ return ~0;
+ }
+
+ int high = count - 1;
+ int low = 0;
+ while (high > low) {
+ int index = (low + high) >> 1;
+ if (LT(array[index], target)) {
+ low = index + 1;
+ } else {
+ high = index;
+ }
+ }
+
+ // check if we found it
+ if (EQ(array[high], target)) {
+ return high;
+ }
+
+ // now return the ~ of where we should insert it
+ if (LT(array[high], target)) {
+ high += 1;
+ }
+ return ~high;
+}
+
+#endif
+
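GrTBSearch() expects the caller to supply LT() and EQ() overloads for the element/target pair before the template is instantiated; a sketch for plain ints (illustrative only):

    #include "GrTypes.h"        // GrAssert, used by GrTBSearch

    static bool LT(int elem, int target) { return elem < target; }
    static bool EQ(int elem, int target) { return elem == target; }

    #include "GrTBSearch.h"

    static void example() {
        static const int sorted[] = { 1, 3, 5, 9 };
        int hit  = GrTBSearch<int, int>(sorted, 4, 5);  // == 2
        int miss = GrTBSearch<int, int>(sorted, 4, 4);  // == ~2, insert at 2
        GrAssert(2 == hit && 2 == ~miss);
    }
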
diff --git a/gpu/include/GrTDArray.h b/gpu/include/GrTDArray.h
new file mode 100644
index 0000000000..092242e5fc
--- /dev/null
+++ b/gpu/include/GrTDArray.h
@@ -0,0 +1,222 @@
+/*
+ Copyright 2010 Google Inc.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+ */
+
+
+#ifndef GrTDArray_DEFINED
+#define GrTDArray_DEFINED
+
+#include "GrTypes.h"
+
+static int GrInitialArrayAllocationCount() {
+ return 4;
+}
+
+static int GrNextArrayAllocationCount(int count) {
+ return count + ((count + 1) >> 1);
+}
+
+template <typename T> class GrTDArray {
+public:
+ GrTDArray() : fArray(NULL), fAllocated(0), fCount(0) {}
+ GrTDArray(const GrTDArray& src) {
+ fCount = fAllocated = src.fCount;
+ fArray = (T*)GrMalloc(fAllocated * sizeof(T));
+ memcpy(fArray, src.fArray, fCount * sizeof(T));
+ }
+ ~GrTDArray() {
+ if (fArray) {
+ GrFree(fArray);
+ }
+ }
+
+ bool isEmpty() const { return 0 == fCount; }
+ int count() const { return fCount; }
+
+ const T& at(int index) const {
+ GrAssert((unsigned)index < (unsigned)fCount);
+ return fArray[index];
+ }
+ T& at(int index) {
+ GrAssert((unsigned)index < (unsigned)fCount);
+ return fArray[index];
+ }
+
+ const T& operator[](int index) const { return this->at(index); }
+ T& operator[](int index) { return this->at(index); }
+
+ GrTDArray& operator=(const GrTDArray& src) {
+ if (fAllocated < src.fCount) {
+ fAllocated = src.fCount;
+ GrFree(fArray);
+ fArray = (T*)GrMalloc(fAllocated * sizeof(T));
+ }
+ fCount = src.fCount;
+ memcpy(fArray, src.fArray, fCount * sizeof(T));
+ return *this;
+ }
+
+ void reset() {
+ if (fArray) {
+ GrFree(fArray);
+ fArray = NULL;
+ }
+ fAllocated = fCount = 0;
+ }
+
+ T* begin() const { return fArray; }
+ T* end() const { return fArray + fCount; }
+ T* back() const { GrAssert(fCount); return fArray + (fCount - 1); }
+
+ T* prepend() {
+ this->growAt(0);
+ return fArray;
+ }
+
+ T* append() {
+ this->growAt(fCount);
+ return fArray + fCount - 1;
+ }
+
+ /**
+ * index may be [0..count], so that you can insert at the end (like append)
+ */
+ T* insert(int index) {
+ GrAssert((unsigned)index <= (unsigned)fCount);
+ this->growAt(index);
+ return fArray + index;
+ }
+
+ void remove(int index) {
+ GrAssert((unsigned)index < (unsigned)fCount);
+ fCount -= 1;
+ if (index < fCount) {
+ int remaining = fCount - index;
+ memmove(fArray + index, fArray + index + 1, remaining * sizeof(T));
+ }
+ }
+
+ void removeShuffle(int index) {
+ GrAssert((unsigned)index < (unsigned)fCount);
+ fCount -= 1;
+ if (index < fCount) {
+ memmove(fArray + index, fArray + fCount, sizeof(T));
+ }
+ }
+
+ // Utility iterators
+
+ /**
+ * Calls GrFree() on each element. Assumes each is NULL or was allocated
+ * with GrMalloc().
+ */
+ void freeAll() {
+ T* stop = this->end();
+ for (T* curr = this->begin(); curr < stop; curr++) {
+ GrFree(*curr);
+ }
+ this->reset();
+ }
+
+ /**
+ * Calls delete on each element. Assumes each is NULL or was allocated
+ * with new.
+ */
+ void deleteAll() {
+ T* stop = this->end();
+ for (T* curr = this->begin(); curr < stop; curr++) {
+ delete *curr;
+ }
+ this->reset();
+ }
+
+ /**
+ * Calls GrSafeUnref() on each element. Assumes each is NULL or is a
+ * subclass of GrRefCnt.
+ */
+ void unrefAll() {
+ T* stop = this->end();
+ for (T* curr = this->begin(); curr < stop; curr++) {
+ GrSafeUnref(*curr);
+ }
+ this->reset();
+ }
+
+ void visit(void visitor(T&)) const {
+ T* stop = this->end();
+ for (T* curr = this->begin(); curr < stop; curr++) {
+ if (*curr) {
+ visitor(*curr);
+ }
+ }
+ }
+
+ int find(const T& elem) const {
+ int count = this->count();
+ T* curr = this->begin();
+ for (int i = 0; i < count; i++) {
+ if (elem == curr[i]) {
+ return i;
+ }
+ }
+ return -1;
+ }
+
+ friend bool operator==(const GrTDArray<T>& a, const GrTDArray<T>& b) {
+ return a.count() == b.count() &&
+ (0 == a.count() ||
+ 0 == memcmp(a.begin(), b.begin(), a.count() * sizeof(T)));
+ }
+ friend bool operator!=(const GrTDArray<T>& a, const GrTDArray<T>& b) {
+ return !(a == b);
+ }
+
+private:
+ T* fArray;
+ int fAllocated, fCount;
+
+ // growAt will increment fCount, reallocate fArray (as needed), and slide
+ // the contents of fArray to make a hole for new data at index.
+ void growAt(int index) {
+ GrAssert(fCount <= fAllocated);
+ if (0 == fAllocated) {
+ fAllocated = GrInitialArrayAllocationCount();
+ fArray = (T*)GrMalloc(fAllocated * sizeof(T));
+ } else if (fCount == fAllocated) {
+ fAllocated = GrNextArrayAllocationCount(fAllocated);
+ T* newArray = (T*)GrMalloc(fAllocated * sizeof(T));
+ memcpy(newArray, fArray, index * sizeof(T));
+ memcpy(newArray + index + 1, fArray + index,
+ (fCount - index) * sizeof(T));
+ GrFree(fArray);
+ fArray = newArray;
+ } else {
+ // check that we're not just appending
+ if (index < fCount) {
+ memmove(fArray + index + 1, fArray + index,
+ (fCount - index) * sizeof(T));
+ }
+ }
+ GrAssert(fCount < fAllocated);
+ fCount += 1;
+ }
+};
+
+extern void* GrTDArray_growAt(void*, int* allocated, int& count, int index,
+ size_t);
+
+
+#endif
+
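A small usage sketch (illustrative only); note that GrTDArray moves elements with memcpy and never runs constructors or destructors, so it is only suitable for POD-style types:

    #include "GrTDArray.h"

    static void example() {
        GrTDArray<int> values;
        *values.append()  = 10;        // [10]
        *values.append()  = 30;        // [10, 30]
        *values.insert(1) = 20;        // [10, 20, 30]
        values.remove(0);              // [20, 30]
        int where = values.find(30);   // == 1 (or -1 if absent)
        GrAssert(1 == where);
        values.reset();
    }
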
diff --git a/gpu/include/GrTHashCache.h b/gpu/include/GrTHashCache.h
new file mode 100644
index 0000000000..510f9ab205
--- /dev/null
+++ b/gpu/include/GrTHashCache.h
@@ -0,0 +1,226 @@
+/*
+ Copyright 2010 Google Inc.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+ */
+
+
+#ifndef GrTHashCache_DEFINED
+#define GrTHashCache_DEFINED
+
+#include "GrTDArray.h"
+
+/**
+ * The Key class needs
+ *     static bool EQ(const Entry&, const Key&);
+ *     static bool LT(const Entry&, const Key&);
+ *     uint32_t getHash() const;
+ * and, for validate() in debug builds, entry-vs-entry overloads of EQ/LT
+ * plus static uint32_t GetHash(const Entry&).
+ *
+ * Duplicate key entries are allowed, but find() may return
+ * any one of the duplicates.
+ */
+template <typename T, typename Key, size_t kHashBits> class GrTHashTable {
+public:
+ GrTHashTable() { Gr_bzero(fHash, sizeof(fHash)); }
+ ~GrTHashTable() {}
+
+ int count() const { return fSorted.count(); }
+ T* find(const Key&) const;
+ // return true if key was unique when inserted.
+ bool insert(const Key&, T*);
+ void remove(const Key&, const T*);
+ T* removeAt(int index, uint32_t hash);
+ void removeAll();
+ void deleteAll();
+ void unrefAll();
+
+ /**
+ * Return the index for the element, using a linear search.
+ */
+ int slowFindIndex(T* elem) const { return fSorted.find(elem); }
+
+#if GR_DEBUG
+ void validate() const;
+ bool contains(T*) const;
+#endif
+
+ // testing
+ const GrTDArray<T*>& getArray() const { return fSorted; }
+private:
+ enum {
+ kHashCount = 1 << kHashBits,
+ kHashMask = kHashCount - 1
+ };
+ static unsigned hash2Index(uint32_t hash) {
+ hash ^= hash >> 16;
+ if (kHashBits <= 8) {
+ hash ^= hash >> 8;
+ }
+ return hash & kHashMask;
+ }
+
+ mutable T* fHash[kHashCount];
+ GrTDArray<T*> fSorted;
+
+ // search fSorted, and return the found index, or ~index of where it
+ // should be inserted
+ int searchArray(const Key&) const;
+};
+
+///////////////////////////////////////////////////////////////////////////////
+
+template <typename T, typename Key, size_t kHashBits>
+int GrTHashTable<T, Key, kHashBits>::searchArray(const Key& key) const {
+ int count = fSorted.count();
+ if (0 == count) {
+ // we should insert it at 0
+ return ~0;
+ }
+
+ const T* const* array = fSorted.begin();
+ int high = count - 1;
+ int low = 0;
+ while (high > low) {
+ int index = (low + high) >> 1;
+ if (Key::LT(*array[index], key)) {
+ low = index + 1;
+ } else {
+ high = index;
+ }
+ }
+
+ // check if we found it
+ if (Key::EQ(*array[high], key)) {
+ // above search should have found the first occurrence if there
+ // are multiple.
+ GrAssert(0 == high || Key::LT(*array[high - 1], key));
+ return high;
+ }
+
+ // now return the ~ of where we should insert it
+ if (Key::LT(*array[high], key)) {
+ high += 1;
+ }
+ return ~high;
+}
+
+template <typename T, typename Key, size_t kHashBits>
+T* GrTHashTable<T, Key, kHashBits>::find(const Key& key) const {
+ int hashIndex = hash2Index(key.getHash());
+ T* elem = fHash[hashIndex];
+
+ if (NULL == elem || !Key::EQ(*elem, key)) {
+ // bsearch for the key in our sorted array
+ int index = this->searchArray(key);
+ if (index < 0) {
+ return NULL;
+ }
+ elem = fSorted[index];
+ // update the hash
+ fHash[hashIndex] = elem;
+ }
+ return elem;
+}
+
+template <typename T, typename Key, size_t kHashBits>
+bool GrTHashTable<T, Key, kHashBits>::insert(const Key& key, T* elem) {
+ int index = this->searchArray(key);
+ bool first = index < 0;
+ if (first) {
+ // turn it into the actual index
+ index = ~index;
+ }
+ // add it to our array
+ *fSorted.insert(index) = elem;
+ // update our hash table (overwrites any dupe's position in the hash)
+ fHash[hash2Index(key.getHash())] = elem;
+ return first;
+}
+
+template <typename T, typename Key, size_t kHashBits>
+void GrTHashTable<T, Key, kHashBits>::remove(const Key& key, const T* elem) {
+ int index = hash2Index(key.getHash());
+ if (fHash[index] == elem) {
+ fHash[index] = NULL;
+ }
+
+ // remove from our sorted array
+ index = this->searchArray(key);
+ GrAssert(index >= 0);
+ // if there are multiple matches searchArray will give us the first match
+ // march forward until we find elem.
+ while (elem != fSorted[index]) {
+ ++index;
+ GrAssert(index < fSorted.count());
+ }
+ GrAssert(elem == fSorted[index]);
+ fSorted.remove(index);
+}
+
+template <typename T, typename Key, size_t kHashBits>
+T* GrTHashTable<T, Key, kHashBits>::removeAt(int elemIndex, uint32_t hash) {
+ int hashIndex = hash2Index(hash);
+ if (fHash[hashIndex] == fSorted[elemIndex]) {
+ fHash[hashIndex] = NULL;
+ }
+ // remove from our sorted array
+ T* elem = fSorted[elemIndex];
+ fSorted.remove(elemIndex);
+ return elem;
+}
+
+template <typename T, typename Key, size_t kHashBits>
+void GrTHashTable<T, Key, kHashBits>::removeAll() {
+ fSorted.reset();
+ Gr_bzero(fHash, sizeof(fHash));
+}
+
+template <typename T, typename Key, size_t kHashBits>
+void GrTHashTable<T, Key, kHashBits>::deleteAll() {
+ fSorted.deleteAll();
+ Gr_bzero(fHash, sizeof(fHash));
+}
+
+template <typename T, typename Key, size_t kHashBits>
+void GrTHashTable<T, Key, kHashBits>::unrefAll() {
+ fSorted.unrefAll();
+ Gr_bzero(fHash, sizeof(fHash));
+}
+
+#if GR_DEBUG
+template <typename T, typename Key, size_t kHashBits>
+void GrTHashTable<T, Key, kHashBits>::validate() const {
+ for (size_t i = 0; i < GR_ARRAY_COUNT(fHash); i++) {
+ if (fHash[i]) {
+ unsigned hashIndex = hash2Index(Key::GetHash(*fHash[i]));
+ GrAssert(hashIndex == i);
+ }
+ }
+
+ int count = fSorted.count();
+ for (int i = 1; i < count; i++) {
+ GrAssert(Key::LT(*fSorted[i - 1], *fSorted[i]) ||
+ Key::EQ(*fSorted[i - 1], *fSorted[i]));
+ }
+}
+
+template <typename T, typename Key, size_t kHashBits>
+bool GrTHashTable<T, Key, kHashBits>::contains(T* elem) const {
+ int index = fSorted.find(elem);
+ return index >= 0;
+}
+
+#endif
+
+#endif
+
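A sketch of a Key class that satisfies the contract above, including the extra overloads validate() uses in debug builds (all names here are hypothetical):

    #include "GrTHashCache.h"

    struct ExampleEntry {
        uint32_t fID;
        int      fValue;
    };

    class ExampleKey {
    public:
        explicit ExampleKey(uint32_t id) : fID(id) {}

        uint32_t getHash() const { return fID; }

        // entry vs. key, used by find()/insert()/remove()
        static bool EQ(const ExampleEntry& e, const ExampleKey& k) {
            return e.fID == k.fID;
        }
        static bool LT(const ExampleEntry& e, const ExampleKey& k) {
            return e.fID < k.fID;
        }

        // entry vs. entry and GetHash(), used by validate() in debug builds
        static bool EQ(const ExampleEntry& a, const ExampleEntry& b) {
            return a.fID == b.fID;
        }
        static bool LT(const ExampleEntry& a, const ExampleEntry& b) {
            return a.fID < b.fID;
        }
        static uint32_t GetHash(const ExampleEntry& e) { return e.fID; }

    private:
        uint32_t fID;
    };

    // GrTHashTable<ExampleEntry, ExampleKey, 4> table;
    // table.insert(ExampleKey(7), &entry);
    // ExampleEntry* found = table.find(ExampleKey(7));
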
diff --git a/gpu/include/GrTLList.h b/gpu/include/GrTLList.h
new file mode 100644
index 0000000000..1f59635ad4
--- /dev/null
+++ b/gpu/include/GrTLList.h
@@ -0,0 +1,61 @@
+/*
+ Copyright 2010 Google Inc.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+ */
+
+
+#ifndef GrTLList_DEFINED
+#define GrTLList_DEFINED
+
+#include "GrNoncopyable.h"
+
+template <typename T> class GrTLList : GrNoncopyable {
+public:
+ class Entry {
+ Entry* fPrev;
+ Entry* fNext;
+ };
+
+ GrTLList() : fHead(NULL), fTail(NULL) {}
+#if GR_DEBUG
+ ~GrTLList() {
+ GrAssert(NULL == fHead);
+ GrAssert(NULL == fTail);
+ }
+#endif
+
+ T* head() const { return fHead; }
+ T* tail() const { return fTail; }
+
+ void addToHead(T*);
+ void addToTail(T*);
+ void removeFromList(T*);
+
+private:
+ Entry* fHead;
+ Entry* fTail;
+
+ friend class Entry;
+};
+
+
+// Example usage sketch (illustrative; the list and example classes are not
+// fully fleshed out in this header):
+//
+//  class Child : public GrTLList<Child>::Entry {
+//  };
+//
+//  class Parent {
+//      GrTLList<Child> fList;
+//  };
+
+#endif
+
diff --git a/gpu/include/GrTextContext.h b/gpu/include/GrTextContext.h
new file mode 100644
index 0000000000..6b7446b4be
--- /dev/null
+++ b/gpu/include/GrTextContext.h
@@ -0,0 +1,67 @@
+/*
+ Copyright 2010 Google Inc.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+ */
+
+
+#ifndef GrTextContext_DEFINED
+#define GrTextContext_DEFINED
+
+#include "GrGlyph.h"
+#include "GrGpuVertex.h"
+
+class GrContext;
+class GrTextStrike;
+class GrFontScaler;
+
+class GrTextContext {
+public:
+ GrTextContext(GrContext*, const GrMatrix* extMatrix = NULL);
+ ~GrTextContext();
+
+ void drawPackedGlyph(GrGlyph::PackedID, GrFixed left, GrFixed top,
+ GrFontScaler*);
+
+ void flush(); // optional; automatically called by destructor
+
+private:
+ GrContext* fContext;
+ GrDrawTarget* fDrawTarget;
+
+ GrMatrix fExtMatrix;
+ GrFontScaler* fScaler;
+ GrTextStrike* fStrike;
+
+ inline void flushGlyphs();
+
+ enum {
+ kMinRequestedGlyphs = 1,
+ kDefaultRequestedGlyphs = 64,
+ kMinRequestedVerts = kMinRequestedGlyphs * 4,
+ kDefaultRequestedVerts = kDefaultRequestedGlyphs * 4,
+ };
+
+ GrGpuTextVertex* fVertices;
+
+ int32_t fMaxVertices;
+ GrTexture* fCurrTexture;
+ int fCurrVertex;
+
+ GrIRect fClipRect;
+ GrMatrix fOrigViewMatrix; // restore previous viewmatrix
+};
+
+#endif
+
+
diff --git a/gpu/include/GrTextStrike.h b/gpu/include/GrTextStrike.h
new file mode 100644
index 0000000000..abafa57130
--- /dev/null
+++ b/gpu/include/GrTextStrike.h
@@ -0,0 +1,119 @@
+/*
+ Copyright 2010 Google Inc.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+ */
+
+
+#ifndef GrTextStrike_DEFINED
+#define GrTextStrike_DEFINED
+
+#include "GrAllocPool.h"
+#include "GrFontScaler.h"
+#include "GrTHashCache.h"
+#include "GrPoint.h"
+#include "GrGlyph.h"
+
+class GrAtlasMgr;
+class GrFontCache;
+class GrGpu;
+class GrFontPurgeListener;
+
+/**
+ * The text cache maps a host font scaler instance to a dictionary of
+ * glyph id -> strike
+ */
+class GrTextStrike {
+public:
+ GrTextStrike(GrFontCache*, const GrKey* fontScalerKey, GrAtlasMgr*);
+ ~GrTextStrike();
+
+ const GrKey* getFontScalerKey() const { return fFontScalerKey; }
+ GrFontCache* getFontCache() const { return fFontCache; }
+
+ inline GrGlyph* getGlyph(GrGlyph::PackedID, GrFontScaler*);
+ bool getGlyphAtlas(GrGlyph*, GrFontScaler*);
+
+ // testing
+ int countGlyphs() const { return fCache.getArray().count(); }
+ const GrGlyph* glyphAt(int index) const {
+ return fCache.getArray()[index];
+ }
+ GrAtlas* getAtlas() const { return fAtlas; }
+
+public:
+ // for LRU
+ GrTextStrike* fPrev;
+ GrTextStrike* fNext;
+
+private:
+ class Key;
+ GrTHashTable<GrGlyph, Key, 7> fCache;
+ const GrKey* fFontScalerKey;
+ GrTAllocPool<GrGlyph> fPool;
+
+ GrFontCache* fFontCache;
+ GrAtlasMgr* fAtlasMgr;
+ GrAtlas* fAtlas; // linklist
+
+ GrGlyph* generateGlyph(GrGlyph::PackedID packed, GrFontScaler* scaler);
+ // returns true if after the purge, the strike is empty
+ bool purgeAtlasAtY(GrAtlas* atlas, int yCoord);
+
+ friend class GrFontCache;
+};
+
+class GrFontCache {
+public:
+ GrFontCache(GrGpu*);
+ ~GrFontCache();
+
+ inline GrTextStrike* getStrike(GrFontScaler*);
+
+ void freeAll();
+ void abandonAll();
+
+ void purgeExceptFor(GrTextStrike*);
+
+ // testing
+ int countStrikes() const { return fCache.getArray().count(); }
+ const GrTextStrike* strikeAt(int index) const {
+ return fCache.getArray()[index];
+ }
+ GrTextStrike* getHeadStrike() const { return fHead; }
+
+#if GR_DEBUG
+ void validate() const;
+#else
+ void validate() const {}
+#endif
+
+private:
+ friend class GrFontPurgeListener;
+
+ class Key;
+ GrTHashTable<GrTextStrike, Key, 8> fCache;
+ // for LRU
+ GrTextStrike* fHead;
+ GrTextStrike* fTail;
+
+ GrGpu* fGpu;
+ GrAtlasMgr* fAtlasMgr;
+
+
+ GrTextStrike* generateStrike(GrFontScaler*, const Key&);
+ inline void detachStrikeFromList(GrTextStrike*);
+};
+
+#endif
+
diff --git a/gpu/include/GrTexture.h b/gpu/include/GrTexture.h
new file mode 100644
index 0000000000..71a58e621b
--- /dev/null
+++ b/gpu/include/GrTexture.h
@@ -0,0 +1,213 @@
+/*
+ Copyright 2010 Google Inc.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+ */
+
+
+#ifndef GrTexture_DEFINED
+#define GrTexture_DEFINED
+
+#include "GrRefCnt.h"
+
+class GrTexture;
+
+/**
+ * GrRenderTarget represents a 2D buffer of pixels that can be rendered to.
+ * A context's render target is set by setRenderTarget(). Render targets are
+ * created by createTexture() with the kRenderTarget_TextureFlag flag.
+ * Additionally, the rendering destination set in the underlying 3D API at the
+ * time of the GrContext's creation can be retrieved by calling
+ * currentRenderTarget() after creation, before any calls to setRenderTarget().
+ */
+class GrRenderTarget : public GrRefCnt {
+public:
+ /**
+ * @return the width of the rendertarget
+ */
+ virtual uint32_t width() const = 0;
+ /**
+ * @return the height of the rendertarget
+ */
+ virtual uint32_t height() const = 0;
+
+ /**
+ * @return the texture associated with the rendertarget, may be NULL.
+ */
+ GrTexture* asTexture() {return fTexture;}
+
+protected:
+ GrRenderTarget(GrTexture* texture) : fTexture(texture) {}
+ GrTexture* fTexture;
+};
+
+class GrTexture : public GrRefCnt {
+public:
+ enum PixelConfig {
+ kUnknown_PixelConfig,
+ kAlpha_8_PixelConfig,
+ kIndex_8_PixelConfig,
+ kRGB_565_PixelConfig,
+ kRGBA_4444_PixelConfig, //!< premultiplied
+ kRGBA_8888_PixelConfig, //!< premultiplied
+ kRGBX_8888_PixelConfig, //!< treat the alpha channel as opaque
+ };
+ static size_t BytesPerPixel(PixelConfig);
+ static bool PixelConfigIsOpaque(PixelConfig);
+
+protected:
+ GrTexture(uint32_t contentWidth,
+ uint32_t contentHeight,
+ uint32_t allocWidth,
+ uint32_t allocHeight,
+ PixelConfig config) :
+ fAllocWidth(allocWidth),
+ fAllocHeight(allocHeight),
+ fContentWidth(contentWidth),
+ fContentHeight(contentHeight),
+ fConfig(config) {
+ // these shifts only make sense if the alloc size is a power of 2
+ fShiftFixedX = 31 - Gr_clz(allocWidth);
+ fShiftFixedY = 31 - Gr_clz(allocHeight);
+ }
+public:
+ virtual ~GrTexture();
+
+ /**
+ * Retrieves the width of the content area of the texture. Reflects the
+ * width passed to GrGpu::createTexture().
+ *
+ * @return the width in texels
+ */
+ uint32_t contentWidth() const { return fContentWidth; }
+ /**
+ * Retrieves the height of the content area of the texture. Reflects the
+ * height passed to GrGpu::createTexture().
+ *
+ * @return the height in texels
+ */
+ uint32_t contentHeight() const { return fContentHeight; }
+
+ /**
+ * Retrieves the texture width actually allocated in texels.
+ *
+ * @return the width in texels
+ */
+ uint32_t allocWidth() const { return fAllocWidth; }
+ /**
+ * Retrieves the texture height actually allocated in texels.
+ *
+ * @return the height in texels
+ */
+ uint32_t allocHeight() const { return fAllocHeight; }
+
+ /**
+ * Convert from texels to normalized texture coords for POT textures
+ * only.
+ */
+ GrFixed normalizeFixedX(GrFixed x) const { GrAssert(GrIsPow2(fAllocWidth));
+ return x >> fShiftFixedX; }
+ GrFixed normalizeFixedY(GrFixed y) const { GrAssert(GrIsPow2(fAllocHeight));
+ return y >> fShiftFixedY; }
+
+ /**
+ * Retrieves the pixel config specified when the texture was created.
+ */
+ PixelConfig config() const { return fConfig; }
+
+ /**
+ * The number of bytes used by the texture
+ */
+ size_t sizeInBytes() const {
+ return fAllocWidth * fAllocHeight * BytesPerPixel(fConfig);
+ }
+
+ /**
+ * Updates a subrectangle of texels in the texture.
+ *
+ * @param x left edge of rectangle to update
+ * @param y top edge of rectangle to update
+ * @param width width of rectangle to update
+ * @param height height of rectangle to update
+ * @param srcData width*height texels of data in same format that was used
+ * at texture creation.
+ */
+ virtual void uploadTextureData(uint32_t x,
+ uint32_t y,
+ uint32_t width,
+ uint32_t height,
+ const void* srcData) = 0;
+ /**
+ * Indicates that the GPU context in which this texture was created has been
+ * destroyed, and that Ganesh should not attempt to free the texture with
+ * the underlying API.
+ */
+ virtual void abandon() = 0;
+
+ /**
+ * Queries whether the texture was created as a render target.
+ *
+ * Use asRenderTarget() to use the texture as a render target if this
+ * returns true.
+ *
+ * @return true if the texture was created as a render target.
+ */
+ virtual bool isRenderTarget() const = 0;
+
+ /**
+ * Retrieves the render target underlying this texture that can be passed to
+ * GrGpu::setRenderTarget().
+ *
+ * If isRenderTarget() is false then the returned handle is undefined.
+ *
+ * @return handle to render target or undefined if the texture is not a
+ * render target
+ */
+ virtual GrRenderTarget* asRenderTarget() = 0;
+
+ /**
+ * Removes the "rendertargetness" from a texture. This may or may not
+ * actually do anything with the underlying 3D API.
+ */
+ virtual void removeRenderTarget() = 0;
+
+ /**
+ * Return the native ID or handle to the texture, depending on the
+ * platform. e.g. on opengl, return the texture ID.
+ */
+ virtual intptr_t getTextureHandle() = 0;
+
+#if GR_DEBUG
+ void validate() const {
+ this->INHERITED::validate();
+ }
+#else
+ void validate() const {}
+#endif
+
+private:
+ uint32_t fAllocWidth;
+ uint32_t fAllocHeight;
+ uint32_t fContentWidth;
+ uint32_t fContentHeight;
+ // these two shift a fixed-point value into normalized coordinates
+ // for this texture if the texture is power of two sized.
+ int fShiftFixedX;
+ int fShiftFixedY;
+ PixelConfig fConfig;
+
+ typedef GrRefCnt INHERITED;
+};
+
+#endif
+
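A worked example of the fixed-point normalization above (illustrative only): for a 256-texel-wide power-of-two allocation the constructor computes fShiftFixedX = 31 - Gr_clz(256) = 8, so normalizeFixedX() divides by 256 with a right shift.

    #include "GrScalar.h"     // GrIntToFixed, GR_FixedHalf

    static GrFixed exampleNormalize() {
        GrFixed texelX = GrIntToFixed(128);  // texel 128 in 16.16 fixed point
        GrFixed normX  = texelX >> 8;        // 128/256 = 0.5 == GR_FixedHalf
        GrAssert(GR_FixedHalf == normX);
        return normX;
    }
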
diff --git a/gpu/include/GrTextureCache.h b/gpu/include/GrTextureCache.h
new file mode 100644
index 0000000000..e3d4f0a100
--- /dev/null
+++ b/gpu/include/GrTextureCache.h
@@ -0,0 +1,289 @@
+/*
+ Copyright 2010 Google Inc.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+ */
+
+
+#ifndef GrTextureCache_DEFINED
+#define GrTextureCache_DEFINED
+
+#include "GrTypes.h"
+#include "GrTHashCache.h"
+
+class GrTexture;
+
+// returns true if a < b, returns false if b < a,
+// and falls through (no return) if they are equal
+//
+#define RET_IF_LT_OR_GT(a, b) \
+ do { \
+ if ((a) < (b)) { \
+ return true; \
+ } \
+ if ((b) < (a)) { \
+ return false; \
+ } \
+ } while (0)
+
+/**
+ * Helper class for GrTextureCache, the Key is used to identify src data for
+ * a texture. It is identified by 2 32bit data fields which can hold any
+ * data (uninterpreted by the cache) and a width/height.
+ */
+class GrTextureKey {
+public:
+ enum {
+ kHashBits = 7,
+ kHashCount = 1 << kHashBits,
+ kHashMask = kHashCount - 1
+ };
+
+ GrTextureKey(uint32_t p0, uint32_t p1, uint16_t width, uint16_t height) {
+ fP0 = p0;
+ fP1 = p1;
+ fP2 = width | (height << 16);
+ GR_DEBUGCODE(fHashIndex = -1);
+ }
+
+ GrTextureKey(const GrTextureKey& src) {
+ fP0 = src.fP0;
+ fP1 = src.fP1;
+ fP2 = src.fP2;
+ finalize(src.fPrivateBits);
+ }
+
+ //!< returns hash value [0..kHashMask] for the key
+ int hashIndex() const { return fHashIndex; }
+
+ friend bool operator==(const GrTextureKey& a, const GrTextureKey& b) {
+ GR_DEBUGASSERT(-1 != a.fHashIndex && -1 != b.fHashIndex);
+ return a.fP0 == b.fP0 && a.fP1 == b.fP1 && a.fP2 == b.fP2 &&
+ a.fPrivateBits == b.fPrivateBits;
+ }
+
+ friend bool operator!=(const GrTextureKey& a, const GrTextureKey& b) {
+ GR_DEBUGASSERT(-1 != a.fHashIndex && -1 != b.fHashIndex);
+ return !(a == b);
+ }
+
+ friend bool operator<(const GrTextureKey& a, const GrTextureKey& b) {
+ RET_IF_LT_OR_GT(a.fP0, b.fP0);
+ RET_IF_LT_OR_GT(a.fP1, b.fP1);
+ RET_IF_LT_OR_GT(a.fP2, b.fP2);
+ return a.fPrivateBits < b.fPrivateBits;
+ }
+
+private:
+ void finalize(uint32_t privateBits) {
+ fPrivateBits = privateBits;
+ this->computeHashIndex();
+ }
+
+ uint16_t width() const { return fP2 & 0xffff; }
+ uint16_t height() const { return (fP2 >> 16); }
+
+ static uint32_t rol(uint32_t x) {
+ return (x >> 24) | (x << 8);
+ }
+ static uint32_t ror(uint32_t x) {
+ return (x >> 8) | (x << 24);
+ }
+ static uint32_t rohalf(uint32_t x) {
+ return (x >> 16) | (x << 16);
+ }
+
+ void computeHashIndex() {
+ uint32_t hash = fP0 ^ rol(fP1) ^ ror(fP2) ^ rohalf(fPrivateBits);
+ // this way to mix and reduce hash to its index may have to change
+ // depending on how many bits we allocate to the index
+ hash ^= hash >> 16;
+ hash ^= hash >> 8;
+ fHashIndex = hash & kHashMask;
+ }
+
+ uint32_t fP0;
+ uint32_t fP1;
+ uint32_t fP2;
+ uint32_t fPrivateBits;
+
+ // this is computed from the fP... fields
+ int fHashIndex;
+
+ friend class GrContext;
+};
+
+///////////////////////////////////////////////////////////////////////////////
+
+class GrTextureEntry {
+public:
+ GrTexture* texture() const { return fTexture; }
+ const GrTextureKey& key() const { return fKey; }
+
+#if GR_DEBUG
+ GrTextureEntry* next() const { return fNext; }
+ GrTextureEntry* prev() const { return fPrev; }
+#endif
+
+#if GR_DEBUG
+ void validate() const;
+#else
+ void validate() const {}
+#endif
+
+private:
+ GrTextureEntry(const GrTextureKey& key, GrTexture* texture);
+ ~GrTextureEntry();
+
+ bool isLocked() const { return fLockCount != 0; }
+ void lock() { ++fLockCount; }
+ void unlock() {
+ GrAssert(fLockCount > 0);
+ --fLockCount;
+ }
+
+ GrTextureKey fKey;
+ GrTexture* fTexture;
+
+ // track if we're in use, used when we need to purge
+ // we only purge unlocked entries
+ int fLockCount;
+
+ // we're a dlinklist
+ GrTextureEntry* fPrev;
+ GrTextureEntry* fNext;
+
+ friend class GrTextureCache;
+};
+
+///////////////////////////////////////////////////////////////////////////////
+
+#include "GrTHashCache.h"
+
+/**
+ * Cache of GrTexture objects.
+ *
+ * These have a corresponding GrTextureKey, built from 96 bits identifying the
+ * texture/bitmap.
+ *
+ * The cache stores the entries in a double-linked list, which is its LRU.
+ * When an entry is "locked" (i.e. given to the caller), it is moved to the
+ * head of the list. If/when we must purge some of the entries, we walk the
+ * list backwards from the tail, since those are the least recently used.
+ *
+ * For fast searches, we maintain a sorted array (based on the GrTextureKey)
+ * which we can bsearch. When a new entry is added, it is inserted into this
+ * array.
+ *
+ * For even faster searches, a hash is computed from the Key. If there is
+ * a collision between two keys with the same hash, we fall back on the
+ * bsearch, and update the hash to reflect the most recent Key requested.
+ */
+class GrTextureCache {
+public:
+ GrTextureCache(int maxCount, size_t maxBytes);
+ ~GrTextureCache(); // uses kFreeTexture_DeleteMode
+
+ /**
+ * Search for an entry with the same Key. If found, "lock" it and return it.
+ * If not found, return null.
+ */
+ GrTextureEntry* findAndLock(const GrTextureKey&);
+
+ /**
+ * Create a new entry, based on the specified key and texture, and return
+ * its "locked" entry.
+ *
+ * Ownership of the texture is transferred to the Entry, which will unref()
+ * it when we are purged or deleted.
+ */
+ GrTextureEntry* createAndLock(const GrTextureKey&, GrTexture*);
+
+ /**
+ * Detach removes an entry from the cache. This prevents the entry from
+ * being found by a subsequent findAndLock() until it is reattached. The
+ * entry still counts against the cache's budget and should be reattached
+ * when exclusive access is no longer needed.
+ */
+ void detach(GrTextureEntry*);
+
+ /**
+ * Reattaches a texture to the cache and unlocks it. Allows it to be found
+ * by a subsequent findAndLock() or be purged (provided its lock count is
+ * now 0).
+ */
+ void reattachAndUnlock(GrTextureEntry*);
+
+ /**
+ * When done with an entry, call unlock(entry) on it, which returns it to
+ * a purgeable state.
+ */
+ void unlock(GrTextureEntry*);
+
+ enum DeleteMode {
+ kFreeTexture_DeleteMode,
+ kAbandonTexture_DeleteMode
+ };
+ void deleteAll(DeleteMode);
+
+#if GR_DEBUG
+ void validate() const;
+#else
+ void validate() const {}
+#endif
+
+private:
+ void internalDetach(GrTextureEntry*, bool);
+ void attachToHead(GrTextureEntry*, bool);
+ void purgeAsNeeded(); // uses kFreeTexture_DeleteMode
+
+ class Key;
+ GrTHashTable<GrTextureEntry, Key, 8> fCache;
+
+ // manage the dlink list
+ GrTextureEntry* fHead;
+ GrTextureEntry* fTail;
+
+ // our budget, used in purgeAsNeeded()
+ const int fMaxCount;
+ const size_t fMaxBytes;
+
+ // our current stats, related to our budget
+ int fEntryCount;
+ size_t fEntryBytes;
+ int fClientDetachedCount;
+ size_t fClientDetachedBytes;
+};
+
+///////////////////////////////////////////////////////////////////////////////
+
+#if GR_DEBUG
+ class GrAutoTextureCacheValidate {
+ public:
+ GrAutoTextureCacheValidate(GrTextureCache* cache) : fCache(cache) {
+ cache->validate();
+ }
+ ~GrAutoTextureCacheValidate() {
+ fCache->validate();
+ }
+ private:
+ GrTextureCache* fCache;
+ };
+#else
+ class GrAutoTextureCacheValidate {
+ public:
+ GrAutoTextureCacheValidate(GrTextureCache*) {}
+ };
+#endif
+
+#endif
+
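The lookup-or-create flow the cache is built around, as a sketch (illustrative only; in the real code GrContext owns this logic and finalizes the key):

    static GrTextureEntry* lookupOrCreate(GrTextureCache* cache,
                                          const GrTextureKey& key,
                                          GrTexture* newTexture) {
        GrTextureEntry* entry = cache->findAndLock(key);
        if (NULL == entry && NULL != newTexture) {
            // the cache takes ownership and unref()s the texture on purge
            entry = cache->createAndLock(key, newTexture);
        }
        // caller draws with entry->texture() and then calls
        // cache->unlock(entry) to make it purgeable again
        return entry;
    }
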
diff --git a/gpu/include/GrTouchGesture.h b/gpu/include/GrTouchGesture.h
new file mode 100644
index 0000000000..03f970b246
--- /dev/null
+++ b/gpu/include/GrTouchGesture.h
@@ -0,0 +1,56 @@
+#ifndef GrTouchGesture_DEFINED
+#define GrTouchGesture_DEFINED
+
+#include "GrTypes.h"
+#include "SkTDArray.h"
+#include "SkMatrix.h"
+
+#include "FlingState.h"
+
+class GrTouchGesture {
+public:
+ GrTouchGesture();
+ ~GrTouchGesture();
+
+ void touchBegin(void* owner, float x, float y);
+ void touchMoved(void* owner, float x, float y);
+ void touchEnd(void* owner);
+ void reset();
+
+ const SkMatrix& localM();
+ const SkMatrix& globalM() const { return fGlobalM; }
+
+private:
+ enum State {
+ kEmpty_State,
+ kTranslate_State,
+ kZoom_State,
+ };
+
+ struct Rec {
+ void* fOwner;
+ float fStartX, fStartY;
+ float fPrevX, fPrevY;
+ float fLastX, fLastY;
+ SkMSec fPrevT, fLastT;
+ };
+ SkTDArray<Rec> fTouches;
+
+ State fState;
+ SkMatrix fLocalM, fGlobalM;
+ FlingState fFlinger;
+ SkMSec fLastUpT;
+ SkPoint fLastUpP;
+
+
+ void flushLocalM();
+ int findRec(void* owner) const;
+ void appendNewRec(void* owner, float x, float y);
+ float computePinch(const Rec&, const Rec&);
+ float limitTotalZoom(float scale) const;
+ bool handleDblTap(float, float);
+};
+
+#endif
+
+
diff --git a/gpu/include/GrTypes.h b/gpu/include/GrTypes.h
new file mode 100644
index 0000000000..a491295192
--- /dev/null
+++ b/gpu/include/GrTypes.h
@@ -0,0 +1,142 @@
+/*
+ Copyright 2010 Google Inc.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+ */
+
+
+#ifndef GrTypes_DEFINED
+#define GrTypes_DEFINED
+
+#include "GrConfig.h"
+
+#include <memory.h>
+#include <string.h>
+
+/**
+ * Macro to round n up to the next multiple of 4, or return it unchanged if
+ * n is already a multiple of 4
+ */
+#define GrALIGN4(n) (((n) + 3) >> 2 << 2)
+#define GrIsALIGN4(n) (((n) & 3) == 0)
+
+template <typename T> const T& GrMin(const T& a, const T& b) {
+ return (a < b) ? a : b;
+}
+
+template <typename T> const T& GrMax(const T& a, const T& b) {
+ return (b < a) ? a : b;
+}
+
+// compile time versions of min/max
+#define GR_CT_MAX(a, b) (((b) < (a)) ? (a) : (b))
+#define GR_CT_MIN(a, b) (((b) < (a)) ? (b) : (a))
+
+/**
+ * divide, rounding up
+ */
+inline uint32_t GrUIDivRoundUp(uint32_t x, uint32_t y) {
+ return (x + (y-1)) / y;
+}
+
+/**
+ * align up
+ */
+inline uint32_t GrUIAlignUp(uint32_t x, uint32_t alignment) {
+ return GrUIDivRoundUp(x, alignment) * alignment;
+}
+
+/**
+ * amount of pad needed to align up
+ */
+inline uint32_t GrUIAlignUpPad(uint32_t x, uint32_t alignment) {
+ return (alignment - x % alignment) % alignment;
+}
+
+/**
+ * align down
+ */
+inline uint32_t GrUIAlignDown(uint32_t x, uint32_t alignment) {
+ return (x / alignment) * alignment;
+}
+
+/**
+ * Count elements in an array
+ */
+#define GR_ARRAY_COUNT(array) (sizeof(array) / sizeof(array[0]))
+
+//!< allocate a block of memory, will never return NULL
+extern void* GrMalloc(size_t bytes);
+
+//!< free block allocated by GrMalloc. ptr may be NULL
+extern void GrFree(void* ptr);
+
+static inline void Gr_bzero(void* dst, size_t size) {
+ memset(dst, 0, size);
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+/**
+ * Return the number of leading zeros in n
+ */
+extern int Gr_clz(uint32_t n);
+
+/**
+ * Return true if n is a power of 2
+ */
+static inline bool GrIsPow2(unsigned n) {
+ return n && 0 == (n & (n - 1));
+}
+
+/**
+ * Return the next power of 2 >= n.
+ */
+static inline uint32_t GrNextPow2(uint32_t n) {
+ return n ? (1 << (32 - Gr_clz(n - 1))) : 1;
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+/**
+ * 16.16 fixed point type
+ */
+typedef int32_t GrFixed;
+
+#if GR_DEBUG
+
+static inline int16_t GrToS16(intptr_t x) {
+ GrAssert((int16_t)x == x);
+ return (int16_t)x;
+}
+
+#else
+
+#define GrToS16(x) x
+
+#endif
+
+////////////////////////////////////////////////////////////////////////////////
+
+/**
+ * Type used to describe format of vertices in arrays
+ * Values are defined in GrDrawTarget
+ */
+typedef uint16_t GrVertexLayout;
+
+///////////////////////////////////////////////////////////////////////////////
+
+// this is included only to make it easy to use this debugging facility
+#include "GrInstanceCounter.h"
+
+#endif
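A few worked values for the alignment and power-of-two helpers above, as a sketch (illustrative only):

    #include "GrTypes.h"

    static void checkHelpers() {
        GrAssert(16 == GrALIGN4(13));         // round 13 up to 16
        GrAssert(GrIsALIGN4(16));
        GrAssert(3  == GrUIDivRoundUp(10, 4));
        GrAssert(16 == GrUIAlignUp(10, 8));
        GrAssert(6  == GrUIAlignUpPad(10, 8));
        GrAssert(8  == GrUIAlignDown(10, 8));
        GrAssert(32 == GrNextPow2(17));
        GrAssert(GrIsPow2(64));
    }
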
diff --git a/gpu/include/GrUserConfig.h b/gpu/include/GrUserConfig.h
new file mode 100644
index 0000000000..2e6f3919d6
--- /dev/null
+++ b/gpu/include/GrUserConfig.h
@@ -0,0 +1,57 @@
+#ifndef GrUserConfig_DEFINED
+#define GrUserConfig_DEFINED
+
+#if defined(GR_USER_CONFIG_FILE)
+ #error "default user config pulled in but GR_USER_CONFIG_FILE is defined."
+#endif
+
+#if 0
+ #undef GR_RELEASE
+ #undef GR_DEBUG
+ #define GR_RELEASE 0
+ #define GR_DEBUG 1
+#endif
+
+/*
+ Copyright 2010 Google Inc.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+ */
+
+//#define GR_FORCE_GLCHECKERR 1
+
+/*
+ * The default 32-bit pixel config for texture upload is GL_RGBA. If your
+ * bitmaps map to a different GL enum, specify that with this define.
+ */
+//#define SK_GL_32BPP_COLOR_FORMAT GL_RGBA
+
+/*
+ * To diagnose texture cache performance, define this to 1 if you want to see
+ * a log statement every time we upload an image to create a texture.
+ */
+//#define GR_DUMP_TEXTURE_UPLOAD 1
+
+////////////////////////////////////////////////////////////////////////////////
+// Decide Ganesh types
+
+#define GR_SCALAR_IS_FIXED 0
+#define GR_SCALAR_IS_FLOAT 1
+
+#define GR_TEXT_SCALAR_IS_USHORT 0
+#define GR_TEXT_SCALAR_IS_FIXED 0
+#define GR_TEXT_SCALAR_IS_FLOAT 1
+
+#endif
+
+
diff --git a/gpu/include/GrVertexBuffer.h b/gpu/include/GrVertexBuffer.h
new file mode 100644
index 0000000000..5e83de95d5
--- /dev/null
+++ b/gpu/include/GrVertexBuffer.h
@@ -0,0 +1,92 @@
+/*
+ Copyright 2010 Google Inc.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+ */
+
+
+#ifndef GrVertexBuffer_DEFINED
+#define GrVertexBuffer_DEFINED
+
+#include "GrRefCnt.h"
+
+class GrVertexBuffer : public GrRefCnt {
+protected:
+ GrVertexBuffer(uint32_t sizeInBytes, bool dynamic) :
+ fSizeInBytes(sizeInBytes),
+ fDynamic(dynamic) {}
+public:
+ virtual ~GrVertexBuffer() {}
+
+ /**
+ Retrieves the size of the vertex buffer
+
+ @return the size of the vertex buffer in bytes
+ */
+ uint32_t size() { return fSizeInBytes; }
+
+ /**
+ Retrieves whether the vertex buffer was created with the dynamic flag
+
+ @return true if the vertex buffer was created with the dynamic flag
+ */
+ bool dynamic() const { return fDynamic; }
+
+ /**
+ Indicates that the GPU context in which this vertex buffer was created has
+ been destroyed and that Ganesh should not attempt to free the buffer with
+ the underlying API.
+ */
+ virtual void abandon() = 0;
+
+ /**
+ Locks the vertex buffer to be written by the CPU.
+
+ The previous content of the vertex buffer is invalidated. It is an error to
+ draw while the buffer is locked. It is an error to call lock on an already
+ locked vertex buffer.
+
+ @return a pointer to the vertex data or NULL if the lock fails.
+ */
+ virtual void* lock() = 0;
+
+ /**
+ Unlocks the vertex buffer.
+
+ The pointer returned by the previous lock call will no longer be valid.
+ */
+ virtual void unlock() = 0;
+
+ /**
+ Queries whether the vertex buffer has been locked.
+
+ @return true if the vertex buffer is locked, false otherwise.
+ */
+ virtual bool isLocked() const = 0;
+
+ /**
+ Updates the vertex buffer data.
+
+ The size of the vertex buffer will be preserved. However, only the updated
+ region will have defined contents.
+
+ @return true if the update succeeds, false otherwise.
+ */
+ virtual bool updateData(const void* src, uint32_t srcSizeInBytes) = 0;
+
+private:
+ uint32_t fSizeInBytes;
+ bool fDynamic;
+};
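+
+#if 0
+// Usage sketch (not compiled): assumes a GrVertexBuffer* obtained from the GPU
+// backend and a caller-provided block of vertex data; only the methods
+// declared above are real.
+static bool upload_vertices(GrVertexBuffer* vb, const void* src, uint32_t bytes) {
+ GrAssert(!vb->isLocked());
+ if (void* dst = vb->lock()) { // previous contents are invalidated
+ memcpy(dst, src, bytes);
+ vb->unlock(); // the locked pointer is now invalid
+ return true;
+ }
+ // lock() can fail; updateData() is the fallback path
+ return vb->updateData(src, bytes);
+}
+#endif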
+
+#endif
diff --git a/gpu/include/GrVertexBufferAllocPool.h b/gpu/include/GrVertexBufferAllocPool.h
new file mode 100644
index 0000000000..6a781aa344
--- /dev/null
+++ b/gpu/include/GrVertexBufferAllocPool.h
@@ -0,0 +1,141 @@
+/*
+ Copyright 2010 Google Inc.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+ */
+
+
+#ifndef GrVertexBufferAllocPool_DEFINED
+#define GrVertexBufferAllocPool_DEFINED
+
+#include "GrNoncopyable.h"
+#include "GrTDArray.h"
+#include "GrTArray.h"
+
+class GrVertexBuffer;
+class GrGpu;
+
+/**
+ * A pool of vertices in vertex buffers tied to a GrGpu.
+ *
+ * The pool has an alloc() function that returns a pointer into a locked
+ * vertex buffer. A client can release() if it has over-allocated.
+ *
+ * At creation time a minimum VB size can be specified. Additionally,
+ * a number of vertex buffers to preallocate can be specified. These will
+ * be allocated at the min size and kept until the pool is destroyed.
+ */
+class GrVertexBufferAllocPool : GrNoncopyable {
+public:
+ /**
+ * Constructor
+ *
+ * @param gpu The GrGpu used to create the vertex buffers.
+ * @param bufferSize The size of created VBs (unless an alloc request
+ * exceeds this size in which case a larger VB is
+ * created). This value is clamped to some
+ * reasonable minimum.
+ * @param preallocBufferCnt The pool will allocate this number of VBs at
+ * bufferSize and keep them until it is destroyed.
+ */
+ GrVertexBufferAllocPool(GrGpu* gpu,
+ size_t bufferSize = 0,
+ int preallocBufferCnt = 0);
+ ~GrVertexBufferAllocPool();
+
+ /**
+ * Ensures all VBs are unlocked. Call before using to draw.
+ */
+ void unlock();
+
+ /**
+ * Frees all vertex data that has been allocated with alloc().
+ */
+ void reset();
+
+ /**
+ * Returns a block of memory large enough to hold the requested vertices. The
+ * vertex buffer containing the memory is returned in buffer.
+ *
+ * @param layout specifies type of vertices to allocate space for
+ * @param vertexCount number of vertices to allocate space for
+ * @param buffer returns the vertex buffer that will hold the
+ * vertices.
+ * @param startVertex returns the offset into buffer of the first vertex.
+ * In units of the size of a vertex using layout param.
+ * @return pointer to first vertex.
+ */
+ void* alloc(GrVertexLayout layout,
+ uint32_t vertexCount,
+ GrVertexBuffer** buffer,
+ uint32_t* startVertex);
+
+ /**
+ * Gets the number of vertices that can be allocated without switching VBs.
+ * If the last VB returned by alloc() was not exhausted, this is the space
+ * remaining in it. If that VB was exhausted, or if alloc() hasn't been called
+ * since reset(), it is the number of vertices that would fit in an available
+ * preallocated VB. If no preallocated VB is available it returns 0, since the
+ * next alloc() would force a new VB to be created.
+ */
+ int currentBufferVertices(GrVertexLayout layout) const;
+
+ /**
+ * Gets the number of preallocated buffers that are yet to be used.
+ */
+ int preallocatedBuffersRemaining() const;
+
+ /**
+ * Gets the number of vertices that can fit in a preallocated vertex buffer.
+ * Zero if no preallocated buffers.
+ */
+ int preallocatedBufferVertices(GrVertexLayout layout) const;
+
+ /**
+ * gets the number of preallocated vertex buffers
+ */
+ int preallocatedBufferCount() const;
+
+
+ /**
+ * Releases the most recently allocated bytes back to the pool.
+ */
+ void release(size_t bytes);
+
+ /**
+ * Gets the GrGpu that this pool is associated with.
+ */
+ GrGpu* getGpu() { return fGpu; }
+
+
+private:
+ struct BufferBlock {
+ size_t fBytesFree;
+ GrVertexBuffer* fVertexBuffer;
+ };
+
+ bool createBlock(size_t size);
+ void destroyBlock();
+
+ GrTArray<BufferBlock> fBlocks;
+ GrTDArray<GrVertexBuffer*> fPreallocBuffers;
+ int fPreallocBuffersInUse;
+ int fFirstPreallocBuffer;
+
+ size_t fMinBlockSize;
+ GrGpu* fGpu;
+ void* fBufferPtr;
+};
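+
+#if 0
+// Usage sketch (not compiled): assumes a GrGpu* named gpu and a GrVertexLayout
+// named layout exist; only the pool's own methods are real.
+GrVertexBufferAllocPool pool(gpu, 0, 2); // two preallocated VBs at the min size
+GrVertexBuffer* vb;
+uint32_t startVertex;
+void* verts = pool.alloc(layout, 100, &vb, &startVertex);
+// ... write up to 100 vertices of the given layout into verts ...
+pool.unlock(); // unlock all VBs before drawing from vb at startVertex
+pool.reset(); // or pool.release(bytes) first if 100 was an over-estimate
+#endif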
+
+#endif
diff --git a/gpu/include/SkGpuCanvas.h b/gpu/include/SkGpuCanvas.h
new file mode 100644
index 0000000000..e8e6e7ae10
--- /dev/null
+++ b/gpu/include/SkGpuCanvas.h
@@ -0,0 +1,72 @@
+/*
+ Copyright 2010 Google Inc.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+ */
+
+
+#ifndef SkGpuCanvas_DEFINED
+#define SkGpuCanvas_DEFINED
+
+#include "SkCanvas.h"
+
+class GrContext;
+
+/**
+ * Subclass of canvas that creates devices compatible with the GrContext
+ * passed to the canvas' constructor.
+ */
+class SkGpuCanvas : public SkCanvas {
+public:
+ /**
+ * The GrContext object is reference counted. When passed to our
+ * constructor, its reference count is incremented. In our destructor, the
+ * GrContext's reference count will be decremented.
+ */
+ explicit SkGpuCanvas(GrContext*);
+ virtual ~SkGpuCanvas();
+
+ /**
+ * Return our GrContext instance
+ */
+ GrContext* context() const { return fContext; }
+
+ /**
+ * Override from SkCanvas. Returns true and, if size is non-null, sets it to
+ * the width/height of our viewport.
+ */
+ virtual bool getViewport(SkIPoint* size) const;
+
+ /**
+ * Override from SkCanvas. Returns a new device of the correct subclass,
+ * as determined by the GrContext passed to our constructor.
+ */
+ virtual SkDevice* createDevice(SkBitmap::Config, int width, int height,
+ bool isOpaque, bool isLayer);
+
+#if 0
+ virtual int saveLayer(const SkRect* bounds, const SkPaint* paint,
+ SaveFlags flags = kARGB_ClipLayer_SaveFlag) {
+ return this->save(flags);
+ }
+#endif
+
+private:
+ GrContext* fContext;
+
+ typedef SkCanvas INHERITED;
+};
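+
+#if 0
+// Usage sketch (not compiled): assumes a GrContext* named context was created
+// elsewhere (e.g. via GrContext::Create); the canvas refs it for its lifetime.
+SkGpuCanvas canvas(context);
+SkIPoint size;
+if (canvas.getViewport(&size)) {
+ // devices created through createDevice() will render via context
+ GrPrintf("viewport %d x %d\n", size.fX, size.fY);
+}
+#endif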
+
+#endif
+
+
diff --git a/gpu/include/SkGr.h b/gpu/include/SkGr.h
new file mode 100644
index 0000000000..4e9801f699
--- /dev/null
+++ b/gpu/include/SkGr.h
@@ -0,0 +1,238 @@
+/*
+ Copyright 2010 Google Inc.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+ */
+
+
+#ifndef SkGr_DEFINED
+#define SkGr_DEFINED
+
+#include <stddef.h>
+
+// tetrark headers
+#include "GrConfig.h"
+#include "GrContext.h"
+#include "GrFontScaler.h"
+#include "GrPathIter.h"
+#include "GrClipIterator.h"
+
+// skia headers
+#include "SkBitmap.h"
+#include "SkPath.h"
+#include "SkPoint.h"
+#include "SkRegion.h"
+#include "SkShader.h"
+
+#if (GR_DEBUG && defined(SK_RELEASE)) || (GR_RELEASE && defined(SK_DEBUG))
+// #error "inconsistent GR_DEBUG and SK_DEBUG"
+#endif
+
+#if GR_SCALAR_IS_FIXED
+ #ifdef SK_SCALAR_IS_FIXED
+ #define SK_SCALAR_IS_GR_SCALAR 1
+ #else
+ #define SK_SCALAR_IS_GR_SCALAR 0
+ #endif
+ #define SkScalarToGrScalar(x) SkScalarToFixed(x)
+
+#elif GR_SCALAR_IS_FLOAT
+
+ #ifdef SK_SCALAR_IS_FLOAT
+ #define SK_SCALAR_IS_GR_SCALAR 1
+ #else
+ #define SK_SCALAR_IS_GR_SCALAR 0
+ #endif
+ #define SkScalarToGrScalar(x) SkScalarToFloat(x)
+
+#else
+ #error "Ganesh scalar type not defined"
+#endif
+
+////////////////////////////////////////////////////////////////////////////////
+// Sk to Gr Type conversions
+
+// Verify that SkPoint and GrPoint are compatible if using the same scalar type
+#if 0/*SK_SCALAR_IS_GR_SCALAR*/
+ GR_STATIC_ASSERT(sizeof(SkPoint) == sizeof(GrPoint));
+ GR_STATIC_ASSERT(offsetof(SkPoint,fX) == offsetof(GrPoint,fX));
+ GR_STATIC_ASSERT(offsetof(SkPoint,fY) == offsetof(GrPoint,fY));
+#endif
+
+GR_STATIC_ASSERT((int)GrSamplerState::kClamp_WrapMode == (int)SkShader::kClamp_TileMode);
+GR_STATIC_ASSERT((int)GrSamplerState::kRepeat_WrapMode ==
+ (int)SkShader::kRepeat_TileMode);
+GR_STATIC_ASSERT((int)GrSamplerState::kMirror_WrapMode ==
+ (int)SkShader::kMirror_TileMode);
+
+#define sk_tile_mode_to_grwrap(X) ((GrSamplerState::WrapMode)(X))
+
+GR_STATIC_ASSERT((int)GrGpu::kZero_BlendCoeff == (int)SkXfermode::kZero_Coeff);
+GR_STATIC_ASSERT((int)GrGpu::kOne_BlendCoeff == (int)SkXfermode::kOne_Coeff);
+GR_STATIC_ASSERT((int)GrGpu::kSC_BlendCoeff == (int)SkXfermode::kSC_Coeff);
+GR_STATIC_ASSERT((int)GrGpu::kISC_BlendCoeff == (int)SkXfermode::kISC_Coeff);
+GR_STATIC_ASSERT((int)GrGpu::kDC_BlendCoeff == (int)SkXfermode::kDC_Coeff);
+GR_STATIC_ASSERT((int)GrGpu::kIDC_BlendCoeff == (int)SkXfermode::kIDC_Coeff);
+GR_STATIC_ASSERT((int)GrGpu::kSA_BlendCoeff == (int)SkXfermode::kSA_Coeff);
+GR_STATIC_ASSERT((int)GrGpu::kISA_BlendCoeff == (int)SkXfermode::kISA_Coeff);
+GR_STATIC_ASSERT((int)GrGpu::kDA_BlendCoeff == (int)SkXfermode::kDA_Coeff);
+GR_STATIC_ASSERT((int)GrGpu::kIDA_BlendCoeff == (int)SkXfermode::kIDA_Coeff);
+
+#define sk_blend_to_grblend(X) ((GrGpu::BlendCoeff)(X))
+
+GR_STATIC_ASSERT((int)SkPath::kMove_Verb == (int)GrPathIter::kMove_Command);
+GR_STATIC_ASSERT((int)SkPath::kLine_Verb == (int)GrPathIter::kLine_Command);
+GR_STATIC_ASSERT((int)SkPath::kQuad_Verb == (int)GrPathIter::kQuadratic_Command);
+GR_STATIC_ASSERT((int)SkPath::kCubic_Verb == (int)GrPathIter::kCubic_Command);
+GR_STATIC_ASSERT((int)SkPath::kClose_Verb == (int)GrPathIter::kClose_Command);
+GR_STATIC_ASSERT((int)SkPath::kDone_Verb == (int)GrPathIter::kEnd_Command);
+
+#define sk_path_verb_to_gr_path_command(X) ((GrPathIter::Command)(X))
+
+///////////////////////////////////////////////////////////////////////////////
+
+#include "SkColorPriv.h"
+
+static inline GrRect Sk2Gr(const SkRect& src) {
+ return GrRect(SkScalarToGrScalar(src.fLeft),
+ SkScalarToGrScalar(src.fTop),
+ SkScalarToGrScalar(src.fRight),
+ SkScalarToGrScalar(src.fBottom));
+}
+
+class SkGr {
+public:
+ static inline SkIRect& SetIRect(SkIRect* dst, const GrIRect& src) {
+ GR_STATIC_ASSERT(sizeof(*dst) == sizeof(src));
+ memcpy(dst, &src, sizeof(*dst));
+ return *dst;
+ }
+
+ static inline GrIRect& SetIRect(GrIRect* dst, const SkIRect& src) {
+ GR_STATIC_ASSERT(sizeof(*dst) == sizeof(src));
+ memcpy(dst, &src, sizeof(*dst));
+ return *dst;
+ }
+
+ /**
+ * Convert the SkBitmap::Config to the corresponding PixelConfig, or
+ * kUnknown_PixelConfig if the conversion cannot be done.
+ */
+ static GrTexture::PixelConfig BitmapConfig2PixelConfig(SkBitmap::Config,
+ bool isOpaque);
+
+ static GrTexture::PixelConfig Bitmap2PixelConfig(const SkBitmap& bm) {
+ return BitmapConfig2PixelConfig(bm.config(), bm.isOpaque());
+ }
+
+ static void SkMatrix2GrMatrix(const SkMatrix& m, GrMatrix* g) {
+ g->setAll(SkScalarToGrScalar(m[0]),
+ SkScalarToGrScalar(m[1]),
+ SkScalarToGrScalar(m[2]),
+ SkScalarToGrScalar(m[3]),
+ SkScalarToGrScalar(m[4]),
+ SkScalarToGrScalar(m[5]),
+ SkScalarToGrScalar(m[6]),
+ SkScalarToGrScalar(m[7]),
+ SkScalarToGrScalar(m[8]));
+ }
+
+ static GrColor SkColor2GrColor(SkColor c) {
+ SkPMColor pm = SkPreMultiplyColor(c);
+ unsigned r = SkGetPackedR32(pm);
+ unsigned g = SkGetPackedG32(pm);
+ unsigned b = SkGetPackedB32(pm);
+ unsigned a = SkGetPackedA32(pm);
+ return GrColorPackRGBA(r, g, b, a);
+ }
+
+ /**
+ * This abandons all texture caches (for bitmaps and text) associated with
+ * the gpu, and frees any associated skia caches. It differs from
+ * deleteAllTextures in that it assumes that the gpu has lost its context,
+ * and thus the associated HW textures are no longer valid.
+ */
+ static void AbandonAllTextures(GrContext*);
+};
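+
+// Example of SkColor2GrColor's premultiply: a half-opaque red SkColor
+// (a=0x80, r=0xFF, g=0, b=0) becomes roughly r=0x80, g=0, b=0, a=0x80
+// before being packed with GrColorPackRGBA.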
+
+////////////////////////////////////////////////////////////////////////////////
+// Classes
+
+class SkGrPathIter : public GrPathIter {
+public:
+ SkGrPathIter(const SkPath& path) : fIter(path, false), fPath(path) {}
+ virtual Command next(GrPoint pts[]);
+ virtual Command next();
+ virtual void rewind();
+ virtual ConvexHint hint() const;
+private:
+
+#if !SK_SCALAR_IS_GR_SCALAR
+ SkPoint fPoints[4];
+#endif
+ SkPath::Iter fIter;
+ const SkPath& fPath;
+};
+
+class SkGrClipIterator : public GrClipIterator {
+public:
+ void reset(const SkRegion& clip) {
+ fIter.reset(clip);
+ this->invalidateBoundsCache();
+ }
+
+ // overrides
+
+ virtual bool isDone() { return fIter.done(); }
+ virtual void getRect(GrIRect* rect) {
+ SkGr::SetIRect(rect, fIter.rect());
+ }
+ virtual void next() { fIter.next(); }
+ virtual void rewind() { fIter.rewind(); }
+ virtual void computeBounds(GrIRect* bounds);
+
+private:
+ SkRegion::Iterator fIter;
+};
+
+class SkGlyphCache;
+
+class SkGrFontScaler : public GrFontScaler {
+public:
+ explicit SkGrFontScaler(SkGlyphCache* strike);
+ virtual ~SkGrFontScaler();
+
+ // overrides
+ virtual const GrKey* getKey();
+ virtual bool getPackedGlyphBounds(GrGlyph::PackedID, GrIRect* bounds);
+ virtual bool getPackedGlyphImage(GrGlyph::PackedID, int width, int height,
+ int rowBytes, void* image);
+ virtual bool getGlyphPath(uint16_t glyphID, GrPath*);
+
+private:
+ SkGlyphCache* fStrike;
+ GrKey* fKey;
+// DECLARE_INSTANCE_COUNTER(SkGrFontScaler);
+};
+
+////////////////////////////////////////////////////////////////////////////////
+// Helper functions
+
+GrTextureEntry* sk_gr_create_bitmap_texture(GrContext* ctx,
+ GrTextureKey* key,
+ const GrSamplerState& sampler,
+ const SkBitmap& bitmap);
+
+void sk_gr_set_paint(GrContext* ctx, const SkPaint& paint, bool justAlpha = false);
+
+#endif
diff --git a/gpu/include/SkGrTexturePixelRef.h b/gpu/include/SkGrTexturePixelRef.h
new file mode 100644
index 0000000000..1f5133f86d
--- /dev/null
+++ b/gpu/include/SkGrTexturePixelRef.h
@@ -0,0 +1,50 @@
+/*
+ Copyright 2010 Google Inc.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+ */
+
+
+#ifndef SkGrTexturePixelRef_DEFINED
+#define SkGrTexturePixelRef_DEFINED
+
+#include "SkPixelRef.h"
+#include "GrGpu.h"
+
+class SkGrTexturePixelRef : public SkPixelRef {
+public:
+ SkGrTexturePixelRef(GrTexture*);
+ virtual ~SkGrTexturePixelRef();
+
+ // override from SkPixelRef
+ virtual SkGpuTexture* getTexture() { return (SkGpuTexture*)fTexture; }
+
+protected:
+ // override from SkPixelRef
+ virtual void* onLockPixels(SkColorTable** ptr) {
+ if (ptr) {
+ *ptr = NULL;
+ }
+ return NULL;
+ }
+
+ // override from SkPixelRef
+ virtual void onUnlockPixels() {}
+
+private:
+ GrTexture* fTexture;
+ typedef SkPixelRef INHERITED;
+};
+
+#endif
+
diff --git a/gpu/include/SkUIView.h b/gpu/include/SkUIView.h
new file mode 100644
index 0000000000..5e12e004d6
--- /dev/null
+++ b/gpu/include/SkUIView.h
@@ -0,0 +1,64 @@
+#import <UIKit/UIKit.h>
+
+#include "SkMatrix.h"
+#include "FlingState.h"
+
+#import <OpenGLES/EAGL.h>
+#import <OpenGLES/ES1/gl.h>
+#import <OpenGLES/ES1/glext.h>
+
+class SkOSWindow;
+class SkEvent;
+
+@interface SkUIView : UIView <UIAccelerometerDelegate> {
+ BOOL fRedrawRequestPending;
+ SkOSWindow* fWind;
+ SkMatrix fMatrix, fLocalMatrix;
+ bool fNeedGestureEnded;
+
+ SkMatrix fRotateMatrix;
+
+ float fFirstPinchX, fFirstPinchY;
+ bool fNeedFirstPinch;
+
+ float fZoomAroundX, fZoomAroundY;
+ bool fZoomAround;
+
+ FlingState fFlingState;
+
+ GrAnimateFloat fWarpState;
+ bool fUseWarp;
+
+ struct {
+ EAGLContext* fContext;
+ GLuint fRenderbuffer;
+ GLuint fStencilbuffer;
+ GLuint fFramebuffer;
+ GLint fWidth;
+ GLint fHeight;
+ } fGL;
+
+ UILabel* fTitleLabel;
+
+ enum Backend {
+ kGL_Backend,
+ kRaster_Backend,
+ };
+
+ // these are visible to DetailViewController
+ Backend fBackend;
+ bool fComplexClip;
+}
+
+@property (nonatomic, assign) SkOSWindow *fWind;
+@property (nonatomic, retain) UILabel* fTitleLabel;
+@property (nonatomic, assign) Backend fBackend;
+@property (nonatomic, assign) bool fComplexClip;
+@property (nonatomic, assign, setter=setWarpState) bool fUseWarp;
+
+- (void)setSkTitle:(const char*)title;
+- (void)postInvalWithRect:(const SkIRect*)rectOrNil;
+- (BOOL)onHandleEvent:(const SkEvent&)event;
+
+@end
+
diff --git a/gpu/src/FlingState.cpp b/gpu/src/FlingState.cpp
new file mode 100644
index 0000000000..cb634cc976
--- /dev/null
+++ b/gpu/src/FlingState.cpp
@@ -0,0 +1,134 @@
+/*
+ Copyright 2010 Google Inc.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+ */
+
+
+#include "FlingState.h"
+#include "SkMatrix.h"
+#include "SkTime.h"
+
+#define DISCRETIZE_TRANSLATE_TO_AVOID_FLICKER true
+
+static const float MAX_FLING_SPEED = 1500;
+
+static float pin_max_fling(float speed) {
+ if (speed > MAX_FLING_SPEED) {
+ speed = MAX_FLING_SPEED;
+ }
+ return speed;
+}
+
+static double getseconds() {
+ return SkTime::GetMSecs() * 0.001;
+}
+
+// returns +1 or -1, depending on the sign of x
+// returns +1 if x is zero
+static SkScalar SkScalarSign(SkScalar x) {
+ SkScalar sign = SK_Scalar1;
+ if (x < 0) {
+ sign = -sign;
+ }
+ return sign;
+}
+
+static void unit_axis_align(SkVector* unit) {
+ const SkScalar TOLERANCE = SkDoubleToScalar(0.15);
+ if (SkScalarAbs(unit->fX) < TOLERANCE) {
+ unit->fX = 0;
+ unit->fY = SkScalarSign(unit->fY);
+ } else if (SkScalarAbs(unit->fY) < TOLERANCE) {
+ unit->fX = SkScalarSign(unit->fX);
+ unit->fY = 0;
+ }
+}
+
+void FlingState::reset(float sx, float sy) {
+ fActive = true;
+ fDirection.set(sx, sy);
+ fSpeed0 = SkPoint::Normalize(&fDirection);
+ fSpeed0 = pin_max_fling(fSpeed0);
+ fTime0 = getseconds();
+
+ unit_axis_align(&fDirection);
+// printf("---- speed %g dir %g %g\n", fSpeed0, fDirection.fX, fDirection.fY);
+}
+
+bool FlingState::evaluateMatrix(SkMatrix* matrix) {
+ if (!fActive) {
+ return false;
+ }
+
+ const float t = getseconds() - fTime0;
+ const float MIN_SPEED = 2;
+ const float K0 = 5.0;
+ const float K1 = 0.02;
+ const float speed = fSpeed0 * (sk_float_exp(- K0 * t) - K1);
+ if (speed <= MIN_SPEED) {
+ fActive = false;
+ return false;
+ }
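+ // dist is (up to the small K1 term) the closed-form integral of the
+ // exponentially decaying speed: fSpeed0 * (1 - exp(-K0*t)) / K0.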
+ float dist = (fSpeed0 - speed) / K0;
+
+// printf("---- time %g speed %g dist %g\n", t, speed, dist);
+ float tx = fDirection.fX * dist;
+ float ty = fDirection.fY * dist;
+ if (DISCRETIZE_TRANSLATE_TO_AVOID_FLICKER) {
+ tx = sk_float_round2int(tx);
+ ty = sk_float_round2int(ty);
+ }
+ matrix->setTranslate(tx, ty);
+// printf("---- evaluate (%g %g)\n", tx, ty);
+
+ return true;
+}
+
+////////////////////////////////////////
+
+GrAnimateFloat::GrAnimateFloat() : fTime0(0) {}
+
+void GrAnimateFloat::start(float v0, float v1, float duration) {
+ fValue0 = v0;
+ fValue1 = v1;
+ fDuration = duration;
+ if (duration > 0) {
+ fTime0 = SkTime::GetMSecs();
+ if (!fTime0) {
+ fTime0 = 1; // time0 is our sentinel
+ }
+ } else {
+ fTime0 = 0;
+ }
+}
+
+float GrAnimateFloat::evaluate() {
+ if (!fTime0) {
+ return fValue1;
+ }
+
+ double elapsed = (SkTime::GetMSecs() - fTime0) * 0.001;
+ if (elapsed >= fDuration) {
+ fTime0 = 0;
+ return fValue1;
+ }
+
+ double t = elapsed / fDuration;
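+ // ease in/out with the cubic smoothstep curve 3t^2 - 2t^3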
+ if (true) {
+ t = (3 - 2 * t) * t * t;
+ }
+ return fValue0 + t * (fValue1 - fValue0);
+}
+
+
diff --git a/gpu/src/GrAllocPool.cpp b/gpu/src/GrAllocPool.cpp
new file mode 100644
index 0000000000..f133f9713a
--- /dev/null
+++ b/gpu/src/GrAllocPool.cpp
@@ -0,0 +1,127 @@
+/*
+ Copyright 2010 Google Inc.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+ */
+
+
+#include "GrAllocPool.h"
+
+#define GrAllocPool_MIN_BLOCK_SIZE ((size_t)128)
+
+struct GrAllocPool::Block {
+ Block* fNext;
+ char* fPtr;
+ size_t fBytesFree;
+ size_t fBytesTotal;
+
+ static Block* Create(size_t size, Block* next) {
+ GrAssert(size >= GrAllocPool_MIN_BLOCK_SIZE);
+
+ Block* block = (Block*)GrMalloc(sizeof(Block) + size);
+ block->fNext = next;
+ block->fPtr = (char*)block + sizeof(Block);
+ block->fBytesFree = size;
+ block->fBytesTotal = size;
+ return block;
+ }
+
+ bool canAlloc(size_t bytes) const {
+ return bytes <= fBytesFree;
+ }
+
+ void* alloc(size_t bytes) {
+ GrAssert(bytes <= fBytesFree);
+ fBytesFree -= bytes;
+ void* ptr = fPtr;
+ fPtr += bytes;
+ return ptr;
+ }
+
+ size_t release(size_t bytes) {
+ GrAssert(bytes > 0);
+ size_t free = GrMin(bytes, fBytesTotal - fBytesFree);
+ fBytesFree += free;
+ fPtr -= free;
+ return bytes - free;
+ }
+
+ bool empty() const { return fBytesTotal == fBytesFree; }
+};
+
+///////////////////////////////////////////////////////////////////////////////
+
+GrAllocPool::GrAllocPool(size_t blockSize) {
+ fBlock = NULL;
+ fMinBlockSize = GrMax(blockSize, GrAllocPool_MIN_BLOCK_SIZE);
+ GR_DEBUGCODE(fBlocksAllocated = 0;)
+}
+
+GrAllocPool::~GrAllocPool() {
+ this->reset();
+}
+
+void GrAllocPool::reset() {
+ this->validate();
+
+ Block* block = fBlock;
+ while (block) {
+ Block* next = block->fNext;
+ GrFree(block);
+ block = next;
+ }
+ fBlock = NULL;
+ GR_DEBUGCODE(fBlocksAllocated = 0;)
+}
+
+void* GrAllocPool::alloc(size_t size) {
+ this->validate();
+
+ if (!fBlock || !fBlock->canAlloc(size)) {
+ size_t blockSize = GrMax(fMinBlockSize, size);
+ fBlock = Block::Create(blockSize, fBlock);
+ GR_DEBUGCODE(fBlocksAllocated += 1;)
+ }
+ return fBlock->alloc(size);
+}
+
+void GrAllocPool::release(size_t bytes) {
+ this->validate();
+
+ while (bytes && NULL != fBlock) {
+ bytes = fBlock->release(bytes);
+ if (fBlock->empty()) {
+ Block* next = fBlock->fNext;
+ GrFree(fBlock);
+ fBlock = next;
+ GR_DEBUGCODE(fBlocksAllocated -= 1;)
+ }
+ }
+}
+
+
+#if GR_DEBUG
+
+void GrAllocPool::validate() const {
+ Block* block = fBlock;
+ int count = 0;
+ while (block) {
+ count += 1;
+ block = block->fNext;
+ }
+ GrAssert(fBlocksAllocated == count);
+}
+
+#endif
+
+
diff --git a/gpu/src/GrAtlas.cpp b/gpu/src/GrAtlas.cpp
new file mode 100644
index 0000000000..b55a02908c
--- /dev/null
+++ b/gpu/src/GrAtlas.cpp
@@ -0,0 +1,187 @@
+/*
+ Copyright 2010 Google Inc.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+ */
+
+
+#include "GrAtlas.h"
+#include "GrGpu.h"
+#include "GrMemory.h"
+#include "GrRectanizer.h"
+#include "GrPlotMgr.h"
+
+#if 0
+#define GR_PLOT_WIDTH 8
+#define GR_PLOT_HEIGHT 4
+#define GR_ATLAS_WIDTH 256
+#define GR_ATLAS_HEIGHT 256
+
+#define GR_ATLAS_TEXTURE_WIDTH (GR_PLOT_WIDTH * GR_ATLAS_WIDTH)
+#define GR_ATLAS_TEXTURE_HEIGHT (GR_PLOT_HEIGHT * GR_ATLAS_HEIGHT)
+
+#else
+
+#define GR_ATLAS_TEXTURE_WIDTH 1024
+#define GR_ATLAS_TEXTURE_HEIGHT 2048
+
+#define GR_ATLAS_WIDTH 341
+#define GR_ATLAS_HEIGHT 341
+
+#define GR_PLOT_WIDTH (GR_ATLAS_TEXTURE_WIDTH / GR_ATLAS_WIDTH)
+#define GR_PLOT_HEIGHT (GR_ATLAS_TEXTURE_HEIGHT / GR_ATLAS_HEIGHT)
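+
+// With these constants the atlas texture is carved into a 3 x 6 grid of plots
+// (1024 / 341 == 3 and 2048 / 341 == 6 with integer division).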
+
+#endif
+
+///////////////////////////////////////////////////////////////////////////////
+
+#define BORDER 1
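+// (The zeroed one-texel border added around each sub-image in addSubImage
+// presumably keeps filtering from bleeding between neighboring entries.)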
+
+#if GR_DEBUG
+ static int gCounter;
+#endif
+
+GrAtlas::GrAtlas(GrAtlasMgr* mgr, int plotX, int plotY) {
+ fAtlasMgr = mgr; // just a pointer, not an owner
+ fNext = NULL;
+ fTexture = mgr->getTexture(); // we're not an owner, just a pointer
+ fPlot.set(plotX, plotY);
+
+ fRects = GrRectanizer::Factory(GR_ATLAS_WIDTH - BORDER,
+ GR_ATLAS_HEIGHT - BORDER);
+
+#if GR_DEBUG
+ GrPrintf(" GrAtlas %p [%d %d] %d\n", this, plotX, plotY, gCounter);
+ gCounter += 1;
+#endif
+}
+
+GrAtlas::~GrAtlas() {
+ fAtlasMgr->freePlot(fPlot.fX, fPlot.fY);
+
+ delete fRects;
+
+#if GR_DEBUG
+ --gCounter;
+ GrPrintf("~GrAtlas %p [%d %d] %d\n", this, fPlot.fX, fPlot.fY, gCounter);
+#endif
+}
+
+static void adjustForPlot(GrIPoint16* loc, const GrIPoint16& plot) {
+ loc->fX += plot.fX * GR_ATLAS_WIDTH;
+ loc->fY += plot.fY * GR_ATLAS_HEIGHT;
+}
+
+bool GrAtlas::addSubImage(int width, int height, const void* image,
+ GrIPoint16* loc) {
+ if (!fRects->addRect(width + BORDER, height + BORDER, loc)) {
+ return false;
+ }
+
+ GrAutoSMalloc<1024> storage;
+ int srcW = width + 2*BORDER;
+ int srcH = height + 2*BORDER;
+ if (BORDER) {
+ uint8_t* ptr = (uint8_t*)storage.realloc(srcW * srcH);
+ Gr_bzero(ptr, srcW); // zero top row
+ ptr += srcW;
+ for (int y = 0; y < height; y++) {
+ *ptr++ = 0; // zero left edge
+ memcpy(ptr, image, width); ptr += width;
+ *ptr++ = 0; // zero right edge
+ image = (const void*)((const char*)image + width);
+ }
+ Gr_bzero(ptr, srcW); // zero bottom row
+ image = storage.get();
+ }
+ adjustForPlot(loc, fPlot);
+ fTexture->uploadTextureData(loc->fX, loc->fY, srcW, srcH, image);
+
+ // now tell the caller to skip the top/left BORDER
+ loc->fX += BORDER;
+ loc->fY += BORDER;
+ return true;
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+GrAtlasMgr::GrAtlasMgr(GrGpu* gpu) {
+ fGpu = gpu;
+ gpu->ref();
+ fTexture = NULL;
+ fPlotMgr = new GrPlotMgr(GR_PLOT_WIDTH, GR_PLOT_HEIGHT);
+}
+
+GrAtlasMgr::~GrAtlasMgr() {
+ GrSafeUnref(fTexture);
+ delete fPlotMgr;
+ fGpu->unref();
+}
+
+GrAtlas* GrAtlasMgr::addToAtlas(GrAtlas* atlas,
+ int width, int height, const void* image,
+ GrIPoint16* loc) {
+ if (atlas && atlas->addSubImage(width, height, image, loc)) {
+ return atlas;
+ }
+
+ // If the above fails, then either we have no starting atlas, or the current
+ // one is full. Either way we need to allocate a new atlas
+
+ GrIPoint16 plot;
+ if (!fPlotMgr->newPlot(&plot)) {
+ return NULL;
+ }
+
+ if (NULL == fTexture) {
+ GrGpu::TextureDesc desc = {
+ GrGpu::kDynamicUpdate_TextureFlag,
+ GrGpu::kNone_AALevel,
+ GR_ATLAS_TEXTURE_WIDTH,
+ GR_ATLAS_TEXTURE_HEIGHT,
+ GrTexture::kAlpha_8_PixelConfig
+ };
+ fTexture = fGpu->createTexture(desc, NULL, 0);
+ if (NULL == fTexture) {
+ return NULL;
+ }
+ }
+
+ GrAtlas* newAtlas = new GrAtlas(this, plot.fX, plot.fY);
+ if (!newAtlas->addSubImage(width, height, image, loc)) {
+ delete newAtlas;
+ return NULL;
+ }
+
+ newAtlas->fNext = atlas;
+ return newAtlas;
+}
+
+void GrAtlasMgr::freePlot(int x, int y) {
+ GrAssert(fPlotMgr->isBusy(x, y));
+ fPlotMgr->freePlot(x, y);
+}
+
+void GrAtlasMgr::abandonAll() {
+#if 0
+ GrAtlas** curr = fList.begin();
+ GrAtlas** stop = fList.end();
+ for (; curr < stop; curr++) {
+ (*curr)->texture()->abandon();
+ delete *curr;
+ }
+ fList.reset();
+#endif
+}
+
+
diff --git a/gpu/src/GrClip.cpp b/gpu/src/GrClip.cpp
new file mode 100644
index 0000000000..b7a839bfd2
--- /dev/null
+++ b/gpu/src/GrClip.cpp
@@ -0,0 +1,136 @@
+/*
+ Copyright 2010 Google Inc.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+ */
+
+
+#include "GrClip.h"
+
+GrClip::GrClip() {
+ fBounds.setEmpty();
+ this->validate();
+}
+
+GrClip::GrClip(const GrClip& src) {
+ *this = src;
+}
+
+GrClip::GrClip(GrClipIterator* iter) {
+ fBounds.setEmpty();
+ this->setFromIterator(iter);
+}
+
+GrClip::~GrClip() {}
+
+GrClip& GrClip::operator=(const GrClip& src) {
+ fList = src.fList;
+ fBounds = src.fBounds;
+ this->validate();
+ return *this;
+}
+
+void GrClip::setEmpty() {
+ fList.reset();
+ fBounds.setEmpty();
+ this->validate();
+}
+
+void GrClip::setRect(const GrIRect& r) {
+ fList.reset();
+
+ // we need a canonical "empty" rect, so that our operator== will behave
+ // correctly with two empty clips.
+ if (r.isEmpty()) {
+ fBounds.setEmpty();
+ } else {
+ fBounds = r;
+ }
+ this->validate();
+}
+
+void GrClip::addRect(const GrIRect& r) {
+ if (!r.isEmpty()) {
+ this->validate();
+ if (this->isEmpty()) {
+ GrAssert(fList.count() == 0);
+ fBounds = r;
+ } else {
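+ // A single-rect clip keeps fList empty; once a second rect arrives, push
+ // the original rect (still in fBounds) into the list before appending r.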
+ if (this->isRect()) {
+ *fList.append() = fBounds;
+ }
+ *fList.append() = r;
+ fBounds.unionWith(r);
+ }
+ this->validate();
+ }
+}
+
+void GrClip::setFromIterator(GrClipIterator* iter) {
+ this->setEmpty();
+ if (iter) {
+ for (iter->rewind(); !iter->isDone(); iter->next()) {
+ GrIRect r;
+ iter->getRect(&r);
+ this->addRect(r);
+ }
+ }
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+void GrClipIter::reset(const GrClip& clip) { fClip = &clip; fIndex = 0; }
+
+bool GrClipIter::isDone() {
+ return (NULL == fClip) || (fIndex >= fClip->countRects());
+}
+
+void GrClipIter::rewind() { fIndex = 0; }
+void GrClipIter::getRect(GrIRect* r) { *r = fClip->getRects()[fIndex]; }
+void GrClipIter::next() { fIndex += 1; }
+void GrClipIter::computeBounds(GrIRect* r) {
+ if (NULL == fClip) {
+ r->setEmpty();
+ } else {
+ *r = fClip->getBounds();
+ }
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+#if GR_DEBUG
+
+void GrClip::validate() const {
+ if (fBounds.isEmpty()) {
+ GrAssert(0 == fBounds.fLeft);
+ GrAssert(0 == fBounds.fTop);
+ GrAssert(0 == fBounds.fRight);
+ GrAssert(0 == fBounds.fBottom);
+ GrAssert(0 == fList.count());
+ } else {
+ int count = fList.count();
+ if (count > 0) {
+ GrAssert(count > 1);
+ GrAssert(!fList[0].isEmpty());
+ GrIRect bounds = fList[0];
+ for (int i = 1; i < count; i++) {
+ GrAssert(!fList[i].isEmpty());
+ bounds.unionWith(fList[i]);
+ }
+ GrAssert(fBounds == bounds);
+ }
+ }
+}
+
+#endif
+
diff --git a/gpu/src/GrContext.cpp b/gpu/src/GrContext.cpp
new file mode 100644
index 0000000000..6e94ef787f
--- /dev/null
+++ b/gpu/src/GrContext.cpp
@@ -0,0 +1,1040 @@
+/*
+ Copyright 2010 Google Inc.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+ */
+
+
+#include "GrContext.h"
+#include "GrTextureCache.h"
+#include "GrTextStrike.h"
+#include "GrMemory.h"
+#include "GrPathIter.h"
+#include "GrClipIterator.h"
+#include "GrIndexBuffer.h"
+
+#define DEFER_TEXT_RENDERING 1
+
+static const size_t MAX_TEXTURE_CACHE_COUNT = 128;
+static const size_t MAX_TEXTURE_CACHE_BYTES = 8 * 1024 * 1024;
+
+#if DEFER_TEXT_RENDERING
+ static const uint32_t POOL_VB_SIZE = 2048 *
+ GrDrawTarget::VertexSize(GrDrawTarget::kTextFormat_VertexLayoutBit);
+ static const uint32_t NUM_POOL_VBS = 8;
+#else
+ static const uint32_t POOL_VB_SIZE = 0;
+ static const uint32_t NUM_POOL_VBS = 0;
+
+#endif
+
+GrContext* GrContext::Create(GrGpu::Engine engine,
+ GrGpu::Platform3DContext context3D) {
+ GrContext* ctx = NULL;
+ GrGpu* fGpu = GrGpu::Create(engine, context3D);
+ if (NULL != fGpu) {
+ ctx = new GrContext(fGpu);
+ fGpu->unref();
+ }
+ return ctx;
+}
+
+GrContext::~GrContext() {
+ fGpu->unref();
+ delete fTextureCache;
+ delete fFontCache;
+}
+
+void GrContext::abandonAllTextures() {
+ fTextureCache->deleteAll(GrTextureCache::kAbandonTexture_DeleteMode);
+ fFontCache->abandonAll();
+}
+
+GrTextureEntry* GrContext::findAndLockTexture(GrTextureKey* key,
+ const GrSamplerState& sampler) {
+ finalizeTextureKey(key, sampler);
+ return fTextureCache->findAndLock(*key);
+}
+
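+// Nearest-neighbor stretch of a srcW x srcH image into a dstW x dstH buffer,
+// stepping through the source with 16.16 fixed-point increments; bpp is the
+// number of bytes per pixel.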
+static void stretchImage(void* dst,
+ int dstW,
+ int dstH,
+ void* src,
+ int srcW,
+ int srcH,
+ int bpp) {
+ GrFixed dx = (srcW << 16) / dstW;
+ GrFixed dy = (srcH << 16) / dstH;
+
+ GrFixed y = dy >> 1;
+
+ int dstXLimit = dstW*bpp;
+ for (int j = 0; j < dstH; ++j) {
+ GrFixed x = dx >> 1;
+ void* srcRow = (uint8_t*)src + (y>>16)*srcW*bpp;
+ void* dstRow = (uint8_t*)dst + j*dstW*bpp;
+ for (int i = 0; i < dstXLimit; i += bpp) {
+ memcpy((uint8_t*) dstRow + i,
+ (uint8_t*) srcRow + (x>>16)*bpp,
+ bpp);
+ x += dx;
+ }
+ y += dy;
+ }
+}
+
+GrTextureEntry* GrContext::createAndLockTexture(GrTextureKey* key,
+ const GrSamplerState& sampler,
+ const GrGpu::TextureDesc& desc,
+ void* srcData, size_t rowBytes) {
+ GrAssert(key->width() == desc.fWidth);
+ GrAssert(key->height() == desc.fHeight);
+
+#if GR_DUMP_TEXTURE_UPLOAD
+ GrPrintf("GrContext::createAndLockTexture [%d %d]\n", desc.fWidth, desc.fHeight);
+#endif
+
+ GrTextureEntry* entry = NULL;
+ bool special = finalizeTextureKey(key, sampler);
+ if (special) {
+ GrTextureEntry* clampEntry;
+ GrTextureKey clampKey(*key);
+ clampEntry = findAndLockTexture(&clampKey, GrSamplerState::ClampNoFilter());
+
+ if (NULL == clampEntry) {
+ clampEntry = createAndLockTexture(&clampKey,
+ GrSamplerState::ClampNoFilter(),
+ desc, srcData, rowBytes);
+ GrAssert(NULL != clampEntry);
+ if (NULL == clampEntry) {
+ return NULL;
+ }
+ }
+ GrTexture* clampTexture = clampEntry->texture();
+ GrGpu::TextureDesc rtDesc = desc;
+ rtDesc.fFlags |= GrGpu::kRenderTarget_TextureFlag |
+ GrGpu::kNoPathRendering_TextureFlag;
+ rtDesc.fWidth = GrNextPow2(GrMax<int>(desc.fWidth,
+ fGpu->minRenderTargetWidth()));
+ rtDesc.fHeight = GrNextPow2(GrMax<int>(desc.fHeight,
+ fGpu->minRenderTargetHeight()));
+
+ GrTexture* texture = fGpu->createTexture(rtDesc, NULL, 0);
+
+ if (NULL != texture) {
+ GrGpu::AutoStateRestore asr(fGpu);
+ fGpu->setRenderTarget(texture->asRenderTarget());
+ fGpu->setTexture(clampEntry->texture());
+ fGpu->setStencilPass(GrGpu::kNone_StencilPass);
+ fGpu->setTextureMatrix(GrMatrix::I());
+ fGpu->setViewMatrix(GrMatrix::I());
+ fGpu->setAlpha(0xff);
+ fGpu->setBlendFunc(GrGpu::kOne_BlendCoeff, GrGpu::kZero_BlendCoeff);
+ fGpu->disableState(GrGpu::kDither_StateBit |
+ GrGpu::kClip_StateBit |
+ GrGpu::kAntialias_StateBit);
+ GrSamplerState stretchSampler(GrSamplerState::kClamp_WrapMode,
+ GrSamplerState::kClamp_WrapMode,
+ sampler.isFilter());
+ fGpu->setSamplerState(stretchSampler);
+
+ static const GrVertexLayout layout =
+ GrDrawTarget::kSeparateTexCoord_VertexLayoutBit;
+ GrDrawTarget::AutoReleaseGeometry arg(fGpu, layout, 4, 0);
+
+ if (arg.succeeded()) {
+ GrPoint* verts = (GrPoint*) arg.vertices();
+ verts[0].setIRectFan(0, 0,
+ texture->contentWidth(),
+ texture->contentHeight(),
+ 2*sizeof(GrPoint));
+ GrScalar tw = GrFixedToScalar(GR_Fixed1 *
+ clampTexture->contentWidth() /
+ clampTexture->allocWidth());
+ GrScalar th = GrFixedToScalar(GR_Fixed1 *
+ clampTexture->contentHeight() /
+ clampTexture->allocHeight());
+ verts[1].setRectFan(0, 0, tw, th, 2*sizeof(GrPoint));
+ fGpu->drawNonIndexed(GrGpu::kTriangleFan_PrimitiveType,
+ 0, 4);
+ entry = fTextureCache->createAndLock(*key, texture);
+ }
+ texture->removeRenderTarget();
+ } else {
+ // TODO: Our CPU stretch doesn't filter, yet we still create separate
+ // stretched textures for the filtered and unfiltered sampler states.
+ // Either implement a filtered stretch blit on the CPU or create only
+ // one stretched copy when the FBO case fails.
+
+ rtDesc.fFlags = 0;
+ // no longer need to clamp at min RT size.
+ rtDesc.fWidth = GrNextPow2(desc.fWidth);
+ rtDesc.fHeight = GrNextPow2(desc.fHeight);
+ int bpp = GrTexture::BytesPerPixel(desc.fFormat);
+ GrAutoSMalloc<128*128*4> stretchedPixels(bpp *
+ rtDesc.fWidth *
+ rtDesc.fHeight);
+ stretchImage(stretchedPixels.get(), rtDesc.fWidth, rtDesc.fHeight,
+ srcData, desc.fWidth, desc.fHeight, bpp);
+
+ size_t stretchedRowBytes = rtDesc.fWidth * bpp;
+
+ GrTexture* texture = fGpu->createTexture(rtDesc,
+ stretchedPixels.get(),
+ stretchedRowBytes);
+ GrAssert(NULL != texture);
+ entry = fTextureCache->createAndLock(*key, texture);
+ }
+ fTextureCache->unlock(clampEntry);
+
+ } else {
+ GrTexture* texture = fGpu->createTexture(desc, srcData, rowBytes);
+ if (NULL != texture) {
+ entry = fTextureCache->createAndLock(*key, texture);
+ } else {
+ entry = NULL;
+ }
+ }
+ return entry;
+}
+
+void GrContext::unlockTexture(GrTextureEntry* entry) {
+ fTextureCache->unlock(entry);
+}
+
+void GrContext::detachCachedTexture(GrTextureEntry* entry) {
+ fTextureCache->detach(entry);
+}
+
+void GrContext::reattachAndUnlockCachedTexture(GrTextureEntry* entry) {
+ fTextureCache->reattachAndUnlock(entry);
+}
+
+GrTexture* GrContext::createUncachedTexture(const GrGpu::TextureDesc& desc,
+ void* srcData,
+ size_t rowBytes) {
+ return fGpu->createTexture(desc, srcData, rowBytes);
+}
+
+GrRenderTarget* GrContext::createPlatformRenderTarget(intptr_t platformRenderTarget,
+ int width, int height) {
+ return fGpu->createPlatformRenderTarget(platformRenderTarget,
+ width, height);
+}
+
+bool GrContext::supportsIndex8PixelConfig(const GrSamplerState& sampler,
+ int width, int height) {
+ if (!fGpu->supports8BitPalette()) {
+ return false;
+ }
+
+ bool needsRepeat = sampler.getWrapX() != GrSamplerState::kClamp_WrapMode ||
+ sampler.getWrapY() != GrSamplerState::kClamp_WrapMode;
+ bool isPow2 = GrIsPow2(width) && GrIsPow2(height);
+
+ switch (fGpu->npotTextureSupport()) {
+ case GrGpu::kNone_NPOTTextureType:
+ return isPow2;
+ case GrGpu::kNoRepeat_NPOTTextureType:
+ return isPow2 || !needsRepeat;
+ case GrGpu::kNonRendertarget_NPOTTextureType:
+ case GrGpu::kFull_NPOTTextureType:
+ return true;
+ }
+ // should never get here
+ GrAssert(!"Bad enum from fGpu->npotTextureSupport");
+ return false;
+}
+
+////////////////////////////////////////////////////////////////////////////////
+
+void GrContext::eraseColor(GrColor color) {
+ fGpu->eraseColor(color);
+}
+
+void GrContext::drawFull(bool useTexture) {
+ // set rect to be big enough to fill the space, but not super-huge, so we
+ // don't overflow fixed-point implementations
+ GrRect r(fGpu->getClip().getBounds());
+ GrMatrix inverse;
+ if (fGpu->getViewInverse(&inverse)) {
+ inverse.mapRect(&r);
+ } else {
+ GrPrintf("---- fGpu->getViewInverse failed\n");
+ }
+
+ this->fillRect(r, useTexture);
+}
+
+/* create a triangle strip that strokes the specified rect. There are 8
+ unique vertices, but we repeat the last 2 to close up. Alternatively we
+ could use an index array and then send only 8 verts, but it's not clear
+ that would be faster.
+ */
+static void setStrokeRectStrip(GrPoint verts[10], const GrRect& rect,
+ GrScalar width) {
+ const GrScalar rad = GrScalarHalf(width);
+
+ verts[0].set(rect.fLeft + rad, rect.fTop + rad);
+ verts[1].set(rect.fLeft - rad, rect.fTop - rad);
+ verts[2].set(rect.fRight - rad, rect.fTop + rad);
+ verts[3].set(rect.fRight + rad, rect.fTop - rad);
+ verts[4].set(rect.fRight - rad, rect.fBottom - rad);
+ verts[5].set(rect.fRight + rad, rect.fBottom + rad);
+ verts[6].set(rect.fLeft + rad, rect.fBottom - rad);
+ verts[7].set(rect.fLeft - rad, rect.fBottom + rad);
+ verts[8] = verts[0];
+ verts[9] = verts[1];
+}
+
+void GrContext::drawRect(const GrRect& rect, bool useTexture, GrScalar width) {
+ GrVertexLayout layout = useTexture ?
+ GrDrawTarget::kPositionAsTexCoord_VertexLayoutBit :
+ 0;
+
+ static const int worstCaseVertCount = 10;
+ GrDrawTarget::AutoReleaseGeometry geo(fGpu, layout, worstCaseVertCount, 0);
+ if (!geo.succeeded()) {
+ return;
+ }
+
+ this->flushText();
+
+ int vertCount;
+ GrGpu::PrimitiveType primType;
+ GrPoint* vertex = geo.positions();
+
+ if (width >= 0) {
+ if (width > 0) {
+ vertCount = 10;
+ primType = GrGpu::kTriangleStrip_PrimitiveType;
+ setStrokeRectStrip(vertex, rect, width);
+ } else {
+ // hairline
+ vertCount = 5;
+ primType = GrGpu::kLineStrip_PrimitiveType;
+ vertex[0].set(rect.fLeft, rect.fTop);
+ vertex[1].set(rect.fRight, rect.fTop);
+ vertex[2].set(rect.fRight, rect.fBottom);
+ vertex[3].set(rect.fLeft, rect.fBottom);
+ vertex[4].set(rect.fLeft, rect.fTop);
+ }
+ } else {
+ vertCount = 4;
+ primType = GrGpu::kTriangleFan_PrimitiveType;
+ vertex->setRectFan(rect.fLeft, rect.fTop, rect.fRight, rect.fBottom);
+ }
+
+ fGpu->drawNonIndexed(primType, 0, vertCount);
+}
+
+////////////////////////////////////////////////////////////////////////////////
+
+#define NEW_EVAL 1 // Use adaptive path tessellation
+#define STENCIL_OFF 0 // Always disable stencil (even when needed)
+#define CPU_TRANSFORM 0 // Transform path verts on CPU
+
+#if NEW_EVAL
+
+#define EVAL_TOL GR_Scalar1
+
+static uint32_t quadratic_point_count(const GrPoint points[], GrScalar tol) {
+ GrScalar d = points[1].distanceToLineSegmentBetween(points[0], points[2]);
+ // TODO: fixed-point sqrt
+ if (d < tol) {
+ return 1;
+ } else {
+ // Each time we subdivide, d should be cut in 4. So we need to
+ // subdivide x = log4(d/tol) times. x subdivisions create 2^(x)
+ // points.
+ // 2^(log4(x)) = sqrt(x);
+ d = ceilf(sqrtf(d/tol));
+ return GrNextPow2((uint32_t)d);
+ }
+}
+
+static uint32_t generate_quadratic_points(const GrPoint& p0,
+ const GrPoint& p1,
+ const GrPoint& p2,
+ GrScalar tolSqd,
+ GrPoint** points,
+ uint32_t pointsLeft) {
+ if (pointsLeft < 2 ||
+ (p1.distanceToLineSegmentBetweenSqd(p0, p2)) < tolSqd) {
+ (*points)[0] = p2;
+ *points += 1;
+ return 1;
+ }
+
+ GrPoint q[] = {
+ GrPoint(GrScalarAve(p0.fX, p1.fX), GrScalarAve(p0.fY, p1.fY)),
+ GrPoint(GrScalarAve(p1.fX, p2.fX), GrScalarAve(p1.fY, p2.fY)),
+ };
+ GrPoint r(GrScalarAve(q[0].fX, q[1].fX), GrScalarAve(q[0].fY, q[1].fY));
+
+ pointsLeft >>= 1;
+ uint32_t a = generate_quadratic_points(p0, q[0], r, tolSqd, points, pointsLeft);
+ uint32_t b = generate_quadratic_points(r, q[1], p2, tolSqd, points, pointsLeft);
+ return a + b;
+}
+
+static uint32_t cubic_point_count(const GrPoint points[], GrScalar tol) {
+ GrScalar d = GrMax(points[1].distanceToLineSegmentBetweenSqd(points[0], points[3]),
+ points[2].distanceToLineSegmentBetweenSqd(points[0], points[3]));
+ d = sqrtf(d);
+ if (d < tol) {
+ return 1;
+ } else {
+ d = ceilf(sqrtf(d/tol));
+ return GrNextPow2((uint32_t)d);
+ }
+}
+
+static uint32_t generate_cubic_points(const GrPoint& p0,
+ const GrPoint& p1,
+ const GrPoint& p2,
+ const GrPoint& p3,
+ GrScalar tolSqd,
+ GrPoint** points,
+ uint32_t pointsLeft) {
+ if (pointsLeft < 2 ||
+ (p1.distanceToLineSegmentBetweenSqd(p0, p3) < tolSqd &&
+ p2.distanceToLineSegmentBetweenSqd(p0, p3) < tolSqd)) {
+ (*points)[0] = p3;
+ *points += 1;
+ return 1;
+ }
+ GrPoint q[] = {
+ GrPoint(GrScalarAve(p0.fX, p1.fX), GrScalarAve(p0.fY, p1.fY)),
+ GrPoint(GrScalarAve(p1.fX, p2.fX), GrScalarAve(p1.fY, p2.fY)),
+ GrPoint(GrScalarAve(p2.fX, p3.fX), GrScalarAve(p2.fY, p3.fY))
+ };
+ GrPoint r[] = {
+ GrPoint(GrScalarAve(q[0].fX, q[1].fX), GrScalarAve(q[0].fY, q[1].fY)),
+ GrPoint(GrScalarAve(q[1].fX, q[2].fX), GrScalarAve(q[1].fY, q[2].fY))
+ };
+ GrPoint s(GrScalarAve(r[0].fX, r[1].fX), GrScalarAve(r[0].fY, r[1].fY));
+ pointsLeft >>= 1;
+ uint32_t a = generate_cubic_points(p0, q[0], r[0], s, tolSqd, points, pointsLeft);
+ uint32_t b = generate_cubic_points(s, r[1], q[2], p3, tolSqd, points, pointsLeft);
+ return a + b;
+}
+
+#else // !NEW_EVAL
+
+static GrScalar gr_eval_quad(const GrScalar coord[], GrScalar t) {
+ GrScalar A = coord[0] - 2 * coord[2] + coord[4];
+ GrScalar B = 2 * (coord[2] - coord[0]);
+ GrScalar C = coord[0];
+
+ return GrMul(GrMul(A, t) + B, t) + C;
+}
+
+static void gr_eval_quad_at(const GrPoint src[3], GrScalar t, GrPoint* pt) {
+ GrAssert(src);
+ GrAssert(pt);
+ GrAssert(t >= 0 && t <= GR_Scalar1);
+ pt->set(gr_eval_quad(&src[0].fX, t), gr_eval_quad(&src[0].fY, t));
+}
+
+static GrScalar gr_eval_cubic(const GrScalar coord[], GrScalar t) {
+ GrScalar A = coord[6] - coord[0] + 3 * (coord[2] - coord[4]);
+ GrScalar B = 3 * (coord[0] - 2 * coord[2] + coord[4]);
+ GrScalar C = 3 * (coord[2] - coord[0]);
+ GrScalar D = coord[0];
+
+ return GrMul(GrMul(GrMul(A, t) + B, t) + C, t) + D;
+}
+
+static void gr_eval_cubic_at(const GrPoint src[4], GrScalar t, GrPoint* pt) {
+ GrAssert(src);
+ GrAssert(pt);
+ GrAssert(t >= 0 && t <= GR_Scalar1);
+
+ pt->set(gr_eval_cubic(&src[0].fX, t), gr_eval_cubic(&src[0].fY, t));
+}
+
+#endif // !NEW_EVAL
+
+static int worst_case_point_count(GrPathIter* path,
+ int* subpaths,
+ const GrMatrix& matrix,
+ GrScalar tol) {
+ int pointCount = 0;
+ *subpaths = 1;
+
+ bool first = true;
+
+ GrPathIter::Command cmd;
+
+ GrPoint pts[4];
+ while ((cmd = path->next(pts)) != GrPathIter::kEnd_Command) {
+
+ switch (cmd) {
+ case GrPathIter::kLine_Command:
+ pointCount += 1;
+ break;
+ case GrPathIter::kQuadratic_Command:
+#if NEW_EVAL
+ matrix.mapPoints(pts, pts, 3);
+ pointCount += quadratic_point_count(pts, tol);
+#else
+ pointCount += 9;
+#endif
+ break;
+ case GrPathIter::kCubic_Command:
+#if NEW_EVAL
+ matrix.mapPoints(pts, pts, 4);
+ pointCount += cubic_point_count(pts, tol);
+#else
+ pointCount += 17;
+#endif
+ break;
+ case GrPathIter::kMove_Command:
+ pointCount += 1;
+ if (!first) {
+ ++(*subpaths);
+ }
+ break;
+ default:
+ break;
+ }
+ first = false;
+ }
+ return pointCount;
+}
+
+static inline bool single_pass_path(const GrPathIter& path,
+ GrContext::PathFills fill,
+ bool useTex,
+ const GrGpu& gpu) {
+#if STENCIL_OFF
+ return true;
+#else
+ if (GrContext::kEvenOdd_PathFill == fill) {
+ GrPathIter::ConvexHint hint = path.hint();
+ return hint == GrPathIter::kConvex_ConvexHint ||
+ hint == GrPathIter::kNonOverlappingConvexPieces_ConvexHint;
+ } else if (GrContext::kWinding_PathFill == fill) {
+ GrPathIter::ConvexHint hint = path.hint();
+ return hint == GrPathIter::kConvex_ConvexHint ||
+ hint == GrPathIter::kNonOverlappingConvexPieces_ConvexHint ||
+ (hint == GrPathIter::kSameWindingConvexPieces_ConvexHint &&
+ gpu.canDisableBlend() && !gpu.isDitherState());
+
+ }
+ return false;
+#endif
+}
+
+void GrContext::drawPath(GrPathIter* path, PathFills fill,
+ bool useTexture, const GrPoint* translate) {
+
+ flushText();
+
+ GrGpu::AutoStateRestore asr(fGpu);
+
+#if NEW_EVAL
+ GrMatrix viewM;
+ fGpu->getViewMatrix(&viewM);
+ // In order to tessellate the path we get a bound on how much the matrix can
+ // stretch when mapping to screen coordinates.
+ GrScalar stretch = viewM.getMaxStretch();
+ bool useStretch = stretch > 0;
+ GrScalar tol = EVAL_TOL;
+ if (!useStretch) {
+ // TODO: deal with perspective in some better way.
+ tol /= 10;
+ } else {
+ // TODO: fixed point divide
+ GrScalar sinv = 1 / stretch;
+ tol = GrMul(tol, sinv);
+ viewM = GrMatrix::I();
+ }
+ GrScalar tolSqd = GrMul(tol, tol);
+#else
+ // pass to worst_case... but won't be used.
+ static const GrScalar tol = -1;
+#endif
+
+ int subpathCnt;
+ int maxPts = worst_case_point_count(path,
+ &subpathCnt,
+#if CPU_TRANSFORM
+ cpuMatrix,
+#else
+ GrMatrix::I(),
+#endif
+ tol);
+ GrVertexLayout layout = 0;
+ if (useTexture) {
+ layout = GrDrawTarget::kPositionAsTexCoord_VertexLayoutBit;
+ }
+ // add 4 to hold the bounding rect
+ GrDrawTarget::AutoReleaseGeometry arg(fGpu, layout, maxPts + 4, 0);
+
+ GrPoint* base = (GrPoint*) arg.vertices();
+ GrPoint* vert = base;
+ GrPoint* subpathBase = base;
+
+ GrAutoSTMalloc<8, uint16_t> subpathVertCount(subpathCnt);
+
+ path->rewind();
+
+ // TODO: use primitive restart if available rather than multiple draws
+ GrGpu::PrimitiveType type;
+ int passCount = 0;
+ GrGpu::StencilPass passes[3];
+ bool reverse = false;
+
+ if (kHairLine_PathFill == fill) {
+ type = GrGpu::kLineStrip_PrimitiveType;
+ passCount = 1;
+ passes[0] = GrGpu::kNone_StencilPass;
+ } else {
+ type = GrGpu::kTriangleFan_PrimitiveType;
+ if (single_pass_path(*path, fill, useTexture, *fGpu)) {
+ passCount = 1;
+ passes[0] = GrGpu::kNone_StencilPass;
+ } else {
+ switch (fill) {
+ case kInverseEvenOdd_PathFill:
+ reverse = true;
+ // fallthrough
+ case kEvenOdd_PathFill:
+ passCount = 2;
+ passes[0] = GrGpu::kEvenOddStencil_StencilPass;
+ passes[1] = GrGpu::kEvenOddColor_StencilPass;
+ break;
+
+ case kInverseWinding_PathFill:
+ reverse = true;
+ // fallthrough
+ case kWinding_PathFill:
+ passes[0] = GrGpu::kWindingStencil1_StencilPass;
+ if (fGpu->supportsSingleStencilPassWinding()) {
+ passes[1] = GrGpu::kWindingColor_StencilPass;
+ passCount = 2;
+ } else {
+ passes[1] = GrGpu::kWindingStencil2_StencilPass;
+ passes[2] = GrGpu::kWindingColor_StencilPass;
+ passCount = 3;
+ }
+ break;
+ default:
+ GrAssert(!"Unknown path fill!");
+ return;
+ }
+ }
+ }
+ fGpu->setReverseFill(reverse);
+#if CPU_TRANSFORM
+ GrMatrix cpuMatrix;
+ fGpu->getViewMatrix(&cpuMatrix);
+ fGpu->setViewMatrix(GrMatrix::I());
+#endif
+
+ GrPoint pts[4];
+
+ bool first = true;
+ int subpath = 0;
+
+ for (;;) {
+ GrPathIter::Command cmd = path->next(pts);
+#if CPU_TRANSFORM
+ int numPts = GrPathIter::NumCommandPoints(cmd);
+ cpuMatrix.mapPoints(pts, pts, numPts);
+#endif
+ switch (cmd) {
+ case GrPathIter::kMove_Command:
+ if (!first) {
+ subpathVertCount[subpath] = vert-subpathBase;
+ subpathBase = vert;
+ ++subpath;
+ }
+ *vert = pts[0];
+ vert++;
+ break;
+ case GrPathIter::kLine_Command:
+ *vert = pts[1];
+ vert++;
+ break;
+ case GrPathIter::kQuadratic_Command: {
+#if NEW_EVAL
+
+ generate_quadratic_points(pts[0], pts[1], pts[2],
+ tolSqd, &vert,
+ quadratic_point_count(pts, tol));
+#else
+ const int n = 8;
+ const GrScalar dt = GR_Scalar1 / n;
+ GrScalar t = dt;
+ for (int i = 1; i < n; i++) {
+ gr_eval_quad_at(pts, t, (GrPoint*)vert);
+ t += dt;
+ vert++;
+ }
+ vert->set(pts[2].fX, pts[2].fY);
+ vert++;
+#endif
+ break;
+ }
+ case GrPathIter::kCubic_Command: {
+#if NEW_EVAL
+ generate_cubic_points(pts[0], pts[1], pts[2], pts[3],
+ tolSqd, &vert,
+ cubic_point_count(pts, tol));
+#else
+ const int n = 16;
+ const GrScalar dt = GR_Scalar1 / n;
+ GrScalar t = dt;
+ for (int i = 1; i < n; i++) {
+ gr_eval_cubic_at(pts, t, (GrPoint*)vert);
+ t += dt;
+ vert++;
+ }
+ vert->set(pts[3].fX, pts[3].fY);
+ vert++;
+#endif
+ break;
+ }
+ case GrPathIter::kClose_Command:
+ break;
+ case GrPathIter::kEnd_Command:
+ subpathVertCount[subpath] = vert-subpathBase;
+ ++subpath; // this increment could be debug-only; it only feeds the assert below
+ goto FINISHED;
+ }
+ first = false;
+ }
+FINISHED:
+ GrAssert(subpath == subpathCnt);
+ GrAssert((vert - base) <= maxPts);
+
+ if (translate) {
+ int count = vert - base;
+ for (int i = 0; i < count; i++) {
+ base[i].offset(translate->fX, translate->fY);
+ }
+ }
+
+ // arbitrary path complexity cutoff
+ bool useBounds = fill != kHairLine_PathFill &&
+ (reverse || (vert - base) > 8);
+ GrPoint* boundsVerts = base + maxPts;
+ if (useBounds) {
+ GrRect bounds;
+ if (reverse) {
+ GrAssert(NULL != fGpu->currentRenderTarget());
+ // draw over the whole world.
+ bounds.setLTRB(0, 0,
+ GrIntToScalar(fGpu->currentRenderTarget()->width()),
+ GrIntToScalar(fGpu->currentRenderTarget()->height()));
+ } else {
+ bounds.setBounds((GrPoint*)base, vert - base);
+ }
+ boundsVerts[0].setRectFan(bounds.fLeft, bounds.fTop, bounds.fRight,
+ bounds.fBottom);
+ }
+
+ for (int p = 0; p < passCount; ++p) {
+ fGpu->setStencilPass(passes[p]);
+ if (useBounds && (GrGpu::kEvenOddColor_StencilPass == passes[p] ||
+ GrGpu::kWindingColor_StencilPass == passes[p])) {
+ fGpu->drawNonIndexed(GrGpu::kTriangleFan_PrimitiveType,
+ maxPts, 4);
+ } else {
+ int baseVertex = 0;
+ for (int sp = 0; sp < subpathCnt; ++sp) {
+ fGpu->drawNonIndexed(type,
+ baseVertex,
+ subpathVertCount[sp]);
+ baseVertex += subpathVertCount[sp];
+ }
+ }
+ }
+}
+
+void GrContext::flush(bool flushRenderTarget) {
+ flushText();
+ if (flushRenderTarget) {
+ fGpu->forceRenderTargetFlush();
+ }
+}
+
+void GrContext::flushText() {
+ fTextDrawBuffer.playback(fGpu);
+ fTextDrawBuffer.reset();
+}
+
+bool GrContext::readPixels(int left, int top, int width, int height,
+ GrTexture::PixelConfig config, void* buffer) {
+ this->flush(true);
+ return fGpu->readPixels(left, top, width, height, config, buffer);
+}
+
+void GrContext::writePixels(int left, int top, int width, int height,
+ GrTexture::PixelConfig config, const void* buffer,
+ size_t stride) {
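+    // Upload the data into a temporary texture, then draw that texture over
+    // the destination rect with a One/Zero blend so the texels replace the
+    // render target contents.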
+ const GrGpu::TextureDesc desc = {
+ 0, GrGpu::kNone_AALevel, width, height, config
+ };
+ GrTexture* texture = fGpu->createTexture(desc, buffer, stride);
+ if (NULL == texture) {
+ return;
+ }
+
+ this->flush(true);
+
+ GrAutoUnref aur(texture);
+ GrDrawTarget::AutoStateRestore asr(fGpu);
+
+ GrMatrix matrix;
+ matrix.setTranslate(GrIntToScalar(left), GrIntToScalar(top));
+ fGpu->setViewMatrix(matrix);
+ matrix.setScale(GR_Scalar1 / texture->allocWidth(),
+ GR_Scalar1 / texture->allocHeight());
+ fGpu->setTextureMatrix(matrix);
+
+ fGpu->disableState(GrDrawTarget::kClip_StateBit);
+ fGpu->setAlpha(0xFF);
+ fGpu->setBlendFunc(GrDrawTarget::kOne_BlendCoeff,
+ GrDrawTarget::kZero_BlendCoeff);
+ fGpu->setTexture(texture);
+ fGpu->setSamplerState(GrSamplerState::ClampNoFilter());
+
+ this->fillRect(GrRect(0, 0, GrIntToScalar(width), GrIntToScalar(height)),
+ true);
+}
+
+////////////////////////////////////////////////////////////////////////////////
+
+
+/* -------------------------------------------------------
+ * Mimicking the GrGpu interface for now
+ * TODO: define appropriate higher-level API for context
+ */
+
+void GrContext::resetContext() {
+ fGpu->resetContext();
+}
+
+GrVertexBuffer* GrContext::createVertexBuffer(uint32_t size, bool dynamic) {
+ return fGpu->createVertexBuffer(size, dynamic);
+}
+
+GrIndexBuffer* GrContext::createIndexBuffer(uint32_t size, bool dynamic) {
+ return fGpu->createIndexBuffer(size, dynamic);
+}
+
+void GrContext::setTexture(GrTexture* texture) {
+ fGpu->setTexture(texture);
+}
+
+void GrContext::setRenderTarget(GrRenderTarget* target) {
+ flushText();
+ fGpu->setRenderTarget(target);
+}
+
+GrRenderTarget* GrContext::currentRenderTarget() const {
+ return fGpu->currentRenderTarget();
+}
+
+void GrContext::setDefaultRenderTargetSize(uint32_t width, uint32_t height) {
+ fGpu->setDefaultRenderTargetSize(width, height);
+}
+
+void GrContext::setSamplerState(const GrSamplerState& samplerState) {
+ fGpu->setSamplerState(samplerState);
+}
+
+void GrContext::setTextureMatrix(const GrMatrix& m) {
+ fGpu->setTextureMatrix(m);
+}
+
+void GrContext::getViewMatrix(GrMatrix* m) const {
+ fGpu->getViewMatrix(m);
+}
+
+void GrContext::setViewMatrix(const GrMatrix& m) {
+ fGpu->setViewMatrix(m);
+}
+
+bool GrContext::reserveAndLockGeometry(GrVertexLayout vertexLayout,
+ uint32_t vertexCount,
+ uint32_t indexCount,
+ void** vertices,
+ void** indices) {
+ return fGpu->reserveAndLockGeometry(vertexLayout,
+ vertexCount,
+ indexCount,
+ vertices,
+ indices);
+}
+
+void GrContext::drawIndexed(GrGpu::PrimitiveType type,
+ uint32_t startVertex,
+ uint32_t startIndex,
+ uint32_t vertexCount,
+ uint32_t indexCount) {
+ flushText();
+ fGpu->drawIndexed(type,
+ startVertex,
+ startIndex,
+ vertexCount,
+ indexCount);
+}
+
+void GrContext::drawNonIndexed(GrGpu::PrimitiveType type,
+ uint32_t startVertex,
+ uint32_t vertexCount) {
+ flushText();
+ fGpu->drawNonIndexed(type,
+ startVertex,
+ vertexCount);
+}
+
+void GrContext::setVertexSourceToArray(const void* array,
+ GrVertexLayout vertexLayout) {
+ fGpu->setVertexSourceToArray(array, vertexLayout);
+}
+
+void GrContext::setIndexSourceToArray(const void* array) {
+ fGpu->setIndexSourceToArray(array);
+}
+
+void GrContext::setVertexSourceToBuffer(GrVertexBuffer* buffer,
+ GrVertexLayout vertexLayout) {
+ fGpu->setVertexSourceToBuffer(buffer, vertexLayout);
+}
+
+void GrContext::setIndexSourceToBuffer(GrIndexBuffer* buffer) {
+ fGpu->setIndexSourceToBuffer(buffer);
+}
+
+void GrContext::releaseReservedGeometry() {
+ fGpu->releaseReservedGeometry();
+}
+
+void GrContext::setClip(const GrClip& clip) {
+ fGpu->setClip(clip);
+ fGpu->enableState(GrDrawTarget::kClip_StateBit);
+}
+
+void GrContext::setAlpha(uint8_t alpha) {
+ fGpu->setAlpha(alpha);
+}
+
+void GrContext::setColor(GrColor color) {
+ fGpu->setColor(color);
+}
+
+static inline intptr_t setOrClear(intptr_t bits, int shift, intptr_t pred) {
+ intptr_t mask = 1 << shift;
+ if (pred) {
+ bits |= mask;
+ } else {
+ bits &= ~mask;
+ }
+ return bits;
+}
+
+void GrContext::setAntiAlias(bool aa) {
+ if (aa) {
+ fGpu->enableState(GrGpu::kAntialias_StateBit);
+ } else {
+ fGpu->disableState(GrGpu::kAntialias_StateBit);
+ }
+}
+
+void GrContext::setDither(bool dither) {
+    // hack for now: dithering is disabled because it is very slow on iPad
+ dither = false;
+
+ if (dither) {
+ fGpu->enableState(GrGpu::kDither_StateBit);
+ } else {
+ fGpu->disableState(GrGpu::kDither_StateBit);
+ }
+}
+
+void GrContext::setPointSize(float size) {
+ fGpu->setPointSize(size);
+}
+
+void GrContext::setBlendFunc(GrGpu::BlendCoeff srcCoef,
+ GrGpu::BlendCoeff dstCoef) {
+ fGpu->setBlendFunc(srcCoef, dstCoef);
+}
+
+void GrContext::resetStats() {
+ fGpu->resetStats();
+}
+
+const GrGpu::Stats& GrContext::getStats() const {
+ return fGpu->getStats();
+}
+
+void GrContext::printStats() const {
+ fGpu->printStats();
+}
+
+GrContext::GrContext(GrGpu* gpu) :
+ fVBAllocPool(gpu,
+ gpu->supportsBufferLocking() ? POOL_VB_SIZE : 0,
+ gpu->supportsBufferLocking() ? NUM_POOL_VBS : 0),
+ fTextDrawBuffer(gpu->supportsBufferLocking() ? &fVBAllocPool : NULL) {
+ fGpu = gpu;
+ fGpu->ref();
+ fTextureCache = new GrTextureCache(MAX_TEXTURE_CACHE_COUNT,
+ MAX_TEXTURE_CACHE_BYTES);
+ fFontCache = new GrFontCache(fGpu);
+}
+
+bool GrContext::finalizeTextureKey(GrTextureKey* key,
+ const GrSamplerState& sampler) const {
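+    // When NPOT texture support is limited, a non-power-of-two texture that
+    // needs non-clamp wrapping gets extra key bits (one more if filtered) so
+    // it is keyed separately; the return value reports whether that happened.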
+ uint32_t bits = 0;
+ uint16_t width = key->width();
+ uint16_t height = key->height();
+ if (fGpu->npotTextureSupport() < GrGpu::kNonRendertarget_NPOTTextureType) {
+ if ((sampler.getWrapX() != GrSamplerState::kClamp_WrapMode ||
+ sampler.getWrapY() != GrSamplerState::kClamp_WrapMode) &&
+ (!GrIsPow2(width) || !GrIsPow2(height))) {
+ bits |= 1;
+ bits |= sampler.isFilter() ? 2 : 0;
+ }
+ }
+ key->finalize(bits);
+ return 0 != bits;
+}
+
+GrDrawTarget* GrContext::getTextTarget() {
+#if DEFER_TEXT_RENDERING
+ fTextDrawBuffer.initializeDrawStateAndClip(*fGpu);
+ return &fTextDrawBuffer;
+#else
+ return fGpu;
+#endif
+}
+
+const GrIndexBuffer* GrContext::quadIndexBuffer() const {
+ return fGpu->quadIndexBuffer();
+}
+
+int GrContext::maxQuadsInIndexBuffer() const {
+ return fGpu->maxQuadsInIndexBuffer();
+}
+
+
+
+
diff --git a/gpu/src/GrDrawMesh.cpp b/gpu/src/GrDrawMesh.cpp
new file mode 100644
index 0000000000..bd79005762
--- /dev/null
+++ b/gpu/src/GrDrawMesh.cpp
@@ -0,0 +1,140 @@
+#include "GrMesh.h"
+#include "SkCanvas.h"
+
+GrMesh::GrMesh() : fPts(NULL), fCount(0), fIndices(NULL), fIndexCount(0) {}
+
+GrMesh::~GrMesh() {
+ delete[] fPts;
+ delete[] fIndices;
+}
+
+GrMesh& GrMesh::operator=(const GrMesh& src) {
+    if (this == &src) {
+        return *this;   // guard against self-assignment freeing our own data
+    }
+    delete[] fPts;
+    delete[] fIndices;
+
+ fBounds = src.fBounds;
+ fRows = src.fRows;
+ fCols = src.fCols;
+
+ fCount = src.fCount;
+ fPts = new SkPoint[fCount * 2];
+ fTex = fPts + fCount;
+ memcpy(fPts, src.fPts, fCount * 2 * sizeof(SkPoint));
+
+    fIndexCount = src.fIndexCount;
+ fIndices = new uint16_t[fIndexCount];
+ memcpy(fIndices, src.fIndices, fIndexCount * sizeof(uint16_t));
+
+ return *this;
+}
+
+void GrMesh::init(const SkRect& bounds, int rows, int cols,
+ const SkRect& texture) {
+ SkASSERT(rows > 0 && cols > 0);
+
+ fBounds = bounds;
+ fRows = rows;
+ fCols = cols;
+
+ delete[] fPts;
+ fCount = (rows + 1) * (cols + 1);
+ fPts = new SkPoint[fCount * 2];
+ fTex = fPts + fCount;
+
+ delete[] fIndices;
+ fIndexCount = rows * cols * 6;
+ fIndices = new uint16_t[fIndexCount];
+
+ SkPoint* pts = fPts;
+ const SkScalar dx = bounds.width() / rows;
+ const SkScalar dy = bounds.height() / cols;
+ SkPoint* tex = fTex;
+ const SkScalar dtx = texture.width() / rows;
+ const SkScalar dty = texture.height() / cols;
+ uint16_t* idx = fIndices;
+ int index = 0;
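+    // Lay out a (rows + 1) x (cols + 1) grid of positions and texture
+    // coordinates; each of the rows * cols cells emits two triangles
+    // (six indices).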
+ for (int y = 0; y <= cols; y++) {
+ for (int x = 0; x <= rows; x++) {
+ pts->set(bounds.fLeft + x*dx, bounds.fTop + y*dy);
+ pts += 1;
+ tex->set(texture.fLeft + x*dtx, texture.fTop + y*dty);
+ tex += 1;
+
+ if (y < cols && x < rows) {
+ *idx++ = index;
+ *idx++ = index + rows + 1;
+ *idx++ = index + 1;
+
+ *idx++ = index + 1;
+ *idx++ = index + rows + 1;
+ *idx++ = index + rows + 2;
+
+ index += 1;
+ }
+ }
+ index += 1;
+ }
+}
+
+void GrMesh::draw(SkCanvas* canvas, const SkPaint& paint) {
+ canvas->drawVertices(SkCanvas::kTriangles_VertexMode, fCount,
+ fPts, fTex, NULL, NULL, fIndices, fIndexCount,
+ paint);
+}
+
+/////////////////////////////////////////////
+
+#include "SkBoundaryPatch.h"
+#include "SkMeshUtils.h"
+
+static SkPoint SkPointInterp(const SkPoint& a, const SkPoint& b, SkScalar t) {
+ return SkPoint::Make(SkScalarInterp(a.fX, b.fX, t),
+ SkScalarInterp(a.fY, b.fY, t));
+}
+
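+// Builds a cubic from (x0,y0) to (x3,y3) whose interior control points are
+// offset perpendicular to the chord (rotated clockwise, scaled by 'scale'),
+// bulging the edge outward or inward depending on the sign of 'scale'.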
+static void set_cubic(SkPoint pts[4], SkScalar x0, SkScalar y0,
+ SkScalar x3, SkScalar y3, SkScalar scale = 1) {
+ SkPoint tmp, tmp2;
+
+ pts[0].set(x0, y0);
+ pts[3].set(x3, y3);
+
+ tmp = SkPointInterp(pts[0], pts[3], SK_Scalar1/3);
+ tmp2 = pts[0] - tmp;
+ tmp2.rotateCW();
+ tmp2.scale(scale);
+ pts[1] = tmp + tmp2;
+
+ tmp = SkPointInterp(pts[0], pts[3], 2*SK_Scalar1/3);
+ tmp2 = pts[3] - tmp;
+ tmp2.rotateCW();
+ tmp2.scale(scale);
+ pts[2] = tmp + tmp2;
+}
+
+void test_patch(SkCanvas* canvas, const SkBitmap& bm, SkScalar scale) {
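+    // Surround the bitmap with four bulged cubic edges, evaluate the boundary
+    // patch on a 16x16 grid, and draw the bitmap through the resulting mesh.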
+ const float w = bm.width();
+ const float h = bm.height();
+ SkCubicBoundary cubic;
+ set_cubic(cubic.fPts + 0, 0, 0, w, 0, scale);
+ set_cubic(cubic.fPts + 3, w, 0, w, h, scale);
+ set_cubic(cubic.fPts + 6, w, h, 0, h, -scale);
+ set_cubic(cubic.fPts + 9, 0, h, 0, 0, scale);
+
+ SkBoundaryPatch patch;
+ patch.setBoundary(&cubic);
+
+ const int Rows = 16;
+ const int Cols = 16;
+ SkPoint pts[Rows * Cols];
+ patch.evalPatch(pts, Rows, Cols);
+
+ SkPaint paint;
+ paint.setAntiAlias(true);
+ paint.setFilterBitmap(true);
+
+ SkMeshUtils::Draw(canvas, bm, Rows, Cols, pts, NULL, paint);
+}
+
+
diff --git a/gpu/src/GrDrawTarget.cpp b/gpu/src/GrDrawTarget.cpp
new file mode 100644
index 0000000000..82f94a35df
--- /dev/null
+++ b/gpu/src/GrDrawTarget.cpp
@@ -0,0 +1,296 @@
+/*
+ Copyright 2010 Google Inc.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+ */
+
+
+#include "GrDrawTarget.h"
+#include "GrGpuVertex.h"
+
+#define VERTEX_LAYOUT_ASSERTS \
+ GrAssert(!(vertexLayout & kTextFormat_VertexLayoutBit) || \
+ vertexLayout == kTextFormat_VertexLayoutBit); \
+ GrAssert(!(vertexLayout & kSeparateTexCoord_VertexLayoutBit) || \
+ !(vertexLayout & kPositionAsTexCoord_VertexLayoutBit));
+
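+// A vertex is either the two-element text format or a position optionally
+// followed by a separate texture coordinate and a packed color; the helpers
+// below compute the resulting stride and component offsets.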
+size_t GrDrawTarget::VertexSize(GrVertexLayout vertexLayout) {
+ VERTEX_LAYOUT_ASSERTS
+ if ((vertexLayout & kTextFormat_VertexLayoutBit)) {
+ return 2 * sizeof(GrGpuTextVertex);
+ } else {
+ size_t size = sizeof(GrPoint);
+ if (vertexLayout & kSeparateTexCoord_VertexLayoutBit) {
+ size += sizeof(GrPoint);
+ }
+ if (vertexLayout & kColor_VertexLayoutBit) {
+ size += sizeof(GrColor);
+ }
+ return size;
+ }
+}
+
+int GrDrawTarget::VertexTexCoordOffset(GrVertexLayout vertexLayout) {
+ VERTEX_LAYOUT_ASSERTS
+ if ((vertexLayout & kTextFormat_VertexLayoutBit)) {
+ return sizeof(GrGpuTextVertex);
+ } else if (vertexLayout & kSeparateTexCoord_VertexLayoutBit) {
+ return sizeof(GrPoint);
+ } else if (vertexLayout & kPositionAsTexCoord_VertexLayoutBit) {
+ return 0;
+ }
+ return -1;
+}
+
+int GrDrawTarget::VertexColorOffset(GrVertexLayout vertexLayout) {
+ VERTEX_LAYOUT_ASSERTS
+ if (vertexLayout & kColor_VertexLayoutBit) {
+ if (vertexLayout & kSeparateTexCoord_VertexLayoutBit) {
+ return 2 * sizeof(GrPoint);
+ } else {
+ return sizeof(GrPoint);
+ }
+ }
+ return -1;
+}
+
+int GrDrawTarget::VertexSizeAndOffsets(GrVertexLayout vertexLayout,
+ int* texCoordOffset,
+ int* colorOffset) {
+ VERTEX_LAYOUT_ASSERTS
+
+ GrAssert(NULL != texCoordOffset);
+ GrAssert(NULL != colorOffset);
+
+ if ((vertexLayout & kTextFormat_VertexLayoutBit)) {
+ *texCoordOffset = sizeof(GrGpuTextVertex);
+ *colorOffset = 0;
+ return 2 * sizeof(GrGpuTextVertex);
+ } else {
+ size_t size = sizeof(GrPoint);
+ if (vertexLayout & kSeparateTexCoord_VertexLayoutBit) {
+ *texCoordOffset = sizeof(GrPoint);
+ size += sizeof(GrPoint);
+ } else if (vertexLayout & kPositionAsTexCoord_VertexLayoutBit) {
+ *texCoordOffset = 0;
+ } else {
+ *texCoordOffset = -1;
+ }
+ if (vertexLayout & kColor_VertexLayoutBit) {
+ *colorOffset = size;
+ size += sizeof(GrColor);
+ } else {
+ *colorOffset = -1;
+ }
+ return size;
+ }
+}
+
+bool GrDrawTarget::VertexHasTexCoords(GrVertexLayout vertexLayout) {
+ return !!(vertexLayout & (kSeparateTexCoord_VertexLayoutBit |
+ kPositionAsTexCoord_VertexLayoutBit |
+ kTextFormat_VertexLayoutBit));
+}
+
+////////////////////////////////////////////////////////////////////////////////
+
+GrDrawTarget::GrDrawTarget() {
+ fReservedGeometry.fLocked = false;
+#if GR_DEBUG
+ fReservedGeometry.fVertexCount = ~0;
+ fReservedGeometry.fIndexCount = ~0;
+#endif
+ fGeometrySrc.fVertexSrc = kReserved_GeometrySrcType;
+ fGeometrySrc.fIndexSrc = kReserved_GeometrySrcType;
+}
+
+void GrDrawTarget::setClip(const GrClip& clip) {
+ clipWillChange(clip);
+ fClip = clip;
+}
+
+const GrClip& GrDrawTarget::getClip() const {
+ return fClip;
+}
+
+void GrDrawTarget::setTexture(GrTexture* tex) {
+ fCurrDrawState.fTexture = tex;
+}
+
+GrTexture* GrDrawTarget::currentTexture() const {
+ return fCurrDrawState.fTexture;
+}
+
+void GrDrawTarget::setRenderTarget(GrRenderTarget* target) {
+ fCurrDrawState.fRenderTarget = target;
+}
+
+GrRenderTarget* GrDrawTarget::currentRenderTarget() const {
+ return fCurrDrawState.fRenderTarget;
+}
+
+void GrDrawTarget::concatViewMatrix(const GrMatrix& matrix) {
+ GrMatrix mv;
+ mv.setConcat(fCurrDrawState.fMatrixModeCache[kModelView_MatrixMode], matrix);
+ this->loadMatrix(mv, kModelView_MatrixMode);
+}
+
+void GrDrawTarget::getViewMatrix(GrMatrix* matrix) const {
+ *matrix = fCurrDrawState.fMatrixModeCache[kModelView_MatrixMode];
+}
+
+bool GrDrawTarget::getViewInverse(GrMatrix* matrix) const {
+ // Can we cache this somewhere?
+
+ GrMatrix inverse;
+ if (fCurrDrawState.fMatrixModeCache[kModelView_MatrixMode].invert(&inverse)) {
+ if (matrix) {
+ *matrix = inverse;
+ }
+ return true;
+ }
+ return false;
+}
+
+void GrDrawTarget::setSamplerState(const GrSamplerState& state) {
+ fCurrDrawState.fSamplerState = state;
+}
+
+void GrDrawTarget::setStencilPass(StencilPass pass) {
+ fCurrDrawState.fStencilPass = pass;
+}
+
+void GrDrawTarget::setReverseFill(bool reverse) {
+ fCurrDrawState.fReverseFill = reverse;
+}
+
+void GrDrawTarget::enableState(uint32_t bits) {
+ fCurrDrawState.fFlagBits |= bits;
+}
+
+void GrDrawTarget::disableState(uint32_t bits) {
+ fCurrDrawState.fFlagBits &= ~(bits);
+}
+
+void GrDrawTarget::loadMatrix(const GrMatrix& matrix, MatrixMode m) {
+ fCurrDrawState.fMatrixModeCache[m] = matrix;
+}
+
+void GrDrawTarget::setPointSize(float size) {
+ fCurrDrawState.fPointSize = size;
+}
+
+void GrDrawTarget::setBlendFunc(BlendCoeff srcCoef,
+ BlendCoeff dstCoef) {
+ fCurrDrawState.fSrcBlend = srcCoef;
+ fCurrDrawState.fDstBlend = dstCoef;
+}
+
+void GrDrawTarget::setColor(GrColor c) {
+ fCurrDrawState.fColor = c;
+}
+
+void GrDrawTarget::setAlpha(uint8_t a) {
+ this->setColor((a << 24) | (a << 16) | (a << 8) | a);
+}
+
+void GrDrawTarget::saveCurrentDrawState(SavedDrawState* state) const {
+ state->fState = fCurrDrawState;
+}
+
+void GrDrawTarget::restoreDrawState(const SavedDrawState& state) {
+ fCurrDrawState = state.fState;
+}
+
+void GrDrawTarget::copyDrawState(const GrDrawTarget& srcTarget) {
+ fCurrDrawState = srcTarget.fCurrDrawState;
+}
+
+
+bool GrDrawTarget::reserveAndLockGeometry(GrVertexLayout vertexLayout,
+ uint32_t vertexCount,
+ uint32_t indexCount,
+ void** vertices,
+ void** indices) {
+ GrAssert(!fReservedGeometry.fLocked);
+ fReservedGeometry.fVertexCount = vertexCount;
+ fReservedGeometry.fIndexCount = indexCount;
+
+ fReservedGeometry.fLocked = acquireGeometryHelper(vertexLayout,
+ vertices,
+ indices);
+ if (fReservedGeometry.fLocked) {
+ if (vertexCount) {
+ fGeometrySrc.fVertexSrc = kReserved_GeometrySrcType;
+ fGeometrySrc.fVertexLayout = vertexLayout;
+ }
+ if (indexCount) {
+ fGeometrySrc.fIndexSrc = kReserved_GeometrySrcType;
+ }
+ }
+ return fReservedGeometry.fLocked;
+}
+
+bool GrDrawTarget::geometryHints(GrVertexLayout vertexLayout,
+ int32_t* vertexCount,
+ int32_t* indexCount) const {
+ GrAssert(!fReservedGeometry.fLocked);
+ if (NULL != vertexCount) {
+ *vertexCount = -1;
+ }
+ if (NULL != indexCount) {
+ *indexCount = -1;
+ }
+ return false;
+}
+
+void GrDrawTarget::releaseReservedGeometry() {
+ GrAssert(fReservedGeometry.fLocked);
+ releaseGeometryHelper();
+ fReservedGeometry.fLocked = false;
+}
+
+void GrDrawTarget::setVertexSourceToArray(const void* array,
+ GrVertexLayout vertexLayout) {
+ fGeometrySrc.fVertexSrc = kArray_GeometrySrcType;
+ fGeometrySrc.fVertexArray = array;
+ fGeometrySrc.fVertexLayout = vertexLayout;
+}
+
+void GrDrawTarget::setIndexSourceToArray(const void* array) {
+ fGeometrySrc.fIndexSrc = kArray_GeometrySrcType;
+ fGeometrySrc.fIndexArray = array;
+}
+
+void GrDrawTarget::setVertexSourceToBuffer(const GrVertexBuffer* buffer,
+ GrVertexLayout vertexLayout) {
+ fGeometrySrc.fVertexSrc = kBuffer_GeometrySrcType;
+ fGeometrySrc.fVertexBuffer = buffer;
+ fGeometrySrc.fVertexLayout = vertexLayout;
+}
+
+void GrDrawTarget::setIndexSourceToBuffer(const GrIndexBuffer* buffer) {
+ fGeometrySrc.fIndexSrc = kBuffer_GeometrySrcType;
+ fGeometrySrc.fIndexBuffer = buffer;
+}
+
+////////////////////////////////////////////////////////////////////////////////
+
+GrDrawTarget::AutoStateRestore::AutoStateRestore(GrDrawTarget* target) {
+ fDrawTarget = target;
+ fDrawTarget->saveCurrentDrawState(&fDrawState);
+}
+
+GrDrawTarget::AutoStateRestore::~AutoStateRestore() {
+ fDrawTarget->restoreDrawState(fDrawState);
+}
diff --git a/gpu/src/GrGLIndexBuffer.cpp b/gpu/src/GrGLIndexBuffer.cpp
new file mode 100644
index 0000000000..82cffaaec4
--- /dev/null
+++ b/gpu/src/GrGLIndexBuffer.cpp
@@ -0,0 +1,106 @@
+/*
+ Copyright 2010 Google Inc.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+ */
+
+
+#include "GrGLIndexBuffer.h"
+#include "GrGpuGL.h"
+
+GrGLIndexBuffer::GrGLIndexBuffer(GLuint id, GrGpuGL* gl, uint32_t sizeInBytes,
+ bool dynamic) :
+ INHERITED(sizeInBytes, dynamic),
+ fGL(gl),
+ fBufferID(id),
+ fLockPtr(NULL) {
+}
+
+GLuint GrGLIndexBuffer::bufferID() const {
+ return fBufferID;
+}
+
+GrGLIndexBuffer::~GrGLIndexBuffer() {
+ // make sure we've not been abandoned
+ if (fBufferID) {
+ fGL->notifyIndexBufferDelete(this);
+ GR_GL(DeleteBuffers(1, &fBufferID));
+ }
+}
+
+void GrGLIndexBuffer::abandon() {
+ fBufferID = 0;
+ fGL = NULL;
+ fLockPtr = NULL;
+}
+
+void* GrGLIndexBuffer::lock() {
+ GrAssert(fBufferID);
+ GrAssert(!isLocked());
+ if (fGL->supportsBufferLocking()) {
+ GR_GL(BindBuffer(GL_ELEMENT_ARRAY_BUFFER, fBufferID));
+ fGL->notifyIndexBufferBind(this);
+ // call bufferData with null ptr to allow driver to perform renaming
+ // If this call is removed revisit updateData to be sure it doesn't
+ // leave buffer undersized (as it currently does).
+ GR_GL(BufferData(GL_ELEMENT_ARRAY_BUFFER, size(), NULL,
+ dynamic() ? GL_DYNAMIC_DRAW : GL_STATIC_DRAW));
+ fLockPtr = GR_GLEXT(fGL->extensions(),
+ MapBuffer(GL_ELEMENT_ARRAY_BUFFER, GR_WRITE_ONLY));
+
+ return fLockPtr;
+ }
+ return NULL;
+}
+
+void GrGLIndexBuffer::unlock() {
+ GrAssert(fBufferID);
+ GrAssert(isLocked());
+
+ if (fGL->supportsBufferLocking()) {
+ GR_GL(BindBuffer(GL_ELEMENT_ARRAY_BUFFER, fBufferID));
+ fGL->notifyIndexBufferBind(this);
+ GR_GLEXT(fGL->extensions(),
+ UnmapBuffer(GL_ELEMENT_ARRAY_BUFFER));
+ fLockPtr = NULL;
+ }
+}
+
+bool GrGLIndexBuffer::isLocked() const {
+ GrAssert(fBufferID);
+#if GR_DEBUG
+ if (fGL->supportsBufferLocking()) {
+ GLint mapped;
+ GR_GL(BindBuffer(GL_ELEMENT_ARRAY_BUFFER, fBufferID));
+ fGL->notifyIndexBufferBind(this);
+ GR_GL(GetBufferParameteriv(GL_ELEMENT_ARRAY_BUFFER,
+ GR_BUFFER_MAPPED, &mapped));
+ GrAssert(!!mapped == !!fLockPtr);
+ }
+#endif
+ return NULL != fLockPtr;
+}
+
+bool GrGLIndexBuffer::updateData(const void* src, uint32_t srcSizeInBytes) {
+ GrAssert(fBufferID);
+ GrAssert(!isLocked());
+ if (srcSizeInBytes > size()) {
+ return false;
+ }
+ GR_GL(BindBuffer(GL_ELEMENT_ARRAY_BUFFER, fBufferID));
+ fGL->notifyIndexBufferBind(this);
+ GR_GL(BufferData(GL_ELEMENT_ARRAY_BUFFER, srcSizeInBytes, src,
+ dynamic() ? GL_DYNAMIC_DRAW : GL_STATIC_DRAW));
+ return true;
+}
+
diff --git a/gpu/src/GrGLTexture.cpp b/gpu/src/GrGLTexture.cpp
new file mode 100644
index 0000000000..b75cad5ec0
--- /dev/null
+++ b/gpu/src/GrGLTexture.cpp
@@ -0,0 +1,174 @@
+/*
+ Copyright 2010 Google Inc.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+ */
+
+
+#include "GrGLTexture.h"
+#include "GrGpuGL.h"
+
+GrGLRenderTarget::GrGLRenderTarget(const GLRenderTargetIDs& ids,
+ const GrIRect& viewport,
+ GrGLTexture* texture,
+ GrGpuGL* gl) : INHERITED(texture) {
+ fGL = gl;
+ fRTFBOID = ids.fRTFBOID;
+ fTexFBOID = ids.fTexFBOID;
+ fStencilRenderbufferID = ids.fStencilRenderbufferID;
+ fMSColorRenderbufferID = ids.fMSColorRenderbufferID;
+ fNeedsResolve = false;
+ fViewport = viewport;
+ fOwnIDs = ids.fOwnIDs;
+ // viewport should be GL's viewport with top >= bottom
+ GrAssert(viewport.height() <= 0);
+}
+
+GrGLRenderTarget::~GrGLRenderTarget() {
+ fGL->notifyRenderTargetDelete(this);
+ if (fOwnIDs) {
+ if (fTexFBOID) {
+ GR_GLEXT(fGL->extensions(), DeleteFramebuffers(1, &fTexFBOID));
+ }
+ if (fRTFBOID && fRTFBOID != fTexFBOID) {
+ GR_GLEXT(fGL->extensions(), DeleteFramebuffers(1, &fRTFBOID));
+ }
+ if (fStencilRenderbufferID) {
+ GR_GLEXT(fGL->extensions(), DeleteRenderbuffers(1, &fStencilRenderbufferID));
+ }
+ if (fMSColorRenderbufferID) {
+ GR_GLEXT(fGL->extensions(), DeleteRenderbuffers(1, &fMSColorRenderbufferID));
+ }
+ }
+}
+
+void GrGLRenderTarget::abandon() {
+ fRTFBOID = 0;
+ fTexFBOID = 0;
+ fStencilRenderbufferID = 0;
+ fMSColorRenderbufferID = 0;
+}
+
+
+////////////////////////////////////////////////////////////////////////////////
+
+const GLenum GrGLTexture::gWrapMode2GLWrap[] = {
+ GL_CLAMP_TO_EDGE,
+ GL_REPEAT,
+#ifdef GL_MIRRORED_REPEAT
+ GL_MIRRORED_REPEAT
+#else
+ GL_REPEAT // GL_MIRRORED_REPEAT not supported :(
+#endif
+};
+
+
+GrGLTexture::GrGLTexture(const GLTextureDesc& textureDesc,
+ const GLRenderTargetIDs& rtIDs,
+ GrGpuGL* gl) :
+ INHERITED(textureDesc.fContentWidth,
+ textureDesc.fContentHeight,
+ textureDesc.fAllocWidth,
+ textureDesc.fAllocHeight,
+ textureDesc.fFormat),
+ fTextureID(textureDesc.fTextureID),
+ fUploadFormat(textureDesc.fUploadFormat),
+ fUploadByteCount(textureDesc.fUploadByteCount),
+ fUploadType(textureDesc.fUploadType),
+ fOrientation(textureDesc.fOrientation),
+ fRenderTarget(NULL),
+ fGpuGL(gl) {
+
+ GrAssert(0 != textureDesc.fTextureID);
+
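+    // A non-zero texture FBO id means this texture is also a render target;
+    // record a GL-style viewport (top > bottom) covering the content area.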
+ if (rtIDs.fTexFBOID) {
+ GrIRect vp;
+ vp.fLeft = 0;
+ vp.fRight = (int32_t) textureDesc.fContentWidth;
+ // viewport for GL is top > bottom
+ vp.fTop = (int32_t) textureDesc.fAllocHeight;
+ vp.fBottom = (int32_t) textureDesc.fAllocHeight -
+ (int32_t)textureDesc.fContentHeight;
+ fRenderTarget = new GrGLRenderTarget(rtIDs, vp, this, gl);
+ }
+
+ fSamplerState.setClampNoFilter();
+
+ GR_GL(BindTexture(GL_TEXTURE_2D, fTextureID));
+ gl->notifyTextureBind(this);
+ GR_GL(TexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST));
+ GR_GL(TexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST));
+ GR_GL(TexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE));
+ GR_GL(TexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE));
+}
+
+GrGLTexture::~GrGLTexture() {
+ // make sure we haven't been abandoned
+ if (fTextureID) {
+ fGpuGL->notifyTextureDelete(this);
+ GR_GL(DeleteTextures(1, &fTextureID));
+ }
+ delete fRenderTarget;
+}
+
+void GrGLTexture::abandon() {
+ fTextureID = 0;
+ if (NULL != fRenderTarget) {
+ fRenderTarget->abandon();
+ }
+}
+
+bool GrGLTexture::isRenderTarget() const {
+ return NULL != fRenderTarget;
+}
+
+GrRenderTarget* GrGLTexture::asRenderTarget() {
+ return (GrRenderTarget*)fRenderTarget;
+}
+
+void GrGLTexture::removeRenderTarget() {
+ GrAssert(NULL != fRenderTarget);
+ if (NULL != fRenderTarget) {
+ // must do this notify before the delete
+ fGpuGL->notifyTextureRemoveRenderTarget(this);
+ delete fRenderTarget;
+ fRenderTarget = NULL;
+ }
+}
+
+void GrGLTexture::uploadTextureData(uint32_t x,
+ uint32_t y,
+ uint32_t width,
+ uint32_t height,
+ const void* srcData) {
+ // glCompressedTexSubImage2D doesn't support any formats
+ // (at least without extensions)
+ GrAssert(fUploadFormat != GR_PALETTE8_RGBA8);
+
+ // If we need to update textures that are created upside down
+ // then we have to modify this code to flip the srcData
+ GrAssert(kTopDown_Orientation == fOrientation);
+ GR_GL(BindTexture(GL_TEXTURE_2D, fTextureID));
+ fGpuGL->notifyTextureBind(this);
+ GR_GL(PixelStorei(GL_UNPACK_ALIGNMENT, fUploadByteCount));
+ GR_GL(TexSubImage2D(GL_TEXTURE_2D, 0, x, y, width, height,
+ fUploadFormat, fUploadType, srcData));
+
+}
+
+intptr_t GrGLTexture::getTextureHandle() {
+ return fTextureID;
+}
+
+
+
diff --git a/gpu/src/GrGLVertexBuffer.cpp b/gpu/src/GrGLVertexBuffer.cpp
new file mode 100644
index 0000000000..b4ddc244e0
--- /dev/null
+++ b/gpu/src/GrGLVertexBuffer.cpp
@@ -0,0 +1,103 @@
+/*
+ Copyright 2010 Google Inc.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+ */
+
+
+#include "GrGLVertexBuffer.h"
+#include "GrGpuGL.h"
+
+GrGLVertexBuffer::GrGLVertexBuffer(GLuint id, GrGpuGL* gl, uint32_t sizeInBytes,
+ bool dynamic) :
+ INHERITED(sizeInBytes, dynamic),
+ fGL(gl),
+ fBufferID(id),
+ fLockPtr(NULL) {
+}
+
+GrGLVertexBuffer::~GrGLVertexBuffer() {
+ // make sure we've not been abandoned
+ if (fBufferID) {
+ fGL->notifyVertexBufferDelete(this);
+ GR_GL(DeleteBuffers(1, &fBufferID));
+ }
+}
+
+GLuint GrGLVertexBuffer::bufferID() const {
+ return fBufferID;
+}
+
+void GrGLVertexBuffer::abandon() {
+ fBufferID = 0;
+ fGL = NULL;
+ fLockPtr = NULL;
+}
+
+void* GrGLVertexBuffer::lock() {
+ GrAssert(fBufferID);
+ GrAssert(!isLocked());
+ if (fGL->supportsBufferLocking()) {
+ GR_GL(BindBuffer(GL_ARRAY_BUFFER, fBufferID));
+ fGL->notifyVertexBufferBind(this);
+ // call bufferData with null ptr to allow driver to perform renaming
+ // If this call is removed revisit updateData to be sure it doesn't
+ // leave buffer undersized (as it currently does).
+ GR_GL(BufferData(GL_ARRAY_BUFFER, size(), NULL,
+ dynamic() ? GL_DYNAMIC_DRAW : GL_STATIC_DRAW));
+ fLockPtr = GR_GLEXT(fGL->extensions(),
+ MapBuffer(GL_ARRAY_BUFFER, GR_WRITE_ONLY));
+ return fLockPtr;
+ }
+ return NULL;
+}
+
+void GrGLVertexBuffer::unlock() {
+ GrAssert(fBufferID);
+ GrAssert(isLocked());
+ if (fGL->supportsBufferLocking()) {
+ GR_GL(BindBuffer(GL_ARRAY_BUFFER, fBufferID));
+ fGL->notifyVertexBufferBind(this);
+ GR_GLEXT(fGL->extensions(),
+ UnmapBuffer(GL_ARRAY_BUFFER));
+ fLockPtr = NULL;
+ }
+}
+
+bool GrGLVertexBuffer::isLocked() const {
+ GrAssert(fBufferID);
+#if GR_DEBUG
+ if (fGL->supportsBufferLocking()) {
+ GLint mapped;
+ GR_GL(BindBuffer(GL_ARRAY_BUFFER, fBufferID));
+ fGL->notifyVertexBufferBind(this);
+ GR_GL(GetBufferParameteriv(GL_ARRAY_BUFFER, GR_BUFFER_MAPPED, &mapped));
+ GrAssert(!!mapped == !!fLockPtr);
+ }
+#endif
+ return NULL != fLockPtr;
+}
+
+bool GrGLVertexBuffer::updateData(const void* src, uint32_t srcSizeInBytes) {
+ GrAssert(fBufferID);
+ GrAssert(!isLocked());
+ if (srcSizeInBytes > size()) {
+ return false;
+ }
+ GR_GL(BindBuffer(GL_ARRAY_BUFFER, fBufferID));
+ fGL->notifyVertexBufferBind(this);
+ GR_GL(BufferData(GL_ARRAY_BUFFER, srcSizeInBytes, src,
+ dynamic() ? GL_DYNAMIC_DRAW : GL_STATIC_DRAW));
+ return true;
+}
+
diff --git a/gpu/src/GrGpu.cpp b/gpu/src/GrGpu.cpp
new file mode 100644
index 0000000000..c6340bdc0b
--- /dev/null
+++ b/gpu/src/GrGpu.cpp
@@ -0,0 +1,343 @@
+/*
+ Copyright 2010 Google Inc.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+ */
+
+
+#include "GrGpu.h"
+#include "GrMemory.h"
+#include "GrTextStrike.h"
+#include "GrTextureCache.h"
+#include "GrClipIterator.h"
+#include "GrIndexBuffer.h"
+
+///////////////////////////////////////////////////////////////////////////////
+
+size_t GrTexture::BytesPerPixel(PixelConfig config) {
+ switch (config) {
+ case kAlpha_8_PixelConfig:
+ case kIndex_8_PixelConfig:
+ return 1;
+ case kRGB_565_PixelConfig:
+ case kRGBA_4444_PixelConfig:
+ return 2;
+ case kRGBA_8888_PixelConfig:
+ case kRGBX_8888_PixelConfig:
+ return 4;
+ default:
+ return 0;
+ }
+}
+
+bool GrTexture::PixelConfigIsOpaque(PixelConfig config) {
+ switch (config) {
+ case GrTexture::kRGB_565_PixelConfig:
+ case GrTexture::kRGBX_8888_PixelConfig:
+ return true;
+ default:
+ return false;
+ }
+}
+
+
+///////////////////////////////////////////////////////////////////////////////
+
+extern void gr_run_unittests();
+
+GrGpu::GrGpu() : f8bitPaletteSupport(false),
+ fNPOTTextureSupport(kNone_NPOTTextureType),
+ fQuadIndexBuffer(NULL) {
+#if GR_DEBUG
+// gr_run_unittests();
+#endif
+ resetStats();
+}
+
+GrGpu::~GrGpu() {
+ if (NULL != fQuadIndexBuffer) {
+ fQuadIndexBuffer->unref();
+ }
+}
+
+void GrGpu::resetContext() {
+}
+
+void GrGpu::unimpl(const char msg[]) {
+// GrPrintf("--- GrGpu unimplemented(\"%s\")\n", msg);
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+bool GrGpu::canDisableBlend() const {
+ if ((kOne_BlendCoeff == fCurrDrawState.fSrcBlend) &&
+ (kZero_BlendCoeff == fCurrDrawState.fDstBlend)) {
+ return true;
+ }
+
+ // If we have vertex color without alpha then we can't force blend off
+ if ((fGeometrySrc.fVertexLayout & kColor_VertexLayoutBit) ||
+ 0xff != GrColorUnpackA(fCurrDrawState.fColor)) {
+ return false;
+ }
+
+ // If the src coef will always be 1...
+ bool fullSrc = kSA_BlendCoeff == fCurrDrawState.fSrcBlend ||
+ kOne_BlendCoeff == fCurrDrawState.fSrcBlend;
+
+ // ...and the dst coef is always 0...
+ bool noDst = kISA_BlendCoeff == fCurrDrawState.fDstBlend ||
+ kZero_BlendCoeff == fCurrDrawState.fDstBlend;
+
+ // ...and there isn't a texture with an alpha channel...
+ bool noTexAlpha = !VertexHasTexCoords(fGeometrySrc.fVertexLayout) ||
+ fCurrDrawState.fTexture->config() == GrTexture::kRGB_565_PixelConfig ||
+ fCurrDrawState.fTexture->config() == GrTexture::kRGBX_8888_PixelConfig;
+
+ // ...then we disable blend.
+ return fullSrc && noDst && noTexAlpha;
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+static const int MAX_QUADS = 512; // max possible: (1 << 14) - 1;
+
+GR_STATIC_ASSERT(4 * MAX_QUADS <= UINT16_MAX);
+
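+// Each quad's four vertices are indexed as two triangles: (0,1,2) and (0,2,3).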
+static inline void fillIndices(uint16_t* indices, int quadCount) {
+ for (int i = 0; i < quadCount; ++i) {
+ indices[6 * i + 0] = 4 * i + 0;
+ indices[6 * i + 1] = 4 * i + 1;
+ indices[6 * i + 2] = 4 * i + 2;
+ indices[6 * i + 3] = 4 * i + 0;
+ indices[6 * i + 4] = 4 * i + 2;
+ indices[6 * i + 5] = 4 * i + 3;
+ }
+}
+
+const GrIndexBuffer* GrGpu::quadIndexBuffer() const {
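+    // Lazily create and fill the shared quad index buffer, falling back to
+    // updateData() when the buffer cannot be locked directly.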
+ if (NULL == fQuadIndexBuffer) {
+ static const int SIZE = sizeof(uint16_t) * 6 * MAX_QUADS;
+ GrGpu* me = const_cast<GrGpu*>(this);
+ fQuadIndexBuffer = me->createIndexBuffer(SIZE, false);
+ if (NULL != fQuadIndexBuffer) {
+ uint16_t* indices = (uint16_t*)fQuadIndexBuffer->lock();
+ if (NULL != indices) {
+ fillIndices(indices, MAX_QUADS);
+ fQuadIndexBuffer->unlock();
+ } else {
+ indices = (uint16_t*)GrMalloc(SIZE);
+ fillIndices(indices, MAX_QUADS);
+ if (!fQuadIndexBuffer->updateData(indices, SIZE)) {
+ fQuadIndexBuffer->unref();
+ fQuadIndexBuffer = NULL;
+ GrAssert(!"Can't get indices into buffer!");
+ }
+ GrFree(indices);
+ }
+ }
+ }
+
+ return fQuadIndexBuffer;
+}
+
+int GrGpu::maxQuadsInIndexBuffer() const {
+ return (NULL == this->quadIndexBuffer()) ? 0 : MAX_QUADS;
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+void GrGpu::clipWillChange(const GrClip& clip) {
+ if (clip != fClip) {
+ fClipState.fClipIsDirty = true;
+ }
+}
+
+bool GrGpu::setupClipAndFlushState(PrimitiveType type) {
+ const GrIRect* r = NULL;
+
+ if (fCurrDrawState.fFlagBits & kClip_StateBit) {
+ fClipState.fClipInStencil = fClip.countRects() > 1;
+
+ if (fClipState.fClipInStencil &&
+ (fClipState.fClipIsDirty ||
+ fClipState.fStencilClipTarget != fCurrDrawState.fRenderTarget)) {
+
+ AutoStateRestore asr(this);
+ AutoGeometrySrcRestore agsr(this);
+ this->disableState(kClip_StateBit);
+ eraseStencilClip();
+
+ int rectTotal = fClip.countRects();
+ static const int PtsPerRect = 4;
+ // this may be called while geometry is already reserved by the
+ // client. So we use our own vertex array where we avoid malloc
+ // if we have 4 or fewer rects.
+ GrAutoSTMalloc<PtsPerRect * 4, GrPoint> vertices(PtsPerRect *
+ rectTotal);
+ this->setVertexSourceToArray(vertices.get(), 0);
+ int currRect = 0;
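+            // Write the clip rects into the stencil in batches sized to the
+            // shared quad index buffer.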
+ while (currRect < rectTotal) {
+ int rectCount = GrMin(this->maxQuadsInIndexBuffer(),
+ rectTotal - currRect);
+
+ GrPoint* verts = (GrPoint*)vertices +
+ (currRect * PtsPerRect);
+
+ for (int i = 0; i < rectCount; i++) {
+ GrRect r(fClip.getRects()[i + currRect]);
+ verts = r.setRectFan(verts);
+ }
+ this->setIndexSourceToBuffer(quadIndexBuffer());
+
+ this->setViewMatrix(GrMatrix::I());
+ this->setStencilPass((GrDrawTarget::StencilPass)kSetClip_StencilPass);
+ this->drawIndexed(GrGpu::kTriangles_PrimitiveType,
+ currRect * PtsPerRect, 0,
+ rectCount * PtsPerRect, rectCount * 6);
+
+ currRect += rectCount;
+ }
+ fClipState.fStencilClipTarget = fCurrDrawState.fRenderTarget;
+ }
+ fClipState.fClipIsDirty = false;
+ if (!fClipState.fClipInStencil) {
+ r = &fClip.getBounds();
+ }
+ }
+ // Must flush the scissor after graphics state
+ if (!flushGraphicsState(type)) {
+ return false;
+ }
+ flushScissor(r);
+ return true;
+}
+
+
+///////////////////////////////////////////////////////////////////////////////
+
+void GrGpu::drawIndexed(PrimitiveType type,
+ uint32_t startVertex,
+ uint32_t startIndex,
+ uint32_t vertexCount,
+ uint32_t indexCount) {
+ GrAssert(kReserved_GeometrySrcType != fGeometrySrc.fVertexSrc ||
+ fReservedGeometry.fLocked);
+ GrAssert(kReserved_GeometrySrcType != fGeometrySrc.fIndexSrc ||
+ fReservedGeometry.fLocked);
+
+ if (!setupClipAndFlushState(type)) {
+ return;
+ }
+
+#if GR_COLLECT_STATS
+ fStats.fVertexCnt += vertexCount;
+ fStats.fIndexCnt += indexCount;
+ fStats.fDrawCnt += 1;
+#endif
+
+ setupGeometry(startVertex, startIndex, vertexCount, indexCount);
+
+ drawIndexedHelper(type, startVertex, startIndex,
+ vertexCount, indexCount);
+}
+
+void GrGpu::drawNonIndexed(PrimitiveType type,
+ uint32_t startVertex,
+ uint32_t vertexCount) {
+ GrAssert(kReserved_GeometrySrcType != fGeometrySrc.fVertexSrc ||
+ fReservedGeometry.fLocked);
+
+ if (!setupClipAndFlushState(type)) {
+ return;
+ }
+#if GR_COLLECT_STATS
+ fStats.fVertexCnt += vertexCount;
+ fStats.fDrawCnt += 1;
+#endif
+
+ setupGeometry(startVertex, 0, vertexCount, 0);
+
+ drawNonIndexedHelper(type, startVertex, vertexCount);
+}
+
+bool GrGpu::acquireGeometryHelper(GrVertexLayout vertexLayout,
+ void** vertices,
+ void** indices) {
+ GrAssert((fReservedGeometry.fVertexCount == 0) ||
+ (NULL != vertices));
+ if (NULL != vertices) {
+ *vertices = fVertices.realloc(VertexSize(vertexLayout) *
+ fReservedGeometry.fVertexCount);
+ if (!*vertices && fReservedGeometry.fVertexCount) {
+ return false;
+ }
+ }
+ GrAssert((fReservedGeometry.fIndexCount == 0) ||
+ (NULL != indices));
+ if (NULL != indices) {
+ *indices = fIndices.realloc(sizeof(uint16_t) *
+ fReservedGeometry.fIndexCount);
+ if (!*indices && fReservedGeometry.fIndexCount) {
+ return false;
+ }
+ }
+ return true;
+}
+
+void GrGpu::releaseGeometryHelper() {
+ return;
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+const GrGpu::Stats& GrGpu::getStats() const {
+ return fStats;
+}
+
+void GrGpu::resetStats() {
+ memset(&fStats, 0, sizeof(fStats));
+}
+
+void GrGpu::printStats() const {
+ if (GR_COLLECT_STATS) {
+ GrPrintf(
+ "-v-------------------------GPU STATS----------------------------v-\n"
+ "Stats collection is: %s\n"
+ "Draws: %04d, Verts: %04d, Indices: %04d\n"
+ "ProgChanges: %04d, TexChanges: %04d, RTChanges: %04d\n"
+ "TexCreates: %04d, RTCreates:%04d\n"
+ "-^--------------------------------------------------------------^-\n",
+ (GR_COLLECT_STATS ? "ON" : "OFF"),
+ fStats.fDrawCnt, fStats.fVertexCnt, fStats.fIndexCnt,
+ fStats.fProgChngCnt, fStats.fTextureChngCnt, fStats.fRenderTargetChngCnt,
+ fStats.fTextureCreateCnt, fStats.fRenderTargetCreateCnt);
+ }
+}
+
+////////////////////////////////////////////////////////////////////////////////
+
+GrTexture::~GrTexture() {
+ // use this to set a break-point if needed
+// Gr_clz(3);
+}
+
+const GrSamplerState GrSamplerState::gClampNoFilter(
+ GrSamplerState::kClamp_WrapMode,
+ GrSamplerState::kClamp_WrapMode,
+ GrSamplerState::kNormal_SampleMode,
+ false);
+
+
+
+
diff --git a/gpu/src/GrGpuD3D9.cpp b/gpu/src/GrGpuD3D9.cpp
new file mode 100644
index 0000000000..868097d67f
--- /dev/null
+++ b/gpu/src/GrGpuD3D9.cpp
@@ -0,0 +1,1484 @@
+/*
+ Copyright 2010 Google Inc.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+ */
+
+
+#include "GrGpuD3D9.h"
+#include "GrGpuVertex.h"
+
+void d3dCheckErr(HRESULT hr) {
+ GrAssert(SUCCEEDED(hr));
+}
+
+#if GR_DEBUG
+ #define GR_D3D9(OBJ, X) d3dCheckErr(OBJ-> X);
+#else
+ #define GR_D3D9(OBJ, X) OBJ-> X;
+#endif
+
+#if GR_SCALAR_IS_FIXED
+ //mobile d3d allows 3 component fixed point verts
+ #error "fixed is unsupported in D3D9"
+#elif GR_SCALAR_IS_FLOAT
+ #define FVF_POS_TYPE D3DFVF_XYZ
+ #define FVF_TEX_TYPE (D3DFVF_TEX1 | D3DFVF_TEXCOORDSIZE2(1))
+#else
+ #error "unknown GPU type"
+#endif
+
+#define FVF_COL_TYPE D3DFVF_DIFFUSE
+
+#if GR_TEXT_SCALAR_IS_FIXED
+ //mobile d3d allows 3 component fixed point verts
+ #error "fixed is unsupported in D3D9"
+#elif GR_TEXT_SCALAR_IS_FLOAT
+ #define FVF_POS_TYPE_TEXT D3DFVF_XYZ
+ #define FVF_TEX_TYPE_TEXT (D3DFVF_TEX1 | D3DFVF_TEXCOORDSIZE2(1))
+#elif GR_TEXT_SCALAR_IS_USHORT
+ #error "positions must be float in fixed-pipe D3D9"
+#else
+ #error "unknown GPU text type"
+#endif
+
+static const int STAGE_NUM_VERTS = 512;
+static const int STAGE_VERTEX_SIZE = sizeof(GrPoint)*STAGE_NUM_VERTS;
+static const int STAGE_INDEX_SIZE = 2*STAGE_NUM_VERTS*2*3;
+                                    // 2 bytes per index *
+                                    // 2 triangles per vertex (Euler characteristic) *
+                                    // 3 indices per triangle
+
+static const D3DTRANSFORMSTATETYPE gMatrixMode2D3D9Matrix[] = {
+ D3DTS_WORLD, // kModelView_MatrixMode
+ D3DTS_TEXTURE0, // kTexture_MatrixMode
+};
+
+static const D3DPRIMITIVETYPE gPrimType2D3D9PrimType[] = {
+ D3DPT_TRIANGLELIST,
+ D3DPT_TRIANGLESTRIP,
+ D3DPT_TRIANGLEFAN,
+ D3DPT_POINTLIST,
+ D3DPT_LINELIST,
+ D3DPT_LINESTRIP,
+};
+
+const GrGpuD3D9::VertDecls GrGpuD3D9::gVertFlags2VertDeclIdx[] = {
+ kPosOnly_VertDecl, // no flags
+ kTex_VertDecl, // kTexCoord_VertFlag
+ kColors_VertDecl, // kColors_VertFlag
+    kTexAndColors_VertDecl,     // kTexCoord_VertFlag | kColors_VertFlag
+ kInvalid_VertDecl, // kPositionAsTexCoord_VertFlag
+ kPosAsTex_VertDecl, // kPositionAsTexCoord_VertFlag & kTexCoord_VertFlag
+ kInvalid_VertDecl, // kPositionAsTexCoord_VertFlag & kColors_VertFlag
+ kPosAsTexAndColors_VertDecl // kPositionAsTexCoord_VertFlag & kTexCoord_VertFlag & kColors_VertFlag
+};
+
+const DWORD GrGpuD3D9::gDeclToFVFs[] = {
+ FVF_POS_TYPE, // kPosOnly_VertDecl
+ FVF_POS_TYPE | FVF_TEX_TYPE, // kTex_VertDecl
+ FVF_POS_TYPE | FVF_COL_TYPE, // kColors_VertDecl
+ FVF_POS_TYPE | FVF_TEX_TYPE | FVF_COL_TYPE, // kTexAndColors_VertDecl
+ FVF_POS_TYPE, // kPosAsTex_VertDecl
+ FVF_POS_TYPE | FVF_COL_TYPE, // kPosAsTexAndColors_VertDecl
+};
+
+const DWORD GrGpuD3D9::gTextFVF = FVF_POS_TYPE_TEXT | FVF_TEX_TYPE_TEXT;
+
+#if (SK_A32_SHIFT == 24) && (SK_R32_SHIFT == 16) && \
+ (SK_G32_SHIFT == 8) && (SK_B32_SHIFT == 0)
+ #define GR_D3D9_32BPP_COLOR_FORMAT D3DFMT_A8R8G8B8
+#elif (SK_A32_SHIFT == 24) && (SK_B32_SHIFT == 16) && \
+ (SK_G32_SHIFT == 8) && (SK_R32_SHIFT == 0)
+ #define GR_D3D9_32BPP_COLOR_FORMAT D3DFMT_A8B8G8R8
+#else
+ #error "Skia's 32bit color format is not understood by D3D9."
+#endif
+
+static const DWORD gXfermodeCoeff2Blend[] = {
+ D3DBLEND_ZERO,
+ D3DBLEND_ONE,
+ D3DBLEND_SRCCOLOR,
+ D3DBLEND_INVSRCCOLOR,
+ D3DBLEND_DESTCOLOR,
+ D3DBLEND_INVDESTCOLOR,
+ D3DBLEND_SRCALPHA,
+ D3DBLEND_INVSRCALPHA,
+ D3DBLEND_DESTALPHA,
+ D3DBLEND_INVDESTALPHA,
+};
+
+static const DWORD gTileMode2D3D9Wrap[] = {
+ D3DTADDRESS_CLAMP,
+ D3DTADDRESS_WRAP,
+ D3DTADDRESS_MIRROR
+};
+
+static bool can_be_texture(GrTexture::PixelConfig config, D3DFORMAT* format) {
+ switch (config) {
+ case GrTexture::kRGBA_8888_PixelConfig:
+ *format = GR_D3D9_32BPP_COLOR_FORMAT;
+ break;
+ case GrTexture::kRGB_565_PixelConfig:
+ *format = D3DFMT_R5G6B5;
+ break;
+ case GrTexture::kRGBA_4444_PixelConfig:
+ *format = D3DFMT_A4R4G4B4;
+ break;
+ case GrTexture::kIndex_8_PixelConfig:
+ // we promote index to argb32
+ *format = GR_D3D9_32BPP_COLOR_FORMAT;
+ break;
+ case GrTexture::kAlpha_8_PixelConfig:
+ *format = D3DFMT_A8;
+ break;
+ default:
+ return false;
+ }
+ return true;
+}
+
+static int format_bytes(D3DFORMAT format) {
+ switch (format) {
+ case GR_D3D9_32BPP_COLOR_FORMAT:
+ return 4;
+ case D3DFMT_R5G6B5:
+ return 2;
+ case D3DFMT_A4R4G4B4:
+ return 2;
+ case D3DFMT_A8:
+ return 1;
+ default:
+ GrAssert(!"Unexpected D3D format!");
+ return 0;
+ }
+}
+
+uint32_t vertex_to_primitive_count(GrGpu::PrimitiveTypes type,
+ uint32_t vertexCount) {
+ switch (type) {
+ case GrGpu::kTriangles_PrimitiveType:
+ return vertexCount / 3;
+ case GrGpu::kTriangleStrip_PrimitiveType: // fallthru
+ case GrGpu::kTriangleFan_PrimitiveType:
+ return vertexCount > 2 ? vertexCount - 2 : 0;
+ case GrGpu::kPoints_PrimitiveType:
+ return vertexCount;
+ case GrGpu::kLines_PrimitiveType:
+ return vertexCount / 2;
+ case GrGpu::kLineStrip_PrimitiveType:
+ return vertexCount > 1 ? vertexCount - 1 : 0;
+ default:
+ GrAssert(!"Unknown primitive type!");
+ return 0;
+ }
+}
+
+void gr_matrix_to_d3d_matrix(D3DMATRIX* d3dmat, GrMatrix& grmat) {
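+    // Expand the 3x3 GrMatrix into a 4x4 D3DMATRIX with an identity z
+    // row/column; elements are transposed to match D3D's row-vector layout.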
+ d3dmat->_11 = grmat[GrMatrix::kScaleX];
+ d3dmat->_21 = grmat[GrMatrix::kSkewX];
+ d3dmat->_31 = 0;
+ d3dmat->_41 = grmat[GrMatrix::kTransX];
+
+ d3dmat->_12 = grmat[GrMatrix::kSkewY];
+ d3dmat->_22 = grmat[GrMatrix::kScaleY];
+ d3dmat->_32 = 0;
+ d3dmat->_42 = grmat[GrMatrix::kTransY];
+
+ d3dmat->_13 = 0;
+ d3dmat->_23 = 0;
+ d3dmat->_33 = 1;
+ d3dmat->_43 = 0;
+
+ d3dmat->_14 = grmat[GrMatrix::kPersp0];
+ d3dmat->_24 = grmat[GrMatrix::kPersp1];
+ d3dmat->_34 = 0;
+ d3dmat->_44 = grmat[GrMatrix::kPersp2];
+}
+
+bool color_and_stencil_compatible(const D3DSURFACE_DESC& rtDesc,
+ const D3DSURFACE_DESC& dsDesc) {
+ return (rtDesc.Width <= dsDesc.Width) &&
+ (rtDesc.Height <= dsDesc.Height) &&
+ (rtDesc.MultiSampleType == dsDesc.MultiSampleType) &&
+ (rtDesc.MultiSampleQuality == dsDesc.MultiSampleQuality);
+}
+
+int format_stencil_bits(D3DFORMAT format) {
+ switch (format) {
+ case D3DFMT_D24S8:
+ case D3DFMT_D24FS8:
+ case D3DFMT_S8_LOCKABLE:
+ return 8;
+ }
+ return 0;
+}
+
+////////////////////////////////////////////////////////////////////////////////
+
+GrGpuD3D9::GrGpuD3D9(IDirect3DDevice9* device) : GrGpu(), fDevice(device) {
+ GrPrintf("----------------------- create GrGpuD3D9 %p --------------\n", this);
+
+ fDeviceEx = NULL;
+ fDevice->QueryInterface(__uuidof(::IDirect3DDevice9Ex), (void**)&fDeviceEx);
+
+ fLastBlendOff = false;
+ GR_D3D9(fDevice, SetRenderState(D3DRS_ALPHABLENDENABLE, TRUE));
+
+ GR_D3D9(fDevice, SetRenderState(D3DRS_SCISSORTESTENABLE, TRUE));
+ GR_D3D9(fDevice, SetRenderState(D3DRS_ZENABLE, D3DZB_FALSE));
+ GR_D3D9(fDevice, SetRenderState(D3DRS_LIGHTING, FALSE));
+ GR_D3D9(fDevice, SetRenderState(D3DRS_CULLMODE, D3DCULL_NONE));
+ GR_D3D9(fDevice, SetRenderState(D3DRS_COLORVERTEX, TRUE));
+
+ fLastVertexState.fFlagBits = 0;
+ GR_D3D9(fDevice, SetTextureStageState(0, D3DTSS_COLOROP, D3DTOP_SELECTARG2));
+ GR_D3D9(fDevice, SetTextureStageState(0, D3DTSS_ALPHAOP, D3DTOP_SELECTARG2));
+ GR_D3D9(fDevice, SetTextureStageState(0, D3DTSS_COLORARG1, D3DTA_TEXTURE));
+ GR_D3D9(fDevice, SetTextureStageState(0, D3DTSS_ALPHAARG1, D3DTA_TEXTURE));
+ GR_D3D9(fDevice, SetTextureStageState(0, D3DTSS_COLORARG2, D3DTA_CONSTANT));
+ GR_D3D9(fDevice, SetTextureStageState(0, D3DTSS_ALPHAARG2, D3DTA_CONSTANT));
+
+ fLastVertFVF = -1;
+
+ fLastColorArg1 = D3DTA_TEXTURE;
+
+ fLastDrawState.fSamplerState.fFilter = false;
+ GR_D3D9(fDevice, SetSamplerState(0, D3DSAMP_MAGFILTER, D3DTEXF_POINT));
+ GR_D3D9(fDevice, SetSamplerState(0, D3DSAMP_MINFILTER, D3DTEXF_POINT));
+ GR_D3D9(fDevice, SetSamplerState(0, D3DSAMP_MIPFILTER, D3DTEXF_NONE));
+
+ fNextDrawState.fSamplerState.fWrapX = (GrGpu::WrapModes)-1; // illegal
+ fNextDrawState.fSamplerState.fWrapY = (GrGpu::WrapModes)-1; // illegal
+
+ GR_D3D9(fDevice, SetPixelShader(NULL));
+ GR_D3D9(fDevice, SetVertexShader(NULL));
+
+ fLastDrawState.fFlagBits = 0;
+ GR_D3D9(fDevice, SetRenderState(D3DRS_DITHERENABLE, FALSE));
+ GR_D3D9(fDevice, SetRenderState(D3DRS_ANTIALIASEDLINEENABLE, FALSE));
+
+ // illegal values
+ fLastDrawState.fSrcBlend = (BlendCoeff)-1;
+ fLastDrawState.fDstBlend = (BlendCoeff)-1;
+ fLastDrawState.fColor = GrColor_ILLEGAL;
+ fLastDrawState.fPointSize = fLastDrawState.fLineWidth = -1;
+ fLastDrawState.fTexture = NULL;
+
+ GR_D3D9(fDevice,GetRenderTarget(0,&fDefaultRenderTarget.fColor));
+ fLastDrawState.fRenderTarget = (GrRenderTarget*) &fDefaultRenderTarget;
+ GrAssert(NULL != fDefaultRenderTarget.fColor);
+
+ // We need a stencil buffer to do path rendering.
+ D3DSURFACE_DESC rtDesc;
+ GR_D3D9(fDefaultRenderTarget.fColor, GetDesc(&rtDesc));
+ fDefaultRenderTarget.fStencil = NULL;
+ GR_D3D9(fDevice, GetDepthStencilSurface(&fDefaultRenderTarget.fStencil));
+ // make sure any existing depth stencil is compatible with the rendertarget
+ // and has at least 8 bits of stencil
+ if (NULL != fDefaultRenderTarget.fStencil) {
+ D3DSURFACE_DESC dsDesc;
+ GR_D3D9(fDefaultRenderTarget.fStencil, GetDesc(&dsDesc));
+ if (!color_and_stencil_compatible(rtDesc, dsDesc) ||
+ format_stencil_bits(dsDesc.Format) < 8) {
+ fDefaultRenderTarget.fStencil = NULL;
+ } else {
+ // add a ref so that we can safely Release in destructor
+ fDefaultRenderTarget.fStencil->AddRef();
+ }
+ }
+ if (NULL == fDefaultRenderTarget.fStencil) {
+ fDefaultRenderTarget.fStencil = createStencil(rtDesc.Width,
+ rtDesc.Height,
+ rtDesc.MultiSampleType,
+ rtDesc.MultiSampleQuality);
+ GrAssert(NULL != fDefaultRenderTarget.fStencil);
+ GR_D3D9(fDevice, SetDepthStencilSurface(fDefaultRenderTarget.fStencil));
+ }
+
+ fLastDrawState.fScissorRect.setEmpty();
+ RECT rect;
+ rect.left = rect.right = rect.top = rect.bottom = 0;
+ GR_D3D9(fDevice,SetScissorRect(&rect));
+
+ D3DMATRIX identity;
+ memset(&identity, 0, sizeof(identity));
+ identity._11 = identity._22 = identity._33 = identity._44 = 1.f;
+ for (int i = 0; i < kMatrixModeCount; i++) {
+ fLastDrawState.fMatrixModeCache[i].setIdentity();
+ GR_D3D9(fDevice, SetTransform(gMatrixMode2D3D9Matrix[i], &identity));
+ }
+ GR_D3D9(fDevice,
+ SetTextureStageState(0, D3DTSS_TEXTURETRANSFORMFLAGS, D3DTTFF_COUNT2));
+ GR_D3D9(fDevice, SetTextureStageState(0, D3DTSS_TEXCOORDINDEX, 0));
+ fLastTexGen = false;
+
+ fLastDrawState.fViewportW = -1;
+ fLastDrawState.fViewportH = -1;
+
+ GR_D3D9(fDevice, SetRenderState(D3DRS_STENCILENABLE, FALSE));
+ GR_D3D9(fDevice, SetRenderState(D3DRS_STENCILMASK, 0xffffffff));
+ GR_D3D9(fDevice, SetRenderState(D3DRS_STENCILWRITEMASK, 0xffffffff));
+ GR_D3D9(fDevice, SetRenderState(D3DRS_COLORWRITEENABLE, 0xf));
+ fLastDrawState.fDrawMode = kOther_DrawMode;
+ fLastDrawState.fPathPass = (PathPass)-1;
+ fLastDrawState.fReverseFill = false;
+
+ fNextDrawState = fLastDrawState;
+ fNextVertexState = fLastVertexState;
+
+ fLastIndexBuffer = NULL;
+
+ fStageVBuffer = (GrD3D9VertexBuffer*)
+ this->createVertexBuffer(STAGE_VERTEX_SIZE, true);
+ GrAssert(NULL != fStageVBuffer);
+
+ fStageIBuffer = (GrD3D9IndexBuffer*)
+ this->createIndexBuffer(STAGE_INDEX_SIZE, true);
+ GrAssert(NULL != fStageIBuffer);
+
+ TextureDesc dummyDesc = {
+ 0,
+ kNone_AALevel,
+ 1,
+ 1,
+ false,
+ GrTexture::kAlpha_8_PixelConfig
+ };
+ fDummyTexture = (GrD3D9Texture*) this->createTexture(dummyDesc, NULL);
+ GrAssert(NULL != fDummyTexture);
+
+ fNPOTTextureSupport = kFull_NPOTTextureType;
+ D3DCAPS9 caps;
+ GR_D3D9(fDevice, GetDeviceCaps(&caps));
+ fSingleStencilPassForWinding =
+ 0 != (caps.StencilCaps & D3DSTENCILCAPS_TWOSIDED);
+    GrAssert(D3DSTENCILCAPS_INVERT & caps.StencilCaps);
+    GrAssert(D3DSTENCILCAPS_INCR & caps.StencilCaps);
+    GrAssert(D3DSTENCILCAPS_DECR & caps.StencilCaps);
+    GrAssert(D3DSTENCILCAPS_ZERO & caps.StencilCaps);
+
+ // start off with all zeros, keep this after fNextDrawState assignment
+ eraseStencil();
+ fDefaultRenderTarget.fClearStencil = false;
+}
+
+GrGpuD3D9::~GrGpuD3D9() {
+ fStageVBuffer->unref();
+ fStageIBuffer->unref();
+ fDummyTexture->unref();
+ // Currently we are assuming that the default render target
+ // existed before our constructor was called. We don't ever create
+ // it and we never add a ref to it, so don't release.
+ // We do create a stencil buffer if needed, though.
+ fDefaultRenderTarget.fStencil->Release();
+}
+
+IDirect3DSurface9* GrGpuD3D9::createStencil(uint32_t width,
+ uint32_t height,
+ D3DMULTISAMPLE_TYPE msType,
+ DWORD msQual) {
+ IDirect3DSurface9* dsSurface = NULL;
+ // Direct3D9 Ex adds a stencil only format.
+ if (NULL != fDeviceEx) {
+ GR_D3D9(fDeviceEx, CreateDepthStencilSurfaceEx(width, height,
+ D3DFMT_S8_LOCKABLE,
+ msType, msQual, FALSE,
+ &dsSurface, NULL,
+ D3DUSAGE_DEPTHSTENCIL));
+ fDeviceEx->Release();
+ }
+ if (NULL == dsSurface) {
+ fDevice->CreateDepthStencilSurface(width, height, D3DFMT_D24S8, msType,
+ msQual, FALSE, &dsSurface, NULL);
+ }
+ return dsSurface;
+}
+
+GrTexture* GrGpuD3D9::createTexture(const TextureDesc& desc,
+ const void* srcData) {
+ D3DFORMAT d3dformat;
+ bool renderTarget = (desc.fFlags & kRenderTarget_TextureFlag);
+ if (desc.fAALevel != kNone_AALevel && renderTarget) {
+ GrPrintf("Requested AA RT/Tex but not yet implemented in D3D.");
+ }
+ if (can_be_texture(desc.fFormat, &d3dformat)) {
+ DWORD usage = desc.fDynamic ? D3DUSAGE_DYNAMIC : 0;
+ usage |= renderTarget ? D3DUSAGE_RENDERTARGET : 0;
+ IDirect3DTexture9* d3dTex = NULL;
+
+ GR_D3D9(fDevice, CreateTexture(desc.fWidth, desc.fHeight, 1, usage,
+ d3dformat, D3DPOOL_DEFAULT, &d3dTex,
+ NULL));
+
+ // In D3D9 the depth-stencil can be larger but not smaller than the RT
+ IDirect3DSurface9* depthStencil = NULL;
+ D3DSURFACE_DESC dsDesc;
+ fDefaultRenderTarget.fStencil->GetDesc(&dsDesc);
+ // check if existing depth stencil is compatible
+ if ((renderTarget) &&
+ ((desc.fWidth > dsDesc.Width) ||
+ (desc.fHeight > dsDesc.Height) ||
+ (dsDesc.MultiSampleType != D3DMULTISAMPLE_NONE))) {
+ depthStencil = createStencil(desc.fWidth, desc.fHeight,
+ D3DMULTISAMPLE_NONE, 0);
+ GrAssert(NULL != depthStencil);
+ }
+ if (d3dTex) {
+ GrD3D9Texture* texture = new GrD3D9Texture(desc.fWidth,
+ desc.fHeight,
+ desc.fFormat,
+ d3dTex,
+ depthStencil,
+ true,
+ this);
+ if (NULL != srcData) {
+ texture->uploadTextureData(0, 0, desc.fWidth,
+ desc.fHeight, srcData);
+ }
+ return texture;
+ }
+ }
+ return NULL;
+}
+
+GrVertexBuffer* GrGpuD3D9::createVertexBuffer(uint32_t size, bool dynamic) {
+    DWORD usage = dynamic ? D3DUSAGE_DYNAMIC : 0;
+ usage |= D3DUSAGE_WRITEONLY;
+ IDirect3DVertexBuffer9* vbuffer = NULL;
+ GR_D3D9(fDevice, CreateVertexBuffer(size, usage, 0,
+ D3DPOOL_DEFAULT, &vbuffer, NULL));
+ if (vbuffer) {
+ return new GrD3D9VertexBuffer(size, dynamic, vbuffer, this);
+ }
+ return NULL;
+}
+
+GrIndexBuffer* GrGpuD3D9::createIndexBuffer(uint32_t size, bool dynamic) {
+    DWORD usage = dynamic ? D3DUSAGE_DYNAMIC : 0;
+ usage |= D3DUSAGE_WRITEONLY;
+ IDirect3DIndexBuffer9* ibuffer = NULL;
+ GR_D3D9(fDevice, CreateIndexBuffer(size, usage, D3DFMT_INDEX16,
+ D3DPOOL_DEFAULT, &ibuffer, NULL));
+ if (ibuffer) {
+ return new GrD3D9IndexBuffer(size, dynamic, ibuffer, this);
+ }
+ return NULL;
+}
+
+bool GrGpuD3D9::flushGraphicsState(PrimitiveTypes type) {
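+    // Flush only the state that differs between the last and the pending
+    // draw state to avoid redundant D3D9 state changes.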
+ GrAssert(fNextDrawState.fViewportW != -1);
+
+ if (fNextDrawState.fDrawMode == kRadialTexture_DrawMode ||
+ fNextDrawState.fDrawMode == kSweepTexture_DrawMode ||
+ fNextDrawState.fDrawMode == kTwoPointRadialTexture_DrawMode) {
+ unimpl("Fixed pipe doesn't support radial/sweep gradient");
+ return false;
+ }
+
+ uint32_t stateDiff = fNextDrawState.fFlagBits ^ fLastDrawState.fFlagBits;
+
+ if (fLastDrawState.fRenderTarget != fNextDrawState.fRenderTarget) {
+ setRenderTargetImm();
+ GrD3D9RenderTarget& rt = *(GrD3D9RenderTarget*)fNextDrawState.fRenderTarget;
+ if (rt.fClearStencil) {
+ eraseStencil();
+ rt.fClearStencil = false;
+ }
+ // may need to change how AA is handled.
+ stateDiff |= (1 << kAntialias_StateFlag);
+ }
+
+    if (stateDiff) {
+ if (stateDiff & (1<<kDither_StateFlag)) {
+ GR_D3D9(fDevice,SetRenderState(D3DRS_DITHERENABLE,
+ (fNextDrawState.fFlagBits & (1<<kDither_StateFlag)) ?
+ TRUE :
+ FALSE));
+ }
+ if (stateDiff & (1<<kAntialias_StateFlag)) {
+ DWORD aa = fNextDrawState.fFlagBits & (1<<kAntialias_StateFlag) ?
+ TRUE : FALSE;
+ GrD3D9RenderTarget& rt = *(GrD3D9RenderTarget*)fNextDrawState.fRenderTarget;
+ D3DSURFACE_DESC desc;
+ rt.fColor->GetDesc(&desc);
+ if (desc.MultiSampleType != D3DMULTISAMPLE_NONE) {
+ GR_D3D9(fDevice, SetRenderState(D3DRS_MULTISAMPLEANTIALIAS, aa));
+ } else {
+ GR_D3D9(fDevice, SetRenderState(D3DRS_ANTIALIASEDLINEENABLE, aa));
+ }
+ }
+ fLastDrawState.fFlagBits = fNextDrawState.fFlagBits;
+ }
+ bool blendOff = canDisableBlend();
+ if (fLastBlendOff != blendOff) {
+ GR_D3D9(fDevice, SetRenderState(D3DRS_ALPHABLENDENABLE,
+ blendOff ? FALSE : TRUE));
+ fLastBlendOff = blendOff;
+ }
+ if (!blendOff) {
+ if (fLastDrawState.fSrcBlend != fNextDrawState.fSrcBlend) {
+ GR_D3D9(fDevice, SetRenderState(D3DRS_SRCBLEND,
+ gXfermodeCoeff2Blend[fNextDrawState.fSrcBlend]));
+ fLastDrawState.fSrcBlend = fNextDrawState.fSrcBlend;
+ }
+ if (fLastDrawState.fDstBlend != fNextDrawState.fDstBlend) {
+ GR_D3D9(fDevice, SetRenderState(D3DRS_DESTBLEND,
+ gXfermodeCoeff2Blend[fNextDrawState.fDstBlend]));
+ fLastDrawState.fDstBlend = fNextDrawState.fDstBlend;
+ }
+ }
+
+ // bind texture and set sampler state
+ if (fNextVertexState.fFlagBits & (1 << kTexCoord_VertFlag)) {
+ GrD3D9Texture* nextTexture = (GrD3D9Texture*)fNextDrawState.fTexture;
+ if (NULL != nextTexture) {
+ if (fLastDrawState.fTexture != nextTexture) {
+ GR_D3D9(fDevice, SetTexture(0, nextTexture->texture()));
+ DWORD nextColorArg1 = nextTexture->format() == D3DFMT_A8 ?
+ (D3DTA_TEXTURE | D3DTA_ALPHAREPLICATE) :
+ D3DTA_TEXTURE;
+ if (fLastColorArg1 != nextColorArg1) {
+ GR_D3D9(fDevice, SetTextureStageState(0,
+ D3DTSS_COLORARG1,
+ nextColorArg1));
+ fLastColorArg1 = nextColorArg1;
+ }
+ fLastDrawState.fTexture = nextTexture;
+ }
+
+ if (fLastDrawState.fSamplerState.fFilter !=
+ fNextDrawState.fSamplerState.fFilter) {
+ DWORD filter = fNextDrawState.fSamplerState.fFilter ?
+ D3DTEXF_LINEAR :
+ D3DTEXF_POINT;
+ GR_D3D9(fDevice, SetSamplerState(0, D3DSAMP_MAGFILTER, filter));
+ GR_D3D9(fDevice, SetSamplerState(0, D3DSAMP_MINFILTER, filter));
+ fLastDrawState.fSamplerState.fFilter =
+ fNextDrawState.fSamplerState.fFilter;
+ }
+ if (fLastDrawState.fSamplerState.fWrapX !=
+ fNextDrawState.fSamplerState.fWrapX) {
+ GR_D3D9(fDevice, SetSamplerState(0, D3DSAMP_ADDRESSU,
+ gTileMode2D3D9Wrap[fNextDrawState.fSamplerState.fWrapX]));
+ fLastDrawState.fSamplerState.fWrapX =
+ fNextDrawState.fSamplerState.fWrapX;
+ }
+ if (fLastDrawState.fSamplerState.fWrapY !=
+ fNextDrawState.fSamplerState.fWrapY) {
+ GR_D3D9(fDevice, SetSamplerState(0, D3DSAMP_ADDRESSV,
+ gTileMode2D3D9Wrap[fNextDrawState.fSamplerState.fWrapY]));
+ fLastDrawState.fSamplerState.fWrapY =
+ fNextDrawState.fSamplerState.fWrapY;
+ }
+ } else {
+ GrAssert(!"Rendering with texture vert flag set but no bound texture");
+ if (NULL != fLastDrawState.fTexture) {
+ GR_D3D9(fDevice,SetTexture(0, NULL));
+ // GrPrintf("---- bindtexture 0\n");
+ fLastDrawState.fTexture = NULL;
+ }
+ }
+ }
+
+ // check for circular rendering
+ GrAssert(!(fNextVertexState.fFlagBits & (1 << kTexCoord_VertFlag)) ||
+ NULL == fNextDrawState.fRenderTarget ||
+ NULL == fNextDrawState.fTexture ||
+ fNextDrawState.fTexture->asRenderTarget() != fNextDrawState.fRenderTarget);
+
+ if ((type == GrGpu::kLineStrip_PrimitiveType ||
+ type == GrGpu::kLines_PrimitiveType) &&
+ fLastDrawState.fLineWidth != fNextDrawState.fLineWidth) {
+ // D3D9 doesn't support wide lines!
+ //GrAssert(fNextDrawState.fLineWidth == 1);
+ }
+
+ bool stencilChange =
+ fLastDrawState.fPathPass != fNextDrawState.fPathPass ||
+ (kNone_PathPass != fNextDrawState.fPathPass &&
+ fLastDrawState.fReverseFill != fNextDrawState.fReverseFill);
+
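+ // Path filling is done in two stages: a stencil pass that rasterizes
+ // the path with color writes disabled (INVERT for even/odd,
+ // INCR/DECR by winding for the winding rule), then a color pass that
+ // draws covering geometry where the stencil test passes and zeroes
+ // the stencil along the way.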
+ if (stencilChange) {
+ switch (fNextDrawState.fPathPass) {
+ case kNone_PathPass:
+ GR_D3D9(fDevice, SetRenderState(D3DRS_STENCILENABLE, FALSE));
+ GR_D3D9(fDevice, SetRenderState(D3DRS_COLORWRITEENABLE, 0xf));
+ if (!fSingleStencilPassForWinding) {
+ GR_D3D9(fDevice, SetRenderState(D3DRS_CULLMODE, D3DCULL_NONE));
+ }
+ break;
+ case kEvenOddStencil_PathPass:
+ GR_D3D9(fDevice, SetRenderState(D3DRS_STENCILENABLE, TRUE));
+ if (fSingleStencilPassForWinding) {
+ GR_D3D9(fDevice, SetRenderState(D3DRS_TWOSIDEDSTENCILMODE, FALSE));
+ } else {
+ GR_D3D9(fDevice, SetRenderState(D3DRS_CULLMODE, D3DCULL_NONE));
+ }
+ GR_D3D9(fDevice, SetRenderState(D3DRS_STENCILFUNC, D3DCMP_ALWAYS));
+ GR_D3D9(fDevice, SetRenderState(D3DRS_STENCILFAIL, D3DSTENCILOP_INVERT));
+ GR_D3D9(fDevice, SetRenderState(D3DRS_STENCILPASS, D3DSTENCILOP_INVERT));
+ GR_D3D9(fDevice, SetRenderState(D3DRS_STENCILZFAIL, D3DSTENCILOP_INVERT));
+ GR_D3D9(fDevice, SetRenderState(D3DRS_COLORWRITEENABLE, 0x0));
+ break;
+ case kEvenOddColor_PathPass:
+ GR_D3D9(fDevice, SetRenderState(D3DRS_STENCILENABLE, TRUE));
+ GR_D3D9(fDevice, SetRenderState(D3DRS_STENCILREF, 0xffffffff));
+ if (fSingleStencilPassForWinding) {
+ GR_D3D9(fDevice, SetRenderState(D3DRS_TWOSIDEDSTENCILMODE, FALSE));
+ } else {
+ GR_D3D9(fDevice, SetRenderState(D3DRS_CULLMODE, D3DCULL_NONE));
+ }
+ GR_D3D9(fDevice, SetRenderState(D3DRS_STENCILFUNC,
+ fNextDrawState.fReverseFill ? D3DCMP_NOTEQUAL : D3DCMP_EQUAL));
+ GR_D3D9(fDevice, SetRenderState(D3DRS_STENCILFAIL, D3DSTENCILOP_ZERO));
+ GR_D3D9(fDevice, SetRenderState(D3DRS_STENCILPASS, D3DSTENCILOP_ZERO));
+ GR_D3D9(fDevice, SetRenderState(D3DRS_STENCILZFAIL, D3DSTENCILOP_ZERO));
+ GR_D3D9(fDevice, SetRenderState(D3DRS_COLORWRITEENABLE, 0xf));
+ break;
+ case kWindingStencil1_PathPass:
+ GR_D3D9(fDevice, SetRenderState(D3DRS_STENCILENABLE, TRUE));
+ if (fSingleStencilPassForWinding) {
+ GR_D3D9(fDevice, SetRenderState(D3DRS_TWOSIDEDSTENCILMODE, TRUE));
+ GR_D3D9(fDevice, SetRenderState(D3DRS_CCW_STENCILFUNC, D3DCMP_ALWAYS));
+ GR_D3D9(fDevice, SetRenderState(D3DRS_CCW_STENCILFAIL, D3DSTENCILOP_DECR));
+ GR_D3D9(fDevice, SetRenderState(D3DRS_CCW_STENCILPASS, D3DSTENCILOP_DECR));
+ GR_D3D9(fDevice, SetRenderState(D3DRS_CCW_STENCILZFAIL, D3DSTENCILOP_DECR));
+ } else {
+ GR_D3D9(fDevice, SetRenderState(D3DRS_CULLMODE, D3DCULL_CW));
+ }
+ GR_D3D9(fDevice, SetRenderState(D3DRS_STENCILFUNC, D3DCMP_ALWAYS));
+ GR_D3D9(fDevice, SetRenderState(D3DRS_STENCILFAIL, D3DSTENCILOP_INCR));
+ GR_D3D9(fDevice, SetRenderState(D3DRS_STENCILPASS, D3DSTENCILOP_INCR));
+ GR_D3D9(fDevice, SetRenderState(D3DRS_STENCILZFAIL, D3DSTENCILOP_INCR));
+ GR_D3D9(fDevice, SetRenderState(D3DRS_COLORWRITEENABLE, 0x0));
+ break;
+ case kWindingStencil2_PathPass:
+ GrAssert(!fSingleStencilPassForWinding);
+ GR_D3D9(fDevice, SetRenderState(D3DRS_CULLMODE, D3DCULL_CCW));
+ GR_D3D9(fDevice, SetRenderState(D3DRS_STENCILENABLE, TRUE));
+ GR_D3D9(fDevice, SetRenderState(D3DRS_STENCILFUNC, D3DCMP_ALWAYS));
+ GR_D3D9(fDevice, SetRenderState(D3DRS_STENCILFAIL, D3DSTENCILOP_DECR));
+ GR_D3D9(fDevice, SetRenderState(D3DRS_STENCILPASS, D3DSTENCILOP_DECR));
+ GR_D3D9(fDevice, SetRenderState(D3DRS_STENCILZFAIL, D3DSTENCILOP_DECR));
+ GR_D3D9(fDevice, SetRenderState(D3DRS_COLORWRITEENABLE, 0x0));
+ break;
+ case kWindingColor_PathPass:
+ GR_D3D9(fDevice, SetRenderState(D3DRS_STENCILENABLE, TRUE));
+ GR_D3D9(fDevice, SetRenderState(D3DRS_STENCILREF, 0x00000000));
+ if (fSingleStencilPassForWinding) {
+ GR_D3D9(fDevice, SetRenderState(D3DRS_TWOSIDEDSTENCILMODE, FALSE));
+ } else {
+ GR_D3D9(fDevice, SetRenderState(D3DRS_CULLMODE, D3DCULL_NONE));
+ }
+ GR_D3D9(fDevice, SetRenderState(D3DRS_STENCILFUNC,
+ fNextDrawState.fReverseFill ? D3DCMP_EQUAL : D3DCMP_NOTEQUAL));
+ GR_D3D9(fDevice, SetRenderState(D3DRS_STENCILFAIL, D3DSTENCILOP_ZERO));
+ GR_D3D9(fDevice, SetRenderState(D3DRS_STENCILPASS, D3DSTENCILOP_ZERO));
+ GR_D3D9(fDevice, SetRenderState(D3DRS_STENCILZFAIL, D3DSTENCILOP_ZERO));
+ GR_D3D9(fDevice, SetRenderState(D3DRS_COLORWRITEENABLE, 0xf));
+ break;
+ default:
+ GrAssert(!"Unexpected path pass.");
+ break;
+ }
+ fLastDrawState.fPathPass = fNextDrawState.fPathPass;
+ fLastDrawState.fReverseFill = fNextDrawState.fReverseFill;
+ }
+ fLastDrawState.fDrawMode = fNextDrawState.fDrawMode;
+
+ //////////////////////////////////////////////////////////////////////////
+ // Fixed pipe stuff
+
+ DWORD vertFVF = (fNextDrawState.fDrawMode == kTextGlyphs_DrawMode) ?
+ gTextFVF :
+ gDeclToFVFs[gVertFlags2VertDeclIdx[fNextVertexState.fFlagBits]];
+ GrAssert(-1 != vertFVF);
+ if (fLastVertFVF != vertFVF) {
+ GR_D3D9(fDevice, SetFVF(vertFVF));
+ fLastVertFVF = vertFVF;
+ }
+
+ // this has to stay after the render target is set because
+ // SetRenderTarget resets the viewport
+ if (fLastDrawState.fViewportW != fNextDrawState.fViewportW ||
+ fLastDrawState.fViewportH != fNextDrawState.fViewportH) {
+ D3DVIEWPORT9 vp;
+ vp.X = vp.Y = 0;
+ vp.MinZ = 0; vp.MaxZ = 1;
+ vp.Width = fNextDrawState.fViewportW;
+ vp.Height = fNextDrawState.fViewportH;
+ GR_D3D9(fDevice, SetViewport(&vp));
+
+ D3DMATRIX mat;
+ sk_bzero(&mat, sizeof(mat));
+
+ float invW = 1.f / fNextDrawState.fViewportW;
+ float invH = 1.f / fNextDrawState.fViewportH;
+
+ mat._11 = 2.f * invW;
+ mat._22 = -2.f * invH;
+ //mat._33 = -1.f;
+ mat._33 = 1.f;
+ mat._44 = 1;
+
+ // a 1/2-pixel adjustment is folded in because D3D9 puts pixel
+ // *centers* at integer offsets in viewport space.
+ mat._41 = -1.f - invW;
+ mat._42 = 1.f + invH;
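+ // e.g. clip.x = 2*x/W - 1 - 1/W (row-vector convention), so a
+ // GL-style pixel center at x = 0.5 lands on clip -1, the center of
+ // the leftmost pixel column under D3D9's integer-centered pixels.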
+
+ GR_D3D9(fDevice, SetTransform(D3DTS_PROJECTION, &mat));
+
+ fLastDrawState.fViewportW = fNextDrawState.fViewportW;
+ fLastDrawState.fViewportH = fNextDrawState.fViewportH;
+ }
+
+ uint32_t vertDiff = fNextVertexState.fFlagBits ^ fLastVertexState.fFlagBits;
+
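+ // Stage 0 is configured below so its output is texture * arg2
+ // (MODULATE) when tex coords are supplied and just arg2 (SELECTARG2)
+ // when they aren't; arg2 is the per-vertex diffuse color when
+ // kColors_VertFlag is set and the stage constant color otherwise.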
+ if (vertDiff) {
+ if (vertDiff & (1 << kTexCoord_VertFlag)) {
+ if (fNextVertexState.fFlagBits & (1 << kTexCoord_VertFlag)) {
+ GR_D3D9(fDevice, SetTextureStageState(0, D3DTSS_COLOROP,
+ D3DTOP_MODULATE));
+ GR_D3D9(fDevice, SetTextureStageState(0, D3DTSS_ALPHAOP,
+ D3DTOP_MODULATE));
+ } else {
+ GR_D3D9(fDevice, SetTextureStageState(0, D3DTSS_COLOROP,
+ D3DTOP_SELECTARG2));
+ GR_D3D9(fDevice, SetTextureStageState(0, D3DTSS_ALPHAOP,
+ D3DTOP_SELECTARG2));
+ }
+ }
+ if (vertDiff & (1 << kColors_VertFlag)) {
+ if (fNextVertexState.fFlagBits & (1 << kColors_VertFlag)) {
+ GR_D3D9(fDevice, SetTextureStageState(0, D3DTSS_COLORARG2,
+ D3DTA_CURRENT));
+ GR_D3D9(fDevice, SetTextureStageState(0, D3DTSS_ALPHAARG2,
+ D3DTA_CURRENT));
+ } else {
+ GR_D3D9(fDevice, SetTextureStageState(0, D3DTSS_COLORARG2,
+ D3DTA_CONSTANT));
+ GR_D3D9(fDevice, SetTextureStageState(0, D3DTSS_ALPHAARG2,
+ D3DTA_CONSTANT));
+ }
+ }
+ fLastVertexState.fFlagBits = fNextVertexState.fFlagBits;
+ }
+
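+ // Fall back to the dummy texture for untextured draws, presumably so
+ // stage 0 always has a texture bound even though the stage setup
+ // above doesn't sample it.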
+ if (fLastDrawState.fTexture != fDummyTexture &&
+ !(fNextVertexState.fFlagBits & ((1 << kColors_VertFlag) |
+ (1 << kTexCoord_VertFlag)))) {
+ GR_D3D9(fDevice, SetTexture(0, fDummyTexture->texture()));
+ fLastDrawState.fTexture = fDummyTexture;
+ }
+
+ if (fLastDrawState.fPointSize != fNextDrawState.fPointSize) {
+ GR_D3D9(fDevice,SetRenderState(D3DRS_POINTSIZE,
+ *(DWORD*)&fNextDrawState.fPointSize));
+ fLastDrawState.fPointSize = fNextDrawState.fPointSize;
+ }
+
+ if (!(fNextVertexState.fFlagBits & (1 << kColors_VertFlag)) &&
+ fLastDrawState.fColor != fNextDrawState.fColor) {
+ GR_D3D9(fDevice, SetTextureStageState(0, D3DTSS_CONSTANT,
+ fNextDrawState.fColor));
+ fLastDrawState.fColor = fNextDrawState.fColor;
+ }
+ bool mvChanged = fLastDrawState.fMatrixModeCache[kModelView_MatrixMode] !=
+ fNextDrawState.fMatrixModeCache[kModelView_MatrixMode];
+ if (mvChanged) {
+ D3DMATRIX mat;
+ gr_matrix_to_d3d_matrix(&mat,
+ fNextDrawState.fMatrixModeCache[kModelView_MatrixMode]);
+ GR_D3D9(fDevice, SetTransform(
+ gMatrixMode2D3D9Matrix[kModelView_MatrixMode], &mat));
+ fLastDrawState.fMatrixModeCache[kModelView_MatrixMode] =
+ fNextDrawState.fMatrixModeCache[kModelView_MatrixMode];
+ }
+
+ // Since the fixed-pipe FVF doesn't allow using positions
+ // as tex coords the way vertex decls do for shaders, we
+ // use tex gen.
+ // D3D9 tex coord gen uses the camera-space vertex position as the
+ // texture coordinates. We want the world-space position, so
+ // we invert the view matrix and fold it into the texture matrix.
+ if ((fNextVertexState.fFlagBits & (1 << kTexCoord_VertFlag))) {
+ bool texGen = 0 != (fNextVertexState.fFlagBits &
+ (1 << kPositionAsTexCoord_VertFlag));
+
+ bool texGenChange = fLastTexGen != texGen;
+
+ if (fLastDrawState.fMatrixModeCache[kTexture_MatrixMode] !=
+ fNextDrawState.fMatrixModeCache[kTexture_MatrixMode] ||
+ texGenChange ||
+ (texGen && mvChanged)) {
+ GrMatrix* m;
+ GrMatrix temp;
+ D3DMATRIX d3dMat;
+ if (texGen) {
+ fNextDrawState.fMatrixModeCache[kModelView_MatrixMode].
+ invert(&temp);
+ temp.postConcat(fNextDrawState.fMatrixModeCache[kTexture_MatrixMode]);
+ m = &temp;
+ } else {
+ m = &fNextDrawState.fMatrixModeCache[kTexture_MatrixMode];
+ }
+
+ gr_matrix_to_d3d_matrix(&d3dMat, *m);
+ GR_D3D9(fDevice, SetTransform(
+ gMatrixMode2D3D9Matrix[kTexture_MatrixMode], &d3dMat));
+ fLastDrawState.fMatrixModeCache[kTexture_MatrixMode] =
+ fNextDrawState.fMatrixModeCache[kTexture_MatrixMode];
+ if (texGenChange) {
+ GR_D3D9(fDevice, SetTextureStageState(0,
+ D3DTSS_TEXCOORDINDEX,
+ texGen ? D3DTSS_TCI_CAMERASPACEPOSITION : 0));
+ }
+ fLastTexGen = texGen;
+ }
+ }
+ return true;
+}
+
+void GrGpuD3D9::flushScissor() {
+ if (fLastDrawState.fScissorRect != fNextDrawState.fScissorRect) {
+ RECT rect;
+ rect.left = fNextDrawState.fScissorRect.fLeft;
+ rect.right = fNextDrawState.fScissorRect.fRight;
+ rect.top = fNextDrawState.fScissorRect.fTop;
+ rect.bottom = fNextDrawState.fScissorRect.fBottom;
+
+ GR_D3D9(fDevice, SetScissorRect(&rect));
+ fLastDrawState.fScissorRect = fNextDrawState.fScissorRect;
+ }
+
+}
+
+void GrGpuD3D9::eraseColor(GrColor color) {
+
+ DWORD clr = D3DCLEAR_TARGET;
+
+ if (fLastDrawState.fRenderTarget != fNextDrawState.fRenderTarget) {
+ setRenderTargetImm();
+ // In D3D9 setting the render target resets the viewport
+ fLastDrawState.fViewportH = -1;
+ GrD3D9RenderTarget& rt = *(GrD3D9RenderTarget*)fNextDrawState.fRenderTarget;
+ if ((NULL != rt.fStencil) && rt.fClearStencil) {
+ clr |= D3DCLEAR_STENCIL;
+ }
+ }
+
+ D3DCOLOR d3dColor = D3DCOLOR_ARGB(GrColorUnpackA(color),
+ GrColorUnpackR(color),
+ GrColorUnpackG(color),
+ GrColorUnpackB(color));
+
+ // the scissor test is enabled in the preamble and flush assumes it
+ // stays enabled, so disable it just around the clear
+ GR_D3D9(fDevice, SetRenderState(D3DRS_SCISSORTESTENABLE, FALSE));
+ GR_D3D9(fDevice, Clear(0, NULL, clr, d3dColor, 0.f, 0x0));
+ GR_D3D9(fDevice, SetRenderState(D3DRS_SCISSORTESTENABLE, TRUE));
+}
+
+void GrGpuD3D9::eraseStencil() {
+ if (fLastDrawState.fRenderTarget != fNextDrawState.fRenderTarget) {
+ setRenderTargetImm();
+ // In D3D9 setting the render target resets the viewport
+ fLastDrawState.fViewportH = -1;
+ }
+
+ // the scissor test is enabled in the preamble and flush assumes it
+ // stays enabled, so disable it just around the clear
+ GR_D3D9(fDevice, SetRenderState(D3DRS_SCISSORTESTENABLE, FALSE));
+ GR_D3D9(fDevice,Clear(0, NULL, D3DCLEAR_STENCIL, 0x0, 0.f, 0x0));
+ GR_D3D9(fDevice, SetRenderState(D3DRS_SCISSORTESTENABLE, TRUE));
+}
+
+GrD3D9VertexBuffer* GrGpuD3D9::setupVBufferStage(int vsize,
+ int* baseVertex,
+ int vertexCount,
+ DrawModes mode) {
+ GrD3D9VertexBuffer*vbuf;
+ if (vsize*(vertexCount) > STAGE_VERTEX_SIZE) {
+ GrPrintf("Staging vertex buffer is too small!");
+ vbuf = (GrD3D9VertexBuffer*) createVertexBuffer(vsize*vertexCount, true);
+ if (NULL == vbuf) {
+ GrAssert(!"Temporary vertex buffer failed!");
+ return NULL;
+ }
+ } else {
+ vbuf = fStageVBuffer;
+ // add a reference so that caller can unref without deleting
+ vbuf->ref();
+ }
+ void* vptr = vbuf->lock();
+ if (NULL == vptr) {
+ GrAssert(!"Locking staging/temp vbuffer failed!");
+ vbuf->unref();
+ return NULL;
+ }
+
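+ // The staging buffer is filled in FVF order: position (x, y, z with z
+ // forced to 0.5), then the packed DWORD diffuse color if present,
+ // then the 2D tex coords if present; text glyphs are x, y, z, u, v.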
+ if (mode == kTextGlyphs_DrawMode) {
+ GrAssert((fNextVertexState.fFlagBits & (1 << kTexCoord_VertFlag)) &&
+ !(fNextVertexState.fFlagBits & (1 << kColors_VertFlag)) &&
+ !(fNextVertexState.fFlagBits &
+ (1 << kPositionAsTexCoord_VertFlag)));
+ GrAssert(sizeof(GrGpuTextVertex) == 2*sizeof(float));
+ for (int i = 0; i < vertexCount; ++i) {
+ GrGpuTextVertex* posxy = (GrGpuTextVertex*)((char*)vptr + i*vsize);
+ float* posz = (float*)posxy + 2;
+ GrGpuTextVertex* tex = (GrGpuTextVertex*)(posz+1);
+ *posxy = *((GrGpuTextVertex*)fNextVertexState.fArrays.positions + i + *baseVertex);
+ *posz = .5;
+ *tex = *((GrGpuTextVertex*)fNextVertexState.fArrays.texCoords + i + *baseVertex);
+ }
+ } else {
+ GrAssert(sizeof(GrPoint) == 2*sizeof(float));
+ switch (fNextVertexState.fFlagBits) {
+ case 0: // position only
+ case (1 << kTexCoord_VertFlag) |
+ (1 << kPositionAsTexCoord_VertFlag):
+ for (int i = 0; i < vertexCount; ++i) {
+ GrPoint* posxy = (GrPoint*)((char*)vptr + i*vsize);
+ float* posz = (float*)(posxy + 1);
+ *posxy = *((GrPoint*)fNextVertexState.fArrays.positions + i + *baseVertex);
+ *posz = .5;
+ }
+ break;
+ case (1 << kTexCoord_VertFlag):
+ for (int i = 0; i < vertexCount; ++i) {
+ GrPoint* posxy = (GrPoint*)((char*)vptr + i*vsize);
+ float* posz = (float*)(posxy + 1);
+ GrPoint* tex = (GrPoint*)(posz + 1);
+ *posxy = *((GrPoint*)fNextVertexState.fArrays.positions + i + *baseVertex);
+ *posz = .5;
+ *tex = *((GrPoint*)fNextVertexState.fArrays.texCoords + i + *baseVertex);
+ }
+ break;
+ case (1 << kColors_VertFlag):
+ case (1 << kTexCoord_VertFlag) |
+ (1 << kPositionAsTexCoord_VertFlag) |
+ (1 << kColors_VertFlag):
+ for (int i = 0; i < vertexCount; ++i) {
+ GrPoint* posxy = (GrPoint*)((char*)vptr + i*vsize);
+ float* posz = (float*)(posxy + 1);
+ uint32_t* col = (uint32_t*)(posz + 1);
+ *posxy = *((GrPoint*)fNextVertexState.fArrays.positions + i + *baseVertex);
+ *posz = .5;
+ *col = *((uint32_t*)fNextVertexState.fArrays.colors + i + *baseVertex);
+ }
+ break;
+ case (1 << kTexCoord_VertFlag) | (1 << kColors_VertFlag):
+ for (int i = 0; i < vertexCount; ++i) {
+ GrPoint* posxy = (GrPoint*)((char*)vptr + i*vsize);
+ float* posz = (float*)posxy + 2;
+ uint32_t* col = (uint32_t*)(posz + 1);
+ GrPoint* tex = (GrPoint*)(col + 1);
+ *posxy = *((GrPoint*)fNextVertexState.fArrays.positions + i + *baseVertex);
+ *posz = .5;
+ *col = *((uint32_t*)fNextVertexState.fArrays.colors + i + *baseVertex);
+ *tex = *((GrPoint*)fNextVertexState.fArrays.texCoords + i + *baseVertex);
+ }
+ break;
+ default:
+ GrAssert(!"Unexpected vertex flags!");
+ }
+ }
+ *baseVertex = 0;
+ vbuf->unlock();
+ return vbuf;
+}
+
+void GrGpuD3D9::setRenderTargetImm() {
+ GrD3D9RenderTarget& rt = *(GrD3D9RenderTarget*)fNextDrawState.fRenderTarget;
+ GrAssert(NULL != rt.fColor);
+ GR_D3D9(fDevice, SetRenderTarget(0,
+ (IDirect3DSurface9*) rt.fColor));
+ if (NULL != rt.fStencil) {
+ GR_D3D9(fDevice, SetDepthStencilSurface(rt.fStencil));
+ } else {
+ GrAssert(NULL != fDefaultRenderTarget.fStencil);
+ GR_DEBUGCODE(D3DSURFACE_DESC rtDesc;)
+ GR_DEBUGCODE(D3DSURFACE_DESC dsDesc;)
+ GR_DEBUGCODE(GR_D3D9(rt.fColor, GetDesc(&rtDesc));)
+ GR_DEBUGCODE(GR_D3D9(fDefaultRenderTarget.fStencil, \
+ GetDesc(&dsDesc));)
+ GR_DEBUGCODE(GrAssert(color_and_stencil_compatible(rtDesc,
+ dsDesc));)
+ GR_D3D9(fDevice,
+ SetDepthStencilSurface(fDefaultRenderTarget.fStencil));
+ }
+ fLastDrawState.fRenderTarget = fNextDrawState.fRenderTarget;
+}
+
+GrD3D9IndexBuffer* GrGpuD3D9::setupIBufferStage(int* startIndex, int indexCount,
+ const uint16_t* indices) {
+ GrD3D9IndexBuffer* ibuf;
+ if (indexCount*2 > STAGE_INDEX_SIZE) {
+ GrPrintf("Staging index buffer is too small!");
+ ibuf = (GrD3D9IndexBuffer*) createIndexBuffer(indexCount*2, true);
+ if (NULL == ibuf) {
+ GrAssert(!"Temporary index buffer is too small!");
+ return NULL;
+ }
+ } else {
+ ibuf = fStageIBuffer;
+ // add a reference so that caller can unref without deleting
+ ibuf->ref();
+ }
+ void* iptr = ibuf->lock();
+ if (NULL == iptr) {
+ GrAssert(!"Locking staging/temp ibuffer failed!");
+ ibuf->unref();
+ return NULL;
+ }
+ memcpy(iptr, indices + *startIndex, 2*indexCount);
+ *startIndex = 0;
+ ibuf->unlock();
+ return ibuf;
+}
+
+int GrGpuD3D9::vertexSize(int vertFlagBits, GrGpu::DrawModes mode) {
+ if (mode == kTextGlyphs_DrawMode) {
+ return 5*sizeof(float);
+ } else {
+ switch (vertFlagBits) {
+ case 0: // position only
+ case (1 << kTexCoord_VertFlag) |
+ (1 << kPositionAsTexCoord_VertFlag):
+ return 3*sizeof(float);
+ case (1 << kTexCoord_VertFlag):
+ return 5*sizeof(float);
+ case (1 << kColors_VertFlag):
+ case (1 << kTexCoord_VertFlag) |
+ (1 << kPositionAsTexCoord_VertFlag) |
+ (1 << kColors_VertFlag):
+ return 3*sizeof(float) + 4;
+ case (1 << kTexCoord_VertFlag) | (1 << kColors_VertFlag):
+ return 5*sizeof(float) + 4;
+ default:
+ GrAssert(!"Unexpected vertex flags!");
+ return 0;
+ }
+ }
+}
+
+void GrGpuD3D9::drawIndexArrayApi(PrimitiveTypes type,
+ int baseVertex,
+ int vertexCount,
+ int indexCount,
+ const uint16_t* indexArray,
+ bool redrawHint) {
+ int vsize = vertexSize(fNextVertexState.fFlagBits, fNextDrawState.fDrawMode);
+
+ GrD3D9VertexBuffer* vbuf;
+ if (fNextVertexState.fUsingBuffer) {
+ vbuf = (GrD3D9VertexBuffer*) fNextVertexState.fBuffer;
+ } else {
+ vbuf = setupVBufferStage(vsize, &baseVertex, vertexCount,
+ fNextDrawState.fDrawMode);
+ if (NULL == vbuf) {
+ return;
+ }
+ }
+ int startIndex = 0;
+ GrD3D9IndexBuffer* ibuf =
+ setupIBufferStage(&startIndex, indexCount, indexArray);
+ if (NULL == ibuf) {
+ vbuf->unref();
+ return;
+ }
+
+ GR_D3D9(fDevice,SetStreamSource(0, vbuf->buffer(), 0, vsize));
+ GR_D3D9(fDevice,SetIndices(ibuf->buffer()));
+ GR_D3D9(fDevice,DrawIndexedPrimitive(gPrimType2D3D9PrimType[type],
+ baseVertex, 0, vertexCount, startIndex,
+ vertex_to_primitive_count(type,
+ indexCount)));
+ vbuf->unref();
+ ibuf->unref();
+}
+
+void GrGpuD3D9::drawIndexBufferApi(PrimitiveTypes type,
+ int baseVertex,
+ int startIndex,
+ int vertexCount,
+ int indexCount,
+ GrIndexBuffer* indexBuffer,
+ bool redrawHint) {
+ int vsize = vertexSize(fNextVertexState.fFlagBits, fNextDrawState.fDrawMode);
+ GrD3D9VertexBuffer* vbuf;
+ bool unrefVBuf = false;
+ if (fNextVertexState.fUsingBuffer) {
+ vbuf = (GrD3D9VertexBuffer*) fNextVertexState.fBuffer;
+ } else {
+ vbuf = setupVBufferStage(vsize, &baseVertex, vertexCount,
+ fNextDrawState.fDrawMode);
+ if (NULL == vbuf) {
+ return;
+ }
+ }
+ GR_D3D9(fDevice,SetStreamSource(0, vbuf->buffer(), 0, vsize));
+ GR_D3D9(fDevice,SetIndices(((GrD3D9IndexBuffer*)indexBuffer)->buffer()));
+ GR_D3D9(fDevice,DrawIndexedPrimitive(gPrimType2D3D9PrimType[type],
+ baseVertex, 0, vertexCount, startIndex,
+ vertex_to_primitive_count(type,
+ indexCount)));
+ vbuf->unref();
+}
+
+void GrGpuD3D9::drawNonIndexedApi(PrimitiveTypes type,
+ int baseVertex,
+ int vertexCount,
+ bool redrawHint) {
+
+ int vsize = vertexSize(fNextVertexState.fFlagBits, fNextDrawState.fDrawMode);
+ GrD3D9VertexBuffer* vbuf;
+
+ if (fNextVertexState.fUsingBuffer) {
+ vbuf = (GrD3D9VertexBuffer*) fNextVertexState.fBuffer;
+ } else {
+ vbuf = setupVBufferStage(vsize, &baseVertex, vertexCount,
+ fNextDrawState.fDrawMode);
+ if (NULL == vbuf) {
+ return;
+ }
+ }
+ GR_D3D9(fDevice,SetStreamSource(0, vbuf->buffer(), 0, vsize));
+ GR_D3D9(fDevice,DrawPrimitive(gPrimType2D3D9PrimType[type], baseVertex,
+ vertex_to_primitive_count(type, vertexCount)));
+ vbuf->unref();
+}
+
+void GrGpuD3D9::notifyVertexBufferBind(GrD3D9VertexBuffer* buffer) {
+}
+
+void GrGpuD3D9::notifyVertexBufferDelete(GrD3D9VertexBuffer* buffer) {
+ if (fNextVertexState.fUsingBuffer && fNextVertexState.fBuffer == buffer) {
+ fNextVertexState.fBuffer = NULL;
+ }
+}
+
+void GrGpuD3D9::notifyIndexBufferBind(GrD3D9IndexBuffer* buffer) {
+}
+
+void GrGpuD3D9::notifyIndexBufferDelete(GrD3D9IndexBuffer* buffer) {
+ if (fLastIndexBuffer == buffer) {
+ fLastIndexBuffer = NULL;
+ }
+}
+
+void GrGpuD3D9::notifyTextureDelete(GrD3D9Texture* texture) {
+ if (fNextDrawState.fTexture == texture ||
+ fLastDrawState.fTexture == texture) {
+ fNextDrawState.fTexture = NULL;
+ fLastDrawState.fTexture = NULL;
+ GR_D3D9(fDevice, SetTexture(0, NULL));
+ }
+ if (fNextDrawState.fRenderTarget == texture->asRenderTarget() ||
+ fLastDrawState.fRenderTarget == texture->asRenderTarget()) {
+ fNextDrawState.fRenderTarget = (GrRenderTarget*) &fDefaultRenderTarget;
+ setRenderTargetImm();
+ }
+}
+
+void GrGpuD3D9::notifyTextureRemoveRenderTarget(GrD3D9Texture* texture) {
+ if (fNextDrawState.fRenderTarget == texture->asRenderTarget() ||
+ fLastDrawState.fRenderTarget == texture->asRenderTarget()) {
+ fNextDrawState.fRenderTarget = (GrRenderTarget*) &fDefaultRenderTarget;
+ setRenderTargetImm();
+ }
+}
+
+////////////////////////////////////////////////////////////////////////////////
+
+GrD3D9Texture::GrD3D9Texture(uint32_t width,
+ uint32_t height,
+ GrTexture::PixelConfig config,
+ IDirect3DTexture9* texture,
+ IDirect3DSurface9* stencil,
+ bool clearStencil,
+ GrGpuD3D9* gpuD3D9) :
+ INHERITED(width, height, width, height, config),
+ fTexture(texture),
+ fStencil(stencil),
+ fGpuD3D9(gpuD3D9) {
+ GrAssert(NULL != fTexture);
+ fTexture->GetLevelDesc(0, &fDesc);
+
+ if (fDesc.Usage & D3DUSAGE_RENDERTARGET) {
+ HRESULT result = fTexture->GetSurfaceLevel(0, &fRenderTarget.fColor);
+ GrAssert(SUCCEEDED(result) && NULL != fRenderTarget.fColor);
+ fRenderTarget.fStencil = stencil;
+ fRenderTarget.fClearStencil = clearStencil;
+ } else {
+ GrAssert(NULL == stencil);
+ fRenderTarget.fColor = NULL;
+ fRenderTarget.fStencil = NULL;
+ fRenderTarget.fClearStencil = false;
+ }
+}
+
+GrD3D9Texture::~GrD3D9Texture() {
+ fGpuD3D9->notifyTextureDelete(this);
+ if (NULL != fRenderTarget.fColor) {
+ fRenderTarget.fColor->Release();
+ }
+ if (NULL != fRenderTarget.fStencil) {
+ fRenderTarget.fStencil->Release();
+ }
+ if (NULL != fTexture) {
+ fTexture->Release();
+ }
+}
+
+void GrD3D9Texture::abandon() {
+ GrAssert(NULL != fTexture);
+ // release on device already deleted the objects?
+ fTexture = NULL;
+ fRenderTarget.fColor = NULL;
+ fRenderTarget.fStencil = NULL;
+}
+
+bool GrD3D9Texture::isRenderTarget() {
+ GrAssert(NULL != fTexture);
+ return (fDesc.Usage & D3DUSAGE_RENDERTARGET);
+}
+
+void GrD3D9Texture::removeRenderTarget() {
+ fGpuD3D9->notifyTextureRemoveRenderTarget(this);
+ if (NULL != fRenderTarget.fColor) {
+ fRenderTarget.fColor->Release();
+ fRenderTarget.fColor = NULL;
+ }
+ if (NULL != fRenderTarget.fStencil) {
+ fRenderTarget.fStencil->Release();
+ fRenderTarget.fStencil = NULL;
+ }
+ fRenderTarget.fClearStencil = false;
+}
+
+void GrD3D9Texture::uploadTextureData(uint32_t x,
+ uint32_t y,
+ uint32_t width,
+ uint32_t height,
+ const void* srcData) {
+ GrAssert(NULL != fTexture);
+ HRESULT hr;
+#if 0 // is it ever beneficial to lock the texture directly?
+ if (fDesc.Usage & D3DUSAGE_DYNAMIC) {
+ D3DLOCKED_RECT lock;
+ RECT rect;
+ rect.left = x;
+ rect.right = x + width;
+ rect.top = y;
+ rect.bottom = y + height;
+ hr = fTexture->LockRect(0, &lock, &rect, 0);
+ if (FAILED(hr)) {
+ GrAssert(!"Failed to lock texture!");
+ return;
+ }
+ int bpp = format_bytes(fDesc.Format);
+ if (lock.Pitch == width * bpp) {
+ memcpy(lock.pBits, srcData, width*height*bpp);
+ } else {
+ for (uint32_t y = 0; y < height; ++y) {
+ memcpy((char*)lock.pBits + y * lock.Pitch,
+ (char*)srcData + y*width*bpp, width*bpp);
+ }
+ }
+ hr = fTexture->UnlockRect(0);
+ GrAssert(SUCCEEDED(hr));
+ } else
+#endif
+ {
+ // should the temp textures be cached
+ // somewhere so we aren't recreating them?
+ IDirect3DDevice9* device;
+ hr = fTexture->GetDevice(&device);
+ if (FAILED(hr) || NULL == device) {
+ GrAssert("getting device from texture failed!");
+ return;
+ }
+ IDirect3DTexture9* tempTexture;
+ GR_D3D9(device, CreateTexture(width, height, 1, 0, fDesc.Format,
+ D3DPOOL_SYSTEMMEM, &tempTexture, NULL));
+ GrAssert(NULL != tempTexture);
+ IDirect3DSurface9* tempSurface;
+ GR_D3D9(tempTexture, GetSurfaceLevel(0, &tempSurface));
+ GrAssert(NULL != tempSurface);
+
+ D3DLOCKED_RECT lock;
+ GR_D3D9(tempSurface, LockRect(&lock, NULL, 0));
+ GrAssert(NULL != lock.pBits);
+
+ // For 4444, D3D uses ARGB while Skia uses RGBA
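+ // e.g. a source pixel 0x123A (R=1, G=2, B=3, A=0xA) becomes 0xA123
+ // in D3DFMT_A4R4G4B4.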
+ if (D3DFMT_A4R4G4B4 == fDesc.Format) {
+ WORD* src = (WORD*)srcData;
+ for (uint32_t y = 0; y < height; ++y) {
+ for (uint32_t x = 0; x < width; ++x, ++src) {
+ WORD* dst = (WORD*)((char*)lock.pBits + y * lock.Pitch)+x;
+ *dst = ((0xfff0 & *src) >> 4) | ((0x000f & *src) << 12);
+ }
+ }
+ } else {
+ int bpp = format_bytes(fDesc.Format);
+ if (lock.Pitch == width * bpp) {
+ memcpy(lock.pBits, srcData, width*height*bpp);
+ } else {
+ for (uint32_t y = 0; y < height; ++y) {
+ memcpy((char*)lock.pBits + y * lock.Pitch,
+ (char*)srcData + y*width*bpp, width*bpp);
+ }
+ }
+ }
+ GR_D3D9(tempSurface, UnlockRect());
+
+ IDirect3DSurface9* level0;
+ GR_D3D9(fTexture, GetSurfaceLevel(0, &level0));
+
+ POINT xy = {x, y};
+ GR_D3D9(device, UpdateSurface(tempSurface, NULL, level0, &xy));
+
+ tempSurface->Release();
+ tempTexture->Release();
+ level0->Release();
+ device->Release();
+ }
+}
+
+////////////////////////////////////////////////////////////////////////////////
+
+GrD3D9VertexBuffer::GrD3D9VertexBuffer(uint32_t size,
+ bool dynamic,
+ IDirect3DVertexBuffer9* vbuffer,
+ GrGpuD3D9* gpuD3D9) :
+ INHERITED(size, dynamic),
+ fBuffer(vbuffer),
+ fLocked(false),
+ fGpuD3D9(gpuD3D9) {
+ HRESULT hr = fBuffer->GetDesc(&fDesc);
+ GrAssert(SUCCEEDED(hr));
+}
+
+GrD3D9VertexBuffer::~GrD3D9VertexBuffer() {
+ fGpuD3D9->notifyVertexBufferDelete(this);
+ if (NULL != fBuffer) {
+ fBuffer->Release();
+ }
+}
+
+void GrD3D9VertexBuffer::abandon() {
+ GrAssert(NULL != fBuffer);
+ fBuffer = NULL;
+}
+
+void* GrD3D9VertexBuffer::lock() {
+ GrAssert(NULL != fBuffer);
+ HRESULT hr;
+ void* data = NULL;
+ hr = fBuffer->Lock(0, fDesc.Size, &data, D3DLOCK_DISCARD);
+ fLocked = SUCCEEDED(hr);
+ GrAssert(fLocked && NULL != data);
+ return data;
+}
+
+void GrD3D9VertexBuffer::unlock() {
+ GrAssert(fLocked);
+ HRESULT hr = fBuffer->Unlock();
+ GrAssert(SUCCEEDED(hr));
+ fLocked = !SUCCEEDED(hr);
+}
+
+bool GrD3D9VertexBuffer::isLocked() {
+ return fLocked;
+}
+
+bool GrD3D9VertexBuffer::updateData(const void* src, uint32_t srcSizeInBytes) {
+ GrAssert(srcSizeInBytes <= fDesc.Size);
+ HRESULT hr;
+ void* data;
+ hr = fBuffer->Lock(0, fDesc.Size, &data, D3DLOCK_DISCARD);
+ GrAssert(SUCCEEDED(hr));
+ if (SUCCEEDED(hr)) {
+ fLocked = true;
+ GrAssert(NULL != data);
+ memcpy(data, src, srcSizeInBytes);
+ hr = fBuffer->Unlock();
+ fLocked = !SUCCEEDED(hr);
+ GrAssert(!fLocked);
+ return !fLocked;
+ }
+ return false;
+}
+
+////////////////////////////////////////////////////////////////////////////////
+
+GrD3D9IndexBuffer::GrD3D9IndexBuffer(uint32_t size,
+ bool dynamic,
+ IDirect3DIndexBuffer9* ibuffer,
+ GrGpuD3D9* gpuD3D9) :
+ INHERITED(size, dynamic),
+ fBuffer(ibuffer),
+ fLocked(false),
+ fGpuD3D9(gpuD3D9) {
+ HRESULT hr = fBuffer->GetDesc(&fDesc);
+ GrAssert(SUCCEEDED(hr));
+}
+
+GrD3D9IndexBuffer::~GrD3D9IndexBuffer() {
+ fGpuD3D9->notifyIndexBufferDelete(this);
+ if (NULL != fBuffer) {
+ fBuffer->Release();
+ }
+}
+
+void GrD3D9IndexBuffer::abandon() {
+ GrAssert(NULL != fBuffer);
+ fBuffer = NULL;
+}
+
+void* GrD3D9IndexBuffer::lock() {
+ GrAssert(NULL != fBuffer);
+ HRESULT hr;
+ void* data = NULL;
+ hr = fBuffer->Lock(0, fDesc.Size, &data, D3DLOCK_DISCARD);
+ fLocked = SUCCEEDED(hr);
+ GrAssert(fLocked && NULL != data);
+ return data;
+}
+
+void GrD3D9IndexBuffer::unlock() {
+ GrAssert(fLocked);
+ HRESULT hr = fBuffer->Unlock();
+ GrAssert(SUCCEEDED(hr));
+ fLocked = !SUCCEEDED(hr);
+}
+
+bool GrD3D9IndexBuffer::isLocked() {
+ return fLocked;
+}
+
+bool GrD3D9IndexBuffer::updateData(const void* src, uint32_t srcSizeInBytes) {
+ GrAssert(srcSizeInBytes <= fDesc.Size);
+ HRESULT hr;
+ void* data;
+ hr = fBuffer->Lock(0, fDesc.Size, &data, D3DLOCK_DISCARD);
+ GrAssert(SUCCEEDED(hr));
+ if (SUCCEEDED(hr)) {
+ fLocked = true;
+ GrAssert(NULL != data);
+ memcpy(data, src, srcSizeInBytes);
+ hr = fBuffer->Unlock();
+ fLocked = !SUCCEEDED(hr);
+ GrAssert(!fLocked);
+ return !fLocked;
+ }
+ return false;
+}
diff --git a/gpu/src/GrGpuFactory.cpp b/gpu/src/GrGpuFactory.cpp
new file mode 100644
index 0000000000..b3627c9a6f
--- /dev/null
+++ b/gpu/src/GrGpuFactory.cpp
@@ -0,0 +1,78 @@
+/*
+ Copyright 2010 Google Inc.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+ */
+
+
+#include "GrTypes.h"
+
+// must be before GrGLConfig.h
+#if GR_WIN32_BUILD
+// #include "GrGpuD3D9.h"
+#endif
+
+#include "GrGLConfig.h"
+
+#define GR_USE_GLSHADERS2 0
+
+#if GR_SUPPORT_GLES1 || GR_SUPPORT_GLDESKTOP
+ #include "GrGpuGLFixed.h"
+#endif
+
+#if GR_SUPPORT_GLES2 || GR_SUPPORT_GLDESKTOP
+ #if GR_USE_GLSHADERS2
+ #include "GrGpuGLShaders2.h"
+ #else
+ #include "GrGpuGLShaders.h"
+ #endif
+#endif
+
+#include "GrGpu.h"
+
+GrGpu* GrGpu::Create(Engine engine, Platform3DContext context3D) {
+ GrGpu* gpu = NULL;
+
+ switch (engine) {
+ case kOpenGL_Shaders_Engine:
+ GrAssert(NULL == context3D);
+#if GR_SUPPORT_GLES2 || GR_SUPPORT_GLDESKTOP
+ #if GR_USE_GLSHADERS2
+ gpu = new GrGpuGLShaders2;
+ #else
+ gpu = new GrGpuGLShaders;
+ #endif
+#endif
+ break;
+ case kOpenGL_Fixed_Engine:
+ GrAssert(NULL == context3D);
+#if GR_SUPPORT_GLES1 || GR_SUPPORT_GLDESKTOP
+ gpu = new GrGpuGLFixed;
+#endif
+ break;
+ case kDirect3D9_Engine:
+ GrAssert(NULL != context3D);
+#if GR_WIN32_BUILD
+// gpu = new GrGpuD3D9((IDirect3DDevice9*)context3D);
+#endif
+ break;
+ default:
+ GrAssert(!"unknown engine");
+ break;
+ }
+
+ return gpu;
+}
+
+
+
diff --git a/gpu/src/GrGpuGL.cpp b/gpu/src/GrGpuGL.cpp
new file mode 100644
index 0000000000..8a1169e4c4
--- /dev/null
+++ b/gpu/src/GrGpuGL.cpp
@@ -0,0 +1,1824 @@
+/*
+ Copyright 2010 Google Inc.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+ */
+
+#include "GrGpuGL.h"
+#include "GrMemory.h"
+#include <stdio.h>
+
+static const GLuint GR_MAX_GLUINT = ~0;
+static const GLint GR_INVAL_GLINT = ~0;
+
+#define SKIP_CACHE_CHECK true
+
+static const GLenum gXfermodeCoeff2Blend[] = {
+ GL_ZERO,
+ GL_ONE,
+ GL_SRC_COLOR,
+ GL_ONE_MINUS_SRC_COLOR,
+ GL_DST_COLOR,
+ GL_ONE_MINUS_DST_COLOR,
+ GL_SRC_ALPHA,
+ GL_ONE_MINUS_SRC_ALPHA,
+ GL_DST_ALPHA,
+ GL_ONE_MINUS_DST_ALPHA,
+};
+
+bool has_gl_extension(const char* ext) {
+ const char* glstr = (const char*) glGetString(GL_EXTENSIONS);
+
+ int extLength = strlen(ext);
+
+ while (true) {
+ int n = strcspn(glstr, " ");
+ if (n == extLength && 0 == strncmp(ext, glstr, n)) {
+ return true;
+ }
+ if (0 == glstr[n]) {
+ return false;
+ }
+ glstr += n+1;
+ }
+}
+
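+// Parses version strings of the forms the sscanfs below expect, e.g.
+// "2.1 ..." on desktop GL and "OpenGL ES-CM 1.1" or "OpenGL ES 2.0" on
+// ES (vendor-specific suffixes vary).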
+void gl_version(int* major, int* minor) {
+ const char* v = (const char*) glGetString(GL_VERSION);
+ if (NULL == v) {
+ GrAssert(0);
+ *major = 0;
+ *minor = 0;
+ return;
+ }
+#if GR_GL_DESKTOP
+ int n = sscanf(v, "%d.%d", major, minor);
+ if (n != 2) {
+ GrAssert(0);
+ *major = 0;
+ *minor = 0;
+ return;
+ }
+#else
+ char profile[2];
+ int n = sscanf(v, "OpenGL ES-%c%c %d.%d", profile, profile+1, major, minor);
+ bool ok = 4 == n;
+ if (!ok) {
+ int n = sscanf(v, "OpenGL ES %d.%d", major, minor);
+ ok = 2 == n;
+ }
+ if (!ok) {
+ GrAssert(0);
+ *major = 0;
+ *minor = 0;
+ return;
+ }
+#endif
+}
+///////////////////////////////////////////////////////////////////////////////
+
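+// Creates a throwaway FBO with a w x h RGBA texture color attachment and
+// reports whether it is framebuffer-complete. Used below to probe NPOT
+// and minimum-size render target support that can't be queried directly.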
+bool fbo_test(GrGLExts exts, int w, int h) {
+ GLuint testFBO;
+ GR_GLEXT(exts, GenFramebuffers(1, &testFBO));
+ GR_GLEXT(exts, BindFramebuffer(GR_FRAMEBUFFER, testFBO));
+ GLuint testRTTex;
+ GR_GL(GenTextures(1, &testRTTex));
+ GR_GL(BindTexture(GL_TEXTURE_2D, testRTTex));
+
+ GR_GL(TexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, w, h,
+ 0, GL_RGBA, GL_UNSIGNED_BYTE, NULL));
+ GR_GL(BindTexture(GL_TEXTURE_2D, 0));
+ GR_GLEXT(exts, FramebufferTexture2D(GR_FRAMEBUFFER, GR_COLOR_ATTACHMENT0,
+ GL_TEXTURE_2D, testRTTex, 0));
+ GLenum status = GR_GLEXT(exts, CheckFramebufferStatus(GR_FRAMEBUFFER));
+ GR_GLEXT(exts, DeleteFramebuffers(1, &testFBO));
+ GR_GL(DeleteTextures(1, &testRTTex));
+ return status == GR_FRAMEBUFFER_COMPLETE;
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+GrGpuGL::GrGpuGL() {
+ GrPrintf("------------------------- create GrGpuGL %p --------------\n",
+ this);
+ GrPrintf("------ VENDOR %s\n", glGetString(GL_VENDOR));
+ GrPrintf("------ RENDERER %s\n", glGetString(GL_RENDERER));
+ GrPrintf("------ VERSION %s\n", glGetString(GL_VERSION));
+ GrPrintf("------ EXTENSIONS\n %s \n", glGetString(GL_EXTENSIONS));
+
+ GrGLClearErr();
+
+ GrGLInitExtensions(&fExts);
+
+ resetContextHelper();
+
+ GrGLRenderTarget::GLRenderTargetIDs defaultRTIDs;
+ GR_GL(GetIntegerv(GR_FRAMEBUFFER_BINDING, (GLint*)&defaultRTIDs.fRTFBOID));
+ defaultRTIDs.fTexFBOID = defaultRTIDs.fRTFBOID;
+ defaultRTIDs.fMSColorRenderbufferID = 0;
+ defaultRTIDs.fStencilRenderbufferID = 0;
+ GLint vp[4];
+ GR_GL(GetIntegerv(GL_VIEWPORT, vp));
+ fHWBounds.fViewportRect.setLTRB(vp[0],
+ vp[1] + vp[3],
+ vp[0] + vp[2],
+ vp[1]);
+ defaultRTIDs.fOwnIDs = false;
+
+ fDefaultRenderTarget = new GrGLRenderTarget(defaultRTIDs,
+ fHWBounds.fViewportRect,
+ NULL,
+ this);
+ fHWDrawState.fRenderTarget = fDefaultRenderTarget;
+ fRenderTargetChanged = true;
+
+ fCurrDrawState = fHWDrawState;
+
+ ////////////////////////////////////////////////////////////////////////////
+ // Check for supported features.
+
+ int major, minor;
+ gl_version(&major, &minor);
+
+ GLint numFormats;
+ GR_GL(GetIntegerv(GL_NUM_COMPRESSED_TEXTURE_FORMATS, &numFormats));
+ GrAutoSTMalloc<10, GLint> formats(numFormats);
+ GR_GL(GetIntegerv(GL_COMPRESSED_TEXTURE_FORMATS, formats));
+ for (int i = 0; i < numFormats; ++i) {
+ if (formats[i] == GR_PALETTE8_RGBA8) {
+ f8bitPaletteSupport = true;
+ break;
+ }
+ }
+ GrPrintf("Palette8 support: %s\n", (f8bitPaletteSupport ? "YES" : "NO"));
+
+ GR_STATIC_ASSERT(0 == kNone_AALevel);
+ GR_STATIC_ASSERT(1 == kLow_AALevel);
+ GR_STATIC_ASSERT(2 == kMed_AALevel);
+ GR_STATIC_ASSERT(3 == kHigh_AALevel);
+
+ memset(fAASamples, 0, sizeof(fAASamples));
+ fMSFBOType = kNone_MSFBO;
+ if (has_gl_extension("GL_IMG_multisampled_render_to_texture")) {
+ fMSFBOType = kIMG_MSFBO;
+ GrPrintf("MSAA Support: IMG ES EXT.\n");
+ }
+ else if (has_gl_extension("GL_APPLE_framebuffer_multisample")) {
+ fMSFBOType = kApple_MSFBO;
+ GrPrintf("MSAA Support: APPLE ES EXT.\n");
+ }
+#if GR_GL_DESKTOP
+ else if ((major >= 3) ||
+ has_gl_extension("GL_ARB_framebuffer_object") ||
+ (has_gl_extension("GL_EXT_framebuffer_multisample") &&
+ has_gl_extension("GL_EXT_framebuffer_blit"))) {
+ fMSFBOType = kDesktop_MSFBO;
+ GrPrintf("MSAA Support: DESKTOP\n");
+ }
+#endif
+ else {
+ GrPrintf("MSAA Support: NONE\n");
+ }
+
+ if (kNone_MSFBO != fMSFBOType) {
+ GLint maxSamples;
+ GLenum maxSampleGetter = (kIMG_MSFBO == fMSFBOType) ?
+ GR_MAX_SAMPLES_IMG :
+ GR_MAX_SAMPLES;
+ GR_GL(GetIntegerv(maxSampleGetter, &maxSamples));
+ if (maxSamples > 1 ) {
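+ // e.g. maxSamples == 8 yields 4 samples for kLow_AALevel
+ // (floor(0.5 * 8)), 6 for kMed_AALevel (floor(0.75 * 8)), and 8
+ // for kHigh_AALevel.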
+ fAASamples[kNone_AALevel] = 0;
+ fAASamples[kLow_AALevel] = GrMax(2,
+ GrFixedFloorToInt((GR_FixedHalf) *
+ maxSamples));
+ fAASamples[kMed_AALevel] = GrMax(2,
+ GrFixedFloorToInt(((GR_Fixed1*3)/4) *
+ maxSamples));
+ fAASamples[kHigh_AALevel] = maxSamples;
+ }
+ GrPrintf("\tMax Samples: %d\n", maxSamples);
+ }
+
+#if GR_GL_DESKTOP
+ fHasStencilWrap = (major >= 2 || (major == 1 && minor >= 4)) ||
+ has_gl_extension("GL_EXT_stencil_wrap");
+#else
+ fHasStencilWrap = (major >= 2) || has_gl_extension("GL_OES_stencil_wrap");
+#endif
+ GrPrintf("Stencil Wrap: %s\n", (fHasStencilWrap ? "YES" : "NO"));
+
+#if GR_GL_DESKTOP
+ // we could also look for GL_ATI_separate_stencil extension or
+ // GL_EXT_stencil_two_side but they use different function signatures
+ // than GL2.0+ (and than each other).
+ fSingleStencilPassForWinding = (major >= 2);
+#else
+ // ES 2 has two sided stencil but 1.1 doesn't. There doesn't seem to be
+ // an ES1 extension.
+ fSingleStencilPassForWinding = (major >= 2);
+#endif
+ GrPrintf("Single Stencil Pass For Winding: %s\n", (fSingleStencilPassForWinding ? "YES" : "NO"));
+
+
+#if GR_GL_DESKTOP
+ fRGBA8Renderbuffer = true;
+#else
+ fRGBA8Renderbuffer = has_gl_extension("GL_OES_rgb8_rgba8");
+#endif
+ GrPrintf("RGBA Renderbuffer: %s\n", (fRGBA8Renderbuffer ? "YES" : "NO"));
+
+
+#if GR_GL_DESKTOP
+ fBufferLockSupport = true; // we require VBO support and the desktop VBO
+ // extension includes glMapBuffer.
+#else
+ fBufferLockSupport = has_gl_extension("GL_OES_mapbuffer");
+#endif
+ GrPrintf("Map Buffer: %s\n", (fBufferLockSupport ? "YES" : "NO"));
+
+#if GR_GL_DESKTOP
+ fNPOTTextureSupport =
+ (major >= 2 || has_gl_extension("GL_ARB_texture_non_power_of_two")) ?
+ kFull_NPOTTextureType :
+ kNone_NPOTTextureType;
+#else
+ if (has_gl_extension("GL_OES_texture_npot")) {
+ fNPOTTextureSupport = kFull_NPOTTextureType;
+ } else if (major >= 2 ||
+ has_gl_extension("GL_APPLE_texture_2D_limited_npot")) {
+ fNPOTTextureSupport = kNoRepeat_NPOTTextureType;
+ } else {
+ fNPOTTextureSupport = kNone_NPOTTextureType;
+ }
+#endif
+ ////////////////////////////////////////////////////////////////////////////
+ // Experiments to determine limitations that can't be queried. TODO: Make
+ // these a preprocess that generates some compile-time constants.
+
+ /* Experimentation has found that some GLs that support NPOT textures
+ do not support FBOs with a NPOT texture. They report "unsupported" FBO
+ status. I don't know how to explicitly query for this. Do an
+ experiment. Note they may support NPOT with a renderbuffer but not a
+ texture. Presumably, the implementation bloats the renderbuffer
+ internally to the next POT.
+ */
+ if (fNPOTTextureSupport == kFull_NPOTTextureType) {
+ bool npotFBOSuccess = fbo_test(fExts, 200, 200);
+ if (!npotFBOSuccess) {
+ fNPOTTextureSupport = kNonRendertarget_NPOTTextureType;
+ GrPrintf("NPOT Renderbuffer Test: FAILED\n");
+ } else {
+ GrPrintf("NPOT Renderbuffer Test: PASSED\n");
+ }
+ }
+ switch (fNPOTTextureSupport) {
+ case kNone_NPOTTextureType:
+ GrPrintf("NPOT Support: NONE\n");
+ break;
+ case kNoRepeat_NPOTTextureType:
+ GrPrintf("NPOT Support: NO REPEAT\n");
+ break;
+ case kNonRendertarget_NPOTTextureType:
+ GrPrintf("NPOT Support: NO FBOTEX\n");
+ break;
+ case kFull_NPOTTextureType:
+ GrPrintf("NPOT Support: FULL\n");
+ break;
+ }
+
+ // sanity check to make sure we can at least create an FBO from a POT texture
+ if (fNPOTTextureSupport < kFull_NPOTTextureType) {
+ bool npotFBOSuccess = fbo_test(fExts, 128, 128);
+ if (!npotFBOSuccess) {
+ GrPrintf("FBO Sanity Test: FAILED\n");
+ } else {
+ GrPrintf("FBO Sanity Test: PASSED\n");
+ }
+ }
+
+ /* The iPhone 4 has a restriction that, for an FBO with a texture color
+ attachment of height <= 8, the width must be <= the height. Here
+ we look for such a limitation.
+ */
+ fMinRenderTargetHeight = GR_INVAL_GLINT;
+ GLint maxRenderSize;
+ glGetIntegerv(GR_MAX_RENDERBUFFER_SIZE, &maxRenderSize);
+
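+ // Probe for the smallest height (paired with the widest renderbuffer
+ // size) at which an FBO still completes, stepping by powers of two
+ // unless full NPOT support lets us step by one; the same is done for
+ // width below.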
+ GrPrintf("Small height FBO texture experiments\n");
+ for (GLuint i = 1; i <= 256;
+ (kFull_NPOTTextureType != fNPOTTextureSupport) ? i *= 2 : ++i) {
+ GLuint w = maxRenderSize;
+ GLuint h = i;
+ if (fbo_test(fExts, w, h)) {
+ GrPrintf("\t[%d, %d]: PASSED\n", w, h);
+ fMinRenderTargetHeight = i;
+ break;
+ } else {
+ GrPrintf("\t[%d, %d]: FAILED\n", w, h);
+ }
+ }
+ GrAssert(GR_INVAL_GLINT != fMinRenderTargetHeight);
+
+ GrPrintf("Small width FBO texture experiments\n");
+ fMinRenderTargetWidth = GR_INVAL_GLINT;
+ for (GLuint i = 1; i <= 256;
+ (kFull_NPOTTextureType != fNPOTTextureSupport) ? i *= 2 : ++i) {
+ GLuint w = i;
+ GLuint h = maxRenderSize;
+ if (fbo_test(fExts, w, h)) {
+ GrPrintf("\t[%d, %d]: PASSED\n", w, h);
+ fMinRenderTargetWidth = i;
+ break;
+ } else {
+ GrPrintf("\t[%d, %d]: FAILED\n", w, h);
+ }
+ }
+ GrAssert(GR_INVAL_GLINT != fMinRenderTargetWidth);
+
+#if GR_IOS_BUILD
+ /*
+ The iPad seems to fail, at least sometimes, if the height is < 16,
+ so we pin the values here for now. A better fix might be to
+ conditionalize this based on knowing that it's an iPad (or some
+ other check).
+ */
+ fMinRenderTargetWidth = GrMax<GLuint>(fMinRenderTargetWidth, 16);
+ fMinRenderTargetHeight = GrMax<GLuint>(fMinRenderTargetHeight, 16);
+#endif
+ // bind back to original FBO
+ GR_GLEXT(fExts, BindFramebuffer(GR_FRAMEBUFFER, defaultRTIDs.fRTFBOID));
+#if GR_COLLECT_STATS
+ ++fStats.fRenderTargetChngCnt;
+#endif
+ eraseStencil(0, ~0);
+}
+
+GrGpuGL::~GrGpuGL() {
+ fDefaultRenderTarget->abandon();
+ fDefaultRenderTarget->unref();
+}
+
+void GrGpuGL::resetContextHelper() {
+ // We detect cases where blending is effectively off
+ fHWBlendDisabled = false;
+ GR_GL(Enable(GL_BLEND));
+
+ // this is always disabled
+ GR_GL(Disable(GL_CULL_FACE));
+
+ GR_GL(Disable(GL_DITHER));
+#if GR_GL_DESKTOP
+ GR_GL(Disable(GL_LINE_SMOOTH));
+ GR_GL(Disable(GL_POINT_SMOOTH));
+ GR_GL(Disable(GL_MULTISAMPLE));
+#endif
+
+ // we only ever use lines in hairline mode
+ GR_GL(LineWidth(1));
+
+ GR_GL(ActiveTexture(GL_TEXTURE0));
+
+ fHWDrawState.fFlagBits = 0;
+
+ // illegal values
+ fHWDrawState.fSrcBlend = (BlendCoeff)-1;
+ fHWDrawState.fDstBlend = (BlendCoeff)-1;
+ fHWDrawState.fColor = GrColor_ILLEGAL;
+ fHWDrawState.fPointSize = -1;
+ fHWDrawState.fTexture = NULL;
+
+ GR_GL(Scissor(0,0,0,0));
+ fHWBounds.fScissorRect.setLTRB(0,0,0,0);
+ fHWBounds.fScissorEnabled = false;
+ GR_GL(Disable(GL_SCISSOR_TEST));
+
+ fHWDrawState.fSamplerState.setRadial2Params(-GR_ScalarMax,
+ -GR_ScalarMax,
+ true);
+
+ for (int i = 0; i < kMatrixModeCount; i++) {
+ fHWDrawState.fMatrixModeCache[i].setScale(GR_ScalarMax, GR_ScalarMax); // illegal
+ }
+
+ // disabling the stencil test also disables
+ // stencil buffer writes
+ GR_GL(Disable(GL_STENCIL_TEST));
+ GR_GL(StencilMask(0xffffffff));
+ GR_GL(ColorMask(GL_TRUE, GL_TRUE, GL_TRUE, GL_TRUE));
+ fHWDrawState.fReverseFill = false;
+ fHWDrawState.fStencilPass = kNone_StencilPass;
+ fHWStencilClip = false;
+
+ fHWGeometryState.fIndexBuffer = NULL;
+ fHWGeometryState.fVertexBuffer = NULL;
+ GR_GL(BindBuffer(GL_ARRAY_BUFFER, 0));
+ GR_GL(BindBuffer(GL_ELEMENT_ARRAY_BUFFER, 0));
+
+ fHWDrawState.fRenderTarget = NULL;
+}
+
+void GrGpuGL::resetContext() {
+ INHERITED::resetContext();
+ resetContextHelper();
+}
+
+
+// defines stencil formats from most to least preferred
+#if GR_GL_ES
+ GLenum GR_GL_STENCIL_FORMAT_ARRAY[] = {
+ GR_STENCIL_INDEX8,
+ };
+#else
+ GLenum GR_GL_STENCIL_FORMAT_ARRAY[] = {
+ GR_STENCIL_INDEX8,
+ GR_STENCIL_INDEX16,
+ GR_UNSIGNED_INT_24_8,
+ GR_DEPTH_STENCIL,
+ };
+#endif
+
+// good to set a break-point here to know when createTexture fails
+static GrTexture* return_null_texture() {
+// GrAssert(!"null texture");
+ return NULL;
+}
+
+#if GR_DEBUG
+static size_t as_size_t(int x) {
+ return x;
+}
+#endif
+
+GrRenderTarget* GrGpuGL::createPlatformRenderTarget(
+ intptr_t platformRenderTarget,
+ int width, int height) {
+ GrGLRenderTarget::GLRenderTargetIDs rtIDs;
+ rtIDs.fStencilRenderbufferID = 0;
+ rtIDs.fMSColorRenderbufferID = 0;
+ rtIDs.fTexFBOID = 0;
+ rtIDs.fOwnIDs = false;
+
+ GrIRect viewport;
+
+ // viewport is in GL coords (top >= bottom)
+ viewport.setLTRB(0, height, width, 0);
+
+ rtIDs.fRTFBOID = (GLuint)platformRenderTarget;
+ rtIDs.fTexFBOID = (GLuint)platformRenderTarget;
+
+ GrGLRenderTarget* rt = new GrGLRenderTarget(rtIDs, viewport, NULL, this);
+
+ return rt;
+}
+
+GrTexture* GrGpuGL::createTexture(const TextureDesc& desc,
+ const void* srcData, size_t rowBytes) {
+
+#if GR_COLLECT_STATS
+ ++fStats.fTextureCreateCnt;
+#endif
+ GrGLTexture::GLTextureDesc glDesc;
+ GLenum internalFormat;
+
+ glDesc.fContentWidth = desc.fWidth;
+ glDesc.fContentHeight = desc.fHeight;
+ glDesc.fAllocWidth = desc.fWidth;
+ glDesc.fAllocHeight = desc.fHeight;
+ glDesc.fFormat = desc.fFormat;
+
+ bool renderTarget = 0 != (desc.fFlags & kRenderTarget_TextureFlag);
+ if (!canBeTexture(desc.fFormat,
+ &internalFormat,
+ &glDesc.fUploadFormat,
+ &glDesc.fUploadType)) {
+ return return_null_texture();
+ }
+
+ GrAssert(as_size_t(desc.fAALevel) < GR_ARRAY_COUNT(fAASamples));
+ GLint samples = fAASamples[desc.fAALevel];
+ if (kNone_MSFBO == fMSFBOType && desc.fAALevel != kNone_AALevel) {
+ GrPrintf("AA RT requested but not supported on this platform.");
+ }
+
+ GR_GL(GenTextures(1, &glDesc.fTextureID));
+ if (!glDesc.fTextureID) {
+ return return_null_texture();
+ }
+
+ glDesc.fUploadByteCount = GrTexture::BytesPerPixel(desc.fFormat);
+
+ /*
+ * check if our srcData has extra bytes past each row. If so, we need
+ * to trim those off here, since GL doesn't let us pass the rowBytes as
+ * a parameter to glTexImage2D
+ */
+#if GR_GL_DESKTOP
+ if (srcData) {
+ GR_GL(PixelStorei(GL_UNPACK_ROW_LENGTH,
+ rowBytes / glDesc.fUploadByteCount));
+ }
+#else
+ GrAutoSMalloc<128 * 128> trimStorage;
+ size_t trimRowBytes = desc.fWidth * glDesc.fUploadByteCount;
+ if (srcData && (trimRowBytes < rowBytes)) {
+ size_t trimSize = desc.fHeight * trimRowBytes;
+ trimStorage.realloc(trimSize);
+ // now copy the data into our new storage, skipping the trailing bytes
+ const char* src = (const char*)srcData;
+ char* dst = (char*)trimStorage.get();
+ for (uint32_t y = 0; y < desc.fHeight; y++) {
+ memcpy(dst, src, trimRowBytes);
+ src += rowBytes;
+ dst += trimRowBytes;
+ }
+ // now point srcData to our trimmed version
+ srcData = trimStorage.get();
+ }
+#endif
+
+ if (fNPOTTextureSupport < kNonRendertarget_NPOTTextureType ||
+ (fNPOTTextureSupport == kNonRendertarget_NPOTTextureType &&
+ renderTarget)) {
+ glDesc.fAllocWidth = GrNextPow2(desc.fWidth);
+ glDesc.fAllocHeight = GrNextPow2(desc.fHeight);
+ }
+
+ if (renderTarget) {
+ glDesc.fAllocWidth = GrMax<int>(fMinRenderTargetWidth,
+ glDesc.fAllocWidth);
+ glDesc.fAllocHeight = GrMax<int>(fMinRenderTargetHeight,
+ glDesc.fAllocHeight);
+ }
+
+ GR_GL(BindTexture(GL_TEXTURE_2D, glDesc.fTextureID));
+#if GR_COLLECT_STATS
+ ++fStats.fTextureChngCnt;
+#endif
+
+ GR_GL(PixelStorei(GL_UNPACK_ALIGNMENT, glDesc.fUploadByteCount));
+ if (GrTexture::kIndex_8_PixelConfig == desc.fFormat &&
+ supports8BitPalette()) {
+ // ES only supports CompressedTexImage2D, not CompressedTexSubimage2D
+ GrAssert(desc.fWidth == glDesc.fAllocWidth);
+ GrAssert(desc.fHeight == glDesc.fAllocHeight);
+ GLsizei imageSize = glDesc.fAllocWidth * glDesc.fAllocHeight +
+ kColorTableSize;
+ GR_GL(CompressedTexImage2D(GL_TEXTURE_2D, 0, glDesc.fUploadFormat,
+ glDesc.fAllocWidth, glDesc.fAllocHeight,
+ 0, imageSize, srcData));
+ GrGL_RestoreResetRowLength();
+ } else {
+ if (NULL != srcData && (glDesc.fAllocWidth != desc.fWidth ||
+ glDesc.fAllocHeight != desc.fHeight)) {
+ GR_GL(TexImage2D(GL_TEXTURE_2D, 0, internalFormat,
+ glDesc.fAllocWidth, glDesc.fAllocHeight,
+ 0, glDesc.fUploadFormat, glDesc.fUploadType, NULL));
+ GR_GL(TexSubImage2D(GL_TEXTURE_2D, 0, 0, 0, desc.fWidth,
+ desc.fHeight, glDesc.fUploadFormat,
+ glDesc.fUploadType, srcData));
+ GrGL_RestoreResetRowLength();
+
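+ // The content is smaller than the POT allocation, so replicate the
+ // bottom row, right column, and corner texels into the padded
+ // region; presumably this keeps clamped/bilinear sampling at the
+ // content edge from reading uninitialized texels.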
+ uint32_t extraW = glDesc.fAllocWidth - desc.fWidth;
+ uint32_t extraH = glDesc.fAllocHeight - desc.fHeight;
+ uint32_t maxTexels = extraW * extraH;
+ maxTexels = GrMax(extraW * desc.fHeight, maxTexels);
+ maxTexels = GrMax(desc.fWidth * extraH, maxTexels);
+
+ GrAutoSMalloc<128*128> texels(glDesc.fUploadByteCount * maxTexels);
+
+ uint32_t rowSize = desc.fWidth * glDesc.fUploadByteCount;
+ if (extraH) {
+ uint8_t* lastRowStart = (uint8_t*) srcData +
+ (desc.fHeight - 1) * rowSize;
+ uint8_t* extraRowStart = (uint8_t*)texels.get();
+
+ for (uint32_t i = 0; i < extraH; ++i) {
+ memcpy(extraRowStart, lastRowStart, rowSize);
+ extraRowStart += rowSize;
+ }
+ GR_GL(TexSubImage2D(GL_TEXTURE_2D, 0, 0, desc.fHeight, desc.fWidth,
+ extraH, glDesc.fUploadFormat, glDesc.fUploadType,
+ texels.get()));
+ }
+ if (extraW) {
+ uint8_t* edgeTexel = (uint8_t*)srcData + rowSize - glDesc.fUploadByteCount;
+ uint8_t* extraTexel = (uint8_t*)texels.get();
+ for (uint32_t j = 0; j < desc.fHeight; ++j) {
+ for (uint32_t i = 0; i < extraW; ++i) {
+ memcpy(extraTexel, edgeTexel, glDesc.fUploadByteCount);
+ extraTexel += glDesc.fUploadByteCount;
+ }
+ edgeTexel += rowSize;
+ }
+ GR_GL(TexSubImage2D(GL_TEXTURE_2D, 0, desc.fWidth, 0, extraW,
+ desc.fHeight, glDesc.fUploadFormat,
+ glDesc.fUploadType, texels.get()));
+ }
+ if (extraW && extraH) {
+ uint8_t* cornerTexel = (uint8_t*)srcData + desc.fHeight * rowSize
+ - glDesc.fUploadByteCount;
+ uint8_t* extraTexel = (uint8_t*)texels.get();
+ for (uint32_t i = 0; i < extraW*extraH; ++i) {
+ memcpy(extraTexel, cornerTexel, glDesc.fUploadByteCount);
+ extraTexel += glDesc.fUploadByteCount;
+ }
+ GR_GL(TexSubImage2D(GL_TEXTURE_2D, 0, desc.fWidth, desc.fHeight,
+ extraW, extraH, glDesc.fUploadFormat,
+ glDesc.fUploadType, texels.get()));
+ }
+
+ } else {
+ GR_GL(TexImage2D(GL_TEXTURE_2D, 0, internalFormat, glDesc.fAllocWidth,
+ glDesc.fAllocHeight, 0, glDesc.fUploadFormat,
+ glDesc.fUploadType, srcData));
+ GrGL_RestoreResetRowLength();
+ }
+ }
+
+ glDesc.fOrientation = GrGLTexture::kTopDown_Orientation;
+
+ GrGLRenderTarget::GLRenderTargetIDs rtIDs;
+ rtIDs.fStencilRenderbufferID = 0;
+ rtIDs.fMSColorRenderbufferID = 0;
+ rtIDs.fRTFBOID = 0;
+ rtIDs.fTexFBOID = 0;
+ rtIDs.fOwnIDs = true;
+ GLenum msColorRenderbufferFormat = -1;
+
+ if (renderTarget) {
+#if GR_COLLECT_STATS
+ ++fStats.fRenderTargetCreateCnt;
+#endif
+ bool failed = true;
+ GLenum status;
+ GLint err;
+
+ // If we ever need to support both the RT flag and srcData we will
+ // have to invert the data before uploading because the FBO
+ // is rendered bottom-up
+ GrAssert(NULL == srcData);
+ glDesc.fOrientation = GrGLTexture::kBottomUp_Orientation;
+
+ GR_GLEXT(fExts, GenFramebuffers(1, &rtIDs.fTexFBOID));
+ GrAssert(rtIDs.fTexFBOID);
+
+ // If we are using multisampling and any extension other than the IMG
+ // one we will create two FBOs. We render to one and then resolve to
+ // the texture bound to the other. The IMG extension does an implicit
+ // resolve.
+ if (samples > 1 && kIMG_MSFBO != fMSFBOType && kNone_MSFBO != fMSFBOType) {
+ GR_GLEXT(fExts, GenFramebuffers(1, &rtIDs.fRTFBOID));
+ GrAssert(0 != rtIDs.fRTFBOID);
+ GR_GLEXT(fExts, GenRenderbuffers(1, &rtIDs.fMSColorRenderbufferID));
+ GrAssert(0 != rtIDs.fMSColorRenderbufferID);
+ if (!fboInternalFormat(desc.fFormat, &msColorRenderbufferFormat)) {
+ GR_GLEXT(fExts,
+ DeleteRenderbuffers(1, &rtIDs.fMSColorRenderbufferID));
+ GR_GL(DeleteTextures(1, &glDesc.fTextureID));
+ GR_GLEXT(fExts, DeleteFramebuffers(1, &rtIDs.fTexFBOID));
+ GR_GLEXT(fExts, DeleteFramebuffers(1, &rtIDs.fRTFBOID));
+ fHWDrawState.fTexture = NULL;
+ return return_null_texture();
+ }
+ } else {
+ rtIDs.fRTFBOID = rtIDs.fTexFBOID;
+ }
+ int attempts = 1;
+ if (!(kNoPathRendering_TextureFlag & desc.fFlags)) {
+ GR_GLEXT(fExts, GenRenderbuffers(1, &rtIDs.fStencilRenderbufferID));
+ GrAssert(0 != rtIDs.fStencilRenderbufferID);
+ attempts = GR_ARRAY_COUNT(GR_GL_STENCIL_FORMAT_ARRAY);
+ }
+
+ // need to unbind the texture before we call FramebufferTexture2D
+ GR_GL(BindTexture(GL_TEXTURE_2D, 0));
+#if GR_COLLECT_STATS
+ ++fStats.fTextureChngCnt;
+#endif
+
+ fHWDrawState.fTexture = NULL;
+
+ err = ~GL_NO_ERROR;
+ for (int i = 0; i < attempts; ++i) {
+ if (rtIDs.fStencilRenderbufferID) {
+ GR_GLEXT(fExts, BindRenderbuffer(GR_RENDERBUFFER,
+ rtIDs.fStencilRenderbufferID));
+ if (samples > 1) {
+ GR_GLEXT_NO_ERR(fExts, RenderbufferStorageMultisample(
+ GR_RENDERBUFFER,
+ samples,
+ GR_GL_STENCIL_FORMAT_ARRAY[i],
+ glDesc.fAllocWidth,
+ glDesc.fAllocHeight));
+ } else {
+ GR_GLEXT_NO_ERR(fExts, RenderbufferStorage(
+ GR_RENDERBUFFER,
+ GR_GL_STENCIL_FORMAT_ARRAY[i],
+ glDesc.fAllocWidth,
+ glDesc.fAllocHeight));
+ }
+ err = glGetError();
+ if (err != GL_NO_ERROR) {
+ continue;
+ }
+ }
+ if (rtIDs.fRTFBOID != rtIDs.fTexFBOID) {
+ GrAssert(samples > 1);
+ GR_GLEXT(fExts, BindRenderbuffer(GR_RENDERBUFFER,
+ rtIDs.fMSColorRenderbufferID));
+ GR_GLEXT_NO_ERR(fExts, RenderbufferStorageMultisample(
+ GR_RENDERBUFFER,
+ samples,
+ msColorRenderbufferFormat,
+ glDesc.fAllocWidth,
+ glDesc.fAllocHeight));
+ err = glGetError();
+ if (err != GL_NO_ERROR) {
+ continue;
+ }
+ }
+ GR_GLEXT(fExts, BindFramebuffer(GR_FRAMEBUFFER, rtIDs.fTexFBOID));
+
+#if GR_COLLECT_STATS
+ ++fStats.fRenderTargetChngCnt;
+#endif
+ if (kIMG_MSFBO == fMSFBOType && samples > 1) {
+ GR_GLEXT(fExts, FramebufferTexture2DMultisample(
+ GR_FRAMEBUFFER,
+ GR_COLOR_ATTACHMENT0,
+ GL_TEXTURE_2D,
+ glDesc.fTextureID,
+ 0,
+ samples));
+
+ } else {
+ GR_GLEXT(fExts, FramebufferTexture2D(GR_FRAMEBUFFER,
+ GR_COLOR_ATTACHMENT0,
+ GL_TEXTURE_2D,
+ glDesc.fTextureID, 0));
+ }
+ if (rtIDs.fRTFBOID != rtIDs.fTexFBOID) {
+ GLenum status = GR_GLEXT(fExts,
+ CheckFramebufferStatus(GR_FRAMEBUFFER));
+ if (status != GR_FRAMEBUFFER_COMPLETE) {
+ GrPrintf("-- glCheckFramebufferStatus %x %d %d\n",
+ status, desc.fWidth, desc.fHeight);
+ continue;
+ }
+ GR_GLEXT(fExts, BindFramebuffer(GR_FRAMEBUFFER, rtIDs.fRTFBOID));
+ #if GR_COLLECT_STATS
+ ++fStats.fRenderTargetChngCnt;
+ #endif
+ GR_GLEXT(fExts, FramebufferRenderbuffer(GR_FRAMEBUFFER,
+ GR_COLOR_ATTACHMENT0,
+ GR_RENDERBUFFER,
+ rtIDs.fMSColorRenderbufferID));
+
+ }
+ if (rtIDs.fStencilRenderbufferID) {
+ // bind the stencil to the rt fbo if present, otherwise the tex fbo
+ GR_GLEXT(fExts, FramebufferRenderbuffer(GR_FRAMEBUFFER,
+ GR_STENCIL_ATTACHMENT,
+ GR_RENDERBUFFER,
+ rtIDs.fStencilRenderbufferID));
+ }
+ status = GR_GLEXT(fExts, CheckFramebufferStatus(GR_FRAMEBUFFER));
+
+#if GR_GL_DESKTOP
+ // On some implementations you have to be bound as DEPTH_STENCIL.
+ // (Even binding to DEPTH and STENCIL separately with the same
+ // buffer doesn't work.)
+ if (rtIDs.fStencilRenderbufferID &&
+ status != GR_FRAMEBUFFER_COMPLETE) {
+ GR_GLEXT(fExts, FramebufferRenderbuffer(GR_FRAMEBUFFER,
+ GR_STENCIL_ATTACHMENT,
+ GR_RENDERBUFFER,
+ 0));
+ GR_GLEXT(fExts,
+ FramebufferRenderbuffer(GR_FRAMEBUFFER,
+ GR_DEPTH_STENCIL_ATTACHMENT,
+ GR_RENDERBUFFER,
+ rtIDs.fStencilRenderbufferID));
+ status = GR_GLEXT(fExts, CheckFramebufferStatus(GR_FRAMEBUFFER));
+ }
+#endif
+ if (status != GR_FRAMEBUFFER_COMPLETE) {
+ GrPrintf("-- glCheckFramebufferStatus %x %d %d\n",
+ status, desc.fWidth, desc.fHeight);
+#if GR_GL_DESKTOP
+ if (rtIDs.fStencilRenderbufferID) {
+ GR_GLEXT(fExts, FramebufferRenderbuffer(GR_FRAMEBUFFER,
+ GR_DEPTH_STENCIL_ATTACHMENT,
+ GR_RENDERBUFFER,
+ 0));
+ }
+#endif
+ continue;
+ }
+ // we're successful!
+ failed = false;
+ break;
+ }
+ if (failed) {
+ if (rtIDs.fStencilRenderbufferID) {
+ GR_GLEXT(fExts,
+ DeleteRenderbuffers(1, &rtIDs.fStencilRenderbufferID));
+ }
+ if (rtIDs.fMSColorRenderbufferID) {
+ GR_GLEXT(fExts,
+ DeleteRenderbuffers(1, &rtIDs.fMSColorRenderbufferID));
+ }
+ if (rtIDs.fRTFBOID != rtIDs.fTexFBOID) {
+ GR_GLEXT(fExts, DeleteFramebuffers(1, &rtIDs.fRTFBOID));
+ }
+ if (rtIDs.fTexFBOID) {
+ GR_GLEXT(fExts, DeleteFramebuffers(1, &rtIDs.fTexFBOID));
+ }
+ GR_GL(DeleteTextures(1, &glDesc.fTextureID));
+ return return_null_texture();
+ }
+ }
+    GrGLTexture* tex = new GrGLTexture(glDesc, rtIDs, this);
+#ifdef TRACE_TEXTURE_CREATION
+    GrPrintf("--- new texture [%d] size=(%d %d) bpp=%d\n",
+             tex->fTextureID, desc.fWidth, desc.fHeight,
+             tex->fUploadByteCount);
+#endif
+
+ if (0 != rtIDs.fTexFBOID) {
+ GrRenderTarget* rt = tex->asRenderTarget();
+ // We've messed with FBO state but may not have set the correct viewport
+ // so just dirty the rendertarget state to force a resend.
+ fHWDrawState.fRenderTarget = NULL;
+
+ // clear the new stencil buffer if we have one
+ if (!(desc.fFlags & kNoPathRendering_TextureFlag)) {
+ GrRenderTarget* rtSave = fCurrDrawState.fRenderTarget;
+ fCurrDrawState.fRenderTarget = rt;
+ eraseStencil(0, ~0);
+ fCurrDrawState.fRenderTarget = rtSave;
+ }
+ }
+ return tex;
+}
+
+GrRenderTarget* GrGpuGL::defaultRenderTarget() {
+ return fDefaultRenderTarget;
+}
+
+GrVertexBuffer* GrGpuGL::createVertexBuffer(uint32_t size, bool dynamic) {
+ GLuint id;
+ GR_GL(GenBuffers(1, &id));
+ if (id) {
+ GR_GL(BindBuffer(GL_ARRAY_BUFFER, id));
+ GrGLClearErr();
+ // make sure driver can allocate memory for this buffer
+ GR_GL_NO_ERR(BufferData(GL_ARRAY_BUFFER, size, NULL,
+ dynamic ? GL_DYNAMIC_DRAW : GL_STATIC_DRAW));
+ if (glGetError() != GL_NO_ERROR) {
+ GR_GL(DeleteBuffers(1, &id));
+ // deleting bound buffer does implicit bind to 0
+ fHWGeometryState.fVertexBuffer = NULL;
+ return NULL;
+ }
+ GrGLVertexBuffer* vertexBuffer = new GrGLVertexBuffer(id, this,
+ size, dynamic);
+ fHWGeometryState.fVertexBuffer = vertexBuffer;
+ return vertexBuffer;
+ }
+ return NULL;
+}
+
+GrIndexBuffer* GrGpuGL::createIndexBuffer(uint32_t size, bool dynamic) {
+ GLuint id;
+ GR_GL(GenBuffers(1, &id));
+ if (id) {
+ GR_GL(BindBuffer(GL_ELEMENT_ARRAY_BUFFER, id));
+ GrGLClearErr();
+ // make sure driver can allocate memory for this buffer
+ GR_GL_NO_ERR(BufferData(GL_ELEMENT_ARRAY_BUFFER, size, NULL,
+ dynamic ? GL_DYNAMIC_DRAW : GL_STATIC_DRAW));
+ if (glGetError() != GL_NO_ERROR) {
+ GR_GL(DeleteBuffers(1, &id));
+ // deleting bound buffer does implicit bind to 0
+ fHWGeometryState.fIndexBuffer = NULL;
+ return NULL;
+ }
+ GrIndexBuffer* indexBuffer = new GrGLIndexBuffer(id, this,
+ size, dynamic);
+ fHWGeometryState.fIndexBuffer = indexBuffer;
+ return indexBuffer;
+ }
+ return NULL;
+}
+
+void GrGpuGL::setDefaultRenderTargetSize(uint32_t width, uint32_t height) {
+ GrIRect viewport(0, height, width, 0);
+ if (viewport != fDefaultRenderTarget->viewport()) {
+ fDefaultRenderTarget->setViewport(viewport);
+ if (fHWDrawState.fRenderTarget == fDefaultRenderTarget) {
+ fHWDrawState.fRenderTarget = NULL;
+ }
+ }
+}
+
+void GrGpuGL::flushScissor(const GrIRect* rect) {
+ GrAssert(NULL != fCurrDrawState.fRenderTarget);
+ const GrIRect& vp =
+ ((GrGLRenderTarget*)fCurrDrawState.fRenderTarget)->viewport();
+
+ if (NULL != rect &&
+ rect->contains(vp)) {
+ rect = NULL;
+ }
+
+ if (NULL != rect) {
+ GrIRect scissor;
+ // viewport is already in GL coords
+ // create a scissor in GL coords (top > bottom)
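+        // e.g. with vp = (0, H, W, 0) (left, top, right, bottom, as set up in
+        // setDefaultRenderTargetSize) and rect = (l, t, r, b) in our top-down
+        // space, the GL scissor below lands at x = l, y = H - b with
+        // width r - l and height b - t.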
+ scissor.setLTRB(vp.fLeft + rect->fLeft,
+ vp.fTop - rect->fTop,
+ vp.fLeft + rect->fRight,
+ vp.fTop - rect->fBottom);
+
+ if (fHWBounds.fScissorRect != scissor) {
+ GR_GL(Scissor(scissor.fLeft, scissor.fBottom,
+ scissor.width(), -scissor.height()));
+ fHWBounds.fScissorRect = scissor;
+ }
+
+ if (!fHWBounds.fScissorEnabled) {
+ GR_GL(Enable(GL_SCISSOR_TEST));
+ fHWBounds.fScissorEnabled = true;
+ }
+ } else {
+ if (fHWBounds.fScissorEnabled) {
+ GR_GL(Disable(GL_SCISSOR_TEST));
+ fHWBounds.fScissorEnabled = false;
+ }
+ }
+}
+
+void GrGpuGL::setSamplerStateImm(const GrSamplerState& state) {
+
+ GLenum filter = state.isFilter() ? GL_LINEAR : GL_NEAREST;
+ GR_GL(TexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, filter));
+ GR_GL(TexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, filter));
+ GR_GL(TexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S,
+ GrGLTexture::gWrapMode2GLWrap[state.getWrapX()]));
+ GR_GL(TexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T,
+ GrGLTexture::gWrapMode2GLWrap[state.getWrapY()]));
+
+}
+
+void GrGpuGL::eraseColor(GrColor color) {
+ flushRenderTarget();
+ if (fHWBounds.fScissorEnabled) {
+ GR_GL(Disable(GL_SCISSOR_TEST));
+ }
+ GR_GL(ColorMask(GL_TRUE,GL_TRUE,GL_TRUE,GL_TRUE));
+ GR_GL(ClearColor(GrColorUnpackR(color)/255.f,
+ GrColorUnpackG(color)/255.f,
+ GrColorUnpackB(color)/255.f,
+ GrColorUnpackA(color)/255.f));
+ GR_GL(Clear(GL_COLOR_BUFFER_BIT));
+ fHWBounds.fScissorEnabled = false;
+ fWriteMaskChanged = true;
+}
+
+void GrGpuGL::eraseStencil(uint32_t value, uint32_t mask) {
+ flushRenderTarget();
+ if (fHWBounds.fScissorEnabled) {
+ GR_GL(Disable(GL_SCISSOR_TEST));
+ }
+ GR_GL(StencilMask(mask));
+ GR_GL(ClearStencil(value));
+ GR_GL(Clear(GL_STENCIL_BUFFER_BIT));
+ fHWBounds.fScissorEnabled = false;
+ fWriteMaskChanged = true;
+}
+
+void GrGpuGL::eraseStencilClip() {
+ GLint stencilBitCount;
+ GR_GL(GetIntegerv(GL_STENCIL_BITS, &stencilBitCount));
+ GrAssert(stencilBitCount > 0);
+ GLint clipStencilMask = (1 << (stencilBitCount - 1));
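+    // e.g. with an 8-bit stencil buffer only the top bit (0x80), which
+    // flushStencil reserves for the clip, is cleared; the path bits are
+    // left untouched.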
+ eraseStencil(0, clipStencilMask);
+}
+
+void GrGpuGL::forceRenderTargetFlush() {
+ flushRenderTarget();
+}
+
+bool GrGpuGL::readPixels(int left, int top, int width, int height,
+ GrTexture::PixelConfig config, void* buffer) {
+ GLenum internalFormat; // we don't use this for glReadPixels
+ GLenum format;
+ GLenum type;
+ if (!this->canBeTexture(config, &internalFormat, &format, &type)) {
+ return false;
+ }
+
+ GrAssert(NULL != fCurrDrawState.fRenderTarget);
+ const GrIRect& vp = ((GrGLRenderTarget*)fCurrDrawState.fRenderTarget)->viewport();
+
+ // Brian says that viewport rects are already upside down (grrrrr)
+ glReadPixels(left, -vp.height() - top - height, width, height,
+ format, type, buffer);
+
+ // now reverse the order of the rows, since GL's are bottom-to-top, but our
+ // API presents top-to-bottom
+ {
+ size_t stride = width * GrTexture::BytesPerPixel(config);
+ GrAutoMalloc rowStorage(stride);
+ void* tmp = rowStorage.get();
+
+ const int halfY = height >> 1;
+ char* top = reinterpret_cast<char*>(buffer);
+ char* bottom = top + (height - 1) * stride;
+ for (int y = 0; y < halfY; y++) {
+ memcpy(tmp, top, stride);
+ memcpy(top, bottom, stride);
+ memcpy(bottom, tmp, stride);
+ top += stride;
+ bottom -= stride;
+ }
+ }
+ return true;
+}
+
+void GrGpuGL::flushRenderTarget() {
+ if (fHWDrawState.fRenderTarget != fCurrDrawState.fRenderTarget) {
+ GrGLRenderTarget* rt = (GrGLRenderTarget*)fCurrDrawState.fRenderTarget;
+ GR_GLEXT(fExts, BindFramebuffer(GR_FRAMEBUFFER, rt->renderFBOID()));
+ #if GR_COLLECT_STATS
+ ++fStats.fRenderTargetChngCnt;
+ #endif
+ rt->setDirty(true);
+ #if GR_DEBUG
+ GLenum status = GR_GLEXT(fExts, CheckFramebufferStatus(GR_FRAMEBUFFER));
+ if (status != GR_FRAMEBUFFER_COMPLETE) {
+ GrPrintf("-- glCheckFramebufferStatus %x\n", status);
+ }
+ #endif
+ fHWDrawState.fRenderTarget = fCurrDrawState.fRenderTarget;
+ const GrIRect& vp = rt->viewport();
+ fRenderTargetChanged = true;
+ if (fHWBounds.fViewportRect != vp) {
+ GR_GL(Viewport(vp.fLeft,
+ vp.fBottom,
+ vp.width(),
+ -vp.height()));
+ fHWBounds.fViewportRect = vp;
+ }
+ }
+}
+
+GLenum gPrimitiveType2GLMode[] = {
+ GL_TRIANGLES,
+ GL_TRIANGLE_STRIP,
+ GL_TRIANGLE_FAN,
+ GL_POINTS,
+ GL_LINES,
+ GL_LINE_STRIP
+};
+
+void GrGpuGL::drawIndexedHelper(PrimitiveType type,
+ uint32_t startVertex,
+ uint32_t startIndex,
+ uint32_t vertexCount,
+ uint32_t indexCount) {
+ GrAssert((size_t)type < GR_ARRAY_COUNT(gPrimitiveType2GLMode));
+
+ GLvoid* indices = (GLvoid*)(sizeof(uint16_t) * startIndex);
+ if (kReserved_GeometrySrcType == fGeometrySrc.fIndexSrc) {
+ indices = (GLvoid*)((intptr_t)indices + (intptr_t)fIndices.get());
+ } else if (kArray_GeometrySrcType == fGeometrySrc.fIndexSrc) {
+ indices = (GLvoid*)((intptr_t)indices +
+ (intptr_t)fGeometrySrc.fIndexArray);
+ }
+
+ GR_GL(DrawElements(gPrimitiveType2GLMode[type], indexCount,
+ GL_UNSIGNED_SHORT, indices));
+}
+
+void GrGpuGL::drawNonIndexedHelper(PrimitiveType type,
+ uint32_t startVertex,
+ uint32_t vertexCount) {
+ GrAssert((size_t)type < GR_ARRAY_COUNT(gPrimitiveType2GLMode));
+
+ GR_GL(DrawArrays(gPrimitiveType2GLMode[type], 0, vertexCount));
+}
+
+#if !defined(SK_GL_HAS_COLOR4UB)
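+// Maps a byte in [0, 255] to 16.16 fixed point in [0, 1]; e.g.
+// byte2fixed(0xFF) = (255 + (255 >> 7)) << 8 = 256 << 8 = 0x10000 (fixed 1.0)
+// and byte2fixed(0) = 0, so both endpoints map exactly.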
+static inline GrFixed byte2fixed(unsigned value) {
+ return (value + (value >> 7)) << 8;
+}
+#endif
+
+void GrGpuGL::resolveTextureRenderTarget(GrGLTexture* texture) {
+ GrGLRenderTarget* rt = (GrGLRenderTarget*) texture->asRenderTarget();
+
+ if (NULL != rt && rt->needsResolve()) {
+ GrAssert(kNone_MSFBO != fMSFBOType);
+ GrAssert(rt->textureFBOID() != rt->renderFBOID());
+ GR_GLEXT(fExts, BindFramebuffer(GR_READ_FRAMEBUFFER,
+ rt->renderFBOID()));
+ GR_GLEXT(fExts, BindFramebuffer(GR_DRAW_FRAMEBUFFER,
+ rt->textureFBOID()));
+ #if GR_COLLECT_STATS
+ ++fStats.fRenderTargetChngCnt;
+ #endif
+ // make sure we go through set render target
+ fHWDrawState.fRenderTarget = NULL;
+
+ GLint left = 0;
+ GLint right = texture->contentWidth();
+ // we will have rendered to the top of the FBO.
+ GLint top = texture->allocHeight();
+ GLint bottom = texture->allocHeight() - texture->contentHeight();
+ if (kApple_MSFBO == fMSFBOType) {
+ GR_GL(Enable(GL_SCISSOR_TEST));
+ GR_GL(Scissor(left, bottom, right-left, top-bottom));
+ GR_GLEXT(fExts, ResolveMultisampleFramebuffer());
+ fHWBounds.fScissorRect.setEmpty();
+ fHWBounds.fScissorEnabled = true;
+ } else {
+ GR_GLEXT(fExts, BlitFramebuffer(left, bottom, right, top,
+ left, bottom, right, top,
+ GL_COLOR_BUFFER_BIT, GL_NEAREST));
+ }
+ rt->setDirty(false);
+
+ }
+}
+
+void GrGpuGL::flushStencil() {
+
+ // use stencil for clipping if clipping is enabled and the clip
+ // has been written into the stencil.
+ bool stencilClip = fClipState.fClipInStencil &&
+ (kClip_StateBit & fCurrDrawState.fFlagBits);
+ bool stencilChange =
+ fWriteMaskChanged ||
+ fHWStencilClip != stencilClip ||
+ fHWDrawState.fStencilPass != fCurrDrawState.fStencilPass ||
+ (kNone_StencilPass != fCurrDrawState.fStencilPass &&
+ (StencilPass)kSetClip_StencilPass != fCurrDrawState.fStencilPass &&
+ fHWDrawState.fReverseFill != fCurrDrawState.fReverseFill);
+
+ if (stencilChange) {
+ GLint stencilBitCount;
+ GLint clipStencilMask;
+ GLint pathStencilMask;
+ GR_GL(GetIntegerv(GL_STENCIL_BITS, &stencilBitCount));
+ GrAssert(stencilBitCount > 0 ||
+ kNone_StencilPass == fCurrDrawState.fStencilPass);
+ clipStencilMask = (1 << (stencilBitCount - 1));
+ pathStencilMask = clipStencilMask - 1;
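+        // e.g. with 8 stencil bits: clipStencilMask = 0x80 (the top bit,
+        // reserved for clipping) and pathStencilMask = 0x7f (the bits used
+        // for path fills).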
+ switch (fCurrDrawState.fStencilPass) {
+ case kNone_StencilPass:
+ if (stencilClip) {
+ GR_GL(Enable(GL_STENCIL_TEST));
+ GR_GL(StencilFunc(GL_EQUAL,
+ clipStencilMask,
+ clipStencilMask));
+ GR_GL(StencilOp(GL_KEEP, GL_KEEP, GL_KEEP));
+ } else {
+ GR_GL(Disable(GL_STENCIL_TEST));
+ }
+ GR_GL(ColorMask(GL_TRUE, GL_TRUE, GL_TRUE, GL_TRUE));
+ if (!fSingleStencilPassForWinding) {
+ GR_GL(Disable(GL_CULL_FACE));
+ }
+ break;
+ case kEvenOddStencil_StencilPass:
+ GR_GL(Enable(GL_STENCIL_TEST));
+ if (stencilClip) {
+ GR_GL(StencilFunc(GL_EQUAL, clipStencilMask, clipStencilMask));
+ } else {
+ GR_GL(StencilFunc(GL_ALWAYS, 0x0, 0x0));
+ }
+ GR_GL(StencilMask(pathStencilMask));
+ GR_GL(ColorMask(GL_TRUE, GL_TRUE, GL_TRUE, GL_TRUE));
+ GR_GL(StencilOp(GL_KEEP, GL_INVERT, GL_INVERT));
+ GR_GL(ColorMask(GL_FALSE, GL_FALSE, GL_FALSE, GL_FALSE));
+ if (!fSingleStencilPassForWinding) {
+ GR_GL(Disable(GL_CULL_FACE));
+ }
+ break;
+ case kEvenOddColor_StencilPass: {
+ GR_GL(Enable(GL_STENCIL_TEST));
+ GLint funcRef = 0;
+ GLuint funcMask = pathStencilMask;
+ if (stencilClip) {
+ funcRef |= clipStencilMask;
+ funcMask |= clipStencilMask;
+ }
+ if (!fCurrDrawState.fReverseFill) {
+ funcRef |= pathStencilMask;
+ }
+                GR_GL(StencilFunc(GL_EQUAL, funcRef, funcMask));
+                GR_GL(StencilMask(pathStencilMask));
+ GR_GL(StencilOp(GL_ZERO, GL_ZERO, GL_ZERO));
+ GR_GL(ColorMask(GL_TRUE, GL_TRUE, GL_TRUE, GL_TRUE));
+ if (!fSingleStencilPassForWinding) {
+ GR_GL(Disable(GL_CULL_FACE));
+ }
+ } break;
+ case kWindingStencil1_StencilPass:
+ GR_GL(Enable(GL_STENCIL_TEST));
+ if (fHasStencilWrap) {
+ if (stencilClip) {
+ GR_GL(StencilFunc(GL_EQUAL,
+ clipStencilMask,
+ clipStencilMask));
+ } else {
+ GR_GL(StencilFunc(GL_ALWAYS, 0x0, 0x0));
+ }
+ if (fSingleStencilPassForWinding) {
+ GR_GL(StencilOpSeparate(GL_FRONT, GL_KEEP,
+ GL_INCR_WRAP, GL_INCR_WRAP));
+ GR_GL(StencilOpSeparate(GL_BACK, GL_KEEP,
+ GL_DECR_WRAP, GL_DECR_WRAP));
+ } else {
+ GR_GL(StencilOp(GL_KEEP, GL_INCR_WRAP, GL_INCR_WRAP));
+ GR_GL(Enable(GL_CULL_FACE));
+ GR_GL(CullFace(GL_BACK));
+ }
+ } else {
+ // If we don't have wrap then we use the Func to detect
+ // values that would wrap (0 on decr and mask on incr). We
+ // make the func fail on these values and use the sfail op
+ // to effectively wrap by inverting.
+ // This applies whether we are doing a two-pass (front faces
+ // followed by back faces) or a single pass (separate func/op)
+
+ // Note that in the case where we are also using stencil to
+ // clip this means we will write into the path bits in clipped
+ // out pixels. We still apply the clip bit in the color pass
+ // stencil func so we don't draw color outside the clip.
+ // We also will clear the stencil bits in clipped pixels by
+ // using zero in the sfail op with write mask set to the
+ // path mask.
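+                // For example, with pathStencilMask = 0x7f: a front-face
+                // increment at 0x7f fails the NOTEQUAL test, so the INVERT
+                // sfail op (masked to 0x7f) writes 0x00, exactly what
+                // INCR_WRAP would produce; a back-face decrement at 0x00
+                // fails the ref==0 test and INVERT writes 0x7f, matching
+                // DECR_WRAP.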
+ GR_GL(Enable(GL_STENCIL_TEST));
+ if (fSingleStencilPassForWinding) {
+ GR_GL(StencilFuncSeparate(GL_FRONT,
+ GL_NOTEQUAL,
+ pathStencilMask,
+ pathStencilMask));
+ GR_GL(StencilFuncSeparate(GL_BACK,
+ GL_NOTEQUAL,
+ 0x0,
+ pathStencilMask));
+ GR_GL(StencilOpSeparate(GL_FRONT, GL_INVERT,
+ GL_INCR, GL_INCR));
+ GR_GL(StencilOpSeparate(GL_BACK, GL_INVERT,
+ GL_DECR, GL_DECR));
+ } else {
+ GR_GL(StencilFunc(GL_NOTEQUAL,
+ pathStencilMask,
+ pathStencilMask));
+ GR_GL(StencilOp(GL_INVERT, GL_INCR, GL_INCR));
+ GR_GL(Enable(GL_CULL_FACE));
+ GR_GL(CullFace(GL_BACK));
+ }
+ }
+ GR_GL(StencilMask(pathStencilMask));
+ GR_GL(ColorMask(GL_FALSE, GL_FALSE, GL_FALSE, GL_FALSE));
+ break;
+ case kWindingStencil2_StencilPass:
+ GrAssert(!fSingleStencilPassForWinding);
+ GR_GL(Enable(GL_STENCIL_TEST));
+ if (fHasStencilWrap) {
+ if (stencilClip) {
+ GR_GL(StencilFunc(GL_EQUAL,
+ clipStencilMask,
+ clipStencilMask));
+ } else {
+ GR_GL(StencilFunc(GL_ALWAYS, 0x0, 0x0));
+ }
+ GR_GL(StencilOp(GL_DECR_WRAP, GL_DECR_WRAP, GL_DECR_WRAP));
+ } else {
+ GR_GL(StencilFunc(GL_NOTEQUAL, 0x0, pathStencilMask));
+ GR_GL(StencilOp(GL_INVERT, GL_DECR, GL_DECR));
+ }
+ GR_GL(StencilMask(pathStencilMask));
+ GR_GL(Enable(GL_CULL_FACE));
+ GR_GL(CullFace(GL_FRONT));
+ GR_GL(ColorMask(GL_FALSE, GL_FALSE, GL_FALSE, GL_FALSE));
+ break;
+ case kWindingColor_StencilPass: {
+ GR_GL(Enable(GL_STENCIL_TEST));
+ GLint funcRef = 0;
+ GLuint funcMask = pathStencilMask;
+ GLenum funcFunc;
+ if (stencilClip) {
+ funcRef |= clipStencilMask;
+ funcMask |= clipStencilMask;
+ }
+ if (fCurrDrawState.fReverseFill) {
+ funcFunc = GL_EQUAL;
+ } else {
+ funcFunc = GL_LESS;
+ }
+ GR_GL(StencilFunc(funcFunc, funcRef, funcMask));
+ GR_GL(StencilMask(pathStencilMask));
+ // must zero in sfail because winding w/o wrap will write
+ // path stencil bits in clipped out pixels
+ GR_GL(StencilOp(GL_ZERO, GL_ZERO, GL_ZERO));
+ GR_GL(ColorMask(GL_TRUE, GL_TRUE, GL_TRUE, GL_TRUE));
+ if (!fSingleStencilPassForWinding) {
+ GR_GL(Disable(GL_CULL_FACE));
+ }
+ } break;
+ case kSetClip_StencilPass:
+ GR_GL(Enable(GL_STENCIL_TEST));
+ GR_GL(StencilFunc(GL_ALWAYS, clipStencilMask, clipStencilMask));
+ GR_GL(StencilOp(GL_REPLACE, GL_REPLACE, GL_REPLACE));
+ GR_GL(StencilMask(clipStencilMask));
+ GR_GL(ColorMask(GL_FALSE, GL_FALSE, GL_FALSE, GL_FALSE));
+ if (!fSingleStencilPassForWinding) {
+ GR_GL(Disable(GL_CULL_FACE));
+ }
+ break;
+ default:
+ GrAssert(!"Unexpected stencil pass.");
+ break;
+
+ }
+ fHWDrawState.fStencilPass = fCurrDrawState.fStencilPass;
+ fHWDrawState.fReverseFill = fCurrDrawState.fReverseFill;
+ fWriteMaskChanged = false;
+ fHWStencilClip = stencilClip;
+ }
+}
+
+void GrGpuGL::flushGLStateCommon(PrimitiveType type) {
+
+ bool usingTexture = VertexHasTexCoords(fGeometrySrc.fVertexLayout);
+
+ // bind texture and set sampler state
+ if (usingTexture) {
+ GrGLTexture* nextTexture = (GrGLTexture*)fCurrDrawState.fTexture;
+
+ if (NULL != nextTexture) {
+ // if we created a rt/tex and rendered to it without using a texture
+            // and now we're texturing from the rt it will still be the last bound
+ // texture, but it needs resolving. So keep this out of the last
+ // != next check.
+ resolveTextureRenderTarget(nextTexture);
+
+ if (fHWDrawState.fTexture != nextTexture) {
+
+ GR_GL(BindTexture(GL_TEXTURE_2D, nextTexture->textureID()));
+ #if GR_COLLECT_STATS
+ ++fStats.fTextureChngCnt;
+ #endif
+ //GrPrintf("---- bindtexture %d\n", nextTexture->textureID());
+ fHWDrawState.fTexture = nextTexture;
+ }
+ const GrSamplerState& lastSampler = nextTexture->samplerState();
+ if (lastSampler.isFilter() != fCurrDrawState.fSamplerState.isFilter()) {
+ GLenum filter = fCurrDrawState.fSamplerState.isFilter() ?
+ GL_LINEAR :
+ GL_NEAREST;
+ GR_GL(TexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER,
+ filter));
+ GR_GL(TexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER,
+ filter));
+ }
+ if (lastSampler.getWrapX() != fCurrDrawState.fSamplerState.getWrapX()) {
+ GR_GL(TexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S,
+ GrGLTexture::gWrapMode2GLWrap[
+ fCurrDrawState.fSamplerState.getWrapX()]));
+ }
+ if (lastSampler.getWrapY() != fCurrDrawState.fSamplerState.getWrapY()) {
+ GR_GL(TexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T,
+ GrGLTexture::gWrapMode2GLWrap[
+ fCurrDrawState.fSamplerState.getWrapY()]));
+ }
+ nextTexture->setSamplerState(fCurrDrawState.fSamplerState);
+ } else {
+ GrAssert(!"Rendering with texture vert flag set but no texture");
+ if (NULL != fHWDrawState.fTexture) {
+ GR_GL(BindTexture(GL_TEXTURE_2D, 0));
+ // GrPrintf("---- bindtexture 0\n");
+ #if GR_COLLECT_STATS
+ ++fStats.fTextureChngCnt;
+ #endif
+ fHWDrawState.fTexture = NULL;
+ }
+ }
+ }
+
+ flushRenderTarget();
+
+ if ((fCurrDrawState.fFlagBits & kDither_StateBit) !=
+ (fHWDrawState.fFlagBits & kDither_StateBit)) {
+ if (fCurrDrawState.fFlagBits & kDither_StateBit) {
+ GR_GL(Enable(GL_DITHER));
+ } else {
+ GR_GL(Disable(GL_DITHER));
+ }
+ }
+
+#if GR_GL_DESKTOP
+ // ES doesn't support toggling GL_MULTISAMPLE and doesn't have
+ // smooth lines.
+ if (fRenderTargetChanged ||
+ (fCurrDrawState.fFlagBits & kAntialias_StateBit) !=
+ (fHWDrawState.fFlagBits & kAntialias_StateBit)) {
+ GLint msaa = 0;
+ // only perform query if we know MSAA is supported.
+ // calling on non-MSAA target caused a crash in one environment,
+ // though I don't think it should.
+        if (fAASamples[kHigh_AALevel]) {
+ GR_GL(GetIntegerv(GL_SAMPLE_BUFFERS, &msaa));
+ }
+ if (fCurrDrawState.fFlagBits & kAntialias_StateBit) {
+ if (msaa) {
+ GR_GL(Enable(GL_MULTISAMPLE));
+ } else {
+ GR_GL(Enable(GL_LINE_SMOOTH));
+ }
+ } else {
+ if (msaa) {
+ GR_GL(Disable(GL_MULTISAMPLE));
+ }
+ GR_GL(Disable(GL_LINE_SMOOTH));
+ }
+ }
+#endif
+
+ bool blendOff = canDisableBlend();
+ if (fHWBlendDisabled != blendOff) {
+ if (blendOff) {
+ GR_GL(Disable(GL_BLEND));
+ } else {
+ GR_GL(Enable(GL_BLEND));
+ }
+ fHWBlendDisabled = blendOff;
+ }
+
+ if (!blendOff) {
+ if (fHWDrawState.fSrcBlend != fCurrDrawState.fSrcBlend ||
+ fHWDrawState.fDstBlend != fCurrDrawState.fDstBlend) {
+ GR_GL(BlendFunc(gXfermodeCoeff2Blend[fCurrDrawState.fSrcBlend],
+ gXfermodeCoeff2Blend[fCurrDrawState.fDstBlend]));
+ fHWDrawState.fSrcBlend = fCurrDrawState.fSrcBlend;
+ fHWDrawState.fDstBlend = fCurrDrawState.fDstBlend;
+ }
+ }
+
+ // check for circular rendering
+ GrAssert(!usingTexture ||
+ NULL == fCurrDrawState.fRenderTarget ||
+ NULL == fCurrDrawState.fTexture ||
+ fCurrDrawState.fTexture->asRenderTarget() != fCurrDrawState.fRenderTarget);
+
+ flushStencil();
+
+ fHWDrawState.fFlagBits = fCurrDrawState.fFlagBits;
+}
+
+void GrGpuGL::notifyVertexBufferBind(const GrGLVertexBuffer* buffer) {
+ fHWGeometryState.fVertexBuffer = buffer;
+}
+
+void GrGpuGL::notifyVertexBufferDelete(const GrGLVertexBuffer* buffer) {
+ GrAssert(!(kBuffer_GeometrySrcType == fGeometrySrc.fVertexSrc &&
+ buffer == fGeometrySrc.fVertexBuffer));
+
+ if (fHWGeometryState.fVertexBuffer == buffer) {
+ // deleting bound buffer does implied bind to 0
+ fHWGeometryState.fVertexBuffer = NULL;
+ }
+}
+
+void GrGpuGL::notifyIndexBufferBind(const GrGLIndexBuffer* buffer) {
+    fHWGeometryState.fIndexBuffer = buffer;
+}
+
+void GrGpuGL::notifyIndexBufferDelete(const GrGLIndexBuffer* buffer) {
+ GrAssert(!(kBuffer_GeometrySrcType == fGeometrySrc.fIndexSrc &&
+ buffer == fGeometrySrc.fIndexBuffer));
+
+ if (fHWGeometryState.fIndexBuffer == buffer) {
+ // deleting bound buffer does implied bind to 0
+ fHWGeometryState.fIndexBuffer = NULL;
+ }
+}
+
+void GrGpuGL::notifyTextureBind(GrGLTexture* texture) {
+ fHWDrawState.fTexture = texture;
+#if GR_COLLECT_STATS
+ ++fStats.fTextureChngCnt;
+#endif
+}
+
+void GrGpuGL::notifyRenderTargetDelete(GrRenderTarget* renderTarget) {
+ GrAssert(NULL != renderTarget);
+
+ // if the bound FBO is destroyed we can't rely on the implicit bind to 0
+ // a) we want the default RT which may not be FBO 0
+ // b) we set more state than just FBO based on the RT
+ // So trash the HW state to force an RT flush next time
+ if (fCurrDrawState.fRenderTarget == renderTarget) {
+        fCurrDrawState.fRenderTarget = fDefaultRenderTarget;
+ }
+ if (fHWDrawState.fRenderTarget == renderTarget) {
+ fHWDrawState.fRenderTarget = NULL;
+ }
+ if (fClipState.fStencilClipTarget == renderTarget) {
+ fClipState.fStencilClipTarget = NULL;
+ }
+}
+
+void GrGpuGL::notifyTextureDelete(GrGLTexture* texture) {
+ if (fCurrDrawState.fTexture == texture) {
+ fCurrDrawState.fTexture = NULL;
+ }
+ if (fHWDrawState.fTexture == texture) {
+ // deleting bound texture does implied bind to 0
+ fHWDrawState.fTexture = NULL;
+ }
+}
+
+void GrGpuGL::notifyTextureRemoveRenderTarget(GrGLTexture* texture) {
+ GrAssert(NULL != texture->asRenderTarget());
+
+ // if there is a pending resolve, perform it.
+ resolveTextureRenderTarget(texture);
+}
+
+bool GrGpuGL::canBeTexture(GrTexture::PixelConfig config,
+ GLenum* internalFormat,
+ GLenum* format,
+ GLenum* type) {
+ switch (config) {
+ case GrTexture::kRGBA_8888_PixelConfig:
+ case GrTexture::kRGBX_8888_PixelConfig: // todo: can we tell it our X?
+ *format = SK_GL_32BPP_COLOR_FORMAT;
+ *internalFormat = GL_RGBA;
+ *type = GL_UNSIGNED_BYTE;
+ break;
+ case GrTexture::kRGB_565_PixelConfig:
+ *format = GL_RGB;
+ *internalFormat = GL_RGB;
+ *type = GL_UNSIGNED_SHORT_5_6_5;
+ break;
+ case GrTexture::kRGBA_4444_PixelConfig:
+ *format = GL_RGBA;
+ *internalFormat = GL_RGBA;
+ *type = GL_UNSIGNED_SHORT_4_4_4_4;
+ break;
+ case GrTexture::kIndex_8_PixelConfig:
+ if (this->supports8BitPalette()) {
+ *format = GR_PALETTE8_RGBA8;
+ *internalFormat = GR_PALETTE8_RGBA8;
+ *type = GL_UNSIGNED_BYTE; // unused I think
+ } else {
+ return false;
+ }
+ break;
+ case GrTexture::kAlpha_8_PixelConfig:
+ *format = GL_ALPHA;
+ *internalFormat = GL_ALPHA;
+ *type = GL_UNSIGNED_BYTE;
+ break;
+ default:
+ return false;
+ }
+ return true;
+}
+
+/* On ES the internalFormat and format must match for TexImage and we use
+ GL_RGB, GL_RGBA for color formats. We also generally like having the driver
+ decide the internalFormat. However, on ES internalFormat for
+ RenderBufferStorage* has to be a specific format (not a base format like
+ GL_RGBA).
+ */
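+// For example, a kRGBA_8888 texture is uploaded with the base GL_RGBA format
+// (see canBeTexture above), but an FBO color renderbuffer for the same config
+// must use the sized GR_RGBA8 format returned here.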
+bool GrGpuGL::fboInternalFormat(GrTexture::PixelConfig config, GLenum* format) {
+ switch (config) {
+ case GrTexture::kRGBA_8888_PixelConfig:
+ case GrTexture::kRGBX_8888_PixelConfig:
+ if (fRGBA8Renderbuffer) {
+ *format = GR_RGBA8;
+ return true;
+ } else {
+ return false;
+ }
+#if GR_GL_ES // ES2 supports 565. ES1 supports it with FBO extension
+ // desktop GL has no such internal format
+ case GrTexture::kRGB_565_PixelConfig:
+ *format = GR_RGB565;
+ return true;
+#endif
+ case GrTexture::kRGBA_4444_PixelConfig:
+ *format = GL_RGBA4;
+ return true;
+ default:
+ return false;
+ }
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+void GrGLCheckErr(const char* location, const char* call) {
+ uint32_t err = glGetError();
+ if (GL_NO_ERROR != err) {
+ GrPrintf("---- glGetError %x", err);
+ if (NULL != location) {
+ GrPrintf(" at\n\t%s", location);
+ }
+ if (NULL != call) {
+ GrPrintf("\n\t\t%s", call);
+ }
+ GrPrintf("\n");
+ }
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+typedef void (*glProc)(void);
+
+void get_gl_proc(const char procName[], glProc *address) {
+#if GR_WIN32_BUILD
+ *address = wglGetProcAddress(procName);
+ GrAssert(NULL != *address);
+#elif GR_MAC_BUILD || GR_IOS_BUILD
+ GrAssert(!"Extensions don't need to be initialized!");
+#elif GR_ANDROID_BUILD
+ *address = eglGetProcAddress(procName);
+ GrAssert(NULL != *address);
+#elif GR_LINUX_BUILD
+ GR_STATIC_ASSERT(!"Add environment-dependent implementation here");
+ //*address = glXGetProcAddressARB(procName);
+ //*address = eglGetProcAddress(procName);
+#elif GR_QNX_BUILD
+ *address = eglGetProcAddress(procName);
+ GrAssert(NULL != *address);
+#else
+ // hopefully we're on a system with EGL
+ *address = eglGetProcAddress(procName);
+ GrAssert(NULL != *address);
+#endif
+}
+
+#define GET_PROC(EXT_STRUCT, PROC_NAME, EXT_TAG) \
+ get_gl_proc("gl" #PROC_NAME #EXT_TAG, (glProc*)&EXT_STRUCT-> PROC_NAME);
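+// e.g. GET_PROC(exts, GenFramebuffers, EXT) expands to
+//   get_gl_proc("glGenFramebuffersEXT", (glProc*)&exts->GenFramebuffers);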
+
+extern void GrGLInitExtensions(GrGLExts* exts) {
+ exts->GenFramebuffers = NULL;
+ exts->BindFramebuffer = NULL;
+ exts->FramebufferTexture2D = NULL;
+ exts->CheckFramebufferStatus = NULL;
+ exts->DeleteFramebuffers = NULL;
+ exts->RenderbufferStorage = NULL;
+ exts->GenRenderbuffers = NULL;
+ exts->DeleteRenderbuffers = NULL;
+ exts->FramebufferRenderbuffer = NULL;
+ exts->BindRenderbuffer = NULL;
+ exts->RenderbufferStorageMultisample = NULL;
+ exts->BlitFramebuffer = NULL;
+ exts->ResolveMultisampleFramebuffer = NULL;
+ exts->FramebufferTexture2DMultisample = NULL;
+ exts->MapBuffer = NULL;
+ exts->UnmapBuffer = NULL;
+
+#if GR_MAC_BUILD
+ exts->GenFramebuffers = glGenFramebuffers;
+ exts->BindFramebuffer = glBindFramebuffer;
+ exts->FramebufferTexture2D = glFramebufferTexture2D;
+ exts->CheckFramebufferStatus = glCheckFramebufferStatus;
+ exts->DeleteFramebuffers = glDeleteFramebuffers;
+ exts->RenderbufferStorage = glRenderbufferStorage;
+ exts->GenRenderbuffers = glGenRenderbuffers;
+ exts->DeleteRenderbuffers = glDeleteRenderbuffers;
+ exts->FramebufferRenderbuffer = glFramebufferRenderbuffer;
+ exts->BindRenderbuffer = glBindRenderbuffer;
+ exts->RenderbufferStorageMultisample = glRenderbufferStorageMultisample;
+ exts->BlitFramebuffer = glBlitFramebuffer;
+ exts->MapBuffer = glMapBuffer;
+ exts->UnmapBuffer = glUnmapBuffer;
+#elif GR_IOS_BUILD
+ exts->GenFramebuffers = glGenFramebuffers;
+ exts->BindFramebuffer = glBindFramebuffer;
+ exts->FramebufferTexture2D = glFramebufferTexture2D;
+ exts->CheckFramebufferStatus = glCheckFramebufferStatus;
+ exts->DeleteFramebuffers = glDeleteFramebuffers;
+ exts->RenderbufferStorage = glRenderbufferStorage;
+ exts->GenRenderbuffers = glGenRenderbuffers;
+ exts->DeleteRenderbuffers = glDeleteRenderbuffers;
+ exts->FramebufferRenderbuffer = glFramebufferRenderbuffer;
+ exts->BindRenderbuffer = glBindRenderbuffer;
+ exts->RenderbufferStorageMultisample = glRenderbufferStorageMultisampleAPPLE;
+ exts->ResolveMultisampleFramebuffer = glResolveMultisampleFramebufferAPPLE;
+ exts->MapBuffer = glMapBufferOES;
+ exts->UnmapBuffer = glUnmapBufferOES;
+#else
+ GLint major, minor;
+ gl_version(&major, &minor);
+ #if GR_GL_DESKTOP
+ if (major >= 3) {// FBO, FBOMS, and FBOBLIT part of 3.0
+ exts->GenFramebuffers = glGenFramebuffers;
+ exts->BindFramebuffer = glBindFramebuffer;
+ exts->FramebufferTexture2D = glFramebufferTexture2D;
+ exts->CheckFramebufferStatus = glCheckFramebufferStatus;
+ exts->DeleteFramebuffers = glDeleteFramebuffers;
+ exts->RenderbufferStorage = glRenderbufferStorage;
+ exts->GenRenderbuffers = glGenRenderbuffers;
+ exts->DeleteRenderbuffers = glDeleteRenderbuffers;
+ exts->FramebufferRenderbuffer = glFramebufferRenderbuffer;
+ exts->BindRenderbuffer = glBindRenderbuffer;
+ exts->RenderbufferStorageMultisample = glRenderbufferStorageMultisample;
+ exts->BlitFramebuffer = glBlitFramebuffer;
+ } else if (has_gl_extension("GL_ARB_framebuffer_object")) {
+ GET_PROC(exts, GenFramebuffers, ARB);
+ GET_PROC(exts, BindFramebuffer, ARB);
+ GET_PROC(exts, FramebufferTexture2D, ARB);
+ GET_PROC(exts, CheckFramebufferStatus, ARB);
+ GET_PROC(exts, DeleteFramebuffers, ARB);
+ GET_PROC(exts, RenderbufferStorage, ARB);
+ GET_PROC(exts, GenRenderbuffers, ARB);
+ GET_PROC(exts, DeleteRenderbuffers, ARB);
+ GET_PROC(exts, FramebufferRenderbuffer, ARB);
+ GET_PROC(exts, BindRenderbuffer, ARB);
+ GET_PROC(exts, RenderbufferStorageMultisample, ARB);
+ GET_PROC(exts, BlitFramebuffer, ARB);
+ } else {
+ // we require some form of FBO
+ GrAssert(has_gl_extension("GL_EXT_framebuffer_object"));
+ GET_PROC(exts, GenFramebuffers, EXT);
+ GET_PROC(exts, BindFramebuffer, EXT);
+ GET_PROC(exts, FramebufferTexture2D, EXT);
+ GET_PROC(exts, CheckFramebufferStatus, EXT);
+ GET_PROC(exts, DeleteFramebuffers, EXT);
+ GET_PROC(exts, RenderbufferStorage, EXT);
+ GET_PROC(exts, GenRenderbuffers, EXT);
+ GET_PROC(exts, DeleteRenderbuffers, EXT);
+ GET_PROC(exts, FramebufferRenderbuffer, EXT);
+ GET_PROC(exts, BindRenderbuffer, EXT);
+ if (has_gl_extension("GL_EXT_framebuffer_multisample")) {
+ GET_PROC(exts, RenderbufferStorageMultisample, EXT);
+ }
+ if (has_gl_extension("GL_EXT_framebuffer_blit")) {
+ GET_PROC(exts, BlitFramebuffer, EXT);
+ }
+ }
+ // we assume we have at least GL 1.5 or higher (VBOs introduced in 1.5)
+ exts->MapBuffer = glMapBuffer;
+ exts->UnmapBuffer = glUnmapBuffer;
+ #else // !GR_GL_DESKTOP
+ if (major >= 2) {// ES 2.0 supports FBO
+ exts->GenFramebuffers = glGenFramebuffers;
+ exts->BindFramebuffer = glBindFramebuffer;
+ exts->FramebufferTexture2D = glFramebufferTexture2D;
+ exts->CheckFramebufferStatus = glCheckFramebufferStatus;
+ exts->DeleteFramebuffers = glDeleteFramebuffers;
+ exts->RenderbufferStorage = glRenderbufferStorage;
+ exts->GenRenderbuffers = glGenRenderbuffers;
+ exts->DeleteRenderbuffers = glDeleteRenderbuffers;
+ exts->FramebufferRenderbuffer = glFramebufferRenderbuffer;
+ exts->BindRenderbuffer = glBindRenderbuffer;
+ } else {
+ // we require some form of FBO
+ GrAssert(has_gl_extension("GL_OES_framebuffer_object"));
+
+ GET_PROC(exts, GenFramebuffers, OES);
+ GET_PROC(exts, BindFramebuffer, OES);
+ GET_PROC(exts, FramebufferTexture2D, OES);
+ GET_PROC(exts, CheckFramebufferStatus, OES);
+ GET_PROC(exts, DeleteFramebuffers, OES);
+ GET_PROC(exts, RenderbufferStorage, OES);
+ GET_PROC(exts, GenRenderbuffers, OES);
+ GET_PROC(exts, DeleteRenderbuffers, OES);
+ GET_PROC(exts, FramebufferRenderbuffer, OES);
+ GET_PROC(exts, BindRenderbuffer, OES);
+ }
+ if (has_gl_extension("GL_APPLE_framebuffer_multisample")) {
+ GET_PROC(exts, ResolveMultisampleFramebuffer, APPLE);
+ }
+ if (has_gl_extension("GL_IMG_multisampled_render_to_texture")) {
+ GET_PROC(exts, FramebufferTexture2DMultisample, IMG);
+ }
+ if (has_gl_extension("GL_OES_mapbuffer")) {
+ GET_PROC(exts, MapBuffer, OES);
+ GET_PROC(exts, UnmapBuffer, OES);
+ }
+ #endif // !GR_GL_DESKTOP
+#endif // BUILD
+}
+
+bool gPrintGL = true;
+
diff --git a/gpu/src/GrGpuGL.h b/gpu/src/GrGpuGL.h
new file mode 100644
index 0000000000..bedb85af72
--- /dev/null
+++ b/gpu/src/GrGpuGL.h
@@ -0,0 +1,188 @@
+/*
+ Copyright 2010 Google Inc.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+ */
+
+
+#ifndef GrGpuGL_DEFINED
+#define GrGpuGL_DEFINED
+
+#include "GrGpu.h"
+#include "GrGLConfig.h"
+#include "GrGLTexture.h"
+
+#include "GrGLVertexBuffer.h"
+#include "GrGLIndexBuffer.h"
+
+class GrGpuGL : public GrGpu {
+public:
+ GrGpuGL();
+ virtual ~GrGpuGL();
+
+ // overrides from GrGpu
+ virtual void resetContext();
+
+ virtual GrTexture* createTexture(const TextureDesc& desc,
+ const void* srcData, size_t rowBytes);
+ virtual GrVertexBuffer* createVertexBuffer(uint32_t size, bool dynamic);
+ virtual GrIndexBuffer* createIndexBuffer(uint32_t size, bool dynamic);
+
+ virtual GrRenderTarget* createPlatformRenderTarget(
+ intptr_t platformRenderTarget,
+ int width, int height);
+
+ virtual GrRenderTarget* defaultRenderTarget();
+
+ virtual void setDefaultRenderTargetSize(uint32_t width, uint32_t height);
+
+ virtual void eraseColor(GrColor color);
+
+ virtual void forceRenderTargetFlush();
+
+ virtual bool readPixels(int left, int top, int width, int height,
+ GrTexture::PixelConfig, void* buffer);
+
+ /**
+ * Gets the struct containing the GL extensions for the context
+ * underlying the GrGpuGL
+ *
+     * @return struct containing extension function pointers
+ */
+ const GrGLExts& extensions() { return fExts; }
+
+protected:
+ struct {
+        const void*           fPositionPtr;
+ GrVertexLayout fVertexLayout;
+ const GrVertexBuffer* fVertexBuffer;
+ const GrIndexBuffer* fIndexBuffer;
+ } fHWGeometryState;
+
+ DrawState fHWDrawState;
+ bool fHWStencilClip;
+
+ virtual void drawIndexedHelper(PrimitiveType type,
+ uint32_t startVertex,
+ uint32_t startIndex,
+ uint32_t vertexCount,
+ uint32_t indexCount);
+
+ virtual void drawNonIndexedHelper(PrimitiveType type,
+                                      uint32_t startVertex,
+                                      uint32_t vertexCount);
+
+ virtual void flushScissor(const GrIRect* rect);
+
+ void eraseStencil(uint32_t value, uint32_t mask);
+ virtual void eraseStencilClip();
+
+ // flushes state that is common to fixed and programmable GL
+ // dither
+ // line smoothing
+ // blend func
+ // texture binding
+ // sampler state (filtering, tiling)
+ // FBO binding
+ // line width
+ void flushGLStateCommon(PrimitiveType type);
+
+ // pushes the filtering and tiling modes to GL
+ void setSamplerStateImm(const GrSamplerState& samplerState);
+
+ // set when this class changes the rendertarget.
+ // Subclass should notice at flush time, take appropriate action,
+ // and set false.
+ bool fRenderTargetChanged;
+
+    // set by eraseColor or eraseStencil. Picked up in flushStencil.
+ bool fWriteMaskChanged;
+
+ // last scissor / viewport scissor state seen by the GL.
+ BoundsState fHWBounds;
+
+private:
+ GrGLExts fExts;
+
+ GrGLRenderTarget* fDefaultRenderTarget;
+
+ void resetContextHelper();
+
+ // notify callbacks to update state tracking when related
+ // objects are bound to GL or deleted outside of the class
+ void notifyVertexBufferBind(const GrGLVertexBuffer* buffer);
+ void notifyVertexBufferDelete(const GrGLVertexBuffer* buffer);
+ void notifyIndexBufferBind(const GrGLIndexBuffer* buffer);
+ void notifyIndexBufferDelete(const GrGLIndexBuffer* buffer);
+ void notifyTextureBind(GrGLTexture* texture);
+ void notifyTextureDelete(GrGLTexture* texture);
+ void notifyRenderTargetDelete(GrRenderTarget* renderTarget);
+ void notifyTextureRemoveRenderTarget(GrGLTexture* texture);
+
+ void flushRenderTarget();
+ void flushStencil();
+ void resolveTextureRenderTarget(GrGLTexture* texture);
+
+ bool canBeTexture(GrTexture::PixelConfig config,
+ GLenum* internalFormat,
+ GLenum* format,
+ GLenum* type);
+ bool fboInternalFormat(GrTexture::PixelConfig config, GLenum* format);
+
+ friend class GrGLVertexBuffer;
+ friend class GrGLIndexBuffer;
+ friend class GrGLTexture;
+ friend class GrGLRenderTarget;
+
+ bool fHWBlendDisabled;
+
+ GLuint fAASamples[4];
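+    // how multisample FBOs are supported and resolved: kDesktop_MSFBO
+    // resolves with BlitFramebuffer, kApple_MSFBO with
+    // ResolveMultisampleFramebufferAPPLE (see resolveTextureRenderTarget),
+    // and kIMG_MSFBO attaches the texture with the IMG
+    // FramebufferTexture2DMultisample extension (see createTexture).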
+ enum {
+ kNone_MSFBO = 0,
+ kDesktop_MSFBO,
+ kApple_MSFBO,
+ kIMG_MSFBO
+ } fMSFBOType;
+
+ // Do we have stencil wrap ops.
+ bool fHasStencilWrap;
+
+ // ES requires an extension to support RGBA8 in RenderBufferStorage
+ bool fRGBA8Renderbuffer;
+
+ typedef GrGpu INHERITED;
+};
+
+bool has_gl_extension(const char* ext);
+void gl_version(int* major, int* minor);
+
+/**
+ * GrGL_RestoreResetRowLength() will reset GL_UNPACK_ROW_LENGTH to 0. We write
+ * this wrapper, since GL_UNPACK_ROW_LENGTH is not available on all GL versions
+ */
+#if GR_GL_DESKTOP
+ static inline void GrGL_RestoreResetRowLength() {
+ GR_GL(PixelStorei(GL_UNPACK_ROW_LENGTH, 0));
+ }
+#else
+ #define GrGL_RestoreResetRowLength()
+#endif
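+// Typical use: set GL_UNPACK_ROW_LENGTH for a partial upload on desktop GL,
+// then call GrGL_RestoreResetRowLength() when done; on ES the macro expands
+// to nothing since that pixel-store state doesn't exist there.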
+
+#if SK_TextGLType != GL_FIXED
+ #define SK_GL_HAS_COLOR4UB
+#endif
+
+#endif
+
+
diff --git a/gpu/src/GrGpuGLFixed.cpp b/gpu/src/GrGpuGLFixed.cpp
new file mode 100644
index 0000000000..77bec40979
--- /dev/null
+++ b/gpu/src/GrGpuGLFixed.cpp
@@ -0,0 +1,342 @@
+/*
+ Copyright 2010 Google Inc.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+ */
+
+
+#include "GrGLConfig.h"
+
+#if GR_SUPPORT_GLES1 || GR_SUPPORT_GLDESKTOP
+
+#include "GrGpuGLFixed.h"
+#include "GrGpuVertex.h"
+
+#define SKIP_CACHE_CHECK true
+
+struct GrGpuMatrix {
+ GrScalar fMat[16];
+
+ void reset() {
+ Gr_bzero(fMat, sizeof(fMat));
+ fMat[0] = fMat[5] = fMat[10] = fMat[15] = GR_Scalar1;
+ }
+
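+    // Writes the 3x3 GrMatrix into a column-major 4x4 GL matrix: the x, y and
+    // translation/perspective columns go into GL columns 0, 1 and 3, the
+    // perspective row becomes GL's w row, and column 2 is left as the
+    // identity z axis so z passes through unchanged.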
+ void set(const GrMatrix& m) {
+ Gr_bzero(fMat, sizeof(fMat));
+ fMat[0] = m[GrMatrix::kScaleX];
+ fMat[4] = m[GrMatrix::kSkewX];
+ fMat[12] = m[GrMatrix::kTransX];
+
+ fMat[1] = m[GrMatrix::kSkewY];
+ fMat[5] = m[GrMatrix::kScaleY];
+ fMat[13] = m[GrMatrix::kTransY];
+
+ fMat[3] = m[GrMatrix::kPersp0];
+ fMat[7] = m[GrMatrix::kPersp1];
+ fMat[15] = m[GrMatrix::kPersp2];
+
+ fMat[10] = GR_Scalar1; // z-scale
+ }
+};
+
+// these must match the order in the corresponding enum in GrGpu.h
+static const GLenum gMatrixMode2Enum[] = {
+ GL_MODELVIEW, GL_TEXTURE
+};
+
+///////////////////////////////////////////////////////////////////////////////
+
+GrGpuGLFixed::GrGpuGLFixed() {
+ resetContextHelper();
+}
+
+GrGpuGLFixed::~GrGpuGLFixed() {
+}
+
+void GrGpuGLFixed::resetContext() {
+ INHERITED::resetContext();
+ resetContextHelper();
+}
+
+void GrGpuGLFixed::resetContextHelper() {
+ GR_GL(Disable(GL_TEXTURE_2D));
+
+ GR_GL(EnableClientState(GL_VERTEX_ARRAY));
+ GR_GL(TexEnvi(GL_TEXTURE_ENV, GL_TEXTURE_ENV_MODE, GL_COMBINE));
+ GR_GL(TexEnvi(GL_TEXTURE_ENV, GL_COMBINE_RGB, GL_MODULATE));
+ GR_GL(TexEnvi(GL_TEXTURE_ENV, GL_SRC0_RGB, GL_TEXTURE0));
+ GR_GL(TexEnvi(GL_TEXTURE_ENV, GL_SRC1_RGB, GL_PRIMARY_COLOR));
+ GR_GL(TexEnvi(GL_TEXTURE_ENV, GL_OPERAND1_RGB, GL_SRC_COLOR));
+
+ GR_GL(TexEnvi(GL_TEXTURE_ENV, GL_COMBINE_ALPHA, GL_MODULATE));
+ GR_GL(TexEnvi(GL_TEXTURE_ENV, GL_SRC0_ALPHA, GL_TEXTURE0));
+ GR_GL(TexEnvi(GL_TEXTURE_ENV, GL_OPERAND0_ALPHA, GL_SRC_ALPHA));
+ GR_GL(TexEnvi(GL_TEXTURE_ENV, GL_SRC1_ALPHA, GL_PRIMARY_COLOR));
+ GR_GL(TexEnvi(GL_TEXTURE_ENV, GL_OPERAND1_ALPHA, GL_SRC_ALPHA));
+
+    // this changes between GL_SRC_COLOR and GL_SRC_ALPHA depending upon
+ // whether we have a (premultiplied) RGBA texture or just an ALPHA texture
+ //glTexEnvi(GL_TEXTURE_ENV, GL_OPERAND0_RGB, GL_SRC_COLOR);
+ fHWRGBOperand0 = (TextureEnvRGBOperands) -1;
+
+ GR_GL(ClientActiveTexture(GL_TEXTURE0));
+
+ fHWGeometryState.fVertexLayout = 0;
+ fHWGeometryState.fPositionPtr = (void*) ~0;
+ GR_GL(EnableClientState(GL_VERTEX_ARRAY));
+ GR_GL(DisableClientState(GL_TEXTURE_COORD_ARRAY));
+ GR_GL(ShadeModel(GL_FLAT));
+ GR_GL(DisableClientState(GL_COLOR_ARRAY));
+
+ GrGLClearErr();
+ fTextVerts = false;
+
+ fHWTextureOrientation = (GrGLTexture::Orientation)-1; // illegal
+ fBaseVertex = 0xffffffff;
+}
+
+
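+// Builds an orthographic projection mapping render target pixel coordinates
+// to NDC: (0, 0) -> (-1, 1) at the top-left and (width, height) -> (1, -1),
+// flipping y so our y-down device space renders right side up.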
+void GrGpuGLFixed::flushProjectionMatrix() {
+ float mat[16];
+ Gr_bzero(mat, sizeof(mat));
+
+ GrAssert(NULL != fCurrDrawState.fRenderTarget);
+
+ mat[0] = 2.f / fCurrDrawState.fRenderTarget->width();
+ mat[5] = -2.f / fCurrDrawState.fRenderTarget->height();
+ mat[10] = -1.f;
+ mat[15] = 1;
+
+ mat[12] = -1.f;
+ mat[13] = 1.f;
+
+ GR_GL(MatrixMode(GL_PROJECTION));
+ GR_GL(LoadMatrixf(mat));
+}
+
+bool GrGpuGLFixed::flushGraphicsState(PrimitiveType type) {
+
+ bool usingTexture = VertexHasTexCoords(fGeometrySrc.fVertexLayout);
+
+ if (usingTexture && fCurrDrawState.fSamplerState.isGradient()) {
+ unimpl("Fixed pipe doesn't support radial/sweep gradients");
+ return false;
+ }
+
+ flushGLStateCommon(type);
+
+ if (fRenderTargetChanged) {
+ flushProjectionMatrix();
+ fRenderTargetChanged = false;
+ }
+
+ bool wasUsingTexture = VertexHasTexCoords(fHWGeometryState.fVertexLayout);
+ if (usingTexture != wasUsingTexture) {
+ if (usingTexture) {
+ GR_GL(Enable(GL_TEXTURE_2D));
+ } else {
+ GR_GL(Disable(GL_TEXTURE_2D));
+ }
+ }
+
+ uint32_t vertColor = (fGeometrySrc.fVertexLayout & kColor_VertexLayoutBit);
+ uint32_t prevVertColor = (fHWGeometryState.fVertexLayout &
+ kColor_VertexLayoutBit);
+
+ if (vertColor != prevVertColor) {
+ if (vertColor) {
+ GrAssert(fCurrDrawState.fSamplerState.getSampleMode() !=
+ GrSamplerState::kAlphaMod_SampleMode);
+ GR_GL(ShadeModel(GL_SMOOTH));
+ // invalidate the immediate mode color
+ fHWDrawState.fColor = GrColor_ILLEGAL;
+ } else {
+ GR_GL(ShadeModel(GL_FLAT));
+ }
+ }
+
+ if (kPoints_PrimitiveType == type &&
+ fHWDrawState.fPointSize != fCurrDrawState.fPointSize) {
+ GR_GL(PointSize(fCurrDrawState.fPointSize));
+ fHWDrawState.fPointSize = fCurrDrawState.fPointSize;
+ }
+
+ if (!vertColor && fHWDrawState.fColor != fCurrDrawState.fColor) {
+ GR_GL(Color4ub(GrColorUnpackR(fCurrDrawState.fColor),
+ GrColorUnpackG(fCurrDrawState.fColor),
+ GrColorUnpackB(fCurrDrawState.fColor),
+ GrColorUnpackA(fCurrDrawState.fColor)));
+ fHWDrawState.fColor = fCurrDrawState.fColor;
+ }
+
+ // set texture environment, decide whether we are modulating by RGB or A.
+ if (usingTexture) {
+ GrGLTexture* texture = (GrGLTexture*)fCurrDrawState.fTexture;
+ if (NULL != texture) {
+ TextureEnvRGBOperands nextRGBOperand0 =
+ (texture->uploadFormat() == GL_ALPHA) ?
+ kAlpha_TextureEnvRGBOperand :
+ kColor_TextureEnvRGBOperand;
+ if (fHWRGBOperand0 != nextRGBOperand0) {
+ GR_GL(TexEnvi(GL_TEXTURE_ENV,
+ GL_OPERAND0_RGB,
+ (nextRGBOperand0==kAlpha_TextureEnvRGBOperand) ?
+ GL_SRC_ALPHA :
+ GL_SRC_COLOR));
+ fHWRGBOperand0 = nextRGBOperand0;
+ }
+
+ if (fHWTextureOrientation != texture->orientation() ||
+ fHWDrawState.fMatrixModeCache[kTexture_MatrixMode] !=
+ fCurrDrawState.fMatrixModeCache[kTexture_MatrixMode]) {
+ GrGpuMatrix glm;
+ if (GrGLTexture::kBottomUp_Orientation == texture->orientation()) {
+ GrMatrix m(
+ GR_Scalar1, 0, 0,
+ 0, -GR_Scalar1, GR_Scalar1,
+ 0, 0, GrMatrix::I()[8]
+ );
+ m.preConcat(fCurrDrawState.fMatrixModeCache[kTexture_MatrixMode]);
+ glm.set(m);
+ } else {
+ glm.set(fCurrDrawState.fMatrixModeCache[kTexture_MatrixMode]);
+ }
+ GR_GL(MatrixMode(gMatrixMode2Enum[kTexture_MatrixMode]));
+ GR_GL(LoadMatrixf(glm.fMat));
+ fHWDrawState.fMatrixModeCache[kTexture_MatrixMode] =
+ fCurrDrawState.fMatrixModeCache[kTexture_MatrixMode];
+ fHWTextureOrientation = texture->orientation();
+ }
+ } else {
+ GrAssert(!"Rendering with texture vert flag set but no bound texture");
+ return false;
+ }
+ }
+
+ if (fHWDrawState.fMatrixModeCache[kModelView_MatrixMode] !=
+ fCurrDrawState.fMatrixModeCache[kModelView_MatrixMode]) {
+ GrGpuMatrix glm;
+ glm.set(fCurrDrawState.fMatrixModeCache[kModelView_MatrixMode]);
+ GR_GL(MatrixMode(gMatrixMode2Enum[kModelView_MatrixMode]));
+ GR_GL(LoadMatrixf(glm.fMat));
+ fHWDrawState.fMatrixModeCache[kModelView_MatrixMode] =
+ fCurrDrawState.fMatrixModeCache[kModelView_MatrixMode];
+ }
+ return true;
+}
+
+void GrGpuGLFixed::setupGeometry(uint32_t startVertex,
+ uint32_t startIndex,
+ uint32_t vertexCount,
+ uint32_t indexCount) {
+
+ int newColorOffset, newTexCoordOffset;
+
+ GLsizei newStride = VertexSizeAndOffsets(fGeometrySrc.fVertexLayout,
+ &newTexCoordOffset,
+ &newColorOffset);
+ int oldColorOffset, oldTexCoordOffset;
+ GLsizei oldStride = VertexSizeAndOffsets(fHWGeometryState.fVertexLayout,
+ &oldTexCoordOffset,
+ &oldColorOffset);
+
+ const GLvoid* posPtr = (GLvoid*)(newStride * startVertex);
+
+ if (kBuffer_GeometrySrcType == fGeometrySrc.fVertexSrc) {
+ GrAssert(NULL != fGeometrySrc.fVertexBuffer);
+ GrAssert(!fGeometrySrc.fVertexBuffer->isLocked());
+ if (fHWGeometryState.fVertexBuffer != fGeometrySrc.fVertexBuffer) {
+ GrGLVertexBuffer* buf =
+ (GrGLVertexBuffer*)fGeometrySrc.fVertexBuffer;
+ GR_GL(BindBuffer(GL_ARRAY_BUFFER, buf->bufferID()));
+ fHWGeometryState.fVertexBuffer = fGeometrySrc.fVertexBuffer;
+ }
+ } else {
+ if (kArray_GeometrySrcType == fGeometrySrc.fVertexSrc) {
+ posPtr = (void*)((intptr_t)fGeometrySrc.fVertexArray +
+ (intptr_t)posPtr);
+ } else {
+ GrAssert(kReserved_GeometrySrcType == fGeometrySrc.fVertexSrc);
+ posPtr = (void*)((intptr_t)fVertices.get() + (intptr_t)posPtr);
+ }
+ if (NULL != fHWGeometryState.fVertexBuffer) {
+ GR_GL(BindBuffer(GL_ARRAY_BUFFER, 0));
+ fHWGeometryState.fVertexBuffer = NULL;
+ }
+ }
+
+ if (kBuffer_GeometrySrcType == fGeometrySrc.fIndexSrc) {
+ GrAssert(NULL != fGeometrySrc.fIndexBuffer);
+ GrAssert(!fGeometrySrc.fIndexBuffer->isLocked());
+ if (fHWGeometryState.fIndexBuffer != fGeometrySrc.fIndexBuffer) {
+ GrGLIndexBuffer* buf =
+ (GrGLIndexBuffer*)fGeometrySrc.fIndexBuffer;
+ GR_GL(BindBuffer(GL_ELEMENT_ARRAY_BUFFER, buf->bufferID()));
+ fHWGeometryState.fIndexBuffer = fGeometrySrc.fIndexBuffer;
+ }
+ } else if (NULL != fHWGeometryState.fIndexBuffer) {
+ GR_GL(BindBuffer(GL_ELEMENT_ARRAY_BUFFER, 0));
+ fHWGeometryState.fIndexBuffer = NULL;
+ }
+
+ GLenum scalarType;
+ if (fGeometrySrc.fVertexLayout & kTextFormat_VertexLayoutBit) {
+ scalarType = GrGLTextType;
+ } else {
+ scalarType = GrGLType;
+ }
+
+ bool baseChange = posPtr != fHWGeometryState.fPositionPtr;
+ bool scalarChange =
+ (GrGLTextType != GrGLType) &&
+ (kTextFormat_VertexLayoutBit &
+ (fHWGeometryState.fVertexLayout ^ fGeometrySrc.fVertexLayout));
+ bool strideChange = newStride != oldStride;
+ bool posChange = baseChange || scalarChange || strideChange;
+
+ if (posChange) {
+ GR_GL(VertexPointer(2, scalarType, newStride, posPtr));
+ fHWGeometryState.fPositionPtr = posPtr;
+ }
+
+ // need to enable array if tex coord offset is 0 (using positions as coords)
+ if (newTexCoordOffset >= 0) {
+ GLvoid* texCoordPtr = (int8_t*)posPtr + newTexCoordOffset;
+ if (oldTexCoordOffset < 0) {
+ GR_GL(EnableClientState(GL_TEXTURE_COORD_ARRAY));
+ }
+ if (posChange || newTexCoordOffset != oldTexCoordOffset) {
+ GR_GL(TexCoordPointer(2, scalarType, newStride, texCoordPtr));
+ }
+ } else if (oldTexCoordOffset >= 0) {
+ GR_GL(DisableClientState(GL_TEXTURE_COORD_ARRAY));
+ }
+
+ if (newColorOffset > 0) {
+ GLvoid* colorPtr = (int8_t*)posPtr + newColorOffset;
+ if (oldColorOffset <= 0) {
+ GR_GL(EnableClientState(GL_COLOR_ARRAY));
+ }
+ if (posChange || newColorOffset != oldColorOffset) {
+ GR_GL(ColorPointer(4, GL_UNSIGNED_BYTE, newStride, colorPtr));
+ }
+ } else if (oldColorOffset > 0) {
+ GR_GL(DisableClientState(GL_COLOR_ARRAY));
+ }
+
+ fHWGeometryState.fVertexLayout = fGeometrySrc.fVertexLayout;
+}
+
+#endif
+
diff --git a/gpu/src/GrGpuGLFixed.h b/gpu/src/GrGpuGLFixed.h
new file mode 100644
index 0000000000..f3a033234c
--- /dev/null
+++ b/gpu/src/GrGpuGLFixed.h
@@ -0,0 +1,68 @@
+/*
+ Copyright 2010 Google Inc.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+ */
+
+
+#ifndef GrGpuGLFixed_DEFINED
+#define GrGpuGLFixed_DEFINED
+
+#include "GrGpuGL.h"
+
+// Fixed Pipeline OpenGL or OpenGL ES 1.x
+class GrGpuGLFixed : public GrGpuGL {
+public:
+ GrGpuGLFixed();
+ virtual ~GrGpuGLFixed();
+
+ virtual void resetContext();
+
+protected:
+ // overrides from GrGpu
+ virtual bool flushGraphicsState(PrimitiveType type);
+ virtual void setupGeometry(uint32_t startVertex,
+ uint32_t startIndex,
+ uint32_t vertexCount,
+ uint32_t indexCount);
+
+private:
+ void resetContextHelper();
+
+    // when the texture is GL_RGBA we set the GL_COMBINE texture
+    // environment rgb operand 0 to GL_SRC_COLOR to modulate each incoming
+    // frag's RGB by the texture's RGB. When the texture is GL_ALPHA we set
+    // the operand to GL_SRC_ALPHA so that the incoming frag's RGB is
+    // modulated by the texture's alpha.
+ enum TextureEnvRGBOperands {
+ kAlpha_TextureEnvRGBOperand,
+ kColor_TextureEnvRGBOperand,
+ };
+ TextureEnvRGBOperands fHWRGBOperand0;
+
+ void flushProjectionMatrix();
+
+ // are the currently bound vertex buffers/arrays laid
+ // out for text or other drawing.
+ bool fTextVerts;
+
+ // On GL we have to build the base vertex offset into the
+ // glVertexPointer/glTexCoordPointer/etc
+ int fBaseVertex;
+
+ GrGLTexture::Orientation fHWTextureOrientation;
+
+ typedef GrGpuGL INHERITED;
+};
+
+#endif
diff --git a/gpu/src/GrGpuGLShaders.cpp b/gpu/src/GrGpuGLShaders.cpp
new file mode 100644
index 0000000000..8f4bfaf00b
--- /dev/null
+++ b/gpu/src/GrGpuGLShaders.cpp
@@ -0,0 +1,937 @@
+/*
+ Copyright 2010 Google Inc.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+ */
+
+
+#include "GrGLConfig.h"
+
+#if GR_SUPPORT_GLES2 || GR_SUPPORT_GLDESKTOP
+
+#include "GrGpuGLShaders.h"
+#include "GrGpuVertex.h"
+#include "GrMemory.h"
+
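+// ATTRIBUTE_MATRIX selects whether the view/texture matrices reach the vertex
+// shaders as per-vertex attributes (1) or as uniforms (0); see DECL_MATRIX
+// below. ATTRIBUTE_TEXT_COLOR picks between a per-vertex color attribute and
+// a single uniform color for the text program.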
+#define ATTRIBUTE_MATRIX 0
+
+#define ATTRIBUTE_TEXT_COLOR 1
+
+#if ATTRIBUTE_MATRIX
+ #define DECL_MATRIX(name) "attribute mat3 " #name ";\n"
+#else
+ #define DECL_MATRIX(name) "uniform mat3 " #name ";\n"
+#endif
+
+#define SKIP_CACHE_CHECK true
+
+#if GR_SUPPORT_GLES2
+ #define GR_PRECISION "mediump"
+ #define GR_SHADER_PRECISION "precision mediump float;\n"
+#else
+ #define GR_PRECISION ""
+ #define GR_SHADER_PRECISION ""
+#endif
+
+static const char* gvshad[] = {
+ // 0: kTextureVertCoords_Program, kTextureVertCoordsProj_Program,
+ // kRadialTextureVertCoords_Program, kSweepTextureVertCoords_Program
+ "attribute vec2 aPosition;\n"
+ "attribute vec4 aColor;\n"
+ "varying vec3 vTexture;\n"
+ "varying vec4 vColor;\n"
+ DECL_MATRIX(viewM)
+ DECL_MATRIX(texM)
+ "void main() {\n"
+ " vec3 pos3 = viewM*vec3(aPosition,1);\n"
+ " gl_Position = vec4(pos3.xy,0,pos3.z);\n"
+ " gl_PointSize = 1.0;\n"
+ " vTexture = texM * vec3(aPosition,1);\n"
+ " vColor = aColor;\n"
+ "}\n",
+
+ // 1: kTextureTexCoords_Program, kTextureTexCoordsProj_Program,
+ // kRadialTextureTexCoords_Program, kSweepTextureTexCoords_Program
+ "attribute vec2 aPosition;\n"
+ "attribute vec2 aTexture;\n"
+ "attribute vec4 aColor;\n"
+ "varying vec3 vTexture;\n"
+ "varying vec4 vColor;\n"
+ DECL_MATRIX(viewM)
+ DECL_MATRIX(texM)
+ "void main() {\n"
+ " vec3 pos3 = viewM*vec3(aPosition,1);\n"
+ " gl_Position = vec4(pos3.xy,0,pos3.z);\n"
+ " gl_PointSize = 1.0;\n"
+ " vTexture = texM * vec3(aTexture,1);\n"
+ " vColor = aColor;\n"
+ "}\n",
+
+ // 2: kText_Program
+ "attribute vec2 aPosition;\n"
+ "attribute vec2 aTexture;\n"
+ "varying vec2 vTexture;\n"
+ DECL_MATRIX(viewM)
+#if ATTRIBUTE_TEXT_COLOR
+ "varying vec4 vColor;\n"
+ "attribute vec4 aColor;\n"
+#endif
+ "void main() {\n"
+ " vec3 pos3 = viewM*vec3(aPosition,1);\n"
+ " gl_Position = vec4(pos3.xy,0,pos3.z);\n"
+ " vTexture = aTexture;\n"
+#if ATTRIBUTE_TEXT_COLOR
+ " vColor = aColor;\n"
+#endif
+ "}\n",
+
+ // 3: kNoTexture_Program
+ "attribute vec2 aPosition;\n"
+ "attribute vec4 aColor;\n"
+ "varying vec4 vColor;\n"
+ DECL_MATRIX(viewM)
+ "void main() {\n"
+ " vec3 pos3 = viewM*vec3(aPosition,1);\n"
+ " gl_Position = vec4(pos3.xy,0,pos3.z);\n"
+ " gl_PointSize = 1.0;\n"
+ " vColor = aColor;\n"
+ "}\n",
+
+ // 4: kTextureVertCoordsNoColor_Program
+ "attribute vec2 aPosition;\n"
+ "attribute vec4 aColor;\n"
+ "varying vec3 vTexture;\n"
+ DECL_MATRIX(viewM)
+ DECL_MATRIX(texM)
+ "void main() {\n"
+ " vec3 pos3 = viewM*vec3(aPosition,1);\n"
+ " gl_Position = vec4(pos3.xy,0,pos3.z);\n"
+ " vTexture = texM * vec3(aPosition,1);\n"
+ "}\n",
+
+ // 5: kTextureTexCoordsNoColor_Program
+ "attribute vec2 aPosition;\n"
+ "attribute vec2 aTexture;\n"
+ "varying vec3 vTexture;\n"
+ DECL_MATRIX(viewM)
+ DECL_MATRIX(texM)
+ "void main() {\n"
+ " vec3 pos3 = viewM*vec3(aPosition,1);\n"
+ " gl_Position = vec4(pos3.xy,0,pos3.z);\n"
+ " gl_PointSize = 1.0;\n"
+ " vTexture = texM * vec3(aTexture,1);\n"
+ "}\n",
+
+ // 6: kTwoPointRadialTextureVertCoords_Program
+ "uniform " GR_PRECISION " float uParams[6];\n"
+ // 0 is t^2 term of quadratic
+ // 1 is one-half the inverse of above
+ // 2 is x offset of the second circle (post tex-matrix)
+ // 3 is the radius of the first circle (post tex-matrix)
+ // 4 is the first circle radius squared
+ // 5 is 1 to use + in the quadratic eq or -1 to use -
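+    // The vertex shader emits the linear coefficient vB of that quadratic per
+    // vertex; the fragment shader computes the constant term per fragment and
+    // finishes the quadratic formula, using uParams[1] as 1/(2a).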
+ DECL_MATRIX(viewM)
+ DECL_MATRIX(texM)
+ "attribute vec2 aPosition;\n"
+ "attribute vec4 aColor;\n"
+ "varying vec4 vColor;\n"
+ "varying float vB;\n" // t coeffecient of quadratic.
+ "varying vec2 t;\n" // coordinates in canonical space
+ "void main() {\n"
+ " vec3 pos3 = viewM*vec3(aPosition,1);\n"
+ " gl_Position = vec4(pos3.xy,0,pos3.z);\n"
+ " t = vec2(texM * vec3(aPosition,1));\n"
+ " vColor = aColor;\n"
+ " vB = 2.0 * (uParams[2] * t.x - uParams[3]);\n"
+ "}\n",
+
+    // 7: kTwoPointRadialTextureTexCoords_Program
+ "uniform " GR_PRECISION " float uParams[6];\n"
+ DECL_MATRIX(viewM)
+ DECL_MATRIX(texM)
+ "attribute vec2 aPosition;\n"
+ "attribute vec2 aTexture;\n"
+ "attribute vec4 aColor;\n"
+ "varying vec4 vColor;\n"
+ "varying float vB;\n" // t coeffecient of quadratic.
+ "varying vec2 t;\n" // coordinates in canonical space
+ "void main() {\n"
+ " vec3 pos3 = viewM*vec3(aPosition,1);\n"
+ " gl_Position = vec4(pos3.xy,0,pos3.z);\n"
+ " t = vec2(texM * vec3(aTexture,1));\n"
+ " vColor = aColor;\n"
+ " vB = 2.0 * (uParams[2] * t.x - uParams[3]);\n"
+ "}\n",
+};
+
+static const char* gfshad[] = {
+ // 0: kTextureVertCoords_Program, kTextureTexCoords_Program
+ GR_SHADER_PRECISION
+ "varying vec3 vTexture;\n"
+ "varying vec4 vColor;\n"
+ "uniform sampler2D sTexture;\n"
+ "void main() {\n"
+ " gl_FragColor = vColor * texture2D(sTexture, vTexture.xy);\n"
+ "}\n",
+
+ // 1: kTextureVertCoordsProj_Program, kTextureTexCoordsProj_Program
+ GR_SHADER_PRECISION
+ "varying vec3 vTexture;\n"
+ "varying vec4 vColor;\n"
+ "uniform sampler2D sTexture;\n"
+ "void main() {\n"
+ // On Brian's PC laptop with Intel Gfx texture2DProj seems to be broken
+ // but it works everywhere else tested.
+#if GR_GLSL_2DPROJ_BROKEN
+ " gl_FragColor = vColor * texture2D(sTexture, vTexture.xy / vTexture.z);\n"
+#else
+ " gl_FragColor = vColor * texture2DProj(sTexture, vTexture);\n"
+#endif
+
+ "}\n",
+
+ // 2: kText_Program
+ GR_SHADER_PRECISION
+ "varying vec2 vTexture;\n"
+#if ATTRIBUTE_TEXT_COLOR
+ "varying vec4 vColor;\n"
+#else
+ "uniform vec4 uColor;\n"
+#endif
+ "uniform sampler2D sTexture;\n"
+ "void main() {\n"
+#if ATTRIBUTE_TEXT_COLOR
+ " gl_FragColor = vColor * texture2D(sTexture, vTexture).a;\n"
+#else
+ " gl_FragColor = uColor * texture2D(sTexture, vTexture).a;\n"
+#endif
+ "}\n",
+
+ // 3: kNoTexture_Program
+ GR_SHADER_PRECISION
+ "varying vec4 vColor;\n"
+ "void main() {\n"
+ " gl_FragColor = vColor;\n"
+ "}\n",
+
+ // 4: kTextureVertCoordsNoColor_Program
+ GR_SHADER_PRECISION
+ "varying vec3 vTexture;\n"
+ "uniform sampler2D sTexture;\n"
+ "void main() {\n"
+ " gl_FragColor = texture2D(sTexture, vTexture.xy);\n"
+ "}\n",
+
+ // 5: kRadialTextureVertCoords_Program, kRadialTextureTexCoords_Program
+ GR_SHADER_PRECISION
+ "varying vec3 vTexture;\n"
+ "varying vec4 vColor;\n"
+ "uniform sampler2D sTexture;\n"
+ "void main() {\n"
+ " gl_FragColor = vColor * texture2D(sTexture, vec2(length(vTexture.xy), 0.5));\n"
+ "}\n",
+
+ // 6: kSweepTextureVertCoords_Program, kSweepTextureTexCoords_Program
+ GR_SHADER_PRECISION
+ "varying vec3 vTexture;\n"
+ "varying vec4 vColor;\n"
+ "uniform sampler2D sTexture;\n"
+ "void main() {\n"
+ " vec2 t = vec2(atan(-vTexture.y, -vTexture.x)*0.1591549430918 + 0.5,\n"
+ " 0.5);\n"
+ " gl_FragColor = vColor * texture2D(sTexture, t);\n"
+ "}\n",
+
+ // 7: kTwoPointRadialTextureVertCoords_Program, kTwoPointRadialTextureTexCoords_Program
+ GR_SHADER_PRECISION
+ "varying vec4 vColor;\n"
+    "varying float vB;\n" // t coefficient of quadratic.
+ "varying vec2 t;\n" // coordinates in canonical radial gradient space
+ "uniform sampler2D sTexture;\n"
+ "uniform float uParams[6];\n"
+ "void main() {\n"
+ "float c = t.x*t.x + t.y*t.y - uParams[4];\n"
+ "float ac4 = uParams[0] * c * 4.0;\n"
+ "float root = sqrt(abs(vB * vB - ac4));\n"
+ "float t = (-vB + uParams[5] * root) * uParams[1];\n"
+    "gl_FragColor = vColor * texture2D(sTexture, vec2(t,0.5));\n"
+ "}\n",
+};
+
+// determines which frag/vert shaders are used for each program in Programs enum
+
+static const struct {
+ int fVShaderIdx;
+ int fFShaderIdx;
+ bool fHasTexMatrix;
+ bool fHasTexCoords;
+ bool fTwoPointRadial;
+ GrGpuGLShaders::ColorType fColorType;
+} gProgramLoadData[] = {
+ // kTextureVertCoords_Program
+ {0, 0, true, false, false, GrGpuGLShaders::kAttrib_ColorType },
+ // kTextureVertCoordsProj_Program
+ {0, 1, true, false, false, GrGpuGLShaders::kAttrib_ColorType },
+ // kTextureTexCoords_Program
+ {1, 0, true, true, false, GrGpuGLShaders::kAttrib_ColorType },
+ // kTextureTexCoordsProj_Program
+ {1, 1, true, true, false, GrGpuGLShaders::kAttrib_ColorType },
+ // kTextureVertCoordsNoColor_Program
+ {4, 4, true, false, false, GrGpuGLShaders::kNone_ColorType },
+ // kTextureTexCoordsNoColor_Program
+    {5, 4, true, true,  false, GrGpuGLShaders::kNone_ColorType },
+ // kText_Program
+#if ATTRIBUTE_TEXT_COLOR
+ {2, 2, false, true, false, GrGpuGLShaders::kAttrib_ColorType },
+#else
+ {2, 2, false, true, false, GrGpuGLShaders::kUniform_ColorType },
+#endif
+ // kRadialTextureVertCoords_Program
+ {0, 5, true, false, false, GrGpuGLShaders::kAttrib_ColorType },
+ // kRadialTextureTexCoords_Program
+ {1, 5, true, true, false, GrGpuGLShaders::kAttrib_ColorType },
+ // kSweepTextureVertCoords_Program
+ {0, 6, true, false, false, GrGpuGLShaders::kAttrib_ColorType },
+ // kSweepTextureTexCoords_Program
+ {1, 6, true, true, false, GrGpuGLShaders::kAttrib_ColorType },
+ // kTwoPointRadialTextureVertCoords_Program
+ {6, 7, true, false, true, GrGpuGLShaders::kAttrib_ColorType },
+ // kTwoPointRadialTextureTexCoords_Program
+ {7, 7, true, true, true, GrGpuGLShaders::kAttrib_ColorType },
+ // kNoTexture_Program
+ {3, 3, false, false, false, GrGpuGLShaders::kAttrib_ColorType },
+};
+
+#define GR_GL_POS_ATTR_LOCATION 0
+#define GR_GL_TEX_ATTR_LOCATION 1
+#define GR_GL_COL_ATTR_LOCATION 2
+#if ATTRIBUTE_MATRIX
+ #define GR_GL_MAT_ATTR_LOCATION 3
+ #define GR_GL_TEXMAT_ATTR_LOCATION 6
+#endif
+
+GLuint GrGpuGLShaders::loadShader(GLenum type, const char* src) {
+ GLuint shader = GR_GL(CreateShader(type));
+ if (0 == shader) {
+ return 0;
+ }
+
+ GLint compiled;
+ GR_GL(ShaderSource(shader, 1, &src, NULL));
+ GR_GL(CompileShader(shader));
+ GR_GL(GetShaderiv(shader, GL_COMPILE_STATUS, &compiled));
+
+ if (!compiled) {
+ GLint infoLen;
+ GR_GL(GetShaderiv(shader, GL_INFO_LOG_LENGTH, &infoLen));
+ GrAutoMalloc log(sizeof(char)*(infoLen+1)); // outside if for debugger
+ if (infoLen > 0) {
+ GR_GL(GetShaderInfoLog(shader, infoLen+1, NULL, (char*)log.get()));
+ GrPrintf((char*)log.get());
+ }
+ GrAssert(!"Shader compilation failed!");
+ GR_GL(DeleteShader(shader));
+ return 0;
+ }
+ return shader;
+}
+
+bool GrGpuGLShaders::createProgram(GLuint vshader, GLuint fshader,
+ bool hasTexMatrix,
+ bool hasTexCoords,
+ GrGpuGLShaders::ColorType colorType,
+ bool twoPointRadial,
+ ProgramData* program) {
+ program->fProgramID = GR_GL(CreateProgram());
+ program->fVShaderID = vshader;
+ program->fFShaderID = fshader;
+
+ GrAssert(0 != program->fProgramID);
+
+ GR_GL(AttachShader(program->fProgramID, vshader));
+ GR_GL(AttachShader(program->fProgramID, fshader));
+
+ GR_GL(BindAttribLocation(program->fProgramID,
+ GR_GL_POS_ATTR_LOCATION,
+ "aPosition"));
+ if (hasTexCoords) {
+ GR_GL(BindAttribLocation(program->fProgramID,
+ GR_GL_TEX_ATTR_LOCATION,
+ "aTexture"));
+ }
+#if ATTRIBUTE_MATRIX
+ if (hasTexMatrix) {
+ GR_GL(BindAttribLocation(program->fProgramID,
+ GR_GL_TEXMAT_ATTR_LOCATION,
+ "texM"));
+ // set to something arbitrary to signal to flush that program
+ // uses the texture matrix.
+ program->fTexMatrixLocation = 1000;
+ }
+#endif
+ if (colorType == kAttrib_ColorType) {
+ GR_GL(BindAttribLocation(program->fProgramID,
+ GR_GL_COL_ATTR_LOCATION,
+ "aColor"));
+ }
+#if ATTRIBUTE_MATRIX
+ GR_GL(BindAttribLocation(program->fProgramID,
+ GR_GL_MAT_ATTR_LOCATION,
+ "viewM"));
+#endif
+
+ GR_GL(LinkProgram(program->fProgramID));
+
+ GLint linked;
+ GR_GL(GetProgramiv(program->fProgramID, GL_LINK_STATUS, &linked));
+ if (!linked) {
+ GLint infoLen;
+ GR_GL(GetProgramiv(program->fProgramID, GL_INFO_LOG_LENGTH, &infoLen));
+ GrAutoMalloc log(sizeof(char)*(infoLen+1)); // outside if for debugger
+ if (infoLen > 0) {
+ GR_GL(GetProgramInfoLog(program->fProgramID,
+ infoLen+1,
+ NULL,
+ (char*)log.get()));
+ GrPrintf((char*)log.get());
+ }
+ GrAssert(!"Error linking program");
+ GR_GL(DeleteProgram(program->fProgramID));
+ program->fProgramID = 0;
+ return false;
+ }
+ program->fColorType = colorType;
+
+#if !ATTRIBUTE_MATRIX
+ program->fMatrixLocation =
+ GR_GL(GetUniformLocation(program->fProgramID, "viewM"));
+ program->fTexMatrixLocation =
+ GR_GL(GetUniformLocation(program->fProgramID, "texM"));
+#endif
+ program->fColorLocation =
+ GR_GL(GetUniformLocation(program->fProgramID, "uColor"));
+ program->fTwoPointParamsLocation =
+ GR_GL(GetUniformLocation(program->fProgramID, "uParams"));
+
+ GLint samplerLocation =
+ GR_GL(GetUniformLocation(program->fProgramID, "sTexture"));
+
+#if !ATTRIBUTE_MATRIX
+ if (-1 == program->fMatrixLocation) {
+ GrAssert(!"Cannot find matrix uniform in program");
+ GR_GL(DeleteProgram(program->fProgramID));
+ program->fProgramID = 0;
+ return false;
+ }
+#endif
+
+ bool hasTexture = hasTexCoords || hasTexMatrix;
+
+ if (-1 == samplerLocation && hasTexture) {
+ GrAssert(!"Expected to find texture sampler");
+ GR_GL(DeleteProgram(program->fProgramID));
+ program->fProgramID = 0;
+ return false;
+ } else if (-1 != samplerLocation && !hasTexture) {
+ GrAssert(!"unexpectedly found texture sampler");
+ }
+#if !ATTRIBUTE_MATRIX
+ if (-1 == program->fTexMatrixLocation && hasTexMatrix) {
+ GrAssert(!"Expected to find texture matrix");
+ GR_GL(DeleteProgram(program->fProgramID));
+ program->fProgramID = 0;
+ return false;
+ } else if (-1 != program->fTexMatrixLocation && !hasTexMatrix) {
+ GrAssert(!"unexpectedly found texture matrix");
+ }
+#endif
+
+ if (-1 == program->fColorLocation &&
+ (kUniform_ColorType == colorType)) {
+ GR_GL(DeleteProgram(program->fProgramID));
+ program->fProgramID = 0;
+ return false;
+ } else if (-1 != program->fColorLocation &&
+ (kUniform_ColorType != colorType)) {
+ GrAssert(!"Unexpectedly found color uniform");
+ }
+
+ if (twoPointRadial) {
+ if (-1 == program->fTwoPointParamsLocation) {
+ GrAssert(!"Didn't find expected uniform for 2pt radial gradient");
+ GR_GL(DeleteProgram(program->fProgramID));
+ program->fProgramID = 0;
+ return false;
+ }
+ } else {
+ GrAssert(-1 == program->fTwoPointParamsLocation);
+ }
+
+ GR_GL(UseProgram(program->fProgramID));
+ if (-1 != samplerLocation) {
+ GR_GL(Uniform1i(samplerLocation, 0));
+ }
+
+ return true;
+}
+
+GrGpuGLShaders::GrGpuGLShaders() {
+
+ resetContextHelper();
+
+ GLuint vshadIDs[GR_ARRAY_COUNT(gvshad)];
+ for (size_t s = 0; s < GR_ARRAY_COUNT(gvshad); ++s) {
+ vshadIDs[s] = loadShader(GL_VERTEX_SHADER, gvshad[s]);
+ }
+
+ GLuint fshadIDs[GR_ARRAY_COUNT(gfshad)];
+ for (size_t s = 0; s < GR_ARRAY_COUNT(gfshad); ++s) {
+ fshadIDs[s] = loadShader(GL_FRAGMENT_SHADER, gfshad[s]);
+ }
+
+ GR_STATIC_ASSERT(kProgramCount == GR_ARRAY_COUNT(gProgramLoadData));
+ for (int p = 0; p < kProgramCount; ++p) {
+ GR_DEBUGCODE(bool result = )
+ createProgram(vshadIDs[gProgramLoadData[p].fVShaderIdx],
+ fshadIDs[gProgramLoadData[p].fFShaderIdx],
+ gProgramLoadData[p].fHasTexMatrix,
+ gProgramLoadData[p].fHasTexCoords,
+ gProgramLoadData[p].fColorType,
+ gProgramLoadData[p].fTwoPointRadial,
+ &fPrograms[p]);
+ GR_DEBUGASSERT(result);
+
+ for (int m = 0; m < kMatrixModeCount; ++m) {
+ fPrograms[p].fMatrixModeCache[m].setScale(GR_ScalarMax,
+ GR_ScalarMax); // illegal
+        }
+ fPrograms[p].fColor = GrColor_ILLEGAL;
+ fPrograms[p].fTextureOrientation = (GrGLTexture::Orientation)-1; // illegal
+
+ // these aren't strictly invalid, just really unlikely.
+ fPrograms[p].fRadial2CenterX1 = GR_ScalarMin;
+ fPrograms[p].fRadial2Radius0 = GR_ScalarMin;
+ fPrograms[p].fRadial2PosRoot = true; // arbitrary
+ }
+}
+
+GrGpuGLShaders::~GrGpuGLShaders() {
+    // shaders get deleted once for each program that uses them;
+    // the redundant deletes don't matter here
+ for (int i = 0; i < kProgramCount; ++i) {
+ GR_GL(DeleteProgram(fPrograms[i].fProgramID));
+ GR_GL(DeleteShader(fPrograms[i].fVShaderID));
+ GR_GL(DeleteShader(fPrograms[i].fFShaderID));
+ }
+}
+
+void GrGpuGLShaders::resetContext() {
+ INHERITED::resetContext();
+ resetContextHelper();
+}
+
+void GrGpuGLShaders::resetContextHelper() {
+ fHWProgram = (Programs)-1;
+ fTextureOrientation = (GrGLTexture::Orientation)-1; // illegal
+
+ fHWGeometryState.fVertexLayout = 0;
+ fHWGeometryState.fPositionPtr = (void*) ~0;
+ GR_GL(DisableVertexAttribArray(GR_GL_COL_ATTR_LOCATION));
+ GR_GL(DisableVertexAttribArray(GR_GL_TEX_ATTR_LOCATION));
+ GR_GL(EnableVertexAttribArray(GR_GL_POS_ATTR_LOCATION));
+}
+
+
+void GrGpuGLShaders::flushMatrix(GLint location) {
+ GrAssert(NULL != fCurrDrawState.fRenderTarget);
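+    // m maps pixel coordinates to normalized device coordinates:
+    //   x' = 2*x/width - 1,   y' = 1 - 2*y/height
+    // (the y flip accounts for NDC's y axis pointing up).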
+ GrMatrix m (
+ GrIntToScalar(2) / fCurrDrawState.fRenderTarget->width(), 0, -GR_Scalar1,
+ 0,-GrIntToScalar(2) / fCurrDrawState.fRenderTarget->height(), GR_Scalar1,
+ 0, 0, GrMatrix::I()[8]);
+ m.setConcat(m, fCurrDrawState.fMatrixModeCache[kModelView_MatrixMode]);
+
+ // ES doesn't allow you to pass true to the transpose param,
+ // so do our own transpose
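+    // (mt[] is m laid out in the column-major order GL expects)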
+ GrScalar mt[] = {
+ m[GrMatrix::kScaleX],
+ m[GrMatrix::kSkewY],
+ m[GrMatrix::kPersp0],
+ m[GrMatrix::kSkewX],
+ m[GrMatrix::kScaleY],
+ m[GrMatrix::kPersp1],
+ m[GrMatrix::kTransX],
+ m[GrMatrix::kTransY],
+ m[GrMatrix::kPersp2]
+ };
+#if ATTRIBUTE_MATRIX
+ glVertexAttrib4fv(GR_GL_MAT_ATTR_LOCATION+0, mt+0);
+ glVertexAttrib4fv(GR_GL_MAT_ATTR_LOCATION+1, mt+3);
+ glVertexAttrib4fv(GR_GL_MAT_ATTR_LOCATION+2, mt+6);
+#else
+ GR_GL(UniformMatrix3fv(location,1,false,mt));
+#endif
+}
+
+void GrGpuGLShaders::flushTexMatrix(GLint location,
+ GrGLTexture::Orientation orientation) {
+ GrMatrix* m;
+ GrMatrix temp;
+ if (GrGLTexture::kBottomUp_Orientation == orientation) {
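+        // bottom-up textures need the t coordinate flipped (t -> 1 - t)
+        // on top of the caller's texture matrix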
+ temp.setAll(
+ GR_Scalar1, 0, 0,
+ 0, -GR_Scalar1, GR_Scalar1,
+ 0, 0, GrMatrix::I()[8]
+ );
+ temp.preConcat(fCurrDrawState.fMatrixModeCache[kTexture_MatrixMode]);
+ m = &temp;
+ } else {
+ GrAssert(GrGLTexture::kTopDown_Orientation == orientation);
+ m = &fCurrDrawState.fMatrixModeCache[kTexture_MatrixMode];
+ }
+
+ // ES doesn't allow you to pass true to the transpose param,
+ // so do our own transpose
+ GrScalar mt[] = {
+ (*m)[GrMatrix::kScaleX],
+ (*m)[GrMatrix::kSkewY],
+ (*m)[GrMatrix::kPersp0],
+ (*m)[GrMatrix::kSkewX],
+ (*m)[GrMatrix::kScaleY],
+ (*m)[GrMatrix::kPersp1],
+ (*m)[GrMatrix::kTransX],
+ (*m)[GrMatrix::kTransY],
+ (*m)[GrMatrix::kPersp2]
+ };
+#if ATTRIBUTE_MATRIX
+ glVertexAttrib4fv(GR_GL_TEXMAT_ATTR_LOCATION+0, mt+0);
+ glVertexAttrib4fv(GR_GL_TEXMAT_ATTR_LOCATION+1, mt+3);
+ glVertexAttrib4fv(GR_GL_TEXMAT_ATTR_LOCATION+2, mt+6);
+#else
+ GR_GL(UniformMatrix3fv(location,1,false,mt));
+#endif
+}
+
+void GrGpuGLShaders::flushTwoPointRadial(GLint paramsLocation,
+ const GrSamplerState& state) {
+ GrScalar centerX1 = state.getRadial2CenterX1();
+ GrScalar radius0 = state.getRadial2Radius0();
+
+ GrScalar a = GrMul(centerX1, centerX1) - GR_Scalar1;
+
+ float unis[6] = {
+ GrScalarToFloat(a),
+        1 / (2.f * GrScalarToFloat(a)),    // 1 / (2a)
+ GrScalarToFloat(centerX1),
+ GrScalarToFloat(radius0),
+ GrScalarToFloat(GrMul(radius0, radius0)),
+ state.isRadial2PosRoot() ? 1.f : -1.f
+ };
+ GR_GL(Uniform1fv(paramsLocation, 6, unis));
+}
+
+void GrGpuGLShaders::flushProgram(PrimitiveType type) {
+
+ Programs nextProgram = kNoTexture_Program;
+
+ if (!VertexHasTexCoords(fGeometrySrc.fVertexLayout)) {
+ goto HAVE_NEXT_PROGRAM;
+ }
+
+ GrAssert(fCurrDrawState.fTexture);
+
+ switch (fCurrDrawState.fSamplerState.getSampleMode()) {
+ case GrSamplerState::kRadial_SampleMode:
+ GrAssert(!fCurrDrawState.fMatrixModeCache[kTexture_MatrixMode].hasPerspective());
+ if (fGeometrySrc.fVertexLayout & kPositionAsTexCoord_VertexLayoutBit) {
+ nextProgram = kRadialTextureVertCoords_Program;
+ } else {
+ nextProgram = kRadialTextureTexCoords_Program;
+ }
+ break;
+ case GrSamplerState::kSweep_SampleMode:
+ GrAssert(!fCurrDrawState.fMatrixModeCache[kTexture_MatrixMode].hasPerspective());
+ if (fGeometrySrc.fVertexLayout & kPositionAsTexCoord_VertexLayoutBit) {
+ nextProgram = kSweepTextureVertCoords_Program;
+ } else {
+ nextProgram = kSweepTextureTexCoords_Program;
+ }
+ break;
+ case GrSamplerState::kRadial2_SampleMode:
+ GrAssert(!fCurrDrawState.fMatrixModeCache[kTexture_MatrixMode].hasPerspective());
+ if (fGeometrySrc.fVertexLayout & kPositionAsTexCoord_VertexLayoutBit) {
+ nextProgram = kTwoPointRadialTextureVertCoords_Program;
+ } else {
+ nextProgram = kTwoPointRadialTextureTexCoords_Program;
+ }
+ break;
+ case GrSamplerState::kAlphaMod_SampleMode:
+ GrAssert(((GrGLTexture*)fCurrDrawState.fTexture)->orientation() ==
+ GrGLTexture::kTopDown_Orientation);
+            GrAssert(((GrGLTexture*)fCurrDrawState.fTexture)->uploadFormat() ==
+                     GL_ALPHA);
+
+ nextProgram = kText_Program;
+ break;
+ case GrSamplerState::kNormal_SampleMode: {
+ GR_DEBUGCODE(GrGLTexture* tex = (GrGLTexture*)fCurrDrawState.fTexture;)
+ GrAssert(tex);
+
+ bool persp = fCurrDrawState.fMatrixModeCache[kTexture_MatrixMode].hasPerspective();
+
+ if (fGeometrySrc.fVertexLayout & kPositionAsTexCoord_VertexLayoutBit) {
+ nextProgram = persp ? kTextureVertCoordsProj_Program :
+ kTextureVertCoords_Program;
+ } else {
+ nextProgram = persp ? kTextureTexCoordsProj_Program :
+ kTextureTexCoords_Program;
+ }
+ // check for case when frag shader can skip the color modulation
+ if (!persp && !(fGeometrySrc.fVertexLayout
+ & kColor_VertexLayoutBit) &&
+ 0xffffffff == fCurrDrawState.fColor) {
+ switch (nextProgram) {
+ case kTextureVertCoords_Program:
+ nextProgram = kTextureVertCoordsNoColor_Program;
+ break;
+ case kTextureTexCoords_Program:
+ nextProgram = kTextureTexCoordsNoColor_Program;
+ break;
+ default:
+                    GrAssert(!"Unexpected");
+ break;
+ }
+ }
+ } break;
+ default:
+ GrAssert(!"Unknown samplemode");
+ break;
+ }
+
+HAVE_NEXT_PROGRAM:
+ if (fHWProgram != nextProgram) {
+ GR_GL(UseProgram(fPrograms[nextProgram].fProgramID));
+ fHWProgram = nextProgram;
+#if GR_COLLECT_STATS
+ ++fStats.fProgChngCnt;
+#endif
+ }
+}
+
+bool GrGpuGLShaders::flushGraphicsState(PrimitiveType type) {
+
+ flushGLStateCommon(type);
+
+ if (fRenderTargetChanged) {
+ // our coords are in pixel space and the GL matrices map to NDC
+ // so if the viewport changed, our matrix is now wrong.
+#if ATTRIBUTE_MATRIX
+ fHWDrawState.fMatrixModeCache[kModelView_MatrixMode].setScale(GR_ScalarMax,
+ GR_ScalarMax);
+#else
+ // we assume all shader matrices may be wrong after viewport changes
+ for (int p = 0; p < kProgramCount; ++p) {
+ // set to illegal matrix
+ fPrograms[p].fMatrixModeCache[kModelView_MatrixMode].setScale(GR_ScalarMax,
+ GR_ScalarMax);
+ }
+#endif
+ fRenderTargetChanged = false;
+ }
+
+ flushProgram(type);
+
+ if (fGeometrySrc.fVertexLayout & kColor_VertexLayoutBit) {
+ // invalidate the immediate mode color
+ fHWDrawState.fColor = GrColor_ILLEGAL;
+ } else {
+ // if we don't have per-vert colors either set the color attr
+ // or color uniform (depending on which program).
+ if (-1 != fPrograms[fHWProgram].fColorLocation) {
+ GrAssert(kUniform_ColorType == fPrograms[fHWProgram].fColorType);
+ if (fPrograms[fHWProgram].fColor != fCurrDrawState.fColor) {
+ float c[] = {
+ GrColorUnpackR(fCurrDrawState.fColor) / 255.f,
+ GrColorUnpackG(fCurrDrawState.fColor) / 255.f,
+ GrColorUnpackB(fCurrDrawState.fColor) / 255.f,
+ GrColorUnpackA(fCurrDrawState.fColor) / 255.f
+ };
+ GR_GL(Uniform4fv(fPrograms[fHWProgram].fColorLocation, 1, c));
+ fPrograms[fHWProgram].fColor = fCurrDrawState.fColor;
+ }
+ } else if (kAttrib_ColorType == fPrograms[fHWProgram].fColorType &&
+ fHWDrawState.fColor != fCurrDrawState.fColor) {
+            // OpenGL ES only supports the float varieties of glVertexAttrib
+ float c[] = {
+ GrColorUnpackR(fCurrDrawState.fColor) / 255.f,
+ GrColorUnpackG(fCurrDrawState.fColor) / 255.f,
+ GrColorUnpackB(fCurrDrawState.fColor) / 255.f,
+ GrColorUnpackA(fCurrDrawState.fColor) / 255.f
+ };
+ GR_GL(VertexAttrib4fv(GR_GL_COL_ATTR_LOCATION, c));
+ fHWDrawState.fColor = fCurrDrawState.fColor;
+ }
+ }
+
+#if ATTRIBUTE_MATRIX
+ GrMatrix* currentMats = fHWDrawState.fMatrixModeCache;
+ GrGLTexture::Orientation& orientation = fTextureOrientation;
+#else
+ GrMatrix* currentMats = fPrograms[fHWProgram].fMatrixModeCache;
+ GrGLTexture::Orientation& orientation =
+ fPrograms[fHWProgram].fTextureOrientation;
+#endif
+
+ if (currentMats[kModelView_MatrixMode] !=
+ fCurrDrawState.fMatrixModeCache[kModelView_MatrixMode]) {
+ flushMatrix(fPrograms[fHWProgram].fMatrixLocation);
+ currentMats[kModelView_MatrixMode] =
+ fCurrDrawState.fMatrixModeCache[kModelView_MatrixMode];
+ }
+
+ GrGLTexture* texture = (GrGLTexture*) fCurrDrawState.fTexture;
+ if (NULL != texture) {
+ if (-1 != fPrograms[fHWProgram].fTexMatrixLocation &&
+ (currentMats[kTexture_MatrixMode] !=
+ fCurrDrawState.fMatrixModeCache[kTexture_MatrixMode] ||
+ orientation != texture->orientation())) {
+ flushTexMatrix(fPrograms[fHWProgram].fTexMatrixLocation,
+ texture->orientation());
+ currentMats[kTexture_MatrixMode] =
+ fCurrDrawState.fMatrixModeCache[kTexture_MatrixMode];
+ orientation = texture->orientation();
+ }
+ }
+
+ const GrSamplerState& sampler = fCurrDrawState.fSamplerState;
+ if (-1 != fPrograms[fHWProgram].fTwoPointParamsLocation &&
+ (fPrograms[fHWProgram].fRadial2CenterX1 != sampler.getRadial2CenterX1() ||
+ fPrograms[fHWProgram].fRadial2Radius0 != sampler.getRadial2Radius0() ||
+ fPrograms[fHWProgram].fRadial2PosRoot != sampler.isRadial2PosRoot())) {
+
+ flushTwoPointRadial(fPrograms[fHWProgram].fTwoPointParamsLocation,
+ sampler);
+ fPrograms[fHWProgram].fRadial2CenterX1 = sampler.getRadial2CenterX1();
+ fPrograms[fHWProgram].fRadial2Radius0 = sampler.getRadial2Radius0();
+ fPrograms[fHWProgram].fRadial2PosRoot = sampler.isRadial2PosRoot();
+ }
+
+ return true;
+}
+
+void GrGpuGLShaders::setupGeometry(uint32_t startVertex,
+ uint32_t startIndex,
+ uint32_t vertexCount,
+ uint32_t indexCount) {
+
+ int newColorOffset, newTexCoordOffset;
+
+ GLsizei newStride = VertexSizeAndOffsets(fGeometrySrc.fVertexLayout,
+ &newTexCoordOffset,
+ &newColorOffset);
+ int oldColorOffset, oldTexCoordOffset;
+ GLsizei oldStride = VertexSizeAndOffsets(fHWGeometryState.fVertexLayout,
+ &oldTexCoordOffset,
+ &oldColorOffset);
+
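+    // posPtr starts as a byte offset to the first vertex; it is used as a
+    // buffer offset when a VBO is bound, or rebased onto the client-side
+    // array / reserved vertex storage below.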
+ const GLvoid* posPtr = (GLvoid*)(newStride * startVertex);
+
+ if (kBuffer_GeometrySrcType == fGeometrySrc.fVertexSrc) {
+ GrAssert(NULL != fGeometrySrc.fVertexBuffer);
+ GrAssert(!fGeometrySrc.fVertexBuffer->isLocked());
+ if (fHWGeometryState.fVertexBuffer != fGeometrySrc.fVertexBuffer) {
+ GrGLVertexBuffer* buf =
+ (GrGLVertexBuffer*)fGeometrySrc.fVertexBuffer;
+ GR_GL(BindBuffer(GL_ARRAY_BUFFER, buf->bufferID()));
+ fHWGeometryState.fVertexBuffer = fGeometrySrc.fVertexBuffer;
+ }
+ } else {
+ if (kArray_GeometrySrcType == fGeometrySrc.fVertexSrc) {
+ posPtr = (void*)((intptr_t)fGeometrySrc.fVertexArray +
+ (intptr_t)posPtr);
+ } else {
+ GrAssert(kReserved_GeometrySrcType == fGeometrySrc.fVertexSrc);
+ posPtr = (void*)((intptr_t)fVertices.get() + (intptr_t)posPtr);
+ }
+ if (NULL != fHWGeometryState.fVertexBuffer) {
+ GR_GL(BindBuffer(GL_ARRAY_BUFFER, 0));
+ fHWGeometryState.fVertexBuffer = NULL;
+ }
+ }
+
+ if (kBuffer_GeometrySrcType == fGeometrySrc.fIndexSrc) {
+ GrAssert(NULL != fGeometrySrc.fIndexBuffer);
+ GrAssert(!fGeometrySrc.fIndexBuffer->isLocked());
+ if (fHWGeometryState.fIndexBuffer != fGeometrySrc.fIndexBuffer) {
+ GrGLIndexBuffer* buf =
+ (GrGLIndexBuffer*)fGeometrySrc.fIndexBuffer;
+ GR_GL(BindBuffer(GL_ELEMENT_ARRAY_BUFFER, buf->bufferID()));
+ fHWGeometryState.fIndexBuffer = fGeometrySrc.fIndexBuffer;
+ }
+ } else if (NULL != fHWGeometryState.fIndexBuffer) {
+ GR_GL(BindBuffer(GL_ELEMENT_ARRAY_BUFFER, 0));
+ fHWGeometryState.fIndexBuffer = NULL;
+ }
+
+ GLenum scalarType;
+ bool texCoordNorm;
+ if (fGeometrySrc.fVertexLayout & kTextFormat_VertexLayoutBit) {
+ scalarType = GrGLTextType;
+ texCoordNorm = GR_GL_TEXT_TEXTURE_NORMALIZED;
+ } else {
+ scalarType = GrGLType;
+ texCoordNorm = false;
+ }
+
+ bool baseChange = posPtr != fHWGeometryState.fPositionPtr;
+ bool scalarChange = (GrGLTextType != GrGLType) &&
+ (kTextFormat_VertexLayoutBit &
+ (fHWGeometryState.fVertexLayout ^
+ fGeometrySrc.fVertexLayout));
+ bool strideChange = newStride != oldStride;
+ bool posChange = baseChange || scalarChange || strideChange;
+
+ if (posChange) {
+ GR_GL(VertexAttribPointer(GR_GL_POS_ATTR_LOCATION, 2, scalarType,
+ false, newStride, posPtr));
+ fHWGeometryState.fPositionPtr = posPtr;
+ }
+
+ if (newTexCoordOffset > 0) {
+ GLvoid* texCoordPtr = (int8_t*)posPtr + newTexCoordOffset;
+ if (oldTexCoordOffset <= 0) {
+ GR_GL(EnableVertexAttribArray(GR_GL_TEX_ATTR_LOCATION));
+ }
+ if (posChange || newTexCoordOffset != oldTexCoordOffset) {
+ GR_GL(VertexAttribPointer(GR_GL_TEX_ATTR_LOCATION, 2, scalarType,
+ texCoordNorm, newStride, texCoordPtr));
+ }
+ } else if (oldTexCoordOffset > 0) {
+ GR_GL(DisableVertexAttribArray(GR_GL_TEX_ATTR_LOCATION));
+ }
+
+ if (newColorOffset > 0) {
+ GLvoid* colorPtr = (int8_t*)posPtr + newColorOffset;
+ if (oldColorOffset <= 0) {
+ GR_GL(EnableVertexAttribArray(GR_GL_COL_ATTR_LOCATION));
+ }
+ if (posChange || newColorOffset != oldColorOffset) {
+ GR_GL(VertexAttribPointer(GR_GL_COL_ATTR_LOCATION, 4,
+ GL_UNSIGNED_BYTE,
+ true, newStride, colorPtr));
+ }
+ } else if (oldColorOffset > 0) {
+ GR_GL(DisableVertexAttribArray(GR_GL_COL_ATTR_LOCATION));
+ }
+
+ fHWGeometryState.fVertexLayout = fGeometrySrc.fVertexLayout;
+}
+#endif
diff --git a/gpu/src/GrGpuGLShaders.h b/gpu/src/GrGpuGLShaders.h
new file mode 100644
index 0000000000..cba387bcce
--- /dev/null
+++ b/gpu/src/GrGpuGLShaders.h
@@ -0,0 +1,153 @@
+/*
+ Copyright 2010 Google Inc.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+ */
+
+
+#ifndef GrGpuGLShaders_DEFINED
+#define GrGpuGLShaders_DEFINED
+
+#include "GrGpuGL.h"
+
+// Programmable OpenGL or OpenGL ES 2.0
+class GrGpuGLShaders : public GrGpuGL {
+public:
+ GrGpuGLShaders();
+ virtual ~GrGpuGLShaders();
+
+ virtual void resetContext();
+
+ // type of colors used by a program
+ enum ColorType {
+ kNone_ColorType,
+ kAttrib_ColorType,
+ kUniform_ColorType,
+ };
+protected:
+ // overrides from GrGpu
+ virtual bool flushGraphicsState(PrimitiveType type);
+ virtual void setupGeometry(uint32_t startVertex,
+ uint32_t startIndex,
+ uint32_t vertexCount,
+ uint32_t indexCount);
+
+private:
+ void resetContextHelper();
+
+ // sets the texture matrix uniform for currently bound program
+ void flushTexMatrix(GLint location,
+ GrGLTexture::Orientation orientation);
+ // sets the MVP matrix uniform for currently bound program
+ void flushMatrix(GLint location);
+
+ void flushTwoPointRadial(GLint paramsLocation, const GrSamplerState&);
+
+ // reads shader from array and compiles it with GL, returns shader ID or 0 if failed
+ GLuint loadShader(GLenum type, const char* src);
+
+ struct ProgramData;
+ // creates a GL program with two shaders attached.
+ // Gets the relevant uniform locations.
+ // Sets the texture sampler if present to texture 0
+ // Binds the program
+ // returns true if succeeded.
+ bool createProgram(GLuint vshader,
+ GLuint fshader,
+ bool hasTexMatrix,
+ bool hasTexCoords,
+ ColorType colorType,
+ bool twoPointRadial,
+ ProgramData* program);
+
+ // called at flush time to setup the appropriate program
+ void flushProgram(PrimitiveType type);
+
+ enum Programs {
+ // use vertex coordinates
+ kTextureVertCoords_Program = 0,
+ kTextureVertCoordsProj_Program,
+
+ // use separate tex coords
+ kTextureTexCoords_Program,
+ kTextureTexCoordsProj_Program,
+
+ // constant color texture, no proj
+ // verts as a tex coords
+ kTextureVertCoordsNoColor_Program,
+
+ // constant color texture, no proj
+ // separate tex coords
+ kTextureTexCoordsNoColor_Program,
+
+ // special program for text glyphs
+ kText_Program,
+
+ // programs for radial texture lookup
+ kRadialTextureVertCoords_Program,
+ kRadialTextureTexCoords_Program,
+
+ // programs for sweep texture lookup
+ kSweepTextureVertCoords_Program,
+ kSweepTextureTexCoords_Program,
+
+ // programs for two-point radial lookup
+ kTwoPointRadialTextureVertCoords_Program,
+ kTwoPointRadialTextureTexCoords_Program,
+
+ // color only drawing
+ kNoTexture_Program,
+
+ kProgramCount
+ };
+
+ // Records per-program information
+ // we can specify the attribute locations so that they are constant
+ // across our shaders. But the driver determines the uniform locations
+ // at link time. We don't need to remember the sampler uniform location
+ // because we will bind a texture slot to it and never change it
+ // Uniforms are program-local so we can't rely on fHWState to hold the
+ // previous uniform state after a program change.
+ struct ProgramData {
+ // IDs
+ GLuint fVShaderID;
+ GLuint fFShaderID;
+ GLuint fProgramID;
+
+ // shader uniform locations (-1 if shader doesn't use them)
+ GLint fMatrixLocation;
+ GLint fTexMatrixLocation;
+ GLint fColorLocation;
+ GLint fTwoPointParamsLocation;
+
+ ColorType fColorType;
+
+ // these reflect the current values of uniforms
+ // (GL uniform values travel with program)
+ GrMatrix fMatrixModeCache[kMatrixModeCount];
+ GrColor fColor;
+ GrGLTexture::Orientation fTextureOrientation;
+ GrScalar fRadial2CenterX1;
+ GrScalar fRadial2Radius0;
+ bool fRadial2PosRoot;
+ };
+
+ ProgramData fPrograms[kProgramCount];
+ Programs fHWProgram;
+
+ GrGLTexture::Orientation fTextureOrientation;
+
+ typedef GrGpuGL INHERITED;
+};
+
+#endif
diff --git a/gpu/src/GrGpuGLShaders2.cpp b/gpu/src/GrGpuGLShaders2.cpp
new file mode 100644
index 0000000000..0a15be1c40
--- /dev/null
+++ b/gpu/src/GrGpuGLShaders2.cpp
@@ -0,0 +1,1388 @@
+/*
+ Copyright 2010 Google Inc.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+ */
+
+
+#include "GrGLConfig.h"
+
+#if GR_SUPPORT_GLES2 || GR_SUPPORT_GLDESKTOP
+
+#include "GrGpuGLShaders2.h"
+#include "GrGpuVertex.h"
+#include "GrMemory.h"
+#include "GrStringBuilder.h"
+
+
+#define ATTRIBUTE_MATRIX 0
+
+#define SKIP_COLOR_MODULATE_OPT 0
+
+#define PRINT_SHADERS 0
+
+#define SKIP_CACHE_CHECK true
+
+#if GR_SUPPORT_GLES2
+ #define GR_PRECISION "mediump"
+ const char GR_SHADER_PRECISION[] = "precision mediump float;\n";
+#else
+ #define GR_PRECISION ""
+ const char GR_SHADER_PRECISION[] = "";
+#endif
+
+#define POS_ATTR_LOCATION 0
+#define TEX_ATTR_LOCATION 1
+#define COL_ATTR_LOCATION 2
+#if ATTRIBUTE_MATRIX
+#define VIEWMAT_ATTR_LOCATION 3
+#define TEXMAT_ATTR_LOCATION(X) (6 + 3 * (X))
+#define BOGUS_MATRIX_UNI_LOCATION 1000
+#endif
+
+const int GrGpuGLShaders2::NUM_STAGES = 1;
+
+struct GrGpuGLShaders2::StageUniLocations {
+ GLint fTextureMatrixUni;
+ GLint fSamplerUni;
+ GLint fRadial2Uni;
+};
+
+struct GrGpuGLShaders2::UniLocations {
+ GLint fViewMatrixUni;
+ StageUniLocations fStages[NUM_STAGES];
+};
+
+// Records per-program information
+// we can specify the attribute locations so that they are constant
+// across our shaders. But the driver determines the uniform locations
+// at link time. We don't need to remember the sampler uniform location
+// because we will bind a texture slot to it and never change it
+// Uniforms are program-local so we can't rely on fHWState to hold the
+// previous uniform state after a program change.
+struct GrGpuGLShaders2::Program {
+ // IDs
+ GLuint fVShaderID;
+ GLuint fFShaderID;
+ GLuint fProgramID;
+
+ // shader uniform locations (-1 if shader doesn't use them)
+ UniLocations fUniLocations;
+
+ // these reflect the current values of uniforms
+ // (GL uniform values travel with program)
+ GrMatrix fViewMatrix;
+ GrMatrix fTextureMatrix[NUM_STAGES];
+ GrGLTexture::Orientation fTextureOrientation[NUM_STAGES];
+ GrScalar fRadial2CenterX1[NUM_STAGES];
+ GrScalar fRadial2Radius0[NUM_STAGES];
+ bool fRadial2PosRoot[NUM_STAGES];
+
+};
+
+// must be tightly packed
+struct GrGpuGLShaders2::StageDesc {
+ enum OptFlagBits {
+ kNoPerspective_OptFlagBit = 0x1,
+ kIdentityMatrix_OptFlagBit = 0x2,
+ };
+ int fOptFlags : 8;
+ bool fEnabled;
+ enum Modulation {
+ kColor_Modulation,
+ kAlpha_Modulation,
+ } fModulation : 8;
+ enum CoordMapping {
+ kIdentity_CoordMapping,
+ kRadialGradient_CoordMapping,
+ kSweepGradient_CoordMapping,
+ kRadial2Gradient_CoordMapping,
+ } fCoordMapping : 8;
+};
+
+// must be tightly packed
+struct GrGpuGLShaders2::ProgramDesc {
+ GrVertexLayout fVertexLayout;
+ enum {
+ kNotPoints_OptFlagBit = 0x1,
+ kVertexColorAllOnes_OptFlagBit = 0x2,
+ };
+ // we're assuming optflags and layout pack into 32 bits
+ GR_STATIC_ASSERT(2 == sizeof(GrVertexLayout));
+ int fOptFlags : 16;
+
+ StageDesc fStages[NUM_STAGES];
+
+ bool operator == (const ProgramDesc& desc) const {
+ // keep 4-byte aligned and tightly packed
+ GR_STATIC_ASSERT(4 == sizeof(StageDesc));
+ GR_STATIC_ASSERT(2 + 2 + 4 * NUM_STAGES == sizeof(ProgramDesc));
+ return 0 == memcmp(this, &desc, sizeof(ProgramDesc));
+ }
+};
+
+#include "GrTHashCache.h"
+
+class GrGpuGLShaders2::ProgramCache : public ::GrNoncopyable {
+private:
+ struct Entry;
+ class HashKey {
+ public:
+ HashKey();
+ HashKey(const ProgramDesc& desc);
+ static const HashKey& GetKey(const Entry&);
+ static bool EQ(const Entry&, const HashKey&);
+ static bool LT(const Entry&, const HashKey&);
+ bool operator <(const HashKey& key) const;
+ bool operator ==(const HashKey& key) const;
+ uint32_t getHash() const;
+ private:
+ ProgramDesc fDesc;
+ uint32_t fHash;
+ };
+
+ struct Entry {
+ Program fProgram;
+ HashKey fKey;
+ uint32_t fLRUStamp;
+ };
+
+ // if hash bits is changed, need to change hash function
+ GrTHashTable<Entry, HashKey, 8> fHashCache;
+
+ static const int MAX_ENTRIES = 16;
+ Entry fEntries[MAX_ENTRIES];
+ int fCount;
+ uint32_t fCurrLRUStamp;
+
+public:
+ ProgramCache() {
+ fCount = 0;
+ fCurrLRUStamp = 0;
+ }
+
+ ~ProgramCache() {
+ for (int i = 0; i < fCount; ++i) {
+ GrGpuGLShaders2::DeleteProgram(&fEntries[i].fProgram);
+ }
+ }
+
+ void abandon() {
+ fCount = 0;
+ }
+
+ void invalidateViewMatrices() {
+ for (int i = 0; i < fCount; ++i) {
+ // set to illegal matrix
+ fEntries[i].fProgram.fViewMatrix.setScale(GR_ScalarMax,
+ GR_ScalarMax);
+ }
+ }
+
+ Program* getProgram(const ProgramDesc& desc) {
+ HashKey key(desc);
+ Entry* entry = fHashCache.find(key);
+ if (NULL == entry) {
+ if (fCount < MAX_ENTRIES) {
+ entry = fEntries + fCount;
+ ++fCount;
+ } else {
+ GrAssert(MAX_ENTRIES == fCount);
+ entry = fEntries;
+ for (int i = 1; i < MAX_ENTRIES; ++i) {
+ if (fEntries[i].fLRUStamp < entry->fLRUStamp) {
+ entry = fEntries + i;
+ }
+ }
+ fHashCache.remove(entry->fKey, entry);
+ GrGpuGLShaders2::DeleteProgram(&entry->fProgram);
+ }
+ entry->fKey = key;
+ GrGpuGLShaders2::GenProgram(desc, &entry->fProgram);
+ fHashCache.insert(entry->fKey, entry);
+ }
+
+ entry->fLRUStamp = fCurrLRUStamp;
+ if (UINT32_MAX == fCurrLRUStamp) {
+ // wrap around! just trash our LRU, one time hit.
+ for (int i = 0; i < fCount; ++i) {
+ fEntries[i].fLRUStamp = 0;
+ }
+ }
+ ++fCurrLRUStamp;
+ return &entry->fProgram;
+ }
+};
+
+GrGpuGLShaders2::ProgramCache::HashKey::HashKey() {
+}
+
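+// rotate right by 8 bits; used below to mix the two 32-bit words of the
+// ProgramDesc before they are XORed into the hash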
+static uint32_t ror(uint32_t x) {
+ return (x >> 8) | (x << 24);
+}
+
+
+GrGpuGLShaders2::ProgramCache::HashKey::HashKey(const ProgramDesc& desc) {
+ fDesc = desc;
+ // if you change the size of the desc, need to update the hash function
+ GR_STATIC_ASSERT(8 == sizeof(ProgramDesc));
+
+ uint32_t* d = (uint32_t*) &fDesc;
+ fHash = d[0] ^ ror(d[1]);
+}
+
+bool GrGpuGLShaders2::ProgramCache::HashKey::EQ(const Entry& entry,
+ const HashKey& key) {
+ return entry.fKey == key;
+}
+
+bool GrGpuGLShaders2::ProgramCache::HashKey::LT(const Entry& entry,
+ const HashKey& key) {
+ return entry.fKey < key;
+}
+
+bool GrGpuGLShaders2::ProgramCache::HashKey::operator ==(const HashKey& key) const {
+ return fDesc == key.fDesc;
+}
+
+bool GrGpuGLShaders2::ProgramCache::HashKey::operator <(const HashKey& key) const {
+ return memcmp(&fDesc, &key.fDesc, sizeof(HashKey)) < 0;
+}
+
+uint32_t GrGpuGLShaders2::ProgramCache::HashKey::getHash() const {
+ return fHash;
+}
+
+
+struct GrGpuGLShaders2::ShaderCodeSegments {
+ GrSStringBuilder<256> fVSUnis;
+ GrSStringBuilder<256> fVSAttrs;
+ GrSStringBuilder<256> fVaryings;
+ GrSStringBuilder<256> fFSUnis;
+ GrSStringBuilder<512> fVSCode;
+ GrSStringBuilder<512> fFSCode;
+};
+// for variable names etc
+typedef GrSStringBuilder<16> GrTokenString;
+
+#if ATTRIBUTE_MATRIX
+ #define VIEW_MATRIX_NAME "aViewM"
+#else
+ #define VIEW_MATRIX_NAME "uViewM"
+#endif
+
+#define POS_ATTR_NAME "aPosition"
+#define COL_ATTR_NAME "aColor"
+#define TEX_ATTR_NAME "aTexture"
+
+static inline const char* float_vector_type(int count) {
+ static const char* FLOAT_VECS[] = {"ERROR", "float", "vec2", "vec3", "vec4"};
+ GrAssert(count >= 1 && count < GR_ARRAY_COUNT(FLOAT_VECS));
+ return FLOAT_VECS[count];
+}
+
+static inline const char* vector_homog_coord(int count) {
+ static const char* HOMOGS[] = {"ERROR", "", ".y", ".z", ".w"};
+ GrAssert(count >= 1 && count < GR_ARRAY_COUNT(HOMOGS));
+ return HOMOGS[count];
+}
+
+static inline const char* vector_nonhomog_coords(int count) {
+ static const char* NONHOMOGS[] = {"ERROR", "", ".x", ".xy", ".xyz"};
+ GrAssert(count >= 1 && count < GR_ARRAY_COUNT(NONHOMOGS));
+ return NONHOMOGS[count];
+}
+
+static inline const char* vector_all_coords(int count) {
+ static const char* ALL[] = {"ERROR", "", ".xy", ".xyz", ".xyzw"};
+ GrAssert(count >= 1 && count < GR_ARRAY_COUNT(ALL));
+ return ALL[count];
+}
+
+static void tex_matrix_name(int stage, GrStringBuilder* s) {
+#if ATTRIBUTE_MATRIX
+ *s = "aTexM";
+#else
+ *s = "uTexM";
+#endif
+ s->appendInt(stage);
+}
+
+static void sampler_name(int stage, GrStringBuilder* s) {
+ *s = "uSampler";
+ s->appendInt(stage);
+}
+
+static void stage_varying_name(int stage, GrStringBuilder* s) {
+ *s = "vStage";
+ s->appendInt(stage);
+}
+
+static void radial2_param_name(int stage, GrStringBuilder* s) {
+ *s = "uRadial2Params";
+ s->appendInt(stage);
+}
+
+static void radial2_varying_name(int stage, GrStringBuilder* s) {
+ *s = "vB";
+ s->appendInt(stage);
+}
+
+#include "GrRandom.h"
+
+void GrGpuGLShaders2::ProgramUnitTest() {
+ static const uint16_t VFORMATS[] = {
+ 0,
+ kSeparateTexCoord_VertexLayoutBit,
+ kPositionAsTexCoord_VertexLayoutBit,
+ kSeparateTexCoord_VertexLayoutBit | kColor_VertexLayoutBit,
+ kPositionAsTexCoord_VertexLayoutBit | kColor_VertexLayoutBit,
+ kTextFormat_VertexLayoutBit
+ };
+ static const int PROG_OPTS[] = {
+ 0,
+ ProgramDesc::kNotPoints_OptFlagBit,
+ ProgramDesc::kVertexColorAllOnes_OptFlagBit,
+ ProgramDesc::kNotPoints_OptFlagBit | ProgramDesc::kVertexColorAllOnes_OptFlagBit
+ };
+ static const int STAGE_OPTS[] = {
+ 0,
+ StageDesc::kNoPerspective_OptFlagBit,
+        StageDesc::kIdentityMatrix_OptFlagBit
+ };
+ static const int STAGE_MODULATES[] = {
+ StageDesc::kColor_Modulation,
+ StageDesc::kAlpha_Modulation
+ };
+ static const int STAGE_COORD_MAPPINGS[] = {
+ StageDesc::kIdentity_CoordMapping,
+ StageDesc::kRadialGradient_CoordMapping,
+ StageDesc::kSweepGradient_CoordMapping,
+ StageDesc::kRadial2Gradient_CoordMapping
+ };
+ ProgramDesc pdesc;
+ memset(&pdesc, 0, sizeof(pdesc));
+
+ static const int NUM_TESTS = 1024;
+
+    // GrRandom's nextU() values have patterns in the low bits
+ // So using nextU() % array_count might never take some values.
+ GrRandom random;
+ for (int t = 0; t < NUM_TESTS; ++t) {
+ int x = (int)(random.nextF() * GR_ARRAY_COUNT(VFORMATS));
+ pdesc.fVertexLayout = VFORMATS[x];
+ x = (int)(random.nextF() * GR_ARRAY_COUNT(PROG_OPTS));
+ pdesc.fOptFlags = PROG_OPTS[x];
+ for (int s = 0; s < NUM_STAGES; ++s) {
+ x = (int)(random.nextF() * 2.f);
+ pdesc.fStages[s].fEnabled = x;
+ x = (int)(random.nextF() * GR_ARRAY_COUNT(STAGE_OPTS));
+ pdesc.fStages[s].fOptFlags = STAGE_OPTS[x];
+ x = (int)(random.nextF() * GR_ARRAY_COUNT(STAGE_MODULATES));
+ pdesc.fStages[s].fModulation = STAGE_MODULATES[x];
+ x = (int)(random.nextF() * GR_ARRAY_COUNT(STAGE_COORD_MAPPINGS));
+ pdesc.fStages[s].fCoordMapping = STAGE_COORD_MAPPINGS[x];
+ }
+ Program program;
+ GenProgram(pdesc, &program);
+ DeleteProgram(&program);
+ }
+}
+
+void GrGpuGLShaders2::GenStageCode(int stageNum,
+ const StageDesc& desc,
+ const char* fsInColor, // NULL means no incoming color
+ const char* fsOutColor,
+ const char* vsInCoord,
+ ShaderCodeSegments* segments,
+ StageUniLocations* locations) {
+
+ GrAssert(stageNum >= 0 && stageNum <= 9);
+
+ GrTokenString varyingName;
+ stage_varying_name(stageNum, &varyingName);
+
+ // First decide how many coords are needed to access the texture
+ // Right now it's always 2 but we could start using 1D textures for
+ // gradients.
+ static const int coordDims = 2;
+ int varyingDims;
+ /// Vertex Shader Stuff
+
+ // decide whether we need a matrix to transform texture coords
+ // and whether the varying needs a perspective coord.
+ GrTokenString texMName;
+ tex_matrix_name(stageNum, &texMName);
+ if (desc.fOptFlags & StageDesc::kIdentityMatrix_OptFlagBit) {
+ varyingDims = coordDims;
+ } else {
+ #if ATTRIBUTE_MATRIX
+ segments->fVSAttrs += "attribute mat3 ";
+ segments->fVSAttrs += texMName;
+ segments->fVSAttrs += ";\n";
+ #else
+ segments->fVSUnis += "uniform mat3 ";
+ segments->fVSUnis += texMName;
+ segments->fVSUnis += ";\n";
+ locations->fTextureMatrixUni = 1;
+ #endif
+ if (desc.fOptFlags & StageDesc::kNoPerspective_OptFlagBit) {
+ varyingDims = coordDims;
+ } else {
+ varyingDims = coordDims + 1;
+ }
+ }
+
+ GrTokenString samplerName;
+ sampler_name(stageNum, &samplerName);
+ segments->fFSUnis += "uniform sampler2D ";
+ segments->fFSUnis += samplerName;
+ segments->fFSUnis += ";\n";
+ locations->fSamplerUni = 1;
+
+ segments->fVaryings += "varying ";
+ segments->fVaryings += float_vector_type(varyingDims);
+ segments->fVaryings += " ";
+ segments->fVaryings += varyingName;
+ segments->fVaryings += ";\n";
+
+ if (desc.fOptFlags & StageDesc::kIdentityMatrix_OptFlagBit) {
+ GrAssert(varyingDims == coordDims);
+ segments->fVSCode += "\t";
+ segments->fVSCode += varyingName;
+ segments->fVSCode += " = ";
+ segments->fVSCode += vsInCoord;
+ segments->fVSCode += ";\n";
+ } else {
+ segments->fVSCode += "\t";
+ segments->fVSCode += varyingName;
+ segments->fVSCode += " = (";
+ segments->fVSCode += texMName;
+ segments->fVSCode += " * vec3(";
+ segments->fVSCode += vsInCoord;
+ segments->fVSCode += ", 1))";
+ segments->fVSCode += vector_all_coords(varyingDims);
+ segments->fVSCode += ";\n";
+ }
+
+ GrTokenString radial2ParamsName;
+ radial2_param_name(stageNum, &radial2ParamsName);
+ // for radial grads without perspective we can pass the linear
+ // part of the quadratic as a varying.
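+    // (b = 2*(params[2]*coord.x - params[3]) is affine in the coord, so with
+    // no perspective it can be computed per-vertex and interpolated.)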
+ GrTokenString radial2VaryingName;
+ radial2_varying_name(stageNum, &radial2VaryingName);
+
+ if (StageDesc::kRadial2Gradient_CoordMapping == desc.fCoordMapping) {
+
+ segments->fVSUnis += "uniform " GR_PRECISION " float ";
+ segments->fVSUnis += radial2ParamsName;
+ segments->fVSUnis += "[6];\n";
+
+ segments->fFSUnis += "uniform " GR_PRECISION " float ";
+ segments->fFSUnis += radial2ParamsName;
+ segments->fFSUnis += "[6];\n";
+ locations->fRadial2Uni = 1;
+
+ // if there is perspective we don't interpolate this
+ if (varyingDims == coordDims) {
+ GrAssert(2 == coordDims);
+ segments->fVaryings += "varying float ";
+ segments->fVaryings += radial2VaryingName;
+ segments->fVaryings += ";\n";
+
+ segments->fVSCode += "\t";
+ segments->fVSCode += radial2VaryingName;
+ segments->fVSCode += " = 2.0 * (";
+ segments->fVSCode += radial2ParamsName;
+ segments->fVSCode += "[2] * ";
+ segments->fVSCode += varyingName;
+ segments->fVSCode += ".x ";
+ segments->fVSCode += " - ";
+ segments->fVSCode += radial2ParamsName;
+ segments->fVSCode += "[3]);\n";
+ }
+ }
+
+ /// Fragment Shader Stuff
+ GrTokenString fsCoordName;
+ // function used to access the shader, may be made projective
+ GrTokenString texFunc("texture2D");
+ if (desc.fOptFlags & (StageDesc::kIdentityMatrix_OptFlagBit |
+ StageDesc::kNoPerspective_OptFlagBit)) {
+ GrAssert(varyingDims == coordDims);
+ fsCoordName = varyingName;
+ } else {
+ // if we have to do some non-matrix op on the varyings to get
+ // our final tex coords then when in perspective we have to
+ // do an explicit divide
+ if (StageDesc::kIdentity_CoordMapping == desc.fCoordMapping) {
+ texFunc += "Proj";
+ fsCoordName = varyingName;
+ } else {
+ fsCoordName = "tCoord";
+ fsCoordName.appendInt(stageNum);
+
+ segments->fFSCode += "\t";
+ segments->fFSCode += float_vector_type(coordDims);
+ segments->fFSCode += " ";
+ segments->fFSCode += fsCoordName;
+ segments->fFSCode += " = ";
+ segments->fFSCode += varyingName;
+ segments->fFSCode += vector_nonhomog_coords(varyingDims);
+ segments->fFSCode += " / ";
+ segments->fFSCode += varyingName;
+ segments->fFSCode += vector_homog_coord(varyingDims);
+ segments->fFSCode += ";\n";
+ }
+ }
+
+ GrSStringBuilder<96> sampleCoords;
+ switch (desc.fCoordMapping) {
+ case StageDesc::kIdentity_CoordMapping:
+ sampleCoords = fsCoordName;
+ break;
+ case StageDesc::kSweepGradient_CoordMapping:
+ sampleCoords = "vec2(atan(-";
+ sampleCoords += fsCoordName;
+ sampleCoords += ".y, -";
+ sampleCoords += fsCoordName;
+ sampleCoords += ".x)*0.1591549430918 + 0.5, 0.5)";
+ break;
+ case StageDesc::kRadialGradient_CoordMapping:
+ sampleCoords = "vec2(length(";
+ sampleCoords += fsCoordName;
+ sampleCoords += ".xy), 0.5)";
+ break;
+ case StageDesc::kRadial2Gradient_CoordMapping: {
+ GrTokenString cName = "c";
+ GrTokenString ac4Name = "ac4";
+ GrTokenString rootName = "root";
+
+ cName.appendInt(stageNum);
+ ac4Name.appendInt(stageNum);
+ rootName.appendInt(stageNum);
+
+ GrTokenString bVar;
+ if (coordDims == varyingDims) {
+ bVar = radial2VaryingName;
+ GrAssert(2 == varyingDims);
+ } else {
+ GrAssert(3 == varyingDims);
+ bVar = "b";
+ bVar.appendInt(stageNum);
+ segments->fFSCode += "\tfloat ";
+ segments->fFSCode += bVar;
+ segments->fFSCode += " = 2.0 * (";
+ segments->fFSCode += radial2ParamsName;
+ segments->fFSCode += "[2] * ";
+ segments->fFSCode += fsCoordName;
+ segments->fFSCode += ".x ";
+ segments->fFSCode += " - ";
+ segments->fFSCode += radial2ParamsName;
+ segments->fFSCode += "[3]);\n";
+ }
+
+ segments->fFSCode += "\tfloat ";
+ segments->fFSCode += cName;
+ segments->fFSCode += " = dot(";
+ segments->fFSCode += fsCoordName;
+ segments->fFSCode += ", ";
+ segments->fFSCode += fsCoordName;
+            segments->fFSCode += ") - ";
+ segments->fFSCode += radial2ParamsName;
+ segments->fFSCode += "[4];\n";
+
+ segments->fFSCode += "\tfloat ";
+ segments->fFSCode += ac4Name;
+ segments->fFSCode += " = ";
+ segments->fFSCode += radial2ParamsName;
+ segments->fFSCode += "[0] * 4.0 * ";
+ segments->fFSCode += cName;
+ segments->fFSCode += ";\n";
+
+ segments->fFSCode += "\tfloat ";
+ segments->fFSCode += rootName;
+ segments->fFSCode += " = sqrt(abs(";
+ segments->fFSCode += bVar;
+ segments->fFSCode += " * ";
+ segments->fFSCode += bVar;
+ segments->fFSCode += " - ";
+ segments->fFSCode += ac4Name;
+ segments->fFSCode += "));\n";
+
+ sampleCoords = "vec2((-";
+ sampleCoords += bVar;
+ sampleCoords += " + ";
+ sampleCoords += radial2ParamsName;
+ sampleCoords += "[5] * ";
+ sampleCoords += rootName;
+ sampleCoords += ") * ";
+ sampleCoords += radial2ParamsName;
+            sampleCoords += "[1], 0.5)";
+ break;}
+ };
+
+ segments->fFSCode += "\t";
+ segments->fFSCode += fsOutColor;
+ segments->fFSCode += " = ";
+ if (NULL != fsInColor) {
+ segments->fFSCode += fsInColor;
+ segments->fFSCode += " * ";
+ }
+ segments->fFSCode += texFunc;
+ segments->fFSCode += "(";
+ segments->fFSCode += samplerName;
+ segments->fFSCode += ", ";
+ segments->fFSCode += sampleCoords;
+ segments->fFSCode += ")";
+ if (desc.fModulation == StageDesc::kAlpha_Modulation) {
+ segments->fFSCode += ".aaaa";
+ }
+ segments->fFSCode += ";\n";
+
+}
+
+void GrGpuGLShaders2::GenProgram(const ProgramDesc& desc,
+ Program* program) {
+
+ ShaderCodeSegments segments;
+ const uint32_t& layout = desc.fVertexLayout;
+
+ memset(&program->fUniLocations, 0, sizeof(UniLocations));
+
+ bool haveColor = !(ProgramDesc::kVertexColorAllOnes_OptFlagBit &
+ desc.fOptFlags);
+
+#if ATTRIBUTE_MATRIX
+    segments.fVSAttrs = "attribute mat3 " VIEW_MATRIX_NAME ";\n";
+#else
+ segments.fVSUnis = "uniform mat3 " VIEW_MATRIX_NAME ";\n";
+ segments.fVSAttrs = "";
+#endif
+ segments.fVSAttrs += "attribute vec2 " POS_ATTR_NAME ";\n";
+ if (haveColor) {
+ segments.fVSAttrs += "attribute vec4 " COL_ATTR_NAME ";\n";
+ segments.fVaryings = "varying vec4 vColor;\n";
+ } else {
+ segments.fVaryings = "";
+ }
+
+ segments.fVSCode = "void main() {\n"
+ "\tvec3 pos3 = " VIEW_MATRIX_NAME " * vec3(" POS_ATTR_NAME ", 1);\n"
+ "\tgl_Position = vec4(pos3.xy, 0, pos3.z);\n";
+ if (haveColor) {
+ segments.fVSCode += "\tvColor = " COL_ATTR_NAME ";\n";
+ }
+
+ if (!(desc.fOptFlags & ProgramDesc::kNotPoints_OptFlagBit)){
+ segments.fVSCode += "\tgl_PointSize = 1.0;\n";
+ }
+ segments.fFSCode = "void main() {\n";
+
+ bool textureCoordAttr = false;
+ static const char* IN_COORDS[] = {POS_ATTR_NAME, TEX_ATTR_NAME};
+ const char* inCoords = NULL;
+ if ((kSeparateTexCoord_VertexLayoutBit | kTextFormat_VertexLayoutBit) &
+ layout) {
+ segments.fVSAttrs += "attribute vec2 " TEX_ATTR_NAME ";\n";
+ inCoords = IN_COORDS[1];
+ textureCoordAttr = true;
+ } else if (kPositionAsTexCoord_VertexLayoutBit & layout) {
+ inCoords = IN_COORDS[0];
+ }
+
+ GrTokenString inColor = "vColor";
+ GR_STATIC_ASSERT(NUM_STAGES <= 9);
+ int numActiveStages = 0;
+ for (int i = 0; i < NUM_STAGES; ++i) {
+ if (desc.fStages[i].fEnabled) {
+ ++numActiveStages;
+ }
+ }
+ if (NULL != inCoords && numActiveStages) {
+ int currActiveStage = 0;
+ for (int i = 0; i < NUM_STAGES; ++i) {
+ if (desc.fStages[i].fEnabled) {
+ GrTokenString outColor;
+ if (currActiveStage < (numActiveStages - 1)) {
+ outColor = "color";
+ outColor.appendInt(currActiveStage);
+ segments.fFSCode += "\tvec4 ";
+ segments.fFSCode += outColor;
+ segments.fFSCode += ";\n";
+ } else {
+ outColor = "gl_FragColor";
+ }
+ GenStageCode(i,
+ desc.fStages[i],
+ haveColor ? inColor.cstr() : NULL,
+ outColor.cstr(),
+ inCoords,
+ &segments,
+ &program->fUniLocations.fStages[i]);
+ ++currActiveStage;
+ inColor = outColor;
+ haveColor = true;
+ }
+ }
+ } else {
+ segments.fFSCode += "\tgl_FragColor = ";
+ if (haveColor) {
+ segments.fFSCode += inColor;
+ } else {
+ segments.fFSCode += "vec4(1,1,1,1)";
+ }
+ segments.fFSCode += ";\n";
+ }
+ segments.fFSCode += "}\n";
+ segments.fVSCode += "}\n";
+
+
+ const char* strings[4];
+ int lengths[4];
+ int stringCnt = 0;
+
+ if (segments.fVSUnis.length()) {
+ strings[stringCnt] = segments.fVSUnis.cstr();
+ lengths[stringCnt] = segments.fVSUnis.length();
+ ++stringCnt;
+ }
+ if (segments.fVSAttrs.length()) {
+ strings[stringCnt] = segments.fVSAttrs.cstr();
+ lengths[stringCnt] = segments.fVSAttrs.length();
+ ++stringCnt;
+ }
+ if (segments.fVaryings.length()) {
+ strings[stringCnt] = segments.fVaryings.cstr();
+ lengths[stringCnt] = segments.fVaryings.length();
+ ++stringCnt;
+ }
+
+ GrAssert(segments.fVSCode.length());
+ strings[stringCnt] = segments.fVSCode.cstr();
+ lengths[stringCnt] = segments.fVSCode.length();
+ ++stringCnt;
+
+#if PRINT_SHADERS
+ GrPrintf("%s%s%s%s\n",
+ segments.fVSUnis.cstr(),
+ segments.fVSAttrs.cstr(),
+ segments.fVaryings.cstr(),
+ segments.fVSCode.cstr());
+#endif
+ program->fVShaderID = CompileShader(GL_VERTEX_SHADER,
+ stringCnt,
+ strings,
+ lengths);
+
+ stringCnt = 0;
+
+ if (GR_ARRAY_COUNT(GR_SHADER_PRECISION) > 1) {
+ strings[stringCnt] = GR_SHADER_PRECISION;
+ lengths[stringCnt] = GR_ARRAY_COUNT(GR_SHADER_PRECISION) - 1;
+ ++stringCnt;
+ }
+ if (segments.fFSUnis.length()) {
+ strings[stringCnt] = segments.fFSUnis.cstr();
+ lengths[stringCnt] = segments.fFSUnis.length();
+ ++stringCnt;
+ }
+ if (segments.fVaryings.length()) {
+ strings[stringCnt] = segments.fVaryings.cstr();
+ lengths[stringCnt] = segments.fVaryings.length();
+ ++stringCnt;
+ }
+
+ GrAssert(segments.fFSCode.length());
+ strings[stringCnt] = segments.fFSCode.cstr();
+ lengths[stringCnt] = segments.fFSCode.length();
+ ++stringCnt;
+
+#if PRINT_SHADERS
+ GrPrintf("%s%s%s%s\n",
+ GR_SHADER_PRECISION,
+ segments.fFSUnis.cstr(),
+ segments.fVaryings.cstr(),
+ segments.fFSCode.cstr());
+#endif
+ program->fFShaderID = CompileShader(GL_FRAGMENT_SHADER,
+ stringCnt,
+ strings,
+ lengths);
+
+ program->fProgramID = GR_GL(CreateProgram());
+ const GLint& progID = program->fProgramID;
+
+ GR_GL(AttachShader(progID, program->fVShaderID));
+ GR_GL(AttachShader(progID, program->fFShaderID));
+
+ // Bind the attrib locations to same values for all shaders
+ GR_GL(BindAttribLocation(progID, POS_ATTR_LOCATION, POS_ATTR_NAME));
+ if (textureCoordAttr) {
+ GR_GL(BindAttribLocation(progID, TEX_ATTR_LOCATION, TEX_ATTR_NAME));
+ }
+
+#if ATTRIBUTE_MATRIX
+ // set unis to a bogus value so that checks against -1 before
+ // flushing will pass.
+ GR_GL(BindAttribLocation(progID,
+ VIEWMAT_ATTR_LOCATION,
+ VIEW_MATRIX_NAME));
+
+ program->fUniLocations.fViewMatrixUni = BOGUS_MATRIX_UNI_LOCATION;
+
+ for (int i = 0; i < NUM_STAGES; ++i) {
+ if (desc.fStages[i].fEnabled) {
+            GrTokenString texMName;
+            tex_matrix_name(i, &texMName);
+            GR_GL(BindAttribLocation(progID,
+                                     TEXMAT_ATTR_LOCATION(i),
+                                     texMName.cstr()));
+ program->fUniLocations.fStages[i].fTextureMatrixUni =
+ BOGUS_MATRIX_UNI_LOCATION;
+ }
+ }
+#endif
+
+ GR_GL(BindAttribLocation(progID, COL_ATTR_LOCATION, COL_ATTR_NAME));
+
+ GR_GL(LinkProgram(progID));
+
+ GLint linked;
+ GR_GL(GetProgramiv(progID, GL_LINK_STATUS, &linked));
+ if (!linked) {
+ GLint infoLen;
+ GR_GL(GetProgramiv(progID, GL_INFO_LOG_LENGTH, &infoLen));
+ GrAutoMalloc log(sizeof(char)*(infoLen+1)); // outside if for debugger
+ if (infoLen > 0) {
+ GR_GL(GetProgramInfoLog(progID,
+ infoLen+1,
+ NULL,
+ (char*)log.get()));
+ GrPrintf((char*)log.get());
+ }
+ GrAssert(!"Error linking program");
+ GR_GL(DeleteProgram(progID));
+ program->fProgramID = 0;
+ return;
+ }
+
+ // Get uniform locations
+#if !ATTRIBUTE_MATRIX
+ program->fUniLocations.fViewMatrixUni =
+ GR_GL(GetUniformLocation(progID, VIEW_MATRIX_NAME));
+ GrAssert(-1 != program->fUniLocations.fViewMatrixUni);
+#endif
+ for (int i = 0; i < NUM_STAGES; ++i) {
+ StageUniLocations& locations = program->fUniLocations.fStages[i];
+ if (desc.fStages[i].fEnabled) {
+#if !ATTRIBUTE_MATRIX
+ if (locations.fTextureMatrixUni) {
+ GrTokenString texMName;
+ tex_matrix_name(i, &texMName);
+ locations.fTextureMatrixUni = GR_GL(GetUniformLocation(
+ progID,
+ texMName.cstr()));
+ GrAssert(-1 != locations.fTextureMatrixUni);
+ } else {
+ locations.fTextureMatrixUni = -1;
+
+ }
+#endif
+
+ if (locations.fSamplerUni) {
+ GrTokenString samplerName;
+ sampler_name(i, &samplerName);
+ locations.fSamplerUni = GR_GL(GetUniformLocation(
+ progID,
+ samplerName.cstr()));
+ GrAssert(-1 != locations.fSamplerUni);
+ } else {
+ locations.fSamplerUni = -1;
+ }
+
+ if (locations.fRadial2Uni) {
+ GrTokenString radial2ParamName;
+ radial2_param_name(i, &radial2ParamName);
+ locations.fRadial2Uni = GR_GL(GetUniformLocation(
+ progID,
+ radial2ParamName.cstr()));
+ GrAssert(-1 != locations.fRadial2Uni);
+ } else {
+ locations.fRadial2Uni = -1;
+ }
+ } else {
+ locations.fSamplerUni = -1;
+ locations.fRadial2Uni = -1;
+ locations.fTextureMatrixUni = -1;
+ }
+ }
+ GR_GL(UseProgram(progID));
+
+ // init sampler unis and set bogus values for state tracking
+ for (int i = 0; i < NUM_STAGES; ++i) {
+ if (-1 != program->fUniLocations.fStages[i].fSamplerUni) {
+ GR_GL(Uniform1i(program->fUniLocations.fStages[i].fSamplerUni, i));
+ }
+ program->fTextureMatrix[i].setScale(GR_ScalarMax, GR_ScalarMax);
+ program->fRadial2CenterX1[i] = GR_ScalarMax;
+ program->fRadial2Radius0[i] = -GR_ScalarMax;
+ }
+ program->fViewMatrix.setScale(GR_ScalarMax, GR_ScalarMax);
+}
+
+void GrGpuGLShaders2::getProgramDesc(PrimitiveType primType, ProgramDesc* desc) {
+
+ // Must initialize all fields or cache will have false negatives!
+ desc->fVertexLayout = fGeometrySrc.fVertexLayout;
+ desc->fStages[0].fEnabled = VertexHasTexCoords(fGeometrySrc.fVertexLayout);
+ for (int i = 1; i < NUM_STAGES; ++i) {
+ desc->fStages[i].fEnabled = false;
+ desc->fStages[i].fOptFlags = 0;
+ desc->fStages[i].fCoordMapping = 0;
+ desc->fStages[i].fModulation = 0;
+ }
+
+ if (primType != kPoints_PrimitiveType) {
+ desc->fOptFlags = ProgramDesc::kNotPoints_OptFlagBit;
+ } else {
+ desc->fOptFlags = 0;
+ }
+#if SKIP_COLOR_MODULATE_OPT
+ if (!(desc->fVertexLayout & kColor_VertexLayoutBit) &&
+ (0xffffffff == fCurrDrawState.fColor)) {
+ desc->fOptFlags |= ProgramDesc::kVertexColorAllOnes_OptFlagBit;
+ }
+#endif
+
+ StageDesc& stage = desc->fStages[0];
+
+ if (stage.fEnabled) {
+ GrGLTexture* texture = (GrGLTexture*) fCurrDrawState.fTexture;
+ GrAssert(NULL != texture);
+        // the texture matrix gets a flip applied when the orientation is
+        // bottom-up, so make sure we aren't in that case before flagging
+        // the matrix as identity.
+ if (fCurrDrawState.fMatrixModeCache[kTexture_MatrixMode].isIdentity() &&
+ GrGLTexture::kTopDown_Orientation == texture->orientation()) {
+ stage.fOptFlags = StageDesc::kIdentityMatrix_OptFlagBit;
+ } else if (!fCurrDrawState.fMatrixModeCache[kTexture_MatrixMode].hasPerspective()) {
+ stage.fOptFlags = StageDesc::kNoPerspective_OptFlagBit;
+ } else {
+ stage.fOptFlags = 0;
+ }
+ switch (fCurrDrawState.fSamplerState.getSampleMode()) {
+ case GrSamplerState::kNormal_SampleMode:
+ stage.fCoordMapping = StageDesc::kIdentity_CoordMapping;
+ stage.fModulation = StageDesc::kColor_Modulation;
+ break;
+ case GrSamplerState::kAlphaMod_SampleMode:
+ stage.fCoordMapping = StageDesc::kIdentity_CoordMapping;
+ stage.fModulation = StageDesc::kAlpha_Modulation;
+ break;
+ case GrSamplerState::kRadial_SampleMode:
+ stage.fCoordMapping = StageDesc::kRadialGradient_CoordMapping;
+ stage.fModulation = StageDesc::kColor_Modulation;
+ break;
+ case GrSamplerState::kRadial2_SampleMode:
+ stage.fCoordMapping = StageDesc::kRadial2Gradient_CoordMapping;
+ stage.fModulation = StageDesc::kColor_Modulation;
+ break;
+ case GrSamplerState::kSweep_SampleMode:
+                stage.fCoordMapping = StageDesc::kSweepGradient_CoordMapping;
+ stage.fModulation = StageDesc::kColor_Modulation;
+ break;
+ default:
+ GrAssert(!"Unexpected sample mode!");
+ break;
+ }
+ } else {
+ stage.fOptFlags = 0;
+ stage.fCoordMapping = 0;
+ stage.fModulation = 0;
+ }
+}
+
+GLuint GrGpuGLShaders2::CompileShader(GLenum type,
+ int stringCnt,
+ const char** strings,
+ int* stringLengths) {
+ GLuint shader = GR_GL(CreateShader(type));
+ if (0 == shader) {
+ return 0;
+ }
+
+ GLint compiled;
+ GR_GL(ShaderSource(shader, stringCnt, strings, stringLengths));
+ GR_GL(CompileShader(shader));
+ GR_GL(GetShaderiv(shader, GL_COMPILE_STATUS, &compiled));
+
+ if (!compiled) {
+ GLint infoLen;
+ GR_GL(GetShaderiv(shader, GL_INFO_LOG_LENGTH, &infoLen));
+ GrAutoMalloc log(sizeof(char)*(infoLen+1)); // outside if for debugger
+ if (infoLen > 0) {
+ GR_GL(GetShaderInfoLog(shader, infoLen+1, NULL, (char*)log.get()));
+ for (int i = 0; i < stringCnt; ++i) {
+ if (NULL == stringLengths || stringLengths[i] < 0) {
+ GrPrintf(strings[i]);
+ } else {
+ GrPrintf("%.*s", stringLengths[i], strings[i]);
+ }
+ }
+ GrPrintf("\n%s", log.get());
+ }
+ GrAssert(!"Shader compilation failed!");
+ GR_GL(DeleteShader(shader));
+ return 0;
+ }
+ return shader;
+}
+
+void GrGpuGLShaders2::DeleteProgram(Program* program) {
+ GR_GL(DeleteShader(program->fVShaderID));
+ GR_GL(DeleteShader(program->fFShaderID));
+ GR_GL(DeleteProgram(program->fProgramID));
+ GR_DEBUGCODE(memset(program, 0, sizeof(Program)));
+}
+
+
+GrGpuGLShaders2::GrGpuGLShaders2() {
+
+ resetContextHelper();
+
+ fProgram = NULL;
+ fProgramCache = new ProgramCache();
+
+#if GR_DEBUG
+ ProgramUnitTest();
+#endif
+}
+
+GrGpuGLShaders2::~GrGpuGLShaders2() {
+ delete fProgramCache;
+}
+
+void GrGpuGLShaders2::resetContext() {
+ INHERITED::resetContext();
+ resetContextHelper();
+}
+
+void GrGpuGLShaders2::resetContextHelper() {
+ fTextureOrientation = (GrGLTexture::Orientation)-1; // illegal
+
+ fHWGeometryState.fVertexLayout = 0;
+ fHWGeometryState.fPositionPtr = (void*) ~0;
+ GR_GL(DisableVertexAttribArray(COL_ATTR_LOCATION));
+ GR_GL(DisableVertexAttribArray(TEX_ATTR_LOCATION));
+ GR_GL(EnableVertexAttribArray(POS_ATTR_LOCATION));
+
+ fHWProgramID = 0;
+}
+
+void GrGpuGLShaders2::flushViewMatrix() {
+ GrAssert(NULL != fCurrDrawState.fRenderTarget);
+ GrMatrix m (
+ GrIntToScalar(2) / fCurrDrawState.fRenderTarget->width(), 0, -GR_Scalar1,
+ 0,-GrIntToScalar(2) / fCurrDrawState.fRenderTarget->height(), GR_Scalar1,
+ 0, 0, GrMatrix::I()[8]);
+ m.setConcat(m, fCurrDrawState.fMatrixModeCache[kModelView_MatrixMode]);
+
+ // ES doesn't allow you to pass true to the transpose param,
+ // so do our own transpose
+ GrScalar mt[] = {
+ m[GrMatrix::kScaleX],
+ m[GrMatrix::kSkewY],
+ m[GrMatrix::kPersp0],
+ m[GrMatrix::kSkewX],
+ m[GrMatrix::kScaleY],
+ m[GrMatrix::kPersp1],
+ m[GrMatrix::kTransX],
+ m[GrMatrix::kTransY],
+ m[GrMatrix::kPersp2]
+ };
+#if ATTRIBUTE_MATRIX
+ glVertexAttrib4fv(VIEWMAT_ATTR_LOCATION+0, mt+0);
+ glVertexAttrib4fv(VIEWMAT_ATTR_LOCATION+1, mt+3);
+ glVertexAttrib4fv(VIEWMAT_ATTR_LOCATION+2, mt+6);
+#else
+ GR_GL(UniformMatrix3fv(fProgram->fUniLocations.fViewMatrixUni,1,false,mt));
+#endif
+}
+
+void GrGpuGLShaders2::flushTextureMatrix() {
+
+ GrAssert(NULL != fCurrDrawState.fTexture);
+ GrGLTexture::Orientation orientation =
+ ((GrGLTexture*)fCurrDrawState.fTexture)->orientation();
+
+ GrMatrix* m;
+ GrMatrix temp;
+ if (GrGLTexture::kBottomUp_Orientation == orientation) {
+ temp.setAll(
+ GR_Scalar1, 0, 0,
+ 0, -GR_Scalar1, GR_Scalar1,
+ 0, 0, GrMatrix::I()[8]
+ );
+ temp.preConcat(fCurrDrawState.fMatrixModeCache[kTexture_MatrixMode]);
+ m = &temp;
+ } else {
+ GrAssert(GrGLTexture::kTopDown_Orientation == orientation);
+ m = &fCurrDrawState.fMatrixModeCache[kTexture_MatrixMode];
+ }
+
+ // ES doesn't allow you to pass true to the transpose param,
+ // so do our own transpose
+ GrScalar mt[] = {
+ (*m)[GrMatrix::kScaleX],
+ (*m)[GrMatrix::kSkewY],
+ (*m)[GrMatrix::kPersp0],
+ (*m)[GrMatrix::kSkewX],
+ (*m)[GrMatrix::kScaleY],
+ (*m)[GrMatrix::kPersp1],
+ (*m)[GrMatrix::kTransX],
+ (*m)[GrMatrix::kTransY],
+ (*m)[GrMatrix::kPersp2]
+ };
+#if ATTRIBUTE_MATRIX
+ glVertexAttrib4fv(TEXMAT_ATTR_LOCATION(0)+0, mt+0);
+ glVertexAttrib4fv(TEXMAT_ATTR_LOCATION(0)+1, mt+3);
+ glVertexAttrib4fv(TEXMAT_ATTR_LOCATION(0)+2, mt+6);
+#else
+ GR_GL(UniformMatrix3fv(fProgram->fUniLocations.fStages[0].fTextureMatrixUni,
+ 1,
+ false,
+ mt));
+#endif
+}
+
+void GrGpuGLShaders2::flushRadial2() {
+
+ const GrSamplerState& sampler = fCurrDrawState.fSamplerState;
+
+ GrScalar centerX1 = sampler.getRadial2CenterX1();
+ GrScalar radius0 = sampler.getRadial2Radius0();
+
+ GrScalar a = GrMul(centerX1, centerX1) - GR_Scalar1;
+
+ float unis[6] = {
+ GrScalarToFloat(a),
+ 1 / (2.f * unis[0]),
+ GrScalarToFloat(centerX1),
+ GrScalarToFloat(radius0),
+ GrScalarToFloat(GrMul(radius0, radius0)),
+ sampler.isRadial2PosRoot() ? 1.f : -1.f
+ };
+ GR_GL(Uniform1fv(fProgram->fUniLocations.fStages[0].fRadial2Uni, 6, unis));
+}
+
+void GrGpuGLShaders2::flushProgram(PrimitiveType type) {
+ ProgramDesc desc;
+ getProgramDesc(type, &desc);
+ fProgram = fProgramCache->getProgram(desc);
+
+ if (fHWProgramID != fProgram->fProgramID) {
+ GR_GL(UseProgram(fProgram->fProgramID));
+ fHWProgramID = fProgram->fProgramID;
+#if GR_COLLECT_STATS
+ ++fStats.fProgChngCnt;
+#endif
+ }
+}
+
+bool GrGpuGLShaders2::flushGraphicsState(PrimitiveType type) {
+
+ flushGLStateCommon(type);
+
+ if (fRenderTargetChanged) {
+ // our coords are in pixel space and the GL matrices map to NDC
+ // so if the viewport changed, our matrix is now wrong.
+#if ATTRIBUTE_MATRIX
+ fHWDrawState.fMatrixModeCache[kModelView_MatrixMode].setScale(GR_ScalarMax,
+ GR_ScalarMax);
+#else
+ // we assume all shader matrices may be wrong after viewport changes
+ fProgramCache->invalidateViewMatrices();
+#endif
+ fRenderTargetChanged = false;
+ }
+
+ flushProgram(type);
+
+ if (fGeometrySrc.fVertexLayout & kColor_VertexLayoutBit) {
+ // invalidate the immediate mode color
+ fHWDrawState.fColor = GrColor_ILLEGAL;
+ } else {
+ if (fHWDrawState.fColor != fCurrDrawState.fColor) {
+            // OpenGL ES only supports the float varieties of glVertexAttrib
+ float c[] = {
+ GrColorUnpackR(fCurrDrawState.fColor) / 255.f,
+ GrColorUnpackG(fCurrDrawState.fColor) / 255.f,
+ GrColorUnpackB(fCurrDrawState.fColor) / 255.f,
+ GrColorUnpackA(fCurrDrawState.fColor) / 255.f
+ };
+ GR_GL(VertexAttrib4fv(COL_ATTR_LOCATION, c));
+ fHWDrawState.fColor = fCurrDrawState.fColor;
+ }
+ }
+
+#if ATTRIBUTE_MATRIX
+ GrMatrix& currViewMatrix = fHWDrawState.fMatrixModeCache[kModelView_MatrixMode];
+ GrMatrix& currTextureMatrix = fHWDrawState.fMatrixModeCache[kTexture_MatrixMode];
+ GrGLTexture::Orientation& orientation = fTextureOrientation;
+#else
+ GrMatrix& currViewMatrix = fProgram->fViewMatrix;
+ GrMatrix& currTextureMatrix = fProgram->fTextureMatrix[0];
+ GrGLTexture::Orientation& orientation = fProgram->fTextureOrientation[0];
+#endif
+
+ if (currViewMatrix != fCurrDrawState.fMatrixModeCache[kModelView_MatrixMode]) {
+ flushViewMatrix();
+ currViewMatrix = fCurrDrawState.fMatrixModeCache[kModelView_MatrixMode];
+ }
+
+ GrGLTexture* texture = (GrGLTexture*) fCurrDrawState.fTexture;
+ if (NULL != texture) {
+ if (-1 != fProgram->fUniLocations.fStages[0].fTextureMatrixUni &&
+ (currTextureMatrix !=
+ fCurrDrawState.fMatrixModeCache[kTexture_MatrixMode] ||
+ orientation != texture->orientation())) {
+ flushTextureMatrix();
+ currTextureMatrix = fCurrDrawState.fMatrixModeCache[kTexture_MatrixMode];
+ orientation = texture->orientation();
+ }
+ }
+
+ const GrSamplerState& sampler = fCurrDrawState.fSamplerState;
+ if (-1 != fProgram->fUniLocations.fStages[0].fRadial2Uni &&
+ (fProgram->fRadial2CenterX1[0] != sampler.getRadial2CenterX1() ||
+ fProgram->fRadial2Radius0[0] != sampler.getRadial2Radius0() ||
+ fProgram->fRadial2PosRoot[0] != sampler.isRadial2PosRoot())) {
+
+ flushRadial2();
+
+ fProgram->fRadial2CenterX1[0] = sampler.getRadial2CenterX1();
+ fProgram->fRadial2Radius0[0] = sampler.getRadial2Radius0();
+ fProgram->fRadial2PosRoot[0] = sampler.isRadial2PosRoot();
+ }
+
+ return true;
+}
+
+void GrGpuGLShaders2::setupGeometry(uint32_t startVertex,
+ uint32_t startIndex,
+ uint32_t vertexCount,
+ uint32_t indexCount) {
+
+ int newColorOffset, newTexCoordOffset;
+
+ GLsizei newStride = VertexSizeAndOffsets(fGeometrySrc.fVertexLayout,
+ &newTexCoordOffset,
+ &newColorOffset);
+ int oldColorOffset, oldTexCoordOffset;
+ GLsizei oldStride = VertexSizeAndOffsets(fHWGeometryState.fVertexLayout,
+ &oldTexCoordOffset,
+ &oldColorOffset);
+
+ const GLvoid* posPtr = (GLvoid*)(newStride * startVertex);
+
+ if (kBuffer_GeometrySrcType == fGeometrySrc.fVertexSrc) {
+ GrAssert(NULL != fGeometrySrc.fVertexBuffer);
+ GrAssert(!fGeometrySrc.fVertexBuffer->isLocked());
+ if (fHWGeometryState.fVertexBuffer != fGeometrySrc.fVertexBuffer) {
+ GrGLVertexBuffer* buf =
+ (GrGLVertexBuffer*)fGeometrySrc.fVertexBuffer;
+ GR_GL(BindBuffer(GL_ARRAY_BUFFER, buf->bufferID()));
+ fHWGeometryState.fVertexBuffer = fGeometrySrc.fVertexBuffer;
+ }
+ } else {
+ if (kArray_GeometrySrcType == fGeometrySrc.fVertexSrc) {
+ posPtr = (void*)((intptr_t)fGeometrySrc.fVertexArray +
+ (intptr_t)posPtr);
+ } else {
+ GrAssert(kReserved_GeometrySrcType == fGeometrySrc.fVertexSrc);
+ posPtr = (void*)((intptr_t)fVertices.get() + (intptr_t)posPtr);
+ }
+ if (NULL != fHWGeometryState.fVertexBuffer) {
+ GR_GL(BindBuffer(GL_ARRAY_BUFFER, 0));
+ fHWGeometryState.fVertexBuffer = NULL;
+ }
+ }
+
+ if (kBuffer_GeometrySrcType == fGeometrySrc.fIndexSrc) {
+ GrAssert(NULL != fGeometrySrc.fIndexBuffer);
+ GrAssert(!fGeometrySrc.fIndexBuffer->isLocked());
+ if (fHWGeometryState.fIndexBuffer != fGeometrySrc.fIndexBuffer) {
+ GrGLIndexBuffer* buf =
+ (GrGLIndexBuffer*)fGeometrySrc.fIndexBuffer;
+ GR_GL(BindBuffer(GL_ELEMENT_ARRAY_BUFFER, buf->bufferID()));
+ fHWGeometryState.fIndexBuffer = fGeometrySrc.fIndexBuffer;
+ }
+ } else if (NULL != fHWGeometryState.fIndexBuffer) {
+ GR_GL(BindBuffer(GL_ELEMENT_ARRAY_BUFFER, 0));
+ fHWGeometryState.fIndexBuffer = NULL;
+ }
+
+ GLenum scalarType;
+ bool texCoordNorm;
+ if (fGeometrySrc.fVertexLayout & kTextFormat_VertexLayoutBit) {
+ scalarType = GrGLTextType;
+ texCoordNorm = GR_GL_TEXT_TEXTURE_NORMALIZED;
+ } else {
+ scalarType = GrGLType;
+ texCoordNorm = false;
+ }
+
+ bool baseChange = posPtr != fHWGeometryState.fPositionPtr;
+ bool scalarChange = (GrGLTextType != GrGLType) &&
+ (kTextFormat_VertexLayoutBit &
+ (fHWGeometryState.fVertexLayout ^
+ fGeometrySrc.fVertexLayout));
+ bool strideChange = newStride != oldStride;
+ bool posChange = baseChange || scalarChange || strideChange;
+
+ if (posChange) {
+ GR_GL(VertexAttribPointer(POS_ATTR_LOCATION, 2, scalarType,
+ false, newStride, posPtr));
+ fHWGeometryState.fPositionPtr = posPtr;
+ }
+
+ if (newTexCoordOffset > 0) {
+ GLvoid* texCoordPtr = (int8_t*)posPtr + newTexCoordOffset;
+ if (oldTexCoordOffset <= 0) {
+ GR_GL(EnableVertexAttribArray(TEX_ATTR_LOCATION));
+ }
+ if (posChange || newTexCoordOffset != oldTexCoordOffset) {
+ GR_GL(VertexAttribPointer(TEX_ATTR_LOCATION, 2, scalarType,
+ texCoordNorm, newStride, texCoordPtr));
+ }
+ } else if (oldTexCoordOffset > 0) {
+ GR_GL(DisableVertexAttribArray(TEX_ATTR_LOCATION));
+ }
+
+ if (newColorOffset > 0) {
+ GLvoid* colorPtr = (int8_t*)posPtr + newColorOffset;
+ if (oldColorOffset <= 0) {
+ GR_GL(EnableVertexAttribArray(COL_ATTR_LOCATION));
+ }
+ if (posChange || newColorOffset != oldColorOffset) {
+ GR_GL(VertexAttribPointer(COL_ATTR_LOCATION, 4,
+ GL_UNSIGNED_BYTE,
+ true, newStride, colorPtr));
+ }
+ } else if (oldColorOffset > 0) {
+ GR_GL(DisableVertexAttribArray(COL_ATTR_LOCATION));
+ }
+
+ fHWGeometryState.fVertexLayout = fGeometrySrc.fVertexLayout;
+}
+#endif
+
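Note on the hand transposes in flushViewMatrix() and flushTextureMatrix() above: GrMatrix stores its nine elements row-major, while glUniformMatrix3fv wants column-major data and OpenGL ES 2.0 requires the transpose argument to be GL_FALSE, hence the manual reordering into mt[]. A minimal standalone sketch of the same reordering (the helper name and raw float[9] storage are illustrative, not part of this change):

    #include <GLES2/gl2.h>

    // Upload a row-major 3x3 matrix as the column-major data GL expects.
    static void uploadRowMajor3x3(GLint uniform, const float rowMajor[9]) {
        const float colMajor[9] = {
            rowMajor[0], rowMajor[3], rowMajor[6],   // first column
            rowMajor[1], rowMajor[4], rowMajor[7],   // second column
            rowMajor[2], rowMajor[5], rowMajor[8],   // third column
        };
        // ES 2.0 rejects transpose == GL_TRUE, so the data is pre-transposed.
        glUniformMatrix3fv(uniform, 1, GL_FALSE, colMajor);
    }
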
diff --git a/gpu/src/GrGpuGLShaders2.h b/gpu/src/GrGpuGLShaders2.h
new file mode 100644
index 0000000000..c484544293
--- /dev/null
+++ b/gpu/src/GrGpuGLShaders2.h
@@ -0,0 +1,102 @@
+/*
+ Copyright 2010 Google Inc.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+ */
+
+
+#ifndef GrGpuGLShaders2_DEFINED
+#define GrGpuGLShaders2_DEFINED
+
+#include "GrGpuGL.h"
+
+// Programmable OpenGL or OpenGL ES 2.0
+class GrGpuGLShaders2 : public GrGpuGL {
+public:
+ GrGpuGLShaders2();
+ virtual ~GrGpuGLShaders2();
+
+ virtual void resetContext();
+
+protected:
+ // overrides from GrGpu
+ virtual bool flushGraphicsState(PrimitiveType type);
+ virtual void setupGeometry(uint32_t startVertex,
+ uint32_t startIndex,
+ uint32_t vertexCount,
+ uint32_t indexCount);
+
+private:
+ static const int NUM_STAGES;
+
+ void resetContextHelper();
+
+    // sets the texture matrix uniform for the currently bound program
+    void flushTextureMatrix();
+    // sets the MVP matrix uniform for the currently bound program
+    void flushViewMatrix();
+
+    // flushes the parameters for the two-point radial gradient
+    void flushRadial2();
+
+ // called at flush time to setup the appropriate program
+ void flushProgram(PrimitiveType type);
+
+ struct Program;
+
+ struct StageDesc;
+ struct ProgramDesc;
+
+ struct UniLocations;
+ struct StageUniLocations;
+
+ struct ShaderCodeSegments;
+
+ class ProgramCache;
+
+    // gets a description of the needed shader
+    void getProgramDesc(PrimitiveType primType, ProgramDesc* desc);
+
+    // generates and compiles a program from a description and vertex layout;
+    // this changes GL's currently bound program
+    static void GenProgram(const ProgramDesc& desc, Program* program);
+
+ // generates code for a stage of the shader
+ static void GenStageCode(int stageNum,
+ const StageDesc& desc,
+ const char* psInColor,
+ const char* psOutColor,
+ const char* vsInCoord,
+ ShaderCodeSegments* segments,
+ StageUniLocations* locations);
+
+ // Compiles a GL shader, returns shader ID or 0 if failed
+ // params have same meaning as glShaderSource
+ static GLuint CompileShader(GLenum type, int stringCnt,
+ const char** strings,
+ int* stringLengths);
+ static void DeleteProgram(Program* program);
+
+ void ProgramUnitTest();
+
+ GrGLTexture::Orientation fTextureOrientation;
+
+ ProgramCache* fProgramCache;
+ Program* fProgram;
+ GLuint fHWProgramID;
+
+ typedef GrGpuGL INHERITED;
+};
+
+#endif
+
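The header above relies on a ProgramCache keyed by ProgramDesc, which is why getProgramDesc() in the .cpp must initialize every field before the lookup. The cache type itself is only forward-declared here; the sketch below is a generic illustration of the same desc-keyed lookup pattern under assumed names (DescKey, compileAndLink), not the actual implementation in this change:

    #include <cstring>
    #include <map>

    // Hypothetical packed program description compared bytewise; any field
    // left uninitialized would produce spurious cache misses and duplicates.
    struct DescKey {
        unsigned char bytes[32];
        bool operator<(const DescKey& o) const {
            return memcmp(bytes, o.bytes, sizeof(bytes)) < 0;
        }
    };

    class SimpleProgramCache {
    public:
        unsigned getProgram(const DescKey& key) {
            unsigned& progID = fMap[key];        // 0 on first lookup
            if (0 == progID) {
                progID = compileAndLink(key);    // hypothetical helper
            }
            return progID;
        }
    private:
        static unsigned compileAndLink(const DescKey&) { return 1; } // stub
        std::map<DescKey, unsigned> fMap;
    };
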
diff --git a/gpu/src/GrInOrderDrawBuffer.cpp b/gpu/src/GrInOrderDrawBuffer.cpp
new file mode 100644
index 0000000000..3f25f2fa68
--- /dev/null
+++ b/gpu/src/GrInOrderDrawBuffer.cpp
@@ -0,0 +1,345 @@
+/*
+ Copyright 2010 Google Inc.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+ */
+
+
+#include "GrInOrderDrawBuffer.h"
+#include "GrTexture.h"
+#include "GrVertexBufferAllocPool.h"
+#include "GrGpu.h"
+
+GrInOrderDrawBuffer::GrInOrderDrawBuffer(GrVertexBufferAllocPool* pool) :
+ fDraws(DRAWS_BLOCK_SIZE, fDrawsStorage),
+ fStates(STATES_BLOCK_SIZE, fStatesStorage),
+ fClips(CLIPS_BLOCK_SIZE, fClipsStorage),
+ fClipChanged(true),
+ fCPUVertices((NULL == pool) ? 0 : VERTEX_BLOCK_SIZE),
+ fBufferVertices(pool),
+ fIndices(INDEX_BLOCK_SIZE),
+ fCurrReservedVertices(NULL),
+ fCurrReservedIndices(NULL),
+ fCurrVertexBuffer(NULL),
+ fReservedVertexBytes(0),
+ fReservedIndexBytes(0),
+ fUsedReservedVertexBytes(0),
+ fUsedReservedIndexBytes(0) {
+ GrAssert(NULL == pool || pool->getGpu()->supportsBufferLocking());
+}
+
+GrInOrderDrawBuffer::~GrInOrderDrawBuffer() {
+ reset();
+}
+
+void GrInOrderDrawBuffer::initializeDrawStateAndClip(const GrDrawTarget& target) {
+ this->copyDrawState(target);
+ this->setClip(target.getClip());
+}
+
+void GrInOrderDrawBuffer::drawIndexed(PrimitiveType type,
+ uint32_t startVertex,
+ uint32_t startIndex,
+ uint32_t vertexCount,
+ uint32_t indexCount) {
+
+ if (!vertexCount || !indexCount) {
+ return;
+ }
+
+ Draw& draw = fDraws.push_back();
+ draw.fType = type;
+ draw.fStartVertex = startVertex;
+ draw.fStartIndex = startIndex;
+ draw.fVertexCount = vertexCount;
+ draw.fIndexCount = indexCount;
+ draw.fClipChanged = grabClip();
+ draw.fStateChange = grabState();
+
+ draw.fVertexLayout = fGeometrySrc.fVertexLayout;
+ switch (fGeometrySrc.fVertexSrc) {
+ case kArray_GeometrySrcType:
+ draw.fUseVertexBuffer = false;
+ draw.fVertexArray = fGeometrySrc.fVertexArray;
+ break;
+ case kReserved_GeometrySrcType: {
+ draw.fUseVertexBuffer = NULL != fBufferVertices;
+ if (draw.fUseVertexBuffer) {
+ draw.fVertexBuffer = fCurrVertexBuffer;
+ draw.fStartVertex += fCurrStartVertex;
+ } else {
+ draw.fVertexArray = fCurrReservedVertices;
+ }
+ size_t vertexBytes = (vertexCount + startVertex) *
+ VertexSize(fGeometrySrc.fVertexLayout);
+ fUsedReservedVertexBytes = GrMax(fUsedReservedVertexBytes,
+ vertexBytes);
+ } break;
+ case kBuffer_GeometrySrcType:
+ draw.fUseVertexBuffer = true;
+ draw.fVertexBuffer = fGeometrySrc.fVertexBuffer;
+ break;
+ }
+
+ switch (fGeometrySrc.fIndexSrc) {
+ case kArray_GeometrySrcType:
+ draw.fUseIndexBuffer = false;
+ draw.fIndexArray = fGeometrySrc.fIndexArray;
+ break;
+ case kReserved_GeometrySrcType: {
+ draw.fUseIndexBuffer = false;
+ draw.fIndexArray = fCurrReservedIndices;
+ size_t indexBytes = (indexCount + startIndex) * sizeof(uint16_t);
+ fUsedReservedIndexBytes = GrMax(fUsedReservedIndexBytes, indexBytes);
+ } break;
+ case kBuffer_GeometrySrcType:
+ draw.fUseIndexBuffer = true;
+ draw.fIndexBuffer = fGeometrySrc.fIndexBuffer;
+ break;
+ }
+}
+
+void GrInOrderDrawBuffer::drawNonIndexed(PrimitiveType type,
+ uint32_t startVertex,
+ uint32_t vertexCount) {
+ if (!vertexCount) {
+ return;
+ }
+
+ Draw& draw = fDraws.push_back();
+ draw.fType = type;
+ draw.fStartVertex = startVertex;
+ draw.fStartIndex = 0;
+ draw.fVertexCount = vertexCount;
+ draw.fIndexCount = 0;
+
+ draw.fClipChanged = grabClip();
+ draw.fStateChange = grabState();
+
+ draw.fVertexLayout = fGeometrySrc.fVertexLayout;
+ switch (fGeometrySrc.fVertexSrc) {
+ case kArray_GeometrySrcType:
+ draw.fUseVertexBuffer = false;
+ draw.fVertexArray = fGeometrySrc.fVertexArray;
+ break;
+ case kReserved_GeometrySrcType: {
+ draw.fUseVertexBuffer = NULL != fBufferVertices;
+ if (draw.fUseVertexBuffer) {
+ draw.fVertexBuffer = fCurrVertexBuffer;
+ draw.fStartVertex += fCurrStartVertex;
+ } else {
+ draw.fVertexArray = fCurrReservedVertices;
+ }
+ size_t vertexBytes = (vertexCount + startVertex) *
+ VertexSize(fGeometrySrc.fVertexLayout);
+ fUsedReservedVertexBytes = GrMax(fUsedReservedVertexBytes,
+ vertexBytes);
+ } break;
+ case kBuffer_GeometrySrcType:
+ draw.fUseVertexBuffer = true;
+ draw.fVertexBuffer = fGeometrySrc.fVertexBuffer;
+ break;
+ }
+}
+
+void GrInOrderDrawBuffer::reset() {
+ GrAssert(!fReservedGeometry.fLocked);
+ uint32_t numStates = fStates.count();
+ for (uint32_t i = 0; i < numStates; ++i) {
+ GrTexture* tex = accessSavedDrawState(fStates[i]).fTexture;
+ if (NULL != tex) {
+ tex->unref();
+ }
+ }
+ fDraws.reset();
+ fStates.reset();
+ if (NULL == fBufferVertices) {
+ fCPUVertices.reset();
+ } else {
+ fBufferVertices->reset();
+ }
+ fIndices.reset();
+ fClips.reset();
+}
+
+void GrInOrderDrawBuffer::playback(GrDrawTarget* target) {
+ GrAssert(NULL != target);
+    GrAssert(target != this); // playing a buffer back into itself is not supported
+
+ uint32_t numDraws = fDraws.count();
+ if (!numDraws) {
+ return;
+ }
+
+ if (NULL != fBufferVertices) {
+ fBufferVertices->unlock();
+ }
+
+ GrDrawTarget::AutoStateRestore asr(target);
+ GrDrawTarget::AutoClipRestore acr(target);
+    // important not to modify the target's reserved/locked geometry while
+    // this is on the stack.
+ GrDrawTarget::AutoGeometrySrcRestore agsr(target);
+
+ uint32_t currState = ~0;
+ uint32_t currClip = ~0;
+
+ for (uint32_t i = 0; i < numDraws; ++i) {
+ const Draw& draw = fDraws[i];
+ if (draw.fStateChange) {
+ ++currState;
+ target->restoreDrawState(fStates[currState]);
+ }
+ if (draw.fClipChanged) {
+ ++currClip;
+ target->setClip(fClips[currClip]);
+ }
+ if (draw.fUseVertexBuffer) {
+ target->setVertexSourceToBuffer(draw.fVertexBuffer, draw.fVertexLayout);
+ } else {
+ target->setVertexSourceToArray(draw.fVertexArray, draw.fVertexLayout);
+ }
+ if (draw.fIndexCount) {
+ if (draw.fUseIndexBuffer) {
+ target->setIndexSourceToBuffer(draw.fIndexBuffer);
+ } else {
+ target->setIndexSourceToArray(draw.fIndexArray);
+ }
+ target->drawIndexed(draw.fType,
+ draw.fStartVertex,
+ draw.fStartIndex,
+ draw.fVertexCount,
+ draw.fIndexCount);
+ } else {
+ target->drawNonIndexed(draw.fType,
+ draw.fStartVertex,
+ draw.fVertexCount);
+ }
+ }
+}
+
+bool GrInOrderDrawBuffer::geometryHints(GrVertexLayout vertexLayout,
+ int32_t* vertexCount,
+ int32_t* indexCount) const {
+ bool flush = false;
+ if (NULL != indexCount) {
+ *indexCount = -1;
+ }
+ if (NULL != vertexCount) {
+ if (NULL != fBufferVertices) {
+ // we will recommend a flush if the verts could fit in a single
+ // preallocated vertex buffer but none are left and it can't fit
+ // in the current VB (which may not be prealloced).
+ if (*vertexCount > fBufferVertices->currentBufferVertices(vertexLayout) &&
+ (!fBufferVertices->preallocatedBuffersRemaining() &&
+ *vertexCount <= fBufferVertices->preallocatedBufferVertices(vertexLayout))) {
+
+ flush = true;
+ }
+ *vertexCount = fBufferVertices->currentBufferVertices(vertexLayout);
+ } else {
+ *vertexCount = -1;
+ }
+ }
+ return flush;
+}
+
+bool GrInOrderDrawBuffer::acquireGeometryHelper(GrVertexLayout vertexLayout,
+ void** vertices,
+ void** indices) {
+ if (fReservedGeometry.fVertexCount) {
+ fReservedVertexBytes = VertexSize(vertexLayout) *
+ fReservedGeometry.fVertexCount;
+ if (NULL == fBufferVertices) {
+ fCurrReservedVertices = fCPUVertices.alloc(fReservedVertexBytes);
+ } else {
+ fCurrReservedVertices = fBufferVertices->alloc(vertexLayout,
+ fReservedGeometry.fVertexCount,
+ &fCurrVertexBuffer,
+ &fCurrStartVertex);
+ }
+ if (NULL != vertices) {
+ *vertices = fCurrReservedVertices;
+ }
+ if (NULL == fCurrReservedVertices) {
+ return false;
+ }
+ }
+ if (fReservedGeometry.fIndexCount) {
+ fReservedIndexBytes = sizeof(uint16_t) * fReservedGeometry.fIndexCount;
+ fCurrReservedIndices = fIndices.alloc(fReservedIndexBytes);
+ if (NULL != indices) {
+ *indices = fCurrReservedIndices;
+ }
+ if (NULL == fCurrReservedIndices) {
+ return false;
+ }
+ }
+ return true;
+}
+
+void GrInOrderDrawBuffer::releaseGeometryHelper() {
+ GrAssert(fUsedReservedVertexBytes <= fReservedVertexBytes);
+ GrAssert(fUsedReservedIndexBytes <= fReservedIndexBytes);
+
+ size_t vertexSlack = fReservedVertexBytes - fUsedReservedVertexBytes;
+ if (NULL == fBufferVertices) {
+ fCPUVertices.release(vertexSlack);
+ } else {
+ fBufferVertices->release(vertexSlack);
+ GR_DEBUGCODE(fCurrVertexBuffer = NULL);
+ GR_DEBUGCODE(fCurrStartVertex = 0);
+ }
+
+ fIndices.release(fReservedIndexBytes - fUsedReservedIndexBytes);
+
+ fCurrReservedVertices = NULL;
+ fCurrReservedIndices = NULL;
+ fReservedVertexBytes = 0;
+ fReservedIndexBytes = 0;
+ fUsedReservedVertexBytes = 0;
+ fUsedReservedIndexBytes = 0;
+}
+
+bool GrInOrderDrawBuffer::grabState() {
+ bool newState;
+ if (fStates.empty()) {
+ newState = true;
+ } else {
+ const DrawState& old = accessSavedDrawState(fStates.back());
+ newState = old != fCurrDrawState;
+ }
+ if (newState) {
+ if (NULL != fCurrDrawState.fTexture) {
+ fCurrDrawState.fTexture->ref();
+ }
+ saveCurrentDrawState(&fStates.push_back());
+ }
+ return newState;
+}
+
+bool GrInOrderDrawBuffer::grabClip() {
+ if ((fCurrDrawState.fFlagBits & kClip_StateBit) &&
+ (fClipChanged || fClips.empty())) {
+
+ fClips.push_back() = fClip;
+ fClipChanged = false;
+ return true;
+ }
+ return false;
+}
+
+void GrInOrderDrawBuffer::clipWillChange(const GrClip& clip) {
+ fClipChanged = true;
+}
+
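A short usage sketch of the record/playback flow implemented above, assuming a GrGpu* and a GrVertexBufferAllocPool* created elsewhere; the draws issued on the buffer are whatever GrDrawTarget calls the caller would normally make:

    #include "GrInOrderDrawBuffer.h"
    #include "GrGpu.h"

    static void recordAndReplay(GrGpu* gpu, GrVertexBufferAllocPool* pool) {
        GrInOrderDrawBuffer buffer(pool);
        buffer.initializeDrawStateAndClip(*gpu);  // snapshot gpu's state and clip
        // ... issue drawIndexed()/drawNonIndexed() and state changes on `buffer`;
        //     they are only recorded, with state/clip snapshots taken lazily by
        //     grabState()/grabClip() when a draw actually needs them ...
        buffer.playback(gpu);                     // replay everything into the gpu
        buffer.reset();                           // drop draws, states, clips, geometry
    }
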
diff --git a/gpu/src/GrMatrix.cpp b/gpu/src/GrMatrix.cpp
new file mode 100644
index 0000000000..e4360cd50f
--- /dev/null
+++ b/gpu/src/GrMatrix.cpp
@@ -0,0 +1,767 @@
+/*
+ Copyright 2010 Google Inc.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+ */
+
+
+#include "GrMatrix.h"
+#include "GrRect.h"
+#include <stddef.h>
+
+#if GR_SCALAR_IS_FLOAT
+ const GrScalar GrMatrix::gRESCALE(GR_Scalar1);
+#else
+ GR_STATIC_ASSERT(GR_SCALAR_IS_FIXED);
+ // fixed point isn't supported right now
+ GR_STATIC_ASSERT(false);
+const GrScalar GrMatrix::gRESCALE(1 << 30);
+#endif
+
+const GrMatrix::MapProc GrMatrix::gMapProcs[] = {
+// Scales are not both zero
+ &GrMatrix::mapIdentity,
+ &GrMatrix::mapScale,
+ &GrMatrix::mapTranslate,
+ &GrMatrix::mapScaleAndTranslate,
+ &GrMatrix::mapSkew,
+ &GrMatrix::mapScaleAndSkew,
+ &GrMatrix::mapSkewAndTranslate,
+ &GrMatrix::mapNonPerspective,
+ // no optimizations for perspective matrices
+ &GrMatrix::mapPerspective,
+ &GrMatrix::mapPerspective,
+ &GrMatrix::mapPerspective,
+ &GrMatrix::mapPerspective,
+ &GrMatrix::mapPerspective,
+ &GrMatrix::mapPerspective,
+ &GrMatrix::mapPerspective,
+ &GrMatrix::mapPerspective,
+
+// Scales are zero (every other is invalid because kScale_TypeBit must be set if
+// kZeroScale_TypeBit is set)
+ &GrMatrix::mapInvalid,
+ &GrMatrix::mapZero,
+ &GrMatrix::mapInvalid,
+ &GrMatrix::mapSetToTranslate,
+ &GrMatrix::mapInvalid,
+ &GrMatrix::mapSwappedScale,
+ &GrMatrix::mapInvalid,
+ &GrMatrix::mapSwappedScaleAndTranslate,
+
+ // no optimizations for perspective matrices
+ &GrMatrix::mapInvalid,
+ &GrMatrix::mapZero,
+ &GrMatrix::mapInvalid,
+ &GrMatrix::mapPerspective,
+ &GrMatrix::mapInvalid,
+ &GrMatrix::mapPerspective,
+ &GrMatrix::mapInvalid,
+ &GrMatrix::mapPerspective,
+};
+
+const GrMatrix& GrMatrix::I() {
+ struct FakeMatrix {
+ int fTypeMask;
+ GrScalar fM[9];
+ };
+
+#if 0
+ GR_STATIC_ASSERT(offsetof(FakeMatrix, fTypeMask) == offsetof(GrMatrix, fTypeMask));
+ GR_STATIC_ASSERT(offsetof(FakeMatrix, fM) == offsetof(GrMatrix, fM));
+#endif
+
+ GR_STATIC_ASSERT(sizeof(FakeMatrix) == sizeof(GrMatrix));
+ static const FakeMatrix I = {0,
+ {GR_Scalar1, 0, 0,
+ 0, GR_Scalar1, 0,
+ 0, 0, gRESCALE}};
+ return *(const GrMatrix*)&I;
+}
+
+void GrMatrix::setIdentity() {
+ fM[0] = GR_Scalar1; fM[1] = 0; fM[2] = 0;
+ fM[3] = 0; fM[4] = GR_Scalar1; fM[5] = 0;
+ fM[6] = 0; fM[7] = 0; fM[8] = gRESCALE;
+ fTypeMask = 0;
+}
+
+void GrMatrix::setTranslate(GrScalar dx, GrScalar dy) {
+ fM[0] = GR_Scalar1; fM[1] = 0; fM[2] = dx;
+ fM[3] = 0; fM[4] = GR_Scalar1; fM[5] = dy;
+ fM[6] = 0; fM[7] = 0; fM[8] = gRESCALE;
+ fTypeMask = kTranslate_TypeBit;
+}
+
+void GrMatrix::setScale(GrScalar sx, GrScalar sy) {
+ fM[0] = sx; fM[1] = 0; fM[2] = 0;
+ fM[3] = 0; fM[4] = sy; fM[5] = 0;
+ fM[6] = 0; fM[7] = 0; fM[8] = gRESCALE;
+ fTypeMask = kScale_TypeBit;
+}
+
+void GrMatrix::setSkew(GrScalar skx, GrScalar sky) {
+ fM[0] = GR_Scalar1; fM[1] = skx; fM[2] = 0;
+ fM[3] = sky; fM[4] = GR_Scalar1; fM[5] = 0;
+ fM[6] = 0; fM[7] = 0; fM[8] = gRESCALE;
+ fTypeMask = kSkew_TypeBit;
+}
+
+void GrMatrix::setConcat(const GrMatrix& a, const GrMatrix& b) {
+ if (a.isIdentity()) {
+ if (this != &b) {
+ for (int i = 0; i < 9; ++i) {
+ fM[i] = b.fM[i];
+ }
+ fTypeMask = b.fTypeMask;
+ }
+ return;
+ }
+
+ if (b.isIdentity()) {
+ GrAssert(!a.isIdentity());
+ if (this != &a) {
+ for (int i = 0; i < 9; ++i) {
+ fM[i] = a.fM[i];
+ }
+ fTypeMask = a.fTypeMask;
+ }
+ return;
+ }
+
+ // a and/or b could be this
+ GrMatrix tmp;
+
+ // could do more optimizations based on type bits. Hopefully this call is
+ // low frequency.
+ // TODO: make this work for fixed point
+ if (!((b.fTypeMask | a.fTypeMask) & kPerspective_TypeBit)) {
+ tmp.fM[0] = a.fM[0] * b.fM[0] + a.fM[1] * b.fM[3];
+ tmp.fM[1] = a.fM[0] * b.fM[1] + a.fM[1] * b.fM[4];
+ tmp.fM[2] = a.fM[0] * b.fM[2] + a.fM[1] * b.fM[5] + a.fM[2] * gRESCALE;
+
+ tmp.fM[3] = a.fM[3] * b.fM[0] + a.fM[4] * b.fM[3];
+ tmp.fM[4] = a.fM[3] * b.fM[1] + a.fM[4] * b.fM[4];
+ tmp.fM[5] = a.fM[3] * b.fM[2] + a.fM[4] * b.fM[5] + a.fM[5] * gRESCALE;
+
+ tmp.fM[6] = 0;
+ tmp.fM[7] = 0;
+ tmp.fM[8] = gRESCALE * gRESCALE;
+ } else {
+ tmp.fM[0] = a.fM[0] * b.fM[0] + a.fM[1] * b.fM[3] + a.fM[2] * b.fM[6];
+ tmp.fM[1] = a.fM[0] * b.fM[1] + a.fM[1] * b.fM[4] + a.fM[2] * b.fM[7];
+ tmp.fM[2] = a.fM[0] * b.fM[2] + a.fM[1] * b.fM[5] + a.fM[2] * b.fM[8];
+
+ tmp.fM[3] = a.fM[3] * b.fM[0] + a.fM[4] * b.fM[3] + a.fM[5] * b.fM[6];
+ tmp.fM[4] = a.fM[3] * b.fM[1] + a.fM[4] * b.fM[4] + a.fM[5] * b.fM[7];
+ tmp.fM[5] = a.fM[3] * b.fM[2] + a.fM[4] * b.fM[5] + a.fM[5] * b.fM[8];
+
+ tmp.fM[6] = a.fM[6] * b.fM[0] + a.fM[7] * b.fM[3] + a.fM[8] * b.fM[6];
+ tmp.fM[7] = a.fM[6] * b.fM[1] + a.fM[7] * b.fM[4] + a.fM[8] * b.fM[7];
+ tmp.fM[8] = a.fM[6] * b.fM[2] + a.fM[7] * b.fM[5] + a.fM[8] * b.fM[8];
+ }
+ *this = tmp;
+ setTypeMask();
+}
+
+void GrMatrix::preConcat(const GrMatrix& m) {
+ setConcat(*this, m);
+}
+
+void GrMatrix::postConcat(const GrMatrix& m) {
+ setConcat(m, *this);
+}
+
+double GrMatrix::determinant() const {
+ if (fTypeMask & kPerspective_TypeBit) {
+ return fM[0]*((double)fM[4]*fM[8] - (double)fM[5]*fM[7]) +
+ fM[1]*((double)fM[5]*fM[6] - (double)fM[3]*fM[8]) +
+ fM[2]*((double)fM[3]*fM[7] - (double)fM[4]*fM[6]);
+ } else {
+ return (double)fM[0]*fM[4]*gRESCALE -
+ (double)fM[1]*fM[3]*gRESCALE;
+ }
+}
+
+bool GrMatrix::invert(GrMatrix* inverted) const {
+
+ if (isIdentity()) {
+ if (inverted != this) {
+ inverted->setIdentity();
+ }
+ return true;
+ }
+ static const double MIN_DETERMINANT_SQUARED = 1.e-16;
+
+ // could do more optimizations based on type bits. Hopefully this call is
+ // low frequency.
+
+ double det = determinant();
+
+ // check if we can't be inverted
+ if (det*det <= MIN_DETERMINANT_SQUARED) {
+ return false;
+ } else if (NULL == inverted) {
+ return true;
+ }
+
+ double t[9];
+
+ if (fTypeMask & kPerspective_TypeBit) {
+ t[0] = ((double)fM[4]*fM[8] - (double)fM[5]*fM[7]);
+ t[1] = ((double)fM[2]*fM[7] - (double)fM[1]*fM[8]);
+ t[2] = ((double)fM[1]*fM[5] - (double)fM[2]*fM[4]);
+ t[3] = ((double)fM[5]*fM[6] - (double)fM[3]*fM[8]);
+ t[4] = ((double)fM[0]*fM[8] - (double)fM[2]*fM[6]);
+ t[5] = ((double)fM[2]*fM[3] - (double)fM[0]*fM[5]);
+ t[6] = ((double)fM[3]*fM[7] - (double)fM[4]*fM[6]);
+ t[7] = ((double)fM[1]*fM[6] - (double)fM[0]*fM[7]);
+ t[8] = ((double)fM[0]*fM[4] - (double)fM[1]*fM[3]);
+ det = 1.0 / det;
+ for (int i = 0; i < 9; ++i) {
+ inverted->fM[i] = (GrScalar)(t[i] * det);
+ }
+ } else {
+ t[0] = (double)fM[4]*gRESCALE;
+ t[1] = -(double)fM[1]*gRESCALE;
+ t[2] = (double)fM[1]*fM[5] - (double)fM[2]*fM[4];
+ t[3] = -(double)fM[3]*gRESCALE;
+ t[4] = (double)fM[0]*gRESCALE;
+ t[5] = (double)fM[2]*fM[3] - (double)fM[0]*fM[5];
+ //t[6] = 0.0;
+ //t[7] = 0.0;
+ t[8] = (double)fM[0]*fM[4] - (double)fM[1]*fM[3];
+ det = 1.0 / det;
+ for (int i = 0; i < 6; ++i) {
+ inverted->fM[i] = (GrScalar)(t[i] * det);
+ }
+ inverted->fM[6] = 0;
+ inverted->fM[7] = 0;
+ inverted->fM[8] = (GrScalar)(t[8] * det);
+ }
+ inverted->setTypeMask();
+ return true;
+}
+
+void GrMatrix::mapRect(GrRect* dst, const GrRect& src) const {
+ GrPoint srcPts[4], dstPts[4];
+ srcPts[0].set(src.fLeft, src.fTop);
+ srcPts[1].set(src.fRight, src.fTop);
+ srcPts[2].set(src.fRight, src.fBottom);
+ srcPts[3].set(src.fLeft, src.fBottom);
+ this->mapPoints(dstPts, srcPts, 4);
+ dst->setBounds(dstPts, 4);
+}
+
+bool GrMatrix::hasPerspective() const {
+ GrAssert(!!(kPerspective_TypeBit & fTypeMask) ==
+ (fM[kPersp0] != 0 || fM[kPersp1] != 0 || fM[kPersp2] != gRESCALE));
+ return 0 != (kPerspective_TypeBit & fTypeMask);
+}
+
+bool GrMatrix::isIdentity() const {
+ GrAssert((0 == fTypeMask) ==
+ (GR_Scalar1 == fM[kScaleX] && 0 == fM[kSkewX] && 0 == fM[kTransX] &&
+ 0 == fM[kSkewY] && GR_Scalar1 == fM[kScaleY] && 0 == fM[kTransY] &&
+ 0 == fM[kPersp0] && 0 == fM[kPersp1] && gRESCALE == fM[kPersp2]));
+ return (0 == fTypeMask);
+}
+
+
+GrScalar GrMatrix::getMaxStretch() const {
+
+ if (fTypeMask & kPerspective_TypeBit) {
+ return -GR_Scalar1;
+ }
+
+ GrScalar stretch;
+
+ if (isIdentity()) {
+ stretch = GR_Scalar1;
+ } else if (!(fTypeMask & kSkew_TypeBit)) {
+ stretch = GrMax(GrScalarAbs(fM[kScaleX]), GrScalarAbs(fM[kScaleY]));
+ } else if (fTypeMask & kZeroScale_TypeBit) {
+ stretch = GrMax(GrScalarAbs(fM[kSkewX]), GrScalarAbs(fM[kSkewY]));
+ } else {
+ // ignore the translation part of the matrix, just look at 2x2 portion.
+ // compute singular values, take largest abs value.
+ // [a b; b c] = A^T*A
+ GrScalar a = GrMul(fM[kScaleX], fM[kScaleX]) + GrMul(fM[kSkewY], fM[kSkewY]);
+ GrScalar b = GrMul(fM[kScaleX], fM[kSkewX]) + GrMul(fM[kScaleY], fM[kSkewY]);
+ GrScalar c = GrMul(fM[kSkewX], fM[kSkewX]) + GrMul(fM[kScaleY], fM[kScaleY]);
+ // eigenvalues of A^T*A are the squared singular values of A.
+ // characteristic equation is det((A^T*A) - l*I) = 0
+ // l^2 - (a + c)l + (ac-b^2)
+ // solve using quadratic equation (divisor is non-zero since l^2 has 1 coeff
+        // and roots are guaranteed to be positive and real).
+ GrScalar largerRoot;
+ GrScalar bSqd = GrMul(b,b);
+ // TODO: fixed point tolerance value.
+ if (bSqd < 1e-10) { // will be true if upper left 2x2 is orthogonal, which is common, so save some math
+ largerRoot = GrMax(a, c);
+ } else {
+ GrScalar aminusc = a - c;
+ GrScalar apluscdiv2 = (a + c) / 2;
+ GrScalar x = sqrtf(GrMul(aminusc,aminusc) + GrMul(4,(bSqd))) / 2;
+ largerRoot = apluscdiv2 + x;
+ }
+
+ stretch = sqrtf(largerRoot);
+ }
+#if GR_DEBUG && 0
+ // test a bunch of vectors. None should be scaled by more than stretch
+ // (modulo some error) and we should find a vector that is scaled by almost
+ // stretch.
+ GrPoint pt;
+ GrScalar max = 0;
+ for (int i = 0; i < 1000; ++i) {
+ GrScalar x = (float)rand() / RAND_MAX;
+ GrScalar y = sqrtf(1 - (x*x));
+ pt.fX = fM[kScaleX]*x + fM[kSkewX]*y;
+ pt.fY = fM[kSkewY]*x + fM[kScaleY]*y;
+ GrScalar d = pt.distanceToOrigin();
+ GrAssert(d <= (1.0001 * stretch));
+ max = GrMax(max, pt.distanceToOrigin());
+ }
+ GrAssert((stretch - max) < .05*stretch);
+#endif
+ return stretch;
+}
+
+bool GrMatrix::operator == (const GrMatrix& m) const {
+ if (fTypeMask != m.fTypeMask) {
+ return false;
+ }
+ if (!fTypeMask) {
+ return true;
+ }
+ for (int i = 0; i < 9; ++i) {
+ if (m.fM[i] != fM[i]) {
+ return false;
+ }
+ }
+ return true;
+}
+
+bool GrMatrix::operator != (const GrMatrix& m) const {
+ return !(*this == m);
+}
+
+void GrMatrix::setTypeMask() {
+ fTypeMask = 0;
+ if (0 != fM[kPersp0] || 0 != fM[kPersp1] || gRESCALE != fM[kPersp2]) {
+ fTypeMask |= kPerspective_TypeBit;
+ }
+ if (GR_Scalar1 != fM[kScaleX] || GR_Scalar1 != fM[kScaleY]) {
+ fTypeMask |= kScale_TypeBit;
+ if (0 == fM[kScaleX] && 0 == fM[kScaleY]) {
+ fTypeMask |= kZeroScale_TypeBit;
+ }
+ }
+ if (0 != fM[kSkewX] || 0 != fM[kSkewY]) {
+ fTypeMask |= kSkew_TypeBit;
+ }
+ if (0 != fM[kTransX] || 0 != fM[kTransY]) {
+ fTypeMask |= kTranslate_TypeBit;
+ }
+}
+
+////////////////////////////////////////////////////////////////////////////////
+// Matrix transformation procs
+//////
+
+void GrMatrix::mapIdentity(GrPoint* dst, const GrPoint* src, uint32_t count) const {
+ if (src != dst) {
+ for (uint32_t i = 0; i < count; ++i) {
+ dst[i] = src[i];
+ }
+ }
+}
+
+void GrMatrix::mapScale(GrPoint* dst, const GrPoint* src, uint32_t count) const {
+ for (uint32_t i = 0; i < count; ++i) {
+ dst[i].fX = GrMul(src[i].fX, fM[kScaleX]);
+ dst[i].fY = GrMul(src[i].fY, fM[kScaleY]);
+ }
+}
+
+
+void GrMatrix::mapTranslate(GrPoint* dst, const GrPoint* src, uint32_t count) const {
+ for (uint32_t i = 0; i < count; ++i) {
+ dst[i].fX = src[i].fX + fM[kTransX];
+ dst[i].fY = src[i].fY + fM[kTransY];
+ }
+}
+
+void GrMatrix::mapScaleAndTranslate(GrPoint* dst, const GrPoint* src, uint32_t count) const {
+ for (uint32_t i = 0; i < count; ++i) {
+ dst[i].fX = GrMul(src[i].fX, fM[kScaleX]) + fM[kTransX];
+ dst[i].fY = GrMul(src[i].fY, fM[kScaleY]) + fM[kTransY];
+ }
+}
+
+void GrMatrix::mapSkew(GrPoint* dst, const GrPoint* src, uint32_t count) const {
+ if (src != dst) {
+ for (uint32_t i = 0; i < count; ++i) {
+ dst[i].fX = src[i].fX + GrMul(src[i].fY, fM[kSkewX]);
+ dst[i].fY = src[i].fY + GrMul(src[i].fX, fM[kSkewY]);
+ }
+ } else {
+ for (uint32_t i = 0; i < count; ++i) {
+ GrScalar newX = src[i].fX + GrMul(src[i].fY, fM[kSkewX]);
+ dst[i].fY = src[i].fY + GrMul(src[i].fX, fM[kSkewY]);
+ dst[i].fX = newX;
+ }
+ }
+}
+
+void GrMatrix::mapScaleAndSkew(GrPoint* dst, const GrPoint* src, uint32_t count) const {
+ if (src != dst) {
+ for (uint32_t i = 0; i < count; ++i) {
+ dst[i].fX = GrMul(src[i].fX, fM[kScaleX]) + GrMul(src[i].fY, fM[kSkewX]);
+ dst[i].fY = GrMul(src[i].fY, fM[kScaleY]) + GrMul(src[i].fX, fM[kSkewY]);
+ }
+ } else {
+ for (uint32_t i = 0; i < count; ++i) {
+ GrScalar newX = GrMul(src[i].fX, fM[kScaleX]) + GrMul(src[i].fY, fM[kSkewX]);
+ dst[i].fY = GrMul(src[i].fY, fM[kScaleY]) + GrMul(src[i].fX, fM[kSkewY]);
+ dst[i].fX = newX;
+ }
+ }
+}
+
+void GrMatrix::mapSkewAndTranslate(GrPoint* dst, const GrPoint* src, uint32_t count) const {
+ if (src != dst) {
+ for (uint32_t i = 0; i < count; ++i) {
+ dst[i].fX = src[i].fX + GrMul(src[i].fY, fM[kSkewX]) + fM[kTransX];
+ dst[i].fY = src[i].fY + GrMul(src[i].fX, fM[kSkewY]) + fM[kTransY];
+ }
+ } else {
+ for (uint32_t i = 0; i < count; ++i) {
+ GrScalar newX = src[i].fX + GrMul(src[i].fY, fM[kSkewX]) + fM[kTransX];
+ dst[i].fY = src[i].fY + GrMul(src[i].fX, fM[kSkewY]) + fM[kTransY];
+ dst[i].fX = newX;
+ }
+ }
+}
+
+void GrMatrix::mapNonPerspective(GrPoint* dst, const GrPoint* src, uint32_t count) const {
+ if (src != dst) {
+ for (uint32_t i = 0; i < count; ++i) {
+ dst[i].fX = GrMul(fM[kScaleX], src[i].fX) + GrMul(fM[kSkewX], src[i].fY) + fM[kTransX];
+ dst[i].fY = GrMul(fM[kSkewY], src[i].fX) + GrMul(fM[kScaleY], src[i].fY) + fM[kTransY];
+ }
+ } else {
+ for (uint32_t i = 0; i < count; ++i) {
+ GrScalar newX = GrMul(fM[kScaleX], src[i].fX) + GrMul(fM[kSkewX], src[i].fY) + fM[kTransX];
+ dst[i].fY = GrMul(fM[kSkewY], src[i].fX) + GrMul(fM[kScaleY], src[i].fY) + fM[kTransY];
+ dst[i].fX = newX;
+ }
+ }
+}
+
+void GrMatrix::mapPerspective(GrPoint* dst, const GrPoint* src, uint32_t count) const {
+ for (uint32_t i = 0; i < count; ++i) {
+ GrScalar x, y, w;
+ x = GrMul(fM[kScaleX], src[i].fX) + GrMul(fM[kSkewX], src[i].fY) + fM[kTransX];
+ y = GrMul(fM[kSkewY], src[i].fX) + GrMul(fM[kScaleY], src[i].fY) + fM[kTransY];
+ w = GrMul(fM[kPersp0], src[i].fX) + GrMul(fM[kPersp1], src[i].fY) + fM[kPersp2];
+ // TODO need fixed point invert
+ if (w) {
+ w = 1 / w;
+ }
+ dst[i].fX = GrMul(x, w);
+ dst[i].fY = GrMul(y, w);
+ }
+}
+
+void GrMatrix::mapInvalid(GrPoint* dst, const GrPoint* src, uint32_t count) const {
+ GrAssert(0);
+}
+
+void GrMatrix::mapZero(GrPoint* dst, const GrPoint* src, uint32_t count) const {
+ memset(dst, 0, sizeof(GrPoint)*count);
+}
+
+void GrMatrix::mapSetToTranslate(GrPoint* dst, const GrPoint* src, uint32_t count) const {
+ for (uint32_t i = 0; i < count; ++i) {
+ dst[i].fX = fM[kTransX];
+ dst[i].fY = fM[kTransY];
+ }
+}
+
+void GrMatrix::mapSwappedScale(GrPoint* dst, const GrPoint* src, uint32_t count) const {
+ if (src != dst) {
+ for (uint32_t i = 0; i < count; ++i) {
+ dst[i].fX = GrMul(src[i].fY, fM[kSkewX]);
+ dst[i].fY = GrMul(src[i].fX, fM[kSkewY]);
+ }
+ } else {
+ for (uint32_t i = 0; i < count; ++i) {
+ GrScalar newX = GrMul(src[i].fY, fM[kSkewX]);
+ dst[i].fY = GrMul(src[i].fX, fM[kSkewY]);
+ dst[i].fX = newX;
+ }
+ }
+}
+
+void GrMatrix::mapSwappedScaleAndTranslate(GrPoint* dst, const GrPoint* src, uint32_t count) const {
+ if (src != dst) {
+ for (uint32_t i = 0; i < count; ++i) {
+ dst[i].fX = GrMul(src[i].fY, fM[kSkewX]) + fM[kTransX];
+ dst[i].fY = GrMul(src[i].fX, fM[kSkewY]) + fM[kTransY];
+ }
+ } else {
+ for (uint32_t i = 0; i < count; ++i) {
+ GrScalar newX = GrMul(src[i].fY, fM[kSkewX]) + fM[kTransX];
+ dst[i].fY = GrMul(src[i].fX, fM[kSkewY]) + fM[kTransY];
+ dst[i].fX = newX;
+ }
+ }
+}
+
+///////////////////////////////////////////////////////////////////////////////
+// Unit test
+//////
+
+#include "GrRandom.h"
+
+#if GR_DEBUG
+enum MatrixType {
+ kRotate_MatrixType,
+ kScaleX_MatrixType,
+ kScaleY_MatrixType,
+ kSkewX_MatrixType,
+ kSkewY_MatrixType,
+ kTranslateX_MatrixType,
+ kTranslateY_MatrixType,
+ kSwapScaleXY_MatrixType,
+ kPersp_MatrixType,
+
+ kMatrixTypeCount
+};
+
+static void create_matrix(GrMatrix* matrix, GrRandom& rand) {
+ MatrixType type = (MatrixType)(rand.nextU() % kMatrixTypeCount);
+ switch (type) {
+ case kRotate_MatrixType: {
+ float angle = rand.nextF() * 2 *3.14159265358979323846f;
+ GrScalar cosa = GrFloatToScalar(cosf(angle));
+ GrScalar sina = GrFloatToScalar(sinf(angle));
+ matrix->setAll(cosa, -sina, 0,
+ sina, cosa, 0,
+ 0, 0, GrMatrix::I()[8]);
+ } break;
+ case kScaleX_MatrixType: {
+ GrScalar scale = GrFloatToScalar(rand.nextF(-2, 2));
+ matrix->setAll(scale, 0, 0,
+ 0, GR_Scalar1, 0,
+ 0, 0, GrMatrix::I()[8]);
+ } break;
+ case kScaleY_MatrixType: {
+ GrScalar scale = GrFloatToScalar(rand.nextF(-2, 2));
+ matrix->setAll(GR_Scalar1, 0, 0,
+ 0, scale, 0,
+ 0, 0, GrMatrix::I()[8]);
+ } break;
+ case kSkewX_MatrixType: {
+ GrScalar skew = GrFloatToScalar(rand.nextF(-2, 2));
+ matrix->setAll(GR_Scalar1, skew, 0,
+ 0, GR_Scalar1, 0,
+ 0, 0, GrMatrix::I()[8]);
+ } break;
+ case kSkewY_MatrixType: {
+ GrScalar skew = GrFloatToScalar(rand.nextF(-2, 2));
+ matrix->setAll(GR_Scalar1, 0, 0,
+ skew, GR_Scalar1, 0,
+ 0, 0, GrMatrix::I()[8]);
+ } break;
+ case kTranslateX_MatrixType: {
+ GrScalar trans = GrFloatToScalar(rand.nextF(-10, 10));
+ matrix->setAll(GR_Scalar1, 0, trans,
+ 0, GR_Scalar1, 0,
+ 0, 0, GrMatrix::I()[8]);
+ } break;
+ case kTranslateY_MatrixType: {
+ GrScalar trans = GrFloatToScalar(rand.nextF(-10, 10));
+ matrix->setAll(GR_Scalar1, 0, 0,
+ 0, GR_Scalar1, trans,
+ 0, 0, GrMatrix::I()[8]);
+ } break;
+ case kSwapScaleXY_MatrixType: {
+ GrScalar xy = GrFloatToScalar(rand.nextF(-2, 2));
+ GrScalar yx = GrFloatToScalar(rand.nextF(-2, 2));
+ matrix->setAll(0, xy, 0,
+ yx, 0, 0,
+ 0, 0, GrMatrix::I()[8]);
+ } break;
+ case kPersp_MatrixType: {
+ GrScalar p0 = GrFloatToScalar(rand.nextF(-2, 2));
+ GrScalar p1 = GrFloatToScalar(rand.nextF(-2, 2));
+ GrScalar p2 = GrFloatToScalar(rand.nextF(-0.5f, 0.75f));
+ matrix->setAll(GR_Scalar1, 0, 0,
+ 0, GR_Scalar1, 0,
+ p0, p1, GrMul(p2,GrMatrix::I()[8]));
+ } break;
+ default:
+ GrAssert(0);
+ break;
+ }
+}
+#endif
+
+void GrMatrix::UnitTest() {
+ GrRandom rand;
+
+ // Create a bunch of matrices and test point mapping, max stretch calc,
+ // inversion and multiply-by-inverse.
+#if GR_DEBUG
+ for (int i = 0; i < 10000; ++i) {
+ GrMatrix a, b;
+ a.setIdentity();
+ int num = rand.nextU() % 6;
+ // force testing of I and swapXY
+ if (0 == i) {
+ num = 0;
+ GrAssert(a.isIdentity());
+ } else if (1 == i) {
+ num = 0;
+ a.setAll(0, GR_Scalar1, 0,
+ GR_Scalar1, 0, 0,
+ 0, 0, I()[8]);
+ }
+ for (int j = 0; j < num; ++j) {
+ create_matrix(&b, rand);
+ a.preConcat(b);
+ }
+
+ GrScalar maxStretch = a.getMaxStretch();
+ if (maxStretch > 0) {
+ maxStretch = GrMul(GR_Scalar1 + GR_Scalar1 / 100, maxStretch);
+ }
+ GrPoint origin = a.mapPoint(GrPoint(0,0));
+
+ for (int j = 0; j < 9; ++j) {
+ int mask, origMask = a.fTypeMask;
+ GrScalar old = a[j];
+
+ a.set(j, GR_Scalar1);
+ mask = a.fTypeMask;
+ a.setTypeMask();
+ GrAssert(mask == a.fTypeMask);
+
+ a.set(j, 0);
+ mask = a.fTypeMask;
+ a.setTypeMask();
+ GrAssert(mask == a.fTypeMask);
+
+ a.set(j, 10 * GR_Scalar1);
+ mask = a.fTypeMask;
+ a.setTypeMask();
+ GrAssert(mask == a.fTypeMask);
+
+ a.set(j, old);
+ GrAssert(a.fTypeMask == origMask);
+ }
+
+ for (int j = 0; j < 100; ++j) {
+ GrPoint pt;
+ pt.fX = GrFloatToScalar(rand.nextF(-10, 10));
+ pt.fY = GrFloatToScalar(rand.nextF(-10, 10));
+
+ GrPoint t0, t1, t2;
+ t0 = a.mapPoint(pt); // map to a new point
+ t1 = pt;
+ a.mapPoints(&t1, &t1, 1); // in place
+ a.mapPerspective(&t2, &pt, 1); // full mult
+ GrAssert(t0 == t1 && t1 == t2);
+ if (maxStretch >= 0.f) {
+ GrVec vec;
+ vec.setBetween(t0, origin);
+ GrScalar stretch = vec.length() / pt.distanceToOrigin();
+ GrAssert(stretch <= maxStretch);
+ }
+ }
+ double det = a.determinant();
+ if (fabs(det) > 1e-3 && a.invert(&b)) {
+ GrMatrix c;
+ c.setConcat(a,b);
+ for (int i = 0; i < 9; ++i) {
+ GrScalar diff = GrScalarAbs(c[i] - I()[i]);
+ GrAssert(diff < (5*GR_Scalar1 / 100));
+ }
+ }
+ }
+#endif
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+int Gr_clz(uint32_t n) {
+ if (0 == n) {
+ return 32;
+ }
+
+ int count = 0;
+ if (0 == (n & 0xFFFF0000)) {
+ count += 16;
+ n <<= 16;
+ }
+ if (0 == (n & 0xFF000000)) {
+ count += 8;
+ n <<= 8;
+ }
+ if (0 == (n & 0xF0000000)) {
+ count += 4;
+ n <<= 4;
+ }
+ if (0 == (n & 0xC0000000)) {
+ count += 2;
+ n <<= 2;
+ }
+ if (0 == (n & 0x80000000)) {
+ count += 1;
+ }
+ return count;
+}
+
+///////////////////////////////////////////////////////////////////////////////
+#include "GrRect.h"
+
+void GrRect::setBounds(const GrPoint pts[], int count) {
+ if (count <= 0) {
+ this->setEmpty();
+ } else {
+ GrScalar L, R, T, B;
+ L = R = pts[0].fX;
+ T = B = pts[0].fY;
+ for (int i = 1; i < count; i++) {
+ GrScalar x = pts[i].fX;
+ GrScalar y = pts[i].fY;
+ if (x < L) {
+ L = x;
+ } else if (x > R) {
+ R = x;
+ }
+ if (y < T) {
+ T = y;
+ } else if (y > B) {
+ B = y;
+ }
+ }
+ this->setLTRB(L, T, R, B);
+ }
+}
+
+
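On getMaxStretch() above: for a non-perspective matrix the maximum stretch is the largest singular value of the upper-left 2x2 block A = [sx kx; ky sy], i.e. the square root of the larger eigenvalue of A^T*A = [a b; b c]. The larger root of the characteristic equation l^2 - (a + c)l + (ac - b^2) = 0 is (a + c)/2 + sqrt((a - c)^2 + 4*b^2)/2, which is exactly what the code computes. A float-only sketch of the same calculation (names are illustrative):

    #include <cmath>

    // Largest singular value of [sx kx; ky sy], mirroring the non-perspective,
    // non-trivial branch of GrMatrix::getMaxStretch().
    static float maxStretch2x2(float sx, float kx, float ky, float sy) {
        const float a = sx * sx + ky * ky;   // A^T*A = [a b; b c]
        const float b = sx * kx + sy * ky;
        const float c = kx * kx + sy * sy;
        const float largerRoot =
            (a + c) * 0.5f + std::sqrt((a - c) * (a - c) + 4 * b * b) * 0.5f;
        return std::sqrt(largerRoot);
    }
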
diff --git a/gpu/src/GrMemory.cpp b/gpu/src/GrMemory.cpp
new file mode 100644
index 0000000000..3da924a299
--- /dev/null
+++ b/gpu/src/GrMemory.cpp
@@ -0,0 +1,36 @@
+/*
+ Copyright 2010 Google Inc.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+ */
+
+
+#include "GrMemory.h"
+
+#include <stdlib.h>
+
+void* GrMalloc(size_t bytes) {
+ void* ptr = ::malloc(bytes);
+ if (NULL == ptr) {
+ ::exit(-1);
+ }
+ return ptr;
+}
+
+void GrFree(void* ptr) {
+ if (ptr) {
+ ::free(ptr);
+ }
+}
+
+
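GrMalloc() above is infallible from the caller's point of view: on allocation failure it exits the process rather than returning NULL, so callers use the result without a check, and GrFree(NULL) is a safe no-op. A minimal usage sketch (assuming the declarations live in GrMemory.h, which this file includes):

    #include "GrMemory.h"

    static void useScratchBuffer() {
        void* scratch = GrMalloc(1024);   // never NULL; exits on failure
        // ... fill and consume the 1024-byte scratch buffer ...
        GrFree(scratch);                  // GrFree(NULL) would also be fine
    }
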
diff --git a/gpu/src/GrPath.cpp b/gpu/src/GrPath.cpp
new file mode 100644
index 0000000000..7b117ebf41
--- /dev/null
+++ b/gpu/src/GrPath.cpp
@@ -0,0 +1,173 @@
+#include "GrPath.h"
+
+GrPath::GrPath() {}
+
+GrPath::GrPath(const GrPath& src) {
+    // NOTE: does not copy src's verbs or points; the new path starts empty.
+}
+
+GrPath::GrPath(GrPathIter& iter) {
+ this->resetFromIter(&iter);
+}
+
+GrPath::~GrPath() {
+}
+
+void GrPath::ensureMoveTo() {
+ if (fVerbs.isEmpty() || this->wasLastVerb(kClose)) {
+ *fVerbs.append() = kMove;
+ fPts.append()->set(0, 0);
+ }
+}
+
+void GrPath::moveTo(GrScalar x, GrScalar y) {
+ if (this->wasLastVerb(kMove)) {
+ // overwrite prev kMove value
+ fPts[fPts.count() - 1].set(x, y);
+ } else {
+ *fVerbs.append() = kMove;
+ fPts.append()->set(x, y);
+ }
+}
+
+void GrPath::lineTo(GrScalar x, GrScalar y) {
+ this->ensureMoveTo();
+ *fVerbs.append() = kLine;
+ fPts.append()->set(x, y);
+}
+
+void GrPath::quadTo(GrScalar x0, GrScalar y0, GrScalar x1, GrScalar y1) {
+ this->ensureMoveTo();
+ *fVerbs.append() = kQuad;
+ fPts.append()->set(x0, y0);
+ fPts.append()->set(x1, y1);
+}
+
+void GrPath::cubicTo(GrScalar x0, GrScalar y0, GrScalar x1, GrScalar y1,
+ GrScalar x2, GrScalar y2) {
+ this->ensureMoveTo();
+ *fVerbs.append() = kCubic;
+ fPts.append()->set(x0, y0);
+ fPts.append()->set(x1, y1);
+ fPts.append()->set(x2, y2);
+}
+
+void GrPath::close() {
+ if (!fVerbs.isEmpty() && !this->wasLastVerb(kClose)) {
+ // should we allow kMove followed by kClose?
+ *fVerbs.append() = kClose;
+ }
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+void GrPath::resetFromIter(GrPathIter* iter) {
+ fPts.reset();
+ fVerbs.reset();
+
+ GrPoint pts[4];
+ GrPathIter::Command cmd;
+
+ while ((cmd = iter->next(pts)) != GrPathIter::kEnd_Command) {
+ switch (cmd) {
+ case GrPathIter::kMove_Command:
+ this->moveTo(pts[0].fX, pts[0].fY);
+ break;
+ case GrPathIter::kLine_Command:
+ this->lineTo(pts[1].fX, pts[1].fY);
+ break;
+ case GrPathIter::kQuadratic_Command:
+ this->quadTo(pts[1].fX, pts[1].fY, pts[2].fX, pts[2].fY);
+ break;
+ case GrPathIter::kCubic_Command:
+ this->cubicTo(pts[1].fX, pts[1].fY, pts[2].fX, pts[2].fY,
+ pts[3].fX, pts[3].fY);
+ break;
+ case GrPathIter::kClose_Command:
+ this->close();
+ break;
+ case GrPathIter::kEnd_Command:
+ // never get here, but include it to avoid the warning
+ break;
+ }
+ }
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+GrPath::Iter::Iter(const GrPath& path) : fPath(path) {
+ this->rewind();
+}
+
+GrPathIter::Command GrPath::Iter::next(GrPoint points[]) {
+ if (fVerbIndex == fPath.fVerbs.count()) {
+ GrAssert(fPtIndex == fPath.fPts.count());
+ return GrPathIter::kEnd_Command;
+ } else {
+ GrAssert(fVerbIndex < fPath.fVerbs.count());
+ }
+
+ uint8_t cmd = fPath.fVerbs[fVerbIndex++];
+ const GrPoint* srcPts = fPath.fPts.begin() + fPtIndex;
+
+ switch (cmd) {
+ case kMove:
+ if (points) {
+ points[0] = srcPts[0];
+ }
+ fLastPt = srcPts[0];
+ GrAssert(fPtIndex <= fPath.fPts.count() + 1);
+ fPtIndex += 1;
+ break;
+ case kLine:
+ if (points) {
+ points[0] = fLastPt;
+ points[1] = srcPts[0];
+ }
+ fLastPt = srcPts[0];
+ GrAssert(fPtIndex <= fPath.fPts.count() + 1);
+ fPtIndex += 1;
+ break;
+ case kQuad:
+ if (points) {
+ points[0] = fLastPt;
+ points[1] = srcPts[0];
+ points[2] = srcPts[1];
+ }
+            fLastPt = srcPts[1];
+ GrAssert(fPtIndex <= fPath.fPts.count() + 2);
+ fPtIndex += 2;
+ break;
+ case kCubic:
+ if (points) {
+ points[0] = fLastPt;
+ points[1] = srcPts[0];
+ points[2] = srcPts[1];
+ points[3] = srcPts[2];
+ }
+ fLastPt = srcPts[2];
+ GrAssert(fPtIndex <= fPath.fPts.count() + 3);
+ fPtIndex += 3;
+ break;
+ case kClose:
+ break;
+ default:
+ GrAssert(!"unknown grpath verb");
+ break;
+ }
+ return (GrPathIter::Command)cmd;
+}
+
+GrPathIter::ConvexHint GrPath::Iter::hint() const {
+ return fPath.getConvexHint();
+}
+
+GrPathIter::Command GrPath::Iter::next() {
+ return this->next(NULL);
+}
+
+void GrPath::Iter::rewind() {
+ fVerbIndex = fPtIndex = 0;
+}
+
+
+
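A brief sketch of building a path and walking it with the GrPath::Iter defined above; what to do with each segment is left to the caller:

    #include "GrPath.h"

    static void walkPath() {
        GrPath path;
        path.moveTo(0, 0);
        path.lineTo(10, 0);
        path.quadTo(10, 10, 0, 10);
        path.close();

        GrPath::Iter iter(path);
        GrPoint pts[4];
        GrPathIter::Command cmd;
        while ((cmd = iter.next(pts)) != GrPathIter::kEnd_Command) {
            // a move fills pts[0]; a line fills pts[0..1] (pts[0] is the
            // current point), a quad pts[0..2], a cubic pts[0..3].
            // ... consume the segment ...
        }
    }
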
diff --git a/gpu/src/GrPrintf_printf.cpp b/gpu/src/GrPrintf_printf.cpp
new file mode 100644
index 0000000000..ad239ec9aa
--- /dev/null
+++ b/gpu/src/GrPrintf_printf.cpp
@@ -0,0 +1,36 @@
+/*
+ Copyright 2010 Google Inc.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+ */
+
+
+#include "GrTypes.h"
+
+#include <stdarg.h>
+#include <stdio.h>
+
+void GrPrintf(const char format[], ...) {
+ const size_t MAX_BUFFER_SIZE = 2048;
+
+ char buffer[MAX_BUFFER_SIZE + 1];
+ va_list args;
+
+ va_start(args, format);
+ vsnprintf(buffer, MAX_BUFFER_SIZE, format, args);
+ va_end(args);
+
+ printf("%s", buffer);
+}
+
+
diff --git a/gpu/src/GrPrintf_skia.cpp b/gpu/src/GrPrintf_skia.cpp
new file mode 100644
index 0000000000..fa8b6a7647
--- /dev/null
+++ b/gpu/src/GrPrintf_skia.cpp
@@ -0,0 +1,39 @@
+/*
+ Copyright 2010 Google Inc.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+ */
+
+
+#include "GrTypes.h"
+
+#include <stdarg.h>
+#include <stdio.h>
+
+#include "SkTypes.h"
+
+void GrPrintf(const char format[], ...) {
+ const size_t MAX_BUFFER_SIZE = 512;
+
+ char buffer[MAX_BUFFER_SIZE + 1];
+ va_list args;
+
+ va_start(args, format);
+ vsnprintf(buffer, MAX_BUFFER_SIZE, format, args);
+ va_end(args);
+
+ // skia has already mapped this to do the "right thing"
+ SkDebugf("%s", buffer);
+}
+
+
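Both GrPrintf backends above format into a fixed-size stack buffer with vsnprintf (2048 bytes for the printf flavor, 512 for the Skia flavor) before handing the result to stdout or SkDebugf, so overly long messages are silently truncated. A trivial call site for reference, assuming GrPrintf is declared in GrTypes.h as both translation units suggest:

    #include "GrTypes.h"

    static void reportUpload(int width, int height) {
        // output past the backend's buffer limit is truncated, not split
        GrPrintf("uploaded %d x %d texture\n", width, height);
    }
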
diff --git a/gpu/src/GrQuadIndexTable.h b/gpu/src/GrQuadIndexTable.h
new file mode 100644
index 0000000000..7389466b86
--- /dev/null
+++ b/gpu/src/GrQuadIndexTable.h
@@ -0,0 +1,98 @@
+/*
+ Copyright 2010 Google Inc.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+ */
+
+
+/*
+ These are our vertex-indices for a series of "quads", which we implement
+ as a series of pairs of triangles. See setRectFan() for the
+ vertex order.
+
+ The largest value is 255, so we could store these in any size
+ (byte, short, int). Since the table is small, we just choose the size
+ that we think will be fastest (e.g. some drivers don't have native support
+ for byte-indices, so byte may not be the fastest)
+*/
+static const uint16_t gQuadIndexTable[] = {
+ 0, 1, 2, 0, 2, 3,
+ 4, 5, 6, 4, 6, 7,
+ 8, 9, 10, 8, 10, 11,
+ 12, 13, 14, 12, 14, 15,
+ 16, 17, 18, 16, 18, 19,
+ 20, 21, 22, 20, 22, 23,
+ 24, 25, 26, 24, 26, 27,
+ 28, 29, 30, 28, 30, 31,
+ 32, 33, 34, 32, 34, 35,
+ 36, 37, 38, 36, 38, 39,
+ 40, 41, 42, 40, 42, 43,
+ 44, 45, 46, 44, 46, 47,
+ 48, 49, 50, 48, 50, 51,
+ 52, 53, 54, 52, 54, 55,
+ 56, 57, 58, 56, 58, 59,
+ 60, 61, 62, 60, 62, 63,
+ 64, 65, 66, 64, 66, 67,
+ 68, 69, 70, 68, 70, 71,
+ 72, 73, 74, 72, 74, 75,
+ 76, 77, 78, 76, 78, 79,
+ 80, 81, 82, 80, 82, 83,
+ 84, 85, 86, 84, 86, 87,
+ 88, 89, 90, 88, 90, 91,
+ 92, 93, 94, 92, 94, 95,
+ 96, 97, 98, 96, 98, 99,
+ 100, 101, 102, 100, 102, 103,
+ 104, 105, 106, 104, 106, 107,
+ 108, 109, 110, 108, 110, 111,
+ 112, 113, 114, 112, 114, 115,
+ 116, 117, 118, 116, 118, 119,
+ 120, 121, 122, 120, 122, 123,
+ 124, 125, 126, 124, 126, 127,
+ 128, 129, 130, 128, 130, 131,
+ 132, 133, 134, 132, 134, 135,
+ 136, 137, 138, 136, 138, 139,
+ 140, 141, 142, 140, 142, 143,
+ 144, 145, 146, 144, 146, 147,
+ 148, 149, 150, 148, 150, 151,
+ 152, 153, 154, 152, 154, 155,
+ 156, 157, 158, 156, 158, 159,
+ 160, 161, 162, 160, 162, 163,
+ 164, 165, 166, 164, 166, 167,
+ 168, 169, 170, 168, 170, 171,
+ 172, 173, 174, 172, 174, 175,
+ 176, 177, 178, 176, 178, 179,
+ 180, 181, 182, 180, 182, 183,
+ 184, 185, 186, 184, 186, 187,
+ 188, 189, 190, 188, 190, 191,
+ 192, 193, 194, 192, 194, 195,
+ 196, 197, 198, 196, 198, 199,
+ 200, 201, 202, 200, 202, 203,
+ 204, 205, 206, 204, 206, 207,
+ 208, 209, 210, 208, 210, 211,
+ 212, 213, 214, 212, 214, 215,
+ 216, 217, 218, 216, 218, 219,
+ 220, 221, 222, 220, 222, 223,
+ 224, 225, 226, 224, 226, 227,
+ 228, 229, 230, 228, 230, 231,
+ 232, 233, 234, 232, 234, 235,
+ 236, 237, 238, 236, 238, 239,
+ 240, 241, 242, 240, 242, 243,
+ 244, 245, 246, 244, 246, 247,
+ 248, 249, 250, 248, 250, 251,
+ 252, 253, 254, 252, 254, 255,
+};
+
+#define GR_COUNT_QUADINDEXTABLE GR_ARRAY_COUNT(gQuadIndexTable)
+
+
+
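The table above is purely mechanical: quad q occupies vertices 4q..4q+3 and is split into the triangles (0,1,2) and (0,2,3) of that fan. A sketch of a generator that reproduces the same pattern, shown here only to make the layout explicit:

    #include <stdint.h>

    // Fills `indices` with 6 indices per quad, matching gQuadIndexTable.
    static void fillQuadIndices(uint16_t* indices, int quadCount) {
        for (int q = 0; q < quadCount; ++q) {
            const uint16_t v = (uint16_t)(4 * q);
            indices[6 * q + 0] = v + 0;
            indices[6 * q + 1] = v + 1;
            indices[6 * q + 2] = v + 2;
            indices[6 * q + 3] = v + 0;
            indices[6 * q + 4] = v + 2;
            indices[6 * q + 5] = v + 3;
        }
    }
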
diff --git a/gpu/src/GrRectanizer.cpp b/gpu/src/GrRectanizer.cpp
new file mode 100644
index 0000000000..cb6576a8c1
--- /dev/null
+++ b/gpu/src/GrRectanizer.cpp
@@ -0,0 +1,130 @@
+/*
+ Copyright 2010 Google Inc.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+ */
+
+
+#include "GrRectanizer.h"
+#include "GrTBSearch.h"
+
+#define MIN_HEIGHT_POW2 2
+
+class GrRectanizerPow2 : public GrRectanizer {
+public:
+ GrRectanizerPow2(int w, int h) : GrRectanizer(w, h) {
+ fNextStripY = 0;
+ fAreaSoFar = 0;
+ Gr_bzero(fRows, sizeof(fRows));
+ }
+
+ virtual ~GrRectanizerPow2() {
+ }
+
+ virtual bool addRect(int w, int h, GrIPoint16* loc);
+
+ virtual float percentFull() const {
+ return fAreaSoFar / ((float)this->width() * this->height());
+ }
+
+ virtual int stripToPurge(int height) const { return -1; }
+ virtual void purgeStripAtY(int yCoord) { }
+
+ ///////////////////////////////////////////////////////////////////////////
+
+ struct Row {
+ GrIPoint16 fLoc;
+ int fRowHeight;
+
+ bool canAddWidth(int width, int containerWidth) const {
+ return fLoc.fX + width <= containerWidth;
+ }
+ };
+
+ Row fRows[16];
+
+ static int HeightToRowIndex(int height) {
+ GrAssert(height >= MIN_HEIGHT_POW2);
+ return 32 - Gr_clz(height - 1);
+ }
+
+ int fNextStripY;
+ int32_t fAreaSoFar;
+
+ bool canAddStrip(int height) const {
+ return fNextStripY + height <= this->height();
+ }
+
+ void initRow(Row* row, int rowHeight) {
+ row->fLoc.set(0, fNextStripY);
+ row->fRowHeight = rowHeight;
+ fNextStripY += rowHeight;
+ }
+};
+
+bool GrRectanizerPow2::addRect(int width, int height, GrIPoint16* loc) {
+ if ((unsigned)width > (unsigned)this->width() ||
+ (unsigned)height > (unsigned)this->height()) {
+ return false;
+ }
+
+ int32_t area = width * height;
+
+ /*
+ We use bsearch, but there may be more than one row with the same height,
+ so we actually search for height-1, which can only be a pow2 itself if
+ height == 2. Thus we set a minimum height.
+ */
+ height = GrNextPow2(height);
+ if (height < MIN_HEIGHT_POW2) {
+ height = MIN_HEIGHT_POW2;
+ }
+
+ Row* row = &fRows[HeightToRowIndex(height)];
+ GrAssert(row->fRowHeight == 0 || row->fRowHeight == height);
+
+ if (0 == row->fRowHeight) {
+ if (!this->canAddStrip(height)) {
+ return false;
+ }
+ this->initRow(row, height);
+ } else {
+ if (!row->canAddWidth(width, this->width())) {
+ if (!this->canAddStrip(height)) {
+ return false;
+ }
+ // that row is now "full", so retarget our Row record for
+ // another one
+ this->initRow(row, height);
+ }
+ }
+
+ GrAssert(row->fRowHeight == height);
+ GrAssert(row->canAddWidth(width, this->width()));
+ *loc = row->fLoc;
+ row->fLoc.fX += width;
+
+ GrAssert(row->fLoc.fX <= this->width());
+ GrAssert(row->fLoc.fY <= this->height());
+ GrAssert(fNextStripY <= this->height());
+ fAreaSoFar += area;
+ return true;
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+GrRectanizer* GrRectanizer::Factory(int width, int height) {
+ return new GrRectanizerPow2(width, height);
+}
+
+
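The row lookup above hinges on HeightToRowIndex(): requested heights are rounded up to a power of two (at least MIN_HEIGHT_POW2), and each power of two gets its own strip, so fRows[] only needs one slot per bit. A standalone sketch of that mapping, assuming Gr_clz counts leading zero bits like GCC's __builtin_clz:

    #include <cstdio>
    #include <initializer_list>

    static int nextPow2(int n) {                 // same idea as GrNextPow2
        int p = 1;
        while (p < n) { p <<= 1; }
        return p;
    }

    static int heightToRowIndex(int height) {    // mirrors HeightToRowIndex()
        return 32 - __builtin_clz(height - 1);
    }

    int main() {
        for (int h : {1, 2, 3, 5, 8, 17}) {
            int strip = nextPow2(h) < 2 ? 2 : nextPow2(h);   // MIN_HEIGHT_POW2
            printf("request h=%2d -> strip height %2d -> fRows[%d]\n",
                   h, strip, heightToRowIndex(strip));
        }
        return 0;
    }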
diff --git a/gpu/src/GrRectanizer_fifo.cpp b/gpu/src/GrRectanizer_fifo.cpp
new file mode 100644
index 0000000000..6b1cad2bdf
--- /dev/null
+++ b/gpu/src/GrRectanizer_fifo.cpp
@@ -0,0 +1,130 @@
+/*
+ Copyright 2010 Google Inc.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+ */
+
+
+#include "GrRectanizer.h"
+#include "GrTBSearch.h"
+
+#define MIN_HEIGHT_POW2 2
+
+class GrRectanizerFIFO : public GrRectanizer {
+public:
+ GrRectanizerFIFO(int w, int h) : GrRectanizer(w, h) {
+ fNextStripY = 0;
+ fAreaSoFar = 0;
+ Gr_bzero(fRows, sizeof(fRows));
+ }
+
+ virtual ~GrRectanizerFIFO() {
+ }
+
+ virtual bool addRect(int w, int h, GrIPoint16* loc);
+
+ virtual float percentFull() const {
+ return fAreaSoFar / ((float)this->width() * this->height());
+ }
+
+ virtual int stripToPurge(int height) const { return -1; }
+ virtual void purgeStripAtY(int yCoord) { }
+
+ ///////////////////////////////////////////////////////////////////////////
+
+ struct Row {
+ GrIPoint16 fLoc;
+ int fRowHeight;
+
+ bool canAddWidth(int width, int containerWidth) const {
+ return fLoc.fX + width <= containerWidth;
+ }
+ };
+
+ Row fRows[16];
+
+ static int HeightToRowIndex(int height) {
+ GrAssert(height >= MIN_HEIGHT_POW2);
+ return 32 - Gr_clz(height - 1);
+ }
+
+ int fNextStripY;
+ int32_t fAreaSoFar;
+
+ bool canAddStrip(int height) const {
+ return fNextStripY + height <= this->height();
+ }
+
+ void initRow(Row* row, int rowHeight) {
+ row->fLoc.set(0, fNextStripY);
+ row->fRowHeight = rowHeight;
+ fNextStripY += rowHeight;
+ }
+};
+
+bool GrRectanizerFIFO::addRect(int width, int height, GrIPoint16* loc) {
+ if ((unsigned)width > (unsigned)this->width() ||
+ (unsigned)height > (unsigned)this->height()) {
+ return false;
+ }
+
+ int32_t area = width * height;
+
+ /*
+ We use bsearch, but there may be more than one row with the same height,
+ so we actually search for height-1, which can only be a pow2 itself if
+ height == 2. Thus we set a minimum height.
+ */
+ height = GrNextPow2(height);
+ if (height < MIN_HEIGHT_POW2) {
+ height = MIN_HEIGHT_POW2;
+ }
+
+ Row* row = &fRows[HeightToRowIndex(height)];
+ GrAssert(row->fRowHeight == 0 || row->fRowHeight == height);
+
+ if (0 == row->fRowHeight) {
+ if (!this->canAddStrip(height)) {
+ return false;
+ }
+ this->initRow(row, height);
+ } else {
+ if (!row->canAddWidth(width, this->width())) {
+ if (!this->canAddStrip(height)) {
+ return false;
+ }
+ // that row is now "full", so retarget our Row record for
+ // another one
+ this->initRow(row, height);
+ }
+ }
+
+ GrAssert(row->fRowHeight == height);
+ GrAssert(row->canAddWidth(width, this->width()));
+ *loc = row->fLoc;
+ row->fLoc.fX += width;
+
+ GrAssert(row->fLoc.fX <= this->width());
+ GrAssert(row->fLoc.fY <= this->height());
+ GrAssert(fNextStripY <= this->height());
+ fAreaSoFar += area;
+ return true;
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+GrRectanizer* GrRectanizer::Factory(int width, int height) {
+ return new GrRectanizerFIFO(width, height);
+}
+
+
diff --git a/gpu/src/GrTextContext.cpp b/gpu/src/GrTextContext.cpp
new file mode 100644
index 0000000000..d5fa1ccc65
--- /dev/null
+++ b/gpu/src/GrTextContext.cpp
@@ -0,0 +1,244 @@
+/*
+ Copyright 2010 Google Inc.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+ */
+
+
+#include "GrAtlas.h"
+#include "GrClipIterator.h"
+#include "GrContext.h"
+#include "GrTextContext.h"
+#include "GrTextStrike.h"
+#include "GrTextStrike_impl.h"
+#include "GrFontScaler.h"
+
+static const GrVertexLayout VLAYOUT = GrDrawTarget::kTextFormat_VertexLayoutBit;
+
+void GrTextContext::flushGlyphs() {
+ if (fCurrVertex > 0) {
+ GrDrawTarget::AutoStateRestore asr(fDrawTarget);
+
+        // set up our sampler state for our text texture/atlas
+
+ GrSamplerState sampler(GrSamplerState::kRepeat_WrapMode,
+ GrSamplerState::kRepeat_WrapMode,
+ GrSamplerState::kAlphaMod_SampleMode,
+ !fExtMatrix.isIdentity());
+ fDrawTarget->setSamplerState(sampler);
+
+ GrAssert(GrIsALIGN4(fCurrVertex));
+ int nIndices = fCurrVertex + (fCurrVertex >> 1);
+ GrAssert(fCurrTexture);
+ fDrawTarget->setTexture(fCurrTexture);
+ fDrawTarget->setTextureMatrix(GrMatrix::I());
+ fDrawTarget->setIndexSourceToBuffer(fContext->quadIndexBuffer());
+
+ fDrawTarget->drawIndexed(GrDrawTarget::kTriangles_PrimitiveType,
+ 0, 0, fCurrVertex, nIndices);
+
+ fDrawTarget->releaseReservedGeometry();
+ fVertices = NULL;
+ fMaxVertices = 0;
+ fCurrVertex = 0;
+ fCurrTexture->unref();
+ fCurrTexture = NULL;
+ }
+}
+
+GrTextContext::GrTextContext(GrContext* context, const GrMatrix* extMatrix) {
+ fContext = context;
+ fStrike = NULL;
+
+ fCurrTexture = NULL;
+ fCurrVertex = 0;
+ fClipRect = context->getClip().getBounds();
+
+ if (NULL != extMatrix) {
+ fExtMatrix = *extMatrix;
+ } else {
+ fExtMatrix = GrMatrix::I();
+ }
+ if (!fExtMatrix.isIdentity()) {
+ GrMatrix inverse;
+ GrRect r;
+ r.set(fClipRect);
+ if (fExtMatrix.invert(&inverse)) {
+ inverse.mapRect(&r);
+ r.roundOut(&fClipRect);
+ }
+ }
+
+ fContext->getViewMatrix(&fOrigViewMatrix);
+ fContext->setViewMatrix(fExtMatrix);
+
+ fVertices = NULL;
+ fMaxVertices = 0;
+ fDrawTarget = fContext->getTextTarget();
+}
+
+GrTextContext::~GrTextContext() {
+ this->flushGlyphs();
+ fContext->setViewMatrix(fOrigViewMatrix);
+}
+
+void GrTextContext::flush() {
+ this->flushGlyphs();
+}
+
+static inline void setRectFan(GrGpuTextVertex v[4], int l, int t, int r, int b,
+ int stride) {
+ v[0 * stride].setI(l, t);
+ v[1 * stride].setI(l, b);
+ v[2 * stride].setI(r, b);
+ v[3 * stride].setI(r, t);
+}
+
+void GrTextContext::drawPackedGlyph(GrGlyph::PackedID packed,
+ GrFixed vx, GrFixed vy,
+ GrFontScaler* scaler) {
+ if (NULL == fStrike) {
+ fStrike = fContext->getFontCache()->getStrike(scaler);
+ }
+
+ GrGlyph* glyph = fStrike->getGlyph(packed, scaler);
+ if (NULL == glyph || glyph->fBounds.isEmpty()) {
+ return;
+ }
+
+ vx += GrIntToFixed(glyph->fBounds.fLeft);
+ vy += GrIntToFixed(glyph->fBounds.fTop);
+
+ // keep them as ints until we've done the clip-test
+ GrFixed width = glyph->fBounds.width();
+ GrFixed height = glyph->fBounds.height();
+
+ // check if we clipped out
+ if (true || NULL == glyph->fAtlas) {
+ int x = vx >> 16;
+ int y = vy >> 16;
+ if (fClipRect.quickReject(x, y, x + width, y + height)) {
+// Gr_clz(3); // so we can set a break-point in the debugger
+ return;
+ }
+ }
+
+ if (NULL == glyph->fAtlas) {
+ if (fStrike->getGlyphAtlas(glyph, scaler)) {
+ goto HAS_ATLAS;
+ }
+ // try to purge
+ fContext->getFontCache()->purgeExceptFor(fStrike);
+ if (fStrike->getGlyphAtlas(glyph, scaler)) {
+ goto HAS_ATLAS;
+ }
+
+ // Draw as a path, so we flush any accumulated glyphs first
+ this->flushGlyphs();
+
+ if (NULL == glyph->fPath) {
+
+ GrPath* path = new GrPath;
+ if (!scaler->getGlyphPath(glyph->glyphID(), path)) {
+ // flag the glyph as being dead?
+ delete path;
+ return;
+ }
+ glyph->fPath = path;
+ }
+ GrPath::Iter iter(*glyph->fPath);
+ bool useTexture = false;
+ GrPoint translate;
+ translate.set(GrFixedToScalar(vx - GrIntToFixed(glyph->fBounds.fLeft)),
+ GrFixedToScalar(vy - GrIntToFixed(glyph->fBounds.fTop)));
+ fContext->drawPath(&iter, GrContext::kWinding_PathFill,
+ useTexture, &translate);
+ return;
+ }
+
+HAS_ATLAS:
+ GrAssert(glyph->fAtlas);
+
+ // now promote them to fixed
+ width = GrIntToFixed(width);
+ height = GrIntToFixed(height);
+
+ GrTexture* texture = glyph->fAtlas->texture();
+ GrAssert(texture);
+
+ if (fCurrTexture != texture || fCurrVertex + 4 > fMaxVertices) {
+ this->flushGlyphs();
+ fCurrTexture = texture;
+ fCurrTexture->ref();
+ }
+
+ if (NULL == fVertices) {
+        // If we need to reserve vertices, allow the draw target to suggest
+        // a number of verts to reserve and whether to perform a flush.
+ fMaxVertices = kMinRequestedVerts;
+ bool flush = fDrawTarget->geometryHints(VLAYOUT,
+ &fMaxVertices,
+ NULL);
+ if (flush) {
+ this->flushGlyphs();
+ fContext->flushText();
+ fDrawTarget = fContext->getTextTarget();
+ fMaxVertices = kDefaultRequestedVerts;
+ // ignore return, no point in flushing again.
+ fDrawTarget->geometryHints(VLAYOUT,
+ &fMaxVertices,
+ NULL);
+ }
+
+ if (fMaxVertices < kMinRequestedVerts) {
+ fMaxVertices = kDefaultRequestedVerts;
+ } else if (fMaxVertices > (fContext->maxQuadsInIndexBuffer() * 4)) {
+ // don't exceed the limit of the index buffer
+ fMaxVertices = (fContext->maxQuadsInIndexBuffer() * 4);
+ }
+ bool success = fDrawTarget->reserveAndLockGeometry(VLAYOUT,
+ fMaxVertices, 0,
+ (void**)&fVertices,
+ NULL);
+ GrAlwaysAssert(success);
+ }
+
+ GrFixed tx = GrIntToFixed(glyph->fAtlasLocation.fX);
+ GrFixed ty = GrIntToFixed(glyph->fAtlasLocation.fY);
+
+#if GR_GL_TEXT_TEXTURE_NORMALIZED
+ int x = vx >> 16;
+ int y = vy >> 16;
+ int w = width >> 16;
+ int h = height >> 16;
+
+ setRectFan(&fVertices[2*fCurrVertex], x, y, x + w, y + h, 2);
+ setRectFan(&fVertices[2*fCurrVertex+1],
+ texture->normalizeFixedX(tx),
+ texture->normalizeFixedY(ty),
+ texture->normalizeFixedX(tx + width),
+ texture->normalizeFixedY(ty + height),
+ 2);
+#else
+ fVertices[2*fCurrVertex].setXRectFan(vx, vy, vx + width, vy + height,
+ 2 * sizeof(GrGpuTextVertex));
+ fVertices[2*fCurrVertex+1].setXRectFan(texture->normalizeFixedX(tx),
+ texture->normalizeFixedY(ty),
+ texture->normalizeFixedX(tx + width),
+ texture->normalizeFixedY(ty + height),
+ 2 * sizeof(GrGpuTextVertex));
+#endif
+ fCurrVertex += 4;
+}
+
+
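A small arithmetic note on flushGlyphs() above: each glyph occupies four vertices, and the quad index buffer set as the index source turns every four vertices into six indices, so the index count is 1.5x the vertex count; fCurrVertex + (fCurrVertex >> 1) computes exactly that without a multiply. A quick standalone check:

    #include <cassert>

    int main() {
        for (int quads = 1; quads <= 128; ++quads) {
            int vertices = quads * 4;                   // 4 verts per glyph
            int nIndices = vertices + (vertices >> 1);  // as in flushGlyphs()
            assert(nIndices == quads * 6);              // 6 indices per quad
        }
        return 0;
    }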
diff --git a/gpu/src/GrTextStrike.cpp b/gpu/src/GrTextStrike.cpp
new file mode 100644
index 0000000000..c2d81d544a
--- /dev/null
+++ b/gpu/src/GrTextStrike.cpp
@@ -0,0 +1,204 @@
+/*
+ Copyright 2010 Google Inc.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+ */
+
+
+#include "GrAtlas.h"
+#include "GrGpu.h"
+#include "GrMemory.h"
+#include "GrRectanizer.h"
+#include "GrTextStrike.h"
+#include "GrTextStrike_impl.h"
+#include "GrRect.h"
+
+GrFontCache::GrFontCache(GrGpu* gpu) : fGpu(gpu) {
+ gpu->ref();
+ fAtlasMgr = NULL;
+
+ fHead = fTail = NULL;
+}
+
+GrFontCache::~GrFontCache() {
+ fCache.deleteAll();
+ delete fAtlasMgr;
+ fGpu->unref();
+}
+
+GrTextStrike* GrFontCache::generateStrike(GrFontScaler* scaler,
+ const Key& key) {
+ if (NULL == fAtlasMgr) {
+ fAtlasMgr = new GrAtlasMgr(fGpu);
+ }
+ GrTextStrike* strike = new GrTextStrike(this, scaler->getKey(), fAtlasMgr);
+ fCache.insert(key, strike);
+
+ if (fHead) {
+ fHead->fPrev = strike;
+ } else {
+ GrAssert(NULL == fTail);
+ fTail = strike;
+ }
+ strike->fPrev = NULL;
+ strike->fNext = fHead;
+ fHead = strike;
+
+ return strike;
+}
+
+void GrFontCache::abandonAll() {
+ fCache.deleteAll();
+ if (fAtlasMgr) {
+ fAtlasMgr->abandonAll();
+ delete fAtlasMgr;
+ fAtlasMgr = NULL;
+ }
+}
+
+void GrFontCache::freeAll() {
+ fCache.deleteAll();
+ delete fAtlasMgr;
+ fAtlasMgr = NULL;
+}
+
+void GrFontCache::purgeExceptFor(GrTextStrike* preserveStrike) {
+ GrTextStrike* strike = fTail;
+ if (strike == preserveStrike) {
+ strike = strike->fPrev;
+ }
+ if (strike) {
+ int index = fCache.slowFindIndex(strike);
+ GrAssert(index >= 0);
+ fCache.removeAt(index, strike->fFontScalerKey->getHash());
+ this->detachStrikeFromList(strike);
+ delete strike;
+ }
+}
+
+#if GR_DEBUG
+void GrFontCache::validate() const {
+ int count = fCache.count();
+ if (0 == count) {
+ GrAssert(!fHead);
+ GrAssert(!fTail);
+ } else if (1 == count) {
+ GrAssert(fHead == fTail);
+ } else {
+ GrAssert(fHead != fTail);
+ }
+
+ int count2 = 0;
+ const GrTextStrike* strike = fHead;
+ while (strike) {
+ count2 += 1;
+ strike = strike->fNext;
+ }
+ GrAssert(count == count2);
+
+ count2 = 0;
+ strike = fTail;
+ while (strike) {
+ count2 += 1;
+ strike = strike->fPrev;
+ }
+ GrAssert(count == count2);
+}
+#endif
+
+///////////////////////////////////////////////////////////////////////////////
+
+#if GR_DEBUG
+ static int gCounter;
+#endif
+
+/*
+ The text strike is specific to a given font/style/matrix setup, which is
+    represented by the GrFontScaler object we are given in getGlyph().
+
+    We map a 32-bit glyphID to a GrGlyph record, which in turn points to an
+    atlas and a position within that texture.
+ */
+
+GrTextStrike::GrTextStrike(GrFontCache* cache, const GrKey* key,
+ GrAtlasMgr* atlasMgr) : fPool(64) {
+ fFontScalerKey = key;
+ fFontScalerKey->ref();
+
+ fFontCache = cache; // no need to ref, it won't go away before we do
+ fAtlasMgr = atlasMgr; // no need to ref, it won't go away before we do
+ fAtlas = NULL;
+
+#if GR_DEBUG
+ GrPrintf(" GrTextStrike %p %d\n", this, gCounter);
+ gCounter += 1;
+#endif
+}
+
+static void FreeGlyph(GrGlyph*& glyph) { glyph->free(); }
+
+GrTextStrike::~GrTextStrike() {
+ GrAtlas::FreeLList(fAtlas);
+ fFontScalerKey->unref();
+ fCache.getArray().visit(FreeGlyph);
+
+#if GR_DEBUG
+ gCounter -= 1;
+ GrPrintf("~GrTextStrike %p %d\n", this, gCounter);
+#endif
+}
+
+GrGlyph* GrTextStrike::generateGlyph(GrGlyph::PackedID packed,
+ GrFontScaler* scaler) {
+ GrIRect bounds;
+ if (!scaler->getPackedGlyphBounds(packed, &bounds)) {
+ return NULL;
+ }
+
+ GrGlyph* glyph = fPool.alloc();
+ glyph->init(packed, bounds);
+ fCache.insert(packed, glyph);
+ return glyph;
+}
+
+bool GrTextStrike::getGlyphAtlas(GrGlyph* glyph, GrFontScaler* scaler) {
+ GrAssert(glyph);
+ GrAssert(scaler);
+ GrAssert(fCache.contains(glyph));
+ if (glyph->fAtlas) {
+ return true;
+ }
+
+ GrAutoRef ar(scaler);
+
+ size_t size = glyph->fBounds.area();
+ GrAutoSMalloc<1024> storage(size);
+ if (!scaler->getPackedGlyphImage(glyph->fPackedID, glyph->width(),
+ glyph->height(), glyph->width(),
+ storage.get())) {
+ return false;
+ }
+
+ GrAtlas* atlas = fAtlasMgr->addToAtlas(fAtlas, glyph->width(),
+ glyph->height(), storage.get(),
+ &glyph->fAtlasLocation);
+ if (NULL == atlas) {
+ return false;
+ }
+
+    // update fAtlas as well, since they may be chained in a linked list
+ glyph->fAtlas = fAtlas = atlas;
+ return true;
+}
+
+
diff --git a/gpu/src/GrTextStrike_impl.h b/gpu/src/GrTextStrike_impl.h
new file mode 100644
index 0000000000..7e03e2aff4
--- /dev/null
+++ b/gpu/src/GrTextStrike_impl.h
@@ -0,0 +1,113 @@
+/*
+ Copyright 2010 Google Inc.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+ */
+
+
+#ifndef GrTextStrike_impl_DEFINED
+#define GrTextStrike_impl_DEFINED
+
+class GrFontCache::Key {
+public:
+ Key(GrFontScaler* scaler) {
+ fFontScalerKey = scaler->getKey();
+ }
+
+ uint32_t getHash() const { return fFontScalerKey->getHash(); }
+
+ static bool LT(const GrTextStrike& strike, const Key& key) {
+ return *strike.getFontScalerKey() < *key.fFontScalerKey;
+ }
+ static bool EQ(const GrTextStrike& strike, const Key& key) {
+ return *strike.getFontScalerKey() == *key.fFontScalerKey;
+ }
+
+private:
+ const GrKey* fFontScalerKey;
+};
+
+void GrFontCache::detachStrikeFromList(GrTextStrike* strike) {
+ if (strike->fPrev) {
+ GrAssert(fHead != strike);
+ strike->fPrev->fNext = strike->fNext;
+ } else {
+ GrAssert(fHead == strike);
+ fHead = strike->fNext;
+ }
+
+ if (strike->fNext) {
+ GrAssert(fTail != strike);
+ strike->fNext->fPrev = strike->fPrev;
+ } else {
+ GrAssert(fTail == strike);
+ fTail = strike->fPrev;
+ }
+}
+
+GrTextStrike* GrFontCache::getStrike(GrFontScaler* scaler) {
+ this->validate();
+
+ Key key(scaler);
+ GrTextStrike* strike = fCache.find(key);
+ if (NULL == strike) {
+ strike = this->generateStrike(scaler, key);
+ } else if (strike->fPrev) {
+        // Need to put the strike at the head of its doubly-linked list, since
+        // that is how we age the strikes for purging (we purge from the back
+        // of the list).
+ this->detachStrikeFromList(strike);
+ // attach at the head
+ fHead->fPrev = strike;
+ strike->fNext = fHead;
+ strike->fPrev = NULL;
+ fHead = strike;
+ }
+
+ this->validate();
+ return strike;
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+/**
+ *  This Key just wraps a glyphID and matches the protocol needed by
+ *  GrTHashTable.
+ */
+class GrTextStrike::Key {
+public:
+ Key(GrGlyph::PackedID id) : fPackedID(id) {}
+
+ uint32_t getHash() const { return fPackedID; }
+
+ static bool LT(const GrGlyph& glyph, const Key& key) {
+ return glyph.fPackedID < key.fPackedID;
+ }
+ static bool EQ(const GrGlyph& glyph, const Key& key) {
+ return glyph.fPackedID == key.fPackedID;
+ }
+
+private:
+ GrGlyph::PackedID fPackedID;
+};
+
+GrGlyph* GrTextStrike::getGlyph(GrGlyph::PackedID packed,
+ GrFontScaler* scaler) {
+ GrGlyph* glyph = fCache.find(packed);
+ if (NULL == glyph) {
+ glyph = this->generateGlyph(packed, scaler);
+ }
+ return glyph;
+}
+
+#endif
+
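getStrike() and detachStrikeFromList() above implement a classic move-to-head list: a cache hit moves the strike to fHead, so fTail is always the oldest strike and purgeExceptFor() can evict from the back. The same bookkeeping in isolation, as a standalone sketch on a hypothetical Node type (not this codebase's classes):

    #include <cassert>

    struct Node { Node* fPrev = nullptr; Node* fNext = nullptr; };

    struct List {
        Node* fHead = nullptr;
        Node* fTail = nullptr;

        void pushHead(Node* n) {
            n->fPrev = nullptr;
            n->fNext = fHead;
            if (fHead) { fHead->fPrev = n; } else { fTail = n; }
            fHead = n;
        }
        void detach(Node* n) {
            if (n->fPrev) { n->fPrev->fNext = n->fNext; } else { fHead = n->fNext; }
            if (n->fNext) { n->fNext->fPrev = n->fPrev; } else { fTail = n->fPrev; }
        }
        void moveToHead(Node* n) {       // what a cache hit does
            if (n != fHead) { detach(n); pushHead(n); }
        }
    };

    int main() {
        Node a, b, c;
        List list;
        list.pushHead(&a); list.pushHead(&b); list.pushHead(&c);  // order: c b a
        list.moveToHead(&a);                                      // order: a c b
        assert(list.fHead == &a && list.fTail == &b);             // b is oldest
        return 0;
    }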
diff --git a/gpu/src/GrTextureCache.cpp b/gpu/src/GrTextureCache.cpp
new file mode 100644
index 0000000000..3ba333945c
--- /dev/null
+++ b/gpu/src/GrTextureCache.cpp
@@ -0,0 +1,297 @@
+/*
+ Copyright 2010 Google Inc.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+ */
+
+
+#include "GrTextureCache.h"
+#include "GrTexture.h"
+
+GrTextureEntry::GrTextureEntry(const GrTextureKey& key, GrTexture* texture)
+ : fKey(key), fTexture(texture) {
+ fLockCount = 0;
+ fPrev = fNext = NULL;
+
+ // we assume ownership of the texture, and will unref it when we die
+ GrAssert(texture);
+}
+
+GrTextureEntry::~GrTextureEntry() {
+ fTexture->unref();
+}
+
+#if GR_DEBUG
+void GrTextureEntry::validate() const {
+ GrAssert(fLockCount >= 0);
+ GrAssert(fTexture);
+ fTexture->validate();
+}
+#endif
+
+///////////////////////////////////////////////////////////////////////////////
+
+GrTextureCache::GrTextureCache(int maxCount, size_t maxBytes) :
+ fMaxCount(maxCount),
+ fMaxBytes(maxBytes) {
+ fEntryCount = 0;
+ fEntryBytes = 0;
+ fClientDetachedCount = 0;
+ fClientDetachedBytes = 0;
+
+ fHead = fTail = NULL;
+}
+
+GrTextureCache::~GrTextureCache() {
+ GrAutoTextureCacheValidate atcv(this);
+
+ this->deleteAll(kFreeTexture_DeleteMode);
+}
+
+void GrTextureCache::internalDetach(GrTextureEntry* entry,
+ bool clientDetach) {
+ GrTextureEntry* prev = entry->fPrev;
+ GrTextureEntry* next = entry->fNext;
+
+ if (prev) {
+ prev->fNext = next;
+ } else {
+ fHead = next;
+ }
+ if (next) {
+ next->fPrev = prev;
+ } else {
+ fTail = prev;
+ }
+
+ // update our stats
+ if (clientDetach) {
+ fClientDetachedCount += 1;
+ fClientDetachedBytes += entry->texture()->sizeInBytes();
+ } else {
+ fEntryCount -= 1;
+ fEntryBytes -= entry->texture()->sizeInBytes();
+ }
+}
+
+void GrTextureCache::attachToHead(GrTextureEntry* entry,
+ bool clientReattach) {
+ entry->fPrev = NULL;
+ entry->fNext = fHead;
+ if (fHead) {
+ fHead->fPrev = entry;
+ }
+ fHead = entry;
+ if (NULL == fTail) {
+ fTail = entry;
+ }
+
+ // update our stats
+ if (clientReattach) {
+ fClientDetachedCount -= 1;
+ fClientDetachedBytes -= entry->texture()->sizeInBytes();
+ } else {
+ fEntryCount += 1;
+ fEntryBytes += entry->texture()->sizeInBytes();
+ }
+}
+
+class GrTextureCache::Key {
+ typedef GrTextureEntry T;
+
+ const GrTextureKey& fKey;
+public:
+ Key(const GrTextureKey& key) : fKey(key) {}
+
+ uint32_t getHash() const { return fKey.hashIndex(); }
+
+ static bool LT(const T& entry, const Key& key) {
+ return entry.key() < key.fKey;
+ }
+ static bool EQ(const T& entry, const Key& key) {
+ return entry.key() == key.fKey;
+ }
+#if GR_DEBUG
+ static uint32_t GetHash(const T& entry) {
+ return entry.key().hashIndex();
+ }
+ static bool LT(const T& a, const T& b) {
+ return a.key() < b.key();
+ }
+ static bool EQ(const T& a, const T& b) {
+ return a.key() == b.key();
+ }
+#endif
+};
+
+GrTextureEntry* GrTextureCache::findAndLock(const GrTextureKey& key) {
+ GrAutoTextureCacheValidate atcv(this);
+
+ GrTextureEntry* entry = fCache.find(key);
+ if (entry) {
+ this->internalDetach(entry, false);
+ this->attachToHead(entry, false);
+ // mark the entry as "busy" so it doesn't get purged
+ entry->lock();
+ }
+ return entry;
+}
+
+GrTextureEntry* GrTextureCache::createAndLock(const GrTextureKey& key,
+ GrTexture* texture) {
+ GrAutoTextureCacheValidate atcv(this);
+
+ GrTextureEntry* entry = new GrTextureEntry(key, texture);
+
+ this->attachToHead(entry, false);
+ fCache.insert(key, entry);
+
+#if GR_DUMP_TEXTURE_UPLOAD
+ GrPrintf("--- add texture to cache %p, count=%d bytes= %d %d\n",
+ entry, fEntryCount, texture->sizeInBytes(), fEntryBytes);
+#endif
+
+ // mark the entry as "busy" so it doesn't get purged
+ entry->lock();
+ this->purgeAsNeeded();
+ return entry;
+}
+
+void GrTextureCache::detach(GrTextureEntry* entry) {
+ internalDetach(entry, true);
+ fCache.remove(entry->fKey, entry);
+}
+
+void GrTextureCache::reattachAndUnlock(GrTextureEntry* entry) {
+ attachToHead(entry, true);
+ fCache.insert(entry->key(), entry);
+ unlock(entry);
+}
+
+void GrTextureCache::unlock(GrTextureEntry* entry) {
+ GrAutoTextureCacheValidate atcv(this);
+
+ GrAssert(entry);
+ GrAssert(entry->isLocked());
+ GrAssert(fCache.find(entry->key()));
+
+ entry->unlock();
+ this->purgeAsNeeded();
+}
+
+void GrTextureCache::purgeAsNeeded() {
+ GrAutoTextureCacheValidate atcv(this);
+
+ GrTextureEntry* entry = fTail;
+ while (entry) {
+ if (fEntryCount <= fMaxCount && fEntryBytes <= fMaxBytes) {
+ break;
+ }
+
+ GrTextureEntry* prev = entry->fPrev;
+ if (!entry->isLocked()) {
+ // remove from our cache
+ fCache.remove(entry->fKey, entry);
+
+            // remove from our linked list
+ this->internalDetach(entry, false);
+
+#if GR_DUMP_TEXTURE_UPLOAD
+ GrPrintf("--- ~texture from cache %p [%d %d]\n", entry->texture(),
+ entry->texture()->contentWidth(),
+ entry->texture()->contentHeight());
+#endif
+ delete entry;
+ }
+ entry = prev;
+ }
+}
+
+void GrTextureCache::deleteAll(DeleteMode mode) {
+ GrAssert(!fClientDetachedCount);
+ GrAssert(!fClientDetachedBytes);
+
+ GrTextureEntry* entry = fHead;
+ while (entry) {
+ GrAssert(!entry->isLocked());
+
+ GrTextureEntry* next = entry->fNext;
+ if (kAbandonTexture_DeleteMode == mode) {
+ entry->texture()->abandon();
+ }
+ delete entry;
+ entry = next;
+ }
+
+ fCache.removeAll();
+ fHead = fTail = NULL;
+ fEntryCount = 0;
+ fEntryBytes = 0;
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+#if GR_DEBUG
+static int countMatches(const GrTextureEntry* head, const GrTextureEntry* target) {
+ const GrTextureEntry* entry = head;
+ int count = 0;
+ while (entry) {
+ if (target == entry) {
+ count += 1;
+ }
+ entry = entry->next();
+ }
+ return count;
+}
+
+void GrTextureCache::validate() const {
+ GrAssert(!fHead == !fTail);
+ GrAssert(!fEntryCount == !fEntryBytes);
+    GrAssert(!fClientDetachedCount == !fClientDetachedBytes);
+ GrAssert(fClientDetachedBytes <= fEntryBytes);
+ GrAssert(fClientDetachedCount <= fEntryCount);
+ GrAssert((fEntryCount - fClientDetachedCount) == fCache.count());
+ GrAssert(fEntryBytes >= 0);
+ GrAssert(fEntryCount >= 0);
+ GrAssert(fClientDetachedCount >= 0);
+ GrAssert(fClientDetachedBytes >= 0);
+
+ fCache.validate();
+
+ GrTextureEntry* entry = fHead;
+ int count = 0;
+ size_t bytes = 0;
+ while (entry) {
+ entry->validate();
+ GrAssert(fCache.find(entry->key()));
+ count += 1;
+ bytes += entry->texture()->sizeInBytes();
+ entry = entry->fNext;
+ }
+ GrAssert(count == fEntryCount - fClientDetachedCount);
+ GrAssert(bytes == fEntryBytes - fClientDetachedBytes);
+
+ count = 0;
+ for (entry = fTail; entry; entry = entry->fPrev) {
+ count += 1;
+ }
+ GrAssert(count == fEntryCount - fClientDetachedCount);
+
+ for (int i = 0; i < count; i++) {
+ int matches = countMatches(fHead, fCache.getArray()[i]);
+ GrAssert(1 == matches);
+ }
+}
+#endif
+
+
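purgeAsNeeded() above enforces two budgets at once, a count limit and a byte limit, and only evicts from the least-recently-used end, skipping entries that are locked. The policy in isolation, as a standalone sketch (types and numbers invented for illustration):

    #include <cstddef>
    #include <cstdio>
    #include <vector>

    struct Entry { size_t bytes; bool locked; };

    int main() {
        // index 0 is most recently used; the back is the purge candidate
        std::vector<Entry> cache = { {100, false}, {200, true}, {300, false} };
        size_t count = cache.size(), bytes = 600;
        const size_t maxCount = 3, maxBytes = 150;

        for (int i = (int)cache.size() - 1; i >= 0; --i) {
            if (count <= maxCount && bytes <= maxBytes) {
                break;                        // both budgets satisfied
            }
            if (!cache[i].locked) {           // locked entries are never purged
                count -= 1;
                bytes -= cache[i].bytes;
                cache.erase(cache.begin() + i);
            }
        }
        // The locked 200-byte entry survives, so the byte budget stays blown.
        printf("count=%zu bytes=%zu\n", count, bytes);   // count=1 bytes=200
        return 0;
    }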
diff --git a/gpu/src/GrTouchGesture.cpp b/gpu/src/GrTouchGesture.cpp
new file mode 100644
index 0000000000..0eaedc74bb
--- /dev/null
+++ b/gpu/src/GrTouchGesture.cpp
@@ -0,0 +1,243 @@
+#include "GrTouchGesture.h"
+#include "SkMatrix.h"
+#include "SkTime.h"
+
+#include <math.h>
+
+static const SkMSec MAX_DBL_TAP_INTERVAL = 300;
+static const float MAX_DBL_TAP_DISTANCE = 100;
+static const float MAX_JITTER_RADIUS = 2;
+
+// if true, then ignore the touch-move, because it's probably just jitter
+static bool close_enough_for_jitter(float x0, float y0, float x1, float y1) {
+ return sk_float_abs(x0 - x1) <= MAX_JITTER_RADIUS &&
+ sk_float_abs(y0 - y1) <= MAX_JITTER_RADIUS;
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+GrTouchGesture::GrTouchGesture() {
+ this->reset();
+}
+
+GrTouchGesture::~GrTouchGesture() {
+}
+
+void GrTouchGesture::reset() {
+ fTouches.reset();
+ fState = kEmpty_State;
+ fLocalM.reset();
+ fGlobalM.reset();
+
+ fLastUpT = SkTime::GetMSecs() - 2*MAX_DBL_TAP_INTERVAL;
+ fLastUpP.set(0, 0);
+}
+
+void GrTouchGesture::flushLocalM() {
+ fGlobalM.postConcat(fLocalM);
+ fLocalM.reset();
+}
+
+const SkMatrix& GrTouchGesture::localM() {
+ if (fFlinger.isActive()) {
+ if (!fFlinger.evaluateMatrix(&fLocalM)) {
+ this->flushLocalM();
+ }
+ }
+ return fLocalM;
+}
+
+void GrTouchGesture::appendNewRec(void* owner, float x, float y) {
+ Rec* rec = fTouches.append();
+ rec->fOwner = owner;
+ rec->fStartX = rec->fPrevX = rec->fLastX = x;
+ rec->fStartY = rec->fPrevY = rec->fLastY = y;
+ rec->fLastT = rec->fPrevT = SkTime::GetMSecs();
+}
+
+void GrTouchGesture::touchBegin(void* owner, float x, float y) {
+// GrPrintf("--- %d touchBegin %p %g %g\n", fTouches.count(), owner, x, y);
+
+ int index = this->findRec(owner);
+ if (index >= 0) {
+ this->flushLocalM();
+ fTouches.removeShuffle(index);
+ GrPrintf("---- already exists, removing\n");
+ }
+
+ if (fTouches.count() == 2) {
+ return;
+ }
+
+ this->flushLocalM();
+ fFlinger.stop();
+
+ this->appendNewRec(owner, x, y);
+
+ switch (fTouches.count()) {
+ case 1:
+ fState = kTranslate_State;
+ break;
+ case 2:
+ fState = kZoom_State;
+ break;
+ default:
+ break;
+ }
+}
+
+int GrTouchGesture::findRec(void* owner) const {
+ for (int i = 0; i < fTouches.count(); i++) {
+ if (owner == fTouches[i].fOwner) {
+ return i;
+ }
+ }
+ return -1;
+}
+
+static float center(float pos0, float pos1) {
+ return (pos0 + pos1) * 0.5f;
+}
+
+static const float MAX_ZOOM_SCALE = 4;
+static const float MIN_ZOOM_SCALE = 0.25f;
+
+float GrTouchGesture::limitTotalZoom(float scale) const {
+    // this query works because we know we're square-scale with no skew/rotation
+ const float curr = fGlobalM[0];
+
+ if (scale > 1 && curr * scale > MAX_ZOOM_SCALE) {
+ scale = MAX_ZOOM_SCALE / curr;
+ } else if (scale < 1 && curr * scale < MIN_ZOOM_SCALE) {
+ scale = MIN_ZOOM_SCALE / curr;
+ }
+ return scale;
+}
+
+void GrTouchGesture::touchMoved(void* owner, float x, float y) {
+// GrPrintf("--- %d touchMoved %p %g %g\n", fTouches.count(), owner, x, y);
+
+ GrAssert(kEmpty_State != fState);
+
+ int index = this->findRec(owner);
+ if (index < 0) {
+ // not found, so I guess we should add it...
+ GrPrintf("---- add missing begin\n");
+ this->appendNewRec(owner, x, y);
+ index = fTouches.count() - 1;
+ }
+
+ Rec& rec = fTouches[index];
+
+ // not sure how valuable this is
+ if (fTouches.count() == 2) {
+ if (close_enough_for_jitter(rec.fLastX, rec.fLastY, x, y)) {
+//            GrPrintf("--- drop touchMove, within jitter tolerance %g %g\n", rec.fLastX - x, rec.fLastY - y);
+ return;
+ }
+ }
+
+ rec.fPrevX = rec.fLastX; rec.fLastX = x;
+ rec.fPrevY = rec.fLastY; rec.fLastY = y;
+ rec.fPrevT = rec.fLastT; rec.fLastT = SkTime::GetMSecs();
+
+ switch (fTouches.count()) {
+ case 1: {
+ float dx = rec.fLastX - rec.fStartX;
+ float dy = rec.fLastY - rec.fStartY;
+ dx = (float)sk_float_round2int(dx);
+ dy = (float)sk_float_round2int(dy);
+ fLocalM.setTranslate(dx, dy);
+ } break;
+ case 2: {
+ GrAssert(kZoom_State == fState);
+ const Rec& rec0 = fTouches[0];
+ const Rec& rec1 = fTouches[1];
+
+ float scale = this->computePinch(rec0, rec1);
+ scale = this->limitTotalZoom(scale);
+
+ fLocalM.setTranslate(-center(rec0.fStartX, rec1.fStartX),
+ -center(rec0.fStartY, rec1.fStartY));
+ fLocalM.postScale(scale, scale);
+ fLocalM.postTranslate(center(rec0.fLastX, rec1.fLastX),
+ center(rec0.fLastY, rec1.fLastY));
+ } break;
+ default:
+ break;
+ }
+}
+
+void GrTouchGesture::touchEnd(void* owner) {
+// GrPrintf("--- %d touchEnd %p\n", fTouches.count(), owner);
+
+ int index = this->findRec(owner);
+ if (index < 0) {
+ GrPrintf("--- not found\n");
+ return;
+ }
+
+ const Rec& rec = fTouches[index];
+ if (this->handleDblTap(rec.fLastX, rec.fLastY)) {
+ return;
+ }
+
+ // count() reflects the number before we removed the owner
+ switch (fTouches.count()) {
+ case 1: {
+ this->flushLocalM();
+ float dx = rec.fLastX - rec.fPrevX;
+ float dy = rec.fLastY - rec.fPrevY;
+ float dur = (rec.fLastT - rec.fPrevT) * 0.001f;
+ if (dur > 0) {
+ fFlinger.reset(dx / dur, dy / dur);
+ }
+ fState = kEmpty_State;
+ } break;
+ case 2:
+ this->flushLocalM();
+ GrAssert(kZoom_State == fState);
+ fState = kEmpty_State;
+ break;
+ default:
+ GrAssert(kZoom_State == fState);
+ break;
+ }
+
+ fTouches.removeShuffle(index);
+}
+
+float GrTouchGesture::computePinch(const Rec& rec0, const Rec& rec1) {
+ double dx = rec0.fStartX - rec1.fStartX;
+ double dy = rec0.fStartY - rec1.fStartY;
+ double dist0 = sqrt(dx*dx + dy*dy);
+
+ dx = rec0.fLastX - rec1.fLastX;
+ dy = rec0.fLastY - rec1.fLastY;
+ double dist1 = sqrt(dx*dx + dy*dy);
+
+ double scale = dist1 / dist0;
+ return (float)scale;
+}
+
+bool GrTouchGesture::handleDblTap(float x, float y) {
+ bool found = false;
+ SkMSec now = SkTime::GetMSecs();
+ if (now - fLastUpT <= MAX_DBL_TAP_INTERVAL) {
+ if (SkPoint::Length(fLastUpP.fX - x,
+ fLastUpP.fY - y) <= MAX_DBL_TAP_DISTANCE) {
+ fFlinger.stop();
+ fLocalM.reset();
+ fGlobalM.reset();
+ fTouches.reset();
+ fState = kEmpty_State;
+ found = true;
+ }
+ }
+
+ fLastUpT = now;
+ fLastUpP.set(x, y);
+ return found;
+}
+
+
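computePinch() above is just the ratio of the current finger separation to the separation when the gesture began; limitTotalZoom() then clamps the accumulated scale to [0.25, 4]. A worked standalone example (coordinates invented for illustration):

    #include <cmath>
    #include <cstdio>

    struct Rec { float fStartX, fStartY, fLastX, fLastY; };

    static float computePinch(const Rec& r0, const Rec& r1) {
        double dx = r0.fStartX - r1.fStartX;
        double dy = r0.fStartY - r1.fStartY;
        double dist0 = std::sqrt(dx * dx + dy * dy);   // separation at touchBegin
        dx = r0.fLastX - r1.fLastX;
        dy = r0.fLastY - r1.fLastY;
        double dist1 = std::sqrt(dx * dx + dy * dy);   // separation now
        return (float)(dist1 / dist0);
    }

    int main() {
        Rec left  = { 100, 100,  75, 100 };   // moved 25px further left
        Rec right = { 200, 100, 225, 100 };   // moved 25px further right
        printf("scale = %g\n", computePinch(left, right));   // prints 1.5
        return 0;
    }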
diff --git a/gpu/src/GrVertexBufferAllocPool.cpp b/gpu/src/GrVertexBufferAllocPool.cpp
new file mode 100644
index 0000000000..b6f08c97ee
--- /dev/null
+++ b/gpu/src/GrVertexBufferAllocPool.cpp
@@ -0,0 +1,220 @@
+/*
+ Copyright 2010 Google Inc.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+ */
+
+
+#include "GrVertexBufferAllocPool.h"
+#include "GrVertexBuffer.h"
+#include "GrGpu.h"
+
+#define GrVertexBufferAllocPool_MIN_BLOCK_SIZE ((size_t)1 << 10)
+
+GrVertexBufferAllocPool::GrVertexBufferAllocPool(GrGpu* gpu,
+ size_t blockSize,
+ int preallocBufferCnt) :
+ fBlocks(GrMax(8, 2*preallocBufferCnt)) {
+ GrAssert(NULL != gpu);
+ fGpu = gpu;
+ fGpu->ref();
+ fBufferPtr = NULL;
+ fMinBlockSize = GrMax(GrVertexBufferAllocPool_MIN_BLOCK_SIZE, blockSize);
+
+ fPreallocBuffersInUse = 0;
+ fFirstPreallocBuffer = 0;
+ for (int i = 0; i < preallocBufferCnt; ++i) {
+ GrVertexBuffer* buffer = gpu->createVertexBuffer(fMinBlockSize, true);
+ if (NULL != buffer) {
+ *fPreallocBuffers.append() = buffer;
+ buffer->ref();
+ }
+ }
+}
+
+GrVertexBufferAllocPool::~GrVertexBufferAllocPool() {
+ fPreallocBuffers.unrefAll();
+ while (!fBlocks.empty()) {
+ destroyBlock();
+ }
+ fGpu->unref();
+}
+
+void GrVertexBufferAllocPool::reset() {
+ while (!fBlocks.empty()) {
+ destroyBlock();
+ }
+ if (fPreallocBuffers.count()) {
+        // must set this after the above loop.
+ fFirstPreallocBuffer = (fFirstPreallocBuffer + fPreallocBuffersInUse) %
+ fPreallocBuffers.count();
+ }
+ GrAssert(0 == fPreallocBuffersInUse);
+}
+
+void GrVertexBufferAllocPool::unlock() {
+ GrAssert((NULL == fBufferPtr) ? (!fBlocks.empty() ||
+ !fBlocks.back().fVertexBuffer->isLocked()) :
+ (!fBlocks.empty() &&
+ fBlocks.back().fVertexBuffer->isLocked()));
+ if (NULL != fBufferPtr) {
+ GrAssert(!fBlocks.empty());
+ GrAssert(fBlocks.back().fVertexBuffer->isLocked());
+ fBufferPtr = NULL;
+ fBlocks.back().fVertexBuffer->unlock();
+ }
+#if GR_DEBUG
+ for (uint32_t i = 0; i < fBlocks.count(); ++i) {
+ GrAssert(!fBlocks[i].fVertexBuffer->isLocked());
+ }
+#endif
+}
+
+void* GrVertexBufferAllocPool::alloc(GrVertexLayout layout,
+ uint32_t vertexCount,
+ GrVertexBuffer** buffer,
+ uint32_t* startVertex) {
+ GrAssert(NULL != buffer);
+ size_t vSize = GrDrawTarget::VertexSize(layout);
+ size_t bytes = vSize * vertexCount;
+
+ if (NULL != fBufferPtr) {
+ GrAssert(!fBlocks.empty());
+ GrAssert(fBlocks.back().fVertexBuffer->isLocked());
+ BufferBlock& back = fBlocks.back();
+ uint32_t usedBytes = back.fVertexBuffer->size() - back.fBytesFree;
+ uint32_t pad = GrUIAlignUpPad(usedBytes, layout);
+ if ((bytes + pad) <= back.fBytesFree) {
+ usedBytes += pad;
+ *startVertex = usedBytes / vSize;
+ *buffer = back.fVertexBuffer;
+ back.fBytesFree -= bytes + pad;
+ return (void*)((intptr_t)fBufferPtr + usedBytes);
+ }
+ }
+
+ if (!createBlock(GrMax(bytes, fMinBlockSize))) {
+ return NULL;
+ }
+ *startVertex = 0;
+ GrAssert(NULL != fBufferPtr);
+ BufferBlock& back = fBlocks.back();
+ *buffer = back.fVertexBuffer;
+ back.fBytesFree -= bytes;
+ return fBufferPtr;
+}
+
+int GrVertexBufferAllocPool::currentBufferVertices(GrVertexLayout layout) const {
+ if (NULL != fBufferPtr) {
+ GrAssert(!fBlocks.empty());
+ const BufferBlock& back = fBlocks.back();
+ GrAssert(back.fVertexBuffer->isLocked());
+ return back.fBytesFree / GrDrawTarget::VertexSize(layout);
+ } else if (fPreallocBuffersInUse < fPreallocBuffers.count()) {
+ return fMinBlockSize / GrDrawTarget::VertexSize(layout);
+ }
+ return 0;
+}
+
+int GrVertexBufferAllocPool::preallocatedBuffersRemaining() const {
+ return fPreallocBuffers.count() - fPreallocBuffersInUse;
+}
+
+int GrVertexBufferAllocPool::preallocatedBufferVertices(GrVertexLayout layout) const {
+ return fPreallocBuffers.count() ?
+ (fMinBlockSize / GrDrawTarget::VertexSize(layout)) :
+ 0;
+}
+
+int GrVertexBufferAllocPool::preallocatedBufferCount() const {
+ return fPreallocBuffers.count();
+}
+
+void GrVertexBufferAllocPool::release(size_t bytes) {
+ if (NULL != fBufferPtr) {
+ GrAssert(!fBlocks.empty());
+ BufferBlock& back = fBlocks.back();
+ GrAssert(back.fVertexBuffer->isLocked());
+ size_t bytesUsed = back.fVertexBuffer->size() - back.fBytesFree;
+ if (bytes >= bytesUsed) {
+ destroyBlock();
+ bytes -= bytesUsed;
+ } else {
+ back.fBytesFree += bytes;
+ return;
+ }
+ }
+ GrAssert(NULL == fBufferPtr);
+ GrAssert(fBlocks.empty() ||
+ !fBlocks.back().fVertexBuffer->isLocked());
+ // we don't honor release if it is within an already unlocked VB
+ // Our VB semantics say locking a VB discards its previous content
+ while (!fBlocks.empty() &&
+ bytes >= fBlocks.back().fVertexBuffer->size()) {
+ bytes -= fBlocks.back().fVertexBuffer->size();
+ destroyBlock();
+ }
+}
+
+bool GrVertexBufferAllocPool::createBlock(size_t size) {
+ GrAssert(size >= GrVertexBufferAllocPool_MIN_BLOCK_SIZE);
+
+ BufferBlock& block = fBlocks.push_back();
+
+ if (size == fMinBlockSize &&
+ fPreallocBuffersInUse < fPreallocBuffers.count()) {
+
+ uint32_t nextBuffer = (fPreallocBuffersInUse + fFirstPreallocBuffer) %
+ fPreallocBuffers.count();
+ block.fVertexBuffer = fPreallocBuffers[nextBuffer];
+ block.fVertexBuffer->ref();
+ ++fPreallocBuffersInUse;
+ } else {
+ block.fVertexBuffer = fGpu->createVertexBuffer(size, true);
+ if (NULL == block.fVertexBuffer) {
+ fBlocks.pop_back();
+ return false;
+ }
+ }
+
+ block.fBytesFree = size;
+ if (NULL != fBufferPtr) {
+ GrAssert(fBlocks.count() > 1);
+ BufferBlock& prev = fBlocks.fromBack(1);
+ GrAssert(prev.fVertexBuffer->isLocked());
+ fBufferPtr = NULL;
+ prev.fVertexBuffer->unlock();
+ }
+ fBufferPtr = block.fVertexBuffer->lock();
+ return true;
+}
+
+void GrVertexBufferAllocPool::destroyBlock() {
+ GrAssert(!fBlocks.empty());
+
+ BufferBlock& block = fBlocks.back();
+ if (fPreallocBuffersInUse > 0) {
+ uint32_t prevPreallocBuffer = (fPreallocBuffersInUse +
+ fFirstPreallocBuffer +
+ (fPreallocBuffers.count() - 1)) %
+ fPreallocBuffers.count();
+ if (block.fVertexBuffer == fPreallocBuffers[prevPreallocBuffer]) {
+ --fPreallocBuffersInUse;
+ }
+ }
+ block.fVertexBuffer->unref();
+ fBlocks.pop_back();
+ fBufferPtr = NULL;
+}
+
+
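The interesting part of alloc() above is the offset math: a new request is placed after the bytes already handed out in the current block, padded up so the offset is a whole number of vertices for the requested layout, which is what lets the pool return a startVertex rather than a byte offset. A standalone sketch of that arithmetic, assuming the pad aligns the running offset to the layout's vertex size:

    #include <cassert>
    #include <cstddef>

    static size_t alignUpPad(size_t x, size_t alignment) {
        return (alignment - x % alignment) % alignment;
    }

    int main() {
        const size_t blockSize = 1 << 10;                // pretend vertex buffer
        size_t bytesFree = blockSize;

        // First request: 10 vertices of a 12-byte layout.
        size_t vSize = 12;
        size_t usedBytes = blockSize - bytesFree;        // 0
        size_t pad = alignUpPad(usedBytes, vSize);       // 0
        bytesFree -= 10 * vSize + pad;                   // 120 bytes handed out

        // Second request: 4 vertices of a 16-byte layout in the same block.
        vSize = 16;
        usedBytes = blockSize - bytesFree;               // 120
        pad = alignUpPad(usedBytes, vSize);              // 8 -> offset 128
        size_t startVertex = (usedBytes + pad) / vSize;  // vertex 8, not byte 128
        bytesFree -= 4 * vSize + pad;

        assert(startVertex == 8 && bytesFree == blockSize - 192);
        return 0;
    }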
diff --git a/gpu/src/app-android.cpp b/gpu/src/app-android.cpp
new file mode 100644
index 0000000000..eea9a4d35d
--- /dev/null
+++ b/gpu/src/app-android.cpp
@@ -0,0 +1,387 @@
+#include <jni.h>
+#include <sys/time.h>
+#include <time.h>
+#include <android/log.h>
+#include <stdint.h>
+
+#include "GrContext.h"
+#include "SkGpuCanvas.h"
+#include "SkPaint.h"
+#include "SkString.h"
+#include "SkTime.h"
+
+#include "GrGLConfig.h"
+
+static GrContext* make_context() {
+ SkDebugf("---- before create\n");
+ GrContext* ctx = GrContext::Create(GrGpu::kOpenGL_Shaders_Engine, NULL);
+// GrContext* ctx = GrContext::Create(GrGpu::kOpenGL_Fixed_Engine, NULL);
+ SkDebugf("---- after create %p\n", ctx);
+ return ctx;
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+void gr_run_unittests() {}
+
+#include "FlingState.h"
+#include "GrTouchGesture.h"
+#include "SkView.h"
+
+typedef SkView* (*SkViewFactory)();
+
+// these values must match those in Ganesh.java
+enum TouchState {
+ kUnknown_TouchState,
+ kDown_TouchState,
+ kMoved_TouchState,
+ kUp_TouchState
+};
+
+struct State {
+ State();
+ ~State();
+
+ int countSlides() const { return fFactory.count(); }
+ const char* getSlideTitle(int index) const;
+ void chooseSlide(int index) {
+ SkDebugf("----- index %d\n", index);
+ if (index < fFactory.count()) {
+ this->setView(fFactory[index]());
+ }
+ }
+
+ void setViewport(int w, int h);
+ int getWidth() const { return fViewport.fX; }
+ int getHeight() const { return fViewport.fY; }
+
+ void handleTouch(void*, TouchState, float x, float y);
+ void applyMatrix(SkCanvas*);
+
+ SkView* getView() const { return fView; }
+
+private:
+ SkView* fView;
+ SkIPoint fViewport;
+
+ GrTouchGesture fGesture;
+
+ SkTDArray<SkViewFactory> fFactory;
+
+ void setView(SkView* view) {
+ SkSafeUnref(fView);
+ fView = view;
+
+ view->setVisibleP(true);
+ view->setClipToBounds(false);
+ view->setSize(SkIntToScalar(fViewport.fX),
+ SkIntToScalar(fViewport.fY));
+ }
+};
+
+///////////////////////////////////////////////////////////////////////////////
+
+#include "SampleCode.h"
+
+SkViewRegister* SkViewRegister::gHead;
+SkViewRegister::SkViewRegister(SkViewFactory fact) : fFact(fact) {
+ static bool gOnce;
+ if (!gOnce) {
+ gHead = NULL;
+ gOnce = true;
+ }
+
+ fChain = gHead;
+ gHead = this;
+}
+
+static const char gCharEvtName[] = "SampleCode_Char_Event";
+static const char gKeyEvtName[] = "SampleCode_Key_Event";
+static const char gTitleEvtName[] = "SampleCode_Title_Event";
+static const char gPrefSizeEvtName[] = "SampleCode_PrefSize_Event";
+static const char gFastTextEvtName[] = "SampleCode_FastText_Event";
+
+bool SampleCode::CharQ(const SkEvent& evt, SkUnichar* outUni) {
+ if (evt.isType(gCharEvtName, sizeof(gCharEvtName) - 1)) {
+ if (outUni) {
+ *outUni = evt.getFast32();
+ }
+ return true;
+ }
+ return false;
+}
+
+bool SampleCode::KeyQ(const SkEvent& evt, SkKey* outKey) {
+ if (evt.isType(gKeyEvtName, sizeof(gKeyEvtName) - 1)) {
+ if (outKey) {
+ *outKey = (SkKey)evt.getFast32();
+ }
+ return true;
+ }
+ return false;
+}
+
+bool SampleCode::TitleQ(const SkEvent& evt) {
+ return evt.isType(gTitleEvtName, sizeof(gTitleEvtName) - 1);
+}
+
+void SampleCode::TitleR(SkEvent* evt, const char title[]) {
+ GrAssert(evt && TitleQ(*evt));
+ evt->setString(gTitleEvtName, title);
+}
+
+bool SampleCode::PrefSizeQ(const SkEvent& evt) {
+ return evt.isType(gPrefSizeEvtName, sizeof(gPrefSizeEvtName) - 1);
+}
+
+void SampleCode::PrefSizeR(SkEvent* evt, SkScalar width, SkScalar height) {
+ GrAssert(evt && PrefSizeQ(*evt));
+ SkScalar size[2];
+ size[0] = width;
+ size[1] = height;
+ evt->setScalars(gPrefSizeEvtName, 2, size);
+}
+
+bool SampleCode::FastTextQ(const SkEvent& evt) {
+ return evt.isType(gFastTextEvtName, sizeof(gFastTextEvtName) - 1);
+}
+
+static SkMSec gAnimTime;
+static SkMSec gAnimTimePrev;
+
+SkMSec SampleCode::GetAnimTime() { return gAnimTime; }
+SkMSec SampleCode::GetAnimTimeDelta() { return gAnimTime - gAnimTimePrev; }
+SkScalar SampleCode::GetAnimSecondsDelta() {
+ return SkDoubleToScalar(GetAnimTimeDelta() / 1000.0);
+}
+
+SkScalar SampleCode::GetAnimScalar(SkScalar speed, SkScalar period) {
+ // since gAnimTime can be up to 32 bits, we can't convert it to a float
+ // or we'll lose the low bits. Hence we use doubles for the intermediate
+ // calculations
+ double seconds = (double)gAnimTime / 1000.0;
+ double value = SkScalarToDouble(speed) * seconds;
+ if (period) {
+ value = ::fmod(value, SkScalarToDouble(period));
+ }
+ return SkDoubleToScalar(value);
+}
+
+static void drawIntoCanvas(State* state, SkCanvas* canvas) {
+ gAnimTime = SkTime::GetMSecs();
+ SkView* view = state->getView();
+ view->draw(canvas);
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+static void resetGpuState();
+
+State::State() {
+ fViewport.set(0, 0);
+
+ const SkViewRegister* reg = SkViewRegister::Head();
+ while (reg) {
+ *fFactory.append() = reg->factory();
+ reg = reg->next();
+ }
+
+ SkDebugf("----- %d slides\n", fFactory.count());
+ fView = NULL;
+ this->chooseSlide(0);
+}
+
+State::~State() {
+ SkSafeUnref(fView);
+}
+
+void State::setViewport(int w, int h) {
+ fViewport.set(w, h);
+ if (fView) {
+ fView->setSize(SkIntToScalar(w), SkIntToScalar(h));
+ }
+ resetGpuState();
+}
+
+const char* State::getSlideTitle(int index) const {
+ SkEvent evt(gTitleEvtName);
+ evt.setFast32(index);
+ {
+ SkView* view = fFactory[index]();
+ view->doQuery(&evt);
+ view->unref();
+ }
+ return evt.findString(gTitleEvtName);
+}
+
+void State::handleTouch(void* owner, TouchState state, float x, float y) {
+ switch (state) {
+ case kDown_TouchState:
+ fGesture.touchBegin(owner, x, y);
+ break;
+ case kMoved_TouchState:
+ fGesture.touchMoved(owner, x, y);
+ break;
+ case kUp_TouchState:
+ fGesture.touchEnd(owner);
+ break;
+ }
+}
+
+void State::applyMatrix(SkCanvas* canvas) {
+ const SkMatrix& localM = fGesture.localM();
+ if (localM.getType() & SkMatrix::kScale_Mask) {
+ canvas->setExternalMatrix(&localM);
+ }
+ canvas->concat(localM);
+ canvas->concat(fGesture.globalM());
+}
+
+static State* get_state() {
+ static State* gState;
+ if (NULL == gState) {
+ gState = new State;
+ }
+ return gState;
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+static GrContext* gContext;
+static int gWidth;
+static int gHeight;
+static float gX, gY;
+
+static void resetGpuState() {
+ if (NULL == gContext) {
+ SkDebugf("creating context for first time\n");
+ gContext = make_context();
+ } else {
+ SkDebugf("------ gContext refcnt=%d\n", gContext->refcnt());
+ gContext->abandonAllTextures();
+ gContext->unref();
+ gContext = make_context();
+ }
+}
+
+static void doDraw() {
+ if (NULL == gContext) {
+ gContext = make_context();
+ }
+
+ State* state = get_state();
+ SkBitmap viewport;
+ viewport.setConfig(SkBitmap::kARGB_8888_Config,
+ state->getWidth(), state->getHeight());
+
+ SkGpuCanvas canvas(gContext);
+
+ canvas.setBitmapDevice(viewport);
+ state->applyMatrix(&canvas);
+
+ drawIntoCanvas(state, &canvas);
+
+ GrGLCheckErr();
+ GrGLClearErr();
+// gContext->checkError();
+// gContext->clearError();
+
+ if (true) {
+ static const int FRAME_COUNT = 32;
+ static SkMSec gDuration;
+
+ static SkMSec gNow;
+ static int gFrameCounter;
+ if (++gFrameCounter == FRAME_COUNT) {
+ gFrameCounter = 0;
+ SkMSec now = SkTime::GetMSecs();
+
+ gDuration = now - gNow;
+ gNow = now;
+ }
+
+        int fps = gDuration ? (FRAME_COUNT * 1000) / gDuration : 0;  // avoid divide-by-zero before the first sample window completes
+ SkString str;
+ str.printf("FPS=%3d MS=%3d", fps, gDuration / FRAME_COUNT);
+
+ SkGpuCanvas c(gContext);
+ c.setBitmapDevice(viewport);
+
+ SkPaint p;
+ p.setAntiAlias(true);
+ SkRect r = { 0, 0, 110, 16 };
+ p.setColor(SK_ColorWHITE);
+ c.drawRect(r, p);
+ p.setColor(SK_ColorBLACK);
+ c.drawText(str.c_str(), str.size(), 4, 12, p);
+ }
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+extern "C" {
+ JNIEXPORT void JNICALL Java_com_tetrark_ganesh_MyRenderer_nativeSurfaceCreated(
+ JNIEnv*, jobject);
+ JNIEXPORT void JNICALL Java_com_tetrark_ganesh_MyRenderer_nativeViewport(JNIEnv*, jobject,
+ jint w, jint h);
+ JNIEXPORT void JNICALL Java_com_tetrark_ganesh_MyRenderer_nativeDrawFrame(JNIEnv*, jobject);
+ JNIEXPORT void JNICALL Java_com_tetrark_ganesh_MyRenderer_nativeTouch(JNIEnv*, jobject,
+ jint id, jint type, jfloat x, jfloat y);
+
+ JNIEXPORT int JNICALL Java_com_tetrark_ganesh_MyRenderer_nativeCountSlides(JNIEnv*, jobject);
+ JNIEXPORT jobject JNICALL Java_com_tetrark_ganesh_MyRenderer_nativeGetSlideTitle(JNIEnv*, jobject, jint index);
+ JNIEXPORT void JNICALL Java_com_tetrark_ganesh_MyRenderer_nativeChooseSlide(JNIEnv*, jobject, jint index);
+}
+
+JNIEXPORT void JNICALL Java_com_tetrark_ganesh_MyRenderer_nativeSurfaceCreated(
+ JNIEnv*, jobject) {
+ SkDebugf("------ nativeSurfaceCreated\n");
+ resetGpuState();
+ SkDebugf("------ end nativeSurfaceCreated\n");
+}
+
+JNIEXPORT void JNICALL Java_com_tetrark_ganesh_MyRenderer_nativeViewport(JNIEnv*, jobject,
+ jint w, jint h) {
+ State* state = get_state();
+ SkDebugf("---- state.setviewport %p %d %d\n", state, w, h);
+ state->setViewport(w, h);
+ SkDebugf("---- end setviewport\n");
+}
+
+JNIEXPORT void JNICALL Java_com_tetrark_ganesh_MyRenderer_nativeDrawFrame(JNIEnv*, jobject) {
+ doDraw();
+}
+
+union IntPtr {
+ jint fInt;
+ void* fPtr;
+};
+static void* int2ptr(jint n) {
+ IntPtr data;
+ data.fInt = n;
+ return data.fPtr;
+}
+
+JNIEXPORT void JNICALL Java_com_tetrark_ganesh_MyRenderer_nativeTouch(JNIEnv*, jobject,
+ jint id, jint type, jfloat x, jfloat y) {
+ get_state()->handleTouch(int2ptr(id), (TouchState)type, x, y);
+}
+
+////////////
+
+JNIEXPORT int JNICALL Java_com_tetrark_ganesh_MyRenderer_nativeCountSlides(JNIEnv*, jobject) {
+ return get_state()->countSlides();
+}
+
+JNIEXPORT jobject JNICALL Java_com_tetrark_ganesh_MyRenderer_nativeGetSlideTitle(JNIEnv* env, jobject, jint index) {
+ return env->NewStringUTF(get_state()->getSlideTitle(index));
+}
+
+JNIEXPORT void JNICALL Java_com_tetrark_ganesh_MyRenderer_nativeChooseSlide(JNIEnv*, jobject, jint index) {
+ get_state()->chooseSlide(index);
+}
+
+
+
+
+
diff --git a/gpu/src/gr_files.mk b/gpu/src/gr_files.mk
new file mode 100644
index 0000000000..f9ca49e9ab
--- /dev/null
+++ b/gpu/src/gr_files.mk
@@ -0,0 +1,23 @@
+SOURCE := \
+ GrAllocPool.cpp \
+ GrAtlas.cpp \
+ GrClip.cpp \
+ GrContext.cpp \
+ GrDrawTarget.cpp \
+ GrGLIndexBuffer.cpp \
+ GrGLTexture.cpp \
+ GrGLVertexBuffer.cpp \
+ GrGpu.cpp \
+ GrGpuGLShaders.cpp \
+ GrGpuGLFixed.cpp \
+ GrGpuFactory.cpp \
+ GrGpuGL.cpp \
+ GrInOrderDrawBuffer.cpp \
+ GrMatrix.cpp \
+ GrMemory.cpp \
+ GrPath.cpp \
+ GrRectanizer_fifo.cpp \
+ GrTextureCache.cpp \
+ GrTextContext.cpp \
+ GrTextStrike.cpp \
+ GrVertexBufferAllocPool.cpp
diff --git a/gpu/src/gr_hello_world.cpp b/gpu/src/gr_hello_world.cpp
new file mode 100644
index 0000000000..5638e19aba
--- /dev/null
+++ b/gpu/src/gr_hello_world.cpp
@@ -0,0 +1,30 @@
+#include "SkGLCanvas.h"
+#include "SkBitmap.h"
+#include "SkPaint.h"
+#include "SkGpuGLShaders.h"
+
+extern "C" {
+ void gr_hello_world();
+}
+
+void gr_hello_world() {
+ static GrGpu* gGpu;
+ if (NULL == gGpu) {
+ gGpu = new SkGpuGLShaders;
+ }
+
+ SkGLCanvas canvas(gGpu);
+ SkBitmap bm;
+
+ bm.setConfig(SkBitmap::kARGB_8888_Config, WIDTH, HEIGHT);
+ canvas.setBitmapDevice(bm);
+
+ canvas.drawColor(SK_ColorWHITE);
+
+ SkPaint paint;
+ paint.setAntiAlias(true);
+ paint.setTextSize(30);
+ canvas.drawText("Hello Kno", 9, 40, 40, paint);
+}
+
+
diff --git a/gpu/src/gr_unittests.cpp b/gpu/src/gr_unittests.cpp
new file mode 100644
index 0000000000..a9fd40d124
--- /dev/null
+++ b/gpu/src/gr_unittests.cpp
@@ -0,0 +1,143 @@
+/*
+ Copyright 2010 Google Inc.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+ */
+
+
+#include "GrClip.h"
+#include "GrTDArray.h"
+#include "GrTBSearch.h"
+#include "GrMatrix.h"
+
+static void dump(const GrTDArray<int>& array) {
+#if 0
+ for (int i = 0; i < array.count(); i++) {
+ printf(" %d", array[i]);
+ }
+ printf("\n");
+#endif
+}
+
+static void test_tdarray() {
+ GrTDArray<int> array;
+
+ *array.append() = 0; dump(array);
+ *array.append() = 2; dump(array);
+ *array.append() = 4; dump(array);
+ *array.append() = 6; dump(array);
+ GrAssert(array.count() == 4);
+
+ *array.insert(0) = -1; dump(array);
+ *array.insert(2) = 1; dump(array);
+ *array.insert(4) = 3; dump(array);
+ *array.insert(7) = 7; dump(array);
+ GrAssert(array.count() == 8);
+ array.remove(3); dump(array);
+ array.remove(0); dump(array);
+ array.removeShuffle(4); dump(array);
+ array.removeShuffle(1); dump(array);
+ GrAssert(array.count() == 4);
+}
+
+static bool LT(const int& elem, int value) {
+ return elem < value;
+}
+static bool EQ(const int& elem, int value) {
+ return elem == value;
+}
+
+static void test_bsearch() {
+ const int array[] = {
+ 1, 2, 3, 4, 5, 6, 7, 8, 9, 11, 22, 33, 44, 55, 66, 77, 88, 99
+ };
+
+ for (size_t n = 0; n < GR_ARRAY_COUNT(array); n++) {
+ for (size_t i = 0; i < n; i++) {
+ int index = GrTBSearch<int, int>(array, n, array[i]);
+ GrAssert(index == i);
+ index = GrTBSearch<int, int>(array, n, -array[i]);
+ GrAssert(index < 0);
+ }
+ }
+}
+
+static void dump(const GrClip& clip, const char message[]) {
+ GrPrintf("--- dump clip %s\n", message);
+ GrClipIter iter(clip);
+ while (!iter.isDone()) {
+ GrIRect r;
+ iter.getRect(&r);
+ GrPrintf("--- [%d %d %d %d]\n", r.fLeft, r.fTop, r.fRight, r.fBottom);
+ iter.next();
+ }
+}
+
+static void test_clip() {
+ GrClip clip;
+ GrAssert(clip.isEmpty());
+ GrAssert(!clip.isRect());
+ GrAssert(!clip.isComplex());
+ GrAssert(clip.getBounds().equalsLTRB(0, 0, 0, 0));
+ GrAssert(0 == clip.countRects());
+
+ clip.setRect(GrIRect(10, 10, 10, 10));
+ GrAssert(clip.isEmpty());
+ GrAssert(!clip.isRect());
+ GrAssert(!clip.isComplex());
+ GrAssert(clip.getBounds().equalsLTRB(0, 0, 0, 0));
+ GrAssert(0 == clip.countRects());
+ dump(clip, "empty");
+
+ clip.setRect(GrIRect(10, 10, 20, 20));
+ GrAssert(!clip.isEmpty());
+ GrAssert(clip.isRect());
+ GrAssert(!clip.isComplex());
+ GrAssert(clip.getBounds().equalsLTRB(10, 10, 20, 20));
+ GrAssert(1 == clip.countRects());
+ GrAssert(clip.getRects()[0] == clip.getBounds());
+ dump(clip, "rect");
+
+ clip.addRect(GrIRect(20, 20, 25, 25));
+ GrAssert(!clip.isEmpty());
+ GrAssert(!clip.isRect());
+ GrAssert(clip.isComplex());
+ GrAssert(clip.getBounds().equalsLTRB(10, 10, 25, 25));
+ GrAssert(2 == clip.countRects());
+ dump(clip, "complex");
+
+ GrClip c1(clip);
+ GrAssert(c1 == clip);
+ GrClip c2;
+ GrAssert(c2 != c1);
+ c2 = clip;
+ GrAssert(c2 == clip);
+
+ clip.setEmpty();
+ GrAssert(clip.isEmpty());
+ GrAssert(!clip.isRect());
+ GrAssert(!clip.isComplex());
+ GrAssert(clip.getBounds().equalsLTRB(0, 0, 0, 0));
+
+ GrAssert(c1 != clip);
+ GrAssert(c2 != clip);
+}
+
+void gr_run_unittests() {
+ test_tdarray();
+ test_bsearch();
+ test_clip();
+ GrMatrix::UnitTest();
+}
+
+
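
The new gr_run_unittests() entry point above has no call site in this patch. As an illustration only (not part of the patch), the sketch below shows one way a client might wire the self-tests into a debug build; the GR_DEBUG gate and the run-once wrapper are assumptions about how a caller would invoke it.

// Illustrative sketch -- not part of the patch.
#include "GrConfig.h"             // for GR_DEBUG (assumed debug gate)

extern void gr_run_unittests();   // defined in gpu/src/gr_unittests.cpp

static void run_gr_self_tests_once() {
#if GR_DEBUG
    static bool gAlreadyRan = false;
    if (!gAlreadyRan) {
        gAlreadyRan = true;
        // exercises the GrTDArray, GrTBSearch, GrClip, and GrMatrix tests above
        gr_run_unittests();
    }
#endif
}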
diff --git a/gpu/src/skia/SkGpuCanvas.cpp b/gpu/src/skia/SkGpuCanvas.cpp
new file mode 100644
index 0000000000..19bda4d943
--- /dev/null
+++ b/gpu/src/skia/SkGpuCanvas.cpp
@@ -0,0 +1,60 @@
+/*
+ Copyright 2010 Google Inc.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+ */
+
+
+#include "GrContext.h"
+
+#include "SkGpuCanvas.h"
+#include "SkGpuDevice.h"
+
+///////////////////////////////////////////////////////////////////////////////
+
+SkGpuCanvas::SkGpuCanvas(GrContext* context) {
+ SkASSERT(context);
+ fContext = context;
+ fContext->ref();
+}
+
+SkGpuCanvas::~SkGpuCanvas() {
+ // call this now, while our override of restore() is in effect
+ this->restoreToCount(1);
+ fContext->flush(false);
+ fContext->unref();
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+bool SkGpuCanvas::getViewport(SkIPoint* size) const {
+ if (size) {
+ SkDevice* device = this->getDevice();
+ if (device) {
+ size->set(device->width(), device->height());
+ } else {
+ size->set(0, 0);
+ }
+ }
+ return true;
+}
+
+SkDevice* SkGpuCanvas::createDevice(SkBitmap::Config config, int width, int height,
+ bool isOpaque, bool isLayer) {
+ SkBitmap bm;
+ bm.setConfig(config, width, height);
+ bm.setIsOpaque(isOpaque);
+ return new SkGpuDevice(this, bm, isLayer);
+}
+
+
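
For orientation, the sketch below (not part of the patch) shows one way an SkGpuCanvas could be stood up around a GrContext, based on the constructor, destructor, and createDevice() above. GrContext::Create with the shaders engine, the ARGB_8888 config, and SkCanvas::setDevice() returning its argument are assumptions drawn from Skia/Ganesh of this era; error handling is omitted.

// Rough usage sketch -- not part of the patch.
#include "GrContext.h"
#include "SkGpuCanvas.h"
#include "SkGpuDevice.h"

static SkGpuCanvas* make_gpu_canvas(int width, int height) {
    // assumes a GL context is already current on this thread
    GrContext* ctx = GrContext::Create(GrGpu::kOpenGL_Shaders_Engine, NULL);
    if (NULL == ctx) {
        return NULL;
    }

    SkGpuCanvas* canvas = new SkGpuCanvas(ctx);
    ctx->unref();   // the canvas took its own ref in its constructor

    // back the canvas with a non-layer SkGpuDevice that draws to the
    // context's current render target
    SkBitmap bm;
    bm.setConfig(SkBitmap::kARGB_8888_Config, width, height);
    canvas->setDevice(new SkGpuDevice(canvas, bm, false /*isLayer*/))->unref();

    return canvas;   // caller unrefs; ~SkGpuCanvas restores and flushes
}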
diff --git a/gpu/src/skia/SkGpuDevice.cpp b/gpu/src/skia/SkGpuDevice.cpp
new file mode 100644
index 0000000000..832fc6ea75
--- /dev/null
+++ b/gpu/src/skia/SkGpuDevice.cpp
@@ -0,0 +1,1048 @@
+/*
+ Copyright 2010 Google Inc.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+ */
+
+
+#include "GrContext.h"
+#include "GrTextContext.h"
+
+#include "SkGpuCanvas.h"
+#include "SkGpuDevice.h"
+#include "SkGrTexturePixelRef.h"
+
+#include "SkDrawProcs.h"
+#include "SkGlyphCache.h"
+
+#define CACHE_LAYER_TEXTURES 1
+
+#if 0
+ extern bool (*gShouldDrawProc)();
+ #define CHECK_SHOULD_DRAW(draw) \
+ do { \
+ if (gShouldDrawProc && !gShouldDrawProc()) return; \
+ this->prepareRenderTarget(draw); \
+ } while (0)
+#else
+ #define CHECK_SHOULD_DRAW(draw) this->prepareRenderTarget(draw)
+#endif
+
+class SkAutoExtMatrix {
+public:
+ SkAutoExtMatrix(const SkMatrix* extMatrix) {
+ if (extMatrix) {
+ SkGr::SkMatrix2GrMatrix(*extMatrix, &fMatrix);
+ fExtMatrix = &fMatrix;
+ } else {
+ fExtMatrix = NULL;
+ }
+ }
+ const GrMatrix* extMatrix() const { return fExtMatrix; }
+
+private:
+ GrMatrix fMatrix;
+ GrMatrix* fExtMatrix; // NULL or &fMatrix
+};
+
+///////////////////////////////////////////////////////////////////////////////
+
+SkGpuDevice::SkAutoCachedTexture::
+ SkAutoCachedTexture(SkGpuDevice* device,
+ const SkBitmap& bitmap,
+ const GrSamplerState& sampler,
+ GrTexture** texture) {
+ GrAssert(texture);
+ fTex = NULL;
+ *texture = this->set(device, bitmap, sampler);
+}
+
+SkGpuDevice::SkAutoCachedTexture::SkAutoCachedTexture() {
+ fTex = NULL;
+}
+
+GrTexture* SkGpuDevice::SkAutoCachedTexture::set(SkGpuDevice* device,
+ const SkBitmap& bitmap,
+ const GrSamplerState& sampler) {
+ if (fTex) {
+ fDevice->unlockCachedTexture(fTex);
+ }
+ fDevice = device;
+ GrTexture* texture = (GrTexture*)bitmap.getTexture();
+ if (texture) {
+ // return the native texture
+ fTex = NULL;
+ device->context()->setTexture(texture);
+ } else {
+ // look it up in our cache
+ fTex = device->lockCachedTexture(bitmap, sampler, &texture, false);
+ }
+ return texture;
+}
+
+SkGpuDevice::SkAutoCachedTexture::~SkAutoCachedTexture() {
+ if (fTex) {
+ fDevice->unlockCachedTexture(fTex);
+ }
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+bool gDoTraceDraw;
+
+struct GrSkDrawProcs : public SkDrawProcs {
+public:
+ GrContext* fContext;
+ GrTextContext* fTextContext;
+ GrFontScaler* fFontScaler; // cached in the skia glyphcache
+};
+
+///////////////////////////////////////////////////////////////////////////////
+
+SkGpuDevice::SkGpuDevice(SkGpuCanvas* canvas, const SkBitmap& bitmap, bool isLayer)
+ : SkDevice(canvas, bitmap, false) {
+
+ fNeedPrepareRenderTarget = false;
+ fDrawProcs = NULL;
+
+ fContext = canvas->context();
+
+ fCache = NULL;
+ fTexture = NULL;
+ fRenderTarget = NULL;
+ fNeedClear = false;
+
+ if (isLayer) {
+ SkBitmap::Config c = bitmap.config();
+ if (c != SkBitmap::kRGB_565_Config) {
+ c = SkBitmap::kARGB_8888_Config;
+ }
+ SkBitmap bm;
+ bm.setConfig(c, this->width(), this->height());
+
+#if CACHE_LAYER_TEXTURES
+
+ fCache = this->lockCachedTexture(bm, GrSamplerState::ClampNoFilter(),
+ &fTexture, true);
+ if (fCache) {
+ SkASSERT(NULL != fTexture);
+ SkASSERT(fTexture->isRenderTarget());
+ }
+#else
+ const GrGpu::TextureDesc desc = {
+ GrGpu::kRenderTarget_TextureFlag,
+ GrGpu::kNone_AALevel,
+ this->width(),
+ this->height(),
+ SkGr::Bitmap2PixelConfig(bm)
+ };
+
+ fTexture = fContext->createUncachedTexture(desc, NULL, 0);
+#endif
+ if (NULL != fTexture) {
+ fRenderTarget = fTexture->asRenderTarget();
+
+ GrAssert(NULL != fRenderTarget);
+
+ // we defer the actual clear until our gainFocus()
+ fNeedClear = true;
+
+ // wrap the bitmap with a pixelref to expose our texture
+ SkGrTexturePixelRef* pr = new SkGrTexturePixelRef(fTexture);
+ this->setPixelRef(pr, 0)->unref();
+ } else {
+ GrPrintf("--- failed to create gpu-offscreen [%d %d]\n",
+ this->width(), this->height());
+ }
+ }
+
+ if (NULL == fRenderTarget) {
+ GrAssert(NULL == fCache);
+ GrAssert(NULL == fTexture);
+
+ fRenderTarget = fContext->currentRenderTarget();
+ fRenderTarget->ref();
+ fContext->setDefaultRenderTargetSize(this->width(), this->height());
+ }
+}
+
+SkGpuDevice::~SkGpuDevice() {
+ if (fDrawProcs) {
+ delete fDrawProcs;
+ }
+
+ if (fCache) {
+ GrAssert(NULL != fTexture);
+ GrAssert(fRenderTarget == fTexture->asRenderTarget());
+ // IMPORTANT: reattach the rendertarget/tex back to the cache.
+ fContext->reattachAndUnlockCachedTexture((GrTextureEntry*)fCache);
+ } else if (NULL != fTexture) {
+ GrAssert(!CACHE_LAYER_TEXTURES);
+ GrAssert(fRenderTarget == fTexture->asRenderTarget());
+ fTexture->unref();
+ } else if (NULL != fRenderTarget) {
+ fRenderTarget->unref();
+ }
+}
+
+void SkGpuDevice::bindDeviceToTargetHandle(intptr_t handle) {
+ if (fCache) {
+ GrAssert(NULL != fTexture);
+ GrAssert(fRenderTarget == fTexture->asRenderTarget());
+ // IMPORTANT: reattach the rendertarget/tex back to the cache.
+ fContext->reattachAndUnlockCachedTexture((GrTextureEntry*)fCache);
+ } else if (NULL != fTexture) {
+ GrAssert(!CACHE_LAYER_TEXTURES);
+ fTexture->unref();
+ } else if (NULL != fRenderTarget) {
+ fRenderTarget->unref();
+ }
+
+ fCache = NULL;
+ fTexture = NULL;
+ fRenderTarget = fContext->createPlatformRenderTarget(handle,
+ this->width(),
+ this->height());
+}
+
+intptr_t SkGpuDevice::getLayerTextureHandle() const {
+ if (fTexture) {
+ return fTexture->getTextureHandle();
+ } else {
+ return 0;
+ }
+}
+///////////////////////////////////////////////////////////////////////////////
+
+void SkGpuDevice::makeRenderTargetCurrent() {
+ fContext->setRenderTarget(fRenderTarget);
+ fContext->flush(true);
+ fNeedPrepareRenderTarget = true;
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+bool SkGpuDevice::readPixels(const SkIRect& srcRect, SkBitmap* bitmap) {
+ SkIRect bounds;
+ bounds.set(0, 0, this->width(), this->height());
+ if (!bounds.intersect(srcRect)) {
+ return false;
+ }
+
+ const int w = bounds.width();
+ const int h = bounds.height();
+ SkBitmap tmp;
+ // note we explicitly specify our rowBytes to be snug (no gap between rows)
+ tmp.setConfig(SkBitmap::kARGB_8888_Config, w, h, w * 4);
+ if (!tmp.allocPixels()) {
+ return false;
+ }
+
+ SkAutoLockPixels alp(tmp);
+ fContext->setRenderTarget(fRenderTarget);
+ // we aren't setting the clip or matrix, so mark as dirty
+ // we don't need to set them for this call and don't have them anyway
+ fNeedPrepareRenderTarget = true;
+
+ if (!fContext->readPixels(bounds.fLeft, bounds.fTop,
+ bounds.width(), bounds.height(),
+ GrTexture::kRGBA_8888_PixelConfig,
+ tmp.getPixels())) {
+ return false;
+ }
+
+ tmp.swap(*bitmap);
+ return true;
+}
+
+void SkGpuDevice::writePixels(const SkBitmap& bitmap, int x, int y) {
+ SkAutoLockPixels alp(bitmap);
+ if (!bitmap.readyToDraw()) {
+ return;
+ }
+ GrTexture::PixelConfig config = SkGr::BitmapConfig2PixelConfig(bitmap.config(),
+ bitmap.isOpaque());
+ fContext->setRenderTarget(fRenderTarget);
+ // we aren't setting the clip or matrix, so mark as dirty
+ // we don't need to set them for this call and don't have them anyway
+ fNeedPrepareRenderTarget = true;
+
+ fContext->writePixels(x, y, bitmap.width(), bitmap.height(),
+ config, bitmap.getPixels(), bitmap.rowBytes());
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+static void convert_matrixclip(GrContext* context, const SkMatrix& matrix,
+ const SkRegion& clip) {
+ GrMatrix grmat;
+ SkGr::SkMatrix2GrMatrix(matrix, &grmat);
+ context->setViewMatrix(grmat);
+
+ SkGrClipIterator iter;
+ iter.reset(clip);
+ GrClip grc(&iter);
+ // only push the clip down to the context when it actually changed
+ if (!(context->getClip() == grc)) {
+ context->setClip(grc);
+ }
+}
+
+// call this on every draw call, to ensure that the context reflects our state,
+// and not the state from some other canvas/device
+void SkGpuDevice::prepareRenderTarget(const SkDraw& draw) {
+ if (fNeedPrepareRenderTarget ||
+ fContext->currentRenderTarget() != fRenderTarget) {
+
+ fContext->setRenderTarget(fRenderTarget);
+ convert_matrixclip(fContext, *draw.fMatrix, *draw.fClip);
+ fNeedPrepareRenderTarget = false;
+ }
+}
+
+void SkGpuDevice::setMatrixClip(const SkMatrix& matrix, const SkRegion& clip) {
+ this->INHERITED::setMatrixClip(matrix, clip);
+
+ convert_matrixclip(fContext, matrix, clip);
+}
+
+void SkGpuDevice::gainFocus(SkCanvas* canvas, const SkMatrix& matrix,
+ const SkRegion& clip) {
+ fContext->setRenderTarget(fRenderTarget);
+
+ this->INHERITED::gainFocus(canvas, matrix, clip);
+
+ convert_matrixclip(fContext, matrix, clip);
+
+ if (fNeedClear) {
+ fContext->eraseColor(0x0);
+ fNeedClear = false;
+ }
+}
+
+bool SkGpuDevice::bindDeviceAsTexture(SkPoint* max) {
+ if (NULL != fTexture) {
+ fContext->setTexture(fTexture);
+ if (NULL != max) {
+ max->set(SkFixedToScalar((width() << 16) /
+ fTexture->allocWidth()),
+ SkFixedToScalar((height() << 16) /
+ fTexture->allocHeight()));
+ }
+ return true;
+ }
+ return false;
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+// must be in the same order as SkXfermode::Coeff in SkXfermode.h
+
+SkGpuDevice::AutoPaintShader::AutoPaintShader() {
+ fSuccess = false;
+ fTexture = NULL;
+}
+
+SkGpuDevice::AutoPaintShader::AutoPaintShader(SkGpuDevice* device,
+ const SkPaint& paint,
+ const SkMatrix& matrix) {
+ fSuccess = false;
+ fTexture = NULL;
+ this->init(device, paint, matrix);
+}
+
+void SkGpuDevice::AutoPaintShader::init(SkGpuDevice* device,
+ const SkPaint& paint,
+ const SkMatrix& ctm) {
+ fSuccess = true;
+ GrContext* ctx = device->context();
+ sk_gr_set_paint(ctx, paint); // should we pass true for justAlpha if we have a shader/texture?
+
+ SkShader* shader = paint.getShader();
+ if (NULL == shader) {
+ return;
+ }
+
+ if (!shader->setContext(device->accessBitmap(false), paint, ctm)) {
+ fSuccess = false;
+ return;
+ }
+
+ GrSamplerState::SampleMode sampleMode;
+ SkBitmap bitmap;
+ SkMatrix matrix;
+ SkShader::TileMode tileModes[2];
+ SkScalar twoPointParams[3];
+ SkShader::BitmapType bmptype = shader->asABitmap(&bitmap, &matrix,
+ tileModes, twoPointParams);
+
+ switch (bmptype) {
+ case SkShader::kNone_BitmapType:
+ SkDebugf("shader->asABitmap() == kNone_BitmapType");
+ return;
+ case SkShader::kDefault_BitmapType:
+ sampleMode = GrSamplerState::kNormal_SampleMode;
+ break;
+ case SkShader::kRadial_BitmapType:
+ sampleMode = GrSamplerState::kRadial_SampleMode;
+ break;
+ case SkShader::kSweep_BitmapType:
+ sampleMode = GrSamplerState::kSweep_SampleMode;
+ break;
+ case SkShader::kTwoPointRadial_BitmapType:
+ sampleMode = GrSamplerState::kRadial2_SampleMode;
+ break;
+ default:
+ SkASSERT(!"Unexpected return from asABitmap");
+ return;
+ }
+
+ bitmap.lockPixels();
+ if (!bitmap.getTexture() && !bitmap.readyToDraw()) {
+ return;
+ }
+
+ // see if we've already cached the bitmap from the shader
+ GrSamplerState samplerState(sk_tile_mode_to_grwrap(tileModes[0]),
+ sk_tile_mode_to_grwrap(tileModes[1]),
+ sampleMode,
+ paint.isFilterBitmap());
+
+ if (GrSamplerState::kRadial2_SampleMode == sampleMode) {
+ samplerState.setRadial2Params(twoPointParams[0],
+ twoPointParams[1],
+ twoPointParams[2] < 0);
+ }
+
+ GrTexture* texture = fCachedTexture.set(device, bitmap, samplerState);
+ if (NULL == texture) {
+ return;
+ }
+
+ // the lock has already called setTexture for us
+ ctx->setSamplerState(samplerState);
+
+ // since our texture coords will be in local space, we whack the texture
+ // matrix to map them back into 0...1 before we load it
+ SkMatrix localM;
+ if (shader->getLocalMatrix(&localM)) {
+ SkMatrix inverse;
+ if (localM.invert(&inverse)) {
+ matrix.preConcat(inverse);
+ }
+ }
+ if (SkShader::kDefault_BitmapType == bmptype) {
+ GrScalar sx = (GR_Scalar1 * texture->contentWidth()) /
+ (bitmap.width() * texture->allocWidth());
+ GrScalar sy = (GR_Scalar1 * texture->contentHeight()) /
+ (bitmap.height() * texture->allocHeight());
+ matrix.postScale(sx, sy);
+
+ } else if (SkShader::kRadial_BitmapType == bmptype) {
+ GrScalar s = (GR_Scalar1 * texture->contentWidth()) /
+ (bitmap.width() * texture->allocWidth());
+ matrix.postScale(s, s);
+ }
+ GrMatrix grmat;
+ SkGr::SkMatrix2GrMatrix(matrix, &grmat);
+ ctx->setTextureMatrix(grmat);
+
+ // since we're going to use a shader/texture, we don't want the color,
+ // just its alpha
+ ctx->setAlpha(paint.getAlpha());
+ // report that we have setup the texture
+ fSuccess = true;
+ fTexture = texture;
+}
+
+///////////////////////////////////////////////////////////////////////////////
+///////////////////////////////////////////////////////////////////////////////
+
+void SkGpuDevice::drawPaint(const SkDraw& draw, const SkPaint& paint) {
+ CHECK_SHOULD_DRAW(draw);
+
+ AutoPaintShader shader(this, paint, *draw.fMatrix);
+ if (shader.failed()) {
+ return;
+ }
+ fContext->drawFull(shader.useTex());
+}
+
+// must be in SkCanvas::PointMode order
+static const GrGpu::PrimitiveType gPointMode2PrimtiveType[] = {
+ GrGpu::kPoints_PrimitiveType,
+ GrGpu::kLines_PrimitiveType,
+ GrGpu::kLineStrip_PrimitiveType
+};
+
+void SkGpuDevice::drawPoints(const SkDraw& draw, SkCanvas::PointMode mode,
+ size_t count, const SkPoint pts[], const SkPaint& paint) {
+ CHECK_SHOULD_DRAW(draw);
+
+ SkScalar width = paint.getStrokeWidth();
+ if (width < 0) {
+ return;
+ }
+
+ // we only handle hairlines here, else we let the SkDraw call our drawPath()
+ if (width > 0) {
+ draw.drawPoints(mode, count, pts, paint, true);
+ return;
+ }
+
+ AutoPaintShader shader(this, paint, *draw.fMatrix);
+ if (shader.failed()) {
+ return;
+ }
+
+ GrVertexLayout layout = shader.useTex() ?
+ GrDrawTarget::kPositionAsTexCoord_VertexLayoutBit :
+ 0;
+#if SK_SCALAR_IS_GR_SCALAR
+ fContext->setVertexSourceToArray(pts, layout);
+ fContext->drawNonIndexed(gPointMode2PrimtiveType[mode], 0, count);
+#else
+ GrPoint* v;
+ fContext->reserveAndLockGeometry(layout, count, 0, (void**)&v, NULL);
+ for (int i = 0; i < count; ++i) {
+ v[i].set(SkScalarToGrScalar(pts[i].fX), SkScalarToGrScalar(pts[i].fY));
+ }
+ fContext->drawNonIndexed(gPointMode2PrimtiveType[mode], layout, 0, count);
+ fContext->releaseReservedGeometry();
+#endif
+
+}
+
+void SkGpuDevice::drawRect(const SkDraw& draw, const SkRect& rect,
+ const SkPaint& paint) {
+ CHECK_SHOULD_DRAW(draw);
+
+ bool doStroke = paint.getStyle() == SkPaint::kStroke_Style;
+ SkScalar width = paint.getStrokeWidth();
+
+ /*
+ We have special code for hairline strokes, miter-strokes, and fills.
+ Anything else (i.e. a non-miter thick stroke) falls back to our path code.
+ */
+ if (doStroke && width > 0 && paint.getStrokeJoin() != SkPaint::kMiter_Join) {
+ SkPath path;
+ path.addRect(rect);
+ this->drawPath(draw, path, paint, NULL, true);
+ return;
+ }
+
+ AutoPaintShader shader(this, paint, *draw.fMatrix);
+ if (shader.failed()) {
+ return;
+ }
+
+ fContext->drawRect(Sk2Gr(rect), shader.useTex(), doStroke ? width : -1);
+}
+
+void SkGpuDevice::drawPath(const SkDraw& draw, const SkPath& path,
+ const SkPaint& paint, const SkMatrix* prePathMatrix,
+ bool pathIsMutable) {
+ CHECK_SHOULD_DRAW(draw);
+
+ AutoPaintShader shader(this, paint, *draw.fMatrix);
+ if (shader.failed()) {
+ return;
+ }
+
+ const SkPath* pathPtr = &path;
+ SkPath tmpPath;
+
+ if (prePathMatrix) {
+ if (pathIsMutable) {
+ const_cast<SkPath*>(pathPtr)->transform(*prePathMatrix);
+ } else {
+ path.transform(*prePathMatrix, &tmpPath);
+ pathPtr = &tmpPath;
+ }
+ }
+
+ SkPath fillPath;
+ GrContext::PathFills fill = GrContext::kHairLine_PathFill;
+
+ if (paint.getFillPath(*pathPtr, &fillPath)) {
+ switch (fillPath.getFillType()) {
+ case SkPath::kWinding_FillType:
+ fill = GrContext::kWinding_PathFill;
+ break;
+ case SkPath::kEvenOdd_FillType:
+ fill = GrContext::kEvenOdd_PathFill;
+ break;
+ case SkPath::kInverseWinding_FillType:
+ fill = GrContext::kInverseWinding_PathFill;
+ break;
+ case SkPath::kInverseEvenOdd_FillType:
+ fill = GrContext::kInverseEvenOdd_PathFill;
+ break;
+ default:
+ SkDebugf("Unsupported path fill type");
+ return;
+ }
+ }
+
+ SkGrPathIter iter(fillPath);
+ fContext->drawPath(&iter, fill, shader.useTex());
+}
+
+/*
+ * This value must not exceed the GPU's texture dimension limit, but it can
+ * be smaller, if that helps avoid very large single textures hurting the
+ * cache.
+ */
+#define MAX_TEXTURE_DIM 512
+
+void SkGpuDevice::drawBitmap(const SkDraw& draw,
+ const SkBitmap& bitmap,
+ const SkIRect* srcRectPtr,
+ const SkMatrix& m,
+ const SkPaint& paint) {
+ CHECK_SHOULD_DRAW(draw);
+
+ SkIRect srcRect;
+ if (NULL == srcRectPtr) {
+ srcRect.set(0, 0, bitmap.width(), bitmap.height());
+ } else {
+ srcRect = *srcRectPtr;
+ }
+
+ if (bitmap.getTexture() || (bitmap.width() <= MAX_TEXTURE_DIM &&
+ bitmap.height() <= MAX_TEXTURE_DIM)) {
+ // take the fast case
+ this->internalDrawBitmap(draw, bitmap, srcRect, m, paint);
+ return;
+ }
+
+ // undo the translate done by SkCanvas
+ int DX = SkMax32(0, srcRect.fLeft);
+ int DY = SkMax32(0, srcRect.fTop);
+ // compute clip bounds in local coordinates
+ SkIRect clipRect;
+ {
+ SkRect r;
+ r.set(draw.fClip->getBounds());
+ SkMatrix matrix, inverse;
+ matrix.setConcat(*draw.fMatrix, m);
+ if (!matrix.invert(&inverse)) {
+ return;
+ }
+ inverse.mapRect(&r);
+ r.roundOut(&clipRect);
+ // apply the canvas' translate to our local clip
+ clipRect.offset(DX, DY);
+ }
+
+ int nx = bitmap.width() / MAX_TEXTURE_DIM;
+ int ny = bitmap.height() / MAX_TEXTURE_DIM;
+ for (int x = 0; x <= nx; x++) {
+ for (int y = 0; y <= ny; y++) {
+ SkIRect tileR;
+ tileR.set(x * MAX_TEXTURE_DIM, y * MAX_TEXTURE_DIM,
+ (x + 1) * MAX_TEXTURE_DIM, (y + 1) * MAX_TEXTURE_DIM);
+ if (!SkIRect::Intersects(tileR, clipRect)) {
+ continue;
+ }
+
+ SkIRect srcR = tileR;
+ if (!srcR.intersect(srcRect)) {
+ continue;
+ }
+
+ SkBitmap tmpB;
+ if (bitmap.extractSubset(&tmpB, tileR)) {
+ // now offset it to make it "local" to our tmp bitmap
+ srcR.offset(-tileR.fLeft, -tileR.fTop);
+
+ SkMatrix tmpM(m);
+ {
+ int dx = tileR.fLeft - DX + SkMax32(0, srcR.fLeft);
+ int dy = tileR.fTop - DY + SkMax32(0, srcR.fTop);
+ tmpM.preTranslate(SkIntToScalar(dx), SkIntToScalar(dy));
+ }
+ this->internalDrawBitmap(draw, tmpB, srcR, tmpM, paint);
+ }
+ }
+ }
+}
+
+/*
+ * This is called by drawBitmap(), which has to handle images that may be too
+ * large to be represented by a single texture.
+ *
+ * internalDrawBitmap assumes that the specified bitmap will fit in a texture.
+ */
+void SkGpuDevice::internalDrawBitmap(const SkDraw& draw,
+ const SkBitmap& bitmap,
+ const SkIRect& srcRect,
+ const SkMatrix& m,
+ const SkPaint& paint) {
+ SkASSERT(bitmap.width() <= MAX_TEXTURE_DIM &&
+ bitmap.height() <= MAX_TEXTURE_DIM);
+
+ SkAutoLockPixels alp(bitmap);
+ if (!bitmap.getTexture() && !bitmap.readyToDraw()) {
+ return;
+ }
+
+ GrSamplerState sampler(paint.isFilterBitmap()); // defaults to clamp
+ // the cache lock below (SkAutoCachedTexture) will call setTexture for us
+ fContext->setSamplerState(sampler);
+
+ GrTexture* texture;
+ SkAutoCachedTexture act(this, bitmap, sampler, &texture);
+ if (NULL == texture) {
+ return;
+ }
+
+ GrVertexLayout layout = GrDrawTarget::kSeparateTexCoord_VertexLayoutBit;
+
+ GrPoint* vertex;
+ if (!fContext->reserveAndLockGeometry(layout, 4,
+ 0, (void**)&vertex, NULL)) {
+ return;
+ }
+
+ {
+ GrMatrix grmat;
+ SkGr::SkMatrix2GrMatrix(m, &grmat);
+ vertex[0].setIRectFan(0, 0, srcRect.width(), srcRect.height(),
+ 2*sizeof(GrPoint));
+ grmat.mapPointsWithStride(vertex, 2*sizeof(GrPoint), 4);
+ }
+
+ SkScalar left = SkFixedToScalar((srcRect.fLeft << 16) /
+ texture->allocWidth());
+ SkScalar right = SkFixedToScalar((srcRect.fRight << 16) /
+ texture->allocWidth());
+ SkScalar top = SkFixedToScalar((srcRect.fTop << 16) /
+ texture->allocHeight());
+ SkScalar bottom = SkFixedToScalar((srcRect.fBottom << 16) /
+ texture->allocHeight());
+ vertex[1].setRectFan(left, top, right, bottom, 2*sizeof(GrPoint));
+
+ fContext->setTextureMatrix(GrMatrix::I());
+ // now draw the mesh
+ sk_gr_set_paint(fContext, paint, true);
+ fContext->drawNonIndexed(GrGpu::kTriangleFan_PrimitiveType, 0, 4);
+ fContext->releaseReservedGeometry();
+}
+
+static void gl_drawSprite(GrContext* ctx,
+ int x, int y, int w, int h, const SkPoint& max,
+ const SkPaint& paint) {
+ GrAutoViewMatrix avm(ctx, GrMatrix::I());
+
+ ctx->setSamplerState(GrSamplerState::ClampNoFilter());
+ ctx->setTextureMatrix(GrMatrix::I());
+
+ GrPoint* vertex;
+ GrVertexLayout layout = GrGpu::kSeparateTexCoord_VertexLayoutBit;
+ if (!ctx->reserveAndLockGeometry(layout, 4, 0, (void**)&vertex, NULL)) {
+ return;
+ }
+
+ vertex[1].setRectFan(0, 0, max.fX, max.fY, 2*sizeof(GrPoint));
+
+ vertex[0].setIRectFan(x, y, x + w, y + h, 2*sizeof(GrPoint));
+
+ sk_gr_set_paint(ctx, paint, true);
+ // should look to use glDrawTexi() as we do for text...
+ ctx->drawNonIndexed(GrGpu::kTriangleFan_PrimitiveType, 0, 4);
+ ctx->releaseReservedGeometry();
+}
+
+void SkGpuDevice::drawSprite(const SkDraw& draw, const SkBitmap& bitmap,
+ int left, int top, const SkPaint& paint) {
+ CHECK_SHOULD_DRAW(draw);
+
+ SkAutoLockPixels alp(bitmap);
+ if (!bitmap.getTexture() && !bitmap.readyToDraw()) {
+ return;
+ }
+
+ SkPoint max;
+ GrTexture* texture;
+ SkAutoCachedTexture act(this, bitmap, GrSamplerState::ClampNoFilter(),
+ &texture);
+
+ max.set(SkFixedToScalar((texture->contentWidth() << 16) /
+ texture->allocWidth()),
+ SkFixedToScalar((texture->contentHeight() << 16) /
+ texture->allocHeight()));
+ gl_drawSprite(fContext, left, top, bitmap.width(), bitmap.height(), max, paint);
+}
+
+void SkGpuDevice::drawDevice(const SkDraw& draw, SkDevice* dev,
+ int x, int y, const SkPaint& paint) {
+ CHECK_SHOULD_DRAW(draw);
+
+ SkPoint max;
+ if (((SkGpuDevice*)dev)->bindDeviceAsTexture(&max)) {
+ const SkBitmap& bm = dev->accessBitmap(false);
+ int w = bm.width();
+ int h = bm.height();
+ gl_drawSprite(fContext, x, y, w, h, max, paint);
+ }
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+// must be in SkCanvas::VertexMode order
+static const GrGpu::PrimitiveType gVertexMode2PrimitiveType[] = {
+ GrGpu::kTriangles_PrimitiveType,
+ GrGpu::kTriangleStrip_PrimitiveType,
+ GrGpu::kTriangleFan_PrimitiveType,
+};
+
+void SkGpuDevice::drawVertices(const SkDraw& draw, SkCanvas::VertexMode vmode,
+ int vertexCount, const SkPoint vertices[],
+ const SkPoint texs[], const SkColor colors[],
+ SkXfermode* xmode,
+ const uint16_t indices[], int indexCount,
+ const SkPaint& paint) {
+ CHECK_SHOULD_DRAW(draw);
+
+ sk_gr_set_paint(fContext, paint);
+
+ TexCache* cache = NULL;
+
+ bool useTexture = false;
+
+ AutoPaintShader autoShader;
+
+ if (texs) {
+ autoShader.init(this, paint, *draw.fMatrix);
+
+ if (autoShader.failed()) {
+ return;
+ }
+ useTexture = autoShader.useTex();
+ }
+
+ bool releaseVerts = false;
+ GrVertexLayout layout = 0;
+ if (useTexture) {
+ layout |= GrDrawTarget::kSeparateTexCoord_VertexLayoutBit;
+ }
+ if (NULL != colors) {
+ layout |= GrDrawTarget::kColor_VertexLayoutBit;
+ }
+
+ #if SK_SCALAR_IS_GR_SCALAR
+ if (!layout) {
+ fContext->setVertexSourceToArray(vertices, layout);
+ } else
+ #endif
+ {
+ void* verts;
+ releaseVerts = true;
+ if (!fContext->reserveAndLockGeometry(layout, vertexCount, 0,
+ &verts, NULL)) {
+ return;
+ }
+ int texOffset, colorOffset;
+ uint32_t stride = GrDrawTarget::VertexSizeAndOffsets(layout,
+ &texOffset,
+ &colorOffset);
+ for (int i = 0; i < vertexCount; ++i) {
+ GrPoint* p = (GrPoint*)((intptr_t)verts + i * stride);
+ p->set(SkScalarToGrScalar(vertices[i].fX),
+ SkScalarToGrScalar(vertices[i].fY));
+ if (texOffset > 0) {
+ GrPoint* t = (GrPoint*)((intptr_t)p + texOffset);
+ t->set(SkScalarToGrScalar(texs[i].fX),
+ SkScalarToGrScalar(texs[i].fY));
+ }
+ if (colorOffset > 0) {
+ uint32_t* color = (uint32_t*) ((intptr_t)p + colorOffset);
+ *color = SkGr::SkColor2GrColor(colors[i]);
+ }
+ }
+ }
+ if (indices) {
+ fContext->setIndexSourceToArray(indices);
+ fContext->drawIndexed(gVertexMode2PrimitiveType[vmode], 0, 0,
+ vertexCount, indexCount);
+ } else {
+ fContext->drawNonIndexed(gVertexMode2PrimitiveType[vmode],
+ 0, vertexCount);
+ }
+ if (cache) {
+ this->unlockCachedTexture(cache);
+ }
+ if (releaseVerts) {
+ fContext->releaseReservedGeometry();
+ }
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+static void GlyphCacheAuxProc(void* data) {
+ delete (GrFontScaler*)data;
+}
+
+static GrFontScaler* get_gr_font_scaler(SkGlyphCache* cache) {
+ void* auxData;
+ GrFontScaler* scaler = NULL;
+ if (cache->getAuxProcData(GlyphCacheAuxProc, &auxData)) {
+ scaler = (GrFontScaler*)auxData;
+ }
+ if (NULL == scaler) {
+ scaler = new SkGrFontScaler(cache);
+ cache->setAuxProc(GlyphCacheAuxProc, scaler);
+ }
+ return scaler;
+}
+
+static void SkGPU_Draw1Glyph(const SkDraw1Glyph& state,
+ SkFixed fx, SkFixed fy,
+ const SkGlyph& glyph) {
+ SkASSERT(glyph.fWidth > 0 && glyph.fHeight > 0);
+
+ GrSkDrawProcs* procs = (GrSkDrawProcs*)state.fDraw->fProcs;
+
+ if (NULL == procs->fFontScaler) {
+ procs->fFontScaler = get_gr_font_scaler(state.fCache);
+ }
+ procs->fTextContext->drawPackedGlyph(GrGlyph::Pack(glyph.getGlyphID(), fx, 0),
+ SkIntToFixed(SkFixedFloor(fx)), fy,
+ procs->fFontScaler);
+}
+
+SkDrawProcs* SkGpuDevice::initDrawForText(const SkPaint& paint,
+ GrTextContext* context) {
+
+ // deferred allocation
+ if (NULL == fDrawProcs) {
+ fDrawProcs = new GrSkDrawProcs;
+ fDrawProcs->fD1GProc = SkGPU_Draw1Glyph;
+ fDrawProcs->fContext = fContext;
+ }
+
+ // init our (and GL's) state
+ fDrawProcs->fTextContext = context;
+ fDrawProcs->fFontScaler = NULL;
+ return fDrawProcs;
+}
+
+void SkGpuDevice::drawText(const SkDraw& draw, const void* text,
+ size_t byteLength, SkScalar x, SkScalar y,
+ const SkPaint& paint) {
+ CHECK_SHOULD_DRAW(draw);
+
+ if (draw.fMatrix->getType() & SkMatrix::kPerspective_Mask) {
+ // this guy will just call our drawPath()
+ draw.drawText((const char*)text, byteLength, x, y, paint);
+ } else {
+ SkAutoExtMatrix aem(draw.fExtMatrix);
+ SkDraw myDraw(draw);
+ sk_gr_set_paint(fContext, paint);
+ GrTextContext context(fContext, aem.extMatrix());
+ myDraw.fProcs = this->initDrawForText(paint, &context);
+ this->INHERITED::drawText(myDraw, text, byteLength, x, y, paint);
+ }
+}
+
+void SkGpuDevice::drawPosText(const SkDraw& draw, const void* text,
+ size_t byteLength, const SkScalar pos[],
+ SkScalar constY, int scalarsPerPos,
+ const SkPaint& paint) {
+ CHECK_SHOULD_DRAW(draw);
+
+ if (draw.fMatrix->getType() & SkMatrix::kPerspective_Mask) {
+ // this guy will just call our drawPath()
+ draw.drawPosText((const char*)text, byteLength, pos, constY,
+ scalarsPerPos, paint);
+ } else {
+ SkAutoExtMatrix aem(draw.fExtMatrix);
+ SkDraw myDraw(draw);
+ sk_gr_set_paint(fContext, paint);
+ GrTextContext context(fContext, aem.extMatrix());
+ myDraw.fProcs = this->initDrawForText(paint, &context);
+ this->INHERITED::drawPosText(myDraw, text, byteLength, pos, constY,
+ scalarsPerPos, paint);
+ }
+}
+
+void SkGpuDevice::drawTextOnPath(const SkDraw& draw, const void* text,
+ size_t len, const SkPath& path,
+ const SkMatrix* m, const SkPaint& paint) {
+ CHECK_SHOULD_DRAW(draw);
+
+ SkASSERT(draw.fDevice == this);
+ draw.drawTextOnPath((const char*)text, len, path, m, paint);
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+SkGpuDevice::TexCache* SkGpuDevice::lockCachedTexture(const SkBitmap& bitmap,
+ const GrSamplerState& sampler,
+ GrTexture** texture,
+ bool forDeviceRenderTarget) {
+ GrContext* ctx = this->context();
+ uint32_t p0, p1;
+ if (forDeviceRenderTarget) {
+ p0 = p1 = -1;
+ } else {
+ p0 = bitmap.getGenerationID();
+ p1 = bitmap.pixelRefOffset();
+ }
+
+ GrTexture* newTexture = NULL;
+ GrTextureKey key(p0, p1, bitmap.width(), bitmap.height());
+ GrTextureEntry* entry = ctx->findAndLockTexture(&key, sampler);
+
+ if (NULL == entry) {
+
+ if (forDeviceRenderTarget) {
+ const GrGpu::TextureDesc desc = {
+ GrGpu::kRenderTarget_TextureFlag,
+ GrGpu::kNone_AALevel,
+ bitmap.width(),
+ bitmap.height(),
+ SkGr::Bitmap2PixelConfig(bitmap)
+ };
+ entry = ctx->createAndLockTexture(&key, sampler, desc, NULL, 0);
+
+ } else {
+ entry = sk_gr_create_bitmap_texture(ctx, &key, sampler, bitmap);
+ }
+ if (NULL == entry) {
+ GrPrintf("---- failed to create texture for cache [%d %d]\n",
+ bitmap.width(), bitmap.height());
+ }
+ }
+
+ if (NULL != entry) {
+ newTexture = entry->texture();
+ ctx->setTexture(newTexture);
+ if (texture) {
+ *texture = newTexture;
+ }
+ // IMPORTANT: We can't allow another SkGpuDevice to get this
+ // cache entry until this one is destroyed!
+ if (forDeviceRenderTarget) {
+ ctx->detachCachedTexture(entry);
+ }
+ }
+ return (TexCache*)entry;
+}
+
+void SkGpuDevice::unlockCachedTexture(TexCache* cache) {
+ this->context()->unlockTexture((GrTextureEntry*)cache);
+}
+
+
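
One detail in SkGpuDevice::drawBitmap() above that is easy to miss is the tiling: bitmaps wider or taller than MAX_TEXTURE_DIM are broken into 512-aligned tiles so each piece fits in a single texture, and tiles that miss the clip or the source rect are skipped. The sketch below (not part of the patch) pulls just the tile enumeration out on its own; for a 1200x800 bitmap it visits 3 columns by 2 rows of tiles.

// Illustrative sketch of the tiling math in drawBitmap() -- not part of the patch.
#include "SkRect.h"

static void for_each_tile(int bmWidth, int bmHeight,
                          void (*visit)(const SkIRect& tile)) {
    const int kMaxDim = 512;             // mirrors MAX_TEXTURE_DIM above
    int nx = bmWidth / kMaxDim;          // e.g. 1200 / 512 == 2
    int ny = bmHeight / kMaxDim;         // e.g.  800 / 512 == 1
    for (int x = 0; x <= nx; x++) {      // 0..nx inclusive -> 3 columns
        for (int y = 0; y <= ny; y++) {  // 0..ny inclusive -> 2 rows
            SkIRect tile;
            tile.set(x * kMaxDim, y * kMaxDim,
                     (x + 1) * kMaxDim, (y + 1) * kMaxDim);
            // drawBitmap() intersects this with the clip bounds and srcRect
            // before drawing, skipping tiles that fall outside either
            visit(tile);
        }
    }
}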
diff --git a/gpu/src/skia/SkGpuDevice.h b/gpu/src/skia/SkGpuDevice.h
new file mode 100644
index 0000000000..e42e99709d
--- /dev/null
+++ b/gpu/src/skia/SkGpuDevice.h
@@ -0,0 +1,176 @@
+/*
+ Copyright 2010 Google Inc.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+ */
+
+
+#ifndef SkGpuDevice_DEFINED
+#define SkGpuDevice_DEFINED
+
+#include "SkGr.h"
+#include "SkDevice.h"
+#include "SkRegion.h"
+
+struct SkDrawProcs;
+class SkGpuCanvas;
+struct GrSkDrawProcs;
+class GrTextContext;
+
+/**
+ * Subclass of SkDevice, which directs all drawing to the GrGpu owned by the
+ * canvas.
+ */
+class SkGpuDevice : public SkDevice {
+public:
+ SkGpuDevice(SkGpuCanvas*, const SkBitmap& bitmap, bool isLayer);
+ virtual ~SkGpuDevice();
+
+ GrContext* context() const { return fContext; }
+
+ /**
+ * If this device was built for rendering as a layer (i.e. offscreen),
+ * then this will return the platform-specific handle to that GPU resource.
+ * For example, in OpenGL, this will return the FBO's texture ID.
+ * If this device was not built for rendering as a layer, then 0
+ * is returned.
+ */
+ intptr_t getLayerTextureHandle() const;
+
+ /**
+ * Attaches the device to a rendering surface. This device will then render
+ * to the surface.
+ * For example, in OpenGL, the device will interpret handle as an FBO ID
+ * and drawing to the device would direct GL to the FBO.
+ */
+ void bindDeviceToTargetHandle(intptr_t handle);
+
+ // call to set the clip to the specified rect
+ void scissor(const SkIRect&);
+
+ /**
+ * Override from SkDevice, so we can set our FBO to be the render target.
+ * The canvas parameter must be an SkGpuCanvas.
+ */
+ virtual void gainFocus(SkCanvas*, const SkMatrix&, const SkRegion&);
+
+ virtual SkGpuTexture* accessTexture() { return (SkGpuTexture*)fTexture; }
+
+ // overrides from SkDevice
+
+ virtual bool readPixels(const SkIRect& srcRect, SkBitmap* bitmap);
+ virtual void writePixels(const SkBitmap& bitmap, int x, int y);
+
+ virtual void setMatrixClip(const SkMatrix& matrix, const SkRegion& clip);
+
+ virtual void drawPaint(const SkDraw&, const SkPaint& paint);
+ virtual void drawPoints(const SkDraw&, SkCanvas::PointMode mode, size_t count,
+ const SkPoint[], const SkPaint& paint);
+ virtual void drawRect(const SkDraw&, const SkRect& r,
+ const SkPaint& paint);
+ virtual void drawPath(const SkDraw&, const SkPath& path,
+ const SkPaint& paint, const SkMatrix* prePathMatrix,
+ bool pathIsMutable);
+ virtual void drawBitmap(const SkDraw&, const SkBitmap& bitmap,
+ const SkIRect* srcRectOrNull,
+ const SkMatrix& matrix, const SkPaint& paint);
+ virtual void drawSprite(const SkDraw&, const SkBitmap& bitmap,
+ int x, int y, const SkPaint& paint);
+ virtual void drawText(const SkDraw&, const void* text, size_t len,
+ SkScalar x, SkScalar y, const SkPaint& paint);
+ virtual void drawPosText(const SkDraw&, const void* text, size_t len,
+ const SkScalar pos[], SkScalar constY,
+ int scalarsPerPos, const SkPaint& paint);
+ virtual void drawTextOnPath(const SkDraw&, const void* text, size_t len,
+ const SkPath& path, const SkMatrix* matrix,
+ const SkPaint& paint);
+ virtual void drawVertices(const SkDraw&, SkCanvas::VertexMode, int vertexCount,
+ const SkPoint verts[], const SkPoint texs[],
+ const SkColor colors[], SkXfermode* xmode,
+ const uint16_t indices[], int indexCount,
+ const SkPaint& paint);
+ virtual void drawDevice(const SkDraw&, SkDevice*, int x, int y,
+ const SkPaint&);
+
+ virtual void flush() { fContext->flush(false); }
+
+ /**
+ * Makes this device's render target current in the underlying 3D API.
+ * Also implicitly flushes.
+ */
+ virtual void makeRenderTargetCurrent();
+
+protected:
+ class TexCache;
+ TexCache* lockCachedTexture(const SkBitmap& bitmap,
+ const GrSamplerState& sampler,
+ GrTexture** texture,
+ bool forDeviceRenderTarget = false);
+ void unlockCachedTexture(TexCache*);
+
+ class SkAutoCachedTexture {
+ public:
+ SkAutoCachedTexture();
+ SkAutoCachedTexture(SkGpuDevice* device,
+ const SkBitmap& bitmap,
+ const GrSamplerState& sampler,
+ GrTexture** texture);
+ ~SkAutoCachedTexture();
+
+ GrTexture* set(SkGpuDevice*, const SkBitmap&, const GrSamplerState&);
+
+ private:
+ SkGpuDevice* fDevice;
+ TexCache* fTex;
+ };
+ friend class SkAutoTexCache;
+
+private:
+ GrContext* fContext;
+ GrSkDrawProcs* fDrawProcs;
+
+ // state for our offscreen render-target
+ TexCache* fCache;
+ GrTexture* fTexture;
+ GrRenderTarget* fRenderTarget;
+ bool fNeedClear;
+ bool fNeedPrepareRenderTarget;
+
+ SkDrawProcs* initDrawForText(const SkPaint&, GrTextContext*);
+ bool bindDeviceAsTexture(SkPoint* max);
+
+ void prepareRenderTarget(const SkDraw&);
+ void internalDrawBitmap(const SkDraw&, const SkBitmap&,
+ const SkIRect&, const SkMatrix&, const SkPaint&);
+
+ class AutoPaintShader {
+ public:
+ AutoPaintShader();
+ AutoPaintShader(SkGpuDevice*, const SkPaint& paint, const SkMatrix&);
+
+ void init(SkGpuDevice*, const SkPaint& paint, const SkMatrix&);
+
+ bool failed() const { return !fSuccess; }
+ bool useTex() const { return fTexture != 0; }
+ private:
+ GrTexture* fTexture;
+ SkAutoCachedTexture fCachedTexture;
+ bool fSuccess;
+ };
+ friend class AutoPaintShader;
+
+ typedef SkDevice INHERITED;
+};
+
+#endif
+
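
As a companion to the getLayerTextureHandle() / bindDeviceToTargetHandle() comments in the header above, here is a hedged sketch (not part of the patch) of how an embedder might point a device at a framebuffer object it created itself. The helper name and the fboID parameter are placeholders; only the two SkGpuDevice methods declared in the header are used.

// Hypothetical helper -- not part of the patch.
static void attach_device_to_fbo(SkGpuDevice* device, unsigned fboID) {
    // the GL backend interprets the handle as an FBO id (per the header comment)
    device->bindDeviceToTargetHandle((intptr_t)fboID);
    // make the new target current in the underlying 3D API; this also flushes
    device->makeRenderTargetCurrent();
}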
diff --git a/gpu/src/skia/SkGr.cpp b/gpu/src/skia/SkGr.cpp
new file mode 100644
index 0000000000..8849db767a
--- /dev/null
+++ b/gpu/src/skia/SkGr.cpp
@@ -0,0 +1,206 @@
+/*
+ Copyright 2010 Google Inc.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+ */
+
+
+#include "SkGr.h"
+
+/* Fill out the buffer with the compressed format Ganesh expects from a
+ colortable-based bitmap: [palette (colortable) + indices].
+
+ At the moment Ganesh only supports the 8-bit version. If Ganesh allowed other
+ index sizes, we could detect that colortable.count is <= 16 and repack the
+ indices as nibbles to save RAM, but that would be a lot slower than memcpy,
+ so we skip it for now.
+
+ Ganesh wants a full 256-entry palette, even though Skia's ctable is only as
+ big as colortable.count says it is.
+ */
+static void build_compressed_data(void* buffer, const SkBitmap& bitmap) {
+ SkASSERT(SkBitmap::kIndex8_Config == bitmap.config());
+
+ SkAutoLockPixels apl(bitmap);
+ if (!bitmap.readyToDraw()) {
+ SkASSERT(!"bitmap not ready to draw!");
+ return;
+ }
+
+ SkColorTable* ctable = bitmap.getColorTable();
+ char* dst = (char*)buffer;
+
+ memcpy(dst, ctable->lockColors(), ctable->count() * sizeof(SkPMColor));
+ ctable->unlockColors(false);
+
+ // always skip the full 256 entries, even if we memcpy'd fewer
+ dst += GrGpu::kColorTableSize;
+
+ if (bitmap.width() == bitmap.rowBytes()) {
+ memcpy(dst, bitmap.getPixels(), bitmap.getSize());
+ } else {
+ // need to trim off the extra bytes per row
+ size_t width = bitmap.width();
+ size_t rowBytes = bitmap.rowBytes();
+ const char* src = (const char*)bitmap.getPixels();
+ for (int y = 0; y < bitmap.height(); y++) {
+ memcpy(dst, src, width);
+ src += rowBytes;
+ dst += width;
+ }
+ }
+}
+
+////////////////////////////////////////////////////////////////////////////////
+
+GrTextureEntry* sk_gr_create_bitmap_texture(GrContext* ctx,
+ GrTextureKey* key,
+ const GrSamplerState& sampler,
+ const SkBitmap& origBitmap) {
+ SkAutoLockPixels alp(origBitmap);
+ if (!origBitmap.readyToDraw()) {
+ return NULL;
+ }
+
+ SkBitmap tmpBitmap;
+
+ const SkBitmap* bitmap = &origBitmap;
+
+ GrGpu::TextureDesc desc = {
+ 0,
+ GrGpu::kNone_AALevel,
+ bitmap->width(),
+ bitmap->height(),
+ SkGr::Bitmap2PixelConfig(*bitmap)
+ };
+
+ if (SkBitmap::kIndex8_Config == bitmap->config()) {
+ // build_compressed_data doesn't do npot->pot expansion
+ // and paletted textures can't be sub-updated
+ if (ctx->supportsIndex8PixelConfig(sampler,
+ bitmap->width(), bitmap->height())) {
+ size_t imagesize = bitmap->width() * bitmap->height() +
+ GrGpu::kColorTableSize;
+ SkAutoMalloc storage(imagesize);
+
+ build_compressed_data(storage.get(), origBitmap);
+
+ // our compressed data will be trimmed, so pass width() for its
+ // "rowBytes", since they are the same now.
+ return ctx->createAndLockTexture(key, sampler, desc, storage.get(),
+ bitmap->width());
+
+ } else {
+ origBitmap.copyTo(&tmpBitmap, SkBitmap::kARGB_8888_Config);
+ // now bitmap points to our temp, which has been promoted to 32bits
+ bitmap = &tmpBitmap;
+ }
+ }
+
+ desc.fFormat = SkGr::Bitmap2PixelConfig(*bitmap);
+ return ctx->createAndLockTexture(key, sampler, desc, bitmap->getPixels(),
+ bitmap->rowBytes());
+}
+
+////////////////////////////////////////////////////////////////////////////////
+
+void sk_gr_set_paint(GrContext* ctx, const SkPaint& paint, bool justAlpha) {
+ ctx->setDither(paint.isDither());
+ ctx->setAntiAlias(paint.isAntiAlias());
+
+ if (justAlpha) {
+ ctx->setAlpha(paint.getAlpha());
+ } else {
+ ctx->setColor(SkGr::SkColor2GrColor(paint.getColor()));
+ }
+
+ SkXfermode::Coeff sm = SkXfermode::kOne_Coeff;
+ SkXfermode::Coeff dm = SkXfermode::kISA_Coeff;
+
+ SkXfermode* mode = paint.getXfermode();
+ if (mode) {
+ mode->asCoeff(&sm, &dm);
+ }
+ ctx->setBlendFunc(sk_blend_to_grblend(sm), sk_blend_to_grblend(dm));
+}
+
+////////////////////////////////////////////////////////////////////////////////
+
+SkGrPathIter::Command SkGrPathIter::next(GrPoint pts[]) {
+ GrAssert(NULL != pts);
+#if SK_SCALAR_IS_GR_SCALAR
+ return sk_path_verb_to_gr_path_command(fIter.next((SkPoint*)pts));
+#else
+ Command cmd = sk_path_verb_to_gr_path_command(fIter.next(fPoints));
+ int n = NumCommandPoints(cmd);
+ for (int i = 0; i < n; ++i) {
+ pts[i].fX = SkScalarToGrScalar(fPoints[i].fX);
+ pts[i].fY = SkScalarToGrScalar(fPoints[i].fY);
+ }
+ return cmd;
+#endif
+}
+
+SkGrPathIter::Command SkGrPathIter::next() {
+ return sk_path_verb_to_gr_path_command(fIter.next(NULL));
+}
+
+void SkGrPathIter::rewind() {
+ fIter.setPath(fPath, false);
+}
+
+GrPathIter::ConvexHint SkGrPathIter::hint() const {
+ return fPath.isConvex() ? GrPathIter::kConvex_ConvexHint :
+ GrPathIter::kNone_ConvexHint;
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+void SkGrClipIterator::computeBounds(GrIRect* bounds) {
+ const SkRegion* rgn = fIter.rgn();
+ if (rgn) {
+ SkGr::SetIRect(bounds, rgn->getBounds());
+ } else {
+ bounds->setEmpty();
+ }
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+GrTexture::PixelConfig SkGr::BitmapConfig2PixelConfig(SkBitmap::Config config,
+ bool isOpaque) {
+ switch (config) {
+ case SkBitmap::kA8_Config:
+ return GrTexture::kAlpha_8_PixelConfig;
+ case SkBitmap::kIndex8_Config:
+ return GrTexture::kIndex_8_PixelConfig;
+ case SkBitmap::kRGB_565_Config:
+ return GrTexture::kRGB_565_PixelConfig;
+ case SkBitmap::kARGB_4444_Config:
+ return GrTexture::kRGBA_4444_PixelConfig;
+ case SkBitmap::kARGB_8888_Config:
+ if (isOpaque) {
+ return GrTexture::kRGBX_8888_PixelConfig;
+ } else {
+ return GrTexture::kRGBA_8888_PixelConfig;
+ }
+ default:
+ return GrTexture::kUnknown_PixelConfig;
+ }
+}
+
+void SkGr::AbandonAllTextures(GrContext* ctx) {
+ ctx->abandonAllTextures();
+}
+
+
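
The [palette + indices] buffer that build_compressed_data() fills is sized in sk_gr_create_bitmap_texture() above as width * height + GrGpu::kColorTableSize. The sketch below (not part of the patch) simply restates that computation so the layout is explicit: a fixed full-size palette block followed by one tightly packed index byte per pixel, with no row padding.

// Sketch of the buffer sizing used above -- not part of the patch.
#include "SkGr.h"

static size_t compressed_index8_size(const SkBitmap& bitmap) {
    SkASSERT(SkBitmap::kIndex8_Config == bitmap.config());
    // [ full 256-entry palette ][ one index byte per pixel, rows packed tight ]
    return GrGpu::kColorTableSize + bitmap.width() * bitmap.height();
}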
diff --git a/gpu/src/skia/SkGrFontScaler.cpp b/gpu/src/skia/SkGrFontScaler.cpp
new file mode 100644
index 0000000000..5c88717b88
--- /dev/null
+++ b/gpu/src/skia/SkGrFontScaler.cpp
@@ -0,0 +1,142 @@
+/*
+ Copyright 2010 Google Inc.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+ */
+
+
+#include "SkGr.h"
+#include "SkDescriptor.h"
+#include "SkGlyphCache.h"
+
+class SkGrDescKey : public GrKey {
+public:
+ explicit SkGrDescKey(const SkDescriptor& desc);
+ virtual ~SkGrDescKey();
+
+protected:
+ // overrides
+ virtual bool lt(const GrKey& rh) const;
+ virtual bool eq(const GrKey& rh) const;
+
+private:
+ SkDescriptor* fDesc;
+ enum {
+ kMaxStorageInts = 16
+ };
+ uint32_t fStorage[kMaxStorageInts];
+};
+
+///////////////////////////////////////////////////////////////////////////////
+
+SkGrDescKey::SkGrDescKey(const SkDescriptor& desc) : GrKey(desc.getChecksum()) {
+ size_t size = desc.getLength();
+ if (size <= sizeof(fStorage)) {
+ fDesc = (SkDescriptor*)fStorage;
+ } else {
+ fDesc = SkDescriptor::Alloc(size);
+ }
+ memcpy(fDesc, &desc, size);
+}
+
+SkGrDescKey::~SkGrDescKey() {
+ if (fDesc != (SkDescriptor*)fStorage) {
+ SkDescriptor::Free(fDesc);
+ }
+}
+
+bool SkGrDescKey::lt(const GrKey& rh) const {
+ const SkDescriptor* srcDesc = ((const SkGrDescKey*)&rh)->fDesc;
+ size_t lenLH = fDesc->getLength();
+ size_t lenRH = srcDesc->getLength();
+ int cmp = memcmp(fDesc, srcDesc, SkMin32(lenLH, lenRH));
+ if (0 == cmp) {
+ return lenLH < lenRH;
+ } else {
+ return cmp < 0;
+ }
+}
+
+bool SkGrDescKey::eq(const GrKey& rh) const {
+ const SkDescriptor* srcDesc = ((const SkGrDescKey*)&rh)->fDesc;
+ return fDesc->equals(*srcDesc);
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+SkGrFontScaler::SkGrFontScaler(SkGlyphCache* strike) {
+ fStrike = strike;
+ fKey = NULL;
+}
+
+SkGrFontScaler::~SkGrFontScaler() {
+ GrSafeUnref(fKey);
+}
+
+const GrKey* SkGrFontScaler::getKey() {
+ if (NULL == fKey) {
+ fKey = new SkGrDescKey(fStrike->getDescriptor());
+ }
+ return fKey;
+}
+
+bool SkGrFontScaler::getPackedGlyphBounds(GrGlyph::PackedID packed,
+ GrIRect* bounds) {
+ const SkGlyph& glyph = fStrike->getGlyphIDMetrics(GrGlyph::UnpackID(packed),
+ GrGlyph::UnpackFixedX(packed),
+ GrGlyph::UnpackFixedY(packed));
+ bounds->setXYWH(glyph.fLeft, glyph.fTop, glyph.fWidth, glyph.fHeight);
+ return true;
+
+}
+
+bool SkGrFontScaler::getPackedGlyphImage(GrGlyph::PackedID packed,
+ int width, int height,
+ int dstRB, void* dst) {
+ const SkGlyph& glyph = fStrike->getGlyphIDMetrics(GrGlyph::UnpackID(packed),
+ GrGlyph::UnpackFixedX(packed),
+ GrGlyph::UnpackFixedY(packed));
+ GrAssert(glyph.fWidth == width);
+ GrAssert(glyph.fHeight == height);
+ const void* src = fStrike->findImage(glyph);
+ if (NULL == src) {
+ return false;
+ }
+
+ int srcRB = glyph.rowBytes();
+ if (srcRB == dstRB) {
+ memcpy(dst, src, dstRB * height);
+ } else {
+ for (int y = 0; y < height; y++) {
+ memcpy(dst, src, width);
+ src = (const char*)src + srcRB;
+ dst = (char*)dst + dstRB;
+ }
+ }
+ return true;
+}
+
+bool SkGrFontScaler::getGlyphPath(uint16_t glyphID, GrPath* path) {
+
+ const SkGlyph& glyph = fStrike->getGlyphIDMetrics(glyphID);
+ const SkPath* skPath = fStrike->findPath(glyph);
+ if (skPath) {
+ SkGrPathIter iter(*skPath);
+ path->resetFromIter(&iter);
+ return true;
+ }
+ return false;
+}
+
+
+
diff --git a/gpu/src/skia/SkGrTexturePixelRef.cpp b/gpu/src/skia/SkGrTexturePixelRef.cpp
new file mode 100644
index 0000000000..da9ac1a692
--- /dev/null
+++ b/gpu/src/skia/SkGrTexturePixelRef.cpp
@@ -0,0 +1,30 @@
+/*
+ Copyright 2010 Google Inc.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+ */
+
+
+#include "SkGrTexturePixelRef.h"
+#include "GrTexture.h"
+
+SkGrTexturePixelRef::SkGrTexturePixelRef(GrTexture* tex) {
+ fTexture = tex;
+ GrSafeRef(tex);
+}
+
+SkGrTexturePixelRef::~SkGrTexturePixelRef() {
+ GrSafeUnref(fTexture);
+}
+
+
diff --git a/gpu/src/skia/SkUIView.mm b/gpu/src/skia/SkUIView.mm
new file mode 100644
index 0000000000..667a474fd7
--- /dev/null
+++ b/gpu/src/skia/SkUIView.mm
@@ -0,0 +1,858 @@
+#import "SkUIView.h"
+#include <QuartzCore/QuartzCore.h>
+
+#include "SkGpuCanvas.h"
+#include "SkCGUtils.h"
+#include "GrContext.h"
+
+#define SKWIND_CONFIG SkBitmap::kRGB_565_Config
+//#define SKWIND_CONFIG SkBitmap::kARGB_8888_Config
+#define SKGL_CONFIG kEAGLColorFormatRGB565
+//#define SKGL_CONFIG kEAGLColorFormatRGBA8
+
+#define SHOW_FPS
+#define FORCE_REDRAW
+//#define DUMP_FPS_TO_PRINTF
+
+//#define USE_ACCEL_TO_ROTATE
+
+//#define SHOULD_COUNTER_INIT 334
+static int gShouldCounter;
+static bool should_draw() {
+ if (--gShouldCounter == 0) {
+ // printf("\n");
+ }
+ return true;
+ return gShouldCounter >= 0;
+}
+#ifdef SHOULD_COUNTER_INIT
+ bool (*gShouldDrawProc)() = should_draw;
+#else
+ bool (*gShouldDrawProc)() = NULL;
+#endif
+
+//#define USE_GL_1
+#define USE_GL_2
+
+#if defined(USE_GL_1) || defined(USE_GL_2)
+ #define USE_GL
+#endif
+
+@implementation SkUIView
+
+
+@synthesize fWind;
+@synthesize fTitleLabel;
+@synthesize fBackend;
+@synthesize fComplexClip;
+@synthesize fUseWarp;
+
+#include "SkWindow.h"
+#include "SkEvent.h"
+
+static float gScreenScale = 1;
+
+extern SkOSWindow* create_sk_window(void* hwnd);
+
+#define kREDRAW_UIVIEW_GL "sk_redraw_uiview_gl_iOS"
+
+#define TITLE_HEIGHT 44
+
+static const float SCALE_FOR_ZOOM_LENS = 4.0;
+#define Y_OFFSET_FOR_ZOOM_LENS 200
+#define SIZE_FOR_ZOOM_LENS 250
+
+static const float MAX_ZOOM_SCALE = 4.0;
+static const float MIN_ZOOM_SCALE = 2.0 / MAX_ZOOM_SCALE;
+
+extern bool gDoTraceDraw;
+#define DO_TRACE_DRAW_MAX 100
+
+#ifdef SHOW_FPS
+struct FPSState {
+ static const int FRAME_COUNT = 60;
+
+ CFTimeInterval fNow0, fNow1;
+ CFTimeInterval fTime0, fTime1, fTotalTime;
+ int fFrameCounter;
+ int fDrawCounter;
+
+ FPSState() {
+ fTime0 = fTime1 = fTotalTime = 0;
+ fFrameCounter = 0;
+ }
+
+ void startDraw() {
+ fNow0 = CACurrentMediaTime();
+
+ if (0 == fDrawCounter && false) {
+ gDoTraceDraw = true;
+ SkDebugf("\n");
+ }
+ }
+
+ void endDraw() {
+ fNow1 = CACurrentMediaTime();
+
+ if (0 == fDrawCounter) {
+ gDoTraceDraw = true;
+ }
+ if (DO_TRACE_DRAW_MAX == ++fDrawCounter) {
+ fDrawCounter = 0;
+ }
+ }
+
+ void flush(SkOSWindow* wind) {
+ CFTimeInterval now2 = CACurrentMediaTime();
+
+ fTime0 += fNow1 - fNow0;
+ fTime1 += now2 - fNow1;
+
+ if (++fFrameCounter == FRAME_COUNT) {
+ CFTimeInterval totalNow = CACurrentMediaTime();
+ fTotalTime = totalNow - fTotalTime;
+
+ SkMSec ms0 = (int)(1000 * fTime0 / FRAME_COUNT);
+ SkMSec msTotal = (int)(1000 * fTotalTime / FRAME_COUNT);
+
+ SkString str;
+ str.printf("ms: %d [%d], fps: %3.1f", msTotal, ms0,
+ FRAME_COUNT / fTotalTime);
+#ifdef DUMP_FPS_TO_PRINTF
+ SkDebugf("%s\n", str.c_str());
+#else
+ wind->setTitle(str.c_str());
+#endif
+
+ fTotalTime = totalNow;
+ fTime0 = fTime1 = 0;
+ fFrameCounter = 0;
+ }
+ }
+};
+
+static FPSState gFPS;
+
+ #define FPS_StartDraw() gFPS.startDraw()
+ #define FPS_EndDraw() gFPS.endDraw()
+ #define FPS_Flush(wind) gFPS.flush(wind)
+#else
+ #define FPS_StartDraw()
+ #define FPS_EndDraw()
+ #define FPS_Flush(wind)
+#endif
+
+///////////////////////////////////////////////////////////////////////////////
+
+#ifdef USE_GL
++ (Class) layerClass
+{
+ return [CAEAGLLayer class];
+}
+#endif
+
+- (id)initWithMyDefaults {
+ fBackend = kGL_Backend;
+ fUseWarp = false;
+ fRedrawRequestPending = false;
+ fWind = create_sk_window(self);
+ fWind->setConfig(SKWIND_CONFIG);
+ fMatrix.reset();
+ fLocalMatrix.reset();
+ fNeedGestureEnded = false;
+ fNeedFirstPinch = true;
+ fZoomAround = false;
+ fComplexClip = false;
+
+ [self initGestures];
+
+#ifdef USE_GL
+ CAEAGLLayer *eaglLayer = (CAEAGLLayer *)self.layer;
+ eaglLayer.opaque = TRUE;
+ eaglLayer.drawableProperties = [NSDictionary dictionaryWithObjectsAndKeys:
+ [NSNumber numberWithBool:NO],
+ kEAGLDrawablePropertyRetainedBacking,
+ SKGL_CONFIG,
+ kEAGLDrawablePropertyColorFormat,
+ nil];
+
+#ifdef USE_GL_1
+ fGL.fContext = [[EAGLContext alloc] initWithAPI:kEAGLRenderingAPIOpenGLES1];
+#else
+ fGL.fContext = [[EAGLContext alloc] initWithAPI:kEAGLRenderingAPIOpenGLES2];
+#endif
+
+ if (!fGL.fContext || ![EAGLContext setCurrentContext:fGL.fContext])
+ {
+ [self release];
+ return nil;
+ }
+
+ // Create default framebuffer object. The backing will be allocated for the current layer in -layoutSubviews
+ glGenFramebuffersOES(1, &fGL.fFramebuffer);
+ glBindFramebufferOES(GL_FRAMEBUFFER_OES, fGL.fFramebuffer);
+
+ glGenRenderbuffersOES(1, &fGL.fRenderbuffer);
+ glGenRenderbuffersOES(1, &fGL.fStencilbuffer);
+
+ glBindRenderbufferOES(GL_RENDERBUFFER_OES, fGL.fRenderbuffer);
+ glFramebufferRenderbufferOES(GL_FRAMEBUFFER_OES, GL_COLOR_ATTACHMENT0_OES, GL_RENDERBUFFER_OES, fGL.fRenderbuffer);
+
+ glBindRenderbufferOES(GL_RENDERBUFFER_OES, fGL.fStencilbuffer);
+ glFramebufferRenderbufferOES(GL_FRAMEBUFFER_OES, GL_STENCIL_ATTACHMENT_OES, GL_RENDERBUFFER_OES, fGL.fStencilbuffer);
+#endif
+
+#ifdef USE_ACCEL_TO_ROTATE
+ fRotateMatrix.reset();
+ [UIAccelerometer sharedAccelerometer].delegate = self;
+ [UIAccelerometer sharedAccelerometer].updateInterval = 1 / 30.0;
+#endif
+ return self;
+}
+
+- (id)initWithCoder:(NSCoder*)coder {
+ if ((self = [super initWithCoder:coder])) {
+ self = [self initWithMyDefaults];
+ }
+ return self;
+}
+
+- (id)initWithFrame:(CGRect)frame {
+ if (self = [super initWithFrame:frame]) {
+ self = [self initWithMyDefaults];
+ }
+ return self;
+}
+
+#include "SkImageDecoder.h"
+#include "SkStream_NSData.h"
+
+static void zoom_around(SkCanvas* canvas, float cx, float cy, float zoom) {
+ float clipW = SIZE_FOR_ZOOM_LENS;
+ float clipH = SIZE_FOR_ZOOM_LENS;
+
+ SkRect r;
+ r.set(0, 0, clipW, clipH);
+ r.offset(cx - clipW/2, cy - clipH/2);
+
+ SkPaint paint;
+ paint.setColor(0xFF66AAEE);
+ paint.setStyle(SkPaint::kStroke_Style);
+ paint.setStrokeWidth(10);
+
+ // draw our "frame" around the zoom lens
+ canvas->drawRect(r, paint);
+
+ // now clip and scale the lens
+ canvas->clipRect(r);
+ canvas->translate(cx, cy);
+ canvas->scale(zoom, zoom);
+ canvas->translate(-cx, -cy);
+}
+
+- (void)drawWithCanvas:(SkCanvas*)canvas {
+ if (fComplexClip) {
+ canvas->drawColor(SK_ColorBLUE);
+
+ SkPath path;
+ static const SkRect r[] = {
+ { 50, 50, 250, 250 },
+ { 150, 150, 500, 600 }
+ };
+ for (size_t i = 0; i < GR_ARRAY_COUNT(r); i++) {
+ path.addRect(r[i]);
+ }
+ canvas->clipPath(path);
+ }
+
+ // This is to consolidate multiple inval requests
+ fRedrawRequestPending = false;
+
+ if (fFlingState.isActive()) {
+ if (!fFlingState.evaluateMatrix(&fLocalMatrix)) {
+ [self flushLocalMatrix];
+ }
+ }
+
+ SkMatrix localMatrix = fLocalMatrix;
+#ifdef USE_ACCEL_TO_ROTATE
+ localMatrix.preConcat(fRotateMatrix);
+#endif
+
+ SkMatrix matrix;
+ matrix.setConcat(localMatrix, fMatrix);
+
+ const SkMatrix* localM = NULL;
+ if (localMatrix.getType() & SkMatrix::kScale_Mask) {
+ localM = &localMatrix;
+ }
+#ifdef USE_ACCEL_TO_ROTATE
+ localM = &localMatrix;
+#endif
+ canvas->setExternalMatrix(localM);
+
+#ifdef SHOULD_COUNTER_INIT
+ gShouldCounter = SHOULD_COUNTER_INIT;
+#endif
+
+ {
+ int saveCount = canvas->save();
+ canvas->concat(matrix);
+// SkRect r = { 10, 10, 500, 600 }; canvas->clipRect(r);
+ fWind->draw(canvas);
+ canvas->restoreToCount(saveCount);
+ }
+
+ if (fZoomAround) {
+ zoom_around(canvas, fZoomAroundX, fZoomAroundY, SCALE_FOR_ZOOM_LENS);
+ canvas->concat(matrix);
+ fWind->draw(canvas);
+ }
+
+#ifdef FORCE_REDRAW
+ fWind->inval(NULL);
+#endif
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+- (void)layoutSubviews {
+ int W, H;
+
+ gScreenScale = [UIScreen mainScreen].scale;
+
+#ifdef USE_GL
+
+ CAEAGLLayer* eaglLayer = (CAEAGLLayer*)self.layer;
+ if ([self respondsToSelector:@selector(setContentScaleFactor:)]) {
+ self.contentScaleFactor = gScreenScale;
+ }
+ // Allocate color buffer backing based on the current layer size
+ glBindRenderbufferOES(GL_RENDERBUFFER_OES, fGL.fRenderbuffer);
+ [fGL.fContext renderbufferStorage:GL_RENDERBUFFER_OES fromDrawable:eaglLayer];
+
+ glGetRenderbufferParameterivOES(GL_RENDERBUFFER_OES, GL_RENDERBUFFER_WIDTH_OES, &fGL.fWidth);
+ glGetRenderbufferParameterivOES(GL_RENDERBUFFER_OES, GL_RENDERBUFFER_HEIGHT_OES, &fGL.fHeight);
+
+ if (glCheckFramebufferStatusOES(GL_FRAMEBUFFER_OES) != GL_FRAMEBUFFER_COMPLETE_OES)
+ {
+ NSLog(@"Failed to make complete framebuffer object %x", glCheckFramebufferStatusOES(GL_FRAMEBUFFER_OES));
+ }
+
+ glBindRenderbufferOES(GL_RENDERBUFFER_OES, fGL.fStencilbuffer);
+ glRenderbufferStorageOES(GL_RENDERBUFFER_OES, GL_STENCIL_INDEX8_OES, fGL.fWidth, fGL.fHeight);
+
+ W = fGL.fWidth;
+ H = fGL.fHeight;
+#else
+ CGRect rect = [self bounds];
+ W = (int)CGRectGetWidth(rect);
+ H = (int)CGRectGetHeight(rect) - TITLE_HEIGHT;
+#endif
+
+ printf("---- layoutSubviews %d %d\n", W, H);
+ fWind->resize(W, H);
+ fWind->inval(NULL);
+}
+
+#ifdef USE_GL
+
+static GrContext* gCtx;
+static GrContext* get_global_grctx() {
+ // should be pthread-local at least
+ if (NULL == gCtx) {
+#ifdef USE_GL_1
+ gCtx = GrContext::Create(GrGpu::kOpenGL_Fixed_Engine, NULL);
+#else
+ gCtx = GrContext::Create(GrGpu::kOpenGL_Shaders_Engine, NULL);
+#endif
+ }
+ return gCtx;
+}
+
+#include "SkDevice.h"
+#include "SkShader.h"
+#include "SkGrTexturePixelRef.h"
+#include "GrMesh.h"
+#include "SkRandom.h"
+
+#include "GrAtlas.h"
+#include "GrTextStrike.h"
+
+static void show_fontcache(GrContext* ctx, SkCanvas* canvas) {
+#if 0
+ SkPaint paint;
+ const int SIZE = 64;
+ GrAtlas* plot[64][64];
+
+ paint.setAntiAlias(true);
+ paint.setTextSize(24);
+ paint.setTextAlign(SkPaint::kCenter_Align);
+
+ Gr_bzero(plot, sizeof(plot));
+
+ GrFontCache* cache = ctx->getFontCache();
+ GrTextStrike* strike = cache->getHeadStrike();
+ int count = 0;
+ while (strike) {
+ GrAtlas* atlas = strike->getAtlas();
+ while (atlas) {
+ int x = atlas->getPlotX();
+ int y = atlas->getPlotY();
+
+ SkRandom rand((intptr_t)strike);
+ SkColor c = rand.nextU() | 0x80808080;
+ paint.setColor(c);
+ paint.setAlpha(0x80);
+
+ SkRect r;
+ r.set(x * SIZE, y * SIZE, (x + 1)*SIZE, (y+1)*SIZE);
+ r.inset(1, 1);
+ canvas->drawRect(r, paint);
+
+ paint.setColor(0xFF660000);
+ SkString label;
+ label.printf("%d", count);
+ canvas->drawText(label.c_str(), label.size(), r.centerX(),
+ r.fTop + r.height() * 2 / 3, paint);
+
+ atlas = atlas->nextAtlas();
+ }
+ strike = strike->fNext;
+ count += 1;
+ }
+#endif
+}
+
+void test_patch(SkCanvas* canvas, const SkBitmap& bm, SkScalar scale);
+
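+// Cover the bitmap with a coarse grid mesh (one cell per ~16 pixels in each direction)
+// and draw it using a clamped bitmap shader.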
+static void draw_mesh(SkCanvas* canvas, const SkBitmap& bm) {
+ GrMesh fMesh;
+
+ SkRect r;
+ r.set(0, 0, SkIntToScalar(bm.width()), SkIntToScalar(bm.height()));
+
+ // fMesh.init(bounds, fBitmap.width() / 40, fBitmap.height() / 40, texture);
+ fMesh.init(r, bm.width()/16, bm.height()/16, r);
+
+ SkPaint paint;
+ SkShader* s = SkShader::CreateBitmapShader(bm, SkShader::kClamp_TileMode, SkShader::kClamp_TileMode);
+ paint.setShader(s)->unref();
+ fMesh.draw(canvas, paint);
+}
+
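+// scale by (sx, sy) while keeping the pivot point (px, py) fixed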
+static void scale_about(SkCanvas* canvas, float sx, float sy, float px, float py) {
+ canvas->translate(px, py);
+ canvas->scale(sx, sy);
+ canvas->translate(-px, -py);
+}
+
+static float grInterp(float v0, float v1, float percent) {
+ return v0 + percent * (v1 - v0);
+}
+
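+// As 'warp' goes from 0 to 1, shrink the device's bitmap toward the center (down to 80%)
+// and hand it to test_patch for the warped draw.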
+static void draw_device(SkCanvas* canvas, SkDevice* dev, float w, float h, float warp) {
+ canvas->save();
+ float s = grInterp(1, 0.8, warp);
+ scale_about(canvas, s, s, w/2, h/2);
+ test_patch(canvas, dev->accessBitmap(false), warp);
+ canvas->restore();
+}
+
+- (void)drawInGL {
+// printf("------ drawInGL\n");
+ // This application only creates a single context which is already set current at this point.
+ // This call is redundant, but needed if dealing with multiple contexts.
+ [EAGLContext setCurrentContext:fGL.fContext];
+
+ // This application only creates a single default framebuffer which is already bound at this point.
+ // This call is redundant, but needed if dealing with multiple framebuffers.
+ glBindFramebufferOES(GL_FRAMEBUFFER_OES, fGL.fFramebuffer);
+
+ GLint scissorEnable;
+ glGetIntegerv(GL_SCISSOR_TEST, &scissorEnable);
+ glDisable(GL_SCISSOR_TEST);
+ glClearColor(0,0,0,0);
+ glClear(GL_COLOR_BUFFER_BIT);
+ if (scissorEnable) {
+ glEnable(GL_SCISSOR_TEST);
+ }
+
+ GrContext* ctx = get_global_grctx();
+ SkGpuCanvas origCanvas(ctx);
+ origCanvas.setBitmapDevice(fWind->getBitmap());
+
+ // gl->reset();
+ SkGpuCanvas glCanvas(ctx);
+ SkCanvas rasterCanvas;
+
+ SkCanvas* canvas = &rasterCanvas; // fall back to raster if fBackend is unrecognized
+ SkDevice* dev = NULL;
+
+ switch (fBackend) {
+ case kRaster_Backend:
+ canvas = &rasterCanvas;
+ break;
+ case kGL_Backend:
+ canvas = &glCanvas;
+ break;
+ }
+
+ if (fUseWarp || fWarpState.isActive()) {
+ if (kGL_Backend == fBackend) {
+ dev = origCanvas.createDevice(fWind->getBitmap(), true);
+ canvas->setDevice(dev)->unref();
+ } else {
+ canvas->setBitmapDevice(fWind->getBitmap());
+ dev = canvas->getDevice();
+ }
+ } else {
+ canvas->setBitmapDevice(fWind->getBitmap());
+ dev = NULL;
+ }
+
+ canvas->translate(0, TITLE_HEIGHT);
+
+ // if we're not "retained", then we have to always redraw everything.
+ // This call forces us to ignore the fDirtyRgn, and draw everywhere.
+ // If we are "retained", we can skip this call (as the raster case does)
+ fWind->forceInvalAll();
+
+ FPS_StartDraw();
+ [self drawWithCanvas:canvas];
+ FPS_EndDraw();
+
+ if (dev) {
+ draw_device(&origCanvas, dev, fWind->width(), fWind->height(),
+ fWarpState.evaluate());
+ } else {
+ if (kRaster_Backend == fBackend) {
+ origCanvas.drawBitmap(fWind->getBitmap(), 0, 0, NULL);
+ }
+ // else GL - we're already on screen
+ }
+
+ show_fontcache(ctx, canvas);
+ ctx->flush(false);
+
+ // This application only creates a single color renderbuffer which is already bound at this point.
+ // This call is redundant, but needed if dealing with multiple renderbuffers.
+ glBindRenderbufferOES(GL_RENDERBUFFER_OES, fGL.fRenderbuffer);
+ [fGL.fContext presentRenderbuffer:GL_RENDERBUFFER_OES];
+
+#if GR_COLLECT_STATS
+ static int frame = 0;
+ if (!(frame % 100)) {
+ get_global_grctx()->printStats();
+ }
+ get_global_grctx()->resetStats();
+ ++frame;
+#endif
+
+ FPS_Flush(fWind);
+
+#if 0
+ gCtx->deleteAllTextures(GrTextureCache::kAbandonTexture_DeleteMode);
+ gCtx->unref();
+ gCtx = NULL;
+#endif
+}
+
+#else // raster case
+
+- (void)drawRect:(CGRect)rect {
+ CGContextRef cg = UIGraphicsGetCurrentContext();
+ // back the canvas with the window's bitmap so drawWithCanvas has something to draw into
+ SkCanvas canvas;
+ canvas.setBitmapDevice(fWind->getBitmap());
+
+ FPS_StartDraw();
+ [self drawWithCanvas:&canvas];
+ FPS_EndDraw();
+
+ SkCGDrawBitmap(cg, fWind->getBitmap(), 0, TITLE_HEIGHT);
+
+ FPS_Flush(fWind);
+}
+#endif
+
+- (void)setWarpState:(bool)useWarp {
+ fWarpState.stop(); // we should reverse from where we are if active...
+
+ const float duration = 0.5;
+ fUseWarp = useWarp;
+ if (useWarp) {
+ fWarpState.start(0, 1, duration);
+ } else {
+ fWarpState.start(1, 0, duration);
+ }
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
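+// Fold the in-progress gesture matrix into the persistent view matrix and reset the
+// gesture-tracking state.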
+- (void)flushLocalMatrix {
+ fMatrix.postConcat(fLocalMatrix);
+ fLocalMatrix.reset();
+ fFlingState.stop();
+ fNeedGestureEnded = false;
+ fNeedFirstPinch = true;
+}
+
+- (void)localMatrixWithGesture:(UIGestureRecognizer*)gesture {
+ fNeedGestureEnded = true;
+
+ switch (gesture.state) {
+ case UIGestureRecognizerStateCancelled:
+ case UIGestureRecognizerStateEnded:
+ [self flushLocalMatrix];
+ break;
+ case UIGestureRecognizerStateChanged: {
+ SkMatrix matrix;
+ matrix.setConcat(fLocalMatrix, fMatrix);
+ } break;
+ default:
+ break;
+ }
+}
+
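+// Interrupt any active fling and, when a gesture begins, commit the pending local matrix
+// before new deltas are accumulated.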
+- (void)commonHandleGesture:(UIGestureRecognizer*)sender {
+ if (fFlingState.isActive()) {
+ [self flushLocalMatrix];
+ }
+
+ switch (sender.state) {
+ case UIGestureRecognizerStateBegan:
+ [self flushLocalMatrix];
+ break;
+ default:
+ break;
+ }
+}
+
+- (void)handleDTapGesture:(UIGestureRecognizer*)sender {
+ [self flushLocalMatrix];
+ fMatrix.reset();
+}
+
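+// truncate toward zero so translations land on whole-pixel boundaries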
+static float discretize(float x) {
+ return (int)x;
+}
+
+- (void)handlePanGesture:(UIPanGestureRecognizer*)sender {
+ [self commonHandleGesture:sender];
+
+ CGPoint delta = [sender translationInView:self];
+ delta.x *= gScreenScale;
+ delta.y *= gScreenScale;
+ // avoid flickering where the drawing might toggle in and out of a pixel
+ // center if translated by a fractional value
+ delta.x = discretize(delta.x);
+ delta.y = discretize(delta.y);
+ fLocalMatrix.setTranslate(delta.x, delta.y);
+ [self localMatrixWithGesture:sender];
+
+ if (UIGestureRecognizerStateEnded == sender.state) {
+ CGPoint velocity = [sender velocityInView:self];
+ fFlingState.reset(velocity.x, velocity.y);
+ fNeedGestureEnded = true;
+ }
+}
+
+- (float)limitTotalZoom:(float)scale {
+ // this query works because the matrix only accumulates translate + uniform scale
+ // (no skew/rotation), so fMatrix[0] (kMScaleX) is the current zoom factor
+ const float curr = fMatrix[0];
+
+ if (scale > 1 && curr * scale > MAX_ZOOM_SCALE) {
+ scale = MAX_ZOOM_SCALE / curr;
+ } else if (scale < 1 && curr * scale < MIN_ZOOM_SCALE) {
+ scale = MIN_ZOOM_SCALE / curr;
+ }
+ return scale;
+}
+
+- (void)handleScaleGesture:(UIPinchGestureRecognizer*)sender {
+ [self commonHandleGesture:sender];
+
+ if ([sender numberOfTouches] == 2) {
+ float scale = sender.scale;
+ CGPoint p0 = [sender locationOfTouch:0 inView:self];
+ CGPoint p1 = [sender locationOfTouch:1 inView:self];
+ float cx = (p0.x + p1.x) * 0.5;
+ float cy = (p0.y + p1.y) * 0.5;
+
+ if (fNeedFirstPinch) {
+ fFirstPinchX = cx;
+ fFirstPinchY = cy;
+ fNeedFirstPinch = false;
+ }
+
+ scale = [self limitTotalZoom:scale];
+
+ fLocalMatrix.setTranslate(-fFirstPinchX, -fFirstPinchY);
+ fLocalMatrix.postScale(scale, scale);
+ fLocalMatrix.postTranslate(cx, cy);
+ [self localMatrixWithGesture:sender];
+ } else {
+ [self flushLocalMatrix];
+ }
+}
+
+- (void)handleLongPressGesture:(UILongPressGestureRecognizer*)sender {
+ [self commonHandleGesture:sender];
+
+ if ([sender numberOfTouches] == 0) {
+ fZoomAround = false;
+ return;
+ }
+
+ CGPoint pt = [sender locationOfTouch:0 inView:self];
+ switch (sender.state) {
+ case UIGestureRecognizerStateBegan:
+ case UIGestureRecognizerStateChanged:
+ fZoomAround = true;
+ fZoomAroundX = pt.x;
+ fZoomAroundY = pt.y - Y_OFFSET_FOR_ZOOM_LENS;
+ break;
+ case UIGestureRecognizerStateEnded:
+ case UIGestureRecognizerStateCancelled:
+ fZoomAround = false;
+ break;
+ default:
+ break;
+ }
+}
+
+- (void)addAndReleaseGesture:(UIGestureRecognizer*)gesture {
+ [self addGestureRecognizer:gesture];
+ [gesture release];
+}
+
+- (void)initGestures {
+ UITapGestureRecognizer* tapG = [[UITapGestureRecognizer alloc] initWithTarget:self action:@selector(handleDTapGesture:)];
+ tapG.numberOfTapsRequired = 2;
+ [self addAndReleaseGesture:tapG];
+
+ UIPanGestureRecognizer* panG = [[UIPanGestureRecognizer alloc] initWithTarget:self action:@selector(handlePanGesture:)];
+ [self addAndReleaseGesture:panG];
+
+ UIPinchGestureRecognizer* pinchG = [[UIPinchGestureRecognizer alloc] initWithTarget:self action:@selector(handleScaleGesture:)];
+ [self addAndReleaseGesture:pinchG];
+
+ UILongPressGestureRecognizer* longG = [[UILongPressGestureRecognizer alloc] initWithTarget:self action:@selector(handleLongPressGesture:)];
+ [self addAndReleaseGesture:longG];
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+static float abs(float x) { return x < 0 ? -x : x; }
+
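+// Reject readings that are near zero or dominated by the z axis (device lying flat);
+// otherwise return the normalized (x, y) direction of gravity in screen space.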
+static bool normalize(UIAcceleration* acc, float xy[]) {
+ float mag2 = acc.x*acc.x + acc.y*acc.y + acc.z*acc.z;
+ if (mag2 < 0.000001) {
+ return false;
+ }
+ if (abs((float)acc.z) > 0.9 * sqrt(mag2)) {
+ return false;
+ }
+
+ mag2 = acc.x*acc.x + acc.y*acc.y;
+ if (mag2 < 0.000001) {
+ return false;
+ }
+ float scale = 1 / sqrt(mag2);
+ xy[0] = acc.x * scale;
+ xy[1] = acc.y * scale;
+ return true;
+}
+
+static void normalize(float xy[]) {
+ float scale = 1 / sqrt(xy[0]*xy[0] + xy[1]*xy[1]);
+ xy[0] *= scale;
+ xy[1] *= scale;
+}
+
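+// simple low-pass filter: blend 25% of the new sample with 75% of the previous value
+// to smooth out accelerometer jitter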
+static float weighted_average(float newv, float oldv) {
+ return newv * 0.25 + oldv * 0.75;
+}
+
+- (void)accelerometer:(UIAccelerometer *)accelerometer didAccelerate:(UIAcceleration *)acc {
+
+ float norm[2];
+ if (normalize(acc, norm)) {
+ float sinv = -norm[0];
+ float cosv = -norm[1];
+ // smooth
+ norm[0] = weighted_average(sinv, -fRotateMatrix[1]);
+ norm[1] = weighted_average(cosv, fRotateMatrix[0]);
+ normalize(norm);
+ fRotateMatrix.setSinCos(norm[0], norm[1], 400, 400);
+ }
+#if 0
+ NSDate *now = [NSDate date];
+ NSTimeInterval intervalDate = [now timeIntervalSinceDate:now_prev];
+
+ velX += (acceleration.x * intervalDate);
+ distX += (velX * intervalDate);
+ //do other axis here too
+
+ // setup for next UIAccelerometer event
+ now_prev = now;
+#endif
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+- (void)setSkTitle:(const char *)title {
+ if (fTitleLabel) {
+ fTitleLabel.text = [NSString stringWithUTF8String:title];
+ [fTitleLabel setNeedsDisplay];
+ }
+}
+
+- (BOOL)onHandleEvent:(const SkEvent&)evt {
+ if (evt.isType(kREDRAW_UIVIEW_GL)) {
+ [self drawInGL];
+ return true;
+ }
+ return false;
+}
+
+- (void)postInvalWithRect:(const SkIRect*)r {
+#ifdef USE_GL
+
+#if 1
+ if (!fRedrawRequestPending) {
+ fRedrawRequestPending = true;
+ /*
+ performSelectorOnMainThread seems to starve updating other views
+ (e.g. our FPS view in the titlebar), so we use the afterDelay
+ version
+ */
+ if (0) {
+ [self performSelectorOnMainThread:@selector(drawInGL) withObject:nil waitUntilDone:NO];
+ } else {
+ [self performSelector:@selector(drawInGL) withObject:nil afterDelay:0];
+ }
+ }
+#else
+ if (!fRedrawRequestPending) {
+ SkEvent* evt = new SkEvent(kREDRAW_UIVIEW_GL);
+ evt->post(fWind->getSinkID());
+ fRedrawRequestPending = true;
+ }
+#endif
+
+#else
+ if (r) {
+ [self setNeedsDisplayInRect:CGRectMake(r->fLeft, r->fTop,
+ r->width(), r->height())];
+ } else {
+ [self setNeedsDisplay];
+ }
+#endif
+}
+
+@end
diff --git a/gpu/src/skia/skgr_files.mk b/gpu/src/skia/skgr_files.mk
new file mode 100644
index 0000000000..41896c005e
--- /dev/null
+++ b/gpu/src/skia/skgr_files.mk
@@ -0,0 +1,7 @@
+SOURCE := \
+ SkGpuCanvas.cpp \
+ SkGpuDevice.cpp \
+ SkGr.cpp \
+ SkGrTexturePixelRef.cpp \
+ SkGrFontScaler.cpp
+