 src/gpu/GrPathRange.cpp                  | 56
 src/gpu/GrPathRange.h                    | 69
 src/gpu/GrPathRendering.h                |  4
 src/gpu/GrStencilAndCoverTextContext.cpp |  4
 src/gpu/batches/GrDrawPathBatch.h        |  4
 5 files changed, 88 insertions(+), 49 deletions(-)
diff --git a/src/gpu/GrPathRange.cpp b/src/gpu/GrPathRange.cpp
index 5e71014ce8..117051db19 100644
--- a/src/gpu/GrPathRange.cpp
+++ b/src/gpu/GrPathRange.cpp
@@ -8,9 +8,6 @@
 #include "GrPathRange.h"
 #include "SkPath.h"
 
-enum {
-    kPathsPerGroup = 16 // Paths get tracked in groups of 16 for lazy loading.
-};
 
 GrPathRange::GrPathRange(GrGpu* gpu,
                          PathGenerator* pathGenerator)
@@ -28,51 +25,24 @@ GrPathRange::GrPathRange(GrGpu* gpu,
       fNumPaths(numPaths) {
 }
 
-void GrPathRange::willDrawPaths(const void* indices, PathIndexType indexType, int count) const {
-    if (!fPathGenerator) {
-        return;
-    }
-
+void GrPathRange::loadPathsIfNeeded(const void* indices, PathIndexType indexType, int count) const {
     switch (indexType) {
-        case kU8_PathIndexType: return this->willDrawPaths<uint8_t>(indices, count);
-        case kU16_PathIndexType: return this->willDrawPaths<uint16_t>(indices, count);
-        case kU32_PathIndexType: return this->willDrawPaths<uint32_t>(indices, count);
+        case kU8_PathIndexType: return this->loadPathsIfNeeded<uint8_t>(indices, count);
+        case kU16_PathIndexType: return this->loadPathsIfNeeded<uint16_t>(indices, count);
+        case kU32_PathIndexType: return this->loadPathsIfNeeded<uint32_t>(indices, count);
         default: SkFAIL("Unknown path index type");
     }
 }
 
-template<typename IndexType> void GrPathRange::willDrawPaths(const void* indices, int count) const {
-    SkASSERT(fPathGenerator);
-
-    const IndexType* indexArray = reinterpret_cast<const IndexType*>(indices);
-    bool didLoadPaths = false;
-
-    for (int i = 0; i < count; ++i) {
-        SkASSERT(indexArray[i] < static_cast<uint32_t>(fNumPaths));
-
-        const int groupIndex = indexArray[i] / kPathsPerGroup;
-        const int groupByte = groupIndex / 8;
-        const uint8_t groupBit = 1 << (groupIndex % 8);
-
-        const bool hasPath = SkToBool(fGeneratedPaths[groupByte] & groupBit);
-        if (!hasPath) {
-            // We track which paths are loaded in groups of kPathsPerGroup. To
-            // mark a path as loaded we need to load the entire group.
-            const int groupFirstPath = groupIndex * kPathsPerGroup;
-            const int groupLastPath = SkTMin(groupFirstPath + kPathsPerGroup, fNumPaths) - 1;
+#ifdef SK_DEBUG
 
-            SkPath path;
-            for (int pathIdx = groupFirstPath; pathIdx <= groupLastPath; ++pathIdx) {
-                fPathGenerator->generatePath(pathIdx, &path);
-                this->onInitPath(pathIdx, path);
-            }
-
-            fGeneratedPaths[groupByte] |= groupBit;
-            didLoadPaths = true;
-        }
-    }
-
-    if (didLoadPaths) {
-        this->didChangeGpuMemorySize();
+void GrPathRange::assertPathsLoaded(const void* indices, PathIndexType indexType, int count) const {
+    switch (indexType) {
+        case kU8_PathIndexType: return this->assertPathsLoaded<uint8_t>(indices, count);
+        case kU16_PathIndexType: return this->assertPathsLoaded<uint16_t>(indices, count);
+        case kU32_PathIndexType: return this->assertPathsLoaded<uint32_t>(indices, count);
+        default: SkFAIL("Unknown path index type");
     }
 }
+
+#endif
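The change above splits the old willDrawPaths() entry point in two: loadPathsIfNeeded() keeps the generate-missing-paths dispatch, while the new assertPathsLoaded(), compiled only under SK_DEBUG, merely verifies at draw time that every referenced path was generated earlier. A minimal sketch of the resulting calling contract (the variable names here are illustrative, not from the patch):

    // Record time: generate any missing paths while the resource may
    // still be mutated, i.e. before the draw is flushed.
    range->loadPathsIfNeeded(indices, GrPathRange::kU16_PathIndexType, count);

    // Flush/draw time: debug builds double-check that nothing was missed;
    // release builds compile the check away entirely.
    #ifdef SK_DEBUG
        range->assertPathsLoaded(indices, GrPathRange::kU16_PathIndexType, count);
    #endif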
diff --git a/src/gpu/GrPathRange.h b/src/gpu/GrPathRange.h
index a2483c13c2..23b8beb226 100644
--- a/src/gpu/GrPathRange.h
+++ b/src/gpu/GrPathRange.h
@@ -9,10 +9,10 @@
 #define GrPathRange_DEFINED
 
 #include "GrGpuResource.h"
+#include "SkPath.h"
 #include "SkRefCnt.h"
 #include "SkTArray.h"
 
-class SkPath;
 class SkDescriptor;
 
 /**
@@ -70,7 +70,67 @@ public:
     int getNumPaths() const { return fNumPaths; }
     const PathGenerator* getPathGenerator() const { return fPathGenerator.get(); }
 
+    void loadPathsIfNeeded(const void* indices, PathIndexType, int count) const;
+
+    template<typename IndexType> void loadPathsIfNeeded(const void* indices, int count) const {
+        if (!fPathGenerator) {
+            return;
+        }
+
+        const IndexType* indexArray = reinterpret_cast<const IndexType*>(indices);
+        bool didLoadPaths = false;
+
+        for (int i = 0; i < count; ++i) {
+            SkASSERT(indexArray[i] < static_cast<uint32_t>(fNumPaths));
+
+            const int groupIndex = indexArray[i] / kPathsPerGroup;
+            const int groupByte = groupIndex / 8;
+            const uint8_t groupBit = 1 << (groupIndex % 8);
+
+            const bool hasPath = SkToBool(fGeneratedPaths[groupByte] & groupBit);
+            if (!hasPath) {
+                // We track which paths are loaded in groups of kPathsPerGroup. To
+                // mark a path as loaded we need to load the entire group.
+                const int groupFirstPath = groupIndex * kPathsPerGroup;
+                const int groupLastPath = SkTMin(groupFirstPath + kPathsPerGroup, fNumPaths) - 1;
+
+                SkPath path;
+                for (int pathIdx = groupFirstPath; pathIdx <= groupLastPath; ++pathIdx) {
+                    fPathGenerator->generatePath(pathIdx, &path);
+                    this->onInitPath(pathIdx, path);
+                }
+
+                fGeneratedPaths[groupByte] |= groupBit;
+                didLoadPaths = true;
+            }
+        }
+
+        if (didLoadPaths) {
+            this->didChangeGpuMemorySize();
+        }
+    }
+
 #ifdef SK_DEBUG
+    void assertPathsLoaded(const void* indices, PathIndexType, int count) const;
+
+    template<typename IndexType> void assertPathsLoaded(const void* indices, int count) const {
+        if (!fPathGenerator) {
+            return;
+        }
+
+        const IndexType* indexArray = reinterpret_cast<const IndexType*>(indices);
+
+        for (int i = 0; i < count; ++i) {
+            SkASSERT(indexArray[i] < static_cast<uint32_t>(fNumPaths));
+
+            const int groupIndex = indexArray[i] / kPathsPerGroup;
+            const int groupByte = groupIndex / 8;
+            const uint8_t groupBit = 1 << (groupIndex % 8);
+
+            SkASSERT(fGeneratedPaths[groupByte] & groupBit);
+        }
+    }
+
     virtual bool isEqualTo(const SkDescriptor& desc) const {
         return nullptr != fPathGenerator.get() && fPathGenerator->isEqualTo(desc);
     }
@@ -82,10 +142,9 @@ protected:
     virtual void onInitPath(int index, const SkPath&) const = 0;
 
 private:
-    // Notify when paths will be drawn in case this is a lazy-loaded path range.
-    friend class GrPathRendering;
-    void willDrawPaths(const void* indices, PathIndexType, int count) const;
-    template<typename IndexType> void willDrawPaths(const void* indices, int count) const;
+    enum {
+        kPathsPerGroup = 16 // Paths get tracked in groups of 16 for lazy loading.
+    };
 
     mutable SkAutoTUnref<PathGenerator> fPathGenerator;
     mutable SkTArray<uint8_t, true /*MEM_COPY*/> fGeneratedPaths;
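The template bodies moved into this header are essentially the old .cpp implementations, renamed; the bookkeeping is easier to see stripped of Skia types. A standalone sketch of the same scheme in plain C++ (not Skia code): one bit per group of kPathsPerGroup = 16 paths, eight group bits packed per byte, so marking any one path as loaded means generating its entire group.

    #include <cstdint>
    #include <vector>

    class GroupBitfield {
    public:
        explicit GroupBitfield(int numPaths)
            // One bit per group of 16 paths, rounded up to whole bytes
            // (mirrors fGeneratedPaths above).
            : fBits((((numPaths + 15) / 16) + 7) / 8, 0) {}

        bool groupLoaded(int pathIndex) const {
            const int group = pathIndex / 16;
            return fBits[group / 8] & (1 << (group % 8));
        }

        void markGroupLoaded(int pathIndex) {
            const int group = pathIndex / 16;
            fBits[group / 8] |= (1 << (group % 8));
        }

    private:
        std::vector<uint8_t> fBits;
    };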
diff --git a/src/gpu/GrPathRendering.h b/src/gpu/GrPathRendering.h
index 47c9a46f83..a2e9c0295b 100644
--- a/src/gpu/GrPathRendering.h
+++ b/src/gpu/GrPathRendering.h
@@ -179,7 +179,9 @@ public:
         if (GrXferBarrierType barrierType = args.fPipeline->xferBarrierType(*fGpu->caps())) {
             fGpu->xferBarrier(args.fPipeline->getRenderTarget(), barrierType);
         }
-        pathRange->willDrawPaths(indices, indexType, count);
+#ifdef SK_DEBUG
+        pathRange->assertPathsLoaded(indices, indexType, count);
+#endif
         this->onDrawPaths(args, pathRange, indices, indexType, transformValues, transformType,
                           count);
     }
diff --git a/src/gpu/GrStencilAndCoverTextContext.cpp b/src/gpu/GrStencilAndCoverTextContext.cpp
index c3c49f92bf..0dc902928b 100644
--- a/src/gpu/GrStencilAndCoverTextContext.cpp
+++ b/src/gpu/GrStencilAndCoverTextContext.cpp
@@ -378,6 +378,8 @@ void GrStencilAndCoverTextContext::TextRun::setText(const char text[], size_t by
         fy += SkFixedMul(glyph.fAdvanceY, fixedSizeRatio);
     }
 
+    fDraw->loadGlyphPathsIfNeeded();
+
     fFallbackTextBlob.reset(fallback.buildIfInitialized());
 }
 
@@ -416,6 +418,8 @@ void GrStencilAndCoverTextContext::TextRun::setPosText(const char text[], size_t
         pos += scalarsPerPosition;
     }
 
+    fDraw->loadGlyphPathsIfNeeded();
+
     fFallbackTextBlob.reset(fallback.buildIfInitialized());
 }
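Two details here. The call site in GrPathRendering.h needs its own SK_DEBUG guard because assertPathsLoaded() is only declared when SK_DEBUG is defined (see the header diff above), so an unguarded call would not compile in release builds. And the text context now issues the eager load at setText()/setPosText() time, once all glyph indices for the run have been accumulated, rather than relying on load-at-draw. A condensed sketch of the guard pattern, with a hypothetical Range class standing in for GrPathRange:

    class Range {
    public:
    #ifdef SK_DEBUG
        // Declared only in debug builds, like GrPathRange::assertPathsLoaded().
        void assertPathsLoaded(const void* indices, int count) const;
    #endif
    };

    void drawPaths(const Range& range, const void* indices, int count) {
    #ifdef SK_DEBUG
        range.assertPathsLoaded(indices, count);  // compiled out in release
    #endif
        // ... issue the actual draw ...
    }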
diff --git a/src/gpu/batches/GrDrawPathBatch.h b/src/gpu/batches/GrDrawPathBatch.h
index bb76abb26b..91e0e41ee4 100644
--- a/src/gpu/batches/GrDrawPathBatch.h
+++ b/src/gpu/batches/GrDrawPathBatch.h
@@ -115,6 +115,10 @@ public:
 
     const GrPathRange* range() const { return fPathRange.get(); }
 
+    void loadGlyphPathsIfNeeded() {
+        fPathRange.get()->loadPathsIfNeeded<uint16_t>(fIndices.begin(), fIndices.count());
+    }
+
     static bool CanMerge(const GrPathRangeDraw& a,
                          const GrPathRangeDraw& b) {
         return a.transformType() == b.transformType() && a.range() == b.range();
     }
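loadGlyphPathsIfNeeded() simply forwards the draw's accumulated index list to GrPathRange::loadPathsIfNeeded(); the hard-coded uint16_t instantiation matches the element type of fIndices, which presumably holds 16-bit glyph IDs on this text path. Usage, as wired up in the text-context diff above:

    // At the end of TextRun::setText()/setPosText(), once every glyph
    // index has been appended to the draw:
    fDraw->loadGlyphPathsIfNeeded();  // generates any not-yet-loaded glyph paths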