author    Robert Phillips <robertphillips@google.com> 2018-05-31 12:43:27 -0400
committer Skia Commit-Bot <skia-commit-bot@chromium.org> 2018-05-31 17:27:43 +0000
commit    774168efac172d096cd7f09841bce51650cb6e84 (patch)
tree      1433b93978baaf92439289fe5a0a3e648a73974f /include
parent    e304a8a21b0bc40c3a7d85a96371a21180750076 (diff)
Allow CCPR in DDL mode (take 2)
A lot of the changes to get this compiling on the win_chromium_compile_dbg_ng
bot (i.e., moving a lot of header files to private) should be undone if that
bot is ever "fixed".

Bug: skia:7988
Change-Id: I704ff793d80b18e7312048538874498824803580
Reviewed-on: https://skia-review.googlesource.com/130920
Reviewed-by: Chris Dalton <csmartdalton@google.com>
Commit-Queue: Robert Phillips <robertphillips@google.com>
Diffstat (limited to 'include')
-rw-r--r--  include/private/GrCCClipPath.h           |  79
-rw-r--r--  include/private/GrCCPerOpListPaths.h     |  34
-rw-r--r--  include/private/SkArenaAlloc.h           | 250
-rw-r--r--  include/private/SkDeferredDisplayList.h  |   9
-rw-r--r--  include/private/SkTInternalLList.h       | 318
5 files changed, 689 insertions, 1 deletion
diff --git a/include/private/GrCCClipPath.h b/include/private/GrCCClipPath.h
new file mode 100644
index 0000000000..f15cc9c756
--- /dev/null
+++ b/include/private/GrCCClipPath.h
@@ -0,0 +1,79 @@
+/*
+ * Copyright 2018 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrCCClipPath_DEFINED
+#define GrCCClipPath_DEFINED
+
+#include "GrTextureProxy.h"
+#include "SkPath.h"
+
+class GrCCAtlas;
+class GrCCPerFlushResources;
+class GrOnFlushResourceProvider;
+class GrProxyProvider;
+
+/**
+ * These are keyed by SkPath generation ID, and store which device-space paths are accessed and
+ * where by clip FPs in a given opList. A single GrCCClipPath can be referenced by multiple FPs. At
+ * flush time their coverage count masks are packed into atlas(es) alongside normal DrawPathOps.
+ */
+class GrCCClipPath {
+public:
+ GrCCClipPath() = default;
+ GrCCClipPath(const GrCCClipPath&) = delete;
+
+ ~GrCCClipPath() {
+ // Ensure no clip FPs exist with a dangling pointer back into this class.
+ SkASSERT(!fAtlasLazyProxy || fAtlasLazyProxy->isUnique_debugOnly());
+ // Ensure no lazy proxy callbacks exist with a dangling pointer back into this class.
+ SkASSERT(fHasAtlasTransform);
+ }
+
+ bool isInitialized() const { return fAtlasLazyProxy != nullptr; }
+ void init(GrProxyProvider* proxyProvider,
+ const SkPath& deviceSpacePath, const SkIRect& accessRect,
+ int rtWidth, int rtHeight);
+
+ void addAccess(const SkIRect& accessRect) {
+ SkASSERT(this->isInitialized());
+ fAccessRect.join(accessRect);
+ }
+ GrTextureProxy* atlasLazyProxy() const {
+ SkASSERT(this->isInitialized());
+ return fAtlasLazyProxy.get();
+ }
+ const SkPath& deviceSpacePath() const {
+ SkASSERT(this->isInitialized());
+ return fDeviceSpacePath;
+ }
+ const SkIRect& pathDevIBounds() const {
+ SkASSERT(this->isInitialized());
+ return fPathDevIBounds;
+ }
+
+ void renderPathInAtlas(GrCCPerFlushResources*, GrOnFlushResourceProvider*);
+
+ const SkVector& atlasScale() const { SkASSERT(fHasAtlasTransform); return fAtlasScale; }
+ const SkVector& atlasTranslate() const { SkASSERT(fHasAtlasTransform); return fAtlasTranslate; }
+
+private:
+ sk_sp<GrTextureProxy> fAtlasLazyProxy;
+ SkPath fDeviceSpacePath;
+ SkIRect fPathDevIBounds;
+ SkIRect fAccessRect;
+
+ const GrCCAtlas* fAtlas = nullptr;
+ int16_t fAtlasOffsetX;
+ int16_t fAtlasOffsetY;
+ SkDEBUGCODE(bool fHasAtlas = false);
+
+ SkVector fAtlasScale;
+ SkVector fAtlasTranslate;
+ SkDEBUGCODE(bool fHasAtlasTransform = false);
+};
+
+#endif
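
Taken together, the API above implies a four-step lifecycle: init() creates the lazy atlas proxy for a device-space path, each additional clip FP widens the access rect via addAccess(), renderPathInAtlas() packs the path's coverage count mask into an atlas at flush time, and atlasScale()/atlasTranslate() then feed the clip FP's shader. A minimal sketch of that flow, using only the declarations above; the provider pointers, geometry, and function name are placeholders, not part of this commit:

// Hypothetical usage sketch; not part of the commit.
#include "GrCCClipPath.h"

void sketchClipPathLifecycle(GrProxyProvider* proxyProvider,
                             GrCCPerFlushResources* resources,
                             GrOnFlushResourceProvider* onFlushRP,
                             const SkPath& deviceSpacePath) {
    GrCCClipPath clipPath;

    // First clip FP: create the lazy atlas proxy for this path.
    clipPath.init(proxyProvider, deviceSpacePath,
                  SkIRect::MakeWH(100, 100) /* placeholder access rect */,
                  /*rtWidth=*/1024, /*rtHeight=*/768);

    // Later clip FPs in the same opList only grow the access rect.
    clipPath.addAccess(SkIRect::MakeXYWH(50, 50, 100, 100));

    // At flush time, the coverage count mask is packed into an atlas...
    clipPath.renderPathInAtlas(resources, onFlushRP);

    // ...after which the atlas transform is available to the FP's shader.
    SkVector scale = clipPath.atlasScale();
    SkVector translate = clipPath.atlasTranslate();
    (void)scale; (void)translate;
}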
diff --git a/include/private/GrCCPerOpListPaths.h b/include/private/GrCCPerOpListPaths.h
new file mode 100644
index 0000000000..b94e82c548
--- /dev/null
+++ b/include/private/GrCCPerOpListPaths.h
@@ -0,0 +1,34 @@
+/*
+ * Copyright 2018 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrCCPerOpListPaths_DEFINED
+#define GrCCPerOpListPaths_DEFINED
+
+#include "SkArenaAlloc.h"
+#include "SkRefCnt.h"
+#include "SkTInternalLList.h"
+#include "GrCCClipPath.h"
+#include <map>
+
+class GrCCDrawPathsOp;
+class GrCCPerFlushResources;
+
+/**
+ * Tracks all the CCPR paths in a given opList that will be drawn when it flushes.
+ */
+// DDL TODO: given the usage pattern in DDL mode, this could probably be non-atomic refcounting.
+class GrCCPerOpListPaths : public SkRefCnt {
+public:
+ ~GrCCPerOpListPaths();
+
+ SkTInternalLList<GrCCDrawPathsOp> fDrawOps;
+ std::map<uint32_t, GrCCClipPath> fClipPaths;
+ SkSTArenaAlloc<10 * 1024> fAllocator{10 * 1024 * 2};
+ sk_sp<const GrCCPerFlushResources> fFlushResources;
+};
+
+#endif
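
As the SkDeferredDisplayList.h hunk later in this diff shows, these per-opList path sets are held in a std::map keyed by opList unique ID. A hedged sketch of that bookkeeping during recording; lookUp is a hypothetical helper name, not a function from this commit:

// Hypothetical sketch; mirrors the PendingPathsMap typedef added below.
#include <map>
#include "GrCCPerOpListPaths.h"

using PendingPathsMap = std::map<uint32_t, sk_sp<GrCCPerOpListPaths>>;

sk_sp<GrCCPerOpListPaths> lookUp(PendingPathsMap& pending, uint32_t opListID) {
    sk_sp<GrCCPerOpListPaths>& paths = pending[opListID];
    if (!paths) {
        paths = sk_make_sp<GrCCPerOpListPaths>();  // created on first use
    }
    return paths;
}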
diff --git a/include/private/SkArenaAlloc.h b/include/private/SkArenaAlloc.h
new file mode 100644
index 0000000000..c9e7274e63
--- /dev/null
+++ b/include/private/SkArenaAlloc.h
@@ -0,0 +1,250 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkArenaAlloc_DEFINED
+#define SkArenaAlloc_DEFINED
+
+#include "SkRefCnt.h"
+#include "SkTFitsIn.h"
+#include "SkTypes.h"
+#include <cstddef>
+#include <new>
+#include <type_traits>
+#include <utility>
+#include <vector>
+
+// SkArenaAlloc allocates objects and destroys them when the arena itself is destroyed. It's
+// designed to minimize the number of underlying block allocations. SkArenaAlloc allocates first
+// out of an (optional) user-provided block of memory, and when that's exhausted it allocates on
+// the heap, starting with an allocation of extraSize bytes. If your data (plus a small overhead)
+// fits in the user-provided block, SkArenaAlloc never uses the heap, and if it fits in extraSize
+// bytes, it'll use the heap only once. If you pass extraSize = 0, it allocates a block for each
+// call to make<T>.
+//
+// Examples:
+//
+// char block[mostCasesSize];
+// SkArenaAlloc arena(block, mostCasesSize, almostAllCasesSize);
+//
+// If mostCasesSize is too large for the stack, you can use the following pattern.
+//
+// std::unique_ptr<char[]> block{new char[mostCasesSize]};
+// SkArenaAlloc arena(block.get(), mostCasesSize, almostAllCasesSize);
+//
+// If the program only sometimes allocates memory, use the following.
+//
+// SkArenaAlloc arena(nullptr, 0, almostAllCasesSize);
+//
+// The storage does not necessarily need to be on the stack. Embedding the storage in a class also
+// works.
+//
+// class Foo {
+// char storage[mostCasesSize];
+// SkArenaAlloc arena{storage, mostCasesSize, almostAllCasesSize};
+// };
+//
+// In addition, the system is optimized to handle POD data including arrays of PODs (where
+// POD is really data with no destructors). For POD data it has zero overhead per item, and a
+// typical block overhead of 8 bytes. For non-POD objects there is a per item overhead of 4 bytes.
+// For arrays of non-POD objects there is a per array overhead of typically 8 bytes. There is an
+// additional overhead, typically 8 bytes, when switching from POD data to non-POD data.
+//
+// You can track memory use by adding SkArenaAlloc::kTrack as the last parameter to any constructor.
+//
+// char storage[someNumber];
+// SkArenaAlloc alloc{storage, sizeof(storage), 0, SkArenaAlloc::kTrack};
+//
+// With tracking enabled, a line is printed at every destructor or reset() call showing the total
+// memory allocated, the total slop (the unused portion of each block), and the slop of the last block.
+//
+// If additional blocks are needed, their sizes increase exponentially. This strategy bounds the
+// recursion depth of RunDtorsOnBlock to O(log size-of-memory). Block sizes grow using the
+// Fibonacci sequence, which means that for 2^32 bytes of memory there are 48 allocations, and for
+// 2^48 there are 71 allocations.
+class SkArenaAlloc {
+public:
+ enum Tracking {kDontTrack, kTrack};
+ SkArenaAlloc(char* block, size_t size, size_t extraSize, Tracking tracking = kDontTrack);
+
+ SkArenaAlloc(size_t extraSize, Tracking tracking = kDontTrack)
+ : SkArenaAlloc(nullptr, 0, extraSize, tracking)
+ {}
+
+ ~SkArenaAlloc();
+
+ template <typename T, typename... Args>
+ T* make(Args&&... args) {
+ uint32_t size = SkTo<uint32_t>(sizeof(T));
+ uint32_t alignment = SkTo<uint32_t>(alignof(T));
+ char* objStart;
+ if (skstd::is_trivially_destructible<T>::value) {
+ objStart = this->allocObject(size, alignment);
+ fCursor = objStart + size;
+ } else {
+ objStart = this->allocObjectWithFooter(size + sizeof(Footer), alignment);
+ // Can never be UB because max value is alignof(T).
+ uint32_t padding = SkTo<uint32_t>(objStart - fCursor);
+
+ // Advance to end of object to install footer.
+ fCursor = objStart + size;
+ FooterAction* releaser = [](char* objEnd) {
+ char* objStart = objEnd - (sizeof(T) + sizeof(Footer));
+ ((T*)objStart)->~T();
+ return objStart;
+ };
+ this->installFooter(releaser, padding);
+ }
+
+ // This must be last to make objects with nested use of this allocator work.
+ return new(objStart) T(std::forward<Args>(args)...);
+ }
+
+ template <typename T, typename... Args>
+ sk_sp<T> makeSkSp(Args&&... args) {
+ SkASSERT(SkTFitsIn<uint32_t>(sizeof(T)));
+
+ // The arena takes a ref for itself to account for the destructor. The sk_sp count can't
+ // become zero or the sk_sp will try to call free on the pointer.
+ return sk_sp<T>(SkRef(this->make<T>(std::forward<Args>(args)...)));
+ }
+
+ template <typename T>
+ T* makeArrayDefault(size_t count) {
+ uint32_t safeCount = SkTo<uint32_t>(count);
+ T* array = (T*)this->commonArrayAlloc<T>(safeCount);
+
+ // If T is primitive then no initialization takes place.
+ for (size_t i = 0; i < safeCount; i++) {
+ new (&array[i]) T;
+ }
+ return array;
+ }
+
+ template <typename T>
+ T* makeArray(size_t count) {
+ uint32_t safeCount = SkTo<uint32_t>(count);
+ T* array = (T*)this->commonArrayAlloc<T>(safeCount);
+
+ // If T is primitive then the memory is value-initialized. For example, an array of chars
+ // will be zeroed.
+ for (size_t i = 0; i < safeCount; i++) {
+ new (&array[i]) T();
+ }
+ return array;
+ }
+
+ // Only use makeBytesAlignedTo if none of the typed variants are practical to use.
+ void* makeBytesAlignedTo(size_t size, size_t align) {
+ auto objStart = this->allocObject(SkTo<uint32_t>(size), SkTo<uint32_t>(align));
+ fCursor = objStart + size;
+ return objStart;
+ }
+
+ // Destroy all allocated objects, free any heap allocations.
+ void reset();
+
+private:
+ using Footer = int64_t;
+ using FooterAction = char* (char*);
+
+ static char* SkipPod(char* footerEnd);
+ static void RunDtorsOnBlock(char* footerEnd);
+ static char* NextBlock(char* footerEnd);
+
+ void installFooter(FooterAction* releaser, uint32_t padding);
+ void installUint32Footer(FooterAction* action, uint32_t value, uint32_t padding);
+ void installPtrFooter(FooterAction* action, char* ptr, uint32_t padding);
+
+ void ensureSpace(uint32_t size, uint32_t alignment);
+
+ char* allocObject(uint32_t size, uint32_t alignment) {
+ uintptr_t mask = alignment - 1;
+ uintptr_t alignedOffset = (~reinterpret_cast<uintptr_t>(fCursor) + 1) & mask;
+ uintptr_t totalSize = size + alignedOffset;
+ if (totalSize < size) {
+ SK_ABORT("The total size of allocation overflowed uintptr_t.");
+ }
+ if (totalSize > static_cast<uintptr_t>(fEnd - fCursor)) {
+ this->ensureSpace(size, alignment);
+ alignedOffset = (~reinterpret_cast<uintptr_t>(fCursor) + 1) & mask;
+ }
+ return fCursor + alignedOffset;
+ }
+
+ char* allocObjectWithFooter(uint32_t sizeIncludingFooter, uint32_t alignment);
+
+ template <typename T>
+ char* commonArrayAlloc(uint32_t count) {
+ char* objStart;
+ SkASSERT_RELEASE(count <= std::numeric_limits<uint32_t>::max() / sizeof(T));
+ uint32_t arraySize = SkTo<uint32_t>(count * sizeof(T));
+ uint32_t alignment = SkTo<uint32_t>(alignof(T));
+
+ if (skstd::is_trivially_destructible<T>::value) {
+ objStart = this->allocObject(arraySize, alignment);
+ fCursor = objStart + arraySize;
+ } else {
+ constexpr uint32_t overhead = sizeof(Footer) + sizeof(uint32_t);
+ SkASSERT_RELEASE(arraySize <= std::numeric_limits<uint32_t>::max() - overhead);
+ uint32_t totalSize = arraySize + overhead;
+ objStart = this->allocObjectWithFooter(totalSize, alignment);
+
+ // Can never be UB because max value is alignof(T).
+ uint32_t padding = SkTo<uint32_t>(objStart - fCursor);
+
+ // Advance to end of array to install footer.
+ fCursor = objStart + arraySize;
+ this->installUint32Footer(
+ [](char* footerEnd) {
+ char* objEnd = footerEnd - (sizeof(Footer) + sizeof(uint32_t));
+ uint32_t count;
+ memmove(&count, objEnd, sizeof(uint32_t));
+ char* objStart = objEnd - count * sizeof(T);
+ T* array = (T*) objStart;
+ for (uint32_t i = 0; i < count; i++) {
+ array[i].~T();
+ }
+ return objStart;
+ },
+ SkTo<uint32_t>(count),
+ padding);
+ }
+
+ return objStart;
+ }
+
+ char* fDtorCursor;
+ char* fCursor;
+ char* fEnd;
+ char* const fFirstBlock;
+ const uint32_t fFirstSize;
+ const uint32_t fExtraSize;
+
+ // Track some useful stats. Stats are tracked only when fTotalSlop is >= 0.
+ uint32_t fTotalAlloc { 0};
+ int32_t fTotalSlop {-1};
+
+ // Use the Fibonacci sequence as the growth factor for block size. The size of the block
+ // allocated is fFib0 * fExtraSize. Using 2 ^ n * fExtraSize had too much slop for Android.
+ uint32_t fFib0 {1}, fFib1 {1};
+};
+
+// Helper for defining allocators with inline/reserved storage.
+// For argument declarations, stick to the base type (SkArenaAlloc).
+template <size_t InlineStorageSize>
+class SkSTArenaAlloc : public SkArenaAlloc {
+public:
+ explicit SkSTArenaAlloc(size_t extraSize = InlineStorageSize, Tracking tracking = kDontTrack)
+ : INHERITED(fInlineStorage, InlineStorageSize, extraSize, tracking) {}
+
+private:
+ char fInlineStorage[InlineStorageSize];
+
+ using INHERITED = SkArenaAlloc;
+};
+
+#endif // SkArenaAlloc_DEFINED
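
Pulling the header comment's examples together into one place, a brief usage sketch (Node, the block sizes, and example() are placeholders, not from this commit):

#include "SkArenaAlloc.h"

struct Node { Node* next = nullptr; int value = 0; };

void example() {
    // Allocate out of a stack block first; overflow into 1 KB heap blocks.
    char block[256];
    SkArenaAlloc arena(block, sizeof(block), 1024);

    Node* n = arena.make<Node>();          // single object
    int* ints = arena.makeArray<int>(16);  // value-initialized array
    n->value = ints[0];

    arena.reset();  // destroys everything allocated so far

    // SkSTArenaAlloc bundles the initial block with the allocator itself.
    SkSTArenaAlloc<256> stArena;
    Node* m = stArena.make<Node>();
    (void)m;
}  // remaining objects are destroyed when the arenas go out of scope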
diff --git a/include/private/SkDeferredDisplayList.h b/include/private/SkDeferredDisplayList.h
index 4d379636a9..00afd85df1 100644
--- a/include/private/SkDeferredDisplayList.h
+++ b/include/private/SkDeferredDisplayList.h
@@ -11,6 +11,8 @@
#include "SkSurfaceCharacterization.h"
#if SK_SUPPORT_GPU
+#include <map>
+#include "GrCCPerOpListPaths.h"
#include "GrOpList.h"
#endif
@@ -22,7 +24,7 @@ class SkSurface;
*
* TODO: we probably need to expose this class so users can query it for memory usage.
*/
-class SkDeferredDisplayList {
+class SK_API SkDeferredDisplayList {
public:
#if SK_SUPPORT_GPU
@@ -42,6 +44,7 @@ public:
SkDeferredDisplayList(const SkSurfaceCharacterization& characterization,
sk_sp<LazyProxyData>);
+ ~SkDeferredDisplayList();
const SkSurfaceCharacterization& characterization() const {
return fCharacterization;
@@ -54,7 +57,11 @@ private:
const SkSurfaceCharacterization fCharacterization;
#if SK_SUPPORT_GPU
+ // This needs to match the same type in GrCoverageCountingPathRenderer.h
+ using PendingPathsMap = std::map<uint32_t, sk_sp<GrCCPerOpListPaths>>;
+
SkTArray<sk_sp<GrOpList>> fOpLists;
+ PendingPathsMap fPendingPaths; // This is the path data from CCPR.
#endif
sk_sp<LazyProxyData> fLazyProxyData;
};
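
For context, the pending-path map added above travels with the DDL from recording to replay; CCPR moves its per-opList paths into the DDL when recording finishes and reclaims them when the DDL is drawn. A hedged sketch of that round trip using the public DDL entry points of this era (replay() and the clip/draw calls are placeholders; error handling omitted):

// Hypothetical sketch; not part of the commit.
#include <memory>
#include "SkCanvas.h"
#include "SkDeferredDisplayListRecorder.h"
#include "SkSurface.h"
#include "SkSurfaceCharacterization.h"

void replay(SkSurface* surface, const SkSurfaceCharacterization& c) {
    SkDeferredDisplayListRecorder recorder(c);
    SkCanvas* canvas = recorder.getCanvas();

    SkPath clip;
    clip.addOval(SkRect::MakeWH(50, 50));
    canvas->clipPath(clip, /*doAntiAlias=*/true);  // may be handled by CCPR
    canvas->drawRect(SkRect::MakeWH(10, 10), SkPaint());

    // The DDL now owns the recorded op lists and any CCPR pending paths.
    std::unique_ptr<SkDeferredDisplayList> ddl = recorder.detach();

    // Replaying hands both back to the GPU backend.
    surface->draw(ddl.get());
}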
diff --git a/include/private/SkTInternalLList.h b/include/private/SkTInternalLList.h
new file mode 100644
index 0000000000..2f43f1c1eb
--- /dev/null
+++ b/include/private/SkTInternalLList.h
@@ -0,0 +1,318 @@
+/*
+ * Copyright 2012 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkTInternalLList_DEFINED
+#define SkTInternalLList_DEFINED
+
+#include "SkTypes.h"
+
+/**
+ * Helper class that automatically initializes (to nullptr) the pointers created for the doubly
+ * linked list.
+ */
+template <typename T> class SkPtrWrapper {
+ public:
+ SkPtrWrapper() : fPtr(nullptr) {}
+ SkPtrWrapper& operator =(T* ptr) { fPtr = ptr; return *this; }
+ operator T*() const { return fPtr; }
+ T* operator->() { return fPtr; }
+ private:
+ T* fPtr;
+};
+
+
+/**
+ * This macro creates the member variables required by the SkTInternalLList class. It should be
+ * placed in the private section of any class that will be stored in a doubly linked list.
+ */
+#define SK_DECLARE_INTERNAL_LLIST_INTERFACE(ClassName) \
+ friend class SkTInternalLList<ClassName>; \
+ /* back pointer to the owning list - for debugging */ \
+ SkDEBUGCODE(SkPtrWrapper<SkTInternalLList<ClassName> > fList;) \
+ SkPtrWrapper<ClassName> fPrev; \
+ SkPtrWrapper<ClassName> fNext
+
+/**
+ * This class implements a templated internal doubly linked list data structure.
+ */
+template <class T> class SkTInternalLList : SkNoncopyable {
+public:
+ SkTInternalLList()
+ : fHead(nullptr)
+ , fTail(nullptr) {
+ }
+
+ void reset() {
+ fHead = nullptr;
+ fTail = nullptr;
+ }
+
+ void remove(T* entry) {
+ SkASSERT(fHead && fTail);
+ SkASSERT(this->isInList(entry));
+
+ T* prev = entry->fPrev;
+ T* next = entry->fNext;
+
+ if (prev) {
+ prev->fNext = next;
+ } else {
+ fHead = next;
+ }
+ if (next) {
+ next->fPrev = prev;
+ } else {
+ fTail = prev;
+ }
+
+ entry->fPrev = nullptr;
+ entry->fNext = nullptr;
+
+#ifdef SK_DEBUG
+ entry->fList = nullptr;
+#endif
+ }
+
+ void addToHead(T* entry) {
+ SkASSERT(nullptr == entry->fPrev && nullptr == entry->fNext);
+ SkASSERT(nullptr == entry->fList);
+
+ entry->fPrev = nullptr;
+ entry->fNext = fHead;
+ if (fHead) {
+ fHead->fPrev = entry;
+ }
+ fHead = entry;
+ if (nullptr == fTail) {
+ fTail = entry;
+ }
+
+#ifdef SK_DEBUG
+ entry->fList = this;
+#endif
+ }
+
+ void addToTail(T* entry) {
+ SkASSERT(nullptr == entry->fPrev && nullptr == entry->fNext);
+ SkASSERT(nullptr == entry->fList);
+
+ entry->fPrev = fTail;
+ entry->fNext = nullptr;
+ if (fTail) {
+ fTail->fNext = entry;
+ }
+ fTail = entry;
+ if (nullptr == fHead) {
+ fHead = entry;
+ }
+
+#ifdef SK_DEBUG
+ entry->fList = this;
+#endif
+ }
+
+ /**
+ * Inserts a new list entry before an existing list entry. The new entry must not already be
+ * a member of this or any other list. If existingEntry is NULL then the new entry is added
+ * at the tail.
+ */
+ void addBefore(T* newEntry, T* existingEntry) {
+ SkASSERT(newEntry);
+
+ if (nullptr == existingEntry) {
+ this->addToTail(newEntry);
+ return;
+ }
+
+ SkASSERT(this->isInList(existingEntry));
+ newEntry->fNext = existingEntry;
+ T* prev = existingEntry->fPrev;
+ existingEntry->fPrev = newEntry;
+ newEntry->fPrev = prev;
+ if (nullptr == prev) {
+ SkASSERT(fHead == existingEntry);
+ fHead = newEntry;
+ } else {
+ prev->fNext = newEntry;
+ }
+#ifdef SK_DEBUG
+ newEntry->fList = this;
+#endif
+ }
+
+ /**
+ * Inserts a new list entry after an existing list entry. The new entry must not already be
+ * a member of this or any other list. If existingEntry is NULL then the new entry is added
+ * at the head.
+ */
+ void addAfter(T* newEntry, T* existingEntry) {
+ SkASSERT(newEntry);
+
+ if (nullptr == existingEntry) {
+ this->addToHead(newEntry);
+ return;
+ }
+
+ SkASSERT(this->isInList(existingEntry));
+ newEntry->fPrev = existingEntry;
+ T* next = existingEntry->fNext;
+ existingEntry->fNext = newEntry;
+ newEntry->fNext = next;
+ if (nullptr == next) {
+ SkASSERT(fTail == existingEntry);
+ fTail = newEntry;
+ } else {
+ next->fPrev = newEntry;
+ }
+#ifdef SK_DEBUG
+ newEntry->fList = this;
+#endif
+ }
+
+ void concat(SkTInternalLList&& list) {
+ if (list.isEmpty()) {
+ return;
+ }
+
+ list.fHead->fPrev = fTail;
+ if (!fHead) {
+ SkASSERT(!list.fHead->fPrev);
+ fHead = list.fHead;
+ } else {
+ SkASSERT(fTail);
+ fTail->fNext = list.fHead;
+ }
+ fTail = list.fTail;
+
+#ifdef SK_DEBUG
+ for (T* node = list.fHead; node; node = node->fNext) {
+ SkASSERT(node->fList == &list);
+ node->fList = this;
+ }
+#endif
+
+ list.fHead = list.fTail = nullptr;
+ }
+
+ bool isEmpty() const {
+ SkASSERT(SkToBool(fHead) == SkToBool(fTail));
+ return !fHead;
+ }
+
+ T* head() { return fHead; }
+ T* tail() { return fTail; }
+
+ class Iter {
+ public:
+ enum IterStart {
+ kHead_IterStart,
+ kTail_IterStart
+ };
+
+ Iter() : fCurr(nullptr) {}
+ Iter(const Iter& iter) : fCurr(iter.fCurr) {}
+ Iter& operator= (const Iter& iter) { fCurr = iter.fCurr; return *this; }
+
+ T* init(const SkTInternalLList& list, IterStart startLoc) {
+ if (kHead_IterStart == startLoc) {
+ fCurr = list.fHead;
+ } else {
+ SkASSERT(kTail_IterStart == startLoc);
+ fCurr = list.fTail;
+ }
+
+ return fCurr;
+ }
+
+ T* get() { return fCurr; }
+
+ /**
+ * Return the next/previous element in the list or NULL if at the end.
+ */
+ T* next() {
+ if (nullptr == fCurr) {
+ return nullptr;
+ }
+
+ fCurr = fCurr->fNext;
+ return fCurr;
+ }
+
+ T* prev() {
+ if (nullptr == fCurr) {
+ return nullptr;
+ }
+
+ fCurr = fCurr->fPrev;
+ return fCurr;
+ }
+
+ /**
+ * C++11 range-for interface.
+ */
+ bool operator!=(const Iter& that) { return fCurr != that.fCurr; }
+ T* operator*() { return this->get(); }
+ void operator++() { this->next(); }
+
+ private:
+ T* fCurr;
+ };
+
+ Iter begin() const {
+ Iter iter;
+ iter.init(*this, Iter::kHead_IterStart);
+ return iter;
+ }
+
+ Iter end() const { return Iter(); }
+
+#ifdef SK_DEBUG
+ void validate() const {
+ SkASSERT(!fHead == !fTail);
+ Iter iter;
+ for (T* item = iter.init(*this, Iter::kHead_IterStart); item; item = iter.next()) {
+ SkASSERT(this->isInList(item));
+ if (nullptr == item->fPrev) {
+ SkASSERT(fHead == item);
+ } else {
+ SkASSERT(item->fPrev->fNext == item);
+ }
+ if (nullptr == item->fNext) {
+ SkASSERT(fTail == item);
+ } else {
+ SkASSERT(item->fNext->fPrev == item);
+ }
+ }
+ }
+
+ /**
+ * Debugging-only method that uses the list back pointer to check if 'entry' is indeed in 'this'
+ * list.
+ */
+ bool isInList(const T* entry) const {
+ return entry->fList == this;
+ }
+
+ /**
+ * Debugging-only method that laboriously counts the list entries.
+ */
+ int countEntries() const {
+ int count = 0;
+ for (T* entry = fHead; entry; entry = entry->fNext) {
+ ++count;
+ }
+ return count;
+ }
+#endif // SK_DEBUG
+
+private:
+ T* fHead;
+ T* fTail;
+
+ typedef SkNoncopyable INHERITED;
+};
+
+#endif
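
A brief usage sketch to close: a class opts in by placing the macro in its private section, after which instances can be linked without any per-node allocation (Op and example() are placeholders, not from this commit):

#include "SkTInternalLList.h"

class Op {
public:
    int fId = 0;
private:
    SK_DECLARE_INTERNAL_LLIST_INTERFACE(Op);
};

void example() {
    SkTInternalLList<Op> list;
    Op a, b, c;

    list.addToTail(&a);
    list.addToTail(&b);
    list.addBefore(&c, &b);  // list order is now: a, c, b

    for (Op* op : list) {    // C++11 range-for via Iter
        op->fId++;
    }

    list.remove(&c);  // detaches c and clears its prev/next pointers
    SkASSERT(2 == list.countEntries());
}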