-rw-r--r--   gn/gpu.gni                                 2
-rw-r--r--   gn/tests.gni                               1
-rw-r--r--   include/private/GrRenderTargetProxy.h      4
-rw-r--r--   include/private/GrSurfaceProxy.h           8
-rw-r--r--   include/private/GrTextureProxy.h           2
-rw-r--r--   src/gpu/GrRenderTargetProxy.cpp           16
-rw-r--r--   src/gpu/GrResourceAllocator.cpp          115
-rw-r--r--   src/gpu/GrResourceAllocator.h            127
-rw-r--r--   src/gpu/GrSurfaceProxy.cpp                42
-rw-r--r--   src/gpu/GrSurfaceProxyPriv.h               9
-rw-r--r--   src/gpu/GrTextureProxy.cpp                11
-rw-r--r--   src/gpu/GrTextureRenderTargetProxy.cpp    17
-rw-r--r--   src/gpu/GrTextureRenderTargetProxy.h       1
-rw-r--r--   tests/ResourceAllocatorTest.cpp           51
14 files changed, 393 insertions, 13 deletions
diff --git a/gn/gpu.gni b/gn/gpu.gni
index 06f990866d..4188131619 100644
--- a/gn/gpu.gni
+++ b/gn/gpu.gni
@@ -161,6 +161,8 @@ skia_gpu_sources = [
"$_src/gpu/GrPathRenderingRenderTargetContext.h",
"$_src/gpu/GrRenderTargetOpList.cpp",
"$_src/gpu/GrRenderTargetOpList.h",
+ "$_src/gpu/GrResourceAllocator.cpp",
+ "$_src/gpu/GrResourceAllocator.h",
"$_src/gpu/GrResourceCache.cpp",
"$_src/gpu/GrResourceCache.h",
"$_src/gpu/GrResourceHandle.h",
diff --git a/gn/tests.gni b/gn/tests.gni
index fde7df64eb..37ccfca228 100644
--- a/gn/tests.gni
+++ b/gn/tests.gni
@@ -187,6 +187,7 @@ tests_sources = [
"$_tests/RefDictTest.cpp",
"$_tests/RegionTest.cpp",
"$_tests/RenderTargetContextTest.cpp",
+ "$_tests/ResourceAllocatorTest.cpp",
"$_tests/ResourceCacheTest.cpp",
"$_tests/RoundRectTest.cpp",
"$_tests/RRectInPathTest.cpp",
diff --git a/include/private/GrRenderTargetProxy.h b/include/private/GrRenderTargetProxy.h
index d7c4e06b80..61c3ecb8d7 100644
--- a/include/private/GrRenderTargetProxy.h
+++ b/include/private/GrRenderTargetProxy.h
@@ -23,7 +23,7 @@ public:
const GrRenderTargetProxy* asRenderTargetProxy() const override { return this; }
// Actually instantiate the backing rendertarget, if necessary.
- bool instantiate(GrResourceProvider* resourceProvider) override;
+ bool instantiate(GrResourceProvider*) override;
GrFSAAType fsaaType() const {
if (!fSampleCnt) {
@@ -68,6 +68,8 @@ protected:
// Wrapped version
GrRenderTargetProxy(sk_sp<GrSurface>);
+ sk_sp<GrSurface> createSurface(GrResourceProvider*) const override;
+
private:
size_t onUninstantiatedGpuMemorySize() const override;
diff --git a/include/private/GrSurfaceProxy.h b/include/private/GrSurfaceProxy.h
index 7aa944793a..f899f75abf 100644
--- a/include/private/GrSurfaceProxy.h
+++ b/include/private/GrSurfaceProxy.h
@@ -329,6 +329,7 @@ public:
bool isWrapped_ForTesting() const;
+ SkDEBUGCODE(bool isInstantiated() const { return SkToBool(fTarget); })
SkDEBUGCODE(void validate(GrContext*) const;)
// Provides access to functions that aren't part of the public API.
@@ -367,6 +368,13 @@ protected:
return this->internalHasPendingWrite();
}
+ virtual sk_sp<GrSurface> createSurface(GrResourceProvider*) const = 0;
+ void assign(sk_sp<GrSurface> surface);
+
+ sk_sp<GrSurface> createSurfaceImpl(GrResourceProvider*, int sampleCnt,
+ GrSurfaceFlags flags, bool isMipMapped,
+ SkDestinationSurfaceColorMode mipColorMode) const;
+
bool instantiateImpl(GrResourceProvider* resourceProvider, int sampleCnt,
GrSurfaceFlags flags, bool isMipMapped,
SkDestinationSurfaceColorMode mipColorMode);
diff --git a/include/private/GrTextureProxy.h b/include/private/GrTextureProxy.h
index 2736e61c12..60be19ecef 100644
--- a/include/private/GrTextureProxy.h
+++ b/include/private/GrTextureProxy.h
@@ -49,6 +49,8 @@ protected:
SkDestinationSurfaceColorMode mipColorMode() const { return fMipColorMode; }
+ sk_sp<GrSurface> createSurface(GrResourceProvider*) const override;
+
private:
bool fIsMipMapped;
SkDestinationSurfaceColorMode fMipColorMode;
diff --git a/src/gpu/GrRenderTargetProxy.cpp b/src/gpu/GrRenderTargetProxy.cpp
index 79d083d955..eef5f3ec31 100644
--- a/src/gpu/GrRenderTargetProxy.cpp
+++ b/src/gpu/GrRenderTargetProxy.cpp
@@ -61,6 +61,22 @@ bool GrRenderTargetProxy::instantiate(GrResourceProvider* resourceProvider) {
return true;
}
+sk_sp<GrSurface> GrRenderTargetProxy::createSurface(GrResourceProvider* resourceProvider) const {
+ static constexpr GrSurfaceFlags kFlags = kRenderTarget_GrSurfaceFlag;
+
+ sk_sp<GrSurface> surface = this->createSurfaceImpl(resourceProvider, fSampleCnt, kFlags,
+ /* isMipped = */ false,
+ SkDestinationSurfaceColorMode::kLegacy);
+ if (!surface) {
+ return nullptr;
+ }
+ SkASSERT(surface->asRenderTarget());
+ // Check that our a priori computation matched the ultimate reality
+ SkASSERT(fRenderTargetFlags == surface->asRenderTarget()->renderTargetPriv().flags());
+
+ return surface;
+}
+
int GrRenderTargetProxy::worstCaseWidth() const {
if (fTarget) {
return fTarget->width();
diff --git a/src/gpu/GrResourceAllocator.cpp b/src/gpu/GrResourceAllocator.cpp
new file mode 100644
index 0000000000..f57e088dd0
--- /dev/null
+++ b/src/gpu/GrResourceAllocator.cpp
@@ -0,0 +1,115 @@
+/*
+ * Copyright 2017 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "GrResourceAllocator.h"
+
+#include "GrSurfaceProxy.h"
+#include "GrSurfaceProxyPriv.h"
+
+void GrResourceAllocator::addInterval(GrSurfaceProxy* proxy,
+ unsigned int start, unsigned int end) {
+ SkASSERT(start <= end);
+ SkASSERT(!fAssigned); // We shouldn't be adding any intervals after (or during) assignment
+
+ if (Interval* intvl = fIntvlHash.find(proxy->uniqueID().asUInt())) {
+ // Revise the interval for an existing use
+ SkASSERT(intvl->fEnd < start);
+ intvl->fEnd = end;
+ return;
+ }
+
+ // TODO: given the usage pattern an arena allocation scheme would work well here
+ Interval* newIntvl = new Interval(proxy, start, end);
+
+ fIntvlList.insertByIncreasingStart(newIntvl);
+ fIntvlHash.add(newIntvl);
+}
+
+GrResourceAllocator::Interval* GrResourceAllocator::IntervalList::popHead() {
+ Interval* temp = fHead;
+ if (temp) {
+ fHead = temp->fNext;
+ }
+ return temp;
+}
+
+// TODO: fuse this with insertByIncreasingEnd
+void GrResourceAllocator::IntervalList::insertByIncreasingStart(Interval* intvl) {
+ if (!fHead) {
+ intvl->fNext = nullptr;
+ fHead = intvl;
+ } else if (intvl->fStart <= fHead->fStart) {
+ intvl->fNext = fHead;
+ fHead = intvl;
+ } else {
+ Interval* prev = fHead;
+ Interval* next = prev->fNext;
+ for (; next && intvl->fStart > next->fStart; prev = next, next = next->fNext) {
+ }
+ intvl->fNext = next;
+ prev->fNext = intvl;
+ }
+}
+
+// TODO: fuse this with insertByIncreasingStart
+void GrResourceAllocator::IntervalList::insertByIncreasingEnd(Interval* intvl) {
+ if (!fHead) {
+ intvl->fNext = nullptr;
+ fHead = intvl;
+ } else if (intvl->fEnd <= fHead->fEnd) {
+ intvl->fNext = fHead;
+ fHead = intvl;
+ } else {
+ Interval* prev = fHead;
+ Interval* next = prev->fNext;
+ for (; next && intvl->fEnd > next->fEnd; prev = next, next = next->fNext) {
+ }
+ intvl->fNext = next;
+ prev->fNext = intvl;
+ }
+}
+
+// 'surface' can be reused. Add it back to the free pool.
+void GrResourceAllocator::freeUpSurface(GrSurface* surface) {
+ // TODO: add free pool
+}
+
+// First try to reuse one of the recently allocated/used GrSurfaces in the free pool.
+// If we can't find a useable one, create a new one.
+// TODO: handle being overbudget
+sk_sp<GrSurface> GrResourceAllocator::findSurfaceFor(GrSurfaceProxy* proxy) {
+ // TODO: add free pool
+
+ // Try to grab one from the resource cache
+ return proxy->priv().createSurface(fResourceProvider);
+}
+
+// Remove any intervals that end before the current index. Return their GrSurfaces
+// to the free pool.
+void GrResourceAllocator::expire(unsigned int curIndex) {
+ while (!fActiveIntvls.empty() && fActiveIntvls.peekHead()->fEnd < curIndex) {
+ Interval* temp = fActiveIntvls.popHead();
+ this->freeUpSurface(temp->fProxy->priv().peekSurface());
+ delete temp;
+ }
+}
+
+void GrResourceAllocator::assign() {
+ fIntvlHash.reset(); // we don't need this anymore
+ SkDEBUGCODE(fAssigned = true;)
+
+ while (Interval* cur = fIntvlList.popHead()) {
+ this->expire(cur->fStart);
+ // TODO: add over budget handling here?
+ sk_sp<GrSurface> surface = this->findSurfaceFor(cur->fProxy);
+ if (surface) {
+ cur->fProxy->priv().assign(std::move(surface));
+ }
+ // TODO: handle resource allocation failure upstack
+ fActiveIntvls.insertByIncreasingEnd(cur);
+ }
+}
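
(A note for readers new to this code: the assign() loop above is essentially a linear-scan allocation pass over op indices. The standalone sketch below shows the same expire/allocate/activate pattern with plain integers standing in for proxies and surfaces; all names are illustrative, and the free-pool reuse step is the part this CL still leaves as a TODO.)

    #include <cstdio>
    #include <list>
    #include <vector>

    // Illustrative stand-ins for GrSurfaceProxy / GrSurface.
    struct Interval { int proxyID; unsigned start, end; int surfaceID = -1; };

    int main() {
        // Usage intervals, pre-sorted by increasing start (as fIntvlList is).
        std::vector<Interval> intervals = {{0, 0, 4}, {1, 1, 2}, {2, 5, 6}};

        std::list<Interval> active;    // live intervals, kept sorted by increasing end
        std::vector<int> freePool;     // surface IDs available for reuse
        int nextSurfaceID = 0;

        for (Interval& cur : intervals) {
            // "expire": return surfaces whose intervals ended before cur.start.
            while (!active.empty() && active.front().end < cur.start) {
                freePool.push_back(active.front().surfaceID);
                active.pop_front();
            }
            // "findSurfaceFor": prefer the free pool, otherwise create a new surface.
            if (!freePool.empty()) {
                cur.surfaceID = freePool.back();
                freePool.pop_back();
            } else {
                cur.surfaceID = nextSurfaceID++;
            }
            // Insert into the active list, keeping it sorted by increasing end.
            auto it = active.begin();
            while (it != active.end() && it->end <= cur.end) { ++it; }
            active.insert(it, cur);
            std::printf("proxy %d -> surface %d\n", cur.proxyID, cur.surfaceID);
        }
        return 0;
    }

With these intervals, proxies 0 and 1 overlap and get distinct surfaces, while proxy 2 starts after both have expired and reuses surface 0 — the behavior the free-pool TODOs are aiming for.
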
diff --git a/src/gpu/GrResourceAllocator.h b/src/gpu/GrResourceAllocator.h
new file mode 100644
index 0000000000..300aa25b3b
--- /dev/null
+++ b/src/gpu/GrResourceAllocator.h
@@ -0,0 +1,127 @@
+/*
+ * Copyright 2017 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrResourceAllocator_DEFINED
+#define GrResourceAllocator_DEFINED
+
+#include "GrSurfaceProxy.h"
+#include "SkTDynamicHash.h"
+
+class GrResourceProvider;
+
+/*
+ * The ResourceAllocator explicitly distributes GPU resources at flush time. It operates by
+ * being given the usage intervals of the various proxies. It keeps these intervals in a singly
+ * linked list sorted by increasing start index. (It also maintains a hash table from proxyID
+ * to interval to find proxy reuse). When it comes time to allocate the resources it
+ * traverses the sorted list and:
+ * removes intervals from the active list that have completed (returning their GrSurfaces
+ * to the free pool)
+ *
+ * allocates a new resource (preferably from the free pool) for the new interval
+ * adds the new interval to the active list (that is sorted by increasing end index)
+ *
+ * Note: the op indices (used in the usage intervals) come from the order of the ops in
+ * their opLists after the opList DAG has been linearized.
+ */
+class GrResourceAllocator {
+public:
+ GrResourceAllocator(GrResourceProvider* resourceProvider)
+ : fResourceProvider(resourceProvider) {
+ }
+
+ unsigned int curOp() const { return fNumOps; }
+ void incOps() { fNumOps++; }
+ unsigned int numOps() const { return fNumOps; }
+
+ // Add a usage interval from start to end inclusive. This is usually used for renderTargets.
+ // If an interval already exists for this proxy it will be expanded to include the new range.
+ void addInterval(GrSurfaceProxy*, unsigned int start, unsigned int end);
+
+ // Add an interval that spans just the current op. Usually this is for texture uses.
+ // If an interval already exists for this proxy it will be expanded to include the new op.
+ void addInterval(GrSurfaceProxy* proxy) {
+ this->addInterval(proxy, fNumOps, fNumOps);
+ }
+
+ void assign();
+
+private:
+ class Interval;
+
+ // Remove dead intervals from the active list
+ void expire(unsigned int curIndex);
+
+ // These two methods wrap the interactions with the free pool
+ void freeUpSurface(GrSurface* surface);
+ sk_sp<GrSurface> findSurfaceFor(GrSurfaceProxy* proxy);
+
+ struct UniqueHashTraits {
+ static const GrUniqueKey& GetKey(const GrSurface& s) { return s.getUniqueKey(); }
+
+ static uint32_t Hash(const GrUniqueKey& key) { return key.hash(); }
+ };
+ typedef SkTDynamicHash<GrSurface, GrUniqueKey, UniqueHashTraits> UniqueHash;
+ typedef SkTDynamicHash<Interval, unsigned int> IntvlHash;
+
+ class Interval {
+ public:
+ Interval(GrSurfaceProxy* proxy, unsigned int start, unsigned int end)
+ : fProxy(proxy)
+ , fProxyID(proxy->uniqueID().asUInt())
+ , fStart(start)
+ , fEnd(end)
+ , fNext(nullptr) {
+ SkASSERT(proxy);
+ }
+
+ // for SkTDynamicHash
+ static const uint32_t& GetKey(const Interval& intvl) {
+ return intvl.fProxyID;
+ }
+ static uint32_t Hash(const uint32_t& key) { return key; }
+
+ GrSurfaceProxy* fProxy;
+ uint32_t fProxyID; // This is here because DynamicHash requires a reference to the key
+ unsigned int fStart;
+ unsigned int fEnd;
+ Interval* fNext;
+ };
+
+ class IntervalList {
+ public:
+ IntervalList() = default;
+ ~IntervalList() {
+ while (fHead) {
+ Interval* temp = fHead;
+ fHead = temp->fNext;
+ delete temp;
+ }
+ }
+
+ bool empty() const { return !SkToBool(fHead); }
+ const Interval* peekHead() const { return fHead; }
+ Interval* popHead();
+ void insertByIncreasingStart(Interval*);
+ void insertByIncreasingEnd(Interval*);
+
+ private:
+ Interval* fHead = nullptr;
+ };
+
+ GrResourceProvider* fResourceProvider;
+ UniqueHash fFreePool; // Recently created/used GrSurfaces
+ IntvlHash fIntvlHash; // All the intervals, hashed by proxyID
+
+ IntervalList fIntvlList; // All the intervals sorted by increasing start
+ IntervalList fActiveIntvls; // List of live intervals during assignment
+ // (sorted by increasing end)
+ unsigned int fNumOps = 0;
+ SkDEBUGCODE(bool fAssigned = false;)
+};
+
+#endif // GrResourceAllocator_DEFINED
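
(The fFreePool member and the freeUpSurface()/findSurfaceFor() TODOs in GrResourceAllocator.cpp sketch out the intended reuse path: expired surfaces are parked in a hash, and later intervals try to pull a compatible one before creating anew. Since that part is unimplemented in this CL, the following is only a generic, non-Skia sketch of the release/acquire shape; SurfaceDesc and every other name are hypothetical, whereas the real pool is keyed on GrUniqueKey via UniqueHashTraits above.)

    #include <functional>
    #include <memory>
    #include <unordered_map>

    // Hypothetical stand-ins; a real pool would key on whatever makes two
    // surfaces interchangeable (size, config, renderability, sample count, ...).
    struct SurfaceDesc { int width, height, config; };
    struct Surface { SurfaceDesc desc; };

    struct DescHash {
        size_t operator()(const SurfaceDesc& d) const {
            return std::hash<int>()(d.width) ^ (std::hash<int>()(d.height) << 1) ^
                   (std::hash<int>()(d.config) << 2);
        }
    };
    struct DescEq {
        bool operator()(const SurfaceDesc& a, const SurfaceDesc& b) const {
            return a.width == b.width && a.height == b.height && a.config == b.config;
        }
    };

    class FreePool {
    public:
        // Analogous to freeUpSurface(): an expired interval hands its surface back.
        void release(std::unique_ptr<Surface> s) {
            SurfaceDesc key = s->desc;
            fPool.emplace(key, std::move(s));
        }
        // Analogous to findSurfaceFor(): reuse a pooled surface if possible,
        // otherwise create a fresh one.
        std::unique_ptr<Surface> acquire(const SurfaceDesc& desc) {
            auto it = fPool.find(desc);
            if (it != fPool.end()) {
                std::unique_ptr<Surface> s = std::move(it->second);
                fPool.erase(it);
                return s;                                    // reused
            }
            return std::make_unique<Surface>(Surface{desc}); // freshly created
        }

    private:
        std::unordered_multimap<SurfaceDesc, std::unique_ptr<Surface>, DescHash, DescEq> fPool;
    };
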
diff --git a/src/gpu/GrSurfaceProxy.cpp b/src/gpu/GrSurfaceProxy.cpp
index abacbcbeca..b07366265c 100644
--- a/src/gpu/GrSurfaceProxy.cpp
+++ b/src/gpu/GrSurfaceProxy.cpp
@@ -40,12 +40,10 @@ GrSurfaceProxy::~GrSurfaceProxy() {
SkASSERT(!fLastOpList);
}
-bool GrSurfaceProxy::instantiateImpl(GrResourceProvider* resourceProvider, int sampleCnt,
- GrSurfaceFlags flags, bool isMipMapped,
- SkDestinationSurfaceColorMode mipColorMode) {
- if (fTarget) {
- return true;
- }
+sk_sp<GrSurface> GrSurfaceProxy::createSurfaceImpl(
+ GrResourceProvider* resourceProvider, int sampleCnt,
+ GrSurfaceFlags flags, bool isMipMapped,
+ SkDestinationSurfaceColorMode mipColorMode) const {
GrSurfaceDesc desc;
desc.fConfig = fConfig;
desc.fWidth = fWidth;
@@ -58,16 +56,22 @@ bool GrSurfaceProxy::instantiateImpl(GrResourceProvider* resourceProvider, int s
desc.fFlags |= kPerformInitialClear_GrSurfaceFlag;
}
+ sk_sp<GrSurface> surface;
if (SkBackingFit::kApprox == fFit) {
- fTarget = resourceProvider->createApproxTexture(desc, fFlags).release();
+ surface.reset(resourceProvider->createApproxTexture(desc, fFlags).release());
} else {
- fTarget = resourceProvider->createTexture(desc, fBudgeted, fFlags).release();
+ surface.reset(resourceProvider->createTexture(desc, fBudgeted, fFlags).release());
}
- if (!fTarget) {
- return false;
+ if (surface) {
+ surface->asTexture()->texturePriv().setMipColorMode(mipColorMode);
}
- fTarget->asTexture()->texturePriv().setMipColorMode(mipColorMode);
+ return surface;
+}
+
+void GrSurfaceProxy::assign(sk_sp<GrSurface> surface) {
+ SkASSERT(!fTarget && surface);
+ fTarget = surface.release();
this->INHERITED::transferRefs();
#ifdef SK_DEBUG
@@ -75,7 +79,22 @@ bool GrSurfaceProxy::instantiateImpl(GrResourceProvider* resourceProvider, int s
SkASSERT(fTarget->gpuMemorySize() <= this->getRawGpuMemorySize_debugOnly());
}
#endif
+}
+
+bool GrSurfaceProxy::instantiateImpl(GrResourceProvider* resourceProvider, int sampleCnt,
+ GrSurfaceFlags flags, bool isMipMapped,
+ SkDestinationSurfaceColorMode mipColorMode) {
+ if (fTarget) {
+ return true;
+ }
+
+ sk_sp<GrSurface> surface = this->createSurfaceImpl(resourceProvider, sampleCnt, flags,
+ isMipMapped, mipColorMode);
+ if (!surface) {
+ return false;
+ }
+
+ this->assign(std::move(surface));
return true;
}
@@ -226,7 +245,6 @@ sk_sp<GrTextureProxy> GrSurfaceProxy::MakeDeferredMipMap(
return GrSurfaceProxy::MakeWrapped(std::move(tex));
}
-
sk_sp<GrTextureProxy> GrSurfaceProxy::MakeWrappedBackend(GrContext* context,
GrBackendTexture& backendTex,
GrSurfaceOrigin origin) {
diff --git a/src/gpu/GrSurfaceProxyPriv.h b/src/gpu/GrSurfaceProxyPriv.h
index b10cb345f7..ca8fffd873 100644
--- a/src/gpu/GrSurfaceProxyPriv.h
+++ b/src/gpu/GrSurfaceProxyPriv.h
@@ -43,6 +43,15 @@ public:
// future when the proxy is actually used/instantiated.
bool hasPendingWrite() const { return fProxy->hasPendingWrite(); }
+ // Create a GrSurface-derived class that meets the requirements (i.e, desc, renderability)
+ // of the GrSurfaceProxy.
+ sk_sp<GrSurface> createSurface(GrResourceProvider* resourceProvider) const {
+ return fProxy->createSurface(resourceProvider);
+ }
+
+ // Assign this proxy the provided GrSurface as its backing surface
+ void assign(sk_sp<GrSurface> surface) { fProxy->assign(std::move(surface)); }
+
// Don't abuse this call!!!!!!!
bool isExact() const { return SkBackingFit::kExact == fProxy->fFit; }
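
(GrSurfaceProxyPriv is Skia's usual "Priv" idiom: a tiny non-owning view that lets internal callers reach a few private GrSurfaceProxy methods — here createSurface() and assign() — without widening the public API. Stripped of Skia specifics, the pattern looks roughly like the sketch below; Widget/WidgetPriv are illustrative names only.)

    class Widget;

    // Non-owning window onto Widget's internal-only operations.
    class WidgetPriv {
    public:
        void internalOnlyOp() const;

    private:
        explicit WidgetPriv(Widget* widget) : fWidget(widget) {}
        WidgetPriv(const WidgetPriv&) = default;

        Widget* fWidget;           // back-pointer; a WidgetPriv is never stored

        friend class Widget;       // only Widget can mint a WidgetPriv
    };

    class Widget {
    public:
        WidgetPriv priv() { return WidgetPriv(this); }

    private:
        void internalOnlyOp() { ++fUses; }
        int fUses = 0;

        friend class WidgetPriv;   // grants the view access to the privates above
    };

    inline void WidgetPriv::internalOnlyOp() const { fWidget->internalOnlyOp(); }

    // Usage (internal code only): widget.priv().internalOnlyOp();
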
diff --git a/src/gpu/GrTextureProxy.cpp b/src/gpu/GrTextureProxy.cpp
index cf1010c6c7..310b36d7c6 100644
--- a/src/gpu/GrTextureProxy.cpp
+++ b/src/gpu/GrTextureProxy.cpp
@@ -33,6 +33,17 @@ bool GrTextureProxy::instantiate(GrResourceProvider* resourceProvider) {
return true;
}
+sk_sp<GrSurface> GrTextureProxy::createSurface(GrResourceProvider* resourceProvider) const {
+ sk_sp<GrSurface> surface = this->createSurfaceImpl(resourceProvider, 0, kNone_GrSurfaceFlags,
+ fIsMipMapped, fMipColorMode);
+ if (!surface) {
+ return nullptr;
+ }
+
+ SkASSERT(surface->asTexture());
+ return surface;
+}
+
void GrTextureProxy::setMipColorMode(SkDestinationSurfaceColorMode colorMode) {
SkASSERT(fTarget || fTarget->asTexture());
diff --git a/src/gpu/GrTextureRenderTargetProxy.cpp b/src/gpu/GrTextureRenderTargetProxy.cpp
index 9c4ccc67c2..a9a56e18c4 100644
--- a/src/gpu/GrTextureRenderTargetProxy.cpp
+++ b/src/gpu/GrTextureRenderTargetProxy.cpp
@@ -56,3 +56,20 @@ bool GrTextureRenderTargetProxy::instantiate(GrResourceProvider* resourceProvide
return true;
}
+
+sk_sp<GrSurface> GrTextureRenderTargetProxy::createSurface(
+ GrResourceProvider* resourceProvider) const {
+ static constexpr GrSurfaceFlags kFlags = kRenderTarget_GrSurfaceFlag;
+
+ sk_sp<GrSurface> surface = this->createSurfaceImpl(resourceProvider, this->numStencilSamples(),
+ kFlags, this->isMipMapped(),
+ this->mipColorMode());
+ if (!surface) {
+ return nullptr;
+ }
+ SkASSERT(surface->asRenderTarget());
+ SkASSERT(surface->asTexture());
+
+ return surface;
+}
+
diff --git a/src/gpu/GrTextureRenderTargetProxy.h b/src/gpu/GrTextureRenderTargetProxy.h
index a9e8b5214e..ed64d9072d 100644
--- a/src/gpu/GrTextureRenderTargetProxy.h
+++ b/src/gpu/GrTextureRenderTargetProxy.h
@@ -33,6 +33,7 @@ private:
GrTextureRenderTargetProxy(sk_sp<GrSurface>);
bool instantiate(GrResourceProvider*) override;
+ sk_sp<GrSurface> createSurface(GrResourceProvider*) const override;
size_t onUninstantiatedGpuMemorySize() const override;
};
diff --git a/tests/ResourceAllocatorTest.cpp b/tests/ResourceAllocatorTest.cpp
new file mode 100644
index 0000000000..2398d68935
--- /dev/null
+++ b/tests/ResourceAllocatorTest.cpp
@@ -0,0 +1,51 @@
+/*
+ * Copyright 2017 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+// Include here to ensure SK_SUPPORT_GPU is set correctly before it is examined.
+#include "SkTypes.h"
+
+#if SK_SUPPORT_GPU
+#include "Test.h"
+
+#include "GrResourceAllocator.h"
+#include "GrSurfaceProxyPriv.h"
+#include "GrTextureProxy.h"
+
+// Basic test that two proxies with overlapping intervals and compatible descriptors are
+// assigned different GrSurfaces.
+static void overlap_test(skiatest::Reporter* reporter, GrResourceProvider* resourceProvider) {
+ GrSurfaceDesc desc;
+ desc.fConfig = kRGBA_8888_GrPixelConfig;
+ desc.fWidth = 64;
+ desc.fHeight = 64;
+
+ sk_sp<GrSurfaceProxy> p1 = GrSurfaceProxy::MakeDeferred(resourceProvider, desc,
+ SkBackingFit::kApprox,
+ SkBudgeted::kNo);
+ sk_sp<GrSurfaceProxy> p2 = GrSurfaceProxy::MakeDeferred(resourceProvider, desc,
+ SkBackingFit::kApprox,
+ SkBudgeted::kNo);
+
+ GrResourceAllocator alloc(resourceProvider);
+
+ alloc.addInterval(p1.get(), 0, 4);
+ alloc.addInterval(p2.get(), 1, 2);
+
+ alloc.assign();
+
+ REPORTER_ASSERT(reporter, p1->priv().peekSurface());
+ REPORTER_ASSERT(reporter, p2->priv().peekSurface());
+ REPORTER_ASSERT(reporter, p1->underlyingUniqueID() != p2->underlyingUniqueID());
+}
+
+DEF_GPUTEST_FOR_ALL_CONTEXTS(ResourceAllocatorTest, reporter, ctxInfo) {
+ GrResourceProvider* resourceProvider = ctxInfo.grContext()->resourceProvider();
+
+ overlap_test(reporter, resourceProvider);
+}
+
+#endif
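
(Purely speculative follow-up, not part of this CL: once the free pool in GrResourceAllocator.cpp is implemented, a natural companion to overlap_test would check that two proxies with compatible descs and non-overlapping intervals end up sharing a surface. The sketch reuses only calls that already appear in the test above; the final assertion is commented out because today each proxy still gets its own surface.)

    static void reuse_test(skiatest::Reporter* reporter, GrResourceProvider* resourceProvider) {
        GrSurfaceDesc desc;
        desc.fConfig = kRGBA_8888_GrPixelConfig;
        desc.fWidth = 64;
        desc.fHeight = 64;

        sk_sp<GrSurfaceProxy> p1 = GrSurfaceProxy::MakeDeferred(resourceProvider, desc,
                                                                SkBackingFit::kApprox,
                                                                SkBudgeted::kNo);
        sk_sp<GrSurfaceProxy> p2 = GrSurfaceProxy::MakeDeferred(resourceProvider, desc,
                                                                SkBackingFit::kApprox,
                                                                SkBudgeted::kNo);

        GrResourceAllocator alloc(resourceProvider);

        alloc.addInterval(p1.get(), 0, 2);
        alloc.addInterval(p2.get(), 3, 5);   // begins after p1's interval has ended

        alloc.assign();

        REPORTER_ASSERT(reporter, p1->priv().peekSurface());
        REPORTER_ASSERT(reporter, p2->priv().peekSurface());
        // With a working free pool this should become ==:
        // REPORTER_ASSERT(reporter, p1->underlyingUniqueID() == p2->underlyingUniqueID());
    }
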