aboutsummaryrefslogtreecommitdiffhomepage
path: root/src/gpu
diff options
context:
space:
mode:
Diffstat (limited to 'src/gpu')
-rw-r--r--src/gpu/GrBackendTextureImageGenerator.cpp198
-rw-r--r--src/gpu/GrBackendTextureImageGenerator.h72
-rw-r--r--src/gpu/GrContext.cpp2
-rw-r--r--src/gpu/GrGpu.h7
-rw-r--r--src/gpu/GrResourceCache.cpp23
-rw-r--r--src/gpu/GrResourceCache.h15
-rw-r--r--src/gpu/gl/GrGLGpu.cpp8
-rw-r--r--src/gpu/gl/GrGLGpu.h2
-rw-r--r--src/gpu/vk/GrVkGpu.cpp19
-rw-r--r--src/gpu/vk/GrVkGpu.h2
-rw-r--r--src/gpu/vk/GrVkImage.h1
11 files changed, 346 insertions, 3 deletions
diff --git a/src/gpu/GrBackendTextureImageGenerator.cpp b/src/gpu/GrBackendTextureImageGenerator.cpp
new file mode 100644
index 0000000000..d92b32f8f6
--- /dev/null
+++ b/src/gpu/GrBackendTextureImageGenerator.cpp
@@ -0,0 +1,198 @@
+/*
+ * Copyright 2017 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "GrBackendTextureImageGenerator.h"
+
+#include "GrContext.h"
+#include "GrContextPriv.h"
+#include "GrResourceCache.h"
+#include "GrResourceProvider.h"
+#include "GrSemaphore.h"
+
+#include "SkGr.h"
+#include "SkMessageBus.h"
+
+GrBackendTextureImageGenerator::RefHelper::~RefHelper() {
+ SkASSERT(nullptr == fBorrowedTexture);
+ SkASSERT(SK_InvalidGenID == fBorrowingContextID);
+
+ // Generator has been freed, and no one is borrowing the texture. Notify the original cache
+ // that it can free the last ref, so it happens on the correct thread.
+ GrGpuResourceFreedMessage msg { fOriginalTexture, fOwningContextID };
+ SkMessageBus<GrGpuResourceFreedMessage>::Post(msg);
+}
+
+// TODO: I copied this from SkImage_Gpu, perhaps we put a version of this somewhere else?
+static GrBackendTexture make_backend_texture_from_handle(GrBackend backend,
+ int width, int height,
+ GrPixelConfig config,
+ GrBackendObject handle) {
+ if (kOpenGL_GrBackend == backend) {
+ GrGLTextureInfo* glInfo = (GrGLTextureInfo*)(handle);
+ return GrBackendTexture(width, height, config, *glInfo);
+ } else {
+ SkASSERT(kVulkan_GrBackend == backend);
+ GrVkImageInfo* vkInfo = (GrVkImageInfo*)(handle);
+ return GrBackendTexture(width, height, *vkInfo);
+ }
+}
+
+std::unique_ptr<SkImageGenerator>
+GrBackendTextureImageGenerator::Make(sk_sp<GrTexture> texture, sk_sp<GrSemaphore> semaphore,
+ SkAlphaType alphaType, sk_sp<SkColorSpace> colorSpace) {
+ if (colorSpace && (!colorSpace->gammaCloseToSRGB() && !colorSpace->gammaIsLinear())) {
+ return nullptr;
+ }
+
+ SkColorType colorType = kUnknown_SkColorType;
+ if (!GrPixelConfigToColorType(texture->config(), &colorType)) {
+ return nullptr;
+ }
+
+ GrContext* context = texture->getContext();
+
+ // Attach our texture to this context's resource cache. This ensures that deletion will happen
+ // in the correct thread/context. This adds the only ref to the texture that will persist from
+ // this point. That ref will be released when the generator's RefHelper is freed.
+ context->getResourceCache()->insertCrossContextGpuResource(texture.get());
+
+ GrBackend backend = context->contextPriv().getBackend();
+ GrBackendTexture backendTexture = make_backend_texture_from_handle(backend,
+ texture->width(),
+ texture->height(),
+ texture->config(),
+ texture->getTextureHandle());
+
+ SkImageInfo info = SkImageInfo::Make(texture->width(), texture->height(), colorType, alphaType,
+ std::move(colorSpace));
+ return std::unique_ptr<SkImageGenerator>(new GrBackendTextureImageGenerator(
+ info, texture.get(), context->uniqueID(), std::move(semaphore), backendTexture));
+}
+
+GrBackendTextureImageGenerator::GrBackendTextureImageGenerator(const SkImageInfo& info,
+ GrTexture* texture,
+ uint32_t owningContextID,
+ sk_sp<GrSemaphore> semaphore,
+ const GrBackendTexture& backendTex)
+ : INHERITED(info)
+ , fRefHelper(new RefHelper(texture, owningContextID))
+ , fSemaphore(std::move(semaphore))
+ , fLastBorrowingContextID(SK_InvalidGenID)
+ , fBackendTexture(backendTex)
+ , fSurfaceOrigin(texture->origin()) { }
+
+GrBackendTextureImageGenerator::~GrBackendTextureImageGenerator() {
+ fRefHelper->unref();
+}
+
+bool GrBackendTextureImageGenerator::onGetPixels(const SkImageInfo& info, void* pixels,
+ size_t rowBytes, SkPMColor ctable[],
+ int* ctableCount) {
+ // TODO: Is there any way to implement this? I don't think so.
+ return false;
+}
+
+bool GrBackendTextureImageGenerator::onGetPixels(const SkImageInfo& info, void* pixels,
+ size_t rowBytes, const Options& opts) {
+
+ // TODO: Is there any way to implement this? I don't think so.
+ return false;
+}
+
+///////////////////////////////////////////////////////////////////////////////////////////////////
+
+#if SK_SUPPORT_GPU
+void GrBackendTextureImageGenerator::ReleaseRefHelper_TextureReleaseProc(void* ctx) {
+ RefHelper* refHelper = static_cast<RefHelper*>(ctx);
+ SkASSERT(refHelper);
+
+ // Release texture so another context can use it
+ refHelper->fBorrowedTexture = nullptr;
+ refHelper->fBorrowingContextID = SK_InvalidGenID;
+ refHelper->unref();
+}
+
+sk_sp<GrTextureProxy> GrBackendTextureImageGenerator::onGenerateTexture(GrContext* context,
+ const SkImageInfo& info,
+ const SkIPoint& origin) {
+ SkASSERT(context);
+
+ if (context->contextPriv().getBackend() != fBackendTexture.backend()) {
+ return nullptr;
+ }
+
+ sk_sp<GrTexture> tex;
+
+ if (fRefHelper->fBorrowingContextID == context->uniqueID()) {
+ // If a client re-draws the same image multiple times, the texture we return will be cached
+ // and re-used. If they draw a subset, though, we may be re-called. In that case, we want
+ // to re-use the borrowed texture we've previously created.
+ tex = sk_ref_sp(fRefHelper->fBorrowedTexture);
+ SkASSERT(tex);
+ } else {
+        // The texture is available or borrowed by another context. Try for exclusive access.
+ uint32_t expectedID = SK_InvalidGenID;
+ if (!fRefHelper->fBorrowingContextID.compare_exchange(&expectedID, context->uniqueID())) {
+ // Some other context is currently borrowing the texture. We aren't allowed to use it.
+ return nullptr;
+ } else {
+ // Wait on a semaphore when a new context has just started borrowing the texture. This
+ // is conservative, but shouldn't be too expensive.
+ if (fSemaphore && fLastBorrowingContextID != context->uniqueID()) {
+ context->getGpu()->waitSemaphore(fSemaphore);
+ fLastBorrowingContextID = context->uniqueID();
+ }
+ }
+
+ // We just gained access to the texture. If we're on the original context, we could use the
+ // original texture, but we'd have no way of detecting that it's no longer in-use. So we
+ // always make a wrapped copy, where the release proc informs us that the context is done
+ // with it. This is unfortunate - we'll have two texture objects referencing the same GPU
+ // object. However, no client can ever see the original texture, so this should be safe.
+ tex = context->resourceProvider()->wrapBackendTexture(fBackendTexture, fSurfaceOrigin,
+ kNone_GrBackendTextureFlag, 0,
+ kBorrow_GrWrapOwnership);
+ if (!tex) {
+ fRefHelper->fBorrowingContextID = SK_InvalidGenID;
+ return nullptr;
+ }
+ fRefHelper->fBorrowedTexture = tex.get();
+
+ tex->setRelease(ReleaseRefHelper_TextureReleaseProc, fRefHelper);
+ fRefHelper->ref();
+ }
+
+ SkASSERT(fRefHelper->fBorrowingContextID == context->uniqueID());
+
+ sk_sp<GrTextureProxy> proxy = GrSurfaceProxy::MakeWrapped(std::move(tex));
+
+ if (0 == origin.fX && 0 == origin.fY &&
+ info.width() == fBackendTexture.width() && info.height() == fBackendTexture.height()) {
+ // If the caller wants the entire texture, we're done
+ return proxy;
+ } else {
+ // Otherwise, make a copy of the requested subset. Make sure our temporary is renderable,
+ // because Vulkan will want to do the copy as a draw.
+ GrSurfaceDesc desc = proxy->desc();
+ desc.fWidth = info.width();
+ desc.fHeight = info.height();
+ desc.fFlags = kRenderTarget_GrSurfaceFlag;
+ sk_sp<GrSurfaceContext> sContext(context->contextPriv().makeDeferredSurfaceContext(
+ desc, SkBackingFit::kExact, SkBudgeted::kYes));
+ if (!sContext) {
+ return nullptr;
+ }
+
+ SkIRect subset = SkIRect::MakeXYWH(origin.fX, origin.fY, info.width(), info.height());
+ if (!sContext->copy(proxy.get(), subset, SkIPoint::Make(0, 0))) {
+ return nullptr;
+ }
+
+ return sContext->asTextureProxyRef();
+ }
+}
+#endif
diff --git a/src/gpu/GrBackendTextureImageGenerator.h b/src/gpu/GrBackendTextureImageGenerator.h
new file mode 100644
index 0000000000..2f35895b5b
--- /dev/null
+++ b/src/gpu/GrBackendTextureImageGenerator.h
@@ -0,0 +1,72 @@
+/*
+ * Copyright 2017 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+#ifndef GrBackendTextureImageGenerator_DEFINED
+#define GrBackendTextureImageGenerator_DEFINED
+
+#include "SkImageGenerator.h"
+
+#include "GrBackendSurface.h"
+#include "SkAtomics.h"
+
+class GrSemaphore;
+
+class GrBackendTextureImageGenerator : public SkImageGenerator {
+public:
+ static std::unique_ptr<SkImageGenerator> Make(sk_sp<GrTexture>, sk_sp<GrSemaphore>,
+ SkAlphaType, sk_sp<SkColorSpace>);
+
+ ~GrBackendTextureImageGenerator();
+
+protected:
+ bool onGetPixels(const SkImageInfo& info, void* pixels, size_t rowBytes, SkPMColor ctable[],
+ int* ctableCount) override;
+ bool onGetPixels(const SkImageInfo& info, void* pixels, size_t rowBytes, const Options& opts)
+ override;
+
+#if SK_SUPPORT_GPU
+ sk_sp<GrTextureProxy> onGenerateTexture(GrContext*, const SkImageInfo&,
+ const SkIPoint&) override;
+#endif
+
+private:
+ GrBackendTextureImageGenerator(const SkImageInfo& info, GrTexture*,
+ uint32_t owningContextID, sk_sp<GrSemaphore>,
+ const GrBackendTexture&);
+
+ static void ReleaseRefHelper_TextureReleaseProc(void* ctx);
+
+ class RefHelper : public SkNVRefCnt<RefHelper> {
+ public:
+ RefHelper(GrTexture* texture, uint32_t owningContextID)
+ : fOriginalTexture(texture)
+ , fOwningContextID(owningContextID)
+ , fBorrowedTexture(nullptr)
+ , fBorrowingContextID(SK_InvalidGenID) { }
+
+ ~RefHelper();
+
+ GrTexture* fOriginalTexture;
+ uint32_t fOwningContextID;
+
+ // There is never a ref associated with this pointer. We rely on our atomic bookkeeping
+ // with the context ID to know when this pointer is valid and safe to use. This lets us
+ // avoid releasing a ref from another thread, or get into races during context shutdown.
+ GrTexture* fBorrowedTexture;
+ SkAtomic<uint32_t> fBorrowingContextID;
+ };
+
+ RefHelper* fRefHelper;
+
+ sk_sp<GrSemaphore> fSemaphore;
+ uint32_t fLastBorrowingContextID;
+
+ GrBackendTexture fBackendTexture;
+ GrSurfaceOrigin fSurfaceOrigin;
+
+ typedef SkImageGenerator INHERITED;
+};
+#endif // GrBackendTextureImageGenerator_DEFINED
diff --git a/src/gpu/GrContext.cpp b/src/gpu/GrContext.cpp
index fc38dfa9b8..7862a8223c 100644
--- a/src/gpu/GrContext.cpp
+++ b/src/gpu/GrContext.cpp
@@ -97,7 +97,7 @@ void GrContext::initCommon(const GrContextOptions& options) {
ASSERT_SINGLE_OWNER
fCaps = SkRef(fGpu->caps());
- fResourceCache = new GrResourceCache(fCaps);
+ fResourceCache = new GrResourceCache(fCaps, fUniqueID);
fResourceProvider = new GrResourceProvider(fGpu, fResourceCache, &fSingleOwner);
fDisableGpuYUVConversion = options.fDisableGpuYUVConversion;
diff --git a/src/gpu/GrGpu.h b/src/gpu/GrGpu.h
index b7fe048b7f..f1efadb659 100644
--- a/src/gpu/GrGpu.h
+++ b/src/gpu/GrGpu.h
@@ -388,6 +388,13 @@ public:
virtual void insertSemaphore(sk_sp<GrSemaphore> semaphore, bool flush = false) = 0;
virtual void waitSemaphore(sk_sp<GrSemaphore> semaphore) = 0;
+ /**
+ * Put this texture in a safe and known state for use across multiple GrContexts. Depending on
+ * the backend, this may return a GrSemaphore. If so, other contexts should wait on that
+ * semaphore before using this texture.
+ */
+ virtual sk_sp<GrSemaphore> prepareTextureForCrossContextUsage(GrTexture*) = 0;
+
///////////////////////////////////////////////////////////////////////////
// Debugging and Stats
diff --git a/src/gpu/GrResourceCache.cpp b/src/gpu/GrResourceCache.cpp
index e3373b27f9..299f63fd86 100644
--- a/src/gpu/GrResourceCache.cpp
+++ b/src/gpu/GrResourceCache.cpp
@@ -18,6 +18,8 @@
DECLARE_SKMESSAGEBUS_MESSAGE(GrUniqueKeyInvalidatedMessage);
+DECLARE_SKMESSAGEBUS_MESSAGE(GrGpuResourceFreedMessage);
+
//////////////////////////////////////////////////////////////////////////////
GrScratchKey::ResourceType GrScratchKey::GenerateResourceType() {
@@ -59,7 +61,7 @@ private:
//////////////////////////////////////////////////////////////////////////////
-GrResourceCache::GrResourceCache(const GrCaps* caps)
+GrResourceCache::GrResourceCache(const GrCaps* caps, uint32_t contextUniqueID)
: fTimestamp(0)
, fMaxCount(kDefaultMaxCount)
, fMaxBytes(kDefaultMaxSize)
@@ -75,6 +77,7 @@ GrResourceCache::GrResourceCache(const GrCaps* caps)
, fBudgetedBytes(0)
, fRequestFlush(false)
, fExternalFlushCnt(0)
+ , fContextUniqueID(contextUniqueID)
, fPreferVRAMUseOverFlushes(caps->preferVRAMUseOverFlushes()) {
SkDEBUGCODE(fCount = 0;)
SkDEBUGCODE(fNewlyPurgeableResourceForValidation = nullptr;)
@@ -186,6 +189,8 @@ void GrResourceCache::abandonAll() {
void GrResourceCache::releaseAll() {
AutoValidate av(this);
+ this->processFreedGpuResources();
+
while(fNonpurgeableResources.count()) {
GrGpuResource* back = *(fNonpurgeableResources.end() - 1);
SkASSERT(!back->wasDestroyed());
@@ -450,6 +455,8 @@ void GrResourceCache::purgeAsNeeded() {
this->processInvalidUniqueKeys(invalidKeyMsgs);
}
+ this->processFreedGpuResources();
+
if (fMaxUnusedFlushes > 0) {
// We want to know how many complete flushes have occurred without the resource being used.
// If the resource was tagged when fExternalFlushCnt was N then this means it became
@@ -534,6 +541,20 @@ void GrResourceCache::processInvalidUniqueKeys(
}
}
+void GrResourceCache::insertCrossContextGpuResource(GrGpuResource* resource) {
+ resource->ref();
+}
+
+void GrResourceCache::processFreedGpuResources() {
+ SkTArray<GrGpuResourceFreedMessage> msgs;
+ fFreedGpuResourceInbox.poll(&msgs);
+ for (int i = 0; i < msgs.count(); ++i) {
+ if (msgs[i].fOwningUniqueID == fContextUniqueID) {
+ msgs[i].fResource->unref();
+ }
+ }
+}
+
void GrResourceCache::addToNonpurgeableArray(GrGpuResource* resource) {
int index = fNonpurgeableResources.count();
*fNonpurgeableResources.append() = resource;
diff --git a/src/gpu/GrResourceCache.h b/src/gpu/GrResourceCache.h
index d871c9ad13..71070ee6e9 100644
--- a/src/gpu/GrResourceCache.h
+++ b/src/gpu/GrResourceCache.h
@@ -24,6 +24,11 @@ class GrCaps;
class SkString;
class SkTraceMemoryDump;
+struct GrGpuResourceFreedMessage {
+ GrGpuResource* fResource;
+ uint32_t fOwningUniqueID;
+};
+
/**
* Manages the lifetime of all GrGpuResource instances.
*
@@ -43,7 +48,7 @@ class SkTraceMemoryDump;
*/
class GrResourceCache {
public:
- GrResourceCache(const GrCaps* caps);
+ GrResourceCache(const GrCaps* caps, uint32_t contextUniqueID);
~GrResourceCache();
// Default maximum number of budgeted resources in the cache.
@@ -174,6 +179,9 @@ public:
};
void notifyFlushOccurred(FlushType);
+ /** Maintain a ref to this resource until we receive a GrGpuResourceFreedMessage. */
+ void insertCrossContextGpuResource(GrGpuResource* resource);
+
#if GR_CACHE_STATS
struct Stats {
int fTotal;
@@ -241,6 +249,7 @@ private:
/// @}
void processInvalidUniqueKeys(const SkTArray<GrUniqueKeyInvalidatedMessage>&);
+ void processFreedGpuResources();
void addToNonpurgeableArray(GrGpuResource*);
void removeFromNonpurgeableArray(GrGpuResource*);
bool overBudget() const { return fBudgetedBytes > fMaxBytes || fBudgetedCount > fMaxCount; }
@@ -287,6 +296,7 @@ private:
}
typedef SkMessageBus<GrUniqueKeyInvalidatedMessage>::Inbox InvalidUniqueKeyInbox;
+ typedef SkMessageBus<GrGpuResourceFreedMessage>::Inbox FreedGpuResourceInbox;
typedef SkTDPQueue<GrGpuResource*, CompareTimestamp, AccessResourceIndex> PurgeableQueue;
typedef SkTDArray<GrGpuResource*> ResourceArray;
@@ -326,6 +336,9 @@ private:
uint32_t fExternalFlushCnt;
InvalidUniqueKeyInbox fInvalidUniqueKeyInbox;
+ FreedGpuResourceInbox fFreedGpuResourceInbox;
+
+ uint32_t fContextUniqueID;
// This resource is allowed to be in the nonpurgeable array for the sake of validate() because
// we're in the midst of converting it to purgeable status.
diff --git a/src/gpu/gl/GrGLGpu.cpp b/src/gpu/gl/GrGLGpu.cpp
index e842ddc4f5..a2eabe4e46 100644
--- a/src/gpu/gl/GrGLGpu.cpp
+++ b/src/gpu/gl/GrGLGpu.cpp
@@ -4476,3 +4476,11 @@ void GrGLGpu::waitSemaphore(sk_sp<GrSemaphore> semaphore) {
void GrGLGpu::deleteSync(GrGLsync sync) const {
GL_CALL(DeleteSync(sync));
}
+
+sk_sp<GrSemaphore> GrGLGpu::prepareTextureForCrossContextUsage(GrTexture* texture) {
+ // Set up a semaphore to be signaled once the data is ready, and flush GL
+ sk_sp<GrSemaphore> semaphore = this->makeSemaphore();
+ this->insertSemaphore(semaphore, true);
+
+ return semaphore;
+}
diff --git a/src/gpu/gl/GrGLGpu.h b/src/gpu/gl/GrGLGpu.h
index 04929f89fa..041ecfb5ce 100644
--- a/src/gpu/gl/GrGLGpu.h
+++ b/src/gpu/gl/GrGLGpu.h
@@ -149,6 +149,8 @@ public:
void insertSemaphore(sk_sp<GrSemaphore> semaphore, bool flush) override;
void waitSemaphore(sk_sp<GrSemaphore> semaphore) override;
+ sk_sp<GrSemaphore> prepareTextureForCrossContextUsage(GrTexture*) override;
+
void deleteSync(GrGLsync) const;
private:
diff --git a/src/gpu/vk/GrVkGpu.cpp b/src/gpu/vk/GrVkGpu.cpp
index 23c7094bb3..9dd3688470 100644
--- a/src/gpu/vk/GrVkGpu.cpp
+++ b/src/gpu/vk/GrVkGpu.cpp
@@ -1695,6 +1695,11 @@ bool GrVkGpu::onCopySurface(GrSurface* dst,
srcImage = static_cast<GrVkTexture*>(src->asTexture());
}
+ // For borrowed textures, we *only* want to copy using draws (to avoid layout changes)
+ if (srcImage->isBorrowed()) {
+ return false;
+ }
+
if (can_copy_image(dst, src, this)) {
this->copySurfaceAsCopyImage(dst, src, dstImage, srcImage, srcRect, dstPoint);
return true;
@@ -1990,3 +1995,17 @@ void GrVkGpu::waitSemaphore(sk_sp<GrSemaphore> semaphore) {
resource->ref();
fSemaphoresToWaitOn.push_back(resource);
}
+
+sk_sp<GrSemaphore> GrVkGpu::prepareTextureForCrossContextUsage(GrTexture* texture) {
+ SkASSERT(texture);
+ GrVkTexture* vkTexture = static_cast<GrVkTexture*>(texture);
+ vkTexture->setImageLayout(this,
+ VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL,
+ VK_ACCESS_SHADER_READ_BIT,
+ VK_PIPELINE_STAGE_ALL_GRAPHICS_BIT,
+ false);
+ this->submitCommandBuffer(kSkip_SyncQueue);
+
+ // The image layout change serves as a barrier, so no semaphore is needed
+ return nullptr;
+}
diff --git a/src/gpu/vk/GrVkGpu.h b/src/gpu/vk/GrVkGpu.h
index 605c7af965..3bedfd2ce6 100644
--- a/src/gpu/vk/GrVkGpu.h
+++ b/src/gpu/vk/GrVkGpu.h
@@ -137,6 +137,8 @@ public:
void insertSemaphore(sk_sp<GrSemaphore> semaphore, bool flush) override;
void waitSemaphore(sk_sp<GrSemaphore> semaphore) override;
+ sk_sp<GrSemaphore> prepareTextureForCrossContextUsage(GrTexture*) override;
+
void generateMipmap(GrVkTexture* tex);
bool updateBuffer(GrVkBuffer* buffer, const void* src, VkDeviceSize offset, VkDeviceSize size);
diff --git a/src/gpu/vk/GrVkImage.h b/src/gpu/vk/GrVkImage.h
index 336e468ced..57ab18aa4b 100644
--- a/src/gpu/vk/GrVkImage.h
+++ b/src/gpu/vk/GrVkImage.h
@@ -48,6 +48,7 @@ public:
bool isLinearTiled() const {
return SkToBool(VK_IMAGE_TILING_LINEAR == fInfo.fImageTiling);
}
+ bool isBorrowed() const { return fIsBorrowed; }
VkImageLayout currentLayout() const { return fInfo.fImageLayout; }