-rw-r--r--  src/gpu/GrBackendTextureImageGenerator.cpp   |  41
-rw-r--r--  src/gpu/gl/GrGLGpu.cpp                        |   9
-rw-r--r--  src/gpu/vk/GrVkGpu.cpp                        | 268
-rw-r--r--  tests/GrMipMappedTest.cpp                     | 119
4 files changed, 118 insertions, 319 deletions
diff --git a/src/gpu/GrBackendTextureImageGenerator.cpp b/src/gpu/GrBackendTextureImageGenerator.cpp index 60cbf9c896..93596cf6bd 100644 --- a/src/gpu/GrBackendTextureImageGenerator.cpp +++ b/src/gpu/GrBackendTextureImageGenerator.cpp @@ -10,12 +10,10 @@ #include "GrContext.h" #include "GrContextPriv.h" #include "GrGpu.h" -#include "GrRenderTargetContext.h" #include "GrResourceCache.h" #include "GrResourceProvider.h" #include "GrSemaphore.h" #include "GrTexture.h" -#include "GrTexturePriv.h" #include "SkGr.h" #include "SkMessageBus.h" @@ -34,12 +32,11 @@ GrBackendTextureImageGenerator::RefHelper::~RefHelper() { static GrBackendTexture make_backend_texture_from_handle(GrBackend backend, int width, int height, GrPixelConfig config, - GrMipMapped mipMapped, GrBackendObject handle) { switch (backend) { case kOpenGL_GrBackend: { const GrGLTextureInfo* glInfo = (const GrGLTextureInfo*)(handle); - return GrBackendTexture(width, height, config, mipMapped, *glInfo); + return GrBackendTexture(width, height, config, *glInfo); } #ifdef SK_VULKAN case kVulkan_GrBackend: { @@ -49,7 +46,7 @@ static GrBackendTexture make_backend_texture_from_handle(GrBackend backend, #endif case kMock_GrBackend: { const GrMockTextureInfo* mockInfo = (const GrMockTextureInfo*)(handle); - return GrBackendTexture(width, height, config, mipMapped, *mockInfo); + return GrBackendTexture(width, height, config, *mockInfo); } default: return GrBackendTexture(); @@ -77,13 +74,10 @@ GrBackendTextureImageGenerator::Make(sk_sp<GrTexture> texture, GrSurfaceOrigin o context->getResourceCache()->insertCrossContextGpuResource(texture.get()); GrBackend backend = context->contextPriv().getBackend(); - GrMipMapped mipMapped = texture->texturePriv().hasMipMaps() ? GrMipMapped::kYes - : GrMipMapped::kNo; GrBackendTexture backendTexture = make_backend_texture_from_handle(backend, texture->width(), texture->height(), texture->config(), - mipMapped, texture->getTextureHandle()); SkImageInfo info = SkImageInfo::Make(texture->width(), texture->height(), colorType, alphaType, @@ -176,28 +170,35 @@ sk_sp<GrTextureProxy> GrBackendTextureImageGenerator::onGenerateTexture( sk_sp<GrTextureProxy> proxy = GrSurfaceProxy::MakeWrapped(std::move(tex), fSurfaceOrigin); if (0 == origin.fX && 0 == origin.fY && - info.width() == fBackendTexture.width() && info.height() == fBackendTexture.height() && - (!willNeedMipMaps || proxy->isMipMapped())) { - // If the caller wants the entire texture and we have the correct mip support, we're done + info.width() == fBackendTexture.width() && info.height() == fBackendTexture.height()) { + // If the caller wants the entire texture, we're done return proxy; } else { // Otherwise, make a copy of the requested subset. Make sure our temporary is renderable, - // because Vulkan will want to do the copy as a draw. All other copies would require a - // layout change in Vulkan and we do not change the layout of borrowed images. - sk_sp<GrRenderTargetContext> rtContext(context->makeDeferredRenderTargetContext( - SkBackingFit::kExact, info.width(), info.height(), proxy->config(), nullptr, - 0, willNeedMipMaps, proxy->origin(), nullptr, SkBudgeted::kYes)); - - if (!rtContext) { + // because Vulkan will want to do the copy as a draw. 
+ GrSurfaceDesc desc; + desc.fFlags = kRenderTarget_GrSurfaceFlag; + desc.fOrigin = proxy->origin(); + desc.fWidth = info.width(); + desc.fHeight = info.height(); + desc.fConfig = proxy->config(); + // TODO: We should support the case where we can allocate the mips ahead of time then copy + // the subregion into the base layer and then let the GPU generate the rest of the mip + // levels. + SkASSERT(!proxy->isMipMapped()); + + sk_sp<GrSurfaceContext> sContext(context->contextPriv().makeDeferredSurfaceContext( + desc, SkBackingFit::kExact, SkBudgeted::kYes)); + if (!sContext) { return nullptr; } SkIRect subset = SkIRect::MakeXYWH(origin.fX, origin.fY, info.width(), info.height()); - if (!rtContext->copy(proxy.get(), subset, SkIPoint::Make(0, 0))) { + if (!sContext->copy(proxy.get(), subset, SkIPoint::Make(0, 0))) { return nullptr; } - return rtContext->asTextureProxyRef(); + return sContext->asTextureProxyRef(); } } #endif diff --git a/src/gpu/gl/GrGLGpu.cpp b/src/gpu/gl/GrGLGpu.cpp index 1bee54879e..649373bf79 100644 --- a/src/gpu/gl/GrGLGpu.cpp +++ b/src/gpu/gl/GrGLGpu.cpp @@ -4403,15 +4403,6 @@ GrBackendObject GrGLGpu::createTestingOnlyBackendTexture(void* pixels, int w, in mipLevels = SkMipMap::ComputeLevelCount(w, h) + 1; } - size_t bpp = GrBytesPerPixel(config); - size_t baseLayerSize = bpp * w * h; - SkAutoMalloc defaultStorage(baseLayerSize); - if (!pixels) { - // Fill in the texture with all zeros so we don't have random garbage - pixels = defaultStorage.get(); - memset(pixels, 0, baseLayerSize); - } - int width = w; int height = h; for (int i = 0; i < mipLevels; ++i) { diff --git a/src/gpu/vk/GrVkGpu.cpp b/src/gpu/vk/GrVkGpu.cpp index 3b40f27480..1a9bd2f254 100644 --- a/src/gpu/vk/GrVkGpu.cpp +++ b/src/gpu/vk/GrVkGpu.cpp @@ -1138,12 +1138,12 @@ GrStencilAttachment* GrVkGpu::createStencilAttachmentForRenderTarget(const GrRen //////////////////////////////////////////////////////////////////////////////// -bool copy_testing_data(GrVkGpu* gpu, void* srcData, const GrVkAlloc& alloc, size_t bufferOffset, +bool copy_testing_data(GrVkGpu* gpu, void* srcData, const GrVkAlloc& alloc, size_t srcRowBytes, size_t dstRowBytes, int h) { void* mapPtr; VkResult err = GR_VK_CALL(gpu->vkInterface(), MapMemory(gpu->device(), alloc.fMemory, - alloc.fOffset + bufferOffset, + alloc.fOffset, dstRowBytes * h, 0, &mapPtr)); @@ -1255,37 +1255,6 @@ GrBackendObject GrVkGpu::createTestingOnlyBackendTexture(void* srcData, int w, i return 0; } - // We need to declare these early so that we can delete them at the end outside of the if block. 
- GrVkAlloc bufferAlloc = { VK_NULL_HANDLE, 0, 0, 0 }; - VkBuffer buffer = VK_NULL_HANDLE; - - VkResult err; - const VkCommandBufferAllocateInfo cmdInfo = { - VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO, // sType - nullptr, // pNext - fCmdPool, // commandPool - VK_COMMAND_BUFFER_LEVEL_PRIMARY, // level - 1 // bufferCount - }; - - VkCommandBuffer cmdBuffer; - err = VK_CALL(AllocateCommandBuffers(fDevice, &cmdInfo, &cmdBuffer)); - if (err) { - GrVkMemory::FreeImageMemory(this, false, alloc); - VK_CALL(DestroyImage(fDevice, image, nullptr)); - return 0; - } - - VkCommandBufferBeginInfo cmdBufferBeginInfo; - memset(&cmdBufferBeginInfo, 0, sizeof(VkCommandBufferBeginInfo)); - cmdBufferBeginInfo.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO; - cmdBufferBeginInfo.pNext = nullptr; - cmdBufferBeginInfo.flags = VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT; - cmdBufferBeginInfo.pInheritanceInfo = nullptr; - - err = VK_CALL(BeginCommandBuffer(cmdBuffer, &cmdBufferBeginInfo)); - SkASSERT(!err); - size_t bpp = GrBytesPerPixel(config); size_t rowCopyBytes = bpp * w; if (linearTiling) { @@ -1298,89 +1267,79 @@ GrBackendObject GrVkGpu::createTestingOnlyBackendTexture(void* srcData, int w, i VK_CALL(GetImageSubresourceLayout(fDevice, image, &subres, &layout)); - if (!copy_testing_data(this, srcData, alloc, 0, rowCopyBytes, + if (!copy_testing_data(this, srcData, alloc, rowCopyBytes, static_cast<size_t>(layout.rowPitch), h)) { - GrVkMemory::FreeImageMemory(this, true, alloc); + GrVkMemory::FreeImageMemory(this, linearTiling, alloc); VK_CALL(DestroyImage(fDevice, image, nullptr)); - VK_CALL(EndCommandBuffer(cmdBuffer)); - VK_CALL(FreeCommandBuffers(fDevice, fCmdPool, 1, &cmdBuffer)); return 0; } } else { SkASSERT(w && h); - SkTArray<size_t> individualMipOffsets(mipLevels); - individualMipOffsets.push_back(0); - size_t combinedBufferSize = w * bpp * h; - int currentWidth = w; - int currentHeight = h; - // The alignment must be at least 4 bytes and a multiple of the bytes per pixel of the image - // config. This works with the assumption that the bytes in pixel config is always a power - // of 2. 
- SkASSERT((bpp & (bpp - 1)) == 0); - const size_t alignmentMask = 0x3 | (bpp - 1); - for (uint32_t currentMipLevel = 1; currentMipLevel < mipLevels; currentMipLevel++) { - currentWidth = SkTMax(1, currentWidth/2); - currentHeight = SkTMax(1, currentHeight/2); - - const size_t trimmedSize = currentWidth * bpp * currentHeight; - const size_t alignmentDiff = combinedBufferSize & alignmentMask; - if (alignmentDiff != 0) { - combinedBufferSize += alignmentMask - alignmentDiff + 1; - } - individualMipOffsets.push_back(combinedBufferSize); - combinedBufferSize += trimmedSize; - } - + VkBuffer buffer; VkBufferCreateInfo bufInfo; memset(&bufInfo, 0, sizeof(VkBufferCreateInfo)); bufInfo.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO; bufInfo.flags = 0; - bufInfo.size = combinedBufferSize; + bufInfo.size = rowCopyBytes * h; bufInfo.usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT; bufInfo.sharingMode = VK_SHARING_MODE_EXCLUSIVE; bufInfo.queueFamilyIndexCount = 0; bufInfo.pQueueFamilyIndices = nullptr; + VkResult err; err = VK_CALL(CreateBuffer(fDevice, &bufInfo, nullptr, &buffer)); if (err) { - GrVkMemory::FreeImageMemory(this, false, alloc); + GrVkMemory::FreeImageMemory(this, linearTiling, alloc); VK_CALL(DestroyImage(fDevice, image, nullptr)); - VK_CALL(EndCommandBuffer(cmdBuffer)); - VK_CALL(FreeCommandBuffers(fDevice, fCmdPool, 1, &cmdBuffer)); return 0; } + GrVkAlloc bufferAlloc = { VK_NULL_HANDLE, 0, 0, 0 }; if (!GrVkMemory::AllocAndBindBufferMemory(this, buffer, GrVkBuffer::kCopyRead_Type, true, &bufferAlloc)) { - GrVkMemory::FreeImageMemory(this, false, alloc); + GrVkMemory::FreeImageMemory(this, linearTiling, alloc); VK_CALL(DestroyImage(fDevice, image, nullptr)); VK_CALL(DestroyBuffer(fDevice, buffer, nullptr)); - VK_CALL(EndCommandBuffer(cmdBuffer)); - VK_CALL(FreeCommandBuffers(fDevice, fCmdPool, 1, &cmdBuffer)); return 0; } - currentWidth = w; - currentHeight = h; - for (uint32_t currentMipLevel = 0; currentMipLevel < mipLevels; currentMipLevel++) { - SkASSERT(0 == currentMipLevel || !srcData); - size_t currentRowBytes = bpp * currentWidth; - size_t bufferOffset = individualMipOffsets[currentMipLevel]; - if (!copy_testing_data(this, srcData, bufferAlloc, bufferOffset, - currentRowBytes, currentRowBytes, currentHeight)) { - GrVkMemory::FreeImageMemory(this, false, alloc); - VK_CALL(DestroyImage(fDevice, image, nullptr)); - GrVkMemory::FreeBufferMemory(this, GrVkBuffer::kCopyRead_Type, bufferAlloc); - VK_CALL(DestroyBuffer(fDevice, buffer, nullptr)); - VK_CALL(EndCommandBuffer(cmdBuffer)); - VK_CALL(FreeCommandBuffers(fDevice, fCmdPool, 1, &cmdBuffer)); - return 0; - } - currentWidth = SkTMax(1, currentWidth/2); - currentHeight = SkTMax(1, currentHeight/2); + if (!copy_testing_data(this, srcData, bufferAlloc, rowCopyBytes, rowCopyBytes, h)) { + GrVkMemory::FreeImageMemory(this, linearTiling, alloc); + VK_CALL(DestroyImage(fDevice, image, nullptr)); + GrVkMemory::FreeBufferMemory(this, GrVkBuffer::kCopyRead_Type, bufferAlloc); + VK_CALL(DestroyBuffer(fDevice, buffer, nullptr)); + return 0; + } + + const VkCommandBufferAllocateInfo cmdInfo = { + VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO, // sType + nullptr, // pNext + fCmdPool, // commandPool + VK_COMMAND_BUFFER_LEVEL_PRIMARY, // level + 1 // bufferCount + }; + + VkCommandBuffer cmdBuffer; + err = VK_CALL(AllocateCommandBuffers(fDevice, &cmdInfo, &cmdBuffer)); + if (err) { + GrVkMemory::FreeImageMemory(this, linearTiling, alloc); + VK_CALL(DestroyImage(fDevice, image, nullptr)); + GrVkMemory::FreeBufferMemory(this, GrVkBuffer::kCopyRead_Type, 
bufferAlloc); + VK_CALL(DestroyBuffer(fDevice, buffer, nullptr)); + return 0; } + VkCommandBufferBeginInfo cmdBufferBeginInfo; + memset(&cmdBufferBeginInfo, 0, sizeof(VkCommandBufferBeginInfo)); + cmdBufferBeginInfo.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO; + cmdBufferBeginInfo.pNext = nullptr; + cmdBufferBeginInfo.flags = VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT; + cmdBufferBeginInfo.pInheritanceInfo = nullptr; + + err = VK_CALL(BeginCommandBuffer(cmdBuffer, &cmdBufferBeginInfo)); + SkASSERT(!err); + // Set image layout and add barrier VkImageMemoryBarrier barrier; memset(&barrier, 0, sizeof(VkImageMemoryBarrier)); @@ -1388,12 +1347,11 @@ GrBackendObject GrVkGpu::createTestingOnlyBackendTexture(void* srcData, int w, i barrier.pNext = nullptr; barrier.srcAccessMask = GrVkMemory::LayoutToSrcAccessMask(initialLayout); barrier.dstAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT; - barrier.oldLayout = initialLayout; barrier.newLayout = VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL; barrier.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED; barrier.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED; barrier.image = image; - barrier.subresourceRange = { VK_IMAGE_ASPECT_COLOR_BIT, 0, mipLevels, 0 , 1}; + barrier.subresourceRange = { VK_IMAGE_ASPECT_COLOR_BIT, 0, 1, 0 , 1}; VK_CALL(CmdPipelineBarrier(cmdBuffer, GrVkMemory::LayoutToPipelineStageFlags(initialLayout), @@ -1404,102 +1362,70 @@ GrBackendObject GrVkGpu::createTestingOnlyBackendTexture(void* srcData, int w, i 1, &barrier)); initialLayout = VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL; - SkTArray<VkBufferImageCopy> regions(mipLevels); - - currentWidth = w; - currentHeight = h; - for (uint32_t currentMipLevel = 0; currentMipLevel < mipLevels; currentMipLevel++) { - // Submit copy command - VkBufferImageCopy& region = regions.push_back(); - memset(®ion, 0, sizeof(VkBufferImageCopy)); - region.bufferOffset = individualMipOffsets[currentMipLevel]; - region.bufferRowLength = currentWidth; - region.bufferImageHeight = currentHeight; - region.imageSubresource = { VK_IMAGE_ASPECT_COLOR_BIT, 0, 0, 1 }; - region.imageOffset = { 0, 0, 0 }; - region.imageExtent = { (uint32_t)currentWidth, (uint32_t)currentHeight, 1 }; - currentWidth = SkTMax(1, currentWidth/2); - currentHeight = SkTMax(1, currentHeight/2); + // Submit copy command + VkBufferImageCopy region; + memset(®ion, 0, sizeof(VkBufferImageCopy)); + region.bufferOffset = 0; + region.bufferRowLength = w; + region.bufferImageHeight = h; + region.imageSubresource = { VK_IMAGE_ASPECT_COLOR_BIT, 0, 0, 1 }; + region.imageOffset = { 0, 0, 0 }; + region.imageExtent = { (uint32_t)w, (uint32_t)h, 1 }; + + VK_CALL(CmdCopyBufferToImage(cmdBuffer, buffer, image, initialLayout, 1, ®ion)); + + // End CommandBuffer + err = VK_CALL(EndCommandBuffer(cmdBuffer)); + SkASSERT(!err); + + // Create Fence for queue + VkFence fence; + VkFenceCreateInfo fenceInfo; + memset(&fenceInfo, 0, sizeof(VkFenceCreateInfo)); + fenceInfo.sType = VK_STRUCTURE_TYPE_FENCE_CREATE_INFO; + + err = VK_CALL(CreateFence(fDevice, &fenceInfo, nullptr, &fence)); + SkASSERT(!err); + + VkSubmitInfo submitInfo; + memset(&submitInfo, 0, sizeof(VkSubmitInfo)); + submitInfo.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO; + submitInfo.pNext = nullptr; + submitInfo.waitSemaphoreCount = 0; + submitInfo.pWaitSemaphores = nullptr; + submitInfo.pWaitDstStageMask = 0; + submitInfo.commandBufferCount = 1; + submitInfo.pCommandBuffers = &cmdBuffer; + submitInfo.signalSemaphoreCount = 0; + submitInfo.pSignalSemaphores = nullptr; + err = VK_CALL(QueueSubmit(this->queue(), 1, 
&submitInfo, fence)); + SkASSERT(!err); + + err = VK_CALL(WaitForFences(fDevice, 1, &fence, true, UINT64_MAX)); + if (VK_TIMEOUT == err) { + GrVkMemory::FreeImageMemory(this, linearTiling, alloc); + VK_CALL(DestroyImage(fDevice, image, nullptr)); + GrVkMemory::FreeBufferMemory(this, GrVkBuffer::kCopyRead_Type, bufferAlloc); + VK_CALL(DestroyBuffer(fDevice, buffer, nullptr)); + VK_CALL(FreeCommandBuffers(fDevice, fCmdPool, 1, &cmdBuffer)); + VK_CALL(DestroyFence(fDevice, fence, nullptr)); + SkDebugf("Fence failed to signal: %d\n", err); + SK_ABORT("failing"); } + SkASSERT(!err); - VK_CALL(CmdCopyBufferToImage(cmdBuffer, buffer, image, initialLayout, regions.count(), - regions.begin())); - } - // Change Image layout to shader read since if we use this texture as a borrowed textures within - // Ganesh we require that its layout be set to that - VkImageMemoryBarrier barrier; - memset(&barrier, 0, sizeof(VkImageMemoryBarrier)); - barrier.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER; - barrier.pNext = nullptr; - barrier.srcAccessMask = GrVkMemory::LayoutToSrcAccessMask(initialLayout); - barrier.dstAccessMask = VK_ACCESS_SHADER_READ_BIT; - barrier.oldLayout = initialLayout; - barrier.newLayout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL; - barrier.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED; - barrier.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED; - barrier.image = image; - barrier.subresourceRange = { VK_IMAGE_ASPECT_COLOR_BIT, 0, mipLevels, 0 , 1}; - - VK_CALL(CmdPipelineBarrier(cmdBuffer, - GrVkMemory::LayoutToPipelineStageFlags(initialLayout), - VK_PIPELINE_STAGE_ALL_GRAPHICS_BIT, - 0, - 0, nullptr, - 0, nullptr, - 1, &barrier)); - - // End CommandBuffer - err = VK_CALL(EndCommandBuffer(cmdBuffer)); - SkASSERT(!err); - - // Create Fence for queue - VkFence fence; - VkFenceCreateInfo fenceInfo; - memset(&fenceInfo, 0, sizeof(VkFenceCreateInfo)); - fenceInfo.sType = VK_STRUCTURE_TYPE_FENCE_CREATE_INFO; - - err = VK_CALL(CreateFence(fDevice, &fenceInfo, nullptr, &fence)); - SkASSERT(!err); - - VkSubmitInfo submitInfo; - memset(&submitInfo, 0, sizeof(VkSubmitInfo)); - submitInfo.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO; - submitInfo.pNext = nullptr; - submitInfo.waitSemaphoreCount = 0; - submitInfo.pWaitSemaphores = nullptr; - submitInfo.pWaitDstStageMask = 0; - submitInfo.commandBufferCount = 1; - submitInfo.pCommandBuffers = &cmdBuffer; - submitInfo.signalSemaphoreCount = 0; - submitInfo.pSignalSemaphores = nullptr; - err = VK_CALL(QueueSubmit(this->queue(), 1, &submitInfo, fence)); - SkASSERT(!err); - - err = VK_CALL(WaitForFences(fDevice, 1, &fence, true, UINT64_MAX)); - if (VK_TIMEOUT == err) { - GrVkMemory::FreeImageMemory(this, false, alloc); - VK_CALL(DestroyImage(fDevice, image, nullptr)); + // Clean up transfer resources GrVkMemory::FreeBufferMemory(this, GrVkBuffer::kCopyRead_Type, bufferAlloc); VK_CALL(DestroyBuffer(fDevice, buffer, nullptr)); VK_CALL(FreeCommandBuffers(fDevice, fCmdPool, 1, &cmdBuffer)); VK_CALL(DestroyFence(fDevice, fence, nullptr)); - SkDebugf("Fence failed to signal: %d\n", err); - SK_ABORT("failing"); } - SkASSERT(!err); - - // Clean up transfer resources - GrVkMemory::FreeBufferMemory(this, GrVkBuffer::kCopyRead_Type, bufferAlloc); - VK_CALL(DestroyBuffer(fDevice, buffer, nullptr)); - VK_CALL(FreeCommandBuffers(fDevice, fCmdPool, 1, &cmdBuffer)); - VK_CALL(DestroyFence(fDevice, fence, nullptr)); - GrVkImageInfo* info = new GrVkImageInfo; info->fImage = image; info->fAlloc = alloc; info->fImageTiling = imageTiling; - info->fImageLayout = 
VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL; + info->fImageLayout = initialLayout; info->fFormat = pixelFormat; info->fLevelCount = mipLevels; diff --git a/tests/GrMipMappedTest.cpp b/tests/GrMipMappedTest.cpp index 91addd87e2..166713a2bd 100644 --- a/tests/GrMipMappedTest.cpp +++ b/tests/GrMipMappedTest.cpp @@ -10,12 +10,10 @@ #if SK_SUPPORT_GPU #include "GrBackendSurface.h" -#include "GrBackendTextureImageGenerator.h" #include "GrContext.h" #include "GrContextPriv.h" #include "GrGpu.h" #include "GrRenderTargetContext.h" -#include "GrSemaphore.h" #include "GrSurfaceProxyPriv.h" #include "GrTest.h" #include "GrTexturePriv.h" @@ -23,7 +21,6 @@ #include "SkCanvas.h" #include "SkImage_Base.h" #include "SkGpuDevice.h" -#include "SkPoint.h" #include "SkSurface.h" #include "SkSurface_Gpu.h" #include "Test.h" @@ -97,120 +94,4 @@ DEF_GPUTEST_FOR_RENDERING_CONTEXTS(GrWrappedMipMappedTest, reporter, ctxInfo) { } } -// Test that we correctly copy or don't copy GrBackendTextures in the GrBackendTextureImageGenerator -// based on if we will use mips in the draw and the mip status of the GrBackendTexture. -DEF_GPUTEST_FOR_RENDERING_CONTEXTS(GrBackendTextureImageMipMappedTest, reporter, ctxInfo) { - static const int kSize = 8; - - GrContext* context = ctxInfo.grContext(); - for (auto mipMapped : {GrMipMapped::kNo, GrMipMapped::kYes}) { - for (auto willUseMips : {false, true}) { - GrBackendObject backendHandle = context->getGpu()->createTestingOnlyBackendTexture( - nullptr, kSize, kSize, kRGBA_8888_GrPixelConfig, false, mipMapped); - - GrBackend backend = context->contextPriv().getBackend(); - GrBackendTexture backendTex = GrTest::CreateBackendTexture(backend, - kSize, - kSize, - kRGBA_8888_GrPixelConfig, - mipMapped, - backendHandle); - - sk_sp<SkImage> image = SkImage::MakeFromTexture(context, backendTex, - kTopLeft_GrSurfaceOrigin, - kPremul_SkAlphaType, nullptr); - - GrTextureProxy* proxy = as_IB(image)->peekProxy(); - REPORTER_ASSERT(reporter, proxy); - if (!proxy) { - context->getGpu()->deleteTestingOnlyBackendTexture(backendHandle); - return; - } - - REPORTER_ASSERT(reporter, proxy->priv().isInstantiated()); - - sk_sp<GrTexture> texture = sk_ref_sp(proxy->priv().peekTexture()); - REPORTER_ASSERT(reporter, texture); - if (!texture) { - context->getGpu()->deleteTestingOnlyBackendTexture(backendHandle); - return; - } - - std::unique_ptr<SkImageGenerator> imageGen = GrBackendTextureImageGenerator::Make( - texture, kTopLeft_GrSurfaceOrigin, nullptr, kPremul_SkAlphaType, nullptr); - REPORTER_ASSERT(reporter, imageGen); - if (!imageGen) { - context->getGpu()->deleteTestingOnlyBackendTexture(backendHandle); - return; - } - - SkIPoint origin = SkIPoint::Make(0,0); - // The transfer function behavior isn't used in the generator so set we set it - // arbitrarily here. 
- SkTransferFunctionBehavior behavior = SkTransferFunctionBehavior::kIgnore; - SkImageInfo imageInfo = SkImageInfo::Make(kSize, kSize, kRGBA_8888_SkColorType, - kPremul_SkAlphaType); - sk_sp<GrTextureProxy> genProxy = imageGen->generateTexture(context, imageInfo, - origin, behavior, - willUseMips); - - REPORTER_ASSERT(reporter, genProxy); - if (!genProxy) { - context->getGpu()->deleteTestingOnlyBackendTexture(backendHandle); - return; - } - - REPORTER_ASSERT(reporter, genProxy->priv().isInstantiated()); - - GrTexture* genTexture = genProxy->priv().peekTexture(); - REPORTER_ASSERT(reporter, genTexture); - if (!genTexture) { - context->getGpu()->deleteTestingOnlyBackendTexture(backendHandle); - return; - } - - GrBackendObject genBackendObject = genTexture->getTextureHandle(); - - if (kOpenGL_GrBackend == backend) { - const GrGLTextureInfo* origTexInfo = backendTex.getGLTextureInfo(); - GrGLTextureInfo* genTexInfo = (GrGLTextureInfo*)genBackendObject; - if (willUseMips && GrMipMapped::kNo == mipMapped) { - // We did a copy so the texture IDs should be different - REPORTER_ASSERT(reporter, origTexInfo->fID != genTexInfo->fID); - } else { - REPORTER_ASSERT(reporter, origTexInfo->fID == genTexInfo->fID); - } - } else if (kVulkan_GrBackend == backend) { -#ifdef SK_VULKAN - const GrVkImageInfo* origImageInfo = backendTex.getVkImageInfo(); - GrVkImageInfo* genImageInfo = (GrVkImageInfo*)genBackendObject; - if (willUseMips && GrMipMapped::kNo == mipMapped) { - // We did a copy so the texture IDs should be different - REPORTER_ASSERT(reporter, origImageInfo->fImage != genImageInfo->fImage); - } else { - REPORTER_ASSERT(reporter, origImageInfo->fImage == genImageInfo->fImage); - } -#endif - } else if (kMetal_GrBackend == backend) { - REPORTER_ASSERT(reporter, false); - } else { - REPORTER_ASSERT(reporter, false); - } - - // Must make sure the uses of the backend texture have finished (we possibly have a - // queued up copy) before we delete the backend texture. Thus we use readPixels here - // just to force the synchronization. - sk_sp<GrSurfaceContext> surfContext = - context->contextPriv().makeWrappedSurfaceContext(genProxy, nullptr); - - SkBitmap bitmap; - bitmap.allocPixels(imageInfo); - surfContext->readPixels(imageInfo, bitmap.getPixels(), 0, 0, 0, 0); - - context->getGpu()->deleteTestingOnlyBackendTexture(backendHandle); - } - } -} - - #endif |
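For reference, the per-level staging-buffer layout that this change removes from GrVkGpu::createTestingOnlyBackendTexture can be restated as a standalone sketch. The deleted loop packed every mip level into one VkBuffer, rounding each level's offset up to at least 4 bytes and to the pixel size, which works because bytes-per-pixel is assumed to be a power of two. The helper below is illustrative only (compute_mip_offsets is not a Skia function); it simply mirrors the removed logic:

    #include <algorithm>
    #include <cstddef>
    #include <vector>

    // Sketch of the removed per-mip-level offset computation. Assumes bpp is a
    // power of two, so each offset can be rounded up to max(4, bpp) with a mask.
    static std::vector<size_t> compute_mip_offsets(int w, int h, size_t bpp,
                                                   size_t* combinedBufferSize) {
        std::vector<size_t> offsets;
        offsets.push_back(0);                         // level 0 starts at the front
        size_t combined = bpp * w * h;                // size of the base level
        const size_t alignmentMask = 0x3 | (bpp - 1);
        int currentWidth = w;
        int currentHeight = h;
        while (currentWidth > 1 || currentHeight > 1) {
            currentWidth  = std::max(1, currentWidth / 2);
            currentHeight = std::max(1, currentHeight / 2);
            const size_t alignmentDiff = combined & alignmentMask;
            if (alignmentDiff != 0) {
                combined += alignmentMask - alignmentDiff + 1;  // round up to alignment
            }
            offsets.push_back(combined);              // bufferOffset for this level
            combined += bpp * currentWidth * currentHeight;
        }
        *combinedBufferSize = combined;               // total staging buffer size
        return offsets;
    }

Each entry of the returned vector would feed one VkBufferImageCopy::bufferOffset, one region per mip level, submitted in a single vkCmdCopyBufferToImage call, which is the pattern the removed code used.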
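With the revert, the optimal-tiling path uploads only the base level: it records one VkBufferImageCopy at buffer offset 0 covering the full width and height, and the image is left in whatever layout the upload finished in (VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL on that path), which is what gets recorded in GrVkImageInfo::fImageLayout instead of the shader-read transition the removed code performed. A minimal sketch of that region setup, assuming the standard Vulkan headers (make_base_level_region is an illustrative name, not Skia API):

    #include <vulkan/vulkan.h>

    // One tightly packed region for mip level 0 of a single-layer color image,
    // matching what the reverted path passes to vkCmdCopyBufferToImage.
    static VkBufferImageCopy make_base_level_region(uint32_t w, uint32_t h) {
        VkBufferImageCopy region = {};
        region.bufferOffset = 0;             // data starts at the front of the staging buffer
        region.bufferRowLength = w;          // rows are tightly packed
        region.bufferImageHeight = h;
        region.imageSubresource = { VK_IMAGE_ASPECT_COLOR_BIT, 0, 0, 1 };  // mip 0, layer 0
        region.imageOffset = { 0, 0, 0 };
        region.imageExtent = { w, h, 1 };
        return region;
    }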