author    jvanverth <jvanverth@google.com>    2016-04-20 05:54:01 -0700
committer Commit bot <commit-bot@chromium.org>    2016-04-20 05:54:02 -0700
commit    b0ec9836dbf7f2304a3a29289b818719ca0a39bd (patch)
tree      8664a24afe6a34d2afa217135f5f8c53028af6ff /src
parent    5e3815b4d074fe3c47bbf0969446ed9870e5ef0a (diff)
Use transfer buffer for BatchAtlas texture copies.
Sets up use of transfer buffer (if available) to do one-copy transfers.
Get transfer buffers working properly in GL.
Implement GrVkGpu::onTransferPixels.
Check caps to ensure we can create a transfer buffer.

BUG=skia:
GOLD_TRYBOT_URL= https://gold.skia.org/search2?unt=true&query=source_type%3Dgm&master=false&issue=1888473002

Review URL: https://codereview.chromium.org/1888473002
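In outline, the one-copy path this commit enables works like this (a minimal sketch assembled from the APIs in the diff below; resourceProvider, gpu, srcPixels, and the pixel parameters are stand-ins, not code from the commit):

    // CPU writes land directly in a mapped transfer buffer; the GPU then
    // copies buffer -> texture, so pixel data is copied exactly once.
    GrBuffer* xferBuffer = resourceProvider->createBuffer(
            bytesPerPixel * width * height,
            kXferCpuToGpu_GrBufferType,
            kDynamic_GrAccessPattern,
            GrResourceProvider::kNoPendingIO_Flag);
    if (xferBuffer) {
        void* dst = xferBuffer->map();
        memcpy(dst, srcPixels, bytesPerPixel * width * height);
        xferBuffer->unmap();
        gpu->transferPixels(texture, 0, 0, width, height, config,
                            xferBuffer, /*offset=*/0, rowBytes);
    } else {
        // Caps don't support transfer buffers: fall back to the old
        // two-copy path through writePixels.
        gpu->writePixels(texture, 0, 0, width, height, config,
                         srcPixels, rowBytes);
    }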
Diffstat (limited to 'src')
-rw-r--r--   src/gpu/GrBatchAtlas.cpp          77
-rw-r--r--   src/gpu/GrBatchAtlas.h            17
-rw-r--r--   src/gpu/GrBatchFlushState.h        9
-rw-r--r--   src/gpu/GrGpu.cpp                  4
-rw-r--r--   src/gpu/GrGpu.h                   10
-rw-r--r--   src/gpu/GrResourceProvider.cpp     3
-rw-r--r--   src/gpu/batches/GrDrawBatch.h     10
-rw-r--r--   src/gpu/gl/GrGLBuffer.cpp          6
-rw-r--r--   src/gpu/gl/GrGLCaps.cpp            5
-rw-r--r--   src/gpu/gl/GrGLGpu.cpp            19
-rw-r--r--   src/gpu/gl/GrGLGpu.h               2
-rw-r--r--   src/gpu/vk/GrVkGpu.cpp            95
-rw-r--r--   src/gpu/vk/GrVkGpu.h               4
13 files changed, 209 insertions(+), 52 deletions(-)
diff --git a/src/gpu/GrBatchAtlas.cpp b/src/gpu/GrBatchAtlas.cpp
index 40ab0e6c0e..17e82ef1fe 100644
--- a/src/gpu/GrBatchAtlas.cpp
+++ b/src/gpu/GrBatchAtlas.cpp
@@ -7,19 +7,23 @@
#include "GrBatchAtlas.h"
#include "GrBatchFlushState.h"
+#include "GrBuffer.h"
#include "GrRectanizer.h"
+#include "GrResourceProvider.h"
#include "GrTracing.h"
////////////////////////////////////////////////////////////////////////////////
GrBatchAtlas::BatchPlot::BatchPlot(int index, uint64_t genID, int offX, int offY, int width,
- int height, GrPixelConfig config)
- : fLastUpload(GrBatchDrawToken::AlreadyFlushedToken())
+ int height, GrPixelConfig config, GrResourceProvider* rp)
+ : fResourceProvider(rp)
+ , fLastUpload(GrBatchDrawToken::AlreadyFlushedToken())
, fLastUse(GrBatchDrawToken::AlreadyFlushedToken())
, fIndex(index)
, fGenID(genID)
, fID(CreateId(fIndex, fGenID))
- , fData(nullptr)
+ , fDataPtr(nullptr)
+ , fTransferBuffer(nullptr)
, fWidth(width)
, fHeight(height)
, fX(offX)
@@ -36,7 +40,11 @@ GrBatchAtlas::BatchPlot::BatchPlot(int index, uint64_t genID, int offX, int offY
}
GrBatchAtlas::BatchPlot::~BatchPlot() {
- sk_free(fData);
+ if (fTransferBuffer) {
+ fTransferBuffer->unref();
+ } else {
+ sk_free(fDataPtr);
+ }
delete fRects;
}
@@ -52,14 +60,26 @@ bool GrBatchAtlas::BatchPlot::addSubImage(int width, int height, const void* ima
return false;
}
- if (!fData) {
- fData = reinterpret_cast<unsigned char*>(sk_calloc_throw(fBytesPerPixel * fWidth *
- fHeight));
+ if (!fDataPtr) {
+ if (!fTransferBuffer) {
+ fTransferBuffer =
+ fResourceProvider->createBuffer(fBytesPerPixel * fWidth * fHeight,
+ kXferCpuToGpu_GrBufferType,
+ kDynamic_GrAccessPattern,
+ GrResourceProvider::kNoPendingIO_Flag);
+ }
+ if (fTransferBuffer) {
+ fDataPtr = (unsigned char*)fTransferBuffer->map();
+ } else {
+ fDataPtr = reinterpret_cast<unsigned char*>(sk_calloc_throw(fBytesPerPixel * fWidth *
+ fHeight));
+ }
}
+
size_t rowBytes = width * fBytesPerPixel;
const unsigned char* imagePtr = (const unsigned char*)image;
// point ourselves at the right starting spot
- unsigned char* dataPtr = fData;
+ unsigned char* dataPtr = fDataPtr;
dataPtr += fBytesPerPixel * fWidth * loc->fY;
dataPtr += fBytesPerPixel * loc->fX;
// copy into the data buffer
@@ -79,16 +99,25 @@ bool GrBatchAtlas::BatchPlot::addSubImage(int width, int height, const void* ima
}
void GrBatchAtlas::BatchPlot::uploadToTexture(GrDrawBatch::WritePixelsFn& writePixels,
+ GrDrawBatch::TransferPixelsFn& xferPixels,
GrTexture* texture) {
// We should only be issuing uploads if we are in fact dirty
- SkASSERT(fDirty && fData && texture);
+ SkASSERT(fDirty && fDataPtr && texture);
TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("skia.gpu"), "GrBatchPlot::uploadToTexture");
size_t rowBytes = fBytesPerPixel * fWidth;
- const unsigned char* dataPtr = fData;
- dataPtr += rowBytes * fDirtyRect.fTop;
- dataPtr += fBytesPerPixel * fDirtyRect.fLeft;
- writePixels(texture, fOffset.fX + fDirtyRect.fLeft, fOffset.fY + fDirtyRect.fTop,
- fDirtyRect.width(), fDirtyRect.height(), fConfig, dataPtr, rowBytes);
+ size_t dataOffset = rowBytes * fDirtyRect.fTop + fBytesPerPixel * fDirtyRect.fLeft;
+ if (fTransferBuffer) {
+ fTransferBuffer->unmap();
+ fDataPtr = nullptr;
+ xferPixels(texture, fOffset.fX + fDirtyRect.fLeft, fOffset.fY + fDirtyRect.fTop,
+ fDirtyRect.width(), fDirtyRect.height(), fConfig, fTransferBuffer, dataOffset,
+ rowBytes);
+ } else {
+ writePixels(texture, fOffset.fX + fDirtyRect.fLeft, fOffset.fY + fDirtyRect.fTop,
+ fDirtyRect.width(), fDirtyRect.height(), fConfig, fDataPtr + dataOffset,
+ rowBytes);
+ }
+
fDirtyRect.setEmpty();
SkDEBUGCODE(fDirty = false;)
}
@@ -102,8 +131,8 @@ void GrBatchAtlas::BatchPlot::resetRects() {
fID = CreateId(fIndex, fGenID);
// zero out the plot
- if (fData) {
- sk_bzero(fData, fBytesPerPixel * fWidth * fHeight);
+ if (fDataPtr) {
+ sk_bzero(fDataPtr, fBytesPerPixel * fWidth * fHeight);
}
fDirtyRect.setEmpty();
@@ -112,7 +141,7 @@ void GrBatchAtlas::BatchPlot::resetRects() {
///////////////////////////////////////////////////////////////////////////////
-GrBatchAtlas::GrBatchAtlas(GrTexture* texture, int numPlotsX, int numPlotsY)
+GrBatchAtlas::GrBatchAtlas(GrResourceProvider* rp, GrTexture* texture, int numPlotsX, int numPlotsY)
: fTexture(texture)
, fAtlasGeneration(kInvalidAtlasGeneration + 1) {
@@ -135,7 +164,7 @@ GrBatchAtlas::GrBatchAtlas(GrTexture* texture, int numPlotsX, int numPlotsY)
for (int x = numPlotsX - 1, c = 0; x >= 0; --x, ++c) {
uint32_t index = r * numPlotsX + c;
currPlot->reset(new BatchPlot(index, 1, x, y, plotWidth, plotHeight,
- texture->desc().fConfig));
+ texture->desc().fConfig, rp));
// build LRU list
fPlotList.addToHead(currPlot->get());
@@ -162,12 +191,13 @@ inline void GrBatchAtlas::updatePlot(GrDrawBatch::Target* target, AtlasID* id, B
// upload. Otherwise, we already have a scheduled upload that hasn't yet occurred.
// This new update will piggy back on that previously scheduled update.
if (target->hasDrawBeenFlushed(plot->lastUploadToken())) {
- // With c+14 we could move sk_sp into lamba to only ref once.
+ // With C++14 we could move sk_sp into the lambda to only ref once.
sk_sp<BatchPlot> plotsp(SkRef(plot));
GrTexture* texture = fTexture;
GrBatchDrawToken lastUploadToken = target->addAsapUpload(
- [plotsp, texture] (GrDrawBatch::WritePixelsFn& writePixels) {
- plotsp->uploadToTexture(writePixels, texture);
+ [plotsp, texture] (GrDrawBatch::WritePixelsFn& writePixels,
+ GrDrawBatch::TransferPixelsFn& transferPixels) {
+ plotsp->uploadToTexture(writePixels, transferPixels, texture);
}
);
plot->setLastUploadToken(lastUploadToken);
@@ -235,8 +265,9 @@ bool GrBatchAtlas::addToAtlas(AtlasID* id, GrDrawBatch::Target* target,
sk_sp<BatchPlot> plotsp(SkRef(newPlot.get()));
GrTexture* texture = fTexture;
GrBatchDrawToken lastUploadToken = target->addInlineUpload(
- [plotsp, texture] (GrDrawBatch::WritePixelsFn& writePixels) {
- plotsp->uploadToTexture(writePixels, texture);
+ [plotsp, texture] (GrDrawBatch::WritePixelsFn& writePixels,
+ GrDrawBatch::TransferPixelsFn& transferPixels) {
+ plotsp->uploadToTexture(writePixels, transferPixels, texture);
}
);
newPlot->setLastUploadToken(lastUploadToken);
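The dataOffset computed in uploadToTexture above is plain row-major addressing into the plot's backing store. A hypothetical worked example (A8 plot, fWidth = 512, fBytesPerPixel = 1, dirty rect with fLeft = 8, fTop = 16):

    size_t rowBytes   = 1 * 512;                // fBytesPerPixel * fWidth
    size_t dataOffset = rowBytes * 16 + 1 * 8;  // 8192 + 8 = 8200 bytes in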
diff --git a/src/gpu/GrBatchAtlas.h b/src/gpu/GrBatchAtlas.h
index 707f463d97..418739fee6 100644
--- a/src/gpu/GrBatchAtlas.h
+++ b/src/gpu/GrBatchAtlas.h
@@ -16,6 +16,8 @@
#include "batches/GrDrawBatch.h"
class GrRectanizer;
+class GrResourceProvider;
+class GrBuffer;
struct GrBatchAtlasConfig {
int numPlotsX() const { return fWidth / fPlotWidth; }
@@ -41,7 +43,7 @@ public:
// the eviction
typedef void (*EvictionFunc)(GrBatchAtlas::AtlasID, void*);
- GrBatchAtlas(GrTexture*, int numPlotsX, int numPlotsY);
+ GrBatchAtlas(GrResourceProvider*, GrTexture*, int numPlotsX, int numPlotsY);
~GrBatchAtlas();
// Adds a width x height subimage to the atlas. Upon success it returns
@@ -172,19 +174,21 @@ private:
void setLastUploadToken(GrBatchDrawToken batchToken) { fLastUpload = batchToken; }
void setLastUseToken(GrBatchDrawToken batchToken) { fLastUse = batchToken; }
- void uploadToTexture(GrDrawBatch::WritePixelsFn&, GrTexture* texture);
+ void uploadToTexture(GrDrawBatch::WritePixelsFn&, GrDrawBatch::TransferPixelsFn&,
+ GrTexture* texture);
void resetRects();
private:
BatchPlot(int index, uint64_t genID, int offX, int offY, int width, int height,
- GrPixelConfig config);
+ GrPixelConfig config, GrResourceProvider* rp);
~BatchPlot() override;
// Create a clone of this plot. The cloned plot will take the place of the
// current plot in the atlas.
BatchPlot* clone() const {
- return new BatchPlot(fIndex, fGenID+1, fX, fY, fWidth, fHeight, fConfig);
+ return new BatchPlot(fIndex, fGenID+1, fX, fY, fWidth, fHeight, fConfig,
+ fResourceProvider);
}
static GrBatchAtlas::AtlasID CreateId(uint32_t index, uint64_t generation) {
@@ -193,13 +197,16 @@ private:
return generation << 16 | index;
}
+ // used to create transfer buffers
+ GrResourceProvider* fResourceProvider;
GrBatchDrawToken fLastUpload;
GrBatchDrawToken fLastUse;
const uint32_t fIndex;
uint64_t fGenID;
GrBatchAtlas::AtlasID fID;
- unsigned char* fData;
+ unsigned char* fDataPtr;
+ GrBuffer* fTransferBuffer;
const int fWidth;
const int fHeight;
const int fX;
diff --git a/src/gpu/GrBatchFlushState.h b/src/gpu/GrBatchFlushState.h
index 0b2e2bd239..ee7e3a0afa 100644
--- a/src/gpu/GrBatchFlushState.h
+++ b/src/gpu/GrBatchFlushState.h
@@ -77,7 +77,14 @@ public:
return this->fGpu->writePixels(surface, left, top, width, height, config, buffer,
rowBytes);
};
- upload(wp);
+ GrDrawBatch::TransferPixelsFn tp = [this](GrTexture* texture,
+ int left, int top, int width, int height,
+ GrPixelConfig config, GrBuffer* buffer,
+ size_t offset, size_t rowBytes) -> bool {
+ return this->fGpu->transferPixels(texture, left, top, width, height, config, buffer,
+ offset, rowBytes);
+ };
+ upload(wp, tp);
}
void putBackIndices(size_t indices) { fIndexPool.putBack(indices * sizeof(uint16_t)); }
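A DeferredUploadFn now consumes both callbacks. A minimal sketch mirroring the lambdas in GrBatchAtlas.cpp above (plotsp and texture captured as they are there):

    GrDrawBatch::DeferredUploadFn deferredUpload =
            [plotsp, texture](GrDrawBatch::WritePixelsFn& writePixels,
                              GrDrawBatch::TransferPixelsFn& transferPixels) {
                // BatchPlot takes the transfer path when it holds a
                // buffer and falls back to writePixels otherwise.
                plotsp->uploadToTexture(writePixels, transferPixels, texture);
            };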
diff --git a/src/gpu/GrGpu.cpp b/src/gpu/GrGpu.cpp
index d062a48129..088641b908 100644
--- a/src/gpu/GrGpu.cpp
+++ b/src/gpu/GrGpu.cpp
@@ -397,14 +397,14 @@ bool GrGpu::writePixels(GrSurface* surface,
return this->writePixels(surface, left, top, width, height, config, texels);
}
-bool GrGpu::transferPixels(GrSurface* surface,
+bool GrGpu::transferPixels(GrTexture* texture,
int left, int top, int width, int height,
GrPixelConfig config, GrBuffer* transferBuffer,
size_t offset, size_t rowBytes) {
SkASSERT(transferBuffer);
this->handleDirtyContext();
- if (this->onTransferPixels(surface, left, top, width, height, config,
+ if (this->onTransferPixels(texture, left, top, width, height, config,
transferBuffer, offset, rowBytes)) {
fStats.incTransfersToTexture();
return true;
diff --git a/src/gpu/GrGpu.h b/src/gpu/GrGpu.h
index 883b7ee98a..4defa040d7 100644
--- a/src/gpu/GrGpu.h
+++ b/src/gpu/GrGpu.h
@@ -280,9 +280,9 @@ public:
size_t rowBytes);
/**
- * Updates the pixels in a rectangle of a surface using a buffer
+ * Updates the pixels in a rectangle of a texture using a buffer
*
- * @param surface The surface to write to.
+ * @param texture The texture to write to.
* @param left left edge of the rectangle to write (inclusive)
* @param top top edge of the rectangle to write (inclusive)
* @param width width of rectangle to write in pixels.
@@ -293,7 +293,7 @@ public:
* @param rowBytes number of bytes between consecutive rows. Zero
* means rows are tightly packed.
*/
- bool transferPixels(GrSurface* surface,
+ bool transferPixels(GrTexture* texture,
int left, int top, int width, int height,
GrPixelConfig config, GrBuffer* transferBuffer,
size_t offset, size_t rowBytes);
@@ -579,8 +579,8 @@ private:
GrPixelConfig config,
const SkTArray<GrMipLevel>& texels) = 0;
- // overridden by backend-specific derived class to perform the surface write
- virtual bool onTransferPixels(GrSurface*,
+ // overridden by backend-specific derived class to perform the texture transfer
+ virtual bool onTransferPixels(GrTexture*,
int left, int top, int width, int height,
GrPixelConfig config, GrBuffer* transferBuffer,
size_t offset, size_t rowBytes) = 0;
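Note the rowBytes contract: zero means rows are tightly packed, so a backend would typically normalize it before use (a hypothetical sketch, not code from this change):

    size_t tightRowBytes = (size_t)width * GrBytesPerPixel(config);
    if (0 == rowBytes) {
        rowBytes = tightRowBytes;  // zero means rows are tightly packed
    }
    SkASSERT(rowBytes >= tightRowBytes);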
diff --git a/src/gpu/GrResourceProvider.cpp b/src/gpu/GrResourceProvider.cpp
index 9a2fff3141..922e5be320 100644
--- a/src/gpu/GrResourceProvider.cpp
+++ b/src/gpu/GrResourceProvider.cpp
@@ -139,7 +139,8 @@ GrBatchAtlas* GrResourceProvider::createAtlas(GrPixelConfig config,
if (!texture) {
return nullptr;
}
- GrBatchAtlas* atlas = new GrBatchAtlas(texture, numPlotsX, numPlotsY);
+ GrBatchAtlas* atlas = new GrBatchAtlas(this, texture,
+ numPlotsX, numPlotsY);
atlas->registerEvictionCallback(func, data);
return atlas;
}
diff --git a/src/gpu/batches/GrDrawBatch.h b/src/gpu/batches/GrDrawBatch.h
index bf93cf5a72..ef335fb8ba 100644
--- a/src/gpu/batches/GrDrawBatch.h
+++ b/src/gpu/batches/GrDrawBatch.h
@@ -46,13 +46,17 @@ private:
*/
class GrDrawBatch : public GrBatch {
public:
- /** Method that performs an upload on behalf of a DeferredUploadFn. */
+ /** Method that performs a texture write on behalf of a DeferredUploadFn. */
using WritePixelsFn = std::function<bool(GrSurface* texture,
int left, int top, int width, int height,
- GrPixelConfig config, const void* buffer,
+ GrPixelConfig config, const void* buffer,
size_t rowBytes)>;
+ using TransferPixelsFn = std::function<bool(GrTexture* texture,
+ int left, int top, int width, int height,
+ GrPixelConfig config, GrBuffer* buffer,
+ size_t offset, size_t rowBytes)>;
/** See comments before GrDrawBatch::Target definition on how deferred uploaders work. */
- using DeferredUploadFn = std::function<void(WritePixelsFn&)>;
+ using DeferredUploadFn = std::function<void(WritePixelsFn&, TransferPixelsFn&)>;
class Target;
diff --git a/src/gpu/gl/GrGLBuffer.cpp b/src/gpu/gl/GrGLBuffer.cpp
index 24fd59267d..787445e558 100644
--- a/src/gpu/gl/GrGLBuffer.cpp
+++ b/src/gpu/gl/GrGLBuffer.cpp
@@ -30,6 +30,12 @@
GrGLBuffer* GrGLBuffer::Create(GrGLGpu* gpu, size_t size, GrBufferType intendedType,
GrAccessPattern accessPattern, const void* data) {
+ if (gpu->glCaps().transferBufferType() == GrGLCaps::kNone_TransferBufferType &&
+ (kXferCpuToGpu_GrBufferType == intendedType ||
+ kXferGpuToCpu_GrBufferType == intendedType)) {
+ return nullptr;
+ }
+
bool cpuBacked = gpu->glCaps().useNonVBOVertexAndIndexDynamicData() &&
GrBufferTypeIsVertexOrIndex(intendedType) &&
kDynamic_GrAccessPattern == accessPattern;
diff --git a/src/gpu/gl/GrGLCaps.cpp b/src/gpu/gl/GrGLCaps.cpp
index f3e494acb9..abf2cb7d56 100644
--- a/src/gpu/gl/GrGLCaps.cpp
+++ b/src/gpu/gl/GrGLCaps.cpp
@@ -393,8 +393,9 @@ void GrGLCaps::init(const GrContextOptions& contextOptions,
} else {
if (version >= GR_GL_VER(3, 0) || ctxInfo.hasExtension("GL_NV_pixel_buffer_object")) {
fTransferBufferType = kPBO_TransferBufferType;
- } else if (ctxInfo.hasExtension("GL_CHROMIUM_pixel_transfer_buffer_object")) {
- fTransferBufferType = kChromium_TransferBufferType;
+// TODO: get transfer buffers working in Chrome
+// } else if (ctxInfo.hasExtension("GL_CHROMIUM_pixel_transfer_buffer_object")) {
+// fTransferBufferType = kChromium_TransferBufferType;
}
}
diff --git a/src/gpu/gl/GrGLGpu.cpp b/src/gpu/gl/GrGLGpu.cpp
index 3a7f531476..e50f1e2fdb 100644
--- a/src/gpu/gl/GrGLGpu.cpp
+++ b/src/gpu/gl/GrGLGpu.cpp
@@ -895,13 +895,13 @@ bool GrGLGpu::onWritePixels(GrSurface* surface,
return success;
}
-bool GrGLGpu::onTransferPixels(GrSurface* surface,
+bool GrGLGpu::onTransferPixels(GrTexture* texture,
int left, int top, int width, int height,
GrPixelConfig config, GrBuffer* transferBuffer,
size_t offset, size_t rowBytes) {
- GrGLTexture* glTex = static_cast<GrGLTexture*>(surface->asTexture());
+ GrGLTexture* glTex = static_cast<GrGLTexture*>(texture);
- if (!check_write_and_transfer_input(glTex, surface, config)) {
+ if (!check_write_and_transfer_input(glTex, texture, config)) {
return false;
}
@@ -920,15 +920,16 @@ bool GrGLGpu::onTransferPixels(GrSurface* surface,
bool success = false;
GrMipLevel mipLevel;
- mipLevel.fPixels = transferBuffer;
+ mipLevel.fPixels = (void*)offset;
mipLevel.fRowBytes = rowBytes;
SkSTArray<1, GrMipLevel> texels;
texels.push_back(mipLevel);
success = this->uploadTexData(glTex->desc(), glTex->target(), kTransfer_UploadType,
left, top, width, height, config, texels);
+
if (success) {
SkIRect rect = SkIRect::MakeXYWH(left, top, width, height);
- this->didWriteToSurface(surface, &rect);
+ this->didWriteToSurface(texture, &rect);
return true;
}
@@ -1196,6 +1197,14 @@ bool GrGLGpu::uploadTexData(const GrSurfaceDesc& desc,
SkASSERT(this->caps()->isConfigTexturable(desc.fConfig));
+ // unbind any previous transfer buffer if not transferring
+ auto& xferBufferState = fHWBufferState[kXferCpuToGpu_GrBufferType];
+ if (kTransfer_UploadType != uploadType &&
+ SK_InvalidUniqueID != xferBufferState.fBoundBufferUniqueID) {
+ GL_CALL(BindBuffer(xferBufferState.fGLTarget, 0));
+ xferBufferState.invalidate();
+ }
+
// texels is const.
// But we may need to flip the texture vertically to prepare it.
// Rather than flip in place and alter the incoming data,
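The mipLevel.fPixels = (void*)offset trick above relies on standard pixel-unpack-buffer semantics: while a PBO is bound, the pixels pointer passed to texture uploads is read as a byte offset into the buffer rather than a client-memory address. A minimal raw-GL sketch of the same idea (illustrative only; transferBufferID is a stand-in, not Skia code):

    glBindBuffer(GL_PIXEL_UNPACK_BUFFER, transferBufferID);
    glTexSubImage2D(GL_TEXTURE_2D, 0, left, top, width, height,
                    GL_RGBA, GL_UNSIGNED_BYTE,
                    reinterpret_cast<const GLvoid*>(offset));  // offset, not a pointer
    glBindBuffer(GL_PIXEL_UNPACK_BUFFER, 0);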
diff --git a/src/gpu/gl/GrGLGpu.h b/src/gpu/gl/GrGLGpu.h
index 00ceb85241..57d076094c 100644
--- a/src/gpu/gl/GrGLGpu.h
+++ b/src/gpu/gl/GrGLGpu.h
@@ -184,7 +184,7 @@ private:
GrPixelConfig config,
const SkTArray<GrMipLevel>& texels) override;
- bool onTransferPixels(GrSurface*,
+ bool onTransferPixels(GrTexture*,
int left, int top, int width, int height,
GrPixelConfig config, GrBuffer* transferBuffer,
size_t offset, size_t rowBytes) override;
diff --git a/src/gpu/vk/GrVkGpu.cpp b/src/gpu/vk/GrVkGpu.cpp
index 5fba475cab..8083509877 100644
--- a/src/gpu/vk/GrVkGpu.cpp
+++ b/src/gpu/vk/GrVkGpu.cpp
@@ -187,11 +187,13 @@ GrBuffer* GrVkGpu::onCreateBuffer(size_t size, GrBufferType type, GrAccessPatter
buff = GrVkIndexBuffer::Create(this, size, kDynamic_GrAccessPattern == accessPattern);
break;
case kXferCpuToGpu_GrBufferType:
- SkASSERT(kStream_GrAccessPattern == accessPattern);
+ SkASSERT(kDynamic_GrAccessPattern == accessPattern ||
+ kStream_GrAccessPattern == accessPattern);
buff = GrVkTransferBuffer::Create(this, size, GrVkBuffer::kCopyRead_Type);
break;
case kXferGpuToCpu_GrBufferType:
- SkASSERT(kStream_GrAccessPattern == accessPattern);
+ SkASSERT(kDynamic_GrAccessPattern == accessPattern ||
+ kStream_GrAccessPattern == accessPattern);
buff = GrVkTransferBuffer::Create(this, size, GrVkBuffer::kCopyWrite_Type);
break;
default:
@@ -284,6 +286,95 @@ bool GrVkGpu::onWritePixels(GrSurface* surface,
return false;
}
+
+bool GrVkGpu::onTransferPixels(GrTexture* texture,
+ int left, int top, int width, int height,
+ GrPixelConfig config, GrBuffer* transferBuffer,
+ size_t bufferOffset, size_t rowBytes) {
+ GrVkTexture* vkTex = static_cast<GrVkTexture*>(texture);
+ if (!vkTex) {
+ return false;
+ }
+ GrVkTransferBuffer* vkBuffer = static_cast<GrVkTransferBuffer*>(transferBuffer);
+ if (!vkBuffer) {
+ return false;
+ }
+
+ // We assume Vulkan doesn't do sRGB <-> linear conversions when reading and writing pixels.
+ if (GrPixelConfigIsSRGB(texture->config()) != GrPixelConfigIsSRGB(config)) {
+ return false;
+ }
+
+ // TODO: Handle y axis flip via copy to temp image, then blit to final
+ if (kBottomLeft_GrSurfaceOrigin == vkTex->origin()) {
+ return false;
+ }
+
+ bool success = false;
+ if (GrPixelConfigIsCompressed(vkTex->desc().fConfig)) {
+ // We check that config == desc.fConfig in GrGpu::getWritePixelsInfo()
+ SkASSERT(config == vkTex->desc().fConfig);
+ // TODO: add compressed texture support
+ // delete the following two lines and uncomment the two after that when ready
+ vkTex->unref();
+ return false;
+ //success = this->uploadCompressedTexData(vkTex->desc(), buffer, false, left, top, width,
+ // height);
+ } else {
+ // make sure the unmap has finished
+ vkBuffer->addMemoryBarrier(this,
+ VK_ACCESS_HOST_WRITE_BIT,
+ VK_ACCESS_TRANSFER_READ_BIT,
+ VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT,
+ VK_PIPELINE_STAGE_TRANSFER_BIT,
+ false);
+
+ // Set up copy region
+ size_t bpp = GrBytesPerPixel(config);
+
+ VkBufferImageCopy region;
+ memset(&region, 0, sizeof(VkBufferImageCopy));
+ region.bufferOffset = bufferOffset;
+ region.bufferRowLength = (uint32_t)(rowBytes/bpp);
+ region.bufferImageHeight = 0;
+ region.imageSubresource = { VK_IMAGE_ASPECT_COLOR_BIT, 0, 0, 1 };
+ region.imageOffset = { left, top, 0 };
+ region.imageExtent = { (uint32_t)width, (uint32_t)height, 1 };
+
+ // Change layout of our target so it can be copied to
+ VkImageLayout layout = vkTex->currentLayout();
+ VkPipelineStageFlags srcStageMask = GrVkMemory::LayoutToPipelineStageFlags(layout);
+ VkPipelineStageFlags dstStageMask = VK_PIPELINE_STAGE_TRANSFER_BIT;
+ VkAccessFlags srcAccessMask = GrVkMemory::LayoutToSrcAccessMask(layout);
+ VkAccessFlags dstAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT;
+ vkTex->setImageLayout(this,
+ VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
+ srcAccessMask,
+ dstAccessMask,
+ srcStageMask,
+ dstStageMask,
+ false);
+
+ // Copy the buffer to the image
+ fCurrentCmdBuffer->copyBufferToImage(this,
+ vkBuffer,
+ vkTex,
+ VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
+ 1,
+ &region);
+
+ // Submit the current command buffer to the Queue
+ this->submitCommandBuffer(kSkip_SyncQueue);
+ success = true;
+ }
+
+ if (success) {
+ vkTex->texturePriv().dirtyMipMaps(true);
+ return true;
+ }
+
+ return false;
+}
+
bool GrVkGpu::uploadTexData(GrVkTexture* tex,
int left, int top, int width, int height,
GrPixelConfig dataConfig,
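One subtlety in the region setup above: bufferRowLength is measured in texels, not bytes, hence the rowBytes/bpp division. A worked example with concrete numbers (illustrative only, not code from this change; assumes an RGBA8 update whose source rows are padded to 1100 bytes):

    const size_t bpp = 4;                      // RGBA8
    const size_t rowBytes = 1100;              // padded source pitch in bytes
    VkBufferImageCopy region;
    memset(&region, 0, sizeof(VkBufferImageCopy));
    region.bufferOffset      = bufferOffset;
    region.bufferRowLength   = (uint32_t)(rowBytes / bpp);  // 275 texels per row
    region.bufferImageHeight = 0;              // 0 => rows follow imageExtent tightly
    region.imageSubresource  = { VK_IMAGE_ASPECT_COLOR_BIT, 0, 0, 1 };
    region.imageOffset       = { left, top, 0 };
    region.imageExtent       = { 256, 64, 1 }; // width, height, depth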
diff --git a/src/gpu/vk/GrVkGpu.h b/src/gpu/vk/GrVkGpu.h
index 76b18ac71e..e738e7336a 100644
--- a/src/gpu/vk/GrVkGpu.h
+++ b/src/gpu/vk/GrVkGpu.h
@@ -151,10 +151,10 @@ private:
int left, int top, int width, int height,
GrPixelConfig config, const SkTArray<GrMipLevel>&) override;
- bool onTransferPixels(GrSurface*,
+ bool onTransferPixels(GrTexture*,
int left, int top, int width, int height,
GrPixelConfig config, GrBuffer* transferBuffer,
- size_t offset, size_t rowBytes) override { return false; }
+ size_t offset, size_t rowBytes) override;
void onResolveRenderTarget(GrRenderTarget* target) override {}