author     Greg Daniel <egdaniel@google.com>               2017-08-24 15:59:33 -0400
committer  Skia Commit-Bot <skia-commit-bot@chromium.org>  2017-08-24 20:18:27 +0000
commit     500d58b2a6e6fd03239622da42b67b2c9843b7be (patch)
tree       dc77637f3bbcc07773c3bdbd292870c59f28e333
parent     fb126fa96e0f49f5dc17a9a043acced68be99e93 (diff)
Make Copy Ops go through GpuCommandBuffer instead of straight to GPU.
Bug: skia:
Change-Id: I4eae4507e07278997e26419e94586eef0780c423
Reviewed-on: https://skia-review.googlesource.com/38361
Commit-Queue: Greg Daniel <egdaniel@google.com>
Reviewed-by: Robert Phillips <robertphillips@google.com>
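
For orientation, a minimal self-contained sketch (not Skia source) of the class split this change introduces: a shared GrGpuCommandBuffer base owns copy(), insertEventMarker(), and submit(); a render-target flavor and a texture flavor sit underneath it; and copy ops record into whichever buffer is current instead of calling the GPU object directly. The class and method names mirror the diff below, but the stub Surface/RenderTarget/Texture types, the trimmed-down signatures, and main() are invented here purely so the sketch compiles on its own.

#include <iostream>
#include <memory>

struct Surface {};                    // stands in for GrSurface
struct RenderTarget : Surface {};     // stands in for GrRenderTarget
struct Texture : Surface {};          // stands in for GrTexture
struct IRect  { int fLeft, fTop, fRight, fBottom; };   // stands in for SkIRect
struct IPoint { int fX, fY; };                         // stands in for SkIPoint

class GpuRTCommandBuffer;             // forward declaration, as in the real header

class GpuCommandBuffer {
public:
    virtual ~GpuCommandBuffer() = default;
    // Copies now go through the command buffer instead of straight to the GPU object.
    virtual void copy(Surface* src, const IRect& srcRect, const IPoint& dstPoint) = 0;
    virtual void insertEventMarker(const char*) = 0;
    // Ops that need draw/clear/discard downcast through this hook.
    virtual GpuRTCommandBuffer* asRTCommandBuffer() { return nullptr; }
    virtual void submit() = 0;
};

class GpuTextureCommandBuffer : public GpuCommandBuffer {
public:
    explicit GpuTextureCommandBuffer(Texture* texture) : fTexture(texture) {}
    void copy(Surface*, const IRect&, const IPoint&) override {
        std::cout << "record copy into texture\n";
    }
    void insertEventMarker(const char*) override {}
    void submit() override {
        std::cout << "replay recorded copies into texture "
                  << static_cast<void*>(fTexture) << "\n";
    }
private:
    Texture* fTexture;
};

class GpuRTCommandBuffer : public GpuCommandBuffer {
public:
    explicit GpuRTCommandBuffer(RenderTarget* rt) : fRenderTarget(rt) {}
    GpuRTCommandBuffer* asRTCommandBuffer() override { return this; }
    void copy(Surface*, const IRect&, const IPoint&) override {
        std::cout << "record copy into render target\n";
    }
    void insertEventMarker(const char*) override {}
    void draw() { std::cout << "record draw\n"; }      // stands in for the real draw(...)
    void submit() override {
        std::cout << "replay draws and copies into render target "
                  << static_cast<void*>(fRenderTarget) << "\n";
    }
private:
    RenderTarget* fRenderTarget;
};

int main() {
    RenderTarget rt;
    std::unique_ptr<GpuCommandBuffer> cb = std::make_unique<GpuRTCommandBuffer>(&rt);

    Texture src;
    cb->copy(&src, IRect{0, 0, 8, 8}, IPoint{0, 0});   // a copy op only needs the base class
    if (GpuRTCommandBuffer* rtCB = cb->asRTCommandBuffer()) {
        rtCB->draw();                                  // draw ops need the RT flavor
    }
    cb->submit();
    return 0;
}

In the actual patch the render-target flavor additionally carries draw/clear/discard and the load/store-op structs, and the Vulkan buffers defer the recorded copies until submit() replays them.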
-rw-r--r--  samplecode/SampleCCPRGeometry.cpp                  2
-rw-r--r--  src/gpu/GrGpu.h                                   17
-rw-r--r--  src/gpu/GrGpuCommandBuffer.cpp                    21
-rw-r--r--  src/gpu/GrGpuCommandBuffer.h                      73
-rw-r--r--  src/gpu/GrOpFlushState.cpp                         4
-rw-r--r--  src/gpu/GrOpFlushState.h                           3
-rw-r--r--  src/gpu/GrRenderTargetOpList.cpp                  42
-rw-r--r--  src/gpu/GrTextureOpList.cpp                        8
-rw-r--r--  src/gpu/ccpr/GrCCPRCoverageOpsBuilder.cpp          7
-rw-r--r--  src/gpu/ccpr/GrCoverageCountingPathRenderer.cpp    3
-rw-r--r--  src/gpu/gl/GrGLGpu.cpp                            13
-rw-r--r--  src/gpu/gl/GrGLGpu.h                              20
-rw-r--r--  src/gpu/gl/GrGLGpuCommandBuffer.h                 43
-rw-r--r--  src/gpu/mock/GrMockGpu.cpp                        16
-rw-r--r--  src/gpu/mock/GrMockGpu.h                          12
-rw-r--r--  src/gpu/mock/GrMockGpuCommandBuffer.h             27
-rw-r--r--  src/gpu/ops/GrClearOp.cpp                          3
-rw-r--r--  src/gpu/ops/GrClearStencilClipOp.h                 3
-rw-r--r--  src/gpu/ops/GrCopySurfaceOp.cpp                    5
-rw-r--r--  src/gpu/ops/GrCopySurfaceOp.h                      2
-rw-r--r--  src/gpu/ops/GrDiscardOp.h                          3
-rw-r--r--  src/gpu/ops/GrMeshDrawOp.cpp                       9
-rw-r--r--  src/gpu/ops/GrOp.h                                 2
-rw-r--r--  src/gpu/vk/GrVkGpu.cpp                            13
-rw-r--r--  src/gpu/vk/GrVkGpu.h                               8
-rw-r--r--  src/gpu/vk/GrVkGpuCommandBuffer.cpp              140
-rw-r--r--  src/gpu/vk/GrVkGpuCommandBuffer.h                 61
-rw-r--r--  tests/GrMeshTest.cpp                               4
-rw-r--r--  tests/GrPipelineDynamicStateTest.cpp               6
29 files changed, 376 insertions, 194 deletions
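
Before the per-file diffs, a second small sketch (again not Skia source; the stub types are invented so it compiles standalone) of how ops reach the new buffers through the flush state: GrOpFlushState::rtCommandBuffer() is just a downcast helper over the stored GrGpuCommandBuffer, draw/clear/discard ops assert that it is non-null, and GrCopySurfaceOp needs only the base interface, so it works in both texture and render-target op lists.

#include <cassert>
#include <iostream>

class GpuRTCommandBuffer;

class GpuCommandBuffer {
public:
    virtual ~GpuCommandBuffer() = default;
    virtual void copy() = 0;                               // src/rect/point args elided
    virtual GpuRTCommandBuffer* asRTCommandBuffer() { return nullptr; }
};

class GpuRTCommandBuffer : public GpuCommandBuffer {
public:
    GpuRTCommandBuffer* asRTCommandBuffer() override { return this; }
    void copy() override { std::cout << "copy recorded against the render target\n"; }
    void clear() { std::cout << "clear recorded\n"; }
};

class OpFlushState {
public:
    void setCommandBuffer(GpuCommandBuffer* buffer) { fCommandBuffer = buffer; }
    GpuCommandBuffer* commandBuffer() { return fCommandBuffer; }
    // Helper for ops that are only ever executed from a render-target op list.
    GpuRTCommandBuffer* rtCommandBuffer() { return fCommandBuffer->asRTCommandBuffer(); }
private:
    GpuCommandBuffer* fCommandBuffer = nullptr;
};

// Mirrors the pattern in GrCopySurfaceOp::onExecute: the base interface is enough.
void copySurfaceOpExecute(OpFlushState* state) {
    state->commandBuffer()->copy();
}

// Mirrors the pattern in GrClearOp::onExecute: clears only make sense on a render target.
void clearOpExecute(OpFlushState* state) {
    assert(state->rtCommandBuffer());
    state->rtCommandBuffer()->clear();
}

int main() {
    GpuRTCommandBuffer rtBuffer;
    OpFlushState state;
    state.setCommandBuffer(&rtBuffer);
    copySurfaceOpExecute(&state);
    clearOpExecute(&state);
    return 0;
}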
diff --git a/samplecode/SampleCCPRGeometry.cpp b/samplecode/SampleCCPRGeometry.cpp
index d1cb2b5b57..9749e45a29 100644
--- a/samplecode/SampleCCPRGeometry.cpp
+++ b/samplecode/SampleCCPRGeometry.cpp
@@ -271,7 +271,7 @@ void CCPRGeometryView::Op::onExecute(GrOpFlushState* state) {
GR_GL_CALL(glGpu->glInterface(), Enable(GR_GL_LINE_SMOOTH));
}
- state->commandBuffer()->draw(pipeline, ccprProc, &mesh, nullptr, 1, this->bounds());
+ state->rtCommandBuffer()->draw(pipeline, ccprProc, &mesh, nullptr, 1, this->bounds());
if (glGpu) {
context->resetContext(kMisc_GrGLBackendState);
diff --git a/src/gpu/GrGpu.h b/src/gpu/GrGpu.h
index cc55a05722..8f11249463 100644
--- a/src/gpu/GrGpu.h
+++ b/src/gpu/GrGpu.h
@@ -366,15 +366,16 @@ public:
return fMultisampleSpecs[uniqueID];
}
- // Creates a GrGpuCommandBuffer in which the GrOpList can send draw commands to instead of
- // directly to the Gpu object. This currently does not take a GrRenderTarget. The command buffer
- // is expected to infer the render target from the first draw, clear, or discard. This is an
- // awkward workaround that goes away after MDB is complete and the render target is known from
- // the GrRenderTargetOpList.
- virtual GrGpuCommandBuffer* createCommandBuffer(
+ // Creates a GrGpuRTCommandBuffer which GrOpLists send draw commands to instead of directly
+ // to the Gpu object.
+ virtual GrGpuRTCommandBuffer* createCommandBuffer(
GrRenderTarget*, GrSurfaceOrigin,
- const GrGpuCommandBuffer::LoadAndStoreInfo&,
- const GrGpuCommandBuffer::StencilLoadAndStoreInfo&) = 0;
+ const GrGpuRTCommandBuffer::LoadAndStoreInfo&,
+ const GrGpuRTCommandBuffer::StencilLoadAndStoreInfo&) = 0;
+
+ // Creates a GrGpuTextureCommandBuffer which GrOpLists send texture commands to instead of
+ // directly to the Gpu object.
+ virtual GrGpuTextureCommandBuffer* createCommandBuffer(GrTexture*, GrSurfaceOrigin) = 0;
// Called by GrDrawingManager when flushing.
// Provides a hook for post-flush actions (e.g. Vulkan command buffer submits). This will also
diff --git a/src/gpu/GrGpuCommandBuffer.cpp b/src/gpu/GrGpuCommandBuffer.cpp
index ad370f077e..1dc16c1caa 100644
--- a/src/gpu/GrGpuCommandBuffer.cpp
+++ b/src/gpu/GrGpuCommandBuffer.cpp
@@ -16,12 +16,7 @@
#include "GrRenderTarget.h"
#include "SkRect.h"
-void GrGpuCommandBuffer::submit() {
- this->gpu()->handleDirtyContext();
- this->onSubmit();
-}
-
-void GrGpuCommandBuffer::clear(const GrFixedClip& clip, GrColor color) {
+void GrGpuRTCommandBuffer::clear(const GrFixedClip& clip, GrColor color) {
#ifdef SK_DEBUG
GrRenderTarget* rt = fRenderTarget;
SkASSERT(rt);
@@ -32,16 +27,16 @@ void GrGpuCommandBuffer::clear(const GrFixedClip& clip, GrColor color) {
this->onClear(clip, color);
}
-void GrGpuCommandBuffer::clearStencilClip(const GrFixedClip& clip, bool insideStencilMask) {
+void GrGpuRTCommandBuffer::clearStencilClip(const GrFixedClip& clip, bool insideStencilMask) {
this->onClearStencilClip(clip, insideStencilMask);
}
-bool GrGpuCommandBuffer::draw(const GrPipeline& pipeline,
- const GrPrimitiveProcessor& primProc,
- const GrMesh meshes[],
- const GrPipeline::DynamicState dynamicStates[],
- int meshCount,
- const SkRect& bounds) {
+bool GrGpuRTCommandBuffer::draw(const GrPipeline& pipeline,
+ const GrPrimitiveProcessor& primProc,
+ const GrMesh meshes[],
+ const GrPipeline::DynamicState dynamicStates[],
+ int meshCount,
+ const SkRect& bounds) {
#ifdef SK_DEBUG
SkASSERT(!primProc.hasInstanceAttribs() || this->gpu()->caps()->instanceAttribSupport());
for (int i = 0; i < meshCount; ++i) {
diff --git a/src/gpu/GrGpuCommandBuffer.h b/src/gpu/GrGpuCommandBuffer.h
index 31e9a546d5..07e823fd11 100644
--- a/src/gpu/GrGpuCommandBuffer.h
+++ b/src/gpu/GrGpuCommandBuffer.h
@@ -22,13 +22,54 @@ class GrRenderTarget;
struct SkIRect;
struct SkRect;
+class GrGpuRTCommandBuffer;
+
+class GrGpuCommandBuffer {
+public:
+ virtual ~GrGpuCommandBuffer() {}
+
+ // Copy src into current surface owned by either a GrGpuTextureCommandBuffer or
+ // GrGpuRenderTargetCommandBuffer.
+ virtual void copy(GrSurface* src, const SkIRect& srcRect, const SkIPoint& dstPoint) = 0;
+
+ virtual void insertEventMarker(const char*) = 0;
+
+ virtual GrGpuRTCommandBuffer* asRTCommandBuffer() { return nullptr; }
+
+ // Sends the command buffer off to the GPU object to execute the commands built up in the
+ // buffer. The gpu object is allowed to defer execution of the commands until it is flushed.
+ virtual void submit() = 0;
+
+protected:
+ GrGpuCommandBuffer(GrSurfaceOrigin origin) : fOrigin(origin) {}
+
+ GrSurfaceOrigin fOrigin;
+};
+
+class GrGpuTextureCommandBuffer : public GrGpuCommandBuffer{
+public:
+ virtual ~GrGpuTextureCommandBuffer() {}
+
+ virtual void submit() = 0;
+
+protected:
+ GrGpuTextureCommandBuffer(GrTexture* texture, GrSurfaceOrigin origin)
+ : INHERITED(origin)
+ , fTexture(texture) {}
+
+ GrTexture* fTexture;
+
+private:
+ typedef GrGpuCommandBuffer INHERITED;
+};
+
/**
- * The GrGpuCommandBuffer is a series of commands (draws, clears, and discards), which all target
- * the same render target. It is possible that these commands execute immediately (GL), or get
- * buffered up for later execution (Vulkan). GrOps will execute their draw commands into a
+ * The GrGpuRenderTargetCommandBuffer is a series of commands (draws, clears, and discards), which
+ * all target the same render target. It is possible that these commands execute immediately (GL),
+ * or get buffered up for later execution (Vulkan). GrOps will execute their draw commands into a
* GrGpuCommandBuffer.
*/
-class GrGpuCommandBuffer {
+class GrGpuRTCommandBuffer : public GrGpuCommandBuffer {
public:
enum class LoadOp {
kLoad,
@@ -54,20 +95,14 @@ public:
StoreOp fStoreOp;
};
- GrGpuCommandBuffer(GrRenderTarget* rt, GrSurfaceOrigin origin)
- : fRenderTarget(rt)
- , fOrigin(origin) {
- }
- virtual ~GrGpuCommandBuffer() {}
+ virtual ~GrGpuRTCommandBuffer() {}
+
+ GrGpuRTCommandBuffer* asRTCommandBuffer() { return this; }
virtual void begin() = 0;
// Signals the end of recording to the command buffer and that it can now be submitted.
virtual void end() = 0;
- // Sends the command buffer off to the GPU object to execute the commands built up in the
- // buffer. The gpu object is allowed to defer execution of the commands until it is flushed.
- void submit();
-
// We pass in an array of meshCount GrMesh to the draw. The backend should loop over each
// GrMesh object and emit a draw for it. Each draw will use the same GrPipeline and
// GrPrimitiveProcessor. This may fail if the draw would exceed any resource limits (e.g.
@@ -95,17 +130,17 @@ public:
// TODO: This should be removed in the future to favor using the load and store ops for discard
virtual void discard() = 0;
- virtual void insertEventMarker(const char*) = 0;
-
protected:
+ GrGpuRTCommandBuffer(GrRenderTarget* rt, GrSurfaceOrigin origin)
+ : INHERITED(origin)
+ , fRenderTarget(rt) {
+ }
+
GrRenderTarget* fRenderTarget;
- GrSurfaceOrigin fOrigin;
private:
virtual GrGpu* gpu() = 0;
- virtual void onSubmit() = 0;
-
// overridden by backend-specific derived class to perform the draw call.
virtual void onDraw(const GrPipeline&,
const GrPrimitiveProcessor&,
@@ -118,6 +153,8 @@ private:
virtual void onClear(const GrFixedClip&, GrColor) = 0;
virtual void onClearStencilClip(const GrFixedClip&, bool insideStencilMask) = 0;
+
+ typedef GrGpuCommandBuffer INHERITED;
};
#endif
diff --git a/src/gpu/GrOpFlushState.cpp b/src/gpu/GrOpFlushState.cpp
index 30636a4413..390942abe4 100644
--- a/src/gpu/GrOpFlushState.cpp
+++ b/src/gpu/GrOpFlushState.cpp
@@ -26,6 +26,10 @@ const GrCaps& GrOpFlushState::caps() const {
return *fGpu->caps();
}
+GrGpuRTCommandBuffer* GrOpFlushState::rtCommandBuffer() {
+ return fCommandBuffer->asRTCommandBuffer();
+}
+
void* GrOpFlushState::makeVertexSpace(size_t vertexSize, int vertexCount,
const GrBuffer** buffer, int* startVertex) {
return fVertexPool.makeSpace(vertexSize, vertexCount, buffer, startVertex);
diff --git a/src/gpu/GrOpFlushState.h b/src/gpu/GrOpFlushState.h
index cc1d22b962..a5a5f82c31 100644
--- a/src/gpu/GrOpFlushState.h
+++ b/src/gpu/GrOpFlushState.h
@@ -15,6 +15,7 @@
class GrGpu;
class GrGpuCommandBuffer;
+class GrGpuRTCommandBuffer;
class GrResourceProvider;
/** Tracks the state across all the GrOps (really just the GrDrawOps) in a GrOpList flush. */
@@ -86,6 +87,8 @@ public:
void putBackVertexSpace(size_t sizeInBytes) { fVertexPool.putBack(sizeInBytes); }
GrGpuCommandBuffer* commandBuffer() { return fCommandBuffer; }
+ // Helper function used by Ops that are only called via RenderTargetOpLists
+ GrGpuRTCommandBuffer* rtCommandBuffer();
void setCommandBuffer(GrGpuCommandBuffer* buffer) { fCommandBuffer = buffer; }
GrGpu* gpu() { return fGpu; }
diff --git a/src/gpu/GrRenderTargetOpList.cpp b/src/gpu/GrRenderTargetOpList.cpp
index ca449d30d3..b947891d90 100644
--- a/src/gpu/GrRenderTargetOpList.cpp
+++ b/src/gpu/GrRenderTargetOpList.cpp
@@ -91,13 +91,13 @@ void GrRenderTargetOpList::onPrepare(GrOpFlushState* flushState) {
}
}
-static std::unique_ptr<GrGpuCommandBuffer> create_command_buffer(GrGpu* gpu,
- GrRenderTarget* rt,
- GrSurfaceOrigin origin,
- bool clearSB) {
- static const GrGpuCommandBuffer::LoadAndStoreInfo kBasicLoadStoreInfo {
- GrGpuCommandBuffer::LoadOp::kLoad,
- GrGpuCommandBuffer::StoreOp::kStore,
+static std::unique_ptr<GrGpuRTCommandBuffer> create_command_buffer(GrGpu* gpu,
+ GrRenderTarget* rt,
+ GrSurfaceOrigin origin,
+ bool clearSB) {
+ static const GrGpuRTCommandBuffer::LoadAndStoreInfo kBasicLoadStoreInfo {
+ GrGpuRTCommandBuffer::LoadOp::kLoad,
+ GrGpuRTCommandBuffer::StoreOp::kStore,
GrColor_ILLEGAL
};
@@ -106,19 +106,19 @@ static std::unique_ptr<GrGpuCommandBuffer> create_command_buffer(GrGpu* gpu,
// to stop splitting up higher level opLists for copyOps to achieve that.
// Note: we would still need SB loads and stores but they would happen at a
// lower level (inside the VK command buffer).
- const GrGpuCommandBuffer::StencilLoadAndStoreInfo stencilLoadAndStoreInfo {
- clearSB ? GrGpuCommandBuffer::LoadOp::kClear : GrGpuCommandBuffer::LoadOp::kLoad,
- GrGpuCommandBuffer::StoreOp::kStore,
+ const GrGpuRTCommandBuffer::StencilLoadAndStoreInfo stencilLoadAndStoreInfo {
+ clearSB ? GrGpuRTCommandBuffer::LoadOp::kClear : GrGpuRTCommandBuffer::LoadOp::kLoad,
+ GrGpuRTCommandBuffer::StoreOp::kStore,
};
- std::unique_ptr<GrGpuCommandBuffer> buffer(
+ std::unique_ptr<GrGpuRTCommandBuffer> buffer(
gpu->createCommandBuffer(rt, origin,
kBasicLoadStoreInfo, // Color
stencilLoadAndStoreInfo)); // Stencil
return buffer;
}
-static inline void finish_command_buffer(GrGpuCommandBuffer* buffer) {
+static inline void finish_command_buffer(GrGpuRTCommandBuffer* buffer) {
if (!buffer) {
return;
}
@@ -140,7 +140,7 @@ bool GrRenderTargetOpList::onExecute(GrOpFlushState* flushState) {
TRACE_EVENT0("skia", TRACE_FUNC);
#endif
- std::unique_ptr<GrGpuCommandBuffer> commandBuffer = create_command_buffer(
+ std::unique_ptr<GrGpuRTCommandBuffer> commandBuffer = create_command_buffer(
flushState->gpu(),
fTarget.get()->priv().peekRenderTarget(),
fTarget.get()->origin(),
@@ -157,22 +157,6 @@ bool GrRenderTargetOpList::onExecute(GrOpFlushState* flushState) {
TRACE_EVENT0("skia", fRecordedOps[i].fOp->name());
#endif
- if (fRecordedOps[i].fOp->needsCommandBufferIsolation()) {
- // This op is a special snowflake and must occur between command buffers
- // TODO: make this go through the command buffer
- finish_command_buffer(commandBuffer.get());
-
- commandBuffer.reset();
- flushState->setCommandBuffer(commandBuffer.get());
- } else if (!commandBuffer) {
- commandBuffer = create_command_buffer(flushState->gpu(),
- fTarget.get()->priv().peekRenderTarget(),
- fTarget.get()->origin(),
- false);
- flushState->setCommandBuffer(commandBuffer.get());
- commandBuffer->begin();
- }
-
GrOpFlushState::DrawOpArgs opArgs {
fTarget.get()->asRenderTargetProxy(),
fRecordedOps[i].fAppliedClip,
diff --git a/src/gpu/GrTextureOpList.cpp b/src/gpu/GrTextureOpList.cpp
index f36cc350bb..b046e7c272 100644
--- a/src/gpu/GrTextureOpList.cpp
+++ b/src/gpu/GrTextureOpList.cpp
@@ -62,11 +62,19 @@ bool GrTextureOpList::onExecute(GrOpFlushState* flushState) {
return false;
}
+ std::unique_ptr<GrGpuTextureCommandBuffer> commandBuffer(
+ flushState->gpu()->createCommandBuffer(fTarget.get()->priv().peekTexture(),
+ fTarget.get()->origin()));
+ flushState->setCommandBuffer(commandBuffer.get());
+
for (int i = 0; i < fRecordedOps.count(); ++i) {
// We do not call flushState->setDrawOpArgs as this op list does not support GrDrawOps.
fRecordedOps[i]->execute(flushState);
}
+ commandBuffer->submit();
+ flushState->setCommandBuffer(nullptr);
+
return true;
}
diff --git a/src/gpu/ccpr/GrCCPRCoverageOpsBuilder.cpp b/src/gpu/ccpr/GrCCPRCoverageOpsBuilder.cpp
index 19b4049516..361a15999d 100644
--- a/src/gpu/ccpr/GrCCPRCoverageOpsBuilder.cpp
+++ b/src/gpu/ccpr/GrCCPRCoverageOpsBuilder.cpp
@@ -586,9 +586,10 @@ void CoverageOp::drawMaskPrimitives(GrOpFlushState* flushState, const GrPipeline
if (!fMeshesScratchBuffer.empty()) {
GrCCPRCoverageProcessor proc(mode, fPointsBuffer.get());
- flushState->commandBuffer()->draw(pipeline, proc, fMeshesScratchBuffer.begin(),
- fDynamicStatesScratchBuffer.begin(),
- fMeshesScratchBuffer.count(), this->bounds());
+ SkASSERT(flushState->rtCommandBuffer());
+ flushState->rtCommandBuffer()->draw(pipeline, proc, fMeshesScratchBuffer.begin(),
+ fDynamicStatesScratchBuffer.begin(),
+ fMeshesScratchBuffer.count(), this->bounds());
}
}
diff --git a/src/gpu/ccpr/GrCoverageCountingPathRenderer.cpp b/src/gpu/ccpr/GrCoverageCountingPathRenderer.cpp
index 6937642c16..596ec5e533 100644
--- a/src/gpu/ccpr/GrCoverageCountingPathRenderer.cpp
+++ b/src/gpu/ccpr/GrCoverageCountingPathRenderer.cpp
@@ -287,6 +287,7 @@ void GrCoverageCountingPathRenderer::preFlush(GrOnFlushResourceProvider* onFlush
void DrawPathsOp::onExecute(GrOpFlushState* flushState) {
SkASSERT(fCCPR->fFlushing);
+ SkASSERT(flushState->rtCommandBuffer());
if (!fCCPR->fPerFlushInstanceBuffer) {
return; // Setup failed.
@@ -319,7 +320,7 @@ void DrawPathsOp::onExecute(GrOpFlushState* flushState) {
batch.fEndInstanceIdx - baseInstance, baseInstance);
mesh.setVertexData(fCCPR->fPerFlushVertexBuffer.get());
- flushState->commandBuffer()->draw(pipeline, coverProc, &mesh, nullptr, 1, this->bounds());
+ flushState->rtCommandBuffer()->draw(pipeline, coverProc, &mesh, nullptr, 1, this->bounds());
}
SkASSERT(baseInstance == fBaseInstance + fDebugInstanceCount);
diff --git a/src/gpu/gl/GrGLGpu.cpp b/src/gpu/gl/GrGLGpu.cpp
index be5df4eee4..594312071f 100644
--- a/src/gpu/gl/GrGLGpu.cpp
+++ b/src/gpu/gl/GrGLGpu.cpp
@@ -2427,11 +2427,16 @@ bool GrGLGpu::onReadPixels(GrSurface* surface,
return true;
}
-GrGpuCommandBuffer* GrGLGpu::createCommandBuffer(
+GrGpuRTCommandBuffer* GrGLGpu::createCommandBuffer(
GrRenderTarget* rt, GrSurfaceOrigin origin,
- const GrGpuCommandBuffer::LoadAndStoreInfo&,
- const GrGpuCommandBuffer::StencilLoadAndStoreInfo& stencilInfo) {
- return new GrGLGpuCommandBuffer(this, rt, origin, stencilInfo);
+ const GrGpuRTCommandBuffer::LoadAndStoreInfo&,
+ const GrGpuRTCommandBuffer::StencilLoadAndStoreInfo& stencilInfo) {
+ return new GrGLGpuRTCommandBuffer(this, rt, origin, stencilInfo);
+}
+
+GrGpuTextureCommandBuffer* GrGLGpu::createCommandBuffer(GrTexture* texture,
+ GrSurfaceOrigin origin) {
+ return new GrGLGpuTextureCommandBuffer(this, texture, origin);
}
void GrGLGpu::flushRenderTarget(GrGLRenderTarget* target, const SkIRect* bounds, bool disableSRGB) {
diff --git a/src/gpu/gl/GrGLGpu.h b/src/gpu/gl/GrGLGpu.h
index aafcc37a57..23709b4f5f 100644
--- a/src/gpu/gl/GrGLGpu.h
+++ b/src/gpu/gl/GrGLGpu.h
@@ -97,9 +97,9 @@ public:
// Called by GrGLBuffer after its buffer object has been destroyed.
void notifyBufferReleased(const GrGLBuffer*);
- // The GrGLGpuCommandBuffer does not buffer up draws before submitting them to the gpu.
+ // The GrGLGpuRTCommandBuffer does not buffer up draws before submitting them to the gpu.
// Thus this is the implementation of the draw call for the corresponding passthrough function
- // on GrGLGpuCommandBuffer.
+ // on GrGLGpuRTCommandBuffer.
void draw(const GrPipeline&,
const GrPrimitiveProcessor&,
const GrMesh[],
@@ -128,14 +128,14 @@ public:
const GrBuffer* instanceBuffer, int instanceCount,
int baseInstance) final;
- // The GrGLGpuCommandBuffer does not buffer up draws before submitting them to the gpu.
+ // The GrGLGpuRTCommandBuffer does not buffer up draws before submitting them to the gpu.
// Thus this is the implementation of the clear call for the corresponding passthrough function
- // on GrGLGpuCommandBuffer.
+ // on GrGLGpuRTCommandBuffer.
void clear(const GrFixedClip&, GrColor, GrRenderTarget*, GrSurfaceOrigin);
- // The GrGLGpuCommandBuffer does not buffer up draws before submitting them to the gpu.
+ // The GrGLGpuRTCommandBuffer does not buffer up draws before submitting them to the gpu.
// Thus this is the implementation of the clearStencil call for the corresponding passthrough
- // function on GrGLGpuCommandBuffer.
+ // function on GrGLGpuRTCommandBuffer.
void clearStencilClip(const GrFixedClip&, bool insideStencilMask,
GrRenderTarget*, GrSurfaceOrigin);
@@ -145,10 +145,12 @@ public:
void clearStencil(GrRenderTarget*, int clearValue) override;
- GrGpuCommandBuffer* createCommandBuffer(
+ GrGpuRTCommandBuffer* createCommandBuffer(
GrRenderTarget*, GrSurfaceOrigin,
- const GrGpuCommandBuffer::LoadAndStoreInfo&,
- const GrGpuCommandBuffer::StencilLoadAndStoreInfo&) override;
+ const GrGpuRTCommandBuffer::LoadAndStoreInfo&,
+ const GrGpuRTCommandBuffer::StencilLoadAndStoreInfo&) override;
+
+ GrGpuTextureCommandBuffer* createCommandBuffer(GrTexture*, GrSurfaceOrigin) override;
void invalidateBoundRenderTarget() {
fHWBoundRenderTargetUniqueID.makeInvalid();
diff --git a/src/gpu/gl/GrGLGpuCommandBuffer.h b/src/gpu/gl/GrGLGpuCommandBuffer.h
index 0d57337ac1..e76253669c 100644
--- a/src/gpu/gl/GrGLGpuCommandBuffer.h
+++ b/src/gpu/gl/GrGLGpuCommandBuffer.h
@@ -17,21 +17,46 @@
class GrGLGpu;
class GrGLRenderTarget;
-class GrGLGpuCommandBuffer : public GrGpuCommandBuffer {
+class GrGLGpuTextureCommandBuffer : public GrGpuTextureCommandBuffer {
+public:
+ GrGLGpuTextureCommandBuffer(GrGLGpu* gpu, GrTexture* texture, GrSurfaceOrigin origin)
+ : INHERITED(texture, origin)
+ , fGpu(gpu) {
+ }
+
+ ~GrGLGpuTextureCommandBuffer() override {}
+
+ void submit() override {}
+
+ void copy(GrSurface* src, const SkIRect& srcRect, const SkIPoint& dstPoint) override {
+ fGpu->copySurface(fTexture, src, srcRect, dstPoint);
+ }
+
+ void insertEventMarker(const char* msg) override {
+ fGpu->insertEventMarker(msg);
+ }
+
+private:
+ GrGLGpu* fGpu;
+
+ typedef GrGpuTextureCommandBuffer INHERITED;
+};
+
+class GrGLGpuRTCommandBuffer : public GrGpuRTCommandBuffer {
/**
* We do not actually buffer up draws or do any work in this class for GL. Instead commands
* are immediately sent to the gpu to execute. Thus all the commands in this class are simply
* pass through functions to corresponding calls in the GrGLGpu class.
*/
public:
- GrGLGpuCommandBuffer(GrGLGpu* gpu, GrRenderTarget* rt, GrSurfaceOrigin origin,
- const GrGpuCommandBuffer::StencilLoadAndStoreInfo& stencilInfo)
+ GrGLGpuRTCommandBuffer(GrGLGpu* gpu, GrRenderTarget* rt, GrSurfaceOrigin origin,
+ const GrGpuRTCommandBuffer::StencilLoadAndStoreInfo& stencilInfo)
: INHERITED(rt, origin)
, fGpu(gpu) {
fClearSB = LoadOp::kClear == stencilInfo.fLoadOp;
}
- ~GrGLGpuCommandBuffer() override {}
+ ~GrGLGpuRTCommandBuffer() override {}
void begin() override {
if (fClearSB) {
@@ -50,11 +75,15 @@ public:
state->doUpload(upload);
}
+ void copy(GrSurface* src, const SkIRect& srcRect, const SkIPoint& dstPoint) override {
+ fGpu->copySurface(fRenderTarget, src, srcRect, dstPoint);
+ }
+
+ void submit() override {}
+
private:
GrGpu* gpu() override { return fGpu; }
- void onSubmit() override {}
-
void onDraw(const GrPipeline& pipeline,
const GrPrimitiveProcessor& primProc,
const GrMesh mesh[],
@@ -76,7 +105,7 @@ private:
GrGLGpu* fGpu;
bool fClearSB;
- typedef GrGpuCommandBuffer INHERITED;
+ typedef GrGpuRTCommandBuffer INHERITED;
};
#endif
diff --git a/src/gpu/mock/GrMockGpu.cpp b/src/gpu/mock/GrMockGpu.cpp
index 397e1a1ca9..470f306740 100644
--- a/src/gpu/mock/GrMockGpu.cpp
+++ b/src/gpu/mock/GrMockGpu.cpp
@@ -39,14 +39,20 @@ GrGpu* GrMockGpu::Create(const GrMockOptions* mockOptions, const GrContextOption
}
-GrGpuCommandBuffer* GrMockGpu::createCommandBuffer(
+GrGpuRTCommandBuffer* GrMockGpu::createCommandBuffer(
GrRenderTarget* rt, GrSurfaceOrigin origin,
- const GrGpuCommandBuffer::LoadAndStoreInfo&,
- const GrGpuCommandBuffer::StencilLoadAndStoreInfo&) {
- return new GrMockGpuCommandBuffer(this, rt, origin);
+ const GrGpuRTCommandBuffer::LoadAndStoreInfo&,
+ const GrGpuRTCommandBuffer::StencilLoadAndStoreInfo&) {
+ return new GrMockGpuRTCommandBuffer(this, rt, origin);
}
-void GrMockGpu::submitCommandBuffer(const GrMockGpuCommandBuffer* cmdBuffer) {
+GrGpuTextureCommandBuffer* GrMockGpu::createCommandBuffer(GrTexture* texture,
+ GrSurfaceOrigin origin) {
+ return new GrMockGpuTextureCommandBuffer(texture, origin);
+}
+
+
+void GrMockGpu::submitCommandBuffer(const GrMockGpuRTCommandBuffer* cmdBuffer) {
for (int i = 0; i < cmdBuffer->numDraws(); ++i) {
fStats.incNumDraws();
}
diff --git a/src/gpu/mock/GrMockGpu.h b/src/gpu/mock/GrMockGpu.h
index 6d72399be7..76f893ec45 100644
--- a/src/gpu/mock/GrMockGpu.h
+++ b/src/gpu/mock/GrMockGpu.h
@@ -14,7 +14,7 @@
#include "GrTexture.h"
#include "SkTHash.h"
-class GrMockGpuCommandBuffer;
+class GrMockGpuRTCommandBuffer;
struct GrMockOptions;
class GrPipeline;
@@ -43,10 +43,12 @@ public:
*effectiveSampleCnt = rt->numStencilSamples();
}
- GrGpuCommandBuffer* createCommandBuffer(
+ GrGpuRTCommandBuffer* createCommandBuffer(
GrRenderTarget*, GrSurfaceOrigin,
- const GrGpuCommandBuffer::LoadAndStoreInfo&,
- const GrGpuCommandBuffer::StencilLoadAndStoreInfo&) override;
+ const GrGpuRTCommandBuffer::LoadAndStoreInfo&,
+ const GrGpuRTCommandBuffer::StencilLoadAndStoreInfo&) override;
+
+ GrGpuTextureCommandBuffer* createCommandBuffer(GrTexture*, GrSurfaceOrigin) override;
GrFence SK_WARN_UNUSED_RESULT insertFence() override { return 0; }
bool waitFence(GrFence, uint64_t) override { return true; }
@@ -61,7 +63,7 @@ public:
void waitSemaphore(sk_sp<GrSemaphore> semaphore) override {}
sk_sp<GrSemaphore> prepareTextureForCrossContextUsage(GrTexture*) override { return nullptr; }
- void submitCommandBuffer(const GrMockGpuCommandBuffer*);
+ void submitCommandBuffer(const GrMockGpuRTCommandBuffer*);
private:
GrMockGpu(GrContext* context, const GrMockOptions&, const GrContextOptions&);
diff --git a/src/gpu/mock/GrMockGpuCommandBuffer.h b/src/gpu/mock/GrMockGpuCommandBuffer.h
index 6a780972af..d3bf732500 100644
--- a/src/gpu/mock/GrMockGpuCommandBuffer.h
+++ b/src/gpu/mock/GrMockGpuCommandBuffer.h
@@ -11,9 +11,26 @@
#include "GrGpuCommandBuffer.h"
#include "GrMockGpu.h"
-class GrMockGpuCommandBuffer : public GrGpuCommandBuffer {
+class GrMockGpuTextureCommandBuffer : public GrGpuTextureCommandBuffer {
public:
- GrMockGpuCommandBuffer(GrMockGpu* gpu, GrRenderTarget* rt, GrSurfaceOrigin origin)
+ GrMockGpuTextureCommandBuffer(GrTexture* texture, GrSurfaceOrigin origin)
+ : INHERITED(texture, origin) {
+ }
+
+ ~GrMockGpuTextureCommandBuffer() override {}
+
+ void copy(GrSurface* src, const SkIRect& srcRect, const SkIPoint& dstPoint) override {}
+ void insertEventMarker(const char*) override {}
+
+private:
+ void submit() override {}
+
+ typedef GrGpuTextureCommandBuffer INHERITED;
+};
+
+class GrMockGpuRTCommandBuffer : public GrGpuRTCommandBuffer {
+public:
+ GrMockGpuRTCommandBuffer(GrMockGpu* gpu, GrRenderTarget* rt, GrSurfaceOrigin origin)
: INHERITED(rt, origin)
, fGpu(gpu) {
}
@@ -24,11 +41,13 @@ public:
void insertEventMarker(const char*) override {}
void begin() override {}
void end() override {}
+ void copy(GrSurface* src, const SkIRect& srcRect, const SkIPoint& dstPoint) override {}
int numDraws() const { return fNumDraws; }
+ void submit() override { fGpu->submitCommandBuffer(this); }
+
private:
- void onSubmit() override { fGpu->submitCommandBuffer(this); }
void onDraw(const GrPipeline&, const GrPrimitiveProcessor&, const GrMesh[],
const GrPipeline::DynamicState[], int meshCount, const SkRect& bounds) override {
++fNumDraws;
@@ -39,7 +58,7 @@ private:
GrMockGpu* fGpu;
int fNumDraws = 0;
- typedef GrGpuCommandBuffer INHERITED;
+ typedef GrGpuRTCommandBuffer INHERITED;
};
#endif
diff --git a/src/gpu/ops/GrClearOp.cpp b/src/gpu/ops/GrClearOp.cpp
index 4b1efd6c47..cb9d4db63b 100644
--- a/src/gpu/ops/GrClearOp.cpp
+++ b/src/gpu/ops/GrClearOp.cpp
@@ -32,5 +32,6 @@ GrClearOp::GrClearOp(const GrFixedClip& clip, GrColor color, GrSurfaceProxy* pro
}
void GrClearOp::onExecute(GrOpFlushState* state) {
- state->commandBuffer()->clear(fClip, fColor);
+ SkASSERT(state->rtCommandBuffer());
+ state->rtCommandBuffer()->clear(fClip, fColor);
}
diff --git a/src/gpu/ops/GrClearStencilClipOp.h b/src/gpu/ops/GrClearStencilClipOp.h
index 1364b2365a..ffd2fd9b28 100644
--- a/src/gpu/ops/GrClearStencilClipOp.h
+++ b/src/gpu/ops/GrClearStencilClipOp.h
@@ -55,7 +55,8 @@ private:
void onPrepare(GrOpFlushState*) override {}
void onExecute(GrOpFlushState* state) override {
- state->commandBuffer()->clearStencilClip(fClip, fInsideStencilMask);
+ SkASSERT(state->rtCommandBuffer());
+ state->rtCommandBuffer()->clearStencilClip(fClip, fInsideStencilMask);
}
const GrFixedClip fClip;
diff --git a/src/gpu/ops/GrCopySurfaceOp.cpp b/src/gpu/ops/GrCopySurfaceOp.cpp
index 4e8fab7d70..ad6fec988e 100644
--- a/src/gpu/ops/GrCopySurfaceOp.cpp
+++ b/src/gpu/ops/GrCopySurfaceOp.cpp
@@ -80,13 +80,10 @@ std::unique_ptr<GrOp> GrCopySurfaceOp::Make(GrSurfaceProxy* dstProxy, GrSurfaceP
}
void GrCopySurfaceOp::onExecute(GrOpFlushState* state) {
- SkASSERT(!state->commandBuffer());
-
if (!fDst.get()->instantiate(state->resourceProvider()) ||
!fSrc.get()->instantiate(state->resourceProvider())) {
return;
}
- state->gpu()->copySurface(fDst.get()->priv().peekSurface(),
- fSrc.get()->priv().peekSurface(), fSrcRect, fDstPoint);
+ state->commandBuffer()->copy(fSrc.get()->priv().peekSurface(), fSrcRect, fDstPoint);
}
diff --git a/src/gpu/ops/GrCopySurfaceOp.h b/src/gpu/ops/GrCopySurfaceOp.h
index 64431a523d..57548a8268 100644
--- a/src/gpu/ops/GrCopySurfaceOp.h
+++ b/src/gpu/ops/GrCopySurfaceOp.h
@@ -33,8 +33,6 @@ public:
return string;
}
- bool needsCommandBufferIsolation() const override { return true; }
-
private:
GrCopySurfaceOp(GrSurfaceProxy* dst, GrSurfaceProxy* src,
const SkIRect& srcRect, const SkIPoint& dstPoint)
diff --git a/src/gpu/ops/GrDiscardOp.h b/src/gpu/ops/GrDiscardOp.h
index d30aa5a8c5..b38577027a 100644
--- a/src/gpu/ops/GrDiscardOp.h
+++ b/src/gpu/ops/GrDiscardOp.h
@@ -39,7 +39,8 @@ private:
void onPrepare(GrOpFlushState*) override {}
void onExecute(GrOpFlushState* state) override {
- state->commandBuffer()->discard();
+ SkASSERT(state->rtCommandBuffer());
+ state->rtCommandBuffer()->discard();
}
typedef GrOp INHERITED;
diff --git a/src/gpu/ops/GrMeshDrawOp.cpp b/src/gpu/ops/GrMeshDrawOp.cpp
index 10253174b8..026781a972 100644
--- a/src/gpu/ops/GrMeshDrawOp.cpp
+++ b/src/gpu/ops/GrMeshDrawOp.cpp
@@ -64,18 +64,19 @@ void GrMeshDrawOp::onExecute(GrOpFlushState* state) {
int currMeshIdx = 0;
SkASSERT(fQueuedDraws.empty() || fBaseDrawToken == state->nextTokenToFlush());
+ SkASSERT(state->rtCommandBuffer());
for (int currDrawIdx = 0; currDrawIdx < fQueuedDraws.count(); ++currDrawIdx) {
GrDrawOpUploadToken drawToken = state->nextTokenToFlush();
while (currUploadIdx < fInlineUploads.count() &&
fInlineUploads[currUploadIdx].fUploadBeforeToken == drawToken) {
- state->commandBuffer()->inlineUpload(state, fInlineUploads[currUploadIdx++].fUpload);
+ state->rtCommandBuffer()->inlineUpload(state, fInlineUploads[currUploadIdx++].fUpload);
}
const QueuedDraw& draw = fQueuedDraws[currDrawIdx];
SkASSERT(draw.fPipeline->proxy() == state->drawOpArgs().fProxy);
- state->commandBuffer()->draw(*draw.fPipeline, *draw.fGeometryProcessor.get(),
- fMeshes.begin() + currMeshIdx, nullptr, draw.fMeshCnt,
- this->bounds());
+ state->rtCommandBuffer()->draw(*draw.fPipeline, *draw.fGeometryProcessor.get(),
+ fMeshes.begin() + currMeshIdx, nullptr, draw.fMeshCnt,
+ this->bounds());
currMeshIdx += draw.fMeshCnt;
state->flushToken();
}
diff --git a/src/gpu/ops/GrOp.h b/src/gpu/ops/GrOp.h
index ec4b746091..a87f8b633b 100644
--- a/src/gpu/ops/GrOp.h
+++ b/src/gpu/ops/GrOp.h
@@ -152,8 +152,6 @@ public:
return string;
}
- virtual bool needsCommandBufferIsolation() const { return false; }
-
protected:
/**
* Indicates that the op will produce geometry that extends beyond its bounds for the
diff --git a/src/gpu/vk/GrVkGpu.cpp b/src/gpu/vk/GrVkGpu.cpp
index cd3054cd05..a36bbe51fb 100644
--- a/src/gpu/vk/GrVkGpu.cpp
+++ b/src/gpu/vk/GrVkGpu.cpp
@@ -257,11 +257,16 @@ void GrVkGpu::disconnect(DisconnectType type) {
///////////////////////////////////////////////////////////////////////////////
-GrGpuCommandBuffer* GrVkGpu::createCommandBuffer(
+GrGpuRTCommandBuffer* GrVkGpu::createCommandBuffer(
GrRenderTarget* rt, GrSurfaceOrigin origin,
- const GrGpuCommandBuffer::LoadAndStoreInfo& colorInfo,
- const GrGpuCommandBuffer::StencilLoadAndStoreInfo& stencilInfo) {
- return new GrVkGpuCommandBuffer(this, rt, origin, colorInfo, stencilInfo);
+ const GrGpuRTCommandBuffer::LoadAndStoreInfo& colorInfo,
+ const GrGpuRTCommandBuffer::StencilLoadAndStoreInfo& stencilInfo) {
+ return new GrVkGpuRTCommandBuffer(this, rt, origin, colorInfo, stencilInfo);
+}
+
+GrGpuTextureCommandBuffer* GrVkGpu::createCommandBuffer(GrTexture* texture,
+ GrSurfaceOrigin origin) {
+ return new GrVkGpuTextureCommandBuffer(this, texture, origin);
}
void GrVkGpu::submitCommandBuffer(SyncQueue sync) {
diff --git a/src/gpu/vk/GrVkGpu.h b/src/gpu/vk/GrVkGpu.h
index 00bf060f74..ad157cac9f 100644
--- a/src/gpu/vk/GrVkGpu.h
+++ b/src/gpu/vk/GrVkGpu.h
@@ -96,10 +96,12 @@ public:
void clearStencil(GrRenderTarget* target, int clearValue) override;
- GrGpuCommandBuffer* createCommandBuffer(
+ GrGpuRTCommandBuffer* createCommandBuffer(
GrRenderTarget*, GrSurfaceOrigin,
- const GrGpuCommandBuffer::LoadAndStoreInfo&,
- const GrGpuCommandBuffer::StencilLoadAndStoreInfo&) override;
+ const GrGpuRTCommandBuffer::LoadAndStoreInfo&,
+ const GrGpuRTCommandBuffer::StencilLoadAndStoreInfo&) override;
+
+ GrGpuTextureCommandBuffer* createCommandBuffer(GrTexture*, GrSurfaceOrigin) override;
void addMemoryBarrier(VkPipelineStageFlags srcStageMask,
VkPipelineStageFlags dstStageMask,
diff --git a/src/gpu/vk/GrVkGpuCommandBuffer.cpp b/src/gpu/vk/GrVkGpuCommandBuffer.cpp
index 66e4bffe87..4987cea3b1 100644
--- a/src/gpu/vk/GrVkGpuCommandBuffer.cpp
+++ b/src/gpu/vk/GrVkGpuCommandBuffer.cpp
@@ -22,17 +22,37 @@
#include "GrVkTexture.h"
#include "SkRect.h"
-void get_vk_load_store_ops(GrGpuCommandBuffer::LoadOp loadOpIn,
- GrGpuCommandBuffer::StoreOp storeOpIn,
+void GrVkGpuTextureCommandBuffer::copy(GrSurface* src, const SkIRect& srcRect,
+ const SkIPoint& dstPoint) {
+ fCopies.emplace_back(src, srcRect, dstPoint);
+}
+
+void GrVkGpuTextureCommandBuffer::insertEventMarker(const char* msg) {
+ // TODO: does Vulkan have a correlate?
+}
+
+void GrVkGpuTextureCommandBuffer::submit() {
+ for (int i = 0; i < fCopies.count(); ++i) {
+ CopyInfo& copyInfo = fCopies[i];
+ fGpu->copySurface(fTexture, copyInfo.fSrc, copyInfo.fSrcRect, copyInfo.fDstPoint);
+ }
+}
+
+GrVkGpuTextureCommandBuffer::~GrVkGpuTextureCommandBuffer() {}
+
+////////////////////////////////////////////////////////////////////////////////
+
+void get_vk_load_store_ops(GrGpuRTCommandBuffer::LoadOp loadOpIn,
+ GrGpuRTCommandBuffer::StoreOp storeOpIn,
VkAttachmentLoadOp* loadOp, VkAttachmentStoreOp* storeOp) {
switch (loadOpIn) {
- case GrGpuCommandBuffer::LoadOp::kLoad:
+ case GrGpuRTCommandBuffer::LoadOp::kLoad:
*loadOp = VK_ATTACHMENT_LOAD_OP_LOAD;
break;
- case GrGpuCommandBuffer::LoadOp::kClear:
+ case GrGpuRTCommandBuffer::LoadOp::kClear:
*loadOp = VK_ATTACHMENT_LOAD_OP_CLEAR;
break;
- case GrGpuCommandBuffer::LoadOp::kDiscard:
+ case GrGpuRTCommandBuffer::LoadOp::kDiscard:
*loadOp = VK_ATTACHMENT_LOAD_OP_DONT_CARE;
break;
default:
@@ -41,10 +61,10 @@ void get_vk_load_store_ops(GrGpuCommandBuffer::LoadOp loadOpIn,
}
switch (storeOpIn) {
- case GrGpuCommandBuffer::StoreOp::kStore:
+ case GrGpuRTCommandBuffer::StoreOp::kStore:
*storeOp = VK_ATTACHMENT_STORE_OP_STORE;
break;
- case GrGpuCommandBuffer::StoreOp::kDiscard:
+ case GrGpuRTCommandBuffer::StoreOp::kDiscard:
*storeOp = VK_ATTACHMENT_STORE_OP_DONT_CARE;
break;
default:
@@ -53,10 +73,10 @@ void get_vk_load_store_ops(GrGpuCommandBuffer::LoadOp loadOpIn,
}
}
-GrVkGpuCommandBuffer::GrVkGpuCommandBuffer(GrVkGpu* gpu,
- GrRenderTarget* rt, GrSurfaceOrigin origin,
- const LoadAndStoreInfo& colorInfo,
- const StencilLoadAndStoreInfo& stencilInfo)
+GrVkGpuRTCommandBuffer::GrVkGpuRTCommandBuffer(GrVkGpu* gpu,
+ GrRenderTarget* rt, GrSurfaceOrigin origin,
+ const LoadAndStoreInfo& colorInfo,
+ const StencilLoadAndStoreInfo& stencilInfo)
: INHERITED(rt, origin)
, fGpu(gpu)
, fClearColor(GrColor4f::FromGrColor(colorInfo.fClearColor))
@@ -71,7 +91,7 @@ GrVkGpuCommandBuffer::GrVkGpuCommandBuffer(GrVkGpu* gpu,
this->init();
}
-void GrVkGpuCommandBuffer::init() {
+void GrVkGpuRTCommandBuffer::init() {
GrVkRenderPass::LoadStoreOps vkColorOps(fVkColorLoadOp, fVkColorStoreOp);
GrVkRenderPass::LoadStoreOps vkStencilOps(fVkStencilLoadOp, fVkStencilStoreOp);
@@ -105,7 +125,7 @@ void GrVkGpuCommandBuffer::init() {
}
-GrVkGpuCommandBuffer::~GrVkGpuCommandBuffer() {
+GrVkGpuRTCommandBuffer::~GrVkGpuRTCommandBuffer() {
for (int i = 0; i < fCommandBufferInfos.count(); ++i) {
CommandBufferInfo& cbInfo = fCommandBufferInfos[i];
for (int j = 0; j < cbInfo.fCommandBuffers.count(); ++j) {
@@ -115,15 +135,15 @@ GrVkGpuCommandBuffer::~GrVkGpuCommandBuffer() {
}
}
-GrGpu* GrVkGpuCommandBuffer::gpu() { return fGpu; }
+GrGpu* GrVkGpuRTCommandBuffer::gpu() { return fGpu; }
-void GrVkGpuCommandBuffer::end() {
+void GrVkGpuRTCommandBuffer::end() {
if (fCurrentCmdInfo >= 0) {
fCommandBufferInfos[fCurrentCmdInfo].currentCmdBuf()->end(fGpu);
}
}
-void GrVkGpuCommandBuffer::onSubmit() {
+void GrVkGpuRTCommandBuffer::submit() {
if (!fRenderTarget) {
return;
}
@@ -160,6 +180,11 @@ void GrVkGpuCommandBuffer::onSubmit() {
iuInfo.fFlushState->doUpload(iuInfo.fUpload);
}
+ for (int j = 0; j < cbInfo.fPreCopies.count(); ++j) {
+ CopyInfo& copyInfo = cbInfo.fPreCopies[j];
+ fGpu->copySurface(fRenderTarget, copyInfo.fSrc, copyInfo.fSrcRect, copyInfo.fDstPoint);
+ }
+
// TODO: We can't add this optimization yet since many things create a scratch texture which
// adds the discard immediately, but then don't draw to it right away. This causes the
// discard to be ignored and we get yelled at for loading uninitialized data. However, once
@@ -184,7 +209,7 @@ void GrVkGpuCommandBuffer::onSubmit() {
}
}
-void GrVkGpuCommandBuffer::discard() {
+void GrVkGpuRTCommandBuffer::discard() {
GrVkRenderTarget* vkRT = static_cast<GrVkRenderTarget*>(fRenderTarget);
CommandBufferInfo& cbInfo = fCommandBufferInfos[fCurrentCmdInfo];
@@ -216,11 +241,11 @@ void GrVkGpuCommandBuffer::discard() {
}
}
-void GrVkGpuCommandBuffer::insertEventMarker(const char* msg) {
+void GrVkGpuRTCommandBuffer::insertEventMarker(const char* msg) {
// TODO: does Vulkan have a correlate?
}
-void GrVkGpuCommandBuffer::onClearStencilClip(const GrFixedClip& clip, bool insideStencilMask) {
+void GrVkGpuRTCommandBuffer::onClearStencilClip(const GrFixedClip& clip, bool insideStencilMask) {
SkASSERT(!clip.hasWindowRectangles());
CommandBufferInfo& cbInfo = fCommandBufferInfos[fCurrentCmdInfo];
@@ -280,7 +305,7 @@ void GrVkGpuCommandBuffer::onClearStencilClip(const GrFixedClip& clip, bool insi
}
}
-void GrVkGpuCommandBuffer::onClear(const GrFixedClip& clip, GrColor color) {
+void GrVkGpuRTCommandBuffer::onClear(const GrFixedClip& clip, GrColor color) {
GrVkRenderTarget* vkRT = static_cast<GrVkRenderTarget*>(fRenderTarget);
// parent class should never let us get here with no RT
@@ -361,7 +386,9 @@ void GrVkGpuCommandBuffer::onClear(const GrFixedClip& clip, GrColor color) {
return;
}
-void GrVkGpuCommandBuffer::addAdditionalCommandBuffer() {
+////////////////////////////////////////////////////////////////////////////////
+
+void GrVkGpuRTCommandBuffer::addAdditionalCommandBuffer() {
GrVkRenderTarget* vkRT = static_cast<GrVkRenderTarget*>(fRenderTarget);
CommandBufferInfo& cbInfo = fCommandBufferInfos[fCurrentCmdInfo];
@@ -370,7 +397,7 @@ void GrVkGpuCommandBuffer::addAdditionalCommandBuffer() {
cbInfo.currentCmdBuf()->begin(fGpu, vkRT->framebuffer(), cbInfo.fRenderPass);
}
-void GrVkGpuCommandBuffer::addAdditionalRenderPass() {
+void GrVkGpuRTCommandBuffer::addAdditionalRenderPass() {
GrVkRenderTarget* vkRT = static_cast<GrVkRenderTarget*>(fRenderTarget);
fCommandBufferInfos[fCurrentCmdInfo].currentCmdBuf()->end(fGpu);
@@ -406,7 +433,7 @@ void GrVkGpuCommandBuffer::addAdditionalRenderPass() {
cbInfo.currentCmdBuf()->begin(fGpu, vkRT->framebuffer(), cbInfo.fRenderPass);
}
-void GrVkGpuCommandBuffer::inlineUpload(GrOpFlushState* state, GrDrawOp::DeferredUploadFn& upload) {
+void GrVkGpuRTCommandBuffer::inlineUpload(GrOpFlushState* state, GrDrawOp::DeferredUploadFn& upload) {
if (!fCommandBufferInfos[fCurrentCmdInfo].fIsEmpty) {
this->addAdditionalRenderPass();
@@ -414,12 +441,21 @@ void GrVkGpuCommandBuffer::inlineUpload(GrOpFlushState* state, GrDrawOp::Deferre
fCommandBufferInfos[fCurrentCmdInfo].fPreDrawUploads.emplace_back(state, upload);
}
+void GrVkGpuRTCommandBuffer::copy(GrSurface* src, const SkIRect& srcRect,
+ const SkIPoint& dstPoint) {
+ if (!fCommandBufferInfos[fCurrentCmdInfo].fIsEmpty ||
+ fCommandBufferInfos[fCurrentCmdInfo].fStartsWithClear) {
+ this->addAdditionalRenderPass();
+ }
+ fCommandBufferInfos[fCurrentCmdInfo].fPreCopies.emplace_back(src, srcRect, dstPoint);
+}
+
////////////////////////////////////////////////////////////////////////////////
-void GrVkGpuCommandBuffer::bindGeometry(const GrPrimitiveProcessor& primProc,
- const GrBuffer* indexBuffer,
- const GrBuffer* vertexBuffer,
- const GrBuffer* instanceBuffer) {
+void GrVkGpuRTCommandBuffer::bindGeometry(const GrPrimitiveProcessor& primProc,
+ const GrBuffer* indexBuffer,
+ const GrBuffer* vertexBuffer,
+ const GrBuffer* instanceBuffer) {
GrVkSecondaryCommandBuffer* currCmdBuf = fCommandBufferInfos[fCurrentCmdInfo].currentCmdBuf();
// There is no need to put any memory barriers to make sure host writes have finished here.
// When a command buffer is submitted to a queue, there is an implicit memory barrier that
@@ -457,7 +493,7 @@ void GrVkGpuCommandBuffer::bindGeometry(const GrPrimitiveProcessor& primProc,
}
}
-sk_sp<GrVkPipelineState> GrVkGpuCommandBuffer::prepareDrawState(
+sk_sp<GrVkPipelineState> GrVkGpuRTCommandBuffer::prepareDrawState(
const GrPipeline& pipeline,
const GrPrimitiveProcessor& primProc,
GrPrimitiveType primitiveType,
@@ -535,12 +571,12 @@ static void prepare_sampled_images(const GrResourceIOProcessor& processor, GrVkG
}
}
-void GrVkGpuCommandBuffer::onDraw(const GrPipeline& pipeline,
- const GrPrimitiveProcessor& primProc,
- const GrMesh meshes[],
- const GrPipeline::DynamicState dynamicStates[],
- int meshCount,
- const SkRect& bounds) {
+void GrVkGpuRTCommandBuffer::onDraw(const GrPipeline& pipeline,
+ const GrPrimitiveProcessor& primProc,
+ const GrMesh meshes[],
+ const GrPipeline::DynamicState dynamicStates[],
+ int meshCount,
+ const SkRect& bounds) {
SkASSERT(pipeline.renderTarget() == fRenderTarget);
if (!meshCount) {
@@ -605,30 +641,30 @@ void GrVkGpuCommandBuffer::onDraw(const GrPipeline& pipeline,
pipelineState->freeTempResources(fGpu);
}
-void GrVkGpuCommandBuffer::sendInstancedMeshToGpu(const GrPrimitiveProcessor& primProc,
- GrPrimitiveType,
- const GrBuffer* vertexBuffer,
- int vertexCount,
- int baseVertex,
- const GrBuffer* instanceBuffer,
- int instanceCount,
- int baseInstance) {
+void GrVkGpuRTCommandBuffer::sendInstancedMeshToGpu(const GrPrimitiveProcessor& primProc,
+ GrPrimitiveType,
+ const GrBuffer* vertexBuffer,
+ int vertexCount,
+ int baseVertex,
+ const GrBuffer* instanceBuffer,
+ int instanceCount,
+ int baseInstance) {
CommandBufferInfo& cbInfo = fCommandBufferInfos[fCurrentCmdInfo];
this->bindGeometry(primProc, nullptr, vertexBuffer, instanceBuffer);
cbInfo.currentCmdBuf()->draw(fGpu, vertexCount, instanceCount, baseVertex, baseInstance);
fGpu->stats()->incNumDraws();
}
-void GrVkGpuCommandBuffer::sendIndexedInstancedMeshToGpu(const GrPrimitiveProcessor& primProc,
- GrPrimitiveType,
- const GrBuffer* indexBuffer,
- int indexCount,
- int baseIndex,
- const GrBuffer* vertexBuffer,
- int baseVertex,
- const GrBuffer* instanceBuffer,
- int instanceCount,
- int baseInstance) {
+void GrVkGpuRTCommandBuffer::sendIndexedInstancedMeshToGpu(const GrPrimitiveProcessor& primProc,
+ GrPrimitiveType,
+ const GrBuffer* indexBuffer,
+ int indexCount,
+ int baseIndex,
+ const GrBuffer* vertexBuffer,
+ int baseVertex,
+ const GrBuffer* instanceBuffer,
+ int instanceCount,
+ int baseInstance) {
CommandBufferInfo& cbInfo = fCommandBufferInfos[fCurrentCmdInfo];
this->bindGeometry(primProc, indexBuffer, vertexBuffer, instanceBuffer);
cbInfo.currentCmdBuf()->drawIndexed(fGpu, indexCount, instanceCount,
diff --git a/src/gpu/vk/GrVkGpuCommandBuffer.h b/src/gpu/vk/GrVkGpuCommandBuffer.h
index ab5900eb61..9209a3b2a6 100644
--- a/src/gpu/vk/GrVkGpuCommandBuffer.h
+++ b/src/gpu/vk/GrVkGpuCommandBuffer.h
@@ -21,13 +21,44 @@ class GrVkRenderPass;
class GrVkRenderTarget;
class GrVkSecondaryCommandBuffer;
-class GrVkGpuCommandBuffer : public GrGpuCommandBuffer, private GrMesh::SendToGpuImpl {
+class GrVkGpuTextureCommandBuffer : public GrGpuTextureCommandBuffer {
public:
- GrVkGpuCommandBuffer(GrVkGpu*, GrRenderTarget*, GrSurfaceOrigin,
- const LoadAndStoreInfo&,
- const StencilLoadAndStoreInfo&);
+ GrVkGpuTextureCommandBuffer(GrVkGpu* gpu, GrTexture* texture, GrSurfaceOrigin origin)
+ : INHERITED(texture, origin)
+ , fGpu(gpu) {
+ }
+
+ ~GrVkGpuTextureCommandBuffer() override;
+
+ void copy(GrSurface* src, const SkIRect& srcRect, const SkIPoint& dstPoint) override;
+
+ void insertEventMarker(const char*) override;
+
+private:
+ void submit() override;
+
+ struct CopyInfo {
+ CopyInfo(GrSurface* src, const SkIRect& srcRect, const SkIPoint& dstPoint)
+ : fSrc(src), fSrcRect(srcRect), fDstPoint(dstPoint) {}
+
+ GrSurface* fSrc;
+ SkIRect fSrcRect;
+ SkIPoint fDstPoint;
+ };
+
+ GrVkGpu* fGpu;
+ SkTArray<CopyInfo> fCopies;
+
+ typedef GrGpuTextureCommandBuffer INHERITED;
+};
- ~GrVkGpuCommandBuffer() override;
+class GrVkGpuRTCommandBuffer : public GrGpuRTCommandBuffer, private GrMesh::SendToGpuImpl {
+public:
+ GrVkGpuRTCommandBuffer(GrVkGpu*, GrRenderTarget*, GrSurfaceOrigin,
+ const LoadAndStoreInfo&,
+ const StencilLoadAndStoreInfo&);
+
+ ~GrVkGpuRTCommandBuffer() override;
void begin() override { }
void end() override;
@@ -37,13 +68,15 @@ public:
void inlineUpload(GrOpFlushState* state, GrDrawOp::DeferredUploadFn& upload) override;
+ void copy(GrSurface* src, const SkIRect& srcRect, const SkIPoint& dstPoint) override;
+
+ void submit() override;
+
private:
void init();
GrGpu* gpu() override;
- void onSubmit() override;
-
// Bind vertex and index buffers
void bindGeometry(const GrPrimitiveProcessor&,
const GrBuffer* indexBuffer,
@@ -104,6 +137,15 @@ private:
GrDrawOp::DeferredUploadFn fUpload;
};
+ struct CopyInfo {
+ CopyInfo(GrSurface* src, const SkIRect& srcRect, const SkIPoint& dstPoint)
+ : fSrc(src), fSrcRect(srcRect), fDstPoint(dstPoint) {}
+
+ GrSurface* fSrc;
+ SkIRect fSrcRect;
+ SkIPoint fDstPoint;
+ };
+
struct CommandBufferInfo {
const GrVkRenderPass* fRenderPass;
SkTArray<GrVkSecondaryCommandBuffer*> fCommandBuffers;
@@ -111,7 +153,10 @@ private:
SkRect fBounds;
bool fIsEmpty;
bool fStartsWithClear;
+ // The PreDrawUploads and PreCopies are sent to the GPU before submitting the secondary
+ // command buffer.
SkTArray<InlineUploadInfo> fPreDrawUploads;
+ SkTArray<CopyInfo> fPreCopies;
GrVkSecondaryCommandBuffer* currentCmdBuf() {
return fCommandBuffers.back();
@@ -129,7 +174,7 @@ private:
GrColor4f fClearColor;
GrVkPipelineState* fLastPipelineState;
- typedef GrGpuCommandBuffer INHERITED;
+ typedef GrGpuRTCommandBuffer INHERITED;
};
#endif
diff --git a/tests/GrMeshTest.cpp b/tests/GrMeshTest.cpp
index 288c057589..0f4dcb6f6d 100644
--- a/tests/GrMeshTest.cpp
+++ b/tests/GrMeshTest.cpp
@@ -370,8 +370,8 @@ void DrawMeshHelper::drawMesh(const GrMesh& mesh) {
GrRenderTargetProxy* proxy = fState->drawOpArgs().fProxy;
GrPipeline pipeline(proxy, GrPipeline::ScissorState::kDisabled, SkBlendMode::kSrc);
GrMeshTestProcessor mtp(mesh.isInstanced(), mesh.hasVertexData());
- fState->commandBuffer()->draw(pipeline, mtp, &mesh, nullptr, 1,
- SkRect::MakeIWH(kImageWidth, kImageHeight));
+ fState->rtCommandBuffer()->draw(pipeline, mtp, &mesh, nullptr, 1,
+ SkRect::MakeIWH(kImageWidth, kImageHeight));
}
static void run_test(const char* testName, skiatest::Reporter* reporter,
diff --git a/tests/GrPipelineDynamicStateTest.cpp b/tests/GrPipelineDynamicStateTest.cpp
index 2049ccb182..76d90dd212 100644
--- a/tests/GrPipelineDynamicStateTest.cpp
+++ b/tests/GrPipelineDynamicStateTest.cpp
@@ -133,9 +133,9 @@ private:
mesh.setNonIndexedNonInstanced(4);
mesh.setVertexData(fVertexBuffer.get(), 4 * i);
}
- state->commandBuffer()->draw(pipeline, GrPipelineDynamicStateTestProcessor(),
- meshes.begin(), kDynamicStates, 4,
- SkRect::MakeIWH(kScreenSize, kScreenSize));
+ state->rtCommandBuffer()->draw(pipeline, GrPipelineDynamicStateTestProcessor(),
+ meshes.begin(), kDynamicStates, 4,
+ SkRect::MakeIWH(kScreenSize, kScreenSize));
}
ScissorState fScissorState;