author     Brian Salomon <bsalomon@google.com>  2017-10-30 09:37:55 -0400
committer  Skia Commit-Bot <skia-commit-bot@chromium.org>  2017-10-30 15:30:09 +0000
commit     943ed7910f938d7b4894b4e925566cf7b7053f29
tree       b281c6fb91ede12be586ee7d475a2c4f38e15f5d
parent     f18b1d88aa98f81d2c115a59d887265fea63f876
Remove deferred upload types from GrDrawOp.h.
This is motivated by exposing these to an upcoming atlas text rendering API that doesn't use ops.

Change-Id: Id034dd43d13bc96fe1350fc6d8f699477bb74a05
Reviewed-on: https://skia-review.googlesource.com/65060
Reviewed-by: Robert Phillips <robertphillips@google.com>
Commit-Queue: Brian Salomon <bsalomon@google.com>
-rw-r--r--  gn/gpu.gni                                       |  1
-rw-r--r--  src/gpu/GrDeferredProxyUploader.h                |  2
-rw-r--r--  src/gpu/GrDeferredUpload.h                       | 56
-rw-r--r--  src/gpu/GrDrawOpAtlas.cpp                        | 35
-rw-r--r--  src/gpu/GrDrawOpAtlas.h                          | 22
-rw-r--r--  src/gpu/GrGpuCommandBuffer.h                     |  2
-rw-r--r--  src/gpu/GrOnFlushResourceProvider.h              |  2
-rw-r--r--  src/gpu/GrOpFlushState.cpp                       | 13
-rw-r--r--  src/gpu/GrOpFlushState.h                         | 32
-rw-r--r--  src/gpu/ccpr/GrCoverageCountingPathRenderer.cpp  |  2
-rw-r--r--  src/gpu/ccpr/GrCoverageCountingPathRenderer.h    |  2
-rw-r--r--  src/gpu/gl/GrGLGpuCommandBuffer.h                |  2
-rw-r--r--  src/gpu/mock/GrMockGpuCommandBuffer.h            |  2
-rw-r--r--  src/gpu/ops/GrDrawOp.h                           | 49
-rw-r--r--  src/gpu/ops/GrMeshDrawOp.cpp                     |  6
-rw-r--r--  src/gpu/ops/GrMeshDrawOp.h                       |  2
-rw-r--r--  src/gpu/ops/GrSmallPathRenderer.h                |  2
-rw-r--r--  src/gpu/text/GrAtlasGlyphCache.h                 |  6
-rw-r--r--  src/gpu/vk/GrVkGpuCommandBuffer.cpp              |  4
-rw-r--r--  src/gpu/vk/GrVkGpuCommandBuffer.h                |  8
20 files changed, 132 insertions, 118 deletions
diff --git a/gn/gpu.gni b/gn/gpu.gni
index edc160542d..ef6031246c 100644
--- a/gn/gpu.gni
+++ b/gn/gpu.gni
@@ -74,6 +74,7 @@ skia_gpu_sources = [
"$_src/gpu/GrDefaultGeoProcFactory.cpp",
"$_src/gpu/GrDefaultGeoProcFactory.h",
"$_src/gpu/GrDeferredProxyUploader.h",
+ "$_src/gpu/GrDeferredUpload.h",
"$_src/gpu/GrDistanceFieldGenFromVector.cpp",
"$_src/gpu/GrDistanceFieldGenFromVector.h",
"$_src/gpu/GrDrawingManager.cpp",
diff --git a/src/gpu/GrDeferredProxyUploader.h b/src/gpu/GrDeferredProxyUploader.h
index b9c18857b8..765a38920c 100644
--- a/src/gpu/GrDeferredProxyUploader.h
+++ b/src/gpu/GrDeferredProxyUploader.h
@@ -53,7 +53,7 @@ public:
return;
}
- auto uploadMask = [this, proxy](GrDrawOp::WritePixelsFn& writePixelsFn) {
+ auto uploadMask = [this, proxy](GrDeferredTextureUploadWritePixelsFn& writePixelsFn) {
this->wait();
// If the worker thread was unable to allocate pixels, this check will fail, and we'll
// end up drawing with an uninitialized mask texture, but at least we won't crash.
diff --git a/src/gpu/GrDeferredUpload.h b/src/gpu/GrDeferredUpload.h
new file mode 100644
index 0000000000..4110e55297
--- /dev/null
+++ b/src/gpu/GrDeferredUpload.h
@@ -0,0 +1,56 @@
+/*
+ * Copyright 2017 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrDeferredUpload_DEFINED
+#define GrDeferredUpload_DEFINED
+
+#include <functional>
+#include "GrTypes.h"
+
+class GrTextureProxy;
+
+/**
+ * GrDeferredUploadToken is used to sequence the uploads relative to each other and to draws.
+ */
+class GrDeferredUploadToken {
+public:
+ static GrDeferredUploadToken AlreadyFlushedToken() { return GrDeferredUploadToken(0); }
+
+ GrDeferredUploadToken(const GrDeferredUploadToken&) = default;
+ GrDeferredUploadToken& operator=(const GrDeferredUploadToken&) = default;
+ bool operator==(const GrDeferredUploadToken& that) const {
+ return fSequenceNumber == that.fSequenceNumber;
+ }
+ bool operator!=(const GrDeferredUploadToken& that) const { return !(*this == that); }
+ bool inInterval(const GrDeferredUploadToken& start, const GrDeferredUploadToken& finish) {
+ return fSequenceNumber >= start.fSequenceNumber &&
+ fSequenceNumber <= finish.fSequenceNumber;
+ }
+
+private:
+ GrDeferredUploadToken();
+ explicit GrDeferredUploadToken(uint64_t sequenceNumber) : fSequenceNumber(sequenceNumber) {}
+ friend class GrOpFlushState;
+ uint64_t fSequenceNumber;
+};
+
+/**
+ * Passed to a deferred upload when it is executed, this method allows the deferred upload to
+ * actually write its pixel data into a texture.
+ */
+using GrDeferredTextureUploadWritePixelsFn =
+ std::function<bool(GrTextureProxy*, int left, int top, int width, int height,
+ GrPixelConfig config, const void* buffer, size_t rowBytes)>;
+
+/**
+ * A deferred texture upload is simply a std::function that takes a
+ * GrDeferredTextureUploadWritePixelsFn as a parameter. It is called when it should perform its
+ * upload as the draw/upload sequence is executed.
+ */
+using GrDeferredTextureUploadFn = std::function<void(GrDeferredTextureUploadWritePixelsFn&)>;
+
+#endif
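
A minimal sketch of how the new header's two types compose (the helper name and the pixel config here are illustrative, not part of this change): a deferred upload is just a std::function that receives the flush-time write callback and invokes it with the region and pixels it wants written.

    // Illustrative only: builds a GrDeferredTextureUploadFn that writes 'pixels' into the
    // full extent of 'proxy' when the flush executes the upload. The proxy, pixel data,
    // and kRGBA_8888_GrPixelConfig are assumptions made for the sketch.
    GrDeferredTextureUploadFn makeFullProxyUpload(GrTextureProxy* proxy, const void* pixels,
                                                  size_t rowBytes) {
        return [proxy, pixels, rowBytes](GrDeferredTextureUploadWritePixelsFn& writePixels) {
            // The flush state supplies writePixels; the upload decides where and what to write.
            writePixels(proxy, 0, 0, proxy->width(), proxy->height(),
                        kRGBA_8888_GrPixelConfig, pixels, rowBytes);
        };
    }
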
diff --git a/src/gpu/GrDrawOpAtlas.cpp b/src/gpu/GrDrawOpAtlas.cpp
index 20d2c06a60..1a1452caac 100644
--- a/src/gpu/GrDrawOpAtlas.cpp
+++ b/src/gpu/GrDrawOpAtlas.cpp
@@ -37,8 +37,8 @@ static bool gDumpAtlasData = false;
GrDrawOpAtlas::Plot::Plot(int pageIndex, int plotIndex, uint64_t genID, int offX, int offY,
int width, int height, GrPixelConfig config)
- : fLastUpload(GrDrawOpUploadToken::AlreadyFlushedToken())
- , fLastUse(GrDrawOpUploadToken::AlreadyFlushedToken())
+ : fLastUpload(GrDeferredUploadToken::AlreadyFlushedToken())
+ , fLastUse(GrDeferredUploadToken::AlreadyFlushedToken())
, fFlushesSinceLastUse(0)
, fPageIndex(pageIndex)
, fPlotIndex(plotIndex)
@@ -110,7 +110,7 @@ bool GrDrawOpAtlas::Plot::addSubImage(int width, int height, const void* image,
return true;
}
-void GrDrawOpAtlas::Plot::uploadToTexture(GrDrawOp::WritePixelsFn& writePixels,
+void GrDrawOpAtlas::Plot::uploadToTexture(GrDeferredTextureUploadWritePixelsFn& writePixels,
GrTextureProxy* proxy) {
// We should only be issuing uploads if we are in fact dirty
SkASSERT(fDirty && fData && proxy && proxy->priv().peekTexture());
@@ -132,8 +132,8 @@ void GrDrawOpAtlas::Plot::resetRects() {
fGenID++;
fID = CreateId(fPageIndex, fPlotIndex, fGenID);
- fLastUpload = GrDrawOpUploadToken::AlreadyFlushedToken();
- fLastUse = GrDrawOpUploadToken::AlreadyFlushedToken();
+ fLastUpload = GrDeferredUploadToken::AlreadyFlushedToken();
+ fLastUse = GrDeferredUploadToken::AlreadyFlushedToken();
// zero out the plot
if (fData) {
@@ -153,9 +153,8 @@ GrDrawOpAtlas::GrDrawOpAtlas(GrContext* context, GrPixelConfig config, int width
, fTextureWidth(width)
, fTextureHeight(height)
, fAtlasGeneration(kInvalidAtlasGeneration + 1)
- , fPrevFlushToken(GrDrawOpUploadToken::AlreadyFlushedToken())
+ , fPrevFlushToken(GrDeferredUploadToken::AlreadyFlushedToken())
, fNumPages(0) {
-
fPlotWidth = fTextureWidth / numPlotsX;
fPlotHeight = fTextureHeight / numPlotsY;
SkASSERT(numPlotsX * numPlotsY <= BulkUseTokenUpdater::kMaxPlots);
@@ -193,11 +192,10 @@ inline bool GrDrawOpAtlas::updatePlot(GrDrawOp::Target* target, AtlasID* id, Plo
GrTextureProxy* proxy = fProxies[pageIdx].get();
- GrDrawOpUploadToken lastUploadToken = target->addAsapUpload(
- [plotsp, proxy] (GrDrawOp::WritePixelsFn& writePixels) {
- plotsp->uploadToTexture(writePixels, proxy);
- }
- );
+ GrDeferredUploadToken lastUploadToken = target->addAsapUpload(
+ [plotsp, proxy](GrDeferredTextureUploadWritePixelsFn& writePixels) {
+ plotsp->uploadToTexture(writePixels, proxy);
+ });
plot->setLastUploadToken(lastUploadToken);
}
*id = plot->id();
@@ -314,11 +312,10 @@ bool GrDrawOpAtlas::addToAtlas(AtlasID* id, GrDrawOp::Target* target, int width,
}
GrTextureProxy* proxy = fProxies[pageIdx].get();
- GrDrawOpUploadToken lastUploadToken = target->addInlineUpload(
- [plotsp, proxy] (GrDrawOp::WritePixelsFn& writePixels) {
- plotsp->uploadToTexture(writePixels, proxy);
- }
- );
+ GrDeferredUploadToken lastUploadToken = target->addInlineUpload(
+ [plotsp, proxy](GrDeferredTextureUploadWritePixelsFn& writePixels) {
+ plotsp->uploadToTexture(writePixels, proxy);
+ });
newPlot->setLastUploadToken(lastUploadToken);
*id = newPlot->id();
@@ -326,7 +323,7 @@ bool GrDrawOpAtlas::addToAtlas(AtlasID* id, GrDrawOp::Target* target, int width,
return true;
}
-void GrDrawOpAtlas::compact(GrDrawOpUploadToken startTokenForNextFlush) {
+void GrDrawOpAtlas::compact(GrDeferredUploadToken startTokenForNextFlush) {
if (fNumPages <= 1) {
fPrevFlushToken = startTokenForNextFlush;
return;
@@ -425,7 +422,7 @@ void GrDrawOpAtlas::compact(GrDrawOpUploadToken startTokenForNextFlush) {
this->processEvictionAndResetRects(plot);
--availablePlots;
}
- } else if (plot->lastUseToken() != GrDrawOpUploadToken::AlreadyFlushedToken()) {
+ } else if (plot->lastUseToken() != GrDeferredUploadToken::AlreadyFlushedToken()) {
// otherwise if aged out just evict it.
this->processEvictionAndResetRects(plot);
}
diff --git a/src/gpu/GrDrawOpAtlas.h b/src/gpu/GrDrawOpAtlas.h
index 647e6a58b9..694af30e00 100644
--- a/src/gpu/GrDrawOpAtlas.h
+++ b/src/gpu/GrDrawOpAtlas.h
@@ -117,7 +117,7 @@ public:
}
/** To ensure the atlas does not evict a given entry, the client must set the last use token. */
- inline void setLastUseToken(AtlasID id, GrDrawOpUploadToken token) {
+ inline void setLastUseToken(AtlasID id, GrDeferredUploadToken token) {
SkASSERT(this->hasID(id));
uint32_t plotIdx = GetPlotIndexFromID(id);
SkASSERT(plotIdx < fNumPlots);
@@ -191,7 +191,7 @@ public:
friend class GrDrawOpAtlas;
};
- void setLastUseTokenBulk(const BulkUseTokenUpdater& updater, GrDrawOpUploadToken token) {
+ void setLastUseTokenBulk(const BulkUseTokenUpdater& updater, GrDeferredUploadToken token) {
int count = updater.fPlotsToUpdate.count();
for (int i = 0; i < count; i++) {
const BulkUseTokenUpdater::PlotData& pd = updater.fPlotsToUpdate[i];
@@ -205,7 +205,7 @@ public:
}
}
- void compact(GrDrawOpUploadToken startTokenForNextFlush);
+ void compact(GrDeferredUploadToken startTokenForNextFlush);
static constexpr auto kGlyphMaxDim = 256;
static bool GlyphTooLargeForAtlas(int width, int height) {
@@ -253,12 +253,12 @@ private:
* use lastUse to determine when we can evict a plot from the cache, i.e. if the last use
* has already flushed through the gpu then we can reuse the plot.
*/
- GrDrawOpUploadToken lastUploadToken() const { return fLastUpload; }
- GrDrawOpUploadToken lastUseToken() const { return fLastUse; }
- void setLastUploadToken(GrDrawOpUploadToken token) { fLastUpload = token; }
- void setLastUseToken(GrDrawOpUploadToken token) { fLastUse = token; }
+ GrDeferredUploadToken lastUploadToken() const { return fLastUpload; }
+ GrDeferredUploadToken lastUseToken() const { return fLastUse; }
+ void setLastUploadToken(GrDeferredUploadToken token) { fLastUpload = token; }
+ void setLastUseToken(GrDeferredUploadToken token) { fLastUse = token; }
- void uploadToTexture(GrDrawOp::WritePixelsFn&, GrTextureProxy*);
+ void uploadToTexture(GrDeferredTextureUploadWritePixelsFn&, GrTextureProxy*);
void resetRects();
int flushesSinceLastUsed() { return fFlushesSinceLastUse; }
@@ -288,8 +288,8 @@ private:
return generation << 16 | plotIdx << 8 | pageIdx;
}
- GrDrawOpUploadToken fLastUpload;
- GrDrawOpUploadToken fLastUse;
+ GrDeferredUploadToken fLastUpload;
+ GrDeferredUploadToken fLastUse;
// the number of flushes since this plot has been last used
int fFlushesSinceLastUse;
@@ -360,7 +360,7 @@ private:
uint64_t fAtlasGeneration;
// nextTokenToFlush() value at the end of the previous flush
- GrDrawOpUploadToken fPrevFlushToken;
+ GrDeferredUploadToken fPrevFlushToken;
struct EvictionData {
EvictionFunc fFunc;
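
For context, the eviction bookkeeping above is driven by these use tokens; a minimal sketch of the client-side pattern, where setLastUseToken and nextDrawToken are the APIs touched by this diff and the wrapper function is hypothetical:

    // Illustrative only: once an op has arranged for a glyph or path mask to live in the
    // atlas, it pins that entry so it cannot be evicted before the op's next draw flushes.
    void pinAtlasEntry(GrDrawOpAtlas* atlas, GrDrawOp::Target* target,
                       GrDrawOpAtlas::AtlasID id) {
        atlas->setLastUseToken(id, target->nextDrawToken());
    }
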
diff --git a/src/gpu/GrGpuCommandBuffer.h b/src/gpu/GrGpuCommandBuffer.h
index 2ac5e145fb..6963d23ebe 100644
--- a/src/gpu/GrGpuCommandBuffer.h
+++ b/src/gpu/GrGpuCommandBuffer.h
@@ -105,7 +105,7 @@ public:
const SkRect& bounds);
// Performs an upload of vertex data in the middle of a set of a set of draws
- virtual void inlineUpload(GrOpFlushState*, GrDrawOp::DeferredUploadFn&) = 0;
+ virtual void inlineUpload(GrOpFlushState*, GrDeferredTextureUploadFn&) = 0;
/**
* Clear the owned render target. Ignores the draw state and clip.
diff --git a/src/gpu/GrOnFlushResourceProvider.h b/src/gpu/GrOnFlushResourceProvider.h
index f57288c75c..cfd527e7f2 100644
--- a/src/gpu/GrOnFlushResourceProvider.h
+++ b/src/gpu/GrOnFlushResourceProvider.h
@@ -46,7 +46,7 @@ public:
* Called once flushing is complete and all ops indicated by preFlush have been executed and
* released. startTokenForNextFlush can be used to track resources used in the current flush.
*/
- virtual void postFlush(GrDrawOpUploadToken startTokenForNextFlush) {}
+ virtual void postFlush(GrDeferredUploadToken startTokenForNextFlush) {}
/**
* Tells the callback owner to hold onto this object when freeing GPU resources
diff --git a/src/gpu/GrOpFlushState.cpp b/src/gpu/GrOpFlushState.cpp
index c54b5a8721..1608519fb4 100644
--- a/src/gpu/GrOpFlushState.cpp
+++ b/src/gpu/GrOpFlushState.cpp
@@ -18,8 +18,8 @@ GrOpFlushState::GrOpFlushState(GrGpu* gpu, GrResourceProvider* resourceProvider)
, fCommandBuffer(nullptr)
, fVertexPool(gpu)
, fIndexPool(gpu)
- , fLastIssuedToken(GrDrawOpUploadToken::AlreadyFlushedToken())
- , fLastFlushedToken(GrDrawOpUploadToken::AlreadyFlushedToken())
+ , fLastIssuedToken(GrDeferredUploadToken::AlreadyFlushedToken())
+ , fLastFlushedToken(GrDeferredUploadToken::AlreadyFlushedToken())
, fOpArgs(nullptr) {}
const GrCaps& GrOpFlushState::caps() const {
@@ -54,11 +54,10 @@ uint16_t* GrOpFlushState::makeIndexSpaceAtLeast(int minIndexCount, int fallbackI
minIndexCount, fallbackIndexCount, buffer, startIndex, actualIndexCount));
}
-void GrOpFlushState::doUpload(GrDrawOp::DeferredUploadFn& upload) {
- GrDrawOp::WritePixelsFn wp = [this](GrTextureProxy* proxy,
- int left, int top, int width,
- int height, GrPixelConfig config, const void* buffer,
- size_t rowBytes) {
+void GrOpFlushState::doUpload(GrDeferredTextureUploadFn& upload) {
+ GrDeferredTextureUploadWritePixelsFn wp = [this](GrTextureProxy* proxy, int left, int top,
+ int width, int height, GrPixelConfig config,
+ const void* buffer, size_t rowBytes) {
GrSurface* surface = proxy->priv().peekSurface();
GrGpu::DrawPreference drawPreference = GrGpu::kNoDraw_DrawPreference;
GrGpu::WritePixelTempDrawInfo tempInfo;
diff --git a/src/gpu/GrOpFlushState.h b/src/gpu/GrOpFlushState.h
index a5a5f82c31..22d0e81fad 100644
--- a/src/gpu/GrOpFlushState.h
+++ b/src/gpu/GrOpFlushState.h
@@ -27,7 +27,7 @@ public:
/** Inserts an upload to be executed after all ops in the flush prepared their draws but before
the draws are executed to the backend 3D API. */
- void addASAPUpload(GrDrawOp::DeferredUploadFn&& upload) {
+ void addASAPUpload(GrDeferredTextureUploadFn&& upload) {
fAsapUploads.emplace_back(std::move(upload));
}
@@ -35,26 +35,26 @@ public:
GrResourceProvider* resourceProvider() const { return fResourceProvider; }
/** Has the token been flushed to the backend 3D API. */
- bool hasDrawBeenFlushed(GrDrawOpUploadToken token) const {
+ bool hasDrawBeenFlushed(GrDeferredUploadToken token) const {
return token.fSequenceNumber <= fLastFlushedToken.fSequenceNumber;
}
/** Issue a token to an operation that is being enqueued. */
- GrDrawOpUploadToken issueDrawToken() {
- return GrDrawOpUploadToken(++fLastIssuedToken.fSequenceNumber);
+ GrDeferredUploadToken issueDrawToken() {
+ return GrDeferredUploadToken(++fLastIssuedToken.fSequenceNumber);
}
/** Call every time a draw that was issued a token is flushed */
void flushToken() { ++fLastFlushedToken.fSequenceNumber; }
/** Gets the next draw token that will be issued. */
- GrDrawOpUploadToken nextDrawToken() const {
- return GrDrawOpUploadToken(fLastIssuedToken.fSequenceNumber + 1);
+ GrDeferredUploadToken nextDrawToken() const {
+ return GrDeferredUploadToken(fLastIssuedToken.fSequenceNumber + 1);
}
/** The last token flushed to all the way to the backend API. */
- GrDrawOpUploadToken nextTokenToFlush() const {
- return GrDrawOpUploadToken(fLastFlushedToken.fSequenceNumber + 1);
+ GrDeferredUploadToken nextTokenToFlush() const {
+ return GrDeferredUploadToken(fLastFlushedToken.fSequenceNumber + 1);
}
void* makeVertexSpace(size_t vertexSize, int vertexCount,
@@ -80,7 +80,7 @@ public:
fAsapUploads.reset();
}
- void doUpload(GrDrawOp::DeferredUploadFn&);
+ void doUpload(GrDeferredTextureUploadFn&);
void putBackIndices(size_t indices) { fIndexPool.putBack(indices * sizeof(uint16_t)); }
@@ -132,9 +132,9 @@ private:
GrGpuCommandBuffer* fCommandBuffer;
GrVertexBufferAllocPool fVertexPool;
GrIndexBufferAllocPool fIndexPool;
- SkSTArray<4, GrDrawOp::DeferredUploadFn> fAsapUploads;
- GrDrawOpUploadToken fLastIssuedToken;
- GrDrawOpUploadToken fLastFlushedToken;
+ SkSTArray<4, GrDeferredTextureUploadFn> fAsapUploads;
+ GrDeferredUploadToken fLastIssuedToken;
+ GrDeferredUploadToken fLastFlushedToken;
DrawOpArgs* fOpArgs;
SkArenaAlloc fPipelines{sizeof(GrPipeline) * 100};
};
@@ -170,7 +170,7 @@ public:
Target(GrOpFlushState* state, GrDrawOp* op) : fState(state), fOp(op) {}
/** Returns the token of the draw that this upload will occur before. */
- GrDrawOpUploadToken addInlineUpload(DeferredUploadFn&& upload) {
+ GrDeferredUploadToken addInlineUpload(GrDeferredTextureUploadFn&& upload) {
fOp->fInlineUploads.emplace_back(std::move(upload), fState->nextDrawToken());
return fOp->fInlineUploads.back().fUploadBeforeToken;
}
@@ -178,19 +178,19 @@ public:
/** Returns the token of the draw that this upload will occur before. Since ASAP uploads
are done first during a flush, this will be the first token since the most recent
flush. */
- GrDrawOpUploadToken addAsapUpload(DeferredUploadFn&& upload) {
+ GrDeferredUploadToken addAsapUpload(GrDeferredTextureUploadFn&& upload) {
fState->addASAPUpload(std::move(upload));
return fState->nextTokenToFlush();
}
- bool hasDrawBeenFlushed(GrDrawOpUploadToken token) const {
+ bool hasDrawBeenFlushed(GrDeferredUploadToken token) const {
return fState->hasDrawBeenFlushed(token);
}
/** Gets the next draw token that will be issued by this target. This can be used by an op
to record that the next draw it issues will use a resource (e.g. texture) while preparing
that draw. */
- GrDrawOpUploadToken nextDrawToken() const { return fState->nextDrawToken(); }
+ GrDeferredUploadToken nextDrawToken() const { return fState->nextDrawToken(); }
const GrCaps& caps() const { return fState->caps(); }
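
The token plumbing above follows a simple contract; a minimal sketch of it, assuming 'state' is a GrOpFlushState* and this is the only draw issued since the last flush (the surrounding code is illustrative, the calls are the methods shown in this diff):

    // Illustrative only: the sequencing contract behind the renamed token type.
    GrDeferredUploadToken token = state->issueDrawToken();  // recorded while preparing the draw
    SkASSERT(!state->hasDrawBeenFlushed(token));            // its execution is still pending
    state->flushToken();                                    // called when the backend executes it
    SkASSERT(state->hasDrawBeenFlushed(token));             // now the token reads as flushed
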
diff --git a/src/gpu/ccpr/GrCoverageCountingPathRenderer.cpp b/src/gpu/ccpr/GrCoverageCountingPathRenderer.cpp
index 86a436568f..73b7e75307 100644
--- a/src/gpu/ccpr/GrCoverageCountingPathRenderer.cpp
+++ b/src/gpu/ccpr/GrCoverageCountingPathRenderer.cpp
@@ -409,7 +409,7 @@ void DrawPathsOp::onExecute(GrOpFlushState* flushState) {
SkASSERT(baseInstance == fBaseInstance + fDebugInstanceCount - fDebugSkippedInstances);
}
-void GrCoverageCountingPathRenderer::postFlush(GrDrawOpUploadToken) {
+void GrCoverageCountingPathRenderer::postFlush(GrDeferredUploadToken) {
SkASSERT(fFlushing);
fPerFlushAtlases.reset();
fPerFlushInstanceBuffer.reset();
diff --git a/src/gpu/ccpr/GrCoverageCountingPathRenderer.h b/src/gpu/ccpr/GrCoverageCountingPathRenderer.h
index 035898dc27..7d613f90e5 100644
--- a/src/gpu/ccpr/GrCoverageCountingPathRenderer.h
+++ b/src/gpu/ccpr/GrCoverageCountingPathRenderer.h
@@ -45,7 +45,7 @@ public:
// GrOnFlushCallbackObject overrides.
void preFlush(GrOnFlushResourceProvider*, const uint32_t* opListIDs, int numOpListIDs,
SkTArray<sk_sp<GrRenderTargetContext>>* results) override;
- void postFlush(GrDrawOpUploadToken) override;
+ void postFlush(GrDeferredUploadToken) override;
// This is the Op that ultimately draws a path into its final destination, using the atlas we
// generate at flush time.
diff --git a/src/gpu/gl/GrGLGpuCommandBuffer.h b/src/gpu/gl/GrGLGpuCommandBuffer.h
index 049db5da64..c45bf5f141 100644
--- a/src/gpu/gl/GrGLGpuCommandBuffer.h
+++ b/src/gpu/gl/GrGLGpuCommandBuffer.h
@@ -70,7 +70,7 @@ public:
fGpu->insertEventMarker(msg);
}
- void inlineUpload(GrOpFlushState* state, GrDrawOp::DeferredUploadFn& upload) override {
+ void inlineUpload(GrOpFlushState* state, GrDeferredTextureUploadFn& upload) override {
state->doUpload(upload);
}
diff --git a/src/gpu/mock/GrMockGpuCommandBuffer.h b/src/gpu/mock/GrMockGpuCommandBuffer.h
index 9c5d824497..2229fa34b6 100644
--- a/src/gpu/mock/GrMockGpuCommandBuffer.h
+++ b/src/gpu/mock/GrMockGpuCommandBuffer.h
@@ -37,7 +37,7 @@ public:
}
GrGpu* gpu() override { return fGpu; }
- void inlineUpload(GrOpFlushState*, GrDrawOp::DeferredUploadFn&) override {}
+ void inlineUpload(GrOpFlushState*, GrDeferredTextureUploadFn&) override {}
void discard() override {}
void insertEventMarker(const char*) override {}
void begin() override {}
diff --git a/src/gpu/ops/GrDrawOp.h b/src/gpu/ops/GrDrawOp.h
index ecd92498ae..88edfe7f38 100644
--- a/src/gpu/ops/GrDrawOp.h
+++ b/src/gpu/ops/GrDrawOp.h
@@ -9,55 +9,17 @@
#define GrDrawOp_DEFINED
#include <functional>
+#include "GrDeferredUpload.h"
#include "GrOp.h"
#include "GrPipeline.h"
class GrAppliedClip;
/**
- * GrDrawOps are flushed in two phases (preDraw, and draw). In preDraw uploads to GrGpuResources
- * and draws are determined and scheduled. They are issued in the draw phase. GrDrawOpUploadToken is
- * used to sequence the uploads relative to each other and to draws.
- **/
-
-class GrDrawOpUploadToken {
-public:
- static GrDrawOpUploadToken AlreadyFlushedToken() { return GrDrawOpUploadToken(0); }
-
- GrDrawOpUploadToken(const GrDrawOpUploadToken& that) : fSequenceNumber(that.fSequenceNumber) {}
- GrDrawOpUploadToken& operator =(const GrDrawOpUploadToken& that) {
- fSequenceNumber = that.fSequenceNumber;
- return *this;
- }
- bool operator==(const GrDrawOpUploadToken& that) const {
- return fSequenceNumber == that.fSequenceNumber;
- }
- bool operator!=(const GrDrawOpUploadToken& that) const { return !(*this == that); }
- bool inInterval(const GrDrawOpUploadToken& start, const GrDrawOpUploadToken& finish) {
- return fSequenceNumber >= start.fSequenceNumber &&
- fSequenceNumber <= finish.fSequenceNumber;
- }
-
-private:
- GrDrawOpUploadToken();
- explicit GrDrawOpUploadToken(uint64_t sequenceNumber) : fSequenceNumber(sequenceNumber) {}
- friend class GrOpFlushState;
- uint64_t fSequenceNumber;
-};
-
-/**
* Base class for GrOps that draw. These ops have a GrPipeline installed by GrOpList.
*/
class GrDrawOp : public GrOp {
public:
- /** Method that performs an upload on behalf of a DeferredUploadFn. */
- using WritePixelsFn = std::function<bool(GrTextureProxy*,
- int left, int top, int width, int height,
- GrPixelConfig config, const void* buffer,
- size_t rowBytes)>;
- /** See comments before GrDrawOp::Target definition on how deferred uploaders work. */
- using DeferredUploadFn = std::function<void(WritePixelsFn&)>;
-
class Target;
GrDrawOp(uint32_t classID) : INHERITED(classID) {}
@@ -89,11 +51,10 @@ public:
protected:
struct QueuedUpload {
- QueuedUpload(DeferredUploadFn&& upload, GrDrawOpUploadToken token)
- : fUpload(std::move(upload))
- , fUploadBeforeToken(token) {}
- DeferredUploadFn fUpload;
- GrDrawOpUploadToken fUploadBeforeToken;
+ QueuedUpload(GrDeferredTextureUploadFn&& upload, GrDeferredUploadToken token)
+ : fUpload(std::move(upload)), fUploadBeforeToken(token) {}
+ GrDeferredTextureUploadFn fUpload;
+ GrDeferredUploadToken fUploadBeforeToken;
};
SkTArray<QueuedUpload> fInlineUploads;
diff --git a/src/gpu/ops/GrMeshDrawOp.cpp b/src/gpu/ops/GrMeshDrawOp.cpp
index 9174d8b208..2e12953fe1 100644
--- a/src/gpu/ops/GrMeshDrawOp.cpp
+++ b/src/gpu/ops/GrMeshDrawOp.cpp
@@ -11,7 +11,7 @@
#include "GrResourceProvider.h"
GrMeshDrawOp::GrMeshDrawOp(uint32_t classID)
- : INHERITED(classID), fBaseDrawToken(GrDrawOpUploadToken::AlreadyFlushedToken()) {}
+ : INHERITED(classID), fBaseDrawToken(GrDeferredUploadToken::AlreadyFlushedToken()) {}
void GrMeshDrawOp::onPrepare(GrOpFlushState* state) {
Target target(state, this);
@@ -67,7 +67,7 @@ void GrMeshDrawOp::onExecute(GrOpFlushState* state) {
SkASSERT(state->rtCommandBuffer());
for (int currDrawIdx = 0; currDrawIdx < fQueuedDraws.count(); ++currDrawIdx) {
- GrDrawOpUploadToken drawToken = state->nextTokenToFlush();
+ GrDeferredUploadToken drawToken = state->nextTokenToFlush();
while (currUploadIdx < fInlineUploads.count() &&
fInlineUploads[currUploadIdx].fUploadBeforeToken == drawToken) {
state->rtCommandBuffer()->inlineUpload(state, fInlineUploads[currUploadIdx++].fUpload);
@@ -104,7 +104,7 @@ void GrMeshDrawOp::Target::draw(const GrGeometryProcessor* gp, const GrPipeline*
}
}
GrMeshDrawOp::QueuedDraw& draw = op->fQueuedDraws.push_back();
- GrDrawOpUploadToken token = this->state()->issueDrawToken();
+ GrDeferredUploadToken token = this->state()->issueDrawToken();
draw.fGeometryProcessor.reset(gp);
draw.fPipeline = pipeline;
draw.fMeshCnt = 1;
diff --git a/src/gpu/ops/GrMeshDrawOp.h b/src/gpu/ops/GrMeshDrawOp.h
index dc09e0c765..64faaa8013 100644
--- a/src/gpu/ops/GrMeshDrawOp.h
+++ b/src/gpu/ops/GrMeshDrawOp.h
@@ -83,7 +83,7 @@ private:
// All draws in all the GrMeshDrawOps have implicit tokens based on the order they are enqueued
// globally across all ops. This is the offset of the first entry in fQueuedDraws.
// fQueuedDraws[i]'s token is fBaseDrawToken + i.
- GrDrawOpUploadToken fBaseDrawToken;
+ GrDeferredUploadToken fBaseDrawToken;
SkSTArray<4, GrMesh> fMeshes;
SkSTArray<4, QueuedDraw, true> fQueuedDraws;
diff --git a/src/gpu/ops/GrSmallPathRenderer.h b/src/gpu/ops/GrSmallPathRenderer.h
index fd327c5a60..8794f1b42c 100644
--- a/src/gpu/ops/GrSmallPathRenderer.h
+++ b/src/gpu/ops/GrSmallPathRenderer.h
@@ -36,7 +36,7 @@ public:
void preFlush(GrOnFlushResourceProvider*, const uint32_t*, int,
SkTArray<sk_sp<GrRenderTargetContext>>*) override {}
- void postFlush(GrDrawOpUploadToken startTokenForNextFlush) override {
+ void postFlush(GrDeferredUploadToken startTokenForNextFlush) override {
if (fAtlas) {
fAtlas->compact(startTokenForNextFlush);
}
diff --git a/src/gpu/text/GrAtlasGlyphCache.h b/src/gpu/text/GrAtlasGlyphCache.h
index 5b33cc018b..717c48eb13 100644
--- a/src/gpu/text/GrAtlasGlyphCache.h
+++ b/src/gpu/text/GrAtlasGlyphCache.h
@@ -155,14 +155,14 @@ public:
// For convenience, this function will also set the use token for the current glyph if required
// NOTE: the bulk uploader is only valid if the subrun has a valid atlasGeneration
void addGlyphToBulkAndSetUseToken(GrDrawOpAtlas::BulkUseTokenUpdater* updater, GrGlyph* glyph,
- GrDrawOpUploadToken token) {
+ GrDeferredUploadToken token) {
SkASSERT(glyph);
updater->add(glyph->fID);
this->getAtlas(glyph->fMaskFormat)->setLastUseToken(glyph->fID, token);
}
void setUseTokenBulk(const GrDrawOpAtlas::BulkUseTokenUpdater& updater,
- GrDrawOpUploadToken token,
+ GrDeferredUploadToken token,
GrMaskFormat format) {
this->getAtlas(format)->setLastUseTokenBulk(updater, token);
}
@@ -187,7 +187,7 @@ public:
void preFlush(GrOnFlushResourceProvider*, const uint32_t*, int,
SkTArray<sk_sp<GrRenderTargetContext>>*) override {}
- void postFlush(GrDrawOpUploadToken startTokenForNextFlush) override {
+ void postFlush(GrDeferredUploadToken startTokenForNextFlush) override {
for (int i = 0; i < kMaskFormatCount; ++i) {
if (fAtlases[i]) {
fAtlases[i]->compact(startTokenForNextFlush);
diff --git a/src/gpu/vk/GrVkGpuCommandBuffer.cpp b/src/gpu/vk/GrVkGpuCommandBuffer.cpp
index 008d4fcfc9..52dfede267 100644
--- a/src/gpu/vk/GrVkGpuCommandBuffer.cpp
+++ b/src/gpu/vk/GrVkGpuCommandBuffer.cpp
@@ -438,8 +438,8 @@ void GrVkGpuRTCommandBuffer::addAdditionalRenderPass() {
cbInfo.currentCmdBuf()->begin(fGpu, vkRT->framebuffer(), cbInfo.fRenderPass);
}
-void GrVkGpuRTCommandBuffer::inlineUpload(GrOpFlushState* state, GrDrawOp::DeferredUploadFn& upload) {
-
+void GrVkGpuRTCommandBuffer::inlineUpload(GrOpFlushState* state,
+ GrDeferredTextureUploadFn& upload) {
if (!fCommandBufferInfos[fCurrentCmdInfo].fIsEmpty) {
this->addAdditionalRenderPass();
}
diff --git a/src/gpu/vk/GrVkGpuCommandBuffer.h b/src/gpu/vk/GrVkGpuCommandBuffer.h
index 139a72d3af..2aa457bad0 100644
--- a/src/gpu/vk/GrVkGpuCommandBuffer.h
+++ b/src/gpu/vk/GrVkGpuCommandBuffer.h
@@ -69,7 +69,7 @@ public:
void discard() override;
void insertEventMarker(const char*) override;
- void inlineUpload(GrOpFlushState* state, GrDrawOp::DeferredUploadFn& upload) override;
+ void inlineUpload(GrOpFlushState* state, GrDeferredTextureUploadFn& upload) override;
void copy(GrSurface* src, GrSurfaceOrigin srcOrigin, const SkIRect& srcRect,
const SkIPoint& dstPoint) override;
@@ -134,11 +134,11 @@ private:
void addAdditionalRenderPass();
struct InlineUploadInfo {
- InlineUploadInfo(GrOpFlushState* state, const GrDrawOp::DeferredUploadFn& upload)
- : fFlushState(state), fUpload(upload) {}
+ InlineUploadInfo(GrOpFlushState* state, const GrDeferredTextureUploadFn& upload)
+ : fFlushState(state), fUpload(upload) {}
GrOpFlushState* fFlushState;
- GrDrawOp::DeferredUploadFn fUpload;
+ GrDeferredTextureUploadFn fUpload;
};
struct CopyInfo {