/*
* Copyright 2015 Google Inc.
*
* Use of this source code is governed by a BSD-style license that can be
* found in the LICENSE file.
*/
#ifndef GrBatch_DEFINED
#define GrBatch_DEFINED
#include "../private/SkAtomics.h"
#include "GrNonAtomicRef.h"
#include "SkRect.h"
#include "SkString.h"
#include <new>
class GrCaps;
class GrBatchFlushState;
class GrRenderTarget;
/**
* GrBatch is the base class for all Ganesh deferred geometry generators. To facilitate
* reorderable batching, Ganesh does not generate geometry inline with draw calls. Instead, it
* captures the arguments to the draw and then generates the geometry on demand. This gives GrBatch
 * subclasses complete freedom to decide how and what they batch.
*
* Batches are created when GrContext processes a draw call. Batches of the same subclass may be
* merged using combineIfPossible. When two batches merge, one takes on the union of the data
* and the other is left empty. The merged batch becomes responsible for drawing the data from both
* the original batches.
*
 * If an optimization requires knowing more about the full state of the draw, e.g. whether or not
 * the GrBatch is allowed to tweak alpha for coverage, then that information is communicated to
 * the GrBatch prior to geometry generation.
*
* The bounds of the batch must contain all the vertices in device space *irrespective* of the clip.
* The bounds are used in determining which clip elements must be applied and thus the bounds cannot
* in turn depend upon the clip.
*/
#define GR_BATCH_SPEW 0
#if GR_BATCH_SPEW
#define GrBATCH_INFO(...) SkDebugf(__VA_ARGS__)
#define GrBATCH_SPEW(code) code
#else
#define GrBATCH_SPEW(code)
#define GrBATCH_INFO(...)
#endif
// A helper macro that generates a static class ID for a GrBatch subclass.
#define DEFINE_BATCH_CLASS_ID \
static uint32_t ClassID() { \
static uint32_t kClassID = GenBatchClassID(); \
return kClassID; \
}
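// For illustration only: a minimal hypothetical subclass. The names below
// (HypotheticalRectBatch, fRects) are made up for this sketch; it just shows
// how DEFINE_BATCH_CLASS_ID, setBounds/joinBounds, and onCombineIfPossible
// are meant to fit together. The remaining pure virtuals (onPrepare, onDraw,
// renderTargetUniqueID, renderTarget, dumpInfo) and the SkTArray include are
// elided.
//
//   class HypotheticalRectBatch : public GrBatch {
//   public:
//       DEFINE_BATCH_CLASS_ID
//
//       HypotheticalRectBatch(const SkRect& rect) : INHERITED(ClassID()) {
//           fRects.push_back(rect);
//           // Device-space bounds, irrespective of the clip (see above).
//           this->setBounds(rect);
//       }
//
//       const char* name() const override { return "HypotheticalRectBatch"; }
//
//   private:
//       bool onCombineIfPossible(GrBatch* t, const GrCaps&) override {
//           // combineIfPossible() has already matched class IDs, so cast<>
//           // is safe. Take the union of the data and grow the bounds; this
//           // batch now draws both batches' geometry and 'that' is dropped.
//           HypotheticalRectBatch* that = t->cast<HypotheticalRectBatch>();
//           fRects.push_back_n(that->fRects.count(), that->fRects.begin());
//           this->joinBounds(that->bounds());
//           return true;
//       }
//       // onPrepare/onDraw/renderTargetUniqueID/renderTarget/dumpInfo ...
//       SkTArray<SkRect> fRects;
//
//       typedef GrBatch INHERITED;
//   };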
class GrBatch : public GrNonAtomicRef<GrBatch> {
public:
GrBatch(uint32_t classID);
virtual ~GrBatch();
virtual const char* name() const = 0;
bool combineIfPossible(GrBatch* that, const GrCaps& caps) {
if (this->classID() != that->classID()) {
return false;
}
return this->onCombineIfPossible(that, caps);
}
const SkRect& bounds() const { return fBounds; }
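    // Batches override scalar operator new/delete (defined out of line) so
    // allocations can be routed through a dedicated allocator; the placement
    // forms below simply forward to the global placement operators.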
void* operator new(size_t size);
void operator delete(void* target);
void* operator new(size_t size, void* placement) {
return ::operator new(size, placement);
}
void operator delete(void* target, void* placement) {
::operator delete(target, placement);
}
/**
* Helper for safely down-casting to a GrBatch subclass
*/
template <typename T> const T& cast() const {
SkASSERT(T::ClassID() == this->classID());
return *static_cast<const T*>(this);
}
template <typename T> T* cast() {
SkASSERT(T::ClassID() == this->classID());
return static_cast<T*>(this);
}
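    // Illustrative use of cast<>, guarded by a class ID check
    // (HypotheticalRectBatch is the made-up subclass from the sketch above):
    //
    //   if (batch->classID() == HypotheticalRectBatch::ClassID()) {
    //       HypotheticalRectBatch* rb = batch->cast<HypotheticalRectBatch>();
    //   }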
uint32_t classID() const { SkASSERT(kIllegalBatchID != fClassID); return fClassID; }
// We lazily initialize the uniqueID because currently the only user is GrAuditTrail
uint32_t uniqueID() const {
if (kIllegalBatchID == fUniqueID) {
fUniqueID = GenBatchID();
}
return fUniqueID;
}
SkDEBUGCODE(bool isUsed() const { return fUsed; })
    /** Called prior to drawing. The batch should perform any resource creation necessary
        to quickly issue its draw when draw() is called. */
void prepare(GrBatchFlushState* state) { this->onPrepare(state); }
    /** Issues the batch's commands to GrGpu. */
void draw(GrBatchFlushState* state) { this->onDraw(state); }
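    // Conceptually, a flush makes two passes over the recorded batches (an
    // illustrative sketch, not the literal flush code):
    //
    //   for (GrBatch* batch : batches) { batch->prepare(&flushState); }
    //   for (GrBatch* batch : batches) { batch->draw(&flushState); }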
/** Used to block batching across render target changes. Remove this once we store
GrBatches for different RTs in different targets. */
virtual uint32_t renderTargetUniqueID() const = 0;
/** Used for spewing information about batches when debugging. */
virtual SkString dumpInfo() const = 0;
/** Can remove this when multi-draw-buffer lands */
virtual GrRenderTarget* renderTarget() const = 0;
protected:
    // NOTE: compute some bounds, even if extremely conservative. Do *NOT* call setLargest() on
    // the bounds rect because we outset it for dst copy textures.
void setBounds(const SkRect& newBounds) { fBounds = newBounds; }
    void joinBounds(const SkRect& otherBounds) {
        fBounds.joinPossiblyEmptyRect(otherBounds);
    }
static uint32_t GenBatchClassID() { return GenID(&gCurrBatchClassID); }
SkRect fBounds;
private:
virtual bool onCombineIfPossible(GrBatch*, const GrCaps& caps) = 0;
virtual void onPrepare(GrBatchFlushState*) = 0;
virtual void onDraw(GrBatchFlushState*) = 0;
static uint32_t GenID(int32_t* idCounter) {
// The atomic inc returns the old value not the incremented value. So we add
// 1 to the returned value.
uint32_t id = static_cast<uint32_t>(sk_atomic_inc(idCounter)) + 1;
if (!id) {
SkFAIL("This should never wrap as it should only be called once for each GrBatch "
"subclass.");
}
return id;
}
enum {
kIllegalBatchID = 0,
};
SkDEBUGCODE(bool fUsed;)
const uint32_t fClassID;
static uint32_t GenBatchID() { return GenID(&gCurrBatchUniqueID); }
mutable uint32_t fUniqueID;
static int32_t gCurrBatchUniqueID;
static int32_t gCurrBatchClassID;
};
#endif