path: root/src/gpu/gl/GrGLVertexArray.cpp
/*
 * Copyright 2013 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#include "GrGLVertexArray.h"
#include "GrGLBuffer.h"
#include "GrGLGpu.h"

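// Describes how a GrVertexAttribType is presented to GL: the component count, the GL type
// enum, and whether fixed-point values are normalized.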
struct AttribLayout {
    bool        fNormalized;  // Only used by floating point types.
    uint8_t     fCount;
    uint16_t    fType;
};

GR_STATIC_ASSERT(4 == sizeof(AttribLayout));

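// Returns the AttribLayout used to describe 'type' to glVertexAttribPointer /
// glVertexAttribIPointer.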
static AttribLayout attrib_layout(GrVertexAttribType type) {
    switch (type) {
        case kFloat_GrVertexAttribType:
            return {false, 1, GR_GL_FLOAT};
        case kVec2f_GrVertexAttribType:
            return {false, 2, GR_GL_FLOAT};
        case kVec3f_GrVertexAttribType:
            return {false, 3, GR_GL_FLOAT};
        case kVec4f_GrVertexAttribType:
            return {false, 4, GR_GL_FLOAT};
        case kVec2i_GrVertexAttribType:
            return {false, 2, GR_GL_INT};
        case kVec3i_GrVertexAttribType:
            return {false, 3, GR_GL_INT};
        case kVec4i_GrVertexAttribType:
            return {false, 4, GR_GL_INT};
        case kUByte_GrVertexAttribType:
            return {true, 1, GR_GL_UNSIGNED_BYTE};
        case kVec4ub_GrVertexAttribType:
            return {true, 4, GR_GL_UNSIGNED_BYTE};
        case kVec2us_norm_GrVertexAttribType:
            return {true, 2, GR_GL_UNSIGNED_SHORT};
        case kVec2us_uint_GrVertexAttribType:
            return {false, 2, GR_GL_UNSIGNED_SHORT};
        case kInt_GrVertexAttribType:
            return {false, 1, GR_GL_INT};
        case kUint_GrVertexAttribType:
            return {false, 1, GR_GL_UNSIGNED_INT};
    }
    SK_ABORT("Unknown vertex attrib type");
    return {false, 0, 0};
}

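// Returns true if the attribute is consumed as an integer by the shader and therefore must be
// specified with glVertexAttribIPointer rather than glVertexAttribPointer.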
static bool GrVertexAttribTypeIsIntType(const GrShaderCaps* shaderCaps,
                                        GrVertexAttribType type) {
    switch (type) {
        case kFloat_GrVertexAttribType:
            return false;
        case kVec2f_GrVertexAttribType:
            return false;
        case kVec3f_GrVertexAttribType:
            return false;
        case kVec4f_GrVertexAttribType:
            return false;
        case kVec2i_GrVertexAttribType:
            return true;
        case kVec3i_GrVertexAttribType:
            return true;
        case kVec4i_GrVertexAttribType:
            return true;
        case kUByte_GrVertexAttribType:
            return false;
        case kVec4ub_GrVertexAttribType:
            return false;
        case kVec2us_norm_GrVertexAttribType:
            return false;
        case kVec2us_uint_GrVertexAttribType:
            return shaderCaps->integerSupport();
        case kInt_GrVertexAttribType:
            return true;
        case kUint_GrVertexAttribType:
            return true;
    }
    SK_ABORT("Unexpected attribute type");
    return false;
}

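// Configures attrib 'index' to read from 'vertexBuffer' with the given type, stride, offset,
// and instance divisor. GL calls are skipped when the cached state already matches.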
void GrGLAttribArrayState::set(GrGLGpu* gpu,
                               int index,
                               const GrBuffer* vertexBuffer,
                               GrVertexAttribType type,
                               GrGLsizei stride,
                               size_t offsetInBytes,
                               int divisor) {
    SkASSERT(index >= 0 && index < fAttribArrayStates.count());
    SkASSERT(0 == divisor || gpu->caps()->instanceAttribSupport());
    AttribArrayState* array = &fAttribArrayStates[index];
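    // Only re-specify the attrib pointer when the cached layout differs from the request.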
    if (array->fVertexBufferUniqueID != vertexBuffer->uniqueID() ||
        array->fType != type ||
        array->fStride != stride ||
        array->fOffset != offsetInBytes) {
        gpu->bindBuffer(kVertex_GrBufferType, vertexBuffer);
        const AttribLayout& layout = attrib_layout(type);
        const GrGLvoid* offsetAsPtr = reinterpret_cast<const GrGLvoid*>(offsetInBytes);
        if (!GrVertexAttribTypeIsIntType(gpu->caps()->shaderCaps(), type)) {
            GR_GL_CALL(gpu->glInterface(), VertexAttribPointer(index,
                                                               layout.fCount,
                                                               layout.fType,
                                                               layout.fNormalized,
                                                               stride,
                                                               offsetAsPtr));
        } else {
            SkASSERT(gpu->caps()->shaderCaps()->integerSupport());
            SkASSERT(!layout.fNormalized);
            GR_GL_CALL(gpu->glInterface(), VertexAttribIPointer(index,
                                                                layout.fCount,
                                                                layout.fType,
                                                                stride,
                                                                offsetAsPtr));
        }
        array->fVertexBufferUniqueID = vertexBuffer->uniqueID();
        array->fType = type;
        array->fStride = stride;
        array->fOffset = offsetInBytes;
    }
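    // The instance divisor is tracked independently of the pointer state updated above.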
    if (gpu->caps()->instanceAttribSupport() && array->fDivisor != divisor) {
        SkASSERT(0 == divisor || 1 == divisor); // not necessarily a requirement but what we expect.
        GR_GL_CALL(gpu->glInterface(), VertexAttribDivisor(index, divisor));
        array->fDivisor = divisor;
    }
}

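// Enables attrib arrays [0, enabledCount) and disables the rest. The cached enabled count lets
// redundant glEnable/DisableVertexAttribArray calls be skipped.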
void GrGLAttribArrayState::enableVertexArrays(const GrGLGpu* gpu, int enabledCount) {
    SkASSERT(enabledCount <= fAttribArrayStates.count());
    if (fEnabledCountIsValid && enabledCount == fNumEnabledArrays) {
        return;
    }

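    // If the cached count is not valid, conservatively enable/disable every array.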
    int firstIdxToEnable = fEnabledCountIsValid ? fNumEnabledArrays : 0;
    for (int i = firstIdxToEnable; i < enabledCount; ++i) {
        GR_GL_CALL(gpu->glInterface(), EnableVertexAttribArray(i));
    }

    int endIdxToDisable = fEnabledCountIsValid ? fNumEnabledArrays : fAttribArrayStates.count();
    for (int i = enabledCount; i < endIdxToDisable; ++i) {
        GR_GL_CALL(gpu->glInterface(), DisableVertexAttribArray(i));
    }

    fNumEnabledArrays = enabledCount;
    fEnabledCountIsValid = true;
}

///////////////////////////////////////////////////////////////////////////////////////////////////

GrGLVertexArray::GrGLVertexArray(GrGLint id, int attribCount)
    : fID(id)
    , fAttribArrays(attribCount)
    , fIndexBufferUniqueID(SK_InvalidUniqueID) {
}

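// Binds this vertex array object and returns its attrib state for updating, or nullptr if the
// VAO id is 0.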
GrGLAttribArrayState* GrGLVertexArray::bind(GrGLGpu* gpu) {
    if (0 == fID) {
        return nullptr;
    }
    gpu->bindVertexArray(fID);
    return &fAttribArrays;
}

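// Binds the VAO and attaches 'ibuff' as its element array buffer if it is not already attached.
// A CPU-backed index buffer has no GL buffer object, so 0 is bound in that case.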
GrGLAttribArrayState* GrGLVertexArray::bindWithIndexBuffer(GrGLGpu* gpu, const GrBuffer* ibuff) {
    GrGLAttribArrayState* state = this->bind(gpu);
    if (state && fIndexBufferUniqueID != ibuff->uniqueID()) {
        if (ibuff->isCPUBacked()) {
            GR_GL_CALL(gpu->glInterface(), BindBuffer(GR_GL_ELEMENT_ARRAY_BUFFER, 0));
        } else {
            const GrGLBuffer* glBuffer = static_cast<const GrGLBuffer*>(ibuff);
            GR_GL_CALL(gpu->glInterface(), BindBuffer(GR_GL_ELEMENT_ARRAY_BUFFER,
                                                      glBuffer->bufferID()));
        }
        fIndexBufferUniqueID = ibuff->uniqueID();
    }
    return state;
}

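// Drops all cached attribute and index buffer state so that subsequent set() and
// bindWithIndexBuffer() calls re-issue the GL commands.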
void GrGLVertexArray::invalidateCachedState() {
    fAttribArrays.invalidate();
    fIndexBufferUniqueID.makeInvalid();
}