path: root/src/gpu/GrBatch.cpp
/*
 * Copyright 2015 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#include "GrBatch.h"
#include "GrBatchTarget.h"
#include "GrResourceProvider.h"

#include "GrMemoryPool.h"
#include "SkSpinlock.h"

// TODO: There is a small but fairly consistent benefit to using a larger exclusive memory pool
// for batches. A lot is in flux right now, but we should revisit this once batching is used
// everywhere.

// We use a global pool protected by a mutex (spinlock). Chrome may use the same GrContext on
// different threads, though never concurrently, and there is a memory barrier between accesses
// of a context on different threads. In addition, there may be multiple GrContexts, and those
// contexts may be in use concurrently on different threads.
namespace {
SK_DECLARE_STATIC_SPINLOCK(gBatchSpinlock);
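// RAII accessor: the constructor acquires the spinlock and the destructor releases it, so an
// unnamed temporary holds the lock for exactly the duration of one pool call, e.g.
//
//     void* mem = MemoryPoolAccessor().pool()->allocate(size);
//
// which is how operator new/delete below use it. The pool itself is a lazily initialized
// function-local static shared by all GrBatch allocations.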
class MemoryPoolAccessor {
public:
    MemoryPoolAccessor() { gBatchSpinlock.acquire(); }

    ~MemoryPoolAccessor() { gBatchSpinlock.release(); }

    GrMemoryPool* pool() const {
        static GrMemoryPool gPool(16384, 16384);
        return &gPool;
    }
};
}

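// Seed for generating unique batch class IDs; it starts at the reserved illegal value.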
int32_t GrBatch::gCurrBatchClassID = GrBatch::kIllegalBatchClassID;

void* GrBatch::operator new(size_t size) {
    // Allocate from the shared pool while the temporary accessor holds the spinlock.
    return MemoryPoolAccessor().pool()->allocate(size);
}

void GrBatch::operator delete(void* target) {
    // Return the memory to the shared pool, again under the lock.
    MemoryPoolAccessor().pool()->release(target);
}

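// Reserves vertex space for 'instancesToDraw' instances, computes how many instances the index
// buffer can cover in a single draw, and initializes fVertices for instanced drawing. Returns a
// pointer to the reserved vertex space, or NULL if there is no index buffer or the vertex
// allocation fails.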
void* GrBatch::InstancedHelper::init(GrBatchTarget* batchTarget, GrPrimitiveType primType,
                                     size_t vertexStride, const GrIndexBuffer* indexBuffer,
                                     int verticesPerInstance, int indicesPerInstance,
                                     int instancesToDraw) {
    SkASSERT(!fInstancesRemaining);
    SkASSERT(batchTarget);
    if (!indexBuffer) {
        return NULL;
    }
    const GrVertexBuffer* vertexBuffer;
    int firstVertex;
    int vertexCount = verticesPerInstance * instancesToDraw;
    void* vertices = batchTarget->vertexPool()->makeSpace(vertexStride, vertexCount, &vertexBuffer,
                                                          &firstVertex);
    if (!vertices) {
        SkDebugf("Vertices could not be allocated for instanced rendering.");
        return NULL;
    }
    SkASSERT(vertexBuffer);
    fInstancesRemaining = instancesToDraw;
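    // The index buffer bounds how many instances one draw call can reference: each instance
    // consumes indicesPerInstance 16-bit indices from it.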
    size_t ibSize = indexBuffer->gpuMemorySize();
    fMaxInstancesPerDraw = static_cast<int>(ibSize / (sizeof(uint16_t) * indicesPerInstance));

    SkASSERT(fMaxInstancesPerDraw > 0);
    fVertices.initInstanced(primType, vertexBuffer, indexBuffer, firstVertex,
                            verticesPerInstance, indicesPerInstance, &fInstancesRemaining,
                            fMaxInstancesPerDraw);
    return vertices;
}

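// Refs the shared quad index buffer from the resource provider and delegates to
// InstancedHelper::init, drawing each quad as triangles (kVerticesPerQuad vertices and
// kIndicesPerQuad indices per quad).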
void* GrBatch::QuadHelper::init(GrBatchTarget* batchTarget, size_t vertexStride, int quadsToDraw) {
    SkAutoTUnref<const GrIndexBuffer> quadIndexBuffer(
        batchTarget->resourceProvider()->refQuadIndexBuffer());
    if (!quadIndexBuffer) {
        SkDebugf("Could not get quad index buffer.");
        return NULL;
    }
    return this->INHERITED::init(batchTarget, kTriangles_GrPrimitiveType, vertexStride,
                                 quadIndexBuffer, kVerticesPerQuad, kIndicesPerQuad, quadsToDraw);
}