/*
 * Copyright 2016 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#ifndef Sk4fGradientPriv_DEFINED
#define Sk4fGradientPriv_DEFINED

#include "SkColor.h"
#include "SkHalf.h"
#include "SkImageInfo.h"
#include "SkNx.h"
#include "SkPM4f.h"
#include "SkPM4fPriv.h"
#include "SkUtils.h"

// Templates shared by various 4f gradient flavors.

namespace {

enum class ApplyPremul { True, False };

enum class DstType {
    L32,  // Linear 32bit.  Used for both shader/blitter paths.
    S32,  // SRGB 32bit.  Used for the blitter path only.
    F16,  // Linear half-float.  Used for blitters only.
    F32,  // Linear float.  Used for shaders only.
};

template <ApplyPremul>
struct PremulTraits;

template <>
struct PremulTraits<ApplyPremul::False> {
    static Sk4f apply(const Sk4f& c) { return c; }
};

template <>
struct PremulTraits<ApplyPremul::True> {
    static Sk4f apply(const Sk4f& c) {
        const float alpha = c[SkPM4f::A];
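        // Scale the color channels by alpha; the alpha lane itself is left unchanged.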
        // FIXME: portable swizzle?
        return c * Sk4f(alpha, alpha, alpha, 1);
    }
};

// Struct encapsulating various dest-dependent ops:
//
//   - load()       Load a SkPM4f value into Sk4f.  Normally called once per interval
//                  advance.  Also applies a scale and swizzle suitable for DstType.
//
//   - store()      Store one Sk4f to dest.  Optionally handles premul, color space
//                  conversion, etc.
//
//   - store(count) Store the Sk4f value repeatedly to dest, count times.
//
//   - store4x()    Store 4 Sk4f values to dest (opportunistic optimization).
//
template <DstType, ApplyPremul premul>
struct DstTraits;
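
// A minimal usage sketch, not part of the original interface: how a shader/blitter
// span loop might drive DstTraits.  The function name fill_span_sketch and the loop
// shape are illustrative assumptions, not Skia code.
template <DstType dstType, ApplyPremul premul>
void fill_span_sketch(const SkPM4f& color,
                      typename DstTraits<dstType, premul>::Type* dst,
                      int count) {
    using DT = DstTraits<dstType, premul>;

    // load() once per interval: applies the dest-specific scale and swizzle.
    const Sk4f c = DT::load(color);

    // Bulk of the span: four pixels at a time via store4x().
    while (count >= 4) {
        DT::store4x(c, c, c, c, dst);
        dst   += 4;
        count -= 4;
    }

    // Tail: memset-style store of the remaining pixels.
    if (count > 0) {
        DT::store(c, dst, count);
    }
}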

template <ApplyPremul premul>
struct DstTraits<DstType::L32, premul> {
    using PM   = PremulTraits<premul>;
    using Type = SkPMColor;

    // For L32, prescaling by 255 saves a per-pixel multiplication when premul is not needed.
    static Sk4f load(const SkPM4f& c) {
        return premul == ApplyPremul::False
            ? c.to4f_pmorder() * Sk4f(255)
            : c.to4f_pmorder();
    }

    static void store(const Sk4f& c, Type* dst) {
        if (premul == ApplyPremul::False) {
            // c is prescaled by 255, just store.
            SkNx_cast<uint8_t>(c).store(dst);
        } else {
            *dst = Sk4f_toL32(PM::apply(c));
        }
    }

    static void store(const Sk4f& c, Type* dst, int n) {
        Type pmc;
        store(c, &pmc);
        sk_memset32(dst, pmc, n);
    }

    static void store4x(const Sk4f& c0, const Sk4f& c1,
                        const Sk4f& c2, const Sk4f& c3,
                        Type* dst) {
        if (premul == ApplyPremul::False) {
            Sk4f_ToBytes((uint8_t*)dst, c0, c1, c2, c3);
        } else {
            store(c0, dst + 0);
            store(c1, dst + 1);
            store(c2, dst + 2);
            store(c3, dst + 3);
        }
    }
};

template <ApplyPremul premul>
struct DstTraits<DstType::S32, premul> {
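    // S32: stores encode to sRGB 32-bit pixels (Sk4f_toS32).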
    using PM   = PremulTraits<premul>;
    using Type = SkPMColor;

    static Sk4f load(const SkPM4f& c) {
        return c.to4f_pmorder();
    }

    static void store(const Sk4f& c, Type* dst) {
        // FIXME: this assumes opaque colors.  Handle unpremultiplication.
        *dst = Sk4f_toS32(PM::apply(c));
    }

    static void store(const Sk4f& c, Type* dst, int n) {
        sk_memset32(dst, Sk4f_toS32(PM::apply(c)), n);
    }

    static void store4x(const Sk4f& c0, const Sk4f& c1,
                        const Sk4f& c2, const Sk4f& c3,
                        Type* dst) {
        store(c0, dst + 0);
        store(c1, dst + 1);
        store(c2, dst + 2);
        store(c3, dst + 3);
    }
};

template <ApplyPremul premul>
struct DstTraits<DstType::F16, premul> {
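    // F16: dest pixels are four half floats packed into a single uint64_t.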
    using PM   = PremulTraits<premul>;
    using Type = uint64_t;

    static Sk4f load(const SkPM4f& c) {
        return c.to4f();
    }

    static void store(const Sk4f& c, Type* dst) {
        SkFloatToHalf_finite_ftz(PM::apply(c)).store(dst);
    }

    static void store(const Sk4f& c, Type* dst, int n) {
        uint64_t color;
        SkFloatToHalf_finite_ftz(PM::apply(c)).store(&color);
        sk_memset64(dst, color, n);
    }

    static void store4x(const Sk4f& c0, const Sk4f& c1,
                        const Sk4f& c2, const Sk4f& c3,
                        Type* dst) {
        store(c0, dst + 0);
        store(c1, dst + 1);
        store(c2, dst + 2);
        store(c3, dst + 3);
    }
};

template <ApplyPremul premul>
struct DstTraits<DstType::F32, premul> {
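    // F32: dest pixels are full-precision SkPM4f values (four floats per pixel).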
    using PM   = PremulTraits<premul>;
    using Type = SkPM4f;

    static Sk4f load(const SkPM4f& c) {
        return c.to4f();
    }

    static void store(const Sk4f& c, Type* dst) {
        PM::apply(c).store(dst->fVec);
    }

    static void store(const Sk4f& c, Type* dst, int n) {
        const Sk4f pmc = PM::apply(c);
        for (int i = 0; i < n; ++i) {
            pmc.store(dst[i].fVec);
        }
    }

    static void store4x(const Sk4f& c0, const Sk4f& c1,
                        const Sk4f& c2, const Sk4f& c3,
                        Type* dst) {
        store(c0, dst + 0);
        store(c1, dst + 1);
        store(c2, dst + 2);
        store(c3, dst + 3);
    }
};

} // anonymous namespace

#endif // Sk4fGradientPriv_DEFINED