/*
 * Copyright 2015 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#ifndef SkNx_sse_DEFINED
#define SkNx_sse_DEFINED

// This file may assume <= SSE2, but must check SK_CPU_SSE_LEVEL for anything more recent.

#include <immintrin.h>
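
// Specializations of SkNb: a vector of N lane-masks, each B bytes wide, packed into one
// 128-bit register.  The comparison operators of the SkNf specializations below return
// these masks, and allTrue()/anyTrue() reduce them with _mm_movemask_epi8, which gathers
// the top bit of each of the register's 16 bytes.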
template <>
class SkNb<2, 4> {
public:
    SkNb(const __m128i& vec) : fVec(vec) {}
    SkNb() {}

    bool allTrue() const { return 0xff == (_mm_movemask_epi8(fVec) & 0xff); }
    bool anyTrue() const { return 0x00 != (_mm_movemask_epi8(fVec) & 0xff); }

private:
    __m128i fVec;
};
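
// SkNb<2, 4> only carries data in its low 8 bytes, hence the & 0xff above; the
// specializations below fill the whole register, so the full 16-bit mask is tested.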
template <>
class SkNb<4, 4> {
public:
    SkNb(const __m128i& vec) : fVec(vec) {}
    SkNb() {}

    bool allTrue() const { return 0xffff == _mm_movemask_epi8(fVec); }
    bool anyTrue() const { return 0x0000 != _mm_movemask_epi8(fVec); }

private:
    __m128i fVec;
};

template <>
class SkNb<2, 8> {
public:
    SkNb(const __m128i& vec) : fVec(vec) {}
    SkNb() {}

    bool allTrue() const { return 0xffff == _mm_movemask_epi8(fVec); }
    bool anyTrue() const { return 0x0000 != _mm_movemask_epi8(fVec); }

private:
    __m128i fVec;
};
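
// Two floats in the low half of an __m128.  Load() and store() move only 8 bytes, and
// the two-argument constructor zeroes the unused upper lanes.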
template <>
class SkNf<2, float> {
    typedef SkNb<2, 4> Nb;
public:
    SkNf(const __m128& vec) : fVec(vec) {}
    SkNf() {}
    explicit SkNf(float val) : fVec(_mm_set1_ps(val)) {}
    static SkNf Load(const float vals[2]) {
        return _mm_castsi128_ps(_mm_loadl_epi64((const __m128i*)vals));
    }
    SkNf(float a, float b) : fVec(_mm_setr_ps(a,b,0,0)) {}

    void store(float vals[2]) const { _mm_storel_pi((__m64*)vals, fVec); }

    SkNf operator + (const SkNf& o) const { return _mm_add_ps(fVec, o.fVec); }
    SkNf operator - (const SkNf& o) const { return _mm_sub_ps(fVec, o.fVec); }
    SkNf operator * (const SkNf& o) const { return _mm_mul_ps(fVec, o.fVec); }
    SkNf operator / (const SkNf& o) const { return _mm_div_ps(fVec, o.fVec); }

    Nb operator == (const SkNf& o) const { return _mm_castps_si128(_mm_cmpeq_ps (fVec, o.fVec)); }
    Nb operator != (const SkNf& o) const { return _mm_castps_si128(_mm_cmpneq_ps(fVec, o.fVec)); }
    Nb operator < (const SkNf& o) const { return _mm_castps_si128(_mm_cmplt_ps (fVec, o.fVec)); }
    Nb operator > (const SkNf& o) const { return _mm_castps_si128(_mm_cmpgt_ps (fVec, o.fVec)); }
    Nb operator <= (const SkNf& o) const { return _mm_castps_si128(_mm_cmple_ps (fVec, o.fVec)); }
    Nb operator >= (const SkNf& o) const { return _mm_castps_si128(_mm_cmpge_ps (fVec, o.fVec)); }

    static SkNf Min(const SkNf& l, const SkNf& r) { return _mm_min_ps(l.fVec, r.fVec); }
    static SkNf Max(const SkNf& l, const SkNf& r) { return _mm_max_ps(l.fVec, r.fVec); }

    SkNf sqrt() const { return _mm_sqrt_ps (fVec); }
    SkNf rsqrt() const { return _mm_rsqrt_ps(fVec); }

    SkNf invert() const { return SkNf(1) / *this; }
    SkNf approxInvert() const { return _mm_rcp_ps(fVec); }

    template <int k> float kth() const {
        SkASSERT(0 <= k && k < 2);
        union { __m128 v; float fs[4]; } pun = {fVec};
        return pun.fs[k&1];
    }

private:
    __m128 fVec;
};
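
// Two doubles in an __m128d.  SSE2 has no packed-double reciprocal or reciprocal-sqrt
// approximation, so rsqrt() and approxInvert() round-trip through single precision.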
template <>
class SkNf<2, double> {
    typedef SkNb<2, 8> Nb;
public:
    SkNf(const __m128d& vec) : fVec(vec) {}
    SkNf() {}
    explicit SkNf(double val) : fVec( _mm_set1_pd(val) ) {}
    static SkNf Load(const double vals[2]) { return _mm_loadu_pd(vals); }
    SkNf(double a, double b) : fVec(_mm_setr_pd(a,b)) {}

    void store(double vals[2]) const { _mm_storeu_pd(vals, fVec); }

    SkNf operator + (const SkNf& o) const { return _mm_add_pd(fVec, o.fVec); }
    SkNf operator - (const SkNf& o) const { return _mm_sub_pd(fVec, o.fVec); }
    SkNf operator * (const SkNf& o) const { return _mm_mul_pd(fVec, o.fVec); }
    SkNf operator / (const SkNf& o) const { return _mm_div_pd(fVec, o.fVec); }

    Nb operator == (const SkNf& o) const { return _mm_castpd_si128(_mm_cmpeq_pd (fVec, o.fVec)); }
    Nb operator != (const SkNf& o) const { return _mm_castpd_si128(_mm_cmpneq_pd(fVec, o.fVec)); }
    Nb operator < (const SkNf& o) const { return _mm_castpd_si128(_mm_cmplt_pd (fVec, o.fVec)); }
    Nb operator > (const SkNf& o) const { return _mm_castpd_si128(_mm_cmpgt_pd (fVec, o.fVec)); }
    Nb operator <= (const SkNf& o) const { return _mm_castpd_si128(_mm_cmple_pd (fVec, o.fVec)); }
    Nb operator >= (const SkNf& o) const { return _mm_castpd_si128(_mm_cmpge_pd (fVec, o.fVec)); }

    static SkNf Min(const SkNf& l, const SkNf& r) { return _mm_min_pd(l.fVec, r.fVec); }
    static SkNf Max(const SkNf& l, const SkNf& r) { return _mm_max_pd(l.fVec, r.fVec); }

    SkNf sqrt() const { return _mm_sqrt_pd(fVec); }
    SkNf rsqrt() const { return _mm_cvtps_pd(_mm_rsqrt_ps(_mm_cvtpd_ps(fVec))); }

    SkNf invert() const { return SkNf(1) / *this; }
    SkNf approxInvert() const { return _mm_cvtps_pd(_mm_rcp_ps(_mm_cvtpd_ps(fVec))); }

    template <int k> double kth() const {
        SkASSERT(0 <= k && k < 2);
        union { __m128d v; double ds[2]; } pun = {fVec};
        return pun.ds[k&1];
    }

private:
    __m128d fVec;
};
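
// Four floats filling an __m128.  rsqrt() and approxInvert() use the fast _mm_rsqrt_ps
// and _mm_rcp_ps approximations (roughly 12 bits of precision); sqrt() and invert()
// give full precision at higher cost.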
template <>
class SkNf<4, float> {
    typedef SkNb<4, 4> Nb;
public:
    SkNf(const __m128& vec) : fVec(vec) {}
    SkNf() {}
    explicit SkNf(float val) : fVec( _mm_set1_ps(val) ) {}
    static SkNf Load(const float vals[4]) { return _mm_loadu_ps(vals); }
    SkNf(float a, float b, float c, float d) : fVec(_mm_setr_ps(a,b,c,d)) {}

    void store(float vals[4]) const { _mm_storeu_ps(vals, fVec); }

    SkNf operator + (const SkNf& o) const { return _mm_add_ps(fVec, o.fVec); }
    SkNf operator - (const SkNf& o) const { return _mm_sub_ps(fVec, o.fVec); }
    SkNf operator * (const SkNf& o) const { return _mm_mul_ps(fVec, o.fVec); }
    SkNf operator / (const SkNf& o) const { return _mm_div_ps(fVec, o.fVec); }

    Nb operator == (const SkNf& o) const { return _mm_castps_si128(_mm_cmpeq_ps (fVec, o.fVec)); }
    Nb operator != (const SkNf& o) const { return _mm_castps_si128(_mm_cmpneq_ps(fVec, o.fVec)); }
    Nb operator < (const SkNf& o) const { return _mm_castps_si128(_mm_cmplt_ps (fVec, o.fVec)); }
    Nb operator > (const SkNf& o) const { return _mm_castps_si128(_mm_cmpgt_ps (fVec, o.fVec)); }
    Nb operator <= (const SkNf& o) const { return _mm_castps_si128(_mm_cmple_ps (fVec, o.fVec)); }
    Nb operator >= (const SkNf& o) const { return _mm_castps_si128(_mm_cmpge_ps (fVec, o.fVec)); }

    static SkNf Min(const SkNf& l, const SkNf& r) { return _mm_min_ps(l.fVec, r.fVec); }
    static SkNf Max(const SkNf& l, const SkNf& r) { return _mm_max_ps(l.fVec, r.fVec); }

    SkNf sqrt() const { return _mm_sqrt_ps (fVec); }
    SkNf rsqrt() const { return _mm_rsqrt_ps(fVec); }

    SkNf invert() const { return SkNf(1) / *this; }
    SkNf approxInvert() const { return _mm_rcp_ps(fVec); }

    template <int k> float kth() const {
        SkASSERT(0 <= k && k < 4);
        union { __m128 v; float fs[4]; } pun = {fVec};
        return pun.fs[k&3];
    }

protected:
    __m128 fVec;
};
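
// Four uint16_t lanes in the low 64 bits of an __m128i; Load() and store() move 8 bytes.
// operator* is _mm_mullo_epi16 (low 16 bits of each product) and operator>> is a logical
// shift, matching the unsigned lane type.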
template <>
class SkNi<4, uint16_t> {
public:
    SkNi(const __m128i& vec) : fVec(vec) {}
    SkNi() {}
    explicit SkNi(uint16_t val) : fVec(_mm_set1_epi16(val)) {}
    static SkNi Load(const uint16_t vals[4]) { return _mm_loadl_epi64((const __m128i*)vals); }
    SkNi(uint16_t a, uint16_t b, uint16_t c, uint16_t d) : fVec(_mm_setr_epi16(a,b,c,d,0,0,0,0)) {}

    void store(uint16_t vals[4]) const { _mm_storel_epi64((__m128i*)vals, fVec); }

    SkNi operator + (const SkNi& o) const { return _mm_add_epi16(fVec, o.fVec); }
    SkNi operator - (const SkNi& o) const { return _mm_sub_epi16(fVec, o.fVec); }
    SkNi operator * (const SkNi& o) const { return _mm_mullo_epi16(fVec, o.fVec); }

    SkNi operator << (int bits) const { return _mm_slli_epi16(fVec, bits); }
    SkNi operator >> (int bits) const { return _mm_srli_epi16(fVec, bits); }

    template <int k> uint16_t kth() const {
        SkASSERT(0 <= k && k < 4);
        return _mm_extract_epi16(fVec, k);
    }

protected:
    __m128i fVec;
};
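
// Eight uint16_t lanes filling an __m128i.  kth() uses _mm_extract_epi16, which requires
// a compile-time lane index; the template parameter k provides it.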
template <>
class SkNi<8, uint16_t> {
public:
    SkNi(const __m128i& vec) : fVec(vec) {}
    SkNi() {}
    explicit SkNi(uint16_t val) : fVec(_mm_set1_epi16(val)) {}
    static SkNi Load(const uint16_t vals[8]) { return _mm_loadu_si128((const __m128i*)vals); }
    SkNi(uint16_t a, uint16_t b, uint16_t c, uint16_t d,
         uint16_t e, uint16_t f, uint16_t g, uint16_t h) : fVec(_mm_setr_epi16(a,b,c,d,e,f,g,h)) {}

    void store(uint16_t vals[8]) const { _mm_storeu_si128((__m128i*)vals, fVec); }

    SkNi operator + (const SkNi& o) const { return _mm_add_epi16(fVec, o.fVec); }
    SkNi operator - (const SkNi& o) const { return _mm_sub_epi16(fVec, o.fVec); }
    SkNi operator * (const SkNi& o) const { return _mm_mullo_epi16(fVec, o.fVec); }

    SkNi operator << (int bits) const { return _mm_slli_epi16(fVec, bits); }
    SkNi operator >> (int bits) const { return _mm_srli_epi16(fVec, bits); }

    template <int k> uint16_t kth() const {
        SkASSERT(0 <= k && k < 8);
        return _mm_extract_epi16(fVec, k);
    }

protected:
    __m128i fVec;
};

#endif//SkNx_sse_DEFINED