/*
* Copyright 2015 Google Inc.
*
* Use of this source code is governed by a BSD-style license that can be
* found in the LICENSE file.
*/
#ifndef SkNx_sse_DEFINED
#define SkNx_sse_DEFINED
#include <immintrin.h>
// This file may assume <= SSE2, but must check SK_CPU_SSE_LEVEL for anything more recent.
// If you do, make sure this is in a static inline function... anywhere else risks violating ODR.
namespace {
template <>
class SkNx<2, float> {
public:
AI SkNx(const __m128& vec) : fVec(vec) {}
AI SkNx() {}
AI SkNx(float val) : fVec(_mm_set1_ps(val)) {}
AI static SkNx Load(const void* ptr) {
return _mm_castsi128_ps(_mm_loadl_epi64((const __m128i*)ptr));
}
AI SkNx(float a, float b) : fVec(_mm_setr_ps(a,b,0,0)) {}
AI void store(void* ptr) const { _mm_storel_pi((__m64*)ptr, fVec); }
AI SkNx operator - () const { return _mm_xor_ps(_mm_set1_ps(-0.0f), fVec); }
AI SkNx operator + (const SkNx& o) const { return _mm_add_ps(fVec, o.fVec); }
AI SkNx operator - (const SkNx& o) const { return _mm_sub_ps(fVec, o.fVec); }
AI SkNx operator * (const SkNx& o) const { return _mm_mul_ps(fVec, o.fVec); }
AI SkNx operator / (const SkNx& o) const { return _mm_div_ps(fVec, o.fVec); }
AI SkNx operator == (const SkNx& o) const { return _mm_cmpeq_ps (fVec, o.fVec); }
AI SkNx operator != (const SkNx& o) const { return _mm_cmpneq_ps(fVec, o.fVec); }
AI SkNx operator < (const SkNx& o) const { return _mm_cmplt_ps (fVec, o.fVec); }
AI SkNx operator > (const SkNx& o) const { return _mm_cmpgt_ps (fVec, o.fVec); }
AI SkNx operator <= (const SkNx& o) const { return _mm_cmple_ps (fVec, o.fVec); }
AI SkNx operator >= (const SkNx& o) const { return _mm_cmpge_ps (fVec, o.fVec); }
AI static SkNx Min(const SkNx& l, const SkNx& r) { return _mm_min_ps(l.fVec, r.fVec); }
AI static SkNx Max(const SkNx& l, const SkNx& r) { return _mm_max_ps(l.fVec, r.fVec); }
AI SkNx abs() const { return _mm_andnot_ps(_mm_set1_ps(-0.0f), fVec); }
AI SkNx sqrt() const { return _mm_sqrt_ps (fVec); }
AI SkNx rsqrt() const { return _mm_rsqrt_ps(fVec); }
AI SkNx invert() const { return _mm_rcp_ps(fVec); }
AI float operator[](int k) const {
SkASSERT(0 <= k && k < 2);
union { __m128 v; float fs[4]; } pun = {fVec};
return pun.fs[k&1];
}
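// Only the low 8 movemask bits (the two float lanes this type actually uses) are checked below.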
AI bool allTrue() const { return 0xff == (_mm_movemask_epi8(_mm_castps_si128(fVec)) & 0xff); }
AI bool anyTrue() const { return 0x00 != (_mm_movemask_epi8(_mm_castps_si128(fVec)) & 0xff); }
AI SkNx thenElse(const SkNx& t, const SkNx& e) const {
#if SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_SSE41
return _mm_blendv_ps(e.fVec, t.fVec, fVec);
#else
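// SSE2 bit-select: (mask & t) | (~mask & e), assuming each lane of fVec is an all-zeros
// or all-ones comparison mask (unlike _mm_blendv_ps, which only looks at the sign bit).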
return _mm_or_ps(_mm_and_ps (fVec, t.fVec),
_mm_andnot_ps(fVec, e.fVec));
#endif
}
__m128 fVec;
};
template <>
class SkNx<4, float> {
public:
AI SkNx(const __m128& vec) : fVec(vec) {}
AI SkNx() {}
AI SkNx(float val) : fVec( _mm_set1_ps(val) ) {}
AI SkNx(float a, float b, float c, float d) : fVec(_mm_setr_ps(a,b,c,d)) {}
AI static SkNx Load(const void* ptr) { return _mm_loadu_ps((const float*)ptr); }
AI void store(void* ptr) const { _mm_storeu_ps((float*)ptr, fVec); }
AI static void Load4(const void* ptr, SkNx* r, SkNx* g, SkNx* b, SkNx* a) {
__m128 v0 = _mm_loadu_ps(((float*)ptr) + 0),
v1 = _mm_loadu_ps(((float*)ptr) + 4),
v2 = _mm_loadu_ps(((float*)ptr) + 8),
v3 = _mm_loadu_ps(((float*)ptr) + 12);
_MM_TRANSPOSE4_PS(v0, v1, v2, v3);
*r = v0;
*g = v1;
*b = v2;
*a = v3;
}
AI static void Store4(void* dst, const SkNx& r, const SkNx& g, const SkNx& b, const SkNx& a) {
__m128 v0 = r.fVec,
v1 = g.fVec,
v2 = b.fVec,
v3 = a.fVec;
_MM_TRANSPOSE4_PS(v0, v1, v2, v3);
_mm_storeu_ps(((float*) dst) + 0, v0);
_mm_storeu_ps(((float*) dst) + 4, v1);
_mm_storeu_ps(((float*) dst) + 8, v2);
_mm_storeu_ps(((float*) dst) + 12, v3);
}
AI SkNx operator - () const { return _mm_xor_ps(_mm_set1_ps(-0.0f), fVec); }
AI SkNx operator + (const SkNx& o) const { return _mm_add_ps(fVec, o.fVec); }
AI SkNx operator - (const SkNx& o) const { return _mm_sub_ps(fVec, o.fVec); }
AI SkNx operator * (const SkNx& o) const { return _mm_mul_ps(fVec, o.fVec); }
AI SkNx operator / (const SkNx& o) const { return _mm_div_ps(fVec, o.fVec); }
AI SkNx operator == (const SkNx& o) const { return _mm_cmpeq_ps (fVec, o.fVec); }
AI SkNx operator != (const SkNx& o) const { return _mm_cmpneq_ps(fVec, o.fVec); }
AI SkNx operator < (const SkNx& o) const { return _mm_cmplt_ps (fVec, o.fVec); }
AI SkNx operator > (const SkNx& o) const { return _mm_cmpgt_ps (fVec, o.fVec); }
AI SkNx operator <= (const SkNx& o) const { return _mm_cmple_ps (fVec, o.fVec); }
AI SkNx operator >= (const SkNx& o) const { return _mm_cmpge_ps (fVec, o.fVec); }
AI static SkNx Min(const SkNx& l, const SkNx& r) { return _mm_min_ps(l.fVec, r.fVec); }
AI static SkNx Max(const SkNx& l, const SkNx& r) { return _mm_max_ps(l.fVec, r.fVec); }
AI SkNx abs() const { return _mm_andnot_ps(_mm_set1_ps(-0.0f), fVec); }
AI SkNx floor() const {
#if SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_SSE41
return _mm_floor_ps(fVec);
#else
// Emulate _mm_floor_ps() with SSE2:
// - roundtrip through integers via truncation
// - subtract 1 if that's too big (possible for negative values).
// This restricts the domain of our inputs to a maximum somewhere around 2^31.
// Seems plenty big.
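// Worked example: fVec = -1.25f -> trunc gives -1, roundtrip = -1.0f > -1.25f,
// so we subtract 1 and get -2.0f, which is floor(-1.25f).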
__m128 roundtrip = _mm_cvtepi32_ps(_mm_cvttps_epi32(fVec));
__m128 too_big = _mm_cmpgt_ps(roundtrip, fVec);
return _mm_sub_ps(roundtrip, _mm_and_ps(too_big, _mm_set1_ps(1.0f)));
#endif
}
AI SkNx sqrt() const { return _mm_sqrt_ps (fVec); }
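// Note: _mm_rsqrt_ps and _mm_rcp_ps below are fast approximations (about 12 bits of precision).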
AI SkNx rsqrt() const { return _mm_rsqrt_ps(fVec); }
AI SkNx invert() const { return _mm_rcp_ps(fVec); }
AI float operator[](int k) const {
SkASSERT(0 <= k && k < 4);
union { __m128 v; float fs[4]; } pun = {fVec};
return pun.fs[k&3];
}
AI bool allTrue() const { return 0xffff == _mm_movemask_epi8(_mm_castps_si128(fVec)); }
AI bool anyTrue() const { return 0x0000 != _mm_movemask_epi8(_mm_castps_si128(fVec)); }
AI SkNx thenElse(const SkNx& t, const SkNx& e) const {
#if SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_SSE41
return _mm_blendv_ps(e.fVec, t.fVec, fVec);
#else
return _mm_or_ps(_mm_and_ps (fVec, t.fVec),
_mm_andnot_ps(fVec, e.fVec));
#endif
}
__m128 fVec;
};
template <>
class SkNx<4, int32_t> {
public:
AI SkNx(const __m128i& vec) : fVec(vec) {}
AI SkNx() {}
AI SkNx(int32_t val) : fVec(_mm_set1_epi32(val)) {}
AI static SkNx Load(const void* ptr) { return _mm_loadu_si128((const __m128i*)ptr); }
AI SkNx(int32_t a, int32_t b, int32_t c, int32_t d) : fVec(_mm_setr_epi32(a,b,c,d)) {}
AI void store(void* ptr) const { _mm_storeu_si128((__m128i*)ptr, fVec); }
AI SkNx operator + (const SkNx& o) const { return _mm_add_epi32(fVec, o.fVec); }
AI SkNx operator - (const SkNx& o) const { return _mm_sub_epi32(fVec, o.fVec); }
AI SkNx operator * (const SkNx& o) const {
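// _mm_mullo_epi32 is SSE4.1, so under SSE2 we multiply lanes {0,2} and {1,3} separately with
// _mm_mul_epu32 (32x32 -> 64, reading lanes 0 and 2), then shuffle the low 32 bits of each
// 64-bit product back into place. The low 32 bits are the same for signed and unsigned multiply.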
__m128i mul20 = _mm_mul_epu32(fVec, o.fVec),
mul31 = _mm_mul_epu32(_mm_srli_si128(fVec, 4), _mm_srli_si128(o.fVec, 4));
return _mm_unpacklo_epi32(_mm_shuffle_epi32(mul20, _MM_SHUFFLE(0,0,2,0)),
_mm_shuffle_epi32(mul31, _MM_SHUFFLE(0,0,2,0)));
}
AI SkNx operator & (const SkNx& o) const { return _mm_and_si128(fVec, o.fVec); }
AI SkNx operator | (const SkNx& o) const { return _mm_or_si128(fVec, o.fVec); }
AI SkNx operator ^ (const SkNx& o) const { return _mm_xor_si128(fVec, o.fVec); }
AI SkNx operator << (int bits) const { return _mm_slli_epi32(fVec, bits); }
AI SkNx operator >> (int bits) const { return _mm_srai_epi32(fVec, bits); }
AI SkNx operator == (const SkNx& o) const { return _mm_cmpeq_epi32 (fVec, o.fVec); }
AI SkNx operator < (const SkNx& o) const { return _mm_cmplt_epi32 (fVec, o.fVec); }
AI SkNx operator > (const SkNx& o) const { return _mm_cmpgt_epi32 (fVec, o.fVec); }
AI int32_t operator[](int k) const {
SkASSERT(0 <= k && k < 4);
union { __m128i v; int32_t is[4]; } pun = {fVec};
return pun.is[k&3];
}
AI SkNx thenElse(const SkNx& t, const SkNx& e) const {
#if SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_SSE41
return _mm_blendv_epi8(e.fVec, t.fVec, fVec);
#else
return _mm_or_si128(_mm_and_si128 (fVec, t.fVec),
_mm_andnot_si128(fVec, e.fVec));
#endif
}
AI SkNx abs() const {
#if SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_SSSE3
return _mm_abs_epi32(fVec);
#else
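// SSE2 fallback: mask is 0 for non-negative lanes and -1 for negative ones, so
// (x ^ -1) - (-1) == ~x + 1 == -x, while (x ^ 0) - 0 == x.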
SkNx mask = (*this) >> 31;
return (mask ^ (*this)) - mask;
#endif
}
AI static SkNx Min(const SkNx& x, const SkNx& y) {
#if SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_SSE41
return _mm_min_epi32(x.fVec, y.fVec);
#else
return (x < y).thenElse(x, y);
#endif
}
AI static SkNx Max(const SkNx& x, const SkNx& y) {
#if SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_SSE41
return _mm_max_epi32(x.fVec, y.fVec);
#else
return (x > y).thenElse(x, y);
#endif
}
__m128i fVec;
};
template <>
class SkNx<4, uint32_t> {
public:
AI SkNx(const __m128i& vec) : fVec(vec) {}
AI SkNx() {}
AI SkNx(uint32_t val) : fVec(_mm_set1_epi32(val)) {}
AI static SkNx Load(const void* ptr) { return _mm_loadu_si128((const __m128i*)ptr); }
AI SkNx(uint32_t a, uint32_t b, uint32_t c, uint32_t d) : fVec(_mm_setr_epi32(a,b,c,d)) {}
AI void store(void* ptr) const { _mm_storeu_si128((__m128i*)ptr, fVec); }
AI SkNx operator + (const SkNx& o) const { return _mm_add_epi32(fVec, o.fVec); }
AI SkNx operator - (const SkNx& o) const { return _mm_sub_epi32(fVec, o.fVec); }
// Not quite sure how to best do operator * in SSE2. We probably don't use it.
AI SkNx operator & (const SkNx& o) const { return _mm_and_si128(fVec, o.fVec); }
AI SkNx operator | (const SkNx& o) const { return _mm_or_si128(fVec, o.fVec); }
AI SkNx operator ^ (const SkNx& o) const { return _mm_xor_si128(fVec, o.fVec); }
AI SkNx operator << (int bits) const { return _mm_slli_epi32(fVec, bits); }
AI SkNx operator >> (int bits) const { return _mm_srli_epi32(fVec, bits); }
AI SkNx operator == (const SkNx& o) const { return _mm_cmpeq_epi32 (fVec, o.fVec); }
// operator < and > take a little extra fiddling to make them work for unsigned ints.
AI uint32_t operator[](int k) const {
SkASSERT(0 <= k && k < 4);
union { __m128i v; uint32_t us[4]; } pun = {fVec};
return pun.us[k&3];
}
AI SkNx thenElse(const SkNx& t, const SkNx& e) const {
#if SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_SSE41
return _mm_blendv_epi8(e.fVec, t.fVec, fVec);
#else
return _mm_or_si128(_mm_and_si128 (fVec, t.fVec),
_mm_andnot_si128(fVec, e.fVec));
#endif
}
__m128i fVec;
};
template <>
class SkNx<4, uint16_t> {
public:
AI SkNx(const __m128i& vec) : fVec(vec) {}
AI SkNx() {}
AI SkNx(uint16_t val) : fVec(_mm_set1_epi16(val)) {}
AI SkNx(uint16_t a, uint16_t b, uint16_t c, uint16_t d)
: fVec(_mm_setr_epi16(a,b,c,d,0,0,0,0)) {}
AI static SkNx Load(const void* ptr) { return _mm_loadl_epi64((const __m128i*)ptr); }
AI void store(void* ptr) const { _mm_storel_epi64((__m128i*)ptr, fVec); }
AI static void Load4(const void* ptr, SkNx* r, SkNx* g, SkNx* b, SkNx* a) {
__m128i lo = _mm_loadu_si128(((__m128i*)ptr) + 0),
hi = _mm_loadu_si128(((__m128i*)ptr) + 1);
__m128i even = _mm_unpacklo_epi16(lo, hi), // r0 r2 g0 g2 b0 b2 a0 a2
odd = _mm_unpackhi_epi16(lo, hi); // r1 r3 ...
__m128i rg = _mm_unpacklo_epi16(even, odd), // r0 r1 r2 r3 g0 g1 g2 g3
ba = _mm_unpackhi_epi16(even, odd); // b0 b1 ... a0 a1 ...
*r = rg;
*g = _mm_srli_si128(rg, 8);
*b = ba;
*a = _mm_srli_si128(ba, 8);
}
AI static void Load3(const void* ptr, SkNx* r, SkNx* g, SkNx* b) {
// The idea here is to get 4 vectors that are R G B _ _ _ _ _.
// The second load is at a funny location to make sure we don't read past
// the bounds of memory. This is fine; we just need to shift it a little bit.
const uint8_t* ptr8 = (const uint8_t*) ptr;
__m128i rgb0 = _mm_loadu_si128((const __m128i*) (ptr8 + 0));
__m128i rgb1 = _mm_srli_si128(rgb0, 3*2);
__m128i rgb2 = _mm_srli_si128(_mm_loadu_si128((const __m128i*) (ptr8 + 4*2)), 2*2);
__m128i rgb3 = _mm_srli_si128(rgb2, 3*2);
__m128i rrggbb01 = _mm_unpacklo_epi16(rgb0, rgb1);
__m128i rrggbb23 = _mm_unpacklo_epi16(rgb2, rgb3);
*r = _mm_unpacklo_epi32(rrggbb01, rrggbb23);
*g = _mm_srli_si128(r->fVec, 4*2);
*b = _mm_unpackhi_epi32(rrggbb01, rrggbb23);
}
AI static void Store4(void* dst, const SkNx& r, const SkNx& g, const SkNx& b, const SkNx& a) {
__m128i rg = _mm_unpacklo_epi16(r.fVec, g.fVec);
__m128i ba = _mm_unpacklo_epi16(b.fVec, a.fVec);
__m128i lo = _mm_unpacklo_epi32(rg, ba);
__m128i hi = _mm_unpackhi_epi32(rg, ba);
_mm_storeu_si128(((__m128i*) dst) + 0, lo);
_mm_storeu_si128(((__m128i*) dst) + 1, hi);
}
AI SkNx operator + (const SkNx& o) const { return _mm_add_epi16(fVec, o.fVec); }
AI SkNx operator - (const SkNx& o) const { return _mm_sub_epi16(fVec, o.fVec); }
AI SkNx operator * (const SkNx& o) const { return _mm_mullo_epi16(fVec, o.fVec); }
AI SkNx operator & (const SkNx& o) const { return _mm_and_si128(fVec, o.fVec); }
AI SkNx operator | (const SkNx& o) const { return _mm_or_si128(fVec, o.fVec); }
AI SkNx operator << (int bits) const { return _mm_slli_epi16(fVec, bits); }
AI SkNx operator >> (int bits) const { return _mm_srli_epi16(fVec, bits); }
AI uint16_t operator[](int k) const {
SkASSERT(0 <= k && k < 4);
union { __m128i v; uint16_t us[8]; } pun = {fVec};
return pun.us[k&3];
}
__m128i fVec;
};
template <>
class SkNx<8, uint16_t> {
public:
AI SkNx(const __m128i& vec) : fVec(vec) {}
AI SkNx() {}
AI SkNx(uint16_t val) : fVec(_mm_set1_epi16(val)) {}
AI SkNx(uint16_t a, uint16_t b, uint16_t c, uint16_t d,
uint16_t e, uint16_t f, uint16_t g, uint16_t h)
: fVec(_mm_setr_epi16(a,b,c,d,e,f,g,h)) {}
AI static SkNx Load(const void* ptr) { return _mm_loadu_si128((const __m128i*)ptr); }
AI void store(void* ptr) const { _mm_storeu_si128((__m128i*)ptr, fVec); }
AI static void Load4(const void* ptr, SkNx* r, SkNx* g, SkNx* b, SkNx* a) {
__m128i _01 = _mm_loadu_si128(((__m128i*)ptr) + 0),
_23 = _mm_loadu_si128(((__m128i*)ptr) + 1),
_45 = _mm_loadu_si128(((__m128i*)ptr) + 2),
_67 = _mm_loadu_si128(((__m128i*)ptr) + 3);
__m128i _02 = _mm_unpacklo_epi16(_01, _23), // r0 r2 g0 g2 b0 b2 a0 a2
_13 = _mm_unpackhi_epi16(_01, _23), // r1 r3 g1 g3 b1 b3 a1 a3
_46 = _mm_unpacklo_epi16(_45, _67),
_57 = _mm_unpackhi_epi16(_45, _67);
__m128i rg0123 = _mm_unpacklo_epi16(_02, _13), // r0 r1 r2 r3 g0 g1 g2 g3
ba0123 = _mm_unpackhi_epi16(_02, _13), // b0 b1 b2 b3 a0 a1 a2 a3
rg4567 = _mm_unpacklo_epi16(_46, _57),
ba4567 = _mm_unpackhi_epi16(_46, _57);
*r = _mm_unpacklo_epi64(rg0123, rg4567);
*g = _mm_unpackhi_epi64(rg0123, rg4567);
*b = _mm_unpacklo_epi64(ba0123, ba4567);
*a = _mm_unpackhi_epi64(ba0123, ba4567);
}
AI static void Load3(const void* ptr, SkNx* r, SkNx* g, SkNx* b) {
const uint8_t* ptr8 = (const uint8_t*) ptr;
__m128i rgb0 = _mm_loadu_si128((const __m128i*) (ptr8 + 0*2));
__m128i rgb1 = _mm_srli_si128(rgb0, 3*2);
__m128i rgb2 = _mm_loadu_si128((const __m128i*) (ptr8 + 6*2));
__m128i rgb3 = _mm_srli_si128(rgb2, 3*2);
__m128i rgb4 = _mm_loadu_si128((const __m128i*) (ptr8 + 12*2));
__m128i rgb5 = _mm_srli_si128(rgb4, 3*2);
__m128i rgb6 = _mm_srli_si128(_mm_loadu_si128((const __m128i*) (ptr8 + 16*2)), 2*2);
__m128i rgb7 = _mm_srli_si128(rgb6, 3*2);
__m128i rgb01 = _mm_unpacklo_epi16(rgb0, rgb1);
__m128i rgb23 = _mm_unpacklo_epi16(rgb2, rgb3);
__m128i rgb45 = _mm_unpacklo_epi16(rgb4, rgb5);
__m128i rgb67 = _mm_unpacklo_epi16(rgb6, rgb7);
__m128i rg03 = _mm_unpacklo_epi32(rgb01, rgb23);
__m128i bx03 = _mm_unpackhi_epi32(rgb01, rgb23);
__m128i rg47 = _mm_unpacklo_epi32(rgb45, rgb67);
__m128i bx47 = _mm_unpackhi_epi32(rgb45, rgb67);
*r = _mm_unpacklo_epi64(rg03, rg47);
*g = _mm_unpackhi_epi64(rg03, rg47);
*b = _mm_unpacklo_epi64(bx03, bx47);
}
AI static void Store4(void* ptr, const SkNx& r, const SkNx& g, const SkNx& b, const SkNx& a) {
__m128i rg0123 = _mm_unpacklo_epi16(r.fVec, g.fVec), // r0 g0 r1 g1 r2 g2 r3 g3
rg4567 = _mm_unpackhi_epi16(r.fVec, g.fVec), // r4 g4 r5 g5 r6 g6 r7 g7
ba0123 = _mm_unpacklo_epi16(b.fVec, a.fVec),
ba4567 = _mm_unpackhi_epi16(b.fVec, a.fVec);
_mm_storeu_si128((__m128i*)ptr + 0, _mm_unpacklo_epi32(rg0123, ba0123));
_mm_storeu_si128((__m128i*)ptr + 1, _mm_unpackhi_epi32(rg0123, ba0123));
_mm_storeu_si128((__m128i*)ptr + 2, _mm_unpacklo_epi32(rg4567, ba4567));
_mm_storeu_si128((__m128i*)ptr + 3, _mm_unpackhi_epi32(rg4567, ba4567));
}
AI SkNx operator + (const SkNx& o) const { return _mm_add_epi16(fVec, o.fVec); }
AI SkNx operator - (const SkNx& o) const { return _mm_sub_epi16(fVec, o.fVec); }
AI SkNx operator * (const SkNx& o) const { return _mm_mullo_epi16(fVec, o.fVec); }
AI SkNx operator & (const SkNx& o) const { return _mm_and_si128(fVec, o.fVec); }
AI SkNx operator | (const SkNx& o) const { return _mm_or_si128(fVec, o.fVec); }
AI SkNx operator << (int bits) const { return _mm_slli_epi16(fVec, bits); }
AI SkNx operator >> (int bits) const { return _mm_srli_epi16(fVec, bits); }
AI static SkNx Min(const SkNx& a, const SkNx& b) {
// No unsigned _mm_min_epu16, so we'll shift into a space where we can use the
// signed version, _mm_min_epi16, then shift back.
const uint16_t top = 0x8000; // Keep this separate from _mm_set1_epi16 or MSVC will whine.
const __m128i top_8x = _mm_set1_epi16(top);
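// The byte-wise _mm_add_epi8/_mm_sub_epi8 are fine here: the low byte's addend is zero, and
// adding 0x80 to the high byte just flips its top bit (any carry is discarded), which is
// exactly what a 16-bit add/sub of 0x8000 would do.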
return _mm_add_epi8(top_8x, _mm_min_epi16(_mm_sub_epi8(a.fVec, top_8x),
_mm_sub_epi8(b.fVec, top_8x)));
}
AI SkNx thenElse(const SkNx& t, const SkNx& e) const {
return _mm_or_si128(_mm_and_si128 (fVec, t.fVec),
_mm_andnot_si128(fVec, e.fVec));
}
AI uint16_t operator[](int k) const {
SkASSERT(0 <= k && k < 8);
union { __m128i v; uint16_t us[8]; } pun = {fVec};
return pun.us[k&7];
}
__m128i fVec;
};
template <>
class SkNx<4, uint8_t> {
public:
AI SkNx() {}
AI SkNx(const __m128i& vec) : fVec(vec) {}
AI SkNx(uint8_t a, uint8_t b, uint8_t c, uint8_t d)
: fVec(_mm_setr_epi8(a,b,c,d, 0,0,0,0, 0,0,0,0, 0,0,0,0)) {}
AI static SkNx Load(const void* ptr) { return _mm_cvtsi32_si128(*(const int*)ptr); }
AI void store(void* ptr) const { *(int*)ptr = _mm_cvtsi128_si32(fVec); }
AI uint8_t operator[](int k) const {
SkASSERT(0 <= k && k < 4);
union { __m128i v; uint8_t us[16]; } pun = {fVec};
return pun.us[k&3];
}
// TODO as needed
__m128i fVec;
};
template <>
class SkNx<16, uint8_t> {
public:
AI SkNx(const __m128i& vec) : fVec(vec) {}
AI SkNx() {}
AI SkNx(uint8_t val) : fVec(_mm_set1_epi8(val)) {}
AI static SkNx Load(const void* ptr) { return _mm_loadu_si128((const __m128i*)ptr); }
AI SkNx(uint8_t a, uint8_t b, uint8_t c, uint8_t d,
uint8_t e, uint8_t f, uint8_t g, uint8_t h,
uint8_t i, uint8_t j, uint8_t k, uint8_t l,
uint8_t m, uint8_t n, uint8_t o, uint8_t p)
: fVec(_mm_setr_epi8(a,b,c,d, e,f,g,h, i,j,k,l, m,n,o,p)) {}
AI void store(void* ptr) const { _mm_storeu_si128((__m128i*)ptr, fVec); }
AI SkNx saturatedAdd(const SkNx& o) const { return _mm_adds_epu8(fVec, o.fVec); }
AI SkNx operator + (const SkNx& o) const { return _mm_add_epi8(fVec, o.fVec); }
AI SkNx operator - (const SkNx& o) const { return _mm_sub_epi8(fVec, o.fVec); }
AI static SkNx Min(const SkNx& a, const SkNx& b) { return _mm_min_epu8(a.fVec, b.fVec); }
AI SkNx operator < (const SkNx& o) const {
// There's no unsigned _mm_cmplt_epu8, so we flip the sign bits then use a signed compare.
auto flip = _mm_set1_epi8(char(0x80));
return _mm_cmplt_epi8(_mm_xor_si128(flip, fVec), _mm_xor_si128(flip, o.fVec));
}
AI uint8_t operator[](int k) const {
SkASSERT(0 <= k && k < 16);
union { __m128i v; uint8_t us[16]; } pun = {fVec};
return pun.us[k&15];
}
AI SkNx thenElse(const SkNx& t, const SkNx& e) const {
return _mm_or_si128(_mm_and_si128 (fVec, t.fVec),
_mm_andnot_si128(fVec, e.fVec));
}
__m128i fVec;
};
template<> AI /*static*/ Sk4f SkNx_cast<float, int32_t>(const Sk4i& src) {
return _mm_cvtepi32_ps(src.fVec);
}
template<> AI /*static*/ Sk4f SkNx_cast<float, uint32_t>(const Sk4u& src) {
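// Reinterpret the unsigned lanes as signed and convert; lanes >= 2^31 would come out negative.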
return SkNx_cast<float>(Sk4i::Load(&src));
}
template <> AI /*static*/ Sk4i SkNx_cast<int32_t, float>(const Sk4f& src) {
return _mm_cvttps_epi32(src.fVec);
}
template<> AI /*static*/ Sk4h SkNx_cast<uint16_t, int32_t>(const Sk4i& src) {
#if 0 && SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_SSE41
// TODO: This seems to be causing code generation problems. Investigate?
return _mm_packus_epi32(src.fVec);
#elif SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_SSSE3
// With SSSE3, we can just shuffle the low 2 bytes from each lane right into place.
const int _ = ~0;
return _mm_shuffle_epi8(src.fVec, _mm_setr_epi8(0,1, 4,5, 8,9, 12,13, _,_,_,_,_,_,_,_));
#else
// With SSE2, we have to sign extend our input, making _mm_packs_epi32 do the pack we want.
__m128i x = _mm_srai_epi32(_mm_slli_epi32(src.fVec, 16), 16);
return _mm_packs_epi32(x,x);
#endif
}
template<> AI /*static*/ Sk4h SkNx_cast<uint16_t, float>(const Sk4f& src) {
return SkNx_cast<uint16_t>(SkNx_cast<int32_t>(src));
}
template<> AI /*static*/ Sk4b SkNx_cast<uint8_t, float>(const Sk4f& src) {
auto _32 = _mm_cvttps_epi32(src.fVec);
#if SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_SSSE3
const int _ = ~0;
return _mm_shuffle_epi8(_32, _mm_setr_epi8(0,4,8,12, _,_,_,_, _,_,_,_, _,_,_,_));
#else
auto _16 = _mm_packus_epi16(_32, _32);
return _mm_packus_epi16(_16, _16);
#endif
}
template<> AI /*static*/ Sk4i SkNx_cast<int32_t, uint8_t>(const Sk4b& src) {
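// Zero-extend four bytes to four 32-bit lanes: with SSSE3, pshufb drops each byte into the low
// byte of its lane (the ~0 indices produce zeros); with SSE2, unpack against zero twice.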
#if SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_SSSE3
const int _ = ~0;
return _mm_shuffle_epi8(src.fVec, _mm_setr_epi8(0,_,_,_, 1,_,_,_, 2,_,_,_, 3,_,_,_));
#else
auto _16 = _mm_unpacklo_epi8(src.fVec, _mm_setzero_si128());
return _mm_unpacklo_epi16(_16, _mm_setzero_si128());
#endif
}
template<> AI /*static*/ Sk4f SkNx_cast<float, uint8_t>(const Sk4b& src) {
return _mm_cvtepi32_ps(SkNx_cast<int32_t>(src).fVec);
}
template<> AI /*static*/ Sk4f SkNx_cast<float, uint16_t>(const Sk4h& src) {
auto _32 = _mm_unpacklo_epi16(src.fVec, _mm_setzero_si128());
return _mm_cvtepi32_ps(_32);
}
template<> AI /*static*/ Sk16b SkNx_cast<uint8_t, float>(const Sk16f& src) {
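// Truncate each group of four floats to int32, then pack 32 -> 16 -> 8 bits with two rounds of
// _mm_packus_epi16. For values that fit in a byte the intermediate high bytes come out zero,
// so the first pack's output re-reads as valid 16-bit lanes for the second.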
Sk8f ab, cd;
SkNx_split(src, &ab, &cd);
Sk4f a,b,c,d;
SkNx_split(ab, &a, &b);
SkNx_split(cd, &c, &d);
return _mm_packus_epi16(_mm_packus_epi16(_mm_cvttps_epi32(a.fVec),
_mm_cvttps_epi32(b.fVec)),
_mm_packus_epi16(_mm_cvttps_epi32(c.fVec),
_mm_cvttps_epi32(d.fVec)));
}
template<> AI /*static*/ Sk4h SkNx_cast<uint16_t, uint8_t>(const Sk4b& src) {
return _mm_unpacklo_epi8(src.fVec, _mm_setzero_si128());
}
template<> AI /*static*/ Sk4b SkNx_cast<uint8_t, uint16_t>(const Sk4h& src) {
return _mm_packus_epi16(src.fVec, src.fVec);
}
template<> AI /*static*/ Sk4i SkNx_cast<int32_t, uint16_t>(const Sk4h& src) {
return _mm_unpacklo_epi16(src.fVec, _mm_setzero_si128());
}
template<> AI /*static*/ Sk4b SkNx_cast<uint8_t, int32_t>(const Sk4i& src) {
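// Two rounds of _mm_packus_epi16 squeeze the 32-bit lanes down to bytes; only the low four
// bytes of the result matter for an Sk4b.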
return _mm_packus_epi16(_mm_packus_epi16(src.fVec, src.fVec), src.fVec);
}
template<> AI /*static*/ Sk4i SkNx_cast<int32_t, uint32_t>(const Sk4u& src) {
return src.fVec;
}
AI static Sk4i Sk4f_round(const Sk4f& x) {
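// _mm_cvtps_epi32 rounds to nearest (even) under the default MXCSR rounding mode.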
return _mm_cvtps_epi32(x.fVec);
}
} // namespace
#endif//SkNx_sse_DEFINED