author    mtklein <mtklein@chromium.org>    2015-12-11 12:04:47 -0800
committer Commit bot <commit-bot@chromium.org>    2015-12-11 12:04:48 -0800
commit  52e2581700b719aad317605160a2cef45d3db68b (patch)
tree    5bbd62f91d3eabf9e0820abde4ac69688d3d3a04
parent  0ff46c06b7035ca66a1a459f21160f1d5ba7fcb2 (diff)
archive skpx... currently dead code
BUG=skia:
CQ_EXTRA_TRYBOTS=client.skia:Test-Ubuntu-GCC-GCE-CPU-AVX2-x86_64-Release-SKNX_NO_SIMD-Trybot
Review URL: https://codereview.chromium.org/1521623003
-rw-r--r--  src/core/SkPx.h        89
-rw-r--r--  src/opts/SkPx_neon.h  188
-rw-r--r--  src/opts/SkPx_none.h  111
-rw-r--r--  src/opts/SkPx_sse.h   155
4 files changed, 0 insertions, 543 deletions
diff --git a/src/core/SkPx.h b/src/core/SkPx.h
deleted file mode 100644
index 62ec598cd6..0000000000
--- a/src/core/SkPx.h
+++ /dev/null
@@ -1,89 +0,0 @@
-/*
- * Copyright 2015 Google Inc.
- *
- * Use of this source code is governed by a BSD-style license that can be
- * found in the LICENSE file.
- */
-
-#ifndef SkPx_DEFINED
-#define SkPx_DEFINED
-
-#include "SkTypes.h"
-#include "SkColorPriv.h"
-
-// We'll include one of src/opts/SkPx_{sse,neon,none}.h to define a type SkPx.
-//
-// SkPx represents up to SkPx::N 8888 pixels. It's agnostic to whether these
-// are SkColors or SkPMColors; it only assumes that alpha is the high byte.
-static_assert(SK_A32_SHIFT == 24, "For both SkColor and SkPMColor, alpha is always the high byte.");
-//
-// SkPx::Alpha represents up to SkPx::N 8-bit values, usually coverage or alpha.
-// SkPx::Wide represents up to SkPx::N pixels with 16 bits per component.
-//
-// SkPx supports the following methods:
-// static SkPx Dup(uint32_t);
-// static SkPx Load(const uint32_t*);
-// static SkPx Load(const uint32_t*, int n); // where 0<n<SkPx::N
-// void store(uint32_t*) const;
-// void store(uint32_t*, int n) const; // where 0<n<SkPx::N
-//
-// Alpha alpha() const; // argb -> a
-// Wide widenLo() const; // argb -> 0a0r0g0b
-// Wide widenHi() const; // argb -> a0r0g0b0
-// Wide widenLoHi() const; // argb -> aarrggbb
-//
-// SkPx operator+(const SkPx&) const;
-// SkPx operator-(const SkPx&) const;
-// SkPx saturatedAdd(const SkPx&) const;
-//
-// Wide operator*(const Alpha&) const; // argb * A -> (a*A)(r*A)(g*A)(b*A)
-//
-// // Fast approximate (px*a+127)/255.
-// // Never off by more than 1, and always correct when px or a is 0 or 255.
-// // We use the approximation (px*a+px)/256.
-// SkPx approxMulDiv255(const Alpha&) const;
-//
-// SkPx addAlpha(const Alpha&) const; // argb + A -> (a+A)rgb
-//
-// SkPx::Alpha supports the following methods:
-// static Alpha Dup(uint8_t);
-// static Alpha Load(const uint8_t*);
-// static Alpha Load(const uint8_t*, int n); // where 0<n<SkPx::N
-//
-// Alpha inv() const; // a -> 255-a
-//
-// SkPx::Wide supports the following methods:
-// Wide operator+(const Wide&);
-// Wide operator-(const Wide&);
-// Wide shl<int bits>();
-// Wide shr<int bits>();
-//
-// // Return the high byte of each component of (*this + o.widenLo()).
-// SkPx addNarrowHi(const SkPx& o);
-//
-// Methods left unwritten, but certainly to come:
-// SkPx SkPx::operator<(const SkPx&) const;
-// SkPx SkPx::thenElse(const SkPx& then, const SkPx& else) const;
-// Wide Wide::operator<(const Wide&) const;
-// Wide Wide::thenElse(const Wide& then, const Wide& else) const;
-//
-// SkPx Wide::div255() const; // Rounds, think (*this + 127) / 255.
-//
-// The different implementations of SkPx have complete freedom to choose
-// SkPx::N and how they represent SkPx, SkPx::Alpha, and SkPx::Wide.
-//
-// All observable math must remain identical.
-
-#if defined(SKNX_NO_SIMD)
- #include "../opts/SkPx_none.h"
-#else
- #if SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_SSE2
- #include "../opts/SkPx_sse.h"
- #elif defined(SK_ARM_HAS_NEON)
- #include "../opts/SkPx_neon.h"
- #else
- #include "../opts/SkPx_none.h"
- #endif
-#endif
-
-#endif//SkPx_DEFINED
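
For orientation, the comment block in the deleted SkPx.h above spells out the whole SkPx contract. A caller would have looked roughly like the sketch below: process SkPx::N pixels per iteration, then fall back to the partial Load/store overloads for the tail. This is an illustrative sketch written against that comment, not code from the Skia tree; blend_row and the src-over formula are assumptions.

// Hypothetical caller of the SkPx interface documented above (assumes the
// pre-CL SkPx.h is included).  Computes src-over: dst' = src + dst*(255-sa)/255.
static void blend_row(uint32_t* dst, const uint32_t* src, int count) {
    while (count >= SkPx::N) {
        SkPx s = SkPx::Load(src),
             d = SkPx::Load(dst);
        d.approxMulDiv255(s.alpha().inv()).saturatedAdd(s).store(dst);
        dst += SkPx::N; src += SkPx::N; count -= SkPx::N;
    }
    if (count > 0) {
        SkPx s = SkPx::Load(src, count),
             d = SkPx::Load(dst, count);
        d.approxMulDiv255(s.alpha().inv()).saturatedAdd(s).store(dst, count);
    }
}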
diff --git a/src/opts/SkPx_neon.h b/src/opts/SkPx_neon.h
deleted file mode 100644
index 23a0934ab2..0000000000
--- a/src/opts/SkPx_neon.h
+++ /dev/null
@@ -1,188 +0,0 @@
-/*
- * Copyright 2015 Google Inc.
- *
- * Use of this source code is governed by a BSD-style license that can be
- * found in the LICENSE file.
- */
-
-#ifndef SkPx_neon_DEFINED
-#define SkPx_neon_DEFINED
-
-// When we have NEON, we like to work 8 pixels at a time.
-// This lets us exploit vld4/vst4 and represent SkPx as planar uint8x8x4_t,
-// Wide as planar uint16x8x4_t, and Alpha as a single uint8x8_t plane.
-
-namespace neon {
-
-struct SkPx {
- static const int N = 8;
-
- uint8x8x4_t fVec;
- SkPx(uint8x8x4_t vec) : fVec(vec) {}
-
- static SkPx Dup(uint32_t px) { return vld4_dup_u8((const uint8_t*)&px); }
- static SkPx Load(const uint32_t* px) { return vld4_u8((const uint8_t*)px); }
- static SkPx Load(const uint32_t* px, int n) {
- SkASSERT(0 < n && n < 8);
- uint8x8x4_t v = vld4_dup_u8((const uint8_t*)px); // n>=1, so start all lanes with pixel 0.
- switch (n) {
- case 7: v = vld4_lane_u8((const uint8_t*)(px+6), v, 6); // fall through
- case 6: v = vld4_lane_u8((const uint8_t*)(px+5), v, 5); // fall through
- case 5: v = vld4_lane_u8((const uint8_t*)(px+4), v, 4); // fall through
- case 4: v = vld4_lane_u8((const uint8_t*)(px+3), v, 3); // fall through
- case 3: v = vld4_lane_u8((const uint8_t*)(px+2), v, 2); // fall through
- case 2: v = vld4_lane_u8((const uint8_t*)(px+1), v, 1);
- }
- return v;
- }
-
- void store(uint32_t* px) const { vst4_u8((uint8_t*)px, fVec); }
- void store(uint32_t* px, int n) const {
- SkASSERT(0 < n && n < 8);
- switch (n) {
- case 7: vst4_lane_u8((uint8_t*)(px+6), fVec, 6);
- case 6: vst4_lane_u8((uint8_t*)(px+5), fVec, 5);
- case 5: vst4_lane_u8((uint8_t*)(px+4), fVec, 4);
- case 4: vst4_lane_u8((uint8_t*)(px+3), fVec, 3);
- case 3: vst4_lane_u8((uint8_t*)(px+2), fVec, 2);
- case 2: vst4_lane_u8((uint8_t*)(px+1), fVec, 1);
- case 1: vst4_lane_u8((uint8_t*)(px+0), fVec, 0);
- }
- }
-
- struct Alpha {
- uint8x8_t fA;
- Alpha(uint8x8_t a) : fA(a) {}
-
- static Alpha Dup(uint8_t a) { return vdup_n_u8(a); }
- static Alpha Load(const uint8_t* a) { return vld1_u8(a); }
- static Alpha Load(const uint8_t* a, int n) {
- SkASSERT(0 < n && n < 8);
- uint8x8_t v = vld1_dup_u8(a); // n>=1, so start all lanes with alpha 0.
- switch (n) {
- case 7: v = vld1_lane_u8(a+6, v, 6); // fall through
- case 6: v = vld1_lane_u8(a+5, v, 5); // fall through
- case 5: v = vld1_lane_u8(a+4, v, 4); // fall through
- case 4: v = vld1_lane_u8(a+3, v, 3); // fall through
- case 3: v = vld1_lane_u8(a+2, v, 2); // fall through
- case 2: v = vld1_lane_u8(a+1, v, 1);
- }
- return v;
- }
- Alpha inv() const { return vsub_u8(vdup_n_u8(255), fA); }
- };
-
- struct Wide {
- uint16x8x4_t fVec;
- Wide(uint16x8x4_t vec) : fVec(vec) {}
-
- Wide operator+(const Wide& o) const {
- return (uint16x8x4_t) {{
- vaddq_u16(fVec.val[0], o.fVec.val[0]),
- vaddq_u16(fVec.val[1], o.fVec.val[1]),
- vaddq_u16(fVec.val[2], o.fVec.val[2]),
- vaddq_u16(fVec.val[3], o.fVec.val[3]),
- }};
- }
- Wide operator-(const Wide& o) const {
- return (uint16x8x4_t) {{
- vsubq_u16(fVec.val[0], o.fVec.val[0]),
- vsubq_u16(fVec.val[1], o.fVec.val[1]),
- vsubq_u16(fVec.val[2], o.fVec.val[2]),
- vsubq_u16(fVec.val[3], o.fVec.val[3]),
- }};
- }
-
- template <int bits> Wide shl() const {
- return (uint16x8x4_t) {{
- vshlq_n_u16(fVec.val[0], bits),
- vshlq_n_u16(fVec.val[1], bits),
- vshlq_n_u16(fVec.val[2], bits),
- vshlq_n_u16(fVec.val[3], bits),
- }};
- }
- template <int bits> Wide shr() const {
- return (uint16x8x4_t) {{
- vshrq_n_u16(fVec.val[0], bits),
- vshrq_n_u16(fVec.val[1], bits),
- vshrq_n_u16(fVec.val[2], bits),
- vshrq_n_u16(fVec.val[3], bits),
- }};
- }
-
- SkPx addNarrowHi(const SkPx& o) const {
- return (uint8x8x4_t) {{
- vshrn_n_u16(vaddw_u8(fVec.val[0], o.fVec.val[0]), 8),
- vshrn_n_u16(vaddw_u8(fVec.val[1], o.fVec.val[1]), 8),
- vshrn_n_u16(vaddw_u8(fVec.val[2], o.fVec.val[2]), 8),
- vshrn_n_u16(vaddw_u8(fVec.val[3], o.fVec.val[3]), 8),
- }};
- }
- };
-
- Alpha alpha() const { return fVec.val[3]; }
-
- Wide widenLo() const {
- return (uint16x8x4_t) {{
- vmovl_u8(fVec.val[0]),
- vmovl_u8(fVec.val[1]),
- vmovl_u8(fVec.val[2]),
- vmovl_u8(fVec.val[3]),
- }};
- }
- // TODO: these two can probably be done faster.
- Wide widenHi() const { return this->widenLo().shl<8>(); }
- Wide widenLoHi() const { return this->widenLo() + this->widenHi(); }
-
- SkPx operator+(const SkPx& o) const {
- return (uint8x8x4_t) {{
- vadd_u8(fVec.val[0], o.fVec.val[0]),
- vadd_u8(fVec.val[1], o.fVec.val[1]),
- vadd_u8(fVec.val[2], o.fVec.val[2]),
- vadd_u8(fVec.val[3], o.fVec.val[3]),
- }};
- }
- SkPx operator-(const SkPx& o) const {
- return (uint8x8x4_t) {{
- vsub_u8(fVec.val[0], o.fVec.val[0]),
- vsub_u8(fVec.val[1], o.fVec.val[1]),
- vsub_u8(fVec.val[2], o.fVec.val[2]),
- vsub_u8(fVec.val[3], o.fVec.val[3]),
- }};
- }
- SkPx saturatedAdd(const SkPx& o) const {
- return (uint8x8x4_t) {{
- vqadd_u8(fVec.val[0], o.fVec.val[0]),
- vqadd_u8(fVec.val[1], o.fVec.val[1]),
- vqadd_u8(fVec.val[2], o.fVec.val[2]),
- vqadd_u8(fVec.val[3], o.fVec.val[3]),
- }};
- }
-
- Wide operator*(const Alpha& a) const {
- return (uint16x8x4_t) {{
- vmull_u8(fVec.val[0], a.fA),
- vmull_u8(fVec.val[1], a.fA),
- vmull_u8(fVec.val[2], a.fA),
- vmull_u8(fVec.val[3], a.fA),
- }};
- }
- SkPx approxMulDiv255(const Alpha& a) const {
- return (*this * a).addNarrowHi(*this);
- }
-
- SkPx addAlpha(const Alpha& a) const {
- return (uint8x8x4_t) {{
- fVec.val[0],
- fVec.val[1],
- fVec.val[2],
- vadd_u8(fVec.val[3], a.fA),
- }};
- }
-};
-
-} // namespace neon
-
-typedef neon::SkPx SkPx;
-
-#endif//SkPx_neon_DEFINED
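
The planar layout described in the header comment of the deleted SkPx_neon.h comes straight from vld4/vst4: one structure load deinterleaves eight 4-byte pixels into four 8-lane planes. A minimal stand-alone illustration (ARM only, not Skia code; extract_alphas is hypothetical, and it assumes 8888 pixels with alpha in byte 3, as SkPx.h asserts):

#include <arm_neon.h>
#include <stdint.h>

// Deinterleave 8 pixels with vld4_u8 and store just the alpha plane.
// px.val[0..3] are the four byte planes; alpha is plane 3 on little-endian.
static void extract_alphas(uint8_t dst[8], const uint32_t src[8]) {
    uint8x8x4_t px = vld4_u8((const uint8_t*)src);
    vst1_u8(dst, px.val[3]);
}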
diff --git a/src/opts/SkPx_none.h b/src/opts/SkPx_none.h
deleted file mode 100644
index 8217eaef3c..0000000000
--- a/src/opts/SkPx_none.h
+++ /dev/null
@@ -1,111 +0,0 @@
-/*
- * Copyright 2015 Google Inc.
- *
- * Use of this source code is governed by a BSD-style license that can be
- * found in the LICENSE file.
- */
-
-#ifndef SkPx_none_DEFINED
-#define SkPx_none_DEFINED
-
-// Nothing fancy here. We're the backup _none case after all.
-// Our declared sweet spot is simply a single pixel at a time.
-
-namespace none {
-
-struct SkPx {
- static const int N = 1;
- uint8_t f8[4];
-
- SkPx(uint32_t px) { memcpy(f8, &px, 4); }
- SkPx(uint8_t x, uint8_t y, uint8_t z, uint8_t a) {
- f8[0] = x; f8[1] = y; f8[2] = z; f8[3] = a;
- }
-
- static SkPx Dup(uint32_t px) { return px; }
- static SkPx Load(const uint32_t* px) { return *px; }
- static SkPx Load(const uint32_t* px, int n) {
- SkASSERT(false); // There are no 0<n<1.
- return 0;
- }
-
- void store(uint32_t* px) const { memcpy(px, f8, 4); }
- void store(uint32_t* px, int n) const {
- SkASSERT(false); // There are no 0<n<1.
- }
-
- struct Alpha {
- uint8_t fA;
- Alpha(uint8_t a) : fA(a) {}
-
- static Alpha Dup(uint8_t a) { return a; }
- static Alpha Load(const uint8_t* a) { return *a; }
- static Alpha Load(const uint8_t* a, int n) {
- SkASSERT(false); // There are no 0<n<1.
- return 0;
- }
- Alpha inv() const { return 255 - fA; }
- };
-
- struct Wide {
- uint16_t f16[4];
-
- Wide(uint16_t x, uint16_t y, uint16_t z, uint16_t a) {
- f16[0] = x; f16[1] = y; f16[2] = z; f16[3] = a;
- }
-
- Wide operator+(const Wide& o) const {
- return Wide(f16[0]+o.f16[0], f16[1]+o.f16[1], f16[2]+o.f16[2], f16[3]+o.f16[3]);
- }
- Wide operator-(const Wide& o) const {
- return Wide(f16[0]-o.f16[0], f16[1]-o.f16[1], f16[2]-o.f16[2], f16[3]-o.f16[3]);
- }
- template <int bits> Wide shl() const {
- return Wide(f16[0]<<bits, f16[1]<<bits, f16[2]<<bits, f16[3]<<bits);
- }
- template <int bits> Wide shr() const {
- return Wide(f16[0]>>bits, f16[1]>>bits, f16[2]>>bits, f16[3]>>bits);
- }
-
- SkPx addNarrowHi(const SkPx& o) const {
- Wide sum = (*this + o.widenLo()).shr<8>();
- return SkPx(sum.f16[0], sum.f16[1], sum.f16[2], sum.f16[3]);
- }
- };
-
- Alpha alpha() const { return f8[3]; }
-
- Wide widenLo() const { return Wide(f8[0], f8[1], f8[2], f8[3]); }
- Wide widenHi() const { return this->widenLo().shl<8>(); }
- Wide widenLoHi() const { return this->widenLo() + this->widenHi(); }
-
- SkPx operator+(const SkPx& o) const {
- return SkPx(f8[0]+o.f8[0], f8[1]+o.f8[1], f8[2]+o.f8[2], f8[3]+o.f8[3]);
- }
- SkPx operator-(const SkPx& o) const {
- return SkPx(f8[0]-o.f8[0], f8[1]-o.f8[1], f8[2]-o.f8[2], f8[3]-o.f8[3]);
- }
- SkPx saturatedAdd(const SkPx& o) const {
- return SkPx(SkTMax(0, SkTMin(255, f8[0]+o.f8[0])),
- SkTMax(0, SkTMin(255, f8[1]+o.f8[1])),
- SkTMax(0, SkTMin(255, f8[2]+o.f8[2])),
- SkTMax(0, SkTMin(255, f8[3]+o.f8[3])));
- }
-
- Wide operator*(const Alpha& a) const {
- return Wide(f8[0]*a.fA, f8[1]*a.fA, f8[2]*a.fA, f8[3]*a.fA);
- }
- SkPx approxMulDiv255(const Alpha& a) const {
- return (*this * a).addNarrowHi(*this);
- }
-
- SkPx addAlpha(const Alpha& a) const {
- return SkPx(f8[0], f8[1], f8[2], f8[3]+a.fA);
- }
-};
-
-} // namespace none
-
-typedef none::SkPx SkPx;
-
-#endif//SkPx_none_DEFINED
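
The scalar backend above uses the same (px*a+px)/256 shortcut that SkPx.h documents for approxMulDiv255(). The claim there (never off by more than 1 from (px*a+127)/255, and exact when either operand is 0 or 255) can be confirmed by brute force; a stand-alone sketch, not part of Skia:

#include <cstdio>
#include <cstdlib>

int main() {
    for (int px = 0; px < 256; px++)
    for (int a  = 0; a  < 256; a++) {
        int exact  = (px*a + 127) / 255;   // the value approxMulDiv255 approximates
        int approx = (px*a + px ) / 256;   // the shortcut SkPx actually computes
        bool edge = (px == 0 || px == 255 || a == 0 || a == 255);
        if (std::abs(exact - approx) > 1 || (edge && exact != approx)) {
            std::printf("mismatch: px=%d a=%d exact=%d approx=%d\n", px, a, exact, approx);
            return 1;
        }
    }
    std::printf("(px*a+px)/256 is within 1 of (px*a+127)/255 for all inputs\n");
    return 0;
}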
diff --git a/src/opts/SkPx_sse.h b/src/opts/SkPx_sse.h
deleted file mode 100644
index 2560946edb..0000000000
--- a/src/opts/SkPx_sse.h
+++ /dev/null
@@ -1,155 +0,0 @@
-/*
- * Copyright 2015 Google Inc.
- *
- * Use of this source code is governed by a BSD-style license that can be
- * found in the LICENSE file.
- */
-
-#ifndef SkPx_sse_DEFINED
-#define SkPx_sse_DEFINED
-
-// sse::SkPx's sweet spot is to work with 4 pixels at a time,
-// stored interlaced, just as they sit in memory: rgba rgba rgba rgba.
-
-// sse::SkPx's best way to work with alphas is similar,
-// replicating the 4 alphas 4 times each across the pixel: aaaa aaaa aaaa aaaa.
-
-// When working with fewer than 4 pixels, we load the pixels in the low lanes,
-// usually filling the top lanes with zeros (but who cares, might be junk).
-
-namespace sse {
-
-struct SkPx {
- static const int N = 4;
-
- __m128i fVec;
- SkPx(__m128i vec) : fVec(vec) {}
-
- static SkPx Dup(uint32_t px) { return _mm_set1_epi32(px); }
- static SkPx Load(const uint32_t* px) { return _mm_loadu_si128((const __m128i*)px); }
- static SkPx Load(const uint32_t* px, int n) {
- SkASSERT(n > 0 && n < 4);
- switch (n) {
- case 1: return _mm_cvtsi32_si128(px[0]);
- case 2: return _mm_loadl_epi64((const __m128i*)px);
- case 3: return _mm_or_si128(_mm_loadl_epi64((const __m128i*)px),
- _mm_slli_si128(_mm_cvtsi32_si128(px[2]), 8));
- }
- return _mm_setzero_si128(); // Not actually reachable.
- }
-
- void store(uint32_t* px) const { _mm_storeu_si128((__m128i*)px, fVec); }
- void store(uint32_t* px, int n) const {
- SkASSERT(n > 0 && n < 4);
- __m128i v = fVec;
- if (n & 1) {
- *px++ = _mm_cvtsi128_si32(v);
- v = _mm_srli_si128(v, 4);
- }
- if (n & 2) {
- _mm_storel_epi64((__m128i*)px, v);
- }
- }
-
- struct Alpha {
- __m128i fVec;
- Alpha(__m128i vec) : fVec(vec) {}
-
- static Alpha Dup(uint8_t a) { return _mm_set1_epi8(a); }
- static Alpha Load(const uint8_t* a) {
- __m128i as = _mm_cvtsi32_si128(*(const uint32_t*)a); // ____ ____ ____ 3210
- #if SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_SSSE3
- return _mm_shuffle_epi8(as, _mm_set_epi8(3,3,3,3, 2,2,2,2, 1,1,1,1, 0,0,0,0));
- #else
- as = _mm_unpacklo_epi8 (as, as); // ____ ____ 3322 1100
- as = _mm_unpacklo_epi16(as, as); // 3333 2222 1111 0000
- return as;
- #endif
- }
- static Alpha Load(const uint8_t* a, int n) {
- SkASSERT(n > 0 && n < 4);
- uint8_t a4[] = { 0,0,0,0 };
- switch (n) {
- case 3: a4[2] = a[2]; // fall through
- case 2: a4[1] = a[1]; // fall through
- case 1: a4[0] = a[0];
- }
- return Load(a4);
- }
-
- Alpha inv() const { return _mm_sub_epi8(_mm_set1_epi8(~0), fVec); }
- };
-
- struct Wide {
- __m128i fLo, fHi;
- Wide(__m128i lo, __m128i hi) : fLo(lo), fHi(hi) {}
-
- Wide operator+(const Wide& o) const {
- return Wide(_mm_add_epi16(fLo, o.fLo), _mm_add_epi16(fHi, o.fHi));
- }
- Wide operator-(const Wide& o) const {
- return Wide(_mm_sub_epi16(fLo, o.fLo), _mm_sub_epi16(fHi, o.fHi));
- }
- template <int bits> Wide shl() const {
- return Wide(_mm_slli_epi16(fLo, bits), _mm_slli_epi16(fHi, bits));
- }
- template <int bits> Wide shr() const {
- return Wide(_mm_srli_epi16(fLo, bits), _mm_srli_epi16(fHi, bits));
- }
-
- SkPx addNarrowHi(const SkPx& o) const {
- Wide sum = (*this + o.widenLo()).shr<8>();
- return _mm_packus_epi16(sum.fLo, sum.fHi);
- }
- };
-
- Alpha alpha() const {
- #if SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_SSSE3
- return _mm_shuffle_epi8(fVec, _mm_set_epi8(15,15,15,15, 11,11,11,11, 7,7,7,7, 3,3,3,3));
- #else
- // We exploit that A >= rgb for any premul pixel.
- __m128i as = fVec; // 3xxx 2xxx 1xxx 0xxx
- as = _mm_max_epu8(as, _mm_srli_epi32(as, 8)); // 33xx 22xx 11xx 00xx
- as = _mm_max_epu8(as, _mm_srli_epi32(as, 16)); // 3333 2222 1111 0000
- return as;
- #endif
- }
-
- Wide widenLo() const {
- return Wide(_mm_unpacklo_epi8(fVec, _mm_setzero_si128()),
- _mm_unpackhi_epi8(fVec, _mm_setzero_si128()));
- }
- Wide widenHi() const {
- return Wide(_mm_unpacklo_epi8(_mm_setzero_si128(), fVec),
- _mm_unpackhi_epi8(_mm_setzero_si128(), fVec));
- }
- Wide widenLoHi() const {
- return Wide(_mm_unpacklo_epi8(fVec, fVec),
- _mm_unpackhi_epi8(fVec, fVec));
- }
-
- SkPx operator+(const SkPx& o) const { return _mm_add_epi8(fVec, o.fVec); }
- SkPx operator-(const SkPx& o) const { return _mm_sub_epi8(fVec, o.fVec); }
- SkPx saturatedAdd(const SkPx& o) const { return _mm_adds_epi8(fVec, o.fVec); }
-
- Wide operator*(const Alpha& a) const {
- __m128i pLo = _mm_unpacklo_epi8( fVec, _mm_setzero_si128()),
- aLo = _mm_unpacklo_epi8(a.fVec, _mm_setzero_si128()),
- pHi = _mm_unpackhi_epi8( fVec, _mm_setzero_si128()),
- aHi = _mm_unpackhi_epi8(a.fVec, _mm_setzero_si128());
- return Wide(_mm_mullo_epi16(pLo, aLo), _mm_mullo_epi16(pHi, aHi));
- }
- SkPx approxMulDiv255(const Alpha& a) const {
- return (*this * a).addNarrowHi(*this);
- }
-
- SkPx addAlpha(const Alpha& a) const {
- return _mm_add_epi8(fVec, _mm_and_si128(a.fVec, _mm_set1_epi32(0xFF000000)));
- }
-};
-
-} // namespace sse
-
-typedef sse::SkPx SkPx;
-
-#endif//SkPx_sse_DEFINED
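
Unpacked into plain SSE2 intrinsics, the approxMulDiv255() path in the deleted sse::SkPx (operator* followed by addNarrowHi) is just the (px*a + px) >> 8 approximation from SkPx.h applied per byte. A stand-alone sketch of that one step, not Skia code and with a hypothetical name:

#include <emmintrin.h>

// a must already have each pixel's alpha replicated across its four lanes,
// exactly as sse::SkPx::Alpha stores it.
static __m128i approx_mul_div_255(__m128i px, __m128i a) {
    const __m128i zero = _mm_setzero_si128();
    __m128i pLo = _mm_unpacklo_epi8(px, zero), aLo = _mm_unpacklo_epi8(a, zero),
            pHi = _mm_unpackhi_epi8(px, zero), aHi = _mm_unpackhi_epi8(a, zero);
    // 16-bit px*a per channel (operator*), then + px and keep the high byte
    // (addNarrowHi): (px*a + px) >> 8.
    __m128i lo = _mm_srli_epi16(_mm_add_epi16(_mm_mullo_epi16(pLo, aLo), pLo), 8);
    __m128i hi = _mm_srli_epi16(_mm_add_epi16(_mm_mullo_epi16(pHi, aHi), pHi), 8);
    return _mm_packus_epi16(lo, hi);
}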