author     mtklein <mtklein@chromium.org>        2015-07-20 07:14:19 -0700
committer  Commit bot <commit-bot@chromium.org>  2015-07-20 07:14:19 -0700
commit     54f313ccb8eba45954fe0a45092433cbf739b053 (patch)
tree       2d342205fb60fd0e9e6cf326f5dcb0b372587caf /src
parent     b5a1217d8a338940645f8bc23a805dbd7674ff71 (diff)
Clean up dead xfermode opts code.
Now that SK_SUPPORT_LEGACY_XFERMODES is unused, tons of code becomes dead.
Nothing is needed in opts/ anymore for x86. We still do runtime NEON detection,
which just duplicates Sk4pxXfermode.

TBR=reed@google.com
BUG=skia:

Review URL: https://codereview.chromium.org/1230023011
Diffstat (limited to 'src')
-rw-r--r--  src/core/Sk4pxXfermode.h               |    3
-rw-r--r--  src/core/SkXfermode.cpp                |    6
-rw-r--r--  src/opts/SkXfermode_opts_SSE2.cpp      |  530
-rw-r--r--  src/opts/SkXfermode_opts_SSE2.h        |   35
-rw-r--r--  src/opts/SkXfermode_opts_arm.cpp       |   25
-rw-r--r--  src/opts/SkXfermode_opts_arm_neon.cpp  | 1027
-rw-r--r--  src/opts/SkXfermode_opts_arm_neon.h    |   39
-rw-r--r--  src/opts/SkXfermode_opts_none.cpp      |   10
-rw-r--r--  src/opts/opts_check_x86.cpp            |   33
9 files changed, 18 insertions(+), 1690 deletions(-)
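
Background for the "runtime NEON detection" this CL keeps: the ARM build compiles a portable factory and a NEON factory and picks one per call based on a CPU probe. The sketch below is a minimal, hypothetical illustration of that dispatch pattern, not part of this change; the real code uses SK_ARM_NEON_WRAP from SkUtilsArm.h and the SkPlatformXfermodeFactory_impl[_neon] factories shown in the diff.

// Minimal sketch (hypothetical names) of runtime CPU-feature dispatch, the
// pattern this CL keeps for NEON. Skia's real version wraps the choice with
// SK_ARM_NEON_WRAP and the SkPlatformXfermodeFactory_impl[_neon] factories.
#include <cstdio>

struct Xfermode { const char* name; };

// Portable fallback, always built; returning nullptr means "no fast path".
static Xfermode* make_xfermode_portable() { return nullptr; }

// SIMD flavor, built in its own translation unit with the right CPU flags
// and only ever reached after the runtime probe below says it is safe.
static Xfermode* make_xfermode_simd() {
    static Xfermode m = { "simd" };
    return &m;
}

// Stand-in for the real probe (e.g. a cached cpuid or /proc/cpuinfo check).
static bool cpu_has_simd() { return true; }

// The single factory the core calls; it selects an implementation at runtime.
static Xfermode* make_xfermode() {
    return cpu_has_simd() ? make_xfermode_simd() : make_xfermode_portable();
}

int main() {
    Xfermode* m = make_xfermode();
    std::printf("using %s xfermode path\n", m ? m->name : "portable");
    return 0;
}
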
diff --git a/src/core/Sk4pxXfermode.h b/src/core/Sk4pxXfermode.h
index c671b679f8..0c8dcb5302 100644
--- a/src/core/Sk4pxXfermode.h
+++ b/src/core/Sk4pxXfermode.h
@@ -10,6 +10,7 @@
#include "Sk4px.h"
#include "SkPMFloat.h"
+#include "SkXfermode_proccoeff.h"
// This file is possibly included into multiple .cpp files.
// Each gets its own independent instantiation by wrapping in an anonymous namespace.
@@ -275,7 +276,6 @@ static SkProcCoeffXfermode* SkCreate4pxXfermode(const ProcCoeff& rec, SkXfermode
case SkXfermode::kMultiply_Mode: return SkT4pxXfermode<Multiply>::Create(rec);
case SkXfermode::kDifference_Mode: return SkT4pxXfermode<Difference>::Create(rec);
case SkXfermode::kExclusion_Mode: return SkT4pxXfermode<Exclusion>::Create(rec);
-#if !defined(SK_SUPPORT_LEGACY_XFERMODES) // For staging in Chrome (layout tests).
case SkXfermode::kHardLight_Mode: return SkT4pxXfermode<HardLight>::Create(rec);
case SkXfermode::kOverlay_Mode: return SkT4pxXfermode<Overlay>::Create(rec);
case SkXfermode::kDarken_Mode: return SkT4pxXfermode<Darken>::Create(rec);
@@ -284,7 +284,6 @@ static SkProcCoeffXfermode* SkCreate4pxXfermode(const ProcCoeff& rec, SkXfermode
case SkXfermode::kColorDodge_Mode: return SkTPMFloatXfermode<ColorDodge>::Create(rec);
case SkXfermode::kColorBurn_Mode: return SkTPMFloatXfermode<ColorBurn>::Create(rec);
case SkXfermode::kSoftLight_Mode: return SkTPMFloatXfermode<SoftLight>::Create(rec);
-#endif
default: break;
}
#endif
diff --git a/src/core/SkXfermode.cpp b/src/core/SkXfermode.cpp
index e0124b9b2f..54f1df488f 100644
--- a/src/core/SkXfermode.cpp
+++ b/src/core/SkXfermode.cpp
@@ -7,7 +7,6 @@
*/
#include "SkXfermode.h"
-#include "SkXfermode_opts_SSE2.h"
#include "SkXfermode_proccoeff.h"
#include "Sk4pxXfermode.h"
#include "SkColorPriv.h"
@@ -16,13 +15,8 @@
#include "SkPMFloat.h"
#include "SkReadBuffer.h"
#include "SkString.h"
-#include "SkUtilsArm.h"
#include "SkWriteBuffer.h"
-#if !SK_ARM_NEON_IS_NONE
- #include "SkXfermode_opts_arm_neon.h"
-#endif
-
#define SkAlphaMulAlpha(a, b) SkMulDiv255Round(a, b)
static inline unsigned saturated_add(unsigned a, unsigned b) {
diff --git a/src/opts/SkXfermode_opts_SSE2.cpp b/src/opts/SkXfermode_opts_SSE2.cpp
deleted file mode 100644
index 2024a175a8..0000000000
--- a/src/opts/SkXfermode_opts_SSE2.cpp
+++ /dev/null
@@ -1,530 +0,0 @@
-/*
- * Copyright 2014 Google Inc.
- *
- * Use of this source code is governed by a BSD-style license that can be
- * found in the LICENSE file.
- */
-
-#include "SkColorPriv.h"
-#include "SkColor_opts_SSE2.h"
-#include "SkMathPriv.h"
-#include "SkMath_opts_SSE2.h"
-#include "SkXfermode.h"
-#include "SkXfermode_opts_SSE2.h"
-#include "SkXfermode_proccoeff.h"
-
-////////////////////////////////////////////////////////////////////////////////
-// 4 pixels SSE2 version functions
-////////////////////////////////////////////////////////////////////////////////
-
-static inline __m128i SkDiv255Round_SSE2(const __m128i& a) {
- __m128i prod = _mm_add_epi32(a, _mm_set1_epi32(128)); // prod += 128;
- prod = _mm_add_epi32(prod, _mm_srli_epi32(prod, 8)); // prod + (prod >> 8)
- prod = _mm_srli_epi32(prod, 8); // >> 8
-
- return prod;
-}
-
-static inline __m128i clamp_div255round_SSE2(const __m128i& prod) {
- // test if > 0
- __m128i cmp1 = _mm_cmpgt_epi32(prod, _mm_setzero_si128());
- // test if < 255*255
- __m128i cmp2 = _mm_cmplt_epi32(prod, _mm_set1_epi32(255*255));
-
- __m128i ret = _mm_setzero_si128();
-
- // if value >= 255*255, value = 255
- ret = _mm_andnot_si128(cmp2, _mm_set1_epi32(255));
-
- __m128i div = SkDiv255Round_SSE2(prod);
-
- // test if > 0 && < 255*255
- __m128i cmp = _mm_and_si128(cmp1, cmp2);
-
- ret = _mm_or_si128(_mm_and_si128(cmp, div), _mm_andnot_si128(cmp, ret));
-
- return ret;
-}
-static inline __m128i SkMin32_SSE2(const __m128i& a, const __m128i& b) {
- __m128i cmp = _mm_cmplt_epi32(a, b);
- return _mm_or_si128(_mm_and_si128(cmp, a), _mm_andnot_si128(cmp, b));
-}
-
-static inline __m128i srcover_byte_SSE2(const __m128i& a, const __m128i& b) {
- // a + b - SkAlphaMulAlpha(a, b);
- return _mm_sub_epi32(_mm_add_epi32(a, b), SkAlphaMulAlpha_SSE2(a, b));
-
-}
-
-// Portable version overlay_byte() is in SkXfermode.cpp.
-static inline __m128i overlay_byte_SSE2(const __m128i& sc, const __m128i& dc,
- const __m128i& sa, const __m128i& da) {
- __m128i ida = _mm_sub_epi32(_mm_set1_epi32(255), da);
- __m128i tmp1 = _mm_mullo_epi16(sc, ida);
- __m128i isa = _mm_sub_epi32(_mm_set1_epi32(255), sa);
- __m128i tmp2 = _mm_mullo_epi16(dc, isa);
- __m128i tmp = _mm_add_epi32(tmp1, tmp2);
-
- __m128i cmp = _mm_cmpgt_epi32(_mm_slli_epi32(dc, 1), da);
- __m128i rc1 = _mm_slli_epi32(sc, 1); // 2 * sc
- rc1 = Multiply32_SSE2(rc1, dc); // *dc
-
- __m128i rc2 = _mm_mullo_epi16(sa, da); // sa * da
- __m128i tmp3 = _mm_slli_epi32(_mm_sub_epi32(da, dc), 1); // 2 * (da - dc)
- tmp3 = Multiply32_SSE2(tmp3, _mm_sub_epi32(sa, sc)); // * (sa - sc)
- rc2 = _mm_sub_epi32(rc2, tmp3);
-
- __m128i rc = _mm_or_si128(_mm_andnot_si128(cmp, rc1),
- _mm_and_si128(cmp, rc2));
- return clamp_div255round_SSE2(_mm_add_epi32(rc, tmp));
-}
-
-static __m128i overlay_modeproc_SSE2(const __m128i& src, const __m128i& dst) {
- __m128i sa = SkGetPackedA32_SSE2(src);
- __m128i da = SkGetPackedA32_SSE2(dst);
-
- __m128i a = srcover_byte_SSE2(sa, da);
- __m128i r = overlay_byte_SSE2(SkGetPackedR32_SSE2(src),
- SkGetPackedR32_SSE2(dst), sa, da);
- __m128i g = overlay_byte_SSE2(SkGetPackedG32_SSE2(src),
- SkGetPackedG32_SSE2(dst), sa, da);
- __m128i b = overlay_byte_SSE2(SkGetPackedB32_SSE2(src),
- SkGetPackedB32_SSE2(dst), sa, da);
- return SkPackARGB32_SSE2(a, r, g, b);
-}
-
-static inline __m128i darken_byte_SSE2(const __m128i& sc, const __m128i& dc,
- const __m128i& sa, const __m128i& da) {
- __m128i sd = _mm_mullo_epi16(sc, da);
- __m128i ds = _mm_mullo_epi16(dc, sa);
-
- __m128i cmp = _mm_cmplt_epi32(sd, ds);
-
- __m128i tmp = _mm_add_epi32(sc, dc);
- __m128i ret1 = _mm_sub_epi32(tmp, SkDiv255Round_SSE2(ds));
- __m128i ret2 = _mm_sub_epi32(tmp, SkDiv255Round_SSE2(sd));
- __m128i ret = _mm_or_si128(_mm_and_si128(cmp, ret1),
- _mm_andnot_si128(cmp, ret2));
- return ret;
-}
-
-static __m128i darken_modeproc_SSE2(const __m128i& src, const __m128i& dst) {
- __m128i sa = SkGetPackedA32_SSE2(src);
- __m128i da = SkGetPackedA32_SSE2(dst);
-
- __m128i a = srcover_byte_SSE2(sa, da);
- __m128i r = darken_byte_SSE2(SkGetPackedR32_SSE2(src),
- SkGetPackedR32_SSE2(dst), sa, da);
- __m128i g = darken_byte_SSE2(SkGetPackedG32_SSE2(src),
- SkGetPackedG32_SSE2(dst), sa, da);
- __m128i b = darken_byte_SSE2(SkGetPackedB32_SSE2(src),
- SkGetPackedB32_SSE2(dst), sa, da);
- return SkPackARGB32_SSE2(a, r, g, b);
-}
-
-static inline __m128i lighten_byte_SSE2(const __m128i& sc, const __m128i& dc,
- const __m128i& sa, const __m128i& da) {
- __m128i sd = _mm_mullo_epi16(sc, da);
- __m128i ds = _mm_mullo_epi16(dc, sa);
-
- __m128i cmp = _mm_cmpgt_epi32(sd, ds);
-
- __m128i tmp = _mm_add_epi32(sc, dc);
- __m128i ret1 = _mm_sub_epi32(tmp, SkDiv255Round_SSE2(ds));
- __m128i ret2 = _mm_sub_epi32(tmp, SkDiv255Round_SSE2(sd));
- __m128i ret = _mm_or_si128(_mm_and_si128(cmp, ret1),
- _mm_andnot_si128(cmp, ret2));
- return ret;
-}
-
-static __m128i lighten_modeproc_SSE2(const __m128i& src, const __m128i& dst) {
- __m128i sa = SkGetPackedA32_SSE2(src);
- __m128i da = SkGetPackedA32_SSE2(dst);
-
- __m128i a = srcover_byte_SSE2(sa, da);
- __m128i r = lighten_byte_SSE2(SkGetPackedR32_SSE2(src),
- SkGetPackedR32_SSE2(dst), sa, da);
- __m128i g = lighten_byte_SSE2(SkGetPackedG32_SSE2(src),
- SkGetPackedG32_SSE2(dst), sa, da);
- __m128i b = lighten_byte_SSE2(SkGetPackedB32_SSE2(src),
- SkGetPackedB32_SSE2(dst), sa, da);
- return SkPackARGB32_SSE2(a, r, g, b);
-}
-
-static inline __m128i colordodge_byte_SSE2(const __m128i& sc, const __m128i& dc,
- const __m128i& sa, const __m128i& da) {
- __m128i diff = _mm_sub_epi32(sa, sc);
- __m128i ida = _mm_sub_epi32(_mm_set1_epi32(255), da);
- __m128i isa = _mm_sub_epi32(_mm_set1_epi32(255), sa);
-
- // if (0 == dc)
- __m128i cmp1 = _mm_cmpeq_epi32(dc, _mm_setzero_si128());
- __m128i rc1 = _mm_and_si128(cmp1, SkAlphaMulAlpha_SSE2(sc, ida));
-
- // else if (0 == diff)
- __m128i cmp2 = _mm_cmpeq_epi32(diff, _mm_setzero_si128());
- __m128i cmp = _mm_andnot_si128(cmp1, cmp2);
- __m128i tmp1 = _mm_mullo_epi16(sa, da);
- __m128i tmp2 = _mm_mullo_epi16(sc, ida);
- __m128i tmp3 = _mm_mullo_epi16(dc, isa);
- __m128i rc2 = _mm_add_epi32(tmp1, tmp2);
- rc2 = _mm_add_epi32(rc2, tmp3);
- rc2 = clamp_div255round_SSE2(rc2);
- rc2 = _mm_and_si128(cmp, rc2);
-
- // else
- __m128i cmp3 = _mm_or_si128(cmp1, cmp2);
- __m128i value = _mm_mullo_epi16(dc, sa);
- diff = shim_mm_div_epi32(value, diff);
-
- __m128i tmp4 = SkMin32_SSE2(da, diff);
- tmp4 = Multiply32_SSE2(sa, tmp4);
- __m128i rc3 = _mm_add_epi32(tmp4, tmp2);
- rc3 = _mm_add_epi32(rc3, tmp3);
- rc3 = clamp_div255round_SSE2(rc3);
- rc3 = _mm_andnot_si128(cmp3, rc3);
-
- __m128i rc = _mm_or_si128(rc1, rc2);
- rc = _mm_or_si128(rc, rc3);
-
- return rc;
-}
-
-static __m128i colordodge_modeproc_SSE2(const __m128i& src,
- const __m128i& dst) {
- __m128i sa = SkGetPackedA32_SSE2(src);
- __m128i da = SkGetPackedA32_SSE2(dst);
-
- __m128i a = srcover_byte_SSE2(sa, da);
- __m128i r = colordodge_byte_SSE2(SkGetPackedR32_SSE2(src),
- SkGetPackedR32_SSE2(dst), sa, da);
- __m128i g = colordodge_byte_SSE2(SkGetPackedG32_SSE2(src),
- SkGetPackedG32_SSE2(dst), sa, da);
- __m128i b = colordodge_byte_SSE2(SkGetPackedB32_SSE2(src),
- SkGetPackedB32_SSE2(dst), sa, da);
- return SkPackARGB32_SSE2(a, r, g, b);
-}
-
-static inline __m128i colorburn_byte_SSE2(const __m128i& sc, const __m128i& dc,
- const __m128i& sa, const __m128i& da) {
- __m128i ida = _mm_sub_epi32(_mm_set1_epi32(255), da);
- __m128i isa = _mm_sub_epi32(_mm_set1_epi32(255), sa);
-
- // if (dc == da)
- __m128i cmp1 = _mm_cmpeq_epi32(dc, da);
- __m128i tmp1 = _mm_mullo_epi16(sa, da);
- __m128i tmp2 = _mm_mullo_epi16(sc, ida);
- __m128i tmp3 = _mm_mullo_epi16(dc, isa);
- __m128i rc1 = _mm_add_epi32(tmp1, tmp2);
- rc1 = _mm_add_epi32(rc1, tmp3);
- rc1 = clamp_div255round_SSE2(rc1);
- rc1 = _mm_and_si128(cmp1, rc1);
-
- // else if (0 == sc)
- __m128i cmp2 = _mm_cmpeq_epi32(sc, _mm_setzero_si128());
- __m128i rc2 = SkAlphaMulAlpha_SSE2(dc, isa);
- __m128i cmp = _mm_andnot_si128(cmp1, cmp2);
- rc2 = _mm_and_si128(cmp, rc2);
-
- // else
- __m128i cmp3 = _mm_or_si128(cmp1, cmp2);
- __m128i tmp4 = _mm_sub_epi32(da, dc);
- tmp4 = Multiply32_SSE2(tmp4, sa);
- tmp4 = shim_mm_div_epi32(tmp4, sc);
-
- __m128i tmp5 = _mm_sub_epi32(da, SkMin32_SSE2(da, tmp4));
- tmp5 = Multiply32_SSE2(sa, tmp5);
- __m128i rc3 = _mm_add_epi32(tmp5, tmp2);
- rc3 = _mm_add_epi32(rc3, tmp3);
- rc3 = clamp_div255round_SSE2(rc3);
- rc3 = _mm_andnot_si128(cmp3, rc3);
-
- __m128i rc = _mm_or_si128(rc1, rc2);
- rc = _mm_or_si128(rc, rc3);
-
- return rc;
-}
-
-static __m128i colorburn_modeproc_SSE2(const __m128i& src, const __m128i& dst) {
- __m128i sa = SkGetPackedA32_SSE2(src);
- __m128i da = SkGetPackedA32_SSE2(dst);
-
- __m128i a = srcover_byte_SSE2(sa, da);
- __m128i r = colorburn_byte_SSE2(SkGetPackedR32_SSE2(src),
- SkGetPackedR32_SSE2(dst), sa, da);
- __m128i g = colorburn_byte_SSE2(SkGetPackedG32_SSE2(src),
- SkGetPackedG32_SSE2(dst), sa, da);
- __m128i b = colorburn_byte_SSE2(SkGetPackedB32_SSE2(src),
- SkGetPackedB32_SSE2(dst), sa, da);
- return SkPackARGB32_SSE2(a, r, g, b);
-}
-
-static inline __m128i hardlight_byte_SSE2(const __m128i& sc, const __m128i& dc,
- const __m128i& sa, const __m128i& da) {
- // if (2 * sc <= sa)
- __m128i tmp1 = _mm_slli_epi32(sc, 1);
- __m128i cmp1 = _mm_cmpgt_epi32(tmp1, sa);
- __m128i rc1 = _mm_mullo_epi16(sc, dc); // sc * dc;
- rc1 = _mm_slli_epi32(rc1, 1); // 2 * sc * dc
- rc1 = _mm_andnot_si128(cmp1, rc1);
-
- // else
- tmp1 = _mm_mullo_epi16(sa, da);
- __m128i tmp2 = Multiply32_SSE2(_mm_sub_epi32(da, dc),
- _mm_sub_epi32(sa, sc));
- tmp2 = _mm_slli_epi32(tmp2, 1);
- __m128i rc2 = _mm_sub_epi32(tmp1, tmp2);
- rc2 = _mm_and_si128(cmp1, rc2);
-
- __m128i rc = _mm_or_si128(rc1, rc2);
-
- __m128i ida = _mm_sub_epi32(_mm_set1_epi32(255), da);
- tmp1 = _mm_mullo_epi16(sc, ida);
- __m128i isa = _mm_sub_epi32(_mm_set1_epi32(255), sa);
- tmp2 = _mm_mullo_epi16(dc, isa);
- rc = _mm_add_epi32(rc, tmp1);
- rc = _mm_add_epi32(rc, tmp2);
- return clamp_div255round_SSE2(rc);
-}
-
-static __m128i hardlight_modeproc_SSE2(const __m128i& src, const __m128i& dst) {
- __m128i sa = SkGetPackedA32_SSE2(src);
- __m128i da = SkGetPackedA32_SSE2(dst);
-
- __m128i a = srcover_byte_SSE2(sa, da);
- __m128i r = hardlight_byte_SSE2(SkGetPackedR32_SSE2(src),
- SkGetPackedR32_SSE2(dst), sa, da);
- __m128i g = hardlight_byte_SSE2(SkGetPackedG32_SSE2(src),
- SkGetPackedG32_SSE2(dst), sa, da);
- __m128i b = hardlight_byte_SSE2(SkGetPackedB32_SSE2(src),
- SkGetPackedB32_SSE2(dst), sa, da);
- return SkPackARGB32_SSE2(a, r, g, b);
-}
-
-static __m128i sqrt_unit_byte_SSE2(const __m128i& n) {
- return SkSqrtBits_SSE2(n, 15+4);
-}
-
-static inline __m128i softlight_byte_SSE2(const __m128i& sc, const __m128i& dc,
- const __m128i& sa, const __m128i& da) {
- __m128i tmp1, tmp2, tmp3;
-
- // int m = da ? dc * 256 / da : 0;
- __m128i cmp = _mm_cmpeq_epi32(da, _mm_setzero_si128());
- __m128i m = _mm_slli_epi32(dc, 8);
- __m128 x = _mm_cvtepi32_ps(m);
- __m128 y = _mm_cvtepi32_ps(da);
- m = _mm_cvttps_epi32(_mm_div_ps(x, y));
- m = _mm_andnot_si128(cmp, m);
-
- // if (2 * sc <= sa)
- tmp1 = _mm_slli_epi32(sc, 1); // 2 * sc
- __m128i cmp1 = _mm_cmpgt_epi32(tmp1, sa);
- tmp1 = _mm_sub_epi32(tmp1, sa); // 2 * sc - sa
- tmp2 = _mm_sub_epi32(_mm_set1_epi32(256), m); // 256 - m
- tmp1 = Multiply32_SSE2(tmp1, tmp2);
- tmp1 = _mm_srai_epi32(tmp1, 8);
- tmp1 = _mm_add_epi32(sa, tmp1);
- tmp1 = Multiply32_SSE2(dc, tmp1);
- __m128i rc1 = _mm_andnot_si128(cmp1, tmp1);
-
- // else if (4 * dc <= da)
- tmp2 = _mm_slli_epi32(dc, 2); // dc * 4
- __m128i cmp2 = _mm_cmpgt_epi32(tmp2, da);
- __m128i i = _mm_slli_epi32(m, 2); // 4 * m
- __m128i j = _mm_add_epi32(i, _mm_set1_epi32(256)); // 4 * m + 256
- __m128i k = Multiply32_SSE2(i, j); // 4 * m * (4 * m + 256)
- __m128i t = _mm_sub_epi32(m, _mm_set1_epi32(256)); // m - 256
- i = Multiply32_SSE2(k, t); // 4 * m * (4 * m + 256) * (m - 256)
- i = _mm_srai_epi32(i, 16); // >> 16
- j = Multiply32_SSE2(_mm_set1_epi32(7), m); // 7 * m
- tmp2 = _mm_add_epi32(i, j);
- i = Multiply32_SSE2(dc, sa); // dc * sa
- j = _mm_slli_epi32(sc, 1); // 2 * sc
- j = _mm_sub_epi32(j, sa); // 2 * sc - sa
- j = Multiply32_SSE2(da, j); // da * (2 * sc - sa)
- tmp2 = Multiply32_SSE2(j, tmp2); // * tmp
- tmp2 = _mm_srai_epi32(tmp2, 8); // >> 8
- tmp2 = _mm_add_epi32(i, tmp2);
- cmp = _mm_andnot_si128(cmp2, cmp1);
- __m128i rc2 = _mm_and_si128(cmp, tmp2);
- __m128i rc = _mm_or_si128(rc1, rc2);
-
- // else
- tmp3 = sqrt_unit_byte_SSE2(m);
- tmp3 = _mm_sub_epi32(tmp3, m);
- tmp3 = Multiply32_SSE2(j, tmp3); // j = da * (2 * sc - sa)
- tmp3 = _mm_srai_epi32(tmp3, 8);
- tmp3 = _mm_add_epi32(i, tmp3); // i = dc * sa
- cmp = _mm_and_si128(cmp1, cmp2);
- __m128i rc3 = _mm_and_si128(cmp, tmp3);
- rc = _mm_or_si128(rc, rc3);
-
- tmp1 = _mm_sub_epi32(_mm_set1_epi32(255), da); // 255 - da
- tmp1 = _mm_mullo_epi16(sc, tmp1);
- tmp2 = _mm_sub_epi32(_mm_set1_epi32(255), sa); // 255 - sa
- tmp2 = _mm_mullo_epi16(dc, tmp2);
- rc = _mm_add_epi32(rc, tmp1);
- rc = _mm_add_epi32(rc, tmp2);
- return clamp_div255round_SSE2(rc);
-}
-
-static __m128i softlight_modeproc_SSE2(const __m128i& src, const __m128i& dst) {
- __m128i sa = SkGetPackedA32_SSE2(src);
- __m128i da = SkGetPackedA32_SSE2(dst);
-
- __m128i a = srcover_byte_SSE2(sa, da);
- __m128i r = softlight_byte_SSE2(SkGetPackedR32_SSE2(src),
- SkGetPackedR32_SSE2(dst), sa, da);
- __m128i g = softlight_byte_SSE2(SkGetPackedG32_SSE2(src),
- SkGetPackedG32_SSE2(dst), sa, da);
- __m128i b = softlight_byte_SSE2(SkGetPackedB32_SSE2(src),
- SkGetPackedB32_SSE2(dst), sa, da);
- return SkPackARGB32_SSE2(a, r, g, b);
-}
-
-
-////////////////////////////////////////////////////////////////////////////////
-
-typedef __m128i (*SkXfermodeProcSIMD)(const __m128i& src, const __m128i& dst);
-
-void SkSSE2ProcCoeffXfermode::xfer32(SkPMColor dst[], const SkPMColor src[],
- int count, const SkAlpha aa[]) const {
- SkASSERT(dst && src && count >= 0);
-
- SkXfermodeProc proc = this->getProc();
- SkXfermodeProcSIMD procSIMD = reinterpret_cast<SkXfermodeProcSIMD>(fProcSIMD);
- SkASSERT(procSIMD != NULL);
-
- if (NULL == aa) {
- if (count >= 4) {
- while (((size_t)dst & 0x0F) != 0) {
- *dst = proc(*src, *dst);
- dst++;
- src++;
- count--;
- }
-
- const __m128i* s = reinterpret_cast<const __m128i*>(src);
- __m128i* d = reinterpret_cast<__m128i*>(dst);
-
- while (count >= 4) {
- __m128i src_pixel = _mm_loadu_si128(s++);
- __m128i dst_pixel = _mm_load_si128(d);
-
- dst_pixel = procSIMD(src_pixel, dst_pixel);
- _mm_store_si128(d++, dst_pixel);
- count -= 4;
- }
-
- src = reinterpret_cast<const SkPMColor*>(s);
- dst = reinterpret_cast<SkPMColor*>(d);
- }
-
- for (int i = count - 1; i >= 0; --i) {
- *dst = proc(*src, *dst);
- dst++;
- src++;
- }
- } else {
- for (int i = count - 1; i >= 0; --i) {
- unsigned a = aa[i];
- if (0 != a) {
- SkPMColor dstC = dst[i];
- SkPMColor C = proc(src[i], dstC);
- if (a != 0xFF) {
- C = SkFourByteInterp(C, dstC, a);
- }
- dst[i] = C;
- }
- }
- }
-}
-
-void SkSSE2ProcCoeffXfermode::xfer16(uint16_t dst[], const SkPMColor src[],
- int count, const SkAlpha aa[]) const {
- SkASSERT(dst && src && count >= 0);
-
- SkXfermodeProc proc = this->getProc();
- SkXfermodeProcSIMD procSIMD = reinterpret_cast<SkXfermodeProcSIMD>(fProcSIMD);
- SkASSERT(procSIMD != NULL);
-
- if (NULL == aa) {
- if (count >= 8) {
- while (((size_t)dst & 0x0F) != 0) {
- SkPMColor dstC = SkPixel16ToPixel32(*dst);
- *dst = SkPixel32ToPixel16_ToU16(proc(*src, dstC));
- dst++;
- src++;
- count--;
- }
-
- const __m128i* s = reinterpret_cast<const __m128i*>(src);
- __m128i* d = reinterpret_cast<__m128i*>(dst);
-
- while (count >= 8) {
- __m128i src_pixel1 = _mm_loadu_si128(s++);
- __m128i src_pixel2 = _mm_loadu_si128(s++);
- __m128i dst_pixel = _mm_load_si128(d);
-
- __m128i dst_pixel1 = _mm_unpacklo_epi16(dst_pixel, _mm_setzero_si128());
- __m128i dst_pixel2 = _mm_unpackhi_epi16(dst_pixel, _mm_setzero_si128());
-
- __m128i dstC1 = SkPixel16ToPixel32_SSE2(dst_pixel1);
- __m128i dstC2 = SkPixel16ToPixel32_SSE2(dst_pixel2);
-
- dst_pixel1 = procSIMD(src_pixel1, dstC1);
- dst_pixel2 = procSIMD(src_pixel2, dstC2);
- dst_pixel = SkPixel32ToPixel16_ToU16_SSE2(dst_pixel1, dst_pixel2);
-
- _mm_store_si128(d++, dst_pixel);
- count -= 8;
- }
-
- src = reinterpret_cast<const SkPMColor*>(s);
- dst = reinterpret_cast<uint16_t*>(d);
- }
-
- for (int i = count - 1; i >= 0; --i) {
- SkPMColor dstC = SkPixel16ToPixel32(*dst);
- *dst = SkPixel32ToPixel16_ToU16(proc(*src, dstC));
- dst++;
- src++;
- }
- } else {
- for (int i = count - 1; i >= 0; --i) {
- unsigned a = aa[i];
- if (0 != a) {
- SkPMColor dstC = SkPixel16ToPixel32(dst[i]);
- SkPMColor C = proc(src[i], dstC);
- if (0xFF != a) {
- C = SkFourByteInterp(C, dstC, a);
- }
- dst[i] = SkPixel32ToPixel16_ToU16(C);
- }
- }
- }
-}
-
-#ifndef SK_IGNORE_TO_STRING
-void SkSSE2ProcCoeffXfermode::toString(SkString* str) const {
- this->INHERITED::toString(str);
-}
-#endif
-
-SkProcCoeffXfermode* SkPlatformXfermodeFactory_impl_SSE2(const ProcCoeff& rec,
- SkXfermode::Mode mode) {
- SkXfermodeProcSIMD proc = nullptr;
- switch (mode) {
- // TODO(mtklein): Sk4pxXfermode has these now. Clean up the whole file!
- case SkProcCoeffXfermode::kOverlay_Mode: proc = overlay_modeproc_SSE2; break;
- case SkProcCoeffXfermode::kDarken_Mode: proc = darken_modeproc_SSE2; break;
- case SkProcCoeffXfermode::kLighten_Mode: proc = lighten_modeproc_SSE2; break;
- case SkProcCoeffXfermode::kHardLight_Mode: proc = hardlight_modeproc_SSE2; break;
- case SkProcCoeffXfermode::kColorDodge_Mode: proc = colordodge_modeproc_SSE2; break;
- case SkProcCoeffXfermode::kColorBurn_Mode: proc = colorburn_modeproc_SSE2; break;
- case SkProcCoeffXfermode::kSoftLight_Mode: proc = softlight_modeproc_SSE2; break;
- default: break;
- }
- return proc ? SkNEW_ARGS(SkSSE2ProcCoeffXfermode, (rec, mode, (void*)proc)) : nullptr;
-}
diff --git a/src/opts/SkXfermode_opts_SSE2.h b/src/opts/SkXfermode_opts_SSE2.h
deleted file mode 100644
index 927e5f4006..0000000000
--- a/src/opts/SkXfermode_opts_SSE2.h
+++ /dev/null
@@ -1,35 +0,0 @@
-/*
- * Copyright 2014 Google Inc.
- *
- * Use of this source code is governed by a BSD-style license that can be
- * found in the LICENSE file.
- */
-
-#ifndef SkXfermode_opts_SSE2_DEFINED
-#define SkXfermode_opts_SSE2_DEFINED
-
-#include "SkTypes.h"
-#include "SkXfermode_proccoeff.h"
-
-class SK_API SkSSE2ProcCoeffXfermode : public SkProcCoeffXfermode {
-public:
- SkSSE2ProcCoeffXfermode(const ProcCoeff& rec, SkXfermode::Mode mode,
- void* procSIMD)
- : INHERITED(rec, mode), fProcSIMD(procSIMD) {}
-
- void xfer32(SkPMColor dst[], const SkPMColor src[], int count,
- const SkAlpha aa[]) const override;
- void xfer16(uint16_t dst[], const SkPMColor src[],
- int count, const SkAlpha aa[]) const override;
-
- SK_TO_STRING_OVERRIDE()
-
-private:
- void* fProcSIMD;
- typedef SkProcCoeffXfermode INHERITED;
-};
-
-SkProcCoeffXfermode* SkPlatformXfermodeFactory_impl_SSE2(const ProcCoeff& rec,
- SkXfermode::Mode mode);
-
-#endif // SkXfermode_opts_SSE2_DEFINED
diff --git a/src/opts/SkXfermode_opts_arm.cpp b/src/opts/SkXfermode_opts_arm.cpp
index dd09551ae8..05c330fe81 100644
--- a/src/opts/SkXfermode_opts_arm.cpp
+++ b/src/opts/SkXfermode_opts_arm.cpp
@@ -9,25 +9,20 @@
#include "SkXfermode_proccoeff.h"
#include "SkUtilsArm.h"
-extern SkProcCoeffXfermode* SkPlatformXfermodeFactory_impl_neon(const ProcCoeff& rec,
- SkXfermode::Mode mode);
+// If we find we do have NEON, we'll call this method from SkXfermodes_opts_arm_neon.cpp.
+SkProcCoeffXfermode* SkPlatformXfermodeFactory_impl_neon(const ProcCoeff& rec,
+ SkXfermode::Mode mode);
-extern SkXfermodeProc SkPlatformXfermodeProcFactory_impl_neon(SkXfermode::Mode mode);
-
-SkProcCoeffXfermode* SkPlatformXfermodeFactory_impl(const ProcCoeff& rec,
- SkXfermode::Mode mode) {
+// If we don't have NEON, we'll call this method and return NULL.
+SkProcCoeffXfermode* SkPlatformXfermodeFactory_impl(const ProcCoeff& rec, SkXfermode::Mode mode);
+SkProcCoeffXfermode* SkPlatformXfermodeFactory_impl(const ProcCoeff& rec, SkXfermode::Mode mode) {
return NULL;
}
-SkXfermodeProc SkPlatformXfermodeProcFactory_impl(SkXfermode::Mode mode) {
- return NULL;
-}
-
-SkProcCoeffXfermode* SkPlatformXfermodeFactory(const ProcCoeff& rec,
- SkXfermode::Mode mode) {
+SkProcCoeffXfermode* SkPlatformXfermodeFactory(const ProcCoeff& rec, SkXfermode::Mode mode);
+SkProcCoeffXfermode* SkPlatformXfermodeFactory(const ProcCoeff& rec, SkXfermode::Mode mode) {
return SK_ARM_NEON_WRAP(SkPlatformXfermodeFactory_impl)(rec, mode);
}
-SkXfermodeProc SkPlatformXfermodeProcFactory(SkXfermode::Mode mode) {
- return SK_ARM_NEON_WRAP(SkPlatformXfermodeProcFactory_impl)(mode);
-}
+SkXfermodeProc SkPlatformXfermodeProcFactory(SkXfermode::Mode mode);
+SkXfermodeProc SkPlatformXfermodeProcFactory(SkXfermode::Mode mode) { return NULL; }
diff --git a/src/opts/SkXfermode_opts_arm_neon.cpp b/src/opts/SkXfermode_opts_arm_neon.cpp
index 42278165f6..ae0fd17b25 100644
--- a/src/opts/SkXfermode_opts_arm_neon.cpp
+++ b/src/opts/SkXfermode_opts_arm_neon.cpp
@@ -5,1029 +5,10 @@
* found in the LICENSE file.
*/
-#include "SkXfermode.h"
-#include "SkXfermode_proccoeff.h"
-#include "SkColorPriv.h"
-
-#include <arm_neon.h>
-#include "SkColor_opts_neon.h"
-#include "SkXfermode_opts_arm_neon.h"
+// Including Sk4pxXfermode.h from this file should find SK_ARM_HAS_NEON is defined.
#include "Sk4pxXfermode.h"
-#define SkAlphaMulAlpha(a, b) SkMulDiv255Round(a, b)
-
-
-////////////////////////////////////////////////////////////////////////////////
-// NEONized skia functions
-////////////////////////////////////////////////////////////////////////////////
-
-static inline uint8x8_t SkAlphaMulAlpha_neon8(uint8x8_t color, uint8x8_t alpha) {
- uint16x8_t tmp;
- uint8x8_t ret;
-
- tmp = vmull_u8(color, alpha);
- tmp = vaddq_u16(tmp, vdupq_n_u16(128));
- tmp = vaddq_u16(tmp, vshrq_n_u16(tmp, 8));
-
- ret = vshrn_n_u16(tmp, 8);
-
- return ret;
-}
-
-static inline uint16x8_t SkAlphaMulAlpha_neon8_16(uint8x8_t color, uint8x8_t alpha) {
- uint16x8_t ret;
-
- ret = vmull_u8(color, alpha);
- ret = vaddq_u16(ret, vdupq_n_u16(128));
- ret = vaddq_u16(ret, vshrq_n_u16(ret, 8));
-
- ret = vshrq_n_u16(ret, 8);
-
- return ret;
-}
-
-static inline uint8x8_t SkDiv255Round_neon8_32_8(int32x4_t p1, int32x4_t p2) {
- uint16x8_t tmp;
-
-#ifdef SK_CPU_ARM64
- tmp = vmovn_high_u32(vmovn_u32(vreinterpretq_u32_s32(p1)),
- vreinterpretq_u32_s32(p2));
-#else
- tmp = vcombine_u16(vmovn_u32(vreinterpretq_u32_s32(p1)),
- vmovn_u32(vreinterpretq_u32_s32(p2)));
-#endif
-
- tmp += vdupq_n_u16(128);
- tmp += vshrq_n_u16(tmp, 8);
-
- return vshrn_n_u16(tmp, 8);
-}
-
-static inline uint16x8_t SkDiv255Round_neon8_16_16(uint16x8_t prod) {
- prod += vdupq_n_u16(128);
- prod += vshrq_n_u16(prod, 8);
-
- return vshrq_n_u16(prod, 8);
-}
-
-static inline uint8x8_t clamp_div255round_simd8_32(int32x4_t val1, int32x4_t val2) {
- uint8x8_t ret;
- uint32x4_t cmp1, cmp2;
- uint16x8_t cmp16;
- uint8x8_t cmp8, cmp8_1;
-
- // Test if <= 0
- cmp1 = vcleq_s32(val1, vdupq_n_s32(0));
- cmp2 = vcleq_s32(val2, vdupq_n_s32(0));
-#ifdef SK_CPU_ARM64
- cmp16 = vmovn_high_u32(vmovn_u32(cmp1), cmp2);
-#else
- cmp16 = vcombine_u16(vmovn_u32(cmp1), vmovn_u32(cmp2));
-#endif
- cmp8_1 = vmovn_u16(cmp16);
-
- // Init to zero
- ret = vdup_n_u8(0);
-
- // Test if >= 255*255
- cmp1 = vcgeq_s32(val1, vdupq_n_s32(255*255));
- cmp2 = vcgeq_s32(val2, vdupq_n_s32(255*255));
-#ifdef SK_CPU_ARM64
- cmp16 = vmovn_high_u32(vmovn_u32(cmp1), cmp2);
-#else
- cmp16 = vcombine_u16(vmovn_u32(cmp1), vmovn_u32(cmp2));
-#endif
- cmp8 = vmovn_u16(cmp16);
-
- // Insert 255 where true
- ret = vbsl_u8(cmp8, vdup_n_u8(255), ret);
-
- // Calc SkDiv255Round
- uint8x8_t div = SkDiv255Round_neon8_32_8(val1, val2);
-
- // Insert where false and previous test false
- cmp8 = cmp8 | cmp8_1;
- ret = vbsl_u8(cmp8, ret, div);
-
- // Return the final combination
- return ret;
-}
-
-////////////////////////////////////////////////////////////////////////////////
-// 1 pixel modeprocs
-////////////////////////////////////////////////////////////////////////////////
-
-// kSrcATop_Mode, //!< [Da, Sc * Da + (1 - Sa) * Dc]
-SkPMColor srcatop_modeproc_neon(SkPMColor src, SkPMColor dst) {
- unsigned sa = SkGetPackedA32(src);
- unsigned da = SkGetPackedA32(dst);
- unsigned isa = 255 - sa;
-
- uint8x8_t vda, visa, vsrc, vdst;
-
- vda = vdup_n_u8(da);
- visa = vdup_n_u8(isa);
-
- uint16x8_t vsrc_wide, vdst_wide;
- vsrc_wide = vmull_u8(vda, vreinterpret_u8_u32(vdup_n_u32(src)));
- vdst_wide = vmull_u8(visa, vreinterpret_u8_u32(vdup_n_u32(dst)));
-
- vsrc_wide += vdupq_n_u16(128);
- vsrc_wide += vshrq_n_u16(vsrc_wide, 8);
-
- vdst_wide += vdupq_n_u16(128);
- vdst_wide += vshrq_n_u16(vdst_wide, 8);
-
- vsrc = vshrn_n_u16(vsrc_wide, 8);
- vdst = vshrn_n_u16(vdst_wide, 8);
-
- vsrc += vdst;
- vsrc = vset_lane_u8(da, vsrc, 3);
-
- return vget_lane_u32(vreinterpret_u32_u8(vsrc), 0);
-}
-
-// kDstATop_Mode, //!< [Sa, Sa * Dc + Sc * (1 - Da)]
-SkPMColor dstatop_modeproc_neon(SkPMColor src, SkPMColor dst) {
- unsigned sa = SkGetPackedA32(src);
- unsigned da = SkGetPackedA32(dst);
- unsigned ida = 255 - da;
-
- uint8x8_t vsa, vida, vsrc, vdst;
-
- vsa = vdup_n_u8(sa);
- vida = vdup_n_u8(ida);
-
- uint16x8_t vsrc_wide, vdst_wide;
- vsrc_wide = vmull_u8(vida, vreinterpret_u8_u32(vdup_n_u32(src)));
- vdst_wide = vmull_u8(vsa, vreinterpret_u8_u32(vdup_n_u32(dst)));
-
- vsrc_wide += vdupq_n_u16(128);
- vsrc_wide += vshrq_n_u16(vsrc_wide, 8);
-
- vdst_wide += vdupq_n_u16(128);
- vdst_wide += vshrq_n_u16(vdst_wide, 8);
-
- vsrc = vshrn_n_u16(vsrc_wide, 8);
- vdst = vshrn_n_u16(vdst_wide, 8);
-
- vsrc += vdst;
- vsrc = vset_lane_u8(sa, vsrc, 3);
-
- return vget_lane_u32(vreinterpret_u32_u8(vsrc), 0);
-}
-
-// kXor_Mode [Sa + Da - 2 * Sa * Da, Sc * (1 - Da) + (1 - Sa) * Dc]
-SkPMColor xor_modeproc_neon(SkPMColor src, SkPMColor dst) {
- unsigned sa = SkGetPackedA32(src);
- unsigned da = SkGetPackedA32(dst);
- unsigned ret_alpha = sa + da - (SkAlphaMulAlpha(sa, da) << 1);
- unsigned isa = 255 - sa;
- unsigned ida = 255 - da;
-
- uint8x8_t vsrc, vdst, visa, vida;
- uint16x8_t vsrc_wide, vdst_wide;
-
- visa = vdup_n_u8(isa);
- vida = vdup_n_u8(ida);
- vsrc = vreinterpret_u8_u32(vdup_n_u32(src));
- vdst = vreinterpret_u8_u32(vdup_n_u32(dst));
-
- vsrc_wide = vmull_u8(vsrc, vida);
- vdst_wide = vmull_u8(vdst, visa);
-
- vsrc_wide += vdupq_n_u16(128);
- vsrc_wide += vshrq_n_u16(vsrc_wide, 8);
-
- vdst_wide += vdupq_n_u16(128);
- vdst_wide += vshrq_n_u16(vdst_wide, 8);
-
- vsrc = vshrn_n_u16(vsrc_wide, 8);
- vdst = vshrn_n_u16(vdst_wide, 8);
-
- vsrc += vdst;
-
- vsrc = vset_lane_u8(ret_alpha, vsrc, 3);
-
- return vget_lane_u32(vreinterpret_u32_u8(vsrc), 0);
-}
-
-// kPlus_Mode
-SkPMColor plus_modeproc_neon(SkPMColor src, SkPMColor dst) {
- uint8x8_t vsrc, vdst;
- vsrc = vreinterpret_u8_u32(vdup_n_u32(src));
- vdst = vreinterpret_u8_u32(vdup_n_u32(dst));
- vsrc = vqadd_u8(vsrc, vdst);
-
- return vget_lane_u32(vreinterpret_u32_u8(vsrc), 0);
-}
-
-// kModulate_Mode
-SkPMColor modulate_modeproc_neon(SkPMColor src, SkPMColor dst) {
- uint8x8_t vsrc, vdst, vres;
- uint16x8_t vres_wide;
-
- vsrc = vreinterpret_u8_u32(vdup_n_u32(src));
- vdst = vreinterpret_u8_u32(vdup_n_u32(dst));
-
- vres_wide = vmull_u8(vsrc, vdst);
-
- vres_wide += vdupq_n_u16(128);
- vres_wide += vshrq_n_u16(vres_wide, 8);
-
- vres = vshrn_n_u16(vres_wide, 8);
-
- return vget_lane_u32(vreinterpret_u32_u8(vres), 0);
-}
-
-////////////////////////////////////////////////////////////////////////////////
-// 8 pixels modeprocs
-////////////////////////////////////////////////////////////////////////////////
-
-uint8x8x4_t dstover_modeproc_neon8(uint8x8x4_t src, uint8x8x4_t dst) {
- uint8x8x4_t ret;
- uint16x8_t src_scale;
-
- src_scale = vsubw_u8(vdupq_n_u16(256), dst.val[NEON_A]);
-
- ret.val[NEON_A] = dst.val[NEON_A] + SkAlphaMul_neon8(src.val[NEON_A], src_scale);
- ret.val[NEON_R] = dst.val[NEON_R] + SkAlphaMul_neon8(src.val[NEON_R], src_scale);
- ret.val[NEON_G] = dst.val[NEON_G] + SkAlphaMul_neon8(src.val[NEON_G], src_scale);
- ret.val[NEON_B] = dst.val[NEON_B] + SkAlphaMul_neon8(src.val[NEON_B], src_scale);
-
- return ret;
-}
-
-uint8x8x4_t srcin_modeproc_neon8(uint8x8x4_t src, uint8x8x4_t dst) {
- uint8x8x4_t ret;
- uint16x8_t scale;
-
- scale = SkAlpha255To256_neon8(dst.val[NEON_A]);
-
- ret.val[NEON_A] = SkAlphaMul_neon8(src.val[NEON_A], scale);
- ret.val[NEON_R] = SkAlphaMul_neon8(src.val[NEON_R], scale);
- ret.val[NEON_G] = SkAlphaMul_neon8(src.val[NEON_G], scale);
- ret.val[NEON_B] = SkAlphaMul_neon8(src.val[NEON_B], scale);
-
- return ret;
-}
-
-uint8x8x4_t dstin_modeproc_neon8(uint8x8x4_t src, uint8x8x4_t dst) {
- uint8x8x4_t ret;
- uint16x8_t scale;
-
- scale = SkAlpha255To256_neon8(src.val[NEON_A]);
-
- ret = SkAlphaMulQ_neon8(dst, scale);
-
- return ret;
-}
-
-uint8x8x4_t srcout_modeproc_neon8(uint8x8x4_t src, uint8x8x4_t dst) {
- uint8x8x4_t ret;
- uint16x8_t scale = vsubw_u8(vdupq_n_u16(256), dst.val[NEON_A]);
-
- ret = SkAlphaMulQ_neon8(src, scale);
-
- return ret;
-}
-
-uint8x8x4_t dstout_modeproc_neon8(uint8x8x4_t src, uint8x8x4_t dst) {
- uint8x8x4_t ret;
- uint16x8_t scale = vsubw_u8(vdupq_n_u16(256), src.val[NEON_A]);
-
- ret = SkAlphaMulQ_neon8(dst, scale);
-
- return ret;
-}
-
-uint8x8x4_t srcatop_modeproc_neon8(uint8x8x4_t src, uint8x8x4_t dst) {
- uint8x8x4_t ret;
- uint8x8_t isa;
-
- isa = vsub_u8(vdup_n_u8(255), src.val[NEON_A]);
-
- ret.val[NEON_A] = dst.val[NEON_A];
- ret.val[NEON_R] = SkAlphaMulAlpha_neon8(src.val[NEON_R], dst.val[NEON_A])
- + SkAlphaMulAlpha_neon8(dst.val[NEON_R], isa);
- ret.val[NEON_G] = SkAlphaMulAlpha_neon8(src.val[NEON_G], dst.val[NEON_A])
- + SkAlphaMulAlpha_neon8(dst.val[NEON_G], isa);
- ret.val[NEON_B] = SkAlphaMulAlpha_neon8(src.val[NEON_B], dst.val[NEON_A])
- + SkAlphaMulAlpha_neon8(dst.val[NEON_B], isa);
-
- return ret;
-}
-
-uint8x8x4_t dstatop_modeproc_neon8(uint8x8x4_t src, uint8x8x4_t dst) {
- uint8x8x4_t ret;
- uint8x8_t ida;
-
- ida = vsub_u8(vdup_n_u8(255), dst.val[NEON_A]);
-
- ret.val[NEON_A] = src.val[NEON_A];
- ret.val[NEON_R] = SkAlphaMulAlpha_neon8(src.val[NEON_R], ida)
- + SkAlphaMulAlpha_neon8(dst.val[NEON_R], src.val[NEON_A]);
- ret.val[NEON_G] = SkAlphaMulAlpha_neon8(src.val[NEON_G], ida)
- + SkAlphaMulAlpha_neon8(dst.val[NEON_G], src.val[NEON_A]);
- ret.val[NEON_B] = SkAlphaMulAlpha_neon8(src.val[NEON_B], ida)
- + SkAlphaMulAlpha_neon8(dst.val[NEON_B], src.val[NEON_A]);
-
- return ret;
-}
-
-uint8x8x4_t xor_modeproc_neon8(uint8x8x4_t src, uint8x8x4_t dst) {
- uint8x8x4_t ret;
- uint8x8_t isa, ida;
- uint16x8_t tmp_wide, tmp_wide2;
-
- isa = vsub_u8(vdup_n_u8(255), src.val[NEON_A]);
- ida = vsub_u8(vdup_n_u8(255), dst.val[NEON_A]);
-
- // First calc alpha
- tmp_wide = vmovl_u8(src.val[NEON_A]);
- tmp_wide = vaddw_u8(tmp_wide, dst.val[NEON_A]);
- tmp_wide2 = vshll_n_u8(SkAlphaMulAlpha_neon8(src.val[NEON_A], dst.val[NEON_A]), 1);
- tmp_wide = vsubq_u16(tmp_wide, tmp_wide2);
- ret.val[NEON_A] = vmovn_u16(tmp_wide);
-
- // Then colors
- ret.val[NEON_R] = SkAlphaMulAlpha_neon8(src.val[NEON_R], ida)
- + SkAlphaMulAlpha_neon8(dst.val[NEON_R], isa);
- ret.val[NEON_G] = SkAlphaMulAlpha_neon8(src.val[NEON_G], ida)
- + SkAlphaMulAlpha_neon8(dst.val[NEON_G], isa);
- ret.val[NEON_B] = SkAlphaMulAlpha_neon8(src.val[NEON_B], ida)
- + SkAlphaMulAlpha_neon8(dst.val[NEON_B], isa);
-
- return ret;
-}
-
-uint8x8x4_t plus_modeproc_neon8(uint8x8x4_t src, uint8x8x4_t dst) {
- uint8x8x4_t ret;
-
- ret.val[NEON_A] = vqadd_u8(src.val[NEON_A], dst.val[NEON_A]);
- ret.val[NEON_R] = vqadd_u8(src.val[NEON_R], dst.val[NEON_R]);
- ret.val[NEON_G] = vqadd_u8(src.val[NEON_G], dst.val[NEON_G]);
- ret.val[NEON_B] = vqadd_u8(src.val[NEON_B], dst.val[NEON_B]);
-
- return ret;
-}
-
-uint8x8x4_t modulate_modeproc_neon8(uint8x8x4_t src, uint8x8x4_t dst) {
- uint8x8x4_t ret;
-
- ret.val[NEON_A] = SkAlphaMulAlpha_neon8(src.val[NEON_A], dst.val[NEON_A]);
- ret.val[NEON_R] = SkAlphaMulAlpha_neon8(src.val[NEON_R], dst.val[NEON_R]);
- ret.val[NEON_G] = SkAlphaMulAlpha_neon8(src.val[NEON_G], dst.val[NEON_G]);
- ret.val[NEON_B] = SkAlphaMulAlpha_neon8(src.val[NEON_B], dst.val[NEON_B]);
-
- return ret;
-}
-
-static inline uint8x8_t srcover_color(uint8x8_t a, uint8x8_t b) {
- uint16x8_t tmp;
-
- tmp = vaddl_u8(a, b);
- tmp -= SkAlphaMulAlpha_neon8_16(a, b);
-
- return vmovn_u16(tmp);
-}
-
-uint8x8x4_t screen_modeproc_neon8(uint8x8x4_t src, uint8x8x4_t dst) {
- uint8x8x4_t ret;
-
- ret.val[NEON_A] = srcover_color(src.val[NEON_A], dst.val[NEON_A]);
- ret.val[NEON_R] = srcover_color(src.val[NEON_R], dst.val[NEON_R]);
- ret.val[NEON_G] = srcover_color(src.val[NEON_G], dst.val[NEON_G]);
- ret.val[NEON_B] = srcover_color(src.val[NEON_B], dst.val[NEON_B]);
-
- return ret;
-}
-
-template <bool overlay>
-static inline uint8x8_t overlay_hardlight_color(uint8x8_t sc, uint8x8_t dc,
- uint8x8_t sa, uint8x8_t da) {
- /*
- * In the end we're gonna use (rc + tmp) with a different rc
- * coming from an alternative.
- * The whole value (rc + tmp) can always be expressed as
- * VAL = COM - SUB in the if case
- * VAL = COM + SUB - sa*da in the else case
- *
- * with COM = 255 * (sc + dc)
- * and SUB = sc*da + dc*sa - 2*dc*sc
- */
-
- // Prepare common subexpressions
- uint16x8_t const255 = vdupq_n_u16(255);
- uint16x8_t sc_plus_dc = vaddl_u8(sc, dc);
- uint16x8_t scda = vmull_u8(sc, da);
- uint16x8_t dcsa = vmull_u8(dc, sa);
- uint16x8_t sada = vmull_u8(sa, da);
-
- // Prepare non common subexpressions
- uint16x8_t dc2, sc2;
- uint32x4_t scdc2_1, scdc2_2;
- if (overlay) {
- dc2 = vshll_n_u8(dc, 1);
- scdc2_1 = vmull_u16(vget_low_u16(dc2), vget_low_u16(vmovl_u8(sc)));
-#ifdef SK_CPU_ARM64
- scdc2_2 = vmull_high_u16(dc2, vmovl_u8(sc));
-#else
- scdc2_2 = vmull_u16(vget_high_u16(dc2), vget_high_u16(vmovl_u8(sc)));
-#endif
- } else {
- sc2 = vshll_n_u8(sc, 1);
- scdc2_1 = vmull_u16(vget_low_u16(sc2), vget_low_u16(vmovl_u8(dc)));
-#ifdef SK_CPU_ARM64
- scdc2_2 = vmull_high_u16(sc2, vmovl_u8(dc));
-#else
- scdc2_2 = vmull_u16(vget_high_u16(sc2), vget_high_u16(vmovl_u8(dc)));
-#endif
- }
-
- // Calc COM
- int32x4_t com1, com2;
- com1 = vreinterpretq_s32_u32(
- vmull_u16(vget_low_u16(const255), vget_low_u16(sc_plus_dc)));
- com2 = vreinterpretq_s32_u32(
-#ifdef SK_CPU_ARM64
- vmull_high_u16(const255, sc_plus_dc));
-#else
- vmull_u16(vget_high_u16(const255), vget_high_u16(sc_plus_dc)));
-#endif
-
- // Calc SUB
- int32x4_t sub1, sub2;
- sub1 = vreinterpretq_s32_u32(vaddl_u16(vget_low_u16(scda), vget_low_u16(dcsa)));
-#ifdef SK_CPU_ARM64
- sub2 = vreinterpretq_s32_u32(vaddl_high_u16(scda, dcsa));
-#else
- sub2 = vreinterpretq_s32_u32(vaddl_u16(vget_high_u16(scda), vget_high_u16(dcsa)));
-#endif
- sub1 = vsubq_s32(sub1, vreinterpretq_s32_u32(scdc2_1));
- sub2 = vsubq_s32(sub2, vreinterpretq_s32_u32(scdc2_2));
-
- // Compare 2*dc <= da
- uint16x8_t cmp;
-
- if (overlay) {
- cmp = vcleq_u16(dc2, vmovl_u8(da));
- } else {
- cmp = vcleq_u16(sc2, vmovl_u8(sa));
- }
-
- // Prepare variables
- int32x4_t val1_1, val1_2;
- int32x4_t val2_1, val2_2;
- uint32x4_t cmp1, cmp2;
-
- // Doing a signed lengthening allows to save a few instructions
- // thanks to sign extension.
- cmp1 = vreinterpretq_u32_s32(vmovl_s16(vreinterpret_s16_u16(vget_low_u16(cmp))));
-#ifdef SK_CPU_ARM64
- cmp2 = vreinterpretq_u32_s32(vmovl_high_s16(vreinterpretq_s16_u16(cmp)));
-#else
- cmp2 = vreinterpretq_u32_s32(vmovl_s16(vreinterpret_s16_u16(vget_high_u16(cmp))));
-#endif
-
- // Calc COM - SUB
- val1_1 = com1 - sub1;
- val1_2 = com2 - sub2;
-
- // Calc COM + SUB - sa*da
- val2_1 = com1 + sub1;
- val2_2 = com2 + sub2;
-
- val2_1 = vsubq_s32(val2_1, vreinterpretq_s32_u32(vmovl_u16(vget_low_u16(sada))));
-#ifdef SK_CPU_ARM64
- val2_2 = vsubq_s32(val2_2, vreinterpretq_s32_u32(vmovl_high_u16(sada)));
-#else
- val2_2 = vsubq_s32(val2_2, vreinterpretq_s32_u32(vmovl_u16(vget_high_u16(sada))));
-#endif
-
- // Insert where needed
- val1_1 = vbslq_s32(cmp1, val1_1, val2_1);
- val1_2 = vbslq_s32(cmp2, val1_2, val2_2);
-
- // Call the clamp_div255round function
- return clamp_div255round_simd8_32(val1_1, val1_2);
-}
-
-static inline uint8x8_t overlay_color(uint8x8_t sc, uint8x8_t dc,
- uint8x8_t sa, uint8x8_t da) {
- return overlay_hardlight_color<true>(sc, dc, sa, da);
-}
-
-uint8x8x4_t overlay_modeproc_neon8(uint8x8x4_t src, uint8x8x4_t dst) {
- uint8x8x4_t ret;
-
- ret.val[NEON_A] = srcover_color(src.val[NEON_A], dst.val[NEON_A]);
- ret.val[NEON_R] = overlay_color(src.val[NEON_R], dst.val[NEON_R],
- src.val[NEON_A], dst.val[NEON_A]);
- ret.val[NEON_G] = overlay_color(src.val[NEON_G], dst.val[NEON_G],
- src.val[NEON_A], dst.val[NEON_A]);
- ret.val[NEON_B] = overlay_color(src.val[NEON_B], dst.val[NEON_B],
- src.val[NEON_A], dst.val[NEON_A]);
-
- return ret;
-}
-
-template <bool lighten>
-static inline uint8x8_t lighten_darken_color(uint8x8_t sc, uint8x8_t dc,
- uint8x8_t sa, uint8x8_t da) {
- uint16x8_t sd, ds, cmp, tmp, tmp2;
-
- // Prepare
- sd = vmull_u8(sc, da);
- ds = vmull_u8(dc, sa);
-
- // Do test
- if (lighten) {
- cmp = vcgtq_u16(sd, ds);
- } else {
- cmp = vcltq_u16(sd, ds);
- }
-
- // Assign if
- tmp = vaddl_u8(sc, dc);
- tmp2 = tmp;
- tmp -= SkDiv255Round_neon8_16_16(ds);
-
- // Calc else
- tmp2 -= SkDiv255Round_neon8_16_16(sd);
-
- // Insert where needed
- tmp = vbslq_u16(cmp, tmp, tmp2);
-
- return vmovn_u16(tmp);
-}
-
-static inline uint8x8_t darken_color(uint8x8_t sc, uint8x8_t dc,
- uint8x8_t sa, uint8x8_t da) {
- return lighten_darken_color<false>(sc, dc, sa, da);
-}
-
-uint8x8x4_t darken_modeproc_neon8(uint8x8x4_t src, uint8x8x4_t dst) {
- uint8x8x4_t ret;
-
- ret.val[NEON_A] = srcover_color(src.val[NEON_A], dst.val[NEON_A]);
- ret.val[NEON_R] = darken_color(src.val[NEON_R], dst.val[NEON_R],
- src.val[NEON_A], dst.val[NEON_A]);
- ret.val[NEON_G] = darken_color(src.val[NEON_G], dst.val[NEON_G],
- src.val[NEON_A], dst.val[NEON_A]);
- ret.val[NEON_B] = darken_color(src.val[NEON_B], dst.val[NEON_B],
- src.val[NEON_A], dst.val[NEON_A]);
-
- return ret;
-}
-
-static inline uint8x8_t lighten_color(uint8x8_t sc, uint8x8_t dc,
- uint8x8_t sa, uint8x8_t da) {
- return lighten_darken_color<true>(sc, dc, sa, da);
-}
-
-uint8x8x4_t lighten_modeproc_neon8(uint8x8x4_t src, uint8x8x4_t dst) {
- uint8x8x4_t ret;
-
- ret.val[NEON_A] = srcover_color(src.val[NEON_A], dst.val[NEON_A]);
- ret.val[NEON_R] = lighten_color(src.val[NEON_R], dst.val[NEON_R],
- src.val[NEON_A], dst.val[NEON_A]);
- ret.val[NEON_G] = lighten_color(src.val[NEON_G], dst.val[NEON_G],
- src.val[NEON_A], dst.val[NEON_A]);
- ret.val[NEON_B] = lighten_color(src.val[NEON_B], dst.val[NEON_B],
- src.val[NEON_A], dst.val[NEON_A]);
-
- return ret;
-}
-
-static inline uint8x8_t hardlight_color(uint8x8_t sc, uint8x8_t dc,
- uint8x8_t sa, uint8x8_t da) {
- return overlay_hardlight_color<false>(sc, dc, sa, da);
-}
-
-uint8x8x4_t hardlight_modeproc_neon8(uint8x8x4_t src, uint8x8x4_t dst) {
- uint8x8x4_t ret;
-
- ret.val[NEON_A] = srcover_color(src.val[NEON_A], dst.val[NEON_A]);
- ret.val[NEON_R] = hardlight_color(src.val[NEON_R], dst.val[NEON_R],
- src.val[NEON_A], dst.val[NEON_A]);
- ret.val[NEON_G] = hardlight_color(src.val[NEON_G], dst.val[NEON_G],
- src.val[NEON_A], dst.val[NEON_A]);
- ret.val[NEON_B] = hardlight_color(src.val[NEON_B], dst.val[NEON_B],
- src.val[NEON_A], dst.val[NEON_A]);
-
- return ret;
-}
-
-static inline uint8x8_t difference_color(uint8x8_t sc, uint8x8_t dc,
- uint8x8_t sa, uint8x8_t da) {
- uint16x8_t sd, ds, tmp;
- int16x8_t val;
-
- sd = vmull_u8(sc, da);
- ds = vmull_u8(dc, sa);
-
- tmp = vminq_u16(sd, ds);
- tmp = SkDiv255Round_neon8_16_16(tmp);
- tmp = vshlq_n_u16(tmp, 1);
-
- val = vreinterpretq_s16_u16(vaddl_u8(sc, dc));
-
- val -= vreinterpretq_s16_u16(tmp);
-
- val = vmaxq_s16(val, vdupq_n_s16(0));
- val = vminq_s16(val, vdupq_n_s16(255));
-
- return vmovn_u16(vreinterpretq_u16_s16(val));
-}
-
-uint8x8x4_t difference_modeproc_neon8(uint8x8x4_t src, uint8x8x4_t dst) {
- uint8x8x4_t ret;
-
- ret.val[NEON_A] = srcover_color(src.val[NEON_A], dst.val[NEON_A]);
- ret.val[NEON_R] = difference_color(src.val[NEON_R], dst.val[NEON_R],
- src.val[NEON_A], dst.val[NEON_A]);
- ret.val[NEON_G] = difference_color(src.val[NEON_G], dst.val[NEON_G],
- src.val[NEON_A], dst.val[NEON_A]);
- ret.val[NEON_B] = difference_color(src.val[NEON_B], dst.val[NEON_B],
- src.val[NEON_A], dst.val[NEON_A]);
-
- return ret;
-}
-
-static inline uint8x8_t exclusion_color(uint8x8_t sc, uint8x8_t dc,
- uint8x8_t sa, uint8x8_t da) {
- /* The equation can be simplified to 255(sc + dc) - 2 * sc * dc */
-
- uint16x8_t sc_plus_dc, scdc, const255;
- int32x4_t term1_1, term1_2, term2_1, term2_2;
-
- /* Calc (sc + dc) and (sc * dc) */
- sc_plus_dc = vaddl_u8(sc, dc);
- scdc = vmull_u8(sc, dc);
-
- /* Prepare constants */
- const255 = vdupq_n_u16(255);
-
- /* Calc the first term */
- term1_1 = vreinterpretq_s32_u32(
- vmull_u16(vget_low_u16(const255), vget_low_u16(sc_plus_dc)));
- term1_2 = vreinterpretq_s32_u32(
-#ifdef SK_CPU_ARM64
- vmull_high_u16(const255, sc_plus_dc));
-#else
- vmull_u16(vget_high_u16(const255), vget_high_u16(sc_plus_dc)));
-#endif
-
- /* Calc the second term */
- term2_1 = vreinterpretq_s32_u32(vshll_n_u16(vget_low_u16(scdc), 1));
-#ifdef SK_CPU_ARM64
- term2_2 = vreinterpretq_s32_u32(vshll_high_n_u16(scdc, 1));
-#else
- term2_2 = vreinterpretq_s32_u32(vshll_n_u16(vget_high_u16(scdc), 1));
-#endif
-
- return clamp_div255round_simd8_32(term1_1 - term2_1, term1_2 - term2_2);
-}
-
-uint8x8x4_t exclusion_modeproc_neon8(uint8x8x4_t src, uint8x8x4_t dst) {
- uint8x8x4_t ret;
-
- ret.val[NEON_A] = srcover_color(src.val[NEON_A], dst.val[NEON_A]);
- ret.val[NEON_R] = exclusion_color(src.val[NEON_R], dst.val[NEON_R],
- src.val[NEON_A], dst.val[NEON_A]);
- ret.val[NEON_G] = exclusion_color(src.val[NEON_G], dst.val[NEON_G],
- src.val[NEON_A], dst.val[NEON_A]);
- ret.val[NEON_B] = exclusion_color(src.val[NEON_B], dst.val[NEON_B],
- src.val[NEON_A], dst.val[NEON_A]);
-
- return ret;
-}
-
-static inline uint8x8_t blendfunc_multiply_color(uint8x8_t sc, uint8x8_t dc,
- uint8x8_t sa, uint8x8_t da) {
- uint32x4_t val1, val2;
- uint16x8_t scdc, t1, t2;
-
- t1 = vmull_u8(sc, vdup_n_u8(255) - da);
- t2 = vmull_u8(dc, vdup_n_u8(255) - sa);
- scdc = vmull_u8(sc, dc);
-
- val1 = vaddl_u16(vget_low_u16(t1), vget_low_u16(t2));
-#ifdef SK_CPU_ARM64
- val2 = vaddl_high_u16(t1, t2);
-#else
- val2 = vaddl_u16(vget_high_u16(t1), vget_high_u16(t2));
-#endif
-
- val1 = vaddw_u16(val1, vget_low_u16(scdc));
-#ifdef SK_CPU_ARM64
- val2 = vaddw_high_u16(val2, scdc);
-#else
- val2 = vaddw_u16(val2, vget_high_u16(scdc));
-#endif
-
- return clamp_div255round_simd8_32(
- vreinterpretq_s32_u32(val1), vreinterpretq_s32_u32(val2));
-}
-
-uint8x8x4_t multiply_modeproc_neon8(uint8x8x4_t src, uint8x8x4_t dst) {
- uint8x8x4_t ret;
-
- ret.val[NEON_A] = srcover_color(src.val[NEON_A], dst.val[NEON_A]);
- ret.val[NEON_R] = blendfunc_multiply_color(src.val[NEON_R], dst.val[NEON_R],
- src.val[NEON_A], dst.val[NEON_A]);
- ret.val[NEON_G] = blendfunc_multiply_color(src.val[NEON_G], dst.val[NEON_G],
- src.val[NEON_A], dst.val[NEON_A]);
- ret.val[NEON_B] = blendfunc_multiply_color(src.val[NEON_B], dst.val[NEON_B],
- src.val[NEON_A], dst.val[NEON_A]);
-
- return ret;
-}
-
-////////////////////////////////////////////////////////////////////////////////
-
-typedef uint8x8x4_t (*SkXfermodeProcSIMD)(uint8x8x4_t src, uint8x8x4_t dst);
-
-extern SkXfermodeProcSIMD gNEONXfermodeProcs[];
-
-void SkNEONProcCoeffXfermode::xfer32(SkPMColor* SK_RESTRICT dst,
- const SkPMColor* SK_RESTRICT src, int count,
- const SkAlpha* SK_RESTRICT aa) const {
- SkASSERT(dst && src && count >= 0);
-
- SkXfermodeProc proc = this->getProc();
- SkXfermodeProcSIMD procSIMD = reinterpret_cast<SkXfermodeProcSIMD>(fProcSIMD);
- SkASSERT(procSIMD != NULL);
-
- if (NULL == aa) {
- // Unrolled NEON code
- // We'd like to just do this (modulo a few casts):
- // vst4_u8(dst, procSIMD(vld4_u8(src), vld4_u8(dst)));
- // src += 8;
- // dst += 8;
- // but that tends to generate miserable code. Here are a bunch of faster
- // workarounds for different architectures and compilers.
- while (count >= 8) {
-
-#ifdef SK_CPU_ARM32
- uint8x8x4_t vsrc, vdst, vres;
-#if (__GNUC__ > 4) || ((__GNUC__ == 4) && (__GNUC_MINOR__ > 6))
- asm volatile (
- "vld4.u8 %h[vsrc], [%[src]]! \t\n"
- "vld4.u8 %h[vdst], [%[dst]] \t\n"
- : [vsrc] "=w" (vsrc), [vdst] "=w" (vdst), [src] "+&r" (src)
- : [dst] "r" (dst)
- :
- );
-#else
- register uint8x8_t d0 asm("d0");
- register uint8x8_t d1 asm("d1");
- register uint8x8_t d2 asm("d2");
- register uint8x8_t d3 asm("d3");
- register uint8x8_t d4 asm("d4");
- register uint8x8_t d5 asm("d5");
- register uint8x8_t d6 asm("d6");
- register uint8x8_t d7 asm("d7");
-
- asm volatile (
- "vld4.u8 {d0-d3},[%[src]]!;"
- "vld4.u8 {d4-d7},[%[dst]];"
- : "=w" (d0), "=w" (d1), "=w" (d2), "=w" (d3),
- "=w" (d4), "=w" (d5), "=w" (d6), "=w" (d7),
- [src] "+&r" (src)
- : [dst] "r" (dst)
- :
- );
- vsrc.val[0] = d0; vdst.val[0] = d4;
- vsrc.val[1] = d1; vdst.val[1] = d5;
- vsrc.val[2] = d2; vdst.val[2] = d6;
- vsrc.val[3] = d3; vdst.val[3] = d7;
-#endif
-
- vres = procSIMD(vsrc, vdst);
-
- vst4_u8((uint8_t*)dst, vres);
-
- dst += 8;
-
-#else // #ifdef SK_CPU_ARM32
-
- asm volatile (
- "ld4 {v0.8b - v3.8b}, [%[src]], #32 \t\n"
- "ld4 {v4.8b - v7.8b}, [%[dst]] \t\n"
- "blr %[proc] \t\n"
- "st4 {v0.8b - v3.8b}, [%[dst]], #32 \t\n"
- : [src] "+&r" (src), [dst] "+&r" (dst)
- : [proc] "r" (procSIMD)
- : "cc", "memory",
- /* We don't know what proc is going to clobber so we must
- * add everything that is not callee-saved.
- */
- "x0", "x1", "x2", "x3", "x4", "x5", "x6", "x7", "x8", "x9",
- "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x17", "x18",
- "x30", /* x30 implicitly clobbered by blr */
- "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v16", "v17",
- "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26",
- "v27", "v28", "v29", "v30", "v31"
- );
-
-#endif // #ifdef SK_CPU_ARM32
-
- count -= 8;
- }
- // Leftovers
- for (int i = 0; i < count; i++) {
- dst[i] = proc(src[i], dst[i]);
- }
- } else {
- for (int i = count - 1; i >= 0; --i) {
- unsigned a = aa[i];
- if (0 != a) {
- SkPMColor dstC = dst[i];
- SkPMColor C = proc(src[i], dstC);
- if (a != 0xFF) {
- C = SkFourByteInterp_neon(C, dstC, a);
- }
- dst[i] = C;
- }
- }
- }
-}
-
-void SkNEONProcCoeffXfermode::xfer16(uint16_t* SK_RESTRICT dst,
- const SkPMColor* SK_RESTRICT src, int count,
- const SkAlpha* SK_RESTRICT aa) const {
- SkASSERT(dst && src && count >= 0);
-
- SkXfermodeProc proc = this->getProc();
- SkXfermodeProcSIMD procSIMD = reinterpret_cast<SkXfermodeProcSIMD>(fProcSIMD);
- SkASSERT(procSIMD != NULL);
-
- if (NULL == aa) {
- while(count >= 8) {
- uint16x8_t vdst, vres16;
- uint8x8x4_t vdst32, vsrc, vres;
-
- vdst = vld1q_u16(dst);
-
-#ifdef SK_CPU_ARM64
- vsrc = vld4_u8((uint8_t*)src);
-#else
-#if (__GNUC__ > 4) || ((__GNUC__ == 4) && (__GNUC_MINOR__ > 6))
- asm volatile (
- "vld4.u8 %h[vsrc], [%[src]]! \t\n"
- : [vsrc] "=w" (vsrc), [src] "+&r" (src)
- : :
- );
-#else
- register uint8x8_t d0 asm("d0");
- register uint8x8_t d1 asm("d1");
- register uint8x8_t d2 asm("d2");
- register uint8x8_t d3 asm("d3");
-
- asm volatile (
- "vld4.u8 {d0-d3},[%[src]]!;"
- : "=w" (d0), "=w" (d1), "=w" (d2), "=w" (d3),
- [src] "+&r" (src)
- : :
- );
- vsrc.val[0] = d0;
- vsrc.val[1] = d1;
- vsrc.val[2] = d2;
- vsrc.val[3] = d3;
-#endif
-#endif // #ifdef SK_CPU_ARM64
-
- vdst32 = SkPixel16ToPixel32_neon8(vdst);
- vres = procSIMD(vsrc, vdst32);
- vres16 = SkPixel32ToPixel16_neon8(vres);
-
- vst1q_u16(dst, vres16);
-
- count -= 8;
- dst += 8;
-#ifdef SK_CPU_ARM64
- src += 8;
-#endif
- }
- for (int i = 0; i < count; i++) {
- SkPMColor dstC = SkPixel16ToPixel32(dst[i]);
- dst[i] = SkPixel32ToPixel16_ToU16(proc(src[i], dstC));
- }
- } else {
- for (int i = count - 1; i >= 0; --i) {
- unsigned a = aa[i];
- if (0 != a) {
- SkPMColor dstC = SkPixel16ToPixel32(dst[i]);
- SkPMColor C = proc(src[i], dstC);
- if (0xFF != a) {
- C = SkFourByteInterp_neon(C, dstC, a);
- }
- dst[i] = SkPixel32ToPixel16_ToU16(C);
- }
- }
- }
-}
-
-#ifndef SK_IGNORE_TO_STRING
-void SkNEONProcCoeffXfermode::toString(SkString* str) const {
- this->INHERITED::toString(str);
-}
-#endif
-
-////////////////////////////////////////////////////////////////////////////////
-
-SkXfermodeProcSIMD gNEONXfermodeProcs[] = {
- NULL, // kClear_Mode
- NULL, // kSrc_Mode
- NULL, // kDst_Mode
- NULL, // kSrcOver_Mode
- dstover_modeproc_neon8,
- srcin_modeproc_neon8,
- dstin_modeproc_neon8,
- srcout_modeproc_neon8,
- dstout_modeproc_neon8,
- srcatop_modeproc_neon8,
- dstatop_modeproc_neon8,
- xor_modeproc_neon8,
- plus_modeproc_neon8,
- modulate_modeproc_neon8,
- screen_modeproc_neon8,
-
- overlay_modeproc_neon8,
- darken_modeproc_neon8,
- lighten_modeproc_neon8,
- NULL, // kColorDodge_Mode
- NULL, // kColorBurn_Mode
- hardlight_modeproc_neon8,
- NULL, // kSoftLight_Mode
- difference_modeproc_neon8,
- exclusion_modeproc_neon8,
- multiply_modeproc_neon8,
-
- NULL, // kHue_Mode
- NULL, // kSaturation_Mode
- NULL, // kColor_Mode
- NULL, // kLuminosity_Mode
-};
-
-SK_COMPILE_ASSERT(
- SK_ARRAY_COUNT(gNEONXfermodeProcs) == SkXfermode::kLastMode + 1,
- mode_count_arm
-);
-
-SkXfermodeProc gNEONXfermodeProcs1[] = {
- NULL, // kClear_Mode
- NULL, // kSrc_Mode
- NULL, // kDst_Mode
- NULL, // kSrcOver_Mode
- NULL, // kDstOver_Mode
- NULL, // kSrcIn_Mode
- NULL, // kDstIn_Mode
- NULL, // kSrcOut_Mode
- NULL, // kDstOut_Mode
- srcatop_modeproc_neon,
- dstatop_modeproc_neon,
- xor_modeproc_neon,
- plus_modeproc_neon,
- modulate_modeproc_neon,
- NULL, // kScreen_Mode
-
- NULL, // kOverlay_Mode
- NULL, // kDarken_Mode
- NULL, // kLighten_Mode
- NULL, // kColorDodge_Mode
- NULL, // kColorBurn_Mode
- NULL, // kHardLight_Mode
- NULL, // kSoftLight_Mode
- NULL, // kDifference_Mode
- NULL, // kExclusion_Mode
- NULL, // kMultiply_Mode
-
- NULL, // kHue_Mode
- NULL, // kSaturation_Mode
- NULL, // kColor_Mode
- NULL, // kLuminosity_Mode
-};
-
-SK_COMPILE_ASSERT(
- SK_ARRAY_COUNT(gNEONXfermodeProcs1) == SkXfermode::kLastMode + 1,
- mode1_count_arm
-);
-
-SkProcCoeffXfermode* SkPlatformXfermodeFactory_impl_neon(const ProcCoeff& rec,
- SkXfermode::Mode mode) {
- if (auto xfermode = SkCreate4pxXfermode(rec, mode)) {
- return xfermode;
- }
- // TODO: Sk4pxXfermode now covers every mode found in this file. Delete them all!
- if (auto proc = gNEONXfermodeProcs[mode]) {
- return SkNEW_ARGS(SkNEONProcCoeffXfermode, (rec, mode, (void*)proc));
- }
- return NULL;
-}
-
-SkXfermodeProc SkPlatformXfermodeProcFactory_impl_neon(SkXfermode::Mode mode) {
- return gNEONXfermodeProcs1[mode];
+SkProcCoeffXfermode* SkPlatformXfermodeFactory_impl_neon(const ProcCoeff& r, SkXfermode::Mode m);
+SkProcCoeffXfermode* SkPlatformXfermodeFactory_impl_neon(const ProcCoeff& r, SkXfermode::Mode m) {
+ return SkCreate4pxXfermode(r, m);
}
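
After this change SkXfermode_opts_arm_neon.cpp only forwards to SkCreate4pxXfermode, so the shared 4-pixels-at-a-time kernels replace the hand-written NEON procs deleted above. As a rough, hypothetical sketch of that 4-wide loop shape (not Skia's actual Sk4px API, and using a placeholder blend):

// Illustrative only: blend pixels in groups of 4 with a scalar tail, the loop
// shape Sk4pxXfermode uses (its Sk4px type lowers the 4-wide body to NEON/SSE
// registers; this sketch stays portable).
#include <cstdint>
#include <cstdio>
#include <cstddef>

using PMColor = uint32_t;

// Placeholder per-pixel xfermode; a real mode works on premultiplied channels.
static inline PMColor blend_one(PMColor src, PMColor dst) {
    return src | dst;
}

static void xfer32(PMColor dst[], const PMColor src[], size_t count) {
    size_t i = 0;
    for (; i + 4 <= count; i += 4) {       // 4-wide body (one SIMD op in Sk4px)
        for (size_t k = 0; k < 4; ++k) {
            dst[i + k] = blend_one(src[i + k], dst[i + k]);
        }
    }
    for (; i < count; ++i) {               // scalar tail for the leftovers
        dst[i] = blend_one(src[i], dst[i]);
    }
}

int main() {
    PMColor src[6] = {1, 2, 3, 4, 5, 6};
    PMColor dst[6] = {8, 8, 8, 8, 8, 8};
    xfer32(dst, src, 6);
    std::printf("dst[0]=%u dst[5]=%u\n", dst[0], dst[5]);
    return 0;
}
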
diff --git a/src/opts/SkXfermode_opts_arm_neon.h b/src/opts/SkXfermode_opts_arm_neon.h
deleted file mode 100644
index 206892edd6..0000000000
--- a/src/opts/SkXfermode_opts_arm_neon.h
+++ /dev/null
@@ -1,39 +0,0 @@
-/*
- * Copyright 2015 Google Inc.
- *
- * Use of this source code is governed by a BSD-style license that can be
- * found in the LICENSE file.
- */
-
-#ifndef SkXfermode_opts_arm_neon_DEFINED
-#define SkXfermode_opts_arm_neon_DEFINED
-
-#include "SkXfermode_proccoeff.h"
-
-class SkNEONProcCoeffXfermode : public SkProcCoeffXfermode {
-public:
- SkNEONProcCoeffXfermode(const ProcCoeff& rec, SkXfermode::Mode mode,
- void* procSIMD)
- : INHERITED(rec, mode), fProcSIMD(procSIMD) {}
-
- void xfer32(SkPMColor dst[], const SkPMColor src[], int count,
- const SkAlpha aa[]) const override;
- void xfer16(uint16_t* SK_RESTRICT dst, const SkPMColor* SK_RESTRICT src,
- int count, const SkAlpha* SK_RESTRICT aa) const override;
-
- SK_TO_STRING_OVERRIDE()
-
-private:
- // void* is used to avoid pulling arm_neon.h in the core and having to build
- // it with -mfpu=neon.
- void* fProcSIMD;
- typedef SkProcCoeffXfermode INHERITED;
-};
-
-extern SkPMColor srcatop_modeproc_neon(SkPMColor src, SkPMColor dst);
-extern SkPMColor dstatop_modeproc_neon(SkPMColor src, SkPMColor dst);
-extern SkPMColor xor_modeproc_neon(SkPMColor src, SkPMColor dst);
-extern SkPMColor plus_modeproc_neon(SkPMColor src, SkPMColor dst);
-extern SkPMColor modulate_modeproc_neon(SkPMColor src, SkPMColor dst);
-
-#endif //#ifdef SkXfermode_opts_arm_neon_DEFINED
diff --git a/src/opts/SkXfermode_opts_none.cpp b/src/opts/SkXfermode_opts_none.cpp
index a2f9b479dd..832d92eccd 100644
--- a/src/opts/SkXfermode_opts_none.cpp
+++ b/src/opts/SkXfermode_opts_none.cpp
@@ -8,17 +8,13 @@
#include "SkXfermode.h"
#include "SkXfermode_proccoeff.h"
-// The prototypes below are for Clang
-extern SkProcCoeffXfermode* SkPlatformXfermodeFactory(const ProcCoeff& rec,
- SkXfermode::Mode mode);
-extern SkXfermodeProc SkPlatformXfermodeProcFactory(SkXfermode::Mode mode);
-
-SkProcCoeffXfermode* SkPlatformXfermodeFactory(const ProcCoeff& rec,
- SkXfermode::Mode mode) {
+SkProcCoeffXfermode* SkPlatformXfermodeFactory(const ProcCoeff& rec, SkXfermode::Mode mode);
+SkProcCoeffXfermode* SkPlatformXfermodeFactory(const ProcCoeff& rec, SkXfermode::Mode mode) {
return NULL;
}
+SkXfermodeProc SkPlatformXfermodeProcFactory(SkXfermode::Mode mode);
SkXfermodeProc SkPlatformXfermodeProcFactory(SkXfermode::Mode mode) {
return NULL;
}
diff --git a/src/opts/opts_check_x86.cpp b/src/opts/opts_check_x86.cpp
index 2597a2b917..26490eb63d 100644
--- a/src/opts/opts_check_x86.cpp
+++ b/src/opts/opts_check_x86.cpp
@@ -21,8 +21,6 @@
#include "SkRTConf.h"
#include "SkUtils.h"
#include "SkUtils_opts_SSE2.h"
-#include "SkXfermode.h"
-#include "SkXfermode_proccoeff.h"
#if defined(_MSC_VER) && defined(_WIN64)
#include <intrin.h>
@@ -360,34 +358,3 @@ bool SkBoxBlurGetPlatformProcs(SkBoxBlurProc* boxBlurX,
}
return false;
}
-
-////////////////////////////////////////////////////////////////////////////////
-
-extern SkProcCoeffXfermode* SkPlatformXfermodeFactory_impl_SSE2(const ProcCoeff& rec,
- SkXfermode::Mode mode);
-
-SkProcCoeffXfermode* SkPlatformXfermodeFactory_impl(const ProcCoeff& rec,
- SkXfermode::Mode mode);
-
-SkProcCoeffXfermode* SkPlatformXfermodeFactory_impl(const ProcCoeff& rec,
- SkXfermode::Mode mode) {
- return NULL;
-}
-
-SkProcCoeffXfermode* SkPlatformXfermodeFactory(const ProcCoeff& rec,
- SkXfermode::Mode mode);
-
-SkProcCoeffXfermode* SkPlatformXfermodeFactory(const ProcCoeff& rec,
- SkXfermode::Mode mode) {
- if (supports_simd(SK_CPU_SSE_LEVEL_SSE2)) {
- return SkPlatformXfermodeFactory_impl_SSE2(rec, mode);
- } else {
- return SkPlatformXfermodeFactory_impl(rec, mode);
- }
-}
-
-SkXfermodeProc SkPlatformXfermodeProcFactory(SkXfermode::Mode mode);
-
-SkXfermodeProc SkPlatformXfermodeProcFactory(SkXfermode::Mode mode) {
- return NULL;
-}