author     commit-bot@chromium.org <commit-bot@chromium.org@2bbb7eff-a529-9590-31e7-b0007b416f81>  2014-04-09 15:43:46 +0000
committer  commit-bot@chromium.org <commit-bot@chromium.org@2bbb7eff-a529-9590-31e7-b0007b416f81>  2014-04-09 15:43:46 +0000
commit     c524e98f1edf06b53e65543f5f28217fa13b7aa9 (patch)
tree       c455bd103111faa286c8bcd1ee94cee52dccac36 /src/opts/SkColor_opts_SSE2.h
parent     d715aaa33fc52d36f566caf941787a2cca24d85b (diff)
Xfermode: SSE2 implementation of multiply_modeproc
This patch implements the basics for Xfermode SSE optimization. Based on these
basics, an SSE2 implementation of multiply_modeproc is provided; SSE2
implementations for other modes will come in the future. With this patch, the
performance of Xfermode_Multiply improves by about 45%. Here are the data on a
desktop i7-3770:

before:
  Xfermode_Multiply   8888: cmsecs = 33.30   565: cmsecs = 45.65
after:
  Xfermode_Multiply   8888: cmsecs = 17.18   565: cmsecs = 24.87

BUG=
Committed: http://code.google.com/p/skia/source/detail?r=14006
Committed: http://code.google.com/p/skia/source/detail?r=14050

R=mtklein@google.com, robertphillips@google.com

Author: qiankun.miao@intel.com

Review URL: https://codereview.chromium.org/202903004

git-svn-id: http://skia.googlecode.com/svn/trunk@14107 2bbb7eff-a529-9590-31e7-b0007b416f81
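For context, the comment in the new header points at SkAlphaMulAlpha(a, b), i.e. SkMulDiv255Round(a, b), as the scalar routine being vectorized. Below is a rough scalar sketch of that routine and of the per-channel multiply-mode formula it feeds; the _ref names are hypothetical, and the real scalar code in SkXfermode.cpp sums the raw products before a single rounded, clamped divide.

// Rough scalar reference (a sketch, not copied verbatim from the Skia tree).
// SkMulDiv255Round computes a rounded (a * b) / 255.
static inline unsigned SkMulDiv255Round_ref(unsigned a, unsigned b) {
    unsigned prod = a * b + 128;
    return (prod + (prod >> 8)) >> 8;   // rounded division by 255
}

// Multiply mode for one premultiplied channel:
//   Dca' = Sca*(1 - Da) + Dca*(1 - Sa) + Sca*Dca
static inline unsigned multiply_byte_ref(unsigned sc, unsigned dc,
                                         unsigned sa, unsigned da) {
    return SkMulDiv255Round_ref(sc, 255 - da) +
           SkMulDiv255Round_ref(dc, 255 - sa) +
           SkMulDiv255Round_ref(sc, dc);
}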
Diffstat (limited to 'src/opts/SkColor_opts_SSE2.h')
-rw-r--r--  src/opts/SkColor_opts_SSE2.h  131
1 file changed, 123 insertions, 8 deletions
diff --git a/src/opts/SkColor_opts_SSE2.h b/src/opts/SkColor_opts_SSE2.h
index 13a5be5655..24ab6f73c8 100644
--- a/src/opts/SkColor_opts_SSE2.h
+++ b/src/opts/SkColor_opts_SSE2.h
@@ -10,7 +10,39 @@
#include <emmintrin.h>
-static inline __m128i SkMul16ShiftRound_SSE(__m128i a, __m128i b, int shift) {
+// See #define SkAlphaMulAlpha(a, b) SkMulDiv255Round(a, b) in SkXfermode.cpp.
+static inline __m128i SkAlphaMulAlpha_SSE2(const __m128i& a,
+ const __m128i& b) {
+ __m128i prod = _mm_mullo_epi16(a, b);
+ prod = _mm_add_epi32(prod, _mm_set1_epi32(128));
+ prod = _mm_add_epi32(prod, _mm_srli_epi32(prod, 8));
+ prod = _mm_srli_epi32(prod, 8);
+
+ return prod;
+}
+
+static inline __m128i SkGetPackedA32_SSE2(const __m128i& src) {
+ __m128i a = _mm_slli_epi32(src, (24 - SK_A32_SHIFT));
+ return _mm_srli_epi32(a, 24);
+}
+
+static inline __m128i SkGetPackedR32_SSE2(const __m128i& src) {
+ __m128i r = _mm_slli_epi32(src, (24 - SK_R32_SHIFT));
+ return _mm_srli_epi32(r, 24);
+}
+
+static inline __m128i SkGetPackedG32_SSE2(const __m128i& src) {
+ __m128i g = _mm_slli_epi32(src, (24 - SK_G32_SHIFT));
+ return _mm_srli_epi32(g, 24);
+}
+
+static inline __m128i SkGetPackedB32_SSE2(const __m128i& src) {
+ __m128i b = _mm_slli_epi32(src, (24 - SK_B32_SHIFT));
+ return _mm_srli_epi32(b, 24);
+}
+
+static inline __m128i SkMul16ShiftRound_SSE2(const __m128i& a,
+ const __m128i& b, int shift) {
__m128i prod = _mm_mullo_epi16(a, b);
prod = _mm_add_epi16(prod, _mm_set1_epi16(1 << (shift - 1)));
prod = _mm_add_epi16(prod, _mm_srli_epi16(prod, shift));
@@ -19,13 +51,96 @@ static inline __m128i SkMul16ShiftRound_SSE(__m128i a, __m128i b, int shift) {
return prod;
}
-static inline __m128i SkPackRGB16_SSE(__m128i r, __m128i g, __m128i b) {
- r = _mm_slli_epi16(r, SK_R16_SHIFT);
- g = _mm_slli_epi16(g, SK_G16_SHIFT);
- b = _mm_slli_epi16(b, SK_B16_SHIFT);
+static inline __m128i SkPackRGB16_SSE2(const __m128i& r,
+ const __m128i& g, const __m128i& b) {
+ __m128i dr = _mm_slli_epi16(r, SK_R16_SHIFT);
+ __m128i dg = _mm_slli_epi16(g, SK_G16_SHIFT);
+ __m128i db = _mm_slli_epi16(b, SK_B16_SHIFT);
+
+ __m128i c = _mm_or_si128(dr, dg);
+ return _mm_or_si128(c, db);
+}
+
+static inline __m128i SkPackARGB32_SSE2(const __m128i& a, const __m128i& r,
+ const __m128i& g, const __m128i& b) {
+ __m128i da = _mm_slli_epi32(a, SK_A32_SHIFT);
+ __m128i dr = _mm_slli_epi32(r, SK_R32_SHIFT);
+ __m128i dg = _mm_slli_epi32(g, SK_G32_SHIFT);
+ __m128i db = _mm_slli_epi32(b, SK_B32_SHIFT);
+
+ __m128i c = _mm_or_si128(da, dr);
+ c = _mm_or_si128(c, dg);
+ return _mm_or_si128(c, db);
+}
+
+static inline __m128i SkPacked16ToR32_SSE2(const __m128i& src) {
+ __m128i r = _mm_srli_epi32(src, SK_R16_SHIFT);
+ r = _mm_and_si128(r, _mm_set1_epi32(SK_R16_MASK));
+ r = _mm_or_si128(_mm_slli_epi32(r, (8 - SK_R16_BITS)),
+ _mm_srli_epi32(r, (2 * SK_R16_BITS - 8)));
+
+ return r;
+}
+
+static inline __m128i SkPacked16ToG32_SSE2(const __m128i& src) {
+ __m128i g = _mm_srli_epi32(src, SK_G16_SHIFT);
+ g = _mm_and_si128(g, _mm_set1_epi32(SK_G16_MASK));
+ g = _mm_or_si128(_mm_slli_epi32(g, (8 - SK_G16_BITS)),
+ _mm_srli_epi32(g, (2 * SK_G16_BITS - 8)));
+
+ return g;
+}
+
+static inline __m128i SkPacked16ToB32_SSE2(const __m128i& src) {
+ __m128i b = _mm_srli_epi32(src, SK_B16_SHIFT);
+ b = _mm_and_si128(b, _mm_set1_epi32(SK_B16_MASK));
+ b = _mm_or_si128(_mm_slli_epi32(b, (8 - SK_B16_BITS)),
+ _mm_srli_epi32(b, (2 * SK_B16_BITS - 8)));
+
+ return b;
+}
+
+static inline __m128i SkPixel16ToPixel32_SSE2(const __m128i& src) {
+ __m128i r = SkPacked16ToR32_SSE2(src);
+ __m128i g = SkPacked16ToG32_SSE2(src);
+ __m128i b = SkPacked16ToB32_SSE2(src);
+
+ return SkPackARGB32_SSE2(_mm_set1_epi32(0xFF), r, g, b);
+}
+
+static inline __m128i SkPixel32ToPixel16_ToU16_SSE2(const __m128i& src_pixel1,
+ const __m128i& src_pixel2) {
+ // Calculate result r.
+ __m128i r1 = _mm_srli_epi32(src_pixel1,
+ SK_R32_SHIFT + (8 - SK_R16_BITS));
+ r1 = _mm_and_si128(r1, _mm_set1_epi32(SK_R16_MASK));
+ __m128i r2 = _mm_srli_epi32(src_pixel2,
+ SK_R32_SHIFT + (8 - SK_R16_BITS));
+ r2 = _mm_and_si128(r2, _mm_set1_epi32(SK_R16_MASK));
+ __m128i r = _mm_packs_epi32(r1, r2);
+
+ // Calculate result g.
+ __m128i g1 = _mm_srli_epi32(src_pixel1,
+ SK_G32_SHIFT + (8 - SK_G16_BITS));
+ g1 = _mm_and_si128(g1, _mm_set1_epi32(SK_G16_MASK));
+ __m128i g2 = _mm_srli_epi32(src_pixel2,
+ SK_G32_SHIFT + (8 - SK_G16_BITS));
+ g2 = _mm_and_si128(g2, _mm_set1_epi32(SK_G16_MASK));
+ __m128i g = _mm_packs_epi32(g1, g2);
+
+ // Calculate result b.
+ __m128i b1 = _mm_srli_epi32(src_pixel1,
+ SK_B32_SHIFT + (8 - SK_B16_BITS));
+ b1 = _mm_and_si128(b1, _mm_set1_epi32(SK_B16_MASK));
+ __m128i b2 = _mm_srli_epi32(src_pixel2,
+ SK_B32_SHIFT + (8 - SK_B16_BITS));
+ b2 = _mm_and_si128(b2, _mm_set1_epi32(SK_B16_MASK));
+ __m128i b = _mm_packs_epi32(b1, b2);
+
+ // Store 8 16-bit colors in dst.
+ __m128i d_pixel = SkPackRGB16_SSE2(r, g, b);
- __m128i c = _mm_or_si128(r, g);
- return _mm_or_si128(c, b);
+ return d_pixel;
}
-#endif//SkColor_opts_SSE2_DEFINED
+#endif // SkColor_opts_SSE2_DEFINED
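Taken together, the helpers added above are enough to sketch how a four-pixels-at-a-time multiply blend could be assembled. The code below is only an illustration, not the SkXfermode_opts_SSE2.cpp code landed with this change: multiply_byte_SSE2 and multiply_modeproc_SSE2 are hypothetical names, and each product is rounded separately here, whereas the scalar path sums the raw products before one rounded, clamped divide.

#include <emmintrin.h>
// Assumes the helpers from SkColor_opts_SSE2.h above are visible.

// One premultiplied channel of multiply mode for four pixels at once:
//   Dca' = Sca*(1 - Da) + Dca*(1 - Sa) + Sca*Dca   (each product rounded /255)
static inline __m128i multiply_byte_SSE2(const __m128i& sc, const __m128i& dc,
                                         const __m128i& sa, const __m128i& da) {
    const __m128i c255 = _mm_set1_epi32(255);
    __m128i t = SkAlphaMulAlpha_SSE2(sc, _mm_sub_epi32(c255, da));
    t = _mm_add_epi32(t, SkAlphaMulAlpha_SSE2(dc, _mm_sub_epi32(c255, sa)));
    return _mm_add_epi32(t, SkAlphaMulAlpha_SSE2(sc, dc));
}

// Four packed SkPMColor pixels in, four out.
static inline __m128i multiply_modeproc_SSE2(const __m128i& src,
                                             const __m128i& dst) {
    __m128i sa = SkGetPackedA32_SSE2(src);
    __m128i da = SkGetPackedA32_SSE2(dst);
    // Resulting alpha is src-over of the alphas: sa + da - sa*da/255.
    __m128i a = _mm_sub_epi32(_mm_add_epi32(sa, da),
                              SkAlphaMulAlpha_SSE2(sa, da));

    __m128i r = multiply_byte_SSE2(SkGetPackedR32_SSE2(src),
                                   SkGetPackedR32_SSE2(dst), sa, da);
    __m128i g = multiply_byte_SSE2(SkGetPackedG32_SSE2(src),
                                   SkGetPackedG32_SSE2(dst), sa, da);
    __m128i b = multiply_byte_SSE2(SkGetPackedB32_SSE2(src),
                                   SkGetPackedB32_SSE2(dst), sa, da);

    return SkPackARGB32_SSE2(a, r, g, b);
}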