author    | msarett <msarett@google.com>             | 2016-02-16 08:04:34 -0800
committer | Commit bot <commit-bot@chromium.org>     | 2016-02-16 08:04:34 -0800
commit    | c29cd72df2ac355201486be27c90e60b0a946e4b (patch)
tree      | fa038558811363876eff6c02dbea7665e20bfdec
parent    | 18fab30d7c4858ef2521e0380573aac5a21b2ed9 (diff)
Make png filter functions compatible with libpng
We hope to upstream these functions to libpng (a C library).
Let's make the code that Skia tests on look like the code
that we are submitting.
BUG=skia:
GOLD_TRYBOT_URL= https://gold.skia.org/search2?unt=true&query=source_type%3Dgm&master=false&issue=1699953002
Review URL: https://codereview.chromium.org/1699953002
-rw-r--r-- | src/codec/SkPngFilters.cpp | 169
1 file changed, 117 insertions, 52 deletions
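
For readers skimming the diff below: these functions undo PNG's per-row filtering, where each byte is predicted from the already-reconstructed byte to its left (a), the byte above it (b), and the byte above-left (c). A minimal scalar sketch of that reference behaviour, based on the PNG specification rather than on Skia or libpng sources (the helper names `paeth_predictor` and `defilter_row` are made up for illustration):

```c
#include <stdint.h>
#include <stdlib.h>

/* Hypothetical scalar reference for the Sub (1), Avg (3), and Paeth (4) filters.
 * bpp is bytes per pixel (3 or 4 below); bytes off the left edge or above the
 * first row are treated as zero. */
static uint8_t paeth_predictor(uint8_t a, uint8_t b, uint8_t c) {
    int p  = a + b - c;
    int pa = abs(p - a), pb = abs(p - b), pc = abs(p - c);
    if (pa <= pb && pa <= pc) return a;   /* ties favor a over b over c */
    if (pb <= pc)             return b;
    return c;
}

static void defilter_row(int filter, int bpp, int rowbytes,
                         uint8_t* row, const uint8_t* prev) {
    for (int i = 0; i < rowbytes; i++) {
        uint8_t a = (i >= bpp)         ? row[i - bpp]  : 0;  /* left    */
        uint8_t b = prev               ? prev[i]       : 0;  /* up      */
        uint8_t c = (prev && i >= bpp) ? prev[i - bpp] : 0;  /* up-left */
        switch (filter) {
            case 1: row[i] += a;                        break;  /* Sub             */
            case 3: row[i] += (uint8_t)((a + b) / 2);   break;  /* Avg (truncated) */
            case 4: row[i] += paeth_predictor(a, b, c); break;  /* Paeth           */
            default: break;  /* None and Up omitted */
        }
    }
}
```

The SSE2 routines in the patch carry a, b, c, and d across loop iterations in registers and step one pixel (3 or 4 bytes) at a time, but they should produce the same reconstructed bytes as this loop.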
```diff
diff --git a/src/codec/SkPngFilters.cpp b/src/codec/SkPngFilters.cpp
index 472123fa15..2777939357 100644
--- a/src/codec/SkPngFilters.cpp
+++ b/src/codec/SkPngFilters.cpp
@@ -16,25 +16,43 @@
 #if defined(__SSE2__)
 
-    template <int bpp>
-    static __m128i load(const void* p) {
-        static_assert(bpp <= 4, "");
-
+    static __m128i load3(const void* p) {
         uint32_t packed;
-        memcpy(&packed, p, bpp);
+        memcpy(&packed, p, 3);
         return _mm_cvtsi32_si128(packed);
     }
 
-    template <int bpp>
-    static void store(void* p, __m128i v) {
-        static_assert(bpp <= 4, "");
+    static __m128i load4(const void* p) {
+        return _mm_cvtsi32_si128(*(const int*)p);
+    }
+    static void store3(void* p, __m128i v) {
         uint32_t packed = _mm_cvtsi128_si32(v);
-        memcpy(p, &packed, bpp);
+        memcpy(p, &packed, 3);
+    }
+
+    static void store4(void* p, __m128i v) {
+        *(int*)p = _mm_cvtsi128_si32(v);
+    }
+
+    void sk_sub3_sse2(png_row_infop row_info, uint8_t* row, const uint8_t* prev) {
+        // The Sub filter predicts each pixel as the previous pixel, a.
+        // There is no pixel to the left of the first pixel.  It's encoded directly.
+        // That works with our main loop if we just say that left pixel was zero.
+        __m128i a, d = _mm_setzero_si128();
+
+        int rb = row_info->rowbytes;
+        while (rb > 0) {
+            a = d; d = load3(row);
+            d = _mm_add_epi8(d, a);
+            store3(row, d);
+
+            row += 3;
+            rb  -= 3;
+        }
     }
 
-    template <int bpp>
-    static void sk_sub_sse2(png_row_infop row_info, uint8_t* row, const uint8_t*) {
+    void sk_sub4_sse2(png_row_infop row_info, uint8_t* row, const uint8_t* prev) {
         // The Sub filter predicts each pixel as the previous pixel, a.
         // There is no pixel to the left of the first pixel.  It's encoded directly.
         // That works with our main loop if we just say that left pixel was zero.
@@ -42,17 +60,44 @@
         int rb = row_info->rowbytes;
         while (rb > 0) {
-            a = d; d = load<bpp>(row);
+            a = d; d = load4(row);
             d = _mm_add_epi8(d, a);
-            store<bpp>(row, d);
+            store4(row, d);
+
+            row += 4;
+            rb  -= 4;
+        }
+    }
+
+    void sk_avg3_sse2(png_row_infop row_info, uint8_t* row, const uint8_t* prev) {
+        // The Avg filter predicts each pixel as the (truncated) average of a and b.
+        // There's no pixel to the left of the first pixel.  Luckily, it's
+        // predicted to be half of the pixel above it.  So again, this works
+        // perfectly with our loop if we make sure a starts at zero.
+        const __m128i zero = _mm_setzero_si128();
+        __m128i    b;
+        __m128i a, d = zero;
+
+        int rb = row_info->rowbytes;
+        while (rb > 0) {
+            b = load3(prev);
+            a = d; d = load3(row );
 
-            row += bpp;
-            rb  -= bpp;
+            // PNG requires a truncating average here, so sadly we can't just use _mm_avg_epu8...
+            __m128i avg = _mm_avg_epu8(a,b);
+            // ...but we can fix it up by subtracting off 1 if it rounded up.
+            avg = _mm_sub_epi8(avg, _mm_and_si128(_mm_xor_si128(a,b), _mm_set1_epi8(1)));
+
+            d = _mm_add_epi8(d, avg);
+            store3(row, d);
+
+            prev += 3;
+            row  += 3;
+            rb   -= 3;
         }
     }
 
-    template <int bpp>
-    void sk_avg_sse2(png_row_infop row_info, uint8_t* row, const uint8_t* prev) {
+    void sk_avg4_sse2(png_row_infop row_info, uint8_t* row, const uint8_t* prev) {
         // The Avg filter predicts each pixel as the (truncated) average of a and b.
         // There's no pixel to the left of the first pixel.  Luckily, it's
         // predicted to be half of the pixel above it.  So again, this works
@@ -63,8 +108,8 @@
         int rb = row_info->rowbytes;
         while (rb > 0) {
-            b = load<bpp>(prev);
-            a = d; d = load<bpp>(row );
+            b = load4(prev);
+            a = d; d = load4(row );
 
             // PNG requires a truncating average here, so sadly we can't just use _mm_avg_epu8...
             __m128i avg = _mm_avg_epu8(a,b);
@@ -72,11 +117,11 @@
             avg = _mm_sub_epi8(avg, _mm_and_si128(_mm_xor_si128(a,b), _mm_set1_epi8(1)));
 
             d = _mm_add_epi8(d, avg);
-            store<bpp>(row, d);
+            store4(row, d);
 
-            prev += bpp;
-            row  += bpp;
-            rb   -= bpp;
+            prev += 4;
+            row  += 4;
+            rb   -= 4;
         }
     }
 
@@ -88,23 +133,22 @@
         // Read this all as, return x<0 ? -x : x.
         // To negate two's complement, you flip all the bits then add 1.
         __m128i is_negative = _mm_cmplt_epi16(x, _mm_setzero_si128());
-        x = _mm_xor_si128(x, is_negative);                      // Flip negative lanes.
-        x = _mm_add_epi16(x, _mm_srli_epi16(is_negative, 15));  // +1 to negative lanes, else +0.
+        x = _mm_xor_si128(x, is_negative);                     // Flip negative lanes.
+        x = _mm_add_epi16(x, _mm_srli_epi16(is_negative, 15)); // +1 to negative lanes, else +0.
         return x;
     #endif
     }
 
     // Bytewise c ? t : e.
     static __m128i if_then_else(__m128i c, __m128i t, __m128i e) {
-    #if 0 && defined(__SSE4_1__)  // Make sure we have a bot testing this before enabling.
+    #if defined(__SSE4_1__)
         return _mm_blendv_epi8(e,t,c);
     #else
         return _mm_or_si128(_mm_and_si128(c, t), _mm_andnot_si128(c, e));
     #endif
     }
 
-    template <int bpp>
-    void sk_paeth_sse2(png_row_infop row_info, uint8_t* row, const uint8_t* prev) {
+    void sk_paeth3_sse2(png_row_infop row_info, uint8_t* row, const uint8_t* prev) {
         // Paeth tries to predict pixel d using the pixel to the left of it, a,
         // and two pixels from the previous row, b and c:
         //   prev: c b
@@ -121,9 +165,8 @@
         int rb = row_info->rowbytes;
         while (rb > 0) {
             // It's easiest to do this math (particularly, deal with pc) with 16-bit intermediates.
-            c = b; b = _mm_unpacklo_epi8(load<bpp>(prev), zero);
-            a = d; d = _mm_unpacklo_epi8(load<bpp>(row ), zero);
-
+            c = b; b = _mm_unpacklo_epi8(load3(prev), zero);
+            a = d; d = _mm_unpacklo_epi8(load3(row ), zero);
             __m128i pa = _mm_sub_epi16(b,c),   // (p-a) == (a+b-c - a) == (b-c)
                     pb = _mm_sub_epi16(a,c),   // (p-b) == (a+b-c - b) == (a-c)
                     pc = _mm_add_epi16(pa,pb); // (p-c) == (a+b-c - c) == (a+b-c-c) == (b-c)+(a-c)
@@ -140,33 +183,55 @@
                                                                           c));
             d = _mm_add_epi8(d, nearest);  // Note `_epi8`: we need addition to wrap modulo 255.
-            store<bpp>(row, _mm_packus_epi16(d,d));
+            store3(row, _mm_packus_epi16(d,d));
 
-            prev += bpp;
-            row  += bpp;
-            rb   -= bpp;
+            prev += 3;
+            row  += 3;
+            rb   -= 3;
         }
     }
 
-    void sk_sub3_sse2(png_row_infop row_info, uint8_t* row, const uint8_t* prev) {
-        sk_sub_sse2<3>(row_info, row, prev);
-    }
-    void sk_sub4_sse2(png_row_infop row_info, uint8_t* row, const uint8_t* prev) {
-        sk_sub_sse2<4>(row_info, row, prev);
-    }
+    void sk_paeth4_sse2(png_row_infop row_info, uint8_t* row, const uint8_t* prev) {
+        // Paeth tries to predict pixel d using the pixel to the left of it, a,
+        // and two pixels from the previous row, b and c:
+        //   prev: c b
+        //   row:  a d
+        // The Paeth function predicts d to be whichever of a, b, or c is nearest to p=a+b-c.
 
-    void sk_avg3_sse2(png_row_infop row_info, uint8_t* row, const uint8_t* prev) {
-        sk_avg_sse2<3>(row_info, row, prev);
-    }
-    void sk_avg4_sse2(png_row_infop row_info, uint8_t* row, const uint8_t* prev) {
-        sk_avg_sse2<4>(row_info, row, prev);
-    }
+        // The first pixel has no left context, and so uses an Up filter, p = b.
+        // This works naturally with our main loop's p = a+b-c if we force a and c to zero.
+        // Here we zero b and d, which become c and a respectively at the start of the loop.
+        const __m128i zero = _mm_setzero_si128();
+        __m128i c, b = zero,
+                a, d = zero;
 
-    void sk_paeth3_sse2(png_row_infop row_info, uint8_t* row, const uint8_t* prev) {
-        sk_paeth_sse2<3>(row_info, row, prev);
-    }
-    void sk_paeth4_sse2(png_row_infop row_info, uint8_t* row, const uint8_t* prev) {
-        sk_paeth_sse2<4>(row_info, row, prev);
+        int rb = row_info->rowbytes;
+        while (rb > 0) {
+            // It's easiest to do this math (particularly, deal with pc) with 16-bit intermediates.
+            c = b; b = _mm_unpacklo_epi8(load4(prev), zero);
+            a = d; d = _mm_unpacklo_epi8(load4(row ), zero);
+            __m128i pa = _mm_sub_epi16(b,c),   // (p-a) == (a+b-c - a) == (b-c)
+                    pb = _mm_sub_epi16(a,c),   // (p-b) == (a+b-c - b) == (a-c)
+                    pc = _mm_add_epi16(pa,pb); // (p-c) == (a+b-c - c) == (a+b-c-c) == (b-c)+(a-c)
+
+            pa = abs_i16(pa);  // |p-a|
+            pb = abs_i16(pb);  // |p-b|
+            pc = abs_i16(pc);  // |p-c|
+
+            __m128i smallest = _mm_min_epi16(pc, _mm_min_epi16(pa, pb));
+
+            // Paeth breaks ties favoring a over b over c.
+            __m128i nearest = if_then_else(_mm_cmpeq_epi16(smallest, pa), a,
+                              if_then_else(_mm_cmpeq_epi16(smallest, pb), b,
+                                                                          c));
+
+            d = _mm_add_epi8(d, nearest);  // Note `_epi8`: we need addition to wrap modulo 255.
+            store4(row, _mm_packus_epi16(d,d));
+
+            prev += 4;
+            row  += 4;
+            rb   -= 4;
+        }
     }
 
 #endif
```
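
A detail worth calling out in the Avg hunks above: `_mm_avg_epu8` computes the rounded average `(a + b + 1) >> 1` in each byte lane, while PNG's Avg filter needs the truncated average `(a + b) >> 1`. The rounded result is exactly one too high precisely when a and b differ in their lowest bit, which is why the patch subtracts `(a ^ b) & 1`. A small standalone check of that identity (illustrative only, not part of the patch):

```c
#include <assert.h>
#include <stdio.h>

int main(void) {
    /* Exhaustively verify: ((a+b+1)>>1) - ((a^b)&1) == (a+b)>>1 for all byte pairs,
     * i.e. the fix-up applied after _mm_avg_epu8 yields PNG's truncated average. */
    for (int a = 0; a < 256; a++) {
        for (int b = 0; b < 256; b++) {
            int rounded   = (a + b + 1) >> 1;   /* what _mm_avg_epu8 does per lane */
            int truncated = (a + b) >> 1;       /* what the Avg filter requires    */
            assert(rounded - ((a ^ b) & 1) == truncated);
        }
    }
    printf("truncating-average fix-up holds for all byte pairs\n");
    return 0;
}
```

The byte-lane subtraction in the patch is safe because the truncated average of two bytes always fits in eight bits.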