author     henrik.smiding <henrik.smiding@intel.com>   2014-06-27 08:03:17 -0700
committer  Commit bot <commit-bot@chromium.org>        2014-06-27 08:03:17 -0700
commit     3bb195ef0d9691384027d7b61b0b8ef8379aaf5d (patch)
tree       f230b9c2bf5fb6b6309196c77f326a053110251e
parent     982542dce8acbd2f3e7642268b21e76b93230daf (diff)
Add SSE4 optimization of S32A_Opaque_Blitrow
Adds an optimization of the Skia S32A_Opaque_Blitrow blitter using the SSE4.2
SIMD instruction set, with special cases for when the source alpha is zero or
fully opaque. Performance increase of 10%-400% compared to the existing SSE2
optimization (measured on the Silvermont architecture). Noticeable in ~25
different skia bench subtests, especially in bitmap_8888_*, repeatTile_*, and
morph_*.

bitmap_8888_A                    - 100% faster
bitmap_8888_A_source_transparent - 250% faster
bitmap_8888_A_source_opaque      -  25% faster
bitmap_8888_A_scale_bicubic      -  75% faster

Signed-off-by: Henrik Smiding <henrik.smiding@intel.com>

Committed: https://skia.googlesource.com/skia/+/e2527b147679b0c43019fae7d59cc3777d2d097e
Committed: https://skia.googlesource.com/skia/+/b5c281e1e06af3be804309877de1dac6145686b9

R=reed@google.com, mtklein@google.com, tomhudson@google.com, djsollen@google.com, joakim.landberg@intel.com

Author: henrik.smiding@intel.com

Review URL: https://codereview.chromium.org/289473009
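As a rough mental model of what the new blitter does, the scalar sketch below mirrors the strategy described above: inspect a group of four source pixels, skip the group entirely when every alpha is zero, copy it straight through when every alpha is opaque, and fall back to a per-channel SrcOver blend otherwise. This is an illustration only, not code from the patch; the helper names blit_row_s32a_opaque_ref and srcover_one are hypothetical.

    #include <cstdint>
    #include <cstring>

    // Blend one premultiplied ARGB source pixel over the destination:
    // dst' = src + dst * (256 - srcAlpha) / 256, per channel.
    static inline uint32_t srcover_one(uint32_t src, uint32_t dst) {
        unsigned invA = 256 - (src >> 24);                 // inverse source alpha, 0..256
        uint32_t rb = ((dst & 0x00FF00FFu) * invA) >> 8;   // scale red and blue
        uint32_t ag = ((dst >> 8) & 0x00FF00FFu) * invA;   // scale alpha and green
        return src + ((rb & 0x00FF00FFu) | (ag & 0xFF00FF00u));
    }

    // Hypothetical scalar reference for the fast paths described above.
    void blit_row_s32a_opaque_ref(uint32_t* dst, const uint32_t* src, int count) {
        while (count >= 4) {
            uint32_t anded = src[0] & src[1] & src[2] & src[3];
            uint32_t ored  = src[0] | src[1] | src[2] | src[3];
            if ((ored >> 24) == 0) {
                // All four source alphas are zero: leave the destination untouched.
            } else if ((anded >> 24) == 0xFF) {
                // All four source alphas are opaque: plain copy.
                std::memcpy(dst, src, 4 * sizeof(uint32_t));
            } else {
                for (int i = 0; i < 4; i++) {
                    dst[i] = srcover_one(src[i], dst[i]);
                }
            }
            dst += 4; src += 4; count -= 4;
        }
        while (count-- > 0) {
            *dst = srcover_one(*src, *dst);
            dst++; src++;
        }
    }

The assembly below performs the same three-way classification sixteen bytes at a time with ptest against a 0xFF000000 mask, which sets ZF when all four alphas are zero and CF when all four are opaque.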
-rw-r--r--  gyp/opts.gyp                             38
-rw-r--r--  gyp/skia_lib.gyp                          1
-rw-r--r--  src/opts/SkBlitRow_opts_SSE4.h           25
-rw-r--r--  src/opts/SkBlitRow_opts_SSE4_asm.S      469
-rw-r--r--  src/opts/SkBlitRow_opts_SSE4_x64_asm.S  466
-rw-r--r--  src/opts/opts_check_x86.cpp              21
-rw-r--r--  tests/DeferredCanvasTest.cpp              4
7 files changed, 1021 insertions, 3 deletions
diff --git a/gyp/opts.gyp b/gyp/opts.gyp
index 69e3946c34..85c8c503fa 100644
--- a/gyp/opts.gyp
+++ b/gyp/opts.gyp
@@ -46,6 +46,7 @@
],
'dependencies': [
'opts_ssse3',
+ 'opts_sse4',
],
'sources': [
'../src/opts/opts_check_x86.cpp',
@@ -194,10 +195,45 @@
}],
],
},
+ # For the same reasons as with skia_opts, we have to create a separate
+ # target specifically for the SSE4 code: we do not want to compile the
+ # SSE2 sources with -msse4, which would potentially allow gcc to
+ # generate SSE4 instructions in them.
+ {
+ 'target_name': 'opts_sse4',
+ 'product_name': 'skia_opts_sse4',
+ 'type': 'static_library',
+ 'standalone_static_library': 1,
+ 'dependencies': [
+ 'core.gyp:*',
+ 'effects.gyp:*'
+ ],
+ 'include_dirs': [
+ '../src/core',
+ ],
+ 'conditions': [
+ [ 'skia_os in ["linux", "freebsd", "openbsd", "solaris", "nacl", "chromeos", "android", "mac"] \
+ and not skia_android_framework', {
+ 'cflags': [
+ '-msse4',
+ ],
+ }],
+ [ 'skia_arch_width == 64 and skia_arch_type == "x86"', {
+ 'sources': [
+ '../src/opts/SkBlitRow_opts_SSE4_x64_asm.S',
+ ],
+ }],
+ [ 'skia_arch_width == 32 and skia_arch_type == "x86"', {
+ 'sources': [
+ '../src/opts/SkBlitRow_opts_SSE4_asm.S',
+ ],
+ }],
+ ],
+ },
# NEON code must be compiled with -mfpu=neon which also affects scalar
# code. To support dynamic NEON code paths, we need to build all
# NEON-specific sources in a separate static library. The situation
- # is very similar to the SSSE3 one.
+ # is very similar to the SSSE3 and SSE4 ones.
{
'target_name': 'opts_neon',
'product_name': 'skia_opts_neon',
diff --git a/gyp/skia_lib.gyp b/gyp/skia_lib.gyp
index 5f3f14155c..04d4bfc207 100644
--- a/gyp/skia_lib.gyp
+++ b/gyp/skia_lib.gyp
@@ -15,6 +15,7 @@
[ 'skia_arch_type == "x86" and skia_os != "android"', {
'component_libs': [
'opts.gyp:opts_ssse3',
+ 'opts.gyp:opts_sse4',
],
}],
[ 'arm_neon == 1', {
diff --git a/src/opts/SkBlitRow_opts_SSE4.h b/src/opts/SkBlitRow_opts_SSE4.h
new file mode 100644
index 0000000000..6c6276eae0
--- /dev/null
+++ b/src/opts/SkBlitRow_opts_SSE4.h
@@ -0,0 +1,25 @@
+/*
+ * Copyright 2014 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkBlitRow_opts_SSE4_DEFINED
+#define SkBlitRow_opts_SSE4_DEFINED
+
+#include "SkBlitRow.h"
+
+/* Check if we are able to build assembly code, GCC/AT&T syntax.
+ * Had problems with LLVM-GCC 4.2.
+ */
+#if defined(__clang__) || (defined(__GNUC__) && !defined(SK_BUILD_FOR_MAC))
+extern "C" void S32A_Opaque_BlitRow32_SSE4_asm(SkPMColor* SK_RESTRICT dst,
+ const SkPMColor* SK_RESTRICT src,
+ int count, U8CPU alpha);
+
+#define SK_ATT_ASM_SUPPORTED
+#endif
+
+#endif
+
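The SK_ATT_ASM_SUPPORTED macro defined by this header gates every reference to the assembly symbol elsewhere in the patch. A minimal, hypothetical caller (not part of this change) could use it as below, falling back to the existing SSE2 routine when AT&T-syntax assembly cannot be built. Note that the real code also performs a runtime CPUID check before taking the SSE4 path (see opts_check_x86.cpp further down); this sketch shows only the compile-time gate.

    #include "SkBlitRow_opts_SSE2.h"
    #include "SkBlitRow_opts_SSE4.h"

    // Hypothetical helper, for illustration only: blit one row of premultiplied
    // pixels, preferring the SSE4 assembly when the toolchain could assemble it.
    static void blit_row_opaque(SkPMColor* SK_RESTRICT dst,
                                const SkPMColor* SK_RESTRICT src, int count) {
    #if defined(SK_ATT_ASM_SUPPORTED)
        S32A_Opaque_BlitRow32_SSE4_asm(dst, src, count, 0xFF);
    #else
        S32A_Opaque_BlitRow32_SSE2(dst, src, count, 0xFF);
    #endif
    }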
diff --git a/src/opts/SkBlitRow_opts_SSE4_asm.S b/src/opts/SkBlitRow_opts_SSE4_asm.S
new file mode 100644
index 0000000000..8822125f12
--- /dev/null
+++ b/src/opts/SkBlitRow_opts_SSE4_asm.S
@@ -0,0 +1,469 @@
+/*
+ * Copyright 2014 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#if defined(__clang__) || (defined(__GNUC__) && !defined(SK_BUILD_FOR_MAC))
+
+#define CFI_PUSH(REG) \
+ .cfi_adjust_cfa_offset 4; \
+ .cfi_rel_offset REG, 0
+
+#define CFI_POP(REG) \
+ .cfi_adjust_cfa_offset -4; \
+ .cfi_restore REG
+
+#define PUSH(REG) pushl REG; CFI_PUSH (REG)
+#define POP(REG) popl REG; CFI_POP (REG)
+#define RETURN POP(%edi); ret
+
+#define EXTRACT_ALPHA(var1, var2) \
+ movdqa %var1, %var2; /* Clone source pixels to extract alpha */\
+ psrlw $8, %var2; /* Discard red and blue, leaving alpha and green */\
+ pshufhw $0xF5, %var2, %var2; /* Repeat alpha for scaling (high) */\
+ movdqa %xmm6, %xmm4; \
+ pshuflw $0xF5, %var2, %var2; /* Repeat alpha for scaling (low) */\
+ movdqa %xmm5, %xmm3; \
+ psubw %var2, %xmm4 /* Finalize alpha calculations */
+
+#define SCALE_PIXELS \
+ psllw $8, %xmm5; /* Filter out red and blue components */\
+ pmulhuw %xmm4, %xmm5; /* Scale red and blue */\
+ psrlw $8, %xmm3; /* Filter out alpha and green components */\
+ pmullw %xmm4, %xmm3 /* Scale alpha and green */
+
+
+/*
+ * void S32A_Opaque_BlitRow32_SSE4(SkPMColor* SK_RESTRICT dst,
+ * const SkPMColor* SK_RESTRICT src,
+ * int count, U8CPU alpha)
+ *
+ * This function is divided into six blocks: initialization, blit 4-15 pixels,
+ * blit 0-3 pixels, align destination for 16+ pixel blits,
+ * blit 16+ pixels with source unaligned, blit 16+ pixels with source aligned.
+ * There is some code reuse between the blocks.
+ *
+ * The primary optimization comes from checking the source pixels' alpha value.
+ * If the alpha is zero, the pixel can be skipped entirely.
+ * If the alpha is fully opaque, the pixel can be copied directly to the destination.
+ * According to collected statistics, these two cases are the most common.
+ * The main loops use pre-loading and unrolling in an attempt to reduce
+ * worst-case memory latency.
+ */
+
+#ifdef __clang__
+ .text
+#else
+ .section .text.sse4.2,"ax",@progbits
+ .type S32A_Opaque_BlitRow32_SSE4_asm, @function
+#endif
+ .p2align 4
+#if defined(__clang__) && defined(SK_BUILD_FOR_MAC)
+ .global _S32A_Opaque_BlitRow32_SSE4_asm
+_S32A_Opaque_BlitRow32_SSE4_asm:
+#else
+ .global S32A_Opaque_BlitRow32_SSE4_asm
+S32A_Opaque_BlitRow32_SSE4_asm:
+#endif
+ .cfi_startproc
+ movl 8(%esp), %eax // Source pointer
+ movl 12(%esp), %ecx // Pixel count
+ movl 4(%esp), %edx // Destination pointer
+ prefetcht0 (%eax)
+
+ // Setup SSE constants
+ pcmpeqd %xmm7, %xmm7 // 0xFF000000 mask to check alpha
+ pslld $24, %xmm7
+ pcmpeqw %xmm6, %xmm6 // 16-bit 256 to calculate inv. alpha
+ psrlw $15, %xmm6
+ psllw $8, %xmm6
+ pcmpeqw %xmm0, %xmm0 // 0x00FF00FF mask (Must be in xmm0 because of pblendvb)
+ psrlw $8, %xmm0
+ subl $4, %ecx // Check if we have only 0-3 pixels
+ js .LReallySmall
+ PUSH(%edi)
+ cmpl $11, %ecx // Do we have enough pixels to run the main loop?
+ ja .LBigBlit
+
+ // Handle small blits (4-15 pixels)
+ ////////////////////////////////////////////////////////////////////////////////
+ xorl %edi, %edi // Reset offset to zero
+
+.LSmallLoop:
+ lddqu (%eax, %edi), %xmm1 // Load four source pixels
+ ptest %xmm7, %xmm1 // Check if all alphas are zero or opaque
+ ja .LSmallAlphaNotOpaqueOrZero
+ jz .LSmallAlphaZero // If all alphas are zero, skip the pixels completely
+ movdqu %xmm1, (%edx, %edi) // Store four destination pixels
+.LSmallAlphaZero:
+ addl $16, %edi
+ subl $4, %ecx // Check if there are four additional pixels, at least
+ jns .LSmallLoop
+ jmp .LSmallRemaining
+
+ // Handle mixed alphas (calculate and scale)
+ .p2align 4
+.LSmallAlphaNotOpaqueOrZero:
+ lddqu (%edx, %edi), %xmm5 // Load four destination pixels
+ EXTRACT_ALPHA(xmm1, xmm2) // Extract and clone alpha value
+ SCALE_PIXELS // Scale pixels using alpha
+
+ addl $16, %edi
+ subl $4, %ecx // Check if there are four additional pixels, at least
+ pblendvb %xmm5, %xmm3 // Mask in %xmm0, implicitly
+ paddb %xmm3, %xmm1 // Add source and destination pixels together
+ movdqu %xmm1, -16(%edx, %edi) // Store four destination pixels
+ jns .LSmallLoop
+
+ // Handle the last 0-3 pixels (also used by the main loops)
+.LSmallRemaining:
+ cmpl $-4, %ecx // Check if we are done
+ je .LSmallExit
+ sall $2, %ecx // Calculate offset for last pixels
+ addl %ecx, %edi
+
+ lddqu (%eax, %edi), %xmm1 // Load last four source pixels (overlapping)
+ ptest %xmm7, %xmm1 // Check if all alphas are zero or opaque
+ jc .LSmallRemainingStoreAll // If all alphas are opaque, just store (overlapping)
+ jz .LSmallExit // If all alphas are zero, skip the pixels completely
+
+ // Handle mixed alphas (calculate and scale)
+ lddqu (%edx, %edi), %xmm5 // Load last four destination pixels (overlapping)
+ EXTRACT_ALPHA(xmm1, xmm2) // Extract and clone alpha value
+
+ psllw $8, %xmm3 // Filter out red and blue components
+ pmulhuw %xmm4, %xmm3 // Scale red and blue
+ movdqa %xmm5, %xmm2
+ psrlw $8, %xmm2 // Filter out alpha and green components
+ pmullw %xmm4, %xmm2 // Scale alpha and green
+
+ cmpl $-8, %ecx // Check how many pixels should be written
+ pblendvb %xmm3, %xmm2 // Combine results (mask in %xmm0, implicitly)
+ paddb %xmm2, %xmm1 // Add source and destination pixels together
+ jb .LSmallPixelsLeft1
+ ja .LSmallPixelsLeft3 // To avoid double-blending the overlapping pixels...
+ pblendw $0xF0, %xmm1, %xmm5 // Merge only the final two pixels to the destination
+ movdqu %xmm5, (%edx, %edi) // Store last two destination pixels
+.LSmallExit:
+ RETURN
+
+.LSmallPixelsLeft1:
+ pblendw $0xC0, %xmm1, %xmm5 // Merge only the final pixel to the destination
+ movdqu %xmm5, (%edx, %edi) // Store last destination pixel
+ RETURN
+
+.LSmallPixelsLeft3:
+ pblendw $0xFC, %xmm1, %xmm5 // Merge only the final three pixels to the destination
+ movdqu %xmm5, (%edx, %edi) // Store last three destination pixels
+ RETURN
+
+.LSmallRemainingStoreAll:
+ movdqu %xmm1, (%edx, %edi) // Store last destination pixels (overwrite)
+ RETURN
+
+ // Handle really small blits (0-3 pixels)
+ ////////////////////////////////////////////////////////////////////////////////
+.LReallySmall:
+ addl $4, %ecx
+ jle .LReallySmallExit
+ pcmpeqd %xmm1, %xmm1
+ cmp $2, %ecx // Check how many pixels should be read
+ pinsrd $0x0, (%eax), %xmm1 // Load one source pixel
+ pinsrd $0x0, (%edx), %xmm5 // Load one destination pixel
+ jb .LReallySmallCalc
+ pinsrd $0x1, 4(%eax), %xmm1 // Load second source pixel
+ pinsrd $0x1, 4(%edx), %xmm5 // Load second destination pixel
+ je .LReallySmallCalc
+ pinsrd $0x2, 8(%eax), %xmm1 // Load third source pixel
+ pinsrd $0x2, 8(%edx), %xmm5 // Load third destination pixel
+
+.LReallySmallCalc:
+ ptest %xmm7, %xmm1 // Check if all alphas are opaque
+ jc .LReallySmallStore // If all alphas are opaque, just store
+
+ // Handle mixed alphas (calculate and scale)
+ EXTRACT_ALPHA(xmm1, xmm2) // Extract and clone alpha value
+
+ pand %xmm0, %xmm5 // Filter out red and blue components
+ pmullw %xmm4, %xmm5 // Scale red and blue
+ psrlw $8, %xmm3 // Filter out alpha and green components
+ pmullw %xmm4, %xmm3 // Scale alpha and green
+
+ psrlw $8, %xmm5 // Combine results
+ pblendvb %xmm5, %xmm3 // Mask in %xmm0, implicitly
+ paddb %xmm3, %xmm1 // Add source and destination pixels together
+
+.LReallySmallStore:
+ cmp $2, %ecx // Check how many pixels should be written
+ pextrd $0x0, %xmm1, (%edx) // Store one destination pixel
+ jb .LReallySmallExit
+ pextrd $0x1, %xmm1, 4(%edx) // Store second destination pixel
+ je .LReallySmallExit
+ pextrd $0x2, %xmm1, 8(%edx) // Store third destination pixel
+.LReallySmallExit:
+ ret
+
+ // Handle bigger blit operations (16+ pixels)
+ ////////////////////////////////////////////////////////////////////////////////
+ .p2align 4
+.LBigBlit:
+ // Align destination?
+ testl $0xF, %edx
+ lddqu (%eax), %xmm1 // Pre-load four source pixels
+ jz .LAligned
+
+ movl %edx, %edi // Calculate alignment of destination pointer
+ negl %edi
+ andl $0xF, %edi
+
+ // Handle 1-3 pixels to align destination
+ ptest %xmm7, %xmm1 // Check if all alphas are zero or opaque
+ jz .LAlignDone // If all alphas are zero, just skip
+ lddqu (%edx), %xmm5 // Load four destination pixels
+ jc .LAlignStore // If all alphas are opaque, just store
+
+ // Handle mixed alphas (calculate and scale)
+ EXTRACT_ALPHA(xmm1, xmm2) // Extract and clone alpha value
+
+ psllw $8, %xmm3 // Filter out red and blue components
+ pmulhuw %xmm4, %xmm3 // Scale red and blue
+ movdqa %xmm5, %xmm2
+ psrlw $8, %xmm2 // Filter out alpha and green components
+ pmullw %xmm4, %xmm2 // Scale alpha and green
+
+ pblendvb %xmm3, %xmm2 // Combine results (mask in %xmm0, implicitly)
+ paddb %xmm2, %xmm1 // Add source and destination pixels together
+
+.LAlignStore:
+ cmp $8, %edi // Check how many pixels should be written
+ jb .LAlignPixelsLeft1
+ ja .LAlignPixelsLeft3
+ pblendw $0x0F, %xmm1, %xmm5 // Blend two pixels
+ jmp .LAlignStorePixels
+
+.LAlignPixelsLeft1:
+ pblendw $0x03, %xmm1, %xmm5 // Blend one pixel
+ jmp .LAlignStorePixels
+
+.LAlignPixelsLeft3:
+ pblendw $0x3F, %xmm1, %xmm5 // Blend three pixels
+
+.LAlignStorePixels:
+ movdqu %xmm5, (%edx) // Store destination pixels
+
+.LAlignDone:
+ addl %edi, %eax // Adjust pointers and pixel count
+ addl %edi, %edx
+ shrl $2, %edi
+ lddqu (%eax), %xmm1 // Pre-load new source pixels (after alignment)
+ subl %edi, %ecx
+
+.LAligned: // Destination is guaranteed to be 16 byte aligned
+ xorl %edi, %edi // Reset offset to zero
+ subl $8, %ecx // Decrease counter (Reserve four pixels for the cleanup)
+ testl $0xF, %eax // Check alignment of source pointer
+ jz .LAlignedLoop
+
+ // Source not aligned to destination
+ ////////////////////////////////////////////////////////////////////////////////
+ .p2align 4
+.LUnalignedLoop: // Main loop for unaligned, handles eight pixels per iteration
+ ptest %xmm7, %xmm1 // Check if all alphas are zero or opaque
+ ja .LAlphaNotOpaqueOrZero00
+ lddqu 16(%eax, %edi), %xmm2 // Pre-load four source pixels
+ jz .LAlphaZero00
+ movdqa %xmm1, (%edx, %edi) // Store four destination pixels
+
+.LAlphaZero00:
+ ptest %xmm7, %xmm2 // Check if all alphas are zero or opaque
+ ja .LAlphaNotOpaqueOrZero01
+ lddqu 32(%eax, %edi), %xmm1 // Pre-load four source pixels
+ jz .LAlphaZero01
+ movdqa %xmm2, 16(%edx, %edi) // Store four destination pixels
+
+.LAlphaZero01:
+ addl $32, %edi // Adjust offset and pixel count
+ subl $8, %ecx
+ jae .LUnalignedLoop
+ addl $8, %ecx // Adjust pixel count
+ jmp .LLoopCleanup0
+
+ .p2align 4
+.LAlphaNotOpaqueOrZero00:
+ movdqa (%edx, %edi), %xmm5 // Load four destination pixels
+ EXTRACT_ALPHA(xmm1, xmm2) // Extract and clone alpha value
+ SCALE_PIXELS // Scale pixels using alpha
+
+ lddqu 16(%eax, %edi), %xmm2 // Pre-load four source pixels
+ pblendvb %xmm5, %xmm3 // Combine results (mask in %xmm0, implicitly)
+ paddb %xmm3, %xmm1 // Add source and destination pixels together
+ movdqa %xmm1, (%edx, %edi) // Store four destination pixels
+
+ // Handle next four pixels
+ ptest %xmm7, %xmm2 // Check if all alphas are zero or opaque
+ ja .LAlphaNotOpaqueOrZero01
+ lddqu 32(%eax, %edi), %xmm1 // Pre-load four source pixels
+ jz .LAlphaZero02
+ movdqa %xmm2, 16(%edx, %edi) // Store four destination pixels
+.LAlphaZero02:
+ addl $32, %edi // Adjust offset and pixel count
+ subl $8, %ecx
+ jae .LUnalignedLoop
+ addl $8, %ecx // Adjust pixel count
+ jmp .LLoopCleanup0
+
+ .p2align 4
+.LAlphaNotOpaqueOrZero01:
+ movdqa 16(%edx, %edi), %xmm5 // Load four destination pixels
+ EXTRACT_ALPHA(xmm2, xmm1) // Extract and clone alpha value
+ SCALE_PIXELS // Scale pixels using alpha
+
+ lddqu 32(%eax, %edi), %xmm1 // Pre-load four source pixels
+ addl $32, %edi
+ pblendvb %xmm5, %xmm3 // Combine results (mask in %xmm0, implicitly)
+ paddb %xmm3, %xmm2 // Add source and destination pixels together
+ subl $8, %ecx
+ movdqa %xmm2, -16(%edx, %edi) // Store four destination pixels
+ jae .LUnalignedLoop
+ addl $8, %ecx // Adjust pixel count
+
+ // Cleanup - handle pending pixels from loop
+.LLoopCleanup0:
+ ptest %xmm7, %xmm1 // Check if all alphas are zero or opaque
+ ja .LAlphaNotOpaqueOrZero02
+ jz .LAlphaZero03
+ movdqa %xmm1, (%edx, %edi) // Store four destination pixels
+.LAlphaZero03:
+ addl $16, %edi
+ subl $4, %ecx
+ js .LSmallRemaining // Reuse code from small loop
+
+.LRemain0:
+ lddqu (%eax, %edi), %xmm1 // Load four source pixels
+ ptest %xmm7, %xmm1 // Check if all alphas are zero or opaque
+ ja .LAlphaNotOpaqueOrZero02
+ jz .LAlphaZero04
+ movdqa %xmm1, (%edx, %edi) // Store four destination pixels
+.LAlphaZero04:
+ addl $16, %edi
+ subl $4, %ecx
+ jmp .LSmallRemaining // Reuse code from small loop
+
+.LAlphaNotOpaqueOrZero02:
+ movdqa (%edx, %edi), %xmm5 // Load four destination pixels
+ EXTRACT_ALPHA(xmm1, xmm2) // Extract and clone alpha value
+ SCALE_PIXELS // Scale pixels using alpha
+
+ addl $16, %edi
+ subl $4, %ecx
+ pblendvb %xmm5, %xmm3 // Combine results (mask in %xmm0, implicitly)
+ paddb %xmm3, %xmm1 // Add source and destination pixels together
+ movdqa %xmm1, -16(%edx, %edi) // Store four destination pixels
+ js .LSmallRemaining // Reuse code from small loop
+ jmp .LRemain0
+
+ // Source aligned to destination
+ ////////////////////////////////////////////////////////////////////////////////
+ .p2align 4
+.LAlignedLoop: // Main loop for aligned, handles eight pixels per iteration
+ ptest %xmm7, %xmm1 // Check if all alphas are zero or opaque
+ ja .LAlphaNotOpaqueOrZero10
+ movdqa 16(%eax, %edi), %xmm2 // Pre-load four source pixels
+ jz .LAlphaZero10
+ movdqa %xmm1, (%edx, %edi) // Store four destination pixels
+
+.LAlphaZero10:
+ ptest %xmm7, %xmm2 // Check if all alphas are zero or opaque
+ ja .LAlphaNotOpaqueOrZero11
+ movdqa 32(%eax, %edi), %xmm1 // Pre-load four source pixels
+ jz .LAlphaZero11
+ movdqa %xmm2, 16(%edx, %edi) // Store four destination pixels
+
+.LAlphaZero11:
+ addl $32, %edi // Adjust offset and pixel count
+ subl $8, %ecx
+ jae .LAlignedLoop
+ addl $8, %ecx // Adjust pixel count
+ jmp .LLoopCleanup1
+
+ .p2align 4
+.LAlphaNotOpaqueOrZero10:
+ movdqa (%edx, %edi), %xmm5 // Load four destination pixels
+ EXTRACT_ALPHA(xmm1, xmm2) // Extract and clone alpha value
+ SCALE_PIXELS // Scale pixels using alpha
+
+ movdqa 16(%eax, %edi), %xmm2 // Pre-load four source pixels
+ pblendvb %xmm5, %xmm3 // Combine results (mask in %xmm0, implicitly)
+ paddb %xmm3, %xmm1 // Add source and destination pixels together
+ movdqa %xmm1, (%edx, %edi) // Store four destination pixels
+
+ // Handle next four pixels
+ ptest %xmm7, %xmm2 // Check if all alphas are zero or opaque
+ ja .LAlphaNotOpaqueOrZero11
+ movdqa 32(%eax, %edi), %xmm1 // Pre-load four source pixels
+ jz .LAlphaZero12
+ movdqa %xmm2, 16(%edx, %edi) // Store four destination pixels
+.LAlphaZero12:
+ addl $32, %edi // Adjust offset and pixel count
+ subl $8, %ecx
+ jae .LAlignedLoop
+ addl $8, %ecx // Adjust pixel count
+ jmp .LLoopCleanup1
+
+ .p2align 4
+.LAlphaNotOpaqueOrZero11:
+ movdqa 16(%edx, %edi), %xmm5 // Load four destination pixels
+ EXTRACT_ALPHA(xmm2, xmm1) // Extract and clone alpha value
+ SCALE_PIXELS // Scale pixels using alpha
+ movdqa 32(%eax, %edi), %xmm1 // Pre-load four source pixels
+
+ addl $32, %edi
+ pblendvb %xmm5, %xmm3 // Combine results (mask in %xmm0, implicitly)
+ paddb %xmm3, %xmm2 // Add source and destination pixels together
+ subl $8, %ecx
+ movdqa %xmm2, -16(%edx, %edi) // Store four destination pixels
+ jae .LAlignedLoop
+ addl $8, %ecx // Adjust pixel count
+
+ // Cleanup - handle pending pixels from loop
+.LLoopCleanup1:
+ ptest %xmm7, %xmm1 // Check if all alphas are zero or opaque
+ ja .LAlphaNotOpaqueOrZero12
+ jz .LAlphaZero13
+ movdqa %xmm1, (%edx, %edi) // Store four destination pixels
+.LAlphaZero13:
+ addl $16, %edi
+ subl $4, %ecx
+ js .LSmallRemaining // Reuse code from small loop
+
+.LRemain1:
+ movdqa (%eax, %edi), %xmm1 // Load four source pixels
+ ptest %xmm7, %xmm1 // Check if all alphas are zero or opaque
+ ja .LAlphaNotOpaqueOrZero12
+ jz .LAlphaZero14
+ movdqa %xmm1, (%edx, %edi) // Store four destination pixels
+.LAlphaZero14:
+ addl $16, %edi
+ subl $4, %ecx
+ jmp .LSmallRemaining // Reuse code from small loop
+
+.LAlphaNotOpaqueOrZero12:
+ movdqa (%edx, %edi), %xmm5 // Load four destination pixels
+ EXTRACT_ALPHA(xmm1, xmm2) // Extract and clone alpha value
+ SCALE_PIXELS // Scale pixels using alpha
+
+ addl $16, %edi
+ subl $4, %ecx
+ pblendvb %xmm5, %xmm3 // Combine results (mask in %xmm0, implicitly)
+ paddb %xmm3, %xmm1 // Add source and destination pixels together
+ movdqa %xmm1, -16(%edx, %edi) // Store four destination pixels
+ js .LSmallRemaining // Reuse code from small loop
+ jmp .LRemain1
+
+ .cfi_endproc
+#ifndef __clang__
+ .size S32A_Opaque_BlitRow32_SSE4_asm, .-S32A_Opaque_BlitRow32_SSE4_asm
+#endif
+#endif
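For readers who prefer not to trace the SIMD by hand, the hedged intrinsics sketch below approximates what one pass of EXTRACT_ALPHA, SCALE_PIXELS, pblendvb and paddb computes for a group of four premultiplied pixels. It is an illustration, not code from the patch; the name blend_four_pixels is hypothetical, and the real assembly interleaves these steps with loads, stores and loop control.

    #include <emmintrin.h>  // SSE2
    #include <smmintrin.h>  // SSE4.1, for _mm_blendv_epi8

    // 'src' and 'dst' each hold four 32-bit premultiplied ARGB pixels.
    static inline __m128i blend_four_pixels(__m128i src, __m128i dst) {
        const __m128i c256   = _mm_set1_epi16(256);        // xmm6 in the assembly
        const __m128i rbMask = _mm_set1_epi32(0x00FF00FF); // xmm0 in the assembly

        // EXTRACT_ALPHA: broadcast each pixel's alpha into both of its 16-bit
        // lanes, then form the inverse alpha 256 - a per lane.
        __m128i alpha = _mm_srli_epi16(src, 8);            // alpha and green in low bytes
        alpha = _mm_shufflehi_epi16(alpha, 0xF5);          // repeat alpha (high half)
        alpha = _mm_shufflelo_epi16(alpha, 0xF5);          // repeat alpha (low half)
        __m128i invAlpha = _mm_sub_epi16(c256, alpha);     // xmm4 in the assembly

        // SCALE_PIXELS: scale the destination's red/blue and alpha/green lanes.
        __m128i rb = _mm_slli_epi16(dst, 8);               // isolate red and blue
        rb = _mm_mulhi_epu16(rb, invAlpha);                // (r,b * inv) >> 8
        __m128i ag = _mm_srli_epi16(dst, 8);               // isolate alpha and green
        ag = _mm_mullo_epi16(ag, invAlpha);                // scaled a,g land in high bytes

        // pblendvb + paddb: merge the two halves through the byte mask and
        // add the source pixels.
        __m128i scaledDst = _mm_blendv_epi8(ag, rb, rbMask);
        return _mm_add_epi8(scaledDst, src);
    }

Splitting the destination into red/blue and alpha/green word lanes lets each 8-bit channel be scaled with full 16-bit precision before the two halves are re-merged with the 0x00FF00FF byte mask.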
diff --git a/src/opts/SkBlitRow_opts_SSE4_x64_asm.S b/src/opts/SkBlitRow_opts_SSE4_x64_asm.S
new file mode 100644
index 0000000000..41ba1ec3bf
--- /dev/null
+++ b/src/opts/SkBlitRow_opts_SSE4_x64_asm.S
@@ -0,0 +1,466 @@
+/*
+ * Copyright 2014 The Android Open Source Project
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#if defined(__clang__) || (defined(__GNUC__) && !defined(SK_BUILD_FOR_MAC))
+
+#define EXTRACT_ALPHA(var1, var2) \
+ movdqa %var1, %var2; /* Clone source pixels to extract alpha */\
+ psrlw $8, %var2; /* Discard red and blue, leaving alpha and green */\
+ pshufhw $0xF5, %var2, %var2; /* Repeat alpha for scaling (high) */\
+ movdqa %xmm6, %xmm4; \
+ pshuflw $0xF5, %var2, %var2; /* Repeat alpha for scaling (low) */\
+ movdqa %xmm5, %xmm3; \
+ psubw %var2, %xmm4 /* Finalize alpha calculations */
+
+#define SCALE_PIXELS \
+ psllw $8, %xmm5; /* Filter out red and blue components */\
+ pmulhuw %xmm4, %xmm5; /* Scale red and blue */\
+ psrlw $8, %xmm3; /* Filter out alpha and green components */\
+ pmullw %xmm4, %xmm3 /* Scale alpha and green */
+
+
+/*
+ * void S32A_Opaque_BlitRow32_SSE4(SkPMColor* SK_RESTRICT dst,
+ * const SkPMColor* SK_RESTRICT src,
+ * int count, U8CPU alpha)
+ *
+ * This function is divided into six blocks: initialization, blit 4-15 pixels,
+ * blit 0-3 pixels, align destination for 16+ pixel blits,
+ * blit 16+ pixels with source unaligned, blit 16+ pixels with source aligned.
+ * There is some code reuse between the blocks.
+ *
+ * The primary optimization comes from checking the source pixels' alpha value.
+ * If the alpha is zero, the pixel can be skipped entirely.
+ * If the alpha is fully opaque, the pixel can be copied directly to the destination.
+ * According to collected statistics, these two cases are the most common.
+ * The main loops use pre-loading and unrolling in an attempt to reduce
+ * worst-case memory latency.
+ */
+
+#ifdef __clang__
+ .text
+#else
+ .section .text.sse4.2,"ax",@progbits
+ .type S32A_Opaque_BlitRow32_SSE4_asm, @function
+#endif
+ .p2align 4
+#if defined(__clang__) && defined(SK_BUILD_FOR_MAC)
+ .global _S32A_Opaque_BlitRow32_SSE4_asm
+_S32A_Opaque_BlitRow32_SSE4_asm:
+#else
+ .global S32A_Opaque_BlitRow32_SSE4_asm
+S32A_Opaque_BlitRow32_SSE4_asm:
+#endif
+ .cfi_startproc
+ prefetcht0 (%rsi)
+ movl %edx, %ecx // Pixel count
+ movq %rdi, %rdx // Destination pointer
+ movq %rsi, %rax // Source pointer
+
+ // Setup SSE constants
+ movdqa .LAlphaCheckMask(%rip), %xmm7 // 0xFF000000 mask to check alpha
+ movdqa .LInverseAlphaCalc(%rip), %xmm6 // 16-bit 256 to calculate inv. alpha
+ movdqa .LResultMergeMask(%rip), %xmm0 // 0x00FF00FF mask (Must be in xmm0 because of pblendvb)
+
+ subl $4, %ecx // Check if we have only 0-3 pixels
+ js .LReallySmall
+ cmpl $11, %ecx // Do we have enough pixels to run the main loop?
+ ja .LBigBlit
+
+ // Handle small blits (4-15 pixels)
+ ////////////////////////////////////////////////////////////////////////////////
+ xorq %rdi, %rdi // Reset offset to zero
+
+.LSmallLoop:
+ lddqu (%rax, %rdi), %xmm1 // Load four source pixels
+ ptest %xmm7, %xmm1 // Check if all alphas are zero or opaque
+ ja .LSmallAlphaNotOpaqueOrZero
+ jz .LSmallAlphaZero
+ movdqu %xmm1, (%rdx, %rdi) // Store four destination pixels
+.LSmallAlphaZero:
+ addq $16, %rdi
+ subl $4, %ecx // Check if there are four additional pixels, at least
+ jns .LSmallLoop
+ jmp .LSmallRemaining
+
+ // Handle mixed alphas (calculate and scale)
+ .p2align 4
+.LSmallAlphaNotOpaqueOrZero:
+ lddqu (%rdx, %rdi), %xmm5 // Load four destination pixels
+ EXTRACT_ALPHA(xmm1, xmm2) // Extract and clone alpha value
+ SCALE_PIXELS // Scale pixels using alpha
+
+ addq $16, %rdi
+ subl $4, %ecx // Check if there are four additional pixels, at least
+ pblendvb %xmm5, %xmm3 // Mask in %xmm0, implicitly
+ paddb %xmm3, %xmm1 // Add source and destination pixels together
+ movdqu %xmm1, -16(%rdx, %rdi) // Store four destination pixels
+ jns .LSmallLoop
+
+ // Handle the last 0-3 pixels (also used by the main loops)
+.LSmallRemaining:
+ cmpl $-4, %ecx // Check if we are done
+ je .LSmallExit
+ sall $2, %ecx // Calculate offset for last pixels
+ movslq %ecx, %rcx
+ addq %rcx, %rdi
+
+ lddqu (%rax, %rdi), %xmm1 // Load last four source pixels (overlapping)
+ ptest %xmm7, %xmm1 // Check if all alphas are zero or opaque
+ jc .LSmallRemainingStoreAll // If all alphas are opaque, just store (overlapping)
+ jz .LSmallExit // If all alphas are zero, skip the pixels completely
+
+ // Handle mixed alphas (calculate and scale)
+ lddqu (%rdx, %rdi), %xmm5 // Load last four destination pixels (overlapping)
+ EXTRACT_ALPHA(xmm1, xmm2) // Extract and clone alpha value
+
+ psllw $8, %xmm3 // Filter out red and blue components
+ pmulhuw %xmm4, %xmm3 // Scale red and blue
+ movdqa %xmm5, %xmm2
+ psrlw $8, %xmm2 // Filter out alpha and green components
+ pmullw %xmm4, %xmm2 // Scale alpha and green
+
+ cmpl $-8, %ecx // Check how many pixels should be written
+ pblendvb %xmm3, %xmm2 // Combine results (mask in %xmm0, implicitly)
+ paddb %xmm2, %xmm1 // Add source and destination pixels together
+ jb .LSmallPixelsLeft1
+ ja .LSmallPixelsLeft3 // To avoid double-blending the overlapping pixels...
+ pblendw $0xF0, %xmm1, %xmm5 // Merge only the final two pixels to the destination
+ movdqu %xmm5, (%rdx, %rdi) // Store last two destination pixels
+.LSmallExit:
+ ret
+
+.LSmallPixelsLeft1:
+ pblendw $0xC0, %xmm1, %xmm5 // Merge only the final pixel to the destination
+ movdqu %xmm5, (%rdx, %rdi) // Store last destination pixel
+ ret
+
+.LSmallPixelsLeft3:
+ pblendw $0xFC, %xmm1, %xmm5 // Merge only the final three pixels to the destination
+ movdqu %xmm5, (%rdx, %rdi) // Store last three destination pixels
+ ret
+
+.LSmallRemainingStoreAll:
+ movdqu %xmm1, (%rdx, %rdi) // Store last destination pixels (overwrite)
+ ret
+
+ // Handle really small blits (0-3 pixels)
+ ////////////////////////////////////////////////////////////////////////////////
+.LReallySmall:
+ addl $4, %ecx
+ jle .LReallySmallExit
+ pcmpeqd %xmm1, %xmm1
+ cmpl $2, %ecx // Check how many pixels should be read
+ pinsrd $0x0, (%rax), %xmm1 // Load one source pixel
+ pinsrd $0x0, (%rdx), %xmm5 // Load one destination pixel
+ jb .LReallySmallCalc
+ pinsrd $0x1, 4(%rax), %xmm1 // Load second source pixel
+ pinsrd $0x1, 4(%rdx), %xmm5 // Load second destination pixel
+ je .LReallySmallCalc
+ pinsrd $0x2, 8(%rax), %xmm1 // Load third source pixel
+ pinsrd $0x2, 8(%rdx), %xmm5 // Load third destination pixel
+
+.LReallySmallCalc:
+ ptest %xmm7, %xmm1 // Check if all alphas are opaque
+ jc .LReallySmallStore // If all alphas are opaque, just store
+
+ // Handle mixed alphas (calculate and scale)
+ EXTRACT_ALPHA(xmm1, xmm2) // Extract and clone alpha value
+
+ pand %xmm0, %xmm5 // Filter out red and blue components
+ pmullw %xmm4, %xmm5 // Scale red and blue
+ psrlw $8, %xmm3 // Filter out alpha and green components
+ pmullw %xmm4, %xmm3 // Scale alpha and green
+
+ psrlw $8, %xmm5 // Combine results
+ pblendvb %xmm5, %xmm3 // Mask in %xmm0, implicitly
+ paddb %xmm3, %xmm1 // Add source and destination pixels together
+
+.LReallySmallStore:
+ cmpl $2, %ecx // Check how many pixels should be written
+ pextrd $0x0, %xmm1, (%rdx) // Store one destination pixel
+ jb .LReallySmallExit
+ pextrd $0x1, %xmm1, 4(%rdx) // Store second destination pixel
+ je .LReallySmallExit
+ pextrd $0x2, %xmm1, 8(%rdx) // Store third destination pixel
+.LReallySmallExit:
+ ret
+
+ // Handle bigger blit operations (16+ pixels)
+ ////////////////////////////////////////////////////////////////////////////////
+ .p2align 4
+.LBigBlit:
+ // Align destination?
+ testl $0xF, %edx
+ lddqu (%rax), %xmm1 // Pre-load four source pixels
+ jz .LAligned
+
+ movq %rdx, %rdi // Calculate alignment of destination pointer
+ negq %rdi
+ andl $0xF, %edi
+
+ // Handle 1-3 pixels to align destination
+ ptest %xmm7, %xmm1 // Check if all alphas are zero or opaque
+ jz .LAlignDone // If all alphas are zero, just skip
+ lddqu (%rdx), %xmm5 // Load four destination pixels
+ jc .LAlignStore // If all alphas are opaque, just store
+
+ // Handle mixed alphas (calculate and scale)
+ EXTRACT_ALPHA(xmm1, xmm2) // Extract and clone alpha value
+
+ psllw $8, %xmm3 // Filter out red and blue components
+ pmulhuw %xmm4, %xmm3 // Scale red and blue
+ movdqa %xmm5, %xmm2
+ psrlw $8, %xmm2 // Filter out alpha and green components
+ pmullw %xmm4, %xmm2 // Scale alpha and green
+
+ pblendvb %xmm3, %xmm2 // Combine results (mask in %xmm0, implicitly)
+ paddb %xmm2, %xmm1 // Add source and destination pixels together
+
+.LAlignStore:
+ cmpl $8, %edi // Check how many pixels should be written
+ jb .LAlignPixelsLeft1
+ ja .LAlignPixelsLeft3
+ pblendw $0x0F, %xmm1, %xmm5 // Blend two pixels
+ jmp .LAlignStorePixels
+
+.LAlignPixelsLeft1:
+ pblendw $0x03, %xmm1, %xmm5 // Blend one pixel
+ jmp .LAlignStorePixels
+
+.LAlignPixelsLeft3:
+ pblendw $0x3F, %xmm1, %xmm5 // Blend three pixels
+
+.LAlignStorePixels:
+ movdqu %xmm5, (%rdx) // Store destination pixels
+
+.LAlignDone:
+ addq %rdi, %rax // Adjust pointers and pixel count
+ addq %rdi, %rdx
+ shrq $2, %rdi
+ lddqu (%rax), %xmm1 // Pre-load new source pixels (after alignment)
+ subl %edi, %ecx
+
+.LAligned: // Destination is guaranteed to be 16 byte aligned
+ xorq %rdi, %rdi // Reset offset to zero
+ subl $8, %ecx // Decrease counter (Reserve four pixels for the cleanup)
+ testl $0xF, %eax // Check alignment of source pointer
+ jz .LAlignedLoop
+
+ // Source not aligned to destination
+ ////////////////////////////////////////////////////////////////////////////////
+ .p2align 4
+.LUnalignedLoop: // Main loop for unaligned, handles eight pixels per iteration
+ ptest %xmm7, %xmm1 // Check if all alphas are zero or opaque
+ ja .LAlphaNotOpaqueOrZero00
+ lddqu 16(%rax, %rdi), %xmm2 // Pre-load four source pixels
+ jz .LAlphaZero00
+ movdqa %xmm1, (%rdx, %rdi) // Store four destination pixels
+
+.LAlphaZero00:
+ ptest %xmm7, %xmm2 // Check if all alphas are zero or opaque
+ ja .LAlphaNotOpaqueOrZero01
+ lddqu 32(%rax, %rdi), %xmm1 // Pre-load four source pixels
+ jz .LAlphaZero01
+ movdqa %xmm2, 16(%rdx, %rdi) // Store four destination pixels
+
+.LAlphaZero01:
+ addq $32, %rdi // Adjust offset and pixel count
+ subl $8, %ecx
+ jae .LUnalignedLoop
+ addl $8, %ecx // Adjust pixel count
+ jmp .LLoopCleanup0
+
+ .p2align 4
+.LAlphaNotOpaqueOrZero00:
+ movdqa (%rdx, %rdi), %xmm5 // Load four destination pixels
+ EXTRACT_ALPHA(xmm1, xmm2) // Extract and clone alpha value
+ SCALE_PIXELS // Scale pixels using alpha
+
+ lddqu 16(%rax, %rdi), %xmm2 // Pre-load four source pixels
+ pblendvb %xmm5, %xmm3 // Combine results (mask in %xmm0, implicitly)
+ paddb %xmm3, %xmm1 // Add source and destination pixels together
+ movdqa %xmm1, (%rdx, %rdi) // Store four destination pixels
+
+ // Handle next four pixels
+ ptest %xmm7, %xmm2 // Check if all alphas are zero or opaque
+ ja .LAlphaNotOpaqueOrZero01
+ lddqu 32(%rax, %rdi), %xmm1 // Pre-load four source pixels
+ jz .LAlphaZero02
+ movdqa %xmm2, 16(%rdx, %rdi) // Store four destination pixels
+.LAlphaZero02:
+ addq $32, %rdi // Adjust offset and pixel count
+ subl $8, %ecx
+ jae .LUnalignedLoop
+ addl $8, %ecx // Adjust pixel count
+ jmp .LLoopCleanup0
+
+ .p2align 4
+.LAlphaNotOpaqueOrZero01:
+ movdqa 16(%rdx, %rdi), %xmm5 // Load four destination pixels
+ EXTRACT_ALPHA(xmm2, xmm1) // Extract and clone alpha value
+ SCALE_PIXELS // Scale pixels using alpha
+
+ lddqu 32(%rax, %rdi), %xmm1 // Pre-load four source pixels
+ addq $32, %rdi
+ pblendvb %xmm5, %xmm3 // Combine results (mask in %xmm0, implicitly)
+ paddb %xmm3, %xmm2 // Add source and destination pixels together
+ subl $8, %ecx
+ movdqa %xmm2, -16(%rdx, %rdi) // Store four destination pixels
+ jae .LUnalignedLoop
+ addl $8, %ecx // Adjust pixel count
+
+ // Cleanup - handle pending pixels from loop
+.LLoopCleanup0:
+ ptest %xmm7, %xmm1 // Check if all alphas are zero or opaque
+ ja .LAlphaNotOpaqueOrZero02
+ jz .LAlphaZero03
+ movdqa %xmm1, (%rdx, %rdi) // Store four destination pixels
+.LAlphaZero03:
+ addq $16, %rdi
+ subl $4, %ecx
+ js .LSmallRemaining // Reuse code from small loop
+
+.LRemain0:
+ lddqu (%rax, %rdi), %xmm1 // Load four source pixels
+ ptest %xmm7, %xmm1 // Check if all alphas are zero or opaque
+ ja .LAlphaNotOpaqueOrZero02
+ jz .LAlphaZero04
+ movdqa %xmm1, (%rdx, %rdi) // Store four destination pixels
+.LAlphaZero04:
+ addq $16, %rdi
+ subl $4, %ecx
+ jmp .LSmallRemaining // Reuse code from small loop
+
+.LAlphaNotOpaqueOrZero02:
+ movdqa (%rdx, %rdi), %xmm5 // Load four destination pixels
+ EXTRACT_ALPHA(xmm1, xmm2) // Extract and clone alpha value
+ SCALE_PIXELS // Scale pixels using alpha
+
+ addq $16, %rdi
+ subl $4, %ecx
+ pblendvb %xmm5, %xmm3 // Combine results (mask in %xmm0, implicitly)
+ paddb %xmm3, %xmm1 // Add source and destination pixels together
+ movdqa %xmm1, -16(%rdx, %rdi) // Store four destination pixels
+ js .LSmallRemaining // Reuse code from small loop
+ jmp .LRemain0
+
+ // Source aligned to destination
+ ////////////////////////////////////////////////////////////////////////////////
+ .p2align 4
+.LAlignedLoop: // Main loop for aligned, handles eight pixels per iteration
+ ptest %xmm7, %xmm1 // Check if all alphas are zero or opaque
+ ja .LAlphaNotOpaqueOrZero10
+ movdqa 16(%rax, %rdi), %xmm2 // Pre-load four source pixels
+ jz .LAlphaZero10
+ movdqa %xmm1, (%rdx, %rdi) // Store four destination pixels
+
+.LAlphaZero10:
+ ptest %xmm7, %xmm2 // Check if all alphas are zero or opaque
+ ja .LAlphaNotOpaqueOrZero11
+ movdqa 32(%rax, %rdi), %xmm1 // Pre-load four source pixels
+ jz .LAlphaZero11
+ movdqa %xmm2, 16(%rdx, %rdi) // Store four destination pixels
+
+.LAlphaZero11:
+ addq $32, %rdi // Adjust offset and pixel count
+ subl $8, %ecx
+ jae .LAlignedLoop
+ addl $8, %ecx // Adjust pixel count
+ jmp .LLoopCleanup1
+
+ .p2align 4
+.LAlphaNotOpaqueOrZero10:
+ movdqa (%rdx, %rdi), %xmm5 // Load four destination pixels
+ EXTRACT_ALPHA(xmm1, xmm2) // Extract and clone alpha value
+ SCALE_PIXELS // Scale pixels using alpha
+
+ movdqa 16(%rax, %rdi), %xmm2 // Pre-load four source pixels
+ pblendvb %xmm5, %xmm3 // Combine results (mask in %xmm0, implicitly)
+ paddb %xmm3, %xmm1 // Add source and destination pixels together
+ movdqa %xmm1, (%rdx, %rdi) // Store four destination pixels
+
+ // Handle next four pixels
+ ptest %xmm7, %xmm2 // Check if all alphas are zero or opaque
+ ja .LAlphaNotOpaqueOrZero11
+ movdqa 32(%rax, %rdi), %xmm1 // Pre-load four source pixels
+ jz .LAlphaZero12
+ movdqa %xmm2, 16(%rdx, %rdi) // Store four destination pixels
+.LAlphaZero12:
+ addq $32, %rdi // Adjust offset and pixel count
+ subl $8, %ecx
+ jae .LAlignedLoop
+ addl $8, %ecx // Adjust pixel count
+ jmp .LLoopCleanup1
+
+ .p2align 4
+.LAlphaNotOpaqueOrZero11:
+ movdqa 16(%rdx, %rdi), %xmm5 // Load four destination pixels
+ EXTRACT_ALPHA(xmm2, xmm1) // Extract and clone alpha value
+ SCALE_PIXELS // Scale pixels using alpha
+ movdqa 32(%rax, %rdi), %xmm1 // Pre-load four source pixels
+
+ addq $32, %rdi
+ pblendvb %xmm5, %xmm3 // Combine results (mask in %xmm0, implicitly)
+ paddb %xmm3, %xmm2 // Add source and destination pixels together
+ subl $8, %ecx
+ movdqa %xmm2, -16(%rdx, %rdi) // Store four destination pixels
+ jae .LAlignedLoop
+ addl $8, %ecx // Adjust pixel count
+
+ // Cleanup - handle four pending pixels from loop
+.LLoopCleanup1:
+ ptest %xmm7, %xmm1 // Check if all alphas are zero or opaque
+ ja .LAlphaNotOpaqueOrZero12
+ jz .LAlphaZero13
+ movdqa %xmm1, (%rdx, %rdi) // Store four destination pixels
+.LAlphaZero13:
+ addq $16, %rdi
+ subl $4, %ecx
+ js .LSmallRemaining // Reuse code from small loop
+
+.LRemain1:
+ movdqa (%rax, %rdi), %xmm1 // Pre-load four source pixels
+ ptest %xmm7, %xmm1 // Check if all alphas are zero or opaque
+ ja .LAlphaNotOpaqueOrZero12
+ jz .LAlphaZero14
+ movdqa %xmm1, (%rdx, %rdi) // Store four destination pixels
+.LAlphaZero14:
+ addq $16, %rdi
+ subl $4, %ecx
+ jmp .LSmallRemaining // Reuse code from small loop
+
+.LAlphaNotOpaqueOrZero12:
+ movdqa (%rdx, %rdi), %xmm5 // Load four destination pixels
+ EXTRACT_ALPHA(xmm1, xmm2) // Extract and clone alpha value
+ SCALE_PIXELS // Scale pixels using alpha
+
+ addq $16, %rdi
+ subl $4, %ecx
+ pblendvb %xmm5, %xmm3 // Combine results (mask in %xmm0, implicitly)
+ paddb %xmm3, %xmm1 // Add source and destination pixels together
+ movdqa %xmm1, -16(%rdx, %rdi) // Store four destination pixels
+ js .LSmallRemaining // Reuse code from small loop
+ jmp .LRemain1
+
+ .cfi_endproc
+#ifndef __clang__
+ .size S32A_Opaque_BlitRow32_SSE4_asm, .-S32A_Opaque_BlitRow32_SSE4_asm
+#endif
+
+ // Constants for SSE code
+#ifndef __clang__
+ .section .rodata
+#endif
+ .p2align 4
+.LAlphaCheckMask:
+ .long 0xFF000000, 0xFF000000, 0xFF000000, 0xFF000000
+.LInverseAlphaCalc:
+ .word 256, 256, 256, 256, 256, 256, 256, 256
+.LResultMergeMask:
+ .long 0x00FF00FF, 0x00FF00FF, 0x00FF00FF, 0x00FF00FF
+#endif
diff --git a/src/opts/opts_check_x86.cpp b/src/opts/opts_check_x86.cpp
index 6af47729cd..b94969890d 100644
--- a/src/opts/opts_check_x86.cpp
+++ b/src/opts/opts_check_x86.cpp
@@ -12,6 +12,7 @@
#include "SkBlitRect_opts_SSE2.h"
#include "SkBlitRow.h"
#include "SkBlitRow_opts_SSE2.h"
+#include "SkBlitRow_opts_SSE4.h"
#include "SkBlurImage_opts_SSE2.h"
#include "SkMorphology_opts.h"
#include "SkMorphology_opts_SSE2.h"
@@ -82,6 +83,8 @@ static int get_SIMD_level() {
getcpuid(1, cpu_info);
if ((cpu_info[2] & (1<<20)) != 0) {
return SK_CPU_SSE_LEVEL_SSE42;
+ } else if ((cpu_info[2] & (1<<19)) != 0) {
+ return SK_CPU_SSE_LEVEL_SSE41;
} else if ((cpu_info[2] & (1<<9)) != 0) {
return SK_CPU_SSE_LEVEL_SSSE3;
} else if ((cpu_info[3] & (1<<26)) != 0) {
@@ -206,16 +209,30 @@ SkBlitRow::Proc SkBlitRow::PlatformProcs565(unsigned flags) {
}
}
-static SkBlitRow::Proc32 platform_32_procs[] = {
+static SkBlitRow::Proc32 platform_32_procs_SSE2[] = {
NULL, // S32_Opaque,
S32_Blend_BlitRow32_SSE2, // S32_Blend,
S32A_Opaque_BlitRow32_SSE2, // S32A_Opaque
S32A_Blend_BlitRow32_SSE2, // S32A_Blend,
};
+#if defined(SK_ATT_ASM_SUPPORTED)
+static SkBlitRow::Proc32 platform_32_procs_SSE4[] = {
+ NULL, // S32_Opaque,
+ S32_Blend_BlitRow32_SSE2, // S32_Blend,
+ S32A_Opaque_BlitRow32_SSE4_asm, // S32A_Opaque
+ S32A_Blend_BlitRow32_SSE2, // S32A_Blend,
+};
+#endif
+
SkBlitRow::Proc32 SkBlitRow::PlatformProcs32(unsigned flags) {
+#if defined(SK_ATT_ASM_SUPPORTED)
+ if (supports_simd(SK_CPU_SSE_LEVEL_SSE41)) {
+ return platform_32_procs_SSE4[flags];
+ } else
+#endif
if (supports_simd(SK_CPU_SSE_LEVEL_SSE2)) {
- return platform_32_procs[flags];
+ return platform_32_procs_SSE2[flags];
} else {
return NULL;
}
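The runtime dispatch added above follows a common pattern: query CPUID leaf 1 once, map its feature bits to an SSE level, and pick a function-pointer table accordingly. The bits tested (ECX bit 20 for SSE4.2, bit 19 for SSE4.1, bit 9 for SSSE3, EDX bit 26 for SSE2) are the architecturally defined flags. The condensed sketch below shows the same idea in isolation; detect_sse_level and choose_blit_proc are hypothetical stand-ins for Skia's get_SIMD_level() and PlatformProcs32().

    #include <cpuid.h>   // GCC/clang helper on x86; MSVC would use __cpuid from <intrin.h>
    #include <cstdint>

    enum SseLevel { kNone = 0, kSSE2 = 20, kSSSE3 = 31, kSSE41 = 41, kSSE42 = 42 };

    static SseLevel detect_sse_level() {
        unsigned eax, ebx, ecx, edx;
        if (!__get_cpuid(1, &eax, &ebx, &ecx, &edx)) {
            return kNone;
        }
        if (ecx & (1u << 20)) return kSSE42;   // SSE4.2
        if (ecx & (1u << 19)) return kSSE41;   // SSE4.1
        if (ecx & (1u << 9))  return kSSSE3;   // SSSE3
        if (edx & (1u << 26)) return kSSE2;    // SSE2
        return kNone;
    }

    typedef void (*BlitProc)(uint32_t* dst, const uint32_t* src, int count, unsigned alpha);

    // Pick a blitter the same way PlatformProcs32() does: prefer the SSE4 table
    // when it exists and the CPU supports at least SSE4.1, else fall back to SSE2.
    BlitProc choose_blit_proc(const BlitProc sse2Table[], const BlitProc sse4Table[],
                              unsigned flags) {
        static const SseLevel level = detect_sse_level();  // detected once, then cached
        if (sse4Table != nullptr && level >= kSSE41) {
            return sse4Table[flags];
        }
        return level >= kSSE2 ? sse2Table[flags] : nullptr;
    }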
diff --git a/tests/DeferredCanvasTest.cpp b/tests/DeferredCanvasTest.cpp
index 747b23b9ab..8aaeaed5f3 100644
--- a/tests/DeferredCanvasTest.cpp
+++ b/tests/DeferredCanvasTest.cpp
@@ -487,6 +487,7 @@ static void TestDeferredCanvasMemoryLimit(skiatest::Reporter* reporter) {
SkBitmap sourceImage;
// 100 by 100 image, takes 40,000 bytes in memory
sourceImage.allocN32Pixels(100, 100);
+ sourceImage.eraseColor(SK_ColorGREEN);
for (int i = 0; i < 5; i++) {
sourceImage.notifyPixelsChanged(); // to force re-serialization
@@ -520,6 +521,7 @@ static void TestDeferredCanvasBitmapCaching(skiatest::Reporter* reporter) {
SkBitmap sourceImages[imageCount];
for (int i = 0; i < imageCount; i++) {
sourceImages[i].allocN32Pixels(100, 100);
+ sourceImages[i].eraseColor(SK_ColorGREEN);
}
size_t bitmapSize = sourceImages[0].getSize();
@@ -619,6 +621,7 @@ static void TestDeferredCanvasBitmapShaderNoLeak(skiatest::Reporter* reporter) {
SkPaint paint;
SkBitmap paintPattern;
paintPattern.allocN32Pixels(10, 10);
+ paintPattern.eraseColor(SK_ColorGREEN);
paint.setShader(SkNEW_ARGS(SkBitmapProcShader,
(paintPattern, SkShader::kClamp_TileMode, SkShader::kClamp_TileMode)))->unref();
canvas->drawPaint(paint);
@@ -647,6 +650,7 @@ static void TestDeferredCanvasBitmapSizeThreshold(skiatest::Reporter* reporter)
SkBitmap sourceImage;
// 100 by 100 image, takes 40,000 bytes in memory
sourceImage.allocN32Pixels(100, 100);
+ sourceImage.eraseColor(SK_ColorGREEN);
// 1 under : should not store the image
{