Diffstat (limited to 'src/jumper/SkJumper_generated.S')
-rw-r--r--  src/jumper/SkJumper_generated.S  5968
1 file changed, 3019 insertions, 2949 deletions
diff --git a/src/jumper/SkJumper_generated.S b/src/jumper/SkJumper_generated.S
index 07f0b75c8d..861d81baec 100644
--- a/src/jumper/SkJumper_generated.S
+++ b/src/jumper/SkJumper_generated.S
@@ -9576,9 +9576,9 @@ _sk_seed_shader_skx:
.byte 197,252,88,0 // vaddps (%rax),%ymm0,%ymm0
.byte 98,242,125,40,124,201 // vpbroadcastd %ecx,%ymm1
.byte 197,252,91,201 // vcvtdq2ps %ymm1,%ymm1
- .byte 98,241,116,56,88,13,174,136,3,0 // vaddps 0x388ae(%rip){1to8},%ymm1,%ymm1 # 389b0 <_sk_srcover_bgra_8888_sse2_lowp+0x2ac>
+ .byte 98,241,116,56,88,13,30,138,3,0 // vaddps 0x38a1e(%rip){1to8},%ymm1,%ymm1 # 38b20 <_sk_srcover_bgra_8888_sse2_lowp+0x2ac>
.byte 72,173 // lods %ds:(%rsi),%rax
- .byte 196,226,125,24,21,167,136,3,0 // vbroadcastss 0x388a7(%rip),%ymm2 # 389b4 <_sk_srcover_bgra_8888_sse2_lowp+0x2b0>
+ .byte 196,226,125,24,21,23,138,3,0 // vbroadcastss 0x38a17(%rip),%ymm2 # 38b24 <_sk_srcover_bgra_8888_sse2_lowp+0x2b0>
.byte 197,228,87,219 // vxorps %ymm3,%ymm3,%ymm3
.byte 197,220,87,228 // vxorps %ymm4,%ymm4,%ymm4
.byte 197,212,87,237 // vxorps %ymm5,%ymm5,%ymm5
@@ -9592,16 +9592,16 @@ FUNCTION(_sk_dither_skx)
_sk_dither_skx:
.byte 72,173 // lods %ds:(%rsi),%rax
.byte 98,114,125,40,124,194 // vpbroadcastd %edx,%ymm8
- .byte 197,61,254,5,45,138,3,0 // vpaddd 0x38a2d(%rip),%ymm8,%ymm8 # 38b60 <_sk_srcover_bgra_8888_sse2_lowp+0x45c>
+ .byte 197,61,254,5,141,139,3,0 // vpaddd 0x38b8d(%rip),%ymm8,%ymm8 # 38cc0 <_sk_srcover_bgra_8888_sse2_lowp+0x44c>
.byte 98,114,125,40,124,201 // vpbroadcastd %ecx,%ymm9
.byte 196,65,53,239,200 // vpxor %ymm8,%ymm9,%ymm9
- .byte 196,98,125,88,21,113,136,3,0 // vpbroadcastd 0x38871(%rip),%ymm10 # 389b8 <_sk_srcover_bgra_8888_sse2_lowp+0x2b4>
+ .byte 196,98,125,88,21,225,137,3,0 // vpbroadcastd 0x389e1(%rip),%ymm10 # 38b28 <_sk_srcover_bgra_8888_sse2_lowp+0x2b4>
.byte 196,65,53,219,218 // vpand %ymm10,%ymm9,%ymm11
.byte 196,193,37,114,243,5 // vpslld $0x5,%ymm11,%ymm11
.byte 196,65,61,219,210 // vpand %ymm10,%ymm8,%ymm10
.byte 196,193,45,114,242,4 // vpslld $0x4,%ymm10,%ymm10
- .byte 196,98,125,88,37,86,136,3,0 // vpbroadcastd 0x38856(%rip),%ymm12 # 389bc <_sk_srcover_bgra_8888_sse2_lowp+0x2b8>
- .byte 196,98,125,88,45,81,136,3,0 // vpbroadcastd 0x38851(%rip),%ymm13 # 389c0 <_sk_srcover_bgra_8888_sse2_lowp+0x2bc>
+ .byte 196,98,125,88,37,198,137,3,0 // vpbroadcastd 0x389c6(%rip),%ymm12 # 38b2c <_sk_srcover_bgra_8888_sse2_lowp+0x2b8>
+ .byte 196,98,125,88,45,193,137,3,0 // vpbroadcastd 0x389c1(%rip),%ymm13 # 38b30 <_sk_srcover_bgra_8888_sse2_lowp+0x2bc>
.byte 196,65,53,219,245 // vpand %ymm13,%ymm9,%ymm14
.byte 196,193,13,114,246,2 // vpslld $0x2,%ymm14,%ymm14
.byte 196,65,37,235,222 // vpor %ymm14,%ymm11,%ymm11
@@ -9616,8 +9616,8 @@ _sk_dither_skx:
.byte 196,65,61,235,195 // vpor %ymm11,%ymm8,%ymm8
.byte 196,65,61,235,193 // vpor %ymm9,%ymm8,%ymm8
.byte 196,65,124,91,192 // vcvtdq2ps %ymm8,%ymm8
- .byte 196,98,125,24,13,3,136,3,0 // vbroadcastss 0x38803(%rip),%ymm9 # 389c4 <_sk_srcover_bgra_8888_sse2_lowp+0x2c0>
- .byte 98,114,61,56,168,13,253,135,3,0 // vfmadd213ps 0x387fd(%rip){1to8},%ymm8,%ymm9 # 389c8 <_sk_srcover_bgra_8888_sse2_lowp+0x2c4>
+ .byte 196,98,125,24,13,115,137,3,0 // vbroadcastss 0x38973(%rip),%ymm9 # 38b34 <_sk_srcover_bgra_8888_sse2_lowp+0x2c0>
+ .byte 98,114,61,56,168,13,109,137,3,0 // vfmadd213ps 0x3896d(%rip){1to8},%ymm8,%ymm9 # 38b38 <_sk_srcover_bgra_8888_sse2_lowp+0x2c4>
.byte 196,98,125,24,0 // vbroadcastss (%rax),%ymm8
.byte 196,65,52,89,192 // vmulps %ymm8,%ymm9,%ymm8
.byte 197,188,88,192 // vaddps %ymm0,%ymm8,%ymm0
@@ -9650,7 +9650,7 @@ HIDDEN _sk_black_color_skx
FUNCTION(_sk_black_color_skx)
_sk_black_color_skx:
.byte 72,173 // lods %ds:(%rsi),%rax
- .byte 196,226,125,24,29,138,135,3,0 // vbroadcastss 0x3878a(%rip),%ymm3 # 389b4 <_sk_srcover_bgra_8888_sse2_lowp+0x2b0>
+ .byte 196,226,125,24,29,250,136,3,0 // vbroadcastss 0x388fa(%rip),%ymm3 # 38b24 <_sk_srcover_bgra_8888_sse2_lowp+0x2b0>
.byte 197,252,87,192 // vxorps %ymm0,%ymm0,%ymm0
.byte 197,244,87,201 // vxorps %ymm1,%ymm1,%ymm1
.byte 197,236,87,210 // vxorps %ymm2,%ymm2,%ymm2
@@ -9661,7 +9661,7 @@ HIDDEN _sk_white_color_skx
FUNCTION(_sk_white_color_skx)
_sk_white_color_skx:
.byte 72,173 // lods %ds:(%rsi),%rax
- .byte 196,226,125,24,5,113,135,3,0 // vbroadcastss 0x38771(%rip),%ymm0 # 389b4 <_sk_srcover_bgra_8888_sse2_lowp+0x2b0>
+ .byte 196,226,125,24,5,225,136,3,0 // vbroadcastss 0x388e1(%rip),%ymm0 # 38b24 <_sk_srcover_bgra_8888_sse2_lowp+0x2b0>
.byte 197,252,40,200 // vmovaps %ymm0,%ymm1
.byte 197,252,40,208 // vmovaps %ymm0,%ymm2
.byte 197,252,40,216 // vmovaps %ymm0,%ymm3
@@ -9707,7 +9707,7 @@ HIDDEN _sk_srcatop_skx
FUNCTION(_sk_srcatop_skx)
_sk_srcatop_skx:
.byte 197,252,89,199 // vmulps %ymm7,%ymm0,%ymm0
- .byte 196,98,125,24,5,16,135,3,0 // vbroadcastss 0x38710(%rip),%ymm8 # 389b4 <_sk_srcover_bgra_8888_sse2_lowp+0x2b0>
+ .byte 196,98,125,24,5,128,136,3,0 // vbroadcastss 0x38880(%rip),%ymm8 # 38b24 <_sk_srcover_bgra_8888_sse2_lowp+0x2b0>
.byte 197,60,92,195 // vsubps %ymm3,%ymm8,%ymm8
.byte 196,226,61,184,196 // vfmadd231ps %ymm4,%ymm8,%ymm0
.byte 197,244,89,207 // vmulps %ymm7,%ymm1,%ymm1
@@ -9723,7 +9723,7 @@ HIDDEN _sk_dstatop_skx
.globl _sk_dstatop_skx
FUNCTION(_sk_dstatop_skx)
_sk_dstatop_skx:
- .byte 196,98,125,24,5,223,134,3,0 // vbroadcastss 0x386df(%rip),%ymm8 # 389b4 <_sk_srcover_bgra_8888_sse2_lowp+0x2b0>
+ .byte 196,98,125,24,5,79,136,3,0 // vbroadcastss 0x3884f(%rip),%ymm8 # 38b24 <_sk_srcover_bgra_8888_sse2_lowp+0x2b0>
.byte 197,60,92,199 // vsubps %ymm7,%ymm8,%ymm8
.byte 197,188,89,192 // vmulps %ymm0,%ymm8,%ymm0
.byte 196,226,101,184,196 // vfmadd231ps %ymm4,%ymm3,%ymm0
@@ -9762,7 +9762,7 @@ HIDDEN _sk_srcout_skx
.globl _sk_srcout_skx
FUNCTION(_sk_srcout_skx)
_sk_srcout_skx:
- .byte 196,98,125,24,5,130,134,3,0 // vbroadcastss 0x38682(%rip),%ymm8 # 389b4 <_sk_srcover_bgra_8888_sse2_lowp+0x2b0>
+ .byte 196,98,125,24,5,242,135,3,0 // vbroadcastss 0x387f2(%rip),%ymm8 # 38b24 <_sk_srcover_bgra_8888_sse2_lowp+0x2b0>
.byte 197,60,92,199 // vsubps %ymm7,%ymm8,%ymm8
.byte 197,188,89,192 // vmulps %ymm0,%ymm8,%ymm0
.byte 197,188,89,201 // vmulps %ymm1,%ymm8,%ymm1
@@ -9775,7 +9775,7 @@ HIDDEN _sk_dstout_skx
.globl _sk_dstout_skx
FUNCTION(_sk_dstout_skx)
_sk_dstout_skx:
- .byte 196,226,125,24,5,97,134,3,0 // vbroadcastss 0x38661(%rip),%ymm0 # 389b4 <_sk_srcover_bgra_8888_sse2_lowp+0x2b0>
+ .byte 196,226,125,24,5,209,135,3,0 // vbroadcastss 0x387d1(%rip),%ymm0 # 38b24 <_sk_srcover_bgra_8888_sse2_lowp+0x2b0>
.byte 197,252,92,219 // vsubps %ymm3,%ymm0,%ymm3
.byte 197,228,89,196 // vmulps %ymm4,%ymm3,%ymm0
.byte 197,228,89,205 // vmulps %ymm5,%ymm3,%ymm1
@@ -9788,7 +9788,7 @@ HIDDEN _sk_srcover_skx
.globl _sk_srcover_skx
FUNCTION(_sk_srcover_skx)
_sk_srcover_skx:
- .byte 196,98,125,24,5,64,134,3,0 // vbroadcastss 0x38640(%rip),%ymm8 # 389b4 <_sk_srcover_bgra_8888_sse2_lowp+0x2b0>
+ .byte 196,98,125,24,5,176,135,3,0 // vbroadcastss 0x387b0(%rip),%ymm8 # 38b24 <_sk_srcover_bgra_8888_sse2_lowp+0x2b0>
.byte 197,60,92,195 // vsubps %ymm3,%ymm8,%ymm8
.byte 196,194,93,184,192 // vfmadd231ps %ymm8,%ymm4,%ymm0
.byte 196,194,85,184,200 // vfmadd231ps %ymm8,%ymm5,%ymm1
@@ -9801,7 +9801,7 @@ HIDDEN _sk_dstover_skx
.globl _sk_dstover_skx
FUNCTION(_sk_dstover_skx)
_sk_dstover_skx:
- .byte 196,98,125,24,5,27,134,3,0 // vbroadcastss 0x3861b(%rip),%ymm8 # 389b4 <_sk_srcover_bgra_8888_sse2_lowp+0x2b0>
+ .byte 196,98,125,24,5,139,135,3,0 // vbroadcastss 0x3878b(%rip),%ymm8 # 38b24 <_sk_srcover_bgra_8888_sse2_lowp+0x2b0>
.byte 197,60,92,199 // vsubps %ymm7,%ymm8,%ymm8
.byte 196,226,61,168,196 // vfmadd213ps %ymm4,%ymm8,%ymm0
.byte 196,226,61,168,205 // vfmadd213ps %ymm5,%ymm8,%ymm1
@@ -9825,7 +9825,7 @@ HIDDEN _sk_multiply_skx
.globl _sk_multiply_skx
FUNCTION(_sk_multiply_skx)
_sk_multiply_skx:
- .byte 196,98,125,24,5,226,133,3,0 // vbroadcastss 0x385e2(%rip),%ymm8 # 389b4 <_sk_srcover_bgra_8888_sse2_lowp+0x2b0>
+ .byte 196,98,125,24,5,82,135,3,0 // vbroadcastss 0x38752(%rip),%ymm8 # 38b24 <_sk_srcover_bgra_8888_sse2_lowp+0x2b0>
.byte 197,60,92,207 // vsubps %ymm7,%ymm8,%ymm9
.byte 197,52,89,208 // vmulps %ymm0,%ymm9,%ymm10
.byte 197,60,92,195 // vsubps %ymm3,%ymm8,%ymm8
@@ -9848,7 +9848,7 @@ HIDDEN _sk_plus__skx
FUNCTION(_sk_plus__skx)
_sk_plus__skx:
.byte 197,252,88,196 // vaddps %ymm4,%ymm0,%ymm0
- .byte 196,98,125,24,5,145,133,3,0 // vbroadcastss 0x38591(%rip),%ymm8 # 389b4 <_sk_srcover_bgra_8888_sse2_lowp+0x2b0>
+ .byte 196,98,125,24,5,1,135,3,0 // vbroadcastss 0x38701(%rip),%ymm8 # 38b24 <_sk_srcover_bgra_8888_sse2_lowp+0x2b0>
.byte 196,193,124,93,192 // vminps %ymm8,%ymm0,%ymm0
.byte 197,244,88,205 // vaddps %ymm5,%ymm1,%ymm1
.byte 196,193,116,93,200 // vminps %ymm8,%ymm1,%ymm1
@@ -9878,7 +9878,7 @@ HIDDEN _sk_xor__skx
.globl _sk_xor__skx
FUNCTION(_sk_xor__skx)
_sk_xor__skx:
- .byte 196,98,125,24,5,60,133,3,0 // vbroadcastss 0x3853c(%rip),%ymm8 # 389b4 <_sk_srcover_bgra_8888_sse2_lowp+0x2b0>
+ .byte 196,98,125,24,5,172,134,3,0 // vbroadcastss 0x386ac(%rip),%ymm8 # 38b24 <_sk_srcover_bgra_8888_sse2_lowp+0x2b0>
.byte 197,60,92,207 // vsubps %ymm7,%ymm8,%ymm9
.byte 197,180,89,192 // vmulps %ymm0,%ymm9,%ymm0
.byte 197,60,92,195 // vsubps %ymm3,%ymm8,%ymm8
@@ -9912,7 +9912,7 @@ _sk_darken_skx:
.byte 197,100,89,206 // vmulps %ymm6,%ymm3,%ymm9
.byte 196,193,108,95,209 // vmaxps %ymm9,%ymm2,%ymm2
.byte 197,188,92,210 // vsubps %ymm2,%ymm8,%ymm2
- .byte 196,98,125,24,5,192,132,3,0 // vbroadcastss 0x384c0(%rip),%ymm8 # 389b4 <_sk_srcover_bgra_8888_sse2_lowp+0x2b0>
+ .byte 196,98,125,24,5,48,134,3,0 // vbroadcastss 0x38630(%rip),%ymm8 # 38b24 <_sk_srcover_bgra_8888_sse2_lowp+0x2b0>
.byte 197,60,92,195 // vsubps %ymm3,%ymm8,%ymm8
.byte 196,194,69,184,216 // vfmadd231ps %ymm8,%ymm7,%ymm3
.byte 72,173 // lods %ds:(%rsi),%rax
@@ -9937,7 +9937,7 @@ _sk_lighten_skx:
.byte 197,100,89,206 // vmulps %ymm6,%ymm3,%ymm9
.byte 196,193,108,93,209 // vminps %ymm9,%ymm2,%ymm2
.byte 197,188,92,210 // vsubps %ymm2,%ymm8,%ymm2
- .byte 196,98,125,24,5,107,132,3,0 // vbroadcastss 0x3846b(%rip),%ymm8 # 389b4 <_sk_srcover_bgra_8888_sse2_lowp+0x2b0>
+ .byte 196,98,125,24,5,219,133,3,0 // vbroadcastss 0x385db(%rip),%ymm8 # 38b24 <_sk_srcover_bgra_8888_sse2_lowp+0x2b0>
.byte 197,60,92,195 // vsubps %ymm3,%ymm8,%ymm8
.byte 196,194,69,184,216 // vfmadd231ps %ymm8,%ymm7,%ymm3
.byte 72,173 // lods %ds:(%rsi),%rax
@@ -9965,7 +9965,7 @@ _sk_difference_skx:
.byte 196,193,108,93,209 // vminps %ymm9,%ymm2,%ymm2
.byte 197,236,88,210 // vaddps %ymm2,%ymm2,%ymm2
.byte 197,188,92,210 // vsubps %ymm2,%ymm8,%ymm2
- .byte 196,98,125,24,5,10,132,3,0 // vbroadcastss 0x3840a(%rip),%ymm8 # 389b4 <_sk_srcover_bgra_8888_sse2_lowp+0x2b0>
+ .byte 196,98,125,24,5,122,133,3,0 // vbroadcastss 0x3857a(%rip),%ymm8 # 38b24 <_sk_srcover_bgra_8888_sse2_lowp+0x2b0>
.byte 197,60,92,195 // vsubps %ymm3,%ymm8,%ymm8
.byte 196,194,69,184,216 // vfmadd231ps %ymm8,%ymm7,%ymm3
.byte 72,173 // lods %ds:(%rsi),%rax
@@ -9987,7 +9987,7 @@ _sk_exclusion_skx:
.byte 197,236,89,214 // vmulps %ymm6,%ymm2,%ymm2
.byte 197,236,88,210 // vaddps %ymm2,%ymm2,%ymm2
.byte 197,188,92,210 // vsubps %ymm2,%ymm8,%ymm2
- .byte 196,98,125,24,5,196,131,3,0 // vbroadcastss 0x383c4(%rip),%ymm8 # 389b4 <_sk_srcover_bgra_8888_sse2_lowp+0x2b0>
+ .byte 196,98,125,24,5,52,133,3,0 // vbroadcastss 0x38534(%rip),%ymm8 # 38b24 <_sk_srcover_bgra_8888_sse2_lowp+0x2b0>
.byte 197,60,92,195 // vsubps %ymm3,%ymm8,%ymm8
.byte 196,194,69,184,216 // vfmadd231ps %ymm8,%ymm7,%ymm3
.byte 72,173 // lods %ds:(%rsi),%rax
@@ -9999,7 +9999,7 @@ FUNCTION(_sk_colorburn_skx)
_sk_colorburn_skx:
.byte 98,241,92,40,194,199,0 // vcmpeqps %ymm7,%ymm4,%k0
.byte 98,114,126,40,56,192 // vpmovm2d %k0,%ymm8
- .byte 196,98,125,24,13,161,131,3,0 // vbroadcastss 0x383a1(%rip),%ymm9 # 389b4 <_sk_srcover_bgra_8888_sse2_lowp+0x2b0>
+ .byte 196,98,125,24,13,17,133,3,0 // vbroadcastss 0x38511(%rip),%ymm9 # 38b24 <_sk_srcover_bgra_8888_sse2_lowp+0x2b0>
.byte 197,52,92,215 // vsubps %ymm7,%ymm9,%ymm10
.byte 197,44,89,216 // vmulps %ymm0,%ymm10,%ymm11
.byte 197,36,88,228 // vaddps %ymm4,%ymm11,%ymm12
@@ -10063,7 +10063,7 @@ _sk_colordodge_skx:
.byte 196,65,60,87,192 // vxorps %ymm8,%ymm8,%ymm8
.byte 98,209,92,40,194,192,0 // vcmpeqps %ymm8,%ymm4,%k0
.byte 98,114,126,40,56,200 // vpmovm2d %k0,%ymm9
- .byte 196,98,125,24,21,114,130,3,0 // vbroadcastss 0x38272(%rip),%ymm10 # 389b4 <_sk_srcover_bgra_8888_sse2_lowp+0x2b0>
+ .byte 196,98,125,24,21,226,131,3,0 // vbroadcastss 0x383e2(%rip),%ymm10 # 38b24 <_sk_srcover_bgra_8888_sse2_lowp+0x2b0>
.byte 197,44,92,223 // vsubps %ymm7,%ymm10,%ymm11
.byte 197,36,89,224 // vmulps %ymm0,%ymm11,%ymm12
.byte 98,241,124,40,194,195,0 // vcmpeqps %ymm3,%ymm0,%k0
@@ -10120,7 +10120,7 @@ HIDDEN _sk_hardlight_skx
.globl _sk_hardlight_skx
FUNCTION(_sk_hardlight_skx)
_sk_hardlight_skx:
- .byte 196,98,125,24,5,106,129,3,0 // vbroadcastss 0x3816a(%rip),%ymm8 # 389b4 <_sk_srcover_bgra_8888_sse2_lowp+0x2b0>
+ .byte 196,98,125,24,5,218,130,3,0 // vbroadcastss 0x382da(%rip),%ymm8 # 38b24 <_sk_srcover_bgra_8888_sse2_lowp+0x2b0>
.byte 197,60,92,207 // vsubps %ymm7,%ymm8,%ymm9
.byte 197,52,89,208 // vmulps %ymm0,%ymm9,%ymm10
.byte 197,60,92,195 // vsubps %ymm3,%ymm8,%ymm8
@@ -10174,7 +10174,7 @@ HIDDEN _sk_overlay_skx
.globl _sk_overlay_skx
FUNCTION(_sk_overlay_skx)
_sk_overlay_skx:
- .byte 196,98,125,24,5,134,128,3,0 // vbroadcastss 0x38086(%rip),%ymm8 # 389b4 <_sk_srcover_bgra_8888_sse2_lowp+0x2b0>
+ .byte 196,98,125,24,5,246,129,3,0 // vbroadcastss 0x381f6(%rip),%ymm8 # 38b24 <_sk_srcover_bgra_8888_sse2_lowp+0x2b0>
.byte 197,60,92,207 // vsubps %ymm7,%ymm8,%ymm9
.byte 197,52,89,208 // vmulps %ymm0,%ymm9,%ymm10
.byte 197,60,92,195 // vsubps %ymm3,%ymm8,%ymm8
@@ -10240,15 +10240,15 @@ _sk_softlight_skx:
.byte 196,65,52,88,217 // vaddps %ymm9,%ymm9,%ymm11
.byte 196,65,36,88,235 // vaddps %ymm11,%ymm11,%ymm13
.byte 197,4,92,243 // vsubps %ymm3,%ymm15,%ymm14
- .byte 196,98,125,24,29,100,127,3,0 // vbroadcastss 0x37f64(%rip),%ymm11 # 389b4 <_sk_srcover_bgra_8888_sse2_lowp+0x2b0>
+ .byte 196,98,125,24,29,212,128,3,0 // vbroadcastss 0x380d4(%rip),%ymm11 # 38b24 <_sk_srcover_bgra_8888_sse2_lowp+0x2b0>
.byte 196,65,36,92,225 // vsubps %ymm9,%ymm11,%ymm12
.byte 196,98,13,168,227 // vfmadd213ps %ymm3,%ymm14,%ymm12
.byte 197,156,89,212 // vmulps %ymm4,%ymm12,%ymm2
.byte 196,66,21,168,237 // vfmadd213ps %ymm13,%ymm13,%ymm13
- .byte 98,226,125,40,24,21,95,127,3,0 // vbroadcastss 0x37f5f(%rip),%ymm18 # 389cc <_sk_srcover_bgra_8888_sse2_lowp+0x2c8>
+ .byte 98,226,125,40,24,21,207,128,3,0 // vbroadcastss 0x380cf(%rip),%ymm18 # 38b3c <_sk_srcover_bgra_8888_sse2_lowp+0x2c8>
.byte 98,161,52,40,88,202 // vaddps %ymm18,%ymm9,%ymm17
.byte 98,81,116,32,89,229 // vmulps %ymm13,%ymm17,%ymm12
- .byte 98,226,125,40,24,29,77,127,3,0 // vbroadcastss 0x37f4d(%rip),%ymm19 # 389d0 <_sk_srcover_bgra_8888_sse2_lowp+0x2cc>
+ .byte 98,226,125,40,24,29,189,128,3,0 // vbroadcastss 0x380bd(%rip),%ymm19 # 38b40 <_sk_srcover_bgra_8888_sse2_lowp+0x2cc>
.byte 98,50,53,40,184,227 // vfmadd231ps %ymm19,%ymm9,%ymm12
.byte 98,194,125,40,78,201 // vrsqrt14ps %ymm9,%ymm17
.byte 98,162,125,40,76,201 // vrcp14ps %ymm17,%ymm17
@@ -10370,11 +10370,11 @@ _sk_hue_skx:
.byte 196,65,28,89,219 // vmulps %ymm11,%ymm12,%ymm11
.byte 196,65,36,94,222 // vdivps %ymm14,%ymm11,%ymm11
.byte 196,99,37,74,225,240 // vblendvps %ymm15,%ymm1,%ymm11,%ymm12
- .byte 196,98,125,24,45,232,124,3,0 // vbroadcastss 0x37ce8(%rip),%ymm13 # 389d4 <_sk_srcover_bgra_8888_sse2_lowp+0x2d0>
- .byte 196,98,125,24,53,227,124,3,0 // vbroadcastss 0x37ce3(%rip),%ymm14 # 389d8 <_sk_srcover_bgra_8888_sse2_lowp+0x2d4>
+ .byte 196,98,125,24,45,88,126,3,0 // vbroadcastss 0x37e58(%rip),%ymm13 # 38b44 <_sk_srcover_bgra_8888_sse2_lowp+0x2d0>
+ .byte 196,98,125,24,53,83,126,3,0 // vbroadcastss 0x37e53(%rip),%ymm14 # 38b48 <_sk_srcover_bgra_8888_sse2_lowp+0x2d4>
.byte 196,65,84,89,254 // vmulps %ymm14,%ymm5,%ymm15
.byte 196,66,93,184,253 // vfmadd231ps %ymm13,%ymm4,%ymm15
- .byte 98,226,125,40,24,5,211,124,3,0 // vbroadcastss 0x37cd3(%rip),%ymm16 # 389dc <_sk_srcover_bgra_8888_sse2_lowp+0x2d8>
+ .byte 98,226,125,40,24,5,67,126,3,0 // vbroadcastss 0x37e43(%rip),%ymm16 # 38b4c <_sk_srcover_bgra_8888_sse2_lowp+0x2d8>
.byte 98,50,77,40,184,248 // vfmadd231ps %ymm16,%ymm6,%ymm15
.byte 196,65,44,89,222 // vmulps %ymm14,%ymm10,%ymm11
.byte 196,66,53,184,221 // vfmadd231ps %ymm13,%ymm9,%ymm11
@@ -10431,7 +10431,7 @@ _sk_hue_skx:
.byte 196,65,12,88,219 // vaddps %ymm11,%ymm14,%ymm11
.byte 196,67,45,74,203,144 // vblendvps %ymm9,%ymm11,%ymm10,%ymm9
.byte 197,52,95,201 // vmaxps %ymm1,%ymm9,%ymm9
- .byte 196,226,125,24,13,111,123,3,0 // vbroadcastss 0x37b6f(%rip),%ymm1 # 389b4 <_sk_srcover_bgra_8888_sse2_lowp+0x2b0>
+ .byte 196,226,125,24,13,223,124,3,0 // vbroadcastss 0x37cdf(%rip),%ymm1 # 38b24 <_sk_srcover_bgra_8888_sse2_lowp+0x2b0>
.byte 197,116,92,215 // vsubps %ymm7,%ymm1,%ymm10
.byte 197,172,89,192 // vmulps %ymm0,%ymm10,%ymm0
.byte 197,116,92,219 // vsubps %ymm3,%ymm1,%ymm11
@@ -10483,11 +10483,11 @@ _sk_saturation_skx:
.byte 196,65,28,89,219 // vmulps %ymm11,%ymm12,%ymm11
.byte 196,65,36,94,222 // vdivps %ymm14,%ymm11,%ymm11
.byte 196,99,37,74,225,240 // vblendvps %ymm15,%ymm1,%ymm11,%ymm12
- .byte 196,98,125,24,45,164,122,3,0 // vbroadcastss 0x37aa4(%rip),%ymm13 # 389d4 <_sk_srcover_bgra_8888_sse2_lowp+0x2d0>
- .byte 196,98,125,24,53,159,122,3,0 // vbroadcastss 0x37a9f(%rip),%ymm14 # 389d8 <_sk_srcover_bgra_8888_sse2_lowp+0x2d4>
+ .byte 196,98,125,24,45,20,124,3,0 // vbroadcastss 0x37c14(%rip),%ymm13 # 38b44 <_sk_srcover_bgra_8888_sse2_lowp+0x2d0>
+ .byte 196,98,125,24,53,15,124,3,0 // vbroadcastss 0x37c0f(%rip),%ymm14 # 38b48 <_sk_srcover_bgra_8888_sse2_lowp+0x2d4>
.byte 196,65,84,89,254 // vmulps %ymm14,%ymm5,%ymm15
.byte 196,66,93,184,253 // vfmadd231ps %ymm13,%ymm4,%ymm15
- .byte 98,226,125,40,24,5,143,122,3,0 // vbroadcastss 0x37a8f(%rip),%ymm16 # 389dc <_sk_srcover_bgra_8888_sse2_lowp+0x2d8>
+ .byte 98,226,125,40,24,5,255,123,3,0 // vbroadcastss 0x37bff(%rip),%ymm16 # 38b4c <_sk_srcover_bgra_8888_sse2_lowp+0x2d8>
.byte 98,50,77,40,184,248 // vfmadd231ps %ymm16,%ymm6,%ymm15
.byte 196,65,44,89,222 // vmulps %ymm14,%ymm10,%ymm11
.byte 196,66,53,184,221 // vfmadd231ps %ymm13,%ymm9,%ymm11
@@ -10544,7 +10544,7 @@ _sk_saturation_skx:
.byte 196,65,12,88,219 // vaddps %ymm11,%ymm14,%ymm11
.byte 196,67,45,74,203,144 // vblendvps %ymm9,%ymm11,%ymm10,%ymm9
.byte 197,52,95,201 // vmaxps %ymm1,%ymm9,%ymm9
- .byte 196,226,125,24,13,43,121,3,0 // vbroadcastss 0x3792b(%rip),%ymm1 # 389b4 <_sk_srcover_bgra_8888_sse2_lowp+0x2b0>
+ .byte 196,226,125,24,13,155,122,3,0 // vbroadcastss 0x37a9b(%rip),%ymm1 # 38b24 <_sk_srcover_bgra_8888_sse2_lowp+0x2b0>
.byte 197,116,92,215 // vsubps %ymm7,%ymm1,%ymm10
.byte 197,172,89,192 // vmulps %ymm0,%ymm10,%ymm0
.byte 197,116,92,219 // vsubps %ymm3,%ymm1,%ymm11
@@ -10568,11 +10568,11 @@ _sk_color_skx:
.byte 197,124,89,199 // vmulps %ymm7,%ymm0,%ymm8
.byte 197,116,89,207 // vmulps %ymm7,%ymm1,%ymm9
.byte 197,108,89,223 // vmulps %ymm7,%ymm2,%ymm11
- .byte 196,98,125,24,37,244,120,3,0 // vbroadcastss 0x378f4(%rip),%ymm12 # 389d4 <_sk_srcover_bgra_8888_sse2_lowp+0x2d0>
- .byte 196,98,125,24,45,239,120,3,0 // vbroadcastss 0x378ef(%rip),%ymm13 # 389d8 <_sk_srcover_bgra_8888_sse2_lowp+0x2d4>
+ .byte 196,98,125,24,37,100,122,3,0 // vbroadcastss 0x37a64(%rip),%ymm12 # 38b44 <_sk_srcover_bgra_8888_sse2_lowp+0x2d0>
+ .byte 196,98,125,24,45,95,122,3,0 // vbroadcastss 0x37a5f(%rip),%ymm13 # 38b48 <_sk_srcover_bgra_8888_sse2_lowp+0x2d4>
.byte 196,65,84,89,245 // vmulps %ymm13,%ymm5,%ymm14
.byte 196,66,93,184,244 // vfmadd231ps %ymm12,%ymm4,%ymm14
- .byte 196,98,125,24,61,224,120,3,0 // vbroadcastss 0x378e0(%rip),%ymm15 # 389dc <_sk_srcover_bgra_8888_sse2_lowp+0x2d8>
+ .byte 196,98,125,24,61,80,122,3,0 // vbroadcastss 0x37a50(%rip),%ymm15 # 38b4c <_sk_srcover_bgra_8888_sse2_lowp+0x2d8>
.byte 196,66,77,184,247 // vfmadd231ps %ymm15,%ymm6,%ymm14
.byte 196,65,52,89,213 // vmulps %ymm13,%ymm9,%ymm10
.byte 196,66,61,184,212 // vfmadd231ps %ymm12,%ymm8,%ymm10
@@ -10630,7 +10630,7 @@ _sk_color_skx:
.byte 196,65,20,88,219 // vaddps %ymm11,%ymm13,%ymm11
.byte 196,67,53,74,203,192 // vblendvps %ymm12,%ymm11,%ymm9,%ymm9
.byte 98,49,52,40,95,202 // vmaxps %ymm18,%ymm9,%ymm9
- .byte 196,98,125,24,29,113,119,3,0 // vbroadcastss 0x37771(%rip),%ymm11 # 389b4 <_sk_srcover_bgra_8888_sse2_lowp+0x2b0>
+ .byte 196,98,125,24,29,225,120,3,0 // vbroadcastss 0x378e1(%rip),%ymm11 # 38b24 <_sk_srcover_bgra_8888_sse2_lowp+0x2b0>
.byte 197,36,92,231 // vsubps %ymm7,%ymm11,%ymm12
.byte 197,156,89,192 // vmulps %ymm0,%ymm12,%ymm0
.byte 197,36,92,219 // vsubps %ymm3,%ymm11,%ymm11
@@ -10654,11 +10654,11 @@ _sk_luminosity_skx:
.byte 197,100,89,196 // vmulps %ymm4,%ymm3,%ymm8
.byte 197,100,89,205 // vmulps %ymm5,%ymm3,%ymm9
.byte 197,100,89,222 // vmulps %ymm6,%ymm3,%ymm11
- .byte 196,98,125,24,37,60,119,3,0 // vbroadcastss 0x3773c(%rip),%ymm12 # 389d4 <_sk_srcover_bgra_8888_sse2_lowp+0x2d0>
- .byte 196,98,125,24,45,55,119,3,0 // vbroadcastss 0x37737(%rip),%ymm13 # 389d8 <_sk_srcover_bgra_8888_sse2_lowp+0x2d4>
+ .byte 196,98,125,24,37,172,120,3,0 // vbroadcastss 0x378ac(%rip),%ymm12 # 38b44 <_sk_srcover_bgra_8888_sse2_lowp+0x2d0>
+ .byte 196,98,125,24,45,167,120,3,0 // vbroadcastss 0x378a7(%rip),%ymm13 # 38b48 <_sk_srcover_bgra_8888_sse2_lowp+0x2d4>
.byte 196,65,116,89,245 // vmulps %ymm13,%ymm1,%ymm14
.byte 196,66,125,184,244 // vfmadd231ps %ymm12,%ymm0,%ymm14
- .byte 196,98,125,24,61,40,119,3,0 // vbroadcastss 0x37728(%rip),%ymm15 # 389dc <_sk_srcover_bgra_8888_sse2_lowp+0x2d8>
+ .byte 196,98,125,24,61,152,120,3,0 // vbroadcastss 0x37898(%rip),%ymm15 # 38b4c <_sk_srcover_bgra_8888_sse2_lowp+0x2d8>
.byte 196,66,109,184,247 // vfmadd231ps %ymm15,%ymm2,%ymm14
.byte 196,65,52,89,213 // vmulps %ymm13,%ymm9,%ymm10
.byte 196,66,61,184,212 // vfmadd231ps %ymm12,%ymm8,%ymm10
@@ -10716,7 +10716,7 @@ _sk_luminosity_skx:
.byte 196,65,20,88,219 // vaddps %ymm11,%ymm13,%ymm11
.byte 196,67,53,74,203,192 // vblendvps %ymm12,%ymm11,%ymm9,%ymm9
.byte 98,49,52,40,95,202 // vmaxps %ymm18,%ymm9,%ymm9
- .byte 196,98,125,24,29,185,117,3,0 // vbroadcastss 0x375b9(%rip),%ymm11 # 389b4 <_sk_srcover_bgra_8888_sse2_lowp+0x2b0>
+ .byte 196,98,125,24,29,41,119,3,0 // vbroadcastss 0x37729(%rip),%ymm11 # 38b24 <_sk_srcover_bgra_8888_sse2_lowp+0x2b0>
.byte 197,36,92,231 // vsubps %ymm7,%ymm11,%ymm12
.byte 197,156,89,192 // vmulps %ymm0,%ymm12,%ymm0
.byte 197,36,92,219 // vsubps %ymm3,%ymm11,%ymm11
@@ -10759,9 +10759,9 @@ _sk_srcover_rgba_8888_skx:
.byte 197,252,91,246 // vcvtdq2ps %ymm6,%ymm6
.byte 197,197,114,215,24 // vpsrld $0x18,%ymm7,%ymm7
.byte 197,252,91,255 // vcvtdq2ps %ymm7,%ymm7
- .byte 196,98,125,24,5,16,117,3,0 // vbroadcastss 0x37510(%rip),%ymm8 # 389b4 <_sk_srcover_bgra_8888_sse2_lowp+0x2b0>
+ .byte 196,98,125,24,5,128,118,3,0 // vbroadcastss 0x37680(%rip),%ymm8 # 38b24 <_sk_srcover_bgra_8888_sse2_lowp+0x2b0>
.byte 197,60,92,195 // vsubps %ymm3,%ymm8,%ymm8
- .byte 196,98,125,24,13,47,117,3,0 // vbroadcastss 0x3752f(%rip),%ymm9 # 389e0 <_sk_srcover_bgra_8888_sse2_lowp+0x2dc>
+ .byte 196,98,125,24,13,159,118,3,0 // vbroadcastss 0x3769f(%rip),%ymm9 # 38b50 <_sk_srcover_bgra_8888_sse2_lowp+0x2dc>
.byte 196,193,124,89,193 // vmulps %ymm9,%ymm0,%ymm0
.byte 196,194,93,184,192 // vfmadd231ps %ymm8,%ymm4,%ymm0
.byte 196,193,116,89,201 // vmulps %ymm9,%ymm1,%ymm1
@@ -10903,9 +10903,9 @@ _sk_srcover_bgra_8888_skx:
.byte 197,252,91,228 // vcvtdq2ps %ymm4,%ymm4
.byte 197,197,114,215,24 // vpsrld $0x18,%ymm7,%ymm7
.byte 197,252,91,255 // vcvtdq2ps %ymm7,%ymm7
- .byte 196,98,125,24,5,211,114,3,0 // vbroadcastss 0x372d3(%rip),%ymm8 # 389b4 <_sk_srcover_bgra_8888_sse2_lowp+0x2b0>
+ .byte 196,98,125,24,5,67,116,3,0 // vbroadcastss 0x37443(%rip),%ymm8 # 38b24 <_sk_srcover_bgra_8888_sse2_lowp+0x2b0>
.byte 197,60,92,195 // vsubps %ymm3,%ymm8,%ymm8
- .byte 196,98,125,24,13,242,114,3,0 // vbroadcastss 0x372f2(%rip),%ymm9 # 389e0 <_sk_srcover_bgra_8888_sse2_lowp+0x2dc>
+ .byte 196,98,125,24,13,98,116,3,0 // vbroadcastss 0x37462(%rip),%ymm9 # 38b50 <_sk_srcover_bgra_8888_sse2_lowp+0x2dc>
.byte 196,193,124,89,193 // vmulps %ymm9,%ymm0,%ymm0
.byte 196,194,93,184,192 // vfmadd231ps %ymm8,%ymm4,%ymm0
.byte 196,193,116,89,201 // vmulps %ymm9,%ymm1,%ymm1
@@ -11034,7 +11034,7 @@ HIDDEN _sk_clamp_1_skx
.globl _sk_clamp_1_skx
FUNCTION(_sk_clamp_1_skx)
_sk_clamp_1_skx:
- .byte 196,98,125,24,5,214,112,3,0 // vbroadcastss 0x370d6(%rip),%ymm8 # 389b4 <_sk_srcover_bgra_8888_sse2_lowp+0x2b0>
+ .byte 196,98,125,24,5,70,114,3,0 // vbroadcastss 0x37246(%rip),%ymm8 # 38b24 <_sk_srcover_bgra_8888_sse2_lowp+0x2b0>
.byte 196,193,124,93,192 // vminps %ymm8,%ymm0,%ymm0
.byte 196,193,116,93,200 // vminps %ymm8,%ymm1,%ymm1
.byte 196,193,108,93,208 // vminps %ymm8,%ymm2,%ymm2
@@ -11046,7 +11046,7 @@ HIDDEN _sk_clamp_a_skx
.globl _sk_clamp_a_skx
FUNCTION(_sk_clamp_a_skx)
_sk_clamp_a_skx:
- .byte 98,241,100,56,93,29,180,112,3,0 // vminps 0x370b4(%rip){1to8},%ymm3,%ymm3 # 389b4 <_sk_srcover_bgra_8888_sse2_lowp+0x2b0>
+ .byte 98,241,100,56,93,29,36,114,3,0 // vminps 0x37224(%rip){1to8},%ymm3,%ymm3 # 38b24 <_sk_srcover_bgra_8888_sse2_lowp+0x2b0>
.byte 197,252,93,195 // vminps %ymm3,%ymm0,%ymm0
.byte 197,244,93,203 // vminps %ymm3,%ymm1,%ymm1
.byte 197,236,93,211 // vminps %ymm3,%ymm2,%ymm2
@@ -11057,7 +11057,7 @@ HIDDEN _sk_clamp_a_dst_skx
.globl _sk_clamp_a_dst_skx
FUNCTION(_sk_clamp_a_dst_skx)
_sk_clamp_a_dst_skx:
- .byte 98,241,68,56,93,61,154,112,3,0 // vminps 0x3709a(%rip){1to8},%ymm7,%ymm7 # 389b4 <_sk_srcover_bgra_8888_sse2_lowp+0x2b0>
+ .byte 98,241,68,56,93,61,10,114,3,0 // vminps 0x3720a(%rip){1to8},%ymm7,%ymm7 # 38b24 <_sk_srcover_bgra_8888_sse2_lowp+0x2b0>
.byte 197,220,93,231 // vminps %ymm7,%ymm4,%ymm4
.byte 197,212,93,239 // vminps %ymm7,%ymm5,%ymm5
.byte 197,204,93,247 // vminps %ymm7,%ymm6,%ymm6
@@ -11089,7 +11089,7 @@ HIDDEN _sk_invert_skx
.globl _sk_invert_skx
FUNCTION(_sk_invert_skx)
_sk_invert_skx:
- .byte 196,98,125,24,5,90,112,3,0 // vbroadcastss 0x3705a(%rip),%ymm8 # 389b4 <_sk_srcover_bgra_8888_sse2_lowp+0x2b0>
+ .byte 196,98,125,24,5,202,113,3,0 // vbroadcastss 0x371ca(%rip),%ymm8 # 38b24 <_sk_srcover_bgra_8888_sse2_lowp+0x2b0>
.byte 197,188,92,192 // vsubps %ymm0,%ymm8,%ymm0
.byte 197,188,92,201 // vsubps %ymm1,%ymm8,%ymm1
.byte 197,188,92,210 // vsubps %ymm2,%ymm8,%ymm2
@@ -11143,9 +11143,9 @@ HIDDEN _sk_unpremul_skx
.globl _sk_unpremul_skx
FUNCTION(_sk_unpremul_skx)
_sk_unpremul_skx:
- .byte 196,98,125,24,5,245,111,3,0 // vbroadcastss 0x36ff5(%rip),%ymm8 # 389b4 <_sk_srcover_bgra_8888_sse2_lowp+0x2b0>
+ .byte 196,98,125,24,5,101,113,3,0 // vbroadcastss 0x37165(%rip),%ymm8 # 38b24 <_sk_srcover_bgra_8888_sse2_lowp+0x2b0>
.byte 197,60,94,195 // vdivps %ymm3,%ymm8,%ymm8
- .byte 98,241,60,56,194,5,22,112,3,0,1 // vcmpltps 0x37016(%rip){1to8},%ymm8,%k0 # 389e4 <_sk_srcover_bgra_8888_sse2_lowp+0x2e0>
+ .byte 98,241,60,56,194,5,134,113,3,0,1 // vcmpltps 0x37186(%rip){1to8},%ymm8,%k0 # 38b54 <_sk_srcover_bgra_8888_sse2_lowp+0x2e0>
.byte 98,114,126,40,56,200 // vpmovm2d %k0,%ymm9
.byte 196,65,44,87,210 // vxorps %ymm10,%ymm10,%ymm10
.byte 196,67,45,74,192,144 // vblendvps %ymm9,%ymm8,%ymm10,%ymm8
@@ -11159,16 +11159,16 @@ HIDDEN _sk_from_srgb_skx
.globl _sk_from_srgb_skx
FUNCTION(_sk_from_srgb_skx)
_sk_from_srgb_skx:
- .byte 196,98,125,24,5,240,111,3,0 // vbroadcastss 0x36ff0(%rip),%ymm8 # 389e8 <_sk_srcover_bgra_8888_sse2_lowp+0x2e4>
+ .byte 196,98,125,24,5,96,113,3,0 // vbroadcastss 0x37160(%rip),%ymm8 # 38b58 <_sk_srcover_bgra_8888_sse2_lowp+0x2e4>
.byte 196,65,124,89,200 // vmulps %ymm8,%ymm0,%ymm9
.byte 197,124,89,208 // vmulps %ymm0,%ymm0,%ymm10
- .byte 196,98,125,24,29,226,111,3,0 // vbroadcastss 0x36fe2(%rip),%ymm11 # 389ec <_sk_srcover_bgra_8888_sse2_lowp+0x2e8>
- .byte 196,98,125,24,37,193,111,3,0 // vbroadcastss 0x36fc1(%rip),%ymm12 # 389d4 <_sk_srcover_bgra_8888_sse2_lowp+0x2d0>
+ .byte 196,98,125,24,29,82,113,3,0 // vbroadcastss 0x37152(%rip),%ymm11 # 38b5c <_sk_srcover_bgra_8888_sse2_lowp+0x2e8>
+ .byte 196,98,125,24,37,49,113,3,0 // vbroadcastss 0x37131(%rip),%ymm12 # 38b44 <_sk_srcover_bgra_8888_sse2_lowp+0x2d0>
.byte 196,65,124,40,236 // vmovaps %ymm12,%ymm13
.byte 196,66,125,168,235 // vfmadd213ps %ymm11,%ymm0,%ymm13
- .byte 196,98,125,24,53,202,111,3,0 // vbroadcastss 0x36fca(%rip),%ymm14 # 389f0 <_sk_srcover_bgra_8888_sse2_lowp+0x2ec>
+ .byte 196,98,125,24,53,58,113,3,0 // vbroadcastss 0x3713a(%rip),%ymm14 # 38b60 <_sk_srcover_bgra_8888_sse2_lowp+0x2ec>
.byte 196,66,45,168,238 // vfmadd213ps %ymm14,%ymm10,%ymm13
- .byte 196,98,125,24,21,192,111,3,0 // vbroadcastss 0x36fc0(%rip),%ymm10 # 389f4 <_sk_srcover_bgra_8888_sse2_lowp+0x2f0>
+ .byte 196,98,125,24,21,48,113,3,0 // vbroadcastss 0x37130(%rip),%ymm10 # 38b64 <_sk_srcover_bgra_8888_sse2_lowp+0x2f0>
.byte 98,209,124,40,194,194,1 // vcmpltps %ymm10,%ymm0,%k0
.byte 98,242,126,40,56,192 // vpmovm2d %k0,%ymm0
.byte 196,195,21,74,193,0 // vblendvps %ymm0,%ymm9,%ymm13,%ymm0
@@ -11194,16 +11194,16 @@ HIDDEN _sk_from_srgb_dst_skx
.globl _sk_from_srgb_dst_skx
FUNCTION(_sk_from_srgb_dst_skx)
_sk_from_srgb_dst_skx:
- .byte 196,98,125,24,5,67,111,3,0 // vbroadcastss 0x36f43(%rip),%ymm8 # 389e8 <_sk_srcover_bgra_8888_sse2_lowp+0x2e4>
+ .byte 196,98,125,24,5,179,112,3,0 // vbroadcastss 0x370b3(%rip),%ymm8 # 38b58 <_sk_srcover_bgra_8888_sse2_lowp+0x2e4>
.byte 196,65,92,89,200 // vmulps %ymm8,%ymm4,%ymm9
.byte 197,92,89,212 // vmulps %ymm4,%ymm4,%ymm10
- .byte 196,98,125,24,29,53,111,3,0 // vbroadcastss 0x36f35(%rip),%ymm11 # 389ec <_sk_srcover_bgra_8888_sse2_lowp+0x2e8>
- .byte 196,98,125,24,37,20,111,3,0 // vbroadcastss 0x36f14(%rip),%ymm12 # 389d4 <_sk_srcover_bgra_8888_sse2_lowp+0x2d0>
+ .byte 196,98,125,24,29,165,112,3,0 // vbroadcastss 0x370a5(%rip),%ymm11 # 38b5c <_sk_srcover_bgra_8888_sse2_lowp+0x2e8>
+ .byte 196,98,125,24,37,132,112,3,0 // vbroadcastss 0x37084(%rip),%ymm12 # 38b44 <_sk_srcover_bgra_8888_sse2_lowp+0x2d0>
.byte 196,65,124,40,236 // vmovaps %ymm12,%ymm13
.byte 196,66,93,168,235 // vfmadd213ps %ymm11,%ymm4,%ymm13
- .byte 196,98,125,24,53,29,111,3,0 // vbroadcastss 0x36f1d(%rip),%ymm14 # 389f0 <_sk_srcover_bgra_8888_sse2_lowp+0x2ec>
+ .byte 196,98,125,24,53,141,112,3,0 // vbroadcastss 0x3708d(%rip),%ymm14 # 38b60 <_sk_srcover_bgra_8888_sse2_lowp+0x2ec>
.byte 196,66,45,168,238 // vfmadd213ps %ymm14,%ymm10,%ymm13
- .byte 196,98,125,24,21,19,111,3,0 // vbroadcastss 0x36f13(%rip),%ymm10 # 389f4 <_sk_srcover_bgra_8888_sse2_lowp+0x2f0>
+ .byte 196,98,125,24,21,131,112,3,0 // vbroadcastss 0x37083(%rip),%ymm10 # 38b64 <_sk_srcover_bgra_8888_sse2_lowp+0x2f0>
.byte 98,209,92,40,194,194,1 // vcmpltps %ymm10,%ymm4,%k0
.byte 98,242,126,40,56,224 // vpmovm2d %k0,%ymm4
.byte 196,195,21,74,225,64 // vblendvps %ymm4,%ymm9,%ymm13,%ymm4
@@ -11230,19 +11230,19 @@ HIDDEN _sk_to_srgb_skx
FUNCTION(_sk_to_srgb_skx)
_sk_to_srgb_skx:
.byte 98,114,125,40,78,192 // vrsqrt14ps %ymm0,%ymm8
- .byte 196,98,125,24,13,160,110,3,0 // vbroadcastss 0x36ea0(%rip),%ymm9 # 389f8 <_sk_srcover_bgra_8888_sse2_lowp+0x2f4>
+ .byte 196,98,125,24,13,16,112,3,0 // vbroadcastss 0x37010(%rip),%ymm9 # 38b68 <_sk_srcover_bgra_8888_sse2_lowp+0x2f4>
.byte 196,65,124,89,209 // vmulps %ymm9,%ymm0,%ymm10
- .byte 196,98,125,24,29,150,110,3,0 // vbroadcastss 0x36e96(%rip),%ymm11 # 389fc <_sk_srcover_bgra_8888_sse2_lowp+0x2f8>
- .byte 196,98,125,24,37,145,110,3,0 // vbroadcastss 0x36e91(%rip),%ymm12 # 38a00 <_sk_srcover_bgra_8888_sse2_lowp+0x2fc>
+ .byte 196,98,125,24,29,6,112,3,0 // vbroadcastss 0x37006(%rip),%ymm11 # 38b6c <_sk_srcover_bgra_8888_sse2_lowp+0x2f8>
+ .byte 196,98,125,24,37,1,112,3,0 // vbroadcastss 0x37001(%rip),%ymm12 # 38b70 <_sk_srcover_bgra_8888_sse2_lowp+0x2fc>
.byte 196,65,124,40,236 // vmovaps %ymm12,%ymm13
.byte 196,66,61,168,235 // vfmadd213ps %ymm11,%ymm8,%ymm13
- .byte 196,98,125,24,53,130,110,3,0 // vbroadcastss 0x36e82(%rip),%ymm14 # 38a04 <_sk_srcover_bgra_8888_sse2_lowp+0x300>
+ .byte 196,98,125,24,53,242,111,3,0 // vbroadcastss 0x36ff2(%rip),%ymm14 # 38b74 <_sk_srcover_bgra_8888_sse2_lowp+0x300>
.byte 196,66,61,168,238 // vfmadd213ps %ymm14,%ymm8,%ymm13
- .byte 196,98,125,24,61,120,110,3,0 // vbroadcastss 0x36e78(%rip),%ymm15 # 38a08 <_sk_srcover_bgra_8888_sse2_lowp+0x304>
+ .byte 196,98,125,24,61,232,111,3,0 // vbroadcastss 0x36fe8(%rip),%ymm15 # 38b78 <_sk_srcover_bgra_8888_sse2_lowp+0x304>
.byte 196,65,60,88,199 // vaddps %ymm15,%ymm8,%ymm8
.byte 98,82,125,40,76,192 // vrcp14ps %ymm8,%ymm8
.byte 196,65,20,89,192 // vmulps %ymm8,%ymm13,%ymm8
- .byte 196,98,125,24,45,99,110,3,0 // vbroadcastss 0x36e63(%rip),%ymm13 # 38a0c <_sk_srcover_bgra_8888_sse2_lowp+0x308>
+ .byte 196,98,125,24,45,211,111,3,0 // vbroadcastss 0x36fd3(%rip),%ymm13 # 38b7c <_sk_srcover_bgra_8888_sse2_lowp+0x308>
.byte 98,209,124,40,194,197,1 // vcmpltps %ymm13,%ymm0,%k0
.byte 98,242,126,40,56,192 // vpmovm2d %k0,%ymm0
.byte 196,195,61,74,194,0 // vblendvps %ymm0,%ymm10,%ymm8,%ymm0
@@ -11280,7 +11280,7 @@ _sk_rgb_to_hsl_skx:
.byte 197,116,93,194 // vminps %ymm2,%ymm1,%ymm8
.byte 196,65,124,93,208 // vminps %ymm8,%ymm0,%ymm10
.byte 98,193,52,40,92,194 // vsubps %ymm10,%ymm9,%ymm16
- .byte 196,98,125,24,5,84,109,3,0 // vbroadcastss 0x36d54(%rip),%ymm8 # 389b4 <_sk_srcover_bgra_8888_sse2_lowp+0x2b0>
+ .byte 196,98,125,24,5,196,110,3,0 // vbroadcastss 0x36ec4(%rip),%ymm8 # 38b24 <_sk_srcover_bgra_8888_sse2_lowp+0x2b0>
.byte 98,49,60,40,94,224 // vdivps %ymm16,%ymm8,%ymm12
.byte 98,209,52,40,194,194,0 // vcmpeqps %ymm10,%ymm9,%k0
.byte 98,114,126,40,56,192 // vpmovm2d %k0,%ymm8
@@ -11289,23 +11289,23 @@ _sk_rgb_to_hsl_skx:
.byte 197,116,92,242 // vsubps %ymm2,%ymm1,%ymm14
.byte 98,241,116,40,194,194,1 // vcmpltps %ymm2,%ymm1,%k0
.byte 98,114,126,40,56,248 // vpmovm2d %k0,%ymm15
- .byte 196,98,125,24,29,118,109,3,0 // vbroadcastss 0x36d76(%rip),%ymm11 # 38a10 <_sk_srcover_bgra_8888_sse2_lowp+0x30c>
+ .byte 196,98,125,24,29,230,110,3,0 // vbroadcastss 0x36ee6(%rip),%ymm11 # 38b80 <_sk_srcover_bgra_8888_sse2_lowp+0x30c>
.byte 197,196,87,255 // vxorps %ymm7,%ymm7,%ymm7
.byte 196,67,69,74,219,240 // vblendvps %ymm15,%ymm11,%ymm7,%ymm11
.byte 196,66,29,168,243 // vfmadd213ps %ymm11,%ymm12,%ymm14
.byte 98,241,52,40,194,193,0 // vcmpeqps %ymm1,%ymm9,%k0
.byte 98,114,126,40,56,216 // vpmovm2d %k0,%ymm11
.byte 197,236,92,208 // vsubps %ymm0,%ymm2,%ymm2
- .byte 196,98,125,24,61,81,109,3,0 // vbroadcastss 0x36d51(%rip),%ymm15 # 38a14 <_sk_srcover_bgra_8888_sse2_lowp+0x310>
+ .byte 196,98,125,24,61,193,110,3,0 // vbroadcastss 0x36ec1(%rip),%ymm15 # 38b84 <_sk_srcover_bgra_8888_sse2_lowp+0x310>
.byte 196,194,29,168,215 // vfmadd213ps %ymm15,%ymm12,%ymm2
.byte 197,252,92,193 // vsubps %ymm1,%ymm0,%ymm0
- .byte 98,242,29,56,168,5,66,109,3,0 // vfmadd213ps 0x36d42(%rip){1to8},%ymm12,%ymm0 # 38a18 <_sk_srcover_bgra_8888_sse2_lowp+0x314>
+ .byte 98,242,29,56,168,5,178,110,3,0 // vfmadd213ps 0x36eb2(%rip){1to8},%ymm12,%ymm0 # 38b88 <_sk_srcover_bgra_8888_sse2_lowp+0x314>
.byte 196,227,125,74,194,176 // vblendvps %ymm11,%ymm2,%ymm0,%ymm0
.byte 196,195,125,74,198,208 // vblendvps %ymm13,%ymm14,%ymm0,%ymm0
.byte 196,227,125,74,199,128 // vblendvps %ymm8,%ymm7,%ymm0,%ymm0
- .byte 98,241,124,56,89,5,42,109,3,0 // vmulps 0x36d2a(%rip){1to8},%ymm0,%ymm0 # 38a1c <_sk_srcover_bgra_8888_sse2_lowp+0x318>
+ .byte 98,241,124,56,89,5,154,110,3,0 // vmulps 0x36e9a(%rip){1to8},%ymm0,%ymm0 # 38b8c <_sk_srcover_bgra_8888_sse2_lowp+0x318>
.byte 196,193,52,88,202 // vaddps %ymm10,%ymm9,%ymm1
- .byte 196,98,125,24,29,176,108,3,0 // vbroadcastss 0x36cb0(%rip),%ymm11 # 389b0 <_sk_srcover_bgra_8888_sse2_lowp+0x2ac>
+ .byte 196,98,125,24,29,32,110,3,0 // vbroadcastss 0x36e20(%rip),%ymm11 # 38b20 <_sk_srcover_bgra_8888_sse2_lowp+0x2ac>
.byte 196,193,116,89,211 // vmulps %ymm11,%ymm1,%ymm2
.byte 98,241,36,40,194,194,1 // vcmpltps %ymm2,%ymm11,%k0
.byte 98,114,126,40,56,216 // vpmovm2d %k0,%ymm11
@@ -11324,7 +11324,7 @@ FUNCTION(_sk_hsl_to_rgb_skx)
_sk_hsl_to_rgb_skx:
.byte 98,225,124,40,40,215 // vmovaps %ymm7,%ymm18
.byte 98,225,124,40,40,230 // vmovaps %ymm6,%ymm20
- .byte 98,226,125,40,24,5,98,108,3,0 // vbroadcastss 0x36c62(%rip),%ymm16 # 389b0 <_sk_srcover_bgra_8888_sse2_lowp+0x2ac>
+ .byte 98,226,125,40,24,5,210,109,3,0 // vbroadcastss 0x36dd2(%rip),%ymm16 # 38b20 <_sk_srcover_bgra_8888_sse2_lowp+0x2ac>
.byte 98,241,124,32,194,194,2 // vcmpleps %ymm2,%ymm16,%k0
.byte 98,114,126,40,56,192 // vpmovm2d %k0,%ymm8
.byte 197,116,89,202 // vmulps %ymm2,%ymm1,%ymm9
@@ -11332,27 +11332,27 @@ _sk_hsl_to_rgb_skx:
.byte 196,67,53,74,194,128 // vblendvps %ymm8,%ymm10,%ymm9,%ymm8
.byte 197,60,88,218 // vaddps %ymm2,%ymm8,%ymm11
.byte 196,65,124,40,203 // vmovaps %ymm11,%ymm9
- .byte 98,114,109,56,186,13,151,108,3,0 // vfmsub231ps 0x36c97(%rip){1to8},%ymm2,%ymm9 # 38a14 <_sk_srcover_bgra_8888_sse2_lowp+0x310>
+ .byte 98,114,109,56,186,13,7,110,3,0 // vfmsub231ps 0x36e07(%rip){1to8},%ymm2,%ymm9 # 38b84 <_sk_srcover_bgra_8888_sse2_lowp+0x310>
.byte 196,65,60,87,192 // vxorps %ymm8,%ymm8,%ymm8
.byte 98,209,116,40,194,192,0 // vcmpeqps %ymm8,%ymm1,%k0
.byte 98,242,126,40,56,248 // vpmovm2d %k0,%ymm7
- .byte 98,241,124,56,88,13,135,108,3,0 // vaddps 0x36c87(%rip){1to8},%ymm0,%ymm1 # 38a20 <_sk_srcover_bgra_8888_sse2_lowp+0x31c>
+ .byte 98,241,124,56,88,13,247,109,3,0 // vaddps 0x36df7(%rip){1to8},%ymm0,%ymm1 # 38b90 <_sk_srcover_bgra_8888_sse2_lowp+0x31c>
.byte 196,99,125,8,193,1 // vroundps $0x1,%ymm1,%ymm8
.byte 196,193,116,92,200 // vsubps %ymm8,%ymm1,%ymm1
- .byte 98,226,125,40,24,29,118,108,3,0 // vbroadcastss 0x36c76(%rip),%ymm19 # 38a24 <_sk_srcover_bgra_8888_sse2_lowp+0x320>
+ .byte 98,226,125,40,24,29,230,109,3,0 // vbroadcastss 0x36de6(%rip),%ymm19 # 38b94 <_sk_srcover_bgra_8888_sse2_lowp+0x320>
.byte 98,241,100,32,194,193,2 // vcmpleps %ymm1,%ymm19,%k0
.byte 98,114,126,40,56,192 // vpmovm2d %k0,%ymm8
.byte 196,65,36,92,241 // vsubps %ymm9,%ymm11,%ymm14
- .byte 196,98,125,24,61,71,108,3,0 // vbroadcastss 0x36c47(%rip),%ymm15 # 38a10 <_sk_srcover_bgra_8888_sse2_lowp+0x30c>
+ .byte 196,98,125,24,61,183,109,3,0 // vbroadcastss 0x36db7(%rip),%ymm15 # 38b80 <_sk_srcover_bgra_8888_sse2_lowp+0x30c>
.byte 196,65,116,89,231 // vmulps %ymm15,%ymm1,%ymm12
- .byte 98,226,125,40,24,13,64,108,3,0 // vbroadcastss 0x36c40(%rip),%ymm17 # 38a18 <_sk_srcover_bgra_8888_sse2_lowp+0x314>
+ .byte 98,226,125,40,24,13,176,109,3,0 // vbroadcastss 0x36db0(%rip),%ymm17 # 38b88 <_sk_srcover_bgra_8888_sse2_lowp+0x314>
.byte 98,81,116,32,92,212 // vsubps %ymm12,%ymm17,%ymm10
.byte 196,66,13,168,209 // vfmadd213ps %ymm9,%ymm14,%ymm10
.byte 196,67,45,74,193,128 // vblendvps %ymm8,%ymm9,%ymm10,%ymm8
.byte 98,241,124,32,194,193,2 // vcmpleps %ymm1,%ymm16,%k0
.byte 98,114,126,40,56,208 // vpmovm2d %k0,%ymm10
.byte 196,67,37,74,192,160 // vblendvps %ymm10,%ymm8,%ymm11,%ymm8
- .byte 196,98,125,24,21,23,108,3,0 // vbroadcastss 0x36c17(%rip),%ymm10 # 38a1c <_sk_srcover_bgra_8888_sse2_lowp+0x318>
+ .byte 196,98,125,24,21,135,109,3,0 // vbroadcastss 0x36d87(%rip),%ymm10 # 38b8c <_sk_srcover_bgra_8888_sse2_lowp+0x318>
.byte 98,241,44,40,194,193,2 // vcmpleps %ymm1,%ymm10,%k0
.byte 98,242,126,40,56,200 // vpmovm2d %k0,%ymm1
.byte 196,66,13,168,225 // vfmadd213ps %ymm9,%ymm14,%ymm12
@@ -11374,7 +11374,7 @@ _sk_hsl_to_rgb_skx:
.byte 196,66,13,168,233 // vfmadd213ps %ymm9,%ymm14,%ymm13
.byte 196,195,21,74,200,16 // vblendvps %ymm1,%ymm8,%ymm13,%ymm1
.byte 196,227,117,74,202,112 // vblendvps %ymm7,%ymm2,%ymm1,%ymm1
- .byte 98,241,124,56,88,5,157,107,3,0 // vaddps 0x36b9d(%rip){1to8},%ymm0,%ymm0 # 38a28 <_sk_srcover_bgra_8888_sse2_lowp+0x324>
+ .byte 98,241,124,56,88,5,13,109,3,0 // vaddps 0x36d0d(%rip){1to8},%ymm0,%ymm0 # 38b98 <_sk_srcover_bgra_8888_sse2_lowp+0x324>
.byte 196,99,125,8,192,1 // vroundps $0x1,%ymm0,%ymm8
.byte 196,193,124,92,192 // vsubps %ymm8,%ymm0,%ymm0
.byte 98,241,100,32,194,192,2 // vcmpleps %ymm0,%ymm19,%k0
@@ -11423,10 +11423,10 @@ _sk_scale_u8_skx:
.byte 72,133,255 // test %rdi,%rdi
.byte 117,54 // jne 1f67 <_sk_scale_u8_skx+0x4e>
.byte 196,2,121,48,4,24 // vpmovzxbw (%r8,%r11,1),%xmm8
- .byte 197,57,219,5,81,115,3,0 // vpand 0x37351(%rip),%xmm8,%xmm8 # 39290 <_sk_srcover_bgra_8888_sse2_lowp+0xb8c>
+ .byte 197,57,219,5,177,116,3,0 // vpand 0x374b1(%rip),%xmm8,%xmm8 # 393f0 <_sk_srcover_bgra_8888_sse2_lowp+0xb7c>
.byte 196,66,125,51,192 // vpmovzxwd %xmm8,%ymm8
.byte 196,65,124,91,192 // vcvtdq2ps %ymm8,%ymm8
- .byte 98,113,60,56,89,5,217,106,3,0 // vmulps 0x36ad9(%rip){1to8},%ymm8,%ymm8 # 38a2c <_sk_srcover_bgra_8888_sse2_lowp+0x328>
+ .byte 98,113,60,56,89,5,73,108,3,0 // vmulps 0x36c49(%rip){1to8},%ymm8,%ymm8 # 38b9c <_sk_srcover_bgra_8888_sse2_lowp+0x328>
.byte 197,188,89,192 // vmulps %ymm0,%ymm8,%ymm0
.byte 197,188,89,201 // vmulps %ymm1,%ymm8,%ymm1
.byte 197,188,89,210 // vmulps %ymm2,%ymm8,%ymm2
@@ -11503,15 +11503,15 @@ _sk_scale_565_skx:
.byte 15,133,145,0,0,0 // jne 20e0 <_sk_scale_565_skx+0xb0>
.byte 196,1,122,111,4,88 // vmovdqu (%r8,%r11,2),%xmm8
.byte 196,66,125,51,192 // vpmovzxwd %xmm8,%ymm8
- .byte 98,113,61,56,219,13,204,105,3,0 // vpandd 0x369cc(%rip){1to8},%ymm8,%ymm9 # 38a30 <_sk_srcover_bgra_8888_sse2_lowp+0x32c>
+ .byte 98,113,61,56,219,13,60,107,3,0 // vpandd 0x36b3c(%rip){1to8},%ymm8,%ymm9 # 38ba0 <_sk_srcover_bgra_8888_sse2_lowp+0x32c>
.byte 196,65,124,91,201 // vcvtdq2ps %ymm9,%ymm9
- .byte 98,113,52,56,89,13,193,105,3,0 // vmulps 0x369c1(%rip){1to8},%ymm9,%ymm9 # 38a34 <_sk_srcover_bgra_8888_sse2_lowp+0x330>
- .byte 98,113,61,56,219,21,187,105,3,0 // vpandd 0x369bb(%rip){1to8},%ymm8,%ymm10 # 38a38 <_sk_srcover_bgra_8888_sse2_lowp+0x334>
+ .byte 98,113,52,56,89,13,49,107,3,0 // vmulps 0x36b31(%rip){1to8},%ymm9,%ymm9 # 38ba4 <_sk_srcover_bgra_8888_sse2_lowp+0x330>
+ .byte 98,113,61,56,219,21,43,107,3,0 // vpandd 0x36b2b(%rip){1to8},%ymm8,%ymm10 # 38ba8 <_sk_srcover_bgra_8888_sse2_lowp+0x334>
.byte 196,65,124,91,210 // vcvtdq2ps %ymm10,%ymm10
- .byte 98,113,44,56,89,21,176,105,3,0 // vmulps 0x369b0(%rip){1to8},%ymm10,%ymm10 # 38a3c <_sk_srcover_bgra_8888_sse2_lowp+0x338>
- .byte 98,113,61,56,219,5,170,105,3,0 // vpandd 0x369aa(%rip){1to8},%ymm8,%ymm8 # 38a40 <_sk_srcover_bgra_8888_sse2_lowp+0x33c>
+ .byte 98,113,44,56,89,21,32,107,3,0 // vmulps 0x36b20(%rip){1to8},%ymm10,%ymm10 # 38bac <_sk_srcover_bgra_8888_sse2_lowp+0x338>
+ .byte 98,113,61,56,219,5,26,107,3,0 // vpandd 0x36b1a(%rip){1to8},%ymm8,%ymm8 # 38bb0 <_sk_srcover_bgra_8888_sse2_lowp+0x33c>
.byte 196,65,124,91,192 // vcvtdq2ps %ymm8,%ymm8
- .byte 98,113,60,56,89,5,159,105,3,0 // vmulps 0x3699f(%rip){1to8},%ymm8,%ymm8 # 38a44 <_sk_srcover_bgra_8888_sse2_lowp+0x340>
+ .byte 98,113,60,56,89,5,15,107,3,0 // vmulps 0x36b0f(%rip){1to8},%ymm8,%ymm8 # 38bb4 <_sk_srcover_bgra_8888_sse2_lowp+0x340>
.byte 98,241,100,40,194,199,1 // vcmpltps %ymm7,%ymm3,%k0
.byte 98,114,126,40,56,216 // vpmovm2d %k0,%ymm11
.byte 196,65,44,93,224 // vminps %ymm8,%ymm10,%ymm12
@@ -11607,10 +11607,10 @@ _sk_lerp_u8_skx:
.byte 72,133,255 // test %rdi,%rdi
.byte 117,74 // jne 2235 <_sk_lerp_u8_skx+0x62>
.byte 196,2,121,48,4,24 // vpmovzxbw (%r8,%r11,1),%xmm8
- .byte 197,57,219,5,151,112,3,0 // vpand 0x37097(%rip),%xmm8,%xmm8 # 39290 <_sk_srcover_bgra_8888_sse2_lowp+0xb8c>
+ .byte 197,57,219,5,247,113,3,0 // vpand 0x371f7(%rip),%xmm8,%xmm8 # 393f0 <_sk_srcover_bgra_8888_sse2_lowp+0xb7c>
.byte 196,66,125,51,192 // vpmovzxwd %xmm8,%ymm8
.byte 196,65,124,91,192 // vcvtdq2ps %ymm8,%ymm8
- .byte 98,113,60,56,89,5,31,104,3,0 // vmulps 0x3681f(%rip){1to8},%ymm8,%ymm8 # 38a2c <_sk_srcover_bgra_8888_sse2_lowp+0x328>
+ .byte 98,113,60,56,89,5,143,105,3,0 // vmulps 0x3698f(%rip){1to8},%ymm8,%ymm8 # 38b9c <_sk_srcover_bgra_8888_sse2_lowp+0x328>
.byte 197,252,92,196 // vsubps %ymm4,%ymm0,%ymm0
.byte 196,226,61,168,196 // vfmadd213ps %ymm4,%ymm8,%ymm0
.byte 197,244,92,205 // vsubps %ymm5,%ymm1,%ymm1
@@ -11690,15 +11690,15 @@ _sk_lerp_565_skx:
.byte 15,133,165,0,0,0 // jne 23c0 <_sk_lerp_565_skx+0xc4>
.byte 196,1,122,111,4,88 // vmovdqu (%r8,%r11,2),%xmm8
.byte 196,66,125,51,192 // vpmovzxwd %xmm8,%ymm8
- .byte 98,113,61,56,219,13,0,103,3,0 // vpandd 0x36700(%rip){1to8},%ymm8,%ymm9 # 38a30 <_sk_srcover_bgra_8888_sse2_lowp+0x32c>
+ .byte 98,113,61,56,219,13,112,104,3,0 // vpandd 0x36870(%rip){1to8},%ymm8,%ymm9 # 38ba0 <_sk_srcover_bgra_8888_sse2_lowp+0x32c>
.byte 196,65,124,91,201 // vcvtdq2ps %ymm9,%ymm9
- .byte 98,113,52,56,89,13,245,102,3,0 // vmulps 0x366f5(%rip){1to8},%ymm9,%ymm9 # 38a34 <_sk_srcover_bgra_8888_sse2_lowp+0x330>
- .byte 98,113,61,56,219,21,239,102,3,0 // vpandd 0x366ef(%rip){1to8},%ymm8,%ymm10 # 38a38 <_sk_srcover_bgra_8888_sse2_lowp+0x334>
+ .byte 98,113,52,56,89,13,101,104,3,0 // vmulps 0x36865(%rip){1to8},%ymm9,%ymm9 # 38ba4 <_sk_srcover_bgra_8888_sse2_lowp+0x330>
+ .byte 98,113,61,56,219,21,95,104,3,0 // vpandd 0x3685f(%rip){1to8},%ymm8,%ymm10 # 38ba8 <_sk_srcover_bgra_8888_sse2_lowp+0x334>
.byte 196,65,124,91,210 // vcvtdq2ps %ymm10,%ymm10
- .byte 98,113,44,56,89,21,228,102,3,0 // vmulps 0x366e4(%rip){1to8},%ymm10,%ymm10 # 38a3c <_sk_srcover_bgra_8888_sse2_lowp+0x338>
- .byte 98,113,61,56,219,5,222,102,3,0 // vpandd 0x366de(%rip){1to8},%ymm8,%ymm8 # 38a40 <_sk_srcover_bgra_8888_sse2_lowp+0x33c>
+ .byte 98,113,44,56,89,21,84,104,3,0 // vmulps 0x36854(%rip){1to8},%ymm10,%ymm10 # 38bac <_sk_srcover_bgra_8888_sse2_lowp+0x338>
+ .byte 98,113,61,56,219,5,78,104,3,0 // vpandd 0x3684e(%rip){1to8},%ymm8,%ymm8 # 38bb0 <_sk_srcover_bgra_8888_sse2_lowp+0x33c>
.byte 196,65,124,91,192 // vcvtdq2ps %ymm8,%ymm8
- .byte 98,113,60,56,89,5,211,102,3,0 // vmulps 0x366d3(%rip){1to8},%ymm8,%ymm8 # 38a44 <_sk_srcover_bgra_8888_sse2_lowp+0x340>
+ .byte 98,113,60,56,89,5,67,104,3,0 // vmulps 0x36843(%rip){1to8},%ymm8,%ymm8 # 38bb4 <_sk_srcover_bgra_8888_sse2_lowp+0x340>
.byte 98,241,100,40,194,199,1 // vcmpltps %ymm7,%ymm3,%k0
.byte 98,114,126,40,56,216 // vpmovm2d %k0,%ymm11
.byte 196,65,44,93,224 // vminps %ymm8,%ymm10,%ymm12
@@ -11798,7 +11798,7 @@ _sk_load_tables_skx:
.byte 196,162,61,146,20,136 // vgatherdps %ymm8,(%rax,%ymm9,4),%ymm2
.byte 197,229,114,211,24 // vpsrld $0x18,%ymm3,%ymm3
.byte 197,252,91,219 // vcvtdq2ps %ymm3,%ymm3
- .byte 98,241,100,56,89,29,38,101,3,0 // vmulps 0x36526(%rip){1to8},%ymm3,%ymm3 # 38a2c <_sk_srcover_bgra_8888_sse2_lowp+0x328>
+ .byte 98,241,100,56,89,29,150,102,3,0 // vmulps 0x36696(%rip){1to8},%ymm3,%ymm3 # 38b9c <_sk_srcover_bgra_8888_sse2_lowp+0x328>
.byte 72,173 // lods %ds:(%rsi),%rax
.byte 255,224 // jmpq *%rax
.byte 65,137,249 // mov %edi,%r9d
@@ -11877,7 +11877,7 @@ _sk_load_tables_u16_be_skx:
.byte 197,113,105,219 // vpunpckhwd %xmm3,%xmm1,%xmm11
.byte 197,185,108,200 // vpunpcklqdq %xmm0,%xmm8,%xmm1
.byte 197,57,109,192 // vpunpckhqdq %xmm0,%xmm8,%xmm8
- .byte 197,121,111,21,85,108,3,0 // vmovdqa 0x36c55(%rip),%xmm10 # 39290 <_sk_srcover_bgra_8888_sse2_lowp+0xb8c>
+ .byte 197,121,111,21,181,109,3,0 // vmovdqa 0x36db5(%rip),%xmm10 # 393f0 <_sk_srcover_bgra_8888_sse2_lowp+0xb7c>
.byte 196,193,113,219,194 // vpand %xmm10,%xmm1,%xmm0
.byte 196,226,125,51,200 // vpmovzxwd %xmm0,%ymm1
.byte 76,139,64,8 // mov 0x8(%rax),%r8
@@ -11903,7 +11903,7 @@ _sk_load_tables_u16_be_skx:
.byte 197,185,235,219 // vpor %xmm3,%xmm8,%xmm3
.byte 196,226,125,51,219 // vpmovzxwd %xmm3,%ymm3
.byte 197,252,91,219 // vcvtdq2ps %ymm3,%ymm3
- .byte 98,241,100,56,89,29,140,99,3,0 // vmulps 0x3638c(%rip){1to8},%ymm3,%ymm3 # 38a48 <_sk_srcover_bgra_8888_sse2_lowp+0x344>
+ .byte 98,241,100,56,89,29,252,100,3,0 // vmulps 0x364fc(%rip){1to8},%ymm3,%ymm3 # 38bb8 <_sk_srcover_bgra_8888_sse2_lowp+0x344>
.byte 72,173 // lods %ds:(%rsi),%rax
.byte 255,224 // jmpq *%rax
.byte 196,1,123,16,4,72 // vmovsd (%r8,%r9,2),%xmm8
@@ -11960,7 +11960,7 @@ _sk_load_tables_rgb_u16_be_skx:
.byte 197,249,105,193 // vpunpckhwd %xmm1,%xmm0,%xmm0
.byte 197,105,108,200 // vpunpcklqdq %xmm0,%xmm2,%xmm9
.byte 197,185,108,195 // vpunpcklqdq %xmm3,%xmm8,%xmm0
- .byte 197,121,111,21,222,106,3,0 // vmovdqa 0x36ade(%rip),%xmm10 # 39290 <_sk_srcover_bgra_8888_sse2_lowp+0xb8c>
+ .byte 197,121,111,21,62,108,3,0 // vmovdqa 0x36c3e(%rip),%xmm10 # 393f0 <_sk_srcover_bgra_8888_sse2_lowp+0xb7c>
.byte 196,193,121,219,194 // vpand %xmm10,%xmm0,%xmm0
.byte 196,226,125,51,200 // vpmovzxwd %xmm0,%ymm1
.byte 76,139,64,8 // mov 0x8(%rax),%r8
@@ -11981,7 +11981,7 @@ _sk_load_tables_rgb_u16_be_skx:
.byte 196,226,125,51,219 // vpmovzxwd %xmm3,%ymm3
.byte 196,226,29,146,20,152 // vgatherdps %ymm12,(%rax,%ymm3,4),%ymm2
.byte 72,173 // lods %ds:(%rsi),%rax
- .byte 196,226,125,24,29,157,97,3,0 // vbroadcastss 0x3619d(%rip),%ymm3 # 389b4 <_sk_srcover_bgra_8888_sse2_lowp+0x2b0>
+ .byte 196,226,125,24,29,13,99,3,0 // vbroadcastss 0x3630d(%rip),%ymm3 # 38b24 <_sk_srcover_bgra_8888_sse2_lowp+0x2b0>
.byte 255,224 // jmpq *%rax
.byte 196,129,121,110,4,72 // vmovd (%r8,%r9,2),%xmm0
.byte 196,65,49,87,201 // vxorpd %xmm9,%xmm9,%xmm9
@@ -12050,7 +12050,7 @@ HIDDEN _sk_byte_tables_skx
FUNCTION(_sk_byte_tables_skx)
_sk_byte_tables_skx:
.byte 72,173 // lods %ds:(%rsi),%rax
- .byte 196,98,125,24,5,146,96,3,0 // vbroadcastss 0x36092(%rip),%ymm8 # 389e0 <_sk_srcover_bgra_8888_sse2_lowp+0x2dc>
+ .byte 196,98,125,24,5,2,98,3,0 // vbroadcastss 0x36202(%rip),%ymm8 # 38b50 <_sk_srcover_bgra_8888_sse2_lowp+0x2dc>
.byte 196,193,124,89,192 // vmulps %ymm8,%ymm0,%ymm0
.byte 197,125,91,200 // vcvtps2dq %ymm0,%ymm9
.byte 196,65,249,126,200 // vmovq %xmm9,%r8
@@ -12180,7 +12180,7 @@ _sk_byte_tables_skx:
.byte 67,15,182,4,17 // movzbl (%r9,%r10,1),%eax
.byte 196,194,125,49,193 // vpmovzxbd %xmm9,%ymm0
.byte 197,252,91,192 // vcvtdq2ps %ymm0,%ymm0
- .byte 196,98,125,24,5,100,94,3,0 // vbroadcastss 0x35e64(%rip),%ymm8 # 38a2c <_sk_srcover_bgra_8888_sse2_lowp+0x328>
+ .byte 196,98,125,24,5,212,95,3,0 // vbroadcastss 0x35fd4(%rip),%ymm8 # 38b9c <_sk_srcover_bgra_8888_sse2_lowp+0x328>
.byte 196,193,124,89,192 // vmulps %ymm8,%ymm0,%ymm0
.byte 196,226,125,49,201 // vpmovzxbd %xmm1,%ymm1
.byte 197,252,91,201 // vcvtdq2ps %ymm1,%ymm1
@@ -12301,7 +12301,7 @@ _sk_byte_tables_rgb_skx:
.byte 67,15,182,4,17 // movzbl (%r9,%r10,1),%eax
.byte 196,194,125,49,193 // vpmovzxbd %xmm9,%ymm0
.byte 197,252,91,192 // vcvtdq2ps %ymm0,%ymm0
- .byte 196,98,125,24,5,59,92,3,0 // vbroadcastss 0x35c3b(%rip),%ymm8 # 38a2c <_sk_srcover_bgra_8888_sse2_lowp+0x328>
+ .byte 196,98,125,24,5,171,93,3,0 // vbroadcastss 0x35dab(%rip),%ymm8 # 38b9c <_sk_srcover_bgra_8888_sse2_lowp+0x328>
.byte 196,193,124,89,192 // vmulps %ymm8,%ymm0,%ymm0
.byte 196,194,125,49,202 // vpmovzxbd %xmm10,%ymm1
.byte 197,252,91,201 // vcvtdq2ps %ymm1,%ymm1
@@ -12398,33 +12398,33 @@ _sk_parametric_r_skx:
.byte 98,114,125,56,168,80,2 // vfmadd213ps 0x8(%rax){1to8},%ymm0,%ymm10
.byte 196,226,125,24,0 // vbroadcastss (%rax),%ymm0
.byte 196,65,124,91,218 // vcvtdq2ps %ymm10,%ymm11
- .byte 196,98,125,24,37,47,91,3,0 // vbroadcastss 0x35b2f(%rip),%ymm12 # 38a4c <_sk_srcover_bgra_8888_sse2_lowp+0x348>
- .byte 98,114,37,56,168,37,45,91,3,0 // vfmadd213ps 0x35b2d(%rip){1to8},%ymm11,%ymm12 # 38a54 <_sk_srcover_bgra_8888_sse2_lowp+0x350>
- .byte 98,113,44,56,84,21,31,91,3,0 // vandps 0x35b1f(%rip){1to8},%ymm10,%ymm10 # 38a50 <_sk_srcover_bgra_8888_sse2_lowp+0x34c>
- .byte 98,113,44,56,86,21,117,90,3,0 // vorps 0x35a75(%rip){1to8},%ymm10,%ymm10 # 389b0 <_sk_srcover_bgra_8888_sse2_lowp+0x2ac>
- .byte 98,114,45,56,188,37,19,91,3,0 // vfnmadd231ps 0x35b13(%rip){1to8},%ymm10,%ymm12 # 38a58 <_sk_srcover_bgra_8888_sse2_lowp+0x354>
- .byte 98,113,44,56,88,21,13,91,3,0 // vaddps 0x35b0d(%rip){1to8},%ymm10,%ymm10 # 38a5c <_sk_srcover_bgra_8888_sse2_lowp+0x358>
- .byte 196,98,125,24,29,8,91,3,0 // vbroadcastss 0x35b08(%rip),%ymm11 # 38a60 <_sk_srcover_bgra_8888_sse2_lowp+0x35c>
+ .byte 196,98,125,24,37,159,92,3,0 // vbroadcastss 0x35c9f(%rip),%ymm12 # 38bbc <_sk_srcover_bgra_8888_sse2_lowp+0x348>
+ .byte 98,114,37,56,168,37,157,92,3,0 // vfmadd213ps 0x35c9d(%rip){1to8},%ymm11,%ymm12 # 38bc4 <_sk_srcover_bgra_8888_sse2_lowp+0x350>
+ .byte 98,113,44,56,84,21,143,92,3,0 // vandps 0x35c8f(%rip){1to8},%ymm10,%ymm10 # 38bc0 <_sk_srcover_bgra_8888_sse2_lowp+0x34c>
+ .byte 98,113,44,56,86,21,229,91,3,0 // vorps 0x35be5(%rip){1to8},%ymm10,%ymm10 # 38b20 <_sk_srcover_bgra_8888_sse2_lowp+0x2ac>
+ .byte 98,114,45,56,188,37,131,92,3,0 // vfnmadd231ps 0x35c83(%rip){1to8},%ymm10,%ymm12 # 38bc8 <_sk_srcover_bgra_8888_sse2_lowp+0x354>
+ .byte 98,113,44,56,88,21,125,92,3,0 // vaddps 0x35c7d(%rip){1to8},%ymm10,%ymm10 # 38bcc <_sk_srcover_bgra_8888_sse2_lowp+0x358>
+ .byte 196,98,125,24,29,120,92,3,0 // vbroadcastss 0x35c78(%rip),%ymm11 # 38bd0 <_sk_srcover_bgra_8888_sse2_lowp+0x35c>
.byte 196,65,36,94,210 // vdivps %ymm10,%ymm11,%ymm10
.byte 196,65,28,92,210 // vsubps %ymm10,%ymm12,%ymm10
.byte 196,193,124,89,194 // vmulps %ymm10,%ymm0,%ymm0
.byte 196,99,125,8,208,1 // vroundps $0x1,%ymm0,%ymm10
.byte 196,65,124,92,210 // vsubps %ymm10,%ymm0,%ymm10
- .byte 98,241,124,56,88,5,232,90,3,0 // vaddps 0x35ae8(%rip){1to8},%ymm0,%ymm0 # 38a64 <_sk_srcover_bgra_8888_sse2_lowp+0x360>
- .byte 98,242,45,56,188,5,226,90,3,0 // vfnmadd231ps 0x35ae2(%rip){1to8},%ymm10,%ymm0 # 38a68 <_sk_srcover_bgra_8888_sse2_lowp+0x364>
- .byte 196,98,125,24,29,221,90,3,0 // vbroadcastss 0x35add(%rip),%ymm11 # 38a6c <_sk_srcover_bgra_8888_sse2_lowp+0x368>
+ .byte 98,241,124,56,88,5,88,92,3,0 // vaddps 0x35c58(%rip){1to8},%ymm0,%ymm0 # 38bd4 <_sk_srcover_bgra_8888_sse2_lowp+0x360>
+ .byte 98,242,45,56,188,5,82,92,3,0 // vfnmadd231ps 0x35c52(%rip){1to8},%ymm10,%ymm0 # 38bd8 <_sk_srcover_bgra_8888_sse2_lowp+0x364>
+ .byte 196,98,125,24,29,77,92,3,0 // vbroadcastss 0x35c4d(%rip),%ymm11 # 38bdc <_sk_srcover_bgra_8888_sse2_lowp+0x368>
.byte 196,65,36,92,210 // vsubps %ymm10,%ymm11,%ymm10
- .byte 196,98,125,24,29,211,90,3,0 // vbroadcastss 0x35ad3(%rip),%ymm11 # 38a70 <_sk_srcover_bgra_8888_sse2_lowp+0x36c>
+ .byte 196,98,125,24,29,67,92,3,0 // vbroadcastss 0x35c43(%rip),%ymm11 # 38be0 <_sk_srcover_bgra_8888_sse2_lowp+0x36c>
.byte 196,65,36,94,210 // vdivps %ymm10,%ymm11,%ymm10
.byte 196,193,124,88,194 // vaddps %ymm10,%ymm0,%ymm0
- .byte 98,241,124,56,89,5,195,90,3,0 // vmulps 0x35ac3(%rip){1to8},%ymm0,%ymm0 # 38a74 <_sk_srcover_bgra_8888_sse2_lowp+0x370>
+ .byte 98,241,124,56,89,5,51,92,3,0 // vmulps 0x35c33(%rip){1to8},%ymm0,%ymm0 # 38be4 <_sk_srcover_bgra_8888_sse2_lowp+0x370>
.byte 197,253,91,192 // vcvtps2dq %ymm0,%ymm0
.byte 196,98,125,24,80,20 // vbroadcastss 0x14(%rax),%ymm10
.byte 197,172,88,192 // vaddps %ymm0,%ymm10,%ymm0
.byte 196,195,125,74,193,128 // vblendvps %ymm8,%ymm9,%ymm0,%ymm0
.byte 196,65,60,87,192 // vxorps %ymm8,%ymm8,%ymm8
.byte 196,193,124,95,192 // vmaxps %ymm8,%ymm0,%ymm0
- .byte 98,241,124,56,93,5,219,89,3,0 // vminps 0x359db(%rip){1to8},%ymm0,%ymm0 # 389b4 <_sk_srcover_bgra_8888_sse2_lowp+0x2b0>
+ .byte 98,241,124,56,93,5,75,91,3,0 // vminps 0x35b4b(%rip){1to8},%ymm0,%ymm0 # 38b24 <_sk_srcover_bgra_8888_sse2_lowp+0x2b0>
.byte 72,173 // lods %ds:(%rsi),%rax
.byte 255,224 // jmpq *%rax
@@ -12441,33 +12441,33 @@ _sk_parametric_g_skx:
.byte 98,114,117,56,168,80,2 // vfmadd213ps 0x8(%rax){1to8},%ymm1,%ymm10
.byte 196,226,125,24,8 // vbroadcastss (%rax),%ymm1
.byte 196,65,124,91,218 // vcvtdq2ps %ymm10,%ymm11
- .byte 196,98,125,24,37,50,90,3,0 // vbroadcastss 0x35a32(%rip),%ymm12 # 38a4c <_sk_srcover_bgra_8888_sse2_lowp+0x348>
- .byte 98,114,37,56,168,37,48,90,3,0 // vfmadd213ps 0x35a30(%rip){1to8},%ymm11,%ymm12 # 38a54 <_sk_srcover_bgra_8888_sse2_lowp+0x350>
- .byte 98,113,44,56,84,21,34,90,3,0 // vandps 0x35a22(%rip){1to8},%ymm10,%ymm10 # 38a50 <_sk_srcover_bgra_8888_sse2_lowp+0x34c>
- .byte 98,113,44,56,86,21,120,89,3,0 // vorps 0x35978(%rip){1to8},%ymm10,%ymm10 # 389b0 <_sk_srcover_bgra_8888_sse2_lowp+0x2ac>
- .byte 98,114,45,56,188,37,22,90,3,0 // vfnmadd231ps 0x35a16(%rip){1to8},%ymm10,%ymm12 # 38a58 <_sk_srcover_bgra_8888_sse2_lowp+0x354>
- .byte 98,113,44,56,88,21,16,90,3,0 // vaddps 0x35a10(%rip){1to8},%ymm10,%ymm10 # 38a5c <_sk_srcover_bgra_8888_sse2_lowp+0x358>
- .byte 196,98,125,24,29,11,90,3,0 // vbroadcastss 0x35a0b(%rip),%ymm11 # 38a60 <_sk_srcover_bgra_8888_sse2_lowp+0x35c>
+ .byte 196,98,125,24,37,162,91,3,0 // vbroadcastss 0x35ba2(%rip),%ymm12 # 38bbc <_sk_srcover_bgra_8888_sse2_lowp+0x348>
+ .byte 98,114,37,56,168,37,160,91,3,0 // vfmadd213ps 0x35ba0(%rip){1to8},%ymm11,%ymm12 # 38bc4 <_sk_srcover_bgra_8888_sse2_lowp+0x350>
+ .byte 98,113,44,56,84,21,146,91,3,0 // vandps 0x35b92(%rip){1to8},%ymm10,%ymm10 # 38bc0 <_sk_srcover_bgra_8888_sse2_lowp+0x34c>
+ .byte 98,113,44,56,86,21,232,90,3,0 // vorps 0x35ae8(%rip){1to8},%ymm10,%ymm10 # 38b20 <_sk_srcover_bgra_8888_sse2_lowp+0x2ac>
+ .byte 98,114,45,56,188,37,134,91,3,0 // vfnmadd231ps 0x35b86(%rip){1to8},%ymm10,%ymm12 # 38bc8 <_sk_srcover_bgra_8888_sse2_lowp+0x354>
+ .byte 98,113,44,56,88,21,128,91,3,0 // vaddps 0x35b80(%rip){1to8},%ymm10,%ymm10 # 38bcc <_sk_srcover_bgra_8888_sse2_lowp+0x358>
+ .byte 196,98,125,24,29,123,91,3,0 // vbroadcastss 0x35b7b(%rip),%ymm11 # 38bd0 <_sk_srcover_bgra_8888_sse2_lowp+0x35c>
.byte 196,65,36,94,210 // vdivps %ymm10,%ymm11,%ymm10
.byte 196,65,28,92,210 // vsubps %ymm10,%ymm12,%ymm10
.byte 196,193,116,89,202 // vmulps %ymm10,%ymm1,%ymm1
.byte 196,99,125,8,209,1 // vroundps $0x1,%ymm1,%ymm10
.byte 196,65,116,92,210 // vsubps %ymm10,%ymm1,%ymm10
- .byte 98,241,116,56,88,13,235,89,3,0 // vaddps 0x359eb(%rip){1to8},%ymm1,%ymm1 # 38a64 <_sk_srcover_bgra_8888_sse2_lowp+0x360>
- .byte 98,242,45,56,188,13,229,89,3,0 // vfnmadd231ps 0x359e5(%rip){1to8},%ymm10,%ymm1 # 38a68 <_sk_srcover_bgra_8888_sse2_lowp+0x364>
- .byte 196,98,125,24,29,224,89,3,0 // vbroadcastss 0x359e0(%rip),%ymm11 # 38a6c <_sk_srcover_bgra_8888_sse2_lowp+0x368>
+ .byte 98,241,116,56,88,13,91,91,3,0 // vaddps 0x35b5b(%rip){1to8},%ymm1,%ymm1 # 38bd4 <_sk_srcover_bgra_8888_sse2_lowp+0x360>
+ .byte 98,242,45,56,188,13,85,91,3,0 // vfnmadd231ps 0x35b55(%rip){1to8},%ymm10,%ymm1 # 38bd8 <_sk_srcover_bgra_8888_sse2_lowp+0x364>
+ .byte 196,98,125,24,29,80,91,3,0 // vbroadcastss 0x35b50(%rip),%ymm11 # 38bdc <_sk_srcover_bgra_8888_sse2_lowp+0x368>
.byte 196,65,36,92,210 // vsubps %ymm10,%ymm11,%ymm10
- .byte 196,98,125,24,29,214,89,3,0 // vbroadcastss 0x359d6(%rip),%ymm11 # 38a70 <_sk_srcover_bgra_8888_sse2_lowp+0x36c>
+ .byte 196,98,125,24,29,70,91,3,0 // vbroadcastss 0x35b46(%rip),%ymm11 # 38be0 <_sk_srcover_bgra_8888_sse2_lowp+0x36c>
.byte 196,65,36,94,210 // vdivps %ymm10,%ymm11,%ymm10
.byte 196,193,116,88,202 // vaddps %ymm10,%ymm1,%ymm1
- .byte 98,241,116,56,89,13,198,89,3,0 // vmulps 0x359c6(%rip){1to8},%ymm1,%ymm1 # 38a74 <_sk_srcover_bgra_8888_sse2_lowp+0x370>
+ .byte 98,241,116,56,89,13,54,91,3,0 // vmulps 0x35b36(%rip){1to8},%ymm1,%ymm1 # 38be4 <_sk_srcover_bgra_8888_sse2_lowp+0x370>
.byte 197,253,91,201 // vcvtps2dq %ymm1,%ymm1
.byte 196,98,125,24,80,20 // vbroadcastss 0x14(%rax),%ymm10
.byte 197,172,88,201 // vaddps %ymm1,%ymm10,%ymm1
.byte 196,195,117,74,201,128 // vblendvps %ymm8,%ymm9,%ymm1,%ymm1
.byte 196,65,60,87,192 // vxorps %ymm8,%ymm8,%ymm8
.byte 196,193,116,95,200 // vmaxps %ymm8,%ymm1,%ymm1
- .byte 98,241,116,56,93,13,222,88,3,0 // vminps 0x358de(%rip){1to8},%ymm1,%ymm1 # 389b4 <_sk_srcover_bgra_8888_sse2_lowp+0x2b0>
+ .byte 98,241,116,56,93,13,78,90,3,0 // vminps 0x35a4e(%rip){1to8},%ymm1,%ymm1 # 38b24 <_sk_srcover_bgra_8888_sse2_lowp+0x2b0>
.byte 72,173 // lods %ds:(%rsi),%rax
.byte 255,224 // jmpq *%rax
@@ -12484,33 +12484,33 @@ _sk_parametric_b_skx:
.byte 98,114,109,56,168,80,2 // vfmadd213ps 0x8(%rax){1to8},%ymm2,%ymm10
.byte 196,226,125,24,16 // vbroadcastss (%rax),%ymm2
.byte 196,65,124,91,218 // vcvtdq2ps %ymm10,%ymm11
- .byte 196,98,125,24,37,53,89,3,0 // vbroadcastss 0x35935(%rip),%ymm12 # 38a4c <_sk_srcover_bgra_8888_sse2_lowp+0x348>
- .byte 98,114,37,56,168,37,51,89,3,0 // vfmadd213ps 0x35933(%rip){1to8},%ymm11,%ymm12 # 38a54 <_sk_srcover_bgra_8888_sse2_lowp+0x350>
- .byte 98,113,44,56,84,21,37,89,3,0 // vandps 0x35925(%rip){1to8},%ymm10,%ymm10 # 38a50 <_sk_srcover_bgra_8888_sse2_lowp+0x34c>
- .byte 98,113,44,56,86,21,123,88,3,0 // vorps 0x3587b(%rip){1to8},%ymm10,%ymm10 # 389b0 <_sk_srcover_bgra_8888_sse2_lowp+0x2ac>
- .byte 98,114,45,56,188,37,25,89,3,0 // vfnmadd231ps 0x35919(%rip){1to8},%ymm10,%ymm12 # 38a58 <_sk_srcover_bgra_8888_sse2_lowp+0x354>
- .byte 98,113,44,56,88,21,19,89,3,0 // vaddps 0x35913(%rip){1to8},%ymm10,%ymm10 # 38a5c <_sk_srcover_bgra_8888_sse2_lowp+0x358>
- .byte 196,98,125,24,29,14,89,3,0 // vbroadcastss 0x3590e(%rip),%ymm11 # 38a60 <_sk_srcover_bgra_8888_sse2_lowp+0x35c>
+ .byte 196,98,125,24,37,165,90,3,0 // vbroadcastss 0x35aa5(%rip),%ymm12 # 38bbc <_sk_srcover_bgra_8888_sse2_lowp+0x348>
+ .byte 98,114,37,56,168,37,163,90,3,0 // vfmadd213ps 0x35aa3(%rip){1to8},%ymm11,%ymm12 # 38bc4 <_sk_srcover_bgra_8888_sse2_lowp+0x350>
+ .byte 98,113,44,56,84,21,149,90,3,0 // vandps 0x35a95(%rip){1to8},%ymm10,%ymm10 # 38bc0 <_sk_srcover_bgra_8888_sse2_lowp+0x34c>
+ .byte 98,113,44,56,86,21,235,89,3,0 // vorps 0x359eb(%rip){1to8},%ymm10,%ymm10 # 38b20 <_sk_srcover_bgra_8888_sse2_lowp+0x2ac>
+ .byte 98,114,45,56,188,37,137,90,3,0 // vfnmadd231ps 0x35a89(%rip){1to8},%ymm10,%ymm12 # 38bc8 <_sk_srcover_bgra_8888_sse2_lowp+0x354>
+ .byte 98,113,44,56,88,21,131,90,3,0 // vaddps 0x35a83(%rip){1to8},%ymm10,%ymm10 # 38bcc <_sk_srcover_bgra_8888_sse2_lowp+0x358>
+ .byte 196,98,125,24,29,126,90,3,0 // vbroadcastss 0x35a7e(%rip),%ymm11 # 38bd0 <_sk_srcover_bgra_8888_sse2_lowp+0x35c>
.byte 196,65,36,94,210 // vdivps %ymm10,%ymm11,%ymm10
.byte 196,65,28,92,210 // vsubps %ymm10,%ymm12,%ymm10
.byte 196,193,108,89,210 // vmulps %ymm10,%ymm2,%ymm2
.byte 196,99,125,8,210,1 // vroundps $0x1,%ymm2,%ymm10
.byte 196,65,108,92,210 // vsubps %ymm10,%ymm2,%ymm10
- .byte 98,241,108,56,88,21,238,88,3,0 // vaddps 0x358ee(%rip){1to8},%ymm2,%ymm2 # 38a64 <_sk_srcover_bgra_8888_sse2_lowp+0x360>
- .byte 98,242,45,56,188,21,232,88,3,0 // vfnmadd231ps 0x358e8(%rip){1to8},%ymm10,%ymm2 # 38a68 <_sk_srcover_bgra_8888_sse2_lowp+0x364>
- .byte 196,98,125,24,29,227,88,3,0 // vbroadcastss 0x358e3(%rip),%ymm11 # 38a6c <_sk_srcover_bgra_8888_sse2_lowp+0x368>
+ .byte 98,241,108,56,88,21,94,90,3,0 // vaddps 0x35a5e(%rip){1to8},%ymm2,%ymm2 # 38bd4 <_sk_srcover_bgra_8888_sse2_lowp+0x360>
+ .byte 98,242,45,56,188,21,88,90,3,0 // vfnmadd231ps 0x35a58(%rip){1to8},%ymm10,%ymm2 # 38bd8 <_sk_srcover_bgra_8888_sse2_lowp+0x364>
+ .byte 196,98,125,24,29,83,90,3,0 // vbroadcastss 0x35a53(%rip),%ymm11 # 38bdc <_sk_srcover_bgra_8888_sse2_lowp+0x368>
.byte 196,65,36,92,210 // vsubps %ymm10,%ymm11,%ymm10
- .byte 196,98,125,24,29,217,88,3,0 // vbroadcastss 0x358d9(%rip),%ymm11 # 38a70 <_sk_srcover_bgra_8888_sse2_lowp+0x36c>
+ .byte 196,98,125,24,29,73,90,3,0 // vbroadcastss 0x35a49(%rip),%ymm11 # 38be0 <_sk_srcover_bgra_8888_sse2_lowp+0x36c>
.byte 196,65,36,94,210 // vdivps %ymm10,%ymm11,%ymm10
.byte 196,193,108,88,210 // vaddps %ymm10,%ymm2,%ymm2
- .byte 98,241,108,56,89,21,201,88,3,0 // vmulps 0x358c9(%rip){1to8},%ymm2,%ymm2 # 38a74 <_sk_srcover_bgra_8888_sse2_lowp+0x370>
+ .byte 98,241,108,56,89,21,57,90,3,0 // vmulps 0x35a39(%rip){1to8},%ymm2,%ymm2 # 38be4 <_sk_srcover_bgra_8888_sse2_lowp+0x370>
.byte 197,253,91,210 // vcvtps2dq %ymm2,%ymm2
.byte 196,98,125,24,80,20 // vbroadcastss 0x14(%rax),%ymm10
.byte 197,172,88,210 // vaddps %ymm2,%ymm10,%ymm2
.byte 196,195,109,74,209,128 // vblendvps %ymm8,%ymm9,%ymm2,%ymm2
.byte 196,65,60,87,192 // vxorps %ymm8,%ymm8,%ymm8
.byte 196,193,108,95,208 // vmaxps %ymm8,%ymm2,%ymm2
- .byte 98,241,108,56,93,21,225,87,3,0 // vminps 0x357e1(%rip){1to8},%ymm2,%ymm2 # 389b4 <_sk_srcover_bgra_8888_sse2_lowp+0x2b0>
+ .byte 98,241,108,56,93,21,81,89,3,0 // vminps 0x35951(%rip){1to8},%ymm2,%ymm2 # 38b24 <_sk_srcover_bgra_8888_sse2_lowp+0x2b0>
.byte 72,173 // lods %ds:(%rsi),%rax
.byte 255,224 // jmpq *%rax
@@ -12527,33 +12527,33 @@ _sk_parametric_a_skx:
.byte 98,114,101,56,168,80,2 // vfmadd213ps 0x8(%rax){1to8},%ymm3,%ymm10
.byte 196,226,125,24,24 // vbroadcastss (%rax),%ymm3
.byte 196,65,124,91,218 // vcvtdq2ps %ymm10,%ymm11
- .byte 196,98,125,24,37,56,88,3,0 // vbroadcastss 0x35838(%rip),%ymm12 # 38a4c <_sk_srcover_bgra_8888_sse2_lowp+0x348>
- .byte 98,114,37,56,168,37,54,88,3,0 // vfmadd213ps 0x35836(%rip){1to8},%ymm11,%ymm12 # 38a54 <_sk_srcover_bgra_8888_sse2_lowp+0x350>
- .byte 98,113,44,56,84,21,40,88,3,0 // vandps 0x35828(%rip){1to8},%ymm10,%ymm10 # 38a50 <_sk_srcover_bgra_8888_sse2_lowp+0x34c>
- .byte 98,113,44,56,86,21,126,87,3,0 // vorps 0x3577e(%rip){1to8},%ymm10,%ymm10 # 389b0 <_sk_srcover_bgra_8888_sse2_lowp+0x2ac>
- .byte 98,114,45,56,188,37,28,88,3,0 // vfnmadd231ps 0x3581c(%rip){1to8},%ymm10,%ymm12 # 38a58 <_sk_srcover_bgra_8888_sse2_lowp+0x354>
- .byte 98,113,44,56,88,21,22,88,3,0 // vaddps 0x35816(%rip){1to8},%ymm10,%ymm10 # 38a5c <_sk_srcover_bgra_8888_sse2_lowp+0x358>
- .byte 196,98,125,24,29,17,88,3,0 // vbroadcastss 0x35811(%rip),%ymm11 # 38a60 <_sk_srcover_bgra_8888_sse2_lowp+0x35c>
+ .byte 196,98,125,24,37,168,89,3,0 // vbroadcastss 0x359a8(%rip),%ymm12 # 38bbc <_sk_srcover_bgra_8888_sse2_lowp+0x348>
+ .byte 98,114,37,56,168,37,166,89,3,0 // vfmadd213ps 0x359a6(%rip){1to8},%ymm11,%ymm12 # 38bc4 <_sk_srcover_bgra_8888_sse2_lowp+0x350>
+ .byte 98,113,44,56,84,21,152,89,3,0 // vandps 0x35998(%rip){1to8},%ymm10,%ymm10 # 38bc0 <_sk_srcover_bgra_8888_sse2_lowp+0x34c>
+ .byte 98,113,44,56,86,21,238,88,3,0 // vorps 0x358ee(%rip){1to8},%ymm10,%ymm10 # 38b20 <_sk_srcover_bgra_8888_sse2_lowp+0x2ac>
+ .byte 98,114,45,56,188,37,140,89,3,0 // vfnmadd231ps 0x3598c(%rip){1to8},%ymm10,%ymm12 # 38bc8 <_sk_srcover_bgra_8888_sse2_lowp+0x354>
+ .byte 98,113,44,56,88,21,134,89,3,0 // vaddps 0x35986(%rip){1to8},%ymm10,%ymm10 # 38bcc <_sk_srcover_bgra_8888_sse2_lowp+0x358>
+ .byte 196,98,125,24,29,129,89,3,0 // vbroadcastss 0x35981(%rip),%ymm11 # 38bd0 <_sk_srcover_bgra_8888_sse2_lowp+0x35c>
.byte 196,65,36,94,210 // vdivps %ymm10,%ymm11,%ymm10
.byte 196,65,28,92,210 // vsubps %ymm10,%ymm12,%ymm10
.byte 196,193,100,89,218 // vmulps %ymm10,%ymm3,%ymm3
.byte 196,99,125,8,211,1 // vroundps $0x1,%ymm3,%ymm10
.byte 196,65,100,92,210 // vsubps %ymm10,%ymm3,%ymm10
- .byte 98,241,100,56,88,29,241,87,3,0 // vaddps 0x357f1(%rip){1to8},%ymm3,%ymm3 # 38a64 <_sk_srcover_bgra_8888_sse2_lowp+0x360>
- .byte 98,242,45,56,188,29,235,87,3,0 // vfnmadd231ps 0x357eb(%rip){1to8},%ymm10,%ymm3 # 38a68 <_sk_srcover_bgra_8888_sse2_lowp+0x364>
- .byte 196,98,125,24,29,230,87,3,0 // vbroadcastss 0x357e6(%rip),%ymm11 # 38a6c <_sk_srcover_bgra_8888_sse2_lowp+0x368>
+ .byte 98,241,100,56,88,29,97,89,3,0 // vaddps 0x35961(%rip){1to8},%ymm3,%ymm3 # 38bd4 <_sk_srcover_bgra_8888_sse2_lowp+0x360>
+ .byte 98,242,45,56,188,29,91,89,3,0 // vfnmadd231ps 0x3595b(%rip){1to8},%ymm10,%ymm3 # 38bd8 <_sk_srcover_bgra_8888_sse2_lowp+0x364>
+ .byte 196,98,125,24,29,86,89,3,0 // vbroadcastss 0x35956(%rip),%ymm11 # 38bdc <_sk_srcover_bgra_8888_sse2_lowp+0x368>
.byte 196,65,36,92,210 // vsubps %ymm10,%ymm11,%ymm10
- .byte 196,98,125,24,29,220,87,3,0 // vbroadcastss 0x357dc(%rip),%ymm11 # 38a70 <_sk_srcover_bgra_8888_sse2_lowp+0x36c>
+ .byte 196,98,125,24,29,76,89,3,0 // vbroadcastss 0x3594c(%rip),%ymm11 # 38be0 <_sk_srcover_bgra_8888_sse2_lowp+0x36c>
.byte 196,65,36,94,210 // vdivps %ymm10,%ymm11,%ymm10
.byte 196,193,100,88,218 // vaddps %ymm10,%ymm3,%ymm3
- .byte 98,241,100,56,89,29,204,87,3,0 // vmulps 0x357cc(%rip){1to8},%ymm3,%ymm3 # 38a74 <_sk_srcover_bgra_8888_sse2_lowp+0x370>
+ .byte 98,241,100,56,89,29,60,89,3,0 // vmulps 0x3593c(%rip){1to8},%ymm3,%ymm3 # 38be4 <_sk_srcover_bgra_8888_sse2_lowp+0x370>
.byte 197,253,91,219 // vcvtps2dq %ymm3,%ymm3
.byte 196,98,125,24,80,20 // vbroadcastss 0x14(%rax),%ymm10
.byte 197,172,88,219 // vaddps %ymm3,%ymm10,%ymm3
.byte 196,195,101,74,217,128 // vblendvps %ymm8,%ymm9,%ymm3,%ymm3
.byte 196,65,60,87,192 // vxorps %ymm8,%ymm8,%ymm8
.byte 196,193,100,95,216 // vmaxps %ymm8,%ymm3,%ymm3
- .byte 98,241,100,56,93,29,228,86,3,0 // vminps 0x356e4(%rip){1to8},%ymm3,%ymm3 # 389b4 <_sk_srcover_bgra_8888_sse2_lowp+0x2b0>
+ .byte 98,241,100,56,93,29,84,88,3,0 // vminps 0x35854(%rip){1to8},%ymm3,%ymm3 # 38b24 <_sk_srcover_bgra_8888_sse2_lowp+0x2b0>
.byte 72,173 // lods %ds:(%rsi),%rax
.byte 255,224 // jmpq *%rax
@@ -12564,33 +12564,33 @@ _sk_gamma_skx:
.byte 72,173 // lods %ds:(%rsi),%rax
.byte 98,226,125,40,24,48 // vbroadcastss (%rax),%ymm22
.byte 197,124,91,200 // vcvtdq2ps %ymm0,%ymm9
- .byte 196,98,125,24,21,99,87,3,0 // vbroadcastss 0x35763(%rip),%ymm10 # 38a4c <_sk_srcover_bgra_8888_sse2_lowp+0x348>
- .byte 196,98,125,24,29,94,87,3,0 // vbroadcastss 0x3575e(%rip),%ymm11 # 38a50 <_sk_srcover_bgra_8888_sse2_lowp+0x34c>
+ .byte 196,98,125,24,21,211,88,3,0 // vbroadcastss 0x358d3(%rip),%ymm10 # 38bbc <_sk_srcover_bgra_8888_sse2_lowp+0x348>
+ .byte 196,98,125,24,29,206,88,3,0 // vbroadcastss 0x358ce(%rip),%ymm11 # 38bc0 <_sk_srcover_bgra_8888_sse2_lowp+0x34c>
.byte 196,193,124,84,195 // vandps %ymm11,%ymm0,%ymm0
- .byte 196,98,125,24,37,176,86,3,0 // vbroadcastss 0x356b0(%rip),%ymm12 # 389b0 <_sk_srcover_bgra_8888_sse2_lowp+0x2ac>
+ .byte 196,98,125,24,37,32,88,3,0 // vbroadcastss 0x35820(%rip),%ymm12 # 38b20 <_sk_srcover_bgra_8888_sse2_lowp+0x2ac>
.byte 196,193,124,86,196 // vorps %ymm12,%ymm0,%ymm0
- .byte 196,98,125,24,45,70,87,3,0 // vbroadcastss 0x35746(%rip),%ymm13 # 38a54 <_sk_srcover_bgra_8888_sse2_lowp+0x350>
+ .byte 196,98,125,24,45,182,88,3,0 // vbroadcastss 0x358b6(%rip),%ymm13 # 38bc4 <_sk_srcover_bgra_8888_sse2_lowp+0x350>
.byte 196,66,45,168,205 // vfmadd213ps %ymm13,%ymm10,%ymm9
- .byte 196,98,125,24,53,60,87,3,0 // vbroadcastss 0x3573c(%rip),%ymm14 # 38a58 <_sk_srcover_bgra_8888_sse2_lowp+0x354>
+ .byte 196,98,125,24,53,172,88,3,0 // vbroadcastss 0x358ac(%rip),%ymm14 # 38bc8 <_sk_srcover_bgra_8888_sse2_lowp+0x354>
.byte 196,66,125,188,206 // vfnmadd231ps %ymm14,%ymm0,%ymm9
- .byte 196,98,125,24,61,50,87,3,0 // vbroadcastss 0x35732(%rip),%ymm15 # 38a5c <_sk_srcover_bgra_8888_sse2_lowp+0x358>
+ .byte 196,98,125,24,61,162,88,3,0 // vbroadcastss 0x358a2(%rip),%ymm15 # 38bcc <_sk_srcover_bgra_8888_sse2_lowp+0x358>
.byte 196,193,124,88,199 // vaddps %ymm15,%ymm0,%ymm0
- .byte 98,226,125,40,24,5,39,87,3,0 // vbroadcastss 0x35727(%rip),%ymm16 # 38a60 <_sk_srcover_bgra_8888_sse2_lowp+0x35c>
+ .byte 98,226,125,40,24,5,151,88,3,0 // vbroadcastss 0x35897(%rip),%ymm16 # 38bd0 <_sk_srcover_bgra_8888_sse2_lowp+0x35c>
.byte 98,241,124,32,94,192 // vdivps %ymm0,%ymm16,%ymm0
.byte 197,180,92,192 // vsubps %ymm0,%ymm9,%ymm0
.byte 98,177,124,40,89,198 // vmulps %ymm22,%ymm0,%ymm0
.byte 196,99,125,8,200,1 // vroundps $0x1,%ymm0,%ymm9
.byte 196,65,124,92,201 // vsubps %ymm9,%ymm0,%ymm9
- .byte 98,226,125,40,24,13,6,87,3,0 // vbroadcastss 0x35706(%rip),%ymm17 # 38a64 <_sk_srcover_bgra_8888_sse2_lowp+0x360>
+ .byte 98,226,125,40,24,13,118,88,3,0 // vbroadcastss 0x35876(%rip),%ymm17 # 38bd4 <_sk_srcover_bgra_8888_sse2_lowp+0x360>
.byte 98,177,124,40,88,193 // vaddps %ymm17,%ymm0,%ymm0
- .byte 98,226,125,40,24,21,250,86,3,0 // vbroadcastss 0x356fa(%rip),%ymm18 # 38a68 <_sk_srcover_bgra_8888_sse2_lowp+0x364>
+ .byte 98,226,125,40,24,21,106,88,3,0 // vbroadcastss 0x3586a(%rip),%ymm18 # 38bd8 <_sk_srcover_bgra_8888_sse2_lowp+0x364>
.byte 98,178,53,40,188,194 // vfnmadd231ps %ymm18,%ymm9,%ymm0
- .byte 98,226,125,40,24,29,238,86,3,0 // vbroadcastss 0x356ee(%rip),%ymm19 # 38a6c <_sk_srcover_bgra_8888_sse2_lowp+0x368>
+ .byte 98,226,125,40,24,29,94,88,3,0 // vbroadcastss 0x3585e(%rip),%ymm19 # 38bdc <_sk_srcover_bgra_8888_sse2_lowp+0x368>
.byte 98,81,100,32,92,201 // vsubps %ymm9,%ymm19,%ymm9
- .byte 98,226,125,40,24,37,226,86,3,0 // vbroadcastss 0x356e2(%rip),%ymm20 # 38a70 <_sk_srcover_bgra_8888_sse2_lowp+0x36c>
+ .byte 98,226,125,40,24,37,82,88,3,0 // vbroadcastss 0x35852(%rip),%ymm20 # 38be0 <_sk_srcover_bgra_8888_sse2_lowp+0x36c>
.byte 98,81,92,32,94,201 // vdivps %ymm9,%ymm20,%ymm9
.byte 196,193,124,88,193 // vaddps %ymm9,%ymm0,%ymm0
- .byte 196,98,125,24,13,210,86,3,0 // vbroadcastss 0x356d2(%rip),%ymm9 # 38a74 <_sk_srcover_bgra_8888_sse2_lowp+0x370>
+ .byte 196,98,125,24,13,66,88,3,0 // vbroadcastss 0x35842(%rip),%ymm9 # 38be4 <_sk_srcover_bgra_8888_sse2_lowp+0x370>
.byte 196,193,124,89,193 // vmulps %ymm9,%ymm0,%ymm0
.byte 197,253,91,192 // vcvtps2dq %ymm0,%ymm0
.byte 98,225,124,40,91,233 // vcvtdq2ps %ymm1,%ymm21
@@ -12636,23 +12636,23 @@ HIDDEN _sk_lab_to_xyz_skx
.globl _sk_lab_to_xyz_skx
FUNCTION(_sk_lab_to_xyz_skx)
_sk_lab_to_xyz_skx:
- .byte 196,98,125,24,5,254,85,3,0 // vbroadcastss 0x355fe(%rip),%ymm8 # 38a78 <_sk_srcover_bgra_8888_sse2_lowp+0x374>
- .byte 196,98,125,24,13,93,85,3,0 // vbroadcastss 0x3555d(%rip),%ymm9 # 389e0 <_sk_srcover_bgra_8888_sse2_lowp+0x2dc>
- .byte 196,98,125,24,21,240,85,3,0 // vbroadcastss 0x355f0(%rip),%ymm10 # 38a7c <_sk_srcover_bgra_8888_sse2_lowp+0x378>
+ .byte 196,98,125,24,5,110,87,3,0 // vbroadcastss 0x3576e(%rip),%ymm8 # 38be8 <_sk_srcover_bgra_8888_sse2_lowp+0x374>
+ .byte 196,98,125,24,13,205,86,3,0 // vbroadcastss 0x356cd(%rip),%ymm9 # 38b50 <_sk_srcover_bgra_8888_sse2_lowp+0x2dc>
+ .byte 196,98,125,24,21,96,87,3,0 // vbroadcastss 0x35760(%rip),%ymm10 # 38bec <_sk_srcover_bgra_8888_sse2_lowp+0x378>
.byte 196,194,53,168,202 // vfmadd213ps %ymm10,%ymm9,%ymm1
.byte 196,194,53,168,210 // vfmadd213ps %ymm10,%ymm9,%ymm2
- .byte 98,114,125,56,168,5,224,85,3,0 // vfmadd213ps 0x355e0(%rip){1to8},%ymm0,%ymm8 # 38a80 <_sk_srcover_bgra_8888_sse2_lowp+0x37c>
- .byte 98,241,60,56,89,5,218,85,3,0 // vmulps 0x355da(%rip){1to8},%ymm8,%ymm0 # 38a84 <_sk_srcover_bgra_8888_sse2_lowp+0x380>
- .byte 98,242,125,56,152,13,212,85,3,0 // vfmadd132ps 0x355d4(%rip){1to8},%ymm0,%ymm1 # 38a88 <_sk_srcover_bgra_8888_sse2_lowp+0x384>
- .byte 98,242,125,56,156,21,206,85,3,0 // vfnmadd132ps 0x355ce(%rip){1to8},%ymm0,%ymm2 # 38a8c <_sk_srcover_bgra_8888_sse2_lowp+0x388>
+ .byte 98,114,125,56,168,5,80,87,3,0 // vfmadd213ps 0x35750(%rip){1to8},%ymm0,%ymm8 # 38bf0 <_sk_srcover_bgra_8888_sse2_lowp+0x37c>
+ .byte 98,241,60,56,89,5,74,87,3,0 // vmulps 0x3574a(%rip){1to8},%ymm8,%ymm0 # 38bf4 <_sk_srcover_bgra_8888_sse2_lowp+0x380>
+ .byte 98,242,125,56,152,13,68,87,3,0 // vfmadd132ps 0x35744(%rip){1to8},%ymm0,%ymm1 # 38bf8 <_sk_srcover_bgra_8888_sse2_lowp+0x384>
+ .byte 98,242,125,56,156,21,62,87,3,0 // vfnmadd132ps 0x3573e(%rip){1to8},%ymm0,%ymm2 # 38bfc <_sk_srcover_bgra_8888_sse2_lowp+0x388>
.byte 197,116,89,193 // vmulps %ymm1,%ymm1,%ymm8
.byte 196,65,116,89,192 // vmulps %ymm8,%ymm1,%ymm8
- .byte 196,98,125,24,13,192,85,3,0 // vbroadcastss 0x355c0(%rip),%ymm9 # 38a90 <_sk_srcover_bgra_8888_sse2_lowp+0x38c>
+ .byte 196,98,125,24,13,48,87,3,0 // vbroadcastss 0x35730(%rip),%ymm9 # 38c00 <_sk_srcover_bgra_8888_sse2_lowp+0x38c>
.byte 98,209,52,40,194,192,1 // vcmpltps %ymm8,%ymm9,%k0
.byte 98,114,126,40,56,208 // vpmovm2d %k0,%ymm10
- .byte 196,98,125,24,29,174,85,3,0 // vbroadcastss 0x355ae(%rip),%ymm11 # 38a94 <_sk_srcover_bgra_8888_sse2_lowp+0x390>
+ .byte 196,98,125,24,29,30,87,3,0 // vbroadcastss 0x3571e(%rip),%ymm11 # 38c04 <_sk_srcover_bgra_8888_sse2_lowp+0x390>
.byte 196,193,116,88,203 // vaddps %ymm11,%ymm1,%ymm1
- .byte 196,98,125,24,37,164,85,3,0 // vbroadcastss 0x355a4(%rip),%ymm12 # 38a98 <_sk_srcover_bgra_8888_sse2_lowp+0x394>
+ .byte 196,98,125,24,37,20,87,3,0 // vbroadcastss 0x35714(%rip),%ymm12 # 38c08 <_sk_srcover_bgra_8888_sse2_lowp+0x394>
.byte 196,193,116,89,204 // vmulps %ymm12,%ymm1,%ymm1
.byte 196,67,117,74,192,160 // vblendvps %ymm10,%ymm8,%ymm1,%ymm8
.byte 197,252,89,200 // vmulps %ymm0,%ymm0,%ymm1
@@ -12669,8 +12669,8 @@ _sk_lab_to_xyz_skx:
.byte 196,193,108,88,211 // vaddps %ymm11,%ymm2,%ymm2
.byte 196,193,108,89,212 // vmulps %ymm12,%ymm2,%ymm2
.byte 196,227,109,74,208,144 // vblendvps %ymm9,%ymm0,%ymm2,%ymm2
- .byte 98,241,60,56,89,5,73,85,3,0 // vmulps 0x35549(%rip){1to8},%ymm8,%ymm0 # 38a9c <_sk_srcover_bgra_8888_sse2_lowp+0x398>
- .byte 98,241,108,56,89,21,67,85,3,0 // vmulps 0x35543(%rip){1to8},%ymm2,%ymm2 # 38aa0 <_sk_srcover_bgra_8888_sse2_lowp+0x39c>
+ .byte 98,241,60,56,89,5,185,86,3,0 // vmulps 0x356b9(%rip){1to8},%ymm8,%ymm0 # 38c0c <_sk_srcover_bgra_8888_sse2_lowp+0x398>
+ .byte 98,241,108,56,89,21,179,86,3,0 // vmulps 0x356b3(%rip){1to8},%ymm2,%ymm2 # 38c10 <_sk_srcover_bgra_8888_sse2_lowp+0x39c>
.byte 72,173 // lods %ds:(%rsi),%rax
.byte 255,224 // jmpq *%rax
@@ -12687,10 +12687,10 @@ _sk_load_a8_skx:
.byte 72,133,255 // test %rdi,%rdi
.byte 117,49 // jne 35aa <_sk_load_a8_skx+0x49>
.byte 196,130,121,48,4,24 // vpmovzxbw (%r8,%r11,1),%xmm0
- .byte 197,249,219,5,9,93,3,0 // vpand 0x35d09(%rip),%xmm0,%xmm0 # 39290 <_sk_srcover_bgra_8888_sse2_lowp+0xb8c>
+ .byte 197,249,219,5,105,94,3,0 // vpand 0x35e69(%rip),%xmm0,%xmm0 # 393f0 <_sk_srcover_bgra_8888_sse2_lowp+0xb7c>
.byte 196,226,125,51,192 // vpmovzxwd %xmm0,%ymm0
.byte 197,252,91,192 // vcvtdq2ps %ymm0,%ymm0
- .byte 98,241,124,56,89,29,146,84,3,0 // vmulps 0x35492(%rip){1to8},%ymm0,%ymm3 # 38a2c <_sk_srcover_bgra_8888_sse2_lowp+0x328>
+ .byte 98,241,124,56,89,29,2,86,3,0 // vmulps 0x35602(%rip){1to8},%ymm0,%ymm3 # 38b9c <_sk_srcover_bgra_8888_sse2_lowp+0x328>
.byte 72,173 // lods %ds:(%rsi),%rax
.byte 197,252,87,192 // vxorps %ymm0,%ymm0,%ymm0
.byte 197,245,239,201 // vpxor %ymm1,%ymm1,%ymm1
@@ -12764,10 +12764,10 @@ _sk_load_a8_dst_skx:
.byte 72,133,255 // test %rdi,%rdi
.byte 117,49 // jne 36b5 <_sk_load_a8_dst_skx+0x49>
.byte 196,130,121,48,36,24 // vpmovzxbw (%r8,%r11,1),%xmm4
- .byte 197,217,219,37,254,91,3,0 // vpand 0x35bfe(%rip),%xmm4,%xmm4 # 39290 <_sk_srcover_bgra_8888_sse2_lowp+0xb8c>
+ .byte 197,217,219,37,94,93,3,0 // vpand 0x35d5e(%rip),%xmm4,%xmm4 # 393f0 <_sk_srcover_bgra_8888_sse2_lowp+0xb7c>
.byte 196,226,125,51,228 // vpmovzxwd %xmm4,%ymm4
.byte 197,252,91,228 // vcvtdq2ps %ymm4,%ymm4
- .byte 98,241,92,56,89,61,135,83,3,0 // vmulps 0x35387(%rip){1to8},%ymm4,%ymm7 # 38a2c <_sk_srcover_bgra_8888_sse2_lowp+0x328>
+ .byte 98,241,92,56,89,61,247,84,3,0 // vmulps 0x354f7(%rip){1to8},%ymm4,%ymm7 # 38b9c <_sk_srcover_bgra_8888_sse2_lowp+0x328>
.byte 72,173 // lods %ds:(%rsi),%rax
.byte 197,220,87,228 // vxorps %ymm4,%ymm4,%ymm4
.byte 197,213,239,237 // vpxor %ymm5,%ymm5,%ymm5
@@ -12883,7 +12883,7 @@ _sk_gather_a8_skx:
.byte 196,227,121,32,192,7 // vpinsrb $0x7,%eax,%xmm0,%xmm0
.byte 196,226,125,49,192 // vpmovzxbd %xmm0,%ymm0
.byte 197,252,91,192 // vcvtdq2ps %ymm0,%ymm0
- .byte 98,241,124,56,89,29,202,81,3,0 // vmulps 0x351ca(%rip){1to8},%ymm0,%ymm3 # 38a2c <_sk_srcover_bgra_8888_sse2_lowp+0x328>
+ .byte 98,241,124,56,89,29,58,83,3,0 // vmulps 0x3533a(%rip){1to8},%ymm0,%ymm3 # 38b9c <_sk_srcover_bgra_8888_sse2_lowp+0x328>
.byte 72,173 // lods %ds:(%rsi),%rax
.byte 197,252,87,192 // vxorps %ymm0,%ymm0,%ymm0
.byte 197,245,239,201 // vpxor %ymm1,%ymm1,%ymm1
@@ -12904,7 +12904,7 @@ _sk_store_a8_skx:
.byte 77,15,175,193 // imul %r9,%r8
.byte 76,3,0 // add (%rax),%r8
.byte 76,99,218 // movslq %edx,%r11
- .byte 98,113,100,56,89,5,74,81,3,0 // vmulps 0x3514a(%rip){1to8},%ymm3,%ymm8 # 389e0 <_sk_srcover_bgra_8888_sse2_lowp+0x2dc>
+ .byte 98,113,100,56,89,5,186,82,3,0 // vmulps 0x352ba(%rip){1to8},%ymm3,%ymm8 # 38b50 <_sk_srcover_bgra_8888_sse2_lowp+0x2dc>
.byte 196,65,125,91,192 // vcvtps2dq %ymm8,%ymm8
.byte 196,67,125,57,193,1 // vextracti128 $0x1,%ymm8,%xmm9
.byte 196,66,57,43,193 // vpackusdw %xmm9,%xmm8,%xmm8
@@ -12971,12 +12971,12 @@ _sk_load_g8_skx:
.byte 72,133,255 // test %rdi,%rdi
.byte 117,54 // jne 3992 <_sk_load_g8_skx+0x4e>
.byte 196,130,121,48,4,24 // vpmovzxbw (%r8,%r11,1),%xmm0
- .byte 197,249,219,5,38,89,3,0 // vpand 0x35926(%rip),%xmm0,%xmm0 # 39290 <_sk_srcover_bgra_8888_sse2_lowp+0xb8c>
+ .byte 197,249,219,5,134,90,3,0 // vpand 0x35a86(%rip),%xmm0,%xmm0 # 393f0 <_sk_srcover_bgra_8888_sse2_lowp+0xb7c>
.byte 196,226,125,51,192 // vpmovzxwd %xmm0,%ymm0
.byte 197,252,91,192 // vcvtdq2ps %ymm0,%ymm0
- .byte 98,241,124,56,89,5,175,80,3,0 // vmulps 0x350af(%rip){1to8},%ymm0,%ymm0 # 38a2c <_sk_srcover_bgra_8888_sse2_lowp+0x328>
+ .byte 98,241,124,56,89,5,31,82,3,0 // vmulps 0x3521f(%rip){1to8},%ymm0,%ymm0 # 38b9c <_sk_srcover_bgra_8888_sse2_lowp+0x328>
.byte 72,173 // lods %ds:(%rsi),%rax
- .byte 196,226,125,24,29,44,80,3,0 // vbroadcastss 0x3502c(%rip),%ymm3 # 389b4 <_sk_srcover_bgra_8888_sse2_lowp+0x2b0>
+ .byte 196,226,125,24,29,156,81,3,0 // vbroadcastss 0x3519c(%rip),%ymm3 # 38b24 <_sk_srcover_bgra_8888_sse2_lowp+0x2b0>
.byte 197,252,40,200 // vmovaps %ymm0,%ymm1
.byte 197,252,40,208 // vmovaps %ymm0,%ymm2
.byte 255,224 // jmpq *%rax
@@ -13048,12 +13048,12 @@ _sk_load_g8_dst_skx:
.byte 72,133,255 // test %rdi,%rdi
.byte 117,54 // jne 3aa2 <_sk_load_g8_dst_skx+0x4e>
.byte 196,130,121,48,36,24 // vpmovzxbw (%r8,%r11,1),%xmm4
- .byte 197,217,219,37,22,88,3,0 // vpand 0x35816(%rip),%xmm4,%xmm4 # 39290 <_sk_srcover_bgra_8888_sse2_lowp+0xb8c>
+ .byte 197,217,219,37,118,89,3,0 // vpand 0x35976(%rip),%xmm4,%xmm4 # 393f0 <_sk_srcover_bgra_8888_sse2_lowp+0xb7c>
.byte 196,226,125,51,228 // vpmovzxwd %xmm4,%ymm4
.byte 197,252,91,228 // vcvtdq2ps %ymm4,%ymm4
- .byte 98,241,92,56,89,37,159,79,3,0 // vmulps 0x34f9f(%rip){1to8},%ymm4,%ymm4 # 38a2c <_sk_srcover_bgra_8888_sse2_lowp+0x328>
+ .byte 98,241,92,56,89,37,15,81,3,0 // vmulps 0x3510f(%rip){1to8},%ymm4,%ymm4 # 38b9c <_sk_srcover_bgra_8888_sse2_lowp+0x328>
.byte 72,173 // lods %ds:(%rsi),%rax
- .byte 196,226,125,24,61,28,79,3,0 // vbroadcastss 0x34f1c(%rip),%ymm7 # 389b4 <_sk_srcover_bgra_8888_sse2_lowp+0x2b0>
+ .byte 196,226,125,24,61,140,80,3,0 // vbroadcastss 0x3508c(%rip),%ymm7 # 38b24 <_sk_srcover_bgra_8888_sse2_lowp+0x2b0>
.byte 197,252,40,236 // vmovaps %ymm4,%ymm5
.byte 197,252,40,244 // vmovaps %ymm4,%ymm6
.byte 255,224 // jmpq *%rax
@@ -13167,9 +13167,9 @@ _sk_gather_g8_skx:
.byte 196,227,121,32,192,7 // vpinsrb $0x7,%eax,%xmm0,%xmm0
.byte 196,226,125,49,192 // vpmovzxbd %xmm0,%ymm0
.byte 197,252,91,192 // vcvtdq2ps %ymm0,%ymm0
- .byte 98,241,124,56,89,5,218,77,3,0 // vmulps 0x34dda(%rip){1to8},%ymm0,%ymm0 # 38a2c <_sk_srcover_bgra_8888_sse2_lowp+0x328>
+ .byte 98,241,124,56,89,5,74,79,3,0 // vmulps 0x34f4a(%rip){1to8},%ymm0,%ymm0 # 38b9c <_sk_srcover_bgra_8888_sse2_lowp+0x328>
.byte 72,173 // lods %ds:(%rsi),%rax
- .byte 196,226,125,24,29,87,77,3,0 // vbroadcastss 0x34d57(%rip),%ymm3 # 389b4 <_sk_srcover_bgra_8888_sse2_lowp+0x2b0>
+ .byte 196,226,125,24,29,199,78,3,0 // vbroadcastss 0x34ec7(%rip),%ymm3 # 38b24 <_sk_srcover_bgra_8888_sse2_lowp+0x2b0>
.byte 197,252,40,200 // vmovaps %ymm0,%ymm1
.byte 197,252,40,208 // vmovaps %ymm0,%ymm2
.byte 91 // pop %rbx
@@ -13193,17 +13193,17 @@ _sk_load_565_skx:
.byte 117,96 // jne 3ce9 <_sk_load_565_skx+0x7b>
.byte 196,129,122,111,4,88 // vmovdqu (%r8,%r11,2),%xmm0
.byte 196,226,125,51,208 // vpmovzxwd %xmm0,%ymm2
- .byte 98,241,109,56,219,5,146,77,3,0 // vpandd 0x34d92(%rip){1to8},%ymm2,%ymm0 # 38a30 <_sk_srcover_bgra_8888_sse2_lowp+0x32c>
+ .byte 98,241,109,56,219,5,2,79,3,0 // vpandd 0x34f02(%rip){1to8},%ymm2,%ymm0 # 38ba0 <_sk_srcover_bgra_8888_sse2_lowp+0x32c>
.byte 197,252,91,192 // vcvtdq2ps %ymm0,%ymm0
- .byte 98,241,124,56,89,5,136,77,3,0 // vmulps 0x34d88(%rip){1to8},%ymm0,%ymm0 # 38a34 <_sk_srcover_bgra_8888_sse2_lowp+0x330>
- .byte 98,241,109,56,219,13,130,77,3,0 // vpandd 0x34d82(%rip){1to8},%ymm2,%ymm1 # 38a38 <_sk_srcover_bgra_8888_sse2_lowp+0x334>
+ .byte 98,241,124,56,89,5,248,78,3,0 // vmulps 0x34ef8(%rip){1to8},%ymm0,%ymm0 # 38ba4 <_sk_srcover_bgra_8888_sse2_lowp+0x330>
+ .byte 98,241,109,56,219,13,242,78,3,0 // vpandd 0x34ef2(%rip){1to8},%ymm2,%ymm1 # 38ba8 <_sk_srcover_bgra_8888_sse2_lowp+0x334>
.byte 197,252,91,201 // vcvtdq2ps %ymm1,%ymm1
- .byte 98,241,116,56,89,13,120,77,3,0 // vmulps 0x34d78(%rip){1to8},%ymm1,%ymm1 # 38a3c <_sk_srcover_bgra_8888_sse2_lowp+0x338>
- .byte 98,241,109,56,219,21,114,77,3,0 // vpandd 0x34d72(%rip){1to8},%ymm2,%ymm2 # 38a40 <_sk_srcover_bgra_8888_sse2_lowp+0x33c>
+ .byte 98,241,116,56,89,13,232,78,3,0 // vmulps 0x34ee8(%rip){1to8},%ymm1,%ymm1 # 38bac <_sk_srcover_bgra_8888_sse2_lowp+0x338>
+ .byte 98,241,109,56,219,21,226,78,3,0 // vpandd 0x34ee2(%rip){1to8},%ymm2,%ymm2 # 38bb0 <_sk_srcover_bgra_8888_sse2_lowp+0x33c>
.byte 197,252,91,210 // vcvtdq2ps %ymm2,%ymm2
- .byte 98,241,108,56,89,21,104,77,3,0 // vmulps 0x34d68(%rip){1to8},%ymm2,%ymm2 # 38a44 <_sk_srcover_bgra_8888_sse2_lowp+0x340>
+ .byte 98,241,108,56,89,21,216,78,3,0 // vmulps 0x34ed8(%rip){1to8},%ymm2,%ymm2 # 38bb4 <_sk_srcover_bgra_8888_sse2_lowp+0x340>
.byte 72,173 // lods %ds:(%rsi),%rax
- .byte 196,226,125,24,29,205,76,3,0 // vbroadcastss 0x34ccd(%rip),%ymm3 # 389b4 <_sk_srcover_bgra_8888_sse2_lowp+0x2b0>
+ .byte 196,226,125,24,29,61,78,3,0 // vbroadcastss 0x34e3d(%rip),%ymm3 # 38b24 <_sk_srcover_bgra_8888_sse2_lowp+0x2b0>
.byte 255,224 // jmpq *%rax
.byte 65,137,249 // mov %edi,%r9d
.byte 65,128,225,7 // and $0x7,%r9b
@@ -13271,17 +13271,17 @@ _sk_load_565_dst_skx:
.byte 117,96 // jne 3e1b <_sk_load_565_dst_skx+0x7b>
.byte 196,129,122,111,36,88 // vmovdqu (%r8,%r11,2),%xmm4
.byte 196,226,125,51,244 // vpmovzxwd %xmm4,%ymm6
- .byte 98,241,77,56,219,37,96,76,3,0 // vpandd 0x34c60(%rip){1to8},%ymm6,%ymm4 # 38a30 <_sk_srcover_bgra_8888_sse2_lowp+0x32c>
+ .byte 98,241,77,56,219,37,208,77,3,0 // vpandd 0x34dd0(%rip){1to8},%ymm6,%ymm4 # 38ba0 <_sk_srcover_bgra_8888_sse2_lowp+0x32c>
.byte 197,252,91,228 // vcvtdq2ps %ymm4,%ymm4
- .byte 98,241,92,56,89,37,86,76,3,0 // vmulps 0x34c56(%rip){1to8},%ymm4,%ymm4 # 38a34 <_sk_srcover_bgra_8888_sse2_lowp+0x330>
- .byte 98,241,77,56,219,45,80,76,3,0 // vpandd 0x34c50(%rip){1to8},%ymm6,%ymm5 # 38a38 <_sk_srcover_bgra_8888_sse2_lowp+0x334>
+ .byte 98,241,92,56,89,37,198,77,3,0 // vmulps 0x34dc6(%rip){1to8},%ymm4,%ymm4 # 38ba4 <_sk_srcover_bgra_8888_sse2_lowp+0x330>
+ .byte 98,241,77,56,219,45,192,77,3,0 // vpandd 0x34dc0(%rip){1to8},%ymm6,%ymm5 # 38ba8 <_sk_srcover_bgra_8888_sse2_lowp+0x334>
.byte 197,252,91,237 // vcvtdq2ps %ymm5,%ymm5
- .byte 98,241,84,56,89,45,70,76,3,0 // vmulps 0x34c46(%rip){1to8},%ymm5,%ymm5 # 38a3c <_sk_srcover_bgra_8888_sse2_lowp+0x338>
- .byte 98,241,77,56,219,53,64,76,3,0 // vpandd 0x34c40(%rip){1to8},%ymm6,%ymm6 # 38a40 <_sk_srcover_bgra_8888_sse2_lowp+0x33c>
+ .byte 98,241,84,56,89,45,182,77,3,0 // vmulps 0x34db6(%rip){1to8},%ymm5,%ymm5 # 38bac <_sk_srcover_bgra_8888_sse2_lowp+0x338>
+ .byte 98,241,77,56,219,53,176,77,3,0 // vpandd 0x34db0(%rip){1to8},%ymm6,%ymm6 # 38bb0 <_sk_srcover_bgra_8888_sse2_lowp+0x33c>
.byte 197,252,91,246 // vcvtdq2ps %ymm6,%ymm6
- .byte 98,241,76,56,89,53,54,76,3,0 // vmulps 0x34c36(%rip){1to8},%ymm6,%ymm6 # 38a44 <_sk_srcover_bgra_8888_sse2_lowp+0x340>
+ .byte 98,241,76,56,89,53,166,77,3,0 // vmulps 0x34da6(%rip){1to8},%ymm6,%ymm6 # 38bb4 <_sk_srcover_bgra_8888_sse2_lowp+0x340>
.byte 72,173 // lods %ds:(%rsi),%rax
- .byte 196,226,125,24,61,155,75,3,0 // vbroadcastss 0x34b9b(%rip),%ymm7 # 389b4 <_sk_srcover_bgra_8888_sse2_lowp+0x2b0>
+ .byte 196,226,125,24,61,11,77,3,0 // vbroadcastss 0x34d0b(%rip),%ymm7 # 38b24 <_sk_srcover_bgra_8888_sse2_lowp+0x2b0>
.byte 255,224 // jmpq *%rax
.byte 65,137,249 // mov %edi,%r9d
.byte 65,128,225,7 // and $0x7,%r9b
@@ -13389,17 +13389,17 @@ _sk_gather_565_skx:
.byte 65,15,183,4,88 // movzwl (%r8,%rbx,2),%eax
.byte 197,249,196,192,7 // vpinsrw $0x7,%eax,%xmm0,%xmm0
.byte 196,226,125,51,208 // vpmovzxwd %xmm0,%ymm2
- .byte 98,241,109,56,219,5,123,74,3,0 // vpandd 0x34a7b(%rip){1to8},%ymm2,%ymm0 # 38a30 <_sk_srcover_bgra_8888_sse2_lowp+0x32c>
+ .byte 98,241,109,56,219,5,235,75,3,0 // vpandd 0x34beb(%rip){1to8},%ymm2,%ymm0 # 38ba0 <_sk_srcover_bgra_8888_sse2_lowp+0x32c>
.byte 197,252,91,192 // vcvtdq2ps %ymm0,%ymm0
- .byte 98,241,124,56,89,5,113,74,3,0 // vmulps 0x34a71(%rip){1to8},%ymm0,%ymm0 # 38a34 <_sk_srcover_bgra_8888_sse2_lowp+0x330>
- .byte 98,241,109,56,219,13,107,74,3,0 // vpandd 0x34a6b(%rip){1to8},%ymm2,%ymm1 # 38a38 <_sk_srcover_bgra_8888_sse2_lowp+0x334>
+ .byte 98,241,124,56,89,5,225,75,3,0 // vmulps 0x34be1(%rip){1to8},%ymm0,%ymm0 # 38ba4 <_sk_srcover_bgra_8888_sse2_lowp+0x330>
+ .byte 98,241,109,56,219,13,219,75,3,0 // vpandd 0x34bdb(%rip){1to8},%ymm2,%ymm1 # 38ba8 <_sk_srcover_bgra_8888_sse2_lowp+0x334>
.byte 197,252,91,201 // vcvtdq2ps %ymm1,%ymm1
- .byte 98,241,116,56,89,13,97,74,3,0 // vmulps 0x34a61(%rip){1to8},%ymm1,%ymm1 # 38a3c <_sk_srcover_bgra_8888_sse2_lowp+0x338>
- .byte 98,241,109,56,219,21,91,74,3,0 // vpandd 0x34a5b(%rip){1to8},%ymm2,%ymm2 # 38a40 <_sk_srcover_bgra_8888_sse2_lowp+0x33c>
+ .byte 98,241,116,56,89,13,209,75,3,0 // vmulps 0x34bd1(%rip){1to8},%ymm1,%ymm1 # 38bac <_sk_srcover_bgra_8888_sse2_lowp+0x338>
+ .byte 98,241,109,56,219,21,203,75,3,0 // vpandd 0x34bcb(%rip){1to8},%ymm2,%ymm2 # 38bb0 <_sk_srcover_bgra_8888_sse2_lowp+0x33c>
.byte 197,252,91,210 // vcvtdq2ps %ymm2,%ymm2
- .byte 98,241,108,56,89,21,81,74,3,0 // vmulps 0x34a51(%rip){1to8},%ymm2,%ymm2 # 38a44 <_sk_srcover_bgra_8888_sse2_lowp+0x340>
+ .byte 98,241,108,56,89,21,193,75,3,0 // vmulps 0x34bc1(%rip){1to8},%ymm2,%ymm2 # 38bb4 <_sk_srcover_bgra_8888_sse2_lowp+0x340>
.byte 72,173 // lods %ds:(%rsi),%rax
- .byte 196,226,125,24,29,182,73,3,0 // vbroadcastss 0x349b6(%rip),%ymm3 # 389b4 <_sk_srcover_bgra_8888_sse2_lowp+0x2b0>
+ .byte 196,226,125,24,29,38,75,3,0 // vbroadcastss 0x34b26(%rip),%ymm3 # 38b24 <_sk_srcover_bgra_8888_sse2_lowp+0x2b0>
.byte 91 // pop %rbx
.byte 65,92 // pop %r12
.byte 65,94 // pop %r14
@@ -13417,11 +13417,11 @@ _sk_store_565_skx:
.byte 77,1,192 // add %r8,%r8
.byte 76,3,0 // add (%rax),%r8
.byte 76,99,218 // movslq %edx,%r11
- .byte 196,98,125,24,5,126,74,3,0 // vbroadcastss 0x34a7e(%rip),%ymm8 # 38aa4 <_sk_srcover_bgra_8888_sse2_lowp+0x3a0>
+ .byte 196,98,125,24,5,238,75,3,0 // vbroadcastss 0x34bee(%rip),%ymm8 # 38c14 <_sk_srcover_bgra_8888_sse2_lowp+0x3a0>
.byte 196,65,124,89,200 // vmulps %ymm8,%ymm0,%ymm9
.byte 196,65,125,91,201 // vcvtps2dq %ymm9,%ymm9
.byte 196,193,53,114,241,11 // vpslld $0xb,%ymm9,%ymm9
- .byte 98,113,116,56,89,21,104,74,3,0 // vmulps 0x34a68(%rip){1to8},%ymm1,%ymm10 # 38aa8 <_sk_srcover_bgra_8888_sse2_lowp+0x3a4>
+ .byte 98,113,116,56,89,21,216,75,3,0 // vmulps 0x34bd8(%rip){1to8},%ymm1,%ymm10 # 38c18 <_sk_srcover_bgra_8888_sse2_lowp+0x3a4>
.byte 196,65,125,91,210 // vcvtps2dq %ymm10,%ymm10
.byte 196,193,45,114,242,5 // vpslld $0x5,%ymm10,%ymm10
.byte 196,65,45,235,201 // vpor %ymm9,%ymm10,%ymm9
@@ -13468,7 +13468,7 @@ _sk_store_565_skx:
.byte 255 // (bad)
.byte 255 // (bad)
.byte 255 // (bad)
- .byte 233,255,255,255,225 // jmpq ffffffffe20040f8 <_sk_srcover_bgra_8888_sse2_lowp+0xffffffffe1fcb9f4>
+ .byte 233,255,255,255,225 // jmpq ffffffffe20040f8 <_sk_srcover_bgra_8888_sse2_lowp+0xffffffffe1fcb884>
.byte 255 // (bad)
.byte 255 // (bad)
.byte 255 // (bad)
@@ -13491,18 +13491,18 @@ _sk_load_4444_skx:
.byte 117,111 // jne 418a <_sk_load_4444_skx+0x8a>
.byte 196,129,122,111,4,88 // vmovdqu (%r8,%r11,2),%xmm0
.byte 196,226,125,51,216 // vpmovzxwd %xmm0,%ymm3
- .byte 98,241,101,56,219,5,124,73,3,0 // vpandd 0x3497c(%rip){1to8},%ymm3,%ymm0 # 38aac <_sk_srcover_bgra_8888_sse2_lowp+0x3a8>
+ .byte 98,241,101,56,219,5,236,74,3,0 // vpandd 0x34aec(%rip){1to8},%ymm3,%ymm0 # 38c1c <_sk_srcover_bgra_8888_sse2_lowp+0x3a8>
.byte 197,252,91,192 // vcvtdq2ps %ymm0,%ymm0
- .byte 98,241,124,56,89,5,114,73,3,0 // vmulps 0x34972(%rip){1to8},%ymm0,%ymm0 # 38ab0 <_sk_srcover_bgra_8888_sse2_lowp+0x3ac>
- .byte 98,241,101,56,219,13,108,73,3,0 // vpandd 0x3496c(%rip){1to8},%ymm3,%ymm1 # 38ab4 <_sk_srcover_bgra_8888_sse2_lowp+0x3b0>
+ .byte 98,241,124,56,89,5,226,74,3,0 // vmulps 0x34ae2(%rip){1to8},%ymm0,%ymm0 # 38c20 <_sk_srcover_bgra_8888_sse2_lowp+0x3ac>
+ .byte 98,241,101,56,219,13,220,74,3,0 // vpandd 0x34adc(%rip){1to8},%ymm3,%ymm1 # 38c24 <_sk_srcover_bgra_8888_sse2_lowp+0x3b0>
.byte 197,252,91,201 // vcvtdq2ps %ymm1,%ymm1
- .byte 98,241,116,56,89,13,98,73,3,0 // vmulps 0x34962(%rip){1to8},%ymm1,%ymm1 # 38ab8 <_sk_srcover_bgra_8888_sse2_lowp+0x3b4>
- .byte 98,241,101,56,219,21,92,73,3,0 // vpandd 0x3495c(%rip){1to8},%ymm3,%ymm2 # 38abc <_sk_srcover_bgra_8888_sse2_lowp+0x3b8>
+ .byte 98,241,116,56,89,13,210,74,3,0 // vmulps 0x34ad2(%rip){1to8},%ymm1,%ymm1 # 38c28 <_sk_srcover_bgra_8888_sse2_lowp+0x3b4>
+ .byte 98,241,101,56,219,21,204,74,3,0 // vpandd 0x34acc(%rip){1to8},%ymm3,%ymm2 # 38c2c <_sk_srcover_bgra_8888_sse2_lowp+0x3b8>
.byte 197,252,91,210 // vcvtdq2ps %ymm2,%ymm2
- .byte 98,241,108,56,89,21,82,73,3,0 // vmulps 0x34952(%rip){1to8},%ymm2,%ymm2 # 38ac0 <_sk_srcover_bgra_8888_sse2_lowp+0x3bc>
- .byte 98,241,101,56,219,29,76,73,3,0 // vpandd 0x3494c(%rip){1to8},%ymm3,%ymm3 # 38ac4 <_sk_srcover_bgra_8888_sse2_lowp+0x3c0>
+ .byte 98,241,108,56,89,21,194,74,3,0 // vmulps 0x34ac2(%rip){1to8},%ymm2,%ymm2 # 38c30 <_sk_srcover_bgra_8888_sse2_lowp+0x3bc>
+ .byte 98,241,101,56,219,29,188,74,3,0 // vpandd 0x34abc(%rip){1to8},%ymm3,%ymm3 # 38c34 <_sk_srcover_bgra_8888_sse2_lowp+0x3c0>
.byte 197,252,91,219 // vcvtdq2ps %ymm3,%ymm3
- .byte 98,241,100,56,89,29,66,73,3,0 // vmulps 0x34942(%rip){1to8},%ymm3,%ymm3 # 38ac8 <_sk_srcover_bgra_8888_sse2_lowp+0x3c4>
+ .byte 98,241,100,56,89,29,178,74,3,0 // vmulps 0x34ab2(%rip){1to8},%ymm3,%ymm3 # 38c38 <_sk_srcover_bgra_8888_sse2_lowp+0x3c4>
.byte 72,173 // lods %ds:(%rsi),%rax
.byte 255,224 // jmpq *%rax
.byte 65,137,249 // mov %edi,%r9d
@@ -13572,18 +13572,18 @@ _sk_load_4444_dst_skx:
.byte 117,111 // jne 42ca <_sk_load_4444_dst_skx+0x8a>
.byte 196,129,122,111,36,88 // vmovdqu (%r8,%r11,2),%xmm4
.byte 196,226,125,51,252 // vpmovzxwd %xmm4,%ymm7
- .byte 98,241,69,56,219,37,60,72,3,0 // vpandd 0x3483c(%rip){1to8},%ymm7,%ymm4 # 38aac <_sk_srcover_bgra_8888_sse2_lowp+0x3a8>
+ .byte 98,241,69,56,219,37,172,73,3,0 // vpandd 0x349ac(%rip){1to8},%ymm7,%ymm4 # 38c1c <_sk_srcover_bgra_8888_sse2_lowp+0x3a8>
.byte 197,252,91,228 // vcvtdq2ps %ymm4,%ymm4
- .byte 98,241,92,56,89,37,50,72,3,0 // vmulps 0x34832(%rip){1to8},%ymm4,%ymm4 # 38ab0 <_sk_srcover_bgra_8888_sse2_lowp+0x3ac>
- .byte 98,241,69,56,219,45,44,72,3,0 // vpandd 0x3482c(%rip){1to8},%ymm7,%ymm5 # 38ab4 <_sk_srcover_bgra_8888_sse2_lowp+0x3b0>
+ .byte 98,241,92,56,89,37,162,73,3,0 // vmulps 0x349a2(%rip){1to8},%ymm4,%ymm4 # 38c20 <_sk_srcover_bgra_8888_sse2_lowp+0x3ac>
+ .byte 98,241,69,56,219,45,156,73,3,0 // vpandd 0x3499c(%rip){1to8},%ymm7,%ymm5 # 38c24 <_sk_srcover_bgra_8888_sse2_lowp+0x3b0>
.byte 197,252,91,237 // vcvtdq2ps %ymm5,%ymm5
- .byte 98,241,84,56,89,45,34,72,3,0 // vmulps 0x34822(%rip){1to8},%ymm5,%ymm5 # 38ab8 <_sk_srcover_bgra_8888_sse2_lowp+0x3b4>
- .byte 98,241,69,56,219,53,28,72,3,0 // vpandd 0x3481c(%rip){1to8},%ymm7,%ymm6 # 38abc <_sk_srcover_bgra_8888_sse2_lowp+0x3b8>
+ .byte 98,241,84,56,89,45,146,73,3,0 // vmulps 0x34992(%rip){1to8},%ymm5,%ymm5 # 38c28 <_sk_srcover_bgra_8888_sse2_lowp+0x3b4>
+ .byte 98,241,69,56,219,53,140,73,3,0 // vpandd 0x3498c(%rip){1to8},%ymm7,%ymm6 # 38c2c <_sk_srcover_bgra_8888_sse2_lowp+0x3b8>
.byte 197,252,91,246 // vcvtdq2ps %ymm6,%ymm6
- .byte 98,241,76,56,89,53,18,72,3,0 // vmulps 0x34812(%rip){1to8},%ymm6,%ymm6 # 38ac0 <_sk_srcover_bgra_8888_sse2_lowp+0x3bc>
- .byte 98,241,69,56,219,61,12,72,3,0 // vpandd 0x3480c(%rip){1to8},%ymm7,%ymm7 # 38ac4 <_sk_srcover_bgra_8888_sse2_lowp+0x3c0>
+ .byte 98,241,76,56,89,53,130,73,3,0 // vmulps 0x34982(%rip){1to8},%ymm6,%ymm6 # 38c30 <_sk_srcover_bgra_8888_sse2_lowp+0x3bc>
+ .byte 98,241,69,56,219,61,124,73,3,0 // vpandd 0x3497c(%rip){1to8},%ymm7,%ymm7 # 38c34 <_sk_srcover_bgra_8888_sse2_lowp+0x3c0>
.byte 197,252,91,255 // vcvtdq2ps %ymm7,%ymm7
- .byte 98,241,68,56,89,61,2,72,3,0 // vmulps 0x34802(%rip){1to8},%ymm7,%ymm7 # 38ac8 <_sk_srcover_bgra_8888_sse2_lowp+0x3c4>
+ .byte 98,241,68,56,89,61,114,73,3,0 // vmulps 0x34972(%rip){1to8},%ymm7,%ymm7 # 38c38 <_sk_srcover_bgra_8888_sse2_lowp+0x3c4>
.byte 72,173 // lods %ds:(%rsi),%rax
.byte 255,224 // jmpq *%rax
.byte 65,137,249 // mov %edi,%r9d
@@ -13692,18 +13692,18 @@ _sk_gather_4444_skx:
.byte 65,15,183,4,88 // movzwl (%r8,%rbx,2),%eax
.byte 197,249,196,192,7 // vpinsrw $0x7,%eax,%xmm0,%xmm0
.byte 196,226,125,51,216 // vpmovzxwd %xmm0,%ymm3
- .byte 98,241,101,56,219,5,71,70,3,0 // vpandd 0x34647(%rip){1to8},%ymm3,%ymm0 # 38aac <_sk_srcover_bgra_8888_sse2_lowp+0x3a8>
+ .byte 98,241,101,56,219,5,183,71,3,0 // vpandd 0x347b7(%rip){1to8},%ymm3,%ymm0 # 38c1c <_sk_srcover_bgra_8888_sse2_lowp+0x3a8>
.byte 197,252,91,192 // vcvtdq2ps %ymm0,%ymm0
- .byte 98,241,124,56,89,5,61,70,3,0 // vmulps 0x3463d(%rip){1to8},%ymm0,%ymm0 # 38ab0 <_sk_srcover_bgra_8888_sse2_lowp+0x3ac>
- .byte 98,241,101,56,219,13,55,70,3,0 // vpandd 0x34637(%rip){1to8},%ymm3,%ymm1 # 38ab4 <_sk_srcover_bgra_8888_sse2_lowp+0x3b0>
+ .byte 98,241,124,56,89,5,173,71,3,0 // vmulps 0x347ad(%rip){1to8},%ymm0,%ymm0 # 38c20 <_sk_srcover_bgra_8888_sse2_lowp+0x3ac>
+ .byte 98,241,101,56,219,13,167,71,3,0 // vpandd 0x347a7(%rip){1to8},%ymm3,%ymm1 # 38c24 <_sk_srcover_bgra_8888_sse2_lowp+0x3b0>
.byte 197,252,91,201 // vcvtdq2ps %ymm1,%ymm1
- .byte 98,241,116,56,89,13,45,70,3,0 // vmulps 0x3462d(%rip){1to8},%ymm1,%ymm1 # 38ab8 <_sk_srcover_bgra_8888_sse2_lowp+0x3b4>
- .byte 98,241,101,56,219,21,39,70,3,0 // vpandd 0x34627(%rip){1to8},%ymm3,%ymm2 # 38abc <_sk_srcover_bgra_8888_sse2_lowp+0x3b8>
+ .byte 98,241,116,56,89,13,157,71,3,0 // vmulps 0x3479d(%rip){1to8},%ymm1,%ymm1 # 38c28 <_sk_srcover_bgra_8888_sse2_lowp+0x3b4>
+ .byte 98,241,101,56,219,21,151,71,3,0 // vpandd 0x34797(%rip){1to8},%ymm3,%ymm2 # 38c2c <_sk_srcover_bgra_8888_sse2_lowp+0x3b8>
.byte 197,252,91,210 // vcvtdq2ps %ymm2,%ymm2
- .byte 98,241,108,56,89,21,29,70,3,0 // vmulps 0x3461d(%rip){1to8},%ymm2,%ymm2 # 38ac0 <_sk_srcover_bgra_8888_sse2_lowp+0x3bc>
- .byte 98,241,101,56,219,29,23,70,3,0 // vpandd 0x34617(%rip){1to8},%ymm3,%ymm3 # 38ac4 <_sk_srcover_bgra_8888_sse2_lowp+0x3c0>
+ .byte 98,241,108,56,89,21,141,71,3,0 // vmulps 0x3478d(%rip){1to8},%ymm2,%ymm2 # 38c30 <_sk_srcover_bgra_8888_sse2_lowp+0x3bc>
+ .byte 98,241,101,56,219,29,135,71,3,0 // vpandd 0x34787(%rip){1to8},%ymm3,%ymm3 # 38c34 <_sk_srcover_bgra_8888_sse2_lowp+0x3c0>
.byte 197,252,91,219 // vcvtdq2ps %ymm3,%ymm3
- .byte 98,241,100,56,89,29,13,70,3,0 // vmulps 0x3460d(%rip){1to8},%ymm3,%ymm3 # 38ac8 <_sk_srcover_bgra_8888_sse2_lowp+0x3c4>
+ .byte 98,241,100,56,89,29,125,71,3,0 // vmulps 0x3477d(%rip){1to8},%ymm3,%ymm3 # 38c38 <_sk_srcover_bgra_8888_sse2_lowp+0x3c4>
.byte 72,173 // lods %ds:(%rsi),%rax
.byte 91 // pop %rbx
.byte 65,92 // pop %r12
@@ -13722,7 +13722,7 @@ _sk_store_4444_skx:
.byte 77,1,192 // add %r8,%r8
.byte 76,3,0 // add (%rax),%r8
.byte 76,99,218 // movslq %edx,%r11
- .byte 196,98,125,24,5,231,69,3,0 // vbroadcastss 0x345e7(%rip),%ymm8 # 38acc <_sk_srcover_bgra_8888_sse2_lowp+0x3c8>
+ .byte 196,98,125,24,5,87,71,3,0 // vbroadcastss 0x34757(%rip),%ymm8 # 38c3c <_sk_srcover_bgra_8888_sse2_lowp+0x3c8>
.byte 196,65,124,89,200 // vmulps %ymm8,%ymm0,%ymm9
.byte 196,65,125,91,201 // vcvtps2dq %ymm9,%ymm9
.byte 196,193,53,114,241,12 // vpslld $0xc,%ymm9,%ymm9
@@ -13777,7 +13777,7 @@ _sk_store_4444_skx:
.byte 255 // (bad)
.byte 255 // (bad)
.byte 255 // (bad)
- .byte 232,255,255,255,224 // callq ffffffffe10045c8 <_sk_srcover_bgra_8888_sse2_lowp+0xffffffffe0fcbec4>
+ .byte 232,255,255,255,224 // callq ffffffffe10045c8 <_sk_srcover_bgra_8888_sse2_lowp+0xffffffffe0fcbd54>
.byte 255 // (bad)
.byte 255 // (bad)
.byte 255 // (bad)
@@ -13803,7 +13803,7 @@ _sk_load_8888_skx:
.byte 197,251,146,200 // kmovd %eax,%k1
.byte 98,241,127,169,111,195 // vmovdqu8 %ymm3,%ymm0{%k1}{z}
.byte 197,252,91,192 // vcvtdq2ps %ymm0,%ymm0
- .byte 196,98,125,24,5,30,68,3,0 // vbroadcastss 0x3441e(%rip),%ymm8 # 38a2c <_sk_srcover_bgra_8888_sse2_lowp+0x328>
+ .byte 196,98,125,24,5,142,69,3,0 // vbroadcastss 0x3458e(%rip),%ymm8 # 38b9c <_sk_srcover_bgra_8888_sse2_lowp+0x328>
.byte 196,193,124,89,192 // vmulps %ymm8,%ymm0,%ymm0
.byte 197,245,114,211,8 // vpsrld $0x8,%ymm3,%ymm1
.byte 98,241,127,169,111,201 // vmovdqu8 %ymm1,%ymm1{%k1}{z}
@@ -13890,7 +13890,7 @@ _sk_load_8888_dst_skx:
.byte 197,251,146,200 // kmovd %eax,%k1
.byte 98,241,127,169,111,231 // vmovdqu8 %ymm7,%ymm4{%k1}{z}
.byte 197,252,91,228 // vcvtdq2ps %ymm4,%ymm4
- .byte 196,98,125,24,5,218,66,3,0 // vbroadcastss 0x342da(%rip),%ymm8 # 38a2c <_sk_srcover_bgra_8888_sse2_lowp+0x328>
+ .byte 196,98,125,24,5,74,68,3,0 // vbroadcastss 0x3444a(%rip),%ymm8 # 38b9c <_sk_srcover_bgra_8888_sse2_lowp+0x328>
.byte 196,193,92,89,224 // vmulps %ymm8,%ymm4,%ymm4
.byte 197,213,114,215,8 // vpsrld $0x8,%ymm7,%ymm5
.byte 98,241,127,169,111,237 // vmovdqu8 %ymm5,%ymm5{%k1}{z}
@@ -13984,7 +13984,7 @@ _sk_gather_8888_skx:
.byte 197,251,146,200 // kmovd %eax,%k1
.byte 98,209,127,169,111,192 // vmovdqu8 %ymm8,%ymm0{%k1}{z}
.byte 197,252,91,192 // vcvtdq2ps %ymm0,%ymm0
- .byte 196,226,125,24,29,109,65,3,0 // vbroadcastss 0x3416d(%rip),%ymm3 # 38a2c <_sk_srcover_bgra_8888_sse2_lowp+0x328>
+ .byte 196,226,125,24,29,221,66,3,0 // vbroadcastss 0x342dd(%rip),%ymm3 # 38b9c <_sk_srcover_bgra_8888_sse2_lowp+0x328>
.byte 197,252,89,195 // vmulps %ymm3,%ymm0,%ymm0
.byte 196,193,117,114,208,8 // vpsrld $0x8,%ymm8,%ymm1
.byte 98,241,127,169,111,201 // vmovdqu8 %ymm1,%ymm1{%k1}{z}
@@ -14011,7 +14011,7 @@ _sk_store_8888_skx:
.byte 73,193,224,2 // shl $0x2,%r8
.byte 76,3,0 // add (%rax),%r8
.byte 76,99,218 // movslq %edx,%r11
- .byte 196,98,125,24,5,194,64,3,0 // vbroadcastss 0x340c2(%rip),%ymm8 # 389e0 <_sk_srcover_bgra_8888_sse2_lowp+0x2dc>
+ .byte 196,98,125,24,5,50,66,3,0 // vbroadcastss 0x34232(%rip),%ymm8 # 38b50 <_sk_srcover_bgra_8888_sse2_lowp+0x2dc>
.byte 196,65,124,89,200 // vmulps %ymm8,%ymm0,%ymm9
.byte 196,65,125,91,201 // vcvtps2dq %ymm9,%ymm9
.byte 196,65,116,89,208 // vmulps %ymm8,%ymm1,%ymm10
@@ -14067,7 +14067,7 @@ _sk_store_8888_skx:
.byte 255 // (bad)
.byte 255 // (bad)
.byte 255 // (bad)
- .byte 233,255,255,255,219 // jmpq ffffffffdc004a00 <_sk_srcover_bgra_8888_sse2_lowp+0xffffffffdbfcc2fc>
+ .byte 233,255,255,255,219 // jmpq ffffffffdc004a00 <_sk_srcover_bgra_8888_sse2_lowp+0xffffffffdbfcc18c>
.byte 255 // (bad)
.byte 255 // (bad)
.byte 255,205 // dec %ebp
@@ -14093,7 +14093,7 @@ _sk_load_bgra_skx:
.byte 197,251,146,200 // kmovd %eax,%k1
.byte 98,241,127,169,111,195 // vmovdqu8 %ymm3,%ymm0{%k1}{z}
.byte 197,252,91,192 // vcvtdq2ps %ymm0,%ymm0
- .byte 196,98,125,24,5,230,63,3,0 // vbroadcastss 0x33fe6(%rip),%ymm8 # 38a2c <_sk_srcover_bgra_8888_sse2_lowp+0x328>
+ .byte 196,98,125,24,5,86,65,3,0 // vbroadcastss 0x34156(%rip),%ymm8 # 38b9c <_sk_srcover_bgra_8888_sse2_lowp+0x328>
.byte 196,193,124,89,208 // vmulps %ymm8,%ymm0,%ymm2
.byte 197,253,114,211,8 // vpsrld $0x8,%ymm3,%ymm0
.byte 98,241,127,169,111,192 // vmovdqu8 %ymm0,%ymm0{%k1}{z}
@@ -14180,7 +14180,7 @@ _sk_load_bgra_dst_skx:
.byte 197,251,146,200 // kmovd %eax,%k1
.byte 98,241,127,169,111,231 // vmovdqu8 %ymm7,%ymm4{%k1}{z}
.byte 197,252,91,228 // vcvtdq2ps %ymm4,%ymm4
- .byte 196,98,125,24,5,162,62,3,0 // vbroadcastss 0x33ea2(%rip),%ymm8 # 38a2c <_sk_srcover_bgra_8888_sse2_lowp+0x328>
+ .byte 196,98,125,24,5,18,64,3,0 // vbroadcastss 0x34012(%rip),%ymm8 # 38b9c <_sk_srcover_bgra_8888_sse2_lowp+0x328>
.byte 196,193,92,89,240 // vmulps %ymm8,%ymm4,%ymm6
.byte 197,221,114,215,8 // vpsrld $0x8,%ymm7,%ymm4
.byte 98,241,127,169,111,228 // vmovdqu8 %ymm4,%ymm4{%k1}{z}
@@ -14274,7 +14274,7 @@ _sk_gather_bgra_skx:
.byte 197,251,146,200 // kmovd %eax,%k1
.byte 98,209,127,169,111,192 // vmovdqu8 %ymm8,%ymm0{%k1}{z}
.byte 197,252,91,192 // vcvtdq2ps %ymm0,%ymm0
- .byte 196,226,125,24,29,53,61,3,0 // vbroadcastss 0x33d35(%rip),%ymm3 # 38a2c <_sk_srcover_bgra_8888_sse2_lowp+0x328>
+ .byte 196,226,125,24,29,165,62,3,0 // vbroadcastss 0x33ea5(%rip),%ymm3 # 38b9c <_sk_srcover_bgra_8888_sse2_lowp+0x328>
.byte 197,252,89,211 // vmulps %ymm3,%ymm0,%ymm2
.byte 196,193,125,114,208,8 // vpsrld $0x8,%ymm8,%ymm0
.byte 98,241,127,169,111,192 // vmovdqu8 %ymm0,%ymm0{%k1}{z}
@@ -14301,7 +14301,7 @@ _sk_store_bgra_skx:
.byte 73,193,224,2 // shl $0x2,%r8
.byte 76,3,0 // add (%rax),%r8
.byte 76,99,218 // movslq %edx,%r11
- .byte 196,98,125,24,5,138,60,3,0 // vbroadcastss 0x33c8a(%rip),%ymm8 # 389e0 <_sk_srcover_bgra_8888_sse2_lowp+0x2dc>
+ .byte 196,98,125,24,5,250,61,3,0 // vbroadcastss 0x33dfa(%rip),%ymm8 # 38b50 <_sk_srcover_bgra_8888_sse2_lowp+0x2dc>
.byte 196,65,108,89,200 // vmulps %ymm8,%ymm2,%ymm9
.byte 196,65,125,91,201 // vcvtps2dq %ymm9,%ymm9
.byte 196,65,116,89,208 // vmulps %ymm8,%ymm1,%ymm10
@@ -14357,7 +14357,7 @@ _sk_store_bgra_skx:
.byte 255 // (bad)
.byte 255 // (bad)
.byte 255 // (bad)
- .byte 233,255,255,255,219 // jmpq ffffffffdc004e38 <_sk_srcover_bgra_8888_sse2_lowp+0xffffffffdbfcc734>
+ .byte 233,255,255,255,219 // jmpq ffffffffdc004e38 <_sk_srcover_bgra_8888_sse2_lowp+0xffffffffdbfcc5c4>
.byte 255 // (bad)
.byte 255 // (bad)
.byte 255,205 // dec %ebp
@@ -14620,7 +14620,7 @@ _sk_load_u16_be_skx:
.byte 197,241,235,192 // vpor %xmm0,%xmm1,%xmm0
.byte 196,226,125,51,192 // vpmovzxwd %xmm0,%ymm0
.byte 197,252,91,192 // vcvtdq2ps %ymm0,%ymm0
- .byte 196,98,125,24,21,243,55,3,0 // vbroadcastss 0x337f3(%rip),%ymm10 # 38a48 <_sk_srcover_bgra_8888_sse2_lowp+0x344>
+ .byte 196,98,125,24,21,99,57,3,0 // vbroadcastss 0x33963(%rip),%ymm10 # 38bb8 <_sk_srcover_bgra_8888_sse2_lowp+0x344>
.byte 196,193,124,89,194 // vmulps %ymm10,%ymm0,%ymm0
.byte 197,185,109,202 // vpunpckhqdq %xmm2,%xmm8,%xmm1
.byte 197,233,113,241,8 // vpsllw $0x8,%xmm1,%xmm2
@@ -14711,7 +14711,7 @@ _sk_load_rgb_u16_be_skx:
.byte 197,241,235,192 // vpor %xmm0,%xmm1,%xmm0
.byte 196,226,125,51,192 // vpmovzxwd %xmm0,%ymm0
.byte 197,252,91,192 // vcvtdq2ps %ymm0,%ymm0
- .byte 196,98,125,24,13,101,54,3,0 // vbroadcastss 0x33665(%rip),%ymm9 # 38a48 <_sk_srcover_bgra_8888_sse2_lowp+0x344>
+ .byte 196,98,125,24,13,213,55,3,0 // vbroadcastss 0x337d5(%rip),%ymm9 # 38bb8 <_sk_srcover_bgra_8888_sse2_lowp+0x344>
.byte 196,193,124,89,193 // vmulps %ymm9,%ymm0,%ymm0
.byte 197,185,109,203 // vpunpckhqdq %xmm3,%xmm8,%xmm1
.byte 197,225,113,241,8 // vpsllw $0x8,%xmm1,%xmm3
@@ -14727,7 +14727,7 @@ _sk_load_rgb_u16_be_skx:
.byte 197,252,91,210 // vcvtdq2ps %ymm2,%ymm2
.byte 196,193,108,89,209 // vmulps %ymm9,%ymm2,%ymm2
.byte 72,173 // lods %ds:(%rsi),%rax
- .byte 196,226,125,24,29,133,53,3,0 // vbroadcastss 0x33585(%rip),%ymm3 # 389b4 <_sk_srcover_bgra_8888_sse2_lowp+0x2b0>
+ .byte 196,226,125,24,29,245,54,3,0 // vbroadcastss 0x336f5(%rip),%ymm3 # 38b24 <_sk_srcover_bgra_8888_sse2_lowp+0x2b0>
.byte 255,224 // jmpq *%rax
.byte 196,193,121,110,4,64 // vmovd (%r8,%rax,2),%xmm0
.byte 196,65,49,87,201 // vxorpd %xmm9,%xmm9,%xmm9
@@ -14803,7 +14803,7 @@ _sk_store_u16_be_skx:
.byte 77,1,192 // add %r8,%r8
.byte 76,3,0 // add (%rax),%r8
.byte 73,99,193 // movslq %r9d,%rax
- .byte 196,98,125,24,5,78,53,3,0 // vbroadcastss 0x3354e(%rip),%ymm8 # 38ad0 <_sk_srcover_bgra_8888_sse2_lowp+0x3cc>
+ .byte 196,98,125,24,5,190,54,3,0 // vbroadcastss 0x336be(%rip),%ymm8 # 38c40 <_sk_srcover_bgra_8888_sse2_lowp+0x3cc>
.byte 196,65,124,89,200 // vmulps %ymm8,%ymm0,%ymm9
.byte 196,65,125,91,201 // vcvtps2dq %ymm9,%ymm9
.byte 196,67,125,57,202,1 // vextracti128 $0x1,%ymm9,%xmm10
@@ -15145,7 +15145,7 @@ _sk_mirror_x_skx:
.byte 196,65,124,92,218 // vsubps %ymm10,%ymm0,%ymm11
.byte 196,193,58,88,192 // vaddss %xmm8,%xmm8,%xmm0
.byte 196,98,125,24,192 // vbroadcastss %xmm0,%ymm8
- .byte 197,178,89,5,187,46,3,0 // vmulss 0x32ebb(%rip),%xmm9,%xmm0 # 389b0 <_sk_srcover_bgra_8888_sse2_lowp+0x2ac>
+ .byte 197,178,89,5,43,48,3,0 // vmulss 0x3302b(%rip),%xmm9,%xmm0 # 38b20 <_sk_srcover_bgra_8888_sse2_lowp+0x2ac>
.byte 196,226,125,24,192 // vbroadcastss %xmm0,%ymm0
.byte 197,164,89,192 // vmulps %ymm0,%ymm11,%ymm0
.byte 196,227,125,8,192,1 // vroundps $0x1,%ymm0,%ymm0
@@ -15168,7 +15168,7 @@ _sk_mirror_y_skx:
.byte 196,65,116,92,218 // vsubps %ymm10,%ymm1,%ymm11
.byte 196,193,58,88,200 // vaddss %xmm8,%xmm8,%xmm1
.byte 196,98,125,24,193 // vbroadcastss %xmm1,%ymm8
- .byte 197,178,89,13,106,46,3,0 // vmulss 0x32e6a(%rip),%xmm9,%xmm1 # 389b0 <_sk_srcover_bgra_8888_sse2_lowp+0x2ac>
+ .byte 197,178,89,13,218,47,3,0 // vmulss 0x32fda(%rip),%xmm9,%xmm1 # 38b20 <_sk_srcover_bgra_8888_sse2_lowp+0x2ac>
.byte 196,226,125,24,201 // vbroadcastss %xmm1,%ymm1
.byte 197,164,89,201 // vmulps %ymm1,%ymm11,%ymm1
.byte 196,227,125,8,201,1 // vroundps $0x1,%ymm1,%ymm1
@@ -15186,7 +15186,7 @@ FUNCTION(_sk_clamp_x_1_skx)
_sk_clamp_x_1_skx:
.byte 196,65,60,87,192 // vxorps %ymm8,%ymm8,%ymm8
.byte 197,188,95,192 // vmaxps %ymm0,%ymm8,%ymm0
- .byte 98,241,124,56,93,5,49,46,3,0 // vminps 0x32e31(%rip){1to8},%ymm0,%ymm0 # 389b4 <_sk_srcover_bgra_8888_sse2_lowp+0x2b0>
+ .byte 98,241,124,56,93,5,161,47,3,0 // vminps 0x32fa1(%rip){1to8},%ymm0,%ymm0 # 38b24 <_sk_srcover_bgra_8888_sse2_lowp+0x2b0>
.byte 72,173 // lods %ds:(%rsi),%rax
.byte 255,224 // jmpq *%rax
@@ -15198,7 +15198,7 @@ _sk_repeat_x_1_skx:
.byte 196,193,124,92,192 // vsubps %ymm8,%ymm0,%ymm0
.byte 196,65,60,87,192 // vxorps %ymm8,%ymm8,%ymm8
.byte 197,188,95,192 // vmaxps %ymm0,%ymm8,%ymm0
- .byte 98,241,124,56,93,5,15,46,3,0 // vminps 0x32e0f(%rip){1to8},%ymm0,%ymm0 # 389b4 <_sk_srcover_bgra_8888_sse2_lowp+0x2b0>
+ .byte 98,241,124,56,93,5,127,47,3,0 // vminps 0x32f7f(%rip){1to8},%ymm0,%ymm0 # 38b24 <_sk_srcover_bgra_8888_sse2_lowp+0x2b0>
.byte 72,173 // lods %ds:(%rsi),%rax
.byte 255,224 // jmpq *%rax
@@ -15206,9 +15206,9 @@ HIDDEN _sk_mirror_x_1_skx
.globl _sk_mirror_x_1_skx
FUNCTION(_sk_mirror_x_1_skx)
_sk_mirror_x_1_skx:
- .byte 196,98,125,24,5,26,46,3,0 // vbroadcastss 0x32e1a(%rip),%ymm8 # 389cc <_sk_srcover_bgra_8888_sse2_lowp+0x2c8>
+ .byte 196,98,125,24,5,138,47,3,0 // vbroadcastss 0x32f8a(%rip),%ymm8 # 38b3c <_sk_srcover_bgra_8888_sse2_lowp+0x2c8>
.byte 196,193,124,88,192 // vaddps %ymm8,%ymm0,%ymm0
- .byte 98,113,124,56,89,13,239,45,3,0 // vmulps 0x32def(%rip){1to8},%ymm0,%ymm9 # 389b0 <_sk_srcover_bgra_8888_sse2_lowp+0x2ac>
+ .byte 98,113,124,56,89,13,95,47,3,0 // vmulps 0x32f5f(%rip){1to8},%ymm0,%ymm9 # 38b20 <_sk_srcover_bgra_8888_sse2_lowp+0x2ac>
.byte 196,67,125,8,201,1 // vroundps $0x1,%ymm9,%ymm9
.byte 196,65,52,88,201 // vaddps %ymm9,%ymm9,%ymm9
.byte 196,193,124,92,193 // vsubps %ymm9,%ymm0,%ymm0
@@ -15217,7 +15217,7 @@ _sk_mirror_x_1_skx:
.byte 197,60,92,200 // vsubps %ymm0,%ymm8,%ymm9
.byte 197,180,84,192 // vandps %ymm0,%ymm9,%ymm0
.byte 197,188,95,192 // vmaxps %ymm0,%ymm8,%ymm0
- .byte 98,241,124,56,93,5,195,45,3,0 // vminps 0x32dc3(%rip){1to8},%ymm0,%ymm0 # 389b4 <_sk_srcover_bgra_8888_sse2_lowp+0x2b0>
+ .byte 98,241,124,56,93,5,51,47,3,0 // vminps 0x32f33(%rip){1to8},%ymm0,%ymm0 # 38b24 <_sk_srcover_bgra_8888_sse2_lowp+0x2b0>
.byte 72,173 // lods %ds:(%rsi),%rax
.byte 255,224 // jmpq *%rax
@@ -15225,9 +15225,9 @@ HIDDEN _sk_luminance_to_alpha_skx
.globl _sk_luminance_to_alpha_skx
FUNCTION(_sk_luminance_to_alpha_skx)
_sk_luminance_to_alpha_skx:
- .byte 98,241,116,56,89,29,213,46,3,0 // vmulps 0x32ed5(%rip){1to8},%ymm1,%ymm3 # 38ad4 <_sk_srcover_bgra_8888_sse2_lowp+0x3d0>
- .byte 98,242,125,56,184,29,207,46,3,0 // vfmadd231ps 0x32ecf(%rip){1to8},%ymm0,%ymm3 # 38ad8 <_sk_srcover_bgra_8888_sse2_lowp+0x3d4>
- .byte 98,242,109,56,184,29,201,46,3,0 // vfmadd231ps 0x32ec9(%rip){1to8},%ymm2,%ymm3 # 38adc <_sk_srcover_bgra_8888_sse2_lowp+0x3d8>
+ .byte 98,241,116,56,89,29,69,48,3,0 // vmulps 0x33045(%rip){1to8},%ymm1,%ymm3 # 38c44 <_sk_srcover_bgra_8888_sse2_lowp+0x3d0>
+ .byte 98,242,125,56,184,29,63,48,3,0 // vfmadd231ps 0x3303f(%rip){1to8},%ymm0,%ymm3 # 38c48 <_sk_srcover_bgra_8888_sse2_lowp+0x3d4>
+ .byte 98,242,109,56,184,29,57,48,3,0 // vfmadd231ps 0x33039(%rip){1to8},%ymm2,%ymm3 # 38c4c <_sk_srcover_bgra_8888_sse2_lowp+0x3d8>
.byte 72,173 // lods %ds:(%rsi),%rax
.byte 197,252,87,192 // vxorps %ymm0,%ymm0,%ymm0
.byte 197,244,87,201 // vxorps %ymm1,%ymm1,%ymm1
@@ -15476,7 +15476,7 @@ _sk_gradient_skx:
.byte 76,139,72,72 // mov 0x48(%rax),%r9
.byte 197,244,87,201 // vxorps %ymm1,%ymm1,%ymm1
.byte 65,186,1,0,0,0 // mov $0x1,%r10d
- .byte 196,226,125,24,21,210,41,3,0 // vbroadcastss 0x329d2(%rip),%ymm2 # 389b8 <_sk_srcover_bgra_8888_sse2_lowp+0x2b4>
+ .byte 196,226,125,24,21,66,43,3,0 // vbroadcastss 0x32b42(%rip),%ymm2 # 38b28 <_sk_srcover_bgra_8888_sse2_lowp+0x2b4>
.byte 196,65,53,239,201 // vpxor %ymm9,%ymm9,%ymm9
.byte 196,130,125,24,28,145 // vbroadcastss (%r9,%r10,4),%ymm3
.byte 98,241,100,40,194,192,2 // vcmpleps %ymm0,%ymm3,%k0
@@ -15576,24 +15576,24 @@ _sk_xy_to_unit_angle_skx:
.byte 196,65,52,95,226 // vmaxps %ymm10,%ymm9,%ymm12
.byte 196,65,36,94,220 // vdivps %ymm12,%ymm11,%ymm11
.byte 196,65,36,89,227 // vmulps %ymm11,%ymm11,%ymm12
- .byte 196,98,125,24,45,84,41,3,0 // vbroadcastss 0x32954(%rip),%ymm13 # 38ae0 <_sk_srcover_bgra_8888_sse2_lowp+0x3dc>
- .byte 98,114,29,56,168,45,78,41,3,0 // vfmadd213ps 0x3294e(%rip){1to8},%ymm12,%ymm13 # 38ae4 <_sk_srcover_bgra_8888_sse2_lowp+0x3e0>
- .byte 98,114,29,56,168,45,72,41,3,0 // vfmadd213ps 0x32948(%rip){1to8},%ymm12,%ymm13 # 38ae8 <_sk_srcover_bgra_8888_sse2_lowp+0x3e4>
- .byte 98,114,29,56,168,45,66,41,3,0 // vfmadd213ps 0x32942(%rip){1to8},%ymm12,%ymm13 # 38aec <_sk_srcover_bgra_8888_sse2_lowp+0x3e8>
+ .byte 196,98,125,24,45,196,42,3,0 // vbroadcastss 0x32ac4(%rip),%ymm13 # 38c50 <_sk_srcover_bgra_8888_sse2_lowp+0x3dc>
+ .byte 98,114,29,56,168,45,190,42,3,0 // vfmadd213ps 0x32abe(%rip){1to8},%ymm12,%ymm13 # 38c54 <_sk_srcover_bgra_8888_sse2_lowp+0x3e0>
+ .byte 98,114,29,56,168,45,184,42,3,0 // vfmadd213ps 0x32ab8(%rip){1to8},%ymm12,%ymm13 # 38c58 <_sk_srcover_bgra_8888_sse2_lowp+0x3e4>
+ .byte 98,114,29,56,168,45,178,42,3,0 // vfmadd213ps 0x32ab2(%rip){1to8},%ymm12,%ymm13 # 38c5c <_sk_srcover_bgra_8888_sse2_lowp+0x3e8>
.byte 196,65,36,89,221 // vmulps %ymm13,%ymm11,%ymm11
.byte 98,209,52,40,194,194,1 // vcmpltps %ymm10,%ymm9,%k0
.byte 98,114,126,40,56,200 // vpmovm2d %k0,%ymm9
- .byte 196,98,125,24,21,43,41,3,0 // vbroadcastss 0x3292b(%rip),%ymm10 # 38af0 <_sk_srcover_bgra_8888_sse2_lowp+0x3ec>
+ .byte 196,98,125,24,21,155,42,3,0 // vbroadcastss 0x32a9b(%rip),%ymm10 # 38c60 <_sk_srcover_bgra_8888_sse2_lowp+0x3ec>
.byte 196,65,44,92,211 // vsubps %ymm11,%ymm10,%ymm10
.byte 196,67,37,74,202,144 // vblendvps %ymm9,%ymm10,%ymm11,%ymm9
.byte 98,209,124,40,194,192,1 // vcmpltps %ymm8,%ymm0,%k0
.byte 98,242,126,40,56,192 // vpmovm2d %k0,%ymm0
- .byte 196,98,125,24,21,202,39,3,0 // vbroadcastss 0x327ca(%rip),%ymm10 # 389b0 <_sk_srcover_bgra_8888_sse2_lowp+0x2ac>
+ .byte 196,98,125,24,21,58,41,3,0 // vbroadcastss 0x3293a(%rip),%ymm10 # 38b20 <_sk_srcover_bgra_8888_sse2_lowp+0x2ac>
.byte 196,65,44,92,209 // vsubps %ymm9,%ymm10,%ymm10
.byte 196,195,53,74,194,0 // vblendvps %ymm0,%ymm10,%ymm9,%ymm0
.byte 98,209,116,40,194,192,1 // vcmpltps %ymm8,%ymm1,%k0
.byte 98,114,126,40,56,200 // vpmovm2d %k0,%ymm9
- .byte 196,98,125,24,21,173,39,3,0 // vbroadcastss 0x327ad(%rip),%ymm10 # 389b4 <_sk_srcover_bgra_8888_sse2_lowp+0x2b0>
+ .byte 196,98,125,24,21,29,41,3,0 // vbroadcastss 0x3291d(%rip),%ymm10 # 38b24 <_sk_srcover_bgra_8888_sse2_lowp+0x2b0>
.byte 197,44,92,208 // vsubps %ymm0,%ymm10,%ymm10
.byte 196,195,125,74,194,144 // vblendvps %ymm9,%ymm10,%ymm0,%ymm0
.byte 98,209,124,40,194,192,3 // vcmpunordps %ymm8,%ymm0,%k0
@@ -15622,20 +15622,20 @@ _sk_xy_to_2pt_conical_quadratic_max_skx:
.byte 197,50,89,80,76 // vmulss 0x4c(%rax),%xmm9,%xmm10
.byte 196,66,125,24,210 // vbroadcastss %xmm10,%ymm10
.byte 197,44,88,208 // vaddps %ymm0,%ymm10,%ymm10
- .byte 98,113,44,56,89,21,149,40,3,0 // vmulps 0x32895(%rip){1to8},%ymm10,%ymm10 # 38af4 <_sk_srcover_bgra_8888_sse2_lowp+0x3f0>
+ .byte 98,113,44,56,89,21,5,42,3,0 // vmulps 0x32a05(%rip){1to8},%ymm10,%ymm10 # 38c64 <_sk_srcover_bgra_8888_sse2_lowp+0x3f0>
.byte 197,116,89,217 // vmulps %ymm1,%ymm1,%ymm11
.byte 196,98,125,184,216 // vfmadd231ps %ymm0,%ymm0,%ymm11
.byte 196,193,50,89,193 // vmulss %xmm9,%xmm9,%xmm0
.byte 196,226,125,24,192 // vbroadcastss %xmm0,%ymm0
.byte 197,164,92,192 // vsubps %ymm0,%ymm11,%ymm0
- .byte 98,113,60,56,89,5,120,40,3,0 // vmulps 0x32878(%rip){1to8},%ymm8,%ymm8 # 38af8 <_sk_srcover_bgra_8888_sse2_lowp+0x3f4>
+ .byte 98,113,60,56,89,5,232,41,3,0 // vmulps 0x329e8(%rip){1to8},%ymm8,%ymm8 # 38c68 <_sk_srcover_bgra_8888_sse2_lowp+0x3f4>
.byte 197,188,89,192 // vmulps %ymm0,%ymm8,%ymm0
.byte 196,194,45,184,194 // vfmadd231ps %ymm10,%ymm10,%ymm0
.byte 197,252,81,192 // vsqrtps %ymm0,%ymm0
.byte 196,98,125,24,64,68 // vbroadcastss 0x44(%rax),%ymm8
- .byte 98,113,44,56,87,13,95,40,3,0 // vxorps 0x3285f(%rip){1to8},%ymm10,%ymm9 # 38afc <_sk_srcover_bgra_8888_sse2_lowp+0x3f8>
+ .byte 98,113,44,56,87,13,207,41,3,0 // vxorps 0x329cf(%rip){1to8},%ymm10,%ymm9 # 38c6c <_sk_srcover_bgra_8888_sse2_lowp+0x3f8>
.byte 196,65,124,92,210 // vsubps %ymm10,%ymm0,%ymm10
- .byte 98,113,60,56,89,5,4,39,3,0 // vmulps 0x32704(%rip){1to8},%ymm8,%ymm8 # 389b0 <_sk_srcover_bgra_8888_sse2_lowp+0x2ac>
+ .byte 98,113,60,56,89,5,116,40,3,0 // vmulps 0x32874(%rip){1to8},%ymm8,%ymm8 # 38b20 <_sk_srcover_bgra_8888_sse2_lowp+0x2ac>
.byte 196,65,44,89,208 // vmulps %ymm8,%ymm10,%ymm10
.byte 197,180,92,192 // vsubps %ymm0,%ymm9,%ymm0
.byte 196,193,124,89,192 // vmulps %ymm8,%ymm0,%ymm0
@@ -15653,20 +15653,20 @@ _sk_xy_to_2pt_conical_quadratic_min_skx:
.byte 197,50,89,80,76 // vmulss 0x4c(%rax),%xmm9,%xmm10
.byte 196,66,125,24,210 // vbroadcastss %xmm10,%ymm10
.byte 197,44,88,208 // vaddps %ymm0,%ymm10,%ymm10
- .byte 98,113,44,56,89,21,13,40,3,0 // vmulps 0x3280d(%rip){1to8},%ymm10,%ymm10 # 38af4 <_sk_srcover_bgra_8888_sse2_lowp+0x3f0>
+ .byte 98,113,44,56,89,21,125,41,3,0 // vmulps 0x3297d(%rip){1to8},%ymm10,%ymm10 # 38c64 <_sk_srcover_bgra_8888_sse2_lowp+0x3f0>
.byte 197,116,89,217 // vmulps %ymm1,%ymm1,%ymm11
.byte 196,98,125,184,216 // vfmadd231ps %ymm0,%ymm0,%ymm11
.byte 196,193,50,89,193 // vmulss %xmm9,%xmm9,%xmm0
.byte 196,226,125,24,192 // vbroadcastss %xmm0,%ymm0
.byte 197,164,92,192 // vsubps %ymm0,%ymm11,%ymm0
- .byte 98,113,60,56,89,5,240,39,3,0 // vmulps 0x327f0(%rip){1to8},%ymm8,%ymm8 # 38af8 <_sk_srcover_bgra_8888_sse2_lowp+0x3f4>
+ .byte 98,113,60,56,89,5,96,41,3,0 // vmulps 0x32960(%rip){1to8},%ymm8,%ymm8 # 38c68 <_sk_srcover_bgra_8888_sse2_lowp+0x3f4>
.byte 197,188,89,192 // vmulps %ymm0,%ymm8,%ymm0
.byte 196,194,45,184,194 // vfmadd231ps %ymm10,%ymm10,%ymm0
.byte 197,252,81,192 // vsqrtps %ymm0,%ymm0
.byte 196,98,125,24,64,68 // vbroadcastss 0x44(%rax),%ymm8
- .byte 98,113,44,56,87,13,215,39,3,0 // vxorps 0x327d7(%rip){1to8},%ymm10,%ymm9 # 38afc <_sk_srcover_bgra_8888_sse2_lowp+0x3f8>
+ .byte 98,113,44,56,87,13,71,41,3,0 // vxorps 0x32947(%rip){1to8},%ymm10,%ymm9 # 38c6c <_sk_srcover_bgra_8888_sse2_lowp+0x3f8>
.byte 196,65,124,92,210 // vsubps %ymm10,%ymm0,%ymm10
- .byte 98,113,60,56,89,5,124,38,3,0 // vmulps 0x3267c(%rip){1to8},%ymm8,%ymm8 # 389b0 <_sk_srcover_bgra_8888_sse2_lowp+0x2ac>
+ .byte 98,113,60,56,89,5,236,39,3,0 // vmulps 0x327ec(%rip){1to8},%ymm8,%ymm8 # 38b20 <_sk_srcover_bgra_8888_sse2_lowp+0x2ac>
.byte 196,65,44,89,208 // vmulps %ymm8,%ymm10,%ymm10
.byte 197,180,92,192 // vsubps %ymm0,%ymm9,%ymm0
.byte 196,193,124,89,192 // vmulps %ymm8,%ymm0,%ymm0
@@ -15683,13 +15683,13 @@ _sk_xy_to_2pt_conical_linear_skx:
.byte 197,58,89,72,76 // vmulss 0x4c(%rax),%xmm8,%xmm9
.byte 196,66,125,24,201 // vbroadcastss %xmm9,%ymm9
.byte 197,52,88,200 // vaddps %ymm0,%ymm9,%ymm9
- .byte 98,113,52,56,89,13,139,39,3,0 // vmulps 0x3278b(%rip){1to8},%ymm9,%ymm9 # 38af4 <_sk_srcover_bgra_8888_sse2_lowp+0x3f0>
+ .byte 98,113,52,56,89,13,251,40,3,0 // vmulps 0x328fb(%rip){1to8},%ymm9,%ymm9 # 38c64 <_sk_srcover_bgra_8888_sse2_lowp+0x3f0>
.byte 197,116,89,209 // vmulps %ymm1,%ymm1,%ymm10
.byte 196,98,125,184,208 // vfmadd231ps %ymm0,%ymm0,%ymm10
.byte 196,193,58,89,192 // vmulss %xmm8,%xmm8,%xmm0
.byte 196,226,125,24,192 // vbroadcastss %xmm0,%ymm0
.byte 197,172,92,192 // vsubps %ymm0,%ymm10,%ymm0
- .byte 98,241,124,56,87,5,114,39,3,0 // vxorps 0x32772(%rip){1to8},%ymm0,%ymm0 # 38afc <_sk_srcover_bgra_8888_sse2_lowp+0x3f8>
+ .byte 98,241,124,56,87,5,226,40,3,0 // vxorps 0x328e2(%rip){1to8},%ymm0,%ymm0 # 38c6c <_sk_srcover_bgra_8888_sse2_lowp+0x3f8>
.byte 196,193,124,94,193 // vdivps %ymm9,%ymm0,%ymm0
.byte 72,173 // lods %ds:(%rsi),%rax
.byte 255,224 // jmpq *%rax
@@ -15731,7 +15731,7 @@ HIDDEN _sk_save_xy_skx
FUNCTION(_sk_save_xy_skx)
_sk_save_xy_skx:
.byte 72,173 // lods %ds:(%rsi),%rax
- .byte 196,98,125,24,5,179,37,3,0 // vbroadcastss 0x325b3(%rip),%ymm8 # 389b0 <_sk_srcover_bgra_8888_sse2_lowp+0x2ac>
+ .byte 196,98,125,24,5,35,39,3,0 // vbroadcastss 0x32723(%rip),%ymm8 # 38b20 <_sk_srcover_bgra_8888_sse2_lowp+0x2ac>
.byte 196,65,124,88,200 // vaddps %ymm8,%ymm0,%ymm9
.byte 196,67,125,8,209,1 // vroundps $0x1,%ymm9,%ymm10
.byte 196,65,52,92,202 // vsubps %ymm10,%ymm9,%ymm9
@@ -15766,8 +15766,8 @@ FUNCTION(_sk_bilinear_nx_skx)
_sk_bilinear_nx_skx:
.byte 72,173 // lods %ds:(%rsi),%rax
.byte 197,252,16,0 // vmovups (%rax),%ymm0
- .byte 98,241,124,56,88,5,136,38,3,0 // vaddps 0x32688(%rip){1to8},%ymm0,%ymm0 # 38b00 <_sk_srcover_bgra_8888_sse2_lowp+0x3fc>
- .byte 196,98,125,24,5,51,37,3,0 // vbroadcastss 0x32533(%rip),%ymm8 # 389b4 <_sk_srcover_bgra_8888_sse2_lowp+0x2b0>
+ .byte 98,241,124,56,88,5,248,39,3,0 // vaddps 0x327f8(%rip){1to8},%ymm0,%ymm0 # 38c70 <_sk_srcover_bgra_8888_sse2_lowp+0x3fc>
+ .byte 196,98,125,24,5,163,38,3,0 // vbroadcastss 0x326a3(%rip),%ymm8 # 38b24 <_sk_srcover_bgra_8888_sse2_lowp+0x2b0>
.byte 197,60,92,128,128,0,0,0 // vsubps 0x80(%rax),%ymm8,%ymm8
.byte 197,124,17,128,0,1,0,0 // vmovups %ymm8,0x100(%rax)
.byte 72,173 // lods %ds:(%rsi),%rax
@@ -15780,7 +15780,7 @@ _sk_bilinear_px_skx:
.byte 72,173 // lods %ds:(%rsi),%rax
.byte 197,252,16,0 // vmovups (%rax),%ymm0
.byte 197,124,16,128,128,0,0,0 // vmovups 0x80(%rax),%ymm8
- .byte 98,241,124,56,88,5,3,37,3,0 // vaddps 0x32503(%rip){1to8},%ymm0,%ymm0 # 389b0 <_sk_srcover_bgra_8888_sse2_lowp+0x2ac>
+ .byte 98,241,124,56,88,5,115,38,3,0 // vaddps 0x32673(%rip){1to8},%ymm0,%ymm0 # 38b20 <_sk_srcover_bgra_8888_sse2_lowp+0x2ac>
.byte 197,124,17,128,0,1,0,0 // vmovups %ymm8,0x100(%rax)
.byte 72,173 // lods %ds:(%rsi),%rax
.byte 255,224 // jmpq *%rax
@@ -15791,8 +15791,8 @@ FUNCTION(_sk_bilinear_ny_skx)
_sk_bilinear_ny_skx:
.byte 72,173 // lods %ds:(%rsi),%rax
.byte 197,252,16,72,64 // vmovups 0x40(%rax),%ymm1
- .byte 98,241,116,56,88,13,54,38,3,0 // vaddps 0x32636(%rip){1to8},%ymm1,%ymm1 # 38b00 <_sk_srcover_bgra_8888_sse2_lowp+0x3fc>
- .byte 196,98,125,24,5,225,36,3,0 // vbroadcastss 0x324e1(%rip),%ymm8 # 389b4 <_sk_srcover_bgra_8888_sse2_lowp+0x2b0>
+ .byte 98,241,116,56,88,13,166,39,3,0 // vaddps 0x327a6(%rip){1to8},%ymm1,%ymm1 # 38c70 <_sk_srcover_bgra_8888_sse2_lowp+0x3fc>
+ .byte 196,98,125,24,5,81,38,3,0 // vbroadcastss 0x32651(%rip),%ymm8 # 38b24 <_sk_srcover_bgra_8888_sse2_lowp+0x2b0>
.byte 197,60,92,128,192,0,0,0 // vsubps 0xc0(%rax),%ymm8,%ymm8
.byte 197,124,17,128,64,1,0,0 // vmovups %ymm8,0x140(%rax)
.byte 72,173 // lods %ds:(%rsi),%rax
@@ -15805,7 +15805,7 @@ _sk_bilinear_py_skx:
.byte 72,173 // lods %ds:(%rsi),%rax
.byte 197,252,16,72,64 // vmovups 0x40(%rax),%ymm1
.byte 197,124,16,128,192,0,0,0 // vmovups 0xc0(%rax),%ymm8
- .byte 98,241,116,56,88,13,176,36,3,0 // vaddps 0x324b0(%rip){1to8},%ymm1,%ymm1 # 389b0 <_sk_srcover_bgra_8888_sse2_lowp+0x2ac>
+ .byte 98,241,116,56,88,13,32,38,3,0 // vaddps 0x32620(%rip){1to8},%ymm1,%ymm1 # 38b20 <_sk_srcover_bgra_8888_sse2_lowp+0x2ac>
.byte 197,124,17,128,64,1,0,0 // vmovups %ymm8,0x140(%rax)
.byte 72,173 // lods %ds:(%rsi),%rax
.byte 255,224 // jmpq *%rax
@@ -15816,12 +15816,12 @@ FUNCTION(_sk_bicubic_n3x_skx)
_sk_bicubic_n3x_skx:
.byte 72,173 // lods %ds:(%rsi),%rax
.byte 197,252,16,0 // vmovups (%rax),%ymm0
- .byte 98,241,124,56,88,5,232,37,3,0 // vaddps 0x325e8(%rip){1to8},%ymm0,%ymm0 # 38b04 <_sk_srcover_bgra_8888_sse2_lowp+0x400>
- .byte 196,98,125,24,5,143,36,3,0 // vbroadcastss 0x3248f(%rip),%ymm8 # 389b4 <_sk_srcover_bgra_8888_sse2_lowp+0x2b0>
+ .byte 98,241,124,56,88,5,88,39,3,0 // vaddps 0x32758(%rip){1to8},%ymm0,%ymm0 # 38c74 <_sk_srcover_bgra_8888_sse2_lowp+0x400>
+ .byte 196,98,125,24,5,255,37,3,0 // vbroadcastss 0x325ff(%rip),%ymm8 # 38b24 <_sk_srcover_bgra_8888_sse2_lowp+0x2b0>
.byte 197,60,92,128,128,0,0,0 // vsubps 0x80(%rax),%ymm8,%ymm8
.byte 196,65,60,89,200 // vmulps %ymm8,%ymm8,%ymm9
- .byte 196,98,125,24,21,205,37,3,0 // vbroadcastss 0x325cd(%rip),%ymm10 # 38b08 <_sk_srcover_bgra_8888_sse2_lowp+0x404>
- .byte 98,114,61,56,168,21,227,36,3,0 // vfmadd213ps 0x324e3(%rip){1to8},%ymm8,%ymm10 # 38a28 <_sk_srcover_bgra_8888_sse2_lowp+0x324>
+ .byte 196,98,125,24,21,61,39,3,0 // vbroadcastss 0x3273d(%rip),%ymm10 # 38c78 <_sk_srcover_bgra_8888_sse2_lowp+0x404>
+ .byte 98,114,61,56,168,21,83,38,3,0 // vfmadd213ps 0x32653(%rip){1to8},%ymm8,%ymm10 # 38b98 <_sk_srcover_bgra_8888_sse2_lowp+0x324>
.byte 196,65,44,89,193 // vmulps %ymm9,%ymm10,%ymm8
.byte 197,124,17,128,0,1,0,0 // vmovups %ymm8,0x100(%rax)
.byte 72,173 // lods %ds:(%rsi),%rax
@@ -15833,13 +15833,13 @@ FUNCTION(_sk_bicubic_n1x_skx)
_sk_bicubic_n1x_skx:
.byte 72,173 // lods %ds:(%rsi),%rax
.byte 197,252,16,0 // vmovups (%rax),%ymm0
- .byte 98,241,124,56,88,5,154,37,3,0 // vaddps 0x3259a(%rip){1to8},%ymm0,%ymm0 # 38b00 <_sk_srcover_bgra_8888_sse2_lowp+0x3fc>
- .byte 196,98,125,24,5,69,36,3,0 // vbroadcastss 0x32445(%rip),%ymm8 # 389b4 <_sk_srcover_bgra_8888_sse2_lowp+0x2b0>
+ .byte 98,241,124,56,88,5,10,39,3,0 // vaddps 0x3270a(%rip){1to8},%ymm0,%ymm0 # 38c70 <_sk_srcover_bgra_8888_sse2_lowp+0x3fc>
+ .byte 196,98,125,24,5,181,37,3,0 // vbroadcastss 0x325b5(%rip),%ymm8 # 38b24 <_sk_srcover_bgra_8888_sse2_lowp+0x2b0>
.byte 197,60,92,128,128,0,0,0 // vsubps 0x80(%rax),%ymm8,%ymm8
- .byte 196,98,125,24,13,140,37,3,0 // vbroadcastss 0x3258c(%rip),%ymm9 # 38b0c <_sk_srcover_bgra_8888_sse2_lowp+0x408>
- .byte 98,114,61,56,168,13,134,37,3,0 // vfmadd213ps 0x32586(%rip){1to8},%ymm8,%ymm9 # 38b10 <_sk_srcover_bgra_8888_sse2_lowp+0x40c>
- .byte 98,114,61,56,168,13,28,36,3,0 // vfmadd213ps 0x3241c(%rip){1to8},%ymm8,%ymm9 # 389b0 <_sk_srcover_bgra_8888_sse2_lowp+0x2ac>
- .byte 98,114,61,56,168,13,118,37,3,0 // vfmadd213ps 0x32576(%rip){1to8},%ymm8,%ymm9 # 38b14 <_sk_srcover_bgra_8888_sse2_lowp+0x410>
+ .byte 196,98,125,24,13,252,38,3,0 // vbroadcastss 0x326fc(%rip),%ymm9 # 38c7c <_sk_srcover_bgra_8888_sse2_lowp+0x408>
+ .byte 98,114,61,56,168,13,246,38,3,0 // vfmadd213ps 0x326f6(%rip){1to8},%ymm8,%ymm9 # 38c80 <_sk_srcover_bgra_8888_sse2_lowp+0x40c>
+ .byte 98,114,61,56,168,13,140,37,3,0 // vfmadd213ps 0x3258c(%rip){1to8},%ymm8,%ymm9 # 38b20 <_sk_srcover_bgra_8888_sse2_lowp+0x2ac>
+ .byte 98,114,61,56,168,13,230,38,3,0 // vfmadd213ps 0x326e6(%rip){1to8},%ymm8,%ymm9 # 38c84 <_sk_srcover_bgra_8888_sse2_lowp+0x410>
.byte 197,124,17,136,0,1,0,0 // vmovups %ymm9,0x100(%rax)
.byte 72,173 // lods %ds:(%rsi),%rax
.byte 255,224 // jmpq *%rax
@@ -15849,13 +15849,13 @@ HIDDEN _sk_bicubic_p1x_skx
FUNCTION(_sk_bicubic_p1x_skx)
_sk_bicubic_p1x_skx:
.byte 72,173 // lods %ds:(%rsi),%rax
- .byte 196,98,125,24,5,251,35,3,0 // vbroadcastss 0x323fb(%rip),%ymm8 # 389b0 <_sk_srcover_bgra_8888_sse2_lowp+0x2ac>
+ .byte 196,98,125,24,5,107,37,3,0 // vbroadcastss 0x3256b(%rip),%ymm8 # 38b20 <_sk_srcover_bgra_8888_sse2_lowp+0x2ac>
.byte 197,188,88,0 // vaddps (%rax),%ymm8,%ymm0
.byte 197,124,16,136,128,0,0,0 // vmovups 0x80(%rax),%ymm9
- .byte 196,98,125,24,21,66,37,3,0 // vbroadcastss 0x32542(%rip),%ymm10 # 38b0c <_sk_srcover_bgra_8888_sse2_lowp+0x408>
- .byte 98,114,53,56,168,21,60,37,3,0 // vfmadd213ps 0x3253c(%rip){1to8},%ymm9,%ymm10 # 38b10 <_sk_srcover_bgra_8888_sse2_lowp+0x40c>
+ .byte 196,98,125,24,21,178,38,3,0 // vbroadcastss 0x326b2(%rip),%ymm10 # 38c7c <_sk_srcover_bgra_8888_sse2_lowp+0x408>
+ .byte 98,114,53,56,168,21,172,38,3,0 // vfmadd213ps 0x326ac(%rip){1to8},%ymm9,%ymm10 # 38c80 <_sk_srcover_bgra_8888_sse2_lowp+0x40c>
.byte 196,66,53,168,208 // vfmadd213ps %ymm8,%ymm9,%ymm10
- .byte 98,114,53,56,168,21,49,37,3,0 // vfmadd213ps 0x32531(%rip){1to8},%ymm9,%ymm10 # 38b14 <_sk_srcover_bgra_8888_sse2_lowp+0x410>
+ .byte 98,114,53,56,168,21,161,38,3,0 // vfmadd213ps 0x326a1(%rip){1to8},%ymm9,%ymm10 # 38c84 <_sk_srcover_bgra_8888_sse2_lowp+0x410>
.byte 197,124,17,144,0,1,0,0 // vmovups %ymm10,0x100(%rax)
.byte 72,173 // lods %ds:(%rsi),%rax
.byte 255,224 // jmpq *%rax
@@ -15867,10 +15867,10 @@ _sk_bicubic_p3x_skx:
.byte 72,173 // lods %ds:(%rsi),%rax
.byte 197,252,16,0 // vmovups (%rax),%ymm0
.byte 197,124,16,128,128,0,0,0 // vmovups 0x80(%rax),%ymm8
- .byte 98,241,124,56,88,5,9,37,3,0 // vaddps 0x32509(%rip){1to8},%ymm0,%ymm0 # 38b10 <_sk_srcover_bgra_8888_sse2_lowp+0x40c>
+ .byte 98,241,124,56,88,5,121,38,3,0 // vaddps 0x32679(%rip){1to8},%ymm0,%ymm0 # 38c80 <_sk_srcover_bgra_8888_sse2_lowp+0x40c>
.byte 196,65,60,89,200 // vmulps %ymm8,%ymm8,%ymm9
- .byte 196,98,125,24,21,243,36,3,0 // vbroadcastss 0x324f3(%rip),%ymm10 # 38b08 <_sk_srcover_bgra_8888_sse2_lowp+0x404>
- .byte 98,114,61,56,168,21,9,36,3,0 // vfmadd213ps 0x32409(%rip){1to8},%ymm8,%ymm10 # 38a28 <_sk_srcover_bgra_8888_sse2_lowp+0x324>
+ .byte 196,98,125,24,21,99,38,3,0 // vbroadcastss 0x32663(%rip),%ymm10 # 38c78 <_sk_srcover_bgra_8888_sse2_lowp+0x404>
+ .byte 98,114,61,56,168,21,121,37,3,0 // vfmadd213ps 0x32579(%rip){1to8},%ymm8,%ymm10 # 38b98 <_sk_srcover_bgra_8888_sse2_lowp+0x324>
.byte 196,65,52,89,194 // vmulps %ymm10,%ymm9,%ymm8
.byte 197,124,17,128,0,1,0,0 // vmovups %ymm8,0x100(%rax)
.byte 72,173 // lods %ds:(%rsi),%rax
@@ -15882,12 +15882,12 @@ FUNCTION(_sk_bicubic_n3y_skx)
_sk_bicubic_n3y_skx:
.byte 72,173 // lods %ds:(%rsi),%rax
.byte 197,252,16,72,64 // vmovups 0x40(%rax),%ymm1
- .byte 98,241,116,56,88,13,195,36,3,0 // vaddps 0x324c3(%rip){1to8},%ymm1,%ymm1 # 38b04 <_sk_srcover_bgra_8888_sse2_lowp+0x400>
- .byte 196,98,125,24,5,106,35,3,0 // vbroadcastss 0x3236a(%rip),%ymm8 # 389b4 <_sk_srcover_bgra_8888_sse2_lowp+0x2b0>
+ .byte 98,241,116,56,88,13,51,38,3,0 // vaddps 0x32633(%rip){1to8},%ymm1,%ymm1 # 38c74 <_sk_srcover_bgra_8888_sse2_lowp+0x400>
+ .byte 196,98,125,24,5,218,36,3,0 // vbroadcastss 0x324da(%rip),%ymm8 # 38b24 <_sk_srcover_bgra_8888_sse2_lowp+0x2b0>
.byte 197,60,92,128,192,0,0,0 // vsubps 0xc0(%rax),%ymm8,%ymm8
.byte 196,65,60,89,200 // vmulps %ymm8,%ymm8,%ymm9
- .byte 196,98,125,24,21,168,36,3,0 // vbroadcastss 0x324a8(%rip),%ymm10 # 38b08 <_sk_srcover_bgra_8888_sse2_lowp+0x404>
- .byte 98,114,61,56,168,21,190,35,3,0 // vfmadd213ps 0x323be(%rip){1to8},%ymm8,%ymm10 # 38a28 <_sk_srcover_bgra_8888_sse2_lowp+0x324>
+ .byte 196,98,125,24,21,24,38,3,0 // vbroadcastss 0x32618(%rip),%ymm10 # 38c78 <_sk_srcover_bgra_8888_sse2_lowp+0x404>
+ .byte 98,114,61,56,168,21,46,37,3,0 // vfmadd213ps 0x3252e(%rip){1to8},%ymm8,%ymm10 # 38b98 <_sk_srcover_bgra_8888_sse2_lowp+0x324>
.byte 196,65,44,89,193 // vmulps %ymm9,%ymm10,%ymm8
.byte 197,124,17,128,64,1,0,0 // vmovups %ymm8,0x140(%rax)
.byte 72,173 // lods %ds:(%rsi),%rax
@@ -15899,13 +15899,13 @@ FUNCTION(_sk_bicubic_n1y_skx)
_sk_bicubic_n1y_skx:
.byte 72,173 // lods %ds:(%rsi),%rax
.byte 197,252,16,72,64 // vmovups 0x40(%rax),%ymm1
- .byte 98,241,116,56,88,13,116,36,3,0 // vaddps 0x32474(%rip){1to8},%ymm1,%ymm1 # 38b00 <_sk_srcover_bgra_8888_sse2_lowp+0x3fc>
- .byte 196,98,125,24,5,31,35,3,0 // vbroadcastss 0x3231f(%rip),%ymm8 # 389b4 <_sk_srcover_bgra_8888_sse2_lowp+0x2b0>
+ .byte 98,241,116,56,88,13,228,37,3,0 // vaddps 0x325e4(%rip){1to8},%ymm1,%ymm1 # 38c70 <_sk_srcover_bgra_8888_sse2_lowp+0x3fc>
+ .byte 196,98,125,24,5,143,36,3,0 // vbroadcastss 0x3248f(%rip),%ymm8 # 38b24 <_sk_srcover_bgra_8888_sse2_lowp+0x2b0>
.byte 197,60,92,128,192,0,0,0 // vsubps 0xc0(%rax),%ymm8,%ymm8
- .byte 196,98,125,24,13,102,36,3,0 // vbroadcastss 0x32466(%rip),%ymm9 # 38b0c <_sk_srcover_bgra_8888_sse2_lowp+0x408>
- .byte 98,114,61,56,168,13,96,36,3,0 // vfmadd213ps 0x32460(%rip){1to8},%ymm8,%ymm9 # 38b10 <_sk_srcover_bgra_8888_sse2_lowp+0x40c>
- .byte 98,114,61,56,168,13,246,34,3,0 // vfmadd213ps 0x322f6(%rip){1to8},%ymm8,%ymm9 # 389b0 <_sk_srcover_bgra_8888_sse2_lowp+0x2ac>
- .byte 98,114,61,56,168,13,80,36,3,0 // vfmadd213ps 0x32450(%rip){1to8},%ymm8,%ymm9 # 38b14 <_sk_srcover_bgra_8888_sse2_lowp+0x410>
+ .byte 196,98,125,24,13,214,37,3,0 // vbroadcastss 0x325d6(%rip),%ymm9 # 38c7c <_sk_srcover_bgra_8888_sse2_lowp+0x408>
+ .byte 98,114,61,56,168,13,208,37,3,0 // vfmadd213ps 0x325d0(%rip){1to8},%ymm8,%ymm9 # 38c80 <_sk_srcover_bgra_8888_sse2_lowp+0x40c>
+ .byte 98,114,61,56,168,13,102,36,3,0 // vfmadd213ps 0x32466(%rip){1to8},%ymm8,%ymm9 # 38b20 <_sk_srcover_bgra_8888_sse2_lowp+0x2ac>
+ .byte 98,114,61,56,168,13,192,37,3,0 // vfmadd213ps 0x325c0(%rip){1to8},%ymm8,%ymm9 # 38c84 <_sk_srcover_bgra_8888_sse2_lowp+0x410>
.byte 197,124,17,136,64,1,0,0 // vmovups %ymm9,0x140(%rax)
.byte 72,173 // lods %ds:(%rsi),%rax
.byte 255,224 // jmpq *%rax
@@ -15915,13 +15915,13 @@ HIDDEN _sk_bicubic_p1y_skx
FUNCTION(_sk_bicubic_p1y_skx)
_sk_bicubic_p1y_skx:
.byte 72,173 // lods %ds:(%rsi),%rax
- .byte 196,98,125,24,5,213,34,3,0 // vbroadcastss 0x322d5(%rip),%ymm8 # 389b0 <_sk_srcover_bgra_8888_sse2_lowp+0x2ac>
+ .byte 196,98,125,24,5,69,36,3,0 // vbroadcastss 0x32445(%rip),%ymm8 # 38b20 <_sk_srcover_bgra_8888_sse2_lowp+0x2ac>
.byte 197,188,88,72,64 // vaddps 0x40(%rax),%ymm8,%ymm1
.byte 197,124,16,136,192,0,0,0 // vmovups 0xc0(%rax),%ymm9
- .byte 196,98,125,24,21,27,36,3,0 // vbroadcastss 0x3241b(%rip),%ymm10 # 38b0c <_sk_srcover_bgra_8888_sse2_lowp+0x408>
- .byte 98,114,53,56,168,21,21,36,3,0 // vfmadd213ps 0x32415(%rip){1to8},%ymm9,%ymm10 # 38b10 <_sk_srcover_bgra_8888_sse2_lowp+0x40c>
+ .byte 196,98,125,24,21,139,37,3,0 // vbroadcastss 0x3258b(%rip),%ymm10 # 38c7c <_sk_srcover_bgra_8888_sse2_lowp+0x408>
+ .byte 98,114,53,56,168,21,133,37,3,0 // vfmadd213ps 0x32585(%rip){1to8},%ymm9,%ymm10 # 38c80 <_sk_srcover_bgra_8888_sse2_lowp+0x40c>
.byte 196,66,53,168,208 // vfmadd213ps %ymm8,%ymm9,%ymm10
- .byte 98,114,53,56,168,21,10,36,3,0 // vfmadd213ps 0x3240a(%rip){1to8},%ymm9,%ymm10 # 38b14 <_sk_srcover_bgra_8888_sse2_lowp+0x410>
+ .byte 98,114,53,56,168,21,122,37,3,0 // vfmadd213ps 0x3257a(%rip){1to8},%ymm9,%ymm10 # 38c84 <_sk_srcover_bgra_8888_sse2_lowp+0x410>
.byte 197,124,17,144,64,1,0,0 // vmovups %ymm10,0x140(%rax)
.byte 72,173 // lods %ds:(%rsi),%rax
.byte 255,224 // jmpq *%rax
@@ -15933,10 +15933,10 @@ _sk_bicubic_p3y_skx:
.byte 72,173 // lods %ds:(%rsi),%rax
.byte 197,252,16,72,64 // vmovups 0x40(%rax),%ymm1
.byte 197,124,16,128,192,0,0,0 // vmovups 0xc0(%rax),%ymm8
- .byte 98,241,116,56,88,13,225,35,3,0 // vaddps 0x323e1(%rip){1to8},%ymm1,%ymm1 # 38b10 <_sk_srcover_bgra_8888_sse2_lowp+0x40c>
+ .byte 98,241,116,56,88,13,81,37,3,0 // vaddps 0x32551(%rip){1to8},%ymm1,%ymm1 # 38c80 <_sk_srcover_bgra_8888_sse2_lowp+0x40c>
.byte 196,65,60,89,200 // vmulps %ymm8,%ymm8,%ymm9
- .byte 196,98,125,24,21,203,35,3,0 // vbroadcastss 0x323cb(%rip),%ymm10 # 38b08 <_sk_srcover_bgra_8888_sse2_lowp+0x404>
- .byte 98,114,61,56,168,21,225,34,3,0 // vfmadd213ps 0x322e1(%rip){1to8},%ymm8,%ymm10 # 38a28 <_sk_srcover_bgra_8888_sse2_lowp+0x324>
+ .byte 196,98,125,24,21,59,37,3,0 // vbroadcastss 0x3253b(%rip),%ymm10 # 38c78 <_sk_srcover_bgra_8888_sse2_lowp+0x404>
+ .byte 98,114,61,56,168,21,81,36,3,0 // vfmadd213ps 0x32451(%rip){1to8},%ymm8,%ymm10 # 38b98 <_sk_srcover_bgra_8888_sse2_lowp+0x324>
.byte 196,65,52,89,194 // vmulps %ymm10,%ymm9,%ymm8
.byte 197,124,17,128,64,1,0,0 // vmovups %ymm8,0x140(%rax)
.byte 72,173 // lods %ds:(%rsi),%rax
@@ -16062,7 +16062,7 @@ _sk_clut_3D_skx:
.byte 98,226,61,40,64,224 // vpmulld %ymm0,%ymm8,%ymm20
.byte 98,209,93,32,254,193 // vpaddd %ymm9,%ymm20,%ymm0
.byte 72,139,0 // mov (%rax),%rax
- .byte 98,98,125,40,88,29,213,33,3,0 // vpbroadcastd 0x321d5(%rip),%ymm27 # 38b1c <_sk_srcover_bgra_8888_sse2_lowp+0x418>
+ .byte 98,98,125,40,88,29,69,35,3,0 // vpbroadcastd 0x32345(%rip),%ymm27 # 38c8c <_sk_srcover_bgra_8888_sse2_lowp+0x418>
.byte 98,146,125,40,64,195 // vpmulld %ymm27,%ymm0,%ymm0
.byte 196,65,45,239,210 // vpxor %ymm10,%ymm10,%ymm10
.byte 197,245,118,201 // vpcmpeqd %ymm1,%ymm1,%ymm1
@@ -16075,9 +16075,9 @@ _sk_clut_3D_skx:
.byte 196,65,37,118,219 // vpcmpeqd %ymm11,%ymm11,%ymm11
.byte 196,162,37,146,12,144 // vgatherdps %ymm11,(%rax,%ymm10,4),%ymm1
.byte 98,97,124,40,40,233 // vmovaps %ymm1,%ymm29
- .byte 196,98,125,24,21,139,33,3,0 // vbroadcastss 0x3218b(%rip),%ymm10 # 38b18 <_sk_srcover_bgra_8888_sse2_lowp+0x414>
+ .byte 196,98,125,24,21,251,34,3,0 // vbroadcastss 0x322fb(%rip),%ymm10 # 38c88 <_sk_srcover_bgra_8888_sse2_lowp+0x414>
.byte 98,81,60,32,88,218 // vaddps %ymm10,%ymm24,%ymm11
- .byte 98,226,125,40,88,5,35,32,3,0 // vpbroadcastd 0x32023(%rip),%ymm16 # 389c0 <_sk_srcover_bgra_8888_sse2_lowp+0x2bc>
+ .byte 98,226,125,40,88,5,147,33,3,0 // vpbroadcastd 0x32193(%rip),%ymm16 # 38b30 <_sk_srcover_bgra_8888_sse2_lowp+0x2bc>
.byte 98,177,125,40,254,192 // vpaddd %ymm16,%ymm0,%ymm0
.byte 197,244,87,201 // vxorps %ymm1,%ymm1,%ymm1
.byte 196,65,29,118,228 // vpcmpeqd %ymm12,%ymm12,%ymm12
@@ -16317,7 +16317,7 @@ _sk_clut_4D_skx:
.byte 98,162,45,40,64,229 // vpmulld %ymm21,%ymm10,%ymm20
.byte 98,241,93,32,254,193 // vpaddd %ymm1,%ymm20,%ymm0
.byte 72,139,0 // mov (%rax),%rax
- .byte 98,226,125,40,88,5,103,28,3,0 // vpbroadcastd 0x31c67(%rip),%ymm16 # 38b1c <_sk_srcover_bgra_8888_sse2_lowp+0x418>
+ .byte 98,226,125,40,88,5,215,29,3,0 // vpbroadcastd 0x31dd7(%rip),%ymm16 # 38c8c <_sk_srcover_bgra_8888_sse2_lowp+0x418>
.byte 98,50,125,40,64,216 // vpmulld %ymm16,%ymm0,%ymm11
.byte 196,65,28,87,228 // vxorps %ymm12,%ymm12,%ymm12
.byte 197,253,118,192 // vpcmpeqd %ymm0,%ymm0,%ymm0
@@ -16329,9 +16329,9 @@ _sk_clut_4D_skx:
.byte 196,65,29,118,228 // vpcmpeqd %ymm12,%ymm12,%ymm12
.byte 196,98,29,146,44,128 // vgatherdps %ymm12,(%rax,%ymm0,4),%ymm13
.byte 197,124,17,172,36,192,4,0,0 // vmovups %ymm13,0x4c0(%rsp)
- .byte 196,226,125,24,5,25,28,3,0 // vbroadcastss 0x31c19(%rip),%ymm0 # 38b18 <_sk_srcover_bgra_8888_sse2_lowp+0x414>
+ .byte 196,226,125,24,5,137,29,3,0 // vbroadcastss 0x31d89(%rip),%ymm0 # 38c88 <_sk_srcover_bgra_8888_sse2_lowp+0x414>
.byte 98,113,28,32,88,224 // vaddps %ymm0,%ymm28,%ymm12
- .byte 98,226,125,40,88,13,177,26,3,0 // vpbroadcastd 0x31ab1(%rip),%ymm17 # 389c0 <_sk_srcover_bgra_8888_sse2_lowp+0x2bc>
+ .byte 98,226,125,40,88,13,33,28,3,0 // vpbroadcastd 0x31c21(%rip),%ymm17 # 38b30 <_sk_srcover_bgra_8888_sse2_lowp+0x2bc>
.byte 98,49,37,40,254,217 // vpaddd %ymm17,%ymm11,%ymm11
.byte 197,236,87,210 // vxorps %ymm2,%ymm2,%ymm2
.byte 196,65,21,118,237 // vpcmpeqd %ymm13,%ymm13,%ymm13
@@ -16736,7 +16736,7 @@ _sk_clut_4D_skx:
.byte 197,228,92,214 // vsubps %ymm6,%ymm3,%ymm2
.byte 196,226,93,168,214 // vfmadd213ps %ymm6,%ymm4,%ymm2
.byte 72,173 // lods %ds:(%rsi),%rax
- .byte 196,226,125,24,29,79,17,3,0 // vbroadcastss 0x3114f(%rip),%ymm3 # 389b4 <_sk_srcover_bgra_8888_sse2_lowp+0x2b0>
+ .byte 196,226,125,24,29,191,18,3,0 // vbroadcastss 0x312bf(%rip),%ymm3 # 38b24 <_sk_srcover_bgra_8888_sse2_lowp+0x2b0>
.byte 98,145,124,40,40,227 // vmovaps %ymm27,%ymm4
.byte 197,252,16,108,36,128 // vmovups -0x80(%rsp),%ymm5
.byte 197,252,16,116,36,160 // vmovups -0x60(%rsp),%ymm6
@@ -16748,11 +16748,11 @@ HIDDEN _sk_gauss_a_to_rgba_skx
.globl _sk_gauss_a_to_rgba_skx
FUNCTION(_sk_gauss_a_to_rgba_skx)
_sk_gauss_a_to_rgba_skx:
- .byte 196,226,125,24,5,145,18,3,0 // vbroadcastss 0x31291(%rip),%ymm0 # 38b20 <_sk_srcover_bgra_8888_sse2_lowp+0x41c>
- .byte 98,242,101,56,168,5,139,18,3,0 // vfmadd213ps 0x3128b(%rip){1to8},%ymm3,%ymm0 # 38b24 <_sk_srcover_bgra_8888_sse2_lowp+0x420>
- .byte 98,242,101,56,168,5,133,18,3,0 // vfmadd213ps 0x31285(%rip){1to8},%ymm3,%ymm0 # 38b28 <_sk_srcover_bgra_8888_sse2_lowp+0x424>
- .byte 98,242,101,56,168,5,127,18,3,0 // vfmadd213ps 0x3127f(%rip){1to8},%ymm3,%ymm0 # 38b2c <_sk_srcover_bgra_8888_sse2_lowp+0x428>
- .byte 98,242,101,56,168,5,121,18,3,0 // vfmadd213ps 0x31279(%rip){1to8},%ymm3,%ymm0 # 38b30 <_sk_srcover_bgra_8888_sse2_lowp+0x42c>
+ .byte 196,226,125,24,5,1,20,3,0 // vbroadcastss 0x31401(%rip),%ymm0 # 38c90 <_sk_srcover_bgra_8888_sse2_lowp+0x41c>
+ .byte 98,242,101,56,168,5,251,19,3,0 // vfmadd213ps 0x313fb(%rip){1to8},%ymm3,%ymm0 # 38c94 <_sk_srcover_bgra_8888_sse2_lowp+0x420>
+ .byte 98,242,101,56,168,5,245,19,3,0 // vfmadd213ps 0x313f5(%rip){1to8},%ymm3,%ymm0 # 38c98 <_sk_srcover_bgra_8888_sse2_lowp+0x424>
+ .byte 98,242,101,56,168,5,239,19,3,0 // vfmadd213ps 0x313ef(%rip){1to8},%ymm3,%ymm0 # 38c9c <_sk_srcover_bgra_8888_sse2_lowp+0x428>
+ .byte 98,242,101,56,168,5,233,19,3,0 // vfmadd213ps 0x313e9(%rip){1to8},%ymm3,%ymm0 # 38ca0 <_sk_srcover_bgra_8888_sse2_lowp+0x42c>
.byte 72,173 // lods %ds:(%rsi),%rax
.byte 197,252,40,200 // vmovaps %ymm0,%ymm1
.byte 197,252,40,208 // vmovaps %ymm0,%ymm2
@@ -16852,10 +16852,10 @@ _sk_seed_shader_hsw:
.byte 197,249,110,201 // vmovd %ecx,%xmm1
.byte 196,226,125,88,201 // vpbroadcastd %xmm1,%ymm1
.byte 197,252,91,201 // vcvtdq2ps %ymm1,%ymm1
- .byte 196,226,125,24,21,226,15,3,0 // vbroadcastss 0x30fe2(%rip),%ymm2 # 389b0 <_sk_srcover_bgra_8888_sse2_lowp+0x2ac>
+ .byte 196,226,125,24,21,82,17,3,0 // vbroadcastss 0x31152(%rip),%ymm2 # 38b20 <_sk_srcover_bgra_8888_sse2_lowp+0x2ac>
.byte 197,244,88,202 // vaddps %ymm2,%ymm1,%ymm1
.byte 72,173 // lods %ds:(%rsi),%rax
- .byte 196,226,125,24,21,215,15,3,0 // vbroadcastss 0x30fd7(%rip),%ymm2 # 389b4 <_sk_srcover_bgra_8888_sse2_lowp+0x2b0>
+ .byte 196,226,125,24,21,71,17,3,0 // vbroadcastss 0x31147(%rip),%ymm2 # 38b24 <_sk_srcover_bgra_8888_sse2_lowp+0x2b0>
.byte 197,228,87,219 // vxorps %ymm3,%ymm3,%ymm3
.byte 197,220,87,228 // vxorps %ymm4,%ymm4,%ymm4
.byte 197,212,87,237 // vxorps %ymm5,%ymm5,%ymm5
@@ -16870,17 +16870,17 @@ _sk_dither_hsw:
.byte 72,173 // lods %ds:(%rsi),%rax
.byte 197,121,110,194 // vmovd %edx,%xmm8
.byte 196,66,125,88,192 // vpbroadcastd %xmm8,%ymm8
- .byte 197,61,254,5,122,17,3,0 // vpaddd 0x3117a(%rip),%ymm8,%ymm8 # 38b80 <_sk_srcover_bgra_8888_sse2_lowp+0x47c>
+ .byte 197,61,254,5,218,18,3,0 // vpaddd 0x312da(%rip),%ymm8,%ymm8 # 38ce0 <_sk_srcover_bgra_8888_sse2_lowp+0x46c>
.byte 197,121,110,201 // vmovd %ecx,%xmm9
.byte 196,66,125,88,201 // vpbroadcastd %xmm9,%ymm9
.byte 196,65,53,239,200 // vpxor %ymm8,%ymm9,%ymm9
- .byte 196,98,125,88,21,155,15,3,0 // vpbroadcastd 0x30f9b(%rip),%ymm10 # 389b8 <_sk_srcover_bgra_8888_sse2_lowp+0x2b4>
+ .byte 196,98,125,88,21,11,17,3,0 // vpbroadcastd 0x3110b(%rip),%ymm10 # 38b28 <_sk_srcover_bgra_8888_sse2_lowp+0x2b4>
.byte 196,65,53,219,218 // vpand %ymm10,%ymm9,%ymm11
.byte 196,193,37,114,243,5 // vpslld $0x5,%ymm11,%ymm11
.byte 196,65,61,219,210 // vpand %ymm10,%ymm8,%ymm10
.byte 196,193,45,114,242,4 // vpslld $0x4,%ymm10,%ymm10
- .byte 196,98,125,88,37,128,15,3,0 // vpbroadcastd 0x30f80(%rip),%ymm12 # 389bc <_sk_srcover_bgra_8888_sse2_lowp+0x2b8>
- .byte 196,98,125,88,45,123,15,3,0 // vpbroadcastd 0x30f7b(%rip),%ymm13 # 389c0 <_sk_srcover_bgra_8888_sse2_lowp+0x2bc>
+ .byte 196,98,125,88,37,240,16,3,0 // vpbroadcastd 0x310f0(%rip),%ymm12 # 38b2c <_sk_srcover_bgra_8888_sse2_lowp+0x2b8>
+ .byte 196,98,125,88,45,235,16,3,0 // vpbroadcastd 0x310eb(%rip),%ymm13 # 38b30 <_sk_srcover_bgra_8888_sse2_lowp+0x2bc>
.byte 196,65,53,219,245 // vpand %ymm13,%ymm9,%ymm14
.byte 196,193,13,114,246,2 // vpslld $0x2,%ymm14,%ymm14
.byte 196,65,37,235,222 // vpor %ymm14,%ymm11,%ymm11
@@ -16895,8 +16895,8 @@ _sk_dither_hsw:
.byte 196,65,61,235,195 // vpor %ymm11,%ymm8,%ymm8
.byte 196,65,61,235,193 // vpor %ymm9,%ymm8,%ymm8
.byte 196,65,124,91,192 // vcvtdq2ps %ymm8,%ymm8
- .byte 196,98,125,24,13,45,15,3,0 // vbroadcastss 0x30f2d(%rip),%ymm9 # 389c4 <_sk_srcover_bgra_8888_sse2_lowp+0x2c0>
- .byte 196,98,125,24,21,40,15,3,0 // vbroadcastss 0x30f28(%rip),%ymm10 # 389c8 <_sk_srcover_bgra_8888_sse2_lowp+0x2c4>
+ .byte 196,98,125,24,13,157,16,3,0 // vbroadcastss 0x3109d(%rip),%ymm9 # 38b34 <_sk_srcover_bgra_8888_sse2_lowp+0x2c0>
+ .byte 196,98,125,24,21,152,16,3,0 // vbroadcastss 0x31098(%rip),%ymm10 # 38b38 <_sk_srcover_bgra_8888_sse2_lowp+0x2c4>
.byte 196,66,61,184,209 // vfmadd231ps %ymm9,%ymm8,%ymm10
.byte 196,98,125,24,0 // vbroadcastss (%rax),%ymm8
.byte 196,65,44,89,192 // vmulps %ymm8,%ymm10,%ymm8
@@ -16930,7 +16930,7 @@ HIDDEN _sk_black_color_hsw
FUNCTION(_sk_black_color_hsw)
_sk_black_color_hsw:
.byte 72,173 // lods %ds:(%rsi),%rax
- .byte 196,226,125,24,29,176,14,3,0 // vbroadcastss 0x30eb0(%rip),%ymm3 # 389b4 <_sk_srcover_bgra_8888_sse2_lowp+0x2b0>
+ .byte 196,226,125,24,29,32,16,3,0 // vbroadcastss 0x31020(%rip),%ymm3 # 38b24 <_sk_srcover_bgra_8888_sse2_lowp+0x2b0>
.byte 197,252,87,192 // vxorps %ymm0,%ymm0,%ymm0
.byte 197,244,87,201 // vxorps %ymm1,%ymm1,%ymm1
.byte 197,236,87,210 // vxorps %ymm2,%ymm2,%ymm2
@@ -16941,7 +16941,7 @@ HIDDEN _sk_white_color_hsw
FUNCTION(_sk_white_color_hsw)
_sk_white_color_hsw:
.byte 72,173 // lods %ds:(%rsi),%rax
- .byte 196,226,125,24,5,151,14,3,0 // vbroadcastss 0x30e97(%rip),%ymm0 # 389b4 <_sk_srcover_bgra_8888_sse2_lowp+0x2b0>
+ .byte 196,226,125,24,5,7,16,3,0 // vbroadcastss 0x31007(%rip),%ymm0 # 38b24 <_sk_srcover_bgra_8888_sse2_lowp+0x2b0>
.byte 197,252,40,200 // vmovaps %ymm0,%ymm1
.byte 197,252,40,208 // vmovaps %ymm0,%ymm2
.byte 197,252,40,216 // vmovaps %ymm0,%ymm3
@@ -16987,7 +16987,7 @@ HIDDEN _sk_srcatop_hsw
FUNCTION(_sk_srcatop_hsw)
_sk_srcatop_hsw:
.byte 197,252,89,199 // vmulps %ymm7,%ymm0,%ymm0
- .byte 196,98,125,24,5,54,14,3,0 // vbroadcastss 0x30e36(%rip),%ymm8 # 389b4 <_sk_srcover_bgra_8888_sse2_lowp+0x2b0>
+ .byte 196,98,125,24,5,166,15,3,0 // vbroadcastss 0x30fa6(%rip),%ymm8 # 38b24 <_sk_srcover_bgra_8888_sse2_lowp+0x2b0>
.byte 197,60,92,195 // vsubps %ymm3,%ymm8,%ymm8
.byte 196,226,61,184,196 // vfmadd231ps %ymm4,%ymm8,%ymm0
.byte 197,244,89,207 // vmulps %ymm7,%ymm1,%ymm1
@@ -17003,7 +17003,7 @@ HIDDEN _sk_dstatop_hsw
.globl _sk_dstatop_hsw
FUNCTION(_sk_dstatop_hsw)
_sk_dstatop_hsw:
- .byte 196,98,125,24,5,5,14,3,0 // vbroadcastss 0x30e05(%rip),%ymm8 # 389b4 <_sk_srcover_bgra_8888_sse2_lowp+0x2b0>
+ .byte 196,98,125,24,5,117,15,3,0 // vbroadcastss 0x30f75(%rip),%ymm8 # 38b24 <_sk_srcover_bgra_8888_sse2_lowp+0x2b0>
.byte 197,60,92,199 // vsubps %ymm7,%ymm8,%ymm8
.byte 197,188,89,192 // vmulps %ymm0,%ymm8,%ymm0
.byte 196,226,101,184,196 // vfmadd231ps %ymm4,%ymm3,%ymm0
@@ -17042,7 +17042,7 @@ HIDDEN _sk_srcout_hsw
.globl _sk_srcout_hsw
FUNCTION(_sk_srcout_hsw)
_sk_srcout_hsw:
- .byte 196,98,125,24,5,168,13,3,0 // vbroadcastss 0x30da8(%rip),%ymm8 # 389b4 <_sk_srcover_bgra_8888_sse2_lowp+0x2b0>
+ .byte 196,98,125,24,5,24,15,3,0 // vbroadcastss 0x30f18(%rip),%ymm8 # 38b24 <_sk_srcover_bgra_8888_sse2_lowp+0x2b0>
.byte 197,60,92,199 // vsubps %ymm7,%ymm8,%ymm8
.byte 197,188,89,192 // vmulps %ymm0,%ymm8,%ymm0
.byte 197,188,89,201 // vmulps %ymm1,%ymm8,%ymm1
@@ -17055,7 +17055,7 @@ HIDDEN _sk_dstout_hsw
.globl _sk_dstout_hsw
FUNCTION(_sk_dstout_hsw)
_sk_dstout_hsw:
- .byte 196,226,125,24,5,135,13,3,0 // vbroadcastss 0x30d87(%rip),%ymm0 # 389b4 <_sk_srcover_bgra_8888_sse2_lowp+0x2b0>
+ .byte 196,226,125,24,5,247,14,3,0 // vbroadcastss 0x30ef7(%rip),%ymm0 # 38b24 <_sk_srcover_bgra_8888_sse2_lowp+0x2b0>
.byte 197,252,92,219 // vsubps %ymm3,%ymm0,%ymm3
.byte 197,228,89,196 // vmulps %ymm4,%ymm3,%ymm0
.byte 197,228,89,205 // vmulps %ymm5,%ymm3,%ymm1
@@ -17068,7 +17068,7 @@ HIDDEN _sk_srcover_hsw
.globl _sk_srcover_hsw
FUNCTION(_sk_srcover_hsw)
_sk_srcover_hsw:
- .byte 196,98,125,24,5,102,13,3,0 // vbroadcastss 0x30d66(%rip),%ymm8 # 389b4 <_sk_srcover_bgra_8888_sse2_lowp+0x2b0>
+ .byte 196,98,125,24,5,214,14,3,0 // vbroadcastss 0x30ed6(%rip),%ymm8 # 38b24 <_sk_srcover_bgra_8888_sse2_lowp+0x2b0>
.byte 197,60,92,195 // vsubps %ymm3,%ymm8,%ymm8
.byte 196,194,93,184,192 // vfmadd231ps %ymm8,%ymm4,%ymm0
.byte 196,194,85,184,200 // vfmadd231ps %ymm8,%ymm5,%ymm1
@@ -17081,7 +17081,7 @@ HIDDEN _sk_dstover_hsw
.globl _sk_dstover_hsw
FUNCTION(_sk_dstover_hsw)
_sk_dstover_hsw:
- .byte 196,98,125,24,5,65,13,3,0 // vbroadcastss 0x30d41(%rip),%ymm8 # 389b4 <_sk_srcover_bgra_8888_sse2_lowp+0x2b0>
+ .byte 196,98,125,24,5,177,14,3,0 // vbroadcastss 0x30eb1(%rip),%ymm8 # 38b24 <_sk_srcover_bgra_8888_sse2_lowp+0x2b0>
.byte 197,60,92,199 // vsubps %ymm7,%ymm8,%ymm8
.byte 196,226,61,168,196 // vfmadd213ps %ymm4,%ymm8,%ymm0
.byte 196,226,61,168,205 // vfmadd213ps %ymm5,%ymm8,%ymm1
@@ -17105,7 +17105,7 @@ HIDDEN _sk_multiply_hsw
.globl _sk_multiply_hsw
FUNCTION(_sk_multiply_hsw)
_sk_multiply_hsw:
- .byte 196,98,125,24,5,8,13,3,0 // vbroadcastss 0x30d08(%rip),%ymm8 # 389b4 <_sk_srcover_bgra_8888_sse2_lowp+0x2b0>
+ .byte 196,98,125,24,5,120,14,3,0 // vbroadcastss 0x30e78(%rip),%ymm8 # 38b24 <_sk_srcover_bgra_8888_sse2_lowp+0x2b0>
.byte 197,60,92,207 // vsubps %ymm7,%ymm8,%ymm9
.byte 197,52,89,208 // vmulps %ymm0,%ymm9,%ymm10
.byte 197,60,92,195 // vsubps %ymm3,%ymm8,%ymm8
@@ -17128,7 +17128,7 @@ HIDDEN _sk_plus__hsw
FUNCTION(_sk_plus__hsw)
_sk_plus__hsw:
.byte 197,252,88,196 // vaddps %ymm4,%ymm0,%ymm0
- .byte 196,98,125,24,5,183,12,3,0 // vbroadcastss 0x30cb7(%rip),%ymm8 # 389b4 <_sk_srcover_bgra_8888_sse2_lowp+0x2b0>
+ .byte 196,98,125,24,5,39,14,3,0 // vbroadcastss 0x30e27(%rip),%ymm8 # 38b24 <_sk_srcover_bgra_8888_sse2_lowp+0x2b0>
.byte 196,193,124,93,192 // vminps %ymm8,%ymm0,%ymm0
.byte 197,244,88,205 // vaddps %ymm5,%ymm1,%ymm1
.byte 196,193,116,93,200 // vminps %ymm8,%ymm1,%ymm1
@@ -17158,7 +17158,7 @@ HIDDEN _sk_xor__hsw
.globl _sk_xor__hsw
FUNCTION(_sk_xor__hsw)
_sk_xor__hsw:
- .byte 196,98,125,24,5,98,12,3,0 // vbroadcastss 0x30c62(%rip),%ymm8 # 389b4 <_sk_srcover_bgra_8888_sse2_lowp+0x2b0>
+ .byte 196,98,125,24,5,210,13,3,0 // vbroadcastss 0x30dd2(%rip),%ymm8 # 38b24 <_sk_srcover_bgra_8888_sse2_lowp+0x2b0>
.byte 197,60,92,207 // vsubps %ymm7,%ymm8,%ymm9
.byte 197,180,89,192 // vmulps %ymm0,%ymm9,%ymm0
.byte 197,60,92,195 // vsubps %ymm3,%ymm8,%ymm8
@@ -17192,7 +17192,7 @@ _sk_darken_hsw:
.byte 197,100,89,206 // vmulps %ymm6,%ymm3,%ymm9
.byte 196,193,108,95,209 // vmaxps %ymm9,%ymm2,%ymm2
.byte 197,188,92,210 // vsubps %ymm2,%ymm8,%ymm2
- .byte 196,98,125,24,5,230,11,3,0 // vbroadcastss 0x30be6(%rip),%ymm8 # 389b4 <_sk_srcover_bgra_8888_sse2_lowp+0x2b0>
+ .byte 196,98,125,24,5,86,13,3,0 // vbroadcastss 0x30d56(%rip),%ymm8 # 38b24 <_sk_srcover_bgra_8888_sse2_lowp+0x2b0>
.byte 197,60,92,195 // vsubps %ymm3,%ymm8,%ymm8
.byte 196,194,69,184,216 // vfmadd231ps %ymm8,%ymm7,%ymm3
.byte 72,173 // lods %ds:(%rsi),%rax
@@ -17217,7 +17217,7 @@ _sk_lighten_hsw:
.byte 197,100,89,206 // vmulps %ymm6,%ymm3,%ymm9
.byte 196,193,108,93,209 // vminps %ymm9,%ymm2,%ymm2
.byte 197,188,92,210 // vsubps %ymm2,%ymm8,%ymm2
- .byte 196,98,125,24,5,145,11,3,0 // vbroadcastss 0x30b91(%rip),%ymm8 # 389b4 <_sk_srcover_bgra_8888_sse2_lowp+0x2b0>
+ .byte 196,98,125,24,5,1,13,3,0 // vbroadcastss 0x30d01(%rip),%ymm8 # 38b24 <_sk_srcover_bgra_8888_sse2_lowp+0x2b0>
.byte 197,60,92,195 // vsubps %ymm3,%ymm8,%ymm8
.byte 196,194,69,184,216 // vfmadd231ps %ymm8,%ymm7,%ymm3
.byte 72,173 // lods %ds:(%rsi),%rax
@@ -17245,7 +17245,7 @@ _sk_difference_hsw:
.byte 196,193,108,93,209 // vminps %ymm9,%ymm2,%ymm2
.byte 197,236,88,210 // vaddps %ymm2,%ymm2,%ymm2
.byte 197,188,92,210 // vsubps %ymm2,%ymm8,%ymm2
- .byte 196,98,125,24,5,48,11,3,0 // vbroadcastss 0x30b30(%rip),%ymm8 # 389b4 <_sk_srcover_bgra_8888_sse2_lowp+0x2b0>
+ .byte 196,98,125,24,5,160,12,3,0 // vbroadcastss 0x30ca0(%rip),%ymm8 # 38b24 <_sk_srcover_bgra_8888_sse2_lowp+0x2b0>
.byte 197,60,92,195 // vsubps %ymm3,%ymm8,%ymm8
.byte 196,194,69,184,216 // vfmadd231ps %ymm8,%ymm7,%ymm3
.byte 72,173 // lods %ds:(%rsi),%rax
@@ -17267,7 +17267,7 @@ _sk_exclusion_hsw:
.byte 197,236,89,214 // vmulps %ymm6,%ymm2,%ymm2
.byte 197,236,88,210 // vaddps %ymm2,%ymm2,%ymm2
.byte 197,188,92,210 // vsubps %ymm2,%ymm8,%ymm2
- .byte 196,98,125,24,5,234,10,3,0 // vbroadcastss 0x30aea(%rip),%ymm8 # 389b4 <_sk_srcover_bgra_8888_sse2_lowp+0x2b0>
+ .byte 196,98,125,24,5,90,12,3,0 // vbroadcastss 0x30c5a(%rip),%ymm8 # 38b24 <_sk_srcover_bgra_8888_sse2_lowp+0x2b0>
.byte 197,60,92,195 // vsubps %ymm3,%ymm8,%ymm8
.byte 196,194,69,184,216 // vfmadd231ps %ymm8,%ymm7,%ymm3
.byte 72,173 // lods %ds:(%rsi),%rax
@@ -17277,7 +17277,7 @@ HIDDEN _sk_colorburn_hsw
.globl _sk_colorburn_hsw
FUNCTION(_sk_colorburn_hsw)
_sk_colorburn_hsw:
- .byte 196,98,125,24,5,212,10,3,0 // vbroadcastss 0x30ad4(%rip),%ymm8 # 389b4 <_sk_srcover_bgra_8888_sse2_lowp+0x2b0>
+ .byte 196,98,125,24,5,68,12,3,0 // vbroadcastss 0x30c44(%rip),%ymm8 # 38b24 <_sk_srcover_bgra_8888_sse2_lowp+0x2b0>
.byte 197,60,92,207 // vsubps %ymm7,%ymm8,%ymm9
.byte 197,52,89,216 // vmulps %ymm0,%ymm9,%ymm11
.byte 196,65,44,87,210 // vxorps %ymm10,%ymm10,%ymm10
@@ -17335,7 +17335,7 @@ HIDDEN _sk_colordodge_hsw
FUNCTION(_sk_colordodge_hsw)
_sk_colordodge_hsw:
.byte 196,65,60,87,192 // vxorps %ymm8,%ymm8,%ymm8
- .byte 196,98,125,24,13,228,9,3,0 // vbroadcastss 0x309e4(%rip),%ymm9 # 389b4 <_sk_srcover_bgra_8888_sse2_lowp+0x2b0>
+ .byte 196,98,125,24,13,84,11,3,0 // vbroadcastss 0x30b54(%rip),%ymm9 # 38b24 <_sk_srcover_bgra_8888_sse2_lowp+0x2b0>
.byte 197,52,92,215 // vsubps %ymm7,%ymm9,%ymm10
.byte 197,44,89,216 // vmulps %ymm0,%ymm10,%ymm11
.byte 197,52,92,203 // vsubps %ymm3,%ymm9,%ymm9
@@ -17388,7 +17388,7 @@ HIDDEN _sk_hardlight_hsw
.globl _sk_hardlight_hsw
FUNCTION(_sk_hardlight_hsw)
_sk_hardlight_hsw:
- .byte 196,98,125,24,5,254,8,3,0 // vbroadcastss 0x308fe(%rip),%ymm8 # 389b4 <_sk_srcover_bgra_8888_sse2_lowp+0x2b0>
+ .byte 196,98,125,24,5,110,10,3,0 // vbroadcastss 0x30a6e(%rip),%ymm8 # 38b24 <_sk_srcover_bgra_8888_sse2_lowp+0x2b0>
.byte 197,60,92,215 // vsubps %ymm7,%ymm8,%ymm10
.byte 197,44,89,216 // vmulps %ymm0,%ymm10,%ymm11
.byte 197,60,92,195 // vsubps %ymm3,%ymm8,%ymm8
@@ -17439,7 +17439,7 @@ HIDDEN _sk_overlay_hsw
.globl _sk_overlay_hsw
FUNCTION(_sk_overlay_hsw)
_sk_overlay_hsw:
- .byte 196,98,125,24,5,50,8,3,0 // vbroadcastss 0x30832(%rip),%ymm8 # 389b4 <_sk_srcover_bgra_8888_sse2_lowp+0x2b0>
+ .byte 196,98,125,24,5,162,9,3,0 // vbroadcastss 0x309a2(%rip),%ymm8 # 38b24 <_sk_srcover_bgra_8888_sse2_lowp+0x2b0>
.byte 197,60,92,215 // vsubps %ymm7,%ymm8,%ymm10
.byte 197,44,89,216 // vmulps %ymm0,%ymm10,%ymm11
.byte 197,60,92,195 // vsubps %ymm3,%ymm8,%ymm8
@@ -17500,10 +17500,10 @@ _sk_softlight_hsw:
.byte 196,65,20,88,197 // vaddps %ymm13,%ymm13,%ymm8
.byte 196,65,60,88,192 // vaddps %ymm8,%ymm8,%ymm8
.byte 196,66,61,168,192 // vfmadd213ps %ymm8,%ymm8,%ymm8
- .byte 196,98,125,24,29,77,7,3,0 // vbroadcastss 0x3074d(%rip),%ymm11 # 389cc <_sk_srcover_bgra_8888_sse2_lowp+0x2c8>
+ .byte 196,98,125,24,29,189,8,3,0 // vbroadcastss 0x308bd(%rip),%ymm11 # 38b3c <_sk_srcover_bgra_8888_sse2_lowp+0x2c8>
.byte 196,65,20,88,227 // vaddps %ymm11,%ymm13,%ymm12
.byte 196,65,28,89,192 // vmulps %ymm8,%ymm12,%ymm8
- .byte 196,98,125,24,37,62,7,3,0 // vbroadcastss 0x3073e(%rip),%ymm12 # 389d0 <_sk_srcover_bgra_8888_sse2_lowp+0x2cc>
+ .byte 196,98,125,24,37,174,8,3,0 // vbroadcastss 0x308ae(%rip),%ymm12 # 38b40 <_sk_srcover_bgra_8888_sse2_lowp+0x2cc>
.byte 196,66,21,184,196 // vfmadd231ps %ymm12,%ymm13,%ymm8
.byte 196,65,124,82,245 // vrsqrtps %ymm13,%ymm14
.byte 196,65,124,83,246 // vrcpps %ymm14,%ymm14
@@ -17513,7 +17513,7 @@ _sk_softlight_hsw:
.byte 197,4,194,255,2 // vcmpleps %ymm7,%ymm15,%ymm15
.byte 196,67,13,74,240,240 // vblendvps %ymm15,%ymm8,%ymm14,%ymm14
.byte 197,116,88,249 // vaddps %ymm1,%ymm1,%ymm15
- .byte 196,98,125,24,5,237,6,3,0 // vbroadcastss 0x306ed(%rip),%ymm8 # 389b4 <_sk_srcover_bgra_8888_sse2_lowp+0x2b0>
+ .byte 196,98,125,24,5,93,8,3,0 // vbroadcastss 0x3085d(%rip),%ymm8 # 38b24 <_sk_srcover_bgra_8888_sse2_lowp+0x2b0>
.byte 196,65,60,92,237 // vsubps %ymm13,%ymm8,%ymm13
.byte 197,132,92,195 // vsubps %ymm3,%ymm15,%ymm0
.byte 196,98,125,168,235 // vfmadd213ps %ymm3,%ymm0,%ymm13
@@ -17626,11 +17626,11 @@ _sk_hue_hsw:
.byte 196,65,28,89,210 // vmulps %ymm10,%ymm12,%ymm10
.byte 196,65,44,94,214 // vdivps %ymm14,%ymm10,%ymm10
.byte 196,67,45,74,224,240 // vblendvps %ymm15,%ymm8,%ymm10,%ymm12
- .byte 196,98,125,24,53,5,5,3,0 // vbroadcastss 0x30505(%rip),%ymm14 # 389d4 <_sk_srcover_bgra_8888_sse2_lowp+0x2d0>
- .byte 196,98,125,24,61,0,5,3,0 // vbroadcastss 0x30500(%rip),%ymm15 # 389d8 <_sk_srcover_bgra_8888_sse2_lowp+0x2d4>
+ .byte 196,98,125,24,53,117,6,3,0 // vbroadcastss 0x30675(%rip),%ymm14 # 38b44 <_sk_srcover_bgra_8888_sse2_lowp+0x2d0>
+ .byte 196,98,125,24,61,112,6,3,0 // vbroadcastss 0x30670(%rip),%ymm15 # 38b48 <_sk_srcover_bgra_8888_sse2_lowp+0x2d4>
.byte 196,65,84,89,239 // vmulps %ymm15,%ymm5,%ymm13
.byte 196,66,93,184,238 // vfmadd231ps %ymm14,%ymm4,%ymm13
- .byte 196,226,125,24,5,241,4,3,0 // vbroadcastss 0x304f1(%rip),%ymm0 # 389dc <_sk_srcover_bgra_8888_sse2_lowp+0x2d8>
+ .byte 196,226,125,24,5,97,6,3,0 // vbroadcastss 0x30661(%rip),%ymm0 # 38b4c <_sk_srcover_bgra_8888_sse2_lowp+0x2d8>
.byte 196,98,77,184,232 // vfmadd231ps %ymm0,%ymm6,%ymm13
.byte 196,65,116,89,215 // vmulps %ymm15,%ymm1,%ymm10
.byte 196,66,53,184,214 // vfmadd231ps %ymm14,%ymm9,%ymm10
@@ -17685,7 +17685,7 @@ _sk_hue_hsw:
.byte 196,193,124,95,192 // vmaxps %ymm8,%ymm0,%ymm0
.byte 196,65,36,95,200 // vmaxps %ymm8,%ymm11,%ymm9
.byte 196,65,116,95,192 // vmaxps %ymm8,%ymm1,%ymm8
- .byte 196,226,125,24,13,178,3,3,0 // vbroadcastss 0x303b2(%rip),%ymm1 # 389b4 <_sk_srcover_bgra_8888_sse2_lowp+0x2b0>
+ .byte 196,226,125,24,13,34,5,3,0 // vbroadcastss 0x30522(%rip),%ymm1 # 38b24 <_sk_srcover_bgra_8888_sse2_lowp+0x2b0>
.byte 197,116,92,215 // vsubps %ymm7,%ymm1,%ymm10
.byte 197,172,89,210 // vmulps %ymm2,%ymm10,%ymm2
.byte 197,116,92,219 // vsubps %ymm3,%ymm1,%ymm11
@@ -17739,11 +17739,11 @@ _sk_saturation_hsw:
.byte 196,65,28,89,210 // vmulps %ymm10,%ymm12,%ymm10
.byte 196,65,44,94,214 // vdivps %ymm14,%ymm10,%ymm10
.byte 196,67,45,74,224,240 // vblendvps %ymm15,%ymm8,%ymm10,%ymm12
- .byte 196,98,125,24,53,229,2,3,0 // vbroadcastss 0x302e5(%rip),%ymm14 # 389d4 <_sk_srcover_bgra_8888_sse2_lowp+0x2d0>
- .byte 196,98,125,24,61,224,2,3,0 // vbroadcastss 0x302e0(%rip),%ymm15 # 389d8 <_sk_srcover_bgra_8888_sse2_lowp+0x2d4>
+ .byte 196,98,125,24,53,85,4,3,0 // vbroadcastss 0x30455(%rip),%ymm14 # 38b44 <_sk_srcover_bgra_8888_sse2_lowp+0x2d0>
+ .byte 196,98,125,24,61,80,4,3,0 // vbroadcastss 0x30450(%rip),%ymm15 # 38b48 <_sk_srcover_bgra_8888_sse2_lowp+0x2d4>
.byte 196,65,84,89,239 // vmulps %ymm15,%ymm5,%ymm13
.byte 196,66,93,184,238 // vfmadd231ps %ymm14,%ymm4,%ymm13
- .byte 196,226,125,24,5,209,2,3,0 // vbroadcastss 0x302d1(%rip),%ymm0 # 389dc <_sk_srcover_bgra_8888_sse2_lowp+0x2d8>
+ .byte 196,226,125,24,5,65,4,3,0 // vbroadcastss 0x30441(%rip),%ymm0 # 38b4c <_sk_srcover_bgra_8888_sse2_lowp+0x2d8>
.byte 196,98,77,184,232 // vfmadd231ps %ymm0,%ymm6,%ymm13
.byte 196,65,116,89,215 // vmulps %ymm15,%ymm1,%ymm10
.byte 196,66,53,184,214 // vfmadd231ps %ymm14,%ymm9,%ymm10
@@ -17798,7 +17798,7 @@ _sk_saturation_hsw:
.byte 196,193,124,95,192 // vmaxps %ymm8,%ymm0,%ymm0
.byte 196,65,36,95,200 // vmaxps %ymm8,%ymm11,%ymm9
.byte 196,65,116,95,192 // vmaxps %ymm8,%ymm1,%ymm8
- .byte 196,226,125,24,13,146,1,3,0 // vbroadcastss 0x30192(%rip),%ymm1 # 389b4 <_sk_srcover_bgra_8888_sse2_lowp+0x2b0>
+ .byte 196,226,125,24,13,2,3,3,0 // vbroadcastss 0x30302(%rip),%ymm1 # 38b24 <_sk_srcover_bgra_8888_sse2_lowp+0x2b0>
.byte 197,116,92,215 // vsubps %ymm7,%ymm1,%ymm10
.byte 197,172,89,210 // vmulps %ymm2,%ymm10,%ymm2
.byte 197,116,92,219 // vsubps %ymm3,%ymm1,%ymm11
@@ -17826,11 +17826,11 @@ _sk_color_hsw:
.byte 197,108,89,199 // vmulps %ymm7,%ymm2,%ymm8
.byte 197,116,89,215 // vmulps %ymm7,%ymm1,%ymm10
.byte 197,52,89,223 // vmulps %ymm7,%ymm9,%ymm11
- .byte 196,98,125,24,45,71,1,3,0 // vbroadcastss 0x30147(%rip),%ymm13 # 389d4 <_sk_srcover_bgra_8888_sse2_lowp+0x2d0>
- .byte 196,98,125,24,53,66,1,3,0 // vbroadcastss 0x30142(%rip),%ymm14 # 389d8 <_sk_srcover_bgra_8888_sse2_lowp+0x2d4>
+ .byte 196,98,125,24,45,183,2,3,0 // vbroadcastss 0x302b7(%rip),%ymm13 # 38b44 <_sk_srcover_bgra_8888_sse2_lowp+0x2d0>
+ .byte 196,98,125,24,53,178,2,3,0 // vbroadcastss 0x302b2(%rip),%ymm14 # 38b48 <_sk_srcover_bgra_8888_sse2_lowp+0x2d4>
.byte 196,65,84,89,230 // vmulps %ymm14,%ymm5,%ymm12
.byte 196,66,93,184,229 // vfmadd231ps %ymm13,%ymm4,%ymm12
- .byte 196,98,125,24,61,51,1,3,0 // vbroadcastss 0x30133(%rip),%ymm15 # 389dc <_sk_srcover_bgra_8888_sse2_lowp+0x2d8>
+ .byte 196,98,125,24,61,163,2,3,0 // vbroadcastss 0x302a3(%rip),%ymm15 # 38b4c <_sk_srcover_bgra_8888_sse2_lowp+0x2d8>
.byte 196,66,77,184,231 // vfmadd231ps %ymm15,%ymm6,%ymm12
.byte 196,65,44,89,206 // vmulps %ymm14,%ymm10,%ymm9
.byte 196,66,61,184,205 // vfmadd231ps %ymm13,%ymm8,%ymm9
@@ -17886,7 +17886,7 @@ _sk_color_hsw:
.byte 196,193,116,95,206 // vmaxps %ymm14,%ymm1,%ymm1
.byte 196,65,44,95,198 // vmaxps %ymm14,%ymm10,%ymm8
.byte 196,65,124,95,206 // vmaxps %ymm14,%ymm0,%ymm9
- .byte 196,226,125,24,5,233,255,2,0 // vbroadcastss 0x2ffe9(%rip),%ymm0 # 389b4 <_sk_srcover_bgra_8888_sse2_lowp+0x2b0>
+ .byte 196,226,125,24,5,89,1,3,0 // vbroadcastss 0x30159(%rip),%ymm0 # 38b24 <_sk_srcover_bgra_8888_sse2_lowp+0x2b0>
.byte 197,124,92,215 // vsubps %ymm7,%ymm0,%ymm10
.byte 197,172,89,210 // vmulps %ymm2,%ymm10,%ymm2
.byte 197,124,92,219 // vsubps %ymm3,%ymm0,%ymm11
@@ -17914,11 +17914,11 @@ _sk_luminosity_hsw:
.byte 197,100,89,196 // vmulps %ymm4,%ymm3,%ymm8
.byte 197,100,89,213 // vmulps %ymm5,%ymm3,%ymm10
.byte 197,100,89,222 // vmulps %ymm6,%ymm3,%ymm11
- .byte 196,98,125,24,45,158,255,2,0 // vbroadcastss 0x2ff9e(%rip),%ymm13 # 389d4 <_sk_srcover_bgra_8888_sse2_lowp+0x2d0>
- .byte 196,98,125,24,53,153,255,2,0 // vbroadcastss 0x2ff99(%rip),%ymm14 # 389d8 <_sk_srcover_bgra_8888_sse2_lowp+0x2d4>
+ .byte 196,98,125,24,45,14,1,3,0 // vbroadcastss 0x3010e(%rip),%ymm13 # 38b44 <_sk_srcover_bgra_8888_sse2_lowp+0x2d0>
+ .byte 196,98,125,24,53,9,1,3,0 // vbroadcastss 0x30109(%rip),%ymm14 # 38b48 <_sk_srcover_bgra_8888_sse2_lowp+0x2d4>
.byte 196,65,116,89,230 // vmulps %ymm14,%ymm1,%ymm12
.byte 196,66,109,184,229 // vfmadd231ps %ymm13,%ymm2,%ymm12
- .byte 196,98,125,24,61,138,255,2,0 // vbroadcastss 0x2ff8a(%rip),%ymm15 # 389dc <_sk_srcover_bgra_8888_sse2_lowp+0x2d8>
+ .byte 196,98,125,24,61,250,0,3,0 // vbroadcastss 0x300fa(%rip),%ymm15 # 38b4c <_sk_srcover_bgra_8888_sse2_lowp+0x2d8>
.byte 196,66,53,184,231 // vfmadd231ps %ymm15,%ymm9,%ymm12
.byte 196,65,44,89,206 // vmulps %ymm14,%ymm10,%ymm9
.byte 196,66,61,184,205 // vfmadd231ps %ymm13,%ymm8,%ymm9
@@ -17974,7 +17974,7 @@ _sk_luminosity_hsw:
.byte 196,193,116,95,206 // vmaxps %ymm14,%ymm1,%ymm1
.byte 196,65,44,95,198 // vmaxps %ymm14,%ymm10,%ymm8
.byte 196,65,124,95,206 // vmaxps %ymm14,%ymm0,%ymm9
- .byte 196,226,125,24,5,64,254,2,0 // vbroadcastss 0x2fe40(%rip),%ymm0 # 389b4 <_sk_srcover_bgra_8888_sse2_lowp+0x2b0>
+ .byte 196,226,125,24,5,176,255,2,0 // vbroadcastss 0x2ffb0(%rip),%ymm0 # 38b24 <_sk_srcover_bgra_8888_sse2_lowp+0x2b0>
.byte 197,124,92,215 // vsubps %ymm7,%ymm0,%ymm10
.byte 197,172,89,210 // vmulps %ymm2,%ymm10,%ymm2
.byte 197,124,92,219 // vsubps %ymm3,%ymm0,%ymm11
@@ -18005,17 +18005,17 @@ _sk_srcover_rgba_8888_hsw:
.byte 72,133,255 // test %rdi,%rdi
.byte 15,133,179,0,0,0 // jne 8c89 <_sk_srcover_rgba_8888_hsw+0xd3>
.byte 196,129,126,111,60,152 // vmovdqu (%r8,%r11,4),%ymm7
- .byte 197,197,219,37,188,255,2,0 // vpand 0x2ffbc(%rip),%ymm7,%ymm4 # 38ba0 <_sk_srcover_bgra_8888_sse2_lowp+0x49c>
+ .byte 197,197,219,37,28,1,3,0 // vpand 0x3011c(%rip),%ymm7,%ymm4 # 38d00 <_sk_srcover_bgra_8888_sse2_lowp+0x48c>
.byte 197,252,91,228 // vcvtdq2ps %ymm4,%ymm4
- .byte 196,226,69,0,45,207,255,2,0 // vpshufb 0x2ffcf(%rip),%ymm7,%ymm5 # 38bc0 <_sk_srcover_bgra_8888_sse2_lowp+0x4bc>
+ .byte 196,226,69,0,45,47,1,3,0 // vpshufb 0x3012f(%rip),%ymm7,%ymm5 # 38d20 <_sk_srcover_bgra_8888_sse2_lowp+0x4ac>
.byte 197,252,91,237 // vcvtdq2ps %ymm5,%ymm5
- .byte 196,226,69,0,53,226,255,2,0 // vpshufb 0x2ffe2(%rip),%ymm7,%ymm6 # 38be0 <_sk_srcover_bgra_8888_sse2_lowp+0x4dc>
+ .byte 196,226,69,0,53,66,1,3,0 // vpshufb 0x30142(%rip),%ymm7,%ymm6 # 38d40 <_sk_srcover_bgra_8888_sse2_lowp+0x4cc>
.byte 197,252,91,246 // vcvtdq2ps %ymm6,%ymm6
.byte 197,197,114,215,24 // vpsrld $0x18,%ymm7,%ymm7
.byte 197,252,91,255 // vcvtdq2ps %ymm7,%ymm7
- .byte 196,98,125,24,5,160,253,2,0 // vbroadcastss 0x2fda0(%rip),%ymm8 # 389b4 <_sk_srcover_bgra_8888_sse2_lowp+0x2b0>
+ .byte 196,98,125,24,5,16,255,2,0 // vbroadcastss 0x2ff10(%rip),%ymm8 # 38b24 <_sk_srcover_bgra_8888_sse2_lowp+0x2b0>
.byte 197,60,92,195 // vsubps %ymm3,%ymm8,%ymm8
- .byte 196,98,125,24,13,191,253,2,0 // vbroadcastss 0x2fdbf(%rip),%ymm9 # 389e0 <_sk_srcover_bgra_8888_sse2_lowp+0x2dc>
+ .byte 196,98,125,24,13,47,255,2,0 // vbroadcastss 0x2ff2f(%rip),%ymm9 # 38b50 <_sk_srcover_bgra_8888_sse2_lowp+0x2dc>
.byte 196,193,124,89,193 // vmulps %ymm9,%ymm0,%ymm0
.byte 196,194,93,184,192 // vfmadd231ps %ymm8,%ymm4,%ymm0
.byte 196,193,116,89,201 // vmulps %ymm9,%ymm1,%ymm1
@@ -18141,17 +18141,17 @@ _sk_srcover_bgra_8888_hsw:
.byte 72,133,255 // test %rdi,%rdi
.byte 15,133,179,0,0,0 // jne 8eb7 <_sk_srcover_bgra_8888_hsw+0xd3>
.byte 196,129,126,111,60,152 // vmovdqu (%r8,%r11,4),%ymm7
- .byte 197,197,219,37,238,253,2,0 // vpand 0x2fdee(%rip),%ymm7,%ymm4 # 38c00 <_sk_srcover_bgra_8888_sse2_lowp+0x4fc>
+ .byte 197,197,219,37,78,255,2,0 // vpand 0x2ff4e(%rip),%ymm7,%ymm4 # 38d60 <_sk_srcover_bgra_8888_sse2_lowp+0x4ec>
.byte 197,252,91,244 // vcvtdq2ps %ymm4,%ymm6
- .byte 196,226,69,0,37,1,254,2,0 // vpshufb 0x2fe01(%rip),%ymm7,%ymm4 # 38c20 <_sk_srcover_bgra_8888_sse2_lowp+0x51c>
+ .byte 196,226,69,0,37,97,255,2,0 // vpshufb 0x2ff61(%rip),%ymm7,%ymm4 # 38d80 <_sk_srcover_bgra_8888_sse2_lowp+0x50c>
.byte 197,252,91,236 // vcvtdq2ps %ymm4,%ymm5
- .byte 196,226,69,0,37,20,254,2,0 // vpshufb 0x2fe14(%rip),%ymm7,%ymm4 # 38c40 <_sk_srcover_bgra_8888_sse2_lowp+0x53c>
+ .byte 196,226,69,0,37,116,255,2,0 // vpshufb 0x2ff74(%rip),%ymm7,%ymm4 # 38da0 <_sk_srcover_bgra_8888_sse2_lowp+0x52c>
.byte 197,252,91,228 // vcvtdq2ps %ymm4,%ymm4
.byte 197,197,114,215,24 // vpsrld $0x18,%ymm7,%ymm7
.byte 197,252,91,255 // vcvtdq2ps %ymm7,%ymm7
- .byte 196,98,125,24,5,114,251,2,0 // vbroadcastss 0x2fb72(%rip),%ymm8 # 389b4 <_sk_srcover_bgra_8888_sse2_lowp+0x2b0>
+ .byte 196,98,125,24,5,226,252,2,0 // vbroadcastss 0x2fce2(%rip),%ymm8 # 38b24 <_sk_srcover_bgra_8888_sse2_lowp+0x2b0>
.byte 197,60,92,195 // vsubps %ymm3,%ymm8,%ymm8
- .byte 196,98,125,24,13,145,251,2,0 // vbroadcastss 0x2fb91(%rip),%ymm9 # 389e0 <_sk_srcover_bgra_8888_sse2_lowp+0x2dc>
+ .byte 196,98,125,24,13,1,253,2,0 // vbroadcastss 0x2fd01(%rip),%ymm9 # 38b50 <_sk_srcover_bgra_8888_sse2_lowp+0x2dc>
.byte 196,193,124,89,193 // vmulps %ymm9,%ymm0,%ymm0
.byte 196,194,93,184,192 // vfmadd231ps %ymm8,%ymm4,%ymm0
.byte 196,193,116,89,201 // vmulps %ymm9,%ymm1,%ymm1
@@ -18279,7 +18279,7 @@ HIDDEN _sk_clamp_1_hsw
.globl _sk_clamp_1_hsw
FUNCTION(_sk_clamp_1_hsw)
_sk_clamp_1_hsw:
- .byte 196,98,125,24,5,126,249,2,0 // vbroadcastss 0x2f97e(%rip),%ymm8 # 389b4 <_sk_srcover_bgra_8888_sse2_lowp+0x2b0>
+ .byte 196,98,125,24,5,238,250,2,0 // vbroadcastss 0x2faee(%rip),%ymm8 # 38b24 <_sk_srcover_bgra_8888_sse2_lowp+0x2b0>
.byte 196,193,124,93,192 // vminps %ymm8,%ymm0,%ymm0
.byte 196,193,116,93,200 // vminps %ymm8,%ymm1,%ymm1
.byte 196,193,108,93,208 // vminps %ymm8,%ymm2,%ymm2
@@ -18291,7 +18291,7 @@ HIDDEN _sk_clamp_a_hsw
.globl _sk_clamp_a_hsw
FUNCTION(_sk_clamp_a_hsw)
_sk_clamp_a_hsw:
- .byte 196,98,125,24,5,93,249,2,0 // vbroadcastss 0x2f95d(%rip),%ymm8 # 389b4 <_sk_srcover_bgra_8888_sse2_lowp+0x2b0>
+ .byte 196,98,125,24,5,205,250,2,0 // vbroadcastss 0x2facd(%rip),%ymm8 # 38b24 <_sk_srcover_bgra_8888_sse2_lowp+0x2b0>
.byte 196,193,100,93,216 // vminps %ymm8,%ymm3,%ymm3
.byte 197,252,93,195 // vminps %ymm3,%ymm0,%ymm0
.byte 197,244,93,203 // vminps %ymm3,%ymm1,%ymm1
@@ -18303,7 +18303,7 @@ HIDDEN _sk_clamp_a_dst_hsw
.globl _sk_clamp_a_dst_hsw
FUNCTION(_sk_clamp_a_dst_hsw)
_sk_clamp_a_dst_hsw:
- .byte 196,98,125,24,5,63,249,2,0 // vbroadcastss 0x2f93f(%rip),%ymm8 # 389b4 <_sk_srcover_bgra_8888_sse2_lowp+0x2b0>
+ .byte 196,98,125,24,5,175,250,2,0 // vbroadcastss 0x2faaf(%rip),%ymm8 # 38b24 <_sk_srcover_bgra_8888_sse2_lowp+0x2b0>
.byte 196,193,68,93,248 // vminps %ymm8,%ymm7,%ymm7
.byte 197,220,93,231 // vminps %ymm7,%ymm4,%ymm4
.byte 197,212,93,239 // vminps %ymm7,%ymm5,%ymm5
@@ -18336,7 +18336,7 @@ HIDDEN _sk_invert_hsw
.globl _sk_invert_hsw
FUNCTION(_sk_invert_hsw)
_sk_invert_hsw:
- .byte 196,98,125,24,5,250,248,2,0 // vbroadcastss 0x2f8fa(%rip),%ymm8 # 389b4 <_sk_srcover_bgra_8888_sse2_lowp+0x2b0>
+ .byte 196,98,125,24,5,106,250,2,0 // vbroadcastss 0x2fa6a(%rip),%ymm8 # 38b24 <_sk_srcover_bgra_8888_sse2_lowp+0x2b0>
.byte 197,188,92,192 // vsubps %ymm0,%ymm8,%ymm0
.byte 197,188,92,201 // vsubps %ymm1,%ymm8,%ymm1
.byte 197,188,92,210 // vsubps %ymm2,%ymm8,%ymm2
@@ -18390,9 +18390,9 @@ HIDDEN _sk_unpremul_hsw
.globl _sk_unpremul_hsw
FUNCTION(_sk_unpremul_hsw)
_sk_unpremul_hsw:
- .byte 196,98,125,24,5,149,248,2,0 // vbroadcastss 0x2f895(%rip),%ymm8 # 389b4 <_sk_srcover_bgra_8888_sse2_lowp+0x2b0>
+ .byte 196,98,125,24,5,5,250,2,0 // vbroadcastss 0x2fa05(%rip),%ymm8 # 38b24 <_sk_srcover_bgra_8888_sse2_lowp+0x2b0>
.byte 197,60,94,195 // vdivps %ymm3,%ymm8,%ymm8
- .byte 196,98,125,24,13,184,248,2,0 // vbroadcastss 0x2f8b8(%rip),%ymm9 # 389e4 <_sk_srcover_bgra_8888_sse2_lowp+0x2e0>
+ .byte 196,98,125,24,13,40,250,2,0 // vbroadcastss 0x2fa28(%rip),%ymm9 # 38b54 <_sk_srcover_bgra_8888_sse2_lowp+0x2e0>
.byte 196,65,60,194,201,1 // vcmpltps %ymm9,%ymm8,%ymm9
.byte 196,65,44,87,210 // vxorps %ymm10,%ymm10,%ymm10
.byte 196,67,45,74,192,144 // vblendvps %ymm9,%ymm8,%ymm10,%ymm8
@@ -18406,16 +18406,16 @@ HIDDEN _sk_from_srgb_hsw
.globl _sk_from_srgb_hsw
FUNCTION(_sk_from_srgb_hsw)
_sk_from_srgb_hsw:
- .byte 196,98,125,24,5,146,248,2,0 // vbroadcastss 0x2f892(%rip),%ymm8 # 389e8 <_sk_srcover_bgra_8888_sse2_lowp+0x2e4>
+ .byte 196,98,125,24,5,2,250,2,0 // vbroadcastss 0x2fa02(%rip),%ymm8 # 38b58 <_sk_srcover_bgra_8888_sse2_lowp+0x2e4>
.byte 196,65,124,89,200 // vmulps %ymm8,%ymm0,%ymm9
.byte 197,124,89,208 // vmulps %ymm0,%ymm0,%ymm10
- .byte 196,98,125,24,29,132,248,2,0 // vbroadcastss 0x2f884(%rip),%ymm11 # 389ec <_sk_srcover_bgra_8888_sse2_lowp+0x2e8>
- .byte 196,98,125,24,37,99,248,2,0 // vbroadcastss 0x2f863(%rip),%ymm12 # 389d4 <_sk_srcover_bgra_8888_sse2_lowp+0x2d0>
+ .byte 196,98,125,24,29,244,249,2,0 // vbroadcastss 0x2f9f4(%rip),%ymm11 # 38b5c <_sk_srcover_bgra_8888_sse2_lowp+0x2e8>
+ .byte 196,98,125,24,37,211,249,2,0 // vbroadcastss 0x2f9d3(%rip),%ymm12 # 38b44 <_sk_srcover_bgra_8888_sse2_lowp+0x2d0>
.byte 196,65,124,40,236 // vmovaps %ymm12,%ymm13
.byte 196,66,125,168,235 // vfmadd213ps %ymm11,%ymm0,%ymm13
- .byte 196,98,125,24,53,108,248,2,0 // vbroadcastss 0x2f86c(%rip),%ymm14 # 389f0 <_sk_srcover_bgra_8888_sse2_lowp+0x2ec>
+ .byte 196,98,125,24,53,220,249,2,0 // vbroadcastss 0x2f9dc(%rip),%ymm14 # 38b60 <_sk_srcover_bgra_8888_sse2_lowp+0x2ec>
.byte 196,66,45,168,238 // vfmadd213ps %ymm14,%ymm10,%ymm13
- .byte 196,98,125,24,21,98,248,2,0 // vbroadcastss 0x2f862(%rip),%ymm10 # 389f4 <_sk_srcover_bgra_8888_sse2_lowp+0x2f0>
+ .byte 196,98,125,24,21,210,249,2,0 // vbroadcastss 0x2f9d2(%rip),%ymm10 # 38b64 <_sk_srcover_bgra_8888_sse2_lowp+0x2f0>
.byte 196,193,124,194,194,1 // vcmpltps %ymm10,%ymm0,%ymm0
.byte 196,195,21,74,193,0 // vblendvps %ymm0,%ymm9,%ymm13,%ymm0
.byte 196,65,116,89,200 // vmulps %ymm8,%ymm1,%ymm9
@@ -18438,16 +18438,16 @@ HIDDEN _sk_from_srgb_dst_hsw
.globl _sk_from_srgb_dst_hsw
FUNCTION(_sk_from_srgb_dst_hsw)
_sk_from_srgb_dst_hsw:
- .byte 196,98,125,24,5,250,247,2,0 // vbroadcastss 0x2f7fa(%rip),%ymm8 # 389e8 <_sk_srcover_bgra_8888_sse2_lowp+0x2e4>
+ .byte 196,98,125,24,5,106,249,2,0 // vbroadcastss 0x2f96a(%rip),%ymm8 # 38b58 <_sk_srcover_bgra_8888_sse2_lowp+0x2e4>
.byte 196,65,92,89,200 // vmulps %ymm8,%ymm4,%ymm9
.byte 197,92,89,212 // vmulps %ymm4,%ymm4,%ymm10
- .byte 196,98,125,24,29,236,247,2,0 // vbroadcastss 0x2f7ec(%rip),%ymm11 # 389ec <_sk_srcover_bgra_8888_sse2_lowp+0x2e8>
- .byte 196,98,125,24,37,203,247,2,0 // vbroadcastss 0x2f7cb(%rip),%ymm12 # 389d4 <_sk_srcover_bgra_8888_sse2_lowp+0x2d0>
+ .byte 196,98,125,24,29,92,249,2,0 // vbroadcastss 0x2f95c(%rip),%ymm11 # 38b5c <_sk_srcover_bgra_8888_sse2_lowp+0x2e8>
+ .byte 196,98,125,24,37,59,249,2,0 // vbroadcastss 0x2f93b(%rip),%ymm12 # 38b44 <_sk_srcover_bgra_8888_sse2_lowp+0x2d0>
.byte 196,65,124,40,236 // vmovaps %ymm12,%ymm13
.byte 196,66,93,168,235 // vfmadd213ps %ymm11,%ymm4,%ymm13
- .byte 196,98,125,24,53,212,247,2,0 // vbroadcastss 0x2f7d4(%rip),%ymm14 # 389f0 <_sk_srcover_bgra_8888_sse2_lowp+0x2ec>
+ .byte 196,98,125,24,53,68,249,2,0 // vbroadcastss 0x2f944(%rip),%ymm14 # 38b60 <_sk_srcover_bgra_8888_sse2_lowp+0x2ec>
.byte 196,66,45,168,238 // vfmadd213ps %ymm14,%ymm10,%ymm13
- .byte 196,98,125,24,21,202,247,2,0 // vbroadcastss 0x2f7ca(%rip),%ymm10 # 389f4 <_sk_srcover_bgra_8888_sse2_lowp+0x2f0>
+ .byte 196,98,125,24,21,58,249,2,0 // vbroadcastss 0x2f93a(%rip),%ymm10 # 38b64 <_sk_srcover_bgra_8888_sse2_lowp+0x2f0>
.byte 196,193,92,194,226,1 // vcmpltps %ymm10,%ymm4,%ymm4
.byte 196,195,21,74,225,64 // vblendvps %ymm4,%ymm9,%ymm13,%ymm4
.byte 196,65,84,89,200 // vmulps %ymm8,%ymm5,%ymm9
@@ -18471,19 +18471,19 @@ HIDDEN _sk_to_srgb_hsw
FUNCTION(_sk_to_srgb_hsw)
_sk_to_srgb_hsw:
.byte 197,124,82,200 // vrsqrtps %ymm0,%ymm9
- .byte 196,98,125,24,5,110,247,2,0 // vbroadcastss 0x2f76e(%rip),%ymm8 # 389f8 <_sk_srcover_bgra_8888_sse2_lowp+0x2f4>
+ .byte 196,98,125,24,5,222,248,2,0 // vbroadcastss 0x2f8de(%rip),%ymm8 # 38b68 <_sk_srcover_bgra_8888_sse2_lowp+0x2f4>
.byte 196,65,124,89,208 // vmulps %ymm8,%ymm0,%ymm10
- .byte 196,98,125,24,29,100,247,2,0 // vbroadcastss 0x2f764(%rip),%ymm11 # 389fc <_sk_srcover_bgra_8888_sse2_lowp+0x2f8>
- .byte 196,98,125,24,37,95,247,2,0 // vbroadcastss 0x2f75f(%rip),%ymm12 # 38a00 <_sk_srcover_bgra_8888_sse2_lowp+0x2fc>
+ .byte 196,98,125,24,29,212,248,2,0 // vbroadcastss 0x2f8d4(%rip),%ymm11 # 38b6c <_sk_srcover_bgra_8888_sse2_lowp+0x2f8>
+ .byte 196,98,125,24,37,207,248,2,0 // vbroadcastss 0x2f8cf(%rip),%ymm12 # 38b70 <_sk_srcover_bgra_8888_sse2_lowp+0x2fc>
.byte 196,65,124,40,236 // vmovaps %ymm12,%ymm13
.byte 196,66,53,168,235 // vfmadd213ps %ymm11,%ymm9,%ymm13
- .byte 196,98,125,24,53,128,248,2,0 // vbroadcastss 0x2f880(%rip),%ymm14 # 38b34 <_sk_srcover_bgra_8888_sse2_lowp+0x430>
+ .byte 196,98,125,24,53,240,249,2,0 // vbroadcastss 0x2f9f0(%rip),%ymm14 # 38ca4 <_sk_srcover_bgra_8888_sse2_lowp+0x430>
.byte 196,66,53,168,238 // vfmadd213ps %ymm14,%ymm9,%ymm13
- .byte 196,98,125,24,61,118,248,2,0 // vbroadcastss 0x2f876(%rip),%ymm15 # 38b38 <_sk_srcover_bgra_8888_sse2_lowp+0x434>
+ .byte 196,98,125,24,61,230,249,2,0 // vbroadcastss 0x2f9e6(%rip),%ymm15 # 38ca8 <_sk_srcover_bgra_8888_sse2_lowp+0x434>
.byte 196,65,52,88,207 // vaddps %ymm15,%ymm9,%ymm9
.byte 196,65,124,83,201 // vrcpps %ymm9,%ymm9
.byte 196,65,20,89,201 // vmulps %ymm9,%ymm13,%ymm9
- .byte 196,98,125,24,45,50,247,2,0 // vbroadcastss 0x2f732(%rip),%ymm13 # 38a0c <_sk_srcover_bgra_8888_sse2_lowp+0x308>
+ .byte 196,98,125,24,45,162,248,2,0 // vbroadcastss 0x2f8a2(%rip),%ymm13 # 38b7c <_sk_srcover_bgra_8888_sse2_lowp+0x308>
.byte 196,193,124,194,197,1 // vcmpltps %ymm13,%ymm0,%ymm0
.byte 196,195,53,74,194,0 // vblendvps %ymm0,%ymm10,%ymm9,%ymm0
.byte 197,124,82,201 // vrsqrtps %ymm1,%ymm9
@@ -18517,26 +18517,26 @@ _sk_rgb_to_hsl_hsw:
.byte 197,116,93,202 // vminps %ymm2,%ymm1,%ymm9
.byte 196,65,124,93,201 // vminps %ymm9,%ymm0,%ymm9
.byte 196,65,60,92,209 // vsubps %ymm9,%ymm8,%ymm10
- .byte 196,98,125,24,29,73,246,2,0 // vbroadcastss 0x2f649(%rip),%ymm11 # 389b4 <_sk_srcover_bgra_8888_sse2_lowp+0x2b0>
+ .byte 196,98,125,24,29,185,247,2,0 // vbroadcastss 0x2f7b9(%rip),%ymm11 # 38b24 <_sk_srcover_bgra_8888_sse2_lowp+0x2b0>
.byte 196,65,36,94,218 // vdivps %ymm10,%ymm11,%ymm11
.byte 197,116,92,226 // vsubps %ymm2,%ymm1,%ymm12
.byte 197,116,194,234,1 // vcmpltps %ymm2,%ymm1,%ymm13
- .byte 196,98,125,24,53,142,246,2,0 // vbroadcastss 0x2f68e(%rip),%ymm14 # 38a10 <_sk_srcover_bgra_8888_sse2_lowp+0x30c>
+ .byte 196,98,125,24,53,254,247,2,0 // vbroadcastss 0x2f7fe(%rip),%ymm14 # 38b80 <_sk_srcover_bgra_8888_sse2_lowp+0x30c>
.byte 196,65,4,87,255 // vxorps %ymm15,%ymm15,%ymm15
.byte 196,67,5,74,238,208 // vblendvps %ymm13,%ymm14,%ymm15,%ymm13
.byte 196,66,37,168,229 // vfmadd213ps %ymm13,%ymm11,%ymm12
.byte 197,236,92,208 // vsubps %ymm0,%ymm2,%ymm2
.byte 197,124,92,233 // vsubps %ymm1,%ymm0,%ymm13
- .byte 196,98,125,24,53,117,246,2,0 // vbroadcastss 0x2f675(%rip),%ymm14 # 38a18 <_sk_srcover_bgra_8888_sse2_lowp+0x314>
+ .byte 196,98,125,24,53,229,247,2,0 // vbroadcastss 0x2f7e5(%rip),%ymm14 # 38b88 <_sk_srcover_bgra_8888_sse2_lowp+0x314>
.byte 196,66,37,168,238 // vfmadd213ps %ymm14,%ymm11,%ymm13
- .byte 196,98,125,24,53,99,246,2,0 // vbroadcastss 0x2f663(%rip),%ymm14 # 38a14 <_sk_srcover_bgra_8888_sse2_lowp+0x310>
+ .byte 196,98,125,24,53,211,247,2,0 // vbroadcastss 0x2f7d3(%rip),%ymm14 # 38b84 <_sk_srcover_bgra_8888_sse2_lowp+0x310>
.byte 196,194,37,168,214 // vfmadd213ps %ymm14,%ymm11,%ymm2
.byte 197,188,194,201,0 // vcmpeqps %ymm1,%ymm8,%ymm1
.byte 196,227,21,74,202,16 // vblendvps %ymm1,%ymm2,%ymm13,%ymm1
.byte 197,188,194,192,0 // vcmpeqps %ymm0,%ymm8,%ymm0
.byte 196,195,117,74,196,0 // vblendvps %ymm0,%ymm12,%ymm1,%ymm0
.byte 196,193,60,88,201 // vaddps %ymm9,%ymm8,%ymm1
- .byte 196,98,125,24,29,214,245,2,0 // vbroadcastss 0x2f5d6(%rip),%ymm11 # 389b0 <_sk_srcover_bgra_8888_sse2_lowp+0x2ac>
+ .byte 196,98,125,24,29,70,247,2,0 // vbroadcastss 0x2f746(%rip),%ymm11 # 38b20 <_sk_srcover_bgra_8888_sse2_lowp+0x2ac>
.byte 196,193,116,89,211 // vmulps %ymm11,%ymm1,%ymm2
.byte 197,36,194,218,1 // vcmpltps %ymm2,%ymm11,%ymm11
.byte 196,65,12,92,224 // vsubps %ymm8,%ymm14,%ymm12
@@ -18546,7 +18546,7 @@ _sk_rgb_to_hsl_hsw:
.byte 197,172,94,201 // vdivps %ymm1,%ymm10,%ymm1
.byte 196,195,125,74,199,128 // vblendvps %ymm8,%ymm15,%ymm0,%ymm0
.byte 196,195,117,74,207,128 // vblendvps %ymm8,%ymm15,%ymm1,%ymm1
- .byte 196,98,125,24,5,9,246,2,0 // vbroadcastss 0x2f609(%rip),%ymm8 # 38a1c <_sk_srcover_bgra_8888_sse2_lowp+0x318>
+ .byte 196,98,125,24,5,121,247,2,0 // vbroadcastss 0x2f779(%rip),%ymm8 # 38b8c <_sk_srcover_bgra_8888_sse2_lowp+0x318>
.byte 196,193,124,89,192 // vmulps %ymm8,%ymm0,%ymm0
.byte 72,173 // lods %ds:(%rsi),%rax
.byte 255,224 // jmpq *%rax
@@ -18563,30 +18563,30 @@ _sk_hsl_to_rgb_hsw:
.byte 197,252,17,92,36,128 // vmovups %ymm3,-0x80(%rsp)
.byte 197,252,40,233 // vmovaps %ymm1,%ymm5
.byte 197,252,40,224 // vmovaps %ymm0,%ymm4
- .byte 196,98,125,24,5,98,245,2,0 // vbroadcastss 0x2f562(%rip),%ymm8 # 389b0 <_sk_srcover_bgra_8888_sse2_lowp+0x2ac>
+ .byte 196,98,125,24,5,210,246,2,0 // vbroadcastss 0x2f6d2(%rip),%ymm8 # 38b20 <_sk_srcover_bgra_8888_sse2_lowp+0x2ac>
.byte 197,60,194,202,2 // vcmpleps %ymm2,%ymm8,%ymm9
.byte 197,84,89,210 // vmulps %ymm2,%ymm5,%ymm10
.byte 196,65,84,92,218 // vsubps %ymm10,%ymm5,%ymm11
.byte 196,67,45,74,203,144 // vblendvps %ymm9,%ymm11,%ymm10,%ymm9
.byte 197,52,88,210 // vaddps %ymm2,%ymm9,%ymm10
- .byte 196,98,125,24,13,165,245,2,0 // vbroadcastss 0x2f5a5(%rip),%ymm9 # 38a14 <_sk_srcover_bgra_8888_sse2_lowp+0x310>
+ .byte 196,98,125,24,13,21,247,2,0 // vbroadcastss 0x2f715(%rip),%ymm9 # 38b84 <_sk_srcover_bgra_8888_sse2_lowp+0x310>
.byte 196,66,109,170,202 // vfmsub213ps %ymm10,%ymm2,%ymm9
- .byte 196,98,125,24,29,163,245,2,0 // vbroadcastss 0x2f5a3(%rip),%ymm11 # 38a20 <_sk_srcover_bgra_8888_sse2_lowp+0x31c>
+ .byte 196,98,125,24,29,19,247,2,0 // vbroadcastss 0x2f713(%rip),%ymm11 # 38b90 <_sk_srcover_bgra_8888_sse2_lowp+0x31c>
.byte 196,65,92,88,219 // vaddps %ymm11,%ymm4,%ymm11
.byte 196,67,125,8,227,1 // vroundps $0x1,%ymm11,%ymm12
.byte 196,65,36,92,252 // vsubps %ymm12,%ymm11,%ymm15
.byte 196,65,44,92,217 // vsubps %ymm9,%ymm10,%ymm11
- .byte 196,98,125,24,45,117,245,2,0 // vbroadcastss 0x2f575(%rip),%ymm13 # 38a10 <_sk_srcover_bgra_8888_sse2_lowp+0x30c>
+ .byte 196,98,125,24,45,229,246,2,0 // vbroadcastss 0x2f6e5(%rip),%ymm13 # 38b80 <_sk_srcover_bgra_8888_sse2_lowp+0x30c>
.byte 196,193,4,89,197 // vmulps %ymm13,%ymm15,%ymm0
- .byte 196,98,125,24,53,111,245,2,0 // vbroadcastss 0x2f56f(%rip),%ymm14 # 38a18 <_sk_srcover_bgra_8888_sse2_lowp+0x314>
+ .byte 196,98,125,24,53,223,246,2,0 // vbroadcastss 0x2f6df(%rip),%ymm14 # 38b88 <_sk_srcover_bgra_8888_sse2_lowp+0x314>
.byte 197,12,92,224 // vsubps %ymm0,%ymm14,%ymm12
.byte 196,66,37,168,225 // vfmadd213ps %ymm9,%ymm11,%ymm12
- .byte 196,226,125,24,29,105,245,2,0 // vbroadcastss 0x2f569(%rip),%ymm3 # 38a24 <_sk_srcover_bgra_8888_sse2_lowp+0x320>
+ .byte 196,226,125,24,29,217,246,2,0 // vbroadcastss 0x2f6d9(%rip),%ymm3 # 38b94 <_sk_srcover_bgra_8888_sse2_lowp+0x320>
.byte 196,193,100,194,255,2 // vcmpleps %ymm15,%ymm3,%ymm7
.byte 196,195,29,74,249,112 // vblendvps %ymm7,%ymm9,%ymm12,%ymm7
.byte 196,65,60,194,231,2 // vcmpleps %ymm15,%ymm8,%ymm12
.byte 196,227,45,74,255,192 // vblendvps %ymm12,%ymm7,%ymm10,%ymm7
- .byte 196,98,125,24,37,64,245,2,0 // vbroadcastss 0x2f540(%rip),%ymm12 # 38a1c <_sk_srcover_bgra_8888_sse2_lowp+0x318>
+ .byte 196,98,125,24,37,176,246,2,0 // vbroadcastss 0x2f6b0(%rip),%ymm12 # 38b8c <_sk_srcover_bgra_8888_sse2_lowp+0x318>
.byte 196,65,28,194,255,2 // vcmpleps %ymm15,%ymm12,%ymm15
.byte 196,194,37,168,193 // vfmadd213ps %ymm9,%ymm11,%ymm0
.byte 196,99,125,74,255,240 // vblendvps %ymm15,%ymm7,%ymm0,%ymm15
@@ -18602,7 +18602,7 @@ _sk_hsl_to_rgb_hsw:
.byte 197,156,194,192,2 // vcmpleps %ymm0,%ymm12,%ymm0
.byte 196,194,37,168,249 // vfmadd213ps %ymm9,%ymm11,%ymm7
.byte 196,227,69,74,201,0 // vblendvps %ymm0,%ymm1,%ymm7,%ymm1
- .byte 196,226,125,24,5,244,244,2,0 // vbroadcastss 0x2f4f4(%rip),%ymm0 # 38a28 <_sk_srcover_bgra_8888_sse2_lowp+0x324>
+ .byte 196,226,125,24,5,100,246,2,0 // vbroadcastss 0x2f664(%rip),%ymm0 # 38b98 <_sk_srcover_bgra_8888_sse2_lowp+0x324>
.byte 197,220,88,192 // vaddps %ymm0,%ymm4,%ymm0
.byte 196,227,125,8,224,1 // vroundps $0x1,%ymm0,%ymm4
.byte 197,252,92,196 // vsubps %ymm4,%ymm0,%ymm0
@@ -18656,10 +18656,10 @@ _sk_scale_u8_hsw:
.byte 72,133,255 // test %rdi,%rdi
.byte 117,58 // jne 9623 <_sk_scale_u8_hsw+0x52>
.byte 196,2,121,48,4,24 // vpmovzxbw (%r8,%r11,1),%xmm8
- .byte 197,57,219,5,153,252,2,0 // vpand 0x2fc99(%rip),%xmm8,%xmm8 # 39290 <_sk_srcover_bgra_8888_sse2_lowp+0xb8c>
+ .byte 197,57,219,5,249,253,2,0 // vpand 0x2fdf9(%rip),%xmm8,%xmm8 # 393f0 <_sk_srcover_bgra_8888_sse2_lowp+0xb7c>
.byte 196,66,125,51,192 // vpmovzxwd %xmm8,%ymm8
.byte 196,65,124,91,192 // vcvtdq2ps %ymm8,%ymm8
- .byte 196,98,125,24,13,34,244,2,0 // vbroadcastss 0x2f422(%rip),%ymm9 # 38a2c <_sk_srcover_bgra_8888_sse2_lowp+0x328>
+ .byte 196,98,125,24,13,146,245,2,0 // vbroadcastss 0x2f592(%rip),%ymm9 # 38b9c <_sk_srcover_bgra_8888_sse2_lowp+0x328>
.byte 196,65,60,89,193 // vmulps %ymm9,%ymm8,%ymm8
.byte 197,188,89,192 // vmulps %ymm0,%ymm8,%ymm0
.byte 197,188,89,201 // vmulps %ymm1,%ymm8,%ymm1
@@ -18736,20 +18736,20 @@ _sk_scale_565_hsw:
.byte 15,133,161,0,0,0 // jne 9798 <_sk_scale_565_hsw+0xc0>
.byte 196,1,122,111,4,88 // vmovdqu (%r8,%r11,2),%xmm8
.byte 196,66,125,51,192 // vpmovzxwd %xmm8,%ymm8
- .byte 196,98,125,88,13,37,243,2,0 // vpbroadcastd 0x2f325(%rip),%ymm9 # 38a30 <_sk_srcover_bgra_8888_sse2_lowp+0x32c>
+ .byte 196,98,125,88,13,149,244,2,0 // vpbroadcastd 0x2f495(%rip),%ymm9 # 38ba0 <_sk_srcover_bgra_8888_sse2_lowp+0x32c>
.byte 196,65,61,219,201 // vpand %ymm9,%ymm8,%ymm9
.byte 196,65,124,91,201 // vcvtdq2ps %ymm9,%ymm9
- .byte 196,98,125,24,21,22,243,2,0 // vbroadcastss 0x2f316(%rip),%ymm10 # 38a34 <_sk_srcover_bgra_8888_sse2_lowp+0x330>
+ .byte 196,98,125,24,21,134,244,2,0 // vbroadcastss 0x2f486(%rip),%ymm10 # 38ba4 <_sk_srcover_bgra_8888_sse2_lowp+0x330>
.byte 196,65,52,89,202 // vmulps %ymm10,%ymm9,%ymm9
- .byte 196,98,125,88,21,12,243,2,0 // vpbroadcastd 0x2f30c(%rip),%ymm10 # 38a38 <_sk_srcover_bgra_8888_sse2_lowp+0x334>
+ .byte 196,98,125,88,21,124,244,2,0 // vpbroadcastd 0x2f47c(%rip),%ymm10 # 38ba8 <_sk_srcover_bgra_8888_sse2_lowp+0x334>
.byte 196,65,61,219,210 // vpand %ymm10,%ymm8,%ymm10
.byte 196,65,124,91,210 // vcvtdq2ps %ymm10,%ymm10
- .byte 196,98,125,24,29,253,242,2,0 // vbroadcastss 0x2f2fd(%rip),%ymm11 # 38a3c <_sk_srcover_bgra_8888_sse2_lowp+0x338>
+ .byte 196,98,125,24,29,109,244,2,0 // vbroadcastss 0x2f46d(%rip),%ymm11 # 38bac <_sk_srcover_bgra_8888_sse2_lowp+0x338>
.byte 196,65,44,89,211 // vmulps %ymm11,%ymm10,%ymm10
- .byte 196,98,125,88,29,243,242,2,0 // vpbroadcastd 0x2f2f3(%rip),%ymm11 # 38a40 <_sk_srcover_bgra_8888_sse2_lowp+0x33c>
+ .byte 196,98,125,88,29,99,244,2,0 // vpbroadcastd 0x2f463(%rip),%ymm11 # 38bb0 <_sk_srcover_bgra_8888_sse2_lowp+0x33c>
.byte 196,65,61,219,195 // vpand %ymm11,%ymm8,%ymm8
.byte 196,65,124,91,192 // vcvtdq2ps %ymm8,%ymm8
- .byte 196,98,125,24,29,228,242,2,0 // vbroadcastss 0x2f2e4(%rip),%ymm11 # 38a44 <_sk_srcover_bgra_8888_sse2_lowp+0x340>
+ .byte 196,98,125,24,29,84,244,2,0 // vbroadcastss 0x2f454(%rip),%ymm11 # 38bb4 <_sk_srcover_bgra_8888_sse2_lowp+0x340>
.byte 196,65,60,89,195 // vmulps %ymm11,%ymm8,%ymm8
.byte 197,100,194,223,1 // vcmpltps %ymm7,%ymm3,%ymm11
.byte 196,65,44,93,224 // vminps %ymm8,%ymm10,%ymm12
@@ -18840,10 +18840,10 @@ _sk_lerp_u8_hsw:
.byte 72,133,255 // test %rdi,%rdi
.byte 117,78 // jne 98d1 <_sk_lerp_u8_hsw+0x66>
.byte 196,2,121,48,4,24 // vpmovzxbw (%r8,%r11,1),%xmm8
- .byte 197,57,219,5,255,249,2,0 // vpand 0x2f9ff(%rip),%xmm8,%xmm8 # 39290 <_sk_srcover_bgra_8888_sse2_lowp+0xb8c>
+ .byte 197,57,219,5,95,251,2,0 // vpand 0x2fb5f(%rip),%xmm8,%xmm8 # 393f0 <_sk_srcover_bgra_8888_sse2_lowp+0xb7c>
.byte 196,66,125,51,192 // vpmovzxwd %xmm8,%ymm8
.byte 196,65,124,91,192 // vcvtdq2ps %ymm8,%ymm8
- .byte 196,98,125,24,13,136,241,2,0 // vbroadcastss 0x2f188(%rip),%ymm9 # 38a2c <_sk_srcover_bgra_8888_sse2_lowp+0x328>
+ .byte 196,98,125,24,13,248,242,2,0 // vbroadcastss 0x2f2f8(%rip),%ymm9 # 38b9c <_sk_srcover_bgra_8888_sse2_lowp+0x328>
.byte 196,65,60,89,193 // vmulps %ymm9,%ymm8,%ymm8
.byte 197,252,92,196 // vsubps %ymm4,%ymm0,%ymm0
.byte 196,226,61,168,196 // vfmadd213ps %ymm4,%ymm8,%ymm0
@@ -18895,7 +18895,7 @@ _sk_lerp_u8_hsw:
.byte 255 // (bad)
.byte 255 // (bad)
.byte 255 // (bad)
- .byte 232,255,255,255,221 // callq ffffffffde00997c <_sk_srcover_bgra_8888_sse2_lowp+0xffffffffddfd1278>
+ .byte 232,255,255,255,221 // callq ffffffffde00997c <_sk_srcover_bgra_8888_sse2_lowp+0xffffffffddfd1108>
.byte 255 // (bad)
.byte 255 // (bad)
.byte 255,210 // callq *%rdx
@@ -18921,20 +18921,20 @@ _sk_lerp_565_hsw:
.byte 15,133,181,0,0,0 // jne 9a5c <_sk_lerp_565_hsw+0xd4>
.byte 196,1,122,111,4,88 // vmovdqu (%r8,%r11,2),%xmm8
.byte 196,66,125,51,192 // vpmovzxwd %xmm8,%ymm8
- .byte 196,98,125,88,13,117,240,2,0 // vpbroadcastd 0x2f075(%rip),%ymm9 # 38a30 <_sk_srcover_bgra_8888_sse2_lowp+0x32c>
+ .byte 196,98,125,88,13,229,241,2,0 // vpbroadcastd 0x2f1e5(%rip),%ymm9 # 38ba0 <_sk_srcover_bgra_8888_sse2_lowp+0x32c>
.byte 196,65,61,219,201 // vpand %ymm9,%ymm8,%ymm9
.byte 196,65,124,91,201 // vcvtdq2ps %ymm9,%ymm9
- .byte 196,98,125,24,21,102,240,2,0 // vbroadcastss 0x2f066(%rip),%ymm10 # 38a34 <_sk_srcover_bgra_8888_sse2_lowp+0x330>
+ .byte 196,98,125,24,21,214,241,2,0 // vbroadcastss 0x2f1d6(%rip),%ymm10 # 38ba4 <_sk_srcover_bgra_8888_sse2_lowp+0x330>
.byte 196,65,52,89,202 // vmulps %ymm10,%ymm9,%ymm9
- .byte 196,98,125,88,21,92,240,2,0 // vpbroadcastd 0x2f05c(%rip),%ymm10 # 38a38 <_sk_srcover_bgra_8888_sse2_lowp+0x334>
+ .byte 196,98,125,88,21,204,241,2,0 // vpbroadcastd 0x2f1cc(%rip),%ymm10 # 38ba8 <_sk_srcover_bgra_8888_sse2_lowp+0x334>
.byte 196,65,61,219,210 // vpand %ymm10,%ymm8,%ymm10
.byte 196,65,124,91,210 // vcvtdq2ps %ymm10,%ymm10
- .byte 196,98,125,24,29,77,240,2,0 // vbroadcastss 0x2f04d(%rip),%ymm11 # 38a3c <_sk_srcover_bgra_8888_sse2_lowp+0x338>
+ .byte 196,98,125,24,29,189,241,2,0 // vbroadcastss 0x2f1bd(%rip),%ymm11 # 38bac <_sk_srcover_bgra_8888_sse2_lowp+0x338>
.byte 196,65,44,89,211 // vmulps %ymm11,%ymm10,%ymm10
- .byte 196,98,125,88,29,67,240,2,0 // vpbroadcastd 0x2f043(%rip),%ymm11 # 38a40 <_sk_srcover_bgra_8888_sse2_lowp+0x33c>
+ .byte 196,98,125,88,29,179,241,2,0 // vpbroadcastd 0x2f1b3(%rip),%ymm11 # 38bb0 <_sk_srcover_bgra_8888_sse2_lowp+0x33c>
.byte 196,65,61,219,195 // vpand %ymm11,%ymm8,%ymm8
.byte 196,65,124,91,192 // vcvtdq2ps %ymm8,%ymm8
- .byte 196,98,125,24,29,52,240,2,0 // vbroadcastss 0x2f034(%rip),%ymm11 # 38a44 <_sk_srcover_bgra_8888_sse2_lowp+0x340>
+ .byte 196,98,125,24,29,164,241,2,0 // vbroadcastss 0x2f1a4(%rip),%ymm11 # 38bb4 <_sk_srcover_bgra_8888_sse2_lowp+0x340>
.byte 196,65,60,89,195 // vmulps %ymm11,%ymm8,%ymm8
.byte 197,100,194,223,1 // vcmpltps %ymm7,%ymm3,%ymm11
.byte 196,65,44,93,224 // vminps %ymm8,%ymm10,%ymm12
@@ -19008,7 +19008,7 @@ _sk_load_tables_hsw:
.byte 72,133,255 // test %rdi,%rdi
.byte 117,116 // jne 9b7e <_sk_load_tables_hsw+0x7e>
.byte 196,193,126,111,28,144 // vmovdqu (%r8,%rdx,4),%ymm3
- .byte 197,229,219,13,72,241,2,0 // vpand 0x2f148(%rip),%ymm3,%ymm1 # 38c60 <_sk_srcover_bgra_8888_sse2_lowp+0x55c>
+ .byte 197,229,219,13,168,242,2,0 // vpand 0x2f2a8(%rip),%ymm3,%ymm1 # 38dc0 <_sk_srcover_bgra_8888_sse2_lowp+0x54c>
.byte 196,65,61,118,192 // vpcmpeqd %ymm8,%ymm8,%ymm8
.byte 76,139,64,8 // mov 0x8(%rax),%r8
.byte 76,139,72,16 // mov 0x10(%rax),%r9
@@ -19016,16 +19016,16 @@ _sk_load_tables_hsw:
.byte 197,253,239,192 // vpxor %ymm0,%ymm0,%ymm0
.byte 196,65,53,118,201 // vpcmpeqd %ymm9,%ymm9,%ymm9
.byte 196,194,53,146,4,136 // vgatherdps %ymm9,(%r8,%ymm1,4),%ymm0
- .byte 196,98,101,0,13,63,241,2,0 // vpshufb 0x2f13f(%rip),%ymm3,%ymm9 # 38c80 <_sk_srcover_bgra_8888_sse2_lowp+0x57c>
+ .byte 196,98,101,0,13,159,242,2,0 // vpshufb 0x2f29f(%rip),%ymm3,%ymm9 # 38de0 <_sk_srcover_bgra_8888_sse2_lowp+0x56c>
.byte 197,244,87,201 // vxorps %ymm1,%ymm1,%ymm1
.byte 196,65,45,118,210 // vpcmpeqd %ymm10,%ymm10,%ymm10
.byte 196,130,45,146,12,137 // vgatherdps %ymm10,(%r9,%ymm9,4),%ymm1
.byte 72,139,64,24 // mov 0x18(%rax),%rax
- .byte 196,98,101,0,13,67,241,2,0 // vpshufb 0x2f143(%rip),%ymm3,%ymm9 # 38ca0 <_sk_srcover_bgra_8888_sse2_lowp+0x59c>
+ .byte 196,98,101,0,13,163,242,2,0 // vpshufb 0x2f2a3(%rip),%ymm3,%ymm9 # 38e00 <_sk_srcover_bgra_8888_sse2_lowp+0x58c>
.byte 196,162,61,146,20,136 // vgatherdps %ymm8,(%rax,%ymm9,4),%ymm2
.byte 197,229,114,211,24 // vpsrld $0x18,%ymm3,%ymm3
.byte 197,252,91,219 // vcvtdq2ps %ymm3,%ymm3
- .byte 196,98,125,24,5,183,238,2,0 // vbroadcastss 0x2eeb7(%rip),%ymm8 # 38a2c <_sk_srcover_bgra_8888_sse2_lowp+0x328>
+ .byte 196,98,125,24,5,39,240,2,0 // vbroadcastss 0x2f027(%rip),%ymm8 # 38b9c <_sk_srcover_bgra_8888_sse2_lowp+0x328>
.byte 196,193,100,89,216 // vmulps %ymm8,%ymm3,%ymm3
.byte 72,173 // lods %ds:(%rsi),%rax
.byte 255,224 // jmpq *%rax
@@ -19106,7 +19106,7 @@ _sk_load_tables_u16_be_hsw:
.byte 197,185,108,200 // vpunpcklqdq %xmm0,%xmm8,%xmm1
.byte 197,57,109,192 // vpunpckhqdq %xmm0,%xmm8,%xmm8
.byte 196,65,49,108,214 // vpunpcklqdq %xmm14,%xmm9,%xmm10
- .byte 197,121,111,29,224,245,2,0 // vmovdqa 0x2f5e0(%rip),%xmm11 # 39290 <_sk_srcover_bgra_8888_sse2_lowp+0xb8c>
+ .byte 197,121,111,29,64,247,2,0 // vmovdqa 0x2f740(%rip),%xmm11 # 393f0 <_sk_srcover_bgra_8888_sse2_lowp+0xb7c>
.byte 196,193,113,219,195 // vpand %xmm11,%xmm1,%xmm0
.byte 196,226,125,51,200 // vpmovzxwd %xmm0,%ymm1
.byte 196,65,29,118,228 // vpcmpeqd %ymm12,%ymm12,%ymm12
@@ -19131,7 +19131,7 @@ _sk_load_tables_u16_be_hsw:
.byte 197,185,235,219 // vpor %xmm3,%xmm8,%xmm3
.byte 196,226,125,51,219 // vpmovzxwd %xmm3,%ymm3
.byte 197,252,91,219 // vcvtdq2ps %ymm3,%ymm3
- .byte 196,98,125,24,5,28,237,2,0 // vbroadcastss 0x2ed1c(%rip),%ymm8 # 38a48 <_sk_srcover_bgra_8888_sse2_lowp+0x344>
+ .byte 196,98,125,24,5,140,238,2,0 // vbroadcastss 0x2ee8c(%rip),%ymm8 # 38bb8 <_sk_srcover_bgra_8888_sse2_lowp+0x344>
.byte 196,193,100,89,216 // vmulps %ymm8,%ymm3,%ymm3
.byte 72,173 // lods %ds:(%rsi),%rax
.byte 255,224 // jmpq *%rax
@@ -19191,7 +19191,7 @@ _sk_load_tables_rgb_u16_be_hsw:
.byte 197,105,108,192 // vpunpcklqdq %xmm0,%xmm2,%xmm8
.byte 197,241,108,195 // vpunpcklqdq %xmm3,%xmm1,%xmm0
.byte 197,241,109,203 // vpunpckhqdq %xmm3,%xmm1,%xmm1
- .byte 197,249,111,29,96,244,2,0 // vmovdqa 0x2f460(%rip),%xmm3 # 39290 <_sk_srcover_bgra_8888_sse2_lowp+0xb8c>
+ .byte 197,249,111,29,192,245,2,0 // vmovdqa 0x2f5c0(%rip),%xmm3 # 393f0 <_sk_srcover_bgra_8888_sse2_lowp+0xb7c>
.byte 197,249,219,195 // vpand %xmm3,%xmm0,%xmm0
.byte 196,98,125,51,200 // vpmovzxwd %xmm0,%ymm9
.byte 196,65,45,118,210 // vpcmpeqd %ymm10,%ymm10,%ymm10
@@ -19211,7 +19211,7 @@ _sk_load_tables_rgb_u16_be_hsw:
.byte 196,226,125,51,219 // vpmovzxwd %xmm3,%ymm3
.byte 196,226,45,146,20,152 // vgatherdps %ymm10,(%rax,%ymm3,4),%ymm2
.byte 72,173 // lods %ds:(%rsi),%rax
- .byte 196,226,125,24,29,37,235,2,0 // vbroadcastss 0x2eb25(%rip),%ymm3 # 389b4 <_sk_srcover_bgra_8888_sse2_lowp+0x2b0>
+ .byte 196,226,125,24,29,149,236,2,0 // vbroadcastss 0x2ec95(%rip),%ymm3 # 38b24 <_sk_srcover_bgra_8888_sse2_lowp+0x2b0>
.byte 255,224 // jmpq *%rax
.byte 196,129,121,110,4,72 // vmovd (%r8,%r9,2),%xmm0
.byte 196,65,49,239,201 // vpxor %xmm9,%xmm9,%xmm9
@@ -19280,7 +19280,7 @@ HIDDEN _sk_byte_tables_hsw
FUNCTION(_sk_byte_tables_hsw)
_sk_byte_tables_hsw:
.byte 72,173 // lods %ds:(%rsi),%rax
- .byte 196,98,125,24,5,26,234,2,0 // vbroadcastss 0x2ea1a(%rip),%ymm8 # 389e0 <_sk_srcover_bgra_8888_sse2_lowp+0x2dc>
+ .byte 196,98,125,24,5,138,235,2,0 // vbroadcastss 0x2eb8a(%rip),%ymm8 # 38b50 <_sk_srcover_bgra_8888_sse2_lowp+0x2dc>
.byte 196,193,124,89,192 // vmulps %ymm8,%ymm0,%ymm0
.byte 197,125,91,200 // vcvtps2dq %ymm0,%ymm9
.byte 196,65,249,126,200 // vmovq %xmm9,%r8
@@ -19410,7 +19410,7 @@ _sk_byte_tables_hsw:
.byte 67,15,182,4,17 // movzbl (%r9,%r10,1),%eax
.byte 196,194,125,49,193 // vpmovzxbd %xmm9,%ymm0
.byte 197,252,91,192 // vcvtdq2ps %ymm0,%ymm0
- .byte 196,98,125,24,5,236,231,2,0 // vbroadcastss 0x2e7ec(%rip),%ymm8 # 38a2c <_sk_srcover_bgra_8888_sse2_lowp+0x328>
+ .byte 196,98,125,24,5,92,233,2,0 // vbroadcastss 0x2e95c(%rip),%ymm8 # 38b9c <_sk_srcover_bgra_8888_sse2_lowp+0x328>
.byte 196,193,124,89,192 // vmulps %ymm8,%ymm0,%ymm0
.byte 196,226,125,49,201 // vpmovzxbd %xmm1,%ymm1
.byte 197,252,91,201 // vcvtdq2ps %ymm1,%ymm1
@@ -19532,7 +19532,7 @@ _sk_byte_tables_rgb_hsw:
.byte 67,15,182,4,17 // movzbl (%r9,%r10,1),%eax
.byte 196,194,125,49,193 // vpmovzxbd %xmm9,%ymm0
.byte 197,252,91,192 // vcvtdq2ps %ymm0,%ymm0
- .byte 196,98,125,24,5,192,229,2,0 // vbroadcastss 0x2e5c0(%rip),%ymm8 # 38a2c <_sk_srcover_bgra_8888_sse2_lowp+0x328>
+ .byte 196,98,125,24,5,48,231,2,0 // vbroadcastss 0x2e730(%rip),%ymm8 # 38b9c <_sk_srcover_bgra_8888_sse2_lowp+0x328>
.byte 196,193,124,89,192 // vmulps %ymm8,%ymm0,%ymm0
.byte 196,226,125,49,201 // vpmovzxbd %xmm1,%ymm1
.byte 197,252,91,201 // vcvtdq2ps %ymm1,%ymm1
@@ -19635,33 +19635,33 @@ _sk_parametric_r_hsw:
.byte 196,66,125,168,211 // vfmadd213ps %ymm11,%ymm0,%ymm10
.byte 196,226,125,24,0 // vbroadcastss (%rax),%ymm0
.byte 196,65,124,91,218 // vcvtdq2ps %ymm10,%ymm11
- .byte 196,98,125,24,37,166,228,2,0 // vbroadcastss 0x2e4a6(%rip),%ymm12 # 38a4c <_sk_srcover_bgra_8888_sse2_lowp+0x348>
- .byte 196,98,125,24,45,161,228,2,0 // vbroadcastss 0x2e4a1(%rip),%ymm13 # 38a50 <_sk_srcover_bgra_8888_sse2_lowp+0x34c>
+ .byte 196,98,125,24,37,22,230,2,0 // vbroadcastss 0x2e616(%rip),%ymm12 # 38bbc <_sk_srcover_bgra_8888_sse2_lowp+0x348>
+ .byte 196,98,125,24,45,17,230,2,0 // vbroadcastss 0x2e611(%rip),%ymm13 # 38bc0 <_sk_srcover_bgra_8888_sse2_lowp+0x34c>
.byte 196,65,44,84,213 // vandps %ymm13,%ymm10,%ymm10
- .byte 196,98,125,24,45,243,227,2,0 // vbroadcastss 0x2e3f3(%rip),%ymm13 # 389b0 <_sk_srcover_bgra_8888_sse2_lowp+0x2ac>
+ .byte 196,98,125,24,45,99,229,2,0 // vbroadcastss 0x2e563(%rip),%ymm13 # 38b20 <_sk_srcover_bgra_8888_sse2_lowp+0x2ac>
.byte 196,65,44,86,213 // vorps %ymm13,%ymm10,%ymm10
- .byte 196,98,125,24,45,137,228,2,0 // vbroadcastss 0x2e489(%rip),%ymm13 # 38a54 <_sk_srcover_bgra_8888_sse2_lowp+0x350>
+ .byte 196,98,125,24,45,249,229,2,0 // vbroadcastss 0x2e5f9(%rip),%ymm13 # 38bc4 <_sk_srcover_bgra_8888_sse2_lowp+0x350>
.byte 196,66,37,184,236 // vfmadd231ps %ymm12,%ymm11,%ymm13
- .byte 196,98,125,24,29,127,228,2,0 // vbroadcastss 0x2e47f(%rip),%ymm11 # 38a58 <_sk_srcover_bgra_8888_sse2_lowp+0x354>
+ .byte 196,98,125,24,29,239,229,2,0 // vbroadcastss 0x2e5ef(%rip),%ymm11 # 38bc8 <_sk_srcover_bgra_8888_sse2_lowp+0x354>
.byte 196,66,45,172,221 // vfnmadd213ps %ymm13,%ymm10,%ymm11
- .byte 196,98,125,24,37,117,228,2,0 // vbroadcastss 0x2e475(%rip),%ymm12 # 38a5c <_sk_srcover_bgra_8888_sse2_lowp+0x358>
+ .byte 196,98,125,24,37,229,229,2,0 // vbroadcastss 0x2e5e5(%rip),%ymm12 # 38bcc <_sk_srcover_bgra_8888_sse2_lowp+0x358>
.byte 196,65,44,88,212 // vaddps %ymm12,%ymm10,%ymm10
- .byte 196,98,125,24,37,107,228,2,0 // vbroadcastss 0x2e46b(%rip),%ymm12 # 38a60 <_sk_srcover_bgra_8888_sse2_lowp+0x35c>
+ .byte 196,98,125,24,37,219,229,2,0 // vbroadcastss 0x2e5db(%rip),%ymm12 # 38bd0 <_sk_srcover_bgra_8888_sse2_lowp+0x35c>
.byte 196,65,28,94,210 // vdivps %ymm10,%ymm12,%ymm10
.byte 196,65,36,92,210 // vsubps %ymm10,%ymm11,%ymm10
.byte 196,193,124,89,194 // vmulps %ymm10,%ymm0,%ymm0
.byte 196,99,125,8,208,1 // vroundps $0x1,%ymm0,%ymm10
.byte 196,65,124,92,210 // vsubps %ymm10,%ymm0,%ymm10
- .byte 196,98,125,24,29,76,228,2,0 // vbroadcastss 0x2e44c(%rip),%ymm11 # 38a64 <_sk_srcover_bgra_8888_sse2_lowp+0x360>
+ .byte 196,98,125,24,29,188,229,2,0 // vbroadcastss 0x2e5bc(%rip),%ymm11 # 38bd4 <_sk_srcover_bgra_8888_sse2_lowp+0x360>
.byte 196,193,124,88,195 // vaddps %ymm11,%ymm0,%ymm0
- .byte 196,98,125,24,29,66,228,2,0 // vbroadcastss 0x2e442(%rip),%ymm11 # 38a68 <_sk_srcover_bgra_8888_sse2_lowp+0x364>
+ .byte 196,98,125,24,29,178,229,2,0 // vbroadcastss 0x2e5b2(%rip),%ymm11 # 38bd8 <_sk_srcover_bgra_8888_sse2_lowp+0x364>
.byte 196,98,45,172,216 // vfnmadd213ps %ymm0,%ymm10,%ymm11
- .byte 196,226,125,24,5,56,228,2,0 // vbroadcastss 0x2e438(%rip),%ymm0 # 38a6c <_sk_srcover_bgra_8888_sse2_lowp+0x368>
+ .byte 196,226,125,24,5,168,229,2,0 // vbroadcastss 0x2e5a8(%rip),%ymm0 # 38bdc <_sk_srcover_bgra_8888_sse2_lowp+0x368>
.byte 196,193,124,92,194 // vsubps %ymm10,%ymm0,%ymm0
- .byte 196,98,125,24,21,46,228,2,0 // vbroadcastss 0x2e42e(%rip),%ymm10 # 38a70 <_sk_srcover_bgra_8888_sse2_lowp+0x36c>
+ .byte 196,98,125,24,21,158,229,2,0 // vbroadcastss 0x2e59e(%rip),%ymm10 # 38be0 <_sk_srcover_bgra_8888_sse2_lowp+0x36c>
.byte 197,172,94,192 // vdivps %ymm0,%ymm10,%ymm0
.byte 197,164,88,192 // vaddps %ymm0,%ymm11,%ymm0
- .byte 196,98,125,24,21,33,228,2,0 // vbroadcastss 0x2e421(%rip),%ymm10 # 38a74 <_sk_srcover_bgra_8888_sse2_lowp+0x370>
+ .byte 196,98,125,24,21,145,229,2,0 // vbroadcastss 0x2e591(%rip),%ymm10 # 38be4 <_sk_srcover_bgra_8888_sse2_lowp+0x370>
.byte 196,193,124,89,194 // vmulps %ymm10,%ymm0,%ymm0
.byte 197,253,91,192 // vcvtps2dq %ymm0,%ymm0
.byte 196,98,125,24,80,20 // vbroadcastss 0x14(%rax),%ymm10
@@ -19669,7 +19669,7 @@ _sk_parametric_r_hsw:
.byte 196,195,125,74,193,128 // vblendvps %ymm8,%ymm9,%ymm0,%ymm0
.byte 196,65,60,87,192 // vxorps %ymm8,%ymm8,%ymm8
.byte 196,193,124,95,192 // vmaxps %ymm8,%ymm0,%ymm0
- .byte 196,98,125,24,5,53,227,2,0 // vbroadcastss 0x2e335(%rip),%ymm8 # 389b4 <_sk_srcover_bgra_8888_sse2_lowp+0x2b0>
+ .byte 196,98,125,24,5,165,228,2,0 // vbroadcastss 0x2e4a5(%rip),%ymm8 # 38b24 <_sk_srcover_bgra_8888_sse2_lowp+0x2b0>
.byte 196,193,124,93,192 // vminps %ymm8,%ymm0,%ymm0
.byte 72,173 // lods %ds:(%rsi),%rax
.byte 255,224 // jmpq *%rax
@@ -19689,33 +19689,33 @@ _sk_parametric_g_hsw:
.byte 196,66,117,168,211 // vfmadd213ps %ymm11,%ymm1,%ymm10
.byte 196,226,125,24,8 // vbroadcastss (%rax),%ymm1
.byte 196,65,124,91,218 // vcvtdq2ps %ymm10,%ymm11
- .byte 196,98,125,24,37,129,227,2,0 // vbroadcastss 0x2e381(%rip),%ymm12 # 38a4c <_sk_srcover_bgra_8888_sse2_lowp+0x348>
- .byte 196,98,125,24,45,124,227,2,0 // vbroadcastss 0x2e37c(%rip),%ymm13 # 38a50 <_sk_srcover_bgra_8888_sse2_lowp+0x34c>
+ .byte 196,98,125,24,37,241,228,2,0 // vbroadcastss 0x2e4f1(%rip),%ymm12 # 38bbc <_sk_srcover_bgra_8888_sse2_lowp+0x348>
+ .byte 196,98,125,24,45,236,228,2,0 // vbroadcastss 0x2e4ec(%rip),%ymm13 # 38bc0 <_sk_srcover_bgra_8888_sse2_lowp+0x34c>
.byte 196,65,44,84,213 // vandps %ymm13,%ymm10,%ymm10
- .byte 196,98,125,24,45,206,226,2,0 // vbroadcastss 0x2e2ce(%rip),%ymm13 # 389b0 <_sk_srcover_bgra_8888_sse2_lowp+0x2ac>
+ .byte 196,98,125,24,45,62,228,2,0 // vbroadcastss 0x2e43e(%rip),%ymm13 # 38b20 <_sk_srcover_bgra_8888_sse2_lowp+0x2ac>
.byte 196,65,44,86,213 // vorps %ymm13,%ymm10,%ymm10
- .byte 196,98,125,24,45,100,227,2,0 // vbroadcastss 0x2e364(%rip),%ymm13 # 38a54 <_sk_srcover_bgra_8888_sse2_lowp+0x350>
+ .byte 196,98,125,24,45,212,228,2,0 // vbroadcastss 0x2e4d4(%rip),%ymm13 # 38bc4 <_sk_srcover_bgra_8888_sse2_lowp+0x350>
.byte 196,66,37,184,236 // vfmadd231ps %ymm12,%ymm11,%ymm13
- .byte 196,98,125,24,29,90,227,2,0 // vbroadcastss 0x2e35a(%rip),%ymm11 # 38a58 <_sk_srcover_bgra_8888_sse2_lowp+0x354>
+ .byte 196,98,125,24,29,202,228,2,0 // vbroadcastss 0x2e4ca(%rip),%ymm11 # 38bc8 <_sk_srcover_bgra_8888_sse2_lowp+0x354>
.byte 196,66,45,172,221 // vfnmadd213ps %ymm13,%ymm10,%ymm11
- .byte 196,98,125,24,37,80,227,2,0 // vbroadcastss 0x2e350(%rip),%ymm12 # 38a5c <_sk_srcover_bgra_8888_sse2_lowp+0x358>
+ .byte 196,98,125,24,37,192,228,2,0 // vbroadcastss 0x2e4c0(%rip),%ymm12 # 38bcc <_sk_srcover_bgra_8888_sse2_lowp+0x358>
.byte 196,65,44,88,212 // vaddps %ymm12,%ymm10,%ymm10
- .byte 196,98,125,24,37,70,227,2,0 // vbroadcastss 0x2e346(%rip),%ymm12 # 38a60 <_sk_srcover_bgra_8888_sse2_lowp+0x35c>
+ .byte 196,98,125,24,37,182,228,2,0 // vbroadcastss 0x2e4b6(%rip),%ymm12 # 38bd0 <_sk_srcover_bgra_8888_sse2_lowp+0x35c>
.byte 196,65,28,94,210 // vdivps %ymm10,%ymm12,%ymm10
.byte 196,65,36,92,210 // vsubps %ymm10,%ymm11,%ymm10
.byte 196,193,116,89,202 // vmulps %ymm10,%ymm1,%ymm1
.byte 196,99,125,8,209,1 // vroundps $0x1,%ymm1,%ymm10
.byte 196,65,116,92,210 // vsubps %ymm10,%ymm1,%ymm10
- .byte 196,98,125,24,29,39,227,2,0 // vbroadcastss 0x2e327(%rip),%ymm11 # 38a64 <_sk_srcover_bgra_8888_sse2_lowp+0x360>
+ .byte 196,98,125,24,29,151,228,2,0 // vbroadcastss 0x2e497(%rip),%ymm11 # 38bd4 <_sk_srcover_bgra_8888_sse2_lowp+0x360>
.byte 196,193,116,88,203 // vaddps %ymm11,%ymm1,%ymm1
- .byte 196,98,125,24,29,29,227,2,0 // vbroadcastss 0x2e31d(%rip),%ymm11 # 38a68 <_sk_srcover_bgra_8888_sse2_lowp+0x364>
+ .byte 196,98,125,24,29,141,228,2,0 // vbroadcastss 0x2e48d(%rip),%ymm11 # 38bd8 <_sk_srcover_bgra_8888_sse2_lowp+0x364>
.byte 196,98,45,172,217 // vfnmadd213ps %ymm1,%ymm10,%ymm11
- .byte 196,226,125,24,13,19,227,2,0 // vbroadcastss 0x2e313(%rip),%ymm1 # 38a6c <_sk_srcover_bgra_8888_sse2_lowp+0x368>
+ .byte 196,226,125,24,13,131,228,2,0 // vbroadcastss 0x2e483(%rip),%ymm1 # 38bdc <_sk_srcover_bgra_8888_sse2_lowp+0x368>
.byte 196,193,116,92,202 // vsubps %ymm10,%ymm1,%ymm1
- .byte 196,98,125,24,21,9,227,2,0 // vbroadcastss 0x2e309(%rip),%ymm10 # 38a70 <_sk_srcover_bgra_8888_sse2_lowp+0x36c>
+ .byte 196,98,125,24,21,121,228,2,0 // vbroadcastss 0x2e479(%rip),%ymm10 # 38be0 <_sk_srcover_bgra_8888_sse2_lowp+0x36c>
.byte 197,172,94,201 // vdivps %ymm1,%ymm10,%ymm1
.byte 197,164,88,201 // vaddps %ymm1,%ymm11,%ymm1
- .byte 196,98,125,24,21,252,226,2,0 // vbroadcastss 0x2e2fc(%rip),%ymm10 # 38a74 <_sk_srcover_bgra_8888_sse2_lowp+0x370>
+ .byte 196,98,125,24,21,108,228,2,0 // vbroadcastss 0x2e46c(%rip),%ymm10 # 38be4 <_sk_srcover_bgra_8888_sse2_lowp+0x370>
.byte 196,193,116,89,202 // vmulps %ymm10,%ymm1,%ymm1
.byte 197,253,91,201 // vcvtps2dq %ymm1,%ymm1
.byte 196,98,125,24,80,20 // vbroadcastss 0x14(%rax),%ymm10
@@ -19723,7 +19723,7 @@ _sk_parametric_g_hsw:
.byte 196,195,117,74,201,128 // vblendvps %ymm8,%ymm9,%ymm1,%ymm1
.byte 196,65,60,87,192 // vxorps %ymm8,%ymm8,%ymm8
.byte 196,193,116,95,200 // vmaxps %ymm8,%ymm1,%ymm1
- .byte 196,98,125,24,5,16,226,2,0 // vbroadcastss 0x2e210(%rip),%ymm8 # 389b4 <_sk_srcover_bgra_8888_sse2_lowp+0x2b0>
+ .byte 196,98,125,24,5,128,227,2,0 // vbroadcastss 0x2e380(%rip),%ymm8 # 38b24 <_sk_srcover_bgra_8888_sse2_lowp+0x2b0>
.byte 196,193,116,93,200 // vminps %ymm8,%ymm1,%ymm1
.byte 72,173 // lods %ds:(%rsi),%rax
.byte 255,224 // jmpq *%rax
@@ -19743,33 +19743,33 @@ _sk_parametric_b_hsw:
.byte 196,66,109,168,211 // vfmadd213ps %ymm11,%ymm2,%ymm10
.byte 196,226,125,24,16 // vbroadcastss (%rax),%ymm2
.byte 196,65,124,91,218 // vcvtdq2ps %ymm10,%ymm11
- .byte 196,98,125,24,37,92,226,2,0 // vbroadcastss 0x2e25c(%rip),%ymm12 # 38a4c <_sk_srcover_bgra_8888_sse2_lowp+0x348>
- .byte 196,98,125,24,45,87,226,2,0 // vbroadcastss 0x2e257(%rip),%ymm13 # 38a50 <_sk_srcover_bgra_8888_sse2_lowp+0x34c>
+ .byte 196,98,125,24,37,204,227,2,0 // vbroadcastss 0x2e3cc(%rip),%ymm12 # 38bbc <_sk_srcover_bgra_8888_sse2_lowp+0x348>
+ .byte 196,98,125,24,45,199,227,2,0 // vbroadcastss 0x2e3c7(%rip),%ymm13 # 38bc0 <_sk_srcover_bgra_8888_sse2_lowp+0x34c>
.byte 196,65,44,84,213 // vandps %ymm13,%ymm10,%ymm10
- .byte 196,98,125,24,45,169,225,2,0 // vbroadcastss 0x2e1a9(%rip),%ymm13 # 389b0 <_sk_srcover_bgra_8888_sse2_lowp+0x2ac>
+ .byte 196,98,125,24,45,25,227,2,0 // vbroadcastss 0x2e319(%rip),%ymm13 # 38b20 <_sk_srcover_bgra_8888_sse2_lowp+0x2ac>
.byte 196,65,44,86,213 // vorps %ymm13,%ymm10,%ymm10
- .byte 196,98,125,24,45,63,226,2,0 // vbroadcastss 0x2e23f(%rip),%ymm13 # 38a54 <_sk_srcover_bgra_8888_sse2_lowp+0x350>
+ .byte 196,98,125,24,45,175,227,2,0 // vbroadcastss 0x2e3af(%rip),%ymm13 # 38bc4 <_sk_srcover_bgra_8888_sse2_lowp+0x350>
.byte 196,66,37,184,236 // vfmadd231ps %ymm12,%ymm11,%ymm13
- .byte 196,98,125,24,29,53,226,2,0 // vbroadcastss 0x2e235(%rip),%ymm11 # 38a58 <_sk_srcover_bgra_8888_sse2_lowp+0x354>
+ .byte 196,98,125,24,29,165,227,2,0 // vbroadcastss 0x2e3a5(%rip),%ymm11 # 38bc8 <_sk_srcover_bgra_8888_sse2_lowp+0x354>
.byte 196,66,45,172,221 // vfnmadd213ps %ymm13,%ymm10,%ymm11
- .byte 196,98,125,24,37,43,226,2,0 // vbroadcastss 0x2e22b(%rip),%ymm12 # 38a5c <_sk_srcover_bgra_8888_sse2_lowp+0x358>
+ .byte 196,98,125,24,37,155,227,2,0 // vbroadcastss 0x2e39b(%rip),%ymm12 # 38bcc <_sk_srcover_bgra_8888_sse2_lowp+0x358>
.byte 196,65,44,88,212 // vaddps %ymm12,%ymm10,%ymm10
- .byte 196,98,125,24,37,33,226,2,0 // vbroadcastss 0x2e221(%rip),%ymm12 # 38a60 <_sk_srcover_bgra_8888_sse2_lowp+0x35c>
+ .byte 196,98,125,24,37,145,227,2,0 // vbroadcastss 0x2e391(%rip),%ymm12 # 38bd0 <_sk_srcover_bgra_8888_sse2_lowp+0x35c>
.byte 196,65,28,94,210 // vdivps %ymm10,%ymm12,%ymm10
.byte 196,65,36,92,210 // vsubps %ymm10,%ymm11,%ymm10
.byte 196,193,108,89,210 // vmulps %ymm10,%ymm2,%ymm2
.byte 196,99,125,8,210,1 // vroundps $0x1,%ymm2,%ymm10
.byte 196,65,108,92,210 // vsubps %ymm10,%ymm2,%ymm10
- .byte 196,98,125,24,29,2,226,2,0 // vbroadcastss 0x2e202(%rip),%ymm11 # 38a64 <_sk_srcover_bgra_8888_sse2_lowp+0x360>
+ .byte 196,98,125,24,29,114,227,2,0 // vbroadcastss 0x2e372(%rip),%ymm11 # 38bd4 <_sk_srcover_bgra_8888_sse2_lowp+0x360>
.byte 196,193,108,88,211 // vaddps %ymm11,%ymm2,%ymm2
- .byte 196,98,125,24,29,248,225,2,0 // vbroadcastss 0x2e1f8(%rip),%ymm11 # 38a68 <_sk_srcover_bgra_8888_sse2_lowp+0x364>
+ .byte 196,98,125,24,29,104,227,2,0 // vbroadcastss 0x2e368(%rip),%ymm11 # 38bd8 <_sk_srcover_bgra_8888_sse2_lowp+0x364>
.byte 196,98,45,172,218 // vfnmadd213ps %ymm2,%ymm10,%ymm11
- .byte 196,226,125,24,21,238,225,2,0 // vbroadcastss 0x2e1ee(%rip),%ymm2 # 38a6c <_sk_srcover_bgra_8888_sse2_lowp+0x368>
+ .byte 196,226,125,24,21,94,227,2,0 // vbroadcastss 0x2e35e(%rip),%ymm2 # 38bdc <_sk_srcover_bgra_8888_sse2_lowp+0x368>
.byte 196,193,108,92,210 // vsubps %ymm10,%ymm2,%ymm2
- .byte 196,98,125,24,21,228,225,2,0 // vbroadcastss 0x2e1e4(%rip),%ymm10 # 38a70 <_sk_srcover_bgra_8888_sse2_lowp+0x36c>
+ .byte 196,98,125,24,21,84,227,2,0 // vbroadcastss 0x2e354(%rip),%ymm10 # 38be0 <_sk_srcover_bgra_8888_sse2_lowp+0x36c>
.byte 197,172,94,210 // vdivps %ymm2,%ymm10,%ymm2
.byte 197,164,88,210 // vaddps %ymm2,%ymm11,%ymm2
- .byte 196,98,125,24,21,215,225,2,0 // vbroadcastss 0x2e1d7(%rip),%ymm10 # 38a74 <_sk_srcover_bgra_8888_sse2_lowp+0x370>
+ .byte 196,98,125,24,21,71,227,2,0 // vbroadcastss 0x2e347(%rip),%ymm10 # 38be4 <_sk_srcover_bgra_8888_sse2_lowp+0x370>
.byte 196,193,108,89,210 // vmulps %ymm10,%ymm2,%ymm2
.byte 197,253,91,210 // vcvtps2dq %ymm2,%ymm2
.byte 196,98,125,24,80,20 // vbroadcastss 0x14(%rax),%ymm10
@@ -19777,7 +19777,7 @@ _sk_parametric_b_hsw:
.byte 196,195,109,74,209,128 // vblendvps %ymm8,%ymm9,%ymm2,%ymm2
.byte 196,65,60,87,192 // vxorps %ymm8,%ymm8,%ymm8
.byte 196,193,108,95,208 // vmaxps %ymm8,%ymm2,%ymm2
- .byte 196,98,125,24,5,235,224,2,0 // vbroadcastss 0x2e0eb(%rip),%ymm8 # 389b4 <_sk_srcover_bgra_8888_sse2_lowp+0x2b0>
+ .byte 196,98,125,24,5,91,226,2,0 // vbroadcastss 0x2e25b(%rip),%ymm8 # 38b24 <_sk_srcover_bgra_8888_sse2_lowp+0x2b0>
.byte 196,193,108,93,208 // vminps %ymm8,%ymm2,%ymm2
.byte 72,173 // lods %ds:(%rsi),%rax
.byte 255,224 // jmpq *%rax
@@ -19797,33 +19797,33 @@ _sk_parametric_a_hsw:
.byte 196,66,101,168,211 // vfmadd213ps %ymm11,%ymm3,%ymm10
.byte 196,226,125,24,24 // vbroadcastss (%rax),%ymm3
.byte 196,65,124,91,218 // vcvtdq2ps %ymm10,%ymm11
- .byte 196,98,125,24,37,55,225,2,0 // vbroadcastss 0x2e137(%rip),%ymm12 # 38a4c <_sk_srcover_bgra_8888_sse2_lowp+0x348>
- .byte 196,98,125,24,45,50,225,2,0 // vbroadcastss 0x2e132(%rip),%ymm13 # 38a50 <_sk_srcover_bgra_8888_sse2_lowp+0x34c>
+ .byte 196,98,125,24,37,167,226,2,0 // vbroadcastss 0x2e2a7(%rip),%ymm12 # 38bbc <_sk_srcover_bgra_8888_sse2_lowp+0x348>
+ .byte 196,98,125,24,45,162,226,2,0 // vbroadcastss 0x2e2a2(%rip),%ymm13 # 38bc0 <_sk_srcover_bgra_8888_sse2_lowp+0x34c>
.byte 196,65,44,84,213 // vandps %ymm13,%ymm10,%ymm10
- .byte 196,98,125,24,45,132,224,2,0 // vbroadcastss 0x2e084(%rip),%ymm13 # 389b0 <_sk_srcover_bgra_8888_sse2_lowp+0x2ac>
+ .byte 196,98,125,24,45,244,225,2,0 // vbroadcastss 0x2e1f4(%rip),%ymm13 # 38b20 <_sk_srcover_bgra_8888_sse2_lowp+0x2ac>
.byte 196,65,44,86,213 // vorps %ymm13,%ymm10,%ymm10
- .byte 196,98,125,24,45,26,225,2,0 // vbroadcastss 0x2e11a(%rip),%ymm13 # 38a54 <_sk_srcover_bgra_8888_sse2_lowp+0x350>
+ .byte 196,98,125,24,45,138,226,2,0 // vbroadcastss 0x2e28a(%rip),%ymm13 # 38bc4 <_sk_srcover_bgra_8888_sse2_lowp+0x350>
.byte 196,66,37,184,236 // vfmadd231ps %ymm12,%ymm11,%ymm13
- .byte 196,98,125,24,29,16,225,2,0 // vbroadcastss 0x2e110(%rip),%ymm11 # 38a58 <_sk_srcover_bgra_8888_sse2_lowp+0x354>
+ .byte 196,98,125,24,29,128,226,2,0 // vbroadcastss 0x2e280(%rip),%ymm11 # 38bc8 <_sk_srcover_bgra_8888_sse2_lowp+0x354>
.byte 196,66,45,172,221 // vfnmadd213ps %ymm13,%ymm10,%ymm11
- .byte 196,98,125,24,37,6,225,2,0 // vbroadcastss 0x2e106(%rip),%ymm12 # 38a5c <_sk_srcover_bgra_8888_sse2_lowp+0x358>
+ .byte 196,98,125,24,37,118,226,2,0 // vbroadcastss 0x2e276(%rip),%ymm12 # 38bcc <_sk_srcover_bgra_8888_sse2_lowp+0x358>
.byte 196,65,44,88,212 // vaddps %ymm12,%ymm10,%ymm10
- .byte 196,98,125,24,37,252,224,2,0 // vbroadcastss 0x2e0fc(%rip),%ymm12 # 38a60 <_sk_srcover_bgra_8888_sse2_lowp+0x35c>
+ .byte 196,98,125,24,37,108,226,2,0 // vbroadcastss 0x2e26c(%rip),%ymm12 # 38bd0 <_sk_srcover_bgra_8888_sse2_lowp+0x35c>
.byte 196,65,28,94,210 // vdivps %ymm10,%ymm12,%ymm10
.byte 196,65,36,92,210 // vsubps %ymm10,%ymm11,%ymm10
.byte 196,193,100,89,218 // vmulps %ymm10,%ymm3,%ymm3
.byte 196,99,125,8,211,1 // vroundps $0x1,%ymm3,%ymm10
.byte 196,65,100,92,210 // vsubps %ymm10,%ymm3,%ymm10
- .byte 196,98,125,24,29,221,224,2,0 // vbroadcastss 0x2e0dd(%rip),%ymm11 # 38a64 <_sk_srcover_bgra_8888_sse2_lowp+0x360>
+ .byte 196,98,125,24,29,77,226,2,0 // vbroadcastss 0x2e24d(%rip),%ymm11 # 38bd4 <_sk_srcover_bgra_8888_sse2_lowp+0x360>
.byte 196,193,100,88,219 // vaddps %ymm11,%ymm3,%ymm3
- .byte 196,98,125,24,29,211,224,2,0 // vbroadcastss 0x2e0d3(%rip),%ymm11 # 38a68 <_sk_srcover_bgra_8888_sse2_lowp+0x364>
+ .byte 196,98,125,24,29,67,226,2,0 // vbroadcastss 0x2e243(%rip),%ymm11 # 38bd8 <_sk_srcover_bgra_8888_sse2_lowp+0x364>
.byte 196,98,45,172,219 // vfnmadd213ps %ymm3,%ymm10,%ymm11
- .byte 196,226,125,24,29,201,224,2,0 // vbroadcastss 0x2e0c9(%rip),%ymm3 # 38a6c <_sk_srcover_bgra_8888_sse2_lowp+0x368>
+ .byte 196,226,125,24,29,57,226,2,0 // vbroadcastss 0x2e239(%rip),%ymm3 # 38bdc <_sk_srcover_bgra_8888_sse2_lowp+0x368>
.byte 196,193,100,92,218 // vsubps %ymm10,%ymm3,%ymm3
- .byte 196,98,125,24,21,191,224,2,0 // vbroadcastss 0x2e0bf(%rip),%ymm10 # 38a70 <_sk_srcover_bgra_8888_sse2_lowp+0x36c>
+ .byte 196,98,125,24,21,47,226,2,0 // vbroadcastss 0x2e22f(%rip),%ymm10 # 38be0 <_sk_srcover_bgra_8888_sse2_lowp+0x36c>
.byte 197,172,94,219 // vdivps %ymm3,%ymm10,%ymm3
.byte 197,164,88,219 // vaddps %ymm3,%ymm11,%ymm3
- .byte 196,98,125,24,21,178,224,2,0 // vbroadcastss 0x2e0b2(%rip),%ymm10 # 38a74 <_sk_srcover_bgra_8888_sse2_lowp+0x370>
+ .byte 196,98,125,24,21,34,226,2,0 // vbroadcastss 0x2e222(%rip),%ymm10 # 38be4 <_sk_srcover_bgra_8888_sse2_lowp+0x370>
.byte 196,193,100,89,218 // vmulps %ymm10,%ymm3,%ymm3
.byte 197,253,91,219 // vcvtps2dq %ymm3,%ymm3
.byte 196,98,125,24,80,20 // vbroadcastss 0x14(%rax),%ymm10
@@ -19831,7 +19831,7 @@ _sk_parametric_a_hsw:
.byte 196,195,101,74,217,128 // vblendvps %ymm8,%ymm9,%ymm3,%ymm3
.byte 196,65,60,87,192 // vxorps %ymm8,%ymm8,%ymm8
.byte 196,193,100,95,216 // vmaxps %ymm8,%ymm3,%ymm3
- .byte 196,98,125,24,5,198,223,2,0 // vbroadcastss 0x2dfc6(%rip),%ymm8 # 389b4 <_sk_srcover_bgra_8888_sse2_lowp+0x2b0>
+ .byte 196,98,125,24,5,54,225,2,0 // vbroadcastss 0x2e136(%rip),%ymm8 # 38b24 <_sk_srcover_bgra_8888_sse2_lowp+0x2b0>
.byte 196,193,100,93,216 // vminps %ymm8,%ymm3,%ymm3
.byte 72,173 // lods %ds:(%rsi),%rax
.byte 255,224 // jmpq *%rax
@@ -19850,35 +19850,35 @@ _sk_gamma_hsw:
.byte 197,252,40,241 // vmovaps %ymm1,%ymm6
.byte 72,173 // lods %ds:(%rsi),%rax
.byte 197,124,91,208 // vcvtdq2ps %ymm0,%ymm10
- .byte 196,98,125,24,29,29,224,2,0 // vbroadcastss 0x2e01d(%rip),%ymm11 # 38a4c <_sk_srcover_bgra_8888_sse2_lowp+0x348>
- .byte 196,226,125,24,45,24,224,2,0 // vbroadcastss 0x2e018(%rip),%ymm5 # 38a50 <_sk_srcover_bgra_8888_sse2_lowp+0x34c>
+ .byte 196,98,125,24,29,141,225,2,0 // vbroadcastss 0x2e18d(%rip),%ymm11 # 38bbc <_sk_srcover_bgra_8888_sse2_lowp+0x348>
+ .byte 196,226,125,24,45,136,225,2,0 // vbroadcastss 0x2e188(%rip),%ymm5 # 38bc0 <_sk_srcover_bgra_8888_sse2_lowp+0x34c>
.byte 197,124,84,205 // vandps %ymm5,%ymm0,%ymm9
- .byte 196,226,125,24,37,107,223,2,0 // vbroadcastss 0x2df6b(%rip),%ymm4 # 389b0 <_sk_srcover_bgra_8888_sse2_lowp+0x2ac>
+ .byte 196,226,125,24,37,219,224,2,0 // vbroadcastss 0x2e0db(%rip),%ymm4 # 38b20 <_sk_srcover_bgra_8888_sse2_lowp+0x2ac>
.byte 197,52,86,228 // vorps %ymm4,%ymm9,%ymm12
- .byte 196,98,125,24,45,2,224,2,0 // vbroadcastss 0x2e002(%rip),%ymm13 # 38a54 <_sk_srcover_bgra_8888_sse2_lowp+0x350>
+ .byte 196,98,125,24,45,114,225,2,0 // vbroadcastss 0x2e172(%rip),%ymm13 # 38bc4 <_sk_srcover_bgra_8888_sse2_lowp+0x350>
.byte 196,66,37,168,213 // vfmadd213ps %ymm13,%ymm11,%ymm10
- .byte 196,98,125,24,13,248,223,2,0 // vbroadcastss 0x2dff8(%rip),%ymm9 # 38a58 <_sk_srcover_bgra_8888_sse2_lowp+0x354>
+ .byte 196,98,125,24,13,104,225,2,0 // vbroadcastss 0x2e168(%rip),%ymm9 # 38bc8 <_sk_srcover_bgra_8888_sse2_lowp+0x354>
.byte 196,66,29,188,209 // vfnmadd231ps %ymm9,%ymm12,%ymm10
.byte 197,124,91,246 // vcvtdq2ps %ymm6,%ymm14
.byte 196,66,37,168,245 // vfmadd213ps %ymm13,%ymm11,%ymm14
.byte 197,124,91,255 // vcvtdq2ps %ymm7,%ymm15
.byte 196,66,37,168,253 // vfmadd213ps %ymm13,%ymm11,%ymm15
- .byte 196,98,125,24,29,220,223,2,0 // vbroadcastss 0x2dfdc(%rip),%ymm11 # 38a5c <_sk_srcover_bgra_8888_sse2_lowp+0x358>
+ .byte 196,98,125,24,29,76,225,2,0 // vbroadcastss 0x2e14c(%rip),%ymm11 # 38bcc <_sk_srcover_bgra_8888_sse2_lowp+0x358>
.byte 196,65,28,88,227 // vaddps %ymm11,%ymm12,%ymm12
- .byte 196,98,125,24,45,210,223,2,0 // vbroadcastss 0x2dfd2(%rip),%ymm13 # 38a60 <_sk_srcover_bgra_8888_sse2_lowp+0x35c>
+ .byte 196,98,125,24,45,66,225,2,0 // vbroadcastss 0x2e142(%rip),%ymm13 # 38bd0 <_sk_srcover_bgra_8888_sse2_lowp+0x35c>
.byte 196,65,20,94,228 // vdivps %ymm12,%ymm13,%ymm12
.byte 196,65,44,92,212 // vsubps %ymm12,%ymm10,%ymm10
.byte 196,98,125,24,32 // vbroadcastss (%rax),%ymm12
.byte 196,65,44,89,212 // vmulps %ymm12,%ymm10,%ymm10
.byte 196,67,125,8,194,1 // vroundps $0x1,%ymm10,%ymm8
.byte 196,65,44,92,192 // vsubps %ymm8,%ymm10,%ymm8
- .byte 196,226,125,24,21,174,223,2,0 // vbroadcastss 0x2dfae(%rip),%ymm2 # 38a64 <_sk_srcover_bgra_8888_sse2_lowp+0x360>
+ .byte 196,226,125,24,21,30,225,2,0 // vbroadcastss 0x2e11e(%rip),%ymm2 # 38bd4 <_sk_srcover_bgra_8888_sse2_lowp+0x360>
.byte 197,44,88,210 // vaddps %ymm2,%ymm10,%ymm10
- .byte 196,226,125,24,29,165,223,2,0 // vbroadcastss 0x2dfa5(%rip),%ymm3 # 38a68 <_sk_srcover_bgra_8888_sse2_lowp+0x364>
+ .byte 196,226,125,24,29,21,225,2,0 // vbroadcastss 0x2e115(%rip),%ymm3 # 38bd8 <_sk_srcover_bgra_8888_sse2_lowp+0x364>
.byte 196,98,61,188,211 // vfnmadd231ps %ymm3,%ymm8,%ymm10
- .byte 196,226,125,24,13,155,223,2,0 // vbroadcastss 0x2df9b(%rip),%ymm1 # 38a6c <_sk_srcover_bgra_8888_sse2_lowp+0x368>
+ .byte 196,226,125,24,13,11,225,2,0 // vbroadcastss 0x2e10b(%rip),%ymm1 # 38bdc <_sk_srcover_bgra_8888_sse2_lowp+0x368>
.byte 196,65,116,92,192 // vsubps %ymm8,%ymm1,%ymm8
- .byte 196,226,125,24,5,145,223,2,0 // vbroadcastss 0x2df91(%rip),%ymm0 # 38a70 <_sk_srcover_bgra_8888_sse2_lowp+0x36c>
+ .byte 196,226,125,24,5,1,225,2,0 // vbroadcastss 0x2e101(%rip),%ymm0 # 38be0 <_sk_srcover_bgra_8888_sse2_lowp+0x36c>
.byte 196,65,124,94,192 // vdivps %ymm8,%ymm0,%ymm8
.byte 196,65,44,88,192 // vaddps %ymm8,%ymm10,%ymm8
.byte 197,204,84,245 // vandps %ymm5,%ymm6,%ymm6
@@ -19909,7 +19909,7 @@ _sk_gamma_hsw:
.byte 197,244,92,205 // vsubps %ymm5,%ymm1,%ymm1
.byte 197,252,94,193 // vdivps %ymm1,%ymm0,%ymm0
.byte 197,236,88,192 // vaddps %ymm0,%ymm2,%ymm0
- .byte 196,226,125,24,13,2,223,2,0 // vbroadcastss 0x2df02(%rip),%ymm1 # 38a74 <_sk_srcover_bgra_8888_sse2_lowp+0x370>
+ .byte 196,226,125,24,13,114,224,2,0 // vbroadcastss 0x2e072(%rip),%ymm1 # 38be4 <_sk_srcover_bgra_8888_sse2_lowp+0x370>
.byte 197,188,89,209 // vmulps %ymm1,%ymm8,%ymm2
.byte 197,204,89,217 // vmulps %ymm1,%ymm6,%ymm3
.byte 197,252,89,225 // vmulps %ymm1,%ymm0,%ymm4
@@ -19929,26 +19929,26 @@ HIDDEN _sk_lab_to_xyz_hsw
.globl _sk_lab_to_xyz_hsw
FUNCTION(_sk_lab_to_xyz_hsw)
_sk_lab_to_xyz_hsw:
- .byte 196,98,125,24,5,192,222,2,0 // vbroadcastss 0x2dec0(%rip),%ymm8 # 38a78 <_sk_srcover_bgra_8888_sse2_lowp+0x374>
- .byte 196,98,125,24,13,31,222,2,0 // vbroadcastss 0x2de1f(%rip),%ymm9 # 389e0 <_sk_srcover_bgra_8888_sse2_lowp+0x2dc>
- .byte 196,98,125,24,21,178,222,2,0 // vbroadcastss 0x2deb2(%rip),%ymm10 # 38a7c <_sk_srcover_bgra_8888_sse2_lowp+0x378>
+ .byte 196,98,125,24,5,48,224,2,0 // vbroadcastss 0x2e030(%rip),%ymm8 # 38be8 <_sk_srcover_bgra_8888_sse2_lowp+0x374>
+ .byte 196,98,125,24,13,143,223,2,0 // vbroadcastss 0x2df8f(%rip),%ymm9 # 38b50 <_sk_srcover_bgra_8888_sse2_lowp+0x2dc>
+ .byte 196,98,125,24,21,34,224,2,0 // vbroadcastss 0x2e022(%rip),%ymm10 # 38bec <_sk_srcover_bgra_8888_sse2_lowp+0x378>
.byte 196,194,53,168,202 // vfmadd213ps %ymm10,%ymm9,%ymm1
.byte 196,194,53,168,210 // vfmadd213ps %ymm10,%ymm9,%ymm2
- .byte 196,98,125,24,13,163,222,2,0 // vbroadcastss 0x2dea3(%rip),%ymm9 # 38a80 <_sk_srcover_bgra_8888_sse2_lowp+0x37c>
+ .byte 196,98,125,24,13,19,224,2,0 // vbroadcastss 0x2e013(%rip),%ymm9 # 38bf0 <_sk_srcover_bgra_8888_sse2_lowp+0x37c>
.byte 196,66,125,184,200 // vfmadd231ps %ymm8,%ymm0,%ymm9
- .byte 196,226,125,24,5,153,222,2,0 // vbroadcastss 0x2de99(%rip),%ymm0 # 38a84 <_sk_srcover_bgra_8888_sse2_lowp+0x380>
+ .byte 196,226,125,24,5,9,224,2,0 // vbroadcastss 0x2e009(%rip),%ymm0 # 38bf4 <_sk_srcover_bgra_8888_sse2_lowp+0x380>
.byte 197,180,89,192 // vmulps %ymm0,%ymm9,%ymm0
- .byte 196,98,125,24,5,144,222,2,0 // vbroadcastss 0x2de90(%rip),%ymm8 # 38a88 <_sk_srcover_bgra_8888_sse2_lowp+0x384>
+ .byte 196,98,125,24,5,0,224,2,0 // vbroadcastss 0x2e000(%rip),%ymm8 # 38bf8 <_sk_srcover_bgra_8888_sse2_lowp+0x384>
.byte 196,98,117,168,192 // vfmadd213ps %ymm0,%ymm1,%ymm8
- .byte 196,98,125,24,13,134,222,2,0 // vbroadcastss 0x2de86(%rip),%ymm9 # 38a8c <_sk_srcover_bgra_8888_sse2_lowp+0x388>
+ .byte 196,98,125,24,13,246,223,2,0 // vbroadcastss 0x2dff6(%rip),%ymm9 # 38bfc <_sk_srcover_bgra_8888_sse2_lowp+0x388>
.byte 196,98,109,172,200 // vfnmadd213ps %ymm0,%ymm2,%ymm9
.byte 196,193,60,89,200 // vmulps %ymm8,%ymm8,%ymm1
.byte 197,188,89,201 // vmulps %ymm1,%ymm8,%ymm1
- .byte 196,226,125,24,21,115,222,2,0 // vbroadcastss 0x2de73(%rip),%ymm2 # 38a90 <_sk_srcover_bgra_8888_sse2_lowp+0x38c>
+ .byte 196,226,125,24,21,227,223,2,0 // vbroadcastss 0x2dfe3(%rip),%ymm2 # 38c00 <_sk_srcover_bgra_8888_sse2_lowp+0x38c>
.byte 197,108,194,209,1 // vcmpltps %ymm1,%ymm2,%ymm10
- .byte 196,98,125,24,29,105,222,2,0 // vbroadcastss 0x2de69(%rip),%ymm11 # 38a94 <_sk_srcover_bgra_8888_sse2_lowp+0x390>
+ .byte 196,98,125,24,29,217,223,2,0 // vbroadcastss 0x2dfd9(%rip),%ymm11 # 38c04 <_sk_srcover_bgra_8888_sse2_lowp+0x390>
.byte 196,65,60,88,195 // vaddps %ymm11,%ymm8,%ymm8
- .byte 196,98,125,24,37,95,222,2,0 // vbroadcastss 0x2de5f(%rip),%ymm12 # 38a98 <_sk_srcover_bgra_8888_sse2_lowp+0x394>
+ .byte 196,98,125,24,37,207,223,2,0 // vbroadcastss 0x2dfcf(%rip),%ymm12 # 38c08 <_sk_srcover_bgra_8888_sse2_lowp+0x394>
.byte 196,65,60,89,196 // vmulps %ymm12,%ymm8,%ymm8
.byte 196,99,61,74,193,160 // vblendvps %ymm10,%ymm1,%ymm8,%ymm8
.byte 197,252,89,200 // vmulps %ymm0,%ymm0,%ymm1
@@ -19963,9 +19963,9 @@ _sk_lab_to_xyz_hsw:
.byte 196,65,52,88,203 // vaddps %ymm11,%ymm9,%ymm9
.byte 196,65,52,89,204 // vmulps %ymm12,%ymm9,%ymm9
.byte 196,227,53,74,208,32 // vblendvps %ymm2,%ymm0,%ymm9,%ymm2
- .byte 196,226,125,24,5,20,222,2,0 // vbroadcastss 0x2de14(%rip),%ymm0 # 38a9c <_sk_srcover_bgra_8888_sse2_lowp+0x398>
+ .byte 196,226,125,24,5,132,223,2,0 // vbroadcastss 0x2df84(%rip),%ymm0 # 38c0c <_sk_srcover_bgra_8888_sse2_lowp+0x398>
.byte 197,188,89,192 // vmulps %ymm0,%ymm8,%ymm0
- .byte 196,98,125,24,5,11,222,2,0 // vbroadcastss 0x2de0b(%rip),%ymm8 # 38aa0 <_sk_srcover_bgra_8888_sse2_lowp+0x39c>
+ .byte 196,98,125,24,5,123,223,2,0 // vbroadcastss 0x2df7b(%rip),%ymm8 # 38c10 <_sk_srcover_bgra_8888_sse2_lowp+0x39c>
.byte 196,193,108,89,208 // vmulps %ymm8,%ymm2,%ymm2
.byte 72,173 // lods %ds:(%rsi),%rax
.byte 255,224 // jmpq *%rax
@@ -19983,10 +19983,10 @@ _sk_load_a8_hsw:
.byte 72,133,255 // test %rdi,%rdi
.byte 117,52 // jne acea <_sk_load_a8_hsw+0x4c>
.byte 196,130,121,48,4,24 // vpmovzxbw (%r8,%r11,1),%xmm0
- .byte 197,249,219,5,204,229,2,0 // vpand 0x2e5cc(%rip),%xmm0,%xmm0 # 39290 <_sk_srcover_bgra_8888_sse2_lowp+0xb8c>
+ .byte 197,249,219,5,44,231,2,0 // vpand 0x2e72c(%rip),%xmm0,%xmm0 # 393f0 <_sk_srcover_bgra_8888_sse2_lowp+0xb7c>
.byte 196,226,125,51,192 // vpmovzxwd %xmm0,%ymm0
.byte 197,252,91,192 // vcvtdq2ps %ymm0,%ymm0
- .byte 196,226,125,24,13,86,221,2,0 // vbroadcastss 0x2dd56(%rip),%ymm1 # 38a2c <_sk_srcover_bgra_8888_sse2_lowp+0x328>
+ .byte 196,226,125,24,13,198,222,2,0 // vbroadcastss 0x2dec6(%rip),%ymm1 # 38b9c <_sk_srcover_bgra_8888_sse2_lowp+0x328>
.byte 197,252,89,217 // vmulps %ymm1,%ymm0,%ymm3
.byte 72,173 // lods %ds:(%rsi),%rax
.byte 197,252,87,192 // vxorps %ymm0,%ymm0,%ymm0
@@ -20059,10 +20059,10 @@ _sk_load_a8_dst_hsw:
.byte 72,133,255 // test %rdi,%rdi
.byte 117,52 // jne ade8 <_sk_load_a8_dst_hsw+0x4c>
.byte 196,130,121,48,36,24 // vpmovzxbw (%r8,%r11,1),%xmm4
- .byte 197,217,219,37,206,228,2,0 // vpand 0x2e4ce(%rip),%xmm4,%xmm4 # 39290 <_sk_srcover_bgra_8888_sse2_lowp+0xb8c>
+ .byte 197,217,219,37,46,230,2,0 // vpand 0x2e62e(%rip),%xmm4,%xmm4 # 393f0 <_sk_srcover_bgra_8888_sse2_lowp+0xb7c>
.byte 196,226,125,51,228 // vpmovzxwd %xmm4,%ymm4
.byte 197,252,91,228 // vcvtdq2ps %ymm4,%ymm4
- .byte 196,226,125,24,45,88,220,2,0 // vbroadcastss 0x2dc58(%rip),%ymm5 # 38a2c <_sk_srcover_bgra_8888_sse2_lowp+0x328>
+ .byte 196,226,125,24,45,200,221,2,0 // vbroadcastss 0x2ddc8(%rip),%ymm5 # 38b9c <_sk_srcover_bgra_8888_sse2_lowp+0x328>
.byte 197,220,89,253 // vmulps %ymm5,%ymm4,%ymm7
.byte 72,173 // lods %ds:(%rsi),%rax
.byte 197,220,87,228 // vxorps %ymm4,%ymm4,%ymm4
@@ -20110,7 +20110,7 @@ _sk_load_a8_dst_hsw:
.byte 255 // (bad)
.byte 255 // (bad)
.byte 255 // (bad)
- .byte 232,255,255,255,221 // callq ffffffffde00ae90 <_sk_srcover_bgra_8888_sse2_lowp+0xffffffffddfd278c>
+ .byte 232,255,255,255,221 // callq ffffffffde00ae90 <_sk_srcover_bgra_8888_sse2_lowp+0xffffffffddfd261c>
.byte 255 // (bad)
.byte 255 // (bad)
.byte 255,210 // callq *%rdx
@@ -20177,7 +20177,7 @@ _sk_gather_a8_hsw:
.byte 196,227,121,32,192,7 // vpinsrb $0x7,%eax,%xmm0,%xmm0
.byte 196,226,125,49,192 // vpmovzxbd %xmm0,%ymm0
.byte 197,252,91,192 // vcvtdq2ps %ymm0,%ymm0
- .byte 196,226,125,24,13,159,218,2,0 // vbroadcastss 0x2da9f(%rip),%ymm1 # 38a2c <_sk_srcover_bgra_8888_sse2_lowp+0x328>
+ .byte 196,226,125,24,13,15,220,2,0 // vbroadcastss 0x2dc0f(%rip),%ymm1 # 38b9c <_sk_srcover_bgra_8888_sse2_lowp+0x328>
.byte 197,252,89,217 // vmulps %ymm1,%ymm0,%ymm3
.byte 72,173 // lods %ds:(%rsi),%rax
.byte 197,252,87,192 // vxorps %ymm0,%ymm0,%ymm0
@@ -20199,7 +20199,7 @@ _sk_store_a8_hsw:
.byte 77,15,175,193 // imul %r9,%r8
.byte 76,3,0 // add (%rax),%r8
.byte 76,99,218 // movslq %edx,%r11
- .byte 196,98,125,24,5,28,218,2,0 // vbroadcastss 0x2da1c(%rip),%ymm8 # 389e0 <_sk_srcover_bgra_8888_sse2_lowp+0x2dc>
+ .byte 196,98,125,24,5,140,219,2,0 // vbroadcastss 0x2db8c(%rip),%ymm8 # 38b50 <_sk_srcover_bgra_8888_sse2_lowp+0x2dc>
.byte 196,65,100,89,192 // vmulps %ymm8,%ymm3,%ymm8
.byte 196,65,125,91,192 // vcvtps2dq %ymm8,%ymm8
.byte 196,67,125,25,193,1 // vextractf128 $0x1,%ymm8,%xmm9
@@ -20224,13 +20224,13 @@ _sk_store_a8_hsw:
.byte 196,3,121,20,4,24,0 // vpextrb $0x0,%xmm8,(%r8,%r11,1)
.byte 235,202 // jmp afe8 <_sk_store_a8_hsw+0x40>
.byte 196,3,121,20,68,24,2,4 // vpextrb $0x4,%xmm8,0x2(%r8,%r11,1)
- .byte 196,98,57,0,5,113,226,2,0 // vpshufb 0x2e271(%rip),%xmm8,%xmm8 # 392a0 <_sk_srcover_bgra_8888_sse2_lowp+0xb9c>
+ .byte 196,98,57,0,5,209,227,2,0 // vpshufb 0x2e3d1(%rip),%xmm8,%xmm8 # 39400 <_sk_srcover_bgra_8888_sse2_lowp+0xb8c>
.byte 196,3,121,21,4,24,0 // vpextrw $0x0,%xmm8,(%r8,%r11,1)
.byte 235,176 // jmp afe8 <_sk_store_a8_hsw+0x40>
.byte 196,3,121,20,68,24,6,12 // vpextrb $0xc,%xmm8,0x6(%r8,%r11,1)
.byte 196,3,121,20,68,24,5,10 // vpextrb $0xa,%xmm8,0x5(%r8,%r11,1)
.byte 196,3,121,20,68,24,4,8 // vpextrb $0x8,%xmm8,0x4(%r8,%r11,1)
- .byte 196,98,57,0,5,87,226,2,0 // vpshufb 0x2e257(%rip),%xmm8,%xmm8 # 392b0 <_sk_srcover_bgra_8888_sse2_lowp+0xbac>
+ .byte 196,98,57,0,5,183,227,2,0 // vpshufb 0x2e3b7(%rip),%xmm8,%xmm8 # 39410 <_sk_srcover_bgra_8888_sse2_lowp+0xb9c>
.byte 196,1,121,126,4,24 // vmovd %xmm8,(%r8,%r11,1)
.byte 235,135 // jmp afe8 <_sk_store_a8_hsw+0x40>
.byte 15,31,0 // nopl (%rax)
@@ -20267,13 +20267,13 @@ _sk_load_g8_hsw:
.byte 72,133,255 // test %rdi,%rdi
.byte 117,57 // jne b0d1 <_sk_load_g8_hsw+0x51>
.byte 196,130,121,48,4,24 // vpmovzxbw (%r8,%r11,1),%xmm0
- .byte 197,249,219,5,234,225,2,0 // vpand 0x2e1ea(%rip),%xmm0,%xmm0 # 39290 <_sk_srcover_bgra_8888_sse2_lowp+0xb8c>
+ .byte 197,249,219,5,74,227,2,0 // vpand 0x2e34a(%rip),%xmm0,%xmm0 # 393f0 <_sk_srcover_bgra_8888_sse2_lowp+0xb7c>
.byte 196,226,125,51,192 // vpmovzxwd %xmm0,%ymm0
.byte 197,252,91,192 // vcvtdq2ps %ymm0,%ymm0
- .byte 196,226,125,24,13,116,217,2,0 // vbroadcastss 0x2d974(%rip),%ymm1 # 38a2c <_sk_srcover_bgra_8888_sse2_lowp+0x328>
+ .byte 196,226,125,24,13,228,218,2,0 // vbroadcastss 0x2dae4(%rip),%ymm1 # 38b9c <_sk_srcover_bgra_8888_sse2_lowp+0x328>
.byte 197,252,89,193 // vmulps %ymm1,%ymm0,%ymm0
.byte 72,173 // lods %ds:(%rsi),%rax
- .byte 196,226,125,24,29,237,216,2,0 // vbroadcastss 0x2d8ed(%rip),%ymm3 # 389b4 <_sk_srcover_bgra_8888_sse2_lowp+0x2b0>
+ .byte 196,226,125,24,29,93,218,2,0 // vbroadcastss 0x2da5d(%rip),%ymm3 # 38b24 <_sk_srcover_bgra_8888_sse2_lowp+0x2b0>
.byte 197,252,40,200 // vmovaps %ymm0,%ymm1
.byte 197,252,40,208 // vmovaps %ymm0,%ymm2
.byte 255,224 // jmpq *%rax
@@ -20318,7 +20318,7 @@ _sk_load_g8_hsw:
.byte 255 // (bad)
.byte 255 // (bad)
.byte 255 // (bad)
- .byte 233,255,255,255,222 // jmpq ffffffffdf00b178 <_sk_srcover_bgra_8888_sse2_lowp+0xffffffffdefd2a74>
+ .byte 233,255,255,255,222 // jmpq ffffffffdf00b178 <_sk_srcover_bgra_8888_sse2_lowp+0xffffffffdefd2904>
.byte 255 // (bad)
.byte 255 // (bad)
.byte 255,211 // callq *%rbx
@@ -20342,13 +20342,13 @@ _sk_load_g8_dst_hsw:
.byte 72,133,255 // test %rdi,%rdi
.byte 117,57 // jne b1d5 <_sk_load_g8_dst_hsw+0x51>
.byte 196,130,121,48,36,24 // vpmovzxbw (%r8,%r11,1),%xmm4
- .byte 197,217,219,37,230,224,2,0 // vpand 0x2e0e6(%rip),%xmm4,%xmm4 # 39290 <_sk_srcover_bgra_8888_sse2_lowp+0xb8c>
+ .byte 197,217,219,37,70,226,2,0 // vpand 0x2e246(%rip),%xmm4,%xmm4 # 393f0 <_sk_srcover_bgra_8888_sse2_lowp+0xb7c>
.byte 196,226,125,51,228 // vpmovzxwd %xmm4,%ymm4
.byte 197,252,91,228 // vcvtdq2ps %ymm4,%ymm4
- .byte 196,226,125,24,45,112,216,2,0 // vbroadcastss 0x2d870(%rip),%ymm5 # 38a2c <_sk_srcover_bgra_8888_sse2_lowp+0x328>
+ .byte 196,226,125,24,45,224,217,2,0 // vbroadcastss 0x2d9e0(%rip),%ymm5 # 38b9c <_sk_srcover_bgra_8888_sse2_lowp+0x328>
.byte 197,220,89,229 // vmulps %ymm5,%ymm4,%ymm4
.byte 72,173 // lods %ds:(%rsi),%rax
- .byte 196,226,125,24,61,233,215,2,0 // vbroadcastss 0x2d7e9(%rip),%ymm7 # 389b4 <_sk_srcover_bgra_8888_sse2_lowp+0x2b0>
+ .byte 196,226,125,24,61,89,217,2,0 // vbroadcastss 0x2d959(%rip),%ymm7 # 38b24 <_sk_srcover_bgra_8888_sse2_lowp+0x2b0>
.byte 197,252,40,236 // vmovaps %ymm4,%ymm5
.byte 197,252,40,244 // vmovaps %ymm4,%ymm6
.byte 255,224 // jmpq *%rax
@@ -20393,7 +20393,7 @@ _sk_load_g8_dst_hsw:
.byte 255 // (bad)
.byte 255 // (bad)
.byte 255 // (bad)
- .byte 233,255,255,255,222 // jmpq ffffffffdf00b27c <_sk_srcover_bgra_8888_sse2_lowp+0xffffffffdefd2b78>
+ .byte 233,255,255,255,222 // jmpq ffffffffdf00b27c <_sk_srcover_bgra_8888_sse2_lowp+0xffffffffdefd2a08>
.byte 255 // (bad)
.byte 255 // (bad)
.byte 255,211 // callq *%rbx
@@ -20460,10 +20460,10 @@ _sk_gather_g8_hsw:
.byte 196,227,121,32,192,7 // vpinsrb $0x7,%eax,%xmm0,%xmm0
.byte 196,226,125,49,192 // vpmovzxbd %xmm0,%ymm0
.byte 197,252,91,192 // vcvtdq2ps %ymm0,%ymm0
- .byte 196,226,125,24,13,179,214,2,0 // vbroadcastss 0x2d6b3(%rip),%ymm1 # 38a2c <_sk_srcover_bgra_8888_sse2_lowp+0x328>
+ .byte 196,226,125,24,13,35,216,2,0 // vbroadcastss 0x2d823(%rip),%ymm1 # 38b9c <_sk_srcover_bgra_8888_sse2_lowp+0x328>
.byte 197,252,89,193 // vmulps %ymm1,%ymm0,%ymm0
.byte 72,173 // lods %ds:(%rsi),%rax
- .byte 196,226,125,24,29,44,214,2,0 // vbroadcastss 0x2d62c(%rip),%ymm3 # 389b4 <_sk_srcover_bgra_8888_sse2_lowp+0x2b0>
+ .byte 196,226,125,24,29,156,215,2,0 // vbroadcastss 0x2d79c(%rip),%ymm3 # 38b24 <_sk_srcover_bgra_8888_sse2_lowp+0x2b0>
.byte 197,252,40,200 // vmovaps %ymm0,%ymm1
.byte 197,252,40,208 // vmovaps %ymm0,%ymm2
.byte 91 // pop %rbx
@@ -20487,23 +20487,23 @@ _sk_load_565_hsw:
.byte 117,114 // jne b426 <_sk_load_565_hsw+0x8d>
.byte 196,129,122,111,4,88 // vmovdqu (%r8,%r11,2),%xmm0
.byte 196,226,125,51,208 // vpmovzxwd %xmm0,%ymm2
- .byte 196,226,125,88,5,104,214,2,0 // vpbroadcastd 0x2d668(%rip),%ymm0 # 38a30 <_sk_srcover_bgra_8888_sse2_lowp+0x32c>
+ .byte 196,226,125,88,5,216,215,2,0 // vpbroadcastd 0x2d7d8(%rip),%ymm0 # 38ba0 <_sk_srcover_bgra_8888_sse2_lowp+0x32c>
.byte 197,237,219,192 // vpand %ymm0,%ymm2,%ymm0
.byte 197,252,91,192 // vcvtdq2ps %ymm0,%ymm0
- .byte 196,226,125,24,13,91,214,2,0 // vbroadcastss 0x2d65b(%rip),%ymm1 # 38a34 <_sk_srcover_bgra_8888_sse2_lowp+0x330>
+ .byte 196,226,125,24,13,203,215,2,0 // vbroadcastss 0x2d7cb(%rip),%ymm1 # 38ba4 <_sk_srcover_bgra_8888_sse2_lowp+0x330>
.byte 197,252,89,193 // vmulps %ymm1,%ymm0,%ymm0
- .byte 196,226,125,88,13,82,214,2,0 // vpbroadcastd 0x2d652(%rip),%ymm1 # 38a38 <_sk_srcover_bgra_8888_sse2_lowp+0x334>
+ .byte 196,226,125,88,13,194,215,2,0 // vpbroadcastd 0x2d7c2(%rip),%ymm1 # 38ba8 <_sk_srcover_bgra_8888_sse2_lowp+0x334>
.byte 197,237,219,201 // vpand %ymm1,%ymm2,%ymm1
.byte 197,252,91,201 // vcvtdq2ps %ymm1,%ymm1
- .byte 196,226,125,24,29,69,214,2,0 // vbroadcastss 0x2d645(%rip),%ymm3 # 38a3c <_sk_srcover_bgra_8888_sse2_lowp+0x338>
+ .byte 196,226,125,24,29,181,215,2,0 // vbroadcastss 0x2d7b5(%rip),%ymm3 # 38bac <_sk_srcover_bgra_8888_sse2_lowp+0x338>
.byte 197,244,89,203 // vmulps %ymm3,%ymm1,%ymm1
- .byte 196,226,125,88,29,60,214,2,0 // vpbroadcastd 0x2d63c(%rip),%ymm3 # 38a40 <_sk_srcover_bgra_8888_sse2_lowp+0x33c>
+ .byte 196,226,125,88,29,172,215,2,0 // vpbroadcastd 0x2d7ac(%rip),%ymm3 # 38bb0 <_sk_srcover_bgra_8888_sse2_lowp+0x33c>
.byte 197,237,219,211 // vpand %ymm3,%ymm2,%ymm2
.byte 197,252,91,210 // vcvtdq2ps %ymm2,%ymm2
- .byte 196,226,125,24,29,47,214,2,0 // vbroadcastss 0x2d62f(%rip),%ymm3 # 38a44 <_sk_srcover_bgra_8888_sse2_lowp+0x340>
+ .byte 196,226,125,24,29,159,215,2,0 // vbroadcastss 0x2d79f(%rip),%ymm3 # 38bb4 <_sk_srcover_bgra_8888_sse2_lowp+0x340>
.byte 197,236,89,211 // vmulps %ymm3,%ymm2,%ymm2
.byte 72,173 // lods %ds:(%rsi),%rax
- .byte 196,226,125,24,29,144,213,2,0 // vbroadcastss 0x2d590(%rip),%ymm3 # 389b4 <_sk_srcover_bgra_8888_sse2_lowp+0x2b0>
+ .byte 196,226,125,24,29,0,215,2,0 // vbroadcastss 0x2d700(%rip),%ymm3 # 38b24 <_sk_srcover_bgra_8888_sse2_lowp+0x2b0>
.byte 255,224 // jmpq *%rax
.byte 65,137,249 // mov %edi,%r9d
.byte 65,128,225,7 // and $0x7,%r9b
@@ -20566,23 +20566,23 @@ _sk_load_565_dst_hsw:
.byte 117,114 // jne b551 <_sk_load_565_dst_hsw+0x8d>
.byte 196,129,122,111,36,88 // vmovdqu (%r8,%r11,2),%xmm4
.byte 196,226,125,51,244 // vpmovzxwd %xmm4,%ymm6
- .byte 196,226,125,88,37,61,213,2,0 // vpbroadcastd 0x2d53d(%rip),%ymm4 # 38a30 <_sk_srcover_bgra_8888_sse2_lowp+0x32c>
+ .byte 196,226,125,88,37,173,214,2,0 // vpbroadcastd 0x2d6ad(%rip),%ymm4 # 38ba0 <_sk_srcover_bgra_8888_sse2_lowp+0x32c>
.byte 197,205,219,228 // vpand %ymm4,%ymm6,%ymm4
.byte 197,252,91,228 // vcvtdq2ps %ymm4,%ymm4
- .byte 196,226,125,24,45,48,213,2,0 // vbroadcastss 0x2d530(%rip),%ymm5 # 38a34 <_sk_srcover_bgra_8888_sse2_lowp+0x330>
+ .byte 196,226,125,24,45,160,214,2,0 // vbroadcastss 0x2d6a0(%rip),%ymm5 # 38ba4 <_sk_srcover_bgra_8888_sse2_lowp+0x330>
.byte 197,220,89,229 // vmulps %ymm5,%ymm4,%ymm4
- .byte 196,226,125,88,45,39,213,2,0 // vpbroadcastd 0x2d527(%rip),%ymm5 # 38a38 <_sk_srcover_bgra_8888_sse2_lowp+0x334>
+ .byte 196,226,125,88,45,151,214,2,0 // vpbroadcastd 0x2d697(%rip),%ymm5 # 38ba8 <_sk_srcover_bgra_8888_sse2_lowp+0x334>
.byte 197,205,219,237 // vpand %ymm5,%ymm6,%ymm5
.byte 197,252,91,237 // vcvtdq2ps %ymm5,%ymm5
- .byte 196,226,125,24,61,26,213,2,0 // vbroadcastss 0x2d51a(%rip),%ymm7 # 38a3c <_sk_srcover_bgra_8888_sse2_lowp+0x338>
+ .byte 196,226,125,24,61,138,214,2,0 // vbroadcastss 0x2d68a(%rip),%ymm7 # 38bac <_sk_srcover_bgra_8888_sse2_lowp+0x338>
.byte 197,212,89,239 // vmulps %ymm7,%ymm5,%ymm5
- .byte 196,226,125,88,61,17,213,2,0 // vpbroadcastd 0x2d511(%rip),%ymm7 # 38a40 <_sk_srcover_bgra_8888_sse2_lowp+0x33c>
+ .byte 196,226,125,88,61,129,214,2,0 // vpbroadcastd 0x2d681(%rip),%ymm7 # 38bb0 <_sk_srcover_bgra_8888_sse2_lowp+0x33c>
.byte 197,205,219,247 // vpand %ymm7,%ymm6,%ymm6
.byte 197,252,91,246 // vcvtdq2ps %ymm6,%ymm6
- .byte 196,226,125,24,61,4,213,2,0 // vbroadcastss 0x2d504(%rip),%ymm7 # 38a44 <_sk_srcover_bgra_8888_sse2_lowp+0x340>
+ .byte 196,226,125,24,61,116,214,2,0 // vbroadcastss 0x2d674(%rip),%ymm7 # 38bb4 <_sk_srcover_bgra_8888_sse2_lowp+0x340>
.byte 197,204,89,247 // vmulps %ymm7,%ymm6,%ymm6
.byte 72,173 // lods %ds:(%rsi),%rax
- .byte 196,226,125,24,61,101,212,2,0 // vbroadcastss 0x2d465(%rip),%ymm7 # 389b4 <_sk_srcover_bgra_8888_sse2_lowp+0x2b0>
+ .byte 196,226,125,24,61,213,213,2,0 // vbroadcastss 0x2d5d5(%rip),%ymm7 # 38b24 <_sk_srcover_bgra_8888_sse2_lowp+0x2b0>
.byte 255,224 // jmpq *%rax
.byte 65,137,249 // mov %edi,%r9d
.byte 65,128,225,7 // and $0x7,%r9b
@@ -20688,23 +20688,23 @@ _sk_gather_565_hsw:
.byte 65,15,183,4,88 // movzwl (%r8,%rbx,2),%eax
.byte 197,249,196,192,7 // vpinsrw $0x7,%eax,%xmm0,%xmm0
.byte 196,226,125,51,208 // vpmovzxwd %xmm0,%ymm2
- .byte 196,226,125,88,5,88,211,2,0 // vpbroadcastd 0x2d358(%rip),%ymm0 # 38a30 <_sk_srcover_bgra_8888_sse2_lowp+0x32c>
+ .byte 196,226,125,88,5,200,212,2,0 // vpbroadcastd 0x2d4c8(%rip),%ymm0 # 38ba0 <_sk_srcover_bgra_8888_sse2_lowp+0x32c>
.byte 197,237,219,192 // vpand %ymm0,%ymm2,%ymm0
.byte 197,252,91,192 // vcvtdq2ps %ymm0,%ymm0
- .byte 196,226,125,24,13,75,211,2,0 // vbroadcastss 0x2d34b(%rip),%ymm1 # 38a34 <_sk_srcover_bgra_8888_sse2_lowp+0x330>
+ .byte 196,226,125,24,13,187,212,2,0 // vbroadcastss 0x2d4bb(%rip),%ymm1 # 38ba4 <_sk_srcover_bgra_8888_sse2_lowp+0x330>
.byte 197,252,89,193 // vmulps %ymm1,%ymm0,%ymm0
- .byte 196,226,125,88,13,66,211,2,0 // vpbroadcastd 0x2d342(%rip),%ymm1 # 38a38 <_sk_srcover_bgra_8888_sse2_lowp+0x334>
+ .byte 196,226,125,88,13,178,212,2,0 // vpbroadcastd 0x2d4b2(%rip),%ymm1 # 38ba8 <_sk_srcover_bgra_8888_sse2_lowp+0x334>
.byte 197,237,219,201 // vpand %ymm1,%ymm2,%ymm1
.byte 197,252,91,201 // vcvtdq2ps %ymm1,%ymm1
- .byte 196,226,125,24,29,53,211,2,0 // vbroadcastss 0x2d335(%rip),%ymm3 # 38a3c <_sk_srcover_bgra_8888_sse2_lowp+0x338>
+ .byte 196,226,125,24,29,165,212,2,0 // vbroadcastss 0x2d4a5(%rip),%ymm3 # 38bac <_sk_srcover_bgra_8888_sse2_lowp+0x338>
.byte 197,244,89,203 // vmulps %ymm3,%ymm1,%ymm1
- .byte 196,226,125,88,29,44,211,2,0 // vpbroadcastd 0x2d32c(%rip),%ymm3 # 38a40 <_sk_srcover_bgra_8888_sse2_lowp+0x33c>
+ .byte 196,226,125,88,29,156,212,2,0 // vpbroadcastd 0x2d49c(%rip),%ymm3 # 38bb0 <_sk_srcover_bgra_8888_sse2_lowp+0x33c>
.byte 197,237,219,211 // vpand %ymm3,%ymm2,%ymm2
.byte 197,252,91,210 // vcvtdq2ps %ymm2,%ymm2
- .byte 196,226,125,24,29,31,211,2,0 // vbroadcastss 0x2d31f(%rip),%ymm3 # 38a44 <_sk_srcover_bgra_8888_sse2_lowp+0x340>
+ .byte 196,226,125,24,29,143,212,2,0 // vbroadcastss 0x2d48f(%rip),%ymm3 # 38bb4 <_sk_srcover_bgra_8888_sse2_lowp+0x340>
.byte 197,236,89,211 // vmulps %ymm3,%ymm2,%ymm2
.byte 72,173 // lods %ds:(%rsi),%rax
- .byte 196,226,125,24,29,128,210,2,0 // vbroadcastss 0x2d280(%rip),%ymm3 # 389b4 <_sk_srcover_bgra_8888_sse2_lowp+0x2b0>
+ .byte 196,226,125,24,29,240,211,2,0 // vbroadcastss 0x2d3f0(%rip),%ymm3 # 38b24 <_sk_srcover_bgra_8888_sse2_lowp+0x2b0>
.byte 91 // pop %rbx
.byte 65,92 // pop %r12
.byte 65,94 // pop %r14
@@ -20722,11 +20722,11 @@ _sk_store_565_hsw:
.byte 77,1,192 // add %r8,%r8
.byte 76,3,0 // add (%rax),%r8
.byte 76,99,218 // movslq %edx,%r11
- .byte 196,98,125,24,5,72,211,2,0 // vbroadcastss 0x2d348(%rip),%ymm8 # 38aa4 <_sk_srcover_bgra_8888_sse2_lowp+0x3a0>
+ .byte 196,98,125,24,5,184,212,2,0 // vbroadcastss 0x2d4b8(%rip),%ymm8 # 38c14 <_sk_srcover_bgra_8888_sse2_lowp+0x3a0>
.byte 196,65,124,89,200 // vmulps %ymm8,%ymm0,%ymm9
.byte 196,65,125,91,201 // vcvtps2dq %ymm9,%ymm9
.byte 196,193,53,114,241,11 // vpslld $0xb,%ymm9,%ymm9
- .byte 196,98,125,24,21,51,211,2,0 // vbroadcastss 0x2d333(%rip),%ymm10 # 38aa8 <_sk_srcover_bgra_8888_sse2_lowp+0x3a4>
+ .byte 196,98,125,24,21,163,212,2,0 // vbroadcastss 0x2d4a3(%rip),%ymm10 # 38c18 <_sk_srcover_bgra_8888_sse2_lowp+0x3a4>
.byte 196,65,116,89,210 // vmulps %ymm10,%ymm1,%ymm10
.byte 196,65,125,91,210 // vcvtps2dq %ymm10,%ymm10
.byte 196,193,45,114,242,5 // vpslld $0x5,%ymm10,%ymm10
@@ -20778,7 +20778,7 @@ _sk_store_565_hsw:
.byte 255 // (bad)
.byte 255 // (bad)
.byte 255 // (bad)
- .byte 232,255,255,255,224 // callq ffffffffe100b828 <_sk_srcover_bgra_8888_sse2_lowp+0xffffffffe0fd3124>
+ .byte 232,255,255,255,224 // callq ffffffffe100b828 <_sk_srcover_bgra_8888_sse2_lowp+0xffffffffe0fd2fb4>
.byte 255 // (bad)
.byte 255 // (bad)
.byte 255 // .byte 0xff
@@ -20798,25 +20798,25 @@ _sk_load_4444_hsw:
.byte 15,133,138,0,0,0 // jne b8d5 <_sk_load_4444_hsw+0xa9>
.byte 196,129,122,111,4,88 // vmovdqu (%r8,%r11,2),%xmm0
.byte 196,226,125,51,216 // vpmovzxwd %xmm0,%ymm3
- .byte 196,226,125,88,5,77,210,2,0 // vpbroadcastd 0x2d24d(%rip),%ymm0 # 38aac <_sk_srcover_bgra_8888_sse2_lowp+0x3a8>
+ .byte 196,226,125,88,5,189,211,2,0 // vpbroadcastd 0x2d3bd(%rip),%ymm0 # 38c1c <_sk_srcover_bgra_8888_sse2_lowp+0x3a8>
.byte 197,229,219,192 // vpand %ymm0,%ymm3,%ymm0
.byte 197,252,91,192 // vcvtdq2ps %ymm0,%ymm0
- .byte 196,226,125,24,13,64,210,2,0 // vbroadcastss 0x2d240(%rip),%ymm1 # 38ab0 <_sk_srcover_bgra_8888_sse2_lowp+0x3ac>
+ .byte 196,226,125,24,13,176,211,2,0 // vbroadcastss 0x2d3b0(%rip),%ymm1 # 38c20 <_sk_srcover_bgra_8888_sse2_lowp+0x3ac>
.byte 197,252,89,193 // vmulps %ymm1,%ymm0,%ymm0
- .byte 196,226,125,88,13,55,210,2,0 // vpbroadcastd 0x2d237(%rip),%ymm1 # 38ab4 <_sk_srcover_bgra_8888_sse2_lowp+0x3b0>
+ .byte 196,226,125,88,13,167,211,2,0 // vpbroadcastd 0x2d3a7(%rip),%ymm1 # 38c24 <_sk_srcover_bgra_8888_sse2_lowp+0x3b0>
.byte 197,229,219,201 // vpand %ymm1,%ymm3,%ymm1
.byte 197,252,91,201 // vcvtdq2ps %ymm1,%ymm1
- .byte 196,226,125,24,21,42,210,2,0 // vbroadcastss 0x2d22a(%rip),%ymm2 # 38ab8 <_sk_srcover_bgra_8888_sse2_lowp+0x3b4>
+ .byte 196,226,125,24,21,154,211,2,0 // vbroadcastss 0x2d39a(%rip),%ymm2 # 38c28 <_sk_srcover_bgra_8888_sse2_lowp+0x3b4>
.byte 197,244,89,202 // vmulps %ymm2,%ymm1,%ymm1
- .byte 196,226,125,88,21,33,210,2,0 // vpbroadcastd 0x2d221(%rip),%ymm2 # 38abc <_sk_srcover_bgra_8888_sse2_lowp+0x3b8>
+ .byte 196,226,125,88,21,145,211,2,0 // vpbroadcastd 0x2d391(%rip),%ymm2 # 38c2c <_sk_srcover_bgra_8888_sse2_lowp+0x3b8>
.byte 197,229,219,210 // vpand %ymm2,%ymm3,%ymm2
.byte 197,252,91,210 // vcvtdq2ps %ymm2,%ymm2
- .byte 196,98,125,24,5,20,210,2,0 // vbroadcastss 0x2d214(%rip),%ymm8 # 38ac0 <_sk_srcover_bgra_8888_sse2_lowp+0x3bc>
+ .byte 196,98,125,24,5,132,211,2,0 // vbroadcastss 0x2d384(%rip),%ymm8 # 38c30 <_sk_srcover_bgra_8888_sse2_lowp+0x3bc>
.byte 196,193,108,89,208 // vmulps %ymm8,%ymm2,%ymm2
- .byte 196,98,125,88,5,10,210,2,0 // vpbroadcastd 0x2d20a(%rip),%ymm8 # 38ac4 <_sk_srcover_bgra_8888_sse2_lowp+0x3c0>
+ .byte 196,98,125,88,5,122,211,2,0 // vpbroadcastd 0x2d37a(%rip),%ymm8 # 38c34 <_sk_srcover_bgra_8888_sse2_lowp+0x3c0>
.byte 196,193,101,219,216 // vpand %ymm8,%ymm3,%ymm3
.byte 197,252,91,219 // vcvtdq2ps %ymm3,%ymm3
- .byte 196,98,125,24,5,252,209,2,0 // vbroadcastss 0x2d1fc(%rip),%ymm8 # 38ac8 <_sk_srcover_bgra_8888_sse2_lowp+0x3c4>
+ .byte 196,98,125,24,5,108,211,2,0 // vbroadcastss 0x2d36c(%rip),%ymm8 # 38c38 <_sk_srcover_bgra_8888_sse2_lowp+0x3c4>
.byte 196,193,100,89,216 // vmulps %ymm8,%ymm3,%ymm3
.byte 72,173 // lods %ds:(%rsi),%rax
.byte 255,224 // jmpq *%rax
@@ -20884,25 +20884,25 @@ _sk_load_4444_dst_hsw:
.byte 15,133,138,0,0,0 // jne ba21 <_sk_load_4444_dst_hsw+0xa9>
.byte 196,129,122,111,36,88 // vmovdqu (%r8,%r11,2),%xmm4
.byte 196,226,125,51,252 // vpmovzxwd %xmm4,%ymm7
- .byte 196,226,125,88,37,1,209,2,0 // vpbroadcastd 0x2d101(%rip),%ymm4 # 38aac <_sk_srcover_bgra_8888_sse2_lowp+0x3a8>
+ .byte 196,226,125,88,37,113,210,2,0 // vpbroadcastd 0x2d271(%rip),%ymm4 # 38c1c <_sk_srcover_bgra_8888_sse2_lowp+0x3a8>
.byte 197,197,219,228 // vpand %ymm4,%ymm7,%ymm4
.byte 197,252,91,228 // vcvtdq2ps %ymm4,%ymm4
- .byte 196,226,125,24,45,244,208,2,0 // vbroadcastss 0x2d0f4(%rip),%ymm5 # 38ab0 <_sk_srcover_bgra_8888_sse2_lowp+0x3ac>
+ .byte 196,226,125,24,45,100,210,2,0 // vbroadcastss 0x2d264(%rip),%ymm5 # 38c20 <_sk_srcover_bgra_8888_sse2_lowp+0x3ac>
.byte 197,220,89,229 // vmulps %ymm5,%ymm4,%ymm4
- .byte 196,226,125,88,45,235,208,2,0 // vpbroadcastd 0x2d0eb(%rip),%ymm5 # 38ab4 <_sk_srcover_bgra_8888_sse2_lowp+0x3b0>
+ .byte 196,226,125,88,45,91,210,2,0 // vpbroadcastd 0x2d25b(%rip),%ymm5 # 38c24 <_sk_srcover_bgra_8888_sse2_lowp+0x3b0>
.byte 197,197,219,237 // vpand %ymm5,%ymm7,%ymm5
.byte 197,252,91,237 // vcvtdq2ps %ymm5,%ymm5
- .byte 196,226,125,24,53,222,208,2,0 // vbroadcastss 0x2d0de(%rip),%ymm6 # 38ab8 <_sk_srcover_bgra_8888_sse2_lowp+0x3b4>
+ .byte 196,226,125,24,53,78,210,2,0 // vbroadcastss 0x2d24e(%rip),%ymm6 # 38c28 <_sk_srcover_bgra_8888_sse2_lowp+0x3b4>
.byte 197,212,89,238 // vmulps %ymm6,%ymm5,%ymm5
- .byte 196,226,125,88,53,213,208,2,0 // vpbroadcastd 0x2d0d5(%rip),%ymm6 # 38abc <_sk_srcover_bgra_8888_sse2_lowp+0x3b8>
+ .byte 196,226,125,88,53,69,210,2,0 // vpbroadcastd 0x2d245(%rip),%ymm6 # 38c2c <_sk_srcover_bgra_8888_sse2_lowp+0x3b8>
.byte 197,197,219,246 // vpand %ymm6,%ymm7,%ymm6
.byte 197,252,91,246 // vcvtdq2ps %ymm6,%ymm6
- .byte 196,98,125,24,5,200,208,2,0 // vbroadcastss 0x2d0c8(%rip),%ymm8 # 38ac0 <_sk_srcover_bgra_8888_sse2_lowp+0x3bc>
+ .byte 196,98,125,24,5,56,210,2,0 // vbroadcastss 0x2d238(%rip),%ymm8 # 38c30 <_sk_srcover_bgra_8888_sse2_lowp+0x3bc>
.byte 196,193,76,89,240 // vmulps %ymm8,%ymm6,%ymm6
- .byte 196,98,125,88,5,190,208,2,0 // vpbroadcastd 0x2d0be(%rip),%ymm8 # 38ac4 <_sk_srcover_bgra_8888_sse2_lowp+0x3c0>
+ .byte 196,98,125,88,5,46,210,2,0 // vpbroadcastd 0x2d22e(%rip),%ymm8 # 38c34 <_sk_srcover_bgra_8888_sse2_lowp+0x3c0>
.byte 196,193,69,219,248 // vpand %ymm8,%ymm7,%ymm7
.byte 197,252,91,255 // vcvtdq2ps %ymm7,%ymm7
- .byte 196,98,125,24,5,176,208,2,0 // vbroadcastss 0x2d0b0(%rip),%ymm8 # 38ac8 <_sk_srcover_bgra_8888_sse2_lowp+0x3c4>
+ .byte 196,98,125,24,5,32,210,2,0 // vbroadcastss 0x2d220(%rip),%ymm8 # 38c38 <_sk_srcover_bgra_8888_sse2_lowp+0x3c4>
.byte 196,193,68,89,248 // vmulps %ymm8,%ymm7,%ymm7
.byte 72,173 // lods %ds:(%rsi),%rax
.byte 255,224 // jmpq *%rax
@@ -21010,25 +21010,25 @@ _sk_gather_4444_hsw:
.byte 65,15,183,4,88 // movzwl (%r8,%rbx,2),%eax
.byte 197,249,196,192,7 // vpinsrw $0x7,%eax,%xmm0,%xmm0
.byte 196,226,125,51,216 // vpmovzxwd %xmm0,%ymm3
- .byte 196,226,125,88,5,0,207,2,0 // vpbroadcastd 0x2cf00(%rip),%ymm0 # 38aac <_sk_srcover_bgra_8888_sse2_lowp+0x3a8>
+ .byte 196,226,125,88,5,112,208,2,0 // vpbroadcastd 0x2d070(%rip),%ymm0 # 38c1c <_sk_srcover_bgra_8888_sse2_lowp+0x3a8>
.byte 197,229,219,192 // vpand %ymm0,%ymm3,%ymm0
.byte 197,252,91,192 // vcvtdq2ps %ymm0,%ymm0
- .byte 196,226,125,24,13,243,206,2,0 // vbroadcastss 0x2cef3(%rip),%ymm1 # 38ab0 <_sk_srcover_bgra_8888_sse2_lowp+0x3ac>
+ .byte 196,226,125,24,13,99,208,2,0 // vbroadcastss 0x2d063(%rip),%ymm1 # 38c20 <_sk_srcover_bgra_8888_sse2_lowp+0x3ac>
.byte 197,252,89,193 // vmulps %ymm1,%ymm0,%ymm0
- .byte 196,226,125,88,13,234,206,2,0 // vpbroadcastd 0x2ceea(%rip),%ymm1 # 38ab4 <_sk_srcover_bgra_8888_sse2_lowp+0x3b0>
+ .byte 196,226,125,88,13,90,208,2,0 // vpbroadcastd 0x2d05a(%rip),%ymm1 # 38c24 <_sk_srcover_bgra_8888_sse2_lowp+0x3b0>
.byte 197,229,219,201 // vpand %ymm1,%ymm3,%ymm1
.byte 197,252,91,201 // vcvtdq2ps %ymm1,%ymm1
- .byte 196,226,125,24,21,221,206,2,0 // vbroadcastss 0x2cedd(%rip),%ymm2 # 38ab8 <_sk_srcover_bgra_8888_sse2_lowp+0x3b4>
+ .byte 196,226,125,24,21,77,208,2,0 // vbroadcastss 0x2d04d(%rip),%ymm2 # 38c28 <_sk_srcover_bgra_8888_sse2_lowp+0x3b4>
.byte 197,244,89,202 // vmulps %ymm2,%ymm1,%ymm1
- .byte 196,226,125,88,21,212,206,2,0 // vpbroadcastd 0x2ced4(%rip),%ymm2 # 38abc <_sk_srcover_bgra_8888_sse2_lowp+0x3b8>
+ .byte 196,226,125,88,21,68,208,2,0 // vpbroadcastd 0x2d044(%rip),%ymm2 # 38c2c <_sk_srcover_bgra_8888_sse2_lowp+0x3b8>
.byte 197,229,219,210 // vpand %ymm2,%ymm3,%ymm2
.byte 197,252,91,210 // vcvtdq2ps %ymm2,%ymm2
- .byte 196,98,125,24,5,199,206,2,0 // vbroadcastss 0x2cec7(%rip),%ymm8 # 38ac0 <_sk_srcover_bgra_8888_sse2_lowp+0x3bc>
+ .byte 196,98,125,24,5,55,208,2,0 // vbroadcastss 0x2d037(%rip),%ymm8 # 38c30 <_sk_srcover_bgra_8888_sse2_lowp+0x3bc>
.byte 196,193,108,89,208 // vmulps %ymm8,%ymm2,%ymm2
- .byte 196,98,125,88,5,189,206,2,0 // vpbroadcastd 0x2cebd(%rip),%ymm8 # 38ac4 <_sk_srcover_bgra_8888_sse2_lowp+0x3c0>
+ .byte 196,98,125,88,5,45,208,2,0 // vpbroadcastd 0x2d02d(%rip),%ymm8 # 38c34 <_sk_srcover_bgra_8888_sse2_lowp+0x3c0>
.byte 196,193,101,219,216 // vpand %ymm8,%ymm3,%ymm3
.byte 197,252,91,219 // vcvtdq2ps %ymm3,%ymm3
- .byte 196,98,125,24,5,175,206,2,0 // vbroadcastss 0x2ceaf(%rip),%ymm8 # 38ac8 <_sk_srcover_bgra_8888_sse2_lowp+0x3c4>
+ .byte 196,98,125,24,5,31,208,2,0 // vbroadcastss 0x2d01f(%rip),%ymm8 # 38c38 <_sk_srcover_bgra_8888_sse2_lowp+0x3c4>
.byte 196,193,100,89,216 // vmulps %ymm8,%ymm3,%ymm3
.byte 72,173 // lods %ds:(%rsi),%rax
.byte 91 // pop %rbx
@@ -21048,7 +21048,7 @@ _sk_store_4444_hsw:
.byte 77,1,192 // add %r8,%r8
.byte 76,3,0 // add (%rax),%r8
.byte 76,99,218 // movslq %edx,%r11
- .byte 196,98,125,24,5,132,206,2,0 // vbroadcastss 0x2ce84(%rip),%ymm8 # 38acc <_sk_srcover_bgra_8888_sse2_lowp+0x3c8>
+ .byte 196,98,125,24,5,244,207,2,0 // vbroadcastss 0x2cff4(%rip),%ymm8 # 38c3c <_sk_srcover_bgra_8888_sse2_lowp+0x3c8>
.byte 196,65,124,89,200 // vmulps %ymm8,%ymm0,%ymm9
.byte 196,65,125,91,201 // vcvtps2dq %ymm9,%ymm9
.byte 196,193,53,114,241,12 // vpslld $0xc,%ymm9,%ymm9
@@ -21107,7 +21107,7 @@ _sk_store_4444_hsw:
.byte 255 // (bad)
.byte 255 // (bad)
.byte 255 // (bad)
- .byte 232,255,255,255,224 // callq ffffffffe100bd20 <_sk_srcover_bgra_8888_sse2_lowp+0xffffffffe0fd361c>
+ .byte 232,255,255,255,224 // callq ffffffffe100bd20 <_sk_srcover_bgra_8888_sse2_lowp+0xffffffffe0fd34ac>
.byte 255 // (bad)
.byte 255 // (bad)
.byte 255 // .byte 0xff
@@ -21126,14 +21126,14 @@ _sk_load_8888_hsw:
.byte 72,133,255 // test %rdi,%rdi
.byte 117,86 // jne bd96 <_sk_load_8888_hsw+0x72>
.byte 196,129,126,111,28,152 // vmovdqu (%r8,%r11,4),%ymm3
- .byte 197,229,219,5,114,207,2,0 // vpand 0x2cf72(%rip),%ymm3,%ymm0 # 38cc0 <_sk_srcover_bgra_8888_sse2_lowp+0x5bc>
+ .byte 197,229,219,5,210,208,2,0 // vpand 0x2d0d2(%rip),%ymm3,%ymm0 # 38e20 <_sk_srcover_bgra_8888_sse2_lowp+0x5ac>
.byte 197,252,91,192 // vcvtdq2ps %ymm0,%ymm0
- .byte 196,98,125,24,5,209,204,2,0 // vbroadcastss 0x2ccd1(%rip),%ymm8 # 38a2c <_sk_srcover_bgra_8888_sse2_lowp+0x328>
+ .byte 196,98,125,24,5,65,206,2,0 // vbroadcastss 0x2ce41(%rip),%ymm8 # 38b9c <_sk_srcover_bgra_8888_sse2_lowp+0x328>
.byte 196,193,124,89,192 // vmulps %ymm8,%ymm0,%ymm0
- .byte 196,226,101,0,13,119,207,2,0 // vpshufb 0x2cf77(%rip),%ymm3,%ymm1 # 38ce0 <_sk_srcover_bgra_8888_sse2_lowp+0x5dc>
+ .byte 196,226,101,0,13,215,208,2,0 // vpshufb 0x2d0d7(%rip),%ymm3,%ymm1 # 38e40 <_sk_srcover_bgra_8888_sse2_lowp+0x5cc>
.byte 197,252,91,201 // vcvtdq2ps %ymm1,%ymm1
.byte 196,193,116,89,200 // vmulps %ymm8,%ymm1,%ymm1
- .byte 196,226,101,0,21,133,207,2,0 // vpshufb 0x2cf85(%rip),%ymm3,%ymm2 # 38d00 <_sk_srcover_bgra_8888_sse2_lowp+0x5fc>
+ .byte 196,226,101,0,21,229,208,2,0 // vpshufb 0x2d0e5(%rip),%ymm3,%ymm2 # 38e60 <_sk_srcover_bgra_8888_sse2_lowp+0x5ec>
.byte 197,252,91,210 // vcvtdq2ps %ymm2,%ymm2
.byte 196,193,108,89,208 // vmulps %ymm8,%ymm2,%ymm2
.byte 197,229,114,211,24 // vpsrld $0x18,%ymm3,%ymm3
@@ -21209,14 +21209,14 @@ _sk_load_8888_dst_hsw:
.byte 72,133,255 // test %rdi,%rdi
.byte 117,86 // jne beca <_sk_load_8888_dst_hsw+0x72>
.byte 196,129,126,111,60,152 // vmovdqu (%r8,%r11,4),%ymm7
- .byte 197,197,219,37,158,206,2,0 // vpand 0x2ce9e(%rip),%ymm7,%ymm4 # 38d20 <_sk_srcover_bgra_8888_sse2_lowp+0x61c>
+ .byte 197,197,219,37,254,207,2,0 // vpand 0x2cffe(%rip),%ymm7,%ymm4 # 38e80 <_sk_srcover_bgra_8888_sse2_lowp+0x60c>
.byte 197,252,91,228 // vcvtdq2ps %ymm4,%ymm4
- .byte 196,98,125,24,5,157,203,2,0 // vbroadcastss 0x2cb9d(%rip),%ymm8 # 38a2c <_sk_srcover_bgra_8888_sse2_lowp+0x328>
+ .byte 196,98,125,24,5,13,205,2,0 // vbroadcastss 0x2cd0d(%rip),%ymm8 # 38b9c <_sk_srcover_bgra_8888_sse2_lowp+0x328>
.byte 196,193,92,89,224 // vmulps %ymm8,%ymm4,%ymm4
- .byte 196,226,69,0,45,163,206,2,0 // vpshufb 0x2cea3(%rip),%ymm7,%ymm5 # 38d40 <_sk_srcover_bgra_8888_sse2_lowp+0x63c>
+ .byte 196,226,69,0,45,3,208,2,0 // vpshufb 0x2d003(%rip),%ymm7,%ymm5 # 38ea0 <_sk_srcover_bgra_8888_sse2_lowp+0x62c>
.byte 197,252,91,237 // vcvtdq2ps %ymm5,%ymm5
.byte 196,193,84,89,232 // vmulps %ymm8,%ymm5,%ymm5
- .byte 196,226,69,0,53,177,206,2,0 // vpshufb 0x2ceb1(%rip),%ymm7,%ymm6 # 38d60 <_sk_srcover_bgra_8888_sse2_lowp+0x65c>
+ .byte 196,226,69,0,53,17,208,2,0 // vpshufb 0x2d011(%rip),%ymm7,%ymm6 # 38ec0 <_sk_srcover_bgra_8888_sse2_lowp+0x64c>
.byte 197,252,91,246 // vcvtdq2ps %ymm6,%ymm6
.byte 196,193,76,89,240 // vmulps %ymm8,%ymm6,%ymm6
.byte 197,197,114,215,24 // vpsrld $0x18,%ymm7,%ymm7
@@ -21300,14 +21300,14 @@ _sk_gather_8888_hsw:
.byte 197,254,91,192 // vcvttps2dq %ymm0,%ymm0
.byte 197,245,254,192 // vpaddd %ymm0,%ymm1,%ymm0
.byte 196,66,101,144,4,128 // vpgatherdd %ymm3,(%r8,%ymm0,4),%ymm8
- .byte 197,189,219,5,157,205,2,0 // vpand 0x2cd9d(%rip),%ymm8,%ymm0 # 38d80 <_sk_srcover_bgra_8888_sse2_lowp+0x67c>
+ .byte 197,189,219,5,253,206,2,0 // vpand 0x2cefd(%rip),%ymm8,%ymm0 # 38ee0 <_sk_srcover_bgra_8888_sse2_lowp+0x66c>
.byte 197,252,91,192 // vcvtdq2ps %ymm0,%ymm0
- .byte 196,226,125,24,29,60,202,2,0 // vbroadcastss 0x2ca3c(%rip),%ymm3 # 38a2c <_sk_srcover_bgra_8888_sse2_lowp+0x328>
+ .byte 196,226,125,24,29,172,203,2,0 // vbroadcastss 0x2cbac(%rip),%ymm3 # 38b9c <_sk_srcover_bgra_8888_sse2_lowp+0x328>
.byte 197,252,89,195 // vmulps %ymm3,%ymm0,%ymm0
- .byte 196,226,61,0,13,163,205,2,0 // vpshufb 0x2cda3(%rip),%ymm8,%ymm1 # 38da0 <_sk_srcover_bgra_8888_sse2_lowp+0x69c>
+ .byte 196,226,61,0,13,3,207,2,0 // vpshufb 0x2cf03(%rip),%ymm8,%ymm1 # 38f00 <_sk_srcover_bgra_8888_sse2_lowp+0x68c>
.byte 197,252,91,201 // vcvtdq2ps %ymm1,%ymm1
.byte 197,244,89,203 // vmulps %ymm3,%ymm1,%ymm1
- .byte 196,226,61,0,21,178,205,2,0 // vpshufb 0x2cdb2(%rip),%ymm8,%ymm2 # 38dc0 <_sk_srcover_bgra_8888_sse2_lowp+0x6bc>
+ .byte 196,226,61,0,21,18,207,2,0 // vpshufb 0x2cf12(%rip),%ymm8,%ymm2 # 38f20 <_sk_srcover_bgra_8888_sse2_lowp+0x6ac>
.byte 197,252,91,210 // vcvtdq2ps %ymm2,%ymm2
.byte 197,236,89,211 // vmulps %ymm3,%ymm2,%ymm2
.byte 196,193,61,114,208,24 // vpsrld $0x18,%ymm8,%ymm8
@@ -21327,7 +21327,7 @@ _sk_store_8888_hsw:
.byte 73,193,224,2 // shl $0x2,%r8
.byte 76,3,0 // add (%rax),%r8
.byte 76,99,218 // movslq %edx,%r11
- .byte 196,98,125,24,5,151,201,2,0 // vbroadcastss 0x2c997(%rip),%ymm8 # 389e0 <_sk_srcover_bgra_8888_sse2_lowp+0x2dc>
+ .byte 196,98,125,24,5,7,203,2,0 // vbroadcastss 0x2cb07(%rip),%ymm8 # 38b50 <_sk_srcover_bgra_8888_sse2_lowp+0x2dc>
.byte 196,65,124,89,200 // vmulps %ymm8,%ymm0,%ymm9
.byte 196,65,125,91,201 // vcvtps2dq %ymm9,%ymm9
.byte 196,65,116,89,208 // vmulps %ymm8,%ymm1,%ymm10
@@ -21381,7 +21381,7 @@ _sk_store_8888_hsw:
.byte 255 // (bad)
.byte 255 // (bad)
.byte 255 // (bad)
- .byte 233,255,255,255,219 // jmpq ffffffffdc00c124 <_sk_srcover_bgra_8888_sse2_lowp+0xffffffffdbfd3a20>
+ .byte 233,255,255,255,219 // jmpq ffffffffdc00c124 <_sk_srcover_bgra_8888_sse2_lowp+0xffffffffdbfd38b0>
.byte 255 // (bad)
.byte 255 // (bad)
.byte 255,205 // dec %ebp
@@ -21403,14 +21403,14 @@ _sk_load_bgra_hsw:
.byte 72,133,255 // test %rdi,%rdi
.byte 117,86 // jne c19e <_sk_load_bgra_hsw+0x72>
.byte 196,129,126,111,28,152 // vmovdqu (%r8,%r11,4),%ymm3
- .byte 197,229,219,5,138,204,2,0 // vpand 0x2cc8a(%rip),%ymm3,%ymm0 # 38de0 <_sk_srcover_bgra_8888_sse2_lowp+0x6dc>
+ .byte 197,229,219,5,234,205,2,0 // vpand 0x2cdea(%rip),%ymm3,%ymm0 # 38f40 <_sk_srcover_bgra_8888_sse2_lowp+0x6cc>
.byte 197,252,91,192 // vcvtdq2ps %ymm0,%ymm0
- .byte 196,98,125,24,5,201,200,2,0 // vbroadcastss 0x2c8c9(%rip),%ymm8 # 38a2c <_sk_srcover_bgra_8888_sse2_lowp+0x328>
+ .byte 196,98,125,24,5,57,202,2,0 // vbroadcastss 0x2ca39(%rip),%ymm8 # 38b9c <_sk_srcover_bgra_8888_sse2_lowp+0x328>
.byte 196,193,124,89,208 // vmulps %ymm8,%ymm0,%ymm2
- .byte 196,226,101,0,5,143,204,2,0 // vpshufb 0x2cc8f(%rip),%ymm3,%ymm0 # 38e00 <_sk_srcover_bgra_8888_sse2_lowp+0x6fc>
+ .byte 196,226,101,0,5,239,205,2,0 // vpshufb 0x2cdef(%rip),%ymm3,%ymm0 # 38f60 <_sk_srcover_bgra_8888_sse2_lowp+0x6ec>
.byte 197,252,91,192 // vcvtdq2ps %ymm0,%ymm0
.byte 196,193,124,89,200 // vmulps %ymm8,%ymm0,%ymm1
- .byte 196,226,101,0,5,157,204,2,0 // vpshufb 0x2cc9d(%rip),%ymm3,%ymm0 # 38e20 <_sk_srcover_bgra_8888_sse2_lowp+0x71c>
+ .byte 196,226,101,0,5,253,205,2,0 // vpshufb 0x2cdfd(%rip),%ymm3,%ymm0 # 38f80 <_sk_srcover_bgra_8888_sse2_lowp+0x70c>
.byte 197,252,91,192 // vcvtdq2ps %ymm0,%ymm0
.byte 196,193,124,89,192 // vmulps %ymm8,%ymm0,%ymm0
.byte 197,229,114,211,24 // vpsrld $0x18,%ymm3,%ymm3
@@ -21486,14 +21486,14 @@ _sk_load_bgra_dst_hsw:
.byte 72,133,255 // test %rdi,%rdi
.byte 117,86 // jne c2d2 <_sk_load_bgra_dst_hsw+0x72>
.byte 196,129,126,111,60,152 // vmovdqu (%r8,%r11,4),%ymm7
- .byte 197,197,219,37,182,203,2,0 // vpand 0x2cbb6(%rip),%ymm7,%ymm4 # 38e40 <_sk_srcover_bgra_8888_sse2_lowp+0x73c>
+ .byte 197,197,219,37,22,205,2,0 // vpand 0x2cd16(%rip),%ymm7,%ymm4 # 38fa0 <_sk_srcover_bgra_8888_sse2_lowp+0x72c>
.byte 197,252,91,228 // vcvtdq2ps %ymm4,%ymm4
- .byte 196,98,125,24,5,149,199,2,0 // vbroadcastss 0x2c795(%rip),%ymm8 # 38a2c <_sk_srcover_bgra_8888_sse2_lowp+0x328>
+ .byte 196,98,125,24,5,5,201,2,0 // vbroadcastss 0x2c905(%rip),%ymm8 # 38b9c <_sk_srcover_bgra_8888_sse2_lowp+0x328>
.byte 196,193,92,89,240 // vmulps %ymm8,%ymm4,%ymm6
- .byte 196,226,69,0,37,187,203,2,0 // vpshufb 0x2cbbb(%rip),%ymm7,%ymm4 # 38e60 <_sk_srcover_bgra_8888_sse2_lowp+0x75c>
+ .byte 196,226,69,0,37,27,205,2,0 // vpshufb 0x2cd1b(%rip),%ymm7,%ymm4 # 38fc0 <_sk_srcover_bgra_8888_sse2_lowp+0x74c>
.byte 197,252,91,228 // vcvtdq2ps %ymm4,%ymm4
.byte 196,193,92,89,232 // vmulps %ymm8,%ymm4,%ymm5
- .byte 196,226,69,0,37,201,203,2,0 // vpshufb 0x2cbc9(%rip),%ymm7,%ymm4 # 38e80 <_sk_srcover_bgra_8888_sse2_lowp+0x77c>
+ .byte 196,226,69,0,37,41,205,2,0 // vpshufb 0x2cd29(%rip),%ymm7,%ymm4 # 38fe0 <_sk_srcover_bgra_8888_sse2_lowp+0x76c>
.byte 197,252,91,228 // vcvtdq2ps %ymm4,%ymm4
.byte 196,193,92,89,224 // vmulps %ymm8,%ymm4,%ymm4
.byte 197,197,114,215,24 // vpsrld $0x18,%ymm7,%ymm7
@@ -21577,14 +21577,14 @@ _sk_gather_bgra_hsw:
.byte 197,254,91,192 // vcvttps2dq %ymm0,%ymm0
.byte 197,245,254,192 // vpaddd %ymm0,%ymm1,%ymm0
.byte 196,66,101,144,4,128 // vpgatherdd %ymm3,(%r8,%ymm0,4),%ymm8
- .byte 197,189,219,5,181,202,2,0 // vpand 0x2cab5(%rip),%ymm8,%ymm0 # 38ea0 <_sk_srcover_bgra_8888_sse2_lowp+0x79c>
+ .byte 197,189,219,5,21,204,2,0 // vpand 0x2cc15(%rip),%ymm8,%ymm0 # 39000 <_sk_srcover_bgra_8888_sse2_lowp+0x78c>
.byte 197,252,91,192 // vcvtdq2ps %ymm0,%ymm0
- .byte 196,226,125,24,29,52,198,2,0 // vbroadcastss 0x2c634(%rip),%ymm3 # 38a2c <_sk_srcover_bgra_8888_sse2_lowp+0x328>
+ .byte 196,226,125,24,29,164,199,2,0 // vbroadcastss 0x2c7a4(%rip),%ymm3 # 38b9c <_sk_srcover_bgra_8888_sse2_lowp+0x328>
.byte 197,252,89,211 // vmulps %ymm3,%ymm0,%ymm2
- .byte 196,226,61,0,5,187,202,2,0 // vpshufb 0x2cabb(%rip),%ymm8,%ymm0 # 38ec0 <_sk_srcover_bgra_8888_sse2_lowp+0x7bc>
+ .byte 196,226,61,0,5,27,204,2,0 // vpshufb 0x2cc1b(%rip),%ymm8,%ymm0 # 39020 <_sk_srcover_bgra_8888_sse2_lowp+0x7ac>
.byte 197,252,91,192 // vcvtdq2ps %ymm0,%ymm0
.byte 197,252,89,203 // vmulps %ymm3,%ymm0,%ymm1
- .byte 196,226,61,0,5,202,202,2,0 // vpshufb 0x2caca(%rip),%ymm8,%ymm0 # 38ee0 <_sk_srcover_bgra_8888_sse2_lowp+0x7dc>
+ .byte 196,226,61,0,5,42,204,2,0 // vpshufb 0x2cc2a(%rip),%ymm8,%ymm0 # 39040 <_sk_srcover_bgra_8888_sse2_lowp+0x7cc>
.byte 197,252,91,192 // vcvtdq2ps %ymm0,%ymm0
.byte 197,252,89,195 // vmulps %ymm3,%ymm0,%ymm0
.byte 196,193,61,114,208,24 // vpsrld $0x18,%ymm8,%ymm8
@@ -21604,7 +21604,7 @@ _sk_store_bgra_hsw:
.byte 73,193,224,2 // shl $0x2,%r8
.byte 76,3,0 // add (%rax),%r8
.byte 76,99,218 // movslq %edx,%r11
- .byte 196,98,125,24,5,143,197,2,0 // vbroadcastss 0x2c58f(%rip),%ymm8 # 389e0 <_sk_srcover_bgra_8888_sse2_lowp+0x2dc>
+ .byte 196,98,125,24,5,255,198,2,0 // vbroadcastss 0x2c6ff(%rip),%ymm8 # 38b50 <_sk_srcover_bgra_8888_sse2_lowp+0x2dc>
.byte 196,65,108,89,200 // vmulps %ymm8,%ymm2,%ymm9
.byte 196,65,125,91,201 // vcvtps2dq %ymm9,%ymm9
.byte 196,65,116,89,208 // vmulps %ymm8,%ymm1,%ymm10
@@ -21658,7 +21658,7 @@ _sk_store_bgra_hsw:
.byte 255 // (bad)
.byte 255 // (bad)
.byte 255 // (bad)
- .byte 233,255,255,255,219 // jmpq ffffffffdc00c52c <_sk_srcover_bgra_8888_sse2_lowp+0xffffffffdbfd3e28>
+ .byte 233,255,255,255,219 // jmpq ffffffffdc00c52c <_sk_srcover_bgra_8888_sse2_lowp+0xffffffffdbfd3cb8>
.byte 255 // (bad)
.byte 255 // (bad)
.byte 255,205 // dec %ebp
@@ -21919,7 +21919,7 @@ _sk_load_u16_be_hsw:
.byte 197,241,235,192 // vpor %xmm0,%xmm1,%xmm0
.byte 196,226,125,51,192 // vpmovzxwd %xmm0,%ymm0
.byte 197,252,91,192 // vcvtdq2ps %ymm0,%ymm0
- .byte 196,98,125,24,21,16,193,2,0 // vbroadcastss 0x2c110(%rip),%ymm10 # 38a48 <_sk_srcover_bgra_8888_sse2_lowp+0x344>
+ .byte 196,98,125,24,21,128,194,2,0 // vbroadcastss 0x2c280(%rip),%ymm10 # 38bb8 <_sk_srcover_bgra_8888_sse2_lowp+0x344>
.byte 196,193,124,89,194 // vmulps %ymm10,%ymm0,%ymm0
.byte 197,185,109,202 // vpunpckhqdq %xmm2,%xmm8,%xmm1
.byte 197,233,113,241,8 // vpsllw $0x8,%xmm1,%xmm2
@@ -22011,7 +22011,7 @@ _sk_load_rgb_u16_be_hsw:
.byte 197,241,235,192 // vpor %xmm0,%xmm1,%xmm0
.byte 196,226,125,51,192 // vpmovzxwd %xmm0,%ymm0
.byte 197,252,91,192 // vcvtdq2ps %ymm0,%ymm0
- .byte 196,98,125,24,13,127,191,2,0 // vbroadcastss 0x2bf7f(%rip),%ymm9 # 38a48 <_sk_srcover_bgra_8888_sse2_lowp+0x344>
+ .byte 196,98,125,24,13,239,192,2,0 // vbroadcastss 0x2c0ef(%rip),%ymm9 # 38bb8 <_sk_srcover_bgra_8888_sse2_lowp+0x344>
.byte 196,193,124,89,193 // vmulps %ymm9,%ymm0,%ymm0
.byte 197,185,109,203 // vpunpckhqdq %xmm3,%xmm8,%xmm1
.byte 197,225,113,241,8 // vpsllw $0x8,%xmm1,%xmm3
@@ -22027,7 +22027,7 @@ _sk_load_rgb_u16_be_hsw:
.byte 197,252,91,210 // vcvtdq2ps %ymm2,%ymm2
.byte 196,193,108,89,209 // vmulps %ymm9,%ymm2,%ymm2
.byte 72,173 // lods %ds:(%rsi),%rax
- .byte 196,226,125,24,29,159,190,2,0 // vbroadcastss 0x2be9f(%rip),%ymm3 # 389b4 <_sk_srcover_bgra_8888_sse2_lowp+0x2b0>
+ .byte 196,226,125,24,29,15,192,2,0 // vbroadcastss 0x2c00f(%rip),%ymm3 # 38b24 <_sk_srcover_bgra_8888_sse2_lowp+0x2b0>
.byte 255,224 // jmpq *%rax
.byte 196,193,121,110,4,64 // vmovd (%r8,%rax,2),%xmm0
.byte 196,65,49,239,201 // vpxor %xmm9,%xmm9,%xmm9
@@ -22103,7 +22103,7 @@ _sk_store_u16_be_hsw:
.byte 77,1,192 // add %r8,%r8
.byte 76,3,0 // add (%rax),%r8
.byte 73,99,193 // movslq %r9d,%rax
- .byte 196,98,125,24,5,104,190,2,0 // vbroadcastss 0x2be68(%rip),%ymm8 # 38ad0 <_sk_srcover_bgra_8888_sse2_lowp+0x3cc>
+ .byte 196,98,125,24,5,216,191,2,0 // vbroadcastss 0x2bfd8(%rip),%ymm8 # 38c40 <_sk_srcover_bgra_8888_sse2_lowp+0x3cc>
.byte 196,65,124,89,200 // vmulps %ymm8,%ymm0,%ymm9
.byte 196,65,125,91,201 // vcvtps2dq %ymm9,%ymm9
.byte 196,67,125,25,202,1 // vextractf128 $0x1,%ymm9,%xmm10
@@ -22262,7 +22262,7 @@ _sk_load_f32_hsw:
.byte 255,92,255,255 // lcall *-0x1(%rdi,%rdi,8)
.byte 255,70,255 // incl -0x1(%rsi)
.byte 255 // (bad)
- .byte 255,53,255,255,255,40 // pushq 0x28ffffff(%rip) # 2900cf38 <_sk_srcover_bgra_8888_sse2_lowp+0x28fd4834>
+ .byte 255,53,255,255,255,40 // pushq 0x28ffffff(%rip) # 2900cf38 <_sk_srcover_bgra_8888_sse2_lowp+0x28fd46c4>
.byte 255 // (bad)
.byte 255 // (bad)
.byte 255 // .byte 0xff
@@ -22457,7 +22457,7 @@ _sk_mirror_x_hsw:
.byte 196,65,124,92,218 // vsubps %ymm10,%ymm0,%ymm11
.byte 196,193,58,88,192 // vaddss %xmm8,%xmm8,%xmm0
.byte 196,98,125,24,192 // vbroadcastss %xmm0,%ymm8
- .byte 197,178,89,5,157,183,2,0 // vmulss 0x2b79d(%rip),%xmm9,%xmm0 # 389b0 <_sk_srcover_bgra_8888_sse2_lowp+0x2ac>
+ .byte 197,178,89,5,13,185,2,0 // vmulss 0x2b90d(%rip),%xmm9,%xmm0 # 38b20 <_sk_srcover_bgra_8888_sse2_lowp+0x2ac>
.byte 196,226,125,24,192 // vbroadcastss %xmm0,%ymm0
.byte 197,164,89,192 // vmulps %ymm0,%ymm11,%ymm0
.byte 196,227,125,8,192,1 // vroundps $0x1,%ymm0,%ymm0
@@ -22480,7 +22480,7 @@ _sk_mirror_y_hsw:
.byte 196,65,116,92,218 // vsubps %ymm10,%ymm1,%ymm11
.byte 196,193,58,88,200 // vaddss %xmm8,%xmm8,%xmm1
.byte 196,98,125,24,193 // vbroadcastss %xmm1,%ymm8
- .byte 197,178,89,13,76,183,2,0 // vmulss 0x2b74c(%rip),%xmm9,%xmm1 # 389b0 <_sk_srcover_bgra_8888_sse2_lowp+0x2ac>
+ .byte 197,178,89,13,188,184,2,0 // vmulss 0x2b8bc(%rip),%xmm9,%xmm1 # 38b20 <_sk_srcover_bgra_8888_sse2_lowp+0x2ac>
.byte 196,226,125,24,201 // vbroadcastss %xmm1,%ymm1
.byte 197,164,89,201 // vmulps %ymm1,%ymm11,%ymm1
.byte 196,227,125,8,201,1 // vroundps $0x1,%ymm1,%ymm1
@@ -22498,7 +22498,7 @@ FUNCTION(_sk_clamp_x_1_hsw)
_sk_clamp_x_1_hsw:
.byte 196,65,60,87,192 // vxorps %ymm8,%ymm8,%ymm8
.byte 197,188,95,192 // vmaxps %ymm0,%ymm8,%ymm0
- .byte 196,98,125,24,5,20,183,2,0 // vbroadcastss 0x2b714(%rip),%ymm8 # 389b4 <_sk_srcover_bgra_8888_sse2_lowp+0x2b0>
+ .byte 196,98,125,24,5,132,184,2,0 // vbroadcastss 0x2b884(%rip),%ymm8 # 38b24 <_sk_srcover_bgra_8888_sse2_lowp+0x2b0>
.byte 196,193,124,93,192 // vminps %ymm8,%ymm0,%ymm0
.byte 72,173 // lods %ds:(%rsi),%rax
.byte 255,224 // jmpq *%rax
@@ -22511,7 +22511,7 @@ _sk_repeat_x_1_hsw:
.byte 196,193,124,92,192 // vsubps %ymm8,%ymm0,%ymm0
.byte 196,65,60,87,192 // vxorps %ymm8,%ymm8,%ymm8
.byte 197,188,95,192 // vmaxps %ymm0,%ymm8,%ymm0
- .byte 196,98,125,24,5,238,182,2,0 // vbroadcastss 0x2b6ee(%rip),%ymm8 # 389b4 <_sk_srcover_bgra_8888_sse2_lowp+0x2b0>
+ .byte 196,98,125,24,5,94,184,2,0 // vbroadcastss 0x2b85e(%rip),%ymm8 # 38b24 <_sk_srcover_bgra_8888_sse2_lowp+0x2b0>
.byte 196,193,124,93,192 // vminps %ymm8,%ymm0,%ymm0
.byte 72,173 // lods %ds:(%rsi),%rax
.byte 255,224 // jmpq *%rax
@@ -22520,9 +22520,9 @@ HIDDEN _sk_mirror_x_1_hsw
.globl _sk_mirror_x_1_hsw
FUNCTION(_sk_mirror_x_1_hsw)
_sk_mirror_x_1_hsw:
- .byte 196,98,125,24,5,244,182,2,0 // vbroadcastss 0x2b6f4(%rip),%ymm8 # 389cc <_sk_srcover_bgra_8888_sse2_lowp+0x2c8>
+ .byte 196,98,125,24,5,100,184,2,0 // vbroadcastss 0x2b864(%rip),%ymm8 # 38b3c <_sk_srcover_bgra_8888_sse2_lowp+0x2c8>
.byte 196,193,124,88,192 // vaddps %ymm8,%ymm0,%ymm0
- .byte 196,98,125,24,13,202,182,2,0 // vbroadcastss 0x2b6ca(%rip),%ymm9 # 389b0 <_sk_srcover_bgra_8888_sse2_lowp+0x2ac>
+ .byte 196,98,125,24,13,58,184,2,0 // vbroadcastss 0x2b83a(%rip),%ymm9 # 38b20 <_sk_srcover_bgra_8888_sse2_lowp+0x2ac>
.byte 196,65,124,89,201 // vmulps %ymm9,%ymm0,%ymm9
.byte 196,67,125,8,201,1 // vroundps $0x1,%ymm9,%ymm9
.byte 196,65,52,88,201 // vaddps %ymm9,%ymm9,%ymm9
@@ -22532,7 +22532,7 @@ _sk_mirror_x_1_hsw:
.byte 197,60,92,200 // vsubps %ymm0,%ymm8,%ymm9
.byte 197,180,84,192 // vandps %ymm0,%ymm9,%ymm0
.byte 197,188,95,192 // vmaxps %ymm0,%ymm8,%ymm0
- .byte 196,98,125,24,5,154,182,2,0 // vbroadcastss 0x2b69a(%rip),%ymm8 # 389b4 <_sk_srcover_bgra_8888_sse2_lowp+0x2b0>
+ .byte 196,98,125,24,5,10,184,2,0 // vbroadcastss 0x2b80a(%rip),%ymm8 # 38b24 <_sk_srcover_bgra_8888_sse2_lowp+0x2b0>
.byte 196,193,124,93,192 // vminps %ymm8,%ymm0,%ymm0
.byte 72,173 // lods %ds:(%rsi),%rax
.byte 255,224 // jmpq *%rax
@@ -22541,11 +22541,11 @@ HIDDEN _sk_luminance_to_alpha_hsw
.globl _sk_luminance_to_alpha_hsw
FUNCTION(_sk_luminance_to_alpha_hsw)
_sk_luminance_to_alpha_hsw:
- .byte 196,226,125,24,29,172,183,2,0 // vbroadcastss 0x2b7ac(%rip),%ymm3 # 38ad8 <_sk_srcover_bgra_8888_sse2_lowp+0x3d4>
- .byte 196,98,125,24,5,159,183,2,0 // vbroadcastss 0x2b79f(%rip),%ymm8 # 38ad4 <_sk_srcover_bgra_8888_sse2_lowp+0x3d0>
+ .byte 196,226,125,24,29,28,185,2,0 // vbroadcastss 0x2b91c(%rip),%ymm3 # 38c48 <_sk_srcover_bgra_8888_sse2_lowp+0x3d4>
+ .byte 196,98,125,24,5,15,185,2,0 // vbroadcastss 0x2b90f(%rip),%ymm8 # 38c44 <_sk_srcover_bgra_8888_sse2_lowp+0x3d0>
.byte 196,193,116,89,200 // vmulps %ymm8,%ymm1,%ymm1
.byte 196,226,125,184,203 // vfmadd231ps %ymm3,%ymm0,%ymm1
- .byte 196,226,125,24,29,148,183,2,0 // vbroadcastss 0x2b794(%rip),%ymm3 # 38adc <_sk_srcover_bgra_8888_sse2_lowp+0x3d8>
+ .byte 196,226,125,24,29,4,185,2,0 // vbroadcastss 0x2b904(%rip),%ymm3 # 38c4c <_sk_srcover_bgra_8888_sse2_lowp+0x3d8>
.byte 196,226,109,168,217 // vfmadd213ps %ymm1,%ymm2,%ymm3
.byte 72,173 // lods %ds:(%rsi),%rax
.byte 197,252,87,192 // vxorps %ymm0,%ymm0,%ymm0
@@ -22824,7 +22824,7 @@ _sk_gradient_hsw:
.byte 76,139,72,72 // mov 0x48(%rax),%r9
.byte 197,244,87,201 // vxorps %ymm1,%ymm1,%ymm1
.byte 65,186,1,0,0,0 // mov $0x1,%r10d
- .byte 196,226,125,24,21,49,178,2,0 // vbroadcastss 0x2b231(%rip),%ymm2 # 389b8 <_sk_srcover_bgra_8888_sse2_lowp+0x2b4>
+ .byte 196,226,125,24,21,161,179,2,0 // vbroadcastss 0x2b3a1(%rip),%ymm2 # 38b28 <_sk_srcover_bgra_8888_sse2_lowp+0x2b4>
.byte 196,65,53,239,201 // vpxor %ymm9,%ymm9,%ymm9
.byte 196,130,125,24,28,145 // vbroadcastss (%r9,%r10,4),%ymm3
.byte 197,228,194,216,2 // vcmpleps %ymm0,%ymm3,%ymm3
@@ -22927,24 +22927,24 @@ _sk_xy_to_unit_angle_hsw:
.byte 196,65,52,95,226 // vmaxps %ymm10,%ymm9,%ymm12
.byte 196,65,36,94,220 // vdivps %ymm12,%ymm11,%ymm11
.byte 196,65,36,89,227 // vmulps %ymm11,%ymm11,%ymm12
- .byte 196,98,125,24,45,172,177,2,0 // vbroadcastss 0x2b1ac(%rip),%ymm13 # 38ae0 <_sk_srcover_bgra_8888_sse2_lowp+0x3dc>
- .byte 196,98,125,24,53,167,177,2,0 // vbroadcastss 0x2b1a7(%rip),%ymm14 # 38ae4 <_sk_srcover_bgra_8888_sse2_lowp+0x3e0>
+ .byte 196,98,125,24,45,28,179,2,0 // vbroadcastss 0x2b31c(%rip),%ymm13 # 38c50 <_sk_srcover_bgra_8888_sse2_lowp+0x3dc>
+ .byte 196,98,125,24,53,23,179,2,0 // vbroadcastss 0x2b317(%rip),%ymm14 # 38c54 <_sk_srcover_bgra_8888_sse2_lowp+0x3e0>
.byte 196,66,29,184,245 // vfmadd231ps %ymm13,%ymm12,%ymm14
- .byte 196,98,125,24,45,157,177,2,0 // vbroadcastss 0x2b19d(%rip),%ymm13 # 38ae8 <_sk_srcover_bgra_8888_sse2_lowp+0x3e4>
+ .byte 196,98,125,24,45,13,179,2,0 // vbroadcastss 0x2b30d(%rip),%ymm13 # 38c58 <_sk_srcover_bgra_8888_sse2_lowp+0x3e4>
.byte 196,66,29,184,238 // vfmadd231ps %ymm14,%ymm12,%ymm13
- .byte 196,98,125,24,53,147,177,2,0 // vbroadcastss 0x2b193(%rip),%ymm14 # 38aec <_sk_srcover_bgra_8888_sse2_lowp+0x3e8>
+ .byte 196,98,125,24,53,3,179,2,0 // vbroadcastss 0x2b303(%rip),%ymm14 # 38c5c <_sk_srcover_bgra_8888_sse2_lowp+0x3e8>
.byte 196,66,29,184,245 // vfmadd231ps %ymm13,%ymm12,%ymm14
.byte 196,65,36,89,222 // vmulps %ymm14,%ymm11,%ymm11
.byte 196,65,52,194,202,1 // vcmpltps %ymm10,%ymm9,%ymm9
- .byte 196,98,125,24,21,126,177,2,0 // vbroadcastss 0x2b17e(%rip),%ymm10 # 38af0 <_sk_srcover_bgra_8888_sse2_lowp+0x3ec>
+ .byte 196,98,125,24,21,238,178,2,0 // vbroadcastss 0x2b2ee(%rip),%ymm10 # 38c60 <_sk_srcover_bgra_8888_sse2_lowp+0x3ec>
.byte 196,65,44,92,211 // vsubps %ymm11,%ymm10,%ymm10
.byte 196,67,37,74,202,144 // vblendvps %ymm9,%ymm10,%ymm11,%ymm9
.byte 196,193,124,194,192,1 // vcmpltps %ymm8,%ymm0,%ymm0
- .byte 196,98,125,24,21,36,176,2,0 // vbroadcastss 0x2b024(%rip),%ymm10 # 389b0 <_sk_srcover_bgra_8888_sse2_lowp+0x2ac>
+ .byte 196,98,125,24,21,148,177,2,0 // vbroadcastss 0x2b194(%rip),%ymm10 # 38b20 <_sk_srcover_bgra_8888_sse2_lowp+0x2ac>
.byte 196,65,44,92,209 // vsubps %ymm9,%ymm10,%ymm10
.byte 196,195,53,74,194,0 // vblendvps %ymm0,%ymm10,%ymm9,%ymm0
.byte 196,65,116,194,200,1 // vcmpltps %ymm8,%ymm1,%ymm9
- .byte 196,98,125,24,21,14,176,2,0 // vbroadcastss 0x2b00e(%rip),%ymm10 # 389b4 <_sk_srcover_bgra_8888_sse2_lowp+0x2b0>
+ .byte 196,98,125,24,21,126,177,2,0 // vbroadcastss 0x2b17e(%rip),%ymm10 # 38b24 <_sk_srcover_bgra_8888_sse2_lowp+0x2b0>
.byte 197,44,92,208 // vsubps %ymm0,%ymm10,%ymm10
.byte 196,195,125,74,194,144 // vblendvps %ymm9,%ymm10,%ymm0,%ymm0
.byte 196,65,124,194,200,3 // vcmpunordps %ymm8,%ymm0,%ymm9
@@ -22972,23 +22972,23 @@ _sk_xy_to_2pt_conical_quadratic_max_hsw:
.byte 197,50,89,80,76 // vmulss 0x4c(%rax),%xmm9,%xmm10
.byte 196,66,125,24,210 // vbroadcastss %xmm10,%ymm10
.byte 197,44,88,208 // vaddps %ymm0,%ymm10,%ymm10
- .byte 196,98,125,24,29,254,176,2,0 // vbroadcastss 0x2b0fe(%rip),%ymm11 # 38af4 <_sk_srcover_bgra_8888_sse2_lowp+0x3f0>
+ .byte 196,98,125,24,29,110,178,2,0 // vbroadcastss 0x2b26e(%rip),%ymm11 # 38c64 <_sk_srcover_bgra_8888_sse2_lowp+0x3f0>
.byte 196,65,44,89,211 // vmulps %ymm11,%ymm10,%ymm10
.byte 197,116,89,217 // vmulps %ymm1,%ymm1,%ymm11
.byte 196,98,125,184,216 // vfmadd231ps %ymm0,%ymm0,%ymm11
.byte 196,193,50,89,193 // vmulss %xmm9,%xmm9,%xmm0
.byte 196,226,125,24,192 // vbroadcastss %xmm0,%ymm0
.byte 197,164,92,192 // vsubps %ymm0,%ymm11,%ymm0
- .byte 196,98,125,24,13,221,176,2,0 // vbroadcastss 0x2b0dd(%rip),%ymm9 # 38af8 <_sk_srcover_bgra_8888_sse2_lowp+0x3f4>
+ .byte 196,98,125,24,13,77,178,2,0 // vbroadcastss 0x2b24d(%rip),%ymm9 # 38c68 <_sk_srcover_bgra_8888_sse2_lowp+0x3f4>
.byte 196,65,60,89,193 // vmulps %ymm9,%ymm8,%ymm8
.byte 197,188,89,192 // vmulps %ymm0,%ymm8,%ymm0
.byte 196,194,45,184,194 // vfmadd231ps %ymm10,%ymm10,%ymm0
.byte 197,252,81,192 // vsqrtps %ymm0,%ymm0
.byte 196,98,125,24,64,68 // vbroadcastss 0x44(%rax),%ymm8
- .byte 196,98,125,24,13,192,176,2,0 // vbroadcastss 0x2b0c0(%rip),%ymm9 # 38afc <_sk_srcover_bgra_8888_sse2_lowp+0x3f8>
+ .byte 196,98,125,24,13,48,178,2,0 // vbroadcastss 0x2b230(%rip),%ymm9 # 38c6c <_sk_srcover_bgra_8888_sse2_lowp+0x3f8>
.byte 196,65,44,87,201 // vxorps %ymm9,%ymm10,%ymm9
.byte 196,65,124,92,210 // vsubps %ymm10,%ymm0,%ymm10
- .byte 196,98,125,24,29,97,175,2,0 // vbroadcastss 0x2af61(%rip),%ymm11 # 389b0 <_sk_srcover_bgra_8888_sse2_lowp+0x2ac>
+ .byte 196,98,125,24,29,209,176,2,0 // vbroadcastss 0x2b0d1(%rip),%ymm11 # 38b20 <_sk_srcover_bgra_8888_sse2_lowp+0x2ac>
.byte 196,65,60,89,195 // vmulps %ymm11,%ymm8,%ymm8
.byte 196,65,44,89,208 // vmulps %ymm8,%ymm10,%ymm10
.byte 197,180,92,192 // vsubps %ymm0,%ymm9,%ymm0
@@ -23007,23 +23007,23 @@ _sk_xy_to_2pt_conical_quadratic_min_hsw:
.byte 197,50,89,80,76 // vmulss 0x4c(%rax),%xmm9,%xmm10
.byte 196,66,125,24,210 // vbroadcastss %xmm10,%ymm10
.byte 197,44,88,208 // vaddps %ymm0,%ymm10,%ymm10
- .byte 196,98,125,24,29,102,176,2,0 // vbroadcastss 0x2b066(%rip),%ymm11 # 38af4 <_sk_srcover_bgra_8888_sse2_lowp+0x3f0>
+ .byte 196,98,125,24,29,214,177,2,0 // vbroadcastss 0x2b1d6(%rip),%ymm11 # 38c64 <_sk_srcover_bgra_8888_sse2_lowp+0x3f0>
.byte 196,65,44,89,211 // vmulps %ymm11,%ymm10,%ymm10
.byte 197,116,89,217 // vmulps %ymm1,%ymm1,%ymm11
.byte 196,98,125,184,216 // vfmadd231ps %ymm0,%ymm0,%ymm11
.byte 196,193,50,89,193 // vmulss %xmm9,%xmm9,%xmm0
.byte 196,226,125,24,192 // vbroadcastss %xmm0,%ymm0
.byte 197,164,92,192 // vsubps %ymm0,%ymm11,%ymm0
- .byte 196,98,125,24,13,69,176,2,0 // vbroadcastss 0x2b045(%rip),%ymm9 # 38af8 <_sk_srcover_bgra_8888_sse2_lowp+0x3f4>
+ .byte 196,98,125,24,13,181,177,2,0 // vbroadcastss 0x2b1b5(%rip),%ymm9 # 38c68 <_sk_srcover_bgra_8888_sse2_lowp+0x3f4>
.byte 196,65,60,89,193 // vmulps %ymm9,%ymm8,%ymm8
.byte 197,188,89,192 // vmulps %ymm0,%ymm8,%ymm0
.byte 196,194,45,184,194 // vfmadd231ps %ymm10,%ymm10,%ymm0
.byte 197,252,81,192 // vsqrtps %ymm0,%ymm0
.byte 196,98,125,24,64,68 // vbroadcastss 0x44(%rax),%ymm8
- .byte 196,98,125,24,13,40,176,2,0 // vbroadcastss 0x2b028(%rip),%ymm9 # 38afc <_sk_srcover_bgra_8888_sse2_lowp+0x3f8>
+ .byte 196,98,125,24,13,152,177,2,0 // vbroadcastss 0x2b198(%rip),%ymm9 # 38c6c <_sk_srcover_bgra_8888_sse2_lowp+0x3f8>
.byte 196,65,44,87,201 // vxorps %ymm9,%ymm10,%ymm9
.byte 196,65,124,92,210 // vsubps %ymm10,%ymm0,%ymm10
- .byte 196,98,125,24,29,201,174,2,0 // vbroadcastss 0x2aec9(%rip),%ymm11 # 389b0 <_sk_srcover_bgra_8888_sse2_lowp+0x2ac>
+ .byte 196,98,125,24,29,57,176,2,0 // vbroadcastss 0x2b039(%rip),%ymm11 # 38b20 <_sk_srcover_bgra_8888_sse2_lowp+0x2ac>
.byte 196,65,60,89,195 // vmulps %ymm11,%ymm8,%ymm8
.byte 196,65,44,89,208 // vmulps %ymm8,%ymm10,%ymm10
.byte 197,180,92,192 // vsubps %ymm0,%ymm9,%ymm0
@@ -23041,14 +23041,14 @@ _sk_xy_to_2pt_conical_linear_hsw:
.byte 197,58,89,72,76 // vmulss 0x4c(%rax),%xmm8,%xmm9
.byte 196,66,125,24,201 // vbroadcastss %xmm9,%ymm9
.byte 197,52,88,200 // vaddps %ymm0,%ymm9,%ymm9
- .byte 196,98,125,24,21,212,175,2,0 // vbroadcastss 0x2afd4(%rip),%ymm10 # 38af4 <_sk_srcover_bgra_8888_sse2_lowp+0x3f0>
+ .byte 196,98,125,24,21,68,177,2,0 // vbroadcastss 0x2b144(%rip),%ymm10 # 38c64 <_sk_srcover_bgra_8888_sse2_lowp+0x3f0>
.byte 196,65,52,89,202 // vmulps %ymm10,%ymm9,%ymm9
.byte 197,116,89,209 // vmulps %ymm1,%ymm1,%ymm10
.byte 196,98,125,184,208 // vfmadd231ps %ymm0,%ymm0,%ymm10
.byte 196,193,58,89,192 // vmulss %xmm8,%xmm8,%xmm0
.byte 196,226,125,24,192 // vbroadcastss %xmm0,%ymm0
.byte 197,172,92,192 // vsubps %ymm0,%ymm10,%ymm0
- .byte 196,98,125,24,5,183,175,2,0 // vbroadcastss 0x2afb7(%rip),%ymm8 # 38afc <_sk_srcover_bgra_8888_sse2_lowp+0x3f8>
+ .byte 196,98,125,24,5,39,177,2,0 // vbroadcastss 0x2b127(%rip),%ymm8 # 38c6c <_sk_srcover_bgra_8888_sse2_lowp+0x3f8>
.byte 196,193,124,87,192 // vxorps %ymm8,%ymm0,%ymm0
.byte 196,193,124,94,193 // vdivps %ymm9,%ymm0,%ymm0
.byte 72,173 // lods %ds:(%rsi),%rax
@@ -23091,7 +23091,7 @@ HIDDEN _sk_save_xy_hsw
FUNCTION(_sk_save_xy_hsw)
_sk_save_xy_hsw:
.byte 72,173 // lods %ds:(%rsi),%rax
- .byte 196,98,125,24,5,246,173,2,0 // vbroadcastss 0x2adf6(%rip),%ymm8 # 389b0 <_sk_srcover_bgra_8888_sse2_lowp+0x2ac>
+ .byte 196,98,125,24,5,102,175,2,0 // vbroadcastss 0x2af66(%rip),%ymm8 # 38b20 <_sk_srcover_bgra_8888_sse2_lowp+0x2ac>
.byte 196,65,124,88,200 // vaddps %ymm8,%ymm0,%ymm9
.byte 196,67,125,8,209,1 // vroundps $0x1,%ymm9,%ymm10
.byte 196,65,52,92,202 // vsubps %ymm10,%ymm9,%ymm9
@@ -23125,9 +23125,9 @@ HIDDEN _sk_bilinear_nx_hsw
FUNCTION(_sk_bilinear_nx_hsw)
_sk_bilinear_nx_hsw:
.byte 72,173 // lods %ds:(%rsi),%rax
- .byte 196,226,125,24,5,208,174,2,0 // vbroadcastss 0x2aed0(%rip),%ymm0 # 38b00 <_sk_srcover_bgra_8888_sse2_lowp+0x3fc>
+ .byte 196,226,125,24,5,64,176,2,0 // vbroadcastss 0x2b040(%rip),%ymm0 # 38c70 <_sk_srcover_bgra_8888_sse2_lowp+0x3fc>
.byte 197,252,88,0 // vaddps (%rax),%ymm0,%ymm0
- .byte 196,98,125,24,5,119,173,2,0 // vbroadcastss 0x2ad77(%rip),%ymm8 # 389b4 <_sk_srcover_bgra_8888_sse2_lowp+0x2b0>
+ .byte 196,98,125,24,5,231,174,2,0 // vbroadcastss 0x2aee7(%rip),%ymm8 # 38b24 <_sk_srcover_bgra_8888_sse2_lowp+0x2b0>
.byte 197,60,92,128,128,0,0,0 // vsubps 0x80(%rax),%ymm8,%ymm8
.byte 197,124,17,128,0,1,0,0 // vmovups %ymm8,0x100(%rax)
.byte 72,173 // lods %ds:(%rsi),%rax
@@ -23138,7 +23138,7 @@ HIDDEN _sk_bilinear_px_hsw
FUNCTION(_sk_bilinear_px_hsw)
_sk_bilinear_px_hsw:
.byte 72,173 // lods %ds:(%rsi),%rax
- .byte 196,226,125,24,5,84,173,2,0 // vbroadcastss 0x2ad54(%rip),%ymm0 # 389b0 <_sk_srcover_bgra_8888_sse2_lowp+0x2ac>
+ .byte 196,226,125,24,5,196,174,2,0 // vbroadcastss 0x2aec4(%rip),%ymm0 # 38b20 <_sk_srcover_bgra_8888_sse2_lowp+0x2ac>
.byte 197,252,88,0 // vaddps (%rax),%ymm0,%ymm0
.byte 197,124,16,128,128,0,0,0 // vmovups 0x80(%rax),%ymm8
.byte 197,124,17,128,0,1,0,0 // vmovups %ymm8,0x100(%rax)
@@ -23150,9 +23150,9 @@ HIDDEN _sk_bilinear_ny_hsw
FUNCTION(_sk_bilinear_ny_hsw)
_sk_bilinear_ny_hsw:
.byte 72,173 // lods %ds:(%rsi),%rax
- .byte 196,226,125,24,13,129,174,2,0 // vbroadcastss 0x2ae81(%rip),%ymm1 # 38b00 <_sk_srcover_bgra_8888_sse2_lowp+0x3fc>
+ .byte 196,226,125,24,13,241,175,2,0 // vbroadcastss 0x2aff1(%rip),%ymm1 # 38c70 <_sk_srcover_bgra_8888_sse2_lowp+0x3fc>
.byte 197,244,88,72,64 // vaddps 0x40(%rax),%ymm1,%ymm1
- .byte 196,98,125,24,5,39,173,2,0 // vbroadcastss 0x2ad27(%rip),%ymm8 # 389b4 <_sk_srcover_bgra_8888_sse2_lowp+0x2b0>
+ .byte 196,98,125,24,5,151,174,2,0 // vbroadcastss 0x2ae97(%rip),%ymm8 # 38b24 <_sk_srcover_bgra_8888_sse2_lowp+0x2b0>
.byte 197,60,92,128,192,0,0,0 // vsubps 0xc0(%rax),%ymm8,%ymm8
.byte 197,124,17,128,64,1,0,0 // vmovups %ymm8,0x140(%rax)
.byte 72,173 // lods %ds:(%rsi),%rax
@@ -23163,7 +23163,7 @@ HIDDEN _sk_bilinear_py_hsw
FUNCTION(_sk_bilinear_py_hsw)
_sk_bilinear_py_hsw:
.byte 72,173 // lods %ds:(%rsi),%rax
- .byte 196,226,125,24,13,4,173,2,0 // vbroadcastss 0x2ad04(%rip),%ymm1 # 389b0 <_sk_srcover_bgra_8888_sse2_lowp+0x2ac>
+ .byte 196,226,125,24,13,116,174,2,0 // vbroadcastss 0x2ae74(%rip),%ymm1 # 38b20 <_sk_srcover_bgra_8888_sse2_lowp+0x2ac>
.byte 197,244,88,72,64 // vaddps 0x40(%rax),%ymm1,%ymm1
.byte 197,124,16,128,192,0,0,0 // vmovups 0xc0(%rax),%ymm8
.byte 197,124,17,128,64,1,0,0 // vmovups %ymm8,0x140(%rax)
@@ -23175,13 +23175,13 @@ HIDDEN _sk_bicubic_n3x_hsw
FUNCTION(_sk_bicubic_n3x_hsw)
_sk_bicubic_n3x_hsw:
.byte 72,173 // lods %ds:(%rsi),%rax
- .byte 196,226,125,24,5,52,174,2,0 // vbroadcastss 0x2ae34(%rip),%ymm0 # 38b04 <_sk_srcover_bgra_8888_sse2_lowp+0x400>
+ .byte 196,226,125,24,5,164,175,2,0 // vbroadcastss 0x2afa4(%rip),%ymm0 # 38c74 <_sk_srcover_bgra_8888_sse2_lowp+0x400>
.byte 197,252,88,0 // vaddps (%rax),%ymm0,%ymm0
- .byte 196,98,125,24,5,215,172,2,0 // vbroadcastss 0x2acd7(%rip),%ymm8 # 389b4 <_sk_srcover_bgra_8888_sse2_lowp+0x2b0>
+ .byte 196,98,125,24,5,71,174,2,0 // vbroadcastss 0x2ae47(%rip),%ymm8 # 38b24 <_sk_srcover_bgra_8888_sse2_lowp+0x2b0>
.byte 197,60,92,128,128,0,0,0 // vsubps 0x80(%rax),%ymm8,%ymm8
.byte 196,65,60,89,200 // vmulps %ymm8,%ymm8,%ymm9
- .byte 196,98,125,24,21,53,173,2,0 // vbroadcastss 0x2ad35(%rip),%ymm10 # 38a28 <_sk_srcover_bgra_8888_sse2_lowp+0x324>
- .byte 196,98,125,24,29,12,174,2,0 // vbroadcastss 0x2ae0c(%rip),%ymm11 # 38b08 <_sk_srcover_bgra_8888_sse2_lowp+0x404>
+ .byte 196,98,125,24,21,165,174,2,0 // vbroadcastss 0x2aea5(%rip),%ymm10 # 38b98 <_sk_srcover_bgra_8888_sse2_lowp+0x324>
+ .byte 196,98,125,24,29,124,175,2,0 // vbroadcastss 0x2af7c(%rip),%ymm11 # 38c78 <_sk_srcover_bgra_8888_sse2_lowp+0x404>
.byte 196,66,61,168,218 // vfmadd213ps %ymm10,%ymm8,%ymm11
.byte 196,65,36,89,193 // vmulps %ymm9,%ymm11,%ymm8
.byte 197,124,17,128,0,1,0,0 // vmovups %ymm8,0x100(%rax)
@@ -23193,16 +23193,16 @@ HIDDEN _sk_bicubic_n1x_hsw
FUNCTION(_sk_bicubic_n1x_hsw)
_sk_bicubic_n1x_hsw:
.byte 72,173 // lods %ds:(%rsi),%rax
- .byte 196,226,125,24,5,227,173,2,0 // vbroadcastss 0x2ade3(%rip),%ymm0 # 38b00 <_sk_srcover_bgra_8888_sse2_lowp+0x3fc>
+ .byte 196,226,125,24,5,83,175,2,0 // vbroadcastss 0x2af53(%rip),%ymm0 # 38c70 <_sk_srcover_bgra_8888_sse2_lowp+0x3fc>
.byte 197,252,88,0 // vaddps (%rax),%ymm0,%ymm0
- .byte 196,98,125,24,5,138,172,2,0 // vbroadcastss 0x2ac8a(%rip),%ymm8 # 389b4 <_sk_srcover_bgra_8888_sse2_lowp+0x2b0>
+ .byte 196,98,125,24,5,250,173,2,0 // vbroadcastss 0x2adfa(%rip),%ymm8 # 38b24 <_sk_srcover_bgra_8888_sse2_lowp+0x2b0>
.byte 197,60,92,128,128,0,0,0 // vsubps 0x80(%rax),%ymm8,%ymm8
- .byte 196,98,125,24,13,213,173,2,0 // vbroadcastss 0x2add5(%rip),%ymm9 # 38b10 <_sk_srcover_bgra_8888_sse2_lowp+0x40c>
- .byte 196,98,125,24,21,200,173,2,0 // vbroadcastss 0x2adc8(%rip),%ymm10 # 38b0c <_sk_srcover_bgra_8888_sse2_lowp+0x408>
+ .byte 196,98,125,24,13,69,175,2,0 // vbroadcastss 0x2af45(%rip),%ymm9 # 38c80 <_sk_srcover_bgra_8888_sse2_lowp+0x40c>
+ .byte 196,98,125,24,21,56,175,2,0 // vbroadcastss 0x2af38(%rip),%ymm10 # 38c7c <_sk_srcover_bgra_8888_sse2_lowp+0x408>
.byte 196,66,61,168,209 // vfmadd213ps %ymm9,%ymm8,%ymm10
- .byte 196,98,125,24,13,94,172,2,0 // vbroadcastss 0x2ac5e(%rip),%ymm9 # 389b0 <_sk_srcover_bgra_8888_sse2_lowp+0x2ac>
+ .byte 196,98,125,24,13,206,173,2,0 // vbroadcastss 0x2adce(%rip),%ymm9 # 38b20 <_sk_srcover_bgra_8888_sse2_lowp+0x2ac>
.byte 196,66,61,184,202 // vfmadd231ps %ymm10,%ymm8,%ymm9
- .byte 196,98,125,24,21,180,173,2,0 // vbroadcastss 0x2adb4(%rip),%ymm10 # 38b14 <_sk_srcover_bgra_8888_sse2_lowp+0x410>
+ .byte 196,98,125,24,21,36,175,2,0 // vbroadcastss 0x2af24(%rip),%ymm10 # 38c84 <_sk_srcover_bgra_8888_sse2_lowp+0x410>
.byte 196,66,61,184,209 // vfmadd231ps %ymm9,%ymm8,%ymm10
.byte 197,124,17,144,0,1,0,0 // vmovups %ymm10,0x100(%rax)
.byte 72,173 // lods %ds:(%rsi),%rax
@@ -23213,14 +23213,14 @@ HIDDEN _sk_bicubic_p1x_hsw
FUNCTION(_sk_bicubic_p1x_hsw)
_sk_bicubic_p1x_hsw:
.byte 72,173 // lods %ds:(%rsi),%rax
- .byte 196,98,125,24,5,52,172,2,0 // vbroadcastss 0x2ac34(%rip),%ymm8 # 389b0 <_sk_srcover_bgra_8888_sse2_lowp+0x2ac>
+ .byte 196,98,125,24,5,164,173,2,0 // vbroadcastss 0x2ada4(%rip),%ymm8 # 38b20 <_sk_srcover_bgra_8888_sse2_lowp+0x2ac>
.byte 197,188,88,0 // vaddps (%rax),%ymm8,%ymm0
.byte 197,124,16,136,128,0,0,0 // vmovups 0x80(%rax),%ymm9
- .byte 196,98,125,24,21,127,173,2,0 // vbroadcastss 0x2ad7f(%rip),%ymm10 # 38b10 <_sk_srcover_bgra_8888_sse2_lowp+0x40c>
- .byte 196,98,125,24,29,114,173,2,0 // vbroadcastss 0x2ad72(%rip),%ymm11 # 38b0c <_sk_srcover_bgra_8888_sse2_lowp+0x408>
+ .byte 196,98,125,24,21,239,174,2,0 // vbroadcastss 0x2aeef(%rip),%ymm10 # 38c80 <_sk_srcover_bgra_8888_sse2_lowp+0x40c>
+ .byte 196,98,125,24,29,226,174,2,0 // vbroadcastss 0x2aee2(%rip),%ymm11 # 38c7c <_sk_srcover_bgra_8888_sse2_lowp+0x408>
.byte 196,66,53,168,218 // vfmadd213ps %ymm10,%ymm9,%ymm11
.byte 196,66,53,168,216 // vfmadd213ps %ymm8,%ymm9,%ymm11
- .byte 196,98,125,24,5,103,173,2,0 // vbroadcastss 0x2ad67(%rip),%ymm8 # 38b14 <_sk_srcover_bgra_8888_sse2_lowp+0x410>
+ .byte 196,98,125,24,5,215,174,2,0 // vbroadcastss 0x2aed7(%rip),%ymm8 # 38c84 <_sk_srcover_bgra_8888_sse2_lowp+0x410>
.byte 196,66,53,184,195 // vfmadd231ps %ymm11,%ymm9,%ymm8
.byte 197,124,17,128,0,1,0,0 // vmovups %ymm8,0x100(%rax)
.byte 72,173 // lods %ds:(%rsi),%rax
@@ -23231,12 +23231,12 @@ HIDDEN _sk_bicubic_p3x_hsw
FUNCTION(_sk_bicubic_p3x_hsw)
_sk_bicubic_p3x_hsw:
.byte 72,173 // lods %ds:(%rsi),%rax
- .byte 196,226,125,24,5,71,173,2,0 // vbroadcastss 0x2ad47(%rip),%ymm0 # 38b10 <_sk_srcover_bgra_8888_sse2_lowp+0x40c>
+ .byte 196,226,125,24,5,183,174,2,0 // vbroadcastss 0x2aeb7(%rip),%ymm0 # 38c80 <_sk_srcover_bgra_8888_sse2_lowp+0x40c>
.byte 197,252,88,0 // vaddps (%rax),%ymm0,%ymm0
.byte 197,124,16,128,128,0,0,0 // vmovups 0x80(%rax),%ymm8
.byte 196,65,60,89,200 // vmulps %ymm8,%ymm8,%ymm9
- .byte 196,98,125,24,21,69,172,2,0 // vbroadcastss 0x2ac45(%rip),%ymm10 # 38a28 <_sk_srcover_bgra_8888_sse2_lowp+0x324>
- .byte 196,98,125,24,29,28,173,2,0 // vbroadcastss 0x2ad1c(%rip),%ymm11 # 38b08 <_sk_srcover_bgra_8888_sse2_lowp+0x404>
+ .byte 196,98,125,24,21,181,173,2,0 // vbroadcastss 0x2adb5(%rip),%ymm10 # 38b98 <_sk_srcover_bgra_8888_sse2_lowp+0x324>
+ .byte 196,98,125,24,29,140,174,2,0 // vbroadcastss 0x2ae8c(%rip),%ymm11 # 38c78 <_sk_srcover_bgra_8888_sse2_lowp+0x404>
.byte 196,66,61,168,218 // vfmadd213ps %ymm10,%ymm8,%ymm11
.byte 196,65,52,89,195 // vmulps %ymm11,%ymm9,%ymm8
.byte 197,124,17,128,0,1,0,0 // vmovups %ymm8,0x100(%rax)
@@ -23248,13 +23248,13 @@ HIDDEN _sk_bicubic_n3y_hsw
FUNCTION(_sk_bicubic_n3y_hsw)
_sk_bicubic_n3y_hsw:
.byte 72,173 // lods %ds:(%rsi),%rax
- .byte 196,226,125,24,13,247,172,2,0 // vbroadcastss 0x2acf7(%rip),%ymm1 # 38b04 <_sk_srcover_bgra_8888_sse2_lowp+0x400>
+ .byte 196,226,125,24,13,103,174,2,0 // vbroadcastss 0x2ae67(%rip),%ymm1 # 38c74 <_sk_srcover_bgra_8888_sse2_lowp+0x400>
.byte 197,244,88,72,64 // vaddps 0x40(%rax),%ymm1,%ymm1
- .byte 196,98,125,24,5,153,171,2,0 // vbroadcastss 0x2ab99(%rip),%ymm8 # 389b4 <_sk_srcover_bgra_8888_sse2_lowp+0x2b0>
+ .byte 196,98,125,24,5,9,173,2,0 // vbroadcastss 0x2ad09(%rip),%ymm8 # 38b24 <_sk_srcover_bgra_8888_sse2_lowp+0x2b0>
.byte 197,60,92,128,192,0,0,0 // vsubps 0xc0(%rax),%ymm8,%ymm8
.byte 196,65,60,89,200 // vmulps %ymm8,%ymm8,%ymm9
- .byte 196,98,125,24,21,247,171,2,0 // vbroadcastss 0x2abf7(%rip),%ymm10 # 38a28 <_sk_srcover_bgra_8888_sse2_lowp+0x324>
- .byte 196,98,125,24,29,206,172,2,0 // vbroadcastss 0x2acce(%rip),%ymm11 # 38b08 <_sk_srcover_bgra_8888_sse2_lowp+0x404>
+ .byte 196,98,125,24,21,103,173,2,0 // vbroadcastss 0x2ad67(%rip),%ymm10 # 38b98 <_sk_srcover_bgra_8888_sse2_lowp+0x324>
+ .byte 196,98,125,24,29,62,174,2,0 // vbroadcastss 0x2ae3e(%rip),%ymm11 # 38c78 <_sk_srcover_bgra_8888_sse2_lowp+0x404>
.byte 196,66,61,168,218 // vfmadd213ps %ymm10,%ymm8,%ymm11
.byte 196,65,36,89,193 // vmulps %ymm9,%ymm11,%ymm8
.byte 197,124,17,128,64,1,0,0 // vmovups %ymm8,0x140(%rax)
@@ -23266,16 +23266,16 @@ HIDDEN _sk_bicubic_n1y_hsw
FUNCTION(_sk_bicubic_n1y_hsw)
_sk_bicubic_n1y_hsw:
.byte 72,173 // lods %ds:(%rsi),%rax
- .byte 196,226,125,24,13,165,172,2,0 // vbroadcastss 0x2aca5(%rip),%ymm1 # 38b00 <_sk_srcover_bgra_8888_sse2_lowp+0x3fc>
+ .byte 196,226,125,24,13,21,174,2,0 // vbroadcastss 0x2ae15(%rip),%ymm1 # 38c70 <_sk_srcover_bgra_8888_sse2_lowp+0x3fc>
.byte 197,244,88,72,64 // vaddps 0x40(%rax),%ymm1,%ymm1
- .byte 196,98,125,24,5,75,171,2,0 // vbroadcastss 0x2ab4b(%rip),%ymm8 # 389b4 <_sk_srcover_bgra_8888_sse2_lowp+0x2b0>
+ .byte 196,98,125,24,5,187,172,2,0 // vbroadcastss 0x2acbb(%rip),%ymm8 # 38b24 <_sk_srcover_bgra_8888_sse2_lowp+0x2b0>
.byte 197,60,92,128,192,0,0,0 // vsubps 0xc0(%rax),%ymm8,%ymm8
- .byte 196,98,125,24,13,150,172,2,0 // vbroadcastss 0x2ac96(%rip),%ymm9 # 38b10 <_sk_srcover_bgra_8888_sse2_lowp+0x40c>
- .byte 196,98,125,24,21,137,172,2,0 // vbroadcastss 0x2ac89(%rip),%ymm10 # 38b0c <_sk_srcover_bgra_8888_sse2_lowp+0x408>
+ .byte 196,98,125,24,13,6,174,2,0 // vbroadcastss 0x2ae06(%rip),%ymm9 # 38c80 <_sk_srcover_bgra_8888_sse2_lowp+0x40c>
+ .byte 196,98,125,24,21,249,173,2,0 // vbroadcastss 0x2adf9(%rip),%ymm10 # 38c7c <_sk_srcover_bgra_8888_sse2_lowp+0x408>
.byte 196,66,61,168,209 // vfmadd213ps %ymm9,%ymm8,%ymm10
- .byte 196,98,125,24,13,31,171,2,0 // vbroadcastss 0x2ab1f(%rip),%ymm9 # 389b0 <_sk_srcover_bgra_8888_sse2_lowp+0x2ac>
+ .byte 196,98,125,24,13,143,172,2,0 // vbroadcastss 0x2ac8f(%rip),%ymm9 # 38b20 <_sk_srcover_bgra_8888_sse2_lowp+0x2ac>
.byte 196,66,61,184,202 // vfmadd231ps %ymm10,%ymm8,%ymm9
- .byte 196,98,125,24,21,117,172,2,0 // vbroadcastss 0x2ac75(%rip),%ymm10 # 38b14 <_sk_srcover_bgra_8888_sse2_lowp+0x410>
+ .byte 196,98,125,24,21,229,173,2,0 // vbroadcastss 0x2ade5(%rip),%ymm10 # 38c84 <_sk_srcover_bgra_8888_sse2_lowp+0x410>
.byte 196,66,61,184,209 // vfmadd231ps %ymm9,%ymm8,%ymm10
.byte 197,124,17,144,64,1,0,0 // vmovups %ymm10,0x140(%rax)
.byte 72,173 // lods %ds:(%rsi),%rax
@@ -23286,14 +23286,14 @@ HIDDEN _sk_bicubic_p1y_hsw
FUNCTION(_sk_bicubic_p1y_hsw)
_sk_bicubic_p1y_hsw:
.byte 72,173 // lods %ds:(%rsi),%rax
- .byte 196,98,125,24,5,245,170,2,0 // vbroadcastss 0x2aaf5(%rip),%ymm8 # 389b0 <_sk_srcover_bgra_8888_sse2_lowp+0x2ac>
+ .byte 196,98,125,24,5,101,172,2,0 // vbroadcastss 0x2ac65(%rip),%ymm8 # 38b20 <_sk_srcover_bgra_8888_sse2_lowp+0x2ac>
.byte 197,188,88,72,64 // vaddps 0x40(%rax),%ymm8,%ymm1
.byte 197,124,16,136,192,0,0,0 // vmovups 0xc0(%rax),%ymm9
- .byte 196,98,125,24,21,63,172,2,0 // vbroadcastss 0x2ac3f(%rip),%ymm10 # 38b10 <_sk_srcover_bgra_8888_sse2_lowp+0x40c>
- .byte 196,98,125,24,29,50,172,2,0 // vbroadcastss 0x2ac32(%rip),%ymm11 # 38b0c <_sk_srcover_bgra_8888_sse2_lowp+0x408>
+ .byte 196,98,125,24,21,175,173,2,0 // vbroadcastss 0x2adaf(%rip),%ymm10 # 38c80 <_sk_srcover_bgra_8888_sse2_lowp+0x40c>
+ .byte 196,98,125,24,29,162,173,2,0 // vbroadcastss 0x2ada2(%rip),%ymm11 # 38c7c <_sk_srcover_bgra_8888_sse2_lowp+0x408>
.byte 196,66,53,168,218 // vfmadd213ps %ymm10,%ymm9,%ymm11
.byte 196,66,53,168,216 // vfmadd213ps %ymm8,%ymm9,%ymm11
- .byte 196,98,125,24,5,39,172,2,0 // vbroadcastss 0x2ac27(%rip),%ymm8 # 38b14 <_sk_srcover_bgra_8888_sse2_lowp+0x410>
+ .byte 196,98,125,24,5,151,173,2,0 // vbroadcastss 0x2ad97(%rip),%ymm8 # 38c84 <_sk_srcover_bgra_8888_sse2_lowp+0x410>
.byte 196,66,53,184,195 // vfmadd231ps %ymm11,%ymm9,%ymm8
.byte 197,124,17,128,64,1,0,0 // vmovups %ymm8,0x140(%rax)
.byte 72,173 // lods %ds:(%rsi),%rax
@@ -23304,12 +23304,12 @@ HIDDEN _sk_bicubic_p3y_hsw
FUNCTION(_sk_bicubic_p3y_hsw)
_sk_bicubic_p3y_hsw:
.byte 72,173 // lods %ds:(%rsi),%rax
- .byte 196,226,125,24,13,7,172,2,0 // vbroadcastss 0x2ac07(%rip),%ymm1 # 38b10 <_sk_srcover_bgra_8888_sse2_lowp+0x40c>
+ .byte 196,226,125,24,13,119,173,2,0 // vbroadcastss 0x2ad77(%rip),%ymm1 # 38c80 <_sk_srcover_bgra_8888_sse2_lowp+0x40c>
.byte 197,244,88,72,64 // vaddps 0x40(%rax),%ymm1,%ymm1
.byte 197,124,16,128,192,0,0,0 // vmovups 0xc0(%rax),%ymm8
.byte 196,65,60,89,200 // vmulps %ymm8,%ymm8,%ymm9
- .byte 196,98,125,24,21,4,171,2,0 // vbroadcastss 0x2ab04(%rip),%ymm10 # 38a28 <_sk_srcover_bgra_8888_sse2_lowp+0x324>
- .byte 196,98,125,24,29,219,171,2,0 // vbroadcastss 0x2abdb(%rip),%ymm11 # 38b08 <_sk_srcover_bgra_8888_sse2_lowp+0x404>
+ .byte 196,98,125,24,21,116,172,2,0 // vbroadcastss 0x2ac74(%rip),%ymm10 # 38b98 <_sk_srcover_bgra_8888_sse2_lowp+0x324>
+ .byte 196,98,125,24,29,75,173,2,0 // vbroadcastss 0x2ad4b(%rip),%ymm11 # 38c78 <_sk_srcover_bgra_8888_sse2_lowp+0x404>
.byte 196,66,61,168,218 // vfmadd213ps %ymm10,%ymm8,%ymm11
.byte 196,65,52,89,195 // vmulps %ymm11,%ymm9,%ymm8
.byte 197,124,17,128,64,1,0,0 // vmovups %ymm8,0x140(%rax)
@@ -23449,7 +23449,7 @@ _sk_clut_3D_hsw:
.byte 196,98,93,64,218 // vpmulld %ymm2,%ymm4,%ymm11
.byte 72,139,0 // mov (%rax),%rax
.byte 197,165,254,193 // vpaddd %ymm1,%ymm11,%ymm0
- .byte 196,98,125,88,61,157,169,2,0 // vpbroadcastd 0x2a99d(%rip),%ymm15 # 38b1c <_sk_srcover_bgra_8888_sse2_lowp+0x418>
+ .byte 196,98,125,88,61,13,171,2,0 // vpbroadcastd 0x2ab0d(%rip),%ymm15 # 38c8c <_sk_srcover_bgra_8888_sse2_lowp+0x418>
.byte 196,194,125,64,199 // vpmulld %ymm15,%ymm0,%ymm0
.byte 197,213,239,237 // vpxor %ymm5,%ymm5,%ymm5
.byte 197,237,118,210 // vpcmpeqd %ymm2,%ymm2,%ymm2
@@ -23461,13 +23461,13 @@ _sk_clut_3D_hsw:
.byte 197,213,118,237 // vpcmpeqd %ymm5,%ymm5,%ymm5
.byte 196,226,85,146,60,144 // vgatherdps %ymm5,(%rax,%ymm2,4),%ymm7
.byte 197,252,17,188,36,0,2,0,0 // vmovups %ymm7,0x200(%rsp)
- .byte 196,226,125,88,61,251,167,2,0 // vpbroadcastd 0x2a7fb(%rip),%ymm7 # 389c0 <_sk_srcover_bgra_8888_sse2_lowp+0x2bc>
+ .byte 196,226,125,88,61,107,169,2,0 // vpbroadcastd 0x2a96b(%rip),%ymm7 # 38b30 <_sk_srcover_bgra_8888_sse2_lowp+0x2bc>
.byte 197,253,254,199 // vpaddd %ymm7,%ymm0,%ymm0
.byte 197,236,87,210 // vxorps %ymm2,%ymm2,%ymm2
.byte 197,213,118,237 // vpcmpeqd %ymm5,%ymm5,%ymm5
.byte 196,226,85,146,20,128 // vgatherdps %ymm5,(%rax,%ymm0,4),%ymm2
.byte 197,252,17,148,36,32,1,0,0 // vmovups %ymm2,0x120(%rsp)
- .byte 196,226,125,24,5,47,169,2,0 // vbroadcastss 0x2a92f(%rip),%ymm0 # 38b18 <_sk_srcover_bgra_8888_sse2_lowp+0x414>
+ .byte 196,226,125,24,5,159,170,2,0 // vbroadcastss 0x2aa9f(%rip),%ymm0 # 38c88 <_sk_srcover_bgra_8888_sse2_lowp+0x414>
.byte 197,172,88,232 // vaddps %ymm0,%ymm10,%ymm5
.byte 197,254,91,237 // vcvttps2dq %ymm5,%ymm5
.byte 196,226,93,64,213 // vpmulld %ymm5,%ymm4,%ymm2
@@ -23729,7 +23729,7 @@ _sk_clut_4D_hsw:
.byte 197,254,127,132,36,0,1,0,0 // vmovdqu %ymm0,0x100(%rsp)
.byte 196,98,109,64,200 // vpmulld %ymm0,%ymm2,%ymm9
.byte 197,181,254,199 // vpaddd %ymm7,%ymm9,%ymm0
- .byte 196,98,125,88,21,200,163,2,0 // vpbroadcastd 0x2a3c8(%rip),%ymm10 # 38b1c <_sk_srcover_bgra_8888_sse2_lowp+0x418>
+ .byte 196,98,125,88,21,56,165,2,0 // vpbroadcastd 0x2a538(%rip),%ymm10 # 38c8c <_sk_srcover_bgra_8888_sse2_lowp+0x418>
.byte 196,194,125,64,194 // vpmulld %ymm10,%ymm0,%ymm0
.byte 197,213,118,237 // vpcmpeqd %ymm5,%ymm5,%ymm5
.byte 196,65,60,87,192 // vxorps %ymm8,%ymm8,%ymm8
@@ -23741,13 +23741,13 @@ _sk_clut_4D_hsw:
.byte 196,65,36,87,219 // vxorps %ymm11,%ymm11,%ymm11
.byte 196,98,61,146,28,168 // vgatherdps %ymm8,(%rax,%ymm5,4),%ymm11
.byte 197,124,17,156,36,192,0,0,0 // vmovups %ymm11,0xc0(%rsp)
- .byte 196,98,125,88,29,35,162,2,0 // vpbroadcastd 0x2a223(%rip),%ymm11 # 389c0 <_sk_srcover_bgra_8888_sse2_lowp+0x2bc>
+ .byte 196,98,125,88,29,147,163,2,0 // vpbroadcastd 0x2a393(%rip),%ymm11 # 38b30 <_sk_srcover_bgra_8888_sse2_lowp+0x2bc>
.byte 196,193,125,254,195 // vpaddd %ymm11,%ymm0,%ymm0
.byte 196,65,61,118,192 // vpcmpeqd %ymm8,%ymm8,%ymm8
.byte 197,212,87,237 // vxorps %ymm5,%ymm5,%ymm5
.byte 196,226,61,146,44,128 // vgatherdps %ymm8,(%rax,%ymm0,4),%ymm5
.byte 197,252,17,108,36,32 // vmovups %ymm5,0x20(%rsp)
- .byte 196,226,125,24,5,88,163,2,0 // vbroadcastss 0x2a358(%rip),%ymm0 # 38b18 <_sk_srcover_bgra_8888_sse2_lowp+0x414>
+ .byte 196,226,125,24,5,200,164,2,0 // vbroadcastss 0x2a4c8(%rip),%ymm0 # 38c88 <_sk_srcover_bgra_8888_sse2_lowp+0x414>
.byte 197,116,88,192 // vaddps %ymm0,%ymm1,%ymm8
.byte 196,65,126,91,192 // vcvttps2dq %ymm8,%ymm8
.byte 196,194,109,64,232 // vpmulld %ymm8,%ymm2,%ymm5
@@ -24180,7 +24180,7 @@ _sk_clut_4D_hsw:
.byte 196,193,100,92,210 // vsubps %ymm10,%ymm3,%ymm2
.byte 196,194,77,168,210 // vfmadd213ps %ymm10,%ymm6,%ymm2
.byte 72,173 // lods %ds:(%rsi),%rax
- .byte 196,226,125,24,29,70,152,2,0 // vbroadcastss 0x29846(%rip),%ymm3 # 389b4 <_sk_srcover_bgra_8888_sse2_lowp+0x2b0>
+ .byte 196,226,125,24,29,182,153,2,0 // vbroadcastss 0x299b6(%rip),%ymm3 # 38b24 <_sk_srcover_bgra_8888_sse2_lowp+0x2b0>
.byte 197,252,16,164,36,96,6,0,0 // vmovups 0x660(%rsp),%ymm4
.byte 197,252,16,172,36,128,6,0,0 // vmovups 0x680(%rsp),%ymm5
.byte 197,252,16,180,36,160,6,0,0 // vmovups 0x6a0(%rsp),%ymm6
@@ -24192,14 +24192,14 @@ HIDDEN _sk_gauss_a_to_rgba_hsw
.globl _sk_gauss_a_to_rgba_hsw
FUNCTION(_sk_gauss_a_to_rgba_hsw)
_sk_gauss_a_to_rgba_hsw:
- .byte 196,226,125,24,5,128,153,2,0 // vbroadcastss 0x29980(%rip),%ymm0 # 38b24 <_sk_srcover_bgra_8888_sse2_lowp+0x420>
- .byte 196,226,125,24,13,115,153,2,0 // vbroadcastss 0x29973(%rip),%ymm1 # 38b20 <_sk_srcover_bgra_8888_sse2_lowp+0x41c>
+ .byte 196,226,125,24,5,240,154,2,0 // vbroadcastss 0x29af0(%rip),%ymm0 # 38c94 <_sk_srcover_bgra_8888_sse2_lowp+0x420>
+ .byte 196,226,125,24,13,227,154,2,0 // vbroadcastss 0x29ae3(%rip),%ymm1 # 38c90 <_sk_srcover_bgra_8888_sse2_lowp+0x41c>
.byte 196,226,101,168,200 // vfmadd213ps %ymm0,%ymm3,%ymm1
- .byte 196,226,125,24,5,109,153,2,0 // vbroadcastss 0x2996d(%rip),%ymm0 # 38b28 <_sk_srcover_bgra_8888_sse2_lowp+0x424>
+ .byte 196,226,125,24,5,221,154,2,0 // vbroadcastss 0x29add(%rip),%ymm0 # 38c98 <_sk_srcover_bgra_8888_sse2_lowp+0x424>
.byte 196,226,101,184,193 // vfmadd231ps %ymm1,%ymm3,%ymm0
- .byte 196,226,125,24,13,99,153,2,0 // vbroadcastss 0x29963(%rip),%ymm1 # 38b2c <_sk_srcover_bgra_8888_sse2_lowp+0x428>
+ .byte 196,226,125,24,13,211,154,2,0 // vbroadcastss 0x29ad3(%rip),%ymm1 # 38c9c <_sk_srcover_bgra_8888_sse2_lowp+0x428>
.byte 196,226,101,184,200 // vfmadd231ps %ymm0,%ymm3,%ymm1
- .byte 196,226,125,24,5,89,153,2,0 // vbroadcastss 0x29959(%rip),%ymm0 # 38b30 <_sk_srcover_bgra_8888_sse2_lowp+0x42c>
+ .byte 196,226,125,24,5,201,154,2,0 // vbroadcastss 0x29ac9(%rip),%ymm0 # 38ca0 <_sk_srcover_bgra_8888_sse2_lowp+0x42c>
.byte 196,226,101,184,193 // vfmadd231ps %ymm1,%ymm3,%ymm0
.byte 72,173 // lods %ds:(%rsi),%rax
.byte 197,252,40,200 // vmovaps %ymm0,%ymm1
@@ -24301,10 +24301,10 @@ _sk_seed_shader_avx:
.byte 197,249,112,201,0 // vpshufd $0x0,%xmm1,%xmm1
.byte 196,227,117,24,201,1 // vinsertf128 $0x1,%xmm1,%ymm1,%ymm1
.byte 197,252,91,201 // vcvtdq2ps %ymm1,%ymm1
- .byte 196,226,125,24,21,178,150,2,0 // vbroadcastss 0x296b2(%rip),%ymm2 # 389b0 <_sk_srcover_bgra_8888_sse2_lowp+0x2ac>
+ .byte 196,226,125,24,21,34,152,2,0 // vbroadcastss 0x29822(%rip),%ymm2 # 38b20 <_sk_srcover_bgra_8888_sse2_lowp+0x2ac>
.byte 197,244,88,202 // vaddps %ymm2,%ymm1,%ymm1
.byte 72,173 // lods %ds:(%rsi),%rax
- .byte 196,226,125,24,21,167,150,2,0 // vbroadcastss 0x296a7(%rip),%ymm2 # 389b4 <_sk_srcover_bgra_8888_sse2_lowp+0x2b0>
+ .byte 196,226,125,24,21,23,152,2,0 // vbroadcastss 0x29817(%rip),%ymm2 # 38b24 <_sk_srcover_bgra_8888_sse2_lowp+0x2b0>
.byte 197,228,87,219 // vxorps %ymm3,%ymm3,%ymm3
.byte 197,220,87,228 // vxorps %ymm4,%ymm4,%ymm4
.byte 197,212,87,237 // vxorps %ymm5,%ymm5,%ymm5
@@ -24318,46 +24318,46 @@ FUNCTION(_sk_dither_avx)
_sk_dither_avx:
.byte 197,121,110,194 // vmovd %edx,%xmm8
.byte 196,65,121,112,192,0 // vpshufd $0x0,%xmm8,%xmm8
- .byte 197,57,254,13,139,159,2,0 // vpaddd 0x29f8b(%rip),%xmm8,%xmm9 # 392c0 <_sk_srcover_bgra_8888_sse2_lowp+0xbbc>
- .byte 197,57,254,5,147,159,2,0 // vpaddd 0x29f93(%rip),%xmm8,%xmm8 # 392d0 <_sk_srcover_bgra_8888_sse2_lowp+0xbcc>
+ .byte 197,57,254,13,235,160,2,0 // vpaddd 0x2a0eb(%rip),%xmm8,%xmm9 # 39420 <_sk_srcover_bgra_8888_sse2_lowp+0xbac>
+ .byte 197,57,254,5,243,160,2,0 // vpaddd 0x2a0f3(%rip),%xmm8,%xmm8 # 39430 <_sk_srcover_bgra_8888_sse2_lowp+0xbbc>
.byte 196,67,53,24,208,1 // vinsertf128 $0x1,%xmm8,%ymm9,%ymm10
.byte 197,121,110,217 // vmovd %ecx,%xmm11
.byte 196,65,121,112,219,0 // vpshufd $0x0,%xmm11,%xmm11
.byte 196,67,37,24,219,1 // vinsertf128 $0x1,%xmm11,%ymm11,%ymm11
.byte 196,65,36,87,218 // vxorps %ymm10,%ymm11,%ymm11
- .byte 196,98,125,24,21,87,150,2,0 // vbroadcastss 0x29657(%rip),%ymm10 # 389b8 <_sk_srcover_bgra_8888_sse2_lowp+0x2b4>
+ .byte 196,98,125,24,21,199,151,2,0 // vbroadcastss 0x297c7(%rip),%ymm10 # 38b28 <_sk_srcover_bgra_8888_sse2_lowp+0x2b4>
.byte 196,65,36,84,210 // vandps %ymm10,%ymm11,%ymm10
.byte 196,193,25,114,242,5 // vpslld $0x5,%xmm10,%xmm12
.byte 196,67,125,25,210,1 // vextractf128 $0x1,%ymm10,%xmm10
.byte 196,193,41,114,242,5 // vpslld $0x5,%xmm10,%xmm10
.byte 196,67,29,24,210,1 // vinsertf128 $0x1,%xmm10,%ymm12,%ymm10
- .byte 197,123,18,37,138,167,2,0 // vmovddup 0x2a78a(%rip),%xmm12 # 39b10 <_sk_srcover_bgra_8888_sse2_lowp+0x140c>
+ .byte 197,123,18,37,234,168,2,0 // vmovddup 0x2a8ea(%rip),%xmm12 # 39c70 <_sk_srcover_bgra_8888_sse2_lowp+0x13fc>
.byte 196,65,49,219,236 // vpand %xmm12,%xmm9,%xmm13
.byte 196,193,17,114,245,4 // vpslld $0x4,%xmm13,%xmm13
.byte 196,65,57,219,228 // vpand %xmm12,%xmm8,%xmm12
.byte 196,193,25,114,244,4 // vpslld $0x4,%xmm12,%xmm12
.byte 196,67,21,24,228,1 // vinsertf128 $0x1,%xmm12,%ymm13,%ymm12
- .byte 196,98,125,24,45,21,150,2,0 // vbroadcastss 0x29615(%rip),%ymm13 # 389c0 <_sk_srcover_bgra_8888_sse2_lowp+0x2bc>
+ .byte 196,98,125,24,45,133,151,2,0 // vbroadcastss 0x29785(%rip),%ymm13 # 38b30 <_sk_srcover_bgra_8888_sse2_lowp+0x2bc>
.byte 196,65,36,84,237 // vandps %ymm13,%ymm11,%ymm13
.byte 196,193,9,114,245,2 // vpslld $0x2,%xmm13,%xmm14
.byte 196,67,125,25,237,1 // vextractf128 $0x1,%ymm13,%xmm13
.byte 196,193,17,114,245,2 // vpslld $0x2,%xmm13,%xmm13
.byte 196,67,13,24,237,1 // vinsertf128 $0x1,%xmm13,%ymm14,%ymm13
- .byte 197,123,18,53,72,167,2,0 // vmovddup 0x2a748(%rip),%xmm14 # 39b18 <_sk_srcover_bgra_8888_sse2_lowp+0x1414>
+ .byte 197,123,18,53,168,168,2,0 // vmovddup 0x2a8a8(%rip),%xmm14 # 39c78 <_sk_srcover_bgra_8888_sse2_lowp+0x1404>
.byte 196,65,49,219,254 // vpand %xmm14,%xmm9,%xmm15
.byte 196,65,1,254,255 // vpaddd %xmm15,%xmm15,%xmm15
.byte 196,65,57,219,246 // vpand %xmm14,%xmm8,%xmm14
.byte 196,65,9,254,246 // vpaddd %xmm14,%xmm14,%xmm14
.byte 196,67,5,24,246,1 // vinsertf128 $0x1,%xmm14,%ymm15,%ymm14
.byte 196,65,12,86,228 // vorps %ymm12,%ymm14,%ymm12
- .byte 196,98,125,24,53,196,149,2,0 // vbroadcastss 0x295c4(%rip),%ymm14 # 389bc <_sk_srcover_bgra_8888_sse2_lowp+0x2b8>
+ .byte 196,98,125,24,53,52,151,2,0 // vbroadcastss 0x29734(%rip),%ymm14 # 38b2c <_sk_srcover_bgra_8888_sse2_lowp+0x2b8>
.byte 196,65,36,84,222 // vandps %ymm14,%ymm11,%ymm11
.byte 196,193,9,114,211,1 // vpsrld $0x1,%xmm11,%xmm14
.byte 196,67,125,25,219,1 // vextractf128 $0x1,%ymm11,%xmm11
.byte 196,193,33,114,211,1 // vpsrld $0x1,%xmm11,%xmm11
.byte 196,67,13,24,219,1 // vinsertf128 $0x1,%xmm11,%ymm14,%ymm11
.byte 196,65,20,86,219 // vorps %ymm11,%ymm13,%ymm11
- .byte 197,123,18,45,254,166,2,0 // vmovddup 0x2a6fe(%rip),%xmm13 # 39b20 <_sk_srcover_bgra_8888_sse2_lowp+0x141c>
+ .byte 197,123,18,45,94,168,2,0 // vmovddup 0x2a85e(%rip),%xmm13 # 39c80 <_sk_srcover_bgra_8888_sse2_lowp+0x140c>
.byte 196,65,49,219,205 // vpand %xmm13,%xmm9,%xmm9
.byte 196,65,57,219,197 // vpand %xmm13,%xmm8,%xmm8
.byte 196,193,49,114,209,2 // vpsrld $0x2,%xmm9,%xmm9
@@ -24368,9 +24368,9 @@ _sk_dither_avx:
.byte 72,173 // lods %ds:(%rsi),%rax
.byte 196,65,60,86,195 // vorps %ymm11,%ymm8,%ymm8
.byte 196,65,124,91,192 // vcvtdq2ps %ymm8,%ymm8
- .byte 196,98,125,24,13,103,149,2,0 // vbroadcastss 0x29567(%rip),%ymm9 # 389c4 <_sk_srcover_bgra_8888_sse2_lowp+0x2c0>
+ .byte 196,98,125,24,13,215,150,2,0 // vbroadcastss 0x296d7(%rip),%ymm9 # 38b34 <_sk_srcover_bgra_8888_sse2_lowp+0x2c0>
.byte 196,65,60,89,193 // vmulps %ymm9,%ymm8,%ymm8
- .byte 196,98,125,24,13,93,149,2,0 // vbroadcastss 0x2955d(%rip),%ymm9 # 389c8 <_sk_srcover_bgra_8888_sse2_lowp+0x2c4>
+ .byte 196,98,125,24,13,205,150,2,0 // vbroadcastss 0x296cd(%rip),%ymm9 # 38b38 <_sk_srcover_bgra_8888_sse2_lowp+0x2c4>
.byte 196,65,60,88,193 // vaddps %ymm9,%ymm8,%ymm8
.byte 196,98,125,24,8 // vbroadcastss (%rax),%ymm9
.byte 196,65,60,89,193 // vmulps %ymm9,%ymm8,%ymm8
@@ -24404,7 +24404,7 @@ HIDDEN _sk_black_color_avx
FUNCTION(_sk_black_color_avx)
_sk_black_color_avx:
.byte 72,173 // lods %ds:(%rsi),%rax
- .byte 196,226,125,24,29,229,148,2,0 // vbroadcastss 0x294e5(%rip),%ymm3 # 389b4 <_sk_srcover_bgra_8888_sse2_lowp+0x2b0>
+ .byte 196,226,125,24,29,85,150,2,0 // vbroadcastss 0x29655(%rip),%ymm3 # 38b24 <_sk_srcover_bgra_8888_sse2_lowp+0x2b0>
.byte 197,252,87,192 // vxorps %ymm0,%ymm0,%ymm0
.byte 197,244,87,201 // vxorps %ymm1,%ymm1,%ymm1
.byte 197,236,87,210 // vxorps %ymm2,%ymm2,%ymm2
@@ -24415,7 +24415,7 @@ HIDDEN _sk_white_color_avx
FUNCTION(_sk_white_color_avx)
_sk_white_color_avx:
.byte 72,173 // lods %ds:(%rsi),%rax
- .byte 196,226,125,24,5,204,148,2,0 // vbroadcastss 0x294cc(%rip),%ymm0 # 389b4 <_sk_srcover_bgra_8888_sse2_lowp+0x2b0>
+ .byte 196,226,125,24,5,60,150,2,0 // vbroadcastss 0x2963c(%rip),%ymm0 # 38b24 <_sk_srcover_bgra_8888_sse2_lowp+0x2b0>
.byte 197,252,40,200 // vmovaps %ymm0,%ymm1
.byte 197,252,40,208 // vmovaps %ymm0,%ymm2
.byte 197,252,40,216 // vmovaps %ymm0,%ymm3
@@ -24461,7 +24461,7 @@ HIDDEN _sk_srcatop_avx
FUNCTION(_sk_srcatop_avx)
_sk_srcatop_avx:
.byte 197,252,89,199 // vmulps %ymm7,%ymm0,%ymm0
- .byte 196,98,125,24,5,107,148,2,0 // vbroadcastss 0x2946b(%rip),%ymm8 # 389b4 <_sk_srcover_bgra_8888_sse2_lowp+0x2b0>
+ .byte 196,98,125,24,5,219,149,2,0 // vbroadcastss 0x295db(%rip),%ymm8 # 38b24 <_sk_srcover_bgra_8888_sse2_lowp+0x2b0>
.byte 197,60,92,195 // vsubps %ymm3,%ymm8,%ymm8
.byte 197,60,89,204 // vmulps %ymm4,%ymm8,%ymm9
.byte 197,180,88,192 // vaddps %ymm0,%ymm9,%ymm0
@@ -24482,7 +24482,7 @@ HIDDEN _sk_dstatop_avx
FUNCTION(_sk_dstatop_avx)
_sk_dstatop_avx:
.byte 197,100,89,196 // vmulps %ymm4,%ymm3,%ymm8
- .byte 196,98,125,24,13,41,148,2,0 // vbroadcastss 0x29429(%rip),%ymm9 # 389b4 <_sk_srcover_bgra_8888_sse2_lowp+0x2b0>
+ .byte 196,98,125,24,13,153,149,2,0 // vbroadcastss 0x29599(%rip),%ymm9 # 38b24 <_sk_srcover_bgra_8888_sse2_lowp+0x2b0>
.byte 197,52,92,207 // vsubps %ymm7,%ymm9,%ymm9
.byte 197,180,89,192 // vmulps %ymm0,%ymm9,%ymm0
.byte 197,188,88,192 // vaddps %ymm0,%ymm8,%ymm0
@@ -24524,7 +24524,7 @@ HIDDEN _sk_srcout_avx
.globl _sk_srcout_avx
FUNCTION(_sk_srcout_avx)
_sk_srcout_avx:
- .byte 196,98,125,24,5,196,147,2,0 // vbroadcastss 0x293c4(%rip),%ymm8 # 389b4 <_sk_srcover_bgra_8888_sse2_lowp+0x2b0>
+ .byte 196,98,125,24,5,52,149,2,0 // vbroadcastss 0x29534(%rip),%ymm8 # 38b24 <_sk_srcover_bgra_8888_sse2_lowp+0x2b0>
.byte 197,60,92,199 // vsubps %ymm7,%ymm8,%ymm8
.byte 197,188,89,192 // vmulps %ymm0,%ymm8,%ymm0
.byte 197,188,89,201 // vmulps %ymm1,%ymm8,%ymm1
@@ -24537,7 +24537,7 @@ HIDDEN _sk_dstout_avx
.globl _sk_dstout_avx
FUNCTION(_sk_dstout_avx)
_sk_dstout_avx:
- .byte 196,226,125,24,5,163,147,2,0 // vbroadcastss 0x293a3(%rip),%ymm0 # 389b4 <_sk_srcover_bgra_8888_sse2_lowp+0x2b0>
+ .byte 196,226,125,24,5,19,149,2,0 // vbroadcastss 0x29513(%rip),%ymm0 # 38b24 <_sk_srcover_bgra_8888_sse2_lowp+0x2b0>
.byte 197,252,92,219 // vsubps %ymm3,%ymm0,%ymm3
.byte 197,228,89,196 // vmulps %ymm4,%ymm3,%ymm0
.byte 197,228,89,205 // vmulps %ymm5,%ymm3,%ymm1
@@ -24550,7 +24550,7 @@ HIDDEN _sk_srcover_avx
.globl _sk_srcover_avx
FUNCTION(_sk_srcover_avx)
_sk_srcover_avx:
- .byte 196,98,125,24,5,130,147,2,0 // vbroadcastss 0x29382(%rip),%ymm8 # 389b4 <_sk_srcover_bgra_8888_sse2_lowp+0x2b0>
+ .byte 196,98,125,24,5,242,148,2,0 // vbroadcastss 0x294f2(%rip),%ymm8 # 38b24 <_sk_srcover_bgra_8888_sse2_lowp+0x2b0>
.byte 197,60,92,195 // vsubps %ymm3,%ymm8,%ymm8
.byte 197,60,89,204 // vmulps %ymm4,%ymm8,%ymm9
.byte 197,180,88,192 // vaddps %ymm0,%ymm9,%ymm0
@@ -24567,7 +24567,7 @@ HIDDEN _sk_dstover_avx
.globl _sk_dstover_avx
FUNCTION(_sk_dstover_avx)
_sk_dstover_avx:
- .byte 196,98,125,24,5,81,147,2,0 // vbroadcastss 0x29351(%rip),%ymm8 # 389b4 <_sk_srcover_bgra_8888_sse2_lowp+0x2b0>
+ .byte 196,98,125,24,5,193,148,2,0 // vbroadcastss 0x294c1(%rip),%ymm8 # 38b24 <_sk_srcover_bgra_8888_sse2_lowp+0x2b0>
.byte 197,60,92,199 // vsubps %ymm7,%ymm8,%ymm8
.byte 197,188,89,192 // vmulps %ymm0,%ymm8,%ymm0
.byte 197,252,88,196 // vaddps %ymm4,%ymm0,%ymm0
@@ -24595,7 +24595,7 @@ HIDDEN _sk_multiply_avx
.globl _sk_multiply_avx
FUNCTION(_sk_multiply_avx)
_sk_multiply_avx:
- .byte 196,98,125,24,5,12,147,2,0 // vbroadcastss 0x2930c(%rip),%ymm8 # 389b4 <_sk_srcover_bgra_8888_sse2_lowp+0x2b0>
+ .byte 196,98,125,24,5,124,148,2,0 // vbroadcastss 0x2947c(%rip),%ymm8 # 38b24 <_sk_srcover_bgra_8888_sse2_lowp+0x2b0>
.byte 197,60,92,207 // vsubps %ymm7,%ymm8,%ymm9
.byte 197,52,89,208 // vmulps %ymm0,%ymm9,%ymm10
.byte 197,60,92,195 // vsubps %ymm3,%ymm8,%ymm8
@@ -24626,7 +24626,7 @@ HIDDEN _sk_plus__avx
FUNCTION(_sk_plus__avx)
_sk_plus__avx:
.byte 197,252,88,196 // vaddps %ymm4,%ymm0,%ymm0
- .byte 196,98,125,24,5,155,146,2,0 // vbroadcastss 0x2929b(%rip),%ymm8 # 389b4 <_sk_srcover_bgra_8888_sse2_lowp+0x2b0>
+ .byte 196,98,125,24,5,11,148,2,0 // vbroadcastss 0x2940b(%rip),%ymm8 # 38b24 <_sk_srcover_bgra_8888_sse2_lowp+0x2b0>
.byte 196,193,124,93,192 // vminps %ymm8,%ymm0,%ymm0
.byte 197,244,88,205 // vaddps %ymm5,%ymm1,%ymm1
.byte 196,193,116,93,200 // vminps %ymm8,%ymm1,%ymm1
@@ -24660,7 +24660,7 @@ HIDDEN _sk_xor__avx
.globl _sk_xor__avx
FUNCTION(_sk_xor__avx)
_sk_xor__avx:
- .byte 196,98,125,24,5,58,146,2,0 // vbroadcastss 0x2923a(%rip),%ymm8 # 389b4 <_sk_srcover_bgra_8888_sse2_lowp+0x2b0>
+ .byte 196,98,125,24,5,170,147,2,0 // vbroadcastss 0x293aa(%rip),%ymm8 # 38b24 <_sk_srcover_bgra_8888_sse2_lowp+0x2b0>
.byte 197,60,92,207 // vsubps %ymm7,%ymm8,%ymm9
.byte 197,180,89,192 // vmulps %ymm0,%ymm9,%ymm0
.byte 197,60,92,195 // vsubps %ymm3,%ymm8,%ymm8
@@ -24697,7 +24697,7 @@ _sk_darken_avx:
.byte 197,100,89,206 // vmulps %ymm6,%ymm3,%ymm9
.byte 196,193,108,95,209 // vmaxps %ymm9,%ymm2,%ymm2
.byte 197,188,92,210 // vsubps %ymm2,%ymm8,%ymm2
- .byte 196,98,125,24,5,182,145,2,0 // vbroadcastss 0x291b6(%rip),%ymm8 # 389b4 <_sk_srcover_bgra_8888_sse2_lowp+0x2b0>
+ .byte 196,98,125,24,5,38,147,2,0 // vbroadcastss 0x29326(%rip),%ymm8 # 38b24 <_sk_srcover_bgra_8888_sse2_lowp+0x2b0>
.byte 197,60,92,195 // vsubps %ymm3,%ymm8,%ymm8
.byte 197,60,89,199 // vmulps %ymm7,%ymm8,%ymm8
.byte 197,188,88,219 // vaddps %ymm3,%ymm8,%ymm3
@@ -24723,7 +24723,7 @@ _sk_lighten_avx:
.byte 197,100,89,206 // vmulps %ymm6,%ymm3,%ymm9
.byte 196,193,108,93,209 // vminps %ymm9,%ymm2,%ymm2
.byte 197,188,92,210 // vsubps %ymm2,%ymm8,%ymm2
- .byte 196,98,125,24,5,94,145,2,0 // vbroadcastss 0x2915e(%rip),%ymm8 # 389b4 <_sk_srcover_bgra_8888_sse2_lowp+0x2b0>
+ .byte 196,98,125,24,5,206,146,2,0 // vbroadcastss 0x292ce(%rip),%ymm8 # 38b24 <_sk_srcover_bgra_8888_sse2_lowp+0x2b0>
.byte 197,60,92,195 // vsubps %ymm3,%ymm8,%ymm8
.byte 197,60,89,199 // vmulps %ymm7,%ymm8,%ymm8
.byte 197,188,88,219 // vaddps %ymm3,%ymm8,%ymm3
@@ -24752,7 +24752,7 @@ _sk_difference_avx:
.byte 196,193,108,93,209 // vminps %ymm9,%ymm2,%ymm2
.byte 197,236,88,210 // vaddps %ymm2,%ymm2,%ymm2
.byte 197,188,92,210 // vsubps %ymm2,%ymm8,%ymm2
- .byte 196,98,125,24,5,250,144,2,0 // vbroadcastss 0x290fa(%rip),%ymm8 # 389b4 <_sk_srcover_bgra_8888_sse2_lowp+0x2b0>
+ .byte 196,98,125,24,5,106,146,2,0 // vbroadcastss 0x2926a(%rip),%ymm8 # 38b24 <_sk_srcover_bgra_8888_sse2_lowp+0x2b0>
.byte 197,60,92,195 // vsubps %ymm3,%ymm8,%ymm8
.byte 197,60,89,199 // vmulps %ymm7,%ymm8,%ymm8
.byte 197,188,88,219 // vaddps %ymm3,%ymm8,%ymm3
@@ -24775,7 +24775,7 @@ _sk_exclusion_avx:
.byte 197,236,89,214 // vmulps %ymm6,%ymm2,%ymm2
.byte 197,236,88,210 // vaddps %ymm2,%ymm2,%ymm2
.byte 197,188,92,210 // vsubps %ymm2,%ymm8,%ymm2
- .byte 196,98,125,24,5,177,144,2,0 // vbroadcastss 0x290b1(%rip),%ymm8 # 389b4 <_sk_srcover_bgra_8888_sse2_lowp+0x2b0>
+ .byte 196,98,125,24,5,33,146,2,0 // vbroadcastss 0x29221(%rip),%ymm8 # 38b24 <_sk_srcover_bgra_8888_sse2_lowp+0x2b0>
.byte 197,60,92,195 // vsubps %ymm3,%ymm8,%ymm8
.byte 197,60,89,199 // vmulps %ymm7,%ymm8,%ymm8
.byte 197,188,88,219 // vaddps %ymm3,%ymm8,%ymm3
@@ -24786,7 +24786,7 @@ HIDDEN _sk_colorburn_avx
.globl _sk_colorburn_avx
FUNCTION(_sk_colorburn_avx)
_sk_colorburn_avx:
- .byte 196,98,125,24,5,152,144,2,0 // vbroadcastss 0x29098(%rip),%ymm8 # 389b4 <_sk_srcover_bgra_8888_sse2_lowp+0x2b0>
+ .byte 196,98,125,24,5,8,146,2,0 // vbroadcastss 0x29208(%rip),%ymm8 # 38b24 <_sk_srcover_bgra_8888_sse2_lowp+0x2b0>
.byte 197,60,92,207 // vsubps %ymm7,%ymm8,%ymm9
.byte 197,52,89,216 // vmulps %ymm0,%ymm9,%ymm11
.byte 196,65,44,87,210 // vxorps %ymm10,%ymm10,%ymm10
@@ -24848,7 +24848,7 @@ HIDDEN _sk_colordodge_avx
FUNCTION(_sk_colordodge_avx)
_sk_colordodge_avx:
.byte 196,65,60,87,192 // vxorps %ymm8,%ymm8,%ymm8
- .byte 196,98,125,24,13,156,143,2,0 // vbroadcastss 0x28f9c(%rip),%ymm9 # 389b4 <_sk_srcover_bgra_8888_sse2_lowp+0x2b0>
+ .byte 196,98,125,24,13,12,145,2,0 // vbroadcastss 0x2910c(%rip),%ymm9 # 38b24 <_sk_srcover_bgra_8888_sse2_lowp+0x2b0>
.byte 197,52,92,215 // vsubps %ymm7,%ymm9,%ymm10
.byte 197,44,89,216 // vmulps %ymm0,%ymm10,%ymm11
.byte 197,52,92,203 // vsubps %ymm3,%ymm9,%ymm9
@@ -24905,7 +24905,7 @@ HIDDEN _sk_hardlight_avx
.globl _sk_hardlight_avx
FUNCTION(_sk_hardlight_avx)
_sk_hardlight_avx:
- .byte 196,98,125,24,5,167,142,2,0 // vbroadcastss 0x28ea7(%rip),%ymm8 # 389b4 <_sk_srcover_bgra_8888_sse2_lowp+0x2b0>
+ .byte 196,98,125,24,5,23,144,2,0 // vbroadcastss 0x29017(%rip),%ymm8 # 38b24 <_sk_srcover_bgra_8888_sse2_lowp+0x2b0>
.byte 197,60,92,215 // vsubps %ymm7,%ymm8,%ymm10
.byte 197,44,89,200 // vmulps %ymm0,%ymm10,%ymm9
.byte 197,60,92,195 // vsubps %ymm3,%ymm8,%ymm8
@@ -24960,7 +24960,7 @@ HIDDEN _sk_overlay_avx
.globl _sk_overlay_avx
FUNCTION(_sk_overlay_avx)
_sk_overlay_avx:
- .byte 196,98,125,24,5,204,141,2,0 // vbroadcastss 0x28dcc(%rip),%ymm8 # 389b4 <_sk_srcover_bgra_8888_sse2_lowp+0x2b0>
+ .byte 196,98,125,24,5,60,143,2,0 // vbroadcastss 0x28f3c(%rip),%ymm8 # 38b24 <_sk_srcover_bgra_8888_sse2_lowp+0x2b0>
.byte 197,60,92,215 // vsubps %ymm7,%ymm8,%ymm10
.byte 197,44,89,200 // vmulps %ymm0,%ymm10,%ymm9
.byte 197,60,92,195 // vsubps %ymm3,%ymm8,%ymm8
@@ -25026,10 +25026,10 @@ _sk_softlight_avx:
.byte 196,65,60,88,192 // vaddps %ymm8,%ymm8,%ymm8
.byte 196,65,60,89,216 // vmulps %ymm8,%ymm8,%ymm11
.byte 196,65,60,88,195 // vaddps %ymm11,%ymm8,%ymm8
- .byte 196,98,125,24,29,211,140,2,0 // vbroadcastss 0x28cd3(%rip),%ymm11 # 389cc <_sk_srcover_bgra_8888_sse2_lowp+0x2c8>
+ .byte 196,98,125,24,29,67,142,2,0 // vbroadcastss 0x28e43(%rip),%ymm11 # 38b3c <_sk_srcover_bgra_8888_sse2_lowp+0x2c8>
.byte 196,65,28,88,235 // vaddps %ymm11,%ymm12,%ymm13
.byte 196,65,20,89,192 // vmulps %ymm8,%ymm13,%ymm8
- .byte 196,98,125,24,45,196,140,2,0 // vbroadcastss 0x28cc4(%rip),%ymm13 # 389d0 <_sk_srcover_bgra_8888_sse2_lowp+0x2cc>
+ .byte 196,98,125,24,45,52,142,2,0 // vbroadcastss 0x28e34(%rip),%ymm13 # 38b40 <_sk_srcover_bgra_8888_sse2_lowp+0x2cc>
.byte 196,65,28,89,245 // vmulps %ymm13,%ymm12,%ymm14
.byte 196,65,12,88,192 // vaddps %ymm8,%ymm14,%ymm8
.byte 196,65,124,82,244 // vrsqrtps %ymm12,%ymm14
@@ -25040,7 +25040,7 @@ _sk_softlight_avx:
.byte 197,4,194,255,2 // vcmpleps %ymm7,%ymm15,%ymm15
.byte 196,67,13,74,240,240 // vblendvps %ymm15,%ymm8,%ymm14,%ymm14
.byte 197,116,88,249 // vaddps %ymm1,%ymm1,%ymm15
- .byte 196,98,125,24,5,110,140,2,0 // vbroadcastss 0x28c6e(%rip),%ymm8 # 389b4 <_sk_srcover_bgra_8888_sse2_lowp+0x2b0>
+ .byte 196,98,125,24,5,222,141,2,0 // vbroadcastss 0x28dde(%rip),%ymm8 # 38b24 <_sk_srcover_bgra_8888_sse2_lowp+0x2b0>
.byte 196,65,60,92,228 // vsubps %ymm12,%ymm8,%ymm12
.byte 197,132,92,195 // vsubps %ymm3,%ymm15,%ymm0
.byte 196,65,124,89,228 // vmulps %ymm12,%ymm0,%ymm12
@@ -25167,12 +25167,12 @@ _sk_hue_avx:
.byte 196,65,28,89,219 // vmulps %ymm11,%ymm12,%ymm11
.byte 196,65,36,94,222 // vdivps %ymm14,%ymm11,%ymm11
.byte 196,67,37,74,224,240 // vblendvps %ymm15,%ymm8,%ymm11,%ymm12
- .byte 196,98,125,24,53,81,138,2,0 // vbroadcastss 0x28a51(%rip),%ymm14 # 389d4 <_sk_srcover_bgra_8888_sse2_lowp+0x2d0>
+ .byte 196,98,125,24,53,193,139,2,0 // vbroadcastss 0x28bc1(%rip),%ymm14 # 38b44 <_sk_srcover_bgra_8888_sse2_lowp+0x2d0>
.byte 196,65,92,89,222 // vmulps %ymm14,%ymm4,%ymm11
- .byte 196,98,125,24,61,71,138,2,0 // vbroadcastss 0x28a47(%rip),%ymm15 # 389d8 <_sk_srcover_bgra_8888_sse2_lowp+0x2d4>
+ .byte 196,98,125,24,61,183,139,2,0 // vbroadcastss 0x28bb7(%rip),%ymm15 # 38b48 <_sk_srcover_bgra_8888_sse2_lowp+0x2d4>
.byte 196,65,84,89,239 // vmulps %ymm15,%ymm5,%ymm13
.byte 196,65,36,88,221 // vaddps %ymm13,%ymm11,%ymm11
- .byte 196,226,125,24,5,56,138,2,0 // vbroadcastss 0x28a38(%rip),%ymm0 # 389dc <_sk_srcover_bgra_8888_sse2_lowp+0x2d8>
+ .byte 196,226,125,24,5,168,139,2,0 // vbroadcastss 0x28ba8(%rip),%ymm0 # 38b4c <_sk_srcover_bgra_8888_sse2_lowp+0x2d8>
.byte 197,76,89,232 // vmulps %ymm0,%ymm6,%ymm13
.byte 196,65,36,88,221 // vaddps %ymm13,%ymm11,%ymm11
.byte 196,65,52,89,238 // vmulps %ymm14,%ymm9,%ymm13
@@ -25233,7 +25233,7 @@ _sk_hue_avx:
.byte 196,65,36,95,208 // vmaxps %ymm8,%ymm11,%ymm10
.byte 196,195,109,74,209,240 // vblendvps %ymm15,%ymm9,%ymm2,%ymm2
.byte 196,193,108,95,208 // vmaxps %ymm8,%ymm2,%ymm2
- .byte 196,98,125,24,5,229,136,2,0 // vbroadcastss 0x288e5(%rip),%ymm8 # 389b4 <_sk_srcover_bgra_8888_sse2_lowp+0x2b0>
+ .byte 196,98,125,24,5,85,138,2,0 // vbroadcastss 0x28a55(%rip),%ymm8 # 38b24 <_sk_srcover_bgra_8888_sse2_lowp+0x2b0>
.byte 197,60,92,207 // vsubps %ymm7,%ymm8,%ymm9
.byte 197,180,89,201 // vmulps %ymm1,%ymm9,%ymm1
.byte 197,60,92,195 // vsubps %ymm3,%ymm8,%ymm8
@@ -25290,12 +25290,12 @@ _sk_saturation_avx:
.byte 196,65,28,89,219 // vmulps %ymm11,%ymm12,%ymm11
.byte 196,65,36,94,222 // vdivps %ymm14,%ymm11,%ymm11
.byte 196,67,37,74,224,240 // vblendvps %ymm15,%ymm8,%ymm11,%ymm12
- .byte 196,98,125,24,53,15,136,2,0 // vbroadcastss 0x2880f(%rip),%ymm14 # 389d4 <_sk_srcover_bgra_8888_sse2_lowp+0x2d0>
+ .byte 196,98,125,24,53,127,137,2,0 // vbroadcastss 0x2897f(%rip),%ymm14 # 38b44 <_sk_srcover_bgra_8888_sse2_lowp+0x2d0>
.byte 196,65,92,89,222 // vmulps %ymm14,%ymm4,%ymm11
- .byte 196,98,125,24,61,5,136,2,0 // vbroadcastss 0x28805(%rip),%ymm15 # 389d8 <_sk_srcover_bgra_8888_sse2_lowp+0x2d4>
+ .byte 196,98,125,24,61,117,137,2,0 // vbroadcastss 0x28975(%rip),%ymm15 # 38b48 <_sk_srcover_bgra_8888_sse2_lowp+0x2d4>
.byte 196,65,84,89,239 // vmulps %ymm15,%ymm5,%ymm13
.byte 196,65,36,88,221 // vaddps %ymm13,%ymm11,%ymm11
- .byte 196,226,125,24,5,246,135,2,0 // vbroadcastss 0x287f6(%rip),%ymm0 # 389dc <_sk_srcover_bgra_8888_sse2_lowp+0x2d8>
+ .byte 196,226,125,24,5,102,137,2,0 // vbroadcastss 0x28966(%rip),%ymm0 # 38b4c <_sk_srcover_bgra_8888_sse2_lowp+0x2d8>
.byte 197,76,89,232 // vmulps %ymm0,%ymm6,%ymm13
.byte 196,65,36,88,221 // vaddps %ymm13,%ymm11,%ymm11
.byte 196,65,52,89,238 // vmulps %ymm14,%ymm9,%ymm13
@@ -25356,7 +25356,7 @@ _sk_saturation_avx:
.byte 196,65,36,95,208 // vmaxps %ymm8,%ymm11,%ymm10
.byte 196,195,109,74,209,240 // vblendvps %ymm15,%ymm9,%ymm2,%ymm2
.byte 196,193,108,95,208 // vmaxps %ymm8,%ymm2,%ymm2
- .byte 196,98,125,24,5,163,134,2,0 // vbroadcastss 0x286a3(%rip),%ymm8 # 389b4 <_sk_srcover_bgra_8888_sse2_lowp+0x2b0>
+ .byte 196,98,125,24,5,19,136,2,0 // vbroadcastss 0x28813(%rip),%ymm8 # 38b24 <_sk_srcover_bgra_8888_sse2_lowp+0x2b0>
.byte 197,60,92,207 // vsubps %ymm7,%ymm8,%ymm9
.byte 197,180,89,201 // vmulps %ymm1,%ymm9,%ymm1
.byte 197,60,92,195 // vsubps %ymm3,%ymm8,%ymm8
@@ -25385,12 +25385,12 @@ _sk_color_avx:
.byte 197,252,17,68,36,168 // vmovups %ymm0,-0x58(%rsp)
.byte 197,124,89,199 // vmulps %ymm7,%ymm0,%ymm8
.byte 197,116,89,207 // vmulps %ymm7,%ymm1,%ymm9
- .byte 196,98,125,24,45,85,134,2,0 // vbroadcastss 0x28655(%rip),%ymm13 # 389d4 <_sk_srcover_bgra_8888_sse2_lowp+0x2d0>
+ .byte 196,98,125,24,45,197,135,2,0 // vbroadcastss 0x287c5(%rip),%ymm13 # 38b44 <_sk_srcover_bgra_8888_sse2_lowp+0x2d0>
.byte 196,65,92,89,213 // vmulps %ymm13,%ymm4,%ymm10
- .byte 196,98,125,24,53,75,134,2,0 // vbroadcastss 0x2864b(%rip),%ymm14 # 389d8 <_sk_srcover_bgra_8888_sse2_lowp+0x2d4>
+ .byte 196,98,125,24,53,187,135,2,0 // vbroadcastss 0x287bb(%rip),%ymm14 # 38b48 <_sk_srcover_bgra_8888_sse2_lowp+0x2d4>
.byte 196,65,84,89,222 // vmulps %ymm14,%ymm5,%ymm11
.byte 196,65,44,88,211 // vaddps %ymm11,%ymm10,%ymm10
- .byte 196,98,125,24,61,60,134,2,0 // vbroadcastss 0x2863c(%rip),%ymm15 # 389dc <_sk_srcover_bgra_8888_sse2_lowp+0x2d8>
+ .byte 196,98,125,24,61,172,135,2,0 // vbroadcastss 0x287ac(%rip),%ymm15 # 38b4c <_sk_srcover_bgra_8888_sse2_lowp+0x2d8>
.byte 196,65,76,89,223 // vmulps %ymm15,%ymm6,%ymm11
.byte 196,193,44,88,195 // vaddps %ymm11,%ymm10,%ymm0
.byte 196,65,60,89,221 // vmulps %ymm13,%ymm8,%ymm11
@@ -25453,7 +25453,7 @@ _sk_color_avx:
.byte 196,65,44,95,207 // vmaxps %ymm15,%ymm10,%ymm9
.byte 196,195,37,74,192,0 // vblendvps %ymm0,%ymm8,%ymm11,%ymm0
.byte 196,65,124,95,199 // vmaxps %ymm15,%ymm0,%ymm8
- .byte 196,226,125,24,5,215,132,2,0 // vbroadcastss 0x284d7(%rip),%ymm0 # 389b4 <_sk_srcover_bgra_8888_sse2_lowp+0x2b0>
+ .byte 196,226,125,24,5,71,134,2,0 // vbroadcastss 0x28647(%rip),%ymm0 # 38b24 <_sk_srcover_bgra_8888_sse2_lowp+0x2b0>
.byte 197,124,92,215 // vsubps %ymm7,%ymm0,%ymm10
.byte 197,172,89,84,36,168 // vmulps -0x58(%rsp),%ymm10,%ymm2
.byte 197,124,92,219 // vsubps %ymm3,%ymm0,%ymm11
@@ -25483,12 +25483,12 @@ _sk_luminosity_avx:
.byte 197,252,40,208 // vmovaps %ymm0,%ymm2
.byte 197,100,89,196 // vmulps %ymm4,%ymm3,%ymm8
.byte 197,100,89,205 // vmulps %ymm5,%ymm3,%ymm9
- .byte 196,98,125,24,45,133,132,2,0 // vbroadcastss 0x28485(%rip),%ymm13 # 389d4 <_sk_srcover_bgra_8888_sse2_lowp+0x2d0>
+ .byte 196,98,125,24,45,245,133,2,0 // vbroadcastss 0x285f5(%rip),%ymm13 # 38b44 <_sk_srcover_bgra_8888_sse2_lowp+0x2d0>
.byte 196,65,108,89,213 // vmulps %ymm13,%ymm2,%ymm10
- .byte 196,98,125,24,53,123,132,2,0 // vbroadcastss 0x2847b(%rip),%ymm14 # 389d8 <_sk_srcover_bgra_8888_sse2_lowp+0x2d4>
+ .byte 196,98,125,24,53,235,133,2,0 // vbroadcastss 0x285eb(%rip),%ymm14 # 38b48 <_sk_srcover_bgra_8888_sse2_lowp+0x2d4>
.byte 196,65,116,89,222 // vmulps %ymm14,%ymm1,%ymm11
.byte 196,65,44,88,211 // vaddps %ymm11,%ymm10,%ymm10
- .byte 196,98,125,24,61,108,132,2,0 // vbroadcastss 0x2846c(%rip),%ymm15 # 389dc <_sk_srcover_bgra_8888_sse2_lowp+0x2d8>
+ .byte 196,98,125,24,61,220,133,2,0 // vbroadcastss 0x285dc(%rip),%ymm15 # 38b4c <_sk_srcover_bgra_8888_sse2_lowp+0x2d8>
.byte 196,65,28,89,223 // vmulps %ymm15,%ymm12,%ymm11
.byte 196,193,44,88,195 // vaddps %ymm11,%ymm10,%ymm0
.byte 196,65,60,89,221 // vmulps %ymm13,%ymm8,%ymm11
@@ -25551,7 +25551,7 @@ _sk_luminosity_avx:
.byte 196,65,44,95,207 // vmaxps %ymm15,%ymm10,%ymm9
.byte 196,195,37,74,192,0 // vblendvps %ymm0,%ymm8,%ymm11,%ymm0
.byte 196,65,124,95,199 // vmaxps %ymm15,%ymm0,%ymm8
- .byte 196,226,125,24,5,7,131,2,0 // vbroadcastss 0x28307(%rip),%ymm0 # 389b4 <_sk_srcover_bgra_8888_sse2_lowp+0x2b0>
+ .byte 196,226,125,24,5,119,132,2,0 // vbroadcastss 0x28477(%rip),%ymm0 # 38b24 <_sk_srcover_bgra_8888_sse2_lowp+0x2b0>
.byte 197,124,92,215 // vsubps %ymm7,%ymm0,%ymm10
.byte 197,172,89,210 // vmulps %ymm2,%ymm10,%ymm2
.byte 197,124,92,219 // vsubps %ymm3,%ymm0,%ymm11
@@ -25585,7 +25585,7 @@ _sk_srcover_rgba_8888_avx:
.byte 72,133,255 // test %rdi,%rdi
.byte 15,133,42,1,0,0 // jne 10842 <_sk_srcover_rgba_8888_avx+0x14a>
.byte 196,1,125,16,4,152 // vmovupd (%r8,%r11,4),%ymm8
- .byte 197,125,40,13,218,135,2,0 // vmovapd 0x287da(%rip),%ymm9 # 38f00 <_sk_srcover_bgra_8888_sse2_lowp+0x7fc>
+ .byte 197,125,40,13,58,137,2,0 // vmovapd 0x2893a(%rip),%ymm9 # 39060 <_sk_srcover_bgra_8888_sse2_lowp+0x7ec>
.byte 196,193,61,84,225 // vandpd %ymm9,%ymm8,%ymm4
.byte 197,252,91,228 // vcvtdq2ps %ymm4,%ymm4
.byte 196,193,81,114,208,8 // vpsrld $0x8,%xmm8,%xmm5
@@ -25603,9 +25603,9 @@ _sk_srcover_rgba_8888_avx:
.byte 197,193,114,215,24 // vpsrld $0x18,%xmm7,%xmm7
.byte 196,227,61,24,255,1 // vinsertf128 $0x1,%xmm7,%ymm8,%ymm7
.byte 197,252,91,255 // vcvtdq2ps %ymm7,%ymm7
- .byte 196,98,125,24,5,45,130,2,0 // vbroadcastss 0x2822d(%rip),%ymm8 # 389b4 <_sk_srcover_bgra_8888_sse2_lowp+0x2b0>
+ .byte 196,98,125,24,5,157,131,2,0 // vbroadcastss 0x2839d(%rip),%ymm8 # 38b24 <_sk_srcover_bgra_8888_sse2_lowp+0x2b0>
.byte 197,60,92,195 // vsubps %ymm3,%ymm8,%ymm8
- .byte 196,98,125,24,13,76,130,2,0 // vbroadcastss 0x2824c(%rip),%ymm9 # 389e0 <_sk_srcover_bgra_8888_sse2_lowp+0x2dc>
+ .byte 196,98,125,24,13,188,131,2,0 // vbroadcastss 0x283bc(%rip),%ymm9 # 38b50 <_sk_srcover_bgra_8888_sse2_lowp+0x2dc>
.byte 196,193,124,89,193 // vmulps %ymm9,%ymm0,%ymm0
.byte 197,60,89,212 // vmulps %ymm4,%ymm8,%ymm10
.byte 196,193,124,88,194 // vaddps %ymm10,%ymm0,%ymm0
@@ -25747,7 +25747,7 @@ _sk_srcover_bgra_8888_avx:
.byte 72,133,255 // test %rdi,%rdi
.byte 15,133,42,1,0,0 // jne 10aee <_sk_srcover_bgra_8888_avx+0x14a>
.byte 196,1,125,16,4,152 // vmovupd (%r8,%r11,4),%ymm8
- .byte 197,125,40,13,78,133,2,0 // vmovapd 0x2854e(%rip),%ymm9 # 38f20 <_sk_srcover_bgra_8888_sse2_lowp+0x81c>
+ .byte 197,125,40,13,174,134,2,0 // vmovapd 0x286ae(%rip),%ymm9 # 39080 <_sk_srcover_bgra_8888_sse2_lowp+0x80c>
.byte 196,193,61,84,233 // vandpd %ymm9,%ymm8,%ymm5
.byte 197,252,91,245 // vcvtdq2ps %ymm5,%ymm6
.byte 196,193,81,114,208,8 // vpsrld $0x8,%xmm8,%xmm5
@@ -25765,9 +25765,9 @@ _sk_srcover_bgra_8888_avx:
.byte 197,193,114,215,24 // vpsrld $0x18,%xmm7,%xmm7
.byte 196,227,61,24,255,1 // vinsertf128 $0x1,%xmm7,%ymm8,%ymm7
.byte 197,252,91,255 // vcvtdq2ps %ymm7,%ymm7
- .byte 196,98,125,24,5,129,127,2,0 // vbroadcastss 0x27f81(%rip),%ymm8 # 389b4 <_sk_srcover_bgra_8888_sse2_lowp+0x2b0>
+ .byte 196,98,125,24,5,241,128,2,0 // vbroadcastss 0x280f1(%rip),%ymm8 # 38b24 <_sk_srcover_bgra_8888_sse2_lowp+0x2b0>
.byte 197,60,92,195 // vsubps %ymm3,%ymm8,%ymm8
- .byte 196,98,125,24,13,160,127,2,0 // vbroadcastss 0x27fa0(%rip),%ymm9 # 389e0 <_sk_srcover_bgra_8888_sse2_lowp+0x2dc>
+ .byte 196,98,125,24,13,16,129,2,0 // vbroadcastss 0x28110(%rip),%ymm9 # 38b50 <_sk_srcover_bgra_8888_sse2_lowp+0x2dc>
.byte 196,193,124,89,193 // vmulps %ymm9,%ymm0,%ymm0
.byte 197,60,89,212 // vmulps %ymm4,%ymm8,%ymm10
.byte 196,193,124,88,194 // vaddps %ymm10,%ymm0,%ymm0
@@ -25911,7 +25911,7 @@ HIDDEN _sk_clamp_1_avx
.globl _sk_clamp_1_avx
FUNCTION(_sk_clamp_1_avx)
_sk_clamp_1_avx:
- .byte 196,98,125,24,5,62,125,2,0 // vbroadcastss 0x27d3e(%rip),%ymm8 # 389b4 <_sk_srcover_bgra_8888_sse2_lowp+0x2b0>
+ .byte 196,98,125,24,5,174,126,2,0 // vbroadcastss 0x27eae(%rip),%ymm8 # 38b24 <_sk_srcover_bgra_8888_sse2_lowp+0x2b0>
.byte 196,193,124,93,192 // vminps %ymm8,%ymm0,%ymm0
.byte 196,193,116,93,200 // vminps %ymm8,%ymm1,%ymm1
.byte 196,193,108,93,208 // vminps %ymm8,%ymm2,%ymm2
@@ -25923,7 +25923,7 @@ HIDDEN _sk_clamp_a_avx
.globl _sk_clamp_a_avx
FUNCTION(_sk_clamp_a_avx)
_sk_clamp_a_avx:
- .byte 196,98,125,24,5,29,125,2,0 // vbroadcastss 0x27d1d(%rip),%ymm8 # 389b4 <_sk_srcover_bgra_8888_sse2_lowp+0x2b0>
+ .byte 196,98,125,24,5,141,126,2,0 // vbroadcastss 0x27e8d(%rip),%ymm8 # 38b24 <_sk_srcover_bgra_8888_sse2_lowp+0x2b0>
.byte 196,193,100,93,216 // vminps %ymm8,%ymm3,%ymm3
.byte 197,252,93,195 // vminps %ymm3,%ymm0,%ymm0
.byte 197,244,93,203 // vminps %ymm3,%ymm1,%ymm1
@@ -25935,7 +25935,7 @@ HIDDEN _sk_clamp_a_dst_avx
.globl _sk_clamp_a_dst_avx
FUNCTION(_sk_clamp_a_dst_avx)
_sk_clamp_a_dst_avx:
- .byte 196,98,125,24,5,255,124,2,0 // vbroadcastss 0x27cff(%rip),%ymm8 # 389b4 <_sk_srcover_bgra_8888_sse2_lowp+0x2b0>
+ .byte 196,98,125,24,5,111,126,2,0 // vbroadcastss 0x27e6f(%rip),%ymm8 # 38b24 <_sk_srcover_bgra_8888_sse2_lowp+0x2b0>
.byte 196,193,68,93,248 // vminps %ymm8,%ymm7,%ymm7
.byte 197,220,93,231 // vminps %ymm7,%ymm4,%ymm4
.byte 197,212,93,239 // vminps %ymm7,%ymm5,%ymm5
@@ -25968,7 +25968,7 @@ HIDDEN _sk_invert_avx
.globl _sk_invert_avx
FUNCTION(_sk_invert_avx)
_sk_invert_avx:
- .byte 196,98,125,24,5,186,124,2,0 // vbroadcastss 0x27cba(%rip),%ymm8 # 389b4 <_sk_srcover_bgra_8888_sse2_lowp+0x2b0>
+ .byte 196,98,125,24,5,42,126,2,0 // vbroadcastss 0x27e2a(%rip),%ymm8 # 38b24 <_sk_srcover_bgra_8888_sse2_lowp+0x2b0>
.byte 197,188,92,192 // vsubps %ymm0,%ymm8,%ymm0
.byte 197,188,92,201 // vsubps %ymm1,%ymm8,%ymm1
.byte 197,188,92,210 // vsubps %ymm2,%ymm8,%ymm2
@@ -26022,9 +26022,9 @@ HIDDEN _sk_unpremul_avx
.globl _sk_unpremul_avx
FUNCTION(_sk_unpremul_avx)
_sk_unpremul_avx:
- .byte 196,98,125,24,5,85,124,2,0 // vbroadcastss 0x27c55(%rip),%ymm8 # 389b4 <_sk_srcover_bgra_8888_sse2_lowp+0x2b0>
+ .byte 196,98,125,24,5,197,125,2,0 // vbroadcastss 0x27dc5(%rip),%ymm8 # 38b24 <_sk_srcover_bgra_8888_sse2_lowp+0x2b0>
.byte 197,60,94,195 // vdivps %ymm3,%ymm8,%ymm8
- .byte 196,98,125,24,13,120,124,2,0 // vbroadcastss 0x27c78(%rip),%ymm9 # 389e4 <_sk_srcover_bgra_8888_sse2_lowp+0x2e0>
+ .byte 196,98,125,24,13,232,125,2,0 // vbroadcastss 0x27de8(%rip),%ymm9 # 38b54 <_sk_srcover_bgra_8888_sse2_lowp+0x2e0>
.byte 196,65,60,194,201,1 // vcmpltps %ymm9,%ymm8,%ymm9
.byte 196,65,44,87,210 // vxorps %ymm10,%ymm10,%ymm10
.byte 196,67,45,74,192,144 // vblendvps %ymm9,%ymm8,%ymm10,%ymm8
@@ -26038,17 +26038,17 @@ HIDDEN _sk_from_srgb_avx
.globl _sk_from_srgb_avx
FUNCTION(_sk_from_srgb_avx)
_sk_from_srgb_avx:
- .byte 196,98,125,24,5,82,124,2,0 // vbroadcastss 0x27c52(%rip),%ymm8 # 389e8 <_sk_srcover_bgra_8888_sse2_lowp+0x2e4>
+ .byte 196,98,125,24,5,194,125,2,0 // vbroadcastss 0x27dc2(%rip),%ymm8 # 38b58 <_sk_srcover_bgra_8888_sse2_lowp+0x2e4>
.byte 196,65,124,89,200 // vmulps %ymm8,%ymm0,%ymm9
.byte 197,124,89,208 // vmulps %ymm0,%ymm0,%ymm10
- .byte 196,98,125,24,29,44,124,2,0 // vbroadcastss 0x27c2c(%rip),%ymm11 # 389d4 <_sk_srcover_bgra_8888_sse2_lowp+0x2d0>
+ .byte 196,98,125,24,29,156,125,2,0 // vbroadcastss 0x27d9c(%rip),%ymm11 # 38b44 <_sk_srcover_bgra_8888_sse2_lowp+0x2d0>
.byte 196,65,124,89,227 // vmulps %ymm11,%ymm0,%ymm12
- .byte 196,98,125,24,45,54,124,2,0 // vbroadcastss 0x27c36(%rip),%ymm13 # 389ec <_sk_srcover_bgra_8888_sse2_lowp+0x2e8>
+ .byte 196,98,125,24,45,166,125,2,0 // vbroadcastss 0x27da6(%rip),%ymm13 # 38b5c <_sk_srcover_bgra_8888_sse2_lowp+0x2e8>
.byte 196,65,28,88,229 // vaddps %ymm13,%ymm12,%ymm12
.byte 196,65,44,89,212 // vmulps %ymm12,%ymm10,%ymm10
- .byte 196,98,125,24,37,39,124,2,0 // vbroadcastss 0x27c27(%rip),%ymm12 # 389f0 <_sk_srcover_bgra_8888_sse2_lowp+0x2ec>
+ .byte 196,98,125,24,37,151,125,2,0 // vbroadcastss 0x27d97(%rip),%ymm12 # 38b60 <_sk_srcover_bgra_8888_sse2_lowp+0x2ec>
.byte 196,65,44,88,212 // vaddps %ymm12,%ymm10,%ymm10
- .byte 196,98,125,24,53,29,124,2,0 // vbroadcastss 0x27c1d(%rip),%ymm14 # 389f4 <_sk_srcover_bgra_8888_sse2_lowp+0x2f0>
+ .byte 196,98,125,24,53,141,125,2,0 // vbroadcastss 0x27d8d(%rip),%ymm14 # 38b64 <_sk_srcover_bgra_8888_sse2_lowp+0x2f0>
.byte 196,193,124,194,198,1 // vcmpltps %ymm14,%ymm0,%ymm0
.byte 196,195,45,74,193,0 // vblendvps %ymm0,%ymm9,%ymm10,%ymm0
.byte 196,65,116,89,200 // vmulps %ymm8,%ymm1,%ymm9
@@ -26074,17 +26074,17 @@ HIDDEN _sk_from_srgb_dst_avx
.globl _sk_from_srgb_dst_avx
FUNCTION(_sk_from_srgb_dst_avx)
_sk_from_srgb_dst_avx:
- .byte 196,98,125,24,5,166,123,2,0 // vbroadcastss 0x27ba6(%rip),%ymm8 # 389e8 <_sk_srcover_bgra_8888_sse2_lowp+0x2e4>
+ .byte 196,98,125,24,5,22,125,2,0 // vbroadcastss 0x27d16(%rip),%ymm8 # 38b58 <_sk_srcover_bgra_8888_sse2_lowp+0x2e4>
.byte 196,65,92,89,200 // vmulps %ymm8,%ymm4,%ymm9
.byte 197,92,89,212 // vmulps %ymm4,%ymm4,%ymm10
- .byte 196,98,125,24,29,128,123,2,0 // vbroadcastss 0x27b80(%rip),%ymm11 # 389d4 <_sk_srcover_bgra_8888_sse2_lowp+0x2d0>
+ .byte 196,98,125,24,29,240,124,2,0 // vbroadcastss 0x27cf0(%rip),%ymm11 # 38b44 <_sk_srcover_bgra_8888_sse2_lowp+0x2d0>
.byte 196,65,92,89,227 // vmulps %ymm11,%ymm4,%ymm12
- .byte 196,98,125,24,45,138,123,2,0 // vbroadcastss 0x27b8a(%rip),%ymm13 # 389ec <_sk_srcover_bgra_8888_sse2_lowp+0x2e8>
+ .byte 196,98,125,24,45,250,124,2,0 // vbroadcastss 0x27cfa(%rip),%ymm13 # 38b5c <_sk_srcover_bgra_8888_sse2_lowp+0x2e8>
.byte 196,65,28,88,229 // vaddps %ymm13,%ymm12,%ymm12
.byte 196,65,44,89,212 // vmulps %ymm12,%ymm10,%ymm10
- .byte 196,98,125,24,37,123,123,2,0 // vbroadcastss 0x27b7b(%rip),%ymm12 # 389f0 <_sk_srcover_bgra_8888_sse2_lowp+0x2ec>
+ .byte 196,98,125,24,37,235,124,2,0 // vbroadcastss 0x27ceb(%rip),%ymm12 # 38b60 <_sk_srcover_bgra_8888_sse2_lowp+0x2ec>
.byte 196,65,44,88,212 // vaddps %ymm12,%ymm10,%ymm10
- .byte 196,98,125,24,53,113,123,2,0 // vbroadcastss 0x27b71(%rip),%ymm14 # 389f4 <_sk_srcover_bgra_8888_sse2_lowp+0x2f0>
+ .byte 196,98,125,24,53,225,124,2,0 // vbroadcastss 0x27ce1(%rip),%ymm14 # 38b64 <_sk_srcover_bgra_8888_sse2_lowp+0x2f0>
.byte 196,193,92,194,230,1 // vcmpltps %ymm14,%ymm4,%ymm4
.byte 196,195,45,74,225,64 // vblendvps %ymm4,%ymm9,%ymm10,%ymm4
.byte 196,65,84,89,200 // vmulps %ymm8,%ymm5,%ymm9
@@ -26111,20 +26111,20 @@ HIDDEN _sk_to_srgb_avx
FUNCTION(_sk_to_srgb_avx)
_sk_to_srgb_avx:
.byte 197,124,82,200 // vrsqrtps %ymm0,%ymm9
- .byte 196,98,125,24,5,6,123,2,0 // vbroadcastss 0x27b06(%rip),%ymm8 # 389f8 <_sk_srcover_bgra_8888_sse2_lowp+0x2f4>
+ .byte 196,98,125,24,5,118,124,2,0 // vbroadcastss 0x27c76(%rip),%ymm8 # 38b68 <_sk_srcover_bgra_8888_sse2_lowp+0x2f4>
.byte 196,65,124,89,208 // vmulps %ymm8,%ymm0,%ymm10
- .byte 196,98,125,24,29,0,123,2,0 // vbroadcastss 0x27b00(%rip),%ymm11 # 38a00 <_sk_srcover_bgra_8888_sse2_lowp+0x2fc>
+ .byte 196,98,125,24,29,112,124,2,0 // vbroadcastss 0x27c70(%rip),%ymm11 # 38b70 <_sk_srcover_bgra_8888_sse2_lowp+0x2fc>
.byte 196,65,52,89,227 // vmulps %ymm11,%ymm9,%ymm12
- .byte 196,98,125,24,45,238,122,2,0 // vbroadcastss 0x27aee(%rip),%ymm13 # 389fc <_sk_srcover_bgra_8888_sse2_lowp+0x2f8>
+ .byte 196,98,125,24,45,94,124,2,0 // vbroadcastss 0x27c5e(%rip),%ymm13 # 38b6c <_sk_srcover_bgra_8888_sse2_lowp+0x2f8>
.byte 196,65,28,88,229 // vaddps %ymm13,%ymm12,%ymm12
.byte 196,65,52,89,228 // vmulps %ymm12,%ymm9,%ymm12
- .byte 196,98,125,24,53,19,124,2,0 // vbroadcastss 0x27c13(%rip),%ymm14 # 38b34 <_sk_srcover_bgra_8888_sse2_lowp+0x430>
+ .byte 196,98,125,24,53,131,125,2,0 // vbroadcastss 0x27d83(%rip),%ymm14 # 38ca4 <_sk_srcover_bgra_8888_sse2_lowp+0x430>
.byte 196,65,28,88,230 // vaddps %ymm14,%ymm12,%ymm12
- .byte 196,98,125,24,61,9,124,2,0 // vbroadcastss 0x27c09(%rip),%ymm15 # 38b38 <_sk_srcover_bgra_8888_sse2_lowp+0x434>
+ .byte 196,98,125,24,61,121,125,2,0 // vbroadcastss 0x27d79(%rip),%ymm15 # 38ca8 <_sk_srcover_bgra_8888_sse2_lowp+0x434>
.byte 196,65,52,88,207 // vaddps %ymm15,%ymm9,%ymm9
.byte 196,65,124,83,201 // vrcpps %ymm9,%ymm9
.byte 196,65,52,89,204 // vmulps %ymm12,%ymm9,%ymm9
- .byte 196,98,125,24,37,197,122,2,0 // vbroadcastss 0x27ac5(%rip),%ymm12 # 38a0c <_sk_srcover_bgra_8888_sse2_lowp+0x308>
+ .byte 196,98,125,24,37,53,124,2,0 // vbroadcastss 0x27c35(%rip),%ymm12 # 38b7c <_sk_srcover_bgra_8888_sse2_lowp+0x308>
.byte 196,193,124,194,196,1 // vcmpltps %ymm12,%ymm0,%ymm0
.byte 196,195,53,74,194,0 // vblendvps %ymm0,%ymm10,%ymm9,%ymm0
.byte 197,124,82,201 // vrsqrtps %ymm1,%ymm9
@@ -26161,7 +26161,7 @@ _sk_rgb_to_hsl_avx:
.byte 197,116,93,202 // vminps %ymm2,%ymm1,%ymm9
.byte 196,65,124,93,201 // vminps %ymm9,%ymm0,%ymm9
.byte 196,65,60,92,209 // vsubps %ymm9,%ymm8,%ymm10
- .byte 196,98,125,24,29,205,121,2,0 // vbroadcastss 0x279cd(%rip),%ymm11 # 389b4 <_sk_srcover_bgra_8888_sse2_lowp+0x2b0>
+ .byte 196,98,125,24,29,61,123,2,0 // vbroadcastss 0x27b3d(%rip),%ymm11 # 38b24 <_sk_srcover_bgra_8888_sse2_lowp+0x2b0>
.byte 196,65,36,94,218 // vdivps %ymm10,%ymm11,%ymm11
.byte 197,116,92,226 // vsubps %ymm2,%ymm1,%ymm12
.byte 196,65,28,89,227 // vmulps %ymm11,%ymm12,%ymm12
@@ -26171,19 +26171,19 @@ _sk_rgb_to_hsl_avx:
.byte 196,193,108,89,211 // vmulps %ymm11,%ymm2,%ymm2
.byte 197,252,92,201 // vsubps %ymm1,%ymm0,%ymm1
.byte 196,193,116,89,203 // vmulps %ymm11,%ymm1,%ymm1
- .byte 196,98,125,24,29,254,121,2,0 // vbroadcastss 0x279fe(%rip),%ymm11 # 38a18 <_sk_srcover_bgra_8888_sse2_lowp+0x314>
+ .byte 196,98,125,24,29,110,123,2,0 // vbroadcastss 0x27b6e(%rip),%ymm11 # 38b88 <_sk_srcover_bgra_8888_sse2_lowp+0x314>
.byte 196,193,116,88,203 // vaddps %ymm11,%ymm1,%ymm1
- .byte 196,98,125,24,29,236,121,2,0 // vbroadcastss 0x279ec(%rip),%ymm11 # 38a14 <_sk_srcover_bgra_8888_sse2_lowp+0x310>
+ .byte 196,98,125,24,29,92,123,2,0 // vbroadcastss 0x27b5c(%rip),%ymm11 # 38b84 <_sk_srcover_bgra_8888_sse2_lowp+0x310>
.byte 196,193,108,88,211 // vaddps %ymm11,%ymm2,%ymm2
.byte 196,227,117,74,202,224 // vblendvps %ymm14,%ymm2,%ymm1,%ymm1
- .byte 196,226,125,24,21,212,121,2,0 // vbroadcastss 0x279d4(%rip),%ymm2 # 38a10 <_sk_srcover_bgra_8888_sse2_lowp+0x30c>
+ .byte 196,226,125,24,21,68,123,2,0 // vbroadcastss 0x27b44(%rip),%ymm2 # 38b80 <_sk_srcover_bgra_8888_sse2_lowp+0x30c>
.byte 196,65,12,87,246 // vxorps %ymm14,%ymm14,%ymm14
.byte 196,227,13,74,210,208 // vblendvps %ymm13,%ymm2,%ymm14,%ymm2
.byte 197,188,194,192,0 // vcmpeqps %ymm0,%ymm8,%ymm0
.byte 196,193,108,88,212 // vaddps %ymm12,%ymm2,%ymm2
.byte 196,227,117,74,194,0 // vblendvps %ymm0,%ymm2,%ymm1,%ymm0
.byte 196,193,60,88,201 // vaddps %ymm9,%ymm8,%ymm1
- .byte 196,98,125,24,37,75,121,2,0 // vbroadcastss 0x2794b(%rip),%ymm12 # 389b0 <_sk_srcover_bgra_8888_sse2_lowp+0x2ac>
+ .byte 196,98,125,24,37,187,122,2,0 // vbroadcastss 0x27abb(%rip),%ymm12 # 38b20 <_sk_srcover_bgra_8888_sse2_lowp+0x2ac>
.byte 196,193,116,89,212 // vmulps %ymm12,%ymm1,%ymm2
.byte 197,28,194,226,1 // vcmpltps %ymm2,%ymm12,%ymm12
.byte 196,65,36,92,216 // vsubps %ymm8,%ymm11,%ymm11
@@ -26193,7 +26193,7 @@ _sk_rgb_to_hsl_avx:
.byte 197,172,94,201 // vdivps %ymm1,%ymm10,%ymm1
.byte 196,195,125,74,198,128 // vblendvps %ymm8,%ymm14,%ymm0,%ymm0
.byte 196,195,117,74,206,128 // vblendvps %ymm8,%ymm14,%ymm1,%ymm1
- .byte 196,98,125,24,5,126,121,2,0 // vbroadcastss 0x2797e(%rip),%ymm8 # 38a1c <_sk_srcover_bgra_8888_sse2_lowp+0x318>
+ .byte 196,98,125,24,5,238,122,2,0 // vbroadcastss 0x27aee(%rip),%ymm8 # 38b8c <_sk_srcover_bgra_8888_sse2_lowp+0x318>
.byte 196,193,124,89,192 // vmulps %ymm8,%ymm0,%ymm0
.byte 72,173 // lods %ds:(%rsi),%rax
.byte 255,224 // jmpq *%rax
@@ -26210,7 +26210,7 @@ _sk_hsl_to_rgb_avx:
.byte 197,252,17,92,36,128 // vmovups %ymm3,-0x80(%rsp)
.byte 197,252,40,225 // vmovaps %ymm1,%ymm4
.byte 197,252,40,216 // vmovaps %ymm0,%ymm3
- .byte 196,98,125,24,5,215,120,2,0 // vbroadcastss 0x278d7(%rip),%ymm8 # 389b0 <_sk_srcover_bgra_8888_sse2_lowp+0x2ac>
+ .byte 196,98,125,24,5,71,122,2,0 // vbroadcastss 0x27a47(%rip),%ymm8 # 38b20 <_sk_srcover_bgra_8888_sse2_lowp+0x2ac>
.byte 197,60,194,202,2 // vcmpleps %ymm2,%ymm8,%ymm9
.byte 197,92,89,210 // vmulps %ymm2,%ymm4,%ymm10
.byte 196,65,92,92,218 // vsubps %ymm10,%ymm4,%ymm11
@@ -26218,23 +26218,23 @@ _sk_hsl_to_rgb_avx:
.byte 197,52,88,210 // vaddps %ymm2,%ymm9,%ymm10
.byte 197,108,88,202 // vaddps %ymm2,%ymm2,%ymm9
.byte 196,65,52,92,202 // vsubps %ymm10,%ymm9,%ymm9
- .byte 196,98,125,24,29,29,121,2,0 // vbroadcastss 0x2791d(%rip),%ymm11 # 38a20 <_sk_srcover_bgra_8888_sse2_lowp+0x31c>
+ .byte 196,98,125,24,29,141,122,2,0 // vbroadcastss 0x27a8d(%rip),%ymm11 # 38b90 <_sk_srcover_bgra_8888_sse2_lowp+0x31c>
.byte 196,65,100,88,219 // vaddps %ymm11,%ymm3,%ymm11
.byte 196,67,125,8,227,1 // vroundps $0x1,%ymm11,%ymm12
.byte 196,65,36,92,252 // vsubps %ymm12,%ymm11,%ymm15
.byte 196,65,44,92,217 // vsubps %ymm9,%ymm10,%ymm11
- .byte 196,98,125,24,37,239,120,2,0 // vbroadcastss 0x278ef(%rip),%ymm12 # 38a10 <_sk_srcover_bgra_8888_sse2_lowp+0x30c>
+ .byte 196,98,125,24,37,95,122,2,0 // vbroadcastss 0x27a5f(%rip),%ymm12 # 38b80 <_sk_srcover_bgra_8888_sse2_lowp+0x30c>
.byte 196,193,4,89,196 // vmulps %ymm12,%ymm15,%ymm0
- .byte 196,98,125,24,45,233,120,2,0 // vbroadcastss 0x278e9(%rip),%ymm13 # 38a18 <_sk_srcover_bgra_8888_sse2_lowp+0x314>
+ .byte 196,98,125,24,45,89,122,2,0 // vbroadcastss 0x27a59(%rip),%ymm13 # 38b88 <_sk_srcover_bgra_8888_sse2_lowp+0x314>
.byte 197,20,92,240 // vsubps %ymm0,%ymm13,%ymm14
.byte 196,65,36,89,246 // vmulps %ymm14,%ymm11,%ymm14
.byte 196,65,52,88,246 // vaddps %ymm14,%ymm9,%ymm14
- .byte 196,226,125,24,13,222,120,2,0 // vbroadcastss 0x278de(%rip),%ymm1 # 38a24 <_sk_srcover_bgra_8888_sse2_lowp+0x320>
+ .byte 196,226,125,24,13,78,122,2,0 // vbroadcastss 0x27a4e(%rip),%ymm1 # 38b94 <_sk_srcover_bgra_8888_sse2_lowp+0x320>
.byte 196,193,116,194,255,2 // vcmpleps %ymm15,%ymm1,%ymm7
.byte 196,195,13,74,249,112 // vblendvps %ymm7,%ymm9,%ymm14,%ymm7
.byte 196,65,60,194,247,2 // vcmpleps %ymm15,%ymm8,%ymm14
.byte 196,227,45,74,255,224 // vblendvps %ymm14,%ymm7,%ymm10,%ymm7
- .byte 196,98,125,24,53,181,120,2,0 // vbroadcastss 0x278b5(%rip),%ymm14 # 38a1c <_sk_srcover_bgra_8888_sse2_lowp+0x318>
+ .byte 196,98,125,24,53,37,122,2,0 // vbroadcastss 0x27a25(%rip),%ymm14 # 38b8c <_sk_srcover_bgra_8888_sse2_lowp+0x318>
.byte 196,65,12,194,255,2 // vcmpleps %ymm15,%ymm14,%ymm15
.byte 196,193,124,89,195 // vmulps %ymm11,%ymm0,%ymm0
.byte 197,180,88,192 // vaddps %ymm0,%ymm9,%ymm0
@@ -26253,7 +26253,7 @@ _sk_hsl_to_rgb_avx:
.byte 197,164,89,247 // vmulps %ymm7,%ymm11,%ymm6
.byte 197,180,88,246 // vaddps %ymm6,%ymm9,%ymm6
.byte 196,227,77,74,237,0 // vblendvps %ymm0,%ymm5,%ymm6,%ymm5
- .byte 196,226,125,24,5,95,120,2,0 // vbroadcastss 0x2785f(%rip),%ymm0 # 38a28 <_sk_srcover_bgra_8888_sse2_lowp+0x324>
+ .byte 196,226,125,24,5,207,121,2,0 // vbroadcastss 0x279cf(%rip),%ymm0 # 38b98 <_sk_srcover_bgra_8888_sse2_lowp+0x324>
.byte 197,228,88,192 // vaddps %ymm0,%ymm3,%ymm0
.byte 196,227,125,8,216,1 // vroundps $0x1,%ymm0,%ymm3
.byte 197,252,92,195 // vsubps %ymm3,%ymm0,%ymm0
@@ -26309,13 +26309,13 @@ _sk_scale_u8_avx:
.byte 72,133,255 // test %rdi,%rdi
.byte 117,75 // jne 112cf <_sk_scale_u8_avx+0x63>
.byte 196,2,121,48,4,24 // vpmovzxbw (%r8,%r11,1),%xmm8
- .byte 197,57,219,5,254,127,2,0 // vpand 0x27ffe(%rip),%xmm8,%xmm8 # 39290 <_sk_srcover_bgra_8888_sse2_lowp+0xb8c>
+ .byte 197,57,219,5,94,129,2,0 // vpand 0x2815e(%rip),%xmm8,%xmm8 # 393f0 <_sk_srcover_bgra_8888_sse2_lowp+0xb7c>
.byte 196,66,121,51,200 // vpmovzxwd %xmm8,%xmm9
.byte 196,65,121,112,192,78 // vpshufd $0x4e,%xmm8,%xmm8
.byte 196,66,121,51,192 // vpmovzxwd %xmm8,%xmm8
.byte 196,67,53,24,192,1 // vinsertf128 $0x1,%xmm8,%ymm9,%ymm8
.byte 196,65,124,91,192 // vcvtdq2ps %ymm8,%ymm8
- .byte 196,98,125,24,13,118,119,2,0 // vbroadcastss 0x27776(%rip),%ymm9 # 38a2c <_sk_srcover_bgra_8888_sse2_lowp+0x328>
+ .byte 196,98,125,24,13,230,120,2,0 // vbroadcastss 0x278e6(%rip),%ymm9 # 38b9c <_sk_srcover_bgra_8888_sse2_lowp+0x328>
.byte 196,65,60,89,193 // vmulps %ymm9,%ymm8,%ymm8
.byte 197,188,89,192 // vmulps %ymm0,%ymm8,%ymm0
.byte 197,188,89,201 // vmulps %ymm1,%ymm8,%ymm1
@@ -26395,20 +26395,20 @@ _sk_scale_565_avx:
.byte 196,65,121,112,192,78 // vpshufd $0x4e,%xmm8,%xmm8
.byte 196,66,121,51,192 // vpmovzxwd %xmm8,%xmm8
.byte 196,67,53,24,192,1 // vinsertf128 $0x1,%xmm8,%ymm9,%ymm8
- .byte 196,98,125,24,13,104,118,2,0 // vbroadcastss 0x27668(%rip),%ymm9 # 38a30 <_sk_srcover_bgra_8888_sse2_lowp+0x32c>
+ .byte 196,98,125,24,13,216,119,2,0 // vbroadcastss 0x277d8(%rip),%ymm9 # 38ba0 <_sk_srcover_bgra_8888_sse2_lowp+0x32c>
.byte 196,65,60,84,201 // vandps %ymm9,%ymm8,%ymm9
.byte 196,65,124,91,201 // vcvtdq2ps %ymm9,%ymm9
- .byte 196,98,125,24,21,89,118,2,0 // vbroadcastss 0x27659(%rip),%ymm10 # 38a34 <_sk_srcover_bgra_8888_sse2_lowp+0x330>
+ .byte 196,98,125,24,21,201,119,2,0 // vbroadcastss 0x277c9(%rip),%ymm10 # 38ba4 <_sk_srcover_bgra_8888_sse2_lowp+0x330>
.byte 196,65,52,89,202 // vmulps %ymm10,%ymm9,%ymm9
- .byte 196,98,125,24,21,79,118,2,0 // vbroadcastss 0x2764f(%rip),%ymm10 # 38a38 <_sk_srcover_bgra_8888_sse2_lowp+0x334>
+ .byte 196,98,125,24,21,191,119,2,0 // vbroadcastss 0x277bf(%rip),%ymm10 # 38ba8 <_sk_srcover_bgra_8888_sse2_lowp+0x334>
.byte 196,65,60,84,210 // vandps %ymm10,%ymm8,%ymm10
.byte 196,65,124,91,210 // vcvtdq2ps %ymm10,%ymm10
- .byte 196,98,125,24,29,64,118,2,0 // vbroadcastss 0x27640(%rip),%ymm11 # 38a3c <_sk_srcover_bgra_8888_sse2_lowp+0x338>
+ .byte 196,98,125,24,29,176,119,2,0 // vbroadcastss 0x277b0(%rip),%ymm11 # 38bac <_sk_srcover_bgra_8888_sse2_lowp+0x338>
.byte 196,65,44,89,211 // vmulps %ymm11,%ymm10,%ymm10
- .byte 196,98,125,24,29,54,118,2,0 // vbroadcastss 0x27636(%rip),%ymm11 # 38a40 <_sk_srcover_bgra_8888_sse2_lowp+0x33c>
+ .byte 196,98,125,24,29,166,119,2,0 // vbroadcastss 0x277a6(%rip),%ymm11 # 38bb0 <_sk_srcover_bgra_8888_sse2_lowp+0x33c>
.byte 196,65,60,84,195 // vandps %ymm11,%ymm8,%ymm8
.byte 196,65,124,91,192 // vcvtdq2ps %ymm8,%ymm8
- .byte 196,98,125,24,29,39,118,2,0 // vbroadcastss 0x27627(%rip),%ymm11 # 38a44 <_sk_srcover_bgra_8888_sse2_lowp+0x340>
+ .byte 196,98,125,24,29,151,119,2,0 // vbroadcastss 0x27797(%rip),%ymm11 # 38bb4 <_sk_srcover_bgra_8888_sse2_lowp+0x340>
.byte 196,65,60,89,195 // vmulps %ymm11,%ymm8,%ymm8
.byte 197,100,194,223,1 // vcmpltps %ymm7,%ymm3,%ymm11
.byte 196,65,44,93,224 // vminps %ymm8,%ymm10,%ymm12
@@ -26501,13 +26501,13 @@ _sk_lerp_u8_avx:
.byte 72,133,255 // test %rdi,%rdi
.byte 117,111 // jne 115be <_sk_lerp_u8_avx+0x87>
.byte 196,2,121,48,4,24 // vpmovzxbw (%r8,%r11,1),%xmm8
- .byte 197,57,219,5,51,125,2,0 // vpand 0x27d33(%rip),%xmm8,%xmm8 # 39290 <_sk_srcover_bgra_8888_sse2_lowp+0xb8c>
+ .byte 197,57,219,5,147,126,2,0 // vpand 0x27e93(%rip),%xmm8,%xmm8 # 393f0 <_sk_srcover_bgra_8888_sse2_lowp+0xb7c>
.byte 196,66,121,51,200 // vpmovzxwd %xmm8,%xmm9
.byte 196,65,121,112,192,78 // vpshufd $0x4e,%xmm8,%xmm8
.byte 196,66,121,51,192 // vpmovzxwd %xmm8,%xmm8
.byte 196,67,53,24,192,1 // vinsertf128 $0x1,%xmm8,%ymm9,%ymm8
.byte 196,65,124,91,192 // vcvtdq2ps %ymm8,%ymm8
- .byte 196,98,125,24,13,171,116,2,0 // vbroadcastss 0x274ab(%rip),%ymm9 # 38a2c <_sk_srcover_bgra_8888_sse2_lowp+0x328>
+ .byte 196,98,125,24,13,27,118,2,0 // vbroadcastss 0x2761b(%rip),%ymm9 # 38b9c <_sk_srcover_bgra_8888_sse2_lowp+0x328>
.byte 196,65,60,89,193 // vmulps %ymm9,%ymm8,%ymm8
.byte 197,252,92,196 // vsubps %ymm4,%ymm0,%ymm0
.byte 196,193,124,89,192 // vmulps %ymm8,%ymm0,%ymm0
@@ -26563,7 +26563,7 @@ _sk_lerp_u8_avx:
.byte 255 // (bad)
.byte 255 // (bad)
.byte 255 // (bad)
- .byte 232,255,255,255,221 // callq ffffffffde01166c <_sk_srcover_bgra_8888_sse2_lowp+0xffffffffddfd8f68>
+ .byte 232,255,255,255,221 // callq ffffffffde01166c <_sk_srcover_bgra_8888_sse2_lowp+0xffffffffddfd8df8>
.byte 255 // (bad)
.byte 255 // (bad)
.byte 255,210 // callq *%rdx
@@ -26592,20 +26592,20 @@ _sk_lerp_565_avx:
.byte 196,65,121,112,192,78 // vpshufd $0x4e,%xmm8,%xmm8
.byte 196,66,121,51,192 // vpmovzxwd %xmm8,%xmm8
.byte 196,67,53,24,192,1 // vinsertf128 $0x1,%xmm8,%ymm9,%ymm8
- .byte 196,98,125,24,13,116,115,2,0 // vbroadcastss 0x27374(%rip),%ymm9 # 38a30 <_sk_srcover_bgra_8888_sse2_lowp+0x32c>
+ .byte 196,98,125,24,13,228,116,2,0 // vbroadcastss 0x274e4(%rip),%ymm9 # 38ba0 <_sk_srcover_bgra_8888_sse2_lowp+0x32c>
.byte 196,65,60,84,201 // vandps %ymm9,%ymm8,%ymm9
.byte 196,65,124,91,201 // vcvtdq2ps %ymm9,%ymm9
- .byte 196,98,125,24,21,101,115,2,0 // vbroadcastss 0x27365(%rip),%ymm10 # 38a34 <_sk_srcover_bgra_8888_sse2_lowp+0x330>
+ .byte 196,98,125,24,21,213,116,2,0 // vbroadcastss 0x274d5(%rip),%ymm10 # 38ba4 <_sk_srcover_bgra_8888_sse2_lowp+0x330>
.byte 196,65,52,89,202 // vmulps %ymm10,%ymm9,%ymm9
- .byte 196,98,125,24,21,91,115,2,0 // vbroadcastss 0x2735b(%rip),%ymm10 # 38a38 <_sk_srcover_bgra_8888_sse2_lowp+0x334>
+ .byte 196,98,125,24,21,203,116,2,0 // vbroadcastss 0x274cb(%rip),%ymm10 # 38ba8 <_sk_srcover_bgra_8888_sse2_lowp+0x334>
.byte 196,65,60,84,210 // vandps %ymm10,%ymm8,%ymm10
.byte 196,65,124,91,210 // vcvtdq2ps %ymm10,%ymm10
- .byte 196,98,125,24,29,76,115,2,0 // vbroadcastss 0x2734c(%rip),%ymm11 # 38a3c <_sk_srcover_bgra_8888_sse2_lowp+0x338>
+ .byte 196,98,125,24,29,188,116,2,0 // vbroadcastss 0x274bc(%rip),%ymm11 # 38bac <_sk_srcover_bgra_8888_sse2_lowp+0x338>
.byte 196,65,44,89,211 // vmulps %ymm11,%ymm10,%ymm10
- .byte 196,98,125,24,29,66,115,2,0 // vbroadcastss 0x27342(%rip),%ymm11 # 38a40 <_sk_srcover_bgra_8888_sse2_lowp+0x33c>
+ .byte 196,98,125,24,29,178,116,2,0 // vbroadcastss 0x274b2(%rip),%ymm11 # 38bb0 <_sk_srcover_bgra_8888_sse2_lowp+0x33c>
.byte 196,65,60,84,195 // vandps %ymm11,%ymm8,%ymm8
.byte 196,65,124,91,192 // vcvtdq2ps %ymm8,%ymm8
- .byte 196,98,125,24,29,51,115,2,0 // vbroadcastss 0x27333(%rip),%ymm11 # 38a44 <_sk_srcover_bgra_8888_sse2_lowp+0x340>
+ .byte 196,98,125,24,29,163,116,2,0 // vbroadcastss 0x274a3(%rip),%ymm11 # 38bb4 <_sk_srcover_bgra_8888_sse2_lowp+0x340>
.byte 196,65,60,89,195 // vmulps %ymm11,%ymm8,%ymm8
.byte 197,100,194,223,1 // vcmpltps %ymm7,%ymm3,%ymm11
.byte 196,65,44,93,224 // vminps %ymm8,%ymm10,%ymm12
@@ -26682,7 +26682,7 @@ _sk_load_tables_avx:
.byte 72,133,255 // test %rdi,%rdi
.byte 15,133,4,2,0,0 // jne 11a28 <_sk_load_tables_avx+0x218>
.byte 196,65,125,16,20,144 // vmovupd (%r8,%rdx,4),%ymm10
- .byte 197,125,40,13,14,119,2,0 // vmovapd 0x2770e(%rip),%ymm9 # 38f40 <_sk_srcover_bgra_8888_sse2_lowp+0x83c>
+ .byte 197,125,40,13,110,120,2,0 // vmovapd 0x2786e(%rip),%ymm9 # 390a0 <_sk_srcover_bgra_8888_sse2_lowp+0x82c>
.byte 196,193,45,84,201 // vandpd %ymm9,%ymm10,%ymm1
.byte 196,227,125,25,200,1 // vextractf128 $0x1,%ymm1,%xmm0
.byte 196,193,249,126,192 // vmovq %xmm0,%r8
@@ -26774,7 +26774,7 @@ _sk_load_tables_avx:
.byte 196,193,65,114,213,24 // vpsrld $0x18,%xmm13,%xmm7
.byte 196,227,101,24,223,1 // vinsertf128 $0x1,%xmm7,%ymm3,%ymm3
.byte 197,252,91,219 // vcvtdq2ps %ymm3,%ymm3
- .byte 196,226,125,24,61,18,112,2,0 // vbroadcastss 0x27012(%rip),%ymm7 # 38a2c <_sk_srcover_bgra_8888_sse2_lowp+0x328>
+ .byte 196,226,125,24,61,130,113,2,0 // vbroadcastss 0x27182(%rip),%ymm7 # 38b9c <_sk_srcover_bgra_8888_sse2_lowp+0x328>
.byte 197,228,89,223 // vmulps %ymm7,%ymm3,%ymm3
.byte 72,173 // lods %ds:(%rsi),%rax
.byte 197,252,16,124,36,200 // vmovups -0x38(%rsp),%ymm7
@@ -26860,7 +26860,7 @@ _sk_load_tables_u16_be_avx:
.byte 197,177,108,208 // vpunpcklqdq %xmm0,%xmm9,%xmm2
.byte 197,49,109,232 // vpunpckhqdq %xmm0,%xmm9,%xmm13
.byte 196,65,57,108,212 // vpunpcklqdq %xmm12,%xmm8,%xmm10
- .byte 197,121,111,29,32,119,2,0 // vmovdqa 0x27720(%rip),%xmm11 # 39290 <_sk_srcover_bgra_8888_sse2_lowp+0xb8c>
+ .byte 197,121,111,29,128,120,2,0 // vmovdqa 0x27880(%rip),%xmm11 # 393f0 <_sk_srcover_bgra_8888_sse2_lowp+0xb7c>
.byte 196,193,105,219,195 // vpand %xmm11,%xmm2,%xmm0
.byte 197,249,112,208,78 // vpshufd $0x4e,%xmm0,%xmm2
.byte 196,226,121,51,210 // vpmovzxwd %xmm2,%xmm2
@@ -26954,7 +26954,7 @@ _sk_load_tables_u16_be_avx:
.byte 196,226,121,51,219 // vpmovzxwd %xmm3,%xmm3
.byte 196,227,77,24,219,1 // vinsertf128 $0x1,%xmm3,%ymm6,%ymm3
.byte 197,252,91,219 // vcvtdq2ps %ymm3,%ymm3
- .byte 196,226,125,24,53,243,108,2,0 // vbroadcastss 0x26cf3(%rip),%ymm6 # 38a48 <_sk_srcover_bgra_8888_sse2_lowp+0x344>
+ .byte 196,226,125,24,53,99,110,2,0 // vbroadcastss 0x26e63(%rip),%ymm6 # 38bb8 <_sk_srcover_bgra_8888_sse2_lowp+0x344>
.byte 197,228,89,222 // vmulps %ymm6,%ymm3,%ymm3
.byte 72,173 // lods %ds:(%rsi),%rax
.byte 197,252,16,116,36,168 // vmovups -0x58(%rsp),%ymm6
@@ -27018,7 +27018,7 @@ _sk_load_tables_rgb_u16_be_avx:
.byte 197,105,108,216 // vpunpcklqdq %xmm0,%xmm2,%xmm11
.byte 197,241,108,211 // vpunpcklqdq %xmm3,%xmm1,%xmm2
.byte 197,241,109,203 // vpunpckhqdq %xmm3,%xmm1,%xmm1
- .byte 197,121,111,53,32,116,2,0 // vmovdqa 0x27420(%rip),%xmm14 # 39290 <_sk_srcover_bgra_8888_sse2_lowp+0xb8c>
+ .byte 197,121,111,53,128,117,2,0 // vmovdqa 0x27580(%rip),%xmm14 # 393f0 <_sk_srcover_bgra_8888_sse2_lowp+0xb7c>
.byte 196,193,105,219,214 // vpand %xmm14,%xmm2,%xmm2
.byte 197,249,112,218,78 // vpshufd $0x4e,%xmm2,%xmm3
.byte 196,226,121,51,219 // vpmovzxwd %xmm3,%xmm3
@@ -27104,7 +27104,7 @@ _sk_load_tables_rgb_u16_be_avx:
.byte 196,227,105,33,215,48 // vinsertps $0x30,%xmm7,%xmm2,%xmm2
.byte 196,227,77,24,210,1 // vinsertf128 $0x1,%xmm2,%ymm6,%ymm2
.byte 72,173 // lods %ds:(%rsi),%rax
- .byte 196,226,125,24,29,137,105,2,0 // vbroadcastss 0x26989(%rip),%ymm3 # 389b4 <_sk_srcover_bgra_8888_sse2_lowp+0x2b0>
+ .byte 196,226,125,24,29,249,106,2,0 // vbroadcastss 0x26af9(%rip),%ymm3 # 38b24 <_sk_srcover_bgra_8888_sse2_lowp+0x2b0>
.byte 197,252,16,116,36,168 // vmovups -0x58(%rsp),%ymm6
.byte 197,252,16,124,36,200 // vmovups -0x38(%rsp),%ymm7
.byte 255,224 // jmpq *%rax
@@ -27175,7 +27175,7 @@ HIDDEN _sk_byte_tables_avx
FUNCTION(_sk_byte_tables_avx)
_sk_byte_tables_avx:
.byte 72,173 // lods %ds:(%rsi),%rax
- .byte 196,98,125,24,5,114,104,2,0 // vbroadcastss 0x26872(%rip),%ymm8 # 389e0 <_sk_srcover_bgra_8888_sse2_lowp+0x2dc>
+ .byte 196,98,125,24,5,226,105,2,0 // vbroadcastss 0x269e2(%rip),%ymm8 # 38b50 <_sk_srcover_bgra_8888_sse2_lowp+0x2dc>
.byte 196,193,124,89,192 // vmulps %ymm8,%ymm0,%ymm0
.byte 197,125,91,200 // vcvtps2dq %ymm0,%ymm9
.byte 196,65,249,126,200 // vmovq %xmm9,%r8
@@ -27310,7 +27310,7 @@ _sk_byte_tables_avx:
.byte 196,194,121,49,204 // vpmovzxbd %xmm12,%xmm1
.byte 196,194,121,49,213 // vpmovzxbd %xmm13,%xmm2
.byte 196,227,117,24,202,1 // vinsertf128 $0x1,%xmm2,%ymm1,%ymm1
- .byte 196,98,125,24,13,46,102,2,0 // vbroadcastss 0x2662e(%rip),%ymm9 # 38a2c <_sk_srcover_bgra_8888_sse2_lowp+0x328>
+ .byte 196,98,125,24,13,158,103,2,0 // vbroadcastss 0x2679e(%rip),%ymm9 # 38b9c <_sk_srcover_bgra_8888_sse2_lowp+0x328>
.byte 196,193,124,89,193 // vmulps %ymm9,%ymm0,%ymm0
.byte 197,252,91,201 // vcvtdq2ps %ymm1,%ymm1
.byte 196,193,116,89,201 // vmulps %ymm9,%ymm1,%ymm1
@@ -27438,7 +27438,7 @@ _sk_byte_tables_rgb_avx:
.byte 196,194,121,49,203 // vpmovzxbd %xmm11,%xmm1
.byte 196,227,125,24,193,1 // vinsertf128 $0x1,%xmm1,%ymm0,%ymm0
.byte 197,252,91,192 // vcvtdq2ps %ymm0,%ymm0
- .byte 196,98,125,24,13,227,99,2,0 // vbroadcastss 0x263e3(%rip),%ymm9 # 38a2c <_sk_srcover_bgra_8888_sse2_lowp+0x328>
+ .byte 196,98,125,24,13,83,101,2,0 // vbroadcastss 0x26553(%rip),%ymm9 # 38b9c <_sk_srcover_bgra_8888_sse2_lowp+0x328>
.byte 196,193,124,89,193 // vmulps %ymm9,%ymm0,%ymm0
.byte 196,194,121,49,202 // vpmovzxbd %xmm10,%xmm1
.byte 196,194,121,49,212 // vpmovzxbd %xmm12,%xmm2
@@ -27675,36 +27675,36 @@ _sk_parametric_r_avx:
.byte 196,193,124,88,195 // vaddps %ymm11,%ymm0,%ymm0
.byte 196,98,125,24,16 // vbroadcastss (%rax),%ymm10
.byte 197,124,91,216 // vcvtdq2ps %ymm0,%ymm11
- .byte 196,98,125,24,37,116,96,2,0 // vbroadcastss 0x26074(%rip),%ymm12 # 38a4c <_sk_srcover_bgra_8888_sse2_lowp+0x348>
+ .byte 196,98,125,24,37,228,97,2,0 // vbroadcastss 0x261e4(%rip),%ymm12 # 38bbc <_sk_srcover_bgra_8888_sse2_lowp+0x348>
.byte 196,65,36,89,220 // vmulps %ymm12,%ymm11,%ymm11
- .byte 196,98,125,24,37,106,96,2,0 // vbroadcastss 0x2606a(%rip),%ymm12 # 38a50 <_sk_srcover_bgra_8888_sse2_lowp+0x34c>
+ .byte 196,98,125,24,37,218,97,2,0 // vbroadcastss 0x261da(%rip),%ymm12 # 38bc0 <_sk_srcover_bgra_8888_sse2_lowp+0x34c>
.byte 196,193,124,84,196 // vandps %ymm12,%ymm0,%ymm0
- .byte 196,98,125,24,37,188,95,2,0 // vbroadcastss 0x25fbc(%rip),%ymm12 # 389b0 <_sk_srcover_bgra_8888_sse2_lowp+0x2ac>
+ .byte 196,98,125,24,37,44,97,2,0 // vbroadcastss 0x2612c(%rip),%ymm12 # 38b20 <_sk_srcover_bgra_8888_sse2_lowp+0x2ac>
.byte 196,193,124,86,196 // vorps %ymm12,%ymm0,%ymm0
- .byte 196,98,125,24,37,82,96,2,0 // vbroadcastss 0x26052(%rip),%ymm12 # 38a54 <_sk_srcover_bgra_8888_sse2_lowp+0x350>
+ .byte 196,98,125,24,37,194,97,2,0 // vbroadcastss 0x261c2(%rip),%ymm12 # 38bc4 <_sk_srcover_bgra_8888_sse2_lowp+0x350>
.byte 196,65,36,88,220 // vaddps %ymm12,%ymm11,%ymm11
- .byte 196,98,125,24,37,72,96,2,0 // vbroadcastss 0x26048(%rip),%ymm12 # 38a58 <_sk_srcover_bgra_8888_sse2_lowp+0x354>
+ .byte 196,98,125,24,37,184,97,2,0 // vbroadcastss 0x261b8(%rip),%ymm12 # 38bc8 <_sk_srcover_bgra_8888_sse2_lowp+0x354>
.byte 196,65,124,89,228 // vmulps %ymm12,%ymm0,%ymm12
.byte 196,65,36,92,220 // vsubps %ymm12,%ymm11,%ymm11
- .byte 196,98,125,24,37,57,96,2,0 // vbroadcastss 0x26039(%rip),%ymm12 # 38a5c <_sk_srcover_bgra_8888_sse2_lowp+0x358>
+ .byte 196,98,125,24,37,169,97,2,0 // vbroadcastss 0x261a9(%rip),%ymm12 # 38bcc <_sk_srcover_bgra_8888_sse2_lowp+0x358>
.byte 196,193,124,88,196 // vaddps %ymm12,%ymm0,%ymm0
- .byte 196,98,125,24,37,47,96,2,0 // vbroadcastss 0x2602f(%rip),%ymm12 # 38a60 <_sk_srcover_bgra_8888_sse2_lowp+0x35c>
+ .byte 196,98,125,24,37,159,97,2,0 // vbroadcastss 0x2619f(%rip),%ymm12 # 38bd0 <_sk_srcover_bgra_8888_sse2_lowp+0x35c>
.byte 197,156,94,192 // vdivps %ymm0,%ymm12,%ymm0
.byte 197,164,92,192 // vsubps %ymm0,%ymm11,%ymm0
.byte 197,172,89,192 // vmulps %ymm0,%ymm10,%ymm0
.byte 196,99,125,8,208,1 // vroundps $0x1,%ymm0,%ymm10
.byte 196,65,124,92,210 // vsubps %ymm10,%ymm0,%ymm10
- .byte 196,98,125,24,29,19,96,2,0 // vbroadcastss 0x26013(%rip),%ymm11 # 38a64 <_sk_srcover_bgra_8888_sse2_lowp+0x360>
+ .byte 196,98,125,24,29,131,97,2,0 // vbroadcastss 0x26183(%rip),%ymm11 # 38bd4 <_sk_srcover_bgra_8888_sse2_lowp+0x360>
.byte 196,193,124,88,195 // vaddps %ymm11,%ymm0,%ymm0
- .byte 196,98,125,24,29,9,96,2,0 // vbroadcastss 0x26009(%rip),%ymm11 # 38a68 <_sk_srcover_bgra_8888_sse2_lowp+0x364>
+ .byte 196,98,125,24,29,121,97,2,0 // vbroadcastss 0x26179(%rip),%ymm11 # 38bd8 <_sk_srcover_bgra_8888_sse2_lowp+0x364>
.byte 196,65,44,89,219 // vmulps %ymm11,%ymm10,%ymm11
.byte 196,193,124,92,195 // vsubps %ymm11,%ymm0,%ymm0
- .byte 196,98,125,24,29,250,95,2,0 // vbroadcastss 0x25ffa(%rip),%ymm11 # 38a6c <_sk_srcover_bgra_8888_sse2_lowp+0x368>
+ .byte 196,98,125,24,29,106,97,2,0 // vbroadcastss 0x2616a(%rip),%ymm11 # 38bdc <_sk_srcover_bgra_8888_sse2_lowp+0x368>
.byte 196,65,36,92,210 // vsubps %ymm10,%ymm11,%ymm10
- .byte 196,98,125,24,29,240,95,2,0 // vbroadcastss 0x25ff0(%rip),%ymm11 # 38a70 <_sk_srcover_bgra_8888_sse2_lowp+0x36c>
+ .byte 196,98,125,24,29,96,97,2,0 // vbroadcastss 0x26160(%rip),%ymm11 # 38be0 <_sk_srcover_bgra_8888_sse2_lowp+0x36c>
.byte 196,65,36,94,210 // vdivps %ymm10,%ymm11,%ymm10
.byte 196,193,124,88,194 // vaddps %ymm10,%ymm0,%ymm0
- .byte 196,98,125,24,21,225,95,2,0 // vbroadcastss 0x25fe1(%rip),%ymm10 # 38a74 <_sk_srcover_bgra_8888_sse2_lowp+0x370>
+ .byte 196,98,125,24,21,81,97,2,0 // vbroadcastss 0x26151(%rip),%ymm10 # 38be4 <_sk_srcover_bgra_8888_sse2_lowp+0x370>
.byte 196,193,124,89,194 // vmulps %ymm10,%ymm0,%ymm0
.byte 197,253,91,192 // vcvtps2dq %ymm0,%ymm0
.byte 196,98,125,24,80,20 // vbroadcastss 0x14(%rax),%ymm10
@@ -27712,7 +27712,7 @@ _sk_parametric_r_avx:
.byte 196,195,125,74,193,128 // vblendvps %ymm8,%ymm9,%ymm0,%ymm0
.byte 196,65,60,87,192 // vxorps %ymm8,%ymm8,%ymm8
.byte 196,193,124,95,192 // vmaxps %ymm8,%ymm0,%ymm0
- .byte 196,98,125,24,5,245,94,2,0 // vbroadcastss 0x25ef5(%rip),%ymm8 # 389b4 <_sk_srcover_bgra_8888_sse2_lowp+0x2b0>
+ .byte 196,98,125,24,5,101,96,2,0 // vbroadcastss 0x26065(%rip),%ymm8 # 38b24 <_sk_srcover_bgra_8888_sse2_lowp+0x2b0>
.byte 196,193,124,93,192 // vminps %ymm8,%ymm0,%ymm0
.byte 72,173 // lods %ds:(%rsi),%rax
.byte 255,224 // jmpq *%rax
@@ -27734,36 +27734,36 @@ _sk_parametric_g_avx:
.byte 196,193,116,88,203 // vaddps %ymm11,%ymm1,%ymm1
.byte 196,98,125,24,16 // vbroadcastss (%rax),%ymm10
.byte 197,124,91,217 // vcvtdq2ps %ymm1,%ymm11
- .byte 196,98,125,24,37,58,95,2,0 // vbroadcastss 0x25f3a(%rip),%ymm12 # 38a4c <_sk_srcover_bgra_8888_sse2_lowp+0x348>
+ .byte 196,98,125,24,37,170,96,2,0 // vbroadcastss 0x260aa(%rip),%ymm12 # 38bbc <_sk_srcover_bgra_8888_sse2_lowp+0x348>
.byte 196,65,36,89,220 // vmulps %ymm12,%ymm11,%ymm11
- .byte 196,98,125,24,37,48,95,2,0 // vbroadcastss 0x25f30(%rip),%ymm12 # 38a50 <_sk_srcover_bgra_8888_sse2_lowp+0x34c>
+ .byte 196,98,125,24,37,160,96,2,0 // vbroadcastss 0x260a0(%rip),%ymm12 # 38bc0 <_sk_srcover_bgra_8888_sse2_lowp+0x34c>
.byte 196,193,116,84,204 // vandps %ymm12,%ymm1,%ymm1
- .byte 196,98,125,24,37,130,94,2,0 // vbroadcastss 0x25e82(%rip),%ymm12 # 389b0 <_sk_srcover_bgra_8888_sse2_lowp+0x2ac>
+ .byte 196,98,125,24,37,242,95,2,0 // vbroadcastss 0x25ff2(%rip),%ymm12 # 38b20 <_sk_srcover_bgra_8888_sse2_lowp+0x2ac>
.byte 196,193,116,86,204 // vorps %ymm12,%ymm1,%ymm1
- .byte 196,98,125,24,37,24,95,2,0 // vbroadcastss 0x25f18(%rip),%ymm12 # 38a54 <_sk_srcover_bgra_8888_sse2_lowp+0x350>
+ .byte 196,98,125,24,37,136,96,2,0 // vbroadcastss 0x26088(%rip),%ymm12 # 38bc4 <_sk_srcover_bgra_8888_sse2_lowp+0x350>
.byte 196,65,36,88,220 // vaddps %ymm12,%ymm11,%ymm11
- .byte 196,98,125,24,37,14,95,2,0 // vbroadcastss 0x25f0e(%rip),%ymm12 # 38a58 <_sk_srcover_bgra_8888_sse2_lowp+0x354>
+ .byte 196,98,125,24,37,126,96,2,0 // vbroadcastss 0x2607e(%rip),%ymm12 # 38bc8 <_sk_srcover_bgra_8888_sse2_lowp+0x354>
.byte 196,65,116,89,228 // vmulps %ymm12,%ymm1,%ymm12
.byte 196,65,36,92,220 // vsubps %ymm12,%ymm11,%ymm11
- .byte 196,98,125,24,37,255,94,2,0 // vbroadcastss 0x25eff(%rip),%ymm12 # 38a5c <_sk_srcover_bgra_8888_sse2_lowp+0x358>
+ .byte 196,98,125,24,37,111,96,2,0 // vbroadcastss 0x2606f(%rip),%ymm12 # 38bcc <_sk_srcover_bgra_8888_sse2_lowp+0x358>
.byte 196,193,116,88,204 // vaddps %ymm12,%ymm1,%ymm1
- .byte 196,98,125,24,37,245,94,2,0 // vbroadcastss 0x25ef5(%rip),%ymm12 # 38a60 <_sk_srcover_bgra_8888_sse2_lowp+0x35c>
+ .byte 196,98,125,24,37,101,96,2,0 // vbroadcastss 0x26065(%rip),%ymm12 # 38bd0 <_sk_srcover_bgra_8888_sse2_lowp+0x35c>
.byte 197,156,94,201 // vdivps %ymm1,%ymm12,%ymm1
.byte 197,164,92,201 // vsubps %ymm1,%ymm11,%ymm1
.byte 197,172,89,201 // vmulps %ymm1,%ymm10,%ymm1
.byte 196,99,125,8,209,1 // vroundps $0x1,%ymm1,%ymm10
.byte 196,65,116,92,210 // vsubps %ymm10,%ymm1,%ymm10
- .byte 196,98,125,24,29,217,94,2,0 // vbroadcastss 0x25ed9(%rip),%ymm11 # 38a64 <_sk_srcover_bgra_8888_sse2_lowp+0x360>
+ .byte 196,98,125,24,29,73,96,2,0 // vbroadcastss 0x26049(%rip),%ymm11 # 38bd4 <_sk_srcover_bgra_8888_sse2_lowp+0x360>
.byte 196,193,116,88,203 // vaddps %ymm11,%ymm1,%ymm1
- .byte 196,98,125,24,29,207,94,2,0 // vbroadcastss 0x25ecf(%rip),%ymm11 # 38a68 <_sk_srcover_bgra_8888_sse2_lowp+0x364>
+ .byte 196,98,125,24,29,63,96,2,0 // vbroadcastss 0x2603f(%rip),%ymm11 # 38bd8 <_sk_srcover_bgra_8888_sse2_lowp+0x364>
.byte 196,65,44,89,219 // vmulps %ymm11,%ymm10,%ymm11
.byte 196,193,116,92,203 // vsubps %ymm11,%ymm1,%ymm1
- .byte 196,98,125,24,29,192,94,2,0 // vbroadcastss 0x25ec0(%rip),%ymm11 # 38a6c <_sk_srcover_bgra_8888_sse2_lowp+0x368>
+ .byte 196,98,125,24,29,48,96,2,0 // vbroadcastss 0x26030(%rip),%ymm11 # 38bdc <_sk_srcover_bgra_8888_sse2_lowp+0x368>
.byte 196,65,36,92,210 // vsubps %ymm10,%ymm11,%ymm10
- .byte 196,98,125,24,29,182,94,2,0 // vbroadcastss 0x25eb6(%rip),%ymm11 # 38a70 <_sk_srcover_bgra_8888_sse2_lowp+0x36c>
+ .byte 196,98,125,24,29,38,96,2,0 // vbroadcastss 0x26026(%rip),%ymm11 # 38be0 <_sk_srcover_bgra_8888_sse2_lowp+0x36c>
.byte 196,65,36,94,210 // vdivps %ymm10,%ymm11,%ymm10
.byte 196,193,116,88,202 // vaddps %ymm10,%ymm1,%ymm1
- .byte 196,98,125,24,21,167,94,2,0 // vbroadcastss 0x25ea7(%rip),%ymm10 # 38a74 <_sk_srcover_bgra_8888_sse2_lowp+0x370>
+ .byte 196,98,125,24,21,23,96,2,0 // vbroadcastss 0x26017(%rip),%ymm10 # 38be4 <_sk_srcover_bgra_8888_sse2_lowp+0x370>
.byte 196,193,116,89,202 // vmulps %ymm10,%ymm1,%ymm1
.byte 197,253,91,201 // vcvtps2dq %ymm1,%ymm1
.byte 196,98,125,24,80,20 // vbroadcastss 0x14(%rax),%ymm10
@@ -27771,7 +27771,7 @@ _sk_parametric_g_avx:
.byte 196,195,117,74,201,128 // vblendvps %ymm8,%ymm9,%ymm1,%ymm1
.byte 196,65,60,87,192 // vxorps %ymm8,%ymm8,%ymm8
.byte 196,193,116,95,200 // vmaxps %ymm8,%ymm1,%ymm1
- .byte 196,98,125,24,5,187,93,2,0 // vbroadcastss 0x25dbb(%rip),%ymm8 # 389b4 <_sk_srcover_bgra_8888_sse2_lowp+0x2b0>
+ .byte 196,98,125,24,5,43,95,2,0 // vbroadcastss 0x25f2b(%rip),%ymm8 # 38b24 <_sk_srcover_bgra_8888_sse2_lowp+0x2b0>
.byte 196,193,116,93,200 // vminps %ymm8,%ymm1,%ymm1
.byte 72,173 // lods %ds:(%rsi),%rax
.byte 255,224 // jmpq *%rax
@@ -27793,36 +27793,36 @@ _sk_parametric_b_avx:
.byte 196,193,108,88,211 // vaddps %ymm11,%ymm2,%ymm2
.byte 196,98,125,24,16 // vbroadcastss (%rax),%ymm10
.byte 197,124,91,218 // vcvtdq2ps %ymm2,%ymm11
- .byte 196,98,125,24,37,0,94,2,0 // vbroadcastss 0x25e00(%rip),%ymm12 # 38a4c <_sk_srcover_bgra_8888_sse2_lowp+0x348>
+ .byte 196,98,125,24,37,112,95,2,0 // vbroadcastss 0x25f70(%rip),%ymm12 # 38bbc <_sk_srcover_bgra_8888_sse2_lowp+0x348>
.byte 196,65,36,89,220 // vmulps %ymm12,%ymm11,%ymm11
- .byte 196,98,125,24,37,246,93,2,0 // vbroadcastss 0x25df6(%rip),%ymm12 # 38a50 <_sk_srcover_bgra_8888_sse2_lowp+0x34c>
+ .byte 196,98,125,24,37,102,95,2,0 // vbroadcastss 0x25f66(%rip),%ymm12 # 38bc0 <_sk_srcover_bgra_8888_sse2_lowp+0x34c>
.byte 196,193,108,84,212 // vandps %ymm12,%ymm2,%ymm2
- .byte 196,98,125,24,37,72,93,2,0 // vbroadcastss 0x25d48(%rip),%ymm12 # 389b0 <_sk_srcover_bgra_8888_sse2_lowp+0x2ac>
+ .byte 196,98,125,24,37,184,94,2,0 // vbroadcastss 0x25eb8(%rip),%ymm12 # 38b20 <_sk_srcover_bgra_8888_sse2_lowp+0x2ac>
.byte 196,193,108,86,212 // vorps %ymm12,%ymm2,%ymm2
- .byte 196,98,125,24,37,222,93,2,0 // vbroadcastss 0x25dde(%rip),%ymm12 # 38a54 <_sk_srcover_bgra_8888_sse2_lowp+0x350>
+ .byte 196,98,125,24,37,78,95,2,0 // vbroadcastss 0x25f4e(%rip),%ymm12 # 38bc4 <_sk_srcover_bgra_8888_sse2_lowp+0x350>
.byte 196,65,36,88,220 // vaddps %ymm12,%ymm11,%ymm11
- .byte 196,98,125,24,37,212,93,2,0 // vbroadcastss 0x25dd4(%rip),%ymm12 # 38a58 <_sk_srcover_bgra_8888_sse2_lowp+0x354>
+ .byte 196,98,125,24,37,68,95,2,0 // vbroadcastss 0x25f44(%rip),%ymm12 # 38bc8 <_sk_srcover_bgra_8888_sse2_lowp+0x354>
.byte 196,65,108,89,228 // vmulps %ymm12,%ymm2,%ymm12
.byte 196,65,36,92,220 // vsubps %ymm12,%ymm11,%ymm11
- .byte 196,98,125,24,37,197,93,2,0 // vbroadcastss 0x25dc5(%rip),%ymm12 # 38a5c <_sk_srcover_bgra_8888_sse2_lowp+0x358>
+ .byte 196,98,125,24,37,53,95,2,0 // vbroadcastss 0x25f35(%rip),%ymm12 # 38bcc <_sk_srcover_bgra_8888_sse2_lowp+0x358>
.byte 196,193,108,88,212 // vaddps %ymm12,%ymm2,%ymm2
- .byte 196,98,125,24,37,187,93,2,0 // vbroadcastss 0x25dbb(%rip),%ymm12 # 38a60 <_sk_srcover_bgra_8888_sse2_lowp+0x35c>
+ .byte 196,98,125,24,37,43,95,2,0 // vbroadcastss 0x25f2b(%rip),%ymm12 # 38bd0 <_sk_srcover_bgra_8888_sse2_lowp+0x35c>
.byte 197,156,94,210 // vdivps %ymm2,%ymm12,%ymm2
.byte 197,164,92,210 // vsubps %ymm2,%ymm11,%ymm2
.byte 197,172,89,210 // vmulps %ymm2,%ymm10,%ymm2
.byte 196,99,125,8,210,1 // vroundps $0x1,%ymm2,%ymm10
.byte 196,65,108,92,210 // vsubps %ymm10,%ymm2,%ymm10
- .byte 196,98,125,24,29,159,93,2,0 // vbroadcastss 0x25d9f(%rip),%ymm11 # 38a64 <_sk_srcover_bgra_8888_sse2_lowp+0x360>
+ .byte 196,98,125,24,29,15,95,2,0 // vbroadcastss 0x25f0f(%rip),%ymm11 # 38bd4 <_sk_srcover_bgra_8888_sse2_lowp+0x360>
.byte 196,193,108,88,211 // vaddps %ymm11,%ymm2,%ymm2
- .byte 196,98,125,24,29,149,93,2,0 // vbroadcastss 0x25d95(%rip),%ymm11 # 38a68 <_sk_srcover_bgra_8888_sse2_lowp+0x364>
+ .byte 196,98,125,24,29,5,95,2,0 // vbroadcastss 0x25f05(%rip),%ymm11 # 38bd8 <_sk_srcover_bgra_8888_sse2_lowp+0x364>
.byte 196,65,44,89,219 // vmulps %ymm11,%ymm10,%ymm11
.byte 196,193,108,92,211 // vsubps %ymm11,%ymm2,%ymm2
- .byte 196,98,125,24,29,134,93,2,0 // vbroadcastss 0x25d86(%rip),%ymm11 # 38a6c <_sk_srcover_bgra_8888_sse2_lowp+0x368>
+ .byte 196,98,125,24,29,246,94,2,0 // vbroadcastss 0x25ef6(%rip),%ymm11 # 38bdc <_sk_srcover_bgra_8888_sse2_lowp+0x368>
.byte 196,65,36,92,210 // vsubps %ymm10,%ymm11,%ymm10
- .byte 196,98,125,24,29,124,93,2,0 // vbroadcastss 0x25d7c(%rip),%ymm11 # 38a70 <_sk_srcover_bgra_8888_sse2_lowp+0x36c>
+ .byte 196,98,125,24,29,236,94,2,0 // vbroadcastss 0x25eec(%rip),%ymm11 # 38be0 <_sk_srcover_bgra_8888_sse2_lowp+0x36c>
.byte 196,65,36,94,210 // vdivps %ymm10,%ymm11,%ymm10
.byte 196,193,108,88,210 // vaddps %ymm10,%ymm2,%ymm2
- .byte 196,98,125,24,21,109,93,2,0 // vbroadcastss 0x25d6d(%rip),%ymm10 # 38a74 <_sk_srcover_bgra_8888_sse2_lowp+0x370>
+ .byte 196,98,125,24,21,221,94,2,0 // vbroadcastss 0x25edd(%rip),%ymm10 # 38be4 <_sk_srcover_bgra_8888_sse2_lowp+0x370>
.byte 196,193,108,89,210 // vmulps %ymm10,%ymm2,%ymm2
.byte 197,253,91,210 // vcvtps2dq %ymm2,%ymm2
.byte 196,98,125,24,80,20 // vbroadcastss 0x14(%rax),%ymm10
@@ -27830,7 +27830,7 @@ _sk_parametric_b_avx:
.byte 196,195,109,74,209,128 // vblendvps %ymm8,%ymm9,%ymm2,%ymm2
.byte 196,65,60,87,192 // vxorps %ymm8,%ymm8,%ymm8
.byte 196,193,108,95,208 // vmaxps %ymm8,%ymm2,%ymm2
- .byte 196,98,125,24,5,129,92,2,0 // vbroadcastss 0x25c81(%rip),%ymm8 # 389b4 <_sk_srcover_bgra_8888_sse2_lowp+0x2b0>
+ .byte 196,98,125,24,5,241,93,2,0 // vbroadcastss 0x25df1(%rip),%ymm8 # 38b24 <_sk_srcover_bgra_8888_sse2_lowp+0x2b0>
.byte 196,193,108,93,208 // vminps %ymm8,%ymm2,%ymm2
.byte 72,173 // lods %ds:(%rsi),%rax
.byte 255,224 // jmpq *%rax
@@ -27852,36 +27852,36 @@ _sk_parametric_a_avx:
.byte 196,193,100,88,219 // vaddps %ymm11,%ymm3,%ymm3
.byte 196,98,125,24,16 // vbroadcastss (%rax),%ymm10
.byte 197,124,91,219 // vcvtdq2ps %ymm3,%ymm11
- .byte 196,98,125,24,37,198,92,2,0 // vbroadcastss 0x25cc6(%rip),%ymm12 # 38a4c <_sk_srcover_bgra_8888_sse2_lowp+0x348>
+ .byte 196,98,125,24,37,54,94,2,0 // vbroadcastss 0x25e36(%rip),%ymm12 # 38bbc <_sk_srcover_bgra_8888_sse2_lowp+0x348>
.byte 196,65,36,89,220 // vmulps %ymm12,%ymm11,%ymm11
- .byte 196,98,125,24,37,188,92,2,0 // vbroadcastss 0x25cbc(%rip),%ymm12 # 38a50 <_sk_srcover_bgra_8888_sse2_lowp+0x34c>
+ .byte 196,98,125,24,37,44,94,2,0 // vbroadcastss 0x25e2c(%rip),%ymm12 # 38bc0 <_sk_srcover_bgra_8888_sse2_lowp+0x34c>
.byte 196,193,100,84,220 // vandps %ymm12,%ymm3,%ymm3
- .byte 196,98,125,24,37,14,92,2,0 // vbroadcastss 0x25c0e(%rip),%ymm12 # 389b0 <_sk_srcover_bgra_8888_sse2_lowp+0x2ac>
+ .byte 196,98,125,24,37,126,93,2,0 // vbroadcastss 0x25d7e(%rip),%ymm12 # 38b20 <_sk_srcover_bgra_8888_sse2_lowp+0x2ac>
.byte 196,193,100,86,220 // vorps %ymm12,%ymm3,%ymm3
- .byte 196,98,125,24,37,164,92,2,0 // vbroadcastss 0x25ca4(%rip),%ymm12 # 38a54 <_sk_srcover_bgra_8888_sse2_lowp+0x350>
+ .byte 196,98,125,24,37,20,94,2,0 // vbroadcastss 0x25e14(%rip),%ymm12 # 38bc4 <_sk_srcover_bgra_8888_sse2_lowp+0x350>
.byte 196,65,36,88,220 // vaddps %ymm12,%ymm11,%ymm11
- .byte 196,98,125,24,37,154,92,2,0 // vbroadcastss 0x25c9a(%rip),%ymm12 # 38a58 <_sk_srcover_bgra_8888_sse2_lowp+0x354>
+ .byte 196,98,125,24,37,10,94,2,0 // vbroadcastss 0x25e0a(%rip),%ymm12 # 38bc8 <_sk_srcover_bgra_8888_sse2_lowp+0x354>
.byte 196,65,100,89,228 // vmulps %ymm12,%ymm3,%ymm12
.byte 196,65,36,92,220 // vsubps %ymm12,%ymm11,%ymm11
- .byte 196,98,125,24,37,139,92,2,0 // vbroadcastss 0x25c8b(%rip),%ymm12 # 38a5c <_sk_srcover_bgra_8888_sse2_lowp+0x358>
+ .byte 196,98,125,24,37,251,93,2,0 // vbroadcastss 0x25dfb(%rip),%ymm12 # 38bcc <_sk_srcover_bgra_8888_sse2_lowp+0x358>
.byte 196,193,100,88,220 // vaddps %ymm12,%ymm3,%ymm3
- .byte 196,98,125,24,37,129,92,2,0 // vbroadcastss 0x25c81(%rip),%ymm12 # 38a60 <_sk_srcover_bgra_8888_sse2_lowp+0x35c>
+ .byte 196,98,125,24,37,241,93,2,0 // vbroadcastss 0x25df1(%rip),%ymm12 # 38bd0 <_sk_srcover_bgra_8888_sse2_lowp+0x35c>
.byte 197,156,94,219 // vdivps %ymm3,%ymm12,%ymm3
.byte 197,164,92,219 // vsubps %ymm3,%ymm11,%ymm3
.byte 197,172,89,219 // vmulps %ymm3,%ymm10,%ymm3
.byte 196,99,125,8,211,1 // vroundps $0x1,%ymm3,%ymm10
.byte 196,65,100,92,210 // vsubps %ymm10,%ymm3,%ymm10
- .byte 196,98,125,24,29,101,92,2,0 // vbroadcastss 0x25c65(%rip),%ymm11 # 38a64 <_sk_srcover_bgra_8888_sse2_lowp+0x360>
+ .byte 196,98,125,24,29,213,93,2,0 // vbroadcastss 0x25dd5(%rip),%ymm11 # 38bd4 <_sk_srcover_bgra_8888_sse2_lowp+0x360>
.byte 196,193,100,88,219 // vaddps %ymm11,%ymm3,%ymm3
- .byte 196,98,125,24,29,91,92,2,0 // vbroadcastss 0x25c5b(%rip),%ymm11 # 38a68 <_sk_srcover_bgra_8888_sse2_lowp+0x364>
+ .byte 196,98,125,24,29,203,93,2,0 // vbroadcastss 0x25dcb(%rip),%ymm11 # 38bd8 <_sk_srcover_bgra_8888_sse2_lowp+0x364>
.byte 196,65,44,89,219 // vmulps %ymm11,%ymm10,%ymm11
.byte 196,193,100,92,219 // vsubps %ymm11,%ymm3,%ymm3
- .byte 196,98,125,24,29,76,92,2,0 // vbroadcastss 0x25c4c(%rip),%ymm11 # 38a6c <_sk_srcover_bgra_8888_sse2_lowp+0x368>
+ .byte 196,98,125,24,29,188,93,2,0 // vbroadcastss 0x25dbc(%rip),%ymm11 # 38bdc <_sk_srcover_bgra_8888_sse2_lowp+0x368>
.byte 196,65,36,92,210 // vsubps %ymm10,%ymm11,%ymm10
- .byte 196,98,125,24,29,66,92,2,0 // vbroadcastss 0x25c42(%rip),%ymm11 # 38a70 <_sk_srcover_bgra_8888_sse2_lowp+0x36c>
+ .byte 196,98,125,24,29,178,93,2,0 // vbroadcastss 0x25db2(%rip),%ymm11 # 38be0 <_sk_srcover_bgra_8888_sse2_lowp+0x36c>
.byte 196,65,36,94,210 // vdivps %ymm10,%ymm11,%ymm10
.byte 196,193,100,88,218 // vaddps %ymm10,%ymm3,%ymm3
- .byte 196,98,125,24,21,51,92,2,0 // vbroadcastss 0x25c33(%rip),%ymm10 # 38a74 <_sk_srcover_bgra_8888_sse2_lowp+0x370>
+ .byte 196,98,125,24,21,163,93,2,0 // vbroadcastss 0x25da3(%rip),%ymm10 # 38be4 <_sk_srcover_bgra_8888_sse2_lowp+0x370>
.byte 196,193,100,89,218 // vmulps %ymm10,%ymm3,%ymm3
.byte 197,253,91,219 // vcvtps2dq %ymm3,%ymm3
.byte 196,98,125,24,80,20 // vbroadcastss 0x14(%rax),%ymm10
@@ -27889,7 +27889,7 @@ _sk_parametric_a_avx:
.byte 196,195,101,74,217,128 // vblendvps %ymm8,%ymm9,%ymm3,%ymm3
.byte 196,65,60,87,192 // vxorps %ymm8,%ymm8,%ymm8
.byte 196,193,100,95,216 // vmaxps %ymm8,%ymm3,%ymm3
- .byte 196,98,125,24,5,71,91,2,0 // vbroadcastss 0x25b47(%rip),%ymm8 # 389b4 <_sk_srcover_bgra_8888_sse2_lowp+0x2b0>
+ .byte 196,98,125,24,5,183,92,2,0 // vbroadcastss 0x25cb7(%rip),%ymm8 # 38b24 <_sk_srcover_bgra_8888_sse2_lowp+0x2b0>
.byte 196,193,100,93,216 // vminps %ymm8,%ymm3,%ymm3
.byte 72,173 // lods %ds:(%rsi),%rax
.byte 255,224 // jmpq *%rax
@@ -27908,34 +27908,34 @@ _sk_gamma_avx:
.byte 197,252,40,233 // vmovaps %ymm1,%ymm5
.byte 72,173 // lods %ds:(%rsi),%rax
.byte 197,252,91,200 // vcvtdq2ps %ymm0,%ymm1
- .byte 196,98,125,24,5,158,91,2,0 // vbroadcastss 0x25b9e(%rip),%ymm8 # 38a4c <_sk_srcover_bgra_8888_sse2_lowp+0x348>
+ .byte 196,98,125,24,5,14,93,2,0 // vbroadcastss 0x25d0e(%rip),%ymm8 # 38bbc <_sk_srcover_bgra_8888_sse2_lowp+0x348>
.byte 196,193,116,89,200 // vmulps %ymm8,%ymm1,%ymm1
- .byte 196,98,125,24,13,148,91,2,0 // vbroadcastss 0x25b94(%rip),%ymm9 # 38a50 <_sk_srcover_bgra_8888_sse2_lowp+0x34c>
+ .byte 196,98,125,24,13,4,93,2,0 // vbroadcastss 0x25d04(%rip),%ymm9 # 38bc0 <_sk_srcover_bgra_8888_sse2_lowp+0x34c>
.byte 196,193,124,84,193 // vandps %ymm9,%ymm0,%ymm0
- .byte 196,226,125,24,37,230,90,2,0 // vbroadcastss 0x25ae6(%rip),%ymm4 # 389b0 <_sk_srcover_bgra_8888_sse2_lowp+0x2ac>
+ .byte 196,226,125,24,37,86,92,2,0 // vbroadcastss 0x25c56(%rip),%ymm4 # 38b20 <_sk_srcover_bgra_8888_sse2_lowp+0x2ac>
.byte 197,252,86,196 // vorps %ymm4,%ymm0,%ymm0
- .byte 196,98,125,24,21,125,91,2,0 // vbroadcastss 0x25b7d(%rip),%ymm10 # 38a54 <_sk_srcover_bgra_8888_sse2_lowp+0x350>
+ .byte 196,98,125,24,21,237,92,2,0 // vbroadcastss 0x25ced(%rip),%ymm10 # 38bc4 <_sk_srcover_bgra_8888_sse2_lowp+0x350>
.byte 196,193,116,88,202 // vaddps %ymm10,%ymm1,%ymm1
- .byte 196,98,125,24,29,115,91,2,0 // vbroadcastss 0x25b73(%rip),%ymm11 # 38a58 <_sk_srcover_bgra_8888_sse2_lowp+0x354>
+ .byte 196,98,125,24,29,227,92,2,0 // vbroadcastss 0x25ce3(%rip),%ymm11 # 38bc8 <_sk_srcover_bgra_8888_sse2_lowp+0x354>
.byte 196,193,124,89,211 // vmulps %ymm11,%ymm0,%ymm2
.byte 197,244,92,202 // vsubps %ymm2,%ymm1,%ymm1
- .byte 196,98,125,24,37,101,91,2,0 // vbroadcastss 0x25b65(%rip),%ymm12 # 38a5c <_sk_srcover_bgra_8888_sse2_lowp+0x358>
+ .byte 196,98,125,24,37,213,92,2,0 // vbroadcastss 0x25cd5(%rip),%ymm12 # 38bcc <_sk_srcover_bgra_8888_sse2_lowp+0x358>
.byte 196,193,124,88,196 // vaddps %ymm12,%ymm0,%ymm0
- .byte 196,98,125,24,45,91,91,2,0 // vbroadcastss 0x25b5b(%rip),%ymm13 # 38a60 <_sk_srcover_bgra_8888_sse2_lowp+0x35c>
+ .byte 196,98,125,24,45,203,92,2,0 // vbroadcastss 0x25ccb(%rip),%ymm13 # 38bd0 <_sk_srcover_bgra_8888_sse2_lowp+0x35c>
.byte 197,148,94,192 // vdivps %ymm0,%ymm13,%ymm0
.byte 197,244,92,192 // vsubps %ymm0,%ymm1,%ymm0
.byte 196,98,125,24,48 // vbroadcastss (%rax),%ymm14
.byte 196,193,124,89,198 // vmulps %ymm14,%ymm0,%ymm0
.byte 196,227,125,8,200,1 // vroundps $0x1,%ymm0,%ymm1
.byte 197,252,92,241 // vsubps %ymm1,%ymm0,%ymm6
- .byte 196,98,125,24,61,58,91,2,0 // vbroadcastss 0x25b3a(%rip),%ymm15 # 38a64 <_sk_srcover_bgra_8888_sse2_lowp+0x360>
+ .byte 196,98,125,24,61,170,92,2,0 // vbroadcastss 0x25caa(%rip),%ymm15 # 38bd4 <_sk_srcover_bgra_8888_sse2_lowp+0x360>
.byte 196,193,124,88,199 // vaddps %ymm15,%ymm0,%ymm0
- .byte 196,226,125,24,29,48,91,2,0 // vbroadcastss 0x25b30(%rip),%ymm3 # 38a68 <_sk_srcover_bgra_8888_sse2_lowp+0x364>
+ .byte 196,226,125,24,29,160,92,2,0 // vbroadcastss 0x25ca0(%rip),%ymm3 # 38bd8 <_sk_srcover_bgra_8888_sse2_lowp+0x364>
.byte 197,204,89,203 // vmulps %ymm3,%ymm6,%ymm1
.byte 197,252,92,201 // vsubps %ymm1,%ymm0,%ymm1
- .byte 196,226,125,24,21,35,91,2,0 // vbroadcastss 0x25b23(%rip),%ymm2 # 38a6c <_sk_srcover_bgra_8888_sse2_lowp+0x368>
+ .byte 196,226,125,24,21,147,92,2,0 // vbroadcastss 0x25c93(%rip),%ymm2 # 38bdc <_sk_srcover_bgra_8888_sse2_lowp+0x368>
.byte 197,236,92,198 // vsubps %ymm6,%ymm2,%ymm0
- .byte 196,226,125,24,53,26,91,2,0 // vbroadcastss 0x25b1a(%rip),%ymm6 # 38a70 <_sk_srcover_bgra_8888_sse2_lowp+0x36c>
+ .byte 196,226,125,24,53,138,92,2,0 // vbroadcastss 0x25c8a(%rip),%ymm6 # 38be0 <_sk_srcover_bgra_8888_sse2_lowp+0x36c>
.byte 197,204,94,192 // vdivps %ymm0,%ymm6,%ymm0
.byte 197,244,88,192 // vaddps %ymm0,%ymm1,%ymm0
.byte 197,252,17,68,36,128 // vmovups %ymm0,-0x80(%rsp)
@@ -27977,7 +27977,7 @@ _sk_gamma_avx:
.byte 197,236,92,212 // vsubps %ymm4,%ymm2,%ymm2
.byte 197,204,94,210 // vdivps %ymm2,%ymm6,%ymm2
.byte 197,244,88,202 // vaddps %ymm2,%ymm1,%ymm1
- .byte 196,226,125,24,21,93,90,2,0 // vbroadcastss 0x25a5d(%rip),%ymm2 # 38a74 <_sk_srcover_bgra_8888_sse2_lowp+0x370>
+ .byte 196,226,125,24,21,205,91,2,0 // vbroadcastss 0x25bcd(%rip),%ymm2 # 38be4 <_sk_srcover_bgra_8888_sse2_lowp+0x370>
.byte 197,236,89,92,36,128 // vmulps -0x80(%rsp),%ymm2,%ymm3
.byte 197,252,89,226 // vmulps %ymm2,%ymm0,%ymm4
.byte 197,244,89,210 // vmulps %ymm2,%ymm1,%ymm2
@@ -27997,31 +27997,31 @@ HIDDEN _sk_lab_to_xyz_avx
.globl _sk_lab_to_xyz_avx
FUNCTION(_sk_lab_to_xyz_avx)
_sk_lab_to_xyz_avx:
- .byte 196,98,125,24,5,25,90,2,0 // vbroadcastss 0x25a19(%rip),%ymm8 # 38a78 <_sk_srcover_bgra_8888_sse2_lowp+0x374>
+ .byte 196,98,125,24,5,137,91,2,0 // vbroadcastss 0x25b89(%rip),%ymm8 # 38be8 <_sk_srcover_bgra_8888_sse2_lowp+0x374>
.byte 196,193,124,89,192 // vmulps %ymm8,%ymm0,%ymm0
- .byte 196,98,125,24,5,115,89,2,0 // vbroadcastss 0x25973(%rip),%ymm8 # 389e0 <_sk_srcover_bgra_8888_sse2_lowp+0x2dc>
+ .byte 196,98,125,24,5,227,90,2,0 // vbroadcastss 0x25ae3(%rip),%ymm8 # 38b50 <_sk_srcover_bgra_8888_sse2_lowp+0x2dc>
.byte 196,193,116,89,200 // vmulps %ymm8,%ymm1,%ymm1
- .byte 196,98,125,24,13,1,90,2,0 // vbroadcastss 0x25a01(%rip),%ymm9 # 38a7c <_sk_srcover_bgra_8888_sse2_lowp+0x378>
+ .byte 196,98,125,24,13,113,91,2,0 // vbroadcastss 0x25b71(%rip),%ymm9 # 38bec <_sk_srcover_bgra_8888_sse2_lowp+0x378>
.byte 196,193,116,88,201 // vaddps %ymm9,%ymm1,%ymm1
.byte 196,193,108,89,208 // vmulps %ymm8,%ymm2,%ymm2
.byte 196,193,108,88,209 // vaddps %ymm9,%ymm2,%ymm2
- .byte 196,98,125,24,5,237,89,2,0 // vbroadcastss 0x259ed(%rip),%ymm8 # 38a80 <_sk_srcover_bgra_8888_sse2_lowp+0x37c>
+ .byte 196,98,125,24,5,93,91,2,0 // vbroadcastss 0x25b5d(%rip),%ymm8 # 38bf0 <_sk_srcover_bgra_8888_sse2_lowp+0x37c>
.byte 196,193,124,88,192 // vaddps %ymm8,%ymm0,%ymm0
- .byte 196,98,125,24,5,227,89,2,0 // vbroadcastss 0x259e3(%rip),%ymm8 # 38a84 <_sk_srcover_bgra_8888_sse2_lowp+0x380>
+ .byte 196,98,125,24,5,83,91,2,0 // vbroadcastss 0x25b53(%rip),%ymm8 # 38bf4 <_sk_srcover_bgra_8888_sse2_lowp+0x380>
.byte 196,193,124,89,192 // vmulps %ymm8,%ymm0,%ymm0
- .byte 196,98,125,24,5,217,89,2,0 // vbroadcastss 0x259d9(%rip),%ymm8 # 38a88 <_sk_srcover_bgra_8888_sse2_lowp+0x384>
+ .byte 196,98,125,24,5,73,91,2,0 // vbroadcastss 0x25b49(%rip),%ymm8 # 38bf8 <_sk_srcover_bgra_8888_sse2_lowp+0x384>
.byte 196,193,116,89,200 // vmulps %ymm8,%ymm1,%ymm1
.byte 197,252,88,201 // vaddps %ymm1,%ymm0,%ymm1
- .byte 196,98,125,24,5,203,89,2,0 // vbroadcastss 0x259cb(%rip),%ymm8 # 38a8c <_sk_srcover_bgra_8888_sse2_lowp+0x388>
+ .byte 196,98,125,24,5,59,91,2,0 // vbroadcastss 0x25b3b(%rip),%ymm8 # 38bfc <_sk_srcover_bgra_8888_sse2_lowp+0x388>
.byte 196,193,108,89,208 // vmulps %ymm8,%ymm2,%ymm2
.byte 197,252,92,210 // vsubps %ymm2,%ymm0,%ymm2
.byte 197,116,89,193 // vmulps %ymm1,%ymm1,%ymm8
.byte 196,65,116,89,192 // vmulps %ymm8,%ymm1,%ymm8
- .byte 196,98,125,24,13,180,89,2,0 // vbroadcastss 0x259b4(%rip),%ymm9 # 38a90 <_sk_srcover_bgra_8888_sse2_lowp+0x38c>
+ .byte 196,98,125,24,13,36,91,2,0 // vbroadcastss 0x25b24(%rip),%ymm9 # 38c00 <_sk_srcover_bgra_8888_sse2_lowp+0x38c>
.byte 196,65,52,194,208,1 // vcmpltps %ymm8,%ymm9,%ymm10
- .byte 196,98,125,24,29,169,89,2,0 // vbroadcastss 0x259a9(%rip),%ymm11 # 38a94 <_sk_srcover_bgra_8888_sse2_lowp+0x390>
+ .byte 196,98,125,24,29,25,91,2,0 // vbroadcastss 0x25b19(%rip),%ymm11 # 38c04 <_sk_srcover_bgra_8888_sse2_lowp+0x390>
.byte 196,193,116,88,203 // vaddps %ymm11,%ymm1,%ymm1
- .byte 196,98,125,24,37,159,89,2,0 // vbroadcastss 0x2599f(%rip),%ymm12 # 38a98 <_sk_srcover_bgra_8888_sse2_lowp+0x394>
+ .byte 196,98,125,24,37,15,91,2,0 // vbroadcastss 0x25b0f(%rip),%ymm12 # 38c08 <_sk_srcover_bgra_8888_sse2_lowp+0x394>
.byte 196,193,116,89,204 // vmulps %ymm12,%ymm1,%ymm1
.byte 196,67,117,74,192,160 // vblendvps %ymm10,%ymm8,%ymm1,%ymm8
.byte 197,252,89,200 // vmulps %ymm0,%ymm0,%ymm1
@@ -28036,9 +28036,9 @@ _sk_lab_to_xyz_avx:
.byte 196,193,108,88,211 // vaddps %ymm11,%ymm2,%ymm2
.byte 196,193,108,89,212 // vmulps %ymm12,%ymm2,%ymm2
.byte 196,227,109,74,208,144 // vblendvps %ymm9,%ymm0,%ymm2,%ymm2
- .byte 196,226,125,24,5,85,89,2,0 // vbroadcastss 0x25955(%rip),%ymm0 # 38a9c <_sk_srcover_bgra_8888_sse2_lowp+0x398>
+ .byte 196,226,125,24,5,197,90,2,0 // vbroadcastss 0x25ac5(%rip),%ymm0 # 38c0c <_sk_srcover_bgra_8888_sse2_lowp+0x398>
.byte 197,188,89,192 // vmulps %ymm0,%ymm8,%ymm0
- .byte 196,98,125,24,5,76,89,2,0 // vbroadcastss 0x2594c(%rip),%ymm8 # 38aa0 <_sk_srcover_bgra_8888_sse2_lowp+0x39c>
+ .byte 196,98,125,24,5,188,90,2,0 // vbroadcastss 0x25abc(%rip),%ymm8 # 38c10 <_sk_srcover_bgra_8888_sse2_lowp+0x39c>
.byte 196,193,108,89,208 // vmulps %ymm8,%ymm2,%ymm2
.byte 72,173 // lods %ds:(%rsi),%rax
.byte 255,224 // jmpq *%rax
@@ -28056,13 +28056,13 @@ _sk_load_a8_avx:
.byte 72,133,255 // test %rdi,%rdi
.byte 117,68 // jne 131b9 <_sk_load_a8_avx+0x5c>
.byte 196,130,121,48,4,24 // vpmovzxbw (%r8,%r11,1),%xmm0
- .byte 197,249,219,5,13,97,2,0 // vpand 0x2610d(%rip),%xmm0,%xmm0 # 39290 <_sk_srcover_bgra_8888_sse2_lowp+0xb8c>
+ .byte 197,249,219,5,109,98,2,0 // vpand 0x2626d(%rip),%xmm0,%xmm0 # 393f0 <_sk_srcover_bgra_8888_sse2_lowp+0xb7c>
.byte 196,226,121,51,200 // vpmovzxwd %xmm0,%xmm1
.byte 197,249,112,192,78 // vpshufd $0x4e,%xmm0,%xmm0
.byte 196,226,121,51,192 // vpmovzxwd %xmm0,%xmm0
.byte 196,227,117,24,192,1 // vinsertf128 $0x1,%xmm0,%ymm1,%ymm0
.byte 197,252,91,192 // vcvtdq2ps %ymm0,%ymm0
- .byte 196,226,125,24,13,135,88,2,0 // vbroadcastss 0x25887(%rip),%ymm1 # 38a2c <_sk_srcover_bgra_8888_sse2_lowp+0x328>
+ .byte 196,226,125,24,13,247,89,2,0 // vbroadcastss 0x259f7(%rip),%ymm1 # 38b9c <_sk_srcover_bgra_8888_sse2_lowp+0x328>
.byte 197,252,89,217 // vmulps %ymm1,%ymm0,%ymm3
.byte 72,173 // lods %ds:(%rsi),%rax
.byte 197,252,87,192 // vxorps %ymm0,%ymm0,%ymm0
@@ -28110,7 +28110,7 @@ _sk_load_a8_avx:
.byte 255 // (bad)
.byte 255 // (bad)
.byte 255 // (bad)
- .byte 233,255,255,255,222 // jmpq ffffffffdf013260 <_sk_srcover_bgra_8888_sse2_lowp+0xffffffffdefdab5c>
+ .byte 233,255,255,255,222 // jmpq ffffffffdf013260 <_sk_srcover_bgra_8888_sse2_lowp+0xffffffffdefda9ec>
.byte 255 // (bad)
.byte 255 // (bad)
.byte 255,211 // callq *%rbx
@@ -28134,13 +28134,13 @@ _sk_load_a8_dst_avx:
.byte 72,133,255 // test %rdi,%rdi
.byte 117,68 // jne 132c8 <_sk_load_a8_dst_avx+0x5c>
.byte 196,130,121,48,36,24 // vpmovzxbw (%r8,%r11,1),%xmm4
- .byte 197,217,219,37,254,95,2,0 // vpand 0x25ffe(%rip),%xmm4,%xmm4 # 39290 <_sk_srcover_bgra_8888_sse2_lowp+0xb8c>
+ .byte 197,217,219,37,94,97,2,0 // vpand 0x2615e(%rip),%xmm4,%xmm4 # 393f0 <_sk_srcover_bgra_8888_sse2_lowp+0xb7c>
.byte 196,226,121,51,236 // vpmovzxwd %xmm4,%xmm5
.byte 197,249,112,228,78 // vpshufd $0x4e,%xmm4,%xmm4
.byte 196,226,121,51,228 // vpmovzxwd %xmm4,%xmm4
.byte 196,227,85,24,228,1 // vinsertf128 $0x1,%xmm4,%ymm5,%ymm4
.byte 197,252,91,228 // vcvtdq2ps %ymm4,%ymm4
- .byte 196,226,125,24,45,120,87,2,0 // vbroadcastss 0x25778(%rip),%ymm5 # 38a2c <_sk_srcover_bgra_8888_sse2_lowp+0x328>
+ .byte 196,226,125,24,45,232,88,2,0 // vbroadcastss 0x258e8(%rip),%ymm5 # 38b9c <_sk_srcover_bgra_8888_sse2_lowp+0x328>
.byte 197,220,89,253 // vmulps %ymm5,%ymm4,%ymm7
.byte 72,173 // lods %ds:(%rsi),%rax
.byte 197,220,87,228 // vxorps %ymm4,%ymm4,%ymm4
@@ -28188,7 +28188,7 @@ _sk_load_a8_dst_avx:
.byte 255 // (bad)
.byte 255 // (bad)
.byte 255 // (bad)
- .byte 232,255,255,255,221 // callq ffffffffde013370 <_sk_srcover_bgra_8888_sse2_lowp+0xffffffffddfdac6c>
+ .byte 232,255,255,255,221 // callq ffffffffde013370 <_sk_srcover_bgra_8888_sse2_lowp+0xffffffffddfdaafc>
.byte 255 // (bad)
.byte 255 // (bad)
.byte 255,210 // callq *%rdx
@@ -28268,7 +28268,7 @@ _sk_gather_a8_avx:
.byte 196,226,121,49,201 // vpmovzxbd %xmm1,%xmm1
.byte 196,227,125,24,193,1 // vinsertf128 $0x1,%xmm1,%ymm0,%ymm0
.byte 197,252,91,192 // vcvtdq2ps %ymm0,%ymm0
- .byte 196,226,125,24,13,126,85,2,0 // vbroadcastss 0x2557e(%rip),%ymm1 # 38a2c <_sk_srcover_bgra_8888_sse2_lowp+0x328>
+ .byte 196,226,125,24,13,238,86,2,0 // vbroadcastss 0x256ee(%rip),%ymm1 # 38b9c <_sk_srcover_bgra_8888_sse2_lowp+0x328>
.byte 197,252,89,217 // vmulps %ymm1,%ymm0,%ymm3
.byte 72,173 // lods %ds:(%rsi),%rax
.byte 197,252,87,192 // vxorps %ymm0,%ymm0,%ymm0
@@ -28291,7 +28291,7 @@ _sk_store_a8_avx:
.byte 77,15,175,193 // imul %r9,%r8
.byte 76,3,0 // add (%rax),%r8
.byte 76,99,218 // movslq %edx,%r11
- .byte 196,98,125,24,5,250,84,2,0 // vbroadcastss 0x254fa(%rip),%ymm8 # 389e0 <_sk_srcover_bgra_8888_sse2_lowp+0x2dc>
+ .byte 196,98,125,24,5,106,86,2,0 // vbroadcastss 0x2566a(%rip),%ymm8 # 38b50 <_sk_srcover_bgra_8888_sse2_lowp+0x2dc>
.byte 196,65,100,89,192 // vmulps %ymm8,%ymm3,%ymm8
.byte 196,65,125,91,192 // vcvtps2dq %ymm8,%ymm8
.byte 196,67,125,25,193,1 // vextractf128 $0x1,%ymm8,%xmm9
@@ -28316,13 +28316,13 @@ _sk_store_a8_avx:
.byte 196,3,121,20,4,24,0 // vpextrb $0x0,%xmm8,(%r8,%r11,1)
.byte 235,202 // jmp 1350a <_sk_store_a8_avx+0x40>
.byte 196,3,121,20,68,24,2,4 // vpextrb $0x4,%xmm8,0x2(%r8,%r11,1)
- .byte 196,98,57,0,5,79,93,2,0 // vpshufb 0x25d4f(%rip),%xmm8,%xmm8 # 392a0 <_sk_srcover_bgra_8888_sse2_lowp+0xb9c>
+ .byte 196,98,57,0,5,175,94,2,0 // vpshufb 0x25eaf(%rip),%xmm8,%xmm8 # 39400 <_sk_srcover_bgra_8888_sse2_lowp+0xb8c>
.byte 196,3,121,21,4,24,0 // vpextrw $0x0,%xmm8,(%r8,%r11,1)
.byte 235,176 // jmp 1350a <_sk_store_a8_avx+0x40>
.byte 196,3,121,20,68,24,6,12 // vpextrb $0xc,%xmm8,0x6(%r8,%r11,1)
.byte 196,3,121,20,68,24,5,10 // vpextrb $0xa,%xmm8,0x5(%r8,%r11,1)
.byte 196,3,121,20,68,24,4,8 // vpextrb $0x8,%xmm8,0x4(%r8,%r11,1)
- .byte 196,98,57,0,5,53,93,2,0 // vpshufb 0x25d35(%rip),%xmm8,%xmm8 # 392b0 <_sk_srcover_bgra_8888_sse2_lowp+0xbac>
+ .byte 196,98,57,0,5,149,94,2,0 // vpshufb 0x25e95(%rip),%xmm8,%xmm8 # 39410 <_sk_srcover_bgra_8888_sse2_lowp+0xb9c>
.byte 196,1,121,126,4,24 // vmovd %xmm8,(%r8,%r11,1)
.byte 235,135 // jmp 1350a <_sk_store_a8_avx+0x40>
.byte 144 // nop
@@ -28359,16 +28359,16 @@ _sk_load_g8_avx:
.byte 72,133,255 // test %rdi,%rdi
.byte 117,73 // jne 13601 <_sk_load_g8_avx+0x61>
.byte 196,130,121,48,4,24 // vpmovzxbw (%r8,%r11,1),%xmm0
- .byte 197,249,219,5,202,92,2,0 // vpand 0x25cca(%rip),%xmm0,%xmm0 # 39290 <_sk_srcover_bgra_8888_sse2_lowp+0xb8c>
+ .byte 197,249,219,5,42,94,2,0 // vpand 0x25e2a(%rip),%xmm0,%xmm0 # 393f0 <_sk_srcover_bgra_8888_sse2_lowp+0xb7c>
.byte 196,226,121,51,200 // vpmovzxwd %xmm0,%xmm1
.byte 197,249,112,192,78 // vpshufd $0x4e,%xmm0,%xmm0
.byte 196,226,121,51,192 // vpmovzxwd %xmm0,%xmm0
.byte 196,227,117,24,192,1 // vinsertf128 $0x1,%xmm0,%ymm1,%ymm0
.byte 197,252,91,192 // vcvtdq2ps %ymm0,%ymm0
- .byte 196,226,125,24,13,68,84,2,0 // vbroadcastss 0x25444(%rip),%ymm1 # 38a2c <_sk_srcover_bgra_8888_sse2_lowp+0x328>
+ .byte 196,226,125,24,13,180,85,2,0 // vbroadcastss 0x255b4(%rip),%ymm1 # 38b9c <_sk_srcover_bgra_8888_sse2_lowp+0x328>
.byte 197,252,89,193 // vmulps %ymm1,%ymm0,%ymm0
.byte 72,173 // lods %ds:(%rsi),%rax
- .byte 196,226,125,24,29,189,83,2,0 // vbroadcastss 0x253bd(%rip),%ymm3 # 389b4 <_sk_srcover_bgra_8888_sse2_lowp+0x2b0>
+ .byte 196,226,125,24,29,45,85,2,0 // vbroadcastss 0x2552d(%rip),%ymm3 # 38b24 <_sk_srcover_bgra_8888_sse2_lowp+0x2b0>
.byte 197,252,40,200 // vmovaps %ymm0,%ymm1
.byte 197,252,40,208 // vmovaps %ymm0,%ymm2
.byte 255,224 // jmpq *%rax
@@ -28413,7 +28413,7 @@ _sk_load_g8_avx:
.byte 255 // (bad)
.byte 255 // (bad)
.byte 255 // (bad)
- .byte 233,255,255,255,222 // jmpq ffffffffdf0136a8 <_sk_srcover_bgra_8888_sse2_lowp+0xffffffffdefdafa4>
+ .byte 233,255,255,255,222 // jmpq ffffffffdf0136a8 <_sk_srcover_bgra_8888_sse2_lowp+0xffffffffdefdae34>
.byte 255 // (bad)
.byte 255 // (bad)
.byte 255,211 // callq *%rbx
@@ -28437,16 +28437,16 @@ _sk_load_g8_dst_avx:
.byte 72,133,255 // test %rdi,%rdi
.byte 117,73 // jne 13715 <_sk_load_g8_dst_avx+0x61>
.byte 196,130,121,48,36,24 // vpmovzxbw (%r8,%r11,1),%xmm4
- .byte 197,217,219,37,182,91,2,0 // vpand 0x25bb6(%rip),%xmm4,%xmm4 # 39290 <_sk_srcover_bgra_8888_sse2_lowp+0xb8c>
+ .byte 197,217,219,37,22,93,2,0 // vpand 0x25d16(%rip),%xmm4,%xmm4 # 393f0 <_sk_srcover_bgra_8888_sse2_lowp+0xb7c>
.byte 196,226,121,51,236 // vpmovzxwd %xmm4,%xmm5
.byte 197,249,112,228,78 // vpshufd $0x4e,%xmm4,%xmm4
.byte 196,226,121,51,228 // vpmovzxwd %xmm4,%xmm4
.byte 196,227,85,24,228,1 // vinsertf128 $0x1,%xmm4,%ymm5,%ymm4
.byte 197,252,91,228 // vcvtdq2ps %ymm4,%ymm4
- .byte 196,226,125,24,45,48,83,2,0 // vbroadcastss 0x25330(%rip),%ymm5 # 38a2c <_sk_srcover_bgra_8888_sse2_lowp+0x328>
+ .byte 196,226,125,24,45,160,84,2,0 // vbroadcastss 0x254a0(%rip),%ymm5 # 38b9c <_sk_srcover_bgra_8888_sse2_lowp+0x328>
.byte 197,220,89,229 // vmulps %ymm5,%ymm4,%ymm4
.byte 72,173 // lods %ds:(%rsi),%rax
- .byte 196,226,125,24,61,169,82,2,0 // vbroadcastss 0x252a9(%rip),%ymm7 # 389b4 <_sk_srcover_bgra_8888_sse2_lowp+0x2b0>
+ .byte 196,226,125,24,61,25,84,2,0 // vbroadcastss 0x25419(%rip),%ymm7 # 38b24 <_sk_srcover_bgra_8888_sse2_lowp+0x2b0>
.byte 197,252,40,236 // vmovaps %ymm4,%ymm5
.byte 197,252,40,244 // vmovaps %ymm4,%ymm6
.byte 255,224 // jmpq *%rax
@@ -28491,7 +28491,7 @@ _sk_load_g8_dst_avx:
.byte 255 // (bad)
.byte 255 // (bad)
.byte 255 // (bad)
- .byte 233,255,255,255,222 // jmpq ffffffffdf0137bc <_sk_srcover_bgra_8888_sse2_lowp+0xffffffffdefdb0b8>
+ .byte 233,255,255,255,222 // jmpq ffffffffdf0137bc <_sk_srcover_bgra_8888_sse2_lowp+0xffffffffdefdaf48>
.byte 255 // (bad)
.byte 255 // (bad)
.byte 255,211 // callq *%rbx
@@ -28571,10 +28571,10 @@ _sk_gather_g8_avx:
.byte 196,226,121,49,201 // vpmovzxbd %xmm1,%xmm1
.byte 196,227,125,24,193,1 // vinsertf128 $0x1,%xmm1,%ymm0,%ymm0
.byte 197,252,91,192 // vcvtdq2ps %ymm0,%ymm0
- .byte 196,226,125,24,13,50,81,2,0 // vbroadcastss 0x25132(%rip),%ymm1 # 38a2c <_sk_srcover_bgra_8888_sse2_lowp+0x328>
+ .byte 196,226,125,24,13,162,82,2,0 // vbroadcastss 0x252a2(%rip),%ymm1 # 38b9c <_sk_srcover_bgra_8888_sse2_lowp+0x328>
.byte 197,252,89,193 // vmulps %ymm1,%ymm0,%ymm0
.byte 72,173 // lods %ds:(%rsi),%rax
- .byte 196,226,125,24,29,171,80,2,0 // vbroadcastss 0x250ab(%rip),%ymm3 # 389b4 <_sk_srcover_bgra_8888_sse2_lowp+0x2b0>
+ .byte 196,226,125,24,29,27,82,2,0 // vbroadcastss 0x2521b(%rip),%ymm3 # 38b24 <_sk_srcover_bgra_8888_sse2_lowp+0x2b0>
.byte 197,252,40,200 // vmovaps %ymm0,%ymm1
.byte 197,252,40,208 // vmovaps %ymm0,%ymm2
.byte 91 // pop %rbx
@@ -28602,23 +28602,23 @@ _sk_load_565_avx:
.byte 197,249,112,192,78 // vpshufd $0x4e,%xmm0,%xmm0
.byte 196,226,121,51,192 // vpmovzxwd %xmm0,%xmm0
.byte 196,227,117,24,208,1 // vinsertf128 $0x1,%xmm0,%ymm1,%ymm2
- .byte 196,226,125,24,5,210,80,2,0 // vbroadcastss 0x250d2(%rip),%ymm0 # 38a30 <_sk_srcover_bgra_8888_sse2_lowp+0x32c>
+ .byte 196,226,125,24,5,66,82,2,0 // vbroadcastss 0x25242(%rip),%ymm0 # 38ba0 <_sk_srcover_bgra_8888_sse2_lowp+0x32c>
.byte 197,236,84,192 // vandps %ymm0,%ymm2,%ymm0
.byte 197,252,91,192 // vcvtdq2ps %ymm0,%ymm0
- .byte 196,226,125,24,13,197,80,2,0 // vbroadcastss 0x250c5(%rip),%ymm1 # 38a34 <_sk_srcover_bgra_8888_sse2_lowp+0x330>
+ .byte 196,226,125,24,13,53,82,2,0 // vbroadcastss 0x25235(%rip),%ymm1 # 38ba4 <_sk_srcover_bgra_8888_sse2_lowp+0x330>
.byte 197,252,89,193 // vmulps %ymm1,%ymm0,%ymm0
- .byte 196,226,125,24,13,188,80,2,0 // vbroadcastss 0x250bc(%rip),%ymm1 # 38a38 <_sk_srcover_bgra_8888_sse2_lowp+0x334>
+ .byte 196,226,125,24,13,44,82,2,0 // vbroadcastss 0x2522c(%rip),%ymm1 # 38ba8 <_sk_srcover_bgra_8888_sse2_lowp+0x334>
.byte 197,236,84,201 // vandps %ymm1,%ymm2,%ymm1
.byte 197,252,91,201 // vcvtdq2ps %ymm1,%ymm1
- .byte 196,226,125,24,29,175,80,2,0 // vbroadcastss 0x250af(%rip),%ymm3 # 38a3c <_sk_srcover_bgra_8888_sse2_lowp+0x338>
+ .byte 196,226,125,24,29,31,82,2,0 // vbroadcastss 0x2521f(%rip),%ymm3 # 38bac <_sk_srcover_bgra_8888_sse2_lowp+0x338>
.byte 197,244,89,203 // vmulps %ymm3,%ymm1,%ymm1
- .byte 196,226,125,24,29,166,80,2,0 // vbroadcastss 0x250a6(%rip),%ymm3 # 38a40 <_sk_srcover_bgra_8888_sse2_lowp+0x33c>
+ .byte 196,226,125,24,29,22,82,2,0 // vbroadcastss 0x25216(%rip),%ymm3 # 38bb0 <_sk_srcover_bgra_8888_sse2_lowp+0x33c>
.byte 197,236,84,211 // vandps %ymm3,%ymm2,%ymm2
.byte 197,252,91,210 // vcvtdq2ps %ymm2,%ymm2
- .byte 196,226,125,24,29,153,80,2,0 // vbroadcastss 0x25099(%rip),%ymm3 # 38a44 <_sk_srcover_bgra_8888_sse2_lowp+0x340>
+ .byte 196,226,125,24,29,9,82,2,0 // vbroadcastss 0x25209(%rip),%ymm3 # 38bb4 <_sk_srcover_bgra_8888_sse2_lowp+0x340>
.byte 197,236,89,211 // vmulps %ymm3,%ymm2,%ymm2
.byte 72,173 // lods %ds:(%rsi),%rax
- .byte 196,226,125,24,29,250,79,2,0 // vbroadcastss 0x24ffa(%rip),%ymm3 # 389b4 <_sk_srcover_bgra_8888_sse2_lowp+0x2b0>
+ .byte 196,226,125,24,29,106,81,2,0 // vbroadcastss 0x2516a(%rip),%ymm3 # 38b24 <_sk_srcover_bgra_8888_sse2_lowp+0x2b0>
.byte 255,224 // jmpq *%rax
.byte 65,137,249 // mov %edi,%r9d
.byte 65,128,225,7 // and $0x7,%r9b
@@ -28683,23 +28683,23 @@ _sk_load_565_dst_avx:
.byte 197,249,112,228,78 // vpshufd $0x4e,%xmm4,%xmm4
.byte 196,226,121,51,228 // vpmovzxwd %xmm4,%xmm4
.byte 196,227,85,24,244,1 // vinsertf128 $0x1,%xmm4,%ymm5,%ymm6
- .byte 196,226,125,24,37,145,79,2,0 // vbroadcastss 0x24f91(%rip),%ymm4 # 38a30 <_sk_srcover_bgra_8888_sse2_lowp+0x32c>
+ .byte 196,226,125,24,37,1,81,2,0 // vbroadcastss 0x25101(%rip),%ymm4 # 38ba0 <_sk_srcover_bgra_8888_sse2_lowp+0x32c>
.byte 197,204,84,228 // vandps %ymm4,%ymm6,%ymm4
.byte 197,252,91,228 // vcvtdq2ps %ymm4,%ymm4
- .byte 196,226,125,24,45,132,79,2,0 // vbroadcastss 0x24f84(%rip),%ymm5 # 38a34 <_sk_srcover_bgra_8888_sse2_lowp+0x330>
+ .byte 196,226,125,24,45,244,80,2,0 // vbroadcastss 0x250f4(%rip),%ymm5 # 38ba4 <_sk_srcover_bgra_8888_sse2_lowp+0x330>
.byte 197,220,89,229 // vmulps %ymm5,%ymm4,%ymm4
- .byte 196,226,125,24,45,123,79,2,0 // vbroadcastss 0x24f7b(%rip),%ymm5 # 38a38 <_sk_srcover_bgra_8888_sse2_lowp+0x334>
+ .byte 196,226,125,24,45,235,80,2,0 // vbroadcastss 0x250eb(%rip),%ymm5 # 38ba8 <_sk_srcover_bgra_8888_sse2_lowp+0x334>
.byte 197,204,84,237 // vandps %ymm5,%ymm6,%ymm5
.byte 197,252,91,237 // vcvtdq2ps %ymm5,%ymm5
- .byte 196,226,125,24,61,110,79,2,0 // vbroadcastss 0x24f6e(%rip),%ymm7 # 38a3c <_sk_srcover_bgra_8888_sse2_lowp+0x338>
+ .byte 196,226,125,24,61,222,80,2,0 // vbroadcastss 0x250de(%rip),%ymm7 # 38bac <_sk_srcover_bgra_8888_sse2_lowp+0x338>
.byte 197,212,89,239 // vmulps %ymm7,%ymm5,%ymm5
- .byte 196,226,125,24,61,101,79,2,0 // vbroadcastss 0x24f65(%rip),%ymm7 # 38a40 <_sk_srcover_bgra_8888_sse2_lowp+0x33c>
+ .byte 196,226,125,24,61,213,80,2,0 // vbroadcastss 0x250d5(%rip),%ymm7 # 38bb0 <_sk_srcover_bgra_8888_sse2_lowp+0x33c>
.byte 197,204,84,247 // vandps %ymm7,%ymm6,%ymm6
.byte 197,252,91,246 // vcvtdq2ps %ymm6,%ymm6
- .byte 196,226,125,24,61,88,79,2,0 // vbroadcastss 0x24f58(%rip),%ymm7 # 38a44 <_sk_srcover_bgra_8888_sse2_lowp+0x340>
+ .byte 196,226,125,24,61,200,80,2,0 // vbroadcastss 0x250c8(%rip),%ymm7 # 38bb4 <_sk_srcover_bgra_8888_sse2_lowp+0x340>
.byte 197,204,89,247 // vmulps %ymm7,%ymm6,%ymm6
.byte 72,173 // lods %ds:(%rsi),%rax
- .byte 196,226,125,24,61,185,78,2,0 // vbroadcastss 0x24eb9(%rip),%ymm7 # 389b4 <_sk_srcover_bgra_8888_sse2_lowp+0x2b0>
+ .byte 196,226,125,24,61,41,80,2,0 // vbroadcastss 0x25029(%rip),%ymm7 # 38b24 <_sk_srcover_bgra_8888_sse2_lowp+0x2b0>
.byte 255,224 // jmpq *%rax
.byte 65,137,249 // mov %edi,%r9d
.byte 65,128,225,7 // and $0x7,%r9b
@@ -28819,23 +28819,23 @@ _sk_gather_565_avx:
.byte 197,249,112,192,78 // vpshufd $0x4e,%xmm0,%xmm0
.byte 196,226,121,51,192 // vpmovzxwd %xmm0,%xmm0
.byte 196,227,117,24,208,1 // vinsertf128 $0x1,%xmm0,%ymm1,%ymm2
- .byte 196,226,125,24,5,97,77,2,0 // vbroadcastss 0x24d61(%rip),%ymm0 # 38a30 <_sk_srcover_bgra_8888_sse2_lowp+0x32c>
+ .byte 196,226,125,24,5,209,78,2,0 // vbroadcastss 0x24ed1(%rip),%ymm0 # 38ba0 <_sk_srcover_bgra_8888_sse2_lowp+0x32c>
.byte 197,236,84,192 // vandps %ymm0,%ymm2,%ymm0
.byte 197,252,91,192 // vcvtdq2ps %ymm0,%ymm0
- .byte 196,226,125,24,13,84,77,2,0 // vbroadcastss 0x24d54(%rip),%ymm1 # 38a34 <_sk_srcover_bgra_8888_sse2_lowp+0x330>
+ .byte 196,226,125,24,13,196,78,2,0 // vbroadcastss 0x24ec4(%rip),%ymm1 # 38ba4 <_sk_srcover_bgra_8888_sse2_lowp+0x330>
.byte 197,252,89,193 // vmulps %ymm1,%ymm0,%ymm0
- .byte 196,226,125,24,13,75,77,2,0 // vbroadcastss 0x24d4b(%rip),%ymm1 # 38a38 <_sk_srcover_bgra_8888_sse2_lowp+0x334>
+ .byte 196,226,125,24,13,187,78,2,0 // vbroadcastss 0x24ebb(%rip),%ymm1 # 38ba8 <_sk_srcover_bgra_8888_sse2_lowp+0x334>
.byte 197,236,84,201 // vandps %ymm1,%ymm2,%ymm1
.byte 197,252,91,201 // vcvtdq2ps %ymm1,%ymm1
- .byte 196,226,125,24,29,62,77,2,0 // vbroadcastss 0x24d3e(%rip),%ymm3 # 38a3c <_sk_srcover_bgra_8888_sse2_lowp+0x338>
+ .byte 196,226,125,24,29,174,78,2,0 // vbroadcastss 0x24eae(%rip),%ymm3 # 38bac <_sk_srcover_bgra_8888_sse2_lowp+0x338>
.byte 197,244,89,203 // vmulps %ymm3,%ymm1,%ymm1
- .byte 196,226,125,24,29,53,77,2,0 // vbroadcastss 0x24d35(%rip),%ymm3 # 38a40 <_sk_srcover_bgra_8888_sse2_lowp+0x33c>
+ .byte 196,226,125,24,29,165,78,2,0 // vbroadcastss 0x24ea5(%rip),%ymm3 # 38bb0 <_sk_srcover_bgra_8888_sse2_lowp+0x33c>
.byte 197,236,84,211 // vandps %ymm3,%ymm2,%ymm2
.byte 197,252,91,210 // vcvtdq2ps %ymm2,%ymm2
- .byte 196,226,125,24,29,40,77,2,0 // vbroadcastss 0x24d28(%rip),%ymm3 # 38a44 <_sk_srcover_bgra_8888_sse2_lowp+0x340>
+ .byte 196,226,125,24,29,152,78,2,0 // vbroadcastss 0x24e98(%rip),%ymm3 # 38bb4 <_sk_srcover_bgra_8888_sse2_lowp+0x340>
.byte 197,236,89,211 // vmulps %ymm3,%ymm2,%ymm2
.byte 72,173 // lods %ds:(%rsi),%rax
- .byte 196,226,125,24,29,137,76,2,0 // vbroadcastss 0x24c89(%rip),%ymm3 # 389b4 <_sk_srcover_bgra_8888_sse2_lowp+0x2b0>
+ .byte 196,226,125,24,29,249,77,2,0 // vbroadcastss 0x24df9(%rip),%ymm3 # 38b24 <_sk_srcover_bgra_8888_sse2_lowp+0x2b0>
.byte 91 // pop %rbx
.byte 65,92 // pop %r12
.byte 65,94 // pop %r14
@@ -28854,14 +28854,14 @@ _sk_store_565_avx:
.byte 77,1,192 // add %r8,%r8
.byte 76,3,0 // add (%rax),%r8
.byte 76,99,218 // movslq %edx,%r11
- .byte 196,98,125,24,5,80,77,2,0 // vbroadcastss 0x24d50(%rip),%ymm8 # 38aa4 <_sk_srcover_bgra_8888_sse2_lowp+0x3a0>
+ .byte 196,98,125,24,5,192,78,2,0 // vbroadcastss 0x24ec0(%rip),%ymm8 # 38c14 <_sk_srcover_bgra_8888_sse2_lowp+0x3a0>
.byte 196,65,124,89,200 // vmulps %ymm8,%ymm0,%ymm9
.byte 196,65,125,91,201 // vcvtps2dq %ymm9,%ymm9
.byte 196,193,41,114,241,11 // vpslld $0xb,%xmm9,%xmm10
.byte 196,67,125,25,201,1 // vextractf128 $0x1,%ymm9,%xmm9
.byte 196,193,49,114,241,11 // vpslld $0xb,%xmm9,%xmm9
.byte 196,67,45,24,201,1 // vinsertf128 $0x1,%xmm9,%ymm10,%ymm9
- .byte 196,98,125,24,21,41,77,2,0 // vbroadcastss 0x24d29(%rip),%ymm10 # 38aa8 <_sk_srcover_bgra_8888_sse2_lowp+0x3a4>
+ .byte 196,98,125,24,21,153,78,2,0 // vbroadcastss 0x24e99(%rip),%ymm10 # 38c18 <_sk_srcover_bgra_8888_sse2_lowp+0x3a4>
.byte 196,65,116,89,210 // vmulps %ymm10,%ymm1,%ymm10
.byte 196,65,125,91,210 // vcvtps2dq %ymm10,%ymm10
.byte 196,193,33,114,242,5 // vpslld $0x5,%xmm10,%xmm11
@@ -28916,7 +28916,7 @@ _sk_store_565_avx:
.byte 255 // (bad)
.byte 255 // (bad)
.byte 255 // (bad)
- .byte 232,255,255,255,224 // callq ffffffffe1013e44 <_sk_srcover_bgra_8888_sse2_lowp+0xffffffffe0fdb740>
+ .byte 232,255,255,255,224 // callq ffffffffe1013e44 <_sk_srcover_bgra_8888_sse2_lowp+0xffffffffe0fdb5d0>
.byte 255 // (bad)
.byte 255 // (bad)
.byte 255 // .byte 0xff
@@ -28939,25 +28939,25 @@ _sk_load_4444_avx:
.byte 197,249,112,192,78 // vpshufd $0x4e,%xmm0,%xmm0
.byte 196,226,121,51,192 // vpmovzxwd %xmm0,%xmm0
.byte 196,227,117,24,216,1 // vinsertf128 $0x1,%xmm0,%ymm1,%ymm3
- .byte 196,226,125,24,5,33,76,2,0 // vbroadcastss 0x24c21(%rip),%ymm0 # 38aac <_sk_srcover_bgra_8888_sse2_lowp+0x3a8>
+ .byte 196,226,125,24,5,145,77,2,0 // vbroadcastss 0x24d91(%rip),%ymm0 # 38c1c <_sk_srcover_bgra_8888_sse2_lowp+0x3a8>
.byte 197,228,84,192 // vandps %ymm0,%ymm3,%ymm0
.byte 197,252,91,192 // vcvtdq2ps %ymm0,%ymm0
- .byte 196,226,125,24,13,20,76,2,0 // vbroadcastss 0x24c14(%rip),%ymm1 # 38ab0 <_sk_srcover_bgra_8888_sse2_lowp+0x3ac>
+ .byte 196,226,125,24,13,132,77,2,0 // vbroadcastss 0x24d84(%rip),%ymm1 # 38c20 <_sk_srcover_bgra_8888_sse2_lowp+0x3ac>
.byte 197,252,89,193 // vmulps %ymm1,%ymm0,%ymm0
- .byte 196,226,125,24,13,11,76,2,0 // vbroadcastss 0x24c0b(%rip),%ymm1 # 38ab4 <_sk_srcover_bgra_8888_sse2_lowp+0x3b0>
+ .byte 196,226,125,24,13,123,77,2,0 // vbroadcastss 0x24d7b(%rip),%ymm1 # 38c24 <_sk_srcover_bgra_8888_sse2_lowp+0x3b0>
.byte 197,228,84,201 // vandps %ymm1,%ymm3,%ymm1
.byte 197,252,91,201 // vcvtdq2ps %ymm1,%ymm1
- .byte 196,226,125,24,21,254,75,2,0 // vbroadcastss 0x24bfe(%rip),%ymm2 # 38ab8 <_sk_srcover_bgra_8888_sse2_lowp+0x3b4>
+ .byte 196,226,125,24,21,110,77,2,0 // vbroadcastss 0x24d6e(%rip),%ymm2 # 38c28 <_sk_srcover_bgra_8888_sse2_lowp+0x3b4>
.byte 197,244,89,202 // vmulps %ymm2,%ymm1,%ymm1
- .byte 196,226,125,24,21,245,75,2,0 // vbroadcastss 0x24bf5(%rip),%ymm2 # 38abc <_sk_srcover_bgra_8888_sse2_lowp+0x3b8>
+ .byte 196,226,125,24,21,101,77,2,0 // vbroadcastss 0x24d65(%rip),%ymm2 # 38c2c <_sk_srcover_bgra_8888_sse2_lowp+0x3b8>
.byte 197,228,84,210 // vandps %ymm2,%ymm3,%ymm2
.byte 197,252,91,210 // vcvtdq2ps %ymm2,%ymm2
- .byte 196,98,125,24,5,232,75,2,0 // vbroadcastss 0x24be8(%rip),%ymm8 # 38ac0 <_sk_srcover_bgra_8888_sse2_lowp+0x3bc>
+ .byte 196,98,125,24,5,88,77,2,0 // vbroadcastss 0x24d58(%rip),%ymm8 # 38c30 <_sk_srcover_bgra_8888_sse2_lowp+0x3bc>
.byte 196,193,108,89,208 // vmulps %ymm8,%ymm2,%ymm2
- .byte 196,98,125,24,5,222,75,2,0 // vbroadcastss 0x24bde(%rip),%ymm8 # 38ac4 <_sk_srcover_bgra_8888_sse2_lowp+0x3c0>
+ .byte 196,98,125,24,5,78,77,2,0 // vbroadcastss 0x24d4e(%rip),%ymm8 # 38c34 <_sk_srcover_bgra_8888_sse2_lowp+0x3c0>
.byte 196,193,100,84,216 // vandps %ymm8,%ymm3,%ymm3
.byte 197,252,91,219 // vcvtdq2ps %ymm3,%ymm3
- .byte 196,98,125,24,5,208,75,2,0 // vbroadcastss 0x24bd0(%rip),%ymm8 # 38ac8 <_sk_srcover_bgra_8888_sse2_lowp+0x3c4>
+ .byte 196,98,125,24,5,64,77,2,0 // vbroadcastss 0x24d40(%rip),%ymm8 # 38c38 <_sk_srcover_bgra_8888_sse2_lowp+0x3c4>
.byte 196,193,100,89,216 // vmulps %ymm8,%ymm3,%ymm3
.byte 72,173 // lods %ds:(%rsi),%rax
.byte 255,224 // jmpq *%rax
@@ -29028,25 +29028,25 @@ _sk_load_4444_dst_avx:
.byte 197,249,112,228,78 // vpshufd $0x4e,%xmm4,%xmm4
.byte 196,226,121,51,228 // vpmovzxwd %xmm4,%xmm4
.byte 196,227,85,24,252,1 // vinsertf128 $0x1,%xmm4,%ymm5,%ymm7
- .byte 196,226,125,24,37,197,74,2,0 // vbroadcastss 0x24ac5(%rip),%ymm4 # 38aac <_sk_srcover_bgra_8888_sse2_lowp+0x3a8>
+ .byte 196,226,125,24,37,53,76,2,0 // vbroadcastss 0x24c35(%rip),%ymm4 # 38c1c <_sk_srcover_bgra_8888_sse2_lowp+0x3a8>
.byte 197,196,84,228 // vandps %ymm4,%ymm7,%ymm4
.byte 197,252,91,228 // vcvtdq2ps %ymm4,%ymm4
- .byte 196,226,125,24,45,184,74,2,0 // vbroadcastss 0x24ab8(%rip),%ymm5 # 38ab0 <_sk_srcover_bgra_8888_sse2_lowp+0x3ac>
+ .byte 196,226,125,24,45,40,76,2,0 // vbroadcastss 0x24c28(%rip),%ymm5 # 38c20 <_sk_srcover_bgra_8888_sse2_lowp+0x3ac>
.byte 197,220,89,229 // vmulps %ymm5,%ymm4,%ymm4
- .byte 196,226,125,24,45,175,74,2,0 // vbroadcastss 0x24aaf(%rip),%ymm5 # 38ab4 <_sk_srcover_bgra_8888_sse2_lowp+0x3b0>
+ .byte 196,226,125,24,45,31,76,2,0 // vbroadcastss 0x24c1f(%rip),%ymm5 # 38c24 <_sk_srcover_bgra_8888_sse2_lowp+0x3b0>
.byte 197,196,84,237 // vandps %ymm5,%ymm7,%ymm5
.byte 197,252,91,237 // vcvtdq2ps %ymm5,%ymm5
- .byte 196,226,125,24,53,162,74,2,0 // vbroadcastss 0x24aa2(%rip),%ymm6 # 38ab8 <_sk_srcover_bgra_8888_sse2_lowp+0x3b4>
+ .byte 196,226,125,24,53,18,76,2,0 // vbroadcastss 0x24c12(%rip),%ymm6 # 38c28 <_sk_srcover_bgra_8888_sse2_lowp+0x3b4>
.byte 197,212,89,238 // vmulps %ymm6,%ymm5,%ymm5
- .byte 196,226,125,24,53,153,74,2,0 // vbroadcastss 0x24a99(%rip),%ymm6 # 38abc <_sk_srcover_bgra_8888_sse2_lowp+0x3b8>
+ .byte 196,226,125,24,53,9,76,2,0 // vbroadcastss 0x24c09(%rip),%ymm6 # 38c2c <_sk_srcover_bgra_8888_sse2_lowp+0x3b8>
.byte 197,196,84,246 // vandps %ymm6,%ymm7,%ymm6
.byte 197,252,91,246 // vcvtdq2ps %ymm6,%ymm6
- .byte 196,98,125,24,5,140,74,2,0 // vbroadcastss 0x24a8c(%rip),%ymm8 # 38ac0 <_sk_srcover_bgra_8888_sse2_lowp+0x3bc>
+ .byte 196,98,125,24,5,252,75,2,0 // vbroadcastss 0x24bfc(%rip),%ymm8 # 38c30 <_sk_srcover_bgra_8888_sse2_lowp+0x3bc>
.byte 196,193,76,89,240 // vmulps %ymm8,%ymm6,%ymm6
- .byte 196,98,125,24,5,130,74,2,0 // vbroadcastss 0x24a82(%rip),%ymm8 # 38ac4 <_sk_srcover_bgra_8888_sse2_lowp+0x3c0>
+ .byte 196,98,125,24,5,242,75,2,0 // vbroadcastss 0x24bf2(%rip),%ymm8 # 38c34 <_sk_srcover_bgra_8888_sse2_lowp+0x3c0>
.byte 196,193,68,84,248 // vandps %ymm8,%ymm7,%ymm7
.byte 197,252,91,255 // vcvtdq2ps %ymm7,%ymm7
- .byte 196,98,125,24,5,116,74,2,0 // vbroadcastss 0x24a74(%rip),%ymm8 # 38ac8 <_sk_srcover_bgra_8888_sse2_lowp+0x3c4>
+ .byte 196,98,125,24,5,228,75,2,0 // vbroadcastss 0x24be4(%rip),%ymm8 # 38c38 <_sk_srcover_bgra_8888_sse2_lowp+0x3c4>
.byte 196,193,68,89,248 // vmulps %ymm8,%ymm7,%ymm7
.byte 72,173 // lods %ds:(%rsi),%rax
.byte 255,224 // jmpq *%rax
@@ -29168,25 +29168,25 @@ _sk_gather_4444_avx:
.byte 197,249,112,192,78 // vpshufd $0x4e,%xmm0,%xmm0
.byte 196,226,121,51,192 // vpmovzxwd %xmm0,%xmm0
.byte 196,227,117,24,216,1 // vinsertf128 $0x1,%xmm0,%ymm1,%ymm3
- .byte 196,226,125,24,5,125,72,2,0 // vbroadcastss 0x2487d(%rip),%ymm0 # 38aac <_sk_srcover_bgra_8888_sse2_lowp+0x3a8>
+ .byte 196,226,125,24,5,237,73,2,0 // vbroadcastss 0x249ed(%rip),%ymm0 # 38c1c <_sk_srcover_bgra_8888_sse2_lowp+0x3a8>
.byte 197,228,84,192 // vandps %ymm0,%ymm3,%ymm0
.byte 197,252,91,192 // vcvtdq2ps %ymm0,%ymm0
- .byte 196,226,125,24,13,112,72,2,0 // vbroadcastss 0x24870(%rip),%ymm1 # 38ab0 <_sk_srcover_bgra_8888_sse2_lowp+0x3ac>
+ .byte 196,226,125,24,13,224,73,2,0 // vbroadcastss 0x249e0(%rip),%ymm1 # 38c20 <_sk_srcover_bgra_8888_sse2_lowp+0x3ac>
.byte 197,252,89,193 // vmulps %ymm1,%ymm0,%ymm0
- .byte 196,226,125,24,13,103,72,2,0 // vbroadcastss 0x24867(%rip),%ymm1 # 38ab4 <_sk_srcover_bgra_8888_sse2_lowp+0x3b0>
+ .byte 196,226,125,24,13,215,73,2,0 // vbroadcastss 0x249d7(%rip),%ymm1 # 38c24 <_sk_srcover_bgra_8888_sse2_lowp+0x3b0>
.byte 197,228,84,201 // vandps %ymm1,%ymm3,%ymm1
.byte 197,252,91,201 // vcvtdq2ps %ymm1,%ymm1
- .byte 196,226,125,24,21,90,72,2,0 // vbroadcastss 0x2485a(%rip),%ymm2 # 38ab8 <_sk_srcover_bgra_8888_sse2_lowp+0x3b4>
+ .byte 196,226,125,24,21,202,73,2,0 // vbroadcastss 0x249ca(%rip),%ymm2 # 38c28 <_sk_srcover_bgra_8888_sse2_lowp+0x3b4>
.byte 197,244,89,202 // vmulps %ymm2,%ymm1,%ymm1
- .byte 196,226,125,24,21,81,72,2,0 // vbroadcastss 0x24851(%rip),%ymm2 # 38abc <_sk_srcover_bgra_8888_sse2_lowp+0x3b8>
+ .byte 196,226,125,24,21,193,73,2,0 // vbroadcastss 0x249c1(%rip),%ymm2 # 38c2c <_sk_srcover_bgra_8888_sse2_lowp+0x3b8>
.byte 197,228,84,210 // vandps %ymm2,%ymm3,%ymm2
.byte 197,252,91,210 // vcvtdq2ps %ymm2,%ymm2
- .byte 196,98,125,24,5,68,72,2,0 // vbroadcastss 0x24844(%rip),%ymm8 # 38ac0 <_sk_srcover_bgra_8888_sse2_lowp+0x3bc>
+ .byte 196,98,125,24,5,180,73,2,0 // vbroadcastss 0x249b4(%rip),%ymm8 # 38c30 <_sk_srcover_bgra_8888_sse2_lowp+0x3bc>
.byte 196,193,108,89,208 // vmulps %ymm8,%ymm2,%ymm2
- .byte 196,98,125,24,5,58,72,2,0 // vbroadcastss 0x2483a(%rip),%ymm8 # 38ac4 <_sk_srcover_bgra_8888_sse2_lowp+0x3c0>
+ .byte 196,98,125,24,5,170,73,2,0 // vbroadcastss 0x249aa(%rip),%ymm8 # 38c34 <_sk_srcover_bgra_8888_sse2_lowp+0x3c0>
.byte 196,193,100,84,216 // vandps %ymm8,%ymm3,%ymm3
.byte 197,252,91,219 // vcvtdq2ps %ymm3,%ymm3
- .byte 196,98,125,24,5,44,72,2,0 // vbroadcastss 0x2482c(%rip),%ymm8 # 38ac8 <_sk_srcover_bgra_8888_sse2_lowp+0x3c4>
+ .byte 196,98,125,24,5,156,73,2,0 // vbroadcastss 0x2499c(%rip),%ymm8 # 38c38 <_sk_srcover_bgra_8888_sse2_lowp+0x3c4>
.byte 196,193,100,89,216 // vmulps %ymm8,%ymm3,%ymm3
.byte 72,173 // lods %ds:(%rsi),%rax
.byte 91 // pop %rbx
@@ -29207,7 +29207,7 @@ _sk_store_4444_avx:
.byte 77,1,192 // add %r8,%r8
.byte 76,3,0 // add (%rax),%r8
.byte 76,99,218 // movslq %edx,%r11
- .byte 196,98,125,24,5,0,72,2,0 // vbroadcastss 0x24800(%rip),%ymm8 # 38acc <_sk_srcover_bgra_8888_sse2_lowp+0x3c8>
+ .byte 196,98,125,24,5,112,73,2,0 // vbroadcastss 0x24970(%rip),%ymm8 # 38c3c <_sk_srcover_bgra_8888_sse2_lowp+0x3c8>
.byte 196,65,124,89,200 // vmulps %ymm8,%ymm0,%ymm9
.byte 196,65,125,91,201 // vcvtps2dq %ymm9,%ymm9
.byte 196,193,41,114,241,12 // vpslld $0xc,%xmm9,%xmm10
@@ -29295,10 +29295,10 @@ _sk_load_8888_avx:
.byte 72,133,255 // test %rdi,%rdi
.byte 15,133,135,0,0,0 // jne 14487 <_sk_load_8888_avx+0xa7>
.byte 196,1,125,16,12,152 // vmovupd (%r8,%r11,4),%ymm9
- .byte 197,125,40,21,82,75,2,0 // vmovapd 0x24b52(%rip),%ymm10 # 38f60 <_sk_srcover_bgra_8888_sse2_lowp+0x85c>
+ .byte 197,125,40,21,178,76,2,0 // vmovapd 0x24cb2(%rip),%ymm10 # 390c0 <_sk_srcover_bgra_8888_sse2_lowp+0x84c>
.byte 196,193,53,84,194 // vandpd %ymm10,%ymm9,%ymm0
.byte 197,252,91,192 // vcvtdq2ps %ymm0,%ymm0
- .byte 196,98,125,24,5,12,70,2,0 // vbroadcastss 0x2460c(%rip),%ymm8 # 38a2c <_sk_srcover_bgra_8888_sse2_lowp+0x328>
+ .byte 196,98,125,24,5,124,71,2,0 // vbroadcastss 0x2477c(%rip),%ymm8 # 38b9c <_sk_srcover_bgra_8888_sse2_lowp+0x328>
.byte 196,193,124,89,192 // vmulps %ymm8,%ymm0,%ymm0
.byte 196,193,113,114,209,8 // vpsrld $0x8,%xmm9,%xmm1
.byte 196,99,125,25,203,1 // vextractf128 $0x1,%ymm9,%xmm3
@@ -29390,10 +29390,10 @@ _sk_load_8888_dst_avx:
.byte 72,133,255 // test %rdi,%rdi
.byte 15,133,135,0,0,0 // jne 145ff <_sk_load_8888_dst_avx+0xa7>
.byte 196,1,125,16,12,152 // vmovupd (%r8,%r11,4),%ymm9
- .byte 197,125,40,21,250,73,2,0 // vmovapd 0x249fa(%rip),%ymm10 # 38f80 <_sk_srcover_bgra_8888_sse2_lowp+0x87c>
+ .byte 197,125,40,21,90,75,2,0 // vmovapd 0x24b5a(%rip),%ymm10 # 390e0 <_sk_srcover_bgra_8888_sse2_lowp+0x86c>
.byte 196,193,53,84,226 // vandpd %ymm10,%ymm9,%ymm4
.byte 197,252,91,228 // vcvtdq2ps %ymm4,%ymm4
- .byte 196,98,125,24,5,148,68,2,0 // vbroadcastss 0x24494(%rip),%ymm8 # 38a2c <_sk_srcover_bgra_8888_sse2_lowp+0x328>
+ .byte 196,98,125,24,5,4,70,2,0 // vbroadcastss 0x24604(%rip),%ymm8 # 38b9c <_sk_srcover_bgra_8888_sse2_lowp+0x328>
.byte 196,193,92,89,224 // vmulps %ymm8,%ymm4,%ymm4
.byte 196,193,81,114,209,8 // vpsrld $0x8,%xmm9,%xmm5
.byte 196,99,125,25,207,1 // vextractf128 $0x1,%ymm9,%xmm7
@@ -29528,10 +29528,10 @@ _sk_gather_8888_avx:
.byte 196,131,121,34,4,176,2 // vpinsrd $0x2,(%r8,%r14,4),%xmm0,%xmm0
.byte 196,195,121,34,28,152,3 // vpinsrd $0x3,(%r8,%rbx,4),%xmm0,%xmm3
.byte 196,227,61,24,195,1 // vinsertf128 $0x1,%xmm3,%ymm8,%ymm0
- .byte 197,124,40,21,204,71,2,0 // vmovaps 0x247cc(%rip),%ymm10 # 38fa0 <_sk_srcover_bgra_8888_sse2_lowp+0x89c>
+ .byte 197,124,40,21,44,73,2,0 // vmovaps 0x2492c(%rip),%ymm10 # 39100 <_sk_srcover_bgra_8888_sse2_lowp+0x88c>
.byte 196,193,124,84,194 // vandps %ymm10,%ymm0,%ymm0
.byte 197,252,91,192 // vcvtdq2ps %ymm0,%ymm0
- .byte 196,98,125,24,13,70,66,2,0 // vbroadcastss 0x24246(%rip),%ymm9 # 38a2c <_sk_srcover_bgra_8888_sse2_lowp+0x328>
+ .byte 196,98,125,24,13,182,67,2,0 // vbroadcastss 0x243b6(%rip),%ymm9 # 38b9c <_sk_srcover_bgra_8888_sse2_lowp+0x328>
.byte 196,193,124,89,193 // vmulps %ymm9,%ymm0,%ymm0
.byte 196,193,113,114,208,8 // vpsrld $0x8,%xmm8,%xmm1
.byte 197,233,114,211,8 // vpsrld $0x8,%xmm3,%xmm2
@@ -29568,7 +29568,7 @@ _sk_store_8888_avx:
.byte 73,193,224,2 // shl $0x2,%r8
.byte 76,3,0 // add (%rax),%r8
.byte 76,99,218 // movslq %edx,%r11
- .byte 196,98,125,24,5,114,65,2,0 // vbroadcastss 0x24172(%rip),%ymm8 # 389e0 <_sk_srcover_bgra_8888_sse2_lowp+0x2dc>
+ .byte 196,98,125,24,5,226,66,2,0 // vbroadcastss 0x242e2(%rip),%ymm8 # 38b50 <_sk_srcover_bgra_8888_sse2_lowp+0x2dc>
.byte 196,65,124,89,200 // vmulps %ymm8,%ymm0,%ymm9
.byte 196,65,125,91,201 // vcvtps2dq %ymm9,%ymm9
.byte 196,65,116,89,208 // vmulps %ymm8,%ymm1,%ymm10
@@ -29631,7 +29631,7 @@ _sk_store_8888_avx:
.byte 255 // (bad)
.byte 255 // (bad)
.byte 255 // (bad)
- .byte 232,255,255,255,218 // callq ffffffffdb014980 <_sk_srcover_bgra_8888_sse2_lowp+0xffffffffdafdc27c>
+ .byte 232,255,255,255,218 // callq ffffffffdb014980 <_sk_srcover_bgra_8888_sse2_lowp+0xffffffffdafdc10c>
.byte 255 // (bad)
.byte 255 // (bad)
.byte 255,204 // dec %esp
@@ -29653,10 +29653,10 @@ _sk_load_bgra_avx:
.byte 72,133,255 // test %rdi,%rdi
.byte 15,133,135,0,0,0 // jne 14a2f <_sk_load_bgra_avx+0xa7>
.byte 196,1,125,16,12,152 // vmovupd (%r8,%r11,4),%ymm9
- .byte 197,125,40,21,10,70,2,0 // vmovapd 0x2460a(%rip),%ymm10 # 38fc0 <_sk_srcover_bgra_8888_sse2_lowp+0x8bc>
+ .byte 197,125,40,21,106,71,2,0 // vmovapd 0x2476a(%rip),%ymm10 # 39120 <_sk_srcover_bgra_8888_sse2_lowp+0x8ac>
.byte 196,193,53,84,202 // vandpd %ymm10,%ymm9,%ymm1
.byte 197,252,91,201 // vcvtdq2ps %ymm1,%ymm1
- .byte 196,98,125,24,5,100,64,2,0 // vbroadcastss 0x24064(%rip),%ymm8 # 38a2c <_sk_srcover_bgra_8888_sse2_lowp+0x328>
+ .byte 196,98,125,24,5,212,65,2,0 // vbroadcastss 0x241d4(%rip),%ymm8 # 38b9c <_sk_srcover_bgra_8888_sse2_lowp+0x328>
.byte 196,193,116,89,208 // vmulps %ymm8,%ymm1,%ymm2
.byte 196,193,113,114,209,8 // vpsrld $0x8,%xmm9,%xmm1
.byte 196,99,125,25,203,1 // vextractf128 $0x1,%ymm9,%xmm3
@@ -29748,10 +29748,10 @@ _sk_load_bgra_dst_avx:
.byte 72,133,255 // test %rdi,%rdi
.byte 15,133,135,0,0,0 // jne 14ba7 <_sk_load_bgra_dst_avx+0xa7>
.byte 196,1,125,16,12,152 // vmovupd (%r8,%r11,4),%ymm9
- .byte 197,125,40,21,178,68,2,0 // vmovapd 0x244b2(%rip),%ymm10 # 38fe0 <_sk_srcover_bgra_8888_sse2_lowp+0x8dc>
+ .byte 197,125,40,21,18,70,2,0 // vmovapd 0x24612(%rip),%ymm10 # 39140 <_sk_srcover_bgra_8888_sse2_lowp+0x8cc>
.byte 196,193,53,84,234 // vandpd %ymm10,%ymm9,%ymm5
.byte 197,252,91,237 // vcvtdq2ps %ymm5,%ymm5
- .byte 196,98,125,24,5,236,62,2,0 // vbroadcastss 0x23eec(%rip),%ymm8 # 38a2c <_sk_srcover_bgra_8888_sse2_lowp+0x328>
+ .byte 196,98,125,24,5,92,64,2,0 // vbroadcastss 0x2405c(%rip),%ymm8 # 38b9c <_sk_srcover_bgra_8888_sse2_lowp+0x328>
.byte 196,193,84,89,240 // vmulps %ymm8,%ymm5,%ymm6
.byte 196,193,81,114,209,8 // vpsrld $0x8,%xmm9,%xmm5
.byte 196,99,125,25,207,1 // vextractf128 $0x1,%ymm9,%xmm7
@@ -29886,10 +29886,10 @@ _sk_gather_bgra_avx:
.byte 196,131,121,34,4,176,2 // vpinsrd $0x2,(%r8,%r14,4),%xmm0,%xmm0
.byte 196,195,121,34,28,152,3 // vpinsrd $0x3,(%r8,%rbx,4),%xmm0,%xmm3
.byte 196,227,61,24,195,1 // vinsertf128 $0x1,%xmm3,%ymm8,%ymm0
- .byte 197,124,40,13,132,66,2,0 // vmovaps 0x24284(%rip),%ymm9 # 39000 <_sk_srcover_bgra_8888_sse2_lowp+0x8fc>
+ .byte 197,124,40,13,228,67,2,0 // vmovaps 0x243e4(%rip),%ymm9 # 39160 <_sk_srcover_bgra_8888_sse2_lowp+0x8ec>
.byte 196,193,124,84,193 // vandps %ymm9,%ymm0,%ymm0
.byte 197,252,91,192 // vcvtdq2ps %ymm0,%ymm0
- .byte 196,98,125,24,21,158,60,2,0 // vbroadcastss 0x23c9e(%rip),%ymm10 # 38a2c <_sk_srcover_bgra_8888_sse2_lowp+0x328>
+ .byte 196,98,125,24,21,14,62,2,0 // vbroadcastss 0x23e0e(%rip),%ymm10 # 38b9c <_sk_srcover_bgra_8888_sse2_lowp+0x328>
.byte 196,193,124,89,210 // vmulps %ymm10,%ymm0,%ymm2
.byte 196,193,121,114,208,8 // vpsrld $0x8,%xmm8,%xmm0
.byte 197,241,114,211,8 // vpsrld $0x8,%xmm3,%xmm1
@@ -29926,7 +29926,7 @@ _sk_store_bgra_avx:
.byte 73,193,224,2 // shl $0x2,%r8
.byte 76,3,0 // add (%rax),%r8
.byte 76,99,218 // movslq %edx,%r11
- .byte 196,98,125,24,5,202,59,2,0 // vbroadcastss 0x23bca(%rip),%ymm8 # 389e0 <_sk_srcover_bgra_8888_sse2_lowp+0x2dc>
+ .byte 196,98,125,24,5,58,61,2,0 // vbroadcastss 0x23d3a(%rip),%ymm8 # 38b50 <_sk_srcover_bgra_8888_sse2_lowp+0x2dc>
.byte 196,65,108,89,200 // vmulps %ymm8,%ymm2,%ymm9
.byte 196,65,125,91,201 // vcvtps2dq %ymm9,%ymm9
.byte 196,65,116,89,208 // vmulps %ymm8,%ymm1,%ymm10
@@ -29989,7 +29989,7 @@ _sk_store_bgra_avx:
.byte 255 // (bad)
.byte 255 // (bad)
.byte 255 // (bad)
- .byte 232,255,255,255,218 // callq ffffffffdb014f28 <_sk_srcover_bgra_8888_sse2_lowp+0xffffffffdafdc824>
+ .byte 232,255,255,255,218 // callq ffffffffdb014f28 <_sk_srcover_bgra_8888_sse2_lowp+0xffffffffdafdc6b4>
.byte 255 // (bad)
.byte 255 // (bad)
.byte 255,204 // dec %esp
@@ -30031,14 +30031,14 @@ _sk_load_f16_avx:
.byte 196,226,121,51,200 // vpmovzxwd %xmm0,%xmm1
.byte 197,249,112,192,78 // vpshufd $0x4e,%xmm0,%xmm0
.byte 196,226,121,51,192 // vpmovzxwd %xmm0,%xmm0
- .byte 197,123,18,21,99,75,2,0 // vmovddup 0x24b63(%rip),%xmm10 # 39b28 <_sk_srcover_bgra_8888_sse2_lowp+0x1424>
+ .byte 197,123,18,21,195,76,2,0 // vmovddup 0x24cc3(%rip),%xmm10 # 39c88 <_sk_srcover_bgra_8888_sse2_lowp+0x1414>
.byte 196,193,121,219,218 // vpand %xmm10,%xmm0,%xmm3
- .byte 196,98,121,24,29,105,59,2,0 // vbroadcastss 0x23b69(%rip),%xmm11 # 38b3c <_sk_srcover_bgra_8888_sse2_lowp+0x438>
+ .byte 196,98,121,24,29,217,60,2,0 // vbroadcastss 0x23cd9(%rip),%xmm11 # 38cac <_sk_srcover_bgra_8888_sse2_lowp+0x438>
.byte 197,33,102,227 // vpcmpgtd %xmm3,%xmm11,%xmm12
.byte 196,193,113,219,210 // vpand %xmm10,%xmm1,%xmm2
.byte 197,33,102,234 // vpcmpgtd %xmm2,%xmm11,%xmm13
.byte 196,195,21,24,252,1 // vinsertf128 $0x1,%xmm12,%ymm13,%ymm7
- .byte 197,123,18,37,66,75,2,0 // vmovddup 0x24b42(%rip),%xmm12 # 39b30 <_sk_srcover_bgra_8888_sse2_lowp+0x142c>
+ .byte 197,123,18,37,162,76,2,0 // vmovddup 0x24ca2(%rip),%xmm12 # 39c90 <_sk_srcover_bgra_8888_sse2_lowp+0x141c>
.byte 196,193,113,219,204 // vpand %xmm12,%xmm1,%xmm1
.byte 197,241,114,241,16 // vpslld $0x10,%xmm1,%xmm1
.byte 196,193,121,219,196 // vpand %xmm12,%xmm0,%xmm0
@@ -30047,7 +30047,7 @@ _sk_load_f16_avx:
.byte 197,241,235,202 // vpor %xmm2,%xmm1,%xmm1
.byte 197,233,114,243,13 // vpslld $0xd,%xmm3,%xmm2
.byte 197,249,235,194 // vpor %xmm2,%xmm0,%xmm0
- .byte 196,226,121,24,29,35,59,2,0 // vbroadcastss 0x23b23(%rip),%xmm3 # 38b40 <_sk_srcover_bgra_8888_sse2_lowp+0x43c>
+ .byte 196,226,121,24,29,147,60,2,0 // vbroadcastss 0x23c93(%rip),%xmm3 # 38cb0 <_sk_srcover_bgra_8888_sse2_lowp+0x43c>
.byte 197,249,254,195 // vpaddd %xmm3,%xmm0,%xmm0
.byte 197,241,254,203 // vpaddd %xmm3,%xmm1,%xmm1
.byte 196,227,117,24,192,1 // vinsertf128 $0x1,%xmm0,%ymm1,%ymm0
@@ -30184,14 +30184,14 @@ _sk_load_f16_dst_avx:
.byte 196,226,121,51,236 // vpmovzxwd %xmm4,%xmm5
.byte 197,249,112,228,78 // vpshufd $0x4e,%xmm4,%xmm4
.byte 196,226,121,51,228 // vpmovzxwd %xmm4,%xmm4
- .byte 197,123,18,21,140,72,2,0 // vmovddup 0x2488c(%rip),%xmm10 # 39b28 <_sk_srcover_bgra_8888_sse2_lowp+0x1424>
+ .byte 197,123,18,21,236,73,2,0 // vmovddup 0x249ec(%rip),%xmm10 # 39c88 <_sk_srcover_bgra_8888_sse2_lowp+0x1414>
.byte 196,193,89,219,250 // vpand %xmm10,%xmm4,%xmm7
- .byte 196,98,121,24,29,146,56,2,0 // vbroadcastss 0x23892(%rip),%xmm11 # 38b3c <_sk_srcover_bgra_8888_sse2_lowp+0x438>
+ .byte 196,98,121,24,29,2,58,2,0 // vbroadcastss 0x23a02(%rip),%xmm11 # 38cac <_sk_srcover_bgra_8888_sse2_lowp+0x438>
.byte 197,33,102,231 // vpcmpgtd %xmm7,%xmm11,%xmm12
.byte 196,193,81,219,242 // vpand %xmm10,%xmm5,%xmm6
.byte 197,33,102,238 // vpcmpgtd %xmm6,%xmm11,%xmm13
.byte 196,195,21,24,220,1 // vinsertf128 $0x1,%xmm12,%ymm13,%ymm3
- .byte 197,123,18,37,107,72,2,0 // vmovddup 0x2486b(%rip),%xmm12 # 39b30 <_sk_srcover_bgra_8888_sse2_lowp+0x142c>
+ .byte 197,123,18,37,203,73,2,0 // vmovddup 0x249cb(%rip),%xmm12 # 39c90 <_sk_srcover_bgra_8888_sse2_lowp+0x141c>
.byte 196,193,81,219,236 // vpand %xmm12,%xmm5,%xmm5
.byte 197,209,114,245,16 // vpslld $0x10,%xmm5,%xmm5
.byte 196,193,89,219,228 // vpand %xmm12,%xmm4,%xmm4
@@ -30200,7 +30200,7 @@ _sk_load_f16_dst_avx:
.byte 197,209,235,238 // vpor %xmm6,%xmm5,%xmm5
.byte 197,201,114,247,13 // vpslld $0xd,%xmm7,%xmm6
.byte 197,217,235,230 // vpor %xmm6,%xmm4,%xmm4
- .byte 196,226,121,24,61,76,56,2,0 // vbroadcastss 0x2384c(%rip),%xmm7 # 38b40 <_sk_srcover_bgra_8888_sse2_lowp+0x43c>
+ .byte 196,226,121,24,61,188,57,2,0 // vbroadcastss 0x239bc(%rip),%xmm7 # 38cb0 <_sk_srcover_bgra_8888_sse2_lowp+0x43c>
.byte 197,217,254,231 // vpaddd %xmm7,%xmm4,%xmm4
.byte 197,209,254,239 // vpaddd %xmm7,%xmm5,%xmm5
.byte 196,227,85,24,228,1 // vinsertf128 $0x1,%xmm4,%ymm5,%ymm4
@@ -30380,14 +30380,14 @@ _sk_gather_f16_avx:
.byte 196,226,121,51,208 // vpmovzxwd %xmm0,%xmm2
.byte 197,249,112,192,78 // vpshufd $0x4e,%xmm0,%xmm0
.byte 196,226,121,51,192 // vpmovzxwd %xmm0,%xmm0
- .byte 197,123,18,29,242,68,2,0 // vmovddup 0x244f2(%rip),%xmm11 # 39b28 <_sk_srcover_bgra_8888_sse2_lowp+0x1424>
+ .byte 197,123,18,29,82,70,2,0 // vmovddup 0x24652(%rip),%xmm11 # 39c88 <_sk_srcover_bgra_8888_sse2_lowp+0x1414>
.byte 196,193,121,219,219 // vpand %xmm11,%xmm0,%xmm3
- .byte 196,98,121,24,37,248,52,2,0 // vbroadcastss 0x234f8(%rip),%xmm12 # 38b3c <_sk_srcover_bgra_8888_sse2_lowp+0x438>
+ .byte 196,98,121,24,37,104,54,2,0 // vbroadcastss 0x23668(%rip),%xmm12 # 38cac <_sk_srcover_bgra_8888_sse2_lowp+0x438>
.byte 197,25,102,235 // vpcmpgtd %xmm3,%xmm12,%xmm13
.byte 196,193,105,219,203 // vpand %xmm11,%xmm2,%xmm1
.byte 197,25,102,193 // vpcmpgtd %xmm1,%xmm12,%xmm8
.byte 196,67,61,24,197,1 // vinsertf128 $0x1,%xmm13,%ymm8,%ymm8
- .byte 197,123,18,45,209,68,2,0 // vmovddup 0x244d1(%rip),%xmm13 # 39b30 <_sk_srcover_bgra_8888_sse2_lowp+0x142c>
+ .byte 197,123,18,45,49,70,2,0 // vmovddup 0x24631(%rip),%xmm13 # 39c90 <_sk_srcover_bgra_8888_sse2_lowp+0x141c>
.byte 196,193,105,219,213 // vpand %xmm13,%xmm2,%xmm2
.byte 197,233,114,242,16 // vpslld $0x10,%xmm2,%xmm2
.byte 197,241,114,241,13 // vpslld $0xd,%xmm1,%xmm1
@@ -30396,7 +30396,7 @@ _sk_gather_f16_avx:
.byte 197,249,114,240,16 // vpslld $0x10,%xmm0,%xmm0
.byte 197,233,114,243,13 // vpslld $0xd,%xmm3,%xmm2
.byte 197,249,235,194 // vpor %xmm2,%xmm0,%xmm0
- .byte 196,226,121,24,29,178,52,2,0 // vbroadcastss 0x234b2(%rip),%xmm3 # 38b40 <_sk_srcover_bgra_8888_sse2_lowp+0x43c>
+ .byte 196,226,121,24,29,34,54,2,0 // vbroadcastss 0x23622(%rip),%xmm3 # 38cb0 <_sk_srcover_bgra_8888_sse2_lowp+0x43c>
.byte 197,249,254,195 // vpaddd %xmm3,%xmm0,%xmm0
.byte 197,241,254,203 // vpaddd %xmm3,%xmm1,%xmm1
.byte 196,227,117,24,192,1 // vinsertf128 $0x1,%xmm0,%ymm1,%ymm0
@@ -30486,12 +30486,12 @@ _sk_store_f16_avx:
.byte 197,252,17,108,36,192 // vmovups %ymm5,-0x40(%rsp)
.byte 197,252,17,100,36,160 // vmovups %ymm4,-0x60(%rsp)
.byte 197,252,40,225 // vmovaps %ymm1,%ymm4
- .byte 196,98,125,24,5,212,50,2,0 // vbroadcastss 0x232d4(%rip),%ymm8 # 38afc <_sk_srcover_bgra_8888_sse2_lowp+0x3f8>
+ .byte 196,98,125,24,5,68,52,2,0 // vbroadcastss 0x23444(%rip),%ymm8 # 38c6c <_sk_srcover_bgra_8888_sse2_lowp+0x3f8>
.byte 196,65,124,84,216 // vandps %ymm8,%ymm0,%ymm11
- .byte 196,98,125,24,21,14,51,2,0 // vbroadcastss 0x2330e(%rip),%ymm10 # 38b44 <_sk_srcover_bgra_8888_sse2_lowp+0x440>
+ .byte 196,98,125,24,21,126,52,2,0 // vbroadcastss 0x2347e(%rip),%ymm10 # 38cb4 <_sk_srcover_bgra_8888_sse2_lowp+0x440>
.byte 196,65,124,84,226 // vandps %ymm10,%ymm0,%ymm12
.byte 196,67,125,25,229,1 // vextractf128 $0x1,%ymm12,%xmm13
- .byte 196,98,121,24,13,254,50,2,0 // vbroadcastss 0x232fe(%rip),%xmm9 # 38b48 <_sk_srcover_bgra_8888_sse2_lowp+0x444>
+ .byte 196,98,121,24,13,110,52,2,0 // vbroadcastss 0x2346e(%rip),%xmm9 # 38cb8 <_sk_srcover_bgra_8888_sse2_lowp+0x444>
.byte 196,65,49,102,245 // vpcmpgtd %xmm13,%xmm9,%xmm14
.byte 196,65,49,102,252 // vpcmpgtd %xmm12,%xmm9,%xmm15
.byte 196,67,5,24,246,1 // vinsertf128 $0x1,%xmm14,%ymm15,%ymm14
@@ -30502,7 +30502,7 @@ _sk_store_f16_avx:
.byte 196,65,17,254,239 // vpaddd %xmm15,%xmm13,%xmm13
.byte 196,193,25,114,212,13 // vpsrld $0xd,%xmm12,%xmm12
.byte 196,65,25,254,227 // vpaddd %xmm11,%xmm12,%xmm12
- .byte 196,98,121,24,29,193,50,2,0 // vbroadcastss 0x232c1(%rip),%xmm11 # 38b4c <_sk_srcover_bgra_8888_sse2_lowp+0x448>
+ .byte 196,98,121,24,29,49,52,2,0 // vbroadcastss 0x23431(%rip),%xmm11 # 38cbc <_sk_srcover_bgra_8888_sse2_lowp+0x448>
.byte 196,65,17,254,235 // vpaddd %xmm11,%xmm13,%xmm13
.byte 196,65,25,254,227 // vpaddd %xmm11,%xmm12,%xmm12
.byte 196,67,29,24,237,1 // vinsertf128 $0x1,%xmm13,%ymm12,%ymm13
@@ -30653,7 +30653,7 @@ _sk_load_u16_be_avx:
.byte 196,226,121,51,192 // vpmovzxwd %xmm0,%xmm0
.byte 196,227,117,24,192,1 // vinsertf128 $0x1,%xmm0,%ymm1,%ymm0
.byte 197,252,91,192 // vcvtdq2ps %ymm0,%ymm0
- .byte 196,98,125,24,13,0,47,2,0 // vbroadcastss 0x22f00(%rip),%ymm9 # 38a48 <_sk_srcover_bgra_8888_sse2_lowp+0x344>
+ .byte 196,98,125,24,13,112,48,2,0 // vbroadcastss 0x23070(%rip),%ymm9 # 38bb8 <_sk_srcover_bgra_8888_sse2_lowp+0x344>
.byte 196,193,124,89,193 // vmulps %ymm9,%ymm0,%ymm0
.byte 197,169,109,202 // vpunpckhqdq %xmm2,%xmm10,%xmm1
.byte 197,233,113,241,8 // vpsllw $0x8,%xmm1,%xmm2
@@ -30757,7 +30757,7 @@ _sk_load_rgb_u16_be_avx:
.byte 196,226,121,51,192 // vpmovzxwd %xmm0,%xmm0
.byte 196,227,117,24,192,1 // vinsertf128 $0x1,%xmm0,%ymm1,%ymm0
.byte 197,252,91,192 // vcvtdq2ps %ymm0,%ymm0
- .byte 196,98,125,24,13,47,45,2,0 // vbroadcastss 0x22d2f(%rip),%ymm9 # 38a48 <_sk_srcover_bgra_8888_sse2_lowp+0x344>
+ .byte 196,98,125,24,13,159,46,2,0 // vbroadcastss 0x22e9f(%rip),%ymm9 # 38bb8 <_sk_srcover_bgra_8888_sse2_lowp+0x344>
.byte 196,193,124,89,193 // vmulps %ymm9,%ymm0,%ymm0
.byte 197,185,109,203 // vpunpckhqdq %xmm3,%xmm8,%xmm1
.byte 197,225,113,241,8 // vpsllw $0x8,%xmm1,%xmm3
@@ -30779,7 +30779,7 @@ _sk_load_rgb_u16_be_avx:
.byte 197,252,91,210 // vcvtdq2ps %ymm2,%ymm2
.byte 196,193,108,89,209 // vmulps %ymm9,%ymm2,%ymm2
.byte 72,173 // lods %ds:(%rsi),%rax
- .byte 196,226,125,24,29,47,44,2,0 // vbroadcastss 0x22c2f(%rip),%ymm3 # 389b4 <_sk_srcover_bgra_8888_sse2_lowp+0x2b0>
+ .byte 196,226,125,24,29,159,45,2,0 // vbroadcastss 0x22d9f(%rip),%ymm3 # 38b24 <_sk_srcover_bgra_8888_sse2_lowp+0x2b0>
.byte 255,224 // jmpq *%rax
.byte 196,193,121,110,4,64 // vmovd (%r8,%rax,2),%xmm0
.byte 196,65,49,239,201 // vpxor %xmm9,%xmm9,%xmm9
@@ -30855,7 +30855,7 @@ _sk_store_u16_be_avx:
.byte 77,1,192 // add %r8,%r8
.byte 76,3,0 // add (%rax),%r8
.byte 73,99,193 // movslq %r9d,%rax
- .byte 196,98,125,24,5,248,43,2,0 // vbroadcastss 0x22bf8(%rip),%ymm8 # 38ad0 <_sk_srcover_bgra_8888_sse2_lowp+0x3cc>
+ .byte 196,98,125,24,5,104,45,2,0 // vbroadcastss 0x22d68(%rip),%ymm8 # 38c40 <_sk_srcover_bgra_8888_sse2_lowp+0x3cc>
.byte 196,65,124,89,200 // vmulps %ymm8,%ymm0,%ymm9
.byte 196,65,125,91,201 // vcvtps2dq %ymm9,%ymm9
.byte 196,67,125,25,202,1 // vextractf128 $0x1,%ymm9,%xmm10
@@ -31014,7 +31014,7 @@ _sk_load_f32_avx:
.byte 255,92,255,255 // lcall *-0x1(%rdi,%rdi,8)
.byte 255,70,255 // incl -0x1(%rsi)
.byte 255 // (bad)
- .byte 255,53,255,255,255,40 // pushq 0x28ffffff(%rip) # 290161a8 <_sk_srcover_bgra_8888_sse2_lowp+0x28fddaa4>
+ .byte 255,53,255,255,255,40 // pushq 0x28ffffff(%rip) # 290161a8 <_sk_srcover_bgra_8888_sse2_lowp+0x28fdd934>
.byte 255 // (bad)
.byte 255 // (bad)
.byte 255 // .byte 0xff
@@ -31213,7 +31213,7 @@ _sk_mirror_x_avx:
.byte 196,193,58,88,192 // vaddss %xmm8,%xmm8,%xmm0
.byte 196,227,121,4,192,0 // vpermilps $0x0,%xmm0,%xmm0
.byte 196,99,125,24,192,1 // vinsertf128 $0x1,%xmm0,%ymm0,%ymm8
- .byte 197,178,89,5,21,37,2,0 // vmulss 0x22515(%rip),%xmm9,%xmm0 # 389b0 <_sk_srcover_bgra_8888_sse2_lowp+0x2ac>
+ .byte 197,178,89,5,133,38,2,0 // vmulss 0x22685(%rip),%xmm9,%xmm0 # 38b20 <_sk_srcover_bgra_8888_sse2_lowp+0x2ac>
.byte 196,227,121,4,192,0 // vpermilps $0x0,%xmm0,%xmm0
.byte 196,227,125,24,192,1 // vinsertf128 $0x1,%xmm0,%ymm0,%ymm0
.byte 197,164,89,192 // vmulps %ymm0,%ymm11,%ymm0
@@ -31240,7 +31240,7 @@ _sk_mirror_y_avx:
.byte 196,193,58,88,200 // vaddss %xmm8,%xmm8,%xmm1
.byte 196,227,121,4,201,0 // vpermilps $0x0,%xmm1,%xmm1
.byte 196,99,117,24,193,1 // vinsertf128 $0x1,%xmm1,%ymm1,%ymm8
- .byte 197,178,89,13,171,36,2,0 // vmulss 0x224ab(%rip),%xmm9,%xmm1 # 389b0 <_sk_srcover_bgra_8888_sse2_lowp+0x2ac>
+ .byte 197,178,89,13,27,38,2,0 // vmulss 0x2261b(%rip),%xmm9,%xmm1 # 38b20 <_sk_srcover_bgra_8888_sse2_lowp+0x2ac>
.byte 196,227,121,4,201,0 // vpermilps $0x0,%xmm1,%xmm1
.byte 196,227,117,24,201,1 // vinsertf128 $0x1,%xmm1,%ymm1,%ymm1
.byte 197,164,89,201 // vmulps %ymm1,%ymm11,%ymm1
@@ -31260,7 +31260,7 @@ FUNCTION(_sk_clamp_x_1_avx)
_sk_clamp_x_1_avx:
.byte 196,65,60,87,192 // vxorps %ymm8,%ymm8,%ymm8
.byte 197,188,95,192 // vmaxps %ymm0,%ymm8,%ymm0
- .byte 196,98,125,24,5,104,36,2,0 // vbroadcastss 0x22468(%rip),%ymm8 # 389b4 <_sk_srcover_bgra_8888_sse2_lowp+0x2b0>
+ .byte 196,98,125,24,5,216,37,2,0 // vbroadcastss 0x225d8(%rip),%ymm8 # 38b24 <_sk_srcover_bgra_8888_sse2_lowp+0x2b0>
.byte 196,193,124,93,192 // vminps %ymm8,%ymm0,%ymm0
.byte 72,173 // lods %ds:(%rsi),%rax
.byte 255,224 // jmpq *%rax
@@ -31273,7 +31273,7 @@ _sk_repeat_x_1_avx:
.byte 196,193,124,92,192 // vsubps %ymm8,%ymm0,%ymm0
.byte 196,65,60,87,192 // vxorps %ymm8,%ymm8,%ymm8
.byte 197,188,95,192 // vmaxps %ymm0,%ymm8,%ymm0
- .byte 196,98,125,24,5,66,36,2,0 // vbroadcastss 0x22442(%rip),%ymm8 # 389b4 <_sk_srcover_bgra_8888_sse2_lowp+0x2b0>
+ .byte 196,98,125,24,5,178,37,2,0 // vbroadcastss 0x225b2(%rip),%ymm8 # 38b24 <_sk_srcover_bgra_8888_sse2_lowp+0x2b0>
.byte 196,193,124,93,192 // vminps %ymm8,%ymm0,%ymm0
.byte 72,173 // lods %ds:(%rsi),%rax
.byte 255,224 // jmpq *%rax
@@ -31282,9 +31282,9 @@ HIDDEN _sk_mirror_x_1_avx
.globl _sk_mirror_x_1_avx
FUNCTION(_sk_mirror_x_1_avx)
_sk_mirror_x_1_avx:
- .byte 196,98,125,24,5,72,36,2,0 // vbroadcastss 0x22448(%rip),%ymm8 # 389cc <_sk_srcover_bgra_8888_sse2_lowp+0x2c8>
+ .byte 196,98,125,24,5,184,37,2,0 // vbroadcastss 0x225b8(%rip),%ymm8 # 38b3c <_sk_srcover_bgra_8888_sse2_lowp+0x2c8>
.byte 196,193,124,88,192 // vaddps %ymm8,%ymm0,%ymm0
- .byte 196,98,125,24,13,30,36,2,0 // vbroadcastss 0x2241e(%rip),%ymm9 # 389b0 <_sk_srcover_bgra_8888_sse2_lowp+0x2ac>
+ .byte 196,98,125,24,13,142,37,2,0 // vbroadcastss 0x2258e(%rip),%ymm9 # 38b20 <_sk_srcover_bgra_8888_sse2_lowp+0x2ac>
.byte 196,65,124,89,201 // vmulps %ymm9,%ymm0,%ymm9
.byte 196,67,125,8,201,1 // vroundps $0x1,%ymm9,%ymm9
.byte 196,65,52,88,201 // vaddps %ymm9,%ymm9,%ymm9
@@ -31294,7 +31294,7 @@ _sk_mirror_x_1_avx:
.byte 197,60,92,200 // vsubps %ymm0,%ymm8,%ymm9
.byte 197,180,84,192 // vandps %ymm0,%ymm9,%ymm0
.byte 197,188,95,192 // vmaxps %ymm0,%ymm8,%ymm0
- .byte 196,98,125,24,5,238,35,2,0 // vbroadcastss 0x223ee(%rip),%ymm8 # 389b4 <_sk_srcover_bgra_8888_sse2_lowp+0x2b0>
+ .byte 196,98,125,24,5,94,37,2,0 // vbroadcastss 0x2255e(%rip),%ymm8 # 38b24 <_sk_srcover_bgra_8888_sse2_lowp+0x2b0>
.byte 196,193,124,93,192 // vminps %ymm8,%ymm0,%ymm0
.byte 72,173 // lods %ds:(%rsi),%rax
.byte 255,224 // jmpq *%rax
@@ -31303,12 +31303,12 @@ HIDDEN _sk_luminance_to_alpha_avx
.globl _sk_luminance_to_alpha_avx
FUNCTION(_sk_luminance_to_alpha_avx)
_sk_luminance_to_alpha_avx:
- .byte 196,226,125,24,29,0,37,2,0 // vbroadcastss 0x22500(%rip),%ymm3 # 38ad8 <_sk_srcover_bgra_8888_sse2_lowp+0x3d4>
+ .byte 196,226,125,24,29,112,38,2,0 // vbroadcastss 0x22670(%rip),%ymm3 # 38c48 <_sk_srcover_bgra_8888_sse2_lowp+0x3d4>
.byte 197,252,89,195 // vmulps %ymm3,%ymm0,%ymm0
- .byte 196,226,125,24,29,239,36,2,0 // vbroadcastss 0x224ef(%rip),%ymm3 # 38ad4 <_sk_srcover_bgra_8888_sse2_lowp+0x3d0>
+ .byte 196,226,125,24,29,95,38,2,0 // vbroadcastss 0x2265f(%rip),%ymm3 # 38c44 <_sk_srcover_bgra_8888_sse2_lowp+0x3d0>
.byte 197,244,89,203 // vmulps %ymm3,%ymm1,%ymm1
.byte 197,252,88,193 // vaddps %ymm1,%ymm0,%ymm0
- .byte 196,226,125,24,13,230,36,2,0 // vbroadcastss 0x224e6(%rip),%ymm1 # 38adc <_sk_srcover_bgra_8888_sse2_lowp+0x3d8>
+ .byte 196,226,125,24,13,86,38,2,0 // vbroadcastss 0x22656(%rip),%ymm1 # 38c4c <_sk_srcover_bgra_8888_sse2_lowp+0x3d8>
.byte 197,236,89,201 // vmulps %ymm1,%ymm2,%ymm1
.byte 197,252,88,217 // vaddps %ymm1,%ymm0,%ymm3
.byte 72,173 // lods %ds:(%rsi),%rax
@@ -31731,7 +31731,7 @@ _sk_gradient_avx:
.byte 73,255,200 // dec %r8
.byte 72,131,195,4 // add $0x4,%rbx
.byte 196,65,52,87,201 // vxorps %ymm9,%ymm9,%ymm9
- .byte 196,98,125,24,21,167,28,2,0 // vbroadcastss 0x21ca7(%rip),%ymm10 # 389b8 <_sk_srcover_bgra_8888_sse2_lowp+0x2b4>
+ .byte 196,98,125,24,21,23,30,2,0 // vbroadcastss 0x21e17(%rip),%ymm10 # 38b28 <_sk_srcover_bgra_8888_sse2_lowp+0x2b4>
.byte 197,244,87,201 // vxorps %ymm1,%ymm1,%ymm1
.byte 196,98,125,24,3 // vbroadcastss (%rbx),%ymm8
.byte 197,60,194,192,2 // vcmpleps %ymm0,%ymm8,%ymm8
@@ -31925,27 +31925,27 @@ _sk_xy_to_unit_angle_avx:
.byte 196,65,52,95,226 // vmaxps %ymm10,%ymm9,%ymm12
.byte 196,65,36,94,220 // vdivps %ymm12,%ymm11,%ymm11
.byte 196,65,36,89,227 // vmulps %ymm11,%ymm11,%ymm12
- .byte 196,98,125,24,45,193,25,2,0 // vbroadcastss 0x219c1(%rip),%ymm13 # 38ae0 <_sk_srcover_bgra_8888_sse2_lowp+0x3dc>
+ .byte 196,98,125,24,45,49,27,2,0 // vbroadcastss 0x21b31(%rip),%ymm13 # 38c50 <_sk_srcover_bgra_8888_sse2_lowp+0x3dc>
.byte 196,65,28,89,237 // vmulps %ymm13,%ymm12,%ymm13
- .byte 196,98,125,24,53,183,25,2,0 // vbroadcastss 0x219b7(%rip),%ymm14 # 38ae4 <_sk_srcover_bgra_8888_sse2_lowp+0x3e0>
+ .byte 196,98,125,24,53,39,27,2,0 // vbroadcastss 0x21b27(%rip),%ymm14 # 38c54 <_sk_srcover_bgra_8888_sse2_lowp+0x3e0>
.byte 196,65,20,88,238 // vaddps %ymm14,%ymm13,%ymm13
.byte 196,65,28,89,237 // vmulps %ymm13,%ymm12,%ymm13
- .byte 196,98,125,24,53,168,25,2,0 // vbroadcastss 0x219a8(%rip),%ymm14 # 38ae8 <_sk_srcover_bgra_8888_sse2_lowp+0x3e4>
+ .byte 196,98,125,24,53,24,27,2,0 // vbroadcastss 0x21b18(%rip),%ymm14 # 38c58 <_sk_srcover_bgra_8888_sse2_lowp+0x3e4>
.byte 196,65,20,88,238 // vaddps %ymm14,%ymm13,%ymm13
.byte 196,65,28,89,229 // vmulps %ymm13,%ymm12,%ymm12
- .byte 196,98,125,24,45,153,25,2,0 // vbroadcastss 0x21999(%rip),%ymm13 # 38aec <_sk_srcover_bgra_8888_sse2_lowp+0x3e8>
+ .byte 196,98,125,24,45,9,27,2,0 // vbroadcastss 0x21b09(%rip),%ymm13 # 38c5c <_sk_srcover_bgra_8888_sse2_lowp+0x3e8>
.byte 196,65,28,88,229 // vaddps %ymm13,%ymm12,%ymm12
.byte 196,65,36,89,220 // vmulps %ymm12,%ymm11,%ymm11
.byte 196,65,52,194,202,1 // vcmpltps %ymm10,%ymm9,%ymm9
- .byte 196,98,125,24,21,132,25,2,0 // vbroadcastss 0x21984(%rip),%ymm10 # 38af0 <_sk_srcover_bgra_8888_sse2_lowp+0x3ec>
+ .byte 196,98,125,24,21,244,26,2,0 // vbroadcastss 0x21af4(%rip),%ymm10 # 38c60 <_sk_srcover_bgra_8888_sse2_lowp+0x3ec>
.byte 196,65,44,92,211 // vsubps %ymm11,%ymm10,%ymm10
.byte 196,67,37,74,202,144 // vblendvps %ymm9,%ymm10,%ymm11,%ymm9
.byte 196,193,124,194,192,1 // vcmpltps %ymm8,%ymm0,%ymm0
- .byte 196,98,125,24,21,42,24,2,0 // vbroadcastss 0x2182a(%rip),%ymm10 # 389b0 <_sk_srcover_bgra_8888_sse2_lowp+0x2ac>
+ .byte 196,98,125,24,21,154,25,2,0 // vbroadcastss 0x2199a(%rip),%ymm10 # 38b20 <_sk_srcover_bgra_8888_sse2_lowp+0x2ac>
.byte 196,65,44,92,209 // vsubps %ymm9,%ymm10,%ymm10
.byte 196,195,53,74,194,0 // vblendvps %ymm0,%ymm10,%ymm9,%ymm0
.byte 196,65,116,194,200,1 // vcmpltps %ymm8,%ymm1,%ymm9
- .byte 196,98,125,24,21,20,24,2,0 // vbroadcastss 0x21814(%rip),%ymm10 # 389b4 <_sk_srcover_bgra_8888_sse2_lowp+0x2b0>
+ .byte 196,98,125,24,21,132,25,2,0 // vbroadcastss 0x21984(%rip),%ymm10 # 38b24 <_sk_srcover_bgra_8888_sse2_lowp+0x2b0>
.byte 197,44,92,208 // vsubps %ymm0,%ymm10,%ymm10
.byte 196,195,125,74,194,144 // vblendvps %ymm9,%ymm10,%ymm0,%ymm0
.byte 196,65,124,194,200,3 // vcmpunordps %ymm8,%ymm0,%ymm9
@@ -31975,7 +31975,7 @@ _sk_xy_to_2pt_conical_quadratic_max_avx:
.byte 196,67,121,4,210,0 // vpermilps $0x0,%xmm10,%xmm10
.byte 196,67,45,24,210,1 // vinsertf128 $0x1,%xmm10,%ymm10,%ymm10
.byte 197,44,88,208 // vaddps %ymm0,%ymm10,%ymm10
- .byte 196,98,125,24,29,250,24,2,0 // vbroadcastss 0x218fa(%rip),%ymm11 # 38af4 <_sk_srcover_bgra_8888_sse2_lowp+0x3f0>
+ .byte 196,98,125,24,29,106,26,2,0 // vbroadcastss 0x21a6a(%rip),%ymm11 # 38c64 <_sk_srcover_bgra_8888_sse2_lowp+0x3f0>
.byte 196,65,44,89,211 // vmulps %ymm11,%ymm10,%ymm10
.byte 197,252,89,192 // vmulps %ymm0,%ymm0,%ymm0
.byte 197,116,89,217 // vmulps %ymm1,%ymm1,%ymm11
@@ -31984,17 +31984,17 @@ _sk_xy_to_2pt_conical_quadratic_max_avx:
.byte 196,227,121,4,192,0 // vpermilps $0x0,%xmm0,%xmm0
.byte 196,227,125,24,192,1 // vinsertf128 $0x1,%xmm0,%ymm0,%ymm0
.byte 197,164,92,192 // vsubps %ymm0,%ymm11,%ymm0
- .byte 196,98,125,24,13,206,24,2,0 // vbroadcastss 0x218ce(%rip),%ymm9 # 38af8 <_sk_srcover_bgra_8888_sse2_lowp+0x3f4>
+ .byte 196,98,125,24,13,62,26,2,0 // vbroadcastss 0x21a3e(%rip),%ymm9 # 38c68 <_sk_srcover_bgra_8888_sse2_lowp+0x3f4>
.byte 196,65,60,89,193 // vmulps %ymm9,%ymm8,%ymm8
.byte 197,188,89,192 // vmulps %ymm0,%ymm8,%ymm0
.byte 196,65,44,89,194 // vmulps %ymm10,%ymm10,%ymm8
.byte 196,193,124,88,192 // vaddps %ymm8,%ymm0,%ymm0
.byte 197,252,81,192 // vsqrtps %ymm0,%ymm0
.byte 196,98,125,24,64,68 // vbroadcastss 0x44(%rax),%ymm8
- .byte 196,98,125,24,13,172,24,2,0 // vbroadcastss 0x218ac(%rip),%ymm9 # 38afc <_sk_srcover_bgra_8888_sse2_lowp+0x3f8>
+ .byte 196,98,125,24,13,28,26,2,0 // vbroadcastss 0x21a1c(%rip),%ymm9 # 38c6c <_sk_srcover_bgra_8888_sse2_lowp+0x3f8>
.byte 196,65,44,87,201 // vxorps %ymm9,%ymm10,%ymm9
.byte 196,65,124,92,210 // vsubps %ymm10,%ymm0,%ymm10
- .byte 196,98,125,24,29,77,23,2,0 // vbroadcastss 0x2174d(%rip),%ymm11 # 389b0 <_sk_srcover_bgra_8888_sse2_lowp+0x2ac>
+ .byte 196,98,125,24,29,189,24,2,0 // vbroadcastss 0x218bd(%rip),%ymm11 # 38b20 <_sk_srcover_bgra_8888_sse2_lowp+0x2ac>
.byte 196,65,60,89,195 // vmulps %ymm11,%ymm8,%ymm8
.byte 196,65,60,89,210 // vmulps %ymm10,%ymm8,%ymm10
.byte 197,180,92,192 // vsubps %ymm0,%ymm9,%ymm0
@@ -32014,7 +32014,7 @@ _sk_xy_to_2pt_conical_quadratic_min_avx:
.byte 196,67,121,4,210,0 // vpermilps $0x0,%xmm10,%xmm10
.byte 196,67,45,24,210,1 // vinsertf128 $0x1,%xmm10,%ymm10,%ymm10
.byte 197,44,88,208 // vaddps %ymm0,%ymm10,%ymm10
- .byte 196,98,125,24,29,76,24,2,0 // vbroadcastss 0x2184c(%rip),%ymm11 # 38af4 <_sk_srcover_bgra_8888_sse2_lowp+0x3f0>
+ .byte 196,98,125,24,29,188,25,2,0 // vbroadcastss 0x219bc(%rip),%ymm11 # 38c64 <_sk_srcover_bgra_8888_sse2_lowp+0x3f0>
.byte 196,65,44,89,211 // vmulps %ymm11,%ymm10,%ymm10
.byte 197,252,89,192 // vmulps %ymm0,%ymm0,%ymm0
.byte 197,116,89,217 // vmulps %ymm1,%ymm1,%ymm11
@@ -32023,17 +32023,17 @@ _sk_xy_to_2pt_conical_quadratic_min_avx:
.byte 196,227,121,4,192,0 // vpermilps $0x0,%xmm0,%xmm0
.byte 196,227,125,24,192,1 // vinsertf128 $0x1,%xmm0,%ymm0,%ymm0
.byte 197,164,92,192 // vsubps %ymm0,%ymm11,%ymm0
- .byte 196,98,125,24,13,32,24,2,0 // vbroadcastss 0x21820(%rip),%ymm9 # 38af8 <_sk_srcover_bgra_8888_sse2_lowp+0x3f4>
+ .byte 196,98,125,24,13,144,25,2,0 // vbroadcastss 0x21990(%rip),%ymm9 # 38c68 <_sk_srcover_bgra_8888_sse2_lowp+0x3f4>
.byte 196,65,60,89,193 // vmulps %ymm9,%ymm8,%ymm8
.byte 197,188,89,192 // vmulps %ymm0,%ymm8,%ymm0
.byte 196,65,44,89,194 // vmulps %ymm10,%ymm10,%ymm8
.byte 196,193,124,88,192 // vaddps %ymm8,%ymm0,%ymm0
.byte 197,252,81,192 // vsqrtps %ymm0,%ymm0
.byte 196,98,125,24,64,68 // vbroadcastss 0x44(%rax),%ymm8
- .byte 196,98,125,24,13,254,23,2,0 // vbroadcastss 0x217fe(%rip),%ymm9 # 38afc <_sk_srcover_bgra_8888_sse2_lowp+0x3f8>
+ .byte 196,98,125,24,13,110,25,2,0 // vbroadcastss 0x2196e(%rip),%ymm9 # 38c6c <_sk_srcover_bgra_8888_sse2_lowp+0x3f8>
.byte 196,65,44,87,201 // vxorps %ymm9,%ymm10,%ymm9
.byte 196,65,124,92,210 // vsubps %ymm10,%ymm0,%ymm10
- .byte 196,98,125,24,29,159,22,2,0 // vbroadcastss 0x2169f(%rip),%ymm11 # 389b0 <_sk_srcover_bgra_8888_sse2_lowp+0x2ac>
+ .byte 196,98,125,24,29,15,24,2,0 // vbroadcastss 0x2180f(%rip),%ymm11 # 38b20 <_sk_srcover_bgra_8888_sse2_lowp+0x2ac>
.byte 196,65,60,89,195 // vmulps %ymm11,%ymm8,%ymm8
.byte 196,65,60,89,210 // vmulps %ymm10,%ymm8,%ymm10
.byte 197,180,92,192 // vsubps %ymm0,%ymm9,%ymm0
@@ -32052,7 +32052,7 @@ _sk_xy_to_2pt_conical_linear_avx:
.byte 196,67,121,4,201,0 // vpermilps $0x0,%xmm9,%xmm9
.byte 196,67,53,24,201,1 // vinsertf128 $0x1,%xmm9,%ymm9,%ymm9
.byte 197,52,88,200 // vaddps %ymm0,%ymm9,%ymm9
- .byte 196,98,125,24,21,164,23,2,0 // vbroadcastss 0x217a4(%rip),%ymm10 # 38af4 <_sk_srcover_bgra_8888_sse2_lowp+0x3f0>
+ .byte 196,98,125,24,21,20,25,2,0 // vbroadcastss 0x21914(%rip),%ymm10 # 38c64 <_sk_srcover_bgra_8888_sse2_lowp+0x3f0>
.byte 196,65,52,89,202 // vmulps %ymm10,%ymm9,%ymm9
.byte 197,252,89,192 // vmulps %ymm0,%ymm0,%ymm0
.byte 197,116,89,209 // vmulps %ymm1,%ymm1,%ymm10
@@ -32061,7 +32061,7 @@ _sk_xy_to_2pt_conical_linear_avx:
.byte 196,227,121,4,192,0 // vpermilps $0x0,%xmm0,%xmm0
.byte 196,227,125,24,192,1 // vinsertf128 $0x1,%xmm0,%ymm0,%ymm0
.byte 197,172,92,192 // vsubps %ymm0,%ymm10,%ymm0
- .byte 196,98,125,24,5,124,23,2,0 // vbroadcastss 0x2177c(%rip),%ymm8 # 38afc <_sk_srcover_bgra_8888_sse2_lowp+0x3f8>
+ .byte 196,98,125,24,5,236,24,2,0 // vbroadcastss 0x218ec(%rip),%ymm8 # 38c6c <_sk_srcover_bgra_8888_sse2_lowp+0x3f8>
.byte 196,193,124,87,192 // vxorps %ymm8,%ymm0,%ymm0
.byte 196,193,124,94,193 // vdivps %ymm9,%ymm0,%ymm0
.byte 72,173 // lods %ds:(%rsi),%rax
@@ -32105,7 +32105,7 @@ HIDDEN _sk_save_xy_avx
FUNCTION(_sk_save_xy_avx)
_sk_save_xy_avx:
.byte 72,173 // lods %ds:(%rsi),%rax
- .byte 196,98,125,24,5,182,21,2,0 // vbroadcastss 0x215b6(%rip),%ymm8 # 389b0 <_sk_srcover_bgra_8888_sse2_lowp+0x2ac>
+ .byte 196,98,125,24,5,38,23,2,0 // vbroadcastss 0x21726(%rip),%ymm8 # 38b20 <_sk_srcover_bgra_8888_sse2_lowp+0x2ac>
.byte 196,65,124,88,200 // vaddps %ymm8,%ymm0,%ymm9
.byte 196,67,125,8,209,1 // vroundps $0x1,%ymm9,%ymm10
.byte 196,65,52,92,202 // vsubps %ymm10,%ymm9,%ymm9
@@ -32142,9 +32142,9 @@ HIDDEN _sk_bilinear_nx_avx
FUNCTION(_sk_bilinear_nx_avx)
_sk_bilinear_nx_avx:
.byte 72,173 // lods %ds:(%rsi),%rax
- .byte 196,226,125,24,5,136,22,2,0 // vbroadcastss 0x21688(%rip),%ymm0 # 38b00 <_sk_srcover_bgra_8888_sse2_lowp+0x3fc>
+ .byte 196,226,125,24,5,248,23,2,0 // vbroadcastss 0x217f8(%rip),%ymm0 # 38c70 <_sk_srcover_bgra_8888_sse2_lowp+0x3fc>
.byte 197,252,88,0 // vaddps (%rax),%ymm0,%ymm0
- .byte 196,98,125,24,5,47,21,2,0 // vbroadcastss 0x2152f(%rip),%ymm8 # 389b4 <_sk_srcover_bgra_8888_sse2_lowp+0x2b0>
+ .byte 196,98,125,24,5,159,22,2,0 // vbroadcastss 0x2169f(%rip),%ymm8 # 38b24 <_sk_srcover_bgra_8888_sse2_lowp+0x2b0>
.byte 197,60,92,128,128,0,0,0 // vsubps 0x80(%rax),%ymm8,%ymm8
.byte 197,124,17,128,0,1,0,0 // vmovups %ymm8,0x100(%rax)
.byte 72,173 // lods %ds:(%rsi),%rax
@@ -32155,7 +32155,7 @@ HIDDEN _sk_bilinear_px_avx
FUNCTION(_sk_bilinear_px_avx)
_sk_bilinear_px_avx:
.byte 72,173 // lods %ds:(%rsi),%rax
- .byte 196,226,125,24,5,12,21,2,0 // vbroadcastss 0x2150c(%rip),%ymm0 # 389b0 <_sk_srcover_bgra_8888_sse2_lowp+0x2ac>
+ .byte 196,226,125,24,5,124,22,2,0 // vbroadcastss 0x2167c(%rip),%ymm0 # 38b20 <_sk_srcover_bgra_8888_sse2_lowp+0x2ac>
.byte 197,252,88,0 // vaddps (%rax),%ymm0,%ymm0
.byte 197,124,16,128,128,0,0,0 // vmovups 0x80(%rax),%ymm8
.byte 197,124,17,128,0,1,0,0 // vmovups %ymm8,0x100(%rax)
@@ -32167,9 +32167,9 @@ HIDDEN _sk_bilinear_ny_avx
FUNCTION(_sk_bilinear_ny_avx)
_sk_bilinear_ny_avx:
.byte 72,173 // lods %ds:(%rsi),%rax
- .byte 196,226,125,24,13,57,22,2,0 // vbroadcastss 0x21639(%rip),%ymm1 # 38b00 <_sk_srcover_bgra_8888_sse2_lowp+0x3fc>
+ .byte 196,226,125,24,13,169,23,2,0 // vbroadcastss 0x217a9(%rip),%ymm1 # 38c70 <_sk_srcover_bgra_8888_sse2_lowp+0x3fc>
.byte 197,244,88,72,64 // vaddps 0x40(%rax),%ymm1,%ymm1
- .byte 196,98,125,24,5,223,20,2,0 // vbroadcastss 0x214df(%rip),%ymm8 # 389b4 <_sk_srcover_bgra_8888_sse2_lowp+0x2b0>
+ .byte 196,98,125,24,5,79,22,2,0 // vbroadcastss 0x2164f(%rip),%ymm8 # 38b24 <_sk_srcover_bgra_8888_sse2_lowp+0x2b0>
.byte 197,60,92,128,192,0,0,0 // vsubps 0xc0(%rax),%ymm8,%ymm8
.byte 197,124,17,128,64,1,0,0 // vmovups %ymm8,0x140(%rax)
.byte 72,173 // lods %ds:(%rsi),%rax
@@ -32180,7 +32180,7 @@ HIDDEN _sk_bilinear_py_avx
FUNCTION(_sk_bilinear_py_avx)
_sk_bilinear_py_avx:
.byte 72,173 // lods %ds:(%rsi),%rax
- .byte 196,226,125,24,13,188,20,2,0 // vbroadcastss 0x214bc(%rip),%ymm1 # 389b0 <_sk_srcover_bgra_8888_sse2_lowp+0x2ac>
+ .byte 196,226,125,24,13,44,22,2,0 // vbroadcastss 0x2162c(%rip),%ymm1 # 38b20 <_sk_srcover_bgra_8888_sse2_lowp+0x2ac>
.byte 197,244,88,72,64 // vaddps 0x40(%rax),%ymm1,%ymm1
.byte 197,124,16,128,192,0,0,0 // vmovups 0xc0(%rax),%ymm8
.byte 197,124,17,128,64,1,0,0 // vmovups %ymm8,0x140(%rax)
@@ -32192,14 +32192,14 @@ HIDDEN _sk_bicubic_n3x_avx
FUNCTION(_sk_bicubic_n3x_avx)
_sk_bicubic_n3x_avx:
.byte 72,173 // lods %ds:(%rsi),%rax
- .byte 196,226,125,24,5,236,21,2,0 // vbroadcastss 0x215ec(%rip),%ymm0 # 38b04 <_sk_srcover_bgra_8888_sse2_lowp+0x400>
+ .byte 196,226,125,24,5,92,23,2,0 // vbroadcastss 0x2175c(%rip),%ymm0 # 38c74 <_sk_srcover_bgra_8888_sse2_lowp+0x400>
.byte 197,252,88,0 // vaddps (%rax),%ymm0,%ymm0
- .byte 196,98,125,24,5,143,20,2,0 // vbroadcastss 0x2148f(%rip),%ymm8 # 389b4 <_sk_srcover_bgra_8888_sse2_lowp+0x2b0>
+ .byte 196,98,125,24,5,255,21,2,0 // vbroadcastss 0x215ff(%rip),%ymm8 # 38b24 <_sk_srcover_bgra_8888_sse2_lowp+0x2b0>
.byte 197,60,92,128,128,0,0,0 // vsubps 0x80(%rax),%ymm8,%ymm8
.byte 196,65,60,89,200 // vmulps %ymm8,%ymm8,%ymm9
- .byte 196,98,125,24,21,205,21,2,0 // vbroadcastss 0x215cd(%rip),%ymm10 # 38b08 <_sk_srcover_bgra_8888_sse2_lowp+0x404>
+ .byte 196,98,125,24,21,61,23,2,0 // vbroadcastss 0x2173d(%rip),%ymm10 # 38c78 <_sk_srcover_bgra_8888_sse2_lowp+0x404>
.byte 196,65,60,89,194 // vmulps %ymm10,%ymm8,%ymm8
- .byte 196,98,125,24,21,223,20,2,0 // vbroadcastss 0x214df(%rip),%ymm10 # 38a28 <_sk_srcover_bgra_8888_sse2_lowp+0x324>
+ .byte 196,98,125,24,21,79,22,2,0 // vbroadcastss 0x2164f(%rip),%ymm10 # 38b98 <_sk_srcover_bgra_8888_sse2_lowp+0x324>
.byte 196,65,60,88,194 // vaddps %ymm10,%ymm8,%ymm8
.byte 196,65,52,89,192 // vmulps %ymm8,%ymm9,%ymm8
.byte 197,124,17,128,0,1,0,0 // vmovups %ymm8,0x100(%rax)
@@ -32211,19 +32211,19 @@ HIDDEN _sk_bicubic_n1x_avx
FUNCTION(_sk_bicubic_n1x_avx)
_sk_bicubic_n1x_avx:
.byte 72,173 // lods %ds:(%rsi),%rax
- .byte 196,226,125,24,5,150,21,2,0 // vbroadcastss 0x21596(%rip),%ymm0 # 38b00 <_sk_srcover_bgra_8888_sse2_lowp+0x3fc>
+ .byte 196,226,125,24,5,6,23,2,0 // vbroadcastss 0x21706(%rip),%ymm0 # 38c70 <_sk_srcover_bgra_8888_sse2_lowp+0x3fc>
.byte 197,252,88,0 // vaddps (%rax),%ymm0,%ymm0
- .byte 196,98,125,24,5,61,20,2,0 // vbroadcastss 0x2143d(%rip),%ymm8 # 389b4 <_sk_srcover_bgra_8888_sse2_lowp+0x2b0>
+ .byte 196,98,125,24,5,173,21,2,0 // vbroadcastss 0x215ad(%rip),%ymm8 # 38b24 <_sk_srcover_bgra_8888_sse2_lowp+0x2b0>
.byte 197,60,92,128,128,0,0,0 // vsubps 0x80(%rax),%ymm8,%ymm8
- .byte 196,98,125,24,13,132,21,2,0 // vbroadcastss 0x21584(%rip),%ymm9 # 38b0c <_sk_srcover_bgra_8888_sse2_lowp+0x408>
+ .byte 196,98,125,24,13,244,22,2,0 // vbroadcastss 0x216f4(%rip),%ymm9 # 38c7c <_sk_srcover_bgra_8888_sse2_lowp+0x408>
.byte 196,65,60,89,201 // vmulps %ymm9,%ymm8,%ymm9
- .byte 196,98,125,24,21,122,21,2,0 // vbroadcastss 0x2157a(%rip),%ymm10 # 38b10 <_sk_srcover_bgra_8888_sse2_lowp+0x40c>
+ .byte 196,98,125,24,21,234,22,2,0 // vbroadcastss 0x216ea(%rip),%ymm10 # 38c80 <_sk_srcover_bgra_8888_sse2_lowp+0x40c>
.byte 196,65,52,88,202 // vaddps %ymm10,%ymm9,%ymm9
.byte 196,65,60,89,201 // vmulps %ymm9,%ymm8,%ymm9
- .byte 196,98,125,24,21,7,20,2,0 // vbroadcastss 0x21407(%rip),%ymm10 # 389b0 <_sk_srcover_bgra_8888_sse2_lowp+0x2ac>
+ .byte 196,98,125,24,21,119,21,2,0 // vbroadcastss 0x21577(%rip),%ymm10 # 38b20 <_sk_srcover_bgra_8888_sse2_lowp+0x2ac>
.byte 196,65,52,88,202 // vaddps %ymm10,%ymm9,%ymm9
.byte 196,65,60,89,193 // vmulps %ymm9,%ymm8,%ymm8
- .byte 196,98,125,24,13,88,21,2,0 // vbroadcastss 0x21558(%rip),%ymm9 # 38b14 <_sk_srcover_bgra_8888_sse2_lowp+0x410>
+ .byte 196,98,125,24,13,200,22,2,0 // vbroadcastss 0x216c8(%rip),%ymm9 # 38c84 <_sk_srcover_bgra_8888_sse2_lowp+0x410>
.byte 196,65,60,88,193 // vaddps %ymm9,%ymm8,%ymm8
.byte 197,124,17,128,0,1,0,0 // vmovups %ymm8,0x100(%rax)
.byte 72,173 // lods %ds:(%rsi),%rax
@@ -32234,17 +32234,17 @@ HIDDEN _sk_bicubic_p1x_avx
FUNCTION(_sk_bicubic_p1x_avx)
_sk_bicubic_p1x_avx:
.byte 72,173 // lods %ds:(%rsi),%rax
- .byte 196,98,125,24,5,216,19,2,0 // vbroadcastss 0x213d8(%rip),%ymm8 # 389b0 <_sk_srcover_bgra_8888_sse2_lowp+0x2ac>
+ .byte 196,98,125,24,5,72,21,2,0 // vbroadcastss 0x21548(%rip),%ymm8 # 38b20 <_sk_srcover_bgra_8888_sse2_lowp+0x2ac>
.byte 197,188,88,0 // vaddps (%rax),%ymm8,%ymm0
.byte 197,124,16,136,128,0,0,0 // vmovups 0x80(%rax),%ymm9
- .byte 196,98,125,24,21,31,21,2,0 // vbroadcastss 0x2151f(%rip),%ymm10 # 38b0c <_sk_srcover_bgra_8888_sse2_lowp+0x408>
+ .byte 196,98,125,24,21,143,22,2,0 // vbroadcastss 0x2168f(%rip),%ymm10 # 38c7c <_sk_srcover_bgra_8888_sse2_lowp+0x408>
.byte 196,65,52,89,210 // vmulps %ymm10,%ymm9,%ymm10
- .byte 196,98,125,24,29,21,21,2,0 // vbroadcastss 0x21515(%rip),%ymm11 # 38b10 <_sk_srcover_bgra_8888_sse2_lowp+0x40c>
+ .byte 196,98,125,24,29,133,22,2,0 // vbroadcastss 0x21685(%rip),%ymm11 # 38c80 <_sk_srcover_bgra_8888_sse2_lowp+0x40c>
.byte 196,65,44,88,211 // vaddps %ymm11,%ymm10,%ymm10
.byte 196,65,52,89,210 // vmulps %ymm10,%ymm9,%ymm10
.byte 196,65,44,88,192 // vaddps %ymm8,%ymm10,%ymm8
.byte 196,65,52,89,192 // vmulps %ymm8,%ymm9,%ymm8
- .byte 196,98,125,24,13,252,20,2,0 // vbroadcastss 0x214fc(%rip),%ymm9 # 38b14 <_sk_srcover_bgra_8888_sse2_lowp+0x410>
+ .byte 196,98,125,24,13,108,22,2,0 // vbroadcastss 0x2166c(%rip),%ymm9 # 38c84 <_sk_srcover_bgra_8888_sse2_lowp+0x410>
.byte 196,65,60,88,193 // vaddps %ymm9,%ymm8,%ymm8
.byte 197,124,17,128,0,1,0,0 // vmovups %ymm8,0x100(%rax)
.byte 72,173 // lods %ds:(%rsi),%rax
@@ -32255,13 +32255,13 @@ HIDDEN _sk_bicubic_p3x_avx
FUNCTION(_sk_bicubic_p3x_avx)
_sk_bicubic_p3x_avx:
.byte 72,173 // lods %ds:(%rsi),%rax
- .byte 196,226,125,24,5,220,20,2,0 // vbroadcastss 0x214dc(%rip),%ymm0 # 38b10 <_sk_srcover_bgra_8888_sse2_lowp+0x40c>
+ .byte 196,226,125,24,5,76,22,2,0 // vbroadcastss 0x2164c(%rip),%ymm0 # 38c80 <_sk_srcover_bgra_8888_sse2_lowp+0x40c>
.byte 197,252,88,0 // vaddps (%rax),%ymm0,%ymm0
.byte 197,124,16,128,128,0,0,0 // vmovups 0x80(%rax),%ymm8
.byte 196,65,60,89,200 // vmulps %ymm8,%ymm8,%ymm9
- .byte 196,98,125,24,21,186,20,2,0 // vbroadcastss 0x214ba(%rip),%ymm10 # 38b08 <_sk_srcover_bgra_8888_sse2_lowp+0x404>
+ .byte 196,98,125,24,21,42,22,2,0 // vbroadcastss 0x2162a(%rip),%ymm10 # 38c78 <_sk_srcover_bgra_8888_sse2_lowp+0x404>
.byte 196,65,60,89,194 // vmulps %ymm10,%ymm8,%ymm8
- .byte 196,98,125,24,21,204,19,2,0 // vbroadcastss 0x213cc(%rip),%ymm10 # 38a28 <_sk_srcover_bgra_8888_sse2_lowp+0x324>
+ .byte 196,98,125,24,21,60,21,2,0 // vbroadcastss 0x2153c(%rip),%ymm10 # 38b98 <_sk_srcover_bgra_8888_sse2_lowp+0x324>
.byte 196,65,60,88,194 // vaddps %ymm10,%ymm8,%ymm8
.byte 196,65,52,89,192 // vmulps %ymm8,%ymm9,%ymm8
.byte 197,124,17,128,0,1,0,0 // vmovups %ymm8,0x100(%rax)
@@ -32273,14 +32273,14 @@ HIDDEN _sk_bicubic_n3y_avx
FUNCTION(_sk_bicubic_n3y_avx)
_sk_bicubic_n3y_avx:
.byte 72,173 // lods %ds:(%rsi),%rax
- .byte 196,226,125,24,13,135,20,2,0 // vbroadcastss 0x21487(%rip),%ymm1 # 38b04 <_sk_srcover_bgra_8888_sse2_lowp+0x400>
+ .byte 196,226,125,24,13,247,21,2,0 // vbroadcastss 0x215f7(%rip),%ymm1 # 38c74 <_sk_srcover_bgra_8888_sse2_lowp+0x400>
.byte 197,244,88,72,64 // vaddps 0x40(%rax),%ymm1,%ymm1
- .byte 196,98,125,24,5,41,19,2,0 // vbroadcastss 0x21329(%rip),%ymm8 # 389b4 <_sk_srcover_bgra_8888_sse2_lowp+0x2b0>
+ .byte 196,98,125,24,5,153,20,2,0 // vbroadcastss 0x21499(%rip),%ymm8 # 38b24 <_sk_srcover_bgra_8888_sse2_lowp+0x2b0>
.byte 197,60,92,128,192,0,0,0 // vsubps 0xc0(%rax),%ymm8,%ymm8
.byte 196,65,60,89,200 // vmulps %ymm8,%ymm8,%ymm9
- .byte 196,98,125,24,21,103,20,2,0 // vbroadcastss 0x21467(%rip),%ymm10 # 38b08 <_sk_srcover_bgra_8888_sse2_lowp+0x404>
+ .byte 196,98,125,24,21,215,21,2,0 // vbroadcastss 0x215d7(%rip),%ymm10 # 38c78 <_sk_srcover_bgra_8888_sse2_lowp+0x404>
.byte 196,65,60,89,194 // vmulps %ymm10,%ymm8,%ymm8
- .byte 196,98,125,24,21,121,19,2,0 // vbroadcastss 0x21379(%rip),%ymm10 # 38a28 <_sk_srcover_bgra_8888_sse2_lowp+0x324>
+ .byte 196,98,125,24,21,233,20,2,0 // vbroadcastss 0x214e9(%rip),%ymm10 # 38b98 <_sk_srcover_bgra_8888_sse2_lowp+0x324>
.byte 196,65,60,88,194 // vaddps %ymm10,%ymm8,%ymm8
.byte 196,65,52,89,192 // vmulps %ymm8,%ymm9,%ymm8
.byte 197,124,17,128,64,1,0,0 // vmovups %ymm8,0x140(%rax)
@@ -32292,19 +32292,19 @@ HIDDEN _sk_bicubic_n1y_avx
FUNCTION(_sk_bicubic_n1y_avx)
_sk_bicubic_n1y_avx:
.byte 72,173 // lods %ds:(%rsi),%rax
- .byte 196,226,125,24,13,48,20,2,0 // vbroadcastss 0x21430(%rip),%ymm1 # 38b00 <_sk_srcover_bgra_8888_sse2_lowp+0x3fc>
+ .byte 196,226,125,24,13,160,21,2,0 // vbroadcastss 0x215a0(%rip),%ymm1 # 38c70 <_sk_srcover_bgra_8888_sse2_lowp+0x3fc>
.byte 197,244,88,72,64 // vaddps 0x40(%rax),%ymm1,%ymm1
- .byte 196,98,125,24,5,214,18,2,0 // vbroadcastss 0x212d6(%rip),%ymm8 # 389b4 <_sk_srcover_bgra_8888_sse2_lowp+0x2b0>
+ .byte 196,98,125,24,5,70,20,2,0 // vbroadcastss 0x21446(%rip),%ymm8 # 38b24 <_sk_srcover_bgra_8888_sse2_lowp+0x2b0>
.byte 197,60,92,128,192,0,0,0 // vsubps 0xc0(%rax),%ymm8,%ymm8
- .byte 196,98,125,24,13,29,20,2,0 // vbroadcastss 0x2141d(%rip),%ymm9 # 38b0c <_sk_srcover_bgra_8888_sse2_lowp+0x408>
+ .byte 196,98,125,24,13,141,21,2,0 // vbroadcastss 0x2158d(%rip),%ymm9 # 38c7c <_sk_srcover_bgra_8888_sse2_lowp+0x408>
.byte 196,65,60,89,201 // vmulps %ymm9,%ymm8,%ymm9
- .byte 196,98,125,24,21,19,20,2,0 // vbroadcastss 0x21413(%rip),%ymm10 # 38b10 <_sk_srcover_bgra_8888_sse2_lowp+0x40c>
+ .byte 196,98,125,24,21,131,21,2,0 // vbroadcastss 0x21583(%rip),%ymm10 # 38c80 <_sk_srcover_bgra_8888_sse2_lowp+0x40c>
.byte 196,65,52,88,202 // vaddps %ymm10,%ymm9,%ymm9
.byte 196,65,60,89,201 // vmulps %ymm9,%ymm8,%ymm9
- .byte 196,98,125,24,21,160,18,2,0 // vbroadcastss 0x212a0(%rip),%ymm10 # 389b0 <_sk_srcover_bgra_8888_sse2_lowp+0x2ac>
+ .byte 196,98,125,24,21,16,20,2,0 // vbroadcastss 0x21410(%rip),%ymm10 # 38b20 <_sk_srcover_bgra_8888_sse2_lowp+0x2ac>
.byte 196,65,52,88,202 // vaddps %ymm10,%ymm9,%ymm9
.byte 196,65,60,89,193 // vmulps %ymm9,%ymm8,%ymm8
- .byte 196,98,125,24,13,241,19,2,0 // vbroadcastss 0x213f1(%rip),%ymm9 # 38b14 <_sk_srcover_bgra_8888_sse2_lowp+0x410>
+ .byte 196,98,125,24,13,97,21,2,0 // vbroadcastss 0x21561(%rip),%ymm9 # 38c84 <_sk_srcover_bgra_8888_sse2_lowp+0x410>
.byte 196,65,60,88,193 // vaddps %ymm9,%ymm8,%ymm8
.byte 197,124,17,128,64,1,0,0 // vmovups %ymm8,0x140(%rax)
.byte 72,173 // lods %ds:(%rsi),%rax
@@ -32315,17 +32315,17 @@ HIDDEN _sk_bicubic_p1y_avx
FUNCTION(_sk_bicubic_p1y_avx)
_sk_bicubic_p1y_avx:
.byte 72,173 // lods %ds:(%rsi),%rax
- .byte 196,98,125,24,5,113,18,2,0 // vbroadcastss 0x21271(%rip),%ymm8 # 389b0 <_sk_srcover_bgra_8888_sse2_lowp+0x2ac>
+ .byte 196,98,125,24,5,225,19,2,0 // vbroadcastss 0x213e1(%rip),%ymm8 # 38b20 <_sk_srcover_bgra_8888_sse2_lowp+0x2ac>
.byte 197,188,88,72,64 // vaddps 0x40(%rax),%ymm8,%ymm1
.byte 197,124,16,136,192,0,0,0 // vmovups 0xc0(%rax),%ymm9
- .byte 196,98,125,24,21,183,19,2,0 // vbroadcastss 0x213b7(%rip),%ymm10 # 38b0c <_sk_srcover_bgra_8888_sse2_lowp+0x408>
+ .byte 196,98,125,24,21,39,21,2,0 // vbroadcastss 0x21527(%rip),%ymm10 # 38c7c <_sk_srcover_bgra_8888_sse2_lowp+0x408>
.byte 196,65,52,89,210 // vmulps %ymm10,%ymm9,%ymm10
- .byte 196,98,125,24,29,173,19,2,0 // vbroadcastss 0x213ad(%rip),%ymm11 # 38b10 <_sk_srcover_bgra_8888_sse2_lowp+0x40c>
+ .byte 196,98,125,24,29,29,21,2,0 // vbroadcastss 0x2151d(%rip),%ymm11 # 38c80 <_sk_srcover_bgra_8888_sse2_lowp+0x40c>
.byte 196,65,44,88,211 // vaddps %ymm11,%ymm10,%ymm10
.byte 196,65,52,89,210 // vmulps %ymm10,%ymm9,%ymm10
.byte 196,65,44,88,192 // vaddps %ymm8,%ymm10,%ymm8
.byte 196,65,52,89,192 // vmulps %ymm8,%ymm9,%ymm8
- .byte 196,98,125,24,13,148,19,2,0 // vbroadcastss 0x21394(%rip),%ymm9 # 38b14 <_sk_srcover_bgra_8888_sse2_lowp+0x410>
+ .byte 196,98,125,24,13,4,21,2,0 // vbroadcastss 0x21504(%rip),%ymm9 # 38c84 <_sk_srcover_bgra_8888_sse2_lowp+0x410>
.byte 196,65,60,88,193 // vaddps %ymm9,%ymm8,%ymm8
.byte 197,124,17,128,64,1,0,0 // vmovups %ymm8,0x140(%rax)
.byte 72,173 // lods %ds:(%rsi),%rax
@@ -32336,13 +32336,13 @@ HIDDEN _sk_bicubic_p3y_avx
FUNCTION(_sk_bicubic_p3y_avx)
_sk_bicubic_p3y_avx:
.byte 72,173 // lods %ds:(%rsi),%rax
- .byte 196,226,125,24,13,116,19,2,0 // vbroadcastss 0x21374(%rip),%ymm1 # 38b10 <_sk_srcover_bgra_8888_sse2_lowp+0x40c>
+ .byte 196,226,125,24,13,228,20,2,0 // vbroadcastss 0x214e4(%rip),%ymm1 # 38c80 <_sk_srcover_bgra_8888_sse2_lowp+0x40c>
.byte 197,244,88,72,64 // vaddps 0x40(%rax),%ymm1,%ymm1
.byte 197,124,16,128,192,0,0,0 // vmovups 0xc0(%rax),%ymm8
.byte 196,65,60,89,200 // vmulps %ymm8,%ymm8,%ymm9
- .byte 196,98,125,24,21,81,19,2,0 // vbroadcastss 0x21351(%rip),%ymm10 # 38b08 <_sk_srcover_bgra_8888_sse2_lowp+0x404>
+ .byte 196,98,125,24,21,193,20,2,0 // vbroadcastss 0x214c1(%rip),%ymm10 # 38c78 <_sk_srcover_bgra_8888_sse2_lowp+0x404>
.byte 196,65,60,89,194 // vmulps %ymm10,%ymm8,%ymm8
- .byte 196,98,125,24,21,99,18,2,0 // vbroadcastss 0x21263(%rip),%ymm10 # 38a28 <_sk_srcover_bgra_8888_sse2_lowp+0x324>
+ .byte 196,98,125,24,21,211,19,2,0 // vbroadcastss 0x213d3(%rip),%ymm10 # 38b98 <_sk_srcover_bgra_8888_sse2_lowp+0x324>
.byte 196,65,60,88,194 // vaddps %ymm10,%ymm8,%ymm8
.byte 196,65,52,89,192 // vmulps %ymm8,%ymm9,%ymm8
.byte 197,124,17,128,64,1,0,0 // vmovups %ymm8,0x140(%rax)
@@ -32490,7 +32490,7 @@ _sk_clut_3D_avx:
.byte 72,139,0 // mov (%rax),%rax
.byte 197,217,254,207 // vpaddd %xmm7,%xmm4,%xmm1
.byte 197,249,254,193 // vpaddd %xmm1,%xmm0,%xmm0
- .byte 196,226,121,24,37,233,16,2,0 // vbroadcastss 0x210e9(%rip),%xmm4 # 38b1c <_sk_srcover_bgra_8888_sse2_lowp+0x418>
+ .byte 196,226,121,24,37,89,18,2,0 // vbroadcastss 0x21259(%rip),%xmm4 # 38c8c <_sk_srcover_bgra_8888_sse2_lowp+0x418>
.byte 196,98,121,64,228 // vpmulld %xmm4,%xmm0,%xmm12
.byte 196,226,49,64,198 // vpmulld %xmm6,%xmm9,%xmm0
.byte 197,249,127,132,36,128,0,0,0 // vmovdqa %xmm0,0x80(%rsp)
@@ -32544,7 +32544,7 @@ _sk_clut_3D_avx:
.byte 196,161,122,16,60,128 // vmovss (%rax,%r8,4),%xmm7
.byte 196,227,73,33,247,48 // vinsertps $0x30,%xmm7,%xmm6,%xmm6
.byte 196,227,77,24,237,1 // vinsertf128 $0x1,%xmm5,%ymm6,%ymm5
- .byte 196,98,121,24,21,98,14,2,0 // vbroadcastss 0x20e62(%rip),%xmm10 # 389c0 <_sk_srcover_bgra_8888_sse2_lowp+0x2bc>
+ .byte 196,98,121,24,21,210,15,2,0 // vbroadcastss 0x20fd2(%rip),%xmm10 # 38b30 <_sk_srcover_bgra_8888_sse2_lowp+0x2bc>
.byte 196,193,105,254,210 // vpaddd %xmm10,%xmm2,%xmm2
.byte 196,195,249,22,208,1 // vpextrq $0x1,%xmm2,%r8
.byte 196,193,249,126,209 // vmovq %xmm2,%r9
@@ -32572,7 +32572,7 @@ _sk_clut_3D_avx:
.byte 196,161,122,16,60,128 // vmovss (%rax,%r8,4),%xmm7
.byte 196,227,73,33,247,48 // vinsertps $0x30,%xmm7,%xmm6,%xmm6
.byte 196,227,77,24,210,1 // vinsertf128 $0x1,%xmm2,%ymm6,%ymm2
- .byte 196,226,125,24,61,38,15,2,0 // vbroadcastss 0x20f26(%rip),%ymm7 # 38b18 <_sk_srcover_bgra_8888_sse2_lowp+0x414>
+ .byte 196,226,125,24,61,150,16,2,0 // vbroadcastss 0x21096(%rip),%ymm7 # 38c88 <_sk_srcover_bgra_8888_sse2_lowp+0x414>
.byte 197,148,88,247 // vaddps %ymm7,%ymm13,%ymm6
.byte 197,124,40,231 // vmovaps %ymm7,%ymm12
.byte 197,124,17,36,36 // vmovups %ymm12,(%rsp)
@@ -33315,7 +33315,7 @@ _sk_clut_4D_avx:
.byte 197,249,127,68,36,32 // vmovdqa %xmm0,0x20(%rsp)
.byte 197,225,254,216 // vpaddd %xmm0,%xmm3,%xmm3
.byte 197,233,254,195 // vpaddd %xmm3,%xmm2,%xmm0
- .byte 196,98,121,24,13,234,255,1,0 // vbroadcastss 0x1ffea(%rip),%xmm9 # 38b1c <_sk_srcover_bgra_8888_sse2_lowp+0x418>
+ .byte 196,98,121,24,13,90,1,2,0 // vbroadcastss 0x2015a(%rip),%xmm9 # 38c8c <_sk_srcover_bgra_8888_sse2_lowp+0x418>
.byte 196,194,121,64,209 // vpmulld %xmm9,%xmm0,%xmm2
.byte 196,226,81,64,199 // vpmulld %xmm7,%xmm5,%xmm0
.byte 197,249,127,132,36,96,1,0,0 // vmovdqa %xmm0,0x160(%rsp)
@@ -33372,7 +33372,7 @@ _sk_clut_4D_avx:
.byte 196,161,122,16,60,128 // vmovss (%rax,%r8,4),%xmm7
.byte 196,227,73,33,247,48 // vinsertps $0x30,%xmm7,%xmm6,%xmm6
.byte 196,227,77,24,237,1 // vinsertf128 $0x1,%xmm5,%ymm6,%ymm5
- .byte 196,98,121,24,21,83,253,1,0 // vbroadcastss 0x1fd53(%rip),%xmm10 # 389c0 <_sk_srcover_bgra_8888_sse2_lowp+0x2bc>
+ .byte 196,98,121,24,21,195,254,1,0 // vbroadcastss 0x1fec3(%rip),%xmm10 # 38b30 <_sk_srcover_bgra_8888_sse2_lowp+0x2bc>
.byte 196,193,121,254,194 // vpaddd %xmm10,%xmm0,%xmm0
.byte 196,195,249,22,192,1 // vpextrq $0x1,%xmm0,%r8
.byte 196,193,249,126,193 // vmovq %xmm0,%r9
@@ -33400,7 +33400,7 @@ _sk_clut_4D_avx:
.byte 196,161,122,16,52,128 // vmovss (%rax,%r8,4),%xmm6
.byte 196,227,105,33,214,48 // vinsertps $0x30,%xmm6,%xmm2,%xmm2
.byte 196,227,109,24,208,1 // vinsertf128 $0x1,%xmm0,%ymm2,%ymm2
- .byte 196,98,125,24,37,23,254,1,0 // vbroadcastss 0x1fe17(%rip),%ymm12 # 38b18 <_sk_srcover_bgra_8888_sse2_lowp+0x414>
+ .byte 196,98,125,24,37,135,255,1,0 // vbroadcastss 0x1ff87(%rip),%ymm12 # 38c88 <_sk_srcover_bgra_8888_sse2_lowp+0x414>
.byte 196,193,4,88,196 // vaddps %ymm12,%ymm15,%ymm0
.byte 197,124,17,36,36 // vmovups %ymm12,(%rsp)
.byte 197,254,91,192 // vcvttps2dq %ymm0,%ymm0
@@ -34798,7 +34798,7 @@ _sk_clut_4D_avx:
.byte 197,228,89,210 // vmulps %ymm2,%ymm3,%ymm2
.byte 197,220,88,210 // vaddps %ymm2,%ymm4,%ymm2
.byte 72,173 // lods %ds:(%rsi),%rax
- .byte 196,226,125,24,29,116,223,1,0 // vbroadcastss 0x1df74(%rip),%ymm3 # 389b4 <_sk_srcover_bgra_8888_sse2_lowp+0x2b0>
+ .byte 196,226,125,24,29,228,224,1,0 // vbroadcastss 0x1e0e4(%rip),%ymm3 # 38b24 <_sk_srcover_bgra_8888_sse2_lowp+0x2b0>
.byte 197,252,16,164,36,64,2,0,0 // vmovups 0x240(%rsp),%ymm4
.byte 197,252,16,172,36,96,2,0,0 // vmovups 0x260(%rsp),%ymm5
.byte 197,252,16,180,36,128,2,0,0 // vmovups 0x280(%rsp),%ymm6
@@ -34814,18 +34814,18 @@ HIDDEN _sk_gauss_a_to_rgba_avx
.globl _sk_gauss_a_to_rgba_avx
FUNCTION(_sk_gauss_a_to_rgba_avx)
_sk_gauss_a_to_rgba_avx:
- .byte 196,226,125,24,5,163,224,1,0 // vbroadcastss 0x1e0a3(%rip),%ymm0 # 38b20 <_sk_srcover_bgra_8888_sse2_lowp+0x41c>
+ .byte 196,226,125,24,5,19,226,1,0 // vbroadcastss 0x1e213(%rip),%ymm0 # 38c90 <_sk_srcover_bgra_8888_sse2_lowp+0x41c>
.byte 197,228,89,192 // vmulps %ymm0,%ymm3,%ymm0
- .byte 196,226,125,24,13,154,224,1,0 // vbroadcastss 0x1e09a(%rip),%ymm1 # 38b24 <_sk_srcover_bgra_8888_sse2_lowp+0x420>
+ .byte 196,226,125,24,13,10,226,1,0 // vbroadcastss 0x1e20a(%rip),%ymm1 # 38c94 <_sk_srcover_bgra_8888_sse2_lowp+0x420>
.byte 197,252,88,193 // vaddps %ymm1,%ymm0,%ymm0
.byte 197,252,89,195 // vmulps %ymm3,%ymm0,%ymm0
- .byte 196,226,125,24,13,141,224,1,0 // vbroadcastss 0x1e08d(%rip),%ymm1 # 38b28 <_sk_srcover_bgra_8888_sse2_lowp+0x424>
+ .byte 196,226,125,24,13,253,225,1,0 // vbroadcastss 0x1e1fd(%rip),%ymm1 # 38c98 <_sk_srcover_bgra_8888_sse2_lowp+0x424>
.byte 197,252,88,193 // vaddps %ymm1,%ymm0,%ymm0
.byte 197,252,89,195 // vmulps %ymm3,%ymm0,%ymm0
- .byte 196,226,125,24,13,128,224,1,0 // vbroadcastss 0x1e080(%rip),%ymm1 # 38b2c <_sk_srcover_bgra_8888_sse2_lowp+0x428>
+ .byte 196,226,125,24,13,240,225,1,0 // vbroadcastss 0x1e1f0(%rip),%ymm1 # 38c9c <_sk_srcover_bgra_8888_sse2_lowp+0x428>
.byte 197,252,88,193 // vaddps %ymm1,%ymm0,%ymm0
.byte 197,252,89,195 // vmulps %ymm3,%ymm0,%ymm0
- .byte 196,226,125,24,13,115,224,1,0 // vbroadcastss 0x1e073(%rip),%ymm1 # 38b30 <_sk_srcover_bgra_8888_sse2_lowp+0x42c>
+ .byte 196,226,125,24,13,227,225,1,0 // vbroadcastss 0x1e1e3(%rip),%ymm1 # 38ca0 <_sk_srcover_bgra_8888_sse2_lowp+0x42c>
.byte 197,252,88,193 // vaddps %ymm1,%ymm0,%ymm0
.byte 72,173 // lods %ds:(%rsi),%rax
.byte 197,252,40,200 // vmovaps %ymm0,%ymm1
@@ -34925,9 +34925,9 @@ _sk_seed_shader_sse41:
.byte 102,15,110,201 // movd %ecx,%xmm1
.byte 102,15,112,201,0 // pshufd $0x0,%xmm1,%xmm1
.byte 15,91,201 // cvtdq2ps %xmm1,%xmm1
- .byte 15,88,13,34,231,1,0 // addps 0x1e722(%rip),%xmm1 # 392e0 <_sk_srcover_bgra_8888_sse2_lowp+0xbdc>
+ .byte 15,88,13,130,232,1,0 // addps 0x1e882(%rip),%xmm1 # 39440 <_sk_srcover_bgra_8888_sse2_lowp+0xbcc>
.byte 72,173 // lods %ds:(%rsi),%rax
- .byte 15,40,21,41,231,1,0 // movaps 0x1e729(%rip),%xmm2 # 392f0 <_sk_srcover_bgra_8888_sse2_lowp+0xbec>
+ .byte 15,40,21,137,232,1,0 // movaps 0x1e889(%rip),%xmm2 # 39450 <_sk_srcover_bgra_8888_sse2_lowp+0xbdc>
.byte 15,87,219 // xorps %xmm3,%xmm3
.byte 15,87,228 // xorps %xmm4,%xmm4
.byte 15,87,237 // xorps %xmm5,%xmm5
@@ -34942,18 +34942,18 @@ _sk_dither_sse41:
.byte 72,173 // lods %ds:(%rsi),%rax
.byte 102,68,15,110,194 // movd %edx,%xmm8
.byte 102,69,15,112,192,0 // pshufd $0x0,%xmm8,%xmm8
- .byte 102,68,15,254,5,210,230,1,0 // paddd 0x1e6d2(%rip),%xmm8 # 392c0 <_sk_srcover_bgra_8888_sse2_lowp+0xbbc>
+ .byte 102,68,15,254,5,50,232,1,0 // paddd 0x1e832(%rip),%xmm8 # 39420 <_sk_srcover_bgra_8888_sse2_lowp+0xbac>
.byte 102,68,15,110,201 // movd %ecx,%xmm9
.byte 102,69,15,112,201,0 // pshufd $0x0,%xmm9,%xmm9
.byte 102,69,15,239,200 // pxor %xmm8,%xmm9
- .byte 102,68,15,111,21,249,230,1,0 // movdqa 0x1e6f9(%rip),%xmm10 # 39300 <_sk_srcover_bgra_8888_sse2_lowp+0xbfc>
+ .byte 102,68,15,111,21,89,232,1,0 // movdqa 0x1e859(%rip),%xmm10 # 39460 <_sk_srcover_bgra_8888_sse2_lowp+0xbec>
.byte 102,69,15,111,217 // movdqa %xmm9,%xmm11
.byte 102,69,15,219,218 // pand %xmm10,%xmm11
.byte 102,65,15,114,243,5 // pslld $0x5,%xmm11
.byte 102,69,15,219,208 // pand %xmm8,%xmm10
.byte 102,65,15,114,242,4 // pslld $0x4,%xmm10
- .byte 102,68,15,111,37,229,230,1,0 // movdqa 0x1e6e5(%rip),%xmm12 # 39310 <_sk_srcover_bgra_8888_sse2_lowp+0xc0c>
- .byte 102,68,15,111,45,236,230,1,0 // movdqa 0x1e6ec(%rip),%xmm13 # 39320 <_sk_srcover_bgra_8888_sse2_lowp+0xc1c>
+ .byte 102,68,15,111,37,69,232,1,0 // movdqa 0x1e845(%rip),%xmm12 # 39470 <_sk_srcover_bgra_8888_sse2_lowp+0xbfc>
+ .byte 102,68,15,111,45,76,232,1,0 // movdqa 0x1e84c(%rip),%xmm13 # 39480 <_sk_srcover_bgra_8888_sse2_lowp+0xc0c>
.byte 102,69,15,111,241 // movdqa %xmm9,%xmm14
.byte 102,69,15,219,245 // pand %xmm13,%xmm14
.byte 102,65,15,114,246,2 // pslld $0x2,%xmm14
@@ -34969,8 +34969,8 @@ _sk_dither_sse41:
.byte 102,69,15,235,198 // por %xmm14,%xmm8
.byte 102,69,15,235,193 // por %xmm9,%xmm8
.byte 69,15,91,192 // cvtdq2ps %xmm8,%xmm8
- .byte 68,15,89,5,167,230,1,0 // mulps 0x1e6a7(%rip),%xmm8 # 39330 <_sk_srcover_bgra_8888_sse2_lowp+0xc2c>
- .byte 68,15,88,5,175,230,1,0 // addps 0x1e6af(%rip),%xmm8 # 39340 <_sk_srcover_bgra_8888_sse2_lowp+0xc3c>
+ .byte 68,15,89,5,7,232,1,0 // mulps 0x1e807(%rip),%xmm8 # 39490 <_sk_srcover_bgra_8888_sse2_lowp+0xc1c>
+ .byte 68,15,88,5,15,232,1,0 // addps 0x1e80f(%rip),%xmm8 # 394a0 <_sk_srcover_bgra_8888_sse2_lowp+0xc2c>
.byte 243,68,15,16,16 // movss (%rax),%xmm10
.byte 69,15,198,210,0 // shufps $0x0,%xmm10,%xmm10
.byte 69,15,89,208 // mulps %xmm8,%xmm10
@@ -35012,7 +35012,7 @@ HIDDEN _sk_black_color_sse41
FUNCTION(_sk_black_color_sse41)
_sk_black_color_sse41:
.byte 72,173 // lods %ds:(%rsi),%rax
- .byte 15,40,29,229,229,1,0 // movaps 0x1e5e5(%rip),%xmm3 # 392f0 <_sk_srcover_bgra_8888_sse2_lowp+0xbec>
+ .byte 15,40,29,69,231,1,0 // movaps 0x1e745(%rip),%xmm3 # 39450 <_sk_srcover_bgra_8888_sse2_lowp+0xbdc>
.byte 15,87,192 // xorps %xmm0,%xmm0
.byte 15,87,201 // xorps %xmm1,%xmm1
.byte 15,87,210 // xorps %xmm2,%xmm2
@@ -35023,7 +35023,7 @@ HIDDEN _sk_white_color_sse41
FUNCTION(_sk_white_color_sse41)
_sk_white_color_sse41:
.byte 72,173 // lods %ds:(%rsi),%rax
- .byte 15,40,5,209,229,1,0 // movaps 0x1e5d1(%rip),%xmm0 # 392f0 <_sk_srcover_bgra_8888_sse2_lowp+0xbec>
+ .byte 15,40,5,49,231,1,0 // movaps 0x1e731(%rip),%xmm0 # 39450 <_sk_srcover_bgra_8888_sse2_lowp+0xbdc>
.byte 15,40,200 // movaps %xmm0,%xmm1
.byte 15,40,208 // movaps %xmm0,%xmm2
.byte 15,40,216 // movaps %xmm0,%xmm3
@@ -35069,7 +35069,7 @@ HIDDEN _sk_srcatop_sse41
FUNCTION(_sk_srcatop_sse41)
_sk_srcatop_sse41:
.byte 15,89,199 // mulps %xmm7,%xmm0
- .byte 68,15,40,5,129,229,1,0 // movaps 0x1e581(%rip),%xmm8 # 392f0 <_sk_srcover_bgra_8888_sse2_lowp+0xbec>
+ .byte 68,15,40,5,225,230,1,0 // movaps 0x1e6e1(%rip),%xmm8 # 39450 <_sk_srcover_bgra_8888_sse2_lowp+0xbdc>
.byte 68,15,92,195 // subps %xmm3,%xmm8
.byte 69,15,40,200 // movaps %xmm8,%xmm9
.byte 68,15,89,204 // mulps %xmm4,%xmm9
@@ -35094,7 +35094,7 @@ FUNCTION(_sk_dstatop_sse41)
_sk_dstatop_sse41:
.byte 68,15,40,195 // movaps %xmm3,%xmm8
.byte 68,15,89,196 // mulps %xmm4,%xmm8
- .byte 68,15,40,13,52,229,1,0 // movaps 0x1e534(%rip),%xmm9 # 392f0 <_sk_srcover_bgra_8888_sse2_lowp+0xbec>
+ .byte 68,15,40,13,148,230,1,0 // movaps 0x1e694(%rip),%xmm9 # 39450 <_sk_srcover_bgra_8888_sse2_lowp+0xbdc>
.byte 68,15,92,207 // subps %xmm7,%xmm9
.byte 65,15,89,193 // mulps %xmm9,%xmm0
.byte 65,15,88,192 // addps %xmm8,%xmm0
@@ -35141,7 +35141,7 @@ HIDDEN _sk_srcout_sse41
.globl _sk_srcout_sse41
FUNCTION(_sk_srcout_sse41)
_sk_srcout_sse41:
- .byte 68,15,40,5,200,228,1,0 // movaps 0x1e4c8(%rip),%xmm8 # 392f0 <_sk_srcover_bgra_8888_sse2_lowp+0xbec>
+ .byte 68,15,40,5,40,230,1,0 // movaps 0x1e628(%rip),%xmm8 # 39450 <_sk_srcover_bgra_8888_sse2_lowp+0xbdc>
.byte 68,15,92,199 // subps %xmm7,%xmm8
.byte 65,15,89,192 // mulps %xmm8,%xmm0
.byte 65,15,89,200 // mulps %xmm8,%xmm1
@@ -35154,7 +35154,7 @@ HIDDEN _sk_dstout_sse41
.globl _sk_dstout_sse41
FUNCTION(_sk_dstout_sse41)
_sk_dstout_sse41:
- .byte 68,15,40,5,168,228,1,0 // movaps 0x1e4a8(%rip),%xmm8 # 392f0 <_sk_srcover_bgra_8888_sse2_lowp+0xbec>
+ .byte 68,15,40,5,8,230,1,0 // movaps 0x1e608(%rip),%xmm8 # 39450 <_sk_srcover_bgra_8888_sse2_lowp+0xbdc>
.byte 68,15,92,195 // subps %xmm3,%xmm8
.byte 65,15,40,192 // movaps %xmm8,%xmm0
.byte 15,89,196 // mulps %xmm4,%xmm0
@@ -35171,7 +35171,7 @@ HIDDEN _sk_srcover_sse41
.globl _sk_srcover_sse41
FUNCTION(_sk_srcover_sse41)
_sk_srcover_sse41:
- .byte 68,15,40,5,123,228,1,0 // movaps 0x1e47b(%rip),%xmm8 # 392f0 <_sk_srcover_bgra_8888_sse2_lowp+0xbec>
+ .byte 68,15,40,5,219,229,1,0 // movaps 0x1e5db(%rip),%xmm8 # 39450 <_sk_srcover_bgra_8888_sse2_lowp+0xbdc>
.byte 68,15,92,195 // subps %xmm3,%xmm8
.byte 69,15,40,200 // movaps %xmm8,%xmm9
.byte 68,15,89,204 // mulps %xmm4,%xmm9
@@ -35191,7 +35191,7 @@ HIDDEN _sk_dstover_sse41
.globl _sk_dstover_sse41
FUNCTION(_sk_dstover_sse41)
_sk_dstover_sse41:
- .byte 68,15,40,5,63,228,1,0 // movaps 0x1e43f(%rip),%xmm8 # 392f0 <_sk_srcover_bgra_8888_sse2_lowp+0xbec>
+ .byte 68,15,40,5,159,229,1,0 // movaps 0x1e59f(%rip),%xmm8 # 39450 <_sk_srcover_bgra_8888_sse2_lowp+0xbdc>
.byte 68,15,92,199 // subps %xmm7,%xmm8
.byte 65,15,89,192 // mulps %xmm8,%xmm0
.byte 15,88,196 // addps %xmm4,%xmm0
@@ -35219,7 +35219,7 @@ HIDDEN _sk_multiply_sse41
.globl _sk_multiply_sse41
FUNCTION(_sk_multiply_sse41)
_sk_multiply_sse41:
- .byte 68,15,40,5,3,228,1,0 // movaps 0x1e403(%rip),%xmm8 # 392f0 <_sk_srcover_bgra_8888_sse2_lowp+0xbec>
+ .byte 68,15,40,5,99,229,1,0 // movaps 0x1e563(%rip),%xmm8 # 39450 <_sk_srcover_bgra_8888_sse2_lowp+0xbdc>
.byte 69,15,40,200 // movaps %xmm8,%xmm9
.byte 68,15,92,207 // subps %xmm7,%xmm9
.byte 69,15,40,209 // movaps %xmm9,%xmm10
@@ -35257,7 +35257,7 @@ HIDDEN _sk_plus__sse41
FUNCTION(_sk_plus__sse41)
_sk_plus__sse41:
.byte 15,88,196 // addps %xmm4,%xmm0
- .byte 68,15,40,5,132,227,1,0 // movaps 0x1e384(%rip),%xmm8 # 392f0 <_sk_srcover_bgra_8888_sse2_lowp+0xbec>
+ .byte 68,15,40,5,228,228,1,0 // movaps 0x1e4e4(%rip),%xmm8 # 39450 <_sk_srcover_bgra_8888_sse2_lowp+0xbdc>
.byte 65,15,93,192 // minps %xmm8,%xmm0
.byte 15,88,205 // addps %xmm5,%xmm1
.byte 65,15,93,200 // minps %xmm8,%xmm1
@@ -35299,7 +35299,7 @@ HIDDEN _sk_xor__sse41
FUNCTION(_sk_xor__sse41)
_sk_xor__sse41:
.byte 68,15,40,195 // movaps %xmm3,%xmm8
- .byte 15,40,29,16,227,1,0 // movaps 0x1e310(%rip),%xmm3 # 392f0 <_sk_srcover_bgra_8888_sse2_lowp+0xbec>
+ .byte 15,40,29,112,228,1,0 // movaps 0x1e470(%rip),%xmm3 # 39450 <_sk_srcover_bgra_8888_sse2_lowp+0xbdc>
.byte 68,15,40,203 // movaps %xmm3,%xmm9
.byte 68,15,92,207 // subps %xmm7,%xmm9
.byte 65,15,89,193 // mulps %xmm9,%xmm0
@@ -35347,7 +35347,7 @@ _sk_darken_sse41:
.byte 68,15,89,206 // mulps %xmm6,%xmm9
.byte 65,15,95,209 // maxps %xmm9,%xmm2
.byte 68,15,92,194 // subps %xmm2,%xmm8
- .byte 15,40,21,107,226,1,0 // movaps 0x1e26b(%rip),%xmm2 # 392f0 <_sk_srcover_bgra_8888_sse2_lowp+0xbec>
+ .byte 15,40,21,203,227,1,0 // movaps 0x1e3cb(%rip),%xmm2 # 39450 <_sk_srcover_bgra_8888_sse2_lowp+0xbdc>
.byte 15,92,211 // subps %xmm3,%xmm2
.byte 15,89,215 // mulps %xmm7,%xmm2
.byte 15,88,218 // addps %xmm2,%xmm3
@@ -35381,7 +35381,7 @@ _sk_lighten_sse41:
.byte 68,15,89,206 // mulps %xmm6,%xmm9
.byte 65,15,93,209 // minps %xmm9,%xmm2
.byte 68,15,92,194 // subps %xmm2,%xmm8
- .byte 15,40,21,0,226,1,0 // movaps 0x1e200(%rip),%xmm2 # 392f0 <_sk_srcover_bgra_8888_sse2_lowp+0xbec>
+ .byte 15,40,21,96,227,1,0 // movaps 0x1e360(%rip),%xmm2 # 39450 <_sk_srcover_bgra_8888_sse2_lowp+0xbdc>
.byte 15,92,211 // subps %xmm3,%xmm2
.byte 15,89,215 // mulps %xmm7,%xmm2
.byte 15,88,218 // addps %xmm2,%xmm3
@@ -35418,7 +35418,7 @@ _sk_difference_sse41:
.byte 65,15,93,209 // minps %xmm9,%xmm2
.byte 15,88,210 // addps %xmm2,%xmm2
.byte 68,15,92,194 // subps %xmm2,%xmm8
- .byte 15,40,21,138,225,1,0 // movaps 0x1e18a(%rip),%xmm2 # 392f0 <_sk_srcover_bgra_8888_sse2_lowp+0xbec>
+ .byte 15,40,21,234,226,1,0 // movaps 0x1e2ea(%rip),%xmm2 # 39450 <_sk_srcover_bgra_8888_sse2_lowp+0xbdc>
.byte 15,92,211 // subps %xmm3,%xmm2
.byte 15,89,215 // mulps %xmm7,%xmm2
.byte 15,88,218 // addps %xmm2,%xmm3
@@ -35446,7 +35446,7 @@ _sk_exclusion_sse41:
.byte 15,89,214 // mulps %xmm6,%xmm2
.byte 15,88,210 // addps %xmm2,%xmm2
.byte 68,15,92,194 // subps %xmm2,%xmm8
- .byte 15,40,21,58,225,1,0 // movaps 0x1e13a(%rip),%xmm2 # 392f0 <_sk_srcover_bgra_8888_sse2_lowp+0xbec>
+ .byte 15,40,21,154,226,1,0 // movaps 0x1e29a(%rip),%xmm2 # 39450 <_sk_srcover_bgra_8888_sse2_lowp+0xbdc>
.byte 15,92,211 // subps %xmm3,%xmm2
.byte 15,89,215 // mulps %xmm7,%xmm2
.byte 15,88,218 // addps %xmm2,%xmm3
@@ -35458,7 +35458,7 @@ HIDDEN _sk_colorburn_sse41
.globl _sk_colorburn_sse41
FUNCTION(_sk_colorburn_sse41)
_sk_colorburn_sse41:
- .byte 68,15,40,29,33,225,1,0 // movaps 0x1e121(%rip),%xmm11 # 392f0 <_sk_srcover_bgra_8888_sse2_lowp+0xbec>
+ .byte 68,15,40,29,129,226,1,0 // movaps 0x1e281(%rip),%xmm11 # 39450 <_sk_srcover_bgra_8888_sse2_lowp+0xbdc>
.byte 69,15,40,227 // movaps %xmm11,%xmm12
.byte 68,15,92,231 // subps %xmm7,%xmm12
.byte 69,15,40,204 // movaps %xmm12,%xmm9
@@ -35542,7 +35542,7 @@ HIDDEN _sk_colordodge_sse41
FUNCTION(_sk_colordodge_sse41)
_sk_colordodge_sse41:
.byte 68,15,40,192 // movaps %xmm0,%xmm8
- .byte 68,15,40,21,235,223,1,0 // movaps 0x1dfeb(%rip),%xmm10 # 392f0 <_sk_srcover_bgra_8888_sse2_lowp+0xbec>
+ .byte 68,15,40,21,75,225,1,0 // movaps 0x1e14b(%rip),%xmm10 # 39450 <_sk_srcover_bgra_8888_sse2_lowp+0xbdc>
.byte 69,15,40,218 // movaps %xmm10,%xmm11
.byte 68,15,92,223 // subps %xmm7,%xmm11
.byte 69,15,40,227 // movaps %xmm11,%xmm12
@@ -35624,7 +35624,7 @@ _sk_hardlight_sse41:
.byte 15,40,244 // movaps %xmm4,%xmm6
.byte 15,40,227 // movaps %xmm3,%xmm4
.byte 68,15,40,200 // movaps %xmm0,%xmm9
- .byte 68,15,40,21,182,222,1,0 // movaps 0x1deb6(%rip),%xmm10 # 392f0 <_sk_srcover_bgra_8888_sse2_lowp+0xbec>
+ .byte 68,15,40,21,22,224,1,0 // movaps 0x1e016(%rip),%xmm10 # 39450 <_sk_srcover_bgra_8888_sse2_lowp+0xbdc>
.byte 65,15,40,234 // movaps %xmm10,%xmm5
.byte 15,92,239 // subps %xmm7,%xmm5
.byte 15,40,197 // movaps %xmm5,%xmm0
@@ -35707,7 +35707,7 @@ FUNCTION(_sk_overlay_sse41)
_sk_overlay_sse41:
.byte 68,15,40,201 // movaps %xmm1,%xmm9
.byte 68,15,40,240 // movaps %xmm0,%xmm14
- .byte 68,15,40,21,139,221,1,0 // movaps 0x1dd8b(%rip),%xmm10 # 392f0 <_sk_srcover_bgra_8888_sse2_lowp+0xbec>
+ .byte 68,15,40,21,235,222,1,0 // movaps 0x1deeb(%rip),%xmm10 # 39450 <_sk_srcover_bgra_8888_sse2_lowp+0xbdc>
.byte 69,15,40,218 // movaps %xmm10,%xmm11
.byte 68,15,92,223 // subps %xmm7,%xmm11
.byte 65,15,40,195 // movaps %xmm11,%xmm0
@@ -35792,7 +35792,7 @@ _sk_softlight_sse41:
.byte 15,40,198 // movaps %xmm6,%xmm0
.byte 15,94,199 // divps %xmm7,%xmm0
.byte 65,15,84,193 // andps %xmm9,%xmm0
- .byte 15,40,13,82,220,1,0 // movaps 0x1dc52(%rip),%xmm1 # 392f0 <_sk_srcover_bgra_8888_sse2_lowp+0xbec>
+ .byte 15,40,13,178,221,1,0 // movaps 0x1ddb2(%rip),%xmm1 # 39450 <_sk_srcover_bgra_8888_sse2_lowp+0xbdc>
.byte 68,15,40,209 // movaps %xmm1,%xmm10
.byte 68,15,92,208 // subps %xmm0,%xmm10
.byte 68,15,40,240 // movaps %xmm0,%xmm14
@@ -35805,10 +35805,10 @@ _sk_softlight_sse41:
.byte 15,40,208 // movaps %xmm0,%xmm2
.byte 15,89,210 // mulps %xmm2,%xmm2
.byte 15,88,208 // addps %xmm0,%xmm2
- .byte 68,15,40,45,128,220,1,0 // movaps 0x1dc80(%rip),%xmm13 # 39350 <_sk_srcover_bgra_8888_sse2_lowp+0xc4c>
+ .byte 68,15,40,45,224,221,1,0 // movaps 0x1dde0(%rip),%xmm13 # 394b0 <_sk_srcover_bgra_8888_sse2_lowp+0xc3c>
.byte 69,15,88,245 // addps %xmm13,%xmm14
.byte 68,15,89,242 // mulps %xmm2,%xmm14
- .byte 68,15,40,37,128,220,1,0 // movaps 0x1dc80(%rip),%xmm12 # 39360 <_sk_srcover_bgra_8888_sse2_lowp+0xc5c>
+ .byte 68,15,40,37,224,221,1,0 // movaps 0x1dde0(%rip),%xmm12 # 394c0 <_sk_srcover_bgra_8888_sse2_lowp+0xc4c>
.byte 69,15,89,252 // mulps %xmm12,%xmm15
.byte 69,15,88,254 // addps %xmm14,%xmm15
.byte 15,40,198 // movaps %xmm6,%xmm0
@@ -35994,12 +35994,12 @@ _sk_hue_sse41:
.byte 68,15,84,208 // andps %xmm0,%xmm10
.byte 15,84,200 // andps %xmm0,%xmm1
.byte 68,15,84,232 // andps %xmm0,%xmm13
- .byte 15,40,5,235,217,1,0 // movaps 0x1d9eb(%rip),%xmm0 # 39370 <_sk_srcover_bgra_8888_sse2_lowp+0xc6c>
+ .byte 15,40,5,75,219,1,0 // movaps 0x1db4b(%rip),%xmm0 # 394d0 <_sk_srcover_bgra_8888_sse2_lowp+0xc5c>
.byte 68,15,89,224 // mulps %xmm0,%xmm12
- .byte 15,40,21,240,217,1,0 // movaps 0x1d9f0(%rip),%xmm2 # 39380 <_sk_srcover_bgra_8888_sse2_lowp+0xc7c>
+ .byte 15,40,21,80,219,1,0 // movaps 0x1db50(%rip),%xmm2 # 394e0 <_sk_srcover_bgra_8888_sse2_lowp+0xc6c>
.byte 15,89,250 // mulps %xmm2,%xmm7
.byte 65,15,88,252 // addps %xmm12,%xmm7
- .byte 68,15,40,53,241,217,1,0 // movaps 0x1d9f1(%rip),%xmm14 # 39390 <_sk_srcover_bgra_8888_sse2_lowp+0xc8c>
+ .byte 68,15,40,53,81,219,1,0 // movaps 0x1db51(%rip),%xmm14 # 394f0 <_sk_srcover_bgra_8888_sse2_lowp+0xc7c>
.byte 68,15,40,252 // movaps %xmm4,%xmm15
.byte 69,15,89,254 // mulps %xmm14,%xmm15
.byte 68,15,88,255 // addps %xmm7,%xmm15
@@ -36082,7 +36082,7 @@ _sk_hue_sse41:
.byte 65,15,88,214 // addps %xmm14,%xmm2
.byte 15,40,196 // movaps %xmm4,%xmm0
.byte 102,15,56,20,202 // blendvps %xmm0,%xmm2,%xmm1
- .byte 68,15,40,13,5,216,1,0 // movaps 0x1d805(%rip),%xmm9 # 392f0 <_sk_srcover_bgra_8888_sse2_lowp+0xbec>
+ .byte 68,15,40,13,101,217,1,0 // movaps 0x1d965(%rip),%xmm9 # 39450 <_sk_srcover_bgra_8888_sse2_lowp+0xbdc>
.byte 65,15,40,225 // movaps %xmm9,%xmm4
.byte 15,92,229 // subps %xmm5,%xmm4
.byte 15,40,68,36,200 // movaps -0x38(%rsp),%xmm0
@@ -36176,14 +36176,14 @@ _sk_saturation_sse41:
.byte 68,15,84,215 // andps %xmm7,%xmm10
.byte 68,15,84,223 // andps %xmm7,%xmm11
.byte 68,15,84,199 // andps %xmm7,%xmm8
- .byte 15,40,21,47,215,1,0 // movaps 0x1d72f(%rip),%xmm2 # 39370 <_sk_srcover_bgra_8888_sse2_lowp+0xc6c>
+ .byte 15,40,21,143,216,1,0 // movaps 0x1d88f(%rip),%xmm2 # 394d0 <_sk_srcover_bgra_8888_sse2_lowp+0xc5c>
.byte 15,40,221 // movaps %xmm5,%xmm3
.byte 15,89,218 // mulps %xmm2,%xmm3
- .byte 15,40,13,50,215,1,0 // movaps 0x1d732(%rip),%xmm1 # 39380 <_sk_srcover_bgra_8888_sse2_lowp+0xc7c>
+ .byte 15,40,13,146,216,1,0 // movaps 0x1d892(%rip),%xmm1 # 394e0 <_sk_srcover_bgra_8888_sse2_lowp+0xc6c>
.byte 15,40,254 // movaps %xmm6,%xmm7
.byte 15,89,249 // mulps %xmm1,%xmm7
.byte 15,88,251 // addps %xmm3,%xmm7
- .byte 68,15,40,45,49,215,1,0 // movaps 0x1d731(%rip),%xmm13 # 39390 <_sk_srcover_bgra_8888_sse2_lowp+0xc8c>
+ .byte 68,15,40,45,145,216,1,0 // movaps 0x1d891(%rip),%xmm13 # 394f0 <_sk_srcover_bgra_8888_sse2_lowp+0xc7c>
.byte 69,15,89,245 // mulps %xmm13,%xmm14
.byte 68,15,88,247 // addps %xmm7,%xmm14
.byte 65,15,40,218 // movaps %xmm10,%xmm3
@@ -36264,7 +36264,7 @@ _sk_saturation_sse41:
.byte 65,15,88,253 // addps %xmm13,%xmm7
.byte 65,15,40,192 // movaps %xmm8,%xmm0
.byte 102,68,15,56,20,223 // blendvps %xmm0,%xmm7,%xmm11
- .byte 68,15,40,13,71,213,1,0 // movaps 0x1d547(%rip),%xmm9 # 392f0 <_sk_srcover_bgra_8888_sse2_lowp+0xbec>
+ .byte 68,15,40,13,167,214,1,0 // movaps 0x1d6a7(%rip),%xmm9 # 39450 <_sk_srcover_bgra_8888_sse2_lowp+0xbdc>
.byte 69,15,40,193 // movaps %xmm9,%xmm8
.byte 68,15,92,204 // subps %xmm4,%xmm9
.byte 15,40,124,36,168 // movaps -0x58(%rsp),%xmm7
@@ -36319,14 +36319,14 @@ _sk_color_sse41:
.byte 15,40,231 // movaps %xmm7,%xmm4
.byte 68,15,89,244 // mulps %xmm4,%xmm14
.byte 15,89,204 // mulps %xmm4,%xmm1
- .byte 68,15,40,13,2,213,1,0 // movaps 0x1d502(%rip),%xmm9 # 39370 <_sk_srcover_bgra_8888_sse2_lowp+0xc6c>
+ .byte 68,15,40,13,98,214,1,0 // movaps 0x1d662(%rip),%xmm9 # 394d0 <_sk_srcover_bgra_8888_sse2_lowp+0xc5c>
.byte 65,15,40,250 // movaps %xmm10,%xmm7
.byte 65,15,89,249 // mulps %xmm9,%xmm7
- .byte 68,15,40,21,2,213,1,0 // movaps 0x1d502(%rip),%xmm10 # 39380 <_sk_srcover_bgra_8888_sse2_lowp+0xc7c>
+ .byte 68,15,40,21,98,214,1,0 // movaps 0x1d662(%rip),%xmm10 # 394e0 <_sk_srcover_bgra_8888_sse2_lowp+0xc6c>
.byte 65,15,40,219 // movaps %xmm11,%xmm3
.byte 65,15,89,218 // mulps %xmm10,%xmm3
.byte 15,88,223 // addps %xmm7,%xmm3
- .byte 68,15,40,29,255,212,1,0 // movaps 0x1d4ff(%rip),%xmm11 # 39390 <_sk_srcover_bgra_8888_sse2_lowp+0xc8c>
+ .byte 68,15,40,29,95,214,1,0 // movaps 0x1d65f(%rip),%xmm11 # 394f0 <_sk_srcover_bgra_8888_sse2_lowp+0xc7c>
.byte 69,15,40,236 // movaps %xmm12,%xmm13
.byte 69,15,89,235 // mulps %xmm11,%xmm13
.byte 68,15,88,235 // addps %xmm3,%xmm13
@@ -36411,7 +36411,7 @@ _sk_color_sse41:
.byte 65,15,88,251 // addps %xmm11,%xmm7
.byte 65,15,40,194 // movaps %xmm10,%xmm0
.byte 102,15,56,20,207 // blendvps %xmm0,%xmm7,%xmm1
- .byte 68,15,40,13,11,211,1,0 // movaps 0x1d30b(%rip),%xmm9 # 392f0 <_sk_srcover_bgra_8888_sse2_lowp+0xbec>
+ .byte 68,15,40,13,107,212,1,0 // movaps 0x1d46b(%rip),%xmm9 # 39450 <_sk_srcover_bgra_8888_sse2_lowp+0xbdc>
.byte 65,15,40,193 // movaps %xmm9,%xmm0
.byte 15,92,196 // subps %xmm4,%xmm0
.byte 68,15,89,192 // mulps %xmm0,%xmm8
@@ -36463,13 +36463,13 @@ _sk_luminosity_sse41:
.byte 69,15,89,216 // mulps %xmm8,%xmm11
.byte 68,15,40,203 // movaps %xmm3,%xmm9
.byte 68,15,89,205 // mulps %xmm5,%xmm9
- .byte 68,15,40,5,211,210,1,0 // movaps 0x1d2d3(%rip),%xmm8 # 39370 <_sk_srcover_bgra_8888_sse2_lowp+0xc6c>
+ .byte 68,15,40,5,51,212,1,0 // movaps 0x1d433(%rip),%xmm8 # 394d0 <_sk_srcover_bgra_8888_sse2_lowp+0xc5c>
.byte 65,15,89,192 // mulps %xmm8,%xmm0
- .byte 68,15,40,21,215,210,1,0 // movaps 0x1d2d7(%rip),%xmm10 # 39380 <_sk_srcover_bgra_8888_sse2_lowp+0xc7c>
+ .byte 68,15,40,21,55,212,1,0 // movaps 0x1d437(%rip),%xmm10 # 394e0 <_sk_srcover_bgra_8888_sse2_lowp+0xc6c>
.byte 15,40,233 // movaps %xmm1,%xmm5
.byte 65,15,89,234 // mulps %xmm10,%xmm5
.byte 15,88,232 // addps %xmm0,%xmm5
- .byte 68,15,40,37,213,210,1,0 // movaps 0x1d2d5(%rip),%xmm12 # 39390 <_sk_srcover_bgra_8888_sse2_lowp+0xc8c>
+ .byte 68,15,40,37,53,212,1,0 // movaps 0x1d435(%rip),%xmm12 # 394f0 <_sk_srcover_bgra_8888_sse2_lowp+0xc7c>
.byte 68,15,40,242 // movaps %xmm2,%xmm14
.byte 69,15,89,244 // mulps %xmm12,%xmm14
.byte 68,15,88,245 // addps %xmm5,%xmm14
@@ -36554,7 +36554,7 @@ _sk_luminosity_sse41:
.byte 65,15,88,244 // addps %xmm12,%xmm6
.byte 65,15,40,195 // movaps %xmm11,%xmm0
.byte 102,68,15,56,20,206 // blendvps %xmm0,%xmm6,%xmm9
- .byte 15,40,5,219,208,1,0 // movaps 0x1d0db(%rip),%xmm0 # 392f0 <_sk_srcover_bgra_8888_sse2_lowp+0xbec>
+ .byte 15,40,5,59,210,1,0 // movaps 0x1d23b(%rip),%xmm0 # 39450 <_sk_srcover_bgra_8888_sse2_lowp+0xbdc>
.byte 15,40,208 // movaps %xmm0,%xmm2
.byte 15,92,215 // subps %xmm7,%xmm2
.byte 15,89,226 // mulps %xmm2,%xmm4
@@ -36602,20 +36602,20 @@ _sk_srcover_rgba_8888_sse41:
.byte 15,133,219,0,0,0 // jne 1c37d <_sk_srcover_rgba_8888_sse41+0xfb>
.byte 243,65,15,111,60,128 // movdqu (%r8,%rax,4),%xmm7
.byte 72,133,255 // test %rdi,%rdi
- .byte 102,15,111,37,237,208,1,0 // movdqa 0x1d0ed(%rip),%xmm4 # 393a0 <_sk_srcover_bgra_8888_sse2_lowp+0xc9c>
+ .byte 102,15,111,37,77,210,1,0 // movdqa 0x1d24d(%rip),%xmm4 # 39500 <_sk_srcover_bgra_8888_sse2_lowp+0xc8c>
.byte 102,15,219,231 // pand %xmm7,%xmm4
.byte 15,91,228 // cvtdq2ps %xmm4,%xmm4
.byte 102,15,111,239 // movdqa %xmm7,%xmm5
- .byte 102,15,56,0,45,233,208,1,0 // pshufb 0x1d0e9(%rip),%xmm5 # 393b0 <_sk_srcover_bgra_8888_sse2_lowp+0xcac>
+ .byte 102,15,56,0,45,73,210,1,0 // pshufb 0x1d249(%rip),%xmm5 # 39510 <_sk_srcover_bgra_8888_sse2_lowp+0xc9c>
.byte 15,91,237 // cvtdq2ps %xmm5,%xmm5
.byte 102,15,111,247 // movdqa %xmm7,%xmm6
- .byte 102,15,56,0,53,233,208,1,0 // pshufb 0x1d0e9(%rip),%xmm6 # 393c0 <_sk_srcover_bgra_8888_sse2_lowp+0xcbc>
+ .byte 102,15,56,0,53,73,210,1,0 // pshufb 0x1d249(%rip),%xmm6 # 39520 <_sk_srcover_bgra_8888_sse2_lowp+0xcac>
.byte 15,91,246 // cvtdq2ps %xmm6,%xmm6
.byte 102,15,114,215,24 // psrld $0x18,%xmm7
.byte 15,91,255 // cvtdq2ps %xmm7,%xmm7
- .byte 68,15,40,5,6,208,1,0 // movaps 0x1d006(%rip),%xmm8 # 392f0 <_sk_srcover_bgra_8888_sse2_lowp+0xbec>
+ .byte 68,15,40,5,102,209,1,0 // movaps 0x1d166(%rip),%xmm8 # 39450 <_sk_srcover_bgra_8888_sse2_lowp+0xbdc>
.byte 68,15,92,195 // subps %xmm3,%xmm8
- .byte 68,15,40,37,218,208,1,0 // movaps 0x1d0da(%rip),%xmm12 # 393d0 <_sk_srcover_bgra_8888_sse2_lowp+0xccc>
+ .byte 68,15,40,37,58,210,1,0 // movaps 0x1d23a(%rip),%xmm12 # 39530 <_sk_srcover_bgra_8888_sse2_lowp+0xcbc>
.byte 65,15,89,196 // mulps %xmm12,%xmm0
.byte 69,15,40,200 // movaps %xmm8,%xmm9
.byte 68,15,89,204 // mulps %xmm4,%xmm9
@@ -36694,20 +36694,20 @@ _sk_srcover_bgra_8888_sse41:
.byte 15,133,219,0,0,0 // jne 1c4f6 <_sk_srcover_bgra_8888_sse41+0xfb>
.byte 243,65,15,111,60,128 // movdqu (%r8,%rax,4),%xmm7
.byte 72,133,255 // test %rdi,%rdi
- .byte 102,15,111,37,116,207,1,0 // movdqa 0x1cf74(%rip),%xmm4 # 393a0 <_sk_srcover_bgra_8888_sse2_lowp+0xc9c>
+ .byte 102,15,111,37,212,208,1,0 // movdqa 0x1d0d4(%rip),%xmm4 # 39500 <_sk_srcover_bgra_8888_sse2_lowp+0xc8c>
.byte 102,15,219,231 // pand %xmm7,%xmm4
.byte 15,91,244 // cvtdq2ps %xmm4,%xmm6
.byte 102,15,111,231 // movdqa %xmm7,%xmm4
- .byte 102,15,56,0,37,112,207,1,0 // pshufb 0x1cf70(%rip),%xmm4 # 393b0 <_sk_srcover_bgra_8888_sse2_lowp+0xcac>
+ .byte 102,15,56,0,37,208,208,1,0 // pshufb 0x1d0d0(%rip),%xmm4 # 39510 <_sk_srcover_bgra_8888_sse2_lowp+0xc9c>
.byte 15,91,236 // cvtdq2ps %xmm4,%xmm5
.byte 102,15,111,231 // movdqa %xmm7,%xmm4
- .byte 102,15,56,0,37,112,207,1,0 // pshufb 0x1cf70(%rip),%xmm4 # 393c0 <_sk_srcover_bgra_8888_sse2_lowp+0xcbc>
+ .byte 102,15,56,0,37,208,208,1,0 // pshufb 0x1d0d0(%rip),%xmm4 # 39520 <_sk_srcover_bgra_8888_sse2_lowp+0xcac>
.byte 15,91,228 // cvtdq2ps %xmm4,%xmm4
.byte 102,15,114,215,24 // psrld $0x18,%xmm7
.byte 15,91,255 // cvtdq2ps %xmm7,%xmm7
- .byte 68,15,40,5,141,206,1,0 // movaps 0x1ce8d(%rip),%xmm8 # 392f0 <_sk_srcover_bgra_8888_sse2_lowp+0xbec>
+ .byte 68,15,40,5,237,207,1,0 // movaps 0x1cfed(%rip),%xmm8 # 39450 <_sk_srcover_bgra_8888_sse2_lowp+0xbdc>
.byte 68,15,92,195 // subps %xmm3,%xmm8
- .byte 68,15,40,37,97,207,1,0 // movaps 0x1cf61(%rip),%xmm12 # 393d0 <_sk_srcover_bgra_8888_sse2_lowp+0xccc>
+ .byte 68,15,40,37,193,208,1,0 // movaps 0x1d0c1(%rip),%xmm12 # 39530 <_sk_srcover_bgra_8888_sse2_lowp+0xcbc>
.byte 65,15,89,196 // mulps %xmm12,%xmm0
.byte 69,15,40,200 // movaps %xmm8,%xmm9
.byte 68,15,89,204 // mulps %xmm4,%xmm9
@@ -36787,7 +36787,7 @@ HIDDEN _sk_clamp_1_sse41
.globl _sk_clamp_1_sse41
FUNCTION(_sk_clamp_1_sse41)
_sk_clamp_1_sse41:
- .byte 68,15,40,5,92,205,1,0 // movaps 0x1cd5c(%rip),%xmm8 # 392f0 <_sk_srcover_bgra_8888_sse2_lowp+0xbec>
+ .byte 68,15,40,5,188,206,1,0 // movaps 0x1cebc(%rip),%xmm8 # 39450 <_sk_srcover_bgra_8888_sse2_lowp+0xbdc>
.byte 65,15,93,192 // minps %xmm8,%xmm0
.byte 65,15,93,200 // minps %xmm8,%xmm1
.byte 65,15,93,208 // minps %xmm8,%xmm2
@@ -36799,7 +36799,7 @@ HIDDEN _sk_clamp_a_sse41
.globl _sk_clamp_a_sse41
FUNCTION(_sk_clamp_a_sse41)
_sk_clamp_a_sse41:
- .byte 15,93,29,65,205,1,0 // minps 0x1cd41(%rip),%xmm3 # 392f0 <_sk_srcover_bgra_8888_sse2_lowp+0xbec>
+ .byte 15,93,29,161,206,1,0 // minps 0x1cea1(%rip),%xmm3 # 39450 <_sk_srcover_bgra_8888_sse2_lowp+0xbdc>
.byte 15,93,195 // minps %xmm3,%xmm0
.byte 15,93,203 // minps %xmm3,%xmm1
.byte 15,93,211 // minps %xmm3,%xmm2
@@ -36810,7 +36810,7 @@ HIDDEN _sk_clamp_a_dst_sse41
.globl _sk_clamp_a_dst_sse41
FUNCTION(_sk_clamp_a_dst_sse41)
_sk_clamp_a_dst_sse41:
- .byte 15,93,61,45,205,1,0 // minps 0x1cd2d(%rip),%xmm7 # 392f0 <_sk_srcover_bgra_8888_sse2_lowp+0xbec>
+ .byte 15,93,61,141,206,1,0 // minps 0x1ce8d(%rip),%xmm7 # 39450 <_sk_srcover_bgra_8888_sse2_lowp+0xbdc>
.byte 15,93,231 // minps %xmm7,%xmm4
.byte 15,93,239 // minps %xmm7,%xmm5
.byte 15,93,247 // minps %xmm7,%xmm6
@@ -36845,7 +36845,7 @@ HIDDEN _sk_invert_sse41
.globl _sk_invert_sse41
FUNCTION(_sk_invert_sse41)
_sk_invert_sse41:
- .byte 68,15,40,5,233,204,1,0 // movaps 0x1cce9(%rip),%xmm8 # 392f0 <_sk_srcover_bgra_8888_sse2_lowp+0xbec>
+ .byte 68,15,40,5,73,206,1,0 // movaps 0x1ce49(%rip),%xmm8 # 39450 <_sk_srcover_bgra_8888_sse2_lowp+0xbdc>
.byte 69,15,40,200 // movaps %xmm8,%xmm9
.byte 68,15,92,200 // subps %xmm0,%xmm9
.byte 69,15,40,208 // movaps %xmm8,%xmm10
@@ -36906,10 +36906,10 @@ HIDDEN _sk_unpremul_sse41
.globl _sk_unpremul_sse41
FUNCTION(_sk_unpremul_sse41)
_sk_unpremul_sse41:
- .byte 68,15,40,5,119,204,1,0 // movaps 0x1cc77(%rip),%xmm8 # 392f0 <_sk_srcover_bgra_8888_sse2_lowp+0xbec>
+ .byte 68,15,40,5,215,205,1,0 // movaps 0x1cdd7(%rip),%xmm8 # 39450 <_sk_srcover_bgra_8888_sse2_lowp+0xbdc>
.byte 68,15,94,195 // divps %xmm3,%xmm8
.byte 69,15,40,200 // movaps %xmm8,%xmm9
- .byte 68,15,194,13,86,205,1,0,1 // cmpltps 0x1cd56(%rip),%xmm9 # 393e0 <_sk_srcover_bgra_8888_sse2_lowp+0xcdc>
+ .byte 68,15,194,13,182,206,1,0,1 // cmpltps 0x1ceb6(%rip),%xmm9 # 39540 <_sk_srcover_bgra_8888_sse2_lowp+0xccc>
.byte 69,15,84,200 // andps %xmm8,%xmm9
.byte 65,15,89,193 // mulps %xmm9,%xmm0
.byte 65,15,89,201 // mulps %xmm9,%xmm1
@@ -36921,20 +36921,20 @@ HIDDEN _sk_from_srgb_sse41
.globl _sk_from_srgb_sse41
FUNCTION(_sk_from_srgb_sse41)
_sk_from_srgb_sse41:
- .byte 68,15,40,29,74,205,1,0 // movaps 0x1cd4a(%rip),%xmm11 # 393f0 <_sk_srcover_bgra_8888_sse2_lowp+0xcec>
+ .byte 68,15,40,29,170,206,1,0 // movaps 0x1ceaa(%rip),%xmm11 # 39550 <_sk_srcover_bgra_8888_sse2_lowp+0xcdc>
.byte 68,15,40,200 // movaps %xmm0,%xmm9
.byte 69,15,89,203 // mulps %xmm11,%xmm9
.byte 68,15,40,208 // movaps %xmm0,%xmm10
.byte 69,15,89,210 // mulps %xmm10,%xmm10
- .byte 68,15,40,37,178,204,1,0 // movaps 0x1ccb2(%rip),%xmm12 # 39370 <_sk_srcover_bgra_8888_sse2_lowp+0xc6c>
+ .byte 68,15,40,37,18,206,1,0 // movaps 0x1ce12(%rip),%xmm12 # 394d0 <_sk_srcover_bgra_8888_sse2_lowp+0xc5c>
.byte 68,15,40,192 // movaps %xmm0,%xmm8
.byte 69,15,89,196 // mulps %xmm12,%xmm8
- .byte 68,15,40,45,50,205,1,0 // movaps 0x1cd32(%rip),%xmm13 # 39400 <_sk_srcover_bgra_8888_sse2_lowp+0xcfc>
+ .byte 68,15,40,45,146,206,1,0 // movaps 0x1ce92(%rip),%xmm13 # 39560 <_sk_srcover_bgra_8888_sse2_lowp+0xcec>
.byte 69,15,88,197 // addps %xmm13,%xmm8
.byte 69,15,89,194 // mulps %xmm10,%xmm8
- .byte 68,15,40,53,50,205,1,0 // movaps 0x1cd32(%rip),%xmm14 # 39410 <_sk_srcover_bgra_8888_sse2_lowp+0xd0c>
+ .byte 68,15,40,53,146,206,1,0 // movaps 0x1ce92(%rip),%xmm14 # 39570 <_sk_srcover_bgra_8888_sse2_lowp+0xcfc>
.byte 69,15,88,198 // addps %xmm14,%xmm8
- .byte 68,15,40,61,54,205,1,0 // movaps 0x1cd36(%rip),%xmm15 # 39420 <_sk_srcover_bgra_8888_sse2_lowp+0xd1c>
+ .byte 68,15,40,61,150,206,1,0 // movaps 0x1ce96(%rip),%xmm15 # 39580 <_sk_srcover_bgra_8888_sse2_lowp+0xd0c>
.byte 65,15,194,199,1 // cmpltps %xmm15,%xmm0
.byte 102,69,15,56,20,193 // blendvps %xmm0,%xmm9,%xmm8
.byte 68,15,40,209 // movaps %xmm1,%xmm10
@@ -36971,19 +36971,19 @@ FUNCTION(_sk_from_srgb_dst_sse41)
_sk_from_srgb_dst_sse41:
.byte 68,15,40,204 // movaps %xmm4,%xmm9
.byte 68,15,40,192 // movaps %xmm0,%xmm8
- .byte 68,15,40,29,129,204,1,0 // movaps 0x1cc81(%rip),%xmm11 # 393f0 <_sk_srcover_bgra_8888_sse2_lowp+0xcec>
+ .byte 68,15,40,29,225,205,1,0 // movaps 0x1cde1(%rip),%xmm11 # 39550 <_sk_srcover_bgra_8888_sse2_lowp+0xcdc>
.byte 69,15,40,209 // movaps %xmm9,%xmm10
.byte 69,15,89,211 // mulps %xmm11,%xmm10
.byte 65,15,40,193 // movaps %xmm9,%xmm0
.byte 15,89,192 // mulps %xmm0,%xmm0
- .byte 68,15,40,37,234,203,1,0 // movaps 0x1cbea(%rip),%xmm12 # 39370 <_sk_srcover_bgra_8888_sse2_lowp+0xc6c>
+ .byte 68,15,40,37,74,205,1,0 // movaps 0x1cd4a(%rip),%xmm12 # 394d0 <_sk_srcover_bgra_8888_sse2_lowp+0xc5c>
.byte 65,15,89,228 // mulps %xmm12,%xmm4
- .byte 68,15,40,45,110,204,1,0 // movaps 0x1cc6e(%rip),%xmm13 # 39400 <_sk_srcover_bgra_8888_sse2_lowp+0xcfc>
+ .byte 68,15,40,45,206,205,1,0 // movaps 0x1cdce(%rip),%xmm13 # 39560 <_sk_srcover_bgra_8888_sse2_lowp+0xcec>
.byte 65,15,88,229 // addps %xmm13,%xmm4
.byte 15,89,224 // mulps %xmm0,%xmm4
- .byte 68,15,40,53,111,204,1,0 // movaps 0x1cc6f(%rip),%xmm14 # 39410 <_sk_srcover_bgra_8888_sse2_lowp+0xd0c>
+ .byte 68,15,40,53,207,205,1,0 // movaps 0x1cdcf(%rip),%xmm14 # 39570 <_sk_srcover_bgra_8888_sse2_lowp+0xcfc>
.byte 65,15,88,230 // addps %xmm14,%xmm4
- .byte 68,15,40,61,115,204,1,0 // movaps 0x1cc73(%rip),%xmm15 # 39420 <_sk_srcover_bgra_8888_sse2_lowp+0xd1c>
+ .byte 68,15,40,61,211,205,1,0 // movaps 0x1cdd3(%rip),%xmm15 # 39580 <_sk_srcover_bgra_8888_sse2_lowp+0xd0c>
.byte 69,15,194,207,1 // cmpltps %xmm15,%xmm9
.byte 65,15,40,193 // movaps %xmm9,%xmm0
.byte 102,65,15,56,20,226 // blendvps %xmm0,%xmm10,%xmm4
@@ -37027,22 +37027,22 @@ _sk_to_srgb_sse41:
.byte 15,40,218 // movaps %xmm2,%xmm3
.byte 15,40,209 // movaps %xmm1,%xmm2
.byte 68,15,82,192 // rsqrtps %xmm0,%xmm8
- .byte 68,15,40,29,231,203,1,0 // movaps 0x1cbe7(%rip),%xmm11 # 39430 <_sk_srcover_bgra_8888_sse2_lowp+0xd2c>
+ .byte 68,15,40,29,71,205,1,0 // movaps 0x1cd47(%rip),%xmm11 # 39590 <_sk_srcover_bgra_8888_sse2_lowp+0xd1c>
.byte 68,15,40,200 // movaps %xmm0,%xmm9
.byte 69,15,89,203 // mulps %xmm11,%xmm9
- .byte 68,15,40,37,231,203,1,0 // movaps 0x1cbe7(%rip),%xmm12 # 39440 <_sk_srcover_bgra_8888_sse2_lowp+0xd3c>
+ .byte 68,15,40,37,71,205,1,0 // movaps 0x1cd47(%rip),%xmm12 # 395a0 <_sk_srcover_bgra_8888_sse2_lowp+0xd2c>
.byte 69,15,40,248 // movaps %xmm8,%xmm15
.byte 69,15,89,252 // mulps %xmm12,%xmm15
- .byte 68,15,40,21,231,203,1,0 // movaps 0x1cbe7(%rip),%xmm10 # 39450 <_sk_srcover_bgra_8888_sse2_lowp+0xd4c>
+ .byte 68,15,40,21,71,205,1,0 // movaps 0x1cd47(%rip),%xmm10 # 395b0 <_sk_srcover_bgra_8888_sse2_lowp+0xd3c>
.byte 69,15,88,250 // addps %xmm10,%xmm15
.byte 69,15,89,248 // mulps %xmm8,%xmm15
- .byte 68,15,40,45,231,203,1,0 // movaps 0x1cbe7(%rip),%xmm13 # 39460 <_sk_srcover_bgra_8888_sse2_lowp+0xd5c>
+ .byte 68,15,40,45,71,205,1,0 // movaps 0x1cd47(%rip),%xmm13 # 395c0 <_sk_srcover_bgra_8888_sse2_lowp+0xd4c>
.byte 69,15,88,253 // addps %xmm13,%xmm15
- .byte 68,15,40,53,235,203,1,0 // movaps 0x1cbeb(%rip),%xmm14 # 39470 <_sk_srcover_bgra_8888_sse2_lowp+0xd6c>
+ .byte 68,15,40,53,75,205,1,0 // movaps 0x1cd4b(%rip),%xmm14 # 395d0 <_sk_srcover_bgra_8888_sse2_lowp+0xd5c>
.byte 69,15,88,198 // addps %xmm14,%xmm8
.byte 69,15,83,192 // rcpps %xmm8,%xmm8
.byte 69,15,89,199 // mulps %xmm15,%xmm8
- .byte 68,15,40,61,231,203,1,0 // movaps 0x1cbe7(%rip),%xmm15 # 39480 <_sk_srcover_bgra_8888_sse2_lowp+0xd7c>
+ .byte 68,15,40,61,71,205,1,0 // movaps 0x1cd47(%rip),%xmm15 # 395e0 <_sk_srcover_bgra_8888_sse2_lowp+0xd6c>
.byte 65,15,194,199,1 // cmpltps %xmm15,%xmm0
.byte 102,69,15,56,20,193 // blendvps %xmm0,%xmm9,%xmm8
.byte 68,15,82,202 // rsqrtps %xmm2,%xmm9
@@ -37097,7 +37097,7 @@ _sk_rgb_to_hsl_sse41:
.byte 68,15,93,224 // minps %xmm0,%xmm12
.byte 65,15,40,203 // movaps %xmm11,%xmm1
.byte 65,15,92,204 // subps %xmm12,%xmm1
- .byte 68,15,40,53,142,201,1,0 // movaps 0x1c98e(%rip),%xmm14 # 392f0 <_sk_srcover_bgra_8888_sse2_lowp+0xbec>
+ .byte 68,15,40,53,238,202,1,0 // movaps 0x1caee(%rip),%xmm14 # 39450 <_sk_srcover_bgra_8888_sse2_lowp+0xbdc>
.byte 68,15,94,241 // divps %xmm1,%xmm14
.byte 69,15,40,211 // movaps %xmm11,%xmm10
.byte 69,15,194,208,0 // cmpeqps %xmm8,%xmm10
@@ -37106,27 +37106,27 @@ _sk_rgb_to_hsl_sse41:
.byte 65,15,89,198 // mulps %xmm14,%xmm0
.byte 69,15,40,249 // movaps %xmm9,%xmm15
.byte 68,15,194,250,1 // cmpltps %xmm2,%xmm15
- .byte 68,15,84,61,5,203,1,0 // andps 0x1cb05(%rip),%xmm15 # 39490 <_sk_srcover_bgra_8888_sse2_lowp+0xd8c>
+ .byte 68,15,84,61,101,204,1,0 // andps 0x1cc65(%rip),%xmm15 # 395f0 <_sk_srcover_bgra_8888_sse2_lowp+0xd7c>
.byte 68,15,88,248 // addps %xmm0,%xmm15
.byte 65,15,40,195 // movaps %xmm11,%xmm0
.byte 65,15,194,193,0 // cmpeqps %xmm9,%xmm0
.byte 65,15,92,208 // subps %xmm8,%xmm2
.byte 65,15,89,214 // mulps %xmm14,%xmm2
- .byte 68,15,40,45,248,202,1,0 // movaps 0x1caf8(%rip),%xmm13 # 394a0 <_sk_srcover_bgra_8888_sse2_lowp+0xd9c>
+ .byte 68,15,40,45,88,204,1,0 // movaps 0x1cc58(%rip),%xmm13 # 39600 <_sk_srcover_bgra_8888_sse2_lowp+0xd8c>
.byte 65,15,88,213 // addps %xmm13,%xmm2
.byte 69,15,92,193 // subps %xmm9,%xmm8
.byte 69,15,89,198 // mulps %xmm14,%xmm8
- .byte 68,15,88,5,244,202,1,0 // addps 0x1caf4(%rip),%xmm8 # 394b0 <_sk_srcover_bgra_8888_sse2_lowp+0xdac>
+ .byte 68,15,88,5,84,204,1,0 // addps 0x1cc54(%rip),%xmm8 # 39610 <_sk_srcover_bgra_8888_sse2_lowp+0xd9c>
.byte 102,68,15,56,20,194 // blendvps %xmm0,%xmm2,%xmm8
.byte 65,15,40,194 // movaps %xmm10,%xmm0
.byte 102,69,15,56,20,199 // blendvps %xmm0,%xmm15,%xmm8
- .byte 68,15,89,5,236,202,1,0 // mulps 0x1caec(%rip),%xmm8 # 394c0 <_sk_srcover_bgra_8888_sse2_lowp+0xdbc>
+ .byte 68,15,89,5,76,204,1,0 // mulps 0x1cc4c(%rip),%xmm8 # 39620 <_sk_srcover_bgra_8888_sse2_lowp+0xdac>
.byte 69,15,40,203 // movaps %xmm11,%xmm9
.byte 69,15,194,204,4 // cmpneqps %xmm12,%xmm9
.byte 69,15,84,193 // andps %xmm9,%xmm8
.byte 69,15,92,235 // subps %xmm11,%xmm13
.byte 69,15,88,220 // addps %xmm12,%xmm11
- .byte 15,40,5,240,200,1,0 // movaps 0x1c8f0(%rip),%xmm0 # 392e0 <_sk_srcover_bgra_8888_sse2_lowp+0xbdc>
+ .byte 15,40,5,80,202,1,0 // movaps 0x1ca50(%rip),%xmm0 # 39440 <_sk_srcover_bgra_8888_sse2_lowp+0xbcc>
.byte 65,15,40,211 // movaps %xmm11,%xmm2
.byte 15,89,208 // mulps %xmm0,%xmm2
.byte 15,194,194,1 // cmpltps %xmm2,%xmm0
@@ -37148,7 +37148,7 @@ _sk_hsl_to_rgb_sse41:
.byte 15,41,100,36,184 // movaps %xmm4,-0x48(%rsp)
.byte 15,41,92,36,168 // movaps %xmm3,-0x58(%rsp)
.byte 68,15,40,208 // movaps %xmm0,%xmm10
- .byte 68,15,40,13,166,200,1,0 // movaps 0x1c8a6(%rip),%xmm9 # 392e0 <_sk_srcover_bgra_8888_sse2_lowp+0xbdc>
+ .byte 68,15,40,13,6,202,1,0 // movaps 0x1ca06(%rip),%xmm9 # 39440 <_sk_srcover_bgra_8888_sse2_lowp+0xbcc>
.byte 65,15,40,193 // movaps %xmm9,%xmm0
.byte 15,194,194,2 // cmpleps %xmm2,%xmm0
.byte 15,40,217 // movaps %xmm1,%xmm3
@@ -37161,19 +37161,19 @@ _sk_hsl_to_rgb_sse41:
.byte 15,41,84,36,152 // movaps %xmm2,-0x68(%rsp)
.byte 69,15,88,192 // addps %xmm8,%xmm8
.byte 68,15,92,197 // subps %xmm5,%xmm8
- .byte 68,15,40,53,97,202,1,0 // movaps 0x1ca61(%rip),%xmm14 # 394d0 <_sk_srcover_bgra_8888_sse2_lowp+0xdcc>
+ .byte 68,15,40,53,193,203,1,0 // movaps 0x1cbc1(%rip),%xmm14 # 39630 <_sk_srcover_bgra_8888_sse2_lowp+0xdbc>
.byte 69,15,88,242 // addps %xmm10,%xmm14
.byte 102,65,15,58,8,198,1 // roundps $0x1,%xmm14,%xmm0
.byte 68,15,92,240 // subps %xmm0,%xmm14
- .byte 68,15,40,29,90,202,1,0 // movaps 0x1ca5a(%rip),%xmm11 # 394e0 <_sk_srcover_bgra_8888_sse2_lowp+0xddc>
+ .byte 68,15,40,29,186,203,1,0 // movaps 0x1cbba(%rip),%xmm11 # 39640 <_sk_srcover_bgra_8888_sse2_lowp+0xdcc>
.byte 65,15,40,195 // movaps %xmm11,%xmm0
.byte 65,15,194,198,2 // cmpleps %xmm14,%xmm0
.byte 15,40,245 // movaps %xmm5,%xmm6
.byte 65,15,92,240 // subps %xmm8,%xmm6
- .byte 15,40,61,243,201,1,0 // movaps 0x1c9f3(%rip),%xmm7 # 39490 <_sk_srcover_bgra_8888_sse2_lowp+0xd8c>
+ .byte 15,40,61,83,203,1,0 // movaps 0x1cb53(%rip),%xmm7 # 395f0 <_sk_srcover_bgra_8888_sse2_lowp+0xd7c>
.byte 69,15,40,238 // movaps %xmm14,%xmm13
.byte 68,15,89,239 // mulps %xmm7,%xmm13
- .byte 15,40,29,4,202,1,0 // movaps 0x1ca04(%rip),%xmm3 # 394b0 <_sk_srcover_bgra_8888_sse2_lowp+0xdac>
+ .byte 15,40,29,100,203,1,0 // movaps 0x1cb64(%rip),%xmm3 # 39610 <_sk_srcover_bgra_8888_sse2_lowp+0xd9c>
.byte 68,15,40,227 // movaps %xmm3,%xmm12
.byte 69,15,92,229 // subps %xmm13,%xmm12
.byte 68,15,89,230 // mulps %xmm6,%xmm12
@@ -37183,7 +37183,7 @@ _sk_hsl_to_rgb_sse41:
.byte 65,15,194,198,2 // cmpleps %xmm14,%xmm0
.byte 68,15,40,253 // movaps %xmm5,%xmm15
.byte 102,69,15,56,20,252 // blendvps %xmm0,%xmm12,%xmm15
- .byte 68,15,40,37,227,201,1,0 // movaps 0x1c9e3(%rip),%xmm12 # 394c0 <_sk_srcover_bgra_8888_sse2_lowp+0xdbc>
+ .byte 68,15,40,37,67,203,1,0 // movaps 0x1cb43(%rip),%xmm12 # 39620 <_sk_srcover_bgra_8888_sse2_lowp+0xdac>
.byte 65,15,40,196 // movaps %xmm12,%xmm0
.byte 65,15,194,198,2 // cmpleps %xmm14,%xmm0
.byte 68,15,89,238 // mulps %xmm6,%xmm13
@@ -37217,7 +37217,7 @@ _sk_hsl_to_rgb_sse41:
.byte 65,15,40,198 // movaps %xmm14,%xmm0
.byte 15,40,84,36,152 // movaps -0x68(%rsp),%xmm2
.byte 102,15,56,20,202 // blendvps %xmm0,%xmm2,%xmm1
- .byte 68,15,88,21,123,201,1,0 // addps 0x1c97b(%rip),%xmm10 # 394f0 <_sk_srcover_bgra_8888_sse2_lowp+0xdec>
+ .byte 68,15,88,21,219,202,1,0 // addps 0x1cadb(%rip),%xmm10 # 39650 <_sk_srcover_bgra_8888_sse2_lowp+0xddc>
.byte 102,65,15,58,8,194,1 // roundps $0x1,%xmm10,%xmm0
.byte 68,15,92,208 // subps %xmm0,%xmm10
.byte 69,15,194,218,2 // cmpleps %xmm10,%xmm11
@@ -37274,9 +37274,9 @@ _sk_scale_u8_sse41:
.byte 72,133,255 // test %rdi,%rdi
.byte 117,52 // jne 1cc5c <_sk_scale_u8_sse41+0x4c>
.byte 102,71,15,56,49,4,16 // pmovzxbd (%r8,%r10,1),%xmm8
- .byte 102,68,15,219,5,104,199,1,0 // pand 0x1c768(%rip),%xmm8 # 393a0 <_sk_srcover_bgra_8888_sse2_lowp+0xc9c>
+ .byte 102,68,15,219,5,200,200,1,0 // pand 0x1c8c8(%rip),%xmm8 # 39500 <_sk_srcover_bgra_8888_sse2_lowp+0xc8c>
.byte 69,15,91,192 // cvtdq2ps %xmm8,%xmm8
- .byte 68,15,89,5,188,200,1,0 // mulps 0x1c8bc(%rip),%xmm8 # 39500 <_sk_srcover_bgra_8888_sse2_lowp+0xdfc>
+ .byte 68,15,89,5,28,202,1,0 // mulps 0x1ca1c(%rip),%xmm8 # 39660 <_sk_srcover_bgra_8888_sse2_lowp+0xdec>
.byte 65,15,89,192 // mulps %xmm8,%xmm0
.byte 65,15,89,200 // mulps %xmm8,%xmm1
.byte 65,15,89,208 // mulps %xmm8,%xmm2
@@ -37320,17 +37320,17 @@ _sk_scale_565_sse41:
.byte 72,133,255 // test %rdi,%rdi
.byte 15,133,159,0,0,0 // jne 1cd76 <_sk_scale_565_sse41+0xc3>
.byte 102,71,15,56,51,28,80 // pmovzxwd (%r8,%r10,2),%xmm11
- .byte 102,15,111,5,42,200,1,0 // movdqa 0x1c82a(%rip),%xmm0 # 39510 <_sk_srcover_bgra_8888_sse2_lowp+0xe0c>
+ .byte 102,15,111,5,138,201,1,0 // movdqa 0x1c98a(%rip),%xmm0 # 39670 <_sk_srcover_bgra_8888_sse2_lowp+0xdfc>
.byte 102,65,15,219,195 // pand %xmm11,%xmm0
.byte 68,15,91,200 // cvtdq2ps %xmm0,%xmm9
- .byte 68,15,89,13,41,200,1,0 // mulps 0x1c829(%rip),%xmm9 # 39520 <_sk_srcover_bgra_8888_sse2_lowp+0xe1c>
- .byte 102,15,111,5,49,200,1,0 // movdqa 0x1c831(%rip),%xmm0 # 39530 <_sk_srcover_bgra_8888_sse2_lowp+0xe2c>
+ .byte 68,15,89,13,137,201,1,0 // mulps 0x1c989(%rip),%xmm9 # 39680 <_sk_srcover_bgra_8888_sse2_lowp+0xe0c>
+ .byte 102,15,111,5,145,201,1,0 // movdqa 0x1c991(%rip),%xmm0 # 39690 <_sk_srcover_bgra_8888_sse2_lowp+0xe1c>
.byte 102,65,15,219,195 // pand %xmm11,%xmm0
.byte 68,15,91,208 // cvtdq2ps %xmm0,%xmm10
- .byte 68,15,89,21,48,200,1,0 // mulps 0x1c830(%rip),%xmm10 # 39540 <_sk_srcover_bgra_8888_sse2_lowp+0xe3c>
- .byte 102,68,15,219,29,55,200,1,0 // pand 0x1c837(%rip),%xmm11 # 39550 <_sk_srcover_bgra_8888_sse2_lowp+0xe4c>
+ .byte 68,15,89,21,144,201,1,0 // mulps 0x1c990(%rip),%xmm10 # 396a0 <_sk_srcover_bgra_8888_sse2_lowp+0xe2c>
+ .byte 102,68,15,219,29,151,201,1,0 // pand 0x1c997(%rip),%xmm11 # 396b0 <_sk_srcover_bgra_8888_sse2_lowp+0xe3c>
.byte 69,15,91,219 // cvtdq2ps %xmm11,%xmm11
- .byte 68,15,89,29,59,200,1,0 // mulps 0x1c83b(%rip),%xmm11 # 39560 <_sk_srcover_bgra_8888_sse2_lowp+0xe5c>
+ .byte 68,15,89,29,155,201,1,0 // mulps 0x1c99b(%rip),%xmm11 # 396c0 <_sk_srcover_bgra_8888_sse2_lowp+0xe4c>
.byte 15,40,195 // movaps %xmm3,%xmm0
.byte 15,194,199,1 // cmpltps %xmm7,%xmm0
.byte 69,15,40,226 // movaps %xmm10,%xmm12
@@ -37407,9 +37407,9 @@ _sk_lerp_u8_sse41:
.byte 72,133,255 // test %rdi,%rdi
.byte 117,72 // jne 1ce66 <_sk_lerp_u8_sse41+0x60>
.byte 102,71,15,56,49,4,16 // pmovzxbd (%r8,%r10,1),%xmm8
- .byte 102,68,15,219,5,114,197,1,0 // pand 0x1c572(%rip),%xmm8 # 393a0 <_sk_srcover_bgra_8888_sse2_lowp+0xc9c>
+ .byte 102,68,15,219,5,210,198,1,0 // pand 0x1c6d2(%rip),%xmm8 # 39500 <_sk_srcover_bgra_8888_sse2_lowp+0xc8c>
.byte 69,15,91,192 // cvtdq2ps %xmm8,%xmm8
- .byte 68,15,89,5,198,198,1,0 // mulps 0x1c6c6(%rip),%xmm8 # 39500 <_sk_srcover_bgra_8888_sse2_lowp+0xdfc>
+ .byte 68,15,89,5,38,200,1,0 // mulps 0x1c826(%rip),%xmm8 # 39660 <_sk_srcover_bgra_8888_sse2_lowp+0xdec>
.byte 15,92,196 // subps %xmm4,%xmm0
.byte 65,15,89,192 // mulps %xmm8,%xmm0
.byte 15,88,196 // addps %xmm4,%xmm0
@@ -37460,17 +37460,17 @@ _sk_lerp_565_sse41:
.byte 72,133,255 // test %rdi,%rdi
.byte 15,133,165,0,0,0 // jne 1cf89 <_sk_lerp_565_sse41+0xc9>
.byte 102,71,15,56,51,20,80 // pmovzxwd (%r8,%r10,2),%xmm10
- .byte 102,15,111,5,29,198,1,0 // movdqa 0x1c61d(%rip),%xmm0 # 39510 <_sk_srcover_bgra_8888_sse2_lowp+0xe0c>
+ .byte 102,15,111,5,125,199,1,0 // movdqa 0x1c77d(%rip),%xmm0 # 39670 <_sk_srcover_bgra_8888_sse2_lowp+0xdfc>
.byte 102,65,15,219,194 // pand %xmm10,%xmm0
.byte 68,15,91,200 // cvtdq2ps %xmm0,%xmm9
- .byte 68,15,89,13,28,198,1,0 // mulps 0x1c61c(%rip),%xmm9 # 39520 <_sk_srcover_bgra_8888_sse2_lowp+0xe1c>
- .byte 102,15,111,5,36,198,1,0 // movdqa 0x1c624(%rip),%xmm0 # 39530 <_sk_srcover_bgra_8888_sse2_lowp+0xe2c>
+ .byte 68,15,89,13,124,199,1,0 // mulps 0x1c77c(%rip),%xmm9 # 39680 <_sk_srcover_bgra_8888_sse2_lowp+0xe0c>
+ .byte 102,15,111,5,132,199,1,0 // movdqa 0x1c784(%rip),%xmm0 # 39690 <_sk_srcover_bgra_8888_sse2_lowp+0xe1c>
.byte 102,65,15,219,194 // pand %xmm10,%xmm0
.byte 68,15,91,216 // cvtdq2ps %xmm0,%xmm11
- .byte 68,15,89,29,35,198,1,0 // mulps 0x1c623(%rip),%xmm11 # 39540 <_sk_srcover_bgra_8888_sse2_lowp+0xe3c>
- .byte 102,68,15,219,21,42,198,1,0 // pand 0x1c62a(%rip),%xmm10 # 39550 <_sk_srcover_bgra_8888_sse2_lowp+0xe4c>
+ .byte 68,15,89,29,131,199,1,0 // mulps 0x1c783(%rip),%xmm11 # 396a0 <_sk_srcover_bgra_8888_sse2_lowp+0xe2c>
+ .byte 102,68,15,219,21,138,199,1,0 // pand 0x1c78a(%rip),%xmm10 # 396b0 <_sk_srcover_bgra_8888_sse2_lowp+0xe3c>
.byte 69,15,91,210 // cvtdq2ps %xmm10,%xmm10
- .byte 68,15,89,21,46,198,1,0 // mulps 0x1c62e(%rip),%xmm10 # 39560 <_sk_srcover_bgra_8888_sse2_lowp+0xe5c>
+ .byte 68,15,89,21,142,199,1,0 // mulps 0x1c78e(%rip),%xmm10 # 396c0 <_sk_srcover_bgra_8888_sse2_lowp+0xe4c>
.byte 15,40,195 // movaps %xmm3,%xmm0
.byte 15,194,199,1 // cmpltps %xmm7,%xmm0
.byte 69,15,40,227 // movaps %xmm11,%xmm12
@@ -37526,7 +37526,7 @@ _sk_load_tables_sse41:
.byte 243,69,15,111,4,144 // movdqu (%r8,%rdx,4),%xmm8
.byte 65,86 // push %r14
.byte 83 // push %rbx
- .byte 102,15,111,5,160,195,1,0 // movdqa 0x1c3a0(%rip),%xmm0 # 393a0 <_sk_srcover_bgra_8888_sse2_lowp+0xc9c>
+ .byte 102,15,111,5,0,197,1,0 // movdqa 0x1c500(%rip),%xmm0 # 39500 <_sk_srcover_bgra_8888_sse2_lowp+0xc8c>
.byte 102,65,15,219,192 // pand %xmm8,%xmm0
.byte 102,73,15,58,22,192,1 // pextrq $0x1,%xmm0,%r8
.byte 102,73,15,126,193 // movq %xmm0,%r9
@@ -37541,7 +37541,7 @@ _sk_load_tables_sse41:
.byte 102,66,15,58,33,4,155,32 // insertps $0x20,(%rbx,%r11,4),%xmm0
.byte 102,66,15,58,33,4,3,48 // insertps $0x30,(%rbx,%r8,1),%xmm0
.byte 102,65,15,111,200 // movdqa %xmm8,%xmm1
- .byte 102,15,56,0,13,91,195,1,0 // pshufb 0x1c35b(%rip),%xmm1 # 393b0 <_sk_srcover_bgra_8888_sse2_lowp+0xcac>
+ .byte 102,15,56,0,13,187,196,1,0 // pshufb 0x1c4bb(%rip),%xmm1 # 39510 <_sk_srcover_bgra_8888_sse2_lowp+0xc9c>
.byte 102,73,15,58,22,200,1 // pextrq $0x1,%xmm1,%r8
.byte 102,72,15,126,203 // movq %xmm1,%rbx
.byte 68,15,182,203 // movzbl %bl,%r9d
@@ -37556,7 +37556,7 @@ _sk_load_tables_sse41:
.byte 102,15,58,33,202,48 // insertps $0x30,%xmm2,%xmm1
.byte 76,139,64,24 // mov 0x18(%rax),%r8
.byte 102,65,15,111,208 // movdqa %xmm8,%xmm2
- .byte 102,15,56,0,21,23,195,1,0 // pshufb 0x1c317(%rip),%xmm2 # 393c0 <_sk_srcover_bgra_8888_sse2_lowp+0xcbc>
+ .byte 102,15,56,0,21,119,196,1,0 // pshufb 0x1c477(%rip),%xmm2 # 39520 <_sk_srcover_bgra_8888_sse2_lowp+0xcac>
.byte 102,72,15,58,22,211,1 // pextrq $0x1,%xmm2,%rbx
.byte 102,72,15,126,208 // movq %xmm2,%rax
.byte 68,15,182,200 // movzbl %al,%r9d
@@ -37571,7 +37571,7 @@ _sk_load_tables_sse41:
.byte 102,15,58,33,211,48 // insertps $0x30,%xmm3,%xmm2
.byte 102,65,15,114,208,24 // psrld $0x18,%xmm8
.byte 65,15,91,216 // cvtdq2ps %xmm8,%xmm3
- .byte 15,89,29,4,196,1,0 // mulps 0x1c404(%rip),%xmm3 # 39500 <_sk_srcover_bgra_8888_sse2_lowp+0xdfc>
+ .byte 15,89,29,100,197,1,0 // mulps 0x1c564(%rip),%xmm3 # 39660 <_sk_srcover_bgra_8888_sse2_lowp+0xdec>
.byte 72,173 // lods %ds:(%rsi),%rax
.byte 91 // pop %rbx
.byte 65,94 // pop %r14
@@ -37612,7 +37612,7 @@ _sk_load_tables_u16_be_sse41:
.byte 102,65,15,111,201 // movdqa %xmm9,%xmm1
.byte 102,15,97,200 // punpcklwd %xmm0,%xmm1
.byte 102,68,15,105,200 // punpckhwd %xmm0,%xmm9
- .byte 102,68,15,111,5,214,195,1,0 // movdqa 0x1c3d6(%rip),%xmm8 # 39570 <_sk_srcover_bgra_8888_sse2_lowp+0xe6c>
+ .byte 102,68,15,111,5,54,197,1,0 // movdqa 0x1c536(%rip),%xmm8 # 396d0 <_sk_srcover_bgra_8888_sse2_lowp+0xe5c>
.byte 102,15,111,193 // movdqa %xmm1,%xmm0
.byte 102,65,15,219,192 // pand %xmm8,%xmm0
.byte 102,15,56,51,192 // pmovzxwd %xmm0,%xmm0
@@ -37630,7 +37630,7 @@ _sk_load_tables_u16_be_sse41:
.byte 102,15,58,33,194,32 // insertps $0x20,%xmm2,%xmm0
.byte 243,66,15,16,20,3 // movss (%rbx,%r8,1),%xmm2
.byte 102,15,58,33,194,48 // insertps $0x30,%xmm2,%xmm0
- .byte 102,15,56,0,13,133,195,1,0 // pshufb 0x1c385(%rip),%xmm1 # 39580 <_sk_srcover_bgra_8888_sse2_lowp+0xe7c>
+ .byte 102,15,56,0,13,229,196,1,0 // pshufb 0x1c4e5(%rip),%xmm1 # 396e0 <_sk_srcover_bgra_8888_sse2_lowp+0xe6c>
.byte 102,15,56,51,201 // pmovzxwd %xmm1,%xmm1
.byte 102,73,15,58,22,200,1 // pextrq $0x1,%xmm1,%r8
.byte 102,72,15,126,203 // movq %xmm1,%rbx
@@ -37666,7 +37666,7 @@ _sk_load_tables_u16_be_sse41:
.byte 102,65,15,235,216 // por %xmm8,%xmm3
.byte 102,15,56,51,219 // pmovzxwd %xmm3,%xmm3
.byte 15,91,219 // cvtdq2ps %xmm3,%xmm3
- .byte 15,89,29,211,194,1,0 // mulps 0x1c2d3(%rip),%xmm3 # 39590 <_sk_srcover_bgra_8888_sse2_lowp+0xe8c>
+ .byte 15,89,29,51,196,1,0 // mulps 0x1c433(%rip),%xmm3 # 396f0 <_sk_srcover_bgra_8888_sse2_lowp+0xe7c>
.byte 72,173 // lods %ds:(%rsi),%rax
.byte 91 // pop %rbx
.byte 65,94 // pop %r14
@@ -37706,7 +37706,7 @@ _sk_load_tables_rgb_u16_be_sse41:
.byte 102,15,97,209 // punpcklwd %xmm1,%xmm2
.byte 102,15,111,202 // movdqa %xmm2,%xmm1
.byte 102,65,15,97,201 // punpcklwd %xmm9,%xmm1
- .byte 102,68,15,111,5,28,194,1,0 // movdqa 0x1c21c(%rip),%xmm8 # 39570 <_sk_srcover_bgra_8888_sse2_lowp+0xe6c>
+ .byte 102,68,15,111,5,124,195,1,0 // movdqa 0x1c37c(%rip),%xmm8 # 396d0 <_sk_srcover_bgra_8888_sse2_lowp+0xe5c>
.byte 102,15,111,193 // movdqa %xmm1,%xmm0
.byte 102,65,15,219,192 // pand %xmm8,%xmm0
.byte 102,15,56,51,192 // pmovzxwd %xmm0,%xmm0
@@ -37724,7 +37724,7 @@ _sk_load_tables_rgb_u16_be_sse41:
.byte 102,15,58,33,195,32 // insertps $0x20,%xmm3,%xmm0
.byte 243,66,15,16,28,3 // movss (%rbx,%r8,1),%xmm3
.byte 102,15,58,33,195,48 // insertps $0x30,%xmm3,%xmm0
- .byte 102,15,56,0,13,203,193,1,0 // pshufb 0x1c1cb(%rip),%xmm1 # 39580 <_sk_srcover_bgra_8888_sse2_lowp+0xe7c>
+ .byte 102,15,56,0,13,43,195,1,0 // pshufb 0x1c32b(%rip),%xmm1 # 396e0 <_sk_srcover_bgra_8888_sse2_lowp+0xe6c>
.byte 102,15,56,51,201 // pmovzxwd %xmm1,%xmm1
.byte 102,73,15,58,22,200,1 // pextrq $0x1,%xmm1,%r8
.byte 102,72,15,126,203 // movq %xmm1,%rbx
@@ -37755,7 +37755,7 @@ _sk_load_tables_rgb_u16_be_sse41:
.byte 243,65,15,16,28,24 // movss (%r8,%rbx,1),%xmm3
.byte 102,15,58,33,211,48 // insertps $0x30,%xmm3,%xmm2
.byte 72,173 // lods %ds:(%rsi),%rax
- .byte 15,40,29,150,190,1,0 // movaps 0x1be96(%rip),%xmm3 # 392f0 <_sk_srcover_bgra_8888_sse2_lowp+0xbec>
+ .byte 15,40,29,246,191,1,0 // movaps 0x1bff6(%rip),%xmm3 # 39450 <_sk_srcover_bgra_8888_sse2_lowp+0xbdc>
.byte 91 // pop %rbx
.byte 65,94 // pop %r14
.byte 255,224 // jmpq *%rax
@@ -37787,7 +37787,7 @@ _sk_byte_tables_sse41:
.byte 65,86 // push %r14
.byte 83 // push %rbx
.byte 72,173 // lods %ds:(%rsi),%rax
- .byte 68,15,40,5,3,191,1,0 // movaps 0x1bf03(%rip),%xmm8 # 393d0 <_sk_srcover_bgra_8888_sse2_lowp+0xccc>
+ .byte 68,15,40,5,99,192,1,0 // movaps 0x1c063(%rip),%xmm8 # 39530 <_sk_srcover_bgra_8888_sse2_lowp+0xcbc>
.byte 65,15,89,192 // mulps %xmm8,%xmm0
.byte 102,15,91,192 // cvtps2dq %xmm0,%xmm0
.byte 102,73,15,58,22,192,1 // pextrq $0x1,%xmm0,%r8
@@ -37810,7 +37810,7 @@ _sk_byte_tables_sse41:
.byte 102,15,58,32,197,3 // pinsrb $0x3,%ebp,%xmm0
.byte 102,15,56,49,192 // pmovzxbd %xmm0,%xmm0
.byte 15,91,192 // cvtdq2ps %xmm0,%xmm0
- .byte 68,15,40,13,201,191,1,0 // movaps 0x1bfc9(%rip),%xmm9 # 39500 <_sk_srcover_bgra_8888_sse2_lowp+0xdfc>
+ .byte 68,15,40,13,41,193,1,0 // movaps 0x1c129(%rip),%xmm9 # 39660 <_sk_srcover_bgra_8888_sse2_lowp+0xdec>
.byte 65,15,89,193 // mulps %xmm9,%xmm0
.byte 65,15,89,200 // mulps %xmm8,%xmm1
.byte 102,15,91,201 // cvtps2dq %xmm1,%xmm1
@@ -37912,7 +37912,7 @@ _sk_byte_tables_rgb_sse41:
.byte 102,15,58,32,197,3 // pinsrb $0x3,%ebp,%xmm0
.byte 102,15,56,49,192 // pmovzxbd %xmm0,%xmm0
.byte 15,91,192 // cvtdq2ps %xmm0,%xmm0
- .byte 68,15,40,13,37,190,1,0 // movaps 0x1be25(%rip),%xmm9 # 39500 <_sk_srcover_bgra_8888_sse2_lowp+0xdfc>
+ .byte 68,15,40,13,133,191,1,0 // movaps 0x1bf85(%rip),%xmm9 # 39660 <_sk_srcover_bgra_8888_sse2_lowp+0xdec>
.byte 65,15,89,193 // mulps %xmm9,%xmm0
.byte 65,15,89,200 // mulps %xmm8,%xmm1
.byte 102,15,91,201 // cvtps2dq %xmm1,%xmm1
@@ -38094,31 +38094,31 @@ _sk_parametric_r_sse41:
.byte 69,15,88,208 // addps %xmm8,%xmm10
.byte 69,15,198,219,0 // shufps $0x0,%xmm11,%xmm11
.byte 69,15,91,194 // cvtdq2ps %xmm10,%xmm8
- .byte 68,15,89,5,0,188,1,0 // mulps 0x1bc00(%rip),%xmm8 # 395a0 <_sk_srcover_bgra_8888_sse2_lowp+0xe9c>
- .byte 68,15,84,21,8,188,1,0 // andps 0x1bc08(%rip),%xmm10 # 395b0 <_sk_srcover_bgra_8888_sse2_lowp+0xeac>
- .byte 68,15,86,21,48,185,1,0 // orps 0x1b930(%rip),%xmm10 # 392e0 <_sk_srcover_bgra_8888_sse2_lowp+0xbdc>
- .byte 68,15,88,5,8,188,1,0 // addps 0x1bc08(%rip),%xmm8 # 395c0 <_sk_srcover_bgra_8888_sse2_lowp+0xebc>
- .byte 68,15,40,37,16,188,1,0 // movaps 0x1bc10(%rip),%xmm12 # 395d0 <_sk_srcover_bgra_8888_sse2_lowp+0xecc>
+ .byte 68,15,89,5,96,189,1,0 // mulps 0x1bd60(%rip),%xmm8 # 39700 <_sk_srcover_bgra_8888_sse2_lowp+0xe8c>
+ .byte 68,15,84,21,104,189,1,0 // andps 0x1bd68(%rip),%xmm10 # 39710 <_sk_srcover_bgra_8888_sse2_lowp+0xe9c>
+ .byte 68,15,86,21,144,186,1,0 // orps 0x1ba90(%rip),%xmm10 # 39440 <_sk_srcover_bgra_8888_sse2_lowp+0xbcc>
+ .byte 68,15,88,5,104,189,1,0 // addps 0x1bd68(%rip),%xmm8 # 39720 <_sk_srcover_bgra_8888_sse2_lowp+0xeac>
+ .byte 68,15,40,37,112,189,1,0 // movaps 0x1bd70(%rip),%xmm12 # 39730 <_sk_srcover_bgra_8888_sse2_lowp+0xebc>
.byte 69,15,89,226 // mulps %xmm10,%xmm12
.byte 69,15,92,196 // subps %xmm12,%xmm8
- .byte 68,15,88,21,16,188,1,0 // addps 0x1bc10(%rip),%xmm10 # 395e0 <_sk_srcover_bgra_8888_sse2_lowp+0xedc>
- .byte 68,15,40,37,24,188,1,0 // movaps 0x1bc18(%rip),%xmm12 # 395f0 <_sk_srcover_bgra_8888_sse2_lowp+0xeec>
+ .byte 68,15,88,21,112,189,1,0 // addps 0x1bd70(%rip),%xmm10 # 39740 <_sk_srcover_bgra_8888_sse2_lowp+0xecc>
+ .byte 68,15,40,37,120,189,1,0 // movaps 0x1bd78(%rip),%xmm12 # 39750 <_sk_srcover_bgra_8888_sse2_lowp+0xedc>
.byte 69,15,94,226 // divps %xmm10,%xmm12
.byte 69,15,92,196 // subps %xmm12,%xmm8
.byte 69,15,89,195 // mulps %xmm11,%xmm8
.byte 102,69,15,58,8,208,1 // roundps $0x1,%xmm8,%xmm10
.byte 69,15,40,216 // movaps %xmm8,%xmm11
.byte 69,15,92,218 // subps %xmm10,%xmm11
- .byte 68,15,88,5,5,188,1,0 // addps 0x1bc05(%rip),%xmm8 # 39600 <_sk_srcover_bgra_8888_sse2_lowp+0xefc>
- .byte 68,15,40,21,13,188,1,0 // movaps 0x1bc0d(%rip),%xmm10 # 39610 <_sk_srcover_bgra_8888_sse2_lowp+0xf0c>
+ .byte 68,15,88,5,101,189,1,0 // addps 0x1bd65(%rip),%xmm8 # 39760 <_sk_srcover_bgra_8888_sse2_lowp+0xeec>
+ .byte 68,15,40,21,109,189,1,0 // movaps 0x1bd6d(%rip),%xmm10 # 39770 <_sk_srcover_bgra_8888_sse2_lowp+0xefc>
.byte 69,15,89,211 // mulps %xmm11,%xmm10
.byte 69,15,92,194 // subps %xmm10,%xmm8
- .byte 68,15,40,21,13,188,1,0 // movaps 0x1bc0d(%rip),%xmm10 # 39620 <_sk_srcover_bgra_8888_sse2_lowp+0xf1c>
+ .byte 68,15,40,21,109,189,1,0 // movaps 0x1bd6d(%rip),%xmm10 # 39780 <_sk_srcover_bgra_8888_sse2_lowp+0xf0c>
.byte 69,15,92,211 // subps %xmm11,%xmm10
- .byte 68,15,40,29,17,188,1,0 // movaps 0x1bc11(%rip),%xmm11 # 39630 <_sk_srcover_bgra_8888_sse2_lowp+0xf2c>
+ .byte 68,15,40,29,113,189,1,0 // movaps 0x1bd71(%rip),%xmm11 # 39790 <_sk_srcover_bgra_8888_sse2_lowp+0xf1c>
.byte 69,15,94,218 // divps %xmm10,%xmm11
.byte 69,15,88,216 // addps %xmm8,%xmm11
- .byte 68,15,89,29,17,188,1,0 // mulps 0x1bc11(%rip),%xmm11 # 39640 <_sk_srcover_bgra_8888_sse2_lowp+0xf3c>
+ .byte 68,15,89,29,113,189,1,0 // mulps 0x1bd71(%rip),%xmm11 # 397a0 <_sk_srcover_bgra_8888_sse2_lowp+0xf2c>
.byte 102,69,15,91,211 // cvtps2dq %xmm11,%xmm10
.byte 243,68,15,16,64,20 // movss 0x14(%rax),%xmm8
.byte 69,15,198,192,0 // shufps $0x0,%xmm8,%xmm8
@@ -38126,7 +38126,7 @@ _sk_parametric_r_sse41:
.byte 102,69,15,56,20,193 // blendvps %xmm0,%xmm9,%xmm8
.byte 15,87,192 // xorps %xmm0,%xmm0
.byte 68,15,95,192 // maxps %xmm0,%xmm8
- .byte 68,15,93,5,152,184,1,0 // minps 0x1b898(%rip),%xmm8 # 392f0 <_sk_srcover_bgra_8888_sse2_lowp+0xbec>
+ .byte 68,15,93,5,248,185,1,0 // minps 0x1b9f8(%rip),%xmm8 # 39450 <_sk_srcover_bgra_8888_sse2_lowp+0xbdc>
.byte 72,173 // lods %ds:(%rsi),%rax
.byte 65,15,40,192 // movaps %xmm8,%xmm0
.byte 255,224 // jmpq *%rax
@@ -38156,31 +38156,31 @@ _sk_parametric_g_sse41:
.byte 68,15,88,217 // addps %xmm1,%xmm11
.byte 69,15,198,210,0 // shufps $0x0,%xmm10,%xmm10
.byte 69,15,91,227 // cvtdq2ps %xmm11,%xmm12
- .byte 68,15,89,37,217,186,1,0 // mulps 0x1bad9(%rip),%xmm12 # 395a0 <_sk_srcover_bgra_8888_sse2_lowp+0xe9c>
- .byte 68,15,84,29,225,186,1,0 // andps 0x1bae1(%rip),%xmm11 # 395b0 <_sk_srcover_bgra_8888_sse2_lowp+0xeac>
- .byte 68,15,86,29,9,184,1,0 // orps 0x1b809(%rip),%xmm11 # 392e0 <_sk_srcover_bgra_8888_sse2_lowp+0xbdc>
- .byte 68,15,88,37,225,186,1,0 // addps 0x1bae1(%rip),%xmm12 # 395c0 <_sk_srcover_bgra_8888_sse2_lowp+0xebc>
- .byte 15,40,13,234,186,1,0 // movaps 0x1baea(%rip),%xmm1 # 395d0 <_sk_srcover_bgra_8888_sse2_lowp+0xecc>
+ .byte 68,15,89,37,57,188,1,0 // mulps 0x1bc39(%rip),%xmm12 # 39700 <_sk_srcover_bgra_8888_sse2_lowp+0xe8c>
+ .byte 68,15,84,29,65,188,1,0 // andps 0x1bc41(%rip),%xmm11 # 39710 <_sk_srcover_bgra_8888_sse2_lowp+0xe9c>
+ .byte 68,15,86,29,105,185,1,0 // orps 0x1b969(%rip),%xmm11 # 39440 <_sk_srcover_bgra_8888_sse2_lowp+0xbcc>
+ .byte 68,15,88,37,65,188,1,0 // addps 0x1bc41(%rip),%xmm12 # 39720 <_sk_srcover_bgra_8888_sse2_lowp+0xeac>
+ .byte 15,40,13,74,188,1,0 // movaps 0x1bc4a(%rip),%xmm1 # 39730 <_sk_srcover_bgra_8888_sse2_lowp+0xebc>
.byte 65,15,89,203 // mulps %xmm11,%xmm1
.byte 68,15,92,225 // subps %xmm1,%xmm12
- .byte 68,15,88,29,234,186,1,0 // addps 0x1baea(%rip),%xmm11 # 395e0 <_sk_srcover_bgra_8888_sse2_lowp+0xedc>
- .byte 15,40,13,243,186,1,0 // movaps 0x1baf3(%rip),%xmm1 # 395f0 <_sk_srcover_bgra_8888_sse2_lowp+0xeec>
+ .byte 68,15,88,29,74,188,1,0 // addps 0x1bc4a(%rip),%xmm11 # 39740 <_sk_srcover_bgra_8888_sse2_lowp+0xecc>
+ .byte 15,40,13,83,188,1,0 // movaps 0x1bc53(%rip),%xmm1 # 39750 <_sk_srcover_bgra_8888_sse2_lowp+0xedc>
.byte 65,15,94,203 // divps %xmm11,%xmm1
.byte 68,15,92,225 // subps %xmm1,%xmm12
.byte 69,15,89,226 // mulps %xmm10,%xmm12
.byte 102,69,15,58,8,212,1 // roundps $0x1,%xmm12,%xmm10
.byte 69,15,40,220 // movaps %xmm12,%xmm11
.byte 69,15,92,218 // subps %xmm10,%xmm11
- .byte 68,15,88,37,224,186,1,0 // addps 0x1bae0(%rip),%xmm12 # 39600 <_sk_srcover_bgra_8888_sse2_lowp+0xefc>
- .byte 15,40,13,233,186,1,0 // movaps 0x1bae9(%rip),%xmm1 # 39610 <_sk_srcover_bgra_8888_sse2_lowp+0xf0c>
+ .byte 68,15,88,37,64,188,1,0 // addps 0x1bc40(%rip),%xmm12 # 39760 <_sk_srcover_bgra_8888_sse2_lowp+0xeec>
+ .byte 15,40,13,73,188,1,0 // movaps 0x1bc49(%rip),%xmm1 # 39770 <_sk_srcover_bgra_8888_sse2_lowp+0xefc>
.byte 65,15,89,203 // mulps %xmm11,%xmm1
.byte 68,15,92,225 // subps %xmm1,%xmm12
- .byte 68,15,40,21,233,186,1,0 // movaps 0x1bae9(%rip),%xmm10 # 39620 <_sk_srcover_bgra_8888_sse2_lowp+0xf1c>
+ .byte 68,15,40,21,73,188,1,0 // movaps 0x1bc49(%rip),%xmm10 # 39780 <_sk_srcover_bgra_8888_sse2_lowp+0xf0c>
.byte 69,15,92,211 // subps %xmm11,%xmm10
- .byte 15,40,13,238,186,1,0 // movaps 0x1baee(%rip),%xmm1 # 39630 <_sk_srcover_bgra_8888_sse2_lowp+0xf2c>
+ .byte 15,40,13,78,188,1,0 // movaps 0x1bc4e(%rip),%xmm1 # 39790 <_sk_srcover_bgra_8888_sse2_lowp+0xf1c>
.byte 65,15,94,202 // divps %xmm10,%xmm1
.byte 65,15,88,204 // addps %xmm12,%xmm1
- .byte 15,89,13,239,186,1,0 // mulps 0x1baef(%rip),%xmm1 # 39640 <_sk_srcover_bgra_8888_sse2_lowp+0xf3c>
+ .byte 15,89,13,79,188,1,0 // mulps 0x1bc4f(%rip),%xmm1 # 397a0 <_sk_srcover_bgra_8888_sse2_lowp+0xf2c>
.byte 102,68,15,91,209 // cvtps2dq %xmm1,%xmm10
.byte 243,15,16,72,20 // movss 0x14(%rax),%xmm1
.byte 15,198,201,0 // shufps $0x0,%xmm1,%xmm1
@@ -38188,7 +38188,7 @@ _sk_parametric_g_sse41:
.byte 102,65,15,56,20,201 // blendvps %xmm0,%xmm9,%xmm1
.byte 15,87,192 // xorps %xmm0,%xmm0
.byte 15,95,200 // maxps %xmm0,%xmm1
- .byte 15,93,13,122,183,1,0 // minps 0x1b77a(%rip),%xmm1 # 392f0 <_sk_srcover_bgra_8888_sse2_lowp+0xbec>
+ .byte 15,93,13,218,184,1,0 // minps 0x1b8da(%rip),%xmm1 # 39450 <_sk_srcover_bgra_8888_sse2_lowp+0xbdc>
.byte 72,173 // lods %ds:(%rsi),%rax
.byte 65,15,40,192 // movaps %xmm8,%xmm0
.byte 255,224 // jmpq *%rax
@@ -38218,31 +38218,31 @@ _sk_parametric_b_sse41:
.byte 68,15,88,218 // addps %xmm2,%xmm11
.byte 69,15,198,210,0 // shufps $0x0,%xmm10,%xmm10
.byte 69,15,91,227 // cvtdq2ps %xmm11,%xmm12
- .byte 68,15,89,37,187,185,1,0 // mulps 0x1b9bb(%rip),%xmm12 # 395a0 <_sk_srcover_bgra_8888_sse2_lowp+0xe9c>
- .byte 68,15,84,29,195,185,1,0 // andps 0x1b9c3(%rip),%xmm11 # 395b0 <_sk_srcover_bgra_8888_sse2_lowp+0xeac>
- .byte 68,15,86,29,235,182,1,0 // orps 0x1b6eb(%rip),%xmm11 # 392e0 <_sk_srcover_bgra_8888_sse2_lowp+0xbdc>
- .byte 68,15,88,37,195,185,1,0 // addps 0x1b9c3(%rip),%xmm12 # 395c0 <_sk_srcover_bgra_8888_sse2_lowp+0xebc>
- .byte 15,40,21,204,185,1,0 // movaps 0x1b9cc(%rip),%xmm2 # 395d0 <_sk_srcover_bgra_8888_sse2_lowp+0xecc>
+ .byte 68,15,89,37,27,187,1,0 // mulps 0x1bb1b(%rip),%xmm12 # 39700 <_sk_srcover_bgra_8888_sse2_lowp+0xe8c>
+ .byte 68,15,84,29,35,187,1,0 // andps 0x1bb23(%rip),%xmm11 # 39710 <_sk_srcover_bgra_8888_sse2_lowp+0xe9c>
+ .byte 68,15,86,29,75,184,1,0 // orps 0x1b84b(%rip),%xmm11 # 39440 <_sk_srcover_bgra_8888_sse2_lowp+0xbcc>
+ .byte 68,15,88,37,35,187,1,0 // addps 0x1bb23(%rip),%xmm12 # 39720 <_sk_srcover_bgra_8888_sse2_lowp+0xeac>
+ .byte 15,40,21,44,187,1,0 // movaps 0x1bb2c(%rip),%xmm2 # 39730 <_sk_srcover_bgra_8888_sse2_lowp+0xebc>
.byte 65,15,89,211 // mulps %xmm11,%xmm2
.byte 68,15,92,226 // subps %xmm2,%xmm12
- .byte 68,15,88,29,204,185,1,0 // addps 0x1b9cc(%rip),%xmm11 # 395e0 <_sk_srcover_bgra_8888_sse2_lowp+0xedc>
- .byte 15,40,21,213,185,1,0 // movaps 0x1b9d5(%rip),%xmm2 # 395f0 <_sk_srcover_bgra_8888_sse2_lowp+0xeec>
+ .byte 68,15,88,29,44,187,1,0 // addps 0x1bb2c(%rip),%xmm11 # 39740 <_sk_srcover_bgra_8888_sse2_lowp+0xecc>
+ .byte 15,40,21,53,187,1,0 // movaps 0x1bb35(%rip),%xmm2 # 39750 <_sk_srcover_bgra_8888_sse2_lowp+0xedc>
.byte 65,15,94,211 // divps %xmm11,%xmm2
.byte 68,15,92,226 // subps %xmm2,%xmm12
.byte 69,15,89,226 // mulps %xmm10,%xmm12
.byte 102,69,15,58,8,212,1 // roundps $0x1,%xmm12,%xmm10
.byte 69,15,40,220 // movaps %xmm12,%xmm11
.byte 69,15,92,218 // subps %xmm10,%xmm11
- .byte 68,15,88,37,194,185,1,0 // addps 0x1b9c2(%rip),%xmm12 # 39600 <_sk_srcover_bgra_8888_sse2_lowp+0xefc>
- .byte 15,40,21,203,185,1,0 // movaps 0x1b9cb(%rip),%xmm2 # 39610 <_sk_srcover_bgra_8888_sse2_lowp+0xf0c>
+ .byte 68,15,88,37,34,187,1,0 // addps 0x1bb22(%rip),%xmm12 # 39760 <_sk_srcover_bgra_8888_sse2_lowp+0xeec>
+ .byte 15,40,21,43,187,1,0 // movaps 0x1bb2b(%rip),%xmm2 # 39770 <_sk_srcover_bgra_8888_sse2_lowp+0xefc>
.byte 65,15,89,211 // mulps %xmm11,%xmm2
.byte 68,15,92,226 // subps %xmm2,%xmm12
- .byte 68,15,40,21,203,185,1,0 // movaps 0x1b9cb(%rip),%xmm10 # 39620 <_sk_srcover_bgra_8888_sse2_lowp+0xf1c>
+ .byte 68,15,40,21,43,187,1,0 // movaps 0x1bb2b(%rip),%xmm10 # 39780 <_sk_srcover_bgra_8888_sse2_lowp+0xf0c>
.byte 69,15,92,211 // subps %xmm11,%xmm10
- .byte 15,40,21,208,185,1,0 // movaps 0x1b9d0(%rip),%xmm2 # 39630 <_sk_srcover_bgra_8888_sse2_lowp+0xf2c>
+ .byte 15,40,21,48,187,1,0 // movaps 0x1bb30(%rip),%xmm2 # 39790 <_sk_srcover_bgra_8888_sse2_lowp+0xf1c>
.byte 65,15,94,210 // divps %xmm10,%xmm2
.byte 65,15,88,212 // addps %xmm12,%xmm2
- .byte 15,89,21,209,185,1,0 // mulps 0x1b9d1(%rip),%xmm2 # 39640 <_sk_srcover_bgra_8888_sse2_lowp+0xf3c>
+ .byte 15,89,21,49,187,1,0 // mulps 0x1bb31(%rip),%xmm2 # 397a0 <_sk_srcover_bgra_8888_sse2_lowp+0xf2c>
.byte 102,68,15,91,210 // cvtps2dq %xmm2,%xmm10
.byte 243,15,16,80,20 // movss 0x14(%rax),%xmm2
.byte 15,198,210,0 // shufps $0x0,%xmm2,%xmm2
@@ -38250,7 +38250,7 @@ _sk_parametric_b_sse41:
.byte 102,65,15,56,20,209 // blendvps %xmm0,%xmm9,%xmm2
.byte 15,87,192 // xorps %xmm0,%xmm0
.byte 15,95,208 // maxps %xmm0,%xmm2
- .byte 15,93,21,92,182,1,0 // minps 0x1b65c(%rip),%xmm2 # 392f0 <_sk_srcover_bgra_8888_sse2_lowp+0xbec>
+ .byte 15,93,21,188,183,1,0 // minps 0x1b7bc(%rip),%xmm2 # 39450 <_sk_srcover_bgra_8888_sse2_lowp+0xbdc>
.byte 72,173 // lods %ds:(%rsi),%rax
.byte 65,15,40,192 // movaps %xmm8,%xmm0
.byte 255,224 // jmpq *%rax
@@ -38280,31 +38280,31 @@ _sk_parametric_a_sse41:
.byte 68,15,88,219 // addps %xmm3,%xmm11
.byte 69,15,198,210,0 // shufps $0x0,%xmm10,%xmm10
.byte 69,15,91,227 // cvtdq2ps %xmm11,%xmm12
- .byte 68,15,89,37,157,184,1,0 // mulps 0x1b89d(%rip),%xmm12 # 395a0 <_sk_srcover_bgra_8888_sse2_lowp+0xe9c>
- .byte 68,15,84,29,165,184,1,0 // andps 0x1b8a5(%rip),%xmm11 # 395b0 <_sk_srcover_bgra_8888_sse2_lowp+0xeac>
- .byte 68,15,86,29,205,181,1,0 // orps 0x1b5cd(%rip),%xmm11 # 392e0 <_sk_srcover_bgra_8888_sse2_lowp+0xbdc>
- .byte 68,15,88,37,165,184,1,0 // addps 0x1b8a5(%rip),%xmm12 # 395c0 <_sk_srcover_bgra_8888_sse2_lowp+0xebc>
- .byte 15,40,29,174,184,1,0 // movaps 0x1b8ae(%rip),%xmm3 # 395d0 <_sk_srcover_bgra_8888_sse2_lowp+0xecc>
+ .byte 68,15,89,37,253,185,1,0 // mulps 0x1b9fd(%rip),%xmm12 # 39700 <_sk_srcover_bgra_8888_sse2_lowp+0xe8c>
+ .byte 68,15,84,29,5,186,1,0 // andps 0x1ba05(%rip),%xmm11 # 39710 <_sk_srcover_bgra_8888_sse2_lowp+0xe9c>
+ .byte 68,15,86,29,45,183,1,0 // orps 0x1b72d(%rip),%xmm11 # 39440 <_sk_srcover_bgra_8888_sse2_lowp+0xbcc>
+ .byte 68,15,88,37,5,186,1,0 // addps 0x1ba05(%rip),%xmm12 # 39720 <_sk_srcover_bgra_8888_sse2_lowp+0xeac>
+ .byte 15,40,29,14,186,1,0 // movaps 0x1ba0e(%rip),%xmm3 # 39730 <_sk_srcover_bgra_8888_sse2_lowp+0xebc>
.byte 65,15,89,219 // mulps %xmm11,%xmm3
.byte 68,15,92,227 // subps %xmm3,%xmm12
- .byte 68,15,88,29,174,184,1,0 // addps 0x1b8ae(%rip),%xmm11 # 395e0 <_sk_srcover_bgra_8888_sse2_lowp+0xedc>
- .byte 15,40,29,183,184,1,0 // movaps 0x1b8b7(%rip),%xmm3 # 395f0 <_sk_srcover_bgra_8888_sse2_lowp+0xeec>
+ .byte 68,15,88,29,14,186,1,0 // addps 0x1ba0e(%rip),%xmm11 # 39740 <_sk_srcover_bgra_8888_sse2_lowp+0xecc>
+ .byte 15,40,29,23,186,1,0 // movaps 0x1ba17(%rip),%xmm3 # 39750 <_sk_srcover_bgra_8888_sse2_lowp+0xedc>
.byte 65,15,94,219 // divps %xmm11,%xmm3
.byte 68,15,92,227 // subps %xmm3,%xmm12
.byte 69,15,89,226 // mulps %xmm10,%xmm12
.byte 102,69,15,58,8,212,1 // roundps $0x1,%xmm12,%xmm10
.byte 69,15,40,220 // movaps %xmm12,%xmm11
.byte 69,15,92,218 // subps %xmm10,%xmm11
- .byte 68,15,88,37,164,184,1,0 // addps 0x1b8a4(%rip),%xmm12 # 39600 <_sk_srcover_bgra_8888_sse2_lowp+0xefc>
- .byte 15,40,29,173,184,1,0 // movaps 0x1b8ad(%rip),%xmm3 # 39610 <_sk_srcover_bgra_8888_sse2_lowp+0xf0c>
+ .byte 68,15,88,37,4,186,1,0 // addps 0x1ba04(%rip),%xmm12 # 39760 <_sk_srcover_bgra_8888_sse2_lowp+0xeec>
+ .byte 15,40,29,13,186,1,0 // movaps 0x1ba0d(%rip),%xmm3 # 39770 <_sk_srcover_bgra_8888_sse2_lowp+0xefc>
.byte 65,15,89,219 // mulps %xmm11,%xmm3
.byte 68,15,92,227 // subps %xmm3,%xmm12
- .byte 68,15,40,21,173,184,1,0 // movaps 0x1b8ad(%rip),%xmm10 # 39620 <_sk_srcover_bgra_8888_sse2_lowp+0xf1c>
+ .byte 68,15,40,21,13,186,1,0 // movaps 0x1ba0d(%rip),%xmm10 # 39780 <_sk_srcover_bgra_8888_sse2_lowp+0xf0c>
.byte 69,15,92,211 // subps %xmm11,%xmm10
- .byte 15,40,29,178,184,1,0 // movaps 0x1b8b2(%rip),%xmm3 # 39630 <_sk_srcover_bgra_8888_sse2_lowp+0xf2c>
+ .byte 15,40,29,18,186,1,0 // movaps 0x1ba12(%rip),%xmm3 # 39790 <_sk_srcover_bgra_8888_sse2_lowp+0xf1c>
.byte 65,15,94,218 // divps %xmm10,%xmm3
.byte 65,15,88,220 // addps %xmm12,%xmm3
- .byte 15,89,29,179,184,1,0 // mulps 0x1b8b3(%rip),%xmm3 # 39640 <_sk_srcover_bgra_8888_sse2_lowp+0xf3c>
+ .byte 15,89,29,19,186,1,0 // mulps 0x1ba13(%rip),%xmm3 # 397a0 <_sk_srcover_bgra_8888_sse2_lowp+0xf2c>
.byte 102,68,15,91,211 // cvtps2dq %xmm3,%xmm10
.byte 243,15,16,88,20 // movss 0x14(%rax),%xmm3
.byte 15,198,219,0 // shufps $0x0,%xmm3,%xmm3
@@ -38312,7 +38312,7 @@ _sk_parametric_a_sse41:
.byte 102,65,15,56,20,217 // blendvps %xmm0,%xmm9,%xmm3
.byte 15,87,192 // xorps %xmm0,%xmm0
.byte 15,95,216 // maxps %xmm0,%xmm3
- .byte 15,93,29,62,181,1,0 // minps 0x1b53e(%rip),%xmm3 # 392f0 <_sk_srcover_bgra_8888_sse2_lowp+0xbec>
+ .byte 15,93,29,158,182,1,0 // minps 0x1b69e(%rip),%xmm3 # 39450 <_sk_srcover_bgra_8888_sse2_lowp+0xbdc>
.byte 72,173 // lods %ds:(%rsi),%rax
.byte 65,15,40,192 // movaps %xmm8,%xmm0
.byte 255,224 // jmpq *%rax
@@ -38329,20 +38329,20 @@ _sk_gamma_sse41:
.byte 15,40,218 // movaps %xmm2,%xmm3
.byte 15,40,208 // movaps %xmm0,%xmm2
.byte 15,91,194 // cvtdq2ps %xmm2,%xmm0
- .byte 15,89,5,189,183,1,0 // mulps 0x1b7bd(%rip),%xmm0 # 395a0 <_sk_srcover_bgra_8888_sse2_lowp+0xe9c>
- .byte 68,15,40,53,197,183,1,0 // movaps 0x1b7c5(%rip),%xmm14 # 395b0 <_sk_srcover_bgra_8888_sse2_lowp+0xeac>
+ .byte 15,89,5,29,185,1,0 // mulps 0x1b91d(%rip),%xmm0 # 39700 <_sk_srcover_bgra_8888_sse2_lowp+0xe8c>
+ .byte 68,15,40,53,37,185,1,0 // movaps 0x1b925(%rip),%xmm14 # 39710 <_sk_srcover_bgra_8888_sse2_lowp+0xe9c>
.byte 65,15,84,214 // andps %xmm14,%xmm2
- .byte 68,15,40,37,233,180,1,0 // movaps 0x1b4e9(%rip),%xmm12 # 392e0 <_sk_srcover_bgra_8888_sse2_lowp+0xbdc>
+ .byte 68,15,40,37,73,182,1,0 // movaps 0x1b649(%rip),%xmm12 # 39440 <_sk_srcover_bgra_8888_sse2_lowp+0xbcc>
.byte 65,15,86,212 // orps %xmm12,%xmm2
- .byte 68,15,40,21,189,183,1,0 // movaps 0x1b7bd(%rip),%xmm10 # 395c0 <_sk_srcover_bgra_8888_sse2_lowp+0xebc>
+ .byte 68,15,40,21,29,185,1,0 // movaps 0x1b91d(%rip),%xmm10 # 39720 <_sk_srcover_bgra_8888_sse2_lowp+0xeac>
.byte 65,15,88,194 // addps %xmm10,%xmm0
- .byte 68,15,40,29,193,183,1,0 // movaps 0x1b7c1(%rip),%xmm11 # 395d0 <_sk_srcover_bgra_8888_sse2_lowp+0xecc>
+ .byte 68,15,40,29,33,185,1,0 // movaps 0x1b921(%rip),%xmm11 # 39730 <_sk_srcover_bgra_8888_sse2_lowp+0xebc>
.byte 15,40,226 // movaps %xmm2,%xmm4
.byte 65,15,89,227 // mulps %xmm11,%xmm4
.byte 15,92,196 // subps %xmm4,%xmm0
- .byte 68,15,40,13,191,183,1,0 // movaps 0x1b7bf(%rip),%xmm9 # 395e0 <_sk_srcover_bgra_8888_sse2_lowp+0xedc>
+ .byte 68,15,40,13,31,185,1,0 // movaps 0x1b91f(%rip),%xmm9 # 39740 <_sk_srcover_bgra_8888_sse2_lowp+0xecc>
.byte 65,15,88,209 // addps %xmm9,%xmm2
- .byte 68,15,40,45,195,183,1,0 // movaps 0x1b7c3(%rip),%xmm13 # 395f0 <_sk_srcover_bgra_8888_sse2_lowp+0xeec>
+ .byte 68,15,40,45,35,185,1,0 // movaps 0x1b923(%rip),%xmm13 # 39750 <_sk_srcover_bgra_8888_sse2_lowp+0xedc>
.byte 65,15,40,229 // movaps %xmm13,%xmm4
.byte 15,94,226 // divps %xmm2,%xmm4
.byte 72,173 // lods %ds:(%rsi),%rax
@@ -38353,21 +38353,21 @@ _sk_gamma_sse41:
.byte 102,15,58,8,208,1 // roundps $0x1,%xmm0,%xmm2
.byte 15,40,224 // movaps %xmm0,%xmm4
.byte 15,92,226 // subps %xmm2,%xmm4
- .byte 15,40,53,166,183,1,0 // movaps 0x1b7a6(%rip),%xmm6 # 39600 <_sk_srcover_bgra_8888_sse2_lowp+0xefc>
+ .byte 15,40,53,6,185,1,0 // movaps 0x1b906(%rip),%xmm6 # 39760 <_sk_srcover_bgra_8888_sse2_lowp+0xeec>
.byte 15,88,198 // addps %xmm6,%xmm0
- .byte 15,40,61,188,183,1,0 // movaps 0x1b7bc(%rip),%xmm7 # 39620 <_sk_srcover_bgra_8888_sse2_lowp+0xf1c>
+ .byte 15,40,61,28,185,1,0 // movaps 0x1b91c(%rip),%xmm7 # 39780 <_sk_srcover_bgra_8888_sse2_lowp+0xf0c>
.byte 15,40,239 // movaps %xmm7,%xmm5
.byte 15,92,236 // subps %xmm4,%xmm5
.byte 15,40,212 // movaps %xmm4,%xmm2
- .byte 15,40,37,156,183,1,0 // movaps 0x1b79c(%rip),%xmm4 # 39610 <_sk_srcover_bgra_8888_sse2_lowp+0xf0c>
+ .byte 15,40,37,252,184,1,0 // movaps 0x1b8fc(%rip),%xmm4 # 39770 <_sk_srcover_bgra_8888_sse2_lowp+0xefc>
.byte 15,89,212 // mulps %xmm4,%xmm2
.byte 15,92,194 // subps %xmm2,%xmm0
- .byte 68,15,40,61,174,183,1,0 // movaps 0x1b7ae(%rip),%xmm15 # 39630 <_sk_srcover_bgra_8888_sse2_lowp+0xf2c>
+ .byte 68,15,40,61,14,185,1,0 // movaps 0x1b90e(%rip),%xmm15 # 39790 <_sk_srcover_bgra_8888_sse2_lowp+0xf1c>
.byte 65,15,40,215 // movaps %xmm15,%xmm2
.byte 15,94,213 // divps %xmm5,%xmm2
.byte 15,88,208 // addps %xmm0,%xmm2
.byte 15,91,193 // cvtdq2ps %xmm1,%xmm0
- .byte 15,89,5,10,183,1,0 // mulps 0x1b70a(%rip),%xmm0 # 395a0 <_sk_srcover_bgra_8888_sse2_lowp+0xe9c>
+ .byte 15,89,5,106,184,1,0 // mulps 0x1b86a(%rip),%xmm0 # 39700 <_sk_srcover_bgra_8888_sse2_lowp+0xe8c>
.byte 65,15,84,206 // andps %xmm14,%xmm1
.byte 65,15,86,204 // orps %xmm12,%xmm1
.byte 65,15,88,194 // addps %xmm10,%xmm0
@@ -38393,8 +38393,8 @@ _sk_gamma_sse41:
.byte 15,94,206 // divps %xmm6,%xmm1
.byte 15,88,200 // addps %xmm0,%xmm1
.byte 15,91,195 // cvtdq2ps %xmm3,%xmm0
- .byte 15,89,5,171,182,1,0 // mulps 0x1b6ab(%rip),%xmm0 # 395a0 <_sk_srcover_bgra_8888_sse2_lowp+0xe9c>
- .byte 15,84,29,180,182,1,0 // andps 0x1b6b4(%rip),%xmm3 # 395b0 <_sk_srcover_bgra_8888_sse2_lowp+0xeac>
+ .byte 15,89,5,11,184,1,0 // mulps 0x1b80b(%rip),%xmm0 # 39700 <_sk_srcover_bgra_8888_sse2_lowp+0xe8c>
+ .byte 15,84,29,20,184,1,0 // andps 0x1b814(%rip),%xmm3 # 39710 <_sk_srcover_bgra_8888_sse2_lowp+0xe9c>
.byte 65,15,86,220 // orps %xmm12,%xmm3
.byte 65,15,88,194 // addps %xmm10,%xmm0
.byte 68,15,89,219 // mulps %xmm3,%xmm11
@@ -38412,7 +38412,7 @@ _sk_gamma_sse41:
.byte 15,92,253 // subps %xmm5,%xmm7
.byte 68,15,94,255 // divps %xmm7,%xmm15
.byte 68,15,88,248 // addps %xmm0,%xmm15
- .byte 15,40,5,252,182,1,0 // movaps 0x1b6fc(%rip),%xmm0 # 39640 <_sk_srcover_bgra_8888_sse2_lowp+0xf3c>
+ .byte 15,40,5,92,184,1,0 // movaps 0x1b85c(%rip),%xmm0 # 397a0 <_sk_srcover_bgra_8888_sse2_lowp+0xf2c>
.byte 15,89,208 // mulps %xmm0,%xmm2
.byte 15,89,200 // mulps %xmm0,%xmm1
.byte 68,15,89,248 // mulps %xmm0,%xmm15
@@ -38432,29 +38432,29 @@ HIDDEN _sk_lab_to_xyz_sse41
FUNCTION(_sk_lab_to_xyz_sse41)
_sk_lab_to_xyz_sse41:
.byte 68,15,40,192 // movaps %xmm0,%xmm8
- .byte 68,15,89,5,204,182,1,0 // mulps 0x1b6cc(%rip),%xmm8 # 39650 <_sk_srcover_bgra_8888_sse2_lowp+0xf4c>
- .byte 68,15,40,13,68,180,1,0 // movaps 0x1b444(%rip),%xmm9 # 393d0 <_sk_srcover_bgra_8888_sse2_lowp+0xccc>
+ .byte 68,15,89,5,44,184,1,0 // mulps 0x1b82c(%rip),%xmm8 # 397b0 <_sk_srcover_bgra_8888_sse2_lowp+0xf3c>
+ .byte 68,15,40,13,164,181,1,0 // movaps 0x1b5a4(%rip),%xmm9 # 39530 <_sk_srcover_bgra_8888_sse2_lowp+0xcbc>
.byte 65,15,89,201 // mulps %xmm9,%xmm1
- .byte 15,40,5,201,182,1,0 // movaps 0x1b6c9(%rip),%xmm0 # 39660 <_sk_srcover_bgra_8888_sse2_lowp+0xf5c>
+ .byte 15,40,5,41,184,1,0 // movaps 0x1b829(%rip),%xmm0 # 397c0 <_sk_srcover_bgra_8888_sse2_lowp+0xf4c>
.byte 15,88,200 // addps %xmm0,%xmm1
.byte 65,15,89,209 // mulps %xmm9,%xmm2
.byte 15,88,208 // addps %xmm0,%xmm2
- .byte 68,15,88,5,199,182,1,0 // addps 0x1b6c7(%rip),%xmm8 # 39670 <_sk_srcover_bgra_8888_sse2_lowp+0xf6c>
- .byte 68,15,89,5,207,182,1,0 // mulps 0x1b6cf(%rip),%xmm8 # 39680 <_sk_srcover_bgra_8888_sse2_lowp+0xf7c>
- .byte 15,89,13,216,182,1,0 // mulps 0x1b6d8(%rip),%xmm1 # 39690 <_sk_srcover_bgra_8888_sse2_lowp+0xf8c>
+ .byte 68,15,88,5,39,184,1,0 // addps 0x1b827(%rip),%xmm8 # 397d0 <_sk_srcover_bgra_8888_sse2_lowp+0xf5c>
+ .byte 68,15,89,5,47,184,1,0 // mulps 0x1b82f(%rip),%xmm8 # 397e0 <_sk_srcover_bgra_8888_sse2_lowp+0xf6c>
+ .byte 15,89,13,56,184,1,0 // mulps 0x1b838(%rip),%xmm1 # 397f0 <_sk_srcover_bgra_8888_sse2_lowp+0xf7c>
.byte 65,15,88,200 // addps %xmm8,%xmm1
- .byte 15,89,21,221,182,1,0 // mulps 0x1b6dd(%rip),%xmm2 # 396a0 <_sk_srcover_bgra_8888_sse2_lowp+0xf9c>
+ .byte 15,89,21,61,184,1,0 // mulps 0x1b83d(%rip),%xmm2 # 39800 <_sk_srcover_bgra_8888_sse2_lowp+0xf8c>
.byte 69,15,40,208 // movaps %xmm8,%xmm10
.byte 68,15,92,210 // subps %xmm2,%xmm10
.byte 68,15,40,217 // movaps %xmm1,%xmm11
.byte 69,15,89,219 // mulps %xmm11,%xmm11
.byte 68,15,89,217 // mulps %xmm1,%xmm11
- .byte 68,15,40,13,209,182,1,0 // movaps 0x1b6d1(%rip),%xmm9 # 396b0 <_sk_srcover_bgra_8888_sse2_lowp+0xfac>
+ .byte 68,15,40,13,49,184,1,0 // movaps 0x1b831(%rip),%xmm9 # 39810 <_sk_srcover_bgra_8888_sse2_lowp+0xf9c>
.byte 65,15,40,193 // movaps %xmm9,%xmm0
.byte 65,15,194,195,1 // cmpltps %xmm11,%xmm0
- .byte 15,40,21,209,182,1,0 // movaps 0x1b6d1(%rip),%xmm2 # 396c0 <_sk_srcover_bgra_8888_sse2_lowp+0xfbc>
+ .byte 15,40,21,49,184,1,0 // movaps 0x1b831(%rip),%xmm2 # 39820 <_sk_srcover_bgra_8888_sse2_lowp+0xfac>
.byte 15,88,202 // addps %xmm2,%xmm1
- .byte 68,15,40,37,214,182,1,0 // movaps 0x1b6d6(%rip),%xmm12 # 396d0 <_sk_srcover_bgra_8888_sse2_lowp+0xfcc>
+ .byte 68,15,40,37,54,184,1,0 // movaps 0x1b836(%rip),%xmm12 # 39830 <_sk_srcover_bgra_8888_sse2_lowp+0xfbc>
.byte 65,15,89,204 // mulps %xmm12,%xmm1
.byte 102,65,15,56,20,203 // blendvps %xmm0,%xmm11,%xmm1
.byte 69,15,40,216 // movaps %xmm8,%xmm11
@@ -38473,8 +38473,8 @@ _sk_lab_to_xyz_sse41:
.byte 65,15,89,212 // mulps %xmm12,%xmm2
.byte 65,15,40,193 // movaps %xmm9,%xmm0
.byte 102,65,15,56,20,211 // blendvps %xmm0,%xmm11,%xmm2
- .byte 15,89,13,143,182,1,0 // mulps 0x1b68f(%rip),%xmm1 # 396e0 <_sk_srcover_bgra_8888_sse2_lowp+0xfdc>
- .byte 15,89,21,152,182,1,0 // mulps 0x1b698(%rip),%xmm2 # 396f0 <_sk_srcover_bgra_8888_sse2_lowp+0xfec>
+ .byte 15,89,13,239,183,1,0 // mulps 0x1b7ef(%rip),%xmm1 # 39840 <_sk_srcover_bgra_8888_sse2_lowp+0xfcc>
+ .byte 15,89,21,248,183,1,0 // mulps 0x1b7f8(%rip),%xmm2 # 39850 <_sk_srcover_bgra_8888_sse2_lowp+0xfdc>
.byte 72,173 // lods %ds:(%rsi),%rax
.byte 15,40,193 // movaps %xmm1,%xmm0
.byte 65,15,40,200 // movaps %xmm8,%xmm1
@@ -38493,9 +38493,9 @@ _sk_load_a8_sse41:
.byte 72,133,255 // test %rdi,%rdi
.byte 117,39 // jne 1e0a2 <_sk_load_a8_sse41+0x3f>
.byte 102,67,15,56,49,4,16 // pmovzxbd (%r8,%r10,1),%xmm0
- .byte 102,15,219,5,22,179,1,0 // pand 0x1b316(%rip),%xmm0 # 393a0 <_sk_srcover_bgra_8888_sse2_lowp+0xc9c>
+ .byte 102,15,219,5,118,180,1,0 // pand 0x1b476(%rip),%xmm0 # 39500 <_sk_srcover_bgra_8888_sse2_lowp+0xc8c>
.byte 15,91,216 // cvtdq2ps %xmm0,%xmm3
- .byte 15,89,29,108,180,1,0 // mulps 0x1b46c(%rip),%xmm3 # 39500 <_sk_srcover_bgra_8888_sse2_lowp+0xdfc>
+ .byte 15,89,29,204,181,1,0 // mulps 0x1b5cc(%rip),%xmm3 # 39660 <_sk_srcover_bgra_8888_sse2_lowp+0xdec>
.byte 72,173 // lods %ds:(%rsi),%rax
.byte 15,87,192 // xorps %xmm0,%xmm0
.byte 102,15,239,201 // pxor %xmm1,%xmm1
@@ -38535,9 +38535,9 @@ _sk_load_a8_dst_sse41:
.byte 72,133,255 // test %rdi,%rdi
.byte 117,39 // jne 1e12e <_sk_load_a8_dst_sse41+0x3f>
.byte 102,67,15,56,49,36,16 // pmovzxbd (%r8,%r10,1),%xmm4
- .byte 102,15,219,37,138,178,1,0 // pand 0x1b28a(%rip),%xmm4 # 393a0 <_sk_srcover_bgra_8888_sse2_lowp+0xc9c>
+ .byte 102,15,219,37,234,179,1,0 // pand 0x1b3ea(%rip),%xmm4 # 39500 <_sk_srcover_bgra_8888_sse2_lowp+0xc8c>
.byte 15,91,252 // cvtdq2ps %xmm4,%xmm7
- .byte 15,89,61,224,179,1,0 // mulps 0x1b3e0(%rip),%xmm7 # 39500 <_sk_srcover_bgra_8888_sse2_lowp+0xdfc>
+ .byte 15,89,61,64,181,1,0 // mulps 0x1b540(%rip),%xmm7 # 39660 <_sk_srcover_bgra_8888_sse2_lowp+0xdec>
.byte 72,173 // lods %ds:(%rsi),%rax
.byte 15,87,228 // xorps %xmm4,%xmm4
.byte 102,15,239,237 // pxor %xmm5,%xmm5
@@ -38605,7 +38605,7 @@ _sk_gather_a8_sse41:
.byte 102,15,58,32,192,3 // pinsrb $0x3,%eax,%xmm0
.byte 102,15,56,49,192 // pmovzxbd %xmm0,%xmm0
.byte 15,91,216 // cvtdq2ps %xmm0,%xmm3
- .byte 15,89,29,216,178,1,0 // mulps 0x1b2d8(%rip),%xmm3 # 39500 <_sk_srcover_bgra_8888_sse2_lowp+0xdfc>
+ .byte 15,89,29,56,180,1,0 // mulps 0x1b438(%rip),%xmm3 # 39660 <_sk_srcover_bgra_8888_sse2_lowp+0xdec>
.byte 72,173 // lods %ds:(%rsi),%rax
.byte 15,87,192 // xorps %xmm0,%xmm0
.byte 102,15,239,201 // pxor %xmm1,%xmm1
@@ -38622,7 +38622,7 @@ _sk_store_a8_sse41:
.byte 77,15,175,193 // imul %r9,%r8
.byte 76,3,0 // add (%rax),%r8
.byte 76,99,210 // movslq %edx,%r10
- .byte 68,15,40,5,127,177,1,0 // movaps 0x1b17f(%rip),%xmm8 # 393d0 <_sk_srcover_bgra_8888_sse2_lowp+0xccc>
+ .byte 68,15,40,5,223,178,1,0 // movaps 0x1b2df(%rip),%xmm8 # 39530 <_sk_srcover_bgra_8888_sse2_lowp+0xcbc>
.byte 68,15,89,195 // mulps %xmm3,%xmm8
.byte 102,69,15,91,192 // cvtps2dq %xmm8,%xmm8
.byte 102,68,15,56,43,192 // packusdw %xmm0,%xmm8
@@ -38643,7 +38643,7 @@ _sk_store_a8_sse41:
.byte 65,128,249,3 // cmp $0x3,%r9b
.byte 117,221 // jne 1e273 <_sk_store_a8_sse41+0x3d>
.byte 102,71,15,58,20,68,16,2,8 // pextrb $0x8,%xmm8,0x2(%r8,%r10,1)
- .byte 102,68,15,56,0,5,87,180,1,0 // pshufb 0x1b457(%rip),%xmm8 # 39700 <_sk_srcover_bgra_8888_sse2_lowp+0xffc>
+ .byte 102,68,15,56,0,5,183,181,1,0 // pshufb 0x1b5b7(%rip),%xmm8 # 39860 <_sk_srcover_bgra_8888_sse2_lowp+0xfec>
.byte 102,71,15,58,21,4,16,0 // pextrw $0x0,%xmm8,(%r8,%r10,1)
.byte 235,192 // jmp 1e273 <_sk_store_a8_sse41+0x3d>
.byte 102,71,15,58,20,4,16,0 // pextrb $0x0,%xmm8,(%r8,%r10,1)
@@ -38662,11 +38662,11 @@ _sk_load_g8_sse41:
.byte 72,133,255 // test %rdi,%rdi
.byte 117,42 // jne 1e2ff <_sk_load_g8_sse41+0x42>
.byte 102,67,15,56,49,4,16 // pmovzxbd (%r8,%r10,1),%xmm0
- .byte 102,15,219,5,188,176,1,0 // pand 0x1b0bc(%rip),%xmm0 # 393a0 <_sk_srcover_bgra_8888_sse2_lowp+0xc9c>
+ .byte 102,15,219,5,28,178,1,0 // pand 0x1b21c(%rip),%xmm0 # 39500 <_sk_srcover_bgra_8888_sse2_lowp+0xc8c>
.byte 15,91,192 // cvtdq2ps %xmm0,%xmm0
- .byte 15,89,5,18,178,1,0 // mulps 0x1b212(%rip),%xmm0 # 39500 <_sk_srcover_bgra_8888_sse2_lowp+0xdfc>
+ .byte 15,89,5,114,179,1,0 // mulps 0x1b372(%rip),%xmm0 # 39660 <_sk_srcover_bgra_8888_sse2_lowp+0xdec>
.byte 72,173 // lods %ds:(%rsi),%rax
- .byte 15,40,29,249,175,1,0 // movaps 0x1aff9(%rip),%xmm3 # 392f0 <_sk_srcover_bgra_8888_sse2_lowp+0xbec>
+ .byte 15,40,29,89,177,1,0 // movaps 0x1b159(%rip),%xmm3 # 39450 <_sk_srcover_bgra_8888_sse2_lowp+0xbdc>
.byte 15,40,200 // movaps %xmm0,%xmm1
.byte 15,40,208 // movaps %xmm0,%xmm2
.byte 255,224 // jmpq *%rax
@@ -38704,11 +38704,11 @@ _sk_load_g8_dst_sse41:
.byte 72,133,255 // test %rdi,%rdi
.byte 117,42 // jne 1e38e <_sk_load_g8_dst_sse41+0x42>
.byte 102,67,15,56,49,36,16 // pmovzxbd (%r8,%r10,1),%xmm4
- .byte 102,15,219,37,45,176,1,0 // pand 0x1b02d(%rip),%xmm4 # 393a0 <_sk_srcover_bgra_8888_sse2_lowp+0xc9c>
+ .byte 102,15,219,37,141,177,1,0 // pand 0x1b18d(%rip),%xmm4 # 39500 <_sk_srcover_bgra_8888_sse2_lowp+0xc8c>
.byte 15,91,228 // cvtdq2ps %xmm4,%xmm4
- .byte 15,89,37,131,177,1,0 // mulps 0x1b183(%rip),%xmm4 # 39500 <_sk_srcover_bgra_8888_sse2_lowp+0xdfc>
+ .byte 15,89,37,227,178,1,0 // mulps 0x1b2e3(%rip),%xmm4 # 39660 <_sk_srcover_bgra_8888_sse2_lowp+0xdec>
.byte 72,173 // lods %ds:(%rsi),%rax
- .byte 15,40,61,106,175,1,0 // movaps 0x1af6a(%rip),%xmm7 # 392f0 <_sk_srcover_bgra_8888_sse2_lowp+0xbec>
+ .byte 15,40,61,202,176,1,0 // movaps 0x1b0ca(%rip),%xmm7 # 39450 <_sk_srcover_bgra_8888_sse2_lowp+0xbdc>
.byte 15,40,236 // movaps %xmm4,%xmm5
.byte 15,40,244 // movaps %xmm4,%xmm6
.byte 255,224 // jmpq *%rax
@@ -38774,9 +38774,9 @@ _sk_gather_g8_sse41:
.byte 102,15,58,32,192,3 // pinsrb $0x3,%eax,%xmm0
.byte 102,15,56,49,192 // pmovzxbd %xmm0,%xmm0
.byte 15,91,192 // cvtdq2ps %xmm0,%xmm0
- .byte 15,89,5,120,176,1,0 // mulps 0x1b078(%rip),%xmm0 # 39500 <_sk_srcover_bgra_8888_sse2_lowp+0xdfc>
+ .byte 15,89,5,216,177,1,0 // mulps 0x1b1d8(%rip),%xmm0 # 39660 <_sk_srcover_bgra_8888_sse2_lowp+0xdec>
.byte 72,173 // lods %ds:(%rsi),%rax
- .byte 15,40,29,95,174,1,0 // movaps 0x1ae5f(%rip),%xmm3 # 392f0 <_sk_srcover_bgra_8888_sse2_lowp+0xbec>
+ .byte 15,40,29,191,175,1,0 // movaps 0x1afbf(%rip),%xmm3 # 39450 <_sk_srcover_bgra_8888_sse2_lowp+0xbdc>
.byte 15,40,200 // movaps %xmm0,%xmm1
.byte 15,40,208 // movaps %xmm0,%xmm2
.byte 255,224 // jmpq *%rax
@@ -38795,19 +38795,19 @@ _sk_load_565_sse41:
.byte 72,133,255 // test %rdi,%rdi
.byte 117,80 // jne 1e504 <_sk_load_565_sse41+0x6b>
.byte 102,67,15,56,51,20,80 // pmovzxwd (%r8,%r10,2),%xmm2
- .byte 102,15,111,5,77,176,1,0 // movdqa 0x1b04d(%rip),%xmm0 # 39510 <_sk_srcover_bgra_8888_sse2_lowp+0xe0c>
+ .byte 102,15,111,5,173,177,1,0 // movdqa 0x1b1ad(%rip),%xmm0 # 39670 <_sk_srcover_bgra_8888_sse2_lowp+0xdfc>
.byte 102,15,219,194 // pand %xmm2,%xmm0
.byte 15,91,192 // cvtdq2ps %xmm0,%xmm0
- .byte 15,89,5,79,176,1,0 // mulps 0x1b04f(%rip),%xmm0 # 39520 <_sk_srcover_bgra_8888_sse2_lowp+0xe1c>
- .byte 102,15,111,13,87,176,1,0 // movdqa 0x1b057(%rip),%xmm1 # 39530 <_sk_srcover_bgra_8888_sse2_lowp+0xe2c>
+ .byte 15,89,5,175,177,1,0 // mulps 0x1b1af(%rip),%xmm0 # 39680 <_sk_srcover_bgra_8888_sse2_lowp+0xe0c>
+ .byte 102,15,111,13,183,177,1,0 // movdqa 0x1b1b7(%rip),%xmm1 # 39690 <_sk_srcover_bgra_8888_sse2_lowp+0xe1c>
.byte 102,15,219,202 // pand %xmm2,%xmm1
.byte 15,91,201 // cvtdq2ps %xmm1,%xmm1
- .byte 15,89,13,89,176,1,0 // mulps 0x1b059(%rip),%xmm1 # 39540 <_sk_srcover_bgra_8888_sse2_lowp+0xe3c>
- .byte 102,15,219,21,97,176,1,0 // pand 0x1b061(%rip),%xmm2 # 39550 <_sk_srcover_bgra_8888_sse2_lowp+0xe4c>
+ .byte 15,89,13,185,177,1,0 // mulps 0x1b1b9(%rip),%xmm1 # 396a0 <_sk_srcover_bgra_8888_sse2_lowp+0xe2c>
+ .byte 102,15,219,21,193,177,1,0 // pand 0x1b1c1(%rip),%xmm2 # 396b0 <_sk_srcover_bgra_8888_sse2_lowp+0xe3c>
.byte 15,91,210 // cvtdq2ps %xmm2,%xmm2
- .byte 15,89,21,103,176,1,0 // mulps 0x1b067(%rip),%xmm2 # 39560 <_sk_srcover_bgra_8888_sse2_lowp+0xe5c>
+ .byte 15,89,21,199,177,1,0 // mulps 0x1b1c7(%rip),%xmm2 # 396c0 <_sk_srcover_bgra_8888_sse2_lowp+0xe4c>
.byte 72,173 // lods %ds:(%rsi),%rax
- .byte 15,40,29,238,173,1,0 // movaps 0x1adee(%rip),%xmm3 # 392f0 <_sk_srcover_bgra_8888_sse2_lowp+0xbec>
+ .byte 15,40,29,78,175,1,0 // movaps 0x1af4e(%rip),%xmm3 # 39450 <_sk_srcover_bgra_8888_sse2_lowp+0xbdc>
.byte 255,224 // jmpq *%rax
.byte 65,137,249 // mov %edi,%r9d
.byte 65,128,225,3 // and $0x3,%r9b
@@ -38843,19 +38843,19 @@ _sk_load_565_dst_sse41:
.byte 72,133,255 // test %rdi,%rdi
.byte 117,80 // jne 1e5bf <_sk_load_565_dst_sse41+0x6b>
.byte 102,67,15,56,51,52,80 // pmovzxwd (%r8,%r10,2),%xmm6
- .byte 102,15,111,37,146,175,1,0 // movdqa 0x1af92(%rip),%xmm4 # 39510 <_sk_srcover_bgra_8888_sse2_lowp+0xe0c>
+ .byte 102,15,111,37,242,176,1,0 // movdqa 0x1b0f2(%rip),%xmm4 # 39670 <_sk_srcover_bgra_8888_sse2_lowp+0xdfc>
.byte 102,15,219,230 // pand %xmm6,%xmm4
.byte 15,91,228 // cvtdq2ps %xmm4,%xmm4
- .byte 15,89,37,148,175,1,0 // mulps 0x1af94(%rip),%xmm4 # 39520 <_sk_srcover_bgra_8888_sse2_lowp+0xe1c>
- .byte 102,15,111,45,156,175,1,0 // movdqa 0x1af9c(%rip),%xmm5 # 39530 <_sk_srcover_bgra_8888_sse2_lowp+0xe2c>
+ .byte 15,89,37,244,176,1,0 // mulps 0x1b0f4(%rip),%xmm4 # 39680 <_sk_srcover_bgra_8888_sse2_lowp+0xe0c>
+ .byte 102,15,111,45,252,176,1,0 // movdqa 0x1b0fc(%rip),%xmm5 # 39690 <_sk_srcover_bgra_8888_sse2_lowp+0xe1c>
.byte 102,15,219,238 // pand %xmm6,%xmm5
.byte 15,91,237 // cvtdq2ps %xmm5,%xmm5
- .byte 15,89,45,158,175,1,0 // mulps 0x1af9e(%rip),%xmm5 # 39540 <_sk_srcover_bgra_8888_sse2_lowp+0xe3c>
- .byte 102,15,219,53,166,175,1,0 // pand 0x1afa6(%rip),%xmm6 # 39550 <_sk_srcover_bgra_8888_sse2_lowp+0xe4c>
+ .byte 15,89,45,254,176,1,0 // mulps 0x1b0fe(%rip),%xmm5 # 396a0 <_sk_srcover_bgra_8888_sse2_lowp+0xe2c>
+ .byte 102,15,219,53,6,177,1,0 // pand 0x1b106(%rip),%xmm6 # 396b0 <_sk_srcover_bgra_8888_sse2_lowp+0xe3c>
.byte 15,91,246 // cvtdq2ps %xmm6,%xmm6
- .byte 15,89,53,172,175,1,0 // mulps 0x1afac(%rip),%xmm6 # 39560 <_sk_srcover_bgra_8888_sse2_lowp+0xe5c>
+ .byte 15,89,53,12,177,1,0 // mulps 0x1b10c(%rip),%xmm6 # 396c0 <_sk_srcover_bgra_8888_sse2_lowp+0xe4c>
.byte 72,173 // lods %ds:(%rsi),%rax
- .byte 15,40,61,51,173,1,0 // movaps 0x1ad33(%rip),%xmm7 # 392f0 <_sk_srcover_bgra_8888_sse2_lowp+0xbec>
+ .byte 15,40,61,147,174,1,0 // movaps 0x1ae93(%rip),%xmm7 # 39450 <_sk_srcover_bgra_8888_sse2_lowp+0xbdc>
.byte 255,224 // jmpq *%rax
.byte 65,137,249 // mov %edi,%r9d
.byte 65,128,225,3 // and $0x3,%r9b
@@ -38917,19 +38917,19 @@ _sk_gather_565_sse41:
.byte 65,15,183,4,64 // movzwl (%r8,%rax,2),%eax
.byte 102,15,196,192,3 // pinsrw $0x3,%eax,%xmm0
.byte 102,15,56,51,208 // pmovzxwd %xmm0,%xmm2
- .byte 102,15,111,5,89,174,1,0 // movdqa 0x1ae59(%rip),%xmm0 # 39510 <_sk_srcover_bgra_8888_sse2_lowp+0xe0c>
+ .byte 102,15,111,5,185,175,1,0 // movdqa 0x1afb9(%rip),%xmm0 # 39670 <_sk_srcover_bgra_8888_sse2_lowp+0xdfc>
.byte 102,15,219,194 // pand %xmm2,%xmm0
.byte 15,91,192 // cvtdq2ps %xmm0,%xmm0
- .byte 15,89,5,91,174,1,0 // mulps 0x1ae5b(%rip),%xmm0 # 39520 <_sk_srcover_bgra_8888_sse2_lowp+0xe1c>
- .byte 102,15,111,13,99,174,1,0 // movdqa 0x1ae63(%rip),%xmm1 # 39530 <_sk_srcover_bgra_8888_sse2_lowp+0xe2c>
+ .byte 15,89,5,187,175,1,0 // mulps 0x1afbb(%rip),%xmm0 # 39680 <_sk_srcover_bgra_8888_sse2_lowp+0xe0c>
+ .byte 102,15,111,13,195,175,1,0 // movdqa 0x1afc3(%rip),%xmm1 # 39690 <_sk_srcover_bgra_8888_sse2_lowp+0xe1c>
.byte 102,15,219,202 // pand %xmm2,%xmm1
.byte 15,91,201 // cvtdq2ps %xmm1,%xmm1
- .byte 15,89,13,101,174,1,0 // mulps 0x1ae65(%rip),%xmm1 # 39540 <_sk_srcover_bgra_8888_sse2_lowp+0xe3c>
- .byte 102,15,219,21,109,174,1,0 // pand 0x1ae6d(%rip),%xmm2 # 39550 <_sk_srcover_bgra_8888_sse2_lowp+0xe4c>
+ .byte 15,89,13,197,175,1,0 // mulps 0x1afc5(%rip),%xmm1 # 396a0 <_sk_srcover_bgra_8888_sse2_lowp+0xe2c>
+ .byte 102,15,219,21,205,175,1,0 // pand 0x1afcd(%rip),%xmm2 # 396b0 <_sk_srcover_bgra_8888_sse2_lowp+0xe3c>
.byte 15,91,210 // cvtdq2ps %xmm2,%xmm2
- .byte 15,89,21,115,174,1,0 // mulps 0x1ae73(%rip),%xmm2 # 39560 <_sk_srcover_bgra_8888_sse2_lowp+0xe5c>
+ .byte 15,89,21,211,175,1,0 // mulps 0x1afd3(%rip),%xmm2 # 396c0 <_sk_srcover_bgra_8888_sse2_lowp+0xe4c>
.byte 72,173 // lods %ds:(%rsi),%rax
- .byte 15,40,29,250,171,1,0 // movaps 0x1abfa(%rip),%xmm3 # 392f0 <_sk_srcover_bgra_8888_sse2_lowp+0xbec>
+ .byte 15,40,29,90,173,1,0 // movaps 0x1ad5a(%rip),%xmm3 # 39450 <_sk_srcover_bgra_8888_sse2_lowp+0xbdc>
.byte 255,224 // jmpq *%rax
HIDDEN _sk_store_565_sse41
@@ -38943,12 +38943,12 @@ _sk_store_565_sse41:
.byte 77,1,192 // add %r8,%r8
.byte 76,3,0 // add (%rax),%r8
.byte 72,99,194 // movslq %edx,%rax
- .byte 68,15,40,5,250,175,1,0 // movaps 0x1affa(%rip),%xmm8 # 39710 <_sk_srcover_bgra_8888_sse2_lowp+0x100c>
+ .byte 68,15,40,5,90,177,1,0 // movaps 0x1b15a(%rip),%xmm8 # 39870 <_sk_srcover_bgra_8888_sse2_lowp+0xffc>
.byte 68,15,40,200 // movaps %xmm0,%xmm9
.byte 69,15,89,200 // mulps %xmm8,%xmm9
.byte 102,69,15,91,201 // cvtps2dq %xmm9,%xmm9
.byte 102,65,15,114,241,11 // pslld $0xb,%xmm9
- .byte 68,15,40,21,239,175,1,0 // movaps 0x1afef(%rip),%xmm10 # 39720 <_sk_srcover_bgra_8888_sse2_lowp+0x101c>
+ .byte 68,15,40,21,79,177,1,0 // movaps 0x1b14f(%rip),%xmm10 # 39880 <_sk_srcover_bgra_8888_sse2_lowp+0x100c>
.byte 68,15,89,209 // mulps %xmm1,%xmm10
.byte 102,69,15,91,210 // cvtps2dq %xmm10,%xmm10
.byte 102,65,15,114,242,5 // pslld $0x5,%xmm10
@@ -38992,21 +38992,21 @@ _sk_load_4444_sse41:
.byte 72,133,255 // test %rdi,%rdi
.byte 117,95 // jne 1e822 <_sk_load_4444_sse41+0x7a>
.byte 102,67,15,56,51,28,80 // pmovzxwd (%r8,%r10,2),%xmm3
- .byte 102,15,111,5,94,175,1,0 // movdqa 0x1af5e(%rip),%xmm0 # 39730 <_sk_srcover_bgra_8888_sse2_lowp+0x102c>
+ .byte 102,15,111,5,190,176,1,0 // movdqa 0x1b0be(%rip),%xmm0 # 39890 <_sk_srcover_bgra_8888_sse2_lowp+0x101c>
.byte 102,15,219,195 // pand %xmm3,%xmm0
.byte 15,91,192 // cvtdq2ps %xmm0,%xmm0
- .byte 15,89,5,96,175,1,0 // mulps 0x1af60(%rip),%xmm0 # 39740 <_sk_srcover_bgra_8888_sse2_lowp+0x103c>
- .byte 102,15,111,13,104,175,1,0 // movdqa 0x1af68(%rip),%xmm1 # 39750 <_sk_srcover_bgra_8888_sse2_lowp+0x104c>
+ .byte 15,89,5,192,176,1,0 // mulps 0x1b0c0(%rip),%xmm0 # 398a0 <_sk_srcover_bgra_8888_sse2_lowp+0x102c>
+ .byte 102,15,111,13,200,176,1,0 // movdqa 0x1b0c8(%rip),%xmm1 # 398b0 <_sk_srcover_bgra_8888_sse2_lowp+0x103c>
.byte 102,15,219,203 // pand %xmm3,%xmm1
.byte 15,91,201 // cvtdq2ps %xmm1,%xmm1
- .byte 15,89,13,106,175,1,0 // mulps 0x1af6a(%rip),%xmm1 # 39760 <_sk_srcover_bgra_8888_sse2_lowp+0x105c>
- .byte 102,15,111,21,114,175,1,0 // movdqa 0x1af72(%rip),%xmm2 # 39770 <_sk_srcover_bgra_8888_sse2_lowp+0x106c>
+ .byte 15,89,13,202,176,1,0 // mulps 0x1b0ca(%rip),%xmm1 # 398c0 <_sk_srcover_bgra_8888_sse2_lowp+0x104c>
+ .byte 102,15,111,21,210,176,1,0 // movdqa 0x1b0d2(%rip),%xmm2 # 398d0 <_sk_srcover_bgra_8888_sse2_lowp+0x105c>
.byte 102,15,219,211 // pand %xmm3,%xmm2
.byte 15,91,210 // cvtdq2ps %xmm2,%xmm2
- .byte 15,89,21,116,175,1,0 // mulps 0x1af74(%rip),%xmm2 # 39780 <_sk_srcover_bgra_8888_sse2_lowp+0x107c>
- .byte 102,15,219,29,124,175,1,0 // pand 0x1af7c(%rip),%xmm3 # 39790 <_sk_srcover_bgra_8888_sse2_lowp+0x108c>
+ .byte 15,89,21,212,176,1,0 // mulps 0x1b0d4(%rip),%xmm2 # 398e0 <_sk_srcover_bgra_8888_sse2_lowp+0x106c>
+ .byte 102,15,219,29,220,176,1,0 // pand 0x1b0dc(%rip),%xmm3 # 398f0 <_sk_srcover_bgra_8888_sse2_lowp+0x107c>
.byte 15,91,219 // cvtdq2ps %xmm3,%xmm3
- .byte 15,89,29,130,175,1,0 // mulps 0x1af82(%rip),%xmm3 # 397a0 <_sk_srcover_bgra_8888_sse2_lowp+0x109c>
+ .byte 15,89,29,226,176,1,0 // mulps 0x1b0e2(%rip),%xmm3 # 39900 <_sk_srcover_bgra_8888_sse2_lowp+0x108c>
.byte 72,173 // lods %ds:(%rsi),%rax
.byte 255,224 // jmpq *%rax
.byte 65,137,249 // mov %edi,%r9d
@@ -39043,21 +39043,21 @@ _sk_load_4444_dst_sse41:
.byte 72,133,255 // test %rdi,%rdi
.byte 117,95 // jne 1e8ec <_sk_load_4444_dst_sse41+0x7a>
.byte 102,67,15,56,51,60,80 // pmovzxwd (%r8,%r10,2),%xmm7
- .byte 102,15,111,37,148,174,1,0 // movdqa 0x1ae94(%rip),%xmm4 # 39730 <_sk_srcover_bgra_8888_sse2_lowp+0x102c>
+ .byte 102,15,111,37,244,175,1,0 // movdqa 0x1aff4(%rip),%xmm4 # 39890 <_sk_srcover_bgra_8888_sse2_lowp+0x101c>
.byte 102,15,219,231 // pand %xmm7,%xmm4
.byte 15,91,228 // cvtdq2ps %xmm4,%xmm4
- .byte 15,89,37,150,174,1,0 // mulps 0x1ae96(%rip),%xmm4 # 39740 <_sk_srcover_bgra_8888_sse2_lowp+0x103c>
- .byte 102,15,111,45,158,174,1,0 // movdqa 0x1ae9e(%rip),%xmm5 # 39750 <_sk_srcover_bgra_8888_sse2_lowp+0x104c>
+ .byte 15,89,37,246,175,1,0 // mulps 0x1aff6(%rip),%xmm4 # 398a0 <_sk_srcover_bgra_8888_sse2_lowp+0x102c>
+ .byte 102,15,111,45,254,175,1,0 // movdqa 0x1affe(%rip),%xmm5 # 398b0 <_sk_srcover_bgra_8888_sse2_lowp+0x103c>
.byte 102,15,219,239 // pand %xmm7,%xmm5
.byte 15,91,237 // cvtdq2ps %xmm5,%xmm5
- .byte 15,89,45,160,174,1,0 // mulps 0x1aea0(%rip),%xmm5 # 39760 <_sk_srcover_bgra_8888_sse2_lowp+0x105c>
- .byte 102,15,111,53,168,174,1,0 // movdqa 0x1aea8(%rip),%xmm6 # 39770 <_sk_srcover_bgra_8888_sse2_lowp+0x106c>
+ .byte 15,89,45,0,176,1,0 // mulps 0x1b000(%rip),%xmm5 # 398c0 <_sk_srcover_bgra_8888_sse2_lowp+0x104c>
+ .byte 102,15,111,53,8,176,1,0 // movdqa 0x1b008(%rip),%xmm6 # 398d0 <_sk_srcover_bgra_8888_sse2_lowp+0x105c>
.byte 102,15,219,247 // pand %xmm7,%xmm6
.byte 15,91,246 // cvtdq2ps %xmm6,%xmm6
- .byte 15,89,53,170,174,1,0 // mulps 0x1aeaa(%rip),%xmm6 # 39780 <_sk_srcover_bgra_8888_sse2_lowp+0x107c>
- .byte 102,15,219,61,178,174,1,0 // pand 0x1aeb2(%rip),%xmm7 # 39790 <_sk_srcover_bgra_8888_sse2_lowp+0x108c>
+ .byte 15,89,53,10,176,1,0 // mulps 0x1b00a(%rip),%xmm6 # 398e0 <_sk_srcover_bgra_8888_sse2_lowp+0x106c>
+ .byte 102,15,219,61,18,176,1,0 // pand 0x1b012(%rip),%xmm7 # 398f0 <_sk_srcover_bgra_8888_sse2_lowp+0x107c>
.byte 15,91,255 // cvtdq2ps %xmm7,%xmm7
- .byte 15,89,61,184,174,1,0 // mulps 0x1aeb8(%rip),%xmm7 # 397a0 <_sk_srcover_bgra_8888_sse2_lowp+0x109c>
+ .byte 15,89,61,24,176,1,0 // mulps 0x1b018(%rip),%xmm7 # 39900 <_sk_srcover_bgra_8888_sse2_lowp+0x108c>
.byte 72,173 // lods %ds:(%rsi),%rax
.byte 255,224 // jmpq *%rax
.byte 65,137,249 // mov %edi,%r9d
@@ -39120,21 +39120,21 @@ _sk_gather_4444_sse41:
.byte 65,15,183,4,64 // movzwl (%r8,%rax,2),%eax
.byte 102,15,196,192,3 // pinsrw $0x3,%eax,%xmm0
.byte 102,15,56,51,216 // pmovzxwd %xmm0,%xmm3
- .byte 102,15,111,5,76,173,1,0 // movdqa 0x1ad4c(%rip),%xmm0 # 39730 <_sk_srcover_bgra_8888_sse2_lowp+0x102c>
+ .byte 102,15,111,5,172,174,1,0 // movdqa 0x1aeac(%rip),%xmm0 # 39890 <_sk_srcover_bgra_8888_sse2_lowp+0x101c>
.byte 102,15,219,195 // pand %xmm3,%xmm0
.byte 15,91,192 // cvtdq2ps %xmm0,%xmm0
- .byte 15,89,5,78,173,1,0 // mulps 0x1ad4e(%rip),%xmm0 # 39740 <_sk_srcover_bgra_8888_sse2_lowp+0x103c>
- .byte 102,15,111,13,86,173,1,0 // movdqa 0x1ad56(%rip),%xmm1 # 39750 <_sk_srcover_bgra_8888_sse2_lowp+0x104c>
+ .byte 15,89,5,174,174,1,0 // mulps 0x1aeae(%rip),%xmm0 # 398a0 <_sk_srcover_bgra_8888_sse2_lowp+0x102c>
+ .byte 102,15,111,13,182,174,1,0 // movdqa 0x1aeb6(%rip),%xmm1 # 398b0 <_sk_srcover_bgra_8888_sse2_lowp+0x103c>
.byte 102,15,219,203 // pand %xmm3,%xmm1
.byte 15,91,201 // cvtdq2ps %xmm1,%xmm1
- .byte 15,89,13,88,173,1,0 // mulps 0x1ad58(%rip),%xmm1 # 39760 <_sk_srcover_bgra_8888_sse2_lowp+0x105c>
- .byte 102,15,111,21,96,173,1,0 // movdqa 0x1ad60(%rip),%xmm2 # 39770 <_sk_srcover_bgra_8888_sse2_lowp+0x106c>
+ .byte 15,89,13,184,174,1,0 // mulps 0x1aeb8(%rip),%xmm1 # 398c0 <_sk_srcover_bgra_8888_sse2_lowp+0x104c>
+ .byte 102,15,111,21,192,174,1,0 // movdqa 0x1aec0(%rip),%xmm2 # 398d0 <_sk_srcover_bgra_8888_sse2_lowp+0x105c>
.byte 102,15,219,211 // pand %xmm3,%xmm2
.byte 15,91,210 // cvtdq2ps %xmm2,%xmm2
- .byte 15,89,21,98,173,1,0 // mulps 0x1ad62(%rip),%xmm2 # 39780 <_sk_srcover_bgra_8888_sse2_lowp+0x107c>
- .byte 102,15,219,29,106,173,1,0 // pand 0x1ad6a(%rip),%xmm3 # 39790 <_sk_srcover_bgra_8888_sse2_lowp+0x108c>
+ .byte 15,89,21,194,174,1,0 // mulps 0x1aec2(%rip),%xmm2 # 398e0 <_sk_srcover_bgra_8888_sse2_lowp+0x106c>
+ .byte 102,15,219,29,202,174,1,0 // pand 0x1aeca(%rip),%xmm3 # 398f0 <_sk_srcover_bgra_8888_sse2_lowp+0x107c>
.byte 15,91,219 // cvtdq2ps %xmm3,%xmm3
- .byte 15,89,29,112,173,1,0 // mulps 0x1ad70(%rip),%xmm3 # 397a0 <_sk_srcover_bgra_8888_sse2_lowp+0x109c>
+ .byte 15,89,29,208,174,1,0 // mulps 0x1aed0(%rip),%xmm3 # 39900 <_sk_srcover_bgra_8888_sse2_lowp+0x108c>
.byte 72,173 // lods %ds:(%rsi),%rax
.byte 255,224 // jmpq *%rax
@@ -39149,7 +39149,7 @@ _sk_store_4444_sse41:
.byte 77,1,192 // add %r8,%r8
.byte 76,3,0 // add (%rax),%r8
.byte 72,99,194 // movslq %edx,%rax
- .byte 68,15,40,5,94,173,1,0 // movaps 0x1ad5e(%rip),%xmm8 # 397b0 <_sk_srcover_bgra_8888_sse2_lowp+0x10ac>
+ .byte 68,15,40,5,190,174,1,0 // movaps 0x1aebe(%rip),%xmm8 # 39910 <_sk_srcover_bgra_8888_sse2_lowp+0x109c>
.byte 68,15,40,200 // movaps %xmm0,%xmm9
.byte 69,15,89,200 // mulps %xmm8,%xmm9
.byte 102,69,15,91,201 // cvtps2dq %xmm9,%xmm9
@@ -39203,17 +39203,17 @@ _sk_load_8888_sse41:
.byte 72,133,255 // test %rdi,%rdi
.byte 117,89 // jne 1eb6d <_sk_load_8888_sse41+0x75>
.byte 243,65,15,111,28,128 // movdqu (%r8,%rax,4),%xmm3
- .byte 102,15,111,5,126,168,1,0 // movdqa 0x1a87e(%rip),%xmm0 # 393a0 <_sk_srcover_bgra_8888_sse2_lowp+0xc9c>
+ .byte 102,15,111,5,222,169,1,0 // movdqa 0x1a9de(%rip),%xmm0 # 39500 <_sk_srcover_bgra_8888_sse2_lowp+0xc8c>
.byte 102,15,219,195 // pand %xmm3,%xmm0
.byte 15,91,192 // cvtdq2ps %xmm0,%xmm0
- .byte 68,15,40,5,207,169,1,0 // movaps 0x1a9cf(%rip),%xmm8 # 39500 <_sk_srcover_bgra_8888_sse2_lowp+0xdfc>
+ .byte 68,15,40,5,47,171,1,0 // movaps 0x1ab2f(%rip),%xmm8 # 39660 <_sk_srcover_bgra_8888_sse2_lowp+0xdec>
.byte 65,15,89,192 // mulps %xmm8,%xmm0
.byte 102,15,111,203 // movdqa %xmm3,%xmm1
- .byte 102,15,56,0,13,110,168,1,0 // pshufb 0x1a86e(%rip),%xmm1 # 393b0 <_sk_srcover_bgra_8888_sse2_lowp+0xcac>
+ .byte 102,15,56,0,13,206,169,1,0 // pshufb 0x1a9ce(%rip),%xmm1 # 39510 <_sk_srcover_bgra_8888_sse2_lowp+0xc9c>
.byte 15,91,201 // cvtdq2ps %xmm1,%xmm1
.byte 65,15,89,200 // mulps %xmm8,%xmm1
.byte 102,15,111,211 // movdqa %xmm3,%xmm2
- .byte 102,15,56,0,21,106,168,1,0 // pshufb 0x1a86a(%rip),%xmm2 # 393c0 <_sk_srcover_bgra_8888_sse2_lowp+0xcbc>
+ .byte 102,15,56,0,21,202,169,1,0 // pshufb 0x1a9ca(%rip),%xmm2 # 39520 <_sk_srcover_bgra_8888_sse2_lowp+0xcac>
.byte 15,91,210 // cvtdq2ps %xmm2,%xmm2
.byte 65,15,89,208 // mulps %xmm8,%xmm2
.byte 102,15,114,211,24 // psrld $0x18,%xmm3
@@ -39252,17 +39252,17 @@ _sk_load_8888_dst_sse41:
.byte 72,133,255 // test %rdi,%rdi
.byte 117,89 // jne 1ec27 <_sk_load_8888_dst_sse41+0x75>
.byte 243,65,15,111,60,128 // movdqu (%r8,%rax,4),%xmm7
- .byte 102,15,111,37,196,167,1,0 // movdqa 0x1a7c4(%rip),%xmm4 # 393a0 <_sk_srcover_bgra_8888_sse2_lowp+0xc9c>
+ .byte 102,15,111,37,36,169,1,0 // movdqa 0x1a924(%rip),%xmm4 # 39500 <_sk_srcover_bgra_8888_sse2_lowp+0xc8c>
.byte 102,15,219,231 // pand %xmm7,%xmm4
.byte 15,91,228 // cvtdq2ps %xmm4,%xmm4
- .byte 68,15,40,5,21,169,1,0 // movaps 0x1a915(%rip),%xmm8 # 39500 <_sk_srcover_bgra_8888_sse2_lowp+0xdfc>
+ .byte 68,15,40,5,117,170,1,0 // movaps 0x1aa75(%rip),%xmm8 # 39660 <_sk_srcover_bgra_8888_sse2_lowp+0xdec>
.byte 65,15,89,224 // mulps %xmm8,%xmm4
.byte 102,15,111,239 // movdqa %xmm7,%xmm5
- .byte 102,15,56,0,45,180,167,1,0 // pshufb 0x1a7b4(%rip),%xmm5 # 393b0 <_sk_srcover_bgra_8888_sse2_lowp+0xcac>
+ .byte 102,15,56,0,45,20,169,1,0 // pshufb 0x1a914(%rip),%xmm5 # 39510 <_sk_srcover_bgra_8888_sse2_lowp+0xc9c>
.byte 15,91,237 // cvtdq2ps %xmm5,%xmm5
.byte 65,15,89,232 // mulps %xmm8,%xmm5
.byte 102,15,111,247 // movdqa %xmm7,%xmm6
- .byte 102,15,56,0,53,176,167,1,0 // pshufb 0x1a7b0(%rip),%xmm6 # 393c0 <_sk_srcover_bgra_8888_sse2_lowp+0xcbc>
+ .byte 102,15,56,0,53,16,169,1,0 // pshufb 0x1a910(%rip),%xmm6 # 39520 <_sk_srcover_bgra_8888_sse2_lowp+0xcac>
.byte 15,91,246 // cvtdq2ps %xmm6,%xmm6
.byte 65,15,89,240 // mulps %xmm8,%xmm6
.byte 102,15,114,215,24 // psrld $0x18,%xmm7
@@ -39322,17 +39322,17 @@ _sk_gather_8888_sse41:
.byte 102,65,15,58,34,28,128,1 // pinsrd $0x1,(%r8,%rax,4),%xmm3
.byte 102,67,15,58,34,28,152,2 // pinsrd $0x2,(%r8,%r11,4),%xmm3
.byte 102,67,15,58,34,28,144,3 // pinsrd $0x3,(%r8,%r10,4),%xmm3
- .byte 102,15,111,5,157,166,1,0 // movdqa 0x1a69d(%rip),%xmm0 # 393a0 <_sk_srcover_bgra_8888_sse2_lowp+0xc9c>
+ .byte 102,15,111,5,253,167,1,0 // movdqa 0x1a7fd(%rip),%xmm0 # 39500 <_sk_srcover_bgra_8888_sse2_lowp+0xc8c>
.byte 102,15,219,195 // pand %xmm3,%xmm0
.byte 15,91,192 // cvtdq2ps %xmm0,%xmm0
- .byte 68,15,40,5,238,167,1,0 // movaps 0x1a7ee(%rip),%xmm8 # 39500 <_sk_srcover_bgra_8888_sse2_lowp+0xdfc>
+ .byte 68,15,40,5,78,169,1,0 // movaps 0x1a94e(%rip),%xmm8 # 39660 <_sk_srcover_bgra_8888_sse2_lowp+0xdec>
.byte 65,15,89,192 // mulps %xmm8,%xmm0
.byte 102,15,111,203 // movdqa %xmm3,%xmm1
- .byte 102,15,56,0,13,141,166,1,0 // pshufb 0x1a68d(%rip),%xmm1 # 393b0 <_sk_srcover_bgra_8888_sse2_lowp+0xcac>
+ .byte 102,15,56,0,13,237,167,1,0 // pshufb 0x1a7ed(%rip),%xmm1 # 39510 <_sk_srcover_bgra_8888_sse2_lowp+0xc9c>
.byte 15,91,201 // cvtdq2ps %xmm1,%xmm1
.byte 65,15,89,200 // mulps %xmm8,%xmm1
.byte 102,15,111,211 // movdqa %xmm3,%xmm2
- .byte 102,15,56,0,21,137,166,1,0 // pshufb 0x1a689(%rip),%xmm2 # 393c0 <_sk_srcover_bgra_8888_sse2_lowp+0xcbc>
+ .byte 102,15,56,0,21,233,167,1,0 // pshufb 0x1a7e9(%rip),%xmm2 # 39520 <_sk_srcover_bgra_8888_sse2_lowp+0xcac>
.byte 15,91,210 // cvtdq2ps %xmm2,%xmm2
.byte 65,15,89,208 // mulps %xmm8,%xmm2
.byte 102,15,114,211,24 // psrld $0x18,%xmm3
@@ -39352,7 +39352,7 @@ _sk_store_8888_sse41:
.byte 73,193,224,2 // shl $0x2,%r8
.byte 76,3,0 // add (%rax),%r8
.byte 72,99,194 // movslq %edx,%rax
- .byte 68,15,40,5,99,166,1,0 // movaps 0x1a663(%rip),%xmm8 # 393d0 <_sk_srcover_bgra_8888_sse2_lowp+0xccc>
+ .byte 68,15,40,5,195,167,1,0 // movaps 0x1a7c3(%rip),%xmm8 # 39530 <_sk_srcover_bgra_8888_sse2_lowp+0xcbc>
.byte 68,15,40,200 // movaps %xmm0,%xmm9
.byte 69,15,89,200 // mulps %xmm8,%xmm9
.byte 102,69,15,91,201 // cvtps2dq %xmm9,%xmm9
@@ -39403,17 +39403,17 @@ _sk_load_bgra_sse41:
.byte 72,133,255 // test %rdi,%rdi
.byte 117,89 // jne 1ee74 <_sk_load_bgra_sse41+0x75>
.byte 243,65,15,111,28,128 // movdqu (%r8,%rax,4),%xmm3
- .byte 102,15,111,5,119,165,1,0 // movdqa 0x1a577(%rip),%xmm0 # 393a0 <_sk_srcover_bgra_8888_sse2_lowp+0xc9c>
+ .byte 102,15,111,5,215,166,1,0 // movdqa 0x1a6d7(%rip),%xmm0 # 39500 <_sk_srcover_bgra_8888_sse2_lowp+0xc8c>
.byte 102,15,219,195 // pand %xmm3,%xmm0
.byte 15,91,208 // cvtdq2ps %xmm0,%xmm2
- .byte 68,15,40,5,200,166,1,0 // movaps 0x1a6c8(%rip),%xmm8 # 39500 <_sk_srcover_bgra_8888_sse2_lowp+0xdfc>
+ .byte 68,15,40,5,40,168,1,0 // movaps 0x1a828(%rip),%xmm8 # 39660 <_sk_srcover_bgra_8888_sse2_lowp+0xdec>
.byte 65,15,89,208 // mulps %xmm8,%xmm2
.byte 102,15,111,195 // movdqa %xmm3,%xmm0
- .byte 102,15,56,0,5,103,165,1,0 // pshufb 0x1a567(%rip),%xmm0 # 393b0 <_sk_srcover_bgra_8888_sse2_lowp+0xcac>
+ .byte 102,15,56,0,5,199,166,1,0 // pshufb 0x1a6c7(%rip),%xmm0 # 39510 <_sk_srcover_bgra_8888_sse2_lowp+0xc9c>
.byte 15,91,200 // cvtdq2ps %xmm0,%xmm1
.byte 65,15,89,200 // mulps %xmm8,%xmm1
.byte 102,15,111,195 // movdqa %xmm3,%xmm0
- .byte 102,15,56,0,5,99,165,1,0 // pshufb 0x1a563(%rip),%xmm0 # 393c0 <_sk_srcover_bgra_8888_sse2_lowp+0xcbc>
+ .byte 102,15,56,0,5,195,166,1,0 // pshufb 0x1a6c3(%rip),%xmm0 # 39520 <_sk_srcover_bgra_8888_sse2_lowp+0xcac>
.byte 15,91,192 // cvtdq2ps %xmm0,%xmm0
.byte 65,15,89,192 // mulps %xmm8,%xmm0
.byte 102,15,114,211,24 // psrld $0x18,%xmm3
@@ -39452,17 +39452,17 @@ _sk_load_bgra_dst_sse41:
.byte 72,133,255 // test %rdi,%rdi
.byte 117,89 // jne 1ef2e <_sk_load_bgra_dst_sse41+0x75>
.byte 243,65,15,111,60,128 // movdqu (%r8,%rax,4),%xmm7
- .byte 102,15,111,37,189,164,1,0 // movdqa 0x1a4bd(%rip),%xmm4 # 393a0 <_sk_srcover_bgra_8888_sse2_lowp+0xc9c>
+ .byte 102,15,111,37,29,166,1,0 // movdqa 0x1a61d(%rip),%xmm4 # 39500 <_sk_srcover_bgra_8888_sse2_lowp+0xc8c>
.byte 102,15,219,231 // pand %xmm7,%xmm4
.byte 15,91,244 // cvtdq2ps %xmm4,%xmm6
- .byte 68,15,40,5,14,166,1,0 // movaps 0x1a60e(%rip),%xmm8 # 39500 <_sk_srcover_bgra_8888_sse2_lowp+0xdfc>
+ .byte 68,15,40,5,110,167,1,0 // movaps 0x1a76e(%rip),%xmm8 # 39660 <_sk_srcover_bgra_8888_sse2_lowp+0xdec>
.byte 65,15,89,240 // mulps %xmm8,%xmm6
.byte 102,15,111,231 // movdqa %xmm7,%xmm4
- .byte 102,15,56,0,37,173,164,1,0 // pshufb 0x1a4ad(%rip),%xmm4 # 393b0 <_sk_srcover_bgra_8888_sse2_lowp+0xcac>
+ .byte 102,15,56,0,37,13,166,1,0 // pshufb 0x1a60d(%rip),%xmm4 # 39510 <_sk_srcover_bgra_8888_sse2_lowp+0xc9c>
.byte 15,91,236 // cvtdq2ps %xmm4,%xmm5
.byte 65,15,89,232 // mulps %xmm8,%xmm5
.byte 102,15,111,231 // movdqa %xmm7,%xmm4
- .byte 102,15,56,0,37,169,164,1,0 // pshufb 0x1a4a9(%rip),%xmm4 # 393c0 <_sk_srcover_bgra_8888_sse2_lowp+0xcbc>
+ .byte 102,15,56,0,37,9,166,1,0 // pshufb 0x1a609(%rip),%xmm4 # 39520 <_sk_srcover_bgra_8888_sse2_lowp+0xcac>
.byte 15,91,228 // cvtdq2ps %xmm4,%xmm4
.byte 65,15,89,224 // mulps %xmm8,%xmm4
.byte 102,15,114,215,24 // psrld $0x18,%xmm7
@@ -39522,17 +39522,17 @@ _sk_gather_bgra_sse41:
.byte 102,65,15,58,34,28,128,1 // pinsrd $0x1,(%r8,%rax,4),%xmm3
.byte 102,67,15,58,34,28,152,2 // pinsrd $0x2,(%r8,%r11,4),%xmm3
.byte 102,67,15,58,34,28,144,3 // pinsrd $0x3,(%r8,%r10,4),%xmm3
- .byte 102,15,111,5,150,163,1,0 // movdqa 0x1a396(%rip),%xmm0 # 393a0 <_sk_srcover_bgra_8888_sse2_lowp+0xc9c>
+ .byte 102,15,111,5,246,164,1,0 // movdqa 0x1a4f6(%rip),%xmm0 # 39500 <_sk_srcover_bgra_8888_sse2_lowp+0xc8c>
.byte 102,15,219,195 // pand %xmm3,%xmm0
.byte 15,91,208 // cvtdq2ps %xmm0,%xmm2
- .byte 68,15,40,5,231,164,1,0 // movaps 0x1a4e7(%rip),%xmm8 # 39500 <_sk_srcover_bgra_8888_sse2_lowp+0xdfc>
+ .byte 68,15,40,5,71,166,1,0 // movaps 0x1a647(%rip),%xmm8 # 39660 <_sk_srcover_bgra_8888_sse2_lowp+0xdec>
.byte 65,15,89,208 // mulps %xmm8,%xmm2
.byte 102,15,111,195 // movdqa %xmm3,%xmm0
- .byte 102,15,56,0,5,134,163,1,0 // pshufb 0x1a386(%rip),%xmm0 # 393b0 <_sk_srcover_bgra_8888_sse2_lowp+0xcac>
+ .byte 102,15,56,0,5,230,164,1,0 // pshufb 0x1a4e6(%rip),%xmm0 # 39510 <_sk_srcover_bgra_8888_sse2_lowp+0xc9c>
.byte 15,91,200 // cvtdq2ps %xmm0,%xmm1
.byte 65,15,89,200 // mulps %xmm8,%xmm1
.byte 102,15,111,195 // movdqa %xmm3,%xmm0
- .byte 102,15,56,0,5,130,163,1,0 // pshufb 0x1a382(%rip),%xmm0 # 393c0 <_sk_srcover_bgra_8888_sse2_lowp+0xcbc>
+ .byte 102,15,56,0,5,226,164,1,0 // pshufb 0x1a4e2(%rip),%xmm0 # 39520 <_sk_srcover_bgra_8888_sse2_lowp+0xcac>
.byte 15,91,192 // cvtdq2ps %xmm0,%xmm0
.byte 65,15,89,192 // mulps %xmm8,%xmm0
.byte 102,15,114,211,24 // psrld $0x18,%xmm3
@@ -39552,7 +39552,7 @@ _sk_store_bgra_sse41:
.byte 73,193,224,2 // shl $0x2,%r8
.byte 76,3,0 // add (%rax),%r8
.byte 72,99,194 // movslq %edx,%rax
- .byte 68,15,40,5,92,163,1,0 // movaps 0x1a35c(%rip),%xmm8 # 393d0 <_sk_srcover_bgra_8888_sse2_lowp+0xccc>
+ .byte 68,15,40,5,188,164,1,0 // movaps 0x1a4bc(%rip),%xmm8 # 39530 <_sk_srcover_bgra_8888_sse2_lowp+0xcbc>
.byte 68,15,40,202 // movaps %xmm2,%xmm9
.byte 69,15,89,200 // mulps %xmm8,%xmm9
.byte 102,69,15,91,201 // cvtps2dq %xmm9,%xmm9
@@ -39611,19 +39611,19 @@ _sk_load_f16_sse41:
.byte 102,68,15,97,224 // punpcklwd %xmm0,%xmm12
.byte 102,68,15,105,200 // punpckhwd %xmm0,%xmm9
.byte 102,65,15,56,51,212 // pmovzxwd %xmm12,%xmm2
- .byte 102,68,15,111,5,97,166,1,0 // movdqa 0x1a661(%rip),%xmm8 # 397c0 <_sk_srcover_bgra_8888_sse2_lowp+0x10bc>
+ .byte 102,68,15,111,5,193,167,1,0 // movdqa 0x1a7c1(%rip),%xmm8 # 39920 <_sk_srcover_bgra_8888_sse2_lowp+0x10ac>
.byte 102,15,111,202 // movdqa %xmm2,%xmm1
.byte 102,65,15,219,200 // pand %xmm8,%xmm1
- .byte 102,68,15,111,21,95,166,1,0 // movdqa 0x1a65f(%rip),%xmm10 # 397d0 <_sk_srcover_bgra_8888_sse2_lowp+0x10cc>
+ .byte 102,68,15,111,21,191,167,1,0 // movdqa 0x1a7bf(%rip),%xmm10 # 39930 <_sk_srcover_bgra_8888_sse2_lowp+0x10bc>
.byte 102,65,15,219,210 // pand %xmm10,%xmm2
- .byte 102,15,111,29,98,166,1,0 // movdqa 0x1a662(%rip),%xmm3 # 397e0 <_sk_srcover_bgra_8888_sse2_lowp+0x10dc>
+ .byte 102,15,111,29,194,167,1,0 // movdqa 0x1a7c2(%rip),%xmm3 # 39940 <_sk_srcover_bgra_8888_sse2_lowp+0x10cc>
.byte 102,15,114,241,16 // pslld $0x10,%xmm1
.byte 102,15,111,194 // movdqa %xmm2,%xmm0
.byte 102,15,56,63,195 // pmaxud %xmm3,%xmm0
.byte 102,15,118,194 // pcmpeqd %xmm2,%xmm0
.byte 102,15,114,242,13 // pslld $0xd,%xmm2
.byte 102,15,235,209 // por %xmm1,%xmm2
- .byte 102,68,15,111,29,78,166,1,0 // movdqa 0x1a64e(%rip),%xmm11 # 397f0 <_sk_srcover_bgra_8888_sse2_lowp+0x10ec>
+ .byte 102,68,15,111,29,174,167,1,0 // movdqa 0x1a7ae(%rip),%xmm11 # 39950 <_sk_srcover_bgra_8888_sse2_lowp+0x10dc>
.byte 102,65,15,254,211 // paddd %xmm11,%xmm2
.byte 102,15,219,194 // pand %xmm2,%xmm0
.byte 102,65,15,112,204,238 // pshufd $0xee,%xmm12,%xmm1
@@ -39699,19 +39699,19 @@ _sk_load_f16_dst_sse41:
.byte 102,68,15,97,228 // punpcklwd %xmm4,%xmm12
.byte 102,68,15,105,204 // punpckhwd %xmm4,%xmm9
.byte 102,65,15,56,51,244 // pmovzxwd %xmm12,%xmm6
- .byte 102,68,15,111,5,195,164,1,0 // movdqa 0x1a4c3(%rip),%xmm8 # 397c0 <_sk_srcover_bgra_8888_sse2_lowp+0x10bc>
+ .byte 102,68,15,111,5,35,166,1,0 // movdqa 0x1a623(%rip),%xmm8 # 39920 <_sk_srcover_bgra_8888_sse2_lowp+0x10ac>
.byte 102,15,111,238 // movdqa %xmm6,%xmm5
.byte 102,65,15,219,232 // pand %xmm8,%xmm5
- .byte 102,68,15,111,21,193,164,1,0 // movdqa 0x1a4c1(%rip),%xmm10 # 397d0 <_sk_srcover_bgra_8888_sse2_lowp+0x10cc>
+ .byte 102,68,15,111,21,33,166,1,0 // movdqa 0x1a621(%rip),%xmm10 # 39930 <_sk_srcover_bgra_8888_sse2_lowp+0x10bc>
.byte 102,65,15,219,242 // pand %xmm10,%xmm6
- .byte 102,15,111,61,196,164,1,0 // movdqa 0x1a4c4(%rip),%xmm7 # 397e0 <_sk_srcover_bgra_8888_sse2_lowp+0x10dc>
+ .byte 102,15,111,61,36,166,1,0 // movdqa 0x1a624(%rip),%xmm7 # 39940 <_sk_srcover_bgra_8888_sse2_lowp+0x10cc>
.byte 102,15,114,245,16 // pslld $0x10,%xmm5
.byte 102,15,111,230 // movdqa %xmm6,%xmm4
.byte 102,15,56,63,231 // pmaxud %xmm7,%xmm4
.byte 102,15,118,230 // pcmpeqd %xmm6,%xmm4
.byte 102,15,114,246,13 // pslld $0xd,%xmm6
.byte 102,15,235,245 // por %xmm5,%xmm6
- .byte 102,68,15,111,29,176,164,1,0 // movdqa 0x1a4b0(%rip),%xmm11 # 397f0 <_sk_srcover_bgra_8888_sse2_lowp+0x10ec>
+ .byte 102,68,15,111,29,16,166,1,0 // movdqa 0x1a610(%rip),%xmm11 # 39950 <_sk_srcover_bgra_8888_sse2_lowp+0x10dc>
.byte 102,65,15,254,243 // paddd %xmm11,%xmm6
.byte 102,15,219,230 // pand %xmm6,%xmm4
.byte 102,65,15,112,236,238 // pshufd $0xee,%xmm12,%xmm5
@@ -39809,19 +39809,19 @@ _sk_gather_f16_sse41:
.byte 102,68,15,97,226 // punpcklwd %xmm2,%xmm12
.byte 102,68,15,105,202 // punpckhwd %xmm2,%xmm9
.byte 102,65,15,56,51,212 // pmovzxwd %xmm12,%xmm2
- .byte 102,68,15,111,5,193,162,1,0 // movdqa 0x1a2c1(%rip),%xmm8 # 397c0 <_sk_srcover_bgra_8888_sse2_lowp+0x10bc>
+ .byte 102,68,15,111,5,33,164,1,0 // movdqa 0x1a421(%rip),%xmm8 # 39920 <_sk_srcover_bgra_8888_sse2_lowp+0x10ac>
.byte 102,15,111,202 // movdqa %xmm2,%xmm1
.byte 102,65,15,219,200 // pand %xmm8,%xmm1
- .byte 102,68,15,111,21,191,162,1,0 // movdqa 0x1a2bf(%rip),%xmm10 # 397d0 <_sk_srcover_bgra_8888_sse2_lowp+0x10cc>
+ .byte 102,68,15,111,21,31,164,1,0 // movdqa 0x1a41f(%rip),%xmm10 # 39930 <_sk_srcover_bgra_8888_sse2_lowp+0x10bc>
.byte 102,65,15,219,210 // pand %xmm10,%xmm2
- .byte 102,15,111,29,194,162,1,0 // movdqa 0x1a2c2(%rip),%xmm3 # 397e0 <_sk_srcover_bgra_8888_sse2_lowp+0x10dc>
+ .byte 102,15,111,29,34,164,1,0 // movdqa 0x1a422(%rip),%xmm3 # 39940 <_sk_srcover_bgra_8888_sse2_lowp+0x10cc>
.byte 102,15,114,241,16 // pslld $0x10,%xmm1
.byte 102,15,111,194 // movdqa %xmm2,%xmm0
.byte 102,15,56,63,195 // pmaxud %xmm3,%xmm0
.byte 102,15,118,194 // pcmpeqd %xmm2,%xmm0
.byte 102,15,114,242,13 // pslld $0xd,%xmm2
.byte 102,15,235,209 // por %xmm1,%xmm2
- .byte 102,68,15,111,29,174,162,1,0 // movdqa 0x1a2ae(%rip),%xmm11 # 397f0 <_sk_srcover_bgra_8888_sse2_lowp+0x10ec>
+ .byte 102,68,15,111,29,14,164,1,0 // movdqa 0x1a40e(%rip),%xmm11 # 39950 <_sk_srcover_bgra_8888_sse2_lowp+0x10dc>
.byte 102,65,15,254,211 // paddd %xmm11,%xmm2
.byte 102,15,219,194 // pand %xmm2,%xmm0
.byte 102,65,15,112,204,238 // pshufd $0xee,%xmm12,%xmm1
@@ -39874,20 +39874,20 @@ _sk_store_f16_sse41:
.byte 73,193,224,3 // shl $0x3,%r8
.byte 76,3,0 // add (%rax),%r8
.byte 72,99,194 // movslq %edx,%rax
- .byte 102,68,15,111,13,214,161,1,0 // movdqa 0x1a1d6(%rip),%xmm9 # 39800 <_sk_srcover_bgra_8888_sse2_lowp+0x10fc>
+ .byte 102,68,15,111,13,54,163,1,0 // movdqa 0x1a336(%rip),%xmm9 # 39960 <_sk_srcover_bgra_8888_sse2_lowp+0x10ec>
.byte 102,68,15,111,224 // movdqa %xmm0,%xmm12
.byte 102,69,15,219,225 // pand %xmm9,%xmm12
- .byte 102,68,15,111,29,211,161,1,0 // movdqa 0x1a1d3(%rip),%xmm11 # 39810 <_sk_srcover_bgra_8888_sse2_lowp+0x110c>
+ .byte 102,68,15,111,29,51,163,1,0 // movdqa 0x1a333(%rip),%xmm11 # 39970 <_sk_srcover_bgra_8888_sse2_lowp+0x10fc>
.byte 102,68,15,111,232 // movdqa %xmm0,%xmm13
.byte 102,69,15,219,235 // pand %xmm11,%xmm13
- .byte 102,68,15,111,21,208,161,1,0 // movdqa 0x1a1d0(%rip),%xmm10 # 39820 <_sk_srcover_bgra_8888_sse2_lowp+0x111c>
+ .byte 102,68,15,111,21,48,163,1,0 // movdqa 0x1a330(%rip),%xmm10 # 39980 <_sk_srcover_bgra_8888_sse2_lowp+0x110c>
.byte 102,65,15,114,212,16 // psrld $0x10,%xmm12
.byte 102,69,15,111,197 // movdqa %xmm13,%xmm8
.byte 102,69,15,56,63,194 // pmaxud %xmm10,%xmm8
.byte 102,69,15,118,197 // pcmpeqd %xmm13,%xmm8
.byte 102,65,15,114,213,13 // psrld $0xd,%xmm13
.byte 102,69,15,254,236 // paddd %xmm12,%xmm13
- .byte 102,68,15,111,37,182,161,1,0 // movdqa 0x1a1b6(%rip),%xmm12 # 39830 <_sk_srcover_bgra_8888_sse2_lowp+0x112c>
+ .byte 102,68,15,111,37,22,163,1,0 // movdqa 0x1a316(%rip),%xmm12 # 39990 <_sk_srcover_bgra_8888_sse2_lowp+0x111c>
.byte 102,69,15,254,236 // paddd %xmm12,%xmm13
.byte 102,69,15,219,197 // pand %xmm13,%xmm8
.byte 102,68,15,56,43,192 // packusdw %xmm0,%xmm8
@@ -39977,7 +39977,7 @@ _sk_load_u16_be_sse41:
.byte 102,15,235,200 // por %xmm0,%xmm1
.byte 102,15,56,51,193 // pmovzxwd %xmm1,%xmm0
.byte 15,91,192 // cvtdq2ps %xmm0,%xmm0
- .byte 68,15,40,5,126,157,1,0 // movaps 0x19d7e(%rip),%xmm8 # 39590 <_sk_srcover_bgra_8888_sse2_lowp+0xe8c>
+ .byte 68,15,40,5,222,158,1,0 // movaps 0x19ede(%rip),%xmm8 # 396f0 <_sk_srcover_bgra_8888_sse2_lowp+0xe7c>
.byte 65,15,89,192 // mulps %xmm8,%xmm0
.byte 102,15,111,203 // movdqa %xmm3,%xmm1
.byte 102,15,113,241,8 // psllw $0x8,%xmm1
@@ -40051,7 +40051,7 @@ _sk_load_rgb_u16_be_sse41:
.byte 102,15,235,200 // por %xmm0,%xmm1
.byte 102,15,56,51,193 // pmovzxwd %xmm1,%xmm0
.byte 15,91,192 // cvtdq2ps %xmm0,%xmm0
- .byte 68,15,40,5,77,156,1,0 // movaps 0x19c4d(%rip),%xmm8 # 39590 <_sk_srcover_bgra_8888_sse2_lowp+0xe8c>
+ .byte 68,15,40,5,173,157,1,0 // movaps 0x19dad(%rip),%xmm8 # 396f0 <_sk_srcover_bgra_8888_sse2_lowp+0xe7c>
.byte 65,15,89,192 // mulps %xmm8,%xmm0
.byte 102,15,111,203 // movdqa %xmm3,%xmm1
.byte 102,15,113,241,8 // psllw $0x8,%xmm1
@@ -40068,7 +40068,7 @@ _sk_load_rgb_u16_be_sse41:
.byte 15,91,210 // cvtdq2ps %xmm2,%xmm2
.byte 65,15,89,208 // mulps %xmm8,%xmm2
.byte 72,173 // lods %ds:(%rsi),%rax
- .byte 15,40,29,100,153,1,0 // movaps 0x19964(%rip),%xmm3 # 392f0 <_sk_srcover_bgra_8888_sse2_lowp+0xbec>
+ .byte 15,40,29,196,154,1,0 // movaps 0x19ac4(%rip),%xmm3 # 39450 <_sk_srcover_bgra_8888_sse2_lowp+0xbdc>
.byte 255,224 // jmpq *%rax
.byte 102,65,15,110,20,64 // movd (%r8,%rax,2),%xmm2
.byte 102,15,239,201 // pxor %xmm1,%xmm1
@@ -40102,7 +40102,7 @@ _sk_store_u16_be_sse41:
.byte 77,1,192 // add %r8,%r8
.byte 76,3,0 // add (%rax),%r8
.byte 73,99,193 // movslq %r9d,%rax
- .byte 68,15,40,21,45,158,1,0 // movaps 0x19e2d(%rip),%xmm10 # 39840 <_sk_srcover_bgra_8888_sse2_lowp+0x113c>
+ .byte 68,15,40,21,141,159,1,0 // movaps 0x19f8d(%rip),%xmm10 # 399a0 <_sk_srcover_bgra_8888_sse2_lowp+0x112c>
.byte 68,15,40,192 // movaps %xmm0,%xmm8
.byte 69,15,89,194 // mulps %xmm10,%xmm8
.byte 102,69,15,91,192 // cvtps2dq %xmm8,%xmm8
@@ -40333,7 +40333,7 @@ _sk_mirror_x_sse41:
.byte 65,15,92,194 // subps %xmm10,%xmm0
.byte 243,69,15,88,192 // addss %xmm8,%xmm8
.byte 69,15,198,192,0 // shufps $0x0,%xmm8,%xmm8
- .byte 243,68,15,89,13,91,140,1,0 // mulss 0x18c5b(%rip),%xmm9 # 389b0 <_sk_srcover_bgra_8888_sse2_lowp+0x2ac>
+ .byte 243,68,15,89,13,203,141,1,0 // mulss 0x18dcb(%rip),%xmm9 # 38b20 <_sk_srcover_bgra_8888_sse2_lowp+0x2ac>
.byte 69,15,198,201,0 // shufps $0x0,%xmm9,%xmm9
.byte 68,15,89,200 // mulps %xmm0,%xmm9
.byte 102,69,15,58,8,201,1 // roundps $0x1,%xmm9,%xmm9
@@ -40358,7 +40358,7 @@ _sk_mirror_y_sse41:
.byte 65,15,92,202 // subps %xmm10,%xmm1
.byte 243,69,15,88,192 // addss %xmm8,%xmm8
.byte 69,15,198,192,0 // shufps $0x0,%xmm8,%xmm8
- .byte 243,68,15,89,13,2,140,1,0 // mulss 0x18c02(%rip),%xmm9 # 389b0 <_sk_srcover_bgra_8888_sse2_lowp+0x2ac>
+ .byte 243,68,15,89,13,114,141,1,0 // mulss 0x18d72(%rip),%xmm9 # 38b20 <_sk_srcover_bgra_8888_sse2_lowp+0x2ac>
.byte 69,15,198,201,0 // shufps $0x0,%xmm9,%xmm9
.byte 68,15,89,201 // mulps %xmm1,%xmm9
.byte 102,69,15,58,8,201,1 // roundps $0x1,%xmm9,%xmm9
@@ -40377,7 +40377,7 @@ FUNCTION(_sk_clamp_x_1_sse41)
_sk_clamp_x_1_sse41:
.byte 69,15,87,192 // xorps %xmm8,%xmm8
.byte 68,15,95,192 // maxps %xmm0,%xmm8
- .byte 68,15,93,5,6,149,1,0 // minps 0x19506(%rip),%xmm8 # 392f0 <_sk_srcover_bgra_8888_sse2_lowp+0xbec>
+ .byte 68,15,93,5,102,150,1,0 // minps 0x19666(%rip),%xmm8 # 39450 <_sk_srcover_bgra_8888_sse2_lowp+0xbdc>
.byte 72,173 // lods %ds:(%rsi),%rax
.byte 65,15,40,192 // movaps %xmm8,%xmm0
.byte 255,224 // jmpq *%rax
@@ -40390,7 +40390,7 @@ _sk_repeat_x_1_sse41:
.byte 65,15,92,192 // subps %xmm8,%xmm0
.byte 69,15,87,192 // xorps %xmm8,%xmm8
.byte 68,15,95,192 // maxps %xmm0,%xmm8
- .byte 68,15,93,5,227,148,1,0 // minps 0x194e3(%rip),%xmm8 # 392f0 <_sk_srcover_bgra_8888_sse2_lowp+0xbec>
+ .byte 68,15,93,5,67,150,1,0 // minps 0x19643(%rip),%xmm8 # 39450 <_sk_srcover_bgra_8888_sse2_lowp+0xbdc>
.byte 72,173 // lods %ds:(%rsi),%rax
.byte 65,15,40,192 // movaps %xmm8,%xmm0
.byte 255,224 // jmpq *%rax
@@ -40399,9 +40399,9 @@ HIDDEN _sk_mirror_x_1_sse41
.globl _sk_mirror_x_1_sse41
FUNCTION(_sk_mirror_x_1_sse41)
_sk_mirror_x_1_sse41:
- .byte 68,15,40,5,51,149,1,0 // movaps 0x19533(%rip),%xmm8 # 39350 <_sk_srcover_bgra_8888_sse2_lowp+0xc4c>
+ .byte 68,15,40,5,147,150,1,0 // movaps 0x19693(%rip),%xmm8 # 394b0 <_sk_srcover_bgra_8888_sse2_lowp+0xc3c>
.byte 65,15,88,192 // addps %xmm8,%xmm0
- .byte 68,15,40,13,183,148,1,0 // movaps 0x194b7(%rip),%xmm9 # 392e0 <_sk_srcover_bgra_8888_sse2_lowp+0xbdc>
+ .byte 68,15,40,13,23,150,1,0 // movaps 0x19617(%rip),%xmm9 # 39440 <_sk_srcover_bgra_8888_sse2_lowp+0xbcc>
.byte 68,15,89,200 // mulps %xmm0,%xmm9
.byte 102,69,15,58,8,201,1 // roundps $0x1,%xmm9,%xmm9
.byte 69,15,88,201 // addps %xmm9,%xmm9
@@ -40412,7 +40412,7 @@ _sk_mirror_x_1_sse41:
.byte 68,15,92,200 // subps %xmm0,%xmm9
.byte 68,15,84,200 // andps %xmm0,%xmm9
.byte 69,15,95,193 // maxps %xmm9,%xmm8
- .byte 68,15,93,5,148,148,1,0 // minps 0x19494(%rip),%xmm8 # 392f0 <_sk_srcover_bgra_8888_sse2_lowp+0xbec>
+ .byte 68,15,93,5,244,149,1,0 // minps 0x195f4(%rip),%xmm8 # 39450 <_sk_srcover_bgra_8888_sse2_lowp+0xbdc>
.byte 72,173 // lods %ds:(%rsi),%rax
.byte 65,15,40,192 // movaps %xmm8,%xmm0
.byte 255,224 // jmpq *%rax
@@ -40422,10 +40422,10 @@ HIDDEN _sk_luminance_to_alpha_sse41
FUNCTION(_sk_luminance_to_alpha_sse41)
_sk_luminance_to_alpha_sse41:
.byte 15,40,218 // movaps %xmm2,%xmm3
- .byte 15,89,5,226,153,1,0 // mulps 0x199e2(%rip),%xmm0 # 39850 <_sk_srcover_bgra_8888_sse2_lowp+0x114c>
- .byte 15,89,13,235,153,1,0 // mulps 0x199eb(%rip),%xmm1 # 39860 <_sk_srcover_bgra_8888_sse2_lowp+0x115c>
+ .byte 15,89,5,66,155,1,0 // mulps 0x19b42(%rip),%xmm0 # 399b0 <_sk_srcover_bgra_8888_sse2_lowp+0x113c>
+ .byte 15,89,13,75,155,1,0 // mulps 0x19b4b(%rip),%xmm1 # 399c0 <_sk_srcover_bgra_8888_sse2_lowp+0x114c>
.byte 15,88,200 // addps %xmm0,%xmm1
- .byte 15,89,29,241,153,1,0 // mulps 0x199f1(%rip),%xmm3 # 39870 <_sk_srcover_bgra_8888_sse2_lowp+0x116c>
+ .byte 15,89,29,81,155,1,0 // mulps 0x19b51(%rip),%xmm3 # 399d0 <_sk_srcover_bgra_8888_sse2_lowp+0x115c>
.byte 15,88,217 // addps %xmm1,%xmm3
.byte 72,173 // lods %ds:(%rsi),%rax
.byte 15,87,192 // xorps %xmm0,%xmm0
@@ -40974,26 +40974,26 @@ _sk_xy_to_unit_angle_sse41:
.byte 69,15,94,226 // divps %xmm10,%xmm12
.byte 69,15,40,236 // movaps %xmm12,%xmm13
.byte 69,15,89,237 // mulps %xmm13,%xmm13
- .byte 68,15,40,21,27,145,1,0 // movaps 0x1911b(%rip),%xmm10 # 39880 <_sk_srcover_bgra_8888_sse2_lowp+0x117c>
+ .byte 68,15,40,21,123,146,1,0 // movaps 0x1927b(%rip),%xmm10 # 399e0 <_sk_srcover_bgra_8888_sse2_lowp+0x116c>
.byte 69,15,89,213 // mulps %xmm13,%xmm10
- .byte 68,15,88,21,31,145,1,0 // addps 0x1911f(%rip),%xmm10 # 39890 <_sk_srcover_bgra_8888_sse2_lowp+0x118c>
+ .byte 68,15,88,21,127,146,1,0 // addps 0x1927f(%rip),%xmm10 # 399f0 <_sk_srcover_bgra_8888_sse2_lowp+0x117c>
.byte 69,15,89,213 // mulps %xmm13,%xmm10
- .byte 68,15,88,21,35,145,1,0 // addps 0x19123(%rip),%xmm10 # 398a0 <_sk_srcover_bgra_8888_sse2_lowp+0x119c>
+ .byte 68,15,88,21,131,146,1,0 // addps 0x19283(%rip),%xmm10 # 39a00 <_sk_srcover_bgra_8888_sse2_lowp+0x118c>
.byte 69,15,89,213 // mulps %xmm13,%xmm10
- .byte 68,15,88,21,39,145,1,0 // addps 0x19127(%rip),%xmm10 # 398b0 <_sk_srcover_bgra_8888_sse2_lowp+0x11ac>
+ .byte 68,15,88,21,135,146,1,0 // addps 0x19287(%rip),%xmm10 # 39a10 <_sk_srcover_bgra_8888_sse2_lowp+0x119c>
.byte 69,15,89,212 // mulps %xmm12,%xmm10
.byte 65,15,194,195,1 // cmpltps %xmm11,%xmm0
- .byte 68,15,40,29,38,145,1,0 // movaps 0x19126(%rip),%xmm11 # 398c0 <_sk_srcover_bgra_8888_sse2_lowp+0x11bc>
+ .byte 68,15,40,29,134,146,1,0 // movaps 0x19286(%rip),%xmm11 # 39a20 <_sk_srcover_bgra_8888_sse2_lowp+0x11ac>
.byte 69,15,92,218 // subps %xmm10,%xmm11
.byte 102,69,15,56,20,211 // blendvps %xmm0,%xmm11,%xmm10
.byte 69,15,194,200,1 // cmpltps %xmm8,%xmm9
- .byte 68,15,40,29,47,139,1,0 // movaps 0x18b2f(%rip),%xmm11 # 392e0 <_sk_srcover_bgra_8888_sse2_lowp+0xbdc>
+ .byte 68,15,40,29,143,140,1,0 // movaps 0x18c8f(%rip),%xmm11 # 39440 <_sk_srcover_bgra_8888_sse2_lowp+0xbcc>
.byte 69,15,92,218 // subps %xmm10,%xmm11
.byte 65,15,40,193 // movaps %xmm9,%xmm0
.byte 102,69,15,56,20,211 // blendvps %xmm0,%xmm11,%xmm10
.byte 15,40,193 // movaps %xmm1,%xmm0
.byte 65,15,194,192,1 // cmpltps %xmm8,%xmm0
- .byte 68,15,40,13,33,139,1,0 // movaps 0x18b21(%rip),%xmm9 # 392f0 <_sk_srcover_bgra_8888_sse2_lowp+0xbec>
+ .byte 68,15,40,13,129,140,1,0 // movaps 0x18c81(%rip),%xmm9 # 39450 <_sk_srcover_bgra_8888_sse2_lowp+0xbdc>
.byte 69,15,92,202 // subps %xmm10,%xmm9
.byte 102,69,15,56,20,209 // blendvps %xmm0,%xmm9,%xmm10
.byte 69,15,194,194,7 // cmpordps %xmm10,%xmm8
@@ -41027,7 +41027,7 @@ _sk_xy_to_2pt_conical_quadratic_max_sse41:
.byte 243,69,15,89,203 // mulss %xmm11,%xmm9
.byte 69,15,198,201,0 // shufps $0x0,%xmm9,%xmm9
.byte 68,15,88,200 // addps %xmm0,%xmm9
- .byte 68,15,89,13,154,144,1,0 // mulps 0x1909a(%rip),%xmm9 # 398d0 <_sk_srcover_bgra_8888_sse2_lowp+0x11cc>
+ .byte 68,15,89,13,250,145,1,0 // mulps 0x191fa(%rip),%xmm9 # 39a30 <_sk_srcover_bgra_8888_sse2_lowp+0x11bc>
.byte 15,89,192 // mulps %xmm0,%xmm0
.byte 68,15,40,225 // movaps %xmm1,%xmm12
.byte 69,15,89,228 // mulps %xmm12,%xmm12
@@ -41035,7 +41035,7 @@ _sk_xy_to_2pt_conical_quadratic_max_sse41:
.byte 243,69,15,89,219 // mulss %xmm11,%xmm11
.byte 69,15,198,219,0 // shufps $0x0,%xmm11,%xmm11
.byte 69,15,92,227 // subps %xmm11,%xmm12
- .byte 68,15,89,21,133,144,1,0 // mulps 0x19085(%rip),%xmm10 # 398e0 <_sk_srcover_bgra_8888_sse2_lowp+0x11dc>
+ .byte 68,15,89,21,229,145,1,0 // mulps 0x191e5(%rip),%xmm10 # 39a40 <_sk_srcover_bgra_8888_sse2_lowp+0x11cc>
.byte 69,15,89,212 // mulps %xmm12,%xmm10
.byte 65,15,40,193 // movaps %xmm9,%xmm0
.byte 15,89,192 // mulps %xmm0,%xmm0
@@ -41044,8 +41044,8 @@ _sk_xy_to_2pt_conical_quadratic_max_sse41:
.byte 69,15,198,192,0 // shufps $0x0,%xmm8,%xmm8
.byte 65,15,40,194 // movaps %xmm10,%xmm0
.byte 65,15,92,193 // subps %xmm9,%xmm0
- .byte 68,15,87,13,125,143,1,0 // xorps 0x18f7d(%rip),%xmm9 # 39800 <_sk_srcover_bgra_8888_sse2_lowp+0x10fc>
- .byte 68,15,89,5,85,138,1,0 // mulps 0x18a55(%rip),%xmm8 # 392e0 <_sk_srcover_bgra_8888_sse2_lowp+0xbdc>
+ .byte 68,15,87,13,221,144,1,0 // xorps 0x190dd(%rip),%xmm9 # 39960 <_sk_srcover_bgra_8888_sse2_lowp+0x10ec>
+ .byte 68,15,89,5,181,139,1,0 // mulps 0x18bb5(%rip),%xmm8 # 39440 <_sk_srcover_bgra_8888_sse2_lowp+0xbcc>
.byte 65,15,89,192 // mulps %xmm8,%xmm0
.byte 69,15,92,202 // subps %xmm10,%xmm9
.byte 69,15,89,200 // mulps %xmm8,%xmm9
@@ -41066,7 +41066,7 @@ _sk_xy_to_2pt_conical_quadratic_min_sse41:
.byte 243,69,15,89,203 // mulss %xmm11,%xmm9
.byte 69,15,198,201,0 // shufps $0x0,%xmm9,%xmm9
.byte 68,15,88,200 // addps %xmm0,%xmm9
- .byte 68,15,89,13,252,143,1,0 // mulps 0x18ffc(%rip),%xmm9 # 398d0 <_sk_srcover_bgra_8888_sse2_lowp+0x11cc>
+ .byte 68,15,89,13,92,145,1,0 // mulps 0x1915c(%rip),%xmm9 # 39a30 <_sk_srcover_bgra_8888_sse2_lowp+0x11bc>
.byte 15,89,192 // mulps %xmm0,%xmm0
.byte 68,15,40,225 // movaps %xmm1,%xmm12
.byte 69,15,89,228 // mulps %xmm12,%xmm12
@@ -41074,7 +41074,7 @@ _sk_xy_to_2pt_conical_quadratic_min_sse41:
.byte 243,69,15,89,219 // mulss %xmm11,%xmm11
.byte 69,15,198,219,0 // shufps $0x0,%xmm11,%xmm11
.byte 69,15,92,227 // subps %xmm11,%xmm12
- .byte 68,15,89,21,231,143,1,0 // mulps 0x18fe7(%rip),%xmm10 # 398e0 <_sk_srcover_bgra_8888_sse2_lowp+0x11dc>
+ .byte 68,15,89,21,71,145,1,0 // mulps 0x19147(%rip),%xmm10 # 39a40 <_sk_srcover_bgra_8888_sse2_lowp+0x11cc>
.byte 69,15,89,212 // mulps %xmm12,%xmm10
.byte 65,15,40,193 // movaps %xmm9,%xmm0
.byte 15,89,192 // mulps %xmm0,%xmm0
@@ -41083,8 +41083,8 @@ _sk_xy_to_2pt_conical_quadratic_min_sse41:
.byte 69,15,198,192,0 // shufps $0x0,%xmm8,%xmm8
.byte 65,15,40,194 // movaps %xmm10,%xmm0
.byte 65,15,92,193 // subps %xmm9,%xmm0
- .byte 68,15,87,13,223,142,1,0 // xorps 0x18edf(%rip),%xmm9 # 39800 <_sk_srcover_bgra_8888_sse2_lowp+0x10fc>
- .byte 68,15,89,5,183,137,1,0 // mulps 0x189b7(%rip),%xmm8 # 392e0 <_sk_srcover_bgra_8888_sse2_lowp+0xbdc>
+ .byte 68,15,87,13,63,144,1,0 // xorps 0x1903f(%rip),%xmm9 # 39960 <_sk_srcover_bgra_8888_sse2_lowp+0x10ec>
+ .byte 68,15,89,5,23,139,1,0 // mulps 0x18b17(%rip),%xmm8 # 39440 <_sk_srcover_bgra_8888_sse2_lowp+0xbcc>
.byte 65,15,89,192 // mulps %xmm8,%xmm0
.byte 69,15,92,202 // subps %xmm10,%xmm9
.byte 69,15,89,200 // mulps %xmm8,%xmm9
@@ -41102,7 +41102,7 @@ _sk_xy_to_2pt_conical_linear_sse41:
.byte 243,69,15,89,200 // mulss %xmm8,%xmm9
.byte 69,15,198,201,0 // shufps $0x0,%xmm9,%xmm9
.byte 68,15,88,200 // addps %xmm0,%xmm9
- .byte 68,15,89,13,111,143,1,0 // mulps 0x18f6f(%rip),%xmm9 # 398d0 <_sk_srcover_bgra_8888_sse2_lowp+0x11cc>
+ .byte 68,15,89,13,207,144,1,0 // mulps 0x190cf(%rip),%xmm9 # 39a30 <_sk_srcover_bgra_8888_sse2_lowp+0x11bc>
.byte 15,89,192 // mulps %xmm0,%xmm0
.byte 68,15,40,209 // movaps %xmm1,%xmm10
.byte 69,15,89,210 // mulps %xmm10,%xmm10
@@ -41110,7 +41110,7 @@ _sk_xy_to_2pt_conical_linear_sse41:
.byte 243,69,15,89,192 // mulss %xmm8,%xmm8
.byte 69,15,198,192,0 // shufps $0x0,%xmm8,%xmm8
.byte 65,15,92,192 // subps %xmm8,%xmm0
- .byte 15,87,5,123,142,1,0 // xorps 0x18e7b(%rip),%xmm0 # 39800 <_sk_srcover_bgra_8888_sse2_lowp+0x10fc>
+ .byte 15,87,5,219,143,1,0 // xorps 0x18fdb(%rip),%xmm0 # 39960 <_sk_srcover_bgra_8888_sse2_lowp+0x10ec>
.byte 65,15,94,193 // divps %xmm9,%xmm0
.byte 72,173 // lods %ds:(%rsi),%rax
.byte 255,224 // jmpq *%rax
@@ -41158,7 +41158,7 @@ HIDDEN _sk_save_xy_sse41
FUNCTION(_sk_save_xy_sse41)
_sk_save_xy_sse41:
.byte 72,173 // lods %ds:(%rsi),%rax
- .byte 68,15,40,5,218,136,1,0 // movaps 0x188da(%rip),%xmm8 # 392e0 <_sk_srcover_bgra_8888_sse2_lowp+0xbdc>
+ .byte 68,15,40,5,58,138,1,0 // movaps 0x18a3a(%rip),%xmm8 # 39440 <_sk_srcover_bgra_8888_sse2_lowp+0xbcc>
.byte 15,17,0 // movups %xmm0,(%rax)
.byte 68,15,40,200 // movaps %xmm0,%xmm9
.byte 69,15,88,200 // addps %xmm8,%xmm9
@@ -41202,8 +41202,8 @@ _sk_bilinear_nx_sse41:
.byte 72,173 // lods %ds:(%rsi),%rax
.byte 15,16,0 // movups (%rax),%xmm0
.byte 68,15,16,128,128,0,0,0 // movups 0x80(%rax),%xmm8
- .byte 15,88,5,83,142,1,0 // addps 0x18e53(%rip),%xmm0 # 398f0 <_sk_srcover_bgra_8888_sse2_lowp+0x11ec>
- .byte 68,15,40,13,75,136,1,0 // movaps 0x1884b(%rip),%xmm9 # 392f0 <_sk_srcover_bgra_8888_sse2_lowp+0xbec>
+ .byte 15,88,5,179,143,1,0 // addps 0x18fb3(%rip),%xmm0 # 39a50 <_sk_srcover_bgra_8888_sse2_lowp+0x11dc>
+ .byte 68,15,40,13,171,137,1,0 // movaps 0x189ab(%rip),%xmm9 # 39450 <_sk_srcover_bgra_8888_sse2_lowp+0xbdc>
.byte 69,15,92,200 // subps %xmm8,%xmm9
.byte 68,15,17,136,0,1,0,0 // movups %xmm9,0x100(%rax)
.byte 72,173 // lods %ds:(%rsi),%rax
@@ -41216,7 +41216,7 @@ _sk_bilinear_px_sse41:
.byte 72,173 // lods %ds:(%rsi),%rax
.byte 15,16,0 // movups (%rax),%xmm0
.byte 68,15,16,128,128,0,0,0 // movups 0x80(%rax),%xmm8
- .byte 15,88,5,23,136,1,0 // addps 0x18817(%rip),%xmm0 # 392e0 <_sk_srcover_bgra_8888_sse2_lowp+0xbdc>
+ .byte 15,88,5,119,137,1,0 // addps 0x18977(%rip),%xmm0 # 39440 <_sk_srcover_bgra_8888_sse2_lowp+0xbcc>
.byte 68,15,17,128,0,1,0,0 // movups %xmm8,0x100(%rax)
.byte 72,173 // lods %ds:(%rsi),%rax
.byte 255,224 // jmpq *%rax
@@ -41228,8 +41228,8 @@ _sk_bilinear_ny_sse41:
.byte 72,173 // lods %ds:(%rsi),%rax
.byte 15,16,72,64 // movups 0x40(%rax),%xmm1
.byte 68,15,16,128,192,0,0,0 // movups 0xc0(%rax),%xmm8
- .byte 15,88,13,6,142,1,0 // addps 0x18e06(%rip),%xmm1 # 398f0 <_sk_srcover_bgra_8888_sse2_lowp+0x11ec>
- .byte 68,15,40,13,254,135,1,0 // movaps 0x187fe(%rip),%xmm9 # 392f0 <_sk_srcover_bgra_8888_sse2_lowp+0xbec>
+ .byte 15,88,13,102,143,1,0 // addps 0x18f66(%rip),%xmm1 # 39a50 <_sk_srcover_bgra_8888_sse2_lowp+0x11dc>
+ .byte 68,15,40,13,94,137,1,0 // movaps 0x1895e(%rip),%xmm9 # 39450 <_sk_srcover_bgra_8888_sse2_lowp+0xbdc>
.byte 69,15,92,200 // subps %xmm8,%xmm9
.byte 68,15,17,136,64,1,0,0 // movups %xmm9,0x140(%rax)
.byte 72,173 // lods %ds:(%rsi),%rax
@@ -41242,7 +41242,7 @@ _sk_bilinear_py_sse41:
.byte 72,173 // lods %ds:(%rsi),%rax
.byte 15,16,72,64 // movups 0x40(%rax),%xmm1
.byte 68,15,16,128,192,0,0,0 // movups 0xc0(%rax),%xmm8
- .byte 15,88,13,201,135,1,0 // addps 0x187c9(%rip),%xmm1 # 392e0 <_sk_srcover_bgra_8888_sse2_lowp+0xbdc>
+ .byte 15,88,13,41,137,1,0 // addps 0x18929(%rip),%xmm1 # 39440 <_sk_srcover_bgra_8888_sse2_lowp+0xbcc>
.byte 68,15,17,128,64,1,0,0 // movups %xmm8,0x140(%rax)
.byte 72,173 // lods %ds:(%rsi),%rax
.byte 255,224 // jmpq *%rax
@@ -41254,13 +41254,13 @@ _sk_bicubic_n3x_sse41:
.byte 72,173 // lods %ds:(%rsi),%rax
.byte 15,16,0 // movups (%rax),%xmm0
.byte 68,15,16,128,128,0,0,0 // movups 0x80(%rax),%xmm8
- .byte 15,88,5,201,141,1,0 // addps 0x18dc9(%rip),%xmm0 # 39900 <_sk_srcover_bgra_8888_sse2_lowp+0x11fc>
- .byte 68,15,40,13,177,135,1,0 // movaps 0x187b1(%rip),%xmm9 # 392f0 <_sk_srcover_bgra_8888_sse2_lowp+0xbec>
+ .byte 15,88,5,41,143,1,0 // addps 0x18f29(%rip),%xmm0 # 39a60 <_sk_srcover_bgra_8888_sse2_lowp+0x11ec>
+ .byte 68,15,40,13,17,137,1,0 // movaps 0x18911(%rip),%xmm9 # 39450 <_sk_srcover_bgra_8888_sse2_lowp+0xbdc>
.byte 69,15,92,200 // subps %xmm8,%xmm9
.byte 69,15,40,193 // movaps %xmm9,%xmm8
.byte 69,15,89,192 // mulps %xmm8,%xmm8
- .byte 68,15,89,13,189,141,1,0 // mulps 0x18dbd(%rip),%xmm9 # 39910 <_sk_srcover_bgra_8888_sse2_lowp+0x120c>
- .byte 68,15,88,13,149,137,1,0 // addps 0x18995(%rip),%xmm9 # 394f0 <_sk_srcover_bgra_8888_sse2_lowp+0xdec>
+ .byte 68,15,89,13,29,143,1,0 // mulps 0x18f1d(%rip),%xmm9 # 39a70 <_sk_srcover_bgra_8888_sse2_lowp+0x11fc>
+ .byte 68,15,88,13,245,138,1,0 // addps 0x18af5(%rip),%xmm9 # 39650 <_sk_srcover_bgra_8888_sse2_lowp+0xddc>
.byte 69,15,89,200 // mulps %xmm8,%xmm9
.byte 68,15,17,136,0,1,0,0 // movups %xmm9,0x100(%rax)
.byte 72,173 // lods %ds:(%rsi),%rax
@@ -41273,16 +41273,16 @@ _sk_bicubic_n1x_sse41:
.byte 72,173 // lods %ds:(%rsi),%rax
.byte 15,16,0 // movups (%rax),%xmm0
.byte 68,15,16,128,128,0,0,0 // movups 0x80(%rax),%xmm8
- .byte 15,88,5,113,141,1,0 // addps 0x18d71(%rip),%xmm0 # 398f0 <_sk_srcover_bgra_8888_sse2_lowp+0x11ec>
- .byte 68,15,40,13,105,135,1,0 // movaps 0x18769(%rip),%xmm9 # 392f0 <_sk_srcover_bgra_8888_sse2_lowp+0xbec>
+ .byte 15,88,5,209,142,1,0 // addps 0x18ed1(%rip),%xmm0 # 39a50 <_sk_srcover_bgra_8888_sse2_lowp+0x11dc>
+ .byte 68,15,40,13,201,136,1,0 // movaps 0x188c9(%rip),%xmm9 # 39450 <_sk_srcover_bgra_8888_sse2_lowp+0xbdc>
.byte 69,15,92,200 // subps %xmm8,%xmm9
- .byte 68,15,40,5,141,141,1,0 // movaps 0x18d8d(%rip),%xmm8 # 39920 <_sk_srcover_bgra_8888_sse2_lowp+0x121c>
+ .byte 68,15,40,5,237,142,1,0 // movaps 0x18eed(%rip),%xmm8 # 39a80 <_sk_srcover_bgra_8888_sse2_lowp+0x120c>
.byte 69,15,89,193 // mulps %xmm9,%xmm8
- .byte 68,15,88,5,145,141,1,0 // addps 0x18d91(%rip),%xmm8 # 39930 <_sk_srcover_bgra_8888_sse2_lowp+0x122c>
+ .byte 68,15,88,5,241,142,1,0 // addps 0x18ef1(%rip),%xmm8 # 39a90 <_sk_srcover_bgra_8888_sse2_lowp+0x121c>
.byte 69,15,89,193 // mulps %xmm9,%xmm8
- .byte 68,15,88,5,53,135,1,0 // addps 0x18735(%rip),%xmm8 # 392e0 <_sk_srcover_bgra_8888_sse2_lowp+0xbdc>
+ .byte 68,15,88,5,149,136,1,0 // addps 0x18895(%rip),%xmm8 # 39440 <_sk_srcover_bgra_8888_sse2_lowp+0xbcc>
.byte 69,15,89,193 // mulps %xmm9,%xmm8
- .byte 68,15,88,5,137,141,1,0 // addps 0x18d89(%rip),%xmm8 # 39940 <_sk_srcover_bgra_8888_sse2_lowp+0x123c>
+ .byte 68,15,88,5,233,142,1,0 // addps 0x18ee9(%rip),%xmm8 # 39aa0 <_sk_srcover_bgra_8888_sse2_lowp+0x122c>
.byte 68,15,17,128,0,1,0,0 // movups %xmm8,0x100(%rax)
.byte 72,173 // lods %ds:(%rsi),%rax
.byte 255,224 // jmpq *%rax
@@ -41292,17 +41292,17 @@ HIDDEN _sk_bicubic_p1x_sse41
FUNCTION(_sk_bicubic_p1x_sse41)
_sk_bicubic_p1x_sse41:
.byte 72,173 // lods %ds:(%rsi),%rax
- .byte 68,15,40,5,19,135,1,0 // movaps 0x18713(%rip),%xmm8 # 392e0 <_sk_srcover_bgra_8888_sse2_lowp+0xbdc>
+ .byte 68,15,40,5,115,136,1,0 // movaps 0x18873(%rip),%xmm8 # 39440 <_sk_srcover_bgra_8888_sse2_lowp+0xbcc>
.byte 15,16,0 // movups (%rax),%xmm0
.byte 68,15,16,136,128,0,0,0 // movups 0x80(%rax),%xmm9
.byte 65,15,88,192 // addps %xmm8,%xmm0
- .byte 68,15,40,21,60,141,1,0 // movaps 0x18d3c(%rip),%xmm10 # 39920 <_sk_srcover_bgra_8888_sse2_lowp+0x121c>
+ .byte 68,15,40,21,156,142,1,0 // movaps 0x18e9c(%rip),%xmm10 # 39a80 <_sk_srcover_bgra_8888_sse2_lowp+0x120c>
.byte 69,15,89,209 // mulps %xmm9,%xmm10
- .byte 68,15,88,21,64,141,1,0 // addps 0x18d40(%rip),%xmm10 # 39930 <_sk_srcover_bgra_8888_sse2_lowp+0x122c>
+ .byte 68,15,88,21,160,142,1,0 // addps 0x18ea0(%rip),%xmm10 # 39a90 <_sk_srcover_bgra_8888_sse2_lowp+0x121c>
.byte 69,15,89,209 // mulps %xmm9,%xmm10
.byte 69,15,88,208 // addps %xmm8,%xmm10
.byte 69,15,89,209 // mulps %xmm9,%xmm10
- .byte 68,15,88,21,60,141,1,0 // addps 0x18d3c(%rip),%xmm10 # 39940 <_sk_srcover_bgra_8888_sse2_lowp+0x123c>
+ .byte 68,15,88,21,156,142,1,0 // addps 0x18e9c(%rip),%xmm10 # 39aa0 <_sk_srcover_bgra_8888_sse2_lowp+0x122c>
.byte 68,15,17,144,0,1,0,0 // movups %xmm10,0x100(%rax)
.byte 72,173 // lods %ds:(%rsi),%rax
.byte 255,224 // jmpq *%rax
@@ -41314,11 +41314,11 @@ _sk_bicubic_p3x_sse41:
.byte 72,173 // lods %ds:(%rsi),%rax
.byte 15,16,0 // movups (%rax),%xmm0
.byte 68,15,16,128,128,0,0,0 // movups 0x80(%rax),%xmm8
- .byte 15,88,5,12,141,1,0 // addps 0x18d0c(%rip),%xmm0 # 39930 <_sk_srcover_bgra_8888_sse2_lowp+0x122c>
+ .byte 15,88,5,108,142,1,0 // addps 0x18e6c(%rip),%xmm0 # 39a90 <_sk_srcover_bgra_8888_sse2_lowp+0x121c>
.byte 69,15,40,200 // movaps %xmm8,%xmm9
.byte 69,15,89,201 // mulps %xmm9,%xmm9
- .byte 68,15,89,5,220,140,1,0 // mulps 0x18cdc(%rip),%xmm8 # 39910 <_sk_srcover_bgra_8888_sse2_lowp+0x120c>
- .byte 68,15,88,5,180,136,1,0 // addps 0x188b4(%rip),%xmm8 # 394f0 <_sk_srcover_bgra_8888_sse2_lowp+0xdec>
+ .byte 68,15,89,5,60,142,1,0 // mulps 0x18e3c(%rip),%xmm8 # 39a70 <_sk_srcover_bgra_8888_sse2_lowp+0x11fc>
+ .byte 68,15,88,5,20,138,1,0 // addps 0x18a14(%rip),%xmm8 # 39650 <_sk_srcover_bgra_8888_sse2_lowp+0xddc>
.byte 69,15,89,193 // mulps %xmm9,%xmm8
.byte 68,15,17,128,0,1,0,0 // movups %xmm8,0x100(%rax)
.byte 72,173 // lods %ds:(%rsi),%rax
@@ -41331,13 +41331,13 @@ _sk_bicubic_n3y_sse41:
.byte 72,173 // lods %ds:(%rsi),%rax
.byte 15,16,72,64 // movups 0x40(%rax),%xmm1
.byte 68,15,16,128,192,0,0,0 // movups 0xc0(%rax),%xmm8
- .byte 15,88,13,159,140,1,0 // addps 0x18c9f(%rip),%xmm1 # 39900 <_sk_srcover_bgra_8888_sse2_lowp+0x11fc>
- .byte 68,15,40,13,135,134,1,0 // movaps 0x18687(%rip),%xmm9 # 392f0 <_sk_srcover_bgra_8888_sse2_lowp+0xbec>
+ .byte 15,88,13,255,141,1,0 // addps 0x18dff(%rip),%xmm1 # 39a60 <_sk_srcover_bgra_8888_sse2_lowp+0x11ec>
+ .byte 68,15,40,13,231,135,1,0 // movaps 0x187e7(%rip),%xmm9 # 39450 <_sk_srcover_bgra_8888_sse2_lowp+0xbdc>
.byte 69,15,92,200 // subps %xmm8,%xmm9
.byte 69,15,40,193 // movaps %xmm9,%xmm8
.byte 69,15,89,192 // mulps %xmm8,%xmm8
- .byte 68,15,89,13,147,140,1,0 // mulps 0x18c93(%rip),%xmm9 # 39910 <_sk_srcover_bgra_8888_sse2_lowp+0x120c>
- .byte 68,15,88,13,107,136,1,0 // addps 0x1886b(%rip),%xmm9 # 394f0 <_sk_srcover_bgra_8888_sse2_lowp+0xdec>
+ .byte 68,15,89,13,243,141,1,0 // mulps 0x18df3(%rip),%xmm9 # 39a70 <_sk_srcover_bgra_8888_sse2_lowp+0x11fc>
+ .byte 68,15,88,13,203,137,1,0 // addps 0x189cb(%rip),%xmm9 # 39650 <_sk_srcover_bgra_8888_sse2_lowp+0xddc>
.byte 69,15,89,200 // mulps %xmm8,%xmm9
.byte 68,15,17,136,64,1,0,0 // movups %xmm9,0x140(%rax)
.byte 72,173 // lods %ds:(%rsi),%rax
@@ -41350,16 +41350,16 @@ _sk_bicubic_n1y_sse41:
.byte 72,173 // lods %ds:(%rsi),%rax
.byte 15,16,72,64 // movups 0x40(%rax),%xmm1
.byte 68,15,16,128,192,0,0,0 // movups 0xc0(%rax),%xmm8
- .byte 15,88,13,70,140,1,0 // addps 0x18c46(%rip),%xmm1 # 398f0 <_sk_srcover_bgra_8888_sse2_lowp+0x11ec>
- .byte 68,15,40,13,62,134,1,0 // movaps 0x1863e(%rip),%xmm9 # 392f0 <_sk_srcover_bgra_8888_sse2_lowp+0xbec>
+ .byte 15,88,13,166,141,1,0 // addps 0x18da6(%rip),%xmm1 # 39a50 <_sk_srcover_bgra_8888_sse2_lowp+0x11dc>
+ .byte 68,15,40,13,158,135,1,0 // movaps 0x1879e(%rip),%xmm9 # 39450 <_sk_srcover_bgra_8888_sse2_lowp+0xbdc>
.byte 69,15,92,200 // subps %xmm8,%xmm9
- .byte 68,15,40,5,98,140,1,0 // movaps 0x18c62(%rip),%xmm8 # 39920 <_sk_srcover_bgra_8888_sse2_lowp+0x121c>
+ .byte 68,15,40,5,194,141,1,0 // movaps 0x18dc2(%rip),%xmm8 # 39a80 <_sk_srcover_bgra_8888_sse2_lowp+0x120c>
.byte 69,15,89,193 // mulps %xmm9,%xmm8
- .byte 68,15,88,5,102,140,1,0 // addps 0x18c66(%rip),%xmm8 # 39930 <_sk_srcover_bgra_8888_sse2_lowp+0x122c>
+ .byte 68,15,88,5,198,141,1,0 // addps 0x18dc6(%rip),%xmm8 # 39a90 <_sk_srcover_bgra_8888_sse2_lowp+0x121c>
.byte 69,15,89,193 // mulps %xmm9,%xmm8
- .byte 68,15,88,5,10,134,1,0 // addps 0x1860a(%rip),%xmm8 # 392e0 <_sk_srcover_bgra_8888_sse2_lowp+0xbdc>
+ .byte 68,15,88,5,106,135,1,0 // addps 0x1876a(%rip),%xmm8 # 39440 <_sk_srcover_bgra_8888_sse2_lowp+0xbcc>
.byte 69,15,89,193 // mulps %xmm9,%xmm8
- .byte 68,15,88,5,94,140,1,0 // addps 0x18c5e(%rip),%xmm8 # 39940 <_sk_srcover_bgra_8888_sse2_lowp+0x123c>
+ .byte 68,15,88,5,190,141,1,0 // addps 0x18dbe(%rip),%xmm8 # 39aa0 <_sk_srcover_bgra_8888_sse2_lowp+0x122c>
.byte 68,15,17,128,64,1,0,0 // movups %xmm8,0x140(%rax)
.byte 72,173 // lods %ds:(%rsi),%rax
.byte 255,224 // jmpq *%rax
@@ -41369,17 +41369,17 @@ HIDDEN _sk_bicubic_p1y_sse41
FUNCTION(_sk_bicubic_p1y_sse41)
_sk_bicubic_p1y_sse41:
.byte 72,173 // lods %ds:(%rsi),%rax
- .byte 68,15,40,5,232,133,1,0 // movaps 0x185e8(%rip),%xmm8 # 392e0 <_sk_srcover_bgra_8888_sse2_lowp+0xbdc>
+ .byte 68,15,40,5,72,135,1,0 // movaps 0x18748(%rip),%xmm8 # 39440 <_sk_srcover_bgra_8888_sse2_lowp+0xbcc>
.byte 15,16,72,64 // movups 0x40(%rax),%xmm1
.byte 68,15,16,136,192,0,0,0 // movups 0xc0(%rax),%xmm9
.byte 65,15,88,200 // addps %xmm8,%xmm1
- .byte 68,15,40,21,16,140,1,0 // movaps 0x18c10(%rip),%xmm10 # 39920 <_sk_srcover_bgra_8888_sse2_lowp+0x121c>
+ .byte 68,15,40,21,112,141,1,0 // movaps 0x18d70(%rip),%xmm10 # 39a80 <_sk_srcover_bgra_8888_sse2_lowp+0x120c>
.byte 69,15,89,209 // mulps %xmm9,%xmm10
- .byte 68,15,88,21,20,140,1,0 // addps 0x18c14(%rip),%xmm10 # 39930 <_sk_srcover_bgra_8888_sse2_lowp+0x122c>
+ .byte 68,15,88,21,116,141,1,0 // addps 0x18d74(%rip),%xmm10 # 39a90 <_sk_srcover_bgra_8888_sse2_lowp+0x121c>
.byte 69,15,89,209 // mulps %xmm9,%xmm10
.byte 69,15,88,208 // addps %xmm8,%xmm10
.byte 69,15,89,209 // mulps %xmm9,%xmm10
- .byte 68,15,88,21,16,140,1,0 // addps 0x18c10(%rip),%xmm10 # 39940 <_sk_srcover_bgra_8888_sse2_lowp+0x123c>
+ .byte 68,15,88,21,112,141,1,0 // addps 0x18d70(%rip),%xmm10 # 39aa0 <_sk_srcover_bgra_8888_sse2_lowp+0x122c>
.byte 68,15,17,144,64,1,0,0 // movups %xmm10,0x140(%rax)
.byte 72,173 // lods %ds:(%rsi),%rax
.byte 255,224 // jmpq *%rax
@@ -41391,11 +41391,11 @@ _sk_bicubic_p3y_sse41:
.byte 72,173 // lods %ds:(%rsi),%rax
.byte 15,16,72,64 // movups 0x40(%rax),%xmm1
.byte 68,15,16,128,192,0,0,0 // movups 0xc0(%rax),%xmm8
- .byte 15,88,13,223,139,1,0 // addps 0x18bdf(%rip),%xmm1 # 39930 <_sk_srcover_bgra_8888_sse2_lowp+0x122c>
+ .byte 15,88,13,63,141,1,0 // addps 0x18d3f(%rip),%xmm1 # 39a90 <_sk_srcover_bgra_8888_sse2_lowp+0x121c>
.byte 69,15,40,200 // movaps %xmm8,%xmm9
.byte 69,15,89,201 // mulps %xmm9,%xmm9
- .byte 68,15,89,5,175,139,1,0 // mulps 0x18baf(%rip),%xmm8 # 39910 <_sk_srcover_bgra_8888_sse2_lowp+0x120c>
- .byte 68,15,88,5,135,135,1,0 // addps 0x18787(%rip),%xmm8 # 394f0 <_sk_srcover_bgra_8888_sse2_lowp+0xdec>
+ .byte 68,15,89,5,15,141,1,0 // mulps 0x18d0f(%rip),%xmm8 # 39a70 <_sk_srcover_bgra_8888_sse2_lowp+0x11fc>
+ .byte 68,15,88,5,231,136,1,0 // addps 0x188e7(%rip),%xmm8 # 39650 <_sk_srcover_bgra_8888_sse2_lowp+0xddc>
.byte 69,15,89,193 // mulps %xmm9,%xmm8
.byte 68,15,17,128,64,1,0,0 // movups %xmm8,0x140(%rax)
.byte 72,173 // lods %ds:(%rsi),%rax
@@ -41506,7 +41506,7 @@ _sk_clut_3D_sse41:
.byte 15,89,194 // mulps %xmm2,%xmm0
.byte 15,41,68,36,208 // movaps %xmm0,-0x30(%rsp)
.byte 243,15,91,240 // cvttps2dq %xmm0,%xmm6
- .byte 15,40,37,154,138,1,0 // movaps 0x18a9a(%rip),%xmm4 # 39950 <_sk_srcover_bgra_8888_sse2_lowp+0x124c>
+ .byte 15,40,37,250,139,1,0 // movaps 0x18bfa(%rip),%xmm4 # 39ab0 <_sk_srcover_bgra_8888_sse2_lowp+0x123c>
.byte 15,88,196 // addps %xmm4,%xmm0
.byte 15,41,68,36,176 // movaps %xmm0,-0x50(%rsp)
.byte 102,65,15,110,208 // movd %r8d,%xmm2
@@ -41545,7 +41545,7 @@ _sk_clut_3D_sse41:
.byte 102,68,15,56,64,192 // pmulld %xmm0,%xmm8
.byte 102,65,15,111,216 // movdqa %xmm8,%xmm3
.byte 102,15,254,218 // paddd %xmm2,%xmm3
- .byte 102,68,15,111,37,241,137,1,0 // movdqa 0x189f1(%rip),%xmm12 # 39960 <_sk_srcover_bgra_8888_sse2_lowp+0x125c>
+ .byte 102,68,15,111,37,81,139,1,0 // movdqa 0x18b51(%rip),%xmm12 # 39ac0 <_sk_srcover_bgra_8888_sse2_lowp+0x124c>
.byte 102,65,15,56,64,220 // pmulld %xmm12,%xmm3
.byte 102,15,118,228 // pcmpeqd %xmm4,%xmm4
.byte 102,15,111,203 // movdqa %xmm3,%xmm1
@@ -41568,7 +41568,7 @@ _sk_clut_3D_sse41:
.byte 102,65,15,58,22,217,1 // pextrd $0x1,%xmm3,%r9d
.byte 102,65,15,58,22,218,2 // pextrd $0x2,%xmm3,%r10d
.byte 102,65,15,58,22,219,3 // pextrd $0x3,%xmm3,%r11d
- .byte 102,15,111,37,53,131,1,0 // movdqa 0x18335(%rip),%xmm4 # 39320 <_sk_srcover_bgra_8888_sse2_lowp+0xc1c>
+ .byte 102,15,111,37,149,132,1,0 // movdqa 0x18495(%rip),%xmm4 # 39480 <_sk_srcover_bgra_8888_sse2_lowp+0xc0c>
.byte 102,15,254,220 // paddd %xmm4,%xmm3
.byte 102,15,111,252 // movdqa %xmm4,%xmm7
.byte 102,73,15,58,22,222,1 // pextrq $0x1,%xmm3,%r14
@@ -41741,7 +41741,7 @@ _sk_clut_3D_sse41:
.byte 102,68,15,254,202 // paddd %xmm2,%xmm9
.byte 102,65,15,111,192 // movdqa %xmm8,%xmm0
.byte 102,65,15,254,193 // paddd %xmm9,%xmm0
- .byte 102,15,111,45,0,134,1,0 // movdqa 0x18600(%rip),%xmm5 # 39960 <_sk_srcover_bgra_8888_sse2_lowp+0x125c>
+ .byte 102,15,111,45,96,135,1,0 // movdqa 0x18760(%rip),%xmm5 # 39ac0 <_sk_srcover_bgra_8888_sse2_lowp+0x124c>
.byte 102,15,56,64,197 // pmulld %xmm5,%xmm0
.byte 102,15,111,200 // movdqa %xmm0,%xmm1
.byte 102,15,118,246 // pcmpeqd %xmm6,%xmm6
@@ -41762,7 +41762,7 @@ _sk_clut_3D_sse41:
.byte 102,65,15,58,22,193,1 // pextrd $0x1,%xmm0,%r9d
.byte 102,65,15,58,22,194,2 // pextrd $0x2,%xmm0,%r10d
.byte 102,65,15,58,22,195,3 // pextrd $0x3,%xmm0,%r11d
- .byte 102,15,111,61,81,127,1,0 // movdqa 0x17f51(%rip),%xmm7 # 39320 <_sk_srcover_bgra_8888_sse2_lowp+0xc1c>
+ .byte 102,15,111,61,177,128,1,0 // movdqa 0x180b1(%rip),%xmm7 # 39480 <_sk_srcover_bgra_8888_sse2_lowp+0xc0c>
.byte 102,15,254,199 // paddd %xmm7,%xmm0
.byte 102,72,15,58,22,193,1 // pextrq $0x1,%xmm0,%rcx
.byte 102,72,15,126,195 // movq %xmm0,%rbx
@@ -41828,7 +41828,7 @@ _sk_clut_3D_sse41:
.byte 15,88,248 // addps %xmm0,%xmm7
.byte 102,68,15,254,210 // paddd %xmm2,%xmm10
.byte 102,69,15,254,194 // paddd %xmm10,%xmm8
- .byte 102,15,111,13,60,132,1,0 // movdqa 0x1843c(%rip),%xmm1 # 39960 <_sk_srcover_bgra_8888_sse2_lowp+0x125c>
+ .byte 102,15,111,13,156,133,1,0 // movdqa 0x1859c(%rip),%xmm1 # 39ac0 <_sk_srcover_bgra_8888_sse2_lowp+0x124c>
.byte 102,68,15,56,64,193 // pmulld %xmm1,%xmm8
.byte 102,65,15,111,192 // movdqa %xmm8,%xmm0
.byte 102,15,118,237 // pcmpeqd %xmm5,%xmm5
@@ -41849,7 +41849,7 @@ _sk_clut_3D_sse41:
.byte 102,69,15,58,22,193,1 // pextrd $0x1,%xmm8,%r9d
.byte 102,69,15,58,22,194,2 // pextrd $0x2,%xmm8,%r10d
.byte 102,69,15,58,22,195,3 // pextrd $0x3,%xmm8,%r11d
- .byte 102,15,111,21,135,125,1,0 // movdqa 0x17d87(%rip),%xmm2 # 39320 <_sk_srcover_bgra_8888_sse2_lowp+0xc1c>
+ .byte 102,15,111,21,231,126,1,0 // movdqa 0x17ee7(%rip),%xmm2 # 39480 <_sk_srcover_bgra_8888_sse2_lowp+0xc0c>
.byte 102,68,15,254,194 // paddd %xmm2,%xmm8
.byte 102,76,15,58,22,193,1 // pextrq $0x1,%xmm8,%rcx
.byte 102,76,15,126,195 // movq %xmm8,%rbx
@@ -41976,7 +41976,7 @@ _sk_clut_4D_sse41:
.byte 15,89,203 // mulps %xmm3,%xmm1
.byte 15,41,76,36,48 // movaps %xmm1,0x30(%rsp)
.byte 243,15,91,249 // cvttps2dq %xmm1,%xmm7
- .byte 15,40,29,154,129,1,0 // movaps 0x1819a(%rip),%xmm3 # 39950 <_sk_srcover_bgra_8888_sse2_lowp+0x124c>
+ .byte 15,40,29,250,130,1,0 // movaps 0x182fa(%rip),%xmm3 # 39ab0 <_sk_srcover_bgra_8888_sse2_lowp+0x123c>
.byte 15,88,203 // addps %xmm3,%xmm1
.byte 15,41,76,36,160 // movaps %xmm1,-0x60(%rsp)
.byte 102,65,15,110,232 // movd %r8d,%xmm5
@@ -42033,7 +42033,7 @@ _sk_clut_4D_sse41:
.byte 102,68,15,56,64,253 // pmulld %xmm5,%xmm15
.byte 102,65,15,111,223 // movdqa %xmm15,%xmm3
.byte 102,15,254,217 // paddd %xmm1,%xmm3
- .byte 102,68,15,111,29,153,128,1,0 // movdqa 0x18099(%rip),%xmm11 # 39960 <_sk_srcover_bgra_8888_sse2_lowp+0x125c>
+ .byte 102,68,15,111,29,249,129,1,0 // movdqa 0x181f9(%rip),%xmm11 # 39ac0 <_sk_srcover_bgra_8888_sse2_lowp+0x124c>
.byte 102,65,15,56,64,219 // pmulld %xmm11,%xmm3
.byte 102,15,118,192 // pcmpeqd %xmm0,%xmm0
.byte 102,15,111,243 // movdqa %xmm3,%xmm6
@@ -42055,7 +42055,7 @@ _sk_clut_4D_sse41:
.byte 102,65,15,58,22,217,1 // pextrd $0x1,%xmm3,%r9d
.byte 102,65,15,58,22,218,2 // pextrd $0x2,%xmm3,%r10d
.byte 102,65,15,58,22,219,3 // pextrd $0x3,%xmm3,%r11d
- .byte 102,68,15,111,21,225,121,1,0 // movdqa 0x179e1(%rip),%xmm10 # 39320 <_sk_srcover_bgra_8888_sse2_lowp+0xc1c>
+ .byte 102,68,15,111,21,65,123,1,0 // movdqa 0x17b41(%rip),%xmm10 # 39480 <_sk_srcover_bgra_8888_sse2_lowp+0xc0c>
.byte 102,65,15,254,218 // paddd %xmm10,%xmm3
.byte 102,73,15,58,22,222,1 // pextrq $0x1,%xmm3,%r14
.byte 102,72,15,126,219 // movq %xmm3,%rbx
@@ -42073,7 +42073,7 @@ _sk_clut_4D_sse41:
.byte 102,65,15,254,201 // paddd %xmm9,%xmm1
.byte 102,65,15,56,64,203 // pmulld %xmm11,%xmm1
.byte 102,15,111,217 // movdqa %xmm1,%xmm3
- .byte 102,15,250,29,207,127,1,0 // psubd 0x17fcf(%rip),%xmm3 # 39970 <_sk_srcover_bgra_8888_sse2_lowp+0x126c>
+ .byte 102,15,250,29,47,129,1,0 // psubd 0x1812f(%rip),%xmm3 # 39ad0 <_sk_srcover_bgra_8888_sse2_lowp+0x125c>
.byte 102,73,15,58,22,222,1 // pextrq $0x1,%xmm3,%r14
.byte 102,72,15,126,219 // movq %xmm3,%rbx
.byte 65,137,223 // mov %ebx,%r15d
@@ -42128,7 +42128,7 @@ _sk_clut_4D_sse41:
.byte 102,65,15,254,199 // paddd %xmm15,%xmm0
.byte 102,65,15,56,64,195 // pmulld %xmm11,%xmm0
.byte 102,15,111,232 // movdqa %xmm0,%xmm5
- .byte 102,15,250,45,175,126,1,0 // psubd 0x17eaf(%rip),%xmm5 # 39970 <_sk_srcover_bgra_8888_sse2_lowp+0x126c>
+ .byte 102,15,250,45,15,128,1,0 // psubd 0x1800f(%rip),%xmm5 # 39ad0 <_sk_srcover_bgra_8888_sse2_lowp+0x125c>
.byte 102,69,15,118,192 // pcmpeqd %xmm8,%xmm8
.byte 102,72,15,58,22,233,1 // pextrq $0x1,%xmm5,%rcx
.byte 102,72,15,126,237 // movq %xmm5,%rbp
@@ -42253,7 +42253,7 @@ _sk_clut_4D_sse41:
.byte 72,193,233,32 // shr $0x20,%rcx
.byte 243,15,16,36,136 // movss (%rax,%rcx,4),%xmm4
.byte 102,68,15,58,33,220,48 // insertps $0x30,%xmm4,%xmm11
- .byte 102,15,111,61,211,117,1,0 // movdqa 0x175d3(%rip),%xmm7 # 39320 <_sk_srcover_bgra_8888_sse2_lowp+0xc1c>
+ .byte 102,15,111,61,51,119,1,0 // movdqa 0x17733(%rip),%xmm7 # 39480 <_sk_srcover_bgra_8888_sse2_lowp+0xc0c>
.byte 102,15,254,223 // paddd %xmm7,%xmm3
.byte 102,72,15,58,22,217,1 // pextrq $0x1,%xmm3,%rcx
.byte 102,72,15,126,221 // movq %xmm3,%rbp
@@ -42339,7 +42339,7 @@ _sk_clut_4D_sse41:
.byte 102,65,15,58,22,201,1 // pextrd $0x1,%xmm1,%r9d
.byte 102,65,15,58,22,202,2 // pextrd $0x2,%xmm1,%r10d
.byte 102,65,15,58,22,203,3 // pextrd $0x3,%xmm1,%r11d
- .byte 102,68,15,111,21,23,116,1,0 // movdqa 0x17417(%rip),%xmm10 # 39320 <_sk_srcover_bgra_8888_sse2_lowp+0xc1c>
+ .byte 102,68,15,111,21,119,117,1,0 // movdqa 0x17577(%rip),%xmm10 # 39480 <_sk_srcover_bgra_8888_sse2_lowp+0xc0c>
.byte 102,65,15,254,202 // paddd %xmm10,%xmm1
.byte 102,72,15,58,22,201,1 // pextrq $0x1,%xmm1,%rcx
.byte 102,72,15,126,203 // movq %xmm1,%rbx
@@ -42435,7 +42435,7 @@ _sk_clut_4D_sse41:
.byte 102,15,111,124,36,192 // movdqa -0x40(%rsp),%xmm7
.byte 102,15,111,199 // movdqa %xmm7,%xmm0
.byte 102,15,254,195 // paddd %xmm3,%xmm0
- .byte 102,15,111,21,113,120,1,0 // movdqa 0x17871(%rip),%xmm2 # 39960 <_sk_srcover_bgra_8888_sse2_lowp+0x125c>
+ .byte 102,15,111,21,209,121,1,0 // movdqa 0x179d1(%rip),%xmm2 # 39ac0 <_sk_srcover_bgra_8888_sse2_lowp+0x124c>
.byte 102,15,56,64,194 // pmulld %xmm2,%xmm0
.byte 102,15,111,200 // movdqa %xmm0,%xmm1
.byte 102,69,15,118,192 // pcmpeqd %xmm8,%xmm8
@@ -42456,7 +42456,7 @@ _sk_clut_4D_sse41:
.byte 102,65,15,58,22,193,1 // pextrd $0x1,%xmm0,%r9d
.byte 102,65,15,58,22,194,2 // pextrd $0x2,%xmm0,%r10d
.byte 102,65,15,58,22,195,3 // pextrd $0x3,%xmm0,%r11d
- .byte 102,15,111,37,192,113,1,0 // movdqa 0x171c0(%rip),%xmm4 # 39320 <_sk_srcover_bgra_8888_sse2_lowp+0xc1c>
+ .byte 102,15,111,37,32,115,1,0 // movdqa 0x17320(%rip),%xmm4 # 39480 <_sk_srcover_bgra_8888_sse2_lowp+0xc0c>
.byte 102,15,254,196 // paddd %xmm4,%xmm0
.byte 102,72,15,58,22,193,1 // pextrq $0x1,%xmm0,%rcx
.byte 102,72,15,126,195 // movq %xmm0,%rbx
@@ -42528,7 +42528,7 @@ _sk_clut_4D_sse41:
.byte 102,68,15,111,226 // movdqa %xmm2,%xmm12
.byte 102,65,15,56,64,204 // pmulld %xmm12,%xmm1
.byte 102,15,111,209 // movdqa %xmm1,%xmm2
- .byte 102,15,250,21,156,118,1,0 // psubd 0x1769c(%rip),%xmm2 # 39970 <_sk_srcover_bgra_8888_sse2_lowp+0x126c>
+ .byte 102,15,250,21,252,119,1,0 // psubd 0x177fc(%rip),%xmm2 # 39ad0 <_sk_srcover_bgra_8888_sse2_lowp+0x125c>
.byte 102,72,15,58,22,209,1 // pextrq $0x1,%xmm2,%rcx
.byte 102,72,15,126,213 // movq %xmm2,%rbp
.byte 137,235 // mov %ebp,%ebx
@@ -42562,7 +42562,7 @@ _sk_clut_4D_sse41:
.byte 102,65,15,111,249 // movdqa %xmm9,%xmm7
.byte 102,69,15,56,64,236 // pmulld %xmm12,%xmm13
.byte 102,65,15,111,205 // movdqa %xmm13,%xmm1
- .byte 102,15,250,13,232,117,1,0 // psubd 0x175e8(%rip),%xmm1 # 39970 <_sk_srcover_bgra_8888_sse2_lowp+0x126c>
+ .byte 102,15,250,13,72,119,1,0 // psubd 0x17748(%rip),%xmm1 # 39ad0 <_sk_srcover_bgra_8888_sse2_lowp+0x125c>
.byte 102,72,15,58,22,201,1 // pextrq $0x1,%xmm1,%rcx
.byte 102,72,15,126,203 // movq %xmm1,%rbx
.byte 137,221 // mov %ebx,%ebp
@@ -42627,7 +42627,7 @@ _sk_clut_4D_sse41:
.byte 102,65,15,111,206 // movdqa %xmm14,%xmm1
.byte 102,15,111,108,36,192 // movdqa -0x40(%rsp),%xmm5
.byte 102,15,254,205 // paddd %xmm5,%xmm1
- .byte 102,15,111,37,127,116,1,0 // movdqa 0x1747f(%rip),%xmm4 # 39960 <_sk_srcover_bgra_8888_sse2_lowp+0x125c>
+ .byte 102,15,111,37,223,117,1,0 // movdqa 0x175df(%rip),%xmm4 # 39ac0 <_sk_srcover_bgra_8888_sse2_lowp+0x124c>
.byte 102,15,56,64,204 // pmulld %xmm4,%xmm1
.byte 102,15,111,193 // movdqa %xmm1,%xmm0
.byte 102,15,118,246 // pcmpeqd %xmm6,%xmm6
@@ -42648,7 +42648,7 @@ _sk_clut_4D_sse41:
.byte 102,65,15,58,22,201,1 // pextrd $0x1,%xmm1,%r9d
.byte 102,65,15,58,22,202,2 // pextrd $0x2,%xmm1,%r10d
.byte 102,65,15,58,22,203,3 // pextrd $0x3,%xmm1,%r11d
- .byte 102,15,111,29,208,109,1,0 // movdqa 0x16dd0(%rip),%xmm3 # 39320 <_sk_srcover_bgra_8888_sse2_lowp+0xc1c>
+ .byte 102,15,111,29,48,111,1,0 // movdqa 0x16f30(%rip),%xmm3 # 39480 <_sk_srcover_bgra_8888_sse2_lowp+0xc0c>
.byte 102,15,254,203 // paddd %xmm3,%xmm1
.byte 102,72,15,58,22,201,1 // pextrq $0x1,%xmm1,%rcx
.byte 102,72,15,126,203 // movq %xmm1,%rbx
@@ -42720,7 +42720,7 @@ _sk_clut_4D_sse41:
.byte 102,15,56,64,239 // pmulld %xmm7,%xmm5
.byte 102,15,111,205 // movdqa %xmm5,%xmm1
.byte 102,15,111,193 // movdqa %xmm1,%xmm0
- .byte 102,15,250,5,176,114,1,0 // psubd 0x172b0(%rip),%xmm0 # 39970 <_sk_srcover_bgra_8888_sse2_lowp+0x126c>
+ .byte 102,15,250,5,16,116,1,0 // psubd 0x17410(%rip),%xmm0 # 39ad0 <_sk_srcover_bgra_8888_sse2_lowp+0x125c>
.byte 102,72,15,58,22,193,1 // pextrq $0x1,%xmm0,%rcx
.byte 102,72,15,126,197 // movq %xmm0,%rbp
.byte 137,235 // mov %ebp,%ebx
@@ -42738,7 +42738,7 @@ _sk_clut_4D_sse41:
.byte 102,65,15,58,22,193,1 // pextrd $0x1,%xmm0,%r9d
.byte 102,65,15,58,22,194,2 // pextrd $0x2,%xmm0,%r10d
.byte 102,65,15,58,22,195,3 // pextrd $0x3,%xmm0,%r11d
- .byte 102,15,254,5,254,107,1,0 // paddd 0x16bfe(%rip),%xmm0 # 39320 <_sk_srcover_bgra_8888_sse2_lowp+0xc1c>
+ .byte 102,15,254,5,94,109,1,0 // paddd 0x16d5e(%rip),%xmm0 # 39480 <_sk_srcover_bgra_8888_sse2_lowp+0xc0c>
.byte 102,72,15,58,22,193,1 // pextrq $0x1,%xmm0,%rcx
.byte 102,72,15,126,195 // movq %xmm0,%rbx
.byte 137,221 // mov %ebx,%ebp
@@ -42754,7 +42754,7 @@ _sk_clut_4D_sse41:
.byte 102,15,254,84,36,176 // paddd -0x50(%rsp),%xmm2
.byte 102,15,56,64,215 // pmulld %xmm7,%xmm2
.byte 102,15,111,194 // movdqa %xmm2,%xmm0
- .byte 102,15,250,5,247,113,1,0 // psubd 0x171f7(%rip),%xmm0 # 39970 <_sk_srcover_bgra_8888_sse2_lowp+0x126c>
+ .byte 102,15,250,5,87,115,1,0 // psubd 0x17357(%rip),%xmm0 # 39ad0 <_sk_srcover_bgra_8888_sse2_lowp+0x125c>
.byte 102,72,15,58,22,193,1 // pextrq $0x1,%xmm0,%rcx
.byte 102,72,15,126,195 // movq %xmm0,%rbx
.byte 137,221 // mov %ebx,%ebp
@@ -42772,7 +42772,7 @@ _sk_clut_4D_sse41:
.byte 102,65,15,58,22,199,1 // pextrd $0x1,%xmm0,%r15d
.byte 102,65,15,58,22,196,2 // pextrd $0x2,%xmm0,%r12d
.byte 102,65,15,58,22,198,3 // pextrd $0x3,%xmm0,%r14d
- .byte 102,15,254,5,69,107,1,0 // paddd 0x16b45(%rip),%xmm0 # 39320 <_sk_srcover_bgra_8888_sse2_lowp+0xc1c>
+ .byte 102,15,254,5,165,108,1,0 // paddd 0x16ca5(%rip),%xmm0 # 39480 <_sk_srcover_bgra_8888_sse2_lowp+0xc0c>
.byte 102,72,15,58,22,195,1 // pextrq $0x1,%xmm0,%rbx
.byte 102,72,15,126,197 // movq %xmm0,%rbp
.byte 137,233 // mov %ebp,%ecx
@@ -42835,7 +42835,7 @@ _sk_clut_4D_sse41:
.byte 15,89,212 // mulps %xmm4,%xmm2
.byte 65,15,88,211 // addps %xmm11,%xmm2
.byte 72,173 // lods %ds:(%rsi),%rax
- .byte 15,40,29,2,106,1,0 // movaps 0x16a02(%rip),%xmm3 # 392f0 <_sk_srcover_bgra_8888_sse2_lowp+0xbec>
+ .byte 15,40,29,98,107,1,0 // movaps 0x16b62(%rip),%xmm3 # 39450 <_sk_srcover_bgra_8888_sse2_lowp+0xbdc>
.byte 72,139,76,36,248 // mov -0x8(%rsp),%rcx
.byte 15,40,100,36,64 // movaps 0x40(%rsp),%xmm4
.byte 15,40,108,36,80 // movaps 0x50(%rsp),%xmm5
@@ -42854,15 +42854,15 @@ HIDDEN _sk_gauss_a_to_rgba_sse41
.globl _sk_gauss_a_to_rgba_sse41
FUNCTION(_sk_gauss_a_to_rgba_sse41)
_sk_gauss_a_to_rgba_sse41:
- .byte 15,40,5,95,112,1,0 // movaps 0x1705f(%rip),%xmm0 # 39980 <_sk_srcover_bgra_8888_sse2_lowp+0x127c>
+ .byte 15,40,5,191,113,1,0 // movaps 0x171bf(%rip),%xmm0 # 39ae0 <_sk_srcover_bgra_8888_sse2_lowp+0x126c>
.byte 15,89,195 // mulps %xmm3,%xmm0
- .byte 15,88,5,101,112,1,0 // addps 0x17065(%rip),%xmm0 # 39990 <_sk_srcover_bgra_8888_sse2_lowp+0x128c>
+ .byte 15,88,5,197,113,1,0 // addps 0x171c5(%rip),%xmm0 # 39af0 <_sk_srcover_bgra_8888_sse2_lowp+0x127c>
.byte 15,89,195 // mulps %xmm3,%xmm0
- .byte 15,88,5,107,112,1,0 // addps 0x1706b(%rip),%xmm0 # 399a0 <_sk_srcover_bgra_8888_sse2_lowp+0x129c>
+ .byte 15,88,5,203,113,1,0 // addps 0x171cb(%rip),%xmm0 # 39b00 <_sk_srcover_bgra_8888_sse2_lowp+0x128c>
.byte 15,89,195 // mulps %xmm3,%xmm0
- .byte 15,88,5,113,112,1,0 // addps 0x17071(%rip),%xmm0 # 399b0 <_sk_srcover_bgra_8888_sse2_lowp+0x12ac>
+ .byte 15,88,5,209,113,1,0 // addps 0x171d1(%rip),%xmm0 # 39b10 <_sk_srcover_bgra_8888_sse2_lowp+0x129c>
.byte 15,89,195 // mulps %xmm3,%xmm0
- .byte 15,88,5,119,112,1,0 // addps 0x17077(%rip),%xmm0 # 399c0 <_sk_srcover_bgra_8888_sse2_lowp+0x12bc>
+ .byte 15,88,5,215,113,1,0 // addps 0x171d7(%rip),%xmm0 # 39b20 <_sk_srcover_bgra_8888_sse2_lowp+0x12ac>
.byte 72,173 // lods %ds:(%rsi),%rax
.byte 15,40,200 // movaps %xmm0,%xmm1
.byte 15,40,208 // movaps %xmm0,%xmm2
@@ -42961,9 +42961,9 @@ _sk_seed_shader_sse2:
.byte 102,15,110,201 // movd %ecx,%xmm1
.byte 102,15,112,201,0 // pshufd $0x0,%xmm1,%xmm1
.byte 15,91,201 // cvtdq2ps %xmm1,%xmm1
- .byte 15,88,13,157,104,1,0 // addps 0x1689d(%rip),%xmm1 # 392e0 <_sk_srcover_bgra_8888_sse2_lowp+0xbdc>
+ .byte 15,88,13,253,105,1,0 // addps 0x169fd(%rip),%xmm1 # 39440 <_sk_srcover_bgra_8888_sse2_lowp+0xbcc>
.byte 72,173 // lods %ds:(%rsi),%rax
- .byte 15,40,21,164,104,1,0 // movaps 0x168a4(%rip),%xmm2 # 392f0 <_sk_srcover_bgra_8888_sse2_lowp+0xbec>
+ .byte 15,40,21,4,106,1,0 // movaps 0x16a04(%rip),%xmm2 # 39450 <_sk_srcover_bgra_8888_sse2_lowp+0xbdc>
.byte 15,87,219 // xorps %xmm3,%xmm3
.byte 15,87,228 // xorps %xmm4,%xmm4
.byte 15,87,237 // xorps %xmm5,%xmm5
@@ -42978,18 +42978,18 @@ _sk_dither_sse2:
.byte 72,173 // lods %ds:(%rsi),%rax
.byte 102,68,15,110,194 // movd %edx,%xmm8
.byte 102,69,15,112,192,0 // pshufd $0x0,%xmm8,%xmm8
- .byte 102,68,15,254,5,77,104,1,0 // paddd 0x1684d(%rip),%xmm8 # 392c0 <_sk_srcover_bgra_8888_sse2_lowp+0xbbc>
+ .byte 102,68,15,254,5,173,105,1,0 // paddd 0x169ad(%rip),%xmm8 # 39420 <_sk_srcover_bgra_8888_sse2_lowp+0xbac>
.byte 102,68,15,110,201 // movd %ecx,%xmm9
.byte 102,69,15,112,201,0 // pshufd $0x0,%xmm9,%xmm9
.byte 102,69,15,239,200 // pxor %xmm8,%xmm9
- .byte 102,68,15,111,21,116,104,1,0 // movdqa 0x16874(%rip),%xmm10 # 39300 <_sk_srcover_bgra_8888_sse2_lowp+0xbfc>
+ .byte 102,68,15,111,21,212,105,1,0 // movdqa 0x169d4(%rip),%xmm10 # 39460 <_sk_srcover_bgra_8888_sse2_lowp+0xbec>
.byte 102,69,15,111,217 // movdqa %xmm9,%xmm11
.byte 102,69,15,219,218 // pand %xmm10,%xmm11
.byte 102,65,15,114,243,5 // pslld $0x5,%xmm11
.byte 102,69,15,219,208 // pand %xmm8,%xmm10
.byte 102,65,15,114,242,4 // pslld $0x4,%xmm10
- .byte 102,68,15,111,37,96,104,1,0 // movdqa 0x16860(%rip),%xmm12 # 39310 <_sk_srcover_bgra_8888_sse2_lowp+0xc0c>
- .byte 102,68,15,111,45,103,104,1,0 // movdqa 0x16867(%rip),%xmm13 # 39320 <_sk_srcover_bgra_8888_sse2_lowp+0xc1c>
+ .byte 102,68,15,111,37,192,105,1,0 // movdqa 0x169c0(%rip),%xmm12 # 39470 <_sk_srcover_bgra_8888_sse2_lowp+0xbfc>
+ .byte 102,68,15,111,45,199,105,1,0 // movdqa 0x169c7(%rip),%xmm13 # 39480 <_sk_srcover_bgra_8888_sse2_lowp+0xc0c>
.byte 102,69,15,111,241 // movdqa %xmm9,%xmm14
.byte 102,69,15,219,245 // pand %xmm13,%xmm14
.byte 102,65,15,114,246,2 // pslld $0x2,%xmm14
@@ -43005,8 +43005,8 @@ _sk_dither_sse2:
.byte 102,69,15,235,198 // por %xmm14,%xmm8
.byte 102,69,15,235,193 // por %xmm9,%xmm8
.byte 69,15,91,192 // cvtdq2ps %xmm8,%xmm8
- .byte 68,15,89,5,34,104,1,0 // mulps 0x16822(%rip),%xmm8 # 39330 <_sk_srcover_bgra_8888_sse2_lowp+0xc2c>
- .byte 68,15,88,5,42,104,1,0 // addps 0x1682a(%rip),%xmm8 # 39340 <_sk_srcover_bgra_8888_sse2_lowp+0xc3c>
+ .byte 68,15,89,5,130,105,1,0 // mulps 0x16982(%rip),%xmm8 # 39490 <_sk_srcover_bgra_8888_sse2_lowp+0xc1c>
+ .byte 68,15,88,5,138,105,1,0 // addps 0x1698a(%rip),%xmm8 # 394a0 <_sk_srcover_bgra_8888_sse2_lowp+0xc2c>
.byte 243,68,15,16,16 // movss (%rax),%xmm10
.byte 69,15,198,210,0 // shufps $0x0,%xmm10,%xmm10
.byte 69,15,89,208 // mulps %xmm8,%xmm10
@@ -43048,7 +43048,7 @@ HIDDEN _sk_black_color_sse2
FUNCTION(_sk_black_color_sse2)
_sk_black_color_sse2:
.byte 72,173 // lods %ds:(%rsi),%rax
- .byte 15,40,29,96,103,1,0 // movaps 0x16760(%rip),%xmm3 # 392f0 <_sk_srcover_bgra_8888_sse2_lowp+0xbec>
+ .byte 15,40,29,192,104,1,0 // movaps 0x168c0(%rip),%xmm3 # 39450 <_sk_srcover_bgra_8888_sse2_lowp+0xbdc>
.byte 15,87,192 // xorps %xmm0,%xmm0
.byte 15,87,201 // xorps %xmm1,%xmm1
.byte 15,87,210 // xorps %xmm2,%xmm2
@@ -43059,7 +43059,7 @@ HIDDEN _sk_white_color_sse2
FUNCTION(_sk_white_color_sse2)
_sk_white_color_sse2:
.byte 72,173 // lods %ds:(%rsi),%rax
- .byte 15,40,5,76,103,1,0 // movaps 0x1674c(%rip),%xmm0 # 392f0 <_sk_srcover_bgra_8888_sse2_lowp+0xbec>
+ .byte 15,40,5,172,104,1,0 // movaps 0x168ac(%rip),%xmm0 # 39450 <_sk_srcover_bgra_8888_sse2_lowp+0xbdc>
.byte 15,40,200 // movaps %xmm0,%xmm1
.byte 15,40,208 // movaps %xmm0,%xmm2
.byte 15,40,216 // movaps %xmm0,%xmm3
@@ -43105,7 +43105,7 @@ HIDDEN _sk_srcatop_sse2
FUNCTION(_sk_srcatop_sse2)
_sk_srcatop_sse2:
.byte 15,89,199 // mulps %xmm7,%xmm0
- .byte 68,15,40,5,252,102,1,0 // movaps 0x166fc(%rip),%xmm8 # 392f0 <_sk_srcover_bgra_8888_sse2_lowp+0xbec>
+ .byte 68,15,40,5,92,104,1,0 // movaps 0x1685c(%rip),%xmm8 # 39450 <_sk_srcover_bgra_8888_sse2_lowp+0xbdc>
.byte 68,15,92,195 // subps %xmm3,%xmm8
.byte 69,15,40,200 // movaps %xmm8,%xmm9
.byte 68,15,89,204 // mulps %xmm4,%xmm9
@@ -43130,7 +43130,7 @@ FUNCTION(_sk_dstatop_sse2)
_sk_dstatop_sse2:
.byte 68,15,40,195 // movaps %xmm3,%xmm8
.byte 68,15,89,196 // mulps %xmm4,%xmm8
- .byte 68,15,40,13,175,102,1,0 // movaps 0x166af(%rip),%xmm9 # 392f0 <_sk_srcover_bgra_8888_sse2_lowp+0xbec>
+ .byte 68,15,40,13,15,104,1,0 // movaps 0x1680f(%rip),%xmm9 # 39450 <_sk_srcover_bgra_8888_sse2_lowp+0xbdc>
.byte 68,15,92,207 // subps %xmm7,%xmm9
.byte 65,15,89,193 // mulps %xmm9,%xmm0
.byte 65,15,88,192 // addps %xmm8,%xmm0
@@ -43177,7 +43177,7 @@ HIDDEN _sk_srcout_sse2
.globl _sk_srcout_sse2
FUNCTION(_sk_srcout_sse2)
_sk_srcout_sse2:
- .byte 68,15,40,5,67,102,1,0 // movaps 0x16643(%rip),%xmm8 # 392f0 <_sk_srcover_bgra_8888_sse2_lowp+0xbec>
+ .byte 68,15,40,5,163,103,1,0 // movaps 0x167a3(%rip),%xmm8 # 39450 <_sk_srcover_bgra_8888_sse2_lowp+0xbdc>
.byte 68,15,92,199 // subps %xmm7,%xmm8
.byte 65,15,89,192 // mulps %xmm8,%xmm0
.byte 65,15,89,200 // mulps %xmm8,%xmm1
@@ -43190,7 +43190,7 @@ HIDDEN _sk_dstout_sse2
.globl _sk_dstout_sse2
FUNCTION(_sk_dstout_sse2)
_sk_dstout_sse2:
- .byte 68,15,40,5,35,102,1,0 // movaps 0x16623(%rip),%xmm8 # 392f0 <_sk_srcover_bgra_8888_sse2_lowp+0xbec>
+ .byte 68,15,40,5,131,103,1,0 // movaps 0x16783(%rip),%xmm8 # 39450 <_sk_srcover_bgra_8888_sse2_lowp+0xbdc>
.byte 68,15,92,195 // subps %xmm3,%xmm8
.byte 65,15,40,192 // movaps %xmm8,%xmm0
.byte 15,89,196 // mulps %xmm4,%xmm0
@@ -43207,7 +43207,7 @@ HIDDEN _sk_srcover_sse2
.globl _sk_srcover_sse2
FUNCTION(_sk_srcover_sse2)
_sk_srcover_sse2:
- .byte 68,15,40,5,246,101,1,0 // movaps 0x165f6(%rip),%xmm8 # 392f0 <_sk_srcover_bgra_8888_sse2_lowp+0xbec>
+ .byte 68,15,40,5,86,103,1,0 // movaps 0x16756(%rip),%xmm8 # 39450 <_sk_srcover_bgra_8888_sse2_lowp+0xbdc>
.byte 68,15,92,195 // subps %xmm3,%xmm8
.byte 69,15,40,200 // movaps %xmm8,%xmm9
.byte 68,15,89,204 // mulps %xmm4,%xmm9
@@ -43227,7 +43227,7 @@ HIDDEN _sk_dstover_sse2
.globl _sk_dstover_sse2
FUNCTION(_sk_dstover_sse2)
_sk_dstover_sse2:
- .byte 68,15,40,5,186,101,1,0 // movaps 0x165ba(%rip),%xmm8 # 392f0 <_sk_srcover_bgra_8888_sse2_lowp+0xbec>
+ .byte 68,15,40,5,26,103,1,0 // movaps 0x1671a(%rip),%xmm8 # 39450 <_sk_srcover_bgra_8888_sse2_lowp+0xbdc>
.byte 68,15,92,199 // subps %xmm7,%xmm8
.byte 65,15,89,192 // mulps %xmm8,%xmm0
.byte 15,88,196 // addps %xmm4,%xmm0
@@ -43255,7 +43255,7 @@ HIDDEN _sk_multiply_sse2
.globl _sk_multiply_sse2
FUNCTION(_sk_multiply_sse2)
_sk_multiply_sse2:
- .byte 68,15,40,5,126,101,1,0 // movaps 0x1657e(%rip),%xmm8 # 392f0 <_sk_srcover_bgra_8888_sse2_lowp+0xbec>
+ .byte 68,15,40,5,222,102,1,0 // movaps 0x166de(%rip),%xmm8 # 39450 <_sk_srcover_bgra_8888_sse2_lowp+0xbdc>
.byte 69,15,40,200 // movaps %xmm8,%xmm9
.byte 68,15,92,207 // subps %xmm7,%xmm9
.byte 69,15,40,209 // movaps %xmm9,%xmm10
@@ -43293,7 +43293,7 @@ HIDDEN _sk_plus__sse2
FUNCTION(_sk_plus__sse2)
_sk_plus__sse2:
.byte 15,88,196 // addps %xmm4,%xmm0
- .byte 68,15,40,5,255,100,1,0 // movaps 0x164ff(%rip),%xmm8 # 392f0 <_sk_srcover_bgra_8888_sse2_lowp+0xbec>
+ .byte 68,15,40,5,95,102,1,0 // movaps 0x1665f(%rip),%xmm8 # 39450 <_sk_srcover_bgra_8888_sse2_lowp+0xbdc>
.byte 65,15,93,192 // minps %xmm8,%xmm0
.byte 15,88,205 // addps %xmm5,%xmm1
.byte 65,15,93,200 // minps %xmm8,%xmm1
@@ -43335,7 +43335,7 @@ HIDDEN _sk_xor__sse2
FUNCTION(_sk_xor__sse2)
_sk_xor__sse2:
.byte 68,15,40,195 // movaps %xmm3,%xmm8
- .byte 15,40,29,139,100,1,0 // movaps 0x1648b(%rip),%xmm3 # 392f0 <_sk_srcover_bgra_8888_sse2_lowp+0xbec>
+ .byte 15,40,29,235,101,1,0 // movaps 0x165eb(%rip),%xmm3 # 39450 <_sk_srcover_bgra_8888_sse2_lowp+0xbdc>
.byte 68,15,40,203 // movaps %xmm3,%xmm9
.byte 68,15,92,207 // subps %xmm7,%xmm9
.byte 65,15,89,193 // mulps %xmm9,%xmm0
@@ -43383,7 +43383,7 @@ _sk_darken_sse2:
.byte 68,15,89,206 // mulps %xmm6,%xmm9
.byte 65,15,95,209 // maxps %xmm9,%xmm2
.byte 68,15,92,194 // subps %xmm2,%xmm8
- .byte 15,40,21,230,99,1,0 // movaps 0x163e6(%rip),%xmm2 # 392f0 <_sk_srcover_bgra_8888_sse2_lowp+0xbec>
+ .byte 15,40,21,70,101,1,0 // movaps 0x16546(%rip),%xmm2 # 39450 <_sk_srcover_bgra_8888_sse2_lowp+0xbdc>
.byte 15,92,211 // subps %xmm3,%xmm2
.byte 15,89,215 // mulps %xmm7,%xmm2
.byte 15,88,218 // addps %xmm2,%xmm3
@@ -43417,7 +43417,7 @@ _sk_lighten_sse2:
.byte 68,15,89,206 // mulps %xmm6,%xmm9
.byte 65,15,93,209 // minps %xmm9,%xmm2
.byte 68,15,92,194 // subps %xmm2,%xmm8
- .byte 15,40,21,123,99,1,0 // movaps 0x1637b(%rip),%xmm2 # 392f0 <_sk_srcover_bgra_8888_sse2_lowp+0xbec>
+ .byte 15,40,21,219,100,1,0 // movaps 0x164db(%rip),%xmm2 # 39450 <_sk_srcover_bgra_8888_sse2_lowp+0xbdc>
.byte 15,92,211 // subps %xmm3,%xmm2
.byte 15,89,215 // mulps %xmm7,%xmm2
.byte 15,88,218 // addps %xmm2,%xmm3
@@ -43454,7 +43454,7 @@ _sk_difference_sse2:
.byte 65,15,93,209 // minps %xmm9,%xmm2
.byte 15,88,210 // addps %xmm2,%xmm2
.byte 68,15,92,194 // subps %xmm2,%xmm8
- .byte 15,40,21,5,99,1,0 // movaps 0x16305(%rip),%xmm2 # 392f0 <_sk_srcover_bgra_8888_sse2_lowp+0xbec>
+ .byte 15,40,21,101,100,1,0 // movaps 0x16465(%rip),%xmm2 # 39450 <_sk_srcover_bgra_8888_sse2_lowp+0xbdc>
.byte 15,92,211 // subps %xmm3,%xmm2
.byte 15,89,215 // mulps %xmm7,%xmm2
.byte 15,88,218 // addps %xmm2,%xmm3
@@ -43482,7 +43482,7 @@ _sk_exclusion_sse2:
.byte 15,89,214 // mulps %xmm6,%xmm2
.byte 15,88,210 // addps %xmm2,%xmm2
.byte 68,15,92,194 // subps %xmm2,%xmm8
- .byte 15,40,21,181,98,1,0 // movaps 0x162b5(%rip),%xmm2 # 392f0 <_sk_srcover_bgra_8888_sse2_lowp+0xbec>
+ .byte 15,40,21,21,100,1,0 // movaps 0x16415(%rip),%xmm2 # 39450 <_sk_srcover_bgra_8888_sse2_lowp+0xbdc>
.byte 15,92,211 // subps %xmm3,%xmm2
.byte 15,89,215 // mulps %xmm7,%xmm2
.byte 15,88,218 // addps %xmm2,%xmm3
@@ -43495,7 +43495,7 @@ HIDDEN _sk_colorburn_sse2
FUNCTION(_sk_colorburn_sse2)
_sk_colorburn_sse2:
.byte 68,15,40,192 // movaps %xmm0,%xmm8
- .byte 68,15,40,13,152,98,1,0 // movaps 0x16298(%rip),%xmm9 # 392f0 <_sk_srcover_bgra_8888_sse2_lowp+0xbec>
+ .byte 68,15,40,13,248,99,1,0 // movaps 0x163f8(%rip),%xmm9 # 39450 <_sk_srcover_bgra_8888_sse2_lowp+0xbdc>
.byte 69,15,40,209 // movaps %xmm9,%xmm10
.byte 68,15,92,215 // subps %xmm7,%xmm10
.byte 69,15,40,218 // movaps %xmm10,%xmm11
@@ -43590,7 +43590,7 @@ HIDDEN _sk_colordodge_sse2
FUNCTION(_sk_colordodge_sse2)
_sk_colordodge_sse2:
.byte 68,15,40,200 // movaps %xmm0,%xmm9
- .byte 68,15,40,21,64,97,1,0 // movaps 0x16140(%rip),%xmm10 # 392f0 <_sk_srcover_bgra_8888_sse2_lowp+0xbec>
+ .byte 68,15,40,21,160,98,1,0 // movaps 0x162a0(%rip),%xmm10 # 39450 <_sk_srcover_bgra_8888_sse2_lowp+0xbdc>
.byte 69,15,40,218 // movaps %xmm10,%xmm11
.byte 68,15,92,223 // subps %xmm7,%xmm11
.byte 65,15,40,195 // movaps %xmm11,%xmm0
@@ -43682,7 +43682,7 @@ _sk_hardlight_sse2:
.byte 15,41,116,36,232 // movaps %xmm6,-0x18(%rsp)
.byte 15,40,245 // movaps %xmm5,%xmm6
.byte 15,40,236 // movaps %xmm4,%xmm5
- .byte 68,15,40,29,233,95,1,0 // movaps 0x15fe9(%rip),%xmm11 # 392f0 <_sk_srcover_bgra_8888_sse2_lowp+0xbec>
+ .byte 68,15,40,29,73,97,1,0 // movaps 0x16149(%rip),%xmm11 # 39450 <_sk_srcover_bgra_8888_sse2_lowp+0xbdc>
.byte 69,15,40,211 // movaps %xmm11,%xmm10
.byte 68,15,92,215 // subps %xmm7,%xmm10
.byte 69,15,40,194 // movaps %xmm10,%xmm8
@@ -43770,7 +43770,7 @@ FUNCTION(_sk_overlay_sse2)
_sk_overlay_sse2:
.byte 68,15,40,193 // movaps %xmm1,%xmm8
.byte 68,15,40,232 // movaps %xmm0,%xmm13
- .byte 68,15,40,13,167,94,1,0 // movaps 0x15ea7(%rip),%xmm9 # 392f0 <_sk_srcover_bgra_8888_sse2_lowp+0xbec>
+ .byte 68,15,40,13,7,96,1,0 // movaps 0x16007(%rip),%xmm9 # 39450 <_sk_srcover_bgra_8888_sse2_lowp+0xbdc>
.byte 69,15,40,209 // movaps %xmm9,%xmm10
.byte 68,15,92,215 // subps %xmm7,%xmm10
.byte 69,15,40,218 // movaps %xmm10,%xmm11
@@ -43861,7 +43861,7 @@ _sk_softlight_sse2:
.byte 68,15,40,213 // movaps %xmm5,%xmm10
.byte 68,15,94,215 // divps %xmm7,%xmm10
.byte 69,15,84,212 // andps %xmm12,%xmm10
- .byte 68,15,40,13,84,93,1,0 // movaps 0x15d54(%rip),%xmm9 # 392f0 <_sk_srcover_bgra_8888_sse2_lowp+0xbec>
+ .byte 68,15,40,13,180,94,1,0 // movaps 0x15eb4(%rip),%xmm9 # 39450 <_sk_srcover_bgra_8888_sse2_lowp+0xbdc>
.byte 69,15,40,249 // movaps %xmm9,%xmm15
.byte 69,15,92,250 // subps %xmm10,%xmm15
.byte 69,15,40,218 // movaps %xmm10,%xmm11
@@ -43874,10 +43874,10 @@ _sk_softlight_sse2:
.byte 65,15,40,194 // movaps %xmm10,%xmm0
.byte 15,89,192 // mulps %xmm0,%xmm0
.byte 65,15,88,194 // addps %xmm10,%xmm0
- .byte 68,15,40,53,126,93,1,0 // movaps 0x15d7e(%rip),%xmm14 # 39350 <_sk_srcover_bgra_8888_sse2_lowp+0xc4c>
+ .byte 68,15,40,53,222,94,1,0 // movaps 0x15ede(%rip),%xmm14 # 394b0 <_sk_srcover_bgra_8888_sse2_lowp+0xc3c>
.byte 69,15,88,222 // addps %xmm14,%xmm11
.byte 68,15,89,216 // mulps %xmm0,%xmm11
- .byte 68,15,40,21,126,93,1,0 // movaps 0x15d7e(%rip),%xmm10 # 39360 <_sk_srcover_bgra_8888_sse2_lowp+0xc5c>
+ .byte 68,15,40,21,222,94,1,0 // movaps 0x15ede(%rip),%xmm10 # 394c0 <_sk_srcover_bgra_8888_sse2_lowp+0xc4c>
.byte 69,15,89,234 // mulps %xmm10,%xmm13
.byte 69,15,88,235 // addps %xmm11,%xmm13
.byte 15,88,228 // addps %xmm4,%xmm4
@@ -44022,7 +44022,7 @@ _sk_hue_sse2:
.byte 68,15,40,209 // movaps %xmm1,%xmm10
.byte 68,15,40,225 // movaps %xmm1,%xmm12
.byte 68,15,89,211 // mulps %xmm3,%xmm10
- .byte 68,15,40,5,17,91,1,0 // movaps 0x15b11(%rip),%xmm8 # 392f0 <_sk_srcover_bgra_8888_sse2_lowp+0xbec>
+ .byte 68,15,40,5,113,92,1,0 // movaps 0x15c71(%rip),%xmm8 # 39450 <_sk_srcover_bgra_8888_sse2_lowp+0xbdc>
.byte 69,15,40,216 // movaps %xmm8,%xmm11
.byte 15,40,207 // movaps %xmm7,%xmm1
.byte 68,15,92,217 // subps %xmm1,%xmm11
@@ -44070,12 +44070,12 @@ _sk_hue_sse2:
.byte 69,15,84,206 // andps %xmm14,%xmm9
.byte 69,15,84,214 // andps %xmm14,%xmm10
.byte 65,15,84,214 // andps %xmm14,%xmm2
- .byte 68,15,40,61,206,90,1,0 // movaps 0x15ace(%rip),%xmm15 # 39370 <_sk_srcover_bgra_8888_sse2_lowp+0xc6c>
+ .byte 68,15,40,61,46,92,1,0 // movaps 0x15c2e(%rip),%xmm15 # 394d0 <_sk_srcover_bgra_8888_sse2_lowp+0xc5c>
.byte 65,15,89,231 // mulps %xmm15,%xmm4
- .byte 15,40,5,211,90,1,0 // movaps 0x15ad3(%rip),%xmm0 # 39380 <_sk_srcover_bgra_8888_sse2_lowp+0xc7c>
+ .byte 15,40,5,51,92,1,0 // movaps 0x15c33(%rip),%xmm0 # 394e0 <_sk_srcover_bgra_8888_sse2_lowp+0xc6c>
.byte 15,89,240 // mulps %xmm0,%xmm6
.byte 15,88,244 // addps %xmm4,%xmm6
- .byte 68,15,40,53,213,90,1,0 // movaps 0x15ad5(%rip),%xmm14 # 39390 <_sk_srcover_bgra_8888_sse2_lowp+0xc8c>
+ .byte 68,15,40,53,53,92,1,0 // movaps 0x15c35(%rip),%xmm14 # 394f0 <_sk_srcover_bgra_8888_sse2_lowp+0xc7c>
.byte 68,15,40,239 // movaps %xmm7,%xmm13
.byte 69,15,89,238 // mulps %xmm14,%xmm13
.byte 68,15,88,238 // addps %xmm6,%xmm13
@@ -44253,14 +44253,14 @@ _sk_saturation_sse2:
.byte 68,15,84,211 // andps %xmm3,%xmm10
.byte 68,15,84,203 // andps %xmm3,%xmm9
.byte 15,84,195 // andps %xmm3,%xmm0
- .byte 68,15,40,5,39,88,1,0 // movaps 0x15827(%rip),%xmm8 # 39370 <_sk_srcover_bgra_8888_sse2_lowp+0xc6c>
+ .byte 68,15,40,5,135,89,1,0 // movaps 0x15987(%rip),%xmm8 # 394d0 <_sk_srcover_bgra_8888_sse2_lowp+0xc5c>
.byte 15,40,214 // movaps %xmm6,%xmm2
.byte 65,15,89,208 // mulps %xmm8,%xmm2
- .byte 15,40,13,41,88,1,0 // movaps 0x15829(%rip),%xmm1 # 39380 <_sk_srcover_bgra_8888_sse2_lowp+0xc7c>
+ .byte 15,40,13,137,89,1,0 // movaps 0x15989(%rip),%xmm1 # 394e0 <_sk_srcover_bgra_8888_sse2_lowp+0xc6c>
.byte 15,40,221 // movaps %xmm5,%xmm3
.byte 15,89,217 // mulps %xmm1,%xmm3
.byte 15,88,218 // addps %xmm2,%xmm3
- .byte 68,15,40,37,40,88,1,0 // movaps 0x15828(%rip),%xmm12 # 39390 <_sk_srcover_bgra_8888_sse2_lowp+0xc8c>
+ .byte 68,15,40,37,136,89,1,0 // movaps 0x15988(%rip),%xmm12 # 394f0 <_sk_srcover_bgra_8888_sse2_lowp+0xc7c>
.byte 69,15,89,236 // mulps %xmm12,%xmm13
.byte 68,15,88,235 // addps %xmm3,%xmm13
.byte 65,15,40,210 // movaps %xmm10,%xmm2
@@ -44305,7 +44305,7 @@ _sk_saturation_sse2:
.byte 15,40,223 // movaps %xmm7,%xmm3
.byte 15,40,236 // movaps %xmm4,%xmm5
.byte 15,89,221 // mulps %xmm5,%xmm3
- .byte 68,15,40,5,221,86,1,0 // movaps 0x156dd(%rip),%xmm8 # 392f0 <_sk_srcover_bgra_8888_sse2_lowp+0xbec>
+ .byte 68,15,40,5,61,88,1,0 // movaps 0x1583d(%rip),%xmm8 # 39450 <_sk_srcover_bgra_8888_sse2_lowp+0xbdc>
.byte 65,15,40,224 // movaps %xmm8,%xmm4
.byte 68,15,92,199 // subps %xmm7,%xmm8
.byte 15,88,253 // addps %xmm5,%xmm7
@@ -44406,14 +44406,14 @@ _sk_color_sse2:
.byte 68,15,40,213 // movaps %xmm5,%xmm10
.byte 69,15,89,208 // mulps %xmm8,%xmm10
.byte 65,15,40,208 // movaps %xmm8,%xmm2
- .byte 68,15,40,45,235,85,1,0 // movaps 0x155eb(%rip),%xmm13 # 39370 <_sk_srcover_bgra_8888_sse2_lowp+0xc6c>
+ .byte 68,15,40,45,75,87,1,0 // movaps 0x1574b(%rip),%xmm13 # 394d0 <_sk_srcover_bgra_8888_sse2_lowp+0xc5c>
.byte 68,15,40,198 // movaps %xmm6,%xmm8
.byte 69,15,89,197 // mulps %xmm13,%xmm8
- .byte 68,15,40,53,235,85,1,0 // movaps 0x155eb(%rip),%xmm14 # 39380 <_sk_srcover_bgra_8888_sse2_lowp+0xc7c>
+ .byte 68,15,40,53,75,87,1,0 // movaps 0x1574b(%rip),%xmm14 # 394e0 <_sk_srcover_bgra_8888_sse2_lowp+0xc6c>
.byte 65,15,40,195 // movaps %xmm11,%xmm0
.byte 65,15,89,198 // mulps %xmm14,%xmm0
.byte 65,15,88,192 // addps %xmm8,%xmm0
- .byte 68,15,40,29,231,85,1,0 // movaps 0x155e7(%rip),%xmm11 # 39390 <_sk_srcover_bgra_8888_sse2_lowp+0xc8c>
+ .byte 68,15,40,29,71,87,1,0 // movaps 0x15747(%rip),%xmm11 # 394f0 <_sk_srcover_bgra_8888_sse2_lowp+0xc7c>
.byte 69,15,89,227 // mulps %xmm11,%xmm12
.byte 68,15,88,224 // addps %xmm0,%xmm12
.byte 65,15,40,193 // movaps %xmm9,%xmm0
@@ -44421,7 +44421,7 @@ _sk_color_sse2:
.byte 69,15,40,250 // movaps %xmm10,%xmm15
.byte 69,15,89,254 // mulps %xmm14,%xmm15
.byte 68,15,88,248 // addps %xmm0,%xmm15
- .byte 68,15,40,5,35,85,1,0 // movaps 0x15523(%rip),%xmm8 # 392f0 <_sk_srcover_bgra_8888_sse2_lowp+0xbec>
+ .byte 68,15,40,5,131,86,1,0 // movaps 0x15683(%rip),%xmm8 # 39450 <_sk_srcover_bgra_8888_sse2_lowp+0xbdc>
.byte 65,15,40,224 // movaps %xmm8,%xmm4
.byte 15,92,226 // subps %xmm2,%xmm4
.byte 15,89,252 // mulps %xmm4,%xmm7
@@ -44557,15 +44557,15 @@ _sk_luminosity_sse2:
.byte 68,15,40,205 // movaps %xmm5,%xmm9
.byte 68,15,89,204 // mulps %xmm4,%xmm9
.byte 15,89,222 // mulps %xmm6,%xmm3
- .byte 68,15,40,37,170,83,1,0 // movaps 0x153aa(%rip),%xmm12 # 39370 <_sk_srcover_bgra_8888_sse2_lowp+0xc6c>
+ .byte 68,15,40,37,10,85,1,0 // movaps 0x1550a(%rip),%xmm12 # 394d0 <_sk_srcover_bgra_8888_sse2_lowp+0xc5c>
.byte 68,15,40,199 // movaps %xmm7,%xmm8
.byte 69,15,89,196 // mulps %xmm12,%xmm8
- .byte 68,15,40,45,170,83,1,0 // movaps 0x153aa(%rip),%xmm13 # 39380 <_sk_srcover_bgra_8888_sse2_lowp+0xc7c>
+ .byte 68,15,40,45,10,85,1,0 // movaps 0x1550a(%rip),%xmm13 # 394e0 <_sk_srcover_bgra_8888_sse2_lowp+0xc6c>
.byte 68,15,40,241 // movaps %xmm1,%xmm14
.byte 69,15,89,245 // mulps %xmm13,%xmm14
.byte 69,15,88,240 // addps %xmm8,%xmm14
- .byte 68,15,40,29,166,83,1,0 // movaps 0x153a6(%rip),%xmm11 # 39390 <_sk_srcover_bgra_8888_sse2_lowp+0xc8c>
- .byte 68,15,40,5,254,82,1,0 // movaps 0x152fe(%rip),%xmm8 # 392f0 <_sk_srcover_bgra_8888_sse2_lowp+0xbec>
+ .byte 68,15,40,29,6,85,1,0 // movaps 0x15506(%rip),%xmm11 # 394f0 <_sk_srcover_bgra_8888_sse2_lowp+0xc7c>
+ .byte 68,15,40,5,94,84,1,0 // movaps 0x1545e(%rip),%xmm8 # 39450 <_sk_srcover_bgra_8888_sse2_lowp+0xbdc>
.byte 69,15,40,248 // movaps %xmm8,%xmm15
.byte 65,15,40,194 // movaps %xmm10,%xmm0
.byte 68,15,92,248 // subps %xmm0,%xmm15
@@ -44709,7 +44709,7 @@ _sk_srcover_rgba_8888_sse2:
.byte 15,133,228,0,0,0 // jne 242dd <_sk_srcover_rgba_8888_sse2+0x104>
.byte 243,69,15,111,4,128 // movdqu (%r8,%rax,4),%xmm8
.byte 72,133,255 // test %rdi,%rdi
- .byte 102,15,111,53,150,81,1,0 // movdqa 0x15196(%rip),%xmm6 # 393a0 <_sk_srcover_bgra_8888_sse2_lowp+0xc9c>
+ .byte 102,15,111,53,246,82,1,0 // movdqa 0x152f6(%rip),%xmm6 # 39500 <_sk_srcover_bgra_8888_sse2_lowp+0xc8c>
.byte 102,65,15,111,224 // movdqa %xmm8,%xmm4
.byte 102,15,219,230 // pand %xmm6,%xmm4
.byte 15,91,228 // cvtdq2ps %xmm4,%xmm4
@@ -44723,9 +44723,9 @@ _sk_srcover_rgba_8888_sse2:
.byte 15,91,247 // cvtdq2ps %xmm7,%xmm6
.byte 102,65,15,114,208,24 // psrld $0x18,%xmm8
.byte 65,15,91,248 // cvtdq2ps %xmm8,%xmm7
- .byte 68,15,40,5,166,80,1,0 // movaps 0x150a6(%rip),%xmm8 # 392f0 <_sk_srcover_bgra_8888_sse2_lowp+0xbec>
+ .byte 68,15,40,5,6,82,1,0 // movaps 0x15206(%rip),%xmm8 # 39450 <_sk_srcover_bgra_8888_sse2_lowp+0xbdc>
.byte 68,15,92,195 // subps %xmm3,%xmm8
- .byte 68,15,40,37,122,81,1,0 // movaps 0x1517a(%rip),%xmm12 # 393d0 <_sk_srcover_bgra_8888_sse2_lowp+0xccc>
+ .byte 68,15,40,37,218,82,1,0 // movaps 0x152da(%rip),%xmm12 # 39530 <_sk_srcover_bgra_8888_sse2_lowp+0xcbc>
.byte 65,15,89,196 // mulps %xmm12,%xmm0
.byte 69,15,40,200 // movaps %xmm8,%xmm9
.byte 68,15,89,204 // mulps %xmm4,%xmm9
@@ -44804,7 +44804,7 @@ _sk_srcover_bgra_8888_sse2:
.byte 15,133,228,0,0,0 // jne 2445e <_sk_srcover_bgra_8888_sse2+0x104>
.byte 243,69,15,111,4,128 // movdqu (%r8,%rax,4),%xmm8
.byte 72,133,255 // test %rdi,%rdi
- .byte 102,15,111,37,21,80,1,0 // movdqa 0x15015(%rip),%xmm4 # 393a0 <_sk_srcover_bgra_8888_sse2_lowp+0xc9c>
+ .byte 102,15,111,37,117,81,1,0 // movdqa 0x15175(%rip),%xmm4 # 39500 <_sk_srcover_bgra_8888_sse2_lowp+0xc8c>
.byte 102,65,15,111,232 // movdqa %xmm8,%xmm5
.byte 102,15,219,236 // pand %xmm4,%xmm5
.byte 15,91,245 // cvtdq2ps %xmm5,%xmm6
@@ -44818,9 +44818,9 @@ _sk_srcover_bgra_8888_sse2:
.byte 15,91,231 // cvtdq2ps %xmm7,%xmm4
.byte 102,65,15,114,208,24 // psrld $0x18,%xmm8
.byte 65,15,91,248 // cvtdq2ps %xmm8,%xmm7
- .byte 68,15,40,5,37,79,1,0 // movaps 0x14f25(%rip),%xmm8 # 392f0 <_sk_srcover_bgra_8888_sse2_lowp+0xbec>
+ .byte 68,15,40,5,133,80,1,0 // movaps 0x15085(%rip),%xmm8 # 39450 <_sk_srcover_bgra_8888_sse2_lowp+0xbdc>
.byte 68,15,92,195 // subps %xmm3,%xmm8
- .byte 68,15,40,37,249,79,1,0 // movaps 0x14ff9(%rip),%xmm12 # 393d0 <_sk_srcover_bgra_8888_sse2_lowp+0xccc>
+ .byte 68,15,40,37,89,81,1,0 // movaps 0x15159(%rip),%xmm12 # 39530 <_sk_srcover_bgra_8888_sse2_lowp+0xcbc>
.byte 65,15,89,196 // mulps %xmm12,%xmm0
.byte 69,15,40,200 // movaps %xmm8,%xmm9
.byte 68,15,89,204 // mulps %xmm4,%xmm9
@@ -44900,7 +44900,7 @@ HIDDEN _sk_clamp_1_sse2
.globl _sk_clamp_1_sse2
FUNCTION(_sk_clamp_1_sse2)
_sk_clamp_1_sse2:
- .byte 68,15,40,5,245,77,1,0 // movaps 0x14df5(%rip),%xmm8 # 392f0 <_sk_srcover_bgra_8888_sse2_lowp+0xbec>
+ .byte 68,15,40,5,85,79,1,0 // movaps 0x14f55(%rip),%xmm8 # 39450 <_sk_srcover_bgra_8888_sse2_lowp+0xbdc>
.byte 65,15,93,192 // minps %xmm8,%xmm0
.byte 65,15,93,200 // minps %xmm8,%xmm1
.byte 65,15,93,208 // minps %xmm8,%xmm2
@@ -44912,7 +44912,7 @@ HIDDEN _sk_clamp_a_sse2
.globl _sk_clamp_a_sse2
FUNCTION(_sk_clamp_a_sse2)
_sk_clamp_a_sse2:
- .byte 15,93,29,218,77,1,0 // minps 0x14dda(%rip),%xmm3 # 392f0 <_sk_srcover_bgra_8888_sse2_lowp+0xbec>
+ .byte 15,93,29,58,79,1,0 // minps 0x14f3a(%rip),%xmm3 # 39450 <_sk_srcover_bgra_8888_sse2_lowp+0xbdc>
.byte 15,93,195 // minps %xmm3,%xmm0
.byte 15,93,203 // minps %xmm3,%xmm1
.byte 15,93,211 // minps %xmm3,%xmm2
@@ -44923,7 +44923,7 @@ HIDDEN _sk_clamp_a_dst_sse2
.globl _sk_clamp_a_dst_sse2
FUNCTION(_sk_clamp_a_dst_sse2)
_sk_clamp_a_dst_sse2:
- .byte 15,93,61,198,77,1,0 // minps 0x14dc6(%rip),%xmm7 # 392f0 <_sk_srcover_bgra_8888_sse2_lowp+0xbec>
+ .byte 15,93,61,38,79,1,0 // minps 0x14f26(%rip),%xmm7 # 39450 <_sk_srcover_bgra_8888_sse2_lowp+0xbdc>
.byte 15,93,231 // minps %xmm7,%xmm4
.byte 15,93,239 // minps %xmm7,%xmm5
.byte 15,93,247 // minps %xmm7,%xmm6
@@ -44958,7 +44958,7 @@ HIDDEN _sk_invert_sse2
.globl _sk_invert_sse2
FUNCTION(_sk_invert_sse2)
_sk_invert_sse2:
- .byte 68,15,40,5,130,77,1,0 // movaps 0x14d82(%rip),%xmm8 # 392f0 <_sk_srcover_bgra_8888_sse2_lowp+0xbec>
+ .byte 68,15,40,5,226,78,1,0 // movaps 0x14ee2(%rip),%xmm8 # 39450 <_sk_srcover_bgra_8888_sse2_lowp+0xbdc>
.byte 69,15,40,200 // movaps %xmm8,%xmm9
.byte 68,15,92,200 // subps %xmm0,%xmm9
.byte 69,15,40,208 // movaps %xmm8,%xmm10
@@ -45019,10 +45019,10 @@ HIDDEN _sk_unpremul_sse2
.globl _sk_unpremul_sse2
FUNCTION(_sk_unpremul_sse2)
_sk_unpremul_sse2:
- .byte 68,15,40,5,16,77,1,0 // movaps 0x14d10(%rip),%xmm8 # 392f0 <_sk_srcover_bgra_8888_sse2_lowp+0xbec>
+ .byte 68,15,40,5,112,78,1,0 // movaps 0x14e70(%rip),%xmm8 # 39450 <_sk_srcover_bgra_8888_sse2_lowp+0xbdc>
.byte 68,15,94,195 // divps %xmm3,%xmm8
.byte 69,15,40,200 // movaps %xmm8,%xmm9
- .byte 68,15,194,13,239,77,1,0,1 // cmpltps 0x14def(%rip),%xmm9 # 393e0 <_sk_srcover_bgra_8888_sse2_lowp+0xcdc>
+ .byte 68,15,194,13,79,79,1,0,1 // cmpltps 0x14f4f(%rip),%xmm9 # 39540 <_sk_srcover_bgra_8888_sse2_lowp+0xccc>
.byte 69,15,84,200 // andps %xmm8,%xmm9
.byte 65,15,89,193 // mulps %xmm9,%xmm0
.byte 65,15,89,201 // mulps %xmm9,%xmm1
@@ -45034,20 +45034,20 @@ HIDDEN _sk_from_srgb_sse2
.globl _sk_from_srgb_sse2
FUNCTION(_sk_from_srgb_sse2)
_sk_from_srgb_sse2:
- .byte 68,15,40,5,227,77,1,0 // movaps 0x14de3(%rip),%xmm8 # 393f0 <_sk_srcover_bgra_8888_sse2_lowp+0xcec>
+ .byte 68,15,40,5,67,79,1,0 // movaps 0x14f43(%rip),%xmm8 # 39550 <_sk_srcover_bgra_8888_sse2_lowp+0xcdc>
.byte 68,15,40,232 // movaps %xmm0,%xmm13
.byte 69,15,89,232 // mulps %xmm8,%xmm13
.byte 68,15,40,216 // movaps %xmm0,%xmm11
.byte 69,15,89,219 // mulps %xmm11,%xmm11
- .byte 68,15,40,13,75,77,1,0 // movaps 0x14d4b(%rip),%xmm9 # 39370 <_sk_srcover_bgra_8888_sse2_lowp+0xc6c>
+ .byte 68,15,40,13,171,78,1,0 // movaps 0x14eab(%rip),%xmm9 # 394d0 <_sk_srcover_bgra_8888_sse2_lowp+0xc5c>
.byte 68,15,40,240 // movaps %xmm0,%xmm14
.byte 69,15,89,241 // mulps %xmm9,%xmm14
- .byte 68,15,40,21,203,77,1,0 // movaps 0x14dcb(%rip),%xmm10 # 39400 <_sk_srcover_bgra_8888_sse2_lowp+0xcfc>
+ .byte 68,15,40,21,43,79,1,0 // movaps 0x14f2b(%rip),%xmm10 # 39560 <_sk_srcover_bgra_8888_sse2_lowp+0xcec>
.byte 69,15,88,242 // addps %xmm10,%xmm14
.byte 69,15,89,243 // mulps %xmm11,%xmm14
- .byte 68,15,40,29,203,77,1,0 // movaps 0x14dcb(%rip),%xmm11 # 39410 <_sk_srcover_bgra_8888_sse2_lowp+0xd0c>
+ .byte 68,15,40,29,43,79,1,0 // movaps 0x14f2b(%rip),%xmm11 # 39570 <_sk_srcover_bgra_8888_sse2_lowp+0xcfc>
.byte 69,15,88,243 // addps %xmm11,%xmm14
- .byte 68,15,40,37,207,77,1,0 // movaps 0x14dcf(%rip),%xmm12 # 39420 <_sk_srcover_bgra_8888_sse2_lowp+0xd1c>
+ .byte 68,15,40,37,47,79,1,0 // movaps 0x14f2f(%rip),%xmm12 # 39580 <_sk_srcover_bgra_8888_sse2_lowp+0xd0c>
.byte 65,15,194,196,1 // cmpltps %xmm12,%xmm0
.byte 68,15,84,232 // andps %xmm0,%xmm13
.byte 65,15,85,198 // andnps %xmm14,%xmm0
@@ -45083,20 +45083,20 @@ HIDDEN _sk_from_srgb_dst_sse2
.globl _sk_from_srgb_dst_sse2
FUNCTION(_sk_from_srgb_dst_sse2)
_sk_from_srgb_dst_sse2:
- .byte 68,15,40,5,32,77,1,0 // movaps 0x14d20(%rip),%xmm8 # 393f0 <_sk_srcover_bgra_8888_sse2_lowp+0xcec>
+ .byte 68,15,40,5,128,78,1,0 // movaps 0x14e80(%rip),%xmm8 # 39550 <_sk_srcover_bgra_8888_sse2_lowp+0xcdc>
.byte 68,15,40,236 // movaps %xmm4,%xmm13
.byte 69,15,89,232 // mulps %xmm8,%xmm13
.byte 68,15,40,220 // movaps %xmm4,%xmm11
.byte 69,15,89,219 // mulps %xmm11,%xmm11
- .byte 68,15,40,13,136,76,1,0 // movaps 0x14c88(%rip),%xmm9 # 39370 <_sk_srcover_bgra_8888_sse2_lowp+0xc6c>
+ .byte 68,15,40,13,232,77,1,0 // movaps 0x14de8(%rip),%xmm9 # 394d0 <_sk_srcover_bgra_8888_sse2_lowp+0xc5c>
.byte 68,15,40,244 // movaps %xmm4,%xmm14
.byte 69,15,89,241 // mulps %xmm9,%xmm14
- .byte 68,15,40,21,8,77,1,0 // movaps 0x14d08(%rip),%xmm10 # 39400 <_sk_srcover_bgra_8888_sse2_lowp+0xcfc>
+ .byte 68,15,40,21,104,78,1,0 // movaps 0x14e68(%rip),%xmm10 # 39560 <_sk_srcover_bgra_8888_sse2_lowp+0xcec>
.byte 69,15,88,242 // addps %xmm10,%xmm14
.byte 69,15,89,243 // mulps %xmm11,%xmm14
- .byte 68,15,40,29,8,77,1,0 // movaps 0x14d08(%rip),%xmm11 # 39410 <_sk_srcover_bgra_8888_sse2_lowp+0xd0c>
+ .byte 68,15,40,29,104,78,1,0 // movaps 0x14e68(%rip),%xmm11 # 39570 <_sk_srcover_bgra_8888_sse2_lowp+0xcfc>
.byte 69,15,88,243 // addps %xmm11,%xmm14
- .byte 68,15,40,37,12,77,1,0 // movaps 0x14d0c(%rip),%xmm12 # 39420 <_sk_srcover_bgra_8888_sse2_lowp+0xd1c>
+ .byte 68,15,40,37,108,78,1,0 // movaps 0x14e6c(%rip),%xmm12 # 39580 <_sk_srcover_bgra_8888_sse2_lowp+0xd0c>
.byte 65,15,194,228,1 // cmpltps %xmm12,%xmm4
.byte 68,15,84,236 // andps %xmm4,%xmm13
.byte 65,15,85,230 // andnps %xmm14,%xmm4
@@ -45133,22 +45133,22 @@ HIDDEN _sk_to_srgb_sse2
FUNCTION(_sk_to_srgb_sse2)
_sk_to_srgb_sse2:
.byte 68,15,82,232 // rsqrtps %xmm0,%xmm13
- .byte 68,15,40,5,153,76,1,0 // movaps 0x14c99(%rip),%xmm8 # 39430 <_sk_srcover_bgra_8888_sse2_lowp+0xd2c>
+ .byte 68,15,40,5,249,77,1,0 // movaps 0x14df9(%rip),%xmm8 # 39590 <_sk_srcover_bgra_8888_sse2_lowp+0xd1c>
.byte 68,15,40,240 // movaps %xmm0,%xmm14
.byte 69,15,89,240 // mulps %xmm8,%xmm14
- .byte 68,15,40,13,153,76,1,0 // movaps 0x14c99(%rip),%xmm9 # 39440 <_sk_srcover_bgra_8888_sse2_lowp+0xd3c>
+ .byte 68,15,40,13,249,77,1,0 // movaps 0x14df9(%rip),%xmm9 # 395a0 <_sk_srcover_bgra_8888_sse2_lowp+0xd2c>
.byte 69,15,40,253 // movaps %xmm13,%xmm15
.byte 69,15,89,249 // mulps %xmm9,%xmm15
- .byte 68,15,40,21,153,76,1,0 // movaps 0x14c99(%rip),%xmm10 # 39450 <_sk_srcover_bgra_8888_sse2_lowp+0xd4c>
+ .byte 68,15,40,21,249,77,1,0 // movaps 0x14df9(%rip),%xmm10 # 395b0 <_sk_srcover_bgra_8888_sse2_lowp+0xd3c>
.byte 69,15,88,250 // addps %xmm10,%xmm15
.byte 69,15,89,253 // mulps %xmm13,%xmm15
- .byte 68,15,40,29,153,76,1,0 // movaps 0x14c99(%rip),%xmm11 # 39460 <_sk_srcover_bgra_8888_sse2_lowp+0xd5c>
+ .byte 68,15,40,29,249,77,1,0 // movaps 0x14df9(%rip),%xmm11 # 395c0 <_sk_srcover_bgra_8888_sse2_lowp+0xd4c>
.byte 69,15,88,251 // addps %xmm11,%xmm15
- .byte 68,15,40,37,157,76,1,0 // movaps 0x14c9d(%rip),%xmm12 # 39470 <_sk_srcover_bgra_8888_sse2_lowp+0xd6c>
+ .byte 68,15,40,37,253,77,1,0 // movaps 0x14dfd(%rip),%xmm12 # 395d0 <_sk_srcover_bgra_8888_sse2_lowp+0xd5c>
.byte 69,15,88,236 // addps %xmm12,%xmm13
.byte 69,15,83,237 // rcpps %xmm13,%xmm13
.byte 69,15,89,239 // mulps %xmm15,%xmm13
- .byte 68,15,40,61,153,76,1,0 // movaps 0x14c99(%rip),%xmm15 # 39480 <_sk_srcover_bgra_8888_sse2_lowp+0xd7c>
+ .byte 68,15,40,61,249,77,1,0 // movaps 0x14df9(%rip),%xmm15 # 395e0 <_sk_srcover_bgra_8888_sse2_lowp+0xd6c>
.byte 65,15,194,199,1 // cmpltps %xmm15,%xmm0
.byte 68,15,84,240 // andps %xmm0,%xmm14
.byte 65,15,85,197 // andnps %xmm13,%xmm0
@@ -45200,7 +45200,7 @@ _sk_rgb_to_hsl_sse2:
.byte 68,15,93,216 // minps %xmm0,%xmm11
.byte 65,15,40,202 // movaps %xmm10,%xmm1
.byte 65,15,92,203 // subps %xmm11,%xmm1
- .byte 68,15,40,45,76,74,1,0 // movaps 0x14a4c(%rip),%xmm13 # 392f0 <_sk_srcover_bgra_8888_sse2_lowp+0xbec>
+ .byte 68,15,40,45,172,75,1,0 // movaps 0x14bac(%rip),%xmm13 # 39450 <_sk_srcover_bgra_8888_sse2_lowp+0xbdc>
.byte 68,15,94,233 // divps %xmm1,%xmm13
.byte 65,15,40,194 // movaps %xmm10,%xmm0
.byte 65,15,194,192,0 // cmpeqps %xmm8,%xmm0
@@ -45209,30 +45209,30 @@ _sk_rgb_to_hsl_sse2:
.byte 69,15,89,229 // mulps %xmm13,%xmm12
.byte 69,15,40,241 // movaps %xmm9,%xmm14
.byte 68,15,194,242,1 // cmpltps %xmm2,%xmm14
- .byte 68,15,84,53,194,75,1,0 // andps 0x14bc2(%rip),%xmm14 # 39490 <_sk_srcover_bgra_8888_sse2_lowp+0xd8c>
+ .byte 68,15,84,53,34,77,1,0 // andps 0x14d22(%rip),%xmm14 # 395f0 <_sk_srcover_bgra_8888_sse2_lowp+0xd7c>
.byte 69,15,88,244 // addps %xmm12,%xmm14
.byte 69,15,40,250 // movaps %xmm10,%xmm15
.byte 69,15,194,249,0 // cmpeqps %xmm9,%xmm15
.byte 65,15,92,208 // subps %xmm8,%xmm2
.byte 65,15,89,213 // mulps %xmm13,%xmm2
- .byte 68,15,40,37,181,75,1,0 // movaps 0x14bb5(%rip),%xmm12 # 394a0 <_sk_srcover_bgra_8888_sse2_lowp+0xd9c>
+ .byte 68,15,40,37,21,77,1,0 // movaps 0x14d15(%rip),%xmm12 # 39600 <_sk_srcover_bgra_8888_sse2_lowp+0xd8c>
.byte 65,15,88,212 // addps %xmm12,%xmm2
.byte 69,15,92,193 // subps %xmm9,%xmm8
.byte 69,15,89,197 // mulps %xmm13,%xmm8
- .byte 68,15,88,5,177,75,1,0 // addps 0x14bb1(%rip),%xmm8 # 394b0 <_sk_srcover_bgra_8888_sse2_lowp+0xdac>
+ .byte 68,15,88,5,17,77,1,0 // addps 0x14d11(%rip),%xmm8 # 39610 <_sk_srcover_bgra_8888_sse2_lowp+0xd9c>
.byte 65,15,84,215 // andps %xmm15,%xmm2
.byte 69,15,85,248 // andnps %xmm8,%xmm15
.byte 68,15,86,250 // orps %xmm2,%xmm15
.byte 68,15,84,240 // andps %xmm0,%xmm14
.byte 65,15,85,199 // andnps %xmm15,%xmm0
.byte 65,15,86,198 // orps %xmm14,%xmm0
- .byte 15,89,5,162,75,1,0 // mulps 0x14ba2(%rip),%xmm0 # 394c0 <_sk_srcover_bgra_8888_sse2_lowp+0xdbc>
+ .byte 15,89,5,2,77,1,0 // mulps 0x14d02(%rip),%xmm0 # 39620 <_sk_srcover_bgra_8888_sse2_lowp+0xdac>
.byte 69,15,40,194 // movaps %xmm10,%xmm8
.byte 69,15,194,195,4 // cmpneqps %xmm11,%xmm8
.byte 65,15,84,192 // andps %xmm8,%xmm0
.byte 69,15,92,226 // subps %xmm10,%xmm12
.byte 69,15,88,211 // addps %xmm11,%xmm10
- .byte 68,15,40,13,165,73,1,0 // movaps 0x149a5(%rip),%xmm9 # 392e0 <_sk_srcover_bgra_8888_sse2_lowp+0xbdc>
+ .byte 68,15,40,13,5,75,1,0 // movaps 0x14b05(%rip),%xmm9 # 39440 <_sk_srcover_bgra_8888_sse2_lowp+0xbcc>
.byte 65,15,40,210 // movaps %xmm10,%xmm2
.byte 65,15,89,209 // mulps %xmm9,%xmm2
.byte 68,15,194,202,1 // cmpltps %xmm2,%xmm9
@@ -45256,7 +45256,7 @@ _sk_hsl_to_rgb_sse2:
.byte 15,41,92,36,168 // movaps %xmm3,-0x58(%rsp)
.byte 68,15,40,218 // movaps %xmm2,%xmm11
.byte 15,40,240 // movaps %xmm0,%xmm6
- .byte 68,15,40,13,84,73,1,0 // movaps 0x14954(%rip),%xmm9 # 392e0 <_sk_srcover_bgra_8888_sse2_lowp+0xbdc>
+ .byte 68,15,40,13,180,74,1,0 // movaps 0x14ab4(%rip),%xmm9 # 39440 <_sk_srcover_bgra_8888_sse2_lowp+0xbcc>
.byte 69,15,40,209 // movaps %xmm9,%xmm10
.byte 69,15,194,211,2 // cmpleps %xmm11,%xmm10
.byte 15,40,193 // movaps %xmm1,%xmm0
@@ -45273,28 +45273,28 @@ _sk_hsl_to_rgb_sse2:
.byte 69,15,88,211 // addps %xmm11,%xmm10
.byte 69,15,88,219 // addps %xmm11,%xmm11
.byte 69,15,92,218 // subps %xmm10,%xmm11
- .byte 15,40,5,253,74,1,0 // movaps 0x14afd(%rip),%xmm0 # 394d0 <_sk_srcover_bgra_8888_sse2_lowp+0xdcc>
+ .byte 15,40,5,93,76,1,0 // movaps 0x14c5d(%rip),%xmm0 # 39630 <_sk_srcover_bgra_8888_sse2_lowp+0xdbc>
.byte 15,88,198 // addps %xmm6,%xmm0
.byte 243,15,91,200 // cvttps2dq %xmm0,%xmm1
.byte 15,91,201 // cvtdq2ps %xmm1,%xmm1
.byte 15,40,216 // movaps %xmm0,%xmm3
.byte 15,194,217,1 // cmpltps %xmm1,%xmm3
- .byte 15,84,29,5,73,1,0 // andps 0x14905(%rip),%xmm3 # 392f0 <_sk_srcover_bgra_8888_sse2_lowp+0xbec>
+ .byte 15,84,29,101,74,1,0 // andps 0x14a65(%rip),%xmm3 # 39450 <_sk_srcover_bgra_8888_sse2_lowp+0xbdc>
.byte 15,92,203 // subps %xmm3,%xmm1
.byte 15,92,193 // subps %xmm1,%xmm0
- .byte 68,15,40,45,231,74,1,0 // movaps 0x14ae7(%rip),%xmm13 # 394e0 <_sk_srcover_bgra_8888_sse2_lowp+0xddc>
+ .byte 68,15,40,45,71,76,1,0 // movaps 0x14c47(%rip),%xmm13 # 39640 <_sk_srcover_bgra_8888_sse2_lowp+0xdcc>
.byte 69,15,40,197 // movaps %xmm13,%xmm8
.byte 68,15,194,192,2 // cmpleps %xmm0,%xmm8
.byte 69,15,40,242 // movaps %xmm10,%xmm14
.byte 69,15,92,243 // subps %xmm11,%xmm14
.byte 65,15,40,217 // movaps %xmm9,%xmm3
.byte 15,194,216,2 // cmpleps %xmm0,%xmm3
- .byte 15,40,21,167,74,1,0 // movaps 0x14aa7(%rip),%xmm2 # 394c0 <_sk_srcover_bgra_8888_sse2_lowp+0xdbc>
+ .byte 15,40,21,7,76,1,0 // movaps 0x14c07(%rip),%xmm2 # 39620 <_sk_srcover_bgra_8888_sse2_lowp+0xdac>
.byte 68,15,40,250 // movaps %xmm2,%xmm15
.byte 68,15,194,248,2 // cmpleps %xmm0,%xmm15
- .byte 15,40,13,103,74,1,0 // movaps 0x14a67(%rip),%xmm1 # 39490 <_sk_srcover_bgra_8888_sse2_lowp+0xd8c>
+ .byte 15,40,13,199,75,1,0 // movaps 0x14bc7(%rip),%xmm1 # 395f0 <_sk_srcover_bgra_8888_sse2_lowp+0xd7c>
.byte 15,89,193 // mulps %xmm1,%xmm0
- .byte 15,40,45,125,74,1,0 // movaps 0x14a7d(%rip),%xmm5 # 394b0 <_sk_srcover_bgra_8888_sse2_lowp+0xdac>
+ .byte 15,40,45,221,75,1,0 // movaps 0x14bdd(%rip),%xmm5 # 39610 <_sk_srcover_bgra_8888_sse2_lowp+0xd9c>
.byte 15,40,229 // movaps %xmm5,%xmm4
.byte 15,92,224 // subps %xmm0,%xmm4
.byte 65,15,89,230 // mulps %xmm14,%xmm4
@@ -45317,7 +45317,7 @@ _sk_hsl_to_rgb_sse2:
.byte 15,91,192 // cvtdq2ps %xmm0,%xmm0
.byte 15,40,222 // movaps %xmm6,%xmm3
.byte 15,194,216,1 // cmpltps %xmm0,%xmm3
- .byte 15,84,29,98,72,1,0 // andps 0x14862(%rip),%xmm3 # 392f0 <_sk_srcover_bgra_8888_sse2_lowp+0xbec>
+ .byte 15,84,29,194,73,1,0 // andps 0x149c2(%rip),%xmm3 # 39450 <_sk_srcover_bgra_8888_sse2_lowp+0xbdc>
.byte 15,92,195 // subps %xmm3,%xmm0
.byte 68,15,40,230 // movaps %xmm6,%xmm12
.byte 68,15,92,224 // subps %xmm0,%xmm12
@@ -45347,12 +45347,12 @@ _sk_hsl_to_rgb_sse2:
.byte 15,40,124,36,136 // movaps -0x78(%rsp),%xmm7
.byte 15,40,231 // movaps %xmm7,%xmm4
.byte 15,85,227 // andnps %xmm3,%xmm4
- .byte 15,88,53,234,73,1,0 // addps 0x149ea(%rip),%xmm6 # 394f0 <_sk_srcover_bgra_8888_sse2_lowp+0xdec>
+ .byte 15,88,53,74,75,1,0 // addps 0x14b4a(%rip),%xmm6 # 39650 <_sk_srcover_bgra_8888_sse2_lowp+0xddc>
.byte 243,15,91,198 // cvttps2dq %xmm6,%xmm0
.byte 15,91,192 // cvtdq2ps %xmm0,%xmm0
.byte 15,40,222 // movaps %xmm6,%xmm3
.byte 15,194,216,1 // cmpltps %xmm0,%xmm3
- .byte 15,84,29,213,71,1,0 // andps 0x147d5(%rip),%xmm3 # 392f0 <_sk_srcover_bgra_8888_sse2_lowp+0xbec>
+ .byte 15,84,29,53,73,1,0 // andps 0x14935(%rip),%xmm3 # 39450 <_sk_srcover_bgra_8888_sse2_lowp+0xbdc>
.byte 15,92,195 // subps %xmm3,%xmm0
.byte 15,92,240 // subps %xmm0,%xmm6
.byte 15,89,206 // mulps %xmm6,%xmm1
@@ -45420,9 +45420,9 @@ _sk_scale_u8_sse2:
.byte 102,71,15,110,4,16 // movd (%r8,%r10,1),%xmm8
.byte 102,68,15,96,192 // punpcklbw %xmm0,%xmm8
.byte 102,68,15,97,192 // punpcklwd %xmm0,%xmm8
- .byte 102,68,15,219,5,168,71,1,0 // pand 0x147a8(%rip),%xmm8 # 393a0 <_sk_srcover_bgra_8888_sse2_lowp+0xc9c>
+ .byte 102,68,15,219,5,8,73,1,0 // pand 0x14908(%rip),%xmm8 # 39500 <_sk_srcover_bgra_8888_sse2_lowp+0xc8c>
.byte 69,15,91,192 // cvtdq2ps %xmm8,%xmm8
- .byte 68,15,89,5,252,72,1,0 // mulps 0x148fc(%rip),%xmm8 # 39500 <_sk_srcover_bgra_8888_sse2_lowp+0xdfc>
+ .byte 68,15,89,5,92,74,1,0 // mulps 0x14a5c(%rip),%xmm8 # 39660 <_sk_srcover_bgra_8888_sse2_lowp+0xdec>
.byte 65,15,89,192 // mulps %xmm8,%xmm0
.byte 65,15,89,200 // mulps %xmm8,%xmm1
.byte 65,15,89,208 // mulps %xmm8,%xmm2
@@ -45467,17 +45467,17 @@ _sk_scale_565_sse2:
.byte 15,133,173,0,0,0 // jne 24d41 <_sk_scale_565_sse2+0xcc>
.byte 243,71,15,126,20,80 // movq (%r8,%r10,2),%xmm10
.byte 102,68,15,97,208 // punpcklwd %xmm0,%xmm10
- .byte 102,68,15,111,5,104,72,1,0 // movdqa 0x14868(%rip),%xmm8 # 39510 <_sk_srcover_bgra_8888_sse2_lowp+0xe0c>
+ .byte 102,68,15,111,5,200,73,1,0 // movdqa 0x149c8(%rip),%xmm8 # 39670 <_sk_srcover_bgra_8888_sse2_lowp+0xdfc>
.byte 102,69,15,219,194 // pand %xmm10,%xmm8
.byte 69,15,91,192 // cvtdq2ps %xmm8,%xmm8
- .byte 68,15,89,5,103,72,1,0 // mulps 0x14867(%rip),%xmm8 # 39520 <_sk_srcover_bgra_8888_sse2_lowp+0xe1c>
- .byte 102,68,15,111,13,110,72,1,0 // movdqa 0x1486e(%rip),%xmm9 # 39530 <_sk_srcover_bgra_8888_sse2_lowp+0xe2c>
+ .byte 68,15,89,5,199,73,1,0 // mulps 0x149c7(%rip),%xmm8 # 39680 <_sk_srcover_bgra_8888_sse2_lowp+0xe0c>
+ .byte 102,68,15,111,13,206,73,1,0 // movdqa 0x149ce(%rip),%xmm9 # 39690 <_sk_srcover_bgra_8888_sse2_lowp+0xe1c>
.byte 102,69,15,219,202 // pand %xmm10,%xmm9
.byte 69,15,91,201 // cvtdq2ps %xmm9,%xmm9
- .byte 68,15,89,13,109,72,1,0 // mulps 0x1486d(%rip),%xmm9 # 39540 <_sk_srcover_bgra_8888_sse2_lowp+0xe3c>
- .byte 102,68,15,219,21,116,72,1,0 // pand 0x14874(%rip),%xmm10 # 39550 <_sk_srcover_bgra_8888_sse2_lowp+0xe4c>
+ .byte 68,15,89,13,205,73,1,0 // mulps 0x149cd(%rip),%xmm9 # 396a0 <_sk_srcover_bgra_8888_sse2_lowp+0xe2c>
+ .byte 102,68,15,219,21,212,73,1,0 // pand 0x149d4(%rip),%xmm10 # 396b0 <_sk_srcover_bgra_8888_sse2_lowp+0xe3c>
.byte 69,15,91,210 // cvtdq2ps %xmm10,%xmm10
- .byte 68,15,89,21,120,72,1,0 // mulps 0x14878(%rip),%xmm10 # 39560 <_sk_srcover_bgra_8888_sse2_lowp+0xe5c>
+ .byte 68,15,89,21,216,73,1,0 // mulps 0x149d8(%rip),%xmm10 # 396c0 <_sk_srcover_bgra_8888_sse2_lowp+0xe4c>
.byte 68,15,40,219 // movaps %xmm3,%xmm11
.byte 68,15,194,223,1 // cmpltps %xmm7,%xmm11
.byte 69,15,40,225 // movaps %xmm9,%xmm12
@@ -45558,9 +45558,9 @@ _sk_lerp_u8_sse2:
.byte 102,71,15,110,4,16 // movd (%r8,%r10,1),%xmm8
.byte 102,68,15,96,192 // punpcklbw %xmm0,%xmm8
.byte 102,68,15,97,192 // punpcklwd %xmm0,%xmm8
- .byte 102,68,15,219,5,158,69,1,0 // pand 0x1459e(%rip),%xmm8 # 393a0 <_sk_srcover_bgra_8888_sse2_lowp+0xc9c>
+ .byte 102,68,15,219,5,254,70,1,0 // pand 0x146fe(%rip),%xmm8 # 39500 <_sk_srcover_bgra_8888_sse2_lowp+0xc8c>
.byte 69,15,91,192 // cvtdq2ps %xmm8,%xmm8
- .byte 68,15,89,5,242,70,1,0 // mulps 0x146f2(%rip),%xmm8 # 39500 <_sk_srcover_bgra_8888_sse2_lowp+0xdfc>
+ .byte 68,15,89,5,82,72,1,0 // mulps 0x14852(%rip),%xmm8 # 39660 <_sk_srcover_bgra_8888_sse2_lowp+0xdec>
.byte 15,92,196 // subps %xmm4,%xmm0
.byte 65,15,89,192 // mulps %xmm8,%xmm0
.byte 15,88,196 // addps %xmm4,%xmm0
@@ -45612,17 +45612,17 @@ _sk_lerp_565_sse2:
.byte 15,133,173,0,0,0 // jne 24f62 <_sk_lerp_565_sse2+0xcc>
.byte 243,71,15,126,4,80 // movq (%r8,%r10,2),%xmm8
.byte 102,68,15,97,192 // punpcklwd %xmm0,%xmm8
- .byte 102,68,15,111,13,71,70,1,0 // movdqa 0x14647(%rip),%xmm9 # 39510 <_sk_srcover_bgra_8888_sse2_lowp+0xe0c>
+ .byte 102,68,15,111,13,167,71,1,0 // movdqa 0x147a7(%rip),%xmm9 # 39670 <_sk_srcover_bgra_8888_sse2_lowp+0xdfc>
.byte 102,69,15,219,200 // pand %xmm8,%xmm9
.byte 69,15,91,201 // cvtdq2ps %xmm9,%xmm9
- .byte 68,15,89,13,70,70,1,0 // mulps 0x14646(%rip),%xmm9 # 39520 <_sk_srcover_bgra_8888_sse2_lowp+0xe1c>
- .byte 102,68,15,111,21,77,70,1,0 // movdqa 0x1464d(%rip),%xmm10 # 39530 <_sk_srcover_bgra_8888_sse2_lowp+0xe2c>
+ .byte 68,15,89,13,166,71,1,0 // mulps 0x147a6(%rip),%xmm9 # 39680 <_sk_srcover_bgra_8888_sse2_lowp+0xe0c>
+ .byte 102,68,15,111,21,173,71,1,0 // movdqa 0x147ad(%rip),%xmm10 # 39690 <_sk_srcover_bgra_8888_sse2_lowp+0xe1c>
.byte 102,69,15,219,208 // pand %xmm8,%xmm10
.byte 69,15,91,218 // cvtdq2ps %xmm10,%xmm11
- .byte 68,15,89,29,76,70,1,0 // mulps 0x1464c(%rip),%xmm11 # 39540 <_sk_srcover_bgra_8888_sse2_lowp+0xe3c>
- .byte 102,68,15,219,5,83,70,1,0 // pand 0x14653(%rip),%xmm8 # 39550 <_sk_srcover_bgra_8888_sse2_lowp+0xe4c>
+ .byte 68,15,89,29,172,71,1,0 // mulps 0x147ac(%rip),%xmm11 # 396a0 <_sk_srcover_bgra_8888_sse2_lowp+0xe2c>
+ .byte 102,68,15,219,5,179,71,1,0 // pand 0x147b3(%rip),%xmm8 # 396b0 <_sk_srcover_bgra_8888_sse2_lowp+0xe3c>
.byte 69,15,91,192 // cvtdq2ps %xmm8,%xmm8
- .byte 68,15,89,5,87,70,1,0 // mulps 0x14657(%rip),%xmm8 # 39560 <_sk_srcover_bgra_8888_sse2_lowp+0xe5c>
+ .byte 68,15,89,5,183,71,1,0 // mulps 0x147b7(%rip),%xmm8 # 396c0 <_sk_srcover_bgra_8888_sse2_lowp+0xe4c>
.byte 68,15,40,211 // movaps %xmm3,%xmm10
.byte 68,15,194,215,1 // cmpltps %xmm7,%xmm10
.byte 69,15,40,227 // movaps %xmm11,%xmm12
@@ -45679,7 +45679,7 @@ _sk_load_tables_sse2:
.byte 243,69,15,111,12,144 // movdqu (%r8,%rdx,4),%xmm9
.byte 65,86 // push %r14
.byte 83 // push %rbx
- .byte 102,68,15,111,5,198,67,1,0 // movdqa 0x143c6(%rip),%xmm8 # 393a0 <_sk_srcover_bgra_8888_sse2_lowp+0xc9c>
+ .byte 102,68,15,111,5,38,69,1,0 // movdqa 0x14526(%rip),%xmm8 # 39500 <_sk_srcover_bgra_8888_sse2_lowp+0xc8c>
.byte 102,65,15,111,193 // movdqa %xmm9,%xmm0
.byte 102,65,15,219,192 // pand %xmm8,%xmm0
.byte 102,15,112,200,78 // pshufd $0x4e,%xmm0,%xmm1
@@ -45735,7 +45735,7 @@ _sk_load_tables_sse2:
.byte 102,15,20,211 // unpcklpd %xmm3,%xmm2
.byte 102,65,15,114,209,24 // psrld $0x18,%xmm9
.byte 65,15,91,217 // cvtdq2ps %xmm9,%xmm3
- .byte 15,89,29,29,68,1,0 // mulps 0x1441d(%rip),%xmm3 # 39500 <_sk_srcover_bgra_8888_sse2_lowp+0xdfc>
+ .byte 15,89,29,125,69,1,0 // mulps 0x1457d(%rip),%xmm3 # 39660 <_sk_srcover_bgra_8888_sse2_lowp+0xdec>
.byte 72,173 // lods %ds:(%rsi),%rax
.byte 91 // pop %rbx
.byte 65,94 // pop %r14
@@ -45775,7 +45775,7 @@ _sk_load_tables_u16_be_sse2:
.byte 102,65,15,111,201 // movdqa %xmm9,%xmm1
.byte 102,15,97,200 // punpcklwd %xmm0,%xmm1
.byte 102,68,15,105,200 // punpckhwd %xmm0,%xmm9
- .byte 102,15,111,21,247,67,1,0 // movdqa 0x143f7(%rip),%xmm2 # 39570 <_sk_srcover_bgra_8888_sse2_lowp+0xe6c>
+ .byte 102,15,111,21,87,69,1,0 // movdqa 0x14557(%rip),%xmm2 # 396d0 <_sk_srcover_bgra_8888_sse2_lowp+0xe5c>
.byte 102,15,112,217,238 // pshufd $0xee,%xmm1,%xmm3
.byte 102,15,219,202 // pand %xmm2,%xmm1
.byte 102,69,15,239,192 // pxor %xmm8,%xmm8
@@ -45836,7 +45836,7 @@ _sk_load_tables_u16_be_sse2:
.byte 102,65,15,235,217 // por %xmm9,%xmm3
.byte 102,65,15,97,216 // punpcklwd %xmm8,%xmm3
.byte 15,91,219 // cvtdq2ps %xmm3,%xmm3
- .byte 15,89,29,245,66,1,0 // mulps 0x142f5(%rip),%xmm3 # 39590 <_sk_srcover_bgra_8888_sse2_lowp+0xe8c>
+ .byte 15,89,29,85,68,1,0 // mulps 0x14455(%rip),%xmm3 # 396f0 <_sk_srcover_bgra_8888_sse2_lowp+0xe7c>
.byte 72,173 // lods %ds:(%rsi),%rax
.byte 91 // pop %rbx
.byte 65,94 // pop %r14
@@ -45877,7 +45877,7 @@ _sk_load_tables_rgb_u16_be_sse2:
.byte 102,15,111,194 // movdqa %xmm2,%xmm0
.byte 102,65,15,97,194 // punpcklwd %xmm10,%xmm0
.byte 102,15,112,200,78 // pshufd $0x4e,%xmm0,%xmm1
- .byte 102,68,15,111,5,57,66,1,0 // movdqa 0x14239(%rip),%xmm8 # 39570 <_sk_srcover_bgra_8888_sse2_lowp+0xe6c>
+ .byte 102,68,15,111,5,153,67,1,0 // movdqa 0x14399(%rip),%xmm8 # 396d0 <_sk_srcover_bgra_8888_sse2_lowp+0xe5c>
.byte 102,65,15,219,192 // pand %xmm8,%xmm0
.byte 102,69,15,239,201 // pxor %xmm9,%xmm9
.byte 102,65,15,97,193 // punpcklwd %xmm9,%xmm0
@@ -45932,7 +45932,7 @@ _sk_load_tables_rgb_u16_be_sse2:
.byte 65,15,20,216 // unpcklps %xmm8,%xmm3
.byte 102,15,20,211 // unpcklpd %xmm3,%xmm2
.byte 72,173 // lods %ds:(%rsi),%rax
- .byte 15,40,29,182,62,1,0 // movaps 0x13eb6(%rip),%xmm3 # 392f0 <_sk_srcover_bgra_8888_sse2_lowp+0xbec>
+ .byte 15,40,29,22,64,1,0 // movaps 0x14016(%rip),%xmm3 # 39450 <_sk_srcover_bgra_8888_sse2_lowp+0xbdc>
.byte 91 // pop %rbx
.byte 65,94 // pop %r14
.byte 255,224 // jmpq *%rax
@@ -45964,7 +45964,7 @@ _sk_byte_tables_sse2:
.byte 65,86 // push %r14
.byte 83 // push %rbx
.byte 72,173 // lods %ds:(%rsi),%rax
- .byte 68,15,40,5,35,63,1,0 // movaps 0x13f23(%rip),%xmm8 # 393d0 <_sk_srcover_bgra_8888_sse2_lowp+0xccc>
+ .byte 68,15,40,5,131,64,1,0 // movaps 0x14083(%rip),%xmm8 # 39530 <_sk_srcover_bgra_8888_sse2_lowp+0xcbc>
.byte 65,15,89,192 // mulps %xmm8,%xmm0
.byte 102,15,91,192 // cvtps2dq %xmm0,%xmm0
.byte 102,73,15,126,192 // movq %xmm0,%r8
@@ -45992,7 +45992,7 @@ _sk_byte_tables_sse2:
.byte 102,65,15,96,193 // punpcklbw %xmm9,%xmm0
.byte 102,65,15,97,193 // punpcklwd %xmm9,%xmm0
.byte 15,91,192 // cvtdq2ps %xmm0,%xmm0
- .byte 68,15,40,21,222,63,1,0 // movaps 0x13fde(%rip),%xmm10 # 39500 <_sk_srcover_bgra_8888_sse2_lowp+0xdfc>
+ .byte 68,15,40,21,62,65,1,0 // movaps 0x1413e(%rip),%xmm10 # 39660 <_sk_srcover_bgra_8888_sse2_lowp+0xdec>
.byte 65,15,89,194 // mulps %xmm10,%xmm0
.byte 65,15,89,200 // mulps %xmm8,%xmm1
.byte 102,15,91,201 // cvtps2dq %xmm1,%xmm1
@@ -46111,7 +46111,7 @@ _sk_byte_tables_rgb_sse2:
.byte 102,65,15,96,193 // punpcklbw %xmm9,%xmm0
.byte 102,65,15,97,193 // punpcklwd %xmm9,%xmm0
.byte 15,91,192 // cvtdq2ps %xmm0,%xmm0
- .byte 68,15,40,21,30,62,1,0 // movaps 0x13e1e(%rip),%xmm10 # 39500 <_sk_srcover_bgra_8888_sse2_lowp+0xdfc>
+ .byte 68,15,40,21,126,63,1,0 // movaps 0x13f7e(%rip),%xmm10 # 39660 <_sk_srcover_bgra_8888_sse2_lowp+0xdec>
.byte 65,15,89,194 // mulps %xmm10,%xmm0
.byte 65,15,89,200 // mulps %xmm8,%xmm1
.byte 102,15,91,201 // cvtps2dq %xmm1,%xmm1
@@ -46309,15 +46309,15 @@ _sk_parametric_r_sse2:
.byte 69,15,88,209 // addps %xmm9,%xmm10
.byte 69,15,198,219,0 // shufps $0x0,%xmm11,%xmm11
.byte 69,15,91,202 // cvtdq2ps %xmm10,%xmm9
- .byte 68,15,89,13,234,59,1,0 // mulps 0x13bea(%rip),%xmm9 # 395a0 <_sk_srcover_bgra_8888_sse2_lowp+0xe9c>
- .byte 68,15,84,21,242,59,1,0 // andps 0x13bf2(%rip),%xmm10 # 395b0 <_sk_srcover_bgra_8888_sse2_lowp+0xeac>
- .byte 68,15,86,21,26,57,1,0 // orps 0x1391a(%rip),%xmm10 # 392e0 <_sk_srcover_bgra_8888_sse2_lowp+0xbdc>
- .byte 68,15,88,13,242,59,1,0 // addps 0x13bf2(%rip),%xmm9 # 395c0 <_sk_srcover_bgra_8888_sse2_lowp+0xebc>
- .byte 68,15,40,37,250,59,1,0 // movaps 0x13bfa(%rip),%xmm12 # 395d0 <_sk_srcover_bgra_8888_sse2_lowp+0xecc>
+ .byte 68,15,89,13,74,61,1,0 // mulps 0x13d4a(%rip),%xmm9 # 39700 <_sk_srcover_bgra_8888_sse2_lowp+0xe8c>
+ .byte 68,15,84,21,82,61,1,0 // andps 0x13d52(%rip),%xmm10 # 39710 <_sk_srcover_bgra_8888_sse2_lowp+0xe9c>
+ .byte 68,15,86,21,122,58,1,0 // orps 0x13a7a(%rip),%xmm10 # 39440 <_sk_srcover_bgra_8888_sse2_lowp+0xbcc>
+ .byte 68,15,88,13,82,61,1,0 // addps 0x13d52(%rip),%xmm9 # 39720 <_sk_srcover_bgra_8888_sse2_lowp+0xeac>
+ .byte 68,15,40,37,90,61,1,0 // movaps 0x13d5a(%rip),%xmm12 # 39730 <_sk_srcover_bgra_8888_sse2_lowp+0xebc>
.byte 69,15,89,226 // mulps %xmm10,%xmm12
.byte 69,15,92,204 // subps %xmm12,%xmm9
- .byte 68,15,88,21,250,59,1,0 // addps 0x13bfa(%rip),%xmm10 # 395e0 <_sk_srcover_bgra_8888_sse2_lowp+0xedc>
- .byte 68,15,40,37,2,60,1,0 // movaps 0x13c02(%rip),%xmm12 # 395f0 <_sk_srcover_bgra_8888_sse2_lowp+0xeec>
+ .byte 68,15,88,21,90,61,1,0 // addps 0x13d5a(%rip),%xmm10 # 39740 <_sk_srcover_bgra_8888_sse2_lowp+0xecc>
+ .byte 68,15,40,37,98,61,1,0 // movaps 0x13d62(%rip),%xmm12 # 39750 <_sk_srcover_bgra_8888_sse2_lowp+0xedc>
.byte 69,15,94,226 // divps %xmm10,%xmm12
.byte 69,15,92,204 // subps %xmm12,%xmm9
.byte 69,15,89,203 // mulps %xmm11,%xmm9
@@ -46325,22 +46325,22 @@ _sk_parametric_r_sse2:
.byte 69,15,91,226 // cvtdq2ps %xmm10,%xmm12
.byte 69,15,40,233 // movaps %xmm9,%xmm13
.byte 69,15,194,236,1 // cmpltps %xmm12,%xmm13
- .byte 68,15,40,21,220,56,1,0 // movaps 0x138dc(%rip),%xmm10 # 392f0 <_sk_srcover_bgra_8888_sse2_lowp+0xbec>
+ .byte 68,15,40,21,60,58,1,0 // movaps 0x13a3c(%rip),%xmm10 # 39450 <_sk_srcover_bgra_8888_sse2_lowp+0xbdc>
.byte 69,15,84,234 // andps %xmm10,%xmm13
.byte 69,15,87,219 // xorps %xmm11,%xmm11
.byte 69,15,92,229 // subps %xmm13,%xmm12
.byte 69,15,40,233 // movaps %xmm9,%xmm13
.byte 69,15,92,236 // subps %xmm12,%xmm13
- .byte 68,15,88,13,208,59,1,0 // addps 0x13bd0(%rip),%xmm9 # 39600 <_sk_srcover_bgra_8888_sse2_lowp+0xefc>
- .byte 68,15,40,37,216,59,1,0 // movaps 0x13bd8(%rip),%xmm12 # 39610 <_sk_srcover_bgra_8888_sse2_lowp+0xf0c>
+ .byte 68,15,88,13,48,61,1,0 // addps 0x13d30(%rip),%xmm9 # 39760 <_sk_srcover_bgra_8888_sse2_lowp+0xeec>
+ .byte 68,15,40,37,56,61,1,0 // movaps 0x13d38(%rip),%xmm12 # 39770 <_sk_srcover_bgra_8888_sse2_lowp+0xefc>
.byte 69,15,89,229 // mulps %xmm13,%xmm12
.byte 69,15,92,204 // subps %xmm12,%xmm9
- .byte 68,15,40,37,216,59,1,0 // movaps 0x13bd8(%rip),%xmm12 # 39620 <_sk_srcover_bgra_8888_sse2_lowp+0xf1c>
+ .byte 68,15,40,37,56,61,1,0 // movaps 0x13d38(%rip),%xmm12 # 39780 <_sk_srcover_bgra_8888_sse2_lowp+0xf0c>
.byte 69,15,92,229 // subps %xmm13,%xmm12
- .byte 68,15,40,45,220,59,1,0 // movaps 0x13bdc(%rip),%xmm13 # 39630 <_sk_srcover_bgra_8888_sse2_lowp+0xf2c>
+ .byte 68,15,40,45,60,61,1,0 // movaps 0x13d3c(%rip),%xmm13 # 39790 <_sk_srcover_bgra_8888_sse2_lowp+0xf1c>
.byte 69,15,94,236 // divps %xmm12,%xmm13
.byte 69,15,88,233 // addps %xmm9,%xmm13
- .byte 68,15,89,45,220,59,1,0 // mulps 0x13bdc(%rip),%xmm13 # 39640 <_sk_srcover_bgra_8888_sse2_lowp+0xf3c>
+ .byte 68,15,89,45,60,61,1,0 // mulps 0x13d3c(%rip),%xmm13 # 397a0 <_sk_srcover_bgra_8888_sse2_lowp+0xf2c>
.byte 102,69,15,91,205 // cvtps2dq %xmm13,%xmm9
.byte 243,68,15,16,96,20 // movss 0x14(%rax),%xmm12
.byte 69,15,198,228,0 // shufps $0x0,%xmm12,%xmm12
@@ -46376,15 +46376,15 @@ _sk_parametric_g_sse2:
.byte 69,15,88,209 // addps %xmm9,%xmm10
.byte 69,15,198,219,0 // shufps $0x0,%xmm11,%xmm11
.byte 69,15,91,202 // cvtdq2ps %xmm10,%xmm9
- .byte 68,15,89,13,172,58,1,0 // mulps 0x13aac(%rip),%xmm9 # 395a0 <_sk_srcover_bgra_8888_sse2_lowp+0xe9c>
- .byte 68,15,84,21,180,58,1,0 // andps 0x13ab4(%rip),%xmm10 # 395b0 <_sk_srcover_bgra_8888_sse2_lowp+0xeac>
- .byte 68,15,86,21,220,55,1,0 // orps 0x137dc(%rip),%xmm10 # 392e0 <_sk_srcover_bgra_8888_sse2_lowp+0xbdc>
- .byte 68,15,88,13,180,58,1,0 // addps 0x13ab4(%rip),%xmm9 # 395c0 <_sk_srcover_bgra_8888_sse2_lowp+0xebc>
- .byte 68,15,40,37,188,58,1,0 // movaps 0x13abc(%rip),%xmm12 # 395d0 <_sk_srcover_bgra_8888_sse2_lowp+0xecc>
+ .byte 68,15,89,13,12,60,1,0 // mulps 0x13c0c(%rip),%xmm9 # 39700 <_sk_srcover_bgra_8888_sse2_lowp+0xe8c>
+ .byte 68,15,84,21,20,60,1,0 // andps 0x13c14(%rip),%xmm10 # 39710 <_sk_srcover_bgra_8888_sse2_lowp+0xe9c>
+ .byte 68,15,86,21,60,57,1,0 // orps 0x1393c(%rip),%xmm10 # 39440 <_sk_srcover_bgra_8888_sse2_lowp+0xbcc>
+ .byte 68,15,88,13,20,60,1,0 // addps 0x13c14(%rip),%xmm9 # 39720 <_sk_srcover_bgra_8888_sse2_lowp+0xeac>
+ .byte 68,15,40,37,28,60,1,0 // movaps 0x13c1c(%rip),%xmm12 # 39730 <_sk_srcover_bgra_8888_sse2_lowp+0xebc>
.byte 69,15,89,226 // mulps %xmm10,%xmm12
.byte 69,15,92,204 // subps %xmm12,%xmm9
- .byte 68,15,88,21,188,58,1,0 // addps 0x13abc(%rip),%xmm10 # 395e0 <_sk_srcover_bgra_8888_sse2_lowp+0xedc>
- .byte 68,15,40,37,196,58,1,0 // movaps 0x13ac4(%rip),%xmm12 # 395f0 <_sk_srcover_bgra_8888_sse2_lowp+0xeec>
+ .byte 68,15,88,21,28,60,1,0 // addps 0x13c1c(%rip),%xmm10 # 39740 <_sk_srcover_bgra_8888_sse2_lowp+0xecc>
+ .byte 68,15,40,37,36,60,1,0 // movaps 0x13c24(%rip),%xmm12 # 39750 <_sk_srcover_bgra_8888_sse2_lowp+0xedc>
.byte 69,15,94,226 // divps %xmm10,%xmm12
.byte 69,15,92,204 // subps %xmm12,%xmm9
.byte 69,15,89,203 // mulps %xmm11,%xmm9
@@ -46392,22 +46392,22 @@ _sk_parametric_g_sse2:
.byte 69,15,91,226 // cvtdq2ps %xmm10,%xmm12
.byte 69,15,40,233 // movaps %xmm9,%xmm13
.byte 69,15,194,236,1 // cmpltps %xmm12,%xmm13
- .byte 68,15,40,21,158,55,1,0 // movaps 0x1379e(%rip),%xmm10 # 392f0 <_sk_srcover_bgra_8888_sse2_lowp+0xbec>
+ .byte 68,15,40,21,254,56,1,0 // movaps 0x138fe(%rip),%xmm10 # 39450 <_sk_srcover_bgra_8888_sse2_lowp+0xbdc>
.byte 69,15,84,234 // andps %xmm10,%xmm13
.byte 69,15,87,219 // xorps %xmm11,%xmm11
.byte 69,15,92,229 // subps %xmm13,%xmm12
.byte 69,15,40,233 // movaps %xmm9,%xmm13
.byte 69,15,92,236 // subps %xmm12,%xmm13
- .byte 68,15,88,13,146,58,1,0 // addps 0x13a92(%rip),%xmm9 # 39600 <_sk_srcover_bgra_8888_sse2_lowp+0xefc>
- .byte 68,15,40,37,154,58,1,0 // movaps 0x13a9a(%rip),%xmm12 # 39610 <_sk_srcover_bgra_8888_sse2_lowp+0xf0c>
+ .byte 68,15,88,13,242,59,1,0 // addps 0x13bf2(%rip),%xmm9 # 39760 <_sk_srcover_bgra_8888_sse2_lowp+0xeec>
+ .byte 68,15,40,37,250,59,1,0 // movaps 0x13bfa(%rip),%xmm12 # 39770 <_sk_srcover_bgra_8888_sse2_lowp+0xefc>
.byte 69,15,89,229 // mulps %xmm13,%xmm12
.byte 69,15,92,204 // subps %xmm12,%xmm9
- .byte 68,15,40,37,154,58,1,0 // movaps 0x13a9a(%rip),%xmm12 # 39620 <_sk_srcover_bgra_8888_sse2_lowp+0xf1c>
+ .byte 68,15,40,37,250,59,1,0 // movaps 0x13bfa(%rip),%xmm12 # 39780 <_sk_srcover_bgra_8888_sse2_lowp+0xf0c>
.byte 69,15,92,229 // subps %xmm13,%xmm12
- .byte 68,15,40,45,158,58,1,0 // movaps 0x13a9e(%rip),%xmm13 # 39630 <_sk_srcover_bgra_8888_sse2_lowp+0xf2c>
+ .byte 68,15,40,45,254,59,1,0 // movaps 0x13bfe(%rip),%xmm13 # 39790 <_sk_srcover_bgra_8888_sse2_lowp+0xf1c>
.byte 69,15,94,236 // divps %xmm12,%xmm13
.byte 69,15,88,233 // addps %xmm9,%xmm13
- .byte 68,15,89,45,158,58,1,0 // mulps 0x13a9e(%rip),%xmm13 # 39640 <_sk_srcover_bgra_8888_sse2_lowp+0xf3c>
+ .byte 68,15,89,45,254,59,1,0 // mulps 0x13bfe(%rip),%xmm13 # 397a0 <_sk_srcover_bgra_8888_sse2_lowp+0xf2c>
.byte 102,69,15,91,205 // cvtps2dq %xmm13,%xmm9
.byte 243,68,15,16,96,20 // movss 0x14(%rax),%xmm12
.byte 69,15,198,228,0 // shufps $0x0,%xmm12,%xmm12
@@ -46443,15 +46443,15 @@ _sk_parametric_b_sse2:
.byte 69,15,88,209 // addps %xmm9,%xmm10
.byte 69,15,198,219,0 // shufps $0x0,%xmm11,%xmm11
.byte 69,15,91,202 // cvtdq2ps %xmm10,%xmm9
- .byte 68,15,89,13,110,57,1,0 // mulps 0x1396e(%rip),%xmm9 # 395a0 <_sk_srcover_bgra_8888_sse2_lowp+0xe9c>
- .byte 68,15,84,21,118,57,1,0 // andps 0x13976(%rip),%xmm10 # 395b0 <_sk_srcover_bgra_8888_sse2_lowp+0xeac>
- .byte 68,15,86,21,158,54,1,0 // orps 0x1369e(%rip),%xmm10 # 392e0 <_sk_srcover_bgra_8888_sse2_lowp+0xbdc>
- .byte 68,15,88,13,118,57,1,0 // addps 0x13976(%rip),%xmm9 # 395c0 <_sk_srcover_bgra_8888_sse2_lowp+0xebc>
- .byte 68,15,40,37,126,57,1,0 // movaps 0x1397e(%rip),%xmm12 # 395d0 <_sk_srcover_bgra_8888_sse2_lowp+0xecc>
+ .byte 68,15,89,13,206,58,1,0 // mulps 0x13ace(%rip),%xmm9 # 39700 <_sk_srcover_bgra_8888_sse2_lowp+0xe8c>
+ .byte 68,15,84,21,214,58,1,0 // andps 0x13ad6(%rip),%xmm10 # 39710 <_sk_srcover_bgra_8888_sse2_lowp+0xe9c>
+ .byte 68,15,86,21,254,55,1,0 // orps 0x137fe(%rip),%xmm10 # 39440 <_sk_srcover_bgra_8888_sse2_lowp+0xbcc>
+ .byte 68,15,88,13,214,58,1,0 // addps 0x13ad6(%rip),%xmm9 # 39720 <_sk_srcover_bgra_8888_sse2_lowp+0xeac>
+ .byte 68,15,40,37,222,58,1,0 // movaps 0x13ade(%rip),%xmm12 # 39730 <_sk_srcover_bgra_8888_sse2_lowp+0xebc>
.byte 69,15,89,226 // mulps %xmm10,%xmm12
.byte 69,15,92,204 // subps %xmm12,%xmm9
- .byte 68,15,88,21,126,57,1,0 // addps 0x1397e(%rip),%xmm10 # 395e0 <_sk_srcover_bgra_8888_sse2_lowp+0xedc>
- .byte 68,15,40,37,134,57,1,0 // movaps 0x13986(%rip),%xmm12 # 395f0 <_sk_srcover_bgra_8888_sse2_lowp+0xeec>
+ .byte 68,15,88,21,222,58,1,0 // addps 0x13ade(%rip),%xmm10 # 39740 <_sk_srcover_bgra_8888_sse2_lowp+0xecc>
+ .byte 68,15,40,37,230,58,1,0 // movaps 0x13ae6(%rip),%xmm12 # 39750 <_sk_srcover_bgra_8888_sse2_lowp+0xedc>
.byte 69,15,94,226 // divps %xmm10,%xmm12
.byte 69,15,92,204 // subps %xmm12,%xmm9
.byte 69,15,89,203 // mulps %xmm11,%xmm9
@@ -46459,22 +46459,22 @@ _sk_parametric_b_sse2:
.byte 69,15,91,226 // cvtdq2ps %xmm10,%xmm12
.byte 69,15,40,233 // movaps %xmm9,%xmm13
.byte 69,15,194,236,1 // cmpltps %xmm12,%xmm13
- .byte 68,15,40,21,96,54,1,0 // movaps 0x13660(%rip),%xmm10 # 392f0 <_sk_srcover_bgra_8888_sse2_lowp+0xbec>
+ .byte 68,15,40,21,192,55,1,0 // movaps 0x137c0(%rip),%xmm10 # 39450 <_sk_srcover_bgra_8888_sse2_lowp+0xbdc>
.byte 69,15,84,234 // andps %xmm10,%xmm13
.byte 69,15,87,219 // xorps %xmm11,%xmm11
.byte 69,15,92,229 // subps %xmm13,%xmm12
.byte 69,15,40,233 // movaps %xmm9,%xmm13
.byte 69,15,92,236 // subps %xmm12,%xmm13
- .byte 68,15,88,13,84,57,1,0 // addps 0x13954(%rip),%xmm9 # 39600 <_sk_srcover_bgra_8888_sse2_lowp+0xefc>
- .byte 68,15,40,37,92,57,1,0 // movaps 0x1395c(%rip),%xmm12 # 39610 <_sk_srcover_bgra_8888_sse2_lowp+0xf0c>
+ .byte 68,15,88,13,180,58,1,0 // addps 0x13ab4(%rip),%xmm9 # 39760 <_sk_srcover_bgra_8888_sse2_lowp+0xeec>
+ .byte 68,15,40,37,188,58,1,0 // movaps 0x13abc(%rip),%xmm12 # 39770 <_sk_srcover_bgra_8888_sse2_lowp+0xefc>
.byte 69,15,89,229 // mulps %xmm13,%xmm12
.byte 69,15,92,204 // subps %xmm12,%xmm9
- .byte 68,15,40,37,92,57,1,0 // movaps 0x1395c(%rip),%xmm12 # 39620 <_sk_srcover_bgra_8888_sse2_lowp+0xf1c>
+ .byte 68,15,40,37,188,58,1,0 // movaps 0x13abc(%rip),%xmm12 # 39780 <_sk_srcover_bgra_8888_sse2_lowp+0xf0c>
.byte 69,15,92,229 // subps %xmm13,%xmm12
- .byte 68,15,40,45,96,57,1,0 // movaps 0x13960(%rip),%xmm13 # 39630 <_sk_srcover_bgra_8888_sse2_lowp+0xf2c>
+ .byte 68,15,40,45,192,58,1,0 // movaps 0x13ac0(%rip),%xmm13 # 39790 <_sk_srcover_bgra_8888_sse2_lowp+0xf1c>
.byte 69,15,94,236 // divps %xmm12,%xmm13
.byte 69,15,88,233 // addps %xmm9,%xmm13
- .byte 68,15,89,45,96,57,1,0 // mulps 0x13960(%rip),%xmm13 # 39640 <_sk_srcover_bgra_8888_sse2_lowp+0xf3c>
+ .byte 68,15,89,45,192,58,1,0 // mulps 0x13ac0(%rip),%xmm13 # 397a0 <_sk_srcover_bgra_8888_sse2_lowp+0xf2c>
.byte 102,69,15,91,205 // cvtps2dq %xmm13,%xmm9
.byte 243,68,15,16,96,20 // movss 0x14(%rax),%xmm12
.byte 69,15,198,228,0 // shufps $0x0,%xmm12,%xmm12
@@ -46510,15 +46510,15 @@ _sk_parametric_a_sse2:
.byte 69,15,88,209 // addps %xmm9,%xmm10
.byte 69,15,198,219,0 // shufps $0x0,%xmm11,%xmm11
.byte 69,15,91,202 // cvtdq2ps %xmm10,%xmm9
- .byte 68,15,89,13,48,56,1,0 // mulps 0x13830(%rip),%xmm9 # 395a0 <_sk_srcover_bgra_8888_sse2_lowp+0xe9c>
- .byte 68,15,84,21,56,56,1,0 // andps 0x13838(%rip),%xmm10 # 395b0 <_sk_srcover_bgra_8888_sse2_lowp+0xeac>
- .byte 68,15,86,21,96,53,1,0 // orps 0x13560(%rip),%xmm10 # 392e0 <_sk_srcover_bgra_8888_sse2_lowp+0xbdc>
- .byte 68,15,88,13,56,56,1,0 // addps 0x13838(%rip),%xmm9 # 395c0 <_sk_srcover_bgra_8888_sse2_lowp+0xebc>
- .byte 68,15,40,37,64,56,1,0 // movaps 0x13840(%rip),%xmm12 # 395d0 <_sk_srcover_bgra_8888_sse2_lowp+0xecc>
+ .byte 68,15,89,13,144,57,1,0 // mulps 0x13990(%rip),%xmm9 # 39700 <_sk_srcover_bgra_8888_sse2_lowp+0xe8c>
+ .byte 68,15,84,21,152,57,1,0 // andps 0x13998(%rip),%xmm10 # 39710 <_sk_srcover_bgra_8888_sse2_lowp+0xe9c>
+ .byte 68,15,86,21,192,54,1,0 // orps 0x136c0(%rip),%xmm10 # 39440 <_sk_srcover_bgra_8888_sse2_lowp+0xbcc>
+ .byte 68,15,88,13,152,57,1,0 // addps 0x13998(%rip),%xmm9 # 39720 <_sk_srcover_bgra_8888_sse2_lowp+0xeac>
+ .byte 68,15,40,37,160,57,1,0 // movaps 0x139a0(%rip),%xmm12 # 39730 <_sk_srcover_bgra_8888_sse2_lowp+0xebc>
.byte 69,15,89,226 // mulps %xmm10,%xmm12
.byte 69,15,92,204 // subps %xmm12,%xmm9
- .byte 68,15,88,21,64,56,1,0 // addps 0x13840(%rip),%xmm10 # 395e0 <_sk_srcover_bgra_8888_sse2_lowp+0xedc>
- .byte 68,15,40,37,72,56,1,0 // movaps 0x13848(%rip),%xmm12 # 395f0 <_sk_srcover_bgra_8888_sse2_lowp+0xeec>
+ .byte 68,15,88,21,160,57,1,0 // addps 0x139a0(%rip),%xmm10 # 39740 <_sk_srcover_bgra_8888_sse2_lowp+0xecc>
+ .byte 68,15,40,37,168,57,1,0 // movaps 0x139a8(%rip),%xmm12 # 39750 <_sk_srcover_bgra_8888_sse2_lowp+0xedc>
.byte 69,15,94,226 // divps %xmm10,%xmm12
.byte 69,15,92,204 // subps %xmm12,%xmm9
.byte 69,15,89,203 // mulps %xmm11,%xmm9
@@ -46526,22 +46526,22 @@ _sk_parametric_a_sse2:
.byte 69,15,91,226 // cvtdq2ps %xmm10,%xmm12
.byte 69,15,40,233 // movaps %xmm9,%xmm13
.byte 69,15,194,236,1 // cmpltps %xmm12,%xmm13
- .byte 68,15,40,21,34,53,1,0 // movaps 0x13522(%rip),%xmm10 # 392f0 <_sk_srcover_bgra_8888_sse2_lowp+0xbec>
+ .byte 68,15,40,21,130,54,1,0 // movaps 0x13682(%rip),%xmm10 # 39450 <_sk_srcover_bgra_8888_sse2_lowp+0xbdc>
.byte 69,15,84,234 // andps %xmm10,%xmm13
.byte 69,15,87,219 // xorps %xmm11,%xmm11
.byte 69,15,92,229 // subps %xmm13,%xmm12
.byte 69,15,40,233 // movaps %xmm9,%xmm13
.byte 69,15,92,236 // subps %xmm12,%xmm13
- .byte 68,15,88,13,22,56,1,0 // addps 0x13816(%rip),%xmm9 # 39600 <_sk_srcover_bgra_8888_sse2_lowp+0xefc>
- .byte 68,15,40,37,30,56,1,0 // movaps 0x1381e(%rip),%xmm12 # 39610 <_sk_srcover_bgra_8888_sse2_lowp+0xf0c>
+ .byte 68,15,88,13,118,57,1,0 // addps 0x13976(%rip),%xmm9 # 39760 <_sk_srcover_bgra_8888_sse2_lowp+0xeec>
+ .byte 68,15,40,37,126,57,1,0 // movaps 0x1397e(%rip),%xmm12 # 39770 <_sk_srcover_bgra_8888_sse2_lowp+0xefc>
.byte 69,15,89,229 // mulps %xmm13,%xmm12
.byte 69,15,92,204 // subps %xmm12,%xmm9
- .byte 68,15,40,37,30,56,1,0 // movaps 0x1381e(%rip),%xmm12 # 39620 <_sk_srcover_bgra_8888_sse2_lowp+0xf1c>
+ .byte 68,15,40,37,126,57,1,0 // movaps 0x1397e(%rip),%xmm12 # 39780 <_sk_srcover_bgra_8888_sse2_lowp+0xf0c>
.byte 69,15,92,229 // subps %xmm13,%xmm12
- .byte 68,15,40,45,34,56,1,0 // movaps 0x13822(%rip),%xmm13 # 39630 <_sk_srcover_bgra_8888_sse2_lowp+0xf2c>
+ .byte 68,15,40,45,130,57,1,0 // movaps 0x13982(%rip),%xmm13 # 39790 <_sk_srcover_bgra_8888_sse2_lowp+0xf1c>
.byte 69,15,94,236 // divps %xmm12,%xmm13
.byte 69,15,88,233 // addps %xmm9,%xmm13
- .byte 68,15,89,45,34,56,1,0 // mulps 0x13822(%rip),%xmm13 # 39640 <_sk_srcover_bgra_8888_sse2_lowp+0xf3c>
+ .byte 68,15,89,45,130,57,1,0 // mulps 0x13982(%rip),%xmm13 # 397a0 <_sk_srcover_bgra_8888_sse2_lowp+0xf2c>
.byte 102,69,15,91,205 // cvtps2dq %xmm13,%xmm9
.byte 243,68,15,16,96,20 // movss 0x14(%rax),%xmm12
.byte 69,15,198,228,0 // shufps $0x0,%xmm12,%xmm12
@@ -46566,19 +46566,19 @@ _sk_gamma_sse2:
.byte 15,40,218 // movaps %xmm2,%xmm3
.byte 15,40,208 // movaps %xmm0,%xmm2
.byte 15,91,194 // cvtdq2ps %xmm2,%xmm0
- .byte 15,89,5,45,55,1,0 // mulps 0x1372d(%rip),%xmm0 # 395a0 <_sk_srcover_bgra_8888_sse2_lowp+0xe9c>
- .byte 15,84,21,54,55,1,0 // andps 0x13736(%rip),%xmm2 # 395b0 <_sk_srcover_bgra_8888_sse2_lowp+0xeac>
- .byte 68,15,40,53,94,52,1,0 // movaps 0x1345e(%rip),%xmm14 # 392e0 <_sk_srcover_bgra_8888_sse2_lowp+0xbdc>
+ .byte 15,89,5,141,56,1,0 // mulps 0x1388d(%rip),%xmm0 # 39700 <_sk_srcover_bgra_8888_sse2_lowp+0xe8c>
+ .byte 15,84,21,150,56,1,0 // andps 0x13896(%rip),%xmm2 # 39710 <_sk_srcover_bgra_8888_sse2_lowp+0xe9c>
+ .byte 68,15,40,53,190,53,1,0 // movaps 0x135be(%rip),%xmm14 # 39440 <_sk_srcover_bgra_8888_sse2_lowp+0xbcc>
.byte 65,15,86,214 // orps %xmm14,%xmm2
- .byte 68,15,40,37,50,55,1,0 // movaps 0x13732(%rip),%xmm12 # 395c0 <_sk_srcover_bgra_8888_sse2_lowp+0xebc>
+ .byte 68,15,40,37,146,56,1,0 // movaps 0x13892(%rip),%xmm12 # 39720 <_sk_srcover_bgra_8888_sse2_lowp+0xeac>
.byte 65,15,88,196 // addps %xmm12,%xmm0
- .byte 68,15,40,29,54,55,1,0 // movaps 0x13736(%rip),%xmm11 # 395d0 <_sk_srcover_bgra_8888_sse2_lowp+0xecc>
+ .byte 68,15,40,29,150,56,1,0 // movaps 0x13896(%rip),%xmm11 # 39730 <_sk_srcover_bgra_8888_sse2_lowp+0xebc>
.byte 15,40,226 // movaps %xmm2,%xmm4
.byte 65,15,89,227 // mulps %xmm11,%xmm4
.byte 15,92,196 // subps %xmm4,%xmm0
- .byte 68,15,40,21,52,55,1,0 // movaps 0x13734(%rip),%xmm10 # 395e0 <_sk_srcover_bgra_8888_sse2_lowp+0xedc>
+ .byte 68,15,40,21,148,56,1,0 // movaps 0x13894(%rip),%xmm10 # 39740 <_sk_srcover_bgra_8888_sse2_lowp+0xecc>
.byte 65,15,88,210 // addps %xmm10,%xmm2
- .byte 68,15,40,45,56,55,1,0 // movaps 0x13738(%rip),%xmm13 # 395f0 <_sk_srcover_bgra_8888_sse2_lowp+0xeec>
+ .byte 68,15,40,45,152,56,1,0 // movaps 0x13898(%rip),%xmm13 # 39750 <_sk_srcover_bgra_8888_sse2_lowp+0xedc>
.byte 65,15,40,229 // movaps %xmm13,%xmm4
.byte 15,94,226 // divps %xmm2,%xmm4
.byte 72,173 // lods %ds:(%rsi),%rax
@@ -46590,27 +46590,27 @@ _sk_gamma_sse2:
.byte 15,91,210 // cvtdq2ps %xmm2,%xmm2
.byte 15,40,224 // movaps %xmm0,%xmm4
.byte 15,194,226,1 // cmpltps %xmm2,%xmm4
- .byte 68,15,40,13,8,52,1,0 // movaps 0x13408(%rip),%xmm9 # 392f0 <_sk_srcover_bgra_8888_sse2_lowp+0xbec>
+ .byte 68,15,40,13,104,53,1,0 // movaps 0x13568(%rip),%xmm9 # 39450 <_sk_srcover_bgra_8888_sse2_lowp+0xbdc>
.byte 65,15,84,225 // andps %xmm9,%xmm4
.byte 15,92,212 // subps %xmm4,%xmm2
.byte 15,40,224 // movaps %xmm0,%xmm4
.byte 15,92,226 // subps %xmm2,%xmm4
- .byte 15,40,53,4,55,1,0 // movaps 0x13704(%rip),%xmm6 # 39600 <_sk_srcover_bgra_8888_sse2_lowp+0xefc>
+ .byte 15,40,53,100,56,1,0 // movaps 0x13864(%rip),%xmm6 # 39760 <_sk_srcover_bgra_8888_sse2_lowp+0xeec>
.byte 15,88,198 // addps %xmm6,%xmm0
- .byte 15,40,61,26,55,1,0 // movaps 0x1371a(%rip),%xmm7 # 39620 <_sk_srcover_bgra_8888_sse2_lowp+0xf1c>
+ .byte 15,40,61,122,56,1,0 // movaps 0x1387a(%rip),%xmm7 # 39780 <_sk_srcover_bgra_8888_sse2_lowp+0xf0c>
.byte 15,40,239 // movaps %xmm7,%xmm5
.byte 15,92,236 // subps %xmm4,%xmm5
.byte 15,40,212 // movaps %xmm4,%xmm2
- .byte 15,40,37,250,54,1,0 // movaps 0x136fa(%rip),%xmm4 # 39610 <_sk_srcover_bgra_8888_sse2_lowp+0xf0c>
+ .byte 15,40,37,90,56,1,0 // movaps 0x1385a(%rip),%xmm4 # 39770 <_sk_srcover_bgra_8888_sse2_lowp+0xefc>
.byte 15,89,212 // mulps %xmm4,%xmm2
.byte 15,92,194 // subps %xmm2,%xmm0
- .byte 68,15,40,61,12,55,1,0 // movaps 0x1370c(%rip),%xmm15 # 39630 <_sk_srcover_bgra_8888_sse2_lowp+0xf2c>
+ .byte 68,15,40,61,108,56,1,0 // movaps 0x1386c(%rip),%xmm15 # 39790 <_sk_srcover_bgra_8888_sse2_lowp+0xf1c>
.byte 65,15,40,215 // movaps %xmm15,%xmm2
.byte 15,94,213 // divps %xmm5,%xmm2
.byte 15,88,208 // addps %xmm0,%xmm2
.byte 15,91,193 // cvtdq2ps %xmm1,%xmm0
- .byte 15,89,5,104,54,1,0 // mulps 0x13668(%rip),%xmm0 # 395a0 <_sk_srcover_bgra_8888_sse2_lowp+0xe9c>
- .byte 15,84,13,113,54,1,0 // andps 0x13671(%rip),%xmm1 # 395b0 <_sk_srcover_bgra_8888_sse2_lowp+0xeac>
+ .byte 15,89,5,200,55,1,0 // mulps 0x137c8(%rip),%xmm0 # 39700 <_sk_srcover_bgra_8888_sse2_lowp+0xe8c>
+ .byte 15,84,13,209,55,1,0 // andps 0x137d1(%rip),%xmm1 # 39710 <_sk_srcover_bgra_8888_sse2_lowp+0xe9c>
.byte 65,15,86,206 // orps %xmm14,%xmm1
.byte 65,15,88,196 // addps %xmm12,%xmm0
.byte 15,40,233 // movaps %xmm1,%xmm5
@@ -46640,9 +46640,9 @@ _sk_gamma_sse2:
.byte 15,94,206 // divps %xmm6,%xmm1
.byte 15,88,200 // addps %xmm0,%xmm1
.byte 15,91,195 // cvtdq2ps %xmm3,%xmm0
- .byte 15,89,5,247,53,1,0 // mulps 0x135f7(%rip),%xmm0 # 395a0 <_sk_srcover_bgra_8888_sse2_lowp+0xe9c>
- .byte 15,84,29,0,54,1,0 // andps 0x13600(%rip),%xmm3 # 395b0 <_sk_srcover_bgra_8888_sse2_lowp+0xeac>
- .byte 15,86,29,41,51,1,0 // orps 0x13329(%rip),%xmm3 # 392e0 <_sk_srcover_bgra_8888_sse2_lowp+0xbdc>
+ .byte 15,89,5,87,55,1,0 // mulps 0x13757(%rip),%xmm0 # 39700 <_sk_srcover_bgra_8888_sse2_lowp+0xe8c>
+ .byte 15,84,29,96,55,1,0 // andps 0x13760(%rip),%xmm3 # 39710 <_sk_srcover_bgra_8888_sse2_lowp+0xe9c>
+ .byte 15,86,29,137,52,1,0 // orps 0x13489(%rip),%xmm3 # 39440 <_sk_srcover_bgra_8888_sse2_lowp+0xbcc>
.byte 65,15,88,196 // addps %xmm12,%xmm0
.byte 68,15,89,219 // mulps %xmm3,%xmm11
.byte 65,15,92,195 // subps %xmm11,%xmm0
@@ -46664,7 +46664,7 @@ _sk_gamma_sse2:
.byte 15,92,253 // subps %xmm5,%xmm7
.byte 68,15,94,255 // divps %xmm7,%xmm15
.byte 68,15,88,248 // addps %xmm0,%xmm15
- .byte 15,40,5,54,54,1,0 // movaps 0x13636(%rip),%xmm0 # 39640 <_sk_srcover_bgra_8888_sse2_lowp+0xf3c>
+ .byte 15,40,5,150,55,1,0 // movaps 0x13796(%rip),%xmm0 # 397a0 <_sk_srcover_bgra_8888_sse2_lowp+0xf2c>
.byte 15,89,208 // mulps %xmm0,%xmm2
.byte 15,89,200 // mulps %xmm0,%xmm1
.byte 68,15,89,248 // mulps %xmm0,%xmm15
@@ -46683,29 +46683,29 @@ HIDDEN _sk_lab_to_xyz_sse2
.globl _sk_lab_to_xyz_sse2
FUNCTION(_sk_lab_to_xyz_sse2)
_sk_lab_to_xyz_sse2:
- .byte 15,89,5,11,54,1,0 // mulps 0x1360b(%rip),%xmm0 # 39650 <_sk_srcover_bgra_8888_sse2_lowp+0xf4c>
- .byte 68,15,40,5,131,51,1,0 // movaps 0x13383(%rip),%xmm8 # 393d0 <_sk_srcover_bgra_8888_sse2_lowp+0xccc>
+ .byte 15,89,5,107,55,1,0 // mulps 0x1376b(%rip),%xmm0 # 397b0 <_sk_srcover_bgra_8888_sse2_lowp+0xf3c>
+ .byte 68,15,40,5,227,52,1,0 // movaps 0x134e3(%rip),%xmm8 # 39530 <_sk_srcover_bgra_8888_sse2_lowp+0xcbc>
.byte 65,15,89,200 // mulps %xmm8,%xmm1
- .byte 68,15,40,13,7,54,1,0 // movaps 0x13607(%rip),%xmm9 # 39660 <_sk_srcover_bgra_8888_sse2_lowp+0xf5c>
+ .byte 68,15,40,13,103,55,1,0 // movaps 0x13767(%rip),%xmm9 # 397c0 <_sk_srcover_bgra_8888_sse2_lowp+0xf4c>
.byte 65,15,88,201 // addps %xmm9,%xmm1
.byte 65,15,89,208 // mulps %xmm8,%xmm2
.byte 65,15,88,209 // addps %xmm9,%xmm2
- .byte 15,88,5,4,54,1,0 // addps 0x13604(%rip),%xmm0 # 39670 <_sk_srcover_bgra_8888_sse2_lowp+0xf6c>
- .byte 15,89,5,13,54,1,0 // mulps 0x1360d(%rip),%xmm0 # 39680 <_sk_srcover_bgra_8888_sse2_lowp+0xf7c>
- .byte 15,89,13,22,54,1,0 // mulps 0x13616(%rip),%xmm1 # 39690 <_sk_srcover_bgra_8888_sse2_lowp+0xf8c>
+ .byte 15,88,5,100,55,1,0 // addps 0x13764(%rip),%xmm0 # 397d0 <_sk_srcover_bgra_8888_sse2_lowp+0xf5c>
+ .byte 15,89,5,109,55,1,0 // mulps 0x1376d(%rip),%xmm0 # 397e0 <_sk_srcover_bgra_8888_sse2_lowp+0xf6c>
+ .byte 15,89,13,118,55,1,0 // mulps 0x13776(%rip),%xmm1 # 397f0 <_sk_srcover_bgra_8888_sse2_lowp+0xf7c>
.byte 15,88,200 // addps %xmm0,%xmm1
- .byte 15,89,21,28,54,1,0 // mulps 0x1361c(%rip),%xmm2 # 396a0 <_sk_srcover_bgra_8888_sse2_lowp+0xf9c>
+ .byte 15,89,21,124,55,1,0 // mulps 0x1377c(%rip),%xmm2 # 39800 <_sk_srcover_bgra_8888_sse2_lowp+0xf8c>
.byte 68,15,40,200 // movaps %xmm0,%xmm9
.byte 68,15,92,202 // subps %xmm2,%xmm9
.byte 68,15,40,225 // movaps %xmm1,%xmm12
.byte 69,15,89,228 // mulps %xmm12,%xmm12
.byte 68,15,89,225 // mulps %xmm1,%xmm12
- .byte 15,40,21,17,54,1,0 // movaps 0x13611(%rip),%xmm2 # 396b0 <_sk_srcover_bgra_8888_sse2_lowp+0xfac>
+ .byte 15,40,21,113,55,1,0 // movaps 0x13771(%rip),%xmm2 # 39810 <_sk_srcover_bgra_8888_sse2_lowp+0xf9c>
.byte 68,15,40,194 // movaps %xmm2,%xmm8
.byte 69,15,194,196,1 // cmpltps %xmm12,%xmm8
- .byte 68,15,40,21,16,54,1,0 // movaps 0x13610(%rip),%xmm10 # 396c0 <_sk_srcover_bgra_8888_sse2_lowp+0xfbc>
+ .byte 68,15,40,21,112,55,1,0 // movaps 0x13770(%rip),%xmm10 # 39820 <_sk_srcover_bgra_8888_sse2_lowp+0xfac>
.byte 65,15,88,202 // addps %xmm10,%xmm1
- .byte 68,15,40,29,20,54,1,0 // movaps 0x13614(%rip),%xmm11 # 396d0 <_sk_srcover_bgra_8888_sse2_lowp+0xfcc>
+ .byte 68,15,40,29,116,55,1,0 // movaps 0x13774(%rip),%xmm11 # 39830 <_sk_srcover_bgra_8888_sse2_lowp+0xfbc>
.byte 65,15,89,203 // mulps %xmm11,%xmm1
.byte 69,15,84,224 // andps %xmm8,%xmm12
.byte 68,15,85,193 // andnps %xmm1,%xmm8
@@ -46729,8 +46729,8 @@ _sk_lab_to_xyz_sse2:
.byte 15,84,194 // andps %xmm2,%xmm0
.byte 65,15,85,209 // andnps %xmm9,%xmm2
.byte 15,86,208 // orps %xmm0,%xmm2
- .byte 68,15,89,5,196,53,1,0 // mulps 0x135c4(%rip),%xmm8 # 396e0 <_sk_srcover_bgra_8888_sse2_lowp+0xfdc>
- .byte 15,89,21,205,53,1,0 // mulps 0x135cd(%rip),%xmm2 # 396f0 <_sk_srcover_bgra_8888_sse2_lowp+0xfec>
+ .byte 68,15,89,5,36,55,1,0 // mulps 0x13724(%rip),%xmm8 # 39840 <_sk_srcover_bgra_8888_sse2_lowp+0xfcc>
+ .byte 15,89,21,45,55,1,0 // mulps 0x1372d(%rip),%xmm2 # 39850 <_sk_srcover_bgra_8888_sse2_lowp+0xfdc>
.byte 72,173 // lods %ds:(%rsi),%rax
.byte 65,15,40,192 // movaps %xmm8,%xmm0
.byte 255,224 // jmpq *%rax
@@ -46750,9 +46750,9 @@ _sk_load_a8_sse2:
.byte 102,67,15,110,4,16 // movd (%r8,%r10,1),%xmm0
.byte 102,15,96,192 // punpcklbw %xmm0,%xmm0
.byte 102,15,97,192 // punpcklwd %xmm0,%xmm0
- .byte 102,15,219,5,71,50,1,0 // pand 0x13247(%rip),%xmm0 # 393a0 <_sk_srcover_bgra_8888_sse2_lowp+0xc9c>
+ .byte 102,15,219,5,167,51,1,0 // pand 0x133a7(%rip),%xmm0 # 39500 <_sk_srcover_bgra_8888_sse2_lowp+0xc8c>
.byte 15,91,216 // cvtdq2ps %xmm0,%xmm3
- .byte 15,89,29,157,51,1,0 // mulps 0x1339d(%rip),%xmm3 # 39500 <_sk_srcover_bgra_8888_sse2_lowp+0xdfc>
+ .byte 15,89,29,253,52,1,0 // mulps 0x134fd(%rip),%xmm3 # 39660 <_sk_srcover_bgra_8888_sse2_lowp+0xdec>
.byte 72,173 // lods %ds:(%rsi),%rax
.byte 15,87,192 // xorps %xmm0,%xmm0
.byte 102,15,87,201 // xorpd %xmm1,%xmm1
@@ -46795,9 +46795,9 @@ _sk_load_a8_dst_sse2:
.byte 102,67,15,110,36,16 // movd (%r8,%r10,1),%xmm4
.byte 102,15,96,224 // punpcklbw %xmm0,%xmm4
.byte 102,15,97,224 // punpcklwd %xmm0,%xmm4
- .byte 102,15,219,37,179,49,1,0 // pand 0x131b3(%rip),%xmm4 # 393a0 <_sk_srcover_bgra_8888_sse2_lowp+0xc9c>
+ .byte 102,15,219,37,19,51,1,0 // pand 0x13313(%rip),%xmm4 # 39500 <_sk_srcover_bgra_8888_sse2_lowp+0xc8c>
.byte 15,91,252 // cvtdq2ps %xmm4,%xmm7
- .byte 15,89,61,9,51,1,0 // mulps 0x13309(%rip),%xmm7 # 39500 <_sk_srcover_bgra_8888_sse2_lowp+0xdfc>
+ .byte 15,89,61,105,52,1,0 // mulps 0x13469(%rip),%xmm7 # 39660 <_sk_srcover_bgra_8888_sse2_lowp+0xdec>
.byte 72,173 // lods %ds:(%rsi),%rax
.byte 15,87,228 // xorps %xmm4,%xmm4
.byte 102,15,87,237 // xorpd %xmm5,%xmm5
@@ -46876,7 +46876,7 @@ _sk_gather_a8_sse2:
.byte 102,65,15,96,192 // punpcklbw %xmm8,%xmm0
.byte 102,65,15,97,192 // punpcklwd %xmm8,%xmm0
.byte 15,91,216 // cvtdq2ps %xmm0,%xmm3
- .byte 15,89,29,227,49,1,0 // mulps 0x131e3(%rip),%xmm3 # 39500 <_sk_srcover_bgra_8888_sse2_lowp+0xdfc>
+ .byte 15,89,29,67,51,1,0 // mulps 0x13343(%rip),%xmm3 # 39660 <_sk_srcover_bgra_8888_sse2_lowp+0xdec>
.byte 72,173 // lods %ds:(%rsi),%rax
.byte 15,87,192 // xorps %xmm0,%xmm0
.byte 102,15,239,201 // pxor %xmm1,%xmm1
@@ -46893,7 +46893,7 @@ _sk_store_a8_sse2:
.byte 77,15,175,193 // imul %r9,%r8
.byte 76,3,0 // add (%rax),%r8
.byte 76,99,210 // movslq %edx,%r10
- .byte 68,15,40,5,138,48,1,0 // movaps 0x1308a(%rip),%xmm8 # 393d0 <_sk_srcover_bgra_8888_sse2_lowp+0xccc>
+ .byte 68,15,40,5,234,49,1,0 // movaps 0x131ea(%rip),%xmm8 # 39530 <_sk_srcover_bgra_8888_sse2_lowp+0xcbc>
.byte 68,15,89,195 // mulps %xmm3,%xmm8
.byte 102,69,15,91,192 // cvtps2dq %xmm8,%xmm8
.byte 102,65,15,114,240,16 // pslld $0x10,%xmm8
@@ -46918,7 +46918,7 @@ _sk_store_a8_sse2:
.byte 117,217 // jne 26373 <_sk_store_a8_sse2+0x48>
.byte 102,65,15,197,192,4 // pextrw $0x4,%xmm8,%eax
.byte 67,136,68,16,2 // mov %al,0x2(%r8,%r10,1)
- .byte 102,68,15,219,5,242,47,1,0 // pand 0x12ff2(%rip),%xmm8 # 393a0 <_sk_srcover_bgra_8888_sse2_lowp+0xc9c>
+ .byte 102,68,15,219,5,82,49,1,0 // pand 0x13152(%rip),%xmm8 # 39500 <_sk_srcover_bgra_8888_sse2_lowp+0xc8c>
.byte 102,69,15,103,192 // packuswb %xmm8,%xmm8
.byte 102,69,15,103,192 // packuswb %xmm8,%xmm8
.byte 102,68,15,126,192 // movd %xmm8,%eax
@@ -46943,11 +46943,11 @@ _sk_load_g8_sse2:
.byte 102,67,15,110,4,16 // movd (%r8,%r10,1),%xmm0
.byte 102,15,96,192 // punpcklbw %xmm0,%xmm0
.byte 102,15,97,192 // punpcklwd %xmm0,%xmm0
- .byte 102,15,219,5,163,47,1,0 // pand 0x12fa3(%rip),%xmm0 # 393a0 <_sk_srcover_bgra_8888_sse2_lowp+0xc9c>
+ .byte 102,15,219,5,3,49,1,0 // pand 0x13103(%rip),%xmm0 # 39500 <_sk_srcover_bgra_8888_sse2_lowp+0xc8c>
.byte 15,91,192 // cvtdq2ps %xmm0,%xmm0
- .byte 15,89,5,249,48,1,0 // mulps 0x130f9(%rip),%xmm0 # 39500 <_sk_srcover_bgra_8888_sse2_lowp+0xdfc>
+ .byte 15,89,5,89,50,1,0 // mulps 0x13259(%rip),%xmm0 # 39660 <_sk_srcover_bgra_8888_sse2_lowp+0xdec>
.byte 72,173 // lods %ds:(%rsi),%rax
- .byte 15,40,29,224,46,1,0 // movaps 0x12ee0(%rip),%xmm3 # 392f0 <_sk_srcover_bgra_8888_sse2_lowp+0xbec>
+ .byte 15,40,29,64,48,1,0 // movaps 0x13040(%rip),%xmm3 # 39450 <_sk_srcover_bgra_8888_sse2_lowp+0xbdc>
.byte 15,40,200 // movaps %xmm0,%xmm1
.byte 15,40,208 // movaps %xmm0,%xmm2
.byte 255,224 // jmpq *%rax
@@ -46988,11 +46988,11 @@ _sk_load_g8_dst_sse2:
.byte 102,67,15,110,36,16 // movd (%r8,%r10,1),%xmm4
.byte 102,15,96,224 // punpcklbw %xmm0,%xmm4
.byte 102,15,97,224 // punpcklwd %xmm0,%xmm4
- .byte 102,15,219,37,12,47,1,0 // pand 0x12f0c(%rip),%xmm4 # 393a0 <_sk_srcover_bgra_8888_sse2_lowp+0xc9c>
+ .byte 102,15,219,37,108,48,1,0 // pand 0x1306c(%rip),%xmm4 # 39500 <_sk_srcover_bgra_8888_sse2_lowp+0xc8c>
.byte 15,91,228 // cvtdq2ps %xmm4,%xmm4
- .byte 15,89,37,98,48,1,0 // mulps 0x13062(%rip),%xmm4 # 39500 <_sk_srcover_bgra_8888_sse2_lowp+0xdfc>
+ .byte 15,89,37,194,49,1,0 // mulps 0x131c2(%rip),%xmm4 # 39660 <_sk_srcover_bgra_8888_sse2_lowp+0xdec>
.byte 72,173 // lods %ds:(%rsi),%rax
- .byte 15,40,61,73,46,1,0 // movaps 0x12e49(%rip),%xmm7 # 392f0 <_sk_srcover_bgra_8888_sse2_lowp+0xbec>
+ .byte 15,40,61,169,47,1,0 // movaps 0x12fa9(%rip),%xmm7 # 39450 <_sk_srcover_bgra_8888_sse2_lowp+0xbdc>
.byte 15,40,236 // movaps %xmm4,%xmm5
.byte 15,40,244 // movaps %xmm4,%xmm6
.byte 255,224 // jmpq *%rax
@@ -47069,9 +47069,9 @@ _sk_gather_g8_sse2:
.byte 102,65,15,96,192 // punpcklbw %xmm8,%xmm0
.byte 102,65,15,97,192 // punpcklwd %xmm8,%xmm0
.byte 15,91,192 // cvtdq2ps %xmm0,%xmm0
- .byte 15,89,5,57,47,1,0 // mulps 0x12f39(%rip),%xmm0 # 39500 <_sk_srcover_bgra_8888_sse2_lowp+0xdfc>
+ .byte 15,89,5,153,48,1,0 // mulps 0x13099(%rip),%xmm0 # 39660 <_sk_srcover_bgra_8888_sse2_lowp+0xdec>
.byte 72,173 // lods %ds:(%rsi),%rax
- .byte 15,40,29,32,45,1,0 // movaps 0x12d20(%rip),%xmm3 # 392f0 <_sk_srcover_bgra_8888_sse2_lowp+0xbec>
+ .byte 15,40,29,128,46,1,0 // movaps 0x12e80(%rip),%xmm3 # 39450 <_sk_srcover_bgra_8888_sse2_lowp+0xbdc>
.byte 15,40,200 // movaps %xmm0,%xmm1
.byte 15,40,208 // movaps %xmm0,%xmm2
.byte 255,224 // jmpq *%rax
@@ -47091,19 +47091,19 @@ _sk_load_565_sse2:
.byte 117,83 // jne 26646 <_sk_load_565_sse2+0x6e>
.byte 243,67,15,126,20,80 // movq (%r8,%r10,2),%xmm2
.byte 102,15,97,208 // punpcklwd %xmm0,%xmm2
- .byte 102,15,111,5,11,47,1,0 // movdqa 0x12f0b(%rip),%xmm0 # 39510 <_sk_srcover_bgra_8888_sse2_lowp+0xe0c>
+ .byte 102,15,111,5,107,48,1,0 // movdqa 0x1306b(%rip),%xmm0 # 39670 <_sk_srcover_bgra_8888_sse2_lowp+0xdfc>
.byte 102,15,219,194 // pand %xmm2,%xmm0
.byte 15,91,192 // cvtdq2ps %xmm0,%xmm0
- .byte 15,89,5,13,47,1,0 // mulps 0x12f0d(%rip),%xmm0 # 39520 <_sk_srcover_bgra_8888_sse2_lowp+0xe1c>
- .byte 102,15,111,13,21,47,1,0 // movdqa 0x12f15(%rip),%xmm1 # 39530 <_sk_srcover_bgra_8888_sse2_lowp+0xe2c>
+ .byte 15,89,5,109,48,1,0 // mulps 0x1306d(%rip),%xmm0 # 39680 <_sk_srcover_bgra_8888_sse2_lowp+0xe0c>
+ .byte 102,15,111,13,117,48,1,0 // movdqa 0x13075(%rip),%xmm1 # 39690 <_sk_srcover_bgra_8888_sse2_lowp+0xe1c>
.byte 102,15,219,202 // pand %xmm2,%xmm1
.byte 15,91,201 // cvtdq2ps %xmm1,%xmm1
- .byte 15,89,13,23,47,1,0 // mulps 0x12f17(%rip),%xmm1 # 39540 <_sk_srcover_bgra_8888_sse2_lowp+0xe3c>
- .byte 102,15,219,21,31,47,1,0 // pand 0x12f1f(%rip),%xmm2 # 39550 <_sk_srcover_bgra_8888_sse2_lowp+0xe4c>
+ .byte 15,89,13,119,48,1,0 // mulps 0x13077(%rip),%xmm1 # 396a0 <_sk_srcover_bgra_8888_sse2_lowp+0xe2c>
+ .byte 102,15,219,21,127,48,1,0 // pand 0x1307f(%rip),%xmm2 # 396b0 <_sk_srcover_bgra_8888_sse2_lowp+0xe3c>
.byte 15,91,210 // cvtdq2ps %xmm2,%xmm2
- .byte 15,89,21,37,47,1,0 // mulps 0x12f25(%rip),%xmm2 # 39560 <_sk_srcover_bgra_8888_sse2_lowp+0xe5c>
+ .byte 15,89,21,133,48,1,0 // mulps 0x13085(%rip),%xmm2 # 396c0 <_sk_srcover_bgra_8888_sse2_lowp+0xe4c>
.byte 72,173 // lods %ds:(%rsi),%rax
- .byte 15,40,29,172,44,1,0 // movaps 0x12cac(%rip),%xmm3 # 392f0 <_sk_srcover_bgra_8888_sse2_lowp+0xbec>
+ .byte 15,40,29,12,46,1,0 // movaps 0x12e0c(%rip),%xmm3 # 39450 <_sk_srcover_bgra_8888_sse2_lowp+0xbdc>
.byte 255,224 // jmpq *%rax
.byte 65,137,249 // mov %edi,%r9d
.byte 65,128,225,3 // and $0x3,%r9b
@@ -47140,19 +47140,19 @@ _sk_load_565_dst_sse2:
.byte 117,83 // jne 26702 <_sk_load_565_dst_sse2+0x6e>
.byte 243,67,15,126,52,80 // movq (%r8,%r10,2),%xmm6
.byte 102,15,97,240 // punpcklwd %xmm0,%xmm6
- .byte 102,15,111,37,79,46,1,0 // movdqa 0x12e4f(%rip),%xmm4 # 39510 <_sk_srcover_bgra_8888_sse2_lowp+0xe0c>
+ .byte 102,15,111,37,175,47,1,0 // movdqa 0x12faf(%rip),%xmm4 # 39670 <_sk_srcover_bgra_8888_sse2_lowp+0xdfc>
.byte 102,15,219,230 // pand %xmm6,%xmm4
.byte 15,91,228 // cvtdq2ps %xmm4,%xmm4
- .byte 15,89,37,81,46,1,0 // mulps 0x12e51(%rip),%xmm4 # 39520 <_sk_srcover_bgra_8888_sse2_lowp+0xe1c>
- .byte 102,15,111,45,89,46,1,0 // movdqa 0x12e59(%rip),%xmm5 # 39530 <_sk_srcover_bgra_8888_sse2_lowp+0xe2c>
+ .byte 15,89,37,177,47,1,0 // mulps 0x12fb1(%rip),%xmm4 # 39680 <_sk_srcover_bgra_8888_sse2_lowp+0xe0c>
+ .byte 102,15,111,45,185,47,1,0 // movdqa 0x12fb9(%rip),%xmm5 # 39690 <_sk_srcover_bgra_8888_sse2_lowp+0xe1c>
.byte 102,15,219,238 // pand %xmm6,%xmm5
.byte 15,91,237 // cvtdq2ps %xmm5,%xmm5
- .byte 15,89,45,91,46,1,0 // mulps 0x12e5b(%rip),%xmm5 # 39540 <_sk_srcover_bgra_8888_sse2_lowp+0xe3c>
- .byte 102,15,219,53,99,46,1,0 // pand 0x12e63(%rip),%xmm6 # 39550 <_sk_srcover_bgra_8888_sse2_lowp+0xe4c>
+ .byte 15,89,45,187,47,1,0 // mulps 0x12fbb(%rip),%xmm5 # 396a0 <_sk_srcover_bgra_8888_sse2_lowp+0xe2c>
+ .byte 102,15,219,53,195,47,1,0 // pand 0x12fc3(%rip),%xmm6 # 396b0 <_sk_srcover_bgra_8888_sse2_lowp+0xe3c>
.byte 15,91,246 // cvtdq2ps %xmm6,%xmm6
- .byte 15,89,53,105,46,1,0 // mulps 0x12e69(%rip),%xmm6 # 39560 <_sk_srcover_bgra_8888_sse2_lowp+0xe5c>
+ .byte 15,89,53,201,47,1,0 // mulps 0x12fc9(%rip),%xmm6 # 396c0 <_sk_srcover_bgra_8888_sse2_lowp+0xe4c>
.byte 72,173 // lods %ds:(%rsi),%rax
- .byte 15,40,61,240,43,1,0 // movaps 0x12bf0(%rip),%xmm7 # 392f0 <_sk_srcover_bgra_8888_sse2_lowp+0xbec>
+ .byte 15,40,61,80,45,1,0 // movaps 0x12d50(%rip),%xmm7 # 39450 <_sk_srcover_bgra_8888_sse2_lowp+0xbdc>
.byte 255,224 // jmpq *%rax
.byte 65,137,249 // mov %edi,%r9d
.byte 65,128,225,3 // and $0x3,%r9b
@@ -47221,19 +47221,19 @@ _sk_gather_565_sse2:
.byte 65,15,183,4,64 // movzwl (%r8,%rax,2),%eax
.byte 102,15,196,208,3 // pinsrw $0x3,%eax,%xmm2
.byte 102,65,15,97,208 // punpcklwd %xmm8,%xmm2
- .byte 102,15,111,5,254,44,1,0 // movdqa 0x12cfe(%rip),%xmm0 # 39510 <_sk_srcover_bgra_8888_sse2_lowp+0xe0c>
+ .byte 102,15,111,5,94,46,1,0 // movdqa 0x12e5e(%rip),%xmm0 # 39670 <_sk_srcover_bgra_8888_sse2_lowp+0xdfc>
.byte 102,15,219,194 // pand %xmm2,%xmm0
.byte 15,91,192 // cvtdq2ps %xmm0,%xmm0
- .byte 15,89,5,0,45,1,0 // mulps 0x12d00(%rip),%xmm0 # 39520 <_sk_srcover_bgra_8888_sse2_lowp+0xe1c>
- .byte 102,15,111,13,8,45,1,0 // movdqa 0x12d08(%rip),%xmm1 # 39530 <_sk_srcover_bgra_8888_sse2_lowp+0xe2c>
+ .byte 15,89,5,96,46,1,0 // mulps 0x12e60(%rip),%xmm0 # 39680 <_sk_srcover_bgra_8888_sse2_lowp+0xe0c>
+ .byte 102,15,111,13,104,46,1,0 // movdqa 0x12e68(%rip),%xmm1 # 39690 <_sk_srcover_bgra_8888_sse2_lowp+0xe1c>
.byte 102,15,219,202 // pand %xmm2,%xmm1
.byte 15,91,201 // cvtdq2ps %xmm1,%xmm1
- .byte 15,89,13,10,45,1,0 // mulps 0x12d0a(%rip),%xmm1 # 39540 <_sk_srcover_bgra_8888_sse2_lowp+0xe3c>
- .byte 102,15,219,21,18,45,1,0 // pand 0x12d12(%rip),%xmm2 # 39550 <_sk_srcover_bgra_8888_sse2_lowp+0xe4c>
+ .byte 15,89,13,106,46,1,0 // mulps 0x12e6a(%rip),%xmm1 # 396a0 <_sk_srcover_bgra_8888_sse2_lowp+0xe2c>
+ .byte 102,15,219,21,114,46,1,0 // pand 0x12e72(%rip),%xmm2 # 396b0 <_sk_srcover_bgra_8888_sse2_lowp+0xe3c>
.byte 15,91,210 // cvtdq2ps %xmm2,%xmm2
- .byte 15,89,21,24,45,1,0 // mulps 0x12d18(%rip),%xmm2 # 39560 <_sk_srcover_bgra_8888_sse2_lowp+0xe5c>
+ .byte 15,89,21,120,46,1,0 // mulps 0x12e78(%rip),%xmm2 # 396c0 <_sk_srcover_bgra_8888_sse2_lowp+0xe4c>
.byte 72,173 // lods %ds:(%rsi),%rax
- .byte 15,40,29,159,42,1,0 // movaps 0x12a9f(%rip),%xmm3 # 392f0 <_sk_srcover_bgra_8888_sse2_lowp+0xbec>
+ .byte 15,40,29,255,43,1,0 // movaps 0x12bff(%rip),%xmm3 # 39450 <_sk_srcover_bgra_8888_sse2_lowp+0xbdc>
.byte 255,224 // jmpq *%rax
HIDDEN _sk_store_565_sse2
@@ -47247,12 +47247,12 @@ _sk_store_565_sse2:
.byte 77,1,192 // add %r8,%r8
.byte 76,3,0 // add (%rax),%r8
.byte 76,99,210 // movslq %edx,%r10
- .byte 68,15,40,5,159,46,1,0 // movaps 0x12e9f(%rip),%xmm8 # 39710 <_sk_srcover_bgra_8888_sse2_lowp+0x100c>
+ .byte 68,15,40,5,255,47,1,0 // movaps 0x12fff(%rip),%xmm8 # 39870 <_sk_srcover_bgra_8888_sse2_lowp+0xffc>
.byte 68,15,40,200 // movaps %xmm0,%xmm9
.byte 69,15,89,200 // mulps %xmm8,%xmm9
.byte 102,69,15,91,201 // cvtps2dq %xmm9,%xmm9
.byte 102,65,15,114,241,11 // pslld $0xb,%xmm9
- .byte 68,15,40,21,148,46,1,0 // movaps 0x12e94(%rip),%xmm10 # 39720 <_sk_srcover_bgra_8888_sse2_lowp+0x101c>
+ .byte 68,15,40,21,244,47,1,0 // movaps 0x12ff4(%rip),%xmm10 # 39880 <_sk_srcover_bgra_8888_sse2_lowp+0x100c>
.byte 68,15,89,209 // mulps %xmm1,%xmm10
.byte 102,69,15,91,210 // cvtps2dq %xmm10,%xmm10
.byte 102,65,15,114,242,5 // pslld $0x5,%xmm10
@@ -47301,21 +47301,21 @@ _sk_load_4444_sse2:
.byte 117,98 // jne 2698f <_sk_load_4444_sse2+0x7d>
.byte 243,67,15,126,28,80 // movq (%r8,%r10,2),%xmm3
.byte 102,15,97,216 // punpcklwd %xmm0,%xmm3
- .byte 102,15,111,5,241,45,1,0 // movdqa 0x12df1(%rip),%xmm0 # 39730 <_sk_srcover_bgra_8888_sse2_lowp+0x102c>
+ .byte 102,15,111,5,81,47,1,0 // movdqa 0x12f51(%rip),%xmm0 # 39890 <_sk_srcover_bgra_8888_sse2_lowp+0x101c>
.byte 102,15,219,195 // pand %xmm3,%xmm0
.byte 15,91,192 // cvtdq2ps %xmm0,%xmm0
- .byte 15,89,5,243,45,1,0 // mulps 0x12df3(%rip),%xmm0 # 39740 <_sk_srcover_bgra_8888_sse2_lowp+0x103c>
- .byte 102,15,111,13,251,45,1,0 // movdqa 0x12dfb(%rip),%xmm1 # 39750 <_sk_srcover_bgra_8888_sse2_lowp+0x104c>
+ .byte 15,89,5,83,47,1,0 // mulps 0x12f53(%rip),%xmm0 # 398a0 <_sk_srcover_bgra_8888_sse2_lowp+0x102c>
+ .byte 102,15,111,13,91,47,1,0 // movdqa 0x12f5b(%rip),%xmm1 # 398b0 <_sk_srcover_bgra_8888_sse2_lowp+0x103c>
.byte 102,15,219,203 // pand %xmm3,%xmm1
.byte 15,91,201 // cvtdq2ps %xmm1,%xmm1
- .byte 15,89,13,253,45,1,0 // mulps 0x12dfd(%rip),%xmm1 # 39760 <_sk_srcover_bgra_8888_sse2_lowp+0x105c>
- .byte 102,15,111,21,5,46,1,0 // movdqa 0x12e05(%rip),%xmm2 # 39770 <_sk_srcover_bgra_8888_sse2_lowp+0x106c>
+ .byte 15,89,13,93,47,1,0 // mulps 0x12f5d(%rip),%xmm1 # 398c0 <_sk_srcover_bgra_8888_sse2_lowp+0x104c>
+ .byte 102,15,111,21,101,47,1,0 // movdqa 0x12f65(%rip),%xmm2 # 398d0 <_sk_srcover_bgra_8888_sse2_lowp+0x105c>
.byte 102,15,219,211 // pand %xmm3,%xmm2
.byte 15,91,210 // cvtdq2ps %xmm2,%xmm2
- .byte 15,89,21,7,46,1,0 // mulps 0x12e07(%rip),%xmm2 # 39780 <_sk_srcover_bgra_8888_sse2_lowp+0x107c>
- .byte 102,15,219,29,15,46,1,0 // pand 0x12e0f(%rip),%xmm3 # 39790 <_sk_srcover_bgra_8888_sse2_lowp+0x108c>
+ .byte 15,89,21,103,47,1,0 // mulps 0x12f67(%rip),%xmm2 # 398e0 <_sk_srcover_bgra_8888_sse2_lowp+0x106c>
+ .byte 102,15,219,29,111,47,1,0 // pand 0x12f6f(%rip),%xmm3 # 398f0 <_sk_srcover_bgra_8888_sse2_lowp+0x107c>
.byte 15,91,219 // cvtdq2ps %xmm3,%xmm3
- .byte 15,89,29,21,46,1,0 // mulps 0x12e15(%rip),%xmm3 # 397a0 <_sk_srcover_bgra_8888_sse2_lowp+0x109c>
+ .byte 15,89,29,117,47,1,0 // mulps 0x12f75(%rip),%xmm3 # 39900 <_sk_srcover_bgra_8888_sse2_lowp+0x108c>
.byte 72,173 // lods %ds:(%rsi),%rax
.byte 255,224 // jmpq *%rax
.byte 65,137,249 // mov %edi,%r9d
@@ -47353,21 +47353,21 @@ _sk_load_4444_dst_sse2:
.byte 117,98 // jne 26a5a <_sk_load_4444_dst_sse2+0x7d>
.byte 243,67,15,126,60,80 // movq (%r8,%r10,2),%xmm7
.byte 102,15,97,248 // punpcklwd %xmm0,%xmm7
- .byte 102,15,111,37,38,45,1,0 // movdqa 0x12d26(%rip),%xmm4 # 39730 <_sk_srcover_bgra_8888_sse2_lowp+0x102c>
+ .byte 102,15,111,37,134,46,1,0 // movdqa 0x12e86(%rip),%xmm4 # 39890 <_sk_srcover_bgra_8888_sse2_lowp+0x101c>
.byte 102,15,219,231 // pand %xmm7,%xmm4
.byte 15,91,228 // cvtdq2ps %xmm4,%xmm4
- .byte 15,89,37,40,45,1,0 // mulps 0x12d28(%rip),%xmm4 # 39740 <_sk_srcover_bgra_8888_sse2_lowp+0x103c>
- .byte 102,15,111,45,48,45,1,0 // movdqa 0x12d30(%rip),%xmm5 # 39750 <_sk_srcover_bgra_8888_sse2_lowp+0x104c>
+ .byte 15,89,37,136,46,1,0 // mulps 0x12e88(%rip),%xmm4 # 398a0 <_sk_srcover_bgra_8888_sse2_lowp+0x102c>
+ .byte 102,15,111,45,144,46,1,0 // movdqa 0x12e90(%rip),%xmm5 # 398b0 <_sk_srcover_bgra_8888_sse2_lowp+0x103c>
.byte 102,15,219,239 // pand %xmm7,%xmm5
.byte 15,91,237 // cvtdq2ps %xmm5,%xmm5
- .byte 15,89,45,50,45,1,0 // mulps 0x12d32(%rip),%xmm5 # 39760 <_sk_srcover_bgra_8888_sse2_lowp+0x105c>
- .byte 102,15,111,53,58,45,1,0 // movdqa 0x12d3a(%rip),%xmm6 # 39770 <_sk_srcover_bgra_8888_sse2_lowp+0x106c>
+ .byte 15,89,45,146,46,1,0 // mulps 0x12e92(%rip),%xmm5 # 398c0 <_sk_srcover_bgra_8888_sse2_lowp+0x104c>
+ .byte 102,15,111,53,154,46,1,0 // movdqa 0x12e9a(%rip),%xmm6 # 398d0 <_sk_srcover_bgra_8888_sse2_lowp+0x105c>
.byte 102,15,219,247 // pand %xmm7,%xmm6
.byte 15,91,246 // cvtdq2ps %xmm6,%xmm6
- .byte 15,89,53,60,45,1,0 // mulps 0x12d3c(%rip),%xmm6 # 39780 <_sk_srcover_bgra_8888_sse2_lowp+0x107c>
- .byte 102,15,219,61,68,45,1,0 // pand 0x12d44(%rip),%xmm7 # 39790 <_sk_srcover_bgra_8888_sse2_lowp+0x108c>
+ .byte 15,89,53,156,46,1,0 // mulps 0x12e9c(%rip),%xmm6 # 398e0 <_sk_srcover_bgra_8888_sse2_lowp+0x106c>
+ .byte 102,15,219,61,164,46,1,0 // pand 0x12ea4(%rip),%xmm7 # 398f0 <_sk_srcover_bgra_8888_sse2_lowp+0x107c>
.byte 15,91,255 // cvtdq2ps %xmm7,%xmm7
- .byte 15,89,61,74,45,1,0 // mulps 0x12d4a(%rip),%xmm7 # 397a0 <_sk_srcover_bgra_8888_sse2_lowp+0x109c>
+ .byte 15,89,61,170,46,1,0 // mulps 0x12eaa(%rip),%xmm7 # 39900 <_sk_srcover_bgra_8888_sse2_lowp+0x108c>
.byte 72,173 // lods %ds:(%rsi),%rax
.byte 255,224 // jmpq *%rax
.byte 65,137,249 // mov %edi,%r9d
@@ -47437,21 +47437,21 @@ _sk_gather_4444_sse2:
.byte 65,15,183,4,64 // movzwl (%r8,%rax,2),%eax
.byte 102,15,196,216,3 // pinsrw $0x3,%eax,%xmm3
.byte 102,65,15,97,216 // punpcklwd %xmm8,%xmm3
- .byte 102,15,111,5,198,43,1,0 // movdqa 0x12bc6(%rip),%xmm0 # 39730 <_sk_srcover_bgra_8888_sse2_lowp+0x102c>
+ .byte 102,15,111,5,38,45,1,0 // movdqa 0x12d26(%rip),%xmm0 # 39890 <_sk_srcover_bgra_8888_sse2_lowp+0x101c>
.byte 102,15,219,195 // pand %xmm3,%xmm0
.byte 15,91,192 // cvtdq2ps %xmm0,%xmm0
- .byte 15,89,5,200,43,1,0 // mulps 0x12bc8(%rip),%xmm0 # 39740 <_sk_srcover_bgra_8888_sse2_lowp+0x103c>
- .byte 102,15,111,13,208,43,1,0 // movdqa 0x12bd0(%rip),%xmm1 # 39750 <_sk_srcover_bgra_8888_sse2_lowp+0x104c>
+ .byte 15,89,5,40,45,1,0 // mulps 0x12d28(%rip),%xmm0 # 398a0 <_sk_srcover_bgra_8888_sse2_lowp+0x102c>
+ .byte 102,15,111,13,48,45,1,0 // movdqa 0x12d30(%rip),%xmm1 # 398b0 <_sk_srcover_bgra_8888_sse2_lowp+0x103c>
.byte 102,15,219,203 // pand %xmm3,%xmm1
.byte 15,91,201 // cvtdq2ps %xmm1,%xmm1
- .byte 15,89,13,210,43,1,0 // mulps 0x12bd2(%rip),%xmm1 # 39760 <_sk_srcover_bgra_8888_sse2_lowp+0x105c>
- .byte 102,15,111,21,218,43,1,0 // movdqa 0x12bda(%rip),%xmm2 # 39770 <_sk_srcover_bgra_8888_sse2_lowp+0x106c>
+ .byte 15,89,13,50,45,1,0 // mulps 0x12d32(%rip),%xmm1 # 398c0 <_sk_srcover_bgra_8888_sse2_lowp+0x104c>
+ .byte 102,15,111,21,58,45,1,0 // movdqa 0x12d3a(%rip),%xmm2 # 398d0 <_sk_srcover_bgra_8888_sse2_lowp+0x105c>
.byte 102,15,219,211 // pand %xmm3,%xmm2
.byte 15,91,210 // cvtdq2ps %xmm2,%xmm2
- .byte 15,89,21,220,43,1,0 // mulps 0x12bdc(%rip),%xmm2 # 39780 <_sk_srcover_bgra_8888_sse2_lowp+0x107c>
- .byte 102,15,219,29,228,43,1,0 // pand 0x12be4(%rip),%xmm3 # 39790 <_sk_srcover_bgra_8888_sse2_lowp+0x108c>
+ .byte 15,89,21,60,45,1,0 // mulps 0x12d3c(%rip),%xmm2 # 398e0 <_sk_srcover_bgra_8888_sse2_lowp+0x106c>
+ .byte 102,15,219,29,68,45,1,0 // pand 0x12d44(%rip),%xmm3 # 398f0 <_sk_srcover_bgra_8888_sse2_lowp+0x107c>
.byte 15,91,219 // cvtdq2ps %xmm3,%xmm3
- .byte 15,89,29,234,43,1,0 // mulps 0x12bea(%rip),%xmm3 # 397a0 <_sk_srcover_bgra_8888_sse2_lowp+0x109c>
+ .byte 15,89,29,74,45,1,0 // mulps 0x12d4a(%rip),%xmm3 # 39900 <_sk_srcover_bgra_8888_sse2_lowp+0x108c>
.byte 72,173 // lods %ds:(%rsi),%rax
.byte 255,224 // jmpq *%rax
@@ -47466,7 +47466,7 @@ _sk_store_4444_sse2:
.byte 77,1,192 // add %r8,%r8
.byte 76,3,0 // add (%rax),%r8
.byte 76,99,210 // movslq %edx,%r10
- .byte 68,15,40,5,216,43,1,0 // movaps 0x12bd8(%rip),%xmm8 # 397b0 <_sk_srcover_bgra_8888_sse2_lowp+0x10ac>
+ .byte 68,15,40,5,56,45,1,0 // movaps 0x12d38(%rip),%xmm8 # 39910 <_sk_srcover_bgra_8888_sse2_lowp+0x109c>
.byte 68,15,40,200 // movaps %xmm0,%xmm9
.byte 69,15,89,200 // mulps %xmm8,%xmm9
.byte 102,69,15,91,201 // cvtps2dq %xmm9,%xmm9
@@ -47524,11 +47524,11 @@ _sk_load_8888_sse2:
.byte 72,133,255 // test %rdi,%rdi
.byte 117,98 // jne 26d0b <_sk_load_8888_sse2+0x7e>
.byte 243,69,15,111,12,128 // movdqu (%r8,%rax,4),%xmm9
- .byte 102,15,111,21,233,38,1,0 // movdqa 0x126e9(%rip),%xmm2 # 393a0 <_sk_srcover_bgra_8888_sse2_lowp+0xc9c>
+ .byte 102,15,111,21,73,40,1,0 // movdqa 0x12849(%rip),%xmm2 # 39500 <_sk_srcover_bgra_8888_sse2_lowp+0xc8c>
.byte 102,65,15,111,193 // movdqa %xmm9,%xmm0
.byte 102,15,219,194 // pand %xmm2,%xmm0
.byte 15,91,192 // cvtdq2ps %xmm0,%xmm0
- .byte 68,15,40,5,53,40,1,0 // movaps 0x12835(%rip),%xmm8 # 39500 <_sk_srcover_bgra_8888_sse2_lowp+0xdfc>
+ .byte 68,15,40,5,149,41,1,0 // movaps 0x12995(%rip),%xmm8 # 39660 <_sk_srcover_bgra_8888_sse2_lowp+0xdec>
.byte 65,15,89,192 // mulps %xmm8,%xmm0
.byte 102,65,15,111,201 // movdqa %xmm9,%xmm1
.byte 102,15,114,209,8 // psrld $0x8,%xmm1
@@ -47575,11 +47575,11 @@ _sk_load_8888_dst_sse2:
.byte 72,133,255 // test %rdi,%rdi
.byte 117,98 // jne 26dca <_sk_load_8888_dst_sse2+0x7e>
.byte 243,69,15,111,12,128 // movdqu (%r8,%rax,4),%xmm9
- .byte 102,15,111,53,42,38,1,0 // movdqa 0x1262a(%rip),%xmm6 # 393a0 <_sk_srcover_bgra_8888_sse2_lowp+0xc9c>
+ .byte 102,15,111,53,138,39,1,0 // movdqa 0x1278a(%rip),%xmm6 # 39500 <_sk_srcover_bgra_8888_sse2_lowp+0xc8c>
.byte 102,65,15,111,225 // movdqa %xmm9,%xmm4
.byte 102,15,219,230 // pand %xmm6,%xmm4
.byte 15,91,228 // cvtdq2ps %xmm4,%xmm4
- .byte 68,15,40,5,118,39,1,0 // movaps 0x12776(%rip),%xmm8 # 39500 <_sk_srcover_bgra_8888_sse2_lowp+0xdfc>
+ .byte 68,15,40,5,214,40,1,0 // movaps 0x128d6(%rip),%xmm8 # 39660 <_sk_srcover_bgra_8888_sse2_lowp+0xdec>
.byte 65,15,89,224 // mulps %xmm8,%xmm4
.byte 102,65,15,111,233 // movdqa %xmm9,%xmm5
.byte 102,15,114,213,8 // psrld $0x8,%xmm5
@@ -47656,11 +47656,11 @@ _sk_gather_8888_sse2:
.byte 102,67,15,110,12,136 // movd (%r8,%r9,4),%xmm1
.byte 102,15,98,200 // punpckldq %xmm0,%xmm1
.byte 102,68,15,108,201 // punpcklqdq %xmm1,%xmm9
- .byte 102,15,111,21,221,36,1,0 // movdqa 0x124dd(%rip),%xmm2 # 393a0 <_sk_srcover_bgra_8888_sse2_lowp+0xc9c>
+ .byte 102,15,111,21,61,38,1,0 // movdqa 0x1263d(%rip),%xmm2 # 39500 <_sk_srcover_bgra_8888_sse2_lowp+0xc8c>
.byte 102,65,15,111,193 // movdqa %xmm9,%xmm0
.byte 102,15,219,194 // pand %xmm2,%xmm0
.byte 15,91,192 // cvtdq2ps %xmm0,%xmm0
- .byte 68,15,40,5,41,38,1,0 // movaps 0x12629(%rip),%xmm8 # 39500 <_sk_srcover_bgra_8888_sse2_lowp+0xdfc>
+ .byte 68,15,40,5,137,39,1,0 // movaps 0x12789(%rip),%xmm8 # 39660 <_sk_srcover_bgra_8888_sse2_lowp+0xdec>
.byte 65,15,89,192 // mulps %xmm8,%xmm0
.byte 102,65,15,111,201 // movdqa %xmm9,%xmm1
.byte 102,15,114,209,8 // psrld $0x8,%xmm1
@@ -47689,7 +47689,7 @@ _sk_store_8888_sse2:
.byte 73,193,224,2 // shl $0x2,%r8
.byte 76,3,0 // add (%rax),%r8
.byte 72,99,194 // movslq %edx,%rax
- .byte 68,15,40,5,154,36,1,0 // movaps 0x1249a(%rip),%xmm8 # 393d0 <_sk_srcover_bgra_8888_sse2_lowp+0xccc>
+ .byte 68,15,40,5,250,37,1,0 // movaps 0x125fa(%rip),%xmm8 # 39530 <_sk_srcover_bgra_8888_sse2_lowp+0xcbc>
.byte 68,15,40,200 // movaps %xmm0,%xmm9
.byte 69,15,89,200 // mulps %xmm8,%xmm9
.byte 102,69,15,91,201 // cvtps2dq %xmm9,%xmm9
@@ -47741,11 +47741,11 @@ _sk_load_bgra_sse2:
.byte 72,133,255 // test %rdi,%rdi
.byte 117,98 // jne 2704a <_sk_load_bgra_sse2+0x7e>
.byte 243,69,15,111,12,128 // movdqu (%r8,%rax,4),%xmm9
- .byte 102,15,111,5,170,35,1,0 // movdqa 0x123aa(%rip),%xmm0 # 393a0 <_sk_srcover_bgra_8888_sse2_lowp+0xc9c>
+ .byte 102,15,111,5,10,37,1,0 // movdqa 0x1250a(%rip),%xmm0 # 39500 <_sk_srcover_bgra_8888_sse2_lowp+0xc8c>
.byte 102,65,15,111,201 // movdqa %xmm9,%xmm1
.byte 102,15,219,200 // pand %xmm0,%xmm1
.byte 15,91,209 // cvtdq2ps %xmm1,%xmm2
- .byte 68,15,40,5,246,36,1,0 // movaps 0x124f6(%rip),%xmm8 # 39500 <_sk_srcover_bgra_8888_sse2_lowp+0xdfc>
+ .byte 68,15,40,5,86,38,1,0 // movaps 0x12656(%rip),%xmm8 # 39660 <_sk_srcover_bgra_8888_sse2_lowp+0xdec>
.byte 65,15,89,208 // mulps %xmm8,%xmm2
.byte 102,65,15,111,201 // movdqa %xmm9,%xmm1
.byte 102,15,114,209,8 // psrld $0x8,%xmm1
@@ -47792,11 +47792,11 @@ _sk_load_bgra_dst_sse2:
.byte 72,133,255 // test %rdi,%rdi
.byte 117,98 // jne 27109 <_sk_load_bgra_dst_sse2+0x7e>
.byte 243,69,15,111,12,128 // movdqu (%r8,%rax,4),%xmm9
- .byte 102,15,111,37,235,34,1,0 // movdqa 0x122eb(%rip),%xmm4 # 393a0 <_sk_srcover_bgra_8888_sse2_lowp+0xc9c>
+ .byte 102,15,111,37,75,36,1,0 // movdqa 0x1244b(%rip),%xmm4 # 39500 <_sk_srcover_bgra_8888_sse2_lowp+0xc8c>
.byte 102,65,15,111,233 // movdqa %xmm9,%xmm5
.byte 102,15,219,236 // pand %xmm4,%xmm5
.byte 15,91,245 // cvtdq2ps %xmm5,%xmm6
- .byte 68,15,40,5,55,36,1,0 // movaps 0x12437(%rip),%xmm8 # 39500 <_sk_srcover_bgra_8888_sse2_lowp+0xdfc>
+ .byte 68,15,40,5,151,37,1,0 // movaps 0x12597(%rip),%xmm8 # 39660 <_sk_srcover_bgra_8888_sse2_lowp+0xdec>
.byte 65,15,89,240 // mulps %xmm8,%xmm6
.byte 102,65,15,111,233 // movdqa %xmm9,%xmm5
.byte 102,15,114,213,8 // psrld $0x8,%xmm5
@@ -47873,11 +47873,11 @@ _sk_gather_bgra_sse2:
.byte 102,67,15,110,12,136 // movd (%r8,%r9,4),%xmm1
.byte 102,15,98,200 // punpckldq %xmm0,%xmm1
.byte 102,68,15,108,201 // punpcklqdq %xmm1,%xmm9
- .byte 102,15,111,5,158,33,1,0 // movdqa 0x1219e(%rip),%xmm0 # 393a0 <_sk_srcover_bgra_8888_sse2_lowp+0xc9c>
+ .byte 102,15,111,5,254,34,1,0 // movdqa 0x122fe(%rip),%xmm0 # 39500 <_sk_srcover_bgra_8888_sse2_lowp+0xc8c>
.byte 102,65,15,111,201 // movdqa %xmm9,%xmm1
.byte 102,15,219,200 // pand %xmm0,%xmm1
.byte 15,91,209 // cvtdq2ps %xmm1,%xmm2
- .byte 68,15,40,5,234,34,1,0 // movaps 0x122ea(%rip),%xmm8 # 39500 <_sk_srcover_bgra_8888_sse2_lowp+0xdfc>
+ .byte 68,15,40,5,74,36,1,0 // movaps 0x1244a(%rip),%xmm8 # 39660 <_sk_srcover_bgra_8888_sse2_lowp+0xdec>
.byte 65,15,89,208 // mulps %xmm8,%xmm2
.byte 102,65,15,111,201 // movdqa %xmm9,%xmm1
.byte 102,15,114,209,8 // psrld $0x8,%xmm1
@@ -47906,7 +47906,7 @@ _sk_store_bgra_sse2:
.byte 73,193,224,2 // shl $0x2,%r8
.byte 76,3,0 // add (%rax),%r8
.byte 72,99,194 // movslq %edx,%rax
- .byte 68,15,40,5,91,33,1,0 // movaps 0x1215b(%rip),%xmm8 # 393d0 <_sk_srcover_bgra_8888_sse2_lowp+0xccc>
+ .byte 68,15,40,5,187,34,1,0 // movaps 0x122bb(%rip),%xmm8 # 39530 <_sk_srcover_bgra_8888_sse2_lowp+0xcbc>
.byte 68,15,40,202 // movaps %xmm2,%xmm9
.byte 69,15,89,200 // mulps %xmm8,%xmm9
.byte 102,69,15,91,201 // cvtps2dq %xmm9,%xmm9
@@ -47968,18 +47968,18 @@ _sk_load_f16_sse2:
.byte 102,69,15,239,210 // pxor %xmm10,%xmm10
.byte 102,15,111,202 // movdqa %xmm2,%xmm1
.byte 102,65,15,97,202 // punpcklwd %xmm10,%xmm1
- .byte 102,68,15,111,13,85,36,1,0 // movdqa 0x12455(%rip),%xmm9 # 397c0 <_sk_srcover_bgra_8888_sse2_lowp+0x10bc>
+ .byte 102,68,15,111,13,181,37,1,0 // movdqa 0x125b5(%rip),%xmm9 # 39920 <_sk_srcover_bgra_8888_sse2_lowp+0x10ac>
.byte 102,68,15,111,225 // movdqa %xmm1,%xmm12
.byte 102,69,15,219,225 // pand %xmm9,%xmm12
- .byte 102,68,15,111,29,82,36,1,0 // movdqa 0x12452(%rip),%xmm11 # 397d0 <_sk_srcover_bgra_8888_sse2_lowp+0x10cc>
+ .byte 102,68,15,111,29,178,37,1,0 // movdqa 0x125b2(%rip),%xmm11 # 39930 <_sk_srcover_bgra_8888_sse2_lowp+0x10bc>
.byte 102,65,15,219,203 // pand %xmm11,%xmm1
- .byte 102,15,111,29,85,36,1,0 // movdqa 0x12455(%rip),%xmm3 # 397e0 <_sk_srcover_bgra_8888_sse2_lowp+0x10dc>
+ .byte 102,15,111,29,181,37,1,0 // movdqa 0x125b5(%rip),%xmm3 # 39940 <_sk_srcover_bgra_8888_sse2_lowp+0x10cc>
.byte 102,65,15,114,244,16 // pslld $0x10,%xmm12
.byte 102,15,111,195 // movdqa %xmm3,%xmm0
.byte 102,15,102,193 // pcmpgtd %xmm1,%xmm0
.byte 102,15,114,241,13 // pslld $0xd,%xmm1
.byte 102,65,15,235,204 // por %xmm12,%xmm1
- .byte 102,68,15,111,37,68,36,1,0 // movdqa 0x12444(%rip),%xmm12 # 397f0 <_sk_srcover_bgra_8888_sse2_lowp+0x10ec>
+ .byte 102,68,15,111,37,164,37,1,0 // movdqa 0x125a4(%rip),%xmm12 # 39950 <_sk_srcover_bgra_8888_sse2_lowp+0x10dc>
.byte 102,65,15,254,204 // paddd %xmm12,%xmm1
.byte 102,15,223,193 // pandn %xmm1,%xmm0
.byte 102,65,15,105,210 // punpckhwd %xmm10,%xmm2
@@ -48053,18 +48053,18 @@ _sk_load_f16_dst_sse2:
.byte 102,69,15,239,210 // pxor %xmm10,%xmm10
.byte 102,15,111,238 // movdqa %xmm6,%xmm5
.byte 102,65,15,97,234 // punpcklwd %xmm10,%xmm5
- .byte 102,68,15,111,13,204,34,1,0 // movdqa 0x122cc(%rip),%xmm9 # 397c0 <_sk_srcover_bgra_8888_sse2_lowp+0x10bc>
+ .byte 102,68,15,111,13,44,36,1,0 // movdqa 0x1242c(%rip),%xmm9 # 39920 <_sk_srcover_bgra_8888_sse2_lowp+0x10ac>
.byte 102,68,15,111,229 // movdqa %xmm5,%xmm12
.byte 102,69,15,219,225 // pand %xmm9,%xmm12
- .byte 102,68,15,111,29,201,34,1,0 // movdqa 0x122c9(%rip),%xmm11 # 397d0 <_sk_srcover_bgra_8888_sse2_lowp+0x10cc>
+ .byte 102,68,15,111,29,41,36,1,0 // movdqa 0x12429(%rip),%xmm11 # 39930 <_sk_srcover_bgra_8888_sse2_lowp+0x10bc>
.byte 102,65,15,219,235 // pand %xmm11,%xmm5
- .byte 102,15,111,61,204,34,1,0 // movdqa 0x122cc(%rip),%xmm7 # 397e0 <_sk_srcover_bgra_8888_sse2_lowp+0x10dc>
+ .byte 102,15,111,61,44,36,1,0 // movdqa 0x1242c(%rip),%xmm7 # 39940 <_sk_srcover_bgra_8888_sse2_lowp+0x10cc>
.byte 102,65,15,114,244,16 // pslld $0x10,%xmm12
.byte 102,15,111,231 // movdqa %xmm7,%xmm4
.byte 102,15,102,229 // pcmpgtd %xmm5,%xmm4
.byte 102,15,114,245,13 // pslld $0xd,%xmm5
.byte 102,65,15,235,236 // por %xmm12,%xmm5
- .byte 102,68,15,111,37,187,34,1,0 // movdqa 0x122bb(%rip),%xmm12 # 397f0 <_sk_srcover_bgra_8888_sse2_lowp+0x10ec>
+ .byte 102,68,15,111,37,27,36,1,0 // movdqa 0x1241b(%rip),%xmm12 # 39950 <_sk_srcover_bgra_8888_sse2_lowp+0x10dc>
.byte 102,65,15,254,236 // paddd %xmm12,%xmm5
.byte 102,15,223,229 // pandn %xmm5,%xmm4
.byte 102,65,15,105,242 // punpckhwd %xmm10,%xmm6
@@ -48166,18 +48166,18 @@ _sk_gather_f16_sse2:
.byte 102,68,15,105,201 // punpckhwd %xmm1,%xmm9
.byte 102,15,111,202 // movdqa %xmm2,%xmm1
.byte 102,65,15,97,200 // punpcklwd %xmm8,%xmm1
- .byte 102,68,15,111,21,202,32,1,0 // movdqa 0x120ca(%rip),%xmm10 # 397c0 <_sk_srcover_bgra_8888_sse2_lowp+0x10bc>
+ .byte 102,68,15,111,21,42,34,1,0 // movdqa 0x1222a(%rip),%xmm10 # 39920 <_sk_srcover_bgra_8888_sse2_lowp+0x10ac>
.byte 102,68,15,111,225 // movdqa %xmm1,%xmm12
.byte 102,69,15,219,226 // pand %xmm10,%xmm12
- .byte 102,68,15,111,29,199,32,1,0 // movdqa 0x120c7(%rip),%xmm11 # 397d0 <_sk_srcover_bgra_8888_sse2_lowp+0x10cc>
+ .byte 102,68,15,111,29,39,34,1,0 // movdqa 0x12227(%rip),%xmm11 # 39930 <_sk_srcover_bgra_8888_sse2_lowp+0x10bc>
.byte 102,65,15,219,203 // pand %xmm11,%xmm1
- .byte 102,15,111,29,202,32,1,0 // movdqa 0x120ca(%rip),%xmm3 # 397e0 <_sk_srcover_bgra_8888_sse2_lowp+0x10dc>
+ .byte 102,15,111,29,42,34,1,0 // movdqa 0x1222a(%rip),%xmm3 # 39940 <_sk_srcover_bgra_8888_sse2_lowp+0x10cc>
.byte 102,65,15,114,244,16 // pslld $0x10,%xmm12
.byte 102,15,111,195 // movdqa %xmm3,%xmm0
.byte 102,15,102,193 // pcmpgtd %xmm1,%xmm0
.byte 102,15,114,241,13 // pslld $0xd,%xmm1
.byte 102,65,15,235,204 // por %xmm12,%xmm1
- .byte 102,68,15,111,37,185,32,1,0 // movdqa 0x120b9(%rip),%xmm12 # 397f0 <_sk_srcover_bgra_8888_sse2_lowp+0x10ec>
+ .byte 102,68,15,111,37,25,34,1,0 // movdqa 0x12219(%rip),%xmm12 # 39950 <_sk_srcover_bgra_8888_sse2_lowp+0x10dc>
.byte 102,65,15,254,204 // paddd %xmm12,%xmm1
.byte 102,15,223,193 // pandn %xmm1,%xmm0
.byte 102,65,15,105,208 // punpckhwd %xmm8,%xmm2
@@ -48220,19 +48220,19 @@ HIDDEN _sk_store_f16_sse2
FUNCTION(_sk_store_f16_sse2)
_sk_store_f16_sse2:
.byte 15,41,124,36,232 // movaps %xmm7,-0x18(%rsp)
- .byte 102,68,15,111,29,13,32,1,0 // movdqa 0x1200d(%rip),%xmm11 # 39800 <_sk_srcover_bgra_8888_sse2_lowp+0x10fc>
+ .byte 102,68,15,111,29,109,33,1,0 // movdqa 0x1216d(%rip),%xmm11 # 39960 <_sk_srcover_bgra_8888_sse2_lowp+0x10ec>
.byte 102,68,15,111,192 // movdqa %xmm0,%xmm8
.byte 102,69,15,219,195 // pand %xmm11,%xmm8
- .byte 102,68,15,111,21,10,32,1,0 // movdqa 0x1200a(%rip),%xmm10 # 39810 <_sk_srcover_bgra_8888_sse2_lowp+0x110c>
+ .byte 102,68,15,111,21,106,33,1,0 // movdqa 0x1216a(%rip),%xmm10 # 39970 <_sk_srcover_bgra_8888_sse2_lowp+0x10fc>
.byte 102,68,15,111,240 // movdqa %xmm0,%xmm14
.byte 102,69,15,219,242 // pand %xmm10,%xmm14
- .byte 102,15,111,61,8,32,1,0 // movdqa 0x12008(%rip),%xmm7 # 39820 <_sk_srcover_bgra_8888_sse2_lowp+0x111c>
- .byte 102,68,15,111,37,175,33,1,0 // movdqa 0x121af(%rip),%xmm12 # 399d0 <_sk_srcover_bgra_8888_sse2_lowp+0x12cc>
+ .byte 102,15,111,61,104,33,1,0 // movdqa 0x12168(%rip),%xmm7 # 39980 <_sk_srcover_bgra_8888_sse2_lowp+0x110c>
+ .byte 102,68,15,111,37,15,35,1,0 // movdqa 0x1230f(%rip),%xmm12 # 39b30 <_sk_srcover_bgra_8888_sse2_lowp+0x12bc>
.byte 102,68,15,111,248 // movdqa %xmm0,%xmm15
.byte 102,65,15,114,247,3 // pslld $0x3,%xmm15
.byte 102,69,15,219,252 // pand %xmm12,%xmm15
.byte 102,69,15,254,248 // paddd %xmm8,%xmm15
- .byte 102,68,15,111,45,97,28,1,0 // movdqa 0x11c61(%rip),%xmm13 # 394a0 <_sk_srcover_bgra_8888_sse2_lowp+0xd9c>
+ .byte 102,68,15,111,45,193,29,1,0 // movdqa 0x11dc1(%rip),%xmm13 # 39600 <_sk_srcover_bgra_8888_sse2_lowp+0xd8c>
.byte 102,69,15,254,253 // paddd %xmm13,%xmm15
.byte 102,65,15,114,231,16 // psrad $0x10,%xmm15
.byte 102,68,15,111,199 // movdqa %xmm7,%xmm8
@@ -48339,7 +48339,7 @@ _sk_load_u16_be_sse2:
.byte 102,69,15,239,201 // pxor %xmm9,%xmm9
.byte 102,65,15,97,201 // punpcklwd %xmm9,%xmm1
.byte 15,91,193 // cvtdq2ps %xmm1,%xmm0
- .byte 68,15,40,5,118,27,1,0 // movaps 0x11b76(%rip),%xmm8 # 39590 <_sk_srcover_bgra_8888_sse2_lowp+0xe8c>
+ .byte 68,15,40,5,214,28,1,0 // movaps 0x11cd6(%rip),%xmm8 # 396f0 <_sk_srcover_bgra_8888_sse2_lowp+0xe7c>
.byte 65,15,89,192 // mulps %xmm8,%xmm0
.byte 102,15,111,203 // movdqa %xmm3,%xmm1
.byte 102,15,113,241,8 // psllw $0x8,%xmm1
@@ -48414,7 +48414,7 @@ _sk_load_rgb_u16_be_sse2:
.byte 102,69,15,239,192 // pxor %xmm8,%xmm8
.byte 102,65,15,97,200 // punpcklwd %xmm8,%xmm1
.byte 15,91,193 // cvtdq2ps %xmm1,%xmm0
- .byte 68,15,40,13,64,26,1,0 // movaps 0x11a40(%rip),%xmm9 # 39590 <_sk_srcover_bgra_8888_sse2_lowp+0xe8c>
+ .byte 68,15,40,13,160,27,1,0 // movaps 0x11ba0(%rip),%xmm9 # 396f0 <_sk_srcover_bgra_8888_sse2_lowp+0xe7c>
.byte 65,15,89,193 // mulps %xmm9,%xmm0
.byte 102,15,111,203 // movdqa %xmm3,%xmm1
.byte 102,15,113,241,8 // psllw $0x8,%xmm1
@@ -48431,7 +48431,7 @@ _sk_load_rgb_u16_be_sse2:
.byte 15,91,210 // cvtdq2ps %xmm2,%xmm2
.byte 65,15,89,209 // mulps %xmm9,%xmm2
.byte 72,173 // lods %ds:(%rsi),%rax
- .byte 15,40,29,87,23,1,0 // movaps 0x11757(%rip),%xmm3 # 392f0 <_sk_srcover_bgra_8888_sse2_lowp+0xbec>
+ .byte 15,40,29,183,24,1,0 // movaps 0x118b7(%rip),%xmm3 # 39450 <_sk_srcover_bgra_8888_sse2_lowp+0xbdc>
.byte 255,224 // jmpq *%rax
.byte 102,65,15,110,20,64 // movd (%r8,%rax,2),%xmm2
.byte 102,15,239,201 // pxor %xmm1,%xmm1
@@ -48465,7 +48465,7 @@ _sk_store_u16_be_sse2:
.byte 77,1,192 // add %r8,%r8
.byte 76,3,0 // add (%rax),%r8
.byte 73,99,193 // movslq %r9d,%rax
- .byte 68,15,40,21,32,28,1,0 // movaps 0x11c20(%rip),%xmm10 # 39840 <_sk_srcover_bgra_8888_sse2_lowp+0x113c>
+ .byte 68,15,40,21,128,29,1,0 // movaps 0x11d80(%rip),%xmm10 # 399a0 <_sk_srcover_bgra_8888_sse2_lowp+0x112c>
.byte 68,15,40,192 // movaps %xmm0,%xmm8
.byte 69,15,89,194 // mulps %xmm10,%xmm8
.byte 102,69,15,91,192 // cvtps2dq %xmm8,%xmm8
@@ -48672,7 +48672,7 @@ _sk_repeat_x_sse2:
.byte 243,69,15,91,209 // cvttps2dq %xmm9,%xmm10
.byte 69,15,91,210 // cvtdq2ps %xmm10,%xmm10
.byte 69,15,194,202,1 // cmpltps %xmm10,%xmm9
- .byte 68,15,84,13,191,19,1,0 // andps 0x113bf(%rip),%xmm9 # 392f0 <_sk_srcover_bgra_8888_sse2_lowp+0xbec>
+ .byte 68,15,84,13,31,21,1,0 // andps 0x1151f(%rip),%xmm9 # 39450 <_sk_srcover_bgra_8888_sse2_lowp+0xbdc>
.byte 69,15,92,209 // subps %xmm9,%xmm10
.byte 69,15,198,192,0 // shufps $0x0,%xmm8,%xmm8
.byte 69,15,89,194 // mulps %xmm10,%xmm8
@@ -48692,7 +48692,7 @@ _sk_repeat_y_sse2:
.byte 243,69,15,91,209 // cvttps2dq %xmm9,%xmm10
.byte 69,15,91,210 // cvtdq2ps %xmm10,%xmm10
.byte 69,15,194,202,1 // cmpltps %xmm10,%xmm9
- .byte 68,15,84,13,126,19,1,0 // andps 0x1137e(%rip),%xmm9 # 392f0 <_sk_srcover_bgra_8888_sse2_lowp+0xbec>
+ .byte 68,15,84,13,222,20,1,0 // andps 0x114de(%rip),%xmm9 # 39450 <_sk_srcover_bgra_8888_sse2_lowp+0xbdc>
.byte 69,15,92,209 // subps %xmm9,%xmm10
.byte 69,15,198,192,0 // shufps $0x0,%xmm8,%xmm8
.byte 69,15,89,194 // mulps %xmm10,%xmm8
@@ -48712,13 +48712,13 @@ _sk_mirror_x_sse2:
.byte 65,15,92,194 // subps %xmm10,%xmm0
.byte 243,69,15,88,192 // addss %xmm8,%xmm8
.byte 69,15,198,192,0 // shufps $0x0,%xmm8,%xmm8
- .byte 243,68,15,89,13,252,9,1,0 // mulss 0x109fc(%rip),%xmm9 # 389b0 <_sk_srcover_bgra_8888_sse2_lowp+0x2ac>
+ .byte 243,68,15,89,13,108,11,1,0 // mulss 0x10b6c(%rip),%xmm9 # 38b20 <_sk_srcover_bgra_8888_sse2_lowp+0x2ac>
.byte 69,15,198,201,0 // shufps $0x0,%xmm9,%xmm9
.byte 68,15,89,200 // mulps %xmm0,%xmm9
.byte 243,69,15,91,217 // cvttps2dq %xmm9,%xmm11
.byte 69,15,91,219 // cvtdq2ps %xmm11,%xmm11
.byte 69,15,194,203,1 // cmpltps %xmm11,%xmm9
- .byte 68,15,84,13,29,19,1,0 // andps 0x1131d(%rip),%xmm9 # 392f0 <_sk_srcover_bgra_8888_sse2_lowp+0xbec>
+ .byte 68,15,84,13,125,20,1,0 // andps 0x1147d(%rip),%xmm9 # 39450 <_sk_srcover_bgra_8888_sse2_lowp+0xbdc>
.byte 69,15,87,228 // xorps %xmm12,%xmm12
.byte 69,15,92,217 // subps %xmm9,%xmm11
.byte 69,15,89,216 // mulps %xmm8,%xmm11
@@ -48741,13 +48741,13 @@ _sk_mirror_y_sse2:
.byte 65,15,92,202 // subps %xmm10,%xmm1
.byte 243,69,15,88,192 // addss %xmm8,%xmm8
.byte 69,15,198,192,0 // shufps $0x0,%xmm8,%xmm8
- .byte 243,68,15,89,13,144,9,1,0 // mulss 0x10990(%rip),%xmm9 # 389b0 <_sk_srcover_bgra_8888_sse2_lowp+0x2ac>
+ .byte 243,68,15,89,13,0,11,1,0 // mulss 0x10b00(%rip),%xmm9 # 38b20 <_sk_srcover_bgra_8888_sse2_lowp+0x2ac>
.byte 69,15,198,201,0 // shufps $0x0,%xmm9,%xmm9
.byte 68,15,89,201 // mulps %xmm1,%xmm9
.byte 243,69,15,91,217 // cvttps2dq %xmm9,%xmm11
.byte 69,15,91,219 // cvtdq2ps %xmm11,%xmm11
.byte 69,15,194,203,1 // cmpltps %xmm11,%xmm9
- .byte 68,15,84,13,177,18,1,0 // andps 0x112b1(%rip),%xmm9 # 392f0 <_sk_srcover_bgra_8888_sse2_lowp+0xbec>
+ .byte 68,15,84,13,17,20,1,0 // andps 0x11411(%rip),%xmm9 # 39450 <_sk_srcover_bgra_8888_sse2_lowp+0xbdc>
.byte 69,15,87,228 // xorps %xmm12,%xmm12
.byte 69,15,92,217 // subps %xmm9,%xmm11
.byte 69,15,89,216 // mulps %xmm8,%xmm11
@@ -48764,7 +48764,7 @@ FUNCTION(_sk_clamp_x_1_sse2)
_sk_clamp_x_1_sse2:
.byte 69,15,87,192 // xorps %xmm8,%xmm8
.byte 68,15,95,192 // maxps %xmm0,%xmm8
- .byte 68,15,93,5,129,18,1,0 // minps 0x11281(%rip),%xmm8 # 392f0 <_sk_srcover_bgra_8888_sse2_lowp+0xbec>
+ .byte 68,15,93,5,225,19,1,0 // minps 0x113e1(%rip),%xmm8 # 39450 <_sk_srcover_bgra_8888_sse2_lowp+0xbdc>
.byte 72,173 // lods %ds:(%rsi),%rax
.byte 65,15,40,192 // movaps %xmm8,%xmm0
.byte 255,224 // jmpq *%rax
@@ -48777,7 +48777,7 @@ _sk_repeat_x_1_sse2:
.byte 69,15,91,200 // cvtdq2ps %xmm8,%xmm9
.byte 68,15,40,208 // movaps %xmm0,%xmm10
.byte 69,15,194,209,1 // cmpltps %xmm9,%xmm10
- .byte 68,15,40,29,95,18,1,0 // movaps 0x1125f(%rip),%xmm11 # 392f0 <_sk_srcover_bgra_8888_sse2_lowp+0xbec>
+ .byte 68,15,40,29,191,19,1,0 // movaps 0x113bf(%rip),%xmm11 # 39450 <_sk_srcover_bgra_8888_sse2_lowp+0xbdc>
.byte 69,15,84,211 // andps %xmm11,%xmm10
.byte 69,15,87,192 // xorps %xmm8,%xmm8
.byte 69,15,92,202 // subps %xmm10,%xmm9
@@ -48792,14 +48792,14 @@ HIDDEN _sk_mirror_x_1_sse2
.globl _sk_mirror_x_1_sse2
FUNCTION(_sk_mirror_x_1_sse2)
_sk_mirror_x_1_sse2:
- .byte 68,15,40,13,151,18,1,0 // movaps 0x11297(%rip),%xmm9 # 39350 <_sk_srcover_bgra_8888_sse2_lowp+0xc4c>
+ .byte 68,15,40,13,247,19,1,0 // movaps 0x113f7(%rip),%xmm9 # 394b0 <_sk_srcover_bgra_8888_sse2_lowp+0xc3c>
.byte 65,15,88,193 // addps %xmm9,%xmm0
- .byte 68,15,40,21,27,18,1,0 // movaps 0x1121b(%rip),%xmm10 # 392e0 <_sk_srcover_bgra_8888_sse2_lowp+0xbdc>
+ .byte 68,15,40,21,123,19,1,0 // movaps 0x1137b(%rip),%xmm10 # 39440 <_sk_srcover_bgra_8888_sse2_lowp+0xbcc>
.byte 68,15,89,208 // mulps %xmm0,%xmm10
.byte 243,69,15,91,194 // cvttps2dq %xmm10,%xmm8
.byte 69,15,91,216 // cvtdq2ps %xmm8,%xmm11
.byte 69,15,194,211,1 // cmpltps %xmm11,%xmm10
- .byte 68,15,40,37,17,18,1,0 // movaps 0x11211(%rip),%xmm12 # 392f0 <_sk_srcover_bgra_8888_sse2_lowp+0xbec>
+ .byte 68,15,40,37,113,19,1,0 // movaps 0x11371(%rip),%xmm12 # 39450 <_sk_srcover_bgra_8888_sse2_lowp+0xbdc>
.byte 69,15,84,212 // andps %xmm12,%xmm10
.byte 69,15,87,192 // xorps %xmm8,%xmm8
.byte 69,15,92,218 // subps %xmm10,%xmm11
@@ -48820,10 +48820,10 @@ HIDDEN _sk_luminance_to_alpha_sse2
FUNCTION(_sk_luminance_to_alpha_sse2)
_sk_luminance_to_alpha_sse2:
.byte 15,40,218 // movaps %xmm2,%xmm3
- .byte 15,89,5,51,23,1,0 // mulps 0x11733(%rip),%xmm0 # 39850 <_sk_srcover_bgra_8888_sse2_lowp+0x114c>
- .byte 15,89,13,60,23,1,0 // mulps 0x1173c(%rip),%xmm1 # 39860 <_sk_srcover_bgra_8888_sse2_lowp+0x115c>
+ .byte 15,89,5,147,24,1,0 // mulps 0x11893(%rip),%xmm0 # 399b0 <_sk_srcover_bgra_8888_sse2_lowp+0x113c>
+ .byte 15,89,13,156,24,1,0 // mulps 0x1189c(%rip),%xmm1 # 399c0 <_sk_srcover_bgra_8888_sse2_lowp+0x114c>
.byte 15,88,200 // addps %xmm0,%xmm1
- .byte 15,89,29,66,23,1,0 // mulps 0x11742(%rip),%xmm3 # 39870 <_sk_srcover_bgra_8888_sse2_lowp+0x116c>
+ .byte 15,89,29,162,24,1,0 // mulps 0x118a2(%rip),%xmm3 # 399d0 <_sk_srcover_bgra_8888_sse2_lowp+0x115c>
.byte 15,88,217 // addps %xmm1,%xmm3
.byte 72,173 // lods %ds:(%rsi),%rax
.byte 15,87,192 // xorps %xmm0,%xmm0
@@ -49390,29 +49390,29 @@ _sk_xy_to_unit_angle_sse2:
.byte 69,15,94,220 // divps %xmm12,%xmm11
.byte 69,15,40,227 // movaps %xmm11,%xmm12
.byte 69,15,89,228 // mulps %xmm12,%xmm12
- .byte 68,15,40,45,158,14,1,0 // movaps 0x10e9e(%rip),%xmm13 # 39880 <_sk_srcover_bgra_8888_sse2_lowp+0x117c>
+ .byte 68,15,40,45,254,15,1,0 // movaps 0x10ffe(%rip),%xmm13 # 399e0 <_sk_srcover_bgra_8888_sse2_lowp+0x116c>
.byte 69,15,89,236 // mulps %xmm12,%xmm13
- .byte 68,15,88,45,162,14,1,0 // addps 0x10ea2(%rip),%xmm13 # 39890 <_sk_srcover_bgra_8888_sse2_lowp+0x118c>
+ .byte 68,15,88,45,2,16,1,0 // addps 0x11002(%rip),%xmm13 # 399f0 <_sk_srcover_bgra_8888_sse2_lowp+0x117c>
.byte 69,15,89,236 // mulps %xmm12,%xmm13
- .byte 68,15,88,45,166,14,1,0 // addps 0x10ea6(%rip),%xmm13 # 398a0 <_sk_srcover_bgra_8888_sse2_lowp+0x119c>
+ .byte 68,15,88,45,6,16,1,0 // addps 0x11006(%rip),%xmm13 # 39a00 <_sk_srcover_bgra_8888_sse2_lowp+0x118c>
.byte 69,15,89,236 // mulps %xmm12,%xmm13
- .byte 68,15,88,45,170,14,1,0 // addps 0x10eaa(%rip),%xmm13 # 398b0 <_sk_srcover_bgra_8888_sse2_lowp+0x11ac>
+ .byte 68,15,88,45,10,16,1,0 // addps 0x1100a(%rip),%xmm13 # 39a10 <_sk_srcover_bgra_8888_sse2_lowp+0x119c>
.byte 69,15,89,235 // mulps %xmm11,%xmm13
.byte 69,15,194,202,1 // cmpltps %xmm10,%xmm9
- .byte 68,15,40,21,169,14,1,0 // movaps 0x10ea9(%rip),%xmm10 # 398c0 <_sk_srcover_bgra_8888_sse2_lowp+0x11bc>
+ .byte 68,15,40,21,9,16,1,0 // movaps 0x11009(%rip),%xmm10 # 39a20 <_sk_srcover_bgra_8888_sse2_lowp+0x11ac>
.byte 69,15,92,213 // subps %xmm13,%xmm10
.byte 69,15,84,209 // andps %xmm9,%xmm10
.byte 69,15,85,205 // andnps %xmm13,%xmm9
.byte 69,15,86,202 // orps %xmm10,%xmm9
.byte 68,15,194,192,1 // cmpltps %xmm0,%xmm8
- .byte 68,15,40,21,172,8,1,0 // movaps 0x108ac(%rip),%xmm10 # 392e0 <_sk_srcover_bgra_8888_sse2_lowp+0xbdc>
+ .byte 68,15,40,21,12,10,1,0 // movaps 0x10a0c(%rip),%xmm10 # 39440 <_sk_srcover_bgra_8888_sse2_lowp+0xbcc>
.byte 69,15,92,209 // subps %xmm9,%xmm10
.byte 69,15,84,208 // andps %xmm8,%xmm10
.byte 69,15,85,193 // andnps %xmm9,%xmm8
.byte 69,15,86,194 // orps %xmm10,%xmm8
.byte 68,15,40,201 // movaps %xmm1,%xmm9
.byte 68,15,194,200,1 // cmpltps %xmm0,%xmm9
- .byte 68,15,40,21,155,8,1,0 // movaps 0x1089b(%rip),%xmm10 # 392f0 <_sk_srcover_bgra_8888_sse2_lowp+0xbec>
+ .byte 68,15,40,21,251,9,1,0 // movaps 0x109fb(%rip),%xmm10 # 39450 <_sk_srcover_bgra_8888_sse2_lowp+0xbdc>
.byte 69,15,92,208 // subps %xmm8,%xmm10
.byte 69,15,84,209 // andps %xmm9,%xmm10
.byte 69,15,85,200 // andnps %xmm8,%xmm9
@@ -49447,7 +49447,7 @@ _sk_xy_to_2pt_conical_quadratic_max_sse2:
.byte 243,69,15,89,203 // mulss %xmm11,%xmm9
.byte 69,15,198,201,0 // shufps $0x0,%xmm9,%xmm9
.byte 68,15,88,200 // addps %xmm0,%xmm9
- .byte 68,15,89,13,18,14,1,0 // mulps 0x10e12(%rip),%xmm9 # 398d0 <_sk_srcover_bgra_8888_sse2_lowp+0x11cc>
+ .byte 68,15,89,13,114,15,1,0 // mulps 0x10f72(%rip),%xmm9 # 39a30 <_sk_srcover_bgra_8888_sse2_lowp+0x11bc>
.byte 15,89,192 // mulps %xmm0,%xmm0
.byte 68,15,40,225 // movaps %xmm1,%xmm12
.byte 69,15,89,228 // mulps %xmm12,%xmm12
@@ -49455,7 +49455,7 @@ _sk_xy_to_2pt_conical_quadratic_max_sse2:
.byte 243,69,15,89,219 // mulss %xmm11,%xmm11
.byte 69,15,198,219,0 // shufps $0x0,%xmm11,%xmm11
.byte 69,15,92,227 // subps %xmm11,%xmm12
- .byte 68,15,89,21,253,13,1,0 // mulps 0x10dfd(%rip),%xmm10 # 398e0 <_sk_srcover_bgra_8888_sse2_lowp+0x11dc>
+ .byte 68,15,89,21,93,15,1,0 // mulps 0x10f5d(%rip),%xmm10 # 39a40 <_sk_srcover_bgra_8888_sse2_lowp+0x11cc>
.byte 69,15,89,212 // mulps %xmm12,%xmm10
.byte 65,15,40,193 // movaps %xmm9,%xmm0
.byte 15,89,192 // mulps %xmm0,%xmm0
@@ -49464,8 +49464,8 @@ _sk_xy_to_2pt_conical_quadratic_max_sse2:
.byte 69,15,198,192,0 // shufps $0x0,%xmm8,%xmm8
.byte 65,15,40,194 // movaps %xmm10,%xmm0
.byte 65,15,92,193 // subps %xmm9,%xmm0
- .byte 68,15,87,13,245,12,1,0 // xorps 0x10cf5(%rip),%xmm9 # 39800 <_sk_srcover_bgra_8888_sse2_lowp+0x10fc>
- .byte 68,15,89,5,205,7,1,0 // mulps 0x107cd(%rip),%xmm8 # 392e0 <_sk_srcover_bgra_8888_sse2_lowp+0xbdc>
+ .byte 68,15,87,13,85,14,1,0 // xorps 0x10e55(%rip),%xmm9 # 39960 <_sk_srcover_bgra_8888_sse2_lowp+0x10ec>
+ .byte 68,15,89,5,45,9,1,0 // mulps 0x1092d(%rip),%xmm8 # 39440 <_sk_srcover_bgra_8888_sse2_lowp+0xbcc>
.byte 65,15,89,192 // mulps %xmm8,%xmm0
.byte 69,15,92,202 // subps %xmm10,%xmm9
.byte 69,15,89,200 // mulps %xmm8,%xmm9
@@ -49486,7 +49486,7 @@ _sk_xy_to_2pt_conical_quadratic_min_sse2:
.byte 243,69,15,89,203 // mulss %xmm11,%xmm9
.byte 69,15,198,201,0 // shufps $0x0,%xmm9,%xmm9
.byte 68,15,88,200 // addps %xmm0,%xmm9
- .byte 68,15,89,13,116,13,1,0 // mulps 0x10d74(%rip),%xmm9 # 398d0 <_sk_srcover_bgra_8888_sse2_lowp+0x11cc>
+ .byte 68,15,89,13,212,14,1,0 // mulps 0x10ed4(%rip),%xmm9 # 39a30 <_sk_srcover_bgra_8888_sse2_lowp+0x11bc>
.byte 15,89,192 // mulps %xmm0,%xmm0
.byte 68,15,40,225 // movaps %xmm1,%xmm12
.byte 69,15,89,228 // mulps %xmm12,%xmm12
@@ -49494,7 +49494,7 @@ _sk_xy_to_2pt_conical_quadratic_min_sse2:
.byte 243,69,15,89,219 // mulss %xmm11,%xmm11
.byte 69,15,198,219,0 // shufps $0x0,%xmm11,%xmm11
.byte 69,15,92,227 // subps %xmm11,%xmm12
- .byte 68,15,89,21,95,13,1,0 // mulps 0x10d5f(%rip),%xmm10 # 398e0 <_sk_srcover_bgra_8888_sse2_lowp+0x11dc>
+ .byte 68,15,89,21,191,14,1,0 // mulps 0x10ebf(%rip),%xmm10 # 39a40 <_sk_srcover_bgra_8888_sse2_lowp+0x11cc>
.byte 69,15,89,212 // mulps %xmm12,%xmm10
.byte 65,15,40,193 // movaps %xmm9,%xmm0
.byte 15,89,192 // mulps %xmm0,%xmm0
@@ -49503,8 +49503,8 @@ _sk_xy_to_2pt_conical_quadratic_min_sse2:
.byte 69,15,198,192,0 // shufps $0x0,%xmm8,%xmm8
.byte 65,15,40,194 // movaps %xmm10,%xmm0
.byte 65,15,92,193 // subps %xmm9,%xmm0
- .byte 68,15,87,13,87,12,1,0 // xorps 0x10c57(%rip),%xmm9 # 39800 <_sk_srcover_bgra_8888_sse2_lowp+0x10fc>
- .byte 68,15,89,5,47,7,1,0 // mulps 0x1072f(%rip),%xmm8 # 392e0 <_sk_srcover_bgra_8888_sse2_lowp+0xbdc>
+ .byte 68,15,87,13,183,13,1,0 // xorps 0x10db7(%rip),%xmm9 # 39960 <_sk_srcover_bgra_8888_sse2_lowp+0x10ec>
+ .byte 68,15,89,5,143,8,1,0 // mulps 0x1088f(%rip),%xmm8 # 39440 <_sk_srcover_bgra_8888_sse2_lowp+0xbcc>
.byte 65,15,89,192 // mulps %xmm8,%xmm0
.byte 69,15,92,202 // subps %xmm10,%xmm9
.byte 69,15,89,200 // mulps %xmm8,%xmm9
@@ -49522,7 +49522,7 @@ _sk_xy_to_2pt_conical_linear_sse2:
.byte 243,69,15,89,200 // mulss %xmm8,%xmm9
.byte 69,15,198,201,0 // shufps $0x0,%xmm9,%xmm9
.byte 68,15,88,200 // addps %xmm0,%xmm9
- .byte 68,15,89,13,231,12,1,0 // mulps 0x10ce7(%rip),%xmm9 # 398d0 <_sk_srcover_bgra_8888_sse2_lowp+0x11cc>
+ .byte 68,15,89,13,71,14,1,0 // mulps 0x10e47(%rip),%xmm9 # 39a30 <_sk_srcover_bgra_8888_sse2_lowp+0x11bc>
.byte 15,89,192 // mulps %xmm0,%xmm0
.byte 68,15,40,209 // movaps %xmm1,%xmm10
.byte 69,15,89,210 // mulps %xmm10,%xmm10
@@ -49530,7 +49530,7 @@ _sk_xy_to_2pt_conical_linear_sse2:
.byte 243,69,15,89,192 // mulss %xmm8,%xmm8
.byte 69,15,198,192,0 // shufps $0x0,%xmm8,%xmm8
.byte 65,15,92,192 // subps %xmm8,%xmm0
- .byte 15,87,5,243,11,1,0 // xorps 0x10bf3(%rip),%xmm0 # 39800 <_sk_srcover_bgra_8888_sse2_lowp+0x10fc>
+ .byte 15,87,5,83,13,1,0 // xorps 0x10d53(%rip),%xmm0 # 39960 <_sk_srcover_bgra_8888_sse2_lowp+0x10ec>
.byte 65,15,94,193 // divps %xmm9,%xmm0
.byte 72,173 // lods %ds:(%rsi),%rax
.byte 255,224 // jmpq *%rax
@@ -49576,7 +49576,7 @@ HIDDEN _sk_save_xy_sse2
FUNCTION(_sk_save_xy_sse2)
_sk_save_xy_sse2:
.byte 72,173 // lods %ds:(%rsi),%rax
- .byte 68,15,40,5,90,6,1,0 // movaps 0x1065a(%rip),%xmm8 # 392e0 <_sk_srcover_bgra_8888_sse2_lowp+0xbdc>
+ .byte 68,15,40,5,186,7,1,0 // movaps 0x107ba(%rip),%xmm8 # 39440 <_sk_srcover_bgra_8888_sse2_lowp+0xbcc>
.byte 15,17,0 // movups %xmm0,(%rax)
.byte 68,15,40,200 // movaps %xmm0,%xmm9
.byte 69,15,88,200 // addps %xmm8,%xmm9
@@ -49584,7 +49584,7 @@ _sk_save_xy_sse2:
.byte 69,15,91,210 // cvtdq2ps %xmm10,%xmm10
.byte 69,15,40,217 // movaps %xmm9,%xmm11
.byte 69,15,194,218,1 // cmpltps %xmm10,%xmm11
- .byte 68,15,40,37,69,6,1,0 // movaps 0x10645(%rip),%xmm12 # 392f0 <_sk_srcover_bgra_8888_sse2_lowp+0xbec>
+ .byte 68,15,40,37,165,7,1,0 // movaps 0x107a5(%rip),%xmm12 # 39450 <_sk_srcover_bgra_8888_sse2_lowp+0xbdc>
.byte 69,15,84,220 // andps %xmm12,%xmm11
.byte 69,15,92,211 // subps %xmm11,%xmm10
.byte 69,15,92,202 // subps %xmm10,%xmm9
@@ -49631,8 +49631,8 @@ _sk_bilinear_nx_sse2:
.byte 72,173 // lods %ds:(%rsi),%rax
.byte 15,16,0 // movups (%rax),%xmm0
.byte 68,15,16,128,128,0,0,0 // movups 0x80(%rax),%xmm8
- .byte 15,88,5,165,11,1,0 // addps 0x10ba5(%rip),%xmm0 # 398f0 <_sk_srcover_bgra_8888_sse2_lowp+0x11ec>
- .byte 68,15,40,13,157,5,1,0 // movaps 0x1059d(%rip),%xmm9 # 392f0 <_sk_srcover_bgra_8888_sse2_lowp+0xbec>
+ .byte 15,88,5,5,13,1,0 // addps 0x10d05(%rip),%xmm0 # 39a50 <_sk_srcover_bgra_8888_sse2_lowp+0x11dc>
+ .byte 68,15,40,13,253,6,1,0 // movaps 0x106fd(%rip),%xmm9 # 39450 <_sk_srcover_bgra_8888_sse2_lowp+0xbdc>
.byte 69,15,92,200 // subps %xmm8,%xmm9
.byte 68,15,17,136,0,1,0,0 // movups %xmm9,0x100(%rax)
.byte 72,173 // lods %ds:(%rsi),%rax
@@ -49645,7 +49645,7 @@ _sk_bilinear_px_sse2:
.byte 72,173 // lods %ds:(%rsi),%rax
.byte 15,16,0 // movups (%rax),%xmm0
.byte 68,15,16,128,128,0,0,0 // movups 0x80(%rax),%xmm8
- .byte 15,88,5,105,5,1,0 // addps 0x10569(%rip),%xmm0 # 392e0 <_sk_srcover_bgra_8888_sse2_lowp+0xbdc>
+ .byte 15,88,5,201,6,1,0 // addps 0x106c9(%rip),%xmm0 # 39440 <_sk_srcover_bgra_8888_sse2_lowp+0xbcc>
.byte 68,15,17,128,0,1,0,0 // movups %xmm8,0x100(%rax)
.byte 72,173 // lods %ds:(%rsi),%rax
.byte 255,224 // jmpq *%rax
@@ -49657,8 +49657,8 @@ _sk_bilinear_ny_sse2:
.byte 72,173 // lods %ds:(%rsi),%rax
.byte 15,16,72,64 // movups 0x40(%rax),%xmm1
.byte 68,15,16,128,192,0,0,0 // movups 0xc0(%rax),%xmm8
- .byte 15,88,13,88,11,1,0 // addps 0x10b58(%rip),%xmm1 # 398f0 <_sk_srcover_bgra_8888_sse2_lowp+0x11ec>
- .byte 68,15,40,13,80,5,1,0 // movaps 0x10550(%rip),%xmm9 # 392f0 <_sk_srcover_bgra_8888_sse2_lowp+0xbec>
+ .byte 15,88,13,184,12,1,0 // addps 0x10cb8(%rip),%xmm1 # 39a50 <_sk_srcover_bgra_8888_sse2_lowp+0x11dc>
+ .byte 68,15,40,13,176,6,1,0 // movaps 0x106b0(%rip),%xmm9 # 39450 <_sk_srcover_bgra_8888_sse2_lowp+0xbdc>
.byte 69,15,92,200 // subps %xmm8,%xmm9
.byte 68,15,17,136,64,1,0,0 // movups %xmm9,0x140(%rax)
.byte 72,173 // lods %ds:(%rsi),%rax
@@ -49671,7 +49671,7 @@ _sk_bilinear_py_sse2:
.byte 72,173 // lods %ds:(%rsi),%rax
.byte 15,16,72,64 // movups 0x40(%rax),%xmm1
.byte 68,15,16,128,192,0,0,0 // movups 0xc0(%rax),%xmm8
- .byte 15,88,13,27,5,1,0 // addps 0x1051b(%rip),%xmm1 # 392e0 <_sk_srcover_bgra_8888_sse2_lowp+0xbdc>
+ .byte 15,88,13,123,6,1,0 // addps 0x1067b(%rip),%xmm1 # 39440 <_sk_srcover_bgra_8888_sse2_lowp+0xbcc>
.byte 68,15,17,128,64,1,0,0 // movups %xmm8,0x140(%rax)
.byte 72,173 // lods %ds:(%rsi),%rax
.byte 255,224 // jmpq *%rax
@@ -49683,13 +49683,13 @@ _sk_bicubic_n3x_sse2:
.byte 72,173 // lods %ds:(%rsi),%rax
.byte 15,16,0 // movups (%rax),%xmm0
.byte 68,15,16,128,128,0,0,0 // movups 0x80(%rax),%xmm8
- .byte 15,88,5,27,11,1,0 // addps 0x10b1b(%rip),%xmm0 # 39900 <_sk_srcover_bgra_8888_sse2_lowp+0x11fc>
- .byte 68,15,40,13,3,5,1,0 // movaps 0x10503(%rip),%xmm9 # 392f0 <_sk_srcover_bgra_8888_sse2_lowp+0xbec>
+ .byte 15,88,5,123,12,1,0 // addps 0x10c7b(%rip),%xmm0 # 39a60 <_sk_srcover_bgra_8888_sse2_lowp+0x11ec>
+ .byte 68,15,40,13,99,6,1,0 // movaps 0x10663(%rip),%xmm9 # 39450 <_sk_srcover_bgra_8888_sse2_lowp+0xbdc>
.byte 69,15,92,200 // subps %xmm8,%xmm9
.byte 69,15,40,193 // movaps %xmm9,%xmm8
.byte 69,15,89,192 // mulps %xmm8,%xmm8
- .byte 68,15,89,13,15,11,1,0 // mulps 0x10b0f(%rip),%xmm9 # 39910 <_sk_srcover_bgra_8888_sse2_lowp+0x120c>
- .byte 68,15,88,13,231,6,1,0 // addps 0x106e7(%rip),%xmm9 # 394f0 <_sk_srcover_bgra_8888_sse2_lowp+0xdec>
+ .byte 68,15,89,13,111,12,1,0 // mulps 0x10c6f(%rip),%xmm9 # 39a70 <_sk_srcover_bgra_8888_sse2_lowp+0x11fc>
+ .byte 68,15,88,13,71,8,1,0 // addps 0x10847(%rip),%xmm9 # 39650 <_sk_srcover_bgra_8888_sse2_lowp+0xddc>
.byte 69,15,89,200 // mulps %xmm8,%xmm9
.byte 68,15,17,136,0,1,0,0 // movups %xmm9,0x100(%rax)
.byte 72,173 // lods %ds:(%rsi),%rax
@@ -49702,16 +49702,16 @@ _sk_bicubic_n1x_sse2:
.byte 72,173 // lods %ds:(%rsi),%rax
.byte 15,16,0 // movups (%rax),%xmm0
.byte 68,15,16,128,128,0,0,0 // movups 0x80(%rax),%xmm8
- .byte 15,88,5,195,10,1,0 // addps 0x10ac3(%rip),%xmm0 # 398f0 <_sk_srcover_bgra_8888_sse2_lowp+0x11ec>
- .byte 68,15,40,13,187,4,1,0 // movaps 0x104bb(%rip),%xmm9 # 392f0 <_sk_srcover_bgra_8888_sse2_lowp+0xbec>
+ .byte 15,88,5,35,12,1,0 // addps 0x10c23(%rip),%xmm0 # 39a50 <_sk_srcover_bgra_8888_sse2_lowp+0x11dc>
+ .byte 68,15,40,13,27,6,1,0 // movaps 0x1061b(%rip),%xmm9 # 39450 <_sk_srcover_bgra_8888_sse2_lowp+0xbdc>
.byte 69,15,92,200 // subps %xmm8,%xmm9
- .byte 68,15,40,5,223,10,1,0 // movaps 0x10adf(%rip),%xmm8 # 39920 <_sk_srcover_bgra_8888_sse2_lowp+0x121c>
+ .byte 68,15,40,5,63,12,1,0 // movaps 0x10c3f(%rip),%xmm8 # 39a80 <_sk_srcover_bgra_8888_sse2_lowp+0x120c>
.byte 69,15,89,193 // mulps %xmm9,%xmm8
- .byte 68,15,88,5,227,10,1,0 // addps 0x10ae3(%rip),%xmm8 # 39930 <_sk_srcover_bgra_8888_sse2_lowp+0x122c>
+ .byte 68,15,88,5,67,12,1,0 // addps 0x10c43(%rip),%xmm8 # 39a90 <_sk_srcover_bgra_8888_sse2_lowp+0x121c>
.byte 69,15,89,193 // mulps %xmm9,%xmm8
- .byte 68,15,88,5,135,4,1,0 // addps 0x10487(%rip),%xmm8 # 392e0 <_sk_srcover_bgra_8888_sse2_lowp+0xbdc>
+ .byte 68,15,88,5,231,5,1,0 // addps 0x105e7(%rip),%xmm8 # 39440 <_sk_srcover_bgra_8888_sse2_lowp+0xbcc>
.byte 69,15,89,193 // mulps %xmm9,%xmm8
- .byte 68,15,88,5,219,10,1,0 // addps 0x10adb(%rip),%xmm8 # 39940 <_sk_srcover_bgra_8888_sse2_lowp+0x123c>
+ .byte 68,15,88,5,59,12,1,0 // addps 0x10c3b(%rip),%xmm8 # 39aa0 <_sk_srcover_bgra_8888_sse2_lowp+0x122c>
.byte 68,15,17,128,0,1,0,0 // movups %xmm8,0x100(%rax)
.byte 72,173 // lods %ds:(%rsi),%rax
.byte 255,224 // jmpq *%rax
@@ -49721,17 +49721,17 @@ HIDDEN _sk_bicubic_p1x_sse2
FUNCTION(_sk_bicubic_p1x_sse2)
_sk_bicubic_p1x_sse2:
.byte 72,173 // lods %ds:(%rsi),%rax
- .byte 68,15,40,5,101,4,1,0 // movaps 0x10465(%rip),%xmm8 # 392e0 <_sk_srcover_bgra_8888_sse2_lowp+0xbdc>
+ .byte 68,15,40,5,197,5,1,0 // movaps 0x105c5(%rip),%xmm8 # 39440 <_sk_srcover_bgra_8888_sse2_lowp+0xbcc>
.byte 15,16,0 // movups (%rax),%xmm0
.byte 68,15,16,136,128,0,0,0 // movups 0x80(%rax),%xmm9
.byte 65,15,88,192 // addps %xmm8,%xmm0
- .byte 68,15,40,21,142,10,1,0 // movaps 0x10a8e(%rip),%xmm10 # 39920 <_sk_srcover_bgra_8888_sse2_lowp+0x121c>
+ .byte 68,15,40,21,238,11,1,0 // movaps 0x10bee(%rip),%xmm10 # 39a80 <_sk_srcover_bgra_8888_sse2_lowp+0x120c>
.byte 69,15,89,209 // mulps %xmm9,%xmm10
- .byte 68,15,88,21,146,10,1,0 // addps 0x10a92(%rip),%xmm10 # 39930 <_sk_srcover_bgra_8888_sse2_lowp+0x122c>
+ .byte 68,15,88,21,242,11,1,0 // addps 0x10bf2(%rip),%xmm10 # 39a90 <_sk_srcover_bgra_8888_sse2_lowp+0x121c>
.byte 69,15,89,209 // mulps %xmm9,%xmm10
.byte 69,15,88,208 // addps %xmm8,%xmm10
.byte 69,15,89,209 // mulps %xmm9,%xmm10
- .byte 68,15,88,21,142,10,1,0 // addps 0x10a8e(%rip),%xmm10 # 39940 <_sk_srcover_bgra_8888_sse2_lowp+0x123c>
+ .byte 68,15,88,21,238,11,1,0 // addps 0x10bee(%rip),%xmm10 # 39aa0 <_sk_srcover_bgra_8888_sse2_lowp+0x122c>
.byte 68,15,17,144,0,1,0,0 // movups %xmm10,0x100(%rax)
.byte 72,173 // lods %ds:(%rsi),%rax
.byte 255,224 // jmpq *%rax
@@ -49743,11 +49743,11 @@ _sk_bicubic_p3x_sse2:
.byte 72,173 // lods %ds:(%rsi),%rax
.byte 15,16,0 // movups (%rax),%xmm0
.byte 68,15,16,128,128,0,0,0 // movups 0x80(%rax),%xmm8
- .byte 15,88,5,94,10,1,0 // addps 0x10a5e(%rip),%xmm0 # 39930 <_sk_srcover_bgra_8888_sse2_lowp+0x122c>
+ .byte 15,88,5,190,11,1,0 // addps 0x10bbe(%rip),%xmm0 # 39a90 <_sk_srcover_bgra_8888_sse2_lowp+0x121c>
.byte 69,15,40,200 // movaps %xmm8,%xmm9
.byte 69,15,89,201 // mulps %xmm9,%xmm9
- .byte 68,15,89,5,46,10,1,0 // mulps 0x10a2e(%rip),%xmm8 # 39910 <_sk_srcover_bgra_8888_sse2_lowp+0x120c>
- .byte 68,15,88,5,6,6,1,0 // addps 0x10606(%rip),%xmm8 # 394f0 <_sk_srcover_bgra_8888_sse2_lowp+0xdec>
+ .byte 68,15,89,5,142,11,1,0 // mulps 0x10b8e(%rip),%xmm8 # 39a70 <_sk_srcover_bgra_8888_sse2_lowp+0x11fc>
+ .byte 68,15,88,5,102,7,1,0 // addps 0x10766(%rip),%xmm8 # 39650 <_sk_srcover_bgra_8888_sse2_lowp+0xddc>
.byte 69,15,89,193 // mulps %xmm9,%xmm8
.byte 68,15,17,128,0,1,0,0 // movups %xmm8,0x100(%rax)
.byte 72,173 // lods %ds:(%rsi),%rax
@@ -49760,13 +49760,13 @@ _sk_bicubic_n3y_sse2:
.byte 72,173 // lods %ds:(%rsi),%rax
.byte 15,16,72,64 // movups 0x40(%rax),%xmm1
.byte 68,15,16,128,192,0,0,0 // movups 0xc0(%rax),%xmm8
- .byte 15,88,13,241,9,1,0 // addps 0x109f1(%rip),%xmm1 # 39900 <_sk_srcover_bgra_8888_sse2_lowp+0x11fc>
- .byte 68,15,40,13,217,3,1,0 // movaps 0x103d9(%rip),%xmm9 # 392f0 <_sk_srcover_bgra_8888_sse2_lowp+0xbec>
+ .byte 15,88,13,81,11,1,0 // addps 0x10b51(%rip),%xmm1 # 39a60 <_sk_srcover_bgra_8888_sse2_lowp+0x11ec>
+ .byte 68,15,40,13,57,5,1,0 // movaps 0x10539(%rip),%xmm9 # 39450 <_sk_srcover_bgra_8888_sse2_lowp+0xbdc>
.byte 69,15,92,200 // subps %xmm8,%xmm9
.byte 69,15,40,193 // movaps %xmm9,%xmm8
.byte 69,15,89,192 // mulps %xmm8,%xmm8
- .byte 68,15,89,13,229,9,1,0 // mulps 0x109e5(%rip),%xmm9 # 39910 <_sk_srcover_bgra_8888_sse2_lowp+0x120c>
- .byte 68,15,88,13,189,5,1,0 // addps 0x105bd(%rip),%xmm9 # 394f0 <_sk_srcover_bgra_8888_sse2_lowp+0xdec>
+ .byte 68,15,89,13,69,11,1,0 // mulps 0x10b45(%rip),%xmm9 # 39a70 <_sk_srcover_bgra_8888_sse2_lowp+0x11fc>
+ .byte 68,15,88,13,29,7,1,0 // addps 0x1071d(%rip),%xmm9 # 39650 <_sk_srcover_bgra_8888_sse2_lowp+0xddc>
.byte 69,15,89,200 // mulps %xmm8,%xmm9
.byte 68,15,17,136,64,1,0,0 // movups %xmm9,0x140(%rax)
.byte 72,173 // lods %ds:(%rsi),%rax
@@ -49779,16 +49779,16 @@ _sk_bicubic_n1y_sse2:
.byte 72,173 // lods %ds:(%rsi),%rax
.byte 15,16,72,64 // movups 0x40(%rax),%xmm1
.byte 68,15,16,128,192,0,0,0 // movups 0xc0(%rax),%xmm8
- .byte 15,88,13,152,9,1,0 // addps 0x10998(%rip),%xmm1 # 398f0 <_sk_srcover_bgra_8888_sse2_lowp+0x11ec>
- .byte 68,15,40,13,144,3,1,0 // movaps 0x10390(%rip),%xmm9 # 392f0 <_sk_srcover_bgra_8888_sse2_lowp+0xbec>
+ .byte 15,88,13,248,10,1,0 // addps 0x10af8(%rip),%xmm1 # 39a50 <_sk_srcover_bgra_8888_sse2_lowp+0x11dc>
+ .byte 68,15,40,13,240,4,1,0 // movaps 0x104f0(%rip),%xmm9 # 39450 <_sk_srcover_bgra_8888_sse2_lowp+0xbdc>
.byte 69,15,92,200 // subps %xmm8,%xmm9
- .byte 68,15,40,5,180,9,1,0 // movaps 0x109b4(%rip),%xmm8 # 39920 <_sk_srcover_bgra_8888_sse2_lowp+0x121c>
+ .byte 68,15,40,5,20,11,1,0 // movaps 0x10b14(%rip),%xmm8 # 39a80 <_sk_srcover_bgra_8888_sse2_lowp+0x120c>
.byte 69,15,89,193 // mulps %xmm9,%xmm8
- .byte 68,15,88,5,184,9,1,0 // addps 0x109b8(%rip),%xmm8 # 39930 <_sk_srcover_bgra_8888_sse2_lowp+0x122c>
+ .byte 68,15,88,5,24,11,1,0 // addps 0x10b18(%rip),%xmm8 # 39a90 <_sk_srcover_bgra_8888_sse2_lowp+0x121c>
.byte 69,15,89,193 // mulps %xmm9,%xmm8
- .byte 68,15,88,5,92,3,1,0 // addps 0x1035c(%rip),%xmm8 # 392e0 <_sk_srcover_bgra_8888_sse2_lowp+0xbdc>
+ .byte 68,15,88,5,188,4,1,0 // addps 0x104bc(%rip),%xmm8 # 39440 <_sk_srcover_bgra_8888_sse2_lowp+0xbcc>
.byte 69,15,89,193 // mulps %xmm9,%xmm8
- .byte 68,15,88,5,176,9,1,0 // addps 0x109b0(%rip),%xmm8 # 39940 <_sk_srcover_bgra_8888_sse2_lowp+0x123c>
+ .byte 68,15,88,5,16,11,1,0 // addps 0x10b10(%rip),%xmm8 # 39aa0 <_sk_srcover_bgra_8888_sse2_lowp+0x122c>
.byte 68,15,17,128,64,1,0,0 // movups %xmm8,0x140(%rax)
.byte 72,173 // lods %ds:(%rsi),%rax
.byte 255,224 // jmpq *%rax
@@ -49798,17 +49798,17 @@ HIDDEN _sk_bicubic_p1y_sse2
FUNCTION(_sk_bicubic_p1y_sse2)
_sk_bicubic_p1y_sse2:
.byte 72,173 // lods %ds:(%rsi),%rax
- .byte 68,15,40,5,58,3,1,0 // movaps 0x1033a(%rip),%xmm8 # 392e0 <_sk_srcover_bgra_8888_sse2_lowp+0xbdc>
+ .byte 68,15,40,5,154,4,1,0 // movaps 0x1049a(%rip),%xmm8 # 39440 <_sk_srcover_bgra_8888_sse2_lowp+0xbcc>
.byte 15,16,72,64 // movups 0x40(%rax),%xmm1
.byte 68,15,16,136,192,0,0,0 // movups 0xc0(%rax),%xmm9
.byte 65,15,88,200 // addps %xmm8,%xmm1
- .byte 68,15,40,21,98,9,1,0 // movaps 0x10962(%rip),%xmm10 # 39920 <_sk_srcover_bgra_8888_sse2_lowp+0x121c>
+ .byte 68,15,40,21,194,10,1,0 // movaps 0x10ac2(%rip),%xmm10 # 39a80 <_sk_srcover_bgra_8888_sse2_lowp+0x120c>
.byte 69,15,89,209 // mulps %xmm9,%xmm10
- .byte 68,15,88,21,102,9,1,0 // addps 0x10966(%rip),%xmm10 # 39930 <_sk_srcover_bgra_8888_sse2_lowp+0x122c>
+ .byte 68,15,88,21,198,10,1,0 // addps 0x10ac6(%rip),%xmm10 # 39a90 <_sk_srcover_bgra_8888_sse2_lowp+0x121c>
.byte 69,15,89,209 // mulps %xmm9,%xmm10
.byte 69,15,88,208 // addps %xmm8,%xmm10
.byte 69,15,89,209 // mulps %xmm9,%xmm10
- .byte 68,15,88,21,98,9,1,0 // addps 0x10962(%rip),%xmm10 # 39940 <_sk_srcover_bgra_8888_sse2_lowp+0x123c>
+ .byte 68,15,88,21,194,10,1,0 // addps 0x10ac2(%rip),%xmm10 # 39aa0 <_sk_srcover_bgra_8888_sse2_lowp+0x122c>
.byte 68,15,17,144,64,1,0,0 // movups %xmm10,0x140(%rax)
.byte 72,173 // lods %ds:(%rsi),%rax
.byte 255,224 // jmpq *%rax
@@ -49820,11 +49820,11 @@ _sk_bicubic_p3y_sse2:
.byte 72,173 // lods %ds:(%rsi),%rax
.byte 15,16,72,64 // movups 0x40(%rax),%xmm1
.byte 68,15,16,128,192,0,0,0 // movups 0xc0(%rax),%xmm8
- .byte 15,88,13,49,9,1,0 // addps 0x10931(%rip),%xmm1 # 39930 <_sk_srcover_bgra_8888_sse2_lowp+0x122c>
+ .byte 15,88,13,145,10,1,0 // addps 0x10a91(%rip),%xmm1 # 39a90 <_sk_srcover_bgra_8888_sse2_lowp+0x121c>
.byte 69,15,40,200 // movaps %xmm8,%xmm9
.byte 69,15,89,201 // mulps %xmm9,%xmm9
- .byte 68,15,89,5,1,9,1,0 // mulps 0x10901(%rip),%xmm8 # 39910 <_sk_srcover_bgra_8888_sse2_lowp+0x120c>
- .byte 68,15,88,5,217,4,1,0 // addps 0x104d9(%rip),%xmm8 # 394f0 <_sk_srcover_bgra_8888_sse2_lowp+0xdec>
+ .byte 68,15,89,5,97,10,1,0 // mulps 0x10a61(%rip),%xmm8 # 39a70 <_sk_srcover_bgra_8888_sse2_lowp+0x11fc>
+ .byte 68,15,88,5,57,6,1,0 // addps 0x10639(%rip),%xmm8 # 39650 <_sk_srcover_bgra_8888_sse2_lowp+0xddc>
.byte 69,15,89,193 // mulps %xmm9,%xmm8
.byte 68,15,17,128,64,1,0,0 // movups %xmm8,0x140(%rax)
.byte 72,173 // lods %ds:(%rsi),%rax
@@ -49969,7 +49969,7 @@ _sk_clut_3D_sse2:
.byte 102,65,15,254,205 // paddd %xmm13,%xmm1
.byte 102,68,15,127,108,36,208 // movdqa %xmm13,-0x30(%rsp)
.byte 102,15,254,217 // paddd %xmm1,%xmm3
- .byte 102,68,15,111,5,72,7,1,0 // movdqa 0x10748(%rip),%xmm8 # 39960 <_sk_srcover_bgra_8888_sse2_lowp+0x125c>
+ .byte 102,68,15,111,5,168,8,1,0 // movdqa 0x108a8(%rip),%xmm8 # 39ac0 <_sk_srcover_bgra_8888_sse2_lowp+0x124c>
.byte 102,15,112,195,245 // pshufd $0xf5,%xmm3,%xmm0
.byte 102,65,15,244,216 // pmuludq %xmm8,%xmm3
.byte 102,65,15,244,192 // pmuludq %xmm8,%xmm0
@@ -50008,7 +50008,7 @@ _sk_clut_3D_sse2:
.byte 243,66,15,16,44,139 // movss (%rbx,%r9,4),%xmm5
.byte 15,20,236 // unpcklps %xmm4,%xmm5
.byte 102,15,20,221 // unpcklpd %xmm5,%xmm3
- .byte 102,68,15,111,37,79,0,1,0 // movdqa 0x1004f(%rip),%xmm12 # 39320 <_sk_srcover_bgra_8888_sse2_lowp+0xc1c>
+ .byte 102,68,15,111,37,175,1,1,0 // movdqa 0x101af(%rip),%xmm12 # 39480 <_sk_srcover_bgra_8888_sse2_lowp+0xc0c>
.byte 102,65,15,254,196 // paddd %xmm12,%xmm0
.byte 102,15,112,224,78 // pshufd $0x4e,%xmm0,%xmm4
.byte 102,73,15,126,224 // movq %xmm4,%r8
@@ -50024,7 +50024,7 @@ _sk_clut_3D_sse2:
.byte 243,66,15,16,36,139 // movss (%rbx,%r9,4),%xmm4
.byte 15,20,224 // unpcklps %xmm0,%xmm4
.byte 102,15,20,236 // unpcklpd %xmm4,%xmm5
- .byte 15,40,37,52,6,1,0 // movaps 0x10634(%rip),%xmm4 # 39950 <_sk_srcover_bgra_8888_sse2_lowp+0x124c>
+ .byte 15,40,37,148,7,1,0 // movaps 0x10794(%rip),%xmm4 # 39ab0 <_sk_srcover_bgra_8888_sse2_lowp+0x123c>
.byte 68,15,88,220 // addps %xmm4,%xmm11
.byte 68,15,41,92,36,192 // movaps %xmm11,-0x40(%rsp)
.byte 68,15,40,223 // movaps %xmm7,%xmm11
@@ -50060,7 +50060,7 @@ _sk_clut_3D_sse2:
.byte 15,20,249 // unpcklps %xmm1,%xmm7
.byte 102,15,20,252 // unpcklpd %xmm4,%xmm7
.byte 102,15,111,202 // movdqa %xmm2,%xmm1
- .byte 102,15,250,13,169,5,1,0 // psubd 0x105a9(%rip),%xmm1 # 39970 <_sk_srcover_bgra_8888_sse2_lowp+0x126c>
+ .byte 102,15,250,13,9,7,1,0 // psubd 0x10709(%rip),%xmm1 # 39ad0 <_sk_srcover_bgra_8888_sse2_lowp+0x125c>
.byte 102,15,112,225,78 // pshufd $0x4e,%xmm1,%xmm4
.byte 102,73,15,126,224 // movq %xmm4,%r8
.byte 102,72,15,126,200 // movq %xmm1,%rax
@@ -50150,7 +50150,7 @@ _sk_clut_3D_sse2:
.byte 243,15,16,44,131 // movss (%rbx,%rax,4),%xmm5
.byte 15,20,233 // unpcklps %xmm1,%xmm5
.byte 102,15,20,221 // unpcklpd %xmm5,%xmm3
- .byte 102,68,15,111,45,193,253,0,0 // movdqa 0xfdc1(%rip),%xmm13 # 39320 <_sk_srcover_bgra_8888_sse2_lowp+0xc1c>
+ .byte 102,68,15,111,45,33,255,0,0 // movdqa 0xff21(%rip),%xmm13 # 39480 <_sk_srcover_bgra_8888_sse2_lowp+0xc0c>
.byte 102,65,15,254,197 // paddd %xmm13,%xmm0
.byte 102,15,112,200,78 // pshufd $0x4e,%xmm0,%xmm1
.byte 102,73,15,126,200 // movq %xmm1,%r8
@@ -50247,7 +50247,7 @@ _sk_clut_3D_sse2:
.byte 102,15,111,206 // movdqa %xmm6,%xmm1
.byte 102,65,15,254,202 // paddd %xmm10,%xmm1
.byte 102,15,112,209,245 // pshufd $0xf5,%xmm1,%xmm2
- .byte 102,15,111,29,68,2,1,0 // movdqa 0x10244(%rip),%xmm3 # 39960 <_sk_srcover_bgra_8888_sse2_lowp+0x125c>
+ .byte 102,15,111,29,164,3,1,0 // movdqa 0x103a4(%rip),%xmm3 # 39ac0 <_sk_srcover_bgra_8888_sse2_lowp+0x124c>
.byte 102,15,244,203 // pmuludq %xmm3,%xmm1
.byte 102,15,244,211 // pmuludq %xmm3,%xmm2
.byte 102,15,111,251 // movdqa %xmm3,%xmm7
@@ -50285,7 +50285,7 @@ _sk_clut_3D_sse2:
.byte 243,15,16,44,131 // movss (%rbx,%rax,4),%xmm5
.byte 15,20,236 // unpcklps %xmm4,%xmm5
.byte 102,15,20,213 // unpcklpd %xmm5,%xmm2
- .byte 102,15,111,5,84,251,0,0 // movdqa 0xfb54(%rip),%xmm0 # 39320 <_sk_srcover_bgra_8888_sse2_lowp+0xc1c>
+ .byte 102,15,111,5,180,252,0,0 // movdqa 0xfcb4(%rip),%xmm0 # 39480 <_sk_srcover_bgra_8888_sse2_lowp+0xc0c>
.byte 102,15,254,216 // paddd %xmm0,%xmm3
.byte 102,15,112,227,78 // pshufd $0x4e,%xmm3,%xmm4
.byte 102,73,15,126,224 // movq %xmm4,%r8
@@ -50366,7 +50366,7 @@ _sk_clut_3D_sse2:
.byte 102,15,111,214 // movdqa %xmm6,%xmm2
.byte 102,65,15,254,212 // paddd %xmm12,%xmm2
.byte 102,15,112,194,245 // pshufd $0xf5,%xmm2,%xmm0
- .byte 102,15,111,13,33,0,1,0 // movdqa 0x10021(%rip),%xmm1 # 39960 <_sk_srcover_bgra_8888_sse2_lowp+0x125c>
+ .byte 102,15,111,13,129,1,1,0 // movdqa 0x10181(%rip),%xmm1 # 39ac0 <_sk_srcover_bgra_8888_sse2_lowp+0x124c>
.byte 102,15,244,209 // pmuludq %xmm1,%xmm2
.byte 102,15,244,193 // pmuludq %xmm1,%xmm0
.byte 102,15,111,241 // movdqa %xmm1,%xmm6
@@ -50388,7 +50388,7 @@ _sk_clut_3D_sse2:
.byte 68,15,20,209 // unpcklps %xmm1,%xmm10
.byte 102,68,15,20,210 // unpcklpd %xmm2,%xmm10
.byte 102,15,111,200 // movdqa %xmm0,%xmm1
- .byte 102,15,250,13,198,255,0,0 // psubd 0xffc6(%rip),%xmm1 # 39970 <_sk_srcover_bgra_8888_sse2_lowp+0x126c>
+ .byte 102,15,250,13,38,1,1,0 // psubd 0x10126(%rip),%xmm1 # 39ad0 <_sk_srcover_bgra_8888_sse2_lowp+0x125c>
.byte 102,15,112,209,78 // pshufd $0x4e,%xmm1,%xmm2
.byte 102,73,15,126,208 // movq %xmm2,%r8
.byte 102,72,15,126,200 // movq %xmm1,%rax
@@ -50403,7 +50403,7 @@ _sk_clut_3D_sse2:
.byte 243,15,16,20,131 // movss (%rbx,%rax,4),%xmm2
.byte 15,20,209 // unpcklps %xmm1,%xmm2
.byte 102,15,20,226 // unpcklpd %xmm2,%xmm4
- .byte 102,15,254,5,49,249,0,0 // paddd 0xf931(%rip),%xmm0 # 39320 <_sk_srcover_bgra_8888_sse2_lowp+0xc1c>
+ .byte 102,15,254,5,145,250,0,0 // paddd 0xfa91(%rip),%xmm0 # 39480 <_sk_srcover_bgra_8888_sse2_lowp+0xc0c>
.byte 102,15,112,200,78 // pshufd $0x4e,%xmm0,%xmm1
.byte 102,73,15,126,200 // movq %xmm1,%r8
.byte 102,72,15,126,192 // movq %xmm0,%rax
@@ -50440,7 +50440,7 @@ _sk_clut_3D_sse2:
.byte 15,20,199 // unpcklps %xmm7,%xmm0
.byte 102,15,20,193 // unpcklpd %xmm1,%xmm0
.byte 102,15,111,202 // movdqa %xmm2,%xmm1
- .byte 102,15,250,13,208,254,0,0 // psubd 0xfed0(%rip),%xmm1 # 39970 <_sk_srcover_bgra_8888_sse2_lowp+0x126c>
+ .byte 102,15,250,13,48,0,1,0 // psubd 0x10030(%rip),%xmm1 # 39ad0 <_sk_srcover_bgra_8888_sse2_lowp+0x125c>
.byte 102,15,112,249,78 // pshufd $0x4e,%xmm1,%xmm7
.byte 102,73,15,126,248 // movq %xmm7,%r8
.byte 102,72,15,126,200 // movq %xmm1,%rax
@@ -50455,7 +50455,7 @@ _sk_clut_3D_sse2:
.byte 243,15,16,52,131 // movss (%rbx,%rax,4),%xmm6
.byte 15,20,247 // unpcklps %xmm7,%xmm6
.byte 102,15,20,206 // unpcklpd %xmm6,%xmm1
- .byte 102,15,254,21,59,248,0,0 // paddd 0xf83b(%rip),%xmm2 # 39320 <_sk_srcover_bgra_8888_sse2_lowp+0xc1c>
+ .byte 102,15,254,21,155,249,0,0 // paddd 0xf99b(%rip),%xmm2 # 39480 <_sk_srcover_bgra_8888_sse2_lowp+0xc0c>
.byte 102,15,112,242,78 // pshufd $0x4e,%xmm2,%xmm6
.byte 102,73,15,126,240 // movq %xmm6,%r8
.byte 102,72,15,126,208 // movq %xmm2,%rax
@@ -50601,7 +50601,7 @@ _sk_clut_4D_sse2:
.byte 102,65,15,254,210 // paddd %xmm10,%xmm2
.byte 102,15,111,220 // movdqa %xmm4,%xmm3
.byte 102,15,254,218 // paddd %xmm2,%xmm3
- .byte 102,15,111,37,19,252,0,0 // movdqa 0xfc13(%rip),%xmm4 # 39960 <_sk_srcover_bgra_8888_sse2_lowp+0x125c>
+ .byte 102,15,111,37,115,253,0,0 // movdqa 0xfd73(%rip),%xmm4 # 39ac0 <_sk_srcover_bgra_8888_sse2_lowp+0x124c>
.byte 102,15,112,195,245 // pshufd $0xf5,%xmm3,%xmm0
.byte 102,15,244,220 // pmuludq %xmm4,%xmm3
.byte 102,15,244,196 // pmuludq %xmm4,%xmm0
@@ -50640,7 +50640,7 @@ _sk_clut_4D_sse2:
.byte 243,66,15,16,52,139 // movss (%rbx,%r9,4),%xmm6
.byte 15,20,244 // unpcklps %xmm4,%xmm6
.byte 102,15,20,238 // unpcklpd %xmm6,%xmm5
- .byte 102,15,254,5,29,245,0,0 // paddd 0xf51d(%rip),%xmm0 # 39320 <_sk_srcover_bgra_8888_sse2_lowp+0xc1c>
+ .byte 102,15,254,5,125,246,0,0 // paddd 0xf67d(%rip),%xmm0 # 39480 <_sk_srcover_bgra_8888_sse2_lowp+0xc0c>
.byte 102,15,112,224,78 // pshufd $0x4e,%xmm0,%xmm4
.byte 102,73,15,126,224 // movq %xmm4,%r8
.byte 102,73,15,126,193 // movq %xmm0,%r9
@@ -50655,7 +50655,7 @@ _sk_clut_4D_sse2:
.byte 243,66,15,16,52,139 // movss (%rbx,%r9,4),%xmm6
.byte 15,20,244 // unpcklps %xmm4,%xmm6
.byte 102,15,20,222 // unpcklpd %xmm6,%xmm3
- .byte 15,40,53,7,251,0,0 // movaps 0xfb07(%rip),%xmm6 # 39950 <_sk_srcover_bgra_8888_sse2_lowp+0x124c>
+ .byte 15,40,53,103,252,0,0 // movaps 0xfc67(%rip),%xmm6 # 39ab0 <_sk_srcover_bgra_8888_sse2_lowp+0x123c>
.byte 68,15,88,198 // addps %xmm6,%xmm8
.byte 68,15,41,68,36,176 // movaps %xmm8,-0x50(%rsp)
.byte 68,15,88,254 // addps %xmm6,%xmm15
@@ -50673,7 +50673,7 @@ _sk_clut_4D_sse2:
.byte 102,65,15,254,215 // paddd %xmm15,%xmm2
.byte 102,68,15,127,124,36,224 // movdqa %xmm15,-0x20(%rsp)
.byte 102,15,112,194,245 // pshufd $0xf5,%xmm2,%xmm0
- .byte 102,15,111,13,189,250,0,0 // movdqa 0xfabd(%rip),%xmm1 # 39960 <_sk_srcover_bgra_8888_sse2_lowp+0x125c>
+ .byte 102,15,111,13,29,252,0,0 // movdqa 0xfc1d(%rip),%xmm1 # 39ac0 <_sk_srcover_bgra_8888_sse2_lowp+0x124c>
.byte 102,15,244,209 // pmuludq %xmm1,%xmm2
.byte 102,15,244,193 // pmuludq %xmm1,%xmm0
.byte 102,15,112,192,232 // pshufd $0xe8,%xmm0,%xmm0
@@ -50694,7 +50694,7 @@ _sk_clut_4D_sse2:
.byte 15,20,240 // unpcklps %xmm0,%xmm6
.byte 102,15,20,242 // unpcklpd %xmm2,%xmm6
.byte 102,15,111,193 // movdqa %xmm1,%xmm0
- .byte 102,15,250,5,104,250,0,0 // psubd 0xfa68(%rip),%xmm0 # 39970 <_sk_srcover_bgra_8888_sse2_lowp+0x126c>
+ .byte 102,15,250,5,200,251,0,0 // psubd 0xfbc8(%rip),%xmm0 # 39ad0 <_sk_srcover_bgra_8888_sse2_lowp+0x125c>
.byte 102,15,112,208,78 // pshufd $0x4e,%xmm0,%xmm2
.byte 102,73,15,126,208 // movq %xmm2,%r8
.byte 102,72,15,126,192 // movq %xmm0,%rax
@@ -50709,7 +50709,7 @@ _sk_clut_4D_sse2:
.byte 243,68,15,16,28,131 // movss (%rbx,%rax,4),%xmm11
.byte 68,15,20,216 // unpcklps %xmm0,%xmm11
.byte 102,65,15,20,211 // unpcklpd %xmm11,%xmm2
- .byte 102,15,254,13,208,243,0,0 // paddd 0xf3d0(%rip),%xmm1 # 39320 <_sk_srcover_bgra_8888_sse2_lowp+0xc1c>
+ .byte 102,15,254,13,48,245,0,0 // paddd 0xf530(%rip),%xmm1 # 39480 <_sk_srcover_bgra_8888_sse2_lowp+0xc0c>
.byte 102,15,112,193,78 // pshufd $0x4e,%xmm1,%xmm0
.byte 102,73,15,126,192 // movq %xmm0,%r8
.byte 102,72,15,126,200 // movq %xmm1,%rax
@@ -50747,7 +50747,7 @@ _sk_clut_4D_sse2:
.byte 102,65,15,111,194 // movdqa %xmm10,%xmm0
.byte 102,15,254,68,36,160 // paddd -0x60(%rsp),%xmm0
.byte 102,15,112,216,245 // pshufd $0xf5,%xmm0,%xmm3
- .byte 102,15,111,37,103,249,0,0 // movdqa 0xf967(%rip),%xmm4 # 39960 <_sk_srcover_bgra_8888_sse2_lowp+0x125c>
+ .byte 102,15,111,37,199,250,0,0 // movdqa 0xfac7(%rip),%xmm4 # 39ac0 <_sk_srcover_bgra_8888_sse2_lowp+0x124c>
.byte 102,15,244,196 // pmuludq %xmm4,%xmm0
.byte 102,15,244,220 // pmuludq %xmm4,%xmm3
.byte 102,68,15,111,220 // movdqa %xmm4,%xmm11
@@ -50785,7 +50785,7 @@ _sk_clut_4D_sse2:
.byte 243,15,16,60,131 // movss (%rbx,%rax,4),%xmm7
.byte 15,20,253 // unpcklps %xmm5,%xmm7
.byte 102,15,20,199 // unpcklpd %xmm7,%xmm0
- .byte 102,68,15,111,5,115,242,0,0 // movdqa 0xf273(%rip),%xmm8 # 39320 <_sk_srcover_bgra_8888_sse2_lowp+0xc1c>
+ .byte 102,68,15,111,5,211,243,0,0 // movdqa 0xf3d3(%rip),%xmm8 # 39480 <_sk_srcover_bgra_8888_sse2_lowp+0xc0c>
.byte 102,65,15,254,224 // paddd %xmm8,%xmm4
.byte 102,15,112,236,78 // pshufd $0x4e,%xmm4,%xmm5
.byte 102,73,15,126,232 // movq %xmm5,%r8
@@ -50931,7 +50931,7 @@ _sk_clut_4D_sse2:
.byte 243,15,16,36,131 // movss (%rbx,%rax,4),%xmm4
.byte 15,20,226 // unpcklps %xmm2,%xmm4
.byte 102,15,20,252 // unpcklpd %xmm4,%xmm7
- .byte 102,68,15,111,61,205,239,0,0 // movdqa 0xefcd(%rip),%xmm15 # 39320 <_sk_srcover_bgra_8888_sse2_lowp+0xc1c>
+ .byte 102,68,15,111,61,45,241,0,0 // movdqa 0xf12d(%rip),%xmm15 # 39480 <_sk_srcover_bgra_8888_sse2_lowp+0xc0c>
.byte 102,65,15,254,207 // paddd %xmm15,%xmm1
.byte 102,15,112,209,78 // pshufd $0x4e,%xmm1,%xmm2
.byte 102,73,15,126,208 // movq %xmm2,%r8
@@ -51014,7 +51014,7 @@ _sk_clut_4D_sse2:
.byte 102,65,15,111,217 // movdqa %xmm9,%xmm3
.byte 102,65,15,254,216 // paddd %xmm8,%xmm3
.byte 102,15,112,211,245 // pshufd $0xf5,%xmm3,%xmm2
- .byte 102,15,111,45,156,244,0,0 // movdqa 0xf49c(%rip),%xmm5 # 39960 <_sk_srcover_bgra_8888_sse2_lowp+0x125c>
+ .byte 102,15,111,45,252,245,0,0 // movdqa 0xf5fc(%rip),%xmm5 # 39ac0 <_sk_srcover_bgra_8888_sse2_lowp+0x124c>
.byte 102,15,244,221 // pmuludq %xmm5,%xmm3
.byte 102,15,244,213 // pmuludq %xmm5,%xmm2
.byte 102,15,112,234,232 // pshufd $0xe8,%xmm2,%xmm5
@@ -51035,7 +51035,7 @@ _sk_clut_4D_sse2:
.byte 68,15,20,198 // unpcklps %xmm6,%xmm8
.byte 102,68,15,20,197 // unpcklpd %xmm5,%xmm8
.byte 102,15,111,234 // movdqa %xmm2,%xmm5
- .byte 102,15,250,45,69,244,0,0 // psubd 0xf445(%rip),%xmm5 # 39970 <_sk_srcover_bgra_8888_sse2_lowp+0x126c>
+ .byte 102,15,250,45,165,245,0,0 // psubd 0xf5a5(%rip),%xmm5 # 39ad0 <_sk_srcover_bgra_8888_sse2_lowp+0x125c>
.byte 102,15,112,245,78 // pshufd $0x4e,%xmm5,%xmm6
.byte 102,73,15,126,240 // movq %xmm6,%r8
.byte 102,72,15,126,232 // movq %xmm5,%rax
@@ -51067,7 +51067,7 @@ _sk_clut_4D_sse2:
.byte 102,15,20,214 // unpcklpd %xmm6,%xmm2
.byte 102,69,15,254,205 // paddd %xmm13,%xmm9
.byte 102,65,15,112,233,245 // pshufd $0xf5,%xmm9,%xmm5
- .byte 102,15,111,29,163,243,0,0 // movdqa 0xf3a3(%rip),%xmm3 # 39960 <_sk_srcover_bgra_8888_sse2_lowp+0x125c>
+ .byte 102,15,111,29,3,245,0,0 // movdqa 0xf503(%rip),%xmm3 # 39ac0 <_sk_srcover_bgra_8888_sse2_lowp+0x124c>
.byte 102,68,15,244,203 // pmuludq %xmm3,%xmm9
.byte 102,15,244,235 // pmuludq %xmm3,%xmm5
.byte 102,15,112,237,232 // pshufd $0xe8,%xmm5,%xmm5
@@ -51088,7 +51088,7 @@ _sk_clut_4D_sse2:
.byte 69,15,20,233 // unpcklps %xmm9,%xmm13
.byte 102,68,15,20,237 // unpcklpd %xmm5,%xmm13
.byte 102,65,15,111,239 // movdqa %xmm15,%xmm5
- .byte 102,15,250,45,67,243,0,0 // psubd 0xf343(%rip),%xmm5 # 39970 <_sk_srcover_bgra_8888_sse2_lowp+0x126c>
+ .byte 102,15,250,45,163,244,0,0 // psubd 0xf4a3(%rip),%xmm5 # 39ad0 <_sk_srcover_bgra_8888_sse2_lowp+0x125c>
.byte 102,15,112,221,78 // pshufd $0x4e,%xmm5,%xmm3
.byte 102,73,15,126,216 // movq %xmm3,%r8
.byte 102,72,15,126,232 // movq %xmm5,%rax
@@ -51103,7 +51103,7 @@ _sk_clut_4D_sse2:
.byte 243,15,16,44,131 // movss (%rbx,%rax,4),%xmm5
.byte 15,20,235 // unpcklps %xmm3,%xmm5
.byte 102,15,20,245 // unpcklpd %xmm5,%xmm6
- .byte 102,68,15,111,13,173,236,0,0 // movdqa 0xecad(%rip),%xmm9 # 39320 <_sk_srcover_bgra_8888_sse2_lowp+0xc1c>
+ .byte 102,68,15,111,13,13,238,0,0 // movdqa 0xee0d(%rip),%xmm9 # 39480 <_sk_srcover_bgra_8888_sse2_lowp+0xc0c>
.byte 102,69,15,254,249 // paddd %xmm9,%xmm15
.byte 102,65,15,112,223,78 // pshufd $0x4e,%xmm15,%xmm3
.byte 102,73,15,126,216 // movq %xmm3,%r8
@@ -51165,7 +51165,7 @@ _sk_clut_4D_sse2:
.byte 102,65,15,111,195 // movdqa %xmm11,%xmm0
.byte 102,15,254,195 // paddd %xmm3,%xmm0
.byte 102,15,112,200,245 // pshufd $0xf5,%xmm0,%xmm1
- .byte 102,15,111,21,222,241,0,0 // movdqa 0xf1de(%rip),%xmm2 # 39960 <_sk_srcover_bgra_8888_sse2_lowp+0x125c>
+ .byte 102,15,111,21,62,243,0,0 // movdqa 0xf33e(%rip),%xmm2 # 39ac0 <_sk_srcover_bgra_8888_sse2_lowp+0x124c>
.byte 102,15,244,194 // pmuludq %xmm2,%xmm0
.byte 102,15,244,202 // pmuludq %xmm2,%xmm1
.byte 102,68,15,111,234 // movdqa %xmm2,%xmm13
@@ -51358,7 +51358,7 @@ _sk_clut_4D_sse2:
.byte 68,15,20,238 // unpcklps %xmm6,%xmm13
.byte 102,68,15,20,234 // unpcklpd %xmm2,%xmm13
.byte 102,15,111,212 // movdqa %xmm4,%xmm2
- .byte 102,15,250,21,122,238,0,0 // psubd 0xee7a(%rip),%xmm2 # 39970 <_sk_srcover_bgra_8888_sse2_lowp+0x126c>
+ .byte 102,15,250,21,218,239,0,0 // psubd 0xefda(%rip),%xmm2 # 39ad0 <_sk_srcover_bgra_8888_sse2_lowp+0x125c>
.byte 102,15,112,242,78 // pshufd $0x4e,%xmm2,%xmm6
.byte 102,73,15,126,240 // movq %xmm6,%r8
.byte 102,72,15,126,208 // movq %xmm2,%rax
@@ -51418,7 +51418,7 @@ _sk_clut_4D_sse2:
.byte 102,68,15,111,116,36,160 // movdqa -0x60(%rsp),%xmm14
.byte 102,65,15,254,198 // paddd %xmm14,%xmm0
.byte 102,15,112,200,245 // pshufd $0xf5,%xmm0,%xmm1
- .byte 102,68,15,111,5,89,237,0,0 // movdqa 0xed59(%rip),%xmm8 # 39960 <_sk_srcover_bgra_8888_sse2_lowp+0x125c>
+ .byte 102,68,15,111,5,185,238,0,0 // movdqa 0xeeb9(%rip),%xmm8 # 39ac0 <_sk_srcover_bgra_8888_sse2_lowp+0x124c>
.byte 102,65,15,244,192 // pmuludq %xmm8,%xmm0
.byte 102,65,15,244,200 // pmuludq %xmm8,%xmm1
.byte 102,65,15,111,240 // movdqa %xmm8,%xmm6
@@ -51456,7 +51456,7 @@ _sk_clut_4D_sse2:
.byte 243,15,16,44,131 // movss (%rbx,%rax,4),%xmm5
.byte 15,20,235 // unpcklps %xmm3,%xmm5
.byte 102,15,20,205 // unpcklpd %xmm5,%xmm1
- .byte 102,68,15,111,5,103,230,0,0 // movdqa 0xe667(%rip),%xmm8 # 39320 <_sk_srcover_bgra_8888_sse2_lowp+0xc1c>
+ .byte 102,68,15,111,5,199,231,0,0 // movdqa 0xe7c7(%rip),%xmm8 # 39480 <_sk_srcover_bgra_8888_sse2_lowp+0xc0c>
.byte 102,65,15,254,208 // paddd %xmm8,%xmm2
.byte 102,15,112,218,78 // pshufd $0x4e,%xmm2,%xmm3
.byte 102,73,15,126,216 // movq %xmm3,%r8
@@ -51539,7 +51539,7 @@ _sk_clut_4D_sse2:
.byte 102,15,254,208 // paddd %xmm0,%xmm2
.byte 102,15,111,240 // movdqa %xmm0,%xmm6
.byte 102,15,112,194,245 // pshufd $0xf5,%xmm2,%xmm0
- .byte 102,15,111,37,51,235,0,0 // movdqa 0xeb33(%rip),%xmm4 # 39960 <_sk_srcover_bgra_8888_sse2_lowp+0x125c>
+ .byte 102,15,111,37,147,236,0,0 // movdqa 0xec93(%rip),%xmm4 # 39ac0 <_sk_srcover_bgra_8888_sse2_lowp+0x124c>
.byte 102,15,244,212 // pmuludq %xmm4,%xmm2
.byte 102,15,244,196 // pmuludq %xmm4,%xmm0
.byte 102,15,112,200,232 // pshufd $0xe8,%xmm0,%xmm1
@@ -51560,7 +51560,7 @@ _sk_clut_4D_sse2:
.byte 68,15,20,193 // unpcklps %xmm1,%xmm8
.byte 102,68,15,20,194 // unpcklpd %xmm2,%xmm8
.byte 102,15,111,200 // movdqa %xmm0,%xmm1
- .byte 102,15,250,13,220,234,0,0 // psubd 0xeadc(%rip),%xmm1 # 39970 <_sk_srcover_bgra_8888_sse2_lowp+0x126c>
+ .byte 102,15,250,13,60,236,0,0 // psubd 0xec3c(%rip),%xmm1 # 39ad0 <_sk_srcover_bgra_8888_sse2_lowp+0x125c>
.byte 102,15,112,209,78 // pshufd $0x4e,%xmm1,%xmm2
.byte 102,73,15,126,208 // movq %xmm2,%r8
.byte 102,72,15,126,200 // movq %xmm1,%rax
@@ -51575,7 +51575,7 @@ _sk_clut_4D_sse2:
.byte 243,15,16,20,131 // movss (%rbx,%rax,4),%xmm2
.byte 15,20,209 // unpcklps %xmm1,%xmm2
.byte 102,68,15,20,242 // unpcklpd %xmm2,%xmm14
- .byte 102,15,254,5,69,228,0,0 // paddd 0xe445(%rip),%xmm0 # 39320 <_sk_srcover_bgra_8888_sse2_lowp+0xc1c>
+ .byte 102,15,254,5,165,229,0,0 // paddd 0xe5a5(%rip),%xmm0 # 39480 <_sk_srcover_bgra_8888_sse2_lowp+0xc0c>
.byte 102,15,112,200,78 // pshufd $0x4e,%xmm0,%xmm1
.byte 102,73,15,126,200 // movq %xmm1,%r8
.byte 102,72,15,126,192 // movq %xmm0,%rax
@@ -51613,7 +51613,7 @@ _sk_clut_4D_sse2:
.byte 15,20,198 // unpcklps %xmm6,%xmm0
.byte 102,15,20,193 // unpcklpd %xmm1,%xmm0
.byte 102,15,111,202 // movdqa %xmm2,%xmm1
- .byte 102,15,250,13,228,233,0,0 // psubd 0xe9e4(%rip),%xmm1 # 39970 <_sk_srcover_bgra_8888_sse2_lowp+0x126c>
+ .byte 102,15,250,13,68,235,0,0 // psubd 0xeb44(%rip),%xmm1 # 39ad0 <_sk_srcover_bgra_8888_sse2_lowp+0x125c>
.byte 102,15,112,241,78 // pshufd $0x4e,%xmm1,%xmm6
.byte 102,73,15,126,240 // movq %xmm6,%r8
.byte 102,72,15,126,200 // movq %xmm1,%rax
@@ -51628,7 +51628,7 @@ _sk_clut_4D_sse2:
.byte 243,15,16,36,131 // movss (%rbx,%rax,4),%xmm4
.byte 15,20,230 // unpcklps %xmm6,%xmm4
.byte 102,15,20,204 // unpcklpd %xmm4,%xmm1
- .byte 102,15,254,21,79,227,0,0 // paddd 0xe34f(%rip),%xmm2 # 39320 <_sk_srcover_bgra_8888_sse2_lowp+0xc1c>
+ .byte 102,15,254,21,175,228,0,0 // paddd 0xe4af(%rip),%xmm2 # 39480 <_sk_srcover_bgra_8888_sse2_lowp+0xc0c>
.byte 102,15,112,226,78 // pshufd $0x4e,%xmm2,%xmm4
.byte 102,73,15,126,224 // movq %xmm4,%r8
.byte 102,72,15,126,208 // movq %xmm2,%rax
@@ -51687,7 +51687,7 @@ _sk_clut_4D_sse2:
.byte 15,89,212 // mulps %xmm4,%xmm2
.byte 65,15,88,215 // addps %xmm15,%xmm2
.byte 72,173 // lods %ds:(%rsi),%rax
- .byte 15,40,29,59,226,0,0 // movaps 0xe23b(%rip),%xmm3 # 392f0 <_sk_srcover_bgra_8888_sse2_lowp+0xbec>
+ .byte 15,40,29,155,227,0,0 // movaps 0xe39b(%rip),%xmm3 # 39450 <_sk_srcover_bgra_8888_sse2_lowp+0xbdc>
.byte 15,40,100,36,96 // movaps 0x60(%rsp),%xmm4
.byte 15,40,108,36,112 // movaps 0x70(%rsp),%xmm5
.byte 15,40,180,36,128,0,0,0 // movaps 0x80(%rsp),%xmm6
@@ -51700,15 +51700,15 @@ HIDDEN _sk_gauss_a_to_rgba_sse2
.globl _sk_gauss_a_to_rgba_sse2
FUNCTION(_sk_gauss_a_to_rgba_sse2)
_sk_gauss_a_to_rgba_sse2:
- .byte 15,40,5,160,232,0,0 // movaps 0xe8a0(%rip),%xmm0 # 39980 <_sk_srcover_bgra_8888_sse2_lowp+0x127c>
+ .byte 15,40,5,0,234,0,0 // movaps 0xea00(%rip),%xmm0 # 39ae0 <_sk_srcover_bgra_8888_sse2_lowp+0x126c>
.byte 15,89,195 // mulps %xmm3,%xmm0
- .byte 15,88,5,166,232,0,0 // addps 0xe8a6(%rip),%xmm0 # 39990 <_sk_srcover_bgra_8888_sse2_lowp+0x128c>
+ .byte 15,88,5,6,234,0,0 // addps 0xea06(%rip),%xmm0 # 39af0 <_sk_srcover_bgra_8888_sse2_lowp+0x127c>
.byte 15,89,195 // mulps %xmm3,%xmm0
- .byte 15,88,5,172,232,0,0 // addps 0xe8ac(%rip),%xmm0 # 399a0 <_sk_srcover_bgra_8888_sse2_lowp+0x129c>
+ .byte 15,88,5,12,234,0,0 // addps 0xea0c(%rip),%xmm0 # 39b00 <_sk_srcover_bgra_8888_sse2_lowp+0x128c>
.byte 15,89,195 // mulps %xmm3,%xmm0
- .byte 15,88,5,178,232,0,0 // addps 0xe8b2(%rip),%xmm0 # 399b0 <_sk_srcover_bgra_8888_sse2_lowp+0x12ac>
+ .byte 15,88,5,18,234,0,0 // addps 0xea12(%rip),%xmm0 # 39b10 <_sk_srcover_bgra_8888_sse2_lowp+0x129c>
.byte 15,89,195 // mulps %xmm3,%xmm0
- .byte 15,88,5,184,232,0,0 // addps 0xe8b8(%rip),%xmm0 # 399c0 <_sk_srcover_bgra_8888_sse2_lowp+0x12bc>
+ .byte 15,88,5,24,234,0,0 // addps 0xea18(%rip),%xmm0 # 39b20 <_sk_srcover_bgra_8888_sse2_lowp+0x12ac>
.byte 72,173 // lods %ds:(%rsi),%rax
.byte 15,40,200 // movaps %xmm0,%xmm1
.byte 15,40,208 // movaps %xmm0,%xmm2
@@ -51811,7 +51811,7 @@ _sk_seed_shader_hsw_lowp:
.byte 197,249,110,209 // vmovd %ecx,%xmm2
.byte 196,226,125,88,210 // vpbroadcastd %xmm2,%ymm2
.byte 197,252,91,210 // vcvtdq2ps %ymm2,%ymm2
- .byte 196,226,125,24,29,141,215,0,0 // vbroadcastss 0xd78d(%rip),%ymm3 # 389b0 <_sk_srcover_bgra_8888_sse2_lowp+0x2ac>
+ .byte 196,226,125,24,29,253,216,0,0 // vbroadcastss 0xd8fd(%rip),%ymm3 # 38b20 <_sk_srcover_bgra_8888_sse2_lowp+0x2ac>
.byte 197,236,88,211 // vaddps %ymm3,%ymm2,%ymm2
.byte 72,173 // lods %ds:(%rsi),%rax
.byte 197,252,40,218 // vmovaps %ymm2,%ymm3
@@ -51927,7 +51927,7 @@ HIDDEN _sk_black_color_hsw_lowp
FUNCTION(_sk_black_color_hsw_lowp)
_sk_black_color_hsw_lowp:
.byte 72,173 // lods %ds:(%rsi),%rax
- .byte 196,226,125,121,29,120,220,0,0 // vpbroadcastw 0xdc78(%rip),%ymm3 # 39020 <_sk_srcover_bgra_8888_sse2_lowp+0x91c>
+ .byte 196,226,125,121,29,216,221,0,0 // vpbroadcastw 0xddd8(%rip),%ymm3 # 39180 <_sk_srcover_bgra_8888_sse2_lowp+0x90c>
.byte 197,252,87,192 // vxorps %ymm0,%ymm0,%ymm0
.byte 197,244,87,201 // vxorps %ymm1,%ymm1,%ymm1
.byte 197,236,87,210 // vxorps %ymm2,%ymm2,%ymm2
@@ -51938,7 +51938,7 @@ HIDDEN _sk_white_color_hsw_lowp
FUNCTION(_sk_white_color_hsw_lowp)
_sk_white_color_hsw_lowp:
.byte 72,173 // lods %ds:(%rsi),%rax
- .byte 196,226,125,121,5,97,220,0,0 // vpbroadcastw 0xdc61(%rip),%ymm0 # 39022 <_sk_srcover_bgra_8888_sse2_lowp+0x91e>
+ .byte 196,226,125,121,5,193,221,0,0 // vpbroadcastw 0xddc1(%rip),%ymm0 # 39182 <_sk_srcover_bgra_8888_sse2_lowp+0x90e>
.byte 197,253,111,200 // vmovdqa %ymm0,%ymm1
.byte 197,253,111,208 // vmovdqa %ymm0,%ymm2
.byte 197,253,111,216 // vmovdqa %ymm0,%ymm3
@@ -51949,8 +51949,8 @@ HIDDEN _sk_set_rgb_hsw_lowp
FUNCTION(_sk_set_rgb_hsw_lowp)
_sk_set_rgb_hsw_lowp:
.byte 72,173 // lods %ds:(%rsi),%rax
- .byte 197,122,16,5,215,213,0,0 // vmovss 0xd5d7(%rip),%xmm8 # 389b0 <_sk_srcover_bgra_8888_sse2_lowp+0x2ac>
- .byte 197,250,16,21,255,213,0,0 // vmovss 0xd5ff(%rip),%xmm2 # 389e0 <_sk_srcover_bgra_8888_sse2_lowp+0x2dc>
+ .byte 197,122,16,5,71,215,0,0 // vmovss 0xd747(%rip),%xmm8 # 38b20 <_sk_srcover_bgra_8888_sse2_lowp+0x2ac>
+ .byte 197,250,16,21,111,215,0,0 // vmovss 0xd76f(%rip),%xmm2 # 38b50 <_sk_srcover_bgra_8888_sse2_lowp+0x2dc>
.byte 197,250,16,0 // vmovss (%rax),%xmm0
.byte 196,226,57,153,194 // vfmadd132ss %xmm2,%xmm8,%xmm0
.byte 197,122,44,192 // vcvttss2si %xmm0,%r8d
@@ -51993,7 +51993,7 @@ HIDDEN _sk_premul_hsw_lowp
FUNCTION(_sk_premul_hsw_lowp)
_sk_premul_hsw_lowp:
.byte 197,229,213,192 // vpmullw %ymm0,%ymm3,%ymm0
- .byte 196,98,125,121,5,202,219,0,0 // vpbroadcastw 0xdbca(%rip),%ymm8 # 39024 <_sk_srcover_bgra_8888_sse2_lowp+0x920>
+ .byte 196,98,125,121,5,42,221,0,0 // vpbroadcastw 0xdd2a(%rip),%ymm8 # 39184 <_sk_srcover_bgra_8888_sse2_lowp+0x910>
.byte 196,193,125,253,192 // vpaddw %ymm8,%ymm0,%ymm0
.byte 197,253,113,208,8 // vpsrlw $0x8,%ymm0,%ymm0
.byte 197,229,213,201 // vpmullw %ymm1,%ymm3,%ymm1
@@ -52010,7 +52010,7 @@ HIDDEN _sk_premul_dst_hsw_lowp
FUNCTION(_sk_premul_dst_hsw_lowp)
_sk_premul_dst_hsw_lowp:
.byte 197,197,213,228 // vpmullw %ymm4,%ymm7,%ymm4
- .byte 196,98,125,121,5,149,219,0,0 // vpbroadcastw 0xdb95(%rip),%ymm8 # 39026 <_sk_srcover_bgra_8888_sse2_lowp+0x922>
+ .byte 196,98,125,121,5,245,220,0,0 // vpbroadcastw 0xdcf5(%rip),%ymm8 # 39186 <_sk_srcover_bgra_8888_sse2_lowp+0x912>
.byte 196,193,93,253,224 // vpaddw %ymm8,%ymm4,%ymm4
.byte 197,221,113,212,8 // vpsrlw $0x8,%ymm4,%ymm4
.byte 197,197,213,237 // vpmullw %ymm5,%ymm7,%ymm5
@@ -52058,7 +52058,7 @@ HIDDEN _sk_invert_hsw_lowp
.globl _sk_invert_hsw_lowp
FUNCTION(_sk_invert_hsw_lowp)
_sk_invert_hsw_lowp:
- .byte 196,98,125,121,5,44,219,0,0 // vpbroadcastw 0xdb2c(%rip),%ymm8 # 39028 <_sk_srcover_bgra_8888_sse2_lowp+0x924>
+ .byte 196,98,125,121,5,140,220,0,0 // vpbroadcastw 0xdc8c(%rip),%ymm8 # 39188 <_sk_srcover_bgra_8888_sse2_lowp+0x914>
.byte 197,189,249,192 // vpsubw %ymm0,%ymm8,%ymm0
.byte 197,189,249,201 // vpsubw %ymm1,%ymm8,%ymm1
.byte 197,189,249,210 // vpsubw %ymm2,%ymm8,%ymm2
@@ -52082,7 +52082,7 @@ HIDDEN _sk_srcatop_hsw_lowp
FUNCTION(_sk_srcatop_hsw_lowp)
_sk_srcatop_hsw_lowp:
.byte 197,197,213,192 // vpmullw %ymm0,%ymm7,%ymm0
- .byte 196,98,125,121,5,249,218,0,0 // vpbroadcastw 0xdaf9(%rip),%ymm8 # 3902a <_sk_srcover_bgra_8888_sse2_lowp+0x926>
+ .byte 196,98,125,121,5,89,220,0,0 // vpbroadcastw 0xdc59(%rip),%ymm8 # 3918a <_sk_srcover_bgra_8888_sse2_lowp+0x916>
.byte 197,189,249,219 // vpsubw %ymm3,%ymm8,%ymm3
.byte 197,101,213,204 // vpmullw %ymm4,%ymm3,%ymm9
.byte 196,193,125,253,192 // vpaddw %ymm8,%ymm0,%ymm0
@@ -52109,7 +52109,7 @@ HIDDEN _sk_dstatop_hsw_lowp
FUNCTION(_sk_dstatop_hsw_lowp)
_sk_dstatop_hsw_lowp:
.byte 197,93,213,195 // vpmullw %ymm3,%ymm4,%ymm8
- .byte 196,98,125,121,13,153,218,0,0 // vpbroadcastw 0xda99(%rip),%ymm9 # 3902c <_sk_srcover_bgra_8888_sse2_lowp+0x928>
+ .byte 196,98,125,121,13,249,219,0,0 // vpbroadcastw 0xdbf9(%rip),%ymm9 # 3918c <_sk_srcover_bgra_8888_sse2_lowp+0x918>
.byte 197,53,249,215 // vpsubw %ymm7,%ymm9,%ymm10
.byte 197,173,213,192 // vpmullw %ymm0,%ymm10,%ymm0
.byte 196,65,61,253,193 // vpaddw %ymm9,%ymm8,%ymm8
@@ -52136,7 +52136,7 @@ HIDDEN _sk_srcin_hsw_lowp
FUNCTION(_sk_srcin_hsw_lowp)
_sk_srcin_hsw_lowp:
.byte 197,197,213,192 // vpmullw %ymm0,%ymm7,%ymm0
- .byte 196,98,125,121,5,54,218,0,0 // vpbroadcastw 0xda36(%rip),%ymm8 # 3902e <_sk_srcover_bgra_8888_sse2_lowp+0x92a>
+ .byte 196,98,125,121,5,150,219,0,0 // vpbroadcastw 0xdb96(%rip),%ymm8 # 3918e <_sk_srcover_bgra_8888_sse2_lowp+0x91a>
.byte 196,193,125,253,192 // vpaddw %ymm8,%ymm0,%ymm0
.byte 197,253,113,208,8 // vpsrlw $0x8,%ymm0,%ymm0
.byte 197,197,213,201 // vpmullw %ymm1,%ymm7,%ymm1
@@ -52156,7 +52156,7 @@ HIDDEN _sk_dstin_hsw_lowp
FUNCTION(_sk_dstin_hsw_lowp)
_sk_dstin_hsw_lowp:
.byte 197,221,213,195 // vpmullw %ymm3,%ymm4,%ymm0
- .byte 196,98,125,121,5,243,217,0,0 // vpbroadcastw 0xd9f3(%rip),%ymm8 # 39030 <_sk_srcover_bgra_8888_sse2_lowp+0x92c>
+ .byte 196,98,125,121,5,83,219,0,0 // vpbroadcastw 0xdb53(%rip),%ymm8 # 39190 <_sk_srcover_bgra_8888_sse2_lowp+0x91c>
.byte 196,193,125,253,192 // vpaddw %ymm8,%ymm0,%ymm0
.byte 197,253,113,208,8 // vpsrlw $0x8,%ymm0,%ymm0
.byte 197,213,213,203 // vpmullw %ymm3,%ymm5,%ymm1
@@ -52175,7 +52175,7 @@ HIDDEN _sk_srcout_hsw_lowp
.globl _sk_srcout_hsw_lowp
FUNCTION(_sk_srcout_hsw_lowp)
_sk_srcout_hsw_lowp:
- .byte 196,98,125,121,5,180,217,0,0 // vpbroadcastw 0xd9b4(%rip),%ymm8 # 39032 <_sk_srcover_bgra_8888_sse2_lowp+0x92e>
+ .byte 196,98,125,121,5,20,219,0,0 // vpbroadcastw 0xdb14(%rip),%ymm8 # 39192 <_sk_srcover_bgra_8888_sse2_lowp+0x91e>
.byte 197,61,249,207 // vpsubw %ymm7,%ymm8,%ymm9
.byte 197,181,213,192 // vpmullw %ymm0,%ymm9,%ymm0
.byte 196,193,125,253,192 // vpaddw %ymm8,%ymm0,%ymm0
@@ -52196,7 +52196,7 @@ HIDDEN _sk_dstout_hsw_lowp
.globl _sk_dstout_hsw_lowp
FUNCTION(_sk_dstout_hsw_lowp)
_sk_dstout_hsw_lowp:
- .byte 196,98,125,121,5,109,217,0,0 // vpbroadcastw 0xd96d(%rip),%ymm8 # 39034 <_sk_srcover_bgra_8888_sse2_lowp+0x930>
+ .byte 196,98,125,121,5,205,218,0,0 // vpbroadcastw 0xdacd(%rip),%ymm8 # 39194 <_sk_srcover_bgra_8888_sse2_lowp+0x920>
.byte 197,189,249,219 // vpsubw %ymm3,%ymm8,%ymm3
.byte 197,229,213,196 // vpmullw %ymm4,%ymm3,%ymm0
.byte 196,193,125,253,192 // vpaddw %ymm8,%ymm0,%ymm0
@@ -52217,7 +52217,7 @@ HIDDEN _sk_srcover_hsw_lowp
.globl _sk_srcover_hsw_lowp
FUNCTION(_sk_srcover_hsw_lowp)
_sk_srcover_hsw_lowp:
- .byte 196,98,125,121,5,38,217,0,0 // vpbroadcastw 0xd926(%rip),%ymm8 # 39036 <_sk_srcover_bgra_8888_sse2_lowp+0x932>
+ .byte 196,98,125,121,5,134,218,0,0 // vpbroadcastw 0xda86(%rip),%ymm8 # 39196 <_sk_srcover_bgra_8888_sse2_lowp+0x922>
.byte 197,61,249,203 // vpsubw %ymm3,%ymm8,%ymm9
.byte 197,53,213,212 // vpmullw %ymm4,%ymm9,%ymm10
.byte 196,65,45,253,208 // vpaddw %ymm8,%ymm10,%ymm10
@@ -52242,7 +52242,7 @@ HIDDEN _sk_dstover_hsw_lowp
.globl _sk_dstover_hsw_lowp
FUNCTION(_sk_dstover_hsw_lowp)
_sk_dstover_hsw_lowp:
- .byte 196,98,125,121,5,203,216,0,0 // vpbroadcastw 0xd8cb(%rip),%ymm8 # 39038 <_sk_srcover_bgra_8888_sse2_lowp+0x934>
+ .byte 196,98,125,121,5,43,218,0,0 // vpbroadcastw 0xda2b(%rip),%ymm8 # 39198 <_sk_srcover_bgra_8888_sse2_lowp+0x924>
.byte 197,61,249,207 // vpsubw %ymm7,%ymm8,%ymm9
.byte 197,181,213,192 // vpmullw %ymm0,%ymm9,%ymm0
.byte 196,193,125,253,192 // vpaddw %ymm8,%ymm0,%ymm0
@@ -52268,7 +52268,7 @@ HIDDEN _sk_modulate_hsw_lowp
FUNCTION(_sk_modulate_hsw_lowp)
_sk_modulate_hsw_lowp:
.byte 197,221,213,192 // vpmullw %ymm0,%ymm4,%ymm0
- .byte 196,98,125,121,5,112,216,0,0 // vpbroadcastw 0xd870(%rip),%ymm8 # 3903a <_sk_srcover_bgra_8888_sse2_lowp+0x936>
+ .byte 196,98,125,121,5,208,217,0,0 // vpbroadcastw 0xd9d0(%rip),%ymm8 # 3919a <_sk_srcover_bgra_8888_sse2_lowp+0x926>
.byte 196,193,125,253,192 // vpaddw %ymm8,%ymm0,%ymm0
.byte 197,253,113,208,8 // vpsrlw $0x8,%ymm0,%ymm0
.byte 197,213,213,201 // vpmullw %ymm1,%ymm5,%ymm1
@@ -52287,7 +52287,7 @@ HIDDEN _sk_multiply_hsw_lowp
.globl _sk_multiply_hsw_lowp
FUNCTION(_sk_multiply_hsw_lowp)
_sk_multiply_hsw_lowp:
- .byte 196,98,125,121,5,49,216,0,0 // vpbroadcastw 0xd831(%rip),%ymm8 # 3903c <_sk_srcover_bgra_8888_sse2_lowp+0x938>
+ .byte 196,98,125,121,5,145,217,0,0 // vpbroadcastw 0xd991(%rip),%ymm8 # 3919c <_sk_srcover_bgra_8888_sse2_lowp+0x928>
.byte 197,61,249,203 // vpsubw %ymm3,%ymm8,%ymm9
.byte 197,53,213,212 // vpmullw %ymm4,%ymm9,%ymm10
.byte 196,65,93,253,216 // vpaddw %ymm8,%ymm4,%ymm11
@@ -52323,7 +52323,7 @@ HIDDEN _sk_plus__hsw_lowp
FUNCTION(_sk_plus__hsw_lowp)
_sk_plus__hsw_lowp:
.byte 197,221,253,192 // vpaddw %ymm0,%ymm4,%ymm0
- .byte 196,98,125,121,5,167,215,0,0 // vpbroadcastw 0xd7a7(%rip),%ymm8 # 3903e <_sk_srcover_bgra_8888_sse2_lowp+0x93a>
+ .byte 196,98,125,121,5,7,217,0,0 // vpbroadcastw 0xd907(%rip),%ymm8 # 3919e <_sk_srcover_bgra_8888_sse2_lowp+0x92a>
.byte 196,194,125,58,192 // vpminuw %ymm8,%ymm0,%ymm0
.byte 197,213,253,201 // vpaddw %ymm1,%ymm5,%ymm1
.byte 196,194,117,58,200 // vpminuw %ymm8,%ymm1,%ymm1
@@ -52340,7 +52340,7 @@ FUNCTION(_sk_screen_hsw_lowp)
_sk_screen_hsw_lowp:
.byte 197,93,253,192 // vpaddw %ymm0,%ymm4,%ymm8
.byte 197,221,213,192 // vpmullw %ymm0,%ymm4,%ymm0
- .byte 196,98,125,121,13,116,215,0,0 // vpbroadcastw 0xd774(%rip),%ymm9 # 39040 <_sk_srcover_bgra_8888_sse2_lowp+0x93c>
+ .byte 196,98,125,121,13,212,216,0,0 // vpbroadcastw 0xd8d4(%rip),%ymm9 # 391a0 <_sk_srcover_bgra_8888_sse2_lowp+0x92c>
.byte 196,193,125,253,193 // vpaddw %ymm9,%ymm0,%ymm0
.byte 197,253,113,208,8 // vpsrlw $0x8,%ymm0,%ymm0
.byte 197,189,249,192 // vpsubw %ymm0,%ymm8,%ymm0
@@ -52366,7 +52366,7 @@ HIDDEN _sk_xor__hsw_lowp
.globl _sk_xor__hsw_lowp
FUNCTION(_sk_xor__hsw_lowp)
_sk_xor__hsw_lowp:
- .byte 196,98,125,121,5,25,215,0,0 // vpbroadcastw 0xd719(%rip),%ymm8 # 39042 <_sk_srcover_bgra_8888_sse2_lowp+0x93e>
+ .byte 196,98,125,121,5,121,216,0,0 // vpbroadcastw 0xd879(%rip),%ymm8 # 391a2 <_sk_srcover_bgra_8888_sse2_lowp+0x92e>
.byte 197,61,249,207 // vpsubw %ymm7,%ymm8,%ymm9
.byte 197,181,213,192 // vpmullw %ymm0,%ymm9,%ymm0
.byte 197,61,249,211 // vpsubw %ymm3,%ymm8,%ymm10
@@ -52400,7 +52400,7 @@ _sk_darken_hsw_lowp:
.byte 197,197,213,192 // vpmullw %ymm0,%ymm7,%ymm0
.byte 197,93,213,203 // vpmullw %ymm3,%ymm4,%ymm9
.byte 196,194,125,62,193 // vpmaxuw %ymm9,%ymm0,%ymm0
- .byte 196,98,125,121,13,157,214,0,0 // vpbroadcastw 0xd69d(%rip),%ymm9 # 39044 <_sk_srcover_bgra_8888_sse2_lowp+0x940>
+ .byte 196,98,125,121,13,253,215,0,0 // vpbroadcastw 0xd7fd(%rip),%ymm9 # 391a4 <_sk_srcover_bgra_8888_sse2_lowp+0x930>
.byte 196,193,125,253,193 // vpaddw %ymm9,%ymm0,%ymm0
.byte 197,253,113,208,8 // vpsrlw $0x8,%ymm0,%ymm0
.byte 197,189,249,192 // vpsubw %ymm0,%ymm8,%ymm0
@@ -52434,7 +52434,7 @@ _sk_lighten_hsw_lowp:
.byte 197,197,213,192 // vpmullw %ymm0,%ymm7,%ymm0
.byte 197,93,213,203 // vpmullw %ymm3,%ymm4,%ymm9
.byte 196,194,125,58,193 // vpminuw %ymm9,%ymm0,%ymm0
- .byte 196,98,125,121,13,30,214,0,0 // vpbroadcastw 0xd61e(%rip),%ymm9 # 39046 <_sk_srcover_bgra_8888_sse2_lowp+0x942>
+ .byte 196,98,125,121,13,126,215,0,0 // vpbroadcastw 0xd77e(%rip),%ymm9 # 391a6 <_sk_srcover_bgra_8888_sse2_lowp+0x932>
.byte 196,193,125,253,193 // vpaddw %ymm9,%ymm0,%ymm0
.byte 197,253,113,208,8 // vpsrlw $0x8,%ymm0,%ymm0
.byte 197,189,249,192 // vpsubw %ymm0,%ymm8,%ymm0
@@ -52468,10 +52468,10 @@ _sk_difference_hsw_lowp:
.byte 197,197,213,192 // vpmullw %ymm0,%ymm7,%ymm0
.byte 197,93,213,203 // vpmullw %ymm3,%ymm4,%ymm9
.byte 196,194,125,58,193 // vpminuw %ymm9,%ymm0,%ymm0
- .byte 196,98,125,121,13,159,213,0,0 // vpbroadcastw 0xd59f(%rip),%ymm9 # 39048 <_sk_srcover_bgra_8888_sse2_lowp+0x944>
+ .byte 196,98,125,121,13,255,214,0,0 // vpbroadcastw 0xd6ff(%rip),%ymm9 # 391a8 <_sk_srcover_bgra_8888_sse2_lowp+0x934>
.byte 196,193,125,253,193 // vpaddw %ymm9,%ymm0,%ymm0
.byte 197,253,113,208,7 // vpsrlw $0x7,%ymm0,%ymm0
- .byte 196,98,125,121,21,142,213,0,0 // vpbroadcastw 0xd58e(%rip),%ymm10 # 3904a <_sk_srcover_bgra_8888_sse2_lowp+0x946>
+ .byte 196,98,125,121,21,238,214,0,0 // vpbroadcastw 0xd6ee(%rip),%ymm10 # 391aa <_sk_srcover_bgra_8888_sse2_lowp+0x936>
.byte 196,193,125,219,194 // vpand %ymm10,%ymm0,%ymm0
.byte 197,189,249,192 // vpsubw %ymm0,%ymm8,%ymm0
.byte 197,85,253,193 // vpaddw %ymm1,%ymm5,%ymm8
@@ -52504,10 +52504,10 @@ FUNCTION(_sk_exclusion_hsw_lowp)
_sk_exclusion_hsw_lowp:
.byte 197,93,253,192 // vpaddw %ymm0,%ymm4,%ymm8
.byte 197,221,213,192 // vpmullw %ymm0,%ymm4,%ymm0
- .byte 196,98,125,121,13,19,213,0,0 // vpbroadcastw 0xd513(%rip),%ymm9 # 3904c <_sk_srcover_bgra_8888_sse2_lowp+0x948>
+ .byte 196,98,125,121,13,115,214,0,0 // vpbroadcastw 0xd673(%rip),%ymm9 # 391ac <_sk_srcover_bgra_8888_sse2_lowp+0x938>
.byte 196,193,125,253,193 // vpaddw %ymm9,%ymm0,%ymm0
.byte 197,253,113,208,7 // vpsrlw $0x7,%ymm0,%ymm0
- .byte 196,98,125,121,21,2,213,0,0 // vpbroadcastw 0xd502(%rip),%ymm10 # 3904e <_sk_srcover_bgra_8888_sse2_lowp+0x94a>
+ .byte 196,98,125,121,21,98,214,0,0 // vpbroadcastw 0xd662(%rip),%ymm10 # 391ae <_sk_srcover_bgra_8888_sse2_lowp+0x93a>
.byte 196,193,125,219,194 // vpand %ymm10,%ymm0,%ymm0
.byte 197,189,249,192 // vpsubw %ymm0,%ymm8,%ymm0
.byte 197,85,253,193 // vpaddw %ymm1,%ymm5,%ymm8
@@ -52535,7 +52535,7 @@ HIDDEN _sk_hardlight_hsw_lowp
FUNCTION(_sk_hardlight_hsw_lowp)
_sk_hardlight_hsw_lowp:
.byte 197,125,253,192 // vpaddw %ymm0,%ymm0,%ymm8
- .byte 196,98,125,121,13,159,212,0,0 // vpbroadcastw 0xd49f(%rip),%ymm9 # 39052 <_sk_srcover_bgra_8888_sse2_lowp+0x94e>
+ .byte 196,98,125,121,13,255,213,0,0 // vpbroadcastw 0xd5ff(%rip),%ymm9 # 391b2 <_sk_srcover_bgra_8888_sse2_lowp+0x93e>
.byte 196,65,101,239,209 // vpxor %ymm9,%ymm3,%ymm10
.byte 196,65,61,239,217 // vpxor %ymm9,%ymm8,%ymm11
.byte 196,65,37,101,218 // vpcmpgtw %ymm10,%ymm11,%ymm11
@@ -52563,7 +52563,7 @@ _sk_hardlight_hsw_lowp:
.byte 197,101,249,210 // vpsubw %ymm2,%ymm3,%ymm10
.byte 197,69,249,246 // vpsubw %ymm6,%ymm7,%ymm14
.byte 196,65,45,213,214 // vpmullw %ymm14,%ymm10,%ymm10
- .byte 196,98,125,121,53,22,212,0,0 // vpbroadcastw 0xd416(%rip),%ymm14 # 39050 <_sk_srcover_bgra_8888_sse2_lowp+0x94c>
+ .byte 196,98,125,121,53,118,213,0,0 // vpbroadcastw 0xd576(%rip),%ymm14 # 391b0 <_sk_srcover_bgra_8888_sse2_lowp+0x93c>
.byte 196,65,45,253,210 // vpaddw %ymm10,%ymm10,%ymm10
.byte 196,65,61,249,194 // vpsubw %ymm10,%ymm8,%ymm8
.byte 197,13,249,215 // vpsubw %ymm7,%ymm14,%ymm10
@@ -52600,7 +52600,7 @@ HIDDEN _sk_overlay_hsw_lowp
FUNCTION(_sk_overlay_hsw_lowp)
_sk_overlay_hsw_lowp:
.byte 197,93,253,196 // vpaddw %ymm4,%ymm4,%ymm8
- .byte 196,98,125,121,13,139,211,0,0 // vpbroadcastw 0xd38b(%rip),%ymm9 # 39056 <_sk_srcover_bgra_8888_sse2_lowp+0x952>
+ .byte 196,98,125,121,13,235,212,0,0 // vpbroadcastw 0xd4eb(%rip),%ymm9 # 391b6 <_sk_srcover_bgra_8888_sse2_lowp+0x942>
.byte 196,65,69,239,209 // vpxor %ymm9,%ymm7,%ymm10
.byte 196,65,61,239,193 // vpxor %ymm9,%ymm8,%ymm8
.byte 196,65,61,101,218 // vpcmpgtw %ymm10,%ymm8,%ymm11
@@ -52624,7 +52624,7 @@ _sk_overlay_hsw_lowp:
.byte 196,65,21,253,237 // vpaddw %ymm13,%ymm13,%ymm13
.byte 196,65,61,249,237 // vpsubw %ymm13,%ymm8,%ymm13
.byte 196,67,13,76,229,192 // vpblendvb %ymm12,%ymm13,%ymm14,%ymm12
- .byte 196,98,125,121,45,19,211,0,0 // vpbroadcastw 0xd313(%rip),%ymm13 # 39054 <_sk_srcover_bgra_8888_sse2_lowp+0x950>
+ .byte 196,98,125,121,45,115,212,0,0 // vpbroadcastw 0xd473(%rip),%ymm13 # 391b4 <_sk_srcover_bgra_8888_sse2_lowp+0x940>
.byte 197,77,253,246 // vpaddw %ymm6,%ymm6,%ymm14
.byte 196,65,13,239,201 // vpxor %ymm9,%ymm14,%ymm9
.byte 196,65,53,101,202 // vpcmpgtw %ymm10,%ymm9,%ymm9
@@ -52747,7 +52747,7 @@ _sk_load_8888_hsw_lowp:
.byte 196,227,101,14,200,170 // vpblendw $0xaa,%ymm0,%ymm3,%ymm1
.byte 196,227,109,14,192,170 // vpblendw $0xaa,%ymm0,%ymm2,%ymm0
.byte 196,226,125,43,201 // vpackusdw %ymm1,%ymm0,%ymm1
- .byte 197,125,111,5,217,208,0,0 // vmovdqa 0xd0d9(%rip),%ymm8 # 39060 <_sk_srcover_bgra_8888_sse2_lowp+0x95c>
+ .byte 197,125,111,5,57,210,0,0 // vmovdqa 0xd239(%rip),%ymm8 # 391c0 <_sk_srcover_bgra_8888_sse2_lowp+0x94c>
.byte 196,193,117,219,192 // vpand %ymm8,%ymm1,%ymm0
.byte 197,245,113,209,8 // vpsrlw $0x8,%ymm1,%ymm1
.byte 197,229,114,211,16 // vpsrld $0x10,%ymm3,%ymm3
@@ -52877,7 +52877,7 @@ _sk_load_8888_dst_hsw_lowp:
.byte 196,227,69,14,236,170 // vpblendw $0xaa,%ymm4,%ymm7,%ymm5
.byte 196,227,77,14,228,170 // vpblendw $0xaa,%ymm4,%ymm6,%ymm4
.byte 196,226,93,43,237 // vpackusdw %ymm5,%ymm4,%ymm5
- .byte 197,125,111,5,242,206,0,0 // vmovdqa 0xcef2(%rip),%ymm8 # 39080 <_sk_srcover_bgra_8888_sse2_lowp+0x97c>
+ .byte 197,125,111,5,82,208,0,0 // vmovdqa 0xd052(%rip),%ymm8 # 391e0 <_sk_srcover_bgra_8888_sse2_lowp+0x96c>
.byte 196,193,85,219,224 // vpand %ymm8,%ymm5,%ymm4
.byte 197,213,113,213,8 // vpsrlw $0x8,%ymm5,%ymm5
.byte 197,197,114,215,16 // vpsrld $0x10,%ymm7,%ymm7
@@ -53103,7 +53103,7 @@ _sk_load_bgra_hsw_lowp:
.byte 196,227,125,14,209,170 // vpblendw $0xaa,%ymm1,%ymm0,%ymm2
.byte 196,227,101,14,201,170 // vpblendw $0xaa,%ymm1,%ymm3,%ymm1
.byte 196,226,117,43,202 // vpackusdw %ymm2,%ymm1,%ymm1
- .byte 197,125,111,5,158,203,0,0 // vmovdqa 0xcb9e(%rip),%ymm8 # 390a0 <_sk_srcover_bgra_8888_sse2_lowp+0x99c>
+ .byte 197,125,111,5,254,204,0,0 // vmovdqa 0xccfe(%rip),%ymm8 # 39200 <_sk_srcover_bgra_8888_sse2_lowp+0x98c>
.byte 196,193,117,219,208 // vpand %ymm8,%ymm1,%ymm2
.byte 197,245,113,209,8 // vpsrlw $0x8,%ymm1,%ymm1
.byte 197,253,114,208,16 // vpsrld $0x10,%ymm0,%ymm0
@@ -53232,7 +53232,7 @@ _sk_load_bgra_dst_hsw_lowp:
.byte 196,227,93,14,245,170 // vpblendw $0xaa,%ymm5,%ymm4,%ymm6
.byte 196,227,69,14,237,170 // vpblendw $0xaa,%ymm5,%ymm7,%ymm5
.byte 196,226,85,43,238 // vpackusdw %ymm6,%ymm5,%ymm5
- .byte 197,125,111,5,182,201,0,0 // vmovdqa 0xc9b6(%rip),%ymm8 # 390c0 <_sk_srcover_bgra_8888_sse2_lowp+0x9bc>
+ .byte 197,125,111,5,22,203,0,0 // vmovdqa 0xcb16(%rip),%ymm8 # 39220 <_sk_srcover_bgra_8888_sse2_lowp+0x9ac>
.byte 196,193,85,219,240 // vpand %ymm8,%ymm5,%ymm6
.byte 197,213,113,213,8 // vpsrlw $0x8,%ymm5,%ymm5
.byte 197,221,114,212,16 // vpsrld $0x10,%ymm4,%ymm4
@@ -53413,7 +53413,7 @@ _sk_gather_8888_hsw_lowp:
.byte 196,195,101,14,194,170 // vpblendw $0xaa,%ymm10,%ymm3,%ymm0
.byte 196,195,109,14,202,170 // vpblendw $0xaa,%ymm10,%ymm2,%ymm1
.byte 196,226,117,43,200 // vpackusdw %ymm0,%ymm1,%ymm1
- .byte 197,125,111,5,82,199,0,0 // vmovdqa 0xc752(%rip),%ymm8 # 390e0 <_sk_srcover_bgra_8888_sse2_lowp+0x9dc>
+ .byte 197,125,111,5,178,200,0,0 // vmovdqa 0xc8b2(%rip),%ymm8 # 39240 <_sk_srcover_bgra_8888_sse2_lowp+0x9cc>
.byte 196,193,117,219,192 // vpand %ymm8,%ymm1,%ymm0
.byte 197,245,113,209,8 // vpsrlw $0x8,%ymm1,%ymm1
.byte 197,229,114,211,16 // vpsrld $0x10,%ymm3,%ymm3
@@ -53463,7 +53463,7 @@ _sk_gather_bgra_hsw_lowp:
.byte 196,195,125,14,202,170 // vpblendw $0xaa,%ymm10,%ymm0,%ymm1
.byte 196,195,61,14,210,170 // vpblendw $0xaa,%ymm10,%ymm8,%ymm2
.byte 196,226,109,43,201 // vpackusdw %ymm1,%ymm2,%ymm1
- .byte 197,253,111,29,153,198,0,0 // vmovdqa 0xc699(%rip),%ymm3 # 39100 <_sk_srcover_bgra_8888_sse2_lowp+0x9fc>
+ .byte 197,253,111,29,249,199,0,0 // vmovdqa 0xc7f9(%rip),%ymm3 # 39260 <_sk_srcover_bgra_8888_sse2_lowp+0x9ec>
.byte 197,245,219,211 // vpand %ymm3,%ymm1,%ymm2
.byte 197,245,113,209,8 // vpsrlw $0x8,%ymm1,%ymm1
.byte 197,253,114,208,16 // vpsrld $0x10,%ymm0,%ymm0
@@ -53540,13 +53540,13 @@ _sk_load_565_hsw_lowp:
.byte 196,193,122,111,20,80 // vmovdqu (%r8,%rdx,2),%xmm2
.byte 196,227,109,56,201,1 // vinserti128 $0x1,%xmm1,%ymm2,%ymm1
.byte 196,227,117,2,192,192 // vpblendd $0xc0,%ymm0,%ymm1,%ymm0
- .byte 196,226,125,121,13,66,197,0,0 // vpbroadcastw 0xc542(%rip),%ymm1 # 39120 <_sk_srcover_bgra_8888_sse2_lowp+0xa1c>
+ .byte 196,226,125,121,13,162,198,0,0 // vpbroadcastw 0xc6a2(%rip),%ymm1 # 39280 <_sk_srcover_bgra_8888_sse2_lowp+0xa0c>
.byte 197,237,113,208,8 // vpsrlw $0x8,%ymm0,%ymm2
.byte 197,237,219,201 // vpand %ymm1,%ymm2,%ymm1
.byte 197,237,113,208,5 // vpsrlw $0x5,%ymm0,%ymm2
- .byte 196,226,125,121,29,45,197,0,0 // vpbroadcastw 0xc52d(%rip),%ymm3 # 39122 <_sk_srcover_bgra_8888_sse2_lowp+0xa1e>
+ .byte 196,226,125,121,29,141,198,0,0 // vpbroadcastw 0xc68d(%rip),%ymm3 # 39282 <_sk_srcover_bgra_8888_sse2_lowp+0xa0e>
.byte 197,237,219,211 // vpand %ymm3,%ymm2,%ymm2
- .byte 196,226,125,121,29,34,197,0,0 // vpbroadcastw 0xc522(%rip),%ymm3 # 39124 <_sk_srcover_bgra_8888_sse2_lowp+0xa20>
+ .byte 196,226,125,121,29,130,198,0,0 // vpbroadcastw 0xc682(%rip),%ymm3 # 39284 <_sk_srcover_bgra_8888_sse2_lowp+0xa10>
.byte 197,253,219,219 // vpand %ymm3,%ymm0,%ymm3
.byte 197,253,113,208,13 // vpsrlw $0xd,%ymm0,%ymm0
.byte 197,245,235,192 // vpor %ymm0,%ymm1,%ymm0
@@ -53557,7 +53557,7 @@ _sk_load_565_hsw_lowp:
.byte 197,229,113,211,2 // vpsrlw $0x2,%ymm3,%ymm3
.byte 197,237,235,211 // vpor %ymm3,%ymm2,%ymm2
.byte 72,173 // lods %ds:(%rsi),%rax
- .byte 196,226,125,121,29,240,196,0,0 // vpbroadcastw 0xc4f0(%rip),%ymm3 # 39126 <_sk_srcover_bgra_8888_sse2_lowp+0xa22>
+ .byte 196,226,125,121,29,80,198,0,0 // vpbroadcastw 0xc650(%rip),%ymm3 # 39286 <_sk_srcover_bgra_8888_sse2_lowp+0xa12>
.byte 255,224 // jmpq *%rax
.byte 135,254 // xchg %edi,%esi
.byte 255 // (bad)
@@ -53660,13 +53660,13 @@ _sk_load_565_dst_hsw_lowp:
.byte 196,193,122,111,52,80 // vmovdqu (%r8,%rdx,2),%xmm6
.byte 196,227,77,56,237,1 // vinserti128 $0x1,%xmm5,%ymm6,%ymm5
.byte 196,227,85,2,228,192 // vpblendd $0xc0,%ymm4,%ymm5,%ymm4
- .byte 196,226,125,121,45,100,195,0,0 // vpbroadcastw 0xc364(%rip),%ymm5 # 39128 <_sk_srcover_bgra_8888_sse2_lowp+0xa24>
+ .byte 196,226,125,121,45,196,196,0,0 // vpbroadcastw 0xc4c4(%rip),%ymm5 # 39288 <_sk_srcover_bgra_8888_sse2_lowp+0xa14>
.byte 197,205,113,212,8 // vpsrlw $0x8,%ymm4,%ymm6
.byte 197,205,219,237 // vpand %ymm5,%ymm6,%ymm5
.byte 197,205,113,212,5 // vpsrlw $0x5,%ymm4,%ymm6
- .byte 196,226,125,121,61,79,195,0,0 // vpbroadcastw 0xc34f(%rip),%ymm7 # 3912a <_sk_srcover_bgra_8888_sse2_lowp+0xa26>
+ .byte 196,226,125,121,61,175,196,0,0 // vpbroadcastw 0xc4af(%rip),%ymm7 # 3928a <_sk_srcover_bgra_8888_sse2_lowp+0xa16>
.byte 197,205,219,247 // vpand %ymm7,%ymm6,%ymm6
- .byte 196,226,125,121,61,68,195,0,0 // vpbroadcastw 0xc344(%rip),%ymm7 # 3912c <_sk_srcover_bgra_8888_sse2_lowp+0xa28>
+ .byte 196,226,125,121,61,164,196,0,0 // vpbroadcastw 0xc4a4(%rip),%ymm7 # 3928c <_sk_srcover_bgra_8888_sse2_lowp+0xa18>
.byte 197,221,219,255 // vpand %ymm7,%ymm4,%ymm7
.byte 197,221,113,212,13 // vpsrlw $0xd,%ymm4,%ymm4
.byte 197,213,235,228 // vpor %ymm4,%ymm5,%ymm4
@@ -53677,7 +53677,7 @@ _sk_load_565_dst_hsw_lowp:
.byte 197,197,113,215,2 // vpsrlw $0x2,%ymm7,%ymm7
.byte 197,205,235,247 // vpor %ymm7,%ymm6,%ymm6
.byte 72,173 // lods %ds:(%rsi),%rax
- .byte 196,226,125,121,61,18,195,0,0 // vpbroadcastw 0xc312(%rip),%ymm7 # 3912e <_sk_srcover_bgra_8888_sse2_lowp+0xa2a>
+ .byte 196,226,125,121,61,114,196,0,0 // vpbroadcastw 0xc472(%rip),%ymm7 # 3928e <_sk_srcover_bgra_8888_sse2_lowp+0xa1a>
.byte 255,224 // jmpq *%rax
.byte 102,144 // xchg %ax,%ax
.byte 133,254 // test %edi,%esi
@@ -53728,10 +53728,10 @@ _sk_store_565_hsw_lowp:
.byte 76,15,175,193 // imul %rcx,%r8
.byte 77,1,192 // add %r8,%r8
.byte 76,3,0 // add (%rax),%r8
- .byte 196,98,125,121,5,187,194,0,0 // vpbroadcastw 0xc2bb(%rip),%ymm8 # 39130 <_sk_srcover_bgra_8888_sse2_lowp+0xa2c>
+ .byte 196,98,125,121,5,27,196,0,0 // vpbroadcastw 0xc41b(%rip),%ymm8 # 39290 <_sk_srcover_bgra_8888_sse2_lowp+0xa1c>
.byte 197,181,113,240,8 // vpsllw $0x8,%ymm0,%ymm9
.byte 196,65,53,219,192 // vpand %ymm8,%ymm9,%ymm8
- .byte 196,98,125,121,13,170,194,0,0 // vpbroadcastw 0xc2aa(%rip),%ymm9 # 39132 <_sk_srcover_bgra_8888_sse2_lowp+0xa2e>
+ .byte 196,98,125,121,13,10,196,0,0 // vpbroadcastw 0xc40a(%rip),%ymm9 # 39292 <_sk_srcover_bgra_8888_sse2_lowp+0xa1e>
.byte 197,173,113,241,3 // vpsllw $0x3,%ymm1,%ymm10
.byte 196,65,45,219,201 // vpand %ymm9,%ymm10,%ymm9
.byte 196,65,53,235,192 // vpor %ymm8,%ymm9,%ymm8
@@ -53900,13 +53900,13 @@ _sk_gather_565_hsw_lowp:
.byte 67,15,183,4,80 // movzwl (%r8,%r10,2),%eax
.byte 197,249,196,192,7 // vpinsrw $0x7,%eax,%xmm0,%xmm0
.byte 196,227,125,56,193,1 // vinserti128 $0x1,%xmm1,%ymm0,%ymm0
- .byte 196,226,125,121,13,238,191,0,0 // vpbroadcastw 0xbfee(%rip),%ymm1 # 39134 <_sk_srcover_bgra_8888_sse2_lowp+0xa30>
+ .byte 196,226,125,121,13,78,193,0,0 // vpbroadcastw 0xc14e(%rip),%ymm1 # 39294 <_sk_srcover_bgra_8888_sse2_lowp+0xa20>
.byte 197,237,113,208,8 // vpsrlw $0x8,%ymm0,%ymm2
.byte 197,237,219,201 // vpand %ymm1,%ymm2,%ymm1
.byte 197,237,113,208,5 // vpsrlw $0x5,%ymm0,%ymm2
- .byte 196,226,125,121,29,217,191,0,0 // vpbroadcastw 0xbfd9(%rip),%ymm3 # 39136 <_sk_srcover_bgra_8888_sse2_lowp+0xa32>
+ .byte 196,226,125,121,29,57,193,0,0 // vpbroadcastw 0xc139(%rip),%ymm3 # 39296 <_sk_srcover_bgra_8888_sse2_lowp+0xa22>
.byte 197,237,219,211 // vpand %ymm3,%ymm2,%ymm2
- .byte 196,226,125,121,29,206,191,0,0 // vpbroadcastw 0xbfce(%rip),%ymm3 # 39138 <_sk_srcover_bgra_8888_sse2_lowp+0xa34>
+ .byte 196,226,125,121,29,46,193,0,0 // vpbroadcastw 0xc12e(%rip),%ymm3 # 39298 <_sk_srcover_bgra_8888_sse2_lowp+0xa24>
.byte 197,253,219,219 // vpand %ymm3,%ymm0,%ymm3
.byte 197,253,113,208,13 // vpsrlw $0xd,%ymm0,%ymm0
.byte 197,245,235,192 // vpor %ymm0,%ymm1,%ymm0
@@ -53917,7 +53917,7 @@ _sk_gather_565_hsw_lowp:
.byte 197,229,113,211,2 // vpsrlw $0x2,%ymm3,%ymm3
.byte 197,237,235,211 // vpor %ymm3,%ymm2,%ymm2
.byte 72,173 // lods %ds:(%rsi),%rax
- .byte 196,226,125,121,29,156,191,0,0 // vpbroadcastw 0xbf9c(%rip),%ymm3 # 3913a <_sk_srcover_bgra_8888_sse2_lowp+0xa36>
+ .byte 196,226,125,121,29,252,192,0,0 // vpbroadcastw 0xc0fc(%rip),%ymm3 # 3929a <_sk_srcover_bgra_8888_sse2_lowp+0xa26>
.byte 255,224 // jmpq *%rax
HIDDEN _sk_load_4444_hsw_lowp
@@ -53988,7 +53988,7 @@ _sk_load_4444_hsw_lowp:
.byte 196,227,117,2,192,192 // vpblendd $0xc0,%ymm0,%ymm1,%ymm0
.byte 197,245,113,208,12 // vpsrlw $0xc,%ymm0,%ymm1
.byte 197,237,113,208,8 // vpsrlw $0x8,%ymm0,%ymm2
- .byte 196,226,125,121,29,66,190,0,0 // vpbroadcastw 0xbe42(%rip),%ymm3 # 3913c <_sk_srcover_bgra_8888_sse2_lowp+0xa38>
+ .byte 196,226,125,121,29,162,191,0,0 // vpbroadcastw 0xbfa2(%rip),%ymm3 # 3929c <_sk_srcover_bgra_8888_sse2_lowp+0xa28>
.byte 197,237,219,211 // vpand %ymm3,%ymm2,%ymm2
.byte 197,189,113,208,4 // vpsrlw $0x4,%ymm0,%ymm8
.byte 197,61,219,195 // vpand %ymm3,%ymm8,%ymm8
@@ -54108,7 +54108,7 @@ _sk_load_4444_dst_hsw_lowp:
.byte 196,227,85,2,228,192 // vpblendd $0xc0,%ymm4,%ymm5,%ymm4
.byte 197,213,113,212,12 // vpsrlw $0xc,%ymm4,%ymm5
.byte 197,205,113,212,8 // vpsrlw $0x8,%ymm4,%ymm6
- .byte 196,226,125,121,61,112,188,0,0 // vpbroadcastw 0xbc70(%rip),%ymm7 # 3913e <_sk_srcover_bgra_8888_sse2_lowp+0xa3a>
+ .byte 196,226,125,121,61,208,189,0,0 // vpbroadcastw 0xbdd0(%rip),%ymm7 # 3929e <_sk_srcover_bgra_8888_sse2_lowp+0xa2a>
.byte 197,205,219,247 // vpand %ymm7,%ymm6,%ymm6
.byte 197,189,113,212,4 // vpsrlw $0x4,%ymm4,%ymm8
.byte 197,61,219,199 // vpand %ymm7,%ymm8,%ymm8
@@ -54169,13 +54169,13 @@ _sk_store_4444_hsw_lowp:
.byte 76,15,175,193 // imul %rcx,%r8
.byte 77,1,192 // add %r8,%r8
.byte 76,3,0 // add (%rax),%r8
- .byte 196,98,125,121,5,223,187,0,0 // vpbroadcastw 0xbbdf(%rip),%ymm8 # 39140 <_sk_srcover_bgra_8888_sse2_lowp+0xa3c>
+ .byte 196,98,125,121,5,63,189,0,0 // vpbroadcastw 0xbd3f(%rip),%ymm8 # 392a0 <_sk_srcover_bgra_8888_sse2_lowp+0xa2c>
.byte 197,181,113,240,8 // vpsllw $0x8,%ymm0,%ymm9
.byte 196,65,53,219,192 // vpand %ymm8,%ymm9,%ymm8
.byte 197,181,113,241,4 // vpsllw $0x4,%ymm1,%ymm9
- .byte 197,53,219,13,232,187,0,0 // vpand 0xbbe8(%rip),%ymm9,%ymm9 # 39160 <_sk_srcover_bgra_8888_sse2_lowp+0xa5c>
+ .byte 197,53,219,13,72,189,0,0 // vpand 0xbd48(%rip),%ymm9,%ymm9 # 392c0 <_sk_srcover_bgra_8888_sse2_lowp+0xa4c>
.byte 196,65,53,235,192 // vpor %ymm8,%ymm9,%ymm8
- .byte 196,98,125,121,13,250,187,0,0 // vpbroadcastw 0xbbfa(%rip),%ymm9 # 39180 <_sk_srcover_bgra_8888_sse2_lowp+0xa7c>
+ .byte 196,98,125,121,13,90,189,0,0 // vpbroadcastw 0xbd5a(%rip),%ymm9 # 392e0 <_sk_srcover_bgra_8888_sse2_lowp+0xa6c>
.byte 196,65,109,219,201 // vpand %ymm9,%ymm2,%ymm9
.byte 197,173,113,211,4 // vpsrlw $0x4,%ymm3,%ymm10
.byte 196,65,53,235,202 // vpor %ymm10,%ymm9,%ymm9
@@ -54346,7 +54346,7 @@ _sk_gather_4444_hsw_lowp:
.byte 196,227,125,56,193,1 // vinserti128 $0x1,%xmm1,%ymm0,%ymm0
.byte 197,245,113,208,12 // vpsrlw $0xc,%ymm0,%ymm1
.byte 197,237,113,208,8 // vpsrlw $0x8,%ymm0,%ymm2
- .byte 196,226,125,121,29,58,185,0,0 // vpbroadcastw 0xb93a(%rip),%ymm3 # 39182 <_sk_srcover_bgra_8888_sse2_lowp+0xa7e>
+ .byte 196,226,125,121,29,154,186,0,0 // vpbroadcastw 0xba9a(%rip),%ymm3 # 392e2 <_sk_srcover_bgra_8888_sse2_lowp+0xa6e>
.byte 197,237,219,211 // vpand %ymm3,%ymm2,%ymm2
.byte 197,189,113,208,4 // vpsrlw $0x4,%ymm0,%ymm8
.byte 197,61,219,195 // vpand %ymm3,%ymm8,%ymm8
@@ -54551,7 +54551,7 @@ _sk_store_a8_hsw_lowp:
.byte 76,15,175,193 // imul %rcx,%r8
.byte 76,3,0 // add (%rax),%r8
.byte 196,195,125,57,216,1 // vextracti128 $0x1,%ymm3,%xmm8
- .byte 197,121,111,13,201,190,0,0 // vmovdqa 0xbec9(%rip),%xmm9 # 399e0 <_sk_srcover_bgra_8888_sse2_lowp+0x12dc>
+ .byte 197,121,111,13,41,192,0,0 // vmovdqa 0xc029(%rip),%xmm9 # 39b40 <_sk_srcover_bgra_8888_sse2_lowp+0x12cc>
.byte 196,66,57,0,193 // vpshufb %xmm9,%xmm8,%xmm8
.byte 196,66,97,0,201 // vpshufb %xmm9,%xmm3,%xmm9
.byte 196,65,49,108,192 // vpunpcklqdq %xmm8,%xmm9,%xmm8
@@ -54771,7 +54771,7 @@ _sk_load_g8_hsw_lowp:
.byte 196,227,113,2,192,8 // vpblendd $0x8,%xmm0,%xmm1,%xmm0
.byte 196,226,125,48,192 // vpmovzxbw %xmm0,%ymm0
.byte 72,173 // lods %ds:(%rsi),%rax
- .byte 196,226,125,121,29,218,178,0,0 // vpbroadcastw 0xb2da(%rip),%ymm3 # 39184 <_sk_srcover_bgra_8888_sse2_lowp+0xa80>
+ .byte 196,226,125,121,29,58,180,0,0 // vpbroadcastw 0xb43a(%rip),%ymm3 # 392e4 <_sk_srcover_bgra_8888_sse2_lowp+0xa70>
.byte 197,253,111,200 // vmovdqa %ymm0,%ymm1
.byte 197,253,111,208 // vmovdqa %ymm0,%ymm2
.byte 255,224 // jmpq *%rax
@@ -54863,7 +54863,7 @@ _sk_load_g8_dst_hsw_lowp:
.byte 196,227,81,2,228,8 // vpblendd $0x8,%xmm4,%xmm5,%xmm4
.byte 196,226,125,48,228 // vpmovzxbw %xmm4,%ymm4
.byte 72,173 // lods %ds:(%rsi),%rax
- .byte 196,226,125,121,61,155,177,0,0 // vpbroadcastw 0xb19b(%rip),%ymm7 # 39186 <_sk_srcover_bgra_8888_sse2_lowp+0xa82>
+ .byte 196,226,125,121,61,251,178,0,0 // vpbroadcastw 0xb2fb(%rip),%ymm7 # 392e6 <_sk_srcover_bgra_8888_sse2_lowp+0xa72>
.byte 197,253,111,236 // vmovdqa %ymm4,%ymm5
.byte 197,253,111,244 // vmovdqa %ymm4,%ymm6
.byte 255,224 // jmpq *%rax
@@ -54907,12 +54907,12 @@ HIDDEN _sk_luminance_to_alpha_hsw_lowp
.globl _sk_luminance_to_alpha_hsw_lowp
FUNCTION(_sk_luminance_to_alpha_hsw_lowp)
_sk_luminance_to_alpha_hsw_lowp:
- .byte 196,226,125,121,29,75,177,0,0 // vpbroadcastw 0xb14b(%rip),%ymm3 # 39188 <_sk_srcover_bgra_8888_sse2_lowp+0xa84>
+ .byte 196,226,125,121,29,171,178,0,0 // vpbroadcastw 0xb2ab(%rip),%ymm3 # 392e8 <_sk_srcover_bgra_8888_sse2_lowp+0xa74>
.byte 197,253,213,195 // vpmullw %ymm3,%ymm0,%ymm0
- .byte 196,226,125,121,29,64,177,0,0 // vpbroadcastw 0xb140(%rip),%ymm3 # 3918a <_sk_srcover_bgra_8888_sse2_lowp+0xa86>
+ .byte 196,226,125,121,29,160,178,0,0 // vpbroadcastw 0xb2a0(%rip),%ymm3 # 392ea <_sk_srcover_bgra_8888_sse2_lowp+0xa76>
.byte 197,245,213,203 // vpmullw %ymm3,%ymm1,%ymm1
.byte 197,245,253,192 // vpaddw %ymm0,%ymm1,%ymm0
- .byte 196,226,125,121,13,49,177,0,0 // vpbroadcastw 0xb131(%rip),%ymm1 # 3918c <_sk_srcover_bgra_8888_sse2_lowp+0xa88>
+ .byte 196,226,125,121,13,145,178,0,0 // vpbroadcastw 0xb291(%rip),%ymm1 # 392ec <_sk_srcover_bgra_8888_sse2_lowp+0xa78>
.byte 197,237,213,201 // vpmullw %ymm1,%ymm2,%ymm1
.byte 197,253,253,193 // vpaddw %ymm1,%ymm0,%ymm0
.byte 197,229,113,208,8 // vpsrlw $0x8,%ymm0,%ymm3
@@ -55011,7 +55011,7 @@ _sk_gather_g8_hsw_lowp:
.byte 196,227,121,32,192,15 // vpinsrb $0xf,%eax,%xmm0,%xmm0
.byte 196,226,125,48,192 // vpmovzxbw %xmm0,%ymm0
.byte 72,173 // lods %ds:(%rsi),%rax
- .byte 196,226,125,121,29,116,175,0,0 // vpbroadcastw 0xaf74(%rip),%ymm3 # 3918e <_sk_srcover_bgra_8888_sse2_lowp+0xa8a>
+ .byte 196,226,125,121,29,212,176,0,0 // vpbroadcastw 0xb0d4(%rip),%ymm3 # 392ee <_sk_srcover_bgra_8888_sse2_lowp+0xa7a>
.byte 197,253,111,200 // vmovdqa %ymm0,%ymm1
.byte 197,253,111,208 // vmovdqa %ymm0,%ymm2
.byte 255,224 // jmpq *%rax
@@ -55022,13 +55022,13 @@ FUNCTION(_sk_scale_1_float_hsw_lowp)
_sk_scale_1_float_hsw_lowp:
.byte 72,173 // lods %ds:(%rsi),%rax
.byte 197,122,16,0 // vmovss (%rax),%xmm8
- .byte 197,122,16,13,174,167,0,0 // vmovss 0xa7ae(%rip),%xmm9 # 389e0 <_sk_srcover_bgra_8888_sse2_lowp+0x2dc>
- .byte 196,98,57,169,13,117,167,0,0 // vfmadd213ss 0xa775(%rip),%xmm8,%xmm9 # 389b0 <_sk_srcover_bgra_8888_sse2_lowp+0x2ac>
+ .byte 197,122,16,13,30,169,0,0 // vmovss 0xa91e(%rip),%xmm9 # 38b50 <_sk_srcover_bgra_8888_sse2_lowp+0x2dc>
+ .byte 196,98,57,169,13,229,168,0,0 // vfmadd213ss 0xa8e5(%rip),%xmm8,%xmm9 # 38b20 <_sk_srcover_bgra_8888_sse2_lowp+0x2ac>
.byte 196,193,122,44,193 // vcvttss2si %xmm9,%eax
.byte 197,121,110,192 // vmovd %eax,%xmm8
.byte 196,66,125,121,192 // vpbroadcastw %xmm8,%ymm8
.byte 197,189,213,192 // vpmullw %ymm0,%ymm8,%ymm0
- .byte 196,98,125,121,13,58,175,0,0 // vpbroadcastw 0xaf3a(%rip),%ymm9 # 39190 <_sk_srcover_bgra_8888_sse2_lowp+0xa8c>
+ .byte 196,98,125,121,13,154,176,0,0 // vpbroadcastw 0xb09a(%rip),%ymm9 # 392f0 <_sk_srcover_bgra_8888_sse2_lowp+0xa7c>
.byte 196,193,125,253,193 // vpaddw %ymm9,%ymm0,%ymm0
.byte 197,253,113,208,8 // vpsrlw $0x8,%ymm0,%ymm0
.byte 197,189,213,201 // vpmullw %ymm1,%ymm8,%ymm1
@@ -55049,12 +55049,12 @@ FUNCTION(_sk_lerp_1_float_hsw_lowp)
_sk_lerp_1_float_hsw_lowp:
.byte 72,173 // lods %ds:(%rsi),%rax
.byte 197,122,16,0 // vmovss (%rax),%xmm8
- .byte 197,122,16,13,68,167,0,0 // vmovss 0xa744(%rip),%xmm9 # 389e0 <_sk_srcover_bgra_8888_sse2_lowp+0x2dc>
- .byte 196,98,57,169,13,11,167,0,0 // vfmadd213ss 0xa70b(%rip),%xmm8,%xmm9 # 389b0 <_sk_srcover_bgra_8888_sse2_lowp+0x2ac>
+ .byte 197,122,16,13,180,168,0,0 // vmovss 0xa8b4(%rip),%xmm9 # 38b50 <_sk_srcover_bgra_8888_sse2_lowp+0x2dc>
+ .byte 196,98,57,169,13,123,168,0,0 // vfmadd213ss 0xa87b(%rip),%xmm8,%xmm9 # 38b20 <_sk_srcover_bgra_8888_sse2_lowp+0x2ac>
.byte 196,193,122,44,193 // vcvttss2si %xmm9,%eax
.byte 197,121,110,192 // vmovd %eax,%xmm8
.byte 196,66,125,121,192 // vpbroadcastw %xmm8,%ymm8
- .byte 196,98,125,121,13,214,174,0,0 // vpbroadcastw 0xaed6(%rip),%ymm9 # 39192 <_sk_srcover_bgra_8888_sse2_lowp+0xa8e>
+ .byte 196,98,125,121,13,54,176,0,0 // vpbroadcastw 0xb036(%rip),%ymm9 # 392f2 <_sk_srcover_bgra_8888_sse2_lowp+0xa7e>
.byte 196,65,53,249,208 // vpsubw %ymm8,%ymm9,%ymm10
.byte 197,45,213,220 // vpmullw %ymm4,%ymm10,%ymm11
.byte 197,189,213,192 // vpmullw %ymm0,%ymm8,%ymm0
@@ -55132,7 +55132,7 @@ _sk_scale_u8_hsw_lowp:
.byte 196,67,49,2,192,8 // vpblendd $0x8,%xmm8,%xmm9,%xmm8
.byte 196,66,125,48,192 // vpmovzxbw %xmm8,%ymm8
.byte 197,189,213,192 // vpmullw %ymm0,%ymm8,%ymm0
- .byte 196,98,125,121,13,117,173,0,0 // vpbroadcastw 0xad75(%rip),%ymm9 # 39194 <_sk_srcover_bgra_8888_sse2_lowp+0xa90>
+ .byte 196,98,125,121,13,213,174,0,0 // vpbroadcastw 0xaed5(%rip),%ymm9 # 392f4 <_sk_srcover_bgra_8888_sse2_lowp+0xa80>
.byte 196,193,125,253,193 // vpaddw %ymm9,%ymm0,%ymm0
.byte 197,253,113,208,8 // vpsrlw $0x8,%ymm0,%ymm0
.byte 197,189,213,201 // vpmullw %ymm1,%ymm8,%ymm1
@@ -55153,7 +55153,7 @@ _sk_scale_u8_hsw_lowp:
.byte 255,26 // lcall *(%rdx)
.byte 255 // (bad)
.byte 255 // (bad)
- .byte 255,13,255,255,255,75 // decl 0x4bffffff(%rip) # 4c02e464 <_sk_srcover_bgra_8888_sse2_lowp+0x4bff5d60>
+ .byte 255,13,255,255,255,75 // decl 0x4bffffff(%rip) # 4c02e464 <_sk_srcover_bgra_8888_sse2_lowp+0x4bff5bf0>
.byte 255 // (bad)
.byte 255 // (bad)
.byte 255,67,255 // incl -0x1(%rbx)
@@ -55232,7 +55232,7 @@ _sk_lerp_u8_hsw_lowp:
.byte 196,67,49,34,76,16,8,2 // vpinsrd $0x2,0x8(%r8,%rdx,1),%xmm9,%xmm9
.byte 196,67,49,2,192,8 // vpblendd $0x8,%xmm8,%xmm9,%xmm8
.byte 196,66,125,48,192 // vpmovzxbw %xmm8,%ymm8
- .byte 196,98,125,121,13,4,172,0,0 // vpbroadcastw 0xac04(%rip),%ymm9 # 39196 <_sk_srcover_bgra_8888_sse2_lowp+0xa92>
+ .byte 196,98,125,121,13,100,173,0,0 // vpbroadcastw 0xad64(%rip),%ymm9 # 392f6 <_sk_srcover_bgra_8888_sse2_lowp+0xa82>
.byte 196,65,61,239,209 // vpxor %ymm9,%ymm8,%ymm10
.byte 197,45,213,220 // vpmullw %ymm4,%ymm10,%ymm11
.byte 197,189,213,192 // vpmullw %ymm0,%ymm8,%ymm0
@@ -55264,7 +55264,7 @@ _sk_lerp_u8_hsw_lowp:
.byte 254 // (bad)
.byte 255 // (bad)
.byte 255 // (bad)
- .byte 232,254,255,255,38 // callq 2702e5ff <_sk_srcover_bgra_8888_sse2_lowp+0x26ff5efb>
+ .byte 232,254,255,255,38 // callq 2702e5ff <_sk_srcover_bgra_8888_sse2_lowp+0x26ff5d8b>
.byte 255 // (bad)
.byte 255 // (bad)
.byte 255,30 // lcall *(%rsi)
@@ -55359,13 +55359,13 @@ _sk_scale_565_hsw_lowp:
.byte 196,65,122,111,20,80 // vmovdqu (%r8,%rdx,2),%xmm10
.byte 196,67,45,56,201,1 // vinserti128 $0x1,%xmm9,%ymm10,%ymm9
.byte 196,67,53,2,192,192 // vpblendd $0xc0,%ymm8,%ymm9,%ymm8
- .byte 196,98,125,121,13,19,170,0,0 // vpbroadcastw 0xaa13(%rip),%ymm9 # 39198 <_sk_srcover_bgra_8888_sse2_lowp+0xa94>
+ .byte 196,98,125,121,13,115,171,0,0 // vpbroadcastw 0xab73(%rip),%ymm9 # 392f8 <_sk_srcover_bgra_8888_sse2_lowp+0xa84>
.byte 196,193,45,113,208,8 // vpsrlw $0x8,%ymm8,%ymm10
.byte 196,65,45,219,201 // vpand %ymm9,%ymm10,%ymm9
.byte 196,193,45,113,208,5 // vpsrlw $0x5,%ymm8,%ymm10
- .byte 196,98,125,121,29,251,169,0,0 // vpbroadcastw 0xa9fb(%rip),%ymm11 # 3919a <_sk_srcover_bgra_8888_sse2_lowp+0xa96>
+ .byte 196,98,125,121,29,91,171,0,0 // vpbroadcastw 0xab5b(%rip),%ymm11 # 392fa <_sk_srcover_bgra_8888_sse2_lowp+0xa86>
.byte 196,65,45,219,211 // vpand %ymm11,%ymm10,%ymm10
- .byte 196,98,125,121,29,239,169,0,0 // vpbroadcastw 0xa9ef(%rip),%ymm11 # 3919c <_sk_srcover_bgra_8888_sse2_lowp+0xa98>
+ .byte 196,98,125,121,29,79,171,0,0 // vpbroadcastw 0xab4f(%rip),%ymm11 # 392fc <_sk_srcover_bgra_8888_sse2_lowp+0xa88>
.byte 196,65,61,219,219 // vpand %ymm11,%ymm8,%ymm11
.byte 196,193,61,113,208,13 // vpsrlw $0xd,%ymm8,%ymm8
.byte 196,65,53,235,192 // vpor %ymm8,%ymm9,%ymm8
@@ -55375,7 +55375,7 @@ _sk_scale_565_hsw_lowp:
.byte 196,193,45,113,243,3 // vpsllw $0x3,%ymm11,%ymm10
.byte 196,193,37,113,211,2 // vpsrlw $0x2,%ymm11,%ymm11
.byte 196,65,45,235,211 // vpor %ymm11,%ymm10,%ymm10
- .byte 196,98,125,121,29,182,169,0,0 // vpbroadcastw 0xa9b6(%rip),%ymm11 # 3919e <_sk_srcover_bgra_8888_sse2_lowp+0xa9a>
+ .byte 196,98,125,121,29,22,171,0,0 // vpbroadcastw 0xab16(%rip),%ymm11 # 392fe <_sk_srcover_bgra_8888_sse2_lowp+0xa8a>
.byte 196,65,101,239,227 // vpxor %ymm11,%ymm3,%ymm12
.byte 196,65,69,239,219 // vpxor %ymm11,%ymm7,%ymm11
.byte 196,65,37,101,220 // vpcmpgtw %ymm12,%ymm11,%ymm11
@@ -55385,7 +55385,7 @@ _sk_scale_565_hsw_lowp:
.byte 196,66,21,62,232 // vpmaxuw %ymm8,%ymm13,%ymm13
.byte 196,67,21,76,220,176 // vpblendvb %ymm11,%ymm12,%ymm13,%ymm11
.byte 197,189,213,192 // vpmullw %ymm0,%ymm8,%ymm0
- .byte 196,98,125,121,5,130,169,0,0 // vpbroadcastw 0xa982(%rip),%ymm8 # 391a0 <_sk_srcover_bgra_8888_sse2_lowp+0xa9c>
+ .byte 196,98,125,121,5,226,170,0,0 // vpbroadcastw 0xaae2(%rip),%ymm8 # 39300 <_sk_srcover_bgra_8888_sse2_lowp+0xa8c>
.byte 196,193,125,253,192 // vpaddw %ymm8,%ymm0,%ymm0
.byte 197,253,113,208,8 // vpsrlw $0x8,%ymm0,%ymm0
.byte 197,181,213,201 // vpmullw %ymm1,%ymm9,%ymm1
@@ -55502,13 +55502,13 @@ _sk_lerp_565_hsw_lowp:
.byte 196,65,122,111,20,80 // vmovdqu (%r8,%rdx,2),%xmm10
.byte 196,67,45,56,201,1 // vinserti128 $0x1,%xmm9,%ymm10,%ymm9
.byte 196,67,53,2,192,192 // vpblendd $0xc0,%ymm8,%ymm9,%ymm8
- .byte 196,98,125,121,13,185,167,0,0 // vpbroadcastw 0xa7b9(%rip),%ymm9 # 391a2 <_sk_srcover_bgra_8888_sse2_lowp+0xa9e>
+ .byte 196,98,125,121,13,25,169,0,0 // vpbroadcastw 0xa919(%rip),%ymm9 # 39302 <_sk_srcover_bgra_8888_sse2_lowp+0xa8e>
.byte 196,193,45,113,208,8 // vpsrlw $0x8,%ymm8,%ymm10
.byte 196,65,45,219,201 // vpand %ymm9,%ymm10,%ymm9
.byte 196,193,45,113,208,5 // vpsrlw $0x5,%ymm8,%ymm10
- .byte 196,98,125,121,29,161,167,0,0 // vpbroadcastw 0xa7a1(%rip),%ymm11 # 391a4 <_sk_srcover_bgra_8888_sse2_lowp+0xaa0>
+ .byte 196,98,125,121,29,1,169,0,0 // vpbroadcastw 0xa901(%rip),%ymm11 # 39304 <_sk_srcover_bgra_8888_sse2_lowp+0xa90>
.byte 196,65,45,219,211 // vpand %ymm11,%ymm10,%ymm10
- .byte 196,98,125,121,29,149,167,0,0 // vpbroadcastw 0xa795(%rip),%ymm11 # 391a6 <_sk_srcover_bgra_8888_sse2_lowp+0xaa2>
+ .byte 196,98,125,121,29,245,168,0,0 // vpbroadcastw 0xa8f5(%rip),%ymm11 # 39306 <_sk_srcover_bgra_8888_sse2_lowp+0xa92>
.byte 196,65,61,219,219 // vpand %ymm11,%ymm8,%ymm11
.byte 196,193,61,113,208,13 // vpsrlw $0xd,%ymm8,%ymm8
.byte 196,65,53,235,192 // vpor %ymm8,%ymm9,%ymm8
@@ -55518,7 +55518,7 @@ _sk_lerp_565_hsw_lowp:
.byte 196,193,45,113,243,3 // vpsllw $0x3,%ymm11,%ymm10
.byte 196,193,37,113,211,2 // vpsrlw $0x2,%ymm11,%ymm11
.byte 196,65,45,235,211 // vpor %ymm11,%ymm10,%ymm10
- .byte 196,98,125,121,29,92,167,0,0 // vpbroadcastw 0xa75c(%rip),%ymm11 # 391a8 <_sk_srcover_bgra_8888_sse2_lowp+0xaa4>
+ .byte 196,98,125,121,29,188,168,0,0 // vpbroadcastw 0xa8bc(%rip),%ymm11 # 39308 <_sk_srcover_bgra_8888_sse2_lowp+0xa94>
.byte 196,65,101,239,227 // vpxor %ymm11,%ymm3,%ymm12
.byte 196,65,69,239,219 // vpxor %ymm11,%ymm7,%ymm11
.byte 196,65,37,101,220 // vpcmpgtw %ymm12,%ymm11,%ymm11
@@ -55527,7 +55527,7 @@ _sk_lerp_565_hsw_lowp:
.byte 196,66,53,62,234 // vpmaxuw %ymm10,%ymm9,%ymm13
.byte 196,66,21,62,232 // vpmaxuw %ymm8,%ymm13,%ymm13
.byte 196,67,21,76,220,176 // vpblendvb %ymm11,%ymm12,%ymm13,%ymm11
- .byte 196,98,125,121,37,44,167,0,0 // vpbroadcastw 0xa72c(%rip),%ymm12 # 391aa <_sk_srcover_bgra_8888_sse2_lowp+0xaa6>
+ .byte 196,98,125,121,37,140,168,0,0 // vpbroadcastw 0xa88c(%rip),%ymm12 # 3930a <_sk_srcover_bgra_8888_sse2_lowp+0xa96>
.byte 196,65,61,239,236 // vpxor %ymm12,%ymm8,%ymm13
.byte 197,21,213,236 // vpmullw %ymm4,%ymm13,%ymm13
.byte 197,189,213,192 // vpmullw %ymm0,%ymm8,%ymm0
@@ -55600,7 +55600,7 @@ _sk_clamp_x_1_hsw_lowp:
.byte 196,65,60,87,192 // vxorps %ymm8,%ymm8,%ymm8
.byte 196,193,116,95,200 // vmaxps %ymm8,%ymm1,%ymm1
.byte 196,193,124,95,192 // vmaxps %ymm8,%ymm0,%ymm0
- .byte 196,98,125,24,5,112,158,0,0 // vbroadcastss 0x9e70(%rip),%ymm8 # 389b4 <_sk_srcover_bgra_8888_sse2_lowp+0x2b0>
+ .byte 196,98,125,24,5,224,159,0,0 // vbroadcastss 0x9fe0(%rip),%ymm8 # 38b24 <_sk_srcover_bgra_8888_sse2_lowp+0x2b0>
.byte 196,193,124,93,192 // vminps %ymm8,%ymm0,%ymm0
.byte 196,193,116,93,200 // vminps %ymm8,%ymm1,%ymm1
.byte 72,173 // lods %ds:(%rsi),%rax
@@ -55617,7 +55617,7 @@ _sk_repeat_x_1_hsw_lowp:
.byte 196,65,60,87,192 // vxorps %ymm8,%ymm8,%ymm8
.byte 196,193,116,95,200 // vmaxps %ymm8,%ymm1,%ymm1
.byte 196,193,124,95,192 // vmaxps %ymm8,%ymm0,%ymm0
- .byte 196,98,125,24,5,52,158,0,0 // vbroadcastss 0x9e34(%rip),%ymm8 # 389b4 <_sk_srcover_bgra_8888_sse2_lowp+0x2b0>
+ .byte 196,98,125,24,5,164,159,0,0 // vbroadcastss 0x9fa4(%rip),%ymm8 # 38b24 <_sk_srcover_bgra_8888_sse2_lowp+0x2b0>
.byte 196,193,124,93,192 // vminps %ymm8,%ymm0,%ymm0
.byte 196,193,116,93,200 // vminps %ymm8,%ymm1,%ymm1
.byte 72,173 // lods %ds:(%rsi),%rax
@@ -55627,10 +55627,10 @@ HIDDEN _sk_mirror_x_1_hsw_lowp
.globl _sk_mirror_x_1_hsw_lowp
FUNCTION(_sk_mirror_x_1_hsw_lowp)
_sk_mirror_x_1_hsw_lowp:
- .byte 196,98,125,24,5,53,158,0,0 // vbroadcastss 0x9e35(%rip),%ymm8 # 389cc <_sk_srcover_bgra_8888_sse2_lowp+0x2c8>
+ .byte 196,98,125,24,5,165,159,0,0 // vbroadcastss 0x9fa5(%rip),%ymm8 # 38b3c <_sk_srcover_bgra_8888_sse2_lowp+0x2c8>
.byte 196,193,124,88,192 // vaddps %ymm8,%ymm0,%ymm0
.byte 196,193,116,88,200 // vaddps %ymm8,%ymm1,%ymm1
- .byte 196,98,125,24,13,6,158,0,0 // vbroadcastss 0x9e06(%rip),%ymm9 # 389b0 <_sk_srcover_bgra_8888_sse2_lowp+0x2ac>
+ .byte 196,98,125,24,13,118,159,0,0 // vbroadcastss 0x9f76(%rip),%ymm9 # 38b20 <_sk_srcover_bgra_8888_sse2_lowp+0x2ac>
.byte 196,65,116,89,209 // vmulps %ymm9,%ymm1,%ymm10
.byte 196,65,124,89,201 // vmulps %ymm9,%ymm0,%ymm9
.byte 196,67,125,8,201,1 // vroundps $0x1,%ymm9,%ymm9
@@ -55641,13 +55641,13 @@ _sk_mirror_x_1_hsw_lowp:
.byte 196,193,116,92,202 // vsubps %ymm10,%ymm1,%ymm1
.byte 196,193,116,88,200 // vaddps %ymm8,%ymm1,%ymm1
.byte 196,193,124,88,192 // vaddps %ymm8,%ymm0,%ymm0
- .byte 196,98,125,24,5,93,159,0,0 // vbroadcastss 0x9f5d(%rip),%ymm8 # 38b44 <_sk_srcover_bgra_8888_sse2_lowp+0x440>
+ .byte 196,98,125,24,5,205,160,0,0 // vbroadcastss 0xa0cd(%rip),%ymm8 # 38cb4 <_sk_srcover_bgra_8888_sse2_lowp+0x440>
.byte 196,193,124,84,192 // vandps %ymm8,%ymm0,%ymm0
.byte 196,193,116,84,200 // vandps %ymm8,%ymm1,%ymm1
.byte 196,65,60,87,192 // vxorps %ymm8,%ymm8,%ymm8
.byte 196,193,116,95,200 // vmaxps %ymm8,%ymm1,%ymm1
.byte 196,193,124,95,192 // vmaxps %ymm8,%ymm0,%ymm0
- .byte 196,98,125,24,5,171,157,0,0 // vbroadcastss 0x9dab(%rip),%ymm8 # 389b4 <_sk_srcover_bgra_8888_sse2_lowp+0x2b0>
+ .byte 196,98,125,24,5,27,159,0,0 // vbroadcastss 0x9f1b(%rip),%ymm8 # 38b24 <_sk_srcover_bgra_8888_sse2_lowp+0x2b0>
.byte 196,193,124,93,192 // vminps %ymm8,%ymm0,%ymm0
.byte 196,193,116,93,200 // vminps %ymm8,%ymm1,%ymm1
.byte 72,173 // lods %ds:(%rsi),%rax
@@ -55657,144 +55657,181 @@ HIDDEN _sk_gradient_hsw_lowp
.globl _sk_gradient_hsw_lowp
FUNCTION(_sk_gradient_hsw_lowp)
_sk_gradient_hsw_lowp:
- .byte 72,129,236,184,0,0,0 // sub $0xb8,%rsp
- .byte 197,252,17,188,36,128,0,0,0 // vmovups %ymm7,0x80(%rsp)
- .byte 197,252,17,116,36,96 // vmovups %ymm6,0x60(%rsp)
- .byte 197,254,127,108,36,64 // vmovdqu %ymm5,0x40(%rsp)
- .byte 197,252,17,100,36,32 // vmovups %ymm4,0x20(%rsp)
+ .byte 72,129,236,152,0,0,0 // sub $0x98,%rsp
+ .byte 197,254,127,124,36,96 // vmovdqu %ymm7,0x60(%rsp)
+ .byte 197,252,17,116,36,64 // vmovups %ymm6,0x40(%rsp)
+ .byte 197,254,127,108,36,32 // vmovdqu %ymm5,0x20(%rsp)
+ .byte 197,254,127,36,36 // vmovdqu %ymm4,(%rsp)
.byte 72,173 // lods %ds:(%rsi),%rax
.byte 76,139,0 // mov (%rax),%r8
- .byte 196,65,28,87,228 // vxorps %ymm12,%ymm12,%ymm12
- .byte 197,237,239,210 // vpxor %ymm2,%ymm2,%ymm2
+ .byte 197,197,239,255 // vpxor %ymm7,%ymm7,%ymm7
.byte 197,213,239,237 // vpxor %ymm5,%ymm5,%ymm5
.byte 73,131,248,2 // cmp $0x2,%r8
- .byte 114,51 // jb 2ec84 <_sk_gradient_hsw_lowp+0x6d>
+ .byte 114,60 // jb 2ec84 <_sk_gradient_hsw_lowp+0x6d>
.byte 76,139,72,72 // mov 0x48(%rax),%r9
- .byte 73,255,200 // dec %r8
- .byte 73,131,193,4 // add $0x4,%r9
- .byte 197,237,239,210 // vpxor %ymm2,%ymm2,%ymm2
+ .byte 197,197,239,255 // vpxor %ymm7,%ymm7,%ymm7
+ .byte 65,186,1,0,0,0 // mov $0x1,%r10d
.byte 197,213,239,237 // vpxor %ymm5,%ymm5,%ymm5
- .byte 196,194,125,24,25 // vbroadcastss (%r9),%ymm3
- .byte 197,228,194,224,2 // vcmpleps %ymm0,%ymm3,%ymm4
- .byte 197,237,250,212 // vpsubd %ymm4,%ymm2,%ymm2
- .byte 197,228,194,217,2 // vcmpleps %ymm1,%ymm3,%ymm3
- .byte 197,213,250,235 // vpsubd %ymm3,%ymm5,%ymm5
- .byte 73,131,193,4 // add $0x4,%r9
- .byte 73,255,200 // dec %r8
- .byte 117,224 // jne 2ec64 <_sk_gradient_hsw_lowp+0x4d>
+ .byte 196,130,125,24,20,145 // vbroadcastss (%r9,%r10,4),%ymm2
+ .byte 197,236,194,216,2 // vcmpleps %ymm0,%ymm2,%ymm3
+ .byte 197,197,250,251 // vpsubd %ymm3,%ymm7,%ymm7
+ .byte 197,236,194,209,2 // vcmpleps %ymm1,%ymm2,%ymm2
+ .byte 197,213,250,234 // vpsubd %ymm2,%ymm5,%ymm5
+ .byte 73,255,194 // inc %r10
+ .byte 77,57,208 // cmp %r10,%r8
+ .byte 117,224 // jne 2ec5a <_sk_gradient_hsw_lowp+0x43>
+ .byte 73,131,248,8 // cmp $0x8,%r8
+ .byte 15,135,180,0,0,0 // ja 2ed38 <_sk_gradient_hsw_lowp+0x121>
.byte 76,139,64,8 // mov 0x8(%rax),%r8
.byte 76,139,72,16 // mov 0x10(%rax),%r9
- .byte 196,65,52,87,201 // vxorps %ymm9,%ymm9,%ymm9
- .byte 197,229,118,219 // vpcmpeqd %ymm3,%ymm3,%ymm3
- .byte 196,66,101,146,12,144 // vgatherdps %ymm3,(%r8,%ymm2,4),%ymm9
- .byte 197,220,87,228 // vxorps %ymm4,%ymm4,%ymm4
- .byte 197,229,118,219 // vpcmpeqd %ymm3,%ymm3,%ymm3
- .byte 196,194,101,146,36,168 // vgatherdps %ymm3,(%r8,%ymm5,4),%ymm4
- .byte 197,252,17,36,36 // vmovups %ymm4,(%rsp)
- .byte 197,220,87,228 // vxorps %ymm4,%ymm4,%ymm4
- .byte 197,229,118,219 // vpcmpeqd %ymm3,%ymm3,%ymm3
- .byte 196,194,101,146,36,145 // vgatherdps %ymm3,(%r9,%ymm2,4),%ymm4
- .byte 197,204,87,246 // vxorps %ymm6,%ymm6,%ymm6
- .byte 197,229,118,219 // vpcmpeqd %ymm3,%ymm3,%ymm3
- .byte 196,194,101,146,52,169 // vgatherdps %ymm3,(%r9,%ymm5,4),%ymm6
- .byte 197,252,17,116,36,224 // vmovups %ymm6,-0x20(%rsp)
+ .byte 196,193,124,16,24 // vmovups (%r8),%ymm3
+ .byte 196,226,69,22,211 // vpermps %ymm3,%ymm7,%ymm2
+ .byte 196,226,85,22,219 // vpermps %ymm3,%ymm5,%ymm3
+ .byte 76,139,64,40 // mov 0x28(%rax),%r8
+ .byte 196,193,124,16,32 // vmovups (%r8),%ymm4
+ .byte 196,98,69,22,244 // vpermps %ymm4,%ymm7,%ymm14
+ .byte 196,226,85,22,228 // vpermps %ymm4,%ymm5,%ymm4
+ .byte 197,252,17,100,36,224 // vmovups %ymm4,-0x20(%rsp)
+ .byte 196,193,124,16,33 // vmovups (%r9),%ymm4
+ .byte 196,98,69,22,196 // vpermps %ymm4,%ymm7,%ymm8
+ .byte 196,98,85,22,204 // vpermps %ymm4,%ymm5,%ymm9
+ .byte 76,139,64,48 // mov 0x30(%rax),%r8
+ .byte 196,193,124,16,32 // vmovups (%r8),%ymm4
+ .byte 196,98,69,22,236 // vpermps %ymm4,%ymm7,%ymm13
+ .byte 196,226,85,22,228 // vpermps %ymm4,%ymm5,%ymm4
+ .byte 197,252,17,100,36,128 // vmovups %ymm4,-0x80(%rsp)
.byte 76,139,64,24 // mov 0x18(%rax),%r8
- .byte 196,65,60,87,192 // vxorps %ymm8,%ymm8,%ymm8
+ .byte 196,193,124,16,32 // vmovups (%r8),%ymm4
+ .byte 196,98,69,22,212 // vpermps %ymm4,%ymm7,%ymm10
+ .byte 196,98,85,22,220 // vpermps %ymm4,%ymm5,%ymm11
+ .byte 76,139,64,56 // mov 0x38(%rax),%r8
+ .byte 196,193,124,16,48 // vmovups (%r8),%ymm6
+ .byte 196,226,69,22,230 // vpermps %ymm6,%ymm7,%ymm4
+ .byte 196,226,85,22,246 // vpermps %ymm6,%ymm5,%ymm6
+ .byte 76,139,64,32 // mov 0x20(%rax),%r8
+ .byte 196,65,124,16,32 // vmovups (%r8),%ymm12
+ .byte 196,66,69,22,252 // vpermps %ymm12,%ymm7,%ymm15
+ .byte 197,124,17,124,36,192 // vmovups %ymm15,-0x40(%rsp)
+ .byte 196,66,85,22,228 // vpermps %ymm12,%ymm5,%ymm12
+ .byte 197,124,17,100,36,160 // vmovups %ymm12,-0x60(%rsp)
+ .byte 72,139,64,64 // mov 0x40(%rax),%rax
+ .byte 197,124,16,56 // vmovups (%rax),%ymm15
+ .byte 196,66,69,22,231 // vpermps %ymm15,%ymm7,%ymm12
+ .byte 196,194,85,22,255 // vpermps %ymm15,%ymm5,%ymm7
+ .byte 233,34,1,0,0 // jmpq 2ee5a <_sk_gradient_hsw_lowp+0x243>
+ .byte 76,139,64,8 // mov 0x8(%rax),%r8
+ .byte 76,139,72,16 // mov 0x10(%rax),%r9
.byte 197,229,118,219 // vpcmpeqd %ymm3,%ymm3,%ymm3
- .byte 196,66,101,146,4,144 // vgatherdps %ymm3,(%r8,%ymm2,4),%ymm8
+ .byte 197,236,87,210 // vxorps %ymm2,%ymm2,%ymm2
+ .byte 196,194,101,146,20,184 // vgatherdps %ymm3,(%r8,%ymm7,4),%ymm2
+ .byte 197,221,118,228 // vpcmpeqd %ymm4,%ymm4,%ymm4
+ .byte 197,228,87,219 // vxorps %ymm3,%ymm3,%ymm3
+ .byte 196,194,93,146,28,168 // vgatherdps %ymm4,(%r8,%ymm5,4),%ymm3
+ .byte 197,221,118,228 // vpcmpeqd %ymm4,%ymm4,%ymm4
+ .byte 196,65,60,87,192 // vxorps %ymm8,%ymm8,%ymm8
+ .byte 196,66,93,146,4,185 // vgatherdps %ymm4,(%r9,%ymm7,4),%ymm8
+ .byte 197,221,118,228 // vpcmpeqd %ymm4,%ymm4,%ymm4
+ .byte 196,65,52,87,201 // vxorps %ymm9,%ymm9,%ymm9
+ .byte 196,66,93,146,12,169 // vgatherdps %ymm4,(%r9,%ymm5,4),%ymm9
+ .byte 76,139,64,24 // mov 0x18(%rax),%r8
+ .byte 197,221,118,228 // vpcmpeqd %ymm4,%ymm4,%ymm4
+ .byte 196,65,44,87,210 // vxorps %ymm10,%ymm10,%ymm10
+ .byte 196,66,93,146,20,184 // vgatherdps %ymm4,(%r8,%ymm7,4),%ymm10
+ .byte 197,221,118,228 // vpcmpeqd %ymm4,%ymm4,%ymm4
+ .byte 196,65,36,87,219 // vxorps %ymm11,%ymm11,%ymm11
+ .byte 196,66,93,146,28,168 // vgatherdps %ymm4,(%r8,%ymm5,4),%ymm11
+ .byte 76,139,64,32 // mov 0x20(%rax),%r8
+ .byte 197,221,118,228 // vpcmpeqd %ymm4,%ymm4,%ymm4
.byte 197,204,87,246 // vxorps %ymm6,%ymm6,%ymm6
- .byte 197,229,118,219 // vpcmpeqd %ymm3,%ymm3,%ymm3
- .byte 196,194,101,146,52,168 // vgatherdps %ymm3,(%r8,%ymm5,4),%ymm6
+ .byte 196,194,93,146,52,184 // vgatherdps %ymm4,(%r8,%ymm7,4),%ymm6
.byte 197,252,17,116,36,192 // vmovups %ymm6,-0x40(%rsp)
- .byte 76,139,64,32 // mov 0x20(%rax),%r8
+ .byte 197,221,118,228 // vpcmpeqd %ymm4,%ymm4,%ymm4
.byte 197,204,87,246 // vxorps %ymm6,%ymm6,%ymm6
- .byte 197,229,118,219 // vpcmpeqd %ymm3,%ymm3,%ymm3
- .byte 196,194,101,146,52,144 // vgatherdps %ymm3,(%r8,%ymm2,4),%ymm6
+ .byte 196,194,93,146,52,168 // vgatherdps %ymm4,(%r8,%ymm5,4),%ymm6
.byte 197,252,17,116,36,160 // vmovups %ymm6,-0x60(%rsp)
- .byte 197,204,87,246 // vxorps %ymm6,%ymm6,%ymm6
- .byte 197,229,118,219 // vpcmpeqd %ymm3,%ymm3,%ymm3
- .byte 196,194,101,146,52,168 // vgatherdps %ymm3,(%r8,%ymm5,4),%ymm6
- .byte 197,252,17,116,36,128 // vmovups %ymm6,-0x80(%rsp)
.byte 76,139,64,40 // mov 0x28(%rax),%r8
- .byte 196,65,44,87,210 // vxorps %ymm10,%ymm10,%ymm10
- .byte 197,229,118,219 // vpcmpeqd %ymm3,%ymm3,%ymm3
- .byte 196,66,101,146,20,144 // vgatherdps %ymm3,(%r8,%ymm2,4),%ymm10
- .byte 196,65,36,87,219 // vxorps %ymm11,%ymm11,%ymm11
- .byte 197,229,118,219 // vpcmpeqd %ymm3,%ymm3,%ymm3
- .byte 196,66,101,146,28,168 // vgatherdps %ymm3,(%r8,%ymm5,4),%ymm11
- .byte 76,139,64,48 // mov 0x30(%rax),%r8
+ .byte 197,221,118,228 // vpcmpeqd %ymm4,%ymm4,%ymm4
.byte 196,65,12,87,246 // vxorps %ymm14,%ymm14,%ymm14
- .byte 197,229,118,219 // vpcmpeqd %ymm3,%ymm3,%ymm3
- .byte 196,66,101,146,52,144 // vgatherdps %ymm3,(%r8,%ymm2,4),%ymm14
- .byte 196,65,4,87,255 // vxorps %ymm15,%ymm15,%ymm15
- .byte 197,229,118,219 // vpcmpeqd %ymm3,%ymm3,%ymm3
- .byte 196,66,101,146,60,168 // vgatherdps %ymm3,(%r8,%ymm5,4),%ymm15
+ .byte 196,66,93,146,52,184 // vgatherdps %ymm4,(%r8,%ymm7,4),%ymm14
+ .byte 197,221,118,228 // vpcmpeqd %ymm4,%ymm4,%ymm4
+ .byte 197,204,87,246 // vxorps %ymm6,%ymm6,%ymm6
+ .byte 196,194,93,146,52,168 // vgatherdps %ymm4,(%r8,%ymm5,4),%ymm6
+ .byte 197,252,17,116,36,224 // vmovups %ymm6,-0x20(%rsp)
+ .byte 76,139,64,48 // mov 0x30(%rax),%r8
+ .byte 197,221,118,228 // vpcmpeqd %ymm4,%ymm4,%ymm4
+ .byte 196,65,20,87,237 // vxorps %ymm13,%ymm13,%ymm13
+ .byte 196,66,93,146,44,184 // vgatherdps %ymm4,(%r8,%ymm7,4),%ymm13
+ .byte 197,221,118,228 // vpcmpeqd %ymm4,%ymm4,%ymm4
+ .byte 197,204,87,246 // vxorps %ymm6,%ymm6,%ymm6
+ .byte 196,194,93,146,52,168 // vgatherdps %ymm4,(%r8,%ymm5,4),%ymm6
+ .byte 197,252,17,116,36,128 // vmovups %ymm6,-0x80(%rsp)
.byte 76,139,64,56 // mov 0x38(%rax),%r8
- .byte 197,228,87,219 // vxorps %ymm3,%ymm3,%ymm3
- .byte 197,205,118,246 // vpcmpeqd %ymm6,%ymm6,%ymm6
- .byte 196,194,77,146,28,144 // vgatherdps %ymm6,(%r8,%ymm2,4),%ymm3
- .byte 197,196,87,255 // vxorps %ymm7,%ymm7,%ymm7
.byte 197,205,118,246 // vpcmpeqd %ymm6,%ymm6,%ymm6
- .byte 196,194,77,146,60,168 // vgatherdps %ymm6,(%r8,%ymm5,4),%ymm7
- .byte 72,139,64,64 // mov 0x40(%rax),%rax
+ .byte 197,220,87,228 // vxorps %ymm4,%ymm4,%ymm4
+ .byte 196,194,77,146,36,184 // vgatherdps %ymm6,(%r8,%ymm7,4),%ymm4
+ .byte 196,65,29,118,228 // vpcmpeqd %ymm12,%ymm12,%ymm12
.byte 197,204,87,246 // vxorps %ymm6,%ymm6,%ymm6
- .byte 196,65,21,118,237 // vpcmpeqd %ymm13,%ymm13,%ymm13
- .byte 196,226,21,146,52,144 // vgatherdps %ymm13,(%rax,%ymm2,4),%ymm6
- .byte 197,237,118,210 // vpcmpeqd %ymm2,%ymm2,%ymm2
- .byte 196,98,109,146,36,168 // vgatherdps %ymm2,(%rax,%ymm5,4),%ymm12
- .byte 196,66,125,184,209 // vfmadd231ps %ymm9,%ymm0,%ymm10
- .byte 196,98,125,184,244 // vfmadd231ps %ymm4,%ymm0,%ymm14
- .byte 196,194,125,184,216 // vfmadd231ps %ymm8,%ymm0,%ymm3
- .byte 196,226,125,184,116,36,160 // vfmadd231ps -0x60(%rsp),%ymm0,%ymm6
- .byte 196,98,117,184,28,36 // vfmadd231ps (%rsp),%ymm1,%ymm11
- .byte 196,98,117,184,124,36,224 // vfmadd231ps -0x20(%rsp),%ymm1,%ymm15
- .byte 196,226,117,184,124,36,192 // vfmadd231ps -0x40(%rsp),%ymm1,%ymm7
- .byte 196,98,117,184,100,36,128 // vfmadd231ps -0x80(%rsp),%ymm1,%ymm12
- .byte 196,226,125,24,5,2,156,0,0 // vbroadcastss 0x9c02(%rip),%ymm0 # 389e0 <_sk_srcover_bgra_8888_sse2_lowp+0x2dc>
- .byte 196,226,125,24,13,201,155,0,0 // vbroadcastss 0x9bc9(%rip),%ymm1 # 389b0 <_sk_srcover_bgra_8888_sse2_lowp+0x2ac>
+ .byte 196,194,29,146,52,168 // vgatherdps %ymm12,(%r8,%ymm5,4),%ymm6
+ .byte 72,139,64,64 // mov 0x40(%rax),%rax
+ .byte 196,65,5,118,255 // vpcmpeqd %ymm15,%ymm15,%ymm15
+ .byte 196,65,28,87,228 // vxorps %ymm12,%ymm12,%ymm12
+ .byte 196,98,5,146,36,184 // vgatherdps %ymm15,(%rax,%ymm7,4),%ymm12
+ .byte 196,65,5,118,255 // vpcmpeqd %ymm15,%ymm15,%ymm15
+ .byte 197,196,87,255 // vxorps %ymm7,%ymm7,%ymm7
+ .byte 196,226,5,146,60,168 // vgatherdps %ymm15,(%rax,%ymm5,4),%ymm7
+ .byte 196,194,125,168,214 // vfmadd213ps %ymm14,%ymm0,%ymm2
+ .byte 196,66,125,168,197 // vfmadd213ps %ymm13,%ymm0,%ymm8
+ .byte 196,98,125,168,212 // vfmadd213ps %ymm4,%ymm0,%ymm10
+ .byte 196,98,125,184,100,36,192 // vfmadd231ps -0x40(%rsp),%ymm0,%ymm12
+ .byte 196,226,117,168,92,36,224 // vfmadd213ps -0x20(%rsp),%ymm1,%ymm3
+ .byte 196,98,117,168,76,36,128 // vfmadd213ps -0x80(%rsp),%ymm1,%ymm9
+ .byte 196,98,117,168,222 // vfmadd213ps %ymm6,%ymm1,%ymm11
+ .byte 196,226,117,184,124,36,160 // vfmadd231ps -0x60(%rsp),%ymm1,%ymm7
+ .byte 196,226,125,24,5,189,156,0,0 // vbroadcastss 0x9cbd(%rip),%ymm0 # 38b50 <_sk_srcover_bgra_8888_sse2_lowp+0x2dc>
+ .byte 196,226,125,24,13,132,156,0,0 // vbroadcastss 0x9c84(%rip),%ymm1 # 38b20 <_sk_srcover_bgra_8888_sse2_lowp+0x2ac>
+ .byte 196,226,125,168,217 // vfmadd213ps %ymm1,%ymm0,%ymm3
+ .byte 196,226,125,168,209 // vfmadd213ps %ymm1,%ymm0,%ymm2
+ .byte 196,98,125,168,201 // vfmadd213ps %ymm1,%ymm0,%ymm9
+ .byte 196,98,125,168,193 // vfmadd213ps %ymm1,%ymm0,%ymm8
.byte 196,98,125,168,217 // vfmadd213ps %ymm1,%ymm0,%ymm11
.byte 196,98,125,168,209 // vfmadd213ps %ymm1,%ymm0,%ymm10
- .byte 196,98,125,168,249 // vfmadd213ps %ymm1,%ymm0,%ymm15
- .byte 196,98,125,168,241 // vfmadd213ps %ymm1,%ymm0,%ymm14
.byte 196,226,125,168,249 // vfmadd213ps %ymm1,%ymm0,%ymm7
- .byte 196,226,125,168,217 // vfmadd213ps %ymm1,%ymm0,%ymm3
.byte 196,98,125,168,225 // vfmadd213ps %ymm1,%ymm0,%ymm12
- .byte 196,226,125,168,241 // vfmadd213ps %ymm1,%ymm0,%ymm6
- .byte 196,193,126,91,194 // vcvttps2dq %ymm10,%ymm0
- .byte 197,253,111,37,164,163,0,0 // vmovdqa 0xa3a4(%rip),%ymm4 # 391c0 <_sk_srcover_bgra_8888_sse2_lowp+0xabc>
+ .byte 197,254,91,194 // vcvttps2dq %ymm2,%ymm0
+ .byte 197,253,111,37,80,164,0,0 // vmovdqa 0xa450(%rip),%ymm4 # 39320 <_sk_srcover_bgra_8888_sse2_lowp+0xaac>
.byte 196,226,125,0,196 // vpshufb %ymm4,%ymm0,%ymm0
.byte 196,227,253,0,192,232 // vpermq $0xe8,%ymm0,%ymm0
- .byte 196,193,126,91,203 // vcvttps2dq %ymm11,%ymm1
+ .byte 197,254,91,203 // vcvttps2dq %ymm3,%ymm1
.byte 196,226,117,0,204 // vpshufb %ymm4,%ymm1,%ymm1
.byte 196,227,253,0,201,232 // vpermq $0xe8,%ymm1,%ymm1
.byte 196,227,125,56,193,1 // vinserti128 $0x1,%xmm1,%ymm0,%ymm0
- .byte 196,193,126,91,206 // vcvttps2dq %ymm14,%ymm1
+ .byte 196,193,126,91,200 // vcvttps2dq %ymm8,%ymm1
.byte 196,226,117,0,204 // vpshufb %ymm4,%ymm1,%ymm1
.byte 196,227,253,0,201,232 // vpermq $0xe8,%ymm1,%ymm1
- .byte 196,193,126,91,215 // vcvttps2dq %ymm15,%ymm2
+ .byte 196,193,126,91,209 // vcvttps2dq %ymm9,%ymm2
.byte 196,226,109,0,212 // vpshufb %ymm4,%ymm2,%ymm2
.byte 196,227,253,0,210,232 // vpermq $0xe8,%ymm2,%ymm2
.byte 196,227,117,56,202,1 // vinserti128 $0x1,%xmm2,%ymm1,%ymm1
- .byte 197,254,91,211 // vcvttps2dq %ymm3,%ymm2
+ .byte 196,193,126,91,210 // vcvttps2dq %ymm10,%ymm2
.byte 196,226,109,0,212 // vpshufb %ymm4,%ymm2,%ymm2
.byte 196,227,253,0,210,232 // vpermq $0xe8,%ymm2,%ymm2
- .byte 197,254,91,223 // vcvttps2dq %ymm7,%ymm3
+ .byte 196,193,126,91,219 // vcvttps2dq %ymm11,%ymm3
.byte 196,226,101,0,220 // vpshufb %ymm4,%ymm3,%ymm3
.byte 196,227,253,0,219,232 // vpermq $0xe8,%ymm3,%ymm3
.byte 196,227,109,56,211,1 // vinserti128 $0x1,%xmm3,%ymm2,%ymm2
- .byte 197,254,91,222 // vcvttps2dq %ymm6,%ymm3
+ .byte 196,193,126,91,220 // vcvttps2dq %ymm12,%ymm3
.byte 196,226,101,0,220 // vpshufb %ymm4,%ymm3,%ymm3
- .byte 196,193,126,91,236 // vcvttps2dq %ymm12,%ymm5
+ .byte 197,254,91,239 // vcvttps2dq %ymm7,%ymm5
.byte 196,226,85,0,228 // vpshufb %ymm4,%ymm5,%ymm4
.byte 196,227,253,0,219,232 // vpermq $0xe8,%ymm3,%ymm3
.byte 196,227,253,0,228,232 // vpermq $0xe8,%ymm4,%ymm4
.byte 196,227,101,56,220,1 // vinserti128 $0x1,%xmm4,%ymm3,%ymm3
.byte 72,173 // lods %ds:(%rsi),%rax
- .byte 197,252,16,100,36,32 // vmovups 0x20(%rsp),%ymm4
- .byte 197,252,16,108,36,64 // vmovups 0x40(%rsp),%ymm5
- .byte 197,252,16,116,36,96 // vmovups 0x60(%rsp),%ymm6
- .byte 197,252,16,188,36,128,0,0,0 // vmovups 0x80(%rsp),%ymm7
- .byte 72,129,196,184,0,0,0 // add $0xb8,%rsp
+ .byte 197,252,16,36,36 // vmovups (%rsp),%ymm4
+ .byte 197,252,16,108,36,32 // vmovups 0x20(%rsp),%ymm5
+ .byte 197,252,16,116,36,64 // vmovups 0x40(%rsp),%ymm6
+ .byte 197,252,16,124,36,96 // vmovups 0x60(%rsp),%ymm7
+ .byte 72,129,196,152,0,0,0 // add $0x98,%rsp
.byte 255,224 // jmpq *%rax
HIDDEN _sk_evenly_spaced_gradient_hsw_lowp
@@ -55805,15 +55842,14 @@ _sk_evenly_spaced_gradient_hsw_lowp:
.byte 197,252,17,124,36,96 // vmovups %ymm7,0x60(%rsp)
.byte 197,252,17,116,36,64 // vmovups %ymm6,0x40(%rsp)
.byte 197,252,17,108,36,32 // vmovups %ymm5,0x20(%rsp)
- .byte 197,252,17,36,36 // vmovups %ymm4,(%rsp)
- .byte 197,124,40,217 // vmovaps %ymm1,%ymm11
+ .byte 197,254,127,36,36 // vmovdqu %ymm4,(%rsp)
.byte 72,173 // lods %ds:(%rsi),%rax
- .byte 76,139,8 // mov (%rax),%r9
- .byte 76,139,64,8 // mov 0x8(%rax),%r8
+ .byte 76,139,0 // mov (%rax),%r8
+ .byte 77,137,193 // mov %r8,%r9
.byte 73,255,201 // dec %r9
- .byte 120,7 // js 2ef09 <_sk_evenly_spaced_gradient_hsw_lowp+0x37>
+ .byte 120,7 // js 2efb5 <_sk_evenly_spaced_gradient_hsw_lowp+0x32>
.byte 196,193,234,42,209 // vcvtsi2ss %r9,%xmm2,%xmm2
- .byte 235,22 // jmp 2ef1f <_sk_evenly_spaced_gradient_hsw_lowp+0x4d>
+ .byte 235,22 // jmp 2efcb <_sk_evenly_spaced_gradient_hsw_lowp+0x48>
.byte 77,137,202 // mov %r9,%r10
.byte 73,209,234 // shr %r10
.byte 65,131,225,1 // and $0x1,%r9d
@@ -55822,115 +55858,155 @@ _sk_evenly_spaced_gradient_hsw_lowp:
.byte 197,234,88,210 // vaddss %xmm2,%xmm2,%xmm2
.byte 196,226,125,24,210 // vbroadcastss %xmm2,%ymm2
.byte 197,252,89,218 // vmulps %ymm2,%ymm0,%ymm3
- .byte 197,164,89,210 // vmulps %ymm2,%ymm11,%ymm2
- .byte 197,254,91,210 // vcvttps2dq %ymm2,%ymm2
- .byte 197,254,91,243 // vcvttps2dq %ymm3,%ymm6
- .byte 197,245,118,201 // vpcmpeqd %ymm1,%ymm1,%ymm1
- .byte 197,228,87,219 // vxorps %ymm3,%ymm3,%ymm3
- .byte 196,194,117,146,28,176 // vgatherdps %ymm1,(%r8,%ymm6,4),%ymm3
- .byte 197,245,118,201 // vpcmpeqd %ymm1,%ymm1,%ymm1
- .byte 197,220,87,228 // vxorps %ymm4,%ymm4,%ymm4
- .byte 196,194,117,146,36,144 // vgatherdps %ymm1,(%r8,%ymm2,4),%ymm4
+ .byte 197,244,89,210 // vmulps %ymm2,%ymm1,%ymm2
+ .byte 197,126,91,242 // vcvttps2dq %ymm2,%ymm14
+ .byte 197,254,91,251 // vcvttps2dq %ymm3,%ymm7
+ .byte 73,131,248,8 // cmp $0x8,%r8
+ .byte 15,135,180,0,0,0 // ja 2f09e <_sk_evenly_spaced_gradient_hsw_lowp+0x11b>
+ .byte 76,139,64,8 // mov 0x8(%rax),%r8
+ .byte 76,139,72,16 // mov 0x10(%rax),%r9
+ .byte 196,193,124,16,24 // vmovups (%r8),%ymm3
+ .byte 196,226,69,22,211 // vpermps %ymm3,%ymm7,%ymm2
+ .byte 196,226,13,22,219 // vpermps %ymm3,%ymm14,%ymm3
+ .byte 76,139,64,40 // mov 0x28(%rax),%r8
+ .byte 196,193,124,16,32 // vmovups (%r8),%ymm4
+ .byte 196,98,69,22,236 // vpermps %ymm4,%ymm7,%ymm13
+ .byte 196,226,13,22,228 // vpermps %ymm4,%ymm14,%ymm4
.byte 197,252,17,100,36,224 // vmovups %ymm4,-0x20(%rsp)
- .byte 76,139,64,16 // mov 0x10(%rax),%r8
+ .byte 196,193,124,16,33 // vmovups (%r9),%ymm4
+ .byte 196,98,69,22,196 // vpermps %ymm4,%ymm7,%ymm8
+ .byte 196,98,13,22,204 // vpermps %ymm4,%ymm14,%ymm9
+ .byte 76,139,64,48 // mov 0x30(%rax),%r8
+ .byte 196,193,124,16,40 // vmovups (%r8),%ymm5
+ .byte 196,226,69,22,229 // vpermps %ymm5,%ymm7,%ymm4
+ .byte 196,226,13,22,237 // vpermps %ymm5,%ymm14,%ymm5
+ .byte 197,252,17,108,36,128 // vmovups %ymm5,-0x80(%rsp)
+ .byte 76,139,64,24 // mov 0x18(%rax),%r8
+ .byte 196,193,124,16,40 // vmovups (%r8),%ymm5
+ .byte 196,98,69,22,213 // vpermps %ymm5,%ymm7,%ymm10
+ .byte 196,98,13,22,221 // vpermps %ymm5,%ymm14,%ymm11
+ .byte 76,139,64,56 // mov 0x38(%rax),%r8
+ .byte 196,193,124,16,48 // vmovups (%r8),%ymm6
+ .byte 196,226,69,22,238 // vpermps %ymm6,%ymm7,%ymm5
+ .byte 196,226,13,22,246 // vpermps %ymm6,%ymm14,%ymm6
+ .byte 76,139,64,32 // mov 0x20(%rax),%r8
+ .byte 196,65,124,16,32 // vmovups (%r8),%ymm12
+ .byte 196,66,69,22,252 // vpermps %ymm12,%ymm7,%ymm15
+ .byte 197,124,17,124,36,192 // vmovups %ymm15,-0x40(%rsp)
+ .byte 196,66,13,22,228 // vpermps %ymm12,%ymm14,%ymm12
+ .byte 197,124,17,100,36,160 // vmovups %ymm12,-0x60(%rsp)
+ .byte 72,139,64,64 // mov 0x40(%rax),%rax
+ .byte 197,124,16,56 // vmovups (%rax),%ymm15
+ .byte 196,66,69,22,231 // vpermps %ymm15,%ymm7,%ymm12
+ .byte 196,194,13,22,255 // vpermps %ymm15,%ymm14,%ymm7
+ .byte 233,33,1,0,0 // jmpq 2f1bf <_sk_evenly_spaced_gradient_hsw_lowp+0x23c>
+ .byte 76,139,64,8 // mov 0x8(%rax),%r8
+ .byte 76,139,72,16 // mov 0x10(%rax),%r9
+ .byte 197,236,87,210 // vxorps %ymm2,%ymm2,%ymm2
+ .byte 197,229,118,219 // vpcmpeqd %ymm3,%ymm3,%ymm3
+ .byte 196,194,101,146,20,184 // vgatherdps %ymm3,(%r8,%ymm7,4),%ymm2
+ .byte 197,228,87,219 // vxorps %ymm3,%ymm3,%ymm3
.byte 197,221,118,228 // vpcmpeqd %ymm4,%ymm4,%ymm4
- .byte 197,244,87,201 // vxorps %ymm1,%ymm1,%ymm1
- .byte 196,194,93,146,12,176 // vgatherdps %ymm4,(%r8,%ymm6,4),%ymm1
+ .byte 196,130,93,146,28,176 // vgatherdps %ymm4,(%r8,%ymm14,4),%ymm3
+ .byte 196,65,60,87,192 // vxorps %ymm8,%ymm8,%ymm8
.byte 197,221,118,228 // vpcmpeqd %ymm4,%ymm4,%ymm4
- .byte 197,212,87,237 // vxorps %ymm5,%ymm5,%ymm5
- .byte 196,194,93,146,44,144 // vgatherdps %ymm4,(%r8,%ymm2,4),%ymm5
- .byte 197,252,17,108,36,192 // vmovups %ymm5,-0x40(%rsp)
+ .byte 196,66,93,146,4,185 // vgatherdps %ymm4,(%r9,%ymm7,4),%ymm8
+ .byte 196,65,52,87,201 // vxorps %ymm9,%ymm9,%ymm9
+ .byte 197,221,118,228 // vpcmpeqd %ymm4,%ymm4,%ymm4
+ .byte 196,2,93,146,12,177 // vgatherdps %ymm4,(%r9,%ymm14,4),%ymm9
.byte 76,139,64,24 // mov 0x18(%rax),%r8
- .byte 197,213,118,237 // vpcmpeqd %ymm5,%ymm5,%ymm5
- .byte 197,220,87,228 // vxorps %ymm4,%ymm4,%ymm4
- .byte 196,194,85,146,36,176 // vgatherdps %ymm5,(%r8,%ymm6,4),%ymm4
- .byte 197,213,118,237 // vpcmpeqd %ymm5,%ymm5,%ymm5
- .byte 197,196,87,255 // vxorps %ymm7,%ymm7,%ymm7
- .byte 196,194,85,146,60,144 // vgatherdps %ymm5,(%r8,%ymm2,4),%ymm7
- .byte 197,252,17,124,36,160 // vmovups %ymm7,-0x60(%rsp)
+ .byte 196,65,44,87,210 // vxorps %ymm10,%ymm10,%ymm10
+ .byte 197,221,118,228 // vpcmpeqd %ymm4,%ymm4,%ymm4
+ .byte 196,66,93,146,20,184 // vgatherdps %ymm4,(%r8,%ymm7,4),%ymm10
+ .byte 196,65,36,87,219 // vxorps %ymm11,%ymm11,%ymm11
+ .byte 197,221,118,228 // vpcmpeqd %ymm4,%ymm4,%ymm4
+ .byte 196,2,93,146,28,176 // vgatherdps %ymm4,(%r8,%ymm14,4),%ymm11
.byte 76,139,64,32 // mov 0x20(%rax),%r8
- .byte 197,213,118,237 // vpcmpeqd %ymm5,%ymm5,%ymm5
- .byte 197,196,87,255 // vxorps %ymm7,%ymm7,%ymm7
- .byte 196,194,85,146,60,176 // vgatherdps %ymm5,(%r8,%ymm6,4),%ymm7
- .byte 197,252,17,124,36,128 // vmovups %ymm7,-0x80(%rsp)
- .byte 197,213,118,237 // vpcmpeqd %ymm5,%ymm5,%ymm5
- .byte 196,65,28,87,228 // vxorps %ymm12,%ymm12,%ymm12
- .byte 196,66,85,146,36,144 // vgatherdps %ymm5,(%r8,%ymm2,4),%ymm12
+ .byte 197,212,87,237 // vxorps %ymm5,%ymm5,%ymm5
+ .byte 197,221,118,228 // vpcmpeqd %ymm4,%ymm4,%ymm4
+ .byte 196,194,93,146,44,184 // vgatherdps %ymm4,(%r8,%ymm7,4),%ymm5
+ .byte 197,252,17,108,36,192 // vmovups %ymm5,-0x40(%rsp)
+ .byte 197,212,87,237 // vxorps %ymm5,%ymm5,%ymm5
+ .byte 197,221,118,228 // vpcmpeqd %ymm4,%ymm4,%ymm4
+ .byte 196,130,93,146,44,176 // vgatherdps %ymm4,(%r8,%ymm14,4),%ymm5
+ .byte 197,252,17,108,36,160 // vmovups %ymm5,-0x60(%rsp)
.byte 76,139,64,40 // mov 0x28(%rax),%r8
- .byte 197,213,118,237 // vpcmpeqd %ymm5,%ymm5,%ymm5
- .byte 196,65,52,87,201 // vxorps %ymm9,%ymm9,%ymm9
- .byte 196,66,85,146,12,176 // vgatherdps %ymm5,(%r8,%ymm6,4),%ymm9
- .byte 197,213,118,237 // vpcmpeqd %ymm5,%ymm5,%ymm5
- .byte 196,65,44,87,210 // vxorps %ymm10,%ymm10,%ymm10
- .byte 196,66,85,146,20,144 // vgatherdps %ymm5,(%r8,%ymm2,4),%ymm10
+ .byte 196,65,20,87,237 // vxorps %ymm13,%ymm13,%ymm13
+ .byte 197,221,118,228 // vpcmpeqd %ymm4,%ymm4,%ymm4
+ .byte 196,66,93,146,44,184 // vgatherdps %ymm4,(%r8,%ymm7,4),%ymm13
+ .byte 197,212,87,237 // vxorps %ymm5,%ymm5,%ymm5
+ .byte 197,221,118,228 // vpcmpeqd %ymm4,%ymm4,%ymm4
+ .byte 196,130,93,146,44,176 // vgatherdps %ymm4,(%r8,%ymm14,4),%ymm5
+ .byte 197,252,17,108,36,224 // vmovups %ymm5,-0x20(%rsp)
.byte 76,139,64,48 // mov 0x30(%rax),%r8
+ .byte 197,220,87,228 // vxorps %ymm4,%ymm4,%ymm4
.byte 197,213,118,237 // vpcmpeqd %ymm5,%ymm5,%ymm5
- .byte 196,65,20,87,237 // vxorps %ymm13,%ymm13,%ymm13
- .byte 196,66,85,146,44,176 // vgatherdps %ymm5,(%r8,%ymm6,4),%ymm13
+ .byte 196,194,85,146,36,184 // vgatherdps %ymm5,(%r8,%ymm7,4),%ymm4
+ .byte 197,204,87,246 // vxorps %ymm6,%ymm6,%ymm6
.byte 197,213,118,237 // vpcmpeqd %ymm5,%ymm5,%ymm5
- .byte 196,65,12,87,246 // vxorps %ymm14,%ymm14,%ymm14
- .byte 196,66,85,146,52,144 // vgatherdps %ymm5,(%r8,%ymm2,4),%ymm14
+ .byte 196,130,85,146,52,176 // vgatherdps %ymm5,(%r8,%ymm14,4),%ymm6
+ .byte 197,252,17,116,36,128 // vmovups %ymm6,-0x80(%rsp)
.byte 76,139,64,56 // mov 0x38(%rax),%r8
- .byte 197,213,118,237 // vpcmpeqd %ymm5,%ymm5,%ymm5
- .byte 196,65,4,87,255 // vxorps %ymm15,%ymm15,%ymm15
- .byte 196,66,85,146,60,176 // vgatherdps %ymm5,(%r8,%ymm6,4),%ymm15
- .byte 197,213,118,237 // vpcmpeqd %ymm5,%ymm5,%ymm5
- .byte 197,196,87,255 // vxorps %ymm7,%ymm7,%ymm7
- .byte 196,194,85,146,60,144 // vgatherdps %ymm5,(%r8,%ymm2,4),%ymm7
- .byte 72,139,64,64 // mov 0x40(%rax),%rax
- .byte 196,65,61,118,192 // vpcmpeqd %ymm8,%ymm8,%ymm8
.byte 197,212,87,237 // vxorps %ymm5,%ymm5,%ymm5
- .byte 196,226,61,146,44,176 // vgatherdps %ymm8,(%rax,%ymm6,4),%ymm5
- .byte 196,65,61,118,192 // vpcmpeqd %ymm8,%ymm8,%ymm8
+ .byte 197,205,118,246 // vpcmpeqd %ymm6,%ymm6,%ymm6
+ .byte 196,194,77,146,44,184 // vgatherdps %ymm6,(%r8,%ymm7,4),%ymm5
.byte 197,204,87,246 // vxorps %ymm6,%ymm6,%ymm6
- .byte 196,226,61,146,52,144 // vgatherdps %ymm8,(%rax,%ymm2,4),%ymm6
- .byte 196,98,125,184,203 // vfmadd231ps %ymm3,%ymm0,%ymm9
- .byte 196,98,125,184,233 // vfmadd231ps %ymm1,%ymm0,%ymm13
- .byte 196,98,125,184,252 // vfmadd231ps %ymm4,%ymm0,%ymm15
- .byte 196,226,125,184,108,36,128 // vfmadd231ps -0x80(%rsp),%ymm0,%ymm5
- .byte 196,98,37,184,84,36,224 // vfmadd231ps -0x20(%rsp),%ymm11,%ymm10
- .byte 196,98,37,184,116,36,192 // vfmadd231ps -0x40(%rsp),%ymm11,%ymm14
- .byte 196,226,37,184,124,36,160 // vfmadd231ps -0x60(%rsp),%ymm11,%ymm7
- .byte 196,194,37,184,244 // vfmadd231ps %ymm12,%ymm11,%ymm6
- .byte 196,226,125,24,5,87,153,0,0 // vbroadcastss 0x9957(%rip),%ymm0 # 389e0 <_sk_srcover_bgra_8888_sse2_lowp+0x2dc>
- .byte 196,226,125,24,13,30,153,0,0 // vbroadcastss 0x991e(%rip),%ymm1 # 389b0 <_sk_srcover_bgra_8888_sse2_lowp+0x2ac>
- .byte 196,98,125,168,209 // vfmadd213ps %ymm1,%ymm0,%ymm10
+ .byte 196,65,29,118,228 // vpcmpeqd %ymm12,%ymm12,%ymm12
+ .byte 196,130,29,146,52,176 // vgatherdps %ymm12,(%r8,%ymm14,4),%ymm6
+ .byte 72,139,64,64 // mov 0x40(%rax),%rax
+ .byte 196,65,28,87,228 // vxorps %ymm12,%ymm12,%ymm12
+ .byte 196,65,5,118,255 // vpcmpeqd %ymm15,%ymm15,%ymm15
+ .byte 196,98,5,146,36,184 // vgatherdps %ymm15,(%rax,%ymm7,4),%ymm12
+ .byte 196,65,5,118,255 // vpcmpeqd %ymm15,%ymm15,%ymm15
+ .byte 197,196,87,255 // vxorps %ymm7,%ymm7,%ymm7
+ .byte 196,162,5,146,60,176 // vgatherdps %ymm15,(%rax,%ymm14,4),%ymm7
+ .byte 196,194,125,168,213 // vfmadd213ps %ymm13,%ymm0,%ymm2
+ .byte 196,98,125,168,196 // vfmadd213ps %ymm4,%ymm0,%ymm8
+ .byte 196,98,125,168,213 // vfmadd213ps %ymm5,%ymm0,%ymm10
+ .byte 196,98,125,184,100,36,192 // vfmadd231ps -0x40(%rsp),%ymm0,%ymm12
+ .byte 196,226,117,168,92,36,224 // vfmadd213ps -0x20(%rsp),%ymm1,%ymm3
+ .byte 196,98,117,168,76,36,128 // vfmadd213ps -0x80(%rsp),%ymm1,%ymm9
+ .byte 196,98,117,168,222 // vfmadd213ps %ymm6,%ymm1,%ymm11
+ .byte 196,226,117,184,124,36,160 // vfmadd231ps -0x60(%rsp),%ymm1,%ymm7
+ .byte 196,226,125,24,5,88,153,0,0 // vbroadcastss 0x9958(%rip),%ymm0 # 38b50 <_sk_srcover_bgra_8888_sse2_lowp+0x2dc>
+ .byte 196,226,125,24,13,31,153,0,0 // vbroadcastss 0x991f(%rip),%ymm1 # 38b20 <_sk_srcover_bgra_8888_sse2_lowp+0x2ac>
+ .byte 196,226,125,168,217 // vfmadd213ps %ymm1,%ymm0,%ymm3
+ .byte 196,226,125,168,209 // vfmadd213ps %ymm1,%ymm0,%ymm2
.byte 196,98,125,168,201 // vfmadd213ps %ymm1,%ymm0,%ymm9
- .byte 196,98,125,168,241 // vfmadd213ps %ymm1,%ymm0,%ymm14
- .byte 196,98,125,168,233 // vfmadd213ps %ymm1,%ymm0,%ymm13
+ .byte 196,98,125,168,193 // vfmadd213ps %ymm1,%ymm0,%ymm8
+ .byte 196,98,125,168,217 // vfmadd213ps %ymm1,%ymm0,%ymm11
+ .byte 196,98,125,168,209 // vfmadd213ps %ymm1,%ymm0,%ymm10
.byte 196,226,125,168,249 // vfmadd213ps %ymm1,%ymm0,%ymm7
- .byte 196,98,125,168,249 // vfmadd213ps %ymm1,%ymm0,%ymm15
- .byte 196,226,125,168,241 // vfmadd213ps %ymm1,%ymm0,%ymm6
- .byte 196,226,125,168,233 // vfmadd213ps %ymm1,%ymm0,%ymm5
- .byte 196,193,126,91,193 // vcvttps2dq %ymm9,%ymm0
- .byte 197,253,111,29,25,161,0,0 // vmovdqa 0xa119(%rip),%ymm3 # 391e0 <_sk_srcover_bgra_8888_sse2_lowp+0xadc>
- .byte 196,226,125,0,195 // vpshufb %ymm3,%ymm0,%ymm0
+ .byte 196,98,125,168,225 // vfmadd213ps %ymm1,%ymm0,%ymm12
+ .byte 197,254,91,194 // vcvttps2dq %ymm2,%ymm0
+ .byte 197,253,111,37,11,161,0,0 // vmovdqa 0xa10b(%rip),%ymm4 # 39340 <_sk_srcover_bgra_8888_sse2_lowp+0xacc>
+ .byte 196,226,125,0,196 // vpshufb %ymm4,%ymm0,%ymm0
.byte 196,227,253,0,192,232 // vpermq $0xe8,%ymm0,%ymm0
- .byte 196,193,126,91,202 // vcvttps2dq %ymm10,%ymm1
- .byte 196,226,117,0,203 // vpshufb %ymm3,%ymm1,%ymm1
+ .byte 197,254,91,203 // vcvttps2dq %ymm3,%ymm1
+ .byte 196,226,117,0,204 // vpshufb %ymm4,%ymm1,%ymm1
.byte 196,227,253,0,201,232 // vpermq $0xe8,%ymm1,%ymm1
.byte 196,227,125,56,193,1 // vinserti128 $0x1,%xmm1,%ymm0,%ymm0
- .byte 196,193,126,91,205 // vcvttps2dq %ymm13,%ymm1
- .byte 196,226,117,0,203 // vpshufb %ymm3,%ymm1,%ymm1
+ .byte 196,193,126,91,200 // vcvttps2dq %ymm8,%ymm1
+ .byte 196,226,117,0,204 // vpshufb %ymm4,%ymm1,%ymm1
.byte 196,227,253,0,201,232 // vpermq $0xe8,%ymm1,%ymm1
- .byte 196,193,126,91,214 // vcvttps2dq %ymm14,%ymm2
- .byte 196,226,109,0,211 // vpshufb %ymm3,%ymm2,%ymm2
+ .byte 196,193,126,91,209 // vcvttps2dq %ymm9,%ymm2
+ .byte 196,226,109,0,212 // vpshufb %ymm4,%ymm2,%ymm2
.byte 196,227,253,0,210,232 // vpermq $0xe8,%ymm2,%ymm2
.byte 196,227,117,56,202,1 // vinserti128 $0x1,%xmm2,%ymm1,%ymm1
- .byte 196,193,126,91,215 // vcvttps2dq %ymm15,%ymm2
- .byte 196,226,109,0,211 // vpshufb %ymm3,%ymm2,%ymm2
+ .byte 196,193,126,91,210 // vcvttps2dq %ymm10,%ymm2
+ .byte 196,226,109,0,212 // vpshufb %ymm4,%ymm2,%ymm2
.byte 196,227,253,0,210,232 // vpermq $0xe8,%ymm2,%ymm2
- .byte 197,254,91,231 // vcvttps2dq %ymm7,%ymm4
- .byte 196,226,93,0,227 // vpshufb %ymm3,%ymm4,%ymm4
- .byte 196,227,253,0,228,232 // vpermq $0xe8,%ymm4,%ymm4
- .byte 196,227,109,56,212,1 // vinserti128 $0x1,%xmm4,%ymm2,%ymm2
- .byte 197,254,91,229 // vcvttps2dq %ymm5,%ymm4
- .byte 196,226,93,0,227 // vpshufb %ymm3,%ymm4,%ymm4
- .byte 197,254,91,238 // vcvttps2dq %ymm6,%ymm5
- .byte 196,226,85,0,219 // vpshufb %ymm3,%ymm5,%ymm3
- .byte 196,227,253,0,228,232 // vpermq $0xe8,%ymm4,%ymm4
+ .byte 196,193,126,91,219 // vcvttps2dq %ymm11,%ymm3
+ .byte 196,226,101,0,220 // vpshufb %ymm4,%ymm3,%ymm3
.byte 196,227,253,0,219,232 // vpermq $0xe8,%ymm3,%ymm3
- .byte 196,227,93,56,219,1 // vinserti128 $0x1,%xmm3,%ymm4,%ymm3
+ .byte 196,227,109,56,211,1 // vinserti128 $0x1,%xmm3,%ymm2,%ymm2
+ .byte 196,193,126,91,220 // vcvttps2dq %ymm12,%ymm3
+ .byte 196,226,101,0,220 // vpshufb %ymm4,%ymm3,%ymm3
+ .byte 197,254,91,239 // vcvttps2dq %ymm7,%ymm5
+ .byte 196,226,85,0,228 // vpshufb %ymm4,%ymm5,%ymm4
+ .byte 196,227,253,0,219,232 // vpermq $0xe8,%ymm3,%ymm3
+ .byte 196,227,253,0,228,232 // vpermq $0xe8,%ymm4,%ymm4
+ .byte 196,227,101,56,220,1 // vinserti128 $0x1,%xmm4,%ymm3,%ymm3
.byte 72,173 // lods %ds:(%rsi),%rax
.byte 197,252,16,36,36 // vmovups (%rsp),%ymm4
.byte 197,252,16,108,36,32 // vmovups 0x20(%rsp),%ymm5
@@ -55949,12 +56025,12 @@ _sk_evenly_spaced_2_stop_gradient_hsw_lowp:
.byte 197,124,40,194 // vmovaps %ymm2,%ymm8
.byte 196,98,125,168,195 // vfmadd213ps %ymm3,%ymm0,%ymm8
.byte 196,226,117,168,211 // vfmadd213ps %ymm3,%ymm1,%ymm2
- .byte 196,226,125,24,29,67,152,0,0 // vbroadcastss 0x9843(%rip),%ymm3 # 389e0 <_sk_srcover_bgra_8888_sse2_lowp+0x2dc>
- .byte 196,98,125,24,29,10,152,0,0 // vbroadcastss 0x980a(%rip),%ymm11 # 389b0 <_sk_srcover_bgra_8888_sse2_lowp+0x2ac>
+ .byte 196,226,125,24,29,68,152,0,0 // vbroadcastss 0x9844(%rip),%ymm3 # 38b50 <_sk_srcover_bgra_8888_sse2_lowp+0x2dc>
+ .byte 196,98,125,24,29,11,152,0,0 // vbroadcastss 0x980b(%rip),%ymm11 # 38b20 <_sk_srcover_bgra_8888_sse2_lowp+0x2ac>
.byte 196,194,101,168,211 // vfmadd213ps %ymm11,%ymm3,%ymm2
.byte 196,66,101,168,195 // vfmadd213ps %ymm11,%ymm3,%ymm8
.byte 196,65,126,91,192 // vcvttps2dq %ymm8,%ymm8
- .byte 197,125,111,21,67,160,0,0 // vmovdqa 0xa043(%rip),%ymm10 # 39200 <_sk_srcover_bgra_8888_sse2_lowp+0xafc>
+ .byte 197,125,111,21,52,160,0,0 // vmovdqa 0xa034(%rip),%ymm10 # 39360 <_sk_srcover_bgra_8888_sse2_lowp+0xaec>
.byte 196,66,61,0,194 // vpshufb %ymm10,%ymm8,%ymm8
.byte 196,67,253,0,192,232 // vpermq $0xe8,%ymm8,%ymm8
.byte 197,254,91,210 // vcvttps2dq %ymm2,%ymm2
@@ -56018,7 +56094,7 @@ _sk_xy_to_unit_angle_hsw_lowp:
.byte 197,252,40,227 // vmovaps %ymm3,%ymm4
.byte 197,252,40,217 // vmovaps %ymm1,%ymm3
.byte 197,252,40,200 // vmovaps %ymm0,%ymm1
- .byte 196,98,125,24,5,93,152,0,0 // vbroadcastss 0x985d(%rip),%ymm8 # 38b44 <_sk_srcover_bgra_8888_sse2_lowp+0x440>
+ .byte 196,98,125,24,5,94,152,0,0 // vbroadcastss 0x985e(%rip),%ymm8 # 38cb4 <_sk_srcover_bgra_8888_sse2_lowp+0x440>
.byte 196,65,116,84,200 // vandps %ymm8,%ymm1,%ymm9
.byte 196,65,100,84,208 // vandps %ymm8,%ymm3,%ymm10
.byte 196,65,108,84,216 // vandps %ymm8,%ymm2,%ymm11
@@ -56032,20 +56108,20 @@ _sk_xy_to_unit_angle_hsw_lowp:
.byte 196,67,45,74,192,192 // vblendvps %ymm12,%ymm8,%ymm10,%ymm8
.byte 196,65,4,94,192 // vdivps %ymm8,%ymm15,%ymm8
.byte 196,65,60,89,208 // vmulps %ymm8,%ymm8,%ymm10
- .byte 196,98,125,24,29,169,151,0,0 // vbroadcastss 0x97a9(%rip),%ymm11 # 38ae0 <_sk_srcover_bgra_8888_sse2_lowp+0x3dc>
- .byte 196,98,125,24,53,164,151,0,0 // vbroadcastss 0x97a4(%rip),%ymm14 # 38ae4 <_sk_srcover_bgra_8888_sse2_lowp+0x3e0>
+ .byte 196,98,125,24,29,170,151,0,0 // vbroadcastss 0x97aa(%rip),%ymm11 # 38c50 <_sk_srcover_bgra_8888_sse2_lowp+0x3dc>
+ .byte 196,98,125,24,53,165,151,0,0 // vbroadcastss 0x97a5(%rip),%ymm14 # 38c54 <_sk_srcover_bgra_8888_sse2_lowp+0x3e0>
.byte 196,65,124,40,251 // vmovaps %ymm11,%ymm15
.byte 196,66,45,168,254 // vfmadd213ps %ymm14,%ymm10,%ymm15
.byte 196,193,52,89,193 // vmulps %ymm9,%ymm9,%ymm0
.byte 196,66,125,168,222 // vfmadd213ps %ymm14,%ymm0,%ymm11
- .byte 196,98,125,24,53,139,151,0,0 // vbroadcastss 0x978b(%rip),%ymm14 # 38ae8 <_sk_srcover_bgra_8888_sse2_lowp+0x3e4>
+ .byte 196,98,125,24,53,140,151,0,0 // vbroadcastss 0x978c(%rip),%ymm14 # 38c58 <_sk_srcover_bgra_8888_sse2_lowp+0x3e4>
.byte 196,66,125,168,222 // vfmadd213ps %ymm14,%ymm0,%ymm11
.byte 196,66,45,168,254 // vfmadd213ps %ymm14,%ymm10,%ymm15
- .byte 196,98,125,24,53,124,151,0,0 // vbroadcastss 0x977c(%rip),%ymm14 # 38aec <_sk_srcover_bgra_8888_sse2_lowp+0x3e8>
+ .byte 196,98,125,24,53,125,151,0,0 // vbroadcastss 0x977d(%rip),%ymm14 # 38c5c <_sk_srcover_bgra_8888_sse2_lowp+0x3e8>
.byte 196,66,45,168,254 // vfmadd213ps %ymm14,%ymm10,%ymm15
.byte 196,66,125,168,222 // vfmadd213ps %ymm14,%ymm0,%ymm11
.byte 196,193,52,89,195 // vmulps %ymm11,%ymm9,%ymm0
- .byte 196,98,125,24,13,104,151,0,0 // vbroadcastss 0x9768(%rip),%ymm9 # 38af0 <_sk_srcover_bgra_8888_sse2_lowp+0x3ec>
+ .byte 196,98,125,24,13,105,151,0,0 // vbroadcastss 0x9769(%rip),%ymm9 # 38c60 <_sk_srcover_bgra_8888_sse2_lowp+0x3ec>
.byte 197,52,92,208 // vsubps %ymm0,%ymm9,%ymm10
.byte 196,195,125,74,194,208 // vblendvps %ymm13,%ymm10,%ymm0,%ymm0
.byte 196,65,60,89,199 // vmulps %ymm15,%ymm8,%ymm8
@@ -56053,14 +56129,14 @@ _sk_xy_to_unit_angle_hsw_lowp:
.byte 196,67,61,74,193,192 // vblendvps %ymm12,%ymm9,%ymm8,%ymm8
.byte 196,65,52,87,201 // vxorps %ymm9,%ymm9,%ymm9
.byte 196,193,116,194,201,1 // vcmpltps %ymm9,%ymm1,%ymm1
- .byte 196,98,125,24,21,250,149,0,0 // vbroadcastss 0x95fa(%rip),%ymm10 # 389b0 <_sk_srcover_bgra_8888_sse2_lowp+0x2ac>
+ .byte 196,98,125,24,21,251,149,0,0 // vbroadcastss 0x95fb(%rip),%ymm10 # 38b20 <_sk_srcover_bgra_8888_sse2_lowp+0x2ac>
.byte 197,44,92,216 // vsubps %ymm0,%ymm10,%ymm11
.byte 196,195,125,74,195,16 // vblendvps %ymm1,%ymm11,%ymm0,%ymm0
.byte 196,193,100,194,201,1 // vcmpltps %ymm9,%ymm3,%ymm1
.byte 196,193,44,92,216 // vsubps %ymm8,%ymm10,%ymm3
.byte 196,227,61,74,203,16 // vblendvps %ymm1,%ymm3,%ymm8,%ymm1
.byte 196,193,108,194,217,1 // vcmpltps %ymm9,%ymm2,%ymm3
- .byte 196,98,125,24,5,212,149,0,0 // vbroadcastss 0x95d4(%rip),%ymm8 # 389b4 <_sk_srcover_bgra_8888_sse2_lowp+0x2b0>
+ .byte 196,98,125,24,5,213,149,0,0 // vbroadcastss 0x95d5(%rip),%ymm8 # 38b24 <_sk_srcover_bgra_8888_sse2_lowp+0x2b0>
.byte 197,60,92,208 // vsubps %ymm0,%ymm8,%ymm10
.byte 196,195,125,74,194,48 // vblendvps %ymm3,%ymm10,%ymm0,%ymm0
.byte 196,193,92,194,217,1 // vcmpltps %ymm9,%ymm4,%ymm3
@@ -56106,9 +56182,9 @@ _sk_srcover_rgba_8888_hsw_lowp:
.byte 65,254,202 // dec %r10b
.byte 69,15,182,194 // movzbl %r10b,%r8d
.byte 65,128,248,14 // cmp $0xe,%r8b
- .byte 119,51 // ja 2f4a4 <_sk_srcover_rgba_8888_hsw_lowp+0x5c>
+ .byte 119,51 // ja 2f613 <_sk_srcover_rgba_8888_hsw_lowp+0x5c>
.byte 197,221,239,228 // vpxor %ymm4,%ymm4,%ymm4
- .byte 76,141,29,236,2,0,0 // lea 0x2ec(%rip),%r11 # 2f768 <_sk_srcover_rgba_8888_hsw_lowp+0x320>
+ .byte 76,141,29,237,2,0,0 // lea 0x2ed(%rip),%r11 # 2f8d8 <_sk_srcover_rgba_8888_hsw_lowp+0x321>
.byte 75,99,4,131 // movslq (%r11,%r8,4),%rax
.byte 76,1,216 // add %r11,%rax
.byte 197,213,239,237 // vpxor %ymm5,%ymm5,%ymm5
@@ -56118,10 +56194,10 @@ _sk_srcover_rgba_8888_hsw_lowp:
.byte 255,224 // jmpq *%rax
.byte 196,193,121,110,41 // vmovd (%r9),%xmm5
.byte 197,221,239,228 // vpxor %ymm4,%ymm4,%ymm4
- .byte 233,22,1,0,0 // jmpq 2f5ba <_sk_srcover_rgba_8888_hsw_lowp+0x172>
+ .byte 233,22,1,0,0 // jmpq 2f729 <_sk_srcover_rgba_8888_hsw_lowp+0x172>
.byte 196,193,126,111,41 // vmovdqu (%r9),%ymm5
.byte 196,193,126,111,97,32 // vmovdqu 0x20(%r9),%ymm4
- .byte 233,6,1,0,0 // jmpq 2f5ba <_sk_srcover_rgba_8888_hsw_lowp+0x172>
+ .byte 233,6,1,0,0 // jmpq 2f729 <_sk_srcover_rgba_8888_hsw_lowp+0x172>
.byte 196,193,121,110,97,8 // vmovd 0x8(%r9),%xmm4
.byte 196,226,121,89,228 // vpbroadcastq %xmm4,%xmm4
.byte 197,197,239,255 // vpxor %ymm7,%ymm7,%ymm7
@@ -56130,7 +56206,7 @@ _sk_srcover_rgba_8888_hsw_lowp:
.byte 197,249,112,237,232 // vpshufd $0xe8,%xmm5,%xmm5
.byte 196,227,93,2,237,3 // vpblendd $0x3,%ymm5,%ymm4,%ymm5
.byte 197,253,111,231 // vmovdqa %ymm7,%ymm4
- .byte 233,216,0,0,0 // jmpq 2f5ba <_sk_srcover_rgba_8888_hsw_lowp+0x172>
+ .byte 233,216,0,0,0 // jmpq 2f729 <_sk_srcover_rgba_8888_hsw_lowp+0x172>
.byte 196,193,121,110,97,24 // vmovd 0x18(%r9),%xmm4
.byte 196,226,125,89,228 // vpbroadcastq %xmm4,%ymm4
.byte 197,213,239,237 // vpxor %ymm5,%ymm5,%ymm5
@@ -56146,7 +56222,7 @@ _sk_srcover_rgba_8888_hsw_lowp:
.byte 196,193,122,111,41 // vmovdqu (%r9),%xmm5
.byte 196,227,85,2,236,240 // vpblendd $0xf0,%ymm4,%ymm5,%ymm5
.byte 197,125,127,196 // vmovdqa %ymm8,%ymm4
- .byte 233,129,0,0,0 // jmpq 2f5ba <_sk_srcover_rgba_8888_hsw_lowp+0x172>
+ .byte 233,129,0,0,0 // jmpq 2f729 <_sk_srcover_rgba_8888_hsw_lowp+0x172>
.byte 196,193,121,110,97,40 // vmovd 0x28(%r9),%xmm4
.byte 196,226,121,89,228 // vpbroadcastq %xmm4,%xmm4
.byte 197,213,239,237 // vpxor %ymm5,%ymm5,%ymm5
@@ -56156,7 +56232,7 @@ _sk_srcover_rgba_8888_hsw_lowp:
.byte 196,193,121,110,105,32 // vmovd 0x20(%r9),%xmm5
.byte 196,227,93,2,229,1 // vpblendd $0x1,%ymm5,%ymm4,%ymm4
.byte 196,193,126,111,41 // vmovdqu (%r9),%ymm5
- .byte 235,76 // jmp 2f5ba <_sk_srcover_rgba_8888_hsw_lowp+0x172>
+ .byte 235,76 // jmp 2f729 <_sk_srcover_rgba_8888_hsw_lowp+0x172>
.byte 196,193,121,110,97,56 // vmovd 0x38(%r9),%xmm4
.byte 196,226,125,89,228 // vpbroadcastq %xmm4,%ymm4
.byte 197,213,239,237 // vpxor %ymm5,%ymm5,%ymm5
@@ -56176,9 +56252,9 @@ _sk_srcover_rgba_8888_hsw_lowp:
.byte 196,227,69,14,236,170 // vpblendw $0xaa,%ymm4,%ymm7,%ymm5
.byte 196,227,77,14,228,170 // vpblendw $0xaa,%ymm4,%ymm6,%ymm4
.byte 196,226,93,43,237 // vpackusdw %ymm5,%ymm4,%ymm5
- .byte 197,125,111,5,61,156,0,0 // vmovdqa 0x9c3d(%rip),%ymm8 # 39220 <_sk_srcover_bgra_8888_sse2_lowp+0xb1c>
+ .byte 197,125,111,5,46,156,0,0 // vmovdqa 0x9c2e(%rip),%ymm8 # 39380 <_sk_srcover_bgra_8888_sse2_lowp+0xb0c>
.byte 196,193,85,219,224 // vpand %ymm8,%ymm5,%ymm4
- .byte 196,98,125,121,13,79,156,0,0 // vpbroadcastw 0x9c4f(%rip),%ymm9 # 39240 <_sk_srcover_bgra_8888_sse2_lowp+0xb3c>
+ .byte 196,98,125,121,13,64,156,0,0 // vpbroadcastw 0x9c40(%rip),%ymm9 # 393a0 <_sk_srcover_bgra_8888_sse2_lowp+0xb2c>
.byte 197,213,113,213,8 // vpsrlw $0x8,%ymm5,%ymm5
.byte 197,197,114,215,16 // vpsrld $0x10,%ymm7,%ymm7
.byte 197,205,114,214,16 // vpsrld $0x10,%ymm6,%ymm6
@@ -56217,19 +56293,19 @@ _sk_srcover_rgba_8888_hsw_lowp:
.byte 196,193,53,114,243,16 // vpslld $0x10,%ymm11,%ymm9
.byte 196,65,53,235,202 // vpor %ymm10,%ymm9,%ymm9
.byte 65,128,250,14 // cmp $0xe,%r10b
- .byte 119,26 // ja 2f6cb <_sk_srcover_rgba_8888_hsw_lowp+0x283>
- .byte 76,141,21,236,0,0,0 // lea 0xec(%rip),%r10 # 2f7a4 <_sk_srcover_rgba_8888_hsw_lowp+0x35c>
+ .byte 119,26 // ja 2f83a <_sk_srcover_rgba_8888_hsw_lowp+0x283>
+ .byte 76,141,21,237,0,0,0 // lea 0xed(%rip),%r10 # 2f914 <_sk_srcover_rgba_8888_hsw_lowp+0x35d>
.byte 75,99,4,130 // movslq (%r10,%r8,4),%rax
.byte 76,1,208 // add %r10,%rax
.byte 255,224 // jmpq *%rax
.byte 196,65,121,126,1 // vmovd %xmm8,(%r9)
- .byte 233,151,0,0,0 // jmpq 2f762 <_sk_srcover_rgba_8888_hsw_lowp+0x31a>
+ .byte 233,151,0,0,0 // jmpq 2f8d1 <_sk_srcover_rgba_8888_hsw_lowp+0x31a>
.byte 196,65,126,127,1 // vmovdqu %ymm8,(%r9)
.byte 196,65,126,127,73,32 // vmovdqu %ymm9,0x20(%r9)
- .byte 233,135,0,0,0 // jmpq 2f762 <_sk_srcover_rgba_8888_hsw_lowp+0x31a>
+ .byte 233,135,0,0,0 // jmpq 2f8d1 <_sk_srcover_rgba_8888_hsw_lowp+0x31a>
.byte 196,67,121,22,65,8,2 // vpextrd $0x2,%xmm8,0x8(%r9)
.byte 196,65,121,214,1 // vmovq %xmm8,(%r9)
- .byte 235,121 // jmp 2f762 <_sk_srcover_rgba_8888_hsw_lowp+0x31a>
+ .byte 235,121 // jmp 2f8d1 <_sk_srcover_rgba_8888_hsw_lowp+0x31a>
.byte 196,67,125,57,193,1 // vextracti128 $0x1,%ymm8,%xmm9
.byte 196,67,121,22,73,24,2 // vpextrd $0x2,%xmm9,0x18(%r9)
.byte 196,67,125,57,193,1 // vextracti128 $0x1,%ymm8,%xmm9
@@ -56237,12 +56313,12 @@ _sk_srcover_rgba_8888_hsw_lowp:
.byte 196,67,125,57,193,1 // vextracti128 $0x1,%ymm8,%xmm9
.byte 196,65,121,126,73,16 // vmovd %xmm9,0x10(%r9)
.byte 196,65,122,127,1 // vmovdqu %xmm8,(%r9)
- .byte 235,76 // jmp 2f762 <_sk_srcover_rgba_8888_hsw_lowp+0x31a>
+ .byte 235,76 // jmp 2f8d1 <_sk_srcover_rgba_8888_hsw_lowp+0x31a>
.byte 196,67,121,22,73,40,2 // vpextrd $0x2,%xmm9,0x28(%r9)
.byte 196,67,121,22,73,36,1 // vpextrd $0x1,%xmm9,0x24(%r9)
.byte 196,65,121,126,73,32 // vmovd %xmm9,0x20(%r9)
.byte 196,65,126,127,1 // vmovdqu %ymm8,(%r9)
- .byte 235,49 // jmp 2f762 <_sk_srcover_rgba_8888_hsw_lowp+0x31a>
+ .byte 235,49 // jmp 2f8d1 <_sk_srcover_rgba_8888_hsw_lowp+0x31a>
.byte 196,67,125,57,202,1 // vextracti128 $0x1,%ymm9,%xmm10
.byte 196,67,121,22,81,56,2 // vpextrd $0x2,%xmm10,0x38(%r9)
.byte 196,67,125,57,202,1 // vextracti128 $0x1,%ymm9,%xmm10
@@ -56253,69 +56329,63 @@ _sk_srcover_rgba_8888_hsw_lowp:
.byte 196,65,122,127,73,32 // vmovdqu %xmm9,0x20(%r9)
.byte 72,173 // lods %ds:(%rsi),%rax
.byte 255,224 // jmpq *%rax
- .byte 102,144 // xchg %ax,%ax
- .byte 46,253 // cs std
+ .byte 15,31,0 // nopl (%rax)
+ .byte 45,253,255,255,96 // sub $0x60fffffd,%eax
+ .byte 253 // std
.byte 255 // (bad)
- .byte 255,97,253 // jmpq *-0x3(%rcx)
+ .byte 255,75,253 // decl -0x3(%rbx)
.byte 255 // (bad)
- .byte 255,76,253,255 // decl -0x1(%rbp,%rdi,8)
.byte 255 // (bad)
- .byte 189,253,255,255,166 // mov $0xa6fffffd,%ebp
+ .byte 188,253,255,255,165 // mov $0xa5fffffd,%esp
.byte 253 // std
.byte 255 // (bad)
- .byte 255,143,253,255,255,122 // decl 0x7afffffd(%rdi)
+ .byte 255,142,253,255,255,121 // decl 0x79fffffd(%rsi)
.byte 253 // std
.byte 255 // (bad)
.byte 255 // (bad)
- .byte 255 // (bad)
+ .byte 254 // (bad)
.byte 253 // std
.byte 255 // (bad)
- .byte 255,243 // push %rbx
+ .byte 255,242 // push %rdx
.byte 253 // std
.byte 255 // (bad)
- .byte 255,230 // jmpq *%rsi
+ .byte 255,229 // jmpq *%rbp
.byte 253 // std
.byte 255 // (bad)
- .byte 255,209 // callq *%rcx
+ .byte 255,208 // callq *%rax
.byte 253 // std
.byte 255 // (bad)
- .byte 255,65,254 // incl -0x2(%rcx)
+ .byte 255,64,254 // incl -0x2(%rax)
.byte 255 // (bad)
- .byte 255,46 // ljmp *(%rsi)
+ .byte 255,45,254,255,255,26 // ljmp *0x1afffffe(%rip) # 1b02f90b <_sk_srcover_bgra_8888_sse2_lowp+0x1aff7097>
.byte 254 // (bad)
.byte 255 // (bad)
- .byte 255,27 // lcall *(%rbx)
- .byte 254 // (bad)
+ .byte 255,5,254,255,255,28 // incl 0x1cfffffe(%rip) # 1d02f913 <_sk_srcover_bgra_8888_sse2_lowp+0x1cff709f>
.byte 255 // (bad)
- .byte 255,6 // incl (%rsi)
- .byte 254 // (bad)
.byte 255 // (bad)
- .byte 255,29,255,255,255,62 // lcall *0x3effffff(%rip) # 3f02f7a8 <_sk_srcover_bgra_8888_sse2_lowp+0x3eff70a4>
.byte 255 // (bad)
+ .byte 61,255,255,255,54 // cmp $0x36ffffff,%eax
.byte 255 // (bad)
- .byte 255,55 // pushq (%rdi)
.byte 255 // (bad)
+ .byte 255,106,255 // ljmp *-0x1(%rdx)
.byte 255 // (bad)
- .byte 255,107,255 // ljmp *-0x1(%rbx)
- .byte 255 // (bad)
- .byte 255,95,255 // lcall *-0x1(%rdi)
- .byte 255 // (bad)
- .byte 255,82,255 // callq *-0x1(%rdx)
+ .byte 255,94,255 // lcall *-0x1(%rsi)
.byte 255 // (bad)
- .byte 255,69,255 // incl -0x1(%rbp)
+ .byte 255,81,255 // callq *-0x1(%rcx)
.byte 255 // (bad)
- .byte 255,134,255,255,255,128 // incl -0x7f000001(%rsi)
+ .byte 255,68,255,255 // incl -0x1(%rdi,%rdi,8)
+ .byte 255,133,255,255,255,127 // incl 0x7fffffff(%rbp)
.byte 255 // (bad)
.byte 255 // (bad)
.byte 255 // (bad)
- .byte 121,255 // jns 2f7c9 <_sk_srcover_rgba_8888_hsw_lowp+0x381>
+ .byte 120,255 // js 2f939 <_sk_srcover_rgba_8888_hsw_lowp+0x382>
.byte 255 // (bad)
- .byte 255,114,255 // pushq -0x1(%rdx)
+ .byte 255,113,255 // pushq -0x1(%rcx)
.byte 255 // (bad)
- .byte 255,179,255,255,255,167 // pushq -0x58000001(%rbx)
+ .byte 255,178,255,255,255,166 // pushq -0x59000001(%rdx)
.byte 255 // (bad)
.byte 255 // (bad)
- .byte 255,154,255,255,255,141 // lcall *-0x72000001(%rdx)
+ .byte 255,153,255,255,255,140 // lcall *-0x73000001(%rcx)
.byte 255 // (bad)
.byte 255 // (bad)
.byte 255 // .byte 0xff
@@ -56335,9 +56405,9 @@ _sk_srcover_bgra_8888_hsw_lowp:
.byte 65,254,202 // dec %r10b
.byte 69,15,182,194 // movzbl %r10b,%r8d
.byte 65,128,248,14 // cmp $0xe,%r8b
- .byte 119,51 // ja 2f83c <_sk_srcover_bgra_8888_hsw_lowp+0x5c>
+ .byte 119,51 // ja 2f9ac <_sk_srcover_bgra_8888_hsw_lowp+0x5c>
.byte 197,221,239,228 // vpxor %ymm4,%ymm4,%ymm4
- .byte 76,141,29,236,2,0,0 // lea 0x2ec(%rip),%r11 # 2fb00 <_sk_srcover_bgra_8888_hsw_lowp+0x320>
+ .byte 76,141,29,236,2,0,0 // lea 0x2ec(%rip),%r11 # 2fc70 <_sk_srcover_bgra_8888_hsw_lowp+0x320>
.byte 75,99,4,131 // movslq (%r11,%r8,4),%rax
.byte 76,1,216 // add %r11,%rax
.byte 197,213,239,237 // vpxor %ymm5,%ymm5,%ymm5
@@ -56347,10 +56417,10 @@ _sk_srcover_bgra_8888_hsw_lowp:
.byte 255,224 // jmpq *%rax
.byte 196,193,121,110,41 // vmovd (%r9),%xmm5
.byte 197,221,239,228 // vpxor %ymm4,%ymm4,%ymm4
- .byte 233,22,1,0,0 // jmpq 2f952 <_sk_srcover_bgra_8888_hsw_lowp+0x172>
+ .byte 233,22,1,0,0 // jmpq 2fac2 <_sk_srcover_bgra_8888_hsw_lowp+0x172>
.byte 196,193,126,111,41 // vmovdqu (%r9),%ymm5
.byte 196,193,126,111,97,32 // vmovdqu 0x20(%r9),%ymm4
- .byte 233,6,1,0,0 // jmpq 2f952 <_sk_srcover_bgra_8888_hsw_lowp+0x172>
+ .byte 233,6,1,0,0 // jmpq 2fac2 <_sk_srcover_bgra_8888_hsw_lowp+0x172>
.byte 196,193,121,110,97,8 // vmovd 0x8(%r9),%xmm4
.byte 196,226,121,89,228 // vpbroadcastq %xmm4,%xmm4
.byte 197,197,239,255 // vpxor %ymm7,%ymm7,%ymm7
@@ -56359,7 +56429,7 @@ _sk_srcover_bgra_8888_hsw_lowp:
.byte 197,249,112,237,232 // vpshufd $0xe8,%xmm5,%xmm5
.byte 196,227,93,2,237,3 // vpblendd $0x3,%ymm5,%ymm4,%ymm5
.byte 197,253,111,231 // vmovdqa %ymm7,%ymm4
- .byte 233,216,0,0,0 // jmpq 2f952 <_sk_srcover_bgra_8888_hsw_lowp+0x172>
+ .byte 233,216,0,0,0 // jmpq 2fac2 <_sk_srcover_bgra_8888_hsw_lowp+0x172>
.byte 196,193,121,110,97,24 // vmovd 0x18(%r9),%xmm4
.byte 196,226,125,89,228 // vpbroadcastq %xmm4,%ymm4
.byte 197,213,239,237 // vpxor %ymm5,%ymm5,%ymm5
@@ -56375,7 +56445,7 @@ _sk_srcover_bgra_8888_hsw_lowp:
.byte 196,193,122,111,41 // vmovdqu (%r9),%xmm5
.byte 196,227,85,2,236,240 // vpblendd $0xf0,%ymm4,%ymm5,%ymm5
.byte 197,125,127,196 // vmovdqa %ymm8,%ymm4
- .byte 233,129,0,0,0 // jmpq 2f952 <_sk_srcover_bgra_8888_hsw_lowp+0x172>
+ .byte 233,129,0,0,0 // jmpq 2fac2 <_sk_srcover_bgra_8888_hsw_lowp+0x172>
.byte 196,193,121,110,97,40 // vmovd 0x28(%r9),%xmm4
.byte 196,226,121,89,228 // vpbroadcastq %xmm4,%xmm4
.byte 197,213,239,237 // vpxor %ymm5,%ymm5,%ymm5
@@ -56385,7 +56455,7 @@ _sk_srcover_bgra_8888_hsw_lowp:
.byte 196,193,121,110,105,32 // vmovd 0x20(%r9),%xmm5
.byte 196,227,93,2,229,1 // vpblendd $0x1,%ymm5,%ymm4,%ymm4
.byte 196,193,126,111,41 // vmovdqu (%r9),%ymm5
- .byte 235,76 // jmp 2f952 <_sk_srcover_bgra_8888_hsw_lowp+0x172>
+ .byte 235,76 // jmp 2fac2 <_sk_srcover_bgra_8888_hsw_lowp+0x172>
.byte 196,193,121,110,97,56 // vmovd 0x38(%r9),%xmm4
.byte 196,226,125,89,228 // vpbroadcastq %xmm4,%ymm4
.byte 197,213,239,237 // vpxor %ymm5,%ymm5,%ymm5
@@ -56405,9 +56475,9 @@ _sk_srcover_bgra_8888_hsw_lowp:
.byte 196,227,93,14,245,170 // vpblendw $0xaa,%ymm5,%ymm4,%ymm6
.byte 196,227,69,14,237,170 // vpblendw $0xaa,%ymm5,%ymm7,%ymm5
.byte 196,226,85,43,238 // vpackusdw %ymm6,%ymm5,%ymm5
- .byte 197,125,111,5,229,152,0,0 // vmovdqa 0x98e5(%rip),%ymm8 # 39260 <_sk_srcover_bgra_8888_sse2_lowp+0xb5c>
+ .byte 197,125,111,5,213,152,0,0 // vmovdqa 0x98d5(%rip),%ymm8 # 393c0 <_sk_srcover_bgra_8888_sse2_lowp+0xb4c>
.byte 196,193,85,219,240 // vpand %ymm8,%ymm5,%ymm6
- .byte 196,98,125,121,13,247,152,0,0 // vpbroadcastw 0x98f7(%rip),%ymm9 # 39280 <_sk_srcover_bgra_8888_sse2_lowp+0xb7c>
+ .byte 196,98,125,121,13,231,152,0,0 // vpbroadcastw 0x98e7(%rip),%ymm9 # 393e0 <_sk_srcover_bgra_8888_sse2_lowp+0xb6c>
.byte 197,213,113,213,8 // vpsrlw $0x8,%ymm5,%ymm5
.byte 197,221,114,212,16 // vpsrld $0x10,%ymm4,%ymm4
.byte 197,197,114,215,16 // vpsrld $0x10,%ymm7,%ymm7
@@ -56446,19 +56516,19 @@ _sk_srcover_bgra_8888_hsw_lowp:
.byte 196,193,53,114,243,16 // vpslld $0x10,%ymm11,%ymm9
.byte 196,65,53,235,202 // vpor %ymm10,%ymm9,%ymm9
.byte 65,128,250,14 // cmp $0xe,%r10b
- .byte 119,26 // ja 2fa63 <_sk_srcover_bgra_8888_hsw_lowp+0x283>
- .byte 76,141,21,236,0,0,0 // lea 0xec(%rip),%r10 # 2fb3c <_sk_srcover_bgra_8888_hsw_lowp+0x35c>
+ .byte 119,26 // ja 2fbd3 <_sk_srcover_bgra_8888_hsw_lowp+0x283>
+ .byte 76,141,21,236,0,0,0 // lea 0xec(%rip),%r10 # 2fcac <_sk_srcover_bgra_8888_hsw_lowp+0x35c>
.byte 75,99,4,130 // movslq (%r10,%r8,4),%rax
.byte 76,1,208 // add %r10,%rax
.byte 255,224 // jmpq *%rax
.byte 196,65,121,126,1 // vmovd %xmm8,(%r9)
- .byte 233,151,0,0,0 // jmpq 2fafa <_sk_srcover_bgra_8888_hsw_lowp+0x31a>
+ .byte 233,151,0,0,0 // jmpq 2fc6a <_sk_srcover_bgra_8888_hsw_lowp+0x31a>
.byte 196,65,126,127,1 // vmovdqu %ymm8,(%r9)
.byte 196,65,126,127,73,32 // vmovdqu %ymm9,0x20(%r9)
- .byte 233,135,0,0,0 // jmpq 2fafa <_sk_srcover_bgra_8888_hsw_lowp+0x31a>
+ .byte 233,135,0,0,0 // jmpq 2fc6a <_sk_srcover_bgra_8888_hsw_lowp+0x31a>
.byte 196,67,121,22,65,8,2 // vpextrd $0x2,%xmm8,0x8(%r9)
.byte 196,65,121,214,1 // vmovq %xmm8,(%r9)
- .byte 235,121 // jmp 2fafa <_sk_srcover_bgra_8888_hsw_lowp+0x31a>
+ .byte 235,121 // jmp 2fc6a <_sk_srcover_bgra_8888_hsw_lowp+0x31a>
.byte 196,67,125,57,193,1 // vextracti128 $0x1,%ymm8,%xmm9
.byte 196,67,121,22,73,24,2 // vpextrd $0x2,%xmm9,0x18(%r9)
.byte 196,67,125,57,193,1 // vextracti128 $0x1,%ymm8,%xmm9
@@ -56466,12 +56536,12 @@ _sk_srcover_bgra_8888_hsw_lowp:
.byte 196,67,125,57,193,1 // vextracti128 $0x1,%ymm8,%xmm9
.byte 196,65,121,126,73,16 // vmovd %xmm9,0x10(%r9)
.byte 196,65,122,127,1 // vmovdqu %xmm8,(%r9)
- .byte 235,76 // jmp 2fafa <_sk_srcover_bgra_8888_hsw_lowp+0x31a>
+ .byte 235,76 // jmp 2fc6a <_sk_srcover_bgra_8888_hsw_lowp+0x31a>
.byte 196,67,121,22,73,40,2 // vpextrd $0x2,%xmm9,0x28(%r9)
.byte 196,67,121,22,73,36,1 // vpextrd $0x1,%xmm9,0x24(%r9)
.byte 196,65,121,126,73,32 // vmovd %xmm9,0x20(%r9)
.byte 196,65,126,127,1 // vmovdqu %ymm8,(%r9)
- .byte 235,49 // jmp 2fafa <_sk_srcover_bgra_8888_hsw_lowp+0x31a>
+ .byte 235,49 // jmp 2fc6a <_sk_srcover_bgra_8888_hsw_lowp+0x31a>
.byte 196,67,125,57,202,1 // vextracti128 $0x1,%ymm9,%xmm10
.byte 196,67,121,22,81,56,2 // vpextrd $0x2,%xmm10,0x38(%r9)
.byte 196,67,125,57,202,1 // vextracti128 $0x1,%ymm9,%xmm10
@@ -56519,7 +56589,7 @@ _sk_srcover_bgra_8888_hsw_lowp:
.byte 255,6 // incl (%rsi)
.byte 254 // (bad)
.byte 255 // (bad)
- .byte 255,29,255,255,255,62 // lcall *0x3effffff(%rip) # 3f02fb40 <_sk_srcover_bgra_8888_sse2_lowp+0x3eff743c>
+ .byte 255,29,255,255,255,62 // lcall *0x3effffff(%rip) # 3f02fcb0 <_sk_srcover_bgra_8888_sse2_lowp+0x3eff743c>
.byte 255 // (bad)
.byte 255 // (bad)
.byte 255,55 // pushq (%rdi)
@@ -56537,7 +56607,7 @@ _sk_srcover_bgra_8888_hsw_lowp:
.byte 255 // (bad)
.byte 255 // (bad)
.byte 255 // (bad)
- .byte 121,255 // jns 2fb61 <_sk_srcover_bgra_8888_hsw_lowp+0x381>
+ .byte 121,255 // jns 2fcd1 <_sk_srcover_bgra_8888_hsw_lowp+0x381>
.byte 255 // (bad)
.byte 255,114,255 // pushq -0x1(%rdx)
.byte 255 // (bad)
@@ -56570,13 +56640,13 @@ _sk_start_pipeline_sse41_lowp:
.byte 73,137,246 // mov %rsi,%r14
.byte 72,137,77,192 // mov %rcx,-0x40(%rbp)
.byte 72,57,203 // cmp %rcx,%rbx
- .byte 15,131,131,0,0,0 // jae 2fc2e <_sk_start_pipeline_sse41_lowp+0xb6>
+ .byte 15,131,131,0,0,0 // jae 2fd9e <_sk_start_pipeline_sse41_lowp+0xb6>
.byte 72,139,69,208 // mov -0x30(%rbp),%rax
.byte 72,141,64,8 // lea 0x8(%rax),%rax
.byte 72,137,69,200 // mov %rax,-0x38(%rbp)
.byte 76,57,125,200 // cmp %r15,-0x38(%rbp)
.byte 72,139,85,208 // mov -0x30(%rbp),%rdx
- .byte 119,59 // ja 2fbfc <_sk_start_pipeline_sse41_lowp+0x84>
+ .byte 119,59 // ja 2fd6c <_sk_start_pipeline_sse41_lowp+0x84>
.byte 76,139,101,208 // mov -0x30(%rbp),%r12
.byte 49,255 // xor %edi,%edi
.byte 15,87,192 // xorps %xmm0,%xmm0
@@ -56595,10 +56665,10 @@ _sk_start_pipeline_sse41_lowp:
.byte 73,131,196,16 // add $0x10,%r12
.byte 77,57,252 // cmp %r15,%r12
.byte 73,137,212 // mov %rdx,%r12
- .byte 118,201 // jbe 2fbc5 <_sk_start_pipeline_sse41_lowp+0x4d>
+ .byte 118,201 // jbe 2fd35 <_sk_start_pipeline_sse41_lowp+0x4d>
.byte 76,137,255 // mov %r15,%rdi
.byte 72,41,215 // sub %rdx,%rdi
- .byte 116,33 // je 2fc25 <_sk_start_pipeline_sse41_lowp+0xad>
+ .byte 116,33 // je 2fd95 <_sk_start_pipeline_sse41_lowp+0xad>
.byte 15,87,192 // xorps %xmm0,%xmm0
.byte 15,87,201 // xorps %xmm1,%xmm1
.byte 15,87,210 // xorps %xmm2,%xmm2
@@ -56612,7 +56682,7 @@ _sk_start_pipeline_sse41_lowp:
.byte 65,255,213 // callq *%r13
.byte 72,255,195 // inc %rbx
.byte 72,59,93,192 // cmp -0x40(%rbp),%rbx
- .byte 117,137 // jne 2fbb7 <_sk_start_pipeline_sse41_lowp+0x3f>
+ .byte 117,137 // jne 2fd27 <_sk_start_pipeline_sse41_lowp+0x3f>
.byte 72,131,196,24 // add $0x18,%rsp
.byte 91 // pop %rbx
.byte 65,92 // pop %r12
@@ -56643,7 +56713,7 @@ _sk_seed_shader_sse41_lowp:
.byte 102,15,110,209 // movd %ecx,%xmm2
.byte 102,15,112,210,0 // pshufd $0x0,%xmm2,%xmm2
.byte 15,91,210 // cvtdq2ps %xmm2,%xmm2
- .byte 15,88,21,116,150,0,0 // addps 0x9674(%rip),%xmm2 # 392e0 <_sk_srcover_bgra_8888_sse2_lowp+0xbdc>
+ .byte 15,88,21,100,150,0,0 // addps 0x9664(%rip),%xmm2 # 39440 <_sk_srcover_bgra_8888_sse2_lowp+0xbcc>
.byte 72,173 // lods %ds:(%rsi),%rax
.byte 15,40,218 // movaps %xmm2,%xmm3
.byte 255,224 // jmpq *%rax
@@ -56820,7 +56890,7 @@ HIDDEN _sk_black_color_sse41_lowp
FUNCTION(_sk_black_color_sse41_lowp)
_sk_black_color_sse41_lowp:
.byte 72,173 // lods %ds:(%rsi),%rax
- .byte 15,40,29,185,147,0,0 // movaps 0x93b9(%rip),%xmm3 # 39290 <_sk_srcover_bgra_8888_sse2_lowp+0xb8c>
+ .byte 15,40,29,169,147,0,0 // movaps 0x93a9(%rip),%xmm3 # 393f0 <_sk_srcover_bgra_8888_sse2_lowp+0xb7c>
.byte 15,87,192 // xorps %xmm0,%xmm0
.byte 15,87,201 // xorps %xmm1,%xmm1
.byte 15,87,210 // xorps %xmm2,%xmm2
@@ -56831,7 +56901,7 @@ HIDDEN _sk_white_color_sse41_lowp
FUNCTION(_sk_white_color_sse41_lowp)
_sk_white_color_sse41_lowp:
.byte 72,173 // lods %ds:(%rsi),%rax
- .byte 15,40,5,165,147,0,0 // movaps 0x93a5(%rip),%xmm0 # 39290 <_sk_srcover_bgra_8888_sse2_lowp+0xb8c>
+ .byte 15,40,5,149,147,0,0 // movaps 0x9395(%rip),%xmm0 # 393f0 <_sk_srcover_bgra_8888_sse2_lowp+0xb7c>
.byte 15,40,200 // movaps %xmm0,%xmm1
.byte 15,40,208 // movaps %xmm0,%xmm2
.byte 15,40,216 // movaps %xmm0,%xmm3
@@ -56842,10 +56912,10 @@ HIDDEN _sk_set_rgb_sse41_lowp
FUNCTION(_sk_set_rgb_sse41_lowp)
_sk_set_rgb_sse41_lowp:
.byte 72,173 // lods %ds:(%rsi),%rax
- .byte 243,15,16,21,224,138,0,0 // movss 0x8ae0(%rip),%xmm2 # 389e0 <_sk_srcover_bgra_8888_sse2_lowp+0x2dc>
+ .byte 243,15,16,21,224,138,0,0 // movss 0x8ae0(%rip),%xmm2 # 38b50 <_sk_srcover_bgra_8888_sse2_lowp+0x2dc>
.byte 243,15,16,0 // movss (%rax),%xmm0
.byte 243,15,89,194 // mulss %xmm2,%xmm0
- .byte 243,68,15,16,5,159,138,0,0 // movss 0x8a9f(%rip),%xmm8 # 389b0 <_sk_srcover_bgra_8888_sse2_lowp+0x2ac>
+ .byte 243,68,15,16,5,159,138,0,0 // movss 0x8a9f(%rip),%xmm8 # 38b20 <_sk_srcover_bgra_8888_sse2_lowp+0x2ac>
.byte 243,65,15,88,192 // addss %xmm8,%xmm0
.byte 243,68,15,44,192 // cvttss2si %xmm0,%r8d
.byte 102,65,15,110,192 // movd %r8d,%xmm0
@@ -56892,7 +56962,7 @@ HIDDEN _sk_premul_sse41_lowp
FUNCTION(_sk_premul_sse41_lowp)
_sk_premul_sse41_lowp:
.byte 102,15,213,195 // pmullw %xmm3,%xmm0
- .byte 102,68,15,111,5,241,146,0,0 // movdqa 0x92f1(%rip),%xmm8 # 39290 <_sk_srcover_bgra_8888_sse2_lowp+0xb8c>
+ .byte 102,68,15,111,5,225,146,0,0 // movdqa 0x92e1(%rip),%xmm8 # 393f0 <_sk_srcover_bgra_8888_sse2_lowp+0xb7c>
.byte 102,65,15,253,192 // paddw %xmm8,%xmm0
.byte 102,15,113,208,8 // psrlw $0x8,%xmm0
.byte 102,15,213,203 // pmullw %xmm3,%xmm1
@@ -56909,7 +56979,7 @@ HIDDEN _sk_premul_dst_sse41_lowp
FUNCTION(_sk_premul_dst_sse41_lowp)
_sk_premul_dst_sse41_lowp:
.byte 102,15,213,231 // pmullw %xmm7,%xmm4
- .byte 102,68,15,111,5,186,146,0,0 // movdqa 0x92ba(%rip),%xmm8 # 39290 <_sk_srcover_bgra_8888_sse2_lowp+0xb8c>
+ .byte 102,68,15,111,5,170,146,0,0 // movdqa 0x92aa(%rip),%xmm8 # 393f0 <_sk_srcover_bgra_8888_sse2_lowp+0xb7c>
.byte 102,65,15,253,224 // paddw %xmm8,%xmm4
.byte 102,15,113,212,8 // psrlw $0x8,%xmm4
.byte 102,15,213,239 // pmullw %xmm7,%xmm5
@@ -56957,7 +57027,7 @@ HIDDEN _sk_invert_sse41_lowp
.globl _sk_invert_sse41_lowp
FUNCTION(_sk_invert_sse41_lowp)
_sk_invert_sse41_lowp:
- .byte 102,68,15,111,5,88,146,0,0 // movdqa 0x9258(%rip),%xmm8 # 39290 <_sk_srcover_bgra_8888_sse2_lowp+0xb8c>
+ .byte 102,68,15,111,5,72,146,0,0 // movdqa 0x9248(%rip),%xmm8 # 393f0 <_sk_srcover_bgra_8888_sse2_lowp+0xb7c>
.byte 102,69,15,111,200 // movdqa %xmm8,%xmm9
.byte 102,68,15,249,200 // psubw %xmm0,%xmm9
.byte 102,69,15,111,208 // movdqa %xmm8,%xmm10
@@ -56989,7 +57059,7 @@ FUNCTION(_sk_srcatop_sse41_lowp)
_sk_srcatop_sse41_lowp:
.byte 102,68,15,111,195 // movdqa %xmm3,%xmm8
.byte 102,15,213,199 // pmullw %xmm7,%xmm0
- .byte 102,15,111,29,252,145,0,0 // movdqa 0x91fc(%rip),%xmm3 # 39290 <_sk_srcover_bgra_8888_sse2_lowp+0xb8c>
+ .byte 102,15,111,29,236,145,0,0 // movdqa 0x91ec(%rip),%xmm3 # 393f0 <_sk_srcover_bgra_8888_sse2_lowp+0xb7c>
.byte 102,68,15,111,203 // movdqa %xmm3,%xmm9
.byte 102,69,15,249,200 // psubw %xmm8,%xmm9
.byte 102,69,15,111,193 // movdqa %xmm9,%xmm8
@@ -57021,7 +57091,7 @@ FUNCTION(_sk_dstatop_sse41_lowp)
_sk_dstatop_sse41_lowp:
.byte 102,68,15,111,196 // movdqa %xmm4,%xmm8
.byte 102,68,15,213,195 // pmullw %xmm3,%xmm8
- .byte 102,68,15,111,13,124,145,0,0 // movdqa 0x917c(%rip),%xmm9 # 39290 <_sk_srcover_bgra_8888_sse2_lowp+0xb8c>
+ .byte 102,68,15,111,13,108,145,0,0 // movdqa 0x916c(%rip),%xmm9 # 393f0 <_sk_srcover_bgra_8888_sse2_lowp+0xb7c>
.byte 102,69,15,253,193 // paddw %xmm9,%xmm8
.byte 102,68,15,111,213 // movdqa %xmm5,%xmm10
.byte 102,68,15,213,211 // pmullw %xmm3,%xmm10
@@ -57050,7 +57120,7 @@ HIDDEN _sk_srcin_sse41_lowp
FUNCTION(_sk_srcin_sse41_lowp)
_sk_srcin_sse41_lowp:
.byte 102,15,213,199 // pmullw %xmm7,%xmm0
- .byte 102,68,15,111,5,7,145,0,0 // movdqa 0x9107(%rip),%xmm8 # 39290 <_sk_srcover_bgra_8888_sse2_lowp+0xb8c>
+ .byte 102,68,15,111,5,247,144,0,0 // movdqa 0x90f7(%rip),%xmm8 # 393f0 <_sk_srcover_bgra_8888_sse2_lowp+0xb7c>
.byte 102,65,15,253,192 // paddw %xmm8,%xmm0
.byte 102,15,113,208,8 // psrlw $0x8,%xmm0
.byte 102,15,213,207 // pmullw %xmm7,%xmm1
@@ -57071,7 +57141,7 @@ FUNCTION(_sk_dstin_sse41_lowp)
_sk_dstin_sse41_lowp:
.byte 102,15,111,196 // movdqa %xmm4,%xmm0
.byte 102,15,213,195 // pmullw %xmm3,%xmm0
- .byte 102,68,15,111,5,190,144,0,0 // movdqa 0x90be(%rip),%xmm8 # 39290 <_sk_srcover_bgra_8888_sse2_lowp+0xb8c>
+ .byte 102,68,15,111,5,174,144,0,0 // movdqa 0x90ae(%rip),%xmm8 # 393f0 <_sk_srcover_bgra_8888_sse2_lowp+0xb7c>
.byte 102,65,15,253,192 // paddw %xmm8,%xmm0
.byte 102,15,113,208,8 // psrlw $0x8,%xmm0
.byte 102,15,111,205 // movdqa %xmm5,%xmm1
@@ -57092,7 +57162,7 @@ HIDDEN _sk_srcout_sse41_lowp
.globl _sk_srcout_sse41_lowp
FUNCTION(_sk_srcout_sse41_lowp)
_sk_srcout_sse41_lowp:
- .byte 102,68,15,111,5,117,144,0,0 // movdqa 0x9075(%rip),%xmm8 # 39290 <_sk_srcover_bgra_8888_sse2_lowp+0xb8c>
+ .byte 102,68,15,111,5,101,144,0,0 // movdqa 0x9065(%rip),%xmm8 # 393f0 <_sk_srcover_bgra_8888_sse2_lowp+0xb7c>
.byte 102,69,15,111,200 // movdqa %xmm8,%xmm9
.byte 102,68,15,249,207 // psubw %xmm7,%xmm9
.byte 102,65,15,213,193 // pmullw %xmm9,%xmm0
@@ -57115,7 +57185,7 @@ HIDDEN _sk_dstout_sse41_lowp
FUNCTION(_sk_dstout_sse41_lowp)
_sk_dstout_sse41_lowp:
.byte 102,15,111,195 // movdqa %xmm3,%xmm0
- .byte 102,68,15,111,5,30,144,0,0 // movdqa 0x901e(%rip),%xmm8 # 39290 <_sk_srcover_bgra_8888_sse2_lowp+0xb8c>
+ .byte 102,68,15,111,5,14,144,0,0 // movdqa 0x900e(%rip),%xmm8 # 393f0 <_sk_srcover_bgra_8888_sse2_lowp+0xb7c>
.byte 102,65,15,111,216 // movdqa %xmm8,%xmm3
.byte 102,15,249,216 // psubw %xmm0,%xmm3
.byte 102,15,111,195 // movdqa %xmm3,%xmm0
@@ -57140,7 +57210,7 @@ HIDDEN _sk_srcover_sse41_lowp
.globl _sk_srcover_sse41_lowp
FUNCTION(_sk_srcover_sse41_lowp)
_sk_srcover_sse41_lowp:
- .byte 102,68,15,111,13,196,143,0,0 // movdqa 0x8fc4(%rip),%xmm9 # 39290 <_sk_srcover_bgra_8888_sse2_lowp+0xb8c>
+ .byte 102,68,15,111,13,180,143,0,0 // movdqa 0x8fb4(%rip),%xmm9 # 393f0 <_sk_srcover_bgra_8888_sse2_lowp+0xb7c>
.byte 102,69,15,111,193 // movdqa %xmm9,%xmm8
.byte 102,68,15,249,195 // psubw %xmm3,%xmm8
.byte 102,69,15,111,208 // movdqa %xmm8,%xmm10
@@ -57169,7 +57239,7 @@ HIDDEN _sk_dstover_sse41_lowp
.globl _sk_dstover_sse41_lowp
FUNCTION(_sk_dstover_sse41_lowp)
_sk_dstover_sse41_lowp:
- .byte 102,68,15,111,5,74,143,0,0 // movdqa 0x8f4a(%rip),%xmm8 # 39290 <_sk_srcover_bgra_8888_sse2_lowp+0xb8c>
+ .byte 102,68,15,111,5,58,143,0,0 // movdqa 0x8f3a(%rip),%xmm8 # 393f0 <_sk_srcover_bgra_8888_sse2_lowp+0xb7c>
.byte 102,69,15,111,200 // movdqa %xmm8,%xmm9
.byte 102,68,15,249,207 // psubw %xmm7,%xmm9
.byte 102,65,15,213,193 // pmullw %xmm9,%xmm0
@@ -57196,7 +57266,7 @@ HIDDEN _sk_modulate_sse41_lowp
FUNCTION(_sk_modulate_sse41_lowp)
_sk_modulate_sse41_lowp:
.byte 102,15,213,196 // pmullw %xmm4,%xmm0
- .byte 102,68,15,111,5,227,142,0,0 // movdqa 0x8ee3(%rip),%xmm8 # 39290 <_sk_srcover_bgra_8888_sse2_lowp+0xb8c>
+ .byte 102,68,15,111,5,211,142,0,0 // movdqa 0x8ed3(%rip),%xmm8 # 393f0 <_sk_srcover_bgra_8888_sse2_lowp+0xb7c>
.byte 102,65,15,253,192 // paddw %xmm8,%xmm0
.byte 102,15,113,208,8 // psrlw $0x8,%xmm0
.byte 102,15,213,205 // pmullw %xmm5,%xmm1
@@ -57215,7 +57285,7 @@ HIDDEN _sk_multiply_sse41_lowp
.globl _sk_multiply_sse41_lowp
FUNCTION(_sk_multiply_sse41_lowp)
_sk_multiply_sse41_lowp:
- .byte 102,68,15,111,13,162,142,0,0 // movdqa 0x8ea2(%rip),%xmm9 # 39290 <_sk_srcover_bgra_8888_sse2_lowp+0xb8c>
+ .byte 102,68,15,111,13,146,142,0,0 // movdqa 0x8e92(%rip),%xmm9 # 393f0 <_sk_srcover_bgra_8888_sse2_lowp+0xb7c>
.byte 102,69,15,111,193 // movdqa %xmm9,%xmm8
.byte 102,68,15,249,195 // psubw %xmm3,%xmm8
.byte 102,69,15,111,208 // movdqa %xmm8,%xmm10
@@ -57258,7 +57328,7 @@ HIDDEN _sk_plus__sse41_lowp
FUNCTION(_sk_plus__sse41_lowp)
_sk_plus__sse41_lowp:
.byte 102,15,253,196 // paddw %xmm4,%xmm0
- .byte 102,68,15,111,5,231,141,0,0 // movdqa 0x8de7(%rip),%xmm8 # 39290 <_sk_srcover_bgra_8888_sse2_lowp+0xb8c>
+ .byte 102,68,15,111,5,215,141,0,0 // movdqa 0x8dd7(%rip),%xmm8 # 393f0 <_sk_srcover_bgra_8888_sse2_lowp+0xb7c>
.byte 102,65,15,56,58,192 // pminuw %xmm8,%xmm0
.byte 102,15,253,205 // paddw %xmm5,%xmm1
.byte 102,65,15,56,58,200 // pminuw %xmm8,%xmm1
@@ -57278,7 +57348,7 @@ _sk_screen_sse41_lowp:
.byte 102,15,111,196 // movdqa %xmm4,%xmm0
.byte 102,15,253,193 // paddw %xmm1,%xmm0
.byte 102,15,213,204 // pmullw %xmm4,%xmm1
- .byte 102,68,15,111,21,161,141,0,0 // movdqa 0x8da1(%rip),%xmm10 # 39290 <_sk_srcover_bgra_8888_sse2_lowp+0xb8c>
+ .byte 102,68,15,111,21,145,141,0,0 // movdqa 0x8d91(%rip),%xmm10 # 393f0 <_sk_srcover_bgra_8888_sse2_lowp+0xb7c>
.byte 102,65,15,253,202 // paddw %xmm10,%xmm1
.byte 102,15,113,209,8 // psrlw $0x8,%xmm1
.byte 102,15,249,193 // psubw %xmm1,%xmm0
@@ -57310,7 +57380,7 @@ HIDDEN _sk_xor__sse41_lowp
FUNCTION(_sk_xor__sse41_lowp)
_sk_xor__sse41_lowp:
.byte 102,68,15,111,195 // movdqa %xmm3,%xmm8
- .byte 102,68,15,111,13,31,141,0,0 // movdqa 0x8d1f(%rip),%xmm9 # 39290 <_sk_srcover_bgra_8888_sse2_lowp+0xb8c>
+ .byte 102,68,15,111,13,15,141,0,0 // movdqa 0x8d0f(%rip),%xmm9 # 393f0 <_sk_srcover_bgra_8888_sse2_lowp+0xb7c>
.byte 102,69,15,111,209 // movdqa %xmm9,%xmm10
.byte 102,68,15,249,215 // psubw %xmm7,%xmm10
.byte 102,65,15,213,194 // pmullw %xmm10,%xmm0
@@ -57353,7 +57423,7 @@ _sk_darken_sse41_lowp:
.byte 102,68,15,213,207 // pmullw %xmm7,%xmm9
.byte 102,15,213,203 // pmullw %xmm3,%xmm1
.byte 102,65,15,56,62,201 // pmaxuw %xmm9,%xmm1
- .byte 102,68,15,111,13,102,140,0,0 // movdqa 0x8c66(%rip),%xmm9 # 39290 <_sk_srcover_bgra_8888_sse2_lowp+0xb8c>
+ .byte 102,68,15,111,13,86,140,0,0 // movdqa 0x8c56(%rip),%xmm9 # 393f0 <_sk_srcover_bgra_8888_sse2_lowp+0xb7c>
.byte 102,65,15,253,201 // paddw %xmm9,%xmm1
.byte 102,15,113,209,8 // psrlw $0x8,%xmm1
.byte 102,15,249,193 // psubw %xmm1,%xmm0
@@ -57397,7 +57467,7 @@ _sk_lighten_sse41_lowp:
.byte 102,68,15,213,207 // pmullw %xmm7,%xmm9
.byte 102,15,213,203 // pmullw %xmm3,%xmm1
.byte 102,65,15,56,58,201 // pminuw %xmm9,%xmm1
- .byte 102,68,15,111,13,169,139,0,0 // movdqa 0x8ba9(%rip),%xmm9 # 39290 <_sk_srcover_bgra_8888_sse2_lowp+0xb8c>
+ .byte 102,68,15,111,13,153,139,0,0 // movdqa 0x8b99(%rip),%xmm9 # 393f0 <_sk_srcover_bgra_8888_sse2_lowp+0xb7c>
.byte 102,65,15,253,201 // paddw %xmm9,%xmm1
.byte 102,15,113,209,8 // psrlw $0x8,%xmm1
.byte 102,15,249,193 // psubw %xmm1,%xmm0
@@ -57441,10 +57511,10 @@ _sk_difference_sse41_lowp:
.byte 102,68,15,213,207 // pmullw %xmm7,%xmm9
.byte 102,15,213,203 // pmullw %xmm3,%xmm1
.byte 102,65,15,56,58,201 // pminuw %xmm9,%xmm1
- .byte 102,68,15,111,13,236,138,0,0 // movdqa 0x8aec(%rip),%xmm9 # 39290 <_sk_srcover_bgra_8888_sse2_lowp+0xb8c>
+ .byte 102,68,15,111,13,220,138,0,0 // movdqa 0x8adc(%rip),%xmm9 # 393f0 <_sk_srcover_bgra_8888_sse2_lowp+0xb7c>
.byte 102,65,15,253,201 // paddw %xmm9,%xmm1
.byte 102,15,113,209,7 // psrlw $0x7,%xmm1
- .byte 102,68,15,111,21,57,146,0,0 // movdqa 0x9239(%rip),%xmm10 # 399f0 <_sk_srcover_bgra_8888_sse2_lowp+0x12ec>
+ .byte 102,68,15,111,21,41,146,0,0 // movdqa 0x9229(%rip),%xmm10 # 39b50 <_sk_srcover_bgra_8888_sse2_lowp+0x12dc>
.byte 102,65,15,219,202 // pand %xmm10,%xmm1
.byte 102,15,249,193 // psubw %xmm1,%xmm0
.byte 102,68,15,111,221 // movdqa %xmm5,%xmm11
@@ -57486,10 +57556,10 @@ _sk_exclusion_sse41_lowp:
.byte 102,15,111,196 // movdqa %xmm4,%xmm0
.byte 102,15,253,193 // paddw %xmm1,%xmm0
.byte 102,15,213,204 // pmullw %xmm4,%xmm1
- .byte 102,68,15,111,13,40,138,0,0 // movdqa 0x8a28(%rip),%xmm9 # 39290 <_sk_srcover_bgra_8888_sse2_lowp+0xb8c>
+ .byte 102,68,15,111,13,24,138,0,0 // movdqa 0x8a18(%rip),%xmm9 # 393f0 <_sk_srcover_bgra_8888_sse2_lowp+0xb7c>
.byte 102,65,15,253,201 // paddw %xmm9,%xmm1
.byte 102,15,113,209,7 // psrlw $0x7,%xmm1
- .byte 102,68,15,111,21,117,145,0,0 // movdqa 0x9175(%rip),%xmm10 # 399f0 <_sk_srcover_bgra_8888_sse2_lowp+0x12ec>
+ .byte 102,68,15,111,21,101,145,0,0 // movdqa 0x9165(%rip),%xmm10 # 39b50 <_sk_srcover_bgra_8888_sse2_lowp+0x12dc>
.byte 102,65,15,219,202 // pand %xmm10,%xmm1
.byte 102,15,249,193 // psubw %xmm1,%xmm0
.byte 102,15,111,205 // movdqa %xmm5,%xmm1
@@ -57525,14 +57595,14 @@ _sk_hardlight_sse41_lowp:
.byte 102,15,111,245 // movdqa %xmm5,%xmm6
.byte 102,15,111,236 // movdqa %xmm4,%xmm5
.byte 102,68,15,111,192 // movdqa %xmm0,%xmm8
- .byte 102,68,15,111,29,131,137,0,0 // movdqa 0x8983(%rip),%xmm11 # 39290 <_sk_srcover_bgra_8888_sse2_lowp+0xb8c>
+ .byte 102,68,15,111,29,115,137,0,0 // movdqa 0x8973(%rip),%xmm11 # 393f0 <_sk_srcover_bgra_8888_sse2_lowp+0xb7c>
.byte 102,69,15,249,220 // psubw %xmm12,%xmm11
.byte 102,65,15,111,227 // movdqa %xmm11,%xmm4
.byte 102,65,15,213,224 // pmullw %xmm8,%xmm4
.byte 102,68,15,111,251 // movdqa %xmm3,%xmm15
.byte 102,69,15,249,248 // psubw %xmm8,%xmm15
.byte 102,69,15,253,192 // paddw %xmm8,%xmm8
- .byte 102,68,15,111,13,204,144,0,0 // movdqa 0x90cc(%rip),%xmm9 # 39a00 <_sk_srcover_bgra_8888_sse2_lowp+0x12fc>
+ .byte 102,68,15,111,13,188,144,0,0 // movdqa 0x90bc(%rip),%xmm9 # 39b60 <_sk_srcover_bgra_8888_sse2_lowp+0x12ec>
.byte 102,68,15,111,243 // movdqa %xmm3,%xmm14
.byte 102,69,15,239,241 // pxor %xmm9,%xmm14
.byte 102,65,15,111,196 // movdqa %xmm12,%xmm0
@@ -57579,7 +57649,7 @@ _sk_hardlight_sse41_lowp:
.byte 102,68,15,111,232 // movdqa %xmm0,%xmm13
.byte 102,65,15,111,193 // movdqa %xmm9,%xmm0
.byte 102,65,15,56,16,210 // pblendvb %xmm0,%xmm10,%xmm2
- .byte 102,68,15,111,13,116,136,0,0 // movdqa 0x8874(%rip),%xmm9 # 39290 <_sk_srcover_bgra_8888_sse2_lowp+0xb8c>
+ .byte 102,68,15,111,13,100,136,0,0 // movdqa 0x8864(%rip),%xmm9 # 393f0 <_sk_srcover_bgra_8888_sse2_lowp+0xb7c>
.byte 102,65,15,111,193 // movdqa %xmm9,%xmm0
.byte 102,15,249,195 // psubw %xmm3,%xmm0
.byte 102,15,111,248 // movdqa %xmm0,%xmm7
@@ -57618,7 +57688,7 @@ FUNCTION(_sk_overlay_sse41_lowp)
_sk_overlay_sse41_lowp:
.byte 102,68,15,111,231 // movdqa %xmm7,%xmm12
.byte 102,68,15,111,192 // movdqa %xmm0,%xmm8
- .byte 102,68,15,111,29,215,135,0,0 // movdqa 0x87d7(%rip),%xmm11 # 39290 <_sk_srcover_bgra_8888_sse2_lowp+0xb8c>
+ .byte 102,68,15,111,29,199,135,0,0 // movdqa 0x87c7(%rip),%xmm11 # 393f0 <_sk_srcover_bgra_8888_sse2_lowp+0xb7c>
.byte 102,69,15,249,220 // psubw %xmm12,%xmm11
.byte 102,65,15,111,195 // movdqa %xmm11,%xmm0
.byte 102,65,15,213,192 // pmullw %xmm8,%xmm0
@@ -57630,7 +57700,7 @@ _sk_overlay_sse41_lowp:
.byte 102,68,15,249,252 // psubw %xmm4,%xmm15
.byte 102,15,111,196 // movdqa %xmm4,%xmm0
.byte 102,15,253,192 // paddw %xmm0,%xmm0
- .byte 102,68,15,111,13,8,143,0,0 // movdqa 0x8f08(%rip),%xmm9 # 39a00 <_sk_srcover_bgra_8888_sse2_lowp+0x12fc>
+ .byte 102,68,15,111,13,248,142,0,0 // movdqa 0x8ef8(%rip),%xmm9 # 39b60 <_sk_srcover_bgra_8888_sse2_lowp+0x12ec>
.byte 102,69,15,111,212 // movdqa %xmm12,%xmm10
.byte 102,68,15,213,211 // pmullw %xmm3,%xmm10
.byte 102,69,15,213,254 // pmullw %xmm14,%xmm15
@@ -57676,7 +57746,7 @@ _sk_overlay_sse41_lowp:
.byte 102,15,253,210 // paddw %xmm2,%xmm2
.byte 102,65,15,111,193 // movdqa %xmm9,%xmm0
.byte 102,65,15,56,16,210 // pblendvb %xmm0,%xmm10,%xmm2
- .byte 102,68,15,111,13,186,134,0,0 // movdqa 0x86ba(%rip),%xmm9 # 39290 <_sk_srcover_bgra_8888_sse2_lowp+0xb8c>
+ .byte 102,68,15,111,13,170,134,0,0 // movdqa 0x86aa(%rip),%xmm9 # 393f0 <_sk_srcover_bgra_8888_sse2_lowp+0xb7c>
.byte 102,65,15,111,193 // movdqa %xmm9,%xmm0
.byte 102,15,249,195 // psubw %xmm3,%xmm0
.byte 102,15,111,248 // movdqa %xmm0,%xmm7
@@ -57719,49 +57789,49 @@ _sk_load_8888_sse41_lowp:
.byte 36,7 // and $0x7,%al
.byte 254,200 // dec %al
.byte 60,6 // cmp $0x6,%al
- .byte 119,41 // ja 30c98 <_sk_load_8888_sse41_lowp+0x44>
+ .byte 119,41 // ja 30e08 <_sk_load_8888_sse41_lowp+0x44>
.byte 102,69,15,239,192 // pxor %xmm8,%xmm8
.byte 15,182,192 // movzbl %al,%eax
- .byte 76,141,13,242,0,0,0 // lea 0xf2(%rip),%r9 # 30d70 <_sk_load_8888_sse41_lowp+0x11c>
+ .byte 76,141,13,242,0,0,0 // lea 0xf2(%rip),%r9 # 30ee0 <_sk_load_8888_sse41_lowp+0x11c>
.byte 73,99,4,129 // movslq (%r9,%rax,4),%rax
.byte 76,1,200 // add %r9,%rax
.byte 102,15,239,192 // pxor %xmm0,%xmm0
.byte 255,224 // jmpq *%rax
.byte 102,65,15,110,20,144 // movd (%r8,%rdx,4),%xmm2
.byte 102,69,15,239,192 // pxor %xmm8,%xmm8
- .byte 235,89 // jmp 30cf1 <_sk_load_8888_sse41_lowp+0x9d>
+ .byte 235,89 // jmp 30e61 <_sk_load_8888_sse41_lowp+0x9d>
.byte 243,65,15,111,20,144 // movdqu (%r8,%rdx,4),%xmm2
.byte 243,69,15,111,68,144,16 // movdqu 0x10(%r8,%rdx,4),%xmm8
- .byte 235,74 // jmp 30cf1 <_sk_load_8888_sse41_lowp+0x9d>
+ .byte 235,74 // jmp 30e61 <_sk_load_8888_sse41_lowp+0x9d>
.byte 102,65,15,110,68,144,8 // movd 0x8(%r8,%rdx,4),%xmm0
.byte 102,68,15,112,192,69 // pshufd $0x45,%xmm0,%xmm8
.byte 102,15,239,192 // pxor %xmm0,%xmm0
.byte 243,65,15,126,20,144 // movq (%r8,%rdx,4),%xmm2
.byte 102,65,15,58,14,208,240 // pblendw $0xf0,%xmm8,%xmm2
.byte 102,68,15,111,192 // movdqa %xmm0,%xmm8
- .byte 235,37 // jmp 30cf1 <_sk_load_8888_sse41_lowp+0x9d>
+ .byte 235,37 // jmp 30e61 <_sk_load_8888_sse41_lowp+0x9d>
.byte 102,65,15,110,68,144,24 // movd 0x18(%r8,%rdx,4),%xmm0
.byte 102,68,15,112,192,69 // pshufd $0x45,%xmm0,%xmm8
.byte 102,69,15,58,34,68,144,20,1 // pinsrd $0x1,0x14(%r8,%rdx,4),%xmm8
.byte 102,69,15,58,34,68,144,16,0 // pinsrd $0x0,0x10(%r8,%rdx,4),%xmm8
.byte 243,65,15,111,20,144 // movdqu (%r8,%rdx,4),%xmm2
- .byte 102,15,111,5,23,141,0,0 // movdqa 0x8d17(%rip),%xmm0 # 39a10 <_sk_srcover_bgra_8888_sse2_lowp+0x130c>
+ .byte 102,15,111,5,7,141,0,0 // movdqa 0x8d07(%rip),%xmm0 # 39b70 <_sk_srcover_bgra_8888_sse2_lowp+0x12fc>
.byte 102,15,111,202 // movdqa %xmm2,%xmm1
.byte 102,15,56,0,200 // pshufb %xmm0,%xmm1
.byte 102,65,15,111,216 // movdqa %xmm8,%xmm3
.byte 102,15,56,0,216 // pshufb %xmm0,%xmm3
.byte 102,15,108,203 // punpcklqdq %xmm3,%xmm1
- .byte 102,68,15,111,13,119,133,0,0 // movdqa 0x8577(%rip),%xmm9 # 39290 <_sk_srcover_bgra_8888_sse2_lowp+0xb8c>
+ .byte 102,68,15,111,13,103,133,0,0 // movdqa 0x8567(%rip),%xmm9 # 393f0 <_sk_srcover_bgra_8888_sse2_lowp+0xb7c>
.byte 102,15,111,193 // movdqa %xmm1,%xmm0
.byte 102,65,15,219,193 // pand %xmm9,%xmm0
.byte 102,15,113,209,8 // psrlw $0x8,%xmm1
- .byte 102,68,15,111,21,240,140,0,0 // movdqa 0x8cf0(%rip),%xmm10 # 39a20 <_sk_srcover_bgra_8888_sse2_lowp+0x131c>
+ .byte 102,68,15,111,21,224,140,0,0 // movdqa 0x8ce0(%rip),%xmm10 # 39b80 <_sk_srcover_bgra_8888_sse2_lowp+0x130c>
.byte 102,15,111,218 // movdqa %xmm2,%xmm3
.byte 102,65,15,56,0,218 // pshufb %xmm10,%xmm3
.byte 102,69,15,111,216 // movdqa %xmm8,%xmm11
.byte 102,69,15,56,0,218 // pshufb %xmm10,%xmm11
.byte 102,65,15,108,219 // punpcklqdq %xmm11,%xmm3
- .byte 102,68,15,111,21,221,140,0,0 // movdqa 0x8cdd(%rip),%xmm10 # 39a30 <_sk_srcover_bgra_8888_sse2_lowp+0x132c>
+ .byte 102,68,15,111,21,205,140,0,0 // movdqa 0x8ccd(%rip),%xmm10 # 39b90 <_sk_srcover_bgra_8888_sse2_lowp+0x131c>
.byte 102,65,15,56,0,210 // pshufb %xmm10,%xmm2
.byte 102,69,15,56,0,194 // pshufb %xmm10,%xmm8
.byte 102,65,15,108,208 // punpcklqdq %xmm8,%xmm2
@@ -57777,7 +57847,7 @@ _sk_load_8888_sse41_lowp:
.byte 255 // (bad)
.byte 255 // (bad)
.byte 255 // (bad)
- .byte 123,255 // jnp 30d7d <_sk_load_8888_sse41_lowp+0x129>
+ .byte 123,255 // jnp 30eed <_sk_load_8888_sse41_lowp+0x129>
.byte 255 // (bad)
.byte 255,114,255 // pushq -0x1(%rdx)
.byte 255 // (bad)
@@ -57799,49 +57869,49 @@ _sk_load_8888_dst_sse41_lowp:
.byte 36,7 // and $0x7,%al
.byte 254,200 // dec %al
.byte 60,6 // cmp $0x6,%al
- .byte 119,41 // ja 30dd0 <_sk_load_8888_dst_sse41_lowp+0x44>
+ .byte 119,41 // ja 30f40 <_sk_load_8888_dst_sse41_lowp+0x44>
.byte 102,69,15,239,192 // pxor %xmm8,%xmm8
.byte 15,182,192 // movzbl %al,%eax
- .byte 76,141,13,242,0,0,0 // lea 0xf2(%rip),%r9 # 30ea8 <_sk_load_8888_dst_sse41_lowp+0x11c>
+ .byte 76,141,13,242,0,0,0 // lea 0xf2(%rip),%r9 # 31018 <_sk_load_8888_dst_sse41_lowp+0x11c>
.byte 73,99,4,129 // movslq (%r9,%rax,4),%rax
.byte 76,1,200 // add %r9,%rax
.byte 102,15,239,228 // pxor %xmm4,%xmm4
.byte 255,224 // jmpq *%rax
.byte 102,65,15,110,52,144 // movd (%r8,%rdx,4),%xmm6
.byte 102,69,15,239,192 // pxor %xmm8,%xmm8
- .byte 235,89 // jmp 30e29 <_sk_load_8888_dst_sse41_lowp+0x9d>
+ .byte 235,89 // jmp 30f99 <_sk_load_8888_dst_sse41_lowp+0x9d>
.byte 243,65,15,111,52,144 // movdqu (%r8,%rdx,4),%xmm6
.byte 243,69,15,111,68,144,16 // movdqu 0x10(%r8,%rdx,4),%xmm8
- .byte 235,74 // jmp 30e29 <_sk_load_8888_dst_sse41_lowp+0x9d>
+ .byte 235,74 // jmp 30f99 <_sk_load_8888_dst_sse41_lowp+0x9d>
.byte 102,65,15,110,100,144,8 // movd 0x8(%r8,%rdx,4),%xmm4
.byte 102,68,15,112,196,69 // pshufd $0x45,%xmm4,%xmm8
.byte 102,15,239,228 // pxor %xmm4,%xmm4
.byte 243,65,15,126,52,144 // movq (%r8,%rdx,4),%xmm6
.byte 102,65,15,58,14,240,240 // pblendw $0xf0,%xmm8,%xmm6
.byte 102,68,15,111,196 // movdqa %xmm4,%xmm8
- .byte 235,37 // jmp 30e29 <_sk_load_8888_dst_sse41_lowp+0x9d>
+ .byte 235,37 // jmp 30f99 <_sk_load_8888_dst_sse41_lowp+0x9d>
.byte 102,65,15,110,100,144,24 // movd 0x18(%r8,%rdx,4),%xmm4
.byte 102,68,15,112,196,69 // pshufd $0x45,%xmm4,%xmm8
.byte 102,69,15,58,34,68,144,20,1 // pinsrd $0x1,0x14(%r8,%rdx,4),%xmm8
.byte 102,69,15,58,34,68,144,16,0 // pinsrd $0x0,0x10(%r8,%rdx,4),%xmm8
.byte 243,65,15,111,52,144 // movdqu (%r8,%rdx,4),%xmm6
- .byte 102,15,111,37,223,139,0,0 // movdqa 0x8bdf(%rip),%xmm4 # 39a10 <_sk_srcover_bgra_8888_sse2_lowp+0x130c>
+ .byte 102,15,111,37,207,139,0,0 // movdqa 0x8bcf(%rip),%xmm4 # 39b70 <_sk_srcover_bgra_8888_sse2_lowp+0x12fc>
.byte 102,15,111,238 // movdqa %xmm6,%xmm5
.byte 102,15,56,0,236 // pshufb %xmm4,%xmm5
.byte 102,65,15,111,248 // movdqa %xmm8,%xmm7
.byte 102,15,56,0,252 // pshufb %xmm4,%xmm7
.byte 102,15,108,239 // punpcklqdq %xmm7,%xmm5
- .byte 102,68,15,111,13,63,132,0,0 // movdqa 0x843f(%rip),%xmm9 # 39290 <_sk_srcover_bgra_8888_sse2_lowp+0xb8c>
+ .byte 102,68,15,111,13,47,132,0,0 // movdqa 0x842f(%rip),%xmm9 # 393f0 <_sk_srcover_bgra_8888_sse2_lowp+0xb7c>
.byte 102,15,111,229 // movdqa %xmm5,%xmm4
.byte 102,65,15,219,225 // pand %xmm9,%xmm4
.byte 102,15,113,213,8 // psrlw $0x8,%xmm5
- .byte 102,68,15,111,21,184,139,0,0 // movdqa 0x8bb8(%rip),%xmm10 # 39a20 <_sk_srcover_bgra_8888_sse2_lowp+0x131c>
+ .byte 102,68,15,111,21,168,139,0,0 // movdqa 0x8ba8(%rip),%xmm10 # 39b80 <_sk_srcover_bgra_8888_sse2_lowp+0x130c>
.byte 102,15,111,254 // movdqa %xmm6,%xmm7
.byte 102,65,15,56,0,250 // pshufb %xmm10,%xmm7
.byte 102,69,15,111,216 // movdqa %xmm8,%xmm11
.byte 102,69,15,56,0,218 // pshufb %xmm10,%xmm11
.byte 102,65,15,108,251 // punpcklqdq %xmm11,%xmm7
- .byte 102,68,15,111,21,165,139,0,0 // movdqa 0x8ba5(%rip),%xmm10 # 39a30 <_sk_srcover_bgra_8888_sse2_lowp+0x132c>
+ .byte 102,68,15,111,21,149,139,0,0 // movdqa 0x8b95(%rip),%xmm10 # 39b90 <_sk_srcover_bgra_8888_sse2_lowp+0x131c>
.byte 102,65,15,56,0,242 // pshufb %xmm10,%xmm6
.byte 102,69,15,56,0,194 // pshufb %xmm10,%xmm8
.byte 102,65,15,108,240 // punpcklqdq %xmm8,%xmm6
@@ -57857,7 +57927,7 @@ _sk_load_8888_dst_sse41_lowp:
.byte 255 // (bad)
.byte 255 // (bad)
.byte 255 // (bad)
- .byte 123,255 // jnp 30eb5 <_sk_load_8888_dst_sse41_lowp+0x129>
+ .byte 123,255 // jnp 31025 <_sk_load_8888_dst_sse41_lowp+0x129>
.byte 255 // (bad)
.byte 255,114,255 // pushq -0x1(%rdx)
.byte 255 // (bad)
@@ -57894,20 +57964,20 @@ _sk_store_8888_sse41_lowp:
.byte 36,7 // and $0x7,%al
.byte 254,200 // dec %al
.byte 60,6 // cmp $0x6,%al
- .byte 119,27 // ja 30f4a <_sk_store_8888_sse41_lowp+0x86>
+ .byte 119,27 // ja 310ba <_sk_store_8888_sse41_lowp+0x86>
.byte 15,182,192 // movzbl %al,%eax
- .byte 76,141,13,87,0,0,0 // lea 0x57(%rip),%r9 # 30f90 <_sk_store_8888_sse41_lowp+0xcc>
+ .byte 76,141,13,87,0,0,0 // lea 0x57(%rip),%r9 # 31100 <_sk_store_8888_sse41_lowp+0xcc>
.byte 73,99,4,129 // movslq (%r9,%rax,4),%rax
.byte 76,1,200 // add %r9,%rax
.byte 255,224 // jmpq *%rax
.byte 102,69,15,126,4,144 // movd %xmm8,(%r8,%rdx,4)
- .byte 235,63 // jmp 30f89 <_sk_store_8888_sse41_lowp+0xc5>
+ .byte 235,63 // jmp 310f9 <_sk_store_8888_sse41_lowp+0xc5>
.byte 243,69,15,127,4,144 // movdqu %xmm8,(%r8,%rdx,4)
.byte 243,69,15,127,76,144,16 // movdqu %xmm9,0x10(%r8,%rdx,4)
- .byte 235,48 // jmp 30f89 <_sk_store_8888_sse41_lowp+0xc5>
+ .byte 235,48 // jmp 310f9 <_sk_store_8888_sse41_lowp+0xc5>
.byte 102,69,15,58,22,68,144,8,2 // pextrd $0x2,%xmm8,0x8(%r8,%rdx,4)
.byte 102,69,15,214,4,144 // movq %xmm8,(%r8,%rdx,4)
- .byte 235,31 // jmp 30f89 <_sk_store_8888_sse41_lowp+0xc5>
+ .byte 235,31 // jmp 310f9 <_sk_store_8888_sse41_lowp+0xc5>
.byte 102,69,15,58,22,76,144,24,2 // pextrd $0x2,%xmm9,0x18(%r8,%rdx,4)
.byte 102,69,15,58,22,76,144,20,1 // pextrd $0x1,%xmm9,0x14(%r8,%rdx,4)
.byte 102,69,15,126,76,144,16 // movd %xmm9,0x10(%r8,%rdx,4)
@@ -57951,49 +58021,49 @@ _sk_load_bgra_sse41_lowp:
.byte 36,7 // and $0x7,%al
.byte 254,200 // dec %al
.byte 60,6 // cmp $0x6,%al
- .byte 119,41 // ja 30ff0 <_sk_load_bgra_sse41_lowp+0x44>
+ .byte 119,41 // ja 31160 <_sk_load_bgra_sse41_lowp+0x44>
.byte 102,69,15,239,192 // pxor %xmm8,%xmm8
.byte 15,182,192 // movzbl %al,%eax
- .byte 76,141,13,242,0,0,0 // lea 0xf2(%rip),%r9 # 310c8 <_sk_load_bgra_sse41_lowp+0x11c>
+ .byte 76,141,13,242,0,0,0 // lea 0xf2(%rip),%r9 # 31238 <_sk_load_bgra_sse41_lowp+0x11c>
.byte 73,99,4,129 // movslq (%r9,%rax,4),%rax
.byte 76,1,200 // add %r9,%rax
.byte 102,15,239,201 // pxor %xmm1,%xmm1
.byte 255,224 // jmpq *%rax
.byte 102,65,15,110,4,144 // movd (%r8,%rdx,4),%xmm0
.byte 102,69,15,239,192 // pxor %xmm8,%xmm8
- .byte 235,89 // jmp 31049 <_sk_load_bgra_sse41_lowp+0x9d>
+ .byte 235,89 // jmp 311b9 <_sk_load_bgra_sse41_lowp+0x9d>
.byte 243,65,15,111,4,144 // movdqu (%r8,%rdx,4),%xmm0
.byte 243,69,15,111,68,144,16 // movdqu 0x10(%r8,%rdx,4),%xmm8
- .byte 235,74 // jmp 31049 <_sk_load_bgra_sse41_lowp+0x9d>
+ .byte 235,74 // jmp 311b9 <_sk_load_bgra_sse41_lowp+0x9d>
.byte 102,65,15,110,68,144,8 // movd 0x8(%r8,%rdx,4),%xmm0
.byte 102,68,15,112,192,69 // pshufd $0x45,%xmm0,%xmm8
.byte 102,15,239,201 // pxor %xmm1,%xmm1
.byte 243,65,15,126,4,144 // movq (%r8,%rdx,4),%xmm0
.byte 102,65,15,58,14,192,240 // pblendw $0xf0,%xmm8,%xmm0
.byte 102,68,15,111,193 // movdqa %xmm1,%xmm8
- .byte 235,37 // jmp 31049 <_sk_load_bgra_sse41_lowp+0x9d>
+ .byte 235,37 // jmp 311b9 <_sk_load_bgra_sse41_lowp+0x9d>
.byte 102,65,15,110,68,144,24 // movd 0x18(%r8,%rdx,4),%xmm0
.byte 102,68,15,112,192,69 // pshufd $0x45,%xmm0,%xmm8
.byte 102,69,15,58,34,68,144,20,1 // pinsrd $0x1,0x14(%r8,%rdx,4),%xmm8
.byte 102,69,15,58,34,68,144,16,0 // pinsrd $0x0,0x10(%r8,%rdx,4),%xmm8
.byte 243,65,15,111,4,144 // movdqu (%r8,%rdx,4),%xmm0
- .byte 102,15,111,21,191,137,0,0 // movdqa 0x89bf(%rip),%xmm2 # 39a10 <_sk_srcover_bgra_8888_sse2_lowp+0x130c>
+ .byte 102,15,111,21,175,137,0,0 // movdqa 0x89af(%rip),%xmm2 # 39b70 <_sk_srcover_bgra_8888_sse2_lowp+0x12fc>
.byte 102,15,111,200 // movdqa %xmm0,%xmm1
.byte 102,15,56,0,202 // pshufb %xmm2,%xmm1
.byte 102,65,15,111,216 // movdqa %xmm8,%xmm3
.byte 102,15,56,0,218 // pshufb %xmm2,%xmm3
.byte 102,15,108,203 // punpcklqdq %xmm3,%xmm1
- .byte 102,68,15,111,13,31,130,0,0 // movdqa 0x821f(%rip),%xmm9 # 39290 <_sk_srcover_bgra_8888_sse2_lowp+0xb8c>
+ .byte 102,68,15,111,13,15,130,0,0 // movdqa 0x820f(%rip),%xmm9 # 393f0 <_sk_srcover_bgra_8888_sse2_lowp+0xb7c>
.byte 102,15,111,209 // movdqa %xmm1,%xmm2
.byte 102,65,15,219,209 // pand %xmm9,%xmm2
.byte 102,15,113,209,8 // psrlw $0x8,%xmm1
- .byte 102,68,15,111,21,152,137,0,0 // movdqa 0x8998(%rip),%xmm10 # 39a20 <_sk_srcover_bgra_8888_sse2_lowp+0x131c>
+ .byte 102,68,15,111,21,136,137,0,0 // movdqa 0x8988(%rip),%xmm10 # 39b80 <_sk_srcover_bgra_8888_sse2_lowp+0x130c>
.byte 102,15,111,216 // movdqa %xmm0,%xmm3
.byte 102,65,15,56,0,218 // pshufb %xmm10,%xmm3
.byte 102,69,15,111,216 // movdqa %xmm8,%xmm11
.byte 102,69,15,56,0,218 // pshufb %xmm10,%xmm11
.byte 102,65,15,108,219 // punpcklqdq %xmm11,%xmm3
- .byte 102,68,15,111,21,133,137,0,0 // movdqa 0x8985(%rip),%xmm10 # 39a30 <_sk_srcover_bgra_8888_sse2_lowp+0x132c>
+ .byte 102,68,15,111,21,117,137,0,0 // movdqa 0x8975(%rip),%xmm10 # 39b90 <_sk_srcover_bgra_8888_sse2_lowp+0x131c>
.byte 102,65,15,56,0,194 // pshufb %xmm10,%xmm0
.byte 102,69,15,56,0,194 // pshufb %xmm10,%xmm8
.byte 102,65,15,108,192 // punpcklqdq %xmm8,%xmm0
@@ -58009,7 +58079,7 @@ _sk_load_bgra_sse41_lowp:
.byte 255 // (bad)
.byte 255 // (bad)
.byte 255 // (bad)
- .byte 123,255 // jnp 310d5 <_sk_load_bgra_sse41_lowp+0x129>
+ .byte 123,255 // jnp 31245 <_sk_load_bgra_sse41_lowp+0x129>
.byte 255 // (bad)
.byte 255,114,255 // pushq -0x1(%rdx)
.byte 255 // (bad)
@@ -58031,49 +58101,49 @@ _sk_load_bgra_dst_sse41_lowp:
.byte 36,7 // and $0x7,%al
.byte 254,200 // dec %al
.byte 60,6 // cmp $0x6,%al
- .byte 119,41 // ja 31128 <_sk_load_bgra_dst_sse41_lowp+0x44>
+ .byte 119,41 // ja 31298 <_sk_load_bgra_dst_sse41_lowp+0x44>
.byte 102,69,15,239,192 // pxor %xmm8,%xmm8
.byte 15,182,192 // movzbl %al,%eax
- .byte 76,141,13,242,0,0,0 // lea 0xf2(%rip),%r9 # 31200 <_sk_load_bgra_dst_sse41_lowp+0x11c>
+ .byte 76,141,13,242,0,0,0 // lea 0xf2(%rip),%r9 # 31370 <_sk_load_bgra_dst_sse41_lowp+0x11c>
.byte 73,99,4,129 // movslq (%r9,%rax,4),%rax
.byte 76,1,200 // add %r9,%rax
.byte 102,15,239,237 // pxor %xmm5,%xmm5
.byte 255,224 // jmpq *%rax
.byte 102,65,15,110,36,144 // movd (%r8,%rdx,4),%xmm4
.byte 102,69,15,239,192 // pxor %xmm8,%xmm8
- .byte 235,89 // jmp 31181 <_sk_load_bgra_dst_sse41_lowp+0x9d>
+ .byte 235,89 // jmp 312f1 <_sk_load_bgra_dst_sse41_lowp+0x9d>
.byte 243,65,15,111,36,144 // movdqu (%r8,%rdx,4),%xmm4
.byte 243,69,15,111,68,144,16 // movdqu 0x10(%r8,%rdx,4),%xmm8
- .byte 235,74 // jmp 31181 <_sk_load_bgra_dst_sse41_lowp+0x9d>
+ .byte 235,74 // jmp 312f1 <_sk_load_bgra_dst_sse41_lowp+0x9d>
.byte 102,65,15,110,100,144,8 // movd 0x8(%r8,%rdx,4),%xmm4
.byte 102,68,15,112,196,69 // pshufd $0x45,%xmm4,%xmm8
.byte 102,15,239,237 // pxor %xmm5,%xmm5
.byte 243,65,15,126,36,144 // movq (%r8,%rdx,4),%xmm4
.byte 102,65,15,58,14,224,240 // pblendw $0xf0,%xmm8,%xmm4
.byte 102,68,15,111,197 // movdqa %xmm5,%xmm8
- .byte 235,37 // jmp 31181 <_sk_load_bgra_dst_sse41_lowp+0x9d>
+ .byte 235,37 // jmp 312f1 <_sk_load_bgra_dst_sse41_lowp+0x9d>
.byte 102,65,15,110,100,144,24 // movd 0x18(%r8,%rdx,4),%xmm4
.byte 102,68,15,112,196,69 // pshufd $0x45,%xmm4,%xmm8
.byte 102,69,15,58,34,68,144,20,1 // pinsrd $0x1,0x14(%r8,%rdx,4),%xmm8
.byte 102,69,15,58,34,68,144,16,0 // pinsrd $0x0,0x10(%r8,%rdx,4),%xmm8
.byte 243,65,15,111,36,144 // movdqu (%r8,%rdx,4),%xmm4
- .byte 102,15,111,53,135,136,0,0 // movdqa 0x8887(%rip),%xmm6 # 39a10 <_sk_srcover_bgra_8888_sse2_lowp+0x130c>
+ .byte 102,15,111,53,119,136,0,0 // movdqa 0x8877(%rip),%xmm6 # 39b70 <_sk_srcover_bgra_8888_sse2_lowp+0x12fc>
.byte 102,15,111,236 // movdqa %xmm4,%xmm5
.byte 102,15,56,0,238 // pshufb %xmm6,%xmm5
.byte 102,65,15,111,248 // movdqa %xmm8,%xmm7
.byte 102,15,56,0,254 // pshufb %xmm6,%xmm7
.byte 102,15,108,239 // punpcklqdq %xmm7,%xmm5
- .byte 102,68,15,111,13,231,128,0,0 // movdqa 0x80e7(%rip),%xmm9 # 39290 <_sk_srcover_bgra_8888_sse2_lowp+0xb8c>
+ .byte 102,68,15,111,13,215,128,0,0 // movdqa 0x80d7(%rip),%xmm9 # 393f0 <_sk_srcover_bgra_8888_sse2_lowp+0xb7c>
.byte 102,15,111,245 // movdqa %xmm5,%xmm6
.byte 102,65,15,219,241 // pand %xmm9,%xmm6
.byte 102,15,113,213,8 // psrlw $0x8,%xmm5
- .byte 102,68,15,111,21,96,136,0,0 // movdqa 0x8860(%rip),%xmm10 # 39a20 <_sk_srcover_bgra_8888_sse2_lowp+0x131c>
+ .byte 102,68,15,111,21,80,136,0,0 // movdqa 0x8850(%rip),%xmm10 # 39b80 <_sk_srcover_bgra_8888_sse2_lowp+0x130c>
.byte 102,15,111,252 // movdqa %xmm4,%xmm7
.byte 102,65,15,56,0,250 // pshufb %xmm10,%xmm7
.byte 102,69,15,111,216 // movdqa %xmm8,%xmm11
.byte 102,69,15,56,0,218 // pshufb %xmm10,%xmm11
.byte 102,65,15,108,251 // punpcklqdq %xmm11,%xmm7
- .byte 102,68,15,111,21,77,136,0,0 // movdqa 0x884d(%rip),%xmm10 # 39a30 <_sk_srcover_bgra_8888_sse2_lowp+0x132c>
+ .byte 102,68,15,111,21,61,136,0,0 // movdqa 0x883d(%rip),%xmm10 # 39b90 <_sk_srcover_bgra_8888_sse2_lowp+0x131c>
.byte 102,65,15,56,0,226 // pshufb %xmm10,%xmm4
.byte 102,69,15,56,0,194 // pshufb %xmm10,%xmm8
.byte 102,65,15,108,224 // punpcklqdq %xmm8,%xmm4
@@ -58089,7 +58159,7 @@ _sk_load_bgra_dst_sse41_lowp:
.byte 255 // (bad)
.byte 255 // (bad)
.byte 255 // (bad)
- .byte 123,255 // jnp 3120d <_sk_load_bgra_dst_sse41_lowp+0x129>
+ .byte 123,255 // jnp 3137d <_sk_load_bgra_dst_sse41_lowp+0x129>
.byte 255 // (bad)
.byte 255,114,255 // pushq -0x1(%rdx)
.byte 255 // (bad)
@@ -58126,20 +58196,20 @@ _sk_store_bgra_sse41_lowp:
.byte 36,7 // and $0x7,%al
.byte 254,200 // dec %al
.byte 60,6 // cmp $0x6,%al
- .byte 119,27 // ja 312a2 <_sk_store_bgra_sse41_lowp+0x86>
+ .byte 119,27 // ja 31412 <_sk_store_bgra_sse41_lowp+0x86>
.byte 15,182,192 // movzbl %al,%eax
- .byte 76,141,13,87,0,0,0 // lea 0x57(%rip),%r9 # 312e8 <_sk_store_bgra_sse41_lowp+0xcc>
+ .byte 76,141,13,87,0,0,0 // lea 0x57(%rip),%r9 # 31458 <_sk_store_bgra_sse41_lowp+0xcc>
.byte 73,99,4,129 // movslq (%r9,%rax,4),%rax
.byte 76,1,200 // add %r9,%rax
.byte 255,224 // jmpq *%rax
.byte 102,69,15,126,4,144 // movd %xmm8,(%r8,%rdx,4)
- .byte 235,63 // jmp 312e1 <_sk_store_bgra_sse41_lowp+0xc5>
+ .byte 235,63 // jmp 31451 <_sk_store_bgra_sse41_lowp+0xc5>
.byte 243,69,15,127,4,144 // movdqu %xmm8,(%r8,%rdx,4)
.byte 243,69,15,127,76,144,16 // movdqu %xmm9,0x10(%r8,%rdx,4)
- .byte 235,48 // jmp 312e1 <_sk_store_bgra_sse41_lowp+0xc5>
+ .byte 235,48 // jmp 31451 <_sk_store_bgra_sse41_lowp+0xc5>
.byte 102,69,15,58,22,68,144,8,2 // pextrd $0x2,%xmm8,0x8(%r8,%rdx,4)
.byte 102,69,15,214,4,144 // movq %xmm8,(%r8,%rdx,4)
- .byte 235,31 // jmp 312e1 <_sk_store_bgra_sse41_lowp+0xc5>
+ .byte 235,31 // jmp 31451 <_sk_store_bgra_sse41_lowp+0xc5>
.byte 102,69,15,58,22,76,144,24,2 // pextrd $0x2,%xmm9,0x18(%r8,%rdx,4)
.byte 102,69,15,58,22,76,144,20,1 // pextrd $0x1,%xmm9,0x14(%r8,%rdx,4)
.byte 102,69,15,126,76,144,16 // movd %xmm9,0x10(%r8,%rdx,4)
@@ -58242,17 +58312,17 @@ _sk_gather_8888_sse41_lowp:
.byte 102,65,15,196,202,5 // pinsrw $0x5,%r10d,%xmm1
.byte 102,65,15,196,201,6 // pinsrw $0x6,%r9d,%xmm1
.byte 102,15,196,200,7 // pinsrw $0x7,%eax,%xmm1
- .byte 102,68,15,111,13,80,126,0,0 // movdqa 0x7e50(%rip),%xmm9 # 39290 <_sk_srcover_bgra_8888_sse2_lowp+0xb8c>
+ .byte 102,68,15,111,13,64,126,0,0 // movdqa 0x7e40(%rip),%xmm9 # 393f0 <_sk_srcover_bgra_8888_sse2_lowp+0xb7c>
.byte 102,15,111,193 // movdqa %xmm1,%xmm0
.byte 102,65,15,219,193 // pand %xmm9,%xmm0
.byte 102,15,113,209,8 // psrlw $0x8,%xmm1
- .byte 102,68,15,111,21,201,133,0,0 // movdqa 0x85c9(%rip),%xmm10 # 39a20 <_sk_srcover_bgra_8888_sse2_lowp+0x131c>
+ .byte 102,68,15,111,21,185,133,0,0 // movdqa 0x85b9(%rip),%xmm10 # 39b80 <_sk_srcover_bgra_8888_sse2_lowp+0x130c>
.byte 102,15,111,218 // movdqa %xmm2,%xmm3
.byte 102,65,15,56,0,218 // pshufb %xmm10,%xmm3
.byte 102,69,15,111,216 // movdqa %xmm8,%xmm11
.byte 102,69,15,56,0,218 // pshufb %xmm10,%xmm11
.byte 102,65,15,108,219 // punpcklqdq %xmm11,%xmm3
- .byte 102,68,15,111,21,182,133,0,0 // movdqa 0x85b6(%rip),%xmm10 # 39a30 <_sk_srcover_bgra_8888_sse2_lowp+0x132c>
+ .byte 102,68,15,111,21,166,133,0,0 // movdqa 0x85a6(%rip),%xmm10 # 39b90 <_sk_srcover_bgra_8888_sse2_lowp+0x131c>
.byte 102,65,15,56,0,210 // pshufb %xmm10,%xmm2
.byte 102,69,15,56,0,194 // pshufb %xmm10,%xmm8
.byte 102,65,15,108,208 // punpcklqdq %xmm8,%xmm2
@@ -58337,17 +58407,17 @@ _sk_gather_bgra_sse41_lowp:
.byte 102,65,15,196,202,5 // pinsrw $0x5,%r10d,%xmm1
.byte 102,65,15,196,201,6 // pinsrw $0x6,%r9d,%xmm1
.byte 102,15,196,200,7 // pinsrw $0x7,%eax,%xmm1
- .byte 102,68,15,111,13,184,124,0,0 // movdqa 0x7cb8(%rip),%xmm9 # 39290 <_sk_srcover_bgra_8888_sse2_lowp+0xb8c>
+ .byte 102,68,15,111,13,168,124,0,0 // movdqa 0x7ca8(%rip),%xmm9 # 393f0 <_sk_srcover_bgra_8888_sse2_lowp+0xb7c>
.byte 102,15,111,209 // movdqa %xmm1,%xmm2
.byte 102,65,15,219,209 // pand %xmm9,%xmm2
.byte 102,15,113,209,8 // psrlw $0x8,%xmm1
- .byte 102,68,15,111,21,49,132,0,0 // movdqa 0x8431(%rip),%xmm10 # 39a20 <_sk_srcover_bgra_8888_sse2_lowp+0x131c>
+ .byte 102,68,15,111,21,33,132,0,0 // movdqa 0x8421(%rip),%xmm10 # 39b80 <_sk_srcover_bgra_8888_sse2_lowp+0x130c>
.byte 102,15,111,216 // movdqa %xmm0,%xmm3
.byte 102,65,15,56,0,218 // pshufb %xmm10,%xmm3
.byte 102,69,15,111,216 // movdqa %xmm8,%xmm11
.byte 102,69,15,56,0,218 // pshufb %xmm10,%xmm11
.byte 102,65,15,108,219 // punpcklqdq %xmm11,%xmm3
- .byte 102,68,15,111,21,30,132,0,0 // movdqa 0x841e(%rip),%xmm10 # 39a30 <_sk_srcover_bgra_8888_sse2_lowp+0x132c>
+ .byte 102,68,15,111,21,14,132,0,0 // movdqa 0x840e(%rip),%xmm10 # 39b90 <_sk_srcover_bgra_8888_sse2_lowp+0x131c>
.byte 102,65,15,56,0,194 // pshufb %xmm10,%xmm0
.byte 102,69,15,56,0,194 // pshufb %xmm10,%xmm8
.byte 102,65,15,108,192 // punpcklqdq %xmm8,%xmm0
@@ -58373,23 +58443,23 @@ _sk_load_565_sse41_lowp:
.byte 36,7 // and $0x7,%al
.byte 254,200 // dec %al
.byte 60,6 // cmp $0x6,%al
- .byte 119,34 // ja 31670 <_sk_load_565_sse41_lowp+0x3c>
+ .byte 119,34 // ja 317e0 <_sk_load_565_sse41_lowp+0x3c>
.byte 102,15,239,201 // pxor %xmm1,%xmm1
.byte 15,182,192 // movzbl %al,%eax
- .byte 76,141,13,196,0,0,0 // lea 0xc4(%rip),%r9 # 31720 <_sk_load_565_sse41_lowp+0xec>
+ .byte 76,141,13,196,0,0,0 // lea 0xc4(%rip),%r9 # 31890 <_sk_load_565_sse41_lowp+0xec>
.byte 73,99,4,129 // movslq (%r9,%rax,4),%rax
.byte 76,1,200 // add %r9,%rax
.byte 255,224 // jmpq *%rax
.byte 65,15,183,4,80 // movzwl (%r8,%rdx,2),%eax
.byte 102,15,110,192 // movd %eax,%xmm0
- .byte 235,74 // jmp 316ba <_sk_load_565_sse41_lowp+0x86>
+ .byte 235,74 // jmp 3182a <_sk_load_565_sse41_lowp+0x86>
.byte 243,65,15,111,4,80 // movdqu (%r8,%rdx,2),%xmm0
- .byte 235,66 // jmp 316ba <_sk_load_565_sse41_lowp+0x86>
+ .byte 235,66 // jmp 3182a <_sk_load_565_sse41_lowp+0x86>
.byte 102,15,239,201 // pxor %xmm1,%xmm1
.byte 102,65,15,196,76,80,4,2 // pinsrw $0x2,0x4(%r8,%rdx,2),%xmm1
.byte 102,65,15,110,4,80 // movd (%r8,%rdx,2),%xmm0
.byte 102,15,58,14,193,252 // pblendw $0xfc,%xmm1,%xmm0
- .byte 235,40 // jmp 316ba <_sk_load_565_sse41_lowp+0x86>
+ .byte 235,40 // jmp 3182a <_sk_load_565_sse41_lowp+0x86>
.byte 102,15,239,201 // pxor %xmm1,%xmm1
.byte 102,65,15,196,76,80,12,6 // pinsrw $0x6,0xc(%r8,%rdx,2),%xmm1
.byte 102,65,15,196,76,80,10,5 // pinsrw $0x5,0xa(%r8,%rdx,2),%xmm1
@@ -58398,11 +58468,11 @@ _sk_load_565_sse41_lowp:
.byte 102,15,58,14,193,240 // pblendw $0xf0,%xmm1,%xmm0
.byte 102,15,111,216 // movdqa %xmm0,%xmm3
.byte 102,15,113,211,8 // psrlw $0x8,%xmm3
- .byte 102,15,219,29,117,131,0,0 // pand 0x8375(%rip),%xmm3 # 39a40 <_sk_srcover_bgra_8888_sse2_lowp+0x133c>
+ .byte 102,15,219,29,101,131,0,0 // pand 0x8365(%rip),%xmm3 # 39ba0 <_sk_srcover_bgra_8888_sse2_lowp+0x132c>
.byte 102,15,111,200 // movdqa %xmm0,%xmm1
.byte 102,15,113,209,5 // psrlw $0x5,%xmm1
- .byte 102,15,219,13,116,131,0,0 // pand 0x8374(%rip),%xmm1 # 39a50 <_sk_srcover_bgra_8888_sse2_lowp+0x134c>
- .byte 102,15,111,21,124,131,0,0 // movdqa 0x837c(%rip),%xmm2 # 39a60 <_sk_srcover_bgra_8888_sse2_lowp+0x135c>
+ .byte 102,15,219,13,100,131,0,0 // pand 0x8364(%rip),%xmm1 # 39bb0 <_sk_srcover_bgra_8888_sse2_lowp+0x133c>
+ .byte 102,15,111,21,108,131,0,0 // movdqa 0x836c(%rip),%xmm2 # 39bc0 <_sk_srcover_bgra_8888_sse2_lowp+0x134c>
.byte 102,15,219,208 // pand %xmm0,%xmm2
.byte 102,15,113,208,13 // psrlw $0xd,%xmm0
.byte 102,15,235,195 // por %xmm3,%xmm0
@@ -58415,7 +58485,7 @@ _sk_load_565_sse41_lowp:
.byte 102,15,113,210,2 // psrlw $0x2,%xmm2
.byte 102,15,235,211 // por %xmm3,%xmm2
.byte 72,173 // lods %ds:(%rsi),%rax
- .byte 15,40,29,114,123,0,0 // movaps 0x7b72(%rip),%xmm3 # 39290 <_sk_srcover_bgra_8888_sse2_lowp+0xb8c>
+ .byte 15,40,29,98,123,0,0 // movaps 0x7b62(%rip),%xmm3 # 393f0 <_sk_srcover_bgra_8888_sse2_lowp+0xb7c>
.byte 255,224 // jmpq *%rax
.byte 69,255 // rex.RB (bad)
.byte 255 // (bad)
@@ -58426,7 +58496,7 @@ _sk_load_565_sse41_lowp:
.byte 255 // (bad)
.byte 255 // (bad)
.byte 255 // (bad)
- .byte 126,255 // jle 31735 <_sk_load_565_sse41_lowp+0x101>
+ .byte 126,255 // jle 318a5 <_sk_load_565_sse41_lowp+0x101>
.byte 255 // (bad)
.byte 255,114,255 // pushq -0x1(%rdx)
.byte 255 // (bad)
@@ -58445,23 +58515,23 @@ _sk_load_565_dst_sse41_lowp:
.byte 36,7 // and $0x7,%al
.byte 254,200 // dec %al
.byte 60,6 // cmp $0x6,%al
- .byte 119,34 // ja 31778 <_sk_load_565_dst_sse41_lowp+0x3c>
+ .byte 119,34 // ja 318e8 <_sk_load_565_dst_sse41_lowp+0x3c>
.byte 102,15,239,237 // pxor %xmm5,%xmm5
.byte 15,182,192 // movzbl %al,%eax
- .byte 76,141,13,196,0,0,0 // lea 0xc4(%rip),%r9 # 31828 <_sk_load_565_dst_sse41_lowp+0xec>
+ .byte 76,141,13,196,0,0,0 // lea 0xc4(%rip),%r9 # 31998 <_sk_load_565_dst_sse41_lowp+0xec>
.byte 73,99,4,129 // movslq (%r9,%rax,4),%rax
.byte 76,1,200 // add %r9,%rax
.byte 255,224 // jmpq *%rax
.byte 65,15,183,4,80 // movzwl (%r8,%rdx,2),%eax
.byte 102,15,110,224 // movd %eax,%xmm4
- .byte 235,74 // jmp 317c2 <_sk_load_565_dst_sse41_lowp+0x86>
+ .byte 235,74 // jmp 31932 <_sk_load_565_dst_sse41_lowp+0x86>
.byte 243,65,15,111,36,80 // movdqu (%r8,%rdx,2),%xmm4
- .byte 235,66 // jmp 317c2 <_sk_load_565_dst_sse41_lowp+0x86>
+ .byte 235,66 // jmp 31932 <_sk_load_565_dst_sse41_lowp+0x86>
.byte 102,15,239,237 // pxor %xmm5,%xmm5
.byte 102,65,15,196,108,80,4,2 // pinsrw $0x2,0x4(%r8,%rdx,2),%xmm5
.byte 102,65,15,110,36,80 // movd (%r8,%rdx,2),%xmm4
.byte 102,15,58,14,229,252 // pblendw $0xfc,%xmm5,%xmm4
- .byte 235,40 // jmp 317c2 <_sk_load_565_dst_sse41_lowp+0x86>
+ .byte 235,40 // jmp 31932 <_sk_load_565_dst_sse41_lowp+0x86>
.byte 102,15,239,237 // pxor %xmm5,%xmm5
.byte 102,65,15,196,108,80,12,6 // pinsrw $0x6,0xc(%r8,%rdx,2),%xmm5
.byte 102,65,15,196,108,80,10,5 // pinsrw $0x5,0xa(%r8,%rdx,2),%xmm5
@@ -58470,11 +58540,11 @@ _sk_load_565_dst_sse41_lowp:
.byte 102,15,58,14,229,240 // pblendw $0xf0,%xmm5,%xmm4
.byte 102,15,111,252 // movdqa %xmm4,%xmm7
.byte 102,15,113,215,8 // psrlw $0x8,%xmm7
- .byte 102,15,219,61,109,130,0,0 // pand 0x826d(%rip),%xmm7 # 39a40 <_sk_srcover_bgra_8888_sse2_lowp+0x133c>
+ .byte 102,15,219,61,93,130,0,0 // pand 0x825d(%rip),%xmm7 # 39ba0 <_sk_srcover_bgra_8888_sse2_lowp+0x132c>
.byte 102,15,111,236 // movdqa %xmm4,%xmm5
.byte 102,15,113,213,5 // psrlw $0x5,%xmm5
- .byte 102,15,219,45,108,130,0,0 // pand 0x826c(%rip),%xmm5 # 39a50 <_sk_srcover_bgra_8888_sse2_lowp+0x134c>
- .byte 102,15,111,53,116,130,0,0 // movdqa 0x8274(%rip),%xmm6 # 39a60 <_sk_srcover_bgra_8888_sse2_lowp+0x135c>
+ .byte 102,15,219,45,92,130,0,0 // pand 0x825c(%rip),%xmm5 # 39bb0 <_sk_srcover_bgra_8888_sse2_lowp+0x133c>
+ .byte 102,15,111,53,100,130,0,0 // movdqa 0x8264(%rip),%xmm6 # 39bc0 <_sk_srcover_bgra_8888_sse2_lowp+0x134c>
.byte 102,15,219,244 // pand %xmm4,%xmm6
.byte 102,15,113,212,13 // psrlw $0xd,%xmm4
.byte 102,15,235,231 // por %xmm7,%xmm4
@@ -58487,7 +58557,7 @@ _sk_load_565_dst_sse41_lowp:
.byte 102,15,113,214,2 // psrlw $0x2,%xmm6
.byte 102,15,235,247 // por %xmm7,%xmm6
.byte 72,173 // lods %ds:(%rsi),%rax
- .byte 15,40,61,106,122,0,0 // movaps 0x7a6a(%rip),%xmm7 # 39290 <_sk_srcover_bgra_8888_sse2_lowp+0xb8c>
+ .byte 15,40,61,90,122,0,0 // movaps 0x7a5a(%rip),%xmm7 # 393f0 <_sk_srcover_bgra_8888_sse2_lowp+0xb7c>
.byte 255,224 // jmpq *%rax
.byte 69,255 // rex.RB (bad)
.byte 255 // (bad)
@@ -58498,7 +58568,7 @@ _sk_load_565_dst_sse41_lowp:
.byte 255 // (bad)
.byte 255 // (bad)
.byte 255 // (bad)
- .byte 126,255 // jle 3183d <_sk_load_565_dst_sse41_lowp+0x101>
+ .byte 126,255 // jle 319ad <_sk_load_565_dst_sse41_lowp+0x101>
.byte 255 // (bad)
.byte 255,114,255 // pushq -0x1(%rdx)
.byte 255 // (bad)
@@ -58515,10 +58585,10 @@ _sk_store_565_sse41_lowp:
.byte 76,3,0 // add (%rax),%r8
.byte 102,68,15,111,192 // movdqa %xmm0,%xmm8
.byte 102,65,15,113,240,8 // psllw $0x8,%xmm8
- .byte 102,68,15,219,5,8,130,0,0 // pand 0x8208(%rip),%xmm8 # 39a70 <_sk_srcover_bgra_8888_sse2_lowp+0x136c>
+ .byte 102,68,15,219,5,248,129,0,0 // pand 0x81f8(%rip),%xmm8 # 39bd0 <_sk_srcover_bgra_8888_sse2_lowp+0x135c>
.byte 102,68,15,111,201 // movdqa %xmm1,%xmm9
.byte 102,65,15,113,241,3 // psllw $0x3,%xmm9
- .byte 102,68,15,219,13,4,130,0,0 // pand 0x8204(%rip),%xmm9 # 39a80 <_sk_srcover_bgra_8888_sse2_lowp+0x137c>
+ .byte 102,68,15,219,13,244,129,0,0 // pand 0x81f4(%rip),%xmm9 # 39be0 <_sk_srcover_bgra_8888_sse2_lowp+0x136c>
.byte 102,69,15,235,200 // por %xmm8,%xmm9
.byte 102,68,15,111,194 // movdqa %xmm2,%xmm8
.byte 102,65,15,113,208,3 // psrlw $0x3,%xmm8
@@ -58527,19 +58597,19 @@ _sk_store_565_sse41_lowp:
.byte 36,7 // and $0x7,%al
.byte 254,200 // dec %al
.byte 60,6 // cmp $0x6,%al
- .byte 119,29 // ja 318b8 <_sk_store_565_sse41_lowp+0x74>
+ .byte 119,29 // ja 31a28 <_sk_store_565_sse41_lowp+0x74>
.byte 15,182,192 // movzbl %al,%eax
- .byte 76,141,13,83,0,0,0 // lea 0x53(%rip),%r9 # 318f8 <_sk_store_565_sse41_lowp+0xb4>
+ .byte 76,141,13,83,0,0,0 // lea 0x53(%rip),%r9 # 31a68 <_sk_store_565_sse41_lowp+0xb4>
.byte 73,99,4,129 // movslq (%r9,%rax,4),%rax
.byte 76,1,200 // add %r9,%rax
.byte 255,224 // jmpq *%rax
.byte 102,69,15,58,21,4,80,0 // pextrw $0x0,%xmm8,(%r8,%rdx,2)
- .byte 235,58 // jmp 318f2 <_sk_store_565_sse41_lowp+0xae>
+ .byte 235,58 // jmp 31a62 <_sk_store_565_sse41_lowp+0xae>
.byte 243,69,15,127,4,80 // movdqu %xmm8,(%r8,%rdx,2)
- .byte 235,50 // jmp 318f2 <_sk_store_565_sse41_lowp+0xae>
+ .byte 235,50 // jmp 31a62 <_sk_store_565_sse41_lowp+0xae>
.byte 102,69,15,58,21,68,80,4,2 // pextrw $0x2,%xmm8,0x4(%r8,%rdx,2)
.byte 102,69,15,126,4,80 // movd %xmm8,(%r8,%rdx,2)
- .byte 235,33 // jmp 318f2 <_sk_store_565_sse41_lowp+0xae>
+ .byte 235,33 // jmp 31a62 <_sk_store_565_sse41_lowp+0xae>
.byte 102,69,15,58,21,68,80,12,6 // pextrw $0x6,%xmm8,0xc(%r8,%rdx,2)
.byte 102,69,15,58,21,68,80,10,5 // pextrw $0x5,%xmm8,0xa(%r8,%rdx,2)
.byte 102,69,15,58,21,68,80,8,4 // pextrw $0x4,%xmm8,0x8(%r8,%rdx,2)
@@ -58559,7 +58629,7 @@ _sk_store_565_sse41_lowp:
.byte 255 // (bad)
.byte 255 // (bad)
.byte 255 // (bad)
- .byte 235,255 // jmp 31909 <_sk_store_565_sse41_lowp+0xc5>
+ .byte 235,255 // jmp 31a79 <_sk_store_565_sse41_lowp+0xc5>
.byte 255 // (bad)
.byte 255,226 // jmpq *%rdx
.byte 255 // (bad)
@@ -58636,11 +58706,11 @@ _sk_gather_565_sse41_lowp:
.byte 102,15,196,192,7 // pinsrw $0x7,%eax,%xmm0
.byte 102,15,111,216 // movdqa %xmm0,%xmm3
.byte 102,15,113,211,8 // psrlw $0x8,%xmm3
- .byte 102,15,219,29,18,128,0,0 // pand 0x8012(%rip),%xmm3 # 39a40 <_sk_srcover_bgra_8888_sse2_lowp+0x133c>
+ .byte 102,15,219,29,2,128,0,0 // pand 0x8002(%rip),%xmm3 # 39ba0 <_sk_srcover_bgra_8888_sse2_lowp+0x132c>
.byte 102,15,111,200 // movdqa %xmm0,%xmm1
.byte 102,15,113,209,5 // psrlw $0x5,%xmm1
- .byte 102,15,219,13,17,128,0,0 // pand 0x8011(%rip),%xmm1 # 39a50 <_sk_srcover_bgra_8888_sse2_lowp+0x134c>
- .byte 102,15,111,21,25,128,0,0 // movdqa 0x8019(%rip),%xmm2 # 39a60 <_sk_srcover_bgra_8888_sse2_lowp+0x135c>
+ .byte 102,15,219,13,1,128,0,0 // pand 0x8001(%rip),%xmm1 # 39bb0 <_sk_srcover_bgra_8888_sse2_lowp+0x133c>
+ .byte 102,15,111,21,9,128,0,0 // movdqa 0x8009(%rip),%xmm2 # 39bc0 <_sk_srcover_bgra_8888_sse2_lowp+0x134c>
.byte 102,15,219,208 // pand %xmm0,%xmm2
.byte 102,15,113,208,13 // psrlw $0xd,%xmm0
.byte 102,15,235,195 // por %xmm3,%xmm0
@@ -58653,7 +58723,7 @@ _sk_gather_565_sse41_lowp:
.byte 102,15,113,210,2 // psrlw $0x2,%xmm2
.byte 102,15,235,211 // por %xmm3,%xmm2
.byte 72,173 // lods %ds:(%rsi),%rax
- .byte 15,40,29,15,120,0,0 // movaps 0x780f(%rip),%xmm3 # 39290 <_sk_srcover_bgra_8888_sse2_lowp+0xb8c>
+ .byte 15,40,29,255,119,0,0 // movaps 0x77ff(%rip),%xmm3 # 393f0 <_sk_srcover_bgra_8888_sse2_lowp+0xb7c>
.byte 91 // pop %rbx
.byte 65,92 // pop %r12
.byte 65,94 // pop %r14
@@ -58674,23 +58744,23 @@ _sk_load_4444_sse41_lowp:
.byte 36,7 // and $0x7,%al
.byte 254,200 // dec %al
.byte 60,6 // cmp $0x6,%al
- .byte 119,35 // ja 31ac8 <_sk_load_4444_sse41_lowp+0x3d>
+ .byte 119,35 // ja 31c38 <_sk_load_4444_sse41_lowp+0x3d>
.byte 102,15,239,192 // pxor %xmm0,%xmm0
.byte 15,182,192 // movzbl %al,%eax
- .byte 76,141,13,209,0,0,0 // lea 0xd1(%rip),%r9 # 31b84 <_sk_load_4444_sse41_lowp+0xf9>
+ .byte 76,141,13,209,0,0,0 // lea 0xd1(%rip),%r9 # 31cf4 <_sk_load_4444_sse41_lowp+0xf9>
.byte 73,99,4,129 // movslq (%r9,%rax,4),%rax
.byte 76,1,200 // add %r9,%rax
.byte 255,224 // jmpq *%rax
.byte 65,15,183,4,80 // movzwl (%r8,%rdx,2),%eax
.byte 102,68,15,110,192 // movd %eax,%xmm8
- .byte 235,76 // jmp 31b14 <_sk_load_4444_sse41_lowp+0x89>
+ .byte 235,76 // jmp 31c84 <_sk_load_4444_sse41_lowp+0x89>
.byte 243,69,15,111,4,80 // movdqu (%r8,%rdx,2),%xmm8
- .byte 235,68 // jmp 31b14 <_sk_load_4444_sse41_lowp+0x89>
+ .byte 235,68 // jmp 31c84 <_sk_load_4444_sse41_lowp+0x89>
.byte 102,15,239,192 // pxor %xmm0,%xmm0
.byte 102,65,15,196,68,80,4,2 // pinsrw $0x2,0x4(%r8,%rdx,2),%xmm0
.byte 102,69,15,110,4,80 // movd (%r8,%rdx,2),%xmm8
.byte 102,68,15,58,14,192,252 // pblendw $0xfc,%xmm0,%xmm8
- .byte 235,41 // jmp 31b14 <_sk_load_4444_sse41_lowp+0x89>
+ .byte 235,41 // jmp 31c84 <_sk_load_4444_sse41_lowp+0x89>
.byte 102,15,239,192 // pxor %xmm0,%xmm0
.byte 102,65,15,196,68,80,12,6 // pinsrw $0x6,0xc(%r8,%rdx,2),%xmm0
.byte 102,65,15,196,68,80,10,5 // pinsrw $0x5,0xa(%r8,%rdx,2),%xmm0
@@ -58701,7 +58771,7 @@ _sk_load_4444_sse41_lowp:
.byte 102,15,113,209,12 // psrlw $0xc,%xmm1
.byte 102,65,15,111,208 // movdqa %xmm8,%xmm2
.byte 102,15,113,210,8 // psrlw $0x8,%xmm2
- .byte 102,15,111,5,96,127,0,0 // movdqa 0x7f60(%rip),%xmm0 # 39a90 <_sk_srcover_bgra_8888_sse2_lowp+0x138c>
+ .byte 102,15,111,5,80,127,0,0 // movdqa 0x7f50(%rip),%xmm0 # 39bf0 <_sk_srcover_bgra_8888_sse2_lowp+0x137c>
.byte 102,15,219,208 // pand %xmm0,%xmm2
.byte 102,65,15,111,216 // movdqa %xmm8,%xmm3
.byte 102,15,113,211,4 // psrlw $0x4,%xmm3
@@ -58749,23 +58819,23 @@ _sk_load_4444_dst_sse41_lowp:
.byte 36,7 // and $0x7,%al
.byte 254,200 // dec %al
.byte 60,6 // cmp $0x6,%al
- .byte 119,35 // ja 31bdd <_sk_load_4444_dst_sse41_lowp+0x3d>
+ .byte 119,35 // ja 31d4d <_sk_load_4444_dst_sse41_lowp+0x3d>
.byte 102,15,239,228 // pxor %xmm4,%xmm4
.byte 15,182,192 // movzbl %al,%eax
- .byte 76,141,13,208,0,0,0 // lea 0xd0(%rip),%r9 # 31c98 <_sk_load_4444_dst_sse41_lowp+0xf8>
+ .byte 76,141,13,208,0,0,0 // lea 0xd0(%rip),%r9 # 31e08 <_sk_load_4444_dst_sse41_lowp+0xf8>
.byte 73,99,4,129 // movslq (%r9,%rax,4),%rax
.byte 76,1,200 // add %r9,%rax
.byte 255,224 // jmpq *%rax
.byte 65,15,183,4,80 // movzwl (%r8,%rdx,2),%eax
.byte 102,68,15,110,192 // movd %eax,%xmm8
- .byte 235,76 // jmp 31c29 <_sk_load_4444_dst_sse41_lowp+0x89>
+ .byte 235,76 // jmp 31d99 <_sk_load_4444_dst_sse41_lowp+0x89>
.byte 243,69,15,111,4,80 // movdqu (%r8,%rdx,2),%xmm8
- .byte 235,68 // jmp 31c29 <_sk_load_4444_dst_sse41_lowp+0x89>
+ .byte 235,68 // jmp 31d99 <_sk_load_4444_dst_sse41_lowp+0x89>
.byte 102,15,239,228 // pxor %xmm4,%xmm4
.byte 102,65,15,196,100,80,4,2 // pinsrw $0x2,0x4(%r8,%rdx,2),%xmm4
.byte 102,69,15,110,4,80 // movd (%r8,%rdx,2),%xmm8
.byte 102,68,15,58,14,196,252 // pblendw $0xfc,%xmm4,%xmm8
- .byte 235,41 // jmp 31c29 <_sk_load_4444_dst_sse41_lowp+0x89>
+ .byte 235,41 // jmp 31d99 <_sk_load_4444_dst_sse41_lowp+0x89>
.byte 102,15,239,228 // pxor %xmm4,%xmm4
.byte 102,65,15,196,100,80,12,6 // pinsrw $0x6,0xc(%r8,%rdx,2),%xmm4
.byte 102,65,15,196,100,80,10,5 // pinsrw $0x5,0xa(%r8,%rdx,2),%xmm4
@@ -58776,7 +58846,7 @@ _sk_load_4444_dst_sse41_lowp:
.byte 102,15,113,213,12 // psrlw $0xc,%xmm5
.byte 102,65,15,111,240 // movdqa %xmm8,%xmm6
.byte 102,15,113,214,8 // psrlw $0x8,%xmm6
- .byte 102,15,111,37,75,126,0,0 // movdqa 0x7e4b(%rip),%xmm4 # 39a90 <_sk_srcover_bgra_8888_sse2_lowp+0x138c>
+ .byte 102,15,111,37,59,126,0,0 // movdqa 0x7e3b(%rip),%xmm4 # 39bf0 <_sk_srcover_bgra_8888_sse2_lowp+0x137c>
.byte 102,15,219,244 // pand %xmm4,%xmm6
.byte 102,65,15,111,248 // movdqa %xmm8,%xmm7
.byte 102,15,113,215,4 // psrlw $0x4,%xmm7
@@ -58821,12 +58891,12 @@ _sk_store_4444_sse41_lowp:
.byte 76,3,0 // add (%rax),%r8
.byte 102,68,15,111,192 // movdqa %xmm0,%xmm8
.byte 102,65,15,113,240,8 // psllw $0x8,%xmm8
- .byte 102,68,15,219,5,200,125,0,0 // pand 0x7dc8(%rip),%xmm8 # 39aa0 <_sk_srcover_bgra_8888_sse2_lowp+0x139c>
+ .byte 102,68,15,219,5,184,125,0,0 // pand 0x7db8(%rip),%xmm8 # 39c00 <_sk_srcover_bgra_8888_sse2_lowp+0x138c>
.byte 102,68,15,111,201 // movdqa %xmm1,%xmm9
.byte 102,65,15,113,241,4 // psllw $0x4,%xmm9
- .byte 102,68,15,219,13,196,125,0,0 // pand 0x7dc4(%rip),%xmm9 # 39ab0 <_sk_srcover_bgra_8888_sse2_lowp+0x13ac>
+ .byte 102,68,15,219,13,180,125,0,0 // pand 0x7db4(%rip),%xmm9 # 39c10 <_sk_srcover_bgra_8888_sse2_lowp+0x139c>
.byte 102,69,15,235,200 // por %xmm8,%xmm9
- .byte 102,68,15,111,21,198,125,0,0 // movdqa 0x7dc6(%rip),%xmm10 # 39ac0 <_sk_srcover_bgra_8888_sse2_lowp+0x13bc>
+ .byte 102,68,15,111,21,182,125,0,0 // movdqa 0x7db6(%rip),%xmm10 # 39c20 <_sk_srcover_bgra_8888_sse2_lowp+0x13ac>
.byte 102,68,15,219,210 // pand %xmm2,%xmm10
.byte 102,68,15,111,195 // movdqa %xmm3,%xmm8
.byte 102,65,15,113,208,4 // psrlw $0x4,%xmm8
@@ -58836,19 +58906,19 @@ _sk_store_4444_sse41_lowp:
.byte 36,7 // and $0x7,%al
.byte 254,200 // dec %al
.byte 60,6 // cmp $0x6,%al
- .byte 119,29 // ja 31d3b <_sk_store_4444_sse41_lowp+0x87>
+ .byte 119,29 // ja 31eab <_sk_store_4444_sse41_lowp+0x87>
.byte 15,182,192 // movzbl %al,%eax
- .byte 76,141,13,84,0,0,0 // lea 0x54(%rip),%r9 # 31d7c <_sk_store_4444_sse41_lowp+0xc8>
+ .byte 76,141,13,84,0,0,0 // lea 0x54(%rip),%r9 # 31eec <_sk_store_4444_sse41_lowp+0xc8>
.byte 73,99,4,129 // movslq (%r9,%rax,4),%rax
.byte 76,1,200 // add %r9,%rax
.byte 255,224 // jmpq *%rax
.byte 102,69,15,58,21,4,80,0 // pextrw $0x0,%xmm8,(%r8,%rdx,2)
- .byte 235,58 // jmp 31d75 <_sk_store_4444_sse41_lowp+0xc1>
+ .byte 235,58 // jmp 31ee5 <_sk_store_4444_sse41_lowp+0xc1>
.byte 243,69,15,127,4,80 // movdqu %xmm8,(%r8,%rdx,2)
- .byte 235,50 // jmp 31d75 <_sk_store_4444_sse41_lowp+0xc1>
+ .byte 235,50 // jmp 31ee5 <_sk_store_4444_sse41_lowp+0xc1>
.byte 102,69,15,58,21,68,80,4,2 // pextrw $0x2,%xmm8,0x4(%r8,%rdx,2)
.byte 102,69,15,126,4,80 // movd %xmm8,(%r8,%rdx,2)
- .byte 235,33 // jmp 31d75 <_sk_store_4444_sse41_lowp+0xc1>
+ .byte 235,33 // jmp 31ee5 <_sk_store_4444_sse41_lowp+0xc1>
.byte 102,69,15,58,21,68,80,12,6 // pextrw $0x6,%xmm8,0xc(%r8,%rdx,2)
.byte 102,69,15,58,21,68,80,10,5 // pextrw $0x5,%xmm8,0xa(%r8,%rdx,2)
.byte 102,69,15,58,21,68,80,8,4 // pextrw $0x4,%xmm8,0x8(%r8,%rdx,2)
@@ -58948,7 +59018,7 @@ _sk_gather_4444_sse41_lowp:
.byte 102,15,113,209,12 // psrlw $0xc,%xmm1
.byte 102,65,15,111,208 // movdqa %xmm8,%xmm2
.byte 102,15,113,210,8 // psrlw $0x8,%xmm2
- .byte 102,15,111,5,204,123,0,0 // movdqa 0x7bcc(%rip),%xmm0 # 39a90 <_sk_srcover_bgra_8888_sse2_lowp+0x138c>
+ .byte 102,15,111,5,188,123,0,0 // movdqa 0x7bbc(%rip),%xmm0 # 39bf0 <_sk_srcover_bgra_8888_sse2_lowp+0x137c>
.byte 102,15,219,208 // pand %xmm0,%xmm2
.byte 102,65,15,111,216 // movdqa %xmm8,%xmm3
.byte 102,15,113,211,4 // psrlw $0x4,%xmm3
@@ -58986,18 +59056,18 @@ _sk_load_a8_sse41_lowp:
.byte 36,7 // and $0x7,%al
.byte 254,200 // dec %al
.byte 60,6 // cmp $0x6,%al
- .byte 119,34 // ja 31f56 <_sk_load_a8_sse41_lowp+0x39>
+ .byte 119,34 // ja 320c6 <_sk_load_a8_sse41_lowp+0x39>
.byte 102,15,239,192 // pxor %xmm0,%xmm0
.byte 15,182,192 // movzbl %al,%eax
- .byte 76,141,13,146,0,0,0 // lea 0x92(%rip),%r9 # 31fd4 <_sk_load_a8_sse41_lowp+0xb7>
+ .byte 76,141,13,146,0,0,0 // lea 0x92(%rip),%r9 # 32144 <_sk_load_a8_sse41_lowp+0xb7>
.byte 73,99,4,129 // movslq (%r9,%rax,4),%rax
.byte 76,1,200 // add %r9,%rax
.byte 255,224 // jmpq *%rax
.byte 65,15,182,4,16 // movzbl (%r8,%rdx,1),%eax
.byte 102,15,110,216 // movd %eax,%xmm3
- .byte 235,100 // jmp 31fba <_sk_load_a8_sse41_lowp+0x9d>
+ .byte 235,100 // jmp 3212a <_sk_load_a8_sse41_lowp+0x9d>
.byte 102,65,15,56,48,28,16 // pmovzxbw (%r8,%rdx,1),%xmm3
- .byte 235,91 // jmp 31fba <_sk_load_a8_sse41_lowp+0x9d>
+ .byte 235,91 // jmp 3212a <_sk_load_a8_sse41_lowp+0x9d>
.byte 65,15,182,68,16,2 // movzbl 0x2(%r8,%rdx,1),%eax
.byte 102,15,239,192 // pxor %xmm0,%xmm0
.byte 102,15,196,192,2 // pinsrw $0x2,%eax,%xmm0
@@ -59005,7 +59075,7 @@ _sk_load_a8_sse41_lowp:
.byte 102,15,110,200 // movd %eax,%xmm1
.byte 102,15,56,48,217 // pmovzxbw %xmm1,%xmm3
.byte 102,15,58,14,216,252 // pblendw $0xfc,%xmm0,%xmm3
- .byte 235,54 // jmp 31fba <_sk_load_a8_sse41_lowp+0x9d>
+ .byte 235,54 // jmp 3212a <_sk_load_a8_sse41_lowp+0x9d>
.byte 65,15,182,68,16,6 // movzbl 0x6(%r8,%rdx,1),%eax
.byte 102,15,239,192 // pxor %xmm0,%xmm0
.byte 102,15,196,192,6 // pinsrw $0x6,%eax,%xmm0
@@ -59016,14 +59086,14 @@ _sk_load_a8_sse41_lowp:
.byte 102,65,15,110,12,16 // movd (%r8,%rdx,1),%xmm1
.byte 102,15,56,48,217 // pmovzxbw %xmm1,%xmm3
.byte 102,15,58,14,216,240 // pblendw $0xf0,%xmm0,%xmm3
- .byte 102,15,219,29,206,114,0,0 // pand 0x72ce(%rip),%xmm3 # 39290 <_sk_srcover_bgra_8888_sse2_lowp+0xb8c>
+ .byte 102,15,219,29,190,114,0,0 // pand 0x72be(%rip),%xmm3 # 393f0 <_sk_srcover_bgra_8888_sse2_lowp+0xb7c>
.byte 72,173 // lods %ds:(%rsi),%rax
.byte 102,15,239,192 // pxor %xmm0,%xmm0
.byte 102,15,239,201 // pxor %xmm1,%xmm1
.byte 15,87,210 // xorps %xmm2,%xmm2
.byte 255,224 // jmpq *%rax
.byte 15,31,0 // nopl (%rax)
- .byte 119,255 // ja 31fd5 <_sk_load_a8_sse41_lowp+0xb8>
+ .byte 119,255 // ja 32145 <_sk_load_a8_sse41_lowp+0xb8>
.byte 255 // (bad)
.byte 255,154,255,255,255,139 // lcall *-0x74000001(%rdx)
.byte 255 // (bad)
@@ -59052,18 +59122,18 @@ _sk_load_a8_dst_sse41_lowp:
.byte 36,7 // and $0x7,%al
.byte 254,200 // dec %al
.byte 60,6 // cmp $0x6,%al
- .byte 119,34 // ja 32029 <_sk_load_a8_dst_sse41_lowp+0x39>
+ .byte 119,34 // ja 32199 <_sk_load_a8_dst_sse41_lowp+0x39>
.byte 102,15,239,228 // pxor %xmm4,%xmm4
.byte 15,182,192 // movzbl %al,%eax
- .byte 76,141,13,143,0,0,0 // lea 0x8f(%rip),%r9 # 320a4 <_sk_load_a8_dst_sse41_lowp+0xb4>
+ .byte 76,141,13,143,0,0,0 // lea 0x8f(%rip),%r9 # 32214 <_sk_load_a8_dst_sse41_lowp+0xb4>
.byte 73,99,4,129 // movslq (%r9,%rax,4),%rax
.byte 76,1,200 // add %r9,%rax
.byte 255,224 // jmpq *%rax
.byte 65,15,182,4,16 // movzbl (%r8,%rdx,1),%eax
.byte 102,15,110,248 // movd %eax,%xmm7
- .byte 235,100 // jmp 3208d <_sk_load_a8_dst_sse41_lowp+0x9d>
+ .byte 235,100 // jmp 321fd <_sk_load_a8_dst_sse41_lowp+0x9d>
.byte 102,65,15,56,48,60,16 // pmovzxbw (%r8,%rdx,1),%xmm7
- .byte 235,91 // jmp 3208d <_sk_load_a8_dst_sse41_lowp+0x9d>
+ .byte 235,91 // jmp 321fd <_sk_load_a8_dst_sse41_lowp+0x9d>
.byte 65,15,182,68,16,2 // movzbl 0x2(%r8,%rdx,1),%eax
.byte 102,15,239,228 // pxor %xmm4,%xmm4
.byte 102,15,196,224,2 // pinsrw $0x2,%eax,%xmm4
@@ -59071,7 +59141,7 @@ _sk_load_a8_dst_sse41_lowp:
.byte 102,15,110,232 // movd %eax,%xmm5
.byte 102,15,56,48,253 // pmovzxbw %xmm5,%xmm7
.byte 102,15,58,14,252,252 // pblendw $0xfc,%xmm4,%xmm7
- .byte 235,54 // jmp 3208d <_sk_load_a8_dst_sse41_lowp+0x9d>
+ .byte 235,54 // jmp 321fd <_sk_load_a8_dst_sse41_lowp+0x9d>
.byte 65,15,182,68,16,6 // movzbl 0x6(%r8,%rdx,1),%eax
.byte 102,15,239,228 // pxor %xmm4,%xmm4
.byte 102,15,196,224,6 // pinsrw $0x6,%eax,%xmm4
@@ -59082,13 +59152,13 @@ _sk_load_a8_dst_sse41_lowp:
.byte 102,65,15,110,44,16 // movd (%r8,%rdx,1),%xmm5
.byte 102,15,56,48,253 // pmovzxbw %xmm5,%xmm7
.byte 102,15,58,14,252,240 // pblendw $0xf0,%xmm4,%xmm7
- .byte 102,15,219,61,251,113,0,0 // pand 0x71fb(%rip),%xmm7 # 39290 <_sk_srcover_bgra_8888_sse2_lowp+0xb8c>
+ .byte 102,15,219,61,235,113,0,0 // pand 0x71eb(%rip),%xmm7 # 393f0 <_sk_srcover_bgra_8888_sse2_lowp+0xb7c>
.byte 72,173 // lods %ds:(%rsi),%rax
.byte 102,15,239,228 // pxor %xmm4,%xmm4
.byte 102,15,239,237 // pxor %xmm5,%xmm5
.byte 15,87,246 // xorps %xmm6,%xmm6
.byte 255,224 // jmpq *%rax
- .byte 122,255 // jp 320a5 <_sk_load_a8_dst_sse41_lowp+0xb5>
+ .byte 122,255 // jp 32215 <_sk_load_a8_dst_sse41_lowp+0xb5>
.byte 255 // (bad)
.byte 255,157,255,255,255,142 // lcall *-0x71000001(%rbp)
.byte 255 // (bad)
@@ -59119,28 +59189,28 @@ _sk_store_a8_sse41_lowp:
.byte 36,7 // and $0x7,%al
.byte 254,200 // dec %al
.byte 60,6 // cmp $0x6,%al
- .byte 119,29 // ja 320f4 <_sk_store_a8_sse41_lowp+0x34>
+ .byte 119,29 // ja 32264 <_sk_store_a8_sse41_lowp+0x34>
.byte 15,182,192 // movzbl %al,%eax
- .byte 76,141,13,131,0,0,0 // lea 0x83(%rip),%r9 # 32164 <_sk_store_a8_sse41_lowp+0xa4>
+ .byte 76,141,13,131,0,0,0 // lea 0x83(%rip),%r9 # 322d4 <_sk_store_a8_sse41_lowp+0xa4>
.byte 73,99,4,129 // movslq (%r9,%rax,4),%rax
.byte 76,1,200 // add %r9,%rax
.byte 255,224 // jmpq *%rax
.byte 102,65,15,58,20,28,16,0 // pextrb $0x0,%xmm3,(%r8,%rdx,1)
- .byte 235,105 // jmp 3215d <_sk_store_a8_sse41_lowp+0x9d>
+ .byte 235,105 // jmp 322cd <_sk_store_a8_sse41_lowp+0x9d>
.byte 102,68,15,111,195 // movdqa %xmm3,%xmm8
- .byte 102,68,15,56,0,5,221,120,0,0 // pshufb 0x78dd(%rip),%xmm8 # 399e0 <_sk_srcover_bgra_8888_sse2_lowp+0x12dc>
+ .byte 102,68,15,56,0,5,205,120,0,0 // pshufb 0x78cd(%rip),%xmm8 # 39b40 <_sk_srcover_bgra_8888_sse2_lowp+0x12cc>
.byte 102,69,15,214,4,16 // movq %xmm8,(%r8,%rdx,1)
- .byte 235,82 // jmp 3215d <_sk_store_a8_sse41_lowp+0x9d>
+ .byte 235,82 // jmp 322cd <_sk_store_a8_sse41_lowp+0x9d>
.byte 102,65,15,58,20,92,16,2,4 // pextrb $0x4,%xmm3,0x2(%r8,%rdx,1)
.byte 102,68,15,111,195 // movdqa %xmm3,%xmm8
- .byte 102,68,15,56,0,5,125,113,0,0 // pshufb 0x717d(%rip),%xmm8 # 392a0 <_sk_srcover_bgra_8888_sse2_lowp+0xb9c>
+ .byte 102,68,15,56,0,5,109,113,0,0 // pshufb 0x716d(%rip),%xmm8 # 39400 <_sk_srcover_bgra_8888_sse2_lowp+0xb8c>
.byte 102,69,15,58,21,4,16,0 // pextrw $0x0,%xmm8,(%r8,%rdx,1)
- .byte 235,48 // jmp 3215d <_sk_store_a8_sse41_lowp+0x9d>
+ .byte 235,48 // jmp 322cd <_sk_store_a8_sse41_lowp+0x9d>
.byte 102,65,15,58,20,92,16,6,12 // pextrb $0xc,%xmm3,0x6(%r8,%rdx,1)
.byte 102,65,15,58,20,92,16,5,10 // pextrb $0xa,%xmm3,0x5(%r8,%rdx,1)
.byte 102,65,15,58,20,92,16,4,8 // pextrb $0x8,%xmm3,0x4(%r8,%rdx,1)
.byte 102,68,15,111,195 // movdqa %xmm3,%xmm8
- .byte 102,68,15,56,0,5,89,113,0,0 // pshufb 0x7159(%rip),%xmm8 # 392b0 <_sk_srcover_bgra_8888_sse2_lowp+0xbac>
+ .byte 102,68,15,56,0,5,73,113,0,0 // pshufb 0x7149(%rip),%xmm8 # 39410 <_sk_srcover_bgra_8888_sse2_lowp+0xb9c>
.byte 102,69,15,126,4,16 // movd %xmm8,(%r8,%rdx,1)
.byte 72,173 // lods %ds:(%rsi),%rax
.byte 255,224 // jmpq *%rax
@@ -59253,18 +59323,18 @@ _sk_load_g8_sse41_lowp:
.byte 36,7 // and $0x7,%al
.byte 254,200 // dec %al
.byte 60,6 // cmp $0x6,%al
- .byte 119,34 // ja 322e6 <_sk_load_g8_sse41_lowp+0x39>
+ .byte 119,34 // ja 32456 <_sk_load_g8_sse41_lowp+0x39>
.byte 102,15,239,201 // pxor %xmm1,%xmm1
.byte 15,182,192 // movzbl %al,%eax
- .byte 76,141,13,150,0,0,0 // lea 0x96(%rip),%r9 # 32368 <_sk_load_g8_sse41_lowp+0xbb>
+ .byte 76,141,13,150,0,0,0 // lea 0x96(%rip),%r9 # 324d8 <_sk_load_g8_sse41_lowp+0xbb>
.byte 73,99,4,129 // movslq (%r9,%rax,4),%rax
.byte 76,1,200 // add %r9,%rax
.byte 255,224 // jmpq *%rax
.byte 65,15,182,4,16 // movzbl (%r8,%rdx,1),%eax
.byte 102,15,110,192 // movd %eax,%xmm0
- .byte 235,100 // jmp 3234a <_sk_load_g8_sse41_lowp+0x9d>
+ .byte 235,100 // jmp 324ba <_sk_load_g8_sse41_lowp+0x9d>
.byte 102,65,15,56,48,4,16 // pmovzxbw (%r8,%rdx,1),%xmm0
- .byte 235,91 // jmp 3234a <_sk_load_g8_sse41_lowp+0x9d>
+ .byte 235,91 // jmp 324ba <_sk_load_g8_sse41_lowp+0x9d>
.byte 65,15,182,68,16,2 // movzbl 0x2(%r8,%rdx,1),%eax
.byte 102,15,239,201 // pxor %xmm1,%xmm1
.byte 102,15,196,200,2 // pinsrw $0x2,%eax,%xmm1
@@ -59272,7 +59342,7 @@ _sk_load_g8_sse41_lowp:
.byte 102,15,110,192 // movd %eax,%xmm0
.byte 102,15,56,48,192 // pmovzxbw %xmm0,%xmm0
.byte 102,15,58,14,193,252 // pblendw $0xfc,%xmm1,%xmm0
- .byte 235,54 // jmp 3234a <_sk_load_g8_sse41_lowp+0x9d>
+ .byte 235,54 // jmp 324ba <_sk_load_g8_sse41_lowp+0x9d>
.byte 65,15,182,68,16,6 // movzbl 0x6(%r8,%rdx,1),%eax
.byte 102,15,239,201 // pxor %xmm1,%xmm1
.byte 102,15,196,200,6 // pinsrw $0x6,%eax,%xmm1
@@ -59283,14 +59353,14 @@ _sk_load_g8_sse41_lowp:
.byte 102,65,15,110,4,16 // movd (%r8,%rdx,1),%xmm0
.byte 102,15,56,48,192 // pmovzxbw %xmm0,%xmm0
.byte 102,15,58,14,193,240 // pblendw $0xf0,%xmm1,%xmm0
- .byte 102,15,219,5,62,111,0,0 // pand 0x6f3e(%rip),%xmm0 # 39290 <_sk_srcover_bgra_8888_sse2_lowp+0xb8c>
+ .byte 102,15,219,5,46,111,0,0 // pand 0x6f2e(%rip),%xmm0 # 393f0 <_sk_srcover_bgra_8888_sse2_lowp+0xb7c>
.byte 72,173 // lods %ds:(%rsi),%rax
- .byte 15,40,29,53,111,0,0 // movaps 0x6f35(%rip),%xmm3 # 39290 <_sk_srcover_bgra_8888_sse2_lowp+0xb8c>
+ .byte 15,40,29,37,111,0,0 // movaps 0x6f25(%rip),%xmm3 # 393f0 <_sk_srcover_bgra_8888_sse2_lowp+0xb7c>
.byte 102,15,111,200 // movdqa %xmm0,%xmm1
.byte 102,15,111,208 // movdqa %xmm0,%xmm2
.byte 255,224 // jmpq *%rax
.byte 15,31,0 // nopl (%rax)
- .byte 115,255 // jae 32369 <_sk_load_g8_sse41_lowp+0xbc>
+ .byte 115,255 // jae 324d9 <_sk_load_g8_sse41_lowp+0xbc>
.byte 255 // (bad)
.byte 255,150,255,255,255,135 // callq *-0x78000001(%rsi)
.byte 255 // (bad)
@@ -59319,18 +59389,18 @@ _sk_load_g8_dst_sse41_lowp:
.byte 36,7 // and $0x7,%al
.byte 254,200 // dec %al
.byte 60,6 // cmp $0x6,%al
- .byte 119,34 // ja 323bd <_sk_load_g8_dst_sse41_lowp+0x39>
+ .byte 119,34 // ja 3252d <_sk_load_g8_dst_sse41_lowp+0x39>
.byte 102,15,239,237 // pxor %xmm5,%xmm5
.byte 15,182,192 // movzbl %al,%eax
- .byte 76,141,13,147,0,0,0 // lea 0x93(%rip),%r9 # 3243c <_sk_load_g8_dst_sse41_lowp+0xb8>
+ .byte 76,141,13,147,0,0,0 // lea 0x93(%rip),%r9 # 325ac <_sk_load_g8_dst_sse41_lowp+0xb8>
.byte 73,99,4,129 // movslq (%r9,%rax,4),%rax
.byte 76,1,200 // add %r9,%rax
.byte 255,224 // jmpq *%rax
.byte 65,15,182,4,16 // movzbl (%r8,%rdx,1),%eax
.byte 102,15,110,224 // movd %eax,%xmm4
- .byte 235,100 // jmp 32421 <_sk_load_g8_dst_sse41_lowp+0x9d>
+ .byte 235,100 // jmp 32591 <_sk_load_g8_dst_sse41_lowp+0x9d>
.byte 102,65,15,56,48,36,16 // pmovzxbw (%r8,%rdx,1),%xmm4
- .byte 235,91 // jmp 32421 <_sk_load_g8_dst_sse41_lowp+0x9d>
+ .byte 235,91 // jmp 32591 <_sk_load_g8_dst_sse41_lowp+0x9d>
.byte 65,15,182,68,16,2 // movzbl 0x2(%r8,%rdx,1),%eax
.byte 102,15,239,237 // pxor %xmm5,%xmm5
.byte 102,15,196,232,2 // pinsrw $0x2,%eax,%xmm5
@@ -59338,7 +59408,7 @@ _sk_load_g8_dst_sse41_lowp:
.byte 102,15,110,224 // movd %eax,%xmm4
.byte 102,15,56,48,228 // pmovzxbw %xmm4,%xmm4
.byte 102,15,58,14,229,252 // pblendw $0xfc,%xmm5,%xmm4
- .byte 235,54 // jmp 32421 <_sk_load_g8_dst_sse41_lowp+0x9d>
+ .byte 235,54 // jmp 32591 <_sk_load_g8_dst_sse41_lowp+0x9d>
.byte 65,15,182,68,16,6 // movzbl 0x6(%r8,%rdx,1),%eax
.byte 102,15,239,237 // pxor %xmm5,%xmm5
.byte 102,15,196,232,6 // pinsrw $0x6,%eax,%xmm5
@@ -59349,13 +59419,13 @@ _sk_load_g8_dst_sse41_lowp:
.byte 102,65,15,110,36,16 // movd (%r8,%rdx,1),%xmm4
.byte 102,15,56,48,228 // pmovzxbw %xmm4,%xmm4
.byte 102,15,58,14,229,240 // pblendw $0xf0,%xmm5,%xmm4
- .byte 102,15,219,37,103,110,0,0 // pand 0x6e67(%rip),%xmm4 # 39290 <_sk_srcover_bgra_8888_sse2_lowp+0xb8c>
+ .byte 102,15,219,37,87,110,0,0 // pand 0x6e57(%rip),%xmm4 # 393f0 <_sk_srcover_bgra_8888_sse2_lowp+0xb7c>
.byte 72,173 // lods %ds:(%rsi),%rax
- .byte 15,40,61,94,110,0,0 // movaps 0x6e5e(%rip),%xmm7 # 39290 <_sk_srcover_bgra_8888_sse2_lowp+0xb8c>
+ .byte 15,40,61,78,110,0,0 // movaps 0x6e4e(%rip),%xmm7 # 393f0 <_sk_srcover_bgra_8888_sse2_lowp+0xb7c>
.byte 102,15,111,236 // movdqa %xmm4,%xmm5
.byte 102,15,111,244 // movdqa %xmm4,%xmm6
.byte 255,224 // jmpq *%rax
- .byte 118,255 // jbe 3243d <_sk_load_g8_dst_sse41_lowp+0xb9>
+ .byte 118,255 // jbe 325ad <_sk_load_g8_dst_sse41_lowp+0xb9>
.byte 255 // (bad)
.byte 255,153,255,255,255,138 // lcall *-0x75000001(%rcx)
.byte 255 // (bad)
@@ -59377,10 +59447,10 @@ HIDDEN _sk_luminance_to_alpha_sse41_lowp
FUNCTION(_sk_luminance_to_alpha_sse41_lowp)
_sk_luminance_to_alpha_sse41_lowp:
.byte 102,15,111,218 // movdqa %xmm2,%xmm3
- .byte 102,15,213,5,108,118,0,0 // pmullw 0x766c(%rip),%xmm0 # 39ad0 <_sk_srcover_bgra_8888_sse2_lowp+0x13cc>
- .byte 102,15,213,13,116,118,0,0 // pmullw 0x7674(%rip),%xmm1 # 39ae0 <_sk_srcover_bgra_8888_sse2_lowp+0x13dc>
+ .byte 102,15,213,5,92,118,0,0 // pmullw 0x765c(%rip),%xmm0 # 39c30 <_sk_srcover_bgra_8888_sse2_lowp+0x13bc>
+ .byte 102,15,213,13,100,118,0,0 // pmullw 0x7664(%rip),%xmm1 # 39c40 <_sk_srcover_bgra_8888_sse2_lowp+0x13cc>
.byte 102,15,253,200 // paddw %xmm0,%xmm1
- .byte 102,15,213,29,120,118,0,0 // pmullw 0x7678(%rip),%xmm3 # 39af0 <_sk_srcover_bgra_8888_sse2_lowp+0x13ec>
+ .byte 102,15,213,29,104,118,0,0 // pmullw 0x7668(%rip),%xmm3 # 39c50 <_sk_srcover_bgra_8888_sse2_lowp+0x13dc>
.byte 102,15,253,217 // paddw %xmm1,%xmm3
.byte 102,15,113,211,8 // psrlw $0x8,%xmm3
.byte 72,173 // lods %ds:(%rsi),%rax
@@ -59456,7 +59526,7 @@ _sk_gather_g8_sse41_lowp:
.byte 102,15,58,32,192,7 // pinsrb $0x7,%eax,%xmm0
.byte 102,15,56,48,192 // pmovzxbw %xmm0,%xmm0
.byte 72,173 // lods %ds:(%rsi),%rax
- .byte 15,40,29,225,108,0,0 // movaps 0x6ce1(%rip),%xmm3 # 39290 <_sk_srcover_bgra_8888_sse2_lowp+0xb8c>
+ .byte 15,40,29,209,108,0,0 // movaps 0x6cd1(%rip),%xmm3 # 393f0 <_sk_srcover_bgra_8888_sse2_lowp+0xb7c>
.byte 102,15,111,200 // movdqa %xmm0,%xmm1
.byte 102,15,111,208 // movdqa %xmm0,%xmm2
.byte 91 // pop %rbx
@@ -59472,14 +59542,14 @@ FUNCTION(_sk_scale_1_float_sse41_lowp)
_sk_scale_1_float_sse41_lowp:
.byte 72,173 // lods %ds:(%rsi),%rax
.byte 243,68,15,16,0 // movss (%rax),%xmm8
- .byte 243,68,15,89,5,15,100,0,0 // mulss 0x640f(%rip),%xmm8 # 389e0 <_sk_srcover_bgra_8888_sse2_lowp+0x2dc>
- .byte 243,68,15,88,5,214,99,0,0 // addss 0x63d6(%rip),%xmm8 # 389b0 <_sk_srcover_bgra_8888_sse2_lowp+0x2ac>
+ .byte 243,68,15,89,5,15,100,0,0 // mulss 0x640f(%rip),%xmm8 # 38b50 <_sk_srcover_bgra_8888_sse2_lowp+0x2dc>
+ .byte 243,68,15,88,5,214,99,0,0 // addss 0x63d6(%rip),%xmm8 # 38b20 <_sk_srcover_bgra_8888_sse2_lowp+0x2ac>
.byte 243,65,15,44,192 // cvttss2si %xmm8,%eax
.byte 102,68,15,110,192 // movd %eax,%xmm8
.byte 242,69,15,112,192,0 // pshuflw $0x0,%xmm8,%xmm8
.byte 102,69,15,112,192,80 // pshufd $0x50,%xmm8,%xmm8
.byte 102,65,15,213,192 // pmullw %xmm8,%xmm0
- .byte 102,68,15,111,13,146,108,0,0 // movdqa 0x6c92(%rip),%xmm9 # 39290 <_sk_srcover_bgra_8888_sse2_lowp+0xb8c>
+ .byte 102,68,15,111,13,130,108,0,0 // movdqa 0x6c82(%rip),%xmm9 # 393f0 <_sk_srcover_bgra_8888_sse2_lowp+0xb7c>
.byte 102,65,15,253,193 // paddw %xmm9,%xmm0
.byte 102,15,113,208,8 // psrlw $0x8,%xmm0
.byte 102,65,15,213,200 // pmullw %xmm8,%xmm1
@@ -59500,13 +59570,13 @@ FUNCTION(_sk_lerp_1_float_sse41_lowp)
_sk_lerp_1_float_sse41_lowp:
.byte 72,173 // lods %ds:(%rsi),%rax
.byte 243,68,15,16,0 // movss (%rax),%xmm8
- .byte 243,68,15,89,5,151,99,0,0 // mulss 0x6397(%rip),%xmm8 # 389e0 <_sk_srcover_bgra_8888_sse2_lowp+0x2dc>
- .byte 243,68,15,88,5,94,99,0,0 // addss 0x635e(%rip),%xmm8 # 389b0 <_sk_srcover_bgra_8888_sse2_lowp+0x2ac>
+ .byte 243,68,15,89,5,151,99,0,0 // mulss 0x6397(%rip),%xmm8 # 38b50 <_sk_srcover_bgra_8888_sse2_lowp+0x2dc>
+ .byte 243,68,15,88,5,94,99,0,0 // addss 0x635e(%rip),%xmm8 # 38b20 <_sk_srcover_bgra_8888_sse2_lowp+0x2ac>
.byte 243,65,15,44,192 // cvttss2si %xmm8,%eax
.byte 102,68,15,110,192 // movd %eax,%xmm8
.byte 242,69,15,112,192,0 // pshuflw $0x0,%xmm8,%xmm8
.byte 102,69,15,112,192,80 // pshufd $0x50,%xmm8,%xmm8
- .byte 102,68,15,111,13,31,108,0,0 // movdqa 0x6c1f(%rip),%xmm9 # 39290 <_sk_srcover_bgra_8888_sse2_lowp+0xb8c>
+ .byte 102,68,15,111,13,15,108,0,0 // movdqa 0x6c0f(%rip),%xmm9 # 393f0 <_sk_srcover_bgra_8888_sse2_lowp+0xb7c>
.byte 102,65,15,213,192 // pmullw %xmm8,%xmm0
.byte 102,65,15,253,193 // paddw %xmm9,%xmm0
.byte 102,65,15,213,200 // pmullw %xmm8,%xmm1
@@ -59546,18 +59616,18 @@ _sk_scale_u8_sse41_lowp:
.byte 36,7 // and $0x7,%al
.byte 254,200 // dec %al
.byte 60,6 // cmp $0x6,%al
- .byte 119,36 // ja 32728 <_sk_scale_u8_sse41_lowp+0x3b>
+ .byte 119,36 // ja 32898 <_sk_scale_u8_sse41_lowp+0x3b>
.byte 102,69,15,239,201 // pxor %xmm9,%xmm9
.byte 15,182,192 // movzbl %al,%eax
- .byte 76,141,13,221,0,0,0 // lea 0xdd(%rip),%r9 # 327f0 <_sk_scale_u8_sse41_lowp+0x103>
+ .byte 76,141,13,221,0,0,0 // lea 0xdd(%rip),%r9 # 32960 <_sk_scale_u8_sse41_lowp+0x103>
.byte 73,99,4,129 // movslq (%r9,%rax,4),%rax
.byte 76,1,200 // add %r9,%rax
.byte 255,224 // jmpq *%rax
.byte 65,15,182,4,16 // movzbl (%r8,%rdx,1),%eax
.byte 102,68,15,110,192 // movd %eax,%xmm8
- .byte 235,111 // jmp 32797 <_sk_scale_u8_sse41_lowp+0xaa>
+ .byte 235,111 // jmp 32907 <_sk_scale_u8_sse41_lowp+0xaa>
.byte 102,69,15,56,48,4,16 // pmovzxbw (%r8,%rdx,1),%xmm8
- .byte 235,102 // jmp 32797 <_sk_scale_u8_sse41_lowp+0xaa>
+ .byte 235,102 // jmp 32907 <_sk_scale_u8_sse41_lowp+0xaa>
.byte 65,15,182,68,16,2 // movzbl 0x2(%r8,%rdx,1),%eax
.byte 102,69,15,239,201 // pxor %xmm9,%xmm9
.byte 102,68,15,196,200,2 // pinsrw $0x2,%eax,%xmm9
@@ -59565,7 +59635,7 @@ _sk_scale_u8_sse41_lowp:
.byte 102,68,15,110,192 // movd %eax,%xmm8
.byte 102,69,15,56,48,192 // pmovzxbw %xmm8,%xmm8
.byte 102,69,15,58,14,193,252 // pblendw $0xfc,%xmm9,%xmm8
- .byte 235,60 // jmp 32797 <_sk_scale_u8_sse41_lowp+0xaa>
+ .byte 235,60 // jmp 32907 <_sk_scale_u8_sse41_lowp+0xaa>
.byte 65,15,182,68,16,6 // movzbl 0x6(%r8,%rdx,1),%eax
.byte 102,69,15,239,201 // pxor %xmm9,%xmm9
.byte 102,68,15,196,200,6 // pinsrw $0x6,%eax,%xmm9
@@ -59576,9 +59646,9 @@ _sk_scale_u8_sse41_lowp:
.byte 102,69,15,110,4,16 // movd (%r8,%rdx,1),%xmm8
.byte 102,69,15,56,48,192 // pmovzxbw %xmm8,%xmm8
.byte 102,69,15,58,14,193,240 // pblendw $0xf0,%xmm9,%xmm8
- .byte 102,68,15,219,5,240,106,0,0 // pand 0x6af0(%rip),%xmm8 # 39290 <_sk_srcover_bgra_8888_sse2_lowp+0xb8c>
+ .byte 102,68,15,219,5,224,106,0,0 // pand 0x6ae0(%rip),%xmm8 # 393f0 <_sk_srcover_bgra_8888_sse2_lowp+0xb7c>
.byte 102,65,15,213,192 // pmullw %xmm8,%xmm0
- .byte 102,68,15,111,13,226,106,0,0 // movdqa 0x6ae2(%rip),%xmm9 # 39290 <_sk_srcover_bgra_8888_sse2_lowp+0xb8c>
+ .byte 102,68,15,111,13,210,106,0,0 // movdqa 0x6ad2(%rip),%xmm9 # 393f0 <_sk_srcover_bgra_8888_sse2_lowp+0xb7c>
.byte 102,65,15,253,193 // paddw %xmm9,%xmm0
.byte 102,15,113,208,8 // psrlw $0x8,%xmm0
.byte 102,65,15,213,200 // pmullw %xmm8,%xmm1
@@ -59603,7 +59673,7 @@ _sk_scale_u8_sse41_lowp:
.byte 255,148,255,255,255,136,255 // callq *-0x770001(%rdi,%rdi,8)
.byte 255 // (bad)
.byte 255 // (bad)
- .byte 124,255 // jl 32805 <_sk_scale_u8_sse41_lowp+0x118>
+ .byte 124,255 // jl 32975 <_sk_scale_u8_sse41_lowp+0x118>
.byte 255 // (bad)
.byte 255,107,255 // ljmp *-0x1(%rbx)
.byte 255 // (bad)
@@ -59621,18 +59691,18 @@ _sk_lerp_u8_sse41_lowp:
.byte 36,7 // and $0x7,%al
.byte 254,200 // dec %al
.byte 60,6 // cmp $0x6,%al
- .byte 119,36 // ja 32847 <_sk_lerp_u8_sse41_lowp+0x3b>
+ .byte 119,36 // ja 329b7 <_sk_lerp_u8_sse41_lowp+0x3b>
.byte 102,69,15,239,201 // pxor %xmm9,%xmm9
.byte 15,182,192 // movzbl %al,%eax
- .byte 76,141,13,30,1,0,0 // lea 0x11e(%rip),%r9 # 32950 <_sk_lerp_u8_sse41_lowp+0x144>
+ .byte 76,141,13,30,1,0,0 // lea 0x11e(%rip),%r9 # 32ac0 <_sk_lerp_u8_sse41_lowp+0x144>
.byte 73,99,4,129 // movslq (%r9,%rax,4),%rax
.byte 76,1,200 // add %r9,%rax
.byte 255,224 // jmpq *%rax
.byte 65,15,182,4,16 // movzbl (%r8,%rdx,1),%eax
.byte 102,68,15,110,192 // movd %eax,%xmm8
- .byte 235,111 // jmp 328b6 <_sk_lerp_u8_sse41_lowp+0xaa>
+ .byte 235,111 // jmp 32a26 <_sk_lerp_u8_sse41_lowp+0xaa>
.byte 102,69,15,56,48,4,16 // pmovzxbw (%r8,%rdx,1),%xmm8
- .byte 235,102 // jmp 328b6 <_sk_lerp_u8_sse41_lowp+0xaa>
+ .byte 235,102 // jmp 32a26 <_sk_lerp_u8_sse41_lowp+0xaa>
.byte 65,15,182,68,16,2 // movzbl 0x2(%r8,%rdx,1),%eax
.byte 102,69,15,239,201 // pxor %xmm9,%xmm9
.byte 102,68,15,196,200,2 // pinsrw $0x2,%eax,%xmm9
@@ -59640,7 +59710,7 @@ _sk_lerp_u8_sse41_lowp:
.byte 102,68,15,110,192 // movd %eax,%xmm8
.byte 102,69,15,56,48,192 // pmovzxbw %xmm8,%xmm8
.byte 102,69,15,58,14,193,252 // pblendw $0xfc,%xmm9,%xmm8
- .byte 235,60 // jmp 328b6 <_sk_lerp_u8_sse41_lowp+0xaa>
+ .byte 235,60 // jmp 32a26 <_sk_lerp_u8_sse41_lowp+0xaa>
.byte 65,15,182,68,16,6 // movzbl 0x6(%r8,%rdx,1),%eax
.byte 102,69,15,239,201 // pxor %xmm9,%xmm9
.byte 102,68,15,196,200,6 // pinsrw $0x6,%eax,%xmm9
@@ -59651,8 +59721,8 @@ _sk_lerp_u8_sse41_lowp:
.byte 102,69,15,110,4,16 // movd (%r8,%rdx,1),%xmm8
.byte 102,69,15,56,48,192 // pmovzxbw %xmm8,%xmm8
.byte 102,69,15,58,14,193,240 // pblendw $0xf0,%xmm9,%xmm8
- .byte 102,68,15,219,5,209,105,0,0 // pand 0x69d1(%rip),%xmm8 # 39290 <_sk_srcover_bgra_8888_sse2_lowp+0xb8c>
- .byte 102,68,15,111,21,200,105,0,0 // movdqa 0x69c8(%rip),%xmm10 # 39290 <_sk_srcover_bgra_8888_sse2_lowp+0xb8c>
+ .byte 102,68,15,219,5,193,105,0,0 // pand 0x69c1(%rip),%xmm8 # 393f0 <_sk_srcover_bgra_8888_sse2_lowp+0xb7c>
+ .byte 102,68,15,111,21,184,105,0,0 // movdqa 0x69b8(%rip),%xmm10 # 393f0 <_sk_srcover_bgra_8888_sse2_lowp+0xb7c>
.byte 102,69,15,111,200 // movdqa %xmm8,%xmm9
.byte 102,69,15,239,202 // pxor %xmm10,%xmm9
.byte 102,69,15,111,217 // movdqa %xmm9,%xmm11
@@ -59682,7 +59752,7 @@ _sk_lerp_u8_sse41_lowp:
.byte 102,65,15,111,216 // movdqa %xmm8,%xmm3
.byte 255,224 // jmpq *%rax
.byte 144 // nop
- .byte 235,254 // jmp 32950 <_sk_lerp_u8_sse41_lowp+0x144>
+ .byte 235,254 // jmp 32ac0 <_sk_lerp_u8_sse41_lowp+0x144>
.byte 255 // (bad)
.byte 255,17 // callq *(%rcx)
.byte 255 // (bad)
@@ -59716,23 +59786,23 @@ _sk_scale_565_sse41_lowp:
.byte 36,7 // and $0x7,%al
.byte 254,200 // dec %al
.byte 60,6 // cmp $0x6,%al
- .byte 119,35 // ja 329ae <_sk_scale_565_sse41_lowp+0x42>
+ .byte 119,35 // ja 32b1e <_sk_scale_565_sse41_lowp+0x42>
.byte 102,15,239,192 // pxor %xmm0,%xmm0
.byte 15,182,192 // movzbl %al,%eax
- .byte 76,141,13,111,1,0,0 // lea 0x16f(%rip),%r9 # 32b08 <_sk_scale_565_sse41_lowp+0x19c>
+ .byte 76,141,13,111,1,0,0 // lea 0x16f(%rip),%r9 # 32c78 <_sk_scale_565_sse41_lowp+0x19c>
.byte 73,99,4,129 // movslq (%r9,%rax,4),%rax
.byte 76,1,200 // add %r9,%rax
.byte 255,224 // jmpq *%rax
.byte 65,15,183,4,80 // movzwl (%r8,%rdx,2),%eax
.byte 102,68,15,110,200 // movd %eax,%xmm9
- .byte 235,76 // jmp 329fa <_sk_scale_565_sse41_lowp+0x8e>
+ .byte 235,76 // jmp 32b6a <_sk_scale_565_sse41_lowp+0x8e>
.byte 243,69,15,111,12,80 // movdqu (%r8,%rdx,2),%xmm9
- .byte 235,68 // jmp 329fa <_sk_scale_565_sse41_lowp+0x8e>
+ .byte 235,68 // jmp 32b6a <_sk_scale_565_sse41_lowp+0x8e>
.byte 102,15,239,192 // pxor %xmm0,%xmm0
.byte 102,65,15,196,68,80,4,2 // pinsrw $0x2,0x4(%r8,%rdx,2),%xmm0
.byte 102,69,15,110,12,80 // movd (%r8,%rdx,2),%xmm9
.byte 102,68,15,58,14,200,252 // pblendw $0xfc,%xmm0,%xmm9
- .byte 235,41 // jmp 329fa <_sk_scale_565_sse41_lowp+0x8e>
+ .byte 235,41 // jmp 32b6a <_sk_scale_565_sse41_lowp+0x8e>
.byte 102,15,239,192 // pxor %xmm0,%xmm0
.byte 102,65,15,196,68,80,12,6 // pinsrw $0x6,0xc(%r8,%rdx,2),%xmm0
.byte 102,65,15,196,68,80,10,5 // pinsrw $0x5,0xa(%r8,%rdx,2),%xmm0
@@ -59741,11 +59811,11 @@ _sk_scale_565_sse41_lowp:
.byte 102,68,15,58,14,200,240 // pblendw $0xf0,%xmm0,%xmm9
.byte 102,65,15,111,193 // movdqa %xmm9,%xmm0
.byte 102,15,113,208,8 // psrlw $0x8,%xmm0
- .byte 102,15,219,5,52,112,0,0 // pand 0x7034(%rip),%xmm0 # 39a40 <_sk_srcover_bgra_8888_sse2_lowp+0x133c>
+ .byte 102,15,219,5,36,112,0,0 // pand 0x7024(%rip),%xmm0 # 39ba0 <_sk_srcover_bgra_8888_sse2_lowp+0x132c>
.byte 102,69,15,111,209 // movdqa %xmm9,%xmm10
.byte 102,65,15,113,210,5 // psrlw $0x5,%xmm10
- .byte 102,68,15,219,21,48,112,0,0 // pand 0x7030(%rip),%xmm10 # 39a50 <_sk_srcover_bgra_8888_sse2_lowp+0x134c>
- .byte 102,68,15,111,29,55,112,0,0 // movdqa 0x7037(%rip),%xmm11 # 39a60 <_sk_srcover_bgra_8888_sse2_lowp+0x135c>
+ .byte 102,68,15,219,21,32,112,0,0 // pand 0x7020(%rip),%xmm10 # 39bb0 <_sk_srcover_bgra_8888_sse2_lowp+0x133c>
+ .byte 102,68,15,111,29,39,112,0,0 // movdqa 0x7027(%rip),%xmm11 # 39bc0 <_sk_srcover_bgra_8888_sse2_lowp+0x134c>
.byte 102,69,15,219,217 // pand %xmm9,%xmm11
.byte 102,65,15,113,209,13 // psrlw $0xd,%xmm9
.byte 102,68,15,235,200 // por %xmm0,%xmm9
@@ -59757,7 +59827,7 @@ _sk_scale_565_sse41_lowp:
.byte 102,15,113,240,3 // psllw $0x3,%xmm0
.byte 102,65,15,113,211,2 // psrlw $0x2,%xmm11
.byte 102,68,15,235,216 // por %xmm0,%xmm11
- .byte 102,15,111,5,149,111,0,0 // movdqa 0x6f95(%rip),%xmm0 # 39a00 <_sk_srcover_bgra_8888_sse2_lowp+0x12fc>
+ .byte 102,15,111,5,133,111,0,0 // movdqa 0x6f85(%rip),%xmm0 # 39b60 <_sk_srcover_bgra_8888_sse2_lowp+0x12ec>
.byte 102,68,15,111,227 // movdqa %xmm3,%xmm12
.byte 102,68,15,239,224 // pxor %xmm0,%xmm12
.byte 102,15,239,199 // pxor %xmm7,%xmm0
@@ -59770,7 +59840,7 @@ _sk_scale_565_sse41_lowp:
.byte 102,69,15,56,62,225 // pmaxuw %xmm9,%xmm12
.byte 102,69,15,56,16,229 // pblendvb %xmm0,%xmm13,%xmm12
.byte 102,69,15,213,200 // pmullw %xmm8,%xmm9
- .byte 102,15,111,5,221,103,0,0 // movdqa 0x67dd(%rip),%xmm0 # 39290 <_sk_srcover_bgra_8888_sse2_lowp+0xb8c>
+ .byte 102,15,111,5,205,103,0,0 // movdqa 0x67cd(%rip),%xmm0 # 393f0 <_sk_srcover_bgra_8888_sse2_lowp+0xb7c>
.byte 102,68,15,253,200 // paddw %xmm0,%xmm9
.byte 102,65,15,113,209,8 // psrlw $0x8,%xmm9
.byte 102,68,15,213,209 // pmullw %xmm1,%xmm10
@@ -59824,23 +59894,23 @@ _sk_lerp_565_sse41_lowp:
.byte 36,7 // and $0x7,%al
.byte 254,200 // dec %al
.byte 60,6 // cmp $0x6,%al
- .byte 119,35 // ja 32b66 <_sk_lerp_565_sse41_lowp+0x42>
+ .byte 119,35 // ja 32cd6 <_sk_lerp_565_sse41_lowp+0x42>
.byte 102,15,239,192 // pxor %xmm0,%xmm0
.byte 15,182,192 // movzbl %al,%eax
- .byte 76,141,13,187,1,0,0 // lea 0x1bb(%rip),%r9 # 32d0c <_sk_lerp_565_sse41_lowp+0x1e8>
+ .byte 76,141,13,187,1,0,0 // lea 0x1bb(%rip),%r9 # 32e7c <_sk_lerp_565_sse41_lowp+0x1e8>
.byte 73,99,4,129 // movslq (%r9,%rax,4),%rax
.byte 76,1,200 // add %r9,%rax
.byte 255,224 // jmpq *%rax
.byte 65,15,183,4,80 // movzwl (%r8,%rdx,2),%eax
.byte 102,68,15,110,200 // movd %eax,%xmm9
- .byte 235,76 // jmp 32bb2 <_sk_lerp_565_sse41_lowp+0x8e>
+ .byte 235,76 // jmp 32d22 <_sk_lerp_565_sse41_lowp+0x8e>
.byte 243,69,15,111,12,80 // movdqu (%r8,%rdx,2),%xmm9
- .byte 235,68 // jmp 32bb2 <_sk_lerp_565_sse41_lowp+0x8e>
+ .byte 235,68 // jmp 32d22 <_sk_lerp_565_sse41_lowp+0x8e>
.byte 102,15,239,192 // pxor %xmm0,%xmm0
.byte 102,65,15,196,68,80,4,2 // pinsrw $0x2,0x4(%r8,%rdx,2),%xmm0
.byte 102,69,15,110,12,80 // movd (%r8,%rdx,2),%xmm9
.byte 102,68,15,58,14,200,252 // pblendw $0xfc,%xmm0,%xmm9
- .byte 235,41 // jmp 32bb2 <_sk_lerp_565_sse41_lowp+0x8e>
+ .byte 235,41 // jmp 32d22 <_sk_lerp_565_sse41_lowp+0x8e>
.byte 102,15,239,192 // pxor %xmm0,%xmm0
.byte 102,65,15,196,68,80,12,6 // pinsrw $0x6,0xc(%r8,%rdx,2),%xmm0
.byte 102,65,15,196,68,80,10,5 // pinsrw $0x5,0xa(%r8,%rdx,2),%xmm0
@@ -59849,11 +59919,11 @@ _sk_lerp_565_sse41_lowp:
.byte 102,68,15,58,14,200,240 // pblendw $0xf0,%xmm0,%xmm9
.byte 102,65,15,111,193 // movdqa %xmm9,%xmm0
.byte 102,15,113,208,8 // psrlw $0x8,%xmm0
- .byte 102,15,219,5,124,110,0,0 // pand 0x6e7c(%rip),%xmm0 # 39a40 <_sk_srcover_bgra_8888_sse2_lowp+0x133c>
+ .byte 102,15,219,5,108,110,0,0 // pand 0x6e6c(%rip),%xmm0 # 39ba0 <_sk_srcover_bgra_8888_sse2_lowp+0x132c>
.byte 102,69,15,111,209 // movdqa %xmm9,%xmm10
.byte 102,65,15,113,210,5 // psrlw $0x5,%xmm10
- .byte 102,68,15,219,21,120,110,0,0 // pand 0x6e78(%rip),%xmm10 # 39a50 <_sk_srcover_bgra_8888_sse2_lowp+0x134c>
- .byte 102,68,15,111,29,127,110,0,0 // movdqa 0x6e7f(%rip),%xmm11 # 39a60 <_sk_srcover_bgra_8888_sse2_lowp+0x135c>
+ .byte 102,68,15,219,21,104,110,0,0 // pand 0x6e68(%rip),%xmm10 # 39bb0 <_sk_srcover_bgra_8888_sse2_lowp+0x133c>
+ .byte 102,68,15,111,29,111,110,0,0 // movdqa 0x6e6f(%rip),%xmm11 # 39bc0 <_sk_srcover_bgra_8888_sse2_lowp+0x134c>
.byte 102,69,15,219,217 // pand %xmm9,%xmm11
.byte 102,65,15,113,209,13 // psrlw $0xd,%xmm9
.byte 102,68,15,235,200 // por %xmm0,%xmm9
@@ -59865,7 +59935,7 @@ _sk_lerp_565_sse41_lowp:
.byte 102,15,113,240,3 // psllw $0x3,%xmm0
.byte 102,65,15,113,211,2 // psrlw $0x2,%xmm11
.byte 102,68,15,235,216 // por %xmm0,%xmm11
- .byte 102,15,111,5,221,109,0,0 // movdqa 0x6ddd(%rip),%xmm0 # 39a00 <_sk_srcover_bgra_8888_sse2_lowp+0x12fc>
+ .byte 102,15,111,5,205,109,0,0 // movdqa 0x6dcd(%rip),%xmm0 # 39b60 <_sk_srcover_bgra_8888_sse2_lowp+0x12ec>
.byte 102,68,15,111,227 // movdqa %xmm3,%xmm12
.byte 102,68,15,239,224 // pxor %xmm0,%xmm12
.byte 102,15,239,199 // pxor %xmm7,%xmm0
@@ -59877,7 +59947,7 @@ _sk_lerp_565_sse41_lowp:
.byte 102,69,15,56,62,227 // pmaxuw %xmm11,%xmm12
.byte 102,69,15,56,62,225 // pmaxuw %xmm9,%xmm12
.byte 102,69,15,56,16,229 // pblendvb %xmm0,%xmm13,%xmm12
- .byte 102,68,15,111,45,41,102,0,0 // movdqa 0x6629(%rip),%xmm13 # 39290 <_sk_srcover_bgra_8888_sse2_lowp+0xb8c>
+ .byte 102,68,15,111,45,25,102,0,0 // movdqa 0x6619(%rip),%xmm13 # 393f0 <_sk_srcover_bgra_8888_sse2_lowp+0xb7c>
.byte 102,65,15,111,193 // movdqa %xmm9,%xmm0
.byte 102,65,15,239,197 // pxor %xmm13,%xmm0
.byte 102,15,213,196 // pmullw %xmm4,%xmm0
@@ -59934,7 +60004,7 @@ _sk_clamp_x_1_sse41_lowp:
.byte 69,15,87,192 // xorps %xmm8,%xmm8
.byte 65,15,95,200 // maxps %xmm8,%xmm1
.byte 65,15,95,192 // maxps %xmm8,%xmm0
- .byte 68,15,40,5,180,101,0,0 // movaps 0x65b4(%rip),%xmm8 # 392f0 <_sk_srcover_bgra_8888_sse2_lowp+0xbec>
+ .byte 68,15,40,5,164,101,0,0 // movaps 0x65a4(%rip),%xmm8 # 39450 <_sk_srcover_bgra_8888_sse2_lowp+0xbdc>
.byte 65,15,93,192 // minps %xmm8,%xmm0
.byte 65,15,93,200 // minps %xmm8,%xmm1
.byte 72,173 // lods %ds:(%rsi),%rax
@@ -59951,7 +60021,7 @@ _sk_repeat_x_1_sse41_lowp:
.byte 69,15,87,192 // xorps %xmm8,%xmm8
.byte 65,15,95,200 // maxps %xmm8,%xmm1
.byte 65,15,95,192 // maxps %xmm8,%xmm0
- .byte 68,15,40,5,126,101,0,0 // movaps 0x657e(%rip),%xmm8 # 392f0 <_sk_srcover_bgra_8888_sse2_lowp+0xbec>
+ .byte 68,15,40,5,110,101,0,0 // movaps 0x656e(%rip),%xmm8 # 39450 <_sk_srcover_bgra_8888_sse2_lowp+0xbdc>
.byte 65,15,93,192 // minps %xmm8,%xmm0
.byte 65,15,93,200 // minps %xmm8,%xmm1
.byte 72,173 // lods %ds:(%rsi),%rax
@@ -59961,10 +60031,10 @@ HIDDEN _sk_mirror_x_1_sse41_lowp
.globl _sk_mirror_x_1_sse41_lowp
FUNCTION(_sk_mirror_x_1_sse41_lowp)
_sk_mirror_x_1_sse41_lowp:
- .byte 68,15,40,5,202,101,0,0 // movaps 0x65ca(%rip),%xmm8 # 39350 <_sk_srcover_bgra_8888_sse2_lowp+0xc4c>
+ .byte 68,15,40,5,186,101,0,0 // movaps 0x65ba(%rip),%xmm8 # 394b0 <_sk_srcover_bgra_8888_sse2_lowp+0xc3c>
.byte 65,15,88,192 // addps %xmm8,%xmm0
.byte 65,15,88,200 // addps %xmm8,%xmm1
- .byte 68,15,40,13,74,101,0,0 // movaps 0x654a(%rip),%xmm9 # 392e0 <_sk_srcover_bgra_8888_sse2_lowp+0xbdc>
+ .byte 68,15,40,13,58,101,0,0 // movaps 0x653a(%rip),%xmm9 # 39440 <_sk_srcover_bgra_8888_sse2_lowp+0xbcc>
.byte 68,15,40,209 // movaps %xmm1,%xmm10
.byte 69,15,89,209 // mulps %xmm9,%xmm10
.byte 68,15,89,200 // mulps %xmm0,%xmm9
@@ -59976,13 +60046,13 @@ _sk_mirror_x_1_sse41_lowp:
.byte 65,15,92,202 // subps %xmm10,%xmm1
.byte 65,15,88,200 // addps %xmm8,%xmm1
.byte 65,15,88,192 // addps %xmm8,%xmm0
- .byte 68,15,40,5,64,106,0,0 // movaps 0x6a40(%rip),%xmm8 # 39810 <_sk_srcover_bgra_8888_sse2_lowp+0x110c>
+ .byte 68,15,40,5,48,106,0,0 // movaps 0x6a30(%rip),%xmm8 # 39970 <_sk_srcover_bgra_8888_sse2_lowp+0x10fc>
.byte 65,15,84,192 // andps %xmm8,%xmm0
.byte 65,15,84,200 // andps %xmm8,%xmm1
.byte 69,15,87,192 // xorps %xmm8,%xmm8
.byte 65,15,95,200 // maxps %xmm8,%xmm1
.byte 65,15,95,192 // maxps %xmm8,%xmm0
- .byte 68,15,40,5,4,101,0,0 // movaps 0x6504(%rip),%xmm8 # 392f0 <_sk_srcover_bgra_8888_sse2_lowp+0xbec>
+ .byte 68,15,40,5,244,100,0,0 // movaps 0x64f4(%rip),%xmm8 # 39450 <_sk_srcover_bgra_8888_sse2_lowp+0xbdc>
.byte 65,15,93,192 // minps %xmm8,%xmm0
.byte 65,15,93,200 // minps %xmm8,%xmm1
.byte 72,173 // lods %ds:(%rsi),%rax
@@ -60009,7 +60079,7 @@ _sk_gradient_sse41_lowp:
.byte 102,69,15,239,201 // pxor %xmm9,%xmm9
.byte 72,131,249,2 // cmp $0x2,%rcx
.byte 102,15,239,219 // pxor %xmm3,%xmm3
- .byte 114,57 // jb 32e6d <_sk_gradient_sse41_lowp+0x75>
+ .byte 114,57 // jb 32fdd <_sk_gradient_sse41_lowp+0x75>
.byte 72,139,80,72 // mov 0x48(%rax),%rdx
.byte 72,255,201 // dec %rcx
.byte 72,131,194,4 // add $0x4,%rdx
@@ -60024,7 +60094,7 @@ _sk_gradient_sse41_lowp:
.byte 102,15,250,218 // psubd %xmm2,%xmm3
.byte 72,131,194,4 // add $0x4,%rdx
.byte 72,255,201 // dec %rcx
- .byte 117,219 // jne 32e48 <_sk_gradient_sse41_lowp+0x50>
+ .byte 117,219 // jne 32fb8 <_sk_gradient_sse41_lowp+0x50>
.byte 102,72,15,58,22,219,1 // pextrq $0x1,%xmm3,%rbx
.byte 65,137,221 // mov %ebx,%r13d
.byte 72,193,235,32 // shr $0x20,%rbx
@@ -60103,14 +60173,14 @@ _sk_gradient_sse41_lowp:
.byte 69,15,88,239 // addps %xmm15,%xmm13
.byte 15,89,216 // mulps %xmm0,%xmm3
.byte 15,88,223 // addps %xmm7,%xmm3
- .byte 15,40,61,135,99,0,0 // movaps 0x6387(%rip),%xmm7 # 393d0 <_sk_srcover_bgra_8888_sse2_lowp+0xccc>
+ .byte 15,40,61,119,99,0,0 // movaps 0x6377(%rip),%xmm7 # 39530 <_sk_srcover_bgra_8888_sse2_lowp+0xcbc>
.byte 68,15,89,239 // mulps %xmm7,%xmm13
.byte 15,89,223 // mulps %xmm7,%xmm3
- .byte 68,15,40,61,136,98,0,0 // movaps 0x6288(%rip),%xmm15 # 392e0 <_sk_srcover_bgra_8888_sse2_lowp+0xbdc>
+ .byte 68,15,40,61,120,98,0,0 // movaps 0x6278(%rip),%xmm15 # 39440 <_sk_srcover_bgra_8888_sse2_lowp+0xbcc>
.byte 69,15,88,239 // addps %xmm15,%xmm13
.byte 65,15,88,223 // addps %xmm15,%xmm3
.byte 243,65,15,91,245 // cvttps2dq %xmm13,%xmm6
- .byte 102,68,15,111,45,162,105,0,0 // movdqa 0x69a2(%rip),%xmm13 # 39a10 <_sk_srcover_bgra_8888_sse2_lowp+0x130c>
+ .byte 102,68,15,111,45,146,105,0,0 // movdqa 0x6992(%rip),%xmm13 # 39b70 <_sk_srcover_bgra_8888_sse2_lowp+0x12fc>
.byte 102,65,15,56,0,245 // pshufb %xmm13,%xmm6
.byte 243,68,15,91,195 // cvttps2dq %xmm3,%xmm8
.byte 102,69,15,56,0,197 // pshufb %xmm13,%xmm8
@@ -60214,9 +60284,9 @@ _sk_evenly_spaced_gradient_sse41_lowp:
.byte 72,139,24 // mov (%rax),%rbx
.byte 72,139,104,8 // mov 0x8(%rax),%rbp
.byte 72,255,203 // dec %rbx
- .byte 120,7 // js 33246 <_sk_evenly_spaced_gradient_sse41_lowp+0x34>
+ .byte 120,7 // js 333b6 <_sk_evenly_spaced_gradient_sse41_lowp+0x34>
.byte 243,72,15,42,211 // cvtsi2ss %rbx,%xmm2
- .byte 235,21 // jmp 3325b <_sk_evenly_spaced_gradient_sse41_lowp+0x49>
+ .byte 235,21 // jmp 333cb <_sk_evenly_spaced_gradient_sse41_lowp+0x49>
.byte 73,137,216 // mov %rbx,%r8
.byte 73,209,232 // shr %r8
.byte 131,227,1 // and $0x1,%ebx
@@ -60354,14 +60424,14 @@ _sk_evenly_spaced_gradient_sse41_lowp:
.byte 15,40,100,36,176 // movaps -0x50(%rsp),%xmm4
.byte 68,15,89,196 // mulps %xmm4,%xmm8
.byte 68,15,88,192 // addps %xmm0,%xmm8
- .byte 15,40,5,100,94,0,0 // movaps 0x5e64(%rip),%xmm0 # 393d0 <_sk_srcover_bgra_8888_sse2_lowp+0xccc>
+ .byte 15,40,5,84,94,0,0 // movaps 0x5e54(%rip),%xmm0 # 39530 <_sk_srcover_bgra_8888_sse2_lowp+0xcbc>
.byte 15,89,216 // mulps %xmm0,%xmm3
.byte 68,15,89,192 // mulps %xmm0,%xmm8
- .byte 68,15,40,53,101,93,0,0 // movaps 0x5d65(%rip),%xmm14 # 392e0 <_sk_srcover_bgra_8888_sse2_lowp+0xbdc>
+ .byte 68,15,40,53,85,93,0,0 // movaps 0x5d55(%rip),%xmm14 # 39440 <_sk_srcover_bgra_8888_sse2_lowp+0xbcc>
.byte 69,15,88,198 // addps %xmm14,%xmm8
.byte 65,15,88,222 // addps %xmm14,%xmm3
.byte 243,15,91,219 // cvttps2dq %xmm3,%xmm3
- .byte 102,15,111,5,129,100,0,0 // movdqa 0x6481(%rip),%xmm0 # 39a10 <_sk_srcover_bgra_8888_sse2_lowp+0x130c>
+ .byte 102,15,111,5,113,100,0,0 // movdqa 0x6471(%rip),%xmm0 # 39b70 <_sk_srcover_bgra_8888_sse2_lowp+0x12fc>
.byte 102,15,56,0,216 // pshufb %xmm0,%xmm3
.byte 243,69,15,91,192 // cvttps2dq %xmm8,%xmm8
.byte 102,68,15,56,0,192 // pshufb %xmm0,%xmm8
@@ -60371,7 +60441,7 @@ _sk_evenly_spaced_gradient_sse41_lowp:
.byte 15,40,220 // movaps %xmm4,%xmm3
.byte 68,15,89,203 // mulps %xmm3,%xmm9
.byte 68,15,88,201 // addps %xmm1,%xmm9
- .byte 15,40,13,20,94,0,0 // movaps 0x5e14(%rip),%xmm1 # 393d0 <_sk_srcover_bgra_8888_sse2_lowp+0xccc>
+ .byte 15,40,13,4,94,0,0 // movaps 0x5e04(%rip),%xmm1 # 39530 <_sk_srcover_bgra_8888_sse2_lowp+0xcbc>
.byte 15,89,209 // mulps %xmm1,%xmm2
.byte 68,15,89,201 // mulps %xmm1,%xmm9
.byte 15,40,225 // movaps %xmm1,%xmm4
@@ -60440,14 +60510,14 @@ _sk_evenly_spaced_2_stop_gradient_sse41_lowp:
.byte 15,89,216 // mulps %xmm0,%xmm3
.byte 65,15,88,216 // addps %xmm8,%xmm3
.byte 65,15,88,208 // addps %xmm8,%xmm2
- .byte 68,15,40,21,14,93,0,0 // movaps 0x5d0e(%rip),%xmm10 # 393d0 <_sk_srcover_bgra_8888_sse2_lowp+0xccc>
+ .byte 68,15,40,21,254,92,0,0 // movaps 0x5cfe(%rip),%xmm10 # 39530 <_sk_srcover_bgra_8888_sse2_lowp+0xcbc>
.byte 65,15,89,210 // mulps %xmm10,%xmm2
.byte 65,15,89,218 // mulps %xmm10,%xmm3
- .byte 68,15,40,37,14,92,0,0 // movaps 0x5c0e(%rip),%xmm12 # 392e0 <_sk_srcover_bgra_8888_sse2_lowp+0xbdc>
+ .byte 68,15,40,37,254,91,0,0 // movaps 0x5bfe(%rip),%xmm12 # 39440 <_sk_srcover_bgra_8888_sse2_lowp+0xbcc>
.byte 65,15,88,220 // addps %xmm12,%xmm3
.byte 65,15,88,212 // addps %xmm12,%xmm2
.byte 243,15,91,210 // cvttps2dq %xmm2,%xmm2
- .byte 102,68,15,111,29,41,99,0,0 // movdqa 0x6329(%rip),%xmm11 # 39a10 <_sk_srcover_bgra_8888_sse2_lowp+0x130c>
+ .byte 102,68,15,111,29,25,99,0,0 // movdqa 0x6319(%rip),%xmm11 # 39b70 <_sk_srcover_bgra_8888_sse2_lowp+0x12fc>
.byte 102,65,15,56,0,211 // pshufb %xmm11,%xmm2
.byte 243,68,15,91,195 // cvttps2dq %xmm3,%xmm8
.byte 102,69,15,56,0,195 // pshufb %xmm11,%xmm8
@@ -60520,7 +60590,7 @@ _sk_xy_to_unit_angle_sse41_lowp:
.byte 15,40,251 // movaps %xmm3,%xmm7
.byte 15,40,242 // movaps %xmm2,%xmm6
.byte 68,15,40,192 // movaps %xmm0,%xmm8
- .byte 15,40,37,255,95,0,0 // movaps 0x5fff(%rip),%xmm4 # 39810 <_sk_srcover_bgra_8888_sse2_lowp+0x110c>
+ .byte 15,40,37,239,95,0,0 // movaps 0x5fef(%rip),%xmm4 # 39970 <_sk_srcover_bgra_8888_sse2_lowp+0x10fc>
.byte 68,15,40,233 // movaps %xmm1,%xmm13
.byte 68,15,84,236 // andps %xmm4,%xmm13
.byte 69,15,40,216 // movaps %xmm8,%xmm11
@@ -60545,18 +60615,18 @@ _sk_xy_to_unit_angle_sse41_lowp:
.byte 69,15,94,251 // divps %xmm11,%xmm15
.byte 65,15,40,199 // movaps %xmm15,%xmm0
.byte 15,89,192 // mulps %xmm0,%xmm0
- .byte 68,15,40,29,255,95,0,0 // movaps 0x5fff(%rip),%xmm11 # 39880 <_sk_srcover_bgra_8888_sse2_lowp+0x117c>
+ .byte 68,15,40,29,239,95,0,0 // movaps 0x5fef(%rip),%xmm11 # 399e0 <_sk_srcover_bgra_8888_sse2_lowp+0x116c>
.byte 68,15,40,224 // movaps %xmm0,%xmm12
.byte 69,15,89,227 // mulps %xmm11,%xmm12
- .byte 68,15,88,37,255,95,0,0 // addps 0x5fff(%rip),%xmm12 # 39890 <_sk_srcover_bgra_8888_sse2_lowp+0x118c>
+ .byte 68,15,88,37,239,95,0,0 // addps 0x5fef(%rip),%xmm12 # 399f0 <_sk_srcover_bgra_8888_sse2_lowp+0x117c>
.byte 68,15,89,224 // mulps %xmm0,%xmm12
- .byte 15,40,45,4,96,0,0 // movaps 0x6004(%rip),%xmm5 # 398a0 <_sk_srcover_bgra_8888_sse2_lowp+0x119c>
+ .byte 15,40,45,244,95,0,0 // movaps 0x5ff4(%rip),%xmm5 # 39a00 <_sk_srcover_bgra_8888_sse2_lowp+0x118c>
.byte 68,15,88,229 // addps %xmm5,%xmm12
.byte 68,15,89,224 // mulps %xmm0,%xmm12
- .byte 15,40,37,5,96,0,0 // movaps 0x6005(%rip),%xmm4 # 398b0 <_sk_srcover_bgra_8888_sse2_lowp+0x11ac>
+ .byte 15,40,37,245,95,0,0 // movaps 0x5ff5(%rip),%xmm4 # 39a10 <_sk_srcover_bgra_8888_sse2_lowp+0x119c>
.byte 68,15,88,228 // addps %xmm4,%xmm12
.byte 69,15,89,231 // mulps %xmm15,%xmm12
- .byte 15,40,21,6,96,0,0 // movaps 0x6006(%rip),%xmm2 # 398c0 <_sk_srcover_bgra_8888_sse2_lowp+0x11bc>
+ .byte 15,40,21,246,95,0,0 // movaps 0x5ff6(%rip),%xmm2 # 39a20 <_sk_srcover_bgra_8888_sse2_lowp+0x11ac>
.byte 15,40,218 // movaps %xmm2,%xmm3
.byte 65,15,92,212 // subps %xmm12,%xmm2
.byte 65,15,40,194 // movaps %xmm10,%xmm0
@@ -60565,7 +60635,7 @@ _sk_xy_to_unit_angle_sse41_lowp:
.byte 65,15,40,198 // movaps %xmm14,%xmm0
.byte 15,89,192 // mulps %xmm0,%xmm0
.byte 68,15,89,216 // mulps %xmm0,%xmm11
- .byte 68,15,88,29,174,95,0,0 // addps 0x5fae(%rip),%xmm11 # 39890 <_sk_srcover_bgra_8888_sse2_lowp+0x118c>
+ .byte 68,15,88,29,158,95,0,0 // addps 0x5f9e(%rip),%xmm11 # 399f0 <_sk_srcover_bgra_8888_sse2_lowp+0x117c>
.byte 68,15,89,216 // mulps %xmm0,%xmm11
.byte 68,15,88,221 // addps %xmm5,%xmm11
.byte 68,15,89,216 // mulps %xmm0,%xmm11
@@ -60576,7 +60646,7 @@ _sk_xy_to_unit_angle_sse41_lowp:
.byte 102,68,15,56,20,219 // blendvps %xmm0,%xmm3,%xmm11
.byte 69,15,87,201 // xorps %xmm9,%xmm9
.byte 69,15,194,193,1 // cmpltps %xmm9,%xmm8
- .byte 15,40,21,204,89,0,0 // movaps 0x59cc(%rip),%xmm2 # 392e0 <_sk_srcover_bgra_8888_sse2_lowp+0xbdc>
+ .byte 15,40,21,188,89,0,0 // movaps 0x59bc(%rip),%xmm2 # 39440 <_sk_srcover_bgra_8888_sse2_lowp+0xbcc>
.byte 15,40,218 // movaps %xmm2,%xmm3
.byte 65,15,92,212 // subps %xmm12,%xmm2
.byte 65,15,40,192 // movaps %xmm8,%xmm0
@@ -60587,7 +60657,7 @@ _sk_xy_to_unit_angle_sse41_lowp:
.byte 102,68,15,56,20,219 // blendvps %xmm0,%xmm3,%xmm11
.byte 15,40,198 // movaps %xmm6,%xmm0
.byte 65,15,194,193,1 // cmpltps %xmm9,%xmm0
- .byte 15,40,13,170,89,0,0 // movaps 0x59aa(%rip),%xmm1 # 392f0 <_sk_srcover_bgra_8888_sse2_lowp+0xbec>
+ .byte 15,40,13,154,89,0,0 // movaps 0x599a(%rip),%xmm1 # 39450 <_sk_srcover_bgra_8888_sse2_lowp+0xbdc>
.byte 15,40,209 // movaps %xmm1,%xmm2
.byte 65,15,92,204 // subps %xmm12,%xmm1
.byte 102,68,15,56,20,225 // blendvps %xmm0,%xmm1,%xmm12
@@ -60641,48 +60711,48 @@ _sk_srcover_rgba_8888_sse41_lowp:
.byte 65,254,202 // dec %r10b
.byte 69,15,182,202 // movzbl %r10b,%r9d
.byte 65,128,249,6 // cmp $0x6,%r9b
- .byte 119,38 // ja 33a12 <_sk_srcover_rgba_8888_sse41_lowp+0x4b>
+ .byte 119,38 // ja 33b82 <_sk_srcover_rgba_8888_sse41_lowp+0x4b>
.byte 102,69,15,239,192 // pxor %xmm8,%xmm8
- .byte 76,141,29,20,2,0,0 // lea 0x214(%rip),%r11 # 33c0c <_sk_srcover_rgba_8888_sse41_lowp+0x245>
+ .byte 76,141,29,20,2,0,0 // lea 0x214(%rip),%r11 # 33d7c <_sk_srcover_rgba_8888_sse41_lowp+0x245>
.byte 75,99,4,139 // movslq (%r11,%r9,4),%rax
.byte 76,1,216 // add %r11,%rax
.byte 102,15,239,228 // pxor %xmm4,%xmm4
.byte 255,224 // jmpq *%rax
.byte 102,65,15,110,52,144 // movd (%r8,%rdx,4),%xmm6
.byte 102,69,15,239,192 // pxor %xmm8,%xmm8
- .byte 235,89 // jmp 33a6b <_sk_srcover_rgba_8888_sse41_lowp+0xa4>
+ .byte 235,89 // jmp 33bdb <_sk_srcover_rgba_8888_sse41_lowp+0xa4>
.byte 243,65,15,111,52,144 // movdqu (%r8,%rdx,4),%xmm6
.byte 243,69,15,111,68,144,16 // movdqu 0x10(%r8,%rdx,4),%xmm8
- .byte 235,74 // jmp 33a6b <_sk_srcover_rgba_8888_sse41_lowp+0xa4>
+ .byte 235,74 // jmp 33bdb <_sk_srcover_rgba_8888_sse41_lowp+0xa4>
.byte 102,65,15,110,100,144,8 // movd 0x8(%r8,%rdx,4),%xmm4
.byte 102,68,15,112,196,69 // pshufd $0x45,%xmm4,%xmm8
.byte 102,15,239,228 // pxor %xmm4,%xmm4
.byte 243,65,15,126,52,144 // movq (%r8,%rdx,4),%xmm6
.byte 102,65,15,58,14,240,240 // pblendw $0xf0,%xmm8,%xmm6
.byte 102,68,15,111,196 // movdqa %xmm4,%xmm8
- .byte 235,37 // jmp 33a6b <_sk_srcover_rgba_8888_sse41_lowp+0xa4>
+ .byte 235,37 // jmp 33bdb <_sk_srcover_rgba_8888_sse41_lowp+0xa4>
.byte 102,65,15,110,100,144,24 // movd 0x18(%r8,%rdx,4),%xmm4
.byte 102,68,15,112,196,69 // pshufd $0x45,%xmm4,%xmm8
.byte 102,69,15,58,34,68,144,20,1 // pinsrd $0x1,0x14(%r8,%rdx,4),%xmm8
.byte 102,69,15,58,34,68,144,16,0 // pinsrd $0x0,0x10(%r8,%rdx,4),%xmm8
.byte 243,65,15,111,52,144 // movdqu (%r8,%rdx,4),%xmm6
- .byte 102,15,111,37,157,95,0,0 // movdqa 0x5f9d(%rip),%xmm4 # 39a10 <_sk_srcover_bgra_8888_sse2_lowp+0x130c>
+ .byte 102,15,111,37,141,95,0,0 // movdqa 0x5f8d(%rip),%xmm4 # 39b70 <_sk_srcover_bgra_8888_sse2_lowp+0x12fc>
.byte 102,15,111,238 // movdqa %xmm6,%xmm5
.byte 102,15,56,0,236 // pshufb %xmm4,%xmm5
.byte 102,65,15,111,248 // movdqa %xmm8,%xmm7
.byte 102,15,56,0,252 // pshufb %xmm4,%xmm7
.byte 102,15,108,239 // punpcklqdq %xmm7,%xmm5
- .byte 102,68,15,111,37,253,87,0,0 // movdqa 0x57fd(%rip),%xmm12 # 39290 <_sk_srcover_bgra_8888_sse2_lowp+0xb8c>
+ .byte 102,68,15,111,37,237,87,0,0 // movdqa 0x57ed(%rip),%xmm12 # 393f0 <_sk_srcover_bgra_8888_sse2_lowp+0xb7c>
.byte 102,15,111,229 // movdqa %xmm5,%xmm4
.byte 102,65,15,219,228 // pand %xmm12,%xmm4
.byte 102,15,113,213,8 // psrlw $0x8,%xmm5
- .byte 102,68,15,111,13,118,95,0,0 // movdqa 0x5f76(%rip),%xmm9 # 39a20 <_sk_srcover_bgra_8888_sse2_lowp+0x131c>
+ .byte 102,68,15,111,13,102,95,0,0 // movdqa 0x5f66(%rip),%xmm9 # 39b80 <_sk_srcover_bgra_8888_sse2_lowp+0x130c>
.byte 102,15,111,254 // movdqa %xmm6,%xmm7
.byte 102,65,15,56,0,249 // pshufb %xmm9,%xmm7
.byte 102,69,15,111,208 // movdqa %xmm8,%xmm10
.byte 102,69,15,56,0,209 // pshufb %xmm9,%xmm10
.byte 102,65,15,108,250 // punpcklqdq %xmm10,%xmm7
- .byte 102,68,15,111,13,99,95,0,0 // movdqa 0x5f63(%rip),%xmm9 # 39a30 <_sk_srcover_bgra_8888_sse2_lowp+0x132c>
+ .byte 102,68,15,111,13,83,95,0,0 // movdqa 0x5f53(%rip),%xmm9 # 39b90 <_sk_srcover_bgra_8888_sse2_lowp+0x131c>
.byte 102,65,15,56,0,241 // pshufb %xmm9,%xmm6
.byte 102,69,15,56,0,193 // pshufb %xmm9,%xmm8
.byte 102,65,15,108,240 // punpcklqdq %xmm8,%xmm6
@@ -60724,19 +60794,19 @@ _sk_srcover_rgba_8888_sse41_lowp:
.byte 102,15,97,195 // punpcklwd %xmm3,%xmm0
.byte 102,65,15,235,196 // por %xmm12,%xmm0
.byte 65,128,250,6 // cmp $0x6,%r10b
- .byte 119,24 // ja 33bb5 <_sk_srcover_rgba_8888_sse41_lowp+0x1ee>
- .byte 76,141,21,132,0,0,0 // lea 0x84(%rip),%r10 # 33c28 <_sk_srcover_rgba_8888_sse41_lowp+0x261>
+ .byte 119,24 // ja 33d25 <_sk_srcover_rgba_8888_sse41_lowp+0x1ee>
+ .byte 76,141,21,132,0,0,0 // lea 0x84(%rip),%r10 # 33d98 <_sk_srcover_rgba_8888_sse41_lowp+0x261>
.byte 75,99,4,138 // movslq (%r10,%r9,4),%rax
.byte 76,1,208 // add %r10,%rax
.byte 255,224 // jmpq *%rax
.byte 102,65,15,126,4,144 // movd %xmm0,(%r8,%rdx,4)
- .byte 235,63 // jmp 33bf4 <_sk_srcover_rgba_8888_sse41_lowp+0x22d>
+ .byte 235,63 // jmp 33d64 <_sk_srcover_rgba_8888_sse41_lowp+0x22d>
.byte 243,65,15,127,4,144 // movdqu %xmm0,(%r8,%rdx,4)
.byte 243,65,15,127,76,144,16 // movdqu %xmm1,0x10(%r8,%rdx,4)
- .byte 235,48 // jmp 33bf4 <_sk_srcover_rgba_8888_sse41_lowp+0x22d>
+ .byte 235,48 // jmp 33d64 <_sk_srcover_rgba_8888_sse41_lowp+0x22d>
.byte 102,65,15,58,22,68,144,8,2 // pextrd $0x2,%xmm0,0x8(%r8,%rdx,4)
.byte 102,65,15,214,4,144 // movq %xmm0,(%r8,%rdx,4)
- .byte 235,31 // jmp 33bf4 <_sk_srcover_rgba_8888_sse41_lowp+0x22d>
+ .byte 235,31 // jmp 33d64 <_sk_srcover_rgba_8888_sse41_lowp+0x22d>
.byte 102,65,15,58,22,76,144,24,2 // pextrd $0x2,%xmm1,0x18(%r8,%rdx,4)
.byte 102,65,15,58,22,76,144,20,1 // pextrd $0x1,%xmm1,0x14(%r8,%rdx,4)
.byte 102,65,15,126,76,144,16 // movd %xmm1,0x10(%r8,%rdx,4)
@@ -60753,7 +60823,7 @@ _sk_srcover_rgba_8888_sse41_lowp:
.byte 255,38 // jmpq *(%rsi)
.byte 254 // (bad)
.byte 255 // (bad)
- .byte 255,21,254,255,255,89 // callq *0x59fffffe(%rip) # 5a033c17 <_sk_srcover_bgra_8888_sse2_lowp+0x59ffb513>
+ .byte 255,21,254,255,255,89 // callq *0x59fffffe(%rip) # 5a033d87 <_sk_srcover_bgra_8888_sse2_lowp+0x59ffb513>
.byte 254 // (bad)
.byte 255 // (bad)
.byte 255,80,254 // callq *-0x2(%rax)
@@ -60792,48 +60862,48 @@ _sk_srcover_bgra_8888_sse41_lowp:
.byte 65,254,202 // dec %r10b
.byte 69,15,182,202 // movzbl %r10b,%r9d
.byte 65,128,249,6 // cmp $0x6,%r9b
- .byte 119,38 // ja 33c8f <_sk_srcover_bgra_8888_sse41_lowp+0x4b>
+ .byte 119,38 // ja 33dff <_sk_srcover_bgra_8888_sse41_lowp+0x4b>
.byte 102,69,15,239,192 // pxor %xmm8,%xmm8
- .byte 76,141,29,23,2,0,0 // lea 0x217(%rip),%r11 # 33e8c <_sk_srcover_bgra_8888_sse41_lowp+0x248>
+ .byte 76,141,29,23,2,0,0 // lea 0x217(%rip),%r11 # 33ffc <_sk_srcover_bgra_8888_sse41_lowp+0x248>
.byte 75,99,4,139 // movslq (%r11,%r9,4),%rax
.byte 76,1,216 // add %r11,%rax
.byte 102,15,239,237 // pxor %xmm5,%xmm5
.byte 255,224 // jmpq *%rax
.byte 102,65,15,110,36,144 // movd (%r8,%rdx,4),%xmm4
.byte 102,69,15,239,192 // pxor %xmm8,%xmm8
- .byte 235,89 // jmp 33ce8 <_sk_srcover_bgra_8888_sse41_lowp+0xa4>
+ .byte 235,89 // jmp 33e58 <_sk_srcover_bgra_8888_sse41_lowp+0xa4>
.byte 243,65,15,111,36,144 // movdqu (%r8,%rdx,4),%xmm4
.byte 243,69,15,111,68,144,16 // movdqu 0x10(%r8,%rdx,4),%xmm8
- .byte 235,74 // jmp 33ce8 <_sk_srcover_bgra_8888_sse41_lowp+0xa4>
+ .byte 235,74 // jmp 33e58 <_sk_srcover_bgra_8888_sse41_lowp+0xa4>
.byte 102,65,15,110,100,144,8 // movd 0x8(%r8,%rdx,4),%xmm4
.byte 102,68,15,112,196,69 // pshufd $0x45,%xmm4,%xmm8
.byte 102,15,239,237 // pxor %xmm5,%xmm5
.byte 243,65,15,126,36,144 // movq (%r8,%rdx,4),%xmm4
.byte 102,65,15,58,14,224,240 // pblendw $0xf0,%xmm8,%xmm4
.byte 102,68,15,111,197 // movdqa %xmm5,%xmm8
- .byte 235,37 // jmp 33ce8 <_sk_srcover_bgra_8888_sse41_lowp+0xa4>
+ .byte 235,37 // jmp 33e58 <_sk_srcover_bgra_8888_sse41_lowp+0xa4>
.byte 102,65,15,110,100,144,24 // movd 0x18(%r8,%rdx,4),%xmm4
.byte 102,68,15,112,196,69 // pshufd $0x45,%xmm4,%xmm8
.byte 102,69,15,58,34,68,144,20,1 // pinsrd $0x1,0x14(%r8,%rdx,4),%xmm8
.byte 102,69,15,58,34,68,144,16,0 // pinsrd $0x0,0x10(%r8,%rdx,4),%xmm8
.byte 243,65,15,111,36,144 // movdqu (%r8,%rdx,4),%xmm4
- .byte 102,15,111,53,32,93,0,0 // movdqa 0x5d20(%rip),%xmm6 # 39a10 <_sk_srcover_bgra_8888_sse2_lowp+0x130c>
+ .byte 102,15,111,53,16,93,0,0 // movdqa 0x5d10(%rip),%xmm6 # 39b70 <_sk_srcover_bgra_8888_sse2_lowp+0x12fc>
.byte 102,15,111,236 // movdqa %xmm4,%xmm5
.byte 102,15,56,0,238 // pshufb %xmm6,%xmm5
.byte 102,65,15,111,248 // movdqa %xmm8,%xmm7
.byte 102,15,56,0,254 // pshufb %xmm6,%xmm7
.byte 102,15,108,239 // punpcklqdq %xmm7,%xmm5
- .byte 102,68,15,111,37,128,85,0,0 // movdqa 0x5580(%rip),%xmm12 # 39290 <_sk_srcover_bgra_8888_sse2_lowp+0xb8c>
+ .byte 102,68,15,111,37,112,85,0,0 // movdqa 0x5570(%rip),%xmm12 # 393f0 <_sk_srcover_bgra_8888_sse2_lowp+0xb7c>
.byte 102,15,111,245 // movdqa %xmm5,%xmm6
.byte 102,65,15,219,244 // pand %xmm12,%xmm6
.byte 102,15,113,213,8 // psrlw $0x8,%xmm5
- .byte 102,68,15,111,13,249,92,0,0 // movdqa 0x5cf9(%rip),%xmm9 # 39a20 <_sk_srcover_bgra_8888_sse2_lowp+0x131c>
+ .byte 102,68,15,111,13,233,92,0,0 // movdqa 0x5ce9(%rip),%xmm9 # 39b80 <_sk_srcover_bgra_8888_sse2_lowp+0x130c>
.byte 102,15,111,252 // movdqa %xmm4,%xmm7
.byte 102,65,15,56,0,249 // pshufb %xmm9,%xmm7
.byte 102,69,15,111,208 // movdqa %xmm8,%xmm10
.byte 102,69,15,56,0,209 // pshufb %xmm9,%xmm10
.byte 102,65,15,108,250 // punpcklqdq %xmm10,%xmm7
- .byte 102,68,15,111,13,230,92,0,0 // movdqa 0x5ce6(%rip),%xmm9 # 39a30 <_sk_srcover_bgra_8888_sse2_lowp+0x132c>
+ .byte 102,68,15,111,13,214,92,0,0 // movdqa 0x5cd6(%rip),%xmm9 # 39b90 <_sk_srcover_bgra_8888_sse2_lowp+0x131c>
.byte 102,65,15,56,0,225 // pshufb %xmm9,%xmm4
.byte 102,69,15,56,0,193 // pshufb %xmm9,%xmm8
.byte 102,65,15,108,224 // punpcklqdq %xmm8,%xmm4
@@ -60875,19 +60945,19 @@ _sk_srcover_bgra_8888_sse41_lowp:
.byte 102,15,97,195 // punpcklwd %xmm3,%xmm0
.byte 102,65,15,235,196 // por %xmm12,%xmm0
.byte 65,128,250,6 // cmp $0x6,%r10b
- .byte 119,24 // ja 33e32 <_sk_srcover_bgra_8888_sse41_lowp+0x1ee>
- .byte 76,141,21,135,0,0,0 // lea 0x87(%rip),%r10 # 33ea8 <_sk_srcover_bgra_8888_sse41_lowp+0x264>
+ .byte 119,24 // ja 33fa2 <_sk_srcover_bgra_8888_sse41_lowp+0x1ee>
+ .byte 76,141,21,135,0,0,0 // lea 0x87(%rip),%r10 # 34018 <_sk_srcover_bgra_8888_sse41_lowp+0x264>
.byte 75,99,4,138 // movslq (%r10,%r9,4),%rax
.byte 76,1,208 // add %r10,%rax
.byte 255,224 // jmpq *%rax
.byte 102,65,15,126,4,144 // movd %xmm0,(%r8,%rdx,4)
- .byte 235,63 // jmp 33e71 <_sk_srcover_bgra_8888_sse41_lowp+0x22d>
+ .byte 235,63 // jmp 33fe1 <_sk_srcover_bgra_8888_sse41_lowp+0x22d>
.byte 243,65,15,127,4,144 // movdqu %xmm0,(%r8,%rdx,4)
.byte 243,65,15,127,76,144,16 // movdqu %xmm1,0x10(%r8,%rdx,4)
- .byte 235,48 // jmp 33e71 <_sk_srcover_bgra_8888_sse41_lowp+0x22d>
+ .byte 235,48 // jmp 33fe1 <_sk_srcover_bgra_8888_sse41_lowp+0x22d>
.byte 102,65,15,58,22,68,144,8,2 // pextrd $0x2,%xmm0,0x8(%r8,%rdx,4)
.byte 102,65,15,214,4,144 // movq %xmm0,(%r8,%rdx,4)
- .byte 235,31 // jmp 33e71 <_sk_srcover_bgra_8888_sse41_lowp+0x22d>
+ .byte 235,31 // jmp 33fe1 <_sk_srcover_bgra_8888_sse41_lowp+0x22d>
.byte 102,65,15,58,22,76,144,24,2 // pextrd $0x2,%xmm1,0x18(%r8,%rdx,4)
.byte 102,65,15,58,22,76,144,20,1 // pextrd $0x1,%xmm1,0x14(%r8,%rdx,4)
.byte 102,65,15,126,76,144,16 // movd %xmm1,0x10(%r8,%rdx,4)
@@ -60952,13 +61022,13 @@ _sk_start_pipeline_sse2_lowp:
.byte 73,137,246 // mov %rsi,%r14
.byte 72,137,77,192 // mov %rcx,-0x40(%rbp)
.byte 72,57,203 // cmp %rcx,%rbx
- .byte 15,131,131,0,0,0 // jae 33f7a <_sk_start_pipeline_sse2_lowp+0xb6>
+ .byte 15,131,131,0,0,0 // jae 340ea <_sk_start_pipeline_sse2_lowp+0xb6>
.byte 72,139,69,208 // mov -0x30(%rbp),%rax
.byte 72,141,64,8 // lea 0x8(%rax),%rax
.byte 72,137,69,200 // mov %rax,-0x38(%rbp)
.byte 76,57,125,200 // cmp %r15,-0x38(%rbp)
.byte 72,139,85,208 // mov -0x30(%rbp),%rdx
- .byte 119,59 // ja 33f48 <_sk_start_pipeline_sse2_lowp+0x84>
+ .byte 119,59 // ja 340b8 <_sk_start_pipeline_sse2_lowp+0x84>
.byte 76,139,101,208 // mov -0x30(%rbp),%r12
.byte 49,255 // xor %edi,%edi
.byte 15,87,192 // xorps %xmm0,%xmm0
@@ -60977,10 +61047,10 @@ _sk_start_pipeline_sse2_lowp:
.byte 73,131,196,16 // add $0x10,%r12
.byte 77,57,252 // cmp %r15,%r12
.byte 73,137,212 // mov %rdx,%r12
- .byte 118,201 // jbe 33f11 <_sk_start_pipeline_sse2_lowp+0x4d>
+ .byte 118,201 // jbe 34081 <_sk_start_pipeline_sse2_lowp+0x4d>
.byte 76,137,255 // mov %r15,%rdi
.byte 72,41,215 // sub %rdx,%rdi
- .byte 116,33 // je 33f71 <_sk_start_pipeline_sse2_lowp+0xad>
+ .byte 116,33 // je 340e1 <_sk_start_pipeline_sse2_lowp+0xad>
.byte 15,87,192 // xorps %xmm0,%xmm0
.byte 15,87,201 // xorps %xmm1,%xmm1
.byte 15,87,210 // xorps %xmm2,%xmm2
@@ -60994,7 +61064,7 @@ _sk_start_pipeline_sse2_lowp:
.byte 65,255,213 // callq *%r13
.byte 72,255,195 // inc %rbx
.byte 72,59,93,192 // cmp -0x40(%rbp),%rbx
- .byte 117,137 // jne 33f03 <_sk_start_pipeline_sse2_lowp+0x3f>
+ .byte 117,137 // jne 34073 <_sk_start_pipeline_sse2_lowp+0x3f>
.byte 72,131,196,24 // add $0x18,%rsp
.byte 91 // pop %rbx
.byte 65,92 // pop %r12
@@ -61025,7 +61095,7 @@ _sk_seed_shader_sse2_lowp:
.byte 102,15,110,209 // movd %ecx,%xmm2
.byte 102,15,112,210,0 // pshufd $0x0,%xmm2,%xmm2
.byte 15,91,210 // cvtdq2ps %xmm2,%xmm2
- .byte 15,88,21,40,83,0,0 // addps 0x5328(%rip),%xmm2 # 392e0 <_sk_srcover_bgra_8888_sse2_lowp+0xbdc>
+ .byte 15,88,21,24,83,0,0 // addps 0x5318(%rip),%xmm2 # 39440 <_sk_srcover_bgra_8888_sse2_lowp+0xbcc>
.byte 72,173 // lods %ds:(%rsi),%rax
.byte 15,40,218 // movaps %xmm2,%xmm3
.byte 255,224 // jmpq *%rax
@@ -61202,7 +61272,7 @@ HIDDEN _sk_black_color_sse2_lowp
FUNCTION(_sk_black_color_sse2_lowp)
_sk_black_color_sse2_lowp:
.byte 72,173 // lods %ds:(%rsi),%rax
- .byte 15,40,29,109,80,0,0 // movaps 0x506d(%rip),%xmm3 # 39290 <_sk_srcover_bgra_8888_sse2_lowp+0xb8c>
+ .byte 15,40,29,93,80,0,0 // movaps 0x505d(%rip),%xmm3 # 393f0 <_sk_srcover_bgra_8888_sse2_lowp+0xb7c>
.byte 15,87,192 // xorps %xmm0,%xmm0
.byte 15,87,201 // xorps %xmm1,%xmm1
.byte 15,87,210 // xorps %xmm2,%xmm2
@@ -61213,7 +61283,7 @@ HIDDEN _sk_white_color_sse2_lowp
FUNCTION(_sk_white_color_sse2_lowp)
_sk_white_color_sse2_lowp:
.byte 72,173 // lods %ds:(%rsi),%rax
- .byte 15,40,5,89,80,0,0 // movaps 0x5059(%rip),%xmm0 # 39290 <_sk_srcover_bgra_8888_sse2_lowp+0xb8c>
+ .byte 15,40,5,73,80,0,0 // movaps 0x5049(%rip),%xmm0 # 393f0 <_sk_srcover_bgra_8888_sse2_lowp+0xb7c>
.byte 15,40,200 // movaps %xmm0,%xmm1
.byte 15,40,208 // movaps %xmm0,%xmm2
.byte 15,40,216 // movaps %xmm0,%xmm3
@@ -61224,10 +61294,10 @@ HIDDEN _sk_set_rgb_sse2_lowp
FUNCTION(_sk_set_rgb_sse2_lowp)
_sk_set_rgb_sse2_lowp:
.byte 72,173 // lods %ds:(%rsi),%rax
- .byte 243,15,16,21,148,71,0,0 // movss 0x4794(%rip),%xmm2 # 389e0 <_sk_srcover_bgra_8888_sse2_lowp+0x2dc>
+ .byte 243,15,16,21,148,71,0,0 // movss 0x4794(%rip),%xmm2 # 38b50 <_sk_srcover_bgra_8888_sse2_lowp+0x2dc>
.byte 243,15,16,0 // movss (%rax),%xmm0
.byte 243,15,89,194 // mulss %xmm2,%xmm0
- .byte 243,68,15,16,5,83,71,0,0 // movss 0x4753(%rip),%xmm8 # 389b0 <_sk_srcover_bgra_8888_sse2_lowp+0x2ac>
+ .byte 243,68,15,16,5,83,71,0,0 // movss 0x4753(%rip),%xmm8 # 38b20 <_sk_srcover_bgra_8888_sse2_lowp+0x2ac>
.byte 243,65,15,88,192 // addss %xmm8,%xmm0
.byte 243,68,15,44,192 // cvttss2si %xmm0,%r8d
.byte 102,65,15,110,192 // movd %r8d,%xmm0
@@ -61253,7 +61323,7 @@ HIDDEN _sk_clamp_a_sse2_lowp
.globl _sk_clamp_a_sse2_lowp
FUNCTION(_sk_clamp_a_sse2_lowp)
_sk_clamp_a_sse2_lowp:
- .byte 102,68,15,111,5,63,87,0,0 // movdqa 0x573f(%rip),%xmm8 # 39a00 <_sk_srcover_bgra_8888_sse2_lowp+0x12fc>
+ .byte 102,68,15,111,5,47,87,0,0 // movdqa 0x572f(%rip),%xmm8 # 39b60 <_sk_srcover_bgra_8888_sse2_lowp+0x12ec>
.byte 102,68,15,111,200 // movdqa %xmm0,%xmm9
.byte 102,69,15,239,200 // pxor %xmm8,%xmm9
.byte 102,68,15,111,211 // movdqa %xmm3,%xmm10
@@ -61282,7 +61352,7 @@ HIDDEN _sk_clamp_a_dst_sse2_lowp
.globl _sk_clamp_a_dst_sse2_lowp
FUNCTION(_sk_clamp_a_dst_sse2_lowp)
_sk_clamp_a_dst_sse2_lowp:
- .byte 102,68,15,111,5,201,86,0,0 // movdqa 0x56c9(%rip),%xmm8 # 39a00 <_sk_srcover_bgra_8888_sse2_lowp+0x12fc>
+ .byte 102,68,15,111,5,185,86,0,0 // movdqa 0x56b9(%rip),%xmm8 # 39b60 <_sk_srcover_bgra_8888_sse2_lowp+0x12ec>
.byte 102,68,15,111,204 // movdqa %xmm4,%xmm9
.byte 102,69,15,239,200 // pxor %xmm8,%xmm9
.byte 102,68,15,111,215 // movdqa %xmm7,%xmm10
@@ -61312,7 +61382,7 @@ HIDDEN _sk_premul_sse2_lowp
FUNCTION(_sk_premul_sse2_lowp)
_sk_premul_sse2_lowp:
.byte 102,15,213,195 // pmullw %xmm3,%xmm0
- .byte 102,68,15,111,5,223,78,0,0 // movdqa 0x4edf(%rip),%xmm8 # 39290 <_sk_srcover_bgra_8888_sse2_lowp+0xb8c>
+ .byte 102,68,15,111,5,207,78,0,0 // movdqa 0x4ecf(%rip),%xmm8 # 393f0 <_sk_srcover_bgra_8888_sse2_lowp+0xb7c>
.byte 102,65,15,253,192 // paddw %xmm8,%xmm0
.byte 102,15,113,208,8 // psrlw $0x8,%xmm0
.byte 102,15,213,203 // pmullw %xmm3,%xmm1
@@ -61329,7 +61399,7 @@ HIDDEN _sk_premul_dst_sse2_lowp
FUNCTION(_sk_premul_dst_sse2_lowp)
_sk_premul_dst_sse2_lowp:
.byte 102,15,213,231 // pmullw %xmm7,%xmm4
- .byte 102,68,15,111,5,168,78,0,0 // movdqa 0x4ea8(%rip),%xmm8 # 39290 <_sk_srcover_bgra_8888_sse2_lowp+0xb8c>
+ .byte 102,68,15,111,5,152,78,0,0 // movdqa 0x4e98(%rip),%xmm8 # 393f0 <_sk_srcover_bgra_8888_sse2_lowp+0xb7c>
.byte 102,65,15,253,224 // paddw %xmm8,%xmm4
.byte 102,15,113,212,8 // psrlw $0x8,%xmm4
.byte 102,15,213,239 // pmullw %xmm7,%xmm5
@@ -61377,7 +61447,7 @@ HIDDEN _sk_invert_sse2_lowp
.globl _sk_invert_sse2_lowp
FUNCTION(_sk_invert_sse2_lowp)
_sk_invert_sse2_lowp:
- .byte 102,68,15,111,5,70,78,0,0 // movdqa 0x4e46(%rip),%xmm8 # 39290 <_sk_srcover_bgra_8888_sse2_lowp+0xb8c>
+ .byte 102,68,15,111,5,54,78,0,0 // movdqa 0x4e36(%rip),%xmm8 # 393f0 <_sk_srcover_bgra_8888_sse2_lowp+0xb7c>
.byte 102,69,15,111,200 // movdqa %xmm8,%xmm9
.byte 102,68,15,249,200 // psubw %xmm0,%xmm9
.byte 102,69,15,111,208 // movdqa %xmm8,%xmm10
@@ -61409,7 +61479,7 @@ FUNCTION(_sk_srcatop_sse2_lowp)
_sk_srcatop_sse2_lowp:
.byte 102,68,15,111,195 // movdqa %xmm3,%xmm8
.byte 102,15,213,199 // pmullw %xmm7,%xmm0
- .byte 102,15,111,29,234,77,0,0 // movdqa 0x4dea(%rip),%xmm3 # 39290 <_sk_srcover_bgra_8888_sse2_lowp+0xb8c>
+ .byte 102,15,111,29,218,77,0,0 // movdqa 0x4dda(%rip),%xmm3 # 393f0 <_sk_srcover_bgra_8888_sse2_lowp+0xb7c>
.byte 102,68,15,111,203 // movdqa %xmm3,%xmm9
.byte 102,69,15,249,200 // psubw %xmm8,%xmm9
.byte 102,69,15,111,193 // movdqa %xmm9,%xmm8
@@ -61441,7 +61511,7 @@ FUNCTION(_sk_dstatop_sse2_lowp)
_sk_dstatop_sse2_lowp:
.byte 102,68,15,111,196 // movdqa %xmm4,%xmm8
.byte 102,68,15,213,195 // pmullw %xmm3,%xmm8
- .byte 102,68,15,111,13,106,77,0,0 // movdqa 0x4d6a(%rip),%xmm9 # 39290 <_sk_srcover_bgra_8888_sse2_lowp+0xb8c>
+ .byte 102,68,15,111,13,90,77,0,0 // movdqa 0x4d5a(%rip),%xmm9 # 393f0 <_sk_srcover_bgra_8888_sse2_lowp+0xb7c>
.byte 102,69,15,253,193 // paddw %xmm9,%xmm8
.byte 102,68,15,111,213 // movdqa %xmm5,%xmm10
.byte 102,68,15,213,211 // pmullw %xmm3,%xmm10
@@ -61470,7 +61540,7 @@ HIDDEN _sk_srcin_sse2_lowp
FUNCTION(_sk_srcin_sse2_lowp)
_sk_srcin_sse2_lowp:
.byte 102,15,213,199 // pmullw %xmm7,%xmm0
- .byte 102,68,15,111,5,245,76,0,0 // movdqa 0x4cf5(%rip),%xmm8 # 39290 <_sk_srcover_bgra_8888_sse2_lowp+0xb8c>
+ .byte 102,68,15,111,5,229,76,0,0 // movdqa 0x4ce5(%rip),%xmm8 # 393f0 <_sk_srcover_bgra_8888_sse2_lowp+0xb7c>
.byte 102,65,15,253,192 // paddw %xmm8,%xmm0
.byte 102,15,113,208,8 // psrlw $0x8,%xmm0
.byte 102,15,213,207 // pmullw %xmm7,%xmm1
@@ -61491,7 +61561,7 @@ FUNCTION(_sk_dstin_sse2_lowp)
_sk_dstin_sse2_lowp:
.byte 102,15,111,196 // movdqa %xmm4,%xmm0
.byte 102,15,213,195 // pmullw %xmm3,%xmm0
- .byte 102,68,15,111,5,172,76,0,0 // movdqa 0x4cac(%rip),%xmm8 # 39290 <_sk_srcover_bgra_8888_sse2_lowp+0xb8c>
+ .byte 102,68,15,111,5,156,76,0,0 // movdqa 0x4c9c(%rip),%xmm8 # 393f0 <_sk_srcover_bgra_8888_sse2_lowp+0xb7c>
.byte 102,65,15,253,192 // paddw %xmm8,%xmm0
.byte 102,15,113,208,8 // psrlw $0x8,%xmm0
.byte 102,15,111,205 // movdqa %xmm5,%xmm1
@@ -61512,7 +61582,7 @@ HIDDEN _sk_srcout_sse2_lowp
.globl _sk_srcout_sse2_lowp
FUNCTION(_sk_srcout_sse2_lowp)
_sk_srcout_sse2_lowp:
- .byte 102,68,15,111,5,99,76,0,0 // movdqa 0x4c63(%rip),%xmm8 # 39290 <_sk_srcover_bgra_8888_sse2_lowp+0xb8c>
+ .byte 102,68,15,111,5,83,76,0,0 // movdqa 0x4c53(%rip),%xmm8 # 393f0 <_sk_srcover_bgra_8888_sse2_lowp+0xb7c>
.byte 102,69,15,111,200 // movdqa %xmm8,%xmm9
.byte 102,68,15,249,207 // psubw %xmm7,%xmm9
.byte 102,65,15,213,193 // pmullw %xmm9,%xmm0
@@ -61535,7 +61605,7 @@ HIDDEN _sk_dstout_sse2_lowp
FUNCTION(_sk_dstout_sse2_lowp)
_sk_dstout_sse2_lowp:
.byte 102,15,111,195 // movdqa %xmm3,%xmm0
- .byte 102,68,15,111,5,12,76,0,0 // movdqa 0x4c0c(%rip),%xmm8 # 39290 <_sk_srcover_bgra_8888_sse2_lowp+0xb8c>
+ .byte 102,68,15,111,5,252,75,0,0 // movdqa 0x4bfc(%rip),%xmm8 # 393f0 <_sk_srcover_bgra_8888_sse2_lowp+0xb7c>
.byte 102,65,15,111,216 // movdqa %xmm8,%xmm3
.byte 102,15,249,216 // psubw %xmm0,%xmm3
.byte 102,15,111,195 // movdqa %xmm3,%xmm0
@@ -61560,7 +61630,7 @@ HIDDEN _sk_srcover_sse2_lowp
.globl _sk_srcover_sse2_lowp
FUNCTION(_sk_srcover_sse2_lowp)
_sk_srcover_sse2_lowp:
- .byte 102,68,15,111,13,178,75,0,0 // movdqa 0x4bb2(%rip),%xmm9 # 39290 <_sk_srcover_bgra_8888_sse2_lowp+0xb8c>
+ .byte 102,68,15,111,13,162,75,0,0 // movdqa 0x4ba2(%rip),%xmm9 # 393f0 <_sk_srcover_bgra_8888_sse2_lowp+0xb7c>
.byte 102,69,15,111,193 // movdqa %xmm9,%xmm8
.byte 102,68,15,249,195 // psubw %xmm3,%xmm8
.byte 102,69,15,111,208 // movdqa %xmm8,%xmm10
@@ -61589,7 +61659,7 @@ HIDDEN _sk_dstover_sse2_lowp
.globl _sk_dstover_sse2_lowp
FUNCTION(_sk_dstover_sse2_lowp)
_sk_dstover_sse2_lowp:
- .byte 102,68,15,111,5,56,75,0,0 // movdqa 0x4b38(%rip),%xmm8 # 39290 <_sk_srcover_bgra_8888_sse2_lowp+0xb8c>
+ .byte 102,68,15,111,5,40,75,0,0 // movdqa 0x4b28(%rip),%xmm8 # 393f0 <_sk_srcover_bgra_8888_sse2_lowp+0xb7c>
.byte 102,69,15,111,200 // movdqa %xmm8,%xmm9
.byte 102,68,15,249,207 // psubw %xmm7,%xmm9
.byte 102,65,15,213,193 // pmullw %xmm9,%xmm0
@@ -61616,7 +61686,7 @@ HIDDEN _sk_modulate_sse2_lowp
FUNCTION(_sk_modulate_sse2_lowp)
_sk_modulate_sse2_lowp:
.byte 102,15,213,196 // pmullw %xmm4,%xmm0
- .byte 102,68,15,111,5,209,74,0,0 // movdqa 0x4ad1(%rip),%xmm8 # 39290 <_sk_srcover_bgra_8888_sse2_lowp+0xb8c>
+ .byte 102,68,15,111,5,193,74,0,0 // movdqa 0x4ac1(%rip),%xmm8 # 393f0 <_sk_srcover_bgra_8888_sse2_lowp+0xb7c>
.byte 102,65,15,253,192 // paddw %xmm8,%xmm0
.byte 102,15,113,208,8 // psrlw $0x8,%xmm0
.byte 102,15,213,205 // pmullw %xmm5,%xmm1
@@ -61635,7 +61705,7 @@ HIDDEN _sk_multiply_sse2_lowp
.globl _sk_multiply_sse2_lowp
FUNCTION(_sk_multiply_sse2_lowp)
_sk_multiply_sse2_lowp:
- .byte 102,68,15,111,13,144,74,0,0 // movdqa 0x4a90(%rip),%xmm9 # 39290 <_sk_srcover_bgra_8888_sse2_lowp+0xb8c>
+ .byte 102,68,15,111,13,128,74,0,0 // movdqa 0x4a80(%rip),%xmm9 # 393f0 <_sk_srcover_bgra_8888_sse2_lowp+0xb7c>
.byte 102,69,15,111,193 // movdqa %xmm9,%xmm8
.byte 102,68,15,249,195 // psubw %xmm3,%xmm8
.byte 102,69,15,111,208 // movdqa %xmm8,%xmm10
@@ -61680,12 +61750,12 @@ _sk_plus__sse2_lowp:
.byte 102,68,15,111,193 // movdqa %xmm1,%xmm8
.byte 102,15,111,200 // movdqa %xmm0,%xmm1
.byte 102,15,253,204 // paddw %xmm4,%xmm1
- .byte 102,68,15,111,21,60,82,0,0 // movdqa 0x523c(%rip),%xmm10 # 39b00 <_sk_srcover_bgra_8888_sse2_lowp+0x13fc>
+ .byte 102,68,15,111,21,44,82,0,0 // movdqa 0x522c(%rip),%xmm10 # 39c60 <_sk_srcover_bgra_8888_sse2_lowp+0x13ec>
.byte 102,15,111,193 // movdqa %xmm1,%xmm0
.byte 102,65,15,217,194 // psubusw %xmm10,%xmm0
.byte 102,69,15,239,228 // pxor %xmm12,%xmm12
.byte 102,65,15,117,196 // pcmpeqw %xmm12,%xmm0
- .byte 102,68,15,111,29,176,73,0,0 // movdqa 0x49b0(%rip),%xmm11 # 39290 <_sk_srcover_bgra_8888_sse2_lowp+0xb8c>
+ .byte 102,68,15,111,29,160,73,0,0 // movdqa 0x49a0(%rip),%xmm11 # 393f0 <_sk_srcover_bgra_8888_sse2_lowp+0xb7c>
.byte 102,15,219,200 // pand %xmm0,%xmm1
.byte 102,65,15,223,195 // pandn %xmm11,%xmm0
.byte 102,15,235,193 // por %xmm1,%xmm0
@@ -61724,7 +61794,7 @@ _sk_screen_sse2_lowp:
.byte 102,15,111,196 // movdqa %xmm4,%xmm0
.byte 102,15,253,193 // paddw %xmm1,%xmm0
.byte 102,15,213,204 // pmullw %xmm4,%xmm1
- .byte 102,68,15,111,21,16,73,0,0 // movdqa 0x4910(%rip),%xmm10 # 39290 <_sk_srcover_bgra_8888_sse2_lowp+0xb8c>
+ .byte 102,68,15,111,21,0,73,0,0 // movdqa 0x4900(%rip),%xmm10 # 393f0 <_sk_srcover_bgra_8888_sse2_lowp+0xb7c>
.byte 102,65,15,253,202 // paddw %xmm10,%xmm1
.byte 102,15,113,209,8 // psrlw $0x8,%xmm1
.byte 102,15,249,193 // psubw %xmm1,%xmm0
@@ -61756,7 +61826,7 @@ HIDDEN _sk_xor__sse2_lowp
FUNCTION(_sk_xor__sse2_lowp)
_sk_xor__sse2_lowp:
.byte 102,68,15,111,195 // movdqa %xmm3,%xmm8
- .byte 102,68,15,111,13,142,72,0,0 // movdqa 0x488e(%rip),%xmm9 # 39290 <_sk_srcover_bgra_8888_sse2_lowp+0xb8c>
+ .byte 102,68,15,111,13,126,72,0,0 // movdqa 0x487e(%rip),%xmm9 # 393f0 <_sk_srcover_bgra_8888_sse2_lowp+0xb7c>
.byte 102,69,15,111,209 // movdqa %xmm9,%xmm10
.byte 102,68,15,249,215 // psubw %xmm7,%xmm10
.byte 102,65,15,213,194 // pmullw %xmm10,%xmm0
@@ -61798,7 +61868,7 @@ _sk_darken_sse2_lowp:
.byte 102,65,15,253,194 // paddw %xmm10,%xmm0
.byte 102,68,15,213,215 // pmullw %xmm7,%xmm10
.byte 102,68,15,213,227 // pmullw %xmm3,%xmm12
- .byte 102,68,15,111,13,73,79,0,0 // movdqa 0x4f49(%rip),%xmm9 # 39a00 <_sk_srcover_bgra_8888_sse2_lowp+0x12fc>
+ .byte 102,68,15,111,13,57,79,0,0 // movdqa 0x4f39(%rip),%xmm9 # 39b60 <_sk_srcover_bgra_8888_sse2_lowp+0x12ec>
.byte 102,69,15,111,218 // movdqa %xmm10,%xmm11
.byte 102,69,15,239,217 // pxor %xmm9,%xmm11
.byte 102,65,15,111,204 // movdqa %xmm12,%xmm1
@@ -61807,7 +61877,7 @@ _sk_darken_sse2_lowp:
.byte 102,68,15,219,225 // pand %xmm1,%xmm12
.byte 102,65,15,223,202 // pandn %xmm10,%xmm1
.byte 102,65,15,235,204 // por %xmm12,%xmm1
- .byte 102,68,15,111,21,168,71,0,0 // movdqa 0x47a8(%rip),%xmm10 # 39290 <_sk_srcover_bgra_8888_sse2_lowp+0xb8c>
+ .byte 102,68,15,111,21,152,71,0,0 // movdqa 0x4798(%rip),%xmm10 # 393f0 <_sk_srcover_bgra_8888_sse2_lowp+0xb7c>
.byte 102,65,15,253,202 // paddw %xmm10,%xmm1
.byte 102,15,113,209,8 // psrlw $0x8,%xmm1
.byte 102,15,249,193 // psubw %xmm1,%xmm0
@@ -61863,7 +61933,7 @@ _sk_lighten_sse2_lowp:
.byte 102,65,15,253,196 // paddw %xmm12,%xmm0
.byte 102,68,15,213,231 // pmullw %xmm7,%xmm12
.byte 102,68,15,213,211 // pmullw %xmm3,%xmm10
- .byte 102,68,15,111,13,32,78,0,0 // movdqa 0x4e20(%rip),%xmm9 # 39a00 <_sk_srcover_bgra_8888_sse2_lowp+0x12fc>
+ .byte 102,68,15,111,13,16,78,0,0 // movdqa 0x4e10(%rip),%xmm9 # 39b60 <_sk_srcover_bgra_8888_sse2_lowp+0x12ec>
.byte 102,69,15,111,220 // movdqa %xmm12,%xmm11
.byte 102,69,15,239,217 // pxor %xmm9,%xmm11
.byte 102,65,15,111,202 // movdqa %xmm10,%xmm1
@@ -61872,7 +61942,7 @@ _sk_lighten_sse2_lowp:
.byte 102,68,15,219,225 // pand %xmm1,%xmm12
.byte 102,65,15,223,202 // pandn %xmm10,%xmm1
.byte 102,65,15,235,204 // por %xmm12,%xmm1
- .byte 102,68,15,111,21,127,70,0,0 // movdqa 0x467f(%rip),%xmm10 # 39290 <_sk_srcover_bgra_8888_sse2_lowp+0xb8c>
+ .byte 102,68,15,111,21,111,70,0,0 // movdqa 0x466f(%rip),%xmm10 # 393f0 <_sk_srcover_bgra_8888_sse2_lowp+0xb7c>
.byte 102,65,15,253,202 // paddw %xmm10,%xmm1
.byte 102,15,113,209,8 // psrlw $0x8,%xmm1
.byte 102,15,249,193 // psubw %xmm1,%xmm0
@@ -61928,7 +61998,7 @@ _sk_difference_sse2_lowp:
.byte 102,65,15,253,196 // paddw %xmm12,%xmm0
.byte 102,68,15,213,231 // pmullw %xmm7,%xmm12
.byte 102,68,15,213,211 // pmullw %xmm3,%xmm10
- .byte 102,68,15,111,13,247,76,0,0 // movdqa 0x4cf7(%rip),%xmm9 # 39a00 <_sk_srcover_bgra_8888_sse2_lowp+0x12fc>
+ .byte 102,68,15,111,13,231,76,0,0 // movdqa 0x4ce7(%rip),%xmm9 # 39b60 <_sk_srcover_bgra_8888_sse2_lowp+0x12ec>
.byte 102,69,15,111,220 // movdqa %xmm12,%xmm11
.byte 102,69,15,239,217 // pxor %xmm9,%xmm11
.byte 102,65,15,111,202 // movdqa %xmm10,%xmm1
@@ -61937,10 +62007,10 @@ _sk_difference_sse2_lowp:
.byte 102,68,15,219,225 // pand %xmm1,%xmm12
.byte 102,65,15,223,202 // pandn %xmm10,%xmm1
.byte 102,65,15,235,204 // por %xmm12,%xmm1
- .byte 102,68,15,111,21,86,69,0,0 // movdqa 0x4556(%rip),%xmm10 # 39290 <_sk_srcover_bgra_8888_sse2_lowp+0xb8c>
+ .byte 102,68,15,111,21,70,69,0,0 // movdqa 0x4546(%rip),%xmm10 # 393f0 <_sk_srcover_bgra_8888_sse2_lowp+0xb7c>
.byte 102,65,15,253,202 // paddw %xmm10,%xmm1
.byte 102,15,113,209,7 // psrlw $0x7,%xmm1
- .byte 102,68,15,111,29,163,76,0,0 // movdqa 0x4ca3(%rip),%xmm11 # 399f0 <_sk_srcover_bgra_8888_sse2_lowp+0x12ec>
+ .byte 102,68,15,111,29,147,76,0,0 // movdqa 0x4c93(%rip),%xmm11 # 39b50 <_sk_srcover_bgra_8888_sse2_lowp+0x12dc>
.byte 102,65,15,219,203 // pand %xmm11,%xmm1
.byte 102,15,249,193 // psubw %xmm1,%xmm0
.byte 102,68,15,111,229 // movdqa %xmm5,%xmm12
@@ -61995,10 +62065,10 @@ _sk_exclusion_sse2_lowp:
.byte 102,15,111,196 // movdqa %xmm4,%xmm0
.byte 102,15,253,193 // paddw %xmm1,%xmm0
.byte 102,15,213,204 // pmullw %xmm4,%xmm1
- .byte 102,68,15,111,13,83,68,0,0 // movdqa 0x4453(%rip),%xmm9 # 39290 <_sk_srcover_bgra_8888_sse2_lowp+0xb8c>
+ .byte 102,68,15,111,13,67,68,0,0 // movdqa 0x4443(%rip),%xmm9 # 393f0 <_sk_srcover_bgra_8888_sse2_lowp+0xb7c>
.byte 102,65,15,253,201 // paddw %xmm9,%xmm1
.byte 102,15,113,209,7 // psrlw $0x7,%xmm1
- .byte 102,68,15,111,21,160,75,0,0 // movdqa 0x4ba0(%rip),%xmm10 # 399f0 <_sk_srcover_bgra_8888_sse2_lowp+0x12ec>
+ .byte 102,68,15,111,21,144,75,0,0 // movdqa 0x4b90(%rip),%xmm10 # 39b50 <_sk_srcover_bgra_8888_sse2_lowp+0x12dc>
.byte 102,65,15,219,202 // pand %xmm10,%xmm1
.byte 102,15,249,193 // psubw %xmm1,%xmm0
.byte 102,15,111,205 // movdqa %xmm5,%xmm1
@@ -62034,7 +62104,7 @@ _sk_hardlight_sse2_lowp:
.byte 102,15,111,236 // movdqa %xmm4,%xmm5
.byte 102,68,15,111,194 // movdqa %xmm2,%xmm8
.byte 102,68,15,111,200 // movdqa %xmm0,%xmm9
- .byte 102,15,111,21,175,67,0,0 // movdqa 0x43af(%rip),%xmm2 # 39290 <_sk_srcover_bgra_8888_sse2_lowp+0xb8c>
+ .byte 102,15,111,21,159,67,0,0 // movdqa 0x439f(%rip),%xmm2 # 393f0 <_sk_srcover_bgra_8888_sse2_lowp+0xb7c>
.byte 102,68,15,111,226 // movdqa %xmm2,%xmm12
.byte 102,68,15,249,231 // psubw %xmm7,%xmm12
.byte 102,65,15,111,196 // movdqa %xmm12,%xmm0
@@ -62047,7 +62117,7 @@ _sk_hardlight_sse2_lowp:
.byte 102,68,15,111,243 // movdqa %xmm3,%xmm14
.byte 102,69,15,249,241 // psubw %xmm9,%xmm14
.byte 102,69,15,253,201 // paddw %xmm9,%xmm9
- .byte 102,15,111,21,219,74,0,0 // movdqa 0x4adb(%rip),%xmm2 # 39a00 <_sk_srcover_bgra_8888_sse2_lowp+0x12fc>
+ .byte 102,15,111,21,203,74,0,0 // movdqa 0x4acb(%rip),%xmm2 # 39b60 <_sk_srcover_bgra_8888_sse2_lowp+0x12ec>
.byte 102,15,111,199 // movdqa %xmm7,%xmm0
.byte 102,68,15,111,215 // movdqa %xmm7,%xmm10
.byte 102,15,111,231 // movdqa %xmm7,%xmm4
@@ -62066,7 +62136,7 @@ _sk_hardlight_sse2_lowp:
.byte 102,68,15,219,232 // pand %xmm0,%xmm13
.byte 102,65,15,223,193 // pandn %xmm9,%xmm0
.byte 102,65,15,235,197 // por %xmm13,%xmm0
- .byte 102,15,253,5,14,67,0,0 // paddw 0x430e(%rip),%xmm0 # 39290 <_sk_srcover_bgra_8888_sse2_lowp+0xb8c>
+ .byte 102,15,253,5,254,66,0,0 // paddw 0x42fe(%rip),%xmm0 # 393f0 <_sk_srcover_bgra_8888_sse2_lowp+0xb7c>
.byte 102,65,15,253,199 // paddw %xmm15,%xmm0
.byte 102,69,15,111,204 // movdqa %xmm12,%xmm9
.byte 102,68,15,213,201 // pmullw %xmm1,%xmm9
@@ -62089,7 +62159,7 @@ _sk_hardlight_sse2_lowp:
.byte 102,65,15,219,249 // pand %xmm9,%xmm7
.byte 102,68,15,223,201 // pandn %xmm1,%xmm9
.byte 102,68,15,235,207 // por %xmm7,%xmm9
- .byte 102,68,15,253,13,153,66,0,0 // paddw 0x4299(%rip),%xmm9 # 39290 <_sk_srcover_bgra_8888_sse2_lowp+0xb8c>
+ .byte 102,68,15,253,13,137,66,0,0 // paddw 0x4289(%rip),%xmm9 # 393f0 <_sk_srcover_bgra_8888_sse2_lowp+0xb7c>
.byte 102,69,15,253,205 // paddw %xmm13,%xmm9
.byte 102,69,15,213,224 // pmullw %xmm8,%xmm12
.byte 102,69,15,111,251 // movdqa %xmm11,%xmm15
@@ -62111,7 +62181,7 @@ _sk_hardlight_sse2_lowp:
.byte 102,68,15,219,210 // pand %xmm2,%xmm10
.byte 102,65,15,223,208 // pandn %xmm8,%xmm2
.byte 102,65,15,235,210 // por %xmm10,%xmm2
- .byte 102,68,15,111,5,41,66,0,0 // movdqa 0x4229(%rip),%xmm8 # 39290 <_sk_srcover_bgra_8888_sse2_lowp+0xb8c>
+ .byte 102,68,15,111,5,25,66,0,0 // movdqa 0x4219(%rip),%xmm8 # 393f0 <_sk_srcover_bgra_8888_sse2_lowp+0xb7c>
.byte 102,65,15,253,208 // paddw %xmm8,%xmm2
.byte 102,65,15,253,215 // paddw %xmm15,%xmm2
.byte 102,68,15,213,220 // pmullw %xmm4,%xmm11
@@ -62139,7 +62209,7 @@ _sk_overlay_sse2_lowp:
.byte 102,15,127,124,36,232 // movdqa %xmm7,-0x18(%rsp)
.byte 102,68,15,111,193 // movdqa %xmm1,%xmm8
.byte 102,68,15,111,248 // movdqa %xmm0,%xmm15
- .byte 102,15,111,53,186,65,0,0 // movdqa 0x41ba(%rip),%xmm6 # 39290 <_sk_srcover_bgra_8888_sse2_lowp+0xb8c>
+ .byte 102,15,111,53,170,65,0,0 // movdqa 0x41aa(%rip),%xmm6 # 393f0 <_sk_srcover_bgra_8888_sse2_lowp+0xb7c>
.byte 102,68,15,111,222 // movdqa %xmm6,%xmm11
.byte 102,69,15,249,222 // psubw %xmm14,%xmm11
.byte 102,65,15,111,203 // movdqa %xmm11,%xmm1
@@ -62161,7 +62231,7 @@ _sk_overlay_sse2_lowp:
.byte 102,15,253,201 // paddw %xmm1,%xmm1
.byte 102,69,15,111,225 // movdqa %xmm9,%xmm12
.byte 102,68,15,249,225 // psubw %xmm1,%xmm12
- .byte 102,15,111,13,190,72,0,0 // movdqa 0x48be(%rip),%xmm1 # 39a00 <_sk_srcover_bgra_8888_sse2_lowp+0x12fc>
+ .byte 102,15,111,13,174,72,0,0 // movdqa 0x48ae(%rip),%xmm1 # 39b60 <_sk_srcover_bgra_8888_sse2_lowp+0x12ec>
.byte 102,69,15,111,238 // movdqa %xmm14,%xmm13
.byte 102,68,15,239,233 // pxor %xmm1,%xmm13
.byte 102,15,239,249 // pxor %xmm1,%xmm7
@@ -62188,7 +62258,7 @@ _sk_overlay_sse2_lowp:
.byte 102,68,15,213,197 // pmullw %xmm5,%xmm8
.byte 102,15,111,229 // movdqa %xmm5,%xmm4
.byte 102,15,253,228 // paddw %xmm4,%xmm4
- .byte 102,15,111,61,61,72,0,0 // movdqa 0x483d(%rip),%xmm7 # 39a00 <_sk_srcover_bgra_8888_sse2_lowp+0x12fc>
+ .byte 102,15,111,61,45,72,0,0 // movdqa 0x482d(%rip),%xmm7 # 39b60 <_sk_srcover_bgra_8888_sse2_lowp+0x12ec>
.byte 102,15,239,231 // pxor %xmm7,%xmm4
.byte 102,65,15,101,229 // pcmpgtw %xmm13,%xmm4
.byte 102,69,15,253,192 // paddw %xmm8,%xmm8
@@ -62248,27 +62318,27 @@ _sk_load_8888_sse2_lowp:
.byte 36,7 // and $0x7,%al
.byte 254,200 // dec %al
.byte 60,6 // cmp $0x6,%al
- .byte 119,41 // ja 352d9 <_sk_load_8888_sse2_lowp+0x44>
+ .byte 119,41 // ja 35449 <_sk_load_8888_sse2_lowp+0x44>
.byte 102,69,15,239,192 // pxor %xmm8,%xmm8
.byte 15,182,192 // movzbl %al,%eax
- .byte 76,141,13,25,1,0,0 // lea 0x119(%rip),%r9 # 353d8 <_sk_load_8888_sse2_lowp+0x143>
+ .byte 76,141,13,25,1,0,0 // lea 0x119(%rip),%r9 # 35548 <_sk_load_8888_sse2_lowp+0x143>
.byte 73,99,4,129 // movslq (%r9,%rax,4),%rax
.byte 76,1,200 // add %r9,%rax
.byte 102,15,239,192 // pxor %xmm0,%xmm0
.byte 255,224 // jmpq *%rax
.byte 243,65,15,16,28,144 // movss (%r8,%rdx,4),%xmm3
.byte 102,69,15,239,192 // pxor %xmm8,%xmm8
- .byte 235,102 // jmp 3533f <_sk_load_8888_sse2_lowp+0xaa>
+ .byte 235,102 // jmp 354af <_sk_load_8888_sse2_lowp+0xaa>
.byte 102,65,15,16,28,144 // movupd (%r8,%rdx,4),%xmm3
.byte 243,69,15,111,68,144,16 // movdqu 0x10(%r8,%rdx,4),%xmm8
- .byte 235,87 // jmp 3533f <_sk_load_8888_sse2_lowp+0xaa>
+ .byte 235,87 // jmp 354af <_sk_load_8888_sse2_lowp+0xaa>
.byte 102,65,15,110,68,144,8 // movd 0x8(%r8,%rdx,4),%xmm0
.byte 102,68,15,112,192,69 // pshufd $0x45,%xmm0,%xmm8
.byte 102,15,239,192 // pxor %xmm0,%xmm0
.byte 102,69,15,18,4,144 // movlpd (%r8,%rdx,4),%xmm8
.byte 102,65,15,40,216 // movapd %xmm8,%xmm3
.byte 102,68,15,111,192 // movdqa %xmm0,%xmm8
- .byte 235,52 // jmp 3533f <_sk_load_8888_sse2_lowp+0xaa>
+ .byte 235,52 // jmp 354af <_sk_load_8888_sse2_lowp+0xaa>
.byte 102,65,15,110,68,144,24 // movd 0x18(%r8,%rdx,4),%xmm0
.byte 102,68,15,112,192,69 // pshufd $0x45,%xmm0,%xmm8
.byte 243,65,15,16,68,144,20 // movss 0x14(%r8,%rdx,4),%xmm0
@@ -62285,7 +62355,7 @@ _sk_load_8888_sse2_lowp:
.byte 102,15,114,240,16 // pslld $0x10,%xmm0
.byte 102,15,114,224,16 // psrad $0x10,%xmm0
.byte 102,15,107,200 // packssdw %xmm0,%xmm1
- .byte 102,68,15,111,13,39,63,0,0 // movdqa 0x3f27(%rip),%xmm9 # 39290 <_sk_srcover_bgra_8888_sse2_lowp+0xb8c>
+ .byte 102,68,15,111,13,23,63,0,0 // movdqa 0x3f17(%rip),%xmm9 # 393f0 <_sk_srcover_bgra_8888_sse2_lowp+0xb7c>
.byte 102,15,111,193 // movdqa %xmm1,%xmm0
.byte 102,65,15,219,193 // pand %xmm9,%xmm0
.byte 102,15,113,209,8 // psrlw $0x8,%xmm1
@@ -62342,27 +62412,27 @@ _sk_load_8888_dst_sse2_lowp:
.byte 36,7 // and $0x7,%al
.byte 254,200 // dec %al
.byte 60,6 // cmp $0x6,%al
- .byte 119,41 // ja 35438 <_sk_load_8888_dst_sse2_lowp+0x44>
+ .byte 119,41 // ja 355a8 <_sk_load_8888_dst_sse2_lowp+0x44>
.byte 102,69,15,239,192 // pxor %xmm8,%xmm8
.byte 15,182,192 // movzbl %al,%eax
- .byte 76,141,13,22,1,0,0 // lea 0x116(%rip),%r9 # 35534 <_sk_load_8888_dst_sse2_lowp+0x140>
+ .byte 76,141,13,22,1,0,0 // lea 0x116(%rip),%r9 # 356a4 <_sk_load_8888_dst_sse2_lowp+0x140>
.byte 73,99,4,129 // movslq (%r9,%rax,4),%rax
.byte 76,1,200 // add %r9,%rax
.byte 102,15,239,228 // pxor %xmm4,%xmm4
.byte 255,224 // jmpq *%rax
.byte 243,65,15,16,60,144 // movss (%r8,%rdx,4),%xmm7
.byte 102,69,15,239,192 // pxor %xmm8,%xmm8
- .byte 235,102 // jmp 3549e <_sk_load_8888_dst_sse2_lowp+0xaa>
+ .byte 235,102 // jmp 3560e <_sk_load_8888_dst_sse2_lowp+0xaa>
.byte 102,65,15,16,60,144 // movupd (%r8,%rdx,4),%xmm7
.byte 243,69,15,111,68,144,16 // movdqu 0x10(%r8,%rdx,4),%xmm8
- .byte 235,87 // jmp 3549e <_sk_load_8888_dst_sse2_lowp+0xaa>
+ .byte 235,87 // jmp 3560e <_sk_load_8888_dst_sse2_lowp+0xaa>
.byte 102,65,15,110,100,144,8 // movd 0x8(%r8,%rdx,4),%xmm4
.byte 102,68,15,112,196,69 // pshufd $0x45,%xmm4,%xmm8
.byte 102,15,239,228 // pxor %xmm4,%xmm4
.byte 102,69,15,18,4,144 // movlpd (%r8,%rdx,4),%xmm8
.byte 102,65,15,40,248 // movapd %xmm8,%xmm7
.byte 102,68,15,111,196 // movdqa %xmm4,%xmm8
- .byte 235,52 // jmp 3549e <_sk_load_8888_dst_sse2_lowp+0xaa>
+ .byte 235,52 // jmp 3560e <_sk_load_8888_dst_sse2_lowp+0xaa>
.byte 102,65,15,110,100,144,24 // movd 0x18(%r8,%rdx,4),%xmm4
.byte 102,68,15,112,196,69 // pshufd $0x45,%xmm4,%xmm8
.byte 243,65,15,16,100,144,20 // movss 0x14(%r8,%rdx,4),%xmm4
@@ -62379,7 +62449,7 @@ _sk_load_8888_dst_sse2_lowp:
.byte 102,15,114,244,16 // pslld $0x10,%xmm4
.byte 102,15,114,228,16 // psrad $0x10,%xmm4
.byte 102,15,107,236 // packssdw %xmm4,%xmm5
- .byte 102,68,15,111,13,200,61,0,0 // movdqa 0x3dc8(%rip),%xmm9 # 39290 <_sk_srcover_bgra_8888_sse2_lowp+0xb8c>
+ .byte 102,68,15,111,13,184,61,0,0 // movdqa 0x3db8(%rip),%xmm9 # 393f0 <_sk_srcover_bgra_8888_sse2_lowp+0xb7c>
.byte 102,15,111,229 // movdqa %xmm5,%xmm4
.byte 102,65,15,219,225 // pand %xmm9,%xmm4
.byte 102,15,113,213,8 // psrlw $0x8,%xmm5
@@ -62447,21 +62517,21 @@ _sk_store_8888_sse2_lowp:
.byte 36,7 // and $0x7,%al
.byte 254,200 // dec %al
.byte 60,6 // cmp $0x6,%al
- .byte 119,27 // ja 355d3 <_sk_store_8888_sse2_lowp+0x83>
+ .byte 119,27 // ja 35743 <_sk_store_8888_sse2_lowp+0x83>
.byte 15,182,192 // movzbl %al,%eax
- .byte 76,141,13,98,0,0,0 // lea 0x62(%rip),%r9 # 35624 <_sk_store_8888_sse2_lowp+0xd4>
+ .byte 76,141,13,98,0,0,0 // lea 0x62(%rip),%r9 # 35794 <_sk_store_8888_sse2_lowp+0xd4>
.byte 73,99,4,129 // movslq (%r9,%rax,4),%rax
.byte 76,1,200 // add %r9,%rax
.byte 255,224 // jmpq *%rax
.byte 102,69,15,126,12,144 // movd %xmm9,(%r8,%rdx,4)
- .byte 235,75 // jmp 3561e <_sk_store_8888_sse2_lowp+0xce>
+ .byte 235,75 // jmp 3578e <_sk_store_8888_sse2_lowp+0xce>
.byte 243,69,15,127,12,144 // movdqu %xmm9,(%r8,%rdx,4)
.byte 243,69,15,127,68,144,16 // movdqu %xmm8,0x10(%r8,%rdx,4)
- .byte 235,60 // jmp 3561e <_sk_store_8888_sse2_lowp+0xce>
+ .byte 235,60 // jmp 3578e <_sk_store_8888_sse2_lowp+0xce>
.byte 102,69,15,112,193,78 // pshufd $0x4e,%xmm9,%xmm8
.byte 102,69,15,126,68,144,8 // movd %xmm8,0x8(%r8,%rdx,4)
.byte 102,69,15,214,12,144 // movq %xmm9,(%r8,%rdx,4)
- .byte 235,39 // jmp 3561e <_sk_store_8888_sse2_lowp+0xce>
+ .byte 235,39 // jmp 3578e <_sk_store_8888_sse2_lowp+0xce>
.byte 102,69,15,112,208,78 // pshufd $0x4e,%xmm8,%xmm10
.byte 102,69,15,126,84,144,24 // movd %xmm10,0x18(%r8,%rdx,4)
.byte 102,69,15,112,208,229 // pshufd $0xe5,%xmm8,%xmm10
@@ -62506,27 +62576,27 @@ _sk_load_bgra_sse2_lowp:
.byte 36,7 // and $0x7,%al
.byte 254,200 // dec %al
.byte 60,6 // cmp $0x6,%al
- .byte 119,41 // ja 35684 <_sk_load_bgra_sse2_lowp+0x44>
+ .byte 119,41 // ja 357f4 <_sk_load_bgra_sse2_lowp+0x44>
.byte 102,69,15,239,192 // pxor %xmm8,%xmm8
.byte 15,182,192 // movzbl %al,%eax
- .byte 76,141,13,22,1,0,0 // lea 0x116(%rip),%r9 # 35780 <_sk_load_bgra_sse2_lowp+0x140>
+ .byte 76,141,13,22,1,0,0 // lea 0x116(%rip),%r9 # 358f0 <_sk_load_bgra_sse2_lowp+0x140>
.byte 73,99,4,129 // movslq (%r9,%rax,4),%rax
.byte 76,1,200 // add %r9,%rax
.byte 102,15,239,192 // pxor %xmm0,%xmm0
.byte 255,224 // jmpq *%rax
.byte 243,65,15,16,28,144 // movss (%r8,%rdx,4),%xmm3
.byte 102,69,15,239,192 // pxor %xmm8,%xmm8
- .byte 235,102 // jmp 356ea <_sk_load_bgra_sse2_lowp+0xaa>
+ .byte 235,102 // jmp 3585a <_sk_load_bgra_sse2_lowp+0xaa>
.byte 102,65,15,16,28,144 // movupd (%r8,%rdx,4),%xmm3
.byte 243,69,15,111,68,144,16 // movdqu 0x10(%r8,%rdx,4),%xmm8
- .byte 235,87 // jmp 356ea <_sk_load_bgra_sse2_lowp+0xaa>
+ .byte 235,87 // jmp 3585a <_sk_load_bgra_sse2_lowp+0xaa>
.byte 102,65,15,110,68,144,8 // movd 0x8(%r8,%rdx,4),%xmm0
.byte 102,68,15,112,192,69 // pshufd $0x45,%xmm0,%xmm8
.byte 102,15,239,192 // pxor %xmm0,%xmm0
.byte 102,69,15,18,4,144 // movlpd (%r8,%rdx,4),%xmm8
.byte 102,65,15,40,216 // movapd %xmm8,%xmm3
.byte 102,68,15,111,192 // movdqa %xmm0,%xmm8
- .byte 235,52 // jmp 356ea <_sk_load_bgra_sse2_lowp+0xaa>
+ .byte 235,52 // jmp 3585a <_sk_load_bgra_sse2_lowp+0xaa>
.byte 102,65,15,110,68,144,24 // movd 0x18(%r8,%rdx,4),%xmm0
.byte 102,68,15,112,192,69 // pshufd $0x45,%xmm0,%xmm8
.byte 243,65,15,16,68,144,20 // movss 0x14(%r8,%rdx,4),%xmm0
@@ -62543,7 +62613,7 @@ _sk_load_bgra_sse2_lowp:
.byte 102,15,114,240,16 // pslld $0x10,%xmm0
.byte 102,15,114,224,16 // psrad $0x10,%xmm0
.byte 102,15,107,200 // packssdw %xmm0,%xmm1
- .byte 102,68,15,111,13,124,59,0,0 // movdqa 0x3b7c(%rip),%xmm9 # 39290 <_sk_srcover_bgra_8888_sse2_lowp+0xb8c>
+ .byte 102,68,15,111,13,108,59,0,0 // movdqa 0x3b6c(%rip),%xmm9 # 393f0 <_sk_srcover_bgra_8888_sse2_lowp+0xb7c>
.byte 102,15,111,209 // movdqa %xmm1,%xmm2
.byte 102,65,15,219,209 // pand %xmm9,%xmm2
.byte 102,15,113,209,8 // psrlw $0x8,%xmm1
@@ -62596,27 +62666,27 @@ _sk_load_bgra_dst_sse2_lowp:
.byte 36,7 // and $0x7,%al
.byte 254,200 // dec %al
.byte 60,6 // cmp $0x6,%al
- .byte 119,41 // ja 357e0 <_sk_load_bgra_dst_sse2_lowp+0x44>
+ .byte 119,41 // ja 35950 <_sk_load_bgra_dst_sse2_lowp+0x44>
.byte 102,69,15,239,192 // pxor %xmm8,%xmm8
.byte 15,182,192 // movzbl %al,%eax
- .byte 76,141,13,22,1,0,0 // lea 0x116(%rip),%r9 # 358dc <_sk_load_bgra_dst_sse2_lowp+0x140>
+ .byte 76,141,13,22,1,0,0 // lea 0x116(%rip),%r9 # 35a4c <_sk_load_bgra_dst_sse2_lowp+0x140>
.byte 73,99,4,129 // movslq (%r9,%rax,4),%rax
.byte 76,1,200 // add %r9,%rax
.byte 102,15,239,228 // pxor %xmm4,%xmm4
.byte 255,224 // jmpq *%rax
.byte 243,65,15,16,60,144 // movss (%r8,%rdx,4),%xmm7
.byte 102,69,15,239,192 // pxor %xmm8,%xmm8
- .byte 235,102 // jmp 35846 <_sk_load_bgra_dst_sse2_lowp+0xaa>
+ .byte 235,102 // jmp 359b6 <_sk_load_bgra_dst_sse2_lowp+0xaa>
.byte 102,65,15,16,60,144 // movupd (%r8,%rdx,4),%xmm7
.byte 243,69,15,111,68,144,16 // movdqu 0x10(%r8,%rdx,4),%xmm8
- .byte 235,87 // jmp 35846 <_sk_load_bgra_dst_sse2_lowp+0xaa>
+ .byte 235,87 // jmp 359b6 <_sk_load_bgra_dst_sse2_lowp+0xaa>
.byte 102,65,15,110,100,144,8 // movd 0x8(%r8,%rdx,4),%xmm4
.byte 102,68,15,112,196,69 // pshufd $0x45,%xmm4,%xmm8
.byte 102,15,239,228 // pxor %xmm4,%xmm4
.byte 102,69,15,18,4,144 // movlpd (%r8,%rdx,4),%xmm8
.byte 102,65,15,40,248 // movapd %xmm8,%xmm7
.byte 102,68,15,111,196 // movdqa %xmm4,%xmm8
- .byte 235,52 // jmp 35846 <_sk_load_bgra_dst_sse2_lowp+0xaa>
+ .byte 235,52 // jmp 359b6 <_sk_load_bgra_dst_sse2_lowp+0xaa>
.byte 102,65,15,110,100,144,24 // movd 0x18(%r8,%rdx,4),%xmm4
.byte 102,68,15,112,196,69 // pshufd $0x45,%xmm4,%xmm8
.byte 243,65,15,16,100,144,20 // movss 0x14(%r8,%rdx,4),%xmm4
@@ -62633,7 +62703,7 @@ _sk_load_bgra_dst_sse2_lowp:
.byte 102,15,114,244,16 // pslld $0x10,%xmm4
.byte 102,15,114,228,16 // psrad $0x10,%xmm4
.byte 102,15,107,236 // packssdw %xmm4,%xmm5
- .byte 102,68,15,111,13,32,58,0,0 // movdqa 0x3a20(%rip),%xmm9 # 39290 <_sk_srcover_bgra_8888_sse2_lowp+0xb8c>
+ .byte 102,68,15,111,13,16,58,0,0 // movdqa 0x3a10(%rip),%xmm9 # 393f0 <_sk_srcover_bgra_8888_sse2_lowp+0xb7c>
.byte 102,15,111,245 // movdqa %xmm5,%xmm6
.byte 102,65,15,219,241 // pand %xmm9,%xmm6
.byte 102,15,113,213,8 // psrlw $0x8,%xmm5
@@ -62701,21 +62771,21 @@ _sk_store_bgra_sse2_lowp:
.byte 36,7 // and $0x7,%al
.byte 254,200 // dec %al
.byte 60,6 // cmp $0x6,%al
- .byte 119,27 // ja 3597b <_sk_store_bgra_sse2_lowp+0x83>
+ .byte 119,27 // ja 35aeb <_sk_store_bgra_sse2_lowp+0x83>
.byte 15,182,192 // movzbl %al,%eax
- .byte 76,141,13,98,0,0,0 // lea 0x62(%rip),%r9 # 359cc <_sk_store_bgra_sse2_lowp+0xd4>
+ .byte 76,141,13,98,0,0,0 // lea 0x62(%rip),%r9 # 35b3c <_sk_store_bgra_sse2_lowp+0xd4>
.byte 73,99,4,129 // movslq (%r9,%rax,4),%rax
.byte 76,1,200 // add %r9,%rax
.byte 255,224 // jmpq *%rax
.byte 102,69,15,126,12,144 // movd %xmm9,(%r8,%rdx,4)
- .byte 235,75 // jmp 359c6 <_sk_store_bgra_sse2_lowp+0xce>
+ .byte 235,75 // jmp 35b36 <_sk_store_bgra_sse2_lowp+0xce>
.byte 243,69,15,127,12,144 // movdqu %xmm9,(%r8,%rdx,4)
.byte 243,69,15,127,68,144,16 // movdqu %xmm8,0x10(%r8,%rdx,4)
- .byte 235,60 // jmp 359c6 <_sk_store_bgra_sse2_lowp+0xce>
+ .byte 235,60 // jmp 35b36 <_sk_store_bgra_sse2_lowp+0xce>
.byte 102,69,15,112,193,78 // pshufd $0x4e,%xmm9,%xmm8
.byte 102,69,15,126,68,144,8 // movd %xmm8,0x8(%r8,%rdx,4)
.byte 102,69,15,214,12,144 // movq %xmm9,(%r8,%rdx,4)
- .byte 235,39 // jmp 359c6 <_sk_store_bgra_sse2_lowp+0xce>
+ .byte 235,39 // jmp 35b36 <_sk_store_bgra_sse2_lowp+0xce>
.byte 102,69,15,112,208,78 // pshufd $0x4e,%xmm8,%xmm10
.byte 102,69,15,126,84,144,24 // movd %xmm10,0x18(%r8,%rdx,4)
.byte 102,69,15,112,208,229 // pshufd $0xe5,%xmm8,%xmm10
@@ -62829,7 +62899,7 @@ _sk_gather_8888_sse2_lowp:
.byte 102,15,114,241,16 // pslld $0x10,%xmm1
.byte 102,15,114,225,16 // psrad $0x10,%xmm1
.byte 102,15,107,200 // packssdw %xmm0,%xmm1
- .byte 102,68,15,111,13,60,55,0,0 // movdqa 0x373c(%rip),%xmm9 # 39290 <_sk_srcover_bgra_8888_sse2_lowp+0xb8c>
+ .byte 102,68,15,111,13,44,55,0,0 // movdqa 0x372c(%rip),%xmm9 # 393f0 <_sk_srcover_bgra_8888_sse2_lowp+0xb7c>
.byte 102,15,111,193 // movdqa %xmm1,%xmm0
.byte 102,65,15,219,193 // pand %xmm9,%xmm0
.byte 102,15,113,209,8 // psrlw $0x8,%xmm1
@@ -62939,7 +63009,7 @@ _sk_gather_bgra_sse2_lowp:
.byte 102,15,114,241,16 // pslld $0x10,%xmm1
.byte 102,15,114,225,16 // psrad $0x10,%xmm1
.byte 102,15,107,200 // packssdw %xmm0,%xmm1
- .byte 102,68,15,111,13,93,53,0,0 // movdqa 0x355d(%rip),%xmm9 # 39290 <_sk_srcover_bgra_8888_sse2_lowp+0xb8c>
+ .byte 102,68,15,111,13,77,53,0,0 // movdqa 0x354d(%rip),%xmm9 # 393f0 <_sk_srcover_bgra_8888_sse2_lowp+0xb7c>
.byte 102,15,111,209 // movdqa %xmm1,%xmm2
.byte 102,65,15,219,209 // pand %xmm9,%xmm2
.byte 102,15,113,209,8 // psrlw $0x8,%xmm1
@@ -62980,23 +63050,23 @@ _sk_load_565_sse2_lowp:
.byte 36,7 // and $0x7,%al
.byte 254,200 // dec %al
.byte 60,6 // cmp $0x6,%al
- .byte 119,34 // ja 35de2 <_sk_load_565_sse2_lowp+0x3c>
+ .byte 119,34 // ja 35f52 <_sk_load_565_sse2_lowp+0x3c>
.byte 102,15,239,192 // pxor %xmm0,%xmm0
.byte 15,182,192 // movzbl %al,%eax
- .byte 76,141,13,190,0,0,0 // lea 0xbe(%rip),%r9 # 35e8c <_sk_load_565_sse2_lowp+0xe6>
+ .byte 76,141,13,190,0,0,0 // lea 0xbe(%rip),%r9 # 35ffc <_sk_load_565_sse2_lowp+0xe6>
.byte 73,99,4,129 // movslq (%r9,%rax,4),%rax
.byte 76,1,200 // add %r9,%rax
.byte 255,224 // jmpq *%rax
.byte 65,15,183,4,80 // movzwl (%r8,%rdx,2),%eax
.byte 102,15,110,192 // movd %eax,%xmm0
- .byte 235,66 // jmp 35e24 <_sk_load_565_sse2_lowp+0x7e>
+ .byte 235,66 // jmp 35f94 <_sk_load_565_sse2_lowp+0x7e>
.byte 243,65,15,111,4,80 // movdqu (%r8,%rdx,2),%xmm0
- .byte 235,58 // jmp 35e24 <_sk_load_565_sse2_lowp+0x7e>
+ .byte 235,58 // jmp 35f94 <_sk_load_565_sse2_lowp+0x7e>
.byte 102,15,239,192 // pxor %xmm0,%xmm0
.byte 102,65,15,196,68,80,4,2 // pinsrw $0x2,0x4(%r8,%rdx,2),%xmm0
.byte 243,65,15,16,12,80 // movss (%r8,%rdx,2),%xmm1
.byte 243,15,16,193 // movss %xmm1,%xmm0
- .byte 235,34 // jmp 35e24 <_sk_load_565_sse2_lowp+0x7e>
+ .byte 235,34 // jmp 35f94 <_sk_load_565_sse2_lowp+0x7e>
.byte 102,15,239,192 // pxor %xmm0,%xmm0
.byte 102,65,15,196,68,80,12,6 // pinsrw $0x6,0xc(%r8,%rdx,2),%xmm0
.byte 102,65,15,196,68,80,10,5 // pinsrw $0x5,0xa(%r8,%rdx,2),%xmm0
@@ -63004,11 +63074,11 @@ _sk_load_565_sse2_lowp:
.byte 102,65,15,18,4,80 // movlpd (%r8,%rdx,2),%xmm0
.byte 102,15,111,216 // movdqa %xmm0,%xmm3
.byte 102,15,113,211,8 // psrlw $0x8,%xmm3
- .byte 102,15,219,29,11,60,0,0 // pand 0x3c0b(%rip),%xmm3 # 39a40 <_sk_srcover_bgra_8888_sse2_lowp+0x133c>
+ .byte 102,15,219,29,251,59,0,0 // pand 0x3bfb(%rip),%xmm3 # 39ba0 <_sk_srcover_bgra_8888_sse2_lowp+0x132c>
.byte 102,15,111,200 // movdqa %xmm0,%xmm1
.byte 102,15,113,209,5 // psrlw $0x5,%xmm1
- .byte 102,15,219,13,10,60,0,0 // pand 0x3c0a(%rip),%xmm1 # 39a50 <_sk_srcover_bgra_8888_sse2_lowp+0x134c>
- .byte 102,15,111,21,18,60,0,0 // movdqa 0x3c12(%rip),%xmm2 # 39a60 <_sk_srcover_bgra_8888_sse2_lowp+0x135c>
+ .byte 102,15,219,13,250,59,0,0 // pand 0x3bfa(%rip),%xmm1 # 39bb0 <_sk_srcover_bgra_8888_sse2_lowp+0x133c>
+ .byte 102,15,111,21,2,60,0,0 // movdqa 0x3c02(%rip),%xmm2 # 39bc0 <_sk_srcover_bgra_8888_sse2_lowp+0x134c>
.byte 102,15,219,208 // pand %xmm0,%xmm2
.byte 102,15,113,208,13 // psrlw $0xd,%xmm0
.byte 102,15,235,195 // por %xmm3,%xmm0
@@ -63021,7 +63091,7 @@ _sk_load_565_sse2_lowp:
.byte 102,15,113,210,2 // psrlw $0x2,%xmm2
.byte 102,15,235,211 // por %xmm3,%xmm2
.byte 72,173 // lods %ds:(%rsi),%rax
- .byte 15,40,29,8,52,0,0 // movaps 0x3408(%rip),%xmm3 # 39290 <_sk_srcover_bgra_8888_sse2_lowp+0xb8c>
+ .byte 15,40,29,248,51,0,0 // movaps 0x33f8(%rip),%xmm3 # 393f0 <_sk_srcover_bgra_8888_sse2_lowp+0xb7c>
.byte 255,224 // jmpq *%rax
.byte 102,144 // xchg %ax,%ax
.byte 75,255 // rex.WXB (bad)
@@ -63051,23 +63121,23 @@ _sk_load_565_dst_sse2_lowp:
.byte 36,7 // and $0x7,%al
.byte 254,200 // dec %al
.byte 60,6 // cmp $0x6,%al
- .byte 119,34 // ja 35ee4 <_sk_load_565_dst_sse2_lowp+0x3c>
+ .byte 119,34 // ja 36054 <_sk_load_565_dst_sse2_lowp+0x3c>
.byte 102,15,239,228 // pxor %xmm4,%xmm4
.byte 15,182,192 // movzbl %al,%eax
- .byte 76,141,13,188,0,0,0 // lea 0xbc(%rip),%r9 # 35f8c <_sk_load_565_dst_sse2_lowp+0xe4>
+ .byte 76,141,13,188,0,0,0 // lea 0xbc(%rip),%r9 # 360fc <_sk_load_565_dst_sse2_lowp+0xe4>
.byte 73,99,4,129 // movslq (%r9,%rax,4),%rax
.byte 76,1,200 // add %r9,%rax
.byte 255,224 // jmpq *%rax
.byte 65,15,183,4,80 // movzwl (%r8,%rdx,2),%eax
.byte 102,15,110,224 // movd %eax,%xmm4
- .byte 235,66 // jmp 35f26 <_sk_load_565_dst_sse2_lowp+0x7e>
+ .byte 235,66 // jmp 36096 <_sk_load_565_dst_sse2_lowp+0x7e>
.byte 243,65,15,111,36,80 // movdqu (%r8,%rdx,2),%xmm4
- .byte 235,58 // jmp 35f26 <_sk_load_565_dst_sse2_lowp+0x7e>
+ .byte 235,58 // jmp 36096 <_sk_load_565_dst_sse2_lowp+0x7e>
.byte 102,15,239,228 // pxor %xmm4,%xmm4
.byte 102,65,15,196,100,80,4,2 // pinsrw $0x2,0x4(%r8,%rdx,2),%xmm4
.byte 243,65,15,16,44,80 // movss (%r8,%rdx,2),%xmm5
.byte 243,15,16,229 // movss %xmm5,%xmm4
- .byte 235,34 // jmp 35f26 <_sk_load_565_dst_sse2_lowp+0x7e>
+ .byte 235,34 // jmp 36096 <_sk_load_565_dst_sse2_lowp+0x7e>
.byte 102,15,239,228 // pxor %xmm4,%xmm4
.byte 102,65,15,196,100,80,12,6 // pinsrw $0x6,0xc(%r8,%rdx,2),%xmm4
.byte 102,65,15,196,100,80,10,5 // pinsrw $0x5,0xa(%r8,%rdx,2),%xmm4
@@ -63075,11 +63145,11 @@ _sk_load_565_dst_sse2_lowp:
.byte 102,65,15,18,36,80 // movlpd (%r8,%rdx,2),%xmm4
.byte 102,15,111,252 // movdqa %xmm4,%xmm7
.byte 102,15,113,215,8 // psrlw $0x8,%xmm7
- .byte 102,15,219,61,9,59,0,0 // pand 0x3b09(%rip),%xmm7 # 39a40 <_sk_srcover_bgra_8888_sse2_lowp+0x133c>
+ .byte 102,15,219,61,249,58,0,0 // pand 0x3af9(%rip),%xmm7 # 39ba0 <_sk_srcover_bgra_8888_sse2_lowp+0x132c>
.byte 102,15,111,236 // movdqa %xmm4,%xmm5
.byte 102,15,113,213,5 // psrlw $0x5,%xmm5
- .byte 102,15,219,45,8,59,0,0 // pand 0x3b08(%rip),%xmm5 # 39a50 <_sk_srcover_bgra_8888_sse2_lowp+0x134c>
- .byte 102,15,111,53,16,59,0,0 // movdqa 0x3b10(%rip),%xmm6 # 39a60 <_sk_srcover_bgra_8888_sse2_lowp+0x135c>
+ .byte 102,15,219,45,248,58,0,0 // pand 0x3af8(%rip),%xmm5 # 39bb0 <_sk_srcover_bgra_8888_sse2_lowp+0x133c>
+ .byte 102,15,111,53,0,59,0,0 // movdqa 0x3b00(%rip),%xmm6 # 39bc0 <_sk_srcover_bgra_8888_sse2_lowp+0x134c>
.byte 102,15,219,244 // pand %xmm4,%xmm6
.byte 102,15,113,212,13 // psrlw $0xd,%xmm4
.byte 102,15,235,231 // por %xmm7,%xmm4
@@ -63092,7 +63162,7 @@ _sk_load_565_dst_sse2_lowp:
.byte 102,15,113,214,2 // psrlw $0x2,%xmm6
.byte 102,15,235,247 // por %xmm7,%xmm6
.byte 72,173 // lods %ds:(%rsi),%rax
- .byte 15,40,61,6,51,0,0 // movaps 0x3306(%rip),%xmm7 # 39290 <_sk_srcover_bgra_8888_sse2_lowp+0xb8c>
+ .byte 15,40,61,246,50,0,0 // movaps 0x32f6(%rip),%xmm7 # 393f0 <_sk_srcover_bgra_8888_sse2_lowp+0xb7c>
.byte 255,224 // jmpq *%rax
.byte 77,255 // rex.WRB (bad)
.byte 255 // (bad)
@@ -63116,10 +63186,10 @@ _sk_store_565_sse2_lowp:
.byte 76,3,0 // add (%rax),%r8
.byte 102,68,15,111,192 // movdqa %xmm0,%xmm8
.byte 102,65,15,113,240,8 // psllw $0x8,%xmm8
- .byte 102,68,15,219,5,164,58,0,0 // pand 0x3aa4(%rip),%xmm8 # 39a70 <_sk_srcover_bgra_8888_sse2_lowp+0x136c>
+ .byte 102,68,15,219,5,148,58,0,0 // pand 0x3a94(%rip),%xmm8 # 39bd0 <_sk_srcover_bgra_8888_sse2_lowp+0x135c>
.byte 102,68,15,111,201 // movdqa %xmm1,%xmm9
.byte 102,65,15,113,241,3 // psllw $0x3,%xmm9
- .byte 102,68,15,219,13,160,58,0,0 // pand 0x3aa0(%rip),%xmm9 # 39a80 <_sk_srcover_bgra_8888_sse2_lowp+0x137c>
+ .byte 102,68,15,219,13,144,58,0,0 // pand 0x3a90(%rip),%xmm9 # 39be0 <_sk_srcover_bgra_8888_sse2_lowp+0x136c>
.byte 102,69,15,235,200 // por %xmm8,%xmm9
.byte 102,68,15,111,194 // movdqa %xmm2,%xmm8
.byte 102,65,15,113,208,3 // psrlw $0x3,%xmm8
@@ -63128,21 +63198,21 @@ _sk_store_565_sse2_lowp:
.byte 36,7 // and $0x7,%al
.byte 254,200 // dec %al
.byte 60,6 // cmp $0x6,%al
- .byte 119,31 // ja 3601e <_sk_store_565_sse2_lowp+0x76>
+ .byte 119,31 // ja 3618e <_sk_store_565_sse2_lowp+0x76>
.byte 15,182,192 // movzbl %al,%eax
- .byte 76,141,13,95,0,0,0 // lea 0x5f(%rip),%r9 # 36068 <_sk_store_565_sse2_lowp+0xc0>
+ .byte 76,141,13,95,0,0,0 // lea 0x5f(%rip),%r9 # 361d8 <_sk_store_565_sse2_lowp+0xc0>
.byte 73,99,4,129 // movslq (%r9,%rax,4),%rax
.byte 76,1,200 // add %r9,%rax
.byte 255,224 // jmpq *%rax
.byte 102,68,15,126,192 // movd %xmm8,%eax
.byte 102,65,137,4,80 // mov %ax,(%r8,%rdx,2)
- .byte 235,70 // jmp 36064 <_sk_store_565_sse2_lowp+0xbc>
+ .byte 235,70 // jmp 361d4 <_sk_store_565_sse2_lowp+0xbc>
.byte 243,69,15,127,4,80 // movdqu %xmm8,(%r8,%rdx,2)
- .byte 235,62 // jmp 36064 <_sk_store_565_sse2_lowp+0xbc>
+ .byte 235,62 // jmp 361d4 <_sk_store_565_sse2_lowp+0xbc>
.byte 102,65,15,197,192,2 // pextrw $0x2,%xmm8,%eax
.byte 102,65,137,68,80,4 // mov %ax,0x4(%r8,%rdx,2)
.byte 102,69,15,126,4,80 // movd %xmm8,(%r8,%rdx,2)
- .byte 235,42 // jmp 36064 <_sk_store_565_sse2_lowp+0xbc>
+ .byte 235,42 // jmp 361d4 <_sk_store_565_sse2_lowp+0xbc>
.byte 102,65,15,197,192,6 // pextrw $0x6,%xmm8,%eax
.byte 102,65,137,68,80,12 // mov %ax,0xc(%r8,%rdx,2)
.byte 102,65,15,197,192,5 // pextrw $0x5,%xmm8,%eax
@@ -63261,11 +63331,11 @@ _sk_gather_565_sse2_lowp:
.byte 102,15,108,193 // punpcklqdq %xmm1,%xmm0
.byte 102,15,111,216 // movdqa %xmm0,%xmm3
.byte 102,15,113,211,8 // psrlw $0x8,%xmm3
- .byte 102,15,219,29,77,56,0,0 // pand 0x384d(%rip),%xmm3 # 39a40 <_sk_srcover_bgra_8888_sse2_lowp+0x133c>
+ .byte 102,15,219,29,61,56,0,0 // pand 0x383d(%rip),%xmm3 # 39ba0 <_sk_srcover_bgra_8888_sse2_lowp+0x132c>
.byte 102,15,111,200 // movdqa %xmm0,%xmm1
.byte 102,15,113,209,5 // psrlw $0x5,%xmm1
- .byte 102,15,219,13,76,56,0,0 // pand 0x384c(%rip),%xmm1 # 39a50 <_sk_srcover_bgra_8888_sse2_lowp+0x134c>
- .byte 102,15,111,21,84,56,0,0 // movdqa 0x3854(%rip),%xmm2 # 39a60 <_sk_srcover_bgra_8888_sse2_lowp+0x135c>
+ .byte 102,15,219,13,60,56,0,0 // pand 0x383c(%rip),%xmm1 # 39bb0 <_sk_srcover_bgra_8888_sse2_lowp+0x133c>
+ .byte 102,15,111,21,68,56,0,0 // movdqa 0x3844(%rip),%xmm2 # 39bc0 <_sk_srcover_bgra_8888_sse2_lowp+0x134c>
.byte 102,15,219,208 // pand %xmm0,%xmm2
.byte 102,15,113,208,13 // psrlw $0xd,%xmm0
.byte 102,15,235,195 // por %xmm3,%xmm0
@@ -63278,7 +63348,7 @@ _sk_gather_565_sse2_lowp:
.byte 102,15,113,210,2 // psrlw $0x2,%xmm2
.byte 102,15,235,211 // por %xmm3,%xmm2
.byte 72,173 // lods %ds:(%rsi),%rax
- .byte 15,40,29,74,48,0,0 // movaps 0x304a(%rip),%xmm3 # 39290 <_sk_srcover_bgra_8888_sse2_lowp+0xb8c>
+ .byte 15,40,29,58,48,0,0 // movaps 0x303a(%rip),%xmm3 # 393f0 <_sk_srcover_bgra_8888_sse2_lowp+0xb7c>
.byte 91 // pop %rbx
.byte 65,92 // pop %r12
.byte 65,94 // pop %r14
@@ -63299,23 +63369,23 @@ _sk_load_4444_sse2_lowp:
.byte 36,7 // and $0x7,%al
.byte 254,200 // dec %al
.byte 60,6 // cmp $0x6,%al
- .byte 119,36 // ja 3628e <_sk_load_4444_sse2_lowp+0x3e>
+ .byte 119,36 // ja 363fe <_sk_load_4444_sse2_lowp+0x3e>
.byte 102,69,15,239,192 // pxor %xmm8,%xmm8
.byte 15,182,192 // movzbl %al,%eax
- .byte 76,141,13,199,0,0,0 // lea 0xc7(%rip),%r9 # 36340 <_sk_load_4444_sse2_lowp+0xf0>
+ .byte 76,141,13,199,0,0,0 // lea 0xc7(%rip),%r9 # 364b0 <_sk_load_4444_sse2_lowp+0xf0>
.byte 73,99,4,129 // movslq (%r9,%rax,4),%rax
.byte 76,1,200 // add %r9,%rax
.byte 255,224 // jmpq *%rax
.byte 65,15,183,4,80 // movzwl (%r8,%rdx,2),%eax
.byte 102,68,15,110,192 // movd %eax,%xmm8
- .byte 235,69 // jmp 362d3 <_sk_load_4444_sse2_lowp+0x83>
+ .byte 235,69 // jmp 36443 <_sk_load_4444_sse2_lowp+0x83>
.byte 243,69,15,111,4,80 // movdqu (%r8,%rdx,2),%xmm8
- .byte 235,61 // jmp 362d3 <_sk_load_4444_sse2_lowp+0x83>
+ .byte 235,61 // jmp 36443 <_sk_load_4444_sse2_lowp+0x83>
.byte 102,69,15,239,192 // pxor %xmm8,%xmm8
.byte 102,69,15,196,68,80,4,2 // pinsrw $0x2,0x4(%r8,%rdx,2),%xmm8
.byte 243,65,15,16,4,80 // movss (%r8,%rdx,2),%xmm0
.byte 243,68,15,16,192 // movss %xmm0,%xmm8
- .byte 235,35 // jmp 362d3 <_sk_load_4444_sse2_lowp+0x83>
+ .byte 235,35 // jmp 36443 <_sk_load_4444_sse2_lowp+0x83>
.byte 102,69,15,239,192 // pxor %xmm8,%xmm8
.byte 102,69,15,196,68,80,12,6 // pinsrw $0x6,0xc(%r8,%rdx,2),%xmm8
.byte 102,69,15,196,68,80,10,5 // pinsrw $0x5,0xa(%r8,%rdx,2),%xmm8
@@ -63325,7 +63395,7 @@ _sk_load_4444_sse2_lowp:
.byte 102,15,113,209,12 // psrlw $0xc,%xmm1
.byte 102,65,15,111,208 // movdqa %xmm8,%xmm2
.byte 102,15,113,210,8 // psrlw $0x8,%xmm2
- .byte 102,15,111,5,161,55,0,0 // movdqa 0x37a1(%rip),%xmm0 # 39a90 <_sk_srcover_bgra_8888_sse2_lowp+0x138c>
+ .byte 102,15,111,5,145,55,0,0 // movdqa 0x3791(%rip),%xmm0 # 39bf0 <_sk_srcover_bgra_8888_sse2_lowp+0x137c>
.byte 102,15,219,208 // pand %xmm0,%xmm2
.byte 102,65,15,111,216 // movdqa %xmm8,%xmm3
.byte 102,15,113,211,4 // psrlw $0x4,%xmm3
@@ -63355,7 +63425,7 @@ _sk_load_4444_sse2_lowp:
.byte 255 // (bad)
.byte 255 // (bad)
.byte 255 // (bad)
- .byte 125,255 // jge 36355 <_sk_load_4444_sse2_lowp+0x105>
+ .byte 125,255 // jge 364c5 <_sk_load_4444_sse2_lowp+0x105>
.byte 255 // (bad)
.byte 255,112,255 // pushq -0x1(%rax)
.byte 255 // (bad)
@@ -63374,23 +63444,23 @@ _sk_load_4444_dst_sse2_lowp:
.byte 36,7 // and $0x7,%al
.byte 254,200 // dec %al
.byte 60,6 // cmp $0x6,%al
- .byte 119,36 // ja 3639a <_sk_load_4444_dst_sse2_lowp+0x3e>
+ .byte 119,36 // ja 3650a <_sk_load_4444_dst_sse2_lowp+0x3e>
.byte 102,69,15,239,192 // pxor %xmm8,%xmm8
.byte 15,182,192 // movzbl %al,%eax
- .byte 76,141,13,199,0,0,0 // lea 0xc7(%rip),%r9 # 3644c <_sk_load_4444_dst_sse2_lowp+0xf0>
+ .byte 76,141,13,199,0,0,0 // lea 0xc7(%rip),%r9 # 365bc <_sk_load_4444_dst_sse2_lowp+0xf0>
.byte 73,99,4,129 // movslq (%r9,%rax,4),%rax
.byte 76,1,200 // add %r9,%rax
.byte 255,224 // jmpq *%rax
.byte 65,15,183,4,80 // movzwl (%r8,%rdx,2),%eax
.byte 102,68,15,110,192 // movd %eax,%xmm8
- .byte 235,69 // jmp 363df <_sk_load_4444_dst_sse2_lowp+0x83>
+ .byte 235,69 // jmp 3654f <_sk_load_4444_dst_sse2_lowp+0x83>
.byte 243,69,15,111,4,80 // movdqu (%r8,%rdx,2),%xmm8
- .byte 235,61 // jmp 363df <_sk_load_4444_dst_sse2_lowp+0x83>
+ .byte 235,61 // jmp 3654f <_sk_load_4444_dst_sse2_lowp+0x83>
.byte 102,69,15,239,192 // pxor %xmm8,%xmm8
.byte 102,69,15,196,68,80,4,2 // pinsrw $0x2,0x4(%r8,%rdx,2),%xmm8
.byte 243,65,15,16,36,80 // movss (%r8,%rdx,2),%xmm4
.byte 243,68,15,16,196 // movss %xmm4,%xmm8
- .byte 235,35 // jmp 363df <_sk_load_4444_dst_sse2_lowp+0x83>
+ .byte 235,35 // jmp 3654f <_sk_load_4444_dst_sse2_lowp+0x83>
.byte 102,69,15,239,192 // pxor %xmm8,%xmm8
.byte 102,69,15,196,68,80,12,6 // pinsrw $0x6,0xc(%r8,%rdx,2),%xmm8
.byte 102,69,15,196,68,80,10,5 // pinsrw $0x5,0xa(%r8,%rdx,2),%xmm8
@@ -63400,7 +63470,7 @@ _sk_load_4444_dst_sse2_lowp:
.byte 102,15,113,213,12 // psrlw $0xc,%xmm5
.byte 102,65,15,111,240 // movdqa %xmm8,%xmm6
.byte 102,15,113,214,8 // psrlw $0x8,%xmm6
- .byte 102,15,111,37,149,54,0,0 // movdqa 0x3695(%rip),%xmm4 # 39a90 <_sk_srcover_bgra_8888_sse2_lowp+0x138c>
+ .byte 102,15,111,37,133,54,0,0 // movdqa 0x3685(%rip),%xmm4 # 39bf0 <_sk_srcover_bgra_8888_sse2_lowp+0x137c>
.byte 102,15,219,244 // pand %xmm4,%xmm6
.byte 102,65,15,111,248 // movdqa %xmm8,%xmm7
.byte 102,15,113,215,4 // psrlw $0x4,%xmm7
@@ -63430,7 +63500,7 @@ _sk_load_4444_dst_sse2_lowp:
.byte 255 // (bad)
.byte 255 // (bad)
.byte 255 // (bad)
- .byte 125,255 // jge 36461 <_sk_load_4444_dst_sse2_lowp+0x105>
+ .byte 125,255 // jge 365d1 <_sk_load_4444_dst_sse2_lowp+0x105>
.byte 255 // (bad)
.byte 255,112,255 // pushq -0x1(%rax)
.byte 255 // (bad)
@@ -63447,12 +63517,12 @@ _sk_store_4444_sse2_lowp:
.byte 76,3,0 // add (%rax),%r8
.byte 102,68,15,111,192 // movdqa %xmm0,%xmm8
.byte 102,65,15,113,240,8 // psllw $0x8,%xmm8
- .byte 102,68,15,219,5,20,54,0,0 // pand 0x3614(%rip),%xmm8 # 39aa0 <_sk_srcover_bgra_8888_sse2_lowp+0x139c>
+ .byte 102,68,15,219,5,4,54,0,0 // pand 0x3604(%rip),%xmm8 # 39c00 <_sk_srcover_bgra_8888_sse2_lowp+0x138c>
.byte 102,68,15,111,201 // movdqa %xmm1,%xmm9
.byte 102,65,15,113,241,4 // psllw $0x4,%xmm9
- .byte 102,68,15,219,13,16,54,0,0 // pand 0x3610(%rip),%xmm9 # 39ab0 <_sk_srcover_bgra_8888_sse2_lowp+0x13ac>
+ .byte 102,68,15,219,13,0,54,0,0 // pand 0x3600(%rip),%xmm9 # 39c10 <_sk_srcover_bgra_8888_sse2_lowp+0x139c>
.byte 102,69,15,235,200 // por %xmm8,%xmm9
- .byte 102,68,15,111,21,18,54,0,0 // movdqa 0x3612(%rip),%xmm10 # 39ac0 <_sk_srcover_bgra_8888_sse2_lowp+0x13bc>
+ .byte 102,68,15,111,21,2,54,0,0 // movdqa 0x3602(%rip),%xmm10 # 39c20 <_sk_srcover_bgra_8888_sse2_lowp+0x13ac>
.byte 102,68,15,219,210 // pand %xmm2,%xmm10
.byte 102,68,15,111,195 // movdqa %xmm3,%xmm8
.byte 102,65,15,113,208,4 // psrlw $0x4,%xmm8
@@ -63462,21 +63532,21 @@ _sk_store_4444_sse2_lowp:
.byte 36,7 // and $0x7,%al
.byte 254,200 // dec %al
.byte 60,6 // cmp $0x6,%al
- .byte 119,31 // ja 364f1 <_sk_store_4444_sse2_lowp+0x89>
+ .byte 119,31 // ja 36661 <_sk_store_4444_sse2_lowp+0x89>
.byte 15,182,192 // movzbl %al,%eax
- .byte 76,141,13,96,0,0,0 // lea 0x60(%rip),%r9 # 3653c <_sk_store_4444_sse2_lowp+0xd4>
+ .byte 76,141,13,96,0,0,0 // lea 0x60(%rip),%r9 # 366ac <_sk_store_4444_sse2_lowp+0xd4>
.byte 73,99,4,129 // movslq (%r9,%rax,4),%rax
.byte 76,1,200 // add %r9,%rax
.byte 255,224 // jmpq *%rax
.byte 102,68,15,126,192 // movd %xmm8,%eax
.byte 102,65,137,4,80 // mov %ax,(%r8,%rdx,2)
- .byte 235,70 // jmp 36537 <_sk_store_4444_sse2_lowp+0xcf>
+ .byte 235,70 // jmp 366a7 <_sk_store_4444_sse2_lowp+0xcf>
.byte 243,69,15,127,4,80 // movdqu %xmm8,(%r8,%rdx,2)
- .byte 235,62 // jmp 36537 <_sk_store_4444_sse2_lowp+0xcf>
+ .byte 235,62 // jmp 366a7 <_sk_store_4444_sse2_lowp+0xcf>
.byte 102,65,15,197,192,2 // pextrw $0x2,%xmm8,%eax
.byte 102,65,137,68,80,4 // mov %ax,0x4(%r8,%rdx,2)
.byte 102,69,15,126,4,80 // movd %xmm8,(%r8,%rdx,2)
- .byte 235,42 // jmp 36537 <_sk_store_4444_sse2_lowp+0xcf>
+ .byte 235,42 // jmp 366a7 <_sk_store_4444_sse2_lowp+0xcf>
.byte 102,65,15,197,192,6 // pextrw $0x6,%xmm8,%eax
.byte 102,65,137,68,80,12 // mov %ax,0xc(%r8,%rdx,2)
.byte 102,65,15,197,192,5 // pextrw $0x5,%xmm8,%eax
@@ -63495,7 +63565,7 @@ _sk_store_4444_sse2_lowp:
.byte 255 // (bad)
.byte 255 // (bad)
.byte 255 // (bad)
- .byte 233,255,255,255,221 // jmpq ffffffffde036550 <_sk_srcover_bgra_8888_sse2_lowp+0xffffffffddffde4c>
+ .byte 233,255,255,255,221 // jmpq ffffffffde0366c0 <_sk_srcover_bgra_8888_sse2_lowp+0xffffffffddffde4c>
.byte 255 // (bad)
.byte 255 // (bad)
.byte 255,209 // callq *%rcx
@@ -63592,7 +63662,7 @@ _sk_gather_4444_sse2_lowp:
.byte 102,15,113,209,12 // psrlw $0xc,%xmm1
.byte 102,65,15,111,208 // movdqa %xmm8,%xmm2
.byte 102,15,113,210,8 // psrlw $0x8,%xmm2
- .byte 102,15,111,5,186,51,0,0 // movdqa 0x33ba(%rip),%xmm0 # 39a90 <_sk_srcover_bgra_8888_sse2_lowp+0x138c>
+ .byte 102,15,111,5,170,51,0,0 // movdqa 0x33aa(%rip),%xmm0 # 39bf0 <_sk_srcover_bgra_8888_sse2_lowp+0x137c>
.byte 102,15,219,208 // pand %xmm0,%xmm2
.byte 102,65,15,111,216 // movdqa %xmm8,%xmm3
.byte 102,15,113,211,4 // psrlw $0x4,%xmm3
@@ -63630,19 +63700,19 @@ _sk_load_a8_sse2_lowp:
.byte 36,7 // and $0x7,%al
.byte 254,200 // dec %al
.byte 60,6 // cmp $0x6,%al
- .byte 119,34 // ja 36768 <_sk_load_a8_sse2_lowp+0x39>
+ .byte 119,34 // ja 368d8 <_sk_load_a8_sse2_lowp+0x39>
.byte 102,15,239,219 // pxor %xmm3,%xmm3
.byte 15,182,192 // movzbl %al,%eax
- .byte 76,141,13,140,0,0,0 // lea 0x8c(%rip),%r9 # 367e0 <_sk_load_a8_sse2_lowp+0xb1>
+ .byte 76,141,13,140,0,0,0 // lea 0x8c(%rip),%r9 # 36950 <_sk_load_a8_sse2_lowp+0xb1>
.byte 73,99,4,129 // movslq (%r9,%rax,4),%rax
.byte 76,1,200 // add %r9,%rax
.byte 255,224 // jmpq *%rax
.byte 65,15,182,4,16 // movzbl (%r8,%rdx,1),%eax
.byte 102,15,110,216 // movd %eax,%xmm3
- .byte 235,97 // jmp 367c9 <_sk_load_a8_sse2_lowp+0x9a>
+ .byte 235,97 // jmp 36939 <_sk_load_a8_sse2_lowp+0x9a>
.byte 243,65,15,126,28,16 // movq (%r8,%rdx,1),%xmm3
.byte 102,15,96,216 // punpcklbw %xmm0,%xmm3
- .byte 235,85 // jmp 367c9 <_sk_load_a8_sse2_lowp+0x9a>
+ .byte 235,85 // jmp 36939 <_sk_load_a8_sse2_lowp+0x9a>
.byte 65,15,182,68,16,2 // movzbl 0x2(%r8,%rdx,1),%eax
.byte 102,15,239,219 // pxor %xmm3,%xmm3
.byte 102,15,196,216,2 // pinsrw $0x2,%eax,%xmm3
@@ -63650,7 +63720,7 @@ _sk_load_a8_sse2_lowp:
.byte 102,15,110,192 // movd %eax,%xmm0
.byte 102,15,96,192 // punpcklbw %xmm0,%xmm0
.byte 243,15,16,216 // movss %xmm0,%xmm3
- .byte 235,51 // jmp 367c9 <_sk_load_a8_sse2_lowp+0x9a>
+ .byte 235,51 // jmp 36939 <_sk_load_a8_sse2_lowp+0x9a>
.byte 65,15,182,68,16,6 // movzbl 0x6(%r8,%rdx,1),%eax
.byte 102,15,239,219 // pxor %xmm3,%xmm3
.byte 102,15,196,216,6 // pinsrw $0x6,%eax,%xmm3
@@ -63661,14 +63731,14 @@ _sk_load_a8_sse2_lowp:
.byte 102,65,15,110,4,16 // movd (%r8,%rdx,1),%xmm0
.byte 102,15,96,192 // punpcklbw %xmm0,%xmm0
.byte 242,15,16,216 // movsd %xmm0,%xmm3
- .byte 102,15,219,29,191,42,0,0 // pand 0x2abf(%rip),%xmm3 # 39290 <_sk_srcover_bgra_8888_sse2_lowp+0xb8c>
+ .byte 102,15,219,29,175,42,0,0 // pand 0x2aaf(%rip),%xmm3 # 393f0 <_sk_srcover_bgra_8888_sse2_lowp+0xb7c>
.byte 72,173 // lods %ds:(%rsi),%rax
.byte 15,87,192 // xorps %xmm0,%xmm0
.byte 15,87,201 // xorps %xmm1,%xmm1
.byte 15,87,210 // xorps %xmm2,%xmm2
.byte 255,224 // jmpq *%rax
.byte 102,144 // xchg %ax,%ax
- .byte 125,255 // jge 367e1 <_sk_load_a8_sse2_lowp+0xb2>
+ .byte 125,255 // jge 36951 <_sk_load_a8_sse2_lowp+0xb2>
.byte 255 // (bad)
.byte 255,163,255,255,255,148 // jmpq *-0x6b000001(%rbx)
.byte 255 // (bad)
@@ -63699,19 +63769,19 @@ _sk_load_a8_dst_sse2_lowp:
.byte 36,7 // and $0x7,%al
.byte 254,200 // dec %al
.byte 60,6 // cmp $0x6,%al
- .byte 119,34 // ja 36835 <_sk_load_a8_dst_sse2_lowp+0x39>
+ .byte 119,34 // ja 369a5 <_sk_load_a8_dst_sse2_lowp+0x39>
.byte 102,15,239,255 // pxor %xmm7,%xmm7
.byte 15,182,192 // movzbl %al,%eax
- .byte 76,141,13,139,0,0,0 // lea 0x8b(%rip),%r9 # 368ac <_sk_load_a8_dst_sse2_lowp+0xb0>
+ .byte 76,141,13,139,0,0,0 // lea 0x8b(%rip),%r9 # 36a1c <_sk_load_a8_dst_sse2_lowp+0xb0>
.byte 73,99,4,129 // movslq (%r9,%rax,4),%rax
.byte 76,1,200 // add %r9,%rax
.byte 255,224 // jmpq *%rax
.byte 65,15,182,4,16 // movzbl (%r8,%rdx,1),%eax
.byte 102,15,110,248 // movd %eax,%xmm7
- .byte 235,97 // jmp 36896 <_sk_load_a8_dst_sse2_lowp+0x9a>
+ .byte 235,97 // jmp 36a06 <_sk_load_a8_dst_sse2_lowp+0x9a>
.byte 243,65,15,126,60,16 // movq (%r8,%rdx,1),%xmm7
.byte 102,15,96,248 // punpcklbw %xmm0,%xmm7
- .byte 235,85 // jmp 36896 <_sk_load_a8_dst_sse2_lowp+0x9a>
+ .byte 235,85 // jmp 36a06 <_sk_load_a8_dst_sse2_lowp+0x9a>
.byte 65,15,182,68,16,2 // movzbl 0x2(%r8,%rdx,1),%eax
.byte 102,15,239,255 // pxor %xmm7,%xmm7
.byte 102,15,196,248,2 // pinsrw $0x2,%eax,%xmm7
@@ -63719,7 +63789,7 @@ _sk_load_a8_dst_sse2_lowp:
.byte 102,15,110,224 // movd %eax,%xmm4
.byte 102,15,96,224 // punpcklbw %xmm0,%xmm4
.byte 243,15,16,252 // movss %xmm4,%xmm7
- .byte 235,51 // jmp 36896 <_sk_load_a8_dst_sse2_lowp+0x9a>
+ .byte 235,51 // jmp 36a06 <_sk_load_a8_dst_sse2_lowp+0x9a>
.byte 65,15,182,68,16,6 // movzbl 0x6(%r8,%rdx,1),%eax
.byte 102,15,239,255 // pxor %xmm7,%xmm7
.byte 102,15,196,248,6 // pinsrw $0x6,%eax,%xmm7
@@ -63730,14 +63800,14 @@ _sk_load_a8_dst_sse2_lowp:
.byte 102,65,15,110,36,16 // movd (%r8,%rdx,1),%xmm4
.byte 102,15,96,224 // punpcklbw %xmm0,%xmm4
.byte 242,15,16,252 // movsd %xmm4,%xmm7
- .byte 102,15,219,61,242,41,0,0 // pand 0x29f2(%rip),%xmm7 # 39290 <_sk_srcover_bgra_8888_sse2_lowp+0xb8c>
+ .byte 102,15,219,61,226,41,0,0 // pand 0x29e2(%rip),%xmm7 # 393f0 <_sk_srcover_bgra_8888_sse2_lowp+0xb7c>
.byte 72,173 // lods %ds:(%rsi),%rax
.byte 15,87,228 // xorps %xmm4,%xmm4
.byte 15,87,237 // xorps %xmm5,%xmm5
.byte 15,87,246 // xorps %xmm6,%xmm6
.byte 255,224 // jmpq *%rax
.byte 144 // nop
- .byte 126,255 // jle 368ad <_sk_load_a8_dst_sse2_lowp+0xb1>
+ .byte 126,255 // jle 36a1d <_sk_load_a8_dst_sse2_lowp+0xb1>
.byte 255 // (bad)
.byte 255,164,255,255,255,149,255 // jmpq *-0x6a0001(%rdi,%rdi,8)
.byte 255 // (bad)
@@ -63767,42 +63837,42 @@ _sk_store_a8_sse2_lowp:
.byte 36,7 // and $0x7,%al
.byte 254,200 // dec %al
.byte 60,6 // cmp $0x6,%al
- .byte 119,29 // ja 368fc <_sk_store_a8_sse2_lowp+0x34>
+ .byte 119,29 // ja 36a6c <_sk_store_a8_sse2_lowp+0x34>
.byte 15,182,192 // movzbl %al,%eax
- .byte 76,141,13,147,0,0,0 // lea 0x93(%rip),%r9 # 3697c <_sk_store_a8_sse2_lowp+0xb4>
+ .byte 76,141,13,147,0,0,0 // lea 0x93(%rip),%r9 # 36aec <_sk_store_a8_sse2_lowp+0xb4>
.byte 73,99,4,129 // movslq (%r9,%rax,4),%rax
.byte 76,1,200 // add %r9,%rax
.byte 255,224 // jmpq *%rax
.byte 102,15,126,216 // movd %xmm3,%eax
.byte 65,136,4,16 // mov %al,(%r8,%rdx,1)
- .byte 235,123 // jmp 36977 <_sk_store_a8_sse2_lowp+0xaf>
- .byte 102,68,15,111,5,139,41,0,0 // movdqa 0x298b(%rip),%xmm8 # 39290 <_sk_srcover_bgra_8888_sse2_lowp+0xb8c>
+ .byte 235,123 // jmp 36ae7 <_sk_store_a8_sse2_lowp+0xaf>
+ .byte 102,68,15,111,5,123,41,0,0 // movdqa 0x297b(%rip),%xmm8 # 393f0 <_sk_srcover_bgra_8888_sse2_lowp+0xb7c>
.byte 102,68,15,219,195 // pand %xmm3,%xmm8
.byte 102,69,15,103,192 // packuswb %xmm8,%xmm8
.byte 102,69,15,214,4,16 // movq %xmm8,(%r8,%rdx,1)
- .byte 235,96 // jmp 36977 <_sk_store_a8_sse2_lowp+0xaf>
+ .byte 235,96 // jmp 36ae7 <_sk_store_a8_sse2_lowp+0xaf>
.byte 102,15,197,195,2 // pextrw $0x2,%xmm3,%eax
.byte 65,136,68,16,2 // mov %al,0x2(%r8,%rdx,1)
- .byte 102,68,15,111,5,102,41,0,0 // movdqa 0x2966(%rip),%xmm8 # 39290 <_sk_srcover_bgra_8888_sse2_lowp+0xb8c>
+ .byte 102,68,15,111,5,86,41,0,0 // movdqa 0x2956(%rip),%xmm8 # 393f0 <_sk_srcover_bgra_8888_sse2_lowp+0xb7c>
.byte 102,68,15,219,195 // pand %xmm3,%xmm8
.byte 102,69,15,103,192 // packuswb %xmm8,%xmm8
.byte 102,68,15,126,192 // movd %xmm8,%eax
.byte 102,65,137,4,16 // mov %ax,(%r8,%rdx,1)
- .byte 235,55 // jmp 36977 <_sk_store_a8_sse2_lowp+0xaf>
+ .byte 235,55 // jmp 36ae7 <_sk_store_a8_sse2_lowp+0xaf>
.byte 102,15,197,195,6 // pextrw $0x6,%xmm3,%eax
.byte 65,136,68,16,6 // mov %al,0x6(%r8,%rdx,1)
.byte 102,15,197,195,5 // pextrw $0x5,%xmm3,%eax
.byte 65,136,68,16,5 // mov %al,0x5(%r8,%rdx,1)
.byte 102,15,197,195,4 // pextrw $0x4,%xmm3,%eax
.byte 65,136,68,16,4 // mov %al,0x4(%r8,%rdx,1)
- .byte 102,68,15,111,5,41,41,0,0 // movdqa 0x2929(%rip),%xmm8 # 39290 <_sk_srcover_bgra_8888_sse2_lowp+0xb8c>
+ .byte 102,68,15,111,5,25,41,0,0 // movdqa 0x2919(%rip),%xmm8 # 393f0 <_sk_srcover_bgra_8888_sse2_lowp+0xb7c>
.byte 102,68,15,219,195 // pand %xmm3,%xmm8
.byte 102,69,15,103,192 // packuswb %xmm8,%xmm8
.byte 102,69,15,126,4,16 // movd %xmm8,(%r8,%rdx,1)
.byte 72,173 // lods %ds:(%rsi),%rax
.byte 255,224 // jmpq *%rax
.byte 144 // nop
- .byte 118,255 // jbe 3697d <_sk_store_a8_sse2_lowp+0xb5>
+ .byte 118,255 // jbe 36aed <_sk_store_a8_sse2_lowp+0xb5>
.byte 255 // (bad)
.byte 255,165,255,255,255,155 // jmpq *-0x64000001(%rbp)
.byte 255 // (bad)
@@ -63927,19 +63997,19 @@ _sk_load_g8_sse2_lowp:
.byte 36,7 // and $0x7,%al
.byte 254,200 // dec %al
.byte 60,6 // cmp $0x6,%al
- .byte 119,34 // ja 36b39 <_sk_load_g8_sse2_lowp+0x39>
+ .byte 119,34 // ja 36ca9 <_sk_load_g8_sse2_lowp+0x39>
.byte 102,15,239,192 // pxor %xmm0,%xmm0
.byte 15,182,192 // movzbl %al,%eax
- .byte 76,141,13,147,0,0,0 // lea 0x93(%rip),%r9 # 36bb8 <_sk_load_g8_sse2_lowp+0xb8>
+ .byte 76,141,13,147,0,0,0 // lea 0x93(%rip),%r9 # 36d28 <_sk_load_g8_sse2_lowp+0xb8>
.byte 73,99,4,129 // movslq (%r9,%rax,4),%rax
.byte 76,1,200 // add %r9,%rax
.byte 255,224 // jmpq *%rax
.byte 65,15,182,4,16 // movzbl (%r8,%rdx,1),%eax
.byte 102,15,110,192 // movd %eax,%xmm0
- .byte 235,97 // jmp 36b9a <_sk_load_g8_sse2_lowp+0x9a>
+ .byte 235,97 // jmp 36d0a <_sk_load_g8_sse2_lowp+0x9a>
.byte 243,65,15,126,4,16 // movq (%r8,%rdx,1),%xmm0
.byte 102,15,96,192 // punpcklbw %xmm0,%xmm0
- .byte 235,85 // jmp 36b9a <_sk_load_g8_sse2_lowp+0x9a>
+ .byte 235,85 // jmp 36d0a <_sk_load_g8_sse2_lowp+0x9a>
.byte 65,15,182,68,16,2 // movzbl 0x2(%r8,%rdx,1),%eax
.byte 102,15,239,192 // pxor %xmm0,%xmm0
.byte 102,15,196,192,2 // pinsrw $0x2,%eax,%xmm0
@@ -63947,7 +64017,7 @@ _sk_load_g8_sse2_lowp:
.byte 102,15,110,200 // movd %eax,%xmm1
.byte 102,15,96,200 // punpcklbw %xmm0,%xmm1
.byte 243,15,16,193 // movss %xmm1,%xmm0
- .byte 235,51 // jmp 36b9a <_sk_load_g8_sse2_lowp+0x9a>
+ .byte 235,51 // jmp 36d0a <_sk_load_g8_sse2_lowp+0x9a>
.byte 65,15,182,68,16,6 // movzbl 0x6(%r8,%rdx,1),%eax
.byte 102,15,239,192 // pxor %xmm0,%xmm0
.byte 102,15,196,192,6 // pinsrw $0x6,%eax,%xmm0
@@ -63958,14 +64028,14 @@ _sk_load_g8_sse2_lowp:
.byte 102,65,15,110,12,16 // movd (%r8,%rdx,1),%xmm1
.byte 102,15,96,200 // punpcklbw %xmm0,%xmm1
.byte 242,15,16,193 // movsd %xmm1,%xmm0
- .byte 102,15,219,5,238,38,0,0 // pand 0x26ee(%rip),%xmm0 # 39290 <_sk_srcover_bgra_8888_sse2_lowp+0xb8c>
+ .byte 102,15,219,5,222,38,0,0 // pand 0x26de(%rip),%xmm0 # 393f0 <_sk_srcover_bgra_8888_sse2_lowp+0xb7c>
.byte 72,173 // lods %ds:(%rsi),%rax
- .byte 15,40,29,229,38,0,0 // movaps 0x26e5(%rip),%xmm3 # 39290 <_sk_srcover_bgra_8888_sse2_lowp+0xb8c>
+ .byte 15,40,29,213,38,0,0 // movaps 0x26d5(%rip),%xmm3 # 393f0 <_sk_srcover_bgra_8888_sse2_lowp+0xb7c>
.byte 102,15,111,200 // movdqa %xmm0,%xmm1
.byte 102,15,111,208 // movdqa %xmm0,%xmm2
.byte 255,224 // jmpq *%rax
.byte 15,31,0 // nopl (%rax)
- .byte 118,255 // jbe 36bb9 <_sk_load_g8_sse2_lowp+0xb9>
+ .byte 118,255 // jbe 36d29 <_sk_load_g8_sse2_lowp+0xb9>
.byte 255 // (bad)
.byte 255,156,255,255,255,141,255 // lcall *-0x720001(%rdi,%rdi,8)
.byte 255 // (bad)
@@ -63993,19 +64063,19 @@ _sk_load_g8_dst_sse2_lowp:
.byte 36,7 // and $0x7,%al
.byte 254,200 // dec %al
.byte 60,6 // cmp $0x6,%al
- .byte 119,34 // ja 36c0d <_sk_load_g8_dst_sse2_lowp+0x39>
+ .byte 119,34 // ja 36d7d <_sk_load_g8_dst_sse2_lowp+0x39>
.byte 102,15,239,228 // pxor %xmm4,%xmm4
.byte 15,182,192 // movzbl %al,%eax
- .byte 76,141,13,147,0,0,0 // lea 0x93(%rip),%r9 # 36c8c <_sk_load_g8_dst_sse2_lowp+0xb8>
+ .byte 76,141,13,147,0,0,0 // lea 0x93(%rip),%r9 # 36dfc <_sk_load_g8_dst_sse2_lowp+0xb8>
.byte 73,99,4,129 // movslq (%r9,%rax,4),%rax
.byte 76,1,200 // add %r9,%rax
.byte 255,224 // jmpq *%rax
.byte 65,15,182,4,16 // movzbl (%r8,%rdx,1),%eax
.byte 102,15,110,224 // movd %eax,%xmm4
- .byte 235,97 // jmp 36c6e <_sk_load_g8_dst_sse2_lowp+0x9a>
+ .byte 235,97 // jmp 36dde <_sk_load_g8_dst_sse2_lowp+0x9a>
.byte 243,65,15,126,36,16 // movq (%r8,%rdx,1),%xmm4
.byte 102,15,96,224 // punpcklbw %xmm0,%xmm4
- .byte 235,85 // jmp 36c6e <_sk_load_g8_dst_sse2_lowp+0x9a>
+ .byte 235,85 // jmp 36dde <_sk_load_g8_dst_sse2_lowp+0x9a>
.byte 65,15,182,68,16,2 // movzbl 0x2(%r8,%rdx,1),%eax
.byte 102,15,239,228 // pxor %xmm4,%xmm4
.byte 102,15,196,224,2 // pinsrw $0x2,%eax,%xmm4
@@ -64013,7 +64083,7 @@ _sk_load_g8_dst_sse2_lowp:
.byte 102,15,110,232 // movd %eax,%xmm5
.byte 102,15,96,232 // punpcklbw %xmm0,%xmm5
.byte 243,15,16,229 // movss %xmm5,%xmm4
- .byte 235,51 // jmp 36c6e <_sk_load_g8_dst_sse2_lowp+0x9a>
+ .byte 235,51 // jmp 36dde <_sk_load_g8_dst_sse2_lowp+0x9a>
.byte 65,15,182,68,16,6 // movzbl 0x6(%r8,%rdx,1),%eax
.byte 102,15,239,228 // pxor %xmm4,%xmm4
.byte 102,15,196,224,6 // pinsrw $0x6,%eax,%xmm4
@@ -64024,14 +64094,14 @@ _sk_load_g8_dst_sse2_lowp:
.byte 102,65,15,110,44,16 // movd (%r8,%rdx,1),%xmm5
.byte 102,15,96,232 // punpcklbw %xmm0,%xmm5
.byte 242,15,16,229 // movsd %xmm5,%xmm4
- .byte 102,15,219,37,26,38,0,0 // pand 0x261a(%rip),%xmm4 # 39290 <_sk_srcover_bgra_8888_sse2_lowp+0xb8c>
+ .byte 102,15,219,37,10,38,0,0 // pand 0x260a(%rip),%xmm4 # 393f0 <_sk_srcover_bgra_8888_sse2_lowp+0xb7c>
.byte 72,173 // lods %ds:(%rsi),%rax
- .byte 15,40,61,17,38,0,0 // movaps 0x2611(%rip),%xmm7 # 39290 <_sk_srcover_bgra_8888_sse2_lowp+0xb8c>
+ .byte 15,40,61,1,38,0,0 // movaps 0x2601(%rip),%xmm7 # 393f0 <_sk_srcover_bgra_8888_sse2_lowp+0xb7c>
.byte 102,15,111,236 // movdqa %xmm4,%xmm5
.byte 102,15,111,244 // movdqa %xmm4,%xmm6
.byte 255,224 // jmpq *%rax
.byte 15,31,0 // nopl (%rax)
- .byte 118,255 // jbe 36c8d <_sk_load_g8_dst_sse2_lowp+0xb9>
+ .byte 118,255 // jbe 36dfd <_sk_load_g8_dst_sse2_lowp+0xb9>
.byte 255 // (bad)
.byte 255,156,255,255,255,141,255 // lcall *-0x720001(%rdi,%rdi,8)
.byte 255 // (bad)
@@ -64052,10 +64122,10 @@ HIDDEN _sk_luminance_to_alpha_sse2_lowp
FUNCTION(_sk_luminance_to_alpha_sse2_lowp)
_sk_luminance_to_alpha_sse2_lowp:
.byte 102,15,111,218 // movdqa %xmm2,%xmm3
- .byte 102,15,213,5,28,46,0,0 // pmullw 0x2e1c(%rip),%xmm0 # 39ad0 <_sk_srcover_bgra_8888_sse2_lowp+0x13cc>
- .byte 102,15,213,13,36,46,0,0 // pmullw 0x2e24(%rip),%xmm1 # 39ae0 <_sk_srcover_bgra_8888_sse2_lowp+0x13dc>
+ .byte 102,15,213,5,12,46,0,0 // pmullw 0x2e0c(%rip),%xmm0 # 39c30 <_sk_srcover_bgra_8888_sse2_lowp+0x13bc>
+ .byte 102,15,213,13,20,46,0,0 // pmullw 0x2e14(%rip),%xmm1 # 39c40 <_sk_srcover_bgra_8888_sse2_lowp+0x13cc>
.byte 102,15,253,200 // paddw %xmm0,%xmm1
- .byte 102,15,213,29,40,46,0,0 // pmullw 0x2e28(%rip),%xmm3 # 39af0 <_sk_srcover_bgra_8888_sse2_lowp+0x13ec>
+ .byte 102,15,213,29,24,46,0,0 // pmullw 0x2e18(%rip),%xmm3 # 39c50 <_sk_srcover_bgra_8888_sse2_lowp+0x13dc>
.byte 102,15,253,217 // paddw %xmm1,%xmm3
.byte 102,15,113,211,8 // psrlw $0x8,%xmm3
.byte 72,173 // lods %ds:(%rsi),%rax
@@ -64148,7 +64218,7 @@ _sk_gather_g8_sse2_lowp:
.byte 102,15,196,197,3 // pinsrw $0x3,%ebp,%xmm0
.byte 102,65,15,96,192 // punpcklbw %xmm8,%xmm0
.byte 72,173 // lods %ds:(%rsi),%rax
- .byte 15,40,29,86,36,0,0 // movaps 0x2456(%rip),%xmm3 # 39290 <_sk_srcover_bgra_8888_sse2_lowp+0xb8c>
+ .byte 15,40,29,70,36,0,0 // movaps 0x2446(%rip),%xmm3 # 393f0 <_sk_srcover_bgra_8888_sse2_lowp+0xb7c>
.byte 102,15,111,200 // movdqa %xmm0,%xmm1
.byte 102,15,111,208 // movdqa %xmm0,%xmm2
.byte 91 // pop %rbx
@@ -64164,14 +64234,14 @@ FUNCTION(_sk_scale_1_float_sse2_lowp)
_sk_scale_1_float_sse2_lowp:
.byte 72,173 // lods %ds:(%rsi),%rax
.byte 243,68,15,16,0 // movss (%rax),%xmm8
- .byte 243,68,15,89,5,132,27,0,0 // mulss 0x1b84(%rip),%xmm8 # 389e0 <_sk_srcover_bgra_8888_sse2_lowp+0x2dc>
- .byte 243,68,15,88,5,75,27,0,0 // addss 0x1b4b(%rip),%xmm8 # 389b0 <_sk_srcover_bgra_8888_sse2_lowp+0x2ac>
+ .byte 243,68,15,89,5,132,27,0,0 // mulss 0x1b84(%rip),%xmm8 # 38b50 <_sk_srcover_bgra_8888_sse2_lowp+0x2dc>
+ .byte 243,68,15,88,5,75,27,0,0 // addss 0x1b4b(%rip),%xmm8 # 38b20 <_sk_srcover_bgra_8888_sse2_lowp+0x2ac>
.byte 243,65,15,44,192 // cvttss2si %xmm8,%eax
.byte 102,68,15,110,192 // movd %eax,%xmm8
.byte 242,69,15,112,192,0 // pshuflw $0x0,%xmm8,%xmm8
.byte 102,69,15,112,192,80 // pshufd $0x50,%xmm8,%xmm8
.byte 102,65,15,213,192 // pmullw %xmm8,%xmm0
- .byte 102,68,15,111,13,7,36,0,0 // movdqa 0x2407(%rip),%xmm9 # 39290 <_sk_srcover_bgra_8888_sse2_lowp+0xb8c>
+ .byte 102,68,15,111,13,247,35,0,0 // movdqa 0x23f7(%rip),%xmm9 # 393f0 <_sk_srcover_bgra_8888_sse2_lowp+0xb7c>
.byte 102,65,15,253,193 // paddw %xmm9,%xmm0
.byte 102,15,113,208,8 // psrlw $0x8,%xmm0
.byte 102,65,15,213,200 // pmullw %xmm8,%xmm1
@@ -64192,13 +64262,13 @@ FUNCTION(_sk_lerp_1_float_sse2_lowp)
_sk_lerp_1_float_sse2_lowp:
.byte 72,173 // lods %ds:(%rsi),%rax
.byte 243,68,15,16,0 // movss (%rax),%xmm8
- .byte 243,68,15,89,5,12,27,0,0 // mulss 0x1b0c(%rip),%xmm8 # 389e0 <_sk_srcover_bgra_8888_sse2_lowp+0x2dc>
- .byte 243,68,15,88,5,211,26,0,0 // addss 0x1ad3(%rip),%xmm8 # 389b0 <_sk_srcover_bgra_8888_sse2_lowp+0x2ac>
+ .byte 243,68,15,89,5,12,27,0,0 // mulss 0x1b0c(%rip),%xmm8 # 38b50 <_sk_srcover_bgra_8888_sse2_lowp+0x2dc>
+ .byte 243,68,15,88,5,211,26,0,0 // addss 0x1ad3(%rip),%xmm8 # 38b20 <_sk_srcover_bgra_8888_sse2_lowp+0x2ac>
.byte 243,65,15,44,192 // cvttss2si %xmm8,%eax
.byte 102,68,15,110,192 // movd %eax,%xmm8
.byte 242,69,15,112,192,0 // pshuflw $0x0,%xmm8,%xmm8
.byte 102,69,15,112,192,80 // pshufd $0x50,%xmm8,%xmm8
- .byte 102,68,15,111,13,148,35,0,0 // movdqa 0x2394(%rip),%xmm9 # 39290 <_sk_srcover_bgra_8888_sse2_lowp+0xb8c>
+ .byte 102,68,15,111,13,132,35,0,0 // movdqa 0x2384(%rip),%xmm9 # 393f0 <_sk_srcover_bgra_8888_sse2_lowp+0xb7c>
.byte 102,65,15,213,192 // pmullw %xmm8,%xmm0
.byte 102,65,15,253,193 // paddw %xmm9,%xmm0
.byte 102,65,15,213,200 // pmullw %xmm8,%xmm1
@@ -64238,19 +64308,19 @@ _sk_scale_u8_sse2_lowp:
.byte 36,7 // and $0x7,%al
.byte 254,200 // dec %al
.byte 60,6 // cmp $0x6,%al
- .byte 119,36 // ja 36fb3 <_sk_scale_u8_sse2_lowp+0x3b>
+ .byte 119,36 // ja 37123 <_sk_scale_u8_sse2_lowp+0x3b>
.byte 102,69,15,239,192 // pxor %xmm8,%xmm8
.byte 15,182,192 // movzbl %al,%eax
- .byte 76,141,13,218,0,0,0 // lea 0xda(%rip),%r9 # 37078 <_sk_scale_u8_sse2_lowp+0x100>
+ .byte 76,141,13,218,0,0,0 // lea 0xda(%rip),%r9 # 371e8 <_sk_scale_u8_sse2_lowp+0x100>
.byte 73,99,4,129 // movslq (%r9,%rax,4),%rax
.byte 76,1,200 // add %r9,%rax
.byte 255,224 // jmpq *%rax
.byte 65,15,182,4,16 // movzbl (%r8,%rdx,1),%eax
.byte 102,68,15,110,192 // movd %eax,%xmm8
- .byte 235,109 // jmp 37020 <_sk_scale_u8_sse2_lowp+0xa8>
+ .byte 235,109 // jmp 37190 <_sk_scale_u8_sse2_lowp+0xa8>
.byte 243,69,15,126,4,16 // movq (%r8,%rdx,1),%xmm8
.byte 102,68,15,96,192 // punpcklbw %xmm0,%xmm8
- .byte 235,96 // jmp 37020 <_sk_scale_u8_sse2_lowp+0xa8>
+ .byte 235,96 // jmp 37190 <_sk_scale_u8_sse2_lowp+0xa8>
.byte 65,15,182,68,16,2 // movzbl 0x2(%r8,%rdx,1),%eax
.byte 102,69,15,239,192 // pxor %xmm8,%xmm8
.byte 102,68,15,196,192,2 // pinsrw $0x2,%eax,%xmm8
@@ -64258,7 +64328,7 @@ _sk_scale_u8_sse2_lowp:
.byte 102,68,15,110,200 // movd %eax,%xmm9
.byte 102,68,15,96,200 // punpcklbw %xmm0,%xmm9
.byte 243,69,15,16,193 // movss %xmm9,%xmm8
- .byte 235,57 // jmp 37020 <_sk_scale_u8_sse2_lowp+0xa8>
+ .byte 235,57 // jmp 37190 <_sk_scale_u8_sse2_lowp+0xa8>
.byte 65,15,182,68,16,6 // movzbl 0x6(%r8,%rdx,1),%eax
.byte 102,69,15,239,192 // pxor %xmm8,%xmm8
.byte 102,68,15,196,192,6 // pinsrw $0x6,%eax,%xmm8
@@ -64269,9 +64339,9 @@ _sk_scale_u8_sse2_lowp:
.byte 102,69,15,110,12,16 // movd (%r8,%rdx,1),%xmm9
.byte 102,68,15,96,200 // punpcklbw %xmm0,%xmm9
.byte 242,69,15,16,193 // movsd %xmm9,%xmm8
- .byte 102,68,15,219,5,103,34,0,0 // pand 0x2267(%rip),%xmm8 # 39290 <_sk_srcover_bgra_8888_sse2_lowp+0xb8c>
+ .byte 102,68,15,219,5,87,34,0,0 // pand 0x2257(%rip),%xmm8 # 393f0 <_sk_srcover_bgra_8888_sse2_lowp+0xb7c>
.byte 102,65,15,213,192 // pmullw %xmm8,%xmm0
- .byte 102,68,15,111,13,89,34,0,0 // movdqa 0x2259(%rip),%xmm9 # 39290 <_sk_srcover_bgra_8888_sse2_lowp+0xb8c>
+ .byte 102,68,15,111,13,73,34,0,0 // movdqa 0x2249(%rip),%xmm9 # 393f0 <_sk_srcover_bgra_8888_sse2_lowp+0xb7c>
.byte 102,65,15,253,193 // paddw %xmm9,%xmm0
.byte 102,15,113,208,8 // psrlw $0x8,%xmm0
.byte 102,65,15,213,200 // pmullw %xmm8,%xmm1
@@ -64313,19 +64383,19 @@ _sk_lerp_u8_sse2_lowp:
.byte 36,7 // and $0x7,%al
.byte 254,200 // dec %al
.byte 60,6 // cmp $0x6,%al
- .byte 119,36 // ja 370cf <_sk_lerp_u8_sse2_lowp+0x3b>
+ .byte 119,36 // ja 3723f <_sk_lerp_u8_sse2_lowp+0x3b>
.byte 102,69,15,239,192 // pxor %xmm8,%xmm8
.byte 15,182,192 // movzbl %al,%eax
- .byte 76,141,13,30,1,0,0 // lea 0x11e(%rip),%r9 # 371d8 <_sk_lerp_u8_sse2_lowp+0x144>
+ .byte 76,141,13,30,1,0,0 // lea 0x11e(%rip),%r9 # 37348 <_sk_lerp_u8_sse2_lowp+0x144>
.byte 73,99,4,129 // movslq (%r9,%rax,4),%rax
.byte 76,1,200 // add %r9,%rax
.byte 255,224 // jmpq *%rax
.byte 65,15,182,4,16 // movzbl (%r8,%rdx,1),%eax
.byte 102,68,15,110,192 // movd %eax,%xmm8
- .byte 235,109 // jmp 3713c <_sk_lerp_u8_sse2_lowp+0xa8>
+ .byte 235,109 // jmp 372ac <_sk_lerp_u8_sse2_lowp+0xa8>
.byte 243,69,15,126,4,16 // movq (%r8,%rdx,1),%xmm8
.byte 102,68,15,96,192 // punpcklbw %xmm0,%xmm8
- .byte 235,96 // jmp 3713c <_sk_lerp_u8_sse2_lowp+0xa8>
+ .byte 235,96 // jmp 372ac <_sk_lerp_u8_sse2_lowp+0xa8>
.byte 65,15,182,68,16,2 // movzbl 0x2(%r8,%rdx,1),%eax
.byte 102,69,15,239,192 // pxor %xmm8,%xmm8
.byte 102,68,15,196,192,2 // pinsrw $0x2,%eax,%xmm8
@@ -64333,7 +64403,7 @@ _sk_lerp_u8_sse2_lowp:
.byte 102,68,15,110,200 // movd %eax,%xmm9
.byte 102,68,15,96,200 // punpcklbw %xmm0,%xmm9
.byte 243,69,15,16,193 // movss %xmm9,%xmm8
- .byte 235,57 // jmp 3713c <_sk_lerp_u8_sse2_lowp+0xa8>
+ .byte 235,57 // jmp 372ac <_sk_lerp_u8_sse2_lowp+0xa8>
.byte 65,15,182,68,16,6 // movzbl 0x6(%r8,%rdx,1),%eax
.byte 102,69,15,239,192 // pxor %xmm8,%xmm8
.byte 102,68,15,196,192,6 // pinsrw $0x6,%eax,%xmm8
@@ -64344,8 +64414,8 @@ _sk_lerp_u8_sse2_lowp:
.byte 102,69,15,110,12,16 // movd (%r8,%rdx,1),%xmm9
.byte 102,68,15,96,200 // punpcklbw %xmm0,%xmm9
.byte 242,69,15,16,193 // movsd %xmm9,%xmm8
- .byte 102,68,15,219,5,75,33,0,0 // pand 0x214b(%rip),%xmm8 # 39290 <_sk_srcover_bgra_8888_sse2_lowp+0xb8c>
- .byte 102,68,15,111,21,66,33,0,0 // movdqa 0x2142(%rip),%xmm10 # 39290 <_sk_srcover_bgra_8888_sse2_lowp+0xb8c>
+ .byte 102,68,15,219,5,59,33,0,0 // pand 0x213b(%rip),%xmm8 # 393f0 <_sk_srcover_bgra_8888_sse2_lowp+0xb7c>
+ .byte 102,68,15,111,21,50,33,0,0 // movdqa 0x2132(%rip),%xmm10 # 393f0 <_sk_srcover_bgra_8888_sse2_lowp+0xb7c>
.byte 102,69,15,111,200 // movdqa %xmm8,%xmm9
.byte 102,69,15,239,202 // pxor %xmm10,%xmm9
.byte 102,69,15,111,217 // movdqa %xmm9,%xmm11
@@ -64375,9 +64445,9 @@ _sk_lerp_u8_sse2_lowp:
.byte 102,65,15,111,216 // movdqa %xmm8,%xmm3
.byte 255,224 // jmpq *%rax
.byte 15,31,0 // nopl (%rax)
- .byte 235,254 // jmp 371d8 <_sk_lerp_u8_sse2_lowp+0x144>
+ .byte 235,254 // jmp 37348 <_sk_lerp_u8_sse2_lowp+0x144>
.byte 255 // (bad)
- .byte 255,21,255,255,255,4 // callq *0x4ffffff(%rip) # 50371e0 <_sk_srcover_bgra_8888_sse2_lowp+0x4ffeadc>
+ .byte 255,21,255,255,255,4 // callq *0x4ffffff(%rip) # 5037350 <_sk_srcover_bgra_8888_sse2_lowp+0x4ffeadc>
.byte 255 // (bad)
.byte 255 // (bad)
.byte 255,84,255,255 // callq *-0x1(%rdi,%rdi,8)
@@ -64407,23 +64477,23 @@ _sk_scale_565_sse2_lowp:
.byte 36,7 // and $0x7,%al
.byte 254,200 // dec %al
.byte 60,6 // cmp $0x6,%al
- .byte 119,34 // ja 3723f <_sk_scale_565_sse2_lowp+0x4b>
+ .byte 119,34 // ja 373af <_sk_scale_565_sse2_lowp+0x4b>
.byte 102,15,239,246 // pxor %xmm6,%xmm6
.byte 15,182,192 // movzbl %al,%eax
- .byte 76,141,13,229,1,0,0 // lea 0x1e5(%rip),%r9 # 37410 <_sk_scale_565_sse2_lowp+0x21c>
+ .byte 76,141,13,229,1,0,0 // lea 0x1e5(%rip),%r9 # 37580 <_sk_scale_565_sse2_lowp+0x21c>
.byte 73,99,4,129 // movslq (%r9,%rax,4),%rax
.byte 76,1,200 // add %r9,%rax
.byte 255,224 // jmpq *%rax
.byte 65,15,183,4,80 // movzwl (%r8,%rdx,2),%eax
.byte 102,15,110,240 // movd %eax,%xmm6
- .byte 235,67 // jmp 37282 <_sk_scale_565_sse2_lowp+0x8e>
+ .byte 235,67 // jmp 373f2 <_sk_scale_565_sse2_lowp+0x8e>
.byte 243,65,15,111,52,80 // movdqu (%r8,%rdx,2),%xmm6
- .byte 235,59 // jmp 37282 <_sk_scale_565_sse2_lowp+0x8e>
+ .byte 235,59 // jmp 373f2 <_sk_scale_565_sse2_lowp+0x8e>
.byte 102,15,239,246 // pxor %xmm6,%xmm6
.byte 102,65,15,196,116,80,4,2 // pinsrw $0x2,0x4(%r8,%rdx,2),%xmm6
.byte 243,69,15,16,12,80 // movss (%r8,%rdx,2),%xmm9
.byte 243,65,15,16,241 // movss %xmm9,%xmm6
- .byte 235,34 // jmp 37282 <_sk_scale_565_sse2_lowp+0x8e>
+ .byte 235,34 // jmp 373f2 <_sk_scale_565_sse2_lowp+0x8e>
.byte 102,15,239,246 // pxor %xmm6,%xmm6
.byte 102,65,15,196,116,80,12,6 // pinsrw $0x6,0xc(%r8,%rdx,2),%xmm6
.byte 102,65,15,196,116,80,10,5 // pinsrw $0x5,0xa(%r8,%rdx,2),%xmm6
@@ -64431,11 +64501,11 @@ _sk_scale_565_sse2_lowp:
.byte 102,65,15,18,52,80 // movlpd (%r8,%rdx,2),%xmm6
.byte 102,68,15,111,222 // movdqa %xmm6,%xmm11
.byte 102,65,15,113,211,8 // psrlw $0x8,%xmm11
- .byte 102,68,15,219,29,170,39,0,0 // pand 0x27aa(%rip),%xmm11 # 39a40 <_sk_srcover_bgra_8888_sse2_lowp+0x133c>
+ .byte 102,68,15,219,29,154,39,0,0 // pand 0x279a(%rip),%xmm11 # 39ba0 <_sk_srcover_bgra_8888_sse2_lowp+0x132c>
.byte 102,68,15,111,206 // movdqa %xmm6,%xmm9
.byte 102,65,15,113,209,5 // psrlw $0x5,%xmm9
- .byte 102,68,15,219,13,166,39,0,0 // pand 0x27a6(%rip),%xmm9 # 39a50 <_sk_srcover_bgra_8888_sse2_lowp+0x134c>
- .byte 102,68,15,111,21,173,39,0,0 // movdqa 0x27ad(%rip),%xmm10 # 39a60 <_sk_srcover_bgra_8888_sse2_lowp+0x135c>
+ .byte 102,68,15,219,13,150,39,0,0 // pand 0x2796(%rip),%xmm9 # 39bb0 <_sk_srcover_bgra_8888_sse2_lowp+0x133c>
+ .byte 102,68,15,111,21,157,39,0,0 // movdqa 0x279d(%rip),%xmm10 # 39bc0 <_sk_srcover_bgra_8888_sse2_lowp+0x134c>
.byte 102,68,15,219,214 // pand %xmm6,%xmm10
.byte 102,15,113,214,13 // psrlw $0xd,%xmm6
.byte 102,65,15,235,243 // por %xmm11,%xmm6
@@ -64447,7 +64517,7 @@ _sk_scale_565_sse2_lowp:
.byte 102,65,15,113,243,3 // psllw $0x3,%xmm11
.byte 102,65,15,113,210,2 // psrlw $0x2,%xmm10
.byte 102,69,15,235,211 // por %xmm11,%xmm10
- .byte 102,68,15,111,37,9,39,0,0 // movdqa 0x2709(%rip),%xmm12 # 39a00 <_sk_srcover_bgra_8888_sse2_lowp+0x12fc>
+ .byte 102,68,15,111,37,249,38,0,0 // movdqa 0x26f9(%rip),%xmm12 # 39b60 <_sk_srcover_bgra_8888_sse2_lowp+0x12ec>
.byte 102,68,15,111,235 // movdqa %xmm3,%xmm13
.byte 102,69,15,239,236 // pxor %xmm12,%xmm13
.byte 102,68,15,111,223 // movdqa %xmm7,%xmm11
@@ -64488,7 +64558,7 @@ _sk_scale_565_sse2_lowp:
.byte 102,68,15,213,201 // pmullw %xmm1,%xmm9
.byte 102,68,15,213,210 // pmullw %xmm2,%xmm10
.byte 102,68,15,213,219 // pmullw %xmm3,%xmm11
- .byte 102,15,111,5,203,30,0,0 // movdqa 0x1ecb(%rip),%xmm0 # 39290 <_sk_srcover_bgra_8888_sse2_lowp+0xb8c>
+ .byte 102,15,111,5,187,30,0,0 // movdqa 0x1ebb(%rip),%xmm0 # 393f0 <_sk_srcover_bgra_8888_sse2_lowp+0xb7c>
.byte 102,15,253,240 // paddw %xmm0,%xmm6
.byte 102,68,15,253,200 // paddw %xmm0,%xmm9
.byte 102,68,15,253,208 // paddw %xmm0,%xmm10
@@ -64535,23 +64605,23 @@ _sk_lerp_565_sse2_lowp:
.byte 36,7 // and $0x7,%al
.byte 254,200 // dec %al
.byte 60,6 // cmp $0x6,%al
- .byte 119,34 // ja 37477 <_sk_lerp_565_sse2_lowp+0x4b>
+ .byte 119,34 // ja 375e7 <_sk_lerp_565_sse2_lowp+0x4b>
.byte 102,15,239,210 // pxor %xmm2,%xmm2
.byte 15,182,192 // movzbl %al,%eax
- .byte 76,141,13,45,2,0,0 // lea 0x22d(%rip),%r9 # 37690 <_sk_lerp_565_sse2_lowp+0x264>
+ .byte 76,141,13,45,2,0,0 // lea 0x22d(%rip),%r9 # 37800 <_sk_lerp_565_sse2_lowp+0x264>
.byte 73,99,4,129 // movslq (%r9,%rax,4),%rax
.byte 76,1,200 // add %r9,%rax
.byte 255,224 // jmpq *%rax
.byte 65,15,183,4,80 // movzwl (%r8,%rdx,2),%eax
.byte 102,15,110,208 // movd %eax,%xmm2
- .byte 235,67 // jmp 374ba <_sk_lerp_565_sse2_lowp+0x8e>
+ .byte 235,67 // jmp 3762a <_sk_lerp_565_sse2_lowp+0x8e>
.byte 243,65,15,111,20,80 // movdqu (%r8,%rdx,2),%xmm2
- .byte 235,59 // jmp 374ba <_sk_lerp_565_sse2_lowp+0x8e>
+ .byte 235,59 // jmp 3762a <_sk_lerp_565_sse2_lowp+0x8e>
.byte 102,15,239,210 // pxor %xmm2,%xmm2
.byte 102,65,15,196,84,80,4,2 // pinsrw $0x2,0x4(%r8,%rdx,2),%xmm2
.byte 243,69,15,16,12,80 // movss (%r8,%rdx,2),%xmm9
.byte 243,65,15,16,209 // movss %xmm9,%xmm2
- .byte 235,34 // jmp 374ba <_sk_lerp_565_sse2_lowp+0x8e>
+ .byte 235,34 // jmp 3762a <_sk_lerp_565_sse2_lowp+0x8e>
.byte 102,15,239,210 // pxor %xmm2,%xmm2
.byte 102,65,15,196,84,80,12,6 // pinsrw $0x6,0xc(%r8,%rdx,2),%xmm2
.byte 102,65,15,196,84,80,10,5 // pinsrw $0x5,0xa(%r8,%rdx,2),%xmm2
@@ -64559,11 +64629,11 @@ _sk_lerp_565_sse2_lowp:
.byte 102,65,15,18,20,80 // movlpd (%r8,%rdx,2),%xmm2
.byte 102,68,15,111,218 // movdqa %xmm2,%xmm11
.byte 102,65,15,113,211,8 // psrlw $0x8,%xmm11
- .byte 102,68,15,219,29,114,37,0,0 // pand 0x2572(%rip),%xmm11 # 39a40 <_sk_srcover_bgra_8888_sse2_lowp+0x133c>
+ .byte 102,68,15,219,29,98,37,0,0 // pand 0x2562(%rip),%xmm11 # 39ba0 <_sk_srcover_bgra_8888_sse2_lowp+0x132c>
.byte 102,68,15,111,202 // movdqa %xmm2,%xmm9
.byte 102,65,15,113,209,5 // psrlw $0x5,%xmm9
- .byte 102,68,15,219,13,110,37,0,0 // pand 0x256e(%rip),%xmm9 # 39a50 <_sk_srcover_bgra_8888_sse2_lowp+0x134c>
- .byte 102,68,15,111,21,117,37,0,0 // movdqa 0x2575(%rip),%xmm10 # 39a60 <_sk_srcover_bgra_8888_sse2_lowp+0x135c>
+ .byte 102,68,15,219,13,94,37,0,0 // pand 0x255e(%rip),%xmm9 # 39bb0 <_sk_srcover_bgra_8888_sse2_lowp+0x133c>
+ .byte 102,68,15,111,21,101,37,0,0 // movdqa 0x2565(%rip),%xmm10 # 39bc0 <_sk_srcover_bgra_8888_sse2_lowp+0x134c>
.byte 102,68,15,219,210 // pand %xmm2,%xmm10
.byte 102,15,113,210,13 // psrlw $0xd,%xmm2
.byte 102,65,15,235,211 // por %xmm11,%xmm2
@@ -64575,7 +64645,7 @@ _sk_lerp_565_sse2_lowp:
.byte 102,65,15,113,243,3 // psllw $0x3,%xmm11
.byte 102,65,15,113,210,2 // psrlw $0x2,%xmm10
.byte 102,69,15,235,211 // por %xmm11,%xmm10
- .byte 102,68,15,111,37,209,36,0,0 // movdqa 0x24d1(%rip),%xmm12 # 39a00 <_sk_srcover_bgra_8888_sse2_lowp+0x12fc>
+ .byte 102,68,15,111,37,193,36,0,0 // movdqa 0x24c1(%rip),%xmm12 # 39b60 <_sk_srcover_bgra_8888_sse2_lowp+0x12ec>
.byte 102,68,15,111,235 // movdqa %xmm3,%xmm13
.byte 102,69,15,239,236 // pxor %xmm12,%xmm13
.byte 102,68,15,111,223 // movdqa %xmm7,%xmm11
@@ -64614,7 +64684,7 @@ _sk_lerp_565_sse2_lowp:
.byte 102,69,15,235,222 // por %xmm14,%xmm11
.byte 102,15,111,194 // movdqa %xmm2,%xmm0
.byte 102,15,213,209 // pmullw %xmm1,%xmm2
- .byte 102,68,15,111,5,157,28,0,0 // movdqa 0x1c9d(%rip),%xmm8 # 39290 <_sk_srcover_bgra_8888_sse2_lowp+0xb8c>
+ .byte 102,68,15,111,5,141,28,0,0 // movdqa 0x1c8d(%rip),%xmm8 # 393f0 <_sk_srcover_bgra_8888_sse2_lowp+0xb7c>
.byte 102,65,15,239,192 // pxor %xmm8,%xmm0
.byte 102,15,213,196 // pmullw %xmm4,%xmm0
.byte 102,65,15,253,208 // paddw %xmm8,%xmm2
@@ -64676,7 +64746,7 @@ _sk_clamp_x_1_sse2_lowp:
.byte 69,15,87,192 // xorps %xmm8,%xmm8
.byte 65,15,95,200 // maxps %xmm8,%xmm1
.byte 65,15,95,192 // maxps %xmm8,%xmm0
- .byte 68,15,40,5,48,28,0,0 // movaps 0x1c30(%rip),%xmm8 # 392f0 <_sk_srcover_bgra_8888_sse2_lowp+0xbec>
+ .byte 68,15,40,5,32,28,0,0 // movaps 0x1c20(%rip),%xmm8 # 39450 <_sk_srcover_bgra_8888_sse2_lowp+0xbdc>
.byte 65,15,93,192 // minps %xmm8,%xmm0
.byte 65,15,93,200 // minps %xmm8,%xmm1
.byte 72,173 // lods %ds:(%rsi),%rax
@@ -64694,7 +64764,7 @@ _sk_repeat_x_1_sse2_lowp:
.byte 69,15,194,208,1 // cmpltps %xmm8,%xmm10
.byte 68,15,40,216 // movaps %xmm0,%xmm11
.byte 69,15,194,217,1 // cmpltps %xmm9,%xmm11
- .byte 68,15,40,37,248,27,0,0 // movaps 0x1bf8(%rip),%xmm12 # 392f0 <_sk_srcover_bgra_8888_sse2_lowp+0xbec>
+ .byte 68,15,40,37,232,27,0,0 // movaps 0x1be8(%rip),%xmm12 # 39450 <_sk_srcover_bgra_8888_sse2_lowp+0xbdc>
.byte 69,15,84,220 // andps %xmm12,%xmm11
.byte 69,15,84,212 // andps %xmm12,%xmm10
.byte 69,15,87,237 // xorps %xmm13,%xmm13
@@ -64713,10 +64783,10 @@ HIDDEN _sk_mirror_x_1_sse2_lowp
.globl _sk_mirror_x_1_sse2_lowp
FUNCTION(_sk_mirror_x_1_sse2_lowp)
_sk_mirror_x_1_sse2_lowp:
- .byte 68,15,40,5,32,28,0,0 // movaps 0x1c20(%rip),%xmm8 # 39350 <_sk_srcover_bgra_8888_sse2_lowp+0xc4c>
+ .byte 68,15,40,5,16,28,0,0 // movaps 0x1c10(%rip),%xmm8 # 394b0 <_sk_srcover_bgra_8888_sse2_lowp+0xc3c>
.byte 65,15,88,192 // addps %xmm8,%xmm0
.byte 65,15,88,200 // addps %xmm8,%xmm1
- .byte 68,15,40,21,160,27,0,0 // movaps 0x1ba0(%rip),%xmm10 # 392e0 <_sk_srcover_bgra_8888_sse2_lowp+0xbdc>
+ .byte 68,15,40,21,144,27,0,0 // movaps 0x1b90(%rip),%xmm10 # 39440 <_sk_srcover_bgra_8888_sse2_lowp+0xbcc>
.byte 68,15,40,217 // movaps %xmm1,%xmm11
.byte 69,15,89,218 // mulps %xmm10,%xmm11
.byte 68,15,89,208 // mulps %xmm0,%xmm10
@@ -64726,7 +64796,7 @@ _sk_mirror_x_1_sse2_lowp:
.byte 69,15,91,233 // cvtdq2ps %xmm9,%xmm13
.byte 69,15,194,213,1 // cmpltps %xmm13,%xmm10
.byte 69,15,194,220,1 // cmpltps %xmm12,%xmm11
- .byte 68,15,40,13,128,27,0,0 // movaps 0x1b80(%rip),%xmm9 # 392f0 <_sk_srcover_bgra_8888_sse2_lowp+0xbec>
+ .byte 68,15,40,13,112,27,0,0 // movaps 0x1b70(%rip),%xmm9 # 39450 <_sk_srcover_bgra_8888_sse2_lowp+0xbdc>
.byte 69,15,84,217 // andps %xmm9,%xmm11
.byte 69,15,84,209 // andps %xmm9,%xmm10
.byte 69,15,87,246 // xorps %xmm14,%xmm14
@@ -64738,7 +64808,7 @@ _sk_mirror_x_1_sse2_lowp:
.byte 65,15,92,204 // subps %xmm12,%xmm1
.byte 65,15,88,200 // addps %xmm8,%xmm1
.byte 65,15,88,192 // addps %xmm8,%xmm0
- .byte 68,15,40,5,108,32,0,0 // movaps 0x206c(%rip),%xmm8 # 39810 <_sk_srcover_bgra_8888_sse2_lowp+0x110c>
+ .byte 68,15,40,5,92,32,0,0 // movaps 0x205c(%rip),%xmm8 # 39970 <_sk_srcover_bgra_8888_sse2_lowp+0x10fc>
.byte 65,15,84,192 // andps %xmm8,%xmm0
.byte 65,15,84,200 // andps %xmm8,%xmm1
.byte 65,15,95,206 // maxps %xmm14,%xmm1
@@ -64767,7 +64837,7 @@ _sk_gradient_sse2_lowp:
.byte 102,15,239,210 // pxor %xmm2,%xmm2
.byte 73,131,248,2 // cmp $0x2,%r8
.byte 102,15,239,219 // pxor %xmm3,%xmm3
- .byte 114,56 // jb 3782c <_sk_gradient_sse2_lowp+0x6c>
+ .byte 114,56 // jb 3799c <_sk_gradient_sse2_lowp+0x6c>
.byte 72,139,88,72 // mov 0x48(%rax),%rbx
.byte 73,255,200 // dec %r8
.byte 72,131,195,4 // add $0x4,%rbx
@@ -64782,7 +64852,7 @@ _sk_gradient_sse2_lowp:
.byte 102,15,250,220 // psubd %xmm4,%xmm3
.byte 72,131,195,4 // add $0x4,%rbx
.byte 73,255,200 // dec %r8
- .byte 117,219 // jne 37807 <_sk_gradient_sse2_lowp+0x47>
+ .byte 117,219 // jne 37977 <_sk_gradient_sse2_lowp+0x47>
.byte 102,15,112,227,78 // pshufd $0x4e,%xmm3,%xmm4
.byte 102,73,15,126,225 // movq %xmm4,%r9
.byte 69,137,200 // mov %r9d,%r8d
@@ -64925,10 +64995,10 @@ _sk_gradient_sse2_lowp:
.byte 15,88,221 // addps %xmm5,%xmm3
.byte 69,15,89,207 // mulps %xmm15,%xmm9
.byte 68,15,88,204 // addps %xmm4,%xmm9
- .byte 15,40,53,231,24,0,0 // movaps 0x18e7(%rip),%xmm6 # 393d0 <_sk_srcover_bgra_8888_sse2_lowp+0xccc>
+ .byte 15,40,53,215,24,0,0 // movaps 0x18d7(%rip),%xmm6 # 39530 <_sk_srcover_bgra_8888_sse2_lowp+0xcbc>
.byte 15,89,222 // mulps %xmm6,%xmm3
.byte 68,15,89,206 // mulps %xmm6,%xmm9
- .byte 15,40,37,233,23,0,0 // movaps 0x17e9(%rip),%xmm4 # 392e0 <_sk_srcover_bgra_8888_sse2_lowp+0xbdc>
+ .byte 15,40,37,217,23,0,0 // movaps 0x17d9(%rip),%xmm4 # 39440 <_sk_srcover_bgra_8888_sse2_lowp+0xbcc>
.byte 68,15,88,204 // addps %xmm4,%xmm9
.byte 15,88,220 // addps %xmm4,%xmm3
.byte 243,15,91,219 // cvttps2dq %xmm3,%xmm3
@@ -65024,9 +65094,9 @@ _sk_evenly_spaced_gradient_sse2_lowp:
.byte 72,139,24 // mov (%rax),%rbx
.byte 72,139,104,8 // mov 0x8(%rax),%rbp
.byte 72,255,203 // dec %rbx
- .byte 120,7 // js 37c70 <_sk_evenly_spaced_gradient_sse2_lowp+0x34>
+ .byte 120,7 // js 37de0 <_sk_evenly_spaced_gradient_sse2_lowp+0x34>
.byte 243,72,15,42,211 // cvtsi2ss %rbx,%xmm2
- .byte 235,21 // jmp 37c85 <_sk_evenly_spaced_gradient_sse2_lowp+0x49>
+ .byte 235,21 // jmp 37df5 <_sk_evenly_spaced_gradient_sse2_lowp+0x49>
.byte 73,137,216 // mov %rbx,%r8
.byte 73,209,232 // shr %r8
.byte 131,227,1 // and $0x1,%ebx
@@ -65181,10 +65251,10 @@ _sk_evenly_spaced_gradient_sse2_lowp:
.byte 15,40,68,36,176 // movaps -0x50(%rsp),%xmm0
.byte 68,15,89,192 // mulps %xmm0,%xmm8
.byte 68,15,88,193 // addps %xmm1,%xmm8
- .byte 15,40,13,115,20,0,0 // movaps 0x1473(%rip),%xmm1 # 393d0 <_sk_srcover_bgra_8888_sse2_lowp+0xccc>
+ .byte 15,40,13,99,20,0,0 // movaps 0x1463(%rip),%xmm1 # 39530 <_sk_srcover_bgra_8888_sse2_lowp+0xcbc>
.byte 15,89,217 // mulps %xmm1,%xmm3
.byte 68,15,89,193 // mulps %xmm1,%xmm8
- .byte 15,40,13,117,19,0,0 // movaps 0x1375(%rip),%xmm1 # 392e0 <_sk_srcover_bgra_8888_sse2_lowp+0xbdc>
+ .byte 15,40,13,101,19,0,0 // movaps 0x1365(%rip),%xmm1 # 39440 <_sk_srcover_bgra_8888_sse2_lowp+0xbcc>
.byte 68,15,88,193 // addps %xmm1,%xmm8
.byte 15,88,217 // addps %xmm1,%xmm3
.byte 243,15,91,219 // cvttps2dq %xmm3,%xmm3
@@ -65201,7 +65271,7 @@ _sk_evenly_spaced_gradient_sse2_lowp:
.byte 15,40,240 // movaps %xmm0,%xmm6
.byte 68,15,89,206 // mulps %xmm6,%xmm9
.byte 68,15,88,204 // addps %xmm4,%xmm9
- .byte 15,40,5,23,20,0,0 // movaps 0x1417(%rip),%xmm0 # 393d0 <_sk_srcover_bgra_8888_sse2_lowp+0xccc>
+ .byte 15,40,5,7,20,0,0 // movaps 0x1407(%rip),%xmm0 # 39530 <_sk_srcover_bgra_8888_sse2_lowp+0xcbc>
.byte 15,89,208 // mulps %xmm0,%xmm2
.byte 68,15,89,200 // mulps %xmm0,%xmm9
.byte 68,15,88,201 // addps %xmm1,%xmm9
@@ -65280,10 +65350,10 @@ _sk_evenly_spaced_2_stop_gradient_sse2_lowp:
.byte 15,89,216 // mulps %xmm0,%xmm3
.byte 65,15,88,216 // addps %xmm8,%xmm3
.byte 65,15,88,208 // addps %xmm8,%xmm2
- .byte 68,15,40,21,220,18,0,0 // movaps 0x12dc(%rip),%xmm10 # 393d0 <_sk_srcover_bgra_8888_sse2_lowp+0xccc>
+ .byte 68,15,40,21,204,18,0,0 // movaps 0x12cc(%rip),%xmm10 # 39530 <_sk_srcover_bgra_8888_sse2_lowp+0xcbc>
.byte 65,15,89,210 // mulps %xmm10,%xmm2
.byte 65,15,89,218 // mulps %xmm10,%xmm3
- .byte 68,15,40,29,220,17,0,0 // movaps 0x11dc(%rip),%xmm11 # 392e0 <_sk_srcover_bgra_8888_sse2_lowp+0xbdc>
+ .byte 68,15,40,29,204,17,0,0 // movaps 0x11cc(%rip),%xmm11 # 39440 <_sk_srcover_bgra_8888_sse2_lowp+0xbcc>
.byte 65,15,88,219 // addps %xmm11,%xmm3
.byte 65,15,88,211 // addps %xmm11,%xmm2
.byte 243,15,91,210 // cvttps2dq %xmm2,%xmm2
@@ -65376,7 +65446,7 @@ _sk_xy_to_unit_angle_sse2_lowp:
.byte 15,40,218 // movaps %xmm2,%xmm3
.byte 15,40,209 // movaps %xmm1,%xmm2
.byte 15,40,200 // movaps %xmm0,%xmm1
- .byte 68,15,40,37,144,21,0,0 // movaps 0x1590(%rip),%xmm12 # 39810 <_sk_srcover_bgra_8888_sse2_lowp+0x110c>
+ .byte 68,15,40,37,128,21,0,0 // movaps 0x1580(%rip),%xmm12 # 39970 <_sk_srcover_bgra_8888_sse2_lowp+0x10fc>
.byte 68,15,40,194 // movaps %xmm2,%xmm8
.byte 69,15,84,196 // andps %xmm12,%xmm8
.byte 68,15,40,217 // movaps %xmm1,%xmm11
@@ -65410,26 +65480,26 @@ _sk_xy_to_unit_angle_sse2_lowp:
.byte 69,15,89,246 // mulps %xmm14,%xmm14
.byte 65,15,40,195 // movaps %xmm11,%xmm0
.byte 15,89,192 // mulps %xmm0,%xmm0
- .byte 68,15,40,37,115,21,0,0 // movaps 0x1573(%rip),%xmm12 # 39880 <_sk_srcover_bgra_8888_sse2_lowp+0x117c>
+ .byte 68,15,40,37,99,21,0,0 // movaps 0x1563(%rip),%xmm12 # 399e0 <_sk_srcover_bgra_8888_sse2_lowp+0x116c>
.byte 68,15,40,232 // movaps %xmm0,%xmm13
.byte 69,15,89,236 // mulps %xmm12,%xmm13
.byte 69,15,89,230 // mulps %xmm14,%xmm12
- .byte 68,15,40,61,111,21,0,0 // movaps 0x156f(%rip),%xmm15 # 39890 <_sk_srcover_bgra_8888_sse2_lowp+0x118c>
+ .byte 68,15,40,61,95,21,0,0 // movaps 0x155f(%rip),%xmm15 # 399f0 <_sk_srcover_bgra_8888_sse2_lowp+0x117c>
.byte 69,15,88,231 // addps %xmm15,%xmm12
.byte 69,15,88,239 // addps %xmm15,%xmm13
.byte 68,15,89,232 // mulps %xmm0,%xmm13
.byte 69,15,89,230 // mulps %xmm14,%xmm12
- .byte 68,15,40,61,103,21,0,0 // movaps 0x1567(%rip),%xmm15 # 398a0 <_sk_srcover_bgra_8888_sse2_lowp+0x119c>
+ .byte 68,15,40,61,87,21,0,0 // movaps 0x1557(%rip),%xmm15 # 39a00 <_sk_srcover_bgra_8888_sse2_lowp+0x118c>
.byte 69,15,88,231 // addps %xmm15,%xmm12
.byte 69,15,88,239 // addps %xmm15,%xmm13
.byte 68,15,89,232 // mulps %xmm0,%xmm13
.byte 69,15,89,230 // mulps %xmm14,%xmm12
- .byte 15,40,5,96,21,0,0 // movaps 0x1560(%rip),%xmm0 # 398b0 <_sk_srcover_bgra_8888_sse2_lowp+0x11ac>
+ .byte 15,40,5,80,21,0,0 // movaps 0x1550(%rip),%xmm0 # 39a10 <_sk_srcover_bgra_8888_sse2_lowp+0x119c>
.byte 68,15,88,224 // addps %xmm0,%xmm12
.byte 68,15,88,232 // addps %xmm0,%xmm13
.byte 69,15,89,235 // mulps %xmm11,%xmm13
.byte 69,15,89,224 // mulps %xmm8,%xmm12
- .byte 15,40,5,89,21,0,0 // movaps 0x1559(%rip),%xmm0 # 398c0 <_sk_srcover_bgra_8888_sse2_lowp+0x11bc>
+ .byte 15,40,5,73,21,0,0 // movaps 0x1549(%rip),%xmm0 # 39a20 <_sk_srcover_bgra_8888_sse2_lowp+0x11ac>
.byte 68,15,40,192 // movaps %xmm0,%xmm8
.byte 65,15,92,197 // subps %xmm13,%xmm0
.byte 65,15,84,194 // andps %xmm10,%xmm0
@@ -65441,7 +65511,7 @@ _sk_xy_to_unit_angle_sse2_lowp:
.byte 69,15,86,200 // orps %xmm8,%xmm9
.byte 69,15,87,192 // xorps %xmm8,%xmm8
.byte 65,15,194,200,1 // cmpltps %xmm8,%xmm1
- .byte 15,40,5,69,15,0,0 // movaps 0xf45(%rip),%xmm0 # 392e0 <_sk_srcover_bgra_8888_sse2_lowp+0xbdc>
+ .byte 15,40,5,53,15,0,0 // movaps 0xf35(%rip),%xmm0 # 39440 <_sk_srcover_bgra_8888_sse2_lowp+0xbcc>
.byte 68,15,40,216 // movaps %xmm0,%xmm11
.byte 65,15,92,194 // subps %xmm10,%xmm0
.byte 15,84,193 // andps %xmm1,%xmm0
@@ -65454,7 +65524,7 @@ _sk_xy_to_unit_angle_sse2_lowp:
.byte 65,15,86,211 // orps %xmm11,%xmm2
.byte 68,15,40,219 // movaps %xmm3,%xmm11
.byte 69,15,194,216,1 // cmpltps %xmm8,%xmm11
- .byte 68,15,40,13,29,15,0,0 // movaps 0xf1d(%rip),%xmm9 # 392f0 <_sk_srcover_bgra_8888_sse2_lowp+0xbec>
+ .byte 68,15,40,13,13,15,0,0 // movaps 0xf0d(%rip),%xmm9 # 39450 <_sk_srcover_bgra_8888_sse2_lowp+0xbdc>
.byte 69,15,40,209 // movaps %xmm9,%xmm10
.byte 68,15,92,201 // subps %xmm1,%xmm9
.byte 69,15,84,203 // andps %xmm11,%xmm9
@@ -65512,26 +65582,26 @@ _sk_srcover_rgba_8888_sse2_lowp:
.byte 65,254,202 // dec %r10b
.byte 69,15,182,202 // movzbl %r10b,%r9d
.byte 65,128,249,6 // cmp $0x6,%r9b
- .byte 119,38 // ja 384a3 <_sk_srcover_rgba_8888_sse2_lowp+0x4b>
+ .byte 119,38 // ja 38613 <_sk_srcover_rgba_8888_sse2_lowp+0x4b>
.byte 102,69,15,239,192 // pxor %xmm8,%xmm8
- .byte 76,141,29,67,2,0,0 // lea 0x243(%rip),%r11 # 386cc <_sk_srcover_rgba_8888_sse2_lowp+0x274>
+ .byte 76,141,29,67,2,0,0 // lea 0x243(%rip),%r11 # 3883c <_sk_srcover_rgba_8888_sse2_lowp+0x274>
.byte 75,99,4,139 // movslq (%r11,%r9,4),%rax
.byte 76,1,216 // add %r11,%rax
.byte 102,15,239,228 // pxor %xmm4,%xmm4
.byte 255,224 // jmpq *%rax
.byte 243,65,15,16,60,144 // movss (%r8,%rdx,4),%xmm7
.byte 102,69,15,239,192 // pxor %xmm8,%xmm8
- .byte 235,102 // jmp 38509 <_sk_srcover_rgba_8888_sse2_lowp+0xb1>
+ .byte 235,102 // jmp 38679 <_sk_srcover_rgba_8888_sse2_lowp+0xb1>
.byte 102,65,15,16,60,144 // movupd (%r8,%rdx,4),%xmm7
.byte 243,69,15,111,68,144,16 // movdqu 0x10(%r8,%rdx,4),%xmm8
- .byte 235,87 // jmp 38509 <_sk_srcover_rgba_8888_sse2_lowp+0xb1>
+ .byte 235,87 // jmp 38679 <_sk_srcover_rgba_8888_sse2_lowp+0xb1>
.byte 102,65,15,110,100,144,8 // movd 0x8(%r8,%rdx,4),%xmm4
.byte 102,68,15,112,196,69 // pshufd $0x45,%xmm4,%xmm8
.byte 102,15,239,228 // pxor %xmm4,%xmm4
.byte 102,69,15,18,4,144 // movlpd (%r8,%rdx,4),%xmm8
.byte 102,65,15,40,248 // movapd %xmm8,%xmm7
.byte 102,68,15,111,196 // movdqa %xmm4,%xmm8
- .byte 235,52 // jmp 38509 <_sk_srcover_rgba_8888_sse2_lowp+0xb1>
+ .byte 235,52 // jmp 38679 <_sk_srcover_rgba_8888_sse2_lowp+0xb1>
.byte 102,65,15,110,100,144,24 // movd 0x18(%r8,%rdx,4),%xmm4
.byte 102,68,15,112,196,69 // pshufd $0x45,%xmm4,%xmm8
.byte 243,65,15,16,100,144,20 // movss 0x14(%r8,%rdx,4),%xmm4
@@ -65548,7 +65618,7 @@ _sk_srcover_rgba_8888_sse2_lowp:
.byte 102,15,114,244,16 // pslld $0x10,%xmm4
.byte 102,15,114,228,16 // psrad $0x10,%xmm4
.byte 102,15,107,236 // packssdw %xmm4,%xmm5
- .byte 102,68,15,111,37,93,13,0,0 // movdqa 0xd5d(%rip),%xmm12 # 39290 <_sk_srcover_bgra_8888_sse2_lowp+0xb8c>
+ .byte 102,68,15,111,37,77,13,0,0 // movdqa 0xd4d(%rip),%xmm12 # 393f0 <_sk_srcover_bgra_8888_sse2_lowp+0xb7c>
.byte 102,15,111,229 // movdqa %xmm5,%xmm4
.byte 102,65,15,219,228 // pand %xmm12,%xmm4
.byte 102,15,113,213,8 // psrlw $0x8,%xmm5
@@ -65606,20 +65676,20 @@ _sk_srcover_rgba_8888_sse2_lowp:
.byte 102,15,105,193 // punpckhwd %xmm1,%xmm0
.byte 102,15,235,194 // por %xmm2,%xmm0
.byte 65,128,250,6 // cmp $0x6,%r10b
- .byte 119,24 // ja 3866b <_sk_srcover_rgba_8888_sse2_lowp+0x213>
- .byte 76,141,21,142,0,0,0 // lea 0x8e(%rip),%r10 # 386e8 <_sk_srcover_rgba_8888_sse2_lowp+0x290>
+ .byte 119,24 // ja 387db <_sk_srcover_rgba_8888_sse2_lowp+0x213>
+ .byte 76,141,21,142,0,0,0 // lea 0x8e(%rip),%r10 # 38858 <_sk_srcover_rgba_8888_sse2_lowp+0x290>
.byte 75,99,4,138 // movslq (%r10,%r9,4),%rax
.byte 76,1,208 // add %r10,%rax
.byte 255,224 // jmpq *%rax
.byte 102,69,15,126,36,144 // movd %xmm12,(%r8,%rdx,4)
- .byte 235,73 // jmp 386b4 <_sk_srcover_rgba_8888_sse2_lowp+0x25c>
+ .byte 235,73 // jmp 38824 <_sk_srcover_rgba_8888_sse2_lowp+0x25c>
.byte 243,69,15,127,36,144 // movdqu %xmm12,(%r8,%rdx,4)
.byte 243,65,15,127,68,144,16 // movdqu %xmm0,0x10(%r8,%rdx,4)
- .byte 235,58 // jmp 386b4 <_sk_srcover_rgba_8888_sse2_lowp+0x25c>
+ .byte 235,58 // jmp 38824 <_sk_srcover_rgba_8888_sse2_lowp+0x25c>
.byte 102,65,15,112,196,78 // pshufd $0x4e,%xmm12,%xmm0
.byte 102,65,15,126,68,144,8 // movd %xmm0,0x8(%r8,%rdx,4)
.byte 102,69,15,214,36,144 // movq %xmm12,(%r8,%rdx,4)
- .byte 235,37 // jmp 386b4 <_sk_srcover_rgba_8888_sse2_lowp+0x25c>
+ .byte 235,37 // jmp 38824 <_sk_srcover_rgba_8888_sse2_lowp+0x25c>
.byte 102,15,112,200,78 // pshufd $0x4e,%xmm0,%xmm1
.byte 102,65,15,126,76,144,24 // movd %xmm1,0x18(%r8,%rdx,4)
.byte 102,15,112,200,229 // pshufd $0xe5,%xmm0,%xmm1
@@ -65652,7 +65722,7 @@ _sk_srcover_rgba_8888_sse2_lowp:
.byte 254 // (bad)
.byte 255 // (bad)
.byte 255 // (bad)
- .byte 123,255 // jnp 386e9 <_sk_srcover_rgba_8888_sse2_lowp+0x291>
+ .byte 123,255 // jnp 38859 <_sk_srcover_rgba_8888_sse2_lowp+0x291>
.byte 255 // (bad)
.byte 255,159,255,255,255,146 // lcall *-0x6d000001(%rdi)
.byte 255 // (bad)
@@ -65684,26 +65754,26 @@ _sk_srcover_bgra_8888_sse2_lowp:
.byte 65,254,202 // dec %r10b
.byte 69,15,182,202 // movzbl %r10b,%r9d
.byte 65,128,249,6 // cmp $0x6,%r9b
- .byte 119,38 // ja 3874f <_sk_srcover_bgra_8888_sse2_lowp+0x4b>
+ .byte 119,38 // ja 388bf <_sk_srcover_bgra_8888_sse2_lowp+0x4b>
.byte 102,69,15,239,192 // pxor %xmm8,%xmm8
- .byte 76,141,29,67,2,0,0 // lea 0x243(%rip),%r11 # 38978 <_sk_srcover_bgra_8888_sse2_lowp+0x274>
+ .byte 76,141,29,67,2,0,0 // lea 0x243(%rip),%r11 # 38ae8 <_sk_srcover_bgra_8888_sse2_lowp+0x274>
.byte 75,99,4,139 // movslq (%r11,%r9,4),%rax
.byte 76,1,216 // add %r11,%rax
.byte 102,15,239,228 // pxor %xmm4,%xmm4
.byte 255,224 // jmpq *%rax
.byte 243,65,15,16,60,144 // movss (%r8,%rdx,4),%xmm7
.byte 102,69,15,239,192 // pxor %xmm8,%xmm8
- .byte 235,102 // jmp 387b5 <_sk_srcover_bgra_8888_sse2_lowp+0xb1>
+ .byte 235,102 // jmp 38925 <_sk_srcover_bgra_8888_sse2_lowp+0xb1>
.byte 102,65,15,16,60,144 // movupd (%r8,%rdx,4),%xmm7
.byte 243,69,15,111,68,144,16 // movdqu 0x10(%r8,%rdx,4),%xmm8
- .byte 235,87 // jmp 387b5 <_sk_srcover_bgra_8888_sse2_lowp+0xb1>
+ .byte 235,87 // jmp 38925 <_sk_srcover_bgra_8888_sse2_lowp+0xb1>
.byte 102,65,15,110,100,144,8 // movd 0x8(%r8,%rdx,4),%xmm4
.byte 102,68,15,112,196,69 // pshufd $0x45,%xmm4,%xmm8
.byte 102,15,239,228 // pxor %xmm4,%xmm4
.byte 102,69,15,18,4,144 // movlpd (%r8,%rdx,4),%xmm8
.byte 102,65,15,40,248 // movapd %xmm8,%xmm7
.byte 102,68,15,111,196 // movdqa %xmm4,%xmm8
- .byte 235,52 // jmp 387b5 <_sk_srcover_bgra_8888_sse2_lowp+0xb1>
+ .byte 235,52 // jmp 38925 <_sk_srcover_bgra_8888_sse2_lowp+0xb1>
.byte 102,65,15,110,100,144,24 // movd 0x18(%r8,%rdx,4),%xmm4
.byte 102,68,15,112,196,69 // pshufd $0x45,%xmm4,%xmm8
.byte 243,65,15,16,100,144,20 // movss 0x14(%r8,%rdx,4),%xmm4
@@ -65720,7 +65790,7 @@ _sk_srcover_bgra_8888_sse2_lowp:
.byte 102,15,114,244,16 // pslld $0x10,%xmm4
.byte 102,15,114,228,16 // psrad $0x10,%xmm4
.byte 102,15,107,236 // packssdw %xmm4,%xmm5
- .byte 102,68,15,111,37,177,10,0,0 // movdqa 0xab1(%rip),%xmm12 # 39290 <_sk_srcover_bgra_8888_sse2_lowp+0xb8c>
+ .byte 102,68,15,111,37,161,10,0,0 // movdqa 0xaa1(%rip),%xmm12 # 393f0 <_sk_srcover_bgra_8888_sse2_lowp+0xb7c>
.byte 102,15,111,245 // movdqa %xmm5,%xmm6
.byte 102,65,15,219,244 // pand %xmm12,%xmm6
.byte 102,15,113,213,8 // psrlw $0x8,%xmm5
@@ -65778,20 +65848,20 @@ _sk_srcover_bgra_8888_sse2_lowp:
.byte 102,15,105,193 // punpckhwd %xmm1,%xmm0
.byte 102,15,235,194 // por %xmm2,%xmm0
.byte 65,128,250,6 // cmp $0x6,%r10b
- .byte 119,24 // ja 38917 <_sk_srcover_bgra_8888_sse2_lowp+0x213>
- .byte 76,141,21,142,0,0,0 // lea 0x8e(%rip),%r10 # 38994 <_sk_srcover_bgra_8888_sse2_lowp+0x290>
+ .byte 119,24 // ja 38a87 <_sk_srcover_bgra_8888_sse2_lowp+0x213>
+ .byte 76,141,21,142,0,0,0 // lea 0x8e(%rip),%r10 # 38b04 <_sk_srcover_bgra_8888_sse2_lowp+0x290>
.byte 75,99,4,138 // movslq (%r10,%r9,4),%rax
.byte 76,1,208 // add %r10,%rax
.byte 255,224 // jmpq *%rax
.byte 102,69,15,126,36,144 // movd %xmm12,(%r8,%rdx,4)
- .byte 235,73 // jmp 38960 <_sk_srcover_bgra_8888_sse2_lowp+0x25c>
+ .byte 235,73 // jmp 38ad0 <_sk_srcover_bgra_8888_sse2_lowp+0x25c>
.byte 243,69,15,127,36,144 // movdqu %xmm12,(%r8,%rdx,4)
.byte 243,65,15,127,68,144,16 // movdqu %xmm0,0x10(%r8,%rdx,4)
- .byte 235,58 // jmp 38960 <_sk_srcover_bgra_8888_sse2_lowp+0x25c>
+ .byte 235,58 // jmp 38ad0 <_sk_srcover_bgra_8888_sse2_lowp+0x25c>
.byte 102,65,15,112,196,78 // pshufd $0x4e,%xmm12,%xmm0
.byte 102,65,15,126,68,144,8 // movd %xmm0,0x8(%r8,%rdx,4)
.byte 102,69,15,214,36,144 // movq %xmm12,(%r8,%rdx,4)
- .byte 235,37 // jmp 38960 <_sk_srcover_bgra_8888_sse2_lowp+0x25c>
+ .byte 235,37 // jmp 38ad0 <_sk_srcover_bgra_8888_sse2_lowp+0x25c>
.byte 102,15,112,200,78 // pshufd $0x4e,%xmm0,%xmm1
.byte 102,65,15,126,76,144,24 // movd %xmm1,0x18(%r8,%rdx,4)
.byte 102,15,112,200,229 // pshufd $0xe5,%xmm0,%xmm1
@@ -65824,7 +65894,7 @@ _sk_srcover_bgra_8888_sse2_lowp:
.byte 254 // (bad)
.byte 255 // (bad)
.byte 255 // (bad)
- .byte 123,255 // jnp 38995 <_sk_srcover_bgra_8888_sse2_lowp+0x291>
+ .byte 123,255 // jnp 38b05 <_sk_srcover_bgra_8888_sse2_lowp+0x291>
.byte 255 // (bad)
.byte 255,159,255,255,255,146 // lcall *-0x6d000001(%rdi)
.byte 255 // (bad)
@@ -65857,14 +65927,14 @@ BALIGN4
.byte 252 // cld
.byte 190,0,0,128,191 // mov $0xbf800000,%esi
.byte 0,0 // add %al,(%rax)
- .byte 224,64 // loopne 38a14 <.literal4+0x64>
+ .byte 224,64 // loopne 38b84 <.literal4+0x64>
.byte 154 // (bad)
.byte 153 // cltd
.byte 153 // cltd
.byte 62,61,10,23,63,174 // ds cmp $0xae3f170a,%eax
- .byte 71,225,61 // rex.RXB loope 38a1d <.literal4+0x6d>
+ .byte 71,225,61 // rex.RXB loope 38b8d <.literal4+0x6d>
.byte 0,0 // add %al,(%rax)
- .byte 127,67 // jg 38a27 <.literal4+0x77>
+ .byte 127,67 // jg 38b97 <.literal4+0x77>
.byte 0,0 // add %al,(%rax)
.byte 128,127,145,131 // cmpb $0x83,-0x6f(%rdi)
.byte 158 // sahf
@@ -65901,7 +65971,7 @@ BALIGN4
.byte 0,0 // add %al,(%rax)
.byte 8,33 // or %ah,(%rcx)
.byte 132,55 // test %dh,(%rdi)
- .byte 224,7 // loopne 38a41 <.literal4+0x91>
+ .byte 224,7 // loopne 38bb1 <.literal4+0x91>
.byte 0,0 // add %al,(%rax)
.byte 33,8 // and %ecx,(%rax)
.byte 2,58 // add (%rdx),%bh
@@ -65912,8 +65982,8 @@ BALIGN4
.byte 0,0 // add %al,(%rax)
.byte 0,52,255 // add %dh,(%rdi,%rdi,8)
.byte 255 // (bad)
- .byte 127,0 // jg 38a54 <.literal4+0xa4>
- .byte 119,115 // ja 38ac9 <.literal4+0x119>
+ .byte 127,0 // jg 38bc4 <.literal4+0xa4>
+ .byte 119,115 // ja 38c39 <.literal4+0x119>
.byte 248 // clc
.byte 194,117,191 // retq $0xbf75
.byte 191,63,249,68,180 // mov $0xb444f93f,%edi
@@ -65936,12 +66006,12 @@ BALIGN4
.byte 190,80,128,3,62 // mov $0x3e038050,%esi
.byte 31 // (bad)
.byte 215 // xlat %ds:(%rbx)
- .byte 118,63 // jbe 38adf <.literal4+0x12f>
+ .byte 118,63 // jbe 38c4f <.literal4+0x12f>
.byte 246,64,83,63 // testb $0x3f,0x53(%rax)
.byte 0,0 // add %al,(%rax)
.byte 248 // clc
.byte 65,0,0 // add %al,(%r8)
- .byte 124,66 // jl 38aee <.literal4+0x13e>
+ .byte 124,66 // jl 38c5e <.literal4+0x13e>
.byte 0,240 // add %dh,%al
.byte 0,0 // add %al,(%rax)
.byte 137,136,136,55,0,15 // mov %ecx,0xf003788(%rax)
@@ -65951,9 +66021,9 @@ BALIGN4
.byte 137,136,136,59,15,0 // mov %ecx,0xf3b88(%rax)
.byte 0,0 // add %al,(%rax)
.byte 137,136,136,61,0,0 // mov %ecx,0x3d88(%rax)
- .byte 112,65 // jo 38b11 <.literal4+0x161>
+ .byte 112,65 // jo 38c81 <.literal4+0x161>
.byte 0,255 // add %bh,%bh
- .byte 127,71 // jg 38b1b <.literal4+0x16b>
+ .byte 127,71 // jg 38c8b <.literal4+0x16b>
.byte 89 // pop %rcx
.byte 23 // (bad)
.byte 55 // (bad)
@@ -65974,18 +66044,18 @@ BALIGN4
.byte 0,0 // add %al,(%rax)
.byte 128,0,0 // addb $0x0,(%rax)
.byte 0,191,0,0,192,191 // add %bh,-0x40400000(%rdi)
- .byte 114,28 // jb 38b26 <.literal4+0x176>
+ .byte 114,28 // jb 38c96 <.literal4+0x176>
.byte 199 // (bad)
.byte 62,85 // ds push %rbp
.byte 85 // push %rbp
.byte 149 // xchg %eax,%ebp
.byte 191,0,0,192,63 // mov $0x3fc00000,%edi
.byte 57,142,99,61,114,249 // cmp %ecx,-0x68dc29d(%rsi)
- .byte 127,63 // jg 38b5b <_sk_srcover_bgra_8888_sse2_lowp+0x457>
+ .byte 127,63 // jg 38ccb <_sk_srcover_bgra_8888_sse2_lowp+0x457>
.byte 3,0 // add (%rax),%eax
.byte 0,0 // add %al,(%rax)
.byte 45,16,17,192,18 // sub $0x12c01110,%eax
- .byte 120,57 // js 38b60 <_sk_srcover_bgra_8888_sse2_lowp+0x45c>
+ .byte 120,57 // js 38cd0 <_sk_srcover_bgra_8888_sse2_lowp+0x45c>
.byte 64,32,148,90,62,4,157,30 // and %dl,0x1e9d043e(%rdx,%rbx,2)
.byte 62,0,24 // add %bl,%ds:(%rax)
.byte 161,57,109,165,144,63,252,191,16 // movabs 0x10bffc3f90a56d39,%eax
@@ -65995,7 +66065,7 @@ BALIGN4
.byte 56,255 // cmp %bh,%bh
.byte 255 // (bad)
.byte 255 // (bad)
- .byte 127,0 // jg 38b49 <.literal4+0x199>
+ .byte 127,0 // jg 38cb9 <.literal4+0x199>
.byte 0,128,56,0,64,254 // add %al,-0x1bfffc8(%rax)
.byte 255 // .byte 0xff
@@ -66047,16 +66117,16 @@ BALIGN32
.byte 0,1 // add %al,(%rcx)
.byte 255 // (bad)
.byte 255 // (bad)
- .byte 255,5,255,255,255,9 // incl 0x9ffffff(%rip) # a038bc8 <_sk_srcover_bgra_8888_sse2_lowp+0xa0004c4>
+ .byte 255,5,255,255,255,9 // incl 0x9ffffff(%rip) # a038d28 <_sk_srcover_bgra_8888_sse2_lowp+0xa0004b4>
.byte 255 // (bad)
.byte 255 // (bad)
- .byte 255,13,255,255,255,17 // decl 0x11ffffff(%rip) # 12038bd0 <_sk_srcover_bgra_8888_sse2_lowp+0x120004cc>
+ .byte 255,13,255,255,255,17 // decl 0x11ffffff(%rip) # 12038d30 <_sk_srcover_bgra_8888_sse2_lowp+0x120004bc>
.byte 255 // (bad)
.byte 255 // (bad)
- .byte 255,21,255,255,255,25 // callq *0x19ffffff(%rip) # 1a038bd8 <_sk_srcover_bgra_8888_sse2_lowp+0x1a0004d4>
+ .byte 255,21,255,255,255,25 // callq *0x19ffffff(%rip) # 1a038d38 <_sk_srcover_bgra_8888_sse2_lowp+0x1a0004c4>
.byte 255 // (bad)
.byte 255 // (bad)
- .byte 255,29,255,255,255,2 // lcall *0x2ffffff(%rip) # 3038be0 <_sk_srcover_bgra_8888_sse2_lowp+0x30004dc>
+ .byte 255,29,255,255,255,2 // lcall *0x2ffffff(%rip) # 3038d40 <_sk_srcover_bgra_8888_sse2_lowp+0x30004cc>
.byte 255 // (bad)
.byte 255 // (bad)
.byte 255,6 // incl (%rsi)
@@ -66099,16 +66169,16 @@ BALIGN32
.byte 0,0 // add %al,(%rax)
.byte 1,255 // add %edi,%edi
.byte 255 // (bad)
- .byte 255,5,255,255,255,9 // incl 0x9ffffff(%rip) # a038c28 <_sk_srcover_bgra_8888_sse2_lowp+0xa000524>
+ .byte 255,5,255,255,255,9 // incl 0x9ffffff(%rip) # a038d88 <_sk_srcover_bgra_8888_sse2_lowp+0xa000514>
.byte 255 // (bad)
.byte 255 // (bad)
- .byte 255,13,255,255,255,17 // decl 0x11ffffff(%rip) # 12038c30 <_sk_srcover_bgra_8888_sse2_lowp+0x1200052c>
+ .byte 255,13,255,255,255,17 // decl 0x11ffffff(%rip) # 12038d90 <_sk_srcover_bgra_8888_sse2_lowp+0x1200051c>
.byte 255 // (bad)
.byte 255 // (bad)
- .byte 255,21,255,255,255,25 // callq *0x19ffffff(%rip) # 1a038c38 <_sk_srcover_bgra_8888_sse2_lowp+0x1a000534>
+ .byte 255,21,255,255,255,25 // callq *0x19ffffff(%rip) # 1a038d98 <_sk_srcover_bgra_8888_sse2_lowp+0x1a000524>
.byte 255 // (bad)
.byte 255 // (bad)
- .byte 255,29,255,255,255,2 // lcall *0x2ffffff(%rip) # 3038c40 <_sk_srcover_bgra_8888_sse2_lowp+0x300053c>
+ .byte 255,29,255,255,255,2 // lcall *0x2ffffff(%rip) # 3038da0 <_sk_srcover_bgra_8888_sse2_lowp+0x300052c>
.byte 255 // (bad)
.byte 255 // (bad)
.byte 255,6 // incl (%rsi)
@@ -66151,16 +66221,16 @@ BALIGN32
.byte 0,0 // add %al,(%rax)
.byte 1,255 // add %edi,%edi
.byte 255 // (bad)
- .byte 255,5,255,255,255,9 // incl 0x9ffffff(%rip) # a038c88 <_sk_srcover_bgra_8888_sse2_lowp+0xa000584>
+ .byte 255,5,255,255,255,9 // incl 0x9ffffff(%rip) # a038de8 <_sk_srcover_bgra_8888_sse2_lowp+0xa000574>
.byte 255 // (bad)
.byte 255 // (bad)
- .byte 255,13,255,255,255,17 // decl 0x11ffffff(%rip) # 12038c90 <_sk_srcover_bgra_8888_sse2_lowp+0x1200058c>
+ .byte 255,13,255,255,255,17 // decl 0x11ffffff(%rip) # 12038df0 <_sk_srcover_bgra_8888_sse2_lowp+0x1200057c>
.byte 255 // (bad)
.byte 255 // (bad)
- .byte 255,21,255,255,255,25 // callq *0x19ffffff(%rip) # 1a038c98 <_sk_srcover_bgra_8888_sse2_lowp+0x1a000594>
+ .byte 255,21,255,255,255,25 // callq *0x19ffffff(%rip) # 1a038df8 <_sk_srcover_bgra_8888_sse2_lowp+0x1a000584>
.byte 255 // (bad)
.byte 255 // (bad)
- .byte 255,29,255,255,255,2 // lcall *0x2ffffff(%rip) # 3038ca0 <_sk_srcover_bgra_8888_sse2_lowp+0x300059c>
+ .byte 255,29,255,255,255,2 // lcall *0x2ffffff(%rip) # 3038e00 <_sk_srcover_bgra_8888_sse2_lowp+0x300058c>
.byte 255 // (bad)
.byte 255 // (bad)
.byte 255,6 // incl (%rsi)
@@ -66203,16 +66273,16 @@ BALIGN32
.byte 0,0 // add %al,(%rax)
.byte 1,255 // add %edi,%edi
.byte 255 // (bad)
- .byte 255,5,255,255,255,9 // incl 0x9ffffff(%rip) # a038ce8 <_sk_srcover_bgra_8888_sse2_lowp+0xa0005e4>
+ .byte 255,5,255,255,255,9 // incl 0x9ffffff(%rip) # a038e48 <_sk_srcover_bgra_8888_sse2_lowp+0xa0005d4>
.byte 255 // (bad)
.byte 255 // (bad)
- .byte 255,13,255,255,255,17 // decl 0x11ffffff(%rip) # 12038cf0 <_sk_srcover_bgra_8888_sse2_lowp+0x120005ec>
+ .byte 255,13,255,255,255,17 // decl 0x11ffffff(%rip) # 12038e50 <_sk_srcover_bgra_8888_sse2_lowp+0x120005dc>
.byte 255 // (bad)
.byte 255 // (bad)
- .byte 255,21,255,255,255,25 // callq *0x19ffffff(%rip) # 1a038cf8 <_sk_srcover_bgra_8888_sse2_lowp+0x1a0005f4>
+ .byte 255,21,255,255,255,25 // callq *0x19ffffff(%rip) # 1a038e58 <_sk_srcover_bgra_8888_sse2_lowp+0x1a0005e4>
.byte 255 // (bad)
.byte 255 // (bad)
- .byte 255,29,255,255,255,2 // lcall *0x2ffffff(%rip) # 3038d00 <_sk_srcover_bgra_8888_sse2_lowp+0x30005fc>
+ .byte 255,29,255,255,255,2 // lcall *0x2ffffff(%rip) # 3038e60 <_sk_srcover_bgra_8888_sse2_lowp+0x30005ec>
.byte 255 // (bad)
.byte 255 // (bad)
.byte 255,6 // incl (%rsi)
@@ -66255,16 +66325,16 @@ BALIGN32
.byte 0,0 // add %al,(%rax)
.byte 1,255 // add %edi,%edi
.byte 255 // (bad)
- .byte 255,5,255,255,255,9 // incl 0x9ffffff(%rip) # a038d48 <_sk_srcover_bgra_8888_sse2_lowp+0xa000644>
+ .byte 255,5,255,255,255,9 // incl 0x9ffffff(%rip) # a038ea8 <_sk_srcover_bgra_8888_sse2_lowp+0xa000634>
.byte 255 // (bad)
.byte 255 // (bad)
- .byte 255,13,255,255,255,17 // decl 0x11ffffff(%rip) # 12038d50 <_sk_srcover_bgra_8888_sse2_lowp+0x1200064c>
+ .byte 255,13,255,255,255,17 // decl 0x11ffffff(%rip) # 12038eb0 <_sk_srcover_bgra_8888_sse2_lowp+0x1200063c>
.byte 255 // (bad)
.byte 255 // (bad)
- .byte 255,21,255,255,255,25 // callq *0x19ffffff(%rip) # 1a038d58 <_sk_srcover_bgra_8888_sse2_lowp+0x1a000654>
+ .byte 255,21,255,255,255,25 // callq *0x19ffffff(%rip) # 1a038eb8 <_sk_srcover_bgra_8888_sse2_lowp+0x1a000644>
.byte 255 // (bad)
.byte 255 // (bad)
- .byte 255,29,255,255,255,2 // lcall *0x2ffffff(%rip) # 3038d60 <_sk_srcover_bgra_8888_sse2_lowp+0x300065c>
+ .byte 255,29,255,255,255,2 // lcall *0x2ffffff(%rip) # 3038ec0 <_sk_srcover_bgra_8888_sse2_lowp+0x300064c>
.byte 255 // (bad)
.byte 255 // (bad)
.byte 255,6 // incl (%rsi)
@@ -66307,16 +66377,16 @@ BALIGN32
.byte 0,0 // add %al,(%rax)
.byte 1,255 // add %edi,%edi
.byte 255 // (bad)
- .byte 255,5,255,255,255,9 // incl 0x9ffffff(%rip) # a038da8 <_sk_srcover_bgra_8888_sse2_lowp+0xa0006a4>
+ .byte 255,5,255,255,255,9 // incl 0x9ffffff(%rip) # a038f08 <_sk_srcover_bgra_8888_sse2_lowp+0xa000694>
.byte 255 // (bad)
.byte 255 // (bad)
- .byte 255,13,255,255,255,17 // decl 0x11ffffff(%rip) # 12038db0 <_sk_srcover_bgra_8888_sse2_lowp+0x120006ac>
+ .byte 255,13,255,255,255,17 // decl 0x11ffffff(%rip) # 12038f10 <_sk_srcover_bgra_8888_sse2_lowp+0x1200069c>
.byte 255 // (bad)
.byte 255 // (bad)
- .byte 255,21,255,255,255,25 // callq *0x19ffffff(%rip) # 1a038db8 <_sk_srcover_bgra_8888_sse2_lowp+0x1a0006b4>
+ .byte 255,21,255,255,255,25 // callq *0x19ffffff(%rip) # 1a038f18 <_sk_srcover_bgra_8888_sse2_lowp+0x1a0006a4>
.byte 255 // (bad)
.byte 255 // (bad)
- .byte 255,29,255,255,255,2 // lcall *0x2ffffff(%rip) # 3038dc0 <_sk_srcover_bgra_8888_sse2_lowp+0x30006bc>
+ .byte 255,29,255,255,255,2 // lcall *0x2ffffff(%rip) # 3038f20 <_sk_srcover_bgra_8888_sse2_lowp+0x30006ac>
.byte 255 // (bad)
.byte 255 // (bad)
.byte 255,6 // incl (%rsi)
@@ -66359,16 +66429,16 @@ BALIGN32
.byte 0,0 // add %al,(%rax)
.byte 1,255 // add %edi,%edi
.byte 255 // (bad)
- .byte 255,5,255,255,255,9 // incl 0x9ffffff(%rip) # a038e08 <_sk_srcover_bgra_8888_sse2_lowp+0xa000704>
+ .byte 255,5,255,255,255,9 // incl 0x9ffffff(%rip) # a038f68 <_sk_srcover_bgra_8888_sse2_lowp+0xa0006f4>
.byte 255 // (bad)
.byte 255 // (bad)
- .byte 255,13,255,255,255,17 // decl 0x11ffffff(%rip) # 12038e10 <_sk_srcover_bgra_8888_sse2_lowp+0x1200070c>
+ .byte 255,13,255,255,255,17 // decl 0x11ffffff(%rip) # 12038f70 <_sk_srcover_bgra_8888_sse2_lowp+0x120006fc>
.byte 255 // (bad)
.byte 255 // (bad)
- .byte 255,21,255,255,255,25 // callq *0x19ffffff(%rip) # 1a038e18 <_sk_srcover_bgra_8888_sse2_lowp+0x1a000714>
+ .byte 255,21,255,255,255,25 // callq *0x19ffffff(%rip) # 1a038f78 <_sk_srcover_bgra_8888_sse2_lowp+0x1a000704>
.byte 255 // (bad)
.byte 255 // (bad)
- .byte 255,29,255,255,255,2 // lcall *0x2ffffff(%rip) # 3038e20 <_sk_srcover_bgra_8888_sse2_lowp+0x300071c>
+ .byte 255,29,255,255,255,2 // lcall *0x2ffffff(%rip) # 3038f80 <_sk_srcover_bgra_8888_sse2_lowp+0x300070c>
.byte 255 // (bad)
.byte 255 // (bad)
.byte 255,6 // incl (%rsi)
@@ -66411,16 +66481,16 @@ BALIGN32
.byte 0,0 // add %al,(%rax)
.byte 1,255 // add %edi,%edi
.byte 255 // (bad)
- .byte 255,5,255,255,255,9 // incl 0x9ffffff(%rip) # a038e68 <_sk_srcover_bgra_8888_sse2_lowp+0xa000764>
+ .byte 255,5,255,255,255,9 // incl 0x9ffffff(%rip) # a038fc8 <_sk_srcover_bgra_8888_sse2_lowp+0xa000754>
.byte 255 // (bad)
.byte 255 // (bad)
- .byte 255,13,255,255,255,17 // decl 0x11ffffff(%rip) # 12038e70 <_sk_srcover_bgra_8888_sse2_lowp+0x1200076c>
+ .byte 255,13,255,255,255,17 // decl 0x11ffffff(%rip) # 12038fd0 <_sk_srcover_bgra_8888_sse2_lowp+0x1200075c>
.byte 255 // (bad)
.byte 255 // (bad)
- .byte 255,21,255,255,255,25 // callq *0x19ffffff(%rip) # 1a038e78 <_sk_srcover_bgra_8888_sse2_lowp+0x1a000774>
+ .byte 255,21,255,255,255,25 // callq *0x19ffffff(%rip) # 1a038fd8 <_sk_srcover_bgra_8888_sse2_lowp+0x1a000764>
.byte 255 // (bad)
.byte 255 // (bad)
- .byte 255,29,255,255,255,2 // lcall *0x2ffffff(%rip) # 3038e80 <_sk_srcover_bgra_8888_sse2_lowp+0x300077c>
+ .byte 255,29,255,255,255,2 // lcall *0x2ffffff(%rip) # 3038fe0 <_sk_srcover_bgra_8888_sse2_lowp+0x300076c>
.byte 255 // (bad)
.byte 255 // (bad)
.byte 255,6 // incl (%rsi)
@@ -66463,16 +66533,16 @@ BALIGN32
.byte 0,0 // add %al,(%rax)
.byte 1,255 // add %edi,%edi
.byte 255 // (bad)
- .byte 255,5,255,255,255,9 // incl 0x9ffffff(%rip) # a038ec8 <_sk_srcover_bgra_8888_sse2_lowp+0xa0007c4>
+ .byte 255,5,255,255,255,9 // incl 0x9ffffff(%rip) # a039028 <_sk_srcover_bgra_8888_sse2_lowp+0xa0007b4>
.byte 255 // (bad)
.byte 255 // (bad)
- .byte 255,13,255,255,255,17 // decl 0x11ffffff(%rip) # 12038ed0 <_sk_srcover_bgra_8888_sse2_lowp+0x120007cc>
+ .byte 255,13,255,255,255,17 // decl 0x11ffffff(%rip) # 12039030 <_sk_srcover_bgra_8888_sse2_lowp+0x120007bc>
.byte 255 // (bad)
.byte 255 // (bad)
- .byte 255,21,255,255,255,25 // callq *0x19ffffff(%rip) # 1a038ed8 <_sk_srcover_bgra_8888_sse2_lowp+0x1a0007d4>
+ .byte 255,21,255,255,255,25 // callq *0x19ffffff(%rip) # 1a039038 <_sk_srcover_bgra_8888_sse2_lowp+0x1a0007c4>
.byte 255 // (bad)
.byte 255 // (bad)
- .byte 255,29,255,255,255,2 // lcall *0x2ffffff(%rip) # 3038ee0 <_sk_srcover_bgra_8888_sse2_lowp+0x30007dc>
+ .byte 255,29,255,255,255,2 // lcall *0x2ffffff(%rip) # 3039040 <_sk_srcover_bgra_8888_sse2_lowp+0x30007cc>
.byte 255 // (bad)
.byte 255 // (bad)
.byte 255,6 // incl (%rsi)
@@ -66777,7 +66847,7 @@ BALIGN32
.byte 0,255 // add %bh,%bh
.byte 0,0 // add %al,(%rax)
.byte 248 // clc
- .byte 224,255 // loopne 39133 <.const+0x5d3>
+ .byte 224,255 // loopne 39293 <.const+0x5d3>
.byte 248 // clc
.byte 0,63 // add %bh,(%rdi)
.byte 0,31 // add %bl,(%rdi)
@@ -67038,11 +67108,11 @@ BALIGN16
.byte 0,128,191,0,0,128 // add %al,-0x7fffff41(%rax)
.byte 191,0,0,224,64 // mov $0x40e00000,%edi
.byte 0,0 // add %al,(%rax)
- .byte 224,64 // loopne 393a8 <.literal16+0x118>
+ .byte 224,64 // loopne 39508 <.literal16+0x118>
.byte 0,0 // add %al,(%rax)
- .byte 224,64 // loopne 393ac <.literal16+0x11c>
+ .byte 224,64 // loopne 3950c <.literal16+0x11c>
.byte 0,0 // add %al,(%rax)
- .byte 224,64 // loopne 393b0 <.literal16+0x120>
+ .byte 224,64 // loopne 39510 <.literal16+0x120>
.byte 154 // (bad)
.byte 153 // cltd
.byte 153 // cltd
@@ -67062,13 +67132,13 @@ BALIGN16
.byte 10,23 // or (%rdi),%dl
.byte 63 // (bad)
.byte 174 // scas %es:(%rdi),%al
- .byte 71,225,61 // rex.RXB loope 393d1 <.literal16+0x141>
+ .byte 71,225,61 // rex.RXB loope 39531 <.literal16+0x141>
.byte 174 // scas %es:(%rdi),%al
- .byte 71,225,61 // rex.RXB loope 393d5 <.literal16+0x145>
+ .byte 71,225,61 // rex.RXB loope 39535 <.literal16+0x145>
.byte 174 // scas %es:(%rdi),%al
- .byte 71,225,61 // rex.RXB loope 393d9 <.literal16+0x149>
+ .byte 71,225,61 // rex.RXB loope 39539 <.literal16+0x149>
.byte 174 // scas %es:(%rdi),%al
- .byte 71,225,61 // rex.RXB loope 393dd <.literal16+0x14d>
+ .byte 71,225,61 // rex.RXB loope 3953d <.literal16+0x14d>
.byte 255,0 // incl (%rax)
.byte 0,0 // add %al,(%rax)
.byte 255,0 // incl (%rax)
@@ -67079,10 +67149,10 @@ BALIGN16
.byte 0,0 // add %al,(%rax)
.byte 1,255 // add %edi,%edi
.byte 255 // (bad)
- .byte 255,5,255,255,255,9 // incl 0x9ffffff(%rip) # a0393b8 <_sk_srcover_bgra_8888_sse2_lowp+0xa000cb4>
+ .byte 255,5,255,255,255,9 // incl 0x9ffffff(%rip) # a039518 <_sk_srcover_bgra_8888_sse2_lowp+0xa000ca4>
.byte 255 // (bad)
.byte 255 // (bad)
- .byte 255,13,255,255,255,2 // decl 0x2ffffff(%rip) # 30393c0 <_sk_srcover_bgra_8888_sse2_lowp+0x3000cbc>
+ .byte 255,13,255,255,255,2 // decl 0x2ffffff(%rip) # 3039520 <_sk_srcover_bgra_8888_sse2_lowp+0x3000cac>
.byte 255 // (bad)
.byte 255 // (bad)
.byte 255,6 // incl (%rsi)
@@ -67097,11 +67167,11 @@ BALIGN16
.byte 255,0 // incl (%rax)
.byte 0,127,67 // add %bh,0x43(%rdi)
.byte 0,0 // add %al,(%rax)
- .byte 127,67 // jg 3941b <.literal16+0x18b>
+ .byte 127,67 // jg 3957b <.literal16+0x18b>
.byte 0,0 // add %al,(%rax)
- .byte 127,67 // jg 3941f <.literal16+0x18f>
+ .byte 127,67 // jg 3957f <.literal16+0x18f>
.byte 0,0 // add %al,(%rax)
- .byte 127,67 // jg 39423 <.literal16+0x193>
+ .byte 127,67 // jg 39583 <.literal16+0x193>
.byte 0,0 // add %al,(%rax)
.byte 128,127,0,0 // cmpb $0x0,0x0(%rdi)
.byte 128,127,0,0 // cmpb $0x0,0x0(%rdi)
@@ -67257,13 +67327,13 @@ BALIGN16
.byte 132,55 // test %dh,(%rdi)
.byte 8,33 // or %ah,(%rcx)
.byte 132,55 // test %dh,(%rdi)
- .byte 224,7 // loopne 39539 <.literal16+0x2a9>
+ .byte 224,7 // loopne 39699 <.literal16+0x2a9>
.byte 0,0 // add %al,(%rax)
- .byte 224,7 // loopne 3953d <.literal16+0x2ad>
+ .byte 224,7 // loopne 3969d <.literal16+0x2ad>
.byte 0,0 // add %al,(%rax)
- .byte 224,7 // loopne 39541 <.literal16+0x2b1>
+ .byte 224,7 // loopne 396a1 <.literal16+0x2b1>
.byte 0,0 // add %al,(%rax)
- .byte 224,7 // loopne 39545 <.literal16+0x2b5>
+ .byte 224,7 // loopne 396a5 <.literal16+0x2b5>
.byte 0,0 // add %al,(%rax)
.byte 33,8 // and %ecx,(%rax)
.byte 2,58 // add (%rdx),%bh
@@ -67317,17 +67387,17 @@ BALIGN16
.byte 0,0 // add %al,(%rax)
.byte 52,255 // xor $0xff,%al
.byte 255 // (bad)
- .byte 127,0 // jg 395b4 <.literal16+0x324>
+ .byte 127,0 // jg 39714 <.literal16+0x324>
.byte 255 // (bad)
.byte 255 // (bad)
- .byte 127,0 // jg 395b8 <.literal16+0x328>
+ .byte 127,0 // jg 39718 <.literal16+0x328>
.byte 255 // (bad)
.byte 255 // (bad)
- .byte 127,0 // jg 395bc <.literal16+0x32c>
+ .byte 127,0 // jg 3971c <.literal16+0x32c>
.byte 255 // (bad)
.byte 255 // (bad)
- .byte 127,0 // jg 395c0 <.literal16+0x330>
- .byte 119,115 // ja 39635 <.literal16+0x3a5>
+ .byte 127,0 // jg 39720 <.literal16+0x330>
+ .byte 119,115 // ja 39795 <.literal16+0x3a5>
.byte 248 // clc
.byte 194,119,115 // retq $0x7377
.byte 248 // clc
@@ -67338,7 +67408,7 @@ BALIGN16
.byte 194,117,191 // retq $0xbf75
.byte 191,63,117,191,191 // mov $0xbfbf753f,%edi
.byte 63 // (bad)
- .byte 117,191 // jne 39599 <.literal16+0x309>
+ .byte 117,191 // jne 396f9 <.literal16+0x309>
.byte 191,63,117,191,191 // mov $0xbfbf753f,%edi
.byte 63 // (bad)
.byte 249 // stc
@@ -67350,7 +67420,7 @@ BALIGN16
.byte 249 // stc
.byte 68,180,62 // rex.R mov $0x3e,%spl
.byte 163,233,220,63,163,233,220,63,163 // movabs %eax,0xa33fdce9a33fdce9
- .byte 233,220,63,163,233 // jmpq ffffffffe9a6d5da <_sk_srcover_bgra_8888_sse2_lowp+0xffffffffe9a34ed6>
+ .byte 233,220,63,163,233 // jmpq ffffffffe9a6d73a <_sk_srcover_bgra_8888_sse2_lowp+0xffffffffe9a34ec6>
.byte 220,63 // fdivrl (%rdi)
.byte 81 // push %rcx
.byte 140,242 // mov %?,%edx
@@ -67440,16 +67510,16 @@ BALIGN16
.byte 128,3,62 // addb $0x3e,(%rbx)
.byte 31 // (bad)
.byte 215 // xlat %ds:(%rbx)
- .byte 118,63 // jbe 39723 <.literal16+0x493>
+ .byte 118,63 // jbe 39883 <.literal16+0x493>
.byte 31 // (bad)
.byte 215 // xlat %ds:(%rbx)
- .byte 118,63 // jbe 39727 <.literal16+0x497>
+ .byte 118,63 // jbe 39887 <.literal16+0x497>
.byte 31 // (bad)
.byte 215 // xlat %ds:(%rbx)
- .byte 118,63 // jbe 3972b <.literal16+0x49b>
+ .byte 118,63 // jbe 3988b <.literal16+0x49b>
.byte 31 // (bad)
.byte 215 // xlat %ds:(%rbx)
- .byte 118,63 // jbe 3972f <.literal16+0x49f>
+ .byte 118,63 // jbe 3988f <.literal16+0x49f>
.byte 246,64,83,63 // testb $0x3f,0x53(%rax)
.byte 246,64,83,63 // testb $0x3f,0x53(%rax)
.byte 246,64,83,63 // testb $0x3f,0x53(%rax)
@@ -67470,13 +67540,13 @@ BALIGN16
.byte 65,0,0 // add %al,(%r8)
.byte 248 // clc
.byte 65,0,0 // add %al,(%r8)
- .byte 124,66 // jl 39766 <.literal16+0x4d6>
+ .byte 124,66 // jl 398c6 <.literal16+0x4d6>
.byte 0,0 // add %al,(%rax)
- .byte 124,66 // jl 3976a <.literal16+0x4da>
+ .byte 124,66 // jl 398ca <.literal16+0x4da>
.byte 0,0 // add %al,(%rax)
- .byte 124,66 // jl 3976e <.literal16+0x4de>
+ .byte 124,66 // jl 398ce <.literal16+0x4de>
.byte 0,0 // add %al,(%rax)
- .byte 124,66 // jl 39772 <.literal16+0x4e2>
+ .byte 124,66 // jl 398d2 <.literal16+0x4e2>
.byte 0,240 // add %dh,%al
.byte 0,0 // add %al,(%rax)
.byte 0,240 // add %dh,%al
@@ -67522,25 +67592,25 @@ BALIGN16
.byte 136,136,61,137,136,136 // mov %cl,-0x777776c3(%rax)
.byte 61,137,136,136,61 // cmp $0x3d888889,%eax
.byte 0,0 // add %al,(%rax)
- .byte 112,65 // jo 397f5 <.literal16+0x565>
+ .byte 112,65 // jo 39955 <.literal16+0x565>
.byte 0,0 // add %al,(%rax)
- .byte 112,65 // jo 397f9 <.literal16+0x569>
+ .byte 112,65 // jo 39959 <.literal16+0x569>
.byte 0,0 // add %al,(%rax)
- .byte 112,65 // jo 397fd <.literal16+0x56d>
+ .byte 112,65 // jo 3995d <.literal16+0x56d>
.byte 0,0 // add %al,(%rax)
- .byte 112,65 // jo 39801 <.literal16+0x571>
+ .byte 112,65 // jo 39961 <.literal16+0x571>
.byte 0,128,0,0,0,128 // add %al,-0x80000000(%rax)
.byte 0,0 // add %al,(%rax)
.byte 0,128,0,0,0,128 // add %al,-0x80000000(%rax)
.byte 0,0 // add %al,(%rax)
.byte 255 // (bad)
- .byte 127,0 // jg 397d3 <.literal16+0x543>
+ .byte 127,0 // jg 39933 <.literal16+0x543>
.byte 0,255 // add %bh,%bh
- .byte 127,0 // jg 397d7 <.literal16+0x547>
+ .byte 127,0 // jg 39937 <.literal16+0x547>
.byte 0,255 // add %bh,%bh
- .byte 127,0 // jg 397db <.literal16+0x54b>
+ .byte 127,0 // jg 3993b <.literal16+0x54b>
.byte 0,255 // add %bh,%bh
- .byte 127,0 // jg 397df <.literal16+0x54f>
+ .byte 127,0 // jg 3993f <.literal16+0x54f>
.byte 0,0 // add %al,(%rax)
.byte 4,0 // add $0x0,%al
.byte 0,0 // add %al,(%rax)
@@ -67566,13 +67636,13 @@ BALIGN16
.byte 255 // (bad)
.byte 255 // (bad)
.byte 255 // (bad)
- .byte 127,255 // jg 39818 <.literal16+0x588>
+ .byte 127,255 // jg 39978 <.literal16+0x588>
.byte 255 // (bad)
.byte 255 // (bad)
- .byte 127,255 // jg 3981c <.literal16+0x58c>
+ .byte 127,255 // jg 3997c <.literal16+0x58c>
.byte 255 // (bad)
.byte 255 // (bad)
- .byte 127,0 // jg 39821 <.literal16+0x591>
+ .byte 127,0 // jg 39981 <.literal16+0x591>
.byte 0,128,56,0,0,128 // add %al,-0x7fffffc8(%rax)
.byte 56,0 // cmp %al,(%rax)
.byte 0,128,56,0,0,128 // add %al,-0x7fffffc8(%rax)
@@ -67586,13 +67656,13 @@ BALIGN16
.byte 64,254 // rex (bad)
.byte 255,0 // incl (%rax)
.byte 255 // (bad)
- .byte 127,71 // jg 3988b <.literal16+0x5fb>
+ .byte 127,71 // jg 399eb <.literal16+0x5fb>
.byte 0,255 // add %bh,%bh
- .byte 127,71 // jg 3988f <.literal16+0x5ff>
+ .byte 127,71 // jg 399ef <.literal16+0x5ff>
.byte 0,255 // add %bh,%bh
- .byte 127,71 // jg 39893 <.literal16+0x603>
+ .byte 127,71 // jg 399f3 <.literal16+0x603>
.byte 0,255 // add %bh,%bh
- .byte 127,71 // jg 39897 <.literal16+0x607>
+ .byte 127,71 // jg 399f7 <.literal16+0x607>
.byte 208 // (bad)
.byte 179,89 // mov $0x59,%bl
.byte 62,208 // ds (bad)
@@ -67669,13 +67739,13 @@ BALIGN16
.byte 192,191,0,0,192,191,0 // sarb $0x0,-0x40400000(%rdi)
.byte 0,192 // add %al,%al
.byte 191,0,0,192,191 // mov $0xbfc00000,%edi
- .byte 114,28 // jb 3992e <.literal16+0x69e>
+ .byte 114,28 // jb 39a8e <.literal16+0x69e>
.byte 199 // (bad)
- .byte 62,114,28 // jb,pt 39932 <.literal16+0x6a2>
+ .byte 62,114,28 // jb,pt 39a92 <.literal16+0x6a2>
.byte 199 // (bad)
- .byte 62,114,28 // jb,pt 39936 <.literal16+0x6a6>
+ .byte 62,114,28 // jb,pt 39a96 <.literal16+0x6a6>
.byte 199 // (bad)
- .byte 62,114,28 // jb,pt 3993a <.literal16+0x6aa>
+ .byte 62,114,28 // jb,pt 39a9a <.literal16+0x6aa>
.byte 199 // (bad)
.byte 62,85 // ds push %rbp
.byte 85 // push %rbp
@@ -67694,15 +67764,15 @@ BALIGN16
.byte 0,192 // add %al,%al
.byte 63 // (bad)
.byte 57,142,99,61,57,142 // cmp %ecx,-0x71c6c29d(%rsi)
- .byte 99,61,57,142,99,61 // movslq 0x3d638e39(%rip),%edi # 3d672785 <_sk_srcover_bgra_8888_sse2_lowp+0x3d63a081>
+ .byte 99,61,57,142,99,61 // movslq 0x3d638e39(%rip),%edi # 3d6728e5 <_sk_srcover_bgra_8888_sse2_lowp+0x3d63a071>
.byte 57,142,99,61,114,249 // cmp %ecx,-0x68dc29d(%rsi)
- .byte 127,63 // jg 39993 <.literal16+0x703>
- .byte 114,249 // jb 3994f <.literal16+0x6bf>
- .byte 127,63 // jg 39997 <.literal16+0x707>
- .byte 114,249 // jb 39953 <.literal16+0x6c3>
- .byte 127,63 // jg 3999b <.literal16+0x70b>
- .byte 114,249 // jb 39957 <.literal16+0x6c7>
- .byte 127,63 // jg 3999f <.literal16+0x70f>
+ .byte 127,63 // jg 39af3 <.literal16+0x703>
+ .byte 114,249 // jb 39aaf <.literal16+0x6bf>
+ .byte 127,63 // jg 39af7 <.literal16+0x707>
+ .byte 114,249 // jb 39ab3 <.literal16+0x6c3>
+ .byte 127,63 // jg 39afb <.literal16+0x70b>
+ .byte 114,249 // jb 39ab7 <.literal16+0x6c7>
+ .byte 127,63 // jg 39aff <.literal16+0x70f>
.byte 3,0 // add (%rax),%eax
.byte 0,0 // add %al,(%rax)
.byte 3,0 // add (%rax),%eax
@@ -67726,9 +67796,9 @@ BALIGN16
.byte 255 // (bad)
.byte 255 // (bad)
.byte 255 // (bad)
- .byte 255,45,16,17,192,45 // ljmp *0x2dc01110(%rip) # 2dc3aa95 <_sk_srcover_bgra_8888_sse2_lowp+0x2dc02391>
+ .byte 255,45,16,17,192,45 // ljmp *0x2dc01110(%rip) # 2dc3abf5 <_sk_srcover_bgra_8888_sse2_lowp+0x2dc02381>
.byte 16,17 // adc %dl,(%rcx)
- .byte 192,45,16,17,192,45,16 // shrb $0x10,0x2dc01110(%rip) # 2dc3aa9e <_sk_srcover_bgra_8888_sse2_lowp+0x2dc0239a>
+ .byte 192,45,16,17,192,45,16 // shrb $0x10,0x2dc01110(%rip) # 2dc3abfe <_sk_srcover_bgra_8888_sse2_lowp+0x2dc0238a>
.byte 17,192 // adc %eax,%eax
.byte 18,120,57 // adc 0x39(%rax),%bh
.byte 64,18,120,57 // adc 0x39(%rax),%dil
@@ -67832,14 +67902,14 @@ BALIGN16
.byte 0,248 // add %bh,%al
.byte 0,248 // add %bh,%al
.byte 0,248 // add %bh,%al
- .byte 224,255 // loopne 39a81 <.literal16+0x7f1>
- .byte 224,255 // loopne 39a83 <.literal16+0x7f3>
- .byte 224,255 // loopne 39a85 <.literal16+0x7f5>
- .byte 224,255 // loopne 39a87 <.literal16+0x7f7>
- .byte 224,255 // loopne 39a89 <.literal16+0x7f9>
- .byte 224,255 // loopne 39a8b <.literal16+0x7fb>
- .byte 224,255 // loopne 39a8d <.literal16+0x7fd>
- .byte 224,255 // loopne 39a8f <.literal16+0x7ff>
+ .byte 224,255 // loopne 39be1 <.literal16+0x7f1>
+ .byte 224,255 // loopne 39be3 <.literal16+0x7f3>
+ .byte 224,255 // loopne 39be5 <.literal16+0x7f5>
+ .byte 224,255 // loopne 39be7 <.literal16+0x7f7>
+ .byte 224,255 // loopne 39be9 <.literal16+0x7f9>
+ .byte 224,255 // loopne 39beb <.literal16+0x7fb>
+ .byte 224,255 // loopne 39bed <.literal16+0x7fd>
+ .byte 224,255 // loopne 39bef <.literal16+0x7ff>
.byte 15,0,15 // str (%rdi)
.byte 0,15 // add %cl,(%rdi)
.byte 0,15 // add %cl,(%rdi)
@@ -67912,9 +67982,9 @@ BALIGN8
.byte 4,0 // add $0x0,%al
.byte 0,0 // add %al,(%rax)
.byte 255 // (bad)
- .byte 127,0 // jg 39b2b <.literal8+0x1b>
+ .byte 127,0 // jg 39c8b <.literal8+0x1b>
.byte 0,255 // add %bh,%bh
- .byte 127,0 // jg 39b2f <.literal8+0x1f>
+ .byte 127,0 // jg 39c8f <.literal8+0x1f>
.byte 0,0 // add %al,(%rax)
.byte 128,0,0 // addb $0x0,(%rax)
.byte 0 // .byte 0x0