path: root/plugins/ffap
author    waker <wakeroid@gmail.com>    2011-06-15 21:57:06 +0200
committer waker <wakeroid@gmail.com>    2011-06-15 21:57:06 +0200
commit f702e5270db5bd4576765c8415c456e443513006 (patch)
tree   c901fa7a600bc1fa1496b5af716b7d375be7000a /plugins/ffap
parent 10deb233f02b3b3234156c6cf535d06115decd03 (diff)
added better sse2 filter implementation to ape plugin;
added yasm support
Diffstat (limited to 'plugins/ffap')
-rw-r--r--  plugins/ffap/Makefile.am      |  22
-rw-r--r--  plugins/ffap/dsputil_yasm.asm | 423
-rw-r--r--  plugins/ffap/ffap.c           | 132
-rw-r--r--  plugins/ffap/x86inc.asm       | 625
4 files changed, 1072 insertions(+), 130 deletions(-)
diff --git a/plugins/ffap/Makefile.am b/plugins/ffap/Makefile.am
index a3671fbe..16110b94 100644
--- a/plugins/ffap/Makefile.am
+++ b/plugins/ffap/Makefile.am
@@ -1,8 +1,26 @@
if HAVE_FFAP
ffapdir = $(libdir)/$(PACKAGE)
pkglib_LTLIBRARIES = ffap.la
-ffap_la_SOURCES = ffap.c
+
+topsrcdir = @top_srcdir@
+EXTRA_PROGRAMS = foo
+YASM=$(topsrcdir)/yasmwrapper.sh
+SUFFIXES = .asm
+
+if HAVE_YASM
+INTEL_SRC=dsputil_yasm.asm
+YASM_FLAGS="-f elf -D ARCH_X86_64 -m amd64"
+ffap_la_DEPENDENCIES=dsputil_yasm.lo
+endif
+
+ffap_la_SOURCES = ffap.c $(INTEL_SRC)
+
+if HAVE_YASM
+.asm.lo:
+ $(LIBTOOL) --tag=CC --mode=compile $(YASM) $(YASM_FLAGS) -o $@ $<
+endif
+
ffap_la_LDFLAGS = -module
-AM_CFLAGS = $(CFLAGS) -std=c99
+AM_CFLAGS = $(CFLAGS) -fPIC -std=c99
endif
diff --git a/plugins/ffap/dsputil_yasm.asm b/plugins/ffap/dsputil_yasm.asm
new file mode 100644
index 00000000..e2478a48
--- /dev/null
+++ b/plugins/ffap/dsputil_yasm.asm
@@ -0,0 +1,423 @@
+;******************************************************************************
+;* MMX optimized DSP utils
+;* Copyright (c) 2008 Loren Merritt
+;*
+;* This file is part of FFmpeg.
+;*
+;* FFmpeg is free software; you can redistribute it and/or
+;* modify it under the terms of the GNU Lesser General Public
+;* License as published by the Free Software Foundation; either
+;* version 2.1 of the License, or (at your option) any later version.
+;*
+;* FFmpeg is distributed in the hope that it will be useful,
+;* but WITHOUT ANY WARRANTY; without even the implied warranty of
+;* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+;* Lesser General Public License for more details.
+;*
+;* You should have received a copy of the GNU Lesser General Public
+;* License along with FFmpeg; if not, write to the Free Software
+;* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+;******************************************************************************
+
+%include "x86inc.asm"
+
+SECTION_RODATA
+pb_f: times 16 db 15
+pb_zzzzzzzz77777777: times 8 db -1
+pb_7: times 8 db 7
+pb_zzzz3333zzzzbbbb: db -1,-1,-1,-1,3,3,3,3,-1,-1,-1,-1,11,11,11,11
+pb_zz11zz55zz99zzdd: db -1,-1,1,1,-1,-1,5,5,-1,-1,9,9,-1,-1,13,13
+
+section .text align=16
+
+%macro PSWAPD_SSE 2
+ pshufw %1, %2, 0x4e
+%endmacro
+%macro PSWAPD_3DN1 2
+ movq %1, %2
+ psrlq %1, 32
+ punpckldq %1, %2
+%endmacro
+
+%macro FLOAT_TO_INT16_INTERLEAVE6 1
+; void ff_float_to_int16_interleave6_sse(int16_t *dst, const float **src, int len)
+cglobal float_to_int16_interleave6_%1, 2,7,0, dst, src, src1, src2, src3, src4, src5
+%ifdef ARCH_X86_64
+ %define lend r10d
+ mov lend, r2d
+%else
+ %define lend dword r2m
+%endif
+ mov src1q, [srcq+1*gprsize]
+ mov src2q, [srcq+2*gprsize]
+ mov src3q, [srcq+3*gprsize]
+ mov src4q, [srcq+4*gprsize]
+ mov src5q, [srcq+5*gprsize]
+ mov srcq, [srcq]
+ sub src1q, srcq
+ sub src2q, srcq
+ sub src3q, srcq
+ sub src4q, srcq
+ sub src5q, srcq
+.loop:
+ cvtps2pi mm0, [srcq]
+ cvtps2pi mm1, [srcq+src1q]
+ cvtps2pi mm2, [srcq+src2q]
+ cvtps2pi mm3, [srcq+src3q]
+ cvtps2pi mm4, [srcq+src4q]
+ cvtps2pi mm5, [srcq+src5q]
+ packssdw mm0, mm3
+ packssdw mm1, mm4
+ packssdw mm2, mm5
+ pswapd mm3, mm0
+ punpcklwd mm0, mm1
+ punpckhwd mm1, mm2
+ punpcklwd mm2, mm3
+ pswapd mm3, mm0
+ punpckldq mm0, mm2
+ punpckhdq mm2, mm1
+ punpckldq mm1, mm3
+ movq [dstq ], mm0
+ movq [dstq+16], mm2
+ movq [dstq+ 8], mm1
+ add srcq, 8
+ add dstq, 24
+ sub lend, 2
+ jg .loop
+ emms
+ RET
+%endmacro ; FLOAT_TO_INT16_INTERLEAVE6
+
+%define pswapd PSWAPD_SSE
+FLOAT_TO_INT16_INTERLEAVE6 sse
+%define cvtps2pi pf2id
+%define pswapd PSWAPD_3DN1
+FLOAT_TO_INT16_INTERLEAVE6 3dnow
+%undef pswapd
+FLOAT_TO_INT16_INTERLEAVE6 3dn2
+%undef cvtps2pi
+
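The three instantiations above convert and interleave six planar float channels into packed signed 16-bit samples, two frames per loop iteration. A scalar C equivalent, as a rough sketch (illustrative only, not code from this commit; lrintf plus clamping stands in for cvtps2pi rounding and packssdw saturation):

#include <math.h>
#include <stdint.h>

static void float_to_int16_interleave6_ref(int16_t *dst, const float **src, int len)
{
    for (int i = 0; i < len; i++) {
        for (int c = 0; c < 6; c++) {
            long v = lrintf(src[c][i]);      /* round to nearest, like cvtps2pi */
            if (v >  32767) v =  32767;      /* saturate, like packssdw */
            if (v < -32768) v = -32768;
            dst[6 * i + c] = (int16_t)v;
        }
    }
}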
+
+
+%macro SCALARPRODUCT 1
+; int scalarproduct_int16(int16_t *v1, int16_t *v2, int order, int shift)
+cglobal scalarproduct_int16_%1, 3,3,4, v1, v2, order, shift
+ shl orderq, 1
+ add v1q, orderq
+ add v2q, orderq
+ neg orderq
+ movd m3, shiftm
+ pxor m2, m2
+.loop:
+ movu m0, [v1q + orderq]
+ movu m1, [v1q + orderq + mmsize]
+ pmaddwd m0, [v2q + orderq]
+ pmaddwd m1, [v2q + orderq + mmsize]
+ paddd m2, m0
+ paddd m2, m1
+ add orderq, mmsize*2
+ jl .loop
+%if mmsize == 16
+ movhlps m0, m2
+ paddd m2, m0
+ psrad m2, m3
+ pshuflw m0, m2, 0x4e
+%else
+ psrad m2, m3
+ pshufw m0, m2, 0x4e
+%endif
+ paddd m2, m0
+ movd eax, m2
+ RET
+
+; int scalarproduct_and_madd_int16(int16_t *v1, int16_t *v2, int16_t *v3, int order, int mul)
+cglobal scalarproduct_and_madd_int16_%1, 4,4,8, v1, v2, v3, order, mul
+ shl orderq, 1
+ movd m7, mulm
+%if mmsize == 16
+ pshuflw m7, m7, 0
+ punpcklqdq m7, m7
+%else
+ pshufw m7, m7, 0
+%endif
+ pxor m6, m6
+ add v1q, orderq
+ add v2q, orderq
+ add v3q, orderq
+ neg orderq
+.loop:
+ movu m0, [v2q + orderq]
+ movu m1, [v2q + orderq + mmsize]
+ mova m4, [v1q + orderq]
+ mova m5, [v1q + orderq + mmsize]
+ movu m2, [v3q + orderq]
+ movu m3, [v3q + orderq + mmsize]
+ pmaddwd m0, m4
+ pmaddwd m1, m5
+ pmullw m2, m7
+ pmullw m3, m7
+ paddd m6, m0
+ paddd m6, m1
+ paddw m2, m4
+ paddw m3, m5
+ mova [v1q + orderq], m2
+ mova [v1q + orderq + mmsize], m3
+ add orderq, mmsize*2
+ jl .loop
+%if mmsize == 16
+ movhlps m0, m6
+ paddd m6, m0
+ pshuflw m0, m6, 0x4e
+%else
+ pshufw m0, m6, 0x4e
+%endif
+ paddd m6, m0
+ movd eax, m6
+ RET
+%endmacro
+
+INIT_MMX
+SCALARPRODUCT mmx2
+INIT_XMM
+SCALARPRODUCT sse2
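Both routines compute a dot product over int16 vectors; scalarproduct_and_madd_int16 additionally adds mul*v3 into v1 in the same pass, which is how the APE adaptive filter folds prediction and coefficient adaptation into one loop. For orientation, scalar equivalents look roughly like this (the first mirrors the scalarproduct_int16_c being removed from ffap.c further down; the second is a sketch of the reference the SIMD path replaces):

#include <stdint.h>

static int32_t scalarproduct_int16_ref(const int16_t *v1, const int16_t *v2, int order, int shift)
{
    int32_t res = 0;
    while (order--)
        res += (*v1++ * *v2++) >> shift;
    return res;
}

static int32_t scalarproduct_and_madd_int16_ref(int16_t *v1, const int16_t *v2, const int16_t *v3, int order, int mul)
{
    int32_t res = 0;
    while (order--) {
        res   += *v1 * *v2++;        /* dot product */
        *v1++ += mul * *v3++;        /* in-place coefficient update */
    }
    return res;
}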
+
+%macro SCALARPRODUCT_LOOP 1
+align 16
+.loop%1:
+ sub orderq, mmsize*2
+%if %1
+ mova m1, m4
+ mova m4, [v2q + orderq]
+ mova m0, [v2q + orderq + mmsize]
+ palignr m1, m0, %1
+ palignr m0, m4, %1
+ mova m3, m5
+ mova m5, [v3q + orderq]
+ mova m2, [v3q + orderq + mmsize]
+ palignr m3, m2, %1
+ palignr m2, m5, %1
+%else
+ mova m0, [v2q + orderq]
+ mova m1, [v2q + orderq + mmsize]
+ mova m2, [v3q + orderq]
+ mova m3, [v3q + orderq + mmsize]
+%endif
+ %define t0 [v1q + orderq]
+ %define t1 [v1q + orderq + mmsize]
+%ifdef ARCH_X86_64
+ mova m8, t0
+ mova m9, t1
+ %define t0 m8
+ %define t1 m9
+%endif
+ pmaddwd m0, t0
+ pmaddwd m1, t1
+ pmullw m2, m7
+ pmullw m3, m7
+ paddw m2, t0
+ paddw m3, t1
+ paddd m6, m0
+ paddd m6, m1
+ mova [v1q + orderq], m2
+ mova [v1q + orderq + mmsize], m3
+ jg .loop%1
+%if %1
+ jmp .end
+%endif
+%endmacro
+
+; int scalarproduct_and_madd_int16(int16_t *v1, int16_t *v2, int16_t *v3, int order, int mul)
+cglobal scalarproduct_and_madd_int16_ssse3, 4,5,10, v1, v2, v3, order, mul
+ shl orderq, 1
+ movd m7, mulm
+ pshuflw m7, m7, 0
+ punpcklqdq m7, m7
+ pxor m6, m6
+ mov r4d, v2d
+ and r4d, 15
+ and v2q, ~15
+ and v3q, ~15
+ mova m4, [v2q + orderq]
+ mova m5, [v3q + orderq]
+ ; linear is faster than branch tree or jump table, because the branches taken are cyclic (i.e. predictable)
+ cmp r4d, 0
+ je .loop0
+ cmp r4d, 2
+ je .loop2
+ cmp r4d, 4
+ je .loop4
+ cmp r4d, 6
+ je .loop6
+ cmp r4d, 8
+ je .loop8
+ cmp r4d, 10
+ je .loop10
+ cmp r4d, 12
+ je .loop12
+SCALARPRODUCT_LOOP 14
+SCALARPRODUCT_LOOP 12
+SCALARPRODUCT_LOOP 10
+SCALARPRODUCT_LOOP 8
+SCALARPRODUCT_LOOP 6
+SCALARPRODUCT_LOOP 4
+SCALARPRODUCT_LOOP 2
+SCALARPRODUCT_LOOP 0
+.end:
+ movhlps m0, m6
+ paddd m6, m0
+ pshuflw m0, m6, 0x4e
+ paddd m6, m0
+ movd eax, m6
+ RET
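The SSSE3 variant above avoids unaligned loads entirely: it records the 16-byte misalignment of v2, masks both v2 and v3 down to aligned addresses, and jumps to the unrolled loop whose palignr shift matches that misalignment (so it implicitly assumes v2 and v3 are misaligned by the same amount). A minimal sketch of the dispatch key (illustrative only):

#include <stdint.h>

static int palignr_shift_for(const int16_t *v2)
{
    /* one of 0,2,4,...,14 for 16-bit data; selects .loop0 .. .loop14 above */
    return (int)((uintptr_t)v2 & 15);
}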
+
+
+
+; void ff_add_hfyu_median_prediction_mmx2(uint8_t *dst, const uint8_t *top, const uint8_t *diff, int w, int *left, int *left_top)
+cglobal add_hfyu_median_prediction_mmx2, 6,6,0, dst, top, diff, w, left, left_top
+ movq mm0, [topq]
+ movq mm2, mm0
+ movd mm4, [left_topq]
+ psllq mm2, 8
+ movq mm1, mm0
+ por mm4, mm2
+ movd mm3, [leftq]
+ psubb mm0, mm4 ; t-tl
+ add dstq, wq
+ add topq, wq
+ add diffq, wq
+ neg wq
+ jmp .skip
+.loop:
+ movq mm4, [topq+wq]
+ movq mm0, mm4
+ psllq mm4, 8
+ por mm4, mm1
+ movq mm1, mm0 ; t
+ psubb mm0, mm4 ; t-tl
+.skip:
+ movq mm2, [diffq+wq]
+%assign i 0
+%rep 8
+ movq mm4, mm0
+ paddb mm4, mm3 ; t-tl+l
+ movq mm5, mm3
+ pmaxub mm3, mm1
+ pminub mm5, mm1
+ pminub mm3, mm4
+ pmaxub mm3, mm5 ; median
+ paddb mm3, mm2 ; +residual
+%if i==0
+ movq mm7, mm3
+ psllq mm7, 56
+%else
+ movq mm6, mm3
+ psrlq mm7, 8
+ psllq mm6, 56
+ por mm7, mm6
+%endif
+%if i<7
+ psrlq mm0, 8
+ psrlq mm1, 8
+ psrlq mm2, 8
+%endif
+%assign i i+1
+%endrep
+ movq [dstq+wq], mm7
+ add wq, 8
+ jl .loop
+ movzx r2d, byte [dstq-1]
+ mov [leftq], r2d
+ movzx r2d, byte [topq-1]
+ mov [left_topq], r2d
+ RET
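add_hfyu_median_prediction reconstructs a row from residuals using the HuffYUV median predictor: each output byte is the residual plus the median of left, top, and left+top-topleft, with all byte arithmetic wrapping mod 256 (paddb/psubb). A scalar sketch of the same logic (illustrative, not code from this commit):

#include <stdint.h>

static int mid_pred(int a, int b, int c)
{
    /* median of three values */
    if (a > b) { int t = a; a = b; b = t; }
    if (c <= a) return a;
    if (c >= b) return b;
    return c;
}

static void add_hfyu_median_prediction_ref(uint8_t *dst, const uint8_t *top,
                                           const uint8_t *diff, int w,
                                           int *left, int *left_top)
{
    uint8_t l  = (uint8_t)*left;
    uint8_t lt = (uint8_t)*left_top;
    for (int i = 0; i < w; i++) {
        l  = mid_pred(l, top[i], (l + top[i] - lt) & 0xFF) + diff[i];  /* wraps mod 256 */
        lt = top[i];
        dst[i] = l;
    }
    *left     = l;
    *left_top = lt;
}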
+
+
+%macro ADD_HFYU_LEFT_LOOP 1 ; %1 = is_aligned
+ add srcq, wq
+ add dstq, wq
+ neg wq
+%%.loop:
+ mova m1, [srcq+wq]
+ mova m2, m1
+ psllw m1, 8
+ paddb m1, m2
+ mova m2, m1
+ pshufb m1, m3
+ paddb m1, m2
+ pshufb m0, m5
+ mova m2, m1
+ pshufb m1, m4
+ paddb m1, m2
+%if mmsize == 16
+ mova m2, m1
+ pshufb m1, m6
+ paddb m1, m2
+%endif
+ paddb m0, m1
+%if %1
+ mova [dstq+wq], m0
+%else
+ movq [dstq+wq], m0
+ movhps [dstq+wq+8], m0
+%endif
+ add wq, mmsize
+ jl %%.loop
+ mov eax, mmsize-1
+ sub eax, wd
+ movd m1, eax
+ pshufb m0, m1
+ movd eax, m0
+ RET
+%endmacro
+
+; int ff_add_hfyu_left_prediction(uint8_t *dst, const uint8_t *src, int w, int left)
+INIT_MMX
+cglobal add_hfyu_left_prediction_ssse3, 3,3,7, dst, src, w, left
+.skip_prologue:
+ mova m5, [pb_7 GLOBAL]
+ mova m4, [pb_zzzz3333zzzzbbbb GLOBAL]
+ mova m3, [pb_zz11zz55zz99zzdd GLOBAL]
+ movd m0, leftm
+ psllq m0, 56
+ ADD_HFYU_LEFT_LOOP 1
+
+INIT_XMM
+cglobal add_hfyu_left_prediction_sse4, 3,3,7, dst, src, w, left
+ mova m5, [pb_f GLOBAL]
+ mova m6, [pb_zzzzzzzz77777777 GLOBAL]
+ mova m4, [pb_zzzz3333zzzzbbbb GLOBAL]
+ mova m3, [pb_zz11zz55zz99zzdd GLOBAL]
+ movd m0, leftm
+ pslldq m0, 15
+ test srcq, 15
+ jnz add_hfyu_left_prediction_ssse3.skip_prologue
+ test dstq, 15
+ jnz .unaligned
+ ADD_HFYU_LEFT_LOOP 1
+.unaligned:
+ ADD_HFYU_LEFT_LOOP 0
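add_hfyu_left_prediction turns a row of byte deltas into absolute values via a running sum mod 256 seeded with left, returning the final accumulator for the next row; the chain of pshufb/paddb above computes that prefix sum in a few doubling steps inside each vector. A scalar sketch (illustrative only):

#include <stdint.h>

static int add_hfyu_left_prediction_ref(uint8_t *dst, const uint8_t *src, int w, int left)
{
    int acc = left & 0xFF;
    for (int i = 0; i < w; i++) {
        acc = (acc + src[i]) & 0xFF;   /* byte-wise running sum, wraps like paddb */
        dst[i] = (uint8_t)acc;
    }
    return acc;                        /* becomes "left" for the next row */
}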
+
+
+; float ff_scalarproduct_float_sse(const float *v1, const float *v2, int len)
+cglobal scalarproduct_float_sse, 3,3,2, v1, v2, offset
+ neg offsetq
+ shl offsetq, 2
+ sub v1q, offsetq
+ sub v2q, offsetq
+ xorps xmm0, xmm0
+ .loop:
+ movaps xmm1, [v1q+offsetq]
+ mulps xmm1, [v2q+offsetq]
+ addps xmm0, xmm1
+ add offsetq, 16
+ js .loop
+ movhlps xmm1, xmm0
+ addps xmm0, xmm1
+ movss xmm1, xmm0
+ shufps xmm0, xmm0, 1
+ addss xmm0, xmm1
+%ifndef ARCH_X86_64
+ movd r0m, xmm0
+ fld dword r0m
+%endif
+ RET
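scalarproduct_float accumulates four products per iteration and then folds the vector lanes; on x86_32 the result is additionally pushed onto the x87 stack to satisfy the float return convention. Its scalar meaning is simply (sketch, not code from this commit):

static float scalarproduct_float_ref(const float *v1, const float *v2, int len)
{
    float p = 0.0f;
    for (int i = 0; i < len; i++)
        p += v1[i] * v2[i];
    return p;
}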
diff --git a/plugins/ffap/ffap.c b/plugins/ffap/ffap.c
index 7f9022c9..ccfa2b2d 100644
--- a/plugins/ffap/ffap.c
+++ b/plugins/ffap/ffap.c
@@ -1275,89 +1275,8 @@ typedef int x86_reg;
typedef struct { uint64_t a, b; } xmm_reg;
#define DECLARE_ALIGNED(n,t,v) t v __attribute__ ((aligned (n)))
#define DECLARE_ALIGNED_16(t, v) DECLARE_ALIGNED(16, t, v)
-static int32_t scalarproduct_int16_sse2 (int16_t * v1, int16_t * v2, int order, int shift)
-{
- int res = 0;
- DECLARE_ALIGNED_16(xmm_reg, sh);
- x86_reg o = -(order << 1);
-
- v1 += order;
- v2 += order;
- sh.a = shift;
- __asm__ volatile(
- "pxor %%xmm7, %%xmm7 \n\t"
- "1: \n\t"
- "movdqu (%0,%3), %%xmm0 \n\t"
- "movdqu 16(%0,%3), %%xmm1 \n\t"
- "pmaddwd (%1,%3), %%xmm0 \n\t"
- "pmaddwd 16(%1,%3), %%xmm1 \n\t"
- "paddd %%xmm0, %%xmm7 \n\t"
- "paddd %%xmm1, %%xmm7 \n\t"
- "add $32, %3 \n\t"
- "js 1b \n\t"
- "movhlps %%xmm7, %%xmm2 \n\t"
- "paddd %%xmm2, %%xmm7 \n\t"
- "psrad %4, %%xmm7 \n\t"
- "pshuflw $0x4E, %%xmm7,%%xmm2 \n\t"
- "paddd %%xmm2, %%xmm7 \n\t"
- "movd %%xmm7, %2 \n\t"
- : "+r"(v1), "+r"(v2), "=r"(res), "+r"(o)
- : "m"(sh)
- );
- return res;
-}
-static void add_int16_sse2(int16_t * v1, int16_t * v2, int order)
-{
- x86_reg o = -(order << 1);
- v1 += order;
- v2 += order;
- __asm__ volatile(
- "1: \n\t"
- "movdqu (%1,%2), %%xmm0 \n\t"
- "movdqu 16(%1,%2), %%xmm1 \n\t"
- "paddw (%0,%2), %%xmm0 \n\t"
- "paddw 16(%0,%2), %%xmm1 \n\t"
- "movdqa %%xmm0, (%0,%2) \n\t"
- "movdqa %%xmm1, 16(%0,%2) \n\t"
- "add $32, %2 \n\t"
- "js 1b \n\t"
- : "+r"(v1), "+r"(v2), "+r"(o)
- );
-}
-
-static void sub_int16_sse2(int16_t * v1, int16_t * v2, int order)
-{
- x86_reg o = -(order << 1);
- v1 += order;
- v2 += order;
- __asm__ volatile(
- "1: \n\t"
- "movdqa (%0,%2), %%xmm0 \n\t"
- "movdqa 16(%0,%2), %%xmm2 \n\t"
- "movdqu (%1,%2), %%xmm1 \n\t"
- "movdqu 16(%1,%2), %%xmm3 \n\t"
- "psubw %%xmm1, %%xmm0 \n\t"
- "psubw %%xmm3, %%xmm2 \n\t"
- "movdqa %%xmm0, (%0,%2) \n\t"
- "movdqa %%xmm2, 16(%0,%2) \n\t"
- "add $32, %2 \n\t"
- "js 1b \n\t"
- : "+r"(v1), "+r"(v2), "+r"(o)
- );
-}
#endif
-static int32_t
-scalarproduct_int16_c(int16_t * v1, int16_t * v2, int order, int shift)
-{
- int res = 0;
-
- while (order--)
- res += (*v1++ * *v2++) >> shift;
-
- return res;
-}
-
static int32_t scalarproduct_and_madd_int16_c(int16_t *v1, const int16_t *v2, const int16_t *v3, int order, int mul)
{
int res = 0;
@@ -1368,32 +1287,9 @@ static int32_t scalarproduct_and_madd_int16_c(int16_t *v1, const int16_t *v2, co
return res;
}
-static void
-add_int16_c (int16_t *v1/*align 16*/, int16_t *v2, int len) {
- while (len--) {
- *v1++ += *v2++;
- }
-}
-
-static void
-sub_int16_c (int16_t *v1/*align 16*/, int16_t *v2, int len) {
- while (len--) {
- *v1++ -= *v2++;
- }
-}
-
-static int32_t
-(*scalarproduct_int16)(int16_t * v1, int16_t * v2, int order, int shift);
-
static int32_t
(*scalarproduct_and_madd_int16)(int16_t *v1, const int16_t *v2, const int16_t *v3, int order, int mul);
-static void
-(*add_int16) (int16_t *v1/*align 16*/, int16_t *v2, int len);
-
-static void
-(*sub_int16) (int16_t *v1/*align 16*/, int16_t *v2, int len);
-
static inline int16_t clip_int16(int a)
{
if ((a+32768) & ~65535) return (a>>31) ^ 32767;
@@ -1425,17 +1321,6 @@ static inline void do_apply_filter(APEContext * ctx, int version, APEFilter *f,
int absres;
while (count--) {
-#if 0
- /* round fixedpoint scalar product */
- res = (scalarproduct_int16(f->delay - order, f->coeffs, order, 0) + (1 << (fracbits - 1))) >> fracbits;
-
-
- if (*data < 0)
- add_int16(f->coeffs, f->adaptcoeffs - order, order);
- else if (*data > 0)
- sub_int16(f->coeffs, f->adaptcoeffs - order, order);
-#endif
-
res = scalarproduct_and_madd_int16(f->coeffs, f->delay - order, f->adaptcoeffs - order, order, APESIGN(*data));
res = (res + (1 << (fracbits - 1))) >> fracbits;
res += *data;
@@ -2019,6 +1904,8 @@ static DB_decoder_t plugin = {
#if HAVE_SSE2 && !ARCH_UNKNOWN
+int32_t ff_scalarproduct_and_madd_int16_sse2(int16_t *v1, const int16_t *v2, const int16_t *v3, int order, int mul);
+
#define FF_MM_MMX 0x0001 ///< standard MMX
#define FF_MM_3DNOW 0x0004 ///< AMD 3DNOW
#define FF_MM_MMX2 0x0002 ///< SSE integer functions or AMD MMX ext
@@ -2131,32 +2018,21 @@ DB_plugin_t *
ffap_load (DB_functions_t *api) {
// detect sse2
#if ARCH_ARM
- scalarproduct_int16 = EXTERN_ASMff_scalarproduct_int16_neon;
scalarproduct_and_madd_int16 = EXTERN_ASMff_scalarproduct_and_madd_int16_neon;
- add_int16 = add_int16_c;
- sub_int16 = sub_int16_c;
#elif HAVE_SSE2 && !ARCH_UNKNOWN
-#error SSE2 version is broken in this branch, missing ff_scalarproduct_and_madd_int16_sse2
trace ("ffap: was compiled with sse2 support\n");
int mm_flags = mm_support ();
if (mm_flags & FF_MM_SSE2) {
trace ("ffap: sse2 support detected\n");
- scalarproduct_int16 = scalarproduct_int16_sse2;
- add_int16 = add_int16_sse2;
- sub_int16 = sub_int16_sse2;
+ scalarproduct_and_madd_int16 = ff_scalarproduct_and_madd_int16_sse2;
}
else {
trace ("ffap: sse2 is not supported by CPU\n");
- scalarproduct_int16 = scalarproduct_int16_c;
- add_int16 = add_int16_c;
- sub_int16 = sub_int16_c;
+ scalarproduct_and_madd_int16 = scalarproduct_and_madd_int16_c;
}
#else
// trace ("ffap: sse2 support was not compiled in\n");
- scalarproduct_int16 = scalarproduct_int16_c;
scalarproduct_and_madd_int16 = scalarproduct_and_madd_int16_c;
- add_int16 = add_int16_c;
- sub_int16 = sub_int16_c;
#endif
deadbeef = api;
return DB_PLUGIN (&plugin);
diff --git a/plugins/ffap/x86inc.asm b/plugins/ffap/x86inc.asm
new file mode 100644
index 00000000..c29ef3ee
--- /dev/null
+++ b/plugins/ffap/x86inc.asm
@@ -0,0 +1,625 @@
+;*****************************************************************************
+;* x86inc.asm
+;*****************************************************************************
+;* Copyright (C) 2005-2008 Loren Merritt <lorenm@u.washington.edu>
+;*
+;* This file is part of FFmpeg.
+;*
+;* FFmpeg is free software; you can redistribute it and/or
+;* modify it under the terms of the GNU Lesser General Public
+;* License as published by the Free Software Foundation; either
+;* version 2.1 of the License, or (at your option) any later version.
+;*
+;* FFmpeg is distributed in the hope that it will be useful,
+;* but WITHOUT ANY WARRANTY; without even the implied warranty of
+;* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+;* Lesser General Public License for more details.
+;*
+;* You should have received a copy of the GNU Lesser General Public
+;* License along with FFmpeg; if not, write to the Free Software
+;* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+;*****************************************************************************
+
+%ifdef ARCH_X86_64
+ %ifidn __OUTPUT_FORMAT__,win32
+ %define WIN64
+ %else
+ %define UNIX64
+ %endif
+%endif
+
+; FIXME: All of the 64bit asm functions that take a stride as an argument
+; via register, assume that the high dword of that register is filled with 0.
+; This is true in practice (since we never do any 64bit arithmetic on strides,
+; and x264's strides are all positive), but is not guaranteed by the ABI.
+
+; Name of the .rodata section.
+; Kludge: Something on OS X fails to align .rodata even given an align attribute,
+; so use a different read-only section.
+%macro SECTION_RODATA 0-1 16
+ %ifidn __OUTPUT_FORMAT__,macho64
+ SECTION .text align=%1
+ %elifidn __OUTPUT_FORMAT__,macho
+ SECTION .text align=%1
+ fakegot:
+ %else
+ SECTION .rodata align=%1
+ %endif
+%endmacro
+
+; PIC support macros.
+; x86_64 can't fit 64bit address literals in most instruction types,
+; so shared objects (under the assumption that they might be anywhere
+; in memory) must use an address mode that does fit.
+; So all accesses to global variables must use this macro, e.g.
+; mov eax, [foo GLOBAL]
+; instead of
+; mov eax, [foo]
+;
+; x86_32 doesn't require PIC.
+; Some distros prefer shared objects to be PIC, but nothing breaks if
+; the code contains a few textrels, so we'll skip that complexity.
+
+%ifdef WIN64
+ %define PIC
+%elifndef ARCH_X86_64
+ %undef PIC
+%endif
+%ifdef PIC
+ %define GLOBAL wrt rip
+%else
+ %define GLOBAL
+%endif
+
+; Macros to eliminate most code duplication between x86_32 and x86_64:
+; Currently this works only for leaf functions which load all their arguments
+; into registers at the start, and make no other use of the stack. Luckily that
+; covers most of x264's asm.
+
+; PROLOGUE:
+; %1 = number of arguments. loads them from stack if needed.
+; %2 = number of registers used. pushes callee-saved regs if needed.
+; %3 = number of xmm registers used. pushes callee-saved xmm regs if needed.
+; %4 = list of names to define to registers
+; PROLOGUE can also be invoked by adding the same options to cglobal
+
+; e.g.
+; cglobal foo, 2,3,0, dst, src, tmp
+; declares a function (foo), taking two args (dst and src) and one local variable (tmp)
+
+; TODO Some functions can use some args directly from the stack. If they're the
+; last args then you can just not declare them, but if they're in the middle
+; we need more flexible macro.
+
+; RET:
+; Pops anything that was pushed by PROLOGUE
+
+; REP_RET:
+; Same, but if it doesn't pop anything it becomes a 2-byte ret, for athlons
+; which are slow when a normal ret follows a branch.
+
+; registers:
+; rN and rNq are the native-size register holding function argument N
+; rNd, rNw, rNb are dword, word, and byte size
+; rNm is the original location of arg N (a register or on the stack), dword
+; rNmp is native size
+
+%macro DECLARE_REG 6
+ %define r%1q %2
+ %define r%1d %3
+ %define r%1w %4
+ %define r%1b %5
+ %define r%1m %6
+ %ifid %6 ; i.e. it's a register
+ %define r%1mp %2
+ %elifdef ARCH_X86_64 ; memory
+ %define r%1mp qword %6
+ %else
+ %define r%1mp dword %6
+ %endif
+ %define r%1 %2
+%endmacro
+
+%macro DECLARE_REG_SIZE 2
+ %define r%1q r%1
+ %define e%1q r%1
+ %define r%1d e%1
+ %define e%1d e%1
+ %define r%1w %1
+ %define e%1w %1
+ %define r%1b %2
+ %define e%1b %2
+%ifndef ARCH_X86_64
+ %define r%1 e%1
+%endif
+%endmacro
+
+DECLARE_REG_SIZE ax, al
+DECLARE_REG_SIZE bx, bl
+DECLARE_REG_SIZE cx, cl
+DECLARE_REG_SIZE dx, dl
+DECLARE_REG_SIZE si, sil
+DECLARE_REG_SIZE di, dil
+DECLARE_REG_SIZE bp, bpl
+
+; t# defines for when per-arch register allocation is more complex than just function arguments
+
+%macro DECLARE_REG_TMP 1-*
+ %assign %%i 0
+ %rep %0
+ CAT_XDEFINE t, %%i, r%1
+ %assign %%i %%i+1
+ %rotate 1
+ %endrep
+%endmacro
+
+%macro DECLARE_REG_TMP_SIZE 0-*
+ %rep %0
+ %define t%1q t%1 %+ q
+ %define t%1d t%1 %+ d
+ %define t%1w t%1 %+ w
+ %define t%1b t%1 %+ b
+ %rotate 1
+ %endrep
+%endmacro
+
+DECLARE_REG_TMP_SIZE 0,1,2,3,4,5,6,7
+
+%ifdef ARCH_X86_64
+ %define gprsize 8
+%else
+ %define gprsize 4
+%endif
+
+%macro PUSH 1
+ push %1
+ %assign stack_offset stack_offset+gprsize
+%endmacro
+
+%macro POP 1
+ pop %1
+ %assign stack_offset stack_offset-gprsize
+%endmacro
+
+%macro SUB 2
+ sub %1, %2
+ %ifidn %1, rsp
+ %assign stack_offset stack_offset+(%2)
+ %endif
+%endmacro
+
+%macro ADD 2
+ add %1, %2
+ %ifidn %1, rsp
+ %assign stack_offset stack_offset-(%2)
+ %endif
+%endmacro
+
+%macro movifnidn 2
+ %ifnidn %1, %2
+ mov %1, %2
+ %endif
+%endmacro
+
+%macro movsxdifnidn 2
+ %ifnidn %1, %2
+ movsxd %1, %2
+ %endif
+%endmacro
+
+%macro ASSERT 1
+ %if (%1) == 0
+ %error assert failed
+ %endif
+%endmacro
+
+%macro DEFINE_ARGS 0-*
+ %ifdef n_arg_names
+ %assign %%i 0
+ %rep n_arg_names
+ CAT_UNDEF arg_name %+ %%i, q
+ CAT_UNDEF arg_name %+ %%i, d
+ CAT_UNDEF arg_name %+ %%i, w
+ CAT_UNDEF arg_name %+ %%i, b
+ CAT_UNDEF arg_name %+ %%i, m
+ CAT_UNDEF arg_name, %%i
+ %assign %%i %%i+1
+ %endrep
+ %endif
+
+ %assign %%i 0
+ %rep %0
+ %xdefine %1q r %+ %%i %+ q
+ %xdefine %1d r %+ %%i %+ d
+ %xdefine %1w r %+ %%i %+ w
+ %xdefine %1b r %+ %%i %+ b
+ %xdefine %1m r %+ %%i %+ m
+ CAT_XDEFINE arg_name, %%i, %1
+ %assign %%i %%i+1
+ %rotate 1
+ %endrep
+ %assign n_arg_names %%i
+%endmacro
+
+%ifdef WIN64 ; Windows x64 ;=================================================
+
+DECLARE_REG 0, rcx, ecx, cx, cl, ecx
+DECLARE_REG 1, rdx, edx, dx, dl, edx
+DECLARE_REG 2, r8, r8d, r8w, r8b, r8d
+DECLARE_REG 3, r9, r9d, r9w, r9b, r9d
+DECLARE_REG 4, rdi, edi, di, dil, [rsp + stack_offset + 40]
+DECLARE_REG 5, rsi, esi, si, sil, [rsp + stack_offset + 48]
+DECLARE_REG 6, rax, eax, ax, al, [rsp + stack_offset + 56]
+%define r7m [rsp + stack_offset + 64]
+%define r8m [rsp + stack_offset + 72]
+
+%macro LOAD_IF_USED 2 ; reg_id, number_of_args
+ %if %1 < %2
+ mov r%1, [rsp + stack_offset + 8 + %1*8]
+ %endif
+%endmacro
+
+%macro PROLOGUE 2-4+ ; #args, #regs, #xmm_regs, arg_names...
+ ASSERT %2 >= %1
+ %assign regs_used %2
+ ASSERT regs_used <= 7
+ %if %0 > 2
+ %assign xmm_regs_used %3
+ %else
+ %assign xmm_regs_used 0
+ %endif
+ ASSERT xmm_regs_used <= 16
+ %if regs_used > 4
+ push r4
+ push r5
+ %assign stack_offset stack_offset+16
+ %endif
+ %if xmm_regs_used > 6
+ sub rsp, (xmm_regs_used-6)*16+16
+ %assign stack_offset stack_offset+(xmm_regs_used-6)*16+16
+ %assign %%i xmm_regs_used
+ %rep (xmm_regs_used-6)
+ %assign %%i %%i-1
+ movdqa [rsp + (%%i-6)*16+8], xmm %+ %%i
+ %endrep
+ %endif
+ LOAD_IF_USED 4, %1
+ LOAD_IF_USED 5, %1
+ LOAD_IF_USED 6, %1
+ DEFINE_ARGS %4
+%endmacro
+
+%macro RESTORE_XMM_INTERNAL 1
+ %if xmm_regs_used > 6
+ %assign %%i xmm_regs_used
+ %rep (xmm_regs_used-6)
+ %assign %%i %%i-1
+ movdqa xmm %+ %%i, [%1 + (%%i-6)*16+8]
+ %endrep
+ add %1, (xmm_regs_used-6)*16+16
+ %endif
+%endmacro
+
+%macro RESTORE_XMM 1
+ RESTORE_XMM_INTERNAL %1
+ %assign stack_offset stack_offset-(xmm_regs_used-6)*16+16
+ %assign xmm_regs_used 0
+%endmacro
+
+%macro RET 0
+ RESTORE_XMM_INTERNAL rsp
+ %if regs_used > 4
+ pop r5
+ pop r4
+ %endif
+ ret
+%endmacro
+
+%macro REP_RET 0
+ %if regs_used > 4 || xmm_regs_used > 6
+ RET
+ %else
+ rep ret
+ %endif
+%endmacro
+
+%elifdef ARCH_X86_64 ; *nix x64 ;=============================================
+
+DECLARE_REG 0, rdi, edi, di, dil, edi
+DECLARE_REG 1, rsi, esi, si, sil, esi
+DECLARE_REG 2, rdx, edx, dx, dl, edx
+DECLARE_REG 3, rcx, ecx, cx, cl, ecx
+DECLARE_REG 4, r8, r8d, r8w, r8b, r8d
+DECLARE_REG 5, r9, r9d, r9w, r9b, r9d
+DECLARE_REG 6, rax, eax, ax, al, [rsp + stack_offset + 8]
+%define r7m [rsp + stack_offset + 16]
+%define r8m [rsp + stack_offset + 24]
+
+%macro LOAD_IF_USED 2 ; reg_id, number_of_args
+ %if %1 < %2
+ mov r%1, [rsp - 40 + %1*8]
+ %endif
+%endmacro
+
+%macro PROLOGUE 2-4+ ; #args, #regs, #xmm_regs, arg_names...
+ ASSERT %2 >= %1
+ ASSERT %2 <= 7
+ LOAD_IF_USED 6, %1
+ DEFINE_ARGS %4
+%endmacro
+
+%macro RET 0
+ ret
+%endmacro
+
+%macro REP_RET 0
+ rep ret
+%endmacro
+
+%else ; X86_32 ;==============================================================
+
+DECLARE_REG 0, eax, eax, ax, al, [esp + stack_offset + 4]
+DECLARE_REG 1, ecx, ecx, cx, cl, [esp + stack_offset + 8]
+DECLARE_REG 2, edx, edx, dx, dl, [esp + stack_offset + 12]
+DECLARE_REG 3, ebx, ebx, bx, bl, [esp + stack_offset + 16]
+DECLARE_REG 4, esi, esi, si, null, [esp + stack_offset + 20]
+DECLARE_REG 5, edi, edi, di, null, [esp + stack_offset + 24]
+DECLARE_REG 6, ebp, ebp, bp, null, [esp + stack_offset + 28]
+%define r7m [esp + stack_offset + 32]
+%define r8m [esp + stack_offset + 36]
+%define rsp esp
+
+%macro PUSH_IF_USED 1 ; reg_id
+ %if %1 < regs_used
+ push r%1
+ %assign stack_offset stack_offset+4
+ %endif
+%endmacro
+
+%macro POP_IF_USED 1 ; reg_id
+ %if %1 < regs_used
+ pop r%1
+ %endif
+%endmacro
+
+%macro LOAD_IF_USED 2 ; reg_id, number_of_args
+ %if %1 < %2
+ mov r%1, [esp + stack_offset + 4 + %1*4]
+ %endif
+%endmacro
+
+%macro PROLOGUE 2-4+ ; #args, #regs, arg_names...
+ ASSERT %2 >= %1
+ %assign regs_used %2
+ ASSERT regs_used <= 7
+ PUSH_IF_USED 3
+ PUSH_IF_USED 4
+ PUSH_IF_USED 5
+ PUSH_IF_USED 6
+ LOAD_IF_USED 0, %1
+ LOAD_IF_USED 1, %1
+ LOAD_IF_USED 2, %1
+ LOAD_IF_USED 3, %1
+ LOAD_IF_USED 4, %1
+ LOAD_IF_USED 5, %1
+ LOAD_IF_USED 6, %1
+ DEFINE_ARGS %4
+%endmacro
+
+%macro RET 0
+ POP_IF_USED 6
+ POP_IF_USED 5
+ POP_IF_USED 4
+ POP_IF_USED 3
+ ret
+%endmacro
+
+%macro REP_RET 0
+ %if regs_used > 3
+ RET
+ %else
+ rep ret
+ %endif
+%endmacro
+
+%endif ;======================================================================
+
+
+
+;=============================================================================
+; arch-independent part
+;=============================================================================
+
+%assign function_align 16
+
+; Symbol prefix for C linkage
+%macro cglobal 1-2+
+ %xdefine %1 ff_%1
+ %ifdef PREFIX
+ %xdefine %1 _ %+ %1
+ %endif
+ %xdefine %1.skip_prologue %1 %+ .skip_prologue
+ %ifidn __OUTPUT_FORMAT__,elf
+ global %1:function hidden
+ %else
+ global %1
+ %endif
+ align function_align
+ %1:
+ RESET_MM_PERMUTATION ; not really needed, but makes disassembly somewhat nicer
+ %assign stack_offset 0
+ %if %0 > 1
+ PROLOGUE %2
+ %endif
+%endmacro
+
+%macro cextern 1
+ %ifdef PREFIX
+ %xdefine %1 _%1
+ %endif
+ extern %1
+%endmacro
+
+; This is needed for ELF, otherwise the GNU linker assumes the stack is
+; executable by default.
+%ifidn __OUTPUT_FORMAT__,elf
+SECTION .note.GNU-stack noalloc noexec nowrite progbits
+%endif
+
+%assign FENC_STRIDE 16
+%assign FDEC_STRIDE 32
+
+; merge mmx and sse*
+
+%macro CAT_XDEFINE 3
+ %xdefine %1%2 %3
+%endmacro
+
+%macro CAT_UNDEF 2
+ %undef %1%2
+%endmacro
+
+%macro INIT_MMX 0
+ %define RESET_MM_PERMUTATION INIT_MMX
+ %define mmsize 8
+ %define num_mmregs 8
+ %define mova movq
+ %define movu movq
+ %define movh movd
+ %define movnt movntq
+ %assign %%i 0
+ %rep 8
+ CAT_XDEFINE m, %%i, mm %+ %%i
+ CAT_XDEFINE nmm, %%i, %%i
+ %assign %%i %%i+1
+ %endrep
+ %rep 8
+ CAT_UNDEF m, %%i
+ CAT_UNDEF nmm, %%i
+ %assign %%i %%i+1
+ %endrep
+%endmacro
+
+%macro INIT_XMM 0
+ %define RESET_MM_PERMUTATION INIT_XMM
+ %define mmsize 16
+ %define num_mmregs 8
+ %ifdef ARCH_X86_64
+ %define num_mmregs 16
+ %endif
+ %define mova movdqa
+ %define movu movdqu
+ %define movh movq
+ %define movnt movntdq
+ %assign %%i 0
+ %rep num_mmregs
+ CAT_XDEFINE m, %%i, xmm %+ %%i
+ CAT_XDEFINE nxmm, %%i, %%i
+ %assign %%i %%i+1
+ %endrep
+%endmacro
+
+INIT_MMX
+
+; I often want to use macros that permute their arguments. e.g. there's no
+; efficient way to implement butterfly or transpose or dct without swapping some
+; arguments.
+;
+; I would like to not have to manually keep track of the permutations:
+; If I insert a permutation in the middle of a function, it should automatically
+; change everything that follows. For more complex macros I may also have multiple
+; implementations, e.g. the SSE2 and SSSE3 versions may have different permutations.
+;
+; Hence these macros. Insert a PERMUTE or some SWAPs at the end of a macro that
+; permutes its arguments. It's equivalent to exchanging the contents of the
+; registers, except that this way you exchange the register names instead, so it
+; doesn't cost any cycles.
+
+%macro PERMUTE 2-* ; takes a list of pairs to swap
+%rep %0/2
+ %xdefine tmp%2 m%2
+ %xdefine ntmp%2 nm%2
+ %rotate 2
+%endrep
+%rep %0/2
+ %xdefine m%1 tmp%2
+ %xdefine nm%1 ntmp%2
+ %undef tmp%2
+ %undef ntmp%2
+ %rotate 2
+%endrep
+%endmacro
+
+%macro SWAP 2-* ; swaps a single chain (sometimes more concise than pairs)
+%rep %0-1
+%ifdef m%1
+ %xdefine tmp m%1
+ %xdefine m%1 m%2
+ %xdefine m%2 tmp
+ CAT_XDEFINE n, m%1, %1
+ CAT_XDEFINE n, m%2, %2
+%else
+ ; If we were called as "SWAP m0,m1" rather than "SWAP 0,1" infer the original numbers here.
+ ; Be careful using this mode in nested macros though, as in some cases there may be
+ ; other copies of m# that have already been dereferenced and don't get updated correctly.
+ %xdefine %%n1 n %+ %1
+ %xdefine %%n2 n %+ %2
+ %xdefine tmp m %+ %%n1
+ CAT_XDEFINE m, %%n1, m %+ %%n2
+ CAT_XDEFINE m, %%n2, tmp
+ CAT_XDEFINE n, m %+ %%n1, %%n1
+ CAT_XDEFINE n, m %+ %%n2, %%n2
+%endif
+ %undef tmp
+ %rotate 1
+%endrep
+%endmacro
+
+%macro SAVE_MM_PERMUTATION 1
+ %assign %%i 0
+ %rep num_mmregs
+ CAT_XDEFINE %1_m, %%i, m %+ %%i
+ %assign %%i %%i+1
+ %endrep
+%endmacro
+
+%macro LOAD_MM_PERMUTATION 1
+ %assign %%i 0
+ %rep num_mmregs
+ CAT_XDEFINE m, %%i, %1_m %+ %%i
+ CAT_XDEFINE n, m %+ %%i, %%i
+ %assign %%i %%i+1
+ %endrep
+%endmacro
+
+%macro call 1
+ call %1
+ %ifdef %1_m0
+ LOAD_MM_PERMUTATION %1
+ %endif
+%endmacro
+
+;Substitutions that reduce instruction size but are functionally equivalent
+%macro add 2
+ %ifnum %2
+ %if %2==128
+ sub %1, -128
+ %else
+ add %1, %2
+ %endif
+ %else
+ add %1, %2
+ %endif
+%endmacro
+
+%macro sub 2
+ %ifnum %2
+ %if %2==128
+ add %1, -128
+ %else
+ sub %1, %2
+ %endif
+ %else
+ sub %1, %2
+ %endif
+%endmacro