author    michael <michael@b3059339-0415-0410-9bf9-f77b7e298cf2>    2001-11-10 19:46:04 +0000
committer michael <michael@b3059339-0415-0410-9bf9-f77b7e298cf2>    2001-11-10 19:46:04 +0000
commit 5f43d4efb06ef7e5d8d3e5c5d574a1e160a8dea4 (patch)
tree   860e381bb8251c91673fef49b35bc5ddfd143a20 /postproc
parent 672e0016c9db4c32d742831daaa273dfd7252125 (diff)
.align 16
git-svn-id: svn://svn.mplayerhq.hu/mplayer/trunk@2800 b3059339-0415-0410-9bf9-f77b7e298cf2
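
The change is mechanical: an ".align 16" directive is inserted in front of the "1:" branch target of each inner loop in the MMX inline assembly, so the loop entry point starts on a 16-byte boundary (".align 16" takes a byte count on x86 ELF, equivalent to ".p2align 4"). As a minimal, hypothetical sketch of the pattern (fill_bytes and its operands are illustrative, not taken from the patch), assuming GCC-style inline assembly on an x86 target:

#include <stddef.h>
#include <stdint.h>

/* Hypothetical helper, not from the patch: a trivial byte-fill loop.
 * The ".align 16" pads the instruction stream with NOPs so that the
 * "1:" branch target begins on a 16-byte boundary, mirroring what this
 * commit adds in front of every hot loop label. */
static void fill_bytes(uint8_t *dst, uint8_t value, size_t n)
{
    if (n == 0)
        return;
    __asm__ volatile(
        ".align 16            \n\t"  /* align the loop entry point */
        "1:                   \n\t"
        "movb %b2, (%0)       \n\t"  /* *dst = value               */
        "inc  %0              \n\t"  /* dst++                      */
        "dec  %1              \n\t"  /* n--                        */
        "jnz  1b              \n\t"  /* repeat while n != 0        */
        : "+r"(dst), "+r"(n)
        : "q"(value)
        : "memory", "cc");
}

Aligning branch targets this way was a common micro-optimization for the instruction-fetch units of CPUs of that era; it does not change the behaviour of the loops, only their placement in the instruction stream.
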
Diffstat (limited to 'postproc')
-rw-r--r--  postproc/rgb2rgb.c           | 4
-rw-r--r--  postproc/rgb2rgb_template.c  | 4
-rw-r--r--  postproc/swscale.c           | 6
-rw-r--r--  postproc/swscale_template.c  | 6
4 files changed, 20 insertions, 0 deletions
diff --git a/postproc/rgb2rgb.c b/postproc/rgb2rgb.c
index 4dc9857582..2232343883 100644
--- a/postproc/rgb2rgb.c
+++ b/postproc/rgb2rgb.c
@@ -585,6 +585,7 @@ void rgb32tobgr32(const uint8_t *src, uint8_t *dst, unsigned int src_size)
#ifdef HAVE_MMX
asm volatile (
"xorl %%eax, %%eax \n\t"
+ ".align 16 \n\t"
"1: \n\t"
PREFETCH" 32(%0, %%eax) \n\t"
"movq (%0, %%eax), %%mm0 \n\t"
@@ -635,6 +636,7 @@ void yv12toyuy2(const uint8_t *ysrc, const uint8_t *usrc, const uint8_t *vsrc, u
//FIXME handle 2 lines a once (fewer prefetch, reuse some chrom, but very likely limited by mem anyway)
asm volatile(
"xorl %%eax, %%eax \n\t"
+ ".align 16 \n\t"
"1: \n\t"
PREFETCH" 32(%1, %%eax, 2) \n\t"
PREFETCH" 32(%2, %%eax) \n\t"
@@ -708,6 +710,7 @@ void yuy2toyv12(const uint8_t *src, uint8_t *ydst, uint8_t *udst, uint8_t *vdst,
"xorl %%eax, %%eax \n\t"
"pcmpeqw %%mm7, %%mm7 \n\t"
"psrlw $8, %%mm7 \n\t" // FF,00,FF,00...
+ ".align 16 \n\t"
"1: \n\t"
PREFETCH" 64(%0, %%eax, 4) \n\t"
"movq (%0, %%eax, 4), %%mm0 \n\t" // YUYV YUYV(0)
@@ -757,6 +760,7 @@ void yuy2toyv12(const uint8_t *src, uint8_t *ydst, uint8_t *udst, uint8_t *vdst,
asm volatile(
"xorl %%eax, %%eax \n\t"
+ ".align 16 \n\t"
"1: \n\t"
PREFETCH" 64(%0, %%eax, 4) \n\t"
"movq (%0, %%eax, 4), %%mm0 \n\t" // YUYV YUYV(0)
diff --git a/postproc/rgb2rgb_template.c b/postproc/rgb2rgb_template.c
index 4dc9857582..2232343883 100644
--- a/postproc/rgb2rgb_template.c
+++ b/postproc/rgb2rgb_template.c
@@ -585,6 +585,7 @@ void rgb32tobgr32(const uint8_t *src, uint8_t *dst, unsigned int src_size)
#ifdef HAVE_MMX
asm volatile (
"xorl %%eax, %%eax \n\t"
+ ".align 16 \n\t"
"1: \n\t"
PREFETCH" 32(%0, %%eax) \n\t"
"movq (%0, %%eax), %%mm0 \n\t"
@@ -635,6 +636,7 @@ void yv12toyuy2(const uint8_t *ysrc, const uint8_t *usrc, const uint8_t *vsrc, u
//FIXME handle 2 lines a once (fewer prefetch, reuse some chrom, but very likely limited by mem anyway)
asm volatile(
"xorl %%eax, %%eax \n\t"
+ ".align 16 \n\t"
"1: \n\t"
PREFETCH" 32(%1, %%eax, 2) \n\t"
PREFETCH" 32(%2, %%eax) \n\t"
@@ -708,6 +710,7 @@ void yuy2toyv12(const uint8_t *src, uint8_t *ydst, uint8_t *udst, uint8_t *vdst,
"xorl %%eax, %%eax \n\t"
"pcmpeqw %%mm7, %%mm7 \n\t"
"psrlw $8, %%mm7 \n\t" // FF,00,FF,00...
+ ".align 16 \n\t"
"1: \n\t"
PREFETCH" 64(%0, %%eax, 4) \n\t"
"movq (%0, %%eax, 4), %%mm0 \n\t" // YUYV YUYV(0)
@@ -757,6 +760,7 @@ void yuy2toyv12(const uint8_t *src, uint8_t *ydst, uint8_t *udst, uint8_t *vdst,
asm volatile(
"xorl %%eax, %%eax \n\t"
+ ".align 16 \n\t"
"1: \n\t"
PREFETCH" 64(%0, %%eax, 4) \n\t"
"movq (%0, %%eax, 4), %%mm0 \n\t" // YUYV YUYV(0)
diff --git a/postproc/swscale.c b/postproc/swscale.c
index f40eccf207..103c47d612 100644
--- a/postproc/swscale.c
+++ b/postproc/swscale.c
@@ -143,6 +143,7 @@ static int canMMX2BeUsed=0;
"punpcklwd %%mm5, %%mm5 \n\t"\
"punpcklwd %%mm5, %%mm5 \n\t"\
"xorl %%eax, %%eax \n\t"\
+ ".align 16 \n\t"\
"1: \n\t"\
"movq (%0, %%eax, 2), %%mm0 \n\t" /*buf0[eax]*/\
"movq (%1, %%eax, 2), %%mm1 \n\t" /*buf1[eax]*/\
@@ -196,6 +197,7 @@ static int canMMX2BeUsed=0;
"punpcklwd %%mm5, %%mm5 \n\t"\
"movq %%mm5, asm_uvalpha1 \n\t"\
"xorl %%eax, %%eax \n\t"\
+ ".align 16 \n\t"\
"1: \n\t"\
"movq (%2, %%eax), %%mm2 \n\t" /* uvbuf0[eax]*/\
"movq (%3, %%eax), %%mm3 \n\t" /* uvbuf1[eax]*/\
@@ -260,6 +262,7 @@ static int canMMX2BeUsed=0;
#define YSCALEYUV2RGB1 \
"xorl %%eax, %%eax \n\t"\
+ ".align 16 \n\t"\
"1: \n\t"\
"movq (%2, %%eax), %%mm3 \n\t" /* uvbuf0[eax]*/\
"movq 4096(%2, %%eax), %%mm4 \n\t" /* uvbuf0[eax+2048]*/\
@@ -308,6 +311,7 @@ static int canMMX2BeUsed=0;
// do vertical chrominance interpolation
#define YSCALEYUV2RGB1b \
"xorl %%eax, %%eax \n\t"\
+ ".align 16 \n\t"\
"1: \n\t"\
"movq (%2, %%eax), %%mm2 \n\t" /* uvbuf0[eax]*/\
"movq (%3, %%eax), %%mm3 \n\t" /* uvbuf1[eax]*/\
@@ -1306,6 +1310,7 @@ FUNNY_Y_CODE
"xorl %%eax, %%eax \n\t" // i
"xorl %%ebx, %%ebx \n\t" // xx
"xorl %%ecx, %%ecx \n\t" // 2*xalpha
+ ".align 16 \n\t"
"1: \n\t"
"movzbl (%0, %%ebx), %%edi \n\t" //src[xx]
"movzbl 1(%0, %%ebx), %%esi \n\t" //src[xx+1]
@@ -1437,6 +1442,7 @@ FUNNYUVCODE
"xorl %%eax, %%eax \n\t" // i
"xorl %%ebx, %%ebx \n\t" // xx
"xorl %%ecx, %%ecx \n\t" // 2*xalpha
+ ".align 16 \n\t"
"1: \n\t"
"movl %0, %%esi \n\t"
"movzbl (%%esi, %%ebx), %%edi \n\t" //src[xx]
diff --git a/postproc/swscale_template.c b/postproc/swscale_template.c
index f40eccf207..103c47d612 100644
--- a/postproc/swscale_template.c
+++ b/postproc/swscale_template.c
@@ -143,6 +143,7 @@ static int canMMX2BeUsed=0;
"punpcklwd %%mm5, %%mm5 \n\t"\
"punpcklwd %%mm5, %%mm5 \n\t"\
"xorl %%eax, %%eax \n\t"\
+ ".align 16 \n\t"\
"1: \n\t"\
"movq (%0, %%eax, 2), %%mm0 \n\t" /*buf0[eax]*/\
"movq (%1, %%eax, 2), %%mm1 \n\t" /*buf1[eax]*/\
@@ -196,6 +197,7 @@ static int canMMX2BeUsed=0;
"punpcklwd %%mm5, %%mm5 \n\t"\
"movq %%mm5, asm_uvalpha1 \n\t"\
"xorl %%eax, %%eax \n\t"\
+ ".align 16 \n\t"\
"1: \n\t"\
"movq (%2, %%eax), %%mm2 \n\t" /* uvbuf0[eax]*/\
"movq (%3, %%eax), %%mm3 \n\t" /* uvbuf1[eax]*/\
@@ -260,6 +262,7 @@ static int canMMX2BeUsed=0;
#define YSCALEYUV2RGB1 \
"xorl %%eax, %%eax \n\t"\
+ ".align 16 \n\t"\
"1: \n\t"\
"movq (%2, %%eax), %%mm3 \n\t" /* uvbuf0[eax]*/\
"movq 4096(%2, %%eax), %%mm4 \n\t" /* uvbuf0[eax+2048]*/\
@@ -308,6 +311,7 @@ static int canMMX2BeUsed=0;
// do vertical chrominance interpolation
#define YSCALEYUV2RGB1b \
"xorl %%eax, %%eax \n\t"\
+ ".align 16 \n\t"\
"1: \n\t"\
"movq (%2, %%eax), %%mm2 \n\t" /* uvbuf0[eax]*/\
"movq (%3, %%eax), %%mm3 \n\t" /* uvbuf1[eax]*/\
@@ -1306,6 +1310,7 @@ FUNNY_Y_CODE
"xorl %%eax, %%eax \n\t" // i
"xorl %%ebx, %%ebx \n\t" // xx
"xorl %%ecx, %%ecx \n\t" // 2*xalpha
+ ".align 16 \n\t"
"1: \n\t"
"movzbl (%0, %%ebx), %%edi \n\t" //src[xx]
"movzbl 1(%0, %%ebx), %%esi \n\t" //src[xx+1]
@@ -1437,6 +1442,7 @@ FUNNYUVCODE
"xorl %%eax, %%eax \n\t" // i
"xorl %%ebx, %%ebx \n\t" // xx
"xorl %%ecx, %%ecx \n\t" // 2*xalpha
+ ".align 16 \n\t"
"1: \n\t"
"movl %0, %%esi \n\t"
"movzbl (%%esi, %%ebx), %%edi \n\t" //src[xx]