From b21e0b6dfce5da423f6e2b760b7f2c784748b3c7 Mon Sep 17 00:00:00 2001
From: Michael Niedermayer
Date: Sat, 17 Feb 2007 23:43:02 +0000
Subject: [PATCH] rewrite H264_CHROMA_MC4_TMPL (20% faster)

Originally committed as revision 8012 to svn://svn.ffmpeg.org/ffmpeg/trunk
---
 libavcodec/i386/dsputil_h264_template_mmx.c | 134 +++++++++++++---------------
 1 file changed, 61 insertions(+), 73 deletions(-)

diff --git a/libavcodec/i386/dsputil_h264_template_mmx.c b/libavcodec/i386/dsputil_h264_template_mmx.c
index 38f94114d..cfa012c48 100644
--- a/libavcodec/i386/dsputil_h264_template_mmx.c
+++ b/libavcodec/i386/dsputil_h264_template_mmx.c
@@ -184,82 +184,70 @@ static void H264_CHROMA_MC8_TMPL(uint8_t *dst/*align 8*/, uint8_t *src/*align 1*
 static void H264_CHROMA_MC4_TMPL(uint8_t *dst/*align 4*/, uint8_t *src/*align 1*/, int stride, int h, int x, int y)
 {
-    DECLARE_ALIGNED_8(uint64_t, AA);
-    DECLARE_ALIGNED_8(uint64_t, DD);
-    int i;
-
-    /* no special case for mv=(0,0) in 4x*, since it's much less common than in 8x*.
-     * could still save a few cycles, but maybe not worth the complexity. */
-
-    assert(x<8 && y<8 && x>=0 && y>=0);
-
-    asm volatile("movd %2, %%mm4\n\t"
-                 "movd %3, %%mm6\n\t"
-                 "punpcklwd %%mm4, %%mm4\n\t"
-                 "punpcklwd %%mm6, %%mm6\n\t"
-                 "punpckldq %%mm4, %%mm4\n\t" /* mm4 = x words */
-                 "punpckldq %%mm6, %%mm6\n\t" /* mm6 = y words */
-                 "movq %%mm4, %%mm5\n\t"
-                 "pmullw %%mm6, %%mm4\n\t" /* mm4 = x * y */
-                 "psllw $3, %%mm5\n\t"
-                 "psllw $3, %%mm6\n\t"
-                 "movq %%mm5, %%mm7\n\t"
-                 "paddw %%mm6, %%mm7\n\t"
-                 "movq %%mm4, %1\n\t" /* DD = x * y */
-                 "psubw %%mm4, %%mm5\n\t" /* mm5 = B = 8x - xy */
-                 "psubw %%mm4, %%mm6\n\t" /* mm6 = C = 8y - xy */
-                 "paddw %4, %%mm4\n\t"
-                 "psubw %%mm7, %%mm4\n\t" /* mm4 = A = xy - (8x+8y) + 64 */
-                 "pxor %%mm7, %%mm7\n\t"
-                 "movq %%mm4, %0\n\t"
-                 : "=m" (AA), "=m" (DD) : "rm" (x), "rm" (y), "m" (ff_pw_64));
-
     asm volatile(
-        /* mm0 = src[0..3], mm1 = src[1..4] */
-        "movd %0, %%mm0\n\t"
-        "movd %1, %%mm1\n\t"
-        "punpcklbw %%mm7, %%mm0\n\t"
-        "punpcklbw %%mm7, %%mm1\n\t"
-        : : "m" (src[0]), "m" (src[1]));
+        "pxor %%mm7, %%mm7          \n\t"
+        "movd %5, %%mm2             \n\t"
+        "movd %6, %%mm3             \n\t"
+        "movq %7, %%mm4             \n\t"
+        "movq %7, %%mm5             \n\t"
+        "punpcklwd %%mm2, %%mm2     \n\t"
+        "punpcklwd %%mm3, %%mm3     \n\t"
+        "punpcklwd %%mm2, %%mm2     \n\t"
+        "punpcklwd %%mm3, %%mm3     \n\t"
+        "psubw %%mm2, %%mm4         \n\t"
+        "psubw %%mm3, %%mm5         \n\t"
 
-    for(i=0; i<h; i++) {
[...]
-            /* dst[0..3] = pack((mm2 + 32) >> 6) */
-            "paddw %1, %%mm2\n\t"
-            "psrlw $6, %%mm2\n\t"
-            "packuswb %%mm7, %%mm2\n\t"
-            H264_CHROMA_OP4(%0, %%mm2, %%mm3)
-            "movd %%mm2, %0\n\t"
-            : "=m" (dst[0]) : "m" (ff_pw_32));
-        dst += stride;
-    }
+        "movd (%1), %%mm0           \n\t"
+        "movd 1(%1), %%mm6          \n\t"
+        "add %3, %1                 \n\t"
+        "punpcklbw %%mm7, %%mm0     \n\t"
+        "punpcklbw %%mm7, %%mm6     \n\t"
+        "pmullw %%mm4, %%mm0        \n\t"
+        "pmullw %%mm2, %%mm6        \n\t"
+        "paddw %%mm0, %%mm6         \n\t"
+
+        "1:                         \n\t"
+        "movd (%1), %%mm0           \n\t"
+        "movd 1(%1), %%mm1          \n\t"
+        "add %3, %1                 \n\t"
+        "punpcklbw %%mm7, %%mm0     \n\t"
+        "punpcklbw %%mm7, %%mm1     \n\t"
+        "pmullw %%mm4, %%mm0        \n\t"
+        "pmullw %%mm2, %%mm1        \n\t"
+        "paddw %%mm0, %%mm1         \n\t"
+        "movq %%mm1, %%mm0          \n\t"
+        "pmullw %%mm5, %%mm6        \n\t"
+        "pmullw %%mm3, %%mm1        \n\t"
+        "paddw %%mm6, %%mm1         \n\t"
+        "paddw %4, %%mm1            \n\t"
+        "psrlw $6, %%mm1            \n\t"
+        "packuswb %%mm1, %%mm1      \n\t"
+        H264_CHROMA_OP4((%0), %%mm1, %%mm6)
+        "movd %%mm1, (%0)           \n\t"
+        "add %3, %0                 \n\t"
+
+        "movd (%1), %%mm6           \n\t"
+        "movd 1(%1), %%mm1          \n\t"
+        "add %3, %1                 \n\t"
+        "punpcklbw %%mm7, %%mm6     \n\t"
+        "punpcklbw %%mm7, %%mm1     \n\t"
+        "pmullw %%mm4, %%mm6        \n\t"
+        "pmullw %%mm2, %%mm1        \n\t"
+        "paddw %%mm6, %%mm1         \n\t"
+        "movq %%mm1, %%mm6          \n\t"
+        "pmullw %%mm5, %%mm0        \n\t"
+        "pmullw %%mm3, %%mm1        \n\t"
+        "paddw %%mm0, %%mm1         \n\t"
+        "paddw %4, %%mm1            \n\t"
+        "psrlw $6, %%mm1            \n\t"
+        "packuswb %%mm1, %%mm1      \n\t"
+        H264_CHROMA_OP4((%0), %%mm1, %%mm0)
+        "movd %%mm1, (%0)           \n\t"
+        "add %3, %0                 \n\t"
+        "sub $2, %2                 \n\t"
+        "jnz 1b                     \n\t"
+        : "+r"(dst), "+r"(src), "+r"(h)
+        : "r"(stride), "m"(ff_pw_32), "m"(x), "m"(y), "m"(ff_pw_8)
+    );
 }
 
 #ifdef H264_CHROMA_MC2_TMPL
-- 
2.11.0
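
For reference, both the deleted and the new code compute H.264's bilinear
chroma interpolation. The comments in the deleted code spell out the
weights: A = xy - (8x+8y) + 64 = (8-x)*(8-y), B = 8x - xy = x*(8-y),
C = 8y - xy = (8-x)*y and D = xy, so A+B+C+D = 64 and each output pixel
is rounded as (sum + 32) >> 6. A minimal plain-C sketch of the "put"
variant (the case where H264_CHROMA_OP4 expands to nothing) follows; the
function name chroma_mc4_ref is illustrative, not an FFmpeg symbol.

#include <stdint.h>

/* Reference model for H264_CHROMA_MC4_TMPL ("put" case): bilinear blend
 * of a 2x2 pixel neighbourhood with eighth-pel fractions x and y. */
static void chroma_mc4_ref(uint8_t *dst, uint8_t *src, int stride,
                           int h, int x, int y)
{
    const int A = (8 - x) * (8 - y);
    const int B =      x  * (8 - y);
    const int C = (8 - x) *      y;
    const int D =      x  *      y;
    int i, j;

    for (i = 0; i < h; i++) {
        for (j = 0; j < 4; j++) {
            /* weights sum to 64, so (v + 32) >> 6 never exceeds 255 */
            dst[j] = (A * src[j]          + B * src[j + 1] +
                      C * src[j + stride] + D * src[j + stride + 1] +
                      32) >> 6;
        }
        dst += stride;
        src += stride;
    }
}

The rewritten asm factors this as a horizontal lerp per row,
(8-x)*src[j] + x*src[j+1], followed by a vertical lerp between rows,
(8-y)*prev + y*cur. Each row's horizontal result stays in a register
(mm6, then mm0) and is reused as "prev" for the next output row, and the
loop is unrolled to two rows per iteration ("sub $2, %2"). Most of the
speedup likely comes from fusing the old per-row asm fragments into one
block, keeping coefficients and intermediate rows in registers instead
of round-tripping through AA and DD in memory.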