diff --git a/libavcodec/x86/blockdsp.c b/libavcodec/x86/blockdsp.c index b5294242aba5671f0f826f8bc5fadcd992ae02d2..f14bb1f9ad0ecc76f44c1feb754423a3bb1d8c95 100644 --- a/libavcodec/x86/blockdsp.c +++ b/libavcodec/x86/blockdsp.c @@ -33,18 +33,18 @@ static void name(int16_t *blocks) \ { \ __asm__ volatile ( \ - "pxor %%mm7, %%mm7 \n\t" \ - "mov %1, %%"REG_a" \n\t" \ - "1: \n\t" \ - "movq %%mm7, (%0, %%"REG_a") \n\t" \ - "movq %%mm7, 8(%0, %%"REG_a") \n\t" \ - "movq %%mm7, 16(%0, %%"REG_a") \n\t" \ - "movq %%mm7, 24(%0, %%"REG_a") \n\t" \ - "add $32, %%"REG_a" \n\t" \ - "js 1b \n\t" \ + "pxor %%mm7, %%mm7 \n\t" \ + "mov %1, %%"FF_REG_a" \n\t" \ + "1: \n\t" \ + "movq %%mm7, (%0, %%"FF_REG_a") \n\t" \ + "movq %%mm7, 8(%0, %%"FF_REG_a") \n\t" \ + "movq %%mm7, 16(%0, %%"FF_REG_a") \n\t" \ + "movq %%mm7, 24(%0, %%"FF_REG_a") \n\t" \ + "add $32, %%"FF_REG_a" \n\t" \ + "js 1b \n\t" \ :: "r"(((uint8_t *) blocks) + 128 * n), \ "i"(-128 * n) \ - : "%"REG_a); \ + : "%"FF_REG_a); \ } CLEAR_BLOCKS(clear_blocks_mmx, 6) CLEAR_BLOCKS(clear_block_mmx, 1) @@ -68,21 +68,21 @@ static void clear_block_sse(int16_t *block) static void clear_blocks_sse(int16_t *blocks) { __asm__ volatile ( - "xorps %%xmm0, %%xmm0 \n" - "mov %1, %%"REG_a" \n" - "1: \n" - "movaps %%xmm0, (%0, %%"REG_a") \n" - "movaps %%xmm0, 16(%0, %%"REG_a") \n" - "movaps %%xmm0, 32(%0, %%"REG_a") \n" - "movaps %%xmm0, 48(%0, %%"REG_a") \n" - "movaps %%xmm0, 64(%0, %%"REG_a") \n" - "movaps %%xmm0, 80(%0, %%"REG_a") \n" - "movaps %%xmm0, 96(%0, %%"REG_a") \n" - "movaps %%xmm0, 112(%0, %%"REG_a") \n" - "add $128, %%"REG_a" \n" - "js 1b \n" + "xorps %%xmm0, %%xmm0 \n" + "mov %1, %%"FF_REG_a" \n" + "1: \n" + "movaps %%xmm0, (%0, %%"FF_REG_a") \n" + "movaps %%xmm0, 16(%0, %%"FF_REG_a") \n" + "movaps %%xmm0, 32(%0, %%"FF_REG_a") \n" + "movaps %%xmm0, 48(%0, %%"FF_REG_a") \n" + "movaps %%xmm0, 64(%0, %%"FF_REG_a") \n" + "movaps %%xmm0, 80(%0, %%"FF_REG_a") \n" + "movaps %%xmm0, 96(%0, %%"FF_REG_a") \n" + "movaps %%xmm0, 112(%0, %%"FF_REG_a") \n" + "add $128, %%"FF_REG_a" \n" + "js 1b \n" :: "r"(((uint8_t *) blocks) + 128 * 6), "i"(-128 * 6) - : "%"REG_a); + : "%"FF_REG_a); } #endif /* HAVE_INLINE_ASM */ diff --git a/libavcodec/x86/cabac.h b/libavcodec/x86/cabac.h index 40c29947eba7493b12c258365a118cbcb39e868b..205511ef61a6d953b6c04775f82c81efac13b28a 100644 --- a/libavcodec/x86/cabac.h +++ b/libavcodec/x86/cabac.h @@ -72,12 +72,12 @@ "mov "tmpbyte" , "statep" \n\t"\ "test "lowword" , "lowword" \n\t"\ "jnz 2f \n\t"\ - "mov "byte" , %%"REG_c" \n\t"\ - "cmp "end" , %%"REG_c" \n\t"\ + "mov "byte" , %%"FF_REG_c" \n\t"\ + "cmp "end" , %%"FF_REG_c" \n\t"\ "jge 1f \n\t"\ - "add"OPSIZE" $2 , "byte" \n\t"\ + "add"FF_OPSIZE" $2 , "byte" \n\t"\ "1: \n\t"\ - "movzwl (%%"REG_c") , "tmp" \n\t"\ + "movzwl (%%"FF_REG_c"), "tmp" \n\t"\ "lea -1("low") , %%ecx \n\t"\ "xor "low" , %%ecx \n\t"\ "shr $15 , %%ecx \n\t"\ @@ -133,12 +133,12 @@ "mov "tmpbyte" , "statep" \n\t"\ "test "lowword" , "lowword" \n\t"\ " jnz 2f \n\t"\ - "mov "byte" , %%"REG_c" \n\t"\ - "cmp "end" , %%"REG_c" \n\t"\ + "mov "byte" , %%"FF_REG_c" \n\t"\ + "cmp "end" , %%"FF_REG_c" \n\t"\ "jge 1f \n\t"\ - "add"OPSIZE" $2 , "byte" \n\t"\ + "add"FF_OPSIZE" $2 , "byte" \n\t"\ "1: \n\t"\ - "movzwl (%%"REG_c") , "tmp" \n\t"\ + "movzwl (%%"FF_REG_c") , "tmp" \n\t"\ "lea -1("low") , %%ecx \n\t"\ "xor "low" , %%ecx \n\t"\ "shr $15 , %%ecx \n\t"\ @@ -183,7 +183,7 @@ static av_always_inline int get_cabac_inline_x86(CABACContext *c, "i"(offsetof(CABACContext, bytestream)), "i"(offsetof(CABACContext, bytestream_end)) 
TABLES_ARG - : "%"REG_c, "memory" + : "%"FF_REG_c, "memory" ); return bit & 1; } @@ -214,7 +214,7 @@ static av_always_inline int get_cabac_bypass_sign_x86(CABACContext *c, int val) "addl %%edx, %%eax \n\t" "cmp %c5(%2), %1 \n\t" "jge 1f \n\t" - "add"OPSIZE" $2, %c4(%2) \n\t" + "add"FF_OPSIZE" $2, %c4(%2) \n\t" "1: \n\t" "movl %%eax, %c3(%2) \n\t" @@ -254,7 +254,7 @@ static av_always_inline int get_cabac_bypass_x86(CABACContext *c) "addl %%ecx, %%eax \n\t" "cmp %c5(%2), %1 \n\t" "jge 1f \n\t" - "add"OPSIZE" $2, %c4(%2) \n\t" + "add"FF_OPSIZE" $2, %c4(%2) \n\t" "1: \n\t" "movl %%eax, %c3(%2) \n\t" diff --git a/libavcodec/x86/fpel_mmx.c b/libavcodec/x86/fpel_mmx.c index eef05ecc74dd90c5c145799d6180f6d4be2236ed..813bcc2b37c8977e0e5cae5f130d81dce0a875e2 100644 --- a/libavcodec/x86/fpel_mmx.c +++ b/libavcodec/x86/fpel_mmx.c @@ -79,26 +79,26 @@ void ff_put_pixels8_mmx(uint8_t *block, const uint8_t *pixels, ptrdiff_t line_size, int h) { __asm__ volatile ( - "lea (%3, %3), %%"REG_a" \n\t" + "lea (%3, %3), %%"FF_REG_a" \n\t" ".p2align 3 \n\t" "1: \n\t" "movq (%1 ), %%mm0 \n\t" "movq (%1, %3), %%mm1 \n\t" "movq %%mm0, (%2) \n\t" "movq %%mm1, (%2, %3) \n\t" - "add %%"REG_a", %1 \n\t" - "add %%"REG_a", %2 \n\t" + "add %%"FF_REG_a", %1 \n\t" + "add %%"FF_REG_a", %2 \n\t" "movq (%1 ), %%mm0 \n\t" "movq (%1, %3), %%mm1 \n\t" "movq %%mm0, (%2) \n\t" "movq %%mm1, (%2, %3) \n\t" - "add %%"REG_a", %1 \n\t" - "add %%"REG_a", %2 \n\t" + "add %%"FF_REG_a", %1 \n\t" + "add %%"FF_REG_a", %2 \n\t" "subl $4, %0 \n\t" "jnz 1b \n\t" : "+g"(h), "+r"(pixels), "+r"(block) : "r"((x86_reg)line_size) - : "%"REG_a, "memory" + : "%"FF_REG_a, "memory" ); } @@ -106,7 +106,7 @@ void ff_put_pixels16_mmx(uint8_t *block, const uint8_t *pixels, ptrdiff_t line_size, int h) { __asm__ volatile ( - "lea (%3, %3), %%"REG_a" \n\t" + "lea (%3, %3), %%"FF_REG_a" \n\t" ".p2align 3 \n\t" "1: \n\t" "movq (%1 ), %%mm0 \n\t" @@ -117,8 +117,8 @@ void ff_put_pixels16_mmx(uint8_t *block, const uint8_t *pixels, "movq %%mm4, 8(%2) \n\t" "movq %%mm1, (%2, %3) \n\t" "movq %%mm5, 8(%2, %3) \n\t" - "add %%"REG_a", %1 \n\t" - "add %%"REG_a", %2 \n\t" + "add %%"FF_REG_a", %1 \n\t" + "add %%"FF_REG_a", %2 \n\t" "movq (%1 ), %%mm0 \n\t" "movq 8(%1 ), %%mm4 \n\t" "movq (%1, %3), %%mm1 \n\t" @@ -127,13 +127,13 @@ void ff_put_pixels16_mmx(uint8_t *block, const uint8_t *pixels, "movq %%mm4, 8(%2) \n\t" "movq %%mm1, (%2, %3) \n\t" "movq %%mm5, 8(%2, %3) \n\t" - "add %%"REG_a", %1 \n\t" - "add %%"REG_a", %2 \n\t" + "add %%"FF_REG_a", %1 \n\t" + "add %%"FF_REG_a", %2 \n\t" "subl $4, %0 \n\t" "jnz 1b \n\t" : "+g"(h), "+r"(pixels), "+r"(block) : "r"((x86_reg)line_size) - : "%"REG_a, "memory" + : "%"FF_REG_a, "memory" ); } diff --git a/libavcodec/x86/h264_i386.h b/libavcodec/x86/h264_i386.h index fb33e40784fcc8711f60020622d373c634256597..ad57aa91ab0c73b10fa13a233fd3d546c1679bdd 100644 --- a/libavcodec/x86/h264_i386.h +++ b/libavcodec/x86/h264_i386.h @@ -84,13 +84,13 @@ static int decode_significance_x86(CABACContext *c, int max_coeff, "sub %10, %1 \n\t" "mov %2, %0 \n\t" "movl %7, %%ecx \n\t" - "add %1, %%"REG_c" \n\t" + "add %1, %%"FF_REG_c" \n\t" "movl %%ecx, (%0) \n\t" "test $1, %4 \n\t" " jnz 5f \n\t" - "add"OPSIZE" $4, %2 \n\t" + "add"FF_OPSIZE" $4, %2 \n\t" "4: \n\t" "add $1, %1 \n\t" @@ -98,7 +98,7 @@ static int decode_significance_x86(CABACContext *c, int max_coeff, " jb 3b \n\t" "mov %2, %0 \n\t" "movl %7, %%ecx \n\t" - "add %1, %%"REG_c" \n\t" + "add %1, %%"FF_REG_c" \n\t" "movl %%ecx, (%0) \n\t" "5: \n\t" "add %9, %k0 \n\t" @@ -109,7 +109,7 @@ static int 
decode_significance_x86(CABACContext *c, int max_coeff, "i"(offsetof(CABACContext, bytestream)), "i"(offsetof(CABACContext, bytestream_end)) TABLES_ARG - : "%"REG_c, "memory" + : "%"FF_REG_c, "memory" ); return coeff_count; } @@ -175,7 +175,7 @@ static int decode_significance_8x8_x86(CABACContext *c, "test $1, %4 \n\t" " jnz 5f \n\t" - "add"OPSIZE" $4, %2 \n\t" + "add"FF_OPSIZE" $4, %2 \n\t" "4: \n\t" "addl $1, %k6 \n\t" @@ -194,7 +194,7 @@ static int decode_significance_8x8_x86(CABACContext *c, "i"(offsetof(CABACContext, bytestream)), "i"(offsetof(CABACContext, bytestream_end)), "i"(H264_LAST_COEFF_FLAG_OFFSET_8x8_OFFSET) TABLES_ARG - : "%"REG_c, "memory" + : "%"FF_REG_c, "memory" ); return coeff_count; } diff --git a/libavcodec/x86/hpeldsp_rnd_template.c b/libavcodec/x86/hpeldsp_rnd_template.c index d854e8a2fc13340bbdc3dc0c5091a91ad429f493..82231ad13dbc876e61bd892267f5bd1a057e9783 100644 --- a/libavcodec/x86/hpeldsp_rnd_template.c +++ b/libavcodec/x86/hpeldsp_rnd_template.c @@ -32,7 +32,7 @@ static void DEF(put, pixels8_x2)(uint8_t *block, const uint8_t *pixels, ptrdiff_ { MOVQ_BFE(mm6); __asm__ volatile( - "lea (%3, %3), %%"REG_a" \n\t" + "lea (%3, %3), %%"FF_REG_a" \n\t" ".p2align 3 \n\t" "1: \n\t" "movq (%1), %%mm0 \n\t" @@ -42,8 +42,8 @@ static void DEF(put, pixels8_x2)(uint8_t *block, const uint8_t *pixels, ptrdiff_ PAVGBP(%%mm0, %%mm1, %%mm4, %%mm2, %%mm3, %%mm5) "movq %%mm4, (%2) \n\t" "movq %%mm5, (%2, %3) \n\t" - "add %%"REG_a", %1 \n\t" - "add %%"REG_a", %2 \n\t" + "add %%"FF_REG_a", %1 \n\t" + "add %%"FF_REG_a", %2 \n\t" "movq (%1), %%mm0 \n\t" "movq 1(%1), %%mm1 \n\t" "movq (%1, %3), %%mm2 \n\t" @@ -51,20 +51,20 @@ static void DEF(put, pixels8_x2)(uint8_t *block, const uint8_t *pixels, ptrdiff_ PAVGBP(%%mm0, %%mm1, %%mm4, %%mm2, %%mm3, %%mm5) "movq %%mm4, (%2) \n\t" "movq %%mm5, (%2, %3) \n\t" - "add %%"REG_a", %1 \n\t" - "add %%"REG_a", %2 \n\t" + "add %%"FF_REG_a", %1 \n\t" + "add %%"FF_REG_a", %2 \n\t" "subl $4, %0 \n\t" "jnz 1b \n\t" :"+g"(h), "+S"(pixels), "+D"(block) :"r"((x86_reg)line_size) - :REG_a, "memory"); + :FF_REG_a, "memory"); } static void DEF(put, pixels16_x2)(uint8_t *block, const uint8_t *pixels, ptrdiff_t line_size, int h) { MOVQ_BFE(mm6); __asm__ volatile( - "lea (%3, %3), %%"REG_a" \n\t" + "lea (%3, %3), %%"FF_REG_a" \n\t" ".p2align 3 \n\t" "1: \n\t" "movq (%1), %%mm0 \n\t" @@ -81,8 +81,8 @@ static void DEF(put, pixels16_x2)(uint8_t *block, const uint8_t *pixels, ptrdiff PAVGBP(%%mm0, %%mm1, %%mm4, %%mm2, %%mm3, %%mm5) "movq %%mm4, 8(%2) \n\t" "movq %%mm5, 8(%2, %3) \n\t" - "add %%"REG_a", %1 \n\t" - "add %%"REG_a", %2 \n\t" + "add %%"FF_REG_a", %1 \n\t" + "add %%"FF_REG_a", %2 \n\t" "movq (%1), %%mm0 \n\t" "movq 1(%1), %%mm1 \n\t" "movq (%1, %3), %%mm2 \n\t" @@ -97,42 +97,42 @@ static void DEF(put, pixels16_x2)(uint8_t *block, const uint8_t *pixels, ptrdiff PAVGBP(%%mm0, %%mm1, %%mm4, %%mm2, %%mm3, %%mm5) "movq %%mm4, 8(%2) \n\t" "movq %%mm5, 8(%2, %3) \n\t" - "add %%"REG_a", %1 \n\t" - "add %%"REG_a", %2 \n\t" + "add %%"FF_REG_a", %1 \n\t" + "add %%"FF_REG_a", %2 \n\t" "subl $4, %0 \n\t" "jnz 1b \n\t" :"+g"(h), "+S"(pixels), "+D"(block) :"r"((x86_reg)line_size) - :REG_a, "memory"); + :FF_REG_a, "memory"); } static void DEF(put, pixels8_y2)(uint8_t *block, const uint8_t *pixels, ptrdiff_t line_size, int h) { MOVQ_BFE(mm6); __asm__ volatile( - "lea (%3, %3), %%"REG_a" \n\t" + "lea (%3, %3), %%"FF_REG_a" \n\t" "movq (%1), %%mm0 \n\t" ".p2align 3 \n\t" "1: \n\t" "movq (%1, %3), %%mm1 \n\t" - "movq (%1, %%"REG_a"),%%mm2 \n\t" + "movq (%1, 
%%"FF_REG_a"),%%mm2 \n\t" PAVGBP(%%mm1, %%mm0, %%mm4, %%mm2, %%mm1, %%mm5) "movq %%mm4, (%2) \n\t" "movq %%mm5, (%2, %3) \n\t" - "add %%"REG_a", %1 \n\t" - "add %%"REG_a", %2 \n\t" + "add %%"FF_REG_a", %1 \n\t" + "add %%"FF_REG_a", %2 \n\t" "movq (%1, %3), %%mm1 \n\t" - "movq (%1, %%"REG_a"),%%mm0 \n\t" + "movq (%1, %%"FF_REG_a"),%%mm0 \n\t" PAVGBP(%%mm1, %%mm2, %%mm4, %%mm0, %%mm1, %%mm5) "movq %%mm4, (%2) \n\t" "movq %%mm5, (%2, %3) \n\t" - "add %%"REG_a", %1 \n\t" - "add %%"REG_a", %2 \n\t" + "add %%"FF_REG_a", %1 \n\t" + "add %%"FF_REG_a", %2 \n\t" "subl $4, %0 \n\t" "jnz 1b \n\t" :"+g"(h), "+S"(pixels), "+D"(block) :"r"((x86_reg)line_size) - :REG_a, "memory"); + :FF_REG_a, "memory"); } static void DEF(avg, pixels16_x2)(uint8_t *block, const uint8_t *pixels, ptrdiff_t line_size, int h) @@ -165,12 +165,12 @@ static void DEF(avg, pixels8_y2)(uint8_t *block, const uint8_t *pixels, ptrdiff_ { MOVQ_BFE(mm6); __asm__ volatile( - "lea (%3, %3), %%"REG_a" \n\t" + "lea (%3, %3), %%"FF_REG_a" \n\t" "movq (%1), %%mm0 \n\t" ".p2align 3 \n\t" "1: \n\t" "movq (%1, %3), %%mm1 \n\t" - "movq (%1, %%"REG_a"), %%mm2 \n\t" + "movq (%1, %%"FF_REG_a"), %%mm2 \n\t" PAVGBP(%%mm1, %%mm0, %%mm4, %%mm2, %%mm1, %%mm5) "movq (%2), %%mm3 \n\t" PAVGB_MMX(%%mm3, %%mm4, %%mm0, %%mm6) @@ -178,11 +178,11 @@ static void DEF(avg, pixels8_y2)(uint8_t *block, const uint8_t *pixels, ptrdiff_ PAVGB_MMX(%%mm3, %%mm5, %%mm1, %%mm6) "movq %%mm0, (%2) \n\t" "movq %%mm1, (%2, %3) \n\t" - "add %%"REG_a", %1 \n\t" - "add %%"REG_a", %2 \n\t" + "add %%"FF_REG_a", %1 \n\t" + "add %%"FF_REG_a", %2 \n\t" "movq (%1, %3), %%mm1 \n\t" - "movq (%1, %%"REG_a"), %%mm0 \n\t" + "movq (%1, %%"FF_REG_a"), %%mm0 \n\t" PAVGBP(%%mm1, %%mm2, %%mm4, %%mm0, %%mm1, %%mm5) "movq (%2), %%mm3 \n\t" PAVGB_MMX(%%mm3, %%mm4, %%mm2, %%mm6) @@ -190,12 +190,12 @@ static void DEF(avg, pixels8_y2)(uint8_t *block, const uint8_t *pixels, ptrdiff_ PAVGB_MMX(%%mm3, %%mm5, %%mm1, %%mm6) "movq %%mm2, (%2) \n\t" "movq %%mm1, (%2, %3) \n\t" - "add %%"REG_a", %1 \n\t" - "add %%"REG_a", %2 \n\t" + "add %%"FF_REG_a", %1 \n\t" + "add %%"FF_REG_a", %2 \n\t" "subl $4, %0 \n\t" "jnz 1b \n\t" :"+g"(h), "+S"(pixels), "+D"(block) :"r"((x86_reg)line_size) - :REG_a, "memory"); + :FF_REG_a, "memory"); } diff --git a/libavcodec/x86/me_cmp_init.c b/libavcodec/x86/me_cmp_init.c index b906bb6caa96a99681133584c0bbfa32f2683060..ee5f5595473d77ca3e789004b10cc135aa3d59ad 100644 --- a/libavcodec/x86/me_cmp_init.c +++ b/libavcodec/x86/me_cmp_init.c @@ -812,15 +812,15 @@ static inline void sad8_1_mmx(uint8_t *blk1, uint8_t *blk2, __asm__ volatile ( ".p2align 4 \n\t" "1: \n\t" - "movq (%1, %%"REG_a"), %%mm0 \n\t" - "movq (%2, %%"REG_a"), %%mm2 \n\t" - "movq (%2, %%"REG_a"), %%mm4 \n\t" - "add %3, %%"REG_a" \n\t" + "movq (%1, %%"FF_REG_a"), %%mm0 \n\t" + "movq (%2, %%"FF_REG_a"), %%mm2 \n\t" + "movq (%2, %%"FF_REG_a"), %%mm4 \n\t" + "add %3, %%"FF_REG_a" \n\t" "psubusb %%mm0, %%mm2 \n\t" "psubusb %%mm4, %%mm0 \n\t" - "movq (%1, %%"REG_a"), %%mm1 \n\t" - "movq (%2, %%"REG_a"), %%mm3 \n\t" - "movq (%2, %%"REG_a"), %%mm5 \n\t" + "movq (%1, %%"FF_REG_a"), %%mm1 \n\t" + "movq (%2, %%"FF_REG_a"), %%mm3 \n\t" + "movq (%2, %%"FF_REG_a"), %%mm5 \n\t" "psubusb %%mm1, %%mm3 \n\t" "psubusb %%mm5, %%mm1 \n\t" "por %%mm2, %%mm0 \n\t" @@ -835,7 +835,7 @@ static inline void sad8_1_mmx(uint8_t *blk1, uint8_t *blk2, "paddw %%mm3, %%mm2 \n\t" "paddw %%mm2, %%mm0 \n\t" "paddw %%mm0, %%mm6 \n\t" - "add %3, %%"REG_a" \n\t" + "add %3, %%"FF_REG_a" \n\t" " js 1b \n\t" : "+a" (len) : "r" (blk1 - len), "r" (blk2 - len), "r" 
(stride)); @@ -971,18 +971,18 @@ static inline void sad8_2_mmx(uint8_t *blk1a, uint8_t *blk1b, uint8_t *blk2, __asm__ volatile ( ".p2align 4 \n\t" "1: \n\t" - "movq (%1, %%"REG_a"), %%mm0 \n\t" - "movq (%2, %%"REG_a"), %%mm1 \n\t" - "movq (%1, %%"REG_a"), %%mm2 \n\t" - "movq (%2, %%"REG_a"), %%mm3 \n\t" + "movq (%1, %%"FF_REG_a"), %%mm0 \n\t" + "movq (%2, %%"FF_REG_a"), %%mm1 \n\t" + "movq (%1, %%"FF_REG_a"), %%mm2 \n\t" + "movq (%2, %%"FF_REG_a"), %%mm3 \n\t" "punpcklbw %%mm7, %%mm0 \n\t" "punpcklbw %%mm7, %%mm1 \n\t" "punpckhbw %%mm7, %%mm2 \n\t" "punpckhbw %%mm7, %%mm3 \n\t" "paddw %%mm0, %%mm1 \n\t" "paddw %%mm2, %%mm3 \n\t" - "movq (%3, %%"REG_a"), %%mm4 \n\t" - "movq (%3, %%"REG_a"), %%mm2 \n\t" + "movq (%3, %%"FF_REG_a"), %%mm4 \n\t" + "movq (%3, %%"FF_REG_a"), %%mm2 \n\t" "paddw %%mm5, %%mm1 \n\t" "paddw %%mm5, %%mm3 \n\t" "psrlw $1, %%mm1 \n\t" @@ -996,7 +996,7 @@ static inline void sad8_2_mmx(uint8_t *blk1a, uint8_t *blk1b, uint8_t *blk2, "punpckhbw %%mm7, %%mm1 \n\t" "paddw %%mm1, %%mm0 \n\t" "paddw %%mm0, %%mm6 \n\t" - "add %4, %%"REG_a" \n\t" + "add %4, %%"FF_REG_a" \n\t" " js 1b \n\t" : "+a" (len) : "r" (blk1a - len), "r" (blk1b - len), "r" (blk2 - len), @@ -1008,8 +1008,8 @@ static inline void sad8_4_mmx(uint8_t *blk1, uint8_t *blk2, { x86_reg len = -(stride * h); __asm__ volatile ( - "movq (%1, %%"REG_a"), %%mm0 \n\t" - "movq 1(%1, %%"REG_a"), %%mm2 \n\t" + "movq (%1, %%"FF_REG_a"), %%mm0\n\t" + "movq 1(%1, %%"FF_REG_a"), %%mm2\n\t" "movq %%mm0, %%mm1 \n\t" "movq %%mm2, %%mm3 \n\t" "punpcklbw %%mm7, %%mm0 \n\t" @@ -1020,8 +1020,8 @@ static inline void sad8_4_mmx(uint8_t *blk1, uint8_t *blk2, "paddw %%mm3, %%mm1 \n\t" ".p2align 4 \n\t" "1: \n\t" - "movq (%2, %%"REG_a"), %%mm2 \n\t" - "movq 1(%2, %%"REG_a"), %%mm4 \n\t" + "movq (%2, %%"FF_REG_a"), %%mm2\n\t" + "movq 1(%2, %%"FF_REG_a"), %%mm4\n\t" "movq %%mm2, %%mm3 \n\t" "movq %%mm4, %%mm5 \n\t" "punpcklbw %%mm7, %%mm2 \n\t" @@ -1035,8 +1035,8 @@ static inline void sad8_4_mmx(uint8_t *blk1, uint8_t *blk2, "paddw %%mm3, %%mm1 \n\t" "paddw %%mm5, %%mm0 \n\t" "paddw %%mm5, %%mm1 \n\t" - "movq (%3, %%"REG_a"), %%mm4 \n\t" - "movq (%3, %%"REG_a"), %%mm5 \n\t" + "movq (%3, %%"FF_REG_a"), %%mm4 \n\t" + "movq (%3, %%"FF_REG_a"), %%mm5 \n\t" "psrlw $2, %%mm0 \n\t" "psrlw $2, %%mm1 \n\t" "packuswb %%mm1, %%mm0 \n\t" @@ -1050,7 +1050,7 @@ static inline void sad8_4_mmx(uint8_t *blk1, uint8_t *blk2, "paddw %%mm4, %%mm6 \n\t" "movq %%mm2, %%mm0 \n\t" "movq %%mm3, %%mm1 \n\t" - "add %4, %%"REG_a" \n\t" + "add %4, %%"FF_REG_a" \n\t" " js 1b \n\t" : "+a" (len) : "r" (blk1 - len), "r" (blk1 - len + stride), "r" (blk2 - len), diff --git a/libavcodec/x86/mpegvideo.c b/libavcodec/x86/mpegvideo.c index 33d5cd814d0ae65ff6157702893f79815ce50633..6c0493e6b8010b3328ee6b879904dd3b9c028533 100644 --- a/libavcodec/x86/mpegvideo.c +++ b/libavcodec/x86/mpegvideo.c @@ -187,13 +187,13 @@ __asm__ volatile( "movd %2, %%mm6 \n\t" "packssdw %%mm6, %%mm6 \n\t" "packssdw %%mm6, %%mm6 \n\t" - "mov %3, %%"REG_a" \n\t" + "mov %3, %%"FF_REG_a" \n\t" ".p2align 4 \n\t" "1: \n\t" - "movq (%0, %%"REG_a"), %%mm0 \n\t" - "movq 8(%0, %%"REG_a"), %%mm1 \n\t" - "movq (%1, %%"REG_a"), %%mm4 \n\t" - "movq 8(%1, %%"REG_a"), %%mm5 \n\t" + "movq (%0, %%"FF_REG_a"), %%mm0\n\t" + "movq 8(%0, %%"FF_REG_a"), %%mm1\n\t" + "movq (%1, %%"FF_REG_a"), %%mm4\n\t" + "movq 8(%1, %%"FF_REG_a"), %%mm5\n\t" "pmullw %%mm6, %%mm4 \n\t" // q=qscale*quant_matrix[i] "pmullw %%mm6, %%mm5 \n\t" // q=qscale*quant_matrix[i] "pxor %%mm2, %%mm2 \n\t" @@ -208,8 +208,8 @@ __asm__ volatile( "pmullw %%mm5, 
%%mm1 \n\t" // abs(block[i])*q "pxor %%mm4, %%mm4 \n\t" "pxor %%mm5, %%mm5 \n\t" // FIXME slow - "pcmpeqw (%0, %%"REG_a"), %%mm4 \n\t" // block[i] == 0 ? -1 : 0 - "pcmpeqw 8(%0, %%"REG_a"), %%mm5\n\t" // block[i] == 0 ? -1 : 0 + "pcmpeqw (%0, %%"FF_REG_a"), %%mm4\n\t" // block[i] == 0 ? -1 : 0 + "pcmpeqw 8(%0, %%"FF_REG_a"), %%mm5\n\t" // block[i] == 0 ? -1 : 0 "psraw $3, %%mm0 \n\t" "psraw $3, %%mm1 \n\t" "psubw %%mm7, %%mm0 \n\t" @@ -222,13 +222,13 @@ __asm__ volatile( "psubw %%mm3, %%mm1 \n\t" "pandn %%mm0, %%mm4 \n\t" "pandn %%mm1, %%mm5 \n\t" - "movq %%mm4, (%0, %%"REG_a") \n\t" - "movq %%mm5, 8(%0, %%"REG_a") \n\t" + "movq %%mm4, (%0, %%"FF_REG_a")\n\t" + "movq %%mm5, 8(%0, %%"FF_REG_a")\n\t" - "add $16, %%"REG_a" \n\t" + "add $16, %%"FF_REG_a" \n\t" "js 1b \n\t" ::"r" (block+nCoeffs), "r"(quant_matrix+nCoeffs), "rm" (qscale), "g" (-2*nCoeffs) - : "%"REG_a, "memory" + : "%"FF_REG_a, "memory" ); block[0]= block0; } @@ -250,13 +250,13 @@ __asm__ volatile( "movd %2, %%mm6 \n\t" "packssdw %%mm6, %%mm6 \n\t" "packssdw %%mm6, %%mm6 \n\t" - "mov %3, %%"REG_a" \n\t" + "mov %3, %%"FF_REG_a" \n\t" ".p2align 4 \n\t" "1: \n\t" - "movq (%0, %%"REG_a"), %%mm0 \n\t" - "movq 8(%0, %%"REG_a"), %%mm1 \n\t" - "movq (%1, %%"REG_a"), %%mm4 \n\t" - "movq 8(%1, %%"REG_a"), %%mm5 \n\t" + "movq (%0, %%"FF_REG_a"), %%mm0\n\t" + "movq 8(%0, %%"FF_REG_a"), %%mm1\n\t" + "movq (%1, %%"FF_REG_a"), %%mm4\n\t" + "movq 8(%1, %%"FF_REG_a"), %%mm5\n\t" "pmullw %%mm6, %%mm4 \n\t" // q=qscale*quant_matrix[i] "pmullw %%mm6, %%mm5 \n\t" // q=qscale*quant_matrix[i] "pxor %%mm2, %%mm2 \n\t" @@ -275,8 +275,8 @@ __asm__ volatile( "pmullw %%mm5, %%mm1 \n\t" // (abs(block[i])*2 + 1)*q "pxor %%mm4, %%mm4 \n\t" "pxor %%mm5, %%mm5 \n\t" // FIXME slow - "pcmpeqw (%0, %%"REG_a"), %%mm4 \n\t" // block[i] == 0 ? -1 : 0 - "pcmpeqw 8(%0, %%"REG_a"), %%mm5\n\t" // block[i] == 0 ? -1 : 0 + "pcmpeqw (%0, %%"FF_REG_a"), %%mm4\n\t" // block[i] == 0 ? -1 : 0 + "pcmpeqw 8(%0, %%"FF_REG_a"), %%mm5\n\t" // block[i] == 0 ? -1 : 0 "psraw $4, %%mm0 \n\t" "psraw $4, %%mm1 \n\t" "psubw %%mm7, %%mm0 \n\t" @@ -289,13 +289,13 @@ __asm__ volatile( "psubw %%mm3, %%mm1 \n\t" "pandn %%mm0, %%mm4 \n\t" "pandn %%mm1, %%mm5 \n\t" - "movq %%mm4, (%0, %%"REG_a") \n\t" - "movq %%mm5, 8(%0, %%"REG_a") \n\t" + "movq %%mm4, (%0, %%"FF_REG_a")\n\t" + "movq %%mm5, 8(%0, %%"FF_REG_a")\n\t" - "add $16, %%"REG_a" \n\t" + "add $16, %%"FF_REG_a" \n\t" "js 1b \n\t" ::"r" (block+nCoeffs), "r"(quant_matrix+nCoeffs), "rm" (qscale), "g" (-2*nCoeffs) - : "%"REG_a, "memory" + : "%"FF_REG_a, "memory" ); } @@ -322,13 +322,13 @@ __asm__ volatile( "movd %2, %%mm6 \n\t" "packssdw %%mm6, %%mm6 \n\t" "packssdw %%mm6, %%mm6 \n\t" - "mov %3, %%"REG_a" \n\t" + "mov %3, %%"FF_REG_a" \n\t" ".p2align 4 \n\t" "1: \n\t" - "movq (%0, %%"REG_a"), %%mm0 \n\t" - "movq 8(%0, %%"REG_a"), %%mm1 \n\t" - "movq (%1, %%"REG_a"), %%mm4 \n\t" - "movq 8(%1, %%"REG_a"), %%mm5 \n\t" + "movq (%0, %%"FF_REG_a"), %%mm0\n\t" + "movq 8(%0, %%"FF_REG_a"), %%mm1\n\t" + "movq (%1, %%"FF_REG_a"), %%mm4\n\t" + "movq 8(%1, %%"FF_REG_a"), %%mm5\n\t" "pmullw %%mm6, %%mm4 \n\t" // q=qscale*quant_matrix[i] "pmullw %%mm6, %%mm5 \n\t" // q=qscale*quant_matrix[i] "pxor %%mm2, %%mm2 \n\t" @@ -343,8 +343,8 @@ __asm__ volatile( "pmullw %%mm5, %%mm1 \n\t" // abs(block[i])*q "pxor %%mm4, %%mm4 \n\t" "pxor %%mm5, %%mm5 \n\t" // FIXME slow - "pcmpeqw (%0, %%"REG_a"), %%mm4 \n\t" // block[i] == 0 ? -1 : 0 - "pcmpeqw 8(%0, %%"REG_a"), %%mm5\n\t" // block[i] == 0 ? -1 : 0 + "pcmpeqw (%0, %%"FF_REG_a"), %%mm4\n\t" // block[i] == 0 ? 
-1 : 0 + "pcmpeqw 8(%0, %%"FF_REG_a"), %%mm5\n\t" // block[i] == 0 ? -1 : 0 "psraw $3, %%mm0 \n\t" "psraw $3, %%mm1 \n\t" "pxor %%mm2, %%mm0 \n\t" @@ -353,13 +353,13 @@ __asm__ volatile( "psubw %%mm3, %%mm1 \n\t" "pandn %%mm0, %%mm4 \n\t" "pandn %%mm1, %%mm5 \n\t" - "movq %%mm4, (%0, %%"REG_a") \n\t" - "movq %%mm5, 8(%0, %%"REG_a") \n\t" + "movq %%mm4, (%0, %%"FF_REG_a")\n\t" + "movq %%mm5, 8(%0, %%"FF_REG_a")\n\t" - "add $16, %%"REG_a" \n\t" + "add $16, %%"FF_REG_a" \n\t" "jng 1b \n\t" ::"r" (block+nCoeffs), "r"(quant_matrix+nCoeffs), "rm" (qscale), "g" (-2*nCoeffs) - : "%"REG_a, "memory" + : "%"FF_REG_a, "memory" ); block[0]= block0; //Note, we do not do mismatch control for intra as errors cannot accumulate @@ -383,13 +383,13 @@ __asm__ volatile( "movd %2, %%mm6 \n\t" "packssdw %%mm6, %%mm6 \n\t" "packssdw %%mm6, %%mm6 \n\t" - "mov %3, %%"REG_a" \n\t" + "mov %3, %%"FF_REG_a" \n\t" ".p2align 4 \n\t" "1: \n\t" - "movq (%0, %%"REG_a"), %%mm0 \n\t" - "movq 8(%0, %%"REG_a"), %%mm1 \n\t" - "movq (%1, %%"REG_a"), %%mm4 \n\t" - "movq 8(%1, %%"REG_a"), %%mm5 \n\t" + "movq (%0, %%"FF_REG_a"), %%mm0\n\t" + "movq 8(%0, %%"FF_REG_a"), %%mm1\n\t" + "movq (%1, %%"FF_REG_a"), %%mm4\n\t" + "movq 8(%1, %%"FF_REG_a"), %%mm5\n\t" "pmullw %%mm6, %%mm4 \n\t" // q=qscale*quant_matrix[i] "pmullw %%mm6, %%mm5 \n\t" // q=qscale*quant_matrix[i] "pxor %%mm2, %%mm2 \n\t" @@ -408,8 +408,8 @@ __asm__ volatile( "paddw %%mm5, %%mm1 \n\t" // (abs(block[i])*2 + 1)*q "pxor %%mm4, %%mm4 \n\t" "pxor %%mm5, %%mm5 \n\t" // FIXME slow - "pcmpeqw (%0, %%"REG_a"), %%mm4 \n\t" // block[i] == 0 ? -1 : 0 - "pcmpeqw 8(%0, %%"REG_a"), %%mm5\n\t" // block[i] == 0 ? -1 : 0 + "pcmpeqw (%0, %%"FF_REG_a"), %%mm4\n\t" // block[i] == 0 ? -1 : 0 + "pcmpeqw 8(%0, %%"FF_REG_a"), %%mm5\n\t" // block[i] == 0 ? 
-1 : 0 "psrlw $4, %%mm0 \n\t" "psrlw $4, %%mm1 \n\t" "pxor %%mm2, %%mm0 \n\t" @@ -420,10 +420,10 @@ __asm__ volatile( "pandn %%mm1, %%mm5 \n\t" "pxor %%mm4, %%mm7 \n\t" "pxor %%mm5, %%mm7 \n\t" - "movq %%mm4, (%0, %%"REG_a") \n\t" - "movq %%mm5, 8(%0, %%"REG_a") \n\t" + "movq %%mm4, (%0, %%"FF_REG_a")\n\t" + "movq %%mm5, 8(%0, %%"FF_REG_a")\n\t" - "add $16, %%"REG_a" \n\t" + "add $16, %%"FF_REG_a" \n\t" "jng 1b \n\t" "movd 124(%0, %3), %%mm0 \n\t" "movq %%mm7, %%mm6 \n\t" @@ -438,7 +438,7 @@ __asm__ volatile( "movd %%mm0, 124(%0, %3) \n\t" ::"r" (block+nCoeffs), "r"(quant_matrix+nCoeffs), "rm" (qscale), "r" (-2*nCoeffs) - : "%"REG_a, "memory" + : "%"FF_REG_a, "memory" ); } diff --git a/libavcodec/x86/mpegvideoenc_template.c b/libavcodec/x86/mpegvideoenc_template.c index a54c9042ceb51a32615d85fd88ef4f68100dba8d..72df76b749671a4c6f8f0ebd4f5158f2006f0754 100644 --- a/libavcodec/x86/mpegvideoenc_template.c +++ b/libavcodec/x86/mpegvideoenc_template.c @@ -147,33 +147,33 @@ static int RENAME(dct_quantize)(MpegEncContext *s, if((s->out_format == FMT_H263 || s->out_format == FMT_H261) && s->mpeg_quant==0){ __asm__ volatile( - "movd %%"REG_a", "MM"3 \n\t" // last_non_zero_p1 + "movd %%"FF_REG_a", "MM"3 \n\t" // last_non_zero_p1 SPREADW(MM"3") "pxor "MM"7, "MM"7 \n\t" // 0 "pxor "MM"4, "MM"4 \n\t" // 0 MOVQ" (%2), "MM"5 \n\t" // qmat[0] "pxor "MM"6, "MM"6 \n\t" "psubw (%3), "MM"6 \n\t" // -bias[0] - "mov $-128, %%"REG_a" \n\t" + "mov $-128, %%"FF_REG_a" \n\t" ".p2align 4 \n\t" "1: \n\t" - MOVQ" (%1, %%"REG_a"), "MM"0 \n\t" // block[i] + MOVQ" (%1, %%"FF_REG_a"), "MM"0 \n\t" // block[i] SAVE_SIGN(MM"1", MM"0") // ABS(block[i]) "psubusw "MM"6, "MM"0 \n\t" // ABS(block[i]) + bias[0] "pmulhw "MM"5, "MM"0 \n\t" // (ABS(block[i])*qmat[0] - bias[0]*qmat[0])>>16 "por "MM"0, "MM"4 \n\t" RESTORE_SIGN(MM"1", MM"0") // out=((ABS(block[i])*qmat[0] - bias[0]*qmat[0])>>16)*sign(block[i]) - MOVQ" "MM"0, (%5, %%"REG_a") \n\t" + MOVQ" "MM"0, (%5, %%"FF_REG_a") \n\t" "pcmpeqw "MM"7, "MM"0 \n\t" // out==0 ? 
0xFF : 0x00 - MOVQ" (%4, %%"REG_a"), "MM"1 \n\t" - MOVQ" "MM"7, (%1, %%"REG_a") \n\t" // 0 + MOVQ" (%4, %%"FF_REG_a"), "MM"1 \n\t" + MOVQ" "MM"7, (%1, %%"FF_REG_a") \n\t" // 0 "pandn "MM"1, "MM"0 \n\t" PMAXW(MM"0", MM"3") - "add $"MMREG_WIDTH", %%"REG_a" \n\t" + "add $"MMREG_WIDTH", %%"FF_REG_a" \n\t" " js 1b \n\t" PMAX(MM"3", MM"0") - "movd "MM"3, %%"REG_a" \n\t" - "movzb %%al, %%"REG_a" \n\t" // last_non_zero_p1 + "movd "MM"3, %%"FF_REG_a" \n\t" + "movzb %%al, %%"FF_REG_a" \n\t" // last_non_zero_p1 : "+a" (last_non_zero_p1) : "r" (block+64), "r" (qmat), "r" (bias), "r" (inv_zigzag_direct16 + 64), "r" (temp_block + 64) @@ -182,32 +182,32 @@ static int RENAME(dct_quantize)(MpegEncContext *s, ); }else{ // FMT_H263 __asm__ volatile( - "movd %%"REG_a", "MM"3 \n\t" // last_non_zero_p1 + "movd %%"FF_REG_a", "MM"3 \n\t" // last_non_zero_p1 SPREADW(MM"3") "pxor "MM"7, "MM"7 \n\t" // 0 "pxor "MM"4, "MM"4 \n\t" // 0 - "mov $-128, %%"REG_a" \n\t" + "mov $-128, %%"FF_REG_a" \n\t" ".p2align 4 \n\t" "1: \n\t" - MOVQ" (%1, %%"REG_a"), "MM"0 \n\t" // block[i] + MOVQ" (%1, %%"FF_REG_a"), "MM"0 \n\t" // block[i] SAVE_SIGN(MM"1", MM"0") // ABS(block[i]) - MOVQ" (%3, %%"REG_a"), "MM"6 \n\t" // bias[0] + MOVQ" (%3, %%"FF_REG_a"), "MM"6 \n\t" // bias[0] "paddusw "MM"6, "MM"0 \n\t" // ABS(block[i]) + bias[0] - MOVQ" (%2, %%"REG_a"), "MM"5 \n\t" // qmat[i] + MOVQ" (%2, %%"FF_REG_a"), "MM"5 \n\t" // qmat[i] "pmulhw "MM"5, "MM"0 \n\t" // (ABS(block[i])*qmat[0] + bias[0]*qmat[0])>>16 "por "MM"0, "MM"4 \n\t" RESTORE_SIGN(MM"1", MM"0") // out=((ABS(block[i])*qmat[0] - bias[0]*qmat[0])>>16)*sign(block[i]) - MOVQ" "MM"0, (%5, %%"REG_a") \n\t" + MOVQ" "MM"0, (%5, %%"FF_REG_a") \n\t" "pcmpeqw "MM"7, "MM"0 \n\t" // out==0 ? 0xFF : 0x00 - MOVQ" (%4, %%"REG_a"), "MM"1 \n\t" - MOVQ" "MM"7, (%1, %%"REG_a") \n\t" // 0 + MOVQ" (%4, %%"FF_REG_a"), "MM"1 \n\t" + MOVQ" "MM"7, (%1, %%"FF_REG_a") \n\t" // 0 "pandn "MM"1, "MM"0 \n\t" PMAXW(MM"0", MM"3") - "add $"MMREG_WIDTH", %%"REG_a" \n\t" + "add $"MMREG_WIDTH", %%"FF_REG_a" \n\t" " js 1b \n\t" PMAX(MM"3", MM"0") - "movd "MM"3, %%"REG_a" \n\t" - "movzb %%al, %%"REG_a" \n\t" // last_non_zero_p1 + "movd "MM"3, %%"FF_REG_a" \n\t" + "movzb %%al, %%"FF_REG_a" \n\t" // last_non_zero_p1 : "+a" (last_non_zero_p1) : "r" (block+64), "r" (qmat+64), "r" (bias+64), "r" (inv_zigzag_direct16 + 64), "r" (temp_block + 64) diff --git a/libavcodec/x86/rnd_template.c b/libavcodec/x86/rnd_template.c index a9fb13234b2e8031c49ac3c553ea698b97bff530..0c76d9164779f8ae22fb4cd304d75902a4e18075 100644 --- a/libavcodec/x86/rnd_template.c +++ b/libavcodec/x86/rnd_template.c @@ -46,12 +46,12 @@ STATIC void DEF(put, pixels8_xy2)(uint8_t *block, const uint8_t *pixels, "punpckhbw %%mm7, %%mm5 \n\t" "paddusw %%mm0, %%mm4 \n\t" "paddusw %%mm1, %%mm5 \n\t" - "xor %%"REG_a", %%"REG_a" \n\t" + "xor %%"FF_REG_a", %%"FF_REG_a" \n\t" "add %3, %1 \n\t" ".p2align 3 \n\t" "1: \n\t" - "movq (%1, %%"REG_a"), %%mm0 \n\t" - "movq 1(%1, %%"REG_a"), %%mm2 \n\t" + "movq (%1, %%"FF_REG_a"), %%mm0\n\t" + "movq 1(%1, %%"FF_REG_a"), %%mm2\n\t" "movq %%mm0, %%mm1 \n\t" "movq %%mm2, %%mm3 \n\t" "punpcklbw %%mm7, %%mm0 \n\t" @@ -67,11 +67,11 @@ STATIC void DEF(put, pixels8_xy2)(uint8_t *block, const uint8_t *pixels, "psrlw $2, %%mm4 \n\t" "psrlw $2, %%mm5 \n\t" "packuswb %%mm5, %%mm4 \n\t" - "movq %%mm4, (%2, %%"REG_a") \n\t" - "add %3, %%"REG_a" \n\t" + "movq %%mm4, (%2, %%"FF_REG_a") \n\t" + "add %3, %%"FF_REG_a" \n\t" - "movq (%1, %%"REG_a"), %%mm2 \n\t" // 0 <-> 2 1 <-> 3 - "movq 1(%1, %%"REG_a"), %%mm4 \n\t" + "movq (%1, %%"FF_REG_a"), 
%%mm2\n\t" // 0 <-> 2 1 <-> 3 + "movq 1(%1, %%"FF_REG_a"), %%mm4\n\t" "movq %%mm2, %%mm3 \n\t" "movq %%mm4, %%mm5 \n\t" "punpcklbw %%mm7, %%mm2 \n\t" @@ -87,14 +87,14 @@ STATIC void DEF(put, pixels8_xy2)(uint8_t *block, const uint8_t *pixels, "psrlw $2, %%mm0 \n\t" "psrlw $2, %%mm1 \n\t" "packuswb %%mm1, %%mm0 \n\t" - "movq %%mm0, (%2, %%"REG_a") \n\t" - "add %3, %%"REG_a" \n\t" + "movq %%mm0, (%2, %%"FF_REG_a") \n\t" + "add %3, %%"FF_REG_a" \n\t" "subl $2, %0 \n\t" "jnz 1b \n\t" :"+g"(h), "+S"(pixels) :"D"(block), "r"((x86_reg)line_size) - :REG_a, "memory"); + :FF_REG_a, "memory"); } // avg_pixels @@ -115,12 +115,12 @@ STATIC void DEF(avg, pixels8_xy2)(uint8_t *block, const uint8_t *pixels, "punpckhbw %%mm7, %%mm5 \n\t" "paddusw %%mm0, %%mm4 \n\t" "paddusw %%mm1, %%mm5 \n\t" - "xor %%"REG_a", %%"REG_a" \n\t" + "xor %%"FF_REG_a", %%"FF_REG_a" \n\t" "add %3, %1 \n\t" ".p2align 3 \n\t" "1: \n\t" - "movq (%1, %%"REG_a"), %%mm0 \n\t" - "movq 1(%1, %%"REG_a"), %%mm2 \n\t" + "movq (%1, %%"FF_REG_a"), %%mm0\n\t" + "movq 1(%1, %%"FF_REG_a"), %%mm2\n\t" "movq %%mm0, %%mm1 \n\t" "movq %%mm2, %%mm3 \n\t" "punpcklbw %%mm7, %%mm0 \n\t" @@ -135,16 +135,16 @@ STATIC void DEF(avg, pixels8_xy2)(uint8_t *block, const uint8_t *pixels, "paddusw %%mm1, %%mm5 \n\t" "psrlw $2, %%mm4 \n\t" "psrlw $2, %%mm5 \n\t" - "movq (%2, %%"REG_a"), %%mm3 \n\t" + "movq (%2, %%"FF_REG_a"), %%mm3 \n\t" "packuswb %%mm5, %%mm4 \n\t" "pcmpeqd %%mm2, %%mm2 \n\t" "paddb %%mm2, %%mm2 \n\t" PAVGB_MMX(%%mm3, %%mm4, %%mm5, %%mm2) - "movq %%mm5, (%2, %%"REG_a") \n\t" - "add %3, %%"REG_a" \n\t" + "movq %%mm5, (%2, %%"FF_REG_a") \n\t" + "add %3, %%"FF_REG_a" \n\t" - "movq (%1, %%"REG_a"), %%mm2 \n\t" // 0 <-> 2 1 <-> 3 - "movq 1(%1, %%"REG_a"), %%mm4 \n\t" + "movq (%1, %%"FF_REG_a"), %%mm2\n\t" // 0 <-> 2 1 <-> 3 + "movq 1(%1, %%"FF_REG_a"), %%mm4\n\t" "movq %%mm2, %%mm3 \n\t" "movq %%mm4, %%mm5 \n\t" "punpcklbw %%mm7, %%mm2 \n\t" @@ -159,17 +159,17 @@ STATIC void DEF(avg, pixels8_xy2)(uint8_t *block, const uint8_t *pixels, "paddusw %%mm5, %%mm1 \n\t" "psrlw $2, %%mm0 \n\t" "psrlw $2, %%mm1 \n\t" - "movq (%2, %%"REG_a"), %%mm3 \n\t" + "movq (%2, %%"FF_REG_a"), %%mm3 \n\t" "packuswb %%mm1, %%mm0 \n\t" "pcmpeqd %%mm2, %%mm2 \n\t" "paddb %%mm2, %%mm2 \n\t" PAVGB_MMX(%%mm3, %%mm0, %%mm1, %%mm2) - "movq %%mm1, (%2, %%"REG_a") \n\t" - "add %3, %%"REG_a" \n\t" + "movq %%mm1, (%2, %%"FF_REG_a") \n\t" + "add %3, %%"FF_REG_a" \n\t" "subl $2, %0 \n\t" "jnz 1b \n\t" :"+g"(h), "+S"(pixels) :"D"(block), "r"((x86_reg)line_size) - :REG_a, "memory"); + :FF_REG_a, "memory"); } diff --git a/libavcodec/x86/vc1dsp_mmx.c b/libavcodec/x86/vc1dsp_mmx.c index 95f5ee42de0179fb2c683d7421f11a2b6092efcb..9bbc234d2559274d12489b1a9cecebfa5f194dc7 100644 --- a/libavcodec/x86/vc1dsp_mmx.c +++ b/libavcodec/x86/vc1dsp_mmx.c @@ -87,7 +87,7 @@ static void vc1_put_ver_16b_shift2_mmx(int16_t *dst, int rnd, int64_t shift) { __asm__ volatile( - "mov $3, %%"REG_c" \n\t" + "mov $3, %%"FF_REG_c" \n\t" LOAD_ROUNDER_MMX("%5") "movq "MANGLE(ff_pw_9)", %%mm6 \n\t" "1: \n\t" @@ -106,12 +106,12 @@ static void vc1_put_ver_16b_shift2_mmx(int16_t *dst, SHIFT2_LINE(168, 4, 1, 2, 3) "sub %6, %0 \n\t" "add $8, %1 \n\t" - "dec %%"REG_c" \n\t" + "dec %%"FF_REG_c" \n\t" "jnz 1b \n\t" : "+r"(src), "+r"(dst) : "r"(stride), "r"(-2*stride), "m"(shift), "m"(rnd), "r"(9*stride-4) - : "%"REG_c, "memory" + : "%"FF_REG_c, "memory" ); } @@ -173,7 +173,7 @@ static void OPNAME ## vc1_shift2_mmx(uint8_t *dst, const uint8_t *src,\ {\ rnd = 8-rnd;\ __asm__ volatile(\ - "mov $8, %%"REG_c" \n\t"\ + "mov $8, 
%%"FF_REG_c" \n\t"\ LOAD_ROUNDER_MMX("%5")\ "movq "MANGLE(ff_pw_9)", %%mm6\n\t"\ "1: \n\t"\ @@ -208,12 +208,12 @@ static void OPNAME ## vc1_shift2_mmx(uint8_t *dst, const uint8_t *src,\ "movq %%mm3, (%1) \n\t"\ "add %6, %0 \n\t"\ "add %4, %1 \n\t"\ - "dec %%"REG_c" \n\t"\ + "dec %%"FF_REG_c" \n\t"\ "jnz 1b \n\t"\ : "+r"(src), "+r"(dst)\ : "r"(offset), "r"(-2*offset), "g"(stride), "m"(rnd),\ "g"(stride-offset)\ - : "%"REG_c, "memory"\ + : "%"FF_REG_c, "memory"\ );\ } diff --git a/libavutil/x86/asm.h b/libavutil/x86/asm.h index db5f3d5ac554d468c09ef0fd39998e82982dd08e..f005a3a38a76f46d2f3cf1194dd2612f608938d0 100644 --- a/libavutil/x86/asm.h +++ b/libavutil/x86/asm.h @@ -28,45 +28,45 @@ typedef struct xmm_reg { uint64_t a, b; } xmm_reg; typedef struct ymm_reg { uint64_t a, b, c, d; } ymm_reg; #if ARCH_X86_64 -# define OPSIZE "q" -# define REG_a "rax" -# define REG_b "rbx" -# define REG_c "rcx" -# define REG_d "rdx" -# define REG_D "rdi" -# define REG_S "rsi" -# define PTR_SIZE "8" +# define FF_OPSIZE "q" +# define FF_REG_a "rax" +# define FF_REG_b "rbx" +# define FF_REG_c "rcx" +# define FF_REG_d "rdx" +# define FF_REG_D "rdi" +# define FF_REG_S "rsi" +# define FF_PTR_SIZE "8" typedef int64_t x86_reg; -# define REG_SP "rsp" -# define REG_BP "rbp" -# define REGBP rbp -# define REGa rax -# define REGb rbx -# define REGc rcx -# define REGd rdx -# define REGSP rsp +# define FF_REG_SP "rsp" +# define FF_REG_BP "rbp" +# define FF_REGBP rbp +# define FF_REGa rax +# define FF_REGb rbx +# define FF_REGc rcx +# define FF_REGd rdx +# define FF_REGSP rsp #elif ARCH_X86_32 -# define OPSIZE "l" -# define REG_a "eax" -# define REG_b "ebx" -# define REG_c "ecx" -# define REG_d "edx" -# define REG_D "edi" -# define REG_S "esi" -# define PTR_SIZE "4" +# define FF_OPSIZE "l" +# define FF_REG_a "eax" +# define FF_REG_b "ebx" +# define FF_REG_c "ecx" +# define FF_REG_d "edx" +# define FF_REG_D "edi" +# define FF_REG_S "esi" +# define FF_PTR_SIZE "4" typedef int32_t x86_reg; -# define REG_SP "esp" -# define REG_BP "ebp" -# define REGBP ebp -# define REGa eax -# define REGb ebx -# define REGc ecx -# define REGd edx -# define REGSP esp +# define FF_REG_SP "esp" +# define FF_REG_BP "ebp" +# define FF_REGBP ebp +# define FF_REGa eax +# define FF_REGb ebx +# define FF_REGc ecx +# define FF_REGd edx +# define FF_REGSP esp #else typedef int x86_reg; #endif diff --git a/libavutil/x86/cpu.c b/libavutil/x86/cpu.c index 64ea7e73138891c390fd93d1fd1c82f89fad1f68..328ce6de9c2cace55f66689d2c55312a76156d76 100644 --- a/libavutil/x86/cpu.c +++ b/libavutil/x86/cpu.c @@ -41,9 +41,9 @@ /* ebx saving is necessary for PIC. 
gcc seems unable to see it alone */ #define cpuid(index, eax, ebx, ecx, edx) \ __asm__ volatile ( \ - "mov %%"REG_b", %%"REG_S" \n\t" \ - "cpuid \n\t" \ - "xchg %%"REG_b", %%"REG_S \ + "mov %%"FF_REG_b", %%"FF_REG_S" \n\t" \ + "cpuid \n\t" \ + "xchg %%"FF_REG_b", %%"FF_REG_S \ : "=a" (eax), "=S" (ebx), "=c" (ecx), "=d" (edx) \ : "0" (index)) diff --git a/libswscale/utils.c b/libswscale/utils.c index dcac4b48292c4fdeea312775eeea1655eb11a5d0..ee309bc18ca37bc4b48ac90d12f6a6052800eb4b 100644 --- a/libswscale/utils.c +++ b/libswscale/utils.c @@ -649,9 +649,9 @@ static av_cold int init_hscaler_mmxext(int dstW, int xInc, uint8_t *filterCode, "jmp 9f \n\t" // Begin "0: \n\t" - "movq (%%"REG_d", %%"REG_a"), %%mm3 \n\t" - "movd (%%"REG_c", %%"REG_S"), %%mm0 \n\t" - "movd 1(%%"REG_c", %%"REG_S"), %%mm1 \n\t" + "movq (%%"FF_REG_d", %%"FF_REG_a"), %%mm3 \n\t" + "movd (%%"FF_REG_c", %%"FF_REG_S"), %%mm0 \n\t" + "movd 1(%%"FF_REG_c", %%"FF_REG_S"), %%mm1 \n\t" "punpcklbw %%mm7, %%mm1 \n\t" "punpcklbw %%mm7, %%mm0 \n\t" "pshufw $0xFF, %%mm1, %%mm1 \n\t" @@ -659,14 +659,14 @@ static av_cold int init_hscaler_mmxext(int dstW, int xInc, uint8_t *filterCode, "pshufw $0xFF, %%mm0, %%mm0 \n\t" "2: \n\t" "psubw %%mm1, %%mm0 \n\t" - "movl 8(%%"REG_b", %%"REG_a"), %%esi \n\t" + "movl 8(%%"FF_REG_b", %%"FF_REG_a"), %%esi \n\t" "pmullw %%mm3, %%mm0 \n\t" "psllw $7, %%mm1 \n\t" "paddw %%mm1, %%mm0 \n\t" - "movq %%mm0, (%%"REG_D", %%"REG_a") \n\t" + "movq %%mm0, (%%"FF_REG_D", %%"FF_REG_a") \n\t" - "add $8, %%"REG_a" \n\t" + "add $8, %%"FF_REG_a" \n\t" // End "9: \n\t" // "int $3 \n\t" @@ -689,22 +689,22 @@ static av_cold int init_hscaler_mmxext(int dstW, int xInc, uint8_t *filterCode, "jmp 9f \n\t" // Begin "0: \n\t" - "movq (%%"REG_d", %%"REG_a"), %%mm3 \n\t" - "movd (%%"REG_c", %%"REG_S"), %%mm0 \n\t" + "movq (%%"FF_REG_d", %%"FF_REG_a"), %%mm3 \n\t" + "movd (%%"FF_REG_c", %%"FF_REG_S"), %%mm0 \n\t" "punpcklbw %%mm7, %%mm0 \n\t" "pshufw $0xFF, %%mm0, %%mm1 \n\t" "1: \n\t" "pshufw $0xFF, %%mm0, %%mm0 \n\t" "2: \n\t" "psubw %%mm1, %%mm0 \n\t" - "movl 8(%%"REG_b", %%"REG_a"), %%esi \n\t" + "movl 8(%%"FF_REG_b", %%"FF_REG_a"), %%esi \n\t" "pmullw %%mm3, %%mm0 \n\t" "psllw $7, %%mm1 \n\t" "paddw %%mm1, %%mm0 \n\t" - "movq %%mm0, (%%"REG_D", %%"REG_a") \n\t" + "movq %%mm0, (%%"FF_REG_D", %%"FF_REG_a") \n\t" - "add $8, %%"REG_a" \n\t" + "add $8, %%"FF_REG_a" \n\t" // End "9: \n\t" // "int $3 \n\t" diff --git a/libswscale/x86/rgb2rgb_template.c b/libswscale/x86/rgb2rgb_template.c index a67a8a6029332fb02a60432f0f2b8a3291f31584..3b9c82b2edd9eec27d12c62a6261b25865d87026 100644 --- a/libswscale/x86/rgb2rgb_template.c +++ b/libswscale/x86/rgb2rgb_template.c @@ -1109,43 +1109,43 @@ static inline void RENAME(rgb24tobgr24)(const uint8_t *src, uint8_t *dst, int sr unsigned i; x86_reg mmx_size= 23 - src_size; __asm__ volatile ( - "test %%"REG_a", %%"REG_a" \n\t" + "test %%"FF_REG_a", %%"FF_REG_a" \n\t" "jns 2f \n\t" "movq "MANGLE(mask24r)", %%mm5 \n\t" "movq "MANGLE(mask24g)", %%mm6 \n\t" "movq "MANGLE(mask24b)", %%mm7 \n\t" ".p2align 4 \n\t" "1: \n\t" - PREFETCH" 32(%1, %%"REG_a") \n\t" - "movq (%1, %%"REG_a"), %%mm0 \n\t" // BGR BGR BG - "movq (%1, %%"REG_a"), %%mm1 \n\t" // BGR BGR BG - "movq 2(%1, %%"REG_a"), %%mm2 \n\t" // R BGR BGR B + PREFETCH" 32(%1, %%"FF_REG_a") \n\t" + "movq (%1, %%"FF_REG_a"), %%mm0 \n\t" // BGR BGR BG + "movq (%1, %%"FF_REG_a"), %%mm1 \n\t" // BGR BGR BG + "movq 2(%1, %%"FF_REG_a"), %%mm2 \n\t" // R BGR BGR B "psllq $16, %%mm0 \n\t" // 00 BGR BGR "pand %%mm5, %%mm0 \n\t" "pand %%mm6, %%mm1 \n\t" "pand 
%%mm7, %%mm2 \n\t" "por %%mm0, %%mm1 \n\t" "por %%mm2, %%mm1 \n\t" - "movq 6(%1, %%"REG_a"), %%mm0 \n\t" // BGR BGR BG - MOVNTQ" %%mm1, (%2, %%"REG_a") \n\t" // RGB RGB RG - "movq 8(%1, %%"REG_a"), %%mm1 \n\t" // R BGR BGR B - "movq 10(%1, %%"REG_a"), %%mm2 \n\t" // GR BGR BGR + "movq 6(%1, %%"FF_REG_a"), %%mm0 \n\t" // BGR BGR BG + MOVNTQ" %%mm1, (%2, %%"FF_REG_a") \n\t" // RGB RGB RG + "movq 8(%1, %%"FF_REG_a"), %%mm1 \n\t" // R BGR BGR B + "movq 10(%1, %%"FF_REG_a"), %%mm2 \n\t" // GR BGR BGR "pand %%mm7, %%mm0 \n\t" "pand %%mm5, %%mm1 \n\t" "pand %%mm6, %%mm2 \n\t" "por %%mm0, %%mm1 \n\t" "por %%mm2, %%mm1 \n\t" - "movq 14(%1, %%"REG_a"), %%mm0 \n\t" // R BGR BGR B - MOVNTQ" %%mm1, 8(%2, %%"REG_a") \n\t" // B RGB RGB R - "movq 16(%1, %%"REG_a"), %%mm1 \n\t" // GR BGR BGR - "movq 18(%1, %%"REG_a"), %%mm2 \n\t" // BGR BGR BG + "movq 14(%1, %%"FF_REG_a"), %%mm0 \n\t" // R BGR BGR B + MOVNTQ" %%mm1, 8(%2, %%"FF_REG_a")\n\t" // B RGB RGB R + "movq 16(%1, %%"FF_REG_a"), %%mm1 \n\t" // GR BGR BGR + "movq 18(%1, %%"FF_REG_a"), %%mm2 \n\t" // BGR BGR BG "pand %%mm6, %%mm0 \n\t" "pand %%mm7, %%mm1 \n\t" "pand %%mm5, %%mm2 \n\t" "por %%mm0, %%mm1 \n\t" "por %%mm2, %%mm1 \n\t" - MOVNTQ" %%mm1, 16(%2, %%"REG_a") \n\t" - "add $24, %%"REG_a" \n\t" + MOVNTQ" %%mm1, 16(%2, %%"FF_REG_a")\n\t" + "add $24, %%"FF_REG_a" \n\t" " js 1b \n\t" "2: \n\t" : "+a" (mmx_size) @@ -1180,20 +1180,20 @@ static inline void RENAME(yuvPlanartoyuy2)(const uint8_t *ysrc, const uint8_t *u for (y=0; y>1; for (y=0; y>1; for (y=0; yredDither), \ "m" (dummy), "m" (dummy), "m" (dummy),\ "r" (dest), "m" (dstW_reg), "m"(uv_off) \ - : "%"REG_a, "%"REG_d, "%"REG_S \ + : "%"FF_REG_a, "%"FF_REG_d, "%"FF_REG_S \ ); #define YSCALEYUV2PACKEDX_ACCURATE_UV \ __asm__ volatile(\ - "xor %%"REG_a", %%"REG_a" \n\t"\ + "xor %%"FF_REG_a", %%"FF_REG_a" \n\t"\ ".p2align 4 \n\t"\ "nop \n\t"\ "1: \n\t"\ - "lea "CHR_MMX_FILTER_OFFSET"(%0), %%"REG_d" \n\t"\ - "mov (%%"REG_d"), %%"REG_S" \n\t"\ + "lea "CHR_MMX_FILTER_OFFSET"(%0), %%"FF_REG_d" \n\t"\ + "mov (%%"FF_REG_d"), %%"FF_REG_S" \n\t"\ "pxor %%mm4, %%mm4 \n\t"\ "pxor %%mm5, %%mm5 \n\t"\ "pxor %%mm6, %%mm6 \n\t"\ "pxor %%mm7, %%mm7 \n\t"\ ".p2align 4 \n\t"\ "2: \n\t"\ - "movq (%%"REG_S", %%"REG_a"), %%mm0 \n\t" /* UsrcData */\ - "add %6, %%"REG_S" \n\t" \ - "movq (%%"REG_S", %%"REG_a"), %%mm2 \n\t" /* VsrcData */\ - "mov "STR(APCK_PTR2)"(%%"REG_d"), %%"REG_S" \n\t"\ - "movq (%%"REG_S", %%"REG_a"), %%mm1 \n\t" /* UsrcData */\ + "movq (%%"FF_REG_S", %%"FF_REG_a"), %%mm0 \n\t" /* UsrcData */\ + "add %6, %%"FF_REG_S" \n\t" \ + "movq (%%"FF_REG_S", %%"FF_REG_a"), %%mm2 \n\t" /* VsrcData */\ + "mov "STR(APCK_PTR2)"(%%"FF_REG_d"), %%"FF_REG_S" \n\t"\ + "movq (%%"FF_REG_S", %%"FF_REG_a"), %%mm1 \n\t" /* UsrcData */\ "movq %%mm0, %%mm3 \n\t"\ "punpcklwd %%mm1, %%mm0 \n\t"\ "punpckhwd %%mm1, %%mm3 \n\t"\ - "movq "STR(APCK_COEF)"(%%"REG_d"),%%mm1 \n\t" /* filterCoeff */\ + "movq "STR(APCK_COEF)"(%%"FF_REG_d"),%%mm1 \n\t" /* filterCoeff */\ "pmaddwd %%mm1, %%mm0 \n\t"\ "pmaddwd %%mm1, %%mm3 \n\t"\ "paddd %%mm0, %%mm4 \n\t"\ "paddd %%mm3, %%mm5 \n\t"\ - "add %6, %%"REG_S" \n\t" \ - "movq (%%"REG_S", %%"REG_a"), %%mm3 \n\t" /* VsrcData */\ - "mov "STR(APCK_SIZE)"(%%"REG_d"), %%"REG_S" \n\t"\ - "add $"STR(APCK_SIZE)", %%"REG_d" \n\t"\ - "test %%"REG_S", %%"REG_S" \n\t"\ + "add %6, %%"FF_REG_S" \n\t"\ + "movq (%%"FF_REG_S", %%"FF_REG_a"), %%mm3 \n\t" /* VsrcData */\ + "mov "STR(APCK_SIZE)"(%%"FF_REG_d"), %%"FF_REG_S" \n\t"\ + "add $"STR(APCK_SIZE)", %%"FF_REG_d" \n\t"\ + "test %%"FF_REG_S", %%"FF_REG_S" \n\t"\ 
"movq %%mm2, %%mm0 \n\t"\ "punpcklwd %%mm3, %%mm2 \n\t"\ "punpckhwd %%mm3, %%mm0 \n\t"\ @@ -148,30 +148,30 @@ "movq %%mm6, "V_TEMP"(%0) \n\t"\ #define YSCALEYUV2PACKEDX_ACCURATE_YA(offset) \ - "lea "offset"(%0), %%"REG_d" \n\t"\ - "mov (%%"REG_d"), %%"REG_S" \n\t"\ + "lea "offset"(%0), %%"FF_REG_d" \n\t"\ + "mov (%%"FF_REG_d"), %%"FF_REG_S" \n\t"\ "pxor %%mm1, %%mm1 \n\t"\ "pxor %%mm5, %%mm5 \n\t"\ "pxor %%mm7, %%mm7 \n\t"\ "pxor %%mm6, %%mm6 \n\t"\ ".p2align 4 \n\t"\ "2: \n\t"\ - "movq (%%"REG_S", %%"REG_a", 2), %%mm0 \n\t" /* Y1srcData */\ - "movq 8(%%"REG_S", %%"REG_a", 2), %%mm2 \n\t" /* Y2srcData */\ - "mov "STR(APCK_PTR2)"(%%"REG_d"), %%"REG_S" \n\t"\ - "movq (%%"REG_S", %%"REG_a", 2), %%mm4 \n\t" /* Y1srcData */\ + "movq (%%"FF_REG_S", %%"FF_REG_a", 2), %%mm0 \n\t" /* Y1srcData */\ + "movq 8(%%"FF_REG_S", %%"FF_REG_a", 2), %%mm2 \n\t" /* Y2srcData */\ + "mov "STR(APCK_PTR2)"(%%"FF_REG_d"), %%"FF_REG_S" \n\t"\ + "movq (%%"FF_REG_S", %%"FF_REG_a", 2), %%mm4 \n\t" /* Y1srcData */\ "movq %%mm0, %%mm3 \n\t"\ "punpcklwd %%mm4, %%mm0 \n\t"\ "punpckhwd %%mm4, %%mm3 \n\t"\ - "movq "STR(APCK_COEF)"(%%"REG_d"), %%mm4 \n\t" /* filterCoeff */\ + "movq "STR(APCK_COEF)"(%%"FF_REG_d"), %%mm4 \n\t" /* filterCoeff */\ "pmaddwd %%mm4, %%mm0 \n\t"\ "pmaddwd %%mm4, %%mm3 \n\t"\ "paddd %%mm0, %%mm1 \n\t"\ "paddd %%mm3, %%mm5 \n\t"\ - "movq 8(%%"REG_S", %%"REG_a", 2), %%mm3 \n\t" /* Y2srcData */\ - "mov "STR(APCK_SIZE)"(%%"REG_d"), %%"REG_S" \n\t"\ - "add $"STR(APCK_SIZE)", %%"REG_d" \n\t"\ - "test %%"REG_S", %%"REG_S" \n\t"\ + "movq 8(%%"FF_REG_S", %%"FF_REG_a", 2), %%mm3 \n\t" /* Y2srcData */\ + "mov "STR(APCK_SIZE)"(%%"FF_REG_d"), %%"FF_REG_S" \n\t"\ + "add $"STR(APCK_SIZE)", %%"FF_REG_d" \n\t"\ + "test %%"FF_REG_S", %%"FF_REG_S" \n\t"\ "movq %%mm2, %%mm0 \n\t"\ "punpcklwd %%mm3, %%mm2 \n\t"\ "punpckhwd %%mm3, %%mm0 \n\t"\ @@ -278,13 +278,13 @@ static void RENAME(yuv2rgb32_X_ar)(SwsContext *c, const int16_t *lumFilter, "psraw $3, %%mm1 \n\t" "psraw $3, %%mm7 \n\t" "packuswb %%mm7, %%mm1 \n\t" - WRITEBGR32(%4, %5, %%REGa, %%mm3, %%mm4, %%mm5, %%mm1, %%mm0, %%mm7, %%mm2, %%mm6) + WRITEBGR32(%4, %5, %%FF_REGa, %%mm3, %%mm4, %%mm5, %%mm1, %%mm0, %%mm7, %%mm2, %%mm6) YSCALEYUV2PACKEDX_END } else { YSCALEYUV2PACKEDX_ACCURATE YSCALEYUV2RGBX "pcmpeqd %%mm7, %%mm7 \n\t" - WRITEBGR32(%4, %5, %%REGa, %%mm2, %%mm4, %%mm5, %%mm7, %%mm0, %%mm1, %%mm3, %%mm6) + WRITEBGR32(%4, %5, %%FF_REGa, %%mm2, %%mm4, %%mm5, %%mm7, %%mm0, %%mm1, %%mm3, %%mm6) YSCALEYUV2PACKEDX_END } } @@ -307,13 +307,13 @@ static void RENAME(yuv2rgb32_X)(SwsContext *c, const int16_t *lumFilter, "psraw $3, %%mm1 \n\t" "psraw $3, %%mm7 \n\t" "packuswb %%mm7, %%mm1 \n\t" - WRITEBGR32(%4, %5, %%REGa, %%mm2, %%mm4, %%mm5, %%mm1, %%mm0, %%mm7, %%mm3, %%mm6) + WRITEBGR32(%4, %5, %%FF_REGa, %%mm2, %%mm4, %%mm5, %%mm1, %%mm0, %%mm7, %%mm3, %%mm6) YSCALEYUV2PACKEDX_END } else { YSCALEYUV2PACKEDX YSCALEYUV2RGBX "pcmpeqd %%mm7, %%mm7 \n\t" - WRITEBGR32(%4, %5, %%REGa, %%mm2, %%mm4, %%mm5, %%mm7, %%mm0, %%mm1, %%mm3, %%mm6) + WRITEBGR32(%4, %5, %%FF_REGa, %%mm2, %%mm4, %%mm5, %%mm7, %%mm0, %%mm1, %%mm3, %%mm6) YSCALEYUV2PACKEDX_END } } @@ -366,7 +366,7 @@ static void RENAME(yuv2rgb565_X_ar)(SwsContext *c, const int16_t *lumFilter, "paddusb "GREEN_DITHER"(%0), %%mm4\n\t" "paddusb "RED_DITHER"(%0), %%mm5\n\t" #endif - WRITERGB16(%4, %5, %%REGa) + WRITERGB16(%4, %5, %%FF_REGa) YSCALEYUV2PACKEDX_END } @@ -390,7 +390,7 @@ static void RENAME(yuv2rgb565_X)(SwsContext *c, const int16_t *lumFilter, "paddusb "GREEN_DITHER"(%0), %%mm4 \n\t" "paddusb "RED_DITHER"(%0), 
%%mm5 \n\t" #endif - WRITERGB16(%4, %5, %%REGa) + WRITERGB16(%4, %5, %%FF_REGa) YSCALEYUV2PACKEDX_END } @@ -443,7 +443,7 @@ static void RENAME(yuv2rgb555_X_ar)(SwsContext *c, const int16_t *lumFilter, "paddusb "GREEN_DITHER"(%0), %%mm4\n\t" "paddusb "RED_DITHER"(%0), %%mm5\n\t" #endif - WRITERGB15(%4, %5, %%REGa) + WRITERGB15(%4, %5, %%FF_REGa) YSCALEYUV2PACKEDX_END } @@ -467,7 +467,7 @@ static void RENAME(yuv2rgb555_X)(SwsContext *c, const int16_t *lumFilter, "paddusb "GREEN_DITHER"(%0), %%mm4 \n\t" "paddusb "RED_DITHER"(%0), %%mm5 \n\t" #endif - WRITERGB15(%4, %5, %%REGa) + WRITERGB15(%4, %5, %%FF_REGa) YSCALEYUV2PACKEDX_END } @@ -593,14 +593,14 @@ static void RENAME(yuv2bgr24_X_ar)(SwsContext *c, const int16_t *lumFilter, YSCALEYUV2PACKEDX_ACCURATE YSCALEYUV2RGBX - "pxor %%mm7, %%mm7 \n\t" - "lea (%%"REG_a", %%"REG_a", 2), %%"REG_c"\n\t" //FIXME optimize - "add %4, %%"REG_c" \n\t" - WRITEBGR24(%%REGc, %5, %%REGa) + "pxor %%mm7, %%mm7 \n\t" + "lea (%%"FF_REG_a", %%"FF_REG_a", 2), %%"FF_REG_c" \n\t" // FIXME optimize + "add %4, %%"FF_REG_c" \n\t" + WRITEBGR24(%%FF_REGc, %5, %%FF_REGa) :: "r" (&c->redDither), "m" (dummy), "m" (dummy), "m" (dummy), "r" (dest), "m" (dstW_reg), "m"(uv_off) - : "%"REG_a, "%"REG_c, "%"REG_d, "%"REG_S + : "%"FF_REG_a, "%"FF_REG_c, "%"FF_REG_d, "%"FF_REG_S ); } @@ -617,14 +617,14 @@ static void RENAME(yuv2bgr24_X)(SwsContext *c, const int16_t *lumFilter, YSCALEYUV2PACKEDX YSCALEYUV2RGBX - "pxor %%mm7, %%mm7 \n\t" - "lea (%%"REG_a", %%"REG_a", 2), %%"REG_c" \n\t" //FIXME optimize - "add %4, %%"REG_c" \n\t" - WRITEBGR24(%%REGc, %5, %%REGa) + "pxor %%mm7, %%mm7 \n\t" + "lea (%%"FF_REG_a", %%"FF_REG_a", 2), %%"FF_REG_c" \n\t" // FIXME optimize + "add %4, %%"FF_REG_c" \n\t" + WRITEBGR24(%%FF_REGc, %5, %%FF_REGa) :: "r" (&c->redDither), "m" (dummy), "m" (dummy), "m" (dummy), "r" (dest), "m" (dstW_reg), "m"(uv_off) - : "%"REG_a, "%"REG_c, "%"REG_d, "%"REG_S + : "%"FF_REG_a, "%"FF_REG_c, "%"FF_REG_d, "%"FF_REG_S ); } @@ -662,7 +662,7 @@ static void RENAME(yuv2yuyv422_X_ar)(SwsContext *c, const int16_t *lumFilter, "psraw $3, %%mm4 \n\t" "psraw $3, %%mm1 \n\t" "psraw $3, %%mm7 \n\t" - WRITEYUY2(%4, %5, %%REGa) + WRITEYUY2(%4, %5, %%FF_REGa) YSCALEYUV2PACKEDX_END } @@ -683,7 +683,7 @@ static void RENAME(yuv2yuyv422_X)(SwsContext *c, const int16_t *lumFilter, "psraw $3, %%mm4 \n\t" "psraw $3, %%mm1 \n\t" "psraw $3, %%mm7 \n\t" - WRITEYUY2(%4, %5, %%REGa) + WRITEYUY2(%4, %5, %%FF_REGa) YSCALEYUV2PACKEDX_END } @@ -794,37 +794,37 @@ static void RENAME(yuv2rgb32_2)(SwsContext *c, const int16_t *buf[2], *(const uint16_t **)(&c->u_temp)=abuf0; *(const uint16_t **)(&c->v_temp)=abuf1; __asm__ volatile( - "mov %%"REG_b", "ESP_OFFSET"(%5) \n\t" - "mov %4, %%"REG_b" \n\t" - "push %%"REG_BP" \n\t" - YSCALEYUV2RGB(%%REGBP, %5) + "mov %%"FF_REG_b", "ESP_OFFSET"(%5) \n\t" + "mov %4, %%"FF_REG_b" \n\t" + "push %%"FF_REG_BP" \n\t" + YSCALEYUV2RGB(%%FF_REGBP, %5) "push %0 \n\t" "push %1 \n\t" "mov "U_TEMP"(%5), %0 \n\t" "mov "V_TEMP"(%5), %1 \n\t" - YSCALEYUV2RGB_YA(%%REGBP, %5, %0, %1) + YSCALEYUV2RGB_YA(%%FF_REGBP, %5, %0, %1) "psraw $3, %%mm1 \n\t" /* abuf0[eax] - abuf1[eax] >>7*/ "psraw $3, %%mm7 \n\t" /* abuf0[eax] - abuf1[eax] >>7*/ "packuswb %%mm7, %%mm1 \n\t" "pop %1 \n\t" "pop %0 \n\t" - WRITEBGR32(%%REGb, 8280(%5), %%REGBP, %%mm2, %%mm4, %%mm5, %%mm1, %%mm0, %%mm7, %%mm3, %%mm6) - "pop %%"REG_BP" \n\t" - "mov "ESP_OFFSET"(%5), %%"REG_b" \n\t" + WRITEBGR32(%%FF_REGb, 8280(%5), %%FF_REGBP, %%mm2, %%mm4, %%mm5, %%mm1, %%mm0, %%mm7, %%mm3, %%mm6) + "pop %%"FF_REG_BP" \n\t" + "mov 
"ESP_OFFSET"(%5), %%"FF_REG_b" \n\t" :: "c" (buf0), "d" (buf1), "S" (ubuf0), "D" (ubuf1), "m" (dest), "a" (&c->redDither) ); #endif } else { __asm__ volatile( - "mov %%"REG_b", "ESP_OFFSET"(%5) \n\t" - "mov %4, %%"REG_b" \n\t" - "push %%"REG_BP" \n\t" - YSCALEYUV2RGB(%%REGBP, %5) + "mov %%"FF_REG_b", "ESP_OFFSET"(%5) \n\t" + "mov %4, %%"FF_REG_b" \n\t" + "push %%"FF_REG_BP" \n\t" + YSCALEYUV2RGB(%%FF_REGBP, %5) "pcmpeqd %%mm7, %%mm7 \n\t" - WRITEBGR32(%%REGb, 8280(%5), %%REGBP, %%mm2, %%mm4, %%mm5, %%mm7, %%mm0, %%mm1, %%mm3, %%mm6) - "pop %%"REG_BP" \n\t" - "mov "ESP_OFFSET"(%5), %%"REG_b" \n\t" + WRITEBGR32(%%FF_REGb, 8280(%5), %%FF_REGBP, %%mm2, %%mm4, %%mm5, %%mm7, %%mm0, %%mm1, %%mm3, %%mm6) + "pop %%"FF_REG_BP" \n\t" + "mov "ESP_OFFSET"(%5), %%"FF_REG_b" \n\t" :: "c" (buf0), "d" (buf1), "S" (ubuf0), "D" (ubuf1), "m" (dest), "a" (&c->redDither) ); @@ -841,14 +841,14 @@ static void RENAME(yuv2bgr24_2)(SwsContext *c, const int16_t *buf[2], //Note 8280 == DSTW_OFFSET but the preprocessor can't handle that there :( __asm__ volatile( - "mov %%"REG_b", "ESP_OFFSET"(%5) \n\t" - "mov %4, %%"REG_b" \n\t" - "push %%"REG_BP" \n\t" - YSCALEYUV2RGB(%%REGBP, %5) + "mov %%"FF_REG_b", "ESP_OFFSET"(%5) \n\t" + "mov %4, %%"FF_REG_b" \n\t" + "push %%"FF_REG_BP" \n\t" + YSCALEYUV2RGB(%%FF_REGBP, %5) "pxor %%mm7, %%mm7 \n\t" - WRITEBGR24(%%REGb, 8280(%5), %%REGBP) - "pop %%"REG_BP" \n\t" - "mov "ESP_OFFSET"(%5), %%"REG_b" \n\t" + WRITEBGR24(%%FF_REGb, 8280(%5), %%FF_REGBP) + "pop %%"FF_REG_BP" \n\t" + "mov "ESP_OFFSET"(%5), %%"FF_REG_b" \n\t" :: "c" (buf0), "d" (buf1), "S" (ubuf0), "D" (ubuf1), "m" (dest), "a" (&c->redDither) ); @@ -864,10 +864,10 @@ static void RENAME(yuv2rgb555_2)(SwsContext *c, const int16_t *buf[2], //Note 8280 == DSTW_OFFSET but the preprocessor can't handle that there :( __asm__ volatile( - "mov %%"REG_b", "ESP_OFFSET"(%5) \n\t" - "mov %4, %%"REG_b" \n\t" - "push %%"REG_BP" \n\t" - YSCALEYUV2RGB(%%REGBP, %5) + "mov %%"FF_REG_b", "ESP_OFFSET"(%5) \n\t" + "mov %4, %%"FF_REG_b" \n\t" + "push %%"FF_REG_BP" \n\t" + YSCALEYUV2RGB(%%FF_REGBP, %5) "pxor %%mm7, %%mm7 \n\t" /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */ #ifdef DITHER1XBPP @@ -875,9 +875,9 @@ static void RENAME(yuv2rgb555_2)(SwsContext *c, const int16_t *buf[2], "paddusb "GREEN_DITHER"(%5), %%mm4 \n\t" "paddusb "RED_DITHER"(%5), %%mm5 \n\t" #endif - WRITERGB15(%%REGb, 8280(%5), %%REGBP) - "pop %%"REG_BP" \n\t" - "mov "ESP_OFFSET"(%5), %%"REG_b" \n\t" + WRITERGB15(%%FF_REGb, 8280(%5), %%FF_REGBP) + "pop %%"FF_REG_BP" \n\t" + "mov "ESP_OFFSET"(%5), %%"FF_REG_b" \n\t" :: "c" (buf0), "d" (buf1), "S" (ubuf0), "D" (ubuf1), "m" (dest), "a" (&c->redDither) ); @@ -893,10 +893,10 @@ static void RENAME(yuv2rgb565_2)(SwsContext *c, const int16_t *buf[2], //Note 8280 == DSTW_OFFSET but the preprocessor can't handle that there :( __asm__ volatile( - "mov %%"REG_b", "ESP_OFFSET"(%5) \n\t" - "mov %4, %%"REG_b" \n\t" - "push %%"REG_BP" \n\t" - YSCALEYUV2RGB(%%REGBP, %5) + "mov %%"FF_REG_b", "ESP_OFFSET"(%5) \n\t" + "mov %4, %%"FF_REG_b" \n\t" + "push %%"FF_REG_BP" \n\t" + YSCALEYUV2RGB(%%FF_REGBP, %5) "pxor %%mm7, %%mm7 \n\t" /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */ #ifdef DITHER1XBPP @@ -904,9 +904,9 @@ static void RENAME(yuv2rgb565_2)(SwsContext *c, const int16_t *buf[2], "paddusb "GREEN_DITHER"(%5), %%mm4 \n\t" "paddusb "RED_DITHER"(%5), %%mm5 \n\t" #endif - WRITERGB16(%%REGb, 8280(%5), %%REGBP) - "pop %%"REG_BP" \n\t" - "mov "ESP_OFFSET"(%5), %%"REG_b" \n\t" + WRITERGB16(%%FF_REGb, 8280(%5), %%FF_REGBP) + "pop %%"FF_REG_BP" \n\t" + "mov 
"ESP_OFFSET"(%5), %%"FF_REG_b" \n\t" :: "c" (buf0), "d" (buf1), "S" (ubuf0), "D" (ubuf1), "m" (dest), "a" (&c->redDither) ); @@ -962,13 +962,13 @@ static void RENAME(yuv2yuyv422_2)(SwsContext *c, const int16_t *buf[2], //Note 8280 == DSTW_OFFSET but the preprocessor can't handle that there :( __asm__ volatile( - "mov %%"REG_b", "ESP_OFFSET"(%5) \n\t" - "mov %4, %%"REG_b" \n\t" - "push %%"REG_BP" \n\t" - YSCALEYUV2PACKED(%%REGBP, %5) - WRITEYUY2(%%REGb, 8280(%5), %%REGBP) - "pop %%"REG_BP" \n\t" - "mov "ESP_OFFSET"(%5), %%"REG_b" \n\t" + "mov %%"FF_REG_b", "ESP_OFFSET"(%5) \n\t" + "mov %4, %%"FF_REG_b" \n\t" + "push %%"FF_REG_BP" \n\t" + YSCALEYUV2PACKED(%%FF_REGBP, %5) + WRITEYUY2(%%FF_REGb, 8280(%5), %%FF_REGBP) + "pop %%"FF_REG_BP" \n\t" + "mov "ESP_OFFSET"(%5), %%"FF_REG_b" \n\t" :: "c" (buf0), "d" (buf1), "S" (ubuf0), "D" (ubuf1), "m" (dest), "a" (&c->redDither) ); @@ -1104,27 +1104,27 @@ static void RENAME(yuv2rgb32_1)(SwsContext *c, const int16_t *buf0, const int16_t *ubuf1 = ubuf[0]; if (CONFIG_SWSCALE_ALPHA && c->alpPixBuf) { __asm__ volatile( - "mov %%"REG_b", "ESP_OFFSET"(%5) \n\t" - "mov %4, %%"REG_b" \n\t" - "push %%"REG_BP" \n\t" - YSCALEYUV2RGB1(%%REGBP, %5) - YSCALEYUV2RGB1_ALPHA(%%REGBP) - WRITEBGR32(%%REGb, 8280(%5), %%REGBP, %%mm2, %%mm4, %%mm5, %%mm7, %%mm0, %%mm1, %%mm3, %%mm6) - "pop %%"REG_BP" \n\t" - "mov "ESP_OFFSET"(%5), %%"REG_b" \n\t" + "mov %%"FF_REG_b", "ESP_OFFSET"(%5) \n\t" + "mov %4, %%"FF_REG_b" \n\t" + "push %%"FF_REG_BP" \n\t" + YSCALEYUV2RGB1(%%FF_REGBP, %5) + YSCALEYUV2RGB1_ALPHA(%%FF_REGBP) + WRITEBGR32(%%FF_REGb, 8280(%5), %%FF_REGBP, %%mm2, %%mm4, %%mm5, %%mm7, %%mm0, %%mm1, %%mm3, %%mm6) + "pop %%"FF_REG_BP" \n\t" + "mov "ESP_OFFSET"(%5), %%"FF_REG_b" \n\t" :: "c" (buf0), "d" (abuf0), "S" (ubuf0), "D" (ubuf1), "m" (dest), "a" (&c->redDither) ); } else { __asm__ volatile( - "mov %%"REG_b", "ESP_OFFSET"(%5) \n\t" - "mov %4, %%"REG_b" \n\t" - "push %%"REG_BP" \n\t" - YSCALEYUV2RGB1(%%REGBP, %5) + "mov %%"FF_REG_b", "ESP_OFFSET"(%5) \n\t" + "mov %4, %%"FF_REG_b" \n\t" + "push %%"FF_REG_BP" \n\t" + YSCALEYUV2RGB1(%%FF_REGBP, %5) "pcmpeqd %%mm7, %%mm7 \n\t" - WRITEBGR32(%%REGb, 8280(%5), %%REGBP, %%mm2, %%mm4, %%mm5, %%mm7, %%mm0, %%mm1, %%mm3, %%mm6) - "pop %%"REG_BP" \n\t" - "mov "ESP_OFFSET"(%5), %%"REG_b" \n\t" + WRITEBGR32(%%FF_REGb, 8280(%5), %%FF_REGBP, %%mm2, %%mm4, %%mm5, %%mm7, %%mm0, %%mm1, %%mm3, %%mm6) + "pop %%"FF_REG_BP" \n\t" + "mov "ESP_OFFSET"(%5), %%"FF_REG_b" \n\t" :: "c" (buf0), "d" (buf1), "S" (ubuf0), "D" (ubuf1), "m" (dest), "a" (&c->redDither) ); @@ -1133,27 +1133,27 @@ static void RENAME(yuv2rgb32_1)(SwsContext *c, const int16_t *buf0, const int16_t *ubuf1 = ubuf[1]; if (CONFIG_SWSCALE_ALPHA && c->alpPixBuf) { __asm__ volatile( - "mov %%"REG_b", "ESP_OFFSET"(%5) \n\t" - "mov %4, %%"REG_b" \n\t" - "push %%"REG_BP" \n\t" - YSCALEYUV2RGB1b(%%REGBP, %5) - YSCALEYUV2RGB1_ALPHA(%%REGBP) - WRITEBGR32(%%REGb, 8280(%5), %%REGBP, %%mm2, %%mm4, %%mm5, %%mm7, %%mm0, %%mm1, %%mm3, %%mm6) - "pop %%"REG_BP" \n\t" - "mov "ESP_OFFSET"(%5), %%"REG_b" \n\t" + "mov %%"FF_REG_b", "ESP_OFFSET"(%5) \n\t" + "mov %4, %%"FF_REG_b" \n\t" + "push %%"FF_REG_BP" \n\t" + YSCALEYUV2RGB1b(%%FF_REGBP, %5) + YSCALEYUV2RGB1_ALPHA(%%FF_REGBP) + WRITEBGR32(%%FF_REGb, 8280(%5), %%FF_REGBP, %%mm2, %%mm4, %%mm5, %%mm7, %%mm0, %%mm1, %%mm3, %%mm6) + "pop %%"FF_REG_BP" \n\t" + "mov "ESP_OFFSET"(%5), %%"FF_REG_b" \n\t" :: "c" (buf0), "d" (abuf0), "S" (ubuf0), "D" (ubuf1), "m" (dest), "a" (&c->redDither) ); } else { __asm__ volatile( - "mov %%"REG_b", "ESP_OFFSET"(%5) \n\t" 
- "mov %4, %%"REG_b" \n\t" - "push %%"REG_BP" \n\t" - YSCALEYUV2RGB1b(%%REGBP, %5) + "mov %%"FF_REG_b", "ESP_OFFSET"(%5) \n\t" + "mov %4, %%"FF_REG_b" \n\t" + "push %%"FF_REG_BP" \n\t" + YSCALEYUV2RGB1b(%%FF_REGBP, %5) "pcmpeqd %%mm7, %%mm7 \n\t" - WRITEBGR32(%%REGb, 8280(%5), %%REGBP, %%mm2, %%mm4, %%mm5, %%mm7, %%mm0, %%mm1, %%mm3, %%mm6) - "pop %%"REG_BP" \n\t" - "mov "ESP_OFFSET"(%5), %%"REG_b" \n\t" + WRITEBGR32(%%FF_REGb, 8280(%5), %%FF_REGBP, %%mm2, %%mm4, %%mm5, %%mm7, %%mm0, %%mm1, %%mm3, %%mm6) + "pop %%"FF_REG_BP" \n\t" + "mov "ESP_OFFSET"(%5), %%"FF_REG_b" \n\t" :: "c" (buf0), "d" (buf1), "S" (ubuf0), "D" (ubuf1), "m" (dest), "a" (&c->redDither) ); @@ -1172,28 +1172,28 @@ static void RENAME(yuv2bgr24_1)(SwsContext *c, const int16_t *buf0, if (uvalpha < 2048) { // note this is not correct (shifts chrominance by 0.5 pixels) but it is a bit faster const int16_t *ubuf1 = ubuf[0]; __asm__ volatile( - "mov %%"REG_b", "ESP_OFFSET"(%5) \n\t" - "mov %4, %%"REG_b" \n\t" - "push %%"REG_BP" \n\t" - YSCALEYUV2RGB1(%%REGBP, %5) + "mov %%"FF_REG_b", "ESP_OFFSET"(%5) \n\t" + "mov %4, %%"FF_REG_b" \n\t" + "push %%"FF_REG_BP" \n\t" + YSCALEYUV2RGB1(%%FF_REGBP, %5) "pxor %%mm7, %%mm7 \n\t" - WRITEBGR24(%%REGb, 8280(%5), %%REGBP) - "pop %%"REG_BP" \n\t" - "mov "ESP_OFFSET"(%5), %%"REG_b" \n\t" + WRITEBGR24(%%FF_REGb, 8280(%5), %%FF_REGBP) + "pop %%"FF_REG_BP" \n\t" + "mov "ESP_OFFSET"(%5), %%"FF_REG_b" \n\t" :: "c" (buf0), "d" (buf1), "S" (ubuf0), "D" (ubuf1), "m" (dest), "a" (&c->redDither) ); } else { const int16_t *ubuf1 = ubuf[1]; __asm__ volatile( - "mov %%"REG_b", "ESP_OFFSET"(%5) \n\t" - "mov %4, %%"REG_b" \n\t" - "push %%"REG_BP" \n\t" - YSCALEYUV2RGB1b(%%REGBP, %5) + "mov %%"FF_REG_b", "ESP_OFFSET"(%5) \n\t" + "mov %4, %%"FF_REG_b" \n\t" + "push %%"FF_REG_BP" \n\t" + YSCALEYUV2RGB1b(%%FF_REGBP, %5) "pxor %%mm7, %%mm7 \n\t" - WRITEBGR24(%%REGb, 8280(%5), %%REGBP) - "pop %%"REG_BP" \n\t" - "mov "ESP_OFFSET"(%5), %%"REG_b" \n\t" + WRITEBGR24(%%FF_REGb, 8280(%5), %%FF_REGBP) + "pop %%"FF_REG_BP" \n\t" + "mov "ESP_OFFSET"(%5), %%"FF_REG_b" \n\t" :: "c" (buf0), "d" (buf1), "S" (ubuf0), "D" (ubuf1), "m" (dest), "a" (&c->redDither) ); @@ -1211,10 +1211,10 @@ static void RENAME(yuv2rgb555_1)(SwsContext *c, const int16_t *buf0, if (uvalpha < 2048) { // note this is not correct (shifts chrominance by 0.5 pixels) but it is a bit faster const int16_t *ubuf1 = ubuf[0]; __asm__ volatile( - "mov %%"REG_b", "ESP_OFFSET"(%5) \n\t" - "mov %4, %%"REG_b" \n\t" - "push %%"REG_BP" \n\t" - YSCALEYUV2RGB1(%%REGBP, %5) + "mov %%"FF_REG_b", "ESP_OFFSET"(%5) \n\t" + "mov %4, %%"FF_REG_b" \n\t" + "push %%"FF_REG_BP" \n\t" + YSCALEYUV2RGB1(%%FF_REGBP, %5) "pxor %%mm7, %%mm7 \n\t" /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */ #ifdef DITHER1XBPP @@ -1222,19 +1222,19 @@ static void RENAME(yuv2rgb555_1)(SwsContext *c, const int16_t *buf0, "paddusb "GREEN_DITHER"(%5), %%mm4 \n\t" "paddusb "RED_DITHER"(%5), %%mm5 \n\t" #endif - WRITERGB15(%%REGb, 8280(%5), %%REGBP) - "pop %%"REG_BP" \n\t" - "mov "ESP_OFFSET"(%5), %%"REG_b" \n\t" + WRITERGB15(%%FF_REGb, 8280(%5), %%FF_REGBP) + "pop %%"FF_REG_BP" \n\t" + "mov "ESP_OFFSET"(%5), %%"FF_REG_b" \n\t" :: "c" (buf0), "d" (buf1), "S" (ubuf0), "D" (ubuf1), "m" (dest), "a" (&c->redDither) ); } else { const int16_t *ubuf1 = ubuf[1]; __asm__ volatile( - "mov %%"REG_b", "ESP_OFFSET"(%5) \n\t" - "mov %4, %%"REG_b" \n\t" - "push %%"REG_BP" \n\t" - YSCALEYUV2RGB1b(%%REGBP, %5) + "mov %%"FF_REG_b", "ESP_OFFSET"(%5) \n\t" + "mov %4, %%"FF_REG_b" \n\t" + "push %%"FF_REG_BP" \n\t" + 
YSCALEYUV2RGB1b(%%FF_REGBP, %5) "pxor %%mm7, %%mm7 \n\t" /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */ #ifdef DITHER1XBPP @@ -1242,9 +1242,9 @@ static void RENAME(yuv2rgb555_1)(SwsContext *c, const int16_t *buf0, "paddusb "GREEN_DITHER"(%5), %%mm4 \n\t" "paddusb "RED_DITHER"(%5), %%mm5 \n\t" #endif - WRITERGB15(%%REGb, 8280(%5), %%REGBP) - "pop %%"REG_BP" \n\t" - "mov "ESP_OFFSET"(%5), %%"REG_b" \n\t" + WRITERGB15(%%FF_REGb, 8280(%5), %%FF_REGBP) + "pop %%"FF_REG_BP" \n\t" + "mov "ESP_OFFSET"(%5), %%"FF_REG_b" \n\t" :: "c" (buf0), "d" (buf1), "S" (ubuf0), "D" (ubuf1), "m" (dest), "a" (&c->redDither) ); @@ -1262,10 +1262,10 @@ static void RENAME(yuv2rgb565_1)(SwsContext *c, const int16_t *buf0, if (uvalpha < 2048) { // note this is not correct (shifts chrominance by 0.5 pixels) but it is a bit faster const int16_t *ubuf1 = ubuf[0]; __asm__ volatile( - "mov %%"REG_b", "ESP_OFFSET"(%5) \n\t" - "mov %4, %%"REG_b" \n\t" - "push %%"REG_BP" \n\t" - YSCALEYUV2RGB1(%%REGBP, %5) + "mov %%"FF_REG_b", "ESP_OFFSET"(%5) \n\t" + "mov %4, %%"FF_REG_b" \n\t" + "push %%"FF_REG_BP" \n\t" + YSCALEYUV2RGB1(%%FF_REGBP, %5) "pxor %%mm7, %%mm7 \n\t" /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */ #ifdef DITHER1XBPP @@ -1273,19 +1273,19 @@ static void RENAME(yuv2rgb565_1)(SwsContext *c, const int16_t *buf0, "paddusb "GREEN_DITHER"(%5), %%mm4 \n\t" "paddusb "RED_DITHER"(%5), %%mm5 \n\t" #endif - WRITERGB16(%%REGb, 8280(%5), %%REGBP) - "pop %%"REG_BP" \n\t" - "mov "ESP_OFFSET"(%5), %%"REG_b" \n\t" + WRITERGB16(%%FF_REGb, 8280(%5), %%FF_REGBP) + "pop %%"FF_REG_BP" \n\t" + "mov "ESP_OFFSET"(%5), %%"FF_REG_b" \n\t" :: "c" (buf0), "d" (buf1), "S" (ubuf0), "D" (ubuf1), "m" (dest), "a" (&c->redDither) ); } else { const int16_t *ubuf1 = ubuf[1]; __asm__ volatile( - "mov %%"REG_b", "ESP_OFFSET"(%5) \n\t" - "mov %4, %%"REG_b" \n\t" - "push %%"REG_BP" \n\t" - YSCALEYUV2RGB1b(%%REGBP, %5) + "mov %%"FF_REG_b", "ESP_OFFSET"(%5) \n\t" + "mov %4, %%"FF_REG_b" \n\t" + "push %%"FF_REG_BP" \n\t" + YSCALEYUV2RGB1b(%%FF_REGBP, %5) "pxor %%mm7, %%mm7 \n\t" /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */ #ifdef DITHER1XBPP @@ -1293,9 +1293,9 @@ static void RENAME(yuv2rgb565_1)(SwsContext *c, const int16_t *buf0, "paddusb "GREEN_DITHER"(%5), %%mm4 \n\t" "paddusb "RED_DITHER"(%5), %%mm5 \n\t" #endif - WRITERGB16(%%REGb, 8280(%5), %%REGBP) - "pop %%"REG_BP" \n\t" - "mov "ESP_OFFSET"(%5), %%"REG_b" \n\t" + WRITERGB16(%%FF_REGb, 8280(%5), %%FF_REGBP) + "pop %%"FF_REG_BP" \n\t" + "mov "ESP_OFFSET"(%5), %%"FF_REG_b" \n\t" :: "c" (buf0), "d" (buf1), "S" (ubuf0), "D" (ubuf1), "m" (dest), "a" (&c->redDither) ); @@ -1350,26 +1350,26 @@ static void RENAME(yuv2yuyv422_1)(SwsContext *c, const int16_t *buf0, if (uvalpha < 2048) { // note this is not correct (shifts chrominance by 0.5 pixels) but it is a bit faster const int16_t *ubuf1 = ubuf[0]; __asm__ volatile( - "mov %%"REG_b", "ESP_OFFSET"(%5) \n\t" - "mov %4, %%"REG_b" \n\t" - "push %%"REG_BP" \n\t" - YSCALEYUV2PACKED1(%%REGBP, %5) - WRITEYUY2(%%REGb, 8280(%5), %%REGBP) - "pop %%"REG_BP" \n\t" - "mov "ESP_OFFSET"(%5), %%"REG_b" \n\t" + "mov %%"FF_REG_b", "ESP_OFFSET"(%5) \n\t" + "mov %4, %%"FF_REG_b" \n\t" + "push %%"FF_REG_BP" \n\t" + YSCALEYUV2PACKED1(%%FF_REGBP, %5) + WRITEYUY2(%%FF_REGb, 8280(%5), %%FF_REGBP) + "pop %%"FF_REG_BP" \n\t" + "mov "ESP_OFFSET"(%5), %%"FF_REG_b" \n\t" :: "c" (buf0), "d" (buf1), "S" (ubuf0), "D" (ubuf1), "m" (dest), "a" (&c->redDither) ); } else { const int16_t *ubuf1 = ubuf[1]; __asm__ volatile( - "mov %%"REG_b", "ESP_OFFSET"(%5) \n\t" - "mov %4, %%"REG_b" \n\t" - "push %%"REG_BP" \n\t" - 
YSCALEYUV2PACKED1b(%%REGBP, %5) - WRITEYUY2(%%REGb, 8280(%5), %%REGBP) - "pop %%"REG_BP" \n\t" - "mov "ESP_OFFSET"(%5), %%"REG_b" \n\t" + "mov %%"FF_REG_b", "ESP_OFFSET"(%5) \n\t" + "mov %4, %%"FF_REG_b" \n\t" + "push %%"FF_REG_BP" \n\t" + YSCALEYUV2PACKED1b(%%FF_REGBP, %5) + WRITEYUY2(%%FF_REGb, 8280(%5), %%FF_REGBP) + "pop %%"FF_REG_BP" \n\t" + "mov "ESP_OFFSET"(%5), %%"FF_REG_b" \n\t" :: "c" (buf0), "d" (buf1), "S" (ubuf0), "D" (ubuf1), "m" (dest), "a" (&c->redDither) ); @@ -1394,43 +1394,43 @@ static void RENAME(hyscale_fast)(SwsContext *c, int16_t *dst, __asm__ volatile( #if defined(PIC) - "mov %%"REG_b", %5 \n\t" + "mov %%"FF_REG_b", %5 \n\t" #if ARCH_X86_64 - "mov -8(%%rsp), %%"REG_a" \n\t" - "mov %%"REG_a", %6 \n\t" + "mov -8(%%rsp), %%"FF_REG_a" \n\t" + "mov %%"FF_REG_a", %6 \n\t" #endif #else #if ARCH_X86_64 - "mov -8(%%rsp), %%"REG_a" \n\t" - "mov %%"REG_a", %5 \n\t" + "mov -8(%%rsp), %%"FF_REG_a" \n\t" + "mov %%"FF_REG_a", %5 \n\t" #endif #endif - "pxor %%mm7, %%mm7 \n\t" - "mov %0, %%"REG_c" \n\t" - "mov %1, %%"REG_D" \n\t" - "mov %2, %%"REG_d" \n\t" - "mov %3, %%"REG_b" \n\t" - "xor %%"REG_a", %%"REG_a" \n\t" // i - PREFETCH" (%%"REG_c") \n\t" - PREFETCH" 32(%%"REG_c") \n\t" - PREFETCH" 64(%%"REG_c") \n\t" + "pxor %%mm7, %%mm7 \n\t" + "mov %0, %%"FF_REG_c" \n\t" + "mov %1, %%"FF_REG_D" \n\t" + "mov %2, %%"FF_REG_d" \n\t" + "mov %3, %%"FF_REG_b" \n\t" + "xor %%"FF_REG_a", %%"FF_REG_a" \n\t" // i + PREFETCH" (%%"FF_REG_c") \n\t" + PREFETCH" 32(%%"FF_REG_c") \n\t" + PREFETCH" 64(%%"FF_REG_c") \n\t" #if ARCH_X86_64 #define CALL_MMXEXT_FILTER_CODE \ - "movl (%%"REG_b"), %%esi \n\t"\ - "call *%4 \n\t"\ - "movl (%%"REG_b", %%"REG_a"), %%esi \n\t"\ - "add %%"REG_S", %%"REG_c" \n\t"\ - "add %%"REG_a", %%"REG_D" \n\t"\ - "xor %%"REG_a", %%"REG_a" \n\t"\ + "movl (%%"FF_REG_b"), %%esi \n\t"\ + "call *%4 \n\t"\ + "movl (%%"FF_REG_b", %%"FF_REG_a"), %%esi \n\t"\ + "add %%"FF_REG_S", %%"FF_REG_c" \n\t"\ + "add %%"FF_REG_a", %%"FF_REG_D" \n\t"\ + "xor %%"FF_REG_a", %%"FF_REG_a" \n\t"\ #else #define CALL_MMXEXT_FILTER_CODE \ - "movl (%%"REG_b"), %%esi \n\t"\ - "call *%4 \n\t"\ - "addl (%%"REG_b", %%"REG_a"), %%"REG_c" \n\t"\ - "add %%"REG_a", %%"REG_D" \n\t"\ - "xor %%"REG_a", %%"REG_a" \n\t"\ + "movl (%%"FF_REG_b"), %%esi \n\t"\ + "call *%4 \n\t"\ + "addl (%%"FF_REG_b", %%"FF_REG_a"), %%"FF_REG_c" \n\t"\ + "add %%"FF_REG_a", %%"FF_REG_D" \n\t"\ + "xor %%"FF_REG_a", %%"FF_REG_a" \n\t"\ #endif /* ARCH_X86_64 */ @@ -1444,15 +1444,15 @@ static void RENAME(hyscale_fast)(SwsContext *c, int16_t *dst, CALL_MMXEXT_FILTER_CODE #if defined(PIC) - "mov %5, %%"REG_b" \n\t" + "mov %5, %%"FF_REG_b" \n\t" #if ARCH_X86_64 - "mov %6, %%"REG_a" \n\t" - "mov %%"REG_a", -8(%%rsp) \n\t" + "mov %6, %%"FF_REG_a" \n\t" + "mov %%"FF_REG_a", -8(%%rsp) \n\t" #endif #else #if ARCH_X86_64 - "mov %5, %%"REG_a" \n\t" - "mov %%"REG_a", -8(%%rsp) \n\t" + "mov %5, %%"FF_REG_a" \n\t" + "mov %%"FF_REG_a", -8(%%rsp) \n\t" #endif #endif :: "m" (src), "m" (dst), "m" (filter), "m" (filterPos), @@ -1463,9 +1463,9 @@ static void RENAME(hyscale_fast)(SwsContext *c, int16_t *dst, #if ARCH_X86_64 ,"m"(retsave) #endif - : "%"REG_a, "%"REG_c, "%"REG_d, "%"REG_S, "%"REG_D + : "%"FF_REG_a, "%"FF_REG_c, "%"FF_REG_d, "%"FF_REG_S, "%"FF_REG_D #if !defined(PIC) - ,"%"REG_b + ,"%"FF_REG_b #endif ); @@ -1490,37 +1490,37 @@ static void RENAME(hcscale_fast)(SwsContext *c, int16_t *dst1, int16_t *dst2, __asm__ volatile( #if defined(PIC) - "mov %%"REG_b", %7 \n\t" + "mov %%"FF_REG_b", %7 \n\t" #if ARCH_X86_64 - "mov -8(%%rsp), %%"REG_a" \n\t" - "mov 
%%"REG_a", %8 \n\t" + "mov -8(%%rsp), %%"FF_REG_a" \n\t" + "mov %%"FF_REG_a", %8 \n\t" #endif #else #if ARCH_X86_64 - "mov -8(%%rsp), %%"REG_a" \n\t" - "mov %%"REG_a", %7 \n\t" + "mov -8(%%rsp), %%"FF_REG_a" \n\t" + "mov %%"FF_REG_a", %7 \n\t" #endif #endif - "pxor %%mm7, %%mm7 \n\t" - "mov %0, %%"REG_c" \n\t" - "mov %1, %%"REG_D" \n\t" - "mov %2, %%"REG_d" \n\t" - "mov %3, %%"REG_b" \n\t" - "xor %%"REG_a", %%"REG_a" \n\t" // i - PREFETCH" (%%"REG_c") \n\t" - PREFETCH" 32(%%"REG_c") \n\t" - PREFETCH" 64(%%"REG_c") \n\t" + "pxor %%mm7, %%mm7 \n\t" + "mov %0, %%"FF_REG_c" \n\t" + "mov %1, %%"FF_REG_D" \n\t" + "mov %2, %%"FF_REG_d" \n\t" + "mov %3, %%"FF_REG_b" \n\t" + "xor %%"FF_REG_a", %%"FF_REG_a" \n\t" // i + PREFETCH" (%%"FF_REG_c") \n\t" + PREFETCH" 32(%%"FF_REG_c") \n\t" + PREFETCH" 64(%%"FF_REG_c") \n\t" CALL_MMXEXT_FILTER_CODE CALL_MMXEXT_FILTER_CODE CALL_MMXEXT_FILTER_CODE CALL_MMXEXT_FILTER_CODE - "xor %%"REG_a", %%"REG_a" \n\t" // i - "mov %5, %%"REG_c" \n\t" // src - "mov %6, %%"REG_D" \n\t" // buf2 - PREFETCH" (%%"REG_c") \n\t" - PREFETCH" 32(%%"REG_c") \n\t" - PREFETCH" 64(%%"REG_c") \n\t" + "xor %%"FF_REG_a", %%"FF_REG_a" \n\t" // i + "mov %5, %%"FF_REG_c" \n\t" // src + "mov %6, %%"FF_REG_D" \n\t" // buf2 + PREFETCH" (%%"FF_REG_c") \n\t" + PREFETCH" 32(%%"FF_REG_c") \n\t" + PREFETCH" 64(%%"FF_REG_c") \n\t" CALL_MMXEXT_FILTER_CODE CALL_MMXEXT_FILTER_CODE @@ -1528,15 +1528,15 @@ static void RENAME(hcscale_fast)(SwsContext *c, int16_t *dst1, int16_t *dst2, CALL_MMXEXT_FILTER_CODE #if defined(PIC) - "mov %7, %%"REG_b" \n\t" + "mov %7, %%"FF_REG_b" \n\t" #if ARCH_X86_64 - "mov %8, %%"REG_a" \n\t" - "mov %%"REG_a", -8(%%rsp) \n\t" + "mov %8, %%"FF_REG_a" \n\t" + "mov %%"FF_REG_a", -8(%%rsp) \n\t" #endif #else #if ARCH_X86_64 - "mov %7, %%"REG_a" \n\t" - "mov %%"REG_a", -8(%%rsp) \n\t" + "mov %7, %%"FF_REG_a" \n\t" + "mov %%"FF_REG_a", -8(%%rsp) \n\t" #endif #endif :: "m" (src1), "m" (dst1), "m" (filter), "m" (filterPos), @@ -1547,9 +1547,9 @@ static void RENAME(hcscale_fast)(SwsContext *c, int16_t *dst1, int16_t *dst2, #if ARCH_X86_64 ,"m"(retsave) #endif - : "%"REG_a, "%"REG_c, "%"REG_d, "%"REG_S, "%"REG_D + : "%"FF_REG_a, "%"FF_REG_c, "%"FF_REG_d, "%"FF_REG_S, "%"FF_REG_D #if !defined(PIC) - ,"%"REG_b + ,"%"FF_REG_b #endif );