Commit 694ec061 authored by Fabrice Bellard

suppressed nasm dependency - rewrote forward DCT and motion estimation code


Originally committed as revision 104 to svn://svn.ffmpeg.org/ffmpeg/trunk
Parent c72c6d2d
/*
* MMX optimized forward DCT
* The gcc porting is Copyright (c) 2001 Gerard Lantau.
*
* from fdctam32.c - AP922 MMX(3D-Now) forward-DCT
*
* Intel Application Note AP-922 - fast, precise implementation of DCT
* http://developer.intel.com/vtune/cbts/appnotes.htm
*/
#include "../common.h"
#include "mmx.h"
#define ATTR_ALIGN(align) __attribute__ ((__aligned__ (align)))
//////////////////////////////////////////////////////////////////////
//
// constants for the forward DCT
// -----------------------------
//
// Be sure to check that your compiler is aligning all constants to QWORD
// (8-byte) memory boundaries! Otherwise the unaligned memory access will
// severely stall MMX execution.
//
//////////////////////////////////////////////////////////////////////
#define BITS_FRW_ACC 3 //; 2 or 3 for accuracy
#define SHIFT_FRW_COL BITS_FRW_ACC
#define SHIFT_FRW_ROW (BITS_FRW_ACC + 17)
//#define RND_FRW_ROW (262144 * (BITS_FRW_ACC - 1)) //; 1 << (SHIFT_FRW_ROW-1)
#define RND_FRW_ROW (1 << (SHIFT_FRW_ROW-1))
//#define RND_FRW_COL (2 * (BITS_FRW_ACC - 1)) //; 1 << (SHIFT_FRW_COL-1)
#define RND_FRW_COL (1 << (SHIFT_FRW_COL-1))
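/*
 * Worked values for BITS_FRW_ACC = 3 (follow directly from the definitions
 * above):
 *   SHIFT_FRW_COL = 3,  RND_FRW_COL = 1 << 2  = 4
 *   SHIFT_FRW_ROW = 20, RND_FRW_ROW = 1 << 19 = 524288
 */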
//concatenated table, for forward DCT transformation
const int16_t fdct_tg_all_16[] ATTR_ALIGN(8) = {
13036, 13036, 13036, 13036, // tg * (2<<16) + 0.5
27146, 27146, 27146, 27146, // tg * (2<<16) + 0.5
-21746, -21746, -21746, -21746, // tg * (2<<16) + 0.5
};
const int16_t cos_4_16[4] ATTR_ALIGN(8) = {
-19195, -19195, -19195, -19195, //cos * (2<<16) + 0.5
};
const int16_t ocos_4_16[4] ATTR_ALIGN(8) = {
23170, 23170, 23170, 23170, //cos * (2<<15) + 0.5
};
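/*
 * Sanity check of the fixed-point scaling (illustrative):
 *   13036  ~= tan(1*pi/16) * 2^16
 *   27146  ~= tan(2*pi/16) * 2^16
 *   -21746 ~= tan(3*pi/16) * 2^16 - 2^16  (wrapped into signed 16 bits; the
 *                                          code re-adds the multiplicand
 *                                          after pmulhw to compensate)
 *   -19195 ~= cos(pi/4)    * 2^16 - 2^16  (same wrap)
 *   23170  ~= cos(pi/4)    * 2^15
 */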
const long long fdct_one_corr ATTR_ALIGN(8) = 0x0001000100010001LL;
const long fdct_r_row[2] ATTR_ALIGN(8) = {RND_FRW_ROW, RND_FRW_ROW };
const int16_t tab_frw_01234567[] ATTR_ALIGN(8) = { // forward_dct coeff table
//row0
16384, 16384, 21407, -8867, // w09 w01 w08 w00
16384, 16384, 8867, -21407, // w13 w05 w12 w04
16384, -16384, 8867, 21407, // w11 w03 w10 w02
-16384, 16384, -21407, -8867, // w15 w07 w14 w06
22725, 12873, 19266, -22725, // w22 w20 w18 w16
19266, 4520, -4520, -12873, // w23 w21 w19 w17
12873, 4520, 4520, 19266, // w30 w28 w26 w24
-22725, 19266, -12873, -22725, // w31 w29 w27 w25
//row1
22725, 22725, 29692, -12299, // w09 w01 w08 w00
22725, 22725, 12299, -29692, // w13 w05 w12 w04
22725, -22725, 12299, 29692, // w11 w03 w10 w02
-22725, 22725, -29692, -12299, // w15 w07 w14 w06
31521, 17855, 26722, -31521, // w22 w20 w18 w16
26722, 6270, -6270, -17855, // w23 w21 w19 w17
17855, 6270, 6270, 26722, // w30 w28 w26 w24
-31521, 26722, -17855, -31521, // w31 w29 w27 w25
//row2
21407, 21407, 27969, -11585, // w09 w01 w08 w00
21407, 21407, 11585, -27969, // w13 w05 w12 w04
21407, -21407, 11585, 27969, // w11 w03 w10 w02
-21407, 21407, -27969, -11585, // w15 w07 w14 w06
29692, 16819, 25172, -29692, // w22 w20 w18 w16
25172, 5906, -5906, -16819, // w23 w21 w19 w17
16819, 5906, 5906, 25172, // w30 w28 w26 w24
-29692, 25172, -16819, -29692, // w31 w29 w27 w25
//row3
19266, 19266, 25172, -10426, // w09 w01 w08 w00
19266, 19266, 10426, -25172, // w13 w05 w12 w04
19266, -19266, 10426, 25172, // w11 w03 w10 w02
-19266, 19266, -25172, -10426, // w15 w07 w14 w06,
26722, 15137, 22654, -26722, // w22 w20 w18 w16
22654, 5315, -5315, -15137, // w23 w21 w19 w17
15137, 5315, 5315, 22654, // w30 w28 w26 w24
-26722, 22654, -15137, -26722, // w31 w29 w27 w25,
//row4
16384, 16384, 21407, -8867, // w09 w01 w08 w00
16384, 16384, 8867, -21407, // w13 w05 w12 w04
16384, -16384, 8867, 21407, // w11 w03 w10 w02
-16384, 16384, -21407, -8867, // w15 w07 w14 w06
22725, 12873, 19266, -22725, // w22 w20 w18 w16
19266, 4520, -4520, -12873, // w23 w21 w19 w17
12873, 4520, 4520, 19266, // w30 w28 w26 w24
-22725, 19266, -12873, -22725, // w31 w29 w27 w25
//row5
19266, 19266, 25172, -10426, // w09 w01 w08 w00
19266, 19266, 10426, -25172, // w13 w05 w12 w04
19266, -19266, 10426, 25172, // w11 w03 w10 w02
-19266, 19266, -25172, -10426, // w15 w07 w14 w06
26722, 15137, 22654, -26722, // w22 w20 w18 w16
22654, 5315, -5315, -15137, // w23 w21 w19 w17
15137, 5315, 5315, 22654, // w30 w28 w26 w24
-26722, 22654, -15137, -26722, // w31 w29 w27 w25
//row6
21407, 21407, 27969, -11585, // w09 w01 w08 w00
21407, 21407, 11585, -27969, // w13 w05 w12 w04
21407, -21407, 11585, 27969, // w11 w03 w10 w02
-21407, 21407, -27969, -11585, // w15 w07 w14 w06,
29692, 16819, 25172, -29692, // w22 w20 w18 w16
25172, 5906, -5906, -16819, // w23 w21 w19 w17
16819, 5906, 5906, 25172, // w30 w28 w26 w24
-29692, 25172, -16819, -29692, // w31 w29 w27 w25,
//row7
22725, 22725, 29692, -12299, // w09 w01 w08 w00
22725, 22725, 12299, -29692, // w13 w05 w12 w04
22725, -22725, 12299, 29692, // w11 w03 w10 w02
-22725, 22725, -29692, -12299, // w15 w07 w14 w06,
31521, 17855, 26722, -31521, // w22 w20 w18 w16
26722, 6270, -6270, -17855, // w23 w21 w19 w17
17855, 6270, 6270, 26722, // w30 w28 w26 w24
-31521, 26722, -17855, -31521 // w31 w29 w27 w25
};
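/*
 * Note: fdct_col() performs the column pass of the AP-922 forward DCT on
 * four columns at a time (offset 0 handles columns 0-3, offset 4 handles
 * columns 4-7). It works in saturating 16-bit arithmetic pre-scaled left by
 * SHIFT_FRW_COL; ORing in fdct_one_corr sets the low bit as the "+0.5"
 * correction used by the original asm.
 */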
static inline void fdct_col(const int16_t *in, int16_t *out, int offset)
{
    movq_m2r(*(in + offset + 1 * 8), mm0);
    movq_m2r(*(in + offset + 6 * 8), mm1);
    movq_r2r(mm0, mm2);
    movq_m2r(*(in + offset + 2 * 8), mm3);
    paddsw_r2r(mm1, mm0);
    movq_m2r(*(in + offset + 5 * 8), mm4);
    psllw_i2r(SHIFT_FRW_COL, mm0);
    movq_m2r(*(in + offset + 0 * 8), mm5);
    paddsw_r2r(mm3, mm4);
    paddsw_m2r(*(in + offset + 7 * 8), mm5);
    psllw_i2r(SHIFT_FRW_COL, mm4);
    movq_r2r(mm0, mm6);
    psubsw_r2r(mm1, mm2);
    movq_m2r(*(fdct_tg_all_16 + 4), mm1);
    psubsw_r2r(mm4, mm0);
    movq_m2r(*(in + offset + 3 * 8), mm7);
    pmulhw_r2r(mm0, mm1);
    paddsw_m2r(*(in + offset + 4 * 8), mm7);
    psllw_i2r(SHIFT_FRW_COL, mm5);
    paddsw_r2r(mm4, mm6);
    psllw_i2r(SHIFT_FRW_COL, mm7);
    movq_r2r(mm5, mm4);
    psubsw_r2r(mm7, mm5);
    paddsw_r2r(mm5, mm1);
    paddsw_r2r(mm7, mm4);
    por_m2r(fdct_one_corr, mm1);
    psllw_i2r(SHIFT_FRW_COL + 1, mm2);
    pmulhw_m2r(*(fdct_tg_all_16 + 4), mm5);
    movq_r2r(mm4, mm7);
    psubsw_m2r(*(in + offset + 5 * 8), mm3);
    psubsw_r2r(mm6, mm4);
    movq_r2m(mm1, *(out + offset + 2 * 8));
    paddsw_r2r(mm6, mm7);
    movq_m2r(*(in + offset + 3 * 8), mm1);
    psllw_i2r(SHIFT_FRW_COL + 1, mm3);
    psubsw_m2r(*(in + offset + 4 * 8), mm1);
    movq_r2r(mm2, mm6);
    movq_r2m(mm4, *(out + offset + 4 * 8));
    paddsw_r2r(mm3, mm2);
    pmulhw_m2r(*ocos_4_16, mm2);
    psubsw_r2r(mm3, mm6);
    pmulhw_m2r(*ocos_4_16, mm6);
    psubsw_r2r(mm0, mm5);
    por_m2r(fdct_one_corr, mm5);
    psllw_i2r(SHIFT_FRW_COL, mm1);
    por_m2r(fdct_one_corr, mm2);
    movq_r2r(mm1, mm4);
    movq_m2r(*(in + offset + 0 * 8), mm3);
    paddsw_r2r(mm6, mm1);
    psubsw_m2r(*(in + offset + 7 * 8), mm3);
    psubsw_r2r(mm6, mm4);
    movq_m2r(*(fdct_tg_all_16 + 0), mm0);
    psllw_i2r(SHIFT_FRW_COL, mm3);
    movq_m2r(*(fdct_tg_all_16 + 8), mm6);
    pmulhw_r2r(mm1, mm0);
    movq_r2m(mm7, *(out + offset + 0 * 8));
    pmulhw_r2r(mm4, mm6);
    movq_r2m(mm5, *(out + offset + 6 * 8));
    movq_r2r(mm3, mm7);
    movq_m2r(*(fdct_tg_all_16 + 8), mm5);
    psubsw_r2r(mm2, mm7);
    paddsw_r2r(mm2, mm3);
    pmulhw_r2r(mm7, mm5);
    paddsw_r2r(mm3, mm0);
    paddsw_r2r(mm4, mm6);
    pmulhw_m2r(*(fdct_tg_all_16 + 0), mm3);
    por_m2r(fdct_one_corr, mm0);
    paddsw_r2r(mm7, mm5);
    psubsw_r2r(mm6, mm7);
    movq_r2m(mm0, *(out + offset + 1 * 8));
    paddsw_r2r(mm4, mm5);
    movq_r2m(mm7, *(out + offset + 3 * 8));
    psubsw_r2r(mm1, mm3);
    movq_r2m(mm5, *(out + offset + 5 * 8));
    movq_r2m(mm3, *(out + offset + 7 * 8));
}
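/*
 * Note: fdct_row() transforms one row with pmaddwd against a per-row slice
 * of tab_frw_01234567 (32 int16_t coefficients per row), then adds the
 * fdct_r_row rounder and shifts right by SHIFT_FRW_ROW.
 */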
static inline void fdct_row(const int16_t *in, int16_t *out, const int16_t *table)
{
    movd_m2r(*(in + 6), mm5);
    punpcklwd_m2r(*(in + 4), mm5);
    movq_r2r(mm5, mm2);
    psrlq_i2r(0x20, mm5);
    movq_m2r(*(in + 0), mm0);
    punpcklwd_r2r(mm2, mm5);
    movq_r2r(mm0, mm1);
    paddsw_r2r(mm5, mm0);
    psubsw_r2r(mm5, mm1);
    movq_r2r(mm0, mm2);
    punpcklwd_r2r(mm1, mm0);
    punpckhwd_r2r(mm1, mm2);
    movq_r2r(mm2, mm1);
    movq_r2r(mm0, mm2);
    movq_m2r(*(table + 0), mm3);
    punpcklwd_r2r(mm1, mm0);
    movq_r2r(mm0, mm5);
    punpckldq_r2r(mm0, mm0);
    movq_m2r(*(table + 4), mm4);
    punpckhwd_r2r(mm1, mm2);
    pmaddwd_r2r(mm0, mm3);
    movq_r2r(mm2, mm6);
    movq_m2r(*(table + 16), mm1);
    punpckldq_r2r(mm2, mm2);
    pmaddwd_r2r(mm2, mm4);
    punpckhdq_r2r(mm5, mm5);
    pmaddwd_m2r(*(table + 8), mm0);
    punpckhdq_r2r(mm6, mm6);
    movq_m2r(*(table + 20), mm7);
    pmaddwd_r2r(mm5, mm1);
    paddd_m2r(*fdct_r_row, mm3);
    pmaddwd_r2r(mm6, mm7);
    pmaddwd_m2r(*(table + 12), mm2);
    paddd_r2r(mm4, mm3);
    pmaddwd_m2r(*(table + 24), mm5);
    pmaddwd_m2r(*(table + 28), mm6);
    paddd_r2r(mm7, mm1);
    paddd_m2r(*fdct_r_row, mm0);
    psrad_i2r(SHIFT_FRW_ROW, mm3);
    paddd_m2r(*fdct_r_row, mm1);
    paddd_r2r(mm2, mm0);
    paddd_m2r(*fdct_r_row, mm5);
    psrad_i2r(SHIFT_FRW_ROW, mm1);
    paddd_r2r(mm6, mm5);
    psrad_i2r(SHIFT_FRW_ROW, mm0);
    psrad_i2r(SHIFT_FRW_ROW, mm5);
    packssdw_r2r(mm0, mm3);
    packssdw_r2r(mm5, mm1);
    movq_r2r(mm3, mm6);
    punpcklwd_r2r(mm1, mm3);
    punpckhwd_r2r(mm1, mm6);
    movq_r2m(mm3, *(out + 0));
    movq_r2m(mm6, *(out + 4));
}
void fdct_mmx(int16_t *block)
{
    /* XXX: not thread safe */
    static int16_t block_tmp[64] ATTR_ALIGN(8);
    int16_t *block1, *out;
    const int16_t *table;
    int i;
    block1 = block_tmp;
    fdct_col(block, block1, 0);
    fdct_col(block, block1, 4);
    block1 = block_tmp;
    table = tab_frw_01234567;
    out = block;
    for(i=8;i>0;i--) {
        fdct_row(block1, out, table);
        block1 += 8;
        table += 32;
        out += 8;
    }
}
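/*
 * Usage sketch (illustrative, hypothetical caller -- not part of the
 * original file):
 *
 *     int16_t block[64] __attribute__ ((aligned (8))); // one 8x8 block
 *     ...fill block with samples or prediction error...
 *     fdct_mmx(block); // forward DCT computed in place
 */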
; //////////////////////////////////////////////////////////////////////////////
; //
; // fdctam32.c - AP922 MMX(3D-Now) forward-DCT
; // ----------
; // Intel Application Note AP-922 - fast, precise implementation of DCT
; // http://developer.intel.com/vtune/cbts/appnotes.htm
; // ----------
; //
; // This routine can use a 3D-Now/MMX enhancement to increase the
; // accuracy of the fdct_col_4 macro. The dct_col function uses 3D-Now's
; // PMULHRW instead of MMX's PMULHW (and POR). The substitution improves
; // accuracy very slightly, with a performance penalty. If the target CPU
; // does not support 3D-Now, then that variant cannot be executed.
; // (An illustrative example of the substitution appears below.)
; //
; // For a fast, precise MMX implementation of inverse-DCT
; // visit http://www.elecard.com/peter
; //
; // v1.0 07/22/2000 (initial release)
; //
; // liaor@iname.com http://members.tripod.com/~liaor
; //////////////////////////////////////////////////////////////////////////////
;;;
;;; A.Stevens Jul 2000: ported to nasm syntax and disentangled from
;;; from Win**** compiler specific stuff.
;;; All the real work was done above though.
;;; See above for how to optimise quality on 3DNow! CPU's
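;; Illustrative only (assumes a 3DNow!-capable CPU): the rounding multiply
;; can replace the truncating multiply plus the +1 LSB correction, i.e. a
;; pair such as
;;
;;     pmulhw  mm1, mm0             ; truncating high multiply ...
;;     por     mm1, [fdct_one_corr] ; ... plus LSB correction
;;
;; could become
;;
;;     pmulhrw mm1, mm0             ; 3DNow! high multiply with rounding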
;;
;; Macros for code-readability...
;;
%define INP eax ; pointer to (short *blk)
%define OUT ecx ; pointer to output (temporary store space qwTemp[])
%define TABLE ebx ; pointer to tab_frw_01234567[]
%define TABLEF ebx ; pointer to tg_all_16
%define round_frw_row edx
%define x0 INP + 0*16
%define x1 INP + 1*16
%define x2 INP + 2*16
%define x3 INP + 3*16
%define x4 INP + 4*16
%define x5 INP + 5*16
%define x6 INP + 6*16
%define x7 INP + 7*16
%define y0 OUT + 0*16
%define y1 OUT + 1*16
%define y2 OUT + 2*16
%define y3 OUT + 3*16
%define y4 OUT + 4*16
%define y5 OUT + 5*16
%define y6 OUT + 6*16
%define y7 OUT + 7*16
;;
;; Constants for DCT
;;
%define BITS_FRW_ACC 3 ; 2 or 3 for accuracy
%define SHIFT_FRW_COL BITS_FRW_ACC
%define SHIFT_FRW_ROW (BITS_FRW_ACC + 17)
%define RND_FRW_ROW (1 << (SHIFT_FRW_ROW-1))
%define RND_FRW_COL (1 << (SHIFT_FRW_COL-1))
extern fdct_one_corr
extern fdct_r_row ; Defined in C for convenience
;;
;; Concatenated table of forward dct transformation coeffs.
;;
extern fdct_tg_all_16 ; Defined in C for convenience
;; Offsets into table..
%define tg_1_16 (TABLEF + 0)
%define tg_2_16 (TABLEF + 8)
%define tg_3_16 (TABLEF + 16)
%define cos_4_16 (TABLEF + 24)
%define ocos_4_16 (TABLEF + 32)
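;; Note: these byte offsets assume the tangent rows, cos_4_16 and ocos_4_16
;; are laid out back to back starting at fdct_tg_all_16 (3 x 4 words = 24
;; bytes of tangents, then 8 bytes for each cosine table). The C replacement
;; above declares separate arrays and references fdct_tg_all_16 / ocos_4_16
;; directly, so it does not depend on that layout.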
;;
;; Concatenated table of forward dct coefficients
;;
extern tab_frw_01234567 ; Defined in C for convenience
;; Offsets into table..
SECTION .text
global fdct_mmx
;;;
;;; void fdct_mmx( short *blk )
;;;
; ////////////////////////////////////////////////////////////////////////
; //
; // The high-level pseudocode for the fdct_am32() routine :
; //
; // fdct_am32()
; // {
; // forward_dct_col03(); // dct_column transform on cols 0-3
; // forward_dct_col47(); // dct_column transform on cols 4-7
; // for ( j = 0; j < 8; j=j+1 )
; // forward_dct_row1(j); // dct_row transform on row #j
; // }
; //
;
align 32
fdct_mmx:
push ebp ; save stack pointer
mov ebp, esp ; link
push ebx
push ecx
push edx
push edi
mov INP, [ebp+8]; ; input data is row 0 of blk[]
;// transform the left half of the matrix (4 columns)
lea TABLEF, [fdct_tg_all_16];
mov OUT, INP;
; lea round_frw_col, [r_frw_col]
; for ( i = 0; i < 2; i = i + 1)
; the for-loop is executed twice. We are better off unrolling the
; loop to avoid branch misprediction.
.mmx32_fdct_col03:
movq mm0, [x1] ; 0 ; x1
;;
movq mm1, [x6] ; 1 ; x6
movq mm2, mm0 ; 2 ; x1
movq mm3, [x2] ; 3 ; x2
paddsw mm0, mm1 ; t1 = x[1] + x[6]
movq mm4, [x5] ; 4 ; x5
psllw mm0, SHIFT_FRW_COL ; t1
movq mm5, [x0] ; 5 ; x0
paddsw mm4, mm3 ; t2 = x[2] + x[5]
paddsw mm5, [x7] ; t0 = x[0] + x[7]
psllw mm4, SHIFT_FRW_COL ; t2
movq mm6, mm0 ; 6 ; t1
psubsw mm2, mm1 ; 1 ; t6 = x[1] - x[6]
movq mm1, [tg_2_16] ; 1 ; tg_2_16
psubsw mm0, mm4 ; tm12 = t1 - t2
movq mm7, [x3] ; 7 ; x3
pmulhw mm1, mm0 ; tm12*tg_2_16
paddsw mm7, [x4] ; t3 = x[3] + x[4]
psllw mm5, SHIFT_FRW_COL ; t0
paddsw mm6, mm4 ; 4 ; tp12 = t1 + t2
psllw mm7, SHIFT_FRW_COL ; t3
movq mm4, mm5 ; 4 ; t0
psubsw mm5, mm7 ; tm03 = t0 - t3
paddsw mm1, mm5 ; y2 = tm03 + tm12*tg_2_16
paddsw mm4, mm7 ; 7 ; tp03 = t0 + t3
por mm1, [fdct_one_corr] ; correction y2 +0.5
psllw mm2, SHIFT_FRW_COL+1 ; t6
pmulhw mm5, [tg_2_16] ; tm03*tg_2_16
movq mm7, mm4 ; 7 ; tp03
psubsw mm3, [x5] ; t5 = x[2] - x[5]
psubsw mm4, mm6 ; y4 = tp03 - tp12
movq [y2], mm1 ; 1 ; save y2
paddsw mm7, mm6 ; 6 ; y0 = tp03 + tp12
movq mm1, [x3] ; 1 ; x3
psllw mm3, SHIFT_FRW_COL+1 ; t5
psubsw mm1, [x4] ; t4 = x[3] - x[4]
movq mm6, mm2 ; 6 ; t6
movq [y4], mm4 ; 4 ; save y4
paddsw mm2, mm3 ; t6 + t5
pmulhw mm2, [ocos_4_16] ; tp65 = (t6 + t5)*cos_4_16
psubsw mm6, mm3 ; 3 ; t6 - t5
pmulhw mm6, [ocos_4_16] ; tm65 = (t6 - t5)*cos_4_16
psubsw mm5, mm0 ; 0 ; y6 = tm03*tg_2_16 - tm12
por mm5, [fdct_one_corr] ; correction y6 +0.5
psllw mm1, SHIFT_FRW_COL ; t4
por mm2, [fdct_one_corr] ; correction tp65 +0.5
movq mm4, mm1 ; 4 ; t4
movq mm3, [x0] ; 3 ; x0
paddsw mm1, mm6 ; tp465 = t4 + tm65
psubsw mm3, [x7] ; t7 = x[0] - x[7]
psubsw mm4, mm6 ; 6 ; tm465 = t4 - tm65
movq mm0, [tg_1_16] ; 0 ; tg_1_16
psllw mm3, SHIFT_FRW_COL ; t7
movq mm6, [tg_3_16] ; 6 ; tg_3_16
pmulhw mm0, mm1 ; tp465*tg_1_16
movq [y0], mm7 ; 7 ; save y0
pmulhw mm6, mm4 ; tm465*tg_3_16
movq [y6], mm5 ; 5 ; save y6
movq mm7, mm3 ; 7 ; t7
movq mm5, [tg_3_16] ; 5 ; tg_3_16
psubsw mm7, mm2 ; tm765 = t7 - tp65
paddsw mm3, mm2 ; 2 ; tp765 = t7 + tp65
pmulhw mm5, mm7 ; tm765*tg_3_16
paddsw mm0, mm3 ; y1 = tp765 + tp465*tg_1_16
paddsw mm6, mm4 ; tm465*tg_3_16
pmulhw mm3, [tg_1_16] ; tp765*tg_1_16
;;
por mm0, [fdct_one_corr] ; correction y1 +0.5
paddsw mm5, mm7 ; tm765*tg_3_16
psubsw mm7, mm6 ; 6 ; y3 = tm765 - tm465*tg_3_16
add INP, 0x08 ; ; increment pointer
movq [y1], mm0 ; 0 ; save y1
paddsw mm5, mm4 ; 4 ; y5 = tm765*tg_3_16 + tm465
movq [y3], mm7 ; 7 ; save y3
psubsw mm3, mm1 ; 1 ; y7 = tp765*tg_1_16 - tp465
movq [y5], mm5 ; 5 ; save y5
.mmx32_fdct_col47: ; begin processing last four columns
movq mm0, [x1] ; 0 ; x1
;;
movq [y7], mm3 ; 3 ; save y7 (columns 0-3)
;;
movq mm1, [x6] ; 1 ; x6
movq mm2, mm0 ; 2 ; x1
movq mm3, [x2] ; 3 ; x2
paddsw mm0, mm1 ; t1 = x[1] + x[6]
movq mm4, [x5] ; 4 ; x5
psllw mm0, SHIFT_FRW_COL ; t1
movq mm5, [x0] ; 5 ; x0
paddsw mm4, mm3 ; t2 = x[2] + x[5]
paddsw mm5, [x7] ; t0 = x[0] + x[7]
psllw mm4, SHIFT_FRW_COL ; t2
movq mm6, mm0 ; 6 ; t1
psubsw mm2, mm1 ; 1 ; t6 = x[1] - x[6]
movq mm1, [tg_2_16] ; 1 ; tg_2_16
psubsw mm0, mm4 ; tm12 = t1 - t2
movq mm7, [x3] ; 7 ; x3
pmulhw mm1, mm0 ; tm12*tg_2_16
paddsw mm7, [x4] ; t3 = x[3] + x[4]
psllw mm5, SHIFT_FRW_COL ; t0
paddsw mm6, mm4 ; 4 ; tp12 = t1 + t2
psllw mm7, SHIFT_FRW_COL ; t3
movq mm4, mm5 ; 4 ; t0
psubsw mm5, mm7 ; tm03 = t0 - t3
paddsw mm1, mm5 ; y2 = tm03 + tm12*tg_2_16
paddsw mm4, mm7 ; 7 ; tp03 = t0 + t3
por mm1, [fdct_one_corr] ; correction y2 +0.5
psllw mm2, SHIFT_FRW_COL+1 ; t6
pmulhw mm5, [tg_2_16] ; tm03*tg_2_16
movq mm7, mm4 ; 7 ; tp03
psubsw mm3, [x5] ; t5 = x[2] - x[5]
psubsw mm4, mm6 ; y4 = tp03 - tp12
movq [y2+8], mm1 ; 1 ; save y2
paddsw mm7, mm6 ; 6 ; y0 = tp03 + tp12
movq mm1, [x3] ; 1 ; x3
psllw mm3, SHIFT_FRW_COL+1 ; t5
psubsw mm1, [x4] ; t4 = x[3] - x[4]
movq mm6, mm2 ; 6 ; t6
movq [y4+8], mm4 ; 4 ; save y4
paddsw mm2, mm3 ; t6 + t5
pmulhw mm2, [ocos_4_16] ; tp65 = (t6 + t5)*cos_4_16
psubsw mm6, mm3 ; 3 ; t6 - t5
pmulhw mm6, [ocos_4_16] ; tm65 = (t6 - t5)*cos_4_16
psubsw mm5, mm0 ; 0 ; y6 = tm03*tg_2_16 - tm12
por mm5, [fdct_one_corr] ; correction y6 +0.5
psllw mm1, SHIFT_FRW_COL ; t4
por mm2, [fdct_one_corr] ; correction tp65 +0.5
movq mm4, mm1 ; 4 ; t4
movq mm3, [x0] ; 3 ; x0
paddsw mm1, mm6 ; tp465 = t4 + tm65
psubsw mm3, [x7] ; t7 = x[0] - x[7]
psubsw mm4, mm6 ; 6 ; tm465 = t4 - tm65
movq mm0, [tg_1_16] ; 0 ; tg_1_16
psllw mm3, SHIFT_FRW_COL ; t7
movq mm6, [tg_3_16] ; 6 ; tg_3_16
pmulhw mm0, mm1 ; tp465*tg_1_16
movq [y0+8], mm7 ; 7 ; save y0
pmulhw mm6, mm4 ; tm465*tg_3_16
movq [y6+8], mm5 ; 5 ; save y6
movq mm7, mm3 ; 7 ; t7
movq mm5, [tg_3_16] ; 5 ; tg_3_16
psubsw mm7, mm2 ; tm765 = t7 - tp65
paddsw mm3, mm2 ; 2 ; tp765 = t7 + tp65
pmulhw mm5, mm7 ; tm765*tg_3_16
paddsw mm0, mm3 ; y1 = tp765 + tp465*tg_1_16
paddsw mm6, mm4 ; tm465*tg_3_16
pmulhw mm3, [tg_1_16] ; tp765*tg_1_16
;;
por mm0, [fdct_one_corr] ; correction y1 +0.5
paddsw mm5, mm7 ; tm765*tg_3_16
psubsw mm7, mm6 ; 6 ; y3 = tm765 - tm465*tg_3_16
;;
movq [y1+8], mm0 ; 0 ; save y1
paddsw mm5, mm4 ; 4 ; y5 = tm765*tg_3_16 + tm465
movq [y3+8], mm7 ; 7 ; save y3
psubsw mm3, mm1 ; 1 ; y7 = tp765*tg_1_16 - tp465
movq [y5+8], mm5 ; 5 ; save y5
movq [y7+8], mm3 ; 3 ; save y7
; emms;
; } ; end of forward_dct_col07()
; done with dct_col transform
; fdct_mmx32_cols() --
; the following subroutine repeats the row-transform operation,
; except with different shift&round constants. This version
; does NOT transpose the output again. Thus the final output
; is transposed with respect to the source.
;
; The output is stored into blk[], which destroys the original
; input data.
mov INP, [ebp+8]; ;; row 0
mov edi, 0x08; ;x = 8
lea TABLE, [tab_frw_01234567]; ; row 0
mov OUT, INP;
lea round_frw_row, [fdct_r_row];
; for ( x = 8; x > 0; --x ) ; transform one row per iteration
; ---------- loop begin
.lp_mmx_fdct_row1:
movd mm5, [INP+12]; ; mm5 = 7 6
punpcklwd mm5, [INP+8] ; mm5 = 5 7 4 6
movq mm2, mm5; ; mm2 = 5 7 4 6
psrlq mm5, 32; ; mm5 = _ _ 5 7
movq mm0, [INP]; ; mm0 = 3 2 1 0
punpcklwd mm5, mm2;; mm5 = 4 5 6 7
movq mm1, mm0; ; mm1 = 3 2 1 0
paddsw mm0, mm5; ; mm0 = [3+4, 2+5, 1+6, 0+7] (xt3, xt2, xt1, xt0)
psubsw mm1, mm5; ; mm1 = [3-4, 2-5, 1-6, 0-7] (xt7, xt6, xt5, xt4)
movq mm2, mm0; ; mm2 = [ xt3 xt2 xt1 xt0 ]
;movq [ xt3xt2xt1xt0 ], mm0;
;movq [ xt7xt6xt5xt4 ], mm1;
punpcklwd mm0, mm1;; mm0 = [ xt5 xt1 xt4 xt0 ]
punpckhwd mm2, mm1;; mm2 = [ xt7 xt3 xt6 xt2 ]
movq mm1, mm2; ; mm1
;; shuffle bytes around
; movq mm0, [INP] ; 0 ; x3 x2 x1 x0
; movq mm1, [INP+8] ; 1 ; x7 x6 x5 x4
movq mm2, mm0 ; 2 ; x3 x2 x1 x0
movq mm3, [TABLE] ; 3 ; w06 w04 w02 w00
punpcklwd mm0, mm1 ; x5 x1 x4 x0
movq mm5, mm0 ; 5 ; x5 x1 x4 x0
punpckldq mm0, mm0 ; x4 x0 x4 x0 [ xt2 xt0 xt2 xt0 ]
movq mm4, [TABLE+8] ; 4 ; w07 w05 w03 w01
punpckhwd mm2, mm1 ; 1 ; x7 x3 x6 x2
pmaddwd mm3, mm0 ; x4*w06+x0*w04 x4*w02+x0*w00
movq mm6, mm2 ; 6 ; x7 x3 x6 x2
movq mm1, [TABLE+32] ; 1 ; w22 w20 w18 w16
punpckldq mm2, mm2 ; x6 x2 x6 x2 [ xt3 xt1 xt3 xt1 ]
pmaddwd mm4, mm2 ; x6*w07+x2*w05 x6*w03+x2*w01
punpckhdq mm5, mm5 ; x5 x1 x5 x1 [ xt6 xt4 xt6 xt4 ]
pmaddwd mm0, [TABLE+16] ; x4*w14+x0*w12 x4*w10+x0*w08
punpckhdq mm6, mm6 ; x7 x3 x7 x3 [ xt7 xt5 xt7 xt5 ]
movq mm7, [TABLE+40] ; 7 ; w23 w21 w19 w17
pmaddwd mm1, mm5 ; x5*w22+x1*w20 x5*w18+x1*w16
;mm3 = a1, a0 (y2,y0)
;mm1 = b1, b0 (y3,y1)
;mm0 = a3,a2 (y6,y4)
;mm5 = b3,b2 (y7,y5)
paddd mm3, [round_frw_row] ; +rounder (y2,y0)
pmaddwd mm7, mm6 ; x7*w23+x3*w21 x7*w19+x3*w17
pmaddwd mm2, [TABLE+24] ; x6*w15+x2*w13 x6*w11+x2*w09
paddd mm3, mm4 ; 4 ; a1=sum(even1) a0=sum(even0) ; now ( y2, y0)
pmaddwd mm5, [TABLE+48] ; x5*w30+x1*w28 x5*w26+x1*w24
;;
pmaddwd mm6, [TABLE+56] ; x7*w31+x3*w29 x7*w27+x3*w25
paddd mm1, mm7 ; 7 ; b1=sum(odd1) b0=sum(odd0) ; now ( y3, y1)
paddd mm0, [round_frw_row] ; +rounder (y6,y4)
psrad mm3, SHIFT_FRW_ROW ; (y2, y0)
paddd mm1, [round_frw_row] ; +rounder (y3,y1)
paddd mm0, mm2 ; 2 ; a3=sum(even3) a2=sum(even2) ; now (y6, y4)
paddd mm5, [round_frw_row] ; +rounder (y7,y5)
psrad mm1, SHIFT_FRW_ROW ; y1=a1+b1 y0=a0+b0
paddd mm5, mm6 ; 6 ; b3=sum(odd3) b2=sum(odd2) ; now ( y7, y5)
psrad mm0, SHIFT_FRW_ROW ;y3=a3+b3 y2=a2+b2
add OUT, 16; ; increment row-output address by 1 row
psrad mm5, SHIFT_FRW_ROW ; y4=a3-b3 y5=a2-b2
add INP, 16; ; increment row-address by 1 row
packssdw mm3, mm0 ; 0 ; y6 y4 y2 y0
packssdw mm1, mm5 ; 3 ; y7 y5 y3 y1
movq mm6, mm3; ; mm0 = y6 y4 y2 y0
punpcklwd mm3, mm1; ; y3 y2 y1 y0
sub edi, 0x01; ; i = i - 1
punpckhwd mm6, mm1; ; y7 y6 y5 y4
add TABLE,64; ; increment to next table
movq [OUT-16], mm3 ; 1 ; save y3 y2 y1 y0
movq [OUT-8], mm6 ; 7 ; save y7 y6 y5 y4
cmp edi, 0x00;
jg near .lp_mmx_fdct_row1; ; begin fdct processing on next row
;;
;; Tidy up and return
;;
pop edi
pop edx
pop ecx
pop ebx
pop ebp ; restore stack pointer
emms
ret
/*
* MMX optimized motion estimation
* Copyright (c) 2001 Gerard Lantau.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*
*/
#include "../dsputil.h"
#include "mmx.h"
static const unsigned long long int mm_wone __attribute__ ((aligned(8))) = 0x0001000100010001ULL;
static const unsigned long long int mm_wtwo __attribute__ ((aligned(8))) = 0x0002000200020002ULL;
/* mm7 is accumulator, mm6 is zero */
static inline void sad_add(const UINT8 *p1, const UINT8 *p2)
{
    movq_m2r(*p1, mm0);
    movq_m2r(*p2, mm1);
    movq_r2r(mm0, mm2);
    psubusb_r2r(mm1, mm0);
    psubusb_r2r(mm2, mm1);
    por_r2r(mm1, mm0); /* mm0 is absolute value */
    movq_r2r(mm0, mm1);
    punpcklbw_r2r(mm6, mm0);
    punpckhbw_r2r(mm6, mm1);
    paddusw_r2r(mm0, mm7);
    paddusw_r2r(mm1, mm7);
}
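/*
 * Scalar equivalent of the saturated-subtract trick above (illustrative):
 * for each of the 8 bytes, |a - b| == max(a - b, 0) | max(b - a, 0), since
 * one of the two saturated differences is always zero. The absolute
 * differences are then widened to 16-bit words and accumulated in mm7.
 */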
/* convert mm7 to value */
static inline int sad_end(void)
{
    int res;
    movq_r2r(mm7, mm0);
    psrlq_i2r(32, mm7);
    paddusw_r2r(mm0, mm7);
    movq_r2r(mm7, mm0);
    psrlq_i2r(16, mm7);
    paddusw_r2r(mm0, mm7);
    __asm __volatile ("movd %%mm7, %0" : "=a" (res));
    return res & 0xffff;
}
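/*
 * Note: the two shift/add steps fold the four 16-bit partial sums in mm7
 * into the low word. For a 16x16 block the SAD is at most
 * 16 * 16 * 255 = 65280, so it fits in 16 bits and the final "& 0xffff"
 * just masks off the leftover partial sums above it.
 */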
int pix_abs16x16_mmx(UINT8 *blk1, UINT8 *blk2, int lx, int h)
{
    const UINT8 *p1, *p2;
    h >>= 1;
    p1 = blk1;
    p2 = blk2;
    pxor_r2r(mm7, mm7); /* mm7 is accumulator */
    pxor_r2r(mm6, mm6); /* mm6 is zero constant */
    do {
        sad_add(p1, p2);
        sad_add(p1 + 8, p2 + 8);
        p1 += lx;
        p2 += lx;
        sad_add(p1, p2);
        sad_add(p1 + 8, p2 + 8);
        p1 += lx;
        p2 += lx;
    } while (--h);
    return sad_end();
}
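/*
 * Note: the loop body processes two rows per iteration with h halved
 * beforehand, so h must be even; lx is the line stride in bytes.
 */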
/* please test it ! */
static inline void sad_add_sse(const UINT8 *p1, const UINT8 *p2)
{
    movq_m2r(*(p1 + 0), mm0);
    movq_m2r(*(p1 + 8), mm1);
    psadbw_m2r(*(p2 + 0), mm0);
    psadbw_m2r(*(p2 + 8), mm1);
    paddusw_r2r(mm0, mm7);
    paddusw_r2r(mm1, mm7);
}
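/*
 * Note: psadbw (available with SSE / extended MMX) computes the sum of
 * absolute differences of 8 byte pairs directly, leaving the 16-bit sum in
 * the low word, so no unpack/accumulate sequence is needed here.
 */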
int pix_abs16x16_sse(UINT8 *blk1, UINT8 *blk2, int lx, int h)
{
    const UINT8 *p1, *p2;
    h >>= 1;
    p1 = blk1;
    p2 = blk2;
    pxor_r2r(mm7, mm7); /* mm7 is accumulator */
    do {
        sad_add_sse(p1, p2);
        p1 += lx;
        p2 += lx;
        sad_add_sse(p1, p2);
        p1 += lx;
        p2 += lx;
    } while (--h);
    return sad_end();
}
#define DUMP(reg) { mmx_t tmp; movq_r2m(reg, tmp); printf(#reg "=%016Lx\n", tmp.uq); }
/* mm7 is accumulator, mm6 is zero */
static inline void sad_add_x2(const UINT8 *p1, const UINT8 *p2, const UINT8 *p3)
{
    movq_m2r(*(p2 + 0), mm0);
    movq_m2r(*(p3 + 0), mm1);
    movq_r2r(mm0, mm2);
    movq_r2r(mm1, mm3);
    punpcklbw_r2r(mm6, mm0); /* extract 4 bytes low */
    punpcklbw_r2r(mm6, mm1);
    punpckhbw_r2r(mm6, mm2); /* high */
    punpckhbw_r2r(mm6, mm3);
    paddusw_r2r(mm1, mm0);
    paddusw_r2r(mm3, mm2);
    movq_m2r(*(p1 + 0), mm1); /* mm1 : other value */
    paddusw_r2r(mm5, mm0); /* + 1 */
    paddusw_r2r(mm5, mm2); /* + 1 */
    psrlw_i2r(1, mm0);
    psrlw_i2r(1, mm2);
    packuswb_r2r(mm2, mm0); /* average is in mm0 */
    movq_r2r(mm1, mm2);
    psubusb_r2r(mm0, mm1);
    psubusb_r2r(mm2, mm0);
    por_r2r(mm1, mm0); /* mm0 is absolute value */
    movq_r2r(mm0, mm1);
    punpcklbw_r2r(mm6, mm0);
    punpckhbw_r2r(mm6, mm1);
    paddusw_r2r(mm0, mm7);
    paddusw_r2r(mm1, mm7);
}
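/*
 * Scalar equivalent (illustrative): for each byte i,
 *     avg = (p2[i] + p3[i] + 1) >> 1;
 *     sad += |p1[i] - avg|;
 * i.e. a rounded half-sample average of p2/p3 compared against the
 * reference p1, with mm5 holding the "+1" rounding constant (mm_wone).
 */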
int pix_abs16x16_x2_mmx(UINT8 *blk1, UINT8 *blk2, int lx, int h)
{
    const UINT8 *p1, *p2;
    p1 = blk1;
    p2 = blk2;
    pxor_r2r(mm7, mm7); /* mm7 is accumulator */
    pxor_r2r(mm6, mm6); /* mm6 is zero constant */
    movq_m2r(mm_wone, mm5); /* one constant */
    do {
        sad_add_x2(p1, p2, p2 + 1);
        sad_add_x2(p1 + 8, p2 + 8, p2 + 9);
        p1 += lx;
        p2 += lx;
    } while (--h);
    return sad_end();
}
int pix_abs16x16_y2_mmx(UINT8 *blk1, UINT8 *blk2, int lx, int h)
{
    const UINT8 *p1, *p2;
    p1 = blk1;
    p2 = blk2;
    pxor_r2r(mm7, mm7); /* mm7 is accumulator */
    pxor_r2r(mm6, mm6); /* mm6 is zero constant */
    movq_m2r(mm_wone, mm5); /* one constant */
    do {
        sad_add_x2(p1, p2, p2 + lx);
        sad_add_x2(p1 + 8, p2 + 8, p2 + 8 + lx);
        p1 += lx;
        p2 += lx;
    } while (--h);
    return sad_end();
}
/* mm7 is accumulator, mm6 is zero */
static inline void sad_add_xy2(const UINT8 *p1, const UINT8 *p2, const UINT8 *p3)
{
    movq_m2r(*(p2 + 0), mm0);
    movq_m2r(*(p3 + 0), mm1);
    movq_r2r(mm0, mm2);
    movq_r2r(mm1, mm3);
    punpcklbw_r2r(mm6, mm0); /* extract 4 bytes low */
    punpcklbw_r2r(mm6, mm1);
    punpckhbw_r2r(mm6, mm2); /* high */
    punpckhbw_r2r(mm6, mm3);
    paddusw_r2r(mm1, mm0);
    paddusw_r2r(mm3, mm2);
    movq_m2r(*(p2 + 1), mm1);
    movq_m2r(*(p3 + 1), mm3);
    movq_r2r(mm1, mm4);
    punpcklbw_r2r(mm6, mm1); /* low */
    punpckhbw_r2r(mm6, mm4); /* high */
    paddusw_r2r(mm1, mm0);
    paddusw_r2r(mm4, mm2);
    movq_r2r(mm3, mm4);
    punpcklbw_r2r(mm6, mm3); /* low */
    punpckhbw_r2r(mm6, mm4); /* high */
    paddusw_r2r(mm3, mm0);
    paddusw_r2r(mm4, mm2);
    movq_m2r(*(p1 + 0), mm1); /* mm1 : other value */
    paddusw_r2r(mm5, mm0); /* + 2 */
    paddusw_r2r(mm5, mm2); /* + 2 */
    psrlw_i2r(2, mm0);
    psrlw_i2r(2, mm2);
    packuswb_r2r(mm2, mm0); /* average is in mm0 */
    movq_r2r(mm1, mm2);
    psubusb_r2r(mm0, mm1);
    psubusb_r2r(mm2, mm0);
    por_r2r(mm1, mm0); /* mm0 is absolute value */
    movq_r2r(mm0, mm1);
    punpcklbw_r2r(mm6, mm0);
    punpckhbw_r2r(mm6, mm1);
    paddusw_r2r(mm0, mm7);
    paddusw_r2r(mm1, mm7);
}
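/*
 * Scalar equivalent (illustrative): for each byte i,
 *     avg = (p2[i] + p2[i+1] + p3[i] + p3[i+1] + 2) >> 2;
 *     sad += |p1[i] - avg|;
 * i.e. a rounded average of the four neighbours (x/y half-sample position),
 * with mm5 holding the "+2" rounding constant (mm_wtwo).
 */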
int pix_abs16x16_xy2_mmx(UINT8 *blk1, UINT8 *blk2, int lx, int h)
{
    const UINT8 *p1, *p2, *p3;
    p1 = blk1;
    p2 = blk2;
    p3 = blk2 + lx;
    pxor_r2r(mm7, mm7); /* mm7 is accumulator */
    pxor_r2r(mm6, mm6); /* mm6 is zero constant */
    movq_m2r(mm_wtwo, mm5); /* two constant */
    do {
        sad_add_xy2(p1, p2, p2 + lx);
        sad_add_xy2(p1 + 8, p2 + 8, p2 + 8 + lx);
        p1 += lx;
        p2 += lx;
    } while (--h);
    return sad_end();
}
; MMX/SSE optimized routines for SAD of 16*16 macroblocks
; Copyright (C) Juan J. Sierralta P. <juanjo@atmlab.utfsm.cl>
;
; dist1_* Original Copyright (C) 2000 Chris Atenasio <chris@crud.net>
; Enhancements and rest Copyright (C) 2000 Andrew Stevens <as@comlab.ox.ac.uk>
;
; This program is free software; you can redistribute it and/or
; modify it under the terms of the GNU General Public License
; as published by the Free Software Foundation; either version 2
; of the License, or (at your option) any later version.
;
; This program is distributed in the hope that it will be useful,
; but WITHOUT ANY WARRANTY; without even the implied warranty of
; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
; GNU General Public License for more details.
;
; You should have received a copy of the GNU General Public License
; along with this program; if not, write to the Free Software
; Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
;
global pix_abs16x16_mmx
; int pix_abs16x16_mmx(unsigned char *pix1,unsigned char *pix2, int lx, int h);
; esi = p1 (init: blk1)
; edi = p2 (init: blk2)
; ecx = rowsleft (init: h)
; edx = lx;
; mm0 = distance accumulators (4 words)
; mm1 = distance accumulators (4 words)
; mm2 = temp
; mm3 = temp
; mm4 = temp
; mm5 = temp
; mm6 = 0
; mm7 = temp
align 32
pix_abs16x16_mmx:
push ebp ; save frame pointer
mov ebp, esp
push ebx ; Save registers (callee-save convention in
push ecx ; x86 GCC it seems)
push edx ;
push esi
push edi
pxor mm0, mm0 ; zero accumulators
pxor mm1, mm1
pxor mm6, mm6
mov esi, [ebp+8] ; get pix1
mov edi, [ebp+12] ; get pix2
mov edx, [ebp+16] ; get lx
mov ecx, [ebp+20] ; get rowsleft
jmp .nextrow
align 32
.nextrow:
; First 8 bytes of the row
movq mm4, [edi] ; load first 8 bytes of pix2 row
movq mm5, [esi] ; load first 8 bytes of pix1 row
movq mm3, mm4 ; mm4 := abs(mm4-mm5)
movq mm2,[esi+8] ; load last 8 bytes of pix1 row
psubusb mm4, mm5
movq mm7,[edi+8] ; load last 8 bytes of pix2 row
psubusb mm5, mm3
por mm4, mm5
; Last 8 bytes of the row
movq mm3, mm7 ; mm7 := abs(mm7-mm2)
psubusb mm7, mm2
psubusb mm2, mm3
por mm7, mm2
; Now mm4 and mm7 have 16 absdiffs to add
; First 8 bytes of the row2
add edi, edx
movq mm2, [edi] ; load first 8 bytes of pix2 row
add esi, edx
movq mm5, [esi] ; load first 8 bytes of pix1 row
movq mm3, mm2 ; mm2 := abs(mm2-mm5)
psubusb mm2, mm5
movq mm6,[esi+8] ; load last 8 bytes of pix1 row
psubusb mm5, mm3
por mm2, mm5
; Last 8 bytes of the row2
movq mm5,[edi+8] ; load last 8 bytes of pix2 row
movq mm3, mm5 ; mm5 := abs(mm5-mm6)
psubusb mm5, mm6
psubusb mm6, mm3
por mm5, mm6
; Now mm2, mm4, mm5, mm7 have 32 absdiffs
movq mm3, mm7
pxor mm6, mm6 ; Zero mm6
punpcklbw mm3, mm6 ; Unpack to words and add
punpckhbw mm7, mm6
paddusw mm7, mm3
movq mm3, mm5
punpcklbw mm3, mm6 ; Unpack to words and add
punpckhbw mm5, mm6
paddusw mm5, mm3
paddusw mm0, mm7 ; Add to the accumulator (mm0)
paddusw mm1, mm5 ; Add to the accumulator (mm1)
movq mm3, mm4
punpcklbw mm3, mm6 ; Unpack to words and add
punpckhbw mm4, mm6
movq mm5, mm2
paddusw mm4, mm3
punpcklbw mm5, mm6 ; Unpack to words and add
punpckhbw mm2, mm6
paddusw mm2, mm5
; Loop termination
add esi, edx ; update pointers to next row
paddusw mm0, mm4 ; Add to the accumulator (mm0)
add edi, edx
sub ecx,2
paddusw mm1, mm2 ; Add to the accumulator (mm1)
test ecx, ecx ; check rowsleft
jnz near .nextrow
paddusw mm0, mm1
movq mm2, mm0 ; Copy mm0 to mm2
psrlq mm2, 32
paddusw mm0, mm2 ; Add
movq mm3, mm0
psrlq mm3, 16
paddusw mm0, mm3
movd eax, mm0 ; Store return value
and eax, 0xffff
pop edi
pop esi
pop edx
pop ecx
pop ebx
pop ebp ; restore stack pointer
;emms ; clear mmx registers
ret ; return
global pix_abs16x16_sse
; int pix_abs16x16_sse(unsigned char *pix1,unsigned char *pix2, int lx, int h);
; esi = p1 (init: blk1)
; edi = p2 (init: blk2)
; ecx = rowsleft (init: h)
; edx = lx;
; mm0 = distance accumulators (4 words)
; mm1 = distance accumulators (4 words)
; mm2 = temp
; mm3 = temp
; mm4 = temp
; mm5 = temp
; mm6 = temp
; mm7 = temp
align 32
pix_abs16x16_sse:
push ebp ; save frame pointer
mov ebp, esp
push ebx ; Save registers (callee-save convention in
push ecx ; x86 GCC it seems)
push edx ;
push esi
push edi
pxor mm0, mm0 ; zero accumulators
pxor mm1, mm1
mov esi, [ebp+8] ; get pix1
mov edi, [ebp+12] ; get pix2
mov edx, [ebp+16] ; get lx
mov ecx, [ebp+20] ; get rowsleft
jmp .next4row
align 32
.next4row:
; First row
movq mm4, [edi] ; load first 8 bytes of pix2 row
movq mm5, [edi+8] ; load last 8 bytes of pix2 row
psadbw mm4, [esi] ; SAD of first 8 bytes
psadbw mm5, [esi+8] ; SAD of last 8 bytes
paddw mm0, mm4 ; Add to accumulators
paddw mm1, mm5
; Second row
add edi, edx;
add esi, edx;
movq mm6, [edi] ; load first 8 bytes of pix2 row
movq mm7, [edi+8] ; load last 8 bytes of pix2 row
psadbw mm6, [esi] ; SAD of first 8 bytes
psadbw mm7, [esi+8] ; SAD of last 8 bytes
paddw mm0, mm6 ; Add to accumulators
paddw mm1, mm7
; Third row
add edi, edx;
add esi, edx;
movq mm4, [edi] ; load first 8 bytes of pix2 row
movq mm5, [edi+8] ; load last 8 bytes of pix2 row
psadbw mm4, [esi] ; SAD of first 8 bytes
psadbw mm5, [esi+8] ; SAD of last 8 bytes
paddw mm0, mm4 ; Add to accumulators
paddw mm1, mm5
; Fourth row
add edi, edx;
add esi, edx;
movq mm6, [edi] ; load first 8 bytes of pix2 row
movq mm7, [edi+8] ; load last 8 bytes of pix2 row
psadbw mm6, [esi] ; SAD of first 8 bytes
psadbw mm7, [esi+8] ; SAD of last 8 bytes
paddw mm0, mm6 ; Add to accumulators
paddw mm1, mm7
; Loop termination
add esi, edx ; update pointers to next row
add edi, edx
sub ecx,4
test ecx, ecx ; check rowsleft
jnz near .next4row
paddd mm0, mm1 ; Sum accumulators
movd eax, mm0 ; Store return value
pop edi
pop esi
pop edx
pop ecx
pop ebx
pop ebp ; restore stack pointer
;emms ; clear mmx registers
ret ; return
global pix_abs16x16_x2_mmx
; int pix_abs16x16_x2_mmx(unsigned char *pix1,unsigned char *pix2, int lx, int h);
; esi = p1 (init: blk1)
; edi = p2 (init: blk2)
; ecx = rowsleft (init: h)
; edx = lx;
; mm0 = distance accumulators (4 words)
; mm1 = distance accumulators (4 words)
; mm2 = temp
; mm3 = temp
; mm4 = temp
; mm5 = temp
; mm6 = 0
; mm7 = temp
align 32
pix_abs16x16_x2_mmx:
push ebp ; save frame pointer
mov ebp, esp
push ebx ; Save registers (callee-save convention in
push ecx ; x86 GCC it seems)
push edx ;
push esi
push edi
pxor mm0, mm0 ; zero accumulators
pxor mm1, mm1
pxor mm6, mm6
mov esi, [ebp+8] ; get pix1
mov edi, [ebp+12] ; get pix2
mov edx, [ebp+16] ; get lx
mov ecx, [ebp+20] ; get rowsleft
jmp .nextrow_x2
align 32
.nextrow_x2:
; First 8 bytes of the row
movq mm4, [edi] ; load first 8 bytes of pix2 row
movq mm5, [edi+1] ; load bytes 1-8 of pix2 row
movq mm2, mm4 ; copy mm4 on mm2
movq mm3, mm5 ; copy mm5 on mm3
punpcklbw mm4, mm6 ; first 4 bytes of [edi] on mm4
punpcklbw mm5, mm6 ; first 4 bytes of [edi+1] on mm5
paddusw mm4, mm5 ; mm4 := first 4 bytes interpolated in words
psrlw mm4, 1
punpckhbw mm2, mm6 ; last 4 bytes of [edi] on mm2
punpckhbw mm3, mm6 ; last 4 bytes of [edi+1] on mm3
paddusw mm2, mm3 ; mm2 := last 4 bytes interpolated in words
psrlw mm2, 1
packuswb mm4, mm2 ; pack 8 bytes interpolated on mm4
movq mm5,[esi] ; load first 8 bytes of pix1 row
movq mm3, mm4 ; mm4 := abs(mm4-mm5)
psubusb mm4, mm5
psubusb mm5, mm3
por mm4, mm5
; Last 8 bytes of the row
movq mm7, [edi+8] ; load last 8 bytes of pix2 row
movq mm5, [edi+9] ; load bytes 9-16 of pix2 row
movq mm2, mm7 ; copy mm7 on mm2
movq mm3, mm5 ; copy mm5 on mm3
punpcklbw mm7, mm6 ; first 4 bytes of [edi+8] on mm7
punpcklbw mm5, mm6 ; first 4 bytes of [edi+9] on mm5
paddusw mm7, mm5 ; mm7 := first 4 bytes interpolated in words
psrlw mm7, 1
punpckhbw mm2, mm6 ; last 4 bytes of [edi+8] on mm2
punpckhbw mm3, mm6 ; last 4 bytes of [edi+9] on mm3
paddusw mm2, mm3 ; mm2 := last 4 bytes interpolated in words
psrlw mm2, 1
packuswb mm7, mm2 ; pack 8 bytes interpolated on mm7
movq mm5,[esi+8] ; load last 8 bytes of pix1 row
movq mm3, mm7 ; mm7 := abs(mm7-mm5)
psubusb mm7, mm5
psubusb mm5, mm3
por mm7, mm5
; Now mm4 and mm7 have 16 absdiffs to add
movq mm3, mm4 ; Make copies of these bytes
movq mm2, mm7
punpcklbw mm4, mm6 ; Unpack to words and add
punpcklbw mm7, mm6
paddusw mm4, mm7
paddusw mm0, mm4 ; Add to the accumulator (mm0)
punpckhbw mm3, mm6 ; Unpack to words and add
punpckhbw mm2, mm6
paddusw mm3, mm2
paddusw mm1, mm3 ; Add to the accumulator (mm1)
; Loop termination
add esi, edx ; update pointers to next row
add edi, edx
sub ecx,1
test ecx, ecx ; check rowsleft
jnz near .nextrow_x2
paddusw mm0, mm1
movq mm1, mm0 ; Copy mm0 to mm1
psrlq mm1, 32
paddusw mm0, mm1 ; Add
movq mm2, mm0
psrlq mm2, 16
paddusw mm0, mm2
movd eax, mm0 ; Store return value
and eax, 0xffff
pop edi
pop esi
pop edx
pop ecx
pop ebx
pop ebp ; restore stack pointer
emms ; clear mmx registers
ret ; return
global pix_abs16x16_y2_mmx
; int pix_abs16x16_y2_mmx(unsigned char *pix1,unsigned char *pix2, int lx, int h);
; esi = p1 (init: blk1)
; edi = p2 (init: blk2)
; ebx = p2 + lx
; ecx = rowsleft (init: h)
; edx = lx;
; mm0 = distance accumulators (4 words)
; mm1 = distance accumulators (4 words)
; mm2 = temp
; mm3 = temp
; mm4 = temp
; mm5 = temp
; mm6 = 0
; mm7 = temp
align 32
pix_abs16x16_y2_mmx:
push ebp ; save frame pointer
mov ebp, esp
push ebx ; Save registers (callee-save convention in
push ecx ; x86 GCC it seems)
push edx ;
push esi
push edi
pxor mm0, mm0 ; zero accumulators
pxor mm1, mm1
pxor mm6, mm6
mov esi, [ebp+8] ; get pix1
mov edi, [ebp+12] ; get pix2
mov edx, [ebp+16] ; get lx
mov ecx, [ebp+20] ; get rowsleft
mov ebx, edi
add ebx, edx
jmp .nextrow_y2
align 32
.nextrow_y2:
; First 8 bytes of the row
movq mm4, [edi] ; load first 8 bytes of pix2 row
movq mm5, [ebx] ; load first 8 bytes of next pix2 row
movq mm2, mm4 ; copy mm4 on mm2
movq mm3, mm5 ; copy mm5 on mm3
punpcklbw mm4, mm6 ; first 4 bytes of [edi] on mm4
punpcklbw mm5, mm6 ; first 4 bytes of [ebx] on mm5
paddusw mm4, mm5 ; mm4 := first 4 bytes interpolated in words
psrlw mm4, 1
punpckhbw mm2, mm6 ; last 4 bytes of [edi] on mm2
punpckhbw mm3, mm6 ; last 4 bytes of [ebx] on mm3
paddusw mm2, mm3 ; mm2 := last 4 bytes interpolated in words
psrlw mm2, 1
packuswb mm4, mm2 ; pack 8 bytes interpolated on mm4
movq mm5,[esi] ; load first 8 bytes of pix1 row
movq mm3, mm4 ; mm4 := abs(mm4-mm5)
psubusb mm4, mm5
psubusb mm5, mm3
por mm4, mm5
; Last 8 bytes of the row
movq mm7, [edi+8] ; load last 8 bytes of pix2 row
movq mm5, [ebx+8] ; load last 8 bytes of next pix2 row
movq mm2, mm7 ; copy mm7 on mm2
movq mm3, mm5 ; copy mm5 on mm3
punpcklbw mm7, mm6 ; first 4 bytes of [edi+8] on mm7
punpcklbw mm5, mm6 ; first 4 bytes of [ebx+8] on mm5
paddusw mm7, mm5 ; mm7 := first 4 bytes interpolated in words
psrlw mm7, 1
punpckhbw mm2, mm6 ; last 4 bytes of [edi+8] on mm2
punpckhbw mm3, mm6 ; last 4 bytes of [ebx+8] on mm3
paddusw mm2, mm3 ; mm2 := last 4 bytes interpolated in words
psrlw mm2, 1
packuswb mm7, mm2 ; pack 8 bytes interpolated on mm7
movq mm5,[esi+8] ; load last 8 bytes of pix1 row
movq mm3, mm7 ; mm7 := abs(mm7-mm5)
psubusb mm7, mm5
psubusb mm5, mm3
por mm7, mm5
; Now mm4 and mm7 have 16 absdiffs to add
movq mm3, mm4 ; Make copies of these bytes
movq mm2, mm7
punpcklbw mm4, mm6 ; Unpack to words and add
punpcklbw mm7, mm6
paddusw mm4, mm7
paddusw mm0, mm4 ; Add to the accumulator (mm0)
punpckhbw mm3, mm6 ; Unpack to words and add
punpckhbw mm2, mm6
paddusw mm3, mm2
paddusw mm1, mm3 ; Add to the accumulator (mm1)
; Loop termination
add esi, edx ; update pointers to next row
add edi, edx
add ebx, edx
sub ecx,1
test ecx, ecx ; check rowsleft
jnz near .nextrow_y2
paddusw mm0, mm1
movq mm1, mm0 ; Copy mm0 to mm1
psrlq mm1, 32
paddusw mm0, mm1 ; Add
movq mm2, mm0
psrlq mm2, 16
paddusw mm0, mm2
movd eax, mm0 ; Store return value
and eax, 0xffff
pop edi
pop esi
pop edx
pop ecx
pop ebx
pop ebp ; restore stack pointer
emms ; clear mmx registers
ret ; return
global pix_abs16x16_xy2_mmx
; int pix_abs16x16_xy2_mmx(unsigned char *p1,unsigned char *p2,int lx,int h);
; esi = p1 (init: blk1)
; edi = p2 (init: blk2)
; ebx = p1+lx
; ecx = rowsleft (init: h)
; edx = lx;
; mm0 = distance accumulators (4 words)
; mm1 = bytes p2
; mm2 = bytes p1
; mm3 = bytes p1+lx
; I'd love to find someplace to stash p1+1 and p1+lx+1's bytes
; but I don't think thats going to happen in iA32-land...
; mm4 = temp 4 bytes in words interpolating p1, p1+1
; mm5 = temp 4 bytes in words from p2
; mm6 = temp comparison bit mask p1,p2
; mm7 = temp comparison bit mask p2,p1
align 32
pix_abs16x16_xy2_mmx:
push ebp ; save stack pointer
mov ebp, esp ; so that we can do this
push ebx ; Save registers (callee-save convention in
push ecx ; x86 GCC it seems)
push edx ;
push esi
push edi
pxor mm0, mm0 ; zero accumulators
mov esi, [ebp+12] ; get p2 (second argument; this block is interpolated)
mov edi, [ebp+8] ; get p1 (first argument; the reference block)
mov edx, [ebp+16] ; get lx
mov ecx, [ebp+20] ; rowsleft := h
mov ebx, esi
add ebx, edx
jmp .nextrowmm11 ; snap to it
align 32
.nextrowmm11:
;;
;; First 8 bytes of row
;;
;; First 4 bytes of 8
movq mm4, [esi] ; mm4 := first 4 bytes p1
pxor mm7, mm7
movq mm2, mm4 ; mm2 records all 8 bytes
punpcklbw mm4, mm7 ; First 4 bytes p1 in Words...
movq mm6, [ebx] ; mm6 := first 4 bytes p1+lx
movq mm3, mm6 ; mm3 records all 8 bytes
punpcklbw mm6, mm7
paddw mm4, mm6
movq mm5, [esi+1] ; mm5 := first 4 bytes p1+1
punpcklbw mm5, mm7 ; First 4 bytes p1 in Words...
paddw mm4, mm5
movq mm6, [ebx+1] ; mm6 := first 4 bytes p1+lx+1
punpcklbw mm6, mm7
paddw mm4, mm6
psrlw mm4, 2 ; mm4 := First 4 bytes interpolated in words
movq mm5, [edi] ; mm5:=first 4 bytes of p2 in words
movq mm1, mm5
punpcklbw mm5, mm7
movq mm7,mm4
pcmpgtw mm7,mm5 ; mm7 := [i : W0..3,mm4>mm5]
movq mm6,mm4 ; mm6 := [i : W0..3, (mm4-mm5)*(mm4-mm5 > 0)]
psubw mm6,mm5
pand mm6, mm7
paddw mm0, mm6 ; Add to accumulator
movq mm6,mm5 ; mm6 := [i : W0..3,mm5>mm4]
pcmpgtw mm6,mm4
psubw mm5,mm4 ; mm5 := [i : B0..7, (mm5-mm4)*(mm5-mm4 > 0)]
pand mm5, mm6
paddw mm0, mm5 ; Add to accumulator
;; Second 4 bytes of 8
movq mm4, mm2 ; mm4 := Second 4 bytes p1 in words
pxor mm7, mm7
punpckhbw mm4, mm7
movq mm6, mm3 ; mm6 := Second 4 bytes p1+1 in words
punpckhbw mm6, mm7
paddw mm4, mm6
movq mm5, [esi+1] ; mm5 := first 4 bytes p1+1
punpckhbw mm5, mm7 ; First 4 bytes p1 in Words...
paddw mm4, mm5
movq mm6, [ebx+1] ; mm6 := first 4 bytes p1+lx+1
punpckhbw mm6, mm7
paddw mm4, mm6
psrlw mm4, 2 ; mm4 := First 4 bytes interpolated in words
movq mm5, mm1 ; mm5:= second 4 bytes of p2 in words
punpckhbw mm5, mm7
movq mm7,mm4
pcmpgtw mm7,mm5 ; mm7 := [i : W0..3,mm4>mm5]
movq mm6,mm4 ; mm6 := [i : W0..3, (mm4-mm5)*(mm4-mm5 > 0)]
psubw mm6,mm5
pand mm6, mm7
paddw mm0, mm6 ; Add to accumulator
movq mm6,mm5 ; mm6 := [i : W0..3,mm5>mm4]
pcmpgtw mm6,mm4
psubw mm5,mm4 ; mm5 := [i : B0..7, (mm5-mm4)*(mm5-mm4 > 0)]
pand mm5, mm6
paddw mm0, mm5 ; Add to accumulator
;;
;; Second 8 bytes of row
;;
;; First 4 bytes of 8
movq mm4, [esi+8] ; mm4 := first 4 bytes p1+8
pxor mm7, mm7
movq mm2, mm4 ; mm2 records all 8 bytes
punpcklbw mm4, mm7 ; First 4 bytes p1 in Words...
movq mm6, [ebx+8] ; mm6 := first 4 bytes p1+lx+8
movq mm3, mm6 ; mm3 records all 8 bytes
punpcklbw mm6, mm7
paddw mm4, mm6
movq mm5, [esi+9] ; mm5 := first 4 bytes p1+9
punpcklbw mm5, mm7 ; First 4 bytes p1 in Words...
paddw mm4, mm5
movq mm6, [ebx+9] ; mm6 := first 4 bytes p1+lx+9
punpcklbw mm6, mm7
paddw mm4, mm6
psrlw mm4, 2 ; mm4 := First 4 bytes interpolated in words
movq mm5, [edi+8] ; mm5:=first 4 bytes of p2+8 in words
movq mm1, mm5
punpcklbw mm5, mm7
movq mm7,mm4
pcmpgtw mm7,mm5 ; mm7 := [i : W0..3,mm4>mm5]
movq mm6,mm4 ; mm6 := [i : W0..3, (mm4-mm5)*(mm4-mm5 > 0)]
psubw mm6,mm5
pand mm6, mm7
paddw mm0, mm6 ; Add to accumulator
movq mm6,mm5 ; mm6 := [i : W0..3,mm5>mm4]
pcmpgtw mm6,mm4
psubw mm5,mm4 ; mm5 := [i : B0..7, (mm5-mm4)*(mm5-mm4 > 0)]
pand mm5, mm6
paddw mm0, mm5 ; Add to accumulator
;; Second 4 bytes of 8
movq mm4, mm2 ; mm4 := Second 4 bytes p1 in words
pxor mm7, mm7
punpckhbw mm4, mm7
movq mm6, mm3 ; mm6 := Second 4 bytes p1+1 in words
punpckhbw mm6, mm7
paddw mm4, mm6
movq mm5, [esi+9] ; mm5 := first 4 bytes p1+1
punpckhbw mm5, mm7 ; First 4 bytes p1 in Words...
paddw mm4, mm5
movq mm6, [ebx+9] ; mm6 := first 4 bytes p1+lx+1
punpckhbw mm6, mm7
paddw mm4, mm6
psrlw mm4, 2 ; mm4 := First 4 bytes interpolated in words
movq mm5, mm1 ; mm5:= second 4 bytes of p2 in words
punpckhbw mm5, mm7
movq mm7,mm4
pcmpgtw mm7,mm5 ; mm7 := [i : W0..3,mm4>mm5]
movq mm6,mm4 ; mm6 := [i : W0..3, (mm4-mm5)*(mm4-mm5 > 0)]
psubw mm6,mm5
pand mm6, mm7
paddw mm0, mm6 ; Add to accumulator
movq mm6,mm5 ; mm6 := [i : W0..3,mm5>mm4]
pcmpgtw mm6,mm4
psubw mm5,mm4 ; mm5 := [i : B0..7, (mm5-mm4)*(mm5-mm4 > 0)]
pand mm5, mm6
paddw mm0, mm5 ; Add to accumulator
;;
;; Loop termination condition... and stepping
;;
add esi, edx ; update pointer to next row
add edi, edx ; ditto
add ebx, edx
sub ecx,1
test ecx, ecx ; check rowsleft
jnz near .nextrowmm11
;; Sum the Accumulators
movq mm4, mm0
psrlq mm4, 32
paddw mm0, mm4
movq mm6, mm0
psrlq mm6, 16
paddw mm0, mm6
movd eax, mm0 ; store return value
and eax, 0xffff
pop edi
pop esi
pop edx
pop ecx
pop ebx
pop ebp ; restore stack pointer
emms ; clear mmx registers
ret ; we now return you to your regular programming