;******************************************************************************
;* MMX optimized DSP utils
;* Copyright (c) 2008 Loren Merritt
;*
;* This file is part of Libav.
;*
;* Libav is free software; you can redistribute it and/or
;* modify it under the terms of the GNU Lesser General Public
;* License as published by the Free Software Foundation; either
;* version 2.1 of the License, or (at your option) any later version.
;*
;* Libav is distributed in the hope that it will be useful,
;* but WITHOUT ANY WARRANTY; without even the implied warranty of
;* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
;* Lesser General Public License for more details.
;*
;* You should have received a copy of the GNU Lesser General Public
;* License along with Libav; if not, write to the Free Software
;* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
;******************************************************************************

%include "libavutil/x86/x86util.asm"

SECTION_RODATA
cextern pb_FC
cextern h263_loop_filter_strength
pb_f: times 16 db 15
pb_zzzzzzzz77777777: times 8 db -1
pb_7: times 8 db 7
pb_zzzz3333zzzzbbbb: db -1,-1,-1,-1,3,3,3,3,-1,-1,-1,-1,11,11,11,11
pb_zz11zz55zz99zzdd: db -1,-1,1,1,-1,-1,5,5,-1,-1,9,9,-1,-1,13,13
pb_revwords: SHUFFLE_MASK_W 7, 6, 5, 4, 3, 2, 1, 0
pd_16384: times 4 dd 16384
pb_bswap32: db 3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12

SECTION_TEXT

%macro SCALARPRODUCT 0
; int scalarproduct_int16(int16_t *v1, int16_t *v2, int order)
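; A rough C sketch of what this computes (illustrative only, not part of the
; original source; order is assumed to be a multiple of the elements handled
; per loop iteration):
;     int32_t sum = 0;
;     for (int i = 0; i < order; i++)
;         sum += v1[i] * v2[i];
;     return sum;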
cglobal scalarproduct_int16, 3,3,3, v1, v2, order
    shl orderq, 1
    add v1q, orderq
    add v2q, orderq
    neg orderq
    pxor    m2, m2
.loop:
    movu    m0, [v1q + orderq]
    movu    m1, [v1q + orderq + mmsize]
    pmaddwd m0, [v2q + orderq]
    pmaddwd m1, [v2q + orderq + mmsize]
    paddd   m2, m0
    paddd   m2, m1
    add     orderq, mmsize*2
    jl .loop
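    ; reduce the dword partial sums in m2 to a single dword (horizontal add)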
%if mmsize == 16
    movhlps m0, m2
    paddd   m2, m0
    pshuflw m0, m2, 0x4e
%else
    pshufw  m0, m2, 0x4e
%endif
    paddd   m2, m0
    movd   eax, m2
    RET

; int scalarproduct_and_madd_int16(int16_t *v1, int16_t *v2, int16_t *v3, int order, int mul)
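; Computes the dot product of v1 and v2 while updating v1 in place.  A rough
; C sketch (illustrative only):
;     int32_t sum = 0;
;     for (int i = 0; i < order; i++) {
;         sum   += v1[i] * v2[i];   // uses the old v1[i]
;         v1[i] += mul * v3[i];
;     }
;     return sum;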
cglobal scalarproduct_and_madd_int16, 4,4,8, v1, v2, v3, order, mul
    shl orderq, 1
    movd    m7, mulm
%if mmsize == 16
    pshuflw m7, m7, 0
    punpcklqdq m7, m7
%else
    pshufw  m7, m7, 0
%endif
    pxor    m6, m6
    add v1q, orderq
    add v2q, orderq
    add v3q, orderq
    neg orderq
.loop:
    movu    m0, [v2q + orderq]
    movu    m1, [v2q + orderq + mmsize]
    mova    m4, [v1q + orderq]
    mova    m5, [v1q + orderq + mmsize]
    movu    m2, [v3q + orderq]
    movu    m3, [v3q + orderq + mmsize]
    pmaddwd m0, m4
    pmaddwd m1, m5
    pmullw  m2, m7
    pmullw  m3, m7
    paddd   m6, m0
    paddd   m6, m1
    paddw   m2, m4
    paddw   m3, m5
    mova    [v1q + orderq], m2
    mova    [v1q + orderq + mmsize], m3
    add     orderq, mmsize*2
    jl .loop
%if mmsize == 16
    movhlps m0, m6
    paddd   m6, m0
    pshuflw m0, m6, 0x4e
%else
    pshufw  m0, m6, 0x4e
%endif
    paddd   m6, m0
    movd   eax, m6
    RET
%endmacro

INIT_MMX mmxext
SCALARPRODUCT
INIT_XMM sse2
SCALARPRODUCT

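; Misaligned-input variant: v2/v3 are rounded down to 16-byte alignment and
; neighbouring aligned loads are recombined with palignr; %1 is the shared
; byte misalignment of v2 and v3 (v1 is assumed to be aligned).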
%macro SCALARPRODUCT_LOOP 1
align 16
.loop%1:
    sub     orderq, mmsize*2
%if %1
    mova    m1, m4
    mova    m4, [v2q + orderq]
    mova    m0, [v2q + orderq + mmsize]
    palignr m1, m0, %1
    palignr m0, m4, %1
    mova    m3, m5
    mova    m5, [v3q + orderq]
    mova    m2, [v3q + orderq + mmsize]
    palignr m3, m2, %1
    palignr m2, m5, %1
%else
    mova    m0, [v2q + orderq]
    mova    m1, [v2q + orderq + mmsize]
    mova    m2, [v3q + orderq]
    mova    m3, [v3q + orderq + mmsize]
%endif
    %define t0  [v1q + orderq]
    %define t1  [v1q + orderq + mmsize]
%if ARCH_X86_64
    mova    m8, t0
    mova    m9, t1
    %define t0  m8
    %define t1  m9
%endif
    pmaddwd m0, t0
    pmaddwd m1, t1
    pmullw  m2, m7
    pmullw  m3, m7
    paddw   m2, t0
    paddw   m3, t1
    paddd   m6, m0
    paddd   m6, m1
    mova    [v1q + orderq], m2
    mova    [v1q + orderq + mmsize], m3
    jg .loop%1
%if %1
    jmp .end
%endif
%endmacro

; int scalarproduct_and_madd_int16(int16_t *v1, int16_t *v2, int16_t *v3, int order, int mul)
INIT_XMM ssse3
cglobal scalarproduct_and_madd_int16, 4,5,10, v1, v2, v3, order, mul
    shl orderq, 1
    movd    m7, mulm
    pshuflw m7, m7, 0
    punpcklqdq m7, m7
    pxor    m6, m6
    mov    r4d, v2d
    and    r4d, 15
    and    v2q, ~15
    and    v3q, ~15
    mova    m4, [v2q + orderq]
    mova    m5, [v3q + orderq]
    ; a linear chain of compares is faster than a branch tree or a jump table,
    ; because the branches taken are cyclic (i.e. predictable)
    cmp    r4d, 0
    je .loop0
    cmp    r4d, 2
    je .loop2
    cmp    r4d, 4
    je .loop4
    cmp    r4d, 6
    je .loop6
    cmp    r4d, 8
    je .loop8
    cmp    r4d, 10
    je .loop10
    cmp    r4d, 12
    je .loop12
SCALARPRODUCT_LOOP 14
SCALARPRODUCT_LOOP 12
SCALARPRODUCT_LOOP 10
SCALARPRODUCT_LOOP 8
SCALARPRODUCT_LOOP 6
SCALARPRODUCT_LOOP 4
SCALARPRODUCT_LOOP 2
SCALARPRODUCT_LOOP 0
.end:
    movhlps m0, m6
    paddd   m6, m0
    pshuflw m0, m6, 0x4e
    paddd   m6, m0
    movd   eax, m6
    RET


;-----------------------------------------------------------------------------
; void ff_apply_window_int16(int16_t *output, const int16_t *input,
;                            const int16_t *window, unsigned int len)
;-----------------------------------------------------------------------------

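; A rough C model of the rounded variant (illustrative only; only the first
; len/2 window entries are read, applied symmetrically to both ends of the
; input, and the non-bitexact variants truncate instead of rounding):
;     for (i = 0; i < len / 2; i++) {
;         output[i]           = (input[i]           * window[i] + (1 << 14)) >> 15;
;         output[len - i - 1] = (input[len - i - 1] * window[i] + (1 << 14)) >> 15;
;     }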
%macro REVERSE_WORDS 1-2
%if cpuflag(ssse3) && notcpuflag(atom)
    pshufb  %1, %2
%elif cpuflag(sse2)
    pshuflw  %1, %1, 0x1B
    pshufhw  %1, %1, 0x1B
    pshufd   %1, %1, 0x4E
%elif cpuflag(mmxext)
    pshufw   %1, %1, 0x1B
%endif
%endmacro

%macro MUL16FIXED 3
%if cpuflag(ssse3) ; dst, src, unused
; dst = ((dst * src) + (1<<14)) >> 15
    pmulhrsw   %1, %2
%elif cpuflag(mmxext) ; dst, src, temp
; dst = (dst * src) >> 15
; pmulhw cuts off the bottom bit, so we have to lshift by 1 and add it back
; in from the pmullw result.
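;     (a*b) >> 15  ==  (((a*b) >> 16) << 1) | (((a*b) & 0xffff) >> 15)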
    mova    %3, %1
    pmulhw  %1, %2
    pmullw  %3, %2
    psrlw   %3, 15
    psllw   %1, 1
    por     %1, %3
%endif
%endmacro

%macro APPLY_WINDOW_INT16 1 ; %1 bitexact version
%if %1
cglobal apply_window_int16, 4,5,6, output, input, window, offset, offset2
%else
cglobal apply_window_int16_round, 4,5,6, output, input, window, offset, offset2
%endif
    lea     offset2q, [offsetq-mmsize]
%if cpuflag(ssse3) && notcpuflag(atom)
    mova          m5, [pb_revwords]
    ALIGN 16
%elif %1
    mova          m5, [pd_16384]
%endif
.loop:
%if cpuflag(ssse3)
    ; This version does the 16x16->16 multiplication in-place without expanding
    ; to 32-bit. The ssse3 version is bit-identical to the rounded C version.
    mova          m0, [windowq+offset2q]
    mova          m1, [ inputq+offset2q]
    pmulhrsw      m1, m0
    REVERSE_WORDS m0, m5
    pmulhrsw      m0, [ inputq+offsetq ]
    mova  [outputq+offset2q], m1
    mova  [outputq+offsetq ], m0
%elif %1
    ; This version expands 16-bit to 32-bit, multiplies by the window,
    ; adds 16384 for rounding, right shifts 15, then repacks back to words to
    ; save to the output. The window is reversed for the second half.
    mova          m3, [windowq+offset2q]
    mova          m4, [ inputq+offset2q]
    pxor          m0, m0
    punpcklwd     m0, m3
    punpcklwd     m1, m4
    pmaddwd       m0, m1
    paddd         m0, m5
    psrad         m0, 15
    pxor          m2, m2
    punpckhwd     m2, m3
    punpckhwd     m1, m4
    pmaddwd       m2, m1
    paddd         m2, m5
    psrad         m2, 15
    packssdw      m0, m2
    mova  [outputq+offset2q], m0
    REVERSE_WORDS m3
    mova          m4, [ inputq+offsetq]
    pxor          m0, m0
    punpcklwd     m0, m3
    punpcklwd     m1, m4
    pmaddwd       m0, m1
    paddd         m0, m5
    psrad         m0, 15
    pxor          m2, m2
    punpckhwd     m2, m3
    punpckhwd     m1, m4
    pmaddwd       m2, m1
    paddd         m2, m5
    psrad         m2, 15
    packssdw      m0, m2
    mova  [outputq+offsetq], m0
%else
    ; This version does the 16x16->16 multiplication in-place without expanding
    ; to 32-bit. The mmxext and sse2 versions do not use rounding, and
    ; therefore are not bit-identical to the C version.
    mova          m0, [windowq+offset2q]
    mova          m1, [ inputq+offset2q]
    mova          m2, [ inputq+offsetq ]
    MUL16FIXED    m1, m0, m3
    REVERSE_WORDS m0
    MUL16FIXED    m2, m0, m3
    mova  [outputq+offset2q], m1
    mova  [outputq+offsetq ], m2
%endif
    add      offsetd, mmsize
    sub     offset2d, mmsize
    jae .loop
    REP_RET
%endmacro

INIT_MMX mmxext
APPLY_WINDOW_INT16 0
INIT_XMM sse2
APPLY_WINDOW_INT16 0

INIT_MMX mmxext
APPLY_WINDOW_INT16 1
INIT_XMM sse2
APPLY_WINDOW_INT16 1
INIT_XMM ssse3
APPLY_WINDOW_INT16 1
INIT_XMM ssse3, atom
APPLY_WINDOW_INT16 1


; void add_hfyu_median_prediction_mmxext(uint8_t *dst, const uint8_t *top, const uint8_t *diff, int w, int *left, int *left_top)
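; Rough C model (mid_pred = median of three; all arithmetic is on bytes,
; modulo 256; illustrative only):
;     l = *left; tl = *left_top;
;     for (i = 0; i < w; i++) {
;         l      = mid_pred(l, top[i], l + top[i] - tl) + diff[i];
;         tl     = top[i];
;         dst[i] = l;
;     }
;     *left = l; *left_top = tl;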
INIT_MMX mmxext
cglobal add_hfyu_median_prediction, 6,6,0, dst, top, diff, w, left, left_top
    movq    mm0, [topq]
    movq    mm2, mm0
    movd    mm4, [left_topq]
    psllq   mm2, 8
    movq    mm1, mm0
    por     mm4, mm2
    movd    mm3, [leftq]
    psubb   mm0, mm4 ; t-tl
    add    dstq, wq
    add    topq, wq
    add   diffq, wq
    neg      wq
    jmp .skip
.loop:
    movq    mm4, [topq+wq]
    movq    mm0, mm4
    psllq   mm4, 8
    por     mm4, mm1
    movq    mm1, mm0 ; t
    psubb   mm0, mm4 ; t-tl
.skip:
    movq    mm2, [diffq+wq]
%assign i 0
%rep 8
    movq    mm4, mm0
    paddb   mm4, mm3 ; t-tl+l
    movq    mm5, mm3
    pmaxub  mm3, mm1
    pminub  mm5, mm1
    pminub  mm3, mm4
    pmaxub  mm3, mm5 ; median
    paddb   mm3, mm2 ; +residual
%if i==0
    movq    mm7, mm3
    psllq   mm7, 56
%else
    movq    mm6, mm3
    psrlq   mm7, 8
    psllq   mm6, 56
    por     mm7, mm6
%endif
%if i<7
    psrlq   mm0, 8
    psrlq   mm1, 8
    psrlq   mm2, 8
%endif
%assign i i+1
%endrep
    movq [dstq+wq], mm7
    add      wq, 8
    jl .loop
    movzx   r2d, byte [dstq-1]
    mov [leftq], r2d
    movzx   r2d, byte [topq-1]
    mov [left_topq], r2d
    RET


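; Undoes left prediction: dst[i] = src[i] + dst[i-1].  The loop computes a
; byte-wise parallel prefix sum: each shift/shuffle + paddb step folds in the
; sum of the preceding 1, 2, 4 (and, for xmm, 8) bytes, and the last byte of
; the previous chunk is broadcast into m0 and added to the whole chunk.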
%macro ADD_HFYU_LEFT_LOOP 2 ; %1 = dst_is_aligned, %2 = src_is_aligned
    add     srcq, wq
    add     dstq, wq
    neg     wq
%%.loop:
%if %2
    mova    m1, [srcq+wq]
%else
    movu    m1, [srcq+wq]
%endif
    mova    m2, m1
    psllw   m1, 8
    paddb   m1, m2
    mova    m2, m1
    pshufb  m1, m3
    paddb   m1, m2
    pshufb  m0, m5
    mova    m2, m1
    pshufb  m1, m4
    paddb   m1, m2
%if mmsize == 16
    mova    m2, m1
    pshufb  m1, m6
    paddb   m1, m2
%endif
    paddb   m0, m1
%if %1
    mova    [dstq+wq], m0
%else
    movq    [dstq+wq], m0
    movhps  [dstq+wq+8], m0
%endif
    add     wq, mmsize
    jl %%.loop
    mov     eax, mmsize-1
    sub     eax, wd
    movd    m1, eax
    pshufb  m0, m1
    movd    eax, m0
    RET
%endmacro

; int add_hfyu_left_prediction(uint8_t *dst, const uint8_t *src, int w, int left)
INIT_MMX ssse3
cglobal add_hfyu_left_prediction, 3,3,7, dst, src, w, left
.skip_prologue:
    mova    m5, [pb_7]
    mova    m4, [pb_zzzz3333zzzzbbbb]
    mova    m3, [pb_zz11zz55zz99zzdd]
    movd    m0, leftm
    psllq   m0, 56
    ADD_HFYU_LEFT_LOOP 1, 1

INIT_XMM sse4
cglobal add_hfyu_left_prediction, 3,3,7, dst, src, w, left
    mova    m5, [pb_f]
    mova    m6, [pb_zzzzzzzz77777777]
    mova    m4, [pb_zzzz3333zzzzbbbb]
    mova    m3, [pb_zz11zz55zz99zzdd]
    movd    m0, leftm
    pslldq  m0, 15
    test    srcq, 15
    jnz .src_unaligned
    test    dstq, 15
    jnz .dst_unaligned
    ADD_HFYU_LEFT_LOOP 1, 1
.dst_unaligned:
    ADD_HFYU_LEFT_LOOP 0, 1
.src_unaligned:
    ADD_HFYU_LEFT_LOOP 0, 0

;-----------------------------------------------------------------------------
; void ff_vector_clip_int32(int32_t *dst, const int32_t *src, int32_t min,
;                           int32_t max, unsigned int len)
;-----------------------------------------------------------------------------

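; Clamps each 32-bit element: dst[i] = av_clip(src[i], min, max).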
; %1 = number of xmm registers used
; %2 = number of inline load/process/store loops per asm loop
; %3 = process 4*mmsize (%3=0) or 8*mmsize (%3=1) bytes per loop
; %4 = CLIPD function takes min/max as float instead of int (CLIPD_SSE2)
; %5 = suffix
%macro VECTOR_CLIP_INT32 4-5
cglobal vector_clip_int32%5, 5,5,%1, dst, src, min, max, len
%if %4
    cvtsi2ss  m4, minm
    cvtsi2ss  m5, maxm
%else
    movd      m4, minm
    movd      m5, maxm
%endif
    SPLATD    m4
    SPLATD    m5
.loop:
%assign %%i 1
%rep %2
    mova      m0,  [srcq+mmsize*0*%%i]
    mova      m1,  [srcq+mmsize*1*%%i]
    mova      m2,  [srcq+mmsize*2*%%i]
    mova      m3,  [srcq+mmsize*3*%%i]
%if %3
    mova      m7,  [srcq+mmsize*4*%%i]
    mova      m8,  [srcq+mmsize*5*%%i]
    mova      m9,  [srcq+mmsize*6*%%i]
    mova      m10, [srcq+mmsize*7*%%i]
%endif
    CLIPD  m0,  m4, m5, m6
    CLIPD  m1,  m4, m5, m6
    CLIPD  m2,  m4, m5, m6
    CLIPD  m3,  m4, m5, m6
%if %3
    CLIPD  m7,  m4, m5, m6
    CLIPD  m8,  m4, m5, m6
    CLIPD  m9,  m4, m5, m6
    CLIPD  m10, m4, m5, m6
%endif
    mova  [dstq+mmsize*0*%%i], m0
    mova  [dstq+mmsize*1*%%i], m1
    mova  [dstq+mmsize*2*%%i], m2
    mova  [dstq+mmsize*3*%%i], m3
%if %3
    mova  [dstq+mmsize*4*%%i], m7
    mova  [dstq+mmsize*5*%%i], m8
    mova  [dstq+mmsize*6*%%i], m9
    mova  [dstq+mmsize*7*%%i], m10
%endif
%assign %%i %%i+1
%endrep
    add     srcq, mmsize*4*(%2+%3)
    add     dstq, mmsize*4*(%2+%3)
    sub     lend, mmsize*(%2+%3)
    jg .loop
    REP_RET
%endmacro

INIT_MMX mmx
%define CLIPD CLIPD_MMX
VECTOR_CLIP_INT32 0, 1, 0, 0
INIT_XMM sse2
VECTOR_CLIP_INT32 6, 1, 0, 0, _int
%define CLIPD CLIPD_SSE2
VECTOR_CLIP_INT32 6, 2, 0, 1
INIT_XMM sse4
%define CLIPD CLIPD_SSE41
%ifdef m8
VECTOR_CLIP_INT32 11, 1, 1, 0
%else
VECTOR_CLIP_INT32 6, 1, 0, 0
%endif

; %1 = aligned/unaligned
%macro BSWAP_LOOPS  1
    mov      r3, r2
    sar      r2, 3
    jz       .left4_%1
.loop8_%1:
    mov%1    m0, [r1 +  0]
    mov%1    m1, [r1 + 16]
%if cpuflag(ssse3)
    pshufb   m0, m2
    pshufb   m1, m2
    mova     [r0 +  0], m0
    mova     [r0 + 16], m1
%else
    pshuflw  m0, m0, 10110001b
    pshuflw  m1, m1, 10110001b
    pshufhw  m0, m0, 10110001b
    pshufhw  m1, m1, 10110001b
    mova     m2, m0
    mova     m3, m1
    psllw    m0, 8
    psllw    m1, 8
    psrlw    m2, 8
    psrlw    m3, 8
    por      m2, m0
    por      m3, m1
    mova     [r0 +  0], m2
    mova     [r0 + 16], m3
%endif
    add      r0, 32
    add      r1, 32
    dec      r2
    jnz      .loop8_%1
.left4_%1:
    mov      r2, r3
    and      r3, 4
    jz       .left
    mov%1    m0, [r1]
%if cpuflag(ssse3)
    pshufb   m0, m2
    mova     [r0], m0
%else
    pshuflw  m0, m0, 10110001b
    pshufhw  m0, m0, 10110001b
    mova     m2, m0
    psllw    m0, 8
    psrlw    m2, 8
    por      m2, m0
    mova     [r0], m2
%endif
    add      r1, 16
    add      r0, 16
%endmacro

; void bswap_buf(uint32_t *dst, const uint32_t *src, int w);
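; Byte-swaps w 32-bit words: dst[i] = bswap32(src[i]).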
%macro BSWAP32_BUF 0
%if cpuflag(ssse3)
cglobal bswap32_buf, 3,4,3
    mov      r3, r1
    mova     m2, [pb_bswap32]
%else
cglobal bswap32_buf, 3,4,5
    mov      r3, r1
%endif
    and      r3, 15
    jz       .start_align
    BSWAP_LOOPS  u
    jmp      .left
.start_align:
    BSWAP_LOOPS  a
.left:
%if cpuflag(ssse3)
    mov      r3, r2
    and      r2, 2
    jz       .left1
    movq     m0, [r1]
    pshufb   m0, m2
    movq     [r0], m0
    add      r1, 8
    add      r0, 8
.left1:
    and      r3, 1
    jz       .end
    mov      r2d, [r1]
    bswap    r2d
    mov      [r0], r2d
%else
    and      r2, 3
    jz       .end
.loop2:
    mov      r3d, [r1]
    bswap    r3d
    mov      [r0], r3d
    add      r1, 4
    add      r0, 4
    dec      r2
    jnz      .loop2
%endif
.end:
    RET
%endmacro

INIT_XMM sse2
BSWAP32_BUF

INIT_XMM ssse3
BSWAP32_BUF


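; In-place H.263 deblocking of four pixel rows across the edge between %2 and
; %3; %5 = 2*strength from the h263_loop_filter_strength table.  On exit,
; m3/m4 hold the filtered %2/%3 rows and m5/m6 the adjusted %1/%4 rows (the
; caller stores or re-transposes them).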
%macro H263_LOOP_FILTER 5
    pxor         m7, m7
    mova         m0, [%1]
    mova         m1, [%1]
    mova         m2, [%4]
    mova         m3, [%4]
    punpcklbw    m0, m7
    punpckhbw    m1, m7
    punpcklbw    m2, m7
    punpckhbw    m3, m7
    psubw        m0, m2
    psubw        m1, m3
    mova         m2, [%2]
    mova         m3, [%2]
    mova         m4, [%3]
    mova         m5, [%3]
    punpcklbw    m2, m7
    punpckhbw    m3, m7
    punpcklbw    m4, m7
    punpckhbw    m5, m7
    psubw        m4, m2
    psubw        m5, m3
    psllw        m4, 2
    psllw        m5, 2
    paddw        m4, m0
    paddw        m5, m1
    pxor         m6, m6
    pcmpgtw      m6, m4
    pcmpgtw      m7, m5
    pxor         m4, m6
    pxor         m5, m7
    psubw        m4, m6
    psubw        m5, m7
    psrlw        m4, 3
    psrlw        m5, 3
    packuswb     m4, m5
    packsswb     m6, m7
    pxor         m7, m7
    movd         m2, %5
    punpcklbw    m2, m2
    punpcklbw    m2, m2
    punpcklbw    m2, m2
    psubusb      m2, m4
    mova         m3, m2
    psubusb      m3, m4
    psubb        m2, m3
    mova         m3, [%2]
    mova         m4, [%3]
    pxor         m3, m6
    pxor         m4, m6
    paddusb      m3, m2
    psubusb      m4, m2
    pxor         m3, m6
    pxor         m4, m6
    paddusb      m2, m2
    packsswb     m0, m1
    pcmpgtb      m7, m0
    pxor         m0, m7
    psubb        m0, m7
    mova         m1, m0
    psubusb      m0, m2
    psubb        m1, m0
    pand         m1, [pb_FC]
    psrlw        m1, 2
    pxor         m1, m7
    psubb        m1, m7
    mova         m5, [%1]
    mova         m6, [%4]
    psubb        m5, m1
    paddb        m6, m1
%endmacro

INIT_MMX mmx
; void h263_v_loop_filter(uint8_t *src, int stride, int qscale)
cglobal h263_v_loop_filter, 3,5
    movsxdifnidn r1, r1d
    movsxdifnidn r2, r2d

    lea          r4, [h263_loop_filter_strength]
    movzx       r3d, BYTE [r4+r2]
    movsx        r2, r3b
    shl          r2, 1

    mov          r3, r0
    sub          r3, r1
    mov          r4, r3
    sub          r4, r1
    H263_LOOP_FILTER r4, r3, r0, r0+r1, r2d

    mova       [r3], m3
    mova       [r0], m4
    mova       [r4], m5
    mova    [r0+r1], m6
    RET

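; Transposes a 4x4 block of bytes: reads four rows at %1 with stride r1
; (r3 = 3*r1) and stores the four resulting columns as dwords at %2+0/8/16/24.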
%macro TRANSPOSE4X4 2
    movd      m0, [%1]
    movd      m1, [%1+r1]
    movd      m2, [%1+r1*2]
    movd      m3, [%1+r3]
    punpcklbw m0, m1
    punpcklbw m2, m3
    mova      m1, m0
    punpcklwd m0, m2
    punpckhwd m1, m2
    movd [%2+ 0], m0
    punpckhdq m0, m0
    movd [%2+ 8], m0
    movd [%2+16], m1
    punpckhdq m1, m1
    movd [%2+24], m1
%endmacro


; void h263_h_loop_filter(uint8_t *src, int stride, int qscale)
INIT_MMX mmx
cglobal h263_h_loop_filter, 3,5,0,32
    movsxdifnidn r1, r1d
    movsxdifnidn r2, r2d

    lea          r4, [h263_loop_filter_strength]
    movzx       r3d, BYTE [r4+r2]
    movsx        r2, r3b
    shl          r2, 1

    sub          r0, 2
    lea          r3, [r1*3]

    TRANSPOSE4X4 r0, rsp
    lea          r4, [r0+r1*4]
    TRANSPOSE4X4 r4, rsp+4

    H263_LOOP_FILTER rsp, rsp+8, rsp+16, rsp+24, r2d

    mova         m1, m5
    mova         m0, m4
    punpcklbw    m5, m3
    punpcklbw    m4, m6
    punpckhbw    m1, m3
    punpckhbw    m0, m6
    mova         m3, m5
    mova         m6, m1
    punpcklwd    m5, m4
    punpcklwd    m1, m0
    punpckhwd    m3, m4
    punpckhwd    m6, m0
    movd       [r0], m5
    punpckhdq    m5, m5
    movd  [r0+r1*1], m5
    movd  [r0+r1*2], m3
    punpckhdq    m3, m3
    movd    [r0+r3], m3
    movd       [r4], m1
    punpckhdq    m1, m1
    movd  [r4+r1*1], m1
    movd  [r4+r1*2], m6
    punpckhdq    m6, m6
    movd    [r4+r3], m6
    RET