/*
 * VC-1 and WMV3 - DSP functions MMX-optimized
 * Copyright (c) 2007 Christophe GISQUET <christophe.gisquet@free.fr>
 *
 * Permission is hereby granted, free of charge, to any person
 * obtaining a copy of this software and associated documentation
 * files (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use,
 * copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following
 * conditions:
 *
 * The above copyright notice and this permission notice shall be
 * included in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
 * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
 * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
 * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include "libavutil/x86_cpu.h"
#include "libavcodec/dsputil.h"
#include "dsputil_mmx.h"

/** Add the rounder in mm7 to mm3 and mm4, then shift both right by SHIFT */
#define NORMALIZE_MMX(SHIFT)                                    \
     "paddw     %%mm7, %%mm3           \n\t" /* +bias-r */      \
     "paddw     %%mm7, %%mm4           \n\t" /* +bias-r */      \
     "psraw     "SHIFT", %%mm3         \n\t"                    \
     "psraw     "SHIFT", %%mm4         \n\t"

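/** Pack mm3/mm4 down to 8 unsigned bits and store the 8 resulting pixels at (%2) */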
#define TRANSFER_DO_PACK                        \
     "packuswb  %%mm4, %%mm3           \n\t"    \
     "movq      %%mm3, (%2)            \n\t"

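/** Store mm3/mm4 unpacked (8 words, 16 bytes) at (%2) for use by a later pass */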
#define TRANSFER_DONT_PACK                      \
     "movq      %%mm3, 0(%2)           \n\t"    \
     "movq      %%mm4, 8(%2)           \n\t"

/** @see MSPEL_FILTER13_CORE for use as UNPACK macro */
#define DO_UNPACK(reg)  "punpcklbw %%mm0, " reg "\n\t"
#define DONT_UNPACK(reg)

/** Compute the rounder 32-r or 8-r and unpack it to mm7 */
#define LOAD_ROUNDER_MMX(ROUND)                 \
     "movd      "ROUND", %%mm7         \n\t"    \
     "punpcklwd %%mm7, %%mm7           \n\t"    \
     "punpckldq %%mm7, %%mm7           \n\t"

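/**
 * One step of the vertical (-1,9,9,-1) half-pel filter: R1/R2 hold the two
 * centre rows, the rows at -2*stride (%3) and +stride (%2) are fetched into
 * R0/R3, and 9*(R1+R2) - R0 - R3 is rounded, shifted by %4 and stored at
 * OFF(%1); src (%0) is then advanced by one line.
 */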
#define SHIFT2_LINE(OFF, R0,R1,R2,R3)           \
    "paddw     %%mm"#R2", %%mm"#R1"    \n\t"    \
    "movd      (%0,%3), %%mm"#R0"      \n\t"    \
    "pmullw    %%mm6, %%mm"#R1"        \n\t"    \
    "punpcklbw %%mm0, %%mm"#R0"        \n\t"    \
    "movd      (%0,%2), %%mm"#R3"      \n\t"    \
    "psubw     %%mm"#R0", %%mm"#R1"    \n\t"    \
    "punpcklbw %%mm0, %%mm"#R3"        \n\t"    \
    "paddw     %%mm7, %%mm"#R1"        \n\t"    \
    "psubw     %%mm"#R3", %%mm"#R1"    \n\t"    \
    "psraw     %4, %%mm"#R1"           \n\t"    \
    "movq      %%mm"#R1", "#OFF"(%1)   \n\t"    \
    "add       %2, %0                  \n\t"

DECLARE_ALIGNED_16(const uint64_t, ff_pw_9) = 0x0009000900090009ULL;

/** Sacrificing mm6 makes it possible to pipeline loads from src */
static void vc1_put_ver_16b_shift2_mmx(int16_t *dst,
                                       const uint8_t *src, x86_reg stride,
                                       int rnd, int64_t shift)
{
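    /* Each loop iteration filters a 4-column strip over 8 output rows (one
     * movq stores 4 words, the dst row stride is 24 bytes); three iterations
     * fill the 12-column intermediate buffer used by the horizontal pass. */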
    __asm__ volatile(
        "mov       $3, %%"REG_c"           \n\t"
        LOAD_ROUNDER_MMX("%5")
        "movq      "MANGLE(ff_pw_9)", %%mm6 \n\t"
        "1:                                \n\t"
        "movd      (%0), %%mm2             \n\t"
        "add       %2, %0                  \n\t"
        "movd      (%0), %%mm3             \n\t"
        "punpcklbw %%mm0, %%mm2            \n\t"
        "punpcklbw %%mm0, %%mm3            \n\t"
        SHIFT2_LINE(  0, 1, 2, 3, 4)
        SHIFT2_LINE( 24, 2, 3, 4, 1)
        SHIFT2_LINE( 48, 3, 4, 1, 2)
        SHIFT2_LINE( 72, 4, 1, 2, 3)
        SHIFT2_LINE( 96, 1, 2, 3, 4)
        SHIFT2_LINE(120, 2, 3, 4, 1)
        SHIFT2_LINE(144, 3, 4, 1, 2)
        SHIFT2_LINE(168, 4, 1, 2, 3)
        "sub       %6, %0                  \n\t"
        "add       $8, %1                  \n\t"
        "dec       %%"REG_c"               \n\t"
        "jnz 1b                            \n\t"
        : "+r"(src), "+r"(dst)
        : "r"(stride), "r"(-2*stride),
          "m"(shift), "m"(rnd), "r"(9*stride-4)
        : "%"REG_c, "memory"
    );
}

/**
 * Data is already unpacked, so some operations can be done directly from
 * memory.
 */
static void vc1_put_hor_16b_shift2_mmx(uint8_t *dst, x86_reg stride,
                                       const int16_t *src, int rnd)
{
    int h = 8;

    src -= 1;
    rnd -= (-1+9+9-1)*1024; /* Add -1024 bias */
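    /* -16*1024 folded into the rounder becomes -128 after the >>7 in
     * NORMALIZE_MMX and is cancelled by the ff_pw_128 addition below;
     * presumably this keeps the word accumulators in signed 16-bit range. */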
    __asm__ volatile(
        LOAD_ROUNDER_MMX("%4")
        "movq      "MANGLE(ff_pw_128)", %%mm6\n\t"
        "movq      "MANGLE(ff_pw_9)", %%mm5 \n\t"
        "1:                                \n\t"
        "movq      2*0+0(%1), %%mm1        \n\t"
        "movq      2*0+8(%1), %%mm2        \n\t"
        "movq      2*1+0(%1), %%mm3        \n\t"
        "movq      2*1+8(%1), %%mm4        \n\t"
        "paddw     2*3+0(%1), %%mm1        \n\t"
        "paddw     2*3+8(%1), %%mm2        \n\t"
        "paddw     2*2+0(%1), %%mm3        \n\t"
        "paddw     2*2+8(%1), %%mm4        \n\t"
        "pmullw    %%mm5, %%mm3            \n\t"
        "pmullw    %%mm5, %%mm4            \n\t"
        "psubw     %%mm1, %%mm3            \n\t"
        "psubw     %%mm2, %%mm4            \n\t"
        NORMALIZE_MMX("$7")
        /* Remove bias */
        "paddw     %%mm6, %%mm3            \n\t"
        "paddw     %%mm6, %%mm4            \n\t"
        TRANSFER_DO_PACK
        "add       $24, %1                 \n\t"
        "add       %3, %2                  \n\t"
        "decl      %0                      \n\t"
        "jnz 1b                            \n\t"
        : "+r"(h), "+r" (src),  "+r" (dst)
        : "r"(stride), "m"(rnd)
        : "memory"
    );
}


/**
 * Purely vertical or horizontal 1/2 shift interpolation.
 * Sacrifice mm6 for the *9 factor.
 */
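/* The offset argument is the distance between filter taps: vc1_mspel_mc()
 * passes stride for the vertical case and 1 for the horizontal case. */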
static void vc1_put_shift2_mmx(uint8_t *dst, const uint8_t *src,
                               x86_reg stride, int rnd, x86_reg offset)
{
    rnd = 8-rnd;
    __asm__ volatile(
        "mov       $8, %%"REG_c"           \n\t"
        LOAD_ROUNDER_MMX("%5")
        "movq      "MANGLE(ff_pw_9)", %%mm6\n\t"
        "1:                                \n\t"
        "movd      0(%0   ), %%mm3         \n\t"
        "movd      4(%0   ), %%mm4         \n\t"
        "movd      0(%0,%2), %%mm1         \n\t"
        "movd      4(%0,%2), %%mm2         \n\t"
        "add       %2, %0                  \n\t"
        "punpcklbw %%mm0, %%mm3            \n\t"
        "punpcklbw %%mm0, %%mm4            \n\t"
        "punpcklbw %%mm0, %%mm1            \n\t"
        "punpcklbw %%mm0, %%mm2            \n\t"
        "paddw     %%mm1, %%mm3            \n\t"
        "paddw     %%mm2, %%mm4            \n\t"
        "movd      0(%0,%3), %%mm1         \n\t"
        "movd      4(%0,%3), %%mm2         \n\t"
        "pmullw    %%mm6, %%mm3            \n\t" /* 0,9,9,0*/
        "pmullw    %%mm6, %%mm4            \n\t" /* 0,9,9,0*/
        "punpcklbw %%mm0, %%mm1            \n\t"
        "punpcklbw %%mm0, %%mm2            \n\t"
        "psubw     %%mm1, %%mm3            \n\t" /*-1,9,9,0*/
        "psubw     %%mm2, %%mm4            \n\t" /*-1,9,9,0*/
        "movd      0(%0,%2), %%mm1         \n\t"
        "movd      4(%0,%2), %%mm2         \n\t"
        "punpcklbw %%mm0, %%mm1            \n\t"
        "punpcklbw %%mm0, %%mm2            \n\t"
        "psubw     %%mm1, %%mm3            \n\t" /*-1,9,9,-1*/
        "psubw     %%mm2, %%mm4            \n\t" /*-1,9,9,-1*/
        NORMALIZE_MMX("$4")
        "packuswb  %%mm4, %%mm3            \n\t"
        "movq      %%mm3, (%1)             \n\t"
        "add       %6, %0                  \n\t"
        "add       %4, %1                  \n\t"
        "dec       %%"REG_c"               \n\t"
        "jnz 1b                            \n\t"
        : "+r"(src),  "+r"(dst)
        : "r"(offset), "r"(-2*offset), "g"(stride), "m"(rnd),
          "g"(stride-offset)
        : "%"REG_c, "memory"
    );
}

/**
 * Filter coefficients made global so that all the 1/4 and 3/4 shift
 * interpolation functions can access them.
 */
DECLARE_ASM_CONST(16, uint64_t, ff_pw_53) = 0x0035003500350035ULL;
DECLARE_ASM_CONST(16, uint64_t, ff_pw_18) = 0x0012001200120012ULL;
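/* Together with the *3 (ff_pw_3) and *4 (psllw $2) factors used in
 * MSPEL_FILTER13_CORE, these are the quarter-pel bicubic taps
 * (-4, 53, 18, -3), whose sum of 64 matches the final normalization. */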

/**
 * Core of the 1/4 and 3/4 shift bicubic interpolation.
 *
 * @param UNPACK  Macro unpacking arguments from 8 to 16bits (can be empty).
 * @param MOVQ    "movd 1", or "movq 2" if the data read is already unpacked.
 * @param A1      Address of 1st tap (beware of unpacked/packed).
 * @param A2      Address of 2nd tap
 * @param A3      Address of 3rd tap
 * @param A4      Address of 4th tap
 */
#define MSPEL_FILTER13_CORE(UNPACK, MOVQ, A1, A2, A3, A4)       \
     MOVQ "*0+"A1", %%mm1       \n\t"                           \
     MOVQ "*4+"A1", %%mm2       \n\t"                           \
     UNPACK("%%mm1")                                            \
     UNPACK("%%mm2")                                            \
     "pmullw    "MANGLE(ff_pw_3)", %%mm1\n\t"                   \
     "pmullw    "MANGLE(ff_pw_3)", %%mm2\n\t"                   \
     MOVQ "*0+"A2", %%mm3       \n\t"                           \
     MOVQ "*4+"A2", %%mm4       \n\t"                           \
     UNPACK("%%mm3")                                            \
     UNPACK("%%mm4")                                            \
     "pmullw    %%mm6, %%mm3    \n\t" /* *18 */                 \
     "pmullw    %%mm6, %%mm4    \n\t" /* *18 */                 \
     "psubw     %%mm1, %%mm3    \n\t" /* 18,-3 */               \
     "psubw     %%mm2, %%mm4    \n\t" /* 18,-3 */               \
     MOVQ "*0+"A4", %%mm1       \n\t"                           \
     MOVQ "*4+"A4", %%mm2       \n\t"                           \
     UNPACK("%%mm1")                                            \
     UNPACK("%%mm2")                                            \
     "psllw     $2, %%mm1       \n\t" /* 4* */                  \
     "psllw     $2, %%mm2       \n\t" /* 4* */                  \
     "psubw     %%mm1, %%mm3    \n\t" /* -4,18,-3 */            \
     "psubw     %%mm2, %%mm4    \n\t" /* -4,18,-3 */            \
     MOVQ "*0+"A3", %%mm1       \n\t"                           \
     MOVQ "*4+"A3", %%mm2       \n\t"                           \
     UNPACK("%%mm1")                                            \
     UNPACK("%%mm2")                                            \
     "pmullw    %%mm5, %%mm1    \n\t" /* *53 */                 \
     "pmullw    %%mm5, %%mm2    \n\t" /* *53 */                 \
     "paddw     %%mm1, %%mm3    \n\t" /* 4,53,18,-3 */          \
     "paddw     %%mm2, %%mm4    \n\t" /* 4,53,18,-3 */

/**
 * Macro to build the vertical 16bits version of vc1_put_shift[13].
 * Here, offset=src_stride. Parameters passed A1 to A4 must use
 * %3 (src_stride) and %4 (3*src_stride).
 *
 * @param  NAME   Either 1 or 3
 * @see MSPEL_FILTER13_CORE for information on A1->A4
 */
#define MSPEL_FILTER13_VER_16B(NAME, A1, A2, A3, A4)                    \
static void                                                             \
vc1_put_ver_16b_ ## NAME ## _mmx(int16_t *dst, const uint8_t *src,      \
                                 x86_reg src_stride,                   \
                                 int rnd, int64_t shift)                \
{                                                                       \
    int h = 8;                                                          \
    src -= src_stride;                                                  \
    __asm__ volatile(                                                       \
        LOAD_ROUNDER_MMX("%5")                                          \
        "movq      "MANGLE(ff_pw_53)", %%mm5\n\t"                       \
        "movq      "MANGLE(ff_pw_18)", %%mm6\n\t"                       \
        ASMALIGN(3)                                                     \
        "1:                        \n\t"                                \
        MSPEL_FILTER13_CORE(DO_UNPACK, "movd  1", A1, A2, A3, A4)       \
        NORMALIZE_MMX("%6")                                             \
        TRANSFER_DONT_PACK                                              \
        /* Last 3 (in fact 4) bytes on the line */                      \
        "movd      8+"A1", %%mm1   \n\t"                                \
        DO_UNPACK("%%mm1")                                              \
        "movq      %%mm1, %%mm3    \n\t"                                \
        "paddw     %%mm1, %%mm1    \n\t"                                \
        "paddw     %%mm3, %%mm1    \n\t" /* 3* */                       \
        "movd      8+"A2", %%mm3   \n\t"                                \
        DO_UNPACK("%%mm3")                                              \
        "pmullw    %%mm6, %%mm3    \n\t" /* *18 */                      \
        "psubw     %%mm1, %%mm3    \n\t" /*18,-3 */                     \
        "movd      8+"A3", %%mm1   \n\t"                                \
        DO_UNPACK("%%mm1")                                              \
        "pmullw    %%mm5, %%mm1    \n\t" /* *53 */                      \
        "paddw     %%mm1, %%mm3    \n\t" /*53,18,-3 */                  \
        "movd      8+"A4", %%mm1   \n\t"                                \
        DO_UNPACK("%%mm1")                                              \
        "psllw     $2, %%mm1       \n\t" /* 4* */                       \
        "psubw     %%mm1, %%mm3    \n\t"                                \
        "paddw     %%mm7, %%mm3    \n\t"                                \
        "psraw     %6, %%mm3       \n\t"                                \
        "movq      %%mm3, 16(%2)   \n\t"                                \
        "add       %3, %1          \n\t"                                \
        "add       $24, %2         \n\t"                                \
        "decl      %0              \n\t"                                \
        "jnz 1b                    \n\t"                                \
        : "+r"(h), "+r" (src),  "+r" (dst)                              \
        : "r"(src_stride), "r"(3*src_stride),                           \
          "m"(rnd), "m"(shift)                                          \
        : "memory"                                                      \
    );                                                                  \
}

/**
 * Macro to build the horizontal 16bits version of vc1_put_shift[13].
 * Here the taps are constant offsets into 16-bit data, so the parameters
 * passed A1 to A4 should be simple.
 *
 * @param  NAME   Either 1 or 3
 * @see MSPEL_FILTER13_CORE for information on A1->A4
 */
#define MSPEL_FILTER13_HOR_16B(NAME, A1, A2, A3, A4)                    \
static void                                                             \
vc1_put_hor_16b_ ## NAME ## _mmx(uint8_t *dst, x86_reg stride,         \
                                 const int16_t *src, int rnd)           \
{                                                                       \
    int h = 8;                                                          \
    src -= 1;                                                           \
    rnd -= (-4+58+13-3)*256; /* Add -256 bias */                        \
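    /* (-4+58+13-3)*256 = 16384, removed again as +128 (ff_pw_128) after >>7 */ \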
    __asm__ volatile(                                                       \
        LOAD_ROUNDER_MMX("%4")                                          \
        "movq      "MANGLE(ff_pw_18)", %%mm6   \n\t"                    \
        "movq      "MANGLE(ff_pw_53)", %%mm5   \n\t"                    \
        ASMALIGN(3)                                                     \
        "1:                        \n\t"                                \
        MSPEL_FILTER13_CORE(DONT_UNPACK, "movq 2", A1, A2, A3, A4)      \
        NORMALIZE_MMX("$7")                                             \
        /* Remove bias */                                               \
        "paddw     "MANGLE(ff_pw_128)", %%mm3  \n\t"                    \
        "paddw     "MANGLE(ff_pw_128)", %%mm4  \n\t"                    \
        TRANSFER_DO_PACK                                                \
        "add       $24, %1         \n\t"                                \
        "add       %3, %2          \n\t"                                \
        "decl      %0              \n\t"                                \
        "jnz 1b                    \n\t"                                \
        : "+r"(h), "+r" (src),  "+r" (dst)                              \
        : "r"(stride), "m"(rnd)                                         \
        : "memory"                                                      \
    );                                                                  \
}

/**
 * Macro to build the 8bits, any direction, version of vc1_put_shift[13].
 * Here, offset=src_stride. Parameters passed A1 to A4 must use
 * %3 (offset) and %4 (3*offset).
 *
 * @param  NAME   Either 1 or 3
 * @see MSPEL_FILTER13_CORE for information on A1->A4
 */
#define MSPEL_FILTER13_8B(NAME, A1, A2, A3, A4)                         \
static void                                                             \
vc1_put_## NAME ## _mmx(uint8_t *dst, const uint8_t *src,               \
                        x86_reg stride, int rnd, x86_reg offset)      \
{                                                                       \
    int h = 8;                                                          \
    src -= offset;                                                      \
    rnd = 32-rnd;                                                       \
    __asm__ volatile (                                                      \
        LOAD_ROUNDER_MMX("%6")                                          \
        "movq      "MANGLE(ff_pw_53)", %%mm5       \n\t"                \
        "movq      "MANGLE(ff_pw_18)", %%mm6       \n\t"                \
        ASMALIGN(3)                                                     \
        "1:                        \n\t"                                \
        MSPEL_FILTER13_CORE(DO_UNPACK, "movd   1", A1, A2, A3, A4)      \
        NORMALIZE_MMX("$6")                                             \
        TRANSFER_DO_PACK                                                \
        "add       %5, %1          \n\t"                                \
        "add       %5, %2          \n\t"                                \
        "decl      %0              \n\t"                                \
        "jnz 1b                    \n\t"                                \
        : "+r"(h), "+r" (src),  "+r" (dst)                              \
        : "r"(offset), "r"(3*offset), "g"(stride), "m"(rnd)             \
        : "memory"                                                      \
    );                                                                  \
}

/** 1/4 shift bicubic interpolation */
MSPEL_FILTER13_8B     (shift1, "0(%1,%4  )", "0(%1,%3,2)", "0(%1,%3  )", "0(%1     )")
MSPEL_FILTER13_VER_16B(shift1, "0(%1,%4  )", "0(%1,%3,2)", "0(%1,%3  )", "0(%1     )")
MSPEL_FILTER13_HOR_16B(shift1, "2*3(%1)", "2*2(%1)", "2*1(%1)", "2*0(%1)")

/** 3/4 shift bicubic interpolation */
MSPEL_FILTER13_8B     (shift3, "0(%1     )", "0(%1,%3  )", "0(%1,%3,2)", "0(%1,%4  )")
MSPEL_FILTER13_VER_16B(shift3, "0(%1     )", "0(%1,%3  )", "0(%1,%3,2)", "0(%1,%4  )")
MSPEL_FILTER13_HOR_16B(shift3, "2*0(%1)", "2*1(%1)", "2*2(%1)", "2*3(%1)")

typedef void (*vc1_mspel_mc_filter_ver_16bits)(int16_t *dst, const uint8_t *src, x86_reg src_stride, int rnd, int64_t shift);
typedef void (*vc1_mspel_mc_filter_hor_16bits)(uint8_t *dst, x86_reg dst_stride, const int16_t *src, int rnd);
typedef void (*vc1_mspel_mc_filter_8bits)(uint8_t *dst, const uint8_t *src, x86_reg stride, int rnd, x86_reg offset);

/**
 * Interpolates fractional pel values by applying the proper vertical then
 * horizontal filter.
 *
 * @param  dst     Destination buffer for interpolated pels.
 * @param  src     Source buffer.
 * @param  stride  Stride for both src and dst buffers.
 * @param  hmode   Horizontal filter (expressed in quarter pixels shift).
 * @param  vmode   Vertical filter.
 * @param  rnd     Rounding bias.
 */
static void vc1_mspel_mc(uint8_t *dst, const uint8_t *src, int stride,
                         int hmode, int vmode, int rnd)
{
    static const vc1_mspel_mc_filter_ver_16bits vc1_put_shift_ver_16bits[] =
         { NULL, vc1_put_ver_16b_shift1_mmx, vc1_put_ver_16b_shift2_mmx, vc1_put_ver_16b_shift3_mmx };
    static const vc1_mspel_mc_filter_hor_16bits vc1_put_shift_hor_16bits[] =
         { NULL, vc1_put_hor_16b_shift1_mmx, vc1_put_hor_16b_shift2_mmx, vc1_put_hor_16b_shift3_mmx };
    static const vc1_mspel_mc_filter_8bits vc1_put_shift_8bits[] =
         { NULL, vc1_put_shift1_mmx, vc1_put_shift2_mmx, vc1_put_shift3_mmx };

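    /* mm0 is zeroed once here; the punpcklbw instructions in the filter
     * functions rely on it to widen bytes to words. */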
    __asm__ volatile(
        "pxor %%mm0, %%mm0         \n\t"
        ::: "memory"
    );

    if (vmode) { /* Vertical filter to apply */
        if (hmode) { /* Horizontal filter to apply, output to tmp */
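            /* Right shift applied by the first (vertical) pass; modes 1 and 3
             * use the bicubic filter, mode 2 the (-1,9,9,-1) half-pel one. */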
            static const int shift_value[] = { 0, 5, 1, 5 };
            int              shift = (shift_value[hmode]+shift_value[vmode])>>1;
            int              r;
            DECLARE_ALIGNED_16(int16_t, tmp[12*8]);

            r = (1<<(shift-1)) + rnd-1;
            vc1_put_shift_ver_16bits[vmode](tmp, src-1, stride, r, shift);

            vc1_put_shift_hor_16bits[hmode](dst, stride, tmp+1, 64-rnd);
            return;
        }
        else { /* No horizontal filter, output 8 lines to dst */
            vc1_put_shift_8bits[vmode](dst, src, stride, 1-rnd, stride);
            return;
        }
    }

    /* Horizontal mode with no vertical mode */
    vc1_put_shift_8bits[hmode](dst, src, stride, rnd, 1);
}

void ff_put_vc1_mspel_mc00_mmx(uint8_t *dst, const uint8_t *src, int stride, int rnd);

/** Macro to ease the declaration of the bicubic filter interpolation functions */
#define DECLARE_FUNCTION(a, b)                                          \
static void put_vc1_mspel_mc ## a ## b ## _mmx(uint8_t *dst, const uint8_t *src, int stride, int rnd) { \
     vc1_mspel_mc(dst, src, stride, a, b, rnd);                         \
}

DECLARE_FUNCTION(0, 1)
DECLARE_FUNCTION(0, 2)
DECLARE_FUNCTION(0, 3)

DECLARE_FUNCTION(1, 0)
DECLARE_FUNCTION(1, 1)
DECLARE_FUNCTION(1, 2)
DECLARE_FUNCTION(1, 3)

DECLARE_FUNCTION(2, 0)
DECLARE_FUNCTION(2, 1)
DECLARE_FUNCTION(2, 2)
DECLARE_FUNCTION(2, 3)

DECLARE_FUNCTION(3, 0)
DECLARE_FUNCTION(3, 1)
DECLARE_FUNCTION(3, 2)
DECLARE_FUNCTION(3, 3)

void ff_vc1dsp_init_mmx(DSPContext* dsp, AVCodecContext *avctx) {
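    /* put_vc1_mspel_pixels_tab[] is indexed by hmode + 4*vmode. */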
    dsp->put_vc1_mspel_pixels_tab[ 0] = ff_put_vc1_mspel_mc00_mmx;
    dsp->put_vc1_mspel_pixels_tab[ 4] = put_vc1_mspel_mc01_mmx;
    dsp->put_vc1_mspel_pixels_tab[ 8] = put_vc1_mspel_mc02_mmx;
    dsp->put_vc1_mspel_pixels_tab[12] = put_vc1_mspel_mc03_mmx;

    dsp->put_vc1_mspel_pixels_tab[ 1] = put_vc1_mspel_mc10_mmx;
    dsp->put_vc1_mspel_pixels_tab[ 5] = put_vc1_mspel_mc11_mmx;
    dsp->put_vc1_mspel_pixels_tab[ 9] = put_vc1_mspel_mc12_mmx;
    dsp->put_vc1_mspel_pixels_tab[13] = put_vc1_mspel_mc13_mmx;

    dsp->put_vc1_mspel_pixels_tab[ 2] = put_vc1_mspel_mc20_mmx;
    dsp->put_vc1_mspel_pixels_tab[ 6] = put_vc1_mspel_mc21_mmx;
    dsp->put_vc1_mspel_pixels_tab[10] = put_vc1_mspel_mc22_mmx;
    dsp->put_vc1_mspel_pixels_tab[14] = put_vc1_mspel_mc23_mmx;

    dsp->put_vc1_mspel_pixels_tab[ 3] = put_vc1_mspel_mc30_mmx;
    dsp->put_vc1_mspel_pixels_tab[ 7] = put_vc1_mspel_mc31_mmx;
    dsp->put_vc1_mspel_pixels_tab[11] = put_vc1_mspel_mc32_mmx;
    dsp->put_vc1_mspel_pixels_tab[15] = put_vc1_mspel_mc33_mmx;
}