#include "libavutil/cpu.h"
#include "libavutil/x86_cpu.h"
#include "libavcodec/dsputil.h"
#include "libavcodec/h264dsp.h"
#include "libavcodec/mpegvideo.h"
#include "libavcodec/simple_idct.h"
#include "libavcodec/ac3dec.h"
#include "dsputil_mmx.h"
#include "idct_xvid.h"

DECLARE_ALIGNED(8,  const uint64_t, ff_bone) = 0x0101010101010101ULL;
DECLARE_ALIGNED(8,  const uint64_t, ff_wtwo) = 0x0002000200020002ULL;

DECLARE_ALIGNED(16, const uint64_t, ff_pdw_80000000)[2] =
    {0x8000000080000000ULL, 0x8000000080000000ULL};

DECLARE_ALIGNED(8,  const uint64_t, ff_pw_1  ) = 0x0001000100010001ULL;
DECLARE_ALIGNED(16, const xmm_reg,  ff_pw_3  ) = {0x0003000300030003ULL, 0x0003000300030003ULL};
DECLARE_ALIGNED(16, const xmm_reg,  ff_pw_4  ) = {0x0004000400040004ULL, 0x0004000400040004ULL};
DECLARE_ALIGNED(16, const xmm_reg,  ff_pw_5  ) = {0x0005000500050005ULL, 0x0005000500050005ULL};
DECLARE_ALIGNED(16, const xmm_reg,  ff_pw_8  ) = {0x0008000800080008ULL, 0x0008000800080008ULL};
DECLARE_ALIGNED(16, const xmm_reg,  ff_pw_9  ) = {0x0009000900090009ULL, 0x0009000900090009ULL};
DECLARE_ALIGNED(8,  const uint64_t, ff_pw_15 ) = 0x000F000F000F000FULL;
DECLARE_ALIGNED(16, const xmm_reg,  ff_pw_16 ) = {0x0010001000100010ULL, 0x0010001000100010ULL};
DECLARE_ALIGNED(16, const xmm_reg,  ff_pw_17 ) = {0x0011001100110011ULL, 0x0011001100110011ULL};
DECLARE_ALIGNED(16, const xmm_reg,  ff_pw_18 ) = {0x0012001200120012ULL, 0x0012001200120012ULL};
DECLARE_ALIGNED(8,  const uint64_t, ff_pw_20 ) = 0x0014001400140014ULL;
DECLARE_ALIGNED(16, const xmm_reg,  ff_pw_27 ) = {0x001B001B001B001BULL, 0x001B001B001B001BULL};
DECLARE_ALIGNED(16, const xmm_reg,  ff_pw_28 ) = {0x001C001C001C001CULL, 0x001C001C001C001CULL};
DECLARE_ALIGNED(16, const xmm_reg,  ff_pw_32 ) = {0x0020002000200020ULL, 0x0020002000200020ULL};
DECLARE_ALIGNED(8,  const uint64_t, ff_pw_42 ) = 0x002A002A002A002AULL;
DECLARE_ALIGNED(8,  const uint64_t, ff_pw_53 ) = 0x0035003500350035ULL;
DECLARE_ALIGNED(16, const xmm_reg,  ff_pw_63 ) = {0x003F003F003F003FULL, 0x003F003F003F003FULL};
DECLARE_ALIGNED(16, const xmm_reg,  ff_pw_64 ) = {0x0040004000400040ULL, 0x0040004000400040ULL};
DECLARE_ALIGNED(8,  const uint64_t, ff_pw_96 ) = 0x0060006000600060ULL;
DECLARE_ALIGNED(8,  const uint64_t, ff_pw_128) = 0x0080008000800080ULL;
DECLARE_ALIGNED(8,  const uint64_t, ff_pw_255) = 0x00ff00ff00ff00ffULL;

DECLARE_ALIGNED(16, const xmm_reg,  ff_pb_0  ) = {0x0000000000000000ULL, 0x0000000000000000ULL};
DECLARE_ALIGNED(16, const xmm_reg,  ff_pb_1  ) = {0x0101010101010101ULL, 0x0101010101010101ULL};
DECLARE_ALIGNED(16, const xmm_reg,  ff_pb_3  ) = {0x0303030303030303ULL, 0x0303030303030303ULL};
DECLARE_ALIGNED(16, const xmm_reg,  ff_pb_4  ) = {0x0404040404040404ULL, 0x0404040404040404ULL};
DECLARE_ALIGNED(8,  const uint64_t, ff_pb_7  ) = 0x0707070707070707ULL;
DECLARE_ALIGNED(8,  const uint64_t, ff_pb_1F ) = 0x1F1F1F1F1F1F1F1FULL;
DECLARE_ALIGNED(8,  const uint64_t, ff_pb_3F ) = 0x3F3F3F3F3F3F3F3FULL;
DECLARE_ALIGNED(16, const xmm_reg,  ff_pb_80 ) = {0x8080808080808080ULL, 0x8080808080808080ULL};
DECLARE_ALIGNED(8,  const uint64_t, ff_pb_81 ) = 0x8181818181818181ULL;
DECLARE_ALIGNED(16, const xmm_reg,  ff_pb_A1 ) = {0xA1A1A1A1A1A1A1A1ULL, 0xA1A1A1A1A1A1A1A1ULL};
DECLARE_ALIGNED(16, const xmm_reg,  ff_pb_F8 ) = {0xF8F8F8F8F8F8F8F8ULL, 0xF8F8F8F8F8F8F8F8ULL};
DECLARE_ALIGNED(8,  const uint64_t, ff_pb_FC ) = 0xFCFCFCFCFCFCFCFCULL;
DECLARE_ALIGNED(16, const xmm_reg,  ff_pb_FE ) = {0xFEFEFEFEFEFEFEFEULL, 0xFEFEFEFEFEFEFEFEULL};

DECLARE_ALIGNED(16, const double, ff_pd_1)[2] = { 1.0, 1.0 };
DECLARE_ALIGNED(16, const double, ff_pd_2)[2] = { 2.0, 2.0 };

#define JUMPALIGN()     __asm__ volatile (".p2align 3"::)
#define MOVQ_ZERO(regd) __asm__ volatile ("pxor %%" #regd ", %%" #regd ::)

#define MOVQ_BFE(regd) \
    __asm__ volatile ( \
    "pcmpeqd %%" #regd ", %%" #regd " \n\t"\
    "paddb %%" #regd ", %%" #regd " \n\t" ::)

#ifndef PIC
#define MOVQ_BONE(regd) __asm__ volatile ("movq %0, %%" #regd " \n\t" ::"m"(ff_bone))
#define MOVQ_WTWO(regd) __asm__ volatile ("movq %0, %%" #regd " \n\t" ::"m"(ff_wtwo))
#else
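/* PIC builds cannot address the constants above through an absolute
 * memory operand without a relocation, so synthesize them in the
 * register instead, starting from the all-ones pattern pcmpeqd gives. */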
#define MOVQ_BONE(regd) \
    __asm__ volatile ( \
    "pcmpeqd %%" #regd ", %%" #regd " \n\t" \
    "psrlw $15, %%" #regd " \n\t" \
    "packuswb %%" #regd ", %%" #regd " \n\t" ::)

#define MOVQ_WTWO(regd) \
    __asm__ volatile ( \
    "pcmpeqd %%" #regd ", %%" #regd " \n\t" \
    "psrlw $15, %%" #regd " \n\t" \
    "psllw $1, %%" #regd " \n\t"::)

#endif

#define PAVGB_MMX_NO_RND(rega, regb, regr, regfe) \
    "movq " #rega ", " #regr " \n\t"\
    "pand " #regb ", " #regr " \n\t"\
    "pxor " #rega ", " #regb " \n\t"\
    "pand " #regfe ", " #regb " \n\t"\
    "psrlq $1, " #regb " \n\t"\
    "paddb " #regb ", " #regr " \n\t"

#define PAVGB_MMX(rega, regb, regr, regfe) \
    "movq " #rega ", " #regr " \n\t"\
    "por " #regb ", " #regr " \n\t"\
    "pxor " #rega ", " #regb " \n\t"\
    "pand " #regfe ", " #regb " \n\t"\
    "psrlq $1, " #regb " \n\t"\
    "psubb " #regb ", " #regr " \n\t"

#define PAVGBP_MMX_NO_RND(rega, regb, regr, regc, regd, regp) \
    "movq " #rega ", " #regr " \n\t"\
    "movq " #regc ", " #regp " \n\t"\
    "pand " #regb ", " #regr " \n\t"\
    "pand " #regd ", " #regp " \n\t"\
    "pxor " #rega ", " #regb " \n\t"\
    "pxor " #regc ", " #regd " \n\t"\
    "pand %%mm6, " #regb " \n\t"\
    "pand %%mm6, " #regd " \n\t"\
    "psrlq $1, " #regb " \n\t"\
    "psrlq $1, " #regd " \n\t"\
    "paddb " #regb ", " #regr " \n\t"\
    "paddb " #regd ", " #regp " \n\t"

#define PAVGBP_MMX(rega, regb, regr, regc, regd, regp) \
    "movq " #rega ", " #regr " \n\t"\
    "movq " #regc ", " #regp " \n\t"\
    "por " #regb ", " #regr " \n\t"\
    "por " #regd ", " #regp " \n\t"\
    "pxor " #rega ", " #regb " \n\t"\
    "pxor " #regc ", " #regd " \n\t"\
    "pand %%mm6, " #regb " \n\t"\
    "pand %%mm6, " #regd " \n\t"\
    "psrlq $1, " #regd " \n\t"\
    "psrlq $1, " #regb " \n\t"\
    "psubb " #regb ", " #regr " \n\t"\
    "psubb " #regd ", " #regp " \n\t"

/***********************************/
/* standard MMX                    */

#define DEF(x, y) x ## _no_rnd_ ## y ## _mmx
#define SET_RND MOVQ_WONE
#define PAVGBP(a, b, c, d, e, f) PAVGBP_MMX_NO_RND(a, b, c, d, e, f)
#define PAVGB(a, b, c, e) PAVGB_MMX_NO_RND(a, b, c, e)
#define OP_AVG(a, b, c, e) PAVGB_MMX(a, b, c, e)

#include "dsputil_mmx_rnd_template.c"

#undef DEF
#undef SET_RND
#undef PAVGBP
#undef PAVGB

#define DEF(x, y) x ## _ ## y ## _mmx
#define SET_RND MOVQ_WTWO
#define PAVGBP(a, b, c, d, e, f) PAVGBP_MMX(a, b, c, d, e, f)
#define PAVGB(a, b, c, e) PAVGB_MMX(a, b, c, e)

#include "dsputil_mmx_rnd_template.c"

#undef DEF
#undef SET_RND
#undef PAVGBP
#undef PAVGB
#undef OP_AVG

/***********************************/
/* 3DNow! specific                 */

#define DEF(x) x ## _3dnow
#define PAVGB "pavgusb"
#define OP_AVG PAVGB

#include "dsputil_mmx_avg_template.c"

#undef DEF
#undef PAVGB
#undef OP_AVG

/***********************************/
/* MMX2 specific                   */

#define DEF(x) x ## _mmx2

#define PAVGB "pavgb"
#define OP_AVG PAVGB

#include "dsputil_mmx_avg_template.c"

#undef DEF
#undef PAVGB
#undef OP_AVG

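/* put() involves no averaging, so rounding does not matter, and a plain
 * copy gains nothing from MMX2/3DNow!: all of these variants can simply
 * alias the MMX versions. */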
#define put_no_rnd_pixels16_mmx put_pixels16_mmx
#define put_no_rnd_pixels8_mmx put_pixels8_mmx
#define put_pixels16_mmx2 put_pixels16_mmx
#define put_pixels8_mmx2 put_pixels8_mmx
#define put_pixels4_mmx2 put_pixels4_mmx
#define put_no_rnd_pixels16_mmx2 put_no_rnd_pixels16_mmx
#define put_no_rnd_pixels8_mmx2 put_no_rnd_pixels8_mmx
#define put_pixels16_3dnow put_pixels16_mmx
#define put_pixels8_3dnow put_pixels8_mmx
#define put_pixels4_3dnow put_pixels4_mmx
#define put_no_rnd_pixels16_3dnow put_no_rnd_pixels16_mmx
#define put_no_rnd_pixels8_3dnow put_no_rnd_pixels8_mmx

void ff_put_pixels_clamped_mmx(const DCTELEM *block, uint8_t *pixels, int line_size)
{
    const DCTELEM *p;
    uint8_t *pix;

    p   = block;
    pix = pixels;

    __asm__ volatile(
        "movq %3, %%mm0 \n\t"
        "movq 8%3, %%mm1 \n\t"
        "movq 16%3, %%mm2 \n\t"
        "movq 24%3, %%mm3 \n\t"
        "movq 32%3, %%mm4 \n\t"
        "movq 40%3, %%mm5 \n\t"
        "movq 48%3, %%mm6 \n\t"
        "movq 56%3, %%mm7 \n\t"
        "packuswb %%mm1, %%mm0 \n\t"
        "packuswb %%mm3, %%mm2 \n\t"
        "packuswb %%mm5, %%mm4 \n\t"
        "packuswb %%mm7, %%mm6 \n\t"
        "movq %%mm0, (%0) \n\t"
        "movq %%mm2, (%0, %1) \n\t"
        "movq %%mm4, (%0, %1, 2) \n\t"
        "movq %%mm6, (%0, %2) \n\t"
        ::"r" (pix), "r" ((x86_reg)line_size), "r" ((x86_reg)line_size*3), "m"(*p)
        :"memory");
    pix += line_size*4;
    p   += 32;

    __asm__ volatile(
        "movq (%3), %%mm0 \n\t"
        "movq 8(%3), %%mm1 \n\t"
        "movq 16(%3), %%mm2 \n\t"
        "movq 24(%3), %%mm3 \n\t"
        "movq 32(%3), %%mm4 \n\t"
        "movq 40(%3), %%mm5 \n\t"
        "movq 48(%3), %%mm6 \n\t"
        "movq 56(%3), %%mm7 \n\t"
        "packuswb %%mm1, %%mm0 \n\t"
        "packuswb %%mm3, %%mm2 \n\t"
        "packuswb %%mm5, %%mm4 \n\t"
        "packuswb %%mm7, %%mm6 \n\t"
        "movq %%mm0, (%0) \n\t"
        "movq %%mm2, (%0, %1) \n\t"
        "movq %%mm4, (%0, %1, 2) \n\t"
        "movq %%mm6, (%0, %2) \n\t"
        ::"r" (pix), "r" ((x86_reg)line_size), "r" ((x86_reg)line_size*3), "r"(p)
        :"memory");
}
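
/* Reference sketch (not part of the original file) of what the two asm
 * blocks above do per 8x8 block: packuswb saturates each signed 16-bit
 * coefficient into the unsigned 0..255 pixel range. */
static inline void put_pixels_clamped_c_ref(const DCTELEM *block,
                                            uint8_t *pixels, int line_size)
{
    int i, j;
    for (i = 0; i < 8; i++) {
        for (j = 0; j < 8; j++) {
            int v = block[i * 8 + j];
            pixels[j] = v < 0 ? 0 : v > 255 ? 255 : v;
        }
        pixels += line_size;
    }
}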

#define put_signed_pixels_clamped_mmx_half(off) \
    "movq "#off"(%2), %%mm1 \n\t"\
    "movq 16+"#off"(%2), %%mm2 \n\t"\
    "movq 32+"#off"(%2), %%mm3 \n\t"\
    "movq 48+"#off"(%2), %%mm4 \n\t"\
    "packsswb 8+"#off"(%2), %%mm1 \n\t"\
    "packsswb 24+"#off"(%2), %%mm2 \n\t"\
    "packsswb 40+"#off"(%2), %%mm3 \n\t"\
    "packsswb 56+"#off"(%2), %%mm4 \n\t"\
    "paddb %%mm0, %%mm1 \n\t"\
    "paddb %%mm0, %%mm2 \n\t"\
    "paddb %%mm0, %%mm3 \n\t"\
    "paddb %%mm0, %%mm4 \n\t"\
    "movq %%mm1, (%0) \n\t"\
    "movq %%mm2, (%0, %3) \n\t"\
    "movq %%mm3, (%0, %3, 2) \n\t"\
    "movq %%mm4, (%0, %1) \n\t"

void ff_put_signed_pixels_clamped_mmx(const DCTELEM *block, uint8_t *pixels, int line_size)
{
    x86_reg line_skip = line_size;
    x86_reg line_skip3;

    __asm__ volatile (
        "movq "MANGLE(ff_pb_80)", %%mm0 \n\t"
        "lea (%3, %3, 2), %1 \n\t"
        put_signed_pixels_clamped_mmx_half(0)
        "lea (%0, %3, 4), %0 \n\t"
        put_signed_pixels_clamped_mmx_half(64)
        :"+&r" (pixels), "=&r" (line_skip3)
        :"r" (block), "r"(line_skip)
        :"memory");
}

void ff_add_pixels_clamped_mmx(const DCTELEM *block, uint8_t *pixels, int line_size)
{
    const DCTELEM *p;
    uint8_t *pix;
    int i;

    p   = block;
    pix = pixels;
    MOVQ_ZERO(mm7);
    i = 4;
    do {
        __asm__ volatile(
            "movq (%2), %%mm0 \n\t"
            "movq 8(%2), %%mm1 \n\t"
            "movq 16(%2), %%mm2 \n\t"
            "movq 24(%2), %%mm3 \n\t"
            "movq %0, %%mm4 \n\t"
            "movq %1, %%mm6 \n\t"
            "movq %%mm4, %%mm5 \n\t"
            "punpcklbw %%mm7, %%mm4 \n\t"
            "punpckhbw %%mm7, %%mm5 \n\t"
            "paddsw %%mm4, %%mm0 \n\t"
            "paddsw %%mm5, %%mm1 \n\t"
            "movq %%mm6, %%mm5 \n\t"
            "punpcklbw %%mm7, %%mm6 \n\t"
            "punpckhbw %%mm7, %%mm5 \n\t"
            "paddsw %%mm6, %%mm2 \n\t"
            "paddsw %%mm5, %%mm3 \n\t"
            "packuswb %%mm1, %%mm0 \n\t"
            "packuswb %%mm3, %%mm2 \n\t"
            "movq %%mm0, %0 \n\t"
            "movq %%mm2, %1 \n\t"
            :"+m"(*pix), "+m"(*(pix+line_size))
            :"r"(p)
            :"memory");
        pix += line_size*2;
        p   += 16;
    } while (--i);
}
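
/* Scalar sketch of the loop above: widen the pixels (punpck*bw against
 * the zeroed mm7), add the coefficients, and repack with saturation. */
static inline void add_pixels_clamped_c_ref(const DCTELEM *block,
                                            uint8_t *pixels, int line_size)
{
    int i, j;
    for (i = 0; i < 8; i++) {
        for (j = 0; j < 8; j++) {
            int v = pixels[j] + block[i * 8 + j];
            pixels[j] = v < 0 ? 0 : v > 255 ? 255 : v;
        }
        pixels += line_size;
    }
}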

static void put_pixels4_mmx(uint8_t *block, const uint8_t *pixels, int line_size, int h)
{
    __asm__ volatile(
        "lea (%3, %3), %%"REG_a" \n\t"
        ".p2align 3 \n\t"
        "1: \n\t"
        "movd (%1), %%mm0 \n\t"
        "movd (%1, %3), %%mm1 \n\t"
        "movd %%mm0, (%2) \n\t"
        "movd %%mm1, (%2, %3) \n\t"
        "add %%"REG_a", %1 \n\t"
        "add %%"REG_a", %2 \n\t"
        "movd (%1), %%mm0 \n\t"
        "movd (%1, %3), %%mm1 \n\t"
        "movd %%mm0, (%2) \n\t"
        "movd %%mm1, (%2, %3) \n\t"
        "add %%"REG_a", %1 \n\t"
        "add %%"REG_a", %2 \n\t"
        "subl $4, %0 \n\t"
        "jnz 1b \n\t"
        : "+g"(h), "+r" (pixels), "+r" (block)
        : "r"((x86_reg)line_size)
        : "%"REG_a, "memory"
    );
}

static void put_pixels8_mmx(uint8_t *block, const uint8_t *pixels, int line_size, int h)
{
    __asm__ volatile(
        "lea (%3, %3), %%"REG_a" \n\t"
        ".p2align 3 \n\t"
        "1: \n\t"
        "movq (%1), %%mm0 \n\t"
        "movq (%1, %3), %%mm1 \n\t"
        "movq %%mm0, (%2) \n\t"
        "movq %%mm1, (%2, %3) \n\t"
        "add %%"REG_a", %1 \n\t"
        "add %%"REG_a", %2 \n\t"
        "movq (%1), %%mm0 \n\t"
        "movq (%1, %3), %%mm1 \n\t"
        "movq %%mm0, (%2) \n\t"
        "movq %%mm1, (%2, %3) \n\t"
        "add %%"REG_a", %1 \n\t"
        "add %%"REG_a", %2 \n\t"
        "subl $4, %0 \n\t"
        "jnz 1b \n\t"
        : "+g"(h), "+r" (pixels), "+r" (block)
        : "r"((x86_reg)line_size)
        : "%"REG_a, "memory"
    );
}

static void put_pixels16_mmx(uint8_t *block, const uint8_t *pixels, int line_size, int h)
{
    __asm__ volatile(
        "lea (%3, %3), %%"REG_a" \n\t"
        ".p2align 3 \n\t"
        "1: \n\t"
        "movq (%1), %%mm0 \n\t"
        "movq 8(%1), %%mm4 \n\t"
        "movq (%1, %3), %%mm1 \n\t"
        "movq 8(%1, %3), %%mm5 \n\t"
        "movq %%mm0, (%2) \n\t"
        "movq %%mm4, 8(%2) \n\t"
        "movq %%mm1, (%2, %3) \n\t"
        "movq %%mm5, 8(%2, %3) \n\t"
        "add %%"REG_a", %1 \n\t"
        "add %%"REG_a", %2 \n\t"
        "movq (%1), %%mm0 \n\t"
        "movq 8(%1), %%mm4 \n\t"
        "movq (%1, %3), %%mm1 \n\t"
        "movq 8(%1, %3), %%mm5 \n\t"
        "movq %%mm0, (%2) \n\t"
        "movq %%mm4, 8(%2) \n\t"
        "movq %%mm1, (%2, %3) \n\t"
        "movq %%mm5, 8(%2, %3) \n\t"
        "add %%"REG_a", %1 \n\t"
        "add %%"REG_a", %2 \n\t"
        "subl $4, %0 \n\t"
        "jnz 1b \n\t"
        : "+g"(h), "+r" (pixels), "+r" (block)
        : "r"((x86_reg)line_size)
        : "%"REG_a, "memory"
    );
}

static void put_pixels16_sse2(uint8_t *block, const uint8_t *pixels, int line_size, int h)
{
    __asm__ volatile(
        "1: \n\t"
        "movdqu (%1), %%xmm0 \n\t"
        "movdqu (%1,%3), %%xmm1 \n\t"
        "movdqu (%1,%3,2), %%xmm2 \n\t"
        "movdqu (%1,%4), %%xmm3 \n\t"
        "movdqa %%xmm0, (%2) \n\t"
        "movdqa %%xmm1, (%2,%3) \n\t"
        "movdqa %%xmm2, (%2,%3,2) \n\t"
        "movdqa %%xmm3, (%2,%4) \n\t"
        "subl $4, %0 \n\t"
        "lea (%1,%3,4), %1 \n\t"
        "lea (%2,%3,4), %2 \n\t"
        "jnz 1b \n\t"
        : "+g"(h), "+r" (pixels), "+r" (block)
        : "r"((x86_reg)line_size), "r"((x86_reg)3L*line_size)
        : "memory"
    );
}

static void avg_pixels16_sse2(uint8_t *block, const uint8_t *pixels, int line_size, int h)
{
    __asm__ volatile(
        "1: \n\t"
        "movdqu (%1), %%xmm0 \n\t"
        "movdqu (%1,%3), %%xmm1 \n\t"
        "movdqu (%1,%3,2), %%xmm2 \n\t"
        "movdqu (%1,%4), %%xmm3 \n\t"
        "pavgb (%2), %%xmm0 \n\t"
        "pavgb (%2,%3), %%xmm1 \n\t"
        "pavgb (%2,%3,2), %%xmm2 \n\t"
        "pavgb (%2,%4), %%xmm3 \n\t"
        "movdqa %%xmm0, (%2) \n\t"
        "movdqa %%xmm1, (%2,%3) \n\t"
        "movdqa %%xmm2, (%2,%3,2) \n\t"
        "movdqa %%xmm3, (%2,%4) \n\t"
        "subl $4, %0 \n\t"
        "lea (%1,%3,4), %1 \n\t"
        "lea (%2,%3,4), %2 \n\t"
        "jnz 1b \n\t"
        : "+g"(h), "+r" (pixels), "+r" (block)
        : "r"((x86_reg)line_size), "r"((x86_reg)3L*line_size)
        : "memory"
    );
}
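
/* pavgb computes the rounded byte average (a + b + 1) >> 1, so the scalar
 * equivalent of avg_pixels16_sse2 is
 *     block[x] = (block[x] + pixels[x] + 1) >> 1;
 * per pixel. Note the loads use movdqu (pixels may be unaligned) while
 * the stores use movdqa: block is assumed 16-byte aligned. */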

#define CLEAR_BLOCKS(name,n) \
static void name(DCTELEM *blocks)\
{\
    __asm__ volatile(\
        "pxor %%mm7, %%mm7 \n\t"\
        "mov %1, %%"REG_a" \n\t"\
        "1: \n\t"\
        "movq %%mm7, (%0, %%"REG_a") \n\t"\
        "movq %%mm7, 8(%0, %%"REG_a") \n\t"\
        "movq %%mm7, 16(%0, %%"REG_a") \n\t"\
        "movq %%mm7, 24(%0, %%"REG_a") \n\t"\
        "add $32, %%"REG_a" \n\t"\
        " js 1b \n\t"\
        : : "r" (((uint8_t *)blocks)+128*n),\
            "i" (-128*n)\
        : "%"REG_a\
    );\
}
CLEAR_BLOCKS(clear_blocks_mmx, 6)
CLEAR_BLOCKS(clear_block_mmx, 1)

static void clear_block_sse(DCTELEM *block)
{
    __asm__ volatile(
        "xorps %%xmm0, %%xmm0 \n"
        "movaps %%xmm0, (%0) \n"
        "movaps %%xmm0, 16(%0) \n"
        "movaps %%xmm0, 32(%0) \n"
        "movaps %%xmm0, 48(%0) \n"
        "movaps %%xmm0, 64(%0) \n"
        "movaps %%xmm0, 80(%0) \n"
        "movaps %%xmm0, 96(%0) \n"
        "movaps %%xmm0, 112(%0) \n"
        :: "r"(block)
        : "memory"
    );
}

static void clear_blocks_sse(DCTELEM *blocks)
{
    __asm__ volatile(
        "xorps %%xmm0, %%xmm0 \n"
        "mov %1, %%"REG_a" \n"
        "1: \n"
        "movaps %%xmm0, (%0, %%"REG_a") \n"
        "movaps %%xmm0, 16(%0, %%"REG_a") \n"
        "movaps %%xmm0, 32(%0, %%"REG_a") \n"
        "movaps %%xmm0, 48(%0, %%"REG_a") \n"
        "movaps %%xmm0, 64(%0, %%"REG_a") \n"
        "movaps %%xmm0, 80(%0, %%"REG_a") \n"
        "movaps %%xmm0, 96(%0, %%"REG_a") \n"
        "movaps %%xmm0, 112(%0, %%"REG_a") \n"
        "add $128, %%"REG_a" \n"
        " js 1b \n"
        : : "r" (((uint8_t *)blocks)+128*6),
            "i" (-128*6)
        : "%"REG_a
    );
}
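
/* Plain-C equivalent (a sketch; would need <string.h>):
 *     memset(blocks, 0, 6 * 64 * sizeof(DCTELEM));
 * The SSE version just streams 128 zeroed bytes per loop iteration. */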

static void add_bytes_mmx(uint8_t *dst, uint8_t *src, int w){
    x86_reg i=0;
    __asm__ volatile(
        "jmp 2f \n\t"
        "1: \n\t"
        "movq (%1, %0), %%mm0 \n\t"
        "movq (%2, %0), %%mm1 \n\t"
        "paddb %%mm0, %%mm1 \n\t"
        "movq %%mm1, (%2, %0) \n\t"
        "movq 8(%1, %0), %%mm0 \n\t"
        "movq 8(%2, %0), %%mm1 \n\t"
        "paddb %%mm0, %%mm1 \n\t"
        "movq %%mm1, 8(%2, %0) \n\t"
        "add $16, %0 \n\t"
        "2: \n\t"
        "cmp %3, %0 \n\t"
        " js 1b \n\t"
        : "+r" (i)
        : "r"(src), "r"(dst), "r"((x86_reg)w-15)
    );
    for(; i<w; i++)
        dst[i+0] += src[i+0];
}

static void add_bytes_l2_mmx(uint8_t *dst, uint8_t *src1, uint8_t *src2, int w){
    x86_reg i=0;
    __asm__ volatile(
        "jmp 2f \n\t"
        "1: \n\t"
        "movq (%2, %0), %%mm0 \n\t"
        "movq 8(%2, %0), %%mm1 \n\t"
        "paddb (%3, %0), %%mm0 \n\t"
        "paddb 8(%3, %0), %%mm1 \n\t"
        "movq %%mm0, (%1, %0) \n\t"
        "movq %%mm1, 8(%1, %0) \n\t"
        "add $16, %0 \n\t"
        "2: \n\t"
        "cmp %4, %0 \n\t"
        " js 1b \n\t"
        : "+r" (i)
        : "r"(dst), "r"(src1), "r"(src2), "r"((x86_reg)w-15)
    );
    for(; i<w; i++)
        dst[i] = src1[i] + src2[i];
}

#if HAVE_7REGS && HAVE_TEN_OPERANDS
static void add_hfyu_median_prediction_cmov(uint8_t *dst, const uint8_t *top, const uint8_t *diff, int w, int *left, int *left_top) {
    x86_reg w2 = -w;
    x86_reg x;
    int l  = *left     & 0xff;
    int tl = *left_top & 0xff;
    int t;
    __asm__ volatile(
        "mov %7, %3 \n"
        "1: \n"
        "movzbl (%3,%4), %2 \n"
        "mov %2, %k3 \n"
        "sub %b1, %b3 \n"
        "add %b0, %b3 \n"
        "mov %2, %1 \n"
        "cmp %0, %2 \n"
        "cmovg %0, %2 \n"
        "cmovg %1, %0 \n"
        "cmp %k3, %0 \n"
        "cmovg %k3, %0 \n"
        "mov %7, %3 \n"
        "cmp %2, %0 \n"
        "cmovl %2, %0 \n"
        "add (%6,%4), %b0 \n"
        "mov %b0, (%5,%4) \n"
        "inc %4 \n"
        "jl 1b \n"
        :"+&q"(l), "+&q"(tl), "=&r"(t), "=&q"(x), "+&r"(w2)
        :"r"(dst+w), "r"(diff+w), "rm"(top+w)
    );
    *left     = l;
    *left_top = tl;
}
#endif
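
/* Scalar sketch of the cmov loop above, roughly the C reference from
 * dsputil.c (mid_pred() is the three-input median):
 *
 *     for (i = 0; i < w; i++) {
 *         l      = mid_pred(l, top[i], (l + top[i] - tl) & 0xff) + diff[i];
 *         tl     = top[i];
 *         dst[i] = l;
 *     }
 *
 * The asm keeps l and tl in byte registers and replaces the branches of
 * the median with cmovg/cmovl. */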

#define H263_LOOP_FILTER \
    "pxor %%mm7, %%mm7 \n\t"\
    "movq %0, %%mm0 \n\t"\
    "movq %0, %%mm1 \n\t"\
    "movq %3, %%mm2 \n\t"\
    "movq %3, %%mm3 \n\t"\
    "punpcklbw %%mm7, %%mm0 \n\t"\
    "punpckhbw %%mm7, %%mm1 \n\t"\
    "punpcklbw %%mm7, %%mm2 \n\t"\
    "punpckhbw %%mm7, %%mm3 \n\t"\
    "psubw %%mm2, %%mm0 \n\t"\
    "psubw %%mm3, %%mm1 \n\t"\
    "movq %1, %%mm2 \n\t"\
    "movq %1, %%mm3 \n\t"\
    "movq %2, %%mm4 \n\t"\
    "movq %2, %%mm5 \n\t"\
    "punpcklbw %%mm7, %%mm2 \n\t"\
    "punpckhbw %%mm7, %%mm3 \n\t"\
    "punpcklbw %%mm7, %%mm4 \n\t"\
    "punpckhbw %%mm7, %%mm5 \n\t"\
    "psubw %%mm2, %%mm4 \n\t"\
    "psubw %%mm3, %%mm5 \n\t"\
    "psllw $2, %%mm4 \n\t"\
    "psllw $2, %%mm5 \n\t"\
    "paddw %%mm0, %%mm4 \n\t"\
    "paddw %%mm1, %%mm5 \n\t"\
    "pxor %%mm6, %%mm6 \n\t"\
    "pcmpgtw %%mm4, %%mm6 \n\t"\
    "pcmpgtw %%mm5, %%mm7 \n\t"\
    "pxor %%mm6, %%mm4 \n\t"\
    "pxor %%mm7, %%mm5 \n\t"\
    "psubw %%mm6, %%mm4 \n\t"\
    "psubw %%mm7, %%mm5 \n\t"\
    "psrlw $3, %%mm4 \n\t"\
    "psrlw $3, %%mm5 \n\t"\
    "packuswb %%mm5, %%mm4 \n\t"\
    "packsswb %%mm7, %%mm6 \n\t"\
    "pxor %%mm7, %%mm7 \n\t"\
    "movd %4, %%mm2 \n\t"\
    "punpcklbw %%mm2, %%mm2 \n\t"\
    "punpcklbw %%mm2, %%mm2 \n\t"\
    "punpcklbw %%mm2, %%mm2 \n\t"\
    "psubusb %%mm4, %%mm2 \n\t"\
    "movq %%mm2, %%mm3 \n\t"\
    "psubusb %%mm4, %%mm3 \n\t"\
    "psubb %%mm3, %%mm2 \n\t"\
    "movq %1, %%mm3 \n\t"\
    "movq %2, %%mm4 \n\t"\
    "pxor %%mm6, %%mm3 \n\t"\
    "pxor %%mm6, %%mm4 \n\t"\
    "paddusb %%mm2, %%mm3 \n\t"\
    "psubusb %%mm2, %%mm4 \n\t"\
    "pxor %%mm6, %%mm3 \n\t"\
    "pxor %%mm6, %%mm4 \n\t"\
    "paddusb %%mm2, %%mm2 \n\t"\
    "packsswb %%mm1, %%mm0 \n\t"\
    "pcmpgtb %%mm0, %%mm7 \n\t"\
    "pxor %%mm7, %%mm0 \n\t"\
    "psubb %%mm7, %%mm0 \n\t"\
    "movq %%mm0, %%mm1 \n\t"\
    "psubusb %%mm2, %%mm0 \n\t"\
    "psubb %%mm0, %%mm1 \n\t"\
    "pand %5, %%mm1 \n\t"\
    "psrlw $2, %%mm1 \n\t"\
    "pxor %%mm7, %%mm1 \n\t"\
    "psubb %%mm7, %%mm1 \n\t"\
    "movq %0, %%mm5 \n\t"\
    "movq %3, %%mm6 \n\t"\
    "psubb %%mm1, %%mm5 \n\t"\
    "paddb %%mm1, %%mm6 \n\t"

static void h263_v_loop_filter_mmx(uint8_t *src, int stride, int qscale){
    if(CONFIG_H263_DECODER || CONFIG_H263_ENCODER) {
        const int strength= ff_h263_loop_filter_strength[qscale];

        __asm__ volatile(

            H263_LOOP_FILTER

            "movq %%mm3, %1 \n\t"
            "movq %%mm4, %2 \n\t"
            "movq %%mm5, %0 \n\t"
            "movq %%mm6, %3 \n\t"
            : "+m" (*(uint64_t*)(src - 2*stride)),
              "+m" (*(uint64_t*)(src - 1*stride)),
              "+m" (*(uint64_t*)(src + 0*stride)),
              "+m" (*(uint64_t*)(src + 1*stride))
            : "g" (2*strength), "m"(ff_pb_FC)
        );
    }
}

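/* The horizontal filter below reuses the same H263_LOOP_FILTER kernel by
 * transposing two 4x4 tiles into a temporary buffer, filtering them as
 * rows, and writing the result back transposed via the punpck* shuffle. */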
static void h263_h_loop_filter_mmx(uint8_t *src, int stride, int qscale){
    if(CONFIG_H263_DECODER || CONFIG_H263_ENCODER) {
        const int strength= ff_h263_loop_filter_strength[qscale];
        DECLARE_ALIGNED(8, uint64_t, temp)[4];
        uint8_t *btemp= (uint8_t*)temp;

        src -= 2;

        transpose4x4(btemp  , src           , 8, stride);
        transpose4x4(btemp+4, src + 4*stride, 8, stride);
        __asm__ volatile(
            H263_LOOP_FILTER

            : "+m" (temp[0]),
              "+m" (temp[1]),
              "+m" (temp[2]),
              "+m" (temp[3])
            : "g" (2*strength), "m"(ff_pb_FC)
        );

        __asm__ volatile(
            "movq %%mm5, %%mm1 \n\t"
            "movq %%mm4, %%mm0 \n\t"
            "punpcklbw %%mm3, %%mm5 \n\t"
            "punpcklbw %%mm6, %%mm4 \n\t"
            "punpckhbw %%mm3, %%mm1 \n\t"
            "punpckhbw %%mm6, %%mm0 \n\t"
            "movq %%mm5, %%mm3 \n\t"
            "movq %%mm1, %%mm6 \n\t"
            "punpcklwd %%mm4, %%mm5 \n\t"
            "punpcklwd %%mm0, %%mm1 \n\t"
            "punpckhwd %%mm4, %%mm3 \n\t"
            "punpckhwd %%mm0, %%mm6 \n\t"
            "movd %%mm5, (%0) \n\t"
            "punpckhdq %%mm5, %%mm5 \n\t"
            "movd %%mm5, (%0,%2) \n\t"
            "movd %%mm3, (%0,%2,2) \n\t"
            "punpckhdq %%mm3, %%mm3 \n\t"
            "movd %%mm3, (%0,%3) \n\t"
            "movd %%mm1, (%1) \n\t"
            "punpckhdq %%mm1, %%mm1 \n\t"
            "movd %%mm1, (%1,%2) \n\t"
            "movd %%mm6, (%1,%2,2) \n\t"
            "punpckhdq %%mm6, %%mm6 \n\t"
            "movd %%mm6, (%1,%3) \n\t"
            :: "r" (src),
               "r" (src + 4*stride),
               "r" ((x86_reg) stride ),
               "r" ((x86_reg)(3*stride))
        );
    }
}

static void draw_edges_mmx(uint8_t *buf, int wrap, int width, int height, int w)
{
    uint8_t *ptr, *last_line;
    int i;

    last_line = buf + (height - 1) * wrap;

    ptr = buf;
    if(w==8)
    {
        __asm__ volatile(
            "1: \n\t"
            "movd (%0), %%mm0 \n\t"
            "punpcklbw %%mm0, %%mm0 \n\t"
            "punpcklwd %%mm0, %%mm0 \n\t"
            "punpckldq %%mm0, %%mm0 \n\t"
            "movq %%mm0, -8(%0) \n\t"
            "movq -8(%0, %2), %%mm1 \n\t"
            "punpckhbw %%mm1, %%mm1 \n\t"
            "punpckhwd %%mm1, %%mm1 \n\t"
            "punpckhdq %%mm1, %%mm1 \n\t"
            "movq %%mm1, (%0, %2) \n\t"
            "add %1, %0 \n\t"
            "cmp %3, %0 \n\t"
            " jb 1b \n\t"
            : "+r" (ptr)
            : "r" ((x86_reg)wrap), "r" ((x86_reg)width), "r" (ptr + wrap*height)
        );
    }
    else
    {
        __asm__ volatile(
            "1: \n\t"
            "movd (%0), %%mm0 \n\t"
            "punpcklbw %%mm0, %%mm0 \n\t"
            "punpcklwd %%mm0, %%mm0 \n\t"
            "punpckldq %%mm0, %%mm0 \n\t"
            "movq %%mm0, -8(%0) \n\t"
            "movq %%mm0, -16(%0) \n\t"
            "movq -8(%0, %2), %%mm1 \n\t"
            "punpckhbw %%mm1, %%mm1 \n\t"
            "punpckhwd %%mm1, %%mm1 \n\t"
            "punpckhdq %%mm1, %%mm1 \n\t"
            "movq %%mm1, (%0, %2) \n\t"
            "movq %%mm1, 8(%0, %2) \n\t"
            "add %1, %0 \n\t"
            "cmp %3, %0 \n\t"
            " jb 1b \n\t"
            : "+r" (ptr)
            : "r" ((x86_reg)wrap), "r" ((x86_reg)width), "r" (ptr + wrap*height)
        );
    }

    for(i=0;i<w;i+=4) {
        ptr= buf - (i + 1) * wrap - w;
        __asm__ volatile(
            "1: \n\t"
            "movq (%1, %0), %%mm0 \n\t"
            "movq %%mm0, (%0) \n\t"
            "movq %%mm0, (%0, %2) \n\t"
            "movq %%mm0, (%0, %2, 2) \n\t"
            "movq %%mm0, (%0, %3) \n\t"
            "add $8, %0 \n\t"
            "cmp %4, %0 \n\t"
            " jb 1b \n\t"
            : "+r" (ptr)
            : "r" ((x86_reg)buf - (x86_reg)ptr - w), "r" ((x86_reg)-wrap), "r" ((x86_reg)-wrap*3), "r" (ptr+width+2*w)
        );
        ptr= last_line + (i + 1) * wrap - w;
        __asm__ volatile(
            "1: \n\t"
            "movq (%1, %0), %%mm0 \n\t"
            "movq %%mm0, (%0) \n\t"
            "movq %%mm0, (%0, %2) \n\t"
            "movq %%mm0, (%0, %2, 2) \n\t"
            "movq %%mm0, (%0, %3) \n\t"
            "add $8, %0 \n\t"
            "cmp %4, %0 \n\t"
            " jb 1b \n\t"
            : "+r" (ptr)
            : "r" ((x86_reg)last_line - (x86_reg)ptr - w), "r" ((x86_reg)wrap), "r" ((x86_reg)wrap*3), "r" (ptr+width+2*w)
        );
    }
}
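
/* Scalar sketch of what draw_edges_mmx produces: the border pixels are
 * replicated outward into a guard band of width w around the picture:
 *
 *     for (y = 0; y < height; y++)
 *         for (x = 0; x < w; x++) {
 *             buf[y*wrap - 1 - x]     = buf[y*wrap];             // left
 *             buf[y*wrap + width + x] = buf[y*wrap + width - 1]; // right
 *         }
 *     // then the widened first/last lines are copied w times above and
 *     // below, which also fills the corners
 */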

#define PAETH(cpu, abs3)\
static void add_png_paeth_prediction_##cpu(uint8_t *dst, uint8_t *src, uint8_t *top, int w, int bpp)\
{\
    x86_reg i = -bpp;\
    x86_reg end = w-3;\
    __asm__ volatile(\
        "pxor %%mm7, %%mm7 \n"\
        "movd (%1,%0), %%mm0 \n"\
        "movd (%2,%0), %%mm1 \n"\
        "punpcklbw %%mm7, %%mm0 \n"\
        "punpcklbw %%mm7, %%mm1 \n"\
        "add %4, %0 \n"\
        "1: \n"\
        "movq %%mm1, %%mm2 \n"\
        "movd (%2,%0), %%mm1 \n"\
        "movq %%mm2, %%mm3 \n"\
        "punpcklbw %%mm7, %%mm1 \n"\
        "movq %%mm2, %%mm4 \n"\
        "psubw %%mm1, %%mm3 \n"\
        "psubw %%mm0, %%mm4 \n"\
        "movq %%mm3, %%mm5 \n"\
        "paddw %%mm4, %%mm5 \n"\
        abs3\
        "movq %%mm4, %%mm6 \n"\
        "pminsw %%mm5, %%mm6 \n"\
        "pcmpgtw %%mm6, %%mm3 \n"\
        "pcmpgtw %%mm5, %%mm4 \n"\
        "movq %%mm4, %%mm6 \n"\
        "pand %%mm3, %%mm4 \n"\
        "pandn %%mm3, %%mm6 \n"\
        "pandn %%mm0, %%mm3 \n"\
        "movd (%3,%0), %%mm0 \n"\
        "pand %%mm1, %%mm6 \n"\
        "pand %%mm4, %%mm2 \n"\
        "punpcklbw %%mm7, %%mm0 \n"\
        "movq %6, %%mm5 \n"\
        "paddw %%mm6, %%mm0 \n"\
        "paddw %%mm2, %%mm3 \n"\
        "paddw %%mm3, %%mm0 \n"\
        "pand %%mm5, %%mm0 \n"\
        "movq %%mm0, %%mm3 \n"\
        "packuswb %%mm3, %%mm3 \n"\
        "movd %%mm3, (%1,%0) \n"\
        "add %4, %0 \n"\
        "cmp %5, %0 \n"\
        "jle 1b \n"\
        :"+r"(i)\
        :"r"(dst), "r"(top), "r"(src), "r"((x86_reg)bpp), "g"(end),\
         "m"(ff_pw_255)\
        :"memory"\
    );\
}
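
/* Scalar reference for the macro above: the standard PNG Paeth predictor
 * with a = dst[i-bpp] (left), b = top[i], c = top[i-bpp] (top-left):
 *
 *     p  = a + b - c;
 *     pa = |p - a|;  pb = |p - b|;  pc = |p - c|;
 *     pred   = (pa <= pb && pa <= pc) ? a : (pb <= pc ? b : c);
 *     dst[i] = (pred + src[i]) & 0xff;
 *
 * Since p - a == b - c and so on, the asm forms the three distances
 * directly and never materializes p; ff_pw_255 masks the sum to bytes. */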

#define ABS3_MMX2\
    "psubw %%mm5, %%mm7 \n"\
    "pmaxsw %%mm7, %%mm5 \n"\
    "pxor %%mm6, %%mm6 \n"\
    "pxor %%mm7, %%mm7 \n"\
    "psubw %%mm3, %%mm6 \n"\
    "psubw %%mm4, %%mm7 \n"\
    "pmaxsw %%mm6, %%mm3 \n"\
    "pmaxsw %%mm7, %%mm4 \n"\
    "pxor %%mm7, %%mm7 \n"

#define ABS3_SSSE3\
    "pabsw %%mm3, %%mm3 \n"\
    "pabsw %%mm4, %%mm4 \n"\
    "pabsw %%mm5, %%mm5 \n"

PAETH(mmx2, ABS3_MMX2)
#if HAVE_SSSE3
PAETH(ssse3, ABS3_SSSE3)
#endif

#define QPEL_V_LOW(m3,m4,m5,m6, pw_20, pw_3, rnd, in0, in1, in2, in7, out, OP)\
    "paddw " #m4 ", " #m3 " \n\t" \
    "movq "MANGLE(ff_pw_20)", %%mm4 \n\t" \
    "pmullw " #m3 ", %%mm4 \n\t" \
    "movq "#in7", " #m3 " \n\t" \
    "movq "#in0", %%mm5 \n\t" \
    "paddw " #m3 ", %%mm5 \n\t" \
    "psubw %%mm5, %%mm4 \n\t" \
    "movq "#in1", %%mm5 \n\t" \
    "movq "#in2", %%mm6 \n\t" \
    "paddw " #m6 ", %%mm5 \n\t" \
    "paddw " #m5 ", %%mm6 \n\t" \
    "paddw %%mm6, %%mm6 \n\t" \
    "psubw %%mm6, %%mm5 \n\t" \
    "pmullw "MANGLE(ff_pw_3)", %%mm5 \n\t" \
    "paddw " #rnd ", %%mm4 \n\t" \
    "paddw %%mm4, %%mm5 \n\t" \
    "psraw $5, %%mm5 \n\t"\
    "packuswb %%mm5, %%mm5 \n\t"\
    OP(%%mm5, out, %%mm7, d)
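
/* QPEL_V_LOW evaluates one output row of the 8-tap MPEG-4 quarter-pel
 * filter [-1 3 -6 20 20 -6 3 -1], adds the rounder and shifts right by 5;
 * the scalar form is spelled out in the temp[] tables of the 3DNow!
 * fallbacks below. */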

#define QPEL_BASE(OPNAME, ROUNDER, RND, OP_MMX2, OP_3DNOW)\
static void OPNAME ## mpeg4_qpel16_h_lowpass_mmx2(uint8_t *dst, uint8_t *src, int dstStride, int srcStride, int h){\
    uint64_t temp;\
\
    __asm__ volatile(\
        "pxor %%mm7, %%mm7 \n\t"\
        "1: \n\t"\
        "movq (%0), %%mm0 \n\t" \
        "movq %%mm0, %%mm1 \n\t" \
        "movq %%mm0, %%mm2 \n\t" \
        "punpcklbw %%mm7, %%mm0 \n\t" \
        "punpckhbw %%mm7, %%mm1 \n\t" \
        "pshufw $0x90, %%mm0, %%mm5 \n\t" \
        "pshufw $0x41, %%mm0, %%mm6 \n\t" \
        "movq %%mm2, %%mm3 \n\t" \
        "movq %%mm2, %%mm4 \n\t" \
        "psllq $8, %%mm2 \n\t" \
        "psllq $16, %%mm3 \n\t" \
        "psllq $24, %%mm4 \n\t" \
        "punpckhbw %%mm7, %%mm2 \n\t" \
        "punpckhbw %%mm7, %%mm3 \n\t" \
        "punpckhbw %%mm7, %%mm4 \n\t" \
        "paddw %%mm3, %%mm5 \n\t" \
        "paddw %%mm2, %%mm6 \n\t" \
        "paddw %%mm5, %%mm5 \n\t" \
        "psubw %%mm5, %%mm6 \n\t" \
        "pshufw $0x06, %%mm0, %%mm5 \n\t" \
        "pmullw "MANGLE(ff_pw_3)", %%mm6 \n\t" \
        "paddw %%mm4, %%mm0 \n\t" \
        "paddw %%mm1, %%mm5 \n\t" \
        "pmullw "MANGLE(ff_pw_20)", %%mm0 \n\t" \
        "psubw %%mm5, %%mm0 \n\t" \
        "paddw %6, %%mm6 \n\t"\
        "paddw %%mm6, %%mm0 \n\t" \
        "psraw $5, %%mm0 \n\t"\
        "movq %%mm0, %5 \n\t"\
\
\
        "movq 5(%0), %%mm0 \n\t" \
        "movq %%mm0, %%mm5 \n\t" \
        "movq %%mm0, %%mm6 \n\t" \
        "psrlq $8, %%mm0 \n\t" \
        "psrlq $16, %%mm5 \n\t" \
        "punpcklbw %%mm7, %%mm0 \n\t" \
        "punpcklbw %%mm7, %%mm5 \n\t" \
        "paddw %%mm0, %%mm2 \n\t" \
        "paddw %%mm5, %%mm3 \n\t" \
        "paddw %%mm2, %%mm2 \n\t" \
        "psubw %%mm2, %%mm3 \n\t" \
        "movq %%mm6, %%mm2 \n\t" \
        "psrlq $24, %%mm6 \n\t" \
        "punpcklbw %%mm7, %%mm2 \n\t" \
        "punpcklbw %%mm7, %%mm6 \n\t" \
        "pmullw "MANGLE(ff_pw_3)", %%mm3 \n\t" \
        "paddw %%mm2, %%mm1 \n\t" \
        "paddw %%mm6, %%mm4 \n\t" \
        "pmullw "MANGLE(ff_pw_20)", %%mm1 \n\t" \
        "psubw %%mm4, %%mm3 \n\t" \
        "paddw %6, %%mm1 \n\t"\
        "paddw %%mm1, %%mm3 \n\t" \
        "psraw $5, %%mm3 \n\t"\
        "movq %5, %%mm1 \n\t"\
        "packuswb %%mm3, %%mm1 \n\t"\
        OP_MMX2(%%mm1, (%1),%%mm4, q)\
\
\
        "movq 9(%0), %%mm1 \n\t" \
        "movq %%mm1, %%mm4 \n\t" \
        "movq %%mm1, %%mm3 \n\t" \
        "psrlq $8, %%mm1 \n\t" \
        "psrlq $16, %%mm4 \n\t" \
        "punpcklbw %%mm7, %%mm1 \n\t" \
        "punpcklbw %%mm7, %%mm4 \n\t" \
        "paddw %%mm1, %%mm5 \n\t" \
        "paddw %%mm4, %%mm0 \n\t" \
        "paddw %%mm5, %%mm5 \n\t" \
        "psubw %%mm5, %%mm0 \n\t" \
        "movq %%mm3, %%mm5 \n\t" \
        "psrlq $24, %%mm3 \n\t" \
        "pmullw "MANGLE(ff_pw_3)", %%mm0 \n\t" \
        "punpcklbw %%mm7, %%mm3 \n\t" \
        "paddw %%mm3, %%mm2 \n\t" \
        "psubw %%mm2, %%mm0 \n\t" \
        "movq %%mm5, %%mm2 \n\t" \
        "punpcklbw %%mm7, %%mm2 \n\t" \
        "punpckhbw %%mm7, %%mm5 \n\t" \
        "paddw %%mm2, %%mm6 \n\t" \
        "pmullw "MANGLE(ff_pw_20)", %%mm6 \n\t" \
        "paddw %6, %%mm0 \n\t"\
        "paddw %%mm6, %%mm0 \n\t" \
        "psraw $5, %%mm0 \n\t"\
\
\
        "paddw %%mm5, %%mm3 \n\t" \
        "pshufw $0xF9, %%mm5, %%mm6 \n\t" \
        "paddw %%mm4, %%mm6 \n\t" \
        "pshufw $0xBE, %%mm5, %%mm4 \n\t" \
        "pshufw $0x6F, %%mm5, %%mm5 \n\t" \
        "paddw %%mm1, %%mm4 \n\t" \
        "paddw %%mm2, %%mm5 \n\t" \
        "paddw %%mm6, %%mm6 \n\t" \
        "psubw %%mm6, %%mm4 \n\t" \
        "pmullw "MANGLE(ff_pw_20)", %%mm3 \n\t" \
        "pmullw "MANGLE(ff_pw_3)", %%mm4 \n\t" \
        "psubw %%mm5, %%mm3 \n\t" \
        "paddw %6, %%mm4 \n\t"\
        "paddw %%mm3, %%mm4 \n\t" \
        "psraw $5, %%mm4 \n\t"\
        "packuswb %%mm4, %%mm0 \n\t"\
        OP_MMX2(%%mm0, 8(%1), %%mm4, q)\
\
        "add %3, %0 \n\t"\
        "add %4, %1 \n\t"\
        "decl %2 \n\t"\
        " jnz 1b \n\t"\
        : "+a"(src), "+c"(dst), "+D"(h)\
        : "d"((x86_reg)srcStride), "S"((x86_reg)dstStride), "m"(temp), "m"(ROUNDER)\
        : "memory"\
    );\
}\
\
static void OPNAME ## mpeg4_qpel16_h_lowpass_3dnow(uint8_t *dst, uint8_t *src, int dstStride, int srcStride, int h){\
    int i;\
    int16_t temp[16];\
\
    for(i=0; i<h; i++)\
    {\
        temp[ 0]= (src[ 0]+src[ 1])*20 - (src[ 0]+src[ 2])*6 + (src[ 1]+src[ 3])*3 - (src[ 2]+src[ 4]);\
        temp[ 1]= (src[ 1]+src[ 2])*20 - (src[ 0]+src[ 3])*6 + (src[ 0]+src[ 4])*3 - (src[ 1]+src[ 5]);\
        temp[ 2]= (src[ 2]+src[ 3])*20 - (src[ 1]+src[ 4])*6 + (src[ 0]+src[ 5])*3 - (src[ 0]+src[ 6]);\
        temp[ 3]= (src[ 3]+src[ 4])*20 - (src[ 2]+src[ 5])*6 + (src[ 1]+src[ 6])*3 - (src[ 0]+src[ 7]);\
        temp[ 4]= (src[ 4]+src[ 5])*20 - (src[ 3]+src[ 6])*6 + (src[ 2]+src[ 7])*3 - (src[ 1]+src[ 8]);\
        temp[ 5]= (src[ 5]+src[ 6])*20 - (src[ 4]+src[ 7])*6 + (src[ 3]+src[ 8])*3 - (src[ 2]+src[ 9]);\
        temp[ 6]= (src[ 6]+src[ 7])*20 - (src[ 5]+src[ 8])*6 + (src[ 4]+src[ 9])*3 - (src[ 3]+src[10]);\
        temp[ 7]= (src[ 7]+src[ 8])*20 - (src[ 6]+src[ 9])*6 + (src[ 5]+src[10])*3 - (src[ 4]+src[11]);\
        temp[ 8]= (src[ 8]+src[ 9])*20 - (src[ 7]+src[10])*6 + (src[ 6]+src[11])*3 - (src[ 5]+src[12]);\
        temp[ 9]= (src[ 9]+src[10])*20 - (src[ 8]+src[11])*6 + (src[ 7]+src[12])*3 - (src[ 6]+src[13]);\
        temp[10]= (src[10]+src[11])*20 - (src[ 9]+src[12])*6 + (src[ 8]+src[13])*3 - (src[ 7]+src[14]);\
        temp[11]= (src[11]+src[12])*20 - (src[10]+src[13])*6 + (src[ 9]+src[14])*3 - (src[ 8]+src[15]);\
        temp[12]= (src[12]+src[13])*20 - (src[11]+src[14])*6 + (src[10]+src[15])*3 - (src[ 9]+src[16]);\
        temp[13]= (src[13]+src[14])*20 - (src[12]+src[15])*6 + (src[11]+src[16])*3 - (src[10]+src[16]);\
        temp[14]= (src[14]+src[15])*20 - (src[13]+src[16])*6 + (src[12]+src[16])*3 - (src[11]+src[15]);\
        temp[15]= (src[15]+src[16])*20 - (src[14]+src[16])*6 + (src[13]+src[15])*3 - (src[12]+src[14]);\
        __asm__ volatile(\
            "movq (%0), %%mm0 \n\t"\
            "movq 8(%0), %%mm1 \n\t"\
            "paddw %2, %%mm0 \n\t"\
            "paddw %2, %%mm1 \n\t"\
            "psraw $5, %%mm0 \n\t"\
            "psraw $5, %%mm1 \n\t"\
            "packuswb %%mm1, %%mm0 \n\t"\
            OP_3DNOW(%%mm0, (%1), %%mm1, q)\
            "movq 16(%0), %%mm0 \n\t"\
            "movq 24(%0), %%mm1 \n\t"\
            "paddw %2, %%mm0 \n\t"\
            "paddw %2, %%mm1 \n\t"\
            "psraw $5, %%mm0 \n\t"\
            "psraw $5, %%mm1 \n\t"\
            "packuswb %%mm1, %%mm0 \n\t"\
            OP_3DNOW(%%mm0, 8(%1), %%mm1, q)\
            :: "r"(temp), "r"(dst), "m"(ROUNDER)\
            : "memory"\
        );\
        dst+=dstStride;\
        src+=srcStride;\
    }\
}\
\
static void OPNAME ## mpeg4_qpel8_h_lowpass_mmx2(uint8_t *dst, uint8_t *src, int dstStride, int srcStride, int h){\
    __asm__ volatile(\
        "pxor %%mm7, %%mm7 \n\t"\
        "1: \n\t"\
        "movq (%0), %%mm0 \n\t" \
        "movq %%mm0, %%mm1 \n\t" \
        "movq %%mm0, %%mm2 \n\t" \
        "punpcklbw %%mm7, %%mm0 \n\t" \
        "punpckhbw %%mm7, %%mm1 \n\t" \
        "pshufw $0x90, %%mm0, %%mm5 \n\t" \
        "pshufw $0x41, %%mm0, %%mm6 \n\t" \
        "movq %%mm2, %%mm3 \n\t" \
        "movq %%mm2, %%mm4 \n\t" \
        "psllq $8, %%mm2 \n\t" \
        "psllq $16, %%mm3 \n\t" \
        "psllq $24, %%mm4 \n\t" \
        "punpckhbw %%mm7, %%mm2 \n\t" \
        "punpckhbw %%mm7, %%mm3 \n\t" \
        "punpckhbw %%mm7, %%mm4 \n\t" \
        "paddw %%mm3, %%mm5 \n\t" \
        "paddw %%mm2, %%mm6 \n\t" \
        "paddw %%mm5, %%mm5 \n\t" \
        "psubw %%mm5, %%mm6 \n\t" \
        "pshufw $0x06, %%mm0, %%mm5 \n\t" \
        "pmullw "MANGLE(ff_pw_3)", %%mm6 \n\t" \
        "paddw %%mm4, %%mm0 \n\t" \
        "paddw %%mm1, %%mm5 \n\t" \
        "pmullw "MANGLE(ff_pw_20)", %%mm0 \n\t" \
        "psubw %%mm5, %%mm0 \n\t" \
        "paddw %5, %%mm6 \n\t"\
        "paddw %%mm6, %%mm0 \n\t" \
        "psraw $5, %%mm0 \n\t"\
\
\
        "movd 5(%0), %%mm5 \n\t" \
        "punpcklbw %%mm7, %%mm5 \n\t" \
        "pshufw $0xF9, %%mm5, %%mm6 \n\t" \
        "paddw %%mm5, %%mm1 \n\t" \
        "paddw %%mm6, %%mm2 \n\t" \
        "pshufw $0xBE, %%mm5, %%mm6 \n\t" \
        "pshufw $0x6F, %%mm5, %%mm5 \n\t" \
        "paddw %%mm6, %%mm3 \n\t" \
        "paddw %%mm5, %%mm4 \n\t" \
        "paddw %%mm2, %%mm2 \n\t" \
        "psubw %%mm2, %%mm3 \n\t" \
        "pmullw "MANGLE(ff_pw_20)", %%mm1 \n\t" \
        "pmullw "MANGLE(ff_pw_3)", %%mm3 \n\t" \
        "psubw %%mm4, %%mm3 \n\t" \
        "paddw %5, %%mm1 \n\t"\
        "paddw %%mm1, %%mm3 \n\t" \
        "psraw $5, %%mm3 \n\t"\
        "packuswb %%mm3, %%mm0 \n\t"\
        OP_MMX2(%%mm0, (%1), %%mm4, q)\
\
        "add %3, %0 \n\t"\
        "add %4, %1 \n\t"\
        "decl %2 \n\t"\
        " jnz 1b \n\t"\
        : "+a"(src), "+c"(dst), "+d"(h)\
        : "S"((x86_reg)srcStride), "D"((x86_reg)dstStride), "m"(ROUNDER)\
        : "memory"\
    );\
}\
\
static void OPNAME ## mpeg4_qpel8_h_lowpass_3dnow(uint8_t *dst, uint8_t *src, int dstStride, int srcStride, int h){\
    int i;\
    int16_t temp[8];\
\
    for(i=0; i<h; i++)\
    {\
        temp[ 0]= (src[ 0]+src[ 1])*20 - (src[ 0]+src[ 2])*6 + (src[ 1]+src[ 3])*3 - (src[ 2]+src[ 4]);\
        temp[ 1]= (src[ 1]+src[ 2])*20 - (src[ 0]+src[ 3])*6 + (src[ 0]+src[ 4])*3 - (src[ 1]+src[ 5]);\
        temp[ 2]= (src[ 2]+src[ 3])*20 - (src[ 1]+src[ 4])*6 + (src[ 0]+src[ 5])*3 - (src[ 0]+src[ 6]);\
        temp[ 3]= (src[ 3]+src[ 4])*20 - (src[ 2]+src[ 5])*6 + (src[ 1]+src[ 6])*3 - (src[ 0]+src[ 7]);\
        temp[ 4]= (src[ 4]+src[ 5])*20 - (src[ 3]+src[ 6])*6 + (src[ 2]+src[ 7])*3 - (src[ 1]+src[ 8]);\
        temp[ 5]= (src[ 5]+src[ 6])*20 - (src[ 4]+src[ 7])*6 + (src[ 3]+src[ 8])*3 - (src[ 2]+src[ 8]);\
        temp[ 6]= (src[ 6]+src[ 7])*20 - (src[ 5]+src[ 8])*6 + (src[ 4]+src[ 8])*3 - (src[ 3]+src[ 7]);\
        temp[ 7]= (src[ 7]+src[ 8])*20 - (src[ 6]+src[ 8])*6 + (src[ 5]+src[ 7])*3 - (src[ 4]+src[ 6]);\
        __asm__ volatile(\
            "movq (%0), %%mm0 \n\t"\
            "movq 8(%0), %%mm1 \n\t"\
            "paddw %2, %%mm0 \n\t"\
            "paddw %2, %%mm1 \n\t"\
            "psraw $5, %%mm0 \n\t"\
            "psraw $5, %%mm1 \n\t"\
            "packuswb %%mm1, %%mm0 \n\t"\
            OP_3DNOW(%%mm0, (%1), %%mm1, q)\
            :: "r"(temp), "r"(dst), "m"(ROUNDER)\
            :"memory"\
        );\
        dst+=dstStride;\
        src+=srcStride;\
    }\
}
01229
01230 #define QPEL_OP(OPNAME, ROUNDER, RND, OP, MMX)\
01231 \
01232 static void OPNAME ## mpeg4_qpel16_v_lowpass_ ## MMX(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\
01233 uint64_t temp[17*4];\
01234 uint64_t *temp_ptr= temp;\
01235 int count= 17;\
01236 \
01237 \
01238 __asm__ volatile(\
01239 "pxor %%mm7, %%mm7 \n\t"\
01240 "1: \n\t"\
01241 "movq (%0), %%mm0 \n\t"\
01242 "movq (%0), %%mm1 \n\t"\
01243 "movq 8(%0), %%mm2 \n\t"\
01244 "movq 8(%0), %%mm3 \n\t"\
01245 "punpcklbw %%mm7, %%mm0 \n\t"\
01246 "punpckhbw %%mm7, %%mm1 \n\t"\
01247 "punpcklbw %%mm7, %%mm2 \n\t"\
01248 "punpckhbw %%mm7, %%mm3 \n\t"\
01249 "movq %%mm0, (%1) \n\t"\
01250 "movq %%mm1, 17*8(%1) \n\t"\
01251 "movq %%mm2, 2*17*8(%1) \n\t"\
01252 "movq %%mm3, 3*17*8(%1) \n\t"\
01253 "add $8, %1 \n\t"\
01254 "add %3, %0 \n\t"\
01255 "decl %2 \n\t"\
01256 " jnz 1b \n\t"\
01257 : "+r" (src), "+r" (temp_ptr), "+r"(count)\
01258 : "r" ((x86_reg)srcStride)\
01259 : "memory"\
01260 );\
01261 \
01262 temp_ptr= temp;\
01263 count=4;\
01264 \
01265 \
01266 __asm__ volatile(\
01267 \
01268 "1: \n\t"\
01269 "movq (%0), %%mm0 \n\t"\
01270 "movq 8(%0), %%mm1 \n\t"\
01271 "movq 16(%0), %%mm2 \n\t"\
01272 "movq 24(%0), %%mm3 \n\t"\
01273 QPEL_V_LOW(%%mm0, %%mm1, %%mm2, %%mm3, %5, %6, %5, 16(%0), 8(%0), (%0), 32(%0), (%1), OP)\
01274 QPEL_V_LOW(%%mm1, %%mm2, %%mm3, %%mm0, %5, %6, %5, 8(%0), (%0), (%0), 40(%0), (%1, %3), OP)\
01275 "add %4, %1 \n\t"\
01276 QPEL_V_LOW(%%mm2, %%mm3, %%mm0, %%mm1, %5, %6, %5, (%0), (%0), 8(%0), 48(%0), (%1), OP)\
01277 \
01278 QPEL_V_LOW(%%mm3, %%mm0, %%mm1, %%mm2, %5, %6, %5, (%0), 8(%0), 16(%0), 56(%0), (%1, %3), OP)\
01279 "add %4, %1 \n\t"\
01280 QPEL_V_LOW(%%mm0, %%mm1, %%mm2, %%mm3, %5, %6, %5, 8(%0), 16(%0), 24(%0), 64(%0), (%1), OP)\
01281 QPEL_V_LOW(%%mm1, %%mm2, %%mm3, %%mm0, %5, %6, %5, 16(%0), 24(%0), 32(%0), 72(%0), (%1, %3), OP)\
01282 "add %4, %1 \n\t"\
01283 QPEL_V_LOW(%%mm2, %%mm3, %%mm0, %%mm1, %5, %6, %5, 24(%0), 32(%0), 40(%0), 80(%0), (%1), OP)\
01284 QPEL_V_LOW(%%mm3, %%mm0, %%mm1, %%mm2, %5, %6, %5, 32(%0), 40(%0), 48(%0), 88(%0), (%1, %3), OP)\
01285 "add %4, %1 \n\t"\
01286 QPEL_V_LOW(%%mm0, %%mm1, %%mm2, %%mm3, %5, %6, %5, 40(%0), 48(%0), 56(%0), 96(%0), (%1), OP)\
01287 QPEL_V_LOW(%%mm1, %%mm2, %%mm3, %%mm0, %5, %6, %5, 48(%0), 56(%0), 64(%0),104(%0), (%1, %3), OP)\
01288 "add %4, %1 \n\t"\
01289 QPEL_V_LOW(%%mm2, %%mm3, %%mm0, %%mm1, %5, %6, %5, 56(%0), 64(%0), 72(%0),112(%0), (%1), OP)\
01290 QPEL_V_LOW(%%mm3, %%mm0, %%mm1, %%mm2, %5, %6, %5, 64(%0), 72(%0), 80(%0),120(%0), (%1, %3), OP)\
01291 "add %4, %1 \n\t"\
01292 QPEL_V_LOW(%%mm0, %%mm1, %%mm2, %%mm3, %5, %6, %5, 72(%0), 80(%0), 88(%0),128(%0), (%1), OP)\
01293 \
01294 QPEL_V_LOW(%%mm1, %%mm2, %%mm3, %%mm0, %5, %6, %5, 80(%0), 88(%0), 96(%0),128(%0), (%1, %3), OP)\
01295 "add %4, %1 \n\t" \
01296 QPEL_V_LOW(%%mm2, %%mm3, %%mm0, %%mm1, %5, %6, %5, 88(%0), 96(%0),104(%0),120(%0), (%1), OP)\
01297 QPEL_V_LOW(%%mm3, %%mm0, %%mm1, %%mm2, %5, %6, %5, 96(%0),104(%0),112(%0),112(%0), (%1, %3), OP)\
01298 \
01299 "add $136, %0 \n\t"\
01300 "add %6, %1 \n\t"\
01301 "decl %2 \n\t"\
01302 " jnz 1b \n\t"\
01303 \
01304 : "+r"(temp_ptr), "+r"(dst), "+g"(count)\
01305 : "r"((x86_reg)dstStride), "r"(2*(x86_reg)dstStride), "m"(ROUNDER), "g"(4-14*(x86_reg)dstStride)\
01306 :"memory"\
01307 );\
01308 }\
01309 \
01310 static void OPNAME ## mpeg4_qpel8_v_lowpass_ ## MMX(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\
01311 uint64_t temp[9*2];\
01312 uint64_t *temp_ptr= temp;\
01313 int count= 9;\
01314 \
01315 \
01316 __asm__ volatile(\
01317 "pxor %%mm7, %%mm7 \n\t"\
01318 "1: \n\t"\
01319 "movq (%0), %%mm0 \n\t"\
01320 "movq (%0), %%mm1 \n\t"\
01321 "punpcklbw %%mm7, %%mm0 \n\t"\
01322 "punpckhbw %%mm7, %%mm1 \n\t"\
01323 "movq %%mm0, (%1) \n\t"\
01324 "movq %%mm1, 9*8(%1) \n\t"\
01325 "add $8, %1 \n\t"\
01326 "add %3, %0 \n\t"\
01327 "decl %2 \n\t"\
01328 " jnz 1b \n\t"\
01329 : "+r" (src), "+r" (temp_ptr), "+r"(count)\
01330 : "r" ((x86_reg)srcStride)\
01331 : "memory"\
01332 );\
01333 \
01334 temp_ptr= temp;\
01335 count=2;\
01336 \
01337 \
01338 __asm__ volatile(\
01339 \
01340 "1: \n\t"\
01341 "movq (%0), %%mm0 \n\t"\
01342 "movq 8(%0), %%mm1 \n\t"\
01343 "movq 16(%0), %%mm2 \n\t"\
01344 "movq 24(%0), %%mm3 \n\t"\
01345 QPEL_V_LOW(%%mm0, %%mm1, %%mm2, %%mm3, %5, %6, %5, 16(%0), 8(%0), (%0), 32(%0), (%1), OP)\
01346 QPEL_V_LOW(%%mm1, %%mm2, %%mm3, %%mm0, %5, %6, %5, 8(%0), (%0), (%0), 40(%0), (%1, %3), OP)\
01347 "add %4, %1 \n\t"\
01348 QPEL_V_LOW(%%mm2, %%mm3, %%mm0, %%mm1, %5, %6, %5, (%0), (%0), 8(%0), 48(%0), (%1), OP)\
01349 \
01350 QPEL_V_LOW(%%mm3, %%mm0, %%mm1, %%mm2, %5, %6, %5, (%0), 8(%0), 16(%0), 56(%0), (%1, %3), OP)\
01351 "add %4, %1 \n\t"\
01352 QPEL_V_LOW(%%mm0, %%mm1, %%mm2, %%mm3, %5, %6, %5, 8(%0), 16(%0), 24(%0), 64(%0), (%1), OP)\
01353 \
01354 QPEL_V_LOW(%%mm1, %%mm2, %%mm3, %%mm0, %5, %6, %5, 16(%0), 24(%0), 32(%0), 64(%0), (%1, %3), OP)\
01355 "add %4, %1 \n\t"\
01356 QPEL_V_LOW(%%mm2, %%mm3, %%mm0, %%mm1, %5, %6, %5, 24(%0), 32(%0), 40(%0), 56(%0), (%1), OP)\
01357 QPEL_V_LOW(%%mm3, %%mm0, %%mm1, %%mm2, %5, %6, %5, 32(%0), 40(%0), 48(%0), 48(%0), (%1, %3), OP)\
01358 \
01359 "add $72, %0 \n\t"\
01360 "add %6, %1 \n\t"\
01361 "decl %2 \n\t"\
01362 " jnz 1b \n\t"\
01363 \
01364 : "+r"(temp_ptr), "+r"(dst), "+g"(count)\
01365 : "r"((x86_reg)dstStride), "r"(2*(x86_reg)dstStride), "m"(ROUNDER), "g"(4-6*(x86_reg)dstStride)\
01366 : "memory"\
01367 );\
01368 }\
01369 \
01370 static void OPNAME ## qpel8_mc00_ ## MMX (uint8_t *dst, uint8_t *src, int stride){\
01371 OPNAME ## pixels8_ ## MMX(dst, src, stride, 8);\
01372 }\
01373 \
01374 static void OPNAME ## qpel8_mc10_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
01375 uint64_t temp[8];\
01376 uint8_t * const half= (uint8_t*)temp;\
01377 put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(half, src, 8, stride, 8);\
01378 OPNAME ## pixels8_l2_ ## MMX(dst, src, half, stride, stride, 8);\
01379 }\
01380 \
01381 static void OPNAME ## qpel8_mc20_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
01382 OPNAME ## mpeg4_qpel8_h_lowpass_ ## MMX(dst, src, stride, stride, 8);\
01383 }\
01384 \
01385 static void OPNAME ## qpel8_mc30_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
01386 uint64_t temp[8];\
01387 uint8_t * const half= (uint8_t*)temp;\
01388 put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(half, src, 8, stride, 8);\
01389 OPNAME ## pixels8_l2_ ## MMX(dst, src+1, half, stride, stride, 8);\
01390 }\
01391 \
01392 static void OPNAME ## qpel8_mc01_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
01393 uint64_t temp[8];\
01394 uint8_t * const half= (uint8_t*)temp;\
01395 put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(half, src, 8, stride);\
01396 OPNAME ## pixels8_l2_ ## MMX(dst, src, half, stride, stride, 8);\
01397 }\
01398 \
01399 static void OPNAME ## qpel8_mc02_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
01400 OPNAME ## mpeg4_qpel8_v_lowpass_ ## MMX(dst, src, stride, stride);\
01401 }\
01402 \
01403 static void OPNAME ## qpel8_mc03_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
01404 uint64_t temp[8];\
01405 uint8_t * const half= (uint8_t*)temp;\
01406 put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(half, src, 8, stride);\
01407 OPNAME ## pixels8_l2_ ## MMX(dst, src+stride, half, stride, stride, 8);\
01408 }\
01409 static void OPNAME ## qpel8_mc11_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
01410 uint64_t half[8 + 9];\
01411 uint8_t * const halfH= ((uint8_t*)half) + 64;\
01412 uint8_t * const halfHV= ((uint8_t*)half);\
01413 put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8, stride, 9);\
01414 put ## RND ## pixels8_l2_ ## MMX(halfH, src, halfH, 8, stride, 9);\
01415 put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(halfHV, halfH, 8, 8);\
01416 OPNAME ## pixels8_l2_ ## MMX(dst, halfH, halfHV, stride, 8, 8);\
01417 }\
01418 static void OPNAME ## qpel8_mc31_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
01419 uint64_t half[8 + 9];\
01420 uint8_t * const halfH= ((uint8_t*)half) + 64;\
01421 uint8_t * const halfHV= ((uint8_t*)half);\
01422 put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8, stride, 9);\
01423 put ## RND ## pixels8_l2_ ## MMX(halfH, src+1, halfH, 8, stride, 9);\
01424 put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(halfHV, halfH, 8, 8);\
01425 OPNAME ## pixels8_l2_ ## MMX(dst, halfH, halfHV, stride, 8, 8);\
01426 }\
01427 static void OPNAME ## qpel8_mc13_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
01428 uint64_t half[8 + 9];\
01429 uint8_t * const halfH= ((uint8_t*)half) + 64;\
01430 uint8_t * const halfHV= ((uint8_t*)half);\
01431 put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8, stride, 9);\
01432 put ## RND ## pixels8_l2_ ## MMX(halfH, src, halfH, 8, stride, 9);\
01433 put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(halfHV, halfH, 8, 8);\
01434 OPNAME ## pixels8_l2_ ## MMX(dst, halfH+8, halfHV, stride, 8, 8);\
01435 }\
01436 static void OPNAME ## qpel8_mc33_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
01437 uint64_t half[8 + 9];\
01438 uint8_t * const halfH= ((uint8_t*)half) + 64;\
01439 uint8_t * const halfHV= ((uint8_t*)half);\
01440 put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8, stride, 9);\
01441 put ## RND ## pixels8_l2_ ## MMX(halfH, src+1, halfH, 8, stride, 9);\
01442 put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(halfHV, halfH, 8, 8);\
01443 OPNAME ## pixels8_l2_ ## MMX(dst, halfH+8, halfHV, stride, 8, 8);\
01444 }\
01445 static void OPNAME ## qpel8_mc21_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
01446 uint64_t half[8 + 9];\
01447 uint8_t * const halfH= ((uint8_t*)half) + 64;\
01448 uint8_t * const halfHV= ((uint8_t*)half);\
01449 put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8, stride, 9);\
01450 put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(halfHV, halfH, 8, 8);\
01451 OPNAME ## pixels8_l2_ ## MMX(dst, halfH, halfHV, stride, 8, 8);\
01452 }\
01453 static void OPNAME ## qpel8_mc23_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
01454 uint64_t half[8 + 9];\
01455 uint8_t * const halfH= ((uint8_t*)half) + 64;\
01456 uint8_t * const halfHV= ((uint8_t*)half);\
01457 put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8, stride, 9);\
01458 put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(halfHV, halfH, 8, 8);\
01459 OPNAME ## pixels8_l2_ ## MMX(dst, halfH+8, halfHV, stride, 8, 8);\
01460 }\
01461 static void OPNAME ## qpel8_mc12_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
01462 uint64_t half[8 + 9];\
01463 uint8_t * const halfH= ((uint8_t*)half);\
01464 put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8, stride, 9);\
01465 put ## RND ## pixels8_l2_ ## MMX(halfH, src, halfH, 8, stride, 9);\
01466 OPNAME ## mpeg4_qpel8_v_lowpass_ ## MMX(dst, halfH, stride, 8);\
01467 }\
01468 static void OPNAME ## qpel8_mc32_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
01469 uint64_t half[8 + 9];\
01470 uint8_t * const halfH= ((uint8_t*)half);\
01471 put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8, stride, 9);\
01472 put ## RND ## pixels8_l2_ ## MMX(halfH, src+1, halfH, 8, stride, 9);\
01473 OPNAME ## mpeg4_qpel8_v_lowpass_ ## MMX(dst, halfH, stride, 8);\
01474 }\
01475 static void OPNAME ## qpel8_mc22_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
01476 uint64_t half[9];\
01477 uint8_t * const halfH= ((uint8_t*)half);\
01478 put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8, stride, 9);\
01479 OPNAME ## mpeg4_qpel8_v_lowpass_ ## MMX(dst, halfH, stride, 8);\
01480 }\
01481 static void OPNAME ## qpel16_mc00_ ## MMX (uint8_t *dst, uint8_t *src, int stride){\
01482 OPNAME ## pixels16_ ## MMX(dst, src, stride, 16);\
01483 }\
01484 \
01485 static void OPNAME ## qpel16_mc10_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
01486 uint64_t temp[32];\
01487 uint8_t * const half= (uint8_t*)temp;\
01488 put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(half, src, 16, stride, 16);\
01489 OPNAME ## pixels16_l2_ ## MMX(dst, src, half, stride, stride, 16);\
01490 }\
01491 \
01492 static void OPNAME ## qpel16_mc20_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
01493 OPNAME ## mpeg4_qpel16_h_lowpass_ ## MMX(dst, src, stride, stride, 16);\
01494 }\
01495 \
01496 static void OPNAME ## qpel16_mc30_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
01497 uint64_t temp[32];\
01498 uint8_t * const half= (uint8_t*)temp;\
01499 put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(half, src, 16, stride, 16);\
01500 OPNAME ## pixels16_l2_ ## MMX(dst, src+1, half, stride, stride, 16);\
01501 }\
01502 \
01503 static void OPNAME ## qpel16_mc01_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
01504 uint64_t temp[32];\
01505 uint8_t * const half= (uint8_t*)temp;\
01506 put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(half, src, 16, stride);\
01507 OPNAME ## pixels16_l2_ ## MMX(dst, src, half, stride, stride, 16);\
01508 }\
01509 \
01510 static void OPNAME ## qpel16_mc02_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
01511 OPNAME ## mpeg4_qpel16_v_lowpass_ ## MMX(dst, src, stride, stride);\
01512 }\
01513 \
01514 static void OPNAME ## qpel16_mc03_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
01515 uint64_t temp[32];\
01516 uint8_t * const half= (uint8_t*)temp;\
01517 put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(half, src, 16, stride);\
01518 OPNAME ## pixels16_l2_ ## MMX(dst, src+stride, half, stride, stride, 16);\
01519 }\
01520 static void OPNAME ## qpel16_mc11_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
01521 uint64_t half[16*2 + 17*2];\
01522 uint8_t * const halfH= ((uint8_t*)half) + 256;\
01523 uint8_t * const halfHV= ((uint8_t*)half);\
01524 put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16, stride, 17);\
01525 put ## RND ## pixels16_l2_ ## MMX(halfH, src, halfH, 16, stride, 17);\
01526 put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(halfHV, halfH, 16, 16);\
01527 OPNAME ## pixels16_l2_ ## MMX(dst, halfH, halfHV, stride, 16, 16);\
01528 }\
01529 static void OPNAME ## qpel16_mc31_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
01530 uint64_t half[16*2 + 17*2];\
01531 uint8_t * const halfH= ((uint8_t*)half) + 256;\
01532 uint8_t * const halfHV= ((uint8_t*)half);\
01533 put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16, stride, 17);\
01534 put ## RND ## pixels16_l2_ ## MMX(halfH, src+1, halfH, 16, stride, 17);\
01535 put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(halfHV, halfH, 16, 16);\
01536 OPNAME ## pixels16_l2_ ## MMX(dst, halfH, halfHV, stride, 16, 16);\
01537 }\
01538 static void OPNAME ## qpel16_mc13_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
01539 uint64_t half[16*2 + 17*2];\
01540 uint8_t * const halfH= ((uint8_t*)half) + 256;\
01541 uint8_t * const halfHV= ((uint8_t*)half);\
01542 put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16, stride, 17);\
01543 put ## RND ## pixels16_l2_ ## MMX(halfH, src, halfH, 16, stride, 17);\
01544 put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(halfHV, halfH, 16, 16);\
01545 OPNAME ## pixels16_l2_ ## MMX(dst, halfH+16, halfHV, stride, 16, 16);\
01546 }\
01547 static void OPNAME ## qpel16_mc33_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
01548 uint64_t half[16*2 + 17*2];\
01549 uint8_t * const halfH= ((uint8_t*)half) + 256;\
01550 uint8_t * const halfHV= ((uint8_t*)half);\
01551 put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16, stride, 17);\
01552 put ## RND ## pixels16_l2_ ## MMX(halfH, src+1, halfH, 16, stride, 17);\
01553 put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(halfHV, halfH, 16, 16);\
01554 OPNAME ## pixels16_l2_ ## MMX(dst, halfH+16, halfHV, stride, 16, 16);\
01555 }\
01556 static void OPNAME ## qpel16_mc21_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
01557 uint64_t half[16*2 + 17*2];\
01558 uint8_t * const halfH= ((uint8_t*)half) + 256;\
01559 uint8_t * const halfHV= ((uint8_t*)half);\
01560 put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16, stride, 17);\
01561 put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(halfHV, halfH, 16, 16);\
01562 OPNAME ## pixels16_l2_ ## MMX(dst, halfH, halfHV, stride, 16, 16);\
01563 }\
01564 static void OPNAME ## qpel16_mc23_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
01565 uint64_t half[16*2 + 17*2];\
01566 uint8_t * const halfH= ((uint8_t*)half) + 256;\
01567 uint8_t * const halfHV= ((uint8_t*)half);\
01568 put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16, stride, 17);\
01569 put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(halfHV, halfH, 16, 16);\
01570 OPNAME ## pixels16_l2_ ## MMX(dst, halfH+16, halfHV, stride, 16, 16);\
01571 }\
01572 static void OPNAME ## qpel16_mc12_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
01573 uint64_t half[17*2];\
01574 uint8_t * const halfH= ((uint8_t*)half);\
01575 put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16, stride, 17);\
01576 put ## RND ## pixels16_l2_ ## MMX(halfH, src, halfH, 16, stride, 17);\
01577 OPNAME ## mpeg4_qpel16_v_lowpass_ ## MMX(dst, halfH, stride, 16);\
01578 }\
01579 static void OPNAME ## qpel16_mc32_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
01580 uint64_t half[17*2];\
01581 uint8_t * const halfH= ((uint8_t*)half);\
01582 put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16, stride, 17);\
01583 put ## RND ## pixels16_l2_ ## MMX(halfH, src+1, halfH, 16, stride, 17);\
01584 OPNAME ## mpeg4_qpel16_v_lowpass_ ## MMX(dst, halfH, stride, 16);\
01585 }\
01586 static void OPNAME ## qpel16_mc22_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
01587 uint64_t half[17*2];\
01588 uint8_t * const halfH= ((uint8_t*)half);\
01589 put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16, stride, 17);\
01590 OPNAME ## mpeg4_qpel16_v_lowpass_ ## MMX(dst, halfH, stride, 16);\
01591 }
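/* Naming: qpel16_mcXY computes the quarter-pel sample at offset (X/4, Y/4);
 * e.g. mc20 is the horizontal half-pel position and mc22 the diagonal
 * half-pel position. halfH/halfHV are temporary half-pel planes that get
 * blended with the source (or with each other) by the pixels16_l2 helpers. */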
01592
01593 #define PUT_OP(a,b,temp, size) "mov" #size " " #a ", " #b " \n\t"
01594 #define AVG_3DNOW_OP(a,b,temp, size) \
01595 "mov" #size " " #b ", " #temp " \n\t"\
01596 "pavgusb " #temp ", " #a " \n\t"\
01597 "mov" #size " " #a ", " #b " \n\t"
01598 #define AVG_MMX2_OP(a,b,temp, size) \
01599 "mov" #size " " #b ", " #temp " \n\t"\
01600 "pavgb " #temp ", " #a " \n\t"\
01601 "mov" #size " " #a ", " #b " \n\t"
01602
01603 QPEL_BASE(put_ , ff_pw_16, _ , PUT_OP, PUT_OP)
01604 QPEL_BASE(avg_ , ff_pw_16, _ , AVG_MMX2_OP, AVG_3DNOW_OP)
01605 QPEL_BASE(put_no_rnd_, ff_pw_15, _no_rnd_, PUT_OP, PUT_OP)
01606 QPEL_OP(put_ , ff_pw_16, _ , PUT_OP, 3dnow)
01607 QPEL_OP(avg_ , ff_pw_16, _ , AVG_3DNOW_OP, 3dnow)
01608 QPEL_OP(put_no_rnd_, ff_pw_15, _no_rnd_, PUT_OP, 3dnow)
01609 QPEL_OP(put_ , ff_pw_16, _ , PUT_OP, mmx2)
01610 QPEL_OP(avg_ , ff_pw_16, _ , AVG_MMX2_OP, mmx2)
01611 QPEL_OP(put_no_rnd_, ff_pw_15, _no_rnd_, PUT_OP, mmx2)
01612
01613
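/***********************************/
/* bilinear (2-tap) qpel: not compliant to any spec, only useful as a fast
 * approximation when non-spec-compliant speed tricks are enabled */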
01614
01615
01616 #define QPEL_2TAP_XY(OPNAME, SIZE, MMX, XY, HPEL)\
01617 static void OPNAME ## 2tap_qpel ## SIZE ## _mc ## XY ## _ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
01618 OPNAME ## pixels ## SIZE ## HPEL(dst, src, stride, SIZE);\
01619 }
01620 #define QPEL_2TAP_L3(OPNAME, SIZE, MMX, XY, S0, S1, S2)\
01621 static void OPNAME ## 2tap_qpel ## SIZE ## _mc ## XY ## _ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
01622 OPNAME ## 2tap_qpel ## SIZE ## _l3_ ## MMX(dst, src+S0, stride, SIZE, S1, S2);\
01623 }
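/* A plausible scalar model of the 2-tap L3 kernel (weights are an assumption,
 * consistent with two rounds of pavgb in the _l3_ asm defined earlier in this
 * file): the pixel at src+S0 gets weight 1/2 and the pixels at offsets S1 and
 * S2 get weight 1/4 each. Sketch only, not built: */
#if 0
static void tap2_qpel_l3_ref(uint8_t *dst, uint8_t *src, int stride, int h,
                             int off1, int off2)
{
    int x, y;
    for (y = 0; y < h; y++) {           /* blocks are h x h here */
        for (x = 0; x < h; x++) {
            int b  = (src[x + off1] + src[x + off2] + 1) >> 1;
            dst[x] = (src[x] + b + 1) >> 1;
        }
        src += stride;
        dst += stride;
    }
}
#endif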
01624
01625 #define QPEL_2TAP(OPNAME, SIZE, MMX)\
01626 QPEL_2TAP_XY(OPNAME, SIZE, MMX, 20, _x2_ ## MMX)\
01627 QPEL_2TAP_XY(OPNAME, SIZE, MMX, 02, _y2_ ## MMX)\
01628 QPEL_2TAP_XY(OPNAME, SIZE, MMX, 22, _xy2_mmx)\
01629 static const qpel_mc_func OPNAME ## 2tap_qpel ## SIZE ## _mc00_ ## MMX =\
01630 OPNAME ## qpel ## SIZE ## _mc00_ ## MMX;\
01631 static const qpel_mc_func OPNAME ## 2tap_qpel ## SIZE ## _mc21_ ## MMX =\
01632 OPNAME ## 2tap_qpel ## SIZE ## _mc20_ ## MMX;\
01633 static const qpel_mc_func OPNAME ## 2tap_qpel ## SIZE ## _mc12_ ## MMX =\
01634 OPNAME ## 2tap_qpel ## SIZE ## _mc02_ ## MMX;\
01635 static void OPNAME ## 2tap_qpel ## SIZE ## _mc32_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
01636 OPNAME ## pixels ## SIZE ## _y2_ ## MMX(dst, src+1, stride, SIZE);\
01637 }\
01638 static void OPNAME ## 2tap_qpel ## SIZE ## _mc23_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
01639 OPNAME ## pixels ## SIZE ## _x2_ ## MMX(dst, src+stride, stride, SIZE);\
01640 }\
01641 QPEL_2TAP_L3(OPNAME, SIZE, MMX, 10, 0, 1, 0)\
01642 QPEL_2TAP_L3(OPNAME, SIZE, MMX, 30, 1, -1, 0)\
01643 QPEL_2TAP_L3(OPNAME, SIZE, MMX, 01, 0, stride, 0)\
01644 QPEL_2TAP_L3(OPNAME, SIZE, MMX, 03, stride, -stride, 0)\
01645 QPEL_2TAP_L3(OPNAME, SIZE, MMX, 11, 0, stride, 1)\
01646 QPEL_2TAP_L3(OPNAME, SIZE, MMX, 31, 1, stride, -1)\
01647 QPEL_2TAP_L3(OPNAME, SIZE, MMX, 13, stride, -stride, 1)\
01648 QPEL_2TAP_L3(OPNAME, SIZE, MMX, 33, stride+1, -stride, -1)
01649
01650 QPEL_2TAP(put_, 16, mmx2)
01651 QPEL_2TAP(avg_, 16, mmx2)
01652 QPEL_2TAP(put_, 8, mmx2)
01653 QPEL_2TAP(avg_, 8, mmx2)
01654 QPEL_2TAP(put_, 16, 3dnow)
01655 QPEL_2TAP(avg_, 16, 3dnow)
01656 QPEL_2TAP(put_, 8, 3dnow)
01657 QPEL_2TAP(avg_, 8, 3dnow)
01658
01659
01660 #if 0
01661 static void just_return(void) { return; }
01662 #endif
01663
01664 #if HAVE_YASM
01665 typedef void emu_edge_core_func (uint8_t *buf, const uint8_t *src,
01666 x86_reg linesize, x86_reg start_y,
01667 x86_reg end_y, x86_reg block_h,
01668 x86_reg start_x, x86_reg end_x,
01669 x86_reg block_w);
01670 extern emu_edge_core_func ff_emu_edge_core_mmx;
01671 extern emu_edge_core_func ff_emu_edge_core_sse;
01672
01673 static av_always_inline
01674 void emulated_edge_mc(uint8_t *buf, const uint8_t *src, int linesize,
01675 int block_w, int block_h,
01676 int src_x, int src_y, int w, int h,
01677 emu_edge_core_func *core_fn)
01678 {
01679 int start_y, start_x, end_y, end_x, src_y_add=0;
01680
01681 if(src_y>= h){
01682 src_y_add = h-1-src_y;
01683 src_y=h-1;
01684 }else if(src_y<=-block_h){
01685 src_y_add = 1-block_h-src_y;
01686 src_y=1-block_h;
01687 }
01688 if(src_x>= w){
01689 src+= (w-1-src_x);
01690 src_x=w-1;
01691 }else if(src_x<=-block_w){
01692 src+= (1-block_w-src_x);
01693 src_x=1-block_w;
01694 }
01695
01696 start_y= FFMAX(0, -src_y);
01697 start_x= FFMAX(0, -src_x);
01698 end_y= FFMIN(block_h, h-src_y);
01699 end_x= FFMIN(block_w, w-src_x);
01700 assert(start_x < end_x && block_w > 0);
01701 assert(start_y < end_y && block_h > 0);
01702
01703
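    /* point src at the first row/column that is actually inside the picture;
     * the core function fills the remainder by edge replication */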
01704 src += (src_y_add+start_y)*linesize + start_x;
01705 buf += start_x;
01706 core_fn(buf, src, linesize, start_y, end_y, block_h, start_x, end_x, block_w);
01707 }
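/* Usage sketch (hypothetical caller; pic, pic_w, pic_h, edge_buf and
 * pic_ptr are assumed names): redirect a motion-compensated read through an
 * edge buffer whenever the block pokes outside the picture:
 *
 *   if (src_x < 0 || src_y < 0 ||
 *       src_x + block_w > pic_w || src_y + block_h > pic_h) {
 *       emulated_edge_mc_sse(edge_buf, pic + src_y*linesize + src_x,
 *                            linesize, block_w, block_h,
 *                            src_x, src_y, pic_w, pic_h);
 *       pic_ptr = edge_buf;
 *   }
 */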
01708
01709 #if ARCH_X86_32
01710 static av_noinline
01711 void emulated_edge_mc_mmx(uint8_t *buf, const uint8_t *src, int linesize,
01712 int block_w, int block_h,
01713 int src_x, int src_y, int w, int h)
01714 {
01715 emulated_edge_mc(buf, src, linesize, block_w, block_h, src_x, src_y,
01716 w, h, &ff_emu_edge_core_mmx);
01717 }
01718 #endif
01719 static av_noinline
01720 void emulated_edge_mc_sse(uint8_t *buf, const uint8_t *src, int linesize,
01721 int block_w, int block_h,
01722 int src_x, int src_y, int w, int h)
01723 {
01724 emulated_edge_mc(buf, src, linesize, block_w, block_h, src_x, src_y,
01725 w, h, &ff_emu_edge_core_sse);
01726 }
01727 #endif
01728
01729 typedef void emulated_edge_mc_func (uint8_t *dst, const uint8_t *src,
01730 int linesize, int block_w, int block_h,
01731 int src_x, int src_y, int w, int h);
01732
01733 static av_always_inline
01734 void gmc(uint8_t *dst, uint8_t *src, int stride, int h, int ox, int oy,
01735 int dxx, int dxy, int dyx, int dyy, int shift, int r, int width, int height,
01736 emulated_edge_mc_func *emu_edge_fn)
01737 {
01738 const int w = 8;
01739 const int ix = ox>>(16+shift);
01740 const int iy = oy>>(16+shift);
01741 const int oxs = ox>>4;
01742 const int oys = oy>>4;
01743 const int dxxs = dxx>>4;
01744 const int dxys = dxy>>4;
01745 const int dyxs = dyx>>4;
01746 const int dyys = dyy>>4;
01747 const uint16_t r4[4] = {r,r,r,r};
01748 const uint16_t dxy4[4] = {dxys,dxys,dxys,dxys};
01749 const uint16_t dyy4[4] = {dyys,dyys,dyys,dyys};
01750 const uint64_t shift2 = 2*shift;
01751 uint8_t edge_buf[(h+1)*stride];
01752 int x, y;
01753
01754 const int dxw = (dxx-(1<<(16+shift)))*(w-1);
01755 const int dyh = (dyy-(1<<(16+shift)))*(h-1);
01756 const int dxh = dxy*(h-1);
01757 const int dyw = dyx*(w-1);
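    /* bail out to the C version if the fullpel offset is not constant over
     * the block, or if the subpel vectors need more than 16 bits (low 4 bits
     * of dxx/dxy/dyx/dyy would be lost by the >>4 scaling above) */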
01758 if(
01759 ((ox^(ox+dxw)) | (ox^(ox+dxh)) | (ox^(ox+dxw+dxh)) |
01760 (oy^(oy+dyw)) | (oy^(oy+dyh)) | (oy^(oy+dyw+dyh))) >> (16+shift)
01761
01762 || (dxx|dxy|dyx|dyy)&15 )
01763 {
01764
01765 ff_gmc_c(dst, src, stride, h, ox, oy, dxx, dxy, dyx, dyy, shift, r, width, height);
01766 return;
01767 }
01768
01769 src += ix + iy*stride;
01770 if( (unsigned)ix >= width-w ||
01771 (unsigned)iy >= height-h )
01772 {
01773 emu_edge_fn(edge_buf, src, stride, w+1, h+1, ix, iy, width, height);
01774 src = edge_buf;
01775 }
01776
01777 __asm__ volatile(
01778 "movd %0, %%mm6 \n\t"
01779 "pxor %%mm7, %%mm7 \n\t"
01780 "punpcklwd %%mm6, %%mm6 \n\t"
01781 "punpcklwd %%mm6, %%mm6 \n\t"
01782 :: "r"(1<<shift)
01783 );
01784
01785 for(x=0; x<w; x+=4){
01786 uint16_t dx4[4] = { oxs - dxys + dxxs*(x+0),
01787 oxs - dxys + dxxs*(x+1),
01788 oxs - dxys + dxxs*(x+2),
01789 oxs - dxys + dxxs*(x+3) };
01790 uint16_t dy4[4] = { oys - dyys + dyxs*(x+0),
01791 oys - dyys + dyxs*(x+1),
01792 oys - dyys + dyxs*(x+2),
01793 oys - dyys + dyxs*(x+3) };
01794
01795 for(y=0; y<h; y++){
01796 __asm__ volatile(
01797 "movq %0, %%mm4 \n\t"
01798 "movq %1, %%mm5 \n\t"
01799 "paddw %2, %%mm4 \n\t"
01800 "paddw %3, %%mm5 \n\t"
01801 "movq %%mm4, %0 \n\t"
01802 "movq %%mm5, %1 \n\t"
01803 "psrlw $12, %%mm4 \n\t"
01804 "psrlw $12, %%mm5 \n\t"
01805 : "+m"(*dx4), "+m"(*dy4)
01806 : "m"(*dxy4), "m"(*dyy4)
01807 );
01808
01809 __asm__ volatile(
01810 "movq %%mm6, %%mm2 \n\t"
01811 "movq %%mm6, %%mm1 \n\t"
01812 "psubw %%mm4, %%mm2 \n\t"
01813 "psubw %%mm5, %%mm1 \n\t"
01814 "movq %%mm2, %%mm0 \n\t"
01815 "movq %%mm4, %%mm3 \n\t"
01816 "pmullw %%mm1, %%mm0 \n\t"
01817 "pmullw %%mm5, %%mm3 \n\t"
01818 "pmullw %%mm5, %%mm2 \n\t"
01819 "pmullw %%mm4, %%mm1 \n\t"
01820
01821 "movd %4, %%mm5 \n\t"
01822 "movd %3, %%mm4 \n\t"
01823 "punpcklbw %%mm7, %%mm5 \n\t"
01824 "punpcklbw %%mm7, %%mm4 \n\t"
01825 "pmullw %%mm5, %%mm3 \n\t"
01826 "pmullw %%mm4, %%mm2 \n\t"
01827
01828 "movd %2, %%mm5 \n\t"
01829 "movd %1, %%mm4 \n\t"
01830 "punpcklbw %%mm7, %%mm5 \n\t"
01831 "punpcklbw %%mm7, %%mm4 \n\t"
01832 "pmullw %%mm5, %%mm1 \n\t"
01833 "pmullw %%mm4, %%mm0 \n\t"
01834 "paddw %5, %%mm1 \n\t"
01835 "paddw %%mm3, %%mm2 \n\t"
01836 "paddw %%mm1, %%mm0 \n\t"
01837 "paddw %%mm2, %%mm0 \n\t"
01838
01839 "psrlw %6, %%mm0 \n\t"
01840 "packuswb %%mm0, %%mm0 \n\t"
01841 "movd %%mm0, %0 \n\t"
01842
01843 : "=m"(dst[x+y*stride])
01844 : "m"(src[0]), "m"(src[1]),
01845 "m"(src[stride]), "m"(src[stride+1]),
01846 "m"(*r4), "m"(shift2)
01847 );
01848 src += stride;
01849 }
01850 src += 4-h*stride;
01851 }
01852 }
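/* Per pixel, the MMX loop above computes the same bilinear blend as the C
 * fallback ff_gmc_c, with s = 1<<shift and (frac_x, frac_y) the subpel
 * fractions held in mm4/mm5:
 *
 *   dst[x] = ( (src[0]     *(s-frac_x) + src[1]       *frac_x) * (s-frac_y)
 *            + (src[stride]*(s-frac_x) + src[stride+1]*frac_x) *  frac_y
 *            + r ) >> (2*shift);
 */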
01853
01854 #if HAVE_YASM
01855 #if ARCH_X86_32
01856 static void gmc_mmx(uint8_t *dst, uint8_t *src, int stride, int h, int ox, int oy,
01857 int dxx, int dxy, int dyx, int dyy, int shift, int r, int width, int height)
01858 {
01859 gmc(dst, src, stride, h, ox, oy, dxx, dxy, dyx, dyy, shift, r,
01860 width, height, &emulated_edge_mc_mmx);
01861 }
01862 #endif
01863 static void gmc_sse(uint8_t *dst, uint8_t *src, int stride, int h, int ox, int oy,
01864 int dxx, int dxy, int dyx, int dyy, int shift, int r, int width, int height)
01865 {
01866 gmc(dst, src, stride, h, ox, oy, dxx, dxy, dyx, dyy, shift, r,
01867 width, height, &emulated_edge_mc_sse);
01868 }
01869 #else
01870 static void gmc_mmx(uint8_t *dst, uint8_t *src, int stride, int h, int ox, int oy,
01871 int dxx, int dxy, int dyx, int dyy, int shift, int r, int width, int height)
01872 {
01873 gmc(dst, src, stride, h, ox, oy, dxx, dxy, dyx, dyy, shift, r,
01874 width, height, &ff_emulated_edge_mc);
01875 }
01876 #endif
01877
01878 #define PREFETCH(name, op) \
01879 static void name(void *mem, int stride, int h){\
01880 const uint8_t *p= mem;\
01881 do{\
01882 __asm__ volatile(#op" %0" :: "m"(*p));\
01883 p+= stride;\
01884 }while(--h);\
01885 }
01886 PREFETCH(prefetch_mmx2, prefetcht0)
01887 PREFETCH(prefetch_3dnow, prefetch)
01888 #undef PREFETCH
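/* Expansion example: PREFETCH(prefetch_mmx2, prefetcht0) produces the
 * following function, issuing one prefetch per row of the upcoming block:
 *
 *   static void prefetch_mmx2(void *mem, int stride, int h){
 *       const uint8_t *p= mem;
 *       do{
 *           __asm__ volatile("prefetcht0 %0" :: "m"(*p));
 *           p+= stride;
 *       }while(--h);
 *   }
 */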
01889
01890 #include "h264_qpel_mmx.c"
01891
01892 void ff_put_h264_chroma_mc8_mmx_rnd (uint8_t *dst, uint8_t *src,
01893 int stride, int h, int x, int y);
01894 void ff_put_rv40_chroma_mc8_mmx (uint8_t *dst, uint8_t *src,
01895 int stride, int h, int x, int y);
01896 void ff_avg_h264_chroma_mc8_mmx2_rnd (uint8_t *dst, uint8_t *src,
01897 int stride, int h, int x, int y);
01898 void ff_avg_rv40_chroma_mc8_mmx2 (uint8_t *dst, uint8_t *src,
01899 int stride, int h, int x, int y);
01900 void ff_avg_h264_chroma_mc8_3dnow_rnd (uint8_t *dst, uint8_t *src,
01901 int stride, int h, int x, int y);
01902 void ff_avg_rv40_chroma_mc8_3dnow (uint8_t *dst, uint8_t *src,
01903 int stride, int h, int x, int y);
01904
01905 void ff_put_h264_chroma_mc4_mmx (uint8_t *dst, uint8_t *src,
01906 int stride, int h, int x, int y);
01907 void ff_put_rv40_chroma_mc4_mmx (uint8_t *dst, uint8_t *src,
01908 int stride, int h, int x, int y);
01909 void ff_avg_h264_chroma_mc4_mmx2 (uint8_t *dst, uint8_t *src,
01910 int stride, int h, int x, int y);
01911 void ff_avg_rv40_chroma_mc4_mmx2 (uint8_t *dst, uint8_t *src,
01912 int stride, int h, int x, int y);
01913 void ff_avg_h264_chroma_mc4_3dnow (uint8_t *dst, uint8_t *src,
01914 int stride, int h, int x, int y);
01915 void ff_avg_rv40_chroma_mc4_3dnow (uint8_t *dst, uint8_t *src,
01916 int stride, int h, int x, int y);
01917
01918 void ff_put_h264_chroma_mc2_mmx2 (uint8_t *dst, uint8_t *src,
01919 int stride, int h, int x, int y);
01920 void ff_avg_h264_chroma_mc2_mmx2 (uint8_t *dst, uint8_t *src,
01921 int stride, int h, int x, int y);
01922
01923 void ff_put_h264_chroma_mc8_ssse3_rnd (uint8_t *dst, uint8_t *src,
01924 int stride, int h, int x, int y);
01925 void ff_put_h264_chroma_mc4_ssse3 (uint8_t *dst, uint8_t *src,
01926 int stride, int h, int x, int y);
01927
01928 void ff_avg_h264_chroma_mc8_ssse3_rnd (uint8_t *dst, uint8_t *src,
01929 int stride, int h, int x, int y);
01930 void ff_avg_h264_chroma_mc4_ssse3 (uint8_t *dst, uint8_t *src,
01931 int stride, int h, int x, int y);
01932
01933
01934
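/* CAVS specific: full-pel (mc00) copy/average wrappers */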
01935 void ff_put_cavs_qpel8_mc00_mmx2(uint8_t *dst, uint8_t *src, int stride) {
01936 put_pixels8_mmx(dst, src, stride, 8);
01937 }
01938 void ff_avg_cavs_qpel8_mc00_mmx2(uint8_t *dst, uint8_t *src, int stride) {
01939 avg_pixels8_mmx(dst, src, stride, 8);
01940 }
01941 void ff_put_cavs_qpel16_mc00_mmx2(uint8_t *dst, uint8_t *src, int stride) {
01942 put_pixels16_mmx(dst, src, stride, 16);
01943 }
01944 void ff_avg_cavs_qpel16_mc00_mmx2(uint8_t *dst, uint8_t *src, int stride) {
01945 avg_pixels16_mmx(dst, src, stride, 16);
01946 }
01947
01948
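/* VC-1 specific: full-pel mspel wrappers; rnd is unused in the mc00 case */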
01949 void ff_put_vc1_mspel_mc00_mmx(uint8_t *dst, const uint8_t *src, int stride, int rnd) {
01950 put_pixels8_mmx(dst, src, stride, 8);
01951 }
01952 void ff_avg_vc1_mspel_mc00_mmx2(uint8_t *dst, const uint8_t *src, int stride, int rnd) {
01953 avg_pixels8_mmx2(dst, src, stride, 8);
01954 }
01955
01956
01957
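/* IDCT wrappers: run the IDCT on the coefficient block in place, then
 * clamp-store (put) or clamp-add (add) the result to the destination */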
01958 #if CONFIG_GPL
01959 static void ff_libmpeg2mmx_idct_put(uint8_t *dest, int line_size, DCTELEM *block)
01960 {
01961 ff_mmx_idct (block);
01962 ff_put_pixels_clamped_mmx(block, dest, line_size);
01963 }
01964 static void ff_libmpeg2mmx_idct_add(uint8_t *dest, int line_size, DCTELEM *block)
01965 {
01966 ff_mmx_idct (block);
01967 ff_add_pixels_clamped_mmx(block, dest, line_size);
01968 }
01969 static void ff_libmpeg2mmx2_idct_put(uint8_t *dest, int line_size, DCTELEM *block)
01970 {
01971 ff_mmxext_idct (block);
01972 ff_put_pixels_clamped_mmx(block, dest, line_size);
01973 }
01974 static void ff_libmpeg2mmx2_idct_add(uint8_t *dest, int line_size, DCTELEM *block)
01975 {
01976 ff_mmxext_idct (block);
01977 ff_add_pixels_clamped_mmx(block, dest, line_size);
01978 }
01979 #endif
01980 static void ff_idct_xvid_mmx_put(uint8_t *dest, int line_size, DCTELEM *block)
01981 {
01982 ff_idct_xvid_mmx (block);
01983 ff_put_pixels_clamped_mmx(block, dest, line_size);
01984 }
01985 static void ff_idct_xvid_mmx_add(uint8_t *dest, int line_size, DCTELEM *block)
01986 {
01987 ff_idct_xvid_mmx (block);
01988 ff_add_pixels_clamped_mmx(block, dest, line_size);
01989 }
01990 static void ff_idct_xvid_mmx2_put(uint8_t *dest, int line_size, DCTELEM *block)
01991 {
01992 ff_idct_xvid_mmx2 (block);
01993 ff_put_pixels_clamped_mmx(block, dest, line_size);
01994 }
01995 static void ff_idct_xvid_mmx2_add(uint8_t *dest, int line_size, DCTELEM *block)
01996 {
01997 ff_idct_xvid_mmx2 (block);
01998 ff_add_pixels_clamped_mmx(block, dest, line_size);
01999 }
02000
02001 static void vorbis_inverse_coupling_3dnow(float *mag, float *ang, int blocksize)
02002 {
02003 int i;
02004 __asm__ volatile("pxor %%mm7, %%mm7":);
02005 for(i=0; i<blocksize; i+=2) {
02006 __asm__ volatile(
02007 "movq %0, %%mm0 \n\t"
02008 "movq %1, %%mm1 \n\t"
02009 "movq %%mm0, %%mm2 \n\t"
02010 "movq %%mm1, %%mm3 \n\t"
02011 "pfcmpge %%mm7, %%mm2 \n\t"
02012 "pfcmpge %%mm7, %%mm3 \n\t"
02013 "pslld $31, %%mm2 \n\t"
02014 "pxor %%mm2, %%mm1 \n\t"
02015 "movq %%mm3, %%mm4 \n\t"
02016 "pand %%mm1, %%mm3 \n\t"
02017 "pandn %%mm1, %%mm4 \n\t"
02018 "pfadd %%mm0, %%mm3 \n\t"
02019 "pfsub %%mm4, %%mm0 \n\t"
02020 "movq %%mm3, %1 \n\t"
02021 "movq %%mm0, %0 \n\t"
02022 :"+m"(mag[i]), "+m"(ang[i])
02023 ::"memory"
02024 );
02025 }
02026 __asm__ volatile("femms");
02027 }
02028 static void vorbis_inverse_coupling_sse(float *mag, float *ang, int blocksize)
02029 {
02030 int i;
02031
02032 __asm__ volatile(
02033 "movaps %0, %%xmm5 \n\t"
02034 ::"m"(ff_pdw_80000000[0])
02035 );
02036 for(i=0; i<blocksize; i+=4) {
02037 __asm__ volatile(
02038 "movaps %0, %%xmm0 \n\t"
02039 "movaps %1, %%xmm1 \n\t"
02040 "xorps %%xmm2, %%xmm2 \n\t"
02041 "xorps %%xmm3, %%xmm3 \n\t"
02042 "cmpleps %%xmm0, %%xmm2 \n\t"
02043 "cmpleps %%xmm1, %%xmm3 \n\t"
02044 "andps %%xmm5, %%xmm2 \n\t"
02045 "xorps %%xmm2, %%xmm1 \n\t"
02046 "movaps %%xmm3, %%xmm4 \n\t"
02047 "andps %%xmm1, %%xmm3 \n\t"
02048 "andnps %%xmm1, %%xmm4 \n\t"
02049 "addps %%xmm0, %%xmm3 \n\t"
02050 "subps %%xmm4, %%xmm0 \n\t"
02051 "movaps %%xmm3, %1 \n\t"
02052 "movaps %%xmm0, %0 \n\t"
02053 :"+m"(mag[i]), "+m"(ang[i])
02054 ::"memory"
02055 );
02056 }
02057 }
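/* Scalar model of what both coupling kernels above compute (this mirrors the
 * C fallback used elsewhere in libavcodec; sketch only, not built): */
#if 0
static void vorbis_inverse_coupling_ref(float *mag, float *ang, int blocksize)
{
    int i;
    for (i = 0; i < blocksize; i++) {
        if (mag[i] > 0.0) {
            if (ang[i] > 0.0) {
                ang[i] = mag[i] - ang[i];
            } else {
                float t = ang[i];
                ang[i]  = mag[i];
                mag[i] += t;
            }
        } else {
            if (ang[i] > 0.0) {
                ang[i] += mag[i];
            } else {
                float t = ang[i];
                ang[i]  = mag[i];
                mag[i] -= t;
            }
        }
    }
}
#endif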
02058
02059 #define IF1(x) x
02060 #define IF0(x)
02061
02062 #define MIX5(mono,stereo)\
02063 __asm__ volatile(\
02064 "movss 0(%2), %%xmm5 \n"\
02065 "movss 8(%2), %%xmm6 \n"\
02066 "movss 24(%2), %%xmm7 \n"\
02067 "shufps $0, %%xmm5, %%xmm5 \n"\
02068 "shufps $0, %%xmm6, %%xmm6 \n"\
02069 "shufps $0, %%xmm7, %%xmm7 \n"\
02070 "1: \n"\
02071 "movaps (%0,%1), %%xmm0 \n"\
02072 "movaps 0x400(%0,%1), %%xmm1 \n"\
02073 "movaps 0x800(%0,%1), %%xmm2 \n"\
02074 "movaps 0xc00(%0,%1), %%xmm3 \n"\
02075 "movaps 0x1000(%0,%1), %%xmm4 \n"\
02076 "mulps %%xmm5, %%xmm0 \n"\
02077 "mulps %%xmm6, %%xmm1 \n"\
02078 "mulps %%xmm5, %%xmm2 \n"\
02079 "mulps %%xmm7, %%xmm3 \n"\
02080 "mulps %%xmm7, %%xmm4 \n"\
02081 stereo("addps %%xmm1, %%xmm0 \n")\
02082 "addps %%xmm1, %%xmm2 \n"\
02083 "addps %%xmm3, %%xmm0 \n"\
02084 "addps %%xmm4, %%xmm2 \n"\
02085 mono("addps %%xmm2, %%xmm0 \n")\
02086 "movaps %%xmm0, (%0,%1) \n"\
02087 stereo("movaps %%xmm2, 0x400(%0,%1) \n")\
02088 "add $16, %0 \n"\
02089 "jl 1b \n"\
02090 :"+&r"(i)\
02091 :"r"(samples[0]+len), "r"(matrix)\
02092 :XMM_CLOBBERS("%xmm0", "%xmm1", "%xmm2", "%xmm3", \
02093 "%xmm4", "%xmm5", "%xmm6", "%xmm7",)\
02094 "memory"\
02095 );
02096
02097 #define MIX_MISC(stereo)\
02098 __asm__ volatile(\
02099 "1: \n"\
02100 "movaps (%3,%0), %%xmm0 \n"\
02101 stereo("movaps %%xmm0, %%xmm1 \n")\
02102 "mulps %%xmm4, %%xmm0 \n"\
02103 stereo("mulps %%xmm5, %%xmm1 \n")\
02104 "lea 1024(%3,%0), %1 \n"\
02105 "mov %5, %2 \n"\
02106 "2: \n"\
02107 "movaps (%1), %%xmm2 \n"\
02108 stereo("movaps %%xmm2, %%xmm3 \n")\
02109 "mulps (%4,%2), %%xmm2 \n"\
02110 stereo("mulps 16(%4,%2), %%xmm3 \n")\
02111 "addps %%xmm2, %%xmm0 \n"\
02112 stereo("addps %%xmm3, %%xmm1 \n")\
02113 "add $1024, %1 \n"\
02114 "add $32, %2 \n"\
02115 "jl 2b \n"\
02116 "movaps %%xmm0, (%3,%0) \n"\
02117 stereo("movaps %%xmm1, 1024(%3,%0) \n")\
02118 "add $16, %0 \n"\
02119 "jl 1b \n"\
02120 :"+&r"(i), "=&r"(j), "=&r"(k)\
02121 :"r"(samples[0]+len), "r"(matrix_simd+in_ch), "g"((intptr_t)-32*(in_ch-1))\
02122 :"memory"\
02123 );
02124
02125 static void ac3_downmix_sse(float (*samples)[256], float (*matrix)[2], int out_ch, int in_ch, int len)
02126 {
02127 int (*matrix_cmp)[2] = (int(*)[2])matrix;
02128 intptr_t i,j,k;
02129
02130 i = -len*sizeof(float);
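    /* compare the float coefficients bit-wise as ints: the first branch
     * matches the standard 5.0->2.0 matrix (zero cross-feed terms, equal
     * center gains, equal front gains), the second a 5.0->1.0 downmix */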
02131 if(in_ch == 5 && out_ch == 2 && !(matrix_cmp[0][1]|matrix_cmp[2][0]|matrix_cmp[3][1]|matrix_cmp[4][0]|(matrix_cmp[1][0]^matrix_cmp[1][1])|(matrix_cmp[0][0]^matrix_cmp[2][1]))) {
02132 MIX5(IF0,IF1);
02133 } else if(in_ch == 5 && out_ch == 1 && matrix_cmp[0][0]==matrix_cmp[2][0] && matrix_cmp[3][0]==matrix_cmp[4][0]) {
02134 MIX5(IF1,IF0);
02135 } else {
02136 DECLARE_ALIGNED(16, float, matrix_simd)[AC3_MAX_CHANNELS][2][4];
02137 j = 2*in_ch*sizeof(float);
02138 __asm__ volatile(
02139 "1: \n"
02140 "sub $8, %0 \n"
02141 "movss (%2,%0), %%xmm4 \n"
02142 "movss 4(%2,%0), %%xmm5 \n"
02143 "shufps $0, %%xmm4, %%xmm4 \n"
02144 "shufps $0, %%xmm5, %%xmm5 \n"
02145 "movaps %%xmm4, (%1,%0,4) \n"
02146 "movaps %%xmm5, 16(%1,%0,4) \n"
02147 "jg 1b \n"
02148 :"+&r"(j)
02149 :"r"(matrix_simd), "r"(matrix)
02150 :"memory"
02151 );
02152 if(out_ch == 2) {
02153 MIX_MISC(IF1);
02154 } else {
02155 MIX_MISC(IF0);
02156 }
02157 }
02158 }
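/* What the MIX5/MIX_MISC kernels compute, in scalar form (sketch, not built;
 * each channel is a 256-float row of samples, hence the 0x400-byte strides
 * in the asm): */
#if 0
static void ac3_downmix_ref(float (*samples)[256], float (*matrix)[2],
                            int out_ch, int in_ch, int len)
{
    int i, c;
    for (i = 0; i < len; i++) {
        float v0 = 0, v1 = 0;
        for (c = 0; c < in_ch; c++) {
            v0 += samples[c][i] * matrix[c][0];
            v1 += samples[c][i] * matrix[c][1];
        }
        samples[0][i] = v0;
        if (out_ch == 2)
            samples[1][i] = v1;
    }
}
#endif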
02159
02160 static void vector_fmul_3dnow(float *dst, const float *src0, const float *src1, int len){
02161 x86_reg i = (len-4)*4;
02162 __asm__ volatile(
02163 "1: \n\t"
02164 "movq (%2,%0), %%mm0 \n\t"
02165 "movq 8(%2,%0), %%mm1 \n\t"
02166 "pfmul (%3,%0), %%mm0 \n\t"
02167 "pfmul 8(%3,%0), %%mm1 \n\t"
02168 "movq %%mm0, (%1,%0) \n\t"
02169 "movq %%mm1, 8(%1,%0) \n\t"
02170 "sub $16, %0 \n\t"
02171 "jge 1b \n\t"
02172 "femms \n\t"
02173 :"+r"(i)
02174 :"r"(dst), "r"(src0), "r"(src1)
02175 :"memory"
02176 );
02177 }
02178 static void vector_fmul_sse(float *dst, const float *src0, const float *src1, int len){
02179 x86_reg i = (len-8)*4;
02180 __asm__ volatile(
02181 "1: \n\t"
02182 "movaps (%2,%0), %%xmm0 \n\t"
02183 "movaps 16(%2,%0), %%xmm1 \n\t"
02184 "mulps (%3,%0), %%xmm0 \n\t"
02185 "mulps 16(%3,%0), %%xmm1 \n\t"
02186 "movaps %%xmm0, (%1,%0) \n\t"
02187 "movaps %%xmm1, 16(%1,%0) \n\t"
02188 "sub $32, %0 \n\t"
02189 "jge 1b \n\t"
02190 :"+r"(i)
02191 :"r"(dst), "r"(src0), "r"(src1)
02192 :"memory"
02193 );
02194 }
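/* Scalar equivalent of both vector_fmul kernels: dst[i] = src0[i]*src1[i]
 * for i in [0,len). The loops walk backwards in 16/32-byte chunks, so len is
 * assumed to be a multiple of 4 (3DNow!) resp. 8 (SSE) and the arrays
 * suitably aligned for movq/movaps. */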
02195
02196 static void vector_fmul_reverse_3dnow2(float *dst, const float *src0, const float *src1, int len){
02197 x86_reg i = len*4-16;
02198 __asm__ volatile(
02199 "1: \n\t"
02200 "pswapd 8(%1), %%mm0 \n\t"
02201 "pswapd (%1), %%mm1 \n\t"
02202 "pfmul (%3,%0), %%mm0 \n\t"
02203 "pfmul 8(%3,%0), %%mm1 \n\t"
02204 "movq %%mm0, (%2,%0) \n\t"
02205 "movq %%mm1, 8(%2,%0) \n\t"
02206 "add $16, %1 \n\t"
02207 "sub $16, %0 \n\t"
02208 "jge 1b \n\t"
02209 :"+r"(i), "+r"(src1)
02210 :"r"(dst), "r"(src0) :"memory"
02211 );
02212 __asm__ volatile("femms");
02213 }
02214 static void vector_fmul_reverse_sse(float *dst, const float *src0, const float *src1, int len){
02215 x86_reg i = len*4-32;
02216 __asm__ volatile(
02217 "1: \n\t"
02218 "movaps 16(%1), %%xmm0 \n\t"
02219 "movaps (%1), %%xmm1 \n\t"
02220 "shufps $0x1b, %%xmm0, %%xmm0 \n\t"
02221 "shufps $0x1b, %%xmm1, %%xmm1 \n\t"
02222 "mulps (%3,%0), %%xmm0 \n\t"
02223 "mulps 16(%3,%0), %%xmm1 \n\t"
02224 "movaps %%xmm0, (%2,%0) \n\t"
02225 "movaps %%xmm1, 16(%2,%0) \n\t"
02226 "add $32, %1 \n\t"
02227 "sub $32, %0 \n\t"
02228 "jge 1b \n\t"
02229 :"+r"(i), "+r"(src1)
02230 :"r"(dst), "r"(src0) :"memory"
02231 );
02232 }
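/* Scalar equivalent: dst[i] = src0[i] * src1[len-1-i]; src1 is consumed
 * forwards while dst/src0 are filled from the end, with pswapd resp.
 * shufps $0x1b reversing each 2- or 4-float group. */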
02233
02234 static void vector_fmul_add_3dnow(float *dst, const float *src0, const float *src1,
02235 const float *src2, int len){
02236 x86_reg i = (len-4)*4;
02237 __asm__ volatile(
02238 "1: \n\t"
02239 "movq (%2,%0), %%mm0 \n\t"
02240 "movq 8(%2,%0), %%mm1 \n\t"
02241 "pfmul (%3,%0), %%mm0 \n\t"
02242 "pfmul 8(%3,%0), %%mm1 \n\t"
02243 "pfadd (%4,%0), %%mm0 \n\t"
02244 "pfadd 8(%4,%0), %%mm1 \n\t"
02245 "movq %%mm0, (%1,%0) \n\t"
02246 "movq %%mm1, 8(%1,%0) \n\t"
02247 "sub $16, %0 \n\t"
02248 "jge 1b \n\t"
02249 :"+r"(i)
02250 :"r"(dst), "r"(src0), "r"(src1), "r"(src2)
02251 :"memory"
02252 );
02253 __asm__ volatile("femms");
02254 }
02255 static void vector_fmul_add_sse(float *dst, const float *src0, const float *src1,
02256 const float *src2, int len){
02257 x86_reg i = (len-8)*4;
02258 __asm__ volatile(
02259 "1: \n\t"
02260 "movaps (%2,%0), %%xmm0 \n\t"
02261 "movaps 16(%2,%0), %%xmm1 \n\t"
02262 "mulps (%3,%0), %%xmm0 \n\t"
02263 "mulps 16(%3,%0), %%xmm1 \n\t"
02264 "addps (%4,%0), %%xmm0 \n\t"
02265 "addps 16(%4,%0), %%xmm1 \n\t"
02266 "movaps %%xmm0, (%1,%0) \n\t"
02267 "movaps %%xmm1, 16(%1,%0) \n\t"
02268 "sub $32, %0 \n\t"
02269 "jge 1b \n\t"
02270 :"+r"(i)
02271 :"r"(dst), "r"(src0), "r"(src1), "r"(src2)
02272 :"memory"
02273 );
02274 }
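/* Scalar equivalent: dst[i] = src0[i]*src1[i] + src2[i], with the same
 * alignment and len-multiple assumptions as vector_fmul above. */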
02275
02276 #if HAVE_6REGS
02277 static void vector_fmul_window_3dnow2(float *dst, const float *src0, const float *src1,
02278 const float *win, int len){
02279 x86_reg i = -len*4;
02280 x86_reg j = len*4-8;
02281 __asm__ volatile(
02282 "1: \n"
02283 "pswapd (%5,%1), %%mm1 \n"
02284 "movq (%5,%0), %%mm0 \n"
02285 "pswapd (%4,%1), %%mm5 \n"
02286 "movq (%3,%0), %%mm4 \n"
02287 "movq %%mm0, %%mm2 \n"
02288 "movq %%mm1, %%mm3 \n"
02289 "pfmul %%mm4, %%mm2 \n"
02290 "pfmul %%mm5, %%mm3 \n"
02291 "pfmul %%mm4, %%mm1 \n"
02292 "pfmul %%mm5, %%mm0 \n"
02293 "pfadd %%mm3, %%mm2 \n"
02294 "pfsub %%mm0, %%mm1 \n"
02295 "pswapd %%mm2, %%mm2 \n"
02296 "movq %%mm1, (%2,%0) \n"
02297 "movq %%mm2, (%2,%1) \n"
02298 "sub $8, %1 \n"
02299 "add $8, %0 \n"
02300 "jl 1b \n"
02301 "femms \n"
02302 :"+r"(i), "+r"(j)
02303 :"r"(dst+len), "r"(src0+len), "r"(src1), "r"(win+len) :"memory"
02304 );
02305 }
02306
02307 static void vector_fmul_window_sse(float *dst, const float *src0, const float *src1,
02308 const float *win, int len){
02309 x86_reg i = -len*4;
02310 x86_reg j = len*4-16;
02311 __asm__ volatile(
02312 "1: \n"
02313 "movaps (%5,%1), %%xmm1 \n"
02314 "movaps (%5,%0), %%xmm0 \n"
02315 "movaps (%4,%1), %%xmm5 \n"
02316 "movaps (%3,%0), %%xmm4 \n"
02317 "shufps $0x1b, %%xmm1, %%xmm1 \n"
02318 "shufps $0x1b, %%xmm5, %%xmm5 \n"
02319 "movaps %%xmm0, %%xmm2 \n"
02320 "movaps %%xmm1, %%xmm3 \n"
02321 "mulps %%xmm4, %%xmm2 \n"
02322 "mulps %%xmm5, %%xmm3 \n"
02323 "mulps %%xmm4, %%xmm1 \n"
02324 "mulps %%xmm5, %%xmm0 \n"
02325 "addps %%xmm3, %%xmm2 \n"
02326 "subps %%xmm0, %%xmm1 \n"
02327 "shufps $0x1b, %%xmm2, %%xmm2 \n"
02328 "movaps %%xmm1, (%2,%0) \n"
02329 "movaps %%xmm2, (%2,%1) \n"
02330 "sub $16, %1 \n"
02331 "add $16, %0 \n"
02332 "jl 1b \n"
02333 :"+r"(i), "+r"(j)
02334 :"r"(dst+len), "r"(src0+len), "r"(src1), "r"(win+len) :"memory"
02335 );
02336 }
02337 #endif
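/* Scalar model of vector_fmul_window (sketch, not built): win holds 2*len
 * coefficients and the two output halves are produced pairwise, which is
 * exactly the register dance in the kernels above: */
#if 0
static void vector_fmul_window_ref(float *dst, const float *src0,
                                   const float *src1, const float *win,
                                   int len)
{
    int i, j;
    dst  += len;
    win  += len;
    src0 += len;
    for (i = -len, j = len - 1; i < 0; i++, j--) {
        float s0 = src0[i], s1 = src1[j];
        float wi = win[i],  wj = win[j];
        dst[i] = s0*wj - s1*wi;
        dst[j] = s0*wi + s1*wj;
    }
}
#endif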
02338
02339 static void vector_clipf_sse(float *dst, const float *src, float min, float max,
02340 int len)
02341 {
02342 x86_reg i = (len-16)*4;
02343 __asm__ volatile(
02344 "movss %3, %%xmm4 \n"
02345 "movss %4, %%xmm5 \n"
02346 "shufps $0, %%xmm4, %%xmm4 \n"
02347 "shufps $0, %%xmm5, %%xmm5 \n"
02348 "1: \n\t"
02349 "movaps (%2,%0), %%xmm0 \n\t"
02350 "movaps 16(%2,%0), %%xmm1 \n\t"
02351 "movaps 32(%2,%0), %%xmm2 \n\t"
02352 "movaps 48(%2,%0), %%xmm3 \n\t"
02353 "maxps %%xmm4, %%xmm0 \n\t"
02354 "maxps %%xmm4, %%xmm1 \n\t"
02355 "maxps %%xmm4, %%xmm2 \n\t"
02356 "maxps %%xmm4, %%xmm3 \n\t"
02357 "minps %%xmm5, %%xmm0 \n\t"
02358 "minps %%xmm5, %%xmm1 \n\t"
02359 "minps %%xmm5, %%xmm2 \n\t"
02360 "minps %%xmm5, %%xmm3 \n\t"
02361 "movaps %%xmm0, (%1,%0) \n\t"
02362 "movaps %%xmm1, 16(%1,%0) \n\t"
02363 "movaps %%xmm2, 32(%1,%0) \n\t"
02364 "movaps %%xmm3, 48(%1,%0) \n\t"
02365 "sub $64, %0 \n\t"
02366 "jge 1b \n\t"
02367 :"+&r"(i)
02368 :"r"(dst), "r"(src), "m"(min), "m"(max)
02369 :"memory"
02370 );
02371 }
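/* Scalar equivalent: dst[i] = FFMIN(FFMAX(src[i], min), max); the loop does
 * 16 floats per iteration, so len is assumed to be a multiple of 16 and the
 * arrays 16-byte aligned. */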
02372
02373 void ff_vp3_idct_mmx(int16_t *input_data);
02374 void ff_vp3_idct_put_mmx(uint8_t *dest, int line_size, DCTELEM *block);
02375 void ff_vp3_idct_add_mmx(uint8_t *dest, int line_size, DCTELEM *block);
02376
02377 void ff_vp3_idct_dc_add_mmx2(uint8_t *dest, int line_size, const DCTELEM *block);
02378
02379 void ff_vp3_v_loop_filter_mmx2(uint8_t *src, int stride, int *bounding_values);
02380 void ff_vp3_h_loop_filter_mmx2(uint8_t *src, int stride, int *bounding_values);
02381
02382 void ff_vp3_idct_sse2(int16_t *input_data);
02383 void ff_vp3_idct_put_sse2(uint8_t *dest, int line_size, DCTELEM *block);
02384 void ff_vp3_idct_add_sse2(uint8_t *dest, int line_size, DCTELEM *block);
02385
02386 int32_t ff_scalarproduct_int16_mmx2(const int16_t *v1, const int16_t *v2, int order, int shift);
02387 int32_t ff_scalarproduct_int16_sse2(const int16_t *v1, const int16_t *v2, int order, int shift);
02388 int32_t ff_scalarproduct_and_madd_int16_mmx2(int16_t *v1, const int16_t *v2, const int16_t *v3, int order, int mul);
02389 int32_t ff_scalarproduct_and_madd_int16_sse2(int16_t *v1, const int16_t *v2, const int16_t *v3, int order, int mul);
02390 int32_t ff_scalarproduct_and_madd_int16_ssse3(int16_t *v1, const int16_t *v2, const int16_t *v3, int order, int mul);
02391 void ff_add_hfyu_median_prediction_mmx2(uint8_t *dst, const uint8_t *top, const uint8_t *diff, int w, int *left, int *left_top);
02392 int ff_add_hfyu_left_prediction_ssse3(uint8_t *dst, const uint8_t *src, int w, int left);
02393 int ff_add_hfyu_left_prediction_sse4(uint8_t *dst, const uint8_t *src, int w, int left);
02394
02395 float ff_scalarproduct_float_sse(const float *v1, const float *v2, int order);
02396
02397 void dsputil_init_mmx(DSPContext* c, AVCodecContext *avctx)
02398 {
02399 int mm_flags = av_get_cpu_flags();
02400
02401 if (avctx->dsp_mask) {
02402 if (avctx->dsp_mask & AV_CPU_FLAG_FORCE)
02403 mm_flags |= (avctx->dsp_mask & 0xffff);
02404 else
02405 mm_flags &= ~(avctx->dsp_mask & 0xffff);
02406 }
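    /* Hypothetical caller-side uses of dsp_mask (sketch):
     *   avctx->dsp_mask = AV_CPU_FLAG_SSE2;                     // drop SSE2 paths
     *   avctx->dsp_mask = AV_CPU_FLAG_FORCE | AV_CPU_FLAG_MMX;  // force MMX on
     */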
02407
02408 #if 0
02409 av_log(avctx, AV_LOG_INFO, "libavcodec: CPU flags:");
02410 if (mm_flags & AV_CPU_FLAG_MMX)
02411 av_log(avctx, AV_LOG_INFO, " mmx");
02412 if (mm_flags & AV_CPU_FLAG_MMX2)
02413 av_log(avctx, AV_LOG_INFO, " mmx2");
02414 if (mm_flags & AV_CPU_FLAG_3DNOW)
02415 av_log(avctx, AV_LOG_INFO, " 3dnow");
02416 if (mm_flags & AV_CPU_FLAG_SSE)
02417 av_log(avctx, AV_LOG_INFO, " sse");
02418 if (mm_flags & AV_CPU_FLAG_SSE2)
02419 av_log(avctx, AV_LOG_INFO, " sse2");
02420 av_log(avctx, AV_LOG_INFO, "\n");
02421 #endif
02422
02423 if (mm_flags & AV_CPU_FLAG_MMX) {
02424 const int idct_algo= avctx->idct_algo;
02425
02426 if(avctx->lowres==0){
02427 if(idct_algo==FF_IDCT_AUTO || idct_algo==FF_IDCT_SIMPLEMMX){
02428 c->idct_put= ff_simple_idct_put_mmx;
02429 c->idct_add= ff_simple_idct_add_mmx;
02430 c->idct = ff_simple_idct_mmx;
02431 c->idct_permutation_type= FF_SIMPLE_IDCT_PERM;
02432 #if CONFIG_GPL
02433 }else if(idct_algo==FF_IDCT_LIBMPEG2MMX){
02434 if(mm_flags & AV_CPU_FLAG_MMX2){
02435 c->idct_put= ff_libmpeg2mmx2_idct_put;
02436 c->idct_add= ff_libmpeg2mmx2_idct_add;
02437 c->idct = ff_mmxext_idct;
02438 }else{
02439 c->idct_put= ff_libmpeg2mmx_idct_put;
02440 c->idct_add= ff_libmpeg2mmx_idct_add;
02441 c->idct = ff_mmx_idct;
02442 }
02443 c->idct_permutation_type= FF_LIBMPEG2_IDCT_PERM;
02444 #endif
02445 }else if((CONFIG_VP3_DECODER || CONFIG_VP5_DECODER || CONFIG_VP6_DECODER) &&
02446 idct_algo==FF_IDCT_VP3 && HAVE_YASM){
02447 if(mm_flags & AV_CPU_FLAG_SSE2){
02448 c->idct_put= ff_vp3_idct_put_sse2;
02449 c->idct_add= ff_vp3_idct_add_sse2;
02450 c->idct = ff_vp3_idct_sse2;
02451 c->idct_permutation_type= FF_TRANSPOSE_IDCT_PERM;
02452 }else{
02453 c->idct_put= ff_vp3_idct_put_mmx;
02454 c->idct_add= ff_vp3_idct_add_mmx;
02455 c->idct = ff_vp3_idct_mmx;
02456 c->idct_permutation_type= FF_PARTTRANS_IDCT_PERM;
02457 }
02458 }else if(idct_algo==FF_IDCT_CAVS){
02459 c->idct_permutation_type= FF_TRANSPOSE_IDCT_PERM;
02460 }else if(idct_algo==FF_IDCT_XVIDMMX){
02461 if(mm_flags & AV_CPU_FLAG_SSE2){
02462 c->idct_put= ff_idct_xvid_sse2_put;
02463 c->idct_add= ff_idct_xvid_sse2_add;
02464 c->idct = ff_idct_xvid_sse2;
02465 c->idct_permutation_type= FF_SSE2_IDCT_PERM;
02466 }else if(mm_flags & AV_CPU_FLAG_MMX2){
02467 c->idct_put= ff_idct_xvid_mmx2_put;
02468 c->idct_add= ff_idct_xvid_mmx2_add;
02469 c->idct = ff_idct_xvid_mmx2;
02470 }else{
02471 c->idct_put= ff_idct_xvid_mmx_put;
02472 c->idct_add= ff_idct_xvid_mmx_add;
02473 c->idct = ff_idct_xvid_mmx;
02474 }
02475 }
02476 }
02477
02478 c->put_pixels_clamped = ff_put_pixels_clamped_mmx;
02479 c->put_signed_pixels_clamped = ff_put_signed_pixels_clamped_mmx;
02480 c->add_pixels_clamped = ff_add_pixels_clamped_mmx;
02481 c->clear_block = clear_block_mmx;
02482 c->clear_blocks = clear_blocks_mmx;
02483 if ((mm_flags & AV_CPU_FLAG_SSE) &&
02484 !(CONFIG_MPEG_XVMC_DECODER && avctx->xvmc_acceleration > 1)){
02485
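    /* XvMC may hand us DCT blocks that are not 16-byte aligned, so the SSE
     * versions are skipped when XvMC acceleration is in use */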
02486 c->clear_block = clear_block_sse;
02487 c->clear_blocks = clear_blocks_sse;
02488 }
02489
02490 #define SET_HPEL_FUNCS(PFX, IDX, SIZE, CPU) \
02491 c->PFX ## _pixels_tab[IDX][0] = PFX ## _pixels ## SIZE ## _ ## CPU; \
02492 c->PFX ## _pixels_tab[IDX][1] = PFX ## _pixels ## SIZE ## _x2_ ## CPU; \
02493 c->PFX ## _pixels_tab[IDX][2] = PFX ## _pixels ## SIZE ## _y2_ ## CPU; \
02494 c->PFX ## _pixels_tab[IDX][3] = PFX ## _pixels ## SIZE ## _xy2_ ## CPU
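/* Expansion example: SET_HPEL_FUNCS(put, 0, 16, mmx) assigns the four
 * half-pel variants for 16x16 blocks:
 *   c->put_pixels_tab[0][0] = put_pixels16_mmx;      // full-pel
 *   c->put_pixels_tab[0][1] = put_pixels16_x2_mmx;   // horizontal half-pel
 *   c->put_pixels_tab[0][2] = put_pixels16_y2_mmx;   // vertical half-pel
 *   c->put_pixels_tab[0][3] = put_pixels16_xy2_mmx;  // diagonal half-pel
 */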
02495
02496 SET_HPEL_FUNCS(put, 0, 16, mmx);
02497 SET_HPEL_FUNCS(put_no_rnd, 0, 16, mmx);
02498 SET_HPEL_FUNCS(avg, 0, 16, mmx);
02499 SET_HPEL_FUNCS(avg_no_rnd, 0, 16, mmx);
02500 SET_HPEL_FUNCS(put, 1, 8, mmx);
02501 SET_HPEL_FUNCS(put_no_rnd, 1, 8, mmx);
02502 SET_HPEL_FUNCS(avg, 1, 8, mmx);
02503 SET_HPEL_FUNCS(avg_no_rnd, 1, 8, mmx);
02504
02505 #if ARCH_X86_32 || !HAVE_YASM
02506 c->gmc= gmc_mmx;
02507 #endif
02508 #if ARCH_X86_32 && HAVE_YASM
02509 c->emulated_edge_mc = emulated_edge_mc_mmx;
02510 #endif
02511
02512 c->add_bytes= add_bytes_mmx;
02513 c->add_bytes_l2= add_bytes_l2_mmx;
02514
02515 c->draw_edges = draw_edges_mmx;
02516
02517 if (CONFIG_H263_DECODER || CONFIG_H263_ENCODER) {
02518 c->h263_v_loop_filter= h263_v_loop_filter_mmx;
02519 c->h263_h_loop_filter= h263_h_loop_filter_mmx;
02520 }
02521
02522 #if HAVE_YASM
02523 c->put_h264_chroma_pixels_tab[0]= ff_put_h264_chroma_mc8_mmx_rnd;
02524 c->put_h264_chroma_pixels_tab[1]= ff_put_h264_chroma_mc4_mmx;
02525
02526 c->put_rv40_chroma_pixels_tab[0]= ff_put_rv40_chroma_mc8_mmx;
02527 c->put_rv40_chroma_pixels_tab[1]= ff_put_rv40_chroma_mc4_mmx;
02528 #endif
02529
02530 if (mm_flags & AV_CPU_FLAG_MMX2) {
02531 c->prefetch = prefetch_mmx2;
02532
02533 c->put_pixels_tab[0][1] = put_pixels16_x2_mmx2;
02534 c->put_pixels_tab[0][2] = put_pixels16_y2_mmx2;
02535
02536 c->avg_pixels_tab[0][0] = avg_pixels16_mmx2;
02537 c->avg_pixels_tab[0][1] = avg_pixels16_x2_mmx2;
02538 c->avg_pixels_tab[0][2] = avg_pixels16_y2_mmx2;
02539
02540 c->put_pixels_tab[1][1] = put_pixels8_x2_mmx2;
02541 c->put_pixels_tab[1][2] = put_pixels8_y2_mmx2;
02542
02543 c->avg_pixels_tab[1][0] = avg_pixels8_mmx2;
02544 c->avg_pixels_tab[1][1] = avg_pixels8_x2_mmx2;
02545 c->avg_pixels_tab[1][2] = avg_pixels8_y2_mmx2;
02546
02547 if(!(avctx->flags & CODEC_FLAG_BITEXACT)){
02548 c->put_no_rnd_pixels_tab[0][1] = put_no_rnd_pixels16_x2_mmx2;
02549 c->put_no_rnd_pixels_tab[0][2] = put_no_rnd_pixels16_y2_mmx2;
02550 c->put_no_rnd_pixels_tab[1][1] = put_no_rnd_pixels8_x2_mmx2;
02551 c->put_no_rnd_pixels_tab[1][2] = put_no_rnd_pixels8_y2_mmx2;
02552 c->avg_pixels_tab[0][3] = avg_pixels16_xy2_mmx2;
02553 c->avg_pixels_tab[1][3] = avg_pixels8_xy2_mmx2;
02554
02555 if (CONFIG_VP3_DECODER && HAVE_YASM) {
02556 c->vp3_v_loop_filter= ff_vp3_v_loop_filter_mmx2;
02557 c->vp3_h_loop_filter= ff_vp3_h_loop_filter_mmx2;
02558 }
02559 }
02560 if (CONFIG_VP3_DECODER && HAVE_YASM) {
02561 c->vp3_idct_dc_add = ff_vp3_idct_dc_add_mmx2;
02562 }
02563
02564 if (CONFIG_VP3_DECODER
02565 && (avctx->codec_id == CODEC_ID_VP3 || avctx->codec_id == CODEC_ID_THEORA)) {
02566 c->put_no_rnd_pixels_tab[1][1] = put_no_rnd_pixels8_x2_exact_mmx2;
02567 c->put_no_rnd_pixels_tab[1][2] = put_no_rnd_pixels8_y2_exact_mmx2;
02568 }
02569
02570 #define SET_QPEL_FUNCS(PFX, IDX, SIZE, CPU) \
02571 c->PFX ## _pixels_tab[IDX][ 0] = PFX ## SIZE ## _mc00_ ## CPU; \
02572 c->PFX ## _pixels_tab[IDX][ 1] = PFX ## SIZE ## _mc10_ ## CPU; \
02573 c->PFX ## _pixels_tab[IDX][ 2] = PFX ## SIZE ## _mc20_ ## CPU; \
02574 c->PFX ## _pixels_tab[IDX][ 3] = PFX ## SIZE ## _mc30_ ## CPU; \
02575 c->PFX ## _pixels_tab[IDX][ 4] = PFX ## SIZE ## _mc01_ ## CPU; \
02576 c->PFX ## _pixels_tab[IDX][ 5] = PFX ## SIZE ## _mc11_ ## CPU; \
02577 c->PFX ## _pixels_tab[IDX][ 6] = PFX ## SIZE ## _mc21_ ## CPU; \
02578 c->PFX ## _pixels_tab[IDX][ 7] = PFX ## SIZE ## _mc31_ ## CPU; \
02579 c->PFX ## _pixels_tab[IDX][ 8] = PFX ## SIZE ## _mc02_ ## CPU; \
02580 c->PFX ## _pixels_tab[IDX][ 9] = PFX ## SIZE ## _mc12_ ## CPU; \
02581 c->PFX ## _pixels_tab[IDX][10] = PFX ## SIZE ## _mc22_ ## CPU; \
02582 c->PFX ## _pixels_tab[IDX][11] = PFX ## SIZE ## _mc32_ ## CPU; \
02583 c->PFX ## _pixels_tab[IDX][12] = PFX ## SIZE ## _mc03_ ## CPU; \
02584 c->PFX ## _pixels_tab[IDX][13] = PFX ## SIZE ## _mc13_ ## CPU; \
02585 c->PFX ## _pixels_tab[IDX][14] = PFX ## SIZE ## _mc23_ ## CPU; \
02586 c->PFX ## _pixels_tab[IDX][15] = PFX ## SIZE ## _mc33_ ## CPU
02587
02588 SET_QPEL_FUNCS(put_qpel, 0, 16, mmx2);
02589 SET_QPEL_FUNCS(put_qpel, 1, 8, mmx2);
02590 SET_QPEL_FUNCS(put_no_rnd_qpel, 0, 16, mmx2);
02591 SET_QPEL_FUNCS(put_no_rnd_qpel, 1, 8, mmx2);
02592 SET_QPEL_FUNCS(avg_qpel, 0, 16, mmx2);
02593 SET_QPEL_FUNCS(avg_qpel, 1, 8, mmx2);
02594
02595 SET_QPEL_FUNCS(put_h264_qpel, 0, 16, mmx2);
02596 SET_QPEL_FUNCS(put_h264_qpel, 1, 8, mmx2);
02597 SET_QPEL_FUNCS(put_h264_qpel, 2, 4, mmx2);
02598 SET_QPEL_FUNCS(avg_h264_qpel, 0, 16, mmx2);
02599 SET_QPEL_FUNCS(avg_h264_qpel, 1, 8, mmx2);
02600 SET_QPEL_FUNCS(avg_h264_qpel, 2, 4, mmx2);
02601
02602 SET_QPEL_FUNCS(put_2tap_qpel, 0, 16, mmx2);
02603 SET_QPEL_FUNCS(put_2tap_qpel, 1, 8, mmx2);
02604 SET_QPEL_FUNCS(avg_2tap_qpel, 0, 16, mmx2);
02605 SET_QPEL_FUNCS(avg_2tap_qpel, 1, 8, mmx2);
02606
02607 #if HAVE_YASM
02608 c->avg_rv40_chroma_pixels_tab[0]= ff_avg_rv40_chroma_mc8_mmx2;
02609 c->avg_rv40_chroma_pixels_tab[1]= ff_avg_rv40_chroma_mc4_mmx2;
02610
02611 c->avg_h264_chroma_pixels_tab[0]= ff_avg_h264_chroma_mc8_mmx2_rnd;
02612 c->avg_h264_chroma_pixels_tab[1]= ff_avg_h264_chroma_mc4_mmx2;
02613 c->avg_h264_chroma_pixels_tab[2]= ff_avg_h264_chroma_mc2_mmx2;
02614 c->put_h264_chroma_pixels_tab[2]= ff_put_h264_chroma_mc2_mmx2;
02615
02616 c->add_hfyu_median_prediction = ff_add_hfyu_median_prediction_mmx2;
02617 #endif
02618 #if HAVE_7REGS && HAVE_TEN_OPERANDS
02619 if( mm_flags&AV_CPU_FLAG_3DNOW )
02620 c->add_hfyu_median_prediction = add_hfyu_median_prediction_cmov;
02621 #endif
02622
02623 c->add_png_paeth_prediction= add_png_paeth_prediction_mmx2;
02624 } else if (mm_flags & AV_CPU_FLAG_3DNOW) {
02625 c->prefetch = prefetch_3dnow;
02626
02627 c->put_pixels_tab[0][1] = put_pixels16_x2_3dnow;
02628 c->put_pixels_tab[0][2] = put_pixels16_y2_3dnow;
02629
02630 c->avg_pixels_tab[0][0] = avg_pixels16_3dnow;
02631 c->avg_pixels_tab[0][1] = avg_pixels16_x2_3dnow;
02632 c->avg_pixels_tab[0][2] = avg_pixels16_y2_3dnow;
02633
02634 c->put_pixels_tab[1][1] = put_pixels8_x2_3dnow;
02635 c->put_pixels_tab[1][2] = put_pixels8_y2_3dnow;
02636
02637 c->avg_pixels_tab[1][0] = avg_pixels8_3dnow;
02638 c->avg_pixels_tab[1][1] = avg_pixels8_x2_3dnow;
02639 c->avg_pixels_tab[1][2] = avg_pixels8_y2_3dnow;
02640
02641 if(!(avctx->flags & CODEC_FLAG_BITEXACT)){
02642 c->put_no_rnd_pixels_tab[0][1] = put_no_rnd_pixels16_x2_3dnow;
02643 c->put_no_rnd_pixels_tab[0][2] = put_no_rnd_pixels16_y2_3dnow;
02644 c->put_no_rnd_pixels_tab[1][1] = put_no_rnd_pixels8_x2_3dnow;
02645 c->put_no_rnd_pixels_tab[1][2] = put_no_rnd_pixels8_y2_3dnow;
02646 c->avg_pixels_tab[0][3] = avg_pixels16_xy2_3dnow;
02647 c->avg_pixels_tab[1][3] = avg_pixels8_xy2_3dnow;
02648 }
02649
02650 if (CONFIG_VP3_DECODER
02651 && (avctx->codec_id == CODEC_ID_VP3 || avctx->codec_id == CODEC_ID_THEORA)) {
02652 c->put_no_rnd_pixels_tab[1][1] = put_no_rnd_pixels8_x2_exact_3dnow;
02653 c->put_no_rnd_pixels_tab[1][2] = put_no_rnd_pixels8_y2_exact_3dnow;
02654 }
02655
02656 SET_QPEL_FUNCS(put_qpel, 0, 16, 3dnow);
02657 SET_QPEL_FUNCS(put_qpel, 1, 8, 3dnow);
02658 SET_QPEL_FUNCS(put_no_rnd_qpel, 0, 16, 3dnow);
02659 SET_QPEL_FUNCS(put_no_rnd_qpel, 1, 8, 3dnow);
02660 SET_QPEL_FUNCS(avg_qpel, 0, 16, 3dnow);
02661 SET_QPEL_FUNCS(avg_qpel, 1, 8, 3dnow);
02662
02663 SET_QPEL_FUNCS(put_h264_qpel, 0, 16, 3dnow);
02664 SET_QPEL_FUNCS(put_h264_qpel, 1, 8, 3dnow);
02665 SET_QPEL_FUNCS(put_h264_qpel, 2, 4, 3dnow);
02666 SET_QPEL_FUNCS(avg_h264_qpel, 0, 16, 3dnow);
02667 SET_QPEL_FUNCS(avg_h264_qpel, 1, 8, 3dnow);
02668 SET_QPEL_FUNCS(avg_h264_qpel, 2, 4, 3dnow);
02669
02670 SET_QPEL_FUNCS(put_2tap_qpel, 0, 16, 3dnow);
02671 SET_QPEL_FUNCS(put_2tap_qpel, 1, 8, 3dnow);
02672 SET_QPEL_FUNCS(avg_2tap_qpel, 0, 16, 3dnow);
02673 SET_QPEL_FUNCS(avg_2tap_qpel, 1, 8, 3dnow);
02674
02675 #if HAVE_YASM
02676 c->avg_h264_chroma_pixels_tab[0]= ff_avg_h264_chroma_mc8_3dnow_rnd;
02677 c->avg_h264_chroma_pixels_tab[1]= ff_avg_h264_chroma_mc4_3dnow;
02678
02679 c->avg_rv40_chroma_pixels_tab[0]= ff_avg_rv40_chroma_mc8_3dnow;
02680 c->avg_rv40_chroma_pixels_tab[1]= ff_avg_rv40_chroma_mc4_3dnow;
02681 #endif
02682 }
02683
02684
02685 #define H264_QPEL_FUNCS(x, y, CPU)\
02686 c->put_h264_qpel_pixels_tab[0][x+y*4] = put_h264_qpel16_mc##x##y##_##CPU;\
02687 c->put_h264_qpel_pixels_tab[1][x+y*4] = put_h264_qpel8_mc##x##y##_##CPU;\
02688 c->avg_h264_qpel_pixels_tab[0][x+y*4] = avg_h264_qpel16_mc##x##y##_##CPU;\
02689 c->avg_h264_qpel_pixels_tab[1][x+y*4] = avg_h264_qpel8_mc##x##y##_##CPU;
02690 if((mm_flags & AV_CPU_FLAG_SSE2) && !(mm_flags & AV_CPU_FLAG_3DNOW)){
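        /* SSE2 hpel copies: assumed faster than MMX on Intel-style cores;
         * the 3DNow! check keeps the MMX versions on AMD CPUs of that era */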
02691
02692 c->put_pixels_tab[0][0] = put_pixels16_sse2;
02693 c->put_no_rnd_pixels_tab[0][0] = put_pixels16_sse2;
02694 c->avg_pixels_tab[0][0] = avg_pixels16_sse2;
02695 H264_QPEL_FUNCS(0, 0, sse2);
02696 }
02697 if(mm_flags & AV_CPU_FLAG_SSE2){
02698 H264_QPEL_FUNCS(0, 1, sse2);
02699 H264_QPEL_FUNCS(0, 2, sse2);
02700 H264_QPEL_FUNCS(0, 3, sse2);
02701 H264_QPEL_FUNCS(1, 1, sse2);
02702 H264_QPEL_FUNCS(1, 2, sse2);
02703 H264_QPEL_FUNCS(1, 3, sse2);
02704 H264_QPEL_FUNCS(2, 1, sse2);
02705 H264_QPEL_FUNCS(2, 2, sse2);
02706 H264_QPEL_FUNCS(2, 3, sse2);
02707 H264_QPEL_FUNCS(3, 1, sse2);
02708 H264_QPEL_FUNCS(3, 2, sse2);
02709 H264_QPEL_FUNCS(3, 3, sse2);
02710 }
02711 #if HAVE_SSSE3
02712 if(mm_flags & AV_CPU_FLAG_SSSE3){
02713 H264_QPEL_FUNCS(1, 0, ssse3);
02714 H264_QPEL_FUNCS(1, 1, ssse3);
02715 H264_QPEL_FUNCS(1, 2, ssse3);
02716 H264_QPEL_FUNCS(1, 3, ssse3);
02717 H264_QPEL_FUNCS(2, 0, ssse3);
02718 H264_QPEL_FUNCS(2, 1, ssse3);
02719 H264_QPEL_FUNCS(2, 2, ssse3);
02720 H264_QPEL_FUNCS(2, 3, ssse3);
02721 H264_QPEL_FUNCS(3, 0, ssse3);
02722 H264_QPEL_FUNCS(3, 1, ssse3);
02723 H264_QPEL_FUNCS(3, 2, ssse3);
02724 H264_QPEL_FUNCS(3, 3, ssse3);
02725 c->add_png_paeth_prediction= add_png_paeth_prediction_ssse3;
02726 #if HAVE_YASM
02727 c->put_h264_chroma_pixels_tab[0]= ff_put_h264_chroma_mc8_ssse3_rnd;
02728 c->avg_h264_chroma_pixels_tab[0]= ff_avg_h264_chroma_mc8_ssse3_rnd;
02729 c->put_h264_chroma_pixels_tab[1]= ff_put_h264_chroma_mc4_ssse3;
02730 c->avg_h264_chroma_pixels_tab[1]= ff_avg_h264_chroma_mc4_ssse3;
02731 c->add_hfyu_left_prediction = ff_add_hfyu_left_prediction_ssse3;
02732 if (mm_flags & AV_CPU_FLAG_SSE4)
02733 c->add_hfyu_left_prediction = ff_add_hfyu_left_prediction_sse4;
02734 #endif
02735 }
02736 #endif
02737
02738 if(mm_flags & AV_CPU_FLAG_3DNOW){
02739 c->vorbis_inverse_coupling = vorbis_inverse_coupling_3dnow;
02740 c->vector_fmul = vector_fmul_3dnow;
02741 }
02742 if(mm_flags & AV_CPU_FLAG_3DNOWEXT){
02743 c->vector_fmul_reverse = vector_fmul_reverse_3dnow2;
02744 #if HAVE_6REGS
02745 c->vector_fmul_window = vector_fmul_window_3dnow2;
02746 #endif
02747 }
02748 if(mm_flags & AV_CPU_FLAG_MMX2){
02749 #if HAVE_YASM
02750 c->scalarproduct_int16 = ff_scalarproduct_int16_mmx2;
02751 c->scalarproduct_and_madd_int16 = ff_scalarproduct_and_madd_int16_mmx2;
02752 #endif
02753 }
02754 if(mm_flags & AV_CPU_FLAG_SSE){
02755 c->vorbis_inverse_coupling = vorbis_inverse_coupling_sse;
02756 c->ac3_downmix = ac3_downmix_sse;
02757 c->vector_fmul = vector_fmul_sse;
02758 c->vector_fmul_reverse = vector_fmul_reverse_sse;
02759 c->vector_fmul_add = vector_fmul_add_sse;
02760 #if HAVE_6REGS
02761 c->vector_fmul_window = vector_fmul_window_sse;
02762 #endif
02763 c->vector_clipf = vector_clipf_sse;
02764 #if HAVE_YASM
02765 c->scalarproduct_float = ff_scalarproduct_float_sse;
02766 #endif
02767 }
02768 if(mm_flags & AV_CPU_FLAG_3DNOW)
02769 c->vector_fmul_add = vector_fmul_add_3dnow;
02770 if(mm_flags & AV_CPU_FLAG_SSE2){
02771 #if HAVE_YASM
02772 c->scalarproduct_int16 = ff_scalarproduct_int16_sse2;
02773 c->scalarproduct_and_madd_int16 = ff_scalarproduct_and_madd_int16_sse2;
02774
02775 c->emulated_edge_mc = emulated_edge_mc_sse;
02776 c->gmc= gmc_sse;
02777 #endif
02778 }
02779 if((mm_flags & AV_CPU_FLAG_SSSE3) && !(mm_flags & (AV_CPU_FLAG_SSE42|AV_CPU_FLAG_3DNOW)) && HAVE_YASM)
02780 c->scalarproduct_and_madd_int16 = ff_scalarproduct_and_madd_int16_ssse3;
02781 }
02782
02783 if (CONFIG_ENCODERS)
02784 dsputilenc_init_mmx(c, avctx);
02785
02786 #if 0
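    /* speed-testing stubs: route everything to just_return (defined under
     * #if 0 above) to measure call overhead; kept disabled */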
02787
02788 get_pixels = just_return;
02789 put_pixels_clamped = just_return;
02790 add_pixels_clamped = just_return;
02791
02792 pix_abs16x16 = just_return;
02793 pix_abs16x16_x2 = just_return;
02794 pix_abs16x16_y2 = just_return;
02795 pix_abs16x16_xy2 = just_return;
02796
02797 put_pixels_tab[0] = just_return;
02798 put_pixels_tab[1] = just_return;
02799 put_pixels_tab[2] = just_return;
02800 put_pixels_tab[3] = just_return;
02801
02802 put_no_rnd_pixels_tab[0] = just_return;
02803 put_no_rnd_pixels_tab[1] = just_return;
02804 put_no_rnd_pixels_tab[2] = just_return;
02805 put_no_rnd_pixels_tab[3] = just_return;
02806
02807 avg_pixels_tab[0] = just_return;
02808 avg_pixels_tab[1] = just_return;
02809 avg_pixels_tab[2] = just_return;
02810 avg_pixels_tab[3] = just_return;
02811
02812 avg_no_rnd_pixels_tab[0] = just_return;
02813 avg_no_rnd_pixels_tab[1] = just_return;
02814 avg_no_rnd_pixels_tab[2] = just_return;
02815 avg_no_rnd_pixels_tab[3] = just_return;
02816
02817
02818
02819 #endif
02820 }