libavcodec/x86/dsputil_mmx.c

/*
 * MMX optimized DSP utils
 * Copyright (c) 2000, 2001 Fabrice Bellard
 * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 *
 * MMX optimization by Nick Kurshev <nickols_k@mail.ru>
 */

#include "libavutil/cpu.h"
#include "libavutil/x86_cpu.h"
#include "libavcodec/dsputil.h"
#include "libavcodec/h264dsp.h"
#include "libavcodec/mpegvideo.h"
#include "libavcodec/simple_idct.h"
#include "libavcodec/ac3dec.h"
#include "dsputil_mmx.h"
#include "idct_xvid.h"
#include "diracdsp_mmx.h"

//#undef NDEBUG
//#include <assert.h>

/* pixel operations */
DECLARE_ALIGNED(8,  const uint64_t, ff_bone) = 0x0101010101010101ULL;
DECLARE_ALIGNED(8,  const uint64_t, ff_wtwo) = 0x0002000200020002ULL;

DECLARE_ALIGNED(16, const uint64_t, ff_pdw_80000000)[2] =
{0x8000000080000000ULL, 0x8000000080000000ULL};

DECLARE_ALIGNED(16, const xmm_reg,  ff_pw_1  ) = {0x0001000100010001ULL, 0x0001000100010001ULL};
DECLARE_ALIGNED(16, const xmm_reg,  ff_pw_2  ) = {0x0002000200020002ULL, 0x0002000200020002ULL};
DECLARE_ALIGNED(16, const xmm_reg,  ff_pw_3  ) = {0x0003000300030003ULL, 0x0003000300030003ULL};
DECLARE_ALIGNED(16, const xmm_reg,  ff_pw_4  ) = {0x0004000400040004ULL, 0x0004000400040004ULL};
DECLARE_ALIGNED(16, const xmm_reg,  ff_pw_5  ) = {0x0005000500050005ULL, 0x0005000500050005ULL};
DECLARE_ALIGNED(16, const xmm_reg,  ff_pw_8  ) = {0x0008000800080008ULL, 0x0008000800080008ULL};
DECLARE_ALIGNED(16, const xmm_reg,  ff_pw_9  ) = {0x0009000900090009ULL, 0x0009000900090009ULL};
DECLARE_ALIGNED(8,  const uint64_t, ff_pw_15 ) = 0x000F000F000F000FULL;
DECLARE_ALIGNED(16, const xmm_reg,  ff_pw_16 ) = {0x0010001000100010ULL, 0x0010001000100010ULL};
DECLARE_ALIGNED(16, const xmm_reg,  ff_pw_17 ) = {0x0011001100110011ULL, 0x0011001100110011ULL};
DECLARE_ALIGNED(16, const xmm_reg,  ff_pw_18 ) = {0x0012001200120012ULL, 0x0012001200120012ULL};
DECLARE_ALIGNED(8,  const uint64_t, ff_pw_20 ) = 0x0014001400140014ULL;
DECLARE_ALIGNED(16, const xmm_reg,  ff_pw_27 ) = {0x001B001B001B001BULL, 0x001B001B001B001BULL};
DECLARE_ALIGNED(16, const xmm_reg,  ff_pw_28 ) = {0x001C001C001C001CULL, 0x001C001C001C001CULL};
DECLARE_ALIGNED(16, const xmm_reg,  ff_pw_32 ) = {0x0020002000200020ULL, 0x0020002000200020ULL};
DECLARE_ALIGNED(8,  const uint64_t, ff_pw_42 ) = 0x002A002A002A002AULL;
DECLARE_ALIGNED(8,  const uint64_t, ff_pw_53 ) = 0x0035003500350035ULL;
DECLARE_ALIGNED(16, const xmm_reg,  ff_pw_63 ) = {0x003F003F003F003FULL, 0x003F003F003F003FULL};
DECLARE_ALIGNED(16, const xmm_reg,  ff_pw_64 ) = {0x0040004000400040ULL, 0x0040004000400040ULL};
DECLARE_ALIGNED(8,  const uint64_t, ff_pw_96 ) = 0x0060006000600060ULL;
DECLARE_ALIGNED(8,  const uint64_t, ff_pw_128) = 0x0080008000800080ULL;
DECLARE_ALIGNED(8,  const uint64_t, ff_pw_255) = 0x00ff00ff00ff00ffULL;
DECLARE_ALIGNED(16, const xmm_reg,  ff_pw_512) = {0x0200020002000200ULL, 0x0200020002000200ULL};
DECLARE_ALIGNED(16, const xmm_reg,  ff_pw_1019)= {0x03FB03FB03FB03FBULL, 0x03FB03FB03FB03FBULL};

DECLARE_ALIGNED(16, const xmm_reg,  ff_pb_0  ) = {0x0000000000000000ULL, 0x0000000000000000ULL};
DECLARE_ALIGNED(16, const xmm_reg,  ff_pb_1  ) = {0x0101010101010101ULL, 0x0101010101010101ULL};
DECLARE_ALIGNED(16, const xmm_reg,  ff_pb_3  ) = {0x0303030303030303ULL, 0x0303030303030303ULL};
DECLARE_ALIGNED(16, const xmm_reg,  ff_pb_4  ) = {0x0404040404040404ULL, 0x0404040404040404ULL};
DECLARE_ALIGNED(8,  const uint64_t, ff_pb_7  ) = 0x0707070707070707ULL;
DECLARE_ALIGNED(8,  const uint64_t, ff_pb_1F ) = 0x1F1F1F1F1F1F1F1FULL;
DECLARE_ALIGNED(8,  const uint64_t, ff_pb_3F ) = 0x3F3F3F3F3F3F3F3FULL;
DECLARE_ALIGNED(16, const xmm_reg,  ff_pb_80 ) = {0x8080808080808080ULL, 0x8080808080808080ULL};
DECLARE_ALIGNED(8,  const uint64_t, ff_pb_81 ) = 0x8181818181818181ULL;
DECLARE_ALIGNED(16, const xmm_reg,  ff_pb_A1 ) = {0xA1A1A1A1A1A1A1A1ULL, 0xA1A1A1A1A1A1A1A1ULL};
DECLARE_ALIGNED(16, const xmm_reg,  ff_pb_F8 ) = {0xF8F8F8F8F8F8F8F8ULL, 0xF8F8F8F8F8F8F8F8ULL};
DECLARE_ALIGNED(8,  const uint64_t, ff_pb_FC ) = 0xFCFCFCFCFCFCFCFCULL;
DECLARE_ALIGNED(16, const xmm_reg,  ff_pb_FE ) = {0xFEFEFEFEFEFEFEFEULL, 0xFEFEFEFEFEFEFEFEULL};

DECLARE_ALIGNED(16, const double, ff_pd_1)[2] = { 1.0, 1.0 };
DECLARE_ALIGNED(16, const double, ff_pd_2)[2] = { 2.0, 2.0 };

#define JUMPALIGN() __asm__ volatile (".p2align 3"::)
#define MOVQ_ZERO(regd)  __asm__ volatile ("pxor %%" #regd ", %%" #regd ::)

#define MOVQ_BFE(regd) \
    __asm__ volatile ( \
    "pcmpeqd %%" #regd ", %%" #regd " \n\t"\
    "paddb %%" #regd ", %%" #regd " \n\t" ::)
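
// MOVQ_BFE builds the 0xFE byte mask entirely in a register: pcmpeqd
// sets every byte to 0xFF and paddb doubles each byte, so 0xFF + 0xFF
// wraps to 0xFE in every lane.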

#ifndef PIC
#define MOVQ_BONE(regd)  __asm__ volatile ("movq %0, %%" #regd " \n\t" ::"m"(ff_bone))
#define MOVQ_WTWO(regd)  __asm__ volatile ("movq %0, %%" #regd " \n\t" ::"m"(ff_wtwo))
#else
// for a shared library it is better to access the constants this way
// pcmpeqd -> -1
#define MOVQ_BONE(regd) \
    __asm__ volatile ( \
    "pcmpeqd %%" #regd ", %%" #regd " \n\t" \
    "psrlw $15, %%" #regd " \n\t" \
    "packuswb %%" #regd ", %%" #regd " \n\t" ::)

#define MOVQ_WTWO(regd) \
    __asm__ volatile ( \
    "pcmpeqd %%" #regd ", %%" #regd " \n\t" \
    "psrlw $15, %%" #regd " \n\t" \
    "psllw $1, %%" #regd " \n\t"::)

#endif
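
// In the PIC variants above the constants are synthesized in registers
// instead of loaded from memory: pcmpeqd yields all-ones, psrlw $15
// leaves 0x0001 in each 16-bit lane, then packuswb narrows that to
// 0x01 bytes (BONE) while psllw $1 doubles it to 0x0002 words (WTWO).
// This avoids GOT-relative loads of ff_bone/ff_wtwo.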

// using regr as temporary and for the output result
// first argument is unmodified and second is trashed
// regfe is supposed to contain 0xfefefefefefefefe
#define PAVGB_MMX_NO_RND(rega, regb, regr, regfe) \
    "movq " #rega ", " #regr "  \n\t"\
    "pand " #regb ", " #regr "  \n\t"\
    "pxor " #rega ", " #regb "  \n\t"\
    "pand " #regfe "," #regb "  \n\t"\
    "psrlq $1, " #regb "        \n\t"\
    "paddb " #regb ", " #regr " \n\t"

#define PAVGB_MMX(rega, regb, regr, regfe) \
    "movq " #rega ", " #regr "  \n\t"\
    "por  " #regb ", " #regr "  \n\t"\
    "pxor " #rega ", " #regb "  \n\t"\
    "pand " #regfe "," #regb "  \n\t"\
    "psrlq $1, " #regb "        \n\t"\
    "psubb " #regb ", " #regr " \n\t"
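
// A scalar sketch (hypothetical reference helpers, not part of the
// build) of the per-byte identities the two macros above implement;
// the 0xFE mask keeps psrlq, a 64-bit shift, from leaking a bit
// between byte lanes:
#if 0
static inline uint8_t avg_no_rnd_ref(uint8_t a, uint8_t b)
{
    return (a & b) + (((a ^ b) & 0xFE) >> 1); /* == (a + b)     >> 1 */
}
static inline uint8_t avg_rnd_ref(uint8_t a, uint8_t b)
{
    return (a | b) - (((a ^ b) & 0xFE) >> 1); /* == (a + b + 1) >> 1 */
}
#endif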

// mm6 is supposed to contain 0xfefefefefefefefe
#define PAVGBP_MMX_NO_RND(rega, regb, regr,  regc, regd, regp) \
    "movq " #rega ", " #regr "  \n\t"\
    "movq " #regc ", " #regp "  \n\t"\
    "pand " #regb ", " #regr "  \n\t"\
    "pand " #regd ", " #regp "  \n\t"\
    "pxor " #rega ", " #regb "  \n\t"\
    "pxor " #regc ", " #regd "  \n\t"\
    "pand %%mm6, " #regb "      \n\t"\
    "pand %%mm6, " #regd "      \n\t"\
    "psrlq $1, " #regb "        \n\t"\
    "psrlq $1, " #regd "        \n\t"\
    "paddb " #regb ", " #regr " \n\t"\
    "paddb " #regd ", " #regp " \n\t"

#define PAVGBP_MMX(rega, regb, regr, regc, regd, regp) \
    "movq " #rega ", " #regr "  \n\t"\
    "movq " #regc ", " #regp "  \n\t"\
    "por  " #regb ", " #regr "  \n\t"\
    "por  " #regd ", " #regp "  \n\t"\
    "pxor " #rega ", " #regb "  \n\t"\
    "pxor " #regc ", " #regd "  \n\t"\
    "pand %%mm6, " #regb "      \n\t"\
    "pand %%mm6, " #regd "      \n\t"\
    "psrlq $1, " #regd "        \n\t"\
    "psrlq $1, " #regb "        \n\t"\
    "psubb " #regb ", " #regr " \n\t"\
    "psubb " #regd ", " #regp " \n\t"

/***********************************/
/* MMX no rounding */
#define DEF(x, y) x ## _no_rnd_ ## y ##_mmx
#define SET_RND  MOVQ_WONE
#define PAVGBP(a, b, c, d, e, f)        PAVGBP_MMX_NO_RND(a, b, c, d, e, f)
#define PAVGB(a, b, c, e)               PAVGB_MMX_NO_RND(a, b, c, e)
#define OP_AVG(a, b, c, e)              PAVGB_MMX(a, b, c, e)

#include "dsputil_mmx_rnd_template.c"

#undef DEF
#undef SET_RND
#undef PAVGBP
#undef PAVGB
/***********************************/
/* MMX rounding */

#define DEF(x, y) x ## _ ## y ##_mmx
#define SET_RND  MOVQ_WTWO
#define PAVGBP(a, b, c, d, e, f)        PAVGBP_MMX(a, b, c, d, e, f)
#define PAVGB(a, b, c, e)               PAVGB_MMX(a, b, c, e)

#include "dsputil_mmx_rnd_template.c"

#undef DEF
#undef SET_RND
#undef PAVGBP
#undef PAVGB
#undef OP_AVG

/***********************************/
/* 3Dnow specific */

#define DEF(x) x ## _3dnow
#define PAVGB "pavgusb"
#define OP_AVG PAVGB

#include "dsputil_mmx_avg_template.c"

#undef DEF
#undef PAVGB
#undef OP_AVG

/***********************************/
/* MMX2 specific */

#define DEF(x) x ## _mmx2

/* Introduced only in MMX2 set */
#define PAVGB "pavgb"
#define OP_AVG PAVGB

#include "dsputil_mmx_avg_template.c"

#undef DEF
#undef PAVGB
#undef OP_AVG

#define put_no_rnd_pixels16_mmx put_pixels16_mmx
#define put_no_rnd_pixels8_mmx put_pixels8_mmx
#define put_pixels16_mmx2 put_pixels16_mmx
#define put_pixels8_mmx2 put_pixels8_mmx
#define put_pixels4_mmx2 put_pixels4_mmx
#define put_no_rnd_pixels16_mmx2 put_no_rnd_pixels16_mmx
#define put_no_rnd_pixels8_mmx2 put_no_rnd_pixels8_mmx
#define put_pixels16_3dnow put_pixels16_mmx
#define put_pixels8_3dnow put_pixels8_mmx
#define put_pixels4_3dnow put_pixels4_mmx
#define put_no_rnd_pixels16_3dnow put_no_rnd_pixels16_mmx
#define put_no_rnd_pixels8_3dnow put_no_rnd_pixels8_mmx

/***********************************/
/* standard MMX */

void ff_put_pixels_clamped_mmx(const DCTELEM *block, uint8_t *pixels, int line_size)
{
    const DCTELEM *p;
    uint8_t *pix;

    /* read the pixels */
    p = block;
    pix = pixels;
    /* unrolled loop */
        __asm__ volatile(
                "movq   %3, %%mm0               \n\t"
                "movq   8%3, %%mm1              \n\t"
                "movq   16%3, %%mm2             \n\t"
                "movq   24%3, %%mm3             \n\t"
                "movq   32%3, %%mm4             \n\t"
                "movq   40%3, %%mm5             \n\t"
                "movq   48%3, %%mm6             \n\t"
                "movq   56%3, %%mm7             \n\t"
                "packuswb %%mm1, %%mm0          \n\t"
                "packuswb %%mm3, %%mm2          \n\t"
                "packuswb %%mm5, %%mm4          \n\t"
                "packuswb %%mm7, %%mm6          \n\t"
                "movq   %%mm0, (%0)             \n\t"
                "movq   %%mm2, (%0, %1)         \n\t"
                "movq   %%mm4, (%0, %1, 2)      \n\t"
                "movq   %%mm6, (%0, %2)         \n\t"
                ::"r" (pix), "r" ((x86_reg)line_size), "r" ((x86_reg)line_size*3), "m"(*p)
                :"memory");
        pix += line_size*4;
        p += 32;

    // If this were an exact copy of the code above, the compiler would
    // generate some very strange code, so the block pointer is passed
    // through an "r" constraint here instead of "m".
    __asm__ volatile(
            "movq       (%3), %%mm0             \n\t"
            "movq       8(%3), %%mm1            \n\t"
            "movq       16(%3), %%mm2           \n\t"
            "movq       24(%3), %%mm3           \n\t"
            "movq       32(%3), %%mm4           \n\t"
            "movq       40(%3), %%mm5           \n\t"
            "movq       48(%3), %%mm6           \n\t"
            "movq       56(%3), %%mm7           \n\t"
            "packuswb %%mm1, %%mm0              \n\t"
            "packuswb %%mm3, %%mm2              \n\t"
            "packuswb %%mm5, %%mm4              \n\t"
            "packuswb %%mm7, %%mm6              \n\t"
            "movq       %%mm0, (%0)             \n\t"
            "movq       %%mm2, (%0, %1)         \n\t"
            "movq       %%mm4, (%0, %1, 2)      \n\t"
            "movq       %%mm6, (%0, %2)         \n\t"
            ::"r" (pix), "r" ((x86_reg)line_size), "r" ((x86_reg)line_size*3), "r"(p)
            :"memory");
}
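
// A scalar equivalent of the routine above (a sketch with a
// hypothetical name, not part of the build): packuswb performs the
// unsigned saturation that av_clip_uint8() expresses here.
#if 0
static void put_pixels_clamped_ref(const DCTELEM *block, uint8_t *pixels,
                                   int line_size)
{
    int i, j;
    for (i = 0; i < 8; i++) {
        for (j = 0; j < 8; j++)
            pixels[j] = av_clip_uint8(block[j]);
        block  += 8;
        pixels += line_size;
    }
}
#endif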

#define put_signed_pixels_clamped_mmx_half(off) \
            "movq    "#off"(%2), %%mm1          \n\t"\
            "movq 16+"#off"(%2), %%mm2          \n\t"\
            "movq 32+"#off"(%2), %%mm3          \n\t"\
            "movq 48+"#off"(%2), %%mm4          \n\t"\
            "packsswb  8+"#off"(%2), %%mm1      \n\t"\
            "packsswb 24+"#off"(%2), %%mm2      \n\t"\
            "packsswb 40+"#off"(%2), %%mm3      \n\t"\
            "packsswb 56+"#off"(%2), %%mm4      \n\t"\
            "paddb %%mm0, %%mm1                 \n\t"\
            "paddb %%mm0, %%mm2                 \n\t"\
            "paddb %%mm0, %%mm3                 \n\t"\
            "paddb %%mm0, %%mm4                 \n\t"\
            "movq %%mm1, (%0)                   \n\t"\
            "movq %%mm2, (%0, %3)               \n\t"\
            "movq %%mm3, (%0, %3, 2)            \n\t"\
            "movq %%mm4, (%0, %1)               \n\t"

void ff_put_signed_pixels_clamped_mmx(const DCTELEM *block, uint8_t *pixels, int line_size)
{
    x86_reg line_skip = line_size;
    x86_reg line_skip3;

    __asm__ volatile (
            "movq "MANGLE(ff_pb_80)", %%mm0 \n\t"
            "lea (%3, %3, 2), %1                \n\t"
            put_signed_pixels_clamped_mmx_half(0)
            "lea (%0, %3, 4), %0                \n\t"
            put_signed_pixels_clamped_mmx_half(64)
            :"+&r" (pixels), "=&r" (line_skip3)
            :"r" (block), "r"(line_skip)
            :"memory");
}
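
// The signed variant above uses a bias instead of unsigned saturation:
// packsswb clamps each coefficient to [-128,127] and the paddb with
// ff_pb_80 (0x80 in every byte) shifts that range to [0,255], i.e. per
// pixel it computes av_clip_uint8(block[i] + 128).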

void ff_add_pixels_clamped_mmx(const DCTELEM *block, uint8_t *pixels, int line_size)
{
    const DCTELEM *p;
    uint8_t *pix;
    int i;

    /* read the pixels */
    p = block;
    pix = pixels;
    MOVQ_ZERO(mm7);
    i = 4;
    do {
        __asm__ volatile(
                "movq   (%2), %%mm0     \n\t"
                "movq   8(%2), %%mm1    \n\t"
                "movq   16(%2), %%mm2   \n\t"
                "movq   24(%2), %%mm3   \n\t"
                "movq   %0, %%mm4       \n\t"
                "movq   %1, %%mm6       \n\t"
                "movq   %%mm4, %%mm5    \n\t"
                "punpcklbw %%mm7, %%mm4 \n\t"
                "punpckhbw %%mm7, %%mm5 \n\t"
                "paddsw %%mm4, %%mm0    \n\t"
                "paddsw %%mm5, %%mm1    \n\t"
                "movq   %%mm6, %%mm5    \n\t"
                "punpcklbw %%mm7, %%mm6 \n\t"
                "punpckhbw %%mm7, %%mm5 \n\t"
                "paddsw %%mm6, %%mm2    \n\t"
                "paddsw %%mm5, %%mm3    \n\t"
                "packuswb %%mm1, %%mm0  \n\t"
                "packuswb %%mm3, %%mm2  \n\t"
                "movq   %%mm0, %0       \n\t"
                "movq   %%mm2, %1       \n\t"
                :"+m"(*pix), "+m"(*(pix+line_size))
                :"r"(p)
                :"memory");
        pix += line_size*2;
        p += 16;
    } while (--i);
}
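
// Per pixel the loop above computes pixels[j] = av_clip_uint8(pixels[j]
// + block[j]): punpck*bw widens the pixels to 16 bits, paddsw adds the
// residual with signed saturation, and packuswb clamps back to 0..255.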

static void put_pixels4_mmx(uint8_t *block, const uint8_t *pixels, int line_size, int h)
{
    __asm__ volatile(
         "lea (%3, %3), %%"REG_a"       \n\t"
         ".p2align 3                    \n\t"
         "1:                            \n\t"
         "movd (%1), %%mm0              \n\t"
         "movd (%1, %3), %%mm1          \n\t"
         "movd %%mm0, (%2)              \n\t"
         "movd %%mm1, (%2, %3)          \n\t"
         "add %%"REG_a", %1             \n\t"
         "add %%"REG_a", %2             \n\t"
         "movd (%1), %%mm0              \n\t"
         "movd (%1, %3), %%mm1          \n\t"
         "movd %%mm0, (%2)              \n\t"
         "movd %%mm1, (%2, %3)          \n\t"
         "add %%"REG_a", %1             \n\t"
         "add %%"REG_a", %2             \n\t"
         "subl $4, %0                   \n\t"
         "jnz 1b                        \n\t"
         : "+g"(h), "+r" (pixels),  "+r" (block)
         : "r"((x86_reg)line_size)
         : "%"REG_a, "memory"
        );
}

static void put_pixels8_mmx(uint8_t *block, const uint8_t *pixels, int line_size, int h)
{
    __asm__ volatile(
         "lea (%3, %3), %%"REG_a"       \n\t"
         ".p2align 3                    \n\t"
         "1:                            \n\t"
         "movq (%1), %%mm0              \n\t"
         "movq (%1, %3), %%mm1          \n\t"
         "movq %%mm0, (%2)              \n\t"
         "movq %%mm1, (%2, %3)          \n\t"
         "add %%"REG_a", %1             \n\t"
         "add %%"REG_a", %2             \n\t"
         "movq (%1), %%mm0              \n\t"
         "movq (%1, %3), %%mm1          \n\t"
         "movq %%mm0, (%2)              \n\t"
         "movq %%mm1, (%2, %3)          \n\t"
         "add %%"REG_a", %1             \n\t"
         "add %%"REG_a", %2             \n\t"
         "subl $4, %0                   \n\t"
         "jnz 1b                        \n\t"
         : "+g"(h), "+r" (pixels),  "+r" (block)
         : "r"((x86_reg)line_size)
         : "%"REG_a, "memory"
        );
}

static void put_pixels16_mmx(uint8_t *block, const uint8_t *pixels, int line_size, int h)
{
    __asm__ volatile(
         "lea (%3, %3), %%"REG_a"       \n\t"
         ".p2align 3                    \n\t"
         "1:                            \n\t"
         "movq (%1), %%mm0              \n\t"
         "movq 8(%1), %%mm4             \n\t"
         "movq (%1, %3), %%mm1          \n\t"
         "movq 8(%1, %3), %%mm5         \n\t"
         "movq %%mm0, (%2)              \n\t"
         "movq %%mm4, 8(%2)             \n\t"
         "movq %%mm1, (%2, %3)          \n\t"
         "movq %%mm5, 8(%2, %3)         \n\t"
         "add %%"REG_a", %1             \n\t"
         "add %%"REG_a", %2             \n\t"
         "movq (%1), %%mm0              \n\t"
         "movq 8(%1), %%mm4             \n\t"
         "movq (%1, %3), %%mm1          \n\t"
         "movq 8(%1, %3), %%mm5         \n\t"
         "movq %%mm0, (%2)              \n\t"
         "movq %%mm4, 8(%2)             \n\t"
         "movq %%mm1, (%2, %3)          \n\t"
         "movq %%mm5, 8(%2, %3)         \n\t"
         "add %%"REG_a", %1             \n\t"
         "add %%"REG_a", %2             \n\t"
         "subl $4, %0                   \n\t"
         "jnz 1b                        \n\t"
         : "+g"(h), "+r" (pixels),  "+r" (block)
         : "r"((x86_reg)line_size)
         : "%"REG_a, "memory"
        );
}

static void put_pixels16_sse2(uint8_t *block, const uint8_t *pixels, int line_size, int h)
{
    __asm__ volatile(
         "1:                            \n\t"
         "movdqu (%1), %%xmm0           \n\t"
         "movdqu (%1,%3), %%xmm1        \n\t"
         "movdqu (%1,%3,2), %%xmm2      \n\t"
         "movdqu (%1,%4), %%xmm3        \n\t"
         "lea (%1,%3,4), %1             \n\t"
         "movdqa %%xmm0, (%2)           \n\t"
         "movdqa %%xmm1, (%2,%3)        \n\t"
         "movdqa %%xmm2, (%2,%3,2)      \n\t"
         "movdqa %%xmm3, (%2,%4)        \n\t"
         "subl $4, %0                   \n\t"
         "lea (%2,%3,4), %2             \n\t"
         "jnz 1b                        \n\t"
         : "+g"(h), "+r" (pixels),  "+r" (block)
         : "r"((x86_reg)line_size), "r"((x86_reg)3L*line_size)
         : "memory"
        );
}

static void avg_pixels16_sse2(uint8_t *block, const uint8_t *pixels, int line_size, int h)
{
    __asm__ volatile(
         "1:                            \n\t"
         "movdqu (%1), %%xmm0           \n\t"
         "movdqu (%1,%3), %%xmm1        \n\t"
         "movdqu (%1,%3,2), %%xmm2      \n\t"
         "movdqu (%1,%4), %%xmm3        \n\t"
         "lea (%1,%3,4), %1             \n\t"
         "pavgb  (%2), %%xmm0           \n\t"
         "pavgb  (%2,%3), %%xmm1        \n\t"
         "pavgb  (%2,%3,2), %%xmm2      \n\t"
         "pavgb  (%2,%4), %%xmm3        \n\t"
         "movdqa %%xmm0, (%2)           \n\t"
         "movdqa %%xmm1, (%2,%3)        \n\t"
         "movdqa %%xmm2, (%2,%3,2)      \n\t"
         "movdqa %%xmm3, (%2,%4)        \n\t"
         "subl $4, %0                   \n\t"
         "lea (%2,%3,4), %2             \n\t"
         "jnz 1b                        \n\t"
         : "+g"(h), "+r" (pixels),  "+r" (block)
         : "r"((x86_reg)line_size), "r"((x86_reg)3L*line_size)
         : "memory"
        );
}
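
// Note for the two SSE2 routines above: the source rows are loaded with
// movdqu (unaligned) while the destination uses movdqa, so 'block' is
// assumed to be 16-byte aligned.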

#define CLEAR_BLOCKS(name,n) \
static void name(DCTELEM *blocks)\
{\
    __asm__ volatile(\
                "pxor %%mm7, %%mm7              \n\t"\
                "mov     %1, %%"REG_a"          \n\t"\
                "1:                             \n\t"\
                "movq %%mm7, (%0, %%"REG_a")    \n\t"\
                "movq %%mm7, 8(%0, %%"REG_a")   \n\t"\
                "movq %%mm7, 16(%0, %%"REG_a")  \n\t"\
                "movq %%mm7, 24(%0, %%"REG_a")  \n\t"\
                "add $32, %%"REG_a"             \n\t"\
                " js 1b                         \n\t"\
                : : "r" (((uint8_t *)blocks)+128*n),\
                    "i" (-128*n)\
                : "%"REG_a\
        );\
}
CLEAR_BLOCKS(clear_blocks_mmx, 6)
CLEAR_BLOCKS(clear_block_mmx, 1)

static void clear_block_sse(DCTELEM *block)
{
    __asm__ volatile(
        "xorps  %%xmm0, %%xmm0  \n"
        "movaps %%xmm0,    (%0) \n"
        "movaps %%xmm0,  16(%0) \n"
        "movaps %%xmm0,  32(%0) \n"
        "movaps %%xmm0,  48(%0) \n"
        "movaps %%xmm0,  64(%0) \n"
        "movaps %%xmm0,  80(%0) \n"
        "movaps %%xmm0,  96(%0) \n"
        "movaps %%xmm0, 112(%0) \n"
        :: "r"(block)
        : "memory"
    );
}

static void clear_blocks_sse(DCTELEM *blocks)
{
    __asm__ volatile(
        "xorps  %%xmm0, %%xmm0  \n"
        "mov     %1, %%"REG_a"  \n"
        "1:                     \n"
        "movaps %%xmm0,    (%0, %%"REG_a") \n"
        "movaps %%xmm0,  16(%0, %%"REG_a") \n"
        "movaps %%xmm0,  32(%0, %%"REG_a") \n"
        "movaps %%xmm0,  48(%0, %%"REG_a") \n"
        "movaps %%xmm0,  64(%0, %%"REG_a") \n"
        "movaps %%xmm0,  80(%0, %%"REG_a") \n"
        "movaps %%xmm0,  96(%0, %%"REG_a") \n"
        "movaps %%xmm0, 112(%0, %%"REG_a") \n"
        "add $128, %%"REG_a"    \n"
        " js 1b                 \n"
        : : "r" (((uint8_t *)blocks)+128*6),
            "i" (-128*6)
        : "%"REG_a
    );
}

static void add_bytes_mmx(uint8_t *dst, uint8_t *src, int w){
    x86_reg i=0;
    __asm__ volatile(
        "jmp 2f                         \n\t"
        "1:                             \n\t"
        "movq  (%1, %0), %%mm0          \n\t"
        "movq  (%2, %0), %%mm1          \n\t"
        "paddb %%mm0, %%mm1             \n\t"
        "movq %%mm1, (%2, %0)           \n\t"
        "movq 8(%1, %0), %%mm0          \n\t"
        "movq 8(%2, %0), %%mm1          \n\t"
        "paddb %%mm0, %%mm1             \n\t"
        "movq %%mm1, 8(%2, %0)          \n\t"
        "add $16, %0                    \n\t"
        "2:                             \n\t"
        "cmp %3, %0                     \n\t"
        " js 1b                         \n\t"
        : "+r" (i)
        : "r"(src), "r"(dst), "r"((x86_reg)w-15)
    );
    for(; i<w; i++)
        dst[i+0] += src[i+0];
}

#if HAVE_7REGS
static void add_hfyu_median_prediction_cmov(uint8_t *dst, const uint8_t *top, const uint8_t *diff, int w, int *left, int *left_top) {
    x86_reg w2 = -w;
    x86_reg x;
    int l = *left & 0xff;
    int tl = *left_top & 0xff;
    int t;
    __asm__ volatile(
        "mov    %7, %3 \n"
        "1: \n"
        "movzbl (%3,%4), %2 \n"
        "mov    %2, %k3 \n"
        "sub   %b1, %b3 \n"
        "add   %b0, %b3 \n"
        "mov    %2, %1 \n"
        "cmp    %0, %2 \n"
        "cmovg  %0, %2 \n"
        "cmovg  %1, %0 \n"
        "cmp   %k3, %0 \n"
        "cmovg %k3, %0 \n"
        "mov    %7, %3 \n"
        "cmp    %2, %0 \n"
        "cmovl  %2, %0 \n"
        "add (%6,%4), %b0 \n"
        "mov   %b0, (%5,%4) \n"
        "inc    %4 \n"
        "jl 1b \n"
        :"+&q"(l), "+&q"(tl), "=&r"(t), "=&q"(x), "+&r"(w2)
        :"r"(dst+w), "r"(diff+w), "rm"(top+w)
    );
    *left = l;
    *left_top = tl;
}
#endif
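
// A scalar sketch (hypothetical name, not part of the build) of what
// the cmov version computes: the HuffYUV median predictor, as in the C
// fallback add_hfyu_median_prediction_c(); mid_pred() from
// libavcodec/mathops.h is the median of three.
#if 0
static void add_hfyu_median_prediction_ref(uint8_t *dst, const uint8_t *top,
                                           const uint8_t *diff, int w,
                                           int *left, int *left_top)
{
    int i;
    uint8_t l  = *left;
    uint8_t tl = *left_top;
    for (i = 0; i < w; i++) {
        l  = mid_pred(l, top[i], (uint8_t)(l + top[i] - tl)) + diff[i];
        tl = top[i];
        dst[i] = l;
    }
    *left     = l;
    *left_top = tl;
}
#endif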

#define H263_LOOP_FILTER \
        "pxor %%mm7, %%mm7              \n\t"\
        "movq  %0, %%mm0                \n\t"\
        "movq  %0, %%mm1                \n\t"\
        "movq  %3, %%mm2                \n\t"\
        "movq  %3, %%mm3                \n\t"\
        "punpcklbw %%mm7, %%mm0         \n\t"\
        "punpckhbw %%mm7, %%mm1         \n\t"\
        "punpcklbw %%mm7, %%mm2         \n\t"\
        "punpckhbw %%mm7, %%mm3         \n\t"\
        "psubw %%mm2, %%mm0             \n\t"\
        "psubw %%mm3, %%mm1             \n\t"\
        "movq  %1, %%mm2                \n\t"\
        "movq  %1, %%mm3                \n\t"\
        "movq  %2, %%mm4                \n\t"\
        "movq  %2, %%mm5                \n\t"\
        "punpcklbw %%mm7, %%mm2         \n\t"\
        "punpckhbw %%mm7, %%mm3         \n\t"\
        "punpcklbw %%mm7, %%mm4         \n\t"\
        "punpckhbw %%mm7, %%mm5         \n\t"\
        "psubw %%mm2, %%mm4             \n\t"\
        "psubw %%mm3, %%mm5             \n\t"\
        "psllw $2, %%mm4                \n\t"\
        "psllw $2, %%mm5                \n\t"\
        "paddw %%mm0, %%mm4             \n\t"\
        "paddw %%mm1, %%mm5             \n\t"\
        "pxor %%mm6, %%mm6              \n\t"\
        "pcmpgtw %%mm4, %%mm6           \n\t"\
        "pcmpgtw %%mm5, %%mm7           \n\t"\
        "pxor %%mm6, %%mm4              \n\t"\
        "pxor %%mm7, %%mm5              \n\t"\
        "psubw %%mm6, %%mm4             \n\t"\
        "psubw %%mm7, %%mm5             \n\t"\
        "psrlw $3, %%mm4                \n\t"\
        "psrlw $3, %%mm5                \n\t"\
        "packuswb %%mm5, %%mm4          \n\t"\
        "packsswb %%mm7, %%mm6          \n\t"\
        "pxor %%mm7, %%mm7              \n\t"\
        "movd %4, %%mm2                 \n\t"\
        "punpcklbw %%mm2, %%mm2         \n\t"\
        "punpcklbw %%mm2, %%mm2         \n\t"\
        "punpcklbw %%mm2, %%mm2         \n\t"\
        "psubusb %%mm4, %%mm2           \n\t"\
        "movq %%mm2, %%mm3              \n\t"\
        "psubusb %%mm4, %%mm3           \n\t"\
        "psubb %%mm3, %%mm2             \n\t"\
        "movq %1, %%mm3                 \n\t"\
        "movq %2, %%mm4                 \n\t"\
        "pxor %%mm6, %%mm3              \n\t"\
        "pxor %%mm6, %%mm4              \n\t"\
        "paddusb %%mm2, %%mm3           \n\t"\
        "psubusb %%mm2, %%mm4           \n\t"\
        "pxor %%mm6, %%mm3              \n\t"\
        "pxor %%mm6, %%mm4              \n\t"\
        "paddusb %%mm2, %%mm2           \n\t"\
        "packsswb %%mm1, %%mm0          \n\t"\
        "pcmpgtb %%mm0, %%mm7           \n\t"\
        "pxor %%mm7, %%mm0              \n\t"\
        "psubb %%mm7, %%mm0             \n\t"\
        "movq %%mm0, %%mm1              \n\t"\
        "psubusb %%mm2, %%mm0           \n\t"\
        "psubb %%mm0, %%mm1             \n\t"\
        "pand %5, %%mm1                 \n\t"\
        "psrlw $2, %%mm1                \n\t"\
        "pxor %%mm7, %%mm1              \n\t"\
        "psubb %%mm7, %%mm1             \n\t"\
        "movq %0, %%mm5                 \n\t"\
        "movq %3, %%mm6                 \n\t"\
        "psubb %%mm1, %%mm5             \n\t"\
        "paddb %%mm1, %%mm6             \n\t"

static void h263_v_loop_filter_mmx(uint8_t *src, int stride, int qscale){
    if(CONFIG_H263_DECODER || CONFIG_H263_ENCODER) {
    const int strength= ff_h263_loop_filter_strength[qscale];

    __asm__ volatile(

        H263_LOOP_FILTER

        "movq %%mm3, %1                 \n\t"
        "movq %%mm4, %2                 \n\t"
        "movq %%mm5, %0                 \n\t"
        "movq %%mm6, %3                 \n\t"
        : "+m" (*(uint64_t*)(src - 2*stride)),
          "+m" (*(uint64_t*)(src - 1*stride)),
          "+m" (*(uint64_t*)(src + 0*stride)),
          "+m" (*(uint64_t*)(src + 1*stride))
        : "g" (2*strength), "m"(ff_pb_FC)
    );
    }
}
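
// Roughly, per column, with p0..p3 the two pixels on each side of the
// edge, H263_LOOP_FILTER computes (see the C fallback
// h263_v_loop_filter_c() for the exact definition):
//     d  = (p0 - p3 + 4 * (p2 - p1)) / 8;
//     d1 = d folded by the strength-dependent ramp
//          (0 / -2*strength-d / d / 2*strength-d / 0);
//     p1 += d1;  p2 -= d1;
//     d2 = av_clip((p0 - p3) / 4, -|d1|/2, |d1|/2);
//     p0 -= d2;  p3 += d2;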

static void h263_h_loop_filter_mmx(uint8_t *src, int stride, int qscale){
    if(CONFIG_H263_DECODER || CONFIG_H263_ENCODER) {
    const int strength= ff_h263_loop_filter_strength[qscale];
    DECLARE_ALIGNED(8, uint64_t, temp)[4];
    uint8_t *btemp= (uint8_t*)temp;

    src -= 2;

    transpose4x4(btemp  , src           , 8, stride);
    transpose4x4(btemp+4, src + 4*stride, 8, stride);
    __asm__ volatile(
        H263_LOOP_FILTER // 5 3 4 6

        : "+m" (temp[0]),
          "+m" (temp[1]),
          "+m" (temp[2]),
          "+m" (temp[3])
        : "g" (2*strength), "m"(ff_pb_FC)
    );

    __asm__ volatile(
        "movq %%mm5, %%mm1              \n\t"
        "movq %%mm4, %%mm0              \n\t"
        "punpcklbw %%mm3, %%mm5         \n\t"
        "punpcklbw %%mm6, %%mm4         \n\t"
        "punpckhbw %%mm3, %%mm1         \n\t"
        "punpckhbw %%mm6, %%mm0         \n\t"
        "movq %%mm5, %%mm3              \n\t"
        "movq %%mm1, %%mm6              \n\t"
        "punpcklwd %%mm4, %%mm5         \n\t"
        "punpcklwd %%mm0, %%mm1         \n\t"
        "punpckhwd %%mm4, %%mm3         \n\t"
        "punpckhwd %%mm0, %%mm6         \n\t"
        "movd %%mm5, (%0)               \n\t"
        "punpckhdq %%mm5, %%mm5         \n\t"
        "movd %%mm5, (%0,%2)            \n\t"
        "movd %%mm3, (%0,%2,2)          \n\t"
        "punpckhdq %%mm3, %%mm3         \n\t"
        "movd %%mm3, (%0,%3)            \n\t"
        "movd %%mm1, (%1)               \n\t"
        "punpckhdq %%mm1, %%mm1         \n\t"
        "movd %%mm1, (%1,%2)            \n\t"
        "movd %%mm6, (%1,%2,2)          \n\t"
        "punpckhdq %%mm6, %%mm6         \n\t"
        "movd %%mm6, (%1,%3)            \n\t"
        :: "r" (src),
           "r" (src + 4*stride),
           "r" ((x86_reg)   stride ),
           "r" ((x86_reg)(3*stride))
    );
    }
}

/* Draw the edges of width 'w' of an image of size width x height.
   This MMX version can only handle w==8 || w==16. */
static void draw_edges_mmx(uint8_t *buf, int wrap, int width, int height, int w, int h, int sides)
{
    uint8_t *ptr, *last_line;
    int i;

    last_line = buf + (height - 1) * wrap;
    /* left and right */
    ptr = buf;
    if(w==8)
    {
        __asm__ volatile(
                "1:                             \n\t"
                "movd (%0), %%mm0               \n\t"
                "punpcklbw %%mm0, %%mm0         \n\t"
                "punpcklwd %%mm0, %%mm0         \n\t"
                "punpckldq %%mm0, %%mm0         \n\t"
                "movq %%mm0, -8(%0)             \n\t"
                "movq -8(%0, %2), %%mm1         \n\t"
                "punpckhbw %%mm1, %%mm1         \n\t"
                "punpckhwd %%mm1, %%mm1         \n\t"
                "punpckhdq %%mm1, %%mm1         \n\t"
                "movq %%mm1, (%0, %2)           \n\t"
                "add %1, %0                     \n\t"
                "cmp %3, %0                     \n\t"
                " jb 1b                         \n\t"
                : "+r" (ptr)
                : "r" ((x86_reg)wrap), "r" ((x86_reg)width), "r" (ptr + wrap*height)
        );
    }
    else
    {
        __asm__ volatile(
                "1:                             \n\t"
                "movd (%0), %%mm0               \n\t"
                "punpcklbw %%mm0, %%mm0         \n\t"
                "punpcklwd %%mm0, %%mm0         \n\t"
                "punpckldq %%mm0, %%mm0         \n\t"
                "movq %%mm0, -8(%0)             \n\t"
                "movq %%mm0, -16(%0)            \n\t"
                "movq -8(%0, %2), %%mm1         \n\t"
                "punpckhbw %%mm1, %%mm1         \n\t"
                "punpckhwd %%mm1, %%mm1         \n\t"
                "punpckhdq %%mm1, %%mm1         \n\t"
                "movq %%mm1, (%0, %2)           \n\t"
                "movq %%mm1, 8(%0, %2)          \n\t"
                "add %1, %0                     \n\t"
                "cmp %3, %0                     \n\t"
                " jb 1b                         \n\t"
                : "+r" (ptr)
                : "r" ((x86_reg)wrap), "r" ((x86_reg)width), "r" (ptr + wrap*height)
        );
    }

    /* top and bottom (and hopefully also the corners) */
    if (sides&EDGE_TOP) {
        for(i = 0; i < h; i += 4) {
            ptr= buf - (i + 1) * wrap - w;
            __asm__ volatile(
                    "1:                             \n\t"
                    "movq (%1, %0), %%mm0           \n\t"
                    "movq %%mm0, (%0)               \n\t"
                    "movq %%mm0, (%0, %2)           \n\t"
                    "movq %%mm0, (%0, %2, 2)        \n\t"
                    "movq %%mm0, (%0, %3)           \n\t"
                    "add $8, %0                     \n\t"
                    "cmp %4, %0                     \n\t"
                    " jb 1b                         \n\t"
                    : "+r" (ptr)
                    : "r" ((x86_reg)buf - (x86_reg)ptr - w), "r" ((x86_reg)-wrap), "r" ((x86_reg)-wrap*3), "r" (ptr+width+2*w)
            );
        }
    }

    if (sides&EDGE_BOTTOM) {
        for(i = 0; i < h; i += 4) {
            ptr= last_line + (i + 1) * wrap - w;
            __asm__ volatile(
                    "1:                             \n\t"
                    "movq (%1, %0), %%mm0           \n\t"
                    "movq %%mm0, (%0)               \n\t"
                    "movq %%mm0, (%0, %2)           \n\t"
                    "movq %%mm0, (%0, %2, 2)        \n\t"
                    "movq %%mm0, (%0, %3)           \n\t"
                    "add $8, %0                     \n\t"
                    "cmp %4, %0                     \n\t"
                    " jb 1b                         \n\t"
                    : "+r" (ptr)
                    : "r" ((x86_reg)last_line - (x86_reg)ptr - w), "r" ((x86_reg)wrap), "r" ((x86_reg)wrap*3), "r" (ptr+width+2*w)
            );
        }
    }
}
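
// A scalar sketch (hypothetical name, not part of the build) of the
// left/right replication done above for one row: the first and last
// pixel are splatted across the w-byte border.
#if 0
static void draw_edges_row_ref(uint8_t *row, int width, int w)
{
    int i;
    for (i = 1; i <= w; i++) {
        row[-i]            = row[0];         /* left border  */
        row[width - 1 + i] = row[width - 1]; /* right border */
    }
}
#endif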

#define QPEL_V_LOW(m3,m4,m5,m6, pw_20, pw_3, rnd, in0, in1, in2, in7, out, OP)\
        "paddw " #m4 ", " #m3 "           \n\t" /* x1 */\
        "movq "MANGLE(ff_pw_20)", %%mm4   \n\t" /* 20 */\
        "pmullw " #m3 ", %%mm4            \n\t" /* 20x1 */\
        "movq "#in7", " #m3 "             \n\t" /* d */\
        "movq "#in0", %%mm5               \n\t" /* D */\
        "paddw " #m3 ", %%mm5             \n\t" /* x4 */\
        "psubw %%mm5, %%mm4               \n\t" /* 20x1 - x4 */\
        "movq "#in1", %%mm5               \n\t" /* C */\
        "movq "#in2", %%mm6               \n\t" /* B */\
        "paddw " #m6 ", %%mm5             \n\t" /* x3 */\
        "paddw " #m5 ", %%mm6             \n\t" /* x2 */\
        "paddw %%mm6, %%mm6               \n\t" /* 2x2 */\
        "psubw %%mm6, %%mm5               \n\t" /* -2x2 + x3 */\
        "pmullw "MANGLE(ff_pw_3)", %%mm5  \n\t" /* -6x2 + 3x3 */\
        "paddw " #rnd ", %%mm4            \n\t" /* 20x1 - x4 + rnd */\
        "paddw %%mm4, %%mm5               \n\t" /* 20x1 - 6x2 + 3x3 - x4 */\
        "psraw $5, %%mm5                  \n\t"\
        "packuswb %%mm5, %%mm5            \n\t"\
        OP(%%mm5, out, %%mm7, d)
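
// QPEL_V_LOW evaluates four outputs of the MPEG-4 quarter-pel lowpass
// filter: with x1..x4 the symmetric pair sums of the 8-tap window (x1
// innermost, x4 outermost),
//     out = av_clip_uint8((20*x1 - 6*x2 + 3*x3 - x4 + rnd) >> 5)
// where rnd is the ROUNDER half-constant supplied by the instantiating
// macro; the clip is done by packuswb.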
00881 
00882 #define QPEL_BASE(OPNAME, ROUNDER, RND, OP_MMX2, OP_3DNOW)\
00883 static void OPNAME ## mpeg4_qpel16_h_lowpass_mmx2(uint8_t *dst, uint8_t *src, int dstStride, int srcStride, int h){\
00884     uint64_t temp;\
00885 \
00886     __asm__ volatile(\
00887         "pxor %%mm7, %%mm7                \n\t"\
00888         "1:                               \n\t"\
00889         "movq  (%0), %%mm0                \n\t" /* ABCDEFGH */\
00890         "movq %%mm0, %%mm1                \n\t" /* ABCDEFGH */\
00891         "movq %%mm0, %%mm2                \n\t" /* ABCDEFGH */\
00892         "punpcklbw %%mm7, %%mm0           \n\t" /* 0A0B0C0D */\
00893         "punpckhbw %%mm7, %%mm1           \n\t" /* 0E0F0G0H */\
00894         "pshufw $0x90, %%mm0, %%mm5       \n\t" /* 0A0A0B0C */\
00895         "pshufw $0x41, %%mm0, %%mm6       \n\t" /* 0B0A0A0B */\
00896         "movq %%mm2, %%mm3                \n\t" /* ABCDEFGH */\
00897         "movq %%mm2, %%mm4                \n\t" /* ABCDEFGH */\
00898         "psllq $8, %%mm2                  \n\t" /* 0ABCDEFG */\
00899         "psllq $16, %%mm3                 \n\t" /* 00ABCDEF */\
00900         "psllq $24, %%mm4                 \n\t" /* 000ABCDE */\
00901         "punpckhbw %%mm7, %%mm2           \n\t" /* 0D0E0F0G */\
00902         "punpckhbw %%mm7, %%mm3           \n\t" /* 0C0D0E0F */\
00903         "punpckhbw %%mm7, %%mm4           \n\t" /* 0B0C0D0E */\
00904         "paddw %%mm3, %%mm5               \n\t" /* b */\
00905         "paddw %%mm2, %%mm6               \n\t" /* c */\
00906         "paddw %%mm5, %%mm5               \n\t" /* 2b */\
00907         "psubw %%mm5, %%mm6               \n\t" /* c - 2b */\
00908         "pshufw $0x06, %%mm0, %%mm5       \n\t" /* 0C0B0A0A */\
00909         "pmullw "MANGLE(ff_pw_3)", %%mm6  \n\t" /* 3c - 6b */\
00910         "paddw %%mm4, %%mm0               \n\t" /* a */\
00911         "paddw %%mm1, %%mm5               \n\t" /* d */\
00912         "pmullw "MANGLE(ff_pw_20)", %%mm0 \n\t" /* 20a */\
00913         "psubw %%mm5, %%mm0               \n\t" /* 20a - d */\
00914         "paddw %6, %%mm6                  \n\t"\
00915         "paddw %%mm6, %%mm0               \n\t" /* 20a - 6b + 3c - d */\
00916         "psraw $5, %%mm0                  \n\t"\
00917         "movq %%mm0, %5                   \n\t"\
00918         /* mm1=EFGH, mm2=DEFG, mm3=CDEF, mm4=BCDE, mm7=0 */\
00919         \
00920         "movq 5(%0), %%mm0                \n\t" /* FGHIJKLM */\
00921         "movq %%mm0, %%mm5                \n\t" /* FGHIJKLM */\
00922         "movq %%mm0, %%mm6                \n\t" /* FGHIJKLM */\
00923         "psrlq $8, %%mm0                  \n\t" /* GHIJKLM0 */\
00924         "psrlq $16, %%mm5                 \n\t" /* HIJKLM00 */\
00925         "punpcklbw %%mm7, %%mm0           \n\t" /* 0G0H0I0J */\
00926         "punpcklbw %%mm7, %%mm5           \n\t" /* 0H0I0J0K */\
00927         "paddw %%mm0, %%mm2               \n\t" /* b */\
00928         "paddw %%mm5, %%mm3               \n\t" /* c */\
00929         "paddw %%mm2, %%mm2               \n\t" /* 2b */\
00930         "psubw %%mm2, %%mm3               \n\t" /* c - 2b */\
00931         "movq %%mm6, %%mm2                \n\t" /* FGHIJKLM */\
00932         "psrlq $24, %%mm6                 \n\t" /* IJKLM000 */\
00933         "punpcklbw %%mm7, %%mm2           \n\t" /* 0F0G0H0I */\
00934         "punpcklbw %%mm7, %%mm6           \n\t" /* 0I0J0K0L */\
00935         "pmullw "MANGLE(ff_pw_3)", %%mm3  \n\t" /* 3c - 6b */\
00936         "paddw %%mm2, %%mm1               \n\t" /* a */\
00937         "paddw %%mm6, %%mm4               \n\t" /* d */\
00938         "pmullw "MANGLE(ff_pw_20)", %%mm1 \n\t" /* 20a */\
00939         "psubw %%mm4, %%mm3               \n\t" /* - 6b +3c - d */\
00940         "paddw %6, %%mm1                  \n\t"\
00941         "paddw %%mm1, %%mm3               \n\t" /* 20a - 6b +3c - d */\
00942         "psraw $5, %%mm3                  \n\t"\
00943         "movq %5, %%mm1                   \n\t"\
00944         "packuswb %%mm3, %%mm1            \n\t"\
00945         OP_MMX2(%%mm1, (%1),%%mm4, q)\
00946         /* mm0= GHIJ, mm2=FGHI, mm5=HIJK, mm6=IJKL, mm7=0 */\
00947         \
00948         "movq 9(%0), %%mm1                \n\t" /* JKLMNOPQ */\
00949         "movq %%mm1, %%mm4                \n\t" /* JKLMNOPQ */\
00950         "movq %%mm1, %%mm3                \n\t" /* JKLMNOPQ */\
00951         "psrlq $8, %%mm1                  \n\t" /* KLMNOPQ0 */\
00952         "psrlq $16, %%mm4                 \n\t" /* LMNOPQ00 */\
00953         "punpcklbw %%mm7, %%mm1           \n\t" /* 0K0L0M0N */\
00954         "punpcklbw %%mm7, %%mm4           \n\t" /* 0L0M0N0O */\
00955         "paddw %%mm1, %%mm5               \n\t" /* b */\
00956         "paddw %%mm4, %%mm0               \n\t" /* c */\
00957         "paddw %%mm5, %%mm5               \n\t" /* 2b */\
00958         "psubw %%mm5, %%mm0               \n\t" /* c - 2b */\
00959         "movq %%mm3, %%mm5                \n\t" /* JKLMNOPQ */\
00960         "psrlq $24, %%mm3                 \n\t" /* MNOPQ000 */\
00961         "pmullw "MANGLE(ff_pw_3)", %%mm0  \n\t" /* 3c - 6b */\
00962         "punpcklbw %%mm7, %%mm3           \n\t" /* 0M0N0O0P */\
00963         "paddw %%mm3, %%mm2               \n\t" /* d */\
00964         "psubw %%mm2, %%mm0               \n\t" /* -6b + 3c - d */\
00965         "movq %%mm5, %%mm2                \n\t" /* JKLMNOPQ */\
00966         "punpcklbw %%mm7, %%mm2           \n\t" /* 0J0K0L0M */\
00967         "punpckhbw %%mm7, %%mm5           \n\t" /* 0N0O0P0Q */\
00968         "paddw %%mm2, %%mm6               \n\t" /* a */\
00969         "pmullw "MANGLE(ff_pw_20)", %%mm6 \n\t" /* 20a */\
00970         "paddw %6, %%mm0                  \n\t"\
00971         "paddw %%mm6, %%mm0               \n\t" /* 20a - 6b + 3c - d */\
00972         "psraw $5, %%mm0                  \n\t"\
00973         /* mm1=KLMN, mm2=JKLM, mm3=MNOP, mm4=LMNO, mm5=NOPQ mm7=0 */\
00974         \
00975         "paddw %%mm5, %%mm3               \n\t" /* a */\
00976         "pshufw $0xF9, %%mm5, %%mm6       \n\t" /* 0O0P0Q0Q */\
00977         "paddw %%mm4, %%mm6               \n\t" /* b */\
00978         "pshufw $0xBE, %%mm5, %%mm4       \n\t" /* 0P0Q0Q0P */\
00979         "pshufw $0x6F, %%mm5, %%mm5       \n\t" /* 0Q0Q0P0O */\
00980         "paddw %%mm1, %%mm4               \n\t" /* c */\
00981         "paddw %%mm2, %%mm5               \n\t" /* d */\
00982         "paddw %%mm6, %%mm6               \n\t" /* 2b */\
00983         "psubw %%mm6, %%mm4               \n\t" /* c - 2b */\
00984         "pmullw "MANGLE(ff_pw_20)", %%mm3 \n\t" /* 20a */\
00985         "pmullw "MANGLE(ff_pw_3)", %%mm4  \n\t" /* 3c - 6b */\
00986         "psubw %%mm5, %%mm3               \n\t" /* -6b + 3c - d */\
00987         "paddw %6, %%mm4                  \n\t"\
00988         "paddw %%mm3, %%mm4               \n\t" /* 20a - 6b + 3c - d */\
00989         "psraw $5, %%mm4                  \n\t"\
00990         "packuswb %%mm4, %%mm0            \n\t"\
00991         OP_MMX2(%%mm0, 8(%1), %%mm4, q)\
00992         \
00993         "add %3, %0                       \n\t"\
00994         "add %4, %1                       \n\t"\
00995         "decl %2                          \n\t"\
00996         " jnz 1b                          \n\t"\
00997         : "+a"(src), "+c"(dst), "+D"(h)\
00998         : "d"((x86_reg)srcStride), "S"((x86_reg)dstStride), /*"m"(ff_pw_20), "m"(ff_pw_3),*/ "m"(temp), "m"(ROUNDER)\
00999         : "memory"\
01000     );\
01001 }\
01002 \
01003 static void OPNAME ## mpeg4_qpel16_h_lowpass_3dnow(uint8_t *dst, uint8_t *src, int dstStride, int srcStride, int h){\
01004     int i;\
01005     int16_t temp[16];\
01006     /* quick HACK, XXX FIXME MUST be optimized */\
01007     for(i=0; i<h; i++)\
01008     {\
01009         temp[ 0]= (src[ 0]+src[ 1])*20 - (src[ 0]+src[ 2])*6 + (src[ 1]+src[ 3])*3 - (src[ 2]+src[ 4]);\
01010         temp[ 1]= (src[ 1]+src[ 2])*20 - (src[ 0]+src[ 3])*6 + (src[ 0]+src[ 4])*3 - (src[ 1]+src[ 5]);\
01011         temp[ 2]= (src[ 2]+src[ 3])*20 - (src[ 1]+src[ 4])*6 + (src[ 0]+src[ 5])*3 - (src[ 0]+src[ 6]);\
01012         temp[ 3]= (src[ 3]+src[ 4])*20 - (src[ 2]+src[ 5])*6 + (src[ 1]+src[ 6])*3 - (src[ 0]+src[ 7]);\
01013         temp[ 4]= (src[ 4]+src[ 5])*20 - (src[ 3]+src[ 6])*6 + (src[ 2]+src[ 7])*3 - (src[ 1]+src[ 8]);\
01014         temp[ 5]= (src[ 5]+src[ 6])*20 - (src[ 4]+src[ 7])*6 + (src[ 3]+src[ 8])*3 - (src[ 2]+src[ 9]);\
01015         temp[ 6]= (src[ 6]+src[ 7])*20 - (src[ 5]+src[ 8])*6 + (src[ 4]+src[ 9])*3 - (src[ 3]+src[10]);\
01016         temp[ 7]= (src[ 7]+src[ 8])*20 - (src[ 6]+src[ 9])*6 + (src[ 5]+src[10])*3 - (src[ 4]+src[11]);\
01017         temp[ 8]= (src[ 8]+src[ 9])*20 - (src[ 7]+src[10])*6 + (src[ 6]+src[11])*3 - (src[ 5]+src[12]);\
01018         temp[ 9]= (src[ 9]+src[10])*20 - (src[ 8]+src[11])*6 + (src[ 7]+src[12])*3 - (src[ 6]+src[13]);\
01019         temp[10]= (src[10]+src[11])*20 - (src[ 9]+src[12])*6 + (src[ 8]+src[13])*3 - (src[ 7]+src[14]);\
01020         temp[11]= (src[11]+src[12])*20 - (src[10]+src[13])*6 + (src[ 9]+src[14])*3 - (src[ 8]+src[15]);\
01021         temp[12]= (src[12]+src[13])*20 - (src[11]+src[14])*6 + (src[10]+src[15])*3 - (src[ 9]+src[16]);\
01022         temp[13]= (src[13]+src[14])*20 - (src[12]+src[15])*6 + (src[11]+src[16])*3 - (src[10]+src[16]);\
01023         temp[14]= (src[14]+src[15])*20 - (src[13]+src[16])*6 + (src[12]+src[16])*3 - (src[11]+src[15]);\
01024         temp[15]= (src[15]+src[16])*20 - (src[14]+src[16])*6 + (src[13]+src[15])*3 - (src[12]+src[14]);\
01025         __asm__ volatile(\
01026             "movq (%0), %%mm0               \n\t"\
01027             "movq 8(%0), %%mm1              \n\t"\
01028             "paddw %2, %%mm0                \n\t"\
01029             "paddw %2, %%mm1                \n\t"\
01030             "psraw $5, %%mm0                \n\t"\
01031             "psraw $5, %%mm1                \n\t"\
01032             "packuswb %%mm1, %%mm0          \n\t"\
01033             OP_3DNOW(%%mm0, (%1), %%mm1, q)\
01034             "movq 16(%0), %%mm0             \n\t"\
01035             "movq 24(%0), %%mm1             \n\t"\
01036             "paddw %2, %%mm0                \n\t"\
01037             "paddw %2, %%mm1                \n\t"\
01038             "psraw $5, %%mm0                \n\t"\
01039             "psraw $5, %%mm1                \n\t"\
01040             "packuswb %%mm1, %%mm0          \n\t"\
01041             OP_3DNOW(%%mm0, 8(%1), %%mm1, q)\
01042             :: "r"(temp), "r"(dst), "m"(ROUNDER)\
01043             : "memory"\
01044         );\
01045         dst+=dstStride;\
01046         src+=srcStride;\
01047     }\
01048 }\
01049 \
01050 static void OPNAME ## mpeg4_qpel8_h_lowpass_mmx2(uint8_t *dst, uint8_t *src, int dstStride, int srcStride, int h){\
01051     __asm__ volatile(\
01052         "pxor %%mm7, %%mm7                \n\t"\
01053         "1:                               \n\t"\
01054         "movq  (%0), %%mm0                \n\t" /* ABCDEFGH */\
01055         "movq %%mm0, %%mm1                \n\t" /* ABCDEFGH */\
01056         "movq %%mm0, %%mm2                \n\t" /* ABCDEFGH */\
01057         "punpcklbw %%mm7, %%mm0           \n\t" /* 0A0B0C0D */\
01058         "punpckhbw %%mm7, %%mm1           \n\t" /* 0E0F0G0H */\
01059         "pshufw $0x90, %%mm0, %%mm5       \n\t" /* 0A0A0B0C */\
01060         "pshufw $0x41, %%mm0, %%mm6       \n\t" /* 0B0A0A0B */\
01061         "movq %%mm2, %%mm3                \n\t" /* ABCDEFGH */\
01062         "movq %%mm2, %%mm4                \n\t" /* ABCDEFGH */\
01063         "psllq $8, %%mm2                  \n\t" /* 0ABCDEFG */\
01064         "psllq $16, %%mm3                 \n\t" /* 00ABCDEF */\
01065         "psllq $24, %%mm4                 \n\t" /* 000ABCDE */\
01066         "punpckhbw %%mm7, %%mm2           \n\t" /* 0D0E0F0G */\
01067         "punpckhbw %%mm7, %%mm3           \n\t" /* 0C0D0E0F */\
01068         "punpckhbw %%mm7, %%mm4           \n\t" /* 0B0C0D0E */\
01069         "paddw %%mm3, %%mm5               \n\t" /* b */\
01070         "paddw %%mm2, %%mm6               \n\t" /* c */\
01071         "paddw %%mm5, %%mm5               \n\t" /* 2b */\
01072         "psubw %%mm5, %%mm6               \n\t" /* c - 2b */\
01073         "pshufw $0x06, %%mm0, %%mm5       \n\t" /* 0C0B0A0A */\
01074         "pmullw "MANGLE(ff_pw_3)", %%mm6  \n\t" /* 3c - 6b */\
01075         "paddw %%mm4, %%mm0               \n\t" /* a */\
01076         "paddw %%mm1, %%mm5               \n\t" /* d */\
01077         "pmullw "MANGLE(ff_pw_20)", %%mm0 \n\t" /* 20a */\
01078         "psubw %%mm5, %%mm0               \n\t" /* 20a - d */\
01079         "paddw %5, %%mm6                  \n\t"\
01080         "paddw %%mm6, %%mm0               \n\t" /* 20a - 6b + 3c - d */\
01081         "psraw $5, %%mm0                  \n\t"\
01082         /* mm1=EFGH, mm2=DEFG, mm3=CDEF, mm4=BCDE, mm7=0 */\
01083         \
01084         "movd 5(%0), %%mm5                \n\t" /* FGHI */\
01085         "punpcklbw %%mm7, %%mm5           \n\t" /* 0F0G0H0I */\
01086         "pshufw $0xF9, %%mm5, %%mm6       \n\t" /* 0G0H0I0I */\
01087         "paddw %%mm5, %%mm1               \n\t" /* a */\
01088         "paddw %%mm6, %%mm2               \n\t" /* b */\
01089         "pshufw $0xBE, %%mm5, %%mm6       \n\t" /* 0H0I0I0H */\
01090         "pshufw $0x6F, %%mm5, %%mm5       \n\t" /* 0I0I0H0G */\
01091         "paddw %%mm6, %%mm3               \n\t" /* c */\
01092         "paddw %%mm5, %%mm4               \n\t" /* d */\
01093         "paddw %%mm2, %%mm2               \n\t" /* 2b */\
01094         "psubw %%mm2, %%mm3               \n\t" /* c - 2b */\
01095         "pmullw "MANGLE(ff_pw_20)", %%mm1 \n\t" /* 20a */\
01096         "pmullw "MANGLE(ff_pw_3)", %%mm3  \n\t" /* 3c - 6b */\
01097         "psubw %%mm4, %%mm3               \n\t" /* -6b + 3c - d */\
01098         "paddw %5, %%mm1                  \n\t"\
01099         "paddw %%mm1, %%mm3               \n\t" /* 20a - 6b + 3c - d */\
01100         "psraw $5, %%mm3                  \n\t"\
01101         "packuswb %%mm3, %%mm0            \n\t"\
01102         OP_MMX2(%%mm0, (%1), %%mm4, q)\
01103         \
01104         "add %3, %0                       \n\t"\
01105         "add %4, %1                       \n\t"\
01106         "decl %2                          \n\t"\
01107         " jnz 1b                          \n\t"\
01108         : "+a"(src), "+c"(dst), "+d"(h)\
01109         : "S"((x86_reg)srcStride), "D"((x86_reg)dstStride), /*"m"(ff_pw_20), "m"(ff_pw_3),*/ "m"(ROUNDER)\
01110         : "memory"\
01111     );\
01112 }\
01113 \
01114 static void OPNAME ## mpeg4_qpel8_h_lowpass_3dnow(uint8_t *dst, uint8_t *src, int dstStride, int srcStride, int h){\
01115     int i;\
01116     int16_t temp[8];\
01117     /* quick HACK, XXX FIXME MUST be optimized */\
01118     for(i=0; i<h; i++)\
01119     {\
01120         temp[ 0]= (src[ 0]+src[ 1])*20 - (src[ 0]+src[ 2])*6 + (src[ 1]+src[ 3])*3 - (src[ 2]+src[ 4]);\
01121         temp[ 1]= (src[ 1]+src[ 2])*20 - (src[ 0]+src[ 3])*6 + (src[ 0]+src[ 4])*3 - (src[ 1]+src[ 5]);\
01122         temp[ 2]= (src[ 2]+src[ 3])*20 - (src[ 1]+src[ 4])*6 + (src[ 0]+src[ 5])*3 - (src[ 0]+src[ 6]);\
01123         temp[ 3]= (src[ 3]+src[ 4])*20 - (src[ 2]+src[ 5])*6 + (src[ 1]+src[ 6])*3 - (src[ 0]+src[ 7]);\
01124         temp[ 4]= (src[ 4]+src[ 5])*20 - (src[ 3]+src[ 6])*6 + (src[ 2]+src[ 7])*3 - (src[ 1]+src[ 8]);\
01125         temp[ 5]= (src[ 5]+src[ 6])*20 - (src[ 4]+src[ 7])*6 + (src[ 3]+src[ 8])*3 - (src[ 2]+src[ 8]);\
01126         temp[ 6]= (src[ 6]+src[ 7])*20 - (src[ 5]+src[ 8])*6 + (src[ 4]+src[ 8])*3 - (src[ 3]+src[ 7]);\
01127         temp[ 7]= (src[ 7]+src[ 8])*20 - (src[ 6]+src[ 8])*6 + (src[ 5]+src[ 7])*3 - (src[ 4]+src[ 6]);\
01128         __asm__ volatile(\
01129             "movq (%0), %%mm0           \n\t"\
01130             "movq 8(%0), %%mm1          \n\t"\
01131             "paddw %2, %%mm0            \n\t"\
01132             "paddw %2, %%mm1            \n\t"\
01133             "psraw $5, %%mm0            \n\t"\
01134             "psraw $5, %%mm1            \n\t"\
01135             "packuswb %%mm1, %%mm0      \n\t"\
01136             OP_3DNOW(%%mm0, (%1), %%mm1, q)\
01137             :: "r"(temp), "r"(dst), "m"(ROUNDER)\
01138             :"memory"\
01139         );\
01140         dst+=dstStride;\
01141         src+=srcStride;\
01142     }\
01143 }
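Both horizontal lowpass versions implement the MPEG-4 quarter-pel interpolation
filter with taps {-1, 3, -6, 20, 20, -6, 3, -1} (tap sum 32, hence the +ROUNDER
and the shift by 5), mirroring edge pixels exactly as the temp[] rows above do.
A minimal scalar sketch of one interior output pixel (illustrative only, not
part of this file):

    /* one lowpass output at column x, assuming 3 <= x <= width-5 so no
     * edge mirroring is needed; rounder is 16, or 15 for the no_rnd case */
    static inline int qpel_h_pixel(const uint8_t *src, int x, int rounder)
    {
        int v = 20*(src[x  ] + src[x+1])
              -  6*(src[x-1] + src[x+2])
              +  3*(src[x-2] + src[x+3])
              -    (src[x-3] + src[x+4]);
        v = (v + rounder) >> 5;
        return v < 0 ? 0 : v > 255 ? 255 : v;   /* the packuswb clamp */
    }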
01144 
01145 #define QPEL_OP(OPNAME, ROUNDER, RND, OP, MMX)\
01146 \
01147 static void OPNAME ## mpeg4_qpel16_v_lowpass_ ## MMX(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\
01148     uint64_t temp[17*4];\
01149     uint64_t *temp_ptr= temp;\
01150     int count= 17;\
01151 \
01152     /*FIXME unroll */\
01153     __asm__ volatile(\
01154         "pxor %%mm7, %%mm7              \n\t"\
01155         "1:                             \n\t"\
01156         "movq (%0), %%mm0               \n\t"\
01157         "movq (%0), %%mm1               \n\t"\
01158         "movq 8(%0), %%mm2              \n\t"\
01159         "movq 8(%0), %%mm3              \n\t"\
01160         "punpcklbw %%mm7, %%mm0         \n\t"\
01161         "punpckhbw %%mm7, %%mm1         \n\t"\
01162         "punpcklbw %%mm7, %%mm2         \n\t"\
01163         "punpckhbw %%mm7, %%mm3         \n\t"\
01164         "movq %%mm0, (%1)               \n\t"\
01165         "movq %%mm1, 17*8(%1)           \n\t"\
01166         "movq %%mm2, 2*17*8(%1)         \n\t"\
01167         "movq %%mm3, 3*17*8(%1)         \n\t"\
01168         "add $8, %1                     \n\t"\
01169         "add %3, %0                     \n\t"\
01170         "decl %2                        \n\t"\
01171         " jnz 1b                        \n\t"\
01172         : "+r" (src), "+r" (temp_ptr), "+r"(count)\
01173         : "r" ((x86_reg)srcStride)\
01174         : "memory"\
01175     );\
01176     \
01177     temp_ptr= temp;\
01178     count=4;\
01179     \
01180 /*FIXME reorder for speed */\
01181     __asm__ volatile(\
01182         /*"pxor %%mm7, %%mm7              \n\t"*/\
01183         "1:                             \n\t"\
01184         "movq (%0), %%mm0               \n\t"\
01185         "movq 8(%0), %%mm1              \n\t"\
01186         "movq 16(%0), %%mm2             \n\t"\
01187         "movq 24(%0), %%mm3             \n\t"\
01188         QPEL_V_LOW(%%mm0, %%mm1, %%mm2, %%mm3, %5, %6, %5, 16(%0),  8(%0),   (%0), 32(%0), (%1), OP)\
01189         QPEL_V_LOW(%%mm1, %%mm2, %%mm3, %%mm0, %5, %6, %5,  8(%0),   (%0),   (%0), 40(%0), (%1, %3), OP)\
01190         "add %4, %1                     \n\t"\
01191         QPEL_V_LOW(%%mm2, %%mm3, %%mm0, %%mm1, %5, %6, %5,   (%0),   (%0),  8(%0), 48(%0), (%1), OP)\
01192         \
01193         QPEL_V_LOW(%%mm3, %%mm0, %%mm1, %%mm2, %5, %6, %5,   (%0),  8(%0), 16(%0), 56(%0), (%1, %3), OP)\
01194         "add %4, %1                     \n\t"\
01195         QPEL_V_LOW(%%mm0, %%mm1, %%mm2, %%mm3, %5, %6, %5,  8(%0), 16(%0), 24(%0), 64(%0), (%1), OP)\
01196         QPEL_V_LOW(%%mm1, %%mm2, %%mm3, %%mm0, %5, %6, %5, 16(%0), 24(%0), 32(%0), 72(%0), (%1, %3), OP)\
01197         "add %4, %1                     \n\t"\
01198         QPEL_V_LOW(%%mm2, %%mm3, %%mm0, %%mm1, %5, %6, %5, 24(%0), 32(%0), 40(%0), 80(%0), (%1), OP)\
01199         QPEL_V_LOW(%%mm3, %%mm0, %%mm1, %%mm2, %5, %6, %5, 32(%0), 40(%0), 48(%0), 88(%0), (%1, %3), OP)\
01200         "add %4, %1                     \n\t"\
01201         QPEL_V_LOW(%%mm0, %%mm1, %%mm2, %%mm3, %5, %6, %5, 40(%0), 48(%0), 56(%0), 96(%0), (%1), OP)\
01202         QPEL_V_LOW(%%mm1, %%mm2, %%mm3, %%mm0, %5, %6, %5, 48(%0), 56(%0), 64(%0),104(%0), (%1, %3), OP)\
01203         "add %4, %1                     \n\t"\
01204         QPEL_V_LOW(%%mm2, %%mm3, %%mm0, %%mm1, %5, %6, %5, 56(%0), 64(%0), 72(%0),112(%0), (%1), OP)\
01205         QPEL_V_LOW(%%mm3, %%mm0, %%mm1, %%mm2, %5, %6, %5, 64(%0), 72(%0), 80(%0),120(%0), (%1, %3), OP)\
01206         "add %4, %1                     \n\t"\
01207         QPEL_V_LOW(%%mm0, %%mm1, %%mm2, %%mm3, %5, %6, %5, 72(%0), 80(%0), 88(%0),128(%0), (%1), OP)\
01208         \
01209         QPEL_V_LOW(%%mm1, %%mm2, %%mm3, %%mm0, %5, %6, %5, 80(%0), 88(%0), 96(%0),128(%0), (%1, %3), OP)\
01210         "add %4, %1                     \n\t"  \
01211         QPEL_V_LOW(%%mm2, %%mm3, %%mm0, %%mm1, %5, %6, %5, 88(%0), 96(%0),104(%0),120(%0), (%1), OP)\
01212         QPEL_V_LOW(%%mm3, %%mm0, %%mm1, %%mm2, %5, %6, %5, 96(%0),104(%0),112(%0),112(%0), (%1, %3), OP)\
01213         \
01214         "add $136, %0                   \n\t"\
01215         "add %6, %1                     \n\t"\
01216         "decl %2                        \n\t"\
01217         " jnz 1b                        \n\t"\
01218         \
01219         : "+r"(temp_ptr), "+r"(dst), "+g"(count)\
01220         : "r"((x86_reg)dstStride), "r"(2*(x86_reg)dstStride), /*"m"(ff_pw_20), "m"(ff_pw_3),*/ "m"(ROUNDER), "g"(4-14*(x86_reg)dstStride)\
01221         :"memory"\
01222     );\
01223 }\
01224 \
01225 static void OPNAME ## mpeg4_qpel8_v_lowpass_ ## MMX(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\
01226     uint64_t temp[9*2];\
01227     uint64_t *temp_ptr= temp;\
01228     int count= 9;\
01229 \
01230     /*FIXME unroll */\
01231     __asm__ volatile(\
01232         "pxor %%mm7, %%mm7              \n\t"\
01233         "1:                             \n\t"\
01234         "movq (%0), %%mm0               \n\t"\
01235         "movq (%0), %%mm1               \n\t"\
01236         "punpcklbw %%mm7, %%mm0         \n\t"\
01237         "punpckhbw %%mm7, %%mm1         \n\t"\
01238         "movq %%mm0, (%1)               \n\t"\
01239         "movq %%mm1, 9*8(%1)            \n\t"\
01240         "add $8, %1                     \n\t"\
01241         "add %3, %0                     \n\t"\
01242         "decl %2                        \n\t"\
01243         " jnz 1b                        \n\t"\
01244         : "+r" (src), "+r" (temp_ptr), "+r"(count)\
01245         : "r" ((x86_reg)srcStride)\
01246         : "memory"\
01247     );\
01248     \
01249     temp_ptr= temp;\
01250     count=2;\
01251     \
01252 /*FIXME reorder for speed */\
01253     __asm__ volatile(\
01254         /*"pxor %%mm7, %%mm7              \n\t"*/\
01255         "1:                             \n\t"\
01256         "movq (%0), %%mm0               \n\t"\
01257         "movq 8(%0), %%mm1              \n\t"\
01258         "movq 16(%0), %%mm2             \n\t"\
01259         "movq 24(%0), %%mm3             \n\t"\
01260         QPEL_V_LOW(%%mm0, %%mm1, %%mm2, %%mm3, %5, %6, %5, 16(%0),  8(%0),   (%0), 32(%0), (%1), OP)\
01261         QPEL_V_LOW(%%mm1, %%mm2, %%mm3, %%mm0, %5, %6, %5,  8(%0),   (%0),   (%0), 40(%0), (%1, %3), OP)\
01262         "add %4, %1                     \n\t"\
01263         QPEL_V_LOW(%%mm2, %%mm3, %%mm0, %%mm1, %5, %6, %5,   (%0),   (%0),  8(%0), 48(%0), (%1), OP)\
01264         \
01265         QPEL_V_LOW(%%mm3, %%mm0, %%mm1, %%mm2, %5, %6, %5,   (%0),  8(%0), 16(%0), 56(%0), (%1, %3), OP)\
01266         "add %4, %1                     \n\t"\
01267         QPEL_V_LOW(%%mm0, %%mm1, %%mm2, %%mm3, %5, %6, %5,  8(%0), 16(%0), 24(%0), 64(%0), (%1), OP)\
01268         \
01269         QPEL_V_LOW(%%mm1, %%mm2, %%mm3, %%mm0, %5, %6, %5, 16(%0), 24(%0), 32(%0), 64(%0), (%1, %3), OP)\
01270         "add %4, %1                     \n\t"\
01271         QPEL_V_LOW(%%mm2, %%mm3, %%mm0, %%mm1, %5, %6, %5, 24(%0), 32(%0), 40(%0), 56(%0), (%1), OP)\
01272         QPEL_V_LOW(%%mm3, %%mm0, %%mm1, %%mm2, %5, %6, %5, 32(%0), 40(%0), 48(%0), 48(%0), (%1, %3), OP)\
01273         \
01274         "add $72, %0                    \n\t"\
01275         "add %6, %1                     \n\t"\
01276         "decl %2                        \n\t"\
01277         " jnz 1b                        \n\t"\
01278         \
01279         : "+r"(temp_ptr), "+r"(dst), "+g"(count)\
01280         : "r"((x86_reg)dstStride), "r"(2*(x86_reg)dstStride), /*"m"(ff_pw_20), "m"(ff_pw_3),*/ "m"(ROUNDER), "g"(4-6*(x86_reg)dstStride)\
01281         : "memory"\
01282    );\
01283 }\
01284 \
01285 static void OPNAME ## qpel8_mc00_ ## MMX (uint8_t *dst, uint8_t *src, int stride){\
01286     OPNAME ## pixels8_ ## MMX(dst, src, stride, 8);\
01287 }\
01288 \
01289 static void OPNAME ## qpel8_mc10_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
01290     uint64_t temp[8];\
01291     uint8_t * const half= (uint8_t*)temp;\
01292     put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(half, src, 8, stride, 8);\
01293     OPNAME ## pixels8_l2_ ## MMX(dst, src, half, stride, stride, 8);\
01294 }\
01295 \
01296 static void OPNAME ## qpel8_mc20_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
01297     OPNAME ## mpeg4_qpel8_h_lowpass_ ## MMX(dst, src, stride, stride, 8);\
01298 }\
01299 \
01300 static void OPNAME ## qpel8_mc30_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
01301     uint64_t temp[8];\
01302     uint8_t * const half= (uint8_t*)temp;\
01303     put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(half, src, 8, stride, 8);\
01304     OPNAME ## pixels8_l2_ ## MMX(dst, src+1, half, stride, stride, 8);\
01305 }\
01306 \
01307 static void OPNAME ## qpel8_mc01_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
01308     uint64_t temp[8];\
01309     uint8_t * const half= (uint8_t*)temp;\
01310     put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(half, src, 8, stride);\
01311     OPNAME ## pixels8_l2_ ## MMX(dst, src, half, stride, stride, 8);\
01312 }\
01313 \
01314 static void OPNAME ## qpel8_mc02_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
01315     OPNAME ## mpeg4_qpel8_v_lowpass_ ## MMX(dst, src, stride, stride);\
01316 }\
01317 \
01318 static void OPNAME ## qpel8_mc03_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
01319     uint64_t temp[8];\
01320     uint8_t * const half= (uint8_t*)temp;\
01321     put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(half, src, 8, stride);\
01322     OPNAME ## pixels8_l2_ ## MMX(dst, src+stride, half, stride, stride, 8);\
01323 }\
01324 static void OPNAME ## qpel8_mc11_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
01325     uint64_t half[8 + 9];\
01326     uint8_t * const halfH= ((uint8_t*)half) + 64;\
01327     uint8_t * const halfHV= ((uint8_t*)half);\
01328     put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8, stride, 9);\
01329     put ## RND ## pixels8_l2_ ## MMX(halfH, src, halfH, 8, stride, 9);\
01330     put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(halfHV, halfH, 8, 8);\
01331     OPNAME ## pixels8_l2_ ## MMX(dst, halfH, halfHV, stride, 8, 8);\
01332 }\
01333 static void OPNAME ## qpel8_mc31_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
01334     uint64_t half[8 + 9];\
01335     uint8_t * const halfH= ((uint8_t*)half) + 64;\
01336     uint8_t * const halfHV= ((uint8_t*)half);\
01337     put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8, stride, 9);\
01338     put ## RND ## pixels8_l2_ ## MMX(halfH, src+1, halfH, 8, stride, 9);\
01339     put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(halfHV, halfH, 8, 8);\
01340     OPNAME ## pixels8_l2_ ## MMX(dst, halfH, halfHV, stride, 8, 8);\
01341 }\
01342 static void OPNAME ## qpel8_mc13_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
01343     uint64_t half[8 + 9];\
01344     uint8_t * const halfH= ((uint8_t*)half) + 64;\
01345     uint8_t * const halfHV= ((uint8_t*)half);\
01346     put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8, stride, 9);\
01347     put ## RND ## pixels8_l2_ ## MMX(halfH, src, halfH, 8, stride, 9);\
01348     put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(halfHV, halfH, 8, 8);\
01349     OPNAME ## pixels8_l2_ ## MMX(dst, halfH+8, halfHV, stride, 8, 8);\
01350 }\
01351 static void OPNAME ## qpel8_mc33_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
01352     uint64_t half[8 + 9];\
01353     uint8_t * const halfH= ((uint8_t*)half) + 64;\
01354     uint8_t * const halfHV= ((uint8_t*)half);\
01355     put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8, stride, 9);\
01356     put ## RND ## pixels8_l2_ ## MMX(halfH, src+1, halfH, 8, stride, 9);\
01357     put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(halfHV, halfH, 8, 8);\
01358     OPNAME ## pixels8_l2_ ## MMX(dst, halfH+8, halfHV, stride, 8, 8);\
01359 }\
01360 static void OPNAME ## qpel8_mc21_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
01361     uint64_t half[8 + 9];\
01362     uint8_t * const halfH= ((uint8_t*)half) + 64;\
01363     uint8_t * const halfHV= ((uint8_t*)half);\
01364     put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8, stride, 9);\
01365     put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(halfHV, halfH, 8, 8);\
01366     OPNAME ## pixels8_l2_ ## MMX(dst, halfH, halfHV, stride, 8, 8);\
01367 }\
01368 static void OPNAME ## qpel8_mc23_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
01369     uint64_t half[8 + 9];\
01370     uint8_t * const halfH= ((uint8_t*)half) + 64;\
01371     uint8_t * const halfHV= ((uint8_t*)half);\
01372     put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8, stride, 9);\
01373     put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(halfHV, halfH, 8, 8);\
01374     OPNAME ## pixels8_l2_ ## MMX(dst, halfH+8, halfHV, stride, 8, 8);\
01375 }\
01376 static void OPNAME ## qpel8_mc12_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
01377     uint64_t half[8 + 9];\
01378     uint8_t * const halfH= ((uint8_t*)half);\
01379     put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8, stride, 9);\
01380     put ## RND ## pixels8_l2_ ## MMX(halfH, src, halfH, 8, stride, 9);\
01381     OPNAME ## mpeg4_qpel8_v_lowpass_ ## MMX(dst, halfH, stride, 8);\
01382 }\
01383 static void OPNAME ## qpel8_mc32_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
01384     uint64_t half[8 + 9];\
01385     uint8_t * const halfH= ((uint8_t*)half);\
01386     put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8, stride, 9);\
01387     put ## RND ## pixels8_l2_ ## MMX(halfH, src+1, halfH, 8, stride, 9);\
01388     OPNAME ## mpeg4_qpel8_v_lowpass_ ## MMX(dst, halfH, stride, 8);\
01389 }\
01390 static void OPNAME ## qpel8_mc22_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
01391     uint64_t half[9];\
01392     uint8_t * const halfH= ((uint8_t*)half);\
01393     put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8, stride, 9);\
01394     OPNAME ## mpeg4_qpel8_v_lowpass_ ## MMX(dst, halfH, stride, 8);\
01395 }\
01396 static void OPNAME ## qpel16_mc00_ ## MMX (uint8_t *dst, uint8_t *src, int stride){\
01397     OPNAME ## pixels16_ ## MMX(dst, src, stride, 16);\
01398 }\
01399 \
01400 static void OPNAME ## qpel16_mc10_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
01401     uint64_t temp[32];\
01402     uint8_t * const half= (uint8_t*)temp;\
01403     put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(half, src, 16, stride, 16);\
01404     OPNAME ## pixels16_l2_ ## MMX(dst, src, half, stride, stride, 16);\
01405 }\
01406 \
01407 static void OPNAME ## qpel16_mc20_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
01408     OPNAME ## mpeg4_qpel16_h_lowpass_ ## MMX(dst, src, stride, stride, 16);\
01409 }\
01410 \
01411 static void OPNAME ## qpel16_mc30_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
01412     uint64_t temp[32];\
01413     uint8_t * const half= (uint8_t*)temp;\
01414     put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(half, src, 16, stride, 16);\
01415     OPNAME ## pixels16_l2_ ## MMX(dst, src+1, half, stride, stride, 16);\
01416 }\
01417 \
01418 static void OPNAME ## qpel16_mc01_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
01419     uint64_t temp[32];\
01420     uint8_t * const half= (uint8_t*)temp;\
01421     put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(half, src, 16, stride);\
01422     OPNAME ## pixels16_l2_ ## MMX(dst, src, half, stride, stride, 16);\
01423 }\
01424 \
01425 static void OPNAME ## qpel16_mc02_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
01426     OPNAME ## mpeg4_qpel16_v_lowpass_ ## MMX(dst, src, stride, stride);\
01427 }\
01428 \
01429 static void OPNAME ## qpel16_mc03_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
01430     uint64_t temp[32];\
01431     uint8_t * const half= (uint8_t*)temp;\
01432     put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(half, src, 16, stride);\
01433     OPNAME ## pixels16_l2_ ## MMX(dst, src+stride, half, stride, stride, 16);\
01434 }\
01435 static void OPNAME ## qpel16_mc11_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
01436     uint64_t half[16*2 + 17*2];\
01437     uint8_t * const halfH= ((uint8_t*)half) + 256;\
01438     uint8_t * const halfHV= ((uint8_t*)half);\
01439     put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16, stride, 17);\
01440     put ## RND ## pixels16_l2_ ## MMX(halfH, src, halfH, 16, stride, 17);\
01441     put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(halfHV, halfH, 16, 16);\
01442     OPNAME ## pixels16_l2_ ## MMX(dst, halfH, halfHV, stride, 16, 16);\
01443 }\
01444 static void OPNAME ## qpel16_mc31_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
01445     uint64_t half[16*2 + 17*2];\
01446     uint8_t * const halfH= ((uint8_t*)half) + 256;\
01447     uint8_t * const halfHV= ((uint8_t*)half);\
01448     put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16, stride, 17);\
01449     put ## RND ## pixels16_l2_ ## MMX(halfH, src+1, halfH, 16, stride, 17);\
01450     put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(halfHV, halfH, 16, 16);\
01451     OPNAME ## pixels16_l2_ ## MMX(dst, halfH, halfHV, stride, 16, 16);\
01452 }\
01453 static void OPNAME ## qpel16_mc13_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
01454     uint64_t half[16*2 + 17*2];\
01455     uint8_t * const halfH= ((uint8_t*)half) + 256;\
01456     uint8_t * const halfHV= ((uint8_t*)half);\
01457     put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16, stride, 17);\
01458     put ## RND ## pixels16_l2_ ## MMX(halfH, src, halfH, 16, stride, 17);\
01459     put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(halfHV, halfH, 16, 16);\
01460     OPNAME ## pixels16_l2_ ## MMX(dst, halfH+16, halfHV, stride, 16, 16);\
01461 }\
01462 static void OPNAME ## qpel16_mc33_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
01463     uint64_t half[16*2 + 17*2];\
01464     uint8_t * const halfH= ((uint8_t*)half) + 256;\
01465     uint8_t * const halfHV= ((uint8_t*)half);\
01466     put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16, stride, 17);\
01467     put ## RND ## pixels16_l2_ ## MMX(halfH, src+1, halfH, 16, stride, 17);\
01468     put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(halfHV, halfH, 16, 16);\
01469     OPNAME ## pixels16_l2_ ## MMX(dst, halfH+16, halfHV, stride, 16, 16);\
01470 }\
01471 static void OPNAME ## qpel16_mc21_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
01472     uint64_t half[16*2 + 17*2];\
01473     uint8_t * const halfH= ((uint8_t*)half) + 256;\
01474     uint8_t * const halfHV= ((uint8_t*)half);\
01475     put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16, stride, 17);\
01476     put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(halfHV, halfH, 16, 16);\
01477     OPNAME ## pixels16_l2_ ## MMX(dst, halfH, halfHV, stride, 16, 16);\
01478 }\
01479 static void OPNAME ## qpel16_mc23_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
01480     uint64_t half[16*2 + 17*2];\
01481     uint8_t * const halfH= ((uint8_t*)half) + 256;\
01482     uint8_t * const halfHV= ((uint8_t*)half);\
01483     put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16, stride, 17);\
01484     put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(halfHV, halfH, 16, 16);\
01485     OPNAME ## pixels16_l2_ ## MMX(dst, halfH+16, halfHV, stride, 16, 16);\
01486 }\
01487 static void OPNAME ## qpel16_mc12_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
01488     uint64_t half[17*2];\
01489     uint8_t * const halfH= ((uint8_t*)half);\
01490     put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16, stride, 17);\
01491     put ## RND ## pixels16_l2_ ## MMX(halfH, src, halfH, 16, stride, 17);\
01492     OPNAME ## mpeg4_qpel16_v_lowpass_ ## MMX(dst, halfH, stride, 16);\
01493 }\
01494 static void OPNAME ## qpel16_mc32_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
01495     uint64_t half[17*2];\
01496     uint8_t * const halfH= ((uint8_t*)half);\
01497     put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16, stride, 17);\
01498     put ## RND ## pixels16_l2_ ## MMX(halfH, src+1, halfH, 16, stride, 17);\
01499     OPNAME ## mpeg4_qpel16_v_lowpass_ ## MMX(dst, halfH, stride, 16);\
01500 }\
01501 static void OPNAME ## qpel16_mc22_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
01502     uint64_t half[17*2];\
01503     uint8_t * const halfH= ((uint8_t*)half);\
01504     put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16, stride, 17);\
01505     OPNAME ## mpeg4_qpel16_v_lowpass_ ## MMX(dst, halfH, stride, 16);\
01506 }
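In the mcXY names generated above, X and Y are the quarter-pel offsets of the
predicted block: mc00 is the full-pel copy, mc20 and mc02 are the horizontal
and vertical half-pel lowpass, and the remaining positions are built by
averaging a lowpass result with the (possibly shifted) source or with a second
lowpass pass. For example, mc10 averages src with the half-pel row, while mc11
runs the horizontal lowpass, the vertical lowpass on top of it, and then blends
the two intermediates.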
01507 
01508 #define PUT_OP(a,b,temp, size) "mov" #size " " #a ", " #b "        \n\t"
01509 #define AVG_3DNOW_OP(a,b,temp, size) \
01510 "mov" #size " " #b ", " #temp "   \n\t"\
01511 "pavgusb " #temp ", " #a "        \n\t"\
01512 "mov" #size " " #a ", " #b "      \n\t"
01513 #define AVG_MMX2_OP(a,b,temp, size) \
01514 "mov" #size " " #b ", " #temp "   \n\t"\
01515 "pavgb " #temp ", " #a "          \n\t"\
01516 "mov" #size " " #a ", " #b "      \n\t"
01517 
01518 QPEL_BASE(put_       , ff_pw_16, _       , PUT_OP, PUT_OP)
01519 QPEL_BASE(avg_       , ff_pw_16, _       , AVG_MMX2_OP, AVG_3DNOW_OP)
01520 QPEL_BASE(put_no_rnd_, ff_pw_15, _no_rnd_, PUT_OP, PUT_OP)
01521 QPEL_OP(put_       , ff_pw_16, _       , PUT_OP, 3dnow)
01522 QPEL_OP(avg_       , ff_pw_16, _       , AVG_3DNOW_OP, 3dnow)
01523 QPEL_OP(put_no_rnd_, ff_pw_15, _no_rnd_, PUT_OP, 3dnow)
01524 QPEL_OP(put_       , ff_pw_16, _       , PUT_OP, mmx2)
01525 QPEL_OP(avg_       , ff_pw_16, _       , AVG_MMX2_OP, mmx2)
01526 QPEL_OP(put_no_rnd_, ff_pw_15, _no_rnd_, PUT_OP, mmx2)
01527 
01528 /***********************************/
01529 /* bilinear qpel: not compliant with any spec; only used with -lavdopts fast */
01530 
01531 #define QPEL_2TAP_XY(OPNAME, SIZE, MMX, XY, HPEL)\
01532 static void OPNAME ## 2tap_qpel ## SIZE ## _mc ## XY ## _ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
01533     OPNAME ## pixels ## SIZE ## HPEL(dst, src, stride, SIZE);\
01534 }
01535 #define QPEL_2TAP_L3(OPNAME, SIZE, MMX, XY, S0, S1, S2)\
01536 static void OPNAME ## 2tap_qpel ## SIZE ## _mc ## XY ## _ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
01537     OPNAME ## 2tap_qpel ## SIZE ## _l3_ ## MMX(dst, src+S0, stride, SIZE, S1, S2);\
01538 }
01539 
01540 #define QPEL_2TAP(OPNAME, SIZE, MMX)\
01541 QPEL_2TAP_XY(OPNAME, SIZE, MMX, 20, _x2_ ## MMX)\
01542 QPEL_2TAP_XY(OPNAME, SIZE, MMX, 02, _y2_ ## MMX)\
01543 QPEL_2TAP_XY(OPNAME, SIZE, MMX, 22, _xy2_mmx)\
01544 static const qpel_mc_func OPNAME ## 2tap_qpel ## SIZE ## _mc00_ ## MMX =\
01545                           OPNAME ## qpel ## SIZE ## _mc00_ ## MMX;\
01546 static const qpel_mc_func OPNAME ## 2tap_qpel ## SIZE ## _mc21_ ## MMX =\
01547                           OPNAME ## 2tap_qpel ## SIZE ## _mc20_ ## MMX;\
01548 static const qpel_mc_func OPNAME ## 2tap_qpel ## SIZE ## _mc12_ ## MMX =\
01549                           OPNAME ## 2tap_qpel ## SIZE ## _mc02_ ## MMX;\
01550 static void OPNAME ## 2tap_qpel ## SIZE ## _mc32_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
01551     OPNAME ## pixels ## SIZE ## _y2_ ## MMX(dst, src+1, stride, SIZE);\
01552 }\
01553 static void OPNAME ## 2tap_qpel ## SIZE ## _mc23_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
01554     OPNAME ## pixels ## SIZE ## _x2_ ## MMX(dst, src+stride, stride, SIZE);\
01555 }\
01556 QPEL_2TAP_L3(OPNAME, SIZE, MMX, 10, 0,         1,       0)\
01557 QPEL_2TAP_L3(OPNAME, SIZE, MMX, 30, 1,        -1,       0)\
01558 QPEL_2TAP_L3(OPNAME, SIZE, MMX, 01, 0,         stride,  0)\
01559 QPEL_2TAP_L3(OPNAME, SIZE, MMX, 03, stride,   -stride,  0)\
01560 QPEL_2TAP_L3(OPNAME, SIZE, MMX, 11, 0,         stride,  1)\
01561 QPEL_2TAP_L3(OPNAME, SIZE, MMX, 31, 1,         stride, -1)\
01562 QPEL_2TAP_L3(OPNAME, SIZE, MMX, 13, stride,   -stride,  1)\
01563 QPEL_2TAP_L3(OPNAME, SIZE, MMX, 33, stride+1, -stride, -1)\
01564 
01565 QPEL_2TAP(put_, 16, mmx2)
01566 QPEL_2TAP(avg_, 16, mmx2)
01567 QPEL_2TAP(put_,  8, mmx2)
01568 QPEL_2TAP(avg_,  8, mmx2)
01569 QPEL_2TAP(put_, 16, 3dnow)
01570 QPEL_2TAP(avg_, 16, 3dnow)
01571 QPEL_2TAP(put_,  8, 3dnow)
01572 QPEL_2TAP(avg_,  8, 3dnow)
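The 2-tap variants replace the 8-tap lowpass with plain pixel averaging:
half-pel positions map directly onto the hpel _x2/_y2/_xy2 routines, several
quarter-pel positions are simply aliased to the nearest half-pel function
(mc21 to mc20, mc12 to mc02), and the remaining positions blend the source
with two shifted copies of itself through the _l3 helpers, whose S1/S2
arguments are the two extra offsets. As the comment above says, this is a
speed hack, not a compliant interpolator.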
01573 
01574 
01575 #if HAVE_YASM
01576 typedef void emu_edge_core_func (uint8_t *buf, const uint8_t *src,
01577                                  x86_reg linesize, x86_reg start_y,
01578                                  x86_reg end_y, x86_reg block_h,
01579                                  x86_reg start_x, x86_reg end_x,
01580                                  x86_reg block_w);
01581 extern emu_edge_core_func ff_emu_edge_core_mmx;
01582 extern emu_edge_core_func ff_emu_edge_core_sse;
01583 
01584 static av_always_inline
01585 void emulated_edge_mc(uint8_t *buf, const uint8_t *src, int linesize,
01586                       int block_w, int block_h,
01587                       int src_x, int src_y, int w, int h,
01588                       emu_edge_core_func *core_fn)
01589 {
01590     int start_y, start_x, end_y, end_x, src_y_add=0;
01591 
01592     if(src_y>= h){
01593         src_y_add = h-1-src_y;
01594         src_y=h-1;
01595     }else if(src_y<=-block_h){
01596         src_y_add = 1-block_h-src_y;
01597         src_y=1-block_h;
01598     }
01599     if(src_x>= w){
01600         src+= (w-1-src_x);
01601         src_x=w-1;
01602     }else if(src_x<=-block_w){
01603         src+= (1-block_w-src_x);
01604         src_x=1-block_w;
01605     }
01606 
01607     start_y= FFMAX(0, -src_y);
01608     start_x= FFMAX(0, -src_x);
01609     end_y= FFMIN(block_h, h-src_y);
01610     end_x= FFMIN(block_w, w-src_x);
01611     assert(start_x < end_x && block_w > 0);
01612     assert(start_y < end_y && block_h > 0);
01613 
01614     // fill in the to-be-copied part plus all above/below
01615     src += (src_y_add+start_y)*linesize + start_x;
01616     buf += start_x;
01617     core_fn(buf, src, linesize, start_y, end_y, block_h, start_x, end_x, block_w);
01618 }
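emulated_edge_mc() first clamps src_x/src_y so that the requested block
overlaps the picture by at least one column/row, then derives the copyable
sub-rectangle [start_x,end_x) x [start_y,end_y); the yasm core copies that
part and replicates the border pixels into the rest of buf. A rough scalar
picture of the replication the core performs (an assumption for illustration;
the real work is in ff_emu_edge_core_mmx/sse, and src_pic here stands for the
unclamped picture pointer):

    /* out-of-picture reads repeat the nearest edge pixel */
    for (y = 0; y < block_h; y++) {
        int sy = av_clip(src_y + y, 0, h - 1);
        for (x = 0; x < block_w; x++) {
            int sx = av_clip(src_x + x, 0, w - 1);
            buf[y*linesize + x] = src_pic[sy*linesize + sx];
        }
    }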
01619 
01620 #if ARCH_X86_32
01621 static av_noinline
01622 void emulated_edge_mc_mmx(uint8_t *buf, const uint8_t *src, int linesize,
01623                           int block_w, int block_h,
01624                           int src_x, int src_y, int w, int h)
01625 {
01626     emulated_edge_mc(buf, src, linesize, block_w, block_h, src_x, src_y,
01627                      w, h, &ff_emu_edge_core_mmx);
01628 }
01629 #endif
01630 static av_noinline
01631 void emulated_edge_mc_sse(uint8_t *buf, const uint8_t *src, int linesize,
01632                           int block_w, int block_h,
01633                           int src_x, int src_y, int w, int h)
01634 {
01635     emulated_edge_mc(buf, src, linesize, block_w, block_h, src_x, src_y,
01636                      w, h, &ff_emu_edge_core_sse);
01637 }
01638 #endif /* HAVE_YASM */
01639 
01640 typedef void emulated_edge_mc_func (uint8_t *dst, const uint8_t *src,
01641                                     int linesize, int block_w, int block_h,
01642                                     int src_x, int src_y, int w, int h);
01643 
01644 static av_always_inline
01645 void gmc(uint8_t *dst, uint8_t *src, int stride, int h, int ox, int oy,
01646          int dxx, int dxy, int dyx, int dyy, int shift, int r, int width, int height,
01647          emulated_edge_mc_func *emu_edge_fn)
01648 {
01649     const int w = 8;
01650     const int ix = ox>>(16+shift);
01651     const int iy = oy>>(16+shift);
01652     const int oxs = ox>>4;
01653     const int oys = oy>>4;
01654     const int dxxs = dxx>>4;
01655     const int dxys = dxy>>4;
01656     const int dyxs = dyx>>4;
01657     const int dyys = dyy>>4;
01658     const uint16_t r4[4] = {r,r,r,r};
01659     const uint16_t dxy4[4] = {dxys,dxys,dxys,dxys};
01660     const uint16_t dyy4[4] = {dyys,dyys,dyys,dyys};
01661     const uint64_t shift2 = 2*shift;
01662     uint8_t edge_buf[(h+1)*stride];
01663     int x, y;
01664 
01665     const int dxw = (dxx-(1<<(16+shift)))*(w-1);
01666     const int dyh = (dyy-(1<<(16+shift)))*(h-1);
01667     const int dxh = dxy*(h-1);
01668     const int dyw = dyx*(w-1);
01669     if( // non-constant fullpel offset (3% of blocks)
01670         ((ox^(ox+dxw)) | (ox^(ox+dxh)) | (ox^(ox+dxw+dxh)) |
01671          (oy^(oy+dyw)) | (oy^(oy+dyh)) | (oy^(oy+dyw+dyh))) >> (16+shift)
01672         // uses more than 16 bits of subpel mv (only at huge resolution)
01673         || (dxx|dxy|dyx|dyy)&15 )
01674     {
01675         //FIXME could still use mmx for some of the rows
01676         ff_gmc_c(dst, src, stride, h, ox, oy, dxx, dxy, dyx, dyy, shift, r, width, height);
01677         return;
01678     }
01679 
01680     src += ix + iy*stride;
01681     if( (unsigned)ix >= width-w ||
01682         (unsigned)iy >= height-h )
01683     {
01684         emu_edge_fn(edge_buf, src, stride, w+1, h+1, ix, iy, width, height);
01685         src = edge_buf;
01686     }
01687 
01688     __asm__ volatile(
01689         "movd         %0, %%mm6 \n\t"
01690         "pxor      %%mm7, %%mm7 \n\t"
01691         "punpcklwd %%mm6, %%mm6 \n\t"
01692         "punpcklwd %%mm6, %%mm6 \n\t"
01693         :: "r"(1<<shift)
01694     );
01695 
01696     for(x=0; x<w; x+=4){
01697         uint16_t dx4[4] = { oxs - dxys + dxxs*(x+0),
01698                             oxs - dxys + dxxs*(x+1),
01699                             oxs - dxys + dxxs*(x+2),
01700                             oxs - dxys + dxxs*(x+3) };
01701         uint16_t dy4[4] = { oys - dyys + dyxs*(x+0),
01702                             oys - dyys + dyxs*(x+1),
01703                             oys - dyys + dyxs*(x+2),
01704                             oys - dyys + dyxs*(x+3) };
01705 
01706         for(y=0; y<h; y++){
01707             __asm__ volatile(
01708                 "movq   %0,  %%mm4 \n\t"
01709                 "movq   %1,  %%mm5 \n\t"
01710                 "paddw  %2,  %%mm4 \n\t"
01711                 "paddw  %3,  %%mm5 \n\t"
01712                 "movq   %%mm4, %0  \n\t"
01713                 "movq   %%mm5, %1  \n\t"
01714                 "psrlw  $12, %%mm4 \n\t"
01715                 "psrlw  $12, %%mm5 \n\t"
01716                 : "+m"(*dx4), "+m"(*dy4)
01717                 : "m"(*dxy4), "m"(*dyy4)
01718             );
01719 
01720             __asm__ volatile(
01721                 "movq   %%mm6, %%mm2 \n\t"
01722                 "movq   %%mm6, %%mm1 \n\t"
01723                 "psubw  %%mm4, %%mm2 \n\t"
01724                 "psubw  %%mm5, %%mm1 \n\t"
01725                 "movq   %%mm2, %%mm0 \n\t"
01726                 "movq   %%mm4, %%mm3 \n\t"
01727                 "pmullw %%mm1, %%mm0 \n\t" // (s-dx)*(s-dy)
01728                 "pmullw %%mm5, %%mm3 \n\t" // dx*dy
01729                 "pmullw %%mm5, %%mm2 \n\t" // (s-dx)*dy
01730                 "pmullw %%mm4, %%mm1 \n\t" // dx*(s-dy)
01731 
01732                 "movd   %4,    %%mm5 \n\t"
01733                 "movd   %3,    %%mm4 \n\t"
01734                 "punpcklbw %%mm7, %%mm5 \n\t"
01735                 "punpcklbw %%mm7, %%mm4 \n\t"
01736                 "pmullw %%mm5, %%mm3 \n\t" // src[1,1] * dx*dy
01737                 "pmullw %%mm4, %%mm2 \n\t" // src[0,1] * (s-dx)*dy
01738 
01739                 "movd   %2,    %%mm5 \n\t"
01740                 "movd   %1,    %%mm4 \n\t"
01741                 "punpcklbw %%mm7, %%mm5 \n\t"
01742                 "punpcklbw %%mm7, %%mm4 \n\t"
01743                 "pmullw %%mm5, %%mm1 \n\t" // src[1,0] * dx*(s-dy)
01744                 "pmullw %%mm4, %%mm0 \n\t" // src[0,0] * (s-dx)*(s-dy)
01745                 "paddw  %5,    %%mm1 \n\t"
01746                 "paddw  %%mm3, %%mm2 \n\t"
01747                 "paddw  %%mm1, %%mm0 \n\t"
01748                 "paddw  %%mm2, %%mm0 \n\t"
01749 
01750                 "psrlw    %6,    %%mm0 \n\t"
01751                 "packuswb %%mm0, %%mm0 \n\t"
01752                 "movd     %%mm0, %0    \n\t"
01753 
01754                 : "=m"(dst[x+y*stride])
01755                 : "m"(src[0]), "m"(src[1]),
01756                   "m"(src[stride]), "m"(src[stride+1]),
01757                   "m"(*r4), "m"(shift2)
01758             );
01759             src += stride;
01760         }
01761         src += 4-h*stride;
01762     }
01763 }
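Per output pixel, the inner loop above evaluates the bilinear blend spelled
out in its comments, with s = 1<<shift and (dx,dy) the per-pixel fractional
offsets kept in mm4/mm5:

    p = ( src[0,0]*(s-dx)*(s-dy) + src[1,0]*dx*(s-dy)
        + src[0,1]*(s-dx)*dy     + src[1,1]*dx*dy + r ) >> (2*shift)

The MMX path only handles the affine case whose full-pel offset is constant
over the block and whose vectors fit in 16 bits of subpel precision; anything
else takes the ff_gmc_c() fallback at the top of the function.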
01764 
01765 #if HAVE_YASM
01766 #if ARCH_X86_32
01767 static void gmc_mmx(uint8_t *dst, uint8_t *src, int stride, int h, int ox, int oy,
01768                     int dxx, int dxy, int dyx, int dyy, int shift, int r, int width, int height)
01769 {
01770     gmc(dst, src, stride, h, ox, oy, dxx, dxy, dyx, dyy, shift, r,
01771         width, height, &emulated_edge_mc_mmx);
01772 }
01773 #endif
01774 static void gmc_sse(uint8_t *dst, uint8_t *src, int stride, int h, int ox, int oy,
01775                     int dxx, int dxy, int dyx, int dyy, int shift, int r, int width, int height)
01776 {
01777     gmc(dst, src, stride, h, ox, oy, dxx, dxy, dyx, dyy, shift, r,
01778         width, height, &emulated_edge_mc_sse);
01779 }
01780 #else
01781 static void gmc_mmx(uint8_t *dst, uint8_t *src, int stride, int h, int ox, int oy,
01782                     int dxx, int dxy, int dyx, int dyy, int shift, int r, int width, int height)
01783 {
01784     gmc(dst, src, stride, h, ox, oy, dxx, dxy, dyx, dyy, shift, r,
01785         width, height, &ff_emulated_edge_mc_8);
01786 }
01787 #endif
01788 
01789 #define PREFETCH(name, op) \
01790 static void name(void *mem, int stride, int h){\
01791     const uint8_t *p= mem;\
01792     do{\
01793         __asm__ volatile(#op" %0" :: "m"(*p));\
01794         p+= stride;\
01795     }while(--h);\
01796 }
01797 PREFETCH(prefetch_mmx2,  prefetcht0)
01798 PREFETCH(prefetch_3dnow, prefetch)
01799 #undef PREFETCH
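Each expansion issues one software prefetch per row of the block about to be
processed: prefetcht0 (available on MMX2/SSE CPUs) requests the line into the
whole cache hierarchy, while the 3DNow! prefetch is AMD's earlier equivalent.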
01800 
01801 #include "h264_qpel_mmx.c"
01802 
01803 void ff_put_h264_chroma_mc8_mmx_rnd   (uint8_t *dst, uint8_t *src,
01804                                        int stride, int h, int x, int y);
01805 void ff_avg_h264_chroma_mc8_mmx2_rnd  (uint8_t *dst, uint8_t *src,
01806                                        int stride, int h, int x, int y);
01807 void ff_avg_h264_chroma_mc8_3dnow_rnd (uint8_t *dst, uint8_t *src,
01808                                        int stride, int h, int x, int y);
01809 
01810 void ff_put_h264_chroma_mc4_mmx       (uint8_t *dst, uint8_t *src,
01811                                        int stride, int h, int x, int y);
01812 void ff_avg_h264_chroma_mc4_mmx2      (uint8_t *dst, uint8_t *src,
01813                                        int stride, int h, int x, int y);
01814 void ff_avg_h264_chroma_mc4_3dnow     (uint8_t *dst, uint8_t *src,
01815                                        int stride, int h, int x, int y);
01816 
01817 void ff_put_h264_chroma_mc2_mmx2      (uint8_t *dst, uint8_t *src,
01818                                        int stride, int h, int x, int y);
01819 void ff_avg_h264_chroma_mc2_mmx2      (uint8_t *dst, uint8_t *src,
01820                                        int stride, int h, int x, int y);
01821 
01822 void ff_put_h264_chroma_mc8_ssse3_rnd (uint8_t *dst, uint8_t *src,
01823                                        int stride, int h, int x, int y);
01824 void ff_put_h264_chroma_mc4_ssse3     (uint8_t *dst, uint8_t *src,
01825                                        int stride, int h, int x, int y);
01826 
01827 void ff_avg_h264_chroma_mc8_ssse3_rnd (uint8_t *dst, uint8_t *src,
01828                                        int stride, int h, int x, int y);
01829 void ff_avg_h264_chroma_mc4_ssse3     (uint8_t *dst, uint8_t *src,
01830                                        int stride, int h, int x, int y);
01831 
01832 #define CHROMA_MC(OP, NUM, DEPTH, OPT) \
01833 void ff_ ## OP ## _h264_chroma_mc ## NUM ## _ ## DEPTH ## _ ## OPT \
01834                                       (uint8_t *dst, uint8_t *src,\
01835                                        int stride, int h, int x, int y);
01836 
01837 CHROMA_MC(put, 2, 10, mmxext)
01838 CHROMA_MC(avg, 2, 10, mmxext)
01839 CHROMA_MC(put, 4, 10, mmxext)
01840 CHROMA_MC(avg, 4, 10, mmxext)
01841 CHROMA_MC(put, 8, 10, sse2)
01842 CHROMA_MC(avg, 8, 10, sse2)
01843 CHROMA_MC(put, 8, 10, avx)
01844 CHROMA_MC(avg, 8, 10, avx)
01845 
01846 /* CAVS specific */
01847 void ff_put_cavs_qpel8_mc00_mmx2(uint8_t *dst, uint8_t *src, int stride) {
01848     put_pixels8_mmx(dst, src, stride, 8);
01849 }
01850 void ff_avg_cavs_qpel8_mc00_mmx2(uint8_t *dst, uint8_t *src, int stride) {
01851     avg_pixels8_mmx(dst, src, stride, 8);
01852 }
01853 void ff_put_cavs_qpel16_mc00_mmx2(uint8_t *dst, uint8_t *src, int stride) {
01854     put_pixels16_mmx(dst, src, stride, 16);
01855 }
01856 void ff_avg_cavs_qpel16_mc00_mmx2(uint8_t *dst, uint8_t *src, int stride) {
01857     avg_pixels16_mmx(dst, src, stride, 16);
01858 }
01859 
01860 /* VC1 specific */
01861 void ff_put_vc1_mspel_mc00_mmx(uint8_t *dst, const uint8_t *src, int stride, int rnd) {
01862     put_pixels8_mmx(dst, src, stride, 8);
01863 }
01864 void ff_avg_vc1_mspel_mc00_mmx2(uint8_t *dst, const uint8_t *src, int stride, int rnd) {
01865     avg_pixels8_mmx2(dst, src, stride, 8);
01866 }
01867 
01868 /* only used in VP3/5/6 */
01869 static void put_vp_no_rnd_pixels8_l2_mmx(uint8_t *dst, const uint8_t *a, const uint8_t *b, int stride, int h)
01870 {
01871 //    START_TIMER
01872     MOVQ_BFE(mm6);
01873     __asm__ volatile(
01874         "1:                             \n\t"
01875         "movq   (%1), %%mm0             \n\t"
01876         "movq   (%2), %%mm1             \n\t"
01877         "movq   (%1,%4), %%mm2          \n\t"
01878         "movq   (%2,%4), %%mm3          \n\t"
01879         PAVGBP_MMX_NO_RND(%%mm0, %%mm1, %%mm4,   %%mm2, %%mm3, %%mm5)
01880         "movq   %%mm4, (%3)             \n\t"
01881         "movq   %%mm5, (%3,%4)          \n\t"
01882 
01883         "movq   (%1,%4,2), %%mm0        \n\t"
01884         "movq   (%2,%4,2), %%mm1        \n\t"
01885         "movq   (%1,%5), %%mm2          \n\t"
01886         "movq   (%2,%5), %%mm3          \n\t"
01887         "lea    (%1,%4,4), %1           \n\t"
01888         "lea    (%2,%4,4), %2           \n\t"
01889         PAVGBP_MMX_NO_RND(%%mm0, %%mm1, %%mm4,   %%mm2, %%mm3, %%mm5)
01890         "movq   %%mm4, (%3,%4,2)        \n\t"
01891         "movq   %%mm5, (%3,%5)          \n\t"
01892         "lea    (%3,%4,4), %3           \n\t"
01893         "subl   $4, %0                  \n\t"
01894         "jnz    1b                      \n\t"
01895         :"+r"(h), "+r"(a), "+r"(b), "+r"(dst)
01896         :"r"((x86_reg)stride), "r"((x86_reg)3L*stride)
01897         :"memory");
01898 //    STOP_TIMER("put_vp_no_rnd_pixels8_l2_mmx")
01899 }
01900 static void put_vp_no_rnd_pixels16_l2_mmx(uint8_t *dst, const uint8_t *a, const uint8_t *b, int stride, int h)
01901 {
01902     put_vp_no_rnd_pixels8_l2_mmx(dst, a, b, stride, h);
01903     put_vp_no_rnd_pixels8_l2_mmx(dst+8, a+8, b+8, stride, h);
01904 }
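The VP3/5/6 variant averages the two predictions without the usual upward
rounding, i.e. per byte dst[i] = (a[i] + b[i]) >> 1 (PAVGBP_MMX_NO_RND with
the MOVQ_BFE mask), which is why it cannot simply use pavgb; the 16-pixel
version runs the 8-pixel routine on both halves.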
01905 
01906 #if CONFIG_DIRAC_DECODER
01907 #define DIRAC_PIXOP(OPNAME, EXT)\
01908 void ff_ ## OPNAME ## _dirac_pixels8_ ## EXT(uint8_t *dst, const uint8_t *src[5], int stride, int h)\
01909 {\
01910     OPNAME ## _pixels8_ ## EXT(dst, src[0], stride, h);\
01911 }\
01912 void ff_ ## OPNAME ## _dirac_pixels16_ ## EXT(uint8_t *dst, const uint8_t *src[5], int stride, int h)\
01913 {\
01914     OPNAME ## _pixels16_ ## EXT(dst, src[0], stride, h);\
01915 }\
01916 void ff_ ## OPNAME ## _dirac_pixels32_ ## EXT(uint8_t *dst, const uint8_t *src[5], int stride, int h)\
01917 {\
01918     OPNAME ## _pixels16_ ## EXT(dst   , src[0]   , stride, h);\
01919     OPNAME ## _pixels16_ ## EXT(dst+16, src[0]+16, stride, h);\
01920 }
01921 
01922 DIRAC_PIXOP(put, mmx)
01923 DIRAC_PIXOP(avg, mmx)
01924 DIRAC_PIXOP(avg, mmx2)
01925 
01926 void ff_put_dirac_pixels16_sse2(uint8_t *dst, const uint8_t *src[5], int stride, int h)
01927 {
01928     put_pixels16_sse2(dst, src[0], stride, h);
01929 }
01930 void ff_avg_dirac_pixels16_sse2(uint8_t *dst, const uint8_t *src[5], int stride, int h)
01931 {
01932     avg_pixels16_sse2(dst, src[0], stride, h);
01933 }
01934 void ff_put_dirac_pixels32_sse2(uint8_t *dst, const uint8_t *src[5], int stride, int h)
01935 {
01936     put_pixels16_sse2(dst   , src[0]   , stride, h);
01937     put_pixels16_sse2(dst+16, src[0]+16, stride, h);
01938 }
01939 void ff_avg_dirac_pixels32_sse2(uint8_t *dst, const uint8_t *src[5], int stride, int h)
01940 {
01941     avg_pixels16_sse2(dst   , src[0]   , stride, h);
01942     avg_pixels16_sse2(dst+16, src[0]+16, stride, h);
01943 }
01944 #endif
01945 
01946 /* XXX: these functions should be removed as soon as all IDCTs are
01947    converted */
01948 #if CONFIG_GPL
01949 static void ff_libmpeg2mmx_idct_put(uint8_t *dest, int line_size, DCTELEM *block)
01950 {
01951     ff_mmx_idct (block);
01952     ff_put_pixels_clamped_mmx(block, dest, line_size);
01953 }
01954 static void ff_libmpeg2mmx_idct_add(uint8_t *dest, int line_size, DCTELEM *block)
01955 {
01956     ff_mmx_idct (block);
01957     ff_add_pixels_clamped_mmx(block, dest, line_size);
01958 }
01959 static void ff_libmpeg2mmx2_idct_put(uint8_t *dest, int line_size, DCTELEM *block)
01960 {
01961     ff_mmxext_idct (block);
01962     ff_put_pixels_clamped_mmx(block, dest, line_size);
01963 }
01964 static void ff_libmpeg2mmx2_idct_add(uint8_t *dest, int line_size, DCTELEM *block)
01965 {
01966     ff_mmxext_idct (block);
01967     ff_add_pixels_clamped_mmx(block, dest, line_size);
01968 }
01969 #endif
01970 static void ff_idct_xvid_mmx_put(uint8_t *dest, int line_size, DCTELEM *block)
01971 {
01972     ff_idct_xvid_mmx (block);
01973     ff_put_pixels_clamped_mmx(block, dest, line_size);
01974 }
01975 static void ff_idct_xvid_mmx_add(uint8_t *dest, int line_size, DCTELEM *block)
01976 {
01977     ff_idct_xvid_mmx (block);
01978     ff_add_pixels_clamped_mmx(block, dest, line_size);
01979 }
01980 static void ff_idct_xvid_mmx2_put(uint8_t *dest, int line_size, DCTELEM *block)
01981 {
01982     ff_idct_xvid_mmx2 (block);
01983     ff_put_pixels_clamped_mmx(block, dest, line_size);
01984 }
01985 static void ff_idct_xvid_mmx2_add(uint8_t *dest, int line_size, DCTELEM *block)
01986 {
01987     ff_idct_xvid_mmx2 (block);
01988     ff_add_pixels_clamped_mmx(block, dest, line_size);
01989 }
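All these wrappers follow the same pattern: run the IDCT in place on the
coefficient block, then either store the result saturated to 0..255
(ff_put_pixels_clamped_mmx) or add it to the existing pixels with the same
saturation (ff_add_pixels_clamped_mmx).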
01990 
01991 static void vorbis_inverse_coupling_3dnow(float *mag, float *ang, int blocksize)
01992 {
01993     int i;
01994     __asm__ volatile("pxor %%mm7, %%mm7":);
01995     for(i=0; i<blocksize; i+=2) {
01996         __asm__ volatile(
01997             "movq    %0,    %%mm0 \n\t"
01998             "movq    %1,    %%mm1 \n\t"
01999             "movq    %%mm0, %%mm2 \n\t"
02000             "movq    %%mm1, %%mm3 \n\t"
02001             "pfcmpge %%mm7, %%mm2 \n\t" // m <= 0.0
02002             "pfcmpge %%mm7, %%mm3 \n\t" // a <= 0.0
02003             "pslld   $31,   %%mm2 \n\t" // keep only the sign bit
02004             "pxor    %%mm2, %%mm1 \n\t"
02005             "movq    %%mm3, %%mm4 \n\t"
02006             "pand    %%mm1, %%mm3 \n\t"
02007             "pandn   %%mm1, %%mm4 \n\t"
02008             "pfadd   %%mm0, %%mm3 \n\t" // a = m + ((a<0) & (a ^ sign(m)))
02009             "pfsub   %%mm4, %%mm0 \n\t" // m = m + ((a>0) & (a ^ sign(m)))
02010             "movq    %%mm3, %1    \n\t"
02011             "movq    %%mm0, %0    \n\t"
02012             :"+m"(mag[i]), "+m"(ang[i])
02013             ::"memory"
02014         );
02015     }
02016     __asm__ volatile("femms");
02017 }
02018 static void vorbis_inverse_coupling_sse(float *mag, float *ang, int blocksize)
02019 {
02020     int i;
02021 
02022     __asm__ volatile(
02023             "movaps  %0,     %%xmm5 \n\t"
02024         ::"m"(ff_pdw_80000000[0])
02025     );
02026     for(i=0; i<blocksize; i+=4) {
02027         __asm__ volatile(
02028             "movaps  %0,     %%xmm0 \n\t"
02029             "movaps  %1,     %%xmm1 \n\t"
02030             "xorps   %%xmm2, %%xmm2 \n\t"
02031             "xorps   %%xmm3, %%xmm3 \n\t"
02032             "cmpleps %%xmm0, %%xmm2 \n\t" // m <= 0.0
02033             "cmpleps %%xmm1, %%xmm3 \n\t" // a <= 0.0
02034             "andps   %%xmm5, %%xmm2 \n\t" // keep only the sign bit
02035             "xorps   %%xmm2, %%xmm1 \n\t"
02036             "movaps  %%xmm3, %%xmm4 \n\t"
02037             "andps   %%xmm1, %%xmm3 \n\t"
02038             "andnps  %%xmm1, %%xmm4 \n\t"
02039             "addps   %%xmm0, %%xmm3 \n\t" // a = m + ((a<0) & (a ^ sign(m)))
02040             "subps   %%xmm4, %%xmm0 \n\t" // m = m + ((a>0) & (a ^ sign(m)))
02041             "movaps  %%xmm3, %1     \n\t"
02042             "movaps  %%xmm0, %0     \n\t"
02043             :"+m"(mag[i]), "+m"(ang[i])
02044             ::"memory"
02045         );
02046     }
02047 }
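Both versions implement the Vorbis magnitude/angle decoupling branch-free via
sign masks. A scalar sketch of the transform they apply per element (assumed
equivalent of the generic C fallback, for reference only):

    static void vorbis_inverse_coupling_ref(float *mag, float *ang,
                                            int blocksize)
    {
        int i;
        for (i = 0; i < blocksize; i++) {
            float m = mag[i], a = ang[i];
            if (m > 0.0f) {
                if (a > 0.0f) { ang[i] = m - a; }
                else          { ang[i] = m; mag[i] = m + a; }
            } else {
                if (a > 0.0f) { ang[i] = m + a; }
                else          { ang[i] = m; mag[i] = m - a; }
            }
        }
    }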
02048 
02049 #define IF1(x) x
02050 #define IF0(x)
02051 
02052 #define MIX5(mono,stereo)\
02053     __asm__ volatile(\
02054         "movss          0(%2), %%xmm5 \n"\
02055         "movss          8(%2), %%xmm6 \n"\
02056         "movss         24(%2), %%xmm7 \n"\
02057         "shufps    $0, %%xmm5, %%xmm5 \n"\
02058         "shufps    $0, %%xmm6, %%xmm6 \n"\
02059         "shufps    $0, %%xmm7, %%xmm7 \n"\
02060         "1: \n"\
02061         "movaps       (%0,%1), %%xmm0 \n"\
02062         "movaps  0x400(%0,%1), %%xmm1 \n"\
02063         "movaps  0x800(%0,%1), %%xmm2 \n"\
02064         "movaps  0xc00(%0,%1), %%xmm3 \n"\
02065         "movaps 0x1000(%0,%1), %%xmm4 \n"\
02066         "mulps         %%xmm5, %%xmm0 \n"\
02067         "mulps         %%xmm6, %%xmm1 \n"\
02068         "mulps         %%xmm5, %%xmm2 \n"\
02069         "mulps         %%xmm7, %%xmm3 \n"\
02070         "mulps         %%xmm7, %%xmm4 \n"\
02071  stereo("addps         %%xmm1, %%xmm0 \n")\
02072         "addps         %%xmm1, %%xmm2 \n"\
02073         "addps         %%xmm3, %%xmm0 \n"\
02074         "addps         %%xmm4, %%xmm2 \n"\
02075    mono("addps         %%xmm2, %%xmm0 \n")\
02076         "movaps  %%xmm0,      (%0,%1) \n"\
02077  stereo("movaps  %%xmm2, 0x400(%0,%1) \n")\
02078         "add $16, %0 \n"\
02079         "jl 1b \n"\
02080         :"+&r"(i)\
02081         :"r"(samples[0]+len), "r"(matrix)\
02082         :XMM_CLOBBERS("%xmm0", "%xmm1", "%xmm2", "%xmm3", \
02083                       "%xmm4", "%xmm5", "%xmm6", "%xmm7",)\
02084          "memory"\
02085     );
02086 
02087 #define MIX_MISC(stereo)\
02088     __asm__ volatile(\
02089         "1: \n"\
02090         "movaps  (%3,%0), %%xmm0 \n"\
02091  stereo("movaps   %%xmm0, %%xmm1 \n")\
02092         "mulps    %%xmm4, %%xmm0 \n"\
02093  stereo("mulps    %%xmm5, %%xmm1 \n")\
02094         "lea 1024(%3,%0), %1 \n"\
02095         "mov %5, %2 \n"\
02096         "2: \n"\
02097         "movaps   (%1),   %%xmm2 \n"\
02098  stereo("movaps   %%xmm2, %%xmm3 \n")\
02099         "mulps   (%4,%2), %%xmm2 \n"\
02100  stereo("mulps 16(%4,%2), %%xmm3 \n")\
02101         "addps    %%xmm2, %%xmm0 \n"\
02102  stereo("addps    %%xmm3, %%xmm1 \n")\
02103         "add $1024, %1 \n"\
02104         "add $32, %2 \n"\
02105         "jl 2b \n"\
02106         "movaps   %%xmm0,     (%3,%0) \n"\
02107  stereo("movaps   %%xmm1, 1024(%3,%0) \n")\
02108         "add $16, %0 \n"\
02109         "jl 1b \n"\
02110         :"+&r"(i), "=&r"(j), "=&r"(k)\
02111         :"r"(samples[0]+len), "r"(matrix_simd+in_ch), "g"((intptr_t)-32*(in_ch-1))\
02112         :"memory"\
02113     );
02114 
02115 static void ac3_downmix_sse(float (*samples)[256], float (*matrix)[2], int out_ch, int in_ch, int len)
02116 {
02117     int (*matrix_cmp)[2] = (int(*)[2])matrix;
02118     intptr_t i,j,k;
02119 
02120     i = -len*sizeof(float);
02121     if(in_ch == 5 && out_ch == 2 && !(matrix_cmp[0][1]|matrix_cmp[2][0]|matrix_cmp[3][1]|matrix_cmp[4][0]|(matrix_cmp[1][0]^matrix_cmp[1][1])|(matrix_cmp[0][0]^matrix_cmp[2][1]))) {
02122         MIX5(IF0,IF1);
02123     } else if(in_ch == 5 && out_ch == 1 && matrix_cmp[0][0]==matrix_cmp[2][0] && matrix_cmp[3][0]==matrix_cmp[4][0]) {
02124         MIX5(IF1,IF0);
02125     } else {
02126         DECLARE_ALIGNED(16, float, matrix_simd)[AC3_MAX_CHANNELS][2][4];
02127         j = 2*in_ch*sizeof(float);
02128         __asm__ volatile(
02129             "1: \n"
02130             "sub $8, %0 \n"
02131             "movss     (%2,%0), %%xmm4 \n"
02132             "movss    4(%2,%0), %%xmm5 \n"
02133             "shufps $0, %%xmm4, %%xmm4 \n"
02134             "shufps $0, %%xmm5, %%xmm5 \n"
02135             "movaps %%xmm4,   (%1,%0,4) \n"
02136             "movaps %%xmm5, 16(%1,%0,4) \n"
02137             "jg 1b \n"
02138             :"+&r"(j)
02139             :"r"(matrix_simd), "r"(matrix)
02140             :"memory"
02141         );
02142         if(out_ch == 2) {
02143             MIX_MISC(IF1);
02144         } else {
02145             MIX_MISC(IF0);
02146         }
02147     }
02148 }
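MIX5 covers the two common 5-channel layouts whose matrix has the
shared-coefficient structure tested above (5.0 to stereo and 5.0 to mono, with
only three distinct gains held in xmm5-xmm7); MIX_MISC handles arbitrary
matrices after broadcasting each coefficient into matrix_simd. Either way the
result is, per sample, a plain matrix multiply written back over the first one
or two input channels. A scalar sketch under the same in-place convention
(illustrative, not the actual C fallback):

    static void ac3_downmix_ref(float (*samples)[256], float (*matrix)[2],
                                int out_ch, int in_ch, int len)
    {
        int i, j;
        for (i = 0; i < len; i++) {
            float v0 = 0.0f, v1 = 0.0f;
            for (j = 0; j < in_ch; j++) {
                v0 += samples[j][i] * matrix[j][0];
                v1 += samples[j][i] * matrix[j][1];
            }
            samples[0][i] = v0;          /* mono, or left */
            if (out_ch == 2)
                samples[1][i] = v1;      /* right */
        }
    }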
02149 
02150 static void vector_fmul_3dnow(float *dst, const float *src0, const float *src1, int len){
02151     x86_reg i = (len-4)*4;
02152     __asm__ volatile(
02153         "1: \n\t"
02154         "movq    (%2,%0), %%mm0 \n\t"
02155         "movq   8(%2,%0), %%mm1 \n\t"
02156         "pfmul   (%3,%0), %%mm0 \n\t"
02157         "pfmul  8(%3,%0), %%mm1 \n\t"
02158         "movq   %%mm0,  (%1,%0) \n\t"
02159         "movq   %%mm1, 8(%1,%0) \n\t"
02160         "sub  $16, %0 \n\t"
02161         "jge 1b \n\t"
02162         "femms  \n\t"
02163         :"+r"(i)
02164         :"r"(dst), "r"(src0), "r"(src1)
02165         :"memory"
02166     );
02167 }
02168 static void vector_fmul_sse(float *dst, const float *src0, const float *src1, int len){
02169     x86_reg i = (len-8)*4;
02170     __asm__ volatile(
02171         "1: \n\t"
02172         "movaps    (%2,%0), %%xmm0 \n\t"
02173         "movaps  16(%2,%0), %%xmm1 \n\t"
02174         "mulps     (%3,%0), %%xmm0 \n\t"
02175         "mulps   16(%3,%0), %%xmm1 \n\t"
02176         "movaps  %%xmm0,   (%1,%0) \n\t"
02177         "movaps  %%xmm1, 16(%1,%0) \n\t"
02178         "sub  $32, %0 \n\t"
02179         "jge 1b \n\t"
02180         :"+r"(i)
02181         :"r"(dst), "r"(src0), "r"(src1)
02182         :"memory"
02183     );
02184 }
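Scalar equivalent: dst[i] = src0[i] * src1[i]. Both loops start at the end of
the buffers and count the byte offset down to zero, so len must be a multiple
of 4 (3DNow!) or 8 (SSE), and the movaps version additionally expects 16-byte
aligned pointers.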
02185 
02186 static void vector_fmul_reverse_3dnow2(float *dst, const float *src0, const float *src1, int len){
02187     x86_reg i = len*4-16;
02188     __asm__ volatile(
02189         "1: \n\t"
02190         "pswapd   8(%1), %%mm0 \n\t"
02191         "pswapd    (%1), %%mm1 \n\t"
02192         "pfmul  (%3,%0), %%mm0 \n\t"
02193         "pfmul 8(%3,%0), %%mm1 \n\t"
02194         "movq  %%mm0,  (%2,%0) \n\t"
02195         "movq  %%mm1, 8(%2,%0) \n\t"
02196         "add   $16, %1 \n\t"
02197         "sub   $16, %0 \n\t"
02198         "jge   1b \n\t"
02199         :"+r"(i), "+r"(src1)
02200         :"r"(dst), "r"(src0)
02201     );
02202     __asm__ volatile("femms");
02203 }
02204 static void vector_fmul_reverse_sse(float *dst, const float *src0, const float *src1, int len){
02205     x86_reg i = len*4-32;
02206     __asm__ volatile(
02207         "1: \n\t"
02208         "movaps        16(%1), %%xmm0 \n\t"
02209         "movaps          (%1), %%xmm1 \n\t"
02210         "shufps $0x1b, %%xmm0, %%xmm0 \n\t"
02211         "shufps $0x1b, %%xmm1, %%xmm1 \n\t"
02212         "mulps        (%3,%0), %%xmm0 \n\t"
02213         "mulps      16(%3,%0), %%xmm1 \n\t"
02214         "movaps     %%xmm0,   (%2,%0) \n\t"
02215         "movaps     %%xmm1, 16(%2,%0) \n\t"
02216         "add    $32, %1 \n\t"
02217         "sub    $32, %0 \n\t"
02218         "jge    1b \n\t"
02219         :"+r"(i), "+r"(src1)
02220         :"r"(dst), "r"(src0)
02221     );
02222 }
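Here the second operand is read backwards: dst[i] = src0[i] * src1[len-1-i].
pswapd swaps the two floats of an mm register and shufps $0x1b reverses the
four floats of an xmm register, so advancing src1 while the offset in %0
counts down walks src1 in reverse relative to dst and src0.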
02223 
02224 static void vector_fmul_add_3dnow(float *dst, const float *src0, const float *src1,
02225                                   const float *src2, int len){
02226     x86_reg i = (len-4)*4;
02227     __asm__ volatile(
02228         "1: \n\t"
02229         "movq    (%2,%0), %%mm0 \n\t"
02230         "movq   8(%2,%0), %%mm1 \n\t"
02231         "pfmul   (%3,%0), %%mm0 \n\t"
02232         "pfmul  8(%3,%0), %%mm1 \n\t"
02233         "pfadd   (%4,%0), %%mm0 \n\t"
02234         "pfadd  8(%4,%0), %%mm1 \n\t"
02235         "movq  %%mm0,   (%1,%0) \n\t"
02236         "movq  %%mm1,  8(%1,%0) \n\t"
02237         "sub  $16, %0 \n\t"
02238         "jge  1b \n\t"
02239         :"+r"(i)
02240         :"r"(dst), "r"(src0), "r"(src1), "r"(src2)
02241         :"memory"
02242     );
02243     __asm__ volatile("femms");
02244 }
02245 static void vector_fmul_add_sse(float *dst, const float *src0, const float *src1,
02246                                 const float *src2, int len){
02247     x86_reg i = (len-8)*4;
02248     __asm__ volatile(
02249         "1: \n\t"
02250         "movaps   (%2,%0), %%xmm0 \n\t"
02251         "movaps 16(%2,%0), %%xmm1 \n\t"
02252         "mulps    (%3,%0), %%xmm0 \n\t"
02253         "mulps  16(%3,%0), %%xmm1 \n\t"
02254         "addps    (%4,%0), %%xmm0 \n\t"
02255         "addps  16(%4,%0), %%xmm1 \n\t"
02256         "movaps %%xmm0,   (%1,%0) \n\t"
02257         "movaps %%xmm1, 16(%1,%0) \n\t"
02258         "sub  $32, %0 \n\t"
02259         "jge  1b \n\t"
02260         :"+r"(i)
02261         :"r"(dst), "r"(src0), "r"(src1), "r"(src2)
02262         :"memory"
02263     );
02264 }
02265 
02266 #if HAVE_6REGS
02267 static void vector_fmul_window_3dnow2(float *dst, const float *src0, const float *src1,
02268                                       const float *win, int len){
02269     x86_reg i = -len*4;
02270     x86_reg j = len*4-8;
02271     __asm__ volatile(
02272         "1: \n"
02273         "pswapd  (%5,%1), %%mm1 \n"
02274         "movq    (%5,%0), %%mm0 \n"
02275         "pswapd  (%4,%1), %%mm5 \n"
02276         "movq    (%3,%0), %%mm4 \n"
02277         "movq      %%mm0, %%mm2 \n"
02278         "movq      %%mm1, %%mm3 \n"
02279         "pfmul     %%mm4, %%mm2 \n" // src0[len+i]*win[len+i]
02280         "pfmul     %%mm5, %%mm3 \n" // src1[    j]*win[len+j]
02281         "pfmul     %%mm4, %%mm1 \n" // src0[len+i]*win[len+j]
02282         "pfmul     %%mm5, %%mm0 \n" // src1[    j]*win[len+i]
02283         "pfadd     %%mm3, %%mm2 \n"
02284         "pfsub     %%mm0, %%mm1 \n"
02285         "pswapd    %%mm2, %%mm2 \n"
02286         "movq      %%mm1, (%2,%0) \n"
02287         "movq      %%mm2, (%2,%1) \n"
02288         "sub $8, %1 \n"
02289         "add $8, %0 \n"
02290         "jl 1b \n"
02291         "femms \n"
02292         :"+r"(i), "+r"(j)
02293         :"r"(dst+len), "r"(src0+len), "r"(src1), "r"(win+len)
02294     );
02295 }
02296 
02297 static void vector_fmul_window_sse(float *dst, const float *src0, const float *src1,
02298                                    const float *win, int len){
02299     x86_reg i = -len*4;
02300     x86_reg j = len*4-16;
02301     __asm__ volatile(
02302         "1: \n"
02303         "movaps       (%5,%1), %%xmm1 \n"
02304         "movaps       (%5,%0), %%xmm0 \n"
02305         "movaps       (%4,%1), %%xmm5 \n"
02306         "movaps       (%3,%0), %%xmm4 \n"
02307         "shufps $0x1b, %%xmm1, %%xmm1 \n"
02308         "shufps $0x1b, %%xmm5, %%xmm5 \n"
02309         "movaps        %%xmm0, %%xmm2 \n"
02310         "movaps        %%xmm1, %%xmm3 \n"
02311         "mulps         %%xmm4, %%xmm2 \n" // src0[len+i]*win[len+i]
02312         "mulps         %%xmm5, %%xmm3 \n" // src1[    j]*win[len+j]
02313         "mulps         %%xmm4, %%xmm1 \n" // src0[len+i]*win[len+j]
02314         "mulps         %%xmm5, %%xmm0 \n" // src1[    j]*win[len+i]
02315         "addps         %%xmm3, %%xmm2 \n"
02316         "subps         %%xmm0, %%xmm1 \n"
02317         "shufps $0x1b, %%xmm2, %%xmm2 \n"
02318         "movaps        %%xmm1, (%2,%0) \n"
02319         "movaps        %%xmm2, (%2,%1) \n"
02320         "sub $16, %1 \n"
02321         "add $16, %0 \n"
02322         "jl 1b \n"
02323         :"+r"(i), "+r"(j)
02324         :"r"(dst+len), "r"(src0+len), "r"(src1), "r"(win+len)
02325     );
02326 }
02327 #endif /* HAVE_6REGS */
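This windowed overlap-add folds the second half of one frame into the first
half of the next. Its scalar form, using the same pointer-past-the-middle
convention as the asm (assumed equivalent of the C version, for reference):

    static void vector_fmul_window_ref(float *dst, const float *src0,
                                       const float *src1, const float *win,
                                       int len)
    {
        int i, j;
        dst  += len;
        win  += len;
        src0 += len;
        for (i = -len, j = len - 1; i < 0; i++, j--) {
            float s0 = src0[i], s1 = src1[j];
            float wi = win[i],  wj = win[j];
            dst[i] = s0 * wj - s1 * wi;
            dst[j] = s0 * wi + s1 * wj;
        }
    }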
02328 
02329 static void vector_clipf_sse(float *dst, const float *src, float min, float max,
02330                              int len)
02331 {
02332     x86_reg i = (len-16)*4;
02333     __asm__ volatile(
02334         "movss  %3, %%xmm4 \n"
02335         "movss  %4, %%xmm5 \n"
02336         "shufps $0, %%xmm4, %%xmm4 \n"
02337         "shufps $0, %%xmm5, %%xmm5 \n"
02338         "1: \n\t"
02339         "movaps    (%2,%0), %%xmm0 \n\t" // 3/1 on intel
02340         "movaps  16(%2,%0), %%xmm1 \n\t"
02341         "movaps  32(%2,%0), %%xmm2 \n\t"
02342         "movaps  48(%2,%0), %%xmm3 \n\t"
02343         "maxps      %%xmm4, %%xmm0 \n\t"
02344         "maxps      %%xmm4, %%xmm1 \n\t"
02345         "maxps      %%xmm4, %%xmm2 \n\t"
02346         "maxps      %%xmm4, %%xmm3 \n\t"
02347         "minps      %%xmm5, %%xmm0 \n\t"
02348         "minps      %%xmm5, %%xmm1 \n\t"
02349         "minps      %%xmm5, %%xmm2 \n\t"
02350         "minps      %%xmm5, %%xmm3 \n\t"
02351         "movaps  %%xmm0,   (%1,%0) \n\t"
02352         "movaps  %%xmm1, 16(%1,%0) \n\t"
02353         "movaps  %%xmm2, 32(%1,%0) \n\t"
02354         "movaps  %%xmm3, 48(%1,%0) \n\t"
02355         "sub  $64, %0 \n\t"
02356         "jge 1b \n\t"
02357         :"+&r"(i)
02358         :"r"(dst), "r"(src), "m"(min), "m"(max)
02359         :"memory"
02360     );
02361 }
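Scalar equivalent: dst[i] = av_clipf(src[i], min, max). The loop clips 16
floats (64 bytes) per iteration through four xmm registers, hence the
"sub $64" and the implicit requirement that len be a multiple of 16.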
02362 
02363 void ff_vp3_idct_mmx(int16_t *input_data);
02364 void ff_vp3_idct_put_mmx(uint8_t *dest, int line_size, DCTELEM *block);
02365 void ff_vp3_idct_add_mmx(uint8_t *dest, int line_size, DCTELEM *block);
02366 
02367 void ff_vp3_idct_dc_add_mmx2(uint8_t *dest, int line_size, const DCTELEM *block);
02368 
02369 void ff_vp3_v_loop_filter_mmx2(uint8_t *src, int stride, int *bounding_values);
02370 void ff_vp3_h_loop_filter_mmx2(uint8_t *src, int stride, int *bounding_values);
02371 
02372 void ff_vp3_idct_sse2(int16_t *input_data);
02373 void ff_vp3_idct_put_sse2(uint8_t *dest, int line_size, DCTELEM *block);
02374 void ff_vp3_idct_add_sse2(uint8_t *dest, int line_size, DCTELEM *block);
02375 
02376 int32_t ff_scalarproduct_int16_mmx2(const int16_t *v1, const int16_t *v2, int order, int shift);
02377 int32_t ff_scalarproduct_int16_sse2(const int16_t *v1, const int16_t *v2, int order, int shift);
02378 int32_t ff_scalarproduct_and_madd_int16_mmx2(int16_t *v1, const int16_t *v2, const int16_t *v3, int order, int mul);
02379 int32_t ff_scalarproduct_and_madd_int16_sse2(int16_t *v1, const int16_t *v2, const int16_t *v3, int order, int mul);
02380 int32_t ff_scalarproduct_and_madd_int16_ssse3(int16_t *v1, const int16_t *v2, const int16_t *v3, int order, int mul);
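/* Roughly, per the C fallbacks in dsputil.c: scalarproduct_int16 returns
 * the sum over i of (v1[i] * v2[i]) >> shift, and
 * scalarproduct_and_madd_int16 returns the sum of v1[i] * v2[i] while
 * updating v1[i] += mul * v3[i] in the same pass; order is the element
 * count. */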
02381 
02382 void ff_apply_window_int16_mmxext    (int16_t *output, const int16_t *input,
02383                                       const int16_t *window, unsigned int len);
02384 void ff_apply_window_int16_mmxext_ba (int16_t *output, const int16_t *input,
02385                                       const int16_t *window, unsigned int len);
02386 void ff_apply_window_int16_sse2      (int16_t *output, const int16_t *input,
02387                                       const int16_t *window, unsigned int len);
02388 void ff_apply_window_int16_sse2_ba   (int16_t *output, const int16_t *input,
02389                                       const int16_t *window, unsigned int len);
02390 void ff_apply_window_int16_ssse3     (int16_t *output, const int16_t *input,
02391                                       const int16_t *window, unsigned int len);
02392 void ff_apply_window_int16_ssse3_atom(int16_t *output, const int16_t *input,
02393                                       const int16_t *window, unsigned int len);
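/* apply_window_int16 performs a rounded Q15 multiply of a symmetric window
 * into 16-bit samples, roughly output[i] = (input[i] * window[i] + (1 << 14)) >> 15
 * in the C fallback. The _ba variants preserve the exact C rounding and are
 * the ones selected under CODEC_FLAG_BITEXACT below; the _atom variant is
 * the flavour dispatched on Atom CPUs. */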
02394 
02395 void ff_add_hfyu_median_prediction_mmx2(uint8_t *dst, const uint8_t *top, const uint8_t *diff, int w, int *left, int *left_top);
02396 int  ff_add_hfyu_left_prediction_ssse3(uint8_t *dst, const uint8_t *src, int w, int left);
02397 int  ff_add_hfyu_left_prediction_sse4(uint8_t *dst, const uint8_t *src, int w, int left);
02398 
02399 float ff_scalarproduct_float_sse(const float *v1, const float *v2, int order);
02400 
02401 void ff_vector_clip_int32_mmx     (int32_t *dst, const int32_t *src, int32_t min,
02402                                    int32_t max, unsigned int len);
02403 void ff_vector_clip_int32_sse2    (int32_t *dst, const int32_t *src, int32_t min,
02404                                    int32_t max, unsigned int len);
02405 void ff_vector_clip_int32_int_sse2(int32_t *dst, const int32_t *src, int32_t min,
02406                                    int32_t max, unsigned int len);
02407 void ff_vector_clip_int32_sse4    (int32_t *dst, const int32_t *src, int32_t min,
02408                                    int32_t max, unsigned int len);
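/* vector_clip_int32 clamps every element of src into [min, max], i.e. the
 * scalar dst[i] = av_clip(src[i], min, max); the _int_sse2 flavour is the
 * one dispatched on Atom (see the AV_CPU_FLAG_ATOM check below). */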
02409 
02410 void ff_butterflies_float_interleave_sse(float *dst, const float *src0,
02411                                          const float *src1, int len);
02412 void ff_butterflies_float_interleave_avx(float *dst, const float *src0,
02413                                          const float *src1, int len);
02414 
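/* Entry point for the x86 DSP setup: reads the runtime CPU flags
 * (optionally overridden through avctx->dsp_mask) and swaps the generic
 * DSPContext function pointers for the MMX/3DNow!/SSE* implementations
 * defined or declared above. */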
02415 void dsputil_init_mmx(DSPContext* c, AVCodecContext *avctx)
02416 {
02417     int mm_flags = av_get_cpu_flags();
02418     const int high_bit_depth = avctx->bits_per_raw_sample > 8;
02419     const int bit_depth = avctx->bits_per_raw_sample;
02420 
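    /* e.g. dsp_mask = AV_CPU_FLAG_FORCE | AV_CPU_FLAG_MMX2 forces the MMX2
     * paths on even if runtime detection failed, while dsp_mask =
     * AV_CPU_FLAG_SSE2 (without the FORCE bit) disables the SSE2 paths;
     * only the low 16 flag bits participate in the mask. */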
02421     if (avctx->dsp_mask) {
02422         if (avctx->dsp_mask & AV_CPU_FLAG_FORCE)
02423             mm_flags |= (avctx->dsp_mask & 0xffff);
02424         else
02425             mm_flags &= ~(avctx->dsp_mask & 0xffff);
02426     }
02427 
02428 #if 0
02429     av_log(avctx, AV_LOG_INFO, "libavcodec: CPU flags:");
02430     if (mm_flags & AV_CPU_FLAG_MMX)
02431         av_log(avctx, AV_LOG_INFO, " mmx");
02432     if (mm_flags & AV_CPU_FLAG_MMX2)
02433         av_log(avctx, AV_LOG_INFO, " mmx2");
02434     if (mm_flags & AV_CPU_FLAG_3DNOW)
02435         av_log(avctx, AV_LOG_INFO, " 3dnow");
02436     if (mm_flags & AV_CPU_FLAG_SSE)
02437         av_log(avctx, AV_LOG_INFO, " sse");
02438     if (mm_flags & AV_CPU_FLAG_SSE2)
02439         av_log(avctx, AV_LOG_INFO, " sse2");
02440     av_log(avctx, AV_LOG_INFO, "\n");
02441 #endif
02442 
02443     if (mm_flags & AV_CPU_FLAG_MMX) {
02444         const int idct_algo= avctx->idct_algo;
02445 
02446         if (avctx->lowres == 0 && avctx->bits_per_raw_sample <= 8) {
02447             if(idct_algo==FF_IDCT_AUTO || idct_algo==FF_IDCT_SIMPLEMMX){
02448                 c->idct_put= ff_simple_idct_put_mmx;
02449                 c->idct_add= ff_simple_idct_add_mmx;
02450                 c->idct    = ff_simple_idct_mmx;
02451                 c->idct_permutation_type= FF_SIMPLE_IDCT_PERM;
02452 #if CONFIG_GPL
02453             }else if(idct_algo==FF_IDCT_LIBMPEG2MMX){
02454                 if(mm_flags & AV_CPU_FLAG_MMX2){
02455                     c->idct_put= ff_libmpeg2mmx2_idct_put;
02456                     c->idct_add= ff_libmpeg2mmx2_idct_add;
02457                     c->idct    = ff_mmxext_idct;
02458                 }else{
02459                     c->idct_put= ff_libmpeg2mmx_idct_put;
02460                     c->idct_add= ff_libmpeg2mmx_idct_add;
02461                     c->idct    = ff_mmx_idct;
02462                 }
02463                 c->idct_permutation_type= FF_LIBMPEG2_IDCT_PERM;
02464 #endif
02465             }else if((CONFIG_VP3_DECODER || CONFIG_VP5_DECODER || CONFIG_VP6_DECODER) &&
02466                      idct_algo==FF_IDCT_VP3 && HAVE_YASM){
02467                 if(mm_flags & AV_CPU_FLAG_SSE2){
02468                     c->idct_put= ff_vp3_idct_put_sse2;
02469                     c->idct_add= ff_vp3_idct_add_sse2;
02470                     c->idct    = ff_vp3_idct_sse2;
02471                     c->idct_permutation_type= FF_TRANSPOSE_IDCT_PERM;
02472                 }else{
02473                     c->idct_put= ff_vp3_idct_put_mmx;
02474                     c->idct_add= ff_vp3_idct_add_mmx;
02475                     c->idct    = ff_vp3_idct_mmx;
02476                     c->idct_permutation_type= FF_PARTTRANS_IDCT_PERM;
02477                 }
02478             }else if(idct_algo==FF_IDCT_CAVS){
02479                 c->idct_permutation_type= FF_TRANSPOSE_IDCT_PERM;
02480             }else if(idct_algo==FF_IDCT_XVIDMMX){
02481                 if(mm_flags & AV_CPU_FLAG_SSE2){
02482                     c->idct_put= ff_idct_xvid_sse2_put;
02483                     c->idct_add= ff_idct_xvid_sse2_add;
02484                     c->idct    = ff_idct_xvid_sse2;
02485                     c->idct_permutation_type= FF_SSE2_IDCT_PERM;
02486                 }else if(mm_flags & AV_CPU_FLAG_MMX2){
02487                     c->idct_put= ff_idct_xvid_mmx2_put;
02488                     c->idct_add= ff_idct_xvid_mmx2_add;
02489                     c->idct    = ff_idct_xvid_mmx2;
02490                 }else{
02491                     c->idct_put= ff_idct_xvid_mmx_put;
02492                     c->idct_add= ff_idct_xvid_mmx_add;
02493                     c->idct    = ff_idct_xvid_mmx;
02494                 }
02495             }
02496         }
02497 
02498         c->put_pixels_clamped = ff_put_pixels_clamped_mmx;
02499         c->put_signed_pixels_clamped = ff_put_signed_pixels_clamped_mmx;
02500         c->add_pixels_clamped = ff_add_pixels_clamped_mmx;
02501         if (!high_bit_depth) {
02502             c->clear_block  = clear_block_mmx;
02503             c->clear_blocks = clear_blocks_mmx;
02504             if ((mm_flags & AV_CPU_FLAG_SSE) &&
02505                 !(CONFIG_MPEG_XVMC_DECODER && avctx->xvmc_acceleration > 1)){
02506                 /* XvMCCreateBlocks() may not allocate 16-byte aligned blocks */
02507                 c->clear_block  = clear_block_sse;
02508                 c->clear_blocks = clear_blocks_sse;
02509             }
02510         }
02511 
02512 #define SET_HPEL_FUNCS(PFX, IDX, SIZE, CPU) \
02513         c->PFX ## _pixels_tab[IDX][0] = PFX ## _pixels ## SIZE ## _ ## CPU; \
02514         c->PFX ## _pixels_tab[IDX][1] = PFX ## _pixels ## SIZE ## _x2_ ## CPU; \
02515         c->PFX ## _pixels_tab[IDX][2] = PFX ## _pixels ## SIZE ## _y2_ ## CPU; \
02516         c->PFX ## _pixels_tab[IDX][3] = PFX ## _pixels ## SIZE ## _xy2_ ## CPU
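        /* For instance, SET_HPEL_FUNCS(put, 0, 16, mmx) expands to:
         *   c->put_pixels_tab[0][0] = put_pixels16_mmx;
         *   c->put_pixels_tab[0][1] = put_pixels16_x2_mmx;
         *   c->put_pixels_tab[0][2] = put_pixels16_y2_mmx;
         *   c->put_pixels_tab[0][3] = put_pixels16_xy2_mmx;
         * i.e. the four half-pel cases: full-pel, x2 (horizontal), y2
         * (vertical) and xy2 (both). */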
02517 
02518         if (!high_bit_depth) {
02519             SET_HPEL_FUNCS(put, 0, 16, mmx);
02520             SET_HPEL_FUNCS(put_no_rnd, 0, 16, mmx);
02521             SET_HPEL_FUNCS(avg, 0, 16, mmx);
02522             SET_HPEL_FUNCS(avg_no_rnd, 0, 16, mmx);
02523             SET_HPEL_FUNCS(put, 1, 8, mmx);
02524             SET_HPEL_FUNCS(put_no_rnd, 1, 8, mmx);
02525             SET_HPEL_FUNCS(avg, 1, 8, mmx);
02526             SET_HPEL_FUNCS(avg_no_rnd, 1, 8, mmx);
02527         }
02528 
02529 #if ARCH_X86_32 || !HAVE_YASM
02530         c->gmc= gmc_mmx;
02531 #endif
02532 #if ARCH_X86_32 && HAVE_YASM
02533         if (!high_bit_depth)
02534             c->emulated_edge_mc = emulated_edge_mc_mmx;
02535 #endif
02536 
02537         c->add_bytes= add_bytes_mmx;
02538 
02539         if (!high_bit_depth)
02540             c->draw_edges = draw_edges_mmx;
02541 
02542         c->put_no_rnd_pixels_l2[0]= put_vp_no_rnd_pixels16_l2_mmx;
02543         c->put_no_rnd_pixels_l2[1]= put_vp_no_rnd_pixels8_l2_mmx;
02544 
02545         if (CONFIG_H263_DECODER || CONFIG_H263_ENCODER) {
02546             c->h263_v_loop_filter= h263_v_loop_filter_mmx;
02547             c->h263_h_loop_filter= h263_h_loop_filter_mmx;
02548         }
02549 
02550 #if HAVE_YASM
02551         if (!high_bit_depth && CONFIG_H264CHROMA) {
02552             c->put_h264_chroma_pixels_tab[0]= ff_put_h264_chroma_mc8_mmx_rnd;
02553             c->put_h264_chroma_pixels_tab[1]= ff_put_h264_chroma_mc4_mmx;
02554         }
02555 
02556         c->vector_clip_int32 = ff_vector_clip_int32_mmx;
02557 #endif
02558 
02559         if (mm_flags & AV_CPU_FLAG_MMX2) {
02560             c->prefetch = prefetch_mmx2;
02561 
02562             if (!high_bit_depth) {
02563                 c->put_pixels_tab[0][1] = put_pixels16_x2_mmx2;
02564                 c->put_pixels_tab[0][2] = put_pixels16_y2_mmx2;
02565 
02566                 c->avg_pixels_tab[0][0] = avg_pixels16_mmx2;
02567                 c->avg_pixels_tab[0][1] = avg_pixels16_x2_mmx2;
02568                 c->avg_pixels_tab[0][2] = avg_pixels16_y2_mmx2;
02569 
02570                 c->put_pixels_tab[1][1] = put_pixels8_x2_mmx2;
02571                 c->put_pixels_tab[1][2] = put_pixels8_y2_mmx2;
02572 
02573                 c->avg_pixels_tab[1][0] = avg_pixels8_mmx2;
02574                 c->avg_pixels_tab[1][1] = avg_pixels8_x2_mmx2;
02575                 c->avg_pixels_tab[1][2] = avg_pixels8_y2_mmx2;
02576             }
02577 
02578             if(!(avctx->flags & CODEC_FLAG_BITEXACT)){
02579                 if (!high_bit_depth) {
02580                     c->put_no_rnd_pixels_tab[0][1] = put_no_rnd_pixels16_x2_mmx2;
02581                     c->put_no_rnd_pixels_tab[0][2] = put_no_rnd_pixels16_y2_mmx2;
02582                     c->put_no_rnd_pixels_tab[1][1] = put_no_rnd_pixels8_x2_mmx2;
02583                     c->put_no_rnd_pixels_tab[1][2] = put_no_rnd_pixels8_y2_mmx2;
02584                     c->avg_pixels_tab[0][3] = avg_pixels16_xy2_mmx2;
02585                     c->avg_pixels_tab[1][3] = avg_pixels8_xy2_mmx2;
02586                 }
02587 
02588                 if (CONFIG_VP3_DECODER && HAVE_YASM) {
02589                     c->vp3_v_loop_filter= ff_vp3_v_loop_filter_mmx2;
02590                     c->vp3_h_loop_filter= ff_vp3_h_loop_filter_mmx2;
02591                 }
02592             }
02593             if (CONFIG_VP3_DECODER && HAVE_YASM) {
02594                 c->vp3_idct_dc_add = ff_vp3_idct_dc_add_mmx2;
02595             }
02596 
02597             if (CONFIG_VP3_DECODER
02598                 && (avctx->codec_id == CODEC_ID_VP3 || avctx->codec_id == CODEC_ID_THEORA)) {
02599                 c->put_no_rnd_pixels_tab[1][1] = put_no_rnd_pixels8_x2_exact_mmx2;
02600                 c->put_no_rnd_pixels_tab[1][2] = put_no_rnd_pixels8_y2_exact_mmx2;
02601             }
02602 
02603 #define SET_QPEL_FUNCS(PFX, IDX, SIZE, CPU, PREFIX) \
02604             c->PFX ## _pixels_tab[IDX][ 0] = PREFIX ## PFX ## SIZE ## _mc00_ ## CPU; \
02605             c->PFX ## _pixels_tab[IDX][ 1] = PREFIX ## PFX ## SIZE ## _mc10_ ## CPU; \
02606             c->PFX ## _pixels_tab[IDX][ 2] = PREFIX ## PFX ## SIZE ## _mc20_ ## CPU; \
02607             c->PFX ## _pixels_tab[IDX][ 3] = PREFIX ## PFX ## SIZE ## _mc30_ ## CPU; \
02608             c->PFX ## _pixels_tab[IDX][ 4] = PREFIX ## PFX ## SIZE ## _mc01_ ## CPU; \
02609             c->PFX ## _pixels_tab[IDX][ 5] = PREFIX ## PFX ## SIZE ## _mc11_ ## CPU; \
02610             c->PFX ## _pixels_tab[IDX][ 6] = PREFIX ## PFX ## SIZE ## _mc21_ ## CPU; \
02611             c->PFX ## _pixels_tab[IDX][ 7] = PREFIX ## PFX ## SIZE ## _mc31_ ## CPU; \
02612             c->PFX ## _pixels_tab[IDX][ 8] = PREFIX ## PFX ## SIZE ## _mc02_ ## CPU; \
02613             c->PFX ## _pixels_tab[IDX][ 9] = PREFIX ## PFX ## SIZE ## _mc12_ ## CPU; \
02614             c->PFX ## _pixels_tab[IDX][10] = PREFIX ## PFX ## SIZE ## _mc22_ ## CPU; \
02615             c->PFX ## _pixels_tab[IDX][11] = PREFIX ## PFX ## SIZE ## _mc32_ ## CPU; \
02616             c->PFX ## _pixels_tab[IDX][12] = PREFIX ## PFX ## SIZE ## _mc03_ ## CPU; \
02617             c->PFX ## _pixels_tab[IDX][13] = PREFIX ## PFX ## SIZE ## _mc13_ ## CPU; \
02618             c->PFX ## _pixels_tab[IDX][14] = PREFIX ## PFX ## SIZE ## _mc23_ ## CPU; \
02619             c->PFX ## _pixels_tab[IDX][15] = PREFIX ## PFX ## SIZE ## _mc33_ ## CPU
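            /* For instance, SET_QPEL_FUNCS(put_qpel, 0, 16, mmx2, ) expands
             * to the 16 assignments
             *   c->put_qpel_pixels_tab[0][ 0] = put_qpel16_mc00_mmx2;
             *   ...
             *   c->put_qpel_pixels_tab[0][15] = put_qpel16_mc33_mmx2;
             * where mcXY encodes the quarter-pel subposition (X horizontal,
             * Y vertical, each 0..3) and PREFIX becomes ff_ for the external
             * yasm versions. */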
02620 
02621             SET_QPEL_FUNCS(put_qpel, 0, 16, mmx2, );
02622             SET_QPEL_FUNCS(put_qpel, 1, 8, mmx2, );
02623             SET_QPEL_FUNCS(put_no_rnd_qpel, 0, 16, mmx2, );
02624             SET_QPEL_FUNCS(put_no_rnd_qpel, 1, 8, mmx2, );
02625             SET_QPEL_FUNCS(avg_qpel, 0, 16, mmx2, );
02626             SET_QPEL_FUNCS(avg_qpel, 1, 8, mmx2, );
02627 
02628             if (!high_bit_depth) {
02629                 SET_QPEL_FUNCS(put_h264_qpel, 0, 16, mmx2, );
02630                 SET_QPEL_FUNCS(put_h264_qpel, 1, 8, mmx2, );
02631                 SET_QPEL_FUNCS(put_h264_qpel, 2, 4, mmx2, );
02632                 SET_QPEL_FUNCS(avg_h264_qpel, 0, 16, mmx2, );
02633                 SET_QPEL_FUNCS(avg_h264_qpel, 1, 8, mmx2, );
02634                 SET_QPEL_FUNCS(avg_h264_qpel, 2, 4, mmx2, );
02635             }
02636             else if (bit_depth == 10) {
02637 #if HAVE_YASM
02638 #if !ARCH_X86_64
02639                 SET_QPEL_FUNCS(avg_h264_qpel, 0, 16, 10_mmxext, ff_);
02640                 SET_QPEL_FUNCS(put_h264_qpel, 0, 16, 10_mmxext, ff_);
02641                 SET_QPEL_FUNCS(put_h264_qpel, 1, 8,  10_mmxext, ff_);
02642                 SET_QPEL_FUNCS(avg_h264_qpel, 1, 8,  10_mmxext, ff_);
02643 #endif
02644                 SET_QPEL_FUNCS(put_h264_qpel, 2, 4,  10_mmxext, ff_);
02645                 SET_QPEL_FUNCS(avg_h264_qpel, 2, 4,  10_mmxext, ff_);
02646 #endif
02647             }
02648 
02649             SET_QPEL_FUNCS(put_2tap_qpel, 0, 16, mmx2, );
02650             SET_QPEL_FUNCS(put_2tap_qpel, 1, 8, mmx2, );
02651             SET_QPEL_FUNCS(avg_2tap_qpel, 0, 16, mmx2, );
02652             SET_QPEL_FUNCS(avg_2tap_qpel, 1, 8, mmx2, );
02653 
02654 #if HAVE_YASM
02655             if (!high_bit_depth && CONFIG_H264CHROMA) {
02656                 c->avg_h264_chroma_pixels_tab[0]= ff_avg_h264_chroma_mc8_mmx2_rnd;
02657                 c->avg_h264_chroma_pixels_tab[1]= ff_avg_h264_chroma_mc4_mmx2;
02658                 c->avg_h264_chroma_pixels_tab[2]= ff_avg_h264_chroma_mc2_mmx2;
02659                 c->put_h264_chroma_pixels_tab[2]= ff_put_h264_chroma_mc2_mmx2;
02660             }
02661             if (bit_depth == 10 && CONFIG_H264CHROMA) {
02662                 c->put_h264_chroma_pixels_tab[2]= ff_put_h264_chroma_mc2_10_mmxext;
02663                 c->avg_h264_chroma_pixels_tab[2]= ff_avg_h264_chroma_mc2_10_mmxext;
02664                 c->put_h264_chroma_pixels_tab[1]= ff_put_h264_chroma_mc4_10_mmxext;
02665                 c->avg_h264_chroma_pixels_tab[1]= ff_avg_h264_chroma_mc4_10_mmxext;
02666             }
02667 
02668             c->add_hfyu_median_prediction = ff_add_hfyu_median_prediction_mmx2;
02669 #endif
02670 #if HAVE_7REGS
02671             if (HAVE_AMD3DNOW && (mm_flags & AV_CPU_FLAG_3DNOW))
02672                 c->add_hfyu_median_prediction = add_hfyu_median_prediction_cmov;
02673 #endif
02674 
02675         } else if (HAVE_AMD3DNOW && (mm_flags & AV_CPU_FLAG_3DNOW)) {
02676             c->prefetch = prefetch_3dnow;
02677 
02678             if (!high_bit_depth) {
02679                 c->put_pixels_tab[0][1] = put_pixels16_x2_3dnow;
02680                 c->put_pixels_tab[0][2] = put_pixels16_y2_3dnow;
02681 
02682                 c->avg_pixels_tab[0][0] = avg_pixels16_3dnow;
02683                 c->avg_pixels_tab[0][1] = avg_pixels16_x2_3dnow;
02684                 c->avg_pixels_tab[0][2] = avg_pixels16_y2_3dnow;
02685 
02686                 c->put_pixels_tab[1][1] = put_pixels8_x2_3dnow;
02687                 c->put_pixels_tab[1][2] = put_pixels8_y2_3dnow;
02688 
02689                 c->avg_pixels_tab[1][0] = avg_pixels8_3dnow;
02690                 c->avg_pixels_tab[1][1] = avg_pixels8_x2_3dnow;
02691                 c->avg_pixels_tab[1][2] = avg_pixels8_y2_3dnow;
02692 
02693                 if(!(avctx->flags & CODEC_FLAG_BITEXACT)){
02694                     c->put_no_rnd_pixels_tab[0][1] = put_no_rnd_pixels16_x2_3dnow;
02695                     c->put_no_rnd_pixels_tab[0][2] = put_no_rnd_pixels16_y2_3dnow;
02696                     c->put_no_rnd_pixels_tab[1][1] = put_no_rnd_pixels8_x2_3dnow;
02697                     c->put_no_rnd_pixels_tab[1][2] = put_no_rnd_pixels8_y2_3dnow;
02698                     c->avg_pixels_tab[0][3] = avg_pixels16_xy2_3dnow;
02699                     c->avg_pixels_tab[1][3] = avg_pixels8_xy2_3dnow;
02700                 }
02701             }
02702 
02703             if (CONFIG_VP3_DECODER
02704                 && (avctx->codec_id == CODEC_ID_VP3 || avctx->codec_id == CODEC_ID_THEORA)) {
02705                 c->put_no_rnd_pixels_tab[1][1] = put_no_rnd_pixels8_x2_exact_3dnow;
02706                 c->put_no_rnd_pixels_tab[1][2] = put_no_rnd_pixels8_y2_exact_3dnow;
02707             }
02708 
02709             SET_QPEL_FUNCS(put_qpel, 0, 16, 3dnow, );
02710             SET_QPEL_FUNCS(put_qpel, 1, 8, 3dnow, );
02711             SET_QPEL_FUNCS(put_no_rnd_qpel, 0, 16, 3dnow, );
02712             SET_QPEL_FUNCS(put_no_rnd_qpel, 1, 8, 3dnow, );
02713             SET_QPEL_FUNCS(avg_qpel, 0, 16, 3dnow, );
02714             SET_QPEL_FUNCS(avg_qpel, 1, 8, 3dnow, );
02715 
02716             if (!high_bit_depth) {
02717                 SET_QPEL_FUNCS(put_h264_qpel, 0, 16, 3dnow, );
02718                 SET_QPEL_FUNCS(put_h264_qpel, 1, 8, 3dnow, );
02719                 SET_QPEL_FUNCS(put_h264_qpel, 2, 4, 3dnow, );
02720                 SET_QPEL_FUNCS(avg_h264_qpel, 0, 16, 3dnow, );
02721                 SET_QPEL_FUNCS(avg_h264_qpel, 1, 8, 3dnow, );
02722                 SET_QPEL_FUNCS(avg_h264_qpel, 2, 4, 3dnow, );
02723             }
02724 
02725             SET_QPEL_FUNCS(put_2tap_qpel, 0, 16, 3dnow, );
02726             SET_QPEL_FUNCS(put_2tap_qpel, 1, 8, 3dnow, );
02727             SET_QPEL_FUNCS(avg_2tap_qpel, 0, 16, 3dnow, );
02728             SET_QPEL_FUNCS(avg_2tap_qpel, 1, 8, 3dnow, );
02729 
02730 #if HAVE_YASM
02731             if (!high_bit_depth && CONFIG_H264CHROMA) {
02732                 c->avg_h264_chroma_pixels_tab[0]= ff_avg_h264_chroma_mc8_3dnow_rnd;
02733                 c->avg_h264_chroma_pixels_tab[1]= ff_avg_h264_chroma_mc4_3dnow;
02734             }
02735 
02736 #endif
02737         }
02738 
02739 
02740 #define H264_QPEL_FUNCS(x, y, CPU)\
02741             c->put_h264_qpel_pixels_tab[0][x+y*4] = put_h264_qpel16_mc##x##y##_##CPU;\
02742             c->put_h264_qpel_pixels_tab[1][x+y*4] = put_h264_qpel8_mc##x##y##_##CPU;\
02743             c->avg_h264_qpel_pixels_tab[0][x+y*4] = avg_h264_qpel16_mc##x##y##_##CPU;\
02744             c->avg_h264_qpel_pixels_tab[1][x+y*4] = avg_h264_qpel8_mc##x##y##_##CPU;
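        /* H264_QPEL_FUNCS(x, y, CPU) fills one mcXY subposition in both the
         * 16x16 and 8x8 put/avg tables, e.g. H264_QPEL_FUNCS(0, 0, sse2) sets
         *   c->put_h264_qpel_pixels_tab[0][0] = put_h264_qpel16_mc00_sse2;
         * plus the 8x8 put entry and the two matching avg entries. */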
02745         if((mm_flags & AV_CPU_FLAG_SSE2) && !(mm_flags & AV_CPU_FLAG_3DNOW)){
02746             // these functions are slower than MMX on AMD, but faster on Intel
02747             if (!high_bit_depth) {
02748                 c->put_pixels_tab[0][0] = put_pixels16_sse2;
02749                 c->put_no_rnd_pixels_tab[0][0] = put_pixels16_sse2;
02750                 c->avg_pixels_tab[0][0] = avg_pixels16_sse2;
02751                 H264_QPEL_FUNCS(0, 0, sse2);
02752             }
02753         }
02754         if(mm_flags & AV_CPU_FLAG_SSE2){
02755             if (!high_bit_depth) {
02756                 H264_QPEL_FUNCS(0, 1, sse2);
02757                 H264_QPEL_FUNCS(0, 2, sse2);
02758                 H264_QPEL_FUNCS(0, 3, sse2);
02759                 H264_QPEL_FUNCS(1, 1, sse2);
02760                 H264_QPEL_FUNCS(1, 2, sse2);
02761                 H264_QPEL_FUNCS(1, 3, sse2);
02762                 H264_QPEL_FUNCS(2, 1, sse2);
02763                 H264_QPEL_FUNCS(2, 2, sse2);
02764                 H264_QPEL_FUNCS(2, 3, sse2);
02765                 H264_QPEL_FUNCS(3, 1, sse2);
02766                 H264_QPEL_FUNCS(3, 2, sse2);
02767                 H264_QPEL_FUNCS(3, 3, sse2);
02768             }
02769 #if HAVE_YASM
02770 #define H264_QPEL_FUNCS_10(x, y, CPU)\
02771             c->put_h264_qpel_pixels_tab[0][x+y*4] = ff_put_h264_qpel16_mc##x##y##_10_##CPU;\
02772             c->put_h264_qpel_pixels_tab[1][x+y*4] = ff_put_h264_qpel8_mc##x##y##_10_##CPU;\
02773             c->avg_h264_qpel_pixels_tab[0][x+y*4] = ff_avg_h264_qpel16_mc##x##y##_10_##CPU;\
02774             c->avg_h264_qpel_pixels_tab[1][x+y*4] = ff_avg_h264_qpel8_mc##x##y##_10_##CPU;
02775             if (bit_depth == 10) {
02776                 SET_QPEL_FUNCS(put_h264_qpel, 0, 16, 10_sse2, ff_);
02777                 SET_QPEL_FUNCS(put_h264_qpel, 1, 8,  10_sse2, ff_);
02778                 SET_QPEL_FUNCS(avg_h264_qpel, 0, 16, 10_sse2, ff_);
02779                 SET_QPEL_FUNCS(avg_h264_qpel, 1, 8,  10_sse2, ff_);
02780                 H264_QPEL_FUNCS_10(1, 0, sse2_cache64)
02781                 H264_QPEL_FUNCS_10(2, 0, sse2_cache64)
02782                 H264_QPEL_FUNCS_10(3, 0, sse2_cache64)
02783 
02784                 if (CONFIG_H264CHROMA) {
02785                     c->put_h264_chroma_pixels_tab[0] = ff_put_h264_chroma_mc8_10_sse2;
02786                     c->avg_h264_chroma_pixels_tab[0] = ff_avg_h264_chroma_mc8_10_sse2;
02787                 }
02788             }
02789 #endif
02790         }
02791 #if HAVE_SSSE3
02792         if(mm_flags & AV_CPU_FLAG_SSSE3){
02793             if (!high_bit_depth) {
02794                 H264_QPEL_FUNCS(1, 0, ssse3);
02795                 H264_QPEL_FUNCS(1, 1, ssse3);
02796                 H264_QPEL_FUNCS(1, 2, ssse3);
02797                 H264_QPEL_FUNCS(1, 3, ssse3);
02798                 H264_QPEL_FUNCS(2, 0, ssse3);
02799                 H264_QPEL_FUNCS(2, 1, ssse3);
02800                 H264_QPEL_FUNCS(2, 2, ssse3);
02801                 H264_QPEL_FUNCS(2, 3, ssse3);
02802                 H264_QPEL_FUNCS(3, 0, ssse3);
02803                 H264_QPEL_FUNCS(3, 1, ssse3);
02804                 H264_QPEL_FUNCS(3, 2, ssse3);
02805                 H264_QPEL_FUNCS(3, 3, ssse3);
02806             }
02807 #if HAVE_YASM
02808             else if (bit_depth == 10) {
02809                 H264_QPEL_FUNCS_10(1, 0, ssse3_cache64)
02810                 H264_QPEL_FUNCS_10(2, 0, ssse3_cache64)
02811                 H264_QPEL_FUNCS_10(3, 0, ssse3_cache64)
02812             }
02813             if (!high_bit_depth && CONFIG_H264CHROMA) {
02814                 c->put_h264_chroma_pixels_tab[0]= ff_put_h264_chroma_mc8_ssse3_rnd;
02815                 c->avg_h264_chroma_pixels_tab[0]= ff_avg_h264_chroma_mc8_ssse3_rnd;
02816                 c->put_h264_chroma_pixels_tab[1]= ff_put_h264_chroma_mc4_ssse3;
02817                 c->avg_h264_chroma_pixels_tab[1]= ff_avg_h264_chroma_mc4_ssse3;
02818             }
02819             c->add_hfyu_left_prediction = ff_add_hfyu_left_prediction_ssse3;
02820             if (mm_flags & AV_CPU_FLAG_SSE4) // not really sse4, just slow on Conroe
02821                 c->add_hfyu_left_prediction = ff_add_hfyu_left_prediction_sse4;
02822 #endif
02823         }
02824 #endif
02825 
02826         if (HAVE_AMD3DNOW && (mm_flags & AV_CPU_FLAG_3DNOW)) {
02827             c->vorbis_inverse_coupling = vorbis_inverse_coupling_3dnow;
02828             c->vector_fmul = vector_fmul_3dnow;
02829         }
02830         if (HAVE_AMD3DNOWEXT && (mm_flags & AV_CPU_FLAG_3DNOWEXT)) {
02831             c->vector_fmul_reverse = vector_fmul_reverse_3dnow2;
02832 #if HAVE_6REGS
02833             c->vector_fmul_window = vector_fmul_window_3dnow2;
02834 #endif
02835         }
02836         if(mm_flags & AV_CPU_FLAG_MMX2){
02837 #if HAVE_YASM
02838             c->scalarproduct_int16 = ff_scalarproduct_int16_mmx2;
02839             c->scalarproduct_and_madd_int16 = ff_scalarproduct_and_madd_int16_mmx2;
02840             if (avctx->flags & CODEC_FLAG_BITEXACT) {
02841                 c->apply_window_int16 = ff_apply_window_int16_mmxext_ba;
02842             } else {
02843                 c->apply_window_int16 = ff_apply_window_int16_mmxext;
02844             }
02845 #endif
02846         }
02847         if(mm_flags & AV_CPU_FLAG_SSE){
02848             c->vorbis_inverse_coupling = vorbis_inverse_coupling_sse;
02849             c->ac3_downmix = ac3_downmix_sse;
02850             c->vector_fmul = vector_fmul_sse;
02851             c->vector_fmul_reverse = vector_fmul_reverse_sse;
02852             c->vector_fmul_add = vector_fmul_add_sse;
02853 #if HAVE_6REGS
02854             c->vector_fmul_window = vector_fmul_window_sse;
02855 #endif
02856             c->vector_clipf = vector_clipf_sse;
02857 #if HAVE_YASM
02858             c->scalarproduct_float = ff_scalarproduct_float_sse;
02859             c->butterflies_float_interleave = ff_butterflies_float_interleave_sse;
02860 
02861             if (!high_bit_depth)
02862                 c->emulated_edge_mc = emulated_edge_mc_sse;
02863             c->gmc = gmc_sse;
02864 #endif
02865         }
02866         if (HAVE_AMD3DNOW && (mm_flags & AV_CPU_FLAG_3DNOW))
02867             c->vector_fmul_add = vector_fmul_add_3dnow; // faster than sse
02868         if(mm_flags & AV_CPU_FLAG_SSE2){
02869 #if HAVE_YASM
02870             c->scalarproduct_int16 = ff_scalarproduct_int16_sse2;
02871             c->scalarproduct_and_madd_int16 = ff_scalarproduct_and_madd_int16_sse2;
02872             if (mm_flags & AV_CPU_FLAG_ATOM) {
02873                 c->vector_clip_int32 = ff_vector_clip_int32_int_sse2;
02874             } else {
02875                 c->vector_clip_int32 = ff_vector_clip_int32_sse2;
02876             }
02877             if (avctx->flags & CODEC_FLAG_BITEXACT) {
02878                 c->apply_window_int16 = ff_apply_window_int16_sse2_ba;
02879             } else {
02880                 if (!(mm_flags & AV_CPU_FLAG_SSE2SLOW)) {
02881                     c->apply_window_int16 = ff_apply_window_int16_sse2;
02882                 }
02883             }
02884 #endif
02885         }
02886         if (mm_flags & AV_CPU_FLAG_SSSE3) {
02887 #if HAVE_YASM
02888             if (mm_flags & AV_CPU_FLAG_ATOM) {
02889                 c->apply_window_int16 = ff_apply_window_int16_ssse3_atom;
02890             } else {
02891                 c->apply_window_int16 = ff_apply_window_int16_ssse3;
02892             }
02893             if (!(mm_flags & (AV_CPU_FLAG_SSE42|AV_CPU_FLAG_3DNOW))) { // cachesplit
02894                 c->scalarproduct_and_madd_int16 = ff_scalarproduct_and_madd_int16_ssse3;
02895             }
02896 #endif
02897         }
02898 
02899         if ((mm_flags & AV_CPU_FLAG_SSE4) && HAVE_SSE) {
02900 #if HAVE_YASM
02901             c->vector_clip_int32 = ff_vector_clip_int32_sse4;
02902 #endif
02903         }
02904 
02905 #if HAVE_AVX && HAVE_YASM
02906         if (mm_flags & AV_CPU_FLAG_AVX) {
02907             if (bit_depth == 10) {
02908                 // AVX implies !cache64.
02909                 // TODO: Port cache(32|64) detection from x264.
02910                 H264_QPEL_FUNCS_10(1, 0, sse2)
02911                 H264_QPEL_FUNCS_10(2, 0, sse2)
02912                 H264_QPEL_FUNCS_10(3, 0, sse2)
02913 
02914                 if (CONFIG_H264CHROMA) {
02915                     c->put_h264_chroma_pixels_tab[0] = ff_put_h264_chroma_mc8_10_avx;
02916                     c->avg_h264_chroma_pixels_tab[0] = ff_avg_h264_chroma_mc8_10_avx;
02917                 }
02918             }
02919             c->butterflies_float_interleave = ff_butterflies_float_interleave_avx;
02920         }
02921 #endif
02922     }
02923 
02924     if (CONFIG_ENCODERS)
02925         dsputilenc_init_mmx(c, avctx);
02926 }