libavcodec/ppc/gmc_altivec.c

/*
 * GMC (Global Motion Compensation)
 * AltiVec-enabled
 * Copyright (c) 2003 Romain Dolbeau <romain@dolbeau.org>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "libavcodec/dsputil.h"
#include "util_altivec.h"
#include "types_altivec.h"
#include "dsputil_altivec.h"

/*
  AltiVec-enhanced gmc1. At the moment this code assumes that stride is a
  multiple of 8, to preserve proper dst alignment.
*/
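/*
  Each of the 8 output pixels per row is the bilinear blend
      dst[j] = (A*src[j] + B*src[j+1] + C*src[j+stride] + D*src[j+stride+1]
                + rounder) >> 8
  with the weights A..D defined below; see the scalar sketch at the end of
  this listing.
*/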
void gmc1_altivec(uint8_t *dst /* align 8 */, uint8_t *src /* align 1 */, int stride, int h, int x16, int y16, int rounder)
{
    const DECLARE_ALIGNED(16, unsigned short, rounder_a) = rounder;
    const DECLARE_ALIGNED(16, unsigned short, ABCD)[8] =
        {
            (16-x16)*(16-y16), /* A */
            (   x16)*(16-y16), /* B */
            (16-x16)*(   y16), /* C */
            (   x16)*(   y16), /* D */
            0, 0, 0, 0         /* padding */
        };
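    // The four weights above always sum to 16*16 = 256, so the final >> 8
    // renormalizes; the table is padded to 8 shorts so a single vec_ld
    // picks up all of it.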
    register const vector unsigned char vczero = (const vector unsigned char)vec_splat_u8(0);
    register const vector unsigned short vcsr8 = (const vector unsigned short)vec_splat_u16(8);
    register vector unsigned char dstv, dstv2, src_0, src_1, srcvA, srcvB, srcvC, srcvD;
    register vector unsigned short Av, Bv, Cv, Dv, rounderV, tempA, tempB, tempC, tempD;
    int i;
    unsigned long dst_odd = (unsigned long)dst & 0x0000000F;
    unsigned long src_really_odd = (unsigned long)src & 0x0000000F;

    tempA = vec_ld(0, (unsigned short*)ABCD);
    Av = vec_splat(tempA, 0);
    Bv = vec_splat(tempA, 1);
    Cv = vec_splat(tempA, 2);
    Dv = vec_splat(tempA, 3);

    rounderV = vec_splat((vec_u16)vec_lde(0, &rounder_a), 0);
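    // (rounder_a is 16-byte aligned, so vec_lde loads it into element 0,
    //  and vec_splat(..., 0) broadcasts it across all eight lanes.)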

    // we'll be able to pick up our 9 char elements
    // at src from those 32 bytes.
    // We load the first batch here, as inside the loop
    // we can reuse 'src + stride' from one iteration
    // as the 'src' of the next.
    src_0 = vec_ld(0, src);
    src_1 = vec_ld(16, src);
    srcvA = vec_perm(src_0, src_1, vec_lvsl(0, src));
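    // (vec_lvsl(n, src) returns the permute control matching the alignment
    //  of src + n; vec_perm applied to two adjacent aligned loads then
    //  reassembles the 16 bytes starting at that unaligned address -- the
    //  standard AltiVec unaligned-load idiom.)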

    if (src_really_odd != 0x0000000F) {
        // if src & 0xF == 0xF, then (src+1) is properly aligned
        // on the second vector.
        srcvB = vec_perm(src_0, src_1, vec_lvsl(1, src));
    } else {
        srcvB = src_1;
    }
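    // Interleaving with vczero zero-extends the 8 low source bytes to
    // unsigned 16-bit shorts, the element type vec_mladd operates on.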
    srcvA = vec_mergeh(vczero, srcvA);
    srcvB = vec_mergeh(vczero, srcvB);

    for (i = 0; i < h; i++) {
        dst_odd = (unsigned long)dst & 0x0000000F;
        src_really_odd = (((unsigned long)src) + stride) & 0x0000000F;

        dstv = vec_ld(0, dst);

        // we'll be able to pick up our 9 char elements
        // at src + stride from those 32 bytes,
        // then reuse the resulting 2 vectors srcvC and srcvD
        // as the next srcvA and srcvB
        src_0 = vec_ld(stride + 0, src);
        src_1 = vec_ld(stride + 16, src);
        srcvC = vec_perm(src_0, src_1, vec_lvsl(stride + 0, src));

        if (src_really_odd != 0x0000000F) {
            // if src & 0xF == 0xF, then (src+1) is properly aligned
            // on the second vector.
            srcvD = vec_perm(src_0, src_1, vec_lvsl(stride + 1, src));
        } else {
            srcvD = src_1;
        }

        srcvC = vec_mergeh(vczero, srcvC);
        srcvD = vec_mergeh(vczero, srcvD);

        // OK, now we (finally) do the math :-)
        // These four instructions replace 32 integer multiplies
        // and 32 integer adds. Isn't AltiVec nice?
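        // No overflow: the weights sum to 256 and each sample is at most
        // 255, so each 16-bit lane holds at most 256*255 + rounder, which
        // fits in 16 bits for any rounder < 256. The vec_sr by 8 below
        // then divides out the 256 total weight.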
        tempA = vec_mladd((vector unsigned short)srcvA, Av, rounderV);
        tempB = vec_mladd((vector unsigned short)srcvB, Bv, tempA);
        tempC = vec_mladd((vector unsigned short)srcvC, Cv, tempB);
        tempD = vec_mladd((vector unsigned short)srcvD, Dv, tempC);

        srcvA = srcvC;
        srcvB = srcvD;

        tempD = vec_sr(tempD, vcsr8);

        dstv2 = vec_pack(tempD, (vector unsigned short)vczero);

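        // vec_st can only store 16 aligned bytes, so read-modify-write:
        // keep the untouched 8-byte half of the existing dst vector and
        // insert the 8 new bytes into the half that dst points at
        // (dst is 8-byte aligned, so dst & 0xF is either 0 or 8).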
        if (dst_odd) {
            dstv2 = vec_perm(dstv, dstv2, vcprm(0, 1, s0, s1));
        } else {
            dstv2 = vec_perm(dstv, dstv2, vcprm(s0, s1, 2, 3));
        }

        vec_st(dstv2, 0, dst);

        dst += stride;
        src += stride;
    }
}
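
For reference, a minimal scalar sketch of the computation the AltiVec routine
above vectorizes, one row of 8 pixels at a time. The name gmc1_scalar is
illustrative and not part of this file; it assumes <stdint.h> for uint8_t.

static void gmc1_scalar(uint8_t *dst, uint8_t *src, int stride, int h,
                        int x16, int y16, int rounder)
{
    const int A = (16 - x16) * (16 - y16);
    const int B = (     x16) * (16 - y16);
    const int C = (16 - x16) * (     y16);
    const int D = (     x16) * (     y16);
    int i, j;

    for (i = 0; i < h; i++) {
        for (j = 0; j < 8; j++)   /* gmc1 blocks are 8 pixels wide */
            dst[j] = (A * src[j]          + B * src[j + 1] +
                      C * src[j + stride] + D * src[j + stride + 1] +
                      rounder) >> 8;
        dst += stride;
        src += stride;
    }
}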