libavcodec/ppc/h264_altivec.c

/*
 * Copyright (c) 2004 Romain Dolbeau <romain@dolbeau.org>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "libavcodec/dsputil.h"
#include "libavcodec/h264data.h"

#include "gcc_fixes.h"

#include "dsputil_ppc.h"
#include "dsputil_altivec.h"
#include "util_altivec.h"
#include "types_altivec.h"

#define PUT_OP_U8_ALTIVEC(d, s, dst) d = s
#define AVG_OP_U8_ALTIVEC(d, s, dst) d = vec_avg(dst, s)

#define OP_U8_ALTIVEC                          PUT_OP_U8_ALTIVEC
#define PREFIX_h264_chroma_mc8_altivec         put_h264_chroma_mc8_altivec
#define PREFIX_h264_chroma_mc8_num             altivec_put_h264_chroma_mc8_num
#define PREFIX_h264_qpel16_h_lowpass_altivec   put_h264_qpel16_h_lowpass_altivec
#define PREFIX_h264_qpel16_h_lowpass_num       altivec_put_h264_qpel16_h_lowpass_num
#define PREFIX_h264_qpel16_v_lowpass_altivec   put_h264_qpel16_v_lowpass_altivec
#define PREFIX_h264_qpel16_v_lowpass_num       altivec_put_h264_qpel16_v_lowpass_num
#define PREFIX_h264_qpel16_hv_lowpass_altivec  put_h264_qpel16_hv_lowpass_altivec
#define PREFIX_h264_qpel16_hv_lowpass_num      altivec_put_h264_qpel16_hv_lowpass_num
#include "h264_template_altivec.c"
#undef OP_U8_ALTIVEC
#undef PREFIX_h264_chroma_mc8_altivec
#undef PREFIX_h264_chroma_mc8_num
#undef PREFIX_h264_qpel16_h_lowpass_altivec
#undef PREFIX_h264_qpel16_h_lowpass_num
#undef PREFIX_h264_qpel16_v_lowpass_altivec
#undef PREFIX_h264_qpel16_v_lowpass_num
#undef PREFIX_h264_qpel16_hv_lowpass_altivec
#undef PREFIX_h264_qpel16_hv_lowpass_num

#define OP_U8_ALTIVEC                          AVG_OP_U8_ALTIVEC
#define PREFIX_h264_chroma_mc8_altivec         avg_h264_chroma_mc8_altivec
#define PREFIX_h264_chroma_mc8_num             altivec_avg_h264_chroma_mc8_num
#define PREFIX_h264_qpel16_h_lowpass_altivec   avg_h264_qpel16_h_lowpass_altivec
#define PREFIX_h264_qpel16_h_lowpass_num       altivec_avg_h264_qpel16_h_lowpass_num
#define PREFIX_h264_qpel16_v_lowpass_altivec   avg_h264_qpel16_v_lowpass_altivec
#define PREFIX_h264_qpel16_v_lowpass_num       altivec_avg_h264_qpel16_v_lowpass_num
#define PREFIX_h264_qpel16_hv_lowpass_altivec  avg_h264_qpel16_hv_lowpass_altivec
#define PREFIX_h264_qpel16_hv_lowpass_num      altivec_avg_h264_qpel16_hv_lowpass_num
#include "h264_template_altivec.c"
#undef OP_U8_ALTIVEC
#undef PREFIX_h264_chroma_mc8_altivec
#undef PREFIX_h264_chroma_mc8_num
#undef PREFIX_h264_qpel16_h_lowpass_altivec
#undef PREFIX_h264_qpel16_h_lowpass_num
#undef PREFIX_h264_qpel16_v_lowpass_altivec
#undef PREFIX_h264_qpel16_v_lowpass_num
#undef PREFIX_h264_qpel16_hv_lowpass_altivec
#undef PREFIX_h264_qpel16_hv_lowpass_num

#define H264_MC(OPNAME, SIZE, CODETYPE) \
static void OPNAME ## h264_qpel ## SIZE ## _mc00_ ## CODETYPE (uint8_t *dst, uint8_t *src, int stride){\
    OPNAME ## pixels ## SIZE ## _ ## CODETYPE(dst, src, stride, SIZE);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc10_ ## CODETYPE(uint8_t *dst, uint8_t *src, int stride){ \
    DECLARE_ALIGNED_16(uint8_t, half[SIZE*SIZE]);\
    put_h264_qpel ## SIZE ## _h_lowpass_ ## CODETYPE(half, src, SIZE, stride);\
    OPNAME ## pixels ## SIZE ## _l2_ ## CODETYPE(dst, src, half, stride, stride, SIZE);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc20_ ## CODETYPE(uint8_t *dst, uint8_t *src, int stride){\
    OPNAME ## h264_qpel ## SIZE ## _h_lowpass_ ## CODETYPE(dst, src, stride, stride);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc30_ ## CODETYPE(uint8_t *dst, uint8_t *src, int stride){\
    DECLARE_ALIGNED_16(uint8_t, half[SIZE*SIZE]);\
    put_h264_qpel ## SIZE ## _h_lowpass_ ## CODETYPE(half, src, SIZE, stride);\
    OPNAME ## pixels ## SIZE ## _l2_ ## CODETYPE(dst, src+1, half, stride, stride, SIZE);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc01_ ## CODETYPE(uint8_t *dst, uint8_t *src, int stride){\
    DECLARE_ALIGNED_16(uint8_t, half[SIZE*SIZE]);\
    put_h264_qpel ## SIZE ## _v_lowpass_ ## CODETYPE(half, src, SIZE, stride);\
    OPNAME ## pixels ## SIZE ## _l2_ ## CODETYPE(dst, src, half, stride, stride, SIZE);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc02_ ## CODETYPE(uint8_t *dst, uint8_t *src, int stride){\
    OPNAME ## h264_qpel ## SIZE ## _v_lowpass_ ## CODETYPE(dst, src, stride, stride);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc03_ ## CODETYPE(uint8_t *dst, uint8_t *src, int stride){\
    DECLARE_ALIGNED_16(uint8_t, half[SIZE*SIZE]);\
    put_h264_qpel ## SIZE ## _v_lowpass_ ## CODETYPE(half, src, SIZE, stride);\
    OPNAME ## pixels ## SIZE ## _l2_ ## CODETYPE(dst, src+stride, half, stride, stride, SIZE);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc11_ ## CODETYPE(uint8_t *dst, uint8_t *src, int stride){\
    DECLARE_ALIGNED_16(uint8_t, halfH[SIZE*SIZE]);\
    DECLARE_ALIGNED_16(uint8_t, halfV[SIZE*SIZE]);\
    put_h264_qpel ## SIZE ## _h_lowpass_ ## CODETYPE(halfH, src, SIZE, stride);\
    put_h264_qpel ## SIZE ## _v_lowpass_ ## CODETYPE(halfV, src, SIZE, stride);\
    OPNAME ## pixels ## SIZE ## _l2_ ## CODETYPE(dst, halfH, halfV, stride, SIZE, SIZE);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc31_ ## CODETYPE(uint8_t *dst, uint8_t *src, int stride){\
    DECLARE_ALIGNED_16(uint8_t, halfH[SIZE*SIZE]);\
    DECLARE_ALIGNED_16(uint8_t, halfV[SIZE*SIZE]);\
    put_h264_qpel ## SIZE ## _h_lowpass_ ## CODETYPE(halfH, src, SIZE, stride);\
    put_h264_qpel ## SIZE ## _v_lowpass_ ## CODETYPE(halfV, src+1, SIZE, stride);\
    OPNAME ## pixels ## SIZE ## _l2_ ## CODETYPE(dst, halfH, halfV, stride, SIZE, SIZE);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc13_ ## CODETYPE(uint8_t *dst, uint8_t *src, int stride){\
    DECLARE_ALIGNED_16(uint8_t, halfH[SIZE*SIZE]);\
    DECLARE_ALIGNED_16(uint8_t, halfV[SIZE*SIZE]);\
    put_h264_qpel ## SIZE ## _h_lowpass_ ## CODETYPE(halfH, src + stride, SIZE, stride);\
    put_h264_qpel ## SIZE ## _v_lowpass_ ## CODETYPE(halfV, src, SIZE, stride);\
    OPNAME ## pixels ## SIZE ## _l2_ ## CODETYPE(dst, halfH, halfV, stride, SIZE, SIZE);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc33_ ## CODETYPE(uint8_t *dst, uint8_t *src, int stride){\
    DECLARE_ALIGNED_16(uint8_t, halfH[SIZE*SIZE]);\
    DECLARE_ALIGNED_16(uint8_t, halfV[SIZE*SIZE]);\
    put_h264_qpel ## SIZE ## _h_lowpass_ ## CODETYPE(halfH, src + stride, SIZE, stride);\
    put_h264_qpel ## SIZE ## _v_lowpass_ ## CODETYPE(halfV, src+1, SIZE, stride);\
    OPNAME ## pixels ## SIZE ## _l2_ ## CODETYPE(dst, halfH, halfV, stride, SIZE, SIZE);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc22_ ## CODETYPE(uint8_t *dst, uint8_t *src, int stride){\
    DECLARE_ALIGNED_16(int16_t, tmp[SIZE*(SIZE+8)]);\
    OPNAME ## h264_qpel ## SIZE ## _hv_lowpass_ ## CODETYPE(dst, tmp, src, stride, SIZE, stride);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc21_ ## CODETYPE(uint8_t *dst, uint8_t *src, int stride){\
    DECLARE_ALIGNED_16(uint8_t, halfH[SIZE*SIZE]);\
    DECLARE_ALIGNED_16(uint8_t, halfHV[SIZE*SIZE]);\
    DECLARE_ALIGNED_16(int16_t, tmp[SIZE*(SIZE+8)]);\
    put_h264_qpel ## SIZE ## _h_lowpass_ ## CODETYPE(halfH, src, SIZE, stride);\
    put_h264_qpel ## SIZE ## _hv_lowpass_ ## CODETYPE(halfHV, tmp, src, SIZE, SIZE, stride);\
    OPNAME ## pixels ## SIZE ## _l2_ ## CODETYPE(dst, halfH, halfHV, stride, SIZE, SIZE);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc23_ ## CODETYPE(uint8_t *dst, uint8_t *src, int stride){\
    DECLARE_ALIGNED_16(uint8_t, halfH[SIZE*SIZE]);\
    DECLARE_ALIGNED_16(uint8_t, halfHV[SIZE*SIZE]);\
    DECLARE_ALIGNED_16(int16_t, tmp[SIZE*(SIZE+8)]);\
    put_h264_qpel ## SIZE ## _h_lowpass_ ## CODETYPE(halfH, src + stride, SIZE, stride);\
    put_h264_qpel ## SIZE ## _hv_lowpass_ ## CODETYPE(halfHV, tmp, src, SIZE, SIZE, stride);\
    OPNAME ## pixels ## SIZE ## _l2_ ## CODETYPE(dst, halfH, halfHV, stride, SIZE, SIZE);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc12_ ## CODETYPE(uint8_t *dst, uint8_t *src, int stride){\
    DECLARE_ALIGNED_16(uint8_t, halfV[SIZE*SIZE]);\
    DECLARE_ALIGNED_16(uint8_t, halfHV[SIZE*SIZE]);\
    DECLARE_ALIGNED_16(int16_t, tmp[SIZE*(SIZE+8)]);\
    put_h264_qpel ## SIZE ## _v_lowpass_ ## CODETYPE(halfV, src, SIZE, stride);\
    put_h264_qpel ## SIZE ## _hv_lowpass_ ## CODETYPE(halfHV, tmp, src, SIZE, SIZE, stride);\
    OPNAME ## pixels ## SIZE ## _l2_ ## CODETYPE(dst, halfV, halfHV, stride, SIZE, SIZE);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc32_ ## CODETYPE(uint8_t *dst, uint8_t *src, int stride){\
    DECLARE_ALIGNED_16(uint8_t, halfV[SIZE*SIZE]);\
    DECLARE_ALIGNED_16(uint8_t, halfHV[SIZE*SIZE]);\
    DECLARE_ALIGNED_16(int16_t, tmp[SIZE*(SIZE+8)]);\
    put_h264_qpel ## SIZE ## _v_lowpass_ ## CODETYPE(halfV, src+1, SIZE, stride);\
    put_h264_qpel ## SIZE ## _hv_lowpass_ ## CODETYPE(halfHV, tmp, src, SIZE, SIZE, stride);\
    OPNAME ## pixels ## SIZE ## _l2_ ## CODETYPE(dst, halfV, halfHV, stride, SIZE, SIZE);\
}\

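/* Each mcXY function generated below computes the prediction for quarter-pel
 * offset (X/4, Y/4): half-pel positions come straight from the 6-tap lowpass
 * filters, and the remaining quarter-pel positions are a rounded average of
 * two of those results.  As a sketch, the put_ variant of mc10 behaves like
 * the scalar code below (the sketch_* name is illustrative, not part of
 * FFmpeg):
 */
#if 0
static void sketch_put_h264_qpel16_mc10(uint8_t *dst, uint8_t *src, int stride)
{
    uint8_t half[16 * 16];
    int x, y;
    /* horizontal half-pel interpolation into a temporary buffer */
    put_h264_qpel16_h_lowpass_altivec(half, src, 16, stride);
    /* rounded average with the full-pel samples gives the quarter-pel result */
    for (y = 0; y < 16; y++)
        for (x = 0; x < 16; x++)
            dst[y * stride + x] = (src[y * stride + x] + half[y * 16 + x] + 1) >> 1;
}
#endif
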
/* This code assumes that stride % 16 == 0. */
void put_no_rnd_h264_chroma_mc8_altivec(uint8_t * dst, uint8_t * src, int stride, int h, int x, int y) {
    DECLARE_ALIGNED_16(signed int, ABCD[4]) =
                        {((8 - x) * (8 - y)),
                             ((x) * (8 - y)),
                         ((8 - x) * (y)),
                             ((x) * (y))};
    register int i;
    vec_u8 fperm;
    const vec_s32 vABCD = vec_ld(0, ABCD);
    const vec_s16 vA = vec_splat((vec_s16)vABCD, 1);
    const vec_s16 vB = vec_splat((vec_s16)vABCD, 3);
    const vec_s16 vC = vec_splat((vec_s16)vABCD, 5);
    const vec_s16 vD = vec_splat((vec_s16)vABCD, 7);
    LOAD_ZERO;
    const vec_s16 v28ss = vec_sub(vec_sl(vec_splat_s16(1),vec_splat_u16(5)),vec_splat_s16(4));
    const vec_u16 v6us  = vec_splat_u16(6);
    register int loadSecond     = (((unsigned long)src) % 16) <= 7 ? 0 : 1;
    register int reallyBadAlign = (((unsigned long)src) % 16) == 15 ? 1 : 0;

    vec_u8 vsrcAuc, vsrcBuc, vsrcperm0, vsrcperm1;
    vec_u8 vsrc0uc, vsrc1uc;
    vec_s16 vsrc0ssH, vsrc1ssH;
    vec_u8 vsrcCuc, vsrc2uc, vsrc3uc;
    vec_s16 vsrc2ssH, vsrc3ssH, psum;
    vec_u8 vdst, ppsum, fsum;

    if (((unsigned long)dst) % 16 == 0) {
        fperm = (vec_u8){0x10, 0x11, 0x12, 0x13,
                           0x14, 0x15, 0x16, 0x17,
                           0x08, 0x09, 0x0A, 0x0B,
                           0x0C, 0x0D, 0x0E, 0x0F};
    } else {
        fperm = (vec_u8){0x00, 0x01, 0x02, 0x03,
                           0x04, 0x05, 0x06, 0x07,
                           0x18, 0x19, 0x1A, 0x1B,
                           0x1C, 0x1D, 0x1E, 0x1F};
    }

    vsrcAuc = vec_ld(0, src);

    if (loadSecond)
        vsrcBuc = vec_ld(16, src);
    vsrcperm0 = vec_lvsl(0, src);
    vsrcperm1 = vec_lvsl(1, src);

    vsrc0uc = vec_perm(vsrcAuc, vsrcBuc, vsrcperm0);
    if (reallyBadAlign)
        vsrc1uc = vsrcBuc;
    else
        vsrc1uc = vec_perm(vsrcAuc, vsrcBuc, vsrcperm1);

    vsrc0ssH = (vec_s16)vec_mergeh(zero_u8v, (vec_u8)vsrc0uc);
    vsrc1ssH = (vec_s16)vec_mergeh(zero_u8v, (vec_u8)vsrc1uc);

    if (!loadSecond) { // !loadSecond implies !reallyBadAlign
        for (i = 0 ; i < h ; i++) {
            vsrcCuc = vec_ld(stride + 0, src);

            vsrc2uc = vec_perm(vsrcCuc, vsrcCuc, vsrcperm0);
            vsrc3uc = vec_perm(vsrcCuc, vsrcCuc, vsrcperm1);

            vsrc2ssH = (vec_s16)vec_mergeh(zero_u8v, (vec_u8)vsrc2uc);
            vsrc3ssH = (vec_s16)vec_mergeh(zero_u8v, (vec_u8)vsrc3uc);

            psum = vec_mladd(vA, vsrc0ssH, vec_splat_s16(0));
            psum = vec_mladd(vB, vsrc1ssH, psum);
            psum = vec_mladd(vC, vsrc2ssH, psum);
            psum = vec_mladd(vD, vsrc3ssH, psum);
            psum = vec_add(v28ss, psum);
            psum = vec_sra(psum, v6us);

            vdst = vec_ld(0, dst);
            ppsum = (vec_u8)vec_packsu(psum, psum);
            fsum = vec_perm(vdst, ppsum, fperm);

            vec_st(fsum, 0, dst);

            vsrc0ssH = vsrc2ssH;
            vsrc1ssH = vsrc3ssH;

            dst += stride;
            src += stride;
        }
    } else {
        vec_u8 vsrcDuc;
        for (i = 0 ; i < h ; i++) {
            vsrcCuc = vec_ld(stride + 0, src);
            vsrcDuc = vec_ld(stride + 16, src);

            vsrc2uc = vec_perm(vsrcCuc, vsrcDuc, vsrcperm0);
            if (reallyBadAlign)
                vsrc3uc = vsrcDuc;
            else
                vsrc3uc = vec_perm(vsrcCuc, vsrcDuc, vsrcperm1);

            vsrc2ssH = (vec_s16)vec_mergeh(zero_u8v, (vec_u8)vsrc2uc);
            vsrc3ssH = (vec_s16)vec_mergeh(zero_u8v, (vec_u8)vsrc3uc);

            psum = vec_mladd(vA, vsrc0ssH, vec_splat_s16(0));
            psum = vec_mladd(vB, vsrc1ssH, psum);
            psum = vec_mladd(vC, vsrc2ssH, psum);
            psum = vec_mladd(vD, vsrc3ssH, psum);
            psum = vec_add(v28ss, psum);
            psum = vec_sr(psum, v6us);

            vdst = vec_ld(0, dst);
            ppsum = (vec_u8)vec_pack(psum, psum);
            fsum = vec_perm(vdst, ppsum, fperm);

            vec_st(fsum, 0, dst);

            vsrc0ssH = vsrc2ssH;
            vsrc1ssH = vsrc3ssH;

            dst += stride;
            src += stride;
        }
    }
}
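
/* A scalar sketch of the bilinear chroma interpolation performed above; the
 * AltiVec version differs only in its alignment handling.  The "no rounding"
 * bias of 28 (= 32 - 4) is the v28ss constant.  The sketch_* helper below is
 * illustrative only:
 */
#if 0
static void sketch_put_no_rnd_h264_chroma_mc8(uint8_t *dst, uint8_t *src,
                                              int stride, int h, int x, int y)
{
    const int A = (8 - x) * (8 - y), B = x * (8 - y);
    const int C = (8 - x) * y,       D = x * y;
    int i, j;
    for (i = 0; i < h; i++) {
        for (j = 0; j < 8; j++)
            dst[j] = (A * src[j]          + B * src[j + 1] +
                      C * src[j + stride] + D * src[j + stride + 1] + 28) >> 6;
        dst += stride;
        src += stride;
    }
}
#endif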

static inline void put_pixels16_l2_altivec( uint8_t * dst, const uint8_t * src1,
                                    const uint8_t * src2, int dst_stride,
                                    int src_stride1, int h)
{
    int i;
    vec_u8 a, b, d, tmp1, tmp2, mask, mask_, edges, align;

    mask_ = vec_lvsl(0, src2);

    for (i = 0; i < h; i++) {

        tmp1 = vec_ld(i * src_stride1, src1);
        mask = vec_lvsl(i * src_stride1, src1);
        tmp2 = vec_ld(i * src_stride1 + 15, src1);

        a = vec_perm(tmp1, tmp2, mask);

        tmp1 = vec_ld(i * 16, src2);
        tmp2 = vec_ld(i * 16 + 15, src2);

        b = vec_perm(tmp1, tmp2, mask_);

        tmp1 = vec_ld(0, dst);
        mask = vec_lvsl(0, dst);
        tmp2 = vec_ld(15, dst);

        d = vec_avg(a, b);

        edges = vec_perm(tmp2, tmp1, mask);

        align = vec_lvsr(0, dst);

        tmp2 = vec_perm(d, edges, align);
        tmp1 = vec_perm(edges, d, align);

        vec_st(tmp2, 15, dst);
        vec_st(tmp1, 0 , dst);

        dst += dst_stride;
    }
}

static inline void avg_pixels16_l2_altivec( uint8_t * dst, const uint8_t * src1,
                                    const uint8_t * src2, int dst_stride,
                                    int src_stride1, int h)
{
    int i;
    vec_u8 a, b, d, tmp1, tmp2, mask, mask_, edges, align;

    mask_ = vec_lvsl(0, src2);

    for (i = 0; i < h; i++) {

        tmp1 = vec_ld(i * src_stride1, src1);
        mask = vec_lvsl(i * src_stride1, src1);
        tmp2 = vec_ld(i * src_stride1 + 15, src1);

        a = vec_perm(tmp1, tmp2, mask);

        tmp1 = vec_ld(i * 16, src2);
        tmp2 = vec_ld(i * 16 + 15, src2);

        b = vec_perm(tmp1, tmp2, mask_);

        tmp1 = vec_ld(0, dst);
        mask = vec_lvsl(0, dst);
        tmp2 = vec_ld(15, dst);

        d = vec_avg(vec_perm(tmp1, tmp2, mask), vec_avg(a, b));

        edges = vec_perm(tmp2, tmp1, mask);

        align = vec_lvsr(0, dst);

        tmp2 = vec_perm(d, edges, align);
        tmp1 = vec_perm(edges, d, align);

        vec_st(tmp2, 15, dst);
        vec_st(tmp1, 0 , dst);

        dst += dst_stride;
    }
}
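
/* Both *_pixels16_l2 routines implement, per byte, a rounded average of two
 * 16-wide sources (note that src2 is read with a fixed stride of 16); the
 * avg_ variant averages once more with the destination.  Scalar sketch
 * (illustrative only):
 */
#if 0
static void sketch_pixels16_l2(uint8_t *dst, const uint8_t *src1,
                               const uint8_t *src2, int dst_stride,
                               int src_stride1, int h, int do_avg)
{
    int i, j;
    for (i = 0; i < h; i++) {
        for (j = 0; j < 16; j++) {
            int v = (src1[i * src_stride1 + j] + src2[i * 16 + j] + 1) >> 1;
            dst[j] = do_avg ? (dst[j] + v + 1) >> 1 : v;
        }
        dst += dst_stride;
    }
}
#endif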

/* Implemented but could be faster
#define put_pixels16_l2_altivec(d,s1,s2,ds,s1s,h) put_pixels16_l2(d,s1,s2,ds,s1s,16,h)
#define avg_pixels16_l2_altivec(d,s1,s2,ds,s1s,h) avg_pixels16_l2(d,s1,s2,ds,s1s,16,h)
 */

H264_MC(put_, 16, altivec)
H264_MC(avg_, 16, altivec)


/****************************************************************************
 * IDCT transform:
 ****************************************************************************/

#define VEC_1D_DCT(vb0,vb1,vb2,vb3,va0,va1,va2,va3)               \
    /* 1st stage */                                               \
    vz0 = vec_add(vb0,vb2);       /* temp[0] = Y[0] + Y[2] */     \
    vz1 = vec_sub(vb0,vb2);       /* temp[1] = Y[0] - Y[2] */     \
    vz2 = vec_sra(vb1,vec_splat_u16(1));                          \
    vz2 = vec_sub(vz2,vb3);       /* temp[2] = Y[1].1/2 - Y[3] */ \
    vz3 = vec_sra(vb3,vec_splat_u16(1));                          \
    vz3 = vec_add(vb1,vz3);       /* temp[3] = Y[1] + Y[3].1/2 */ \
    /* 2nd stage: output */                                       \
    va0 = vec_add(vz0,vz3);       /* x[0] = temp[0] + temp[3] */  \
    va1 = vec_add(vz1,vz2);       /* x[1] = temp[1] + temp[2] */  \
    va2 = vec_sub(vz1,vz2);       /* x[2] = temp[1] - temp[2] */  \
    va3 = vec_sub(vz0,vz3)        /* x[3] = temp[0] - temp[3] */

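/* The same 1-D stage in scalar form (the H.264 4x4 inverse core transform
 * butterfly); vec_sra by one is the ">>1" below.  Illustrative sketch:
 */
#if 0
static void sketch_idct4_1d(const int16_t b[4], int16_t a[4])
{
    const int z0 = b[0] + b[2];
    const int z1 = b[0] - b[2];
    const int z2 = (b[1] >> 1) - b[3];
    const int z3 = b[1] + (b[3] >> 1);
    a[0] = z0 + z3;
    a[1] = z1 + z2;
    a[2] = z1 - z2;
    a[3] = z0 - z3;
}
#endif
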
#define VEC_TRANSPOSE_4(a0,a1,a2,a3,b0,b1,b2,b3) \
    b0 = vec_mergeh( a0, a0 ); \
    b1 = vec_mergeh( a1, a0 ); \
    b2 = vec_mergeh( a2, a0 ); \
    b3 = vec_mergeh( a3, a0 ); \
    a0 = vec_mergeh( b0, b2 ); \
    a1 = vec_mergel( b0, b2 ); \
    a2 = vec_mergeh( b1, b3 ); \
    a3 = vec_mergel( b1, b3 ); \
    b0 = vec_mergeh( a0, a2 ); \
    b1 = vec_mergel( a0, a2 ); \
    b2 = vec_mergeh( a1, a3 ); \
    b3 = vec_mergel( a1, a3 )

#define VEC_LOAD_U8_ADD_S16_STORE_U8(va)                      \
    vdst_orig = vec_ld(0, dst);                               \
    vdst = vec_perm(vdst_orig, zero_u8v, vdst_mask);          \
    vdst_ss = (vec_s16) vec_mergeh(zero_u8v, vdst);         \
    va = vec_add(va, vdst_ss);                                \
    va_u8 = vec_packsu(va, zero_s16v);                        \
    va_u32 = vec_splat((vec_u32)va_u8, 0);                  \
    vec_ste(va_u32, element, (uint32_t*)dst);

static void ff_h264_idct_add_altivec(uint8_t *dst, DCTELEM *block, int stride)
{
    vec_s16 va0, va1, va2, va3;
    vec_s16 vz0, vz1, vz2, vz3;
    vec_s16 vtmp0, vtmp1, vtmp2, vtmp3;
    vec_u8 va_u8;
    vec_u32 va_u32;
    vec_s16 vdst_ss;
    const vec_u16 v6us = vec_splat_u16(6);
    vec_u8 vdst, vdst_orig;
    vec_u8 vdst_mask = vec_lvsl(0, dst);
    int element = ((unsigned long)dst & 0xf) >> 2;
    LOAD_ZERO;

    block[0] += 32;  /* add 32 as a DC-level for rounding */

    vtmp0 = vec_ld(0,block);
    vtmp1 = vec_sld(vtmp0, vtmp0, 8);
    vtmp2 = vec_ld(16,block);
    vtmp3 = vec_sld(vtmp2, vtmp2, 8);

    VEC_1D_DCT(vtmp0,vtmp1,vtmp2,vtmp3,va0,va1,va2,va3);
    VEC_TRANSPOSE_4(va0,va1,va2,va3,vtmp0,vtmp1,vtmp2,vtmp3);
    VEC_1D_DCT(vtmp0,vtmp1,vtmp2,vtmp3,va0,va1,va2,va3);

    va0 = vec_sra(va0,v6us);
    va1 = vec_sra(va1,v6us);
    va2 = vec_sra(va2,v6us);
    va3 = vec_sra(va3,v6us);

    VEC_LOAD_U8_ADD_S16_STORE_U8(va0);
    dst += stride;
    VEC_LOAD_U8_ADD_S16_STORE_U8(va1);
    dst += stride;
    VEC_LOAD_U8_ADD_S16_STORE_U8(va2);
    dst += stride;
    VEC_LOAD_U8_ADD_S16_STORE_U8(va3);
}

#define IDCT8_1D_ALTIVEC(s0, s1, s2, s3, s4, s5, s6, s7,  d0, d1, d2, d3, d4, d5, d6, d7) {\
    /*        a0  = SRC(0) + SRC(4); */ \
    vec_s16 a0v = vec_add(s0, s4);    \
    /*        a2  = SRC(0) - SRC(4); */ \
    vec_s16 a2v = vec_sub(s0, s4);    \
    /*        a4  =           (SRC(2)>>1) - SRC(6); */ \
    vec_s16 a4v = vec_sub(vec_sra(s2, onev), s6);    \
    /*        a6  =           (SRC(6)>>1) + SRC(2); */ \
    vec_s16 a6v = vec_add(vec_sra(s6, onev), s2);    \
    /*        b0  =         a0 + a6; */ \
    vec_s16 b0v = vec_add(a0v, a6v);  \
    /*        b2  =         a2 + a4; */ \
    vec_s16 b2v = vec_add(a2v, a4v);  \
    /*        b4  =         a2 - a4; */ \
    vec_s16 b4v = vec_sub(a2v, a4v);  \
    /*        b6  =         a0 - a6; */ \
    vec_s16 b6v = vec_sub(a0v, a6v);  \
    /* a1 =  SRC(5) - SRC(3) - SRC(7) - (SRC(7)>>1); */ \
    /*        a1 =             (SRC(5)-SRC(3)) -  (SRC(7)  +  (SRC(7)>>1)); */ \
    vec_s16 a1v = vec_sub( vec_sub(s5, s3), vec_add(s7, vec_sra(s7, onev)) ); \
    /* a3 =  SRC(7) + SRC(1) - SRC(3) - (SRC(3)>>1); */ \
    /*        a3 =             (SRC(7)+SRC(1)) -  (SRC(3)  +  (SRC(3)>>1)); */ \
    vec_s16 a3v = vec_sub( vec_add(s7, s1), vec_add(s3, vec_sra(s3, onev)) );\
    /* a5 =  SRC(7) - SRC(1) + SRC(5) + (SRC(5)>>1); */ \
    /*        a5 =             (SRC(7)-SRC(1)) +   SRC(5) +   (SRC(5)>>1); */ \
    vec_s16 a5v = vec_add( vec_sub(s7, s1), vec_add(s5, vec_sra(s5, onev)) );\
    /*        a7 =                SRC(5)+SRC(3) +  SRC(1) +   (SRC(1)>>1); */ \
    vec_s16 a7v = vec_add( vec_add(s5, s3), vec_add(s1, vec_sra(s1, onev)) );\
    /*        b1 =                  (a7>>2)  +  a1; */ \
    vec_s16 b1v = vec_add( vec_sra(a7v, twov), a1v); \
    /*        b3 =          a3 +        (a5>>2); */ \
    vec_s16 b3v = vec_add(a3v, vec_sra(a5v, twov)); \
    /*        b5 =                  (a3>>2)  -   a5; */ \
    vec_s16 b5v = vec_sub( vec_sra(a3v, twov), a5v); \
    /*        b7 =           a7 -        (a1>>2); */ \
    vec_s16 b7v = vec_sub( a7v, vec_sra(a1v, twov)); \
    /* DST(0,    b0 + b7); */ \
    d0 = vec_add(b0v, b7v); \
    /* DST(1,    b2 + b5); */ \
    d1 = vec_add(b2v, b5v); \
    /* DST(2,    b4 + b3); */ \
    d2 = vec_add(b4v, b3v); \
    /* DST(3,    b6 + b1); */ \
    d3 = vec_add(b6v, b1v); \
    /* DST(4,    b6 - b1); */ \
    d4 = vec_sub(b6v, b1v); \
    /* DST(5,    b4 - b3); */ \
    d5 = vec_sub(b4v, b3v); \
    /* DST(6,    b2 - b5); */ \
    d6 = vec_sub(b2v, b5v); \
    /* DST(7,    b0 - b7); */ \
    d7 = vec_sub(b0v, b7v); \
}

#define ALTIVEC_STORE_SUM_CLIP(dest, idctv, perm_ldv, perm_stv, sel) { \
    /* unaligned load */                                       \
    vec_u8 hv = vec_ld( 0, dest );                           \
    vec_u8 lv = vec_ld( 7, dest );                           \
    vec_u8 dstv   = vec_perm( hv, lv, (vec_u8)perm_ldv );  \
    vec_s16 idct_sh6 = vec_sra(idctv, sixv);                 \
    vec_u16 dst16 = (vec_u16)vec_mergeh(zero_u8v, dstv);   \
    vec_s16 idstsum = vec_adds(idct_sh6, (vec_s16)dst16);  \
    vec_u8 idstsum8 = vec_packsu(zero_s16v, idstsum);        \
    vec_u8 edgehv;                                           \
    /* unaligned store */                                      \
    vec_u8 bodyv  = vec_perm( idstsum8, idstsum8, perm_stv );\
    vec_u8 edgelv = vec_perm( sel, zero_u8v, perm_stv );     \
    lv    = vec_sel( lv, bodyv, edgelv );                      \
    vec_st( lv, 7, dest );                                     \
    hv    = vec_ld( 0, dest );                                 \
    edgehv = vec_perm( zero_u8v, sel, perm_stv );              \
    hv    = vec_sel( hv, bodyv, edgehv );                      \
    vec_st( hv, 0, dest );                                     \
 }
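
/* Everything in ALTIVEC_STORE_SUM_CLIP apart from the unaligned load/store
 * plumbing reduces, per pixel, to the scalar sketch below (illustrative
 * only):
 */
#if 0
static void sketch_store_sum_clip(uint8_t *dest, const int16_t *idct)
{
    int i;
    for (i = 0; i < 8; i++)
        dest[i] = av_clip_uint8(dest[i] + (idct[i] >> 6));
}
#endif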

void ff_h264_idct8_add_altivec( uint8_t *dst, DCTELEM *dct, int stride ) {
    vec_s16 s0, s1, s2, s3, s4, s5, s6, s7;
    vec_s16 d0, d1, d2, d3, d4, d5, d6, d7;
    vec_s16 idct0, idct1, idct2, idct3, idct4, idct5, idct6, idct7;

    vec_u8 perm_ldv = vec_lvsl(0, dst);
    vec_u8 perm_stv = vec_lvsr(8, dst);

    const vec_u16 onev = vec_splat_u16(1);
    const vec_u16 twov = vec_splat_u16(2);
    const vec_u16 sixv = vec_splat_u16(6);

    const vec_u8 sel = (vec_u8) {0,0,0,0,0,0,0,0,-1,-1,-1,-1,-1,-1,-1,-1};
    LOAD_ZERO;

    dct[0] += 32; // rounding for the >>6 at the end

    s0 = vec_ld(0x00, (int16_t*)dct);
    s1 = vec_ld(0x10, (int16_t*)dct);
    s2 = vec_ld(0x20, (int16_t*)dct);
    s3 = vec_ld(0x30, (int16_t*)dct);
    s4 = vec_ld(0x40, (int16_t*)dct);
    s5 = vec_ld(0x50, (int16_t*)dct);
    s6 = vec_ld(0x60, (int16_t*)dct);
    s7 = vec_ld(0x70, (int16_t*)dct);

    IDCT8_1D_ALTIVEC(s0, s1, s2, s3, s4, s5, s6, s7,
                     d0, d1, d2, d3, d4, d5, d6, d7);

    TRANSPOSE8( d0,  d1,  d2,  d3,  d4,  d5,  d6, d7 );

    IDCT8_1D_ALTIVEC(d0,  d1,  d2,  d3,  d4,  d5,  d6, d7,
                     idct0, idct1, idct2, idct3, idct4, idct5, idct6, idct7);

    ALTIVEC_STORE_SUM_CLIP(&dst[0*stride], idct0, perm_ldv, perm_stv, sel);
    ALTIVEC_STORE_SUM_CLIP(&dst[1*stride], idct1, perm_ldv, perm_stv, sel);
    ALTIVEC_STORE_SUM_CLIP(&dst[2*stride], idct2, perm_ldv, perm_stv, sel);
    ALTIVEC_STORE_SUM_CLIP(&dst[3*stride], idct3, perm_ldv, perm_stv, sel);
    ALTIVEC_STORE_SUM_CLIP(&dst[4*stride], idct4, perm_ldv, perm_stv, sel);
    ALTIVEC_STORE_SUM_CLIP(&dst[5*stride], idct5, perm_ldv, perm_stv, sel);
    ALTIVEC_STORE_SUM_CLIP(&dst[6*stride], idct6, perm_ldv, perm_stv, sel);
    ALTIVEC_STORE_SUM_CLIP(&dst[7*stride], idct7, perm_ldv, perm_stv, sel);
}

static av_always_inline void h264_idct_dc_add_internal(uint8_t *dst, DCTELEM *block, int stride, int size)
{
    vec_s16 dc16;
    vec_u8 dcplus, dcminus, v0, v1, v2, v3, aligner;
    LOAD_ZERO;
    DECLARE_ALIGNED_16(int, dc);
    int i;

    dc = (block[0] + 32) >> 6;
    dc16 = vec_splat((vec_s16) vec_lde(0, &dc), 1);

    if (size == 4)
        dc16 = vec_sld(dc16, zero_s16v, 8);
    dcplus = vec_packsu(dc16, zero_s16v);
    dcminus = vec_packsu(vec_sub(zero_s16v, dc16), zero_s16v);

    aligner = vec_lvsr(0, dst);
    dcplus = vec_perm(dcplus, dcplus, aligner);
    dcminus = vec_perm(dcminus, dcminus, aligner);

    for (i = 0; i < size; i += 4) {
        v0 = vec_ld(0, dst+0*stride);
        v1 = vec_ld(0, dst+1*stride);
        v2 = vec_ld(0, dst+2*stride);
        v3 = vec_ld(0, dst+3*stride);

        v0 = vec_adds(v0, dcplus);
        v1 = vec_adds(v1, dcplus);
        v2 = vec_adds(v2, dcplus);
        v3 = vec_adds(v3, dcplus);

        v0 = vec_subs(v0, dcminus);
        v1 = vec_subs(v1, dcminus);
        v2 = vec_subs(v2, dcminus);
        v3 = vec_subs(v3, dcminus);

        vec_st(v0, 0, dst+0*stride);
        vec_st(v1, 0, dst+1*stride);
        vec_st(v2, 0, dst+2*stride);
        vec_st(v3, 0, dst+3*stride);

        dst += 4*stride;
    }
}
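
/* AltiVec has no signed add on unsigned bytes, so the function above splits
 * the DC term into a saturated add of max(dc, 0) (dcplus) and a saturated
 * subtract of max(-dc, 0) (dcminus).  The net effect is this scalar sketch
 * (illustrative only):
 */
#if 0
static void sketch_idct_dc_add(uint8_t *dst, DCTELEM *block, int stride, int size)
{
    const int dc = (block[0] + 32) >> 6;
    int i, j;
    for (i = 0; i < size; i++) {
        for (j = 0; j < size; j++)
            dst[j] = av_clip_uint8(dst[j] + dc);
        dst += stride;
    }
}
#endif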

static void h264_idct_dc_add_altivec(uint8_t *dst, DCTELEM *block, int stride)
{
    h264_idct_dc_add_internal(dst, block, stride, 4);
}

static void ff_h264_idct8_dc_add_altivec(uint8_t *dst, DCTELEM *block, int stride)
{
    h264_idct_dc_add_internal(dst, block, stride, 8);
}

static void ff_h264_idct_add16_altivec(uint8_t *dst, const int *block_offset, DCTELEM *block, int stride, const uint8_t nnzc[6*8]){
    int i;
    for(i=0; i<16; i++){
        int nnz = nnzc[ scan8[i] ];
        if(nnz){
            if(nnz==1 && block[i*16]) h264_idct_dc_add_altivec(dst + block_offset[i], block + i*16, stride);
            else                      ff_h264_idct_add_altivec(dst + block_offset[i], block + i*16, stride);
        }
    }
}

static void ff_h264_idct_add16intra_altivec(uint8_t *dst, const int *block_offset, DCTELEM *block, int stride, const uint8_t nnzc[6*8]){
    int i;
    for(i=0; i<16; i++){
        if(nnzc[ scan8[i] ]) ff_h264_idct_add_altivec(dst + block_offset[i], block + i*16, stride);
        else if(block[i*16]) h264_idct_dc_add_altivec(dst + block_offset[i], block + i*16, stride);
    }
}

static void ff_h264_idct8_add4_altivec(uint8_t *dst, const int *block_offset, DCTELEM *block, int stride, const uint8_t nnzc[6*8]){
    int i;
    for(i=0; i<16; i+=4){
        int nnz = nnzc[ scan8[i] ];
        if(nnz){
            if(nnz==1 && block[i*16]) ff_h264_idct8_dc_add_altivec(dst + block_offset[i], block + i*16, stride);
            else                      ff_h264_idct8_add_altivec   (dst + block_offset[i], block + i*16, stride);
        }
    }
}

static void ff_h264_idct_add8_altivec(uint8_t **dest, const int *block_offset, DCTELEM *block, int stride, const uint8_t nnzc[6*8]){
    int i;
    for(i=16; i<16+8; i++){
        if(nnzc[ scan8[i] ])
            ff_h264_idct_add_altivec(dest[(i&4)>>2] + block_offset[i], block + i*16, stride);
        else if(block[i*16])
            h264_idct_dc_add_altivec(dest[(i&4)>>2] + block_offset[i], block + i*16, stride);
    }
}

#define transpose4x16(r0, r1, r2, r3) {      \
    register vec_u8 r4;                    \
    register vec_u8 r5;                    \
    register vec_u8 r6;                    \
    register vec_u8 r7;                    \
                                             \
    r4 = vec_mergeh(r0, r2);  /*0, 2 set 0*/ \
    r5 = vec_mergel(r0, r2);  /*0, 2 set 1*/ \
    r6 = vec_mergeh(r1, r3);  /*1, 3 set 0*/ \
    r7 = vec_mergel(r1, r3);  /*1, 3 set 1*/ \
                                             \
    r0 = vec_mergeh(r4, r6);  /*all set 0*/  \
    r1 = vec_mergel(r4, r6);  /*all set 1*/  \
    r2 = vec_mergeh(r5, r7);  /*all set 2*/  \
    r3 = vec_mergel(r5, r7);  /*all set 3*/  \
}

static inline void write16x4(uint8_t *dst, int dst_stride,
                             register vec_u8 r0, register vec_u8 r1,
                             register vec_u8 r2, register vec_u8 r3) {
    DECLARE_ALIGNED_16(unsigned char, result[64]);
    uint32_t *src_int = (uint32_t *)result, *dst_int = (uint32_t *)dst;
    int int_dst_stride = dst_stride/4;

    vec_st(r0, 0, result);
    vec_st(r1, 16, result);
    vec_st(r2, 32, result);
    vec_st(r3, 48, result);
    /* FIXME: there has to be a better way!!!! */
    *dst_int = *src_int;
    *(dst_int+   int_dst_stride) = *(src_int + 1);
    *(dst_int+ 2*int_dst_stride) = *(src_int + 2);
    *(dst_int+ 3*int_dst_stride) = *(src_int + 3);
    *(dst_int+ 4*int_dst_stride) = *(src_int + 4);
    *(dst_int+ 5*int_dst_stride) = *(src_int + 5);
    *(dst_int+ 6*int_dst_stride) = *(src_int + 6);
    *(dst_int+ 7*int_dst_stride) = *(src_int + 7);
    *(dst_int+ 8*int_dst_stride) = *(src_int + 8);
    *(dst_int+ 9*int_dst_stride) = *(src_int + 9);
    *(dst_int+10*int_dst_stride) = *(src_int + 10);
    *(dst_int+11*int_dst_stride) = *(src_int + 11);
    *(dst_int+12*int_dst_stride) = *(src_int + 12);
    *(dst_int+13*int_dst_stride) = *(src_int + 13);
    *(dst_int+14*int_dst_stride) = *(src_int + 14);
    *(dst_int+15*int_dst_stride) = *(src_int + 15);
}

#define readAndTranspose16x6(src, src_stride, r8, r9, r10, r11, r12, r13) {\
    register vec_u8 r0  = unaligned_load(0,             src);            \
    register vec_u8 r1  = unaligned_load(   src_stride, src);            \
    register vec_u8 r2  = unaligned_load(2* src_stride, src);            \
    register vec_u8 r3  = unaligned_load(3* src_stride, src);            \
    register vec_u8 r4  = unaligned_load(4* src_stride, src);            \
    register vec_u8 r5  = unaligned_load(5* src_stride, src);            \
    register vec_u8 r6  = unaligned_load(6* src_stride, src);            \
    register vec_u8 r7  = unaligned_load(7* src_stride, src);            \
    register vec_u8 r14 = unaligned_load(14*src_stride, src);            \
    register vec_u8 r15 = unaligned_load(15*src_stride, src);            \
                                                                           \
    r8  = unaligned_load( 8*src_stride, src);                              \
    r9  = unaligned_load( 9*src_stride, src);                              \
    r10 = unaligned_load(10*src_stride, src);                              \
    r11 = unaligned_load(11*src_stride, src);                              \
    r12 = unaligned_load(12*src_stride, src);                              \
    r13 = unaligned_load(13*src_stride, src);                              \
                                                                           \
    /*Merge first pairs*/                                                  \
    r0 = vec_mergeh(r0, r8);    /*0, 8*/                                   \
    r1 = vec_mergeh(r1, r9);    /*1, 9*/                                   \
    r2 = vec_mergeh(r2, r10);   /*2,10*/                                   \
    r3 = vec_mergeh(r3, r11);   /*3,11*/                                   \
    r4 = vec_mergeh(r4, r12);   /*4,12*/                                   \
    r5 = vec_mergeh(r5, r13);   /*5,13*/                                   \
    r6 = vec_mergeh(r6, r14);   /*6,14*/                                   \
    r7 = vec_mergeh(r7, r15);   /*7,15*/                                   \
                                                                           \
    /*Merge second pairs*/                                                 \
    r8  = vec_mergeh(r0, r4);   /*0,4, 8,12 set 0*/                        \
    r9  = vec_mergel(r0, r4);   /*0,4, 8,12 set 1*/                        \
    r10 = vec_mergeh(r1, r5);   /*1,5, 9,13 set 0*/                        \
    r11 = vec_mergel(r1, r5);   /*1,5, 9,13 set 1*/                        \
    r12 = vec_mergeh(r2, r6);   /*2,6,10,14 set 0*/                        \
    r13 = vec_mergel(r2, r6);   /*2,6,10,14 set 1*/                        \
    r14 = vec_mergeh(r3, r7);   /*3,7,11,15 set 0*/                        \
    r15 = vec_mergel(r3, r7);   /*3,7,11,15 set 1*/                        \
                                                                           \
    /*Third merge*/                                                        \
    r0 = vec_mergeh(r8,  r12);  /*0,2,4,6,8,10,12,14 set 0*/               \
    r1 = vec_mergel(r8,  r12);  /*0,2,4,6,8,10,12,14 set 1*/               \
    r2 = vec_mergeh(r9,  r13);  /*0,2,4,6,8,10,12,14 set 2*/               \
    r4 = vec_mergeh(r10, r14);  /*1,3,5,7,9,11,13,15 set 0*/               \
    r5 = vec_mergel(r10, r14);  /*1,3,5,7,9,11,13,15 set 1*/               \
    r6 = vec_mergeh(r11, r15);  /*1,3,5,7,9,11,13,15 set 2*/               \
    /* Don't need to compute 3 and 7*/                                     \
                                                                           \
    /*Final merge*/                                                        \
    r8  = vec_mergeh(r0, r4);   /*all set 0*/                              \
    r9  = vec_mergel(r0, r4);   /*all set 1*/                              \
    r10 = vec_mergeh(r1, r5);   /*all set 2*/                              \
    r11 = vec_mergel(r1, r5);   /*all set 3*/                              \
    r12 = vec_mergeh(r2, r6);   /*all set 4*/                              \
    r13 = vec_mergel(r2, r6);   /*all set 5*/                              \
    /* Don't need to compute 14 and 15*/                                   \
                                                                           \
}

// out: o = |x-y| < a
static inline vec_u8 diff_lt_altivec ( register vec_u8 x,
                                         register vec_u8 y,
                                         register vec_u8 a) {

    register vec_u8 diff = vec_subs(x, y);
    register vec_u8 diffneg = vec_subs(y, x);
    register vec_u8 o = vec_or(diff, diffneg); /* |x-y| */
    o = (vec_u8)vec_cmplt(o, a);
    return o;
}
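
/* Unsigned bytes have no direct |x-y|: vec_subs saturates at zero, so one of
 * subs(x,y) and subs(y,x) is the positive difference and the other is zero,
 * and their OR is the absolute difference.  Scalar sketch of the same idea
 * (illustrative only):
 */
#if 0
static uint8_t sketch_diff_lt(uint8_t x, uint8_t y, uint8_t a)
{
    uint8_t d1 = x > y ? x - y : 0;  /* saturating x - y */
    uint8_t d2 = y > x ? y - x : 0;  /* saturating y - x */
    return (d1 | d2) < a ? 0xFF : 0x00;
}
#endif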

static inline vec_u8 h264_deblock_mask ( register vec_u8 p0,
                                           register vec_u8 p1,
                                           register vec_u8 q0,
                                           register vec_u8 q1,
                                           register vec_u8 alpha,
                                           register vec_u8 beta) {

    register vec_u8 mask;
    register vec_u8 tempmask;

    mask = diff_lt_altivec(p0, q0, alpha);
    tempmask = diff_lt_altivec(p1, p0, beta);
    mask = vec_and(mask, tempmask);
    tempmask = diff_lt_altivec(q1, q0, beta);
    mask = vec_and(mask, tempmask);

    return mask;
}

// out: newp1 = clip((p2 + ((p0 + q0 + 1) >> 1)) >> 1, p1-tc0, p1+tc0)
static inline vec_u8 h264_deblock_q1(register vec_u8 p0,
                                       register vec_u8 p1,
                                       register vec_u8 p2,
                                       register vec_u8 q0,
                                       register vec_u8 tc0) {

    register vec_u8 average = vec_avg(p0, q0);
    register vec_u8 temp;
    register vec_u8 unclipped;
    register vec_u8 ones;
    register vec_u8 max;
    register vec_u8 min;
    register vec_u8 newp1;

    temp = vec_xor(average, p2);
    average = vec_avg(average, p2);     /*avg(p2, avg(p0, q0)) */
    ones = vec_splat_u8(1);
    temp = vec_and(temp, ones);         /*(p2^avg(p0, q0)) & 1 */
    unclipped = vec_subs(average, temp); /*(p2+((p0+q0+1)>>1))>>1 */
    max = vec_adds(p1, tc0);
    min = vec_subs(p1, tc0);
    newp1 = vec_max(min, unclipped);
    newp1 = vec_min(max, newp1);
    return newp1;
}
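
/* vec_avg rounds up (avg(a,b) = (a + b + 1) >> 1), but the target formula
 * truncates in the outer average, so the code above subtracts the spurious
 * rounding bit ((p2 ^ avg(p0,q0)) & 1).  What it computes, per byte, in
 * scalar form (illustrative only):
 */
#if 0
static uint8_t sketch_deblock_q1(uint8_t p0, uint8_t p1, uint8_t p2,
                                 uint8_t q0, uint8_t tc0)
{
    int unclipped = (p2 + ((p0 + q0 + 1) >> 1)) >> 1;
    int lo = p1 - tc0 < 0   ? 0   : p1 - tc0; /* saturating lower bound */
    int hi = p1 + tc0 > 255 ? 255 : p1 + tc0; /* saturating upper bound */
    return unclipped < lo ? lo : unclipped > hi ? hi : unclipped;
}
#endif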

#define h264_deblock_p0_q0(p0, p1, q0, q1, tc0masked) {                                           \
                                                                                                  \
    const vec_u8 A0v = vec_sl(vec_splat_u8(10), vec_splat_u8(4));                               \
                                                                                                  \
    register vec_u8 pq0bit = vec_xor(p0,q0);                                                    \
    register vec_u8 q1minus;                                                                    \
    register vec_u8 p0minus;                                                                    \
    register vec_u8 stage1;                                                                     \
    register vec_u8 stage2;                                                                     \
    register vec_u8 vec160;                                                                     \
    register vec_u8 delta;                                                                      \
    register vec_u8 deltaneg;                                                                   \
                                                                                                  \
    q1minus = vec_nor(q1, q1);                 /* 255 - q1 */                                     \
    stage1 = vec_avg(p1, q1minus);             /* (p1 - q1 + 256)>>1 */                           \
    stage2 = vec_sr(stage1, vec_splat_u8(1));  /* (p1 - q1 + 256)>>2 = 64 + (p1 - q1) >> 2 */     \
    p0minus = vec_nor(p0, p0);                 /* 255 - p0 */                                     \
    stage1 = vec_avg(q0, p0minus);             /* (q0 - p0 + 256)>>1 */                           \
    pq0bit = vec_and(pq0bit, vec_splat_u8(1));                                                    \
    stage2 = vec_avg(stage2, pq0bit);          /* 32 + ((q0 - p0)&1 + (p1 - q1) >> 2 + 1) >> 1 */ \
    stage2 = vec_adds(stage2, stage1);         /* 160 + ((p0 - q0) + (p1 - q1) >> 2 + 1) >> 1 */  \
    vec160 = vec_ld(0, &A0v);                                                                     \
    deltaneg = vec_subs(vec160, stage2);       /* -d */                                           \
    delta = vec_subs(stage2, vec160);          /* d */                                            \
    deltaneg = vec_min(tc0masked, deltaneg);                                                      \
    delta = vec_min(tc0masked, delta);                                                            \
    p0 = vec_subs(p0, deltaneg);                                                                  \
    q0 = vec_subs(q0, delta);                                                                     \
    p0 = vec_adds(p0, delta);                                                                     \
    q0 = vec_adds(q0, deltaneg);                                                                  \
}
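
/* This matches the standard luma p0/q0 update (cf. the C version in h264.c),
 * computed above entirely in unsigned-byte arithmetic around the bias
 * 160 = 10 << 4.  Scalar sketch (illustrative only):
 */
#if 0
static void sketch_deblock_p0_q0(uint8_t *p0, uint8_t *q0,
                                 uint8_t p1, uint8_t q1, int tc)
{
    int d = (((*q0 - *p0) << 2) + (p1 - q1) + 4) >> 3;
    if (d < -tc) d = -tc;
    if (d >  tc) d =  tc;
    *p0 = av_clip_uint8(*p0 + d);
    *q0 = av_clip_uint8(*q0 - d);
}
#endif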

#define h264_loop_filter_luma_altivec(p2, p1, p0, q0, q1, q2, alpha, beta, tc0) {            \
    DECLARE_ALIGNED_16(unsigned char, temp[16]);                                             \
    register vec_u8 alphavec;                                                              \
    register vec_u8 betavec;                                                               \
    register vec_u8 mask;                                                                  \
    register vec_u8 p1mask;                                                                \
    register vec_u8 q1mask;                                                                \
    register vector signed   char tc0vec;                                                    \
    register vec_u8 finaltc0;                                                              \
    register vec_u8 tc0masked;                                                             \
    register vec_u8 newp1;                                                                 \
    register vec_u8 newq1;                                                                 \
                                                                                             \
    temp[0] = alpha;                                                                         \
    temp[1] = beta;                                                                          \
    alphavec = vec_ld(0, temp);                                                              \
    betavec = vec_splat(alphavec, 0x1);                                                      \
    alphavec = vec_splat(alphavec, 0x0);                                                     \
    mask = h264_deblock_mask(p0, p1, q0, q1, alphavec, betavec); /*if in block */            \
                                                                                             \
    *((int *)temp) = *((int *)tc0);                                                          \
    tc0vec = vec_ld(0, (signed char*)temp);                                                  \
    tc0vec = vec_mergeh(tc0vec, tc0vec);                                                     \
    tc0vec = vec_mergeh(tc0vec, tc0vec);                                                     \
    mask = vec_and(mask, vec_cmpgt(tc0vec, vec_splat_s8(-1)));  /* if tc0[i] >= 0 */         \
    finaltc0 = vec_and((vec_u8)tc0vec, mask);     /* tc = tc0 */                           \
                                                                                             \
    p1mask = diff_lt_altivec(p2, p0, betavec);                                               \
    p1mask = vec_and(p1mask, mask);                             /* if ( |p2 - p0| < beta) */ \
    tc0masked = vec_and(p1mask, (vec_u8)tc0vec);                                           \
    finaltc0 = vec_sub(finaltc0, p1mask);                       /* tc++ */                   \
    newp1 = h264_deblock_q1(p0, p1, p2, q0, tc0masked);                                      \
    /*end if*/                                                                               \
                                                                                             \
    q1mask = diff_lt_altivec(q2, q0, betavec);                                               \
    q1mask = vec_and(q1mask, mask);                             /* if ( |q2 - q0| < beta ) */\
    tc0masked = vec_and(q1mask, (vec_u8)tc0vec);                                           \
    finaltc0 = vec_sub(finaltc0, q1mask);                       /* tc++ */                   \
    newq1 = h264_deblock_q1(p0, q1, q2, q0, tc0masked);                                      \
    /*end if*/                                                                               \
                                                                                             \
    h264_deblock_p0_q0(p0, p1, q0, q1, finaltc0);                                            \
    p1 = newp1;                                                                              \
    q1 = newq1;                                                                              \
}

static void h264_v_loop_filter_luma_altivec(uint8_t *pix, int stride, int alpha, int beta, int8_t *tc0) {

    if ((tc0[0] & tc0[1] & tc0[2] & tc0[3]) >= 0) {
        register vec_u8 p2 = vec_ld(-3*stride, pix);
        register vec_u8 p1 = vec_ld(-2*stride, pix);
        register vec_u8 p0 = vec_ld(-1*stride, pix);
        register vec_u8 q0 = vec_ld(0, pix);
        register vec_u8 q1 = vec_ld(stride, pix);
        register vec_u8 q2 = vec_ld(2*stride, pix);
        h264_loop_filter_luma_altivec(p2, p1, p0, q0, q1, q2, alpha, beta, tc0);
        vec_st(p1, -2*stride, pix);
        vec_st(p0, -1*stride, pix);
        vec_st(q0, 0, pix);
        vec_st(q1, stride, pix);
    }
}

static void h264_h_loop_filter_luma_altivec(uint8_t *pix, int stride, int alpha, int beta, int8_t *tc0) {

    register vec_u8 line0, line1, line2, line3, line4, line5;
    if ((tc0[0] & tc0[1] & tc0[2] & tc0[3]) < 0)
        return;
    readAndTranspose16x6(pix-3, stride, line0, line1, line2, line3, line4, line5);
    h264_loop_filter_luma_altivec(line0, line1, line2, line3, line4, line5, alpha, beta, tc0);
    transpose4x16(line1, line2, line3, line4);
    write16x4(pix-2, stride, line1, line2, line3, line4);
}

static av_always_inline
void weight_h264_WxH_altivec(uint8_t *block, int stride, int log2_denom, int weight, int offset, int w, int h)
{
    int y, aligned;
    vec_u8 vblock;
    vec_s16 vtemp, vweight, voffset, v0, v1;
    vec_u16 vlog2_denom;
    DECLARE_ALIGNED_16(int32_t, temp[4]);
    LOAD_ZERO;

    offset <<= log2_denom;
    if(log2_denom) offset += 1<<(log2_denom-1);
    temp[0] = log2_denom;
    temp[1] = weight;
    temp[2] = offset;

    vtemp = (vec_s16)vec_ld(0, temp);
    vlog2_denom = (vec_u16)vec_splat(vtemp, 1);
    vweight = vec_splat(vtemp, 3);
    voffset = vec_splat(vtemp, 5);
    aligned = !((unsigned long)block & 0xf);

    for (y=0; y<h; y++) {
        vblock = vec_ld(0, block);

        v0 = (vec_s16)vec_mergeh(zero_u8v, vblock);
        v1 = (vec_s16)vec_mergel(zero_u8v, vblock);

        if (w == 16 || aligned) {
            v0 = vec_mladd(v0, vweight, zero_s16v);
            v0 = vec_adds(v0, voffset);
            v0 = vec_sra(v0, vlog2_denom);
        }
        if (w == 16 || !aligned) {
            v1 = vec_mladd(v1, vweight, zero_s16v);
            v1 = vec_adds(v1, voffset);
            v1 = vec_sra(v1, vlog2_denom);
        }
        vblock = vec_packsu(v0, v1);
        vec_st(vblock, 0, block);

        block += stride;
    }
}
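
/* Scalar form of the unidirectional weighted prediction above, with the
 * rounding term folded into offset exactly as in the vector setup
 * (illustrative sketch only):
 */
#if 0
static void sketch_weight(uint8_t *block, int stride, int log2_denom,
                          int weight, int offset, int w, int h)
{
    int x, y;
    offset <<= log2_denom;
    if (log2_denom)
        offset += 1 << (log2_denom - 1); /* rounding term */
    for (y = 0; y < h; y++) {
        for (x = 0; x < w; x++)
            block[x] = av_clip_uint8((block[x] * weight + offset) >> log2_denom);
        block += stride;
    }
}
#endif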

static av_always_inline
void biweight_h264_WxH_altivec(uint8_t *dst, uint8_t *src, int stride, int log2_denom,
                               int weightd, int weights, int offset, int w, int h)
{
    int y, dst_aligned, src_aligned;
    vec_u8 vsrc, vdst;
    vec_s16 vtemp, vweights, vweightd, voffset, v0, v1, v2, v3;
    vec_u16 vlog2_denom;
    DECLARE_ALIGNED_16(int32_t, temp[4]);
    LOAD_ZERO;

    offset = ((offset + 1) | 1) << log2_denom;
    temp[0] = log2_denom+1;
    temp[1] = weights;
    temp[2] = weightd;
    temp[3] = offset;

    vtemp = (vec_s16)vec_ld(0, temp);
    vlog2_denom = (vec_u16)vec_splat(vtemp, 1);
    vweights = vec_splat(vtemp, 3);
    vweightd = vec_splat(vtemp, 5);
    voffset = vec_splat(vtemp, 7);
    dst_aligned = !((unsigned long)dst & 0xf);
    src_aligned = !((unsigned long)src & 0xf);

    for (y=0; y<h; y++) {
        vdst = vec_ld(0, dst);
        vsrc = vec_ld(0, src);

        v0 = (vec_s16)vec_mergeh(zero_u8v, vdst);
        v1 = (vec_s16)vec_mergel(zero_u8v, vdst);
        v2 = (vec_s16)vec_mergeh(zero_u8v, vsrc);
        v3 = (vec_s16)vec_mergel(zero_u8v, vsrc);

        if (w == 8) {
            if (src_aligned)
                v3 = v2;
            else
                v2 = v3;
        }

        if (w == 16 || dst_aligned) {
            v0 = vec_mladd(v0, vweightd, zero_s16v);
            v2 = vec_mladd(v2, vweights, zero_s16v);

            v0 = vec_adds(v0, voffset);
            v0 = vec_adds(v0, v2);
            v0 = vec_sra(v0, vlog2_denom);
        }
        if (w == 16 || !dst_aligned) {
            v1 = vec_mladd(v1, vweightd, zero_s16v);
            v3 = vec_mladd(v3, vweights, zero_s16v);

            v1 = vec_adds(v1, voffset);
            v1 = vec_adds(v1, v3);
            v1 = vec_sra(v1, vlog2_denom);
        }
        vdst = vec_packsu(v0, v1);
        vec_st(vdst, 0, dst);

        dst += stride;
        src += stride;
    }
}
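
/* Scalar form of the bidirectional case; ((offset + 1) | 1) << log2_denom
 * folds the rounding term for the >> (log2_denom + 1) into the offset
 * (illustrative sketch only):
 */
#if 0
static void sketch_biweight(uint8_t *dst, uint8_t *src, int stride,
                            int log2_denom, int weightd, int weights,
                            int offset, int w, int h)
{
    int x, y;
    offset = ((offset + 1) | 1) << log2_denom;
    for (y = 0; y < h; y++) {
        for (x = 0; x < w; x++)
            dst[x] = av_clip_uint8((dst[x] * weightd + src[x] * weights +
                                    offset) >> (log2_denom + 1));
        dst += stride;
        src += stride;
    }
}
#endif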

#define H264_WEIGHT(W,H) \
static void ff_weight_h264_pixels ## W ## x ## H ## _altivec(uint8_t *block, int stride, int log2_denom, int weight, int offset){ \
    weight_h264_WxH_altivec(block, stride, log2_denom, weight, offset, W, H); \
}\
static void ff_biweight_h264_pixels ## W ## x ## H ## _altivec(uint8_t *dst, uint8_t *src, int stride, int log2_denom, int weightd, int weights, int offset){ \
    biweight_h264_WxH_altivec(dst, src, stride, log2_denom, weightd, weights, offset, W, H); \
}

H264_WEIGHT(16,16)
H264_WEIGHT(16, 8)
H264_WEIGHT( 8,16)
H264_WEIGHT( 8, 8)
H264_WEIGHT( 8, 4)

void dsputil_h264_init_ppc(DSPContext* c, AVCodecContext *avctx) {

    if (has_altivec()) {
        c->put_h264_chroma_pixels_tab[0] = put_h264_chroma_mc8_altivec;
        c->put_no_rnd_h264_chroma_pixels_tab[0] = put_no_rnd_h264_chroma_mc8_altivec;
        c->avg_h264_chroma_pixels_tab[0] = avg_h264_chroma_mc8_altivec;
        c->h264_idct_add = ff_h264_idct_add_altivec;
        c->h264_idct_add8 = ff_h264_idct_add8_altivec;
        c->h264_idct_add16 = ff_h264_idct_add16_altivec;
        c->h264_idct_add16intra = ff_h264_idct_add16intra_altivec;
        c->h264_idct_dc_add= h264_idct_dc_add_altivec;
        c->h264_idct8_dc_add = ff_h264_idct8_dc_add_altivec;
        c->h264_idct8_add = ff_h264_idct8_add_altivec;
        c->h264_idct8_add4 = ff_h264_idct8_add4_altivec;
        c->h264_v_loop_filter_luma= h264_v_loop_filter_luma_altivec;
        c->h264_h_loop_filter_luma= h264_h_loop_filter_luma_altivec;

#define dspfunc(PFX, IDX, NUM) \
        c->PFX ## _pixels_tab[IDX][ 0] = PFX ## NUM ## _mc00_altivec; \
        c->PFX ## _pixels_tab[IDX][ 1] = PFX ## NUM ## _mc10_altivec; \
        c->PFX ## _pixels_tab[IDX][ 2] = PFX ## NUM ## _mc20_altivec; \
        c->PFX ## _pixels_tab[IDX][ 3] = PFX ## NUM ## _mc30_altivec; \
        c->PFX ## _pixels_tab[IDX][ 4] = PFX ## NUM ## _mc01_altivec; \
        c->PFX ## _pixels_tab[IDX][ 5] = PFX ## NUM ## _mc11_altivec; \
        c->PFX ## _pixels_tab[IDX][ 6] = PFX ## NUM ## _mc21_altivec; \
        c->PFX ## _pixels_tab[IDX][ 7] = PFX ## NUM ## _mc31_altivec; \
        c->PFX ## _pixels_tab[IDX][ 8] = PFX ## NUM ## _mc02_altivec; \
        c->PFX ## _pixels_tab[IDX][ 9] = PFX ## NUM ## _mc12_altivec; \
        c->PFX ## _pixels_tab[IDX][10] = PFX ## NUM ## _mc22_altivec; \
        c->PFX ## _pixels_tab[IDX][11] = PFX ## NUM ## _mc32_altivec; \
        c->PFX ## _pixels_tab[IDX][12] = PFX ## NUM ## _mc03_altivec; \
        c->PFX ## _pixels_tab[IDX][13] = PFX ## NUM ## _mc13_altivec; \
        c->PFX ## _pixels_tab[IDX][14] = PFX ## NUM ## _mc23_altivec; \
        c->PFX ## _pixels_tab[IDX][15] = PFX ## NUM ## _mc33_altivec

        dspfunc(put_h264_qpel, 0, 16);
        dspfunc(avg_h264_qpel, 0, 16);
#undef dspfunc

        c->weight_h264_pixels_tab[0] = ff_weight_h264_pixels16x16_altivec;
        c->weight_h264_pixels_tab[1] = ff_weight_h264_pixels16x8_altivec;
        c->weight_h264_pixels_tab[2] = ff_weight_h264_pixels8x16_altivec;
        c->weight_h264_pixels_tab[3] = ff_weight_h264_pixels8x8_altivec;
        c->weight_h264_pixels_tab[4] = ff_weight_h264_pixels8x4_altivec;
        c->biweight_h264_pixels_tab[0] = ff_biweight_h264_pixels16x16_altivec;
        c->biweight_h264_pixels_tab[1] = ff_biweight_h264_pixels16x8_altivec;
        c->biweight_h264_pixels_tab[2] = ff_biweight_h264_pixels8x16_altivec;
        c->biweight_h264_pixels_tab[3] = ff_biweight_h264_pixels8x8_altivec;
        c->biweight_h264_pixels_tab[4] = ff_biweight_h264_pixels8x4_altivec;
    }
}
