ffmpeg / libavcodec / ppc / gmc_altivec.c @ b550bfaa

/*
 * GMC (Global Motion Compensation)
 * AltiVec-enabled
 * Copyright (c) 2003 Romain Dolbeau <romain@dolbeau.org>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "dsputil.h"

#include "gcc_fixes.h"

#include "dsputil_altivec.h"

/*
  altivec-enhanced gmc1. ATM this code assumes stride is a multiple of 8,
  to preserve proper dst alignment.
*/
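
/*
  For reference, the scalar computation being vectorized here (the C gmc1
  in dsputil.c) produces, for every output byte:

    dst[x] = (A*src[x]        + B*src[x+1] +
              C*src[x+stride] + D*src[x+stride+1] + rounder) >> 8

  with the weights A..D derived from the 1/16-pel offsets x16/y16 exactly
  as in the ABCD[] table below.
*/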
#define GMC1_PERF_COND (h==8)
void gmc1_altivec(uint8_t *dst /* align 8 */, uint8_t *src /* align 1 */, int stride, int h, int x16, int y16, int rounder)
{
POWERPC_PERF_DECLARE(altivec_gmc1_num, GMC1_PERF_COND);
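    /* Both constant tables below are forced to 16-byte alignment: vec_ld
       ignores the low four bits of its effective address and can only load
       from 16-byte-aligned locations. */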
    const unsigned short __attribute__ ((aligned(16))) rounder_a[8] =
      {rounder, rounder, rounder, rounder,
       rounder, rounder, rounder, rounder};
    const unsigned short __attribute__ ((aligned(16))) ABCD[8] =
      {
        (16-x16)*(16-y16), /* A */
        (   x16)*(16-y16), /* B */
        (16-x16)*(   y16), /* C */
        (   x16)*(   y16), /* D */
        0, 0, 0, 0         /* padding */
      };
    register const_vector unsigned char vczero = (const_vector unsigned char)vec_splat_u8(0);
    register const_vector unsigned short vcsr8 = (const_vector unsigned short)vec_splat_u16(8);
    register vector unsigned char dstv, dstv2, src_0, src_1, srcvA, srcvB, srcvC, srcvD;
    register vector unsigned short Av, Bv, Cv, Dv, rounderV, tempA, tempB, tempC, tempD;
    int i;
    unsigned long dst_odd = (unsigned long)dst & 0x0000000F;
    unsigned long src_really_odd = (unsigned long)src & 0x0000000F;

POWERPC_PERF_START_COUNT(altivec_gmc1_num, GMC1_PERF_COND);
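
    /* Load the eight 16-bit weights once, then broadcast each of A, B, C
       and D into all eight lanes of its own vector with vec_splat. */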
    tempA = vec_ld(0, (unsigned short*)ABCD);
    Av = vec_splat(tempA, 0);
    Bv = vec_splat(tempA, 1);
    Cv = vec_splat(tempA, 2);
    Dv = vec_splat(tempA, 3);

    rounderV = vec_ld(0, (unsigned short*)rounder_a);

    // we'll be able to pick up our 9 char elements
    // at src from those 32 bytes
    // we load the first batch here, as inside the loop
    // we can re-use 'src+stride' from one iteration
    // as the 'src' of the next.
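    /* AltiVec has no unaligned loads: vec_ld rounds its address down to a
       16-byte boundary, so two loads cover the 32-byte window around src,
       and vec_perm with the vec_lvsl(offset, src) shift pattern extracts
       the 16 bytes that actually start at src + offset. */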
    src_0 = vec_ld(0, src);
    src_1 = vec_ld(16, src);
    srcvA = vec_perm(src_0, src_1, vec_lvsl(0, src));

    if (src_really_odd != 0x0000000F)
    { // if src & 0xF == 0xF, then (src+1) is properly aligned on the second vector.
      srcvB = vec_perm(src_0, src_1, vec_lvsl(1, src));
    }
    else
    {
      srcvB = src_1;
    }
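    /* vec_mergeh(vczero, v) interleaves a zero byte ahead of each of the
       first eight data bytes, zero-extending pixels 0..7 into eight
       unsigned 16-bit lanes ready for vec_mladd. */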
    srcvA = vec_mergeh(vczero, srcvA);
    srcvB = vec_mergeh(vczero, srcvB);

    for(i=0; i<h; i++)
    {
      dst_odd = (unsigned long)dst & 0x0000000F;
      src_really_odd = (((unsigned long)src) + stride) & 0x0000000F;

      dstv = vec_ld(0, dst);

      // we'll be able to pick up our 9 char elements
      // at src + stride from those 32 bytes
      // then reuse the resulting 2 vectors srcvC and srcvD
      // as the next srcvA and srcvB
      src_0 = vec_ld(stride + 0, src);
      src_1 = vec_ld(stride + 16, src);
      srcvC = vec_perm(src_0, src_1, vec_lvsl(stride + 0, src));

      if (src_really_odd != 0x0000000F)
      { // if src & 0xF == 0xF, then (src+1) is properly aligned on the second vector.
        srcvD = vec_perm(src_0, src_1, vec_lvsl(stride + 1, src));
      }
      else
      {
        srcvD = src_1;
      }

      srcvC = vec_mergeh(vczero, srcvC);
      srcvD = vec_mergeh(vczero, srcvD);
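
      /* 16-bit unsigned math cannot overflow below: the four weights always
         sum to 16*16 = 256, so the worst case is 256*255 plus a small
         rounder (128 at the call sites), which still fits in 16 bits. */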

      // OK, now we (finally) do the math :-)
      // these four instructions replace 32 int muls & 32 int adds.
      // isn't AltiVec nice?
      tempA = vec_mladd((vector unsigned short)srcvA, Av, rounderV);
      tempB = vec_mladd((vector unsigned short)srcvB, Bv, tempA);
      tempC = vec_mladd((vector unsigned short)srcvC, Cv, tempB);
      tempD = vec_mladd((vector unsigned short)srcvD, Dv, tempC);

      srcvA = srcvC;
      srcvB = srcvD;

      tempD = vec_sr(tempD, vcsr8);

      dstv2 = vec_pack(tempD, (vector unsigned short)vczero);
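
      /* dst is only guaranteed 8-byte aligned, while vec_st can only store
         to 16-byte-aligned addresses; so merge the 8 result bytes into the
         destination vector loaded earlier (upper or lower half, depending
         on dst & 0xF) and store the full 16 bytes back. The vcprm() helper
         builds the vec_perm selector from 32-bit word indices, s0/s1
         meaning words 0/1 of the second operand. */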

      if (dst_odd)
      {
        dstv2 = vec_perm(dstv, dstv2, vcprm(0,1,s0,s1));
      }
      else
      {
        dstv2 = vec_perm(dstv, dstv2, vcprm(s0,s1,2,3));
      }

      vec_st(dstv2, 0, dst);

      dst += stride;
      src += stride;
    }

POWERPC_PERF_STOP_COUNT(altivec_gmc1_num, GMC1_PERF_COND);
}
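
/*
  For context (outside this file): dsputil_altivec.c installs this routine
  as the DSPContext GMC hook, roughly:

    c->gmc1 = gmc1_altivec;

  so callers reach it through s->dsp.gmc1(dst, src, stride, h, x16, y16,
  rounder) rather than by calling it directly.
*/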