ffmpeg / libavcodec / ppc / gmc_altivec.c @ 84dc2d8a

/*
 * GMC (Global Motion Compensation)
 * AltiVec-enabled
 * Copyright (c) 2003 Romain Dolbeau <romain@dolbeau.org>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "libavcodec/dsputil.h"
#include "dsputil_ppc.h"
#include "util_altivec.h"
#include "types_altivec.h"

/*
  altivec-enhanced gmc1. ATM this code assumes stride is a multiple of 8,
  to preserve proper dst alignment.
*/
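
/*
  For reference, the scalar computation this routine vectorizes: each output
  pixel is, in effect, a bilinear blend of four neighbouring source pixels,

      dst[i] = (A*src[i] + B*src[i+1] +
                C*src[i+stride] + D*src[i+stride+1] + rounder) >> 8

  with the weights A..D built from the 1/16-pel offsets x16/y16 in ABCD[]
  below.
*/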
#define GMC1_PERF_COND (h==8)
void gmc1_altivec(uint8_t *dst /* align 8 */, uint8_t *src /* align 1 */, int stride, int h, int x16, int y16, int rounder)
{
POWERPC_PERF_DECLARE(altivec_gmc1_num, GMC1_PERF_COND);
    const DECLARE_ALIGNED(16, unsigned short, rounder_a) = rounder;
    const DECLARE_ALIGNED(16, unsigned short, ABCD)[8] =
        {
            (16-x16)*(16-y16), /* A */
            (   x16)*(16-y16), /* B */
            (16-x16)*(   y16), /* C */
            (   x16)*(   y16), /* D */
            0, 0, 0, 0         /* padding */
        };
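    /* Note: the four weights always sum to 16*16 = 256, which is why a
       single >> 8 at the end of the loop renormalizes the accumulator. */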
    register const vector unsigned char vczero = (const vector unsigned char)vec_splat_u8(0);
    register const vector unsigned short vcsr8 = (const vector unsigned short)vec_splat_u16(8);
    register vector unsigned char dstv, dstv2, src_0, src_1, srcvA, srcvB, srcvC, srcvD;
    register vector unsigned short Av, Bv, Cv, Dv, rounderV, tempA, tempB, tempC, tempD;
    int i;
    unsigned long dst_odd = (unsigned long)dst & 0x0000000F;
    unsigned long src_really_odd = (unsigned long)src & 0x0000000F;

POWERPC_PERF_START_COUNT(altivec_gmc1_num, GMC1_PERF_COND);

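    /* Splat each 16-bit weight from ABCD[] across a full vector, so the
       vec_mladd calls in the loop apply it to eight pixels at once. */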
    tempA = vec_ld(0, (unsigned short*)ABCD);
    Av = vec_splat(tempA, 0);
    Bv = vec_splat(tempA, 1);
    Cv = vec_splat(tempA, 2);
    Dv = vec_splat(tempA, 3);

    rounderV = vec_splat((vec_u16)vec_lde(0, &rounder_a), 0);

    // We'll be able to pick up our 9 char elements at src from those
    // 32 bytes. We load the first batch here, as inside the loop we can
    // reuse 'src + stride' from one iteration as the 'src' of the next.
    src_0 = vec_ld(0, src);
    src_1 = vec_ld(16, src);
    srcvA = vec_perm(src_0, src_1, vec_lvsl(0, src));
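    /* vec_ld ignores the low 4 address bits, so the two aligned loads plus
       a vec_perm driven by vec_lvsl() together form one unaligned load. */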

    if (src_really_odd != 0x0000000F) {
        // if src & 0xF == 0xF, then (src+1) is properly aligned
        // on the second vector.
        srcvB = vec_perm(src_0, src_1, vec_lvsl(1, src));
    } else {
        srcvB = src_1;
    }
    srcvA = vec_mergeh(vczero, srcvA);
    srcvB = vec_mergeh(vczero, srcvB);
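    /* Merging with vczero interleaves a zero byte ahead of each pixel byte,
       zero-extending the eight pixels to 16-bit elements for the multiplies
       in the loop. */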
    for(i=0; i<h; i++) {
        dst_odd = (unsigned long)dst & 0x0000000F;
        src_really_odd = (((unsigned long)src) + stride) & 0x0000000F;

        dstv = vec_ld(0, dst);

        // We'll be able to pick up our 9 char elements at src + stride
        // from those 32 bytes, then reuse the resulting 2 vectors srcvC
        // and srcvD as the next srcvA and srcvB.
        src_0 = vec_ld(stride + 0, src);
        src_1 = vec_ld(stride + 16, src);
        srcvC = vec_perm(src_0, src_1, vec_lvsl(stride + 0, src));

        if (src_really_odd != 0x0000000F) {
            // if src & 0xF == 0xF, then (src+1) is properly aligned
            // on the second vector.
            srcvD = vec_perm(src_0, src_1, vec_lvsl(stride + 1, src));
        } else {
            srcvD = src_1;
        }

        srcvC = vec_mergeh(vczero, srcvC);
        srcvD = vec_mergeh(vczero, srcvD);

        // OK, now we (finally) do the math :-)
        // These four instructions replace 32 int muls & 32 int adds.
        // Isn't AltiVec nice?
        tempA = vec_mladd((vector unsigned short)srcvA, Av, rounderV);
        tempB = vec_mladd((vector unsigned short)srcvB, Bv, tempA);
        tempC = vec_mladd((vector unsigned short)srcvC, Cv, tempB);
        tempD = vec_mladd((vector unsigned short)srcvD, Dv, tempC);
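        /* tempD now holds, per 16-bit lane,
           A*srcA + B*srcB + C*srcC + D*srcD + rounder,
           i.e. the un-normalized bilinear blend of the formula above. */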

        srcvA = srcvC;
        srcvB = srcvD;

        tempD = vec_sr(tempD, vcsr8);

        dstv2 = vec_pack(tempD, (vector unsigned short)vczero);
        if (dst_odd) {
            dstv2 = vec_perm(dstv, dstv2, vcprm(0,1,s0,s1));
        } else {
            dstv2 = vec_perm(dstv, dstv2, vcprm(s0,s1,2,3));
        }
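        /* Only 8 of the 16 bytes stored below are freshly computed; the
           vec_perm above selects them into the correct half of the loaded
           dstv and leaves the other 8 dst bytes untouched, making the
           aligned store a read-modify-write of the destination. */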
        vec_st(dstv2, 0, dst);

        dst += stride;
        src += stride;
    }
POWERPC_PERF_STOP_COUNT(altivec_gmc1_num, GMC1_PERF_COND);
}