ffmpeg / libavcodec / ppc / gmc_altivec.c @ 2912e87a

/*
 * GMC (Global Motion Compensation)
 * AltiVec-enabled
 * Copyright (c) 2003 Romain Dolbeau <romain@dolbeau.org>
 *
 * This file is part of Libav.
 *
 * Libav is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * Libav is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with Libav; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "libavcodec/dsputil.h"
#include "util_altivec.h"
#include "types_altivec.h"
#include "dsputil_altivec.h"

/*
 * AltiVec-enhanced gmc1. At the moment this code assumes that stride is a
 * multiple of 8, to preserve proper dst alignment.
 */
void gmc1_altivec(uint8_t *dst /* align 8 */, uint8_t *src /* align 1 */, int stride, int h, int x16, int y16, int rounder)
{
    const DECLARE_ALIGNED(16, unsigned short, rounder_a) = rounder;
    const DECLARE_ALIGNED(16, unsigned short, ABCD)[8] =
        {
            (16-x16)*(16-y16), /* A */
            (   x16)*(16-y16), /* B */
            (16-x16)*(   y16), /* C */
            (   x16)*(   y16), /* D */
            0, 0, 0, 0         /* padding */
        };
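    // Bilinear weights for the 1/16-pel offsets (x16, y16); they sum to 256.
    // Each output pixel below is computed as
    //   (A*src[x] + B*src[x+1] + C*src[x+stride] + D*src[x+stride+1] + rounder) >> 8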
    register const vector unsigned char vczero = (const vector unsigned char)vec_splat_u8(0);
    register const vector unsigned short vcsr8 = (const vector unsigned short)vec_splat_u16(8);
    register vector unsigned char dstv, dstv2, src_0, src_1, srcvA, srcvB, srcvC, srcvD;
    register vector unsigned short Av, Bv, Cv, Dv, rounderV, tempA, tempB, tempC, tempD;
    int i;
    unsigned long dst_odd = (unsigned long)dst & 0x0000000F;
    unsigned long src_really_odd = (unsigned long)src & 0x0000000F;
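    // The *_odd variables above hold the low four address bits, i.e. the
    // offset of dst/src within their 16-byte-aligned blocks; they drive the
    // unaligned-access handling below.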

    tempA = vec_ld(0, (unsigned short*)ABCD);
    Av = vec_splat(tempA, 0);
    Bv = vec_splat(tempA, 1);
    Cv = vec_splat(tempA, 2);
    Dv = vec_splat(tempA, 3);

    rounderV = vec_splat((vec_u16)vec_lde(0, &rounder_a), 0);
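    // Each weight and the rounder are broadcast to all eight 16-bit lanes,
    // so every vec_mladd below handles 8 output pixels at once.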

    // we'll be able to pick up our 9 char elements
    // at src from those 32 bytes
    // we load the first batch here, as inside the loop
    // we can reuse 'src + stride' from one iteration
    // as the 'src' of the next.
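    // vec_ld ignores the low four address bits and loads an aligned 16-byte
    // block, so two loads plus a vec_perm with the vec_lvsl mask reassemble
    // the (possibly unaligned) 16 bytes starting at src.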
    src_0 = vec_ld(0, src);
    src_1 = vec_ld(16, src);
    srcvA = vec_perm(src_0, src_1, vec_lvsl(0, src));

    if (src_really_odd != 0x0000000F) {
        // if src & 0xF == 0xF, then (src+1) is properly aligned
        // on the second vector.
        srcvB = vec_perm(src_0, src_1, vec_lvsl(1, src));
    } else {
        srcvB = src_1;
    }
    srcvA = vec_mergeh(vczero, srcvA);
    srcvB = vec_mergeh(vczero, srcvB);
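    // vec_mergeh with a zero vector interleaves 0x00 before each of the first
    // 8 source bytes, zero-extending them to the unsigned shorts that
    // vec_mladd expects.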

    for(i=0; i<h; i++) {
        dst_odd = (unsigned long)dst & 0x0000000F;
        src_really_odd = (((unsigned long)src) + stride) & 0x0000000F;

        dstv = vec_ld(0, dst);
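        // dst is only 8-byte aligned, so load the whole 16-byte block it
        // belongs to; the untouched half is merged back in before the store.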

        // we'll be able to pick up our 9 char elements
        // at src + stride from those 32 bytes
        // then reuse the resulting 2 vectors srcvC and srcvD
        // as the next srcvA and srcvB
        src_0 = vec_ld(stride + 0, src);
        src_1 = vec_ld(stride + 16, src);
        srcvC = vec_perm(src_0, src_1, vec_lvsl(stride + 0, src));

        if (src_really_odd != 0x0000000F) {
            // if src & 0xF == 0xF, then (src+1) is properly aligned
            // on the second vector.
            srcvD = vec_perm(src_0, src_1, vec_lvsl(stride + 1, src));
        } else {
            srcvD = src_1;
        }

        srcvC = vec_mergeh(vczero, srcvC);
        srcvD = vec_mergeh(vczero, srcvD);

        // OK, now we (finally) do the math :-)
        // these four instructions replace 32 int muls & 32 int adds.
        // isn't AltiVec nice?
        tempA = vec_mladd((vector unsigned short)srcvA, Av, rounderV);
        tempB = vec_mladd((vector unsigned short)srcvB, Bv, tempA);
        tempC = vec_mladd((vector unsigned short)srcvC, Cv, tempB);
        tempD = vec_mladd((vector unsigned short)srcvD, Dv, tempC);
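        // The chain accumulates A*srcvA + B*srcvB + C*srcvC + D*srcvD + rounder
        // per 16-bit lane; since the weights sum to 256, the result still fits
        // in an unsigned short.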

        srcvA = srcvC;
        srcvB = srcvD;

        tempD = vec_sr(tempD, vcsr8);

        dstv2 = vec_pack(tempD, (vector unsigned short)vczero);
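        // Shifting right by 8 divides by the total weight (256); vec_pack then
        // narrows the eight 16-bit results to bytes in one half of dstv2.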

        if (dst_odd) {
            dstv2 = vec_perm(dstv, dstv2, vcprm(0,1,s0,s1));
        } else {
            dstv2 = vec_perm(dstv, dstv2, vcprm(s0,s1,2,3));
        }
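        // Only 8 output bytes are produced per row, so merge them into the
        // half of the aligned 16-byte destination block that dst points into,
        // keeping the existing bytes in the other half.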

        vec_st(dstv2, 0, dst);

        dst += stride;
        src += stride;
    }
}
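
/* Note: this routine is meant to be installed as the DSPContext gmc1 function
 * pointer by the PPC initialisation code when AltiVec is available, replacing
 * the scalar gmc1 implementation; callers go through DSPContext rather than
 * calling it directly. */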