Revision a6b4448c libavcodec/ppc/int_altivec.c

View differences:

libavcodec/ppc/int_altivec.c
@@ -79,10 +79,10 @@
 static void add_int16_altivec(int16_t * v1, int16_t * v2, int order)
 {
     int i;
-    register vec_s16_t vec, *pv;
+    register vec_s16 vec, *pv;
 
     for(i = 0; i < order; i += 8){
-        pv = (vec_s16_t*)v2;
+        pv = (vec_s16*)v2;
         vec = vec_perm(pv[0], pv[1], vec_lvsl(0, v2));
         vec_st(vec_add(vec_ld(0, v1), vec), 0, v1);
         v1 += 8;
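add_int16_altivec processes eight int16_t values per iteration: v1 is read and stored with plain vec_ld/vec_st (so it is assumed 16-byte aligned), while v2 goes through vec_lvsl/vec_perm and may be unaligned. As a reference for what the routine computes, a scalar sketch (the name add_int16_scalar and the const qualifier are illustrative, not from the file):

    /* Scalar sketch of add_int16_altivec: add v2 into v1 in place.
     * order is assumed to be a multiple of 8, as the vector loop requires. */
    static void add_int16_scalar(int16_t *v1, const int16_t *v2, int order)
    {
        int i;
        for (i = 0; i < order; i++)
            v1[i] += v2[i];
    }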
@@ -93,10 +93,10 @@
 static void sub_int16_altivec(int16_t * v1, int16_t * v2, int order)
 {
     int i;
-    register vec_s16_t vec, *pv;
+    register vec_s16 vec, *pv;
 
     for(i = 0; i < order; i += 8){
-        pv = (vec_s16_t*)v2;
+        pv = (vec_s16*)v2;
         vec = vec_perm(pv[0], pv[1], vec_lvsl(0, v2));
         vec_st(vec_sub(vec_ld(0, v1), vec), 0, v1);
         v1 += 8;
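sub_int16_altivec is the same pattern with vec_sub in place of vec_add. The pv[0]/pv[1] pair merged by vec_perm(..., vec_lvsl(0, v2)) in both loops is the standard AltiVec unaligned-load idiom; written out on its own it looks roughly like the helper below (the helper name is made up, and <altivec.h> is assumed):

    #include <altivec.h>

    /* Illustrative helper showing the unaligned-load idiom used above:
     * lvx-style loads drop the low 4 address bits, so two aligned loads
     * are merged with a permute vector derived from the pointer's offset. */
    static vector signed short load_unaligned_s16(const int16_t *p)
    {
        vector signed short hi = vec_ld(0, p);   /* aligned block holding p           */
        vector signed short lo = vec_ld(15, p);  /* aligned block holding p + 15 B    */
        return vec_perm(hi, lo, vec_lvsl(0, p)); /* select the 16 bytes starting at p */
    }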
@@ -108,9 +108,9 @@
 {
     int i;
     LOAD_ZERO;
-    register vec_s16_t vec1, *pv;
-    register vec_s32_t res = vec_splat_s32(0), t;
-    register vec_u32_t shifts;
+    register vec_s16 vec1, *pv;
+    register vec_s32 res = vec_splat_s32(0), t;
+    register vec_u32 shifts;
     DECLARE_ALIGNED_16(int32_t, ires);
 
     shifts = zero_u32v;
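The whole revision is this mechanical rename: the vec_s16_t, vec_s32_t and vec_u32_t shorthands become vec_s16, vec_s32 and vec_u32, presumably to drop the _t suffix that POSIX reserves. The shorthands themselves live in the AltiVec utility header, which is not part of this diff; after the rename they would expand roughly as below (a sketch, shown as macros, which is itself an assumption):

    /* Assumed post-rename definitions (e.g. in util_altivec.h); illustrative only. */
    #define vec_s16 vector signed short
    #define vec_s32 vector signed int
    #define vec_u32 vector unsigned int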
@@ -121,7 +121,7 @@
     if(shift & 0x01) shifts = vec_add(shifts, vec_splat_u32(0x01));
 
     for(i = 0; i < order; i += 8){
-        pv = (vec_s16_t*)v1;
+        pv = (vec_s16*)v1;
         vec1 = vec_perm(pv[0], pv[1], vec_lvsl(0, v1));
         t = vec_msum(vec1, vec_ld(0, v2), zero_s32v);
         t = vec_sr(t, shifts);
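The last two hunks belong to a dot-product routine whose signature falls outside the changed lines, so the name and prototype below are assumptions; shift, however, is the parameter tested by the if(shift & ...) lines above. Per iteration, vec_msum fills each 32-bit lane of t with the sum of two adjacent products, vec_sr shifts every lane right by shift, and the result is presumably folded into res further down, outside the hunk. A scalar sketch of that per-pair arithmetic:

    /* Scalar sketch of the vector loop above: shift each pair-sum of products
     * before accumulating.  Name and signature are illustrative; order is
     * assumed to be a multiple of 8 as in the vector code. */
    static int32_t scalarproduct_shifted(const int16_t *v1, const int16_t *v2,
                                         int order, int shift)
    {
        int32_t res = 0;
        int i;
        for (i = 0; i < order; i += 2)
            res += (v1[i] * v2[i] + v1[i + 1] * v2[i + 1]) >> shift;
        return res;
    }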
