/*
 * Alpha optimized DSP utils
 * Copyright (c) 2002 Falk Hueffner <falk@debian.org>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */

#include "asm.h"
#include "../dsputil.h"

void simple_idct_axp(DCTELEM *block);

void put_pixels_axp_asm(uint8_t *block, const uint8_t *pixels,
                        int line_size, int h);
void put_pixels_clamped_mvi_asm(const DCTELEM *block, uint8_t *pixels,
                                int line_size);
void add_pixels_clamped_mvi_asm(const DCTELEM *block, uint8_t *pixels,
                                int line_size);

void get_pixels_mvi(DCTELEM *restrict block,
                    const uint8_t *restrict pixels, int line_size);
void diff_pixels_mvi(DCTELEM *block, const uint8_t *s1, const uint8_t *s2,
                     int stride);
int pix_abs8x8_mvi(uint8_t *pix1, uint8_t *pix2, int line_size);
int pix_abs16x16_mvi(uint8_t *pix1, uint8_t *pix2, int line_size);
int pix_abs16x16_x2_mvi(uint8_t *pix1, uint8_t *pix2, int line_size);
int pix_abs16x16_y2_mvi(uint8_t *pix1, uint8_t *pix2, int line_size);
int pix_abs16x16_xy2_mvi(uint8_t *pix1, uint8_t *pix2, int line_size);
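
/* The *_mvi routines above rely on the Alpha MVI (motion video
   instructions) extension; dsputil_init_alpha below installs them
   only when amask(AMASK_MVI) == 0, i.e. when MVI is actually
   present.  */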

#if 0
/* These functions were the base for the optimized assembler routines,
   and remain here for documentation purposes.  */
static void put_pixels_clamped_mvi(const DCTELEM *block, uint8_t *pixels,
                                   int line_size)
{
    int i = 8;
    uint64_t clampmask = zap(-1, 0xaa); /* 0x00ff00ff00ff00ff */

    ASM_ACCEPT_MVI;

    do {
        uint64_t shorts0, shorts1;

        shorts0 = ldq(block);
        shorts0 = maxsw4(shorts0, 0);
        shorts0 = minsw4(shorts0, clampmask);
        stl(pkwb(shorts0), pixels);

        shorts1 = ldq(block + 4);
        shorts1 = maxsw4(shorts1, 0);
        shorts1 = minsw4(shorts1, clampmask);
        stl(pkwb(shorts1), pixels + 4);

        pixels += line_size;
        block += 8;
    } while (--i);
}

void add_pixels_clamped_mvi(const DCTELEM *block, uint8_t *pixels,
                            int line_size)
{
    int h = 8;
    /* Keep this function a leaf function by generating the constants
       manually (mainly for the hack value ;-).  */
    uint64_t clampmask = zap(-1, 0xaa); /* 0x00ff00ff00ff00ff */
    uint64_t signmask  = zap(-1, 0x33);
    signmask ^= signmask >> 1;  /* 0x8000800080008000 */

    ASM_ACCEPT_MVI;

    do {
        uint64_t shorts0, pix0, signs0;
        uint64_t shorts1, pix1, signs1;

        shorts0 = ldq(block);
        shorts1 = ldq(block + 4);

        pix0    = unpkbw(ldl(pixels));
        /* Signed subword add (MMX paddw).  */
        signs0  = shorts0 & signmask;
        shorts0 &= ~signmask;
        shorts0 += pix0;
        shorts0 ^= signs0;
        /* Clamp.  */
        shorts0 = maxsw4(shorts0, 0);
        shorts0 = minsw4(shorts0, clampmask);

        /* Next 4.  */
        pix1    = unpkbw(ldl(pixels + 4));
        signs1  = shorts1 & signmask;
        shorts1 &= ~signmask;
        shorts1 += pix1;
        shorts1 ^= signs1;
        shorts1 = maxsw4(shorts1, 0);
        shorts1 = minsw4(shorts1, clampmask);

        stl(pkwb(shorts0), pixels);
        stl(pkwb(shorts1), pixels + 4);

        pixels += line_size;
        block += 8;
    } while (--h);
}
#endif
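
#if 0
/* The subword-add trick spelled out, for documentation purposes: with
   the per-lane sign bits masked off, a single 64-bit addition cannot
   carry across 16-bit lane boundaries, and XORing the saved sign bits
   back in gives the same result as four independent 16-bit adds.
   paddw4 is an illustrative helper and is not used elsewhere.  */
static inline uint64_t paddw4(uint64_t a, uint64_t b)
{
    uint64_t signmask = zap(-1, 0x33);
    uint64_t signs;

    signmask ^= signmask >> 1;  /* 0x8000800080008000 */
    signs = (a ^ b) & signmask;
    return ((a & ~signmask) + (b & ~signmask)) ^ signs;
}
#endif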

static void clear_blocks_axp(DCTELEM *blocks)
{
    uint64_t *p = (uint64_t *) blocks;
    int n = sizeof(DCTELEM) * 6 * 64;
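
    /* Zero the six 64-coefficient blocks, eight quadwords (64 bytes)
       per iteration; n counts the remaining bytes.  */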
    do {
        p[0] = 0;
        p[1] = 0;
        p[2] = 0;
        p[3] = 0;
        p[4] = 0;
        p[5] = 0;
        p[6] = 0;
        p[7] = 0;
        p += 8;
        n -= 8 * 8;
    } while (n);
}

static inline uint64_t avg2_no_rnd(uint64_t a, uint64_t b)
{
    return (a & b) + (((a ^ b) & BYTE_VEC(0xfe)) >> 1);
}

static inline uint64_t avg2(uint64_t a, uint64_t b)
{
    return (a | b) - (((a ^ b) & BYTE_VEC(0xfe)) >> 1);
}
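
/* Both helpers average eight bytes at once via the carry-free
   identities
       (a + b) >> 1     == (a & b) + ((a ^ b) >> 1)
       (a + b + 1) >> 1 == (a | b) - ((a ^ b) >> 1),
   applied per byte lane; masking the XOR with BYTE_VEC(0xfe) before
   the shift keeps bit 0 of each lane from leaking into bit 7 of the
   lane below.  */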

#if 0
/* The XY2 routines basically utilize this scheme, but reuse parts in
   each iteration.  */
static inline uint64_t avg4(uint64_t l1, uint64_t l2, uint64_t l3, uint64_t l4)
{
    uint64_t r1 = ((l1 & ~BYTE_VEC(0x03)) >> 2)
                + ((l2 & ~BYTE_VEC(0x03)) >> 2)
                + ((l3 & ~BYTE_VEC(0x03)) >> 2)
                + ((l4 & ~BYTE_VEC(0x03)) >> 2);
    uint64_t r2 = ((  (l1 & BYTE_VEC(0x03))
                    + (l2 & BYTE_VEC(0x03))
                    + (l3 & BYTE_VEC(0x03))
                    + (l4 & BYTE_VEC(0x03))
                    + BYTE_VEC(0x02)) >> 2) & BYTE_VEC(0x03);
    return r1 + r2;
}
#endif
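
/* avg4 splits each byte into its high six bits, pre-shifted so the
   four-way sum cannot overflow the lane, and its low two bits, whose
   exact sum (at most 12, plus the rounder) still fits in the
   eight-bit lane before being folded back in.  */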

#define OP(LOAD, STORE)                         \
    do {                                        \
        STORE(LOAD(pixels), block);             \
        pixels += line_size;                    \
        block += line_size;                     \
    } while (--h)

#define OP_X2(LOAD, STORE)                                      \
    do {                                                        \
        uint64_t pix1, pix2;                                    \
                                                                \
        pix1 = LOAD(pixels);                                    \
        pix2 = pix1 >> 8 | ((uint64_t) pixels[8] << 56);        \
        STORE(AVG2(pix1, pix2), block);                         \
        pixels += line_size;                                    \
        block += line_size;                                     \
    } while (--h)
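
/* In OP_X2 (and OP_XY2 below), pix2 holds pixels[1..8]: on
   little-endian Alpha, pix1 >> 8 discards pixels[0] and ORing in
   (uint64_t) pixels[8] << 56 appends the ninth pixel, so
   AVG2(pix1, pix2) averages each pixel with its right-hand
   neighbour, i.e. half-pel interpolation in x.  */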

#define OP_Y2(LOAD, STORE)                      \
    do {                                        \
        uint64_t pix = LOAD(pixels);            \
        do {                                    \
            uint64_t next_pix;                  \
                                                \
            pixels += line_size;                \
            next_pix = LOAD(pixels);            \
            STORE(AVG2(pix, next_pix), block);  \
            block += line_size;                 \
            pix = next_pix;                     \
        } while (--h);                          \
    } while (0)

#define OP_XY2(LOAD, STORE)                                                 \
    do {                                                                    \
        uint64_t pix1 = LOAD(pixels);                                       \
        uint64_t pix2 = pix1 >> 8 | ((uint64_t) pixels[8] << 56);           \
        uint64_t pix_l = (pix1 & BYTE_VEC(0x03))                            \
                       + (pix2 & BYTE_VEC(0x03));                           \
        uint64_t pix_h = ((pix1 & ~BYTE_VEC(0x03)) >> 2)                    \
                       + ((pix2 & ~BYTE_VEC(0x03)) >> 2);                   \
                                                                            \
        do {                                                                \
            uint64_t npix1, npix2;                                          \
            uint64_t npix_l, npix_h;                                        \
            uint64_t avg;                                                   \
                                                                            \
            pixels += line_size;                                            \
            npix1 = LOAD(pixels);                                           \
            npix2 = npix1 >> 8 | ((uint64_t) pixels[8] << 56);              \
            npix_l = (npix1 & BYTE_VEC(0x03))                               \
                   + (npix2 & BYTE_VEC(0x03));                              \
            npix_h = ((npix1 & ~BYTE_VEC(0x03)) >> 2)                       \
                   + ((npix2 & ~BYTE_VEC(0x03)) >> 2);                      \
            avg = (((pix_l + npix_l + AVG4_ROUNDER) >> 2) & BYTE_VEC(0x03)) \
                + pix_h + npix_h;                                           \
            STORE(avg, block);                                              \
                                                                            \
            block += line_size;                                             \
            pix_l = npix_l;                                                 \
            pix_h = npix_h;                                                 \
        } while (--h);                                                      \
    } while (0)
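
/* OP_XY2 applies the avg4 scheme from above, but carries the previous
   row's partial sums (pix_l, pix_h) across iterations so each row of
   pixels is loaded and split only once.  */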

#define MAKE_OP(OPNAME, SUFF, OPKIND, STORE)                                \
static void OPNAME ## _pixels ## SUFF ## _axp                               \
        (uint8_t *restrict block, const uint8_t *restrict pixels,           \
         int line_size, int h)                                              \
{                                                                           \
    if ((size_t) pixels & 0x7) {                                            \
        OPKIND(uldq, STORE);                                                \
    } else {                                                                \
        OPKIND(ldq, STORE);                                                 \
    }                                                                       \
}                                                                           \
                                                                            \
static void OPNAME ## _pixels16 ## SUFF ## _axp                             \
        (uint8_t *restrict block, const uint8_t *restrict pixels,           \
         int line_size, int h)                                              \
{                                                                           \
    OPNAME ## _pixels ## SUFF ## _axp(block,     pixels,     line_size, h); \
    OPNAME ## _pixels ## SUFF ## _axp(block + 8, pixels + 8, line_size, h); \
}

#define PIXOP(OPNAME, STORE)                    \
    MAKE_OP(OPNAME, ,     OP,     STORE)        \
    MAKE_OP(OPNAME, _x2,  OP_X2,  STORE)        \
    MAKE_OP(OPNAME, _y2,  OP_Y2,  STORE)        \
    MAKE_OP(OPNAME, _xy2, OP_XY2, STORE)
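
/* Each MAKE_OP instantiation dispatches on source alignment (uldq for
   unaligned loads, ldq for aligned ones).  PIXOP(put, STORE), for
   example, expands into put_pixels_axp, put_pixels_x2_axp,
   put_pixels_y2_axp and put_pixels_xy2_axp plus their 16-pixel-wide
   counterparts put_pixels16_axp, put_pixels16_x2_axp, and so on.  */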

/* Rounding primitives.  */
#define AVG2 avg2
#define AVG4 avg4
#define AVG4_ROUNDER BYTE_VEC(0x02)
#define STORE(l, b) stq(l, b)
PIXOP(put, STORE);

#undef STORE
#define STORE(l, b) stq(AVG2(l, ldq(b)), b);
PIXOP(avg, STORE);

/* Non-rounding primitives.  */
#undef AVG2
#undef AVG4
#undef AVG4_ROUNDER
#undef STORE
#define AVG2 avg2_no_rnd
#define AVG4 avg4_no_rnd
#define AVG4_ROUNDER BYTE_VEC(0x01)
#define STORE(l, b) stq(l, b)
PIXOP(put_no_rnd, STORE);

#undef STORE
#define STORE(l, b) stq(AVG2(l, ldq(b)), b);
PIXOP(avg_no_rnd, STORE);
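
/* For the put variants STORE is a plain quadword store; for the avg
   variants it first averages the new value with the eight bytes
   already at the destination via AVG2.  */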

void put_pixels16_axp_asm(uint8_t *block, const uint8_t *pixels,
                          int line_size, int h)
{
    put_pixels_axp_asm(block,     pixels,     line_size, h);
    put_pixels_axp_asm(block + 8, pixels + 8, line_size, h);
}

void dsputil_init_alpha(void)
{
    put_pixels_tab[0][0] = put_pixels16_axp_asm;
    put_pixels_tab[0][1] = put_pixels16_x2_axp;
    put_pixels_tab[0][2] = put_pixels16_y2_axp;
    put_pixels_tab[0][3] = put_pixels16_xy2_axp;

    put_no_rnd_pixels_tab[0][0] = put_pixels16_axp_asm;
    put_no_rnd_pixels_tab[0][1] = put_no_rnd_pixels16_x2_axp;
    put_no_rnd_pixels_tab[0][2] = put_no_rnd_pixels16_y2_axp;
    put_no_rnd_pixels_tab[0][3] = put_no_rnd_pixels16_xy2_axp;

    avg_pixels_tab[0][0] = avg_pixels16_axp;
    avg_pixels_tab[0][1] = avg_pixels16_x2_axp;
    avg_pixels_tab[0][2] = avg_pixels16_y2_axp;
    avg_pixels_tab[0][3] = avg_pixels16_xy2_axp;

    avg_no_rnd_pixels_tab[0][0] = avg_no_rnd_pixels16_axp;
    avg_no_rnd_pixels_tab[0][1] = avg_no_rnd_pixels16_x2_axp;
    avg_no_rnd_pixels_tab[0][2] = avg_no_rnd_pixels16_y2_axp;
    avg_no_rnd_pixels_tab[0][3] = avg_no_rnd_pixels16_xy2_axp;

    put_pixels_tab[1][0] = put_pixels_axp_asm;
    put_pixels_tab[1][1] = put_pixels_x2_axp;
    put_pixels_tab[1][2] = put_pixels_y2_axp;
    put_pixels_tab[1][3] = put_pixels_xy2_axp;

    put_no_rnd_pixels_tab[1][0] = put_pixels_axp_asm;
    put_no_rnd_pixels_tab[1][1] = put_no_rnd_pixels_x2_axp;
    put_no_rnd_pixels_tab[1][2] = put_no_rnd_pixels_y2_axp;
    put_no_rnd_pixels_tab[1][3] = put_no_rnd_pixels_xy2_axp;

    avg_pixels_tab[1][0] = avg_pixels_axp;
    avg_pixels_tab[1][1] = avg_pixels_x2_axp;
    avg_pixels_tab[1][2] = avg_pixels_y2_axp;
    avg_pixels_tab[1][3] = avg_pixels_xy2_axp;

    avg_no_rnd_pixels_tab[1][0] = avg_no_rnd_pixels_axp;
    avg_no_rnd_pixels_tab[1][1] = avg_no_rnd_pixels_x2_axp;
    avg_no_rnd_pixels_tab[1][2] = avg_no_rnd_pixels_y2_axp;
    avg_no_rnd_pixels_tab[1][3] = avg_no_rnd_pixels_xy2_axp;

    clear_blocks = clear_blocks_axp;

    /* amask clears all bits that correspond to present features.  */
    if (amask(AMASK_MVI) == 0) {
        put_pixels_clamped = put_pixels_clamped_mvi_asm;
        add_pixels_clamped = add_pixels_clamped_mvi_asm;

        get_pixels       = get_pixels_mvi;
        diff_pixels      = diff_pixels_mvi;
        pix_abs8x8       = pix_abs8x8_mvi;
        pix_abs16x16     = pix_abs16x16_mvi;
        pix_abs16x16_x2  = pix_abs16x16_x2_mvi;
        pix_abs16x16_y2  = pix_abs16x16_y2_mvi;
        pix_abs16x16_xy2 = pix_abs16x16_xy2_mvi;
    }
}