Statistics
| Branch: | Revision:

ffmpeg / libavcodec / h264pred_template.c @ 5ada2524

History | View | Annotate | Download (29.5 KB)

1
/*
 * H.26L/H.264/AVC/JVT/14496-10/... encoder/decoder
 * Copyright (c) 2003-2011 Michael Niedermayer <michaelni@gmx.at>
 *
 * This file is part of Libav.
 *
 * Libav is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * Libav is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with Libav; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */
21

    
22
/**
 * @file
 * H.264 / AVC / MPEG4 part10 prediction functions.
 * @author Michael Niedermayer <michaelni@gmx.at>
 */
27

    
28
#include "mathops.h"
29
#include "dsputil.h"
30

    
31
/* 4x4 vertical prediction: replicate the row of samples directly above
 * the block into all four rows. */
static void pred4x4_vertical_c(uint8_t *src, const uint8_t *topright, int stride){
    const uint32_t top= ((uint32_t*)(src-stride))[0];
    int y;

    for(y=0; y<4; y++)
        ((uint32_t*)(src+y*stride))[0]= top;
}
38

    
39
/* 4x4 horizontal prediction: replicate each left-neighbour sample across
 * its row.  The splat constant is unsigned: pixel*0x01010101 with a signed
 * constant overflows int for pixel values >= 128 (undefined behavior). */
static void pred4x4_horizontal_c(uint8_t *src, const uint8_t *topright, int stride){
    ((uint32_t*)(src+0*stride))[0]= src[-1+0*stride]*0x01010101U;
    ((uint32_t*)(src+1*stride))[0]= src[-1+1*stride]*0x01010101U;
    ((uint32_t*)(src+2*stride))[0]= src[-1+2*stride]*0x01010101U;
    ((uint32_t*)(src+3*stride))[0]= src[-1+3*stride]*0x01010101U;
}
45

    
46
/* 4x4 DC prediction: fill the block with the rounded mean of the four top
 * and four left neighbours.  Unsigned splat constant avoids signed int
 * overflow (UB) for dc >= 128. */
static void pred4x4_dc_c(uint8_t *src, const uint8_t *topright, int stride){
    const int dc= (  src[-stride] + src[1-stride] + src[2-stride] + src[3-stride]
                   + src[-1+0*stride] + src[-1+1*stride] + src[-1+2*stride] + src[-1+3*stride] + 4) >>3;

    ((uint32_t*)(src+0*stride))[0]=
    ((uint32_t*)(src+1*stride))[0]=
    ((uint32_t*)(src+2*stride))[0]=
    ((uint32_t*)(src+3*stride))[0]= dc* 0x01010101U;
}
55

    
56
/* 4x4 DC prediction using only the four left neighbours (top unavailable).
 * Unsigned splat constant avoids signed int overflow (UB) for dc >= 128. */
static void pred4x4_left_dc_c(uint8_t *src, const uint8_t *topright, int stride){
    const int dc= (  src[-1+0*stride] + src[-1+1*stride] + src[-1+2*stride] + src[-1+3*stride] + 2) >>2;

    ((uint32_t*)(src+0*stride))[0]=
    ((uint32_t*)(src+1*stride))[0]=
    ((uint32_t*)(src+2*stride))[0]=
    ((uint32_t*)(src+3*stride))[0]= dc* 0x01010101U;
}
64

    
65
/* 4x4 DC prediction using only the four top neighbours (left unavailable).
 * Unsigned splat constant avoids signed int overflow (UB) for dc >= 128. */
static void pred4x4_top_dc_c(uint8_t *src, const uint8_t *topright, int stride){
    const int dc= (  src[-stride] + src[1-stride] + src[2-stride] + src[3-stride] + 2) >>2;

    ((uint32_t*)(src+0*stride))[0]=
    ((uint32_t*)(src+1*stride))[0]=
    ((uint32_t*)(src+2*stride))[0]=
    ((uint32_t*)(src+3*stride))[0]= dc* 0x01010101U;
}
73

    
74
/* Fill the 4x4 block with the mid-grey value 128 (no neighbours available). */
static void pred4x4_128_dc_c(uint8_t *src, const uint8_t *topright, int stride){
    int y;

    for(y=0; y<4; y++)
        ((uint32_t*)(src+y*stride))[0]= 128U*0x01010101U;
}
80

    
81
/* Fill the 4x4 block with 127 (bias variant used when edges are missing). */
static void pred4x4_127_dc_c(uint8_t *src, const uint8_t *topright, int stride){
    int y;

    for(y=0; y<4; y++)
        ((uint32_t*)(src+y*stride))[0]= 127U*0x01010101U;
}
87

    
88
/* Fill the 4x4 block with 129 (bias variant used when edges are missing). */
static void pred4x4_129_dc_c(uint8_t *src, const uint8_t *topright, int stride){
    int y;

    for(y=0; y<4; y++)
        ((uint32_t*)(src+y*stride))[0]= 129U*0x01010101U;
}
94

    
95

    
96
/* Helpers loading neighbouring decoded samples into local ints.
 * av_unused suppresses warnings in predictors that need only some of them. */
#define LOAD_TOP_RIGHT_EDGE\
    const int av_unused t4= topright[0];\
    const int av_unused t5= topright[1];\
    const int av_unused t6= topright[2];\
    const int av_unused t7= topright[3];\

#define LOAD_DOWN_LEFT_EDGE\
    const int av_unused l4= src[-1+4*stride];\
    const int av_unused l5= src[-1+5*stride];\
    const int av_unused l6= src[-1+6*stride];\
    const int av_unused l7= src[-1+7*stride];\

#define LOAD_LEFT_EDGE\
    const int av_unused l0= src[-1+0*stride];\
    const int av_unused l1= src[-1+1*stride];\
    const int av_unused l2= src[-1+2*stride];\
    const int av_unused l3= src[-1+3*stride];\

#define LOAD_TOP_EDGE\
    const int av_unused t0= src[ 0-1*stride];\
    const int av_unused t1= src[ 1-1*stride];\
    const int av_unused t2= src[ 2-1*stride];\
    const int av_unused t3= src[ 3-1*stride];\

119

    
120
static void pred4x4_down_right_c(uint8_t *src, const uint8_t *topright, int stride){
121
    const int lt= src[-1-1*stride];
122
    LOAD_TOP_EDGE
123
    LOAD_LEFT_EDGE
124

    
125
    src[0+3*stride]=(l3 + 2*l2 + l1 + 2)>>2;
126
    src[0+2*stride]=
127
    src[1+3*stride]=(l2 + 2*l1 + l0 + 2)>>2;
128
    src[0+1*stride]=
129
    src[1+2*stride]=
130
    src[2+3*stride]=(l1 + 2*l0 + lt + 2)>>2;
131
    src[0+0*stride]=
132
    src[1+1*stride]=
133
    src[2+2*stride]=
134
    src[3+3*stride]=(l0 + 2*lt + t0 + 2)>>2;
135
    src[1+0*stride]=
136
    src[2+1*stride]=
137
    src[3+2*stride]=(lt + 2*t0 + t1 + 2)>>2;
138
    src[2+0*stride]=
139
    src[3+1*stride]=(t0 + 2*t1 + t2 + 2)>>2;
140
    src[3+0*stride]=(t1 + 2*t2 + t3 + 2)>>2;
141
}
142

    
143
static void pred4x4_down_left_c(uint8_t *src, const uint8_t *topright, int stride){
144
    LOAD_TOP_EDGE
145
    LOAD_TOP_RIGHT_EDGE
146
//    LOAD_LEFT_EDGE
147

    
148
    src[0+0*stride]=(t0 + t2 + 2*t1 + 2)>>2;
149
    src[1+0*stride]=
150
    src[0+1*stride]=(t1 + t3 + 2*t2 + 2)>>2;
151
    src[2+0*stride]=
152
    src[1+1*stride]=
153
    src[0+2*stride]=(t2 + t4 + 2*t3 + 2)>>2;
154
    src[3+0*stride]=
155
    src[2+1*stride]=
156
    src[1+2*stride]=
157
    src[0+3*stride]=(t3 + t5 + 2*t4 + 2)>>2;
158
    src[3+1*stride]=
159
    src[2+2*stride]=
160
    src[1+3*stride]=(t4 + t6 + 2*t5 + 2)>>2;
161
    src[3+2*stride]=
162
    src[2+3*stride]=(t5 + t7 + 2*t6 + 2)>>2;
163
    src[3+3*stride]=(t6 + 3*t7 + 2)>>2;
164
}
165

    
166
static void pred4x4_vertical_right_c(uint8_t *src, const uint8_t *topright, int stride){
167
    const int lt= src[-1-1*stride];
168
    LOAD_TOP_EDGE
169
    LOAD_LEFT_EDGE
170

    
171
    src[0+0*stride]=
172
    src[1+2*stride]=(lt + t0 + 1)>>1;
173
    src[1+0*stride]=
174
    src[2+2*stride]=(t0 + t1 + 1)>>1;
175
    src[2+0*stride]=
176
    src[3+2*stride]=(t1 + t2 + 1)>>1;
177
    src[3+0*stride]=(t2 + t3 + 1)>>1;
178
    src[0+1*stride]=
179
    src[1+3*stride]=(l0 + 2*lt + t0 + 2)>>2;
180
    src[1+1*stride]=
181
    src[2+3*stride]=(lt + 2*t0 + t1 + 2)>>2;
182
    src[2+1*stride]=
183
    src[3+3*stride]=(t0 + 2*t1 + t2 + 2)>>2;
184
    src[3+1*stride]=(t1 + 2*t2 + t3 + 2)>>2;
185
    src[0+2*stride]=(lt + 2*l0 + l1 + 2)>>2;
186
    src[0+3*stride]=(l0 + 2*l1 + l2 + 2)>>2;
187
}
188

    
189
static void pred4x4_vertical_left_c(uint8_t *src, const uint8_t *topright, int stride){
190
    LOAD_TOP_EDGE
191
    LOAD_TOP_RIGHT_EDGE
192

    
193
    src[0+0*stride]=(t0 + t1 + 1)>>1;
194
    src[1+0*stride]=
195
    src[0+2*stride]=(t1 + t2 + 1)>>1;
196
    src[2+0*stride]=
197
    src[1+2*stride]=(t2 + t3 + 1)>>1;
198
    src[3+0*stride]=
199
    src[2+2*stride]=(t3 + t4+ 1)>>1;
200
    src[3+2*stride]=(t4 + t5+ 1)>>1;
201
    src[0+1*stride]=(t0 + 2*t1 + t2 + 2)>>2;
202
    src[1+1*stride]=
203
    src[0+3*stride]=(t1 + 2*t2 + t3 + 2)>>2;
204
    src[2+1*stride]=
205
    src[1+3*stride]=(t2 + 2*t3 + t4 + 2)>>2;
206
    src[3+1*stride]=
207
    src[2+3*stride]=(t3 + 2*t4 + t5 + 2)>>2;
208
    src[3+3*stride]=(t4 + 2*t5 + t6 + 2)>>2;
209
}
210

    
211
static void pred4x4_horizontal_up_c(uint8_t *src, const uint8_t *topright, int stride){
212
    LOAD_LEFT_EDGE
213

    
214
    src[0+0*stride]=(l0 + l1 + 1)>>1;
215
    src[1+0*stride]=(l0 + 2*l1 + l2 + 2)>>2;
216
    src[2+0*stride]=
217
    src[0+1*stride]=(l1 + l2 + 1)>>1;
218
    src[3+0*stride]=
219
    src[1+1*stride]=(l1 + 2*l2 + l3 + 2)>>2;
220
    src[2+1*stride]=
221
    src[0+2*stride]=(l2 + l3 + 1)>>1;
222
    src[3+1*stride]=
223
    src[1+2*stride]=(l2 + 2*l3 + l3 + 2)>>2;
224
    src[3+2*stride]=
225
    src[1+3*stride]=
226
    src[0+3*stride]=
227
    src[2+2*stride]=
228
    src[2+3*stride]=
229
    src[3+3*stride]=l3;
230
}
231

    
232
static void pred4x4_horizontal_down_c(uint8_t *src, const uint8_t *topright, int stride){
233
    const int lt= src[-1-1*stride];
234
    LOAD_TOP_EDGE
235
    LOAD_LEFT_EDGE
236

    
237
    src[0+0*stride]=
238
    src[2+1*stride]=(lt + l0 + 1)>>1;
239
    src[1+0*stride]=
240
    src[3+1*stride]=(l0 + 2*lt + t0 + 2)>>2;
241
    src[2+0*stride]=(lt + 2*t0 + t1 + 2)>>2;
242
    src[3+0*stride]=(t0 + 2*t1 + t2 + 2)>>2;
243
    src[0+1*stride]=
244
    src[2+2*stride]=(l0 + l1 + 1)>>1;
245
    src[1+1*stride]=
246
    src[3+2*stride]=(lt + 2*l0 + l1 + 2)>>2;
247
    src[0+2*stride]=
248
    src[2+3*stride]=(l1 + l2+ 1)>>1;
249
    src[1+2*stride]=
250
    src[3+3*stride]=(l0 + 2*l1 + l2 + 2)>>2;
251
    src[0+3*stride]=(l2 + l3 + 1)>>1;
252
    src[1+3*stride]=(l1 + 2*l2 + l3 + 2)>>2;
253
}
254

    
255
/* 16x16 vertical prediction: copy the 16 samples above into every row. */
static void pred16x16_vertical_c(uint8_t *src, int stride){
    int y;
    const uint32_t v0= ((uint32_t*)(src-stride))[0];
    const uint32_t v1= ((uint32_t*)(src-stride))[1];
    const uint32_t v2= ((uint32_t*)(src-stride))[2];
    const uint32_t v3= ((uint32_t*)(src-stride))[3];

    for(y=0; y<16; y++){
        ((uint32_t*)(src+y*stride))[0]= v0;
        ((uint32_t*)(src+y*stride))[1]= v1;
        ((uint32_t*)(src+y*stride))[2]= v2;
        ((uint32_t*)(src+y*stride))[3]= v3;
    }
}
269

    
270
/* 16x16 horizontal prediction: replicate each left-neighbour sample across
 * its row.  Unsigned splat constant: pixel*0x01010101 with a signed constant
 * overflows int for pixel values >= 128 (undefined behavior). */
static void pred16x16_horizontal_c(uint8_t *src, int stride){
    int i;

    for(i=0; i<16; i++){
        ((uint32_t*)(src+i*stride))[0]=
        ((uint32_t*)(src+i*stride))[1]=
        ((uint32_t*)(src+i*stride))[2]=
        ((uint32_t*)(src+i*stride))[3]= src[-1+i*stride]*0x01010101U;
    }
}
280

    
281
/* 16x16 DC prediction: fill the block with the rounded mean of the 16 top
 * and 16 left neighbours.  The splat is computed in uint32_t with an
 * unsigned constant: dc*0x01010101 in int overflows (UB) for dc >= 128. */
static void pred16x16_dc_c(uint8_t *src, int stride){
    int i, dc=0;
    uint32_t splat;

    for(i=0;i<16; i++){
        dc+= src[-1+i*stride];
    }

    for(i=0;i<16; i++){
        dc+= src[i-stride];
    }

    splat= 0x01010101U*((dc + 16)>>5);

    for(i=0; i<16; i++){
        ((uint32_t*)(src+i*stride))[0]=
        ((uint32_t*)(src+i*stride))[1]=
        ((uint32_t*)(src+i*stride))[2]=
        ((uint32_t*)(src+i*stride))[3]= splat;
    }
}
301

    
302
/* 16x16 DC prediction from the 16 left neighbours only (top unavailable).
 * Splat computed in uint32_t with an unsigned constant to avoid signed
 * overflow (UB) for dc >= 128. */
static void pred16x16_left_dc_c(uint8_t *src, int stride){
    int i, dc=0;
    uint32_t splat;

    for(i=0;i<16; i++){
        dc+= src[-1+i*stride];
    }

    splat= 0x01010101U*((dc + 8)>>4);

    for(i=0; i<16; i++){
        ((uint32_t*)(src+i*stride))[0]=
        ((uint32_t*)(src+i*stride))[1]=
        ((uint32_t*)(src+i*stride))[2]=
        ((uint32_t*)(src+i*stride))[3]= splat;
    }
}
318

    
319
/* 16x16 DC prediction from the 16 top neighbours only (left unavailable).
 * Splat computed in uint32_t with an unsigned constant to avoid signed
 * overflow (UB) for dc >= 128. */
static void pred16x16_top_dc_c(uint8_t *src, int stride){
    int i, dc=0;
    uint32_t splat;

    for(i=0;i<16; i++){
        dc+= src[i-stride];
    }
    splat= 0x01010101U*((dc + 8)>>4);

    for(i=0; i<16; i++){
        ((uint32_t*)(src+i*stride))[0]=
        ((uint32_t*)(src+i*stride))[1]=
        ((uint32_t*)(src+i*stride))[2]=
        ((uint32_t*)(src+i*stride))[3]= splat;
    }
}
334

    
335
/* Fill the 16x16 block with mid-grey 128 (no neighbours available). */
static void pred16x16_128_dc_c(uint8_t *src, int stride){
    int y, x;

    for(y=0; y<16; y++)
        for(x=0; x<4; x++)
            ((uint32_t*)(src+y*stride))[x]= 0x01010101U*128U;
}
345

    
346
/* Fill the 16x16 block with 127 (bias variant used when edges are missing). */
static void pred16x16_127_dc_c(uint8_t *src, int stride){
    int y, x;

    for(y=0; y<16; y++)
        for(x=0; x<4; x++)
            ((uint32_t*)(src+y*stride))[x]= 0x01010101U*127U;
}
356

    
357
/* Fill the 16x16 block with 129 (bias variant used when edges are missing). */
static void pred16x16_129_dc_c(uint8_t *src, int stride){
    int y, x;

    for(y=0; y<16; y++)
        for(x=0; x<4; x++)
            ((uint32_t*)(src+y*stride))[x]= 0x01010101U*129U;
}
367

    
368
static inline void pred16x16_plane_compat_c(uint8_t *src, int stride, const int svq3, const int rv40){
369
  int i, j, k;
370
  int a;
371
  uint8_t *cm = ff_cropTbl + MAX_NEG_CROP;
372
  const uint8_t * const src0 = src+7-stride;
373
  const uint8_t *src1 = src+8*stride-1;
374
  const uint8_t *src2 = src1-2*stride;      // == src+6*stride-1;
375
  int H = src0[1] - src0[-1];
376
  int V = src1[0] - src2[ 0];
377
  for(k=2; k<=8; ++k) {
378
    src1 += stride; src2 -= stride;
379
    H += k*(src0[k] - src0[-k]);
380
    V += k*(src1[0] - src2[ 0]);
381
  }
382
  if(svq3){
383
    H = ( 5*(H/4) ) / 16;
384
    V = ( 5*(V/4) ) / 16;
385

    
386
    /* required for 100% accuracy */
387
    i = H; H = V; V = i;
388
  }else if(rv40){
389
    H = ( H + (H>>2) ) >> 4;
390
    V = ( V + (V>>2) ) >> 4;
391
  }else{
392
    H = ( 5*H+32 ) >> 6;
393
    V = ( 5*V+32 ) >> 6;
394
  }
395

    
396
  a = 16*(src1[0] + src2[16] + 1) - 7*(V+H);
397
  for(j=16; j>0; --j) {
398
    int b = a;
399
    a += V;
400
    for(i=-16; i<0; i+=4) {
401
      src[16+i] = cm[ (b    ) >> 5 ];
402
      src[17+i] = cm[ (b+  H) >> 5 ];
403
      src[18+i] = cm[ (b+2*H) >> 5 ];
404
      src[19+i] = cm[ (b+3*H) >> 5 ];
405
      b += 4*H;
406
    }
407
    src += stride;
408
  }
409
}
410

    
411
/* Plain H.264 16x16 plane prediction (no SVQ3/RV40 slope tweaks). */
static void pred16x16_plane_c(uint8_t *src, int stride){
    pred16x16_plane_compat_c(src, stride, /*svq3=*/0, /*rv40=*/0);
}
414

    
415
/* 8x8 vertical prediction: copy the 8 samples above into every row. */
static void pred8x8_vertical_c(uint8_t *src, int stride){
    int y;
    const uint32_t left_half = ((uint32_t*)(src-stride))[0];
    const uint32_t right_half= ((uint32_t*)(src-stride))[1];

    for(y=0; y<8; y++){
        ((uint32_t*)(src+y*stride))[0]= left_half;
        ((uint32_t*)(src+y*stride))[1]= right_half;
    }
}
425

    
426
/* 8x8 horizontal prediction: replicate each left-neighbour sample across
 * its row.  Unsigned splat constant: pixel*0x01010101 with a signed
 * constant overflows int for pixel values >= 128 (undefined behavior). */
static void pred8x8_horizontal_c(uint8_t *src, int stride){
    int i;

    for(i=0; i<8; i++){
        ((uint32_t*)(src+i*stride))[0]=
        ((uint32_t*)(src+i*stride))[1]= src[-1+i*stride]*0x01010101U;
    }
}
434

    
435
/* Fill the 8x8 block with mid-grey 128 (no neighbours available). */
static void pred8x8_128_dc_c(uint8_t *src, int stride){
    int y;

    for(y=0; y<8; y++){
        ((uint32_t*)(src+y*stride))[0]=
        ((uint32_t*)(src+y*stride))[1]= 0x01010101U*128U;
    }
}
443

    
444
/* Fill the 8x8 block with 127 (bias variant used when edges are missing). */
static void pred8x8_127_dc_c(uint8_t *src, int stride){
    int y;

    for(y=0; y<8; y++){
        ((uint32_t*)(src+y*stride))[0]=
        ((uint32_t*)(src+y*stride))[1]= 0x01010101U*127U;
    }
}
452
/* Fill the 8x8 block with 129 (bias variant used when edges are missing). */
static void pred8x8_129_dc_c(uint8_t *src, int stride){
    int y;

    for(y=0; y<8; y++){
        ((uint32_t*)(src+y*stride))[0]=
        ((uint32_t*)(src+y*stride))[1]= 0x01010101U*129U;
    }
}
460

    
461
/* 8x8 (chroma) DC prediction from the left edge only: the top and bottom
 * 4-row halves each get the mean of their own 4 left neighbours.
 * Splats are uint32_t with an unsigned constant: dc*0x01010101 in int
 * overflows (UB) for dc >= 128. */
static void pred8x8_left_dc_c(uint8_t *src, int stride){
    int i;
    int dc0, dc2;
    uint32_t splat0, splat2;

    dc0=dc2=0;
    for(i=0;i<4; i++){
        dc0+= src[-1+i*stride];
        dc2+= src[-1+(i+4)*stride];
    }
    splat0= 0x01010101U*((dc0 + 2)>>2);
    splat2= 0x01010101U*((dc2 + 2)>>2);

    for(i=0; i<4; i++){
        ((uint32_t*)(src+i*stride))[0]=
        ((uint32_t*)(src+i*stride))[1]= splat0;
    }
    for(i=4; i<8; i++){
        ((uint32_t*)(src+i*stride))[0]=
        ((uint32_t*)(src+i*stride))[1]= splat2;
    }
}
482

    
483
/* 8x8 (chroma) DC prediction from the top edge only: the left and right
 * 4-column halves each get the mean of their own 4 top neighbours.
 * Splats are uint32_t with an unsigned constant to avoid signed overflow
 * (UB) for dc >= 128. */
static void pred8x8_top_dc_c(uint8_t *src, int stride){
    int i;
    int dc0, dc1;
    uint32_t splat0, splat1;

    dc0=dc1=0;
    for(i=0;i<4; i++){
        dc0+= src[i-stride];
        dc1+= src[4+i-stride];
    }
    splat0= 0x01010101U*((dc0 + 2)>>2);
    splat1= 0x01010101U*((dc1 + 2)>>2);

    for(i=0; i<4; i++){
        ((uint32_t*)(src+i*stride))[0]= splat0;
        ((uint32_t*)(src+i*stride))[1]= splat1;
    }
    for(i=4; i<8; i++){
        ((uint32_t*)(src+i*stride))[0]= splat0;
        ((uint32_t*)(src+i*stride))[1]= splat1;
    }
}
504

    
505
/* 8x8 (chroma) DC prediction: each 4x4 quadrant gets its own DC —
 *   top-left:  mean of its top + left neighbours,
 *   top-right: mean of its top neighbours,
 *   bottom-left:  mean of its left neighbours,
 *   bottom-right: mean of the top-right and bottom-left neighbour sums.
 * Splats are uint32_t with an unsigned constant: dc*0x01010101 in int
 * overflows (UB) for dc >= 128. */
static void pred8x8_dc_c(uint8_t *src, int stride){
    int i;
    int dc0, dc1, dc2;
    uint32_t splat0, splat1, splat2, splat3;

    dc0=dc1=dc2=0;
    for(i=0;i<4; i++){
        dc0+= src[-1+i*stride] + src[i-stride];
        dc1+= src[4+i-stride];
        dc2+= src[-1+(i+4)*stride];
    }
    splat3= 0x01010101U*((dc1 + dc2 + 4)>>3);
    splat0= 0x01010101U*((dc0 + 4)>>3);
    splat1= 0x01010101U*((dc1 + 2)>>2);
    splat2= 0x01010101U*((dc2 + 2)>>2);

    for(i=0; i<4; i++){
        ((uint32_t*)(src+i*stride))[0]= splat0;
        ((uint32_t*)(src+i*stride))[1]= splat1;
    }
    for(i=4; i<8; i++){
        ((uint32_t*)(src+i*stride))[0]= splat2;
        ((uint32_t*)(src+i*stride))[1]= splat3;
    }
}
529

    
530
//the following 4 function should not be optimized!
531
/* SVQ3-style 8x8 DC: top-DC over the whole block, then the top-left 4x4
 * quadrant is overwritten with full (top+left) DC.  Call order matters. */
static void pred8x8_mad_cow_dc_l0t(uint8_t *src, int stride){
    pred8x8_top_dc_c(src, stride);
    pred4x4_dc_c(src, NULL, stride);
}
535

    
536
/* SVQ3-style 8x8 DC: full 8x8 DC, then the top-left 4x4 quadrant is
 * overwritten with top-only DC.  Call order matters. */
static void pred8x8_mad_cow_dc_0lt(uint8_t *src, int stride){
    pred8x8_dc_c(src, stride);
    pred4x4_top_dc_c(src, NULL, stride);
}
540

    
541
/* SVQ3-style 8x8 DC: left-DC over the whole block, then both bottom 4x4
 * quadrants are overwritten with the 128 constant. */
static void pred8x8_mad_cow_dc_l00(uint8_t *src, int stride){
    pred8x8_left_dc_c(src, stride);
    pred4x4_128_dc_c(src + 4*stride    , NULL, stride);
    pred4x4_128_dc_c(src + 4*stride + 4, NULL, stride);
}
546

    
547
/* SVQ3-style 8x8 DC: left-DC over the whole block, then both top 4x4
 * quadrants are overwritten with the 128 constant. */
static void pred8x8_mad_cow_dc_0l0(uint8_t *src, int stride){
    pred8x8_left_dc_c(src, stride);
    pred4x4_128_dc_c(src    , NULL, stride);
    pred4x4_128_dc_c(src + 4, NULL, stride);
}
552

    
553
static void pred8x8_plane_c(uint8_t *src, int stride){
554
  int j, k;
555
  int a;
556
  uint8_t *cm = ff_cropTbl + MAX_NEG_CROP;
557
  const uint8_t * const src0 = src+3-stride;
558
  const uint8_t *src1 = src+4*stride-1;
559
  const uint8_t *src2 = src1-2*stride;      // == src+2*stride-1;
560
  int H = src0[1] - src0[-1];
561
  int V = src1[0] - src2[ 0];
562
  for(k=2; k<=4; ++k) {
563
    src1 += stride; src2 -= stride;
564
    H += k*(src0[k] - src0[-k]);
565
    V += k*(src1[0] - src2[ 0]);
566
  }
567
  H = ( 17*H+16 ) >> 5;
568
  V = ( 17*V+16 ) >> 5;
569

    
570
  a = 16*(src1[0] + src2[8]+1) - 3*(V+H);
571
  for(j=8; j>0; --j) {
572
    int b = a;
573
    a += V;
574
    src[0] = cm[ (b    ) >> 5 ];
575
    src[1] = cm[ (b+  H) >> 5 ];
576
    src[2] = cm[ (b+2*H) >> 5 ];
577
    src[3] = cm[ (b+3*H) >> 5 ];
578
    src[4] = cm[ (b+4*H) >> 5 ];
579
    src[5] = cm[ (b+5*H) >> 5 ];
580
    src[6] = cm[ (b+6*H) >> 5 ];
581
    src[7] = cm[ (b+7*H) >> 5 ];
582
    src += stride;
583
  }
584
}
585

    
586
/* 8x8 luma helpers: SRC addresses a sample relative to the block origin;
 * the LOAD macros compute the 3-tap (1,2,1)/4 filtered edge samples the
 * H.264 8x8 predictors operate on.  Missing edges fall back to the nearest
 * available sample (has_topleft / has_topright flags). */
#define SRC(x,y) src[(x)+(y)*stride]
#define PL(y) \
    const int l##y = (SRC(-1,y-1) + 2*SRC(-1,y) + SRC(-1,y+1) + 2) >> 2;
#define PREDICT_8x8_LOAD_LEFT \
    const int l0 = ((has_topleft ? SRC(-1,-1) : SRC(-1,0)) \
                     + 2*SRC(-1,0) + SRC(-1,1) + 2) >> 2; \
    PL(1) PL(2) PL(3) PL(4) PL(5) PL(6) \
    const int l7 av_unused = (SRC(-1,6) + 3*SRC(-1,7) + 2) >> 2

#define PT(x) \
    const int t##x = (SRC(x-1,-1) + 2*SRC(x,-1) + SRC(x+1,-1) + 2) >> 2;
#define PREDICT_8x8_LOAD_TOP \
    const int t0 = ((has_topleft ? SRC(-1,-1) : SRC(0,-1)) \
                     + 2*SRC(0,-1) + SRC(1,-1) + 2) >> 2; \
    PT(1) PT(2) PT(3) PT(4) PT(5) PT(6) \
    const int t7 av_unused = ((has_topright ? SRC(8,-1) : SRC(7,-1)) \
                     + 2*SRC(7,-1) + SRC(6,-1) + 2) >> 2

#define PTR(x) \
    t##x = (SRC(x-1,-1) + 2*SRC(x,-1) + SRC(x+1,-1) + 2) >> 2;
#define PREDICT_8x8_LOAD_TOPRIGHT \
    int t8, t9, t10, t11, t12, t13, t14, t15; \
    if(has_topright) { \
        PTR(8) PTR(9) PTR(10) PTR(11) PTR(12) PTR(13) PTR(14) \
        t15 = (SRC(14,-1) + 3*SRC(15,-1) + 2) >> 2; \
    } else t8=t9=t10=t11=t12=t13=t14=t15= SRC(7,-1);

#define PREDICT_8x8_LOAD_TOPLEFT \
    const int lt = (SRC(-1,0) + 2*SRC(-1,-1) + SRC(0,-1) + 2) >> 2

/* Fill all eight rows with the 32-bit splat value v. */
#define PREDICT_8x8_DC(v) \
    int y; \
    for( y = 0; y < 8; y++ ) { \
        ((uint32_t*)src)[0] = \
        ((uint32_t*)src)[1] = v; \
        src += stride; \
    }
623

    
624
/* 8x8 luma DC with no neighbours: fill with mid-grey 128. */
static void pred8x8l_128_dc_c(uint8_t *src, int has_topleft, int has_topright, int stride)
{
    PREDICT_8x8_DC(0x80808080);
}
628
static void pred8x8l_left_dc_c(uint8_t *src, int has_topleft, int has_topright, int stride)
629
{
630
    PREDICT_8x8_LOAD_LEFT;
631
    const uint32_t dc = ((l0+l1+l2+l3+l4+l5+l6+l7+4) >> 3) * 0x01010101;
632
    PREDICT_8x8_DC(dc);
633
}
634
static void pred8x8l_top_dc_c(uint8_t *src, int has_topleft, int has_topright, int stride)
635
{
636
    PREDICT_8x8_LOAD_TOP;
637
    const uint32_t dc = ((t0+t1+t2+t3+t4+t5+t6+t7+4) >> 3) * 0x01010101;
638
    PREDICT_8x8_DC(dc);
639
}
640
static void pred8x8l_dc_c(uint8_t *src, int has_topleft, int has_topright, int stride)
641
{
642
    PREDICT_8x8_LOAD_LEFT;
643
    PREDICT_8x8_LOAD_TOP;
644
    const uint32_t dc = ((l0+l1+l2+l3+l4+l5+l6+l7
645
                         +t0+t1+t2+t3+t4+t5+t6+t7+8) >> 4) * 0x01010101;
646
    PREDICT_8x8_DC(dc);
647
}
648
static void pred8x8l_horizontal_c(uint8_t *src, int has_topleft, int has_topright, int stride)
649
{
650
    PREDICT_8x8_LOAD_LEFT;
651
#define ROW(y) ((uint32_t*)(src+y*stride))[0] =\
652
               ((uint32_t*)(src+y*stride))[1] = 0x01010101 * l##y
653
    ROW(0); ROW(1); ROW(2); ROW(3); ROW(4); ROW(5); ROW(6); ROW(7);
654
#undef ROW
655
}
656
static void pred8x8l_vertical_c(uint8_t *src, int has_topleft, int has_topright, int stride)
657
{
658
    int y;
659
    PREDICT_8x8_LOAD_TOP;
660
    src[0] = t0;
661
    src[1] = t1;
662
    src[2] = t2;
663
    src[3] = t3;
664
    src[4] = t4;
665
    src[5] = t5;
666
    src[6] = t6;
667
    src[7] = t7;
668
    for( y = 1; y < 8; y++ )
669
        *(uint64_t*)(src+y*stride) = *(uint64_t*)src;
670
}
671
static void pred8x8l_down_left_c(uint8_t *src, int has_topleft, int has_topright, int stride)
672
{
673
    PREDICT_8x8_LOAD_TOP;
674
    PREDICT_8x8_LOAD_TOPRIGHT;
675
    SRC(0,0)= (t0 + 2*t1 + t2 + 2) >> 2;
676
    SRC(0,1)=SRC(1,0)= (t1 + 2*t2 + t3 + 2) >> 2;
677
    SRC(0,2)=SRC(1,1)=SRC(2,0)= (t2 + 2*t3 + t4 + 2) >> 2;
678
    SRC(0,3)=SRC(1,2)=SRC(2,1)=SRC(3,0)= (t3 + 2*t4 + t5 + 2) >> 2;
679
    SRC(0,4)=SRC(1,3)=SRC(2,2)=SRC(3,1)=SRC(4,0)= (t4 + 2*t5 + t6 + 2) >> 2;
680
    SRC(0,5)=SRC(1,4)=SRC(2,3)=SRC(3,2)=SRC(4,1)=SRC(5,0)= (t5 + 2*t6 + t7 + 2) >> 2;
681
    SRC(0,6)=SRC(1,5)=SRC(2,4)=SRC(3,3)=SRC(4,2)=SRC(5,1)=SRC(6,0)= (t6 + 2*t7 + t8 + 2) >> 2;
682
    SRC(0,7)=SRC(1,6)=SRC(2,5)=SRC(3,4)=SRC(4,3)=SRC(5,2)=SRC(6,1)=SRC(7,0)= (t7 + 2*t8 + t9 + 2) >> 2;
683
    SRC(1,7)=SRC(2,6)=SRC(3,5)=SRC(4,4)=SRC(5,3)=SRC(6,2)=SRC(7,1)= (t8 + 2*t9 + t10 + 2) >> 2;
684
    SRC(2,7)=SRC(3,6)=SRC(4,5)=SRC(5,4)=SRC(6,3)=SRC(7,2)= (t9 + 2*t10 + t11 + 2) >> 2;
685
    SRC(3,7)=SRC(4,6)=SRC(5,5)=SRC(6,4)=SRC(7,3)= (t10 + 2*t11 + t12 + 2) >> 2;
686
    SRC(4,7)=SRC(5,6)=SRC(6,5)=SRC(7,4)= (t11 + 2*t12 + t13 + 2) >> 2;
687
    SRC(5,7)=SRC(6,6)=SRC(7,5)= (t12 + 2*t13 + t14 + 2) >> 2;
688
    SRC(6,7)=SRC(7,6)= (t13 + 2*t14 + t15 + 2) >> 2;
689
    SRC(7,7)= (t14 + 3*t15 + 2) >> 2;
690
}
691
static void pred8x8l_down_right_c(uint8_t *src, int has_topleft, int has_topright, int stride)
692
{
693
    PREDICT_8x8_LOAD_TOP;
694
    PREDICT_8x8_LOAD_LEFT;
695
    PREDICT_8x8_LOAD_TOPLEFT;
696
    SRC(0,7)= (l7 + 2*l6 + l5 + 2) >> 2;
697
    SRC(0,6)=SRC(1,7)= (l6 + 2*l5 + l4 + 2) >> 2;
698
    SRC(0,5)=SRC(1,6)=SRC(2,7)= (l5 + 2*l4 + l3 + 2) >> 2;
699
    SRC(0,4)=SRC(1,5)=SRC(2,6)=SRC(3,7)= (l4 + 2*l3 + l2 + 2) >> 2;
700
    SRC(0,3)=SRC(1,4)=SRC(2,5)=SRC(3,6)=SRC(4,7)= (l3 + 2*l2 + l1 + 2) >> 2;
701
    SRC(0,2)=SRC(1,3)=SRC(2,4)=SRC(3,5)=SRC(4,6)=SRC(5,7)= (l2 + 2*l1 + l0 + 2) >> 2;
702
    SRC(0,1)=SRC(1,2)=SRC(2,3)=SRC(3,4)=SRC(4,5)=SRC(5,6)=SRC(6,7)= (l1 + 2*l0 + lt + 2) >> 2;
703
    SRC(0,0)=SRC(1,1)=SRC(2,2)=SRC(3,3)=SRC(4,4)=SRC(5,5)=SRC(6,6)=SRC(7,7)= (l0 + 2*lt + t0 + 2) >> 2;
704
    SRC(1,0)=SRC(2,1)=SRC(3,2)=SRC(4,3)=SRC(5,4)=SRC(6,5)=SRC(7,6)= (lt + 2*t0 + t1 + 2) >> 2;
705
    SRC(2,0)=SRC(3,1)=SRC(4,2)=SRC(5,3)=SRC(6,4)=SRC(7,5)= (t0 + 2*t1 + t2 + 2) >> 2;
706
    SRC(3,0)=SRC(4,1)=SRC(5,2)=SRC(6,3)=SRC(7,4)= (t1 + 2*t2 + t3 + 2) >> 2;
707
    SRC(4,0)=SRC(5,1)=SRC(6,2)=SRC(7,3)= (t2 + 2*t3 + t4 + 2) >> 2;
708
    SRC(5,0)=SRC(6,1)=SRC(7,2)= (t3 + 2*t4 + t5 + 2) >> 2;
709
    SRC(6,0)=SRC(7,1)= (t4 + 2*t5 + t6 + 2) >> 2;
710
    SRC(7,0)= (t5 + 2*t6 + t7 + 2) >> 2;
711

    
712
}
713
static void pred8x8l_vertical_right_c(uint8_t *src, int has_topleft, int has_topright, int stride)
714
{
715
    PREDICT_8x8_LOAD_TOP;
716
    PREDICT_8x8_LOAD_LEFT;
717
    PREDICT_8x8_LOAD_TOPLEFT;
718
    SRC(0,6)= (l5 + 2*l4 + l3 + 2) >> 2;
719
    SRC(0,7)= (l6 + 2*l5 + l4 + 2) >> 2;
720
    SRC(0,4)=SRC(1,6)= (l3 + 2*l2 + l1 + 2) >> 2;
721
    SRC(0,5)=SRC(1,7)= (l4 + 2*l3 + l2 + 2) >> 2;
722
    SRC(0,2)=SRC(1,4)=SRC(2,6)= (l1 + 2*l0 + lt + 2) >> 2;
723
    SRC(0,3)=SRC(1,5)=SRC(2,7)= (l2 + 2*l1 + l0 + 2) >> 2;
724
    SRC(0,1)=SRC(1,3)=SRC(2,5)=SRC(3,7)= (l0 + 2*lt + t0 + 2) >> 2;
725
    SRC(0,0)=SRC(1,2)=SRC(2,4)=SRC(3,6)= (lt + t0 + 1) >> 1;
726
    SRC(1,1)=SRC(2,3)=SRC(3,5)=SRC(4,7)= (lt + 2*t0 + t1 + 2) >> 2;
727
    SRC(1,0)=SRC(2,2)=SRC(3,4)=SRC(4,6)= (t0 + t1 + 1) >> 1;
728
    SRC(2,1)=SRC(3,3)=SRC(4,5)=SRC(5,7)= (t0 + 2*t1 + t2 + 2) >> 2;
729
    SRC(2,0)=SRC(3,2)=SRC(4,4)=SRC(5,6)= (t1 + t2 + 1) >> 1;
730
    SRC(3,1)=SRC(4,3)=SRC(5,5)=SRC(6,7)= (t1 + 2*t2 + t3 + 2) >> 2;
731
    SRC(3,0)=SRC(4,2)=SRC(5,4)=SRC(6,6)= (t2 + t3 + 1) >> 1;
732
    SRC(4,1)=SRC(5,3)=SRC(6,5)=SRC(7,7)= (t2 + 2*t3 + t4 + 2) >> 2;
733
    SRC(4,0)=SRC(5,2)=SRC(6,4)=SRC(7,6)= (t3 + t4 + 1) >> 1;
734
    SRC(5,1)=SRC(6,3)=SRC(7,5)= (t3 + 2*t4 + t5 + 2) >> 2;
735
    SRC(5,0)=SRC(6,2)=SRC(7,4)= (t4 + t5 + 1) >> 1;
736
    SRC(6,1)=SRC(7,3)= (t4 + 2*t5 + t6 + 2) >> 2;
737
    SRC(6,0)=SRC(7,2)= (t5 + t6 + 1) >> 1;
738
    SRC(7,1)= (t5 + 2*t6 + t7 + 2) >> 2;
739
    SRC(7,0)= (t6 + t7 + 1) >> 1;
740
}
741
static void pred8x8l_horizontal_down_c(uint8_t *src, int has_topleft, int has_topright, int stride)
742
{
743
    PREDICT_8x8_LOAD_TOP;
744
    PREDICT_8x8_LOAD_LEFT;
745
    PREDICT_8x8_LOAD_TOPLEFT;
746
    SRC(0,7)= (l6 + l7 + 1) >> 1;
747
    SRC(1,7)= (l5 + 2*l6 + l7 + 2) >> 2;
748
    SRC(0,6)=SRC(2,7)= (l5 + l6 + 1) >> 1;
749
    SRC(1,6)=SRC(3,7)= (l4 + 2*l5 + l6 + 2) >> 2;
750
    SRC(0,5)=SRC(2,6)=SRC(4,7)= (l4 + l5 + 1) >> 1;
751
    SRC(1,5)=SRC(3,6)=SRC(5,7)= (l3 + 2*l4 + l5 + 2) >> 2;
752
    SRC(0,4)=SRC(2,5)=SRC(4,6)=SRC(6,7)= (l3 + l4 + 1) >> 1;
753
    SRC(1,4)=SRC(3,5)=SRC(5,6)=SRC(7,7)= (l2 + 2*l3 + l4 + 2) >> 2;
754
    SRC(0,3)=SRC(2,4)=SRC(4,5)=SRC(6,6)= (l2 + l3 + 1) >> 1;
755
    SRC(1,3)=SRC(3,4)=SRC(5,5)=SRC(7,6)= (l1 + 2*l2 + l3 + 2) >> 2;
756
    SRC(0,2)=SRC(2,3)=SRC(4,4)=SRC(6,5)= (l1 + l2 + 1) >> 1;
757
    SRC(1,2)=SRC(3,3)=SRC(5,4)=SRC(7,5)= (l0 + 2*l1 + l2 + 2) >> 2;
758
    SRC(0,1)=SRC(2,2)=SRC(4,3)=SRC(6,4)= (l0 + l1 + 1) >> 1;
759
    SRC(1,1)=SRC(3,2)=SRC(5,3)=SRC(7,4)= (lt + 2*l0 + l1 + 2) >> 2;
760
    SRC(0,0)=SRC(2,1)=SRC(4,2)=SRC(6,3)= (lt + l0 + 1) >> 1;
761
    SRC(1,0)=SRC(3,1)=SRC(5,2)=SRC(7,3)= (l0 + 2*lt + t0 + 2) >> 2;
762
    SRC(2,0)=SRC(4,1)=SRC(6,2)= (t1 + 2*t0 + lt + 2) >> 2;
763
    SRC(3,0)=SRC(5,1)=SRC(7,2)= (t2 + 2*t1 + t0 + 2) >> 2;
764
    SRC(4,0)=SRC(6,1)= (t3 + 2*t2 + t1 + 2) >> 2;
765
    SRC(5,0)=SRC(7,1)= (t4 + 2*t3 + t2 + 2) >> 2;
766
    SRC(6,0)= (t5 + 2*t4 + t3 + 2) >> 2;
767
    SRC(7,0)= (t6 + 2*t5 + t4 + 2) >> 2;
768
}
769
static void pred8x8l_vertical_left_c(uint8_t *src, int has_topleft, int has_topright, int stride)
770
{
771
    PREDICT_8x8_LOAD_TOP;
772
    PREDICT_8x8_LOAD_TOPRIGHT;
773
    SRC(0,0)= (t0 + t1 + 1) >> 1;
774
    SRC(0,1)= (t0 + 2*t1 + t2 + 2) >> 2;
775
    SRC(0,2)=SRC(1,0)= (t1 + t2 + 1) >> 1;
776
    SRC(0,3)=SRC(1,1)= (t1 + 2*t2 + t3 + 2) >> 2;
777
    SRC(0,4)=SRC(1,2)=SRC(2,0)= (t2 + t3 + 1) >> 1;
778
    SRC(0,5)=SRC(1,3)=SRC(2,1)= (t2 + 2*t3 + t4 + 2) >> 2;
779
    SRC(0,6)=SRC(1,4)=SRC(2,2)=SRC(3,0)= (t3 + t4 + 1) >> 1;
780
    SRC(0,7)=SRC(1,5)=SRC(2,3)=SRC(3,1)= (t3 + 2*t4 + t5 + 2) >> 2;
781
    SRC(1,6)=SRC(2,4)=SRC(3,2)=SRC(4,0)= (t4 + t5 + 1) >> 1;
782
    SRC(1,7)=SRC(2,5)=SRC(3,3)=SRC(4,1)= (t4 + 2*t5 + t6 + 2) >> 2;
783
    SRC(2,6)=SRC(3,4)=SRC(4,2)=SRC(5,0)= (t5 + t6 + 1) >> 1;
784
    SRC(2,7)=SRC(3,5)=SRC(4,3)=SRC(5,1)= (t5 + 2*t6 + t7 + 2) >> 2;
785
    SRC(3,6)=SRC(4,4)=SRC(5,2)=SRC(6,0)= (t6 + t7 + 1) >> 1;
786
    SRC(3,7)=SRC(4,5)=SRC(5,3)=SRC(6,1)= (t6 + 2*t7 + t8 + 2) >> 2;
787
    SRC(4,6)=SRC(5,4)=SRC(6,2)=SRC(7,0)= (t7 + t8 + 1) >> 1;
788
    SRC(4,7)=SRC(5,5)=SRC(6,3)=SRC(7,1)= (t7 + 2*t8 + t9 + 2) >> 2;
789
    SRC(5,6)=SRC(6,4)=SRC(7,2)= (t8 + t9 + 1) >> 1;
790
    SRC(5,7)=SRC(6,5)=SRC(7,3)= (t8 + 2*t9 + t10 + 2) >> 2;
791
    SRC(6,6)=SRC(7,4)= (t9 + t10 + 1) >> 1;
792
    SRC(6,7)=SRC(7,5)= (t9 + 2*t10 + t11 + 2) >> 2;
793
    SRC(7,6)= (t10 + t11 + 1) >> 1;
794
    SRC(7,7)= (t10 + 2*t11 + t12 + 2) >> 2;
795
}
796
static void pred8x8l_horizontal_up_c(uint8_t *src, int has_topleft, int has_topright, int stride)
797
{
798
    PREDICT_8x8_LOAD_LEFT;
799
    SRC(0,0)= (l0 + l1 + 1) >> 1;
800
    SRC(1,0)= (l0 + 2*l1 + l2 + 2) >> 2;
801
    SRC(0,1)=SRC(2,0)= (l1 + l2 + 1) >> 1;
802
    SRC(1,1)=SRC(3,0)= (l1 + 2*l2 + l3 + 2) >> 2;
803
    SRC(0,2)=SRC(2,1)=SRC(4,0)= (l2 + l3 + 1) >> 1;
804
    SRC(1,2)=SRC(3,1)=SRC(5,0)= (l2 + 2*l3 + l4 + 2) >> 2;
805
    SRC(0,3)=SRC(2,2)=SRC(4,1)=SRC(6,0)= (l3 + l4 + 1) >> 1;
806
    SRC(1,3)=SRC(3,2)=SRC(5,1)=SRC(7,0)= (l3 + 2*l4 + l5 + 2) >> 2;
807
    SRC(0,4)=SRC(2,3)=SRC(4,2)=SRC(6,1)= (l4 + l5 + 1) >> 1;
808
    SRC(1,4)=SRC(3,3)=SRC(5,2)=SRC(7,1)= (l4 + 2*l5 + l6 + 2) >> 2;
809
    SRC(0,5)=SRC(2,4)=SRC(4,3)=SRC(6,2)= (l5 + l6 + 1) >> 1;
810
    SRC(1,5)=SRC(3,4)=SRC(5,3)=SRC(7,2)= (l5 + 2*l6 + l7 + 2) >> 2;
811
    SRC(0,6)=SRC(2,5)=SRC(4,4)=SRC(6,3)= (l6 + l7 + 1) >> 1;
812
    SRC(1,6)=SRC(3,5)=SRC(5,4)=SRC(7,3)= (l6 + 3*l7 + 2) >> 2;
813
    SRC(0,7)=SRC(1,7)=SRC(2,6)=SRC(2,7)=SRC(3,6)=
814
    SRC(3,7)=SRC(4,5)=SRC(4,6)=SRC(4,7)=SRC(5,5)=
815
    SRC(5,6)=SRC(5,7)=SRC(6,4)=SRC(6,5)=SRC(6,6)=
816
    SRC(6,7)=SRC(7,4)=SRC(7,5)=SRC(7,6)=SRC(7,7)= l7;
817
}
818
#undef PREDICT_8x8_LOAD_LEFT
819
#undef PREDICT_8x8_LOAD_TOP
820
#undef PREDICT_8x8_LOAD_TOPLEFT
821
#undef PREDICT_8x8_LOAD_TOPRIGHT
822
#undef PREDICT_8x8_DC
823
#undef PTR
824
#undef PT
825
#undef PL
826
#undef SRC
827

    
828
/* Vertical-prediction + residual add for a 4x4 block: each column is the
 * running (mod-256) sum of the sample above the block and the column's
 * residual coefficients. */
static void pred4x4_vertical_add_c(uint8_t *pix, const DCTELEM *block, int stride){
    int x, y;
    pix -= stride;
    for(x=0; x<4; x++){
        uint8_t sum = pix[0];
        for(y=0; y<4; y++){
            sum += block[4*y];
            pix[(y+1)*stride] = sum;
        }
        pix++;
        block++;
    }
}
841

    
842
/* Horizontal-prediction + residual add for a 4x4 block: each row is the
 * running (mod-256) sum of the left neighbour and the row's residuals. */
static void pred4x4_horizontal_add_c(uint8_t *pix, const DCTELEM *block, int stride){
    int x, y;
    for(y=0; y<4; y++){
        uint8_t sum = pix[-1];
        for(x=0; x<4; x++){
            sum += block[x];
            pix[x] = sum;
        }
        pix+= stride;
        block+= 4;
    }
}
854

    
855
/* Vertical-prediction + residual add for an 8x8 block (column-wise running
 * mod-256 sum, residuals laid out with stride 8 per row). */
static void pred8x8l_vertical_add_c(uint8_t *pix, const DCTELEM *block, int stride){
    int x, y;
    pix -= stride;
    for(x=0; x<8; x++){
        uint8_t sum = pix[0];
        for(y=0; y<8; y++){
            sum += block[8*y];
            pix[(y+1)*stride] = sum;
        }
        pix++;
        block++;
    }
}
872

    
873
/* Horizontal-prediction + residual add for an 8x8 block (row-wise running
 * mod-256 sum starting from the left neighbour). */
static void pred8x8l_horizontal_add_c(uint8_t *pix, const DCTELEM *block, int stride){
    int x, y;
    for(y=0; y<8; y++){
        uint8_t sum = pix[-1];
        for(x=0; x<8; x++){
            sum += block[x];
            pix[x] = sum;
        }
        pix+= stride;
        block+= 8;
    }
}
889

    
890
/* Apply 4x4 vertical prediction+add to all 16 sub-blocks of a 16x16
 * macroblock; block_offset gives each sub-block's pixel offset. */
static void pred16x16_vertical_add_c(uint8_t *pix, const int *block_offset, const DCTELEM *block, int stride){
    int blk;
    for(blk=0; blk<16; blk++)
        pred4x4_vertical_add_c(pix + block_offset[blk], block + blk*16, stride);
}
895

    
896
/* Apply 4x4 horizontal prediction+add to all 16 sub-blocks of a 16x16
 * macroblock; block_offset gives each sub-block's pixel offset. */
static void pred16x16_horizontal_add_c(uint8_t *pix, const int *block_offset, const DCTELEM *block, int stride){
    int blk;
    for(blk=0; blk<16; blk++)
        pred4x4_horizontal_add_c(pix + block_offset[blk], block + blk*16, stride);
}
901

    
902
/* Apply 4x4 vertical prediction+add to the 4 sub-blocks of an 8x8 block. */
static void pred8x8_vertical_add_c(uint8_t *pix, const int *block_offset, const DCTELEM *block, int stride){
    int blk;
    for(blk=0; blk<4; blk++)
        pred4x4_vertical_add_c(pix + block_offset[blk], block + blk*16, stride);
}
907

    
908
/* Apply 4x4 horizontal prediction+add to the 4 sub-blocks of an 8x8 block. */
static void pred8x8_horizontal_add_c(uint8_t *pix, const int *block_offset, const DCTELEM *block, int stride){
    int blk;
    for(blk=0; blk<4; blk++)
        pred4x4_horizontal_add_c(pix + block_offset[blk], block + blk*16, stride);
}