Revision 5ada2524 libavcodec/h264pred.c

View differences:

libavcodec/h264pred.c (unified diff: lines prefixed '-' were removed, '+' were added)
  * @author Michael Niedermayer <michaelni@gmx.at>
  */
 
-#include "avcodec.h"
-#include "mpegvideo.h"
 #include "h264pred.h"
-#include "mathops.h"
-
-static void pred4x4_vertical_c(uint8_t *src, const uint8_t *topright, int stride){
-    const uint32_t a= ((uint32_t*)(src-stride))[0];
-    ((uint32_t*)(src+0*stride))[0]= a;
-    ((uint32_t*)(src+1*stride))[0]= a;
-    ((uint32_t*)(src+2*stride))[0]= a;
-    ((uint32_t*)(src+3*stride))[0]= a;
-}
-
-static void pred4x4_horizontal_c(uint8_t *src, const uint8_t *topright, int stride){
-    ((uint32_t*)(src+0*stride))[0]= src[-1+0*stride]*0x01010101;
-    ((uint32_t*)(src+1*stride))[0]= src[-1+1*stride]*0x01010101;
-    ((uint32_t*)(src+2*stride))[0]= src[-1+2*stride]*0x01010101;
-    ((uint32_t*)(src+3*stride))[0]= src[-1+3*stride]*0x01010101;
-}
-
-static void pred4x4_dc_c(uint8_t *src, const uint8_t *topright, int stride){
-    const int dc= (  src[-stride] + src[1-stride] + src[2-stride] + src[3-stride]
-                   + src[-1+0*stride] + src[-1+1*stride] + src[-1+2*stride] + src[-1+3*stride] + 4) >>3;
-
-    ((uint32_t*)(src+0*stride))[0]=
-    ((uint32_t*)(src+1*stride))[0]=
-    ((uint32_t*)(src+2*stride))[0]=
-    ((uint32_t*)(src+3*stride))[0]= dc* 0x01010101;
-}
-
-static void pred4x4_left_dc_c(uint8_t *src, const uint8_t *topright, int stride){
-    const int dc= (  src[-1+0*stride] + src[-1+1*stride] + src[-1+2*stride] + src[-1+3*stride] + 2) >>2;
-
-    ((uint32_t*)(src+0*stride))[0]=
-    ((uint32_t*)(src+1*stride))[0]=
-    ((uint32_t*)(src+2*stride))[0]=
-    ((uint32_t*)(src+3*stride))[0]= dc* 0x01010101;
-}
-
-static void pred4x4_top_dc_c(uint8_t *src, const uint8_t *topright, int stride){
-    const int dc= (  src[-stride] + src[1-stride] + src[2-stride] + src[3-stride] + 2) >>2;
-
-    ((uint32_t*)(src+0*stride))[0]=
-    ((uint32_t*)(src+1*stride))[0]=
-    ((uint32_t*)(src+2*stride))[0]=
-    ((uint32_t*)(src+3*stride))[0]= dc* 0x01010101;
-}
-
-static void pred4x4_128_dc_c(uint8_t *src, const uint8_t *topright, int stride){
-    ((uint32_t*)(src+0*stride))[0]=
-    ((uint32_t*)(src+1*stride))[0]=
-    ((uint32_t*)(src+2*stride))[0]=
-    ((uint32_t*)(src+3*stride))[0]= 128U*0x01010101U;
-}
-
-static void pred4x4_127_dc_c(uint8_t *src, const uint8_t *topright, int stride){
-    ((uint32_t*)(src+0*stride))[0]=
-    ((uint32_t*)(src+1*stride))[0]=
-    ((uint32_t*)(src+2*stride))[0]=
-    ((uint32_t*)(src+3*stride))[0]= 127U*0x01010101U;
-}
-
-static void pred4x4_129_dc_c(uint8_t *src, const uint8_t *topright, int stride){
-    ((uint32_t*)(src+0*stride))[0]=
-    ((uint32_t*)(src+1*stride))[0]=
-    ((uint32_t*)(src+2*stride))[0]=
-    ((uint32_t*)(src+3*stride))[0]= 129U*0x01010101U;
-}
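
Note (illustration, not part of this revision): the DC predictors removed above average the available neighbour pixels with rounding and then replicate the 8-bit result across a whole 4-pixel row by multiplying it by 0x01010101, so one 32-bit store fills the row. A minimal standalone sketch of that idiom, with hypothetical names:

    #include <stdint.h>
    #include <string.h>

    /* Replicate one 8-bit value into all four bytes of a 32-bit word and
     * write a 4-pixel row in a single store, like dc*0x01010101 above. */
    static void splat_row4(uint8_t *row, int value)
    {
        uint32_t v = (uint32_t)value * 0x01010101u;
        memcpy(row, &v, 4);
    }

    /* 4x4 DC prediction: (4 top + 4 left neighbours + 4) >> 3. */
    static void dc_pred4x4(uint8_t *dst, int stride,
                           const uint8_t *top, const uint8_t *left)
    {
        int i, sum = 4;                      /* +4 rounds the >>3 to nearest */
        for (i = 0; i < 4; i++)
            sum += top[i] + left[i];
        for (i = 0; i < 4; i++)
            splat_row4(dst + i * stride, sum >> 3);
    }
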
-
-
-#define LOAD_TOP_RIGHT_EDGE\
-    const int av_unused t4= topright[0];\
-    const int av_unused t5= topright[1];\
-    const int av_unused t6= topright[2];\
-    const int av_unused t7= topright[3];\
-
-#define LOAD_DOWN_LEFT_EDGE\
-    const int av_unused l4= src[-1+4*stride];\
-    const int av_unused l5= src[-1+5*stride];\
-    const int av_unused l6= src[-1+6*stride];\
-    const int av_unused l7= src[-1+7*stride];\
-
-#define LOAD_LEFT_EDGE\
-    const int av_unused l0= src[-1+0*stride];\
-    const int av_unused l1= src[-1+1*stride];\
-    const int av_unused l2= src[-1+2*stride];\
-    const int av_unused l3= src[-1+3*stride];\
-
-#define LOAD_TOP_EDGE\
-    const int av_unused t0= src[ 0-1*stride];\
-    const int av_unused t1= src[ 1-1*stride];\
-    const int av_unused t2= src[ 2-1*stride];\
-    const int av_unused t3= src[ 3-1*stride];\
+#include "h264pred_template.c"
 
 static void pred4x4_vertical_vp8_c(uint8_t *src, const uint8_t *topright, int stride){
     const int lt= src[-1-1*stride];
     LOAD_TOP_EDGE
     LOAD_TOP_RIGHT_EDGE
     uint32_t v = PACK_4U8((lt + 2*t0 + t1 + 2) >> 2,
-                            (t0 + 2*t1 + t2 + 2) >> 2,
-                            (t1 + 2*t2 + t3 + 2) >> 2,
-                            (t2 + 2*t3 + t4 + 2) >> 2);
+                          (t0 + 2*t1 + t2 + 2) >> 2,
+                          (t1 + 2*t2 + t3 + 2) >> 2,
+                          (t2 + 2*t3 + t4 + 2) >> 2);
 
     AV_WN32A(src+0*stride, v);
     AV_WN32A(src+1*stride, v);
......
     AV_WN32A(src+3*stride, ((l2 + 2*l3 + l3 + 2) >> 2)*0x01010101);
 }
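
Note (illustration, not part of this revision): nearly every directional predictor in this file builds its samples from one low-pass filter, (a + 2*b + c + 2) >> 2, a 1-2-1 tap over three neighbouring edge pixels with round-to-nearest; pred4x4_vertical_vp8_c above applies it along the top edge and PACK_4U8 then packs the four filtered bytes into one 32-bit word. A worked instance of the filter:

    /* 1-2-1 smoothing with rounding, as in (lt + 2*t0 + t1 + 2) >> 2. */
    static inline int filter_121(int a, int b, int c)
    {
        return (a + 2 * b + c + 2) >> 2;
    }
    /* Example: a=100, b=104, c=120 -> (100 + 208 + 120 + 2) >> 2 = 107. */
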
 
-static void pred4x4_down_right_c(uint8_t *src, const uint8_t *topright, int stride){
-    const int lt= src[-1-1*stride];
-    LOAD_TOP_EDGE
-    LOAD_LEFT_EDGE
-
-    src[0+3*stride]=(l3 + 2*l2 + l1 + 2)>>2;
-    src[0+2*stride]=
-    src[1+3*stride]=(l2 + 2*l1 + l0 + 2)>>2;
-    src[0+1*stride]=
-    src[1+2*stride]=
-    src[2+3*stride]=(l1 + 2*l0 + lt + 2)>>2;
-    src[0+0*stride]=
-    src[1+1*stride]=
-    src[2+2*stride]=
-    src[3+3*stride]=(l0 + 2*lt + t0 + 2)>>2;
-    src[1+0*stride]=
-    src[2+1*stride]=
-    src[3+2*stride]=(lt + 2*t0 + t1 + 2)>>2;
-    src[2+0*stride]=
-    src[3+1*stride]=(t0 + 2*t1 + t2 + 2)>>2;
-    src[3+0*stride]=(t1 + 2*t2 + t3 + 2)>>2;
-}
-
-static void pred4x4_down_left_c(uint8_t *src, const uint8_t *topright, int stride){
-    LOAD_TOP_EDGE
-    LOAD_TOP_RIGHT_EDGE
-//    LOAD_LEFT_EDGE
-
-    src[0+0*stride]=(t0 + t2 + 2*t1 + 2)>>2;
-    src[1+0*stride]=
-    src[0+1*stride]=(t1 + t3 + 2*t2 + 2)>>2;
-    src[2+0*stride]=
-    src[1+1*stride]=
-    src[0+2*stride]=(t2 + t4 + 2*t3 + 2)>>2;
-    src[3+0*stride]=
-    src[2+1*stride]=
-    src[1+2*stride]=
-    src[0+3*stride]=(t3 + t5 + 2*t4 + 2)>>2;
-    src[3+1*stride]=
-    src[2+2*stride]=
-    src[1+3*stride]=(t4 + t6 + 2*t5 + 2)>>2;
-    src[3+2*stride]=
-    src[2+3*stride]=(t5 + t7 + 2*t6 + 2)>>2;
-    src[3+3*stride]=(t6 + 3*t7 + 2)>>2;
-}
-
 static void pred4x4_down_left_svq3_c(uint8_t *src, const uint8_t *topright, int stride){
     LOAD_TOP_EDGE
     LOAD_LEFT_EDGE
......
     src[3+3*stride]=(t6 + t7 + 1 + 2*l3 + 1)>>2;
 }
 
-static void pred4x4_vertical_right_c(uint8_t *src, const uint8_t *topright, int stride){
-    const int lt= src[-1-1*stride];
-    LOAD_TOP_EDGE
-    LOAD_LEFT_EDGE
-
-    src[0+0*stride]=
-    src[1+2*stride]=(lt + t0 + 1)>>1;
-    src[1+0*stride]=
-    src[2+2*stride]=(t0 + t1 + 1)>>1;
-    src[2+0*stride]=
-    src[3+2*stride]=(t1 + t2 + 1)>>1;
-    src[3+0*stride]=(t2 + t3 + 1)>>1;
-    src[0+1*stride]=
-    src[1+3*stride]=(l0 + 2*lt + t0 + 2)>>2;
-    src[1+1*stride]=
-    src[2+3*stride]=(lt + 2*t0 + t1 + 2)>>2;
-    src[2+1*stride]=
-    src[3+3*stride]=(t0 + 2*t1 + t2 + 2)>>2;
-    src[3+1*stride]=(t1 + 2*t2 + t3 + 2)>>2;
-    src[0+2*stride]=(lt + 2*l0 + l1 + 2)>>2;
-    src[0+3*stride]=(l0 + 2*l1 + l2 + 2)>>2;
-}
-
-static void pred4x4_vertical_left_c(uint8_t *src, const uint8_t *topright, int stride){
-    LOAD_TOP_EDGE
-    LOAD_TOP_RIGHT_EDGE
-
-    src[0+0*stride]=(t0 + t1 + 1)>>1;
-    src[1+0*stride]=
-    src[0+2*stride]=(t1 + t2 + 1)>>1;
-    src[2+0*stride]=
-    src[1+2*stride]=(t2 + t3 + 1)>>1;
-    src[3+0*stride]=
-    src[2+2*stride]=(t3 + t4+ 1)>>1;
-    src[3+2*stride]=(t4 + t5+ 1)>>1;
-    src[0+1*stride]=(t0 + 2*t1 + t2 + 2)>>2;
-    src[1+1*stride]=
-    src[0+3*stride]=(t1 + 2*t2 + t3 + 2)>>2;
-    src[2+1*stride]=
-    src[1+3*stride]=(t2 + 2*t3 + t4 + 2)>>2;
-    src[3+1*stride]=
-    src[2+3*stride]=(t3 + 2*t4 + t5 + 2)>>2;
-    src[3+3*stride]=(t4 + 2*t5 + t6 + 2)>>2;
-}
-
 static void pred4x4_vertical_left_rv40(uint8_t *src, const uint8_t *topright, int stride,
-                                      const int l0, const int l1, const int l2, const int l3, const int l4){
+                                       const int l0, const int l1, const int l2, const int l3, const int l4){
     LOAD_TOP_EDGE
     LOAD_TOP_RIGHT_EDGE
 
......
     src[3+3*stride]=(t5 + 2*t6 + t7 + 2)>>2;
 }
 
-static void pred4x4_horizontal_up_c(uint8_t *src, const uint8_t *topright, int stride){
-    LOAD_LEFT_EDGE
-
-    src[0+0*stride]=(l0 + l1 + 1)>>1;
-    src[1+0*stride]=(l0 + 2*l1 + l2 + 2)>>2;
-    src[2+0*stride]=
-    src[0+1*stride]=(l1 + l2 + 1)>>1;
-    src[3+0*stride]=
-    src[1+1*stride]=(l1 + 2*l2 + l3 + 2)>>2;
-    src[2+1*stride]=
-    src[0+2*stride]=(l2 + l3 + 1)>>1;
-    src[3+1*stride]=
-    src[1+2*stride]=(l2 + 2*l3 + l3 + 2)>>2;
-    src[3+2*stride]=
-    src[1+3*stride]=
-    src[0+3*stride]=
-    src[2+2*stride]=
-    src[2+3*stride]=
-    src[3+3*stride]=l3;
-}
-
 static void pred4x4_horizontal_up_rv40_c(uint8_t *src, const uint8_t *topright, int stride){
     LOAD_LEFT_EDGE
     LOAD_DOWN_LEFT_EDGE
......
     src[3+3*stride]=l3;
 }
 
-static void pred4x4_horizontal_down_c(uint8_t *src, const uint8_t *topright, int stride){
-    const int lt= src[-1-1*stride];
-    LOAD_TOP_EDGE
-    LOAD_LEFT_EDGE
-
-    src[0+0*stride]=
-    src[2+1*stride]=(lt + l0 + 1)>>1;
-    src[1+0*stride]=
-    src[3+1*stride]=(l0 + 2*lt + t0 + 2)>>2;
-    src[2+0*stride]=(lt + 2*t0 + t1 + 2)>>2;
-    src[3+0*stride]=(t0 + 2*t1 + t2 + 2)>>2;
-    src[0+1*stride]=
-    src[2+2*stride]=(l0 + l1 + 1)>>1;
-    src[1+1*stride]=
-    src[3+2*stride]=(lt + 2*l0 + l1 + 2)>>2;
-    src[0+2*stride]=
-    src[2+3*stride]=(l1 + l2+ 1)>>1;
-    src[1+2*stride]=
-    src[3+3*stride]=(l0 + 2*l1 + l2 + 2)>>2;
-    src[0+3*stride]=(l2 + l3 + 1)>>1;
-    src[1+3*stride]=(l1 + 2*l2 + l3 + 2)>>2;
-}
-
 static void pred4x4_tm_vp8_c(uint8_t *src, const uint8_t *topright, int stride){
     uint8_t *cm = ff_cropTbl + MAX_NEG_CROP - src[-1-stride];
     uint8_t *top = src-stride;
......
     }
 }
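
Note (illustration, not part of this revision): pred4x4_tm_vp8_c, largely collapsed above, is VP8's TrueMotion mode: each pixel is predicted as left + top - topleft, clipped to 0..255, and the ff_cropTbl offset (cm = ff_cropTbl + MAX_NEG_CROP - src[-1-stride]) folds the subtraction and the clip into a single table lookup. An equivalent written without the table, hypothetical names:

    #include <stdint.h>

    static inline uint8_t clip_u8(int v)
    {
        return v < 0 ? 0 : v > 255 ? 255 : (uint8_t)v;
    }

    /* TrueMotion 4x4: pred(x,y) = clip(left[y] + top[x] - topleft). */
    static void tm_pred4x4(uint8_t *dst, int stride,
                           const uint8_t *top, const uint8_t *left, int topleft)
    {
        for (int y = 0; y < 4; y++)
            for (int x = 0; x < 4; x++)
                dst[y * stride + x] = clip_u8(left[y] + top[x] - topleft);
    }
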
 
-static void pred16x16_vertical_c(uint8_t *src, int stride){
-    int i;
-    const uint32_t a= ((uint32_t*)(src-stride))[0];
-    const uint32_t b= ((uint32_t*)(src-stride))[1];
-    const uint32_t c= ((uint32_t*)(src-stride))[2];
-    const uint32_t d= ((uint32_t*)(src-stride))[3];
-
-    for(i=0; i<16; i++){
-        ((uint32_t*)(src+i*stride))[0]= a;
-        ((uint32_t*)(src+i*stride))[1]= b;
-        ((uint32_t*)(src+i*stride))[2]= c;
-        ((uint32_t*)(src+i*stride))[3]= d;
-    }
-}
-
-static void pred16x16_horizontal_c(uint8_t *src, int stride){
-    int i;
-
-    for(i=0; i<16; i++){
-        ((uint32_t*)(src+i*stride))[0]=
-        ((uint32_t*)(src+i*stride))[1]=
-        ((uint32_t*)(src+i*stride))[2]=
-        ((uint32_t*)(src+i*stride))[3]= src[-1+i*stride]*0x01010101;
-    }
-}
-
-static void pred16x16_dc_c(uint8_t *src, int stride){
-    int i, dc=0;
-
-    for(i=0;i<16; i++){
-        dc+= src[-1+i*stride];
-    }
-
-    for(i=0;i<16; i++){
-        dc+= src[i-stride];
-    }
-
-    dc= 0x01010101*((dc + 16)>>5);
-
-    for(i=0; i<16; i++){
-        ((uint32_t*)(src+i*stride))[0]=
-        ((uint32_t*)(src+i*stride))[1]=
-        ((uint32_t*)(src+i*stride))[2]=
-        ((uint32_t*)(src+i*stride))[3]= dc;
-    }
-}
-
-static void pred16x16_left_dc_c(uint8_t *src, int stride){
-    int i, dc=0;
-
-    for(i=0;i<16; i++){
-        dc+= src[-1+i*stride];
-    }
-
-    dc= 0x01010101*((dc + 8)>>4);
-
-    for(i=0; i<16; i++){
-        ((uint32_t*)(src+i*stride))[0]=
-        ((uint32_t*)(src+i*stride))[1]=
-        ((uint32_t*)(src+i*stride))[2]=
-        ((uint32_t*)(src+i*stride))[3]= dc;
-    }
-}
-
-static void pred16x16_top_dc_c(uint8_t *src, int stride){
-    int i, dc=0;
-
-    for(i=0;i<16; i++){
-        dc+= src[i-stride];
-    }
-    dc= 0x01010101*((dc + 8)>>4);
-
-    for(i=0; i<16; i++){
-        ((uint32_t*)(src+i*stride))[0]=
-        ((uint32_t*)(src+i*stride))[1]=
-        ((uint32_t*)(src+i*stride))[2]=
-        ((uint32_t*)(src+i*stride))[3]= dc;
-    }
-}
-
-static void pred16x16_128_dc_c(uint8_t *src, int stride){
-    int i;
-
-    for(i=0; i<16; i++){
-        ((uint32_t*)(src+i*stride))[0]=
-        ((uint32_t*)(src+i*stride))[1]=
-        ((uint32_t*)(src+i*stride))[2]=
-        ((uint32_t*)(src+i*stride))[3]= 0x01010101U*128U;
-    }
-}
-
-static void pred16x16_127_dc_c(uint8_t *src, int stride){
-    int i;
-
-    for(i=0; i<16; i++){
-        ((uint32_t*)(src+i*stride))[0]=
-        ((uint32_t*)(src+i*stride))[1]=
-        ((uint32_t*)(src+i*stride))[2]=
-        ((uint32_t*)(src+i*stride))[3]= 0x01010101U*127U;
-    }
-}
-
-static void pred16x16_129_dc_c(uint8_t *src, int stride){
-    int i;
-
-    for(i=0; i<16; i++){
-        ((uint32_t*)(src+i*stride))[0]=
-        ((uint32_t*)(src+i*stride))[1]=
-        ((uint32_t*)(src+i*stride))[2]=
-        ((uint32_t*)(src+i*stride))[3]= 0x01010101U*129U;
-    }
-}
-
-static inline void pred16x16_plane_compat_c(uint8_t *src, int stride, const int svq3, const int rv40){
-  int i, j, k;
-  int a;
-  uint8_t *cm = ff_cropTbl + MAX_NEG_CROP;
-  const uint8_t * const src0 = src+7-stride;
-  const uint8_t *src1 = src+8*stride-1;
-  const uint8_t *src2 = src1-2*stride;      // == src+6*stride-1;
-  int H = src0[1] - src0[-1];
-  int V = src1[0] - src2[ 0];
-  for(k=2; k<=8; ++k) {
-    src1 += stride; src2 -= stride;
-    H += k*(src0[k] - src0[-k]);
-    V += k*(src1[0] - src2[ 0]);
-  }
-  if(svq3){
-    H = ( 5*(H/4) ) / 16;
-    V = ( 5*(V/4) ) / 16;
-
-    /* required for 100% accuracy */
-    i = H; H = V; V = i;
-  }else if(rv40){
-    H = ( H + (H>>2) ) >> 4;
-    V = ( V + (V>>2) ) >> 4;
-  }else{
-    H = ( 5*H+32 ) >> 6;
-    V = ( 5*V+32 ) >> 6;
-  }
-
-  a = 16*(src1[0] + src2[16] + 1) - 7*(V+H);
-  for(j=16; j>0; --j) {
-    int b = a;
-    a += V;
-    for(i=-16; i<0; i+=4) {
-      src[16+i] = cm[ (b    ) >> 5 ];
-      src[17+i] = cm[ (b+  H) >> 5 ];
-      src[18+i] = cm[ (b+2*H) >> 5 ];
-      src[19+i] = cm[ (b+3*H) >> 5 ];
-      b += 4*H;
-    }
-    src += stride;
-  }
-}
-
-static void pred16x16_plane_c(uint8_t *src, int stride){
-    pred16x16_plane_compat_c(src, stride, 0, 0);
-}
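
Note (illustration, not part of this revision): pred16x16_plane_compat_c above fits a plane to the block edges. H and V are weighted sums of pixel differences across the top row and left column, scaled to per-pixel gradients (the plain H.264 path uses (5*H + 32) >> 6; the svq3 and rv40 branches only change that scaling), and every output pixel is the clipped plane value a + (x-7)*H + (y-7)*V evaluated at its position. A direct, unoptimized restatement of the plain H.264 case, hypothetical names:

    #include <stdint.h>

    static inline uint8_t clip_u8(int v)
    {
        return v < 0 ? 0 : v > 255 ? 255 : (uint8_t)v;
    }

    /* top/left hold the 16 reconstructed neighbours above / to the left of
     * the block, topleft the corner sample.  Assumes an arithmetic right
     * shift for negative intermediates, as the original code does.       */
    static void plane_pred16x16(uint8_t *dst, int stride, const uint8_t *top,
                                const uint8_t *left, int topleft)
    {
        int h = 0, v = 0, k, x, y;
        for (k = 1; k <= 8; k++) {
            h += k * (top[7 + k]  - (k == 8 ? topleft : top[7 - k]));
            v += k * (left[7 + k] - (k == 8 ? topleft : left[7 - k]));
        }
        h = (5 * h + 32) >> 6;                    /* per-pixel gradients */
        v = (5 * v + 32) >> 6;
        for (y = 0; y < 16; y++)
            for (x = 0; x < 16; x++)
                dst[y * stride + x] =
                    clip_u8((16 * (top[15] + left[15] + 1)
                             + (x - 7) * h + (y - 7) * v) >> 5);
    }
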
-
 static void pred16x16_plane_svq3_c(uint8_t *src, int stride){
     pred16x16_plane_compat_c(src, stride, 1, 0);
 }
......
     }
 }
 
-static void pred8x8_vertical_c(uint8_t *src, int stride){
-    int i;
-    const uint32_t a= ((uint32_t*)(src-stride))[0];
-    const uint32_t b= ((uint32_t*)(src-stride))[1];
-
-    for(i=0; i<8; i++){
-        ((uint32_t*)(src+i*stride))[0]= a;
-        ((uint32_t*)(src+i*stride))[1]= b;
-    }
-}
-
-static void pred8x8_horizontal_c(uint8_t *src, int stride){
-    int i;
-
-    for(i=0; i<8; i++){
-        ((uint32_t*)(src+i*stride))[0]=
-        ((uint32_t*)(src+i*stride))[1]= src[-1+i*stride]*0x01010101;
-    }
-}
-
-static void pred8x8_128_dc_c(uint8_t *src, int stride){
-    int i;
-
-    for(i=0; i<8; i++){
-        ((uint32_t*)(src+i*stride))[0]=
-        ((uint32_t*)(src+i*stride))[1]= 0x01010101U*128U;
-    }
-}
-
-static void pred8x8_127_dc_c(uint8_t *src, int stride){
-    int i;
-
-    for(i=0; i<8; i++){
-        ((uint32_t*)(src+i*stride))[0]=
-        ((uint32_t*)(src+i*stride))[1]= 0x01010101U*127U;
-    }
-}
-static void pred8x8_129_dc_c(uint8_t *src, int stride){
-    int i;
-
-    for(i=0; i<8; i++){
-        ((uint32_t*)(src+i*stride))[0]=
-        ((uint32_t*)(src+i*stride))[1]= 0x01010101U*129U;
-    }
-}
-
-static void pred8x8_left_dc_c(uint8_t *src, int stride){
-    int i;
-    int dc0, dc2;
-
-    dc0=dc2=0;
-    for(i=0;i<4; i++){
-        dc0+= src[-1+i*stride];
-        dc2+= src[-1+(i+4)*stride];
-    }
-    dc0= 0x01010101*((dc0 + 2)>>2);
-    dc2= 0x01010101*((dc2 + 2)>>2);
-
-    for(i=0; i<4; i++){
-        ((uint32_t*)(src+i*stride))[0]=
-        ((uint32_t*)(src+i*stride))[1]= dc0;
-    }
-    for(i=4; i<8; i++){
-        ((uint32_t*)(src+i*stride))[0]=
-        ((uint32_t*)(src+i*stride))[1]= dc2;
-    }
-}
-
 static void pred8x8_left_dc_rv40_c(uint8_t *src, int stride){
     int i;
     int dc0;
......
     }
 }
 
-static void pred8x8_top_dc_c(uint8_t *src, int stride){
-    int i;
-    int dc0, dc1;
-
-    dc0=dc1=0;
-    for(i=0;i<4; i++){
-        dc0+= src[i-stride];
-        dc1+= src[4+i-stride];
-    }
-    dc0= 0x01010101*((dc0 + 2)>>2);
-    dc1= 0x01010101*((dc1 + 2)>>2);
-
-    for(i=0; i<4; i++){
-        ((uint32_t*)(src+i*stride))[0]= dc0;
-        ((uint32_t*)(src+i*stride))[1]= dc1;
-    }
-    for(i=4; i<8; i++){
-        ((uint32_t*)(src+i*stride))[0]= dc0;
-        ((uint32_t*)(src+i*stride))[1]= dc1;
-    }
-}
-
 static void pred8x8_top_dc_rv40_c(uint8_t *src, int stride){
     int i;
     int dc0;
......
     }
 }
 
-
-static void pred8x8_dc_c(uint8_t *src, int stride){
-    int i;
-    int dc0, dc1, dc2, dc3;
-
-    dc0=dc1=dc2=0;
-    for(i=0;i<4; i++){
-        dc0+= src[-1+i*stride] + src[i-stride];
-        dc1+= src[4+i-stride];
-        dc2+= src[-1+(i+4)*stride];
-    }
-    dc3= 0x01010101*((dc1 + dc2 + 4)>>3);
-    dc0= 0x01010101*((dc0 + 4)>>3);
-    dc1= 0x01010101*((dc1 + 2)>>2);
-    dc2= 0x01010101*((dc2 + 2)>>2);
-
-    for(i=0; i<4; i++){
-        ((uint32_t*)(src+i*stride))[0]= dc0;
-        ((uint32_t*)(src+i*stride))[1]= dc1;
-    }
-    for(i=4; i<8; i++){
-        ((uint32_t*)(src+i*stride))[0]= dc2;
-        ((uint32_t*)(src+i*stride))[1]= dc3;
-    }
-}
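
Note (illustration, not part of this revision): pred8x8_dc_c above gives each 4x4 quadrant of the 8x8 chroma block its own DC value: the top-left quadrant averages its top and left neighbours, the top-right uses only its four top samples, the bottom-left only its four left samples, and the bottom-right combines the top-right and bottom-left sums. The layout it computes:

    /*
     *              top[0..3]  top[4..7]
     *  left[0..3]    dc0        dc1       dc0 = (sum top[0..3] + sum left[0..3] + 4) >> 3
     *  left[4..7]    dc2        dc3       dc1 = (sum top[4..7] + 2) >> 2
     *                                     dc2 = (sum left[4..7] + 2) >> 2
     *                                     dc3 = (sum top[4..7] + sum left[4..7] + 4) >> 3
     */
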
-
-//the following 4 function should not be optimized!
-static void pred8x8_mad_cow_dc_l0t(uint8_t *src, int stride){
-    pred8x8_top_dc_c(src, stride);
-    pred4x4_dc_c(src, NULL, stride);
-}
-
-static void pred8x8_mad_cow_dc_0lt(uint8_t *src, int stride){
-    pred8x8_dc_c(src, stride);
-    pred4x4_top_dc_c(src, NULL, stride);
-}
-
-static void pred8x8_mad_cow_dc_l00(uint8_t *src, int stride){
-    pred8x8_left_dc_c(src, stride);
-    pred4x4_128_dc_c(src + 4*stride    , NULL, stride);
-    pred4x4_128_dc_c(src + 4*stride + 4, NULL, stride);
-}
-
-static void pred8x8_mad_cow_dc_0l0(uint8_t *src, int stride){
-    pred8x8_left_dc_c(src, stride);
-    pred4x4_128_dc_c(src    , NULL, stride);
-    pred4x4_128_dc_c(src + 4, NULL, stride);
-}
-
 static void pred8x8_dc_rv40_c(uint8_t *src, int stride){
     int i;
     int dc0=0;
......
     }
 }
 
-static void pred8x8_plane_c(uint8_t *src, int stride){
-  int j, k;
-  int a;
-  uint8_t *cm = ff_cropTbl + MAX_NEG_CROP;
-  const uint8_t * const src0 = src+3-stride;
-  const uint8_t *src1 = src+4*stride-1;
-  const uint8_t *src2 = src1-2*stride;      // == src+2*stride-1;
-  int H = src0[1] - src0[-1];
-  int V = src1[0] - src2[ 0];
-  for(k=2; k<=4; ++k) {
-    src1 += stride; src2 -= stride;
-    H += k*(src0[k] - src0[-k]);
-    V += k*(src1[0] - src2[ 0]);
-  }
-  H = ( 17*H+16 ) >> 5;
-  V = ( 17*V+16 ) >> 5;
-
-  a = 16*(src1[0] + src2[8]+1) - 3*(V+H);
-  for(j=8; j>0; --j) {
-    int b = a;
-    a += V;
-    src[0] = cm[ (b    ) >> 5 ];
-    src[1] = cm[ (b+  H) >> 5 ];
-    src[2] = cm[ (b+2*H) >> 5 ];
-    src[3] = cm[ (b+3*H) >> 5 ];
-    src[4] = cm[ (b+4*H) >> 5 ];
-    src[5] = cm[ (b+5*H) >> 5 ];
-    src[6] = cm[ (b+6*H) >> 5 ];
-    src[7] = cm[ (b+7*H) >> 5 ];
-    src += stride;
-  }
-}
-
 static void pred8x8_tm_vp8_c(uint8_t *src, int stride){
     uint8_t *cm = ff_cropTbl + MAX_NEG_CROP - src[-1-stride];
     uint8_t *top = src-stride;
......
     }
 }
 
-#define SRC(x,y) src[(x)+(y)*stride]
-#define PL(y) \
-    const int l##y = (SRC(-1,y-1) + 2*SRC(-1,y) + SRC(-1,y+1) + 2) >> 2;
-#define PREDICT_8x8_LOAD_LEFT \
-    const int l0 = ((has_topleft ? SRC(-1,-1) : SRC(-1,0)) \
-                     + 2*SRC(-1,0) + SRC(-1,1) + 2) >> 2; \
-    PL(1) PL(2) PL(3) PL(4) PL(5) PL(6) \
-    const int l7 av_unused = (SRC(-1,6) + 3*SRC(-1,7) + 2) >> 2
-
-#define PT(x) \
-    const int t##x = (SRC(x-1,-1) + 2*SRC(x,-1) + SRC(x+1,-1) + 2) >> 2;
-#define PREDICT_8x8_LOAD_TOP \
-    const int t0 = ((has_topleft ? SRC(-1,-1) : SRC(0,-1)) \
-                     + 2*SRC(0,-1) + SRC(1,-1) + 2) >> 2; \
-    PT(1) PT(2) PT(3) PT(4) PT(5) PT(6) \
-    const int t7 av_unused = ((has_topright ? SRC(8,-1) : SRC(7,-1)) \
-                     + 2*SRC(7,-1) + SRC(6,-1) + 2) >> 2
-
-#define PTR(x) \
-    t##x = (SRC(x-1,-1) + 2*SRC(x,-1) + SRC(x+1,-1) + 2) >> 2;
-#define PREDICT_8x8_LOAD_TOPRIGHT \
-    int t8, t9, t10, t11, t12, t13, t14, t15; \
-    if(has_topright) { \
-        PTR(8) PTR(9) PTR(10) PTR(11) PTR(12) PTR(13) PTR(14) \
-        t15 = (SRC(14,-1) + 3*SRC(15,-1) + 2) >> 2; \
-    } else t8=t9=t10=t11=t12=t13=t14=t15= SRC(7,-1);
-
-#define PREDICT_8x8_LOAD_TOPLEFT \
-    const int lt = (SRC(-1,0) + 2*SRC(-1,-1) + SRC(0,-1) + 2) >> 2
-
-#define PREDICT_8x8_DC(v) \
-    int y; \
-    for( y = 0; y < 8; y++ ) { \
-        ((uint32_t*)src)[0] = \
-        ((uint32_t*)src)[1] = v; \
-        src += stride; \
-    }
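
Note (illustration, not part of this revision): unlike the 4x4 modes, the 8x8 luma predictors smooth their reference samples first. The PL/PT/PTR macros above run the same 1-2-1 filter along the left and top edges (with has_topleft/has_topright deciding whether a diagonal neighbour or a duplicated end sample feeds the filter), and the prediction functions then work on the filtered l0..l7 and t0..t15 values. A rough standalone sketch of that pre-filter for one 8-sample edge when no diagonal neighbours are available (hypothetical names):

    #include <stdint.h>

    static void filter_edge8(uint8_t out[8], const uint8_t in[8])
    {
        int i;
        out[0] = (3 * in[0] + in[1] + 2) >> 2;       /* end sample duplicated */
        for (i = 1; i < 7; i++)
            out[i] = (in[i - 1] + 2 * in[i] + in[i + 1] + 2) >> 2;
        out[7] = (in[6] + 3 * in[7] + 2) >> 2;
    }
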
-
-static void pred8x8l_128_dc_c(uint8_t *src, int has_topleft, int has_topright, int stride)
-{
-    PREDICT_8x8_DC(0x80808080);
-}
-static void pred8x8l_left_dc_c(uint8_t *src, int has_topleft, int has_topright, int stride)
-{
-    PREDICT_8x8_LOAD_LEFT;
-    const uint32_t dc = ((l0+l1+l2+l3+l4+l5+l6+l7+4) >> 3) * 0x01010101;
-    PREDICT_8x8_DC(dc);
-}
-static void pred8x8l_top_dc_c(uint8_t *src, int has_topleft, int has_topright, int stride)
-{
-    PREDICT_8x8_LOAD_TOP;
-    const uint32_t dc = ((t0+t1+t2+t3+t4+t5+t6+t7+4) >> 3) * 0x01010101;
-    PREDICT_8x8_DC(dc);
-}
-static void pred8x8l_dc_c(uint8_t *src, int has_topleft, int has_topright, int stride)
-{
-    PREDICT_8x8_LOAD_LEFT;
-    PREDICT_8x8_LOAD_TOP;
-    const uint32_t dc = ((l0+l1+l2+l3+l4+l5+l6+l7
-                         +t0+t1+t2+t3+t4+t5+t6+t7+8) >> 4) * 0x01010101;
-    PREDICT_8x8_DC(dc);
-}
-static void pred8x8l_horizontal_c(uint8_t *src, int has_topleft, int has_topright, int stride)
-{
-    PREDICT_8x8_LOAD_LEFT;
-#define ROW(y) ((uint32_t*)(src+y*stride))[0] =\
-               ((uint32_t*)(src+y*stride))[1] = 0x01010101 * l##y
-    ROW(0); ROW(1); ROW(2); ROW(3); ROW(4); ROW(5); ROW(6); ROW(7);
-#undef ROW
-}
-static void pred8x8l_vertical_c(uint8_t *src, int has_topleft, int has_topright, int stride)
-{
-    int y;
-    PREDICT_8x8_LOAD_TOP;
-    src[0] = t0;
-    src[1] = t1;
-    src[2] = t2;
-    src[3] = t3;
-    src[4] = t4;
-    src[5] = t5;
-    src[6] = t6;
-    src[7] = t7;
-    for( y = 1; y < 8; y++ )
-        *(uint64_t*)(src+y*stride) = *(uint64_t*)src;
-}
-static void pred8x8l_down_left_c(uint8_t *src, int has_topleft, int has_topright, int stride)
-{
-    PREDICT_8x8_LOAD_TOP;
-    PREDICT_8x8_LOAD_TOPRIGHT;
-    SRC(0,0)= (t0 + 2*t1 + t2 + 2) >> 2;
-    SRC(0,1)=SRC(1,0)= (t1 + 2*t2 + t3 + 2) >> 2;
-    SRC(0,2)=SRC(1,1)=SRC(2,0)= (t2 + 2*t3 + t4 + 2) >> 2;
-    SRC(0,3)=SRC(1,2)=SRC(2,1)=SRC(3,0)= (t3 + 2*t4 + t5 + 2) >> 2;
-    SRC(0,4)=SRC(1,3)=SRC(2,2)=SRC(3,1)=SRC(4,0)= (t4 + 2*t5 + t6 + 2) >> 2;
-    SRC(0,5)=SRC(1,4)=SRC(2,3)=SRC(3,2)=SRC(4,1)=SRC(5,0)= (t5 + 2*t6 + t7 + 2) >> 2;
-    SRC(0,6)=SRC(1,5)=SRC(2,4)=SRC(3,3)=SRC(4,2)=SRC(5,1)=SRC(6,0)= (t6 + 2*t7 + t8 + 2) >> 2;
-    SRC(0,7)=SRC(1,6)=SRC(2,5)=SRC(3,4)=SRC(4,3)=SRC(5,2)=SRC(6,1)=SRC(7,0)= (t7 + 2*t8 + t9 + 2) >> 2;
-    SRC(1,7)=SRC(2,6)=SRC(3,5)=SRC(4,4)=SRC(5,3)=SRC(6,2)=SRC(7,1)= (t8 + 2*t9 + t10 + 2) >> 2;
-    SRC(2,7)=SRC(3,6)=SRC(4,5)=SRC(5,4)=SRC(6,3)=SRC(7,2)= (t9 + 2*t10 + t11 + 2) >> 2;
-    SRC(3,7)=SRC(4,6)=SRC(5,5)=SRC(6,4)=SRC(7,3)= (t10 + 2*t11 + t12 + 2) >> 2;
-    SRC(4,7)=SRC(5,6)=SRC(6,5)=SRC(7,4)= (t11 + 2*t12 + t13 + 2) >> 2;
-    SRC(5,7)=SRC(6,6)=SRC(7,5)= (t12 + 2*t13 + t14 + 2) >> 2;
-    SRC(6,7)=SRC(7,6)= (t13 + 2*t14 + t15 + 2) >> 2;
-    SRC(7,7)= (t14 + 3*t15 + 2) >> 2;
-}
-static void pred8x8l_down_right_c(uint8_t *src, int has_topleft, int has_topright, int stride)
-{
-    PREDICT_8x8_LOAD_TOP;
-    PREDICT_8x8_LOAD_LEFT;
-    PREDICT_8x8_LOAD_TOPLEFT;
-    SRC(0,7)= (l7 + 2*l6 + l5 + 2) >> 2;
-    SRC(0,6)=SRC(1,7)= (l6 + 2*l5 + l4 + 2) >> 2;
-    SRC(0,5)=SRC(1,6)=SRC(2,7)= (l5 + 2*l4 + l3 + 2) >> 2;
-    SRC(0,4)=SRC(1,5)=SRC(2,6)=SRC(3,7)= (l4 + 2*l3 + l2 + 2) >> 2;
-    SRC(0,3)=SRC(1,4)=SRC(2,5)=SRC(3,6)=SRC(4,7)= (l3 + 2*l2 + l1 + 2) >> 2;
-    SRC(0,2)=SRC(1,3)=SRC(2,4)=SRC(3,5)=SRC(4,6)=SRC(5,7)= (l2 + 2*l1 + l0 + 2) >> 2;
-    SRC(0,1)=SRC(1,2)=SRC(2,3)=SRC(3,4)=SRC(4,5)=SRC(5,6)=SRC(6,7)= (l1 + 2*l0 + lt + 2) >> 2;
-    SRC(0,0)=SRC(1,1)=SRC(2,2)=SRC(3,3)=SRC(4,4)=SRC(5,5)=SRC(6,6)=SRC(7,7)= (l0 + 2*lt + t0 + 2) >> 2;
-    SRC(1,0)=SRC(2,1)=SRC(3,2)=SRC(4,3)=SRC(5,4)=SRC(6,5)=SRC(7,6)= (lt + 2*t0 + t1 + 2) >> 2;
-    SRC(2,0)=SRC(3,1)=SRC(4,2)=SRC(5,3)=SRC(6,4)=SRC(7,5)= (t0 + 2*t1 + t2 + 2) >> 2;
-    SRC(3,0)=SRC(4,1)=SRC(5,2)=SRC(6,3)=SRC(7,4)= (t1 + 2*t2 + t3 + 2) >> 2;
-    SRC(4,0)=SRC(5,1)=SRC(6,2)=SRC(7,3)= (t2 + 2*t3 + t4 + 2) >> 2;
-    SRC(5,0)=SRC(6,1)=SRC(7,2)= (t3 + 2*t4 + t5 + 2) >> 2;
-    SRC(6,0)=SRC(7,1)= (t4 + 2*t5 + t6 + 2) >> 2;
-    SRC(7,0)= (t5 + 2*t6 + t7 + 2) >> 2;
-
-}
-static void pred8x8l_vertical_right_c(uint8_t *src, int has_topleft, int has_topright, int stride)
-{
-    PREDICT_8x8_LOAD_TOP;
-    PREDICT_8x8_LOAD_LEFT;
-    PREDICT_8x8_LOAD_TOPLEFT;
-    SRC(0,6)= (l5 + 2*l4 + l3 + 2) >> 2;
-    SRC(0,7)= (l6 + 2*l5 + l4 + 2) >> 2;
-    SRC(0,4)=SRC(1,6)= (l3 + 2*l2 + l1 + 2) >> 2;
-    SRC(0,5)=SRC(1,7)= (l4 + 2*l3 + l2 + 2) >> 2;
-    SRC(0,2)=SRC(1,4)=SRC(2,6)= (l1 + 2*l0 + lt + 2) >> 2;
-    SRC(0,3)=SRC(1,5)=SRC(2,7)= (l2 + 2*l1 + l0 + 2) >> 2;
-    SRC(0,1)=SRC(1,3)=SRC(2,5)=SRC(3,7)= (l0 + 2*lt + t0 + 2) >> 2;
-    SRC(0,0)=SRC(1,2)=SRC(2,4)=SRC(3,6)= (lt + t0 + 1) >> 1;
-    SRC(1,1)=SRC(2,3)=SRC(3,5)=SRC(4,7)= (lt + 2*t0 + t1 + 2) >> 2;
-    SRC(1,0)=SRC(2,2)=SRC(3,4)=SRC(4,6)= (t0 + t1 + 1) >> 1;
-    SRC(2,1)=SRC(3,3)=SRC(4,5)=SRC(5,7)= (t0 + 2*t1 + t2 + 2) >> 2;
-    SRC(2,0)=SRC(3,2)=SRC(4,4)=SRC(5,6)= (t1 + t2 + 1) >> 1;
-    SRC(3,1)=SRC(4,3)=SRC(5,5)=SRC(6,7)= (t1 + 2*t2 + t3 + 2) >> 2;
-    SRC(3,0)=SRC(4,2)=SRC(5,4)=SRC(6,6)= (t2 + t3 + 1) >> 1;
-    SRC(4,1)=SRC(5,3)=SRC(6,5)=SRC(7,7)= (t2 + 2*t3 + t4 + 2) >> 2;
-    SRC(4,0)=SRC(5,2)=SRC(6,4)=SRC(7,6)= (t3 + t4 + 1) >> 1;
-    SRC(5,1)=SRC(6,3)=SRC(7,5)= (t3 + 2*t4 + t5 + 2) >> 2;
-    SRC(5,0)=SRC(6,2)=SRC(7,4)= (t4 + t5 + 1) >> 1;
-    SRC(6,1)=SRC(7,3)= (t4 + 2*t5 + t6 + 2) >> 2;
-    SRC(6,0)=SRC(7,2)= (t5 + t6 + 1) >> 1;
-    SRC(7,1)= (t5 + 2*t6 + t7 + 2) >> 2;
-    SRC(7,0)= (t6 + t7 + 1) >> 1;
-}
-static void pred8x8l_horizontal_down_c(uint8_t *src, int has_topleft, int has_topright, int stride)
-{
-    PREDICT_8x8_LOAD_TOP;
-    PREDICT_8x8_LOAD_LEFT;
-    PREDICT_8x8_LOAD_TOPLEFT;
-    SRC(0,7)= (l6 + l7 + 1) >> 1;
-    SRC(1,7)= (l5 + 2*l6 + l7 + 2) >> 2;
-    SRC(0,6)=SRC(2,7)= (l5 + l6 + 1) >> 1;
-    SRC(1,6)=SRC(3,7)= (l4 + 2*l5 + l6 + 2) >> 2;
-    SRC(0,5)=SRC(2,6)=SRC(4,7)= (l4 + l5 + 1) >> 1;
-    SRC(1,5)=SRC(3,6)=SRC(5,7)= (l3 + 2*l4 + l5 + 2) >> 2;
-    SRC(0,4)=SRC(2,5)=SRC(4,6)=SRC(6,7)= (l3 + l4 + 1) >> 1;
-    SRC(1,4)=SRC(3,5)=SRC(5,6)=SRC(7,7)= (l2 + 2*l3 + l4 + 2) >> 2;
-    SRC(0,3)=SRC(2,4)=SRC(4,5)=SRC(6,6)= (l2 + l3 + 1) >> 1;
-    SRC(1,3)=SRC(3,4)=SRC(5,5)=SRC(7,6)= (l1 + 2*l2 + l3 + 2) >> 2;
-    SRC(0,2)=SRC(2,3)=SRC(4,4)=SRC(6,5)= (l1 + l2 + 1) >> 1;
-    SRC(1,2)=SRC(3,3)=SRC(5,4)=SRC(7,5)= (l0 + 2*l1 + l2 + 2) >> 2;
-    SRC(0,1)=SRC(2,2)=SRC(4,3)=SRC(6,4)= (l0 + l1 + 1) >> 1;
-    SRC(1,1)=SRC(3,2)=SRC(5,3)=SRC(7,4)= (lt + 2*l0 + l1 + 2) >> 2;
-    SRC(0,0)=SRC(2,1)=SRC(4,2)=SRC(6,3)= (lt + l0 + 1) >> 1;
-    SRC(1,0)=SRC(3,1)=SRC(5,2)=SRC(7,3)= (l0 + 2*lt + t0 + 2) >> 2;
-    SRC(2,0)=SRC(4,1)=SRC(6,2)= (t1 + 2*t0 + lt + 2) >> 2;
-    SRC(3,0)=SRC(5,1)=SRC(7,2)= (t2 + 2*t1 + t0 + 2) >> 2;
-    SRC(4,0)=SRC(6,1)= (t3 + 2*t2 + t1 + 2) >> 2;
-    SRC(5,0)=SRC(7,1)= (t4 + 2*t3 + t2 + 2) >> 2;
-    SRC(6,0)= (t5 + 2*t4 + t3 + 2) >> 2;
-    SRC(7,0)= (t6 + 2*t5 + t4 + 2) >> 2;
-}
-static void pred8x8l_vertical_left_c(uint8_t *src, int has_topleft, int has_topright, int stride)
-{
-    PREDICT_8x8_LOAD_TOP;
-    PREDICT_8x8_LOAD_TOPRIGHT;
-    SRC(0,0)= (t0 + t1 + 1) >> 1;
-    SRC(0,1)= (t0 + 2*t1 + t2 + 2) >> 2;
-    SRC(0,2)=SRC(1,0)= (t1 + t2 + 1) >> 1;
-    SRC(0,3)=SRC(1,1)= (t1 + 2*t2 + t3 + 2) >> 2;
-    SRC(0,4)=SRC(1,2)=SRC(2,0)= (t2 + t3 + 1) >> 1;
-    SRC(0,5)=SRC(1,3)=SRC(2,1)= (t2 + 2*t3 + t4 + 2) >> 2;
-    SRC(0,6)=SRC(1,4)=SRC(2,2)=SRC(3,0)= (t3 + t4 + 1) >> 1;
-    SRC(0,7)=SRC(1,5)=SRC(2,3)=SRC(3,1)= (t3 + 2*t4 + t5 + 2) >> 2;
-    SRC(1,6)=SRC(2,4)=SRC(3,2)=SRC(4,0)= (t4 + t5 + 1) >> 1;
-    SRC(1,7)=SRC(2,5)=SRC(3,3)=SRC(4,1)= (t4 + 2*t5 + t6 + 2) >> 2;
-    SRC(2,6)=SRC(3,4)=SRC(4,2)=SRC(5,0)= (t5 + t6 + 1) >> 1;
-    SRC(2,7)=SRC(3,5)=SRC(4,3)=SRC(5,1)= (t5 + 2*t6 + t7 + 2) >> 2;
-    SRC(3,6)=SRC(4,4)=SRC(5,2)=SRC(6,0)= (t6 + t7 + 1) >> 1;
-    SRC(3,7)=SRC(4,5)=SRC(5,3)=SRC(6,1)= (t6 + 2*t7 + t8 + 2) >> 2;
-    SRC(4,6)=SRC(5,4)=SRC(6,2)=SRC(7,0)= (t7 + t8 + 1) >> 1;
-    SRC(4,7)=SRC(5,5)=SRC(6,3)=SRC(7,1)= (t7 + 2*t8 + t9 + 2) >> 2;
-    SRC(5,6)=SRC(6,4)=SRC(7,2)= (t8 + t9 + 1) >> 1;
-    SRC(5,7)=SRC(6,5)=SRC(7,3)= (t8 + 2*t9 + t10 + 2) >> 2;
-    SRC(6,6)=SRC(7,4)= (t9 + t10 + 1) >> 1;
-    SRC(6,7)=SRC(7,5)= (t9 + 2*t10 + t11 + 2) >> 2;
-    SRC(7,6)= (t10 + t11 + 1) >> 1;
-    SRC(7,7)= (t10 + 2*t11 + t12 + 2) >> 2;
-}
-static void pred8x8l_horizontal_up_c(uint8_t *src, int has_topleft, int has_topright, int stride)
-{
-    PREDICT_8x8_LOAD_LEFT;
-    SRC(0,0)= (l0 + l1 + 1) >> 1;
-    SRC(1,0)= (l0 + 2*l1 + l2 + 2) >> 2;
-    SRC(0,1)=SRC(2,0)= (l1 + l2 + 1) >> 1;
-    SRC(1,1)=SRC(3,0)= (l1 + 2*l2 + l3 + 2) >> 2;
-    SRC(0,2)=SRC(2,1)=SRC(4,0)= (l2 + l3 + 1) >> 1;
-    SRC(1,2)=SRC(3,1)=SRC(5,0)= (l2 + 2*l3 + l4 + 2) >> 2;
-    SRC(0,3)=SRC(2,2)=SRC(4,1)=SRC(6,0)= (l3 + l4 + 1) >> 1;
-    SRC(1,3)=SRC(3,2)=SRC(5,1)=SRC(7,0)= (l3 + 2*l4 + l5 + 2) >> 2;
-    SRC(0,4)=SRC(2,3)=SRC(4,2)=SRC(6,1)= (l4 + l5 + 1) >> 1;
-    SRC(1,4)=SRC(3,3)=SRC(5,2)=SRC(7,1)= (l4 + 2*l5 + l6 + 2) >> 2;
-    SRC(0,5)=SRC(2,4)=SRC(4,3)=SRC(6,2)= (l5 + l6 + 1) >> 1;
-    SRC(1,5)=SRC(3,4)=SRC(5,3)=SRC(7,2)= (l5 + 2*l6 + l7 + 2) >> 2;
-    SRC(0,6)=SRC(2,5)=SRC(4,4)=SRC(6,3)= (l6 + l7 + 1) >> 1;
-    SRC(1,6)=SRC(3,5)=SRC(5,4)=SRC(7,3)= (l6 + 3*l7 + 2) >> 2;
-    SRC(0,7)=SRC(1,7)=SRC(2,6)=SRC(2,7)=SRC(3,6)=
-    SRC(3,7)=SRC(4,5)=SRC(4,6)=SRC(4,7)=SRC(5,5)=
-    SRC(5,6)=SRC(5,7)=SRC(6,4)=SRC(6,5)=SRC(6,6)=
-    SRC(6,7)=SRC(7,4)=SRC(7,5)=SRC(7,6)=SRC(7,7)= l7;
-}
-#undef PREDICT_8x8_LOAD_LEFT
-#undef PREDICT_8x8_LOAD_TOP
-#undef PREDICT_8x8_LOAD_TOPLEFT
-#undef PREDICT_8x8_LOAD_TOPRIGHT
-#undef PREDICT_8x8_DC
-#undef PTR
-#undef PT
-#undef PL
-#undef SRC
-
-static void pred4x4_vertical_add_c(uint8_t *pix, const DCTELEM *block, int stride){
-    int i;
-    pix -= stride;
-    for(i=0; i<4; i++){
-        uint8_t v = pix[0];
-        pix[1*stride]= v += block[0];
-        pix[2*stride]= v += block[4];
-        pix[3*stride]= v += block[8];
-        pix[4*stride]= v +  block[12];
-        pix++;
-        block++;
-    }
-}
-
-static void pred4x4_horizontal_add_c(uint8_t *pix, const DCTELEM *block, int stride){
-    int i;
-    for(i=0; i<4; i++){
-        uint8_t v = pix[-1];
-        pix[0]= v += block[0];
-        pix[1]= v += block[1];
-        pix[2]= v += block[2];
-        pix[3]= v +  block[3];
-        pix+= stride;
-        block+= 4;
-    }
-}
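
Note (illustration, not part of this revision): the *_add functions fuse prediction with the residual add, as used when prediction and residual are combined sample by sample (e.g. H.264 lossless, transform-bypass mode). In the vertical variant each output pixel is the pixel directly above it plus the running sum of its column's residuals, which the chained `v += block[...]` expresses; v is uint8_t, so the sum wraps modulo 256 exactly like the original. One column of that pattern, with int16_t standing in for DCTELEM:

    #include <stdint.h>

    /* pix points at the top row of a 4-pixel column; pix[-stride] is the
     * already-reconstructed neighbour above it.  residual holds the column's
     * coefficients, spaced 4 apart as in the 4x4 block layout.            */
    static void vertical_add_col4(uint8_t *pix, const int16_t *residual, int stride)
    {
        uint8_t v = pix[-stride];
        for (int y = 0; y < 4; y++) {
            v = (uint8_t)(v + residual[y * 4]);
            pix[y * stride] = v;
        }
    }
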
-
-static void pred8x8l_vertical_add_c(uint8_t *pix, const DCTELEM *block, int stride){
-    int i;
-    pix -= stride;
-    for(i=0; i<8; i++){
-        uint8_t v = pix[0];
-        pix[1*stride]= v += block[0];
-        pix[2*stride]= v += block[8];
-        pix[3*stride]= v += block[16];
-        pix[4*stride]= v += block[24];
-        pix[5*stride]= v += block[32];
-        pix[6*stride]= v += block[40];
-        pix[7*stride]= v += block[48];
-        pix[8*stride]= v +  block[56];
-        pix++;
-        block++;
-    }
-}
-
-static void pred8x8l_horizontal_add_c(uint8_t *pix, const DCTELEM *block, int stride){
-    int i;
-    for(i=0; i<8; i++){
-        uint8_t v = pix[-1];
-        pix[0]= v += block[0];
-        pix[1]= v += block[1];
-        pix[2]= v += block[2];
-        pix[3]= v += block[3];
-        pix[4]= v += block[4];
-        pix[5]= v += block[5];
-        pix[6]= v += block[6];
-        pix[7]= v +  block[7];
-        pix+= stride;
-        block+= 8;
-    }
-}
-
-static void pred16x16_vertical_add_c(uint8_t *pix, const int *block_offset, const DCTELEM *block, int stride){
-    int i;
-    for(i=0; i<16; i++)
-        pred4x4_vertical_add_c(pix + block_offset[i], block + i*16, stride);
-}
-
-static void pred16x16_horizontal_add_c(uint8_t *pix, const int *block_offset, const DCTELEM *block, int stride){
-    int i;
-    for(i=0; i<16; i++)
-        pred4x4_horizontal_add_c(pix + block_offset[i], block + i*16, stride);
-}
-
-static void pred8x8_vertical_add_c(uint8_t *pix, const int *block_offset, const DCTELEM *block, int stride){
-    int i;
-    for(i=0; i<4; i++)
-        pred4x4_vertical_add_c(pix + block_offset[i], block + i*16, stride);
-}
-
-static void pred8x8_horizontal_add_c(uint8_t *pix, const int *block_offset, const DCTELEM *block, int stride){
-    int i;
-    for(i=0; i<4; i++)
-        pred4x4_horizontal_add_c(pix + block_offset[i], block + i*16, stride);
-}
-
-
 /**
  * Set the intra prediction function pointers.
  */
