/*
 * H.26L/H.264/AVC/JVT/14496-10/... encoder/decoder
 * Copyright (c) 2003 Michael Niedermayer <michaelni@gmx.at>
 *
 * This file is part of Libav.
 *
 * Libav is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * Libav is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with Libav; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file
 * H.264 / AVC / MPEG4 part10 prediction functions.
 * @author Michael Niedermayer <michaelni@gmx.at>
 */

#include "avcodec.h"
#include "mpegvideo.h"
#include "h264pred.h"
#include "mathops.h"

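/* 4x4 intra predictors: each function fills a 4x4 block at src from
 * already-decoded neighbours (the row above at src - stride, the column to
 * the left at src - 1 and, for some modes, the four above-right samples
 * passed via topright); stride is the distance between rows. */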
static void pred4x4_vertical_c(uint8_t *src, const uint8_t *topright, int stride){
    const uint32_t a= ((uint32_t*)(src-stride))[0];
    ((uint32_t*)(src+0*stride))[0]= a;
    ((uint32_t*)(src+1*stride))[0]= a;
    ((uint32_t*)(src+2*stride))[0]= a;
    ((uint32_t*)(src+3*stride))[0]= a;
}

static void pred4x4_horizontal_c(uint8_t *src, const uint8_t *topright, int stride){
    ((uint32_t*)(src+0*stride))[0]= src[-1+0*stride]*0x01010101;
    ((uint32_t*)(src+1*stride))[0]= src[-1+1*stride]*0x01010101;
    ((uint32_t*)(src+2*stride))[0]= src[-1+2*stride]*0x01010101;
    ((uint32_t*)(src+3*stride))[0]= src[-1+3*stride]*0x01010101;
}

static void pred4x4_dc_c(uint8_t *src, const uint8_t *topright, int stride){
    const int dc= (  src[-stride] + src[1-stride] + src[2-stride] + src[3-stride]
                   + src[-1+0*stride] + src[-1+1*stride] + src[-1+2*stride] + src[-1+3*stride] + 4) >>3;

    ((uint32_t*)(src+0*stride))[0]=
    ((uint32_t*)(src+1*stride))[0]=
    ((uint32_t*)(src+2*stride))[0]=
    ((uint32_t*)(src+3*stride))[0]= dc* 0x01010101;
}

static void pred4x4_left_dc_c(uint8_t *src, const uint8_t *topright, int stride){
    const int dc= (  src[-1+0*stride] + src[-1+1*stride] + src[-1+2*stride] + src[-1+3*stride] + 2) >>2;

    ((uint32_t*)(src+0*stride))[0]=
    ((uint32_t*)(src+1*stride))[0]=
    ((uint32_t*)(src+2*stride))[0]=
    ((uint32_t*)(src+3*stride))[0]= dc* 0x01010101;
}

static void pred4x4_top_dc_c(uint8_t *src, const uint8_t *topright, int stride){
    const int dc= (  src[-stride] + src[1-stride] + src[2-stride] + src[3-stride] + 2) >>2;

    ((uint32_t*)(src+0*stride))[0]=
    ((uint32_t*)(src+1*stride))[0]=
    ((uint32_t*)(src+2*stride))[0]=
    ((uint32_t*)(src+3*stride))[0]= dc* 0x01010101;
}

static void pred4x4_128_dc_c(uint8_t *src, const uint8_t *topright, int stride){
    ((uint32_t*)(src+0*stride))[0]=
    ((uint32_t*)(src+1*stride))[0]=
    ((uint32_t*)(src+2*stride))[0]=
    ((uint32_t*)(src+3*stride))[0]= 128U*0x01010101U;
}

static void pred4x4_127_dc_c(uint8_t *src, const uint8_t *topright, int stride){
    ((uint32_t*)(src+0*stride))[0]=
    ((uint32_t*)(src+1*stride))[0]=
    ((uint32_t*)(src+2*stride))[0]=
    ((uint32_t*)(src+3*stride))[0]= 127U*0x01010101U;
}

static void pred4x4_129_dc_c(uint8_t *src, const uint8_t *topright, int stride){
    ((uint32_t*)(src+0*stride))[0]=
    ((uint32_t*)(src+1*stride))[0]=
    ((uint32_t*)(src+2*stride))[0]=
    ((uint32_t*)(src+3*stride))[0]= 129U*0x01010101U;
}

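/* Edge loaders used by the directional predictors below: t0..t7 are the
 * samples above and above-right of the block, l0..l7 the samples to its
 * left and below-left. */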
#define LOAD_TOP_RIGHT_EDGE\
    const int av_unused t4= topright[0];\
    const int av_unused t5= topright[1];\
    const int av_unused t6= topright[2];\
    const int av_unused t7= topright[3];\

#define LOAD_DOWN_LEFT_EDGE\
    const int av_unused l4= src[-1+4*stride];\
    const int av_unused l5= src[-1+5*stride];\
    const int av_unused l6= src[-1+6*stride];\
    const int av_unused l7= src[-1+7*stride];\

#define LOAD_LEFT_EDGE\
    const int av_unused l0= src[-1+0*stride];\
    const int av_unused l1= src[-1+1*stride];\
    const int av_unused l2= src[-1+2*stride];\
    const int av_unused l3= src[-1+3*stride];\

#define LOAD_TOP_EDGE\
    const int av_unused t0= src[ 0-1*stride];\
    const int av_unused t1= src[ 1-1*stride];\
    const int av_unused t2= src[ 2-1*stride];\
    const int av_unused t3= src[ 3-1*stride];\

static void pred4x4_vertical_vp8_c(uint8_t *src, const uint8_t *topright, int stride){
    const int lt= src[-1-1*stride];
    LOAD_TOP_EDGE
    LOAD_TOP_RIGHT_EDGE
    uint32_t v = PACK_4U8((lt + 2*t0 + t1 + 2) >> 2,
                            (t0 + 2*t1 + t2 + 2) >> 2,
                            (t1 + 2*t2 + t3 + 2) >> 2,
                            (t2 + 2*t3 + t4 + 2) >> 2);

    AV_WN32A(src+0*stride, v);
    AV_WN32A(src+1*stride, v);
    AV_WN32A(src+2*stride, v);
    AV_WN32A(src+3*stride, v);
}

static void pred4x4_horizontal_vp8_c(uint8_t *src, const uint8_t *topright, int stride){
    const int lt= src[-1-1*stride];
    LOAD_LEFT_EDGE

    AV_WN32A(src+0*stride, ((lt + 2*l0 + l1 + 2) >> 2)*0x01010101);
    AV_WN32A(src+1*stride, ((l0 + 2*l1 + l2 + 2) >> 2)*0x01010101);
    AV_WN32A(src+2*stride, ((l1 + 2*l2 + l3 + 2) >> 2)*0x01010101);
    AV_WN32A(src+3*stride, ((l2 + 2*l3 + l3 + 2) >> 2)*0x01010101);
}

static void pred4x4_down_right_c(uint8_t *src, const uint8_t *topright, int stride){
    const int lt= src[-1-1*stride];
    LOAD_TOP_EDGE
    LOAD_LEFT_EDGE

    src[0+3*stride]=(l3 + 2*l2 + l1 + 2)>>2;
    src[0+2*stride]=
    src[1+3*stride]=(l2 + 2*l1 + l0 + 2)>>2;
    src[0+1*stride]=
    src[1+2*stride]=
    src[2+3*stride]=(l1 + 2*l0 + lt + 2)>>2;
    src[0+0*stride]=
    src[1+1*stride]=
    src[2+2*stride]=
    src[3+3*stride]=(l0 + 2*lt + t0 + 2)>>2;
    src[1+0*stride]=
    src[2+1*stride]=
    src[3+2*stride]=(lt + 2*t0 + t1 + 2)>>2;
    src[2+0*stride]=
    src[3+1*stride]=(t0 + 2*t1 + t2 + 2)>>2;
    src[3+0*stride]=(t1 + 2*t2 + t3 + 2)>>2;
}

static void pred4x4_down_left_c(uint8_t *src, const uint8_t *topright, int stride){
    LOAD_TOP_EDGE
    LOAD_TOP_RIGHT_EDGE
//    LOAD_LEFT_EDGE

    src[0+0*stride]=(t0 + t2 + 2*t1 + 2)>>2;
    src[1+0*stride]=
    src[0+1*stride]=(t1 + t3 + 2*t2 + 2)>>2;
    src[2+0*stride]=
    src[1+1*stride]=
    src[0+2*stride]=(t2 + t4 + 2*t3 + 2)>>2;
    src[3+0*stride]=
    src[2+1*stride]=
    src[1+2*stride]=
    src[0+3*stride]=(t3 + t5 + 2*t4 + 2)>>2;
    src[3+1*stride]=
    src[2+2*stride]=
    src[1+3*stride]=(t4 + t6 + 2*t5 + 2)>>2;
    src[3+2*stride]=
    src[2+3*stride]=(t5 + t7 + 2*t6 + 2)>>2;
    src[3+3*stride]=(t6 + 3*t7 + 2)>>2;
}

static void pred4x4_down_left_svq3_c(uint8_t *src, const uint8_t *topright, int stride){
    LOAD_TOP_EDGE
    LOAD_LEFT_EDGE
    const av_unused int unu0= t0;
    const av_unused int unu1= l0;

    src[0+0*stride]=(l1 + t1)>>1;
    src[1+0*stride]=
    src[0+1*stride]=(l2 + t2)>>1;
    src[2+0*stride]=
    src[1+1*stride]=
    src[0+2*stride]=
    src[3+0*stride]=
    src[2+1*stride]=
    src[1+2*stride]=
    src[0+3*stride]=
    src[3+1*stride]=
    src[2+2*stride]=
    src[1+3*stride]=
    src[3+2*stride]=
    src[2+3*stride]=
    src[3+3*stride]=(l3 + t3)>>1;
}

static void pred4x4_down_left_rv40_c(uint8_t *src, const uint8_t *topright, int stride){
    LOAD_TOP_EDGE
    LOAD_TOP_RIGHT_EDGE
    LOAD_LEFT_EDGE
    LOAD_DOWN_LEFT_EDGE

    src[0+0*stride]=(t0 + t2 + 2*t1 + 2 + l0 + l2 + 2*l1 + 2)>>3;
    src[1+0*stride]=
    src[0+1*stride]=(t1 + t3 + 2*t2 + 2 + l1 + l3 + 2*l2 + 2)>>3;
    src[2+0*stride]=
    src[1+1*stride]=
    src[0+2*stride]=(t2 + t4 + 2*t3 + 2 + l2 + l4 + 2*l3 + 2)>>3;
    src[3+0*stride]=
    src[2+1*stride]=
    src[1+2*stride]=
    src[0+3*stride]=(t3 + t5 + 2*t4 + 2 + l3 + l5 + 2*l4 + 2)>>3;
    src[3+1*stride]=
    src[2+2*stride]=
    src[1+3*stride]=(t4 + t6 + 2*t5 + 2 + l4 + l6 + 2*l5 + 2)>>3;
    src[3+2*stride]=
    src[2+3*stride]=(t5 + t7 + 2*t6 + 2 + l5 + l7 + 2*l6 + 2)>>3;
    src[3+3*stride]=(t6 + t7 + 1 + l6 + l7 + 1)>>2;
}

static void pred4x4_down_left_rv40_nodown_c(uint8_t *src, const uint8_t *topright, int stride){
    LOAD_TOP_EDGE
    LOAD_TOP_RIGHT_EDGE
    LOAD_LEFT_EDGE

    src[0+0*stride]=(t0 + t2 + 2*t1 + 2 + l0 + l2 + 2*l1 + 2)>>3;
    src[1+0*stride]=
    src[0+1*stride]=(t1 + t3 + 2*t2 + 2 + l1 + l3 + 2*l2 + 2)>>3;
    src[2+0*stride]=
    src[1+1*stride]=
    src[0+2*stride]=(t2 + t4 + 2*t3 + 2 + l2 + 3*l3 + 2)>>3;
    src[3+0*stride]=
    src[2+1*stride]=
    src[1+2*stride]=
    src[0+3*stride]=(t3 + t5 + 2*t4 + 2 + l3*4 + 2)>>3;
    src[3+1*stride]=
    src[2+2*stride]=
    src[1+3*stride]=(t4 + t6 + 2*t5 + 2 + l3*4 + 2)>>3;
    src[3+2*stride]=
    src[2+3*stride]=(t5 + t7 + 2*t6 + 2 + l3*4 + 2)>>3;
    src[3+3*stride]=(t6 + t7 + 1 + 2*l3 + 1)>>2;
}

static void pred4x4_vertical_right_c(uint8_t *src, const uint8_t *topright, int stride){
    const int lt= src[-1-1*stride];
    LOAD_TOP_EDGE
    LOAD_LEFT_EDGE

    src[0+0*stride]=
    src[1+2*stride]=(lt + t0 + 1)>>1;
    src[1+0*stride]=
    src[2+2*stride]=(t0 + t1 + 1)>>1;
    src[2+0*stride]=
    src[3+2*stride]=(t1 + t2 + 1)>>1;
    src[3+0*stride]=(t2 + t3 + 1)>>1;
    src[0+1*stride]=
    src[1+3*stride]=(l0 + 2*lt + t0 + 2)>>2;
    src[1+1*stride]=
    src[2+3*stride]=(lt + 2*t0 + t1 + 2)>>2;
    src[2+1*stride]=
    src[3+3*stride]=(t0 + 2*t1 + t2 + 2)>>2;
    src[3+1*stride]=(t1 + 2*t2 + t3 + 2)>>2;
    src[0+2*stride]=(lt + 2*l0 + l1 + 2)>>2;
    src[0+3*stride]=(l0 + 2*l1 + l2 + 2)>>2;
}

static void pred4x4_vertical_left_c(uint8_t *src, const uint8_t *topright, int stride){
    LOAD_TOP_EDGE
    LOAD_TOP_RIGHT_EDGE

    src[0+0*stride]=(t0 + t1 + 1)>>1;
    src[1+0*stride]=
    src[0+2*stride]=(t1 + t2 + 1)>>1;
    src[2+0*stride]=
    src[1+2*stride]=(t2 + t3 + 1)>>1;
    src[3+0*stride]=
    src[2+2*stride]=(t3 + t4+ 1)>>1;
    src[3+2*stride]=(t4 + t5+ 1)>>1;
    src[0+1*stride]=(t0 + 2*t1 + t2 + 2)>>2;
    src[1+1*stride]=
    src[0+3*stride]=(t1 + 2*t2 + t3 + 2)>>2;
    src[2+1*stride]=
    src[1+3*stride]=(t2 + 2*t3 + t4 + 2)>>2;
    src[3+1*stride]=
    src[2+3*stride]=(t3 + 2*t4 + t5 + 2)>>2;
    src[3+3*stride]=(t4 + 2*t5 + t6 + 2)>>2;
}

static void pred4x4_vertical_left_rv40(uint8_t *src, const uint8_t *topright, int stride,
                                      const int l0, const int l1, const int l2, const int l3, const int l4){
    LOAD_TOP_EDGE
    LOAD_TOP_RIGHT_EDGE

    src[0+0*stride]=(2*t0 + 2*t1 + l1 + 2*l2 + l3 + 4)>>3;
    src[1+0*stride]=
    src[0+2*stride]=(t1 + t2 + 1)>>1;
    src[2+0*stride]=
    src[1+2*stride]=(t2 + t3 + 1)>>1;
    src[3+0*stride]=
    src[2+2*stride]=(t3 + t4+ 1)>>1;
    src[3+2*stride]=(t4 + t5+ 1)>>1;
    src[0+1*stride]=(t0 + 2*t1 + t2 + l2 + 2*l3 + l4 + 4)>>3;
    src[1+1*stride]=
    src[0+3*stride]=(t1 + 2*t2 + t3 + 2)>>2;
    src[2+1*stride]=
    src[1+3*stride]=(t2 + 2*t3 + t4 + 2)>>2;
    src[3+1*stride]=
    src[2+3*stride]=(t3 + 2*t4 + t5 + 2)>>2;
    src[3+3*stride]=(t4 + 2*t5 + t6 + 2)>>2;
}

static void pred4x4_vertical_left_rv40_c(uint8_t *src, const uint8_t *topright, int stride){
    LOAD_LEFT_EDGE
    LOAD_DOWN_LEFT_EDGE

    pred4x4_vertical_left_rv40(src, topright, stride, l0, l1, l2, l3, l4);
}

static void pred4x4_vertical_left_rv40_nodown_c(uint8_t *src, const uint8_t *topright, int stride){
    LOAD_LEFT_EDGE

    pred4x4_vertical_left_rv40(src, topright, stride, l0, l1, l2, l3, l3);
}

static void pred4x4_vertical_left_vp8_c(uint8_t *src, const uint8_t *topright, int stride){
    LOAD_TOP_EDGE
    LOAD_TOP_RIGHT_EDGE

    src[0+0*stride]=(t0 + t1 + 1)>>1;
    src[1+0*stride]=
    src[0+2*stride]=(t1 + t2 + 1)>>1;
    src[2+0*stride]=
    src[1+2*stride]=(t2 + t3 + 1)>>1;
    src[3+0*stride]=
    src[2+2*stride]=(t3 + t4 + 1)>>1;
    src[0+1*stride]=(t0 + 2*t1 + t2 + 2)>>2;
    src[1+1*stride]=
    src[0+3*stride]=(t1 + 2*t2 + t3 + 2)>>2;
    src[2+1*stride]=
    src[1+3*stride]=(t2 + 2*t3 + t4 + 2)>>2;
    src[3+1*stride]=
    src[2+3*stride]=(t3 + 2*t4 + t5 + 2)>>2;
    src[3+2*stride]=(t4 + 2*t5 + t6 + 2)>>2;
    src[3+3*stride]=(t5 + 2*t6 + t7 + 2)>>2;
}

static void pred4x4_horizontal_up_c(uint8_t *src, const uint8_t *topright, int stride){
    LOAD_LEFT_EDGE

    src[0+0*stride]=(l0 + l1 + 1)>>1;
    src[1+0*stride]=(l0 + 2*l1 + l2 + 2)>>2;
    src[2+0*stride]=
    src[0+1*stride]=(l1 + l2 + 1)>>1;
    src[3+0*stride]=
    src[1+1*stride]=(l1 + 2*l2 + l3 + 2)>>2;
    src[2+1*stride]=
    src[0+2*stride]=(l2 + l3 + 1)>>1;
    src[3+1*stride]=
    src[1+2*stride]=(l2 + 2*l3 + l3 + 2)>>2;
    src[3+2*stride]=
    src[1+3*stride]=
    src[0+3*stride]=
    src[2+2*stride]=
    src[2+3*stride]=
    src[3+3*stride]=l3;
}

static void pred4x4_horizontal_up_rv40_c(uint8_t *src, const uint8_t *topright, int stride){
    LOAD_LEFT_EDGE
    LOAD_DOWN_LEFT_EDGE
    LOAD_TOP_EDGE
    LOAD_TOP_RIGHT_EDGE

    src[0+0*stride]=(t1 + 2*t2 + t3 + 2*l0 + 2*l1 + 4)>>3;
    src[1+0*stride]=(t2 + 2*t3 + t4 + l0 + 2*l1 + l2 + 4)>>3;
    src[2+0*stride]=
    src[0+1*stride]=(t3 + 2*t4 + t5 + 2*l1 + 2*l2 + 4)>>3;
    src[3+0*stride]=
    src[1+1*stride]=(t4 + 2*t5 + t6 + l1 + 2*l2 + l3 + 4)>>3;
    src[2+1*stride]=
    src[0+2*stride]=(t5 + 2*t6 + t7 + 2*l2 + 2*l3 + 4)>>3;
    src[3+1*stride]=
    src[1+2*stride]=(t6 + 3*t7 + l2 + 3*l3 + 4)>>3;
    src[3+2*stride]=
    src[1+3*stride]=(l3 + 2*l4 + l5 + 2)>>2;
    src[0+3*stride]=
    src[2+2*stride]=(t6 + t7 + l3 + l4 + 2)>>2;
    src[2+3*stride]=(l4 + l5 + 1)>>1;
    src[3+3*stride]=(l4 + 2*l5 + l6 + 2)>>2;
}

static void pred4x4_horizontal_up_rv40_nodown_c(uint8_t *src, const uint8_t *topright, int stride){
    LOAD_LEFT_EDGE
    LOAD_TOP_EDGE
    LOAD_TOP_RIGHT_EDGE

    src[0+0*stride]=(t1 + 2*t2 + t3 + 2*l0 + 2*l1 + 4)>>3;
    src[1+0*stride]=(t2 + 2*t3 + t4 + l0 + 2*l1 + l2 + 4)>>3;
    src[2+0*stride]=
    src[0+1*stride]=(t3 + 2*t4 + t5 + 2*l1 + 2*l2 + 4)>>3;
    src[3+0*stride]=
    src[1+1*stride]=(t4 + 2*t5 + t6 + l1 + 2*l2 + l3 + 4)>>3;
    src[2+1*stride]=
    src[0+2*stride]=(t5 + 2*t6 + t7 + 2*l2 + 2*l3 + 4)>>3;
    src[3+1*stride]=
    src[1+2*stride]=(t6 + 3*t7 + l2 + 3*l3 + 4)>>3;
    src[3+2*stride]=
    src[1+3*stride]=l3;
    src[0+3*stride]=
    src[2+2*stride]=(t6 + t7 + 2*l3 + 2)>>2;
    src[2+3*stride]=
    src[3+3*stride]=l3;
}

static void pred4x4_horizontal_down_c(uint8_t *src, const uint8_t *topright, int stride){
    const int lt= src[-1-1*stride];
    LOAD_TOP_EDGE
    LOAD_LEFT_EDGE

    src[0+0*stride]=
    src[2+1*stride]=(lt + l0 + 1)>>1;
    src[1+0*stride]=
    src[3+1*stride]=(l0 + 2*lt + t0 + 2)>>2;
    src[2+0*stride]=(lt + 2*t0 + t1 + 2)>>2;
    src[3+0*stride]=(t0 + 2*t1 + t2 + 2)>>2;
    src[0+1*stride]=
    src[2+2*stride]=(l0 + l1 + 1)>>1;
    src[1+1*stride]=
    src[3+2*stride]=(lt + 2*l0 + l1 + 2)>>2;
    src[0+2*stride]=
    src[2+3*stride]=(l1 + l2+ 1)>>1;
    src[1+2*stride]=
    src[3+3*stride]=(l0 + 2*l1 + l2 + 2)>>2;
    src[0+3*stride]=(l2 + l3 + 1)>>1;
    src[1+3*stride]=(l1 + 2*l2 + l3 + 2)>>2;
}

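/* VP8 "TrueMotion" prediction: every pixel is predicted as
 * left + top - topleft, clamped to 0..255 through the crop table. */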
static void pred4x4_tm_vp8_c(uint8_t *src, const uint8_t *topright, int stride){
    uint8_t *cm = ff_cropTbl + MAX_NEG_CROP - src[-1-stride];
    uint8_t *top = src-stride;
    int y;

    for (y = 0; y < 4; y++) {
        uint8_t *cm_in = cm + src[-1];
        src[0] = cm_in[top[0]];
        src[1] = cm_in[top[1]];
        src[2] = cm_in[top[2]];
        src[3] = cm_in[top[3]];
        src += stride;
    }
}

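/* 16x16 luma predictors: the same vertical/horizontal/DC modes applied to a
 * whole macroblock. */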
static void pred16x16_vertical_c(uint8_t *src, int stride){
    int i;
    const uint32_t a= ((uint32_t*)(src-stride))[0];
    const uint32_t b= ((uint32_t*)(src-stride))[1];
    const uint32_t c= ((uint32_t*)(src-stride))[2];
    const uint32_t d= ((uint32_t*)(src-stride))[3];

    for(i=0; i<16; i++){
        ((uint32_t*)(src+i*stride))[0]= a;
        ((uint32_t*)(src+i*stride))[1]= b;
        ((uint32_t*)(src+i*stride))[2]= c;
        ((uint32_t*)(src+i*stride))[3]= d;
    }
}

static void pred16x16_horizontal_c(uint8_t *src, int stride){
    int i;

    for(i=0; i<16; i++){
        ((uint32_t*)(src+i*stride))[0]=
        ((uint32_t*)(src+i*stride))[1]=
        ((uint32_t*)(src+i*stride))[2]=
        ((uint32_t*)(src+i*stride))[3]= src[-1+i*stride]*0x01010101;
    }
}

static void pred16x16_dc_c(uint8_t *src, int stride){
    int i, dc=0;

    for(i=0;i<16; i++){
        dc+= src[-1+i*stride];
    }

    for(i=0;i<16; i++){
        dc+= src[i-stride];
    }

    dc= 0x01010101*((dc + 16)>>5);

    for(i=0; i<16; i++){
        ((uint32_t*)(src+i*stride))[0]=
        ((uint32_t*)(src+i*stride))[1]=
        ((uint32_t*)(src+i*stride))[2]=
        ((uint32_t*)(src+i*stride))[3]= dc;
    }
}

static void pred16x16_left_dc_c(uint8_t *src, int stride){
    int i, dc=0;

    for(i=0;i<16; i++){
        dc+= src[-1+i*stride];
    }

    dc= 0x01010101*((dc + 8)>>4);

    for(i=0; i<16; i++){
        ((uint32_t*)(src+i*stride))[0]=
        ((uint32_t*)(src+i*stride))[1]=
        ((uint32_t*)(src+i*stride))[2]=
        ((uint32_t*)(src+i*stride))[3]= dc;
    }
}

static void pred16x16_top_dc_c(uint8_t *src, int stride){
    int i, dc=0;

    for(i=0;i<16; i++){
        dc+= src[i-stride];
    }
    dc= 0x01010101*((dc + 8)>>4);

    for(i=0; i<16; i++){
        ((uint32_t*)(src+i*stride))[0]=
        ((uint32_t*)(src+i*stride))[1]=
        ((uint32_t*)(src+i*stride))[2]=
        ((uint32_t*)(src+i*stride))[3]= dc;
    }
}

static void pred16x16_128_dc_c(uint8_t *src, int stride){
    int i;

    for(i=0; i<16; i++){
        ((uint32_t*)(src+i*stride))[0]=
        ((uint32_t*)(src+i*stride))[1]=
        ((uint32_t*)(src+i*stride))[2]=
        ((uint32_t*)(src+i*stride))[3]= 0x01010101U*128U;
    }
}

static void pred16x16_127_dc_c(uint8_t *src, int stride){
    int i;

    for(i=0; i<16; i++){
        ((uint32_t*)(src+i*stride))[0]=
        ((uint32_t*)(src+i*stride))[1]=
        ((uint32_t*)(src+i*stride))[2]=
        ((uint32_t*)(src+i*stride))[3]= 0x01010101U*127U;
    }
}

static void pred16x16_129_dc_c(uint8_t *src, int stride){
    int i;

    for(i=0; i<16; i++){
        ((uint32_t*)(src+i*stride))[0]=
        ((uint32_t*)(src+i*stride))[1]=
        ((uint32_t*)(src+i*stride))[2]=
        ((uint32_t*)(src+i*stride))[3]= 0x01010101U*129U;
    }
}

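/* Plane (gradient) prediction: H and V estimate the horizontal and vertical
 * gradients from the border samples; the svq3 and rv40 flags select the
 * codec-specific scaling of those gradients. */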
static inline void pred16x16_plane_compat_c(uint8_t *src, int stride, const int svq3, const int rv40){
  int i, j, k;
  int a;
  uint8_t *cm = ff_cropTbl + MAX_NEG_CROP;
  const uint8_t * const src0 = src+7-stride;
  const uint8_t *src1 = src+8*stride-1;
  const uint8_t *src2 = src1-2*stride;      // == src+6*stride-1;
  int H = src0[1] - src0[-1];
  int V = src1[0] - src2[ 0];
  for(k=2; k<=8; ++k) {
    src1 += stride; src2 -= stride;
    H += k*(src0[k] - src0[-k]);
    V += k*(src1[0] - src2[ 0]);
  }
  if(svq3){
    H = ( 5*(H/4) ) / 16;
    V = ( 5*(V/4) ) / 16;

    /* required for 100% accuracy */
    i = H; H = V; V = i;
  }else if(rv40){
    H = ( H + (H>>2) ) >> 4;
    V = ( V + (V>>2) ) >> 4;
  }else{
    H = ( 5*H+32 ) >> 6;
    V = ( 5*V+32 ) >> 6;
  }

  a = 16*(src1[0] + src2[16] + 1) - 7*(V+H);
  for(j=16; j>0; --j) {
    int b = a;
    a += V;
    for(i=-16; i<0; i+=4) {
      src[16+i] = cm[ (b    ) >> 5 ];
      src[17+i] = cm[ (b+  H) >> 5 ];
      src[18+i] = cm[ (b+2*H) >> 5 ];
      src[19+i] = cm[ (b+3*H) >> 5 ];
      b += 4*H;
    }
    src += stride;
  }
}

static void pred16x16_plane_c(uint8_t *src, int stride){
    pred16x16_plane_compat_c(src, stride, 0, 0);
}

static void pred16x16_plane_svq3_c(uint8_t *src, int stride){
    pred16x16_plane_compat_c(src, stride, 1, 0);
}

static void pred16x16_plane_rv40_c(uint8_t *src, int stride){
    pred16x16_plane_compat_c(src, stride, 0, 1);
}

static void pred16x16_tm_vp8_c(uint8_t *src, int stride){
    uint8_t *cm = ff_cropTbl + MAX_NEG_CROP - src[-1-stride];
    uint8_t *top = src-stride;
    int y;

    for (y = 0; y < 16; y++) {
        uint8_t *cm_in = cm + src[-1];
        src[0]  = cm_in[top[0]];
        src[1]  = cm_in[top[1]];
        src[2]  = cm_in[top[2]];
        src[3]  = cm_in[top[3]];
        src[4]  = cm_in[top[4]];
        src[5]  = cm_in[top[5]];
        src[6]  = cm_in[top[6]];
        src[7]  = cm_in[top[7]];
        src[8]  = cm_in[top[8]];
        src[9]  = cm_in[top[9]];
        src[10] = cm_in[top[10]];
        src[11] = cm_in[top[11]];
        src[12] = cm_in[top[12]];
        src[13] = cm_in[top[13]];
        src[14] = cm_in[top[14]];
        src[15] = cm_in[top[15]];
        src += stride;
    }
}

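/* 8x8 chroma predictors (shared with RV40 and VP8); the H.264 DC modes
 * average each 4x4 quadrant separately. */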
static void pred8x8_vertical_c(uint8_t *src, int stride){
    int i;
    const uint32_t a= ((uint32_t*)(src-stride))[0];
    const uint32_t b= ((uint32_t*)(src-stride))[1];

    for(i=0; i<8; i++){
        ((uint32_t*)(src+i*stride))[0]= a;
        ((uint32_t*)(src+i*stride))[1]= b;
    }
}

static void pred8x8_horizontal_c(uint8_t *src, int stride){
    int i;

    for(i=0; i<8; i++){
        ((uint32_t*)(src+i*stride))[0]=
        ((uint32_t*)(src+i*stride))[1]= src[-1+i*stride]*0x01010101;
    }
}

static void pred8x8_128_dc_c(uint8_t *src, int stride){
    int i;

    for(i=0; i<8; i++){
        ((uint32_t*)(src+i*stride))[0]=
        ((uint32_t*)(src+i*stride))[1]= 0x01010101U*128U;
    }
}

static void pred8x8_127_dc_c(uint8_t *src, int stride){
    int i;

    for(i=0; i<8; i++){
        ((uint32_t*)(src+i*stride))[0]=
        ((uint32_t*)(src+i*stride))[1]= 0x01010101U*127U;
    }
}
static void pred8x8_129_dc_c(uint8_t *src, int stride){
    int i;

    for(i=0; i<8; i++){
        ((uint32_t*)(src+i*stride))[0]=
        ((uint32_t*)(src+i*stride))[1]= 0x01010101U*129U;
    }
}

static void pred8x8_left_dc_c(uint8_t *src, int stride){
    int i;
    int dc0, dc2;

    dc0=dc2=0;
    for(i=0;i<4; i++){
        dc0+= src[-1+i*stride];
        dc2+= src[-1+(i+4)*stride];
    }
    dc0= 0x01010101*((dc0 + 2)>>2);
    dc2= 0x01010101*((dc2 + 2)>>2);

    for(i=0; i<4; i++){
        ((uint32_t*)(src+i*stride))[0]=
        ((uint32_t*)(src+i*stride))[1]= dc0;
    }
    for(i=4; i<8; i++){
        ((uint32_t*)(src+i*stride))[0]=
        ((uint32_t*)(src+i*stride))[1]= dc2;
    }
}

static void pred8x8_left_dc_rv40_c(uint8_t *src, int stride){
    int i;
    int dc0;

    dc0=0;
    for(i=0;i<8; i++)
        dc0+= src[-1+i*stride];
    dc0= 0x01010101*((dc0 + 4)>>3);

    for(i=0; i<8; i++){
        ((uint32_t*)(src+i*stride))[0]=
        ((uint32_t*)(src+i*stride))[1]= dc0;
    }
}

static void pred8x8_top_dc_c(uint8_t *src, int stride){
    int i;
    int dc0, dc1;

    dc0=dc1=0;
    for(i=0;i<4; i++){
        dc0+= src[i-stride];
        dc1+= src[4+i-stride];
    }
    dc0= 0x01010101*((dc0 + 2)>>2);
    dc1= 0x01010101*((dc1 + 2)>>2);

    for(i=0; i<4; i++){
        ((uint32_t*)(src+i*stride))[0]= dc0;
        ((uint32_t*)(src+i*stride))[1]= dc1;
    }
    for(i=4; i<8; i++){
        ((uint32_t*)(src+i*stride))[0]= dc0;
        ((uint32_t*)(src+i*stride))[1]= dc1;
    }
}

static void pred8x8_top_dc_rv40_c(uint8_t *src, int stride){
    int i;
    int dc0;

    dc0=0;
    for(i=0;i<8; i++)
        dc0+= src[i-stride];
    dc0= 0x01010101*((dc0 + 4)>>3);

    for(i=0; i<8; i++){
        ((uint32_t*)(src+i*stride))[0]=
        ((uint32_t*)(src+i*stride))[1]= dc0;
    }
}


static void pred8x8_dc_c(uint8_t *src, int stride){
    int i;
    int dc0, dc1, dc2, dc3;

    dc0=dc1=dc2=0;
    for(i=0;i<4; i++){
        dc0+= src[-1+i*stride] + src[i-stride];
        dc1+= src[4+i-stride];
        dc2+= src[-1+(i+4)*stride];
    }
    dc3= 0x01010101*((dc1 + dc2 + 4)>>3);
    dc0= 0x01010101*((dc0 + 4)>>3);
    dc1= 0x01010101*((dc1 + 2)>>2);
    dc2= 0x01010101*((dc2 + 2)>>2);

    for(i=0; i<4; i++){
        ((uint32_t*)(src+i*stride))[0]= dc0;
        ((uint32_t*)(src+i*stride))[1]= dc1;
    }
    for(i=4; i<8; i++){
        ((uint32_t*)(src+i*stride))[0]= dc2;
        ((uint32_t*)(src+i*stride))[1]= dc3;
    }
}

//the following 4 functions should not be optimized!
static void pred8x8_mad_cow_dc_l0t(uint8_t *src, int stride){
    pred8x8_top_dc_c(src, stride);
    pred4x4_dc_c(src, NULL, stride);
}

static void pred8x8_mad_cow_dc_0lt(uint8_t *src, int stride){
    pred8x8_dc_c(src, stride);
    pred4x4_top_dc_c(src, NULL, stride);
}

static void pred8x8_mad_cow_dc_l00(uint8_t *src, int stride){
    pred8x8_left_dc_c(src, stride);
    pred4x4_128_dc_c(src + 4*stride    , NULL, stride);
    pred4x4_128_dc_c(src + 4*stride + 4, NULL, stride);
}

static void pred8x8_mad_cow_dc_0l0(uint8_t *src, int stride){
    pred8x8_left_dc_c(src, stride);
    pred4x4_128_dc_c(src    , NULL, stride);
    pred4x4_128_dc_c(src + 4, NULL, stride);
}

static void pred8x8_dc_rv40_c(uint8_t *src, int stride){
    int i;
    int dc0=0;

    for(i=0;i<4; i++){
        dc0+= src[-1+i*stride] + src[i-stride];
        dc0+= src[4+i-stride];
        dc0+= src[-1+(i+4)*stride];
    }
    dc0= 0x01010101*((dc0 + 8)>>4);

    for(i=0; i<4; i++){
        ((uint32_t*)(src+i*stride))[0]= dc0;
        ((uint32_t*)(src+i*stride))[1]= dc0;
    }
    for(i=4; i<8; i++){
        ((uint32_t*)(src+i*stride))[0]= dc0;
        ((uint32_t*)(src+i*stride))[1]= dc0;
    }
}

static void pred8x8_plane_c(uint8_t *src, int stride){
  int j, k;
  int a;
  uint8_t *cm = ff_cropTbl + MAX_NEG_CROP;
  const uint8_t * const src0 = src+3-stride;
  const uint8_t *src1 = src+4*stride-1;
  const uint8_t *src2 = src1-2*stride;      // == src+2*stride-1;
  int H = src0[1] - src0[-1];
  int V = src1[0] - src2[ 0];
  for(k=2; k<=4; ++k) {
    src1 += stride; src2 -= stride;
    H += k*(src0[k] - src0[-k]);
    V += k*(src1[0] - src2[ 0]);
  }
  H = ( 17*H+16 ) >> 5;
  V = ( 17*V+16 ) >> 5;

  a = 16*(src1[0] + src2[8]+1) - 3*(V+H);
  for(j=8; j>0; --j) {
    int b = a;
    a += V;
    src[0] = cm[ (b    ) >> 5 ];
    src[1] = cm[ (b+  H) >> 5 ];
    src[2] = cm[ (b+2*H) >> 5 ];
    src[3] = cm[ (b+3*H) >> 5 ];
    src[4] = cm[ (b+4*H) >> 5 ];
    src[5] = cm[ (b+5*H) >> 5 ];
    src[6] = cm[ (b+6*H) >> 5 ];
    src[7] = cm[ (b+7*H) >> 5 ];
    src += stride;
  }
}

static void pred8x8_tm_vp8_c(uint8_t *src, int stride){
    uint8_t *cm = ff_cropTbl + MAX_NEG_CROP - src[-1-stride];
    uint8_t *top = src-stride;
    int y;

    for (y = 0; y < 8; y++) {
        uint8_t *cm_in = cm + src[-1];
        src[0] = cm_in[top[0]];
        src[1] = cm_in[top[1]];
        src[2] = cm_in[top[2]];
        src[3] = cm_in[top[3]];
        src[4] = cm_in[top[4]];
        src[5] = cm_in[top[5]];
        src[6] = cm_in[top[6]];
        src[7] = cm_in[top[7]];
        src += stride;
    }
}

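/* 8x8 luma prediction (High profile): the PREDICT_8x8_LOAD_* macros below
 * build the [1 2 1]-filtered edge samples (l0..l7, t0..t15, lt) that the
 * pred8x8l_* functions work from; has_topleft and has_topright indicate
 * which neighbours are available. */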
#define SRC(x,y) src[(x)+(y)*stride]
#define PL(y) \
    const int l##y = (SRC(-1,y-1) + 2*SRC(-1,y) + SRC(-1,y+1) + 2) >> 2;
#define PREDICT_8x8_LOAD_LEFT \
    const int l0 = ((has_topleft ? SRC(-1,-1) : SRC(-1,0)) \
                     + 2*SRC(-1,0) + SRC(-1,1) + 2) >> 2; \
    PL(1) PL(2) PL(3) PL(4) PL(5) PL(6) \
    const int l7 av_unused = (SRC(-1,6) + 3*SRC(-1,7) + 2) >> 2

#define PT(x) \
    const int t##x = (SRC(x-1,-1) + 2*SRC(x,-1) + SRC(x+1,-1) + 2) >> 2;
#define PREDICT_8x8_LOAD_TOP \
    const int t0 = ((has_topleft ? SRC(-1,-1) : SRC(0,-1)) \
                     + 2*SRC(0,-1) + SRC(1,-1) + 2) >> 2; \
    PT(1) PT(2) PT(3) PT(4) PT(5) PT(6) \
    const int t7 av_unused = ((has_topright ? SRC(8,-1) : SRC(7,-1)) \
                     + 2*SRC(7,-1) + SRC(6,-1) + 2) >> 2

#define PTR(x) \
    t##x = (SRC(x-1,-1) + 2*SRC(x,-1) + SRC(x+1,-1) + 2) >> 2;
#define PREDICT_8x8_LOAD_TOPRIGHT \
    int t8, t9, t10, t11, t12, t13, t14, t15; \
    if(has_topright) { \
        PTR(8) PTR(9) PTR(10) PTR(11) PTR(12) PTR(13) PTR(14) \
        t15 = (SRC(14,-1) + 3*SRC(15,-1) + 2) >> 2; \
    } else t8=t9=t10=t11=t12=t13=t14=t15= SRC(7,-1);

#define PREDICT_8x8_LOAD_TOPLEFT \
    const int lt = (SRC(-1,0) + 2*SRC(-1,-1) + SRC(0,-1) + 2) >> 2

#define PREDICT_8x8_DC(v) \
    int y; \
    for( y = 0; y < 8; y++ ) { \
        ((uint32_t*)src)[0] = \
        ((uint32_t*)src)[1] = v; \
        src += stride; \
    }

static void pred8x8l_128_dc_c(uint8_t *src, int has_topleft, int has_topright, int stride)
{
    PREDICT_8x8_DC(0x80808080);
}
static void pred8x8l_left_dc_c(uint8_t *src, int has_topleft, int has_topright, int stride)
{
    PREDICT_8x8_LOAD_LEFT;
    const uint32_t dc = ((l0+l1+l2+l3+l4+l5+l6+l7+4) >> 3) * 0x01010101;
    PREDICT_8x8_DC(dc);
}
static void pred8x8l_top_dc_c(uint8_t *src, int has_topleft, int has_topright, int stride)
{
    PREDICT_8x8_LOAD_TOP;
    const uint32_t dc = ((t0+t1+t2+t3+t4+t5+t6+t7+4) >> 3) * 0x01010101;
    PREDICT_8x8_DC(dc);
}
static void pred8x8l_dc_c(uint8_t *src, int has_topleft, int has_topright, int stride)
{
    PREDICT_8x8_LOAD_LEFT;
    PREDICT_8x8_LOAD_TOP;
    const uint32_t dc = ((l0+l1+l2+l3+l4+l5+l6+l7
                         +t0+t1+t2+t3+t4+t5+t6+t7+8) >> 4) * 0x01010101;
    PREDICT_8x8_DC(dc);
}
static void pred8x8l_horizontal_c(uint8_t *src, int has_topleft, int has_topright, int stride)
{
    PREDICT_8x8_LOAD_LEFT;
#define ROW(y) ((uint32_t*)(src+y*stride))[0] =\
               ((uint32_t*)(src+y*stride))[1] = 0x01010101 * l##y
    ROW(0); ROW(1); ROW(2); ROW(3); ROW(4); ROW(5); ROW(6); ROW(7);
#undef ROW
}
static void pred8x8l_vertical_c(uint8_t *src, int has_topleft, int has_topright, int stride)
{
    int y;
    PREDICT_8x8_LOAD_TOP;
    src[0] = t0;
    src[1] = t1;
    src[2] = t2;
    src[3] = t3;
    src[4] = t4;
    src[5] = t5;
    src[6] = t6;
    src[7] = t7;
    for( y = 1; y < 8; y++ )
        *(uint64_t*)(src+y*stride) = *(uint64_t*)src;
}
static void pred8x8l_down_left_c(uint8_t *src, int has_topleft, int has_topright, int stride)
{
    PREDICT_8x8_LOAD_TOP;
    PREDICT_8x8_LOAD_TOPRIGHT;
    SRC(0,0)= (t0 + 2*t1 + t2 + 2) >> 2;
    SRC(0,1)=SRC(1,0)= (t1 + 2*t2 + t3 + 2) >> 2;
    SRC(0,2)=SRC(1,1)=SRC(2,0)= (t2 + 2*t3 + t4 + 2) >> 2;
    SRC(0,3)=SRC(1,2)=SRC(2,1)=SRC(3,0)= (t3 + 2*t4 + t5 + 2) >> 2;
    SRC(0,4)=SRC(1,3)=SRC(2,2)=SRC(3,1)=SRC(4,0)= (t4 + 2*t5 + t6 + 2) >> 2;
    SRC(0,5)=SRC(1,4)=SRC(2,3)=SRC(3,2)=SRC(4,1)=SRC(5,0)= (t5 + 2*t6 + t7 + 2) >> 2;
    SRC(0,6)=SRC(1,5)=SRC(2,4)=SRC(3,3)=SRC(4,2)=SRC(5,1)=SRC(6,0)= (t6 + 2*t7 + t8 + 2) >> 2;
    SRC(0,7)=SRC(1,6)=SRC(2,5)=SRC(3,4)=SRC(4,3)=SRC(5,2)=SRC(6,1)=SRC(7,0)= (t7 + 2*t8 + t9 + 2) >> 2;
    SRC(1,7)=SRC(2,6)=SRC(3,5)=SRC(4,4)=SRC(5,3)=SRC(6,2)=SRC(7,1)= (t8 + 2*t9 + t10 + 2) >> 2;
    SRC(2,7)=SRC(3,6)=SRC(4,5)=SRC(5,4)=SRC(6,3)=SRC(7,2)= (t9 + 2*t10 + t11 + 2) >> 2;
    SRC(3,7)=SRC(4,6)=SRC(5,5)=SRC(6,4)=SRC(7,3)= (t10 + 2*t11 + t12 + 2) >> 2;
    SRC(4,7)=SRC(5,6)=SRC(6,5)=SRC(7,4)= (t11 + 2*t12 + t13 + 2) >> 2;
    SRC(5,7)=SRC(6,6)=SRC(7,5)= (t12 + 2*t13 + t14 + 2) >> 2;
    SRC(6,7)=SRC(7,6)= (t13 + 2*t14 + t15 + 2) >> 2;
    SRC(7,7)= (t14 + 3*t15 + 2) >> 2;
}
static void pred8x8l_down_right_c(uint8_t *src, int has_topleft, int has_topright, int stride)
{
    PREDICT_8x8_LOAD_TOP;
    PREDICT_8x8_LOAD_LEFT;
    PREDICT_8x8_LOAD_TOPLEFT;
    SRC(0,7)= (l7 + 2*l6 + l5 + 2) >> 2;
    SRC(0,6)=SRC(1,7)= (l6 + 2*l5 + l4 + 2) >> 2;
    SRC(0,5)=SRC(1,6)=SRC(2,7)= (l5 + 2*l4 + l3 + 2) >> 2;
    SRC(0,4)=SRC(1,5)=SRC(2,6)=SRC(3,7)= (l4 + 2*l3 + l2 + 2) >> 2;
    SRC(0,3)=SRC(1,4)=SRC(2,5)=SRC(3,6)=SRC(4,7)= (l3 + 2*l2 + l1 + 2) >> 2;
    SRC(0,2)=SRC(1,3)=SRC(2,4)=SRC(3,5)=SRC(4,6)=SRC(5,7)= (l2 + 2*l1 + l0 + 2) >> 2;
    SRC(0,1)=SRC(1,2)=SRC(2,3)=SRC(3,4)=SRC(4,5)=SRC(5,6)=SRC(6,7)= (l1 + 2*l0 + lt + 2) >> 2;
    SRC(0,0)=SRC(1,1)=SRC(2,2)=SRC(3,3)=SRC(4,4)=SRC(5,5)=SRC(6,6)=SRC(7,7)= (l0 + 2*lt + t0 + 2) >> 2;
    SRC(1,0)=SRC(2,1)=SRC(3,2)=SRC(4,3)=SRC(5,4)=SRC(6,5)=SRC(7,6)= (lt + 2*t0 + t1 + 2) >> 2;
    SRC(2,0)=SRC(3,1)=SRC(4,2)=SRC(5,3)=SRC(6,4)=SRC(7,5)= (t0 + 2*t1 + t2 + 2) >> 2;
    SRC(3,0)=SRC(4,1)=SRC(5,2)=SRC(6,3)=SRC(7,4)= (t1 + 2*t2 + t3 + 2) >> 2;
    SRC(4,0)=SRC(5,1)=SRC(6,2)=SRC(7,3)= (t2 + 2*t3 + t4 + 2) >> 2;
    SRC(5,0)=SRC(6,1)=SRC(7,2)= (t3 + 2*t4 + t5 + 2) >> 2;
    SRC(6,0)=SRC(7,1)= (t4 + 2*t5 + t6 + 2) >> 2;
    SRC(7,0)= (t5 + 2*t6 + t7 + 2) >> 2;

}
static void pred8x8l_vertical_right_c(uint8_t *src, int has_topleft, int has_topright, int stride)
{
    PREDICT_8x8_LOAD_TOP;
    PREDICT_8x8_LOAD_LEFT;
    PREDICT_8x8_LOAD_TOPLEFT;
    SRC(0,6)= (l5 + 2*l4 + l3 + 2) >> 2;
    SRC(0,7)= (l6 + 2*l5 + l4 + 2) >> 2;
    SRC(0,4)=SRC(1,6)= (l3 + 2*l2 + l1 + 2) >> 2;
    SRC(0,5)=SRC(1,7)= (l4 + 2*l3 + l2 + 2) >> 2;
    SRC(0,2)=SRC(1,4)=SRC(2,6)= (l1 + 2*l0 + lt + 2) >> 2;
    SRC(0,3)=SRC(1,5)=SRC(2,7)= (l2 + 2*l1 + l0 + 2) >> 2;
    SRC(0,1)=SRC(1,3)=SRC(2,5)=SRC(3,7)= (l0 + 2*lt + t0 + 2) >> 2;
    SRC(0,0)=SRC(1,2)=SRC(2,4)=SRC(3,6)= (lt + t0 + 1) >> 1;
    SRC(1,1)=SRC(2,3)=SRC(3,5)=SRC(4,7)= (lt + 2*t0 + t1 + 2) >> 2;
    SRC(1,0)=SRC(2,2)=SRC(3,4)=SRC(4,6)= (t0 + t1 + 1) >> 1;
    SRC(2,1)=SRC(3,3)=SRC(4,5)=SRC(5,7)= (t0 + 2*t1 + t2 + 2) >> 2;
    SRC(2,0)=SRC(3,2)=SRC(4,4)=SRC(5,6)= (t1 + t2 + 1) >> 1;
    SRC(3,1)=SRC(4,3)=SRC(5,5)=SRC(6,7)= (t1 + 2*t2 + t3 + 2) >> 2;
    SRC(3,0)=SRC(4,2)=SRC(5,4)=SRC(6,6)= (t2 + t3 + 1) >> 1;
    SRC(4,1)=SRC(5,3)=SRC(6,5)=SRC(7,7)= (t2 + 2*t3 + t4 + 2) >> 2;
    SRC(4,0)=SRC(5,2)=SRC(6,4)=SRC(7,6)= (t3 + t4 + 1) >> 1;
    SRC(5,1)=SRC(6,3)=SRC(7,5)= (t3 + 2*t4 + t5 + 2) >> 2;
    SRC(5,0)=SRC(6,2)=SRC(7,4)= (t4 + t5 + 1) >> 1;
    SRC(6,1)=SRC(7,3)= (t4 + 2*t5 + t6 + 2) >> 2;
    SRC(6,0)=SRC(7,2)= (t5 + t6 + 1) >> 1;
    SRC(7,1)= (t5 + 2*t6 + t7 + 2) >> 2;
    SRC(7,0)= (t6 + t7 + 1) >> 1;
}
static void pred8x8l_horizontal_down_c(uint8_t *src, int has_topleft, int has_topright, int stride)
{
    PREDICT_8x8_LOAD_TOP;
    PREDICT_8x8_LOAD_LEFT;
    PREDICT_8x8_LOAD_TOPLEFT;
    SRC(0,7)= (l6 + l7 + 1) >> 1;
    SRC(1,7)= (l5 + 2*l6 + l7 + 2) >> 2;
    SRC(0,6)=SRC(2,7)= (l5 + l6 + 1) >> 1;
    SRC(1,6)=SRC(3,7)= (l4 + 2*l5 + l6 + 2) >> 2;
    SRC(0,5)=SRC(2,6)=SRC(4,7)= (l4 + l5 + 1) >> 1;
    SRC(1,5)=SRC(3,6)=SRC(5,7)= (l3 + 2*l4 + l5 + 2) >> 2;
    SRC(0,4)=SRC(2,5)=SRC(4,6)=SRC(6,7)= (l3 + l4 + 1) >> 1;
    SRC(1,4)=SRC(3,5)=SRC(5,6)=SRC(7,7)= (l2 + 2*l3 + l4 + 2) >> 2;
    SRC(0,3)=SRC(2,4)=SRC(4,5)=SRC(6,6)= (l2 + l3 + 1) >> 1;
    SRC(1,3)=SRC(3,4)=SRC(5,5)=SRC(7,6)= (l1 + 2*l2 + l3 + 2) >> 2;
    SRC(0,2)=SRC(2,3)=SRC(4,4)=SRC(6,5)= (l1 + l2 + 1) >> 1;
    SRC(1,2)=SRC(3,3)=SRC(5,4)=SRC(7,5)= (l0 + 2*l1 + l2 + 2) >> 2;
    SRC(0,1)=SRC(2,2)=SRC(4,3)=SRC(6,4)= (l0 + l1 + 1) >> 1;
    SRC(1,1)=SRC(3,2)=SRC(5,3)=SRC(7,4)= (lt + 2*l0 + l1 + 2) >> 2;
    SRC(0,0)=SRC(2,1)=SRC(4,2)=SRC(6,3)= (lt + l0 + 1) >> 1;
    SRC(1,0)=SRC(3,1)=SRC(5,2)=SRC(7,3)= (l0 + 2*lt + t0 + 2) >> 2;
    SRC(2,0)=SRC(4,1)=SRC(6,2)= (t1 + 2*t0 + lt + 2) >> 2;
    SRC(3,0)=SRC(5,1)=SRC(7,2)= (t2 + 2*t1 + t0 + 2) >> 2;
    SRC(4,0)=SRC(6,1)= (t3 + 2*t2 + t1 + 2) >> 2;
    SRC(5,0)=SRC(7,1)= (t4 + 2*t3 + t2 + 2) >> 2;
    SRC(6,0)= (t5 + 2*t4 + t3 + 2) >> 2;
    SRC(7,0)= (t6 + 2*t5 + t4 + 2) >> 2;
}
static void pred8x8l_vertical_left_c(uint8_t *src, int has_topleft, int has_topright, int stride)
{
    PREDICT_8x8_LOAD_TOP;
    PREDICT_8x8_LOAD_TOPRIGHT;
    SRC(0,0)= (t0 + t1 + 1) >> 1;
    SRC(0,1)= (t0 + 2*t1 + t2 + 2) >> 2;
    SRC(0,2)=SRC(1,0)= (t1 + t2 + 1) >> 1;
    SRC(0,3)=SRC(1,1)= (t1 + 2*t2 + t3 + 2) >> 2;
    SRC(0,4)=SRC(1,2)=SRC(2,0)= (t2 + t3 + 1) >> 1;
    SRC(0,5)=SRC(1,3)=SRC(2,1)= (t2 + 2*t3 + t4 + 2) >> 2;
    SRC(0,6)=SRC(1,4)=SRC(2,2)=SRC(3,0)= (t3 + t4 + 1) >> 1;
    SRC(0,7)=SRC(1,5)=SRC(2,3)=SRC(3,1)= (t3 + 2*t4 + t5 + 2) >> 2;
    SRC(1,6)=SRC(2,4)=SRC(3,2)=SRC(4,0)= (t4 + t5 + 1) >> 1;
    SRC(1,7)=SRC(2,5)=SRC(3,3)=SRC(4,1)= (t4 + 2*t5 + t6 + 2) >> 2;
    SRC(2,6)=SRC(3,4)=SRC(4,2)=SRC(5,0)= (t5 + t6 + 1) >> 1;
    SRC(2,7)=SRC(3,5)=SRC(4,3)=SRC(5,1)= (t5 + 2*t6 + t7 + 2) >> 2;
    SRC(3,6)=SRC(4,4)=SRC(5,2)=SRC(6,0)= (t6 + t7 + 1) >> 1;
    SRC(3,7)=SRC(4,5)=SRC(5,3)=SRC(6,1)= (t6 + 2*t7 + t8 + 2) >> 2;
    SRC(4,6)=SRC(5,4)=SRC(6,2)=SRC(7,0)= (t7 + t8 + 1) >> 1;
    SRC(4,7)=SRC(5,5)=SRC(6,3)=SRC(7,1)= (t7 + 2*t8 + t9 + 2) >> 2;
    SRC(5,6)=SRC(6,4)=SRC(7,2)= (t8 + t9 + 1) >> 1;
    SRC(5,7)=SRC(6,5)=SRC(7,3)= (t8 + 2*t9 + t10 + 2) >> 2;
    SRC(6,6)=SRC(7,4)= (t9 + t10 + 1) >> 1;
    SRC(6,7)=SRC(7,5)= (t9 + 2*t10 + t11 + 2) >> 2;
    SRC(7,6)= (t10 + t11 + 1) >> 1;
    SRC(7,7)= (t10 + 2*t11 + t12 + 2) >> 2;
}
static void pred8x8l_horizontal_up_c(uint8_t *src, int has_topleft, int has_topright, int stride)
{
    PREDICT_8x8_LOAD_LEFT;
    SRC(0,0)= (l0 + l1 + 1) >> 1;
    SRC(1,0)= (l0 + 2*l1 + l2 + 2) >> 2;
    SRC(0,1)=SRC(2,0)= (l1 + l2 + 1) >> 1;
    SRC(1,1)=SRC(3,0)= (l1 + 2*l2 + l3 + 2) >> 2;
    SRC(0,2)=SRC(2,1)=SRC(4,0)= (l2 + l3 + 1) >> 1;
    SRC(1,2)=SRC(3,1)=SRC(5,0)= (l2 + 2*l3 + l4 + 2) >> 2;
    SRC(0,3)=SRC(2,2)=SRC(4,1)=SRC(6,0)= (l3 + l4 + 1) >> 1;
    SRC(1,3)=SRC(3,2)=SRC(5,1)=SRC(7,0)= (l3 + 2*l4 + l5 + 2) >> 2;
    SRC(0,4)=SRC(2,3)=SRC(4,2)=SRC(6,1)= (l4 + l5 + 1) >> 1;
    SRC(1,4)=SRC(3,3)=SRC(5,2)=SRC(7,1)= (l4 + 2*l5 + l6 + 2) >> 2;
    SRC(0,5)=SRC(2,4)=SRC(4,3)=SRC(6,2)= (l5 + l6 + 1) >> 1;
    SRC(1,5)=SRC(3,4)=SRC(5,3)=SRC(7,2)= (l5 + 2*l6 + l7 + 2) >> 2;
    SRC(0,6)=SRC(2,5)=SRC(4,4)=SRC(6,3)= (l6 + l7 + 1) >> 1;
    SRC(1,6)=SRC(3,5)=SRC(5,4)=SRC(7,3)= (l6 + 3*l7 + 2) >> 2;
    SRC(0,7)=SRC(1,7)=SRC(2,6)=SRC(2,7)=SRC(3,6)=
    SRC(3,7)=SRC(4,5)=SRC(4,6)=SRC(4,7)=SRC(5,5)=
    SRC(5,6)=SRC(5,7)=SRC(6,4)=SRC(6,5)=SRC(6,6)=
    SRC(6,7)=SRC(7,4)=SRC(7,5)=SRC(7,6)=SRC(7,7)= l7;
}
#undef PREDICT_8x8_LOAD_LEFT
#undef PREDICT_8x8_LOAD_TOP
#undef PREDICT_8x8_LOAD_TOPLEFT
#undef PREDICT_8x8_LOAD_TOPRIGHT
#undef PREDICT_8x8_DC
#undef PTR
#undef PT
#undef PL
#undef SRC

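/* Prediction with residual add, used for lossless H.264: the *_add variants
 * accumulate the residual down each column (vertical) or along each row
 * (horizontal), so every pixel is predicted from the reconstructed pixel
 * directly above or to its left. */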
static void pred4x4_vertical_add_c(uint8_t *pix, const DCTELEM *block, int stride){
    int i;
    pix -= stride;
    for(i=0; i<4; i++){
        uint8_t v = pix[0];
        pix[1*stride]= v += block[0];
        pix[2*stride]= v += block[4];
        pix[3*stride]= v += block[8];
        pix[4*stride]= v +  block[12];
        pix++;
        block++;
    }
}

static void pred4x4_horizontal_add_c(uint8_t *pix, const DCTELEM *block, int stride){
    int i;
    for(i=0; i<4; i++){
        uint8_t v = pix[-1];
        pix[0]= v += block[0];
        pix[1]= v += block[1];
        pix[2]= v += block[2];
        pix[3]= v +  block[3];
        pix+= stride;
        block+= 4;
    }
}

static void pred8x8l_vertical_add_c(uint8_t *pix, const DCTELEM *block, int stride){
    int i;
    pix -= stride;
    for(i=0; i<8; i++){
        uint8_t v = pix[0];
        pix[1*stride]= v += block[0];
        pix[2*stride]= v += block[8];
        pix[3*stride]= v += block[16];
        pix[4*stride]= v += block[24];
        pix[5*stride]= v += block[32];
        pix[6*stride]= v += block[40];
        pix[7*stride]= v += block[48];
        pix[8*stride]= v +  block[56];
        pix++;
        block++;
    }
}

static void pred8x8l_horizontal_add_c(uint8_t *pix, const DCTELEM *block, int stride){
    int i;
    for(i=0; i<8; i++){
        uint8_t v = pix[-1];
        pix[0]= v += block[0];
        pix[1]= v += block[1];
        pix[2]= v += block[2];
        pix[3]= v += block[3];
        pix[4]= v += block[4];
        pix[5]= v += block[5];
        pix[6]= v += block[6];
        pix[7]= v +  block[7];
        pix+= stride;
        block+= 8;
    }
}

static void pred16x16_vertical_add_c(uint8_t *pix, const int *block_offset, const DCTELEM *block, int stride){
    int i;
    for(i=0; i<16; i++)
        pred4x4_vertical_add_c(pix + block_offset[i], block + i*16, stride);
}

static void pred16x16_horizontal_add_c(uint8_t *pix, const int *block_offset, const DCTELEM *block, int stride){
    int i;
    for(i=0; i<16; i++)
        pred4x4_horizontal_add_c(pix + block_offset[i], block + i*16, stride);
}

static void pred8x8_vertical_add_c(uint8_t *pix, const int *block_offset, const DCTELEM *block, int stride){
    int i;
    for(i=0; i<4; i++)
        pred4x4_vertical_add_c(pix + block_offset[i], block + i*16, stride);
}

static void pred8x8_horizontal_add_c(uint8_t *pix, const int *block_offset, const DCTELEM *block, int stride){
    int i;
    for(i=0; i<4; i++)
        pred4x4_horizontal_add_c(pix + block_offset[i], block + i*16, stride);
}


/**
 * Set the intra prediction function pointers.
 */
void ff_h264_pred_init(H264PredContext *h, int codec_id){
//    MpegEncContext * const s = &h->s;

    if(codec_id != CODEC_ID_RV40){
        if(codec_id == CODEC_ID_VP8) {
            h->pred4x4[VERT_PRED       ]= pred4x4_vertical_vp8_c;
            h->pred4x4[HOR_PRED        ]= pred4x4_horizontal_vp8_c;
        } else {
            h->pred4x4[VERT_PRED       ]= pred4x4_vertical_c;
            h->pred4x4[HOR_PRED        ]= pred4x4_horizontal_c;
        }
        h->pred4x4[DC_PRED             ]= pred4x4_dc_c;
        if(codec_id == CODEC_ID_SVQ3)
            h->pred4x4[DIAG_DOWN_LEFT_PRED ]= pred4x4_down_left_svq3_c;
        else
            h->pred4x4[DIAG_DOWN_LEFT_PRED ]= pred4x4_down_left_c;
        h->pred4x4[DIAG_DOWN_RIGHT_PRED]= pred4x4_down_right_c;
        h->pred4x4[VERT_RIGHT_PRED     ]= pred4x4_vertical_right_c;
        h->pred4x4[HOR_DOWN_PRED       ]= pred4x4_horizontal_down_c;
        if (codec_id == CODEC_ID_VP8) {
            h->pred4x4[VERT_LEFT_PRED  ]= pred4x4_vertical_left_vp8_c;
        } else
            h->pred4x4[VERT_LEFT_PRED  ]= pred4x4_vertical_left_c;
        h->pred4x4[HOR_UP_PRED         ]= pred4x4_horizontal_up_c;
        if(codec_id != CODEC_ID_VP8) {
            h->pred4x4[LEFT_DC_PRED    ]= pred4x4_left_dc_c;
            h->pred4x4[TOP_DC_PRED     ]= pred4x4_top_dc_c;
            h->pred4x4[DC_128_PRED     ]= pred4x4_128_dc_c;
        } else {
            h->pred4x4[TM_VP8_PRED     ]= pred4x4_tm_vp8_c;
            h->pred4x4[DC_127_PRED     ]= pred4x4_127_dc_c;
            h->pred4x4[DC_129_PRED     ]= pred4x4_129_dc_c;
            h->pred4x4[VERT_VP8_PRED   ]= pred4x4_vertical_c;
            h->pred4x4[HOR_VP8_PRED    ]= pred4x4_horizontal_c;
        }
    }else{
        h->pred4x4[VERT_PRED           ]= pred4x4_vertical_c;
        h->pred4x4[HOR_PRED            ]= pred4x4_horizontal_c;
        h->pred4x4[DC_PRED             ]= pred4x4_dc_c;
        h->pred4x4[DIAG_DOWN_LEFT_PRED ]= pred4x4_down_left_rv40_c;
        h->pred4x4[DIAG_DOWN_RIGHT_PRED]= pred4x4_down_right_c;
        h->pred4x4[VERT_RIGHT_PRED     ]= pred4x4_vertical_right_c;
        h->pred4x4[HOR_DOWN_PRED       ]= pred4x4_horizontal_down_c;
        h->pred4x4[VERT_LEFT_PRED      ]= pred4x4_vertical_left_rv40_c;
        h->pred4x4[HOR_UP_PRED         ]= pred4x4_horizontal_up_rv40_c;
        h->pred4x4[LEFT_DC_PRED        ]= pred4x4_left_dc_c;
        h->pred4x4[TOP_DC_PRED         ]= pred4x4_top_dc_c;
        h->pred4x4[DC_128_PRED         ]= pred4x4_128_dc_c;
        h->pred4x4[DIAG_DOWN_LEFT_PRED_RV40_NODOWN]= pred4x4_down_left_rv40_nodown_c;
        h->pred4x4[HOR_UP_PRED_RV40_NODOWN]= pred4x4_horizontal_up_rv40_nodown_c;
        h->pred4x4[VERT_LEFT_PRED_RV40_NODOWN]= pred4x4_vertical_left_rv40_nodown_c;
    }

    h->pred8x8l[VERT_PRED           ]= pred8x8l_vertical_c;
    h->pred8x8l[HOR_PRED            ]= pred8x8l_horizontal_c;
    h->pred8x8l[DC_PRED             ]= pred8x8l_dc_c;
    h->pred8x8l[DIAG_DOWN_LEFT_PRED ]= pred8x8l_down_left_c;
    h->pred8x8l[DIAG_DOWN_RIGHT_PRED]= pred8x8l_down_right_c;
    h->pred8x8l[VERT_RIGHT_PRED     ]= pred8x8l_vertical_right_c;
    h->pred8x8l[HOR_DOWN_PRED       ]= pred8x8l_horizontal_down_c;
    h->pred8x8l[VERT_LEFT_PRED      ]= pred8x8l_vertical_left_c;
    h->pred8x8l[HOR_UP_PRED         ]= pred8x8l_horizontal_up_c;
    h->pred8x8l[LEFT_DC_PRED        ]= pred8x8l_left_dc_c;
    h->pred8x8l[TOP_DC_PRED         ]= pred8x8l_top_dc_c;
    h->pred8x8l[DC_128_PRED         ]= pred8x8l_128_dc_c;

    h->pred8x8[VERT_PRED8x8   ]= pred8x8_vertical_c;
    h->pred8x8[HOR_PRED8x8    ]= pred8x8_horizontal_c;
    if (codec_id != CODEC_ID_VP8) {
        h->pred8x8[PLANE_PRED8x8]= pred8x8_plane_c;
    } else
        h->pred8x8[PLANE_PRED8x8]= pred8x8_tm_vp8_c;
    if(codec_id != CODEC_ID_RV40 && codec_id != CODEC_ID_VP8){
        h->pred8x8[DC_PRED8x8     ]= pred8x8_dc_c;
        h->pred8x8[LEFT_DC_PRED8x8]= pred8x8_left_dc_c;
        h->pred8x8[TOP_DC_PRED8x8 ]= pred8x8_top_dc_c;
        h->pred8x8[ALZHEIMER_DC_L0T_PRED8x8 ]= pred8x8_mad_cow_dc_l0t;
        h->pred8x8[ALZHEIMER_DC_0LT_PRED8x8 ]= pred8x8_mad_cow_dc_0lt;
        h->pred8x8[ALZHEIMER_DC_L00_PRED8x8 ]= pred8x8_mad_cow_dc_l00;
        h->pred8x8[ALZHEIMER_DC_0L0_PRED8x8 ]= pred8x8_mad_cow_dc_0l0;
    }else{
        h->pred8x8[DC_PRED8x8     ]= pred8x8_dc_rv40_c;
        h->pred8x8[LEFT_DC_PRED8x8]= pred8x8_left_dc_rv40_c;
        h->pred8x8[TOP_DC_PRED8x8 ]= pred8x8_top_dc_rv40_c;
        if (codec_id == CODEC_ID_VP8) {
            h->pred8x8[DC_127_PRED8x8]= pred8x8_127_dc_c;
            h->pred8x8[DC_129_PRED8x8]= pred8x8_129_dc_c;
        }
    }
    h->pred8x8[DC_128_PRED8x8 ]= pred8x8_128_dc_c;

    h->pred16x16[DC_PRED8x8     ]= pred16x16_dc_c;
    h->pred16x16[VERT_PRED8x8   ]= pred16x16_vertical_c;
    h->pred16x16[HOR_PRED8x8    ]= pred16x16_horizontal_c;
    switch(codec_id){
    case CODEC_ID_SVQ3:
       h->pred16x16[PLANE_PRED8x8  ]= pred16x16_plane_svq3_c;
       break;
    case CODEC_ID_RV40:
       h->pred16x16[PLANE_PRED8x8  ]= pred16x16_plane_rv40_c;
       break;
    case CODEC_ID_VP8:
       h->pred16x16[PLANE_PRED8x8  ]= pred16x16_tm_vp8_c;
       h->pred16x16[DC_127_PRED8x8]= pred16x16_127_dc_c;
       h->pred16x16[DC_129_PRED8x8]= pred16x16_129_dc_c;
       break;
    default:
       h->pred16x16[PLANE_PRED8x8  ]= pred16x16_plane_c;
       break;
    }
    h->pred16x16[LEFT_DC_PRED8x8]= pred16x16_left_dc_c;
    h->pred16x16[TOP_DC_PRED8x8 ]= pred16x16_top_dc_c;
    h->pred16x16[DC_128_PRED8x8 ]= pred16x16_128_dc_c;

    //special lossless h/v prediction for h264
    h->pred4x4_add  [VERT_PRED   ]= pred4x4_vertical_add_c;
    h->pred4x4_add  [ HOR_PRED   ]= pred4x4_horizontal_add_c;
    h->pred8x8l_add [VERT_PRED   ]= pred8x8l_vertical_add_c;
    h->pred8x8l_add [ HOR_PRED   ]= pred8x8l_horizontal_add_c;
    h->pred8x8_add  [VERT_PRED8x8]= pred8x8_vertical_add_c;
    h->pred8x8_add  [ HOR_PRED8x8]= pred8x8_horizontal_add_c;
    h->pred16x16_add[VERT_PRED8x8]= pred16x16_vertical_add_c;
    h->pred16x16_add[ HOR_PRED8x8]= pred16x16_horizontal_add_c;

    if (ARCH_ARM) ff_h264_pred_init_arm(h, codec_id);
    if (HAVE_MMX) ff_h264_pred_init_x86(h, codec_id);
}