Revision 41fda91d

View differences:

libavcodec/sh4/dsputil_align.c
/*
 * aligned/packed access motion
 *
 * Copyright (c) 2001-2003 BERO <bero@geocities.co.jp>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */

#include "../avcodec.h"
#include "../dsputil.h"

#define	LP(p)	*(uint32_t*)(p)

#define	BYTE_VEC(c)	((c)*0x01010101UL)

#define	UNPACK(ph,pl,tt0,tt1) do { \
	uint32_t t0,t1; t0=tt0;t1=tt1; \
	ph = ( (t0 & ~BYTE_VEC(0x03))>>2) + ( (t1 & ~BYTE_VEC(0x03))>>2); \
	pl = (t0 & BYTE_VEC(0x03)) + (t1 & BYTE_VEC(0x03)); } while(0)

#define	rnd_PACK(ph,pl,nph,npl)	ph + nph + (((pl + npl + BYTE_VEC(0x02))>>2) & BYTE_VEC(0x03))
#define	no_rnd_PACK(ph,pl,nph,npl)	ph + nph + (((pl + npl + BYTE_VEC(0x01))>>2) & BYTE_VEC(0x03))
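Taken together, UNPACK and the two PACK macros implement a per-byte four-way average that never leaves 32-bit registers: UNPACK splits every byte x of two packed words into a high part x>>2 (already divided by four) and a low part x&3, and rnd_PACK adds the two high sums and folds the rounded carry of the four low parts back in, which per byte is exactly (a+b+c+d+2)>>2; no_rnd_PACK adds 1 instead of 2 and so rounds the half cases down. A minimal standalone check of that identity (the macros are copied from above; the test harness itself is hypothetical):

/* check_pack.c -- verify rnd_PACK over UNPACKed words == (a+b+c+d+2)>>2 per byte */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define BYTE_VEC(c)	((c)*0x01010101UL)
#define UNPACK(ph,pl,tt0,tt1) do { \
	uint32_t t0,t1; t0=tt0;t1=tt1; \
	ph = ( (t0 & ~BYTE_VEC(0x03))>>2) + ( (t1 & ~BYTE_VEC(0x03))>>2); \
	pl = (t0 & BYTE_VEC(0x03)) + (t1 & BYTE_VEC(0x03)); } while(0)
#define rnd_PACK(ph,pl,nph,npl)	ph + nph + (((pl + npl + BYTE_VEC(0x02))>>2) & BYTE_VEC(0x03))

static uint32_t rand32(void) { return ((uint32_t)rand() << 16) ^ (uint32_t)rand(); }

int main(void)
{
    int i, k;
    for (i = 0; i < 100000; i++) {
        uint32_t a = rand32(), b = rand32(), c = rand32(), d = rand32();
        uint32_t ph, pl, nph, npl, packed;
        UNPACK(ph, pl, a, b);
        UNPACK(nph, npl, c, d);
        packed = rnd_PACK(ph, pl, nph, npl);
        for (k = 0; k < 4; k++) {
            unsigned ref = (((a>>8*k)&0xff) + ((b>>8*k)&0xff) +
                            ((c>>8*k)&0xff) + ((d>>8*k)&0xff) + 2) >> 2;
            if (((packed >> 8*k) & 0xff) != ref)
                return printf("mismatch\n"), 1;
        }
    }
    printf("ok\n");
    return 0;
}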

/* little endian */
#define	MERGE1(a,b,ofs)	(ofs==0)?a:( ((a)>>(8*ofs))|((b)<<(32-8*ofs)) )
#define	MERGE2(a,b,ofs)	(ofs==3)?b:( ((a)>>(8*(ofs+1)))|((b)<<(32-8*(ofs+1))) )
/* big
#define	MERGE1(a,b,ofs)	(ofs==0)?a:( ((a)<<(8*ofs))|((b)>>(32-8*ofs)) )
#define	MERGE2(a,b,ofs)	(ofs==3)?b:( ((a)<<(8+8*ofs))|((b)>>(32-8-8*ofs)) )
*/
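For a misaligned source, MERGE1 and MERGE2 rebuild the unaligned 32-bit words at ref and at ref+1 from two aligned loads, so each row costs only aligned accesses plus shifts; the ofs==0 and ofs==3 special cases exist because a shift by 32 would be undefined in C. A little-endian sketch of the reconstruction (the buffer and the memcpy reference loads are illustrative only):

/* check_merge.c -- little-endian only */
#include <stdint.h>
#include <string.h>
#include <stdio.h>

#define MERGE1(a,b,ofs)	(ofs==0)?a:( ((a)>>(8*ofs))|((b)<<(32-8*ofs)) )
#define MERGE2(a,b,ofs)	(ofs==3)?b:( ((a)>>(8*(ofs+1)))|((b)<<(32-8*(ofs+1))) )

int main(void)
{
    uint8_t buf[16];
    int i, ofs;
    for (i = 0; i < 16; i++) buf[i] = (uint8_t)(0x10 + i);

    for (ofs = 0; ofs < 4; ofs++) {
        const uint8_t *p  = buf + 4 + ofs;   /* possibly unaligned address    */
        const uint8_t *al = p - ofs;         /* rounded down, as in OP_C/OP_X */
        uint32_t w0, w1, ref1, ref2;
        memcpy(&w0, al,     4);              /* the two aligned loads         */
        memcpy(&w1, al + 4, 4);
        memcpy(&ref1, p,     4);             /* what an unaligned LD32 yields */
        memcpy(&ref2, p + 1, 4);
        printf("ofs=%d %s %s\n", ofs,
               (MERGE1(w0, w1, ofs)) == ref1 ? "ok" : "BAD",
               (MERGE2(w0, w1, ofs)) == ref2 ? "ok" : "BAD");
    }
    return 0;
}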


#define	put(d,s)	d = s
#define	avg(d,s)	d = rnd_avg2(s,d)

static inline uint32_t rnd_avg2(uint32_t a, uint32_t b)
{
    return (a | b) - (((a ^ b) & ~BYTE_VEC(0x01)) >> 1);
}

static inline uint32_t no_rnd_avg2(uint32_t a, uint32_t b)
{
    return (a & b) + (((a ^ b) & ~BYTE_VEC(0x01)) >> 1);
}
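rnd_avg2 and no_rnd_avg2 average four bytes at a time via the SWAR identities (a+b+1)>>1 == (a|b) - ((a^b)>>1) and (a+b)>>1 == (a&b) + ((a^b)>>1); masking a^b with ~BYTE_VEC(0x01) clears the low bit of each byte so nothing shifts across byte boundaries. A quick per-byte verification (the driver is hypothetical):

/* check_avg.c */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define BYTE_VEC(c) ((c)*0x01010101UL)

static uint32_t rnd_avg2(uint32_t a, uint32_t b)
{
    return (a | b) - (((a ^ b) & ~BYTE_VEC(0x01)) >> 1);  /* per byte: (a+b+1)>>1 */
}

static uint32_t no_rnd_avg2(uint32_t a, uint32_t b)
{
    return (a & b) + (((a ^ b) & ~BYTE_VEC(0x01)) >> 1);  /* per byte: (a+b)>>1   */
}

static uint32_t rand32(void) { return ((uint32_t)rand() << 16) ^ (uint32_t)rand(); }

int main(void)
{
    int i, k;
    for (i = 0; i < 100000; i++) {
        uint32_t a = rand32(), b = rand32();
        uint32_t r = rnd_avg2(a, b), n = no_rnd_avg2(a, b);
        for (k = 0; k < 4; k++) {
            unsigned av = (a >> 8*k) & 0xff, bv = (b >> 8*k) & 0xff;
            if (((r >> 8*k) & 0xff) != ((av + bv + 1) >> 1) ||
                ((n >> 8*k) & 0xff) != ((av + bv) >> 1))
                return printf("mismatch\n"), 1;
        }
    }
    printf("ok\n");
    return 0;
}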


#define	OP_C4(ofs) \
	ref-=ofs; \
	do { \
		OP(LP(dest),MERGE1(LP(ref),LP(ref+4),ofs)); \
		ref+=stride; \
		dest+=stride; \
	} while(--height)

#define	OP_C40() \
	do { \
		OP(LP(dest),LP(ref)); \
		ref+=stride; \
		dest+=stride; \
	} while(--height)


#define	OP	put

static void put_pixels4_c(uint8_t *dest,const uint8_t *ref, const int stride,int height)
{
	switch((int)ref&3){
	case 0: OP_C40(); return;
	case 1: OP_C4(1); return;
	case 2: OP_C4(2); return;
	case 3: OP_C4(3); return;
	}
}

#undef	OP
#define	OP	avg

static void avg_pixels4_c(uint8_t *dest,const uint8_t *ref, const int stride,int height)
{
	switch((int)ref&3){
	case 0: OP_C40(); return;
	case 1: OP_C4(1); return;
	case 2: OP_C4(2); return;
	case 3: OP_C4(3); return;
	}
}

#undef	OP

#define	OP_C(ofs,sz,avg2) \
{ \
	ref-=ofs; \
	do { \
		uint32_t	t0,t1; \
		t0 = LP(ref+0); \
		t1 = LP(ref+4); \
		OP(LP(dest+0), MERGE1(t0,t1,ofs)); \
		t0 = LP(ref+8); \
		OP(LP(dest+4), MERGE1(t1,t0,ofs)); \
if (sz==16) { \
		t1 = LP(ref+12); \
		OP(LP(dest+8), MERGE1(t0,t1,ofs)); \
		t0 = LP(ref+16); \
		OP(LP(dest+12), MERGE1(t1,t0,ofs)); \
} \
		ref+=stride; \
		dest+= stride; \
	} while(--height); \
}

/* aligned */
#define	OP_C0(sz,avg2) \
{ \
	do { \
		OP(LP(dest+0), LP(ref+0)); \
		OP(LP(dest+4), LP(ref+4)); \
if (sz==16) { \
		OP(LP(dest+8), LP(ref+8)); \
		OP(LP(dest+12), LP(ref+12)); \
} \
		ref+=stride; \
		dest+= stride; \
	} while(--height); \
}

#define	OP_X(ofs,sz,avg2) \
{ \
	ref-=ofs; \
	do { \
		uint32_t	t0,t1; \
		t0 = LP(ref+0); \
		t1 = LP(ref+4); \
		OP(LP(dest+0), avg2(MERGE1(t0,t1,ofs),MERGE2(t0,t1,ofs))); \
		t0 = LP(ref+8); \
		OP(LP(dest+4), avg2(MERGE1(t1,t0,ofs),MERGE2(t1,t0,ofs))); \
if (sz==16) { \
		t1 = LP(ref+12); \
		OP(LP(dest+8), avg2(MERGE1(t0,t1,ofs),MERGE2(t0,t1,ofs))); \
		t0 = LP(ref+16); \
		OP(LP(dest+12), avg2(MERGE1(t1,t0,ofs),MERGE2(t1,t0,ofs))); \
} \
		ref+=stride; \
		dest+= stride; \
	} while(--height); \
}

/* aligned */
#define	OP_Y0(sz,avg2) \
{ \
	uint32_t t0,t1,t2,t3,t; \
\
	t0 = LP(ref+0); \
	t1 = LP(ref+4); \
if (sz==16) { \
	t2 = LP(ref+8); \
	t3 = LP(ref+12); \
} \
	do { \
		ref += stride; \
\
		t = LP(ref+0); \
		OP(LP(dest+0), avg2(t0,t)); t0 = t; \
		t = LP(ref+4); \
		OP(LP(dest+4), avg2(t1,t)); t1 = t; \
if (sz==16) { \
		t = LP(ref+8); \
		OP(LP(dest+8), avg2(t2,t)); t2 = t; \
		t = LP(ref+12); \
		OP(LP(dest+12), avg2(t3,t)); t3 = t; \
} \
		dest+= stride; \
	} while(--height); \
}

#define	OP_Y(ofs,sz,avg2) \
{ \
	uint32_t t0,t1,t2,t3,t,w0,w1; \
\
	ref-=ofs; \
	w0 = LP(ref+0); \
	w1 = LP(ref+4); \
	t0 = MERGE1(w0,w1,ofs); \
	w0 = LP(ref+8); \
	t1 = MERGE1(w1,w0,ofs); \
if (sz==16) { \
	w1 = LP(ref+12); \
	t2 = MERGE1(w0,w1,ofs); \
	w0 = LP(ref+16); \
	t3 = MERGE1(w1,w0,ofs); \
} \
	do { \
		ref += stride; \
\
		w0 = LP(ref+0); \
		w1 = LP(ref+4); \
		t = MERGE1(w0,w1,ofs); \
		OP(LP(dest+0), avg2(t0,t)); t0 = t; \
		w0 = LP(ref+8); \
		t = MERGE1(w1,w0,ofs); \
		OP(LP(dest+4), avg2(t1,t)); t1 = t; \
if (sz==16) { \
		w1 = LP(ref+12); \
		t = MERGE1(w0,w1,ofs); \
		OP(LP(dest+8), avg2(t2,t)); t2 = t; \
		w0 = LP(ref+16); \
		t = MERGE1(w1,w0,ofs); \
		OP(LP(dest+12), avg2(t3,t)); t3 = t; \
} \
		dest+=stride; \
	} while(--height); \
}

#define OP_X0(sz,avg2) OP_X(0,sz,avg2)
#define OP_XY0(sz,PACK) OP_XY(0,sz,PACK)
#define	OP_XY(ofs,sz,PACK) \
{ \
	uint32_t	t2,t3,w0,w1; \
	uint32_t	a0,a1,a2,a3,a4,a5,a6,a7; \
\
	ref -= ofs; \
	w0 = LP(ref+0); \
	w1 = LP(ref+4); \
	UNPACK(a0,a1,MERGE1(w0,w1,ofs),MERGE2(w0,w1,ofs)); \
	w0 = LP(ref+8); \
	UNPACK(a2,a3,MERGE1(w1,w0,ofs),MERGE2(w1,w0,ofs)); \
if (sz==16) { \
	w1 = LP(ref+12); \
	UNPACK(a4,a5,MERGE1(w0,w1,ofs),MERGE2(w0,w1,ofs)); \
	w0 = LP(ref+16); \
	UNPACK(a6,a7,MERGE1(w1,w0,ofs),MERGE2(w1,w0,ofs)); \
} \
	do { \
		ref+=stride; \
		w0 = LP(ref+0); \
		w1 = LP(ref+4); \
		UNPACK(t2,t3,MERGE1(w0,w1,ofs),MERGE2(w0,w1,ofs)); \
		OP(LP(dest+0),PACK(a0,a1,t2,t3)); \
		a0 = t2; a1 = t3; \
		w0 = LP(ref+8); \
		UNPACK(t2,t3,MERGE1(w1,w0,ofs),MERGE2(w1,w0,ofs)); \
		OP(LP(dest+4),PACK(a2,a3,t2,t3)); \
		a2 = t2; a3 = t3; \
if (sz==16) { \
		w1 = LP(ref+12); \
		UNPACK(t2,t3,MERGE1(w0,w1,ofs),MERGE2(w0,w1,ofs)); \
		OP(LP(dest+8),PACK(a4,a5,t2,t3)); \
		a4 = t2; a5 = t3; \
		w0 = LP(ref+16); \
		UNPACK(t2,t3,MERGE1(w1,w0,ofs),MERGE2(w1,w0,ofs)); \
		OP(LP(dest+12),PACK(a6,a7,t2,t3)); \
		a6 = t2; a7 = t3; \
} \
		dest+=stride; \
	} while(--height); \
}

#define	DEFFUNC(op,rnd,xy,sz,OP_N,avgfunc) \
static void op##_##rnd##_pixels##sz##_##xy (uint8_t * dest, const uint8_t * ref,	\
				   const int stride, int height)	\
{ \
	switch((int)ref&3) { \
	case 0:OP_N##0(sz,rnd##_##avgfunc); return; \
	case 1:OP_N(1,sz,rnd##_##avgfunc); return; \
	case 2:OP_N(2,sz,rnd##_##avgfunc); return; \
	case 3:OP_N(3,sz,rnd##_##avgfunc); return; \
	} \
}
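DEFFUNC stamps out one dispatcher per (operation, rounding, half-pel case, block size) tuple: the switch on the two low address bits picks an alignment-specialized body, so the MERGE realignment shifts are paid only when the source really is misaligned. In the context of this file (with OP defined as put), DEFFUNC(put,   rnd,x,8,OP_X,avg2) expands to roughly:

static void put_rnd_pixels8_x(uint8_t *dest, const uint8_t *ref,
                              const int stride, int height)
{
	switch((int)ref&3) {
	case 0: OP_X0(8,rnd_avg2); return;  /* source already 4-byte aligned    */
	case 1: OP_X(1,8,rnd_avg2); return; /* cases 1..3 realign with MERGE1/2 */
	case 2: OP_X(2,8,rnd_avg2); return;
	case 3: OP_X(3,8,rnd_avg2); return;
	}
}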

#define OP put

DEFFUNC(put,   rnd,o,8,OP_C,avg2)
DEFFUNC(put,   rnd,x,8,OP_X,avg2)
DEFFUNC(put,no_rnd,x,8,OP_X,avg2)
DEFFUNC(put,   rnd,y,8,OP_Y,avg2)
DEFFUNC(put,no_rnd,y,8,OP_Y,avg2)
DEFFUNC(put,   rnd,xy,8,OP_XY,PACK)
DEFFUNC(put,no_rnd,xy,8,OP_XY,PACK)
DEFFUNC(put,   rnd,o,16,OP_C,avg2)
DEFFUNC(put,   rnd,x,16,OP_X,avg2)
DEFFUNC(put,no_rnd,x,16,OP_X,avg2)
DEFFUNC(put,   rnd,y,16,OP_Y,avg2)
DEFFUNC(put,no_rnd,y,16,OP_Y,avg2)
DEFFUNC(put,   rnd,xy,16,OP_XY,PACK)
DEFFUNC(put,no_rnd,xy,16,OP_XY,PACK)

#undef OP
#define OP avg

DEFFUNC(avg,   rnd,o,8,OP_C,avg2)
DEFFUNC(avg,   rnd,x,8,OP_X,avg2)
DEFFUNC(avg,no_rnd,x,8,OP_X,avg2)
DEFFUNC(avg,   rnd,y,8,OP_Y,avg2)
DEFFUNC(avg,no_rnd,y,8,OP_Y,avg2)
DEFFUNC(avg,   rnd,xy,8,OP_XY,PACK)
DEFFUNC(avg,no_rnd,xy,8,OP_XY,PACK)
DEFFUNC(avg,   rnd,o,16,OP_C,avg2)
DEFFUNC(avg,   rnd,x,16,OP_X,avg2)
DEFFUNC(avg,no_rnd,x,16,OP_X,avg2)
DEFFUNC(avg,   rnd,y,16,OP_Y,avg2)
DEFFUNC(avg,no_rnd,y,16,OP_Y,avg2)
DEFFUNC(avg,   rnd,xy,16,OP_XY,PACK)
DEFFUNC(avg,no_rnd,xy,16,OP_XY,PACK)

#undef OP

#define	put_no_rnd_pixels8_o	put_rnd_pixels8_o
#define	put_no_rnd_pixels16_o	put_rnd_pixels16_o
#define	avg_no_rnd_pixels8_o	avg_rnd_pixels8_o
#define	avg_no_rnd_pixels16_o	avg_rnd_pixels16_o

#define	put_pixels8_c	put_rnd_pixels8_o
#define	put_pixels16_c	put_rnd_pixels16_o
#define	avg_pixels8_c	avg_rnd_pixels8_o
#define	avg_pixels16_c	avg_rnd_pixels16_o
#define	put_no_rnd_pixels8_c	put_rnd_pixels8_o
#define	put_no_rnd_pixels16_c	put_rnd_pixels16_o
#define	avg_no_rnd_pixels8_c	avg_rnd_pixels8_o
#define	avg_no_rnd_pixels16_c	avg_rnd_pixels16_o

#define	QPEL

#ifdef QPEL

#include "qpel.c"

#endif

void dsputil_init_align(DSPContext* c, AVCodecContext *avctx)
{
	c->put_pixels_tab[0][0] = put_rnd_pixels16_o;
	c->put_pixels_tab[0][1] = put_rnd_pixels16_x;
	c->put_pixels_tab[0][2] = put_rnd_pixels16_y;
	c->put_pixels_tab[0][3] = put_rnd_pixels16_xy;
	c->put_pixels_tab[1][0] = put_rnd_pixels8_o;
	c->put_pixels_tab[1][1] = put_rnd_pixels8_x;
	c->put_pixels_tab[1][2] = put_rnd_pixels8_y;
	c->put_pixels_tab[1][3] = put_rnd_pixels8_xy;

	c->put_no_rnd_pixels_tab[0][0] = put_no_rnd_pixels16_o;
	c->put_no_rnd_pixels_tab[0][1] = put_no_rnd_pixels16_x;
	c->put_no_rnd_pixels_tab[0][2] = put_no_rnd_pixels16_y;
	c->put_no_rnd_pixels_tab[0][3] = put_no_rnd_pixels16_xy;
	c->put_no_rnd_pixels_tab[1][0] = put_no_rnd_pixels8_o;
	c->put_no_rnd_pixels_tab[1][1] = put_no_rnd_pixels8_x;
	c->put_no_rnd_pixels_tab[1][2] = put_no_rnd_pixels8_y;
	c->put_no_rnd_pixels_tab[1][3] = put_no_rnd_pixels8_xy;

	c->avg_pixels_tab[0][0] = avg_rnd_pixels16_o;
	c->avg_pixels_tab[0][1] = avg_rnd_pixels16_x;
	c->avg_pixels_tab[0][2] = avg_rnd_pixels16_y;
	c->avg_pixels_tab[0][3] = avg_rnd_pixels16_xy;
	c->avg_pixels_tab[1][0] = avg_rnd_pixels8_o;
	c->avg_pixels_tab[1][1] = avg_rnd_pixels8_x;
	c->avg_pixels_tab[1][2] = avg_rnd_pixels8_y;
	c->avg_pixels_tab[1][3] = avg_rnd_pixels8_xy;

	c->avg_no_rnd_pixels_tab[0][0] = avg_no_rnd_pixels16_o;
	c->avg_no_rnd_pixels_tab[0][1] = avg_no_rnd_pixels16_x;
	c->avg_no_rnd_pixels_tab[0][2] = avg_no_rnd_pixels16_y;
	c->avg_no_rnd_pixels_tab[0][3] = avg_no_rnd_pixels16_xy;
	c->avg_no_rnd_pixels_tab[1][0] = avg_no_rnd_pixels8_o;
	c->avg_no_rnd_pixels_tab[1][1] = avg_no_rnd_pixels8_x;
	c->avg_no_rnd_pixels_tab[1][2] = avg_no_rnd_pixels8_y;
	c->avg_no_rnd_pixels_tab[1][3] = avg_no_rnd_pixels8_xy;

#ifdef QPEL

#define dspfunc(PFX, IDX, NUM) \
    c->PFX ## _pixels_tab[IDX][ 0] = PFX ## NUM ## _mc00_c; \
    c->PFX ## _pixels_tab[IDX][ 1] = PFX ## NUM ## _mc10_c; \
    c->PFX ## _pixels_tab[IDX][ 2] = PFX ## NUM ## _mc20_c; \
    c->PFX ## _pixels_tab[IDX][ 3] = PFX ## NUM ## _mc30_c; \
    c->PFX ## _pixels_tab[IDX][ 4] = PFX ## NUM ## _mc01_c; \
    c->PFX ## _pixels_tab[IDX][ 5] = PFX ## NUM ## _mc11_c; \
    c->PFX ## _pixels_tab[IDX][ 6] = PFX ## NUM ## _mc21_c; \
    c->PFX ## _pixels_tab[IDX][ 7] = PFX ## NUM ## _mc31_c; \
    c->PFX ## _pixels_tab[IDX][ 8] = PFX ## NUM ## _mc02_c; \
    c->PFX ## _pixels_tab[IDX][ 9] = PFX ## NUM ## _mc12_c; \
    c->PFX ## _pixels_tab[IDX][10] = PFX ## NUM ## _mc22_c; \
    c->PFX ## _pixels_tab[IDX][11] = PFX ## NUM ## _mc32_c; \
    c->PFX ## _pixels_tab[IDX][12] = PFX ## NUM ## _mc03_c; \
    c->PFX ## _pixels_tab[IDX][13] = PFX ## NUM ## _mc13_c; \
    c->PFX ## _pixels_tab[IDX][14] = PFX ## NUM ## _mc23_c; \
    c->PFX ## _pixels_tab[IDX][15] = PFX ## NUM ## _mc33_c

    dspfunc(put_qpel, 0, 16);
    dspfunc(put_no_rnd_qpel, 0, 16);

    dspfunc(avg_qpel, 0, 16);
    /* dspfunc(avg_no_rnd_qpel, 0, 16); */

    dspfunc(put_qpel, 1, 8);
    dspfunc(put_no_rnd_qpel, 1, 8);

    dspfunc(avg_qpel, 1, 8);
    /* dspfunc(avg_no_rnd_qpel, 1, 8); */

    dspfunc(put_h264_qpel, 0, 16);
    dspfunc(put_h264_qpel, 1, 8);
    dspfunc(put_h264_qpel, 2, 4);
    dspfunc(avg_h264_qpel, 0, 16);
    dspfunc(avg_h264_qpel, 1, 8);
    dspfunc(avg_h264_qpel, 2, 4);

#undef dspfunc
    c->put_h264_chroma_pixels_tab[0]= put_h264_chroma_mc8_c;
    c->put_h264_chroma_pixels_tab[1]= put_h264_chroma_mc4_c;
    c->put_h264_chroma_pixels_tab[2]= put_h264_chroma_mc2_c;
    c->avg_h264_chroma_pixels_tab[0]= avg_h264_chroma_mc8_c;
    c->avg_h264_chroma_pixels_tab[1]= avg_h264_chroma_mc4_c;
    c->avg_h264_chroma_pixels_tab[2]= avg_h264_chroma_mc2_c;

    c->put_mspel_pixels_tab[0]= put_mspel8_mc00_c;
    c->put_mspel_pixels_tab[1]= put_mspel8_mc10_c;
    c->put_mspel_pixels_tab[2]= put_mspel8_mc20_c;
    c->put_mspel_pixels_tab[3]= put_mspel8_mc30_c;
    c->put_mspel_pixels_tab[4]= put_mspel8_mc02_c;
    c->put_mspel_pixels_tab[5]= put_mspel8_mc12_c;
    c->put_mspel_pixels_tab[6]= put_mspel8_mc22_c;
    c->put_mspel_pixels_tab[7]= put_mspel8_mc32_c;

    c->gmc1 = gmc1_c;
    c->gmc = gmc_c;

#endif
}
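The table layout follows DSPContext: the first index of each *_pixels_tab selects the block width (0 = 16 pixels, 1 = 8) and the second the half-pel case (0 = integer, 1 = x half-pel, 2 = y, 3 = x+y). A hypothetical call site, using the dxy convention of the MPEG code paths:

/* illustrative only: copy one 16x16 block with half-pel motion (mx,my) */
static void mc16(DSPContext *c, uint8_t *dst, const uint8_t *src,
                 int stride, int mx, int my)
{
    int dxy = ((my & 1) << 1) | (mx & 1);   /* 0..3: o, x, y, xy */
    src += (my >> 1) * stride + (mx >> 1);
    c->put_pixels_tab[0][dxy](dst, src, stride, 16);
}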
libavcodec/sh4/qpel.c
/*
	this is optimized for SH, which has post-increment addressing (*p++);
	on some CPUs indexed access (p[n]) may be faster than post-increment (*p++)
*/

#define	LD(adr)	*(uint32_t*)(adr)

#define PIXOP2(OPNAME, OP) \
/*static inline void OPNAME ## _no_rnd_pixels8_l2(uint8_t *dst, const uint8_t *src1, const uint8_t *src2, int dst_stride, int src_stride1, int src_stride2, int h) \
{\
	do {\
		OP(LP(dst  ),no_rnd_avg2(LD32(src1  ),LD32(src2  )) ); \
		OP(LP(dst+4),no_rnd_avg2(LD32(src1+4),LD32(src2+4)) ); \
		src1+=src_stride1; \
		src2+=src_stride2; \
		dst+=dst_stride; \
	} while(--h); \
}\
\
static inline void OPNAME ## _pixels8_l2(uint8_t *dst, const uint8_t *src1, const uint8_t *src2, int dst_stride, int src_stride1, int src_stride2, int h) \
{\
	do {\
		OP(LP(dst  ),rnd_avg2(LD32(src1  ),LD32(src2  )) ); \
		OP(LP(dst+4),rnd_avg2(LD32(src1+4),LD32(src2+4)) ); \
		src1+=src_stride1; \
		src2+=src_stride2; \
		dst+=dst_stride; \
	} while(--h); \
}\
\
static inline void OPNAME ## _pixels4_l2(uint8_t *dst, const uint8_t *src1, const uint8_t *src2, int dst_stride, int src_stride1, int src_stride2, int h) \
{\
	do {\
		OP(LP(dst  ),rnd_avg2(LD32(src1  ),LD32(src2  )) ); \
		src1+=src_stride1; \
		src2+=src_stride2; \
		dst+=dst_stride; \
	} while(--h); \
}\
\
static inline void OPNAME ## _no_rnd_pixels16_l2(uint8_t *dst, const uint8_t *src1, const uint8_t *src2, int dst_stride, int src_stride1, int src_stride2, int h) \
{\
	do {\
		OP(LP(dst  ),no_rnd_avg2(LD32(src1  ),LD32(src2  )) ); \
		OP(LP(dst+4),no_rnd_avg2(LD32(src1+4),LD32(src2+4)) ); \
		OP(LP(dst+8),no_rnd_avg2(LD32(src1+8),LD32(src2+8)) ); \
		OP(LP(dst+12),no_rnd_avg2(LD32(src1+12),LD32(src2+12)) ); \
		src1+=src_stride1; \
		src2+=src_stride2; \
		dst+=dst_stride; \
	} while(--h); \
}\
\
static inline void OPNAME ## _pixels16_l2(uint8_t *dst, const uint8_t *src1, const uint8_t *src2, int dst_stride, int src_stride1, int src_stride2, int h) \
{\
	do {\
		OP(LP(dst  ),rnd_avg2(LD32(src1  ),LD32(src2  )) ); \
		OP(LP(dst+4),rnd_avg2(LD32(src1+4),LD32(src2+4)) ); \
		OP(LP(dst+8),rnd_avg2(LD32(src1+8),LD32(src2+8)) ); \
		OP(LP(dst+12),rnd_avg2(LD32(src1+12),LD32(src2+12)) ); \
		src1+=src_stride1; \
		src2+=src_stride2; \
		dst+=dst_stride; \
	} while(--h); \
}*/\
\
static inline void OPNAME ## _pixels4_l2_aligned(uint8_t *dst, const uint8_t *src1, const uint8_t *src2, int dst_stride, int src_stride1, int src_stride2, int h) \
{\
	do {\
		OP(LP(dst  ),rnd_avg2(LP(src1  ),LP(src2  )) ); \
		src1+=src_stride1; \
		src2+=src_stride2; \
		dst+=dst_stride; \
	} while(--h); \
}\
\
static inline void OPNAME ## _pixels4_l2_aligned2(uint8_t *dst, const uint8_t *src1, const uint8_t *src2, int dst_stride, int src_stride1, int src_stride2, int h) \
{\
	do {\
		OP(LP(dst  ),rnd_avg2(LD32(src1  ),LP(src2  )) ); \
		src1+=src_stride1; \
		src2+=src_stride2; \
		dst+=dst_stride; \
	} while(--h); \
}\
\
static inline void OPNAME ## _no_rnd_pixels16_l2_aligned2(uint8_t *dst, const uint8_t *src1, const uint8_t *src2, int dst_stride, int src_stride1, int src_stride2, int h) \
{\
	do {\
		OP(LP(dst  ),no_rnd_avg2(LD32(src1  ),LP(src2  )) ); \
		OP(LP(dst+4),no_rnd_avg2(LD32(src1+4),LP(src2+4)) ); \
		OP(LP(dst+8),no_rnd_avg2(LD32(src1+8),LP(src2+8)) ); \
		OP(LP(dst+12),no_rnd_avg2(LD32(src1+12),LP(src2+12)) ); \
		src1+=src_stride1; \
		src2+=src_stride2; \
		dst+=dst_stride; \
	} while(--h); \
}\
\
static inline void OPNAME ## _pixels16_l2_aligned2(uint8_t *dst, const uint8_t *src1, const uint8_t *src2, int dst_stride, int src_stride1, int src_stride2, int h) \
{\
	do {\
		OP(LP(dst  ),rnd_avg2(LD32(src1  ),LP(src2  )) ); \
		OP(LP(dst+4),rnd_avg2(LD32(src1+4),LP(src2+4)) ); \
		OP(LP(dst+8),rnd_avg2(LD32(src1+8),LP(src2+8)) ); \
		OP(LP(dst+12),rnd_avg2(LD32(src1+12),LP(src2+12)) ); \
		src1+=src_stride1; \
		src2+=src_stride2; \
		dst+=dst_stride; \
	} while(--h); \
}\
\
static inline void OPNAME ## _no_rnd_pixels8_l2_aligned2(uint8_t *dst, const uint8_t *src1, const uint8_t *src2, int dst_stride, int src_stride1, int src_stride2, int h) \
{\
	do { /* only src2 aligned */\
		OP(LP(dst  ),no_rnd_avg2(LD32(src1  ),LP(src2  )) ); \
		OP(LP(dst+4),no_rnd_avg2(LD32(src1+4),LP(src2+4)) ); \
		src1+=src_stride1; \
		src2+=src_stride2; \
		dst+=dst_stride; \
	} while(--h); \
}\
\
static inline void OPNAME ## _pixels8_l2_aligned2(uint8_t *dst, const uint8_t *src1, const uint8_t *src2, int dst_stride, int src_stride1, int src_stride2, int h) \
{\
	do {\
		OP(LP(dst  ),rnd_avg2(LD32(src1  ),LP(src2  )) ); \
		OP(LP(dst+4),rnd_avg2(LD32(src1+4),LP(src2+4)) ); \
		src1+=src_stride1; \
		src2+=src_stride2; \
		dst+=dst_stride; \
	} while(--h); \
}\
\
static inline void OPNAME ## _no_rnd_pixels8_l2_aligned(uint8_t *dst, const uint8_t *src1, const uint8_t *src2, int dst_stride, int src_stride1, int src_stride2, int h) \
{\
	do {\
		OP(LP(dst  ),no_rnd_avg2(LP(src1  ),LP(src2  )) ); \
		OP(LP(dst+4),no_rnd_avg2(LP(src1+4),LP(src2+4)) ); \
		src1+=src_stride1; \
		src2+=src_stride2; \
		dst+=dst_stride; \
	} while(--h); \
}\
\
static inline void OPNAME ## _pixels8_l2_aligned(uint8_t *dst, const uint8_t *src1, const uint8_t *src2, int dst_stride, int src_stride1, int src_stride2, int h) \
{\
	do {\
		OP(LP(dst  ),rnd_avg2(LP(src1  ),LP(src2  )) ); \
		OP(LP(dst+4),rnd_avg2(LP(src1+4),LP(src2+4)) ); \
		src1+=src_stride1; \
		src2+=src_stride2; \
		dst+=dst_stride; \
	} while(--h); \
}\
\
static inline void OPNAME ## _no_rnd_pixels16_l2_aligned(uint8_t *dst, const uint8_t *src1, const uint8_t *src2, int dst_stride, int src_stride1, int src_stride2, int h) \
{\
	do {\
		OP(LP(dst  ),no_rnd_avg2(LP(src1  ),LP(src2  )) ); \
		OP(LP(dst+4),no_rnd_avg2(LP(src1+4),LP(src2+4)) ); \
		OP(LP(dst+8),no_rnd_avg2(LP(src1+8),LP(src2+8)) ); \
		OP(LP(dst+12),no_rnd_avg2(LP(src1+12),LP(src2+12)) ); \
		src1+=src_stride1; \
		src2+=src_stride2; \
		dst+=dst_stride; \
	} while(--h); \
}\
\
static inline void OPNAME ## _pixels16_l2_aligned(uint8_t *dst, const uint8_t *src1, const uint8_t *src2, int dst_stride, int src_stride1, int src_stride2, int h) \
{\
	do {\
		OP(LP(dst  ),rnd_avg2(LP(src1  ),LP(src2  )) ); \
		OP(LP(dst+4),rnd_avg2(LP(src1+4),LP(src2+4)) ); \
		OP(LP(dst+8),rnd_avg2(LP(src1+8),LP(src2+8)) ); \
		OP(LP(dst+12),rnd_avg2(LP(src1+12),LP(src2+12)) ); \
		src1+=src_stride1; \
		src2+=src_stride2; \
		dst+=dst_stride; \
	} while(--h); \
}\
\
static inline void OPNAME ## _no_rnd_pixels16_l2_aligned1(uint8_t *dst, const uint8_t *src1, const uint8_t *src2, int dst_stride, int src_stride1, int src_stride2, int h) \
{ OPNAME ## _no_rnd_pixels16_l2_aligned2(dst,src2,src1,dst_stride,src_stride2,src_stride1,h); } \
\
static inline void OPNAME ## _pixels16_l2_aligned1(uint8_t *dst, const uint8_t *src1, const uint8_t *src2, int dst_stride, int src_stride1, int src_stride2, int h) \
{ OPNAME ## _pixels16_l2_aligned2(dst,src2,src1,dst_stride,src_stride2,src_stride1,h); } \
\
static inline void OPNAME ## _no_rnd_pixels8_l2_aligned1(uint8_t *dst, const uint8_t *src1, const uint8_t *src2, int dst_stride, int src_stride1, int src_stride2, int h) \
{ OPNAME ## _no_rnd_pixels8_l2_aligned2(dst,src2,src1,dst_stride,src_stride2,src_stride1,h); } \
\
static inline void OPNAME ## _pixels8_l2_aligned1(uint8_t *dst, const uint8_t *src1, const uint8_t *src2, int dst_stride, int src_stride1, int src_stride2, int h) \
{ OPNAME ## _pixels8_l2_aligned2(dst,src2,src1,dst_stride,src_stride2,src_stride1,h); } \
\
static inline void OPNAME ## _pixels8_l4_aligned(uint8_t *dst, const uint8_t *src1, uint8_t *src2, uint8_t *src3, uint8_t *src4,int dst_stride, int src_stride1, int src_stride2,int src_stride3,int src_stride4, int h){\
	do { \
		uint32_t a0,a1,a2,a3; \
		UNPACK(a0,a1,LP(src1),LP(src2)); \
		UNPACK(a2,a3,LP(src3),LP(src4)); \
		OP(LP(dst),rnd_PACK(a0,a1,a2,a3)); \
		UNPACK(a0,a1,LP(src1+4),LP(src2+4)); \
		UNPACK(a2,a3,LP(src3+4),LP(src4+4)); \
		OP(LP(dst+4),rnd_PACK(a0,a1,a2,a3)); \
		src1+=src_stride1;\
		src2+=src_stride2;\
		src3+=src_stride3;\
		src4+=src_stride4;\
		dst+=dst_stride;\
	} while(--h); \
} \
\
static inline void OPNAME ## _no_rnd_pixels8_l4_aligned(uint8_t *dst, const uint8_t *src1, uint8_t *src2, uint8_t *src3, uint8_t *src4,int dst_stride, int src_stride1, int src_stride2,int src_stride3,int src_stride4, int h){\
	do { \
		uint32_t a0,a1,a2,a3; \
		UNPACK(a0,a1,LP(src1),LP(src2)); \
		UNPACK(a2,a3,LP(src3),LP(src4)); \
		OP(LP(dst),no_rnd_PACK(a0,a1,a2,a3)); \
		UNPACK(a0,a1,LP(src1+4),LP(src2+4)); \
		UNPACK(a2,a3,LP(src3+4),LP(src4+4)); \
		OP(LP(dst+4),no_rnd_PACK(a0,a1,a2,a3)); \
		src1+=src_stride1;\
		src2+=src_stride2;\
		src3+=src_stride3;\
		src4+=src_stride4;\
		dst+=dst_stride;\
	} while(--h); \
} \
\
static inline void OPNAME ## _pixels8_l4_aligned0(uint8_t *dst, const uint8_t *src1, uint8_t *src2, uint8_t *src3, uint8_t *src4,int dst_stride, int src_stride1, int src_stride2,int src_stride3,int src_stride4, int h){\
	do { \
		uint32_t a0,a1,a2,a3; /* src1 only not aligned */\
		UNPACK(a0,a1,LD32(src1),LP(src2)); \
		UNPACK(a2,a3,LP(src3),LP(src4)); \
		OP(LP(dst),rnd_PACK(a0,a1,a2,a3)); \
		UNPACK(a0,a1,LD32(src1+4),LP(src2+4)); \
		UNPACK(a2,a3,LP(src3+4),LP(src4+4)); \
		OP(LP(dst+4),rnd_PACK(a0,a1,a2,a3)); \
		src1+=src_stride1;\
		src2+=src_stride2;\
		src3+=src_stride3;\
		src4+=src_stride4;\
		dst+=dst_stride;\
	} while(--h); \
} \
\
static inline void OPNAME ## _no_rnd_pixels8_l4_aligned0(uint8_t *dst, const uint8_t *src1, uint8_t *src2, uint8_t *src3, uint8_t *src4,int dst_stride, int src_stride1, int src_stride2,int src_stride3,int src_stride4, int h){\
	do { \
		uint32_t a0,a1,a2,a3; \
		UNPACK(a0,a1,LD32(src1),LP(src2)); \
		UNPACK(a2,a3,LP(src3),LP(src4)); \
		OP(LP(dst),no_rnd_PACK(a0,a1,a2,a3)); \
		UNPACK(a0,a1,LD32(src1+4),LP(src2+4)); \
		UNPACK(a2,a3,LP(src3+4),LP(src4+4)); \
		OP(LP(dst+4),no_rnd_PACK(a0,a1,a2,a3)); \
		src1+=src_stride1;\
		src2+=src_stride2;\
		src3+=src_stride3;\
		src4+=src_stride4;\
		dst+=dst_stride;\
	} while(--h); \
} \
\
static inline void OPNAME ## _pixels16_l4_aligned(uint8_t *dst, const uint8_t *src1, uint8_t *src2, uint8_t *src3, uint8_t *src4,int dst_stride, int src_stride1, int src_stride2,int src_stride3,int src_stride4, int h){\
	do { \
		uint32_t a0,a1,a2,a3; \
		UNPACK(a0,a1,LP(src1),LP(src2)); \
		UNPACK(a2,a3,LP(src3),LP(src4)); \
		OP(LP(dst),rnd_PACK(a0,a1,a2,a3)); \
		UNPACK(a0,a1,LP(src1+4),LP(src2+4)); \
		UNPACK(a2,a3,LP(src3+4),LP(src4+4)); \
		OP(LP(dst+4),rnd_PACK(a0,a1,a2,a3)); \
		UNPACK(a0,a1,LP(src1+8),LP(src2+8)); \
		UNPACK(a2,a3,LP(src3+8),LP(src4+8)); \
		OP(LP(dst+8),rnd_PACK(a0,a1,a2,a3)); \
		UNPACK(a0,a1,LP(src1+12),LP(src2+12)); \
		UNPACK(a2,a3,LP(src3+12),LP(src4+12)); \
		OP(LP(dst+12),rnd_PACK(a0,a1,a2,a3)); \
		src1+=src_stride1;\
		src2+=src_stride2;\
		src3+=src_stride3;\
		src4+=src_stride4;\
		dst+=dst_stride;\
	} while(--h); \
} \
\
static inline void OPNAME ## _no_rnd_pixels16_l4_aligned(uint8_t *dst, const uint8_t *src1, uint8_t *src2, uint8_t *src3, uint8_t *src4,int dst_stride, int src_stride1, int src_stride2,int src_stride3,int src_stride4, int h){\
	do { \
		uint32_t a0,a1,a2,a3; \
		UNPACK(a0,a1,LP(src1),LP(src2)); \
		UNPACK(a2,a3,LP(src3),LP(src4)); \
		OP(LP(dst),no_rnd_PACK(a0,a1,a2,a3)); \
		UNPACK(a0,a1,LP(src1+4),LP(src2+4)); \
		UNPACK(a2,a3,LP(src3+4),LP(src4+4)); \
		OP(LP(dst+4),no_rnd_PACK(a0,a1,a2,a3)); \
		UNPACK(a0,a1,LP(src1+8),LP(src2+8)); \
		UNPACK(a2,a3,LP(src3+8),LP(src4+8)); \
		OP(LP(dst+8),no_rnd_PACK(a0,a1,a2,a3)); \
		UNPACK(a0,a1,LP(src1+12),LP(src2+12)); \
		UNPACK(a2,a3,LP(src3+12),LP(src4+12)); \
		OP(LP(dst+12),no_rnd_PACK(a0,a1,a2,a3)); \
		src1+=src_stride1;\
		src2+=src_stride2;\
		src3+=src_stride3;\
		src4+=src_stride4;\
		dst+=dst_stride;\
	} while(--h); \
} \
\
static inline void OPNAME ## _pixels16_l4_aligned0(uint8_t *dst, const uint8_t *src1, uint8_t *src2, uint8_t *src3, uint8_t *src4,int dst_stride, int src_stride1, int src_stride2,int src_stride3,int src_stride4, int h){\
	do { /* src1 is unaligned */\
		uint32_t a0,a1,a2,a3; \
		UNPACK(a0,a1,LD32(src1),LP(src2)); \
		UNPACK(a2,a3,LP(src3),LP(src4)); \
		OP(LP(dst),rnd_PACK(a0,a1,a2,a3)); \
		UNPACK(a0,a1,LD32(src1+4),LP(src2+4)); \
		UNPACK(a2,a3,LP(src3+4),LP(src4+4)); \
		OP(LP(dst+4),rnd_PACK(a0,a1,a2,a3)); \
		UNPACK(a0,a1,LD32(src1+8),LP(src2+8)); \
		UNPACK(a2,a3,LP(src3+8),LP(src4+8)); \
		OP(LP(dst+8),rnd_PACK(a0,a1,a2,a3)); \
		UNPACK(a0,a1,LD32(src1+12),LP(src2+12)); \
		UNPACK(a2,a3,LP(src3+12),LP(src4+12)); \
		OP(LP(dst+12),rnd_PACK(a0,a1,a2,a3)); \
		src1+=src_stride1;\
		src2+=src_stride2;\
		src3+=src_stride3;\
		src4+=src_stride4;\
		dst+=dst_stride;\
	} while(--h); \
} \
\
static inline void OPNAME ## _no_rnd_pixels16_l4_aligned0(uint8_t *dst, const uint8_t *src1, uint8_t *src2, uint8_t *src3, uint8_t *src4,int dst_stride, int src_stride1, int src_stride2,int src_stride3,int src_stride4, int h){\
	do { \
		uint32_t a0,a1,a2,a3; \
		UNPACK(a0,a1,LD32(src1),LP(src2)); \
		UNPACK(a2,a3,LP(src3),LP(src4)); \
		OP(LP(dst),no_rnd_PACK(a0,a1,a2,a3)); \
		UNPACK(a0,a1,LD32(src1+4),LP(src2+4)); \
		UNPACK(a2,a3,LP(src3+4),LP(src4+4)); \
		OP(LP(dst+4),no_rnd_PACK(a0,a1,a2,a3)); \
		UNPACK(a0,a1,LD32(src1+8),LP(src2+8)); \
		UNPACK(a2,a3,LP(src3+8),LP(src4+8)); \
		OP(LP(dst+8),no_rnd_PACK(a0,a1,a2,a3)); \
		UNPACK(a0,a1,LD32(src1+12),LP(src2+12)); \
		UNPACK(a2,a3,LP(src3+12),LP(src4+12)); \
		OP(LP(dst+12),no_rnd_PACK(a0,a1,a2,a3)); \
		src1+=src_stride1;\
		src2+=src_stride2;\
		src3+=src_stride3;\
		src4+=src_stride4;\
		dst+=dst_stride;\
	} while(--h); \
} \
\

#define op_avg(a, b) a = rnd_avg2(a,b)
#define op_put(a, b) a = b

PIXOP2(avg, op_avg)
PIXOP2(put, op_put)
#undef op_avg
#undef op_put
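PIXOP2 is instantiated once per destination operation, so every helper above exists in a put_ and an avg_ flavor. As an illustration (not itself part of the file), the avg_pixels8_l2_aligned that PIXOP2(avg, op_avg) generates behaves like:

static inline void avg_pixels8_l2_aligned_sketch(uint8_t *dst, const uint8_t *src1,
		const uint8_t *src2, int dst_stride, int src_stride1, int src_stride2, int h)
{
	do {
		/* op_avg(a,b): a = rnd_avg2(a,b) -- average the combined row into dst */
		LP(dst  ) = rnd_avg2(LP(dst  ), rnd_avg2(LP(src1  ), LP(src2  )));
		LP(dst+4) = rnd_avg2(LP(dst+4), rnd_avg2(LP(src1+4), LP(src2+4)));
		src1 += src_stride1;
		src2 += src_stride2;
		dst  += dst_stride;
	} while (--h);
}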

#define avg2(a,b) ((a+b+1)>>1)
#define avg4(a,b,c,d) ((a+b+c+d+2)>>2)


static void gmc1_c(uint8_t *dst, uint8_t *src, int stride, int h, int x16, int y16, int rounder)
{
    const int A=(16-x16)*(16-y16);
    const int B=(   x16)*(16-y16);
    const int C=(16-x16)*(   y16);
    const int D=(   x16)*(   y16);

    do {
        int t0,t1,t2,t3;
        uint8_t *s0 = src;
        uint8_t *s1 = src+stride;
        t0 = *s0++; t2 = *s1++;
        t1 = *s0++; t3 = *s1++;
        dst[0]= (A*t0 + B*t1 + C*t2 + D*t3 + rounder)>>8;
        t0 = *s0++; t2 = *s1++;
        dst[1]= (A*t1 + B*t0 + C*t3 + D*t2 + rounder)>>8;
        t1 = *s0++; t3 = *s1++;
        dst[2]= (A*t0 + B*t1 + C*t2 + D*t3 + rounder)>>8;
        t0 = *s0++; t2 = *s1++;
        dst[3]= (A*t1 + B*t0 + C*t3 + D*t2 + rounder)>>8;
        t1 = *s0++; t3 = *s1++;
        dst[4]= (A*t0 + B*t1 + C*t2 + D*t3 + rounder)>>8;
        t0 = *s0++; t2 = *s1++;
        dst[5]= (A*t1 + B*t0 + C*t3 + D*t2 + rounder)>>8;
        t1 = *s0++; t3 = *s1++;
        dst[6]= (A*t0 + B*t1 + C*t2 + D*t3 + rounder)>>8;
        t0 = *s0++; t2 = *s1++;
        dst[7]= (A*t1 + B*t0 + C*t3 + D*t2 + rounder)>>8;
        dst+= stride;
        src+= stride;
    }while(--h);
}
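gmc1_c is one-pass bilinear global motion compensation with 1/16-pel weights; A+B+C+D == 256, so the >>8 renormalizes, and the t0..t3 juggling only reuses each tap's right-hand source pixels as the next tap's left-hand ones. The unrolled loop is equivalent to this straightforward form (illustration only):

static void gmc1_ref(uint8_t *dst, uint8_t *src, int stride, int h,
                     int x16, int y16, int rounder)
{
    const int A=(16-x16)*(16-y16);
    const int B=(   x16)*(16-y16);
    const int C=(16-x16)*(   y16);
    const int D=(   x16)*(   y16);  /* A+B+C+D == 256 */
    int x, y;

    for (y = 0; y < h; y++) {
        for (x = 0; x < 8; x++)
            dst[x] = (A*src[x]        + B*src[x+1] +
                      C*src[x+stride] + D*src[x+stride+1] + rounder) >> 8;
        dst += stride;
        src += stride;
    }
}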

static void gmc_c(uint8_t *dst, uint8_t *src, int stride, int h, int ox, int oy,
                  int dxx, int dxy, int dyx, int dyy, int shift, int r, int width, int height)
{
    int y, vx, vy;
    const int s= 1<<shift;

    width--;
    height--;

    for(y=0; y<h; y++){
        int x;

        vx= ox;
        vy= oy;
        for(x=0; x<8; x++){ //XXX FIXME optimize
            int src_x, src_y, frac_x, frac_y, index;

            src_x= vx>>16;
            src_y= vy>>16;
            frac_x= src_x&(s-1);
            frac_y= src_y&(s-1);
            src_x>>=shift;
            src_y>>=shift;

            if((unsigned)src_x < width){
                if((unsigned)src_y < height){
                    index= src_x + src_y*stride;
                    dst[y*stride + x]= (  (  src[index         ]*(s-frac_x)
                                           + src[index       +1]*   frac_x )*(s-frac_y)
                                        + (  src[index+stride  ]*(s-frac_x)
                                           + src[index+stride+1]*   frac_x )*   frac_y
                                        + r)>>(shift*2);
                }else{
                    index= src_x + clip(src_y, 0, height)*stride;
                    dst[y*stride + x]= ( (  src[index         ]*(s-frac_x)
                                          + src[index       +1]*   frac_x )*s
                                        + r)>>(shift*2);
                }
            }else{
                if((unsigned)src_y < height){
                    index= clip(src_x, 0, width) + src_y*stride;
                    dst[y*stride + x]= (  (  src[index         ]*(s-frac_y)
                                           + src[index+stride  ]*   frac_y )*s
                                        + r)>>(shift*2);
                }else{
                    index= clip(src_x, 0, width) + clip(src_y, 0, height)*stride;
                    dst[y*stride + x]=    src[index         ];
                }
            }

            vx+= dxx;
            vy+= dyx;
        }
        ox += dxy;
        oy += dyy;
    }
}
#define H264_CHROMA_MC(OPNAME, OP)\
static void OPNAME ## h264_chroma_mc2_c(uint8_t *dst/*align 8*/, uint8_t *src/*align 1*/, int stride, int h, int x, int y){\
    const int A=(8-x)*(8-y);\
    const int B=(  x)*(8-y);\
    const int C=(8-x)*(  y);\
    const int D=(  x)*(  y);\
    \
    assert(x<8 && y<8 && x>=0 && y>=0);\
\
    do {\
        int t0,t1,t2,t3; \
        uint8_t *s0 = src; \
        uint8_t *s1 = src+stride; \
        t0 = *s0++; t2 = *s1++; \
        t1 = *s0++; t3 = *s1++; \
        OP(dst[0], (A*t0 + B*t1 + C*t2 + D*t3));\
        t0 = *s0++; t2 = *s1++; \
        OP(dst[1], (A*t1 + B*t0 + C*t3 + D*t2));\
        dst+= stride;\
        src+= stride;\
    }while(--h);\
}\
\
static void OPNAME ## h264_chroma_mc4_c(uint8_t *dst/*align 8*/, uint8_t *src/*align 1*/, int stride, int h, int x, int y){\
    const int A=(8-x)*(8-y);\
    const int B=(  x)*(8-y);\
    const int C=(8-x)*(  y);\
    const int D=(  x)*(  y);\
    \
    assert(x<8 && y<8 && x>=0 && y>=0);\
\
    do {\
        int t0,t1,t2,t3; \
        uint8_t *s0 = src; \
        uint8_t *s1 = src+stride; \
        t0 = *s0++; t2 = *s1++; \
        t1 = *s0++; t3 = *s1++; \
        OP(dst[0], (A*t0 + B*t1 + C*t2 + D*t3));\
        t0 = *s0++; t2 = *s1++; \
        OP(dst[1], (A*t1 + B*t0 + C*t3 + D*t2));\
        t1 = *s0++; t3 = *s1++; \
        OP(dst[2], (A*t0 + B*t1 + C*t2 + D*t3));\
        t0 = *s0++; t2 = *s1++; \
        OP(dst[3], (A*t1 + B*t0 + C*t3 + D*t2));\
        dst+= stride;\
        src+= stride;\
    }while(--h);\
}\
\
static void OPNAME ## h264_chroma_mc8_c(uint8_t *dst/*align 8*/, uint8_t *src/*align 1*/, int stride, int h, int x, int y){\
    const int A=(8-x)*(8-y);\
    const int B=(  x)*(8-y);\
    const int C=(8-x)*(  y);\
    const int D=(  x)*(  y);\
    \
    assert(x<8 && y<8 && x>=0 && y>=0);\
\
    do {\
        int t0,t1,t2,t3; \
        uint8_t *s0 = src; \
        uint8_t *s1 = src+stride; \
        t0 = *s0++; t2 = *s1++; \
        t1 = *s0++; t3 = *s1++; \
        OP(dst[0], (A*t0 + B*t1 + C*t2 + D*t3));\
        t0 = *s0++; t2 = *s1++; \
        OP(dst[1], (A*t1 + B*t0 + C*t3 + D*t2));\
        t1 = *s0++; t3 = *s1++; \
        OP(dst[2], (A*t0 + B*t1 + C*t2 + D*t3));\
        t0 = *s0++; t2 = *s1++; \
        OP(dst[3], (A*t1 + B*t0 + C*t3 + D*t2));\
        t1 = *s0++; t3 = *s1++; \
        OP(dst[4], (A*t0 + B*t1 + C*t2 + D*t3));\
        t0 = *s0++; t2 = *s1++; \
        OP(dst[5], (A*t1 + B*t0 + C*t3 + D*t2));\
        t1 = *s0++; t3 = *s1++; \
        OP(dst[6], (A*t0 + B*t1 + C*t2 + D*t3));\
        t0 = *s0++; t2 = *s1++; \
        OP(dst[7], (A*t1 + B*t0 + C*t3 + D*t2));\
        dst+= stride;\
        src+= stride;\
    }while(--h);\
}

#define op_avg(a, b) a = (((a)+(((b) + 32)>>6)+1)>>1)
#define op_put(a, b) a = (((b) + 32)>>6)

H264_CHROMA_MC(put_       , op_put)
H264_CHROMA_MC(avg_       , op_avg)
#undef op_avg
#undef op_put
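For the chroma filter the weights satisfy A+B+C+D == 64, so op_put renormalizes with ((b)+32)>>6, and op_avg renormalizes the same way before averaging into the destination with +1 rounding. A tiny numeric check (illustrative): with x == y == 4 all four weights are 16, so a flat gray source must come back unchanged.

#include <stdio.h>

int main(void)
{
    int x = 4, y = 4, p = 128;
    int A = (8-x)*(8-y), B = x*(8-y), C = (8-x)*y, D = x*y;
    int filtered = A*p + B*p + C*p + D*p;                          /* == 64*p == 8192 */
    printf("A+B+C+D=%d put=%d\n", A+B+C+D, (filtered + 32) >> 6);  /* prints 64, 128  */
    return 0;
}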

/* not yet optimized */
static inline void copy_block4(uint8_t *dst, uint8_t *src, int dstStride, int srcStride, int h)
{
    int i;
    for(i=0; i<h; i++)
    {
        ST32(dst   , LD32(src   ));
        dst+=dstStride;
        src+=srcStride;
    }
}

static inline void copy_block8(uint8_t *dst, uint8_t *src, int dstStride, int srcStride, int h)
{
    int i;
    for(i=0; i<h; i++)
    {
        ST32(dst   , LD32(src   ));
        ST32(dst+4 , LD32(src+4 ));
        dst+=dstStride;
        src+=srcStride;
    }
}

static inline void copy_block16(uint8_t *dst, uint8_t *src, int dstStride, int srcStride, int h)
{
    int i;
    for(i=0; i<h; i++)
    {
        ST32(dst   , LD32(src   ));
        ST32(dst+4 , LD32(src+4 ));
        ST32(dst+8 , LD32(src+8 ));
        ST32(dst+12, LD32(src+12));
        dst+=dstStride;
        src+=srcStride;
    }
}

static inline void copy_block17(uint8_t *dst, uint8_t *src, int dstStride, int srcStride, int h)
{
    int i;
    for(i=0; i<h; i++)
    {
        ST32(dst   , LD32(src   ));
        ST32(dst+4 , LD32(src+4 ));
        ST32(dst+8 , LD32(src+8 ));
        ST32(dst+12, LD32(src+12));
        dst[16]= src[16];
        dst+=dstStride;
        src+=srcStride;
    }
}

static inline void copy_block9(uint8_t *dst, uint8_t *src, int dstStride, int srcStride, int h)
{
    int i;
    for(i=0; i<h; i++)
    {
        ST32(dst   , LD32(src   ));
        ST32(dst+4 , LD32(src+4 ));
        dst[8]= src[8];
        dst+=dstStride;
        src+=srcStride;
    }
}
/* end not optimized */

#define QPEL_MC(r, OPNAME, RND, OP) \
static void OPNAME ## mpeg4_qpel8_h_lowpass(uint8_t *dst, uint8_t *src, int dstStride, int srcStride, int h){\
    uint8_t *cm = cropTbl + MAX_NEG_CROP;\
    do {\
        uint8_t *s = src; \
        int src0,src1,src2,src3,src4,src5,src6,src7,src8;\
        src0= *s++;\
        src1= *s++;\
        src2= *s++;\
        src3= *s++;\
        src4= *s++;\
        OP(dst[0], (src0+src1)*20 - (src0+src2)*6 + (src1+src3)*3 - (src2+src4));\
        src5= *s++;\
        OP(dst[1], (src1+src2)*20 - (src0+src3)*6 + (src0+src4)*3 - (src1+src5));\
        src6= *s++;\
        OP(dst[2], (src2+src3)*20 - (src1+src4)*6 + (src0+src5)*3 - (src0+src6));\
        src7= *s++;\
        OP(dst[3], (src3+src4)*20 - (src2+src5)*6 + (src1+src6)*3 - (src0+src7));\
        src8= *s++;\
        OP(dst[4], (src4+src5)*20 - (src3+src6)*6 + (src2+src7)*3 - (src1+src8));\
        OP(dst[5], (src5+src6)*20 - (src4+src7)*6 + (src3+src8)*3 - (src2+src8));\
        OP(dst[6], (src6+src7)*20 - (src5+src8)*6 + (src4+src8)*3 - (src3+src7));\
        OP(dst[7], (src7+src8)*20 - (src6+src8)*6 + (src5+src7)*3 - (src4+src6));\
        dst+=dstStride;\
        src+=srcStride;\
    }while(--h);\
}\
\
static void OPNAME ## mpeg4_qpel8_v_lowpass(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\
    uint8_t *cm = cropTbl + MAX_NEG_CROP;\
    int w=8;\
    do{\
        uint8_t *s = src, *d=dst;\
        int src0,src1,src2,src3,src4,src5,src6,src7,src8;\
        src0 = *s; s+=srcStride; \
        src1 = *s; s+=srcStride; \
        src2 = *s; s+=srcStride; \
        src3 = *s; s+=srcStride; \
        src4 = *s; s+=srcStride; \
        OP(*d, (src0+src1)*20 - (src0+src2)*6 + (src1+src3)*3 - (src2+src4));d+=dstStride;\
        src5 = *s; s+=srcStride; \
        OP(*d, (src1+src2)*20 - (src0+src3)*6 + (src0+src4)*3 - (src1+src5));d+=dstStride;\
        src6 = *s; s+=srcStride; \
        OP(*d, (src2+src3)*20 - (src1+src4)*6 + (src0+src5)*3 - (src0+src6));d+=dstStride;\
        src7 = *s; s+=srcStride; \
        OP(*d, (src3+src4)*20 - (src2+src5)*6 + (src1+src6)*3 - (src0+src7));d+=dstStride;\
        src8 = *s; \
        OP(*d, (src4+src5)*20 - (src3+src6)*6 + (src2+src7)*3 - (src1+src8));d+=dstStride;\
        OP(*d, (src5+src6)*20 - (src4+src7)*6 + (src3+src8)*3 - (src2+src8));d+=dstStride;\
        OP(*d, (src6+src7)*20 - (src5+src8)*6 + (src4+src8)*3 - (src3+src7));d+=dstStride;\
        OP(*d, (src7+src8)*20 - (src6+src8)*6 + (src5+src7)*3 - (src4+src6));\
        dst++;\
        src++;\
    }while(--w);\
}\
\
static void OPNAME ## mpeg4_qpel16_h_lowpass(uint8_t *dst, uint8_t *src, int dstStride, int srcStride, int h){\
    uint8_t *cm = cropTbl + MAX_NEG_CROP;\
    do {\
        uint8_t *s = src;\
        int src0,src1,src2,src3,src4,src5,src6,src7,src8;\
        int src9,src10,src11,src12,src13,src14,src15,src16;\
        src0= *s++;\
        src1= *s++;\
        src2= *s++;\
        src3= *s++;\
        src4= *s++;\
        OP(dst[ 0], (src0 +src1 )*20 - (src0 +src2 )*6 + (src1 +src3 )*3 - (src2 +src4 ));\
        src5= *s++;\
        OP(dst[ 1], (src1 +src2 )*20 - (src0 +src3 )*6 + (src0 +src4 )*3 - (src1 +src5 ));\
        src6= *s++;\
        OP(dst[ 2], (src2 +src3 )*20 - (src1 +src4 )*6 + (src0 +src5 )*3 - (src0 +src6 ));\
        src7= *s++;\
        OP(dst[ 3], (src3 +src4 )*20 - (src2 +src5 )*6 + (src1 +src6 )*3 - (src0 +src7 ));\
        src8= *s++;\
        OP(dst[ 4], (src4 +src5 )*20 - (src3 +src6 )*6 + (src2 +src7 )*3 - (src1 +src8 ));\
        src9= *s++;\
        OP(dst[ 5], (src5 +src6 )*20 - (src4 +src7 )*6 + (src3 +src8 )*3 - (src2 +src9 ));\
        src10= *s++;\
        OP(dst[ 6], (src6 +src7 )*20 - (src5 +src8 )*6 + (src4 +src9 )*3 - (src3 +src10));\
        src11= *s++;\
        OP(dst[ 7], (src7 +src8 )*20 - (src6 +src9 )*6 + (src5 +src10)*3 - (src4 +src11));\
        src12= *s++;\
        OP(dst[ 8], (src8 +src9 )*20 - (src7 +src10)*6 + (src6 +src11)*3 - (src5 +src12));\
        src13= *s++;\
        OP(dst[ 9], (src9 +src10)*20 - (src8 +src11)*6 + (src7 +src12)*3 - (src6 +src13));\
        src14= *s++;\
        OP(dst[10], (src10+src11)*20 - (src9 +src12)*6 + (src8 +src13)*3 - (src7 +src14));\
        src15= *s++;\
        OP(dst[11], (src11+src12)*20 - (src10+src13)*6 + (src9 +src14)*3 - (src8 +src15));\
        src16= *s++;\
        OP(dst[12], (src12+src13)*20 - (src11+src14)*6 + (src10+src15)*3 - (src9 +src16));\
        OP(dst[13], (src13+src14)*20 - (src12+src15)*6 + (src11+src16)*3 - (src10+src16));\
        OP(dst[14], (src14+src15)*20 - (src13+src16)*6 + (src12+src16)*3 - (src11+src15));\
        OP(dst[15], (src15+src16)*20 - (src14+src16)*6 + (src13+src15)*3 - (src12+src14));\
        dst+=dstStride;\
        src+=srcStride;\
    }while(--h);\
}\
\
static void OPNAME ## mpeg4_qpel16_v_lowpass(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\
    uint8_t *cm = cropTbl + MAX_NEG_CROP;\
    int w=16;\
    do {\
        uint8_t *s = src, *d=dst;\
        int src0,src1,src2,src3,src4,src5,src6,src7,src8;\
        int src9,src10,src11,src12,src13,src14,src15,src16;\
        src0 = *s; s+=srcStride; \
        src1 = *s; s+=srcStride; \
        src2 = *s; s+=srcStride; \
        src3 = *s; s+=srcStride; \
        src4 = *s; s+=srcStride; \
        OP(*d, (src0 +src1 )*20 - (src0 +src2 )*6 + (src1 +src3 )*3 - (src2 +src4 ));d+=dstStride;\
        src5 = *s; s+=srcStride; \
        OP(*d, (src1 +src2 )*20 - (src0 +src3 )*6 + (src0 +src4 )*3 - (src1 +src5 ));d+=dstStride;\
        src6 = *s; s+=srcStride; \
        OP(*d, (src2 +src3 )*20 - (src1 +src4 )*6 + (src0 +src5 )*3 - (src0 +src6 ));d+=dstStride;\
        src7 = *s; s+=srcStride; \
        OP(*d, (src3 +src4 )*20 - (src2 +src5 )*6 + (src1 +src6 )*3 - (src0 +src7 ));d+=dstStride;\
        src8 = *s; s+=srcStride; \
        OP(*d, (src4 +src5 )*20 - (src3 +src6 )*6 + (src2 +src7 )*3 - (src1 +src8 ));d+=dstStride;\
        src9 = *s; s+=srcStride; \
        OP(*d, (src5 +src6 )*20 - (src4 +src7 )*6 + (src3 +src8 )*3 - (src2 +src9 ));d+=dstStride;\
        src10 = *s; s+=srcStride; \
        OP(*d, (src6 +src7 )*20 - (src5 +src8 )*6 + (src4 +src9 )*3 - (src3 +src10));d+=dstStride;\
        src11 = *s; s+=srcStride; \
        OP(*d, (src7 +src8 )*20 - (src6 +src9 )*6 + (src5 +src10)*3 - (src4 +src11));d+=dstStride;\
        src12 = *s; s+=srcStride; \
        OP(*d, (src8 +src9 )*20 - (src7 +src10)*6 + (src6 +src11)*3 - (src5 +src12));d+=dstStride;\
        src13 = *s; s+=srcStride; \
        OP(*d, (src9 +src10)*20 - (src8 +src11)*6 + (src7 +src12)*3 - (src6 +src13));d+=dstStride;\
        src14 = *s; s+=srcStride; \
        OP(*d, (src10+src11)*20 - (src9 +src12)*6 + (src8 +src13)*3 - (src7 +src14));d+=dstStride;\
        src15 = *s; s+=srcStride; \
        OP(*d, (src11+src12)*20 - (src10+src13)*6 + (src9 +src14)*3 - (src8 +src15));d+=dstStride;\
        src16 = *s; \
        OP(*d, (src12+src13)*20 - (src11+src14)*6 + (src10+src15)*3 - (src9 +src16));d+=dstStride;\
        OP(*d, (src13+src14)*20 - (src12+src15)*6 + (src11+src16)*3 - (src10+src16));d+=dstStride;\
        OP(*d, (src14+src15)*20 - (src13+src16)*6 + (src12+src16)*3 - (src11+src15));d+=dstStride;\
        OP(*d, (src15+src16)*20 - (src14+src16)*6 + (src13+src15)*3 - (src12+src14));\
        dst++;\
        src++;\
    }while(--w);\
}\
\
static void OPNAME ## qpel8_mc00_c (uint8_t *dst, uint8_t *src, int stride){\
    OPNAME ## pixels8_c(dst, src, stride, 8);\
}\
\
static void OPNAME ## qpel8_mc10_c(uint8_t *dst, uint8_t *src, int stride){\
    uint8_t half[64];\
    put ## RND ## mpeg4_qpel8_h_lowpass(half, src, 8, stride, 8);\
    OPNAME ## pixels8_l2_aligned2(dst, src, half, stride, stride, 8, 8);\
}\
\
static void OPNAME ## qpel8_mc20_c(uint8_t *dst, uint8_t *src, int stride){\
    OPNAME ## mpeg4_qpel8_h_lowpass(dst, src, stride, stride, 8);\
}\
\
static void OPNAME ## qpel8_mc30_c(uint8_t *dst, uint8_t *src, int stride){\
    uint8_t half[64];\
    put ## RND ## mpeg4_qpel8_h_lowpass(half, src, 8, stride, 8);\
    OPNAME ## pixels8_l2_aligned2(dst, src+1, half, stride, stride, 8, 8);\
}\
\
static void OPNAME ## qpel8_mc01_c(uint8_t *dst, uint8_t *src, int stride){\
    uint8_t full[16*9];\
    uint8_t half[64];\
    copy_block9(full, src, 16, stride, 9);\
    put ## RND ## mpeg4_qpel8_v_lowpass(half, full, 8, 16);\
    OPNAME ## pixels8_l2_aligned(dst, full, half, stride, 16, 8, 8);\
}\
\
static void OPNAME ## qpel8_mc02_c(uint8_t *dst, uint8_t *src, int stride){\
    uint8_t full[16*9];\
    copy_block9(full, src, 16, stride, 9);\
    OPNAME ## mpeg4_qpel8_v_lowpass(dst, full, stride, 16);\
}\
\
static void OPNAME ## qpel8_mc03_c(uint8_t *dst, uint8_t *src, int stride){\
    uint8_t full[16*9];\
    uint8_t half[64];\
    copy_block9(full, src, 16, stride, 9);\
    put ## RND ## mpeg4_qpel8_v_lowpass(half, full, 8, 16);\
    OPNAME ## pixels8_l2_aligned(dst, full+16, half, stride, 16, 8, 8);\
}\
static void ff_ ## OPNAME ## qpel8_mc11_old_c(uint8_t *dst, uint8_t *src, int stride){\
    uint8_t full[16*9];\
    uint8_t halfH[72];\
    uint8_t halfV[64];\
    uint8_t halfHV[64];\
    copy_block9(full, src, 16, stride, 9);\
    put ## RND ## mpeg4_qpel8_h_lowpass(halfH, full, 8, 16, 9);\
    put ## RND ## mpeg4_qpel8_v_lowpass(halfV, full, 8, 16);\
    put ## RND ## mpeg4_qpel8_v_lowpass(halfHV, halfH, 8, 8);\
    OPNAME ## pixels8_l4_aligned(dst, full, halfH, halfV, halfHV, stride, 16, 8, 8, 8, 8);\
}\
static void OPNAME ## qpel8_mc11_c(uint8_t *dst, uint8_t *src, int stride){\
    uint8_t full[16*9];\
    uint8_t halfH[72];\
    uint8_t halfHV[64];\
    copy_block9(full, src, 16, stride, 9);\
    put ## RND ## mpeg4_qpel8_h_lowpass(halfH, full, 8, 16, 9);\
    put ## RND ## pixels8_l2_aligned(halfH, halfH, full, 8, 8, 16, 9);\
    put ## RND ## mpeg4_qpel8_v_lowpass(halfHV, halfH, 8, 8);\
    OPNAME ## pixels8_l2_aligned(dst, halfH, halfHV, stride, 8, 8, 8);\
}\
static void ff_ ## OPNAME ## qpel8_mc31_old_c(uint8_t *dst, uint8_t *src, int stride){\
    uint8_t full[16*9];\
    uint8_t halfH[72];\
    uint8_t halfV[64];\
    uint8_t halfHV[64];\
    copy_block9(full, src, 16, stride, 9);\
    put ## RND ## mpeg4_qpel8_h_lowpass(halfH, full, 8, 16, 9);\
    put ## RND ## mpeg4_qpel8_v_lowpass(halfV, full+1, 8, 16);\
    put ## RND ## mpeg4_qpel8_v_lowpass(halfHV, halfH, 8, 8);\
    OPNAME ## pixels8_l4_aligned0(dst, full+1, halfH, halfV, halfHV, stride, 16, 8, 8, 8, 8);\
}\
static void OPNAME ## qpel8_mc31_c(uint8_t *dst, uint8_t *src, int stride){\
    uint8_t full[16*9];\
    uint8_t halfH[72];\
    uint8_t halfHV[64];\
    copy_block9(full, src, 16, stride, 9);\
    put ## RND ## mpeg4_qpel8_h_lowpass(halfH, full, 8, 16, 9);\
    put ## RND ## pixels8_l2_aligned1(halfH, halfH, full+1, 8, 8, 16, 9);\
    put ## RND ## mpeg4_qpel8_v_lowpass(halfHV, halfH, 8, 8);\
    OPNAME ## pixels8_l2_aligned(dst, halfH, halfHV, stride, 8, 8, 8);\
}\
static void ff_ ## OPNAME ## qpel8_mc13_old_c(uint8_t *dst, uint8_t *src, int stride){\
    uint8_t full[16*9];\
    uint8_t halfH[72];\
    uint8_t halfV[64];\
    uint8_t halfHV[64];\
    copy_block9(full, src, 16, stride, 9);\
    put ## RND ## mpeg4_qpel8_h_lowpass(halfH, full, 8, 16, 9);\
    put ## RND ## mpeg4_qpel8_v_lowpass(halfV, full, 8, 16);\
    put ## RND ## mpeg4_qpel8_v_lowpass(halfHV, halfH, 8, 8);\
    OPNAME ## pixels8_l4_aligned(dst, full+16, halfH+8, halfV, halfHV, stride, 16, 8, 8, 8, 8);\
}\
static void OPNAME ## qpel8_mc13_c(uint8_t *dst, uint8_t *src, int stride){\
    uint8_t full[16*9];\
    uint8_t halfH[72];\
    uint8_t halfHV[64];\
    copy_block9(full, src, 16, stride, 9);\
    put ## RND ## mpeg4_qpel8_h_lowpass(halfH, full, 8, 16, 9);\
    put ## RND ## pixels8_l2_aligned(halfH, halfH, full, 8, 8, 16, 9);\
    put ## RND ## mpeg4_qpel8_v_lowpass(halfHV, halfH, 8, 8);\
    OPNAME ## pixels8_l2_aligned(dst, halfH+8, halfHV, stride, 8, 8, 8);\
}\
static void ff_ ## OPNAME ## qpel8_mc33_old_c(uint8_t *dst, uint8_t *src, int stride){\
    uint8_t full[16*9];\
    uint8_t halfH[72];\
    uint8_t halfV[64];\
    uint8_t halfHV[64];\
    copy_block9(full, src, 16, stride, 9);\
    put ## RND ## mpeg4_qpel8_h_lowpass(halfH, full  , 8, 16, 9);\
    put ## RND ## mpeg4_qpel8_v_lowpass(halfV, full+1, 8, 16);\
    put ## RND ## mpeg4_qpel8_v_lowpass(halfHV, halfH, 8, 8);\
    OPNAME ## pixels8_l4_aligned0(dst, full+17, halfH+8, halfV, halfHV, stride, 16, 8, 8, 8, 8);\
}\
static void OPNAME ## qpel8_mc33_c(uint8_t *dst, uint8_t *src, int stride){\
    uint8_t full[16*9];\
    uint8_t halfH[72];\
    uint8_t halfHV[64];\
    copy_block9(full, src, 16, stride, 9);\
    put ## RND ## mpeg4_qpel8_h_lowpass(halfH, full, 8, 16, 9);\
    put ## RND ## pixels8_l2_aligned1(halfH, halfH, full+1, 8, 8, 16, 9);\
    put ## RND ## mpeg4_qpel8_v_lowpass(halfHV, halfH, 8, 8);\
    OPNAME ## pixels8_l2_aligned(dst, halfH+8, halfHV, stride, 8, 8, 8);\
}\
static void OPNAME ## qpel8_mc21_c(uint8_t *dst, uint8_t *src, int stride){\
    uint8_t halfH[72];\
    uint8_t halfHV[64];\
    put ## RND ## mpeg4_qpel8_h_lowpass(halfH, src, 8, stride, 9);\
    put ## RND ## mpeg4_qpel8_v_lowpass(halfHV, halfH, 8, 8);\
    OPNAME ## pixels8_l2_aligned(dst, halfH, halfHV, stride, 8, 8, 8);\
}\
static void OPNAME ## qpel8_mc23_c(uint8_t *dst, uint8_t *src, int stride){\
    uint8_t halfH[72];\
    uint8_t halfHV[64];\
    put ## RND ## mpeg4_qpel8_h_lowpass(halfH, src, 8, stride, 9);\
    put ## RND ## mpeg4_qpel8_v_lowpass(halfHV, halfH, 8, 8);\
    OPNAME ## pixels8_l2_aligned(dst, halfH+8, halfHV, stride, 8, 8, 8);\
}\
static void ff_ ## OPNAME ## qpel8_mc12_old_c(uint8_t *dst, uint8_t *src, int stride){\
    uint8_t full[16*9];\
    uint8_t halfH[72];\
    uint8_t halfV[64];\
    uint8_t halfHV[64];\
    copy_block9(full, src, 16, stride, 9);\
    put ## RND ## mpeg4_qpel8_h_lowpass(halfH, full, 8, 16, 9);\
    put ## RND ## mpeg4_qpel8_v_lowpass(halfV, full, 8, 16);\
    put ## RND ## mpeg4_qpel8_v_lowpass(halfHV, halfH, 8, 8);\
    OPNAME ## pixels8_l2_aligned(dst, halfV, halfHV, stride, 8, 8, 8);\
}\
static void OPNAME ## qpel8_mc12_c(uint8_t *dst, uint8_t *src, int stride){\
    uint8_t full[16*9];\
    uint8_t halfH[72];\
    copy_block9(full, src, 16, stride, 9);\
    put ## RND ## mpeg4_qpel8_h_lowpass(halfH, full, 8, 16, 9);\
    put ## RND ## pixels8_l2_aligned(halfH, halfH, full, 8, 8, 16, 9);\
    OPNAME ## mpeg4_qpel8_v_lowpass(dst, halfH, stride, 8);\
}\
static void ff_ ## OPNAME ## qpel8_mc32_old_c(uint8_t *dst, uint8_t *src, int stride){\
    uint8_t full[16*9];\
    uint8_t halfH[72];\
    uint8_t halfV[64];\
    uint8_t halfHV[64];\
    copy_block9(full, src, 16, stride, 9);\
    put ## RND ## mpeg4_qpel8_h_lowpass(halfH, full, 8, 16, 9);\
    put ## RND ## mpeg4_qpel8_v_lowpass(halfV, full+1, 8, 16);\
    put ## RND ## mpeg4_qpel8_v_lowpass(halfHV, halfH, 8, 8);\
    OPNAME ## pixels8_l2_aligned(dst, halfV, halfHV, stride, 8, 8, 8);\
}\
static void OPNAME ## qpel8_mc32_c(uint8_t *dst, uint8_t *src, int stride){\
    uint8_t full[16*9];\
    uint8_t halfH[72];\
    copy_block9(full, src, 16, stride, 9);\
    put ## RND ## mpeg4_qpel8_h_lowpass(halfH, full, 8, 16, 9);\
    put ## RND ## pixels8_l2_aligned1(halfH, halfH, full+1, 8, 8, 16, 9);\
    OPNAME ## mpeg4_qpel8_v_lowpass(dst, halfH, stride, 8);\
}\
static void OPNAME ## qpel8_mc22_c(uint8_t *dst, uint8_t *src, int stride){\
    uint8_t halfH[72];\
    put ## RND ## mpeg4_qpel8_h_lowpass(halfH, src, 8, stride, 9);\
    OPNAME ## mpeg4_qpel8_v_lowpass(dst, halfH, stride, 8);\
}\
static void OPNAME ## qpel16_mc00_c (uint8_t *dst, uint8_t *src, int stride){\
    OPNAME ## pixels16_c(dst, src, stride, 16);\
}\
\
static void OPNAME ## qpel16_mc10_c(uint8_t *dst, uint8_t *src, int stride){\
    uint8_t half[256];\
    put ## RND ## mpeg4_qpel16_h_lowpass(half, src, 16, stride, 16);\
    OPNAME ## pixels16_l2_aligned2(dst, src, half, stride, stride, 16, 16);\
}\
\
static void OPNAME ## qpel16_mc20_c(uint8_t *dst, uint8_t *src, int stride){\
    OPNAME ## mpeg4_qpel16_h_lowpass(dst, src, stride, stride, 16);\
}\
\
static void OPNAME ## qpel16_mc30_c(uint8_t *dst, uint8_t *src, int stride){\
    uint8_t half[256];\
    put ## RND ## mpeg4_qpel16_h_lowpass(half, src, 16, stride, 16);\
    OPNAME ## pixels16_l2_aligned2(dst, src+1, half, stride, stride, 16, 16);\
}\
\
static void OPNAME ## qpel16_mc01_c(uint8_t *dst, uint8_t *src, int stride){\
    uint8_t full[24*17];\
    uint8_t half[256];\
    copy_block17(full, src, 24, stride, 17);\
    put ## RND ## mpeg4_qpel16_v_lowpass(half, full, 16, 24);\
    OPNAME ## pixels16_l2_aligned(dst, full, half, stride, 24, 16, 16);\
}\
\
static void OPNAME ## qpel16_mc02_c(uint8_t *dst, uint8_t *src, int stride){\
    uint8_t full[24*17];\
    copy_block17(full, src, 24, stride, 17);\
    OPNAME ## mpeg4_qpel16_v_lowpass(dst, full, stride, 24);\
}\
\
static void OPNAME ## qpel16_mc03_c(uint8_t *dst, uint8_t *src, int stride){\
    uint8_t full[24*17];\
    uint8_t half[256];\
    copy_block17(full, src, 24, stride, 17);\
    put ## RND ## mpeg4_qpel16_v_lowpass(half, full, 16, 24);\
    OPNAME ## pixels16_l2_aligned(dst, full+24, half, stride, 24, 16, 16);\
}\
static void ff_ ## OPNAME ## qpel16_mc11_old_c(uint8_t *dst, uint8_t *src, int stride){\
    uint8_t full[24*17];\
    uint8_t halfH[272];\
    uint8_t halfV[256];\
    uint8_t halfHV[256];\
    copy_block17(full, src, 24, stride, 17);\
    put ## RND ## mpeg4_qpel16_h_lowpass(halfH, full, 16, 24, 17);\
    put ## RND ## mpeg4_qpel16_v_lowpass(halfV, full, 16, 24);\
    put ## RND ## mpeg4_qpel16_v_lowpass(halfHV, halfH, 16, 16);\
    OPNAME ## pixels16_l4_aligned(dst, full, halfH, halfV, halfHV, stride, 24, 16, 16, 16, 16);\
}\
static void OPNAME ## qpel16_mc11_c(uint8_t *dst, uint8_t *src, int stride){\
    uint8_t full[24*17];\
    uint8_t halfH[272];\
    uint8_t halfHV[256];\
    copy_block17(full, src, 24, stride, 17);\
    put ## RND ## mpeg4_qpel16_h_lowpass(halfH, full, 16, 24, 17);\
    put ## RND ## pixels16_l2_aligned(halfH, halfH, full, 16, 16, 24, 17);\
    put ## RND ## mpeg4_qpel16_v_lowpass(halfHV, halfH, 16, 16);\
    OPNAME ## pixels16_l2_aligned(dst, halfH, halfHV, stride, 16, 16, 16);\
}\
static void ff_ ## OPNAME ## qpel16_mc31_old_c(uint8_t *dst, uint8_t *src, int stride){\
    uint8_t full[24*17];\
    uint8_t halfH[272];\
    uint8_t halfV[256];\
    uint8_t halfHV[256];\
    copy_block17(full, src, 24, stride, 17);\
    put ## RND ## mpeg4_qpel16_h_lowpass(halfH, full, 16, 24, 17);\
    put ## RND ## mpeg4_qpel16_v_lowpass(halfV, full+1, 16, 24);\
    put ## RND ## mpeg4_qpel16_v_lowpass(halfHV, halfH, 16, 16);\
    OPNAME ## pixels16_l4_aligned0(dst, full+1, halfH, halfV, halfHV, stride, 24, 16, 16, 16, 16);\
}\
static void OPNAME ## qpel16_mc31_c(uint8_t *dst, uint8_t *src, int stride){\
    uint8_t full[24*17];\
    uint8_t halfH[272];\
    uint8_t halfHV[256];\
    copy_block17(full, src, 24, stride, 17);\
    put ## RND ## mpeg4_qpel16_h_lowpass(halfH, full, 16, 24, 17);\
    put ## RND ## pixels16_l2_aligned1(halfH, halfH, full+1, 16, 16, 24, 17);\
    put ## RND ## mpeg4_qpel16_v_lowpass(halfHV, halfH, 16, 16);\
    OPNAME ## pixels16_l2_aligned(dst, halfH, halfHV, stride, 16, 16, 16);\
}\
static void ff_ ## OPNAME ## qpel16_mc13_old_c(uint8_t *dst, uint8_t *src, int stride){\
    uint8_t full[24*17];\
    uint8_t halfH[272];\
    uint8_t halfV[256];\
    uint8_t halfHV[256];\
    copy_block17(full, src, 24, stride, 17);\
    put ## RND ## mpeg4_qpel16_h_lowpass(halfH, full, 16, 24, 17);\
    put ## RND ## mpeg4_qpel16_v_lowpass(halfV, full, 16, 24);\
    put ## RND ## mpeg4_qpel16_v_lowpass(halfHV, halfH, 16, 16);\
    OPNAME ## pixels16_l4_aligned(dst, full+24, halfH+16, halfV, halfHV, stride, 24, 16, 16, 16, 16);\
}\
static void OPNAME ## qpel16_mc13_c(uint8_t *dst, uint8_t *src, int stride){\
    uint8_t full[24*17];\
    uint8_t halfH[272];\
    uint8_t halfHV[256];\
... This diff was truncated because it exceeds the maximum size that can be displayed.
