ffmpeg / libswscale / swscale.c @ b87fae9f

/*
 * Copyright (C) 2001-2003 Michael Niedermayer <michaelni@gmx.at>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 *
 * the C code (not assembly, mmx, ...) of this file can be used
 * under the LGPL license too
 */

/*
  supported Input formats: YV12, I420/IYUV, YUY2, UYVY, BGR32, BGR32_1, BGR24, BGR16, BGR15, RGB32, RGB32_1, RGB24, Y8/Y800, YVU9/IF09, PAL8
  supported output formats: YV12, I420/IYUV, YUY2, UYVY, {BGR,RGB}{1,4,8,15,16,24,32}, Y8/Y800, YVU9/IF09
  {BGR,RGB}{1,4,8,15,16} support dithering

  unscaled special converters (YV12=I420=IYUV, Y800=Y8)
  YV12 -> {BGR,RGB}{1,4,8,15,16,24,32}
  x -> x
  YUV9 -> YV12
  YUV9/YV12 -> Y800
  Y800 -> YUV9/YV12
  BGR24 -> BGR32 & RGB24 -> RGB32
  BGR32 -> BGR24 & RGB32 -> RGB24
  BGR15 -> BGR16
*/
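
/* Usage sketch (illustrative only, not part of this file's build): the
 * conversions listed above are reached through the public API declared in
 * swscale.h, roughly like
 *
 *     struct SwsContext *ctx = sws_getContext(srcW, srcH, PIX_FMT_YUV420P,
 *                                             dstW, dstH, PIX_FMT_RGB24,
 *                                             SWS_BILINEAR, NULL, NULL, NULL);
 *     if (ctx) {
 *         sws_scale(ctx, src, srcStride, 0, srcH, dst, dstStride);
 *         sws_freeContext(ctx);
 *     }
 *
 * The variable names (src, dst, srcStride, ...) are placeholders; see
 * swscale.h for the exact prototypes.
 */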

/*
tested special converters (most are tested actually, but I did not write it down ...)
 YV12 -> BGR16
 YV12 -> YV12
 BGR15 -> BGR16
 BGR16 -> BGR16
 YVU9 -> YV12

untested special converters
  YV12/I420 -> BGR15/BGR24/BGR32 (it is the yuv2rgb stuff, so it should be OK)
  YV12/I420 -> YV12/I420
  YUY2/BGR15/BGR24/BGR32/RGB24/RGB32 -> same format
  BGR24 -> BGR32 & RGB24 -> RGB32
  BGR32 -> BGR24 & RGB32 -> RGB24
  BGR24 -> YV12
*/

#define _SVID_SOURCE //needed for MAP_ANONYMOUS
#include <inttypes.h>
#include <string.h>
#include <math.h>
#include <stdio.h>
#include <unistd.h>
#include "config.h"
#include <assert.h>
#if HAVE_SYS_MMAN_H
#include <sys/mman.h>
#if defined(MAP_ANON) && !defined(MAP_ANONYMOUS)
#define MAP_ANONYMOUS MAP_ANON
#endif
#endif
#if HAVE_VIRTUALALLOC
#define WIN32_LEAN_AND_MEAN
#include <windows.h>
#endif
#include "swscale.h"
#include "swscale_internal.h"
#include "rgb2rgb.h"
#include "libavutil/x86_cpu.h"
#include "libavutil/bswap.h"

unsigned swscale_version(void)
{
    return LIBSWSCALE_VERSION_INT;
}

#undef MOVNTQ
#undef PAVGB

//#undef HAVE_MMX2
//#define HAVE_AMD3DNOW
//#undef HAVE_MMX
//#undef ARCH_X86
//#define WORDS_BIGENDIAN
#define DITHER1XBPP

#define FAST_BGR2YV12 // use 7 bit coefficients instead of 15 bit

#define RET 0xC3 //near return opcode for x86

#ifdef M_PI
#define PI M_PI
#else
#define PI 3.14159265358979323846
#endif

#define isSupportedIn(x)    (       \
           (x)==PIX_FMT_YUV420P     \
        || (x)==PIX_FMT_YUVA420P    \
        || (x)==PIX_FMT_YUYV422     \
        || (x)==PIX_FMT_UYVY422     \
        || (x)==PIX_FMT_RGB32       \
        || (x)==PIX_FMT_RGB32_1     \
        || (x)==PIX_FMT_BGR24       \
        || (x)==PIX_FMT_BGR565      \
        || (x)==PIX_FMT_BGR555      \
        || (x)==PIX_FMT_BGR32       \
        || (x)==PIX_FMT_BGR32_1     \
        || (x)==PIX_FMT_RGB24       \
        || (x)==PIX_FMT_RGB565      \
        || (x)==PIX_FMT_RGB555      \
        || (x)==PIX_FMT_GRAY8       \
        || (x)==PIX_FMT_YUV410P     \
        || (x)==PIX_FMT_YUV440P     \
        || (x)==PIX_FMT_GRAY16BE    \
        || (x)==PIX_FMT_GRAY16LE    \
        || (x)==PIX_FMT_YUV444P     \
        || (x)==PIX_FMT_YUV422P     \
        || (x)==PIX_FMT_YUV411P     \
        || (x)==PIX_FMT_PAL8        \
        || (x)==PIX_FMT_BGR8        \
        || (x)==PIX_FMT_RGB8        \
        || (x)==PIX_FMT_BGR4_BYTE   \
        || (x)==PIX_FMT_RGB4_BYTE   \
        || (x)==PIX_FMT_YUV440P     \
        || (x)==PIX_FMT_MONOWHITE   \
        || (x)==PIX_FMT_MONOBLACK   \
    )
#define isSupportedOut(x)   (       \
           (x)==PIX_FMT_YUV420P     \
        || (x)==PIX_FMT_YUVA420P    \
        || (x)==PIX_FMT_YUYV422     \
        || (x)==PIX_FMT_UYVY422     \
        || (x)==PIX_FMT_YUV444P     \
        || (x)==PIX_FMT_YUV422P     \
        || (x)==PIX_FMT_YUV411P     \
        || isRGB(x)                 \
        || isBGR(x)                 \
        || (x)==PIX_FMT_NV12        \
        || (x)==PIX_FMT_NV21        \
        || (x)==PIX_FMT_GRAY16BE    \
        || (x)==PIX_FMT_GRAY16LE    \
        || (x)==PIX_FMT_GRAY8       \
        || (x)==PIX_FMT_YUV410P     \
        || (x)==PIX_FMT_YUV440P     \
    )
#define isPacked(x)         (       \
           (x)==PIX_FMT_PAL8        \
        || (x)==PIX_FMT_YUYV422     \
        || (x)==PIX_FMT_UYVY422     \
        || isRGB(x)                 \
        || isBGR(x)                 \
    )
#define usePal(x)           (       \
           (x)==PIX_FMT_PAL8        \
        || (x)==PIX_FMT_BGR4_BYTE   \
        || (x)==PIX_FMT_RGB4_BYTE   \
        || (x)==PIX_FMT_BGR8        \
        || (x)==PIX_FMT_RGB8        \
    )
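
/* Note (added for clarity): the usePal() formats are the paletted and
 * pseudo-paletted inputs. Broadly speaking, for these the scaler works from
 * a 256-entry palette supplied alongside the pixel bytes (plane 1 for PAL8)
 * rather than treating the bytes themselves as colour values. */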

#define RGB2YUV_SHIFT 15
#define BY ( (int)(0.114*219/255*(1<<RGB2YUV_SHIFT)+0.5))
#define BV (-(int)(0.081*224/255*(1<<RGB2YUV_SHIFT)+0.5))
#define BU ( (int)(0.500*224/255*(1<<RGB2YUV_SHIFT)+0.5))
#define GY ( (int)(0.587*219/255*(1<<RGB2YUV_SHIFT)+0.5))
#define GV (-(int)(0.419*224/255*(1<<RGB2YUV_SHIFT)+0.5))
#define GU (-(int)(0.331*224/255*(1<<RGB2YUV_SHIFT)+0.5))
#define RY ( (int)(0.299*219/255*(1<<RGB2YUV_SHIFT)+0.5))
#define RV ( (int)(0.500*224/255*(1<<RGB2YUV_SHIFT)+0.5))
#define RU (-(int)(0.169*224/255*(1<<RGB2YUV_SHIFT)+0.5))
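
/* Illustration (not used by the code): with RGB2YUV_SHIFT == 15 these are
 * fixed-point BT.601-style RGB->YUV coefficients, used roughly as
 *
 *     Y = ((RY*r + GY*g + BY*b + (1<<(RGB2YUV_SHIFT-1))) >> RGB2YUV_SHIFT) + 16;
 *     U = ((RU*r + GU*g + BU*b + (1<<(RGB2YUV_SHIFT-1))) >> RGB2YUV_SHIFT) + 128;
 *     V = ((RV*r + GV*g + BV*b + (1<<(RGB2YUV_SHIFT-1))) >> RGB2YUV_SHIFT) + 128;
 *
 * The 219/255 and 224/255 factors map full-range RGB into the limited
 * 16-235 luma / 16-240 chroma ranges; the actual converters fold the
 * +16/+128 offsets and the rounding into their own constants. */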

extern const int32_t ff_yuv2rgb_coeffs[8][4];

static const double rgb2yuv_table[8][9]={
    {0.7152, 0.0722, 0.2126, -0.386, 0.5, -0.115, -0.454, -0.046, 0.5},
    {0.7152, 0.0722, 0.2126, -0.386, 0.5, -0.115, -0.454, -0.046, 0.5},
    {0.587 , 0.114 , 0.299 , -0.331, 0.5, -0.169, -0.419, -0.081, 0.5},
    {0.587 , 0.114 , 0.299 , -0.331, 0.5, -0.169, -0.419, -0.081, 0.5},
    {0.59  , 0.11  , 0.30  , -0.331, 0.5, -0.169, -0.421, -0.079, 0.5}, //FCC
    {0.587 , 0.114 , 0.299 , -0.331, 0.5, -0.169, -0.419, -0.081, 0.5},
    {0.587 , 0.114 , 0.299 , -0.331, 0.5, -0.169, -0.419, -0.081, 0.5}, //SMPTE 170M
    {0.701 , 0.087 , 0.212 , -0.384, 0.5, -0.116, -0.445, -0.055, 0.5}, //SMPTE 240M
};
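
/* Note (added for clarity): the row index appears to follow the same
 * colourspace numbering as ff_yuv2rgb_coeffs (the SWS_CS_* values from
 * swscale.h): rows 0/1 are ITU-R BT.709 coefficients, rows 2/3/5/6 are
 * BT.601-style, row 4 is FCC and row 7 is SMPTE 240M. */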

/*
NOTES
Special versions: fast Y 1:1 scaling (no interpolation in y direction)

TODO
more intelligent misalignment avoidance for the horizontal scaler
write special vertical cubic upscale version
optimize C code (YV12 / minmax)
add support for packed pixel YUV input & output
add support for Y8 output
optimize BGR24 & BGR32
add BGR4 output support
write special BGR->BGR scaler
*/

#if ARCH_X86 && CONFIG_GPL
DECLARE_ASM_CONST(8, uint64_t, bF8)=       0xF8F8F8F8F8F8F8F8LL;
DECLARE_ASM_CONST(8, uint64_t, bFC)=       0xFCFCFCFCFCFCFCFCLL;
DECLARE_ASM_CONST(8, uint64_t, w10)=       0x0010001000100010LL;
DECLARE_ASM_CONST(8, uint64_t, w02)=       0x0002000200020002LL;
DECLARE_ASM_CONST(8, uint64_t, bm00001111)=0x00000000FFFFFFFFLL;
DECLARE_ASM_CONST(8, uint64_t, bm00000111)=0x0000000000FFFFFFLL;
DECLARE_ASM_CONST(8, uint64_t, bm11111000)=0xFFFFFFFFFF000000LL;
DECLARE_ASM_CONST(8, uint64_t, bm01010101)=0x00FF00FF00FF00FFLL;

const DECLARE_ALIGNED(8, uint64_t, ff_dither4[2]) = {
        0x0103010301030103LL,
        0x0200020002000200LL,};

const DECLARE_ALIGNED(8, uint64_t, ff_dither8[2]) = {
        0x0602060206020602LL,
        0x0004000400040004LL,};

DECLARE_ASM_CONST(8, uint64_t, b16Mask)=   0x001F001F001F001FLL;
DECLARE_ASM_CONST(8, uint64_t, g16Mask)=   0x07E007E007E007E0LL;
DECLARE_ASM_CONST(8, uint64_t, r16Mask)=   0xF800F800F800F800LL;
DECLARE_ASM_CONST(8, uint64_t, b15Mask)=   0x001F001F001F001FLL;
DECLARE_ASM_CONST(8, uint64_t, g15Mask)=   0x03E003E003E003E0LL;
DECLARE_ASM_CONST(8, uint64_t, r15Mask)=   0x7C007C007C007C00LL;

DECLARE_ALIGNED(8, const uint64_t, ff_M24A)         = 0x00FF0000FF0000FFLL;
DECLARE_ALIGNED(8, const uint64_t, ff_M24B)         = 0xFF0000FF0000FF00LL;
DECLARE_ALIGNED(8, const uint64_t, ff_M24C)         = 0x0000FF0000FF0000LL;

#ifdef FAST_BGR2YV12
DECLARE_ALIGNED(8, const uint64_t, ff_bgr2YCoeff)   = 0x000000210041000DULL;
DECLARE_ALIGNED(8, const uint64_t, ff_bgr2UCoeff)   = 0x0000FFEEFFDC0038ULL;
DECLARE_ALIGNED(8, const uint64_t, ff_bgr2VCoeff)   = 0x00000038FFD2FFF8ULL;
#else
DECLARE_ALIGNED(8, const uint64_t, ff_bgr2YCoeff)   = 0x000020E540830C8BULL;
DECLARE_ALIGNED(8, const uint64_t, ff_bgr2UCoeff)   = 0x0000ED0FDAC23831ULL;
DECLARE_ALIGNED(8, const uint64_t, ff_bgr2VCoeff)   = 0x00003831D0E6F6EAULL;
#endif /* FAST_BGR2YV12 */
DECLARE_ALIGNED(8, const uint64_t, ff_bgr2YOffset)  = 0x1010101010101010ULL;
DECLARE_ALIGNED(8, const uint64_t, ff_bgr2UVOffset) = 0x8080808080808080ULL;
DECLARE_ALIGNED(8, const uint64_t, ff_w1111)        = 0x0001000100010001ULL;

DECLARE_ASM_CONST(8, uint64_t, ff_bgr24toY1Coeff) = 0x0C88000040870C88ULL;
DECLARE_ASM_CONST(8, uint64_t, ff_bgr24toY2Coeff) = 0x20DE4087000020DEULL;
DECLARE_ASM_CONST(8, uint64_t, ff_rgb24toY1Coeff) = 0x20DE0000408720DEULL;
DECLARE_ASM_CONST(8, uint64_t, ff_rgb24toY2Coeff) = 0x0C88408700000C88ULL;
DECLARE_ASM_CONST(8, uint64_t, ff_bgr24toYOffset) = 0x0008400000084000ULL;

DECLARE_ASM_CONST(8, uint64_t, ff_bgr24toUV[2][4]) = {
    {0x38380000DAC83838ULL, 0xECFFDAC80000ECFFULL, 0xF6E40000D0E3F6E4ULL, 0x3838D0E300003838ULL},
    {0xECFF0000DAC8ECFFULL, 0x3838DAC800003838ULL, 0x38380000D0E33838ULL, 0xF6E4D0E30000F6E4ULL},
};

DECLARE_ASM_CONST(8, uint64_t, ff_bgr24toUVOffset)= 0x0040400000404000ULL;

#endif /* ARCH_X86 && CONFIG_GPL */

// clipping helper table for C implementations:
static unsigned char clip_table[768];

static SwsVector *sws_getConvVec(SwsVector *a, SwsVector *b);

static const uint8_t  __attribute__((aligned(8))) dither_2x2_4[2][8]={
{  1,   3,   1,   3,   1,   3,   1,   3, },
{  2,   0,   2,   0,   2,   0,   2,   0, },
};

static const uint8_t  __attribute__((aligned(8))) dither_2x2_8[2][8]={
{  6,   2,   6,   2,   6,   2,   6,   2, },
{  0,   4,   0,   4,   0,   4,   0,   4, },
};

const uint8_t  __attribute__((aligned(8))) dither_8x8_32[8][8]={
{ 17,   9,  23,  15,  16,   8,  22,  14, },
{  5,  29,   3,  27,   4,  28,   2,  26, },
{ 21,  13,  19,  11,  20,  12,  18,  10, },
{  0,  24,   6,  30,   1,  25,   7,  31, },
{ 16,   8,  22,  14,  17,   9,  23,  15, },
{  4,  28,   2,  26,   5,  29,   3,  27, },
{ 20,  12,  18,  10,  21,  13,  19,  11, },
{  1,  25,   7,  31,   0,  24,   6,  30, },
};

#if 0
const uint8_t  __attribute__((aligned(8))) dither_8x8_64[8][8]={
{  0,  48,  12,  60,   3,  51,  15,  63, },
{ 32,  16,  44,  28,  35,  19,  47,  31, },
{  8,  56,   4,  52,  11,  59,   7,  55, },
{ 40,  24,  36,  20,  43,  27,  39,  23, },
{  2,  50,  14,  62,   1,  49,  13,  61, },
{ 34,  18,  46,  30,  33,  17,  45,  29, },
{ 10,  58,   6,  54,   9,  57,   5,  53, },
{ 42,  26,  38,  22,  41,  25,  37,  21, },
};
#endif

const uint8_t  __attribute__((aligned(8))) dither_8x8_73[8][8]={
{  0,  55,  14,  68,   3,  58,  17,  72, },
{ 37,  18,  50,  32,  40,  22,  54,  35, },
{  9,  64,   5,  59,  13,  67,   8,  63, },
{ 46,  27,  41,  23,  49,  31,  44,  26, },
{  2,  57,  16,  71,   1,  56,  15,  70, },
{ 39,  21,  52,  34,  38,  19,  51,  33, },
{ 11,  66,   7,  62,  10,  65,   6,  60, },
{ 48,  30,  43,  25,  47,  29,  42,  24, },
};

#if 0
const uint8_t  __attribute__((aligned(8))) dither_8x8_128[8][8]={
{ 68,  36,  92,  60,  66,  34,  90,  58, },
{ 20, 116,  12, 108,  18, 114,  10, 106, },
{ 84,  52,  76,  44,  82,  50,  74,  42, },
{  0,  96,  24, 120,   6, 102,  30, 126, },
{ 64,  32,  88,  56,  70,  38,  94,  62, },
{ 16, 112,   8, 104,  22, 118,  14, 110, },
{ 80,  48,  72,  40,  86,  54,  78,  46, },
{  4, 100,  28, 124,   2,  98,  26, 122, },
};
#endif

#if 1
const uint8_t  __attribute__((aligned(8))) dither_8x8_220[8][8]={
{117,  62, 158, 103, 113,  58, 155, 100, },
{ 34, 199,  21, 186,  31, 196,  17, 182, },
{144,  89, 131,  76, 141,  86, 127,  72, },
{  0, 165,  41, 206,  10, 175,  52, 217, },
{110,  55, 151,  96, 120,  65, 162, 107, },
{ 28, 193,  14, 179,  38, 203,  24, 189, },
{138,  83, 124,  69, 148,  93, 134,  79, },
{  7, 172,  48, 213,   3, 168,  45, 210, },
};
#elif 1
// tries to correct a gamma of 1.5
const uint8_t  __attribute__((aligned(8))) dither_8x8_220[8][8]={
{  0, 143,  18, 200,   2, 156,  25, 215, },
{ 78,  28, 125,  64,  89,  36, 138,  74, },
{ 10, 180,   3, 161,  16, 195,   8, 175, },
{109,  51,  93,  38, 121,  60, 105,  47, },
{  1, 152,  23, 210,   0, 147,  20, 205, },
{ 85,  33, 134,  71,  81,  30, 130,  67, },
{ 14, 190,   6, 171,  12, 185,   5, 166, },
{117,  57, 101,  44, 113,  54,  97,  41, },
};
#elif 1
// tries to correct a gamma of 2.0
const uint8_t  __attribute__((aligned(8))) dither_8x8_220[8][8]={
{  0, 124,   8, 193,   0, 140,  12, 213, },
{ 55,  14, 104,  42,  66,  19, 119,  52, },
{  3, 168,   1, 145,   6, 187,   3, 162, },
{ 86,  31,  70,  21,  99,  39,  82,  28, },
{  0, 134,  11, 206,   0, 129,   9, 200, },
{ 62,  17, 114,  48,  58,  16, 109,  45, },
{  5, 181,   2, 157,   4, 175,   1, 151, },
{ 95,  36,  78,  26,  90,  34,  74,  24, },
};
#else
// tries to correct a gamma of 2.5
const uint8_t  __attribute__((aligned(8))) dither_8x8_220[8][8]={
{  0, 107,   3, 187,   0, 125,   6, 212, },
{ 39,   7,  86,  28,  49,  11, 102,  36, },
{  1, 158,   0, 131,   3, 180,   1, 151, },
{ 68,  19,  52,  12,  81,  25,  64,  17, },
{  0, 119,   5, 203,   0, 113,   4, 195, },
{ 45,   9,  96,  33,  42,   8,  91,  30, },
{  2, 172,   1, 144,   2, 165,   0, 137, },
{ 77,  23,  60,  15,  72,  21,  56,  14, },
};
#endif
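
/* Illustrative note (added for clarity): the tables above are ordered-dither
 * (Bayer-like) matrices. One row is selected per output line and the entry
 * for the pixel's x position is added to the value before quantization,
 * conceptually
 *
 *     out = quantize(Y + dither_8x8_220[y & 7][x & 7]);
 *
 * which is how the d32/d64/d128 lookups are used by the packed-RGB and
 * monochrome output paths further down. */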

const char *sws_format_name(enum PixelFormat format)
{
    switch (format) {
        case PIX_FMT_YUV420P:
            return "yuv420p";
        case PIX_FMT_YUVA420P:
            return "yuva420p";
        case PIX_FMT_YUYV422:
            return "yuyv422";
        case PIX_FMT_RGB24:
            return "rgb24";
        case PIX_FMT_BGR24:
            return "bgr24";
        case PIX_FMT_YUV422P:
            return "yuv422p";
        case PIX_FMT_YUV444P:
            return "yuv444p";
        case PIX_FMT_RGB32:
            return "rgb32";
        case PIX_FMT_YUV410P:
            return "yuv410p";
        case PIX_FMT_YUV411P:
            return "yuv411p";
        case PIX_FMT_RGB565:
            return "rgb565";
        case PIX_FMT_RGB555:
            return "rgb555";
        case PIX_FMT_GRAY16BE:
            return "gray16be";
        case PIX_FMT_GRAY16LE:
            return "gray16le";
        case PIX_FMT_GRAY8:
            return "gray8";
        case PIX_FMT_MONOWHITE:
            return "mono white";
        case PIX_FMT_MONOBLACK:
            return "mono black";
        case PIX_FMT_PAL8:
            return "Palette";
        case PIX_FMT_YUVJ420P:
            return "yuvj420p";
        case PIX_FMT_YUVJ422P:
            return "yuvj422p";
        case PIX_FMT_YUVJ444P:
            return "yuvj444p";
        case PIX_FMT_XVMC_MPEG2_MC:
            return "xvmc_mpeg2_mc";
        case PIX_FMT_XVMC_MPEG2_IDCT:
            return "xvmc_mpeg2_idct";
        case PIX_FMT_UYVY422:
            return "uyvy422";
        case PIX_FMT_UYYVYY411:
            return "uyyvyy411";
        case PIX_FMT_RGB32_1:
            return "rgb32x";
        case PIX_FMT_BGR32_1:
            return "bgr32x";
        case PIX_FMT_BGR32:
            return "bgr32";
        case PIX_FMT_BGR565:
            return "bgr565";
        case PIX_FMT_BGR555:
            return "bgr555";
        case PIX_FMT_BGR8:
            return "bgr8";
        case PIX_FMT_BGR4:
            return "bgr4";
        case PIX_FMT_BGR4_BYTE:
            return "bgr4 byte";
        case PIX_FMT_RGB8:
            return "rgb8";
        case PIX_FMT_RGB4:
            return "rgb4";
        case PIX_FMT_RGB4_BYTE:
            return "rgb4 byte";
        case PIX_FMT_NV12:
            return "nv12";
        case PIX_FMT_NV21:
            return "nv21";
        case PIX_FMT_YUV440P:
            return "yuv440p";
        case PIX_FMT_VDPAU_H264:
            return "vdpau_h264";
        case PIX_FMT_VDPAU_MPEG1:
            return "vdpau_mpeg1";
        case PIX_FMT_VDPAU_MPEG2:
            return "vdpau_mpeg2";
        case PIX_FMT_VDPAU_WMV3:
            return "vdpau_wmv3";
        case PIX_FMT_VDPAU_VC1:
            return "vdpau_vc1";
        default:
            return "Unknown format";
    }
}

static inline void yuv2yuvXinC(const int16_t *lumFilter, const int16_t **lumSrc, int lumFilterSize,
                               const int16_t *chrFilter, const int16_t **chrSrc, int chrFilterSize,
                               const int16_t **alpSrc, uint8_t *dest, uint8_t *uDest, uint8_t *vDest, uint8_t *aDest, int dstW, int chrDstW)
{
    //FIXME Optimize (just quickly written not optimized..)
    int i;
    for (i=0; i<dstW; i++)
    {
        int val=1<<18;
        int j;
        for (j=0; j<lumFilterSize; j++)
            val += lumSrc[j][i] * lumFilter[j];

        dest[i]= av_clip_uint8(val>>19);
    }

    if (uDest)
        for (i=0; i<chrDstW; i++)
        {
            int u=1<<18;
            int v=1<<18;
            int j;
            for (j=0; j<chrFilterSize; j++)
            {
                u += chrSrc[j][i] * chrFilter[j];
                v += chrSrc[j][i + VOFW] * chrFilter[j];
            }

            uDest[i]= av_clip_uint8(u>>19);
            vDest[i]= av_clip_uint8(v>>19);
        }

    if (CONFIG_SWSCALE_ALPHA && aDest)
        for (i=0; i<dstW; i++){
            int val=1<<18;
            int j;
            for (j=0; j<lumFilterSize; j++)
                val += alpSrc[j][i] * lumFilter[j];

            aDest[i]= av_clip_uint8(val>>19);
        }

}
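
/* Note on the fixed-point arithmetic above (added for clarity): the
 * accumulators start at 1<<18, which is 0.5 in units of the final >>19
 * shift, so the shift rounds to nearest instead of truncating before
 * av_clip_uint8(). The same pattern recurs throughout the scalers below. */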

static inline void yuv2nv12XinC(const int16_t *lumFilter, const int16_t **lumSrc, int lumFilterSize,
                                const int16_t *chrFilter, const int16_t **chrSrc, int chrFilterSize,
                                uint8_t *dest, uint8_t *uDest, int dstW, int chrDstW, int dstFormat)
{
    //FIXME Optimize (just quickly written not optimized..)
    int i;
    for (i=0; i<dstW; i++)
    {
        int val=1<<18;
        int j;
        for (j=0; j<lumFilterSize; j++)
            val += lumSrc[j][i] * lumFilter[j];

        dest[i]= av_clip_uint8(val>>19);
    }

    if (!uDest)
        return;

    if (dstFormat == PIX_FMT_NV12)
        for (i=0; i<chrDstW; i++)
        {
            int u=1<<18;
            int v=1<<18;
            int j;
            for (j=0; j<chrFilterSize; j++)
            {
                u += chrSrc[j][i] * chrFilter[j];
                v += chrSrc[j][i + VOFW] * chrFilter[j];
            }

            uDest[2*i]= av_clip_uint8(u>>19);
            uDest[2*i+1]= av_clip_uint8(v>>19);
        }
    else
        for (i=0; i<chrDstW; i++)
        {
            int u=1<<18;
            int v=1<<18;
            int j;
            for (j=0; j<chrFilterSize; j++)
            {
                u += chrSrc[j][i] * chrFilter[j];
                v += chrSrc[j][i + VOFW] * chrFilter[j];
            }

            uDest[2*i]= av_clip_uint8(v>>19);
            uDest[2*i+1]= av_clip_uint8(u>>19);
        }
}
569

    
570
#define YSCALE_YUV_2_PACKEDX_NOCLIP_C(type,alpha) \
571
    for (i=0; i<(dstW>>1); i++){\
572
        int j;\
573
        int Y1 = 1<<18;\
574
        int Y2 = 1<<18;\
575
        int U  = 1<<18;\
576
        int V  = 1<<18;\
577
        int av_unused A1, A2;\
578
        type av_unused *r, *b, *g;\
579
        const int i2= 2*i;\
580
        \
581
        for (j=0; j<lumFilterSize; j++)\
582
        {\
583
            Y1 += lumSrc[j][i2] * lumFilter[j];\
584
            Y2 += lumSrc[j][i2+1] * lumFilter[j];\
585
        }\
586
        for (j=0; j<chrFilterSize; j++)\
587
        {\
588
            U += chrSrc[j][i] * chrFilter[j];\
589
            V += chrSrc[j][i+VOFW] * chrFilter[j];\
590
        }\
591
        Y1>>=19;\
592
        Y2>>=19;\
593
        U >>=19;\
594
        V >>=19;\
595
        if (alpha){\
596
            A1 = 1<<18;\
597
            A2 = 1<<18;\
598
            for (j=0; j<lumFilterSize; j++){\
599
                A1 += alpSrc[j][i2  ] * lumFilter[j];\
600
                A2 += alpSrc[j][i2+1] * lumFilter[j];\
601
            }\
602
            A1>>=19;\
603
            A2>>=19;\
604
        }\
605

    
606
#define YSCALE_YUV_2_PACKEDX_C(type,alpha) \
607
        YSCALE_YUV_2_PACKEDX_NOCLIP_C(type,alpha)\
608
        if ((Y1|Y2|U|V)&256)\
609
        {\
610
            if (Y1>255)   Y1=255; \
611
            else if (Y1<0)Y1=0;   \
612
            if (Y2>255)   Y2=255; \
613
            else if (Y2<0)Y2=0;   \
614
            if (U>255)    U=255;  \
615
            else if (U<0) U=0;    \
616
            if (V>255)    V=255;  \
617
            else if (V<0) V=0;    \
618
        }\
619
        if (alpha && ((A1|A2)&256)){\
620
            A1=av_clip_uint8(A1);\
621
            A2=av_clip_uint8(A2);\
622
        }
623

    
624
#define YSCALE_YUV_2_PACKEDX_FULL_C(rnd,alpha) \
625
    for (i=0; i<dstW; i++){\
626
        int j;\
627
        int Y = 0;\
628
        int U = -128<<19;\
629
        int V = -128<<19;\
630
        int av_unused A;\
631
        int R,G,B;\
632
        \
633
        for (j=0; j<lumFilterSize; j++){\
634
            Y += lumSrc[j][i     ] * lumFilter[j];\
635
        }\
636
        for (j=0; j<chrFilterSize; j++){\
637
            U += chrSrc[j][i     ] * chrFilter[j];\
638
            V += chrSrc[j][i+VOFW] * chrFilter[j];\
639
        }\
640
        Y >>=10;\
641
        U >>=10;\
642
        V >>=10;\
643
        if (alpha){\
644
            A = rnd;\
645
            for (j=0; j<lumFilterSize; j++)\
646
                A += alpSrc[j][i     ] * lumFilter[j];\
647
            A >>=19;\
648
            if (A&256)\
649
                A = av_clip_uint8(A);\
650
        }\
651

    
652
#define YSCALE_YUV_2_RGBX_FULL_C(rnd,alpha) \
653
    YSCALE_YUV_2_PACKEDX_FULL_C(rnd>>3,alpha)\
654
        Y-= c->yuv2rgb_y_offset;\
655
        Y*= c->yuv2rgb_y_coeff;\
656
        Y+= rnd;\
657
        R= Y + V*c->yuv2rgb_v2r_coeff;\
658
        G= Y + V*c->yuv2rgb_v2g_coeff + U*c->yuv2rgb_u2g_coeff;\
659
        B= Y +                          U*c->yuv2rgb_u2b_coeff;\
660
        if ((R|G|B)&(0xC0000000)){\
661
            if (R>=(256<<22))   R=(256<<22)-1; \
662
            else if (R<0)R=0;   \
663
            if (G>=(256<<22))   G=(256<<22)-1; \
664
            else if (G<0)G=0;   \
665
            if (B>=(256<<22))   B=(256<<22)-1; \
666
            else if (B<0)B=0;   \
667
        }\
668

    
669

    
670
#define YSCALE_YUV_2_GRAY16_C \
671
    for (i=0; i<(dstW>>1); i++){\
672
        int j;\
673
        int Y1 = 1<<18;\
674
        int Y2 = 1<<18;\
675
        int U  = 1<<18;\
676
        int V  = 1<<18;\
677
        \
678
        const int i2= 2*i;\
679
        \
680
        for (j=0; j<lumFilterSize; j++)\
681
        {\
682
            Y1 += lumSrc[j][i2] * lumFilter[j];\
683
            Y2 += lumSrc[j][i2+1] * lumFilter[j];\
684
        }\
685
        Y1>>=11;\
686
        Y2>>=11;\
687
        if ((Y1|Y2|U|V)&65536)\
688
        {\
689
            if (Y1>65535)   Y1=65535; \
690
            else if (Y1<0)Y1=0;   \
691
            if (Y2>65535)   Y2=65535; \
692
            else if (Y2<0)Y2=0;   \
693
        }
694

    
695
#define YSCALE_YUV_2_RGBX_C(type,alpha) \
696
    YSCALE_YUV_2_PACKEDX_C(type,alpha)  /* FIXME fix tables so that clipping is not needed and then use _NOCLIP*/\
697
    r = (type *)c->table_rV[V];   \
698
    g = (type *)(c->table_gU[U] + c->table_gV[V]); \
699
    b = (type *)c->table_bU[U];   \
700

    
701
#define YSCALE_YUV_2_PACKED2_C(type,alpha)   \
702
    for (i=0; i<(dstW>>1); i++){ \
703
        const int i2= 2*i;       \
704
        int Y1= (buf0[i2  ]*yalpha1+buf1[i2  ]*yalpha)>>19;           \
705
        int Y2= (buf0[i2+1]*yalpha1+buf1[i2+1]*yalpha)>>19;           \
706
        int U= (uvbuf0[i     ]*uvalpha1+uvbuf1[i     ]*uvalpha)>>19;  \
707
        int V= (uvbuf0[i+VOFW]*uvalpha1+uvbuf1[i+VOFW]*uvalpha)>>19;  \
708
        type av_unused *r, *b, *g;                                    \
709
        int av_unused A1, A2;                                         \
710
        if (alpha){\
711
            A1= (abuf0[i2  ]*yalpha1+abuf1[i2  ]*yalpha)>>19;         \
712
            A2= (abuf0[i2+1]*yalpha1+abuf1[i2+1]*yalpha)>>19;         \
713
        }\
714

    
715
#define YSCALE_YUV_2_GRAY16_2_C   \
716
    for (i=0; i<(dstW>>1); i++){ \
717
        const int i2= 2*i;       \
718
        int Y1= (buf0[i2  ]*yalpha1+buf1[i2  ]*yalpha)>>11;           \
719
        int Y2= (buf0[i2+1]*yalpha1+buf1[i2+1]*yalpha)>>11;           \
720

    
721
#define YSCALE_YUV_2_RGB2_C(type,alpha) \
722
    YSCALE_YUV_2_PACKED2_C(type,alpha)\
723
    r = (type *)c->table_rV[V];\
724
    g = (type *)(c->table_gU[U] + c->table_gV[V]);\
725
    b = (type *)c->table_bU[U];\
726

    
727
#define YSCALE_YUV_2_PACKED1_C(type,alpha) \
728
    for (i=0; i<(dstW>>1); i++){\
729
        const int i2= 2*i;\
730
        int Y1= buf0[i2  ]>>7;\
731
        int Y2= buf0[i2+1]>>7;\
732
        int U= (uvbuf1[i     ])>>7;\
733
        int V= (uvbuf1[i+VOFW])>>7;\
734
        type av_unused *r, *b, *g;\
735
        int av_unused A1, A2;\
736
        if (alpha){\
737
            A1= abuf0[i2  ]>>7;\
738
            A2= abuf0[i2+1]>>7;\
739
        }\
740

    
741
#define YSCALE_YUV_2_GRAY16_1_C \
742
    for (i=0; i<(dstW>>1); i++){\
743
        const int i2= 2*i;\
744
        int Y1= buf0[i2  ]<<1;\
745
        int Y2= buf0[i2+1]<<1;\
746

    
747
#define YSCALE_YUV_2_RGB1_C(type,alpha) \
748
    YSCALE_YUV_2_PACKED1_C(type,alpha)\
749
    r = (type *)c->table_rV[V];\
750
    g = (type *)(c->table_gU[U] + c->table_gV[V]);\
751
    b = (type *)c->table_bU[U];\
752

    
753
#define YSCALE_YUV_2_PACKED1B_C(type,alpha) \
754
    for (i=0; i<(dstW>>1); i++){\
755
        const int i2= 2*i;\
756
        int Y1= buf0[i2  ]>>7;\
757
        int Y2= buf0[i2+1]>>7;\
758
        int U= (uvbuf0[i     ] + uvbuf1[i     ])>>8;\
759
        int V= (uvbuf0[i+VOFW] + uvbuf1[i+VOFW])>>8;\
760
        type av_unused *r, *b, *g;\
761
        int av_unused A1, A2;\
762
        if (alpha){\
763
            A1= abuf0[i2  ]>>7;\
764
            A2= abuf0[i2+1]>>7;\
765
        }\
766

    
767
#define YSCALE_YUV_2_RGB1B_C(type,alpha) \
768
    YSCALE_YUV_2_PACKED1B_C(type,alpha)\
769
    r = (type *)c->table_rV[V];\
770
    g = (type *)(c->table_gU[U] + c->table_gV[V]);\
771
    b = (type *)c->table_bU[U];\
772

    
773
#define YSCALE_YUV_2_MONO2_C \
774
    const uint8_t * const d128=dither_8x8_220[y&7];\
775
    uint8_t *g= c->table_gU[128] + c->table_gV[128];\
776
    for (i=0; i<dstW-7; i+=8){\
777
        int acc;\
778
        acc =       g[((buf0[i  ]*yalpha1+buf1[i  ]*yalpha)>>19) + d128[0]];\
779
        acc+= acc + g[((buf0[i+1]*yalpha1+buf1[i+1]*yalpha)>>19) + d128[1]];\
780
        acc+= acc + g[((buf0[i+2]*yalpha1+buf1[i+2]*yalpha)>>19) + d128[2]];\
781
        acc+= acc + g[((buf0[i+3]*yalpha1+buf1[i+3]*yalpha)>>19) + d128[3]];\
782
        acc+= acc + g[((buf0[i+4]*yalpha1+buf1[i+4]*yalpha)>>19) + d128[4]];\
783
        acc+= acc + g[((buf0[i+5]*yalpha1+buf1[i+5]*yalpha)>>19) + d128[5]];\
784
        acc+= acc + g[((buf0[i+6]*yalpha1+buf1[i+6]*yalpha)>>19) + d128[6]];\
785
        acc+= acc + g[((buf0[i+7]*yalpha1+buf1[i+7]*yalpha)>>19) + d128[7]];\
786
        ((uint8_t*)dest)[0]= c->dstFormat == PIX_FMT_MONOBLACK ? acc : ~acc;\
787
        dest++;\
788
    }\
789

    
790

    
791
#define YSCALE_YUV_2_MONOX_C \
792
    const uint8_t * const d128=dither_8x8_220[y&7];\
793
    uint8_t *g= c->table_gU[128] + c->table_gV[128];\
794
    int acc=0;\
795
    for (i=0; i<dstW-1; i+=2){\
796
        int j;\
797
        int Y1=1<<18;\
798
        int Y2=1<<18;\
799
\
800
        for (j=0; j<lumFilterSize; j++)\
801
        {\
802
            Y1 += lumSrc[j][i] * lumFilter[j];\
803
            Y2 += lumSrc[j][i+1] * lumFilter[j];\
804
        }\
805
        Y1>>=19;\
806
        Y2>>=19;\
807
        if ((Y1|Y2)&256)\
808
        {\
809
            if (Y1>255)   Y1=255;\
810
            else if (Y1<0)Y1=0;\
811
            if (Y2>255)   Y2=255;\
812
            else if (Y2<0)Y2=0;\
813
        }\
814
        acc+= acc + g[Y1+d128[(i+0)&7]];\
815
        acc+= acc + g[Y2+d128[(i+1)&7]];\
816
        if ((i&7)==6){\
817
            ((uint8_t*)dest)[0]= c->dstFormat == PIX_FMT_MONOBLACK ? acc : ~acc;\
818
            dest++;\
819
        }\
820
    }
821

    
822

    
823
#define YSCALE_YUV_2_ANYRGB_C(func, func2, func_g16, func_monoblack)\
824
    switch(c->dstFormat)\
825
    {\
826
    case PIX_FMT_RGBA:\
827
    case PIX_FMT_BGRA:\
828
        if (CONFIG_SMALL){\
829
            int needAlpha = CONFIG_SWSCALE_ALPHA && c->alpPixBuf;\
830
            func(uint32_t,needAlpha)\
831
                ((uint32_t*)dest)[i2+0]= r[Y1] + g[Y1] + b[Y1] + (needAlpha ? (A1<<24) : 0);\
832
                ((uint32_t*)dest)[i2+1]= r[Y2] + g[Y2] + b[Y2] + (needAlpha ? (A2<<24) : 0);\
833
            }\
834
        }else{\
835
            if (CONFIG_SWSCALE_ALPHA && c->alpPixBuf){\
836
                func(uint32_t,1)\
837
                    ((uint32_t*)dest)[i2+0]= r[Y1] + g[Y1] + b[Y1] + (A1<<24);\
838
                    ((uint32_t*)dest)[i2+1]= r[Y2] + g[Y2] + b[Y2] + (A2<<24);\
839
                }\
840
            }else{\
841
                func(uint32_t,0)\
842
                    ((uint32_t*)dest)[i2+0]= r[Y1] + g[Y1] + b[Y1];\
843
                    ((uint32_t*)dest)[i2+1]= r[Y2] + g[Y2] + b[Y2];\
844
                }\
845
            }\
846
        }\
847
        break;\
848
    case PIX_FMT_ARGB:\
849
    case PIX_FMT_ABGR:\
850
        if (CONFIG_SMALL){\
851
            int needAlpha = CONFIG_SWSCALE_ALPHA && c->alpPixBuf;\
852
            func(uint32_t,needAlpha)\
853
                ((uint32_t*)dest)[i2+0]= r[Y1] + g[Y1] + b[Y1] + (needAlpha ? A1 : 0);\
854
                ((uint32_t*)dest)[i2+1]= r[Y2] + g[Y2] + b[Y2] + (needAlpha ? A2 : 0);\
855
            }\
856
        }else{\
857
            if (CONFIG_SWSCALE_ALPHA && c->alpPixBuf){\
858
                func(uint32_t,1)\
859
                    ((uint32_t*)dest)[i2+0]= r[Y1] + g[Y1] + b[Y1] + A1;\
860
                    ((uint32_t*)dest)[i2+1]= r[Y2] + g[Y2] + b[Y2] + A2;\
861
                }\
862
            }else{\
863
                func(uint32_t,0)\
864
                    ((uint32_t*)dest)[i2+0]= r[Y1] + g[Y1] + b[Y1];\
865
                    ((uint32_t*)dest)[i2+1]= r[Y2] + g[Y2] + b[Y2];\
866
                }\
867
            }\
868
        }                \
869
        break;\
870
    case PIX_FMT_RGB24:\
871
        func(uint8_t,0)\
872
            ((uint8_t*)dest)[0]= r[Y1];\
873
            ((uint8_t*)dest)[1]= g[Y1];\
874
            ((uint8_t*)dest)[2]= b[Y1];\
875
            ((uint8_t*)dest)[3]= r[Y2];\
876
            ((uint8_t*)dest)[4]= g[Y2];\
877
            ((uint8_t*)dest)[5]= b[Y2];\
878
            dest+=6;\
879
        }\
880
        break;\
881
    case PIX_FMT_BGR24:\
882
        func(uint8_t,0)\
883
            ((uint8_t*)dest)[0]= b[Y1];\
884
            ((uint8_t*)dest)[1]= g[Y1];\
885
            ((uint8_t*)dest)[2]= r[Y1];\
886
            ((uint8_t*)dest)[3]= b[Y2];\
887
            ((uint8_t*)dest)[4]= g[Y2];\
888
            ((uint8_t*)dest)[5]= r[Y2];\
889
            dest+=6;\
890
        }\
891
        break;\
892
    case PIX_FMT_RGB565:\
893
    case PIX_FMT_BGR565:\
894
        {\
895
            const int dr1= dither_2x2_8[y&1    ][0];\
896
            const int dg1= dither_2x2_4[y&1    ][0];\
897
            const int db1= dither_2x2_8[(y&1)^1][0];\
898
            const int dr2= dither_2x2_8[y&1    ][1];\
899
            const int dg2= dither_2x2_4[y&1    ][1];\
900
            const int db2= dither_2x2_8[(y&1)^1][1];\
901
            func(uint16_t,0)\
902
                ((uint16_t*)dest)[i2+0]= r[Y1+dr1] + g[Y1+dg1] + b[Y1+db1];\
903
                ((uint16_t*)dest)[i2+1]= r[Y2+dr2] + g[Y2+dg2] + b[Y2+db2];\
904
            }\
905
        }\
906
        break;\
907
    case PIX_FMT_RGB555:\
908
    case PIX_FMT_BGR555:\
909
        {\
910
            const int dr1= dither_2x2_8[y&1    ][0];\
911
            const int dg1= dither_2x2_8[y&1    ][1];\
912
            const int db1= dither_2x2_8[(y&1)^1][0];\
913
            const int dr2= dither_2x2_8[y&1    ][1];\
914
            const int dg2= dither_2x2_8[y&1    ][0];\
915
            const int db2= dither_2x2_8[(y&1)^1][1];\
916
            func(uint16_t,0)\
917
                ((uint16_t*)dest)[i2+0]= r[Y1+dr1] + g[Y1+dg1] + b[Y1+db1];\
918
                ((uint16_t*)dest)[i2+1]= r[Y2+dr2] + g[Y2+dg2] + b[Y2+db2];\
919
            }\
920
        }\
921
        break;\
922
    case PIX_FMT_RGB8:\
923
    case PIX_FMT_BGR8:\
924
        {\
925
            const uint8_t * const d64= dither_8x8_73[y&7];\
926
            const uint8_t * const d32= dither_8x8_32[y&7];\
927
            func(uint8_t,0)\
928
                ((uint8_t*)dest)[i2+0]= r[Y1+d32[(i2+0)&7]] + g[Y1+d32[(i2+0)&7]] + b[Y1+d64[(i2+0)&7]];\
929
                ((uint8_t*)dest)[i2+1]= r[Y2+d32[(i2+1)&7]] + g[Y2+d32[(i2+1)&7]] + b[Y2+d64[(i2+1)&7]];\
930
            }\
931
        }\
932
        break;\
933
    case PIX_FMT_RGB4:\
934
    case PIX_FMT_BGR4:\
935
        {\
936
            const uint8_t * const d64= dither_8x8_73 [y&7];\
937
            const uint8_t * const d128=dither_8x8_220[y&7];\
938
            func(uint8_t,0)\
939
                ((uint8_t*)dest)[i]= r[Y1+d128[(i2+0)&7]] + g[Y1+d64[(i2+0)&7]] + b[Y1+d128[(i2+0)&7]]\
940
                                 + ((r[Y2+d128[(i2+1)&7]] + g[Y2+d64[(i2+1)&7]] + b[Y2+d128[(i2+1)&7]])<<4);\
941
            }\
942
        }\
943
        break;\
944
    case PIX_FMT_RGB4_BYTE:\
945
    case PIX_FMT_BGR4_BYTE:\
946
        {\
947
            const uint8_t * const d64= dither_8x8_73 [y&7];\
948
            const uint8_t * const d128=dither_8x8_220[y&7];\
949
            func(uint8_t,0)\
950
                ((uint8_t*)dest)[i2+0]= r[Y1+d128[(i2+0)&7]] + g[Y1+d64[(i2+0)&7]] + b[Y1+d128[(i2+0)&7]];\
951
                ((uint8_t*)dest)[i2+1]= r[Y2+d128[(i2+1)&7]] + g[Y2+d64[(i2+1)&7]] + b[Y2+d128[(i2+1)&7]];\
952
            }\
953
        }\
954
        break;\
955
    case PIX_FMT_MONOBLACK:\
956
    case PIX_FMT_MONOWHITE:\
957
        {\
958
            func_monoblack\
959
        }\
960
        break;\
961
    case PIX_FMT_YUYV422:\
962
        func2\
963
            ((uint8_t*)dest)[2*i2+0]= Y1;\
964
            ((uint8_t*)dest)[2*i2+1]= U;\
965
            ((uint8_t*)dest)[2*i2+2]= Y2;\
966
            ((uint8_t*)dest)[2*i2+3]= V;\
967
        }                \
968
        break;\
969
    case PIX_FMT_UYVY422:\
970
        func2\
971
            ((uint8_t*)dest)[2*i2+0]= U;\
972
            ((uint8_t*)dest)[2*i2+1]= Y1;\
973
            ((uint8_t*)dest)[2*i2+2]= V;\
974
            ((uint8_t*)dest)[2*i2+3]= Y2;\
975
        }                \
976
        break;\
977
    case PIX_FMT_GRAY16BE:\
978
        func_g16\
979
            ((uint8_t*)dest)[2*i2+0]= Y1>>8;\
980
            ((uint8_t*)dest)[2*i2+1]= Y1;\
981
            ((uint8_t*)dest)[2*i2+2]= Y2>>8;\
982
            ((uint8_t*)dest)[2*i2+3]= Y2;\
983
        }                \
984
        break;\
985
    case PIX_FMT_GRAY16LE:\
986
        func_g16\
987
            ((uint8_t*)dest)[2*i2+0]= Y1;\
988
            ((uint8_t*)dest)[2*i2+1]= Y1>>8;\
989
            ((uint8_t*)dest)[2*i2+2]= Y2;\
990
            ((uint8_t*)dest)[2*i2+3]= Y2>>8;\
991
        }                \
992
        break;\
993
    }\
994

    
995

    
996
static inline void yuv2packedXinC(SwsContext *c, const int16_t *lumFilter, const int16_t **lumSrc, int lumFilterSize,
997
                                  const int16_t *chrFilter, const int16_t **chrSrc, int chrFilterSize,
998
                                  const int16_t **alpSrc, uint8_t *dest, int dstW, int y)
999
{
1000
    int i;
1001
    YSCALE_YUV_2_ANYRGB_C(YSCALE_YUV_2_RGBX_C, YSCALE_YUV_2_PACKEDX_C(void,0), YSCALE_YUV_2_GRAY16_C, YSCALE_YUV_2_MONOX_C)
1002
}
1003

    
1004
static inline void yuv2rgbXinC_full(SwsContext *c, const int16_t *lumFilter, const int16_t **lumSrc, int lumFilterSize,
1005
                                    const int16_t *chrFilter, const int16_t **chrSrc, int chrFilterSize,
1006
                                    const int16_t **alpSrc, uint8_t *dest, int dstW, int y)
1007
{
1008
    int i;
1009
    int step= fmt_depth(c->dstFormat)/8;
1010
    int aidx= 3;
1011

    
1012
    switch(c->dstFormat){
1013
    case PIX_FMT_ARGB:
1014
        dest++;
1015
        aidx= 0;
1016
    case PIX_FMT_RGB24:
1017
        aidx--;
1018
    case PIX_FMT_RGBA:
1019
        if (CONFIG_SMALL){
1020
            int needAlpha = CONFIG_SWSCALE_ALPHA && c->alpPixBuf;
1021
            YSCALE_YUV_2_RGBX_FULL_C(1<<21, needAlpha)
1022
                dest[aidx]= needAlpha ? A : 255;
1023
                dest[0]= R>>22;
1024
                dest[1]= G>>22;
1025
                dest[2]= B>>22;
1026
                dest+= step;
1027
            }
1028
        }else{
1029
            if (CONFIG_SWSCALE_ALPHA && c->alpPixBuf){
1030
                YSCALE_YUV_2_RGBX_FULL_C(1<<21, 1)
1031
                    dest[aidx]= A;
1032
                    dest[0]= R>>22;
1033
                    dest[1]= G>>22;
1034
                    dest[2]= B>>22;
1035
                    dest+= step;
1036
                }
1037
            }else{
1038
                YSCALE_YUV_2_RGBX_FULL_C(1<<21, 0)
1039
                    dest[aidx]= 255;
1040
                    dest[0]= R>>22;
1041
                    dest[1]= G>>22;
1042
                    dest[2]= B>>22;
1043
                    dest+= step;
1044
                }
1045
            }
1046
        }
1047
        break;
1048
    case PIX_FMT_ABGR:
1049
        dest++;
1050
        aidx= 0;
1051
    case PIX_FMT_BGR24:
1052
        aidx--;
1053
    case PIX_FMT_BGRA:
1054
        if (CONFIG_SMALL){
1055
            int needAlpha = CONFIG_SWSCALE_ALPHA && c->alpPixBuf;
1056
            YSCALE_YUV_2_RGBX_FULL_C(1<<21, needAlpha)
1057
                dest[aidx]= needAlpha ? A : 255;
1058
                dest[0]= B>>22;
1059
                dest[1]= G>>22;
1060
                dest[2]= R>>22;
1061
                dest+= step;
1062
            }
1063
        }else{
1064
            if (CONFIG_SWSCALE_ALPHA && c->alpPixBuf){
1065
                YSCALE_YUV_2_RGBX_FULL_C(1<<21, 1)
1066
                    dest[aidx]= A;
1067
                    dest[0]= B>>22;
1068
                    dest[1]= G>>22;
1069
                    dest[2]= R>>22;
1070
                    dest+= step;
1071
                }
1072
            }else{
1073
                YSCALE_YUV_2_RGBX_FULL_C(1<<21, 0)
1074
                    dest[aidx]= 255;
1075
                    dest[0]= B>>22;
1076
                    dest[1]= G>>22;
1077
                    dest[2]= R>>22;
1078
                    dest+= step;
1079
                }
1080
            }
1081
        }
1082
        break;
1083
    default:
1084
        assert(0);
1085
    }
1086
}
1087

    
1088
static void fillPlane(uint8_t* plane, int stride, int width, int height, int y, uint8_t val){
1089
    int i;
1090
    uint8_t *ptr = plane + stride*y;
1091
    for (i=0; i<height; i++){
1092
        memset(ptr, val, width);
1093
        ptr += stride;
1094
    }
1095
}
1096

    
1097
//Note: we have C, MMX, MMX2, 3DNOW versions, there is no 3DNOW+MMX2 one
1098
//Plain C versions
1099
#if !HAVE_MMX || CONFIG_RUNTIME_CPUDETECT || !CONFIG_GPL
1100
#define COMPILE_C
1101
#endif
1102

    
1103
#if ARCH_PPC
1104
#if (HAVE_ALTIVEC || CONFIG_RUNTIME_CPUDETECT) && CONFIG_GPL
1105
#undef COMPILE_C
1106
#define COMPILE_ALTIVEC
1107
#endif
1108
#endif //ARCH_PPC
1109

    
1110
#if ARCH_X86
1111

    
1112
#if ((HAVE_MMX && !HAVE_AMD3DNOW && !HAVE_MMX2) || CONFIG_RUNTIME_CPUDETECT) && CONFIG_GPL
1113
#define COMPILE_MMX
1114
#endif
1115

    
1116
#if (HAVE_MMX2 || CONFIG_RUNTIME_CPUDETECT) && CONFIG_GPL
1117
#define COMPILE_MMX2
1118
#endif
1119

    
1120
#if ((HAVE_AMD3DNOW && !HAVE_MMX2) || CONFIG_RUNTIME_CPUDETECT) && CONFIG_GPL
1121
#define COMPILE_3DNOW
1122
#endif
1123
#endif //ARCH_X86
1124

    
1125
#undef HAVE_MMX
1126
#undef HAVE_MMX2
1127
#undef HAVE_AMD3DNOW
1128
#undef HAVE_ALTIVEC
1129
#define HAVE_MMX 0
1130
#define HAVE_MMX2 0
1131
#define HAVE_AMD3DNOW 0
1132
#define HAVE_ALTIVEC 0
1133

    
1134
#ifdef COMPILE_C
1135
#define RENAME(a) a ## _C
1136
#include "swscale_template.c"
1137
#endif
1138

    
1139
#ifdef COMPILE_ALTIVEC
1140
#undef RENAME
1141
#undef HAVE_ALTIVEC
1142
#define HAVE_ALTIVEC 1
1143
#define RENAME(a) a ## _altivec
1144
#include "swscale_template.c"
1145
#endif
1146

    
1147
#if ARCH_X86
1148

    
1149
//MMX versions
1150
#ifdef COMPILE_MMX
1151
#undef RENAME
1152
#undef HAVE_MMX
1153
#undef HAVE_MMX2
1154
#undef HAVE_AMD3DNOW
1155
#define HAVE_MMX 1
1156
#define HAVE_MMX2 0
1157
#define HAVE_AMD3DNOW 0
1158
#define RENAME(a) a ## _MMX
1159
#include "swscale_template.c"
1160
#endif
1161

    
1162
//MMX2 versions
1163
#ifdef COMPILE_MMX2
1164
#undef RENAME
1165
#undef HAVE_MMX
1166
#undef HAVE_MMX2
1167
#undef HAVE_AMD3DNOW
1168
#define HAVE_MMX 1
1169
#define HAVE_MMX2 1
1170
#define HAVE_AMD3DNOW 0
1171
#define RENAME(a) a ## _MMX2
1172
#include "swscale_template.c"
1173
#endif
1174

    
1175
//3DNOW versions
1176
#ifdef COMPILE_3DNOW
1177
#undef RENAME
1178
#undef HAVE_MMX
1179
#undef HAVE_MMX2
1180
#undef HAVE_AMD3DNOW
1181
#define HAVE_MMX 1
1182
#define HAVE_MMX2 0
1183
#define HAVE_AMD3DNOW 1
1184
#define RENAME(a) a ## _3DNow
1185
#include "swscale_template.c"
1186
#endif
1187

    
1188
#endif //ARCH_X86
1189

    
1190
// minor note: the HAVE_xyz are messed up after this line so don't use them
1191

    
1192
static double getSplineCoeff(double a, double b, double c, double d, double dist)
1193
{
1194
//    printf("%f %f %f %f %f\n", a,b,c,d,dist);
1195
    if (dist<=1.0)      return ((d*dist + c)*dist + b)*dist +a;
1196
    else                return getSplineCoeff(        0.0,
1197
                                             b+ 2.0*c + 3.0*d,
1198
                                                    c + 3.0*d,
1199
                                            -b- 3.0*c - 6.0*d,
1200
                                            dist-1.0);
1201
}
1202

    
1203
static inline int initFilter(int16_t **outFilter, int16_t **filterPos, int *outFilterSize, int xInc,
1204
                             int srcW, int dstW, int filterAlign, int one, int flags,
1205
                             SwsVector *srcFilter, SwsVector *dstFilter, double param[2])
1206
{
1207
    int i;
1208
    int filterSize;
1209
    int filter2Size;
1210
    int minFilterSize;
1211
    int64_t *filter=NULL;
1212
    int64_t *filter2=NULL;
1213
    const int64_t fone= 1LL<<54;
1214
    int ret= -1;
1215
#if ARCH_X86
1216
    if (flags & SWS_CPU_CAPS_MMX)
1217
        __asm__ volatile("emms\n\t"::: "memory"); //FIXME this should not be required but it IS (even for non-MMX versions)
1218
#endif
1219

    
1220
    // NOTE: the +1 is for the MMX scaler which reads over the end
1221
    *filterPos = av_malloc((dstW+1)*sizeof(int16_t));
1222

    
1223
    if (FFABS(xInc - 0x10000) <10) // unscaled
1224
    {
1225
        int i;
1226
        filterSize= 1;
1227
        filter= av_mallocz(dstW*sizeof(*filter)*filterSize);
1228

    
1229
        for (i=0; i<dstW; i++)
1230
        {
1231
            filter[i*filterSize]= fone;
1232
            (*filterPos)[i]=i;
1233
        }
1234

    
1235
    }
1236
    else if (flags&SWS_POINT) // lame looking point sampling mode
1237
    {
1238
        int i;
1239
        int xDstInSrc;
1240
        filterSize= 1;
1241
        filter= av_malloc(dstW*sizeof(*filter)*filterSize);
1242

    
1243
        xDstInSrc= xInc/2 - 0x8000;
1244
        for (i=0; i<dstW; i++)
1245
        {
1246
            int xx= (xDstInSrc - ((filterSize-1)<<15) + (1<<15))>>16;
1247

    
1248
            (*filterPos)[i]= xx;
1249
            filter[i]= fone;
1250
            xDstInSrc+= xInc;
1251
        }
1252
    }
1253
    else if ((xInc <= (1<<16) && (flags&SWS_AREA)) || (flags&SWS_FAST_BILINEAR)) // bilinear upscale
1254
    {
1255
        int i;
1256
        int xDstInSrc;
1257
        if      (flags&SWS_BICUBIC) filterSize= 4;
1258
        else if (flags&SWS_X      ) filterSize= 4;
1259
        else                        filterSize= 2; // SWS_BILINEAR / SWS_AREA
1260
        filter= av_malloc(dstW*sizeof(*filter)*filterSize);
1261

    
1262
        xDstInSrc= xInc/2 - 0x8000;
1263
        for (i=0; i<dstW; i++)
1264
        {
1265
            int xx= (xDstInSrc - ((filterSize-1)<<15) + (1<<15))>>16;
1266
            int j;
1267

    
1268
            (*filterPos)[i]= xx;
1269
                //bilinear upscale / linear interpolate / area averaging
1270
                for (j=0; j<filterSize; j++)
1271
                {
1272
                    int64_t coeff= fone - FFABS((xx<<16) - xDstInSrc)*(fone>>16);
1273
                    if (coeff<0) coeff=0;
1274
                    filter[i*filterSize + j]= coeff;
1275
                    xx++;
1276
                }
1277
            xDstInSrc+= xInc;
1278
        }
1279
    }
1280
    else
1281
    {
1282
        int xDstInSrc;
1283
        int sizeFactor;
1284

    
1285
        if      (flags&SWS_BICUBIC)      sizeFactor=  4;
1286
        else if (flags&SWS_X)            sizeFactor=  8;
1287
        else if (flags&SWS_AREA)         sizeFactor=  1; //downscale only, for upscale it is bilinear
1288
        else if (flags&SWS_GAUSS)        sizeFactor=  8;   // infinite ;)
1289
        else if (flags&SWS_LANCZOS)      sizeFactor= param[0] != SWS_PARAM_DEFAULT ? ceil(2*param[0]) : 6;
1290
        else if (flags&SWS_SINC)         sizeFactor= 20; // infinite ;)
1291
        else if (flags&SWS_SPLINE)       sizeFactor= 20;  // infinite ;)
1292
        else if (flags&SWS_BILINEAR)     sizeFactor=  2;
1293
        else {
1294
            sizeFactor= 0; //GCC warning killer
1295
            assert(0);
1296
        }
1297

    
1298
        if (xInc <= 1<<16)      filterSize= 1 + sizeFactor; // upscale
1299
        else                    filterSize= 1 + (sizeFactor*srcW + dstW - 1)/ dstW;
1300

    
1301
        if (filterSize > srcW-2) filterSize=srcW-2;
1302

    
1303
        filter= av_malloc(dstW*sizeof(*filter)*filterSize);
1304

    
1305
        xDstInSrc= xInc - 0x10000;
1306
        for (i=0; i<dstW; i++)
1307
        {
1308
            int xx= (xDstInSrc - ((filterSize-2)<<16)) / (1<<17);
1309
            int j;
1310
            (*filterPos)[i]= xx;
1311
            for (j=0; j<filterSize; j++)
1312
            {
1313
                int64_t d= ((int64_t)FFABS((xx<<17) - xDstInSrc))<<13;
1314
                double floatd;
1315
                int64_t coeff;
1316

    
1317
                if (xInc > 1<<16)
1318
                    d= d*dstW/srcW;
1319
                floatd= d * (1.0/(1<<30));
1320

    
1321
                if (flags & SWS_BICUBIC)
1322
                {
1323
                    int64_t B= (param[0] != SWS_PARAM_DEFAULT ? param[0] :   0) * (1<<24);
1324
                    int64_t C= (param[1] != SWS_PARAM_DEFAULT ? param[1] : 0.6) * (1<<24);
1325
                    int64_t dd = ( d*d)>>30;
1326
                    int64_t ddd= (dd*d)>>30;
1327

    
1328
                    if      (d < 1LL<<30)
1329
                        coeff = (12*(1<<24)-9*B-6*C)*ddd + (-18*(1<<24)+12*B+6*C)*dd + (6*(1<<24)-2*B)*(1<<30);
1330
                    else if (d < 1LL<<31)
1331
                        coeff = (-B-6*C)*ddd + (6*B+30*C)*dd + (-12*B-48*C)*d + (8*B+24*C)*(1<<30);
1332
                    else
1333
                        coeff=0.0;
1334
                    coeff *= fone>>(30+24);
1335
                }
1336
/*                else if (flags & SWS_X)
1337
                {
1338
                    double p= param ? param*0.01 : 0.3;
1339
                    coeff = d ? sin(d*PI)/(d*PI) : 1.0;
1340
                    coeff*= pow(2.0, - p*d*d);
1341
                }*/
1342
                else if (flags & SWS_X)
1343
                {
1344
                    double A= param[0] != SWS_PARAM_DEFAULT ? param[0] : 1.0;
1345
                    double c;
1346

    
1347
                    if (floatd<1.0)
1348
                        c = cos(floatd*PI);
1349
                    else
1350
                        c=-1.0;
1351
                    if (c<0.0)      c= -pow(-c, A);
1352
                    else            c=  pow( c, A);
1353
                    coeff= (c*0.5 + 0.5)*fone;
1354
                }
1355
                else if (flags & SWS_AREA)
1356
                {
1357
                    int64_t d2= d - (1<<29);
1358
                    if      (d2*xInc < -(1LL<<(29+16))) coeff= 1.0 * (1LL<<(30+16));
1359
                    else if (d2*xInc <  (1LL<<(29+16))) coeff= -d2*xInc + (1LL<<(29+16));
1360
                    else coeff=0.0;
1361
                    coeff *= fone>>(30+16);
1362
                }
1363
                else if (flags & SWS_GAUSS)
1364
                {
1365
                    double p= param[0] != SWS_PARAM_DEFAULT ? param[0] : 3.0;
1366
                    coeff = (pow(2.0, - p*floatd*floatd))*fone;
1367
                }
1368
                else if (flags & SWS_SINC)
1369
                {
1370
                    coeff = (d ? sin(floatd*PI)/(floatd*PI) : 1.0)*fone;
1371
                }
1372
                else if (flags & SWS_LANCZOS)
1373
                {
1374
                    double p= param[0] != SWS_PARAM_DEFAULT ? param[0] : 3.0;
1375
                    coeff = (d ? sin(floatd*PI)*sin(floatd*PI/p)/(floatd*floatd*PI*PI/p) : 1.0)*fone;
1376
                    if (floatd>p) coeff=0;
1377
                }
1378
                else if (flags & SWS_BILINEAR)
1379
                {
1380
                    coeff= (1<<30) - d;
1381
                    if (coeff<0) coeff=0;
1382
                    coeff *= fone >> 30;
1383
                }
1384
                else if (flags & SWS_SPLINE)
1385
                {
1386
                    double p=-2.196152422706632;
1387
                    coeff = getSplineCoeff(1.0, 0.0, p, -p-1.0, floatd) * fone;
1388
                }
1389
                else {
1390
                    coeff= 0.0; //GCC warning killer
1391
                    assert(0);
1392
                }
1393

    
1394
                filter[i*filterSize + j]= coeff;
1395
                xx++;
1396
            }
1397
            xDstInSrc+= 2*xInc;
        }
    }

    /* apply src & dst Filter to filter -> filter2
       av_free(filter);
    */
    assert(filterSize>0);
    filter2Size= filterSize;
    if (srcFilter) filter2Size+= srcFilter->length - 1;
    if (dstFilter) filter2Size+= dstFilter->length - 1;
    assert(filter2Size>0);
    filter2= av_mallocz(filter2Size*dstW*sizeof(*filter2));

    for (i=0; i<dstW; i++)
    {
        int j, k;

        if(srcFilter){
            for (k=0; k<srcFilter->length; k++){
                for (j=0; j<filterSize; j++)
                    filter2[i*filter2Size + k + j] += srcFilter->coeff[k]*filter[i*filterSize + j];
            }
        }else{
            for (j=0; j<filterSize; j++)
                filter2[i*filter2Size + j]= filter[i*filterSize + j];
        }
        //FIXME dstFilter

        (*filterPos)[i]+= (filterSize-1)/2 - (filter2Size-1)/2;
    }
    av_freep(&filter);

    /* try to reduce the filter-size (step1 find size and shift left) */
    // Assume it is near normalized (*0.5 or *2.0 is OK but * 0.001 is not).
    minFilterSize= 0;
    for (i=dstW-1; i>=0; i--)
    {
        int min= filter2Size;
        int j;
        int64_t cutOff=0.0;

        /* get rid of near zero elements on the left by shifting left */
        for (j=0; j<filter2Size; j++)
        {
            int k;
            cutOff += FFABS(filter2[i*filter2Size]);

            if (cutOff > SWS_MAX_REDUCE_CUTOFF*fone) break;

            /* preserve monotonicity because the core can't handle the filter otherwise */
            if (i<dstW-1 && (*filterPos)[i] >= (*filterPos)[i+1]) break;

            // move filter coefficients left
            for (k=1; k<filter2Size; k++)
                filter2[i*filter2Size + k - 1]= filter2[i*filter2Size + k];
            filter2[i*filter2Size + k - 1]= 0;
            (*filterPos)[i]++;
        }

        cutOff=0;
        /* count near zeros on the right */
        for (j=filter2Size-1; j>0; j--)
        {
            cutOff += FFABS(filter2[i*filter2Size + j]);

            if (cutOff > SWS_MAX_REDUCE_CUTOFF*fone) break;
            min--;
        }

        if (min>minFilterSize) minFilterSize= min;
    }

    if (flags & SWS_CPU_CAPS_ALTIVEC) {
        // we can handle the special case 4,
        // so we don't want to go to the full 8
        if (minFilterSize < 5)
            filterAlign = 4;

        // We really don't want to waste our time
        // doing useless computation, so fall back on
        // the scalar C code for very small filters.
        // Vectorizing is worth it only if you have a
        // decent-sized vector.
        if (minFilterSize < 3)
            filterAlign = 1;
    }

    if (flags & SWS_CPU_CAPS_MMX) {
        // special case for unscaled vertical filtering
        if (minFilterSize == 1 && filterAlign == 2)
            filterAlign= 1;
    }

    assert(minFilterSize > 0);
    filterSize= (minFilterSize +(filterAlign-1)) & (~(filterAlign-1));
    assert(filterSize > 0);
    filter= av_malloc(filterSize*dstW*sizeof(*filter));
    if (filterSize >= MAX_FILTER_SIZE*16/((flags&SWS_ACCURATE_RND) ? APCK_SIZE : 16) || !filter)
        goto error;
    *outFilterSize= filterSize;

    if (flags&SWS_PRINT_INFO)
        av_log(NULL, AV_LOG_VERBOSE, "SwScaler: reducing / aligning filtersize %d -> %d\n", filter2Size, filterSize);
    /* try to reduce the filter-size (step2 reduce it) */
    for (i=0; i<dstW; i++)
    {
        int j;

        for (j=0; j<filterSize; j++)
        {
            if (j>=filter2Size) filter[i*filterSize + j]= 0;
            else                filter[i*filterSize + j]= filter2[i*filter2Size + j];
            if((flags & SWS_BITEXACT) && j>=minFilterSize)
                filter[i*filterSize + j]= 0;
        }
    }


    //FIXME try to align filterPos if possible

    //fix borders
    for (i=0; i<dstW; i++)
    {
        int j;
        if ((*filterPos)[i] < 0)
        {
            // move filter coefficients left to compensate for filterPos
            for (j=1; j<filterSize; j++)
            {
                int left= FFMAX(j + (*filterPos)[i], 0);
                filter[i*filterSize + left] += filter[i*filterSize + j];
                filter[i*filterSize + j]=0;
            }
            (*filterPos)[i]= 0;
        }

        if ((*filterPos)[i] + filterSize > srcW)
        {
            int shift= (*filterPos)[i] + filterSize - srcW;
            // move filter coefficients right to compensate for filterPos
            for (j=filterSize-2; j>=0; j--)
            {
                int right= FFMIN(j + shift, filterSize-1);
                filter[i*filterSize +right] += filter[i*filterSize +j];
                filter[i*filterSize +j]=0;
            }
            (*filterPos)[i]= srcW - filterSize;
        }
    }

    // Note the +1 is for the MMX scaler which reads over the end
    /* align at 16 for AltiVec (needed by hScale_altivec_real) */
    *outFilter= av_mallocz(*outFilterSize*(dstW+1)*sizeof(int16_t));

    /* normalize & store in outFilter */
    for (i=0; i<dstW; i++)
    {
        int j;
        int64_t error=0;
        int64_t sum=0;

        for (j=0; j<filterSize; j++)
        {
            sum+= filter[i*filterSize + j];
        }
        sum= (sum + one/2)/ one;
        for (j=0; j<*outFilterSize; j++)
        {
            int64_t v= filter[i*filterSize + j] + error;
            int intV= ROUNDED_DIV(v, sum);
            (*outFilter)[i*(*outFilterSize) + j]= intV;
            error= v - intV*sum;
        }
    }

    (*filterPos)[dstW]= (*filterPos)[dstW-1]; // the MMX scaler will read over the end
    for (i=0; i<*outFilterSize; i++)
    {
        int j= dstW*(*outFilterSize);
        (*outFilter)[j + i]= (*outFilter)[j + i - (*outFilterSize)];
    }

    ret=0;
error:
    av_free(filter);
    av_free(filter2);
    return ret;
}
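
/* Illustrative sketch (not used by the library): the "normalize & store"
 * loop above converts the wide fixed-point coefficients into the int16 taps
 * the scaler core expects, carrying each tap's rounding error into the next
 * tap so the error stays bounded instead of drifting across the row.
 * A stand-alone version of that idea, assuming a hypothetical row[] of n
 * coefficients and the same fixed-point unity "one", could look like this: */
#if 0
static void normalize_row_sketch(const int64_t *row, int16_t *out, int n, int64_t one)
{
    int64_t sum= 0, error= 0;
    int j;
    for (j=0; j<n; j++)
        sum += row[j];
    sum= (sum + one/2) / one;             // how many units of "one" the row holds
    for (j=0; j<n; j++) {
        int64_t v= row[j] + error;        // add the error left over from the previous tap
        int intV= ROUNDED_DIV(v, sum);    // rounded division from libavutil
        out[j]= intV;
        error= v - (int64_t)intV*sum;     // remember what this tap lost to rounding
    }
}
#endif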
1586

    
1587
#ifdef COMPILE_MMX2
1588
static void initMMX2HScaler(int dstW, int xInc, uint8_t *funnyCode, int16_t *filter, int32_t *filterPos, int numSplits)
1589
{
1590
    uint8_t *fragmentA;
1591
    x86_reg imm8OfPShufW1A;
1592
    x86_reg imm8OfPShufW2A;
1593
    x86_reg fragmentLengthA;
1594
    uint8_t *fragmentB;
1595
    x86_reg imm8OfPShufW1B;
1596
    x86_reg imm8OfPShufW2B;
1597
    x86_reg fragmentLengthB;
1598
    int fragmentPos;
1599

    
1600
    int xpos, i;
1601

    
1602
    // create an optimized horizontal scaling routine
1603

    
1604
    //code fragment
1605

    
1606
    __asm__ volatile(
1607
        "jmp                         9f                 \n\t"
1608
    // Begin
1609
        "0:                                             \n\t"
1610
        "movq    (%%"REG_d", %%"REG_a"), %%mm3          \n\t"
1611
        "movd    (%%"REG_c", %%"REG_S"), %%mm0          \n\t"
1612
        "movd   1(%%"REG_c", %%"REG_S"), %%mm1          \n\t"
1613
        "punpcklbw                %%mm7, %%mm1          \n\t"
1614
        "punpcklbw                %%mm7, %%mm0          \n\t"
1615
        "pshufw                   $0xFF, %%mm1, %%mm1   \n\t"
1616
        "1:                                             \n\t"
1617
        "pshufw                   $0xFF, %%mm0, %%mm0   \n\t"
1618
        "2:                                             \n\t"
1619
        "psubw                    %%mm1, %%mm0          \n\t"
1620
        "movl   8(%%"REG_b", %%"REG_a"), %%esi          \n\t"
1621
        "pmullw                   %%mm3, %%mm0          \n\t"
1622
        "psllw                       $7, %%mm1          \n\t"
1623
        "paddw                    %%mm1, %%mm0          \n\t"
1624

    
1625
        "movq                     %%mm0, (%%"REG_D", %%"REG_a") \n\t"
1626

    
1627
        "add                         $8, %%"REG_a"      \n\t"
1628
    // End
1629
        "9:                                             \n\t"
1630
//        "int $3                                         \n\t"
1631
        "lea                 " LOCAL_MANGLE(0b) ", %0   \n\t"
1632
        "lea                 " LOCAL_MANGLE(1b) ", %1   \n\t"
1633
        "lea                 " LOCAL_MANGLE(2b) ", %2   \n\t"
1634
        "dec                         %1                 \n\t"
1635
        "dec                         %2                 \n\t"
1636
        "sub                         %0, %1             \n\t"
1637
        "sub                         %0, %2             \n\t"
1638
        "lea                 " LOCAL_MANGLE(9b) ", %3   \n\t"
1639
        "sub                         %0, %3             \n\t"
1640

    
1641

    
1642
        :"=r" (fragmentA), "=r" (imm8OfPShufW1A), "=r" (imm8OfPShufW2A),
1643
        "=r" (fragmentLengthA)
1644
    );
1645

    
1646
    __asm__ volatile(
1647
        "jmp                         9f                 \n\t"
1648
    // Begin
1649
        "0:                                             \n\t"
1650
        "movq    (%%"REG_d", %%"REG_a"), %%mm3          \n\t"
1651
        "movd    (%%"REG_c", %%"REG_S"), %%mm0          \n\t"
1652
        "punpcklbw                %%mm7, %%mm0          \n\t"
1653
        "pshufw                   $0xFF, %%mm0, %%mm1   \n\t"
1654
        "1:                                             \n\t"
1655
        "pshufw                   $0xFF, %%mm0, %%mm0   \n\t"
1656
        "2:                                             \n\t"
1657
        "psubw                    %%mm1, %%mm0          \n\t"
1658
        "movl   8(%%"REG_b", %%"REG_a"), %%esi          \n\t"
1659
        "pmullw                   %%mm3, %%mm0          \n\t"
1660
        "psllw                       $7, %%mm1          \n\t"
1661
        "paddw                    %%mm1, %%mm0          \n\t"
1662

    
1663
        "movq                     %%mm0, (%%"REG_D", %%"REG_a") \n\t"
1664

    
1665
        "add                         $8, %%"REG_a"      \n\t"
1666
    // End
1667
        "9:                                             \n\t"
1668
//        "int                       $3                   \n\t"
1669
        "lea                 " LOCAL_MANGLE(0b) ", %0   \n\t"
1670
        "lea                 " LOCAL_MANGLE(1b) ", %1   \n\t"
1671
        "lea                 " LOCAL_MANGLE(2b) ", %2   \n\t"
1672
        "dec                         %1                 \n\t"
1673
        "dec                         %2                 \n\t"
1674
        "sub                         %0, %1             \n\t"
1675
        "sub                         %0, %2             \n\t"
1676
        "lea                 " LOCAL_MANGLE(9b) ", %3   \n\t"
1677
        "sub                         %0, %3             \n\t"
1678

    
1679

    
1680
        :"=r" (fragmentB), "=r" (imm8OfPShufW1B), "=r" (imm8OfPShufW2B),
1681
        "=r" (fragmentLengthB)
1682
    );
1683

    
1684
    xpos= 0; //lumXInc/2 - 0x8000; // difference between pixel centers
1685
    fragmentPos=0;
1686

    
1687
    for (i=0; i<dstW/numSplits; i++)
1688
    {
1689
        int xx=xpos>>16;
1690

    
1691
        if ((i&3) == 0)
1692
        {
1693
            int a=0;
1694
            int b=((xpos+xInc)>>16) - xx;
1695
            int c=((xpos+xInc*2)>>16) - xx;
1696
            int d=((xpos+xInc*3)>>16) - xx;
1697

    
1698
            filter[i  ] = (( xpos         & 0xFFFF) ^ 0xFFFF)>>9;
1699
            filter[i+1] = (((xpos+xInc  ) & 0xFFFF) ^ 0xFFFF)>>9;
1700
            filter[i+2] = (((xpos+xInc*2) & 0xFFFF) ^ 0xFFFF)>>9;
1701
            filter[i+3] = (((xpos+xInc*3) & 0xFFFF) ^ 0xFFFF)>>9;
1702
            filterPos[i/2]= xx;
1703

    
1704
            if (d+1<4)
1705
            {
1706
                int maxShift= 3-(d+1);
1707
                int shift=0;
1708

    
1709
                memcpy(funnyCode + fragmentPos, fragmentB, fragmentLengthB);
1710

    
1711
                funnyCode[fragmentPos + imm8OfPShufW1B]=
1712
                    (a+1) | ((b+1)<<2) | ((c+1)<<4) | ((d+1)<<6);
1713
                funnyCode[fragmentPos + imm8OfPShufW2B]=
1714
                    a | (b<<2) | (c<<4) | (d<<6);
1715

    
1716
                if (i+3>=dstW) shift=maxShift; //avoid overread
1717
                else if ((filterPos[i/2]&3) <= maxShift) shift=filterPos[i/2]&3; //Align
1718

    
1719
                if (shift && i>=shift)
1720
                {
1721
                    funnyCode[fragmentPos + imm8OfPShufW1B]+= 0x55*shift;
1722
                    funnyCode[fragmentPos + imm8OfPShufW2B]+= 0x55*shift;
1723
                    filterPos[i/2]-=shift;
1724
                }
1725

    
1726
                fragmentPos+= fragmentLengthB;
1727
            }
1728
            else
1729
            {
1730
                int maxShift= 3-d;
1731
                int shift=0;
1732

    
1733
                memcpy(funnyCode + fragmentPos, fragmentA, fragmentLengthA);
1734

    
1735
                funnyCode[fragmentPos + imm8OfPShufW1A]=
1736
                funnyCode[fragmentPos + imm8OfPShufW2A]=
1737
                    a | (b<<2) | (c<<4) | (d<<6);
1738

    
1739
                if (i+4>=dstW) shift=maxShift; //avoid overread
1740
                else if ((filterPos[i/2]&3) <= maxShift) shift=filterPos[i/2]&3; //partial align
1741

    
1742
                if (shift && i>=shift)
1743
                {
1744
                    funnyCode[fragmentPos + imm8OfPShufW1A]+= 0x55*shift;
1745
                    funnyCode[fragmentPos + imm8OfPShufW2A]+= 0x55*shift;
1746
                    filterPos[i/2]-=shift;
1747
                }
1748

    
1749
                fragmentPos+= fragmentLengthA;
1750
            }
1751

    
1752
            funnyCode[fragmentPos]= RET;
1753
        }
1754
        xpos+=xInc;
1755
    }
1756
    filterPos[((i/2)+1)&(~1)]= xpos>>16; // needed to jump to the next part
1757
}
1758
#endif /* COMPILE_MMX2 */
1759

    
1760
static void globalInit(void){
    // generating tables:
    int i;
    for (i=0; i<768; i++){
        int c= av_clip_uint8(i-256);
        clip_table[i]=c;
    }
}

static SwsFunc getSwsFunc(SwsContext *c)
{
    int flags = c->flags;

#if CONFIG_RUNTIME_CPUDETECT && CONFIG_GPL
#if ARCH_X86
    // ordered per speed fastest first
    if (flags & SWS_CPU_CAPS_MMX2) {
        sws_init_swScale_MMX2(c);
        return swScale_MMX2;
    } else if (flags & SWS_CPU_CAPS_3DNOW) {
        sws_init_swScale_3DNow(c);
        return swScale_3DNow;
    } else if (flags & SWS_CPU_CAPS_MMX) {
        sws_init_swScale_MMX(c);
        return swScale_MMX;
    } else {
        sws_init_swScale_C(c);
        return swScale_C;
    }

#else
#if ARCH_PPC
    if (flags & SWS_CPU_CAPS_ALTIVEC) {
        sws_init_swScale_altivec(c);
        return swScale_altivec;
    } else {
        sws_init_swScale_C(c);
        return swScale_C;
    }
#endif
    sws_init_swScale_C(c);
    return swScale_C;
#endif /* ARCH_X86 */
#else //CONFIG_RUNTIME_CPUDETECT
#if   HAVE_MMX2
    sws_init_swScale_MMX2(c);
    return swScale_MMX2;
#elif HAVE_AMD3DNOW
    sws_init_swScale_3DNow(c);
    return swScale_3DNow;
#elif HAVE_MMX
    sws_init_swScale_MMX(c);
    return swScale_MMX;
#elif HAVE_ALTIVEC
    sws_init_swScale_altivec(c);
    return swScale_altivec;
#else
    sws_init_swScale_C(c);
    return swScale_C;
#endif
#endif //!CONFIG_RUNTIME_CPUDETECT
}
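
/* For orientation: getSwsFunc() and the unscaled-converter wrappers below are
 * only reached through the public entry points declared in swscale.h.  A
 * minimal caller, sketched here with made-up dimensions and without error
 * handling, would do roughly the following: */
#if 0
static void example_scale_once(uint8_t *src_data[4], int src_stride[4],
                               uint8_t *dst_data[4], int dst_stride[4])
{
    struct SwsContext *ctx= sws_getContext(640, 480, PIX_FMT_YUV420P,
                                           320, 240, PIX_FMT_RGB24,
                                           SWS_BILINEAR, NULL, NULL, NULL);
    if (ctx) {
        // process the whole frame as a single slice starting at line 0
        sws_scale(ctx, src_data, src_stride, 0, 480, dst_data, dst_stride);
        sws_freeContext(ctx);
    }
}
#endif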

static int PlanarToNV12Wrapper(SwsContext *c, uint8_t* src[], int srcStride[], int srcSliceY,
                               int srcSliceH, uint8_t* dstParam[], int dstStride[]){
    uint8_t *dst=dstParam[0] + dstStride[0]*srcSliceY;
    /* Copy Y plane */
    if (dstStride[0]==srcStride[0] && srcStride[0] > 0)
        memcpy(dst, src[0], srcSliceH*dstStride[0]);
    else
    {
        int i;
        const uint8_t *srcPtr= src[0];
        uint8_t *dstPtr= dst;
        for (i=0; i<srcSliceH; i++)
        {
            memcpy(dstPtr, srcPtr, c->srcW);
            srcPtr+= srcStride[0];
            dstPtr+= dstStride[0];
        }
    }
    dst = dstParam[1] + dstStride[1]*srcSliceY/2;
    if (c->dstFormat == PIX_FMT_NV12)
        interleaveBytes(src[1], src[2], dst, c->srcW/2, srcSliceH/2, srcStride[1], srcStride[2], dstStride[0]);
    else
        interleaveBytes(src[2], src[1], dst, c->srcW/2, srcSliceH/2, srcStride[2], srcStride[1], dstStride[0]);

    return srcSliceH;
}

static int PlanarToYuy2Wrapper(SwsContext *c, uint8_t* src[], int srcStride[], int srcSliceY,
                               int srcSliceH, uint8_t* dstParam[], int dstStride[]){
    uint8_t *dst=dstParam[0] + dstStride[0]*srcSliceY;

    yv12toyuy2(src[0], src[1], src[2], dst, c->srcW, srcSliceH, srcStride[0], srcStride[1], dstStride[0]);

    return srcSliceH;
}

static int PlanarToUyvyWrapper(SwsContext *c, uint8_t* src[], int srcStride[], int srcSliceY,
                               int srcSliceH, uint8_t* dstParam[], int dstStride[]){
    uint8_t *dst=dstParam[0] + dstStride[0]*srcSliceY;

    yv12touyvy(src[0], src[1], src[2], dst, c->srcW, srcSliceH, srcStride[0], srcStride[1], dstStride[0]);

    return srcSliceH;
}

static int YUV422PToYuy2Wrapper(SwsContext *c, uint8_t* src[], int srcStride[], int srcSliceY,
                                int srcSliceH, uint8_t* dstParam[], int dstStride[]){
    uint8_t *dst=dstParam[0] + dstStride[0]*srcSliceY;

    yuv422ptoyuy2(src[0],src[1],src[2],dst,c->srcW,srcSliceH,srcStride[0],srcStride[1],dstStride[0]);

    return srcSliceH;
}

static int YUV422PToUyvyWrapper(SwsContext *c, uint8_t* src[], int srcStride[], int srcSliceY,
                                int srcSliceH, uint8_t* dstParam[], int dstStride[]){
    uint8_t *dst=dstParam[0] + dstStride[0]*srcSliceY;

    yuv422ptouyvy(src[0],src[1],src[2],dst,c->srcW,srcSliceH,srcStride[0],srcStride[1],dstStride[0]);

    return srcSliceH;
}

static int YUYV2YUV420Wrapper(SwsContext *c, uint8_t* src[], int srcStride[], int srcSliceY,
                               int srcSliceH, uint8_t* dstParam[], int dstStride[]){
    uint8_t *ydst=dstParam[0] + dstStride[0]*srcSliceY;
    uint8_t *udst=dstParam[1] + dstStride[1]*srcSliceY/2;
    uint8_t *vdst=dstParam[2] + dstStride[2]*srcSliceY/2;

    yuyvtoyuv420(ydst, udst, vdst, src[0], c->srcW, srcSliceH, dstStride[0], dstStride[1], srcStride[0]);

    if (dstParam[3])
        fillPlane(dstParam[3], dstStride[3], c->srcW, srcSliceH, srcSliceY, 255);

    return srcSliceH;
}

static int YUYV2YUV422Wrapper(SwsContext *c, uint8_t* src[], int srcStride[], int srcSliceY,
                               int srcSliceH, uint8_t* dstParam[], int dstStride[]){
    uint8_t *ydst=dstParam[0] + dstStride[0]*srcSliceY;
    uint8_t *udst=dstParam[1] + dstStride[1]*srcSliceY;
    uint8_t *vdst=dstParam[2] + dstStride[2]*srcSliceY;

    yuyvtoyuv422(ydst, udst, vdst, src[0], c->srcW, srcSliceH, dstStride[0], dstStride[1], srcStride[0]);

    return srcSliceH;
}

static int UYVY2YUV420Wrapper(SwsContext *c, uint8_t* src[], int srcStride[], int srcSliceY,
                               int srcSliceH, uint8_t* dstParam[], int dstStride[]){
    uint8_t *ydst=dstParam[0] + dstStride[0]*srcSliceY;
    uint8_t *udst=dstParam[1] + dstStride[1]*srcSliceY/2;
    uint8_t *vdst=dstParam[2] + dstStride[2]*srcSliceY/2;

    uyvytoyuv420(ydst, udst, vdst, src[0], c->srcW, srcSliceH, dstStride[0], dstStride[1], srcStride[0]);

    if (dstParam[3])
        fillPlane(dstParam[3], dstStride[3], c->srcW, srcSliceH, srcSliceY, 255);

    return srcSliceH;
}

static int UYVY2YUV422Wrapper(SwsContext *c, uint8_t* src[], int srcStride[], int srcSliceY,
                               int srcSliceH, uint8_t* dstParam[], int dstStride[]){
    uint8_t *ydst=dstParam[0] + dstStride[0]*srcSliceY;
    uint8_t *udst=dstParam[1] + dstStride[1]*srcSliceY;
    uint8_t *vdst=dstParam[2] + dstStride[2]*srcSliceY;

    uyvytoyuv422(ydst, udst, vdst, src[0], c->srcW, srcSliceH, dstStride[0], dstStride[1], srcStride[0]);

    return srcSliceH;
}

static int pal2rgbWrapper(SwsContext *c, uint8_t* src[], int srcStride[], int srcSliceY,
                          int srcSliceH, uint8_t* dst[], int dstStride[]){
    const enum PixelFormat srcFormat= c->srcFormat;
    const enum PixelFormat dstFormat= c->dstFormat;
    void (*conv)(const uint8_t *src, uint8_t *dst, long num_pixels,
                 const uint8_t *palette)=NULL;
    int i;
    uint8_t *dstPtr= dst[0] + dstStride[0]*srcSliceY;
    uint8_t *srcPtr= src[0];

    if (!usePal(srcFormat))
        av_log(c, AV_LOG_ERROR, "internal error %s -> %s converter\n",
               sws_format_name(srcFormat), sws_format_name(dstFormat));

    switch(dstFormat){
    case PIX_FMT_RGB32  : conv = palette8topacked32; break;
    case PIX_FMT_BGR32  : conv = palette8topacked32; break;
    case PIX_FMT_BGR32_1: conv = palette8topacked32; break;
    case PIX_FMT_RGB32_1: conv = palette8topacked32; break;
    case PIX_FMT_RGB24  : conv = palette8topacked24; break;
    case PIX_FMT_BGR24  : conv = palette8topacked24; break;
    default: av_log(c, AV_LOG_ERROR, "internal error %s -> %s converter\n",
                    sws_format_name(srcFormat), sws_format_name(dstFormat)); break;
    }


    for (i=0; i<srcSliceH; i++) {
        conv(srcPtr, dstPtr, c->srcW, (uint8_t *) c->pal_rgb);
        srcPtr+= srcStride[0];
        dstPtr+= dstStride[0];
    }

    return srcSliceH;
}

/* {RGB,BGR}{15,16,24,32,32_1} -> {RGB,BGR}{15,16,24,32} */
static int rgb2rgbWrapper(SwsContext *c, uint8_t* src[], int srcStride[], int srcSliceY,
                          int srcSliceH, uint8_t* dst[], int dstStride[]){
    const enum PixelFormat srcFormat= c->srcFormat;
    const enum PixelFormat dstFormat= c->dstFormat;
    const int srcBpp= (fmt_depth(srcFormat) + 7) >> 3;
    const int dstBpp= (fmt_depth(dstFormat) + 7) >> 3;
    const int srcId= fmt_depth(srcFormat) >> 2; /* 1:0, 4:1, 8:2, 15:3, 16:4, 24:6, 32:8 */
    const int dstId= fmt_depth(dstFormat) >> 2;
    void (*conv)(const uint8_t *src, uint8_t *dst, long src_size)=NULL;

    /* BGR -> BGR */
    if (  (isBGR(srcFormat) && isBGR(dstFormat))
       || (isRGB(srcFormat) && isRGB(dstFormat))){
        switch(srcId | (dstId<<4)){
        case 0x34: conv= rgb16to15; break;
        case 0x36: conv= rgb24to15; break;
        case 0x38: conv= rgb32to15; break;
        case 0x43: conv= rgb15to16; break;
        case 0x46: conv= rgb24to16; break;
        case 0x48: conv= rgb32to16; break;
        case 0x63: conv= rgb15to24; break;
        case 0x64: conv= rgb16to24; break;
        case 0x68: conv= rgb32to24; break;
        case 0x83: conv= rgb15to32; break;
        case 0x84: conv= rgb16to32; break;
        case 0x86: conv= rgb24to32; break;
        default: av_log(c, AV_LOG_ERROR, "internal error %s -> %s converter\n",
                        sws_format_name(srcFormat), sws_format_name(dstFormat)); break;
        }
    }else if (  (isBGR(srcFormat) && isRGB(dstFormat))
             || (isRGB(srcFormat) && isBGR(dstFormat))){
        switch(srcId | (dstId<<4)){
        case 0x33: conv= rgb15tobgr15; break;
        case 0x34: conv= rgb16tobgr15; break;
        case 0x36: conv= rgb24tobgr15; break;
        case 0x38: conv= rgb32tobgr15; break;
        case 0x43: conv= rgb15tobgr16; break;
        case 0x44: conv= rgb16tobgr16; break;
        case 0x46: conv= rgb24tobgr16; break;
        case 0x48: conv= rgb32tobgr16; break;
        case 0x63: conv= rgb15tobgr24; break;
        case 0x64: conv= rgb16tobgr24; break;
        case 0x66: conv= rgb24tobgr24; break;
        case 0x68: conv= rgb32tobgr24; break;
        case 0x83: conv= rgb15tobgr32; break;
        case 0x84: conv= rgb16tobgr32; break;
        case 0x86: conv= rgb24tobgr32; break;
        case 0x88: conv= rgb32tobgr32; break;
        default: av_log(c, AV_LOG_ERROR, "internal error %s -> %s converter\n",
                        sws_format_name(srcFormat), sws_format_name(dstFormat)); break;
        }
    }else{
        av_log(c, AV_LOG_ERROR, "internal error %s -> %s converter\n",
               sws_format_name(srcFormat), sws_format_name(dstFormat));
    }

    if(conv)
    {
        uint8_t *srcPtr= src[0];
        if(srcFormat == PIX_FMT_RGB32_1 || srcFormat == PIX_FMT_BGR32_1)
            srcPtr += ALT32_CORR;

        if (dstStride[0]*srcBpp == srcStride[0]*dstBpp && srcStride[0] > 0)
            conv(srcPtr, dst[0] + dstStride[0]*srcSliceY, srcSliceH*srcStride[0]);
        else
        {
            int i;
            uint8_t *dstPtr= dst[0] + dstStride[0]*srcSliceY;

            for (i=0; i<srcSliceH; i++)
            {
                conv(srcPtr, dstPtr, c->srcW*srcBpp);
                srcPtr+= srcStride[0];
                dstPtr+= dstStride[0];
            }
        }
    }
    return srcSliceH;
}

static int bgr24toyv12Wrapper(SwsContext *c, uint8_t* src[], int srcStride[], int srcSliceY,
                              int srcSliceH, uint8_t* dst[], int dstStride[]){

    rgb24toyv12(
        src[0],
        dst[0]+ srcSliceY    *dstStride[0],
        dst[1]+(srcSliceY>>1)*dstStride[1],
        dst[2]+(srcSliceY>>1)*dstStride[2],
        c->srcW, srcSliceH,
        dstStride[0], dstStride[1], srcStride[0]);
    if (dst[3])
        fillPlane(dst[3], dstStride[3], c->srcW, srcSliceH, srcSliceY, 255);
    return srcSliceH;
}

static int yvu9toyv12Wrapper(SwsContext *c, uint8_t* src[], int srcStride[], int srcSliceY,
                             int srcSliceH, uint8_t* dst[], int dstStride[]){
    int i;

    /* copy Y */
    if (srcStride[0]==dstStride[0] && srcStride[0] > 0)
        memcpy(dst[0]+ srcSliceY*dstStride[0], src[0], srcStride[0]*srcSliceH);
    else{
        uint8_t *srcPtr= src[0];
        uint8_t *dstPtr= dst[0] + dstStride[0]*srcSliceY;

        for (i=0; i<srcSliceH; i++)
        {
            memcpy(dstPtr, srcPtr, c->srcW);
            srcPtr+= srcStride[0];
            dstPtr+= dstStride[0];
        }
    }

    if (c->dstFormat==PIX_FMT_YUV420P || c->dstFormat==PIX_FMT_YUVA420P){
        planar2x(src[1], dst[1], c->chrSrcW, c->chrSrcH, srcStride[1], dstStride[1]);
        planar2x(src[2], dst[2], c->chrSrcW, c->chrSrcH, srcStride[2], dstStride[2]);
    }else{
        planar2x(src[1], dst[2], c->chrSrcW, c->chrSrcH, srcStride[1], dstStride[2]);
        planar2x(src[2], dst[1], c->chrSrcW, c->chrSrcH, srcStride[2], dstStride[1]);
    }
    if (dst[3])
        fillPlane(dst[3], dstStride[3], c->srcW, srcSliceH, srcSliceY, 255);
    return srcSliceH;
}

/* unscaled copy like stuff (assumes nearly identical formats) */
static int packedCopy(SwsContext *c, uint8_t* src[], int srcStride[], int srcSliceY,
                      int srcSliceH, uint8_t* dst[], int dstStride[])
{
    if (dstStride[0]==srcStride[0] && srcStride[0] > 0)
        memcpy(dst[0] + dstStride[0]*srcSliceY, src[0], srcSliceH*dstStride[0]);
    else
    {
        int i;
        uint8_t *srcPtr= src[0];
        uint8_t *dstPtr= dst[0] + dstStride[0]*srcSliceY;
        int length=0;

        /* universal length finder */
        while(length+c->srcW <= FFABS(dstStride[0])
           && length+c->srcW <= FFABS(srcStride[0])) length+= c->srcW;
        assert(length!=0);

        for (i=0; i<srcSliceH; i++)
        {
            memcpy(dstPtr, srcPtr, length);
            srcPtr+= srcStride[0];
            dstPtr+= dstStride[0];
        }
    }
    return srcSliceH;
}

static int planarCopy(SwsContext *c, uint8_t* src[], int srcStride[], int srcSliceY,
                      int srcSliceH, uint8_t* dst[], int dstStride[])
{
    int plane;
    for (plane=0; plane<4; plane++)
    {
        int length= (plane==0 || plane==3) ? c->srcW  : -((-c->srcW  )>>c->chrDstHSubSample);
        int y=      (plane==0 || plane==3) ? srcSliceY: -((-srcSliceY)>>c->chrDstVSubSample);
        int height= (plane==0 || plane==3) ? srcSliceH: -((-srcSliceH)>>c->chrDstVSubSample);

        if (!dst[plane]) continue;
        // ignore palette for GRAY8
        if (plane == 1 && !dst[2]) continue;
        if (!src[plane] || (plane == 1 && !src[2]))
            fillPlane(dst[plane], dstStride[plane], length, height, y, (plane==3) ? 255 : 128);
        else
        {
            if (dstStride[plane]==srcStride[plane] && srcStride[plane] > 0)
                memcpy(dst[plane] + dstStride[plane]*y, src[plane], height*dstStride[plane]);
            else
            {
                int i;
                uint8_t *srcPtr= src[plane];
                uint8_t *dstPtr= dst[plane] + dstStride[plane]*y;
                for (i=0; i<height; i++)
                {
                    memcpy(dstPtr, srcPtr, length);
                    srcPtr+= srcStride[plane];
                    dstPtr+= dstStride[plane];
                }
            }
        }
    }
    return srcSliceH;
}
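
/* The -((-x)>>n) expressions above compute the chroma plane dimensions
 * rounded up (toward +infinity) instead of down, relying on arithmetic right
 * shift of negative values as the supported compilers implement it.  A quick
 * illustrative check, not used anywhere in the library: */
#if 0
static int ceil_rshift_sketch(int x, int n){ return -((-x)>>n); }
// ceil_rshift_sketch(7, 1) == 4   a 7-pixel-wide 4:2:0 row still needs 4 chroma samples
// ceil_rshift_sketch(8, 1) == 4
// ceil_rshift_sketch(9, 2) == 3   whereas 9>>2 would give only 2
#endif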

static int gray16togray(SwsContext *c, uint8_t* src[], int srcStride[], int srcSliceY,
                        int srcSliceH, uint8_t* dst[], int dstStride[]){

    int length= c->srcW;
    int y=      srcSliceY;
    int height= srcSliceH;
    int i, j;
    uint8_t *srcPtr= src[0];
    uint8_t *dstPtr= dst[0] + dstStride[0]*y;

    if (!isGray(c->dstFormat)){
        int height= -((-srcSliceH)>>c->chrDstVSubSample);
        memset(dst[1], 128, dstStride[1]*height);
        memset(dst[2], 128, dstStride[2]*height);
    }
    if (!isBE(c->srcFormat)) srcPtr++;
    for (i=0; i<height; i++)
    {
        for (j=0; j<length; j++) dstPtr[j] = srcPtr[j<<1];
        srcPtr+= srcStride[0];
        dstPtr+= dstStride[0];
    }
    if (dst[3])
        fillPlane(dst[3], dstStride[3], length, height, y, 255);
    return srcSliceH;
}

static int graytogray16(SwsContext *c, uint8_t* src[], int srcStride[], int srcSliceY,
                        int srcSliceH, uint8_t* dst[], int dstStride[]){

    int length= c->srcW;
    int y=      srcSliceY;
    int height= srcSliceH;
    int i, j;
    uint8_t *srcPtr= src[0];
    uint8_t *dstPtr= dst[0] + dstStride[0]*y;
    for (i=0; i<height; i++)
    {
        for (j=0; j<length; j++)
        {
            dstPtr[j<<1] = srcPtr[j];
            dstPtr[(j<<1)+1] = srcPtr[j];
        }
        srcPtr+= srcStride[0];
        dstPtr+= dstStride[0];
    }
    return srcSliceH;
}

static int gray16swap(SwsContext *c, uint8_t* src[], int srcStride[], int srcSliceY,
                      int srcSliceH, uint8_t* dst[], int dstStride[]){

    int length= c->srcW;
    int y=      srcSliceY;
    int height= srcSliceH;
    int i, j;
    uint16_t *srcPtr= (uint16_t*)src[0];
    uint16_t *dstPtr= (uint16_t*)(dst[0] + dstStride[0]*y/2);
    for (i=0; i<height; i++)
    {
        for (j=0; j<length; j++) dstPtr[j] = bswap_16(srcPtr[j]);
        srcPtr+= srcStride[0]/2;
        dstPtr+= dstStride[0]/2;
    }
    return srcSliceH;
}


static void getSubSampleFactors(int *h, int *v, int format){
    switch(format){
    case PIX_FMT_UYVY422:
    case PIX_FMT_YUYV422:
        *h=1;
        *v=0;
        break;
    case PIX_FMT_YUV420P:
    case PIX_FMT_YUVA420P:
    case PIX_FMT_GRAY16BE:
    case PIX_FMT_GRAY16LE:
    case PIX_FMT_GRAY8: //FIXME remove after different subsamplings are fully implemented
    case PIX_FMT_NV12:
    case PIX_FMT_NV21:
        *h=1;
        *v=1;
        break;
    case PIX_FMT_YUV440P:
        *h=0;
        *v=1;
        break;
    case PIX_FMT_YUV410P:
        *h=2;
        *v=2;
        break;
    case PIX_FMT_YUV444P:
        *h=0;
        *v=0;
        break;
    case PIX_FMT_YUV422P:
        *h=1;
        *v=0;
        break;
    case PIX_FMT_YUV411P:
        *h=2;
        *v=0;
        break;
    default:
        *h=0;
        *v=0;
        break;
    }
}

static uint16_t roundToInt16(int64_t f){
    int r= (f + (1<<15))>>16;
         if (r<-0x7FFF) return 0x8000;
    else if (r> 0x7FFF) return 0x7FFF;
    else                return r;
}

int sws_setColorspaceDetails(SwsContext *c, const int inv_table[4], int srcRange, const int table[4], int dstRange, int brightness, int contrast, int saturation){
    int64_t crv =  inv_table[0];
    int64_t cbu =  inv_table[1];
    int64_t cgu = -inv_table[2];
    int64_t cgv = -inv_table[3];
    int64_t cy  = 1<<16;
    int64_t oy  = 0;

    memcpy(c->srcColorspaceTable, inv_table, sizeof(int)*4);
    memcpy(c->dstColorspaceTable,     table, sizeof(int)*4);

    c->brightness= brightness;
    c->contrast  = contrast;
    c->saturation= saturation;
    c->srcRange  = srcRange;
    c->dstRange  = dstRange;
    if (isYUV(c->dstFormat) || isGray(c->dstFormat)) return -1;

    c->uOffset=   0x0400040004000400LL;
    c->vOffset=   0x0400040004000400LL;

    if (!srcRange){
        cy= (cy*255) / 219;
        oy= 16<<16;
    }else{
        crv= (crv*224) / 255;
        cbu= (cbu*224) / 255;
        cgu= (cgu*224) / 255;
        cgv= (cgv*224) / 255;
    }

    cy = (cy *contrast             )>>16;
    crv= (crv*contrast * saturation)>>32;
    cbu= (cbu*contrast * saturation)>>32;
    cgu= (cgu*contrast * saturation)>>32;
    cgv= (cgv*contrast * saturation)>>32;

    oy -= 256*brightness;

    c->yCoeff=    roundToInt16(cy *8192) * 0x0001000100010001ULL;
    c->vrCoeff=   roundToInt16(crv*8192) * 0x0001000100010001ULL;
    c->ubCoeff=   roundToInt16(cbu*8192) * 0x0001000100010001ULL;
    c->vgCoeff=   roundToInt16(cgv*8192) * 0x0001000100010001ULL;
    c->ugCoeff=   roundToInt16(cgu*8192) * 0x0001000100010001ULL;
    c->yOffset=   roundToInt16(oy *   8) * 0x0001000100010001ULL;

    c->yuv2rgb_y_coeff  = (int16_t)roundToInt16(cy <<13);
    c->yuv2rgb_y_offset = (int16_t)roundToInt16(oy << 9);
    c->yuv2rgb_v2r_coeff= (int16_t)roundToInt16(crv<<13);
    c->yuv2rgb_v2g_coeff= (int16_t)roundToInt16(cgv<<13);
    c->yuv2rgb_u2g_coeff= (int16_t)roundToInt16(cgu<<13);
    c->yuv2rgb_u2b_coeff= (int16_t)roundToInt16(cbu<<13);

    ff_yuv2rgb_c_init_tables(c, inv_table, srcRange, brightness, contrast, saturation);
    //FIXME factorize

#ifdef COMPILE_ALTIVEC
    if (c->flags & SWS_CPU_CAPS_ALTIVEC)
        ff_yuv2rgb_init_tables_altivec(c, inv_table, brightness, contrast, saturation);
#endif
    return 0;
}
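
/* Reading aid for the fixed-point arithmetic above: brightness, contrast and
 * saturation are passed as 16.16 fixed-point values (1<<16 meaning 1.0, as
 * used by the defaults in sws_getContext), so multiplying a coefficient by
 * one of them adds 16 fractional bits (hence the >>16 for cy) and by two of
 * them adds 32 (hence the >>32 for the chroma coefficients).  Sketch only,
 * with hypothetical helper names: */
#if 0
static int64_t scale_by_one_factor(int64_t coeff, int64_t factor_16_16)
{
    return (coeff * factor_16_16) >> 16;                  /* result stays 16.16 */
}
static int64_t scale_by_two_factors(int64_t coeff, int64_t f1_16_16, int64_t f2_16_16)
{
    return (coeff * f1_16_16 * f2_16_16) >> 32;           /* result stays 16.16 */
}
#endif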

int sws_getColorspaceDetails(SwsContext *c, int **inv_table, int *srcRange, int **table, int *dstRange, int *brightness, int *contrast, int *saturation){
    if (isYUV(c->dstFormat) || isGray(c->dstFormat)) return -1;

    *inv_table = c->srcColorspaceTable;
    *table     = c->dstColorspaceTable;
    *srcRange  = c->srcRange;
    *dstRange  = c->dstRange;
    *brightness= c->brightness;
    *contrast  = c->contrast;
    *saturation= c->saturation;

    return 0;
}

static int handle_jpeg(enum PixelFormat *format)
{
    switch (*format) {
        case PIX_FMT_YUVJ420P:
            *format = PIX_FMT_YUV420P;
            return 1;
        case PIX_FMT_YUVJ422P:
            *format = PIX_FMT_YUV422P;
            return 1;
        case PIX_FMT_YUVJ444P:
            *format = PIX_FMT_YUV444P;
            return 1;
        case PIX_FMT_YUVJ440P:
            *format = PIX_FMT_YUV440P;
            return 1;
        default:
            return 0;
    }
}
2377

    
2378
SwsContext *sws_getContext(int srcW, int srcH, enum PixelFormat srcFormat, int dstW, int dstH, enum PixelFormat dstFormat, int flags,
2379
                           SwsFilter *srcFilter, SwsFilter *dstFilter, double *param){
2380

    
2381
    SwsContext *c;
2382
    int i;
2383
    int usesVFilter, usesHFilter;
2384
    int unscaled, needsDither;
2385
    int srcRange, dstRange;
2386
    SwsFilter dummyFilter= {NULL, NULL, NULL, NULL};
2387
#if ARCH_X86
2388
    if (flags & SWS_CPU_CAPS_MMX)
2389
        __asm__ volatile("emms\n\t"::: "memory");
2390
#endif
2391

    
2392
#if !CONFIG_RUNTIME_CPUDETECT || !CONFIG_GPL //ensure that the flags match the compiled variant if cpudetect is off
2393
    flags &= ~(SWS_CPU_CAPS_MMX|SWS_CPU_CAPS_MMX2|SWS_CPU_CAPS_3DNOW|SWS_CPU_CAPS_ALTIVEC|SWS_CPU_CAPS_BFIN);
2394
#if   HAVE_MMX2
2395
    flags |= SWS_CPU_CAPS_MMX|SWS_CPU_CAPS_MMX2;
2396
#elif HAVE_AMD3DNOW
2397
    flags |= SWS_CPU_CAPS_MMX|SWS_CPU_CAPS_3DNOW;
2398
#elif HAVE_MMX
2399
    flags |= SWS_CPU_CAPS_MMX;
2400
#elif HAVE_ALTIVEC
2401
    flags |= SWS_CPU_CAPS_ALTIVEC;
2402
#elif ARCH_BFIN
2403
    flags |= SWS_CPU_CAPS_BFIN;
2404
#endif
2405
#endif /* CONFIG_RUNTIME_CPUDETECT */
2406
    if (clip_table[512] != 255) globalInit();
2407
    if (!rgb15to16) sws_rgb2rgb_init(flags);
2408

    
2409
    unscaled = (srcW == dstW && srcH == dstH);
2410
    needsDither= (isBGR(dstFormat) || isRGB(dstFormat))
2411
        && (fmt_depth(dstFormat))<24
2412
        && ((fmt_depth(dstFormat))<(fmt_depth(srcFormat)) || (!(isRGB(srcFormat) || isBGR(srcFormat))));
2413

    
2414
    srcRange = handle_jpeg(&srcFormat);
2415
    dstRange = handle_jpeg(&dstFormat);
2416

    
2417
    if (!isSupportedIn(srcFormat))
2418
    {
2419
        av_log(NULL, AV_LOG_ERROR, "swScaler: %s is not supported as input pixel format\n", sws_format_name(srcFormat));
2420
        return NULL;
2421
    }
2422
    if (!isSupportedOut(dstFormat))
2423
    {
2424
        av_log(NULL, AV_LOG_ERROR, "swScaler: %s is not supported as output pixel format\n", sws_format_name(dstFormat));
2425
        return NULL;
2426
    }
2427

    
2428
    i= flags & ( SWS_POINT
2429
                |SWS_AREA
2430
                |SWS_BILINEAR
2431
                |SWS_FAST_BILINEAR
2432
                |SWS_BICUBIC
2433
                |SWS_X
2434
                |SWS_GAUSS
2435
                |SWS_LANCZOS
2436
                |SWS_SINC
2437
                |SWS_SPLINE
2438
                |SWS_BICUBLIN);
2439
    if(!i || (i & (i-1)))
2440
    {
2441
        av_log(NULL, AV_LOG_ERROR, "swScaler: Exactly one scaler algorithm must be chosen\n");
2442
        return NULL;
2443
    }
2444

    
2445
    /* sanity check */
2446
    if (srcW<4 || srcH<1 || dstW<8 || dstH<1) //FIXME check if these are enough and try to lowwer them after fixing the relevant parts of the code
2447
    {
2448
        av_log(NULL, AV_LOG_ERROR, "swScaler: %dx%d -> %dx%d is invalid scaling dimension\n",
2449
               srcW, srcH, dstW, dstH);
2450
        return NULL;
2451
    }
2452
    if(srcW > VOFW || dstW > VOFW){
2453
        av_log(NULL, AV_LOG_ERROR, "swScaler: Compile-time maximum width is "AV_STRINGIFY(VOFW)" change VOF/VOFW and recompile\n");
2454
        return NULL;
2455
    }
2456

    
2457
    if (!dstFilter) dstFilter= &dummyFilter;
2458
    if (!srcFilter) srcFilter= &dummyFilter;
2459

    
2460
    c= av_mallocz(sizeof(SwsContext));
2461

    
2462
    c->av_class = &sws_context_class;
2463
    c->srcW= srcW;
2464
    c->srcH= srcH;
2465
    c->dstW= dstW;
2466
    c->dstH= dstH;
2467
    c->lumXInc= ((srcW<<16) + (dstW>>1))/dstW;
2468
    c->lumYInc= ((srcH<<16) + (dstH>>1))/dstH;
2469
    c->flags= flags;
2470
    c->dstFormat= dstFormat;
2471
    c->srcFormat= srcFormat;
2472
    c->vRounder= 4* 0x0001000100010001ULL;
2473

    
2474
    usesHFilter= usesVFilter= 0;
2475
    if (dstFilter->lumV && dstFilter->lumV->length>1) usesVFilter=1;
2476
    if (dstFilter->lumH && dstFilter->lumH->length>1) usesHFilter=1;
2477
    if (dstFilter->chrV && dstFilter->chrV->length>1) usesVFilter=1;
2478
    if (dstFilter->chrH && dstFilter->chrH->length>1) usesHFilter=1;
2479
    if (srcFilter->lumV && srcFilter->lumV->length>1) usesVFilter=1;
2480
    if (srcFilter->lumH && srcFilter->lumH->length>1) usesHFilter=1;
2481
    if (srcFilter->chrV && srcFilter->chrV->length>1) usesVFilter=1;
2482
    if (srcFilter->chrH && srcFilter->chrH->length>1) usesHFilter=1;
2483

    
2484
    getSubSampleFactors(&c->chrSrcHSubSample, &c->chrSrcVSubSample, srcFormat);
2485
    getSubSampleFactors(&c->chrDstHSubSample, &c->chrDstVSubSample, dstFormat);
2486

    
2487
    // reuse chroma for 2 pixels RGB/BGR unless user wants full chroma interpolation
2488
    if ((isBGR(dstFormat) || isRGB(dstFormat)) && !(flags&SWS_FULL_CHR_H_INT)) c->chrDstHSubSample=1;
2489

    
2490
    // drop some chroma lines if the user wants it
2491
    c->vChrDrop= (flags&SWS_SRC_V_CHR_DROP_MASK)>>SWS_SRC_V_CHR_DROP_SHIFT;
2492
    c->chrSrcVSubSample+= c->vChrDrop;
2493

    
2494
    // drop every other pixel for chroma calculation unless user wants full chroma
2495
    if ((isBGR(srcFormat) || isRGB(srcFormat)) && !(flags&SWS_FULL_CHR_H_INP)
2496
      && srcFormat!=PIX_FMT_RGB8      && srcFormat!=PIX_FMT_BGR8
2497
      && srcFormat!=PIX_FMT_RGB4      && srcFormat!=PIX_FMT_BGR4
2498
      && srcFormat!=PIX_FMT_RGB4_BYTE && srcFormat!=PIX_FMT_BGR4_BYTE
2499
      && ((dstW>>c->chrDstHSubSample) <= (srcW>>1) || (flags&(SWS_FAST_BILINEAR|SWS_POINT))))
2500
        c->chrSrcHSubSample=1;
2501

    
2502
    if (param){
2503
        c->param[0] = param[0];
2504
        c->param[1] = param[1];
2505
    }else{
2506
        c->param[0] =
2507
        c->param[1] = SWS_PARAM_DEFAULT;
2508
    }
2509

    
2510
    c->chrIntHSubSample= c->chrDstHSubSample;
2511
    c->chrIntVSubSample= c->chrSrcVSubSample;
2512

    
2513
    // Note the -((-x)>>y) is so that we always round toward +inf.
2514
    c->chrSrcW= -((-srcW) >> c->chrSrcHSubSample);
2515
    c->chrSrcH= -((-srcH) >> c->chrSrcVSubSample);
2516
    c->chrDstW= -((-dstW) >> c->chrDstHSubSample);
2517
    c->chrDstH= -((-dstH) >> c->chrDstVSubSample);
2518

    
2519
    sws_setColorspaceDetails(c, ff_yuv2rgb_coeffs[SWS_CS_DEFAULT], srcRange, ff_yuv2rgb_coeffs[SWS_CS_DEFAULT] /* FIXME*/, dstRange, 0, 1<<16, 1<<16);
2520

    
2521
    /* unscaled special cases */
2522
    if (unscaled && !usesHFilter && !usesVFilter && (srcRange == dstRange || isBGR(dstFormat) || isRGB(dstFormat)))
2523
    {
2524
        /* yv12_to_nv12 */
2525
        if ((srcFormat == PIX_FMT_YUV420P || srcFormat == PIX_FMT_YUVA420P) && (dstFormat == PIX_FMT_NV12 || dstFormat == PIX_FMT_NV21))
2526
        {
2527
            c->swScale= PlanarToNV12Wrapper;
2528
        }
2529
        /* yuv2bgr */
2530
        if ((srcFormat==PIX_FMT_YUV420P || srcFormat==PIX_FMT_YUV422P || srcFormat==PIX_FMT_YUVA420P) && (isBGR(dstFormat) || isRGB(dstFormat))
2531
            && !(flags & SWS_ACCURATE_RND) && !(dstH&1))
2532
        {
2533
            c->swScale= ff_yuv2rgb_get_func_ptr(c);
2534
        }
2535

    
2536
        if (srcFormat==PIX_FMT_YUV410P && (dstFormat==PIX_FMT_YUV420P || dstFormat==PIX_FMT_YUVA420P) && !(flags & SWS_BITEXACT))
2537
        {
2538
            c->swScale= yvu9toyv12Wrapper;
2539
        }
2540

    
2541
        /* bgr24toYV12 */
2542
        if (srcFormat==PIX_FMT_BGR24 && (dstFormat==PIX_FMT_YUV420P || dstFormat==PIX_FMT_YUVA420P) && !(flags & SWS_ACCURATE_RND))
2543
            c->swScale= bgr24toyv12Wrapper;
2544

    
2545
        /* RGB/BGR -> RGB/BGR (no dither needed forms) */
2546
        if (  (isBGR(srcFormat) || isRGB(srcFormat))
2547
           && (isBGR(dstFormat) || isRGB(dstFormat))
2548
           && srcFormat != PIX_FMT_BGR8      && dstFormat != PIX_FMT_BGR8
2549
           && srcFormat != PIX_FMT_RGB8      && dstFormat != PIX_FMT_RGB8
2550
           && srcFormat != PIX_FMT_BGR4      && dstFormat != PIX_FMT_BGR4
2551
           && srcFormat != PIX_FMT_RGB4      && dstFormat != PIX_FMT_RGB4
2552
           && srcFormat != PIX_FMT_BGR4_BYTE && dstFormat != PIX_FMT_BGR4_BYTE
2553
           && srcFormat != PIX_FMT_RGB4_BYTE && dstFormat != PIX_FMT_RGB4_BYTE
2554
           && srcFormat != PIX_FMT_MONOBLACK && dstFormat != PIX_FMT_MONOBLACK
2555
           && srcFormat != PIX_FMT_MONOWHITE && dstFormat != PIX_FMT_MONOWHITE
2556
                                             && dstFormat != PIX_FMT_RGB32_1
2557
                                             && dstFormat != PIX_FMT_BGR32_1
2558
           && (!needsDither || (c->flags&(SWS_FAST_BILINEAR|SWS_POINT))))
2559
             c->swScale= rgb2rgbWrapper;
2560

    
2561
        if ((usePal(srcFormat) && (
2562
                 dstFormat == PIX_FMT_RGB32   ||
2563
                 dstFormat == PIX_FMT_RGB32_1 ||
2564
                 dstFormat == PIX_FMT_RGB24   ||
2565
                 dstFormat == PIX_FMT_BGR32   ||
2566
                 dstFormat == PIX_FMT_BGR32_1 ||
2567
                 dstFormat == PIX_FMT_BGR24)))
2568
             c->swScale= pal2rgbWrapper;
2569

    
2570
        if (srcFormat == PIX_FMT_YUV422P)
2571
        {
2572
            if (dstFormat == PIX_FMT_YUYV422)
2573
                c->swScale= YUV422PToYuy2Wrapper;
2574
            else if (dstFormat == PIX_FMT_UYVY422)
2575
                c->swScale= YUV422PToUyvyWrapper;
2576
        }
2577

    
2578
        /* LQ converters if -sws 0 or -sws 4*/
2579
        if (c->flags&(SWS_FAST_BILINEAR|SWS_POINT)){
2580
            /* yv12_to_yuy2 */
2581
            if (srcFormat == PIX_FMT_YUV420P || srcFormat == PIX_FMT_YUVA420P)
2582
            {
2583
                if (dstFormat == PIX_FMT_YUYV422)
2584
                    c->swScale= PlanarToYuy2Wrapper;
2585
                else if (dstFormat == PIX_FMT_UYVY422)
2586
                    c->swScale= PlanarToUyvyWrapper;
2587
            }
2588
        }
2589
        if(srcFormat == PIX_FMT_YUYV422 && (dstFormat == PIX_FMT_YUV420P || dstFormat == PIX_FMT_YUVA420P))
2590
            c->swScale= YUYV2YUV420Wrapper;
2591
        if(srcFormat == PIX_FMT_UYVY422 && (dstFormat == PIX_FMT_YUV420P || dstFormat == PIX_FMT_YUVA420P))
2592
            c->swScale= UYVY2YUV420Wrapper;
2593
        if(srcFormat == PIX_FMT_YUYV422 && dstFormat == PIX_FMT_YUV422P)
2594
            c->swScale= YUYV2YUV422Wrapper;
2595
        if(srcFormat == PIX_FMT_UYVY422 && dstFormat == PIX_FMT_YUV422P)
2596
            c->swScale= UYVY2YUV422Wrapper;
2597

    
2598
#ifdef COMPILE_ALTIVEC
2599
        if ((c->flags & SWS_CPU_CAPS_ALTIVEC) &&
2600
            !(c->flags & SWS_BITEXACT) &&
2601
            srcFormat == PIX_FMT_YUV420P) {
2602
          // unscaled YV12 -> packed YUV, we want speed
2603
          if (dstFormat == PIX_FMT_YUYV422)
2604
              c->swScale= yv12toyuy2_unscaled_altivec;
2605
          else if (dstFormat == PIX_FMT_UYVY422)
2606
              c->swScale= yv12touyvy_unscaled_altivec;
2607
        }
2608
#endif
2609

    
2610
        /* simple copy */
2611
        if (  srcFormat == dstFormat
2612
            || (srcFormat == PIX_FMT_YUVA420P && dstFormat == PIX_FMT_YUV420P)
2613
            || (srcFormat == PIX_FMT_YUV420P && dstFormat == PIX_FMT_YUVA420P)
2614
            || (isPlanarYUV(srcFormat) && isGray(dstFormat))
2615
            || (isPlanarYUV(dstFormat) && isGray(srcFormat)))
2616
        {
2617
            if (isPacked(c->srcFormat))
2618
                c->swScale= packedCopy;
2619
            else /* Planar YUV or gray */
2620
                c->swScale= planarCopy;
2621
        }
2622

    
2623
        /* gray16{le,be} conversions */
2624
        if (isGray16(srcFormat) && (isPlanarYUV(dstFormat) || (dstFormat == PIX_FMT_GRAY8)))
2625
        {
2626
            c->swScale= gray16togray;
2627
        }
2628
        if ((isPlanarYUV(srcFormat) || (srcFormat == PIX_FMT_GRAY8)) && isGray16(dstFormat))
2629
        {
2630
            c->swScale= graytogray16;
2631
        }
2632
        if (srcFormat != dstFormat && isGray16(srcFormat) && isGray16(dstFormat))
2633
        {
2634
            c->swScale= gray16swap;
2635
        }
2636

    
2637
#if ARCH_BFIN
2638
        if (flags & SWS_CPU_CAPS_BFIN)
2639
            ff_bfin_get_unscaled_swscale (c);
2640
#endif
2641

    
2642
        if (c->swScale){
2643
            if (flags&SWS_PRINT_INFO)
2644
                av_log(c, AV_LOG_INFO, "using unscaled %s -> %s special converter\n",
2645
                                sws_format_name(srcFormat), sws_format_name(dstFormat));
2646
            return c;
2647
        }
2648
    }
2649

    
2650
    if (flags & SWS_CPU_CAPS_MMX2)
2651
    {
2652
        c->canMMX2BeUsed= (dstW >=srcW && (dstW&31)==0 && (srcW&15)==0) ? 1 : 0;
2653
        if (!c->canMMX2BeUsed && dstW >=srcW && (srcW&15)==0 && (flags&SWS_FAST_BILINEAR))
2654
        {
2655
            if (flags&SWS_PRINT_INFO)
2656
                av_log(c, AV_LOG_INFO, "output width is not a multiple of 32 -> no MMX2 scaler\n");
2657
        }
2658
        if (usesHFilter) c->canMMX2BeUsed=0;
2659
    }
2660
    else
2661
        c->canMMX2BeUsed=0;
2662

    
2663
    c->chrXInc= ((c->chrSrcW<<16) + (c->chrDstW>>1))/c->chrDstW;
2664
    c->chrYInc= ((c->chrSrcH<<16) + (c->chrDstH>>1))/c->chrDstH;
2665

    
2666
    // match pixel 0 of the src to pixel 0 of dst and match pixel n-2 of src to pixel n-2 of dst
2667
    // but only for the FAST_BILINEAR mode otherwise do correct scaling
2668
    // n-2 is the last chrominance sample available
2669
    // this is not perfect, but no one should notice the difference, the more correct variant
2670
    // would be like the vertical one, but that would require some special code for the
2671
    // first and last pixel
2672
    if (flags&SWS_FAST_BILINEAR)
2673
    {
2674
        if (c->canMMX2BeUsed)
2675
        {
2676
            c->lumXInc+= 20;
2677
            c->chrXInc+= 20;
2678
        }
2679
        //we don't use the x86 asm scaler if MMX is available
2680
        else if (flags & SWS_CPU_CAPS_MMX)
2681
        {
2682
            c->lumXInc = ((srcW-2)<<16)/(dstW-2) - 20;
2683
            c->chrXInc = ((c->chrSrcW-2)<<16)/(c->chrDstW-2) - 20;
2684
        }
2685
    }
2686

    
2687
    /* precalculate horizontal scaler filter coefficients */
2688
    {
2689
        const int filterAlign=
2690
            (flags & SWS_CPU_CAPS_MMX) ? 4 :
2691
            (flags & SWS_CPU_CAPS_ALTIVEC) ? 8 :
2692
            1;
2693

    
2694
        initFilter(&c->hLumFilter, &c->hLumFilterPos, &c->hLumFilterSize, c->lumXInc,
2695
                   srcW      ,       dstW, filterAlign, 1<<14,
2696
                   (flags&SWS_BICUBLIN) ? (flags|SWS_BICUBIC)  : flags,
2697
                   srcFilter->lumH, dstFilter->lumH, c->param);
2698
        initFilter(&c->hChrFilter, &c->hChrFilterPos, &c->hChrFilterSize, c->chrXInc,
2699
                   c->chrSrcW, c->chrDstW, filterAlign, 1<<14,
2700
                   (flags&SWS_BICUBLIN) ? (flags|SWS_BILINEAR) : flags,
2701
                   srcFilter->chrH, dstFilter->chrH, c->param);
2702

    
2703
#define MAX_FUNNY_CODE_SIZE 10000
2704
#if defined(COMPILE_MMX2)
2705
// can't downscale !!!
2706
        if (c->canMMX2BeUsed && (flags & SWS_FAST_BILINEAR))
2707
        {
2708
#ifdef MAP_ANONYMOUS
2709
            c->funnyYCode  = mmap(NULL, MAX_FUNNY_CODE_SIZE, PROT_EXEC | PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS, 0, 0);
2710
            c->funnyUVCode = mmap(NULL, MAX_FUNNY_CODE_SIZE, PROT_EXEC | PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS, 0, 0);
2711
#elif HAVE_VIRTUALALLOC
2712
            c->funnyYCode  = VirtualAlloc(NULL, MAX_FUNNY_CODE_SIZE, MEM_COMMIT, PAGE_EXECUTE_READWRITE);
2713
            c->funnyUVCode = VirtualAlloc(NULL, MAX_FUNNY_CODE_SIZE, MEM_COMMIT, PAGE_EXECUTE_READWRITE);
2714
#else
2715
            c->funnyYCode  = av_malloc(MAX_FUNNY_CODE_SIZE);
2716
            c->funnyUVCode = av_malloc(MAX_FUNNY_CODE_SIZE);
2717
#endif
2718

    
2719
            c->lumMmx2Filter   = av_malloc((dstW        /8+8)*sizeof(int16_t));
2720
            c->chrMmx2Filter   = av_malloc((c->chrDstW  /4+8)*sizeof(int16_t));
2721
            c->lumMmx2FilterPos= av_malloc((dstW      /2/8+8)*sizeof(int32_t));
2722
            c->chrMmx2FilterPos= av_malloc((c->chrDstW/2/4+8)*sizeof(int32_t));
2723

    
2724
            initMMX2HScaler(      dstW, c->lumXInc, c->funnyYCode , c->lumMmx2Filter, c->lumMmx2FilterPos, 8);
2725
            initMMX2HScaler(c->chrDstW, c->chrXInc, c->funnyUVCode, c->chrMmx2Filter, c->chrMmx2FilterPos, 4);
2726
        }
2727
#endif /* defined(COMPILE_MMX2) */
2728
    } // initialize horizontal stuff
2729

    
2730

    
2731

    
2732
    /* precalculate vertical scaler filter coefficients */
2733
    {
2734
        const int filterAlign=
2735
            (flags & SWS_CPU_CAPS_MMX) && (flags & SWS_ACCURATE_RND) ? 2 :
2736
            (flags & SWS_CPU_CAPS_ALTIVEC) ? 8 :
2737
            1;
2738

    
2739
        initFilter(&c->vLumFilter, &c->vLumFilterPos, &c->vLumFilterSize, c->lumYInc,
2740
                   srcH      ,        dstH, filterAlign, (1<<12),
2741
                   (flags&SWS_BICUBLIN) ? (flags|SWS_BICUBIC)  : flags,
2742
                   srcFilter->lumV, dstFilter->lumV, c->param);
2743
        initFilter(&c->vChrFilter, &c->vChrFilterPos, &c->vChrFilterSize, c->chrYInc,
2744
                   c->chrSrcH, c->chrDstH, filterAlign, (1<<12),
2745
                   (flags&SWS_BICUBLIN) ? (flags|SWS_BILINEAR) : flags,
2746
                   srcFilter->chrV, dstFilter->chrV, c->param);
2747

    
2748
#if HAVE_ALTIVEC
2749
        c->vYCoeffsBank = av_malloc(sizeof (vector signed short)*c->vLumFilterSize*c->dstH);
2750
        c->vCCoeffsBank = av_malloc(sizeof (vector signed short)*c->vChrFilterSize*c->chrDstH);
2751

    
2752
        for (i=0;i<c->vLumFilterSize*c->dstH;i++) {
2753
            int j;
2754
            short *p = (short *)&c->vYCoeffsBank[i];
2755
            for (j=0;j<8;j++)
2756
                p[j] = c->vLumFilter[i];
2757
        }
2758

    
2759
        for (i=0;i<c->vChrFilterSize*c->chrDstH;i++) {
2760
            int j;
2761
            short *p = (short *)&c->vCCoeffsBank[i];
2762
            for (j=0;j<8;j++)
2763
                p[j] = c->vChrFilter[i];
2764
        }
2765
#endif
2766
    }
2767

    
2768
    // calculate buffer sizes so that they won't run out while handling these damn slices
2769
    c->vLumBufSize= c->vLumFilterSize;
2770
    c->vChrBufSize= c->vChrFilterSize;
2771
    for (i=0; i<dstH; i++)
2772
    {
2773
        int chrI= i*c->chrDstH / dstH;
2774
        int nextSlice= FFMAX(c->vLumFilterPos[i   ] + c->vLumFilterSize - 1,
2775
                           ((c->vChrFilterPos[chrI] + c->vChrFilterSize - 1)<<c->chrSrcVSubSample));
2776

    
2777
        nextSlice>>= c->chrSrcVSubSample;
2778
        nextSlice<<= c->chrSrcVSubSample;
2779
        if (c->vLumFilterPos[i   ] + c->vLumBufSize < nextSlice)
2780
            c->vLumBufSize= nextSlice - c->vLumFilterPos[i];
2781
        if (c->vChrFilterPos[chrI] + c->vChrBufSize < (nextSlice>>c->chrSrcVSubSample))
2782
            c->vChrBufSize= (nextSlice>>c->chrSrcVSubSample) - c->vChrFilterPos[chrI];
2783
    }
2784

    
2785
    // allocate pixbufs (we use dynamic allocation because otherwise we would need to
2786
    c->lumPixBuf= av_malloc(c->vLumBufSize*2*sizeof(int16_t*));
2787
    c->chrPixBuf= av_malloc(c->vChrBufSize*2*sizeof(int16_t*));
2788
    if (CONFIG_SWSCALE_ALPHA && isALPHA(c->srcFormat) && isALPHA(c->dstFormat))
2789
        c->alpPixBuf= av_malloc(c->vLumBufSize*2*sizeof(int16_t*));
2790
    //Note we need at least one pixel more at the end because of the MMX code (just in case someone wanna replace the 4000/8000)
2791
    /* align at 16 bytes for AltiVec */
2792
    for (i=0; i<c->vLumBufSize; i++)
2793
        c->lumPixBuf[i]= c->lumPixBuf[i+c->vLumBufSize]= av_mallocz(VOF+1);
2794
    for (i=0; i<c->vChrBufSize; i++)
2795
        c->chrPixBuf[i]= c->chrPixBuf[i+c->vChrBufSize]= av_malloc((VOF+1)*2);
2796
    if (CONFIG_SWSCALE_ALPHA && c->alpPixBuf)
2797
        for (i=0; i<c->vLumBufSize; i++)
2798
            c->alpPixBuf[i]= c->alpPixBuf[i+c->vLumBufSize]= av_mallocz(VOF+1);
2799

    
2800
    //try to avoid drawing green stuff between the right end and the stride end
2801
    for (i=0; i<c->vChrBufSize; i++) memset(c->chrPixBuf[i], 64, (VOF+1)*2);
2802

    
2803
    assert(2*VOFW == VOF);
2804

    
2805
    assert(c->chrDstH <= dstH);

    if (flags&SWS_PRINT_INFO)
    {
#ifdef DITHER1XBPP
        const char *dither= " dithered";
#else
        const char *dither= "";
#endif
        if (flags&SWS_FAST_BILINEAR)
            av_log(c, AV_LOG_INFO, "FAST_BILINEAR scaler, ");
        else if (flags&SWS_BILINEAR)
            av_log(c, AV_LOG_INFO, "BILINEAR scaler, ");
        else if (flags&SWS_BICUBIC)
            av_log(c, AV_LOG_INFO, "BICUBIC scaler, ");
        else if (flags&SWS_X)
            av_log(c, AV_LOG_INFO, "Experimental scaler, ");
        else if (flags&SWS_POINT)
            av_log(c, AV_LOG_INFO, "Nearest Neighbor / POINT scaler, ");
        else if (flags&SWS_AREA)
            av_log(c, AV_LOG_INFO, "Area Averaging scaler, ");
        else if (flags&SWS_BICUBLIN)
            av_log(c, AV_LOG_INFO, "luma BICUBIC / chroma BILINEAR scaler, ");
        else if (flags&SWS_GAUSS)
            av_log(c, AV_LOG_INFO, "Gaussian scaler, ");
        else if (flags&SWS_SINC)
            av_log(c, AV_LOG_INFO, "Sinc scaler, ");
        else if (flags&SWS_LANCZOS)
            av_log(c, AV_LOG_INFO, "Lanczos scaler, ");
        else if (flags&SWS_SPLINE)
            av_log(c, AV_LOG_INFO, "Bicubic spline scaler, ");
        else
            av_log(c, AV_LOG_INFO, "ehh flags invalid?! ");

        if (dstFormat==PIX_FMT_BGR555 || dstFormat==PIX_FMT_BGR565)
            av_log(c, AV_LOG_INFO, "from %s to%s %s ",
                   sws_format_name(srcFormat), dither, sws_format_name(dstFormat));
        else
            av_log(c, AV_LOG_INFO, "from %s to %s ",
                   sws_format_name(srcFormat), sws_format_name(dstFormat));

        if (flags & SWS_CPU_CAPS_MMX2)
            av_log(c, AV_LOG_INFO, "using MMX2\n");
        else if (flags & SWS_CPU_CAPS_3DNOW)
            av_log(c, AV_LOG_INFO, "using 3DNOW\n");
        else if (flags & SWS_CPU_CAPS_MMX)
            av_log(c, AV_LOG_INFO, "using MMX\n");
        else if (flags & SWS_CPU_CAPS_ALTIVEC)
            av_log(c, AV_LOG_INFO, "using AltiVec\n");
        else
            av_log(c, AV_LOG_INFO, "using C\n");
    }

    if (flags & SWS_PRINT_INFO)
    {
        if (flags & SWS_CPU_CAPS_MMX)
        {
            if (c->canMMX2BeUsed && (flags&SWS_FAST_BILINEAR))
                av_log(c, AV_LOG_VERBOSE, "using FAST_BILINEAR MMX2 scaler for horizontal scaling\n");
            else
            {
                if (c->hLumFilterSize==4)
                    av_log(c, AV_LOG_VERBOSE, "using 4-tap MMX scaler for horizontal luminance scaling\n");
                else if (c->hLumFilterSize==8)
                    av_log(c, AV_LOG_VERBOSE, "using 8-tap MMX scaler for horizontal luminance scaling\n");
                else
                    av_log(c, AV_LOG_VERBOSE, "using n-tap MMX scaler for horizontal luminance scaling\n");

                if (c->hChrFilterSize==4)
                    av_log(c, AV_LOG_VERBOSE, "using 4-tap MMX scaler for horizontal chrominance scaling\n");
                else if (c->hChrFilterSize==8)
                    av_log(c, AV_LOG_VERBOSE, "using 8-tap MMX scaler for horizontal chrominance scaling\n");
                else
                    av_log(c, AV_LOG_VERBOSE, "using n-tap MMX scaler for horizontal chrominance scaling\n");
            }
        }
        else
        {
#if ARCH_X86
            av_log(c, AV_LOG_VERBOSE, "using x86 asm scaler for horizontal scaling\n");
#else
            if (flags & SWS_FAST_BILINEAR)
                av_log(c, AV_LOG_VERBOSE, "using FAST_BILINEAR C scaler for horizontal scaling\n");
            else
                av_log(c, AV_LOG_VERBOSE, "using C scaler for horizontal scaling\n");
#endif
        }
        if (isPlanarYUV(dstFormat))
        {
            if (c->vLumFilterSize==1)
                av_log(c, AV_LOG_VERBOSE, "using 1-tap %s \"scaler\" for vertical scaling (YV12 like)\n", (flags & SWS_CPU_CAPS_MMX) ? "MMX" : "C");
            else
                av_log(c, AV_LOG_VERBOSE, "using n-tap %s scaler for vertical scaling (YV12 like)\n", (flags & SWS_CPU_CAPS_MMX) ? "MMX" : "C");
        }
        else
        {
            if (c->vLumFilterSize==1 && c->vChrFilterSize==2)
                av_log(c, AV_LOG_VERBOSE, "using 1-tap %s \"scaler\" for vertical luminance scaling (BGR)\n"
                       "      2-tap scaler for vertical chrominance scaling (BGR)\n", (flags & SWS_CPU_CAPS_MMX) ? "MMX" : "C");
            else if (c->vLumFilterSize==2 && c->vChrFilterSize==2)
                av_log(c, AV_LOG_VERBOSE, "using 2-tap linear %s scaler for vertical scaling (BGR)\n", (flags & SWS_CPU_CAPS_MMX) ? "MMX" : "C");
            else
                av_log(c, AV_LOG_VERBOSE, "using n-tap %s scaler for vertical scaling (BGR)\n", (flags & SWS_CPU_CAPS_MMX) ? "MMX" : "C");
        }

        if (dstFormat==PIX_FMT_BGR24)
            av_log(c, AV_LOG_VERBOSE, "using %s YV12->BGR24 converter\n",
                   (flags & SWS_CPU_CAPS_MMX2) ? "MMX2" : ((flags & SWS_CPU_CAPS_MMX) ? "MMX" : "C"));
        else if (dstFormat==PIX_FMT_RGB32)
            av_log(c, AV_LOG_VERBOSE, "using %s YV12->BGR32 converter\n", (flags & SWS_CPU_CAPS_MMX) ? "MMX" : "C");
        else if (dstFormat==PIX_FMT_BGR565)
            av_log(c, AV_LOG_VERBOSE, "using %s YV12->BGR16 converter\n", (flags & SWS_CPU_CAPS_MMX) ? "MMX" : "C");
        else if (dstFormat==PIX_FMT_BGR555)
            av_log(c, AV_LOG_VERBOSE, "using %s YV12->BGR15 converter\n", (flags & SWS_CPU_CAPS_MMX) ? "MMX" : "C");

        av_log(c, AV_LOG_VERBOSE, "%dx%d -> %dx%d\n", srcW, srcH, dstW, dstH);
    }
    if (flags & SWS_PRINT_INFO)
    {
        av_log(c, AV_LOG_DEBUG, "lum srcW=%d srcH=%d dstW=%d dstH=%d xInc=%d yInc=%d\n",
               c->srcW, c->srcH, c->dstW, c->dstH, c->lumXInc, c->lumYInc);
        av_log(c, AV_LOG_DEBUG, "chr srcW=%d srcH=%d dstW=%d dstH=%d xInc=%d yInc=%d\n",
               c->chrSrcW, c->chrSrcH, c->chrDstW, c->chrDstH, c->chrXInc, c->chrYInc);
    }

    c->swScale= getSwsFunc(c);
    return c;
}
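
/*
 * Illustrative usage sketch (not from the original source): typical lifetime
 * of a context created by sws_getContext() above. The variables srcW, srcH,
 * dstW, dstH, src, dst and the stride arrays are assumed to be set up by the
 * caller.
 *
 *     struct SwsContext *ctx = sws_getContext(srcW, srcH, PIX_FMT_YUV420P,
 *                                             dstW, dstH, PIX_FMT_RGB24,
 *                                             SWS_BICUBIC, NULL, NULL, NULL);
 *     if (ctx) {
 *         sws_scale(ctx, src, srcStride, 0, srcH, dst, dstStride);
 *         sws_freeContext(ctx);
 *     }
 */
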
static void reset_ptr(uint8_t* src[], int format){
    if(!isALPHA(format))
        src[3]=NULL;
    if(!isPlanarYUV(format)){
        src[3]=src[2]=NULL;
        if(   format != PIX_FMT_PAL8
           && format != PIX_FMT_RGB8
           && format != PIX_FMT_BGR8
           && format != PIX_FMT_RGB4_BYTE
           && format != PIX_FMT_BGR4_BYTE
          )
            src[1]= NULL;
    }
}

/**
 * swscale wrapper, so we don't need to export the SwsContext.
 * Assumes planar YUV to be in YUV order instead of YVU.
 */
int sws_scale(SwsContext *c, uint8_t* src[], int srcStride[], int srcSliceY,
              int srcSliceH, uint8_t* dst[], int dstStride[]){
    int i;
    uint8_t* src2[4]= {src[0], src[1], src[2], src[3]};
    uint8_t* dst2[4]= {dst[0], dst[1], dst[2], dst[3]};

    if (c->sliceDir == 0 && srcSliceY != 0 && srcSliceY + srcSliceH != c->srcH) {
        av_log(c, AV_LOG_ERROR, "Slices start in the middle!\n");
        return 0;
    }
    if (c->sliceDir == 0) {
        if (srcSliceY == 0) c->sliceDir = 1; else c->sliceDir = -1;
    }

    if (usePal(c->srcFormat)){
        for (i=0; i<256; i++){
            int p, r, g, b,y,u,v;
            if(c->srcFormat == PIX_FMT_PAL8){
                p=((uint32_t*)(src[1]))[i];
                r= (p>>16)&0xFF;
                g= (p>> 8)&0xFF;
                b=  p     &0xFF;
            }else if(c->srcFormat == PIX_FMT_RGB8){
                r= (i>>5    )*36;
                g= ((i>>2)&7)*36;
                b= (i&3     )*85;
            }else if(c->srcFormat == PIX_FMT_BGR8){
                b= (i>>6    )*85;
                g= ((i>>3)&7)*36;
                r= (i&7     )*36;
            }else if(c->srcFormat == PIX_FMT_RGB4_BYTE){
                r= (i>>3    )*255;
                g= ((i>>1)&3)*85;
                b= (i&1     )*255;
            }else {
                assert(c->srcFormat == PIX_FMT_BGR4_BYTE);
                b= (i>>3    )*255;
                g= ((i>>1)&3)*85;
                r= (i&1     )*255;
            }
            y= av_clip_uint8((RY*r + GY*g + BY*b + ( 33<<(RGB2YUV_SHIFT-1)))>>RGB2YUV_SHIFT);
            u= av_clip_uint8((RU*r + GU*g + BU*b + (257<<(RGB2YUV_SHIFT-1)))>>RGB2YUV_SHIFT);
            v= av_clip_uint8((RV*r + GV*g + BV*b + (257<<(RGB2YUV_SHIFT-1)))>>RGB2YUV_SHIFT);
            c->pal_yuv[i]= y + (u<<8) + (v<<16);

            switch(c->dstFormat) {
            case PIX_FMT_BGR32:
#ifndef WORDS_BIGENDIAN
            case PIX_FMT_RGB24:
#endif
                c->pal_rgb[i]=  r + (g<<8) + (b<<16);
                break;
            case PIX_FMT_BGR32_1:
#ifdef  WORDS_BIGENDIAN
            case PIX_FMT_BGR24:
#endif
                c->pal_rgb[i]= (r + (g<<8) + (b<<16)) << 8;
                break;
            case PIX_FMT_RGB32_1:
#ifdef  WORDS_BIGENDIAN
            case PIX_FMT_RGB24:
#endif
                c->pal_rgb[i]= (b + (g<<8) + (r<<16)) << 8;
                break;
            case PIX_FMT_RGB32:
#ifndef WORDS_BIGENDIAN
            case PIX_FMT_BGR24:
#endif
            default:
                c->pal_rgb[i]=  b + (g<<8) + (r<<16);
            }
        }
    }

    // copy strides, so they can safely be modified
    if (c->sliceDir == 1) {
        // slices go from top to bottom
        int srcStride2[4]= {srcStride[0], srcStride[1], srcStride[2], srcStride[3]};
        int dstStride2[4]= {dstStride[0], dstStride[1], dstStride[2], dstStride[3]};

        reset_ptr(src2, c->srcFormat);
        reset_ptr(dst2, c->dstFormat);

        return c->swScale(c, src2, srcStride2, srcSliceY, srcSliceH, dst2, dstStride2);
    } else {
        // slices go from bottom to top => we flip the image internally
        int srcStride2[4]= {-srcStride[0], -srcStride[1], -srcStride[2], -srcStride[3]};
        int dstStride2[4]= {-dstStride[0], -dstStride[1], -dstStride[2], -dstStride[3]};

        src2[0] += (srcSliceH-1)*srcStride[0];
        if (!usePal(c->srcFormat))
            src2[1] += ((srcSliceH>>c->chrSrcVSubSample)-1)*srcStride[1];
        src2[2] += ((srcSliceH>>c->chrSrcVSubSample)-1)*srcStride[2];
        src2[3] += (srcSliceH-1)*srcStride[3];
        dst2[0] += ( c->dstH                      -1)*dstStride[0];
        dst2[1] += ((c->dstH>>c->chrDstVSubSample)-1)*dstStride[1];
        dst2[2] += ((c->dstH>>c->chrDstVSubSample)-1)*dstStride[2];
        dst2[3] += ( c->dstH                      -1)*dstStride[3];

        reset_ptr(src2, c->srcFormat);
        reset_ptr(dst2, c->dstFormat);

        return c->swScale(c, src2, srcStride2, c->srcH-srcSliceY-srcSliceH, srcSliceH, dst2, dstStride2);
    }
}
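
/*
 * Illustrative sketch (not from the original source): sws_scale() also
 * accepts the input as consecutive top-to-bottom slices; the source pointers
 * must point at the slice itself while srcSliceY gives its position in the
 * full frame. Sketch assuming a 4:2:0 source, an even sliceH, and
 * caller-provided ctx/src/dst/stride variables.
 *
 *     for (y = 0; y < srcH; y += sliceH) {
 *         int h = FFMIN(sliceH, srcH - y);
 *         uint8_t *slice[4] = { src[0] +  y     * srcStride[0],
 *                               src[1] + (y>>1) * srcStride[1],
 *                               src[2] + (y>>1) * srcStride[2],
 *                               NULL };
 *         sws_scale(ctx, slice, srcStride, y, h, dst, dstStride);
 *     }
 */
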
#if LIBSWSCALE_VERSION_MAJOR < 1
int sws_scale_ordered(SwsContext *c, uint8_t* src[], int srcStride[], int srcSliceY,
                      int srcSliceH, uint8_t* dst[], int dstStride[]){
    return sws_scale(c, src, srcStride, srcSliceY, srcSliceH, dst, dstStride);
}
#endif

SwsFilter *sws_getDefaultFilter(float lumaGBlur, float chromaGBlur,
                                float lumaSharpen, float chromaSharpen,
                                float chromaHShift, float chromaVShift,
                                int verbose)
{
    SwsFilter *filter= av_malloc(sizeof(SwsFilter));

    if (lumaGBlur!=0.0){
        filter->lumH= sws_getGaussianVec(lumaGBlur, 3.0);
        filter->lumV= sws_getGaussianVec(lumaGBlur, 3.0);
    }else{
        filter->lumH= sws_getIdentityVec();
        filter->lumV= sws_getIdentityVec();
    }

    if (chromaGBlur!=0.0){
        filter->chrH= sws_getGaussianVec(chromaGBlur, 3.0);
        filter->chrV= sws_getGaussianVec(chromaGBlur, 3.0);
    }else{
        filter->chrH= sws_getIdentityVec();
        filter->chrV= sws_getIdentityVec();
    }

    if (chromaSharpen!=0.0){
        SwsVector *id= sws_getIdentityVec();
        sws_scaleVec(filter->chrH, -chromaSharpen);
        sws_scaleVec(filter->chrV, -chromaSharpen);
        sws_addVec(filter->chrH, id);
        sws_addVec(filter->chrV, id);
        sws_freeVec(id);
    }

    if (lumaSharpen!=0.0){
        SwsVector *id= sws_getIdentityVec();
        sws_scaleVec(filter->lumH, -lumaSharpen);
        sws_scaleVec(filter->lumV, -lumaSharpen);
        sws_addVec(filter->lumH, id);
        sws_addVec(filter->lumV, id);
        sws_freeVec(id);
    }

    if (chromaHShift != 0.0)
        sws_shiftVec(filter->chrH, (int)(chromaHShift+0.5));

    if (chromaVShift != 0.0)
        sws_shiftVec(filter->chrV, (int)(chromaVShift+0.5));

    sws_normalizeVec(filter->chrH, 1.0);
    sws_normalizeVec(filter->chrV, 1.0);
    sws_normalizeVec(filter->lumH, 1.0);
    sws_normalizeVec(filter->lumV, 1.0);

    if (verbose) sws_printVec2(filter->chrH, NULL, AV_LOG_DEBUG);
    if (verbose) sws_printVec2(filter->lumH, NULL, AV_LOG_DEBUG);

    return filter;
}
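
/*
 * Illustrative usage sketch (not from the original source): a default filter
 * with a mild chroma blur, passed as the source-side filter when creating a
 * context; srcW/srcH/srcFmt/dstW/dstH/dstFmt are assumed to be defined by
 * the caller.
 *
 *     SwsFilter *f = sws_getDefaultFilter(0.0, 0.5, 0.0, 0.0, 0.0, 0.0, 0);
 *     struct SwsContext *ctx = sws_getContext(srcW, srcH, srcFmt,
 *                                             dstW, dstH, dstFmt,
 *                                             SWS_BICUBIC, f, NULL, NULL);
 *     ...
 *     sws_freeFilter(f);
 */
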
SwsVector *sws_getGaussianVec(double variance, double quality){
    const int length= (int)(variance*quality + 0.5) | 1;
    int i;
    double *coeff= av_malloc(length*sizeof(double));
    double middle= (length-1)*0.5;
    SwsVector *vec= av_malloc(sizeof(SwsVector));

    vec->coeff= coeff;
    vec->length= length;

    for (i=0; i<length; i++)
    {
        double dist= i-middle;
        coeff[i]= exp(-dist*dist/(2*variance*variance)) / sqrt(2*variance*PI);
    }

    sws_normalizeVec(vec, 1.0);

    return vec;
}
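
/*
 * Note (not from the original source): the loop above samples a Gaussian,
 * coeff[i] = exp(-d*d / (2*variance*variance)) with d = i - (length-1)/2,
 * and sws_normalizeVec() then scales the taps so that they sum to 1.
 * Illustrative use:
 *
 *     SwsVector *blur = sws_getGaussianVec(2.0, 3.0); // 7 taps for these args
 *     sws_convVec(vec, blur);                         // blur an existing vector
 *     sws_freeVec(blur);
 */
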
SwsVector *sws_getConstVec(double c, int length){
    int i;
    double *coeff= av_malloc(length*sizeof(double));
    SwsVector *vec= av_malloc(sizeof(SwsVector));

    vec->coeff= coeff;
    vec->length= length;

    for (i=0; i<length; i++)
        coeff[i]= c;

    return vec;
}

SwsVector *sws_getIdentityVec(void){
    return sws_getConstVec(1.0, 1);
}

double sws_dcVec(SwsVector *a){
    int i;
    double sum=0;

    for (i=0; i<a->length; i++)
        sum+= a->coeff[i];

    return sum;
}

void sws_scaleVec(SwsVector *a, double scalar){
    int i;

    for (i=0; i<a->length; i++)
        a->coeff[i]*= scalar;
}

void sws_normalizeVec(SwsVector *a, double height){
    sws_scaleVec(a, height/sws_dcVec(a));
}

static SwsVector *sws_getConvVec(SwsVector *a, SwsVector *b){
    int length= a->length + b->length - 1;
    double *coeff= av_malloc(length*sizeof(double));
    int i, j;
    SwsVector *vec= av_malloc(sizeof(SwsVector));

    vec->coeff= coeff;
    vec->length= length;

    for (i=0; i<length; i++) coeff[i]= 0.0;

    for (i=0; i<a->length; i++)
    {
        for (j=0; j<b->length; j++)
        {
            coeff[i+j]+= a->coeff[i]*b->coeff[j];
        }
    }

    return vec;
}

static SwsVector *sws_sumVec(SwsVector *a, SwsVector *b){
    int length= FFMAX(a->length, b->length);
    double *coeff= av_malloc(length*sizeof(double));
    int i;
    SwsVector *vec= av_malloc(sizeof(SwsVector));

    vec->coeff= coeff;
    vec->length= length;

    for (i=0; i<length; i++) coeff[i]= 0.0;

    for (i=0; i<a->length; i++) coeff[i + (length-1)/2 - (a->length-1)/2]+= a->coeff[i];
    for (i=0; i<b->length; i++) coeff[i + (length-1)/2 - (b->length-1)/2]+= b->coeff[i];

    return vec;
}

static SwsVector *sws_diffVec(SwsVector *a, SwsVector *b){
    int length= FFMAX(a->length, b->length);
    double *coeff= av_malloc(length*sizeof(double));
    int i;
    SwsVector *vec= av_malloc(sizeof(SwsVector));

    vec->coeff= coeff;
    vec->length= length;

    for (i=0; i<length; i++) coeff[i]= 0.0;

    for (i=0; i<a->length; i++) coeff[i + (length-1)/2 - (a->length-1)/2]+= a->coeff[i];
    for (i=0; i<b->length; i++) coeff[i + (length-1)/2 - (b->length-1)/2]-= b->coeff[i];

    return vec;
}

/* shift left, or right if "shift" is negative */
static SwsVector *sws_getShiftedVec(SwsVector *a, int shift){
    int length= a->length + FFABS(shift)*2;
    double *coeff= av_malloc(length*sizeof(double));
    int i;
    SwsVector *vec= av_malloc(sizeof(SwsVector));

    vec->coeff= coeff;
    vec->length= length;

    for (i=0; i<length; i++) coeff[i]= 0.0;

    for (i=0; i<a->length; i++)
    {
        coeff[i + (length-1)/2 - (a->length-1)/2 - shift]= a->coeff[i];
    }

    return vec;
}

void sws_shiftVec(SwsVector *a, int shift){
    SwsVector *shifted= sws_getShiftedVec(a, shift);
    av_free(a->coeff);
    a->coeff= shifted->coeff;
    a->length= shifted->length;
    av_free(shifted);
}

void sws_addVec(SwsVector *a, SwsVector *b){
    SwsVector *sum= sws_sumVec(a, b);
    av_free(a->coeff);
    a->coeff= sum->coeff;
    a->length= sum->length;
    av_free(sum);
}

void sws_subVec(SwsVector *a, SwsVector *b){
    SwsVector *diff= sws_diffVec(a, b);
    av_free(a->coeff);
    a->coeff= diff->coeff;
    a->length= diff->length;
    av_free(diff);
}

void sws_convVec(SwsVector *a, SwsVector *b){
    SwsVector *conv= sws_getConvVec(a, b);
    av_free(a->coeff);
    a->coeff= conv->coeff;
    a->length= conv->length;
    av_free(conv);
}

SwsVector *sws_cloneVec(SwsVector *a){
    double *coeff= av_malloc(a->length*sizeof(double));
    int i;
    SwsVector *vec= av_malloc(sizeof(SwsVector));

    vec->coeff= coeff;
    vec->length= a->length;

    for (i=0; i<a->length; i++) coeff[i]= a->coeff[i];

    return vec;
}

void sws_printVec2(SwsVector *a, AVClass *log_ctx, int log_level){
    int i;
    double max=0;
    double min=0;
    double range;

    for (i=0; i<a->length; i++)
        if (a->coeff[i]>max) max= a->coeff[i];

    for (i=0; i<a->length; i++)
        if (a->coeff[i]<min) min= a->coeff[i];

    range= max - min;

    for (i=0; i<a->length; i++)
    {
        int x= (int)((a->coeff[i]-min)*60.0/range +0.5);
        av_log(log_ctx, log_level, "%1.3f ", a->coeff[i]);
        for (;x>0; x--) av_log(log_ctx, log_level, " ");
        av_log(log_ctx, log_level, "|\n");
    }
}

#if LIBSWSCALE_VERSION_MAJOR < 1
void sws_printVec(SwsVector *a){
    sws_printVec2(a, NULL, AV_LOG_DEBUG);
}
#endif

void sws_freeVec(SwsVector *a){
    if (!a) return;
    av_freep(&a->coeff);
    a->length=0;
    av_free(a);
}

void sws_freeFilter(SwsFilter *filter){
    if (!filter) return;

    if (filter->lumH) sws_freeVec(filter->lumH);
    if (filter->lumV) sws_freeVec(filter->lumV);
    if (filter->chrH) sws_freeVec(filter->chrH);
    if (filter->chrV) sws_freeVec(filter->chrV);
    av_free(filter);
}

void sws_freeContext(SwsContext *c){
    int i;
    if (!c) return;

    if (c->lumPixBuf)
    {
        for (i=0; i<c->vLumBufSize; i++)
            av_freep(&c->lumPixBuf[i]);
        av_freep(&c->lumPixBuf);
    }

    if (c->chrPixBuf)
    {
        for (i=0; i<c->vChrBufSize; i++)
            av_freep(&c->chrPixBuf[i]);
        av_freep(&c->chrPixBuf);
    }

    if (CONFIG_SWSCALE_ALPHA && c->alpPixBuf){
        for (i=0; i<c->vLumBufSize; i++)
            av_freep(&c->alpPixBuf[i]);
        av_freep(&c->alpPixBuf);
    }

    av_freep(&c->vLumFilter);
    av_freep(&c->vChrFilter);
    av_freep(&c->hLumFilter);
    av_freep(&c->hChrFilter);
#if HAVE_ALTIVEC
    av_freep(&c->vYCoeffsBank);
    av_freep(&c->vCCoeffsBank);
#endif

    av_freep(&c->vLumFilterPos);
    av_freep(&c->vChrFilterPos);
    av_freep(&c->hLumFilterPos);
    av_freep(&c->hChrFilterPos);

#if ARCH_X86 && CONFIG_GPL
#ifdef MAP_ANONYMOUS
    if (c->funnyYCode ) munmap(c->funnyYCode , MAX_FUNNY_CODE_SIZE);
    if (c->funnyUVCode) munmap(c->funnyUVCode, MAX_FUNNY_CODE_SIZE);
#elif HAVE_VIRTUALALLOC
    if (c->funnyYCode ) VirtualFree(c->funnyYCode , MAX_FUNNY_CODE_SIZE, MEM_RELEASE);
    if (c->funnyUVCode) VirtualFree(c->funnyUVCode, MAX_FUNNY_CODE_SIZE, MEM_RELEASE);
#else
    av_free(c->funnyYCode );
    av_free(c->funnyUVCode);
#endif
    c->funnyYCode=NULL;
    c->funnyUVCode=NULL;
#endif /* ARCH_X86 && CONFIG_GPL */

    av_freep(&c->lumMmx2Filter);
    av_freep(&c->chrMmx2Filter);
    av_freep(&c->lumMmx2FilterPos);
    av_freep(&c->chrMmx2FilterPos);
    av_freep(&c->yuvTable);

    av_free(c);
}

struct SwsContext *sws_getCachedContext(struct SwsContext *context,
                                        int srcW, int srcH, enum PixelFormat srcFormat,
                                        int dstW, int dstH, enum PixelFormat dstFormat, int flags,
                                        SwsFilter *srcFilter, SwsFilter *dstFilter, double *param)
{
    static const double default_param[2] = {SWS_PARAM_DEFAULT, SWS_PARAM_DEFAULT};

    if (!param)
        param = default_param;

    if (context) {
        if (context->srcW != srcW || context->srcH != srcH ||
            context->srcFormat != srcFormat ||
            context->dstW != dstW || context->dstH != dstH ||
            context->dstFormat != dstFormat || context->flags != flags ||
            context->param[0] != param[0] || context->param[1] != param[1])
        {
            sws_freeContext(context);
            context = NULL;
        }
    }
    if (!context) {
        return sws_getContext(srcW, srcH, srcFormat,
                              dstW, dstH, dstFormat, flags,
                              srcFilter, dstFilter, param);
    }
    return context;
}
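
/*
 * Illustrative usage sketch (not from the original source):
 * sws_getCachedContext() lets a caller reuse one context pointer across
 * frames and only re-initializes it when the conversion parameters change;
 * w/h/formats and the frame buffers are assumed to come from the caller.
 *
 *     ctx = sws_getCachedContext(ctx, w, h, srcFmt, dw, dh, dstFmt,
 *                                SWS_BILINEAR, NULL, NULL, NULL);
 *     if (ctx)
 *         sws_scale(ctx, src, srcStride, 0, h, dst, dstStride);
 */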