ffmpeg / libavfilter / libmpcodecs / vf_spp.c @ e4852fb3

/*
 * Copyright (C) 2003 Michael Niedermayer <michaelni@gmx.at>
 *
 * This file is part of MPlayer.
 *
 * MPlayer is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * MPlayer is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with MPlayer; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 */

/*
 * This implementation is based on the algorithm described in
 * Aria Nosratinia, "Embedded Post-Processing for
 * Enhancement of Compressed Images" (1999)
 * (http://citeseer.nj.nec.com/nosratinia99embedded.html)
 */
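
/*
 * Overview (note added for clarity): the filter below reduces DCT blocking
 * and ringing artifacts.  Each plane is mirror-padded, then every 8x8 block
 * position is transformed with lavc's 8x8 FDCT at 2^log2_count shifted
 * offsets, the coefficients are requantized with a hard or soft threshold
 * derived from the block's quantizer, inverse transformed, and the shifted
 * results are accumulated and averaged (with dithering) into the output.
 */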

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <inttypes.h>
#include <math.h>

#include "config.h"

#include "mp_msg.h"
#include "cpudetect.h"

#include "libavutil/internal.h"
#include "libavutil/intreadwrite.h"
#include "libavcodec/avcodec.h"
#include "libavcodec/dsputil.h"

#undef fprintf
#undef free
#undef malloc

#include "img_format.h"
#include "mp_image.h"
#include "vf.h"
#include "vd_ffmpeg.h"
#include "libvo/fastmemcpy.h"

#define XMIN(a,b) ((a) < (b) ? (a) : (b))

//===========================================================================//
static const uint8_t  __attribute__((aligned(8))) dither[8][8]={
{  0,  48,  12,  60,   3,  51,  15,  63, },
{ 32,  16,  44,  28,  35,  19,  47,  31, },
{  8,  56,   4,  52,  11,  59,   7,  55, },
{ 40,  24,  36,  20,  43,  27,  39,  23, },
{  2,  50,  14,  62,   1,  49,  13,  61, },
{ 34,  18,  46,  30,  33,  17,  45,  29, },
{ 10,  58,   6,  54,   9,  57,   5,  53, },
{ 42,  26,  38,  22,  41,  25,  37,  21, },
};
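
/*
 * Note (added): an 8x8 ordered-dither matrix holding each value 0..63 once;
 * store_slice() adds dither[y][x] to the accumulated sums before shifting
 * them back down to 8 bits, trading banding for fine dither noise.
 */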

static const uint8_t offset[127][2]= {
{0,0},
{0,0}, {4,4},
{0,0}, {2,2}, {6,4}, {4,6},
{0,0}, {5,1}, {2,2}, {7,3}, {4,4}, {1,5}, {6,6}, {3,7},

{0,0}, {4,0}, {1,1}, {5,1}, {3,2}, {7,2}, {2,3}, {6,3},
{0,4}, {4,4}, {1,5}, {5,5}, {3,6}, {7,6}, {2,7}, {6,7},

{0,0}, {0,2}, {0,4}, {0,6}, {1,1}, {1,3}, {1,5}, {1,7},
{2,0}, {2,2}, {2,4}, {2,6}, {3,1}, {3,3}, {3,5}, {3,7},
{4,0}, {4,2}, {4,4}, {4,6}, {5,1}, {5,3}, {5,5}, {5,7},
{6,0}, {6,2}, {6,4}, {6,6}, {7,1}, {7,3}, {7,5}, {7,7},

{0,0}, {4,4}, {0,4}, {4,0}, {2,2}, {6,6}, {2,6}, {6,2},
{0,2}, {4,6}, {0,6}, {4,2}, {2,0}, {6,4}, {2,4}, {6,0},
{1,1}, {5,5}, {1,5}, {5,1}, {3,3}, {7,7}, {3,7}, {7,3},
{1,3}, {5,7}, {1,7}, {5,3}, {3,1}, {7,5}, {3,5}, {7,1},
{0,1}, {4,5}, {0,5}, {4,1}, {2,3}, {6,7}, {2,7}, {6,3},
{0,3}, {4,7}, {0,7}, {4,3}, {2,1}, {6,5}, {2,5}, {6,1},
{1,0}, {5,4}, {1,4}, {5,0}, {3,2}, {7,6}, {3,6}, {7,2},
{1,2}, {5,6}, {1,6}, {5,2}, {3,0}, {7,4}, {3,4}, {7,0},
};
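
/*
 * Note (added): (x,y) block shifts used at each quality level.  filter()
 * reads the 2^log2_count entries starting at index 2^log2_count - 1
 * (offset[i + count - 1]), so level 0 averages a single unshifted pass and
 * level 6 averages all 64 possible shifts; 1+2+4+...+64 = 127 entries total.
 */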

struct vf_priv_s {
        int log2_count;        // filtering quality: 2^log2_count shifted passes are averaged
        int qp;                // if nonzero, force this constant quantizer
        int mode;              // bits 0-1: hard/soft thresholding, bit 2: use B-frame QPs as-is
        int mpeg2;             // qscale_type of the incoming frames
        int temp_stride;
        uint8_t *src;          // padded copy of the current plane
        int16_t *temp;         // 16 bit accumulation buffer
        AVCodecContext *avctx;
        DSPContext dsp;
        char *non_b_qp;        // QP table cached from the last non-B frame
};

#define SHIFT 22

static void hardthresh_c(DCTELEM dst[64], DCTELEM src[64], int qp, uint8_t *permutation){
109
        int i;
110
        int bias= 0; //FIXME
111
        unsigned int threshold1, threshold2;
112

    
113
        threshold1= qp*((1<<4) - bias) - 1;
114
        threshold2= (threshold1<<1);
115

    
116
        memset(dst, 0, 64*sizeof(DCTELEM));
117
        dst[0]= (src[0] + 4)>>3;
118

    
119
        for(i=1; i<64; i++){
120
                int level= src[i];
121
                if(((unsigned)(level+threshold1))>threshold2){
122
                        const int j= permutation[i];
123
                        dst[j]= (level + 4)>>3;
124
                }
125
        }
126
}
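
/*
 * Note (added): threshold1 is 16*qp - 1, and the unsigned comparison
 * (unsigned)(level + threshold1) > 2*threshold1 holds exactly when
 * |level| > threshold1, so this "hard" requantizer zeroes every AC
 * coefficient whose magnitude is at or below the threshold and keeps the
 * rest unchanged apart from the common (x + 4) >> 3 rescaling.
 */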

static void softthresh_c(DCTELEM dst[64], DCTELEM src[64], int qp, uint8_t *permutation){
        int i;
        int bias= 0; //FIXME
        unsigned int threshold1, threshold2;

        threshold1= qp*((1<<4) - bias) - 1;
        threshold2= (threshold1<<1);

        memset(dst, 0, 64*sizeof(DCTELEM));
        dst[0]= (src[0] + 4)>>3;

        for(i=1; i<64; i++){
                int level= src[i];
                if(((unsigned)(level+threshold1))>threshold2){
                        const int j= permutation[i];
                        if(level>0)
                                dst[j]= (level - threshold1 + 4)>>3;
                        else
                                dst[j]= (level + threshold1 + 4)>>3;
                }
        }
}
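
/*
 * Note (added): the "soft" variant uses the same |level| > threshold1
 * survival test but also moves every surviving coefficient threshold1
 * closer to zero before the rescaling (classic soft thresholding /
 * shrinkage), which smooths more aggressively than the hard variant.
 */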

#if HAVE_MMX
static void hardthresh_mmx(DCTELEM dst[64], DCTELEM src[64], int qp, uint8_t *permutation){
        int bias= 0; //FIXME
        unsigned int threshold1;

        threshold1= qp*((1<<4) - bias) - 1;

        __asm__ volatile(
#define REQUANT_CORE(dst0, dst1, dst2, dst3, src0, src1, src2, src3) \
                "movq " #src0 ", %%mm0        \n\t"\
                "movq " #src1 ", %%mm1        \n\t"\
                "movq " #src2 ", %%mm2        \n\t"\
                "movq " #src3 ", %%mm3        \n\t"\
                "psubw %%mm4, %%mm0        \n\t"\
                "psubw %%mm4, %%mm1        \n\t"\
                "psubw %%mm4, %%mm2        \n\t"\
                "psubw %%mm4, %%mm3        \n\t"\
                "paddusw %%mm5, %%mm0        \n\t"\
                "paddusw %%mm5, %%mm1        \n\t"\
                "paddusw %%mm5, %%mm2        \n\t"\
                "paddusw %%mm5, %%mm3        \n\t"\
                "paddw %%mm6, %%mm0        \n\t"\
                "paddw %%mm6, %%mm1        \n\t"\
                "paddw %%mm6, %%mm2        \n\t"\
                "paddw %%mm6, %%mm3        \n\t"\
                "psubusw %%mm6, %%mm0        \n\t"\
                "psubusw %%mm6, %%mm1        \n\t"\
                "psubusw %%mm6, %%mm2        \n\t"\
                "psubusw %%mm6, %%mm3        \n\t"\
                "psraw $3, %%mm0        \n\t"\
                "psraw $3, %%mm1        \n\t"\
                "psraw $3, %%mm2        \n\t"\
                "psraw $3, %%mm3        \n\t"\
\
                "movq %%mm0, %%mm7        \n\t"\
                "punpcklwd %%mm2, %%mm0        \n\t" /*A*/\
                "punpckhwd %%mm2, %%mm7        \n\t" /*C*/\
                "movq %%mm1, %%mm2        \n\t"\
                "punpcklwd %%mm3, %%mm1        \n\t" /*B*/\
                "punpckhwd %%mm3, %%mm2        \n\t" /*D*/\
                "movq %%mm0, %%mm3        \n\t"\
                "punpcklwd %%mm1, %%mm0        \n\t" /*A*/\
                "punpckhwd %%mm7, %%mm3        \n\t" /*C*/\
                "punpcklwd %%mm2, %%mm7        \n\t" /*B*/\
                "punpckhwd %%mm2, %%mm1        \n\t" /*D*/\
\
                "movq %%mm0, " #dst0 "        \n\t"\
                "movq %%mm7, " #dst1 "        \n\t"\
                "movq %%mm3, " #dst2 "        \n\t"\
                "movq %%mm1, " #dst3 "        \n\t"

                "movd %2, %%mm4                \n\t"
                "movd %3, %%mm5                \n\t"
                "movd %4, %%mm6                \n\t"
                "packssdw %%mm4, %%mm4        \n\t"
                "packssdw %%mm5, %%mm5        \n\t"
                "packssdw %%mm6, %%mm6        \n\t"
                "packssdw %%mm4, %%mm4        \n\t"
                "packssdw %%mm5, %%mm5        \n\t"
                "packssdw %%mm6, %%mm6        \n\t"
                REQUANT_CORE(  (%1),  8(%1), 16(%1), 24(%1),  (%0), 8(%0), 64(%0), 72(%0))
                REQUANT_CORE(32(%1), 40(%1), 48(%1), 56(%1),16(%0),24(%0), 48(%0), 56(%0))
                REQUANT_CORE(64(%1), 72(%1), 80(%1), 88(%1),32(%0),40(%0), 96(%0),104(%0))
                REQUANT_CORE(96(%1),104(%1),112(%1),120(%1),80(%0),88(%0),112(%0),120(%0))
                : : "r" (src), "r" (dst), "g" (threshold1+1), "g" (threshold1+5), "g" (threshold1-4) //FIXME maybe more accurate than needed?
        );
        dst[0]= (src[0] + 4)>>3;
}

static void softthresh_mmx(DCTELEM dst[64], DCTELEM src[64], int qp, uint8_t *permutation){
        int bias= 0; //FIXME
        unsigned int threshold1;

        threshold1= qp*((1<<4) - bias) - 1;

        __asm__ volatile(
#undef REQUANT_CORE
#define REQUANT_CORE(dst0, dst1, dst2, dst3, src0, src1, src2, src3) \
                "movq " #src0 ", %%mm0        \n\t"\
                "movq " #src1 ", %%mm1        \n\t"\
                "pxor %%mm6, %%mm6        \n\t"\
                "pxor %%mm7, %%mm7        \n\t"\
                "pcmpgtw %%mm0, %%mm6        \n\t"\
                "pcmpgtw %%mm1, %%mm7        \n\t"\
                "pxor %%mm6, %%mm0        \n\t"\
                "pxor %%mm7, %%mm1        \n\t"\
                "psubusw %%mm4, %%mm0        \n\t"\
                "psubusw %%mm4, %%mm1        \n\t"\
                "pxor %%mm6, %%mm0        \n\t"\
                "pxor %%mm7, %%mm1        \n\t"\
                "movq " #src2 ", %%mm2        \n\t"\
                "movq " #src3 ", %%mm3        \n\t"\
                "pxor %%mm6, %%mm6        \n\t"\
                "pxor %%mm7, %%mm7        \n\t"\
                "pcmpgtw %%mm2, %%mm6        \n\t"\
                "pcmpgtw %%mm3, %%mm7        \n\t"\
                "pxor %%mm6, %%mm2        \n\t"\
                "pxor %%mm7, %%mm3        \n\t"\
                "psubusw %%mm4, %%mm2        \n\t"\
                "psubusw %%mm4, %%mm3        \n\t"\
                "pxor %%mm6, %%mm2        \n\t"\
                "pxor %%mm7, %%mm3        \n\t"\
\
                "paddsw %%mm5, %%mm0        \n\t"\
                "paddsw %%mm5, %%mm1        \n\t"\
                "paddsw %%mm5, %%mm2        \n\t"\
                "paddsw %%mm5, %%mm3        \n\t"\
                "psraw $3, %%mm0        \n\t"\
                "psraw $3, %%mm1        \n\t"\
                "psraw $3, %%mm2        \n\t"\
                "psraw $3, %%mm3        \n\t"\
\
                "movq %%mm0, %%mm7        \n\t"\
                "punpcklwd %%mm2, %%mm0        \n\t" /*A*/\
                "punpckhwd %%mm2, %%mm7        \n\t" /*C*/\
                "movq %%mm1, %%mm2        \n\t"\
                "punpcklwd %%mm3, %%mm1        \n\t" /*B*/\
                "punpckhwd %%mm3, %%mm2        \n\t" /*D*/\
                "movq %%mm0, %%mm3        \n\t"\
                "punpcklwd %%mm1, %%mm0        \n\t" /*A*/\
                "punpckhwd %%mm7, %%mm3        \n\t" /*C*/\
                "punpcklwd %%mm2, %%mm7        \n\t" /*B*/\
                "punpckhwd %%mm2, %%mm1        \n\t" /*D*/\
\
                "movq %%mm0, " #dst0 "        \n\t"\
                "movq %%mm7, " #dst1 "        \n\t"\
                "movq %%mm3, " #dst2 "        \n\t"\
                "movq %%mm1, " #dst3 "        \n\t"

                "movd %2, %%mm4                \n\t"
                "movd %3, %%mm5                \n\t"
                "packssdw %%mm4, %%mm4        \n\t"
                "packssdw %%mm5, %%mm5        \n\t"
                "packssdw %%mm4, %%mm4        \n\t"
                "packssdw %%mm5, %%mm5        \n\t"
                REQUANT_CORE(  (%1),  8(%1), 16(%1), 24(%1),  (%0), 8(%0), 64(%0), 72(%0))
                REQUANT_CORE(32(%1), 40(%1), 48(%1), 56(%1),16(%0),24(%0), 48(%0), 56(%0))
                REQUANT_CORE(64(%1), 72(%1), 80(%1), 88(%1),32(%0),40(%0), 96(%0),104(%0))
                REQUANT_CORE(96(%1),104(%1),112(%1),120(%1),80(%0),88(%0),112(%0),120(%0))
                : : "r" (src), "r" (dst), "g" (threshold1), "rm" (4) //FIXME maybe more accurate than needed?
        );

        dst[0]= (src[0] + 4)>>3;
}
#endif
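
/*
 * Note (added): the MMX requantizers above process 16 coefficients per
 * REQUANT_CORE expansion and ignore the permutation argument; the DC
 * coefficient is recomputed in plain C after the asm block since the SIMD
 * path thresholds it like any other coefficient.
 */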

static inline void add_block(int16_t *dst, int stride, DCTELEM block[64]){
        int y;

        for(y=0; y<8; y++){
                *(uint32_t*)&dst[0 + y*stride]+= *(uint32_t*)&block[0 + y*8];
                *(uint32_t*)&dst[2 + y*stride]+= *(uint32_t*)&block[2 + y*8];
                *(uint32_t*)&dst[4 + y*stride]+= *(uint32_t*)&block[4 + y*8];
                *(uint32_t*)&dst[6 + y*stride]+= *(uint32_t*)&block[6 + y*8];
        }
}
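
/*
 * Note (added): accumulates one requantized 8x8 block into the int16_t
 * accumulation buffer; the casts let each 32-bit addition update a pair of
 * neighbouring int16_t samples at once.
 */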

static void store_slice_c(uint8_t *dst, int16_t *src, int dst_stride, int src_stride, int width, int height, int log2_scale){
        int y, x;

#define STORE(pos) \
        temp= ((src[x + y*src_stride + pos]<<log2_scale) + d[pos])>>6;\
        if(temp & 0x100) temp= ~(temp>>31);\
        dst[x + y*dst_stride + pos]= temp;

        for(y=0; y<height; y++){
                const uint8_t *d= dither[y];
                for(x=0; x<width; x+=8){
                        int temp;
                        STORE(0);
                        STORE(1);
                        STORE(2);
                        STORE(3);
                        STORE(4);
                        STORE(5);
                        STORE(6);
                        STORE(7);
                }
        }
}
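
/*
 * Note (added): STORE() converts an accumulated sum back to a pixel:
 * <<log2_scale (with log2_scale = 6 - log2_count) followed by >>6 divides
 * by the number of averaged passes, the dither value provides spatially
 * varying rounding, and the "temp & 0x100" test catches values just outside
 * 0..255 so that ~(temp>>31) clamps negatives to 0 and overflows to 255.
 */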

#if HAVE_MMX
static void store_slice_mmx(uint8_t *dst, int16_t *src, int dst_stride, int src_stride, int width, int height, int log2_scale){
        int y;

        for(y=0; y<height; y++){
                uint8_t *dst1= dst;
                int16_t *src1= src;
                __asm__ volatile(
                        "movq (%3), %%mm3        \n\t"
                        "movq (%3), %%mm4        \n\t"
                        "movd %4, %%mm2                \n\t"
                        "pxor %%mm0, %%mm0        \n\t"
                        "punpcklbw %%mm0, %%mm3        \n\t"
                        "punpckhbw %%mm0, %%mm4        \n\t"
                        "psraw %%mm2, %%mm3        \n\t"
                        "psraw %%mm2, %%mm4        \n\t"
                        "movd %5, %%mm2                \n\t"
                        "1:                        \n\t"
                        "movq (%0), %%mm0        \n\t"
                        "movq 8(%0), %%mm1        \n\t"
                        "paddw %%mm3, %%mm0        \n\t"
                        "paddw %%mm4, %%mm1        \n\t"
                        "psraw %%mm2, %%mm0        \n\t"
                        "psraw %%mm2, %%mm1        \n\t"
                        "packuswb %%mm1, %%mm0        \n\t"
                        "movq %%mm0, (%1)         \n\t"
                        "add $16, %0                \n\t"
                        "add $8, %1                \n\t"
                        "cmp %2, %1                \n\t"
                        " jb 1b                        \n\t"
                        : "+r" (src1), "+r"(dst1)
                        : "r"(dst + width), "r"(dither[y]), "g"(log2_scale), "g"(6-log2_scale)
                );
                src += src_stride;
                dst += dst_stride;
        }
//        if(width != mmxw)
//                store_slice_c(dst + mmxw, src + mmxw, dst_stride, src_stride, width - mmxw, log2_scale);
}
#endif
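
/*
 * Note (added): the MMX store does the same job as STORE() above but adds a
 * pre-shifted dither row (dither >> log2_scale) and shifts by 6 - log2_scale,
 * letting packuswb do the 0..255 clamp; it processes 8 pixels per iteration
 * and assumes the width is a multiple of 8 (the commented-out store_slice_c()
 * call was apparently meant to handle any leftover columns).
 */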

static void (*store_slice)(uint8_t *dst, int16_t *src, int dst_stride, int src_stride, int width, int height, int log2_scale)= store_slice_c;

static void (*requantize)(DCTELEM dst[64], DCTELEM src[64], int qp, uint8_t *permutation)= hardthresh_c;

static void filter(struct vf_priv_s *p, uint8_t *dst, uint8_t *src, int dst_stride, int src_stride, int width, int height, uint8_t *qp_store, int qp_stride, int is_luma){
        int x, y, i;
        const int count= 1<<p->log2_count;
        const int stride= is_luma ? p->temp_stride : ((width+16+15)&(~15));
        uint64_t __attribute__((aligned(16))) block_align[32];
        DCTELEM *block = (DCTELEM *)block_align;
        DCTELEM *block2= (DCTELEM *)(block_align+16);

        if (!src || !dst) return; // HACK avoid crash for Y8 colourspace
        for(y=0; y<height; y++){
                int index= 8 + 8*stride + y*stride;
                fast_memcpy(p->src + index, src + y*src_stride, width);
                for(x=0; x<8; x++){
                        p->src[index         - x - 1]= p->src[index +         x    ];
                        p->src[index + width + x    ]= p->src[index + width - x - 1];
                }
        }
        for(y=0; y<8; y++){
                fast_memcpy(p->src + (      7-y)*stride, p->src + (      y+8)*stride, stride);
                fast_memcpy(p->src + (height+8+y)*stride, p->src + (height-y+7)*stride, stride);
        }
        //FIXME (try edge emu)

        for(y=0; y<height+8; y+=8){
                memset(p->temp + (8+y)*stride, 0, 8*stride*sizeof(int16_t));
                for(x=0; x<width+8; x+=8){
                        const int qps= 3 + is_luma;
                        int qp;

                        if(p->qp)
                                qp= p->qp;
                        else{
                                qp= qp_store[ (XMIN(x, width-1)>>qps) + (XMIN(y, height-1)>>qps) * qp_stride];
                                qp = FFMAX(1, norm_qscale(qp, p->mpeg2));
                        }
                        for(i=0; i<count; i++){
                                const int x1= x + offset[i+count-1][0];
                                const int y1= y + offset[i+count-1][1];
                                const int index= x1 + y1*stride;
                                p->dsp.get_pixels(block, p->src + index, stride);
                                p->dsp.fdct(block);
                                requantize(block2, block, qp, p->dsp.idct_permutation);
                                p->dsp.idct(block2);
                                add_block(p->temp + index, stride, block2);
                        }
                }
                if(y)
                        store_slice(dst + (y-8)*dst_stride, p->temp + 8 + y*stride, dst_stride, stride, width, XMIN(8, height+8-y), 6-p->log2_count);
        }
#if 0
        for(y=0; y<height; y++){
                for(x=0; x<width; x++){
                        if((((x>>6) ^ (y>>6)) & 1) == 0)
                                dst[x + y*dst_stride]= p->src[8 + 8*stride  + x + y*stride];
                        if((x&63) == 0 || (y&63)==0)
                                dst[x + y*dst_stride] += 128;
                }
        }
#endif
        //FIXME reorder for better caching
}
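
/*
 * Note (added): filter() first copies the plane into p->src with an 8-pixel
 * mirrored border on every side, then walks the padded image in 8-line
 * bands: for every 8x8 grid position it looks up the block quantizer
 * (either the forced p->qp or the qp_store entry for that macroblock,
 * normalised for the qscale type), runs "count" shifted
 * get_pixels/fdct/requantize/idct passes and accumulates them in p->temp,
 * and finally hands each finished band to store_slice(), which averages and
 * dithers the sums back into the destination plane.
 */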

static int config(struct vf_instance *vf,
        int width, int height, int d_width, int d_height,
        unsigned int flags, unsigned int outfmt){
        int h= (height+16+15)&(~15);

        vf->priv->temp_stride= (width+16+15)&(~15);
        vf->priv->temp= malloc(vf->priv->temp_stride*h*sizeof(int16_t));
        vf->priv->src = malloc(vf->priv->temp_stride*h*sizeof(uint8_t));

        return vf_next_config(vf,width,height,d_width,d_height,flags,outfmt);
}

static void get_image(struct vf_instance *vf, mp_image_t *mpi){
    if(mpi->flags&MP_IMGFLAG_PRESERVE) return; // don't change
    // ok, we can do pp in-place (or pp disabled):
    vf->dmpi=vf_get_image(vf->next,mpi->imgfmt,
        mpi->type, mpi->flags | MP_IMGFLAG_READABLE, mpi->width, mpi->height);
    mpi->planes[0]=vf->dmpi->planes[0];
    mpi->stride[0]=vf->dmpi->stride[0];
    mpi->width=vf->dmpi->width;
    if(mpi->flags&MP_IMGFLAG_PLANAR){
        mpi->planes[1]=vf->dmpi->planes[1];
        mpi->planes[2]=vf->dmpi->planes[2];
        mpi->stride[1]=vf->dmpi->stride[1];
        mpi->stride[2]=vf->dmpi->stride[2];
    }
    mpi->flags|=MP_IMGFLAG_DIRECT;
}

static int put_image(struct vf_instance *vf, mp_image_t *mpi, double pts){
        mp_image_t *dmpi;

        if(!(mpi->flags&MP_IMGFLAG_DIRECT)){
                // no DR, so get a new image! hope we'll get DR buffer:
                dmpi=vf_get_image(vf->next,mpi->imgfmt,
                    MP_IMGTYPE_TEMP,
                    MP_IMGFLAG_ACCEPT_STRIDE|MP_IMGFLAG_PREFER_ALIGNED_STRIDE,
                    mpi->width,mpi->height);
                vf_clone_mpi_attributes(dmpi, mpi);
        }else{
           dmpi=vf->dmpi;
        }

        vf->priv->mpeg2= mpi->qscale_type;
        if(mpi->pict_type != 3 && mpi->qscale && !vf->priv->qp){
            int w = mpi->qstride;
            int h = (mpi->h + 15) >> 4;
            if (!w) {
                w = (mpi->w + 15) >> 4;
                h = 1;
            }
            if(!vf->priv->non_b_qp)
                vf->priv->non_b_qp= malloc(w*h);
            fast_memcpy(vf->priv->non_b_qp, mpi->qscale, w*h);
        }
        if(vf->priv->log2_count || !(mpi->flags&MP_IMGFLAG_DIRECT)){
            char *qp_tab= vf->priv->non_b_qp;
            if((vf->priv->mode&4) || !qp_tab)
                qp_tab= mpi->qscale;

            if(qp_tab || vf->priv->qp){
                filter(vf->priv, dmpi->planes[0], mpi->planes[0], dmpi->stride[0], mpi->stride[0], mpi->w, mpi->h, qp_tab, mpi->qstride, 1);
                filter(vf->priv, dmpi->planes[1], mpi->planes[1], dmpi->stride[1], mpi->stride[1], mpi->w>>mpi->chroma_x_shift, mpi->h>>mpi->chroma_y_shift, qp_tab, mpi->qstride, 0);
                filter(vf->priv, dmpi->planes[2], mpi->planes[2], dmpi->stride[2], mpi->stride[2], mpi->w>>mpi->chroma_x_shift, mpi->h>>mpi->chroma_y_shift, qp_tab, mpi->qstride, 0);
            }else{
                memcpy_pic(dmpi->planes[0], mpi->planes[0], mpi->w, mpi->h, dmpi->stride[0], mpi->stride[0]);
                memcpy_pic(dmpi->planes[1], mpi->planes[1], mpi->w>>mpi->chroma_x_shift, mpi->h>>mpi->chroma_y_shift, dmpi->stride[1], mpi->stride[1]);
                memcpy_pic(dmpi->planes[2], mpi->planes[2], mpi->w>>mpi->chroma_x_shift, mpi->h>>mpi->chroma_y_shift, dmpi->stride[2], mpi->stride[2]);
            }
        }

#if HAVE_MMX
        if(gCpuCaps.hasMMX) __asm__ volatile ("emms\n\t");
#endif
#if HAVE_MMX2
        if(gCpuCaps.hasMMX2) __asm__ volatile ("sfence\n\t");
#endif

        return vf_next_put_image(vf,dmpi, pts);
}
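
/*
 * Note (added): put_image() caches the quantizer table of every non-B frame
 * (pict_type != 3) in non_b_qp and by default filters all frames with that
 * cached table, so B-frames are processed with the quantizers of the most
 * recently seen non-B frame; setting bit 2 of the mode option (mode&4)
 * makes it use each frame's own QP table instead.
 */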

static void uninit(struct vf_instance *vf){
        if(!vf->priv) return;

        free(vf->priv->temp);
        vf->priv->temp= NULL;
        free(vf->priv->src);
        vf->priv->src= NULL;
        free(vf->priv->avctx);
        vf->priv->avctx= NULL;
        free(vf->priv->non_b_qp);
        vf->priv->non_b_qp= NULL;

        free(vf->priv);
        vf->priv=NULL;
}

//===========================================================================//
static int query_format(struct vf_instance *vf, unsigned int fmt){
    switch(fmt){
        case IMGFMT_YVU9:
        case IMGFMT_IF09:
        case IMGFMT_YV12:
        case IMGFMT_I420:
        case IMGFMT_IYUV:
        case IMGFMT_CLPL:
        case IMGFMT_Y800:
        case IMGFMT_Y8:
        case IMGFMT_444P:
        case IMGFMT_422P:
        case IMGFMT_411P:
            return vf_next_query_format(vf,fmt);
    }
    return 0;
}

static int control(struct vf_instance *vf, int request, void* data){
    switch(request){
    case VFCTRL_QUERY_MAX_PP_LEVEL:
        return 6;
    case VFCTRL_SET_PP_LEVEL:
        vf->priv->log2_count= *((unsigned int*)data);
        return CONTROL_TRUE;
    }
    return vf_next_control(vf,request,data);
}

static int vf_open(vf_instance_t *vf, char *args){

    int log2c=-1;

    vf->config=config;
    vf->put_image=put_image;
    vf->get_image=get_image;
    vf->query_format=query_format;
    vf->uninit=uninit;
    vf->control= control;
    vf->priv=malloc(sizeof(struct vf_priv_s));
    memset(vf->priv, 0, sizeof(struct vf_priv_s));

    init_avcodec();

    vf->priv->avctx= avcodec_alloc_context();
    dsputil_init(&vf->priv->dsp, vf->priv->avctx);

    vf->priv->log2_count= 3;

    if (args) sscanf(args, "%d:%d:%d", &log2c, &vf->priv->qp, &vf->priv->mode);

    if( log2c >=0 && log2c <=6 )
        vf->priv->log2_count = log2c;

    if(vf->priv->qp < 0)
        vf->priv->qp = 0;

    switch(vf->priv->mode&3){
        default:
        case 0: requantize= hardthresh_c; break;
        case 1: requantize= softthresh_c; break;
    }

#if HAVE_MMX
    if(gCpuCaps.hasMMX){
        store_slice= store_slice_mmx;
        switch(vf->priv->mode&3){
            case 0: requantize= hardthresh_mmx; break;
            case 1: requantize= softthresh_mmx; break;
        }
    }
#endif

    return 1;
}
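
/*
 * Note (added): the option string is parsed by the sscanf() above as
 * "quality:qp:mode" (e.g. -vf spp=5:0:1): quality sets log2_count (0-6,
 * default 3), a positive qp forces a constant quantizer instead of the
 * per-frame tables, and mode selects hard (0) or soft (1) thresholding in
 * its low bits, with bit 2 handled in put_image() as "use the B-frames' own
 * QP table".
 */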

const vf_info_t vf_info_spp = {
    "simple postprocess",
    "spp",
    "Michael Niedermayer",
    "",
    vf_open,
    NULL
};