/*
 * Copyright (c) 2003 The FFmpeg Project.
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 *
 *
 * How to use this decoder:
 * SVQ3 data is transported within Apple Quicktime files. Quicktime files
 * have stsd atoms to describe media trak properties. A stsd atom for a
 * video trak contains 1 or more ImageDescription atoms. These atoms begin
 * with the 4-byte length of the atom followed by the codec fourcc. Some
 * decoders need information in this atom to operate correctly. Such
 * is the case with SVQ3. In order to get the best use out of this decoder,
 * the calling app must make the SVQ3 ImageDescription atom available
 * via the AVCodecContext's extradata[_size] field:
 *
 * AVCodecContext.extradata = pointer to ImageDescription, first characters
 * are expected to be 'S', 'V', 'Q', and '3', NOT the 4-byte atom length
 * AVCodecContext.extradata_size = size of ImageDescription atom memory
 * buffer (which will be the same as the ImageDescription atom size field
 * from the QT file, minus 4 bytes since the length is missing)
 *
 * You will know you have these parameters passed correctly when the decoder
 * correctly decodes this file:
 *  ftp://ftp.mplayerhq.hu/MPlayer/samples/V-codecs/SVQ3/Vertical400kbit.sorenson3.mov
 *
 */
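
/*
 * A rough sketch of what a calling application might do (the names
 * "imagedesc_atom" and "imagedesc_atom_size" are illustrative placeholders:
 * a pointer to the start of the ImageDescription atom in the QT file and the
 * value of its 4-byte size field; only the two AVCodecContext fields are
 * actually read by this decoder):
 *
 *   avctx->extradata_size = imagedesc_atom_size - 4;
 *   avctx->extradata      = av_malloc(avctx->extradata_size);
 *   memcpy(avctx->extradata, imagedesc_atom + 4, avctx->extradata_size);
 */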

/**
 * @file svq3.c
 * svq3 decoder.
 */

#define FULLPEL_MODE  1
#define HALFPEL_MODE  2
#define THIRDPEL_MODE 3
#define PREDICT_MODE  4

/* dual scan (from some older h264 draft)
 o-->o-->o   o
         |  /|
 o   o   o / o
 | / |   |/  |
 o   o   o   o
   /
 o-->o-->o-->o
*/
static const uint8_t svq3_scan[16]={
 0+0*4, 1+0*4, 2+0*4, 2+1*4,
 2+2*4, 3+0*4, 3+1*4, 3+2*4,
 0+1*4, 0+2*4, 1+1*4, 1+2*4,
 0+3*4, 1+3*4, 2+3*4, 3+3*4,
};

static const uint8_t svq3_pred_0[25][2] = {
  { 0, 0 },
  { 1, 0 }, { 0, 1 },
  { 0, 2 }, { 1, 1 }, { 2, 0 },
  { 3, 0 }, { 2, 1 }, { 1, 2 }, { 0, 3 },
  { 0, 4 }, { 1, 3 }, { 2, 2 }, { 3, 1 }, { 4, 0 },
  { 4, 1 }, { 3, 2 }, { 2, 3 }, { 1, 4 },
  { 2, 4 }, { 3, 3 }, { 4, 2 },
  { 4, 3 }, { 3, 4 },
  { 4, 4 }
};

static const int8_t svq3_pred_1[6][6][5] = {
  { { 2,-1,-1,-1,-1 }, { 2, 1,-1,-1,-1 }, { 1, 2,-1,-1,-1 },
    { 2, 1,-1,-1,-1 }, { 1, 2,-1,-1,-1 }, { 1, 2,-1,-1,-1 } },
  { { 0, 2,-1,-1,-1 }, { 0, 2, 1, 4, 3 }, { 0, 1, 2, 4, 3 },
    { 0, 2, 1, 4, 3 }, { 2, 0, 1, 3, 4 }, { 0, 4, 2, 1, 3 } },
  { { 2, 0,-1,-1,-1 }, { 2, 1, 0, 4, 3 }, { 1, 2, 4, 0, 3 },
    { 2, 1, 0, 4, 3 }, { 2, 1, 4, 3, 0 }, { 1, 2, 4, 0, 3 } },
  { { 2, 0,-1,-1,-1 }, { 2, 0, 1, 4, 3 }, { 1, 2, 0, 4, 3 },
    { 2, 1, 0, 4, 3 }, { 2, 1, 3, 4, 0 }, { 2, 4, 1, 0, 3 } },
  { { 0, 2,-1,-1,-1 }, { 0, 2, 1, 3, 4 }, { 1, 2, 3, 0, 4 },
    { 2, 0, 1, 3, 4 }, { 2, 1, 3, 0, 4 }, { 2, 0, 4, 3, 1 } },
  { { 0, 2,-1,-1,-1 }, { 0, 2, 4, 1, 3 }, { 1, 4, 2, 0, 3 },
    { 4, 2, 0, 1, 3 }, { 2, 0, 1, 4, 3 }, { 4, 2, 1, 0, 3 } },
};

static const struct { uint8_t run; uint8_t level; } svq3_dct_tables[2][16] = {
  { { 0, 0 }, { 0, 1 }, { 1, 1 }, { 2, 1 }, { 0, 2 }, { 3, 1 }, { 4, 1 }, { 5, 1 },
    { 0, 3 }, { 1, 2 }, { 2, 2 }, { 6, 1 }, { 7, 1 }, { 8, 1 }, { 9, 1 }, { 0, 4 } },
  { { 0, 0 }, { 0, 1 }, { 1, 1 }, { 0, 2 }, { 2, 1 }, { 0, 3 }, { 0, 4 }, { 0, 5 },
    { 3, 1 }, { 4, 1 }, { 1, 2 }, { 1, 3 }, { 0, 6 }, { 0, 7 }, { 0, 8 }, { 0, 9 } }
};

static const uint32_t svq3_dequant_coeff[32] = {
   3881,  4351,  4890,  5481,  6154,  6914,  7761,  8718,
   9781, 10987, 12339, 13828, 15523, 17435, 19561, 21873,
  24552, 27656, 30847, 34870, 38807, 43747, 49103, 54683,
  61694, 68745, 77615, 89113,100253,109366,126635,141533
};

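/* Inverse transform and dequantize the 16 luma DC coefficients in place;
 * SVQ3 uses a 13/17/7 scaled-integer approximation of the DCT, and the DC
 * values live at the DC positions of the macroblock's coefficient buffer
 * (stride 16). */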
static void svq3_luma_dc_dequant_idct_c(DCTELEM *block, int qp){
    const int qmul= svq3_dequant_coeff[qp];
#define stride 16
    int i;
    int temp[16];
    static const int x_offset[4]={0, 1*stride, 4* stride,  5*stride};
    static const int y_offset[4]={0, 2*stride, 8* stride, 10*stride};

    for(i=0; i<4; i++){
        const int offset= y_offset[i];
        const int z0= 13*(block[offset+stride*0] +    block[offset+stride*4]);
        const int z1= 13*(block[offset+stride*0] -    block[offset+stride*4]);
        const int z2=  7* block[offset+stride*1] - 17*block[offset+stride*5];
        const int z3= 17* block[offset+stride*1] +  7*block[offset+stride*5];

        temp[4*i+0]= z0+z3;
        temp[4*i+1]= z1+z2;
        temp[4*i+2]= z1-z2;
        temp[4*i+3]= z0-z3;
    }

    for(i=0; i<4; i++){
        const int offset= x_offset[i];
        const int z0= 13*(temp[4*0+i] +    temp[4*2+i]);
        const int z1= 13*(temp[4*0+i] -    temp[4*2+i]);
        const int z2=  7* temp[4*1+i] - 17*temp[4*3+i];
        const int z3= 17* temp[4*1+i] +  7*temp[4*3+i];

        block[stride*0 +offset]= ((z0 + z3)*qmul + 0x80000)>>20;
        block[stride*2 +offset]= ((z1 + z2)*qmul + 0x80000)>>20;
        block[stride*8 +offset]= ((z1 - z2)*qmul + 0x80000)>>20;
        block[stride*10+offset]= ((z0 - z3)*qmul + 0x80000)>>20;
    }
}
#undef stride

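/* Dequantize and inverse-transform a single 4x4 coefficient block and add
 * the result to the destination pixels (clipped via the crop table); when
 * 'dc' is non-zero the DC coefficient is handled separately and folded into
 * the rounding term. */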
static void svq3_add_idct_c (uint8_t *dst, DCTELEM *block, int stride, int qp, int dc){
    const int qmul= svq3_dequant_coeff[qp];
    int i;
    uint8_t *cm = cropTbl + MAX_NEG_CROP;

    if (dc) {
        dc = 13*13*((dc == 1) ? 1538*block[0] : ((qmul*(block[0] >> 3)) / 2));
        block[0] = 0;
    }

    for (i=0; i < 4; i++) {
        const int z0= 13*(block[0 + 4*i] +    block[2 + 4*i]);
        const int z1= 13*(block[0 + 4*i] -    block[2 + 4*i]);
        const int z2=  7* block[1 + 4*i] - 17*block[3 + 4*i];
        const int z3= 17* block[1 + 4*i] +  7*block[3 + 4*i];

        block[0 + 4*i]= z0 + z3;
        block[1 + 4*i]= z1 + z2;
        block[2 + 4*i]= z1 - z2;
        block[3 + 4*i]= z0 - z3;
    }

    for (i=0; i < 4; i++) {
        const int z0= 13*(block[i + 4*0] +    block[i + 4*2]);
        const int z1= 13*(block[i + 4*0] -    block[i + 4*2]);
        const int z2=  7* block[i + 4*1] - 17*block[i + 4*3];
        const int z3= 17* block[i + 4*1] +  7*block[i + 4*3];
        const int rr= (dc + 0x80000);

        dst[i + stride*0]= cm[ dst[i + stride*0] + (((z0 + z3)*qmul + rr) >> 20) ];
        dst[i + stride*1]= cm[ dst[i + stride*1] + (((z1 + z2)*qmul + rr) >> 20) ];
        dst[i + stride*2]= cm[ dst[i + stride*2] + (((z1 - z2)*qmul + rr) >> 20) ];
        dst[i + stride*3]= cm[ dst[i + stride*3] + (((z0 - z3)*qmul + rr) >> 20) ];
    }
}

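/* SVQ3 replacement for the H.264 4x4 "down-left" predictor: the block is
 * filled by averaging the left and top edge samples instead of extrapolating
 * from the top/top-right edge. */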
static void pred4x4_down_left_svq3_c(uint8_t *src, uint8_t *topright, int stride){
    LOAD_TOP_EDGE
    LOAD_LEFT_EDGE
    const __attribute__((unused)) int unu0= t0;
    const __attribute__((unused)) int unu1= l0;

    src[0+0*stride]=(l1 + t1)>>1;
    src[1+0*stride]=
    src[0+1*stride]=(l2 + t2)>>1;
    src[2+0*stride]=
    src[1+1*stride]=
    src[0+2*stride]=
    src[3+0*stride]=
    src[2+1*stride]=
    src[1+2*stride]=
    src[0+3*stride]=
    src[3+1*stride]=
    src[2+2*stride]=
    src[1+3*stride]=
    src[3+2*stride]=
    src[2+3*stride]=
    src[3+3*stride]=(l3 + t3)>>1;
}

static void pred16x16_plane_svq3_c(uint8_t *src, int stride){
    pred16x16_plane_compat_c(src, stride, 1);
}

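/* Decode Golomb-coded run/level coefficient data into 'block'; 'type'
 * selects the scan pattern and run/level coding (0: luma DC, 1: zigzag,
 * 2: SVQ3 dual scan, 3: chroma DC). Returns 0 on success, -1 on error. */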
static inline int svq3_decode_block (GetBitContext *gb, DCTELEM *block,
                                     int index, const int type) {

  static const uint8_t *const scan_patterns[4] =
  { luma_dc_zigzag_scan, zigzag_scan, svq3_scan, chroma_dc_scan };

  int run, level, sign, vlc, limit;
  const int intra = (3 * type) >> 2;
  const uint8_t *const scan = scan_patterns[type];

  for (limit=(16 >> intra); index < 16; index=limit, limit+=8) {
    for (; (vlc = svq3_get_ue_golomb (gb)) != 0; index++) {

      if (vlc == INVALID_VLC)
        return -1;

      sign = (vlc & 0x1) - 1;
      vlc  = (vlc + 1) >> 1;

      if (type == 3) {
        if (vlc < 3) {
          run   = 0;
          level = vlc;
        } else if (vlc < 4) {
          run   = 1;
          level = 1;
        } else {
          run   = (vlc & 0x3);
          level = ((vlc + 9) >> 2) - run;
        }
      } else {
        if (vlc < 16) {
          run   = svq3_dct_tables[intra][vlc].run;
          level = svq3_dct_tables[intra][vlc].level;
        } else if (intra) {
          run   = (vlc & 0x7);
          level = (vlc >> 3) + ((run == 0) ? 8 : ((run < 2) ? 2 : ((run < 5) ? 0 : -1)));
        } else {
          run   = (vlc & 0xF);
          level = (vlc >> 4) + ((run == 0) ? 4 : ((run < 3) ? 2 : ((run < 10) ? 1 : 0)));
        }
      }

      if ((index += run) >= limit)
        return -1;

      block[scan[index]] = (level ^ sign) - sign;
    }

    if (type != 2) {
      break;
    }
  }

  return 0;
}

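/* Motion compensation for one partition: copy (or average, if 'avg') the
 * prediction from the reference picture selected by 'dir', using halfpel or
 * thirdpel interpolation, with edge emulation when the block reaches outside
 * the picture; chroma is handled unless CODEC_FLAG_GRAY is set. */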
static inline void svq3_mc_dir_part (MpegEncContext *s,
                                     int x, int y, int width, int height,
                                     int mx, int my, int dxy,
                                     int thirdpel, int dir, int avg) {

  const Picture *pic = (dir == 0) ? &s->last_picture : &s->next_picture;
  uint8_t *src, *dest;
  int i, emu = 0;
  int blocksize= 2 - (width>>3); //16->0, 8->1, 4->2

  mx += x;
  my += y;

  if (mx < 0 || mx >= (s->h_edge_pos - width  - 1) ||
      my < 0 || my >= (s->v_edge_pos - height - 1)) {

    if ((s->flags & CODEC_FLAG_EMU_EDGE)) {
      emu = 1;
    }

    mx = clip (mx, -16, (s->h_edge_pos - width  + 15));
    my = clip (my, -16, (s->v_edge_pos - height + 15));
  }

  /* form component predictions */
  dest = s->current_picture.data[0] + x + y*s->linesize;
  src  = pic->data[0] + mx + my*s->linesize;

  if (emu) {
    ff_emulated_edge_mc (s->edge_emu_buffer, src, s->linesize, (width + 1), (height + 1),
                         mx, my, s->h_edge_pos, s->v_edge_pos);
    src = s->edge_emu_buffer;
  }
  if(thirdpel)
    (avg ? s->dsp.avg_tpel_pixels_tab : s->dsp.put_tpel_pixels_tab)[dxy](dest, src, s->linesize, width, height);
  else
    (avg ? s->dsp.avg_pixels_tab : s->dsp.put_pixels_tab)[blocksize][dxy](dest, src, s->linesize, height);

  if (!(s->flags & CODEC_FLAG_GRAY)) {
    mx     = (mx + (mx < (int) x)) >> 1;
    my     = (my + (my < (int) y)) >> 1;
    width  = (width  >> 1);
    height = (height >> 1);
    blocksize++;

    for (i=1; i < 3; i++) {
      dest = s->current_picture.data[i] + (x >> 1) + (y >> 1)*s->uvlinesize;
      src  = pic->data[i] + mx + my*s->uvlinesize;

      if (emu) {
        ff_emulated_edge_mc (s->edge_emu_buffer, src, s->uvlinesize, (width + 1), (height + 1),
                             mx, my, (s->h_edge_pos >> 1), (s->v_edge_pos >> 1));
        src = s->edge_emu_buffer;
      }
      if(thirdpel)
        (avg ? s->dsp.avg_tpel_pixels_tab : s->dsp.put_tpel_pixels_tab)[dxy](dest, src, s->uvlinesize, width, height);
      else
        (avg ? s->dsp.avg_pixels_tab : s->dsp.put_pixels_tab)[blocksize][dxy](dest, src, s->uvlinesize, height);
    }
  }
}

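/* Decode and motion-compensate all partitions of the current macroblock for
 * reference direction 'dir'; partition dimensions are derived from 'size'.
 * In PREDICT_MODE the vectors are scaled from the co-located vectors of the
 * next picture instead of being read from the bitstream. */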
static inline int svq3_mc_dir (H264Context *h, int size, int mode, int dir, int avg) {

  int i, j, k, mx, my, dx, dy, x, y;
  MpegEncContext *const s = (MpegEncContext *) h;
  const int part_width  = ((size & 5) == 4) ? 4 : 16 >> (size & 1);
  const int part_height = 16 >> ((unsigned) (size + 1) / 3);
  const int extra_width = (mode == PREDICT_MODE) ? -16*6 : 0;
  const int h_edge_pos  = 6*(s->h_edge_pos - part_width ) - extra_width;
  const int v_edge_pos  = 6*(s->v_edge_pos - part_height) - extra_width;

  for (i=0; i < 16; i+=part_height) {
    for (j=0; j < 16; j+=part_width) {
      const int b_xy = (4*s->mb_x+(j>>2)) + (4*s->mb_y+(i>>2))*h->b_stride;
      int dxy;
      x = 16*s->mb_x + j;
      y = 16*s->mb_y + i;
      k = ((j>>2)&1) + ((i>>1)&2) + ((j>>1)&4) + (i&8);

      if (mode != PREDICT_MODE) {
        pred_motion (h, k, (part_width >> 2), dir, 1, &mx, &my);
      } else {
        mx = s->next_picture.motion_val[0][b_xy][0]<<1;
        my = s->next_picture.motion_val[0][b_xy][1]<<1;

        if (dir == 0) {
          mx = ((mx * h->frame_num_offset) / h->prev_frame_num_offset + 1)>>1;
          my = ((my * h->frame_num_offset) / h->prev_frame_num_offset + 1)>>1;
        } else {
          mx = ((mx * (h->frame_num_offset - h->prev_frame_num_offset)) / h->prev_frame_num_offset + 1)>>1;
          my = ((my * (h->frame_num_offset - h->prev_frame_num_offset)) / h->prev_frame_num_offset + 1)>>1;
        }
      }

      /* clip motion vector prediction to frame border */
      mx = clip (mx, extra_width - 6*x, h_edge_pos - 6*x);
      my = clip (my, extra_width - 6*y, v_edge_pos - 6*y);

      /* get (optional) motion vector differential */
      if (mode == PREDICT_MODE) {
        dx = dy = 0;
      } else {
        dy = svq3_get_se_golomb (&s->gb);
        dx = svq3_get_se_golomb (&s->gb);

        if (dx == INVALID_VLC || dy == INVALID_VLC) {
          return -1;
        }
      }

      /* compute motion vector */
      if (mode == THIRDPEL_MODE) {
        int fx, fy;
        mx = ((mx + 1)>>1) + dx;
        my = ((my + 1)>>1) + dy;
        fx= ((unsigned)(mx + 0x3000))/3 - 0x1000;
        fy= ((unsigned)(my + 0x3000))/3 - 0x1000;
        dxy= (mx - 3*fx) + 4*(my - 3*fy);

        svq3_mc_dir_part (s, x, y, part_width, part_height, fx, fy, dxy, 1, dir, avg);
        mx += mx;
        my += my;
      } else if (mode == HALFPEL_MODE || mode == PREDICT_MODE) {
        mx = ((unsigned)(mx + 1 + 0x3000))/3 + dx - 0x1000;
        my = ((unsigned)(my + 1 + 0x3000))/3 + dy - 0x1000;
        dxy= (mx&1) + 2*(my&1);

        svq3_mc_dir_part (s, x, y, part_width, part_height, mx>>1, my>>1, dxy, 0, dir, avg);
        mx *= 3;
        my *= 3;
      } else {
        mx = ((unsigned)(mx + 3 + 0x6000))/6 + dx - 0x1000;
        my = ((unsigned)(my + 3 + 0x6000))/6 + dy - 0x1000;

        svq3_mc_dir_part (s, x, y, part_width, part_height, mx, my, 0, 0, dir, avg);
        mx *= 6;
        my *= 6;
      }

      /* update mv_cache */
      if (mode != PREDICT_MODE) {
        int32_t mv = pack16to32(mx,my);

        if (part_height == 8 && i < 8) {
          *(int32_t *) h->mv_cache[dir][scan8[k] + 1*8] = mv;

          if (part_width == 8 && j < 8) {
            *(int32_t *) h->mv_cache[dir][scan8[k] + 1 + 1*8] = mv;
          }
        }
        if (part_width == 8 && j < 8) {
          *(int32_t *) h->mv_cache[dir][scan8[k] + 1] = mv;
        }
        if (part_width == 4 || part_height == 4) {
          *(int32_t *) h->mv_cache[dir][scan8[k]] = mv;
        }
      }

      /* write back motion vectors */
      fill_rectangle(s->current_picture.motion_val[dir][b_xy], part_width>>2, part_height>>2, h->b_stride, pack16to32(mx,my), 4);
    }
  }

  return 0;
}

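/* Decode a single macroblock (SKIP, INTER, INTRA4x4 or INTRA16x16):
 * motion compensation or intra prediction mode setup, CBP and the residual
 * coefficient blocks. Returns 0 on success, -1 on error. */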
static int svq3_decode_mb (H264Context *h, unsigned int mb_type) {
  int i, j, k, m, dir, mode;
  int cbp = 0;
  uint32_t vlc;
  int8_t *top, *left;
  MpegEncContext *const s = (MpegEncContext *) h;
  const int mb_xy = s->mb_x + s->mb_y*s->mb_stride;
  const int b_xy = 4*s->mb_x + 4*s->mb_y*h->b_stride;

  h->top_samples_available      = (s->mb_y == 0) ? 0x33FF : 0xFFFF;
  h->left_samples_available     = (s->mb_x == 0) ? 0x5F5F : 0xFFFF;
  h->topright_samples_available = 0xFFFF;

  if (mb_type == 0) {           /* SKIP */
    if (s->pict_type == P_TYPE || s->next_picture.mb_type[mb_xy] == -1) {
      svq3_mc_dir_part (s, 16*s->mb_x, 16*s->mb_y, 16, 16, 0, 0, 0, 0, 0, 0);

      if (s->pict_type == B_TYPE) {
        svq3_mc_dir_part (s, 16*s->mb_x, 16*s->mb_y, 16, 16, 0, 0, 0, 0, 1, 1);
      }

      mb_type = MB_TYPE_SKIP;
    } else {
      svq3_mc_dir (h, s->next_picture.mb_type[mb_xy], PREDICT_MODE, 0, 0);
      svq3_mc_dir (h, s->next_picture.mb_type[mb_xy], PREDICT_MODE, 1, 1);

      mb_type = MB_TYPE_16x16;
    }
  } else if (mb_type < 8) {     /* INTER */
    if (h->thirdpel_flag && h->halfpel_flag == !get_bits (&s->gb, 1)) {
      mode = THIRDPEL_MODE;
    } else if (h->halfpel_flag && h->thirdpel_flag == !get_bits (&s->gb, 1)) {
      mode = HALFPEL_MODE;
    } else {
      mode = FULLPEL_MODE;
    }

    /* fill caches */
    /* note ref_cache should contain here:
        ????????
        ???11111
        N??11111
        N??11111
        N??11111
        N
    */

    for (m=0; m < 2; m++) {
      if (s->mb_x > 0 && h->intra4x4_pred_mode[mb_xy - 1][0] != -1) {
        for (i=0; i < 4; i++) {
          *(uint32_t *) h->mv_cache[m][scan8[0] - 1 + i*8] = *(uint32_t *) s->current_picture.motion_val[m][b_xy - 1 + i*h->b_stride];
        }
      } else {
        for (i=0; i < 4; i++) {
          *(uint32_t *) h->mv_cache[m][scan8[0] - 1 + i*8] = 0;
        }
      }
      if (s->mb_y > 0) {
        memcpy (h->mv_cache[m][scan8[0] - 1*8], s->current_picture.motion_val[m][b_xy - h->b_stride], 4*2*sizeof(int16_t));
        memset (&h->ref_cache[m][scan8[0] - 1*8], (h->intra4x4_pred_mode[mb_xy - s->mb_stride][4] == -1) ? PART_NOT_AVAILABLE : 1, 4);

        if (s->mb_x < (s->mb_width - 1)) {
          *(uint32_t *) h->mv_cache[m][scan8[0] + 4 - 1*8] = *(uint32_t *) s->current_picture.motion_val[m][b_xy - h->b_stride + 4];
          h->ref_cache[m][scan8[0] + 4 - 1*8] =
                  (h->intra4x4_pred_mode[mb_xy - s->mb_stride + 1][0] == -1 ||
                   h->intra4x4_pred_mode[mb_xy - s->mb_stride][4] == -1) ? PART_NOT_AVAILABLE : 1;
        }else
          h->ref_cache[m][scan8[0] + 4 - 1*8] = PART_NOT_AVAILABLE;
        if (s->mb_x > 0) {
          *(uint32_t *) h->mv_cache[m][scan8[0] - 1 - 1*8] = *(uint32_t *) s->current_picture.motion_val[m][b_xy - h->b_stride - 1];
          h->ref_cache[m][scan8[0] - 1 - 1*8] = (h->intra4x4_pred_mode[mb_xy - s->mb_stride - 1][3] == -1) ? PART_NOT_AVAILABLE : 1;
        }else
          h->ref_cache[m][scan8[0] - 1 - 1*8] = PART_NOT_AVAILABLE;
      }else
        memset (&h->ref_cache[m][scan8[0] - 1*8 - 1], PART_NOT_AVAILABLE, 8);

      if (s->pict_type != B_TYPE)
        break;
    }

    /* decode motion vector(s) and form prediction(s) */
    if (s->pict_type == P_TYPE) {
      svq3_mc_dir (h, (mb_type - 1), mode, 0, 0);
    } else {        /* B_TYPE */
      if (mb_type != 2) {
        svq3_mc_dir (h, 0, mode, 0, 0);
      } else {
        for (i=0; i < 4; i++) {
          memset (s->current_picture.motion_val[0][b_xy + i*h->b_stride], 0, 4*2*sizeof(int16_t));
        }
      }
      if (mb_type != 1) {
        svq3_mc_dir (h, 0, mode, 1, (mb_type == 3));
      } else {
        for (i=0; i < 4; i++) {
          memset (s->current_picture.motion_val[1][b_xy + i*h->b_stride], 0, 4*2*sizeof(int16_t));
        }
      }
    }

    mb_type = MB_TYPE_16x16;
  } else if (mb_type == 8 || mb_type == 33) {        /* INTRA4x4 */
    memset (h->intra4x4_pred_mode_cache, -1, 8*5*sizeof(int8_t));

    if (mb_type == 8) {
      if (s->mb_x > 0) {
        for (i=0; i < 4; i++) {
          h->intra4x4_pred_mode_cache[scan8[0] - 1 + i*8] = h->intra4x4_pred_mode[mb_xy - 1][i];
        }
        if (h->intra4x4_pred_mode_cache[scan8[0] - 1] == -1) {
          h->left_samples_available = 0x5F5F;
        }
      }
      if (s->mb_y > 0) {
        h->intra4x4_pred_mode_cache[4+8*0] = h->intra4x4_pred_mode[mb_xy - s->mb_stride][4];
        h->intra4x4_pred_mode_cache[5+8*0] = h->intra4x4_pred_mode[mb_xy - s->mb_stride][5];
        h->intra4x4_pred_mode_cache[6+8*0] = h->intra4x4_pred_mode[mb_xy - s->mb_stride][6];
        h->intra4x4_pred_mode_cache[7+8*0] = h->intra4x4_pred_mode[mb_xy - s->mb_stride][3];

        if (h->intra4x4_pred_mode_cache[4+8*0] == -1) {
          h->top_samples_available = 0x33FF;
        }
      }

      /* decode prediction codes for luma blocks */
      for (i=0; i < 16; i+=2) {
        vlc = svq3_get_ue_golomb (&s->gb);

        if (vlc >= 25)
          return -1;

        left = &h->intra4x4_pred_mode_cache[scan8[i] - 1];
        top  = &h->intra4x4_pred_mode_cache[scan8[i] - 8];

        left[1] = svq3_pred_1[top[0] + 1][left[0] + 1][svq3_pred_0[vlc][0]];
        left[2] = svq3_pred_1[top[1] + 1][left[1] + 1][svq3_pred_0[vlc][1]];

        if (left[1] == -1 || left[2] == -1)
          return -1;
      }
    } else {        /* mb_type == 33, DC_128_PRED block type */
      for (i=0; i < 4; i++) {
        memset (&h->intra4x4_pred_mode_cache[scan8[0] + 8*i], DC_PRED, 4);
      }
    }

    write_back_intra_pred_mode (h);

    if (mb_type == 8) {
      check_intra4x4_pred_mode (h);

      h->top_samples_available  = (s->mb_y == 0) ? 0x33FF : 0xFFFF;
      h->left_samples_available = (s->mb_x == 0) ? 0x5F5F : 0xFFFF;
    } else {
      for (i=0; i < 4; i++) {
        memset (&h->intra4x4_pred_mode_cache[scan8[0] + 8*i], DC_128_PRED, 4);
      }

      h->top_samples_available  = 0x33FF;
      h->left_samples_available = 0x5F5F;
    }

    mb_type = MB_TYPE_INTRA4x4;
  } else {                      /* INTRA16x16 */
    dir = i_mb_type_info[mb_type - 8].pred_mode;
    dir = (dir >> 1) ^ 3*(dir & 1) ^ 1;

    if ((h->intra16x16_pred_mode = check_intra_pred_mode (h, dir)) == -1)
      return -1;

    cbp = i_mb_type_info[mb_type - 8].cbp;
    mb_type = MB_TYPE_INTRA16x16;
  }

  if (!IS_INTER(mb_type) && s->pict_type != I_TYPE) {
    for (i=0; i < 4; i++) {
      memset (s->current_picture.motion_val[0][b_xy + i*h->b_stride], 0, 4*2*sizeof(int16_t));
    }
    if (s->pict_type == B_TYPE) {
      for (i=0; i < 4; i++) {
        memset (s->current_picture.motion_val[1][b_xy + i*h->b_stride], 0, 4*2*sizeof(int16_t));
      }
    }
  }
  if (!IS_INTRA4x4(mb_type)) {
    memset (h->intra4x4_pred_mode[mb_xy], DC_PRED, 8);
  }
  if (!IS_SKIP(mb_type) || s->pict_type == B_TYPE) {
    memset (h->non_zero_count_cache + 8, 0, 4*9*sizeof(uint8_t));
    s->dsp.clear_blocks(h->mb);
  }

  if (!IS_INTRA16x16(mb_type) && (!IS_SKIP(mb_type) || s->pict_type == B_TYPE)) {
    if ((vlc = svq3_get_ue_golomb (&s->gb)) >= 48)
      return -1;

    cbp = IS_INTRA(mb_type) ? golomb_to_intra4x4_cbp[vlc] : golomb_to_inter_cbp[vlc];
  }
  if (IS_INTRA16x16(mb_type) || (s->pict_type != I_TYPE && s->adaptive_quant && cbp)) {
    s->qscale += svq3_get_se_golomb (&s->gb);

    if (s->qscale > 31)
      return -1;
  }
  if (IS_INTRA16x16(mb_type)) {
    if (svq3_decode_block (&s->gb, h->mb, 0, 0))
      return -1;
  }

  if (cbp) {
    const int index = IS_INTRA16x16(mb_type) ? 1 : 0;
    const int type = ((s->qscale < 24 && IS_INTRA4x4(mb_type)) ? 2 : 1);

    for (i=0; i < 4; i++) {
      if ((cbp & (1 << i))) {
        for (j=0; j < 4; j++) {
          k = index ? ((j&1) + 2*(i&1) + 2*(j&2) + 4*(i&2)) : (4*i + j);
          h->non_zero_count_cache[ scan8[k] ] = 1;

          if (svq3_decode_block (&s->gb, &h->mb[16*k], index, type))
            return -1;
        }
      }
    }

    if ((cbp & 0x30)) {
      for (i=0; i < 2; ++i) {
        if (svq3_decode_block (&s->gb, &h->mb[16*(16 + 4*i)], 0, 3))
          return -1;
      }

      if ((cbp & 0x20)) {
        for (i=0; i < 8; i++) {
          h->non_zero_count_cache[ scan8[16+i] ] = 1;

          if (svq3_decode_block (&s->gb, &h->mb[16*(16 + i)], 1, 1))
            return -1;
        }
      }
    }
  }

  s->current_picture.mb_type[mb_xy] = mb_type;

  if (IS_INTRA(mb_type)) {
    h->chroma_pred_mode = check_intra_pred_mode (h, DC_PRED8x8);
  }

  return 0;
}

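/* Parse an SVQ3 slice header: determine where the next slice starts
 * (h->next_slice_index), read the slice type, slice number, qscale and
 * adaptive-quant flag, and reset the intra predictors along the slice
 * boundary. */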
static int svq3_decode_slice_header (H264Context *h) {
  MpegEncContext *const s = (MpegEncContext *) h;
  const int mb_xy = s->mb_x + s->mb_y*s->mb_stride;
  int i, header;

  header = get_bits (&s->gb, 8);

  if (((header & 0x9F) != 1 && (header & 0x9F) != 2) || (header & 0x60) == 0) {
    /* TODO: what? */
    av_log(h->s.avctx, AV_LOG_ERROR, "unsupported slice header (%02X)\n", header);
    return -1;
  } else {
    int length = (header >> 5) & 3;

    h->next_slice_index = s->gb.index + 8*show_bits (&s->gb, 8*length) + 8*length;

    if (h->next_slice_index > s->gb.size_in_bits)
      return -1;

    s->gb.size_in_bits = h->next_slice_index - 8*(length - 1);
    s->gb.index += 8;

    if (length > 0) {
      memcpy ((uint8_t *) &s->gb.buffer[s->gb.index >> 3],
             &s->gb.buffer[s->gb.size_in_bits >> 3], (length - 1));
    }
  }

  if ((i = svq3_get_ue_golomb (&s->gb)) == INVALID_VLC || i >= 3)
    return -1;

  h->slice_type = golomb_to_pict_type[i];

  if ((header & 0x9F) == 2) {
    i = (s->mb_num < 64) ? 6 : (1 + av_log2 (s->mb_num - 1));
    s->mb_skip_run = get_bits (&s->gb, i) - (s->mb_x + (s->mb_y * s->mb_width));
  } else {
    get_bits1 (&s->gb);
    s->mb_skip_run = 0;
  }

  h->slice_num = get_bits (&s->gb, 8);
  s->qscale = get_bits (&s->gb, 5);
  s->adaptive_quant = get_bits1 (&s->gb);

  /* unknown fields */
  get_bits1 (&s->gb);

  if (h->unknown_svq3_flag) {
    get_bits1 (&s->gb);
  }

  get_bits1 (&s->gb);
  get_bits (&s->gb, 2);

  while (get_bits1 (&s->gb)) {
    get_bits (&s->gb, 8);
  }

  /* reset intra predictors and invalidate motion vector references */
  if (s->mb_x > 0) {
    memset (h->intra4x4_pred_mode[mb_xy - 1], -1, 4*sizeof(int8_t));
    memset (h->intra4x4_pred_mode[mb_xy - s->mb_x], -1, 8*sizeof(int8_t)*s->mb_x);
  }
  if (s->mb_y > 0) {
    memset (h->intra4x4_pred_mode[mb_xy - s->mb_stride], -1, 8*sizeof(int8_t)*(s->mb_width - s->mb_x));

    if (s->mb_x > 0) {
      h->intra4x4_pred_mode[mb_xy - s->mb_stride - 1][3] = -1;
    }
  }

  return 0;
}

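/* Decode one complete frame: on the first call the context is set up and the
 * 'SVQ3' extradata (if present) is parsed for the halfpel/thirdpel/low_delay
 * flags; the frame is then decoded slice by slice, macroblock by macroblock. */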
static int svq3_decode_frame (AVCodecContext *avctx,
                              void *data, int *data_size,
                              uint8_t *buf, int buf_size) {
  MpegEncContext *const s = avctx->priv_data;
  H264Context *const h = avctx->priv_data;
  int m, mb_type;

  *data_size = 0;

  s->flags = avctx->flags;

  if (!s->context_initialized) {
    s->width = avctx->width;
    s->height = avctx->height;
    h->pred4x4[DIAG_DOWN_LEFT_PRED] = pred4x4_down_left_svq3_c;
    h->pred16x16[PLANE_PRED8x8] = pred16x16_plane_svq3_c;
    h->halfpel_flag = 1;
    h->thirdpel_flag = 1;
    h->unknown_svq3_flag = 0;
    h->chroma_qp = 4;

    if (MPV_common_init (s) < 0)
      return -1;

    h->b_stride = 4*s->mb_width;

    alloc_tables (h);

    if (avctx->extradata && avctx->extradata_size >= 0x64
        && !memcmp (avctx->extradata, "SVQ3", 4)) {

      GetBitContext gb;

      init_get_bits (&gb, (uint8_t *) avctx->extradata + 0x62,
                     8*(avctx->extradata_size - 0x62));

      /* 'frame size code' and optional 'width, height' */
      if (get_bits (&gb, 3) == 7) {
        get_bits (&gb, 12);
        get_bits (&gb, 12);
      }

      h->halfpel_flag = get_bits1 (&gb);
      h->thirdpel_flag = get_bits1 (&gb);

      /* unknown fields */
      get_bits1 (&gb);
      get_bits1 (&gb);
      get_bits1 (&gb);
      get_bits1 (&gb);

      s->low_delay = get_bits1 (&gb);

      /* unknown field */
      get_bits1 (&gb);

      while (get_bits1 (&gb)) {
        get_bits (&gb, 8);
      }

      h->unknown_svq3_flag = get_bits1 (&gb);
      avctx->has_b_frames = !s->low_delay;
    }
  }

  /* special case for last picture */
  if (buf_size == 0) {
    if (s->next_picture_ptr && !s->low_delay) {
      *(AVFrame *) data = *(AVFrame *) &s->next_picture;
      *data_size = sizeof(AVFrame);
    }
    return 0;
  }

  init_get_bits (&s->gb, buf, 8*buf_size);

  s->mb_x = s->mb_y = 0;

  if (svq3_decode_slice_header (h))
    return -1;

  s->pict_type = h->slice_type;
  s->picture_number = h->slice_num;

  if(avctx->debug&FF_DEBUG_PICT_INFO){
      av_log(h->s.avctx, AV_LOG_DEBUG, "%c hpel:%d, tpel:%d aqp:%d qp:%d\n",
      av_get_pict_type_char(s->pict_type), h->halfpel_flag, h->thirdpel_flag,
      s->adaptive_quant, s->qscale
      );
  }

  /* for hurry_up==5 */
  s->current_picture.pict_type = s->pict_type;
  s->current_picture.key_frame = (s->pict_type == I_TYPE);

  /* skip B-frames if we don't have reference frames */
  if (s->last_picture_ptr == NULL && s->pict_type == B_TYPE) return 0;
  /* skip B-frames if we are in a hurry */
  if (avctx->hurry_up && s->pict_type == B_TYPE) return 0;
  /* skip everything if we are in a hurry >= 5 */
  if (avctx->hurry_up >= 5) return 0;

  if (s->next_p_frame_damaged) {
    if (s->pict_type == B_TYPE)
      return 0;
    else
      s->next_p_frame_damaged = 0;
  }

  frame_start (h);

  if (s->pict_type == B_TYPE) {
    h->frame_num_offset = (h->slice_num - h->prev_frame_num);

    if (h->frame_num_offset < 0) {
      h->frame_num_offset += 256;
    }
    if (h->frame_num_offset == 0 || h->frame_num_offset >= h->prev_frame_num_offset) {
      av_log(h->s.avctx, AV_LOG_ERROR, "error in B-frame picture id\n");
      return -1;
    }
  } else {
    h->prev_frame_num = h->frame_num;
    h->frame_num = h->slice_num;
    h->prev_frame_num_offset = (h->frame_num - h->prev_frame_num);

    if (h->prev_frame_num_offset < 0) {
      h->prev_frame_num_offset += 256;
    }
  }

  for(m=0; m<2; m++){
    int i;
    for(i=0; i<4; i++){
      int j;
      for(j=-1; j<4; j++)
        h->ref_cache[m][scan8[0] + 8*i + j]= 1;
      h->ref_cache[m][scan8[0] + 8*i + j]= PART_NOT_AVAILABLE;
    }
  }

  for (s->mb_y=0; s->mb_y < s->mb_height; s->mb_y++) {
    for (s->mb_x=0; s->mb_x < s->mb_width; s->mb_x++) {

      if ( (s->gb.index + 7) >= s->gb.size_in_bits &&
          ((s->gb.index & 7) == 0 || show_bits (&s->gb, (-s->gb.index & 7)) == 0)) {

        s->gb.index = h->next_slice_index;
        s->gb.size_in_bits = 8*buf_size;

        if (svq3_decode_slice_header (h))
          return -1;

        /* TODO: support s->mb_skip_run */
      }

      mb_type = svq3_get_ue_golomb (&s->gb);

      if (s->pict_type == I_TYPE) {
        mb_type += 8;
      } else if (s->pict_type == B_TYPE && mb_type >= 4) {
        mb_type += 4;
      }
      if (mb_type > 33 || svq3_decode_mb (h, mb_type)) {
        av_log(h->s.avctx, AV_LOG_ERROR, "error while decoding MB %d %d\n", s->mb_x, s->mb_y);
        return -1;
      }

      if (mb_type != 0) {
        hl_decode_mb (h);
      }

      if (s->pict_type != B_TYPE && !s->low_delay) {
        s->current_picture.mb_type[s->mb_x + s->mb_y*s->mb_stride] =
                        (s->pict_type == P_TYPE && mb_type < 8) ? (mb_type - 1) : -1;
      }
    }

    ff_draw_horiz_band(s, 16*s->mb_y, 16);
  }

  MPV_frame_end(s);

  if (s->pict_type == B_TYPE || s->low_delay) {
    *(AVFrame *) data = *(AVFrame *) &s->current_picture;
  } else {
    *(AVFrame *) data = *(AVFrame *) &s->last_picture;
  }

  avctx->frame_number = s->picture_number - 1;

  /* don't output the last pic after seeking */
  if (s->last_picture_ptr || s->low_delay) {
    *data_size = sizeof(AVFrame);
  }

  return buf_size;
}


AVCodec svq3_decoder = {
    "svq3",
    CODEC_TYPE_VIDEO,
    CODEC_ID_SVQ3,
    sizeof(H264Context),
    decode_init,
    NULL,
    decode_end,
    svq3_decode_frame,
    CODEC_CAP_DRAW_HORIZ_BAND | CODEC_CAP_DR1,
};