=============================================
SNOW Video Codec Specification Draft 20070103
=============================================

Intro:
======
This specification describes the snow syntax and semantics as well as
how to decode snow.
The decoding process is precisely described and any compliant decoder
MUST produce exactly the same output for a spec-conformant snow stream.
For encoding, though, any process which generates a stream compliant with
the syntactical and semantical requirements and which is decodable by
the process described in this spec shall be considered a conformant
snow encoder.

Definitions:
============

MUST    the specific part must be done to conform to this standard
SHOULD  it is recommended to be done that way, but not strictly required

ilog2(x) is the rounded-down base-2 logarithm of x
ilog2(0) = 0
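
As an illustration only (this function is not part of the bitstream syntax),
ilog2 as defined above could be written in C like this:

static int ilog2(unsigned x){
    int n= 0;
    while(x > 1){   /* floor of the base-2 logarithm; ilog2(0) and ilog2(1) are 0 */
        x >>= 1;
        n++;
    }
    return n;
}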

Type definitions:
=================

b   1-bit range coded
u   unsigned scalar value range coded
s   signed scalar value range coded


Bitstream syntax:
=================

frame:
    header
    prediction
    residual

header:
    keyframe                            b   MID_STATE
    if(keyframe || always_reset)
        reset_contexts
    if(keyframe){
        version                         u   header_state
        always_reset                    b   header_state
        temporal_decomposition_type     u   header_state
        temporal_decomposition_count    u   header_state
        spatial_decomposition_count     u   header_state
        colorspace_type                 u   header_state
        chroma_h_shift                  u   header_state
        chroma_v_shift                  u   header_state
        spatial_scalability             b   header_state
        max_ref_frames-1                u   header_state
        qlogs
    }
    if(!keyframe){
        update_mc                       b   header_state
        if(update_mc){
            for(plane=0; plane<2; plane++){
                diag_mc                 b   header_state
                htaps/2-1               u   header_state
                for(i= p->htaps/2; i; i--)
                    |hcoeff[i]|         u   header_state
            }
        }
        update_qlogs                    b   header_state
        if(update_qlogs){
            spatial_decomposition_count u   header_state
            qlogs
        }
    }

    spatial_decomposition_type          s   header_state
    qlog                                s   header_state
    mv_scale                            s   header_state
    qbias                               s   header_state
    block_max_depth                     s   header_state

qlogs:
    for(plane=0; plane<2; plane++){
        quant_table[plane][0][0]        s   header_state
        for(level=0; level < spatial_decomposition_count; level++){
            quant_table[plane][level][1]s   header_state
            quant_table[plane][level][3]s   header_state
        }
    }

reset_contexts
    *_state[*]= MID_STATE

prediction:
    for(y=0; y<block_count_vertical; y++)
        for(x=0; x<block_count_horizontal; x++)
            block(0)

block(level):
    mvx_diff=mvy_diff=y_diff=cb_diff=cr_diff=0
    if(keyframe){
        intra=1
    }else{
        if(level!=max_block_depth){
            s_context= 2*left->level + 2*top->level + topleft->level + topright->level
            leaf                        b   block_state[4 + s_context]
        }
        if(level==max_block_depth || leaf){
            intra                       b   block_state[1 + left->intra + top->intra]
            if(intra){
                y_diff                  s   block_state[32]
                cb_diff                 s   block_state[64]
                cr_diff                 s   block_state[96]
            }else{
                ref_context= ilog2(2*left->ref) + ilog2(2*top->ref)
                if(ref_frames > 1)
                    ref                 u   block_state[128 + 1024 + 32*ref_context]
                mx_context= ilog2(2*abs(left->mx - top->mx))
                my_context= ilog2(2*abs(left->my - top->my))
                mvx_diff                s   block_state[128 + 32*(mx_context + 16*!!ref)]
                mvy_diff                s   block_state[128 + 32*(my_context + 16*!!ref)]
            }
        }else{
            block(level+1)
            block(level+1)
            block(level+1)
            block(level+1)
        }
    }


residual:
    residual2(luma)
    residual2(chroma_cr)
    residual2(chroma_cb)

residual2:
    for(level=0; level<spatial_decomposition_count; level++){
        if(level==0)
            subband(LL, 0)
        subband(HL, level)
        subband(LH, level)
        subband(HH, level)
    }

subband:
    FIXME


Tag description:
----------------

version
    0
    this MUST NOT change within a bitstream

always_reset
    if 1 then the range coder contexts will be reset after each frame

temporal_decomposition_type
    0

temporal_decomposition_count
    0

spatial_decomposition_count
    FIXME

colorspace_type
    0
    this MUST NOT change within a bitstream

chroma_h_shift
    log2(luma.width / chroma.width)
    this MUST NOT change within a bitstream

chroma_v_shift
    log2(luma.height / chroma.height)
    this MUST NOT change within a bitstream

spatial_scalability
    0

max_ref_frames
    maximum number of reference frames
    this MUST NOT change within a bitstream

update_mc
    indicates that motion compensation filter parameters are stored in the
    header

diag_mc
    flag to enable faster diagonal interpolation
    this SHOULD be 1 unless it turns out to be covered by a valid patent

htaps
    number of half pel interpolation filter taps, MUST be even, >0 and <10

hcoeff
    half pel interpolation filter coefficients, hcoeff[0] are the 2 middle
    coefficients, hcoeff[1] are the next outer ones and so on, resulting in a
    filter like: ..., hcoeff[2], hcoeff[1], hcoeff[0], hcoeff[0], hcoeff[1], hcoeff[2], ...
    the sign of the coefficients is not explicitly stored but alternates
    after each coefficient, and hcoeff[0] is positive, so ...,+,-,+,-,+,+,-,+,-,+,...
    hcoeff[0] is not explicitly stored but found by subtracting the sum
    of all stored coefficients, with their signs, from 32
    hcoeff[0]= 32 - hcoeff[1] - hcoeff[2] - ...
    a good choice for hcoeff and htaps is
    htaps= 6
    hcoeff={40,-10,2}
    an alternative which requires more computations at both the encoder and
    decoder side and may or may not be better is
    htaps= 8
    hcoeff={42,-14,6,-2}
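
    The following sketch (illustrative only, not the reference implementation)
    applies the sign and normalization rules above to the stored magnitudes;
    mag[], n (the number of stored coefficients) and the function name are
    assumptions made for this example:

    /* mag[i] holds |hcoeff[i]| for the stored coefficients i=1..n */
    static void build_hcoeff(const int *mag, int *hcoeff, int n){
        int i;
        hcoeff[0]= 32;
        for(i=1; i<=n; i++){
            hcoeff[i] = mag[i] * (1 - 2*(i&1)); /* signs alternate, hcoeff[0] is positive */
            hcoeff[0]-= hcoeff[i];              /* hcoeff[0]= 32 - hcoeff[1] - hcoeff[2] - ... */
        }
    }

    for the htaps= 6 example, magnitudes 10 and 2 give hcoeff= {40,-10,2}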

ref_frames
    minimum of the number of available reference frames and max_ref_frames
    for example the first frame after a key frame always has ref_frames=1

spatial_decomposition_type
    wavelet type
    0 is a 9/7 symmetric compact integer wavelet
    1 is a 5/3 symmetric compact integer wavelet
    others are reserved
    stored as delta from last, last is reset to 0 if always_reset || keyframe

qlog
    quality (logarithmic quantizer scale)
    stored as delta from last, last is reset to 0 if always_reset || keyframe

mv_scale
    stored as delta from last, last is reset to 0 if always_reset || keyframe
    FIXME check that everything works fine if this changes between frames

qbias
    dequantization bias
    stored as delta from last, last is reset to 0 if always_reset || keyframe

block_max_depth
    maximum depth of the block tree
    stored as delta from last, last is reset to 0 if always_reset || keyframe

quant_table
    quantization table


Highlevel bitstream structure:
=============================
 --------------------------------------------
|                   Header                   |
 --------------------------------------------
|    ------------------------------------    |
|   |               Block0               |   |
|   |             split?                 |   |
|   |     yes              no            |   |
|   |  .........         intra?          |   |
|   | : Block01 :    yes         no      |   |
|   | : Block02 :  .......   ..........  |   |
|   | : Block03 : :  y DC : : ref index: |   |
|   | : Block04 : : cb DC : : motion x : |   |
|   |  .........  : cr DC : : motion y : |   |
|   |              .......   ..........  |   |
|    ------------------------------------    |
|    ------------------------------------    |
|   |               Block1               |   |
|                    ...                     |
 --------------------------------------------
| ------------   ------------   ------------ |
|| Y subbands | | Cb subbands| | Cr subbands||
||  ---  ---  | |  ---  ---  | |  ---  ---  ||
|| |LL0||HL0| | | |LL0||HL0| | | |LL0||HL0| ||
||  ---  ---  | |  ---  ---  | |  ---  ---  ||
||  ---  ---  | |  ---  ---  | |  ---  ---  ||
|| |LH0||HH0| | | |LH0||HH0| | | |LH0||HH0| ||
||  ---  ---  | |  ---  ---  | |  ---  ---  ||
||  ---  ---  | |  ---  ---  | |  ---  ---  ||
|| |HL1||LH1| | | |HL1||LH1| | | |HL1||LH1| ||
||  ---  ---  | |  ---  ---  | |  ---  ---  ||
||  ---  ---  | |  ---  ---  | |  ---  ---  ||
|| |HH1||HL2| | | |HH1||HL2| | | |HH1||HL2| ||
||    ...     | |    ...     | |    ...     ||
| ------------   ------------   ------------ |
 --------------------------------------------

Decoding process:
=================

                                         ------------
                                        |            |
                                        |  Subbands  |
                   ------------         |            |
                  |            |         ------------
                  |  Intra DC  |               |
                  |            |    LL0 subband prediction
                   ------------                |
                                \        Dequantization
 -------------------             \             |
|  Reference frames |             \           IDWT
| -------   ------- |    Motion    \           |
||Frame 0| |Frame 1|| Compensation  .   OBMC   v      -------
| -------   ------- | --------------. \------> + --->|Frame n|-->output
| -------   ------- |                                 -------
||Frame 2| |Frame 3||<----------------------------------/
|        ...        |
 -------------------


Range Coder:
============
FIXME
The implemented range coder is an adapted version based upon "Range encoding:
an algorithm for removing redundancy from a digitised message." by G. N. N.
Martin.
The symbols encoded by the ffmpeg range coder are bits (0|1). The
associated probabilities are not fixed but change depending on the symbol mix
seen so far.


Neighboring Blocks:
===================
left and top are set to the respective blocks unless they are outside of
the image, in which case they are set to the Null block

top-left is set to the top-left block unless it is outside of the image,
in which case it is set to the left block

if this block has no larger parent block or is at the left side of its
parent block, and the top-right block is not outside of the image, then
the top-right block is used for top-right; otherwise the top-left block is used

Null block
y,cb,cr are 128
level, ref, mx and my are 0
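
An illustrative sketch of the neighbor selection rules above; the Block
struct layout, the blocks[] array, bw (blocks per row), (bx,by) and the
has_no_parent_or_is_left_of_parent() predicate are assumptions made only
for this example, not part of the specification:

typedef struct { int level, ref, mx, my, y, cb, cr; } Block;

static const Block null_block= { .level= 0, .ref= 0, .mx= 0, .my= 0,
                                 .y= 128, .cb= 128, .cr= 128 };

const Block *left    = bx>0           ? &blocks[ by   *bw + bx-1] : &null_block;
const Block *top     = by>0           ? &blocks[(by-1)*bw + bx  ] : &null_block;
const Block *topleft = (bx>0 && by>0) ? &blocks[(by-1)*bw + bx-1] : left;
const Block *topright= (by>0 && bx+1<bw && has_no_parent_or_is_left_of_parent(bx, by, level))
                                      ? &blocks[(by-1)*bw + bx+1] : topleft;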


Motion Vector Prediction:
=========================
1. the motion vectors of all the neighboring blocks are scaled to
compensate for the difference of reference frames

scaled_mv= (mv * (256 * (current_reference+1) / (mv.reference+1)) + 128)>>8

2. the median of the scaled left, top and top-right vectors is used as
motion vector prediction

3. the used motion vector is the sum of the predictor and
   (mvx_diff, mvy_diff)*mv_scale
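
Putting the three steps together, an illustrative sketch (the helper names
and pred_mx are assumptions of this example; left, top, topright, ref,
mvx_diff and mv_scale are as defined elsewhere in this spec):

/* step 1: scale a neighboring vector component to the current reference frame */
static int scale_mv(int v, int mv_ref, int cur_ref){
    return (v * (256 * (cur_ref+1) / (mv_ref+1)) + 128)>>8;
}

/* median of 3 values, used for step 2 */
static int median3(int a, int b, int c){
    if(a > b){ int t= a; a= b; b= t; }
    if(b > c)  b= c;
    return a > b ? a : b;
}

/* steps 2 and 3 for the x component; the y component is handled identically */
pred_mx= median3(scale_mv(left    ->mx, left    ->ref, ref),
                 scale_mv(top     ->mx, top     ->ref, ref),
                 scale_mv(topright->mx, topright->ref, ref));
mx= pred_mx + mvx_diff*mv_scale;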


Intra DC Prediction:
====================
the luma and chroma values of the left block are used as predictors

the used luma and chroma values are the sum of the predictor and y_diff, cb_diff, cr_diff
to reverse this in the decoder apply the following:
block[y][x].dc[0] = block[y][x-1].dc[0] +  y_diff;
block[y][x].dc[1] = block[y][x-1].dc[1] + cb_diff;
block[y][x].dc[2] = block[y][x-1].dc[2] + cr_diff;
block[*][-1].dc[*]= 128;


Motion Compensation:
====================

Halfpel interpolation:
----------------------
halfpel interpolation is done by convolution with the halfpel filter stored
in the header:

horizontal halfpel samples are found by
H1[y][x] =    hcoeff[0]*(F[y][x  ] + F[y][x+1])
            + hcoeff[1]*(F[y][x-1] + F[y][x+2])
            + hcoeff[2]*(F[y][x-2] + F[y][x+3])
            + ...
h1[y][x] = (H1[y][x] + 32)>>6;

vertical halfpel samples are found by
H2[y][x] =    hcoeff[0]*(F[y  ][x] + F[y+1][x])
            + hcoeff[1]*(F[y-1][x] + F[y+2][x])
            + ...
h2[y][x] = (H2[y][x] + 32)>>6;

vertical+horizontal halfpel samples are found by
H3[y][x] =    hcoeff[0]*(H2[y][x  ] + H2[y][x+1])
            + hcoeff[1]*(H2[y][x-1] + H2[y][x+2])
            + ...
or equivalently
H3[y][x] =    hcoeff[0]*(H1[y  ][x] + H1[y+1][x])
            + hcoeff[1]*(H1[y-1][x] + H1[y+2][x])
            + ...
h3[y][x] = (H3[y][x] + 2048)>>12;


                   F   H1  F
                   |   |   |
                   |   |   |
                   |   |   |
                   F   H1  F
                   |   |   |
                   |   |   |
                   |   |   |
   F-------F-------F-> H1<-F-------F-------F
                   v   v   v
                  H2   H3  H2
                   ^   ^   ^
   F-------F-------F-> H1<-F-------F-------F
                   |   |   |
                   |   |   |
                   |   |   |
                   F   H1  F
                   |   |   |
                   |   |   |
                   |   |   |
                   F   H1  F


unavailable fullpel samples (outside the picture for example) shall be equal
to the closest available fullpel sample
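
As an illustration, the horizontal halfpel filter together with the edge rule
above could be implemented as follows (sketch only; the F() access helper and
the parameter names are assumptions of this example):

#include <stdint.h>

/* fullpel sample with coordinates clamped to the picture, so that unavailable
   samples take the value of the closest available fullpel sample             */
static int F(const uint8_t *frame, int stride, int w, int h, int x, int y){
    if(x < 0) x= 0; else if(x > w-1) x= w-1;
    if(y < 0) y= 0; else if(y > h-1) y= h-1;
    return frame[y*stride + x];
}

/* h1[y][x]: htaps/2 coefficient pairs, normalized as (H1 + 32)>>6 */
static int h1_sample(const uint8_t *frame, int stride, int w, int h,
                     int x, int y, const int *hcoeff, int htaps){
    int i, H1= 0;
    for(i=0; i<htaps/2; i++)
        H1 += hcoeff[i]*(F(frame, stride, w, h, x-i,   y)
                       + F(frame, stride, w, h, x+i+1, y));
    return (H1 + 32)>>6;
}

the vertical filter (h2) and the twice filtered samples (h3, with their
(H3 + 2048)>>12 normalization) follow the same pattern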


Smaller pel interpolation:
--------------------------
if diag_mc is set then points which lie on a line between 2 vertically,
horizontally or diagonally adjacent halfpel points shall be interpolated
linearly, with rounding to nearest and halfway values rounded up.
points which lie on 2 diagonals at the same time should only use the one
diagonal not containing the fullpel point


           F-->O---q---O<--h1->O---q---O<--F
           v \           / v \           / v
           O   O       O   O   O       O   O
           |         /     |     \         |
           q       q       q       q       q
           |     /         |         \     |
           O   O       O   O   O       O   O
           ^ /           \ ^ /           \ ^
          h2-->O---q---O<--h3->O---q---O<--h2
           v \           / v \           / v
           O   O       O   O   O       O   O
           |     \         |         /     |
           q       q       q       q       q
           |         \     |     /         |
           O   O       O   O   O       O   O
           ^ /           \ ^ /           \ ^
           F-->O---q---O<--h1->O---q---O<--F


the remaining points shall be bilinearly interpolated from the
up to 4 surrounding halfpel and fullpel points, again rounding should be to
nearest and halfway values rounded up

compliant snow decoders MUST support 1-1/8 pel luma and 1/2-1/16 pel chroma
interpolation at least


Overlapped block motion compensation:
-------------------------------------
FIXME

LL band prediction:
===================
Each sample in the LL0 subband is predicted by the median of the left, top and
left+top-topleft samples; samples outside the subband shall be considered to
be 0. To reverse this prediction in the decoder apply the following.
for(y=0; y<height; y++){
    for(x=0; x<width; x++){
        sample[y][x] += median(sample[y-1][x],
                               sample[y][x-1],
                               sample[y-1][x]+sample[y][x-1]-sample[y-1][x-1]);
    }
}
sample[-1][*]=sample[*][-1]= 0;
width,height here are the width and height of the LL0 subband, not of the final
video


Dequantization:
===============
FIXME

Wavelet Transform:
==================

Snow supports 2 wavelet transforms, the symmetric biorthogonal 5/3 integer
transform and an integer approximation of the symmetric biorthogonal 9/7
Daubechies wavelet.

2D IDWT (inverse discrete wavelet transform)
--------------------------------------------
The 2D IDWT applies a 2D filter recursively, each time combining the
4 lowest frequency subbands into a single subband until only 1 subband
remains.
The 2D filter is done by first applying a 1D filter in the vertical direction
and then applying it in the horizontal one.
 ---------------    ---------------    ---------------    ---------------
|LL0|HL0|       |  |   |   |       |  |       |       |  |       |       |
|---+---|  HL1  |  | L0|H0 |  HL1  |  |  LL1  |  HL1  |  |       |       |
|LH0|HH0|       |  |   |   |       |  |       |       |  |       |       |
|-------+-------|->|-------+-------|->|-------+-------|->|   L1  |  H1   |->...
|       |       |  |       |       |  |       |       |  |       |       |
|  LH1  |  HH1  |  |  LH1  |  HH1  |  |  LH1  |  HH1  |  |       |       |
|       |       |  |       |       |  |       |       |  |       |       |
 ---------------    ---------------    ---------------    ---------------


1D Filter:
----------
1. interleave the samples of the low and high frequency subbands like
s={L0, H0, L1, H1, L2, H2, L3, H3, ... }
note that this can end with an L or an H; the number of elements shall be w
s[-1] shall be considered equivalent to s[1  ]
s[w ] shall be considered equivalent to s[w-2]

2. perform the lifting steps in order as described below

5/3 Integer filter:
1. s[i] -= (s[i-1] + s[i+1] + 2)>>2; for all even i < w
2. s[i] += (s[i-1] + s[i+1]    )>>1; for all odd  i < w

\ | /|\ | /|\ | /|\ | /|\
 \|/ | \|/ | \|/ | \|/ |
  +  |  +  |  +  |  +  |   -1/4
 /|\ | /|\ | /|\ | /|\ |
/ | \|/ | \|/ | \|/ | \|/
  |  +  |  +  |  +  |  +   +1/2
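
An illustrative sketch of the 5/3 lifting on one interleaved line s[0..w-1],
with the mirroring rules s[-1]==s[1] and s[w]==s[w-2] applied through a small
helper (the function names are assumptions of this example):

/* mirrored access: s[-1] -> s[1], s[w] -> s[w-2] */
static int S(const int *s, int w, int i){
    if(i < 0)  i= -i;
    if(i >= w) i= 2*w - 2 - i;
    return s[i];
}

static void lift53_1d(int *s, int w){
    int i;
    for(i=0; i<w; i+=2)             /* step 1, even samples */
        s[i] -= (S(s,w,i-1) + S(s,w,i+1) + 2)>>2;
    for(i=1; i<w; i+=2)             /* step 2, odd samples, uses the already updated even ones */
        s[i] += (S(s,w,i-1) + S(s,w,i+1)    )>>1;
}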


snow's 9/7 Integer filter:
1. s[i] -= (3*(s[i-1] + s[i+1])         + 4)>>3; for all even i < w
2. s[i] -=     s[i-1] + s[i+1]                 ; for all odd  i < w
3. s[i] += (   s[i-1] + s[i+1] + 4*s[i] + 8)>>4; for all even i < w
4. s[i] += (3*(s[i-1] + s[i+1])            )>>1; for all odd  i < w

\ | /|\ | /|\ | /|\ | /|\
 \|/ | \|/ | \|/ | \|/ |
  +  |  +  |  +  |  +  |   -3/8
 /|\ | /|\ | /|\ | /|\ |
/ | \|/ | \|/ | \|/ | \|/
 (|  + (|  + (|  + (|  +   -1
\ + /|\ + /|\ + /|\ + /|\  +1/4
 \|/ | \|/ | \|/ | \|/ |
  +  |  +  |  +  |  +  |   +1/16
 /|\ | /|\ | /|\ | /|\ |
/ | \|/ | \|/ | \|/ | \|/
  |  +  |  +  |  +  |  +   +3/2

optimization tips:
the following pairs of expressions are exactly identical
(3a)>>1 == a + (a>>1)
(a + 4b + 8)>>4 == ((a>>2) + b + 2)>>2
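
As a sketch only, the 9/7 lifting steps can be written with the two identities
above (the S() mirroring helper from the 5/3 sketch is assumed):

static void lift97_1d(int *s, int w){
    int i;
    for(i=0; i<w; i+=2)             /* step 1 */
        s[i] -= (3*(S(s,w,i-1) + S(s,w,i+1)) + 4)>>3;
    for(i=1; i<w; i+=2)             /* step 2 */
        s[i] -=     S(s,w,i-1) + S(s,w,i+1);
    for(i=0; i<w; i+=2){            /* step 3, via (a + 4b + 8)>>4 == ((a>>2) + b + 2)>>2 */
        int a= S(s,w,i-1) + S(s,w,i+1);
        s[i] += ((a>>2) + s[i] + 2)>>2;
    }
    for(i=1; i<w; i+=2){            /* step 4, via (3a)>>1 == a + (a>>1) */
        int a= S(s,w,i-1) + S(s,w,i+1);
        s[i] += a + (a>>1);
    }
}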

16bit implementation note:
The IDWT can be implemented with 16 bits, but this requires some care to
prevent overflows; the following list gives the minimum number of bits needed
for some terms
1. lifting step
A= s[i-1] + s[i+1]                              16bit
3*A + 4                                         18bit
A + (A>>1) + 2                                  17bit

3. lifting step
s[i-1] + s[i+1]                                 17bit

4. lifting step
3*(s[i-1] + s[i+1])                             17bit


TODO:
=====
Important:
finetune initial contexts
flip wavelet?
try to use the wavelet transformed predicted image (motion compensated image) as context for coding the residual coefficients
try the MV length as context for coding the residual coefficients
use extradata for stuff which is in the keyframes now?
the MV median predictor is patented IIRC
implement per picture halfpel interpolation
try different range coder state transition tables for different contexts

Not Important:
compare the 6 tap and 8 tap hpel filters (psnr/bitrate and subjective quality)
spatial_scalability b vs u (!= 0 breaks syntax anyway so we can add a u later)


Credits:
========
Michael Niedermayer
Loren Merritt


Copyright:
==========
GPL + GFDL + whatever is needed to make this an RFC