Revision f66e4f5f

This revision renames the clipping helpers in libavutil/common.h from clip() and clip_uint8() to av_clip() and av_clip_uint8(), and updates all of their callers in ffmpeg.c and libavcodec accordingly.

ffmpeg.c

@@ -475 +475 @@
                        fprintf(stderr, "adding %d audio samples of silence\n", (int)delta);
                }
            }else if(audio_sync_method>1){
-                int comp= clip(delta, -audio_sync_method, audio_sync_method);
+                int comp= av_clip(delta, -audio_sync_method, audio_sync_method);
                assert(ost->audio_resample);
                if(verbose > 2)
                    fprintf(stderr, "compensating audio timestamp drift:%f compensation:%d in:%d\n", delta, comp, enc->sample_rate);
libavcodec/adpcm.c

@@ -209 +209 @@
    int nibble = FFMIN(7, abs(delta)*4/step_table[c->step_index]) + (delta<0)*8;
    c->prev_sample = c->prev_sample + ((step_table[c->step_index] * yamaha_difflookup[nibble]) / 8);
    CLAMP_TO_SHORT(c->prev_sample);
-    c->step_index = clip(c->step_index + index_table[nibble], 0, 88);
+    c->step_index = av_clip(c->step_index + index_table[nibble], 0, 88);
    return nibble;
}

@@ -224 +224 @@
    else          bias=-c->idelta/2;

    nibble= (nibble + bias) / c->idelta;
-    nibble= clip(nibble, -8, 7)&0x0F;
+    nibble= av_clip(nibble, -8, 7)&0x0F;

    predictor += (signed)((nibble & 0x08)?(nibble - 0x10):(nibble)) * c->idelta;
    CLAMP_TO_SHORT(predictor);

@@ -254 +254 @@
    c->predictor = c->predictor + ((c->step * yamaha_difflookup[nibble]) / 8);
    CLAMP_TO_SHORT(c->predictor);
    c->step = (c->step * yamaha_indexscale[nibble]) >> 8;
-    c->step = clip(c->step, 127, 24567);
+    c->step = av_clip(c->step, 127, 24567);

    return nibble;
}

@@ -324 +324 @@
            if(version == CODEC_ID_ADPCM_MS) {
                const int predictor = ((nodes[j]->sample1 * c->coeff1) + (nodes[j]->sample2 * c->coeff2)) / 256;
                const int div = (sample - predictor) / step;
-                const int nmin = clip(div-range, -8, 6);
-                const int nmax = clip(div+range, -7, 7);
+                const int nmin = av_clip(div-range, -8, 6);
+                const int nmax = av_clip(div+range, -7, 7);
                for(nidx=nmin; nidx<=nmax; nidx++) {
                    const int nibble = nidx & 0xf;
                    int dec_sample = predictor + nidx * step;

@@ -372 +372 @@
#define LOOP_NODES(NAME, STEP_TABLE, STEP_INDEX)\
                const int predictor = nodes[j]->sample1;\
                const int div = (sample - predictor) * 4 / STEP_TABLE;\
-                int nmin = clip(div-range, -7, 6);\
-                int nmax = clip(div+range, -6, 7);\
+                int nmin = av_clip(div-range, -7, 6);\
+                int nmax = av_clip(div+range, -6, 7);\
                if(nmin<=0) nmin--; /* distinguish -0 from +0 */\
                if(nmax<0) nmax--;\
                for(nidx=nmin; nidx<=nmax; nidx++) {\

@@ -381 +381 @@
                    int dec_sample = predictor + (STEP_TABLE * yamaha_difflookup[nibble]) / 8;\
                    STORE_NODE(NAME, STEP_INDEX);\
                }
-                LOOP_NODES(ima, step_table[step], clip(step + index_table[nibble], 0, 88));
+                LOOP_NODES(ima, step_table[step], av_clip(step + index_table[nibble], 0, 88));
            } else { //CODEC_ID_ADPCM_YAMAHA
-                LOOP_NODES(yamaha, step, clip((step * yamaha_indexscale[nibble]) >> 8, 127, 24567));
+                LOOP_NODES(yamaha, step, av_clip((step * yamaha_indexscale[nibble]) >> 8, 127, 24567));
#undef LOOP_NODES
#undef STORE_NODE
            }

@@ -734 +734 @@
    c->predictor += (c->step * yamaha_difflookup[nibble]) / 8;
    CLAMP_TO_SHORT(c->predictor);
    c->step = (c->step * yamaha_indexscale[nibble]) >> 8;
-    c->step = clip(c->step, 127, 24567);
+    c->step = av_clip(c->step, 127, 24567);
    return c->predictor;
}

@@ -974 +974 @@
        n = buf_size - 7 * avctx->channels;
        if (n < 0)
            return -1;
-        block_predictor[0] = clip(*src++, 0, 7);
+        block_predictor[0] = av_clip(*src++, 0, 7);
        block_predictor[1] = 0;
        if (st)
-            block_predictor[1] = clip(*src++, 0, 7);
+            block_predictor[1] = av_clip(*src++, 0, 7);
        c->status[0].idelta = (int16_t)((*src & 0xFF) | ((src[1] << 8) & 0xFF00));
        src+=2;
        if (st){

@@ -1299 +1299 @@

                c->status[i].step_index += table[delta & (~signmask)];

-                c->status[i].step_index = clip(c->status[i].step_index, 0, 88);
-                c->status[i].predictor = clip(c->status[i].predictor, -32768, 32767);
+                c->status[i].step_index = av_clip(c->status[i].step_index, 0, 88);
+                c->status[i].predictor = av_clip(c->status[i].predictor, -32768, 32767);

                *samples++ = c->status[i].predictor;
            }
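
Illustrative example (not part of this revision): the adpcm.c hunks keep the predictor state inside its legal range after every nibble, e.g. the IMA step index must stay in 0..88. A self-contained sketch of that update, assuming the standard 4-bit IMA index adjustment table (the table itself is not shown in this diff):

#include <stdio.h>

static inline int av_clip(int a, int amin, int amax)
{
    if      (a < amin) return amin;
    else if (a > amax) return amax;
    else               return a;
}

/* standard 4-bit IMA ADPCM index adjustment table (not part of this diff,
 * shown here only to make the example self-contained) */
static const int index_table[16] = {
    -1, -1, -1, -1, 2, 4, 6, 8,
    -1, -1, -1, -1, 2, 4, 6, 8,
};

int main(void)
{
    int step_index = 2;
    const unsigned char nibbles[] = { 7, 7, 0, 0, 0, 0, 0 };

    for (unsigned i = 0; i < sizeof(nibbles); i++) {
        /* same update as in the adpcm.c hunks above: the table can push the
         * index below 0 or above 88, so it is clamped back into range */
        step_index = av_clip(step_index + index_table[nibbles[i]], 0, 88);
        printf("nibble %d -> step_index %d\n", nibbles[i], step_index);
    }
    return 0;
}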
libavcodec/cavs.c

@@ -128 +128 @@
}

#define SET_PARAMS                                            \
-    alpha = alpha_tab[clip(qp_avg + h->alpha_offset,0,63)];   \
-    beta  =  beta_tab[clip(qp_avg + h->beta_offset, 0,63)];   \
-    tc    =    tc_tab[clip(qp_avg + h->alpha_offset,0,63)];
+    alpha = alpha_tab[av_clip(qp_avg + h->alpha_offset,0,63)];   \
+    beta  =  beta_tab[av_clip(qp_avg + h->beta_offset, 0,63)];   \
+    tc    =    tc_tab[av_clip(qp_avg + h->alpha_offset,0,63)];

/**
 * in-loop deblocking filter for a single macroblock
libavcodec/cavsdsp.c

@@ -63 +63 @@
    int q0 = Q0;

    if(abs(p0-q0)<alpha && abs(P1-p0)<beta && abs(Q1-q0)<beta) {
-        int delta = clip(((q0-p0)*3+P1-Q1+4)>>3,-tc, tc);
-        P0 = clip_uint8(p0+delta);
-        Q0 = clip_uint8(q0-delta);
+        int delta = av_clip(((q0-p0)*3+P1-Q1+4)>>3,-tc, tc);
+        P0 = av_clip_uint8(p0+delta);
+        Q0 = av_clip_uint8(q0-delta);
        if(abs(P2-p0)<beta) {
-            delta = clip(((P0-P1)*3+P2-Q0+4)>>3, -tc, tc);
-            P1 = clip_uint8(P1+delta);
+            delta = av_clip(((P0-P1)*3+P2-Q0+4)>>3, -tc, tc);
+            P1 = av_clip_uint8(P1+delta);
        }
        if(abs(Q2-q0)<beta) {
-            delta = clip(((Q1-Q0)*3+P0-Q2+4)>>3, -tc, tc);
-            Q1 = clip_uint8(Q1-delta);
+            delta = av_clip(((Q1-Q0)*3+P0-Q2+4)>>3, -tc, tc);
+            Q1 = av_clip_uint8(Q1-delta);
        }
    }
}

@@ -98 +98 @@
static inline void loop_filter_c1(uint8_t *p0_p,int stride,int alpha, int beta,
                                  int tc) {
    if(abs(P0-Q0)<alpha && abs(P1-P0)<beta && abs(Q1-Q0)<beta) {
-        int delta = clip(((Q0-P0)*3+P1-Q1+4)>>3, -tc, tc);
-        P0 = clip_uint8(P0+delta);
-        Q0 = clip_uint8(Q0-delta);
+        int delta = av_clip(((Q0-P0)*3+P1-Q1+4)>>3, -tc, tc);
+        P0 = av_clip_uint8(P0+delta);
+        Q0 = av_clip_uint8(Q0-delta);
    }
}

libavcodec/cook.c

@@ -998 +998 @@
     */
    for (j = 0; j < q->samples_per_channel; j++) {
        out[chan + q->nb_channels * j] =
-          clip(lrintf(q->mono_mdct_output[j]), -32768, 32767);
+          av_clip(lrintf(q->mono_mdct_output[j]), -32768, 32767);
    }
}

libavcodec/dsicinav.c

@@ -327 +327 @@
    }
    while (buf_size > 0) {
        cin->delta += cinaudio_delta16_table[*src++];
-        cin->delta = clip(cin->delta, -32768, 32767);
+        cin->delta = av_clip(cin->delta, -32768, 32767);
        *samples++ = cin->delta;
        --buf_size;
    }
libavcodec/dsputil.c

@@ -1178 +1178 @@
                                           + src[index+stride+1]*   frac_x )*   frac_y
                                        + r)>>(shift*2);
                }else{
-                    index= src_x + clip(src_y, 0, height)*stride;
+                    index= src_x + av_clip(src_y, 0, height)*stride;
                    dst[y*stride + x]= ( (  src[index         ]*(s-frac_x)
                                          + src[index       +1]*   frac_x )*s
                                        + r)>>(shift*2);
                }
            }else{
                if((unsigned)src_y < height){
-                    index= clip(src_x, 0, width) + src_y*stride;
+                    index= av_clip(src_x, 0, width) + src_y*stride;
                    dst[y*stride + x]= (  (  src[index         ]*(s-frac_y)
                                           + src[index+stride  ]*   frac_y )*s
                                        + r)>>(shift*2);
                }else{
-                    index= clip(src_x, 0, width) + clip(src_y, 0, height)*stride;
+                    index= av_clip(src_x, 0, width) + av_clip(src_y, 0, height)*stride;
                    dst[y*stride + x]=    src[index         ];
                }
            }

@@ -2434 +2434 @@
#undef op2_put
#endif

-#define op_scale1(x)  block[x] = clip_uint8( (block[x]*weight + offset) >> log2_denom )
-#define op_scale2(x)  dst[x] = clip_uint8( (src[x]*weights + dst[x]*weightd + offset) >> (log2_denom+1))
+#define op_scale1(x)  block[x] = av_clip_uint8( (block[x]*weight + offset) >> log2_denom )
+#define op_scale2(x)  dst[x] = av_clip_uint8( (src[x]*weights + dst[x]*weightd + offset) >> (log2_denom+1))
#define H264_WEIGHT(W,H) \
static void weight_h264_pixels ## W ## x ## H ## _c(uint8_t *block, int stride, int log2_denom, int weight, int offset){ \
    int y; \

@@ -2659 +2659 @@

        ad1= FFABS(d1)>>1;

-        d2= clip((p0-p3)/4, -ad1, ad1);
+        d2= av_clip((p0-p3)/4, -ad1, ad1);

        src[x-2*stride] = p0 - d2;
        src[x+  stride] = p3 + d2;

@@ -2694 +2694 @@

        ad1= FFABS(d1)>>1;

-        d2= clip((p0-p3)/4, -ad1, ad1);
+        d2= av_clip((p0-p3)/4, -ad1, ad1);

        src[y*stride-2] = p0 - d2;
        src[y*stride+1] = p3 + d2;

@@ -2752 +2752 @@
                int i_delta;

                if( FFABS( p2 - p0 ) < beta ) {
-                    pix[-2*xstride] = p1 + clip( (( p2 + ( ( p0 + q0 + 1 ) >> 1 ) ) >> 1) - p1, -tc0[i], tc0[i] );
+                    pix[-2*xstride] = p1 + av_clip( (( p2 + ( ( p0 + q0 + 1 ) >> 1 ) ) >> 1) - p1, -tc0[i], tc0[i] );
                    tc++;
                }
                if( FFABS( q2 - q0 ) < beta ) {
-                    pix[   xstride] = q1 + clip( (( q2 + ( ( p0 + q0 + 1 ) >> 1 ) ) >> 1) - q1, -tc0[i], tc0[i] );
+                    pix[   xstride] = q1 + av_clip( (( q2 + ( ( p0 + q0 + 1 ) >> 1 ) ) >> 1) - q1, -tc0[i], tc0[i] );
                    tc++;
                }

-                i_delta = clip( (((q0 - p0 ) << 2) + (p1 - q1) + 4) >> 3, -tc, tc );
-                pix[-xstride] = clip_uint8( p0 + i_delta );    /* p0' */
-                pix[0]        = clip_uint8( q0 - i_delta );    /* q0' */
+                i_delta = av_clip( (((q0 - p0 ) << 2) + (p1 - q1) + 4) >> 3, -tc, tc );
+                pix[-xstride] = av_clip_uint8( p0 + i_delta );    /* p0' */
+                pix[0]        = av_clip_uint8( q0 - i_delta );    /* q0' */
            }
            pix += ystride;
        }

@@ -2796 +2796 @@
                FFABS( p1 - p0 ) < beta &&
                FFABS( q1 - q0 ) < beta ) {

-                int delta = clip( (((q0 - p0 ) << 2) + (p1 - q1) + 4) >> 3, -tc, tc );
+                int delta = av_clip( (((q0 - p0 ) << 2) + (p1 - q1) + 4) >> 3, -tc, tc );

-                pix[-xstride] = clip_uint8( p0 + delta );    /* p0' */
-                pix[0]        = clip_uint8( q0 - delta );    /* q0' */
+                pix[-xstride] = av_clip_uint8( p0 + delta );    /* p0' */
+                pix[0]        = av_clip_uint8( q0 - delta );    /* q0' */
            }
            pix += ystride;
        }
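
Illustrative example (not part of this revision): the h264 loop-filter hunks in dsputil.c clamp the filter correction to ±tc before clamping the filtered pixels back into 0..255. A toy edge filter in that spirit, operating on four pixels p1 p0 | q0 q1 with made-up alpha/beta/tc thresholds:

#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>

static inline int av_clip(int a, int amin, int amax)
{
    if      (a < amin) return amin;
    else if (a > amax) return amax;
    else               return a;
}

/* simple range-checked version; the branchless one is in libavutil/common.h */
static inline uint8_t av_clip_uint8(int a)
{
    return av_clip(a, 0, 255);
}

/* toy 1-D edge filter using the same correction formula as the h264
 * deblocking hunks above; the thresholds are invented for illustration */
static void filter_edge(uint8_t pix[4], int alpha, int beta, int tc)
{
    int p1 = pix[0], p0 = pix[1], q0 = pix[2], q1 = pix[3];

    if (abs(p0 - q0) < alpha && abs(p1 - p0) < beta && abs(q1 - q0) < beta) {
        /* the correction is limited to +/-tc, then the result to 0..255 */
        int delta = av_clip((((q0 - p0) << 2) + (p1 - q1) + 4) >> 3, -tc, tc);
        pix[1] = av_clip_uint8(p0 + delta);
        pix[2] = av_clip_uint8(q0 - delta);
    }
}

int main(void)
{
    uint8_t pix[4] = { 60, 62, 80, 82 };   /* a blocking step of 18 across the edge */
    filter_edge(pix, 30, 10, 4);
    printf("%d %d %d %d\n", pix[0], pix[1], pix[2], pix[3]);
    return 0;
}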
libavcodec/error_resilience.c

@@ -612 +612 @@
 *               error of the same type occured
 */
void ff_er_add_slice(MpegEncContext *s, int startx, int starty, int endx, int endy, int status){
-    const int start_i= clip(startx + starty * s->mb_width    , 0, s->mb_num-1);
-    const int end_i  = clip(endx   + endy   * s->mb_width    , 0, s->mb_num);
+    const int start_i= av_clip(startx + starty * s->mb_width    , 0, s->mb_num-1);
+    const int end_i  = av_clip(endx   + endy   * s->mb_width    , 0, s->mb_num);
    const int start_xy= s->mb_index2xy[start_i];
    const int end_xy  = s->mb_index2xy[end_i];
    int mask= -1;
libavcodec/eval.c

@@ -155 +155 @@
        case e_func2:  return e->value * e->a.func2(p->opaque, eval_expr(p, e->param[0]), eval_expr(p, e->param[1]));
        case e_squish: return 1/(1+exp(4*eval_expr(p, e->param[0])));
        case e_gauss: { double d = eval_expr(p, e->param[0]); return exp(-d*d/2)/sqrt(2*M_PI); }
-        case e_ld:     return e->value * p->var[clip(eval_expr(p, e->param[0]), 0, VARS-1)];
+        case e_ld:     return e->value * p->var[av_clip(eval_expr(p, e->param[0]), 0, VARS-1)];
        case e_while: {
            double d = NAN;
            while(eval_expr(p, e->param[0]))

@@ -177 +177 @@
                case e_div: return e->value * (d / d2);
                case e_add: return e->value * (d + d2);
                case e_last:return e->value * d2;
-                case e_st : return e->value * (p->var[clip(d, 0, VARS-1)]= d2);
+                case e_st : return e->value * (p->var[av_clip(d, 0, VARS-1)]= d2);
            }
        }
    }
libavcodec/flacenc.c

@@ -244 +244 @@

    /* set compression option overrides from AVCodecContext */
    if(avctx->use_lpc >= 0) {
-        s->options.use_lpc = clip(avctx->use_lpc, 0, 11);
+        s->options.use_lpc = av_clip(avctx->use_lpc, 0, 11);
    }
    if(s->options.use_lpc == 1)
        av_log(avctx, AV_LOG_DEBUG, " use lpc: Levinson-Durbin recursion with Welch window\n");

@@ -712 +712 @@
    error=0;
    for(i=0; i<order; i++) {
        error += lpc_in[i] * (1 << sh);
-        lpc_out[i] = clip(lrintf(error), -qmax, qmax);
+        lpc_out[i] = av_clip(lrintf(error), -qmax, qmax);
        error -= lpc_out[i];
    }
    *shift = sh;
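
Illustrative example (not part of this revision): the second flacenc.c hunk quantizes LPC coefficients with error feedback, clamping each quantized value to ±qmax while carrying the rounding error into the next coefficient. A standalone sketch with invented coefficients and precision:

#include <stdio.h>
#include <math.h>

static inline int av_clip(int a, int amin, int amax)
{
    if      (a < amin) return amin;
    else if (a > amax) return amax;
    else               return a;
}

int main(void)
{
    /* made-up floating point LPC coefficients and precision, only to show
     * the error-feedback quantization loop from the flacenc.c hunk above */
    const double lpc_in[4] = { 1.7347, -0.8912, 0.2043, -0.0388 };
    int          lpc_out[4];
    const int    order = 4;
    const int    sh    = 12;            /* quantization shift */
    const int    qmax  = (1 << 14) - 1; /* coefficient magnitude limit */
    double       error = 0.0;

    for (int i = 0; i < order; i++) {
        /* accumulate the scaled coefficient plus the rounding error carried
         * over from the previous coefficients, clamp, and keep the residual */
        error     += lpc_in[i] * (1 << sh);
        lpc_out[i] = av_clip(lrintf(error), -qmax, qmax);
        error     -= lpc_out[i];
        printf("coef %d: %8.4f -> %6d (carried error %+.4f)\n",
               i, lpc_in[i], lpc_out[i], error);
    }
    return 0;
}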
libavcodec/g726.c

@@ -213 +213 @@
            c->b[i] = 0;
    } else {
        /* This is a bit crazy, but it really is +255 not +256 */
-        fa1 = clip((-c->a[0]*c->pk[0]*pk0)>>5, -256, 255);
+        fa1 = av_clip((-c->a[0]*c->pk[0]*pk0)>>5, -256, 255);

        c->a[1] += 128*pk0*c->pk[1] + fa1 - (c->a[1]>>7);
-        c->a[1] = clip(c->a[1], -12288, 12288);
+        c->a[1] = av_clip(c->a[1], -12288, 12288);
        c->a[0] += 64*3*pk0*c->pk[0] - (c->a[0] >> 8);
-        c->a[0] = clip(c->a[0], -(15360 - c->a[1]), 15360 - c->a[1]);
+        c->a[0] = av_clip(c->a[0], -(15360 - c->a[1]), 15360 - c->a[1]);

        for (i=0; i<6; i++)
            c->b[i] += 128*dq0*sgn(-c->dq[i].sign) - (c->b[i]>>8);

@@ -248 +248 @@
        c->ap += (0x200 - c->ap) >> 4;

    /* Update Yu and Yl */
-    c->yu = clip(c->y + (((c->tbls->W[I] << 5) - c->y) >> 5), 544, 5120);
+    c->yu = av_clip(c->y + (((c->tbls->W[I] << 5) - c->y) >> 5), 544, 5120);
    c->yl += c->yu + ((-c->yl)>>6);

    /* Next iteration for Y */

@@ -264 +264 @@
        c->se += mult(i2f(c->a[i] >> 2, &f), &c->sr[i]);
    c->se >>= 1;

-    return clip(re_signal << 2, -0xffff, 0xffff);
+    return av_clip(re_signal << 2, -0xffff, 0xffff);
}

static int g726_reset(G726Context* c, int bit_rate)
libavcodec/h263.c

@@ -211 +211 @@
        for(i=0; i<2; i++){
            int div, error;
            div= (s->avctx->time_base.num*1800000LL + 500LL*s->avctx->time_base.den) / ((1000LL+i)*s->avctx->time_base.den);
-            div= clip(1, div, 127);
+            div= av_clip(1, div, 127);
            error= FFABS(s->avctx->time_base.num*1800000LL - (1000LL+i)*s->avctx->time_base.den*div);
            if(error < best_error){
                best_error= error;

@@ -496 +496 @@
    for(i=0; i<s->mb_num; i++){
        unsigned int lam= s->lambda_table[ s->mb_index2xy[i] ];
        int qp= (lam*139 + FF_LAMBDA_SCALE*64) >> (FF_LAMBDA_SHIFT + 7);
-        qscale_table[ s->mb_index2xy[i] ]= clip(qp, s->avctx->qmin, s->avctx->qmax);
+        qscale_table[ s->mb_index2xy[i] ]= av_clip(qp, s->avctx->qmin, s->avctx->qmax);
    }
}

libavcodec/h264.c

@@ -1322 +1322 @@
    int i;
    for(i=0; i<h->ref_count[0]; i++){
        int poc0 = h->ref_list[0][i].poc;
-        int td = clip(poc1 - poc0, -128, 127);
+        int td = av_clip(poc1 - poc0, -128, 127);
        if(td == 0 /* FIXME || pic0 is a long-term ref */){
            h->dist_scale_factor[i] = 256;
        }else{
-            int tb = clip(poc - poc0, -128, 127);
+            int tb = av_clip(poc - poc0, -128, 127);
            int tx = (16384 + (FFABS(td) >> 1)) / td;
-            h->dist_scale_factor[i] = clip((tb*tx + 32) >> 6, -1024, 1023);
+            h->dist_scale_factor[i] = av_clip((tb*tx + 32) >> 6, -1024, 1023);
        }
    }
    if(FRAME_MBAFF){

@@ -1948 +1948 @@
 */
static inline int get_chroma_qp(int chroma_qp_index_offset, int qscale){

-    return chroma_qp[clip(qscale + chroma_qp_index_offset, 0, 51)];
+    return chroma_qp[av_clip(qscale + chroma_qp_index_offset, 0, 51)];
}

//FIXME need to check that this doesnt overflow signed 32 bit for low qp, i am not sure, it's very close

@@ -4122 +4122 @@
        int poc0 = h->ref_list[0][ref0].poc;
        for(ref1=0; ref1 < h->ref_count[1]; ref1++){
            int poc1 = h->ref_list[1][ref1].poc;
-            int td = clip(poc1 - poc0, -128, 127);
+            int td = av_clip(poc1 - poc0, -128, 127);
            if(td){
-                int tb = clip(cur_poc - poc0, -128, 127);
+                int tb = av_clip(cur_poc - poc0, -128, 127);
                int tx = (16384 + (FFABS(td) >> 1)) / td;
-                int dist_scale_factor = clip((tb*tx + 32) >> 6, -1024, 1023) >> 2;
+                int dist_scale_factor = av_clip((tb*tx + 32) >> 6, -1024, 1023) >> 2;
                if(dist_scale_factor < -64 || dist_scale_factor > 128)
                    h->implicit_weight[ref0][ref1] = 32;
                else

@@ -6814 +6814 @@
                int i_delta;

                if( FFABS( p2 - p0 ) < beta ) {
-                    pix[-2] = p1 + clip( ( p2 + ( ( p0 + q0 + 1 ) >> 1 ) - ( p1 << 1 ) ) >> 1, -tc0, tc0 );
+                    pix[-2] = p1 + av_clip( ( p2 + ( ( p0 + q0 + 1 ) >> 1 ) - ( p1 << 1 ) ) >> 1, -tc0, tc0 );
                    tc++;
                }
                if( FFABS( q2 - q0 ) < beta ) {
-                    pix[1] = q1 + clip( ( q2 + ( ( p0 + q0 + 1 ) >> 1 ) - ( q1 << 1 ) ) >> 1, -tc0, tc0 );
+                    pix[1] = q1 + av_clip( ( q2 + ( ( p0 + q0 + 1 ) >> 1 ) - ( q1 << 1 ) ) >> 1, -tc0, tc0 );
                    tc++;
                }

-                i_delta = clip( (((q0 - p0 ) << 2) + (p1 - q1) + 4) >> 3, -tc, tc );
-                pix[-1] = clip_uint8( p0 + i_delta );    /* p0' */
-                pix[0]  = clip_uint8( q0 - i_delta );    /* q0' */
+                i_delta = av_clip( (((q0 - p0 ) << 2) + (p1 - q1) + 4) >> 3, -tc, tc );
+                pix[-1] = av_clip_uint8( p0 + i_delta );    /* p0' */
+                pix[0]  = av_clip_uint8( q0 - i_delta );    /* q0' */
                tprintf("filter_mb_mbaff_edgev i:%d, qp:%d, indexA:%d, alpha:%d, beta:%d, tc:%d\n# bS:%d -> [%02x, %02x, %02x, %02x, %02x, %02x] =>[%02x, %02x, %02x, %02x]\n", i, qp[qp_index], index_a, alpha, beta, tc, bS[bS_index], pix[-3], p1, p0, q0, q1, pix[2], p1, pix[-1], pix[0], q1);
            }
        }else{

@@ -6902 +6902 @@
            if( FFABS( p0 - q0 ) < alpha &&
                FFABS( p1 - p0 ) < beta &&
                FFABS( q1 - q0 ) < beta ) {
-                const int i_delta = clip( (((q0 - p0 ) << 2) + (p1 - q1) + 4) >> 3, -tc, tc );
+                const int i_delta = av_clip( (((q0 - p0 ) << 2) + (p1 - q1) + 4) >> 3, -tc, tc );

-                pix[-1] = clip_uint8( p0 + i_delta );    /* p0' */
-                pix[0]  = clip_uint8( q0 - i_delta );    /* q0' */
+                pix[-1] = av_clip_uint8( p0 + i_delta );    /* p0' */
+                pix[0]  = av_clip_uint8( q0 - i_delta );    /* q0' */
                tprintf("filter_mb_mbaff_edgecv i:%d, qp:%d, indexA:%d, alpha:%d, beta:%d, tc:%d\n# bS:%d -> [%02x, %02x, %02x, %02x, %02x, %02x] =>[%02x, %02x, %02x, %02x]\n", i, qp[qp_index], index_a, alpha, beta, tc, bS[bS_index], pix[-3], p1, p0, q0, q1, pix[2], p1, pix[-1], pix[0], q1);
            }
        }else{

@@ -7387 +7387 @@
        for( i= 0; i < 460; i++ ) {
            int pre;
            if( h->slice_type == I_TYPE )
-                pre = clip( ((cabac_context_init_I[i][0] * s->qscale) >>4 ) + cabac_context_init_I[i][1], 1, 126 );
+                pre = av_clip( ((cabac_context_init_I[i][0] * s->qscale) >>4 ) + cabac_context_init_I[i][1], 1, 126 );
            else
-                pre = clip( ((cabac_context_init_PB[h->cabac_init_idc][i][0] * s->qscale) >>4 ) + cabac_context_init_PB[h->cabac_init_idc][i][1], 1, 126 );
+                pre = av_clip( ((cabac_context_init_PB[h->cabac_init_idc][i][0] * s->qscale) >>4 ) + cabac_context_init_PB[h->cabac_init_idc][i][1], 1, 126 );

            if( pre <= 63 )
                h->cabac_state[i] = 2 * ( 63 - pre ) + 0;
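
Illustrative example (not part of this revision): the temporal direct-mode hunks above compute dist_scale_factor from clipped POC distances. A worked standalone example using the same formula with invented POC values:

#include <stdio.h>
#include <stdlib.h>

static inline int av_clip(int a, int amin, int amax)
{
    if      (a < amin) return amin;
    else if (a > amax) return amax;
    else               return a;
}

int main(void)
{
    int poc  = 8;   /* current picture (invented) */
    int poc0 = 2;   /* list 0 reference (invented) */
    int poc1 = 12;  /* list 1 reference (invented) */

    int td = av_clip(poc1 - poc0, -128, 127);   /* temporal distance between the references */
    int dist_scale_factor;

    if (td == 0) {
        dist_scale_factor = 256;
    } else {
        int tb = av_clip(poc - poc0, -128, 127); /* distance current <-> list 0 reference */
        int tx = (16384 + (abs(td) >> 1)) / td;  /* roughly 16384/td with rounding */
        dist_scale_factor = av_clip((tb * tx + 32) >> 6, -1024, 1023);
    }
    /* tb/td = 6/10, so the factor comes out near 0.6 * 256 (prints 154) */
    printf("dist_scale_factor = %d\n", dist_scale_factor);
    return 0;
}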
libavcodec/imc.c

@@ -347 +347 @@
        iacc = 0;

        for(j = (stream_format_code & 0x2)?4:0; j < BANDS; j++) {
-            cwlen = clip((int)((q->flcoeffs4[j] * 0.5) - summa + 0.5), 0, 6);
+            cwlen = av_clip((int)((q->flcoeffs4[j] * 0.5) - summa + 0.5), 0, 6);

            q->bitsBandT[j] = cwlen;
            summer += q->bandWidthT[j] * cwlen;
libavcodec/indeo2.c

@@ -87 +87 @@
                }
            } else { /* add two deltas from table */
                t = dst[out - stride] + (table[c * 2] - 128);
-                t= clip_uint8(t);
+                t= av_clip_uint8(t);
                dst[out] = t;
                out++;
                t = dst[out - stride] + (table[(c * 2) + 1] - 128);
-                t= clip_uint8(t);
+                t= av_clip_uint8(t);
                dst[out] = t;
                out++;
            }

@@ -121 +121 @@
                out += c * 2;
            } else { /* add two deltas from table */
                t = dst[out] + (((table[c * 2] - 128)*3) >> 2);
-                t= clip_uint8(t);
+                t= av_clip_uint8(t);
                dst[out] = t;
                out++;
                t = dst[out] + (((table[(c * 2) + 1] - 128)*3) >> 2);
-                t= clip_uint8(t);
+                t= av_clip_uint8(t);
                dst[out] = t;
                out++;
            }
libavcodec/jpeg_ls.c

@@ -366 +366 @@
            }

            if(sign){
-                pred = clip(pred - state->C[context], 0, state->maxval);
+                pred = av_clip(pred - state->C[context], 0, state->maxval);
                err = -ls_get_code_regular(&s->gb, state, context);
            } else {
-                pred = clip(pred + state->C[context], 0, state->maxval);
+                pred = av_clip(pred + state->C[context], 0, state->maxval);
                err = ls_get_code_regular(&s->gb, state, context);
            }

@@ -381 +381 @@
                pred += state->range * state->twonear;
            else if(pred > state->maxval + state->near)
                pred -= state->range * state->twonear;
-            pred = clip(pred, 0, state->maxval);
+            pred = av_clip(pred, 0, state->maxval);
        }

        pred &= state->maxval;

@@ -623 +623 @@
                    err = -(state->near - err) / state->twonear;

                if(RItype || (Rb >= Ra))
-                    Ra = clip(pred + err * state->twonear, 0, state->maxval);
+                    Ra = av_clip(pred + err * state->twonear, 0, state->maxval);
                else
-                    Ra = clip(pred - err * state->twonear, 0, state->maxval);
+                    Ra = av_clip(pred - err * state->twonear, 0, state->maxval);
                W(cur, x, Ra);
            }
            if(err < 0)

@@ -646 +646 @@
            if(context < 0){
                context = -context;
                sign = 1;
-                pred = clip(pred - state->C[context], 0, state->maxval);
+                pred = av_clip(pred - state->C[context], 0, state->maxval);
                err = pred - R(cur, x);
            }else{
                sign = 0;
-                pred = clip(pred + state->C[context], 0, state->maxval);
+                pred = av_clip(pred + state->C[context], 0, state->maxval);
                err = R(cur, x) - pred;
            }

@@ -660 +660 @@
                else
                    err = -(state->near - err) / state->twonear;
                if(!sign)
-                    Ra = clip(pred + err * state->twonear, 0, state->maxval);
+                    Ra = av_clip(pred + err * state->twonear, 0, state->maxval);
                else
-                    Ra = clip(pred - err * state->twonear, 0, state->maxval);
+                    Ra = av_clip(pred - err * state->twonear, 0, state->maxval);
                W(cur, x, Ra);
            }

libavcodec/motion_est.c

@@ -1798 +1798 @@
    c->pred_x=0;
    c->pred_y=0;

-    P_LEFT[0]        = clip(mv_table[mot_xy - 1][0], xmin<<shift, xmax<<shift);
-    P_LEFT[1]        = clip(mv_table[mot_xy - 1][1], ymin<<shift, ymax<<shift);
+    P_LEFT[0]        = av_clip(mv_table[mot_xy - 1][0], xmin<<shift, xmax<<shift);
+    P_LEFT[1]        = av_clip(mv_table[mot_xy - 1][1], ymin<<shift, ymax<<shift);

    /* special case for first line */
    if (!s->first_slice_line) { //FIXME maybe allow this over thread boundary as its clipped
-        P_TOP[0]      = clip(mv_table[mot_xy - mot_stride             ][0], xmin<<shift, xmax<<shift);
-        P_TOP[1]      = clip(mv_table[mot_xy - mot_stride             ][1], ymin<<shift, ymax<<shift);
-        P_TOPRIGHT[0] = clip(mv_table[mot_xy - mot_stride + 1         ][0], xmin<<shift, xmax<<shift);
-        P_TOPRIGHT[1] = clip(mv_table[mot_xy - mot_stride + 1         ][1], ymin<<shift, ymax<<shift);
+        P_TOP[0]      = av_clip(mv_table[mot_xy - mot_stride             ][0], xmin<<shift, xmax<<shift);
+        P_TOP[1]      = av_clip(mv_table[mot_xy - mot_stride             ][1], ymin<<shift, ymax<<shift);
+        P_TOPRIGHT[0] = av_clip(mv_table[mot_xy - mot_stride + 1         ][0], xmin<<shift, xmax<<shift);
+        P_TOPRIGHT[1] = av_clip(mv_table[mot_xy - mot_stride + 1         ][1], ymin<<shift, ymax<<shift);

        P_MEDIAN[0]= mid_pred(P_LEFT[0], P_TOP[0], P_TOPRIGHT[0]);
        P_MEDIAN[1]= mid_pred(P_LEFT[1], P_TOP[1], P_TOPRIGHT[1]);
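
Illustrative example (not part of this revision): the motion_est.c hunk clips the left/top/top-right candidate vectors into the legal search range before taking their median as the predictor. A sketch with a simple stand-in for FFmpeg's mid_pred() and made-up vectors:

#include <stdio.h>

static inline int av_clip(int a, int amin, int amax)
{
    if      (a < amin) return amin;
    else if (a > amax) return amax;
    else               return a;
}

/* median of three values; a straightforward stand-in for FFmpeg's mid_pred() */
static inline int mid_pred(int a, int b, int c)
{
    int mn = a < b ? a : b;
    int mx = a < b ? b : a;
    if (c < mn) return mn;   /* c is the smallest -> median is min(a,b) */
    if (c > mx) return mx;   /* c is the largest  -> median is max(a,b) */
    return c;                /* c lies between a and b -> median is c   */
}

int main(void)
{
    /* hypothetical neighbouring motion vectors (x components) and search range */
    int left = -35, top = 4, topright = 60;
    int xmin = -16, xmax = 16;

    /* as in the motion_est.c hunk above: clamp each candidate into the
     * legal search range first, then take the median as the predictor */
    int p_left     = av_clip(left,     xmin, xmax);
    int p_top      = av_clip(top,      xmin, xmax);
    int p_topright = av_clip(topright, xmin, xmax);
    int p_median   = mid_pred(p_left, p_top, p_topright);

    printf("clamped candidates %d %d %d -> median predictor %d\n",
           p_left, p_top, p_topright, p_median);
    return 0;
}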
libavcodec/mpegvideo.c

@@ -188 +188 @@

static inline void update_qscale(MpegEncContext *s){
    s->qscale= (s->lambda*139 + FF_LAMBDA_SCALE*64) >> (FF_LAMBDA_SHIFT + 7);
-    s->qscale= clip(s->qscale, s->avctx->qmin, s->avctx->qmax);
+    s->qscale= av_clip(s->qscale, s->avctx->qmin, s->avctx->qmax);

    s->lambda2= (s->lambda*s->lambda + FF_LAMBDA_SCALE/2) >> FF_LAMBDA_SHIFT;
}

@@ -1713 +1713 @@
static void draw_line(uint8_t *buf, int sx, int sy, int ex, int ey, int w, int h, int stride, int color){
    int x, y, fr, f;

-    sx= clip(sx, 0, w-1);
-    sy= clip(sy, 0, h-1);
-    ex= clip(ex, 0, w-1);
-    ey= clip(ey, 0, h-1);
+    sx= av_clip(sx, 0, w-1);
+    sy= av_clip(sy, 0, h-1);
+    ex= av_clip(ex, 0, w-1);
+    ey= av_clip(ey, 0, h-1);

    buf[sy*stride + sx]+= color;

@@ -1762 +1762 @@
static void draw_arrow(uint8_t *buf, int sx, int sy, int ex, int ey, int w, int h, int stride, int color){
    int dx,dy;

-    sx= clip(sx, -100, w+100);
-    sy= clip(sy, -100, h+100);
-    ex= clip(ex, -100, w+100);
-    ey= clip(ey, -100, h+100);
+    sx= av_clip(sx, -100, w+100);
+    sy= av_clip(sy, -100, h+100);
+    ex= av_clip(ex, -100, w+100);
+    ey= av_clip(ey, -100, h+100);

    dx= ex - sx;
    dy= ey - sy;

@@ -2664 +2664 @@
    src_y = s->mb_y * 16 + (motion_y >> (s->sprite_warping_accuracy+1));
    motion_x<<=(3-s->sprite_warping_accuracy);
    motion_y<<=(3-s->sprite_warping_accuracy);
-    src_x = clip(src_x, -16, s->width);
+    src_x = av_clip(src_x, -16, s->width);
    if (src_x == s->width)
        motion_x =0;
-    src_y = clip(src_y, -16, s->height);
+    src_y = av_clip(src_y, -16, s->height);
    if (src_y == s->height)
        motion_y =0;

@@ -2706 +2706 @@
    src_y = s->mb_y * 8 + (motion_y >> (s->sprite_warping_accuracy+1));
    motion_x<<=(3-s->sprite_warping_accuracy);
    motion_y<<=(3-s->sprite_warping_accuracy);
-    src_x = clip(src_x, -8, s->width>>1);
+    src_x = av_clip(src_x, -8, s->width>>1);
    if (src_x == s->width>>1)
        motion_x =0;
-    src_y = clip(src_y, -8, s->height>>1);
+    src_y = av_clip(src_y, -8, s->height>>1);
    if (src_y == s->height>>1)
        motion_y =0;

@@ -2879 +2879 @@
    src_y += motion_y >> 1;

    /* WARNING: do no forget half pels */
-    src_x = clip(src_x, -16, width); //FIXME unneeded for emu?
+    src_x = av_clip(src_x, -16, width); //FIXME unneeded for emu?
    if (src_x == width)
        dxy &= ~1;
-    src_y = clip(src_y, -16, height);
+    src_y = av_clip(src_y, -16, height);
    if (src_y == height)
        dxy &= ~2;
    src += src_y * stride + src_x;

@@ -3358 +3358 @@

    src_x = s->mb_x * 8 + mx;
    src_y = s->mb_y * 8 + my;
-    src_x = clip(src_x, -8, s->width/2);
+    src_x = av_clip(src_x, -8, s->width/2);
    if (src_x == s->width/2)
        dxy &= ~1;
-    src_y = clip(src_y, -8, s->height/2);
+    src_y = av_clip(src_y, -8, s->height/2);
    if (src_y == s->height/2)
        dxy &= ~2;

@@ -3574 +3574 @@
                src_y = mb_y * 16 + (motion_y >> 2) + (i >>1) * 8;

                /* WARNING: do no forget half pels */
-                src_x = clip(src_x, -16, s->width);
+                src_x = av_clip(src_x, -16, s->width);
                if (src_x == s->width)
                    dxy &= ~3;
-                src_y = clip(src_y, -16, s->height);
+                src_y = av_clip(src_y, -16, s->height);
                if (src_y == s->height)
                    dxy &= ~12;

@@ -4343 +4343 @@
            s->dquant= s->qscale - last_qp;

            if(s->out_format==FMT_H263){
-                s->dquant= clip(s->dquant, -2, 2);
+                s->dquant= av_clip(s->dquant, -2, 2);

                if(s->codec_id==CODEC_ID_MPEG4){
                    if(!s->mb_intra){

@@ -5742 +5742 @@
        for(i=1;i<64;i++){
            int j= s->dsp.idct_permutation[i];

-            s->intra_matrix[j] = clip_uint8((ff_mpeg1_default_intra_matrix[i] * s->qscale) >> 3);
+            s->intra_matrix[j] = av_clip_uint8((ff_mpeg1_default_intra_matrix[i] * s->qscale) >> 3);
        }
        convert_matrix(&s->dsp, s->q_intra_matrix, s->q_intra_matrix16,
                       s->intra_matrix, s->intra_quant_bias, 8, 8, 1);
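
Illustrative example (not part of this revision): update_qscale() in the first mpegvideo.c hunk maps the rate-control lambda to a quantizer and then clamps it to the user's qmin/qmax. A standalone sketch assuming the usual lavc lambda constants (FF_LAMBDA_SHIFT = 7, FF_LAMBDA_SCALE = 1 << 7), which are not shown in this diff:

#include <stdio.h>

#define FF_LAMBDA_SHIFT 7                       /* assumed lavc value */
#define FF_LAMBDA_SCALE (1 << FF_LAMBDA_SHIFT)  /* assumed lavc value */

static inline int av_clip(int a, int amin, int amax)
{
    if      (a < amin) return amin;
    else if (a > amax) return amax;
    else               return a;
}

int main(void)
{
    /* hypothetical rate-control lambda and user qmin/qmax limits */
    int lambda = 3000, qmin = 2, qmax = 31;

    /* same mapping as update_qscale() in the mpegvideo.c hunk above:
     * lambda -> quantizer, then clamp into the allowed qscale range */
    int qscale = (lambda * 139 + FF_LAMBDA_SCALE * 64) >> (FF_LAMBDA_SHIFT + 7);
    qscale = av_clip(qscale, qmin, qmax);

    printf("lambda %d -> qscale %d\n", lambda, qscale);
    return 0;
}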
libavcodec/ratecontrol.c

@@ -280 +280 @@
        }

        left= buffer_size - rcc->buffer_index - 1;
-        rcc->buffer_index += clip(left, min_rate, max_rate);
+        rcc->buffer_index += av_clip(left, min_rate, max_rate);

        if(rcc->buffer_index > buffer_size){
            int stuffing= ceil((rcc->buffer_index - buffer_size)/8);

@@ -417 +417 @@
        qmax= (int)(qmax*FFABS(s->avctx->i_quant_factor)+s->avctx->i_quant_offset + 0.5);
    }

-    qmin= clip(qmin, 1, FF_LAMBDA_MAX);
-    qmax= clip(qmax, 1, FF_LAMBDA_MAX);
+    qmin= av_clip(qmin, 1, FF_LAMBDA_MAX);
+    qmax= av_clip(qmax, 1, FF_LAMBDA_MAX);

    if(qmax<qmin) qmax= qmin;

@@ -915 +915 @@
    for(i=0; i<rcc->num_entries; i++){
        /* av_log(s->avctx, AV_LOG_DEBUG, "[lavc rc] entry[%d].new_qscale = %.3f  qp = %.3f\n",
            i, rcc->entry[i].new_qscale, rcc->entry[i].new_qscale / FF_QP2LAMBDA); */
-        qscale_sum += clip(rcc->entry[i].new_qscale / FF_QP2LAMBDA, s->avctx->qmin, s->avctx->qmax);
+        qscale_sum += av_clip(rcc->entry[i].new_qscale / FF_QP2LAMBDA, s->avctx->qmin, s->avctx->qmax);
    }
    assert(toobig <= 40);
    av_log(s->avctx, AV_LOG_DEBUG,
libavcodec/resample2.c

@@ -121 +121 @@

        /* normalize so that an uniform color remains the same */
        for(i=0;i<tap_count;i++) {
-            v = clip(lrintf(tab[i] * scale / norm + e), FELEM_MIN, FELEM_MAX);
+            v = av_clip(lrintf(tab[i] * scale / norm + e), FELEM_MIN, FELEM_MAX);
            filter[ph * tap_count + i] = v;
            e += tab[i] * scale / norm - v;
        }
libavcodec/snow.c

@@ -1810 +1810 @@
static inline void decode_subband_slice_buffered(SnowContext *s, SubBand *b, slice_buffer * sb, int start_y, int h, int save_state[1]){
    const int w= b->width;
    int y;
-    const int qlog= clip(s->qlog + b->qlog, 0, QROOT*16);
+    const int qlog= av_clip(s->qlog + b->qlog, 0, QROOT*16);
    int qmul= qexp[qlog&(QROOT-1)]<<(qlog>>QSHIFT);
    int qadd= (s->qbias*qmul)>>QBIAS_SHIFT;
    int new_index = 0;

@@ -2898 +2898 @@
    }
    *b= backup;

-    return clip(((ab<<LOG2_OBMC_MAX) + aa/2)/aa, 0, 255); //FIXME we shouldnt need cliping
+    return av_clip(((ab<<LOG2_OBMC_MAX) + aa/2)/aa, 0, 255); //FIXME we shouldnt need cliping
}

static inline int get_block_bits(SnowContext *s, int x, int y, int w){

@@ -3407 +3407 @@
    const int level= b->level;
    const int w= b->width;
    const int h= b->height;
-    const int qlog= clip(s->qlog + b->qlog, 0, QROOT*16);
+    const int qlog= av_clip(s->qlog + b->qlog, 0, QROOT*16);
    const int qmul= qexp[qlog&(QROOT-1)]<<(qlog>>QSHIFT);
    int x,y, thres1, thres2;
//    START_TIMER

@@ -3466 +3466 @@

static void dequantize_slice_buffered(SnowContext *s, slice_buffer * sb, SubBand *b, DWTELEM *src, int stride, int start_y, int end_y){
    const int w= b->width;
-    const int qlog= clip(s->qlog + b->qlog, 0, QROOT*16);
+    const int qlog= av_clip(s->qlog + b->qlog, 0, QROOT*16);
    const int qmul= qexp[qlog&(QROOT-1)]<<(qlog>>QSHIFT);
    const int qadd= (s->qbias*qmul)>>QBIAS_SHIFT;
    int x,y;

@@ -3494 +3494 @@
static void dequantize(SnowContext *s, SubBand *b, DWTELEM *src, int stride){
    const int w= b->width;
    const int h= b->height;
-    const int qlog= clip(s->qlog + b->qlog, 0, QROOT*16);
+    const int qlog= av_clip(s->qlog + b->qlog, 0, QROOT*16);
    const int qmul= qexp[qlog&(QROOT-1)]<<(qlog>>QSHIFT);
    const int qadd= (s->qbias*qmul)>>QBIAS_SHIFT;
    int x,y;

@@ -3869 +3869 @@
            const int w= b->width;
            const int h= b->height;
            const int stride= b->stride;
-            const int qlog= clip(2*QROOT + b->qlog, 0, QROOT*16);
+            const int qlog= av_clip(2*QROOT + b->qlog, 0, QROOT*16);
            const int qmul= qexp[qlog&(QROOT-1)]<<(qlog>>QSHIFT);
            const int qdiv= (1<<16)/qmul;
            int x, y;
libavcodec/svq1.c

@@ -1004 +1004 @@
                diff= block_sum[stage] - sum;
                mean= (diff + (size>>1)) >> (level+3);
                assert(mean >-300 && mean<300);
-                if(intra) mean= clip(mean, 0, 255);
-                else      mean= clip(mean, -256, 255);
+                if(intra) mean= av_clip(mean, 0, 255);
+                else      mean= av_clip(mean, -256, 255);
                score= sqr - ((diff*(int64_t)diff)>>(level+3)); //FIXME 64bit slooow
                if(score < best_vector_score){
                    best_vector_score= score;
libavcodec/svq3.c

@@ -285 +285 @@
      emu = 1;
    }

-    mx = clip (mx, -16, (s->h_edge_pos - width  + 15));
-    my = clip (my, -16, (s->v_edge_pos - height + 15));
+    mx = av_clip (mx, -16, (s->h_edge_pos - width  + 15));
+    my = av_clip (my, -16, (s->v_edge_pos - height + 15));
  }

  /* form component predictions */

@@ -361 +361 @@
      }

      /* clip motion vector prediction to frame border */
-      mx = clip (mx, extra_width - 6*x, h_edge_pos - 6*x);
-      my = clip (my, extra_width - 6*y, v_edge_pos - 6*y);
+      mx = av_clip (mx, extra_width - 6*x, h_edge_pos - 6*x);
+      my = av_clip (my, extra_width - 6*y, v_edge_pos - 6*y);

      /* get (optional) motion vector differential */
      if (mode == PREDICT_MODE) {
libavcodec/truemotion2.c

@@ -384 +384 @@
            d = deltas[i + j * 4];
            ct += d;
            last[i] += ct;
-            Y[i] = clip_uint8(last[i]);
+            Y[i] = av_clip_uint8(last[i]);
        }
        Y += stride;
        ctx->D[j] = ct;

@@ -735 +735 @@
    src = (ctx->cur?ctx->Y2:ctx->Y1);
    for(j = 0; j < ctx->avctx->height; j++){
        for(i = 0; i < ctx->avctx->width; i++){
-            Y[i] = clip_uint8(*src++);
+            Y[i] = av_clip_uint8(*src++);
        }
        Y += p->linesize[0];
    }

@@ -743 +743 @@
    src = (ctx->cur?ctx->U2:ctx->U1);
    for(j = 0; j < (ctx->avctx->height + 1) >> 1; j++){
        for(i = 0; i < (ctx->avctx->width + 1) >> 1; i++){
-            U[i] = clip_uint8(*src++);
+            U[i] = av_clip_uint8(*src++);
        }
        U += p->linesize[2];
    }

@@ -751 +751 @@
    src = (ctx->cur?ctx->V2:ctx->V1);
    for(j = 0; j < (ctx->avctx->height + 1) >> 1; j++){
        for(i = 0; i < (ctx->avctx->width + 1) >> 1; i++){
-            V[i] = clip_uint8(*src++);
+            V[i] = av_clip_uint8(*src++);
        }
        V += p->linesize[1];
    }
libavcodec/truespeech.c

@@ -281 +281 @@
        for(k = 0; k < 8; k++)
            sum += ptr0[k] * ptr1[k];
        sum = (sum + (out[i] << 12) + 0x800) >> 12;
-        out[i] = clip(sum, -0x7FFE, 0x7FFE);
+        out[i] = av_clip(sum, -0x7FFE, 0x7FFE);
        for(k = 7; k > 0; k--)
            ptr0[k] = ptr0[k - 1];
        ptr0[0] = out[i];

@@ -311 +311 @@
            sum += ptr0[k] * t[k];
        for(k = 7; k > 0; k--)
            ptr0[k] = ptr0[k - 1];
-        ptr0[0] = clip((sum + 0x800) >> 12, -0x7FFE, 0x7FFE);
+        ptr0[0] = av_clip((sum + 0x800) >> 12, -0x7FFE, 0x7FFE);

        sum = ((ptr0[1] * (dec->filtval - (dec->filtval >> 2))) >> 4) + sum;
        sum = sum - (sum >> 3);
-        out[i] = clip((sum + 0x800) >> 12, -0x7FFE, 0x7FFE);
+        out[i] = av_clip((sum + 0x800) >> 12, -0x7FFE, 0x7FFE);
    }
}

libavcodec/vc1.c

@@ -821 +821 @@
    uvsrc_x = s->mb_x * 8 + (uvmx >> 2);
    uvsrc_y = s->mb_y * 8 + (uvmy >> 2);

-    src_x   = clip(  src_x, -16, s->mb_width  * 16);
-    src_y   = clip(  src_y, -16, s->mb_height * 16);
-    uvsrc_x = clip(uvsrc_x,  -8, s->mb_width  *  8);
-    uvsrc_y = clip(uvsrc_y,  -8, s->mb_height *  8);
+    src_x   = av_clip(  src_x, -16, s->mb_width  * 16);
+    src_y   = av_clip(  src_y, -16, s->mb_height * 16);
+    uvsrc_x = av_clip(uvsrc_x,  -8, s->mb_width  *  8);
+    uvsrc_y = av_clip(uvsrc_y,  -8, s->mb_height *  8);

    srcY += src_y * s->linesize + src_x;
    srcU += uvsrc_y * s->uvlinesize + uvsrc_x;

@@ -944 +944 @@
    src_x = s->mb_x * 16 + (n&1) * 8 + (mx >> 2);
    src_y = s->mb_y * 16 + (n&2) * 4 + (my >> 2);

-    src_x   = clip(  src_x, -16, s->mb_width  * 16);
-    src_y   = clip(  src_y, -16, s->mb_height * 16);
+    src_x   = av_clip(  src_x, -16, s->mb_width  * 16);
+    src_y   = av_clip(  src_y, -16, s->mb_height * 16);

    srcY += src_y * s->linesize + src_x;

@@ -1071 +1071 @@
    uvsrc_x = s->mb_x * 8 + (uvmx >> 2);
    uvsrc_y = s->mb_y * 8 + (uvmy >> 2);

-    uvsrc_x = clip(uvsrc_x,  -8, s->mb_width  *  8);
-    uvsrc_y = clip(uvsrc_y,  -8, s->mb_height *  8);
+    uvsrc_x = av_clip(uvsrc_x,  -8, s->mb_width  *  8);
+    uvsrc_y = av_clip(uvsrc_y,  -8, s->mb_height *  8);
    srcU = s->last_picture.data[1] + uvsrc_y * s->uvlinesize + uvsrc_x;
    srcV = s->last_picture.data[2] + uvsrc_y * s->uvlinesize + uvsrc_x;
    if(v->rangeredfrm || (v->mv_mode == MV_PMODE_INTENSITY_COMP)

@@ -1499 +1499 @@
                    shift = v->lumshift << 6;
            }
            for(i = 0; i < 256; i++) {
-                v->luty[i] = clip_uint8((scale * i + shift + 32) >> 6);
-                v->lutuv[i] = clip_uint8((scale * (i - 128) + 128*64 + 32) >> 6);
+                v->luty[i] = av_clip_uint8((scale * i + shift + 32) >> 6);
+                v->lutuv[i] = av_clip_uint8((scale * (i - 128) + 128*64 + 32) >> 6);
            }
        }
        if(v->mv_mode == MV_PMODE_1MV_HPEL || v->mv_mode == MV_PMODE_1MV_HPEL_BILIN)

@@ -1740 +1740 @@
                    shift = v->lumshift << 6;
            }
            for(i = 0; i < 256; i++) {
-                v->luty[i] = clip_uint8((scale * i + shift + 32) >> 6);
-                v->lutuv[i] = clip_uint8((scale * (i - 128) + 128*64 + 32) >> 6);
+                v->luty[i] = av_clip_uint8((scale * i + shift + 32) >> 6);
+                v->lutuv[i] = av_clip_uint8((scale * (i - 128) + 128*64 + 32) >> 6);
            }
            v->use_ic = 1;
        }

@@ -2116 +2116 @@
    uvsrc_x = s->mb_x * 8 + (uvmx >> 2);
    uvsrc_y = s->mb_y * 8 + (uvmy >> 2);

-    src_x   = clip(  src_x, -16, s->mb_width  * 16);
-    src_y   = clip(  src_y, -16, s->mb_height * 16);
-    uvsrc_x = clip(uvsrc_x,  -8, s->mb_width  *  8);
-    uvsrc_y = clip(uvsrc_y,  -8, s->mb_height *  8);
+    src_x   = av_clip(  src_x, -16, s->mb_width  * 16);
+    src_y   = av_clip(  src_y, -16, s->mb_height * 16);
+    uvsrc_x = av_clip(uvsrc_x,  -8, s->mb_width  *  8);
+    uvsrc_y = av_clip(uvsrc_y,  -8, s->mb_height *  8);

    srcY += src_y * s->linesize + src_x;
    srcU += uvsrc_y * s->uvlinesize + uvsrc_x;
libavcodec/vc1dsp.c

@@ -355 +355 @@
    tptr = tmp;
    for(j = 0; j < 11; j++) {
        for(i = 0; i < 8; i++)
-            tptr[i] = clip_uint8(vc1_mspel_filter(src + i, 1, m, r));
+            tptr[i] = av_clip_uint8(vc1_mspel_filter(src + i, 1, m, r));
        src += stride;
        tptr += 8;
    }

@@ -365 +365 @@
    tptr = tmp + 8;
    for(j = 0; j < 8; j++) {
        for(i = 0; i < 8; i++)
-            dst[i] = clip_uint8(vc1_mspel_filter(tptr + i, 8, m, r));
+            dst[i] = av_clip_uint8(vc1_mspel_filter(tptr + i, 8, m, r));
        dst += stride;
        tptr += 8;
    }
libavcodec/vmdav.c

@@ -462 +462 @@
            s->predictors[chan] -= vmdaudio_table[buf[i] & 0x7F];
        else
            s->predictors[chan] += vmdaudio_table[buf[i]];
-        s->predictors[chan] = clip(s->predictors[chan], -32768, 32767);
+        s->predictors[chan] = av_clip(s->predictors[chan], -32768, 32767);
        out[i] = s->predictors[chan];
        chan ^= stereo;
    }
libavcodec/vp3.c

@@ -633 +633 @@
                int qmin= 8<<(inter + !i);
                int qscale= i ? ac_scale_factor : dc_scale_factor;

-                s->qmat[inter][plane][i]= clip((qscale * coeff)/100 * 4, qmin, 4096);
+                s->qmat[inter][plane][i]= av_clip((qscale * coeff)/100 * 4, qmin, 4096);
            }
        }
    }

@@ -1729 +1729 @@
            (first_pixel[-2] - first_pixel[ 1])
         +3*(first_pixel[ 0] - first_pixel[-1]);
        filter_value = bounding_values[(filter_value + 4) >> 3];
-        first_pixel[-1] = clip_uint8(first_pixel[-1] + filter_value);
-        first_pixel[ 0] = clip_uint8(first_pixel[ 0] - filter_value);
+        first_pixel[-1] = av_clip_uint8(first_pixel[-1] + filter_value);
+        first_pixel[ 0] = av_clip_uint8(first_pixel[ 0] - filter_value);
    }
}

@@ -1746 +1746 @@
            (first_pixel[2 * nstride] - first_pixel[ stride])
         +3*(first_pixel[0          ] - first_pixel[nstride]);
        filter_value = bounding_values[(filter_value + 4) >> 3];
-        first_pixel[nstride] = clip_uint8(first_pixel[nstride] + filter_value);
-        first_pixel[0] = clip_uint8(first_pixel[0] - filter_value);
+        first_pixel[nstride] = av_clip_uint8(first_pixel[nstride] + filter_value);
+        first_pixel[0] = av_clip_uint8(first_pixel[0] - filter_value);
    }
}

libavcodec/vp5.c

@@ -164 +164 @@
    for (pt=0; pt<2; pt++)
        for (ctx=0; ctx<36; ctx++)
            for (node=0; node<5; node++)
-                s->coeff_model_dcct[pt][ctx][node] = clip(((s->coeff_model_dccv[pt][node] * vp5_dccv_lc[node][ctx][0] + 128) >> 8) + vp5_dccv_lc[node][ctx][1], 1, 254);
+                s->coeff_model_dcct[pt][ctx][node] = av_clip(((s->coeff_model_dccv[pt][node] * vp5_dccv_lc[node][ctx][0] + 128) >> 8) + vp5_dccv_lc[node][ctx][1], 1, 254);

    /* coeff_model_acct is a linear combination of coeff_model_ract */
    for (ct=0; ct<3; ct++)

@@ -172 +172 @@
            for (cg=0; cg<3; cg++)
                for (ctx=0; ctx<6; ctx++)
                    for (node=0; node<5; node++)
-                        s->coeff_model_acct[pt][ct][cg][ctx][node] = clip(((s->coeff_model_ract[pt][ct][cg][node] * vp5_ract_lc[ct][cg][node][ctx][0] + 128) >> 8) + vp5_ract_lc[ct][cg][node][ctx][1], 1, 254);
+                        s->coeff_model_acct[pt][ct][cg][ctx][node] = av_clip(((s->coeff_model_ract[pt][ct][cg][node] * vp5_ract_lc[ct][cg][node][ctx][0] + 128) >> 8) + vp5_ract_lc[ct][cg][node][ctx][1], 1, 254);
}

static void vp5_parse_coeff(vp56_context_t *s)
libavcodec/vp56.c

@@ -308 +308 @@
    for (i=0; i<12; i++) {
        v = (yuv[-pix2_inc] + 3*(yuv[0]-yuv[-pix_inc]) - yuv[pix_inc] + 4) >>3;
        v = s->adjust(v, t);
-        yuv[-pix_inc] = clip_uint8(yuv[-pix_inc] + v);
-        yuv[0] = clip_uint8(yuv[0] - v);
+        yuv[-pix_inc] = av_clip_uint8(yuv[-pix_inc] + v);
+        yuv[0] = av_clip_uint8(yuv[0] - v);
        yuv += line_inc;
    }
}
libavcodec/vp6.c

@@ -236 +236 @@
    for (pt=0; pt<2; pt++)
        for (ctx=0; ctx<3; ctx++)
            for (node=0; node<5; node++)
-                s->coeff_model_dcct[pt][ctx][node] = clip(((s->coeff_model_dccv[pt][node] * vp6_dccv_lc[ctx][node][0] + 128) >> 8) + vp6_dccv_lc[ctx][node][1], 1, 255);
+                s->coeff_model_dcct[pt][ctx][node] = av_clip(((s->coeff_model_dccv[pt][node] * vp6_dccv_lc[ctx][node][0] + 128) >> 8) + vp6_dccv_lc[ctx][node][1], 1, 255);
}

static void vp6_parse_vector_adjustment(vp56_context_t *s, vp56_mv_t *vect)

@@ -395 +395 @@

    for (y=0; y<8; y++) {
        for (x=0; x<8; x++) {
-            dst[x] = clip_uint8((  src[x-delta  ] * weights[0]
+            dst[x] = av_clip_uint8((  src[x-delta  ] * weights[0]
                                 + src[x        ] * weights[1]
                                 + src[x+delta  ] * weights[2]
                                 + src[x+2*delta] * weights[3] + 64) >> 7);

@@ -434 +434 @@

    for (y=0; y<11; y++) {
        for (x=0; x<8; x++) {
-            t[x] = clip_uint8((  src[x-1] * h_weights[0]
+            t[x] = av_clip_uint8((  src[x-1] * h_weights[0]
                               + src[x  ] * h_weights[1]
                               + src[x+1] * h_weights[2]
                               + src[x+2] * h_weights[3] + 64) >> 7);

@@ -446 +446 @@
    t = tmp + 8;
    for (y=0; y<8; y++) {
        for (x=0; x<8; x++) {
-            dst[x] = clip_uint8((  t[x-8 ] * v_weights[0]
+            dst[x] = av_clip_uint8((  t[x-8 ] * v_weights[0]
                                 + t[x   ] * v_weights[1]
                                 + t[x+8 ] * v_weights[2]
                                 + t[x+16] * v_weights[3] + 64) >> 7);
libavcodec/wmv2.c

@@ -641 +641 @@

    /* WARNING: do no forget half pels */
    v_edge_pos = s->v_edge_pos;
-    src_x = clip(src_x, -16, s->width);
-    src_y = clip(src_y, -16, s->height);
+    src_x = av_clip(src_x, -16, s->width);
+    src_y = av_clip(src_y, -16, s->height);

    if(src_x<=-16 || src_x >= s->width)
        dxy &= ~3;

@@ -688 +688 @@

    src_x = s->mb_x * 8 + mx;
    src_y = s->mb_y * 8 + my;
-    src_x = clip(src_x, -8, s->width >> 1);
+    src_x = av_clip(src_x, -8, s->width >> 1);
    if (src_x == (s->width >> 1))
        dxy &= ~1;
-    src_y = clip(src_y, -8, s->height >> 1);
+    src_y = av_clip(src_y, -8, s->height >> 1);
    if (src_y == (s->height >> 1))
        dxy &= ~2;
    offset = (src_y * uvlinesize) + src_x;
libavutil/common.h

@@ -163 +163 @@
 * @param amax maximum value of the clip range
 * @return clipped value
 */
-static inline int clip(int a, int amin, int amax)
+static inline int av_clip(int a, int amin, int amax)
{
    if (a < amin)      return amin;
    else if (a > amax) return amax;

@@ -175 +175 @@
 * @param a value to clip
 * @return clipped value
 */
-static inline uint8_t clip_uint8(int a)
+static inline uint8_t av_clip_uint8(int a)
{
    if (a&(~255)) return (-a)>>31;
    else          return a;
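
Illustrative example (not part of this revision): the renamed helpers as introduced above, in a small standalone program. Note that av_clip_uint8() relies on arithmetic right shift of negative integers, an assumption FFmpeg makes throughout: a value with any bit outside 0..255 set is either negative, in which case (-a)>>31 is 0, or larger than 255, in which case (-a)>>31 is -1 and becomes 255 when returned as uint8_t.

#include <stdio.h>
#include <stdint.h>

/* the two helpers as introduced by this revision in libavutil/common.h
 * (in a real build you would simply include libavutil/common.h) */
static inline int av_clip(int a, int amin, int amax)
{
    if      (a < amin) return amin;
    else if (a > amax) return amax;
    else               return a;
}

static inline uint8_t av_clip_uint8(int a)
{
    /* branchless 0..255 clamp, exactly as in the hunk above */
    if (a & (~255)) return (-a) >> 31;
    else            return a;
}

int main(void)
{
    printf("%d %d %d\n", av_clip(-5, 0, 88), av_clip(42, 0, 88), av_clip(300, 0, 88));
    printf("%d %d %d\n", av_clip_uint8(-7), av_clip_uint8(128), av_clip_uint8(999));
    return 0;   /* prints: 0 42 88  and  0 128 255 */
}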
