Revision 12802ec0 libavcodec/dsputil.c
libavcodec/dsputil.c | ||
---|---|---|
1600 | 1600 |
#undef op_avg |
1601 | 1601 |
#undef op_put |
1602 | 1602 |
|
1603 |
/**
 * VC-1 chroma motion compensation, "no rounding" variant, 8-pixel-wide block.
 * Bilinearly interpolates h rows of 8 pixels from src (may be unaligned)
 * into dst, using eighth-pel weights derived from the fractional motion
 * vector components (x, y), each in [0, 8).
 *
 * Note: the rounding term is 32 - 4 = 28, NOT 32 as in the H.264 chroma MC
 * above — this implements VC-1's no-rounding mode and is intentional.
 */
static void put_no_rnd_vc1_chroma_mc8_c(uint8_t *dst/*align 8*/, uint8_t *src/*align 1*/, int stride, int h, int x, int y){
    /* bilinear weights for the 2x2 source neighbourhood; they sum to 64 */
    const int A = (8 - x) * (8 - y);
    const int B = (    x) * (8 - y);
    const int C = (8 - x) * (    y);
    const int D = (    x) * (    y);
    int row, col;

    assert(x < 8 && y < 8 && x >= 0 && y >= 0);

    for (row = 0; row < h; row++) {
        for (col = 0; col < 8; col++) {
            /* weighted blend of the four pixels surrounding (col + x/8, y/8) */
            dst[col] = (A * src[col]          + B * src[col + 1] +
                        C * src[stride + col] + D * src[stride + col + 1] +
                        32 - 4) >> 6;
        }
        dst += stride;
        src += stride;
    }
}
|
1626 |
|
|
1627 |
/**
 * VC-1 chroma motion compensation with averaging, "no rounding" variant,
 * 8-pixel-wide block. Computes the same bilinear prediction as
 * put_no_rnd_vc1_chroma_mc8_c (rounding term 32 - 4 = 28, per VC-1's
 * no-rounding mode) and averages it with the existing contents of dst
 * via avg2(), as used for bi-directionally predicted blocks.
 */
static void avg_no_rnd_vc1_chroma_mc8_c(uint8_t *dst/*align 8*/, uint8_t *src/*align 1*/, int stride, int h, int x, int y){
    /* bilinear weights for the 2x2 source neighbourhood; they sum to 64 */
    const int A = (8 - x) * (8 - y);
    const int B = (    x) * (8 - y);
    const int C = (8 - x) * (    y);
    const int D = (    x) * (    y);
    int row, col;

    assert(x < 8 && y < 8 && x >= 0 && y >= 0);

    for (row = 0; row < h; row++) {
        for (col = 0; col < 8; col++) {
            const int pred = (A * src[col]          + B * src[col + 1] +
                              C * src[stride + col] + D * src[stride + col + 1] +
                              32 - 4) >> 6;
            /* blend the new prediction with what is already in dst */
            dst[col] = avg2(dst[col], pred);
        }
        dst += stride;
        src += stride;
    }
}
|
1650 |
|
|
1651 | 1603 |
#define QPEL_MC(r, OPNAME, RND, OP) \ |
1652 | 1604 |
static void OPNAME ## mpeg4_qpel8_h_lowpass(uint8_t *dst, uint8_t *src, int dstStride, int srcStride, int h){\ |
1653 | 1605 |
uint8_t *cm = ff_cropTbl + MAX_NEG_CROP;\ |
... | ... | |
4301 | 4253 |
c->avg_h264_chroma_pixels_tab[0]= avg_h264_chroma_mc8_c; |
4302 | 4254 |
c->avg_h264_chroma_pixels_tab[1]= avg_h264_chroma_mc4_c; |
4303 | 4255 |
c->avg_h264_chroma_pixels_tab[2]= avg_h264_chroma_mc2_c; |
4304 |
c->put_no_rnd_vc1_chroma_pixels_tab[0]= put_no_rnd_vc1_chroma_mc8_c; |
|
4305 |
c->avg_no_rnd_vc1_chroma_pixels_tab[0]= avg_no_rnd_vc1_chroma_mc8_c; |
|
4306 | 4256 |
|
4307 | 4257 |
c->draw_edges = draw_edges_c; |
4308 | 4258 |
|
4309 | 4259 |
#if CONFIG_MLP_DECODER || CONFIG_TRUEHD_DECODER |
4310 | 4260 |
ff_mlp_init(c, avctx); |
4311 | 4261 |
#endif |
4312 |
#if CONFIG_VC1_DECODER |
|
4313 |
ff_vc1dsp_init(c,avctx); |
|
4314 |
#endif |
|
4315 | 4262 |
#if CONFIG_WMV2_DECODER || CONFIG_VC1_DECODER |
4316 | 4263 |
ff_intrax8dsp_init(c,avctx); |
4317 | 4264 |
#endif |
Also available in: Unified diff