diff --git a/libavcodec/vc1.c b/libavcodec/vc1.c
index 23012cdc63f23a8c11fe7cfc1402140add39bde1..7d09234f9b2f8d91c4ce8953a65c330efa3ac0f4 100644
--- a/libavcodec/vc1.c
+++ b/libavcodec/vc1.c
@@ -571,7 +571,7 @@ int ff_vc1_decode_entry_point(AVCodecContext *avctx, VC1Context *v, GetBitContex
 }
 
 /* fill lookup tables for intensity compensation */
-#define INIT_LUT(lumscale, lumshift, luty, lutuv) do {                        \
+#define INIT_LUT(lumscale, lumshift, luty, lutuv, chain) do {                 \
         int scale, shift, i;                                                  \
         if (!lumscale) {                                                      \
             scale = -64;                                                      \
@@ -586,11 +586,36 @@ int ff_vc1_decode_entry_point(AVCodecContext *avctx, VC1Context *v, GetBitContex
             shift = lumshift << 6;                                            \
         }                                                                     \
         for (i = 0; i < 256; i++) {                                           \
-            luty[i]  = av_clip_uint8((scale * i + shift + 32) >> 6);          \
-            lutuv[i] = av_clip_uint8((scale * (i - 128) + 128*64 + 32) >> 6); \
+            int iy = chain ? luty[i]  : i;                                    \
+            int iu = chain ? lutuv[i] : i;                                    \
+            luty[i]  = av_clip_uint8((scale * iy + shift + 32) >> 6);         \
+            lutuv[i] = av_clip_uint8((scale * (iu - 128) + 128*64 + 32) >> 6);\
         }                                                                     \
     } while(0)
 
+static void rotate_luts(VC1Context *v)
+{
+#define ROTATE(DEF, L, N, C, A) do {                                          \
+        if (v->s.pict_type == AV_PICTURE_TYPE_BI || v->s.pict_type == AV_PICTURE_TYPE_B) { \
+            C = A;                                                            \
+        } else {                                                              \
+            DEF;                                                              \
+            memcpy(&tmp, &L  , sizeof(tmp));                                  \
+            memcpy(&L  , &N  , sizeof(tmp));                                  \
+            memcpy(&N  , &tmp, sizeof(tmp));                                  \
+            C = N;                                                            \
+        }                                                                     \
+    } while(0)
+
+    ROTATE(int tmp,             v->last_use_ic, v->next_use_ic, v->curr_use_ic, v->aux_use_ic);
+    ROTATE(uint8_t tmp[2][256], v->last_luty,   v->next_luty,   v->curr_luty,   v->aux_luty);
+    ROTATE(uint8_t tmp[2][256], v->last_lutuv,  v->next_lutuv,  v->curr_lutuv,  v->aux_lutuv);
+
+    INIT_LUT(32, 0, v->curr_luty[0], v->curr_lutuv[0], 0);
+    INIT_LUT(32, 0, v->curr_luty[1], v->curr_lutuv[1], 0);
+    v->curr_use_ic = 0;
+}
+
 int ff_vc1_parse_frame_header(VC1Context *v, GetBitContext* gb)
 {
     int pqindex, lowquant, status;
@@ -677,8 +702,8 @@ int ff_vc1_parse_frame_header(VC1Context *v, GetBitContext* gb)
                (v->s.pict_type == AV_PICTURE_TYPE_P) ? 'P' : ((v->s.pict_type == AV_PICTURE_TYPE_I) ? 'I' : 'B'),
                pqindex, v->pq, v->halfpq, v->rangeredfrm);
 
-    if (v->s.pict_type == AV_PICTURE_TYPE_I || v->s.pict_type == AV_PICTURE_TYPE_P)
-        v->use_ic = 0;
+    if (v->first_pic_header_flag)
+        rotate_luts(v);
 
     switch (v->s.pict_type) {
     case AV_PICTURE_TYPE_P:
@@ -692,9 +717,10 @@ int ff_vc1_parse_frame_header(VC1Context *v, GetBitContext* gb)
             v->mv_mode2 = ff_vc1_mv_pmode_table2[lowquant][get_unary(gb, 1, 3)];
             v->lumscale = get_bits(gb, 6);
             v->lumshift = get_bits(gb, 6);
-            v->use_ic = 1;
+            v->last_use_ic = 1;
             /* fill lookup tables for intensity compensation */
-            INIT_LUT(v->lumscale, v->lumshift, v->luty, v->lutuv);
+            INIT_LUT(v->lumscale, v->lumshift, v->last_luty[0], v->last_lutuv[0], 1);
+            INIT_LUT(v->lumscale, v->lumshift, v->last_luty[1], v->last_lutuv[1], 1);
         }
         v->qs_last = v->s.quarter_sample;
         if (v->mv_mode == MV_PMODE_1MV_HPEL || v->mv_mode == MV_PMODE_1MV_HPEL_BILIN)
@@ -942,12 +968,12 @@ int ff_vc1_parse_frame_header_adv(VC1Context *v, GetBitContext* gb)
     if (v->postprocflag)
         v->postproc = get_bits(gb, 2);
 
-    if (v->s.pict_type == AV_PICTURE_TYPE_I || v->s.pict_type == AV_PICTURE_TYPE_P)
-        v->use_ic = 0;
-
     if (v->parse_only)
         return 0;
 
+    if (v->first_pic_header_flag)
+        rotate_luts(v);
+
     switch (v->s.pict_type) {
     case AV_PICTURE_TYPE_I:
     case AV_PICTURE_TYPE_BI:
@@ -998,7 +1024,9 @@ int ff_vc1_parse_frame_header_adv(VC1Context *v, GetBitContext* gb)
             if (v->intcomp) {
                 v->lumscale = get_bits(gb, 6);
                 v->lumshift = get_bits(gb, 6);
-                INIT_LUT(v->lumscale, v->lumshift, v->luty, v->lutuv);
+                INIT_LUT(v->lumscale, v->lumshift, v->last_luty[0], v->last_lutuv[0], 1);
+                INIT_LUT(v->lumscale, v->lumshift, v->last_luty[1], v->last_lutuv[1], 1);
+                v->last_use_ic = 1;
             }
             status = bitplane_decoding(v->s.mbskip_table, &v->skip_is_raw, v);
             av_log(v->s.avctx, AV_LOG_DEBUG, "SKIPMB plane encoding: "
@@ -1041,17 +1069,38 @@ int ff_vc1_parse_frame_header_adv(VC1Context *v, GetBitContext* gb)
             int mvmode2;
             mvmode2 = get_unary(gb, 1, 3);
             v->mv_mode2 = ff_vc1_mv_pmode_table2[lowquant][mvmode2];
-            if (v->field_mode)
-                v->intcompfield = decode210(gb);
-            v->lumscale = get_bits(gb, 6);
-            v->lumshift = get_bits(gb, 6);
-            INIT_LUT(v->lumscale, v->lumshift, v->luty, v->lutuv);
-            if ((v->field_mode) && !v->intcompfield) {
+            if (v->field_mode) {
+                v->intcompfield = decode210(gb) ^ 3;
+            } else
+                v->intcompfield = 3;
+
+            v->lumscale2 = v->lumscale = 32;
+            v->lumshift2 = v->lumshift =  0;
+            if (v->intcompfield & 1) {
+                v->lumscale = get_bits(gb, 6);
+                v->lumshift = get_bits(gb, 6);
+            }
+            if ((v->intcompfield & 2) && v->field_mode) {
                 v->lumscale2 = get_bits(gb, 6);
                 v->lumshift2 = get_bits(gb, 6);
-                INIT_LUT(v->lumscale2, v->lumshift2, v->luty2, v->lutuv2);
+            } else if(!v->field_mode) {
+                v->lumscale2 = v->lumscale;
+                v->lumshift2 = v->lumshift;
+            }
+            if (v->field_mode && v->second_field) {
+                if (v->cur_field_type) {
+                    INIT_LUT(v->lumscale , v->lumshift , v->curr_luty[v->cur_field_type^1], v->curr_lutuv[v->cur_field_type^1], 0);
+                    INIT_LUT(v->lumscale2, v->lumshift2, v->last_luty[v->cur_field_type  ], v->last_lutuv[v->cur_field_type  ], 1);
+                } else {
+                    INIT_LUT(v->lumscale2, v->lumshift2, v->curr_luty[v->cur_field_type^1], v->curr_lutuv[v->cur_field_type^1], 0);
+                    INIT_LUT(v->lumscale , v->lumshift , v->last_luty[v->cur_field_type  ], v->last_lutuv[v->cur_field_type  ], 1);
+                }
+                v->next_use_ic = v->curr_use_ic = 1;
+            } else {
+                INIT_LUT(v->lumscale , v->lumshift , v->last_luty[0], v->last_lutuv[0], 1);
+                INIT_LUT(v->lumscale2, v->lumshift2, v->last_luty[1], v->last_lutuv[1], 1);
             }
-            v->use_ic = 1;
+            v->last_use_ic = 1;
         }
         v->qs_last = v->s.quarter_sample;
         if (v->mv_mode == MV_PMODE_1MV_HPEL || v->mv_mode == MV_PMODE_1MV_HPEL_BILIN)
diff --git a/libavcodec/vc1.h b/libavcodec/vc1.h
index 9d7743a9fdc29f9fc9d695c8dc4d3557f81292a6..8439203f88d0c23fe48d4707aa57388eb1f85c6c 100644
--- a/libavcodec/vc1.h
+++ b/libavcodec/vc1.h
@@ -298,8 +298,11 @@ typedef struct VC1Context{
     int dmb_is_raw;       ///< direct mb plane is raw
     int fmb_is_raw;       ///< forward mb plane is raw
     int skip_is_raw;      ///< skip mb plane is not coded
-    uint8_t luty[256], lutuv[256]; ///< lookup tables used for intensity compensation
-    int use_ic;           ///< use intensity compensation in B-frames
+    uint8_t last_luty[2][256], last_lutuv[2][256]; ///< lookup tables used for intensity compensation
+    uint8_t  aux_luty[2][256],  aux_lutuv[2][256]; ///< lookup tables used for intensity compensation
+    uint8_t next_luty[2][256], next_lutuv[2][256]; ///< lookup tables used for intensity compensation
+    uint8_t (*curr_luty)[256]  ,(*curr_lutuv)[256];
+    int last_use_ic, curr_use_ic, next_use_ic, aux_use_ic;
     int rnd;              ///< rounding control
 
     /** Frame decoding info for S/M profiles only */
@@ -342,7 +345,6 @@ typedef struct VC1Context{
     int intcomp;
     uint8_t lumscale2;    ///< for interlaced field P picture
     uint8_t lumshift2;
-    uint8_t luty2[256], lutuv2[256]; // lookup tables used for intensity compensation
     VLC* mbmode_vlc;
     VLC* imv_vlc;
     VLC* twomvbp_vlc;
diff --git a/libavcodec/vc1dec.c b/libavcodec/vc1dec.c
index 12e6a99e5d54672d712b7afdf22ce106e7944fc3..c47e3b043b61795d2c71595a74b959988f424e29 100644
--- a/libavcodec/vc1dec.c
+++ b/libavcodec/vc1dec.c
@@ -348,6 +348,7 @@ static void vc1_mc_1mv(VC1Context *v, int dir)
     int dxy, mx, my, uvmx, uvmy, src_x, src_y, uvsrc_x, uvsrc_y;
     int v_edge_pos = s->v_edge_pos >> v->field_mode;
     int i;
+    const uint8_t *luty, *lutuv;
 
     if ((!v->field_mode ||
          (v->ref_field_type[dir] == 1 && v->cur_field_type == 1)) &&
@@ -386,15 +387,21 @@ static void vc1_mc_1mv(VC1Context *v, int dir)
             srcY = s->current_picture.f.data[0];
             srcU = s->current_picture.f.data[1];
             srcV = s->current_picture.f.data[2];
+            luty  = v->curr_luty [v->ref_field_type[dir]];
+            lutuv = v->curr_lutuv[v->ref_field_type[dir]];
         } else {
             srcY = s->last_picture.f.data[0];
             srcU = s->last_picture.f.data[1];
             srcV = s->last_picture.f.data[2];
+            luty  = v->last_luty [v->ref_field_type[dir]];
+            lutuv = v->last_lutuv[v->ref_field_type[dir]];
         }
     } else {
         srcY = s->next_picture.f.data[0];
         srcU = s->next_picture.f.data[1];
         srcV = s->next_picture.f.data[2];
+        luty  = v->next_luty [v->ref_field_type[dir]];
+        lutuv = v->next_lutuv[v->ref_field_type[dir]];
     }
 
     src_x = s->mb_x * 16 + (mx >> 2);
@@ -478,15 +485,15 @@ static void vc1_mc_1mv(VC1Context *v, int dir)
             src = srcY;
             for (j = 0; j < 17 + s->mspel * 2; j++) {
                 for (i = 0; i < 17 + s->mspel * 2; i++)
-                    src[i] = v->luty[src[i]];
+                    src[i] = luty[src[i]];
                 src += s->linesize;
             }
             src  = srcU;
             src2 = srcV;
             for (j = 0; j < 9; j++) {
                 for (i = 0; i < 9; i++) {
-                    src[i]  = v->lutuv[src[i]];
-                    src2[i] = v->lutuv[src2[i]];
+                    src[i]  = lutuv[src[i]];
+                    src2[i] = lutuv[src2[i]];
                 }
                 src  += s->uvlinesize;
                 src2 += s->uvlinesize;
@@ -544,6 +551,7 @@ static void vc1_mc_4mv_luma(VC1Context *v, int n, int dir, int avg)
    int off;
    int fieldmv = (v->fcm == ILACE_FRAME) ? v->blk_mv_type[s->block_index[n]] : 0;
    int v_edge_pos = s->v_edge_pos >> v->field_mode;
+    const uint8_t *luty;
 
    if ((!v->field_mode ||
         (v->ref_field_type[dir] == 1 && v->cur_field_type == 1)) &&
@@ -556,10 +564,15 @@ static void vc1_mc_4mv_luma(VC1Context *v, int n, int dir, int avg)
     if (!dir) {
         if (v->field_mode && (v->cur_field_type != v->ref_field_type[dir]) && v->second_field) {
             srcY = s->current_picture.f.data[0];
-        } else
+            luty = v->curr_luty[v->ref_field_type[dir]];
+        } else {
             srcY = s->last_picture.f.data[0];
-    } else
+            luty = v->last_luty[v->ref_field_type[dir]];
+        }
+    } else {
         srcY = s->next_picture.f.data[0];
+        luty = v->next_luty[v->ref_field_type[dir]];
+    }
 
     if (v->field_mode) {
         if (v->cur_field_type != v->ref_field_type[dir])
@@ -686,7 +699,7 @@ static void vc1_mc_4mv_luma(VC1Context *v, int n, int dir, int avg)
             src = srcY;
             for (j = 0; j < 9 + s->mspel * 2; j++) {
                 for (i = 0; i < 9 + s->mspel * 2; i++)
-                    src[i] = v->luty[src[i]];
+                    src[i] = luty[src[i]];
                 src += s->linesize << fieldmv;
             }
         }
@@ -774,6 +787,7 @@ static void vc1_mc_4mv_chroma(VC1Context *v, int dir)
     int valid_count;
     int chroma_ref_type = v->cur_field_type;
     int v_edge_pos = s->v_edge_pos >> v->field_mode;
+    const uint8_t *lutuv;
 
     if (!v->field_mode && !v->s.last_picture.f.data[0])
         return;
@@ -839,13 +853,16 @@ static void vc1_mc_4mv_chroma(VC1Context *v, int dir)
         if (v->field_mode && (v->cur_field_type != chroma_ref_type) && v->second_field) {
             srcU = s->current_picture.f.data[1];
             srcV = s->current_picture.f.data[2];
+            lutuv = v->curr_lutuv[chroma_ref_type];
         } else {
             srcU = s->last_picture.f.data[1];
             srcV = s->last_picture.f.data[2];
+            lutuv = v->last_lutuv[chroma_ref_type];
         }
     } else {
         srcU = s->next_picture.f.data[1];
         srcV = s->next_picture.f.data[2];
+        lutuv = v->next_lutuv[chroma_ref_type];
     }
 
     srcU += uvsrc_y * s->uvlinesize + uvsrc_x;
@@ -896,8 +913,8 @@ static void vc1_mc_4mv_chroma(VC1Context *v, int dir)
             src2 = srcV;
             for (j = 0; j < 9; j++) {
                 for (i = 0; i < 9; i++) {
-                    src[i]  = v->lutuv[src[i]];
-                    src2[i] = v->lutuv[src2[i]];
+                    src[i]  = lutuv[src[i]];
+                    src2[i] = lutuv[src2[i]];
                 }
                 src  += s->uvlinesize;
                 src2 += s->uvlinesize;
@@ -980,13 +997,14 @@ static void vc1_mc_4mv_chroma4(VC1Context *v)
             if (v->mv_mode == MV_PMODE_INTENSITY_COMP) {
                 int i, j;
                 uint8_t *src, *src2;
+                const uint8_t *lutuv = v->last_lutuv[v->ref_field_type[0]];
 
                 src  = srcU;
                 src2 = srcV;
                 for (j = 0; j < 5; j++) {
                     for (i = 0; i < 5; i++) {
-                        src[i]  = v->lutuv[src[i]];
-                        src2[i] = v->lutuv[src2[i]];
+                        src[i]  = lutuv[src[i]];
+                        src2[i] = lutuv[src2[i]];
                     }
                     src  += s->uvlinesize << 1;
                     src2 += s->uvlinesize << 1;
@@ -1978,29 +1996,30 @@ static av_always_inline int scale_mv(int value, int bfrac, int inv, int qs)
 static inline void vc1_b_mc(VC1Context *v, int dmv_x[2], int dmv_y[2],
                             int direct, int mode)
 {
-    if (v->use_ic) {
+    int use_ic = v->next_use_ic || v->curr_use_ic || v->last_use_ic;
+    if (use_ic) {
         v->mv_mode2 = v->mv_mode;
         v->mv_mode  = MV_PMODE_INTENSITY_COMP;
     }
     if (direct) {
         vc1_mc_1mv(v, 0);
         vc1_interp_mc(v);
-        if (v->use_ic)
+        if (use_ic)
             v->mv_mode = v->mv_mode2;
         return;
     }
     if (mode == BMV_TYPE_INTERPOLATED) {
         vc1_mc_1mv(v, 0);
         vc1_interp_mc(v);
-        if (v->use_ic)
+        if (use_ic)
             v->mv_mode = v->mv_mode2;
         return;
     }
 
-    if (v->use_ic && (mode == BMV_TYPE_BACKWARD))
+    if (use_ic && (mode == BMV_TYPE_BACKWARD))
         v->mv_mode = v->mv_mode2;
     vc1_mc_1mv(v, (mode == BMV_TYPE_BACKWARD));
-    if (v->use_ic)
+    if (use_ic)
         v->mv_mode = v->mv_mode2;
 }
 
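Note (not part of the patch): below is a minimal standalone sketch of the mapping that INIT_LUT() builds and of what the new "chain" parameter does. The helper names init_lut() and clip_uint8() are illustrative only; the arithmetic mirrors the macro above, and chain = 1 feeds an already-built table through the new scale/shift so that two intensity-compensation passes compose into a single lookup.

/* Sketch of the INIT_LUT() arithmetic, assuming illustrative helper names. */
#include <stdint.h>
#include <stdio.h>

static uint8_t clip_uint8(int v) { return v < 0 ? 0 : v > 255 ? 255 : v; }

static void init_lut(int lumscale, int lumshift,
                     uint8_t luty[256], uint8_t lutuv[256], int chain)
{
    int scale, shift, i;

    if (!lumscale) {                          /* special case, as in the macro */
        scale = -64;
        shift = (255 - lumshift * 2) << 6;
        if (lumshift > 31)
            shift += 128 << 6;
    } else {
        scale = lumscale + 32;
        if (lumshift > 31)
            shift = -128 + lumshift << 6;     /* i.e. (-128 + lumshift) << 6, as in the macro */
        else
            shift = lumshift << 6;
    }
    for (i = 0; i < 256; i++) {
        /* chain == 1: remap the existing table entries instead of the identity,
         * composing the previous compensation pass with the new one */
        int iy = chain ? luty[i]  : i;
        int iu = chain ? lutuv[i] : i;
        luty[i]  = clip_uint8((scale * iy + shift + 32) >> 6);
        lutuv[i] = clip_uint8((scale * (iu - 128) + 128 * 64 + 32) >> 6);
    }
}

int main(void)
{
    uint8_t luty[256], lutuv[256];

    init_lut(40, 3, luty, lutuv, 0);   /* first pass starts from the identity */
    init_lut(36, 0, luty, lutuv, 1);   /* second pass chained on top of the first */
    printf("luty[128] = %d, lutuv[128] = %d\n", luty[128], lutuv[128]);
    return 0;
}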
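Likewise, a simplified sketch of the bookkeeping rotate_luts() performs, shown for the luma tables only. The LutState struct and the is_b_picture flag are stand-ins for the relevant VC1Context fields and pict_type checks, not the decoder's actual types.

/* Sketch of the last/next/curr/aux rotation, under the assumptions above. */
#include <stdint.h>
#include <string.h>

typedef struct {
    uint8_t last_luty[2][256], next_luty[2][256], aux_luty[2][256];
    uint8_t (*curr_luty)[256];
    int is_b_picture;                  /* stands in for the B/BI pict_type check */
} LutState;

static void rotate(LutState *s)
{
    if (s->is_b_picture) {
        s->curr_luty = s->aux_luty;    /* B/BI pictures use scratch tables and
                                        * leave the reference tables untouched */
    } else {
        uint8_t tmp[2][256];
        memcpy(tmp,          s->last_luty, sizeof(tmp));   /* swap last and next */
        memcpy(s->last_luty, s->next_luty, sizeof(tmp));
        memcpy(s->next_luty, tmp,          sizeof(tmp));
        s->curr_luty = s->next_luty;   /* the current picture writes into the next slot */
    }
    /* the real rotate_luts() then resets curr_luty/curr_lutuv to the identity
     * mapping via INIT_LUT(32, 0, ..., 0) and clears curr_use_ic */
}

int main(void)
{
    LutState s = { .is_b_picture = 0 };
    rotate(&s);                        /* P picture: rotate references */
    s.is_b_picture = 1;
    rotate(&s);                        /* B picture: point at the aux tables */
    return s.curr_luty == s.aux_luty ? 0 : 1;
}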