Commit a4d36c11 authored by Michael Niedermayer

passing AVFrame instead of AVPicture around in ffmpeg, that way stuff like motion vectors can be passed from the decoder to the encoder
moving ref_index from Picture to AVFrame

Originally committed as revision 3028 to svn://svn.ffmpeg.org/ffmpeg/trunk
Parent 14e2a940
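
The ffmpeg.c part of the change leans on a layout property of this era's API: AVPicture is just data[4]/linesize[4], and those are also the first members of AVFrame, so an AVFrame can be handed to the AVPicture-based helpers through a cast while the AVFrame-only fields (motion_val, mb_type, ref_index, ...) ride along untouched. A minimal sketch of that pattern, not part of the commit and with an illustrative helper name, assuming the avcodec.h of build 4709:

#include "avcodec.h"   /* in-tree include, as used by ffmpeg.c at this revision */

/* Illustrative helper: treat an AVFrame as an AVPicture where only the
   image planes matter.  Valid because AVFrame begins with the same
   data[4]/linesize[4] members that make up AVPicture. */
static int fill_frame_planes(AVFrame *frame, uint8_t *buf,
                             enum PixelFormat pix_fmt, int width, int height)
{
    avcodec_get_frame_defaults(frame);  /* reset the AVFrame-only fields first */
    return avpicture_fill((AVPicture *)frame, buf, pix_fmt, width, height);
}

This is the same pattern the hunks below apply to avpicture_fill(), img_convert(), img_resample() and fill_pad_region().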
@@ -223,7 +223,7 @@ typedef struct AVOutputStream {
     int64_t sync_opts;
     /* video only */
     int video_resample;  /* video_resample and video_crop are mutually exclusive */
-    AVPicture pict_tmp;  /* temporary image for resampling */
+    AVFrame pict_tmp;    /* temporary image for resampling */
     ImgReSampleContext *img_resample_ctx; /* for image resampling */
     int video_crop;      /* video_resample and video_crop are mutually exclusive */
@@ -554,15 +554,18 @@ static void do_video_out(AVFormatContext *s,
                          int *frame_size, AVOutputStream *audio_sync)
 {
     int nb_frames, i, ret;
-    AVPicture *final_picture, *formatted_picture;
-    AVPicture picture_format_temp, picture_crop_temp;
+    AVFrame *final_picture, *formatted_picture;
+    AVFrame picture_format_temp, picture_crop_temp;
     static uint8_t *video_buffer= NULL;
     uint8_t *buf = NULL, *buf1 = NULL;
     AVCodecContext *enc, *dec;
     enum PixelFormat target_pixfmt;
 #define VIDEO_BUFFER_SIZE (1024*1024)
+    avcodec_get_frame_defaults(&picture_format_temp);
+    avcodec_get_frame_defaults(&picture_crop_temp);
     enc = &ost->st->codec;
     dec = &ist->st->codec;
@@ -641,9 +644,9 @@ static void do_video_out(AVFormatContext *s,
         if (!buf)
             return;
         formatted_picture = &picture_format_temp;
-        avpicture_fill(formatted_picture, buf, target_pixfmt, dec->width, dec->height);
+        avpicture_fill((AVPicture*)formatted_picture, buf, target_pixfmt, dec->width, dec->height);
-        if (img_convert(formatted_picture, target_pixfmt,
+        if (img_convert((AVPicture*)formatted_picture, target_pixfmt,
                         (AVPicture *)in_picture, dec->pix_fmt,
                         dec->width, dec->height) < 0) {
@@ -653,7 +656,7 @@ static void do_video_out(AVFormatContext *s,
             goto the_end;
         }
     } else {
-        formatted_picture = (AVPicture *)in_picture;
+        formatted_picture = in_picture;
     }

     /* XXX: resampling could be done before raw format conversion in
@@ -661,10 +664,10 @@ static void do_video_out(AVFormatContext *s,
     /* XXX: only works for YUV420P */
     if (ost->video_resample) {
         final_picture = &ost->pict_tmp;
-        img_resample(ost->img_resample_ctx, final_picture, formatted_picture);
+        img_resample(ost->img_resample_ctx, (AVPicture*)final_picture, (AVPicture*)formatted_picture);
         if (ost->padtop || ost->padbottom || ost->padleft || ost->padright) {
-            fill_pad_region(final_picture, enc->height, enc->width,
+            fill_pad_region((AVPicture*)final_picture, enc->height, enc->width,
                             ost->padtop, ost->padbottom, ost->padleft, ost->padright,
                             padcolor);
         }
@@ -679,10 +682,10 @@ static void do_video_out(AVFormatContext *s,
             if (!buf)
                 return;
             final_picture = &picture_format_temp;
-            avpicture_fill(final_picture, buf, enc->pix_fmt, enc->width, enc->height);
+            avpicture_fill((AVPicture*)final_picture, buf, enc->pix_fmt, enc->width, enc->height);
-            if (img_convert(final_picture, enc->pix_fmt,
-                            &ost->pict_tmp, PIX_FMT_YUV420P,
+            if (img_convert((AVPicture*)final_picture, enc->pix_fmt,
+                            (AVPicture*)&ost->pict_tmp, PIX_FMT_YUV420P,
                             enc->width, enc->height) < 0) {
                 if (verbose >= 0)
@@ -729,7 +732,7 @@ static void do_video_out(AVFormatContext *s,
             }
         }
-        fill_pad_region(final_picture, enc->height, enc->width,
+        fill_pad_region((AVPicture*)final_picture, enc->height, enc->width,
                         ost->padtop, ost->padbottom, ost->padleft, ost->padright,
                         padcolor);
@@ -743,10 +746,10 @@ static void do_video_out(AVFormatContext *s,
             if (!buf)
                 return;
             final_picture = &picture_format_temp;
-            avpicture_fill(final_picture, buf, enc->pix_fmt, enc->width, enc->height);
+            avpicture_fill((AVPicture*)final_picture, buf, enc->pix_fmt, enc->width, enc->height);
-            if (img_convert(final_picture, enc->pix_fmt,
-                            &ost->pict_tmp, PIX_FMT_YUV420P,
+            if (img_convert((AVPicture*)final_picture, enc->pix_fmt,
+                            (AVPicture*)&ost->pict_tmp, PIX_FMT_YUV420P,
                             enc->width, enc->height) < 0) {
                 if (verbose >= 0)
@@ -772,9 +775,8 @@ static void do_video_out(AVFormatContext *s,
         enc->coded_frame = old_frame;
     } else {
         AVFrame big_picture;
-        avcodec_get_frame_defaults(&big_picture);
-        *(AVPicture*)&big_picture= *final_picture;
+        big_picture= *final_picture;
         /* better than nothing: use input picture interlaced
            settings */
         big_picture.interlaced_frame = in_picture->interlaced_frame;
@@ -791,6 +793,7 @@ static void do_video_out(AVFormatContext *s,
             big_picture.quality = ist->st->quality;
         }else
             big_picture.quality = ost->st->quality;
+        big_picture.pict_type = 0;
         ret = avcodec_encode_video(enc,
                                    video_buffer, VIDEO_BUFFER_SIZE,
                                    &big_picture);
@@ -1419,15 +1422,16 @@ static int av_encode(AVFormatContext **output_files,
                 ost->padleft = frame_padleft;
                 ost->padbottom = frame_padbottom;
                 ost->padright = frame_padright;
-                if( avpicture_alloc( &ost->pict_tmp, PIX_FMT_YUV420P,
+                if( avpicture_alloc( (AVPicture*)&ost->pict_tmp, PIX_FMT_YUV420P,
                                      codec->width, codec->height ) )
                     goto fail;
             } else {
                 ost->video_resample = 1;
                 ost->video_crop = 0; // cropping is handled as part of resample
-                if( avpicture_alloc( &ost->pict_tmp, PIX_FMT_YUV420P,
+                if( avpicture_alloc( (AVPicture*)&ost->pict_tmp, PIX_FMT_YUV420P,
                                      codec->width, codec->height ) )
                     goto fail;
+                avcodec_get_frame_defaults(&ost->pict_tmp);
                 ost->img_resample_ctx = img_resample_full_init(
                     ost->st->codec.width, ost->st->codec.height,
......
@@ -17,7 +17,7 @@ extern "C" {
 #define FFMPEG_VERSION_INT     0x000408
 #define FFMPEG_VERSION         "0.4.8"
-#define LIBAVCODEC_BUILD       4708
+#define LIBAVCODEC_BUILD       4709
 #define LIBAVCODEC_VERSION_INT FFMPEG_VERSION_INT
 #define LIBAVCODEC_VERSION     FFMPEG_VERSION
@@ -442,7 +442,7 @@ typedef struct AVPanScan{
 \
     /**\
      * Motion vector table\
-     * - encoding: unused\
+     * - encoding: set by user\
      * - decoding: set by lavc\
      */\
     int16_t (*motion_val[2])[2];\
@@ -450,7 +450,7 @@ typedef struct AVPanScan{
     /**\
      * Macroblock type table\
      * mb_type_base + mb_width + 2\
-     * - encoding: unused\
+     * - encoding: set by user\
      * - decoding: set by lavc\
      */\
     uint32_t *mb_type;\
@@ -538,13 +538,20 @@ typedef struct AVPanScan{
      * - decoding: set by lavc\
      */\
     short *dct_coeff;\
+\
+    /**\
+     * Motion referece frame index\
+     * - encoding: set by user\
+     * - decoding: set by lavc\
+     */\
+    int8_t *ref_index[2];
 #define FF_QSCALE_TYPE_MPEG1 0
 #define FF_QSCALE_TYPE_MPEG2 1
 #define FF_BUFFER_TYPE_INTERNAL 1
 #define FF_BUFFER_TYPE_USER     2 ///< Direct rendering buffers (image is (de)allocated by user)
-#define FF_BUFFER_TYPE_SHARED   4 ///< buffer from somewher else, dont dealloc image (data/base)
+#define FF_BUFFER_TYPE_SHARED   4 ///< buffer from somewher else, dont dealloc image (data/base), all other tables are not shared
 #define FF_BUFFER_TYPE_COPY     8 ///< just a (modified) copy of some other buffer, dont dealloc anything
@@ -1561,6 +1568,14 @@ typedef struct AVCodecContext {
      * - decoding: set by execute()
      */
     void *thread_opaque;
+    /**
+     * Motion estimation threshold.
+     *
+     * - encoding: set by user
+     * - decoding: set by user
+     */
+    int me_threshold;
 } AVCodecContext;
......
@@ -287,15 +287,28 @@ static void copy_picture_attributes(MpegEncContext *s, AVFrame *dst, AVFrame *src
     dst->interlaced_frame = src->interlaced_frame;
     dst->top_field_first = src->top_field_first;
-    if(src->motion_val[0] && src->motion_val[0] != dst->motion_val[0]){
+    if(s->avctx->me_threshold){
+        if(!src->motion_val[0])
+            av_log(s->avctx, AV_LOG_ERROR, "AVFrame.motion_val not set!\n");
+        if(!src->mb_type)
+            av_log(s->avctx, AV_LOG_ERROR, "AVFrame.mb_type not set!\n");
+        if(!src->ref_index[0])
+            av_log(s->avctx, AV_LOG_ERROR, "AVFrame.ref_index not set!\n");
         if(src->motion_subsample_log2 != dst->motion_subsample_log2)
             av_log(s->avctx, AV_LOG_ERROR, "AVFrame.motion_subsample_log2 doesnt match!\n");
         else{
+            memcpy(dst->mb_type, src->mb_type, s->mb_stride * s->mb_height * sizeof(dst->mb_type[0]));
+            for(i=0; i<2; i++){
             int stride= ((16*s->mb_width )>>src->motion_subsample_log2) + 1;
             int height= ((16*s->mb_height)>>src->motion_subsample_log2);
-            for(i=0; i<2; i++)
-                memcpy(dst->motion_val[i], src->motion_val[i], stride*height*sizeof(int16_t));
+                if(src->motion_val[i] && src->motion_val[i] != dst->motion_val[i]){
+                    memcpy(dst->motion_val[i], src->motion_val[i], 2*stride*height*sizeof(int16_t));
+                }
+                if(src->ref_index[i] && src->ref_index[i] != dst->ref_index[i]){
+                    memcpy(dst->ref_index[i], src->ref_index[i], s->mb_stride*s->mb_height*sizeof(int8_t)); //FIXME init this too
+                }
+            }
         }
     }
@@ -363,6 +376,7 @@ static int alloc_picture(MpegEncContext *s, Picture *pic, int shared){
         for(i=0; i<2; i++){
             CHECKED_ALLOCZ(pic->motion_val_base[i], 2 * (b8_array_size+2) * sizeof(int16_t))
             pic->motion_val[i]= pic->motion_val_base[i]+2;
+            CHECKED_ALLOCZ(pic->ref_index[i], mb_array_size * sizeof(int8_t))
         }
         pic->motion_subsample_log2= 3;
     }
......
@@ -138,7 +138,6 @@ typedef struct Picture{
      */
     uint8_t *interpolated[3];
     int16_t (*motion_val_base[2])[2];
-    int8_t *ref_index[2];
     uint32_t *mb_type_base;
 #define MB_TYPE_INTRA MB_TYPE_INTRA4x4 //default mb_type if theres just one type
 #define IS_INTRA4x4(a) ((a)&MB_TYPE_INTRA4x4)
......
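
Taken together, the ffmpeg.c and mpegvideo.c changes let a transcoder hand the motion information produced by the decoder straight to the encoder. A hypothetical fragment against the build-4709 API (not part of the commit; the function name, the threshold value and the omitted error handling are illustrative):

/* Decode one frame and re-encode it, letting the encoder reuse the decoder's
   side data: with a nonzero me_threshold, copy_picture_attributes() in
   mpegvideo.c copies motion_val, mb_type and ref_index from the AVFrame
   passed to avcodec_encode_video(). */
static int reencode_one_frame(AVCodecContext *dec, AVCodecContext *enc,
                              uint8_t *inbuf, int insize,
                              uint8_t *outbuf, int outsize)
{
    AVFrame frame;
    int got_picture = 0;

    avcodec_get_frame_defaults(&frame);
    enc->me_threshold = 1;   /* any nonzero value enables the copying added here */

    if (avcodec_decode_video(dec, &frame, &got_picture, inbuf, insize) < 0 || !got_picture)
        return -1;

    /* motion_val, mb_type and ref_index were set by lavc while decoding and
       travel with the same AVFrame; clear pict_type so the encoder decides. */
    frame.pict_type = 0;
    return avcodec_encode_video(enc, outbuf, outsize, &frame);
}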