Commit a4d36c11 authored by Michael Niedermayer


passing AVFrame instead of AVPicture around in ffmpeg, that way stuff like motion vectors can be passed from the decoder to the encoder
moving ref_index from Picture to AVFrame

Originally committed as revision 3028 to svn://svn.ffmpeg.org/ffmpeg/trunk
Parent 14e2a940
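The heart of the change is in do_video_out(): the frame handed to avcodec_encode_video() is now a full AVFrame copy rather than just its AVPicture part, so decoder side data such as motion_val, mb_type and ref_index can reach the encoder. A minimal sketch of that pattern against this libavcodec vintage (LIBAVCODEC_BUILD 4709); encode_one() and its quality parameter are illustrative names, not code from the tree:

#include "avcodec.h"

/* Illustrative helper (not in the tree): encode one frame while keeping any
 * side data attached to it.  final_picture holds the pixels to encode,
 * in_picture is the decoded frame whose attributes are reused. */
static int encode_one(AVCodecContext *enc, AVFrame *final_picture,
                      AVFrame *in_picture, int quality,
                      uint8_t *outbuf, int outbuf_size)
{
    AVFrame big_picture;

    big_picture = *final_picture;      /* struct copy: data[]/linesize[] and any
                                          motion_val/mb_type/ref_index tables set
                                          on final_picture travel along          */
    big_picture.interlaced_frame = in_picture->interlaced_frame;
    big_picture.quality          = quality;
    big_picture.pict_type        = 0;  /* let the encoder choose the picture type */

    return avcodec_encode_video(enc, outbuf, outbuf_size, &big_picture);
}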
@@ -223,7 +223,7 @@ typedef struct AVOutputStream {
     int64_t sync_opts;
     /* video only */
     int video_resample;      /* video_resample and video_crop are mutually exclusive */
-    AVPicture pict_tmp;      /* temporary image for resampling */
+    AVFrame pict_tmp;        /* temporary image for resampling */
     ImgReSampleContext *img_resample_ctx; /* for image resampling */
     int video_crop;          /* video_resample and video_crop are mutually exclusive */
@@ -554,15 +554,18 @@ static void do_video_out(AVFormatContext *s,
                          int *frame_size, AVOutputStream *audio_sync)
 {
     int nb_frames, i, ret;
-    AVPicture *final_picture, *formatted_picture;
-    AVPicture picture_format_temp, picture_crop_temp;
+    AVFrame *final_picture, *formatted_picture;
+    AVFrame picture_format_temp, picture_crop_temp;
     static uint8_t *video_buffer= NULL;
     uint8_t *buf = NULL, *buf1 = NULL;
     AVCodecContext *enc, *dec;
     enum PixelFormat target_pixfmt;
 #define VIDEO_BUFFER_SIZE (1024*1024)
+    avcodec_get_frame_defaults(&picture_format_temp);
+    avcodec_get_frame_defaults(&picture_crop_temp);
     enc = &ost->st->codec;
     dec = &ist->st->codec;
@@ -641,9 +644,9 @@ static void do_video_out(AVFormatContext *s,
             if (!buf)
                 return;
             formatted_picture = &picture_format_temp;
-            avpicture_fill(formatted_picture, buf, target_pixfmt, dec->width, dec->height);
-            if (img_convert(formatted_picture, target_pixfmt,
+            avpicture_fill((AVPicture*)formatted_picture, buf, target_pixfmt, dec->width, dec->height);
+            if (img_convert((AVPicture*)formatted_picture, target_pixfmt,
                             (AVPicture *)in_picture, dec->pix_fmt,
                             dec->width, dec->height) < 0) {
@@ -653,7 +656,7 @@ static void do_video_out(AVFormatContext *s,
                 goto the_end;
             }
         } else {
-            formatted_picture = (AVPicture *)in_picture;
+            formatted_picture = in_picture;
         }
     /* XXX: resampling could be done before raw format conversion in
@@ -661,10 +664,10 @@ static void do_video_out(AVFormatContext *s,
     /* XXX: only works for YUV420P */
     if (ost->video_resample) {
         final_picture = &ost->pict_tmp;
-        img_resample(ost->img_resample_ctx, final_picture, formatted_picture);
+        img_resample(ost->img_resample_ctx, (AVPicture*)final_picture, (AVPicture*)formatted_picture);
         if (ost->padtop || ost->padbottom || ost->padleft || ost->padright) {
-            fill_pad_region(final_picture, enc->height, enc->width,
+            fill_pad_region((AVPicture*)final_picture, enc->height, enc->width,
                             ost->padtop, ost->padbottom, ost->padleft, ost->padright,
                             padcolor);
         }
@@ -679,10 +682,10 @@ static void do_video_out(AVFormatContext *s,
             if (!buf)
                 return;
             final_picture = &picture_format_temp;
-            avpicture_fill(final_picture, buf, enc->pix_fmt, enc->width, enc->height);
-            if (img_convert(final_picture, enc->pix_fmt,
-                            &ost->pict_tmp, PIX_FMT_YUV420P,
+            avpicture_fill((AVPicture*)final_picture, buf, enc->pix_fmt, enc->width, enc->height);
+            if (img_convert((AVPicture*)final_picture, enc->pix_fmt,
+                            (AVPicture*)&ost->pict_tmp, PIX_FMT_YUV420P,
                             enc->width, enc->height) < 0) {
                 if (verbose >= 0)
@@ -729,7 +732,7 @@ static void do_video_out(AVFormatContext *s,
                 }
             }
-            fill_pad_region(final_picture, enc->height, enc->width,
+            fill_pad_region((AVPicture*)final_picture, enc->height, enc->width,
                             ost->padtop, ost->padbottom, ost->padleft, ost->padright,
                             padcolor);
@@ -743,10 +746,10 @@ static void do_video_out(AVFormatContext *s,
             if (!buf)
                 return;
             final_picture = &picture_format_temp;
-            avpicture_fill(final_picture, buf, enc->pix_fmt, enc->width, enc->height);
-            if (img_convert(final_picture, enc->pix_fmt,
-                            &ost->pict_tmp, PIX_FMT_YUV420P,
+            avpicture_fill((AVPicture*)final_picture, buf, enc->pix_fmt, enc->width, enc->height);
+            if (img_convert((AVPicture*)final_picture, enc->pix_fmt,
+                            (AVPicture*)&ost->pict_tmp, PIX_FMT_YUV420P,
                             enc->width, enc->height) < 0) {
                 if (verbose >= 0)
@@ -772,9 +775,8 @@ static void do_video_out(AVFormatContext *s,
             enc->coded_frame = old_frame;
         } else {
             AVFrame big_picture;
-            avcodec_get_frame_defaults(&big_picture);
-            *(AVPicture*)&big_picture= *final_picture;
+            big_picture= *final_picture;
             /* better than nothing: use input picture interlaced
                settings */
             big_picture.interlaced_frame = in_picture->interlaced_frame;
@@ -791,6 +793,7 @@ static void do_video_out(AVFormatContext *s,
                 big_picture.quality = ist->st->quality;
             }else
                 big_picture.quality = ost->st->quality;
+            big_picture.pict_type = 0;
             ret = avcodec_encode_video(enc,
                                        video_buffer, VIDEO_BUFFER_SIZE,
                                        &big_picture);
@@ -1419,15 +1422,16 @@ static int av_encode(AVFormatContext **output_files,
                 ost->padleft = frame_padleft;
                 ost->padbottom = frame_padbottom;
                 ost->padright = frame_padright;
-                if( avpicture_alloc( &ost->pict_tmp, PIX_FMT_YUV420P,
+                if( avpicture_alloc( (AVPicture*)&ost->pict_tmp, PIX_FMT_YUV420P,
                                      codec->width, codec->height ) )
                     goto fail;
             } else {
                 ost->video_resample = 1;
                 ost->video_crop = 0; // cropping is handled as part of resample
-                if( avpicture_alloc( &ost->pict_tmp, PIX_FMT_YUV420P,
+                if( avpicture_alloc( (AVPicture*)&ost->pict_tmp, PIX_FMT_YUV420P,
                                      codec->width, codec->height ) )
                     goto fail;
+                avcodec_get_frame_defaults(&ost->pict_tmp);
                 ost->img_resample_ctx = img_resample_full_init(
                                       ost->st->codec.width, ost->st->codec.height,
......
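The (AVPicture*) casts above work because AVPicture (data[4] plus linesize[4]) matches the leading members of AVFrame in this tree, so the existing imaging helpers can operate directly on the new AVFrame temporaries. A small sketch of that usage; fill_frame() is an illustrative name, not a function in the tree:

#include "avcodec.h"

/* Illustrative wrapper: prepare an AVFrame so the AVPicture-based helpers
 * (avpicture_fill, img_convert, img_resample, ...) can be used on it.
 * buf must hold at least avpicture_get_size(pix_fmt, width, height) bytes. */
static void fill_frame(AVFrame *frame, uint8_t *buf,
                       int pix_fmt, int width, int height)
{
    /* AVPicture is layout-compatible with the start of AVFrame here,
     * so the cast below is well defined. */
    avcodec_get_frame_defaults(frame);   /* reset the side-data fields first */
    avpicture_fill((AVPicture *)frame, buf, pix_fmt, width, height);
}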
@@ -17,7 +17,7 @@ extern "C" {
 #define FFMPEG_VERSION_INT     0x000408
 #define FFMPEG_VERSION         "0.4.8"
-#define LIBAVCODEC_BUILD       4708
+#define LIBAVCODEC_BUILD       4709
 #define LIBAVCODEC_VERSION_INT FFMPEG_VERSION_INT
 #define LIBAVCODEC_VERSION     FFMPEG_VERSION
@@ -442,7 +442,7 @@ typedef struct AVPanScan{
 \
     /**\
      * Motion vector table\
-     * - encoding: unused\
+     * - encoding: set by user\
      * - decoding: set by lavc\
      */\
     int16_t (*motion_val[2])[2];\
@@ -450,7 +450,7 @@ typedef struct AVPanScan{
     /**\
      * Macroblock type table\
      * mb_type_base + mb_width + 2\
-     * - encoding: unused\
+     * - encoding: set by user\
      * - decoding: set by lavc\
      */\
     uint32_t *mb_type;\
@@ -538,13 +538,20 @@ typedef struct AVPanScan{
      * - decoding: set by lavc\
      */\
     short *dct_coeff;\
+\
+    /**\
+     * Motion referece frame index\
+     * - encoding: set by user\
+     * - decoding: set by lavc\
+     */\
+    int8_t *ref_index[2];
 #define FF_QSCALE_TYPE_MPEG1 0
 #define FF_QSCALE_TYPE_MPEG2 1
 #define FF_BUFFER_TYPE_INTERNAL 1
 #define FF_BUFFER_TYPE_USER     2 ///< Direct rendering buffers (image is (de)allocated by user)
-#define FF_BUFFER_TYPE_SHARED   4 ///< buffer from somewher else, dont dealloc image (data/base)
+#define FF_BUFFER_TYPE_SHARED   4 ///< buffer from somewher else, dont dealloc image (data/base), all other tables are not shared
 #define FF_BUFFER_TYPE_COPY     8 ///< just a (modified) copy of some other buffer, dont dealloc anything
@@ -1561,6 +1568,14 @@ typedef struct AVCodecContext {
      * - decoding: set by execute()
      */
     void *thread_opaque;
+
+    /**
+     * Motion estimation threshold.
+     *
+     * - encoding: set by user
+     * - decoding: set by user
+     */
+    void *me_threshold;
 } AVCodecContext;
......
@@ -287,15 +287,28 @@ static void copy_picture_attributes(MpegEncContext *s, AVFrame *dst, AVFrame *src){
     dst->interlaced_frame = src->interlaced_frame;
     dst->top_field_first  = src->top_field_first;
-    if(src->motion_val[0] && src->motion_val[0] != dst->motion_val[0]){
+    if(s->avctx->me_threshold){
+        if(!src->motion_val[0])
+            av_log(s->avctx, AV_LOG_ERROR, "AVFrame.motion_val not set!\n");
+        if(!src->mb_type)
+            av_log(s->avctx, AV_LOG_ERROR, "AVFrame.mb_type not set!\n");
+        if(!src->ref_index[0])
+            av_log(s->avctx, AV_LOG_ERROR, "AVFrame.ref_index not set!\n");
         if(src->motion_subsample_log2 != dst->motion_subsample_log2)
             av_log(s->avctx, AV_LOG_ERROR, "AVFrame.motion_subsample_log2 doesnt match!\n");
         else{
+            memcpy(dst->mb_type, src->mb_type, s->mb_stride * s->mb_height * sizeof(dst->mb_type[0]));
+            for(i=0; i<2; i++){
             int stride= ((16*s->mb_width )>>src->motion_subsample_log2) + 1;
             int height= ((16*s->mb_height)>>src->motion_subsample_log2);
-            for(i=0; i<2; i++)
-                memcpy(dst->motion_val[i], src->motion_val[i], stride*height*sizeof(int16_t));
+                if(src->motion_val[i] && src->motion_val[i] != dst->motion_val[i]){
+                    memcpy(dst->motion_val[i], src->motion_val[i], 2*stride*height*sizeof(int16_t));
+                }
+                if(src->ref_index[i] && src->ref_index[i] != dst->ref_index[i]){
+                    memcpy(dst->ref_index[i], src->ref_index[i], s->mb_stride*s->mb_height*sizeof(int8_t)); //FIXME init this too
+                }
+            }
         }
     }
 }
@@ -363,6 +376,7 @@ static int alloc_picture(MpegEncContext *s, Picture *pic, int shared){
         for(i=0; i<2; i++){
             CHECKED_ALLOCZ(pic->motion_val_base[i], 2 * (b8_array_size+2) * sizeof(int16_t))
             pic->motion_val[i]= pic->motion_val_base[i]+2;
+            CHECKED_ALLOCZ(pic->ref_index[i], mb_array_size * sizeof(int8_t))
         }
         pic->motion_subsample_log2= 3;
     }
......
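With ref_index moved onto AVFrame, copy_picture_attributes() above copies mb_type, motion_val and ref_index from the user-supplied frame whenever avctx->me_threshold is set, and logs an error when a table is missing. The memcpy sizes imply what a caller would have to allocate; a rough sketch under the assumption that mb_stride is mb_width + 1 (the mpegvideo.c convention), with attach_motion_side_data() being purely illustrative:

#include "avcodec.h"

/* Illustrative helper (not in the tree): attach motion side data to a frame in
 * the sizes that copy_picture_attributes() memcpy's above.  mb_stride is assumed
 * to be mb_width + 1; error paths leave cleanup to the caller. */
static int attach_motion_side_data(AVFrame *f, int width, int height,
                                   int motion_subsample_log2)
{
    int i;
    int mb_width  = (width  + 15) / 16;
    int mb_height = (height + 15) / 16;
    int mb_stride = mb_width + 1;
    int mv_stride = ((16 * mb_width)  >> motion_subsample_log2) + 1;
    int mv_height =  (16 * mb_height) >> motion_subsample_log2;

    f->motion_subsample_log2 = motion_subsample_log2;
    f->mb_type = av_mallocz(mb_stride * mb_height * sizeof(uint32_t));
    if (!f->mb_type)
        return -1;
    for (i = 0; i < 2; i++) {
        f->ref_index[i]  = av_mallocz(mb_stride * mb_height * sizeof(int8_t));
        f->motion_val[i] = av_mallocz(2 * mv_stride * mv_height * sizeof(int16_t));
        if (!f->ref_index[i] || !f->motion_val[i])
            return -1;
    }
    return 0;
}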
@@ -138,7 +138,6 @@ typedef struct Picture{
      */
     uint8_t *interpolated[3];
     int16_t (*motion_val_base[2])[2];
-    int8_t *ref_index[2];
     uint32_t *mb_type_base;
 #define MB_TYPE_INTRA MB_TYPE_INTRA4x4 //default mb_type if theres just one type
 #define IS_INTRA4x4(a)   ((a)&MB_TYPE_INTRA4x4)
......