Commit bb198e19 authored by Michael Niedermayer

interlaced motion estimation

interlaced mpeg2 encoding
  P & B frames
  rate distorted interlaced mb decision
  alternate scantable support
4mv encoding fixes (that's also why the regression tests change)
passing height to most dsp functions
interlaced mpeg4 encoding (no direct mode MBs yet)
various related cleanups
disabled old motion estimation algorithms (log, full, ...); they will either be fixed or removed

Originally committed as revision 2638 to svn://svn.ffmpeg.org/ffmpeg/trunk
Parent b846b231
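
For reference, the central API change in this commit is that the DSP compare functions now take the block height explicitly ("passing height to most dsp functions"). A minimal sketch of the new convention — the typedef matches the one added to dsputil.h below; the plain-C SAD body is only illustrative, not the exact libavcodec implementation:

#include <stdlib.h>
#include <stdint.h>

/* new convention: context pointer + explicit height, so one comparator
 * serves 16x16, 16x8 and 8x8 blocks (needed for field/interlaced ME) */
typedef int (*me_cmp_func)(void *ctx, uint8_t *blk1, uint8_t *blk2,
                           int line_size, int h);

/* illustrative 8-pixel-wide SAD over h rows */
static int sad8_example(void *ctx, uint8_t *blk1, uint8_t *blk2,
                        int line_size, int h)
{
    int i, j, sum = 0;
    for (i = 0; i < h; i++) {          /* h rows instead of a hardcoded 8 */
        for (j = 0; j < 8; j++)
            sum += abs(blk1[j] - blk2[j]);
        blk1 += line_size;
        blk2 += line_size;
    }
    return sum;
}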
......@@ -119,8 +119,10 @@ static int use_obmc = 0;
static int use_aic = 0;
static int use_aiv = 0;
static int use_umv = 0;
static int use_alt_scan = 0;
static int do_deinterlace = 0;
static int do_interlace = 0;
static int do_interlace_dct = 0;
static int do_interlace_me = 0;
static int workaround_bugs = FF_BUG_AUTODETECT;
static int error_resilience = 2;
static int error_concealment = 3;
......@@ -130,6 +132,8 @@ static int use_part = 0;
static int packet_size = 0;
static int error_rate = 0;
static int strict = 0;
static int top_field_first = -1;
static int noise_reduction = 0;
static int debug = 0;
static int debug_mv = 0;
extern int loop_input; /* currently a hack */
......@@ -635,7 +639,12 @@ static void do_video_out(AVFormatContext *s,
/* better than nothing: use input picture interlaced
settings */
big_picture.interlaced_frame = in_picture->interlaced_frame;
big_picture.top_field_first = in_picture->top_field_first;
if(do_interlace_me || do_interlace_dct){
if(top_field_first == -1)
big_picture.top_field_first = in_picture->top_field_first;
else
big_picture.top_field_first = 1;
}
/* handles sameq here. This is not correct because it may
not be a global option */
......@@ -1946,6 +1955,16 @@ static void opt_strict(const char *arg)
strict= atoi(arg);
}
static void opt_top_field_first(const char *arg)
{
top_field_first= atoi(arg);
}
static void opt_noise_reduction(const char *arg)
{
noise_reduction= atoi(arg);
}
static void opt_audio_bitrate(const char *arg)
{
audio_bit_rate = atoi(arg) * 1000;
......@@ -2373,14 +2392,20 @@ static void opt_output_file(const char *filename)
if(use_part) {
video_enc->flags |= CODEC_FLAG_PART;
}
if (use_alt_scan) {
video_enc->flags |= CODEC_FLAG_ALT_SCAN;
}
if (b_frames) {
video_enc->max_b_frames = b_frames;
video_enc->b_frame_strategy = 0;
video_enc->b_quant_factor = 2.0;
}
if (do_interlace) {
if (do_interlace_dct) {
video_enc->flags |= CODEC_FLAG_INTERLACED_DCT;
}
if (do_interlace_me) {
video_enc->flags |= CODEC_FLAG_INTERLACED_ME;
}
video_enc->qmin = video_qmin;
video_enc->qmax = video_qmax;
video_enc->mb_qmin = video_mb_qmin;
......@@ -2430,6 +2455,7 @@ static void opt_output_file(const char *filename)
video_enc->idct_algo = idct_algo;
video_enc->strict_std_compliance = strict;
video_enc->error_rate = error_rate;
video_enc->noise_reduction= noise_reduction;
if(packet_size){
video_enc->rtp_mode= 1;
video_enc->rtp_payload_size= packet_size;
......@@ -2992,16 +3018,21 @@ const OptionDef options[] = {
{ "passlogfile", HAS_ARG | OPT_STRING | OPT_VIDEO, {(void*)&pass_logfilename}, "select two pass log file name", "file" },
{ "deinterlace", OPT_BOOL | OPT_EXPERT | OPT_VIDEO, {(void*)&do_deinterlace},
"deinterlace pictures" },
{ "interlace", OPT_BOOL | OPT_EXPERT | OPT_VIDEO, {(void*)&do_interlace},
"force interlacing support in encoder (MPEG2/MPEG4)" },
{ "ildct", OPT_BOOL | OPT_EXPERT | OPT_VIDEO, {(void*)&do_interlace_dct},
"force interlaced dct support in encoder (MPEG2/MPEG4)" },
{ "ilme", OPT_BOOL | OPT_EXPERT | OPT_VIDEO, {(void*)&do_interlace_me},
"force interlacied me support in encoder MPEG2" },
{ "psnr", OPT_BOOL | OPT_EXPERT | OPT_VIDEO, {(void*)&do_psnr}, "calculate PSNR of compressed frames" },
{ "vstats", OPT_BOOL | OPT_EXPERT | OPT_VIDEO, {(void*)&do_vstats}, "dump video coding statistics to file" },
{ "vhook", HAS_ARG | OPT_EXPERT | OPT_VIDEO, {(void*)add_frame_hooker}, "insert video processing module", "module" },
{ "aic", OPT_BOOL | OPT_EXPERT | OPT_VIDEO, {(void*)&use_aic}, "enable Advanced intra coding (h263+)" },
{ "aiv", OPT_BOOL | OPT_EXPERT | OPT_VIDEO, {(void*)&use_aiv}, "enable Alternative inter vlc (h263+)" },
{ "umv", OPT_BOOL | OPT_EXPERT | OPT_VIDEO, {(void*)&use_umv}, "enable Unlimited Motion Vector (h263+)" },
{ "alt", OPT_BOOL | OPT_EXPERT | OPT_VIDEO, {(void*)&use_alt_scan}, "enable alternate scantable (mpeg2)" },
{ "intra_matrix", HAS_ARG | OPT_EXPERT | OPT_VIDEO, {(void*)opt_intra_matrix}, "specify intra matrix coeffs", "matrix" },
{ "inter_matrix", HAS_ARG | OPT_EXPERT | OPT_VIDEO, {(void*)opt_inter_matrix}, "specify inter matrix coeffs", "matrix" },
{ "top", HAS_ARG | OPT_EXPERT | OPT_VIDEO, {(void*)opt_top_field_first}, "top=1/bottom=0/auto=-1 field first", "" },
{ "nr", HAS_ARG | OPT_EXPERT | OPT_VIDEO, {(void*)opt_noise_reduction}, "noise reduction", "" },
/* audio options */
{ "ab", HAS_ARG | OPT_AUDIO, {(void*)opt_audio_bitrate}, "set audio bitrate (in kbit/s)", "bitrate", },
......
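
The new command-line switches map onto encoder flags in opt_output_file() above; a minimal hedged sketch of that mapping, assuming an already-allocated AVCodecContext (the helper name is hypothetical):

#include "avcodec.h"

/* sketch only: mirrors the flag handling in opt_output_file() above */
static void setup_interlaced_flags(AVCodecContext *enc,
                                   int ildct, int ilme, int alt_scan)
{
    if (ildct)
        enc->flags |= CODEC_FLAG_INTERLACED_DCT; /* -ildct */
    if (ilme)
        enc->flags |= CODEC_FLAG_INTERLACED_ME;  /* -ilme, new in this commit */
    if (alt_scan)
        enc->flags |= CODEC_FLAG_ALT_SCAN;       /* -alt */
}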
......@@ -39,11 +39,11 @@ void get_pixels_mvi(DCTELEM *restrict block,
const uint8_t *restrict pixels, int line_size);
void diff_pixels_mvi(DCTELEM *block, const uint8_t *s1, const uint8_t *s2,
int stride);
int pix_abs8x8_mvi(uint8_t *pix1, uint8_t *pix2, int line_size);
int pix_abs8x8_mvi(void *v, uint8_t *pix1, uint8_t *pix2, int line_size, int h);
int pix_abs16x16_mvi_asm(uint8_t *pix1, uint8_t *pix2, int line_size);
int pix_abs16x16_x2_mvi(uint8_t *pix1, uint8_t *pix2, int line_size);
int pix_abs16x16_y2_mvi(uint8_t *pix1, uint8_t *pix2, int line_size);
int pix_abs16x16_xy2_mvi(uint8_t *pix1, uint8_t *pix2, int line_size);
int pix_abs16x16_x2_mvi(void *v, uint8_t *pix1, uint8_t *pix2, int line_size, int h);
int pix_abs16x16_y2_mvi(void *v, uint8_t *pix1, uint8_t *pix2, int line_size, int h);
int pix_abs16x16_xy2_mvi(void *v, uint8_t *pix1, uint8_t *pix2, int line_size, int h);
#if 0
/* These functions were the base for the optimized assembler routines,
......@@ -290,11 +290,6 @@ static int sad16x16_mvi(void *s, uint8_t *a, uint8_t *b, int stride)
return pix_abs16x16_mvi_asm(a, b, stride);
}
static int sad8x8_mvi(void *s, uint8_t *a, uint8_t *b, int stride)
{
return pix_abs8x8_mvi(a, b, stride);
}
void dsputil_init_alpha(DSPContext* c, AVCodecContext *avctx)
{
c->put_pixels_tab[0][0] = put_pixels16_axp_asm;
......@@ -347,12 +342,13 @@ void dsputil_init_alpha(DSPContext* c, AVCodecContext *avctx)
c->get_pixels = get_pixels_mvi;
c->diff_pixels = diff_pixels_mvi;
c->sad[0] = sad16x16_mvi;
c->sad[1] = sad8x8_mvi;
c->pix_abs8x8 = pix_abs8x8_mvi;
c->pix_abs16x16 = pix_abs16x16_mvi_asm;
c->pix_abs16x16_x2 = pix_abs16x16_x2_mvi;
c->pix_abs16x16_y2 = pix_abs16x16_y2_mvi;
c->pix_abs16x16_xy2 = pix_abs16x16_xy2_mvi;
c->sad[1] = pix_abs8x8_mvi;
// c->pix_abs[0][0] = pix_abs16x16_mvi_asm; //FIXME function arguments for the asm must be fixed
c->pix_abs[0][0] = sad16x16_mvi;
c->pix_abs[1][0] = pix_abs8x8_mvi;
c->pix_abs[0][1] = pix_abs16x16_x2_mvi;
c->pix_abs[0][2] = pix_abs16x16_y2_mvi;
c->pix_abs[0][3] = pix_abs16x16_xy2_mvi;
}
put_pixels_clamped_axp_p = c->put_pixels_clamped;
......
......@@ -84,10 +84,9 @@ static inline uint64_t avg4(uint64_t l1, uint64_t l2, uint64_t l3, uint64_t l4)
return r1 + r2;
}
int pix_abs8x8_mvi(uint8_t *pix1, uint8_t *pix2, int line_size)
int pix_abs8x8_mvi(void *v, uint8_t *pix1, uint8_t *pix2, int line_size, int h)
{
int result = 0;
int h = 8;
if ((size_t) pix2 & 0x7) {
/* works only when pix2 is actually unaligned */
......@@ -160,10 +159,9 @@ int pix_abs16x16_mvi(uint8_t *pix1, uint8_t *pix2, int line_size)
}
#endif
int pix_abs16x16_x2_mvi(uint8_t *pix1, uint8_t *pix2, int line_size)
int pix_abs16x16_x2_mvi(void *v, uint8_t *pix1, uint8_t *pix2, int line_size, int h)
{
int result = 0;
int h = 16;
uint64_t disalign = (size_t) pix2 & 0x7;
switch (disalign) {
......@@ -234,10 +232,9 @@ int pix_abs16x16_x2_mvi(uint8_t *pix1, uint8_t *pix2, int line_size)
return result;
}
int pix_abs16x16_y2_mvi(uint8_t *pix1, uint8_t *pix2, int line_size)
int pix_abs16x16_y2_mvi(void *v, uint8_t *pix1, uint8_t *pix2, int line_size, int h)
{
int result = 0;
int h = 16;
if ((size_t) pix2 & 0x7) {
uint64_t t, p2_l, p2_r;
......@@ -288,10 +285,9 @@ int pix_abs16x16_y2_mvi(uint8_t *pix1, uint8_t *pix2, int line_size)
return result;
}
int pix_abs16x16_xy2_mvi(uint8_t *pix1, uint8_t *pix2, int line_size)
int pix_abs16x16_xy2_mvi(void *v, uint8_t *pix1, uint8_t *pix2, int line_size, int h)
{
int result = 0;
int h = 16;
uint64_t p1_l, p1_r;
uint64_t p2_l, p2_r, p2_x;
......
......@@ -17,7 +17,7 @@ extern "C" {
#define FFMPEG_VERSION_INT 0x000408
#define FFMPEG_VERSION "0.4.8"
#define LIBAVCODEC_BUILD 4697
#define LIBAVCODEC_BUILD 4698
#define LIBAVCODEC_VERSION_INT FFMPEG_VERSION_INT
#define LIBAVCODEC_VERSION FFMPEG_VERSION
......@@ -263,7 +263,8 @@ static const __attribute__((unused)) int Motion_Est_QTab[] =
#define CODEC_FLAG_H263P_AIV 0x00000008 ///< H263 Alternative inter vlc
#define CODEC_FLAG_OBMC 0x00000001 ///< OBMC
#define CODEC_FLAG_LOOP_FILTER 0x00000800 ///< loop filter
#define CODEC_FLAG_H263P_SLICE_STRUCT 0x10000000
#define CODEC_FLAG_H263P_SLICE_STRUCT 0x10000000
#define CODEC_FLAG_INTERLACED_ME 0x20000000 ///< interlaced motion estimation
/* Unsupported options :
* Syntax Arithmetic coding (SAC)
* Reference Picture Selection
......
......@@ -218,13 +218,13 @@ static void bswap_buf(uint32_t *dst, uint32_t *src, int w){
}
}
static int sse8_c(void *v, uint8_t * pix1, uint8_t * pix2, int line_size)
static int sse8_c(void *v, uint8_t * pix1, uint8_t * pix2, int line_size, int h)
{
int s, i;
uint32_t *sq = squareTbl + 256;
s = 0;
for (i = 0; i < 8; i++) {
for (i = 0; i < h; i++) {
s += sq[pix1[0] - pix2[0]];
s += sq[pix1[1] - pix2[1]];
s += sq[pix1[2] - pix2[2]];
......@@ -239,13 +239,13 @@ static int sse8_c(void *v, uint8_t * pix1, uint8_t * pix2, int line_size)
return s;
}
static int sse16_c(void *v, uint8_t *pix1, uint8_t *pix2, int line_size)
static int sse16_c(void *v, uint8_t *pix1, uint8_t *pix2, int line_size, int h)
{
int s, i;
uint32_t *sq = squareTbl + 256;
s = 0;
for (i = 0; i < 16; i++) {
for (i = 0; i < h; i++) {
s += sq[pix1[ 0] - pix2[ 0]];
s += sq[pix1[ 1] - pix2[ 1]];
s += sq[pix1[ 2] - pix2[ 2]];
......@@ -2331,12 +2331,12 @@ static void h263_h_loop_filter_c(uint8_t *src, int stride, int qscale){
}
}
static inline int pix_abs16x16_c(uint8_t *pix1, uint8_t *pix2, int line_size)
static inline int pix_abs16_c(void *v, uint8_t *pix1, uint8_t *pix2, int line_size, int h)
{
int s, i;
s = 0;
for(i=0;i<16;i++) {
for(i=0;i<h;i++) {
s += abs(pix1[0] - pix2[0]);
s += abs(pix1[1] - pix2[1]);
s += abs(pix1[2] - pix2[2]);
......@@ -2359,12 +2359,12 @@ static inline int pix_abs16x16_c(uint8_t *pix1, uint8_t *pix2, int line_size)
return s;
}
static int pix_abs16x16_x2_c(uint8_t *pix1, uint8_t *pix2, int line_size)
static int pix_abs16_x2_c(void *v, uint8_t *pix1, uint8_t *pix2, int line_size, int h)
{
int s, i;
s = 0;
for(i=0;i<16;i++) {
for(i=0;i<h;i++) {
s += abs(pix1[0] - avg2(pix2[0], pix2[1]));
s += abs(pix1[1] - avg2(pix2[1], pix2[2]));
s += abs(pix1[2] - avg2(pix2[2], pix2[3]));
......@@ -2387,13 +2387,13 @@ static int pix_abs16x16_x2_c(uint8_t *pix1, uint8_t *pix2, int line_size)
return s;
}
static int pix_abs16x16_y2_c(uint8_t *pix1, uint8_t *pix2, int line_size)
static int pix_abs16_y2_c(void *v, uint8_t *pix1, uint8_t *pix2, int line_size, int h)
{
int s, i;
uint8_t *pix3 = pix2 + line_size;
s = 0;
for(i=0;i<16;i++) {
for(i=0;i<h;i++) {
s += abs(pix1[0] - avg2(pix2[0], pix3[0]));
s += abs(pix1[1] - avg2(pix2[1], pix3[1]));
s += abs(pix1[2] - avg2(pix2[2], pix3[2]));
......@@ -2417,13 +2417,13 @@ static int pix_abs16x16_y2_c(uint8_t *pix1, uint8_t *pix2, int line_size)
return s;
}
static int pix_abs16x16_xy2_c(uint8_t *pix1, uint8_t *pix2, int line_size)
static int pix_abs16_xy2_c(void *v, uint8_t *pix1, uint8_t *pix2, int line_size, int h)
{
int s, i;
uint8_t *pix3 = pix2 + line_size;
s = 0;
for(i=0;i<16;i++) {
for(i=0;i<h;i++) {
s += abs(pix1[0] - avg4(pix2[0], pix2[1], pix3[0], pix3[1]));
s += abs(pix1[1] - avg4(pix2[1], pix2[2], pix3[1], pix3[2]));
s += abs(pix1[2] - avg4(pix2[2], pix2[3], pix3[2], pix3[3]));
......@@ -2447,12 +2447,12 @@ static int pix_abs16x16_xy2_c(uint8_t *pix1, uint8_t *pix2, int line_size)
return s;
}
static inline int pix_abs8x8_c(uint8_t *pix1, uint8_t *pix2, int line_size)
static inline int pix_abs8_c(void *v, uint8_t *pix1, uint8_t *pix2, int line_size, int h)
{
int s, i;
s = 0;
for(i=0;i<8;i++) {
for(i=0;i<h;i++) {
s += abs(pix1[0] - pix2[0]);
s += abs(pix1[1] - pix2[1]);
s += abs(pix1[2] - pix2[2]);
......@@ -2467,12 +2467,12 @@ static inline int pix_abs8x8_c(uint8_t *pix1, uint8_t *pix2, int line_size)
return s;
}
static int pix_abs8x8_x2_c(uint8_t *pix1, uint8_t *pix2, int line_size)
static int pix_abs8_x2_c(void *v, uint8_t *pix1, uint8_t *pix2, int line_size, int h)
{
int s, i;
s = 0;
for(i=0;i<8;i++) {
for(i=0;i<h;i++) {
s += abs(pix1[0] - avg2(pix2[0], pix2[1]));
s += abs(pix1[1] - avg2(pix2[1], pix2[2]));
s += abs(pix1[2] - avg2(pix2[2], pix2[3]));
......@@ -2487,13 +2487,13 @@ static int pix_abs8x8_x2_c(uint8_t *pix1, uint8_t *pix2, int line_size)
return s;
}
static int pix_abs8x8_y2_c(uint8_t *pix1, uint8_t *pix2, int line_size)
static int pix_abs8_y2_c(void *v, uint8_t *pix1, uint8_t *pix2, int line_size, int h)
{
int s, i;
uint8_t *pix3 = pix2 + line_size;
s = 0;
for(i=0;i<8;i++) {
for(i=0;i<h;i++) {
s += abs(pix1[0] - avg2(pix2[0], pix3[0]));
s += abs(pix1[1] - avg2(pix2[1], pix3[1]));
s += abs(pix1[2] - avg2(pix2[2], pix3[2]));
......@@ -2509,13 +2509,13 @@ static int pix_abs8x8_y2_c(uint8_t *pix1, uint8_t *pix2, int line_size)
return s;
}
static int pix_abs8x8_xy2_c(uint8_t *pix1, uint8_t *pix2, int line_size)
static int pix_abs8_xy2_c(void *v, uint8_t *pix1, uint8_t *pix2, int line_size, int h)
{
int s, i;
uint8_t *pix3 = pix2 + line_size;
s = 0;
for(i=0;i<8;i++) {
for(i=0;i<h;i++) {
s += abs(pix1[0] - avg4(pix2[0], pix2[1], pix3[0], pix3[1]));
s += abs(pix1[1] - avg4(pix2[1], pix2[2], pix3[1], pix3[2]));
s += abs(pix1[2] - avg4(pix2[2], pix2[3], pix3[2], pix3[3]));
......@@ -2531,14 +2531,6 @@ static int pix_abs8x8_xy2_c(uint8_t *pix1, uint8_t *pix2, int line_size)
return s;
}
static int sad16x16_c(void *s, uint8_t *a, uint8_t *b, int stride){
return pix_abs16x16_c(a,b,stride);
}
static int sad8x8_c(void *s, uint8_t *a, uint8_t *b, int stride){
return pix_abs8x8_c(a,b,stride);
}
/**
* permutes an 8x8 block.
* @param block the block which will be permuted according to the given permutation vector
......@@ -2641,10 +2633,12 @@ o2= (i1)-(i2);
#define BUTTERFLYA(x,y) (ABS((x)+(y)) + ABS((x)-(y)))
static int hadamard8_diff_c(/*MpegEncContext*/ void *s, uint8_t *dst, uint8_t *src, int stride){
static int hadamard8_diff8x8_c(/*MpegEncContext*/ void *s, uint8_t *dst, uint8_t *src, int stride, int h){
int i;
int temp[64];
int sum=0;
assert(h==8);
for(i=0; i<8; i++){
//FIXME try pointer walks
......@@ -2735,11 +2729,13 @@ static int hadamard8_abs_c(uint8_t *src, int stride, int mean){
return sum;
}
static int dct_sad8x8_c(/*MpegEncContext*/ void *c, uint8_t *src1, uint8_t *src2, int stride){
static int dct_sad8x8_c(/*MpegEncContext*/ void *c, uint8_t *src1, uint8_t *src2, int stride, int h){
MpegEncContext * const s= (MpegEncContext *)c;
uint64_t __align8 aligned_temp[sizeof(DCTELEM)*64/8];
DCTELEM * const temp= (DCTELEM*)aligned_temp;
int sum=0, i;
assert(h==8);
s->dsp.diff_pixels(temp, src1, src2, stride);
s->dsp.fdct(temp);
......@@ -2752,13 +2748,14 @@ static int dct_sad8x8_c(/*MpegEncContext*/ void *c, uint8_t *src1, uint8_t *src2
void simple_idct(DCTELEM *block); //FIXME
static int quant_psnr8x8_c(/*MpegEncContext*/ void *c, uint8_t *src1, uint8_t *src2, int stride){
static int quant_psnr8x8_c(/*MpegEncContext*/ void *c, uint8_t *src1, uint8_t *src2, int stride, int h){
MpegEncContext * const s= (MpegEncContext *)c;
uint64_t __align8 aligned_temp[sizeof(DCTELEM)*64*2/8];
DCTELEM * const temp= (DCTELEM*)aligned_temp;
DCTELEM * const bak = ((DCTELEM*)aligned_temp)+64;
int sum=0, i;
assert(h==8);
s->mb_intra=0;
s->dsp.diff_pixels(temp, src1, src2, stride);
......@@ -2775,7 +2772,7 @@ static int quant_psnr8x8_c(/*MpegEncContext*/ void *c, uint8_t *src1, uint8_t *s
return sum;
}
static int rd8x8_c(/*MpegEncContext*/ void *c, uint8_t *src1, uint8_t *src2, int stride){
static int rd8x8_c(/*MpegEncContext*/ void *c, uint8_t *src1, uint8_t *src2, int stride, int h){
MpegEncContext * const s= (MpegEncContext *)c;
const uint8_t *scantable= s->intra_scantable.permutated;
uint64_t __align8 aligned_temp[sizeof(DCTELEM)*64/8];
......@@ -2787,6 +2784,8 @@ static int rd8x8_c(/*MpegEncContext*/ void *c, uint8_t *src1, uint8_t *src2, int
uint8_t * length;
uint8_t * last_length;
assert(h==8);
for(i=0; i<8; i++){
((uint32_t*)(bak + i*stride))[0]= ((uint32_t*)(src2 + i*stride))[0];
((uint32_t*)(bak + i*stride))[1]= ((uint32_t*)(src2 + i*stride))[1];
......@@ -2847,12 +2846,12 @@ static int rd8x8_c(/*MpegEncContext*/ void *c, uint8_t *src1, uint8_t *src2, int
s->dsp.idct_add(bak, stride, temp);
distoration= s->dsp.sse[1](NULL, bak, src1, stride);
distoration= s->dsp.sse[1](NULL, bak, src1, stride, 8);
return distoration + ((bits*s->qscale*s->qscale*109 + 64)>>7);
}
static int bit8x8_c(/*MpegEncContext*/ void *c, uint8_t *src1, uint8_t *src2, int stride){
static int bit8x8_c(/*MpegEncContext*/ void *c, uint8_t *src1, uint8_t *src2, int stride, int h){
MpegEncContext * const s= (MpegEncContext *)c;
const uint8_t *scantable= s->intra_scantable.permutated;
uint64_t __align8 aligned_temp[sizeof(DCTELEM)*64/8];
......@@ -2861,6 +2860,8 @@ static int bit8x8_c(/*MpegEncContext*/ void *c, uint8_t *src1, uint8_t *src2, in
const int esc_length= s->ac_esc_length;
uint8_t * length;
uint8_t * last_length;
assert(h==8);
s->dsp.diff_pixels(temp, src1, src2, stride);
......@@ -2910,12 +2911,11 @@ static int bit8x8_c(/*MpegEncContext*/ void *c, uint8_t *src1, uint8_t *src2, in
return bits;
}
WARPER88_1616(hadamard8_diff_c, hadamard8_diff16_c)
WARPER88_1616(dct_sad8x8_c, dct_sad16x16_c)
WARPER88_1616(quant_psnr8x8_c, quant_psnr16x16_c)
WARPER88_1616(rd8x8_c, rd16x16_c)
WARPER88_1616(bit8x8_c, bit16x16_c)
WARPER8_16_SQ(hadamard8_diff8x8_c, hadamard8_diff16_c)
WARPER8_16_SQ(dct_sad8x8_c, dct_sad16_c)
WARPER8_16_SQ(quant_psnr8x8_c, quant_psnr16_c)
WARPER8_16_SQ(rd8x8_c, rd16_c)
WARPER8_16_SQ(bit8x8_c, bit16_c)
/* XXX: those functions should be suppressed ASAP when all IDCTs are
converted */
......@@ -2989,18 +2989,16 @@ void dsputil_init(DSPContext* c, AVCodecContext *avctx)
c->clear_blocks = clear_blocks_c;
c->pix_sum = pix_sum_c;
c->pix_norm1 = pix_norm1_c;
c->sse[0]= sse16_c;
c->sse[1]= sse8_c;
/* TODO [0] 16 [1] 8 */
c->pix_abs16x16 = pix_abs16x16_c;
c->pix_abs16x16_x2 = pix_abs16x16_x2_c;
c->pix_abs16x16_y2 = pix_abs16x16_y2_c;
c->pix_abs16x16_xy2 = pix_abs16x16_xy2_c;
c->pix_abs8x8 = pix_abs8x8_c;
c->pix_abs8x8_x2 = pix_abs8x8_x2_c;
c->pix_abs8x8_y2 = pix_abs8x8_y2_c;
c->pix_abs8x8_xy2 = pix_abs8x8_xy2_c;
c->pix_abs[0][0] = pix_abs16_c;
c->pix_abs[0][1] = pix_abs16_x2_c;
c->pix_abs[0][2] = pix_abs16_y2_c;
c->pix_abs[0][3] = pix_abs16_xy2_c;
c->pix_abs[1][0] = pix_abs8_c;
c->pix_abs[1][1] = pix_abs8_x2_c;
c->pix_abs[1][2] = pix_abs8_y2_c;
c->pix_abs[1][3] = pix_abs8_xy2_c;
#define dspfunc(PFX, IDX, NUM) \
c->PFX ## _pixels_tab[IDX][0] = PFX ## _pixels ## NUM ## _c; \
......@@ -3097,24 +3095,21 @@ void dsputil_init(DSPContext* c, AVCodecContext *avctx)
c->put_mspel_pixels_tab[6]= put_mspel8_mc22_c;
c->put_mspel_pixels_tab[7]= put_mspel8_mc32_c;
c->hadamard8_diff[0]= hadamard8_diff16_c;
c->hadamard8_diff[1]= hadamard8_diff_c;
c->hadamard8_abs = hadamard8_abs_c;
c->dct_sad[0]= dct_sad16x16_c;
c->dct_sad[1]= dct_sad8x8_c;
c->sad[0]= sad16x16_c;
c->sad[1]= sad8x8_c;
c->quant_psnr[0]= quant_psnr16x16_c;
c->quant_psnr[1]= quant_psnr8x8_c;
c->rd[0]= rd16x16_c;
c->rd[1]= rd8x8_c;
c->bit[0]= bit16x16_c;
c->bit[1]= bit8x8_c;
#define SET_CMP_FUNC(name) \
c->name[0]= name ## 16_c;\
c->name[1]= name ## 8x8_c;
SET_CMP_FUNC(hadamard8_diff)
SET_CMP_FUNC(dct_sad)
c->sad[0]= pix_abs16_c;
c->sad[1]= pix_abs8_c;
c->sse[0]= sse16_c;
c->sse[1]= sse8_c;
SET_CMP_FUNC(quant_psnr)
SET_CMP_FUNC(rd)
SET_CMP_FUNC(bit)
c->add_bytes= add_bytes_c;
c->diff_bytes= diff_bytes_c;
......
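
The old per-size function pointers (pix_abs16x16, pix_abs8x8_x2, ...) become a two-dimensional table: the first index selects the block width (0 = 16 pixels, 1 = 8 pixels), the second the half-pel variant (0 full-pel, 1 x half-pel, 2 y half-pel, 3 xy half-pel), and the height is passed per call. A hedged usage sketch (the helper and the field-SAD call are illustrative, not taken from the commit):

#include <stdint.h>
#include "dsputil.h"

/* Assumed indexing after this commit:
 *   c->pix_abs[size][variant](ctx, cur, ref, stride, h)
 *   size: 0 -> 16 pixels wide, 1 -> 8 pixels wide
 *   variant: 0 full-pel, 1 x half-pel, 2 y half-pel, 3 xy half-pel */
static int frame_and_field_sad(DSPContext *c, uint8_t *cur, uint8_t *ref,
                               int stride, int *field_sad)
{
    /* whole 16x16 frame macroblock */
    int frame_sad = c->pix_abs[0][0](NULL, cur, ref, stride, 16);

    /* one field of the same macroblock: doubled stride, half height (16x8) */
    *field_sad = c->pix_abs[0][0](NULL, cur, ref, stride * 2, 8);

    return frame_sad;
}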
......@@ -110,9 +110,7 @@ static void a(uint8_t *block, const uint8_t *pixels, int line_size, int h){\
/* motion estimation */
typedef int (*op_pixels_abs_func)(uint8_t *blk1/*align width (8 or 16)*/, uint8_t *blk2/*align 1*/, int line_size)/* __attribute__ ((const))*/;
typedef int (*me_cmp_func)(void /*MpegEncContext*/ *s, uint8_t *blk1/*align width (8 or 16)*/, uint8_t *blk2/*align 1*/, int line_size)/* __attribute__ ((const))*/;
typedef int (*me_cmp_func)(void /*MpegEncContext*/ *s, uint8_t *blk1/*align width (8 or 16)*/, uint8_t *blk2/*align 1*/, int line_size, int h)/* __attribute__ ((const))*/;
/**
......@@ -136,19 +134,21 @@ typedef struct DSPContext {
void (*clear_blocks)(DCTELEM *blocks/*align 16*/);
int (*pix_sum)(uint8_t * pix, int line_size);
int (*pix_norm1)(uint8_t * pix, int line_size);
me_cmp_func sad[2]; /* identical to pix_absAxA except additional void * */
me_cmp_func sse[2];
me_cmp_func hadamard8_diff[2];
me_cmp_func dct_sad[2];
me_cmp_func quant_psnr[2];
me_cmp_func bit[2];
me_cmp_func rd[2];
// 16x16 8x8 4x4 2x2 16x8 8x4 4x2 8x16 4x8 2x4
me_cmp_func sad[4]; /* identical to pix_absAxA except additional void * */
me_cmp_func sse[4];
me_cmp_func hadamard8_diff[4];
me_cmp_func dct_sad[4];
me_cmp_func quant_psnr[4];
me_cmp_func bit[4];
me_cmp_func rd[4];
int (*hadamard8_abs )(uint8_t *src, int stride, int mean);
me_cmp_func me_pre_cmp[11];
me_cmp_func me_cmp[11];
me_cmp_func me_sub_cmp[11];
me_cmp_func mb_cmp[11];
me_cmp_func me_pre_cmp[5];
me_cmp_func me_cmp[5];
me_cmp_func me_sub_cmp[5];
me_cmp_func mb_cmp[5];
/* maybe create an array for 16/8/4/2 functions */
/**
......@@ -226,14 +226,7 @@ typedef struct DSPContext {
qpel_mc_func put_h264_qpel_pixels_tab[3][16];
qpel_mc_func avg_h264_qpel_pixels_tab[3][16];
op_pixels_abs_func pix_abs16x16;
op_pixels_abs_func pix_abs16x16_x2;
op_pixels_abs_func pix_abs16x16_y2;
op_pixels_abs_func pix_abs16x16_xy2;
op_pixels_abs_func pix_abs8x8;
op_pixels_abs_func pix_abs8x8_x2;
op_pixels_abs_func pix_abs8x8_y2;
op_pixels_abs_func pix_abs8x8_xy2;
me_cmp_func pix_abs[2][4];
/* huffyuv specific */
void (*add_bytes)(uint8_t *dst/*align 16*/, uint8_t *src/*align 16*/, int w);
......@@ -484,12 +477,24 @@ void ff_mdct_calc(MDCTContext *s, FFTSample *out,
const FFTSample *input, FFTSample *tmp);
void ff_mdct_end(MDCTContext *s);
#define WARPER88_1616(name8, name16)\
static int name16(void /*MpegEncContext*/ *s, uint8_t *dst, uint8_t *src, int stride){\
return name8(s, dst , src , stride)\
+name8(s, dst+8 , src+8 , stride)\
+name8(s, dst +8*stride, src +8*stride, stride)\
+name8(s, dst+8+8*stride, src+8+8*stride, stride);\
#define WARPER8_16(name8, name16)\
static int name16(void /*MpegEncContext*/ *s, uint8_t *dst, uint8_t *src, int stride, int h){\
return name8(s, dst , src , stride, h)\
+name8(s, dst+8 , src+8 , stride, h);\
}
#define WARPER8_16_SQ(name8, name16)\
static int name16(void /*MpegEncContext*/ *s, uint8_t *dst, uint8_t *src, int stride, int h){\
int score=0;\
score +=name8(s, dst , src , stride, 8);\
score +=name8(s, dst+8 , src+8 , stride, 8);\
if(h==16){\
dst += 8*stride;\
src += 8*stride;\
score +=name8(s, dst , src , stride, 8);\
score +=name8(s, dst+8 , src+8 , stride, 8);\
}\
return score;\
}
#ifndef HAVE_LRINTF
......
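
For clarity, this is roughly what WARPER8_16_SQ(name8, name16) expands to for one concrete pair of names — a hand-expanded sketch of the macro above, building a 16-pixel-wide comparator out of the 8x8 one for either a 16x8 (h==8) or 16x16 (h==16) block:

static int hadamard8_diff16_c(void *s, uint8_t *dst, uint8_t *src,
                              int stride, int h)
{
    int score = 0;
    score += hadamard8_diff8x8_c(s, dst,     src,     stride, 8);
    score += hadamard8_diff8x8_c(s, dst + 8, src + 8, stride, 8);
    if (h == 16) {                      /* lower half only for 16x16 */
        dst += 8 * stride;
        src += 8 * stride;
        score += hadamard8_diff8x8_c(s, dst,     src,     stride, 8);
        score += hadamard8_diff8x8_c(s, dst + 8, src + 8, stride, 8);
    }
    return score;
}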
......@@ -582,8 +582,8 @@ static int is_intra_more_likely(MpegEncContext *s){
uint8_t *mb_ptr = s->current_picture.data[0] + mb_x*16 + mb_y*16*s->linesize;
uint8_t *last_mb_ptr= s->last_picture.data [0] + mb_x*16 + mb_y*16*s->linesize;
is_intra_likely += s->dsp.pix_abs16x16(last_mb_ptr, mb_ptr , s->linesize);
is_intra_likely -= s->dsp.pix_abs16x16(last_mb_ptr, last_mb_ptr+s->linesize*16, s->linesize);
is_intra_likely += s->dsp.sad[0](NULL, last_mb_ptr, mb_ptr , s->linesize, 16);
is_intra_likely -= s->dsp.sad[0](NULL, last_mb_ptr, last_mb_ptr+s->linesize*16, s->linesize, 16);
}else{
if(IS_INTRA(s->current_picture.mb_type[mb_xy]))
is_intra_likely++;
......
......@@ -479,9 +479,9 @@ void ff_clean_mpeg4_qscales(MpegEncContext *s){
for(i=1; i<s->mb_num; i++){
int mb_xy= s->mb_index2xy[i];
if(qscale_table[mb_xy] != qscale_table[s->mb_index2xy[i-1]] && (s->mb_type[mb_xy]&MB_TYPE_INTER4V)){
s->mb_type[mb_xy]&= ~MB_TYPE_INTER4V;
s->mb_type[mb_xy]|= MB_TYPE_INTER;
if(qscale_table[mb_xy] != qscale_table[s->mb_index2xy[i-1]] && (s->mb_type[mb_xy]&CANDIDATE_MB_TYPE_INTER4V)){
s->mb_type[mb_xy]&= ~CANDIDATE_MB_TYPE_INTER4V;
s->mb_type[mb_xy]|= CANDIDATE_MB_TYPE_INTER;
}
}
......@@ -508,9 +508,9 @@ void ff_clean_mpeg4_qscales(MpegEncContext *s){
for(i=1; i<s->mb_num; i++){
int mb_xy= s->mb_index2xy[i];
if(qscale_table[mb_xy] != qscale_table[s->mb_index2xy[i-1]] && (s->mb_type[mb_xy]&MB_TYPE_DIRECT)){
s->mb_type[mb_xy]&= ~MB_TYPE_DIRECT;
s->mb_type[mb_xy]|= MB_TYPE_BIDIR;
if(qscale_table[mb_xy] != qscale_table[s->mb_index2xy[i-1]] && (s->mb_type[mb_xy]&CANDIDATE_MB_TYPE_DIRECT)){
s->mb_type[mb_xy]&= ~CANDIDATE_MB_TYPE_DIRECT;
s->mb_type[mb_xy]|= CANDIDATE_MB_TYPE_BIDIR;
}
}
}
......@@ -523,7 +523,7 @@ void ff_clean_mpeg4_qscales(MpegEncContext *s){
*/
int ff_mpeg4_set_direct_mv(MpegEncContext *s, int mx, int my){
const int mb_index= s->mb_x + s->mb_y*s->mb_stride;
const int colocated_mb_type= s->next_picture.mb_type[mb_index]; //FIXME or next?
const int colocated_mb_type= s->next_picture.mb_type[mb_index];
int xy= s->block_index[0];
uint16_t time_pp= s->pp_time;
uint16_t time_pb= s->pb_time;
......@@ -547,18 +547,18 @@ int ff_mpeg4_set_direct_mv(MpegEncContext *s, int mx, int my){
s->mv_type = MV_TYPE_FIELD;
for(i=0; i<2; i++){
if(s->top_field_first){
time_pp= s->pp_field_time - s->field_select_table[mb_index][i] + i;
time_pb= s->pb_field_time - s->field_select_table[mb_index][i] + i;
time_pp= s->pp_field_time - s->p_field_select_table[i][mb_index] + i;
time_pb= s->pb_field_time - s->p_field_select_table[i][mb_index] + i;
}else{
time_pp= s->pp_field_time + s->field_select_table[mb_index][i] - i;
time_pb= s->pb_field_time + s->field_select_table[mb_index][i] - i;
time_pp= s->pp_field_time + s->p_field_select_table[i][mb_index] - i;
time_pb= s->pb_field_time + s->p_field_select_table[i][mb_index] - i;
}
s->mv[0][i][0] = s->field_mv_table[mb_index][i][0]*time_pb/time_pp + mx;
s->mv[0][i][1] = s->field_mv_table[mb_index][i][1]*time_pb/time_pp + my;
s->mv[1][i][0] = mx ? s->mv[0][i][0] - s->field_mv_table[mb_index][i][0]
: s->field_mv_table[mb_index][i][0]*(time_pb - time_pp)/time_pp;
s->mv[1][i][1] = my ? s->mv[0][i][1] - s->field_mv_table[mb_index][i][1]
: s->field_mv_table[mb_index][i][1]*(time_pb - time_pp)/time_pp;
s->mv[0][i][0] = s->p_field_mv_table[i][0][mb_index][0]*time_pb/time_pp + mx;
s->mv[0][i][1] = s->p_field_mv_table[i][0][mb_index][1]*time_pb/time_pp + my;
s->mv[1][i][0] = mx ? s->mv[0][i][0] - s->p_field_mv_table[i][0][mb_index][0]
: s->p_field_mv_table[i][0][mb_index][0]*(time_pb - time_pp)/time_pp;
s->mv[1][i][1] = my ? s->mv[0][i][1] - s->p_field_mv_table[i][0][mb_index][1]
: s->p_field_mv_table[i][0][mb_index][1]*(time_pb - time_pp)/time_pp;
}
return MB_TYPE_DIRECT2 | MB_TYPE_16x8 | MB_TYPE_L0L1 | MB_TYPE_INTERLACED;
}else{
......@@ -598,9 +598,9 @@ void ff_h263_update_motion_val(MpegEncContext * s){
motion_y = s->mv[0][0][1] + s->mv[0][1][1];
motion_x = (motion_x>>1) | (motion_x&1);
for(i=0; i<2; i++){
s->field_mv_table[mb_xy][i][0]= s->mv[0][i][0];
s->field_mv_table[mb_xy][i][1]= s->mv[0][i][1];
s->field_select_table[mb_xy][i]= s->field_select[0][i];
s->p_field_mv_table[i][0][mb_xy][0]= s->mv[0][i][0];
s->p_field_mv_table[i][0][mb_xy][1]= s->mv[0][i][1];
s->p_field_select_table[i][mb_xy]= s->field_select[0][i];
}
}
......@@ -744,12 +744,14 @@ void mpeg4_encode_mb(MpegEncContext * s,
if(s->pict_type==B_TYPE){
static const int mb_type_table[8]= {-1, 2, 3, 1,-1,-1,-1, 0}; /* convert from mv_dir to type */
int mb_type= mb_type_table[s->mv_dir];
if(s->mb_x==0){
s->last_mv[0][0][0]=
s->last_mv[0][0][1]=
s->last_mv[1][0][0]=
s->last_mv[1][0][1]= 0;
for(i=0; i<2; i++){
s->last_mv[i][0][0]=
s->last_mv[i][0][1]=
s->last_mv[i][1][0]=
s->last_mv[i][1][1]= 0;
}
}
assert(s->dquant>=-2 && s->dquant<=2);
......@@ -803,50 +805,64 @@ void mpeg4_encode_mb(MpegEncContext * s,
if(cbp)
put_bits(&s->pb, 1, s->interlaced_dct);
if(mb_type) // not direct mode
put_bits(&s->pb, 1, 0); // no interlaced ME yet
put_bits(&s->pb, 1, s->mv_type == MV_TYPE_FIELD);
}
if(interleaved_stats){
s->misc_bits+= get_bits_diff(s);
}
switch(mb_type)
{
case 0: /* direct */
if(mb_type == 0){
assert(s->mv_dir & MV_DIRECT);
h263_encode_motion(s, motion_x, 1);
h263_encode_motion(s, motion_y, 1);
s->b_count++;
s->f_count++;
break;
case 1: /* bidir */
h263_encode_motion(s, s->mv[0][0][0] - s->last_mv[0][0][0], s->f_code);
h263_encode_motion(s, s->mv[0][0][1] - s->last_mv[0][0][1], s->f_code);
h263_encode_motion(s, s->mv[1][0][0] - s->last_mv[1][0][0], s->b_code);
h263_encode_motion(s, s->mv[1][0][1] - s->last_mv[1][0][1], s->b_code);
s->last_mv[0][0][0]= s->mv[0][0][0];
s->last_mv[0][0][1]= s->mv[0][0][1];
s->last_mv[1][0][0]= s->mv[1][0][0];
s->last_mv[1][0][1]= s->mv[1][0][1];
s->b_count++;
s->f_count++;
break;
case 2: /* backward */
h263_encode_motion(s, motion_x - s->last_mv[1][0][0], s->b_code);
h263_encode_motion(s, motion_y - s->last_mv[1][0][1], s->b_code);
s->last_mv[1][0][0]= motion_x;
s->last_mv[1][0][1]= motion_y;
s->b_count++;
break;
case 3: /* forward */
h263_encode_motion(s, motion_x - s->last_mv[0][0][0], s->f_code);
h263_encode_motion(s, motion_y - s->last_mv[0][0][1], s->f_code);
s->last_mv[0][0][0]= motion_x;
s->last_mv[0][0][1]= motion_y;
s->f_count++;
break;
default:
av_log(s->avctx, AV_LOG_ERROR, "unknown mb type\n");
return;
}else{
assert(mb_type > 0 && mb_type < 4);
if(s->mv_type != MV_TYPE_FIELD){
if(s->mv_dir & MV_DIR_FORWARD){
h263_encode_motion(s, s->mv[0][0][0] - s->last_mv[0][0][0], s->f_code);
h263_encode_motion(s, s->mv[0][0][1] - s->last_mv[0][0][1], s->f_code);
s->last_mv[0][0][0]= s->last_mv[0][1][0]= s->mv[0][0][0];
s->last_mv[0][0][1]= s->last_mv[0][1][1]= s->mv[0][0][1];
s->f_count++;
}
if(s->mv_dir & MV_DIR_BACKWARD){
h263_encode_motion(s, s->mv[1][0][0] - s->last_mv[1][0][0], s->b_code);
h263_encode_motion(s, s->mv[1][0][1] - s->last_mv[1][0][1], s->b_code);
s->last_mv[1][0][0]= s->last_mv[1][1][0]= s->mv[1][0][0];
s->last_mv[1][0][1]= s->last_mv[1][1][1]= s->mv[1][0][1];
s->b_count++;
}
}else{
if(s->mv_dir & MV_DIR_FORWARD){
put_bits(&s->pb, 1, s->field_select[0][0]);
put_bits(&s->pb, 1, s->field_select[0][1]);
}
if(s->mv_dir & MV_DIR_BACKWARD){
put_bits(&s->pb, 1, s->field_select[1][0]);
put_bits(&s->pb, 1, s->field_select[1][1]);
}
if(s->mv_dir & MV_DIR_FORWARD){
for(i=0; i<2; i++){
h263_encode_motion(s, s->mv[0][i][0] - s->last_mv[0][i][0] , s->f_code);
h263_encode_motion(s, s->mv[0][i][1] - s->last_mv[0][i][1]/2, s->f_code);
s->last_mv[0][i][0]= s->mv[0][i][0];
s->last_mv[0][i][1]= s->mv[0][i][1]*2;
}
s->f_count++;
}
if(s->mv_dir & MV_DIR_BACKWARD){
for(i=0; i<2; i++){
h263_encode_motion(s, s->mv[1][i][0] - s->last_mv[1][i][0] , s->b_code);
h263_encode_motion(s, s->mv[1][i][1] - s->last_mv[1][i][1]/2, s->b_code);
s->last_mv[1][i][0]= s->mv[1][i][0];
s->last_mv[1][i][1]= s->mv[1][i][1]*2;
}
s->b_count++;
}
}
}
if(interleaved_stats){
......@@ -861,6 +877,7 @@ void mpeg4_encode_mb(MpegEncContext * s,
if(interleaved_stats){
s->p_tex_bits+= get_bits_diff(s);
}
}else{ /* s->pict_type==B_TYPE */
cbp= get_p_cbp(s, block, motion_x, motion_y);
......@@ -889,7 +906,7 @@ void mpeg4_encode_mb(MpegEncContext * s,
if(pic==NULL || pic->pict_type!=B_TYPE) break;
b_pic= pic->data[0] + offset + 16; //FIXME +16
diff= s->dsp.pix_abs16x16(p_pic, b_pic, s->linesize);
diff= s->dsp.sad[0](NULL, p_pic, b_pic, s->linesize, 16);
if(diff>s->qscale*70){ //FIXME check that 70 is optimal
s->mb_skiped=0;
break;
......@@ -929,7 +946,7 @@ void mpeg4_encode_mb(MpegEncContext * s,
if(!s->progressive_sequence){
if(cbp)
put_bits(pb2, 1, s->interlaced_dct);
put_bits(pb2, 1, 0); // no interlaced ME yet
put_bits(pb2, 1, 0);
}
if(interleaved_stats){
......@@ -941,7 +958,38 @@ void mpeg4_encode_mb(MpegEncContext * s,
h263_encode_motion(s, motion_x - pred_x, s->f_code);
h263_encode_motion(s, motion_y - pred_y, s->f_code);
}else if(s->mv_type==MV_TYPE_FIELD){
if(s->dquant) cbpc+= 8;
put_bits(&s->pb,
inter_MCBPC_bits[cbpc],
inter_MCBPC_code[cbpc]);
put_bits(pb2, cbpy_tab[cbpy][1], cbpy_tab[cbpy][0]);
if(s->dquant)
put_bits(pb2, 2, dquant_code[s->dquant+2]);
assert(!s->progressive_sequence);
if(cbp)
put_bits(pb2, 1, s->interlaced_dct);
put_bits(pb2, 1, 1);
if(interleaved_stats){
s->misc_bits+= get_bits_diff(s);
}
/* motion vectors: 16x8 interlaced mode */
h263_pred_motion(s, 0, &pred_x, &pred_y);
pred_y /=2;
put_bits(&s->pb, 1, s->field_select[0][0]);
put_bits(&s->pb, 1, s->field_select[0][1]);
h263_encode_motion(s, s->mv[0][0][0] - pred_x, s->f_code);
h263_encode_motion(s, s->mv[0][0][1] - pred_y, s->f_code);
h263_encode_motion(s, s->mv[0][1][0] - pred_x, s->f_code);
h263_encode_motion(s, s->mv[0][1][1] - pred_y, s->f_code);
}else{
assert(s->mv_type==MV_TYPE_8X8);
put_bits(&s->pb,
inter_MCBPC_bits[cbpc+16],
inter_MCBPC_code[cbpc+16]);
......
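
Note the asymmetric /2 and *2 on the vertical component in the field-MV paths above: field vectors are coded in field coordinates while the stored predictor is kept in frame coordinates, so the predictor is halved before taking the difference and the newly coded vector is doubled before being stored. A small sketch of that bookkeeping (the helper name is hypothetical; it only mirrors the pattern in mpeg4_encode_mb above):

/* mv_y is in field lines, *last_mv_y (the predictor) is kept in frame
 * lines, hence the /2 before prediction and the *2 when storing back */
static int code_field_mv_y(int mv_y, int *last_mv_y)
{
    int diff = mv_y - (*last_mv_y / 2); /* predictor converted to field units */
    *last_mv_y = mv_y * 2;              /* stored back in frame units */
    return diff;                        /* value handed to h263_encode_motion() */
}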
......@@ -61,8 +61,8 @@ static const int h263_mb_type_b_map[15]= {
MB_TYPE_L0L1 | MB_TYPE_CBP | MB_TYPE_16x16,
MB_TYPE_L0L1 | MB_TYPE_CBP | MB_TYPE_QUANT | MB_TYPE_16x16,
0, //stuffing
MB_TYPE_INTRA | MB_TYPE_CBP,
MB_TYPE_INTRA | MB_TYPE_CBP | MB_TYPE_QUANT,
MB_TYPE_INTRA4x4 | MB_TYPE_CBP,
MB_TYPE_INTRA4x4 | MB_TYPE_CBP | MB_TYPE_QUANT,
};
const uint8_t cbpc_b_tab[4][2] = {
......
......@@ -687,10 +687,10 @@ static int pix_norm1_mmx(uint8_t *pix, int line_size) {
return tmp;
}
static int sse16_mmx(void *v, uint8_t * pix1, uint8_t * pix2, int line_size) {
static int sse16_mmx(void *v, uint8_t * pix1, uint8_t * pix2, int line_size, int h) {
int tmp;
asm volatile (
"movl $16,%%ecx\n"
"movl %4,%%ecx\n"
"pxor %%mm0,%%mm0\n" /* mm0 = 0 */
"pxor %%mm7,%%mm7\n" /* mm7 holds the sum */
"1:\n"
......@@ -741,7 +741,9 @@ static int sse16_mmx(void *v, uint8_t * pix1, uint8_t * pix2, int line_size) {
"psrlq $32, %%mm7\n" /* shift hi dword to lo */
"paddd %%mm7,%%mm1\n"
"movd %%mm1,%2\n"
: "+r" (pix1), "+r" (pix2), "=r"(tmp) : "r" (line_size) : "%ecx");
: "+r" (pix1), "+r" (pix2), "=r"(tmp)
: "r" (line_size) , "m" (h)
: "%ecx");
return tmp;
}
......@@ -866,9 +868,11 @@ static void sub_hfyu_median_prediction_mmx2(uint8_t *dst, uint8_t *src1, uint8_t
"movq "#c", "#o"+32(%1) \n\t"\
"movq "#d", "#o"+48(%1) \n\t"\
static int hadamard8_diff_mmx(void *s, uint8_t *src1, uint8_t *src2, int stride){
static int hadamard8_diff_mmx(void *s, uint8_t *src1, uint8_t *src2, int stride, int h){
uint64_t temp[16] __align8;
int sum=0;
assert(h==8);
diff_pixels_mmx((DCTELEM*)temp, src1, src2, stride);
......@@ -951,9 +955,11 @@ static int hadamard8_diff_mmx(void *s, uint8_t *src1, uint8_t *src2, int stride)
return sum&0xFFFF;
}
static int hadamard8_diff_mmx2(void *s, uint8_t *src1, uint8_t *src2, int stride){
static int hadamard8_diff_mmx2(void *s, uint8_t *src1, uint8_t *src2, int stride, int h){
uint64_t temp[16] __align8;
int sum=0;
assert(h==8);
diff_pixels_mmx((DCTELEM*)temp, src1, src2, stride);
......@@ -1037,8 +1043,8 @@ static int hadamard8_diff_mmx2(void *s, uint8_t *src1, uint8_t *src2, int stride
}
WARPER88_1616(hadamard8_diff_mmx, hadamard8_diff16_mmx)
WARPER88_1616(hadamard8_diff_mmx2, hadamard8_diff16_mmx2)
WARPER8_16_SQ(hadamard8_diff_mmx, hadamard8_diff16_mmx)
WARPER8_16_SQ(hadamard8_diff_mmx2, hadamard8_diff16_mmx2)
#endif //CONFIG_ENCODERS
#define put_no_rnd_pixels8_mmx(a,b,c,d) put_pixels8_mmx(a,b,c,d)
......
......@@ -28,9 +28,9 @@ static const __attribute__ ((aligned(8))) uint64_t round_tab[3]={
static __attribute__ ((aligned(8), unused)) uint64_t bone= 0x0101010101010101LL;
static inline void sad8_mmx(uint8_t *blk1, uint8_t *blk2, int stride, int h)
static inline void sad8_1_mmx(uint8_t *blk1, uint8_t *blk2, int stride, int h)
{
int len= -(stride<<h);
int len= -(stride*h);
asm volatile(
".balign 16 \n\t"
"1: \n\t"
......@@ -64,9 +64,9 @@ static inline void sad8_mmx(uint8_t *blk1, uint8_t *blk2, int stride, int h)
);
}
static inline void sad8_mmx2(uint8_t *blk1, uint8_t *blk2, int stride, int h)
static inline void sad8_1_mmx2(uint8_t *blk1, uint8_t *blk2, int stride, int h)
{
int len= -(stride<<h);
int len= -(stride*h);
asm volatile(
".balign 16 \n\t"
"1: \n\t"
......@@ -88,7 +88,7 @@ static inline void sad8_mmx2(uint8_t *blk1, uint8_t *blk2, int stride, int h)
static inline void sad8_2_mmx2(uint8_t *blk1a, uint8_t *blk1b, uint8_t *blk2, int stride, int h)
{
int len= -(stride<<h);
int len= -(stride*h);
asm volatile(
".balign 16 \n\t"
"1: \n\t"
......@@ -114,7 +114,7 @@ static inline void sad8_2_mmx2(uint8_t *blk1a, uint8_t *blk1b, uint8_t *blk2, in
static inline void sad8_4_mmx2(uint8_t *blk1, uint8_t *blk2, int stride, int h)
{ //FIXME reuse src
int len= -(stride<<h);
int len= -(stride*h);
asm volatile(
".balign 16 \n\t"
"movq "MANGLE(bone)", %%mm5 \n\t"
......@@ -151,7 +151,7 @@ static inline void sad8_4_mmx2(uint8_t *blk1, uint8_t *blk2, int stride, int h)
static inline void sad8_2_mmx(uint8_t *blk1a, uint8_t *blk1b, uint8_t *blk2, int stride, int h)
{
int len= -(stride<<h);
int len= -(stride*h);
asm volatile(
".balign 16 \n\t"
"1: \n\t"
......@@ -189,7 +189,7 @@ static inline void sad8_2_mmx(uint8_t *blk1a, uint8_t *blk1b, uint8_t *blk2, int
static inline void sad8_4_mmx(uint8_t *blk1, uint8_t *blk2, int stride, int h)
{
int len= -(stride<<h);
int len= -(stride*h);
asm volatile(
".balign 16 \n\t"
"1: \n\t"
......@@ -265,85 +265,69 @@ static inline int sum_mmx2(void)
#define PIX_SAD(suf)\
static int pix_abs8x8_ ## suf(uint8_t *blk2, uint8_t *blk1, int stride)\
static int sad8_ ## suf(void *v, uint8_t *blk2, uint8_t *blk1, int stride, int h)\
{\
assert(h==8);\
asm volatile("pxor %%mm7, %%mm7 \n\t"\
"pxor %%mm6, %%mm6 \n\t":);\
\
sad8_ ## suf(blk1, blk2, stride, 3);\
sad8_1_ ## suf(blk1, blk2, stride, 8);\
\
return sum_ ## suf();\
}\
static int sad8x8_ ## suf(void *s, uint8_t *blk2, uint8_t *blk1, int stride)\
{\
asm volatile("pxor %%mm7, %%mm7 \n\t"\
"pxor %%mm6, %%mm6 \n\t":);\
\
sad8_ ## suf(blk1, blk2, stride, 3);\
\
return sum_ ## suf();\
}\
\
static int pix_abs8x8_x2_ ## suf(uint8_t *blk2, uint8_t *blk1, int stride)\
static int sad8_x2_ ## suf(void *v, uint8_t *blk2, uint8_t *blk1, int stride, int h)\
{\
assert(h==8);\
asm volatile("pxor %%mm7, %%mm7 \n\t"\
"pxor %%mm6, %%mm6 \n\t"\
"movq %0, %%mm5 \n\t"\
:: "m"(round_tab[1]) \
);\
\
sad8_2_ ## suf(blk1, blk1+1, blk2, stride, 3);\
sad8_2_ ## suf(blk1, blk1+1, blk2, stride, 8);\
\
return sum_ ## suf();\
}\
\
static int pix_abs8x8_y2_ ## suf(uint8_t *blk2, uint8_t *blk1, int stride)\
static int sad8_y2_ ## suf(void *v, uint8_t *blk2, uint8_t *blk1, int stride, int h)\
{\
assert(h==8);\
asm volatile("pxor %%mm7, %%mm7 \n\t"\
"pxor %%mm6, %%mm6 \n\t"\
"movq %0, %%mm5 \n\t"\
:: "m"(round_tab[1]) \
);\
\
sad8_2_ ## suf(blk1, blk1+stride, blk2, stride, 3);\
sad8_2_ ## suf(blk1, blk1+stride, blk2, stride, 8);\
\
return sum_ ## suf();\
}\
\
static int pix_abs8x8_xy2_ ## suf(uint8_t *blk2, uint8_t *blk1, int stride)\
static int sad8_xy2_ ## suf(void *v, uint8_t *blk2, uint8_t *blk1, int stride, int h)\
{\
assert(h==8);\
asm volatile("pxor %%mm7, %%mm7 \n\t"\
"pxor %%mm6, %%mm6 \n\t"\
"movq %0, %%mm5 \n\t"\
:: "m"(round_tab[2]) \
);\
\
sad8_4_ ## suf(blk1, blk2, stride, 3);\
sad8_4_ ## suf(blk1, blk2, stride, 8);\
\
return sum_ ## suf();\
}\
\
static int pix_abs16x16_ ## suf(uint8_t *blk2, uint8_t *blk1, int stride)\
{\
asm volatile("pxor %%mm7, %%mm7 \n\t"\
"pxor %%mm6, %%mm6 \n\t":);\
\
sad8_ ## suf(blk1 , blk2 , stride, 4);\
sad8_ ## suf(blk1+8, blk2+8, stride, 4);\
\
return sum_ ## suf();\
}\
static int sad16x16_ ## suf(void *s, uint8_t *blk2, uint8_t *blk1, int stride)\
static int sad16_ ## suf(void *v, uint8_t *blk2, uint8_t *blk1, int stride, int h)\
{\
asm volatile("pxor %%mm7, %%mm7 \n\t"\
"pxor %%mm6, %%mm6 \n\t":);\
\
sad8_ ## suf(blk1 , blk2 , stride, 4);\
sad8_ ## suf(blk1+8, blk2+8, stride, 4);\
sad8_1_ ## suf(blk1 , blk2 , stride, h);\
sad8_1_ ## suf(blk1+8, blk2+8, stride, h);\
\
return sum_ ## suf();\
}\
static int pix_abs16x16_x2_ ## suf(uint8_t *blk2, uint8_t *blk1, int stride)\
static int sad16_x2_ ## suf(void *v, uint8_t *blk2, uint8_t *blk1, int stride, int h)\
{\
asm volatile("pxor %%mm7, %%mm7 \n\t"\
"pxor %%mm6, %%mm6 \n\t"\
......@@ -351,12 +335,12 @@ static int pix_abs16x16_x2_ ## suf(uint8_t *blk2, uint8_t *blk1, int stride)\
:: "m"(round_tab[1]) \
);\
\
sad8_2_ ## suf(blk1 , blk1+1, blk2 , stride, 4);\
sad8_2_ ## suf(blk1+8, blk1+9, blk2+8, stride, 4);\
sad8_2_ ## suf(blk1 , blk1+1, blk2 , stride, h);\
sad8_2_ ## suf(blk1+8, blk1+9, blk2+8, stride, h);\
\
return sum_ ## suf();\
}\
static int pix_abs16x16_y2_ ## suf(uint8_t *blk2, uint8_t *blk1, int stride)\
static int sad16_y2_ ## suf(void *v, uint8_t *blk2, uint8_t *blk1, int stride, int h)\
{\
asm volatile("pxor %%mm7, %%mm7 \n\t"\
"pxor %%mm6, %%mm6 \n\t"\
......@@ -364,12 +348,12 @@ static int pix_abs16x16_y2_ ## suf(uint8_t *blk2, uint8_t *blk1, int stride)\
:: "m"(round_tab[1]) \
);\
\
sad8_2_ ## suf(blk1 , blk1+stride, blk2 , stride, 4);\
sad8_2_ ## suf(blk1+8, blk1+stride+8,blk2+8, stride, 4);\
sad8_2_ ## suf(blk1 , blk1+stride, blk2 , stride, h);\
sad8_2_ ## suf(blk1+8, blk1+stride+8,blk2+8, stride, h);\
\
return sum_ ## suf();\
}\
static int pix_abs16x16_xy2_ ## suf(uint8_t *blk2, uint8_t *blk1, int stride)\
static int sad16_xy2_ ## suf(void *v, uint8_t *blk2, uint8_t *blk1, int stride, int h)\
{\
asm volatile("pxor %%mm7, %%mm7 \n\t"\
"pxor %%mm6, %%mm6 \n\t"\
......@@ -377,8 +361,8 @@ static int pix_abs16x16_xy2_ ## suf(uint8_t *blk2, uint8_t *blk1, int stride)\
:: "m"(round_tab[2]) \
);\
\
sad8_4_ ## suf(blk1 , blk2 , stride, 4);\
sad8_4_ ## suf(blk1+8, blk2+8, stride, 4);\
sad8_4_ ## suf(blk1 , blk2 , stride, h);\
sad8_4_ ## suf(blk1+8, blk2+8, stride, h);\
\
return sum_ ## suf();\
}\
......@@ -389,32 +373,32 @@ PIX_SAD(mmx2)
void dsputil_init_pix_mmx(DSPContext* c, AVCodecContext *avctx)
{
if (mm_flags & MM_MMX) {
c->pix_abs16x16 = pix_abs16x16_mmx;
c->pix_abs16x16_x2 = pix_abs16x16_x2_mmx;
c->pix_abs16x16_y2 = pix_abs16x16_y2_mmx;
c->pix_abs16x16_xy2 = pix_abs16x16_xy2_mmx;
c->pix_abs8x8 = pix_abs8x8_mmx;
c->pix_abs8x8_x2 = pix_abs8x8_x2_mmx;
c->pix_abs8x8_y2 = pix_abs8x8_y2_mmx;
c->pix_abs8x8_xy2 = pix_abs8x8_xy2_mmx;
c->pix_abs[0][0] = sad16_mmx;
c->pix_abs[0][1] = sad16_x2_mmx;
c->pix_abs[0][2] = sad16_y2_mmx;
c->pix_abs[0][3] = sad16_xy2_mmx;
c->pix_abs[1][0] = sad8_mmx;
c->pix_abs[1][1] = sad8_x2_mmx;
c->pix_abs[1][2] = sad8_y2_mmx;
c->pix_abs[1][3] = sad8_xy2_mmx;
c->sad[0]= sad16x16_mmx;
c->sad[1]= sad8x8_mmx;
c->sad[0]= sad16_mmx;
c->sad[1]= sad8_mmx;
}
if (mm_flags & MM_MMXEXT) {
c->pix_abs16x16 = pix_abs16x16_mmx2;
c->pix_abs8x8 = pix_abs8x8_mmx2;
c->pix_abs[0][0] = sad16_mmx2;
c->pix_abs[1][0] = sad8_mmx2;
c->sad[0]= sad16x16_mmx2;
c->sad[1]= sad8x8_mmx2;
c->sad[0]= sad16_mmx2;
c->sad[1]= sad8_mmx2;
if(!(avctx->flags & CODEC_FLAG_BITEXACT)){
c->pix_abs16x16_x2 = pix_abs16x16_x2_mmx2;
c->pix_abs16x16_y2 = pix_abs16x16_y2_mmx2;
c->pix_abs16x16_xy2 = pix_abs16x16_xy2_mmx2;
c->pix_abs8x8_x2 = pix_abs8x8_x2_mmx2;
c->pix_abs8x8_y2 = pix_abs8x8_y2_mmx2;
c->pix_abs8x8_xy2 = pix_abs8x8_xy2_mmx2;
c->pix_abs[0][1] = sad16_x2_mmx2;
c->pix_abs[0][2] = sad16_y2_mmx2;
c->pix_abs[0][3] = sad16_xy2_mmx2;
c->pix_abs[1][1] = sad8_x2_mmx2;
c->pix_abs[1][2] = sad8_y2_mmx2;
c->pix_abs[1][3] = sad8_xy2_mmx2;
}
}
}
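
The inner MMX helpers also switch from -(stride<<h) to -(stride*h): previously h was passed as log2 of the block height (3 for 8 rows, 4 for 16), now the callers forward the real height that the new me_cmp_func signature hands down. A quick plain-C illustration of the equivalence (for clarity only):

/* Old convention: h = log2(height)    -> len = -(stride << h)
 * New convention: h = height in rows  -> len = -(stride * h)            */
static int block_len_old(int stride, int log2_h) { return -(stride << log2_h); }
static int block_len_new(int stride, int h)      { return -(stride * h); }
/* block_len_old(stride, 3) == block_len_new(stride, 8)
 * block_len_old(stride, 4) == block_len_new(stride, 16)
 * (the negative value is the usual trick of indexing from the end of the
 *  block with a counter that runs up towards zero)                       */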
This diff is collapsed.
This diff is collapsed.
......@@ -29,6 +29,9 @@
#include "mpeg12data.h"
//#undef NDEBUG
//#include <assert.h>
/* Start codes. */
#define SEQ_END_CODE 0x000001b7
......@@ -476,12 +479,12 @@ void mpeg1_encode_picture_header(MpegEncContext *s, int picture_number)
}
static inline void put_mb_modes(MpegEncContext *s, int n, int bits,
int has_mv)
int has_mv, int field_motion)
{
put_bits(&s->pb, n, bits);
if (!s->frame_pred_frame_dct) {
if (has_mv)
put_bits(&s->pb, 2, 2); /* motion_type: frame */
put_bits(&s->pb, 2, 2 - field_motion); /* motion_type: frame/field */
put_bits(&s->pb, 1, s->interlaced_dct);
}
}
......@@ -501,9 +504,9 @@ void mpeg1_encode_mb(MpegEncContext *s,
if (s->block_last_index[i] >= 0)
cbp |= 1 << (5 - i);
}
if (cbp == 0 && !first_mb && (mb_x != s->mb_width - 1 || (mb_y != s->mb_height - 1 && s->codec_id == CODEC_ID_MPEG1VIDEO)) &&
((s->pict_type == P_TYPE && (motion_x | motion_y) == 0) ||
((s->pict_type == P_TYPE && s->mv_type == MV_TYPE_16X16 && (motion_x | motion_y) == 0) ||
(s->pict_type == B_TYPE && s->mv_dir == s->last_mv_dir && (((s->mv_dir & MV_DIR_FORWARD) ? ((s->mv[0][0][0] - s->last_mv[0][0][0])|(s->mv[0][0][1] - s->last_mv[0][0][1])) : 0) |
((s->mv_dir & MV_DIR_BACKWARD) ? ((s->mv[1][0][0] - s->last_mv[1][0][0])|(s->mv[1][0][1] - s->last_mv[1][0][1])) : 0)) == 0))) {
s->mb_skip_run++;
......@@ -511,6 +514,10 @@ void mpeg1_encode_mb(MpegEncContext *s,
s->skip_count++;
s->misc_bits++;
s->last_bits++;
if(s->pict_type == P_TYPE){
s->last_mv[0][1][0]= s->last_mv[0][0][0]=
s->last_mv[0][1][1]= s->last_mv[0][0][1]= 0;
}
} else {
if(first_mb){
assert(s->mb_skip_run == 0);
......@@ -521,150 +528,167 @@ void mpeg1_encode_mb(MpegEncContext *s,
if (s->pict_type == I_TYPE) {
if(s->dquant && cbp){
put_mb_modes(s, 2, 1, 0); /* macroblock_type : macroblock_quant = 1 */
put_mb_modes(s, 2, 1, 0, 0); /* macroblock_type : macroblock_quant = 1 */
put_bits(&s->pb, 5, s->qscale);
}else{
put_mb_modes(s, 1, 1, 0); /* macroblock_type : macroblock_quant = 0 */
put_mb_modes(s, 1, 1, 0, 0); /* macroblock_type : macroblock_quant = 0 */
s->qscale -= s->dquant;
}
s->misc_bits+= get_bits_diff(s);
s->i_count++;
} else if (s->mb_intra) {
if(s->dquant && cbp){
put_mb_modes(s, 6, 0x01, 0);
put_mb_modes(s, 6, 0x01, 0, 0);
put_bits(&s->pb, 5, s->qscale);
}else{
put_mb_modes(s, 5, 0x03, 0);
put_mb_modes(s, 5, 0x03, 0, 0);
s->qscale -= s->dquant;
}
s->misc_bits+= get_bits_diff(s);
s->i_count++;
s->last_mv[0][0][0] =
s->last_mv[0][0][1] = 0;
memset(s->last_mv, 0, sizeof(s->last_mv));
} else if (s->pict_type == P_TYPE) {
if(s->mv_type == MV_TYPE_16X16){
if (cbp != 0) {
if (motion_x == 0 && motion_y == 0) {
if ((motion_x|motion_y) == 0) {
if(s->dquant){
put_mb_modes(s, 5, 1, 0); /* macroblock_pattern & quant */
put_mb_modes(s, 5, 1, 0, 0); /* macroblock_pattern & quant */
put_bits(&s->pb, 5, s->qscale);
}else{
put_mb_modes(s, 2, 1, 0); /* macroblock_pattern only */
put_mb_modes(s, 2, 1, 0, 0); /* macroblock_pattern only */
}
s->misc_bits+= get_bits_diff(s);
put_bits(&s->pb, mbPatTable[cbp - 1][1], mbPatTable[cbp - 1][0]);
} else {
if(s->dquant){
put_mb_modes(s, 5, 2, 1); /* motion + cbp */
put_mb_modes(s, 5, 2, 1, 0); /* motion + cbp */
put_bits(&s->pb, 5, s->qscale);
}else{
put_mb_modes(s, 1, 1, 1); /* motion + cbp */
put_mb_modes(s, 1, 1, 1, 0); /* motion + cbp */
}
s->misc_bits+= get_bits_diff(s);
mpeg1_encode_motion(s, motion_x - s->last_mv[0][0][0], s->f_code); // RAL: f_code parameter added
mpeg1_encode_motion(s, motion_y - s->last_mv[0][0][1], s->f_code); // RAL: f_code parameter added
s->mv_bits+= get_bits_diff(s);
put_bits(&s->pb, mbPatTable[cbp - 1][1], mbPatTable[cbp - 1][0]);
}
} else {
put_bits(&s->pb, 3, 1); /* motion only */
if (!s->frame_pred_frame_dct)
put_bits(&s->pb, 2, 2); /* motion_type: frame */
s->misc_bits+= get_bits_diff(s);
mpeg1_encode_motion(s, motion_x - s->last_mv[0][0][0], s->f_code); // RAL: f_code parameter added
mpeg1_encode_motion(s, motion_y - s->last_mv[0][0][1], s->f_code); // RAL: f_code parameter added
s->qscale -= s->dquant;
s->mv_bits+= get_bits_diff(s);
}
s->f_count++;
} else
{ // RAL: All the following bloc added for B frames:
if (cbp != 0)
{ // With coded bloc pattern
if (s->mv_dir == (MV_DIR_FORWARD | MV_DIR_BACKWARD))
{ // Bi-directional motion
if (s->dquant) {
put_mb_modes(s, 5, 2, 1);
put_bits(&s->pb, 5, s->qscale);
} else {
put_mb_modes(s, 2, 3, 1);
}
s->misc_bits += get_bits_diff(s);
mpeg1_encode_motion(s, s->mv[0][0][0] - s->last_mv[0][0][0], s->f_code);
mpeg1_encode_motion(s, s->mv[0][0][1] - s->last_mv[0][0][1], s->f_code);
mpeg1_encode_motion(s, s->mv[1][0][0] - s->last_mv[1][0][0], s->b_code);
mpeg1_encode_motion(s, s->mv[1][0][1] - s->last_mv[1][0][1], s->b_code);
s->b_count++;
s->f_count++;
s->mv_bits += get_bits_diff(s);
put_bits(&s->pb, mbPatTable[cbp - 1][1], mbPatTable[cbp - 1][0]);
}
else if (s->mv_dir == MV_DIR_BACKWARD)
{ // Backward motion
if (s->dquant) {
put_mb_modes(s, 6, 2, 1);
put_bits(&s->pb, 5, s->qscale);
} else {
put_mb_modes(s, 3, 3, 1);
}
s->misc_bits += get_bits_diff(s);
mpeg1_encode_motion(s, motion_x - s->last_mv[1][0][0], s->b_code);
mpeg1_encode_motion(s, motion_y - s->last_mv[1][0][1], s->b_code);
s->b_count++;
s->mv_bits += get_bits_diff(s);
put_bits(&s->pb, mbPatTable[cbp - 1][1], mbPatTable[cbp - 1][0]);
}
else if (s->mv_dir == MV_DIR_FORWARD)
{ // Forward motion
if (s->dquant) {
put_mb_modes(s, 6, 3, 1);
put_bits(&s->pb, 5, s->qscale);
} else {
put_mb_modes(s, 4, 3, 1);
}
s->misc_bits += get_bits_diff(s);
mpeg1_encode_motion(s, motion_x - s->last_mv[0][0][0], s->f_code);
mpeg1_encode_motion(s, motion_y - s->last_mv[0][0][1], s->f_code);
s->f_count++;
s->mv_bits += get_bits_diff(s);
put_bits(&s->pb, mbPatTable[cbp - 1][1], mbPatTable[cbp - 1][0]);
}
s->last_mv[0][1][0]= s->last_mv[0][0][0]= motion_x;
s->last_mv[0][1][1]= s->last_mv[0][0][1]= motion_y;
}else{
assert(!s->frame_pred_frame_dct && s->mv_type == MV_TYPE_FIELD);
if (cbp) {
if(s->dquant){
put_mb_modes(s, 5, 2, 1, 1); /* motion + cbp */
put_bits(&s->pb, 5, s->qscale);
}else{
put_mb_modes(s, 1, 1, 1, 1); /* motion + cbp */
}
else
{ // No coded bloc pattern
if (s->mv_dir == (MV_DIR_FORWARD | MV_DIR_BACKWARD))
{ // Bi-directional motion
put_bits(&s->pb, 2, 2); /* backward & forward motion */
if (!s->frame_pred_frame_dct)
put_bits(&s->pb, 2, 2); /* motion_type: frame */
mpeg1_encode_motion(s, s->mv[0][0][0] - s->last_mv[0][0][0], s->f_code);
mpeg1_encode_motion(s, s->mv[0][0][1] - s->last_mv[0][0][1], s->f_code);
mpeg1_encode_motion(s, s->mv[1][0][0] - s->last_mv[1][0][0], s->b_code);
mpeg1_encode_motion(s, s->mv[1][0][1] - s->last_mv[1][0][1], s->b_code);
s->b_count++;
s->f_count++;
}
else if (s->mv_dir == MV_DIR_BACKWARD)
{ // Backward motion
put_bits(&s->pb, 3, 2); /* backward motion only */
if (!s->frame_pred_frame_dct)
put_bits(&s->pb, 2, 2); /* motion_type: frame */
mpeg1_encode_motion(s, motion_x - s->last_mv[1][0][0], s->b_code);
mpeg1_encode_motion(s, motion_y - s->last_mv[1][0][1], s->b_code);
s->b_count++;
}
else if (s->mv_dir == MV_DIR_FORWARD)
{ // Forward motion
put_bits(&s->pb, 4, 2); /* forward motion only */
if (!s->frame_pred_frame_dct)
put_bits(&s->pb, 2, 2); /* motion_type: frame */
mpeg1_encode_motion(s, motion_x - s->last_mv[0][0][0], s->f_code);
mpeg1_encode_motion(s, motion_y - s->last_mv[0][0][1], s->f_code);
s->f_count++;
}
} else {
put_bits(&s->pb, 3, 1); /* motion only */
put_bits(&s->pb, 2, 1); /* motion_type: field */
s->qscale -= s->dquant;
s->mv_bits += get_bits_diff(s);
}
s->misc_bits+= get_bits_diff(s);
for(i=0; i<2; i++){
put_bits(&s->pb, 1, s->field_select[0][i]);
mpeg1_encode_motion(s, s->mv[0][i][0] - s->last_mv[0][i][0] , s->f_code);
mpeg1_encode_motion(s, s->mv[0][i][1] - (s->last_mv[0][i][1]>>1), s->f_code);
s->last_mv[0][i][0]= s->mv[0][i][0];
s->last_mv[0][i][1]= 2*s->mv[0][i][1];
}
s->mv_bits+= get_bits_diff(s);
}
if(cbp)
put_bits(&s->pb, mbPatTable[cbp - 1][1], mbPatTable[cbp - 1][0]);
s->f_count++;
} else{
static const int mb_type_len[4]={0,3,4,2}; //bak,for,bi
if(s->mv_type == MV_TYPE_16X16){
if (cbp){ // With coded bloc pattern
if (s->dquant) {
if(s->mv_dir == MV_DIR_FORWARD)
put_mb_modes(s, 6, 3, 1, 0);
else
put_mb_modes(s, mb_type_len[s->mv_dir]+3, 2, 1, 0);
put_bits(&s->pb, 5, s->qscale);
} else {
put_mb_modes(s, mb_type_len[s->mv_dir], 3, 1, 0);
}
}else{ // No coded bloc pattern
put_bits(&s->pb, mb_type_len[s->mv_dir], 2);
if (!s->frame_pred_frame_dct)
put_bits(&s->pb, 2, 2); /* motion_type: frame */
s->qscale -= s->dquant;
}
s->misc_bits += get_bits_diff(s);
if (s->mv_dir&MV_DIR_FORWARD){
mpeg1_encode_motion(s, s->mv[0][0][0] - s->last_mv[0][0][0], s->f_code);
mpeg1_encode_motion(s, s->mv[0][0][1] - s->last_mv[0][0][1], s->f_code);
s->last_mv[0][0][0]=s->last_mv[0][1][0]= s->mv[0][0][0];
s->last_mv[0][0][1]=s->last_mv[0][1][1]= s->mv[0][0][1];
s->f_count++;
}
if (s->mv_dir&MV_DIR_BACKWARD){
mpeg1_encode_motion(s, s->mv[1][0][0] - s->last_mv[1][0][0], s->b_code);
mpeg1_encode_motion(s, s->mv[1][0][1] - s->last_mv[1][0][1], s->b_code);
s->last_mv[1][0][0]=s->last_mv[1][1][0]= s->mv[1][0][0];
s->last_mv[1][0][1]=s->last_mv[1][1][1]= s->mv[1][0][1];
s->b_count++;
}
}else{
assert(s->mv_type == MV_TYPE_FIELD);
assert(!s->frame_pred_frame_dct);
if (cbp){ // With coded bloc pattern
if (s->dquant) {
if(s->mv_dir == MV_DIR_FORWARD)
put_mb_modes(s, 6, 3, 1, 1);
else
put_mb_modes(s, mb_type_len[s->mv_dir]+3, 2, 1, 1);
put_bits(&s->pb, 5, s->qscale);
} else {
put_mb_modes(s, mb_type_len[s->mv_dir], 3, 1, 1);
}
// End of bloc from RAL
}else{ // No coded bloc pattern
put_bits(&s->pb, mb_type_len[s->mv_dir], 2);
put_bits(&s->pb, 2, 1); /* motion_type: field */
s->qscale -= s->dquant;
}
s->misc_bits += get_bits_diff(s);
if (s->mv_dir&MV_DIR_FORWARD){
for(i=0; i<2; i++){
put_bits(&s->pb, 1, s->field_select[0][i]);
mpeg1_encode_motion(s, s->mv[0][i][0] - s->last_mv[0][i][0] , s->f_code);
mpeg1_encode_motion(s, s->mv[0][i][1] - (s->last_mv[0][i][1]>>1), s->f_code);
s->last_mv[0][i][0]= s->mv[0][i][0];
s->last_mv[0][i][1]= 2*s->mv[0][i][1];
}
s->f_count++;
}
if (s->mv_dir&MV_DIR_BACKWARD){
for(i=0; i<2; i++){
put_bits(&s->pb, 1, s->field_select[1][i]);
mpeg1_encode_motion(s, s->mv[1][i][0] - s->last_mv[1][i][0] , s->b_code);
mpeg1_encode_motion(s, s->mv[1][i][1] - (s->last_mv[1][i][1]>>1), s->b_code);
s->last_mv[1][i][0]= s->mv[1][i][0];
s->last_mv[1][i][1]= 2*s->mv[1][i][1];
}
s->b_count++;
}
}
s->mv_bits += get_bits_diff(s);
if(cbp)
put_bits(&s->pb, mbPatTable[cbp - 1][1], mbPatTable[cbp - 1][0]);
}
for(i=0;i<6;i++) {
if (cbp & (1 << (5 - i))) {
mpeg1_encode_block(s, block[i], i);
......@@ -676,18 +700,6 @@ void mpeg1_encode_mb(MpegEncContext *s,
else
s->p_tex_bits+= get_bits_diff(s);
}
// RAL: By this:
if (s->mv_dir & MV_DIR_FORWARD)
{
s->last_mv[0][0][0]= s->mv[0][0][0];
s->last_mv[0][0][1]= s->mv[0][0][1];
}
if (s->mv_dir & MV_DIR_BACKWARD)
{
s->last_mv[1][0][0]= s->mv[1][0][0];
s->last_mv[1][0][1]= s->mv[1][0][1];
}
}
// RAL: Parameter added: f_or_b_code
......@@ -1952,7 +1964,7 @@ static void mpeg_decode_picture_coding_extension(MpegEncContext *s)
s->repeat_first_field = get_bits1(&s->gb);
s->chroma_420_type = get_bits1(&s->gb);
s->progressive_frame = get_bits1(&s->gb);
if(s->picture_structure == PICT_FRAME)
s->first_field=0;
else{
......@@ -1963,13 +1975,9 @@ static void mpeg_decode_picture_coding_extension(MpegEncContext *s)
if(s->alternate_scan){
ff_init_scantable(s->dsp.idct_permutation, &s->inter_scantable , ff_alternate_vertical_scan);
ff_init_scantable(s->dsp.idct_permutation, &s->intra_scantable , ff_alternate_vertical_scan);
ff_init_scantable(s->dsp.idct_permutation, &s->intra_h_scantable, ff_alternate_vertical_scan);
ff_init_scantable(s->dsp.idct_permutation, &s->intra_v_scantable, ff_alternate_vertical_scan);
}else{
ff_init_scantable(s->dsp.idct_permutation, &s->inter_scantable , ff_zigzag_direct);
ff_init_scantable(s->dsp.idct_permutation, &s->intra_scantable , ff_zigzag_direct);
ff_init_scantable(s->dsp.idct_permutation, &s->intra_h_scantable, ff_alternate_horizontal_scan);
ff_init_scantable(s->dsp.idct_permutation, &s->intra_v_scantable, ff_alternate_vertical_scan);
}
/* composite display not parsed */
......@@ -2103,10 +2111,10 @@ static int mpeg_decode_slice(AVCodecContext *avctx,
s->qscale = get_qscale(s);
if (s->first_slice && (s->first_field || s->picture_structure==PICT_FRAME)) {
if(s->avctx->debug&FF_DEBUG_PICT_INFO){
av_log(s->avctx, AV_LOG_DEBUG, "qp:%d fc:%2d%2d%2d%2d %s %s %s %s dc:%d pstruct:%d fdct:%d cmv:%d qtype:%d ivlc:%d rff:%d %s\n",
av_log(s->avctx, AV_LOG_DEBUG, "qp:%d fc:%2d%2d%2d%2d %s %s %s %s %s dc:%d pstruct:%d fdct:%d cmv:%d qtype:%d ivlc:%d rff:%d %s\n",
s->qscale, s->mpeg_f_code[0][0],s->mpeg_f_code[0][1],s->mpeg_f_code[1][0],s->mpeg_f_code[1][1],
s->pict_type == I_TYPE ? "I" : (s->pict_type == P_TYPE ? "P" : (s->pict_type == B_TYPE ? "B" : "S")),
s->progressive_sequence ? "pro" :"", s->alternate_scan ? "alt" :"", s->top_field_first ? "top" :"",
s->progressive_sequence ? "ps" :"", s->progressive_frame ? "pf" : "", s->alternate_scan ? "alt" :"", s->top_field_first ? "top" :"",
s->intra_dc_precision, s->picture_structure, s->frame_pred_frame_dct, s->concealment_motion_vectors,
s->q_scale_type, s->intra_vlc_format, s->repeat_first_field, s->chroma_420_type ? "420" :"");
}
......
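
The extra field_motion argument to put_mb_modes() selects the MPEG-2 motion_type code written after the macroblock type whenever frame_pred_frame_dct is 0: '10' (2) for frame-based prediction, '01' (1) for field-based prediction, hence the `2 - field_motion` above. A minimal sketch of that mapping (values per the MPEG-2 frame-picture motion_type table; the helper itself is illustrative):

/* frame_motion_type codes in MPEG-2 frame pictures:
 *   01 = field-based prediction, 10 = frame-based prediction */
static int frame_motion_type_code(int field_motion)
{
    return 2 - field_motion;   /* field_motion: 0 -> frame (2), 1 -> field (1) */
}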
This diff is collapsed.
......@@ -137,6 +137,7 @@ typedef struct Picture{
int16_t (*motion_val_base[2])[2];
int8_t *ref_index[2];
uint32_t *mb_type_base;
#define MB_TYPE_INTRA MB_TYPE_INTRA4x4 //default mb_type if there is just one type
#define IS_INTRA4x4(a) ((a)&MB_TYPE_INTRA4x4)
#define IS_INTRA16x16(a) ((a)&MB_TYPE_INTRA16x16)
#define IS_PCM(a) ((a)&MB_TYPE_INTRA_PCM)
......@@ -206,23 +207,28 @@ typedef struct MotionEstContext{
int mb_penalty_factor;
int pre_pass; ///< = 1 for the pre pass
int dia_size;
int xmin;
int xmax;
int ymin;
int ymax;
uint8_t (*mv_penalty)[MAX_MV*2+1]; ///< amount of bits needed to encode a MV
int (*sub_motion_search)(struct MpegEncContext * s,
int *mx_ptr, int *my_ptr, int dmin,
int xmin, int ymin, int xmax, int ymax,
int pred_x, int pred_y, Picture *ref_picture,
int n, int size, uint8_t * const mv_penalty);
int (*motion_search[7])(struct MpegEncContext * s, int block,
int pred_x, int pred_y, uint8_t *src_data[3],
uint8_t *ref_data[6], int stride, int uvstride,
int size, int h, uint8_t * const mv_penalty);
int (*motion_search[7])(struct MpegEncContext * s,
int *mx_ptr, int *my_ptr,
int P[10][2], int pred_x, int pred_y,
int xmin, int ymin, int xmax, int ymax, Picture *ref_picture, int16_t (*last_mv)[2],
int P[10][2], int pred_x, int pred_y, uint8_t *src_data[3],
uint8_t *ref_data[6], int stride, int uvstride, int16_t (*last_mv)[2],
int ref_mv_scale, uint8_t * const mv_penalty);
int (*pre_motion_search)(struct MpegEncContext * s, int block,
int (*pre_motion_search)(struct MpegEncContext * s,
int *mx_ptr, int *my_ptr,
int P[10][2], int pred_x, int pred_y,
int xmin, int ymin, int xmax, int ymax, Picture *ref_picture, int16_t (*last_mv)[2],
int P[10][2], int pred_x, int pred_y, uint8_t *src_data[3],
uint8_t *ref_data[6], int stride, int uvstride, int16_t (*last_mv)[2],
int ref_mv_scale, uint8_t * const mv_penalty);
int (*get_mb_score)(struct MpegEncContext * s, int mx, int my, int pred_x, int pred_y, Picture *ref_picture,
int (*get_mb_score)(struct MpegEncContext * s, int mx, int my, int pred_x, int pred_y, uint8_t *src_data[3],
uint8_t *ref_data[6], int stride, int uvstride,
uint8_t * const mv_penalty);
}MotionEstContext;
......@@ -351,12 +357,18 @@ typedef struct MpegEncContext {
int16_t (*b_bidir_forw_mv_table_base)[2];
int16_t (*b_bidir_back_mv_table_base)[2];
int16_t (*b_direct_mv_table_base)[2];
int16_t (*p_field_mv_table_base[2][2])[2];
int16_t (*b_field_mv_table_base[2][2][2])[2];
int16_t (*p_mv_table)[2]; ///< MV table (1MV per MB) p-frame encoding
int16_t (*b_forw_mv_table)[2]; ///< MV table (1MV per MB) forward mode b-frame encoding
int16_t (*b_back_mv_table)[2]; ///< MV table (1MV per MB) backward mode b-frame encoding
int16_t (*b_bidir_forw_mv_table)[2]; ///< MV table (1MV per MB) bidir mode b-frame encoding
int16_t (*b_bidir_back_mv_table)[2]; ///< MV table (1MV per MB) bidir mode b-frame encoding
int16_t (*b_direct_mv_table)[2]; ///< MV table (1MV per MB) direct mode b-frame encoding
int16_t (*p_field_mv_table[2][2])[2]; ///< MV table (2MV per MB) interlaced p-frame encoding
int16_t (*b_field_mv_table[2][2][2])[2];///< MV table (4MV per MB) interlaced b-frame encoding
uint8_t (*p_field_select_table[2]);
uint8_t (*b_field_select_table[2][2]);
int me_method; ///< ME algorithm
int scene_change_score;
int mv_dir;
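The new p_field_mv_table[2][2] and b_field_mv_table[2][2][2] members are per-field MV tables for interlaced P- and B-frame encoding (2 and 4 MVs per MB, per the comments above). A minimal allocation/indexing sketch follows; the assumed index meaning of [current field][selected reference field] and the one-(x,y)-pair-per-MB layout are illustrative assumptions, not taken from the patch:

#include <stdint.h>
#include <stdlib.h>

/* Sketch: per-field MV tables for interlaced P-frame encoding, one
 * int16_t[2] (x,y) entry per macroblock.  Assumed indexing is
 * [current field][selected reference field]; the real layout in
 * mpegvideo.c may differ. */
typedef int16_t (*mv_table)[2];

static mv_table alloc_mv_table(int mb_count)
{
    return calloc(mb_count, sizeof(int16_t[2]));
}

int main(void)
{
    int mb_count = 45 * 36;                /* e.g. 720x576 in 16x16 MBs */
    mv_table p_field_mv_table[2][2];

    for (int i = 0; i < 2; i++)
        for (int j = 0; j < 2; j++)
            p_field_mv_table[i][j] = alloc_mv_table(mb_count);

    /* store a vector for MB 0: top field predicted from the bottom field */
    p_field_mv_table[0][1][0][0] = 4;      /* x */
    p_field_mv_table[0][1][0][1] = -2;     /* y */

    for (int i = 0; i < 2; i++)
        for (int j = 0; j < 2; j++)
            free(p_field_mv_table[i][j]);
    return 0;
}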
......@@ -391,17 +403,22 @@ typedef struct MpegEncContext {
int mb_x, mb_y;
int mb_skip_run;
int mb_intra;
uint8_t *mb_type; ///< Table for MB type FIXME remove and use picture->mb_type
#define MB_TYPE_INTRA 0x01
#define MB_TYPE_INTER 0x02
#define MB_TYPE_INTER4V 0x04
#define MB_TYPE_SKIPED 0x08
uint16_t *mb_type; ///< Table for candidate MB types for encoding
#define CANDIDATE_MB_TYPE_INTRA 0x01
#define CANDIDATE_MB_TYPE_INTER 0x02
#define CANDIDATE_MB_TYPE_INTER4V 0x04
#define CANDIDATE_MB_TYPE_SKIPED 0x08
//#define MB_TYPE_GMC 0x10
#define MB_TYPE_DIRECT 0x10
#define MB_TYPE_FORWARD 0x20
#define MB_TYPE_BACKWARD 0x40
#define MB_TYPE_BIDIR 0x80
#define CANDIDATE_MB_TYPE_DIRECT 0x10
#define CANDIDATE_MB_TYPE_FORWARD 0x20
#define CANDIDATE_MB_TYPE_BACKWARD 0x40
#define CANDIDATE_MB_TYPE_BIDIR 0x80
#define CANDIDATE_MB_TYPE_INTER_I 0x100
#define CANDIDATE_MB_TYPE_FORWARD_I 0x200
#define CANDIDATE_MB_TYPE_BACKWARD_I 0x400
#define CANDIDATE_MB_TYPE_BIDIR_I 0x800
int block_index[6]; ///< index to current MB in block based arrays with edges
int block_wrap[6];
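The encoder-side MB_TYPE_* flags are renamed to CANDIDATE_MB_TYPE_* and the table widened to uint16_t so that the new interlaced candidates (the *_I values) fit. A minimal sketch of how such a candidate bitmask might be filled and tested; the decision logic is illustrative only, not the encoder's actual selection code:

#include <stdint.h>
#include <stdio.h>

/* Same flag values as in the header above. */
#define CANDIDATE_MB_TYPE_INTRA    0x01
#define CANDIDATE_MB_TYPE_INTER    0x02
#define CANDIDATE_MB_TYPE_INTER4V  0x04
#define CANDIDATE_MB_TYPE_INTER_I  0x100

int main(void)
{
    /* One candidate mask per MB: the ME pass ORs in every mode worth
     * evaluating, and the coder later picks the cheapest of them. */
    uint16_t mb_type = CANDIDATE_MB_TYPE_INTRA | CANDIDATE_MB_TYPE_INTER;

    int interlaced_me = 1;                 /* e.g. CODEC_FLAG_INTERLACED_ME set */
    if (interlaced_me)
        mb_type |= CANDIDATE_MB_TYPE_INTER_I;

    if (mb_type & CANDIDATE_MB_TYPE_INTER_I)
        printf("field (16x8) inter prediction is a candidate\n");
    return 0;
}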
......@@ -551,8 +568,6 @@ typedef struct MpegEncContext {
uint8_t *tex_pb_buffer;
uint8_t *pb2_buffer;
int mpeg_quant;
int16_t (*field_mv_table)[2][2]; ///< used for interlaced b frame decoding
int8_t (*field_select_table)[2]; ///< wtf, no really another table for interlaced b frames
int t_frame; ///< time distance of first I -> B, used for interlaced b frames
int padding_bug_score; ///< used to detect the VERY common padding bug in MPEG4
......@@ -748,7 +763,8 @@ void ff_estimate_b_frame_motion(MpegEncContext * s,
int mb_x, int mb_y);
int ff_get_best_fcode(MpegEncContext * s, int16_t (*mv_table)[2], int type);
void ff_fix_long_p_mvs(MpegEncContext * s);
void ff_fix_long_b_mvs(MpegEncContext * s, int16_t (*mv_table)[2], int f_code, int type);
void ff_fix_long_mvs(MpegEncContext * s, uint8_t *field_select_table, int field_select,
int16_t (*mv_table)[2], int f_code, int type, int truncate);
void ff_init_me(MpegEncContext *s);
int ff_pre_estimate_p_frame_motion(MpegEncContext * s, int mb_x, int mb_y);
......
......@@ -45,7 +45,7 @@ static void sigill_handler (int sig)
}
#endif /* CONFIG_DARWIN */
int pix_abs16x16_x2_altivec(uint8_t *pix1, uint8_t *pix2, int line_size)
int sad16_x2_altivec(void *v, uint8_t *pix1, uint8_t *pix2, int line_size, int h)
{
int i;
int s __attribute__((aligned(16)));
......@@ -57,7 +57,7 @@ int pix_abs16x16_x2_altivec(uint8_t *pix1, uint8_t *pix2, int line_size)
s = 0;
sad = (vector unsigned int)vec_splat_u32(0);
for(i=0;i<16;i++) {
for(i=0;i<h;i++) {
/*
Read unaligned pixels into our vectors. The vectors are as follows:
pix1v: pix1[0]-pix1[15]
......@@ -92,7 +92,7 @@ int pix_abs16x16_x2_altivec(uint8_t *pix1, uint8_t *pix2, int line_size)
return s;
}
int pix_abs16x16_y2_altivec(uint8_t *pix1, uint8_t *pix2, int line_size)
int sad16_y2_altivec(void *v, uint8_t *pix1, uint8_t *pix2, int line_size, int h)
{
int i;
int s __attribute__((aligned(16)));
......@@ -118,7 +118,7 @@ int pix_abs16x16_y2_altivec(uint8_t *pix1, uint8_t *pix2, int line_size)
tv = (vector unsigned char *) &pix2[0];
pix2v = vec_perm(tv[0], tv[1], vec_lvsl(0, &pix2[0]));
for(i=0;i<16;i++) {
for(i=0;i<h;i++) {
/*
Read unaligned pixels into our vectors. The vectors are as follows:
pix1v: pix1[0]-pix1[15]
......@@ -152,7 +152,7 @@ int pix_abs16x16_y2_altivec(uint8_t *pix1, uint8_t *pix2, int line_size)
return s;
}
int pix_abs16x16_xy2_altivec(uint8_t *pix1, uint8_t *pix2, int line_size)
int sad16_xy2_altivec(void *v, uint8_t *pix1, uint8_t *pix2, int line_size, int h)
{
int i;
int s __attribute__((aligned(16)));
......@@ -194,7 +194,7 @@ int pix_abs16x16_xy2_altivec(uint8_t *pix1, uint8_t *pix2, int line_size)
t1 = vec_add(pix2hv, pix2ihv);
t2 = vec_add(pix2lv, pix2ilv);
for(i=0;i<16;i++) {
for(i=0;i<h;i++) {
/*
Read unaligned pixels into our vectors. The vectors are as follows:
pix1v: pix1[0]-pix1[15]
......@@ -253,7 +253,7 @@ int pix_abs16x16_xy2_altivec(uint8_t *pix1, uint8_t *pix2, int line_size)
return s;
}
int pix_abs16x16_altivec(uint8_t *pix1, uint8_t *pix2, int line_size)
int sad16_altivec(void *v, uint8_t *pix1, uint8_t *pix2, int line_size, int h)
{
int i;
int s __attribute__((aligned(16)));
......@@ -266,7 +266,7 @@ int pix_abs16x16_altivec(uint8_t *pix1, uint8_t *pix2, int line_size)
sad = (vector unsigned int)vec_splat_u32(0);
for(i=0;i<16;i++) {
for(i=0;i<h;i++) {
/* Read potentially unaligned pixels into t1 and t2 */
perm1 = vec_lvsl(0, pix1);
pix1v = (vector unsigned char *) pix1;
......@@ -295,7 +295,7 @@ int pix_abs16x16_altivec(uint8_t *pix1, uint8_t *pix2, int line_size)
return s;
}
int pix_abs8x8_altivec(uint8_t *pix1, uint8_t *pix2, int line_size)
int sad8_altivec(void *v, uint8_t *pix1, uint8_t *pix2, int line_size, int h)
{
int i;
int s __attribute__((aligned(16)));
......@@ -309,7 +309,7 @@ int pix_abs8x8_altivec(uint8_t *pix1, uint8_t *pix2, int line_size)
permclear = (vector unsigned char)AVV(255,255,255,255,255,255,255,255,0,0,0,0,0,0,0,0);
for(i=0;i<8;i++) {
for(i=0;i<h;i++) {
/* Read potentially unaligned pixels into t1 and t2
Since we're reading 16 pixels, and actually only want 8,
mask out the last 8 pixels. The 0s don't change the sum. */
......@@ -374,9 +374,9 @@ int pix_norm1_altivec(uint8_t *pix, int line_size)
/**
* Sum of Squared Errors for a 8x8 block.
* AltiVec-enhanced.
* It's the pix_abs8x8_altivec code above w/ squaring added.
* It's the sad8_altivec code above w/ squaring added.
*/
int sse8_altivec(void *v, uint8_t *pix1, uint8_t *pix2, int line_size)
int sse8_altivec(void *v, uint8_t *pix1, uint8_t *pix2, int line_size, int h)
{
int i;
int s __attribute__((aligned(16)));
......@@ -391,7 +391,7 @@ int sse8_altivec(void *v, uint8_t *pix1, uint8_t *pix2, int line_size)
permclear = (vector unsigned char)AVV(255,255,255,255,255,255,255,255,0,0,0,0,0,0,0,0);
for(i=0;i<8;i++) {
for(i=0;i<h;i++) {
/* Read potentially unaligned pixels into t1 and t2
Since we're reading 16 pixels, and actually only want 8,
mask out the last 8 pixels. The 0s don't change the sum. */
......@@ -430,9 +430,9 @@ int sse8_altivec(void *v, uint8_t *pix1, uint8_t *pix2, int line_size)
/**
* Sum of Squared Errors for a 16x16 block.
* AltiVec-enhanced.
* It's the pix_abs16x16_altivec code above w/ squaring added.
* It's the sad16_altivec code above w/ squaring added.
*/
int sse16_altivec(void *v, uint8_t *pix1, uint8_t *pix2, int line_size)
int sse16_altivec(void *v, uint8_t *pix1, uint8_t *pix2, int line_size, int h)
{
int i;
int s __attribute__((aligned(16)));
......@@ -444,7 +444,7 @@ int sse16_altivec(void *v, uint8_t *pix1, uint8_t *pix2, int line_size)
sum = (vector unsigned int)vec_splat_u32(0);
for(i=0;i<16;i++) {
for(i=0;i<h;i++) {
/* Read potentially unaligned pixels into t1 and t2 */
perm1 = vec_lvsl(0, pix1);
pix1v = (vector unsigned char *) pix1;
......@@ -609,14 +609,6 @@ void diff_pixels_altivec(DCTELEM *restrict block, const uint8_t *s1,
}
}
int sad16x16_altivec(void *s, uint8_t *a, uint8_t *b, int stride) {
return pix_abs16x16_altivec(a,b,stride);
}
int sad8x8_altivec(void *s, uint8_t *a, uint8_t *b, int stride) {
return pix_abs8x8_altivec(a,b,stride);
}
void add_bytes_altivec(uint8_t *dst, uint8_t *src, int w) {
#ifdef ALTIVEC_USE_REFERENCE_C_CODE
int i;
......
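The AltiVec comparison routines above now take a height argument (h) instead of hard-coding 16 or 8 rows, matching the "passing height to most dsp functions" part of this change; with h = 8 a 16-wide call covers one field of a macroblock. A plain-C reference sketch of the new prototype, not the project's optimized code:

#include <stdint.h>
#include <stdio.h>

/* Scalar reference for the new comparison prototype:
 * (void *ctx, uint8_t *pix1, uint8_t *pix2, int line_size, int h).
 * h == 16 covers a frame block, h == 8 a single field of it. */
static int sad16_c_ref(void *ctx, uint8_t *pix1, uint8_t *pix2, int line_size, int h)
{
    (void)ctx;
    int sum = 0;
    for (int y = 0; y < h; y++) {
        for (int x = 0; x < 16; x++) {
            int d = pix1[x] - pix2[x];
            sum += d < 0 ? -d : d;
        }
        pix1 += line_size;
        pix2 += line_size;
    }
    return sum;
}

int main(void)
{
    uint8_t a[16 * 16] = {0}, b[16 * 16] = {0};
    b[0] = 5;
    printf("frame SAD %d, field SAD %d\n",
           sad16_c_ref(0, a, b, 16, 16), sad16_c_ref(0, a, b, 16, 8));
    return 0;
}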
......@@ -24,16 +24,14 @@
#ifdef HAVE_ALTIVEC
extern int pix_abs16x16_x2_altivec(uint8_t *pix1, uint8_t *pix2, int line_size);
extern int pix_abs16x16_y2_altivec(uint8_t *pix1, uint8_t *pix2, int line_size);
extern int pix_abs16x16_xy2_altivec(uint8_t *pix1, uint8_t *pix2, int line_size);
extern int pix_abs16x16_altivec(uint8_t *pix1, uint8_t *pix2, int line_size);
extern int pix_abs8x8_altivec(uint8_t *pix1, uint8_t *pix2, int line_size);
extern int sad16x16_altivec(void *s, uint8_t *a, uint8_t *b, int stride);
extern int sad8x8_altivec(void *s, uint8_t *a, uint8_t *b, int stride);
extern int sad16_x2_altivec(void *v, uint8_t *pix1, uint8_t *pix2, int line_size, int h);
extern int sad16_y2_altivec(void *v, uint8_t *pix1, uint8_t *pix2, int line_size, int h);
extern int sad16_xy2_altivec(void *v, uint8_t *pix1, uint8_t *pix2, int line_size, int h);
extern int sad16_altivec(void *v, uint8_t *pix1, uint8_t *pix2, int line_size, int h);
extern int sad8_altivec(void *v, uint8_t *pix1, uint8_t *pix2, int line_size, int h);
extern int pix_norm1_altivec(uint8_t *pix, int line_size);
extern int sse8_altivec(void *v, uint8_t *pix1, uint8_t *pix2, int line_size);
extern int sse16_altivec(void *v, uint8_t *pix1, uint8_t *pix2, int line_size);
extern int sse8_altivec(void *v, uint8_t *pix1, uint8_t *pix2, int line_size, int h);
extern int sse16_altivec(void *v, uint8_t *pix1, uint8_t *pix2, int line_size, int h);
extern int pix_sum_altivec(uint8_t * pix, int line_size);
extern void diff_pixels_altivec(DCTELEM* block, const uint8_t* s1, const uint8_t* s2, int stride);
extern void get_pixels_altivec(DCTELEM* block, const uint8_t * pixels, int line_size);
......
......@@ -240,13 +240,13 @@ void dsputil_init_ppc(DSPContext* c, AVCodecContext *avctx)
mm_flags |= MM_ALTIVEC;
// Altivec specific optimisations
c->pix_abs16x16_x2 = pix_abs16x16_x2_altivec;
c->pix_abs16x16_y2 = pix_abs16x16_y2_altivec;
c->pix_abs16x16_xy2 = pix_abs16x16_xy2_altivec;
c->pix_abs16x16 = pix_abs16x16_altivec;
c->pix_abs8x8 = pix_abs8x8_altivec;
c->sad[0]= sad16x16_altivec;
c->sad[1]= sad8x8_altivec;
c->pix_abs[0][1] = sad16_x2_altivec;
c->pix_abs[0][2] = sad16_y2_altivec;
c->pix_abs[0][3] = sad16_xy2_altivec;
c->pix_abs[0][0] = sad16_altivec;
c->pix_abs[1][0] = sad8_altivec;
c->sad[0]= sad16_altivec;
c->sad[1]= sad8_altivec;
c->pix_norm1 = pix_norm1_altivec;
c->sse[1]= sse8_altivec;
c->sse[0]= sse16_altivec;
......
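dsputil_init_ppc now fills the two-dimensional pix_abs[size][hpel] table instead of the old per-name pix_abs16x16_* pointers. A minimal sketch of that table layout and a dispatch through it; the index meaning (row 0 = 16-wide, row 1 = 8-wide; column 0 = full-pel, 1 = x half-pel, 2 = y half-pel, 3 = xy half-pel) is inferred from the assignments above, and MiniDSPContext/cmp_stub are hypothetical stand-ins:

#include <stdint.h>
#include <stdio.h>

/* Minimal stand-in for the comparison-function table filled above. */
typedef int (*cmp_fn)(void *ctx, uint8_t *pix1, uint8_t *pix2,
                      int line_size, int h);

typedef struct {
    cmp_fn pix_abs[2][4];
} MiniDSPContext;

static int cmp_stub(void *ctx, uint8_t *p1, uint8_t *p2, int line_size, int h)
{
    (void)ctx; (void)p1; (void)p2; (void)line_size;
    return h;   /* dummy score, just to show the dispatch */
}

int main(void)
{
    MiniDSPContext c = {{{0}}};
    for (int size = 0; size < 2; size++)
        for (int hpel = 0; hpel < 4; hpel++)
            c.pix_abs[size][hpel] = cmp_stub;

    uint8_t a[16 * 16] = {0}, b[16 * 16] = {0};
    /* a motion-estimation loop would pick the entry from the half-pel bits */
    int score = c.pix_abs[0][3](0, a, b, 16, 16);   /* 16-wide, xy half-pel */
    printf("score %d\n", score);
    return 0;
}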
......@@ -520,7 +520,7 @@ static void adaptive_quantization(MpegEncContext *s, double q){
if(spat_cplx < 4) spat_cplx= 4; //FIXME finetune
if(temp_cplx < 4) temp_cplx= 4; //FIXME finetune
if((s->mb_type[mb_xy]&MB_TYPE_INTRA)){//FIXME hq mode
if((s->mb_type[mb_xy]&CANDIDATE_MB_TYPE_INTRA)){//FIXME hq mode
cplx= spat_cplx;
factor= 1.0 + p_masking;
}else{
......
......@@ -26,21 +26,21 @@ stddev: 8.18 bytes:7602176
920a0a8a0063655d1f34dcaad7857f98 *./data/a-h263p.avi
0eb167c9dfcbeeecbf3debed8af8f811 *./data/out.yuv
stddev: 2.08 bytes:7602176
a8cc41cd5016bbb821e7c2691f5090ea *./data/a-odivx.mp4
e48114a50ef4cfb4fe2016fa5b34ae4c *./data/out.yuv
stddev: 8.02 bytes:7602176
66f8b4b5b4f0655cff7bdbc44969cab3 *./data/a-odivx.mp4
5bd332c77ef45e58b7017e06a0467dd3 *./data/out.yuv
stddev: 7.94 bytes:7602176
5704a082cc5c5970620123ae20566286 *./data/a-huffyuv.avi
799d3db687f6cdd7a837ec156efc171f *./data/out.yuv
stddev: 0.00 bytes:7602176
e9f63126859b97cd23cd1413038f8f7b *./data/a-mpeg4-rc.avi
90a159074b1b109569914ee63f387860 *./data/out.yuv
stddev: 10.18 bytes:7145472
b3f1425e266569d5d726b88eadc13dd4 *./data/a-mpeg4-adv.avi
fb61365b22c947adbaeab74478579020 *./data/out.yuv
stddev: 7.31 bytes:7602176
25ec5ab399fd4db0c8aaea78cb692611 *./data/a-error-mpeg4-adv.avi
bd441fc1e2fb9a3c0bdc9c5f1ed25ef0 *./data/out.yuv
stddev: 13.57 bytes:7602176
d7d295f97a1e07b633f973d2325880ce *./data/a-mpeg4-adv.avi
612f79510c8098f1421aa154047e2bf2 *./data/out.yuv
stddev: 7.25 bytes:7602176
f863f4198521bd76930ea33991b47273 *./data/a-error-mpeg4-adv.avi
ba7fcd126c7c9fead5a5de71aaaf0624 *./data/out.yuv
stddev: 16.80 bytes:7602176
328ebd044362116e274739e23c482ee7 *./data/a-mpeg1b.mpg
788a9d500dc8986231a18076fc80fd73 *./data/out.yuv
stddev: 10.07 bytes:7145472
......
......@@ -138,7 +138,7 @@ do_ffmpeg $raw_dst -y -i $file -f rawvideo $raw_dst
# mpeg2 encoding interlaced
file=${outfile}mpeg2i.mpg
do_ffmpeg $file -y -qscale 10 -f pgmyuv -i $raw_src -vcodec mpeg2video -f mpeg1video -interlace $file
do_ffmpeg $file -y -qscale 10 -f pgmyuv -i $raw_src -vcodec mpeg2video -f mpeg1video -ildct $file
# mpeg2 decoding
do_ffmpeg $raw_dst -y -i $file -f rawvideo $raw_dst
......
......@@ -26,21 +26,21 @@ stddev: 5.41 bytes:7602176
f7828488c31ccb6787367ef4e4a2ad42 *./data/a-h263p.avi
7d39d1f272205a6a231d0e0baf32ff9d *./data/out.yuv
stddev: 1.91 bytes:7602176
f17dc7346f5d1d4307ecf4507f10fcc6 *./data/a-odivx.mp4
ff7ddb57d9038b94f08c43bae7e1329f *./data/out.yuv
stddev: 5.28 bytes:7602176
a831828595e5764e6ee30c2d9e548385 *./data/a-odivx.mp4
ad75d173bd30d642147f00da21df0012 *./data/out.yuv
stddev: 5.27 bytes:7602176
242a7a18c2793e115007bc163861ef4e *./data/a-huffyuv.avi
dde5895817ad9d219f79a52d0bdfb001 *./data/out.yuv
stddev: 0.00 bytes:7602176
6a469f42ce6946dd4c708f9e51e3da6a *./data/a-mpeg4-rc.avi
df9de7134d961119705b4e0cabca1f12 *./data/out.yuv
stddev: 4.20 bytes:7145472
742ffadf3c309d2c4ac888a6a0905bf9 *./data/a-mpeg4-adv.avi
b02f71e91e9368ce94814ab3d74f91ba *./data/out.yuv
stddev: 4.97 bytes:7602176
f2888ab759ac28aba85a16d3d54b80d0 *./data/a-error-mpeg4-adv.avi
93ab926aad2e658a5bb00c25b7cefdab *./data/out.yuv
stddev: 5.22 bytes:7602176
483504d060b0bd8ac1acfa3a823c2ad7 *./data/a-mpeg4-adv.avi
08d24bdd7da80cffaf8abaa3e71b1843 *./data/out.yuv
stddev: 4.96 bytes:7602176
03ff35856faefb4882eaf4d86d95bea7 *./data/a-error-mpeg4-adv.avi
8550acff0851ee915bd5800f1e20f37c *./data/out.yuv
stddev: 9.66 bytes:7602176
671802a2c5078e69f7f422765ea87f2a *./data/a-mpeg1b.mpg
d3d5876cef34b728602d5a22eee9249f *./data/out.yuv
stddev: 5.93 bytes:7145472
......