diff --git a/libavcodec/cavs.h b/libavcodec/cavs.h
index d33ff0a32bc9f40293247e1dff21c3c70c66dbb1..7d9b94e8159158dc2e6d6a261ca23d4abd2a195b 100644
--- a/libavcodec/cavs.h
+++ b/libavcodec/cavs.h
@@ -210,7 +210,7 @@ typedef struct AVSContext {
        6: A3 X2 X3 */
     int pred_mode_Y[3*3];
     int *top_pred_Y;
-    int l_stride, c_stride;
+    ptrdiff_t l_stride, c_stride;
     int luma_scan[4];
     int qp;
     int qp_fixed;
diff --git a/libavcodec/h264.c b/libavcodec/h264.c
index 5a9ab6a0843773dea988b6a28408a10b22112c85..68f51a99df442c5853dc02b1a52f3221506852b2 100644
--- a/libavcodec/h264.c
+++ b/libavcodec/h264.c
@@ -875,7 +875,7 @@ static av_always_inline void mc_dir_part(H264Context *h, Picture *pic,
     const int mx = h->mv_cache[list][scan8[n]][0] + src_x_offset * 8;
     int my = h->mv_cache[list][scan8[n]][1] + src_y_offset * 8;
     const int luma_xy = (mx & 3) + ((my & 3) << 2);
-    int offset = ((mx >> 2) << pixel_shift) + (my >> 2) * h->mb_linesize;
+    ptrdiff_t offset = ((mx >> 2) << pixel_shift) + (my >> 2) * h->mb_linesize;
     uint8_t *src_y = pic->f.data[0] + offset;
     uint8_t *src_cb, *src_cr;
     int extra_width = 0;
diff --git a/libavcodec/h264.h b/libavcodec/h264.h
index 2fead60244c3bba2ec1ec145780627588c116d81..cb970673ad294d138238fbd79b346cfdf9408d17 100644
--- a/libavcodec/h264.h
+++ b/libavcodec/h264.h
@@ -276,7 +276,7 @@ typedef struct H264Context {
 
     /* coded dimensions -- 16 * mb w/h */
     int width, height;
-    int linesize, uvlinesize;
+    ptrdiff_t linesize, uvlinesize;
     int chroma_x_shift, chroma_y_shift;
 
     int qscale;
@@ -349,8 +349,8 @@ typedef struct H264Context {
     uint32_t *mb2br_xy;
     int b_stride; // FIXME use s->b4_stride
 
-    int mb_linesize; ///< may be equal to s->linesize or s->linesize * 2, for mbaff
-    int mb_uvlinesize;
+    ptrdiff_t mb_linesize; ///< may be equal to s->linesize or s->linesize * 2, for mbaff
+    ptrdiff_t mb_uvlinesize;
 
     unsigned current_sps_id; ///< id of the current SPS
     SPS sps; ///< current sps
diff --git a/libavcodec/mpegvideo.h b/libavcodec/mpegvideo.h
index 81e3d2b9ae57d09d005b389d5a04e5ad44bbbb7d..9f222c57737649986f93f5743ec4523e0604f30f 100644
--- a/libavcodec/mpegvideo.h
+++ b/libavcodec/mpegvideo.h
@@ -275,8 +275,8 @@ typedef struct MpegEncContext {
     int b4_stride; ///< 4*mb_width+1 used for some 4x4 block arrays to allow simple addressing
     int h_edge_pos, v_edge_pos;///< horizontal / vertical position of the right/bottom edge (pixel replication)
     int mb_num; ///< number of MBs of a picture
-    int linesize; ///< line size, in bytes, may be different from width
-    int uvlinesize; ///< line size, for chroma in bytes, may be different from width
+    ptrdiff_t linesize; ///< line size, in bytes, may be different from width
+    ptrdiff_t uvlinesize; ///< line size, for chroma in bytes, may be different from width
     Picture *picture; ///< main picture buffer
     Picture **input_picture; ///< next pictures on display order for encoding
     Picture **reordered_input_picture; ///< pointer to the next pictures in coded order for encoding
diff --git a/libavcodec/mpegvideo_enc.c b/libavcodec/mpegvideo_enc.c
index 2bafedd338989fb6fda15c492414a0288e63493c..c2459960aff2643e7f89b7aa334ad22a3b0e7179 100644
--- a/libavcodec/mpegvideo_enc.c
+++ b/libavcodec/mpegvideo_enc.c
@@ -877,7 +877,7 @@ static int load_input_picture(MpegEncContext *s, const AVFrame *pic_arg)
         if (pic_arg->linesize[2] != s->uvlinesize)
             direct = 0;
 
-        av_dlog(s->avctx, "%d %d %d %d\n", pic_arg->linesize[0],
+        av_dlog(s->avctx, "%d %d %td %td\n", pic_arg->linesize[0],
                 pic_arg->linesize[1], s->linesize, s->uvlinesize);
 
         if (direct) {
@@ -1641,7 +1641,7 @@ static av_always_inline void encode_mb_internal(MpegEncContext *s,
     int skip_dct[8];
     int dct_offset = s->linesize * 8; // default for progressive frames
     uint8_t *ptr_y, *ptr_cb, *ptr_cr;
-    int wrap_y, wrap_c;
+    ptrdiff_t wrap_y, wrap_c;
 
     for (i = 0; i < mb_block_count; i++)
         skip_dct[i] = s->skipdct;
diff --git a/libavcodec/mpegvideo_motion.c b/libavcodec/mpegvideo_motion.c
index cbf200f3f5d3fddc1bd2d8c28f77ffe207342c59..8562a7233c8c4eee6125e0142fcc5c83e822cb1e 100644
--- a/libavcodec/mpegvideo_motion.c
+++ b/libavcodec/mpegvideo_motion.c
@@ -37,8 +37,8 @@ static void gmc1_motion(MpegEncContext *s,
                         uint8_t **ref_picture)
 {
     uint8_t *ptr;
-    int offset, src_x, src_y, linesize, uvlinesize;
-    int motion_x, motion_y;
+    int src_x, src_y, motion_x, motion_y;
+    ptrdiff_t offset, linesize, uvlinesize;
     int emu=0;
 
     motion_x= s->sprite_offset[0][0];
@@ -462,7 +462,8 @@ static inline void qpel_motion(MpegEncContext *s,
                               int motion_x, int motion_y, int h)
 {
     uint8_t *ptr_y, *ptr_cb, *ptr_cr;
-    int dxy, uvdxy, mx, my, src_x, src_y, uvsrc_x, uvsrc_y, v_edge_pos, linesize, uvlinesize;
+    int dxy, uvdxy, mx, my, src_x, src_y, uvsrc_x, uvsrc_y, v_edge_pos;
+    ptrdiff_t linesize, uvlinesize;
 
     dxy = ((motion_y & 3) << 2) | (motion_x & 3);
     src_x = s->mb_x * 16 + (motion_x >> 2);
@@ -555,8 +556,9 @@ static void chroma_4mv_motion(MpegEncContext *s,
                              op_pixels_func *pix_op,
                              int mx, int my)
 {
-    int dxy, emu=0, src_x, src_y, offset;
     uint8_t *ptr;
+    int src_x, src_y, dxy, emu = 0;
+    ptrdiff_t offset;
 
     /* In case of 8X8, we construct a single chroma motion vector
        with a special rounding */
diff --git a/libavcodec/vp3.c b/libavcodec/vp3.c
index b326a1c60bd8f511eb6c7ff2a71d0b02f00bee6f..467dfd565520e957bcee9394c174400394849ae8 100644
--- a/libavcodec/vp3.c
+++ b/libavcodec/vp3.c
@@ -1288,7 +1288,7 @@ static void apply_loop_filter(Vp3DecodeContext *s, int plane, int ystart, int ye
     int width = s->fragment_width[!!plane];
     int height = s->fragment_height[!!plane];
     int fragment = s->fragment_start [plane] + ystart * width;
-    int stride = s->current_frame.f->linesize[plane];
+    ptrdiff_t stride = s->current_frame.f->linesize[plane];
     uint8_t *plane_data = s->current_frame.f->data [plane];
     if (!s->flipped_image) stride = -stride;
     plane_data += s->data_offset[plane] + 8*ystart*stride;
@@ -1470,7 +1470,7 @@ static void render_slice(Vp3DecodeContext *s, int slice)
         uint8_t *output_plane = s->current_frame.f->data [plane] + s->data_offset[plane];
         uint8_t * last_plane = s-> last_frame.f->data [plane] + s->data_offset[plane];
         uint8_t *golden_plane = s-> golden_frame.f->data [plane] + s->data_offset[plane];
-        int stride = s->current_frame.f->linesize[plane];
+        ptrdiff_t stride = s->current_frame.f->linesize[plane];
         int plane_width = s->width >> (plane && s->chroma_x_shift);
         int plane_height = s->height >> (plane && s->chroma_y_shift);
         int8_t (*motion_val)[2] = s->motion_val[!!plane];
diff --git a/libavcodec/vp56.c b/libavcodec/vp56.c
index a0cbcd708eafe5987470caa0a43376424dda403c..35df0e0e1cb27ad6232cad01c0d21cc28c92eeea 100644
--- a/libavcodec/vp56.c
+++ b/libavcodec/vp56.c
@@ -303,7 +303,7 @@ static void vp56_add_predictors_dc(VP56Context *s, VP56Frame ref_frame)
 }
 
 static void vp56_deblock_filter(VP56Context *s, uint8_t *yuv,
-                                int stride, int dx, int dy)
+                                ptrdiff_t stride, int dx, int dy)
 {
     int t = ff_vp56_filter_threshold[s->quantizer];
     if (dx) s->vp56dsp.edge_filter_hor(yuv + 10-dx , stride, t);
@@ -311,7 +311,7 @@ static void vp56_deblock_filter(VP56Context *s, uint8_t *yuv,
 }
 
 static void vp56_mc(VP56Context *s, int b, int plane, uint8_t *src,
-                    int stride, int x, int y)
+                    ptrdiff_t stride, int x, int y)
 {
     uint8_t *dst = s->frames[VP56_FRAME_CURRENT]->data[plane] + s->block_offset[b];
     uint8_t *src_block;
@@ -509,7 +509,8 @@ int ff_vp56_decode_frame(AVCodecContext *avctx, void *data, int *got_frame,
 
     for (is_alpha=0; is_alpha < 1+s->has_alpha; is_alpha++) {
         int mb_row, mb_col, mb_row_flip, mb_offset = 0;
-        int block, y, uv, stride_y, stride_uv;
+        int block, y, uv;
+        ptrdiff_t stride_y, stride_uv;
         int golden_frame = 0;
 
         s->modelp = &s->models[is_alpha];
diff --git a/libavcodec/vp8.c b/libavcodec/vp8.c
index 5b11cd1a85aaac92bf57947f62ef57ae2e3a692a..910ec209a4e6b57b1d3a0b731bade5a5aef42443 100644
--- a/libavcodec/vp8.c
+++ b/libavcodec/vp8.c
@@ -1179,7 +1179,7 @@ static av_always_inline
 void vp8_mc_luma(VP8Context *s, VP8ThreadData *td, uint8_t *dst,
                  ThreadFrame *ref, const VP56mv *mv,
                  int x_off, int y_off, int block_w, int block_h,
-                 int width, int height, int linesize,
+                 int width, int height, ptrdiff_t linesize,
                  vp8_mc_func mc_func[3][3])
 {
     uint8_t *src = ref->f->data[0];
@@ -1229,7 +1229,7 @@ void vp8_mc_luma(VP8Context *s, VP8ThreadData *td, uint8_t *dst,
 static av_always_inline
 void vp8_mc_chroma(VP8Context *s, VP8ThreadData *td, uint8_t *dst1, uint8_t *dst2,
                    ThreadFrame *ref, const VP56mv *mv, int x_off, int y_off,
-                   int block_w, int block_h, int width, int height, int linesize,
+                   int block_w, int block_h, int width, int height, ptrdiff_t linesize,
                    vp8_mc_func mc_func[3][3])
 {
     uint8_t *src1 = ref->f->data[1], *src2 = ref->f->data[2];
diff --git a/libavcodec/wmv2.c b/libavcodec/wmv2.c
index 4420f494a639bdb8f57f631f1f51b73c931885eb..f4bc907740fbf4c21c32ef4df99bccce8c0f7a31 100644
--- a/libavcodec/wmv2.c
+++ b/libavcodec/wmv2.c
@@ -94,7 +94,8 @@ void ff_mspel_motion(MpegEncContext *s,
 {
     Wmv2Context * const w= (Wmv2Context*)s;
     uint8_t *ptr;
-    int dxy, offset, mx, my, src_x, src_y, v_edge_pos, linesize, uvlinesize;
+    int dxy, offset, mx, my, src_x, src_y, v_edge_pos;
+    ptrdiff_t linesize, uvlinesize;
     int emu=0;
 
     dxy = ((motion_y & 1) << 1) | (motion_x & 1);
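Not part of the patch: a small standalone C sketch of the pointer arithmetic these conversions are about. pixel_addr is a hypothetical helper, not an FFmpeg API; it only illustrates that a stride participates directly in pointer arithmetic, may be negative (compare the "stride = -stride" context line in the vp3.c hunk above), and that computing the row offset in a pointer-sized type such as ptrdiff_t avoids truncating or overflowing a 32-bit int intermediate on very large frames.

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Hypothetical helper, not an FFmpeg API: address of pixel (x, y) in a
 * plane whose rows are 'stride' bytes apart.  Because stride is ptrdiff_t,
 * the row offset y * stride is computed in a pointer-sized type, and a
 * negative stride (bottom-up view of the image) works the same way as a
 * positive one. */
static uint8_t *pixel_addr(uint8_t *plane_data, ptrdiff_t stride, int x, int y)
{
    return plane_data + y * stride + x;
}

int main(void)
{
    uint8_t buf[16 * 16] = { 0 };
    uint8_t *top    = buf;            /* top-down view: points at the first row */
    uint8_t *bottom = buf + 15 * 16;  /* bottom-up view: points at the last row  */

    buf[3 * 16 + 5] = 42;             /* row 3, column 5 in the top-down view */

    /* The same byte reached with a positive and with a negative stride. */
    printf("%d %d\n", *pixel_addr(top, 16, 5, 3), *pixel_addr(bottom, -16, 5, 12));
    return 0;
}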