提交 7458ccbb 编写于 作者: R Roman Shaposhnik

* DV handling was streamlined for both muxing/demuxing and

     decoding. All muxing/demuxing functionality is now available
     in libavformat/dv.[ch].

   * dv1394.c and avidec.c were hooked up with general DV demuxer.

   * DVAUDIO is dead! Long live pcm_s16le!

   * DV audio is now always recognized -- which means we can
     now hear all those ducks quacking in pond.dv.

Originally committed as revision 2319 to svn://svn.ffmpeg.org/ffmpeg/trunk
上级 da64ecc3
......@@ -103,7 +103,6 @@ void avcodec_register_all(void)
register_avcodec(&mpeg_xvmc_decoder);
#endif
register_avcodec(&dvvideo_decoder);
register_avcodec(&dvaudio_decoder);
register_avcodec(&mjpeg_decoder);
register_avcodec(&mjpegb_decoder);
register_avcodec(&mp2_decoder);
......
......@@ -1388,7 +1388,6 @@ extern AVCodec rv10_decoder;
extern AVCodec svq1_decoder;
extern AVCodec svq3_decoder;
extern AVCodec dvvideo_decoder;
extern AVCodec dvaudio_decoder;
extern AVCodec wmav1_decoder;
extern AVCodec wmav2_decoder;
extern AVCodec mjpeg_decoder;
......
......@@ -25,34 +25,29 @@
#include "dsputil.h"
#include "mpegvideo.h"
#include "simple_idct.h"
#define NTSC_FRAME_SIZE 120000
#define PAL_FRAME_SIZE 144000
#define TEX_VLC_BITS 9
#include "dvdata.h"
typedef struct DVVideoDecodeContext {
AVCodecContext *avctx;
const DVprofile* sys;
GetBitContext gb;
VLC *vlc;
int sampling_411; /* 0 = 420, 1 = 411 */
int width, height;
uint8_t *current_picture[3]; /* picture structure */
AVFrame picture;
int linesize[3];
DCTELEM block[5*6][64] __align8;
/* FIXME: the following is extracted from DSP */
uint8_t dv_zigzag[2][64];
uint8_t idct_permutation[64];
void (*get_pixels)(DCTELEM *block, const uint8_t *pixels, int line_size);
void (*fdct)(DCTELEM *block);
/* XXX: move it to static storage ? */
uint8_t dv_shift[2][22][64];
void (*idct_put[2])(uint8_t *dest, int line_size, DCTELEM *block);
} DVVideoDecodeContext;
#include "dvdata.h"
static VLC dv_vlc;
#define TEX_VLC_BITS 9
/* XXX: also include quantization */
static RL_VLC_ELEM *dv_rl_vlc[1];
static VLC_TYPE dv_vlc_codes[15][23];
static void dv_build_unquantize_tables(DVVideoDecodeContext *s)
{
......@@ -85,6 +80,7 @@ static int dvvideo_decode_init(AVCodecContext *avctx)
if (!done) {
int i;
VLC dv_vlc;
done = 1;
......@@ -114,6 +110,12 @@ static int dvvideo_decode_init(AVCodecContext *avctx)
dv_rl_vlc[0][i].level = level;
dv_rl_vlc[0][i].run = run;
}
memset(dv_vlc_codes, 0xff, sizeof(dv_vlc_codes));
for (i = 0; i < NB_DV_VLC - 1; i++) {
if (dv_vlc_run[i] < 15 && dv_vlc_level[i] < 23 && dv_vlc_len[i] < 15)
dv_vlc_codes[dv_vlc_run[i]][dv_vlc_level[i]] = i;
}
}
/* ugly way to get the idct & scantable */
......@@ -124,6 +126,9 @@ static int dvvideo_decode_init(AVCodecContext *avctx)
if (DCT_common_init(&s2) < 0)
return -1;
s->get_pixels = s2.dsp.get_pixels;
s->fdct = s2.dsp.fdct;
s->idct_put[0] = s2.dsp.idct_put;
memcpy(s->idct_permutation, s2.dsp.idct_permutation, 64);
memcpy(s->dv_zigzag[0], s2.intra_scantable.permutated, 64);
......@@ -134,11 +139,11 @@ static int dvvideo_decode_init(AVCodecContext *avctx)
/* XXX: do it only for constant case */
dv_build_unquantize_tables(s);
return 0;
}
//#define VLC_DEBUG
// #define VLC_DEBUG
typedef struct BlockInfo {
const uint8_t *shift_table;
......@@ -450,29 +455,29 @@ static inline void dv_decode_video_segment(DVVideoDecodeContext *s,
v = *mb_pos_ptr++;
mb_x = v & 0xff;
mb_y = v >> 8;
y_ptr = s->current_picture[0] + (mb_y * s->linesize[0] * 8) + (mb_x * 8);
if (s->sampling_411)
c_offset = (mb_y * s->linesize[1] * 8) + ((mb_x >> 2) * 8);
y_ptr = s->picture.data[0] + (mb_y * s->picture.linesize[0] * 8) + (mb_x * 8);
if (s->sys->pix_fmt == PIX_FMT_YUV411P)
c_offset = (mb_y * s->picture.linesize[1] * 8) + ((mb_x >> 2) * 8);
else
c_offset = ((mb_y >> 1) * s->linesize[1] * 8) + ((mb_x >> 1) * 8);
c_offset = ((mb_y >> 1) * s->picture.linesize[1] * 8) + ((mb_x >> 1) * 8);
for(j = 0;j < 6; j++) {
idct_put = s->idct_put[mb->dct_mode];
if (j < 4) {
if (s->sampling_411 && mb_x < (704 / 8)) {
if (s->sys->pix_fmt == PIX_FMT_YUV411P && mb_x < (704 / 8)) {
/* NOTE: at end of line, the macroblock is handled as 420 */
idct_put(y_ptr + (j * 8), s->linesize[0], block);
idct_put(y_ptr + (j * 8), s->picture.linesize[0], block);
} else {
idct_put(y_ptr + ((j & 1) * 8) + ((j >> 1) * 8 * s->linesize[0]),
s->linesize[0], block);
idct_put(y_ptr + ((j & 1) * 8) + ((j >> 1) * 8 * s->picture.linesize[0]),
s->picture.linesize[0], block);
}
} else {
if (s->sampling_411 && mb_x >= (704 / 8)) {
if (s->sys->pix_fmt == PIX_FMT_YUV411P && mb_x >= (704 / 8)) {
uint8_t pixels[64], *c_ptr, *c_ptr1, *ptr;
int y, linesize;
/* NOTE: at end of line, the macroblock is handled as 420 */
idct_put(pixels, 8, block);
linesize = s->linesize[6 - j];
c_ptr = s->current_picture[6 - j] + c_offset;
linesize = s->picture.linesize[6 - j];
c_ptr = s->picture.data[6 - j] + c_offset;
ptr = pixels;
for(y = 0;y < 8; y++) {
/* convert to 411P */
......@@ -486,8 +491,8 @@ static inline void dv_decode_video_segment(DVVideoDecodeContext *s,
}
} else {
/* don't ask me why they inverted Cb and Cr ! */
idct_put(s->current_picture[6 - j] + c_offset,
s->linesize[6 - j], block);
idct_put(s->picture.data[6 - j] + c_offset,
s->picture.linesize[6 - j], block);
}
}
block += 64;
......@@ -496,7 +501,6 @@ static inline void dv_decode_video_segment(DVVideoDecodeContext *s,
}
}
/* NOTE: exactly one frame must be given (120000 bytes for NTSC,
144000 bytes for PAL) */
static int dvvideo_decode_frame(AVCodecContext *avctx,
......@@ -504,115 +508,35 @@ static int dvvideo_decode_frame(AVCodecContext *avctx,
uint8_t *buf, int buf_size)
{
DVVideoDecodeContext *s = avctx->priv_data;
int sct, dsf, apt, ds, nb_dif_segs, vs, width, height, i, packet_size;
uint8_t *buf_ptr;
int ds, vs;
const uint16_t *mb_pos_ptr;
/* parse id */
init_get_bits(&s->gb, buf, buf_size*8);
sct = get_bits(&s->gb, 3);
if (sct != 0)
return -1;
skip_bits(&s->gb, 5);
get_bits(&s->gb, 4); /* dsn (sequence number */
get_bits(&s->gb, 1); /* fsc (channel number) */
skip_bits(&s->gb, 3);
get_bits(&s->gb, 8); /* dbn (diff block number 0-134) */
dsf = get_bits(&s->gb, 1); /* 0 = NTSC 1 = PAL */
if (get_bits(&s->gb, 1) != 0)
return -1;
skip_bits(&s->gb, 11);
apt = get_bits(&s->gb, 3); /* apt */
get_bits(&s->gb, 1); /* tf1 */
skip_bits(&s->gb, 4);
get_bits(&s->gb, 3); /* ap1 */
get_bits(&s->gb, 1); /* tf2 */
skip_bits(&s->gb, 4);
get_bits(&s->gb, 3); /* ap2 */
get_bits(&s->gb, 1); /* tf3 */
skip_bits(&s->gb, 4);
get_bits(&s->gb, 3); /* ap3 */
/* init size */
width = 720;
if (dsf) {
avctx->frame_rate = 25;
avctx->frame_rate_base = 1;
packet_size = PAL_FRAME_SIZE;
height = 576;
nb_dif_segs = 12;
} else {
avctx->frame_rate = 30000;
avctx->frame_rate_base = 1001;
packet_size = NTSC_FRAME_SIZE;
height = 480;
nb_dif_segs = 10;
}
/* NOTE: we only accept several full frames */
if (buf_size < packet_size)
return -1;
/* NTSC[dsf == 0] is always 720x480, 4:1:1
* PAL[dsf == 1] is always 720x576, 4:2:0 for IEC 68134[apt == 0]
* but for the SMPTE 314M[apt == 1] it is 720x576, 4:1:1
*/
s->sampling_411 = !dsf || apt;
if (s->sampling_411) {
mb_pos_ptr = dsf ? dv_place_411P : dv_place_411;
avctx->pix_fmt = PIX_FMT_YUV411P;
} else {
mb_pos_ptr = dv_place_420;
avctx->pix_fmt = PIX_FMT_YUV420P;
}
avctx->width = width;
avctx->height = height;
/* Once again, this is pretty complicated by the fact that the same
* field is used differently by IEC 68134[apt == 0] and
* SMPTE 314M[apt == 1].
*/
if (buf[VAUX_TC61_OFFSET] == 0x61 &&
((apt == 0 && (buf[VAUX_TC61_OFFSET + 2] & 0x07) == 0x07) ||
(apt == 1 && (buf[VAUX_TC61_OFFSET + 2] & 0x07) == 0x02)))
avctx->aspect_ratio = 16.0 / 9.0;
else
avctx->aspect_ratio = 4.0 / 3.0;
s->sys = dv_frame_profile(buf);
if (!s->sys || buf_size < s->sys->frame_size)
return -1; /* NOTE: we only accept several full frames */
if(s->picture.data[0])
avctx->release_buffer(avctx, &s->picture);
s->picture.reference= 0;
s->picture.reference = 0;
avctx->pix_fmt = s->sys->pix_fmt;
if(avctx->get_buffer(avctx, &s->picture) < 0) {
fprintf(stderr, "get_buffer() failed\n");
return -1;
}
for(i=0;i<3;i++) {
s->current_picture[i] = s->picture.data[i];
s->linesize[i] = s->picture.linesize[i];
if (!s->current_picture[i])
return -1;
}
s->width = width;
s->height = height;
/* for each DIF segment */
buf_ptr = buf;
for (ds = 0; ds < nb_dif_segs; ds++) {
buf_ptr += 6 * 80; /* skip DIF segment header */
mb_pos_ptr = s->sys->video_place;
for (ds = 0; ds < s->sys->difseg_size; ds++) {
buf += 6 * 80; /* skip DIF segment header */
for(vs = 0; vs < 27; vs++) {
if ((vs % 3) == 0) {
/* skip audio block */
buf_ptr += 80;
}
dv_decode_video_segment(s, buf_ptr, mb_pos_ptr);
buf_ptr += 5 * 80;
if ((vs % 3) == 0)
buf += 80; /* skip audio block */
dv_decode_video_segment(s, buf, mb_pos_ptr);
buf += 5 * 80;
mb_pos_ptr += 5;
}
}
......@@ -623,7 +547,7 @@ static int dvvideo_decode_frame(AVCodecContext *avctx,
*data_size = sizeof(AVFrame);
*(AVFrame*)data= s->picture;
return packet_size;
return s->sys->frame_size;
}
static int dvvideo_decode_end(AVCodecContext *avctx)
......@@ -645,158 +569,3 @@ AVCodec dvvideo_decoder = {
CODEC_CAP_DR1,
NULL
};
/* Private state for the standalone DV audio decoder.
 * NOTE(review): dvaudio_decode_frame() actually casts priv_data to
 * DVVideoDecodeContext, not to this struct -- the two appear to be
 * layout-compatible for the fields used (gb); confirm before changing. */
typedef struct DVAudioDecodeContext {
AVCodecContext *avctx; /* back-pointer to the owning codec context */
GetBitContext gb;      /* bit reader used to parse the AAUX source pack */
} DVAudioDecodeContext;
/* Decoder init callback: DV audio keeps no persistent state, so there
 * is nothing to set up. Always succeeds. */
static int dvaudio_decode_init(AVCodecContext *avctx)
{
    (void)avctx; /* unused */
    return 0;
}
/* Expand one 12bit nonlinear (companded) DV audio sample into a 16bit
 * linear sample. The input occupies the low 12 bits; the top nibble of
 * the sign-extended value selects the companding segment. */
static uint16_t dv_audio_12to16(uint16_t sample)
{
    uint16_t seg, out;

    /* sign-extend the 12bit value into the 16bit container */
    if (sample >= 0x800)
        sample |= 0xf000;

    /* bits 8-11 identify the companding segment */
    seg = (sample >> 8) & 0xf;

    if (seg < 0x2 || seg > 0xd) {
        /* linear region: sample passes through unchanged */
        out = sample;
    } else if (seg < 0x8) {
        /* positive companded segments */
        seg--;
        out = (sample - 256 * seg) << seg;
    } else {
        /* negative companded segments */
        seg = 0xe - seg;
        out = ((sample + 256 * seg + 1) << seg) - 1;
    }
    return out;
}
/* NOTE: exactly one frame must be given (120000 bytes for NTSC,
144000 bytes for PAL)
There's a couple of assumptions being made here:
1. By default we silence erroneous (0x8000/16bit 0x800/12bit)
audio samples. We can pass them upwards when ffmpeg will be ready
to deal with them.
2. We don't do software emphasis.
3. Audio is always returned as 16bit linear samples: 12bit
nonlinear samples are converted into 16bit linear ones.
*/
/* Decode the PCM audio of one full DV frame into *data (interleaved
 * 16bit stereo). Returns buf_size on success (audio consumed with the
 * whole frame), -1 on unsupported quantization. */
static int dvaudio_decode_frame(AVCodecContext *avctx,
void *data, int *data_size,
uint8_t *buf, int buf_size)
{
/* NOTE(review): priv_data is read as the *video* decode context here,
 * not DVAudioDecodeContext -- only the gb field is used; confirm. */
DVVideoDecodeContext *s = avctx->priv_data;
const uint16_t (*unshuffle)[9]; /* audio de-shuffling table */
int smpls, freq, quant, sys, stride, difseg, ad, dp, nb_dif_segs, i;
uint16_t lc, rc; /* left/right channel samples (12bit mode) */
uint8_t *buf_ptr;
/* parse id */
init_get_bits(&s->gb, &s->gb == &s->gb ? &buf[AAUX_AS_OFFSET] : NULL, 5*8);
i = get_bits(&s->gb, 8);
if (i != 0x50) { /* No audio ? */
*data_size = 0;
return buf_size;
}
get_bits(&s->gb, 1); /* 0 - locked audio, 1 - unlocked audio */
skip_bits(&s->gb, 1);
smpls = get_bits(&s->gb, 6); /* samples in this frame - min. samples */
skip_bits(&s->gb, 8);
skip_bits(&s->gb, 2);
sys = get_bits(&s->gb, 1); /* 0 - 60 fields, 1 = 50 fields */
skip_bits(&s->gb, 5);
get_bits(&s->gb, 1); /* 0 - emphasis on, 1 - emphasis off */
get_bits(&s->gb, 1); /* 0 - reserved, 1 - emphasis time constant 50/15us */
freq = get_bits(&s->gb, 3); /* 0 - 48KHz, 1 - 44,1kHz, 2 - 32 kHz */
quant = get_bits(&s->gb, 3); /* 0 - 16bit linear, 1 - 12bit nonlinear */
if (quant > 1)
return -1; /* Unsupported quantization */
avctx->sample_rate = dv_audio_frequency[freq];
avctx->channels = 2;
avctx->bit_rate = avctx->channels * avctx->sample_rate * 16;
// What about:
// avctx->frame_size =
/* output size: minimum sample count for this system/rate plus the
 * per-frame extra samples signalled in the AAUX pack, 2ch x 16bit */
*data_size = (dv_audio_min_samples[sys][freq] + smpls) *
avctx->channels * 2;
if (sys) {
nb_dif_segs = 12;
stride = 108;
unshuffle = dv_place_audio50;
} else {
nb_dif_segs = 10;
stride = 90;
unshuffle = dv_place_audio60;
}
/* for each DIF segment */
buf_ptr = buf;
for (difseg = 0; difseg < nb_dif_segs; difseg++) {
buf_ptr += 6 * 80; /* skip DIF segment header */
for (ad = 0; ad < 9; ad++) {
for (dp = 8; dp < 80; dp+=2) {
if (quant == 0) { /* 16bit quantization */
i = unshuffle[difseg][ad] + (dp - 8)/2 * stride;
((short *)data)[i] = (buf_ptr[dp] << 8) | buf_ptr[dp+1];
/* 0x8000 marks an erroneous sample: silence it (see header note) */
if (((unsigned short *)data)[i] == 0x8000)
((short *)data)[i] = 0;
} else { /* 12bit quantization */
if (difseg >= nb_dif_segs/2)
goto out; /* We're not doing 4ch at this time */
/* two 12bit samples (L and R) are packed into 3 bytes */
lc = ((uint16_t)buf_ptr[dp] << 4) |
((uint16_t)buf_ptr[dp+2] >> 4);
rc = ((uint16_t)buf_ptr[dp+1] << 4) |
((uint16_t)buf_ptr[dp+2] & 0x0f);
/* 0x800 marks an erroneous 12bit sample: silence it */
lc = (lc == 0x800 ? 0 : dv_audio_12to16(lc));
rc = (rc == 0x800 ? 0 : dv_audio_12to16(rc));
i = unshuffle[difseg][ad] + (dp - 8)/3 * stride;
((short *)data)[i] = lc;
i = unshuffle[difseg+nb_dif_segs/2][ad] + (dp - 8)/3 * stride;
((short *)data)[i] = rc;
++dp;
}
}
buf_ptr += 16 * 80; /* 15 Video DIFs + 1 Audio DIF */
}
}
out: /* reached early in 12bit mode once the 2-channel half is done */
return buf_size;
}
/* Decoder close callback: no resources were allocated in init, so
 * teardown is a no-op. Always succeeds. */
static int dvaudio_decode_end(AVCodecContext *avctx)
{
    (void)avctx; /* unused */
    return 0;
}
/* Registration record for the standalone DV audio decoder. */
AVCodec dvaudio_decoder = {
"dvaudio", /* codec name */
CODEC_TYPE_AUDIO,
CODEC_ID_DVAUDIO,
sizeof(DVAudioDecodeContext), /* priv_data size */
dvaudio_decode_init,
NULL, /* NOTE(review): presumably the encode callback -- none here; confirm field order */
dvaudio_decode_end,
dvaudio_decode_frame,
0, /* capabilities */
NULL
};
......@@ -21,11 +21,34 @@
* @file dvdata.h
* Constants for DV codec.
*/
/*
* DVprofile is used to express the differences between various
* DV flavors. For now it's primarily used for differentiating
* 525/60 and 625/50, but the plans are to use it for various
* DV specs as well (e.g. SMPTE314M vs. IEC 61834).
*/
typedef struct DVprofile {
int dsf; /* value of the dsf in the DV header */
int frame_size; /* total size of one frame in bytes */
int difseg_size; /* number of DIF segments */
int frame_rate; /* numerator of the frame rate */
int frame_rate_base; /* denominator of the frame rate */
int ltc_divisor; /* FPS from the LTC standpoint */
int height; /* picture height in pixels */
int width; /* picture width in pixels */
const uint16_t *video_place; /* positions of all DV macro blocks */
enum PixelFormat pix_fmt; /* picture pixel format */
int audio_stride; /* size of audio_shuffle table */
int audio_min_samples[3];/* min amount of audio samples */
/* for 48Khz, 44.1Khz and 32Khz */
int audio_samples_dist[5];/* how many samples are supposed to be */
/* in each frame in a 5 frames window */
const uint16_t (*audio_shuffle)[9]; /* PCM shuffling table */
} DVprofile;
#define NB_DV_VLC 409
#define AAUX_AS_OFFSET (80*6 + 80*16*3 + 3)
#define AAUX_ASC_OFFSET (80*6 + 80*16*4 + 3)
#define VAUX_TC61_OFFSET (80*5 + 48 + 5)
static const uint16_t dv_vlc_bits[409] = {
0x0000, 0x0002, 0x0007, 0x0008, 0x0009, 0x0014, 0x0015, 0x0016,
......@@ -283,7 +306,7 @@ static const uint8_t dv_248_areas[64] = {
1,2,2,3,3,3,3,3,
};
static uint8_t dv_quant_shifts[22][4] = {
static const uint8_t dv_quant_shifts[22][4] = {
{ 3,3,4,4 },
{ 3,3,4,4 },
{ 2,3,3,4 },
......@@ -1240,7 +1263,7 @@ static const uint16_t dv_place_411[1350] = {
0x0834, 0x2320, 0x2f44, 0x3810, 0x1658,
};
static const uint16_t dv_place_audio60[10][9] = {
static const uint16_t dv_audio_shuffle525[10][9] = {
{ 0, 30, 60, 20, 50, 80, 10, 40, 70 }, /* 1st channel */
{ 6, 36, 66, 26, 56, 86, 16, 46, 76 },
{ 12, 42, 72, 2, 32, 62, 22, 52, 82 },
......@@ -1254,7 +1277,7 @@ static const uint16_t dv_place_audio60[10][9] = {
{ 25, 55, 85, 15, 45, 75, 5, 35, 65 },
};
static const uint16_t dv_place_audio50[12][9] = {
static const uint16_t dv_audio_shuffle625[12][9] = {
{ 0, 36, 72, 26, 62, 98, 16, 52, 88}, /* 1st channel */
{ 6, 42, 78, 32, 68, 104, 22, 58, 94},
{ 12, 48, 84, 2, 38, 74, 28, 64, 100},
......@@ -1271,10 +1294,77 @@ static const uint16_t dv_place_audio50[12][9] = {
};
static const int dv_audio_frequency[3] = {
48000, 44100, 32000,
48000, 44100, 32000,
};
static const int dv_audio_min_samples[2][3] = {
{ 1580, 1452, 1053 }, /* 60 fields */
{ 1896, 1742, 1264 }, /* 50 fileds */
/* Table of all supported DV flavors; dv_frame_profile() and
 * dv_codec_profile() hand out pointers into this array. */
static const DVprofile dv_profiles[] = {
{ .dsf = 0,
.frame_size = 120000, /* IEC 61834, SMPTE-314M - 525/60 (NTSC) */
.difseg_size = 10,
.frame_rate = 30000,
.ltc_divisor = 30,
.frame_rate_base = 1001,
.height = 480,
.width = 720,
.video_place = dv_place_411,
.pix_fmt = PIX_FMT_YUV411P,
.audio_stride = 90,
.audio_min_samples = { 1580, 1452, 1053 }, /* for 48, 44.1 and 32Khz */
.audio_samples_dist = { 1602, 1601, 1602, 1601, 1602 },
.audio_shuffle = dv_audio_shuffle525,
},
{ .dsf = 1,
.frame_size = 144000, /* IEC 61834 - 625/50 (PAL) */
.difseg_size = 12,
.frame_rate = 25,
.frame_rate_base = 1,
.ltc_divisor = 25,
.height = 576,
.width = 720,
.video_place = dv_place_420,
.pix_fmt = PIX_FMT_YUV420P,
.audio_stride = 108,
.audio_min_samples = { 1896, 1742, 1264 }, /* for 48, 44.1 and 32Khz */
.audio_samples_dist = { 1920, 1920, 1920, 1920, 1920 },
.audio_shuffle = dv_audio_shuffle625,
},
{ .dsf = 1,
.frame_size = 144000, /* SMPTE-314M - 625/50 (PAL) */
.difseg_size = 12,
.frame_rate = 25,
.frame_rate_base = 1,
.ltc_divisor = 25,
.height = 576,
.width = 720,
.video_place = dv_place_411P,
.pix_fmt = PIX_FMT_YUV411P,
.audio_stride = 108,
.audio_min_samples = { 1896, 1742, 1264 }, /* for 48, 44.1 and 32Khz */
.audio_samples_dist = { 1920, 1920, 1920, 1920, 1920 },
.audio_shuffle = dv_audio_shuffle625,
}
};
/* Pick the DV profile matching a raw frame by inspecting its header:
 * the DSF bit distinguishes 525/60 from 625/50, and for 625/50 the APT
 * field distinguishes IEC 61834 (420) from SMPTE-314M (411P). */
static inline const DVprofile* dv_frame_profile(uint8_t* frame)
{
    if (!(frame[3] & 0x80)) /* DSF flag */
        return &dv_profiles[0];
    if (!(frame[5] & 0x07)) /* APT flag */
        return &dv_profiles[1];
    return &dv_profiles[2];
}
/* Pick the DV profile for an encoder/decoder from its picture geometry.
 * Only 720-wide pictures are valid DV; 480 lines selects 525/60, any
 * other height selects the IEC 61834 625/50 profile. */
static inline const DVprofile* dv_codec_profile(AVCodecContext* codec)
{
    if (codec->width != 720)
        return NULL;
    return (codec->height == 480) ? &dv_profiles[0] : &dv_profiles[1];
}
......@@ -13,7 +13,7 @@ PPOBJS=
# mux and demuxes
OBJS+=mpeg.o mpegts.o mpegtsenc.o ffm.o crc.o img.o raw.o rm.o \
avienc.o avidec.o wav.o swf.o au.o gif.o mov.o mpjpeg.o dvcore.o dv.o \
avienc.o avidec.o wav.o swf.o au.o gif.o mov.o mpjpeg.o dv.o \
yuv4mpeg.o 4xm.o flvenc.o flvdec.o movenc.o psxstr.o idroq.o ipmovie.o \
nut.o wc3movie.o mp3.o
......
......@@ -18,21 +18,10 @@
*/
#include "avformat.h"
#include "avi.h"
#include "dv.h"
//#define DEBUG
static const struct AVI1Handler {
enum CodecID vcid;
enum CodecID acid;
uint32_t tag;
} AVI1Handlers[] = {
{ CODEC_ID_DVVIDEO, CODEC_ID_DVAUDIO, MKTAG('d', 'v', 's', 'd') },
{ CODEC_ID_DVVIDEO, CODEC_ID_DVAUDIO, MKTAG('d', 'v', 'h', 'd') },
{ CODEC_ID_DVVIDEO, CODEC_ID_DVAUDIO, MKTAG('d', 'v', 's', 'l') },
/* This is supposed to be the last one */
{ CODEC_ID_NONE, CODEC_ID_NONE, 0 },
};
typedef struct AVIIndex {
unsigned char tag[4];
unsigned int flags, pos, len;
......@@ -40,14 +29,11 @@ typedef struct AVIIndex {
} AVIIndex;
typedef struct {
int64_t riff_end;
int64_t movi_end;
int type;
uint8_t *buf;
int buf_size;
int stream_index;
int64_t riff_end;
int64_t movi_end;
offset_t movi_list;
AVIIndex *first, *last;
void* dv_demux;
} AVIContext;
#ifdef DEBUG
......@@ -97,11 +83,6 @@ static int avi_read_header(AVFormatContext *s, AVFormatParameters *ap)
stream_index = -1;
codec_type = -1;
frame_period = 0;
avi->type = 2;
avi->buf = av_malloc(1);
if (!avi->buf)
return -1;
avi->buf_size = 1;
for(;;) {
if (url_feof(pb))
goto fail;
......@@ -134,7 +115,7 @@ static int avi_read_header(AVFormatContext *s, AVFormatParameters *ap)
url_fskip(pb, 4 * 4);
n = get_le32(pb);
for(i=0;i<n;i++) {
st = av_new_stream(s, 0);
st = av_new_stream(s, i);
if (!st)
goto fail;
}
......@@ -144,24 +125,36 @@ static int avi_read_header(AVFormatContext *s, AVFormatParameters *ap)
/* stream header */
stream_index++;
tag1 = get_le32(pb);
handler = get_le32(pb); /* codec tag */
switch(tag1) {
case MKTAG('i', 'a', 'v', 's'):
case MKTAG('i', 'v', 'a', 's'):
/*
* After some consideration -- I don't think we
* have to support anything but DV in a type1 AVIs.
*/
if (s->nb_streams != 1)
goto fail;
avi->type = 1;
avi->stream_index = 0;
if (handler != MKTAG('d', 'v', 's', 'd') &&
handler != MKTAG('d', 'v', 'h', 'd') &&
handler != MKTAG('d', 'v', 's', 'l'))
goto fail;
avi->dv_demux = dv_init_demux(s, stream_index, stream_index + 1);
if (!avi->dv_demux)
goto fail;
stream_index++;
case MKTAG('v', 'i', 'd', 's'):
codec_type = CODEC_TYPE_VIDEO;
if (stream_index >= s->nb_streams) {
url_fskip(pb, size - 4);
url_fskip(pb, size - 8);
break;
}
st = s->streams[stream_index];
handler = get_le32(pb); /* codec tag */
get_le32(pb); /* flags */
get_le16(pb); /* priority */
get_le16(pb); /* language */
......@@ -186,29 +179,6 @@ static int avi_read_header(AVFormatContext *s, AVFormatParameters *ap)
st->codec.frame_rate_base * AV_TIME_BASE /
st->codec.frame_rate;
if (avi->type == 1) {
AVStream *st;
st = av_new_stream(s, 0);
if (!st)
goto fail;
stream_index++;
for (i=0; AVI1Handlers[i].tag != 0; ++i)
if (AVI1Handlers[i].tag == handler)
break;
if (AVI1Handlers[i].tag != 0) {
s->streams[0]->codec.codec_type = CODEC_TYPE_VIDEO;
s->streams[0]->codec.codec_id = AVI1Handlers[i].vcid;
s->streams[1]->codec.codec_type = CODEC_TYPE_AUDIO;
s->streams[1]->codec.codec_id = AVI1Handlers[i].acid;
} else {
goto fail;
}
}
url_fskip(pb, size - 9 * 4);
break;
case MKTAG('a', 'u', 'd', 's'):
......@@ -218,12 +188,11 @@ static int avi_read_header(AVFormatContext *s, AVFormatParameters *ap)
codec_type = CODEC_TYPE_AUDIO;
if (stream_index >= s->nb_streams) {
url_fskip(pb, size - 4);
url_fskip(pb, size - 8);
break;
}
st = s->streams[stream_index];
get_le32(pb); /* tag */
get_le32(pb); /* flags */
get_le16(pb); /* priority */
get_le16(pb); /* language */
......@@ -244,7 +213,7 @@ static int avi_read_header(AVFormatContext *s, AVFormatParameters *ap)
break;
case MKTAG('s', 't', 'r', 'f'):
/* stream header */
if (stream_index >= s->nb_streams || avi->type == 1) {
if (stream_index >= s->nb_streams || avi->dv_demux) {
url_fskip(pb, size);
} else {
st = s->streams[stream_index];
......@@ -305,7 +274,6 @@ static int avi_read_header(AVFormatContext *s, AVFormatParameters *ap)
/* check stream number */
if (stream_index != s->nb_streams - 1) {
fail:
av_free(avi->buf);
for(i=0;i<s->nb_streams;i++) {
av_freep(&s->streams[i]->codec.extradata);
av_freep(&s->streams[i]);
......@@ -316,31 +284,21 @@ static int avi_read_header(AVFormatContext *s, AVFormatParameters *ap)
return 0;
}
/* Packet destructor for packets whose buffer is borrowed (owned by the
 * demuxer, not the packet): release nothing, just clear the fields. */
static void __destruct_pkt(struct AVPacket *pkt)
{
    pkt->size = 0;
    pkt->data = NULL;
}
static int avi_read_packet(AVFormatContext *s, AVPacket *pkt)
{
AVIContext *avi = s->priv_data;
ByteIOContext *pb = &s->pb;
int n, d[8], size, i;
void* dstr;
memset(d, -1, sizeof(int)*8);
if (avi->type == 1 && avi->stream_index) {
/* duplicate DV packet */
av_init_packet(pkt);
pkt->data = avi->buf;
pkt->size = avi->buf_size;
pkt->destruct = __destruct_pkt;
pkt->stream_index = avi->stream_index;
avi->stream_index = !avi->stream_index;
return 0;
if (avi->dv_demux) {
size = dv_get_packet(avi->dv_demux, pkt);
if (size >= 0)
return size;
}
for(i=url_ftell(pb); !url_feof(pb); i++) {
int j;
......@@ -387,26 +345,24 @@ static int avi_read_packet(AVFormatContext *s, AVPacket *pkt)
&& n < s->nb_streams
&& i + size <= avi->movi_end) {
if (avi->type == 1) {
uint8_t *tbuf = av_realloc(avi->buf, size + FF_INPUT_BUFFER_PADDING_SIZE);
if (!tbuf)
return -1;
avi->buf = tbuf;
avi->buf_size = size;
av_init_packet(pkt);
pkt->data = avi->buf;
pkt->size = avi->buf_size;
pkt->destruct = __destruct_pkt;
avi->stream_index = n;
} else {
av_new_packet(pkt, size);
}
av_new_packet(pkt, size);
get_buffer(pb, pkt->data, size);
if (size & 1)
if (size & 1) {
get_byte(pb);
pkt->stream_index = n;
pkt->flags |= PKT_FLAG_KEY; // FIXME: We really should read index for that
return 0;
size++;
}
if (avi->dv_demux) {
dstr = pkt->destruct;
size = dv_produce_packet(avi->dv_demux, pkt,
pkt->data, pkt->size);
pkt->destruct = dstr;
} else {
pkt->stream_index = n;
pkt->flags |= PKT_FLAG_KEY; // FIXME: We really should read
// index for that
}
return size;
}
}
return -1;
......@@ -416,7 +372,6 @@ static int avi_read_close(AVFormatContext *s)
{
int i;
AVIContext *avi = s->priv_data;
av_free(avi->buf);
for(i=0;i<s->nb_streams;i++) {
AVStream *st = s->streams[i];
......@@ -424,6 +379,9 @@ static int avi_read_close(AVFormatContext *s)
av_free(st->codec.extradata);
}
if (avi->dv_demux)
av_free(avi->dv_demux);
return 0;
}
......
此差异已折叠。
/*
* General DV muxer/demuxer
* Copyright (c) 2003 Roman Shaposhnick
*
* Many thanks to Dan Dennedy <dan@dennedy.org> for providing wealth
* of DV technical info.
*
* Raw DV format
* Copyright (c) 2002 Fabrice Bellard.
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this library; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
/* Create a DV demux context that splits raw DV frames into a video
 * stream (index vid) and an audio stream (index aid); NULL on failure. */
void* dv_init_demux(AVFormatContext *s, int vid, int aid);
/* Fetch a queued packet from the demux context; returns the packet
 * size, negative when nothing is pending (see callers in avidec/dv1394). */
int dv_get_packet(void*, AVPacket *);
/* Turn one raw DV frame (buffer, length) into the next output packet;
 * returns the produced packet size. */
int dv_produce_packet(void*, AVPacket*, uint8_t*, int);
/* Muxing counterparts. NOTE(review): semantics inferred from names;
 * no call sites visible here -- confirm against libavformat/dv.c. */
void* dv_init_mux(AVFormatContext* s);
int dv_assemble_frame(void *c, AVStream*, const uint8_t*, int, uint8_t**);
void dv_delete_mux(void*);
......@@ -31,13 +31,11 @@
#undef DV1394_DEBUG
#include "dv1394.h"
#include "dv.h"
struct dv1394_data {
int fd;
int channel;
int width, height;
int frame_rate;
int frame_size;
int format;
void *ring; /* Ring buffer */
......@@ -45,9 +43,9 @@ struct dv1394_data {
int avail; /* Number of frames available for reading */
int done; /* Number of completed frames */
int stream; /* Current stream. 0 - video, 1 - audio */
int64_t pts; /* Current timestamp */
AVStream *vst, *ast;
void* dv_demux; /* Generic DV muxing/demuxing context */
};
/*
......@@ -69,7 +67,6 @@ static int dv1394_reset(struct dv1394_data *dv)
return -1;
dv->avail = dv->done = 0;
dv->stream = 0;
return 0;
}
......@@ -88,14 +85,9 @@ static int dv1394_read_header(AVFormatContext * context, AVFormatParameters * ap
struct dv1394_data *dv = context->priv_data;
const char *video_device;
dv->vst = av_new_stream(context, 0);
if (!dv->vst)
return -ENOMEM;
dv->ast = av_new_stream(context, 1);
if (!dv->ast) {
av_free(dv->vst);
return -ENOMEM;
}
dv->dv_demux = dv_init_demux(context, 0, 1);
if (!dv->dv_demux)
goto failed;
if (ap->standard && !strcasecmp(ap->standard, "pal"))
dv->format = DV1394_PAL;
......@@ -107,17 +99,6 @@ static int dv1394_read_header(AVFormatContext * context, AVFormatParameters * ap
else
dv->channel = DV1394_DEFAULT_CHANNEL;
dv->width = DV1394_WIDTH;
if (dv->format == DV1394_NTSC) {
dv->height = DV1394_NTSC_HEIGHT;
dv->frame_size = DV1394_NTSC_FRAME_SIZE;
dv->frame_rate = 30;
} else {
dv->height = DV1394_PAL_HEIGHT;
dv->frame_size = DV1394_PAL_FRAME_SIZE;
dv->frame_rate = 25;
}
/* Open and initialize DV1394 device */
video_device = ap->device;
if (!video_device)
......@@ -140,21 +121,6 @@ static int dv1394_read_header(AVFormatContext * context, AVFormatParameters * ap
goto failed;
}
dv->stream = 0;
dv->vst->codec.codec_type = CODEC_TYPE_VIDEO;
dv->vst->codec.codec_id = CODEC_ID_DVVIDEO;
dv->vst->codec.width = dv->width;
dv->vst->codec.height = dv->height;
dv->vst->codec.frame_rate = dv->frame_rate;
dv->vst->codec.frame_rate_base = 1;
dv->vst->codec.bit_rate = 25000000; /* Consumer DV is 25Mbps */
dv->ast->codec.codec_type = CODEC_TYPE_AUDIO;
dv->ast->codec.codec_id = CODEC_ID_DVAUDIO;
dv->ast->codec.channels = 2;
dv->ast->codec.sample_rate= 48000;
av_set_pts_info(context, 48, 1, 1000000);
if (dv1394_start(dv) < 0)
......@@ -164,55 +130,17 @@ static int dv1394_read_header(AVFormatContext * context, AVFormatParameters * ap
failed:
close(dv->fd);
av_free(dv->vst);
av_free(dv->ast);
return -EIO;
}
/* Packet destructor for packets pointing straight into the dv1394 ring
 * buffer: the memory is not ours to free, so only reset the fields. */
static void __destruct_pkt(struct AVPacket *pkt)
{
    pkt->size = 0;
    pkt->data = NULL;
}
/* Emit the current ring-buffer frame as an AVPacket. Each captured DV
 * frame is handed out twice -- first as video (dv->stream == 0), then
 * as audio (1) -- and the ring index only advances on the second pass. */
static inline int __get_frame(struct dv1394_data *dv, AVPacket *pkt)
{
char *ptr = dv->ring + (dv->index * DV1394_PAL_FRAME_SIZE);
if (dv->stream) {
/* audio pass: frame fully consumed, advance along the ring */
dv->index = (dv->index + 1) % DV1394_RING_FRAMES;
dv->done++; dv->avail--;
} else {
/* video pass: timestamp the frame (48bit wallclock, see pts_info) */
dv->pts = av_gettime() & ((1LL << 48) - 1);
}
/* byte 3 bit 7 is the DSF flag: clear = NTSC, set = PAL */
dv->format = ((ptr[3] & 0x80) == 0) ? DV1394_NTSC : DV1394_PAL;
if (dv->format == DV1394_NTSC) {
dv->frame_size = DV1394_NTSC_FRAME_SIZE;
dv->vst->codec.height = dv->height = DV1394_NTSC_HEIGHT;
dv->vst->codec.frame_rate = dv->frame_rate = 30;
} else {
dv->frame_size = DV1394_PAL_FRAME_SIZE;
dv->vst->codec.height = dv->height = DV1394_PAL_HEIGHT;
dv->vst->codec.frame_rate = dv->frame_rate = 25;
}
av_init_packet(pkt);
/* packet points into the ring buffer; destructor must not free it */
pkt->destruct = __destruct_pkt;
pkt->data = ptr;
pkt->size = dv->frame_size;
pkt->pts = dv->pts;
pkt->stream_index = dv->stream;
pkt->flags |= PKT_FLAG_KEY;
/* alternate video/audio for the next call */
dv->stream ^= 1;
return dv->frame_size;
}
static int dv1394_read_packet(AVFormatContext *context, AVPacket *pkt)
{
struct dv1394_data *dv = context->priv_data;
int size;
size = dv_get_packet(dv->dv_demux, pkt);
if (size > 0)
goto out;
if (!dv->avail) {
struct dv1394_status s;
......@@ -276,7 +204,16 @@ restart_poll:
dv->done);
#endif
return __get_frame(dv, pkt);
size = dv_produce_packet(dv->dv_demux, pkt,
dv->ring + (dv->index * DV1394_PAL_FRAME_SIZE),
DV1394_PAL_FRAME_SIZE);
dv->index = (dv->index + 1) % DV1394_RING_FRAMES;
dv->done++; dv->avail--;
dv->pts = av_gettime() & ((1LL << 48) - 1);
out:
pkt->pts = dv->pts;
return size;
}
static int dv1394_close(AVFormatContext * context)
......@@ -292,6 +229,7 @@ static int dv1394_close(AVFormatContext * context)
perror("Failed to munmap DV1394 ring buffer");
close(dv->fd);
av_free(dv->dv_demux);
return 0;
}
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册