Commit 5b4f039e authored by Matt Oliver

Merge remote-tracking branch 'upstream/master'

......@@ -35,6 +35,7 @@ version <next>:
- Fix stsd atom corruption in DNxHD QuickTimes
- Canopus HQX decoder
- RTP depacketization of T.140 text (RFC 4103)
- VP9 RTP payload format (draft 0) experimental depacketizer
version 2.5:
......
......@@ -2493,7 +2493,7 @@ xwma_demuxer_select="riffdec"
# indevs / outdevs
alsa_indev_deps="alsa_asoundlib_h snd_pcm_htimestamp"
alsa_outdev_deps="alsa_asoundlib_h"
avfoundation_indev_extralibs="-framework CoreVideo -framework Foundation -framework AVFoundation -framework CoreMedia -framework CoreGraphics"
avfoundation_indev_extralibs="-framework CoreVideo -framework Foundation -framework AVFoundation -framework CoreMedia"
avfoundation_indev_select="avfoundation"
bktr_indev_deps_any="dev_bktr_ioctl_bt848_h machine_ioctl_bt848_h dev_video_bktr_ioctl_bt848_h dev_ic_bt8xx_h"
caca_outdev_deps="libcaca"
......@@ -4881,6 +4881,8 @@ done
# these are off by default, so fail if requested and not available
enabled avfoundation_indev && { check_header_oc AVFoundation/AVFoundation.h || disable avfoundation_indev; }
enabled avfoundation_indev && { check_lib2 CoreGraphics/CoreGraphics.h CGGetActiveDisplayList -framework CoreGraphics ||
check_lib2 ApplicationServices/ApplicationServices.h CGGetActiveDisplayList -framework ApplicationServices; }
enabled avisynth && { { check_lib2 "windows.h" LoadLibrary; } ||
{ check_lib2 "dlfcn.h" dlopen -ldl; } ||
die "ERROR: LoadLibrary/dlopen not found for avisynth"; }
......
......@@ -15,6 +15,9 @@ libavutil: 2014-08-09
API changes, most recent first:
2015-03-04 - xxxxxxx - lavf 56.25.100
Add avformat_flush()
2015-03-xx - xxxxxxx - lavf 56.24.100
Add avio_put_str16be()
......
......@@ -1081,8 +1081,8 @@ Set raise error timeout, expressed in microseconds.
This option is only relevant in read mode: if no data arrived in more
than this time interval, raise error.
@item listen_timeout=@var{microseconds}
Set listen timeout, expressed in microseconds.
@item listen_timeout=@var{milliseconds}
Set listen timeout, expressed in milliseconds.
@end table
The following example shows how to setup a listening TCP connection
......
......@@ -1236,7 +1236,8 @@ static int decode_ics_info(AACContext *ac, IndividualChannelStream *ics,
if (aot != AOT_ER_AAC_ELD) {
if (get_bits1(gb)) {
av_log(ac->avctx, AV_LOG_ERROR, "Reserved bit set.\n");
return AVERROR_INVALIDDATA;
if (ac->avctx->err_recognition & AV_EF_BITSTREAM)
return AVERROR_INVALIDDATA;
}
ics->window_sequence[1] = ics->window_sequence[0];
ics->window_sequence[0] = get_bits(gb, 2);
......
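For context, a minimal caller-side sketch of how an application opts into the strict behaviour that the aacdec hunk above now gates on AV_EF_BITSTREAM; the helper function below is hypothetical and not part of this commit:

#include <libavcodec/avcodec.h>

/* Sketch: open an AAC decoder with strict bitstream checks enabled, so the
 * reserved-bit case above still returns AVERROR_INVALIDDATA. */
static AVCodecContext *open_strict_aac_decoder(void)
{
    AVCodec *dec = avcodec_find_decoder(AV_CODEC_ID_AAC);
    AVCodecContext *avctx = dec ? avcodec_alloc_context3(dec) : NULL;
    if (!avctx)
        return NULL;
    avctx->err_recognition |= AV_EF_BITSTREAM;   /* treat bitstream violations as errors */
    if (avcodec_open2(avctx, dec, NULL) < 0) {
        avcodec_free_context(&avctx);
        return NULL;
    }
    return avctx;
}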
......@@ -28,6 +28,7 @@
#include "libavutil/error.h"
#include "dca.h"
#include "dca_syncwords.h"
#include "put_bits.h"
const uint32_t avpriv_dca_sample_rates[16] = {
......@@ -49,18 +50,18 @@ int avpriv_dca_convert_bitstream(const uint8_t *src, int src_size, uint8_t *dst,
mrk = AV_RB32(src);
switch (mrk) {
case DCA_MARKER_RAW_BE:
case DCA_SYNCWORD_CORE_BE:
memcpy(dst, src, src_size);
return src_size;
case DCA_MARKER_RAW_LE:
case DCA_SYNCWORD_CORE_LE:
for (i = 0; i < (src_size + 1) >> 1; i++)
*sdst++ = av_bswap16(*ssrc++);
return src_size;
case DCA_MARKER_14B_BE:
case DCA_MARKER_14B_LE:
case DCA_SYNCWORD_CORE_14B_BE:
case DCA_SYNCWORD_CORE_14B_LE:
init_put_bits(&pb, dst, max_size);
for (i = 0; i < (src_size + 1) >> 1; i++, src += 2) {
tmp = ((mrk == DCA_MARKER_14B_BE) ? AV_RB16(src) : AV_RL16(src)) & 0x3FFF;
tmp = ((mrk == DCA_SYNCWORD_CORE_14B_BE) ? AV_RB16(src) : AV_RL16(src)) & 0x3FFF;
put_bits(&pb, 14, tmp);
}
flush_put_bits(&pb);
......
......@@ -35,15 +35,6 @@
#include "fmtconvert.h"
#include "get_bits.h"
/** DCA syncwords, also used for bitstream type detection */
#define DCA_MARKER_RAW_BE 0x7FFE8001
#define DCA_MARKER_RAW_LE 0xFE7F0180
#define DCA_MARKER_14B_BE 0x1FFFE800
#define DCA_MARKER_14B_LE 0xFF1F00E8
/** DCA-HD specific block starts with this marker. */
#define DCA_HD_MARKER 0x64582025
#define DCA_PRIM_CHANNELS_MAX (7)
#define DCA_ABITS_MAX (32) /* Should be 28 */
#define DCA_SUBSUBFRAMES_MAX (4)
......
......@@ -23,6 +23,7 @@
*/
#include "dca.h"
#include "dca_syncwords.h"
#include "get_bits.h"
#include "parser.h"
......@@ -35,9 +36,9 @@ typedef struct DCAParseContext {
} DCAParseContext;
#define IS_MARKER(state, i, buf, buf_size) \
((state == DCA_MARKER_14B_LE && (i < buf_size - 2) && (buf[i + 1] & 0xF0) == 0xF0 && buf[i + 2] == 0x07) || \
(state == DCA_MARKER_14B_BE && (i < buf_size - 2) && buf[i + 1] == 0x07 && (buf[i + 2] & 0xF0) == 0xF0) || \
state == DCA_MARKER_RAW_LE || state == DCA_MARKER_RAW_BE || state == DCA_HD_MARKER)
((state == DCA_SYNCWORD_CORE_14B_LE && (i < buf_size - 2) && (buf[i + 1] & 0xF0) == 0xF0 && buf[i + 2] == 0x07) || \
(state == DCA_SYNCWORD_CORE_14B_BE && (i < buf_size - 2) && buf[i + 1] == 0x07 && (buf[i + 2] & 0xF0) == 0xF0) || \
state == DCA_SYNCWORD_CORE_LE || state == DCA_SYNCWORD_CORE_BE || state == DCA_SYNCWORD_SUBSTREAM)
/**
* Find the end of the current frame in the bitstream.
......@@ -58,7 +59,7 @@ static int dca_find_frame_end(DCAParseContext *pc1, const uint8_t *buf,
for (i = 0; i < buf_size; i++) {
state = (state << 8) | buf[i];
if (IS_MARKER(state, i, buf, buf_size)) {
if (!pc1->lastmarker || state == pc1->lastmarker || pc1->lastmarker == DCA_HD_MARKER) {
if (!pc1->lastmarker || state == pc1->lastmarker || pc1->lastmarker == DCA_SYNCWORD_SUBSTREAM) {
start_found = 1;
pc1->lastmarker = state;
i++;
......@@ -71,9 +72,9 @@ static int dca_find_frame_end(DCAParseContext *pc1, const uint8_t *buf,
for (; i < buf_size; i++) {
pc1->size++;
state = (state << 8) | buf[i];
if (state == DCA_HD_MARKER && !pc1->hd_pos)
if (state == DCA_SYNCWORD_SUBSTREAM && !pc1->hd_pos)
pc1->hd_pos = pc1->size;
if (IS_MARKER(state, i, buf, buf_size) && (state == pc1->lastmarker || pc1->lastmarker == DCA_HD_MARKER)) {
if (IS_MARKER(state, i, buf, buf_size) && (state == pc1->lastmarker || pc1->lastmarker == DCA_SYNCWORD_SUBSTREAM)) {
if (pc1->framesize > pc1->size)
continue;
pc->frame_start_found = 0;
......
/*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#ifndef AVCODEC_DCA_SYNCWORDS_H
#define AVCODEC_DCA_SYNCWORDS_H
enum DCASyncwords {
DCA_SYNCWORD_CORE_BE = 0x7FFE8001,
DCA_SYNCWORD_CORE_LE = 0xFE7F0180,
DCA_SYNCWORD_CORE_14B_BE = 0x1FFFE800,
DCA_SYNCWORD_CORE_14B_LE = 0xFF1F00E8,
DCA_SYNCWORD_XCH = 0x5A5A5A5A,
DCA_SYNCWORD_XXCH = 0x47004A03,
DCA_SYNCWORD_X96 = 0x1D95F262,
DCA_SYNCWORD_XBR = 0x655E315E,
DCA_SYNCWORD_LBR = 0x0A801921,
DCA_SYNCWORD_XLL = 0x41A29547,
DCA_SYNCWORD_SUBSTREAM = 0x64582025,
DCA_SYNCWORD_SUBSTREAM_CORE = 0x02B09261,
};
#endif /* AVCODEC_DCA_SYNCWORDS_H */
......@@ -37,6 +37,7 @@
#include "avcodec.h"
#include "dca.h"
#include "dca_syncwords.h"
#include "dcadata.h"
#include "dcadsp.h"
#include "dcahuff.h"
......@@ -1437,7 +1438,7 @@ static int dca_decode_frame(AVCodecContext *avctx, void *data,
uint32_t bits = get_bits_long(&s->gb, 32);
switch (bits) {
case 0x5a5a5a5a: {
case DCA_SYNCWORD_XCH: {
int ext_amode, xch_fsize;
s->xch_base_channel = s->prim_channels;
......@@ -1479,7 +1480,7 @@ static int dca_decode_frame(AVCodecContext *avctx, void *data,
s->xch_present = 1;
break;
}
case 0x47004a03:
case DCA_SYNCWORD_XXCH:
/* XXCh: extended channels */
/* usually found either in core or HD part in DTS-HD HRA streams,
* but not in DTS-ES which contains XCh extensions instead */
......@@ -1517,7 +1518,7 @@ static int dca_decode_frame(AVCodecContext *avctx, void *data,
/* check for ExSS (HD part) */
if (s->dca_buffer_size - s->frame_size > 32 &&
get_bits_long(&s->gb, 32) == DCA_HD_MARKER)
get_bits_long(&s->gb, 32) == DCA_SYNCWORD_SUBSTREAM)
ff_dca_exss_parse_header(s);
avctx->profile = s->profile;
......
......@@ -784,7 +784,10 @@ static const AVOption options[] = {
{ "aq-mode", "AQ method", OFFSET(aq_mode), AV_OPT_TYPE_INT, { .i64 = -1 }, -1, INT_MAX, VE, "aq_mode"},
{ "none", NULL, 0, AV_OPT_TYPE_CONST, {.i64 = X264_AQ_NONE}, INT_MIN, INT_MAX, VE, "aq_mode" },
{ "variance", "Variance AQ (complexity mask)", 0, AV_OPT_TYPE_CONST, {.i64 = X264_AQ_VARIANCE}, INT_MIN, INT_MAX, VE, "aq_mode" },
{ "autovariance", "Auto-variance AQ (experimental)", 0, AV_OPT_TYPE_CONST, {.i64 = X264_AQ_AUTOVARIANCE}, INT_MIN, INT_MAX, VE, "aq_mode" },
{ "autovariance", "Auto-variance AQ", 0, AV_OPT_TYPE_CONST, {.i64 = X264_AQ_AUTOVARIANCE}, INT_MIN, INT_MAX, VE, "aq_mode" },
#if X264_BUILD >= 144
{ "autovariance-biased", "Auto-variance AQ with bias to dark scenes", 0, AV_OPT_TYPE_CONST, {.i64 = X264_AQ_AUTOVARIANCE_BIASED}, INT_MIN, INT_MAX, VE, "aq_mode" },
#endif
{ "aq-strength", "AQ strength. Reduces blocking and blurring in flat and textured areas.", OFFSET(aq_strength), AV_OPT_TYPE_FLOAT, {.dbl = -1}, -1, FLT_MAX, VE},
{ "psy", "Use psychovisual optimizations.", OFFSET(psy), AV_OPT_TYPE_INT, { .i64 = -1 }, -1, 1, VE },
{ "psy-rd", "Strength of psychovisual optimization, in <psy-rd>:<psy-trellis> format.", OFFSET(psy_rd), AV_OPT_TYPE_STRING, {0 }, 0, 0, VE},
......
......@@ -69,7 +69,7 @@ static int pcx_rle_encode( uint8_t *dst, int dst_size,
// check worst-case upper bound on dst_size
if (dst_size < 2LL * src_plane_size * nplanes || src_plane_size <= 0)
return -1;
return AVERROR(EINVAL);
for (p = 0; p < nplanes; p++) {
int count = 1;
......@@ -114,7 +114,7 @@ static int pcx_encode_frame(AVCodecContext *avctx, AVPacket *pkt,
if (avctx->width > 65535 || avctx->height > 65535) {
av_log(avctx, AV_LOG_ERROR, "image dimensions do not fit in 16 bits\n");
return -1;
return AVERROR(EINVAL);
}
switch (avctx->pix_fmt) {
......@@ -144,7 +144,7 @@ static int pcx_encode_frame(AVCodecContext *avctx, AVPacket *pkt,
break;
default:
av_log(avctx, AV_LOG_ERROR, "unsupported pixfmt\n");
return -1;
return AVERROR(EINVAL);
}
line_bytes = (avctx->width * bpp + 7) >> 3;
......@@ -186,7 +186,7 @@ static int pcx_encode_frame(AVCodecContext *avctx, AVPacket *pkt,
if ((written = pcx_rle_encode(buf, buf_end - buf,
src, line_bytes, nplanes)) < 0) {
av_log(avctx, AV_LOG_ERROR, "buffer too small\n");
return -1;
return AVERROR_BUG;
}
buf += written;
src += frame->linesize[0];
......@@ -195,7 +195,7 @@ static int pcx_encode_frame(AVCodecContext *avctx, AVPacket *pkt,
if (nplanes == 1 && bpp == 8) {
if (buf_end - buf < 257) {
av_log(avctx, AV_LOG_ERROR, "buffer too small\n");
return -1;
return AVERROR_BUG;
}
bytestream_put_byte(&buf, 12);
for (i = 0; i < 256; i++) {
......
......@@ -28,6 +28,7 @@
#include "libavutil/avassert.h"
#include "libavutil/libm.h"
#include "libavutil/opt.h"
#include "libavutil/color_utils.h"
#include <zlib.h>
......@@ -277,31 +278,9 @@ static int png_get_chrm(enum AVColorPrimaries prim, uint8_t *buf)
static int png_get_gama(enum AVColorTransferCharacteristic trc, uint8_t *buf)
{
double gamma;
switch (trc) {
case AVCOL_TRC_BT709:
case AVCOL_TRC_SMPTE170M:
case AVCOL_TRC_SMPTE240M:
case AVCOL_TRC_BT1361_ECG:
case AVCOL_TRC_BT2020_10:
case AVCOL_TRC_BT2020_12:
/* these share a segmented TRC, but gamma 1.961 is a close
approximation, and also more correct for decoding content */
gamma = 1.961;
break;
case AVCOL_TRC_GAMMA22:
case AVCOL_TRC_IEC61966_2_1:
gamma = 2.2;
break;
case AVCOL_TRC_GAMMA28:
gamma = 2.8;
break;
case AVCOL_TRC_LINEAR:
gamma = 1.0;
break;
default:
return 0;
}
double gamma = avpriv_get_gamma_from_trc(trc);
if (gamma <= 1e-6)
return 0;
AV_WB32_PNG(buf, 1.0 / gamma);
return 1;
......
......@@ -104,7 +104,7 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *frame)
}
av_log(ctx, AV_LOG_INFO,
"n:%"PRId64" pts:%s pts_time:%s pos:%"PRId64" "
"n:%4"PRId64" pts:%7s pts_time:%-7s pos:%9"PRId64" "
"fmt:%s sar:%d/%d s:%dx%d i:%c iskey:%d type:%c "
"checksum:%08"PRIX32" plane_checksum:[%08"PRIX32,
inlink->frame_count,
......
......@@ -2136,6 +2136,24 @@ int av_seek_frame(AVFormatContext *s, int stream_index, int64_t timestamp,
*/
int avformat_seek_file(AVFormatContext *s, int stream_index, int64_t min_ts, int64_t ts, int64_t max_ts, int flags);
/**
* Discard all internally buffered data. This can be useful when dealing with
* discontinuities in the byte stream. Generally works only with formats that
* can resync. This includes headerless formats like MPEG-TS/TS but should also
* work with NUT, Ogg and in a limited way AVI for example.
*
* The set of streams, the detected duration, stream parameters and codecs do
* not change when calling this function. If you want a complete reset, it's
* better to open a new AVFormatContext.
*
* This does not flush the AVIOContext (s->pb). If necessary, call
* avio_flush(s->pb) before calling this function.
*
* @param s media file handle
* @return >=0 on success, error code otherwise
*/
int avformat_flush(AVFormatContext *s);
/**
* Start playing a network-based stream (e.g. RTSP stream) at the
* current position.
......
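A minimal caller-side sketch of the avformat_flush() API documented in the hunk above; the function and context names are hypothetical and not part of this commit:

#include <libavformat/avformat.h>

/* Sketch: after a discontinuity in a headerless stream such as MPEG-TS,
 * drop libavformat's buffered data and resume reading. */
static int resync_after_discontinuity(AVFormatContext *ctx)
{
    int ret;

    avio_flush(ctx->pb);        /* flush the I/O layer first, as the doc above notes */
    ret = avformat_flush(ctx);  /* discard internally buffered packets */
    if (ret < 0)
        return ret;
    /* the demuxer resyncs on the next av_read_frame() call */
    return 0;
}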
......@@ -72,7 +72,6 @@ typedef struct OutputStream {
int bit_rate;
char bandwidth_str[64];
int codec_str_extradata_size;
char codec_str[100];
} OutputStream;
......@@ -503,12 +502,6 @@ static int write_manifest(AVFormatContext *s, int final)
if (st->codec->codec_type != AVMEDIA_TYPE_VIDEO)
continue;
if (os->codec_str_extradata_size != st->codec->extradata_size) {
memset(os->codec_str, 0, sizeof(os->codec_str));
set_codec_str(s, st->codec, os->codec_str, sizeof(os->codec_str));
os->codec_str_extradata_size = st->codec->extradata_size;
}
avio_printf(out, "\t\t\t<Representation id=\"%d\" mimeType=\"video/mp4\" codecs=\"%s\"%s width=\"%d\" height=\"%d\">\n", i, os->codec_str, os->bandwidth_str, st->codec->width, st->codec->height);
output_segment_list(&c->streams[i], out, c);
avio_printf(out, "\t\t\t</Representation>\n");
......@@ -524,12 +517,6 @@ static int write_manifest(AVFormatContext *s, int final)
if (st->codec->codec_type != AVMEDIA_TYPE_AUDIO)
continue;
if (os->codec_str_extradata_size != st->codec->extradata_size) {
memset(os->codec_str, 0, sizeof(os->codec_str));
set_codec_str(s, st->codec, os->codec_str, sizeof(os->codec_str));
os->codec_str_extradata_size = st->codec->extradata_size;
}
avio_printf(out, "\t\t\t<Representation id=\"%d\" mimeType=\"audio/mp4\" codecs=\"%s\"%s audioSamplingRate=\"%d\">\n", i, os->codec_str, os->bandwidth_str, st->codec->sample_rate);
avio_printf(out, "\t\t\t\t<AudioChannelConfiguration schemeIdUri=\"urn:mpeg:dash:23003:3:audio_channel_configuration:2011\" value=\"%d\" />\n", st->codec->channels);
output_segment_list(&c->streams[i], out, c);
......@@ -664,8 +651,7 @@ static int dash_write_header(AVFormatContext *s)
else if (st->codec->codec_type == AVMEDIA_TYPE_AUDIO)
c->has_audio = 1;
set_codec_str(s, s->streams[i]->codec, os->codec_str, sizeof(os->codec_str));
os->codec_str_extradata_size = s->streams[i]->codec->extradata_size;
set_codec_str(s, st->codec, os->codec_str, sizeof(os->codec_str));
os->first_pts = AV_NOPTS_VALUE;
os->max_pts = AV_NOPTS_VALUE;
os->segment_index = 1;
......@@ -750,6 +736,29 @@ static void find_index_range(AVFormatContext *s, const char *full_path,
*index_length = AV_RB32(&buf[0]);
}
static int update_stream_extradata(AVFormatContext *s, OutputStream *os,
AVCodecContext *codec)
{
uint8_t *extradata;
if (os->ctx->streams[0]->codec->extradata_size || !codec->extradata_size)
return 0;
extradata = av_malloc(codec->extradata_size);
if (!extradata)
return AVERROR(ENOMEM);
memcpy(extradata, codec->extradata, codec->extradata_size);
os->ctx->streams[0]->codec->extradata = extradata;
os->ctx->streams[0]->codec->extradata_size = codec->extradata_size;
set_codec_str(s, codec, os->codec_str, sizeof(os->codec_str));
return 0;
}
static int dash_flush(AVFormatContext *s, int final, int stream)
{
DASHContext *c = s->priv_data;
......@@ -853,6 +862,10 @@ static int dash_write_packet(AVFormatContext *s, AVPacket *pkt)
int64_t seg_end_duration = (os->segment_index) * (int64_t) c->min_seg_duration;
int ret;
ret = update_stream_extradata(s, os, st->codec);
if (ret < 0)
return ret;
// If forcing the stream to start at 0, the mp4 muxer will set the start
// timestamps to 0. Do the same here, to avoid mismatches in duration/timestamps.
if (os->first_pts == AV_NOPTS_VALUE &&
......
......@@ -20,16 +20,13 @@
*/
#include "libavcodec/bytestream.h"
#include "libavcodec/get_bits.h"
#include "libavcodec/dca.h"
#include "libavcodec/dca_syncwords.h"
#include "libavcodec/get_bits.h"
#include "avformat.h"
#include "rawdec.h"
#define DCA_MARKER_14B_BE 0x1FFFE800
#define DCA_MARKER_14B_LE 0xFF1F00E8
#define DCA_MARKER_RAW_BE 0x7FFE8001
#define DCA_MARKER_RAW_LE 0xFE7F0180
static int dts_probe(AVProbeData *p)
{
const uint8_t *buf, *bufp;
......@@ -53,18 +50,18 @@ static int dts_probe(AVProbeData *p)
diff += FFABS(((int16_t)AV_RL16(buf)) - (int16_t)AV_RL16(buf-4));
/* regular bitstream */
if (state == DCA_MARKER_RAW_BE)
if (state == DCA_SYNCWORD_CORE_BE)
marker = 0;
else if (state == DCA_MARKER_RAW_LE)
else if (state == DCA_SYNCWORD_CORE_LE)
marker = 1;
/* 14 bits big-endian bitstream */
else if (state == DCA_MARKER_14B_BE &&
else if (state == DCA_SYNCWORD_CORE_14B_BE &&
(bytestream_get_be16(&bufp) & 0xFFF0) == 0x07F0)
marker = 2;
/* 14 bits little-endian bitstream */
else if (state == DCA_MARKER_14B_LE &&
else if (state == DCA_SYNCWORD_CORE_14B_LE &&
(bytestream_get_be16(&bufp) & 0xF0FF) == 0xF007)
marker = 3;
else
......
......@@ -325,6 +325,17 @@ static void write_metadata(AVFormatContext *s, unsigned int ts)
avio_wb32(pb, data_size + 11);
}
static int unsupported_codec(AVFormatContext *s,
const char* type, int codec_id)
{
const AVCodecDescriptor *desc = avcodec_descriptor_get(codec_id);
av_log(s, AV_LOG_ERROR,
"%s codec %s not compatible with flv\n",
type,
desc ? desc->name : "unknown");
return AVERROR(ENOSYS);
}
static int flv_write_header(AVFormatContext *s)
{
int i;
......@@ -347,11 +358,9 @@ static int flv_write_header(AVFormatContext *s)
return AVERROR(EINVAL);
}
flv->video_enc = enc;
if (enc->codec_tag == 0) {
av_log(s, AV_LOG_ERROR, "Video codec '%s' for stream %d is not compatible with FLV\n",
avcodec_get_name(enc->codec_id), i);
return AVERROR(EINVAL);
}
if (!ff_codec_get_tag(flv_video_codec_ids, enc->codec_id))
return unsupported_codec(s, "Video", enc->codec_id);
if (enc->codec_id == AV_CODEC_ID_MPEG4 ||
enc->codec_id == AV_CODEC_ID_H263) {
int error = s->strict_std_compliance > FF_COMPLIANCE_UNOFFICIAL;
......@@ -376,17 +385,14 @@ static int flv_write_header(AVFormatContext *s)
}
flv->audio_enc = enc;
if (get_audio_flags(s, enc) < 0)
return AVERROR_INVALIDDATA;
return unsupported_codec(s, "Audio", enc->codec_id);
if (enc->codec_id == AV_CODEC_ID_PCM_S16BE)
av_log(s, AV_LOG_WARNING,
"16-bit big-endian audio in flv is valid but most likely unplayable (hardware dependent); use s16le\n");
break;
case AVMEDIA_TYPE_DATA:
if (enc->codec_id != AV_CODEC_ID_TEXT && enc->codec_id != AV_CODEC_ID_NONE) {
av_log(s, AV_LOG_ERROR, "Data codec '%s' for stream %d is not compatible with FLV\n",
avcodec_get_name(enc->codec_id), i);
return AVERROR_INVALIDDATA;
}
if (enc->codec_id != AV_CODEC_ID_TEXT && enc->codec_id != AV_CODEC_ID_NONE)
return unsupported_codec(s, "Data", enc->codec_id);
flv->data_enc = enc;
break;
case AVMEDIA_TYPE_SUBTITLE:
......@@ -536,13 +542,7 @@ static int flv_write_packet(AVFormatContext *s, AVPacket *pkt)
case AVMEDIA_TYPE_VIDEO:
avio_w8(pb, FLV_TAG_TYPE_VIDEO);
flags = enc->codec_tag;
if (flags <= 0 || flags > 15) {
av_log(s, AV_LOG_ERROR,
"Video codec '%s' is not compatible with FLV\n",
avcodec_get_name(enc->codec_id));
return AVERROR(EINVAL);
}
flags = ff_codec_get_tag(flv_video_codec_ids, enc->codec_id);
flags |= pkt->flags & AV_PKT_FLAG_KEY ? FLV_FRAME_KEY : FLV_FRAME_INTER;
break;
......
......@@ -40,10 +40,12 @@
#include "libavutil/avstring.h"
#include "libavutil/intfloat.h"
#include "libavutil/mathematics.h"
#include "libavutil/libm.h"
#include "libavutil/opt.h"
#include "libavutil/dict.h"
#include "libavutil/pixdesc.h"
#include "libavutil/timecode.h"
#include "libavutil/color_utils.h"
#include "hevc.h"
#include "rtpenc.h"
#include "mov_chan.h"
......@@ -64,7 +66,8 @@ static const AVOption options[] = {
{ "dash", "Write DASH compatible fragmented MP4", 0, AV_OPT_TYPE_CONST, {.i64 = FF_MOV_FLAG_DASH}, INT_MIN, INT_MAX, AV_OPT_FLAG_ENCODING_PARAM, "movflags" },
{ "frag_discont", "Signal that the next fragment is discontinuous from earlier ones", 0, AV_OPT_TYPE_CONST, {.i64 = FF_MOV_FLAG_FRAG_DISCONT}, INT_MIN, INT_MAX, AV_OPT_FLAG_ENCODING_PARAM, "movflags" },
{ "delay_moov", "Delay writing the initial moov until the first fragment is cut, or until the first fragment flush", 0, AV_OPT_TYPE_CONST, {.i64 = FF_MOV_FLAG_DELAY_MOOV}, INT_MIN, INT_MAX, AV_OPT_FLAG_ENCODING_PARAM, "movflags" },
{ "write_colr", "Write colr atom", 0, AV_OPT_TYPE_CONST, {.i64 = FF_MOV_FLAG_WRITE_COLR}, INT_MIN, INT_MAX, AV_OPT_FLAG_ENCODING_PARAM, "movflags" },
{ "write_colr", "Write colr atom (Experimental, may be renamed or changed, do not use from scripts)", 0, AV_OPT_TYPE_CONST, {.i64 = FF_MOV_FLAG_WRITE_COLR}, INT_MIN, INT_MAX, AV_OPT_FLAG_ENCODING_PARAM, "movflags" },
{ "write_gama", "Write deprecated gama atom", 0, AV_OPT_TYPE_CONST, {.i64 = FF_MOV_FLAG_WRITE_GAMA}, INT_MIN, INT_MAX, AV_OPT_FLAG_ENCODING_PARAM, "movflags" },
FF_RTP_FLAG_OPTS(MOVMuxContext, rtp_flags),
{ "skip_iods", "Skip writing iods atom.", offsetof(MOVMuxContext, iods_skip), AV_OPT_TYPE_INT, {.i64 = 1}, 0, 1, AV_OPT_FLAG_ENCODING_PARAM},
{ "iods_audio_profile", "iods audio profile atom.", offsetof(MOVMuxContext, iods_audio_profile), AV_OPT_TYPE_INT, {.i64 = -1}, -1, 255, AV_OPT_FLAG_ENCODING_PARAM},
......@@ -77,6 +80,7 @@ static const AVOption options[] = {
{ "brand", "Override major brand", offsetof(MOVMuxContext, major_brand), AV_OPT_TYPE_STRING, {.str = NULL}, .flags = AV_OPT_FLAG_ENCODING_PARAM },
{ "use_editlist", "use edit list", offsetof(MOVMuxContext, use_editlist), AV_OPT_TYPE_INT, {.i64 = -1}, -1, 1, AV_OPT_FLAG_ENCODING_PARAM},
{ "fragment_index", "Fragment number of the next fragment", offsetof(MOVMuxContext, fragments), AV_OPT_TYPE_INT, {.i64 = 0}, 0, INT_MAX, AV_OPT_FLAG_ENCODING_PARAM},
{ "mov_gamma", "gamma value for gama atom", offsetof(MOVMuxContext, gamma), AV_OPT_TYPE_FLOAT, {.dbl = 0.0 }, 0.0, 10, AV_OPT_FLAG_ENCODING_PARAM},
{ NULL },
};
......@@ -1519,6 +1523,31 @@ static int mov_write_pasp_tag(AVIOContext *pb, MOVTrack *track)
return 16;
}
static int mov_write_gama_tag(AVIOContext *pb, MOVTrack *track, double gamma)
{
uint32_t gama = 0;
if (gamma <= 0.0)
{
gamma = avpriv_get_gamma_from_trc(track->enc->color_trc);
}
av_log(pb, AV_LOG_DEBUG, "gamma value %g\n", gamma);
if (gamma > 1e-6) {
gama = (uint32_t)lrint((double)(1<<16) * gamma);
av_log(pb, AV_LOG_DEBUG, "writing gama value %d\n", gama);
av_assert0(track->mode == MODE_MOV);
avio_wb32(pb, 12);
ffio_wfourcc(pb, "gama");
avio_wb32(pb, gama);
return 12;
}
else {
av_log(pb, AV_LOG_WARNING, "gamma value unknown, unable to write gama atom\n");
}
return 0;
}
static int mov_write_colr_tag(AVIOContext *pb, MOVTrack *track)
{
// Ref (MOV): https://developer.apple.com/library/mac/technotes/tn2162/_index.html#//apple_ref/doc/uid/DTS40013070-CH1-TNTAG9
......@@ -1700,6 +1729,12 @@ static int mov_write_video_tag(AVIOContext *pb, MOVMuxContext *mov, MOVTrack *tr
if (track->enc->field_order != AV_FIELD_UNKNOWN)
mov_write_fiel_tag(pb, track);
if (mov->flags & FF_MOV_FLAG_WRITE_GAMA) {
if (track->mode == MODE_MOV)
mov_write_gama_tag(pb, track, mov->gamma);
else
av_log(mov->fc, AV_LOG_WARNING, "Not writing 'gama' atom. Format is not MOV.\n");
}
if (mov->flags & FF_MOV_FLAG_WRITE_COLR) {
if (track->mode == MODE_MOV || track->mode == MODE_MP4)
mov_write_colr_tag(pb, track);
......
......@@ -185,6 +185,7 @@ typedef struct MOVMuxContext {
AVFormatContext *fc;
int use_editlist;
float gamma;
} MOVMuxContext;
#define FF_MOV_FLAG_RTP_HINT (1 << 0)
......@@ -202,6 +203,7 @@ typedef struct MOVMuxContext {
#define FF_MOV_FLAG_FRAG_DISCONT (1 << 12)
#define FF_MOV_FLAG_DELAY_MOOV (1 << 13)
#define FF_MOV_FLAG_WRITE_COLR (1 << 14)
#define FF_MOV_FLAG_WRITE_GAMA (1 << 15)
int ff_mov_write_packet(AVFormatContext *s, AVPacket *pkt);
......
......@@ -50,6 +50,7 @@ const MXFCodecUL ff_mxf_codec_uls[] = {
{ { 0x06,0x0E,0x2B,0x34,0x04,0x01,0x01,0x0A,0x04,0x01,0x02,0x02,0x01,0x32,0x00,0x00 }, 14, AV_CODEC_ID_H264 }, /* H.264/MPEG-4 AVC Intra */
{ { 0x06,0x0E,0x2B,0x34,0x04,0x01,0x01,0x0A,0x04,0x01,0x02,0x02,0x01,0x31,0x11,0x01 }, 14, AV_CODEC_ID_H264 }, /* H.264/MPEG-4 AVC SPS/PPS in-band */
{ { 0x06,0x0E,0x2B,0x34,0x04,0x01,0x01,0x0A,0x04,0x01,0x02,0x01,0x01,0x02,0x02,0x01 }, 16, AV_CODEC_ID_V210 }, /* V210 */
{ { 0x06,0x0E,0x2B,0x34,0x04,0x01,0x01,0x01,0x0E,0x04,0x02,0x01,0x02,0x11,0x04,0x00 }, 15, AV_CODEC_ID_PRORES }, /* PRORES */
/* SoundEssenceCompression */
{ { 0x06,0x0e,0x2b,0x34,0x04,0x01,0x01,0x03,0x04,0x02,0x02,0x02,0x03,0x03,0x01,0x00 }, 14, AV_CODEC_ID_AAC }, /* MPEG2 AAC ADTS (legacy) */
{ { 0x06,0x0E,0x2B,0x34,0x04,0x01,0x01,0x01,0x04,0x02,0x02,0x01,0x00,0x00,0x00,0x00 }, 13, AV_CODEC_ID_PCM_S16LE }, /* Uncompressed */
......
......@@ -281,6 +281,7 @@ static const uint8_t mxf_encrypted_essence_container[] = { 0x06,0x0e,0x2b,0x
static const uint8_t mxf_random_index_pack_key[] = { 0x06,0x0e,0x2b,0x34,0x02,0x05,0x01,0x01,0x0d,0x01,0x02,0x01,0x01,0x11,0x01,0x00 };
static const uint8_t mxf_sony_mpeg4_extradata[] = { 0x06,0x0e,0x2b,0x34,0x04,0x01,0x01,0x01,0x0e,0x06,0x06,0x02,0x02,0x01,0x00,0x00 };
static const uint8_t mxf_avid_project_name[] = { 0xa5,0xfb,0x7b,0x25,0xf6,0x15,0x94,0xb9,0x62,0xfc,0x37,0x17,0x49,0x2d,0x42,0xbf };
static const uint8_t mxf_jp2k_rsiz[] = { 0x06,0x0e,0x2b,0x34,0x02,0x05,0x01,0x01,0x0d,0x01,0x02,0x01,0x01,0x02,0x01,0x00 };
#define IS_KLV_KEY(x, y) (!memcmp(x, y, sizeof(y)))
......@@ -1000,6 +1001,12 @@ static int mxf_read_generic_descriptor(void *arg, AVIOContext *pb, int tag, int
descriptor->extradata_size = size;
avio_read(pb, descriptor->extradata, size);
}
if (IS_KLV_KEY(uid, mxf_jp2k_rsiz)) {
uint32_t rsiz = avio_rb16(pb);
if (rsiz == FF_PROFILE_JPEG2000_DCINEMA_2K ||
rsiz == FF_PROFILE_JPEG2000_DCINEMA_4K)
descriptor->pix_fmt = AV_PIX_FMT_XYZ12;
}
break;
}
return 0;
......
......@@ -299,12 +299,12 @@ rdt_parse_packet (AVFormatContext *ctx, PayloadContext *rdt, AVStream *st,
AVIOContext pb;
if (rdt->audio_pkt_cnt == 0) {
int pos;
int pos, rmflags;
ffio_init_context(&pb, (uint8_t *)buf, len, 0, NULL, NULL, NULL, NULL);
flags = (flags & RTP_FLAG_KEY) ? 2 : 0;
rmflags = (flags & RTP_FLAG_KEY) ? 2 : 0;
res = ff_rm_parse_packet (rdt->rmctx, &pb, st, rdt->rmst[st->index], len, pkt,
&seq, flags, *timestamp);
&seq, rmflags, *timestamp);
pos = avio_tell(&pb);
if (res < 0)
return res;
......
......@@ -205,8 +205,6 @@ static int asfrtp_parse_packet(AVFormatContext *s, PayloadContext *asf,
int start_off = avio_tell(pb);
mflags = avio_r8(pb);
if (mflags & 0x80)
flags |= RTP_FLAG_KEY;
len_off = avio_rb24(pb);
if (mflags & 0x20) /**< relative timestamp */
avio_skip(pb, 4);
......@@ -289,7 +287,6 @@ static void asfrtp_close_context(PayloadContext *asf)
{
ffio_free_dyn_buf(&asf->pktbuf);
av_freep(&asf->buf);
av_free(asf);
}
#define RTP_ASF_HANDLER(n, s, t) \
......
......@@ -47,7 +47,8 @@ static int qt_rtp_parse_packet(AVFormatContext *s, PayloadContext *qt,
AVIOContext pb;
GetBitContext gb;
int packing_scheme, has_payload_desc, has_packet_info, alen,
has_marker_bit = flags & RTP_FLAG_MARKER;
has_marker_bit = flags & RTP_FLAG_MARKER,
keyframe;
if (qt->remaining) {
int num = qt->pkt.size / qt->bytes_per_frame;
......@@ -79,8 +80,7 @@ static int qt_rtp_parse_packet(AVFormatContext *s, PayloadContext *qt,
skip_bits(&gb, 4); // version
if ((packing_scheme = get_bits(&gb, 2)) == 0)
return AVERROR_INVALIDDATA;
if (get_bits1(&gb))
flags |= RTP_FLAG_KEY;
keyframe = get_bits1(&gb);
has_payload_desc = get_bits1(&gb);
has_packet_info = get_bits1(&gb);
skip_bits(&gb, 23); // reserved:7, cache payload info:1, payload ID:15
......@@ -196,7 +196,7 @@ static int qt_rtp_parse_packet(AVFormatContext *s, PayloadContext *qt,
qt->pkt.size = 0;
qt->pkt.data = NULL;
pkt->flags = flags & RTP_FLAG_KEY ? AV_PKT_FLAG_KEY : 0;
pkt->flags = keyframe ? AV_PKT_FLAG_KEY : 0;
pkt->stream_index = st->index;
memset(pkt->data + pkt->size, 0, FF_INPUT_BUFFER_PADDING_SIZE);
return 0;
......@@ -211,7 +211,7 @@ static int qt_rtp_parse_packet(AVFormatContext *s, PayloadContext *qt,
if (av_new_packet(pkt, qt->bytes_per_frame))
return AVERROR(ENOMEM);
memcpy(pkt->data, buf + avio_tell(&pb), qt->bytes_per_frame);
pkt->flags = flags & RTP_FLAG_KEY ? AV_PKT_FLAG_KEY : 0;
pkt->flags = keyframe ? AV_PKT_FLAG_KEY : 0;
pkt->stream_index = st->index;
if (qt->remaining > 0) {
av_freep(&qt->pkt.data);
......
......@@ -17,10 +17,9 @@
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*
*/
#include "libavcodec/bytestream.h"
#include "libavutil/intreadwrite.h"
#include "avio_internal.h"
#include "rtpdec_formats.h"
......@@ -33,17 +32,11 @@ struct PayloadContext {
};
static av_cold int vp9_init(AVFormatContext *ctx, int st_index,
PayloadContext *data)
PayloadContext *data)
{
av_dlog(ctx, "vp9_init() for stream %d\n", st_index);
av_log(ctx, AV_LOG_WARNING,
"RTP/VP9 support is still experimental\n");
if (st_index < 0)
return 0;
ctx->streams[st_index]->need_parsing = AVSTREAM_PARSE_FULL;
return 0;
}
......@@ -57,12 +50,12 @@ static int vp9_handle_packet(AVFormatContext *ctx, PayloadContext *rtp_vp9_ctx,
av_unused int layer_temporal = -1, layer_spatial = -1, layer_quality = -1;
int ref_fields = 0, has_ref_field_ext_pic_id = 0;
int first_fragment, last_fragment;
int rtp_m;
int res = 0;
/* drop data of previous packets in case of non-continuous (lossy) packet stream */
if (rtp_vp9_ctx->buf && rtp_vp9_ctx->timestamp != *timestamp) {
if (rtp_vp9_ctx->buf && rtp_vp9_ctx->timestamp != *timestamp)
ffio_free_dyn_buf(&rtp_vp9_ctx->buf);
}
/* sanity check for size of input packet: 1 byte payload at least */
if (len < RTP_VP9_DESC_REQUIRED_SIZE + 1) {
......@@ -71,32 +64,34 @@ static int vp9_handle_packet(AVFormatContext *ctx, PayloadContext *rtp_vp9_ctx,
}
/*
decode the required VP9 payload descriptor according to section 4.2 of the spec.:
0 1 2 3 4 5 6 7
+-+-+-+-+-+-+-+-+
|I|L|F|B|E|V|U|-| (REQUIRED)
+-+-+-+-+-+-+-+-+
I: PictureID present
L: Layer indices present
F: Reference indices present
B: Start of VP9 frame
E: End of picture
V: Scalability Structure (SS) present
U: Scalability Structure Update (SU) present
*/
has_pic_id = buf[0] & 0x80;
has_layer_idc = buf[0] & 0x40;
has_ref_idc = buf[0] & 0x20;
first_fragment = buf[0] & 0x10;
last_fragment = buf[0] & 0x08;
has_ss_data = buf[0] & 0x04;
has_su_data = buf[0] & 0x02;
* decode the required VP9 payload descriptor according to section 4.2 of the spec.:
*
* 0 1 2 3 4 5 6 7
* +-+-+-+-+-+-+-+-+
* |I|L|F|B|E|V|U|-| (REQUIRED)
* +-+-+-+-+-+-+-+-+
*
* I: PictureID present
* L: Layer indices present
* F: Reference indices present
* B: Start of VP9 frame
* E: End of picture
* V: Scalability Structure (SS) present
* U: Scalability Structure Update (SU) present
*/
has_pic_id = !!(buf[0] & 0x80);
has_layer_idc = !!(buf[0] & 0x40);
has_ref_idc = !!(buf[0] & 0x20);
first_fragment = !!(buf[0] & 0x10);
last_fragment = !!(buf[0] & 0x08);
has_ss_data = !!(buf[0] & 0x04);
has_su_data = !!(buf[0] & 0x02);
rtp_m = !!(flags & RTP_FLAG_MARKER);
/* sanity check for markers: B should always be equal to the RTP M marker */
if (last_fragment >> 2 != flags & RTP_FLAG_MARKER) {
av_log(ctx, AV_LOG_ERROR, "Invalid combination of B and M marker\n");
if (last_fragment != rtp_m) {
av_log(ctx, AV_LOG_ERROR, "Invalid combination of B and M marker (%d != %d)\n", last_fragment, rtp_m);
return AVERROR_INVALIDDATA;
}
......@@ -105,17 +100,17 @@ static int vp9_handle_packet(AVFormatContext *ctx, PayloadContext *rtp_vp9_ctx,
len -= RTP_VP9_DESC_REQUIRED_SIZE;
/*
decode the 1-byte/2-byte picture ID:
0 1 2 3 4 5 6 7
+-+-+-+-+-+-+-+-+
I: |M|PICTURE ID | (RECOMMENDED)
+-+-+-+-+-+-+-+-+
M: | EXTENDED PID | (RECOMMENDED)
+-+-+-+-+-+-+-+-+
M: The most significant bit of the first octet is an extension flag.
PictureID: 8 or 16 bits including the M bit.
* decode the 1-byte/2-byte picture ID:
*
* 0 1 2 3 4 5 6 7
* +-+-+-+-+-+-+-+-+
* I: |M|PICTURE ID | (RECOMMENDED)
* +-+-+-+-+-+-+-+-+
* M: | EXTENDED PID | (RECOMMENDED)
* +-+-+-+-+-+-+-+-+
*
* M: The most significant bit of the first octet is an extension flag.
* PictureID: 8 or 16 bits including the M bit.
*/
if (has_pic_id) {
if (len < 1) {
......@@ -140,20 +135,20 @@ static int vp9_handle_packet(AVFormatContext *ctx, PayloadContext *rtp_vp9_ctx,
}
/*
decode layer indices
0 1 2 3 4 5 6 7
+-+-+-+-+-+-+-+-+
L: | T | S | Q | R | (CONDITIONALLY RECOMMENDED)
+-+-+-+-+-+-+-+-+
T, S and Q are 2-bit indices for temporal, spatial, and quality layers.
If "F" is set in the initial octet, R is 2 bits representing the number
of reference fields this frame refers to.
* decode layer indices
*
* 0 1 2 3 4 5 6 7
* +-+-+-+-+-+-+-+-+
* L: | T | S | Q | R | (CONDITIONALLY RECOMMENDED)
* +-+-+-+-+-+-+-+-+
*
* T, S and Q are 2-bit indices for temporal, spatial, and quality layers.
* If "F" is set in the initial octet, R is 2 bits representing the number
* of reference fields this frame refers to.
*/
if (has_layer_idc) {
if (len < 1) {
av_log(ctx, AV_LOG_ERROR, "Too short RTP/VP9 packet");
av_log(ctx, AV_LOG_ERROR, "Too short RTP/VP9 packet\n");
return AVERROR_INVALIDDATA;
}
layer_temporal = buf[0] & 0xC0;
......@@ -169,18 +164,18 @@ static int vp9_handle_packet(AVFormatContext *ctx, PayloadContext *rtp_vp9_ctx,
}
/*
decode the reference fields
0 1 2 3 4 5 6 7
+-+-+-+-+-+-+-+-+ -\
F: | PID |X| RS| RQ| (OPTIONAL) .
+-+-+-+-+-+-+-+-+ . - R times
X: | EXTENDED PID | (OPTIONAL) .
+-+-+-+-+-+-+-+-+ -/
PID: The relative Picture ID referred to by this frame.
RS and RQ: The spatial and quality layer IDs.
X: 1 if this layer index has an extended relative Picture ID.
* decode the reference fields
*
* 0 1 2 3 4 5 6 7
* +-+-+-+-+-+-+-+-+ -\
* F: | PID |X| RS| RQ| (OPTIONAL) .
* +-+-+-+-+-+-+-+-+ . - R times
* X: | EXTENDED PID | (OPTIONAL) .
* +-+-+-+-+-+-+-+-+ -/
*
* PID: The relative Picture ID referred to by this frame.
* RS and RQ: The spatial and quality layer IDs.
* X: 1 if this layer index has an extended relative Picture ID.
*/
if (has_ref_idc) {
while (ref_fields) {
......@@ -214,42 +209,42 @@ static int vp9_handle_packet(AVFormatContext *ctx, PayloadContext *rtp_vp9_ctx,
}
/*
decode the scalability structure (SS)
0 1 2 3 4 5 6 7
+-+-+-+-+-+-+-+-+
V: | PATTERN LENGTH|
+-+-+-+-+-+-+-+-+ -\
| T | S | Q | R | (OPTIONAL) .
+-+-+-+-+-+-+-+-+ -\ .
| PID |X| RS| RQ| (OPTIONAL) . . - PAT. LEN. times
+-+-+-+-+-+-+-+-+ . - R times .
X: | EXTENDED PID | (OPTIONAL) . .
+-+-+-+-+-+-+-+-+ -/ -/
PID: The relative Picture ID referred to by this frame.
RS and RQ: The spatial and quality layer IDs.
X: 1 if this layer index has an extended relative Picture ID.
* decode the scalability structure (SS)
*
* 0 1 2 3 4 5 6 7
* +-+-+-+-+-+-+-+-+
* V: | PATTERN LENGTH|
* +-+-+-+-+-+-+-+-+ -\
* | T | S | Q | R | (OPTIONAL) .
* +-+-+-+-+-+-+-+-+ -\ .
* | PID |X| RS| RQ| (OPTIONAL) . . - PAT. LEN. times
* +-+-+-+-+-+-+-+-+ . - R times .
* X: | EXTENDED PID | (OPTIONAL) . .
* +-+-+-+-+-+-+-+-+ -/ -/
*
* PID: The relative Picture ID referred to by this frame.
* RS and RQ: The spatial and quality layer IDs.
* X: 1 if this layer index has an extended relative Picture ID.
*/
if (has_ss_data) {
avpriv_report_missing_feature(ctx, "VP9 scalability structure data\n");
return AVERROR_PATCHWELCOME;
avpriv_report_missing_feature(ctx, "VP9 scalability structure data");
return AVERROR(ENOSYS);
}
/*
decode the scalability update structure (SU)
spec. is tbd
* decode the scalability update structure (SU)
*
* spec. is tbd
*/
if (has_su_data) {
avpriv_report_missing_feature(ctx, "VP9 scalability update structure data\n");
return AVERROR_PATCHWELCOME;
avpriv_report_missing_feature(ctx, "VP9 scalability update structure data");
return AVERROR(ENOSYS);
}
/*
decode the VP9 payload header
spec. is tbd
* decode the VP9 payload header
*
* spec. is tbd
*/
//XXX: implement when specified
......@@ -293,7 +288,7 @@ RTPDynamicProtocolHandler ff_vp9_dynamic_handler = {
.enc_name = "VP9",
.codec_type = AVMEDIA_TYPE_VIDEO,
.codec_id = AV_CODEC_ID_VP9,
.init = vp9_init,
.priv_data_size = sizeof(PayloadContext),
.init = vp9_init,
.parse_packet = vp9_handle_packet
};
......@@ -51,6 +51,7 @@
#include "spdif.h"
#include "libavcodec/ac3.h"
#include "libavcodec/dca.h"
#include "libavcodec/dca_syncwords.h"
#include "libavcodec/aacadtsdec.h"
#include "libavutil/opt.h"
......@@ -251,25 +252,25 @@ static int spdif_header_dts(AVFormatContext *s, AVPacket *pkt)
return AVERROR_INVALIDDATA;
switch (syncword_dts) {
case DCA_MARKER_RAW_BE:
case DCA_SYNCWORD_CORE_BE:
blocks = (AV_RB16(pkt->data + 4) >> 2) & 0x7f;
core_size = ((AV_RB24(pkt->data + 5) >> 4) & 0x3fff) + 1;
sample_rate = avpriv_dca_sample_rates[(pkt->data[8] >> 2) & 0x0f];
break;
case DCA_MARKER_RAW_LE:
case DCA_SYNCWORD_CORE_LE:
blocks = (AV_RL16(pkt->data + 4) >> 2) & 0x7f;
ctx->extra_bswap = 1;
break;
case DCA_MARKER_14B_BE:
case DCA_SYNCWORD_CORE_14B_BE:
blocks =
(((pkt->data[5] & 0x07) << 4) | ((pkt->data[6] & 0x3f) >> 2));
break;
case DCA_MARKER_14B_LE:
case DCA_SYNCWORD_CORE_14B_LE:
blocks =
(((pkt->data[4] & 0x07) << 4) | ((pkt->data[7] & 0x3f) >> 2));
ctx->extra_bswap = 1;
break;
case DCA_HD_MARKER:
case DCA_SYNCWORD_SUBSTREAM:
/* We only handle HD frames that are paired with core. However,
sometimes DTS-HD streams with core have a stray HD frame without
core in the beginning of the stream. */
......
......@@ -2201,6 +2201,12 @@ int avformat_seek_file(AVFormatContext *s, int stream_index, int64_t min_ts,
return -1; //unreachable
}
int avformat_flush(AVFormatContext *s)
{
ff_read_frame_flush(s);
return 0;
}
/*******************************************************/
/**
......
......@@ -30,7 +30,7 @@
#include "libavutil/version.h"
#define LIBAVFORMAT_VERSION_MAJOR 56
#define LIBAVFORMAT_VERSION_MINOR 24
#define LIBAVFORMAT_VERSION_MINOR 25
#define LIBAVFORMAT_VERSION_MICRO 101
#define LIBAVFORMAT_VERSION_INT AV_VERSION_INT(LIBAVFORMAT_VERSION_MAJOR, \
......
......@@ -88,6 +88,7 @@ OBJS = adler32.o \
cast5.o \
camellia.o \
channel_layout.o \
color_utils.o \
cpu.o \
crc.o \
des.o \
......
/*
* Copyright (c) 2015 Kevin Wheatley <kevin.j.wheatley@gmail.com>
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include "libavutil/color_utils.h"
#include "libavutil/pixfmt.h"
double avpriv_get_gamma_from_trc(enum AVColorTransferCharacteristic trc)
{
double gamma;
switch (trc) {
case AVCOL_TRC_BT709:
case AVCOL_TRC_SMPTE170M:
case AVCOL_TRC_SMPTE240M:
case AVCOL_TRC_BT1361_ECG:
case AVCOL_TRC_BT2020_10:
case AVCOL_TRC_BT2020_12:
/* these share a segmented TRC, but gamma 1.961 is a close
approximation, and also more correct for decoding content */
gamma = 1.961;
break;
case AVCOL_TRC_GAMMA22:
case AVCOL_TRC_IEC61966_2_1:
gamma = 2.2;
break;
case AVCOL_TRC_GAMMA28:
gamma = 2.8;
break;
case AVCOL_TRC_LINEAR:
gamma = 1.0;
break;
default:
gamma = 0.0; // Unknown value representation
}
return gamma;
}
/*
* Copyright (c) 2015 Kevin Wheatley <kevin.j.wheatley@gmail.com>
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#ifndef AVUTIL_COLOR_UTILS_H
#define AVUTIL_COLOR_UTILS_H
#include "libavutil/pixfmt.h"
/**
* Determine a suitable 'gamma' value to match the supplied
* AVColorTransferCharacteristic.
*
* See Apple Technical Note TN2257 (https://developer.apple.com/library/mac/technotes/tn2257/_index.html)
*
* @return Will return an approximation to the simple gamma function matching
* the supplied Transfer Characteristic, Will return 0.0 for any
* we cannot reasonably match against.
*/
double avpriv_get_gamma_from_trc(enum AVColorTransferCharacteristic trc);
#endif
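A minimal sketch of how an encoder or muxer would consume the new helper, mirroring the pngenc.c change earlier in this commit; the wrapper function below is hypothetical:

#include "libavutil/color_utils.h"

/* Sketch: map an AVColorTransferCharacteristic to a gamma value and skip
 * writing metadata when the TRC is unknown (gamma == 0.0). */
static int get_encoding_gamma(enum AVColorTransferCharacteristic trc, double *out)
{
    double gamma = avpriv_get_gamma_from_trc(trc);
    if (gamma <= 1e-6)
        return 0;      /* unknown TRC: nothing to write */
    *out = gamma;
    return 1;
}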
......@@ -144,114 +144,82 @@ static void vector_fmul_scalar_mips(float *dst, const float *src, float mul,
}
static void vector_fmul_window_mips(float *dst, const float *src0,
const float *src1, const float *win, int len)
const float *src1, const float *win, int len)
{
int i, j;
/*
* variables used in inline assembler
*/
float * dst_i, * dst_j, * dst_i2, * dst_j2;
float temp, temp1, temp2, temp3, temp4, temp5, temp6, temp7;
float * dst_j, *win_j, *src0_i, *src1_j, *dst_i, *win_i;
float temp, temp1, temp2, temp3;
float s0, s01, s1, s11;
float wi, wi1, wi2, wi3;
float wj, wj1, wj2, wj3;
const float * lp_end = win + len;
win_i = (float *)win;
win_j = (float *)(win + 2 * len -1);
src1_j = (float *)(src1 + len - 1);
src0_i = (float *)src0;
dst_i = (float *)dst;
dst_j = (float *)(dst + 2 * len -1);
dst += len;
win += len;
src0 += len;
for (i = -len, j = len - 1; i < 0; i += 8, j -= 8) {
dst_i = dst + i;
dst_j = dst + j;
dst_i2 = dst + i + 4;
dst_j2 = dst + j - 4;
__asm__ volatile (
"mul.s %[temp], %[s1], %[wi] \n\t"
"mul.s %[temp1], %[s1], %[wj] \n\t"
"mul.s %[temp2], %[s11], %[wi1] \n\t"
"mul.s %[temp3], %[s11], %[wj1] \n\t"
"msub.s %[temp], %[temp], %[s0], %[wj] \n\t"
"madd.s %[temp1], %[temp1], %[s0], %[wi] \n\t"
"msub.s %[temp2], %[temp2], %[s01], %[wj1] \n\t"
"madd.s %[temp3], %[temp3], %[s01], %[wi1] \n\t"
"swc1 %[temp], 0(%[dst_i]) \n\t" /* dst[i] = s0*wj - s1*wi; */
"swc1 %[temp1], 0(%[dst_j]) \n\t" /* dst[j] = s0*wi + s1*wj; */
"swc1 %[temp2], 4(%[dst_i]) \n\t" /* dst[i+1] = s01*wj1 - s11*wi1; */
"swc1 %[temp3], -4(%[dst_j]) \n\t" /* dst[j-1] = s01*wi1 + s11*wj1; */
"mul.s %[temp4], %[s12], %[wi2] \n\t"
"mul.s %[temp5], %[s12], %[wj2] \n\t"
"mul.s %[temp6], %[s13], %[wi3] \n\t"
"mul.s %[temp7], %[s13], %[wj3] \n\t"
"msub.s %[temp4], %[temp4], %[s02], %[wj2] \n\t"
"madd.s %[temp5], %[temp5], %[s02], %[wi2] \n\t"
"msub.s %[temp6], %[temp6], %[s03], %[wj3] \n\t"
"madd.s %[temp7], %[temp7], %[s03], %[wi3] \n\t"
"swc1 %[temp4], 8(%[dst_i]) \n\t" /* dst[i+2] = s02*wj2 - s12*wi2; */
"swc1 %[temp5], -8(%[dst_j]) \n\t" /* dst[j-2] = s02*wi2 + s12*wj2; */
"swc1 %[temp6], 12(%[dst_i]) \n\t" /* dst[i+2] = s03*wj3 - s13*wi3; */
"swc1 %[temp7], -12(%[dst_j]) \n\t" /* dst[j-3] = s03*wi3 + s13*wj3; */
: [temp]"=&f"(temp), [temp1]"=&f"(temp1), [temp2]"=&f"(temp2),
[temp3]"=&f"(temp3), [temp4]"=&f"(temp4), [temp5]"=&f"(temp5),
[temp6]"=&f"(temp6), [temp7]"=&f"(temp7)
: [dst_j]"r"(dst_j), [dst_i]"r" (dst_i),
[s0] "f"(src0[i]), [wj] "f"(win[j]), [s1] "f"(src1[j]),
[wi] "f"(win[i]), [s01]"f"(src0[i + 1]),[wj1]"f"(win[j - 1]),
[s11]"f"(src1[j - 1]), [wi1]"f"(win[i + 1]), [s02]"f"(src0[i + 2]),
[wj2]"f"(win[j - 2]), [s12]"f"(src1[j - 2]),[wi2]"f"(win[i + 2]),
[s03]"f"(src0[i + 3]), [wj3]"f"(win[j - 3]), [s13]"f"(src1[j - 3]),
[wi3]"f"(win[i + 3])
: "memory"
);
__asm__ volatile (
"mul.s %[temp], %[s1], %[wi] \n\t"
"mul.s %[temp1], %[s1], %[wj] \n\t"
"mul.s %[temp2], %[s11], %[wi1] \n\t"
"mul.s %[temp3], %[s11], %[wj1] \n\t"
"msub.s %[temp], %[temp], %[s0], %[wj] \n\t"
"madd.s %[temp1], %[temp1], %[s0], %[wi] \n\t"
"msub.s %[temp2], %[temp2], %[s01], %[wj1] \n\t"
"madd.s %[temp3], %[temp3], %[s01], %[wi1] \n\t"
"swc1 %[temp], 0(%[dst_i2]) \n\t" /* dst[i] = s0*wj - s1*wi; */
"swc1 %[temp1], 0(%[dst_j2]) \n\t" /* dst[j] = s0*wi + s1*wj; */
"swc1 %[temp2], 4(%[dst_i2]) \n\t" /* dst[i+1] = s01*wj1 - s11*wi1; */
"swc1 %[temp3], -4(%[dst_j2]) \n\t" /* dst[j-1] = s01*wi1 + s11*wj1; */
"mul.s %[temp4], %[s12], %[wi2] \n\t"
"mul.s %[temp5], %[s12], %[wj2] \n\t"
"mul.s %[temp6], %[s13], %[wi3] \n\t"
"mul.s %[temp7], %[s13], %[wj3] \n\t"
"msub.s %[temp4], %[temp4], %[s02], %[wj2] \n\t"
"madd.s %[temp5], %[temp5], %[s02], %[wi2] \n\t"
"msub.s %[temp6], %[temp6], %[s03], %[wj3] \n\t"
"madd.s %[temp7], %[temp7], %[s03], %[wi3] \n\t"
"swc1 %[temp4], 8(%[dst_i2]) \n\t" /* dst[i+2] = s02*wj2 - s12*wi2; */
"swc1 %[temp5], -8(%[dst_j2]) \n\t" /* dst[j-2] = s02*wi2 + s12*wj2; */
"swc1 %[temp6], 12(%[dst_i2]) \n\t" /* dst[i+2] = s03*wj3 - s13*wi3; */
"swc1 %[temp7], -12(%[dst_j2]) \n\t" /* dst[j-3] = s03*wi3 + s13*wj3; */
: [temp]"=&f"(temp),
[temp1]"=&f"(temp1), [temp2]"=&f"(temp2), [temp3]"=&f"(temp3),
[temp4]"=&f"(temp4), [temp5]"=&f"(temp5), [temp6]"=&f"(temp6),
[temp7] "=&f" (temp7)
: [dst_j2]"r"(dst_j2), [dst_i2]"r"(dst_i2),
[s0] "f"(src0[i + 4]), [wj] "f"(win[j - 4]), [s1] "f"(src1[j - 4]),
[wi] "f"(win[i + 4]), [s01]"f"(src0[i + 5]),[wj1]"f"(win[j - 5]),
[s11]"f"(src1[j - 5]), [wi1]"f"(win[i + 5]), [s02]"f"(src0[i + 6]),
[wj2]"f"(win[j - 6]), [s12]"f"(src1[j - 6]),[wi2]"f"(win[i + 6]),
[s03]"f"(src0[i + 7]), [wj3]"f"(win[j - 7]), [s13]"f"(src1[j - 7]),
[wi3]"f"(win[i + 7])
: "memory"
);
}
/* loop unrolled 4 times */
__asm__ volatile (
"1:"
"lwc1 %[s1], 0(%[src1_j]) \n\t"
"lwc1 %[wi], 0(%[win_i]) \n\t"
"lwc1 %[wj], 0(%[win_j]) \n\t"
"lwc1 %[s11], -4(%[src1_j]) \n\t"
"lwc1 %[wi1], 4(%[win_i]) \n\t"
"lwc1 %[wj1], -4(%[win_j]) \n\t"
"lwc1 %[s0], 0(%[src0_i]) \n\t"
"lwc1 %[s01], 4(%[src0_i]) \n\t"
"mul.s %[temp], %[s1], %[wi] \n\t"
"mul.s %[temp1], %[s1], %[wj] \n\t"
"mul.s %[temp2], %[s11], %[wi1] \n\t"
"mul.s %[temp3], %[s11], %[wj1] \n\t"
"lwc1 %[s1], -8(%[src1_j]) \n\t"
"lwc1 %[wi2], 8(%[win_i]) \n\t"
"lwc1 %[wj2], -8(%[win_j]) \n\t"
"lwc1 %[s11], -12(%[src1_j]) \n\t"
"msub.s %[temp], %[temp], %[s0], %[wj] \n\t"
"madd.s %[temp1], %[temp1], %[s0], %[wi] \n\t"
"msub.s %[temp2], %[temp2], %[s01], %[wj1] \n\t"
"madd.s %[temp3], %[temp3], %[s01], %[wi1] \n\t"
"lwc1 %[wi3], 12(%[win_i]) \n\t"
"lwc1 %[wj3], -12(%[win_j]) \n\t"
"lwc1 %[s0], 8(%[src0_i]) \n\t"
"lwc1 %[s01], 12(%[src0_i]) \n\t"
"addiu %[src1_j],-16 \n\t"
"addiu %[win_i], 16 \n\t"
"addiu %[win_j], -16 \n\t"
"addiu %[src0_i], 16 \n\t"
"swc1 %[temp], 0(%[dst_i]) \n\t" /* dst[i] = s0*wj - s1*wi; */
"swc1 %[temp1], 0(%[dst_j]) \n\t" /* dst[j] = s0*wi + s1*wj; */
"swc1 %[temp2], 4(%[dst_i]) \n\t" /* dst[i+1] = s01*wj1 - s11*wi1; */
"swc1 %[temp3], -4(%[dst_j]) \n\t" /* dst[j-1] = s01*wi1 + s11*wj1; */
"mul.s %[temp], %[s1], %[wi2] \n\t"
"mul.s %[temp1], %[s1], %[wj2] \n\t"
"mul.s %[temp2], %[s11], %[wi3] \n\t"
"mul.s %[temp3], %[s11], %[wj3] \n\t"
"msub.s %[temp], %[temp], %[s0], %[wj2] \n\t"
"madd.s %[temp1], %[temp1], %[s0], %[wi2] \n\t"
"msub.s %[temp2], %[temp2], %[s01], %[wj3] \n\t"
"madd.s %[temp3], %[temp3], %[s01], %[wi3] \n\t"
"swc1 %[temp], 8(%[dst_i]) \n\t" /* dst[i+2] = s0*wj2 - s1*wi2; */
"swc1 %[temp1], -8(%[dst_j]) \n\t" /* dst[j-2] = s0*wi2 + s1*wj2; */
"swc1 %[temp2], 12(%[dst_i]) \n\t" /* dst[i+2] = s01*wj3 - s11*wi3; */
"swc1 %[temp3], -12(%[dst_j]) \n\t" /* dst[j-3] = s01*wi3 + s11*wj3; */
"addiu %[dst_i], 16 \n\t"
"addiu %[dst_j], -16 \n\t"
"bne %[win_i], %[lp_end], 1b \n\t"
: [temp]"=&f"(temp), [temp1]"=&f"(temp1), [temp2]"=&f"(temp2),
[temp3]"=&f"(temp3), [src0_i]"+r"(src0_i), [win_i]"+r"(win_i),
[src1_j]"+r"(src1_j), [win_j]"+r"(win_j), [dst_i]"+r"(dst_i),
[dst_j]"+r"(dst_j), [s0] "=&f"(s0), [s01]"=&f"(s01), [s1] "=&f"(s1),
[s11]"=&f"(s11), [wi] "=&f"(wi), [wj] "=&f"(wj), [wi2]"=&f"(wi2),
[wj2]"=&f"(wj2), [wi3]"=&f"(wi3), [wj3]"=&f"(wj3), [wi1]"=&f"(wi1),
[wj1]"=&f"(wj1)
: [lp_end]"r"(lp_end)
: "memory"
);
}
static void butterflies_float_mips(float *av_restrict v1, float *av_restrict v2,
......
......@@ -2123,11 +2123,15 @@ int main(void)
av_log_set_level(AV_LOG_QUIET);
for (i=0; i < FF_ARRAY_ELEMS(options); i++) {
int silence_log = !strcmp(options[i], "rational=-1/0"); // inf formating differs between platforms
av_log(&test_ctx, AV_LOG_DEBUG, "Setting options string '%s'\n", options[i]);
if (silence_log)
av_log_set_callback(NULL);
if (av_set_options_string(&test_ctx, options[i], "=", ":") < 0)
printf("Error '%s'\n", options[i]);
else
printf("OK '%s'\n", options[i]);
av_log_set_callback(log_callback_help);
}
av_opt_free(&test_ctx);
}
......
......@@ -93,9 +93,9 @@ typedef struct AVPixFmtDescriptor {
* Parameters that describe how pixels are packed.
* If the format has 2 or 4 components, then alpha is last.
* If the format has 1 or 2 components, then luma is 0.
* If the format has 3 or 4 components,
* if the RGB flag is set then 0 is red, 1 is green and 2 is blue;
* otherwise 0 is luma, 1 is chroma-U and 2 is chroma-V.
* If the format has 3 or 4 components:
* if the RGB flag is set then 0 is red, 1 is green and 2 is blue;
* otherwise 0 is luma, 1 is chroma-U and 2 is chroma-V.
*/
AVComponentDescriptor comp[4];
......
......@@ -56,7 +56,7 @@
*/
#define LIBAVUTIL_VERSION_MAJOR 54
#define LIBAVUTIL_VERSION_MINOR 19
#define LIBAVUTIL_VERSION_MINOR 20
#define LIBAVUTIL_VERSION_MICRO 100
#define LIBAVUTIL_VERSION_INT AV_VERSION_INT(LIBAVUTIL_VERSION_MAJOR, \
......
......@@ -43,6 +43,12 @@ void ff_pixelutils_sad_init_x86(av_pixelutils_sad_fn *sad, int aligned)
sad[2] = ff_pixelutils_sad_8x8_mmx;
}
// The best way to use SSE2 would be to do 2 SADs in parallel,
// but we'd have to modify the pixelutils API to return SIMD functions.
// It's probably not faster to shuffle data around
// to get two lines of 8 pixels into a single 16byte register,
// so just use the MMX 8x8 version even when SSE2 is available.
if (EXTERNAL_MMXEXT(cpu_flags)) {
sad[2] = ff_pixelutils_sad_8x8_mmxext;
sad[3] = ff_pixelutils_sad_16x16_mmxext;
......
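For reference, a hedged sketch of the public pixelutils entry point that the comment above refers to; the buffers, strides and helper name are made up for illustration:

#include <libavutil/pixelutils.h>

/* Sketch: fetch the 8x8 SAD function (w_bits = h_bits = 3) and compare two
 * blocks; on x86 this dispatches to the MMX/MMXEXT versions selected above. */
static int sad_8x8_example(const uint8_t *a, ptrdiff_t a_stride,
                           const uint8_t *b, ptrdiff_t b_stride)
{
    av_pixelutils_sad_fn sad = av_pixelutils_get_sad_fn(3, 3, 0, NULL);
    if (!sad)
        return -1;
    return sad(a, a_stride, b, b_stride);
}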
......@@ -91,8 +91,9 @@ fate-aac-aref-encode: CMD = enc_dec_pcm adts wav s16le $(REF) -strict -2 -c:a aa
fate-aac-aref-encode: CMP = stddev
fate-aac-aref-encode: REF = ./tests/data/asynth-44100-2.wav
fate-aac-aref-encode: CMP_SHIFT = -4096
fate-aac-aref-encode: CMP_TARGET = 438
fate-aac-aref-encode: CMP_TARGET = 434
fate-aac-aref-encode: SIZE_TOLERANCE = 2464
fate-aac-aref-encode: FUZZ = 5
FATE_AAC_ENCODE += fate-aac-ln-encode
fate-aac-ln-encode: CMD = enc_dec_pcm adts wav s16le $(TARGET_SAMPLES)/audio-reference/luckynight_2ch_44kHz_s16.wav -strict -2 -c:a aac -b:a 512k
......
......@@ -14,7 +14,7 @@ FATE_VIDEO-$(call DEMDEC, MOV, AIC) += fate-aic
fate-aic: CMD = framecrc -idct simple -i $(TARGET_SAMPLES)/aic/small_apple_intermediate_codec.mov -an -vframes 15
FATE_VIDEO-$(call DEMDEC, MOV, AIC) += fate-aic-oddsize
fate-aic-oddsize: CMD = framecrc -i $(TARGET_SAMPLES)/aic/aic_odd_dimensions.mov
fate-aic-oddsize: CMD = framecrc -idct simple -i $(TARGET_SAMPLES)/aic/aic_odd_dimensions.mov
FATE_VIDEO-$(call DEMDEC, MM, MMVIDEO) += fate-alg-mm
fate-alg-mm: CMD = framecrc -i $(TARGET_SAMPLES)/alg-mm/ibmlogo.mm -an -pix_fmt rgb24
......
......@@ -183,9 +183,6 @@ Value -1.000000 for parameter 'rational' out of range [0 - 10]
Value -1.000000 for parameter 'rational' out of range [0 - 10]
Error 'rational=0 : rational=1/2 : rational=1/-1'
Setting options string 'rational=-1/0'
Setting entry with key 'rational' to value '-1/0'
Value -inf for parameter 'rational' out of range [0 - 10]
Value -inf for parameter 'rational' out of range [0 - 10]
Error 'rational=-1/0'
Setting options string 'size=1024x768'
Setting entry with key 'size' to value '1024x768'
......