/*
 * ffplay_def.c
 *
 * Copyright (c) 2003 Fabrice Bellard
 * Copyright (c) 2013 Zhang Rui
 *
 * This file is part of ijkPlayer.
 *
 * ijkPlayer is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * ijkPlayer is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with ijkPlayer; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "ff_ffplay.h"
/* NOTE(review): the header name of this include was lost in a paste that
 * stripped angle-bracket text; <math.h> is the likely original since
 * isnan()/fabs()/NAN are used below — confirm against upstream ijkplayer. */
#include <math.h>
#include "ff_cmdutils.h"
#include "ff_fferror.h"

// FIXME: 9 work around NDKr8e or gcc4.7 bug
// isnan() may not recognize some double NAN, so we test both double and float
#if defined(__ANDROID__)
#ifdef isnan
#undef isnan
#endif
#define isnan(x) (isnan((double)(x)) || isnanf((float)(x)))
#endif

#if defined(__ANDROID__)
#define printf(...) ALOGD(__VA_ARGS__)
#endif

// #define FFP_SHOW_FPS
// #define FFP_SHOW_VDPS
// #define FFP_SHOW_AUDIO_DELAY
// #define FFP_SHOW_DEMUX_CACHE

/* Sentinel packet pushed into a queue to mark a discontinuity (seek):
 * when dequeued, the consumer flushes its decoder and the queue serial
 * is bumped so stale packets/pictures can be recognised and dropped. */
static AVPacket flush_pkt;

// FFP_MERGE: cmp_audio_fmts
// FFP_MERGE: get_valid_channel_layout

static int packet_queue_put(PacketQueue *q, AVPacket *pkt);

/* Append a packet to the queue. Must be called with q->mutex held.
 * Returns 0 on success, -1 on abort or allocation failure. */
static int packet_queue_put_private(PacketQueue *q, AVPacket *pkt)
{
    MyAVPacketList *pkt1;

    if (q->abort_request)
        return -1;

    pkt1 = av_malloc(sizeof(MyAVPacketList));
    if (!pkt1)
        return -1;
    pkt1->pkt = *pkt;
    pkt1->next = NULL;
    /* the flush packet starts a new serial "generation" */
    if (pkt == &flush_pkt)
        q->serial++;
    pkt1->serial = q->serial;

    if (!q->last_pkt)
        q->first_pkt = pkt1;
    else
        q->last_pkt->next = pkt1;
    q->last_pkt = pkt1;
    q->nb_packets++;
    q->size += pkt1->pkt.size + sizeof(*pkt1);
    if (pkt1->pkt.duration > 0)
        q->duration += pkt1->pkt.duration;
    /* XXX: should duplicate packet data in DV case */
    SDL_CondSignal(q->cond);
    return 0;
}

/* Thread-safe enqueue; takes ownership of pkt's data (frees it on failure). */
static int packet_queue_put(PacketQueue *q, AVPacket *pkt)
{
    int ret;

    /* duplicate the packet */
    if (pkt != &flush_pkt && av_dup_packet(pkt) < 0)
        return -1;

    SDL_LockMutex(q->mutex);
    ret = packet_queue_put_private(q, pkt);
    SDL_UnlockMutex(q->mutex);

    if (pkt != &flush_pkt && ret < 0)
        av_free_packet(pkt);

    return ret;
}

/* Enqueue an empty packet for the given stream; signals EOF to the decoder. */
static int packet_queue_put_nullpacket(PacketQueue *q, int stream_index)
{
    AVPacket pkt1, *pkt = &pkt1;
    av_init_packet(pkt);
    pkt->data = NULL;
    pkt->size = 0;
    pkt->stream_index = stream_index;
    return packet_queue_put(q, pkt);
}

/* packet queue handling */
static void packet_queue_init(PacketQueue *q)
{
    memset(q, 0, sizeof(PacketQueue));
    q->mutex = SDL_CreateMutex();
    q->cond = SDL_CreateCond();
    /* start in the aborted state; packet_queue_start() enables the queue */
    q->abort_request = 1;
}

/* Drop every queued packet and reset the counters (serial is preserved). */
static void packet_queue_flush(PacketQueue *q)
{
    MyAVPacketList *pkt, *pkt1;

    SDL_LockMutex(q->mutex);
    for (pkt = q->first_pkt; pkt != NULL; pkt = pkt1) {
        pkt1 = pkt->next;
        av_free_packet(&pkt->pkt);
        av_freep(&pkt);
    }
    q->last_pkt = NULL;
    q->first_pkt = NULL;
    q->nb_packets = 0;
    q->size = 0;
    q->duration = 0;
    SDL_UnlockMutex(q->mutex);
}

static void packet_queue_destroy(PacketQueue *q)
{
    packet_queue_flush(q);
    SDL_DestroyMutex(q->mutex);
    SDL_DestroyCond(q->cond);
}

/* Wake any blocked consumer and make subsequent get/put calls fail fast. */
static void packet_queue_abort(PacketQueue *q)
{
    SDL_LockMutex(q->mutex);

    q->abort_request = 1;

    SDL_CondSignal(q->cond);

    SDL_UnlockMutex(q->mutex);
}

/* (Re-)enable the queue and push a flush packet to open a new serial. */
static void packet_queue_start(PacketQueue *q)
{
    SDL_LockMutex(q->mutex);
    q->abort_request = 0;
    packet_queue_put_private(q, &flush_pkt);
    SDL_UnlockMutex(q->mutex);
}

/* return < 0 if aborted, 0 if no packet and > 0 if packet.  */
static int packet_queue_get(PacketQueue *q, AVPacket *pkt, int block, int *serial)
{
    MyAVPacketList *pkt1;
    int ret;

    SDL_LockMutex(q->mutex);

    for (;;) {
        if (q->abort_request) {
            ret = -1;
            break;
        }

        pkt1 = q->first_pkt;
        if (pkt1) {
            q->first_pkt = pkt1->next;
            if (!q->first_pkt)
                q->last_pkt = NULL;
            q->nb_packets--;
            q->size -= pkt1->pkt.size + sizeof(*pkt1);
            if (pkt1->pkt.duration > 0)
                q->duration -= pkt1->pkt.duration;
            *pkt = pkt1->pkt;
            if (serial)
                *serial = pkt1->serial;
            av_free(pkt1);
            ret = 1;
            break;
        } else if (!block) {
            ret = 0;
            break;
        } else {
            SDL_CondWait(q->cond, q->mutex);
        }
    }
    SDL_UnlockMutex(q->mutex);
    return ret;
}

/* Blocking get that toggles the player's buffering indicator while the
 * queue is empty.  When `finished` is non-zero (decode reached EOF for
 * that serial) packets are drained and discarded instead of returned.
 * Returns 1 with a packet in *pkt, or -1 on abort. */
static int packet_queue_get_or_buffering(FFPlayer *ffp, PacketQueue *q, AVPacket *pkt, int *serial, int finished)
{
    while (1) {
        int new_packet = packet_queue_get(q, pkt, 0, serial);
        if (new_packet < 0)
            return -1;
        else if (new_packet == 0) {
            if (!finished)
                ffp_toggle_buffering(ffp, 1);
            new_packet = packet_queue_get(q, pkt, 1, serial);
            if (new_packet < 0)
                return -1;
        }

        if (finished)
            continue;
        else
            break;
    }

    return 1;
}

// FFP_MERGE: fill_rectangle
// FFP_MERGE: fill_border
// FFP_MERGE: ALPHA_BLEND
// FFP_MERGE: RGBA_IN
// FFP_MERGE: YUVA_IN
// FFP_MERGE: YUVA_OUT
// FFP_MERGE: BPP
// FFP_MERGE: blend_subrect

/* Release the SDL overlay attached to a picture slot, if any. */
static void free_picture(VideoPicture *vp)
{
    if (vp->bmp) {
        SDL_VoutFreeYUVOverlay(vp->bmp);
        vp->bmp = NULL;
    }
}

// FFP_MERGE: free_subpicture
// FFP_MERGE: calculate_display_rect
// FFP_MERGE: video_image_display

#ifdef FFP_SHOW_FPS
static int g_fps_counter = 0;
static int64_t g_fps_total_time = 0; #endif static void video_image_display2(FFPlayer *ffp) { VideoState *is = ffp->is; VideoPicture *vp; vp = &is->pictq[is->pictq_rindex]; if (vp->bmp) { #ifdef FFP_SHOW_FPS int64_t start = SDL_GetTickHR(); #endif SDL_VoutDisplayYUVOverlay(ffp->vout, vp->bmp); #ifdef FFP_SHOW_FPS int64_t dur = SDL_GetTickHR() - start; g_fps_total_time += dur; g_fps_counter++; int64_t avg_frame_time = g_fps_total_time / g_fps_counter; double fps = 1.0f / avg_frame_time * 1000; ALOGE("fps: [%f][%d] %"PRId64" ms/frame, fps=%f, +%"PRId64"\n", vp->pts, g_fps_counter, (int64_t)avg_frame_time, fps, dur); #endif } } // FFP_MERGE: compute_mod // FFP_MERGE: video_audio_display static void stream_close(FFPlayer *ffp) { VideoState *is = ffp->is; int i; /* XXX: use a special url_shutdown call to abort parse cleanly */ is->abort_request = 1; packet_queue_abort(&is->videoq); packet_queue_abort(&is->audioq); ALOGW("wait for read_tid\n"); SDL_WaitThread(is->read_tid, NULL); ALOGW("wait for video_refresh_tid\n"); SDL_WaitThread(is->video_refresh_tid, NULL); packet_queue_destroy(&is->videoq); packet_queue_destroy(&is->audioq); #ifdef FFP_MERGE packet_queue_destroy(&is->subtitleq); #endif /* free all pictures */ for (i = 0; i < VIDEO_PICTURE_QUEUE_SIZE; i++) free_picture(&is->pictq[i]); #ifdef FFP_MERGE for (i = 0; i < SUBPICTURE_QUEUE_SIZE; i++) free_subpicture(&is->subpq[i]); #endif SDL_DestroyMutex(is->pictq_mutex); SDL_DestroyCond(is->pictq_cond); #ifdef FFP_MERGE SDL_DestroyMutex(is->subpq_mutex); SDL_DestroyCond(is->subpq_cond); #endif SDL_DestroyCond(is->continue_read_thread); SDL_DestroyMutex(is->play_mutex); #if !CONFIG_AVFILTER sws_freeContext(is->img_convert_ctx); #endif av_free(is); } // FFP_MERGE: do_exit // FFP_MERGE: sigterm_handler // FFP_MERGE: video_open // FFP_MERGE: video_display /* display the current picture, if any */ static void video_display2(FFPlayer *ffp) { VideoState *is = ffp->is; if (is->video_st) video_image_display2(ffp); } static 
double get_clock(Clock *c) { if (*c->queue_serial != c->serial) return NAN; if (c->paused) { return c->pts; } else { double time = av_gettime() / 1000000.0; return c->pts_drift + time - (time - c->last_updated) * (1.0 - c->speed); } } static void set_clock_at(Clock *c, double pts, int serial, double time) { c->pts = pts; c->last_updated = time; c->pts_drift = c->pts - time; c->serial = serial; } static void set_clock(Clock *c, double pts, int serial) { double time = av_gettime() / 1000000.0; set_clock_at(c, pts, serial, time); } static void set_clock_speed(Clock *c, double speed) { set_clock(c, get_clock(c), c->serial); c->speed = speed; } static void init_clock(Clock *c, int *queue_serial) { c->speed = 1.0; c->paused = 0; c->queue_serial = queue_serial; set_clock(c, NAN, -1); } static void sync_clock_to_slave(Clock *c, Clock *slave) { double clock = get_clock(c); double slave_clock = get_clock(slave); if (!isnan(slave_clock) && (isnan(clock) || fabs(clock - slave_clock) > AV_NOSYNC_THRESHOLD)) set_clock(c, slave_clock, slave->serial); } static int get_master_sync_type(VideoState *is) { if (is->av_sync_type == AV_SYNC_VIDEO_MASTER) { if (is->video_st) return AV_SYNC_VIDEO_MASTER; else return AV_SYNC_AUDIO_MASTER; } else if (is->av_sync_type == AV_SYNC_AUDIO_MASTER) { if (is->audio_st) return AV_SYNC_AUDIO_MASTER; else return AV_SYNC_EXTERNAL_CLOCK; } else { return AV_SYNC_EXTERNAL_CLOCK; } } /* get the current master clock value */ static double get_master_clock(VideoState *is) { double val; switch (get_master_sync_type(is)) { case AV_SYNC_VIDEO_MASTER: val = get_clock(&is->vidclk); break; case AV_SYNC_AUDIO_MASTER: val = get_clock(&is->audclk); break; default: val = get_clock(&is->extclk); break; } return val; } static void check_external_clock_speed(VideoState *is) { if ((is->video_stream >= 0 && is->videoq.nb_packets <= MIN_FRAMES / 2) || (is->audio_stream >= 0 && is->audioq.nb_packets <= MIN_FRAMES / 2)) { set_clock_speed(&is->extclk, 
FFMAX(EXTERNAL_CLOCK_SPEED_MIN, is->extclk.speed - EXTERNAL_CLOCK_SPEED_STEP)); } else if ((is->video_stream < 0 || is->videoq.nb_packets > MIN_FRAMES * 2) && (is->audio_stream < 0 || is->audioq.nb_packets > MIN_FRAMES * 2)) { set_clock_speed(&is->extclk, FFMIN(EXTERNAL_CLOCK_SPEED_MAX, is->extclk.speed + EXTERNAL_CLOCK_SPEED_STEP)); } else { double speed = is->extclk.speed; if (speed != 1.0) set_clock_speed(&is->extclk, speed + EXTERNAL_CLOCK_SPEED_STEP * (1.0 - speed) / fabs(1.0 - speed)); } } /* seek in the stream */ static void stream_seek(VideoState *is, int64_t pos, int64_t rel, int seek_by_bytes) { if (!is->seek_req) { is->seek_pos = pos; is->seek_rel = rel; is->seek_flags &= ~AVSEEK_FLAG_BYTE; if (seek_by_bytes) is->seek_flags |= AVSEEK_FLAG_BYTE; is->seek_req = 1; SDL_CondSignal(is->continue_read_thread); } } /* pause or resume the video */ static void stream_toggle_pause_l(FFPlayer *ffp, int pause_on) { VideoState *is = ffp->is; if (is->paused && !pause_on) { is->frame_timer += av_gettime() / 1000000.0 + is->vidclk.pts_drift - is->vidclk.pts; #ifdef FFP_MERGE if (is->read_pause_return != AVERROR(ENOSYS)) { is->vidclk.paused = 0; } #endif set_clock(&is->vidclk, get_clock(&is->vidclk), is->vidclk.serial); // ALOGE("stream_toggle_pause_l: pause -> start\n"); } else { // ALOGE("stream_toggle_pause_l: %d\n", pause_on); } set_clock(&is->extclk, get_clock(&is->extclk), is->extclk.serial); is->paused = is->audclk.paused = is->vidclk.paused = is->extclk.paused = pause_on; SDL_AoutPauseAudio(ffp->aout, pause_on); } static void stream_update_pause_l(FFPlayer *ffp) { VideoState *is = ffp->is; // ALOGE("stream_update_pause_l: (!%d && (%d || %d)\n", is->step, is->pause_req, is->buffering_on); if (!is->step && (is->pause_req || is->buffering_on)) { // ALOGE("stream_update_pause_l: 1\n"); stream_toggle_pause_l(ffp, 1); } else { // ALOGE("stream_update_pause_l: 0\n"); stream_toggle_pause_l(ffp, 0); } } static void toggle_pause_l(FFPlayer *ffp, int pause_on) { // 
ALOGE("toggle_pause_l\n"); VideoState *is = ffp->is; is->pause_req = pause_on; ffp->auto_start = !pause_on; stream_update_pause_l(ffp); is->step = 0; } static void toggle_pause(FFPlayer *ffp, int pause_on) { SDL_LockMutex(ffp->is->play_mutex); toggle_pause_l(ffp, pause_on); SDL_UnlockMutex(ffp->is->play_mutex); } static void step_to_next_frame_l(FFPlayer *ffp) { VideoState *is = ffp->is; /* if the stream is paused unpause it, then step */ // ALOGE("step_to_next_frame\n"); if (is->paused) stream_toggle_pause_l(ffp, 0); is->step = 1; } static double compute_target_delay(double delay, VideoState *is) { double sync_threshold, diff = 0.0f; /* update delay to follow master synchronisation source */ if (get_master_sync_type(is) != AV_SYNC_VIDEO_MASTER) { /* if video is slave, we try to correct big delays by duplicating or deleting a frame */ diff = get_clock(&is->vidclk) - get_master_clock(is); /* skip or repeat frame. We take into account the delay to compute the threshold. I still don't know if it is the best guess */ sync_threshold = FFMAX(AV_SYNC_THRESHOLD_MIN, FFMIN(AV_SYNC_THRESHOLD_MAX, delay)); if (!isnan(diff) && fabs(diff) < is->max_frame_duration) { if (diff <= -sync_threshold) delay = FFMAX(0, delay + diff); else if (diff >= sync_threshold && delay > AV_SYNC_FRAMEDUP_THRESHOLD) delay = delay + diff; else if (diff >= sync_threshold) delay = 2 * delay; } } av_dlog(NULL, "video: delay=%0.3f A-V=%f\n", delay, -diff); return delay; } static void pictq_next_picture(VideoState *is) { /* update queue size and signal for next picture */ if (++is->pictq_rindex == VIDEO_PICTURE_QUEUE_SIZE) is->pictq_rindex = 0; SDL_LockMutex(is->pictq_mutex); is->pictq_size--; SDL_CondSignal(is->pictq_cond); SDL_UnlockMutex(is->pictq_mutex); } static int pictq_prev_picture(VideoState *is) { VideoPicture *prevvp; int ret = 0; /* update queue size and signal for the previous picture */ prevvp = &is->pictq[(is->pictq_rindex + VIDEO_PICTURE_QUEUE_SIZE - 1) % VIDEO_PICTURE_QUEUE_SIZE]; if 
(prevvp->allocated && prevvp->serial == is->videoq.serial) { SDL_LockMutex(is->pictq_mutex); if (is->pictq_size < VIDEO_PICTURE_QUEUE_SIZE) { if (--is->pictq_rindex == -1) is->pictq_rindex = VIDEO_PICTURE_QUEUE_SIZE - 1; is->pictq_size++; ret = 1; } SDL_CondSignal(is->pictq_cond); SDL_UnlockMutex(is->pictq_mutex); } return ret; } static void update_video_pts(VideoState *is, double pts, int64_t pos, int serial) { /* update current video pts */ set_clock(&is->vidclk, pts, serial); sync_clock_to_slave(&is->extclk, &is->vidclk); is->video_current_pos = pos; is->frame_last_pts = pts; } /* called to display each frame */ static void video_refresh(FFPlayer *opaque, double *remaining_time) { FFPlayer *ffp = opaque; VideoState *is = ffp->is; VideoPicture *vp; double time; #ifdef FFP_MERGE SubPicture *sp, *sp2; #endif if (!is->paused && get_master_sync_type(is) == AV_SYNC_EXTERNAL_CLOCK && is->realtime) check_external_clock_speed(is); if (!ffp->display_disable && is->show_mode != SHOW_MODE_VIDEO && is->audio_st) { time = av_gettime() / 1000000.0; if (is->force_refresh || is->last_vis_time + ffp->rdftspeed < time) { video_display2(ffp); is->last_vis_time = time; } /* ALOGE("remaining[1] %.3f = FFMIN(%.3f, %.3f + %.3f - %.3f)\n", *remaining_time, is->last_vis_time, ffp->rdftspeed, time); */ *remaining_time = FFMIN(*remaining_time, is->last_vis_time + ffp->rdftspeed - time); } if (is->video_st) { int redisplay = 0; if (is->force_refresh) redisplay = pictq_prev_picture(is); retry: if (is->pictq_size == 0) { SDL_LockMutex(is->pictq_mutex); if (is->frame_last_dropped_pts != AV_NOPTS_VALUE && is->frame_last_dropped_pts > is->frame_last_pts) { update_video_pts(is, is->frame_last_dropped_pts, is->frame_last_dropped_pos, is->frame_last_dropped_serial); is->frame_last_dropped_pts = AV_NOPTS_VALUE; } SDL_UnlockMutex(is->pictq_mutex); // nothing to do, no picture to display in the queue } else { double last_duration, duration, delay; /* dequeue the picture */ vp = 
&is->pictq[is->pictq_rindex]; if (vp->serial != is->videoq.serial) { pictq_next_picture(is); redisplay = 0; goto retry; } if (is->paused) goto display; /* compute nominal last_duration */ last_duration = vp->pts - is->frame_last_pts; if (!isnan(last_duration) && last_duration > 0 && last_duration < is->max_frame_duration) { /* if duration of the last frame was sane, update last_duration in video state */ is->frame_last_duration = last_duration; } if (redisplay) delay = 0.0; else delay = compute_target_delay(is->frame_last_duration, is); time= av_gettime()/1000000.0; if (isnan(is->frame_timer) || time < is->frame_timer) is->frame_timer = time; if (time < is->frame_timer + delay && !redisplay) { *remaining_time = FFMIN(is->frame_timer + delay - time, *remaining_time); return; } is->frame_timer += delay; if (delay > 0 && time - is->frame_timer > AV_SYNC_THRESHOLD_MAX) is->frame_timer = time; SDL_LockMutex(is->pictq_mutex); if (!redisplay && !isnan(vp->pts)) update_video_pts(is, vp->pts, vp->pos, vp->serial); SDL_UnlockMutex(is->pictq_mutex); if (is->pictq_size > 1) { VideoPicture *nextvp = &is->pictq[(is->pictq_rindex + 1) % VIDEO_PICTURE_QUEUE_SIZE]; duration = nextvp->pts - vp->pts; if(!is->step && (redisplay || ffp->framedrop > 0 || (ffp->framedrop && get_master_sync_type(is) != AV_SYNC_VIDEO_MASTER)) && time > is->frame_timer + duration) { if (!redisplay) is->frame_drops_late++; pictq_next_picture(is); redisplay = 0; goto retry; } } // FFP_MERGE: if (is->subtitle_st) { {...} display: /* display picture */ if (!ffp->display_disable && is->show_mode == SHOW_MODE_VIDEO) video_display2(ffp); pictq_next_picture(is); SDL_LockMutex(ffp->is->play_mutex); if (is->step) { is->step = 0; if (!is->paused) stream_update_pause_l(ffp); } SDL_UnlockMutex(ffp->is->play_mutex); } } is->force_refresh = 0; if (ffp->show_status) { static int64_t last_time; int64_t cur_time; int aqsize, vqsize, sqsize; double av_diff; cur_time = av_gettime(); if (!last_time || (cur_time - last_time) >= 
30000) { aqsize = 0; vqsize = 0; sqsize = 0; if (is->audio_st) aqsize = is->audioq.size; if (is->video_st) vqsize = is->videoq.size; #ifdef FFP_MERGE if (is->subtitle_st) sqsize = is->subtitleq.size; #else sqsize = 0; #endif av_diff = 0; if (is->audio_st && is->video_st) av_diff = get_clock(&is->audclk) - get_clock(&is->vidclk); else if (is->video_st) av_diff = get_master_clock(is) - get_clock(&is->vidclk); else if (is->audio_st) av_diff = get_master_clock(is) - get_clock(&is->audclk); av_log(NULL, AV_LOG_INFO, "%7.2f %s:%7.3f fd=%4d aq=%5dKB vq=%5dKB sq=%5dB f=%"PRId64"/%"PRId64" \r", get_master_clock(is), (is->audio_st && is->video_st) ? "A-V" : (is->video_st ? "M-V" : (is->audio_st ? "M-A" : " ")), av_diff, is->frame_drops_early + is->frame_drops_late, aqsize / 1024, vqsize / 1024, sqsize, is->video_st ? is->video_st->codec->pts_correction_num_faulty_dts : 0, is->video_st ? is->video_st->codec->pts_correction_num_faulty_pts : 0); fflush(stdout); last_time = cur_time; } } } // TODO: 9 alloc_picture in video_refresh_thread if overlay referenced by vout /* allocate a picture (needs to do that in main thread to avoid potential locking problems */ static void alloc_picture(FFPlayer *ffp) { VideoState *is = ffp->is; VideoPicture *vp; #ifdef FFP_MERGE int64_t bufferdiff; #endif vp = &is->pictq[is->pictq_windex]; free_picture(vp); #ifdef FFP_MERGE video_open(ffp, 0, vp); #endif vp->bmp = SDL_Vout_CreateOverlay(vp->width, vp->height, ffp->overlay_format, ffp->vout); #ifdef FFP_MERGE bufferdiff = vp->bmp ? FFMAX(vp->bmp->pixels[0], vp->bmp->pixels[1]) - FFMIN(vp->bmp->pixels[0], vp->bmp->pixels[1]) : 0; if (!vp->bmp || vp->bmp->pitches[0] < vp->width || bufferdiff < (int64_t)vp->height * vp->bmp->pitches[0]) { #else /* RV16, RV32 contains only one plane */ if (!vp->bmp || vp->bmp->pitches[0] < vp->width) { #endif /* SDL allocates a buffer smaller than requested if the video * overlay hardware is unable to support the requested size. 
*/ av_log(NULL, AV_LOG_FATAL, "Error: the video system does not support an image\n" "size of %dx%d pixels. Try using -lowres or -vf \"scale=w:h\"\n" "to reduce the image size.\n", vp->width, vp->height ); free_picture(vp); } SDL_LockMutex(is->pictq_mutex); vp->allocated = 1; SDL_CondSignal(is->pictq_cond); SDL_UnlockMutex(is->pictq_mutex); } #ifdef FFP_MERGE static void duplicate_right_border_pixels(SDL_Overlay *bmp) { int i, width, height; Uint8 *p, *maxp; for (i = 0; i < 3; i++) { width = bmp->w; height = bmp->h; if (i > 0) { width >>= 1; height >>= 1; } if (bmp->pitches[i] > width) { maxp = bmp->pixels[i] + bmp->pitches[i] * height - 1; for (p = bmp->pixels[i] + width - 1; p < maxp; p += bmp->pitches[i]) *(p+1) = *p; } } } #endif static int queue_picture(FFPlayer *ffp, AVFrame *src_frame, double pts, int64_t pos, int serial) { VideoState *is = ffp->is; VideoPicture *vp; #if defined(DEBUG_SYNC) && 0 printf("frame_type=%c pts=%0.3f\n", av_get_picture_type_char(src_frame->pict_type), pts); #endif /* wait until we have space to put a new picture */ SDL_LockMutex(is->pictq_mutex); /* keep the last already displayed picture in the queue */ while (is->pictq_size >= VIDEO_PICTURE_QUEUE_SIZE - 1 && !is->videoq.abort_request) { SDL_CondWait(is->pictq_cond, is->pictq_mutex); } SDL_UnlockMutex(is->pictq_mutex); if (is->videoq.abort_request) return -1; vp = &is->pictq[is->pictq_windex]; vp->sar = src_frame->sample_aspect_ratio; /* alloc or resize hardware picture buffer */ if (!vp->bmp || vp->reallocate || !vp->allocated || vp->width != src_frame->width || vp->height != src_frame->height) { if (vp->width != src_frame->width || vp->height != src_frame->height) ffp_notify_msg3(ffp, FFP_MSG_VIDEO_SIZE_CHANGED, src_frame->width, src_frame->height); vp->allocated = 0; vp->reallocate = 0; vp->width = src_frame->width; vp->height = src_frame->height; /* the allocation must be done in the main thread to avoid locking problems. 
*/ alloc_picture(ffp); if (is->videoq.abort_request) return -1; } /* if the frame is not skipped, then display it */ if (vp->bmp) { /* get a pointer on the bitmap */ SDL_VoutLockYUVOverlay(vp->bmp); if (SDL_VoutFFmpeg_ConvertFrame(vp->bmp, src_frame, &is->img_convert_ctx, ffp->sws_flags) < 0) { av_log(NULL, AV_LOG_FATAL, "Cannot initialize the conversion context\n"); exit(1); } /* update the bitmap content */ SDL_VoutUnlockYUVOverlay(vp->bmp); vp->pts = pts; vp->pos = pos; vp->serial = serial; /* now we can update the picture count */ if (++is->pictq_windex == VIDEO_PICTURE_QUEUE_SIZE) is->pictq_windex = 0; SDL_LockMutex(is->pictq_mutex); is->pictq_size++; SDL_UnlockMutex(is->pictq_mutex); } return 0; } #ifdef FFP_SHOW_VDPS static int g_vdps_counter = 0; static int64_t g_vdps_total_time = 0; #endif static int get_video_frame(FFPlayer *ffp, AVFrame *frame, AVPacket *pkt, int *serial) { VideoState *is = ffp->is; int got_picture; if (packet_queue_get_or_buffering(ffp, &is->videoq, pkt, serial, is->video_finished) < 0) return -1; if (pkt->data == flush_pkt.data) { avcodec_flush_buffers(is->video_st->codec); SDL_LockMutex(is->pictq_mutex); // Make sure there are no long delay timers (ideally we should just flush the queue but that's harder) while (is->pictq_size && !is->videoq.abort_request) { SDL_CondWait(is->pictq_cond, is->pictq_mutex); } is->video_current_pos = -1; is->frame_last_pts = AV_NOPTS_VALUE; is->frame_last_duration = 0; is->frame_timer = (double)av_gettime() / 1000000.0; is->frame_last_dropped_pts = AV_NOPTS_VALUE; SDL_UnlockMutex(is->pictq_mutex); return 0; } if (is->dropping_frame) { if (pkt->flags & AV_PKT_FLAG_KEY) { ALOGW("skip frame(fast): end\n"); is->dropping_frame = 0; is->video_st->codec->skip_frame = ffp->skip_frame; avcodec_flush_buffers(is->video_st->codec); } else { return 0; } } if (is->frame_drops_early > 12) { ALOGW("skip frame: start\n"); is->dropping_frame = 1; is->frame_drops_early = 0; is->video_st->codec->skip_frame = 
AVDISCARD_NONKEY; avcodec_flush_buffers(is->video_st->codec); return 0; } #ifdef FFP_SHOW_VDPS int64_t start = SDL_GetTickHR(); #endif if(avcodec_decode_video2(is->video_st->codec, frame, &got_picture, pkt) < 0) return 0; #ifdef FFP_SHOW_VDPS int64_t dur = SDL_GetTickHR() - start; g_vdps_total_time += dur; g_vdps_counter++; int64_t avg_frame_time = g_vdps_total_time / g_vdps_counter; double fps = 1.0f / avg_frame_time * 1000; ALOGE("vdps: [%f][%d] %"PRId64" ms/frame, vdps=%f, +%"PRId64"\n", frame->pts, g_vdps_counter, (int64_t)avg_frame_time, fps, dur); #endif if (!got_picture && !pkt->data) { ALOGE("video_finished"); is->video_finished = *serial; } if (got_picture) { int ret = 1; double dpts = NAN; if (ffp->decoder_reorder_pts == -1) { frame->pts = av_frame_get_best_effort_timestamp(frame); } else if (ffp->decoder_reorder_pts) { frame->pts = frame->pkt_pts; } else { frame->pts = frame->pkt_dts; } if (frame->pts != AV_NOPTS_VALUE) dpts = av_q2d(is->video_st->time_base) * frame->pts; frame->sample_aspect_ratio = av_guess_sample_aspect_ratio(is->ic, is->video_st, frame); if (ffp->framedrop > 0 || (ffp->framedrop && get_master_sync_type(is) != AV_SYNC_VIDEO_MASTER)) { SDL_LockMutex(is->pictq_mutex); if (is->frame_last_pts != AV_NOPTS_VALUE && frame->pts != AV_NOPTS_VALUE) { double clockdiff = get_clock(&is->vidclk) - get_master_clock(is); double ptsdiff = dpts - is->frame_last_pts; if (!isnan(clockdiff) && fabs(clockdiff) < AV_NOSYNC_THRESHOLD && !isnan(ptsdiff) && ptsdiff > 0 && ptsdiff < AV_NOSYNC_THRESHOLD && clockdiff + ptsdiff - is->frame_last_filter_delay < 0 && is->videoq.nb_packets) { is->frame_last_dropped_pos = av_frame_get_pkt_pos(frame); is->frame_last_dropped_pts = dpts; is->frame_last_dropped_serial = *serial; is->frame_drops_early++; av_frame_unref(frame); ret = 0; } } SDL_UnlockMutex(is->pictq_mutex); } return ret; } return 0; } #if CONFIG_AVFILTER static int configure_filtergraph(AVFilterGraph *graph, const char *filtergraph, AVFilterContext 
*source_ctx, AVFilterContext *sink_ctx) { int ret; AVFilterInOut *outputs = NULL, *inputs = NULL; if (filtergraph) { outputs = avfilter_inout_alloc(); inputs = avfilter_inout_alloc(); if (!outputs || !inputs) { ret = AVERROR(ENOMEM); goto fail; } outputs->name = av_strdup("in"); outputs->filter_ctx = source_ctx; outputs->pad_idx = 0; outputs->next = NULL; inputs->name = av_strdup("out"); inputs->filter_ctx = sink_ctx; inputs->pad_idx = 0; inputs->next = NULL; if ((ret = avfilter_graph_parse_ptr(graph, filtergraph, &inputs, &outputs, NULL)) < 0) goto fail; } else { if ((ret = avfilter_link(source_ctx, 0, sink_ctx, 0)) < 0) goto fail; } ret = avfilter_graph_config(graph, NULL); fail: avfilter_inout_free(&outputs); avfilter_inout_free(&inputs); return ret; } static int configure_video_filters(AVFilterGraph *graph, VideoState *is, const char *vfilters, AVFrame *frame) { static const enum AVPixelFormat pix_fmts[] = { AV_PIX_FMT_YUV420P, AV_PIX_FMT_NONE }; char sws_flags_str[128]; char buffersrc_args[256]; int ret; AVFilterContext *filt_src = NULL, *filt_out = NULL, *filt_crop; AVCodecContext *codec = is->video_st->codec; AVRational fr = av_guess_frame_rate(is->ic, is->video_st, NULL); av_opt_get_int(sws_opts, "sws_flags", 0, &sws_flags); snprintf(sws_flags_str, sizeof(sws_flags_str), "flags=%"PRId64, sws_flags); graph->scale_sws_opts = av_strdup(sws_flags_str); snprintf(buffersrc_args, sizeof(buffersrc_args), "video_size=%dx%d:pix_fmt=%d:time_base=%d/%d:pixel_aspect=%d/%d", frame->width, frame->height, frame->format, is->video_st->time_base.num, is->video_st->time_base.den, codec->sample_aspect_ratio.num, FFMAX(codec->sample_aspect_ratio.den, 1)); if (fr.num && fr.den) av_strlcatf(buffersrc_args, sizeof(buffersrc_args), ":frame_rate=%d/%d", fr.num, fr.den); if ((ret = avfilter_graph_create_filter(&filt_src, avfilter_get_by_name("buffer"), "ffplay_buffer", buffersrc_args, NULL, graph)) < 0) goto fail; ret = avfilter_graph_create_filter(&filt_out, 
avfilter_get_by_name("buffersink"), "ffplay_buffersink", NULL, NULL, graph); if (ret < 0) goto fail; if ((ret = av_opt_set_int_list(filt_out, "pix_fmts", pix_fmts, AV_PIX_FMT_NONE, AV_OPT_SEARCH_CHILDREN)) < 0) goto fail; /* SDL YUV code is not handling odd width/height for some driver * combinations, therefore we crop the picture to an even width/height. */ if ((ret = avfilter_graph_create_filter(&filt_crop, avfilter_get_by_name("crop"), "ffplay_crop", "floor(in_w/2)*2:floor(in_h/2)*2", NULL, graph)) < 0) goto fail; if ((ret = avfilter_link(filt_crop, 0, filt_out, 0)) < 0) goto fail; if ((ret = configure_filtergraph(graph, vfilters, filt_src, filt_crop)) < 0) goto fail; is->in_video_filter = filt_src; is->out_video_filter = filt_out; fail: return ret; } static int configure_audio_filters(VideoState *is, const char *afilters, int force_output_format) { static const enum AVSampleFormat sample_fmts[] = { AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_NONE }; int sample_rates[2] = { 0, -1 }; int64_t channel_layouts[2] = { 0, -1 }; int channels[2] = { 0, -1 }; AVFilterContext *filt_asrc = NULL, *filt_asink = NULL; char aresample_swr_opts[512] = ""; AVDictionaryEntry *e = NULL; char asrc_args[256]; int ret; avfilter_graph_free(&is->agraph); if (!(is->agraph = avfilter_graph_alloc())) return AVERROR(ENOMEM); while ((e = av_dict_get(swr_opts, "", e, AV_DICT_IGNORE_SUFFIX))) av_strlcatf(aresample_swr_opts, sizeof(aresample_swr_opts), "%s=%s:", e->key, e->value); if (strlen(aresample_swr_opts)) aresample_swr_opts[strlen(aresample_swr_opts)-1] = '\0'; av_opt_set(is->agraph, "aresample_swr_opts", aresample_swr_opts, 0); ret = snprintf(asrc_args, sizeof(asrc_args), "sample_rate=%d:sample_fmt=%s:channels=%d:time_base=%d/%d", is->audio_filter_src.freq, av_get_sample_fmt_name(is->audio_filter_src.fmt), is->audio_filter_src.channels, 1, is->audio_filter_src.freq); if (is->audio_filter_src.channel_layout) snprintf(asrc_args + ret, sizeof(asrc_args) - ret, ":channel_layout=0x%"PRIx64, 
/* (tail of configure_audio_filters — the function begins above this chunk) */
is->audio_filter_src.channel_layout);
ret = avfilter_graph_create_filter(&filt_asrc, avfilter_get_by_name("abuffer"), "ffplay_abuffer", asrc_args, NULL, is->agraph);
if (ret < 0)
    goto end;
ret = avfilter_graph_create_filter(&filt_asink, avfilter_get_by_name("abuffersink"), "ffplay_abuffersink", NULL, NULL, is->agraph);
if (ret < 0)
    goto end;
if ((ret = av_opt_set_int_list(filt_asink, "sample_fmts", sample_fmts, AV_SAMPLE_FMT_NONE, AV_OPT_SEARCH_CHILDREN)) < 0)
    goto end;
if ((ret = av_opt_set_int(filt_asink, "all_channel_counts", 1, AV_OPT_SEARCH_CHILDREN)) < 0)
    goto end;
if (force_output_format) {
    /* pin the sink to the already-negotiated hardware output parameters */
    channel_layouts[0] = is->audio_tgt.channel_layout;
    channels       [0] = is->audio_tgt.channels;
    sample_rates   [0] = is->audio_tgt.freq;
    if ((ret = av_opt_set_int(filt_asink, "all_channel_counts", 0, AV_OPT_SEARCH_CHILDREN)) < 0)
        goto end;
    if ((ret = av_opt_set_int_list(filt_asink, "channel_layouts", channel_layouts, -1, AV_OPT_SEARCH_CHILDREN)) < 0)
        goto end;
    if ((ret = av_opt_set_int_list(filt_asink, "channel_counts" , channels , -1, AV_OPT_SEARCH_CHILDREN)) < 0)
        goto end;
    if ((ret = av_opt_set_int_list(filt_asink, "sample_rates" , sample_rates , -1, AV_OPT_SEARCH_CHILDREN)) < 0)
        goto end;
}
if ((ret = configure_filtergraph(is->agraph, afilters, filt_asrc, filt_asink)) < 0)
    goto end;
is->in_audio_filter  = filt_asrc;
is->out_audio_filter = filt_asink;
end:
if (ret < 0)
    avfilter_graph_free(&is->agraph);
return ret;
}
#endif  /* CONFIG_AVFILTER */

/* Video decoding thread: pulls decoded frames via get_video_frame(), optionally
 * pushes them through the avfilter graph (rebuilding it whenever frame
 * size/format or the packet serial changes), then hands them to queue_picture().
 * Runs until get_video_frame() fails or the stream is aborted. */
static int video_thread(void *arg)
{
    AVPacket pkt = { 0 };
    FFPlayer *ffp = arg;
    VideoState *is = ffp->is;
    AVFrame *frame = av_frame_alloc();
    double pts;
    int ret;
    int serial = 0;  /* packet serial of the most recently decoded frame */
#if CONFIG_AVFILTER
    AVFilterGraph *graph = avfilter_graph_alloc();
    AVFilterContext *filt_out = NULL, *filt_in = NULL;
    /* cached properties of the last frame fed to the graph; a mismatch
     * triggers a filtergraph rebuild below */
    int last_w = 0;
    int last_h = 0;
    enum AVPixelFormat last_format = -2;
    int last_serial = -1;
#endif

    for (;;) {
        /* busy-wait (10ms steps) while paused, unless we are being torn down */
        while (is->paused && !is->videoq.abort_request)
            SDL_Delay(10);
        avcodec_get_frame_defaults(frame);
        av_free_packet(&pkt);

        ret = get_video_frame(ffp, frame, &pkt, &serial);
        if (ret < 0)
            goto the_end;
        if (!ret)          /* no frame produced this round (e.g. flushed) */
            continue;

#if CONFIG_AVFILTER
        if (   last_w != frame->width
            || last_h != frame->height
            || last_format != frame->format
            || last_serial != serial) {
            av_log(NULL, AV_LOG_DEBUG, "Video frame changed from size:%dx%d format:%s serial:%d to size:%dx%d format:%s serial:%d\n", last_w, last_h, (const char *)av_x_if_null(av_get_pix_fmt_name(last_format), "none"), last_serial, frame->width, frame->height, (const char *)av_x_if_null(av_get_pix_fmt_name(frame->format), "none"), serial);
            avfilter_graph_free(&graph);
            graph = avfilter_graph_alloc();
            if ((ret = configure_video_filters(graph, is, vfilters, frame)) < 0) {
                /* filter setup failure is fatal: ask the main loop to quit */
                SDL_Event event;
                event.type = FF_QUIT_EVENT;
                event.user.data1 = is;
                SDL_PushEvent(&event);
                av_free_packet(&pkt);
                goto the_end;
            }
            filt_in  = is->in_video_filter;
            filt_out = is->out_video_filter;
            last_w = frame->width;
            last_h = frame->height;
            last_format = frame->format;
            last_serial = serial;
        }

        ret = av_buffersrc_add_frame(filt_in, frame);
        if (ret < 0)
            goto the_end;
        av_frame_unref(frame);
        avcodec_get_frame_defaults(frame);
        av_free_packet(&pkt);

        /* drain every frame the graph can currently produce */
        while (ret >= 0) {
            is->frame_last_returned_time = av_gettime() / 1000000.0;
            ret = av_buffersink_get_frame_flags(filt_out, frame, 0);
            if (ret < 0) {
                if (ret == AVERROR_EOF)
                    is->video_finished = serial;
                ret = 0;
                break;
            }
            is->frame_last_filter_delay = av_gettime() / 1000000.0 - is->frame_last_returned_time;
            /* ignore absurd delays (clock jumps etc.) */
            if (fabs(is->frame_last_filter_delay) > AV_NOSYNC_THRESHOLD / 10.0)
                is->frame_last_filter_delay = 0;
            pts = (frame->pts == AV_NOPTS_VALUE) ? NAN : frame->pts * av_q2d(filt_out->inputs[0]->time_base);
            /* NOTE(review): this branch passes `is` while the non-avfilter
             * branch below passes `ffp` — confirm queue_picture()'s expected
             * first argument; one of the two looks wrong. */
            ret = queue_picture(is, frame, pts, av_frame_get_pkt_pos(frame), serial);
            av_frame_unref(frame);
        }
#else
        pts = (frame->pts == AV_NOPTS_VALUE) ? NAN : frame->pts * av_q2d(is->video_st->time_base);
        ret = queue_picture(ffp, frame, pts, av_frame_get_pkt_pos(frame), serial);
        av_frame_unref(frame);
#endif

        if (ret < 0)
            goto the_end;
    }
 the_end:
    avcodec_flush_buffers(is->video_st->codec);
#if CONFIG_AVFILTER
    avfilter_graph_free(&graph);
#endif
    av_free_packet(&pkt);
    av_frame_free(&frame);
    return 0;
}

// FFP_MERGE: subtitle_thread

/* copy samples for viewing in editor window */
static void update_sample_display(VideoState *is, short *samples, int samples_size)
{
    int size, len;

    /* copy into the circular sample_array, wrapping at SAMPLE_ARRAY_SIZE */
    size = samples_size / sizeof(short);
    while (size > 0) {
        len = SAMPLE_ARRAY_SIZE - is->sample_array_index;
        if (len > size)
            len = size;
        memcpy(is->sample_array + is->sample_array_index, samples, len * sizeof(short));
        samples += len;
        is->sample_array_index += len;
        if (is->sample_array_index >= SAMPLE_ARRAY_SIZE)
            is->sample_array_index = 0;
        size -= len;
    }
}

/* return the wanted number of samples to get better sync if sync_type is video
 * or external master clock */
static int synchronize_audio(VideoState *is, int nb_samples)
{
    int wanted_nb_samples = nb_samples;

    /* if not master, then we try to remove or add samples to correct the clock */
    if (get_master_sync_type(is) != AV_SYNC_AUDIO_MASTER) {
        double diff, avg_diff;
        int min_nb_samples, max_nb_samples;

        diff = get_clock(&is->audclk) - get_master_clock(is);

        if (!isnan(diff) && fabs(diff) < AV_NOSYNC_THRESHOLD) {
            /* exponential moving average of the A-V difference */
            is->audio_diff_cum = diff + is->audio_diff_avg_coef * is->audio_diff_cum;
            if (is->audio_diff_avg_count < AUDIO_DIFF_AVG_NB) {
                /* not enough measures to have a correct estimate */
                is->audio_diff_avg_count++;
            } else {
                /* estimate the A-V difference */
                avg_diff = is->audio_diff_cum * (1.0 - is->audio_diff_avg_coef);

                if (fabs(avg_diff) >= is->audio_diff_threshold) {
                    wanted_nb_samples = nb_samples + (int)(diff * is->audio_src.freq);
                    min_nb_samples = ((nb_samples * (100 - SAMPLE_CORRECTION_PERCENT_MAX) / 100));
                    max_nb_samples = ((nb_samples * (100 + SAMPLE_CORRECTION_PERCENT_MAX) / 100));
                    /* clamp the correction to SAMPLE_CORRECTION_PERCENT_MAX */
                    wanted_nb_samples = FFMIN(FFMAX(wanted_nb_samples, min_nb_samples), max_nb_samples);
                }
                av_dlog(NULL, "diff=%f adiff=%f sample_diff=%d apts=%0.3f %f\n", diff, avg_diff, wanted_nb_samples - nb_samples, is->audio_clock, is->audio_diff_threshold);
            }
        } else {
            /* too big difference : may be initial PTS errors, so
               reset A-V filter */
            is->audio_diff_avg_count = 0;
            is->audio_diff_cum       = 0;
        }
    }

    return wanted_nb_samples;
}

/**
 * Decode one audio frame and return its uncompressed size.
 *
 * The processed audio frame is decoded, converted if required, and
 * stored in is->audio_buf, with size in bytes given by the return
 * value.
 */
static int audio_decode_frame(FFPlayer *ffp)
{
    VideoState *is = ffp->is;
    AVPacket *pkt_temp = &is->audio_pkt_temp;  /* consumable view of the current packet */
    AVPacket *pkt = &is->audio_pkt;
    AVCodecContext *dec = is->audio_st->codec;
    int len1, data_size, resampled_data_size;
    int64_t dec_channel_layout;
    int got_frame;
    av_unused double audio_clock0;
    int wanted_nb_samples;
    AVRational tb;
#if CONFIG_AVFILTER
    int ret;
    int reconfigure;
#endif

    for (;;) {
        /* NOTE: the audio packet can contain several frames */
        /* stream_index == -1 marks the temp packet as fully consumed */
        while (pkt_temp->stream_index != -1 || is->audio_buf_frames_pending) {
            if (!is->frame) {
                if (!(is->frame = avcodec_alloc_frame()))
                    return AVERROR(ENOMEM);
            } else {
                av_frame_unref(is->frame);
                avcodec_get_frame_defaults(is->frame);
            }

            /* stale serial (a seek happened): drop this packet */
            if (is->audioq.serial != is->audio_pkt_temp_serial)
                break;

            if (is->paused)
                return -1;

            if (!is->audio_buf_frames_pending) {
                len1 = avcodec_decode_audio4(dec, is->frame, &got_frame, pkt_temp);
                if (len1 < 0) {
                    /* if error, we skip the frame */
                    pkt_temp->size = 0;
                    break;
                }

                pkt_temp->dts =
                pkt_temp->pts = AV_NOPTS_VALUE;
                pkt_temp->data += len1;
                pkt_temp->size -= len1;
                if ((pkt_temp->data && pkt_temp->size <= 0) || (!pkt_temp->data && !got_frame))
                    pkt_temp->stream_index = -1;
                if (!pkt_temp->data && !got_frame) {
                    ALOGE("audio_finished");
                    is->audio_finished = is->audio_pkt_temp_serial;
                }

                if (!got_frame)
                    continue;

                tb = (AVRational){1, is->frame->sample_rate};
                /* derive a pts: decoder pts, else packet pts, else predicted
                 * continuation of the previous frame */
                if (is->frame->pts != AV_NOPTS_VALUE)
                    is->frame->pts = av_rescale_q(is->frame->pts, dec->time_base, tb);
                else if (is->frame->pkt_pts != AV_NOPTS_VALUE)
                    is->frame->pts = av_rescale_q(is->frame->pkt_pts, is->audio_st->time_base, tb);
                else if (is->audio_frame_next_pts != AV_NOPTS_VALUE)
#if CONFIG_AVFILTER
                    is->frame->pts = av_rescale_q(is->audio_frame_next_pts, (AVRational){1, is->audio_filter_src.freq}, tb);
#else
                    is->frame->pts = av_rescale_q(is->audio_frame_next_pts, (AVRational){1, is->audio_src.freq}, tb);
#endif

                if (is->frame->pts != AV_NOPTS_VALUE)
                    is->audio_frame_next_pts = is->frame->pts + is->frame->nb_samples;

#if CONFIG_AVFILTER
                dec_channel_layout = get_valid_channel_layout(is->frame->channel_layout, av_frame_get_channels(is->frame));

                /* rebuild the audio filter graph whenever the decoded format
                 * or the packet serial changes */
                reconfigure =
                    cmp_audio_fmts(is->audio_filter_src.fmt, is->audio_filter_src.channels,
                                   is->frame->format, av_frame_get_channels(is->frame))    ||
                    is->audio_filter_src.channel_layout != dec_channel_layout ||
                    is->audio_filter_src.freq           != is->frame->sample_rate ||
                    is->audio_pkt_temp_serial           != is->audio_last_serial;

                if (reconfigure) {
                    char buf1[1024], buf2[1024];
                    av_get_channel_layout_string(buf1, sizeof(buf1), -1, is->audio_filter_src.channel_layout);
                    av_get_channel_layout_string(buf2, sizeof(buf2), -1, dec_channel_layout);
                    av_log(NULL, AV_LOG_DEBUG,
                           "Audio frame changed from rate:%d ch:%d fmt:%s layout:%s serial:%d to rate:%d ch:%d fmt:%s layout:%s serial:%d\n",
                           is->audio_filter_src.freq, is->audio_filter_src.channels, av_get_sample_fmt_name(is->audio_filter_src.fmt), buf1, is->audio_last_serial,
                           is->frame->sample_rate, av_frame_get_channels(is->frame), av_get_sample_fmt_name(is->frame->format), buf2, is->audio_pkt_temp_serial);

                    is->audio_filter_src.fmt            = is->frame->format;
                    is->audio_filter_src.channels       = av_frame_get_channels(is->frame);
                    is->audio_filter_src.channel_layout = dec_channel_layout;
                    is->audio_filter_src.freq           = is->frame->sample_rate;
                    is->audio_last_serial               = is->audio_pkt_temp_serial;

                    if ((ret = configure_audio_filters(is, afilters, 1)) < 0)
                        return ret;
                }

                if ((ret = av_buffersrc_add_frame(is->in_audio_filter, is->frame)) < 0)
                    return ret;
                av_frame_unref(is->frame);
#endif
            }
#if CONFIG_AVFILTER
            if ((ret = av_buffersink_get_frame_flags(is->out_audio_filter, is->frame, 0)) < 0) {
                if (ret == AVERROR(EAGAIN)) {
                    /* graph needs more input: go decode another frame */
                    is->audio_buf_frames_pending = 0;
                    continue;
                }
                if (ret == AVERROR_EOF)
                    is->audio_finished = is->audio_pkt_temp_serial;
                return ret;
            }
            is->audio_buf_frames_pending = 1;
            tb = is->out_audio_filter->inputs[0]->time_base;
#endif

            data_size = av_samples_get_buffer_size(NULL, av_frame_get_channels(is->frame),
                                                   is->frame->nb_samples,
                                                   is->frame->format, 1);

            dec_channel_layout =
                (is->frame->channel_layout && av_frame_get_channels(is->frame) == av_get_channel_layout_nb_channels(is->frame->channel_layout)) ?
                is->frame->channel_layout : av_get_default_channel_layout(av_frame_get_channels(is->frame));
            wanted_nb_samples = synchronize_audio(is, is->frame->nb_samples);

            /* (re)create the resampler when the source format changed, or when
             * sync wants a sample-count correction and no resampler exists */
            if (is->frame->format        != is->audio_src.fmt            ||
                dec_channel_layout       != is->audio_src.channel_layout ||
                is->frame->sample_rate   != is->audio_src.freq           ||
                (wanted_nb_samples       != is->frame->nb_samples && !is->swr_ctx)) {
                swr_free(&is->swr_ctx);
                is->swr_ctx = swr_alloc_set_opts(NULL,
                                                 is->audio_tgt.channel_layout, is->audio_tgt.fmt, is->audio_tgt.freq,
                                                 dec_channel_layout,           is->frame->format, is->frame->sample_rate,
                                                 0, NULL);
                if (!is->swr_ctx || swr_init(is->swr_ctx) < 0) {
                    av_log(NULL, AV_LOG_ERROR,
                           "Cannot create sample rate converter for conversion of %d Hz %s %d channels to %d Hz %s %d channels!\n",
                           is->frame->sample_rate, av_get_sample_fmt_name(is->frame->format), av_frame_get_channels(is->frame),
                           is->audio_tgt.freq, av_get_sample_fmt_name(is->audio_tgt.fmt), is->audio_tgt.channels);
                    break;
                }
                is->audio_src.channel_layout = dec_channel_layout;
                is->audio_src.channels       = av_frame_get_channels(is->frame);
                is->audio_src.freq = is->frame->sample_rate;
                is->audio_src.fmt = is->frame->format;
            }

            if (is->swr_ctx) {
                const uint8_t **in = (const uint8_t **)is->frame->extended_data;
                uint8_t **out = &is->audio_buf1;
                /* +256 output headroom for resampler delay */
                int out_count = (int)((int64_t)wanted_nb_samples * is->audio_tgt.freq / is->frame->sample_rate + 256);
                int out_size  = av_samples_get_buffer_size(NULL, is->audio_tgt.channels, out_count, is->audio_tgt.fmt, 0);
                int len2;
                if (out_size < 0) {
                    av_log(NULL, AV_LOG_ERROR, "av_samples_get_buffer_size() failed\n");
                    break;
                }
                if (wanted_nb_samples != is->frame->nb_samples) {
                    if (swr_set_compensation(is->swr_ctx, (wanted_nb_samples - is->frame->nb_samples) * is->audio_tgt.freq / is->frame->sample_rate,
                                                wanted_nb_samples * is->audio_tgt.freq / is->frame->sample_rate) < 0) {
                        av_log(NULL, AV_LOG_ERROR, "swr_set_compensation() failed\n");
                        break;
                    }
                }
                av_fast_malloc(&is->audio_buf1, &is->audio_buf1_size, out_size);
                if (!is->audio_buf1)
                    return AVERROR(ENOMEM);
                len2 = swr_convert(is->swr_ctx, out, out_count, in, is->frame->nb_samples);
                if (len2 < 0) {
                    av_log(NULL, AV_LOG_ERROR, "swr_convert() failed\n");
                    break;
                }
                if (len2 == out_count) {
                    av_log(NULL, AV_LOG_WARNING, "audio buffer is probably too small\n");
                    swr_init(is->swr_ctx);
                }
                is->audio_buf = is->audio_buf1;
                resampled_data_size = len2 * is->audio_tgt.channels * av_get_bytes_per_sample(is->audio_tgt.fmt);
            } else {
                is->audio_buf = is->frame->data[0];
                resampled_data_size = data_size;
            }

            audio_clock0 = is->audio_clock;
            /* update the audio clock with the pts */
            if (is->frame->pts != AV_NOPTS_VALUE)
                is->audio_clock = is->frame->pts * av_q2d(tb) + (double) is->frame->nb_samples / is->frame->sample_rate;
            else
                is->audio_clock = NAN;
            is->audio_clock_serial = is->audio_pkt_temp_serial;
#ifdef FFP_SHOW_AUDIO_DELAY
#ifdef DEBUG
            {
                static double last_clock;
                printf("audio: delay=%0.3f clock=%0.3f clock0=%0.3f\n",
                       is->audio_clock - last_clock,
                       is->audio_clock, audio_clock0);
                last_clock = is->audio_clock;
            }
#endif
#endif
            return resampled_data_size;
        }

        /* free the current packet */
        if (pkt->data)
            av_free_packet(pkt);
        memset(pkt_temp, 0, sizeof(*pkt_temp));
        pkt_temp->stream_index = -1;

        if (is->audioq.abort_request) {
            return -1;
        }

        if (is->audioq.nb_packets == 0)
            SDL_CondSignal(is->continue_read_thread);

        /* read next packet */
        if (packet_queue_get_or_buffering(ffp, &is->audioq, pkt, &is->audio_pkt_temp_serial, is->audio_finished) <= 0)
            return -1;

        if (pkt->data == flush_pkt.data) {
            /* seek happened: reset decoder and pts prediction state */
            avcodec_flush_buffers(dec);
            is->audio_buf_frames_pending = 0;
            is->audio_frame_next_pts = AV_NOPTS_VALUE;
            if ((is->ic->iformat->flags & (AVFMT_NOBINSEARCH | AVFMT_NOGENSEARCH | AVFMT_NO_BYTE_SEEK)) && !is->ic->iformat->read_seek)
                is->audio_frame_next_pts = is->audio_st->start_time;
        }

        *pkt_temp = *pkt;
    }
    return -1;
}

/* prepare a new audio buffer */
static void sdl_audio_callback(void *opaque, Uint8 *stream, int len)
{
    FFPlayer *ffp = opaque;
    VideoState *is = ffp->is;
    int audio_size, len1;
    int bytes_per_sec;
    int frame_size = av_samples_get_buffer_size(NULL, is->audio_tgt.channels, 1, is->audio_tgt.fmt, 1);

    ffp->audio_callback_time = av_gettime();

    while (len > 0) {
        if (is->audio_buf_index >= is->audio_buf_size) {
           audio_size = audio_decode_frame(ffp);
           if (audio_size < 0) {
                /* if error, just output silence */
               is->audio_buf      = is->silence_buf;
               is->audio_buf_size = sizeof(is->silence_buf) / frame_size * frame_size;
           } else {
               if (is->show_mode != SHOW_MODE_VIDEO)
                   update_sample_display(is, (int16_t *)is->audio_buf, audio_size);
               is->audio_buf_size = audio_size;
           }
           is->audio_buf_index = 0;
        }
        /* serial mismatch after a seek: output silence and flush the device */
        if (is->audio_pkt_temp_serial != is->audioq.serial) {
            // ALOGE("aout_cb: flush\n");
            is->audio_buf_index = is->audio_buf_size;
            memset(stream, 0, len);
            stream += len;
            len = 0;
            SDL_AoutFlushAudio(ffp->aout);
            break;
        }
        len1 = is->audio_buf_size - is->audio_buf_index;
        if (len1 > len)
            len1 = len;
        memcpy(stream, (uint8_t *)is->audio_buf + is->audio_buf_index, len1);
        len -= len1;
        stream += len1;
        is->audio_buf_index += len1;
    }
    bytes_per_sec = is->audio_tgt.freq * is->audio_tgt.channels * av_get_bytes_per_sample(is->audio_tgt.fmt);
    is->audio_write_buf_size = is->audio_buf_size - is->audio_buf_index;
    /* Let's
assume the audio driver that is used by SDL has two periods. */
    if (!isnan(is->audio_clock)) {
        /* the clock is set back by the audio still buffered in hardware +
         * the unwritten part of audio_buf */
        set_clock_at(&is->audclk, is->audio_clock - (double)(1 * is->audio_hw_buf_size + is->audio_write_buf_size) / bytes_per_sec, is->audio_clock_serial, ffp->audio_callback_time / 1000000.0);
        sync_clock_to_slave(&is->extclk, &is->audclk);
    }
}

/* Open the audio output device with the wanted parameters, retrying with
 * progressively different channel counts on failure. On success fills
 * *audio_hw_params with the negotiated parameters and returns the hardware
 * buffer size in bytes; returns -1 on failure. */
static int audio_open(FFPlayer *opaque, int64_t wanted_channel_layout, int wanted_nb_channels, int wanted_sample_rate, struct AudioParams *audio_hw_params)
{
    FFPlayer *ffp = opaque;
    SDL_AudioSpec wanted_spec, spec;
    const char *env;
    /* fallback chain indexed by current channel count; 0 terminates */
    static const int next_nb_channels[] = {0, 0, 1, 6, 2, 6, 4, 6};

    env = SDL_getenv("SDL_AUDIO_CHANNELS");
    if (env) {
        wanted_nb_channels = atoi(env);
        wanted_channel_layout = av_get_default_channel_layout(wanted_nb_channels);
    }
    if (!wanted_channel_layout || wanted_nb_channels != av_get_channel_layout_nb_channels(wanted_channel_layout)) {
        wanted_channel_layout = av_get_default_channel_layout(wanted_nb_channels);
        wanted_channel_layout &= ~AV_CH_LAYOUT_STEREO_DOWNMIX;
    }
    wanted_spec.channels = av_get_channel_layout_nb_channels(wanted_channel_layout);
    wanted_spec.freq = wanted_sample_rate;
    if (wanted_spec.freq <= 0 || wanted_spec.channels <= 0) {
        av_log(NULL, AV_LOG_ERROR, "Invalid sample rate or channel count!\n");
        return -1;
    }
    wanted_spec.format = AUDIO_S16SYS;
    wanted_spec.silence = 0;
    wanted_spec.samples = SDL_AUDIO_BUFFER_SIZE;
    wanted_spec.callback = sdl_audio_callback;
    wanted_spec.userdata = opaque;
    while (SDL_AoutOpenAudio(ffp->aout, &wanted_spec, &spec) < 0) {
        av_log(NULL, AV_LOG_WARNING, "SDL_OpenAudio (%d channels): %s\n", wanted_spec.channels, SDL_GetError());
        wanted_spec.channels = next_nb_channels[FFMIN(7, wanted_spec.channels)];
        if (!wanted_spec.channels) {
            av_log(NULL, AV_LOG_ERROR,
                   "No more channel combinations to try, audio open failed\n");
            return -1;
        }
        wanted_channel_layout = av_get_default_channel_layout(wanted_spec.channels);
    }
    if (spec.format != AUDIO_S16SYS) {
        av_log(NULL, AV_LOG_ERROR, "SDL advised
audio format %d is not supported!\n", spec.format); return -1; } if (spec.channels != wanted_spec.channels) { wanted_channel_layout = av_get_default_channel_layout(spec.channels); if (!wanted_channel_layout) { av_log(NULL, AV_LOG_ERROR, "SDL advised channel count %d is not supported!\n", spec.channels); return -1; } } audio_hw_params->fmt = AV_SAMPLE_FMT_S16; audio_hw_params->freq = spec.freq; audio_hw_params->channel_layout = wanted_channel_layout; audio_hw_params->channels = spec.channels; return spec.size; } /* open a given stream. Return 0 if OK */ static int stream_component_open(FFPlayer *ffp, int stream_index) { VideoState *is = ffp->is; AVFormatContext *ic = is->ic; AVCodecContext *avctx; AVCodec *codec; const char *forced_codec_name = NULL; AVDictionary *opts; AVDictionaryEntry *t = NULL; int sample_rate, nb_channels; int64_t channel_layout; int ret; int stream_lowres = ffp->lowres; if (stream_index < 0 || stream_index >= ic->nb_streams) return -1; avctx = ic->streams[stream_index]->codec; codec = avcodec_find_decoder(avctx->codec_id); switch (avctx->codec_type) { case AVMEDIA_TYPE_AUDIO : is->last_audio_stream = stream_index; forced_codec_name = ffp->audio_codec_name; break; // FFP_MERGE: case AVMEDIA_TYPE_SUBTITLE: case AVMEDIA_TYPE_VIDEO : is->last_video_stream = stream_index; forced_codec_name = ffp->video_codec_name; break; default: break; } if (forced_codec_name) codec = avcodec_find_decoder_by_name(forced_codec_name); if (!codec) { // FIXME: 9 report unknown codec id/name if (forced_codec_name) av_log(NULL, AV_LOG_WARNING, "No codec could be found with name '%s'\n", forced_codec_name); else av_log(NULL, AV_LOG_WARNING, "No codec could be found with id %d\n", avctx->codec_id); return -1; } // FIXME: android set these from outside #ifndef __APPLE__ avctx->skip_frame = ffp->skip_frame; avctx->skip_loop_filter = ffp->skip_loop_filter; #endif avctx->codec_id = codec->id; avctx->workaround_bugs = ffp->workaround_bugs; if(stream_lowres > 
av_codec_get_max_lowres(codec)){ av_log(avctx, AV_LOG_WARNING, "The maximum value for lowres supported by the decoder is %d\n", av_codec_get_max_lowres(codec)); stream_lowres = av_codec_get_max_lowres(codec); } av_codec_set_lowres(avctx, stream_lowres); avctx->error_concealment = ffp->error_concealment; if(stream_lowres) avctx->flags |= CODEC_FLAG_EMU_EDGE; if (ffp->fast) avctx->flags2 |= CODEC_FLAG2_FAST; if(codec->capabilities & CODEC_CAP_DR1) avctx->flags |= CODEC_FLAG_EMU_EDGE; opts = filter_codec_opts(ffp->codec_opts, avctx->codec_id, ic, ic->streams[stream_index], codec); if (!av_dict_get(opts, "threads", NULL, 0)) av_dict_set(&opts, "threads", "auto", 0); if (stream_lowres) av_dict_set(&opts, "lowres", av_asprintf("%d", stream_lowres), AV_DICT_DONT_STRDUP_VAL); if (avctx->codec_type == AVMEDIA_TYPE_VIDEO || avctx->codec_type == AVMEDIA_TYPE_AUDIO) av_dict_set(&opts, "refcounted_frames", "1", 0); if (avcodec_open2(avctx, codec, &opts) < 0) return -1; if ((t = av_dict_get(opts, "", NULL, AV_DICT_IGNORE_SUFFIX))) { av_log(NULL, AV_LOG_ERROR, "Option %s not found.\n", t->key); #ifdef FFP_MERGE return AVERROR_OPTION_NOT_FOUND; #endif } ic->streams[stream_index]->discard = AVDISCARD_DEFAULT; switch (avctx->codec_type) { case AVMEDIA_TYPE_AUDIO: #if CONFIG_AVFILTER { AVFilterLink *link; is->audio_filter_src.freq = avctx->sample_rate; is->audio_filter_src.channels = avctx->channels; is->audio_filter_src.channel_layout = get_valid_channel_layout(avctx->channel_layout, avctx->channels); is->audio_filter_src.fmt = avctx->sample_fmt; if ((ret = configure_audio_filters(is, afilters, 0)) < 0) return ret; link = is->out_audio_filter->inputs[0]; sample_rate = link->sample_rate; nb_channels = link->channels; channel_layout = link->channel_layout; } #else sample_rate = avctx->sample_rate; nb_channels = avctx->channels; channel_layout = avctx->channel_layout; #endif /* prepare audio output */ if ((ret = audio_open(ffp, channel_layout, nb_channels, sample_rate, &is->audio_tgt)) 
< 0) return ret; is->audio_hw_buf_size = ret; is->audio_src = is->audio_tgt; is->audio_buf_size = 0; is->audio_buf_index = 0; /* init averaging filter */ is->audio_diff_avg_coef = exp(log(0.01) / AUDIO_DIFF_AVG_NB); is->audio_diff_avg_count = 0; /* since we do not have a precise anough audio fifo fullness, we correct audio sync only if larger than this threshold */ is->audio_diff_threshold = 2.0 * is->audio_hw_buf_size / av_samples_get_buffer_size(NULL, is->audio_tgt.channels, is->audio_tgt.freq, is->audio_tgt.fmt, 1); memset(&is->audio_pkt, 0, sizeof(is->audio_pkt)); memset(&is->audio_pkt_temp, 0, sizeof(is->audio_pkt_temp)); is->audio_pkt_temp.stream_index = -1; is->audio_stream = stream_index; is->audio_st = ic->streams[stream_index]; packet_queue_start(&is->audioq); SDL_AoutPauseAudio(ffp->aout, 0); break; case AVMEDIA_TYPE_VIDEO: is->video_stream = stream_index; is->video_st = ic->streams[stream_index]; packet_queue_start(&is->videoq); is->video_tid = SDL_CreateThreadEx(&is->_video_tid, video_thread, ffp, "ff_video"); is->queue_attachments_req = 1; break; // FFP_MERGE: case AVMEDIA_TYPE_SUBTITLE: default: break; } return 0; } static void stream_component_close(FFPlayer *ffp, int stream_index) { VideoState *is = ffp->is; AVFormatContext *ic = is->ic; AVCodecContext *avctx; if (stream_index < 0 || stream_index >= ic->nb_streams) return; avctx = ic->streams[stream_index]->codec; switch (avctx->codec_type) { case AVMEDIA_TYPE_AUDIO: packet_queue_abort(&is->audioq); SDL_AoutCloseAudio(ffp->aout); packet_queue_flush(&is->audioq); av_free_packet(&is->audio_pkt); swr_free(&is->swr_ctx); av_freep(&is->audio_buf1); is->audio_buf1_size = 0; is->audio_buf = NULL; av_frame_free(&is->frame); #ifdef FFP_MERGE if (is->rdft) { av_rdft_end(is->rdft); av_freep(&is->rdft_data); is->rdft = NULL; is->rdft_bits = 0; } #endif #if CONFIG_AVFILTER avfilter_graph_free(&is->agraph); #endif break; case AVMEDIA_TYPE_VIDEO: packet_queue_abort(&is->videoq); /* note: we also signal this mutex 
to make sure we deblock the video thread in all cases */
        SDL_LockMutex(is->pictq_mutex);
        SDL_CondSignal(is->pictq_cond);
        SDL_UnlockMutex(is->pictq_mutex);

        SDL_WaitThread(is->video_tid, NULL);

        packet_queue_flush(&is->videoq);
        break;
    // FFP_MERGE: case AVMEDIA_TYPE_SUBTITLE:
    default:
        break;
    }

    ic->streams[stream_index]->discard = AVDISCARD_ALL;
    avcodec_close(avctx);
    /* detach the stream pointers only after the codec is fully closed */
    switch (avctx->codec_type) {
    case AVMEDIA_TYPE_AUDIO:
        is->audio_st = NULL;
        is->audio_stream = -1;
        break;
    case AVMEDIA_TYPE_VIDEO:
        is->video_st = NULL;
        is->video_stream = -1;
        break;
    // FFP_MERGE: case AVMEDIA_TYPE_SUBTITLE:
    default:
        break;
    }
}

/* avio interrupt callback: non-zero return aborts blocking I/O */
static int decode_interrupt_cb(void *ctx)
{
    VideoState *is = ctx;
    return is->abort_request;
}

/* Heuristic: treat rtp/rtsp/sdp demuxers and rtp:/udp: URLs as realtime */
static int is_realtime(AVFormatContext *s)
{
    if(   !strcmp(s->iformat->name, "rtp")
       || !strcmp(s->iformat->name, "rtsp")
       || !strcmp(s->iformat->name, "sdp")
    )
        return 1;

    if(s->pb && (   !strncmp(s->filename, "rtp:", 4)
                 || !strncmp(s->filename, "udp:", 4)
                )
    )
        return 1;
    return 0;
}

/* this thread gets the stream from the disk or the network */
static int read_thread(void *arg)
{
    FFPlayer *ffp = arg;
    VideoState *is = ffp->is;
    AVFormatContext *ic = NULL;
    int err, i, ret;
    int st_index[AVMEDIA_TYPE_NB];
    AVPacket pkt1, *pkt = &pkt1;
    int eof = 0;
    int64_t stream_start_time;
    int completed = 0;
    int pkt_in_play_range = 0;
    AVDictionaryEntry *t;
    AVDictionary **opts;
    int orig_nb_streams;
    SDL_mutex *wait_mutex = SDL_CreateMutex();
    int last_error = 0;
    int last_buffered_time_percentage = -1;
    int last_buffered_size_percentage = -1;
    int hwm_in_ms = ffp->fast_high_water_mark_in_ms; // use fast water mark for first loading

    memset(st_index, -1, sizeof(st_index));
    is->last_video_stream = is->video_stream = -1;
    is->last_audio_stream = is->audio_stream = -1;
#ifdef FFP_MERGE
    is->last_subtitle_stream = is->subtitle_stream = -1;
#endif

    ic = avformat_alloc_context();
    ic->interrupt_callback.callback = decode_interrupt_cb;
    ic->interrupt_callback.opaque = is;
    err = avformat_open_input(&ic, is->filename,
is->iformat, &ffp->format_opts); if (err < 0) { print_error(is->filename, err); ret = -1; goto fail; } if ((t = av_dict_get(ffp->format_opts, "", NULL, AV_DICT_IGNORE_SUFFIX))) { av_log(NULL, AV_LOG_ERROR, "Option %s not found.\n", t->key); #ifdef FFP_MERGE ret = AVERROR_OPTION_NOT_FOUND; goto fail; #endif } is->ic = ic; if (ffp->genpts) ic->flags |= AVFMT_FLAG_GENPTS; opts = setup_find_stream_info_opts(ic, ffp->codec_opts); orig_nb_streams = ic->nb_streams; err = avformat_find_stream_info(ic, opts); if (err < 0) { av_log(NULL, AV_LOG_WARNING, "%s: could not find codec parameters\n", is->filename); ret = -1; goto fail; } for (i = 0; i < orig_nb_streams; i++) av_dict_free(&opts[i]); av_freep(&opts); if (ic->pb) ic->pb->eof_reached = 0; // FIXME hack, ffplay maybe should not use url_feof() to test for the end if (ffp->seek_by_bytes < 0) ffp->seek_by_bytes = !!(ic->iformat->flags & AVFMT_TS_DISCONT) && strcmp("ogg", ic->iformat->name); is->max_frame_duration = (ic->iformat->flags & AVFMT_TS_DISCONT) ? 
10.0 : 3600.0; ALOGI("max_frame_duration: %.3f\n", is->max_frame_duration); #ifdef FFP_MERGE if (!window_title && (t = av_dict_get(ic->metadata, "title", NULL, 0))) window_title = av_asprintf("%s - %s", t->value, input_filename); #endif /* if seeking requested, we execute it */ if (ffp->start_time != AV_NOPTS_VALUE) { int64_t timestamp; timestamp = ffp->start_time; /* add the stream start time */ if (ic->start_time != AV_NOPTS_VALUE) timestamp += ic->start_time; ret = avformat_seek_file(ic, -1, INT64_MIN, timestamp, INT64_MAX, 0); if (ret < 0) { av_log(NULL, AV_LOG_WARNING, "%s: could not seek to position %0.3f\n", is->filename, (double)timestamp / AV_TIME_BASE); } } is->realtime = is_realtime(ic); for (i = 0; i < ic->nb_streams; i++) ic->streams[i]->discard = AVDISCARD_ALL; if (!ffp->video_disable) st_index[AVMEDIA_TYPE_VIDEO] = av_find_best_stream(ic, AVMEDIA_TYPE_VIDEO, ffp->wanted_stream[AVMEDIA_TYPE_VIDEO], -1, NULL, 0); if (!ffp->audio_disable) st_index[AVMEDIA_TYPE_AUDIO] = av_find_best_stream(ic, AVMEDIA_TYPE_AUDIO, ffp->wanted_stream[AVMEDIA_TYPE_AUDIO], st_index[AVMEDIA_TYPE_VIDEO], NULL, 0); #ifdef FFP_MERGE if (!ffp->video_disable && !ffp->subtitle_disable) st_index[AVMEDIA_TYPE_SUBTITLE] = av_find_best_stream(ic, AVMEDIA_TYPE_SUBTITLE, ffp->wanted_stream[AVMEDIA_TYPE_SUBTITLE], (st_index[AVMEDIA_TYPE_AUDIO] >= 0 ? st_index[AVMEDIA_TYPE_AUDIO] : st_index[AVMEDIA_TYPE_VIDEO]), NULL, 0); #endif av_dump_format(ic, 0, is->filename, 0); is->show_mode = ffp->show_mode; /* open the streams */ if (st_index[AVMEDIA_TYPE_AUDIO] >= 0) { stream_component_open(ffp, st_index[AVMEDIA_TYPE_AUDIO]); } ret = -1; if (st_index[AVMEDIA_TYPE_VIDEO] >= 0) { ret = stream_component_open(ffp, st_index[AVMEDIA_TYPE_VIDEO]); } if (is->show_mode == SHOW_MODE_NONE) is->show_mode = ret >= 0 ? 
SHOW_MODE_VIDEO : SHOW_MODE_RDFT; #ifdef FFP_MERGE if (st_index[AVMEDIA_TYPE_SUBTITLE] >= 0) { stream_component_open(ffp, st_index[AVMEDIA_TYPE_SUBTITLE]); } #endif if (is->video_stream < 0 && is->audio_stream < 0) { av_log(NULL, AV_LOG_FATAL, "Failed to open file '%s' or configure filtergraph\n", is->filename); ret = -1; goto fail; } if (ffp->infinite_buffer < 0 && is->realtime) ffp->infinite_buffer = 1; ffp->prepared = true; ffp_notify_msg1(ffp, FFP_MSG_PREPARED); if (is->video_st && is->video_st->codec) { AVCodecContext *avctx = is->video_st->codec; ffp_notify_msg3(ffp, FFP_MSG_VIDEO_SIZE_CHANGED, avctx->width, avctx->height); ffp_notify_msg3(ffp, FFP_MSG_SAR_CHANGED, avctx->sample_aspect_ratio.num, avctx->sample_aspect_ratio.den); } if (ffp->auto_start) { ffp_notify_msg1(ffp, FFP_REQ_START); ffp->auto_start = 0; } for (;;) { if (is->abort_request) break; #ifdef FFP_MERGE if (is->paused != is->last_paused) { is->last_paused = is->paused; if (is->paused) is->read_pause_return = av_read_pause(ic); else av_read_play(ic); } #endif #if CONFIG_RTSP_DEMUXER || CONFIG_MMSH_PROTOCOL if (is->paused && (!strcmp(ic->iformat->name, "rtsp") || (ic->pb && !strncmp(ffp->input_filename, "mmsh:", 5)))) { /* wait 10 ms to avoid trying to get another packet */ /* XXX: horrible */ SDL_Delay(10); continue; } #endif if (is->seek_req) { int64_t seek_target = is->seek_pos; int64_t seek_min = is->seek_rel > 0 ? seek_target - is->seek_rel + 2: INT64_MIN; int64_t seek_max = is->seek_rel < 0 ? 
seek_target - is->seek_rel - 2: INT64_MAX; // FIXME the +-2 is due to rounding being not done in the correct direction in generation // of the seek_pos/seek_rel variables hwm_in_ms = ffp->fast_high_water_mark_in_ms; ffp_toggle_buffering(ffp, 1); ret = avformat_seek_file(is->ic, -1, seek_min, seek_target, seek_max, is->seek_flags); if (ret < 0) { av_log(NULL, AV_LOG_ERROR, "%s: error while seeking\n", is->ic->filename); } else { if (is->audio_stream >= 0) { packet_queue_flush(&is->audioq); packet_queue_put(&is->audioq, &flush_pkt); } #ifdef FFP_MERGE if (is->subtitle_stream >= 0) { packet_queue_flush(&is->subtitleq); packet_queue_put(&is->subtitleq, &flush_pkt); } #endif if (is->video_stream >= 0) { packet_queue_flush(&is->videoq); packet_queue_put(&is->videoq, &flush_pkt); } if (is->seek_flags & AVSEEK_FLAG_BYTE) { set_clock(&is->extclk, NAN, 0); } else { set_clock(&is->extclk, seek_target / (double)AV_TIME_BASE, 0); } } is->seek_req = 0; is->queue_attachments_req = 1; eof = 0; completed = 0; SDL_LockMutex(ffp->is->play_mutex); if (ffp->auto_start) { // ALOGE("seek: auto_start\n"); is->pause_req = 0; is->buffering_on = 1; ffp->auto_start = 0; stream_update_pause_l(ffp); } if (is->pause_req) step_to_next_frame_l(ffp); SDL_UnlockMutex(ffp->is->play_mutex); ffp_notify_msg1(ffp, FFP_MSG_SEEK_COMPLETE); ffp_toggle_buffering(ffp, 1); } if (is->queue_attachments_req) { if (is->video_st && (is->video_st->disposition & AV_DISPOSITION_ATTACHED_PIC)) { AVPacket copy; if ((ret = av_copy_packet(©, &is->video_st->attached_pic)) < 0) goto fail; packet_queue_put(&is->videoq, ©); packet_queue_put_nullpacket(&is->videoq, is->video_stream); } is->queue_attachments_req = 0; } /* if the queue are full, no need to read more */ if (ffp->infinite_buffer<1 && !is->seek_req && #ifdef FFP_MERGE (is->audioq.size + is->videoq.size + is->subtitleq.size > MAX_QUEUE_SIZE #else (is->audioq.size + is->videoq.size > ffp->max_buffer_size #endif || ( (is->audioq .nb_packets > MIN_FRAMES || 
is->audio_stream < 0 || is->audioq.abort_request) && (is->videoq .nb_packets > MIN_FRAMES || is->video_stream < 0 || is->videoq.abort_request || (is->video_st->disposition & AV_DISPOSITION_ATTACHED_PIC)) #ifdef FFP_MERGE && (is->subtitleq.nb_packets > MIN_FRAMES || is->subtitle_stream < 0 || is->subtitleq.abort_request)))) { #else ))) { #endif if (!eof) { // ALOGE("ffp_toggle_buffering: full\n"); ffp_toggle_buffering(ffp, 0); } /* wait 10 ms */ SDL_LockMutex(wait_mutex); SDL_CondWaitTimeout(is->continue_read_thread, wait_mutex, 10); SDL_UnlockMutex(wait_mutex); continue; } if ((!is->paused || completed) && (!is->audio_st || is->audio_finished == is->audioq.serial) && (!is->video_st || (is->video_finished == is->videoq.serial && is->pictq_size == 0))) { if (ffp->loop != 1 && (!ffp->loop || --ffp->loop)) { stream_seek(is, ffp->start_time != AV_NOPTS_VALUE ? ffp->start_time : 0, 0, 0); } else if (ffp->autoexit) { ret = AVERROR_EOF; goto fail; } else { if (completed) { ALOGE("ffp_toggle_buffering: eof\n"); SDL_LockMutex(wait_mutex); // infinite wait may block shutdown while(!is->abort_request && !is->seek_req) SDL_CondWaitTimeout(is->continue_read_thread, wait_mutex, 10); SDL_UnlockMutex(wait_mutex); if (!is->abort_request) continue; } else { completed = 1; ffp->auto_start = 0; // TODO: 0 it's a bit early to notify complete here ALOGE("ffp_toggle_buffering: completed\n"); ffp_toggle_buffering(ffp, 0); toggle_pause(ffp, 1); ffp_notify_msg1(ffp, FFP_MSG_COMPLETED); } } } if (eof) { if (is->video_stream >= 0) packet_queue_put_nullpacket(&is->videoq, is->video_stream); if (is->audio_stream >= 0) packet_queue_put_nullpacket(&is->audioq, is->audio_stream); ffp_toggle_buffering(ffp, 0); SDL_Delay(10); eof=0; continue; } ret = av_read_frame(ic, pkt); if (ret < 0) { if (ret == AVERROR_EOF || url_feof(ic->pb)) { eof = 1; } else if (ic->pb && ic->pb->error) { // TODO: 9 notify error until a/v finished last_error = ic->pb->error; ALOGE("av_read_frame error: %d\n", ic->pb->error); 
ffp_notify_msg2(ffp, FFP_MSG_ERROR, last_error); break; } SDL_LockMutex(wait_mutex); SDL_CondWaitTimeout(is->continue_read_thread, wait_mutex, 10); SDL_UnlockMutex(wait_mutex); continue; } /* check if packet is in play range specified by user, then queue, otherwise discard */ stream_start_time = ic->streams[pkt->stream_index]->start_time; pkt_in_play_range = ffp->duration == AV_NOPTS_VALUE || (pkt->pts - (stream_start_time != AV_NOPTS_VALUE ? stream_start_time : 0)) * av_q2d(ic->streams[pkt->stream_index]->time_base) - (double)(ffp->start_time != AV_NOPTS_VALUE ? ffp->start_time : 0) / 1000000 <= ((double)ffp->duration / 1000000); if (pkt->stream_index == is->audio_stream && pkt_in_play_range) { packet_queue_put(&is->audioq, pkt); } else if (pkt->stream_index == is->video_stream && pkt_in_play_range && !(is->video_st->disposition & AV_DISPOSITION_ATTACHED_PIC)) { packet_queue_put(&is->videoq, pkt); #ifdef FFP_MERGE } else if (pkt->stream_index == is->subtitle_stream && pkt_in_play_range) { packet_queue_put(&is->subtitleq, pkt); #endif } else { av_free_packet(pkt); } do { int buf_size_percent = -1; int buf_time_percent = -1; int hwm_in_bytes = ffp->high_water_mark_in_bytes; int need_start_buffering = 0; if (hwm_in_ms > 0) { int cached_duration_in_ms = -1; int64_t audio_cached_duration = -1; int64_t video_cached_duration = -1; if (is->audio_st && is->audio_st->time_base.den > 0 && is->audio_st->time_base.num > 0) audio_cached_duration = is->audioq.duration * av_q2d(is->audio_st->time_base) * 1000; if (is->video_st && is->video_st->time_base.den > 0 && is->video_st->time_base.num > 0) video_cached_duration = is->videoq.duration * av_q2d(is->video_st->time_base) * 1000; if (video_cached_duration >= 0 && audio_cached_duration >= 0) { cached_duration_in_ms = (int)IJKMAX(video_cached_duration, audio_cached_duration); } else if (video_cached_duration >= 0) { cached_duration_in_ms = (int)video_cached_duration; } else if (audio_cached_duration >= 0) { cached_duration_in_ms = 
(int)audio_cached_duration;
            }
            if (cached_duration_in_ms >= 0) {
                /* av_rescale(x, 1005, hwm*10) ~= x * 100.5 / hwm: slightly
                 * overshoots a plain percentage so 100% is actually reached.
                 * NOTE(review): presumably intentional rounding -- confirm. */
                buf_time_percent = (int)av_rescale(cached_duration_in_ms, 1005, hwm_in_ms * 10);
                if (last_buffered_time_percentage != buf_time_percent) {
#ifdef FFP_SHOW_DEMUX_CACHE
                    ALOGE("time cache=%%%d (%d/%d)\n", buf_time_percent, cached_duration_in_ms, hwm_in_ms);
#endif
                    last_buffered_time_percentage = buf_time_percent;
                    ffp_notify_msg3(ffp, FFP_MSG_BUFFERING_TIME_UPDATE, cached_duration_in_ms, hwm_in_ms);
                }
            }
        }
        /* Byte-based fill level of both queues together. */
        int cached_size = is->audioq.size + is->videoq.size;
        if (hwm_in_bytes > 0) {
            buf_size_percent = (int)av_rescale(cached_size, 1005, hwm_in_bytes * 10);
            if (last_buffered_size_percentage != buf_size_percent) {
#ifdef FFP_SHOW_DEMUX_CACHE
                ALOGE("size cache=%%%d (%d/%d)\n", buf_size_percent, cached_size, hwm_in_bytes);
#endif
                last_buffered_size_percentage = buf_size_percent;
                ffp_notify_msg3(ffp, FFP_MSG_BUFFERING_BYTES_UPDATE, cached_size, hwm_in_bytes);
            }
        }
        if (buf_time_percent >= 0) {
            /* always depend on cache duration if valid */
            if (buf_time_percent >= 100)
                need_start_buffering = 1;
        } else {
            if (buf_size_percent >= 100)
                need_start_buffering = 1;
        }
        if (need_start_buffering) {
            /* Raise the time watermark (doubling, clamped to the configured
             * maximum) each time the cache fills, then leave buffering. */
            if (hwm_in_ms < ffp->normal_high_water_mark_in_ms)
                hwm_in_ms = ffp->normal_high_water_mark_in_ms;
            hwm_in_ms *= 2;
            if (hwm_in_ms > ffp->max_high_water_mark_in_ms)
                hwm_in_ms = ffp->max_high_water_mark_in_ms;
            ffp_toggle_buffering(ffp, 0);
        }
    } while(0);

        // FIXME: 0 notify progress
    }

    /* wait until the end */
    while (!is->abort_request) {
        SDL_Delay(100);
    }
    ret = 0;
fail:
    /* close each stream */
    if (is->audio_stream >= 0)
        stream_component_close(ffp, is->audio_stream);
    if (is->video_stream >= 0)
        stream_component_close(ffp, is->video_stream);
#ifdef FFP_MERGE
    if (is->subtitle_stream >= 0)
        stream_component_close(ffp, is->subtitle_stream);
#endif
    if (is->ic) {
        avformat_close_input(&is->ic);
    }
    /* Report the last error unless playback ended normally after a
     * successful prepare. */
    if (!ffp->prepared || !is->abort_request) {
        ffp->last_error = last_error;
        ffp_notify_msg2(ffp, FFP_MSG_ERROR, last_error);
    }
    SDL_DestroyMutex(wait_mutex);
    return
0;
}

static int video_refresh_thread(void *arg);

/* Allocate a VideoState, initialize its queues, clocks and locks, and spawn
 * the video-refresh ("ff_vout") and demux-read ("ff_read") threads.
 * Returns NULL on allocation or thread-creation failure; on success
 * ffp->is points at the new instance. */
static VideoState *stream_open(FFPlayer *ffp, const char *filename, AVInputFormat *iformat)
{
    assert(!ffp->is);
    VideoState *is = av_mallocz(sizeof(VideoState));
    if (!is)
        return NULL;
    av_strlcpy(is->filename, filename, sizeof(is->filename));
    is->iformat = iformat;
    is->ytop = 0;
    is->xleft = 0;

    /* start video display */
    is->pictq_mutex = SDL_CreateMutex();
    is->pictq_cond = SDL_CreateCond();
#ifdef FFP_MERGE
    is->subpq_mutex = SDL_CreateMutex();
    is->subpq_cond = SDL_CreateCond();
#endif

    packet_queue_init(&is->videoq);
    packet_queue_init(&is->audioq);
#ifdef FFP_MERGE
    packet_queue_init(&is->subtitleq);
#endif
    is->continue_read_thread = SDL_CreateCond();

    /* Each A/V clock follows its packet queue's serial; the external clock
     * follows its own serial. */
    init_clock(&is->vidclk, &is->videoq.serial);
    init_clock(&is->audclk, &is->audioq.serial);
    init_clock(&is->extclk, &is->extclk.serial);
    is->audio_clock_serial = -1;
    is->audio_last_serial = -1;
    is->av_sync_type = ffp->av_sync_type;

    is->play_mutex = SDL_CreateMutex();
    ffp->is = is;

    is->video_refresh_tid = SDL_CreateThreadEx(&is->_video_refresh_tid, video_refresh_thread, ffp, "ff_vout");
    if (!is->video_refresh_tid) {
        /* NOTE(review): the mutexes/conds and packet queues created above
         * appear to leak on this error path (only `is` itself is freed) --
         * confirm whether a caller cleans up partial init. */
        av_freep(&ffp->is);
        return NULL;
    }

    is->read_tid = SDL_CreateThreadEx(&is->_read_tid, read_thread, ffp, "ff_read");
    if (!is->read_tid) {
        is->abort_request = true;
        SDL_WaitThread(is->video_refresh_tid, NULL);
        av_freep(&ffp->is);
        return NULL;
    }
    return is;
}

// FFP_MERGE: stream_cycle_channel
// FFP_MERGE: toggle_full_screen
// FFP_MERGE: toggle_audio_display
// FFP_MERGE: refresh_loop_wait_event
// FFP_MERGE: event_loop
// FFP_MERGE: opt_frame_size
// FFP_MERGE: opt_width
// FFP_MERGE: opt_height
// FFP_MERGE: opt_format
// FFP_MERGE: opt_frame_pix_fmt
// FFP_MERGE: opt_sync
// FFP_MERGE: opt_seek
// FFP_MERGE: opt_duration
// FFP_MERGE: opt_show_mode
// FFP_MERGE: opt_input_file
// FFP_MERGE: opt_codec
// FFP_MERGE: dummy
// FFP_MERGE: options
// FFP_MERGE: show_usage
// FFP_MERGE: show_help_default

/* Video refresh loop: sleeps according to remaining_time/REFRESH_RATE and
 * calls video_refresh() until the stream is aborted. */
static int video_refresh_thread(void *arg)
{
    FFPlayer *ffp = arg;
VideoState *is = ffp->is;
    double remaining_time = 0.0;
    while (!is->abort_request) {
        if (remaining_time > 0.0)
            av_usleep((int)(int64_t)(remaining_time * 1000000.0));
        remaining_time = REFRESH_RATE;
        /* Skip refresh while paused unless a forced refresh was requested. */
        if (is->show_mode != SHOW_MODE_NONE && (!is->paused || is->force_refresh))
            video_refresh(ffp, &remaining_time);
    }
    return 0;
}

/* FFmpeg lock-manager callback: maps AVLockOp operations onto SDL mutexes.
 * Returns 0 on success, non-zero on failure, as av_lockmgr_register()
 * requires. */
static int lockmgr(void **mtx, enum AVLockOp op)
{
    switch (op) {
    case AV_LOCK_CREATE:
        *mtx = SDL_CreateMutex();
        if (!*mtx)
            return 1;
        return 0;
    case AV_LOCK_OBTAIN:
        return !!SDL_LockMutex(*mtx);
    case AV_LOCK_RELEASE:
        return !!SDL_UnlockMutex(*mtx);
    case AV_LOCK_DESTROY:
        SDL_DestroyMutex(*mtx);
        return 0;
    }
    return 1;
}

// FFP_MERGE: main

/*****************************************************************************
 * end last line in ffplay.c
 ****************************************************************************/

static bool g_ffmpeg_global_inited = false;

/* Translate an FFmpeg log level into an ijk log level and forward to VLOG.
 * Note: the final check means only messages at AV_LOG_INFO severity or
 * higher are actually emitted; the VERBOSE/DEBUG mappings above are
 * filtered out by it. */
static void ffp_log_callback_help(void *ptr, int level, const char *fmt, va_list vl)
{
    int ffplv = IJK_LOG_VERBOSE;
    if (level <= AV_LOG_ERROR)
        ffplv = IJK_LOG_ERROR;
    else if (level <= AV_LOG_WARNING)
        ffplv = IJK_LOG_WARN;
    else if (level <= AV_LOG_INFO)
        ffplv = IJK_LOG_INFO;
    else if (level <= AV_LOG_VERBOSE)
        ffplv = IJK_LOG_VERBOSE;
    else
        ffplv = IJK_LOG_DEBUG;

    if (level <= AV_LOG_INFO)
        VLOG(ffplv, IJK_LOG_TAG, fmt, vl);
}

/* One-time process-wide FFmpeg setup: codec/demuxer registration, network
 * init, lock manager, log callback, and the flush-packet sentinel.
 * Idempotent thanks to g_ffmpeg_global_inited. */
void ffp_global_init()
{
    if (g_ffmpeg_global_inited)
        return;

    /* register all codecs, demux and protocols */
    avcodec_register_all();
#if CONFIG_AVDEVICE
    avdevice_register_all();
#endif
#if CONFIG_AVFILTER
    avfilter_register_all();
#endif
    av_register_all();

    avformat_network_init();

    av_lockmgr_register(lockmgr);
    av_log_set_callback(ffp_log_callback_help);

    /* flush_pkt is recognized elsewhere by its data pointer pointing at
     * itself. */
    av_init_packet(&flush_pkt);
    flush_pkt.data = (uint8_t *)&flush_pkt;

    g_ffmpeg_global_inited = true;
}

/* Undo ffp_global_init(); no-op when not initialized. */
void ffp_global_uninit()
{
    if (!g_ffmpeg_global_inited)
        return;

    av_lockmgr_register(NULL);
#if CONFIG_AVFILTER
    avfilter_uninit();
    av_freep(&vfilters);
#endif
    avformat_network_deinit();

    g_ffmpeg_global_inited = false;
}

FFPlayer
*ffp_create() { FFPlayer* ffp = (FFPlayer*) av_mallocz(sizeof(FFPlayer)); if (!ffp) return NULL; msg_queue_init(&ffp->msg_queue); ffp_reset_internal(ffp); return ffp; } void ffp_destroy(FFPlayer *ffp) { if (!ffp) return; if (ffp && ffp->is) { av_log(NULL, AV_LOG_WARNING, "ffp_destroy_ffplayer: force stream_close()"); stream_close(ffp); ffp->is = NULL; } ffp_reset_internal(ffp); msg_queue_destroy(&ffp->msg_queue); SDL_VoutFreeP(&ffp->vout); SDL_AoutFreeP(&ffp->aout); av_free(ffp); } void ffp_destroy_p(FFPlayer **pffp) { if (!pffp) return; ffp_destroy(*pffp); *pffp = NULL; } void ffp_set_format_option(FFPlayer *ffp, const char *name, const char *value) { if (!ffp) return; av_dict_set(&ffp->format_opts, name, value, 0); } void ffp_set_codec_option(FFPlayer *ffp, const char *name, const char *value) { if (!ffp) return; av_dict_set(&ffp->codec_opts, name, value, 0); } void ffp_set_sws_option(FFPlayer *ffp, const char *name, const char *value) { if (!ffp) return; av_dict_set(&ffp->sws_opts, name, value, 0); } void ffp_set_overlay_format(FFPlayer *ffp, int chroma_fourcc) { switch (chroma_fourcc) { case SDL_FCC_I420: case SDL_FCC_YV12: case SDL_FCC_RV16: case SDL_FCC_RV24: case SDL_FCC_RV32: ffp->overlay_format = chroma_fourcc; break; default: ALOGE("ffp_set_overlay_format: unknown chroma fourcc: %d\n", chroma_fourcc); break; } } int ffp_prepare_async_l(FFPlayer *ffp, const char *file_name) { assert(ffp); assert(!ffp->is); assert(file_name); VideoState *is = stream_open(ffp, file_name, NULL); if (!is) { av_log(NULL, AV_LOG_WARNING, "ffp_prepare_async_l: stream_open failed OOM"); return EIJK_OUT_OF_MEMORY; } ffp->is = is; return 0; } int ffp_start_from_l(FFPlayer *ffp, long msec) { // ALOGE("ffp_start_at_l\n"); assert(ffp); VideoState *is = ffp->is; if (!is) return EIJK_NULL_IS_PTR; ffp->auto_start = 1; ffp_toggle_buffering(ffp, 1); ffp_seek_to_l(ffp, msec); return 0; } int ffp_start_l(FFPlayer *ffp) { // ALOGE("ffp_start_l\n"); assert(ffp); VideoState *is = ffp->is; if 
(!is) return EIJK_NULL_IS_PTR; toggle_pause(ffp, 0); return 0; } int ffp_pause_l(FFPlayer *ffp) { // ALOGE("ffp_pause_l\n"); assert(ffp); VideoState *is = ffp->is; if (!is) return EIJK_NULL_IS_PTR; toggle_pause(ffp, 1); return 0; } int ffp_stop_l(FFPlayer *ffp) { assert(ffp); VideoState *is = ffp->is; if (is) is->abort_request = 1; msg_queue_abort(&ffp->msg_queue); return 0; } int ffp_wait_stop_l(FFPlayer *ffp) { assert(ffp); if (ffp->is) { ffp_stop_l(ffp); stream_close(ffp); ffp->is = NULL; } return 0; } int ffp_seek_to_l(FFPlayer *ffp, long msec) { assert(ffp); VideoState *is = ffp->is; if (!is) return EIJK_NULL_IS_PTR; int64_t seek_pos = milliseconds_to_fftime(msec); int64_t start_time = is->ic->start_time; if (start_time > 0 && start_time != AV_NOPTS_VALUE) seek_pos += start_time; // FIXME: 9 seek by bytes // FIXME: 9 seek out of range // FIXME: 9 seekable ALOGE("stream_seek %"PRId64"(%d) + %"PRId64", \n", seek_pos, (int)msec, start_time); stream_seek(is, seek_pos, 0, 0); return 0; } long ffp_get_current_position_l(FFPlayer *ffp) { assert(ffp); VideoState *is = ffp->is; if (!is || !is->ic) return 0; int64_t start_time = is->ic->start_time; int64_t start_diff = 0; if (start_time > 0 && start_time != AV_NOPTS_VALUE) start_diff = fftime_to_milliseconds(start_time); int64_t pos = 0; double pos_clock = get_master_clock(is); if (isnan(pos_clock)) { // ALOGE("pos = seek_pos: %d\n", (int)is->seek_pos); pos = fftime_to_milliseconds(is->seek_pos); } else { // ALOGE("pos = pos_clock: %f\n", pos_clock); pos = pos_clock * 1000; } if (pos < 0 || pos < start_diff) return 0; int64_t adjust_pos = pos - start_diff; // ALOGE("pos=%ld\n", (long)adjust_pos); return (long)adjust_pos; } long ffp_get_duration_l(FFPlayer *ffp) { assert(ffp); VideoState *is = ffp->is; if (!is || !is->ic) return 0; int64_t start_time = is->ic->start_time; int64_t start_diff = 0; if (start_time > 0 && start_time != AV_NOPTS_VALUE) start_diff = fftime_to_milliseconds(start_time); int64_t duration = 
fftime_to_milliseconds(is->ic->duration); if (duration < 0 || duration < start_diff) return 0; int64_t adjust_duration = duration - start_diff; // ALOGE("dur=%ld\n", (long)adjust_duration); return (long)adjust_duration; } void ffp_toggle_buffering_l(FFPlayer *ffp, int buffering_on) { VideoState *is = ffp->is; if (buffering_on && !is->buffering_on) { ALOGD("ffp_toggle_buffering_l: start\n"); is->buffering_on = 1; is->frame_drops_early = 0; stream_update_pause_l(ffp); ffp_notify_msg1(ffp, FFP_MSG_BUFFERING_START); } else if (!buffering_on && is->buffering_on){ ALOGD("ffp_toggle_buffering_l: end\n"); is->buffering_on = 0; is->frame_drops_early = 0; stream_update_pause_l(ffp); ffp_notify_msg1(ffp, FFP_MSG_BUFFERING_END); } } void ffp_toggle_buffering(FFPlayer *ffp, int start_buffering) { SDL_LockMutex(ffp->is->play_mutex); ffp_toggle_buffering_l(ffp, start_buffering); SDL_UnlockMutex(ffp->is->play_mutex); }