
Analysis of the ffplay Program's Execution Flow

2013-12-30 10:58



1. Starting in main():

First, register the codecs, muxers, and demuxers:


avcodec_register_all();   // register codecs
avdevice_register_all();
av_register_all();        // register demuxers and muxers

Next comes some code that allocates memory.

(code omitted)


parse_options(argc, argv, options, opt_input_file);        // parse the command-line options
flags = SDL_INIT_VIDEO | SDL_INIT_AUDIO | SDL_INIT_TIMER;   // we want video, audio, and a timer
SDL_Init(flags);                                            // initialize SDL
SDL_EventState(SDL_ACTIVEEVENT, SDL_IGNORE);                // configure which SDL events to ignore
SDL_EventState(SDL_MOUSEMOTION, SDL_IGNORE);
SDL_EventState(SDL_SYSWMEVENT, SDL_IGNORE);
SDL_EventState(SDL_USEREVENT, SDL_IGNORE);
av_init_packet(&flush_pkt);                                 // initialize the flush AVPacket
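
The flush_pkt initialized here deserves a note: it is used later as a sentinel. After a seek, decode_thread empties the packet queues and pushes flush_pkt into each of them; when a consumer thread pops a packet whose data pointer matches flush_pkt.data, it calls avcodec_flush_buffers() instead of decoding. A minimal sketch of the idea (the exact marker value differs across ffplay versions; here the packet's own address is used as the unique data pointer purely for illustration):

#include <stdio.h>
#include <libavcodec/avcodec.h>

static AVPacket flush_pkt;   /* sentinel packet, never handed to the decoder */

int main(void)
{
    av_init_packet(&flush_pkt);
    /* give the sentinel a data pointer that no real packet can share */
    flush_pkt.data = (uint8_t *)&flush_pkt;

    AVPacket pkt = flush_pkt;            /* pretend this was just popped from a queue */
    if (pkt.data == flush_pkt.data) {
        /* a real consumer would call avcodec_flush_buffers(codec_ctx) here */
        printf("flush sentinel seen: reset the decoder, do not decode this packet\n");
    }
    return 0;
}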

This is the key call:


cur_stream = stream_open(input_filename, file_iformat);  // open a stream: a local file or a tcp/rtp/udp/http source, etc.

2. A closer look at stream_open():

It first creates the mutexes and condition variables used for video display:


is->pictq_mutex = SDL_CreateMutex();
is->pictq_cond  = SDL_CreateCond();

is->subpq_mutex = SDL_CreateMutex();
is->subpq_cond  = SDL_CreateCond();

/* add the refresh timer to draw the picture */
schedule_refresh(is, 40);

is->parse_tid = SDL_CreateThread(decode_thread, is);  // create the decode (demuxing) thread

That concludes the stream_open() call.
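
A word on schedule_refresh(is, 40): it arms an SDL timer that, roughly 40 ms later, pushes an FF_REFRESH_EVENT back into the main event loop, which then calls video_refresh_timer() to draw a picture and schedule the next refresh. In the ffplay era discussed here it is wired up roughly like this (a sketch; the FF_REFRESH_EVENT value shown is illustrative, the real constant is defined near the top of ffplay.c):

#define FF_REFRESH_EVENT  (SDL_USEREVENT + 1)   /* illustrative value */

static Uint32 sdl_refresh_timer_cb(Uint32 interval, void *opaque)
{
    SDL_Event event;
    event.type       = FF_REFRESH_EVENT;
    event.user.data1 = opaque;                  /* the VideoState */
    SDL_PushEvent(&event);
    return 0;                                   /* 0 = one-shot timer, do not re-arm */
}

/* schedule a video refresh in 'delay' ms */
static void schedule_refresh(VideoState *is, int delay)
{
    if (!delay)
        delay = 1;                              /* SDL misbehaves with a 0 ms timer */
    SDL_AddTimer(delay, sdl_refresh_timer_cb, is);
}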

3. Next, the decode_thread thread:

It begins by initializing some fields of the VideoState structure.



static int decode_thread(void *arg)
{
    VideoState *is = arg;
    AVFormatContext *ic;
    int err, i, ret, video_index, audio_index, subtitle_index;
    AVPacket pkt1, *pkt = &pkt1;
    AVFormatParameters params, *ap = &params;

    video_index = -1;
    audio_index = -1;
    subtitle_index = -1;
    is->video_stream = -1;
    is->audio_stream = -1;
    is->subtitle_stream = -1;

    global_video_state = is;
    url_set_interrupt_cb(decode_interrupt_cb);

    memset(ap, 0, sizeof(*ap));
    ap->width     = frame_width;
    ap->height    = frame_height;
    ap->time_base = (AVRational){1, 25};
    ap->pix_fmt   = frame_pix_fmt;

    err = av_open_input_file(&ic, is->filename, is->iformat, 0, ap);  // open the file or stream
    if (err < 0) {
        print_error(is->filename, err);
        ret = -1;
        goto fail;
    }
    is->ic = ic;
    if (genpts)
        ic->flags |= AVFMT_FLAG_GENPTS;

    err = av_find_stream_info(ic);  // probe the media and fill in the AVFormatContext
    if (err < 0) {
        fprintf(stderr, "%s: could not find codec parameters\n", is->filename);
        ret = -1;
        goto fail;
    }
    if (ic->pb)
        ic->pb->eof_reached = 0;  // FIXME hack, ffplay maybe should not use url_feof() to test for the end

    /* if seeking requested, we execute it */
    if (start_time != AV_NOPTS_VALUE) {
        int64_t timestamp;

        timestamp = start_time;
        /* add the stream start time */
        if (ic->start_time != AV_NOPTS_VALUE)
            timestamp += ic->start_time;
        ret = av_seek_frame(ic, -1, timestamp, AVSEEK_FLAG_BACKWARD);
        if (ret < 0) {
            fprintf(stderr, "%s: could not seek to position %0.3f\n",
                    is->filename, (double)timestamp / AV_TIME_BASE);
        }
    }

    /* look for audio, video, and subtitle streams; remember the index of each stream found,
       then move on once every stream has been checked */
    for (i = 0; i < ic->nb_streams; i++) {
        AVCodecContext *enc = ic->streams[i]->codec;
        ic->streams[i]->discard = AVDISCARD_ALL;
        switch (enc->codec_type) {
        case CODEC_TYPE_AUDIO:
            if (wanted_audio_stream-- >= 0 && !audio_disable)
                audio_index = i;      // found an audio stream
            break;
        case CODEC_TYPE_VIDEO:
            if (wanted_video_stream-- >= 0 && !video_disable)
                video_index = i;      // found a video stream
            break;
        case CODEC_TYPE_SUBTITLE:
            if (wanted_subtitle_stream-- >= 0 && !video_disable)
                subtitle_index = i;   // found a subtitle stream
            break;
        default:
            break;
        }
    }
    if (show_status) {
        dump_format(ic, 0, is->filename, 0);
        dump_stream_info(ic);
    }

    /* open the streams */
    if (audio_index >= 0) {
        stream_component_open(is, audio_index);    // this starts a separate thread for audio decoding
    }
    if (video_index >= 0) {
        stream_component_open(is, video_index);    // this starts a separate thread for video decoding
    } else {
        if (!display_disable)
            is->show_audio = 1;
    }
    if (subtitle_index >= 0) {
        stream_component_open(is, subtitle_index);
    }

    if (is->video_stream < 0 && is->audio_stream < 0) {
        fprintf(stderr, "%s: could not open codecs\n", is->filename);
        ret = -1;
        goto fail;
    }

    for (;;) {
        if (is->abort_request)
            break;
        if (is->paused != is->last_paused) {
            is->last_paused = is->paused;
            if (is->paused)
                av_read_pause(ic);
            else
                av_read_play(ic);
        }
#if CONFIG_RTSP_DEMUXER
        if (is->paused && !strcmp(ic->iformat->name, "rtsp")) {
            /* wait 10 ms to avoid trying to get another packet */
            /* XXX: horrible */
            SDL_Delay(10);
            continue;
        }
#endif
        if (is->seek_req) {
            int stream_index = -1;
            int64_t seek_target = is->seek_pos;

            if      (is->video_stream    >= 0) stream_index = is->video_stream;
            else if (is->audio_stream    >= 0) stream_index = is->audio_stream;
            else if (is->subtitle_stream >= 0) stream_index = is->subtitle_stream;

            if (stream_index >= 0) {
                // av_rescale_q(a, b, c) rescales a timestamp from one time base to another
                seek_target = av_rescale_q(seek_target, AV_TIME_BASE_Q, ic->streams[stream_index]->time_base);
            }
            ret = av_seek_frame(is->ic, stream_index, seek_target, is->seek_flags);  // seek inside ic (this is what fast-forward/rewind uses)
            if (ret < 0) {
                fprintf(stderr, "%s: error while seeking\n", is->ic->filename);
            } else {
                if (is->audio_stream >= 0) {
                    // seek_req was set because the user jumped to a new position, so the
                    // packets buffered for the old position are stale and must be discarded
                    packet_queue_flush(&is->audioq);
                    // the queue is now empty; push the flush_pkt sentinel so the consumer
                    // thread knows to flush its decoder before handling new data
                    packet_queue_put(&is->audioq, &flush_pkt);
                }
                if (is->subtitle_stream >= 0) {
                    packet_queue_flush(&is->subtitleq);
                    packet_queue_put(&is->subtitleq, &flush_pkt);
                }
                if (is->video_stream >= 0) {
                    packet_queue_flush(&is->videoq);
                    packet_queue_put(&is->videoq, &flush_pkt);
                }
            }
            is->seek_req = 0;
        }

        /* if the queues are full, no need to read more */
        if (is->audioq.size > MAX_AUDIOQ_SIZE ||
            is->videoq.size > MAX_VIDEOQ_SIZE ||
            is->subtitleq.size > MAX_SUBTITLEQ_SIZE) {
            /* wait 10 ms */
            SDL_Delay(10);
            continue;
        }
        if (url_feof(ic->pb)) {
            av_init_packet(pkt);
            pkt->data = NULL;
            pkt->size = 0;
            pkt->stream_index = is->video_stream;
            packet_queue_put(&is->videoq, pkt);
            continue;
        }
        ret = av_read_frame(ic, pkt);  // read the next packet into pkt
        if (ret < 0) {
            if (ret != AVERROR_EOF && url_ferror(ic->pb) == 0) {
                SDL_Delay(100);        /* wait for user event */
                continue;
            } else
                break;
        }
        if (pkt->stream_index == is->audio_stream) {          // audio packet: push it onto the audio queue
            packet_queue_put(&is->audioq, pkt);
        } else if (pkt->stream_index == is->video_stream) {
            packet_queue_put(&is->videoq, pkt);
        } else if (pkt->stream_index == is->subtitle_stream) {
            packet_queue_put(&is->subtitleq, pkt);
        } else {
            av_free_packet(pkt);                              // a packet from a stream we do not use: free it
        }
    }
    /* wait until the end */
    while (!is->abort_request) {
        SDL_Delay(100);
    }

    ret = 0;
 fail:
    /* disable interrupting */
    global_video_state = NULL;

    /* close each stream */
    if (is->audio_stream >= 0)
        stream_component_close(is, is->audio_stream);
    if (is->video_stream >= 0)
        stream_component_close(is, is->video_stream);
    if (is->subtitle_stream >= 0)
        stream_component_close(is, is->subtitle_stream);
    if (is->ic) {
        av_close_input_file(is->ic);
        is->ic = NULL; /* safety */
    }
    url_set_interrupt_cb(NULL);

    if (ret != 0) {
        SDL_Event event;

        event.type = FF_QUIT_EVENT;
        event.user.data1 = is;
        SDL_PushEvent(&event);
    }
    return 0;
}
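
The av_rescale_q() call in the seek handling above is worth a standalone illustration: it computes a*b/c with 64-bit safety, which is exactly what is needed to move a timestamp from ffmpeg's internal 1/AV_TIME_BASE (microsecond) time base into a stream's own time base. A self-contained example (the 1/90000 time base is just a typical value, e.g. for MPEG-TS video):

#include <stdio.h>
#include <libavutil/avutil.h>        /* AV_TIME_BASE, AV_TIME_BASE_Q */
#include <libavutil/mathematics.h>   /* av_rescale_q() */

int main(void)
{
    int64_t seek_target = 5 * (int64_t)AV_TIME_BASE;   /* 5 seconds in 1/1000000 units */
    AVRational stream_tb = {1, 90000};                 /* a typical video stream time base */

    /* 5,000,000 * (1/1,000,000) / (1/90,000) = 450,000 ticks in the stream's time base */
    int64_t ts = av_rescale_q(seek_target, AV_TIME_BASE_Q, stream_tb);

    printf("%lld\n", (long long)ts);                   /* prints 450000 */
    return 0;
}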

4. Next, how the decoding threads get created: ffplay.c / stream_component_open()

Here is the function; note where the audio and video decoding threads come from inside it:


static int stream_component_open(VideoState *is, int stream_index)
{
    AVFormatContext *ic = is->ic;
    AVCodecContext *enc;
    AVCodec *codec;
    SDL_AudioSpec wanted_spec, spec;

    if (stream_index < 0 || stream_index >= ic->nb_streams)
        return -1;
    enc = ic->streams[stream_index]->codec;

    /* prepare audio output */
    if (enc->codec_type == CODEC_TYPE_AUDIO) {   // for audio, set up the channel request before opening the codec
        if (enc->channels > 0) {
            enc->request_channels = FFMIN(2, enc->channels);
        } else {
            enc->request_channels = 2;
        }
    }

    codec = avcodec_find_decoder(enc->codec_id); // find the decoder for this codec id
    enc->debug_mv          = debug_mv;
    enc->debug             = debug;
    enc->workaround_bugs   = workaround_bugs;
    enc->lowres            = lowres;
    if (lowres) enc->flags |= CODEC_FLAG_EMU_EDGE;
    enc->idct_algo         = idct;
    if (fast) enc->flags2 |= CODEC_FLAG2_FAST;
    enc->skip_frame        = skip_frame;
    enc->skip_idct         = skip_idct;
    enc->skip_loop_filter  = skip_loop_filter;
    enc->error_recognition = error_recognition;
    enc->error_concealment = error_concealment;

    set_context_opts(enc, avctx_opts[enc->codec_type], 0);  // set decoder options

    if (!codec ||
        avcodec_open(enc, codec) < 0)                       // open the decoder
        return -1;

    /* prepare audio output */
    if (enc->codec_type == CODEC_TYPE_AUDIO) {
        wanted_spec.freq     = enc->sample_rate;
        wanted_spec.format   = AUDIO_S16SYS;
        wanted_spec.channels = enc->channels;
        wanted_spec.silence  = 0;
        wanted_spec.samples  = SDL_AUDIO_BUFFER_SIZE;
        // this is where the audio "decoding thread" comes from: SDL runs its own audio
        // thread internally and calls this callback whenever it needs more samples
        wanted_spec.callback = sdl_audio_callback;
        wanted_spec.userdata = is;
        if (SDL_OpenAudio(&wanted_spec, &spec) < 0) {
            fprintf(stderr, "SDL_OpenAudio: %s\n", SDL_GetError());
            return -1;
        }
        is->audio_hw_buf_size = spec.size;
        is->audio_src_fmt = SAMPLE_FMT_S16;
    }

    if (thread_count > 1)
        avcodec_thread_init(enc, thread_count);  // enable multi-threaded decoding
    enc->thread_count = thread_count;

    ic->streams[stream_index]->discard = AVDISCARD_DEFAULT;
    switch (enc->codec_type) {
    case CODEC_TYPE_AUDIO:
        is->audio_stream = stream_index;
        is->audio_st = ic->streams[stream_index];
        is->audio_buf_size = 0;
        is->audio_buf_index = 0;

        /* init averaging filter */
        is->audio_diff_avg_coef = exp(log(0.01) / AUDIO_DIFF_AVG_NB);
        is->audio_diff_avg_count = 0;
        /* since we do not have a precise enough audio fifo fullness,
           we correct audio sync only if larger than this threshold */
        is->audio_diff_threshold = 2.0 * SDL_AUDIO_BUFFER_SIZE / enc->sample_rate;

        memset(&is->audio_pkt, 0, sizeof(is->audio_pkt));
        packet_queue_init(&is->audioq);
        SDL_PauseAudio(0);
        break;
    case CODEC_TYPE_VIDEO:
        is->video_stream = stream_index;
        is->video_st = ic->streams[stream_index];

        is->frame_last_delay = 40e-3;
        is->frame_timer = (double)av_gettime() / 1000000.0;
        is->video_current_pts_time = av_gettime();

        packet_queue_init(&is->videoq);
        is->video_tid = SDL_CreateThread(video_thread, is);  // for video, create the video decoding thread
        break;
    case CODEC_TYPE_SUBTITLE:
        is->subtitle_stream = stream_index;
        is->subtitle_st = ic->streams[stream_index];
        packet_queue_init(&is->subtitleq);

        is->subtitle_tid = SDL_CreateThread(subtitle_thread, is);
        break;
    default:
        break;
    }
    return 0;
}
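
The real work of the audio "decoding thread" lives in sdl_audio_callback(): SDL runs its own internal audio thread and calls the callback registered above whenever the hardware needs len more bytes. Below is a skeleton of what such a callback does; it assumes ffplay.c's includes and VideoState fields, and audio_decode_frame() is a stand-in name for ffplay's pull-a-packet-and-decode logic, not a literal quote of it:

/* Skeleton of an SDL audio callback in the shape of ffplay's sdl_audio_callback().
 * audio_decode_frame() here stands in for the real code that pops a packet from
 * is->audioq, decodes it into is->audio_buf and returns the number of bytes produced. */
static void sdl_audio_callback(void *opaque, Uint8 *stream, int len)
{
    VideoState *is = opaque;
    int audio_size, chunk;

    while (len > 0) {
        if (is->audio_buf_index >= is->audio_buf_size) {
            audio_size = audio_decode_frame(is);       /* hypothetical helper, see above */
            if (audio_size < 0) {
                /* decoding error or no data: play silence */
                is->audio_buf_size = 1024;
                memset(is->audio_buf, 0, is->audio_buf_size);
            } else {
                is->audio_buf_size = audio_size;
            }
            is->audio_buf_index = 0;
        }
        chunk = is->audio_buf_size - is->audio_buf_index;
        if (chunk > len)
            chunk = len;
        memcpy(stream, is->audio_buf + is->audio_buf_index, chunk);
        len    -= chunk;
        stream += chunk;
        is->audio_buf_index += chunk;
    }
}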

5. The video_thread thread:


static int video_thread(void *arg)
{
    VideoState *is = arg;
    AVPacket pkt1, *pkt = &pkt1;
    int len1, got_picture;
    AVFrame *frame = avcodec_alloc_frame();
    double pts;

    for (;;) {
        while (is->paused && !is->videoq.abort_request) {
            SDL_Delay(10);
        }
        if (packet_queue_get(&is->videoq, pkt, 1) < 0)  // get the next packet from the video queue
            break;

        if (pkt->data == flush_pkt.data) {              // flush_pkt sentinel: flush the decoder's internal buffers
            avcodec_flush_buffers(is->video_st->codec);
            continue;
        }

        /* NOTE: ipts is the PTS of the _first_ picture beginning in
           this packet, if any */
        is->video_st->codec->reordered_opaque = pkt->pts;
        len1 = avcodec_decode_video(is->video_st->codec,  // decode the video packet
                                    frame, &got_picture,
                                    pkt->data, pkt->size);

        // choose a presentation timestamp: prefer the reordered pts, fall back to the dts
        if (   (decoder_reorder_pts || pkt->dts == AV_NOPTS_VALUE)
            && frame->reordered_opaque != AV_NOPTS_VALUE)
            pts = frame->reordered_opaque;
        else if (pkt->dts != AV_NOPTS_VALUE)
            pts = pkt->dts;
        else
            pts = 0;
        pts *= av_q2d(is->video_st->time_base);

//        if (len1 < 0)
//            break;
        if (got_picture) {
            if (output_picture2(is, frame, pts) < 0)  // queue the picture for display
                goto the_end;
        }
        av_free_packet(pkt);                          // free the packet
        if (step)
            if (cur_stream)
                stream_pause(cur_stream);
    }
 the_end:
    av_free(frame);
    return 0;
}
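
The line pts *= av_q2d(is->video_st->time_base) is how a raw stream timestamp becomes seconds: av_q2d() turns the AVRational time base into a double, so multiplying converts time-base ticks into wall-clock seconds. A tiny standalone example (again using a 1/90000 time base purely for illustration):

#include <stdio.h>
#include <stdint.h>
#include <libavutil/rational.h>   /* AVRational, av_q2d() */

int main(void)
{
    AVRational time_base = {1, 90000};   /* illustrative stream time base */
    int64_t pts = 450000;                /* a pts in stream time-base units */

    double seconds = pts * av_q2d(time_base);   /* 450000 * (1/90000) = 5.0 */
    printf("%.3f s\n", seconds);
    return 0;
}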

6. Finally, the event_loop() that main() ends in:


/* handle an event sent by the GUI */
static void event_loop(void)
{
    SDL_Event event;
    double incr, pos, frac;

    for (;;) {
        SDL_WaitEvent(&event);
        switch (event.type) {
        case SDL_KEYDOWN:                 // keyboard events
            switch (event.key.keysym.sym) {
            case SDLK_ESCAPE:
            case SDLK_q:                  // q or ESC: quit
                do_exit();
                break;
            case SDLK_f:                  // f: toggle full screen
                toggle_full_screen();
                break;
            case SDLK_p:
            case SDLK_SPACE:              // p or SPACE: toggle pause
                toggle_pause();
                break;
            case SDLK_s:                  // s: step to the next frame
                step_to_next_frame();
                break;
            case SDLK_a:
                if (cur_stream)
                    stream_cycle_channel(cur_stream, CODEC_TYPE_AUDIO);
                break;
            case SDLK_v:
                if (cur_stream)
                    stream_cycle_channel(cur_stream, CODEC_TYPE_VIDEO);
                break;
            case SDLK_t:
                if (cur_stream)
                    stream_cycle_channel(cur_stream, CODEC_TYPE_SUBTITLE);
                break;
            case SDLK_w:
                toggle_audio_display();
                break;
            case SDLK_LEFT:
                incr = -10.0;
                goto do_seek;
            case SDLK_RIGHT:
                incr = 10.0;
                goto do_seek;
            case SDLK_UP:
                incr = 60.0;
                goto do_seek;
            case SDLK_DOWN:
                incr = -60.0;
            do_seek:
                if (cur_stream) {
                    if (seek_by_bytes) {
                        pos = url_ftell(cur_stream->ic->pb);
                        if (cur_stream->ic->bit_rate)
                            incr *= cur_stream->ic->bit_rate / 60.0;
                        else
                            incr *= 180000.0;
                        pos += incr;
                        stream_seek(cur_stream, pos, incr);
                    } else {
                        pos = get_master_clock(cur_stream);
                        pos += incr;
                        stream_seek(cur_stream, (int64_t)(pos * AV_TIME_BASE), incr);
                    }
                }
                break;
            default:
                break;
            }
            break;
        case SDL_MOUSEBUTTONDOWN:
            if (cur_stream) {
                int ns, hh, mm, ss;
                int tns, thh, tmm, tss;
                tns  = cur_stream->ic->duration / 1000000LL;
                thh  = tns / 3600;
                tmm  = (tns % 3600) / 60;
                tss  = (tns % 60);
                frac = (double)event.button.x / (double)cur_stream->width;
                ns   = frac * tns;
                hh   = ns / 3600;
                mm   = (ns % 3600) / 60;
                ss   = (ns % 60);
                fprintf(stderr, "Seek to %2.0f%% (%2d:%02d:%02d) of total duration (%2d:%02d:%02d) \n", frac * 100,
                        hh, mm, ss, thh, tmm, tss);
                stream_seek(cur_stream, (int64_t)(cur_stream->ic->start_time + frac * cur_stream->ic->duration), 0);
            }
            break;
        case SDL_VIDEORESIZE:
            if (cur_stream) {
                screen = SDL_SetVideoMode(event.resize.w, event.resize.h, 0,
                                          SDL_HWSURFACE | SDL_RESIZABLE | SDL_ASYNCBLIT | SDL_HWACCEL);
                screen_width  = cur_stream->width  = event.resize.w;
                screen_height = cur_stream->height = event.resize.h;
            }
            break;
        case SDL_QUIT:
        case FF_QUIT_EVENT:
            do_exit();
            break;
        case FF_ALLOC_EVENT:
            video_open(event.user.data1);
            alloc_picture(event.user.data1);
            break;
        case FF_REFRESH_EVENT:            // refresh event: display the next picture
            video_refresh_timer(event.user.data1);
            break;
        default:
            break;
        }
    }
}
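
The SDL_MOUSEBUTTONDOWN branch turns a click into a seek target with simple proportional arithmetic: the click's x position divided by the window width gives a fraction, and start_time + frac * duration is the target in AV_TIME_BASE (microsecond) units. A worked example with made-up numbers:

#include <stdio.h>
#include <stdint.h>

#define AV_TIME_BASE 1000000              /* microseconds, as in libavutil */

int main(void)
{
    /* hypothetical values: 1-hour file, 640-pixel-wide window, click at x = 160 */
    int64_t start_time = 0;
    int64_t duration   = 3600LL * AV_TIME_BASE;
    int     width = 640, click_x = 160;

    double  frac   = (double)click_x / (double)width;           /* 0.25 */
    int64_t target = start_time + (int64_t)(frac * duration);   /* 900 s into the file */

    printf("seek to %2.0f%% = %lld us (%.0f s)\n",
           frac * 100, (long long)target, target / (double)AV_TIME_BASE);
    return 0;
}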