
ffmpeg+sdl tutorial: Writing a Simple Player, Part 3 (Adding Audio to the Video)

2012-09-13 16:41
The previous tutorial implemented simple video playback, but it was a silent movie: no sound at all.

This tutorial uses SDL threads for the first time and involves coordinating two threads, so a few points deserve special attention. The SDL_OpenAudio library function opens the audio device; the SDL_PauseAudio library function pauses or resumes execution of the audio_callback function (an argument of 0 resumes, anything else pauses). Once the line "SDL_PauseAudio(0);" in the program executes, audio_callback starts being called over and over; before that point the callback is never invoked.
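A minimal sketch of that sequence (the same calls appear in main() in the listing below):

SDL_OpenAudio(&wanted_spec, &spec); // open the audio device; the callback starts out paused
SDL_PauseAudio(0); // 0 = resume: SDL now calls audio_callback repeatedly on its own audio thread
// SDL_PauseAudio(1) would pause the callback again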

The audio_callback function

Its prototype is void callback(void *userdata, Uint8 *stream, int len): userdata is an input, stream is the output, and len is an input whose value is typically 4096 (observed while debugging). audio_callback's job is to call audio_decode_frame and append the decoded data block audio_buf to stream. Through SDL's repeated calls to audio_callback, data keeps being decoded and appended; once SDL judges that stream holds enough data to play a chunk of audio, it plays it. The third parameter len is the size of the buffer SDL has set aside for audio_callback to write into on this call.

Suppose len = 4096 and the decoded block audio_buf is 4608 bytes. A single audio_callback call cannot copy all of audio_buf into stream, so the work is split: the first call writes the first 4096 bytes of audio_buf to stream. On the next call, the remaining 512 bytes are copied into the fresh 4096-byte buffer first, and the loop then decodes more data to fill the remaining 3584 bytes before the callback returns.
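This bookkeeping is exactly what the copy loop inside audio_callback below does; here it is excerpted, annotated with the numbers from this example:

len1 = audio_buf_size - audio_buf_index; // unread bytes in audio_buf: 4608 on the first call, 512 on the second
if(len1 > len)
len1 = len; // clamp to what SDL asked for: 4096 on the first call
memcpy(stream, audio_buf + audio_buf_index, len1);
len -= len1; // first call: 0, loop exits; second call: 3584 still to fill from fresh decodes
stream += len1;
audio_buf_index += len1;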

The audio_decode_frame function

Prototype: int audio_decode_frame(AVCodecContext *aCodecCtx, uint8_t *audio_buf, int buf_size)

Return value: the number of bytes the decoded audio frame actually occupies in the buffer; negative on failure

aCodecCtx: input, the decoder context

audio_buf: output, the start address of the buffer that receives the decoded data

buf_size: input, the space reserved in audio_buf

The function effectively works from the back forward: it first takes a packet that the main thread put on the queue, then processes it with the library function avcodec_decode_audio2. If one call does not consume the whole packet, the position reached inside the packet is recorded and processing resumes from there on the next call (this can happen when a single audio packet carries the data of several audio frames).

The avcodec_decode_audio2 library function

Prototype: int avcodec_decode_audio2(AVCodecContext *avctx, int16_t *samples, int *frame_size_ptr, const uint8_t *buf, int buf_size);

avctx: the decoder context

samples: output parameter, the start address of the buffer that receives the decoded samples

frame_size_ptr: both input and output; on input it holds the size in bytes of the samples buffer, and on output the size in bytes of the decoded audio frame (0 if no frame could be decoded)

buf: input parameter, the buffer holding the compressed input data

buf_size: input parameter, the size of buf

Return value: 0 when there is no frame to decode, a negative number on failure; on success, the number of bytes the frame occupied before decoding, i.e. how much of the input was consumed

The SDL_CondWait library function

SDL_CondWait unlocks the mutex while it waits and re-locks it once the condition is signaled. It blocks the calling thread, and is normally paired with the SDL_CondSignal library function (or SDL_CondBroadcast).
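A stripped-down sketch of the pairing, using the same queue fields as the code below:

// consumer (audio thread)
SDL_LockMutex(q->mutex);
while(!q->first_pkt) // re-check the condition after every wakeup
SDL_CondWait(q->cond, q->mutex); // unlocks q->mutex while blocked, re-locks before returning
// ... take a packet off the head ...
SDL_UnlockMutex(q->mutex);

// producer (main thread)
SDL_LockMutex(q->mutex);
// ... append a packet at the tail ...
SDL_CondSignal(q->cond); // wake one waiting thread
SDL_UnlockMutex(q->mutex);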


// ffmpegExe.cpp: main project file.

#include "libavformat/avformat.h"
#include "libswscale/swscale.h"

#include <windows.h>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <SDL/SDL.h>
#include <SDL/SDL_thread.h>

#ifdef main
#undef main
#endif

#define SDL_AUDIO_BUFFER_SIZE 1024

static int sws_flags = SWS_BICUBIC;

typedef struct PacketQueue
{
AVPacketList *first_pkt, *last_pkt;
int nb_packets;
int size;
SDL_mutex *mutex;
SDL_cond *cond;
} PacketQueue;
PacketQueue audioq;
int quit = 0;
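// Initialize the queue: zero the struct and create the mutex/condition pair that guard cross-thread access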
void packet_queue_init(PacketQueue *q)
{
memset(q, 0, sizeof(PacketQueue));
q->mutex = SDL_CreateMutex();
q->cond = SDL_CreateCond();
}
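// Called from the main thread: append a packet at the tail and signal the audio thread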
int packet_queue_put(PacketQueue *q, AVPacket *pkt)
{
AVPacketList *pkt1;
if(av_dup_packet(pkt) < 0)
{
return -1;
}
pkt1 = (AVPacketList *)av_malloc(sizeof(AVPacketList));
if (!pkt1)
return -1;
pkt1->pkt = *pkt;
pkt1->next = NULL;
SDL_LockMutex(q->mutex);
if (!q->last_pkt)
q->first_pkt = pkt1;
else
q->last_pkt->next = pkt1;
q->last_pkt = pkt1;
q->nb_packets++;
q->size += pkt1->pkt.size;
SDL_CondSignal(q->cond);
SDL_UnlockMutex(q->mutex);
return 0;
}
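// Called from the audio thread (via audio_decode_frame): pop the head packet; if block is nonzero, wait until one arrives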
static int packet_queue_get(PacketQueue *q, AVPacket *pkt, int block)
{
AVPacketList *pkt1;
int ret;
SDL_LockMutex(q->mutex);
for(;;)
{
if(quit)
{
ret = -1;
break;
}
pkt1 = q->first_pkt;
if (pkt1)
{
q->first_pkt = pkt1->next;
if (!q->first_pkt)
q->last_pkt = NULL;
q->nb_packets--;
q->size -= pkt1->pkt.size;
*pkt = pkt1->pkt;
av_free(pkt1);
ret = 1;
break;
}
else if (!block)
{
ret = 0;
break;
}
else
{
SDL_CondWait(q->cond, q->mutex);
}
}
SDL_UnlockMutex(q->mutex);
return ret;
}

int audio_decode_frame(AVCodecContext *aCodecCtx, uint8_t *audio_buf, int buf_size)
{
static AVPacket pkt;
static uint8_t *audio_pkt_data = NULL;
static int audio_pkt_size = 0;
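// static: the current packet and the position reached inside it survive across calls, so decoding can resume mid-packet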
int len1, data_size;
for(;;)
{
while(audio_pkt_size > 0)
{
data_size = buf_size;
len1 = avcodec_decode_audio2(aCodecCtx, (int16_t *)audio_buf, &data_size, audio_pkt_data, audio_pkt_size);
if(len1 < 0)
{ /* if error, skip frame */
audio_pkt_size = 0;
break;
}
audio_pkt_data += len1;
audio_pkt_size -= len1;
if(data_size <= 0)
{ /* No data yet, get more frames */
continue;
} /* We have data, return it and come back for more later */
return data_size;
}
if(pkt.data)
av_free_packet(&pkt);
if(quit)
{
return -1;
}
if(packet_queue_get(&audioq, &pkt, 1) < 0)
{
return -1;
}
audio_pkt_data = pkt.data;
audio_pkt_size = pkt.size;
}
}

void audio_callback(void *userdata, Uint8 *stream, int len)
{
AVCodecContext *aCodecCtx = (AVCodecContext *)userdata;
int len1, audio_size;
static uint8_t audio_buf[(AVCODEC_MAX_AUDIO_FRAME_SIZE * 3) / 2];
static unsigned int audio_buf_size = 0;
static unsigned int audio_buf_index = 0;
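// static: bytes decoded but not yet consumed by SDL survive until the next callback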
while(len > 0)
{
if(audio_buf_index >= audio_buf_size)
{ /* We have already sent all our data; get more */
audio_size = audio_decode_frame(aCodecCtx, audio_buf, sizeof(audio_buf));
if(audio_size < 0)
{ /* If error, output silence */
audio_buf_size = 1024; // arbitrary?

memset(audio_buf, 0, audio_buf_size);
}
else
{
audio_buf_size = audio_size;
}
audio_buf_index = 0;
}
len1 = audio_buf_size - audio_buf_index;
if(len1 > len)
len1 = len;
memcpy(stream, (uint8_t *)audio_buf + audio_buf_index, len1);
len -= len1;
stream += len1;
audio_buf_index += len1;
}
}

int main(int argc, char *argv[])
{
AVFormatContext *pFormatCtx;
int i, videoStream = -1, audioStream = -1;
AVCodecContext *pCodecCtx;
AVCodec *pCodec;
AVFrame *pFrame;
AVPacket packet;
int frameFinished;
float aspect_ratio;
AVCodecContext *aCodecCtx;
AVCodec *aCodec;
SDL_Overlay *bmp;
SDL_Surface *screen;
SDL_Rect rect;
SDL_Event event;
SDL_AudioSpec wanted_spec, spec;
if(argc < 2)
{
fprintf(stderr, "Usage: test /n");
exit(1);
}

av_register_all();
pFormatCtx = av_alloc_format_context();
if (!pFormatCtx) {
fprintf(stderr, "Memory error/n");
exit(1);
}
if(av_open_input_file(&pFormatCtx, argv[1], NULL, 0, NULL)!=0)
return -1; // Couldn't open file

if(av_find_stream_info(pFormatCtx)<0)
return -1; // Couldn't find stream information

// Dump information about file onto standard error

dump_format(pFormatCtx, 0, argv[1], 0);

// Find the first video stream

for(i=0; i<pFormatCtx->nb_streams; i++)
{
if(pFormatCtx->streams[i]->codec->codec_type==CODEC_TYPE_VIDEO && videoStream<0)
{
videoStream=i;
}
if(pFormatCtx->streams[i]->codec->codec_type==CODEC_TYPE_AUDIO && audioStream<0)
{
audioStream=i;
}
}
if(videoStream==-1||audioStream==-1)
return -1; // Didn't find both a video and an audio stream

// Get a pointer to the codec context for the audio stream

aCodecCtx=pFormatCtx->streams[audioStream]->codec;
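// Describe the audio format we want; SDL fills 'spec' with what the device actually granted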
wanted_spec.freq = aCodecCtx->sample_rate;
wanted_spec.format = AUDIO_S16SYS;
wanted_spec.channels = aCodecCtx->channels;
wanted_spec.silence = 0;
wanted_spec.samples = SDL_AUDIO_BUFFER_SIZE;
wanted_spec.callback = audio_callback;
wanted_spec.userdata = aCodecCtx;
if(SDL_OpenAudio(&wanted_spec, &spec) < 0)
{
fprintf(stderr, "SDL_OpenAudio: %s/n", SDL_GetError());
return -1;
}
aCodec = avcodec_find_decoder(aCodecCtx->codec_id);
if(!aCodec)
{
fprintf(stderr, "Unsupported codec!/n"); return -1;
}
avcodec_open(aCodecCtx, aCodec); // audio_st = pFormatCtx->streams[index]

packet_queue_init(&audioq);
SDL_PauseAudio(0);

pCodecCtx=pFormatCtx->streams[videoStream]->codec;
pCodec=avcodec_find_decoder(pCodecCtx->codec_id);
if(pCodec==NULL)
{
fprintf(stderr, "Unsupported codec!/n");
return -1; // Codec not found

}
// Open codec
if(avcodec_open(pCodecCtx, pCodec)<0)
return -1; // Could not open codec

// Allocate video frame

pFrame=avcodec_alloc_frame();
// Determine required buffer size and allocate a buffer for an RGB copy of the frame
// (carried over from the previous tutorial; this player never actually writes into it)

uint8_t *buffer;
int numBytes;
numBytes=avpicture_get_size(PIX_FMT_RGB24, pCodecCtx->width,
pCodecCtx->height);
buffer=(uint8_t *)av_malloc(numBytes*sizeof(uint8_t));

if(SDL_Init(SDL_INIT_VIDEO | SDL_INIT_AUDIO | SDL_INIT_TIMER))
{
fprintf(stderr, "Could not initialize SDL - %s/n", SDL_GetError());
exit(1);
}

#ifndef __DARWIN__
screen = SDL_SetVideoMode(pCodecCtx->width, pCodecCtx->height, 0, 0);
#else
screen = SDL_SetVideoMode(pCodecCtx->width, pCodecCtx->height, 24, 0);
#endif
if(!screen)
{
fprintf(stderr, "SDL: could not set video mode - exiting/n");
exit(1);
}

bmp = SDL_CreateYUVOverlay(pCodecCtx->width, pCodecCtx->height,
SDL_YV12_OVERLAY, screen);

static struct SwsContext *img_convert_ctx;
if (img_convert_ctx == NULL)
{
img_convert_ctx = sws_getContext(pCodecCtx->width, pCodecCtx->height,
pCodecCtx->pix_fmt,
pCodecCtx->width, pCodecCtx->height,
PIX_FMT_YUV420P,
sws_flags, NULL, NULL, NULL);
if (img_convert_ctx == NULL)
{
fprintf(stderr, "Cannot initialize the conversion context/n");
exit(1);
}
}
i=0;
while(av_read_frame(pFormatCtx, &packet)>=0)
{
// Is this a packet from the video stream?

if(packet.stream_index==videoStream)
{
// Decode video frame
avcodec_decode_video(pCodecCtx, pFrame, &frameFinished,
packet.data, packet.size);
// Did we get a video frame?

if(frameFinished)
{
SDL_LockYUVOverlay(bmp);
AVPicture pict;
pict.data[0] = bmp->pixels[0];
pict.data[1] = bmp->pixels[2];
pict.data[2] = bmp->pixels[1];

pict.linesize[0] = bmp->pitches[0];
pict.linesize[1] = bmp->pitches[2];
pict.linesize[2] = bmp->pitches[1];

// Convert the image into YUV format that SDL uses

/*img_convert(&pict, PIX_FMT_YUV420P,
(AVPicture *)pFrame, pCodecCtx->pix_fmt,
pCodecCtx->width, pCodecCtx->height);*/
sws_scale(img_convert_ctx, pFrame->data, pFrame->linesize,
0, pCodecCtx->height, pict.data, pict.linesize);
SDL_UnlockYUVOverlay(bmp);
rect.x = 0;
rect.y = 0;
rect.w = pCodecCtx->width;
rect.h = pCodecCtx->height;
SDL_DisplayYUVOverlay(bmp, &rect);
Sleep(60);
}
av_free_packet(&packet); // free the video packet whether or not a whole frame was produced
}
else if(packet.stream_index==audioStream)
{
packet_queue_put(&audioq, &packet);
}
else
{
av_free_packet(&packet);
}
// Note: audio packets are not freed here; packet_queue_put hands their ownership to the queue

SDL_PollEvent(&event);
switch(event.type)
{
case SDL_QUIT:
quit = 1;
SDL_Quit();
exit(0);
break;
default: break;
}
}
// Free the RGB buffer
av_free(buffer);
// Free the YUV frame

av_free(pFrame);
// Close the codec

avcodec_close(pCodecCtx);
// Close the video file

av_close_input_file(pFormatCtx);
return 0;
}

