
Decoding H.264 data with FFmpeg

Reposted from http://blog.csdn.net/liushu1231/article/details/9203713

Decoding H.264 data with FFmpeg is actually much simpler than encoding video with x264, because FFmpeg ships a sample file called decoding_encoding.c that contains simple examples of video and audio encoding/decoding with FFmpeg. Since some people may have trouble finding that sample, I am posting my adapted version of it here, together with some explanation.

One point deserves special mention: when decoding, FFmpeg checks whether each packet begins with an Annex-B start code (0x000001 or 0x00000001); if it does not, FFmpeg treats the packet as invalid. The code below uses OpenCV to display the decoded frames, so you also need an OpenCV environment configured; if you do not have one, you can comment out the cvShowImage call and use the pgm_save function to write the decoded frames to disk instead.
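To make that start-code requirement concrete, here is a small self-contained sketch (not part of the original post; the helper names has_startcode and ensure_startcode are made up for illustration) that checks for, and if necessary prepends, an Annex-B start code before a NAL unit is handed to the decoder:

/* Sketch only: make sure a raw NAL unit begins with an Annex-B start code. */
#include <string.h>

static int has_startcode(const unsigned char *p, int len)
{
    if (len >= 4 && p[0] == 0 && p[1] == 0 && p[2] == 0 && p[3] == 1)
        return 1;
    if (len >= 3 && p[0] == 0 && p[1] == 0 && p[2] == 1)
        return 1;
    return 0;
}

/* Copies the NAL unit into dst, prepending 00 00 00 01 when it is missing.
   Returns the new length; dst must provide at least len + 4 bytes. */
static int ensure_startcode(unsigned char *dst, const unsigned char *src, int len)
{
    if (has_startcode(src, len)) {
        memcpy(dst, src, len);
        return len;
    }
    dst[0] = 0; dst[1] = 0; dst[2] = 0; dst[3] = 1;
    memcpy(dst + 4, src, len);
    return len + 4;
}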

My code follows below; as usual, see the comments in the code for the details of the process. It is fairly straightforward, so I will not describe it at length here:

#include <stdio.h>
#include <string.h>
#include <stdlib.h>
#include <libavcodec/avcodec.h>
#include <libswscale/swscale.h>
#include <opencv/cv.h>
#include <opencv/highgui.h>

#define INBUF_SIZE 4096   /* as in FFmpeg's decoding_encoding.c sample */

// Write one grayscale plane to a PGM file
static void pgm_save(unsigned char *buf, int wrap, int xsize, int ysize, char *filename)
{
    FILE *f;
    int i;

    f = fopen(filename, "wb");
    fprintf(f, "P5\n%d %d\n%d\n", xsize, ysize, 255);
    for (i = 0; i < ysize; i++)
        fwrite(buf + i * wrap, 1, xsize, f);
    fclose(f);
}

// Find the start of the next packet by searching for a 0x000001 or 0x00000001 start code
static int _find_head(unsigned char *buffer, int len)
{
    int i;

    for (i = 512; i + 3 < len; i++)
    {
        if (buffer[i] == 0 && buffer[i+1] == 0 && buffer[i+2] == 0 && buffer[i+3] == 1)
            break;
        if (buffer[i] == 0 && buffer[i+1] == 0 && buffer[i+2] == 1)
            break;
    }
    if (i + 3 >= len)   /* reached the end without finding a start code */
        return 0;
    if (i == 512)
        return 0;
    return i;
}

// Turn one packet from the file into an AVPacket so FFmpeg can decode it
#define FILE_READING_BUFFER (1*1024*1024)
static void build_avpkt(AVPacket *avpkt, FILE *fp)
{
    static unsigned char buffer[FILE_READING_BUFFER];
    static int readptr = 0;
    static int writeptr = 0;
    int len, toread;
    int nexthead;

    // refill the buffer when less than 200 KB of unread data is left
    if (writeptr - readptr < 200 * 1024)
    {
        memmove(buffer, &buffer[readptr], writeptr - readptr);
        writeptr -= readptr;
        readptr = 0;
        toread = FILE_READING_BUFFER - writeptr;
        len = fread(&buffer[writeptr], 1, toread, fp);
        writeptr += len;
    }

    // the next start code marks the end of the current packet
    nexthead = _find_head(&buffer[readptr], writeptr - readptr);
    if (nexthead == 0)
    {
        printf("failed to find next head...\n");
        nexthead = writeptr - readptr;   // flush whatever is left
    }

    avpkt->size = nexthead;
    avpkt->data = &buffer[readptr];
    readptr += nexthead;
}
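As an aside, newer FFmpeg releases ship an H.264 parser that performs this packet splitting for you, so the hand-written _find_head/build_avpkt pair is not strictly necessary. The following is only a rough sketch of that approach (it assumes a newer libavcodec with the AV_CODEC_ID_H264 naming and an already-opened AVCodecContext named c); it is not part of the original post:

/* Sketch only: split the raw Annex-B byte stream with FFmpeg's own parser.
   Error handling is omitted for brevity. */
static void parse_with_ffmpeg(AVCodecContext *c, FILE *fp)
{
    AVCodecParserContext *parser = av_parser_init(AV_CODEC_ID_H264);
    uint8_t readbuf[4096];
    uint8_t *data;
    int data_size, size;

    while ((size = fread(readbuf, 1, sizeof(readbuf), fp)) > 0) {
        uint8_t *p = readbuf;
        while (size > 0) {
            /* the parser buffers input and emits one complete packet at a time */
            int used = av_parser_parse2(parser, c, &data, &data_size,
                                        p, size,
                                        AV_NOPTS_VALUE, AV_NOPTS_VALUE, 0);
            p += used;
            size -= used;
            if (data_size > 0) {
                /* data/data_size now describe one packet ready for decoding */
            }
        }
    }
    av_parser_close(parser);
}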

static void video_decode_example(const char *outfilename, const char *filename)
{
    AVCodec *codec;
    AVCodecContext *c = NULL;
    int frame, got_picture, len;
    FILE *f, *fout;
    AVFrame *picture;
    uint8_t inbuf[INBUF_SIZE + FF_INPUT_BUFFER_PADDING_SIZE];
    char buf[1024];
    AVPacket avpkt;
    AVDictionary *opts = NULL;

    av_init_packet(&avpkt);

    /* set end of buffer to 0 (this ensures that no overreading happens for damaged mpeg streams) */
    memset(inbuf + INBUF_SIZE, 0, FF_INPUT_BUFFER_PADDING_SIZE);

    printf("Video decoding\n");
    //av_dict_set(&opts, "b", "2.5M", 0);

    /* find the H.264 video decoder */
    codec = avcodec_find_decoder(CODEC_ID_H264);
    if (!codec) {
        fprintf(stderr, "codec not found\n");
        return;
    }

    c = avcodec_alloc_context3(codec);
    picture = avcodec_alloc_frame();

    if (codec->capabilities & CODEC_CAP_TRUNCATED)
        c->flags |= CODEC_FLAG_TRUNCATED; /* we do not send complete frames */

    /* For some codecs, such as msmpeg4 and mpeg4, width and height
       MUST be initialized there because this information is not
       available in the bitstream. */

    /* open it */
    if (avcodec_open2(c, codec, NULL) < 0) {
        fprintf(stderr, "could not open codec\n");
        exit(1);
    }

    // fout = fopen(outfilename, "wb");
    /* the codec gives us the frame size, in samples */

    f = fopen(filename, "rb");
    if (!f) {
        fprintf(stderr, "could not open %s\n", filename);
        exit(1);
    }

    // Helper structures for decoding and display (this example assumes a 352x288 CIF stream).
    // Note that frameRGB's buffers must be allocated (here with avpicture_alloc) before use,
    // otherwise its data pointers are NULL and the program will crash.
    AVFrame frameRGB;
    IplImage *showImage = cvCreateImage(cvSize(352, 288), 8, 3);
    avpicture_alloc((AVPicture *)&frameRGB, PIX_FMT_RGB24, 352, 288);
    cvNamedWindow("decode");

    frame = 0;
    for (;;) {

        build_avpkt(&avpkt, f);

        if (avpkt.size == 0)
            break;

        while (avpkt.size > 0) {
            len = avcodec_decode_video2(c, picture, &got_picture, &avpkt); // decode one frame
            if (len < 0) {
                fprintf(stderr, "Error while decoding frame %d\n", frame);
                break;
            }
            if (got_picture) {
                printf("saving frame %3d\n", frame);
                fflush(stdout);

                /* the picture is allocated by the decoder. no need to free it */

                // conversion context for turning the decoded YUV420P image into RGB24
                struct SwsContext *scxt = sws_getContext(picture->width, picture->height, PIX_FMT_YUV420P,
                                                         picture->width, picture->height, PIX_FMT_RGB24,
                                                         SWS_BILINEAR, NULL, NULL, NULL);
                if (scxt != NULL)
                {
                    sws_scale(scxt, picture->data, picture->linesize, 0, c->height,
                              frameRGB.data, frameRGB.linesize);        // convert the pixel format
                    showImage->imageSize = frameRGB.linesize[0];
                    showImage->imageData = (char *)frameRGB.data[0];    // point the IplImage at the RGB buffer
                    cvShowImage("decode", showImage);                   // display
                    cvWaitKey(500); // show each frame for 0.5 s; without a wait the loop runs too fast to see anything
                    sws_freeContext(scxt); // free the per-frame conversion context to avoid leaking it
                }

                // sprintf(buf, outfilename, frame);

                // pgm_save(picture->data[0], picture->linesize[0],
                //          c->width, c->height, buf);
                // pgm_save(picture->data[1], picture->linesize[1],
                //          c->width/2, c->height/2, fout);
                // pgm_save(picture->data[2], picture->linesize[2],
                //          c->width/2, c->height/2, fout);
                frame++;
            }
            avpkt.size -= len;
            avpkt.data += len;
        }
    }

    /* some codecs, such as MPEG, transmit the I and P frame with a
       latency of one frame. You must do the following to have a
       chance to get the last frame of the video */
    avpkt.data = NULL;
    avpkt.size = 0;
    len = avcodec_decode_video2(c, picture, &got_picture, &avpkt);
    if (got_picture) {
        printf("saving last frame %3d\n", frame);
        fflush(stdout);

        /* the picture is allocated by the decoder. no need to
           free it */
        sprintf(buf, outfilename, frame);
        // pgm_save expects a file name, so pass the name built by sprintf above
        pgm_save(picture->data[0], picture->linesize[0], c->width, c->height, buf);
        // The chroma planes would overwrite the same file; give them their own names if you need them:
        // pgm_save(picture->data[1], picture->linesize[1], c->width/2, c->height/2, buf);
        // pgm_save(picture->data[2], picture->linesize[2], c->width/2, c->height/2, buf);

        frame++;
    }

    fclose(f);
    // fclose(fout);

    avcodec_close(c);
    av_free(c);
    av_free(picture);
    printf("\n");
}

int main(int argc, char *argv[])
{
    avcodec_register_all(); // register all codecs; without this call the decoder lookup fails with "codec not found"
    video_decode_example("%3d.pgm", "test.264"); // test.264 can be a raw .264 file produced by x264
    system("pause");
    return 0;
}
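If you want to try the listing yourself, it has to be linked against libavcodec, libswscale, libavutil and OpenCV. On Linux a typical build line (assumed here; adjust the pkg-config module names to your installation) would look something like:

g++ decode_h264.cpp -o decode_h264 $(pkg-config --cflags --libs libavcodec libswscale libavutil opencv)

Also note that system("pause") is Windows-specific; on Linux you can simply remove it.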
Tags: ffmpeg, h264, video