您的位置:首页 > 其它

利用ffmpeg0.5 和libjpeg实现抽取视频帧并将其保存为jpeg文件格式程序

2009-07-02 17:30 531 查看
简单修改网上关于ffmpeg的例子,实现提取视频帧并将其保存为jpeg图像的源程序。

由于ffmpeg0.5不再兼容img_convert函数,改用sws_scale函数实现YUV颜色空间到RGB空间的转换

这里面一定要注意sws_getCachedContext函数参数的设置问题:源/目标的宽高和像素格式一旦设置错了,解码出来的图像就会出现花屏、重影(例如同一画面重复出现3次)等异常现象!

直接使用libjpeg库实现图像数据的jpeg压缩

#include <stdlib.h>
#include <stdio.h>
#include <memory.h>//注意要包含此头文件与sprintf函数相关

extern "C"
{
//ffmpeg相关的头文件
#include "avstring.h"
#include "avformat.h"
#include "swscale.h"
#include "opt.h"
//libjpeg相关的头文件

#include "jpeglib.h"

}
// Running count of frames written so far; used to number the output JPEG files.
int framenum=0 ;
//static int sws_flags = SWS_BICUBIC ;

/*
 * Compress one decoded video frame to a JPEG file on disk.
 *
 * pic    - picture whose data[0] plane holds packed RGB24 pixels
 *          (filled by sws_scale in main); linesize[0] is the row stride.
 * width  - frame width in pixels
 * height - frame height in pixels
 *
 * Output file name is "frameDLPShot-<n>.jpg" where <n> is the global
 * framenum counter, incremented on every call.
 */
void draw_jpeg(AVPicture *pic,int width,int height)
{
char fname[128] ;
struct jpeg_compress_struct cinfo ;
struct jpeg_error_mgr jerr ;
JSAMPROW row_pointer[1] ;
int row_stride ;
uint8_t *buffer ;
FILE *fp ;

buffer = pic->data[0];

/* snprintf bounds the write; the old sprintf could overflow fname.
 * (The original #ifdef __MINGW32__ had identical branches, so it is gone.) */
snprintf(fname, sizeof fname, "%sDLPShot-%d.jpg", "frame", framenum++);

fp = fopen (fname, "wb");
if (fp == NULL)
{
av_log(NULL, AV_LOG_ERROR, "fopen %s error\n", fname);
return;
}
cinfo.err = jpeg_std_error(&jerr);
jpeg_create_compress(&cinfo);
jpeg_stdio_dest(&cinfo, fp);

cinfo.image_width = width;
cinfo.image_height = height;
cinfo.input_components = 3;   /* packed R,G,B */
cinfo.in_color_space = JCS_RGB;

jpeg_set_defaults(&cinfo);

/* libjpeg expects its own boolean TRUE here, not the C++ 'true' keyword. */
jpeg_set_quality(&cinfo, 80, TRUE);

jpeg_start_compress(&cinfo, TRUE);

/* Use the picture's real stride: avpicture_fill usually packs RGB24 as
 * width*3, but linesize[0] is authoritative if any padding exists. */
row_stride = pic->linesize[0] ? pic->linesize[0] : width * 3;
while (cinfo.next_scanline < (JDIMENSION)height)
{
row_pointer[0] = &buffer[cinfo.next_scanline * row_stride];
(void)jpeg_write_scanlines(&cinfo, row_pointer, 1);
}

jpeg_finish_compress(&cinfo);   /* flushes remaining data to fp */
fclose(fp);
jpeg_destroy_compress(&cinfo);
printf("compress %d frame finished!\n",framenum) ;
return ;

}

void main()
{
AVFormatContext *pFormatCtx;
static char *ifilename="1.asf" ;
AVStream *st;
AVCodecContext *pCodecCtx;
AVCodec *pCodec;
AVFrame *pFrame,*pFrameRGB;
uint8_t *buffer;
AVPacket packet;
struct SwsContext *img_convert_ctx=NULL;
int numBytes;

int i,videoStream=-1,frameFinished;

av_register_all();
if(av_open_input_file(&pFormatCtx, ifilename, NULL, 0, NULL)!=0)
{
printf("open video failed!/n") ;
return ;
}

//read information about input file ;
if(av_find_stream_info(pFormatCtx)<0)
{
printf("get information failed!/n") ;
return ;
}
//print information about file
dump_format(pFormatCtx, 0, ifilename, 0);

for(i=0; i<pFormatCtx->nb_streams; i++)
{
if(pFormatCtx->streams[i]->codec->codec_type==CODEC_TYPE_VIDEO)
{
videoStream=i;
break;
}
}
if(videoStream==-1)
return ; // Didn't find a video stream

// Get a pointer to the codec context for the video stream
st=pFormatCtx->streams[videoStream] ;
pCodecCtx=st->codec;

// Find the decoder for the video stream
pCodec=avcodec_find_decoder(pCodecCtx->codec_id);
if(pCodec==NULL) {
fprintf(stderr, "Unsupported codec!/n");
return ; // Codec not found
}
// Open codec
if(avcodec_open(pCodecCtx, pCodec)<0)
{
printf("open encoder failed!") ;
return ;
}

// Allocate video frame
pFrame=avcodec_alloc_frame();

// Allocate an AVFrame structure
pFrameRGB=avcodec_alloc_frame();
if(pFrameRGB==NULL)
{
printf("allocate AVframe failed!/n") ;
return ;
}

// Determine required buffer size and allocate buffer
numBytes=avpicture_get_size(PIX_FMT_RGB24, pCodecCtx->width,
pCodecCtx->height);
buffer=(uint8_t *)av_malloc(numBytes*sizeof(uint8_t));

// Assign appropriate parts of buffer to image planes in pFrameRGB
// Note that pFrameRGB is an AVFrame, but AVFrame is a superset
// of AVPicture
avpicture_fill((AVPicture *)pFrameRGB, buffer,PIX_FMT_RGB24,
// avpicture_fill((AVPicture *)pFrameRGB, buffer, PIX_FMT_RGB24,
pCodecCtx->width, pCodecCtx->height);
//pFrameRGB->alloc_picture(PIX_FMT_RGB24,pCodecCtx->width,pCodecCtx->height) ;

i=0;
while(av_read_frame(pFormatCtx, &packet)>=0)
{
// Is this a packet from the video stream?
if(packet.stream_index==videoStream)
{
// Decode video frame
avcodec_decode_video(pCodecCtx, pFrame, &frameFinished,
packet.data, packet.size);

// Did we get a video frame?
if(frameFinished)
{
// Convert the image from its native format to RGB

if(img_convert_ctx==NULL)
{
img_convert_ctx=sws_getCachedContext(img_convert_ctx,pCodecCtx->width,pCodecCtx->height,
//PIX_FMT_YUV420P,pCodecCtx->width,pCodecCtx->height,pCodecCtx->pix_fmt,
pCodecCtx->pix_fmt,pCodecCtx->width,pCodecCtx->height,PIX_FMT_RGB24 ,
SWS_X ,NULL,NULL,NULL) ;
if (img_convert_ctx == NULL)
{

printf("can't init convert context!/n") ;
return ;
}

}
sws_scale(img_convert_ctx, pFrame->data, pFrame->linesize,
0, pCodecCtx->width, pFrameRGB->data, pFrameRGB->linesize);
//av_picture_copy((AVPicture*)pFrameRGB,(AVPicture*)pFrame,PIX_FMT_RGB24,pCodecCtx->width,pCodecCtx->height) ;

// Save the frame to disk
int a=++i ;
if((a>50)&&(a<100))
// SaveFrame(pFrameRGB, pCodecCtx->width,
// pCodecCtx->height, i);
draw_jpeg((AVPicture*)pFrameRGB,pCodecCtx->width,pCodecCtx->height) ;
}
}

// Free the packet that was allocated by av_read_frame
av_free_packet(&packet);
}
// Free the RGB image
av_free(buffer);
av_free(pFrameRGB);

// Free the YUV frame
av_free(pFrame);

// Close the codec
avcodec_close(pCodecCtx);

// Close the video file
av_close_input_file(pFormatCtx);

}
内容来自用户分享和网络整理,不保证内容的准确性,如有侵权内容,可联系管理员处理 点击这里给我发消息
标签: 
相关文章推荐