利用ffmpeg0.5 和libjpeg实现抽取视频帧并将其保存为jpeg文件格式程序
2009-07-02 17:30
531 查看
简单修改网上关于ffmpeg的例子,实现提取视频帧并将其保存为jpeg图像的源程序。
由于 ffmpeg 0.5 不再兼容 img_convert 函数,改用 sws_scale 函数实现 YUV 颜色空间到 RGB 颜色空间的转换
这里一定要注意 sws_getCachedContext 函数参数的设置问题,一旦设置错了,解码出来的图像会出现画面重复出现 3 次等异常现象!
直接使用libjpeg库实现图像数据的jpeg压缩
#include <stdlib.h>
#include <stdio.h>
#include <memory.h>//注意要包含此头文件与sprintf函数相关
extern "C"
{
//ffmpeg相关的头文件
#include "avstring.h"
#include "avformat.h"
#include "swscale.h"
#include "opt.h"
//libjpeg相关的头文件
#include "jpeglib.h"
}
/* Running count of frames written so far; also numbers the output JPEG files. */
int framenum=0 ;
//static int sws_flags = SWS_BICUBIC ;
// Compress a video frame to a JPEG file (implemented with libjpeg)
/*
 * Compress one RGB24 video frame to a JPEG file using libjpeg.
 *
 * pic    - frame whose data[0] holds packed RGB24 pixels
 *          (row stride assumed to be width * 3 bytes)
 * width  - frame width in pixels
 * height - frame height in pixels
 *
 * Output file name is "frameDLPShot-<N>.jpg", where N is the global
 * running frame counter (framenum), incremented on each call.
 */
void draw_jpeg(AVPicture *pic,int width,int height)
{
    char fname[128];
    struct jpeg_compress_struct cinfo;
    struct jpeg_error_mgr jerr;
    JSAMPROW row_pointer[1];
    int row_stride;
    uint8_t *buffer;
    FILE *fp;

    buffer = pic->data[0];
    /* snprintf bounds the write into fname; the original used sprintf and an
     * #ifdef __MINGW32__ whose two branches were byte-identical, so the
     * conditional has been dropped. */
    snprintf(fname, sizeof fname, "%sDLPShot-%d.jpg", "frame", framenum++);
    fp = fopen(fname, "wb");
    if (fp == NULL)
    {
        /* fixed "/n" -> "\n" in the format string */
        av_log(NULL, AV_LOG_ERROR, "fopen %s error\n", fname);
        return;
    }
    cinfo.err = jpeg_std_error(&jerr);
    jpeg_create_compress(&cinfo);
    jpeg_stdio_dest(&cinfo, fp);
    cinfo.image_width = width;
    cinfo.image_height = height;
    cinfo.input_components = 3;      /* RGB24: 3 bytes per pixel */
    cinfo.in_color_space = JCS_RGB;
    jpeg_set_defaults(&cinfo);
    /* libjpeg expects its own boolean TRUE here, not C++ `true`
     * (quality 80, force_baseline = TRUE) */
    jpeg_set_quality(&cinfo, 80, TRUE);
    jpeg_start_compress(&cinfo, TRUE);
    row_stride = width * 3;          /* bytes per source scanline */
    while (cinfo.next_scanline < cinfo.image_height)
    {
        row_pointer[0] = &buffer[cinfo.next_scanline * row_stride];
        (void)jpeg_write_scanlines(&cinfo, row_pointer, 1);
    }
    jpeg_finish_compress(&cinfo);
    fclose(fp);
    jpeg_destroy_compress(&cinfo);
    printf("compress %d frame finished!\n", framenum);  /* fixed "/n" */
    return;
}
/*
 * Demo entry point: open the file "1.asf", find its first video stream,
 * decode every packet, convert each completed frame from the codec's
 * native pixel format to RGB24 with swscale, and save frames 51..99 to
 * disk as JPEG files via draw_jpeg().
 *
 * Returns 0 on success, 1 on any setup or decoding error.
 * (Changed from non-standard `void main()` to standard `int main(void)`.)
 */
int main(void)
{
    AVFormatContext *pFormatCtx;
    const char *ifilename = "1.asf";     /* hard-coded input file */
    AVStream *st;
    AVCodecContext *pCodecCtx;
    AVCodec *pCodec;
    AVFrame *pFrame, *pFrameRGB;
    uint8_t *buffer;
    AVPacket packet;
    struct SwsContext *img_convert_ctx = NULL;
    int numBytes;
    int i, videoStream = -1, frameFinished;

    av_register_all();
    if (av_open_input_file(&pFormatCtx, ifilename, NULL, 0, NULL) != 0)
    {
        printf("open video failed!\n");          /* fixed "/n" */
        return 1;
    }
    /* Read stream information from the container header. */
    if (av_find_stream_info(pFormatCtx) < 0)
    {
        printf("get information failed!\n");
        return 1;
    }
    /* Dump container/stream information for debugging. */
    dump_format(pFormatCtx, 0, ifilename, 0);
    /* Locate the first video stream. */
    for (i = 0; i < pFormatCtx->nb_streams; i++)
    {
        if (pFormatCtx->streams[i]->codec->codec_type == CODEC_TYPE_VIDEO)
        {
            videoStream = i;
            break;
        }
    }
    if (videoStream == -1)
        return 1;                    /* didn't find a video stream */

    /* Get the codec context for the video stream and open its decoder. */
    st = pFormatCtx->streams[videoStream];
    pCodecCtx = st->codec;
    pCodec = avcodec_find_decoder(pCodecCtx->codec_id);
    if (pCodec == NULL)
    {
        fprintf(stderr, "Unsupported codec!\n");
        return 1;
    }
    if (avcodec_open(pCodecCtx, pCodec) < 0)
    {
        /* message corrected: this opens a DEcoder, not an encoder */
        printf("open decoder failed!\n");
        return 1;
    }
    /* One frame for decoded native-format data, one for the RGB copy. */
    pFrame = avcodec_alloc_frame();
    pFrameRGB = avcodec_alloc_frame();
    if (pFrame == NULL || pFrameRGB == NULL)   /* original only checked pFrameRGB */
    {
        printf("allocate AVframe failed!\n");
        return 1;
    }
    /* Allocate the RGB24 pixel buffer and wire it into pFrameRGB's planes
     * (AVFrame is a superset of AVPicture, so the cast is safe). */
    numBytes = avpicture_get_size(PIX_FMT_RGB24, pCodecCtx->width,
                                  pCodecCtx->height);
    buffer = (uint8_t *)av_malloc(numBytes * sizeof(uint8_t));
    if (buffer == NULL)                        /* av_malloc was unchecked */
    {
        printf("allocate RGB buffer failed!\n");
        return 1;
    }
    avpicture_fill((AVPicture *)pFrameRGB, buffer, PIX_FMT_RGB24,
                   pCodecCtx->width, pCodecCtx->height);

    i = 0;
    while (av_read_frame(pFormatCtx, &packet) >= 0)
    {
        /* Is this a packet from the video stream? */
        if (packet.stream_index == videoStream)
        {
            avcodec_decode_video(pCodecCtx, pFrame, &frameFinished,
                                 packet.data, packet.size);
            /* Did we get a complete video frame? */
            if (frameFinished)
            {
                /* Lazily create the native->RGB24 scaler on first use. */
                if (img_convert_ctx == NULL)
                {
                    img_convert_ctx = sws_getCachedContext(img_convert_ctx,
                        pCodecCtx->width, pCodecCtx->height, pCodecCtx->pix_fmt,
                        pCodecCtx->width, pCodecCtx->height, PIX_FMT_RGB24,
                        SWS_X, NULL, NULL, NULL);
                    if (img_convert_ctx == NULL)
                    {
                        printf("can't init convert context!\n");
                        return 1;
                    }
                }
                /* BUGFIX: srcSliceH must be the frame HEIGHT; the original
                 * passed pCodecCtx->width, which mis-converts any frame
                 * whose width != height. */
                sws_scale(img_convert_ctx, pFrame->data, pFrame->linesize,
                          0, pCodecCtx->height,
                          pFrameRGB->data, pFrameRGB->linesize);
                int a = ++i;
                if ((a > 50) && (a < 100))       /* save only frames 51..99 */
                    draw_jpeg((AVPicture *)pFrameRGB,
                              pCodecCtx->width, pCodecCtx->height);
            }
        }
        /* Free the packet that was allocated by av_read_frame. */
        av_free_packet(&packet);
    }
    /* Release resources in reverse order of acquisition. */
    if (img_convert_ctx != NULL)
        sws_freeContext(img_convert_ctx);   /* was leaked in the original */
    av_free(buffer);
    av_free(pFrameRGB);
    av_free(pFrame);
    avcodec_close(pCodecCtx);
    av_close_input_file(pFormatCtx);
    return 0;
}
由于 ffmpeg 0.5 不再兼容 img_convert 函数,改用 sws_scale 函数实现 YUV 颜色空间到 RGB 颜色空间的转换
这里一定要注意 sws_getCachedContext 函数参数的设置问题,一旦设置错了,解码出来的图像会出现画面重复出现 3 次等异常现象!
直接使用libjpeg库实现图像数据的jpeg压缩
#include <stdlib.h>
#include <stdio.h>
#include <memory.h>//注意要包含此头文件与sprintf函数相关
extern "C"
{
//ffmpeg相关的头文件
#include "avstring.h"
#include "avformat.h"
#include "swscale.h"
#include "opt.h"
//libjpeg相关的头文件
#include "jpeglib.h"
}
/* Running count of frames written so far; also numbers the output JPEG files. */
int framenum=0 ;
//static int sws_flags = SWS_BICUBIC ;
// Compress a video frame to a JPEG file (implemented with libjpeg)
/*
 * Compress one RGB24 video frame to a JPEG file using libjpeg.
 *
 * pic    - frame whose data[0] holds packed RGB24 pixels
 *          (row stride assumed to be width * 3 bytes)
 * width  - frame width in pixels
 * height - frame height in pixels
 *
 * Output file name is "frameDLPShot-<N>.jpg", where N is the global
 * running frame counter (framenum), incremented on each call.
 */
void draw_jpeg(AVPicture *pic,int width,int height)
{
    char fname[128];
    struct jpeg_compress_struct cinfo;
    struct jpeg_error_mgr jerr;
    JSAMPROW row_pointer[1];
    int row_stride;
    uint8_t *buffer;
    FILE *fp;

    buffer = pic->data[0];
    /* snprintf bounds the write into fname; the original used sprintf and an
     * #ifdef __MINGW32__ whose two branches were byte-identical, so the
     * conditional has been dropped. */
    snprintf(fname, sizeof fname, "%sDLPShot-%d.jpg", "frame", framenum++);
    fp = fopen(fname, "wb");
    if (fp == NULL)
    {
        /* fixed "/n" -> "\n" in the format string */
        av_log(NULL, AV_LOG_ERROR, "fopen %s error\n", fname);
        return;
    }
    cinfo.err = jpeg_std_error(&jerr);
    jpeg_create_compress(&cinfo);
    jpeg_stdio_dest(&cinfo, fp);
    cinfo.image_width = width;
    cinfo.image_height = height;
    cinfo.input_components = 3;      /* RGB24: 3 bytes per pixel */
    cinfo.in_color_space = JCS_RGB;
    jpeg_set_defaults(&cinfo);
    /* libjpeg expects its own boolean TRUE here, not C++ `true`
     * (quality 80, force_baseline = TRUE) */
    jpeg_set_quality(&cinfo, 80, TRUE);
    jpeg_start_compress(&cinfo, TRUE);
    row_stride = width * 3;          /* bytes per source scanline */
    while (cinfo.next_scanline < cinfo.image_height)
    {
        row_pointer[0] = &buffer[cinfo.next_scanline * row_stride];
        (void)jpeg_write_scanlines(&cinfo, row_pointer, 1);
    }
    jpeg_finish_compress(&cinfo);
    fclose(fp);
    jpeg_destroy_compress(&cinfo);
    printf("compress %d frame finished!\n", framenum);  /* fixed "/n" */
    return;
}
/*
 * Demo entry point: open the file "1.asf", find its first video stream,
 * decode every packet, convert each completed frame from the codec's
 * native pixel format to RGB24 with swscale, and save frames 51..99 to
 * disk as JPEG files via draw_jpeg().
 *
 * Returns 0 on success, 1 on any setup or decoding error.
 * (Changed from non-standard `void main()` to standard `int main(void)`.)
 */
int main(void)
{
    AVFormatContext *pFormatCtx;
    const char *ifilename = "1.asf";     /* hard-coded input file */
    AVStream *st;
    AVCodecContext *pCodecCtx;
    AVCodec *pCodec;
    AVFrame *pFrame, *pFrameRGB;
    uint8_t *buffer;
    AVPacket packet;
    struct SwsContext *img_convert_ctx = NULL;
    int numBytes;
    int i, videoStream = -1, frameFinished;

    av_register_all();
    if (av_open_input_file(&pFormatCtx, ifilename, NULL, 0, NULL) != 0)
    {
        printf("open video failed!\n");          /* fixed "/n" */
        return 1;
    }
    /* Read stream information from the container header. */
    if (av_find_stream_info(pFormatCtx) < 0)
    {
        printf("get information failed!\n");
        return 1;
    }
    /* Dump container/stream information for debugging. */
    dump_format(pFormatCtx, 0, ifilename, 0);
    /* Locate the first video stream. */
    for (i = 0; i < pFormatCtx->nb_streams; i++)
    {
        if (pFormatCtx->streams[i]->codec->codec_type == CODEC_TYPE_VIDEO)
        {
            videoStream = i;
            break;
        }
    }
    if (videoStream == -1)
        return 1;                    /* didn't find a video stream */

    /* Get the codec context for the video stream and open its decoder. */
    st = pFormatCtx->streams[videoStream];
    pCodecCtx = st->codec;
    pCodec = avcodec_find_decoder(pCodecCtx->codec_id);
    if (pCodec == NULL)
    {
        fprintf(stderr, "Unsupported codec!\n");
        return 1;
    }
    if (avcodec_open(pCodecCtx, pCodec) < 0)
    {
        /* message corrected: this opens a DEcoder, not an encoder */
        printf("open decoder failed!\n");
        return 1;
    }
    /* One frame for decoded native-format data, one for the RGB copy. */
    pFrame = avcodec_alloc_frame();
    pFrameRGB = avcodec_alloc_frame();
    if (pFrame == NULL || pFrameRGB == NULL)   /* original only checked pFrameRGB */
    {
        printf("allocate AVframe failed!\n");
        return 1;
    }
    /* Allocate the RGB24 pixel buffer and wire it into pFrameRGB's planes
     * (AVFrame is a superset of AVPicture, so the cast is safe). */
    numBytes = avpicture_get_size(PIX_FMT_RGB24, pCodecCtx->width,
                                  pCodecCtx->height);
    buffer = (uint8_t *)av_malloc(numBytes * sizeof(uint8_t));
    if (buffer == NULL)                        /* av_malloc was unchecked */
    {
        printf("allocate RGB buffer failed!\n");
        return 1;
    }
    avpicture_fill((AVPicture *)pFrameRGB, buffer, PIX_FMT_RGB24,
                   pCodecCtx->width, pCodecCtx->height);

    i = 0;
    while (av_read_frame(pFormatCtx, &packet) >= 0)
    {
        /* Is this a packet from the video stream? */
        if (packet.stream_index == videoStream)
        {
            avcodec_decode_video(pCodecCtx, pFrame, &frameFinished,
                                 packet.data, packet.size);
            /* Did we get a complete video frame? */
            if (frameFinished)
            {
                /* Lazily create the native->RGB24 scaler on first use. */
                if (img_convert_ctx == NULL)
                {
                    img_convert_ctx = sws_getCachedContext(img_convert_ctx,
                        pCodecCtx->width, pCodecCtx->height, pCodecCtx->pix_fmt,
                        pCodecCtx->width, pCodecCtx->height, PIX_FMT_RGB24,
                        SWS_X, NULL, NULL, NULL);
                    if (img_convert_ctx == NULL)
                    {
                        printf("can't init convert context!\n");
                        return 1;
                    }
                }
                /* BUGFIX: srcSliceH must be the frame HEIGHT; the original
                 * passed pCodecCtx->width, which mis-converts any frame
                 * whose width != height. */
                sws_scale(img_convert_ctx, pFrame->data, pFrame->linesize,
                          0, pCodecCtx->height,
                          pFrameRGB->data, pFrameRGB->linesize);
                int a = ++i;
                if ((a > 50) && (a < 100))       /* save only frames 51..99 */
                    draw_jpeg((AVPicture *)pFrameRGB,
                              pCodecCtx->width, pCodecCtx->height);
            }
        }
        /* Free the packet that was allocated by av_read_frame. */
        av_free_packet(&packet);
    }
    /* Release resources in reverse order of acquisition. */
    if (img_convert_ctx != NULL)
        sws_freeContext(img_convert_ctx);   /* was leaked in the original */
    av_free(buffer);
    av_free(pFrameRGB);
    av_free(pFrame);
    avcodec_close(pCodecCtx);
    av_close_input_file(pFormatCtx);
    return 0;
}
相关文章推荐
- 利用ffmpeg0.5 和libjpeg实现抽取视频帧并将其保存为jpeg文件格式程序
- 利用java程序将表格保存成excel格式的文件全代码
- 利用ffmpeg将YUV420P转成jpg格式文件,保存
- 利用ffmpeg截取视频图像并保存为ppm和jpg图片格式
- javaCV开发详解之4:转流器实现(也可作为本地收流器、推流器,新增添加图片及文字水印,视频图像帧保存),实现rtsp/rtmp/本地文件转发到rtmp流媒体服务器(基于javaCV-FFMPEG)
- 利用Winpcap的pcap_dump()保存的文件格式
- 完成一个学生管理程序,使用学号作为键添加5个学生对象,并可以将全部信息保存在文件中,可以实现对学生信息的学号查找,输出全部学生信息的功能。
- 使用FFMPEG编码保存MPEG-1/MPEG-2文件格式
- 用java程序调用ffmpeg执行视频文件格式转换flv
- Asp.net利用一般处理程序实现文件下载功能
- 利用 js-xlsx 实现 Excel 文件导入并解析Excel数据成json格式的数据并且获取其中某列数据
- 利用JS实现常用格式文件下载
- 利用ffmpeg实现视频格式转换成flv,保证能成功运行
- 利用FFmpeg将Jpeg图片转为任意视频容器格式
- 用java程序调用ffmpeg执行视频文件格式转换flv
- javaCV开发详解之4:转流器实现(也可作为本地收流器、推流器,新增添加图片及文字水印,视频图像帧保存),实现rtsp/rtmp/本地文件转发到rtmp流媒体服务器(基于javaCV-FFMPEG)
- 利用小马下载全站程序,(php inc格式文件包括目录结构)的py代码
- 位图文件(BMP)格式以及Linux下C程序实现
- [视频处理]用java程序调用ffmpeg执行视频文件格式转换flv
- 利用POI抽取PPT中的图片并保存在文件中