ffmpeg解码JPG和PNG等图片 2013-05-15 09:50:22 分类: 嵌入式 【特别提醒:本文写作时,贴上去的代码,"\n"回车符号的"\"没有了,不知道为啥,所以阅读代码时
2017-10-13 13:20
543 查看
ffmpeg解码JPG和PNG等图片 2013-05-15
09:50:22
分类: 嵌入式
【特别提醒:本文写作时,贴上去的代码,"\n"回车符号的"\"没有了,不知道为啥,所以阅读代码时请注意区分,或者欢迎到我的CSDN网站阅读
http://blog.csdn.net/jgf_ntu/article/details/8928977】
一般我们都是用ffmpeg来解码音视频,如果是JPG和PNG等图片呢,其实跟解码视频是一样的,因为视频也是一幅一幅的图片进行解码的,只不过视频的帧是会前后参考的,而JPG等图片来讲,就是独立的一帧而已。
那么,我们参考之前的一篇文章http://blog.chinaunix.net/uid-25272011-id-3633434.html 【一段ffmpeg视频解码为YUV420P的示例代码】 ,稍作修改即可来演示。
同时为了能够保存解码后的图片,我们还需要了解一些YUV或者RGB等各种格式的数据的内存存储方式,这些知识可以参照我之前的另一篇文章《YUV420格式解析》 ,这里详细描述了各种格式的空间存储机制。
一般解码视频时,我们在调用ffmpeg进行解码时,生成的格式一般都是YUV420P的,但解码图片时可能会有各种形式,如YUVJ422P、YUVJ444P、RGB24等等,本文没有采用ffmpeg的sws_scale函数做统一的转
换,为的是记录如何来存储这些解码后的图片。
先给出如何从ffmpeg的Frame结构体中保存上述的四种解码后的数据:
点击(此处)折叠或打开
/**
 * Save one decoded YUV420P frame to the global output file `pfout`
 * as raw planar YUV: the full Y plane, then the U plane, then the V plane.
 *
 * In YUV420P the chroma planes are subsampled 2:1 both horizontally and
 * vertically, so U and V are each (width/2) x (height/2).  Rows are
 * written one at a time because linesize[] may include padding bytes
 * beyond the visible width.
 *
 * Note: this article's scrape lost every "\" before "n" in string
 * literals; the escapes are restored here.
 */
void yuv420p_save(AVFrame *pFrame, AVCodecContext *pCodecCtx)
{
    int i = 0;
    int width = pCodecCtx->width, height = pCodecCtx->height;
    int height_half = height / 2, width_half = width / 2;
    int y_wrap = pFrame->linesize[0];
    int u_wrap = pFrame->linesize[1];
    int v_wrap = pFrame->linesize[2];
    unsigned char *y_buf = pFrame->data[0];
    unsigned char *u_buf = pFrame->data[1];
    unsigned char *v_buf = pFrame->data[2];

    /* save Y plane (full resolution) */
    for (i = 0; i < height; i++)
        fwrite(y_buf + i * y_wrap, 1, width, pfout);
    fprintf(stderr, "===>save Y success\n");

    /* save U plane (quarter size) */
    for (i = 0; i < height_half; i++)
        fwrite(u_buf + i * u_wrap, 1, width_half, pfout);
    fprintf(stderr, "===>save U success\n");

    /* save V plane (quarter size) */
    for (i = 0; i < height_half; i++)
        fwrite(v_buf + i * v_wrap, 1, width_half, pfout);
    fprintf(stderr, "===>save V success\n");

    fflush(pfout);
}
/**
 * Save one decoded YUV422P (or YUVJ422P) frame to the global output
 * file `pfout` as raw planar YUV.
 *
 * In YUV422P the chroma planes are subsampled 2:1 horizontally only,
 * so U and V are each (width/2) x height (full-height loops below).
 * The unused `height_half` from the original listing is dropped.
 */
void yuv422p_save(AVFrame *pFrame, AVCodecContext *pCodecCtx)
{
    int i = 0;
    int width = pCodecCtx->width, height = pCodecCtx->height;
    int width_half = width / 2;
    int y_wrap = pFrame->linesize[0];
    int u_wrap = pFrame->linesize[1];
    int v_wrap = pFrame->linesize[2];
    unsigned char *y_buf = pFrame->data[0];
    unsigned char *u_buf = pFrame->data[1];
    unsigned char *v_buf = pFrame->data[2];

    /* save Y plane (full resolution) */
    for (i = 0; i < height; i++)
        fwrite(y_buf + i * y_wrap, 1, width, pfout);
    fprintf(stderr, "===>save Y success\n");

    /* save U plane (half width, full height) */
    for (i = 0; i < height; i++)
        fwrite(u_buf + i * u_wrap, 1, width_half, pfout);
    fprintf(stderr, "===>save U success\n");

    /* save V plane (half width, full height) */
    for (i = 0; i < height; i++)
        fwrite(v_buf + i * v_wrap, 1, width_half, pfout);
    fprintf(stderr, "===>save V success\n");

    fflush(pfout);
}
/**
 * Save one decoded RGB24 frame to the global output file `pfout` as a
 * binary PPM (P6) image, viewable with any image viewer.
 *
 * Bug fixed vs. the scraped listing: the header string had lost its
 * "\n" escapes ("P6n%d %dn255n"), which writes literal 'n' characters
 * and produces an invalid PPM file.
 */
void rgb24_save(AVFrame *pFrame, AVCodecContext *pCodecCtx)
{
    int i = 0;
    int width = pCodecCtx->width, height = pCodecCtx->height;

    /* PPM header: magic number, dimensions, max sample value */
    fprintf(pfout, "P6\n%d %d\n255\n", width, height);

    /* pixel rows; linesize[0] may exceed width*3 due to padding */
    for (i = 0; i < height; i++)
        fwrite(pFrame->data[0] + i * pFrame->linesize[0], 1, width * 3, pfout);
    fflush(pfout);
}
/**
 * Save one decoded YUV444P (or YUVJ444P) frame to the global output
 * file `pfout` as raw planar YUV.
 *
 * In YUV444P there is no chroma subsampling: Y, U and V are all
 * width x height, so all three loops use the full dimensions.
 */
void yuv444p_save(AVFrame *pFrame, AVCodecContext *pCodecCtx)
{
    int i = 0;
    int width = pCodecCtx->width, height = pCodecCtx->height;
    int y_wrap = pFrame->linesize[0];
    int u_wrap = pFrame->linesize[1];
    int v_wrap = pFrame->linesize[2];
    unsigned char *y_buf = pFrame->data[0];
    unsigned char *u_buf = pFrame->data[1];
    unsigned char *v_buf = pFrame->data[2];

    /* save Y plane */
    for (i = 0; i < height; i++)
        fwrite(y_buf + i * y_wrap, 1, width, pfout);
    fprintf(stderr, "===>save Y success\n");

    /* save U plane (full size) */
    for (i = 0; i < height; i++)
        fwrite(u_buf + i * u_wrap, 1, width, pfout);
    fprintf(stderr, "===>save U success\n");

    /* save V plane (full size) */
    for (i = 0; i < height; i++)
        fwrite(v_buf + i * v_wrap, 1, width, pfout);
    fprintf(stderr, "===>save V success\n");

    fflush(pfout);
}
可以对照各种格式看一下代码,应该是很好理解的,下面是其余的main代码,可以编译运行
点击(此处)折叠或打开
/**
* decode picture by ffmpeg-1.0 for jpg and png ...
*
* 2013-05-14
* juguofeng<jgfntu@gmail.com>
*/
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <libavcodec/avcodec.h>
#include <libavformat/avformat.h>
FILE *pfout = NULL;
char ffrvout[128] = { 0 };
/* how many yuv pic you want to save */
#define FRAME_NUM 1
/* enable video demux data save to file */
//#define
ENABLE_DEMUX_SAVE
/* enable yuv pic save to file */
#define ENABLE_YUV_SAVE
/* enable print each video
bytes */
#define ENABLE_PRINT_FRAME_BYTES
/* how many bytes you want to print */
#define PRINT_BYTES 30
/**
* main thread
*/
int main(int argc, char *argv[])
{
int i;
char szFileName[128] = {0};
int decLen = 0;
int frame = 0;
AVCodecContext *pCodecCtx = NULL;
AVFrame *pFrame = NULL;
AVCodec *pCodec = NULL;
AVFormatContext *pFormatCtx = NULL;
if(argc != 3)
{
fprintf(stderr, "ERROR:need
3 argument!n");
exit(-1);
}
sprintf(szFileName, "%s", argv[1]);
#ifdef ENABLE_DEMUX_SAVE
FILE* frvdemux = fopen("rvdemuxout.rm","wb+");
if (NULL == frvdemux)
{
fprintf(stderr, "create
rvdemuxout file failedn");
exit(1);
}
#endif
/* output
yuv file name */
sprintf(ffrvout, "%s", argv[2]);
pfout = fopen(ffrvout, "wb+");
if (NULL == pfout)
{
printf("create output
file failedn");
exit(1);
}
printf("==========> Begin
test ffmpeg call ffmpeg rv decodern");
av_register_all();
/* Open
input video file */
//printf("before
avformat_open_input [%s]n", szFileName);
if(avformat_open_input(&pFormatCtx, szFileName, NULL, NULL)!= 0)
{
fprintf(stderr, "Couldn't
open input filen");
return -1;
}
//printf("after
avformat_open_inputn");
/* Retrieve
stream information */
if(av_find_stream_info(pFormatCtx) < 0)
{
printf("av_find_stream_info
ERRORn");
return -1;
}
//printf("after
av_find_stream_info, n");
/* Find
the first video stream */
int videoStream = -1;
printf("==========> pFormatCtx->nb_streams
= %dn", pFormatCtx->nb_streams);
for(i = 0; i < pFormatCtx->nb_streams; i++) {
if(pFormatCtx->streams[i]->codec->codec_type == AVMEDIA_TYPE_VIDEO) {
videoStream = i;
printf("the first
video stream index: videoStream = %dn",videoStream);
break;
}
}
if(videoStream == -1)
return -1; // Didn't
find a video stream
/* Get a
pointer to the codec context for the
video stream */
pCodecCtx = pFormatCtx->streams[videoStream]->codec;
printf("pCodecCtx->codec_id
= %dn", pCodecCtx->codec_id);
pCodec = avcodec_find_decoder(pCodecCtx->codec_id);
if(pCodec == NULL) {
fprintf(stderr, "can
not find decoder!n");
return -1;
}
/* Open
codec */
if(avcodec_open(pCodecCtx, pCodec)<0)
{
printf("cannot open software
codecn");
return -1; // Could not open
codec
}
printf("==========> Open software
codec successn");
pFrame = avcodec_alloc_frame();
if(pFrame == NULL)
{
fprintf(stderr, "avcodec_alloc_frame()
ERRORn");
return -1;
}
/* flag
whether we get a decoded yuv frame */
int frameFinished;
int packetno = 0;
AVPacket packet;
av_init_packet(&packet);
while(av_read_frame(pFormatCtx, &packet) >= 0) {
//printf("[main]avpkt->slice_count=%dn", packet.sliceNum);
/* Is this
a packet from the video stream? */
if(packet.stream_index == videoStream) {
packetno++;
#ifdef ENABLE_PRINT_FRAME_BYTES
if ( 1 ) {
int i;
int size = packet.size < PRINT_BYTES ? packet.size : PRINT_BYTES;
unsigned char *data = packet.data;
printf("===>[%5d]
[", packet.size);
for (i = 0; i < size; i++)
printf("%02x ", data[i]);
printf("]n");
}
#endif
#ifdef ENABLE_DEMUX_SAVE
fwrite(packet.data, 1, packet.size, frvdemux);
#endif
//printf("[the
%d packet]packet.size = %dn", packetno++, packet.size);
while (packet.size > 0) {
decLen = avcodec_decode_video2(pCodecCtx, pFrame, &frameFinished, &packet);
//printf("[video_decode_example]after
avcodec_decode_video2,decoded=%dn",decLen);
if (decLen < 0) {
 
4000
; fprintf(stderr, "[video_decode_example]Error
while decoding frame %dn", frame);
//exit(1);
/* FIXME if decode
one frame err, ignore this
frame */
decLen = packet.size;
}
if (frameFinished) {
printf("got
a yuv framen");
//printf(stderr, "[video_decode_example]saving
frame %3dn", frame);
/* the
picture is allocated by the decoder. no
need to free it */
if (frame == 0) {
printf("[video_decode_example]picture->linesize[0]=%d,
c->width=%d,c->height=%dn",
pFrame->linesize[0], pCodecCtx->width, pCodecCtx->height);
printf("===>YUV
format = %dn", pFrame->format);
}
#ifdef ENABLE_YUV_SAVE
/* save
yuv pic */
if (frame < FRAME_NUM) {
switch (pFrame->format) {
case 0 : /* YUV420P */
yuv420p_save(pFrame, pCodecCtx);
break;
case 2 : /* RGB24 */
rgb24_save(pFrame, pCodecCtx);
break;
case 13 : /* YUVJ422P */
yuv422p_save(pFrame, pCodecCtx);
break;
case 14 : /* YUVJ444P */
yuv444p_save(pFrame, pCodecCtx);
break;
default :
fprintf(stderr, "unsupport
YUV format for savingn");
break;
}
fprintf(stderr, "===>save
pic successn");
}
#endif
/* frame
index grow */
frame++;
}
//printf("===========>
%dn", decLen);
/* left data in pkt , go on decoding */
packet.data += decLen;
packet.size -= decLen;
}
if (frame == FRAME_NUM) {
printf("==========>
decoded [%d pkt frames] ---> save [%d YUV frames], enough to stop!n", packetno, FRAME_NUM);
break;
}
}
/* FIXME
no need free in this file */
//printf("free
packet that was allocated by av_read_framen");
// Free
the packet that was allocated by av_read_frame
//av_free_packet(&packet);
}
printf("decoding job down!
begin to freen");
/* Free
the YUV frame */
av_free(pFrame);
/* Close
the codec */
avcodec_close(pCodecCtx);
/* Close
the video file */
av_close_input_file(pFormatCtx);
fclose(pfout);
printf("==========> END-OKn");
return 0;
}
最后是Makefile文件
点击(此处)折叠或打开
# Use pkg-config for getting CFLAGS and LDFLAGS for the ffmpeg libraries.
FFMPEG_LIBS = libavdevice libavformat libavfilter libavcodec libswscale libavutil

CFLAGS  += $(shell pkg-config --cflags $(FFMPEG_LIBS))
LDFLAGS += $(shell pkg-config --libs $(FFMPEG_LIBS))

EXAMPLES = pic_dec
OBJS = $(addsuffix .o,$(EXAMPLES))

# link: object -> executable
%: %.o
	$(CC) $< $(LDFLAGS) -o $@

# compile: source -> object
%.o: %.c
	$(CC) $< $(CFLAGS) -c -o $@

# GNU make requires the uppercase .PHONY (the original ".phony" is ignored)
.PHONY: all clean

all: $(OBJS) $(EXAMPLES)

clean:
	rm -rf $(EXAMPLES) $(OBJS)
注意如果是自己编译的ffmpeg-1.0等版本,安装到例如/usr/local/目录的话,需要在环境变量中设置
PKG_CONFIG_PATH和LD_LIBRARY_PATH,指定到/usr/local/lib/pkgconfig和/usr/local/lib/目录(如果以后要利用你的PC来交叉编译如VLC等开源代码,最好将这两个变量注释掉,因为交叉编译时的configure脚本会根据
这个配置错误的检查到PC也就是X86结构的lib,这个显然是不对的,会让VLC模块错误的认为你的机子上有了一些第三方的库,但VLC并不知道这是X86结构的)
09:50:22
分类: 嵌入式
【特别提醒:本文写作时,贴上去的代码,"\n"回车符号的"\"没有了,不知道为啥,所以阅读代码时请注意区分,或者欢迎到我的CSDN网站阅读
http://blog.csdn.net/jgf_ntu/article/details/8928977】
一般我们都是用ffmpeg来解码音视频,如果是JPG和PNG等图片呢,其实跟解码视频是一样的,因为视频也是一幅一幅的图片进行解码的,只不过视频的帧是会前后参考的,而JPG等图片来讲,就是独立的一帧而已。
那么,我们参考之前的一篇文章http://blog.chinaunix.net/uid-25272011-id-3633434.html 【一段ffmpeg视频解码为YUV420P的示例代码】 ,稍作修改即可来演示。
同时为了能够保存解码后的图片,我们还需要了解一些YUV或者RGB等各种格式的数据的内存存储方式,这些知识可以参照我之前的另一篇文章《YUV420格式解析》 ,这里详细描述了各种格式的空间存储机制。
一般解码视频时,我们在调用ffmpeg进行解码时,生成的格式一般都是YUV420P的,但解码图片时可能会有各种形式,如YUVJ422P、YUVJ444P、RGB24等等,本文没有采用ffmpeg的sws_scale函数做统一的转
换,为的是记录如何来存储这些解码后的图片。
先给出如何从ffmpeg的Frame结构体中保存上述的四种解码后的数据:
点击(此处)折叠或打开
/**
 * Save one decoded YUV420P frame to the global output file `pfout`
 * as raw planar YUV: the full Y plane, then the U plane, then the V plane.
 *
 * In YUV420P the chroma planes are subsampled 2:1 both horizontally and
 * vertically, so U and V are each (width/2) x (height/2).  Rows are
 * written one at a time because linesize[] may include padding bytes
 * beyond the visible width.
 *
 * Note: this article's scrape lost every "\" before "n" in string
 * literals; the escapes are restored here.
 */
void yuv420p_save(AVFrame *pFrame, AVCodecContext *pCodecCtx)
{
    int i = 0;
    int width = pCodecCtx->width, height = pCodecCtx->height;
    int height_half = height / 2, width_half = width / 2;
    int y_wrap = pFrame->linesize[0];
    int u_wrap = pFrame->linesize[1];
    int v_wrap = pFrame->linesize[2];
    unsigned char *y_buf = pFrame->data[0];
    unsigned char *u_buf = pFrame->data[1];
    unsigned char *v_buf = pFrame->data[2];

    /* save Y plane (full resolution) */
    for (i = 0; i < height; i++)
        fwrite(y_buf + i * y_wrap, 1, width, pfout);
    fprintf(stderr, "===>save Y success\n");

    /* save U plane (quarter size) */
    for (i = 0; i < height_half; i++)
        fwrite(u_buf + i * u_wrap, 1, width_half, pfout);
    fprintf(stderr, "===>save U success\n");

    /* save V plane (quarter size) */
    for (i = 0; i < height_half; i++)
        fwrite(v_buf + i * v_wrap, 1, width_half, pfout);
    fprintf(stderr, "===>save V success\n");

    fflush(pfout);
}
/**
 * Save one decoded YUV422P (or YUVJ422P) frame to the global output
 * file `pfout` as raw planar YUV.
 *
 * In YUV422P the chroma planes are subsampled 2:1 horizontally only,
 * so U and V are each (width/2) x height (full-height loops below).
 * The unused `height_half` from the original listing is dropped.
 */
void yuv422p_save(AVFrame *pFrame, AVCodecContext *pCodecCtx)
{
    int i = 0;
    int width = pCodecCtx->width, height = pCodecCtx->height;
    int width_half = width / 2;
    int y_wrap = pFrame->linesize[0];
    int u_wrap = pFrame->linesize[1];
    int v_wrap = pFrame->linesize[2];
    unsigned char *y_buf = pFrame->data[0];
    unsigned char *u_buf = pFrame->data[1];
    unsigned char *v_buf = pFrame->data[2];

    /* save Y plane (full resolution) */
    for (i = 0; i < height; i++)
        fwrite(y_buf + i * y_wrap, 1, width, pfout);
    fprintf(stderr, "===>save Y success\n");

    /* save U plane (half width, full height) */
    for (i = 0; i < height; i++)
        fwrite(u_buf + i * u_wrap, 1, width_half, pfout);
    fprintf(stderr, "===>save U success\n");

    /* save V plane (half width, full height) */
    for (i = 0; i < height; i++)
        fwrite(v_buf + i * v_wrap, 1, width_half, pfout);
    fprintf(stderr, "===>save V success\n");

    fflush(pfout);
}
/**
 * Save one decoded RGB24 frame to the global output file `pfout` as a
 * binary PPM (P6) image, viewable with any image viewer.
 *
 * Bug fixed vs. the scraped listing: the header string had lost its
 * "\n" escapes ("P6n%d %dn255n"), which writes literal 'n' characters
 * and produces an invalid PPM file.
 */
void rgb24_save(AVFrame *pFrame, AVCodecContext *pCodecCtx)
{
    int i = 0;
    int width = pCodecCtx->width, height = pCodecCtx->height;

    /* PPM header: magic number, dimensions, max sample value */
    fprintf(pfout, "P6\n%d %d\n255\n", width, height);

    /* pixel rows; linesize[0] may exceed width*3 due to padding */
    for (i = 0; i < height; i++)
        fwrite(pFrame->data[0] + i * pFrame->linesize[0], 1, width * 3, pfout);
    fflush(pfout);
}
/**
 * Save one decoded YUV444P (or YUVJ444P) frame to the global output
 * file `pfout` as raw planar YUV.
 *
 * In YUV444P there is no chroma subsampling: Y, U and V are all
 * width x height, so all three loops use the full dimensions.
 */
void yuv444p_save(AVFrame *pFrame, AVCodecContext *pCodecCtx)
{
    int i = 0;
    int width = pCodecCtx->width, height = pCodecCtx->height;
    int y_wrap = pFrame->linesize[0];
    int u_wrap = pFrame->linesize[1];
    int v_wrap = pFrame->linesize[2];
    unsigned char *y_buf = pFrame->data[0];
    unsigned char *u_buf = pFrame->data[1];
    unsigned char *v_buf = pFrame->data[2];

    /* save Y plane */
    for (i = 0; i < height; i++)
        fwrite(y_buf + i * y_wrap, 1, width, pfout);
    fprintf(stderr, "===>save Y success\n");

    /* save U plane (full size) */
    for (i = 0; i < height; i++)
        fwrite(u_buf + i * u_wrap, 1, width, pfout);
    fprintf(stderr, "===>save U success\n");

    /* save V plane (full size) */
    for (i = 0; i < height; i++)
        fwrite(v_buf + i * v_wrap, 1, width, pfout);
    fprintf(stderr, "===>save V success\n");

    fflush(pfout);
}
可以对照各种格式看一下代码,应该是很好理解的,下面是其余的main代码,可以编译运行
点击(此处)折叠或打开
/**
* decode picture by ffmpeg-1.0 for jpg and png ...
*
* 2013-05-14
* juguofeng<jgfntu@gmail.com>
*/
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <libavcodec/avcodec.h>
#include <libavformat/avformat.h>
FILE *pfout = NULL;
char ffrvout[128] = { 0 };
/* how many yuv pic you want to save */
#define FRAME_NUM 1
/* enable video demux data save to file */
//#define
ENABLE_DEMUX_SAVE
/* enable yuv pic save to file */
#define ENABLE_YUV_SAVE
/* enable print each video
bytes */
#define ENABLE_PRINT_FRAME_BYTES
/* how many bytes you want to print */
#define PRINT_BYTES 30
/**
* main thread
*/
int main(int argc, char *argv[])
{
int i;
char szFileName[128] = {0};
int decLen = 0;
int frame = 0;
AVCodecContext *pCodecCtx = NULL;
AVFrame *pFrame = NULL;
AVCodec *pCodec = NULL;
AVFormatContext *pFormatCtx = NULL;
if(argc != 3)
{
fprintf(stderr, "ERROR:need
3 argument!n");
exit(-1);
}
sprintf(szFileName, "%s", argv[1]);
#ifdef ENABLE_DEMUX_SAVE
FILE* frvdemux = fopen("rvdemuxout.rm","wb+");
if (NULL == frvdemux)
{
fprintf(stderr, "create
rvdemuxout file failedn");
exit(1);
}
#endif
/* output
yuv file name */
sprintf(ffrvout, "%s", argv[2]);
pfout = fopen(ffrvout, "wb+");
if (NULL == pfout)
{
printf("create output
file failedn");
exit(1);
}
printf("==========> Begin
test ffmpeg call ffmpeg rv decodern");
av_register_all();
/* Open
input video file */
//printf("before
avformat_open_input [%s]n", szFileName);
if(avformat_open_input(&pFormatCtx, szFileName, NULL, NULL)!= 0)
{
fprintf(stderr, "Couldn't
open input filen");
return -1;
}
//printf("after
avformat_open_inputn");
/* Retrieve
stream information */
if(av_find_stream_info(pFormatCtx) < 0)
{
printf("av_find_stream_info
ERRORn");
return -1;
}
//printf("after
av_find_stream_info, n");
/* Find
the first video stream */
int videoStream = -1;
printf("==========> pFormatCtx->nb_streams
= %dn", pFormatCtx->nb_streams);
for(i = 0; i < pFormatCtx->nb_streams; i++) {
if(pFormatCtx->streams[i]->codec->codec_type == AVMEDIA_TYPE_VIDEO) {
videoStream = i;
printf("the first
video stream index: videoStream = %dn",videoStream);
break;
}
}
if(videoStream == -1)
return -1; // Didn't
find a video stream
/* Get a
pointer to the codec context for the
video stream */
pCodecCtx = pFormatCtx->streams[videoStream]->codec;
printf("pCodecCtx->codec_id
= %dn", pCodecCtx->codec_id);
pCodec = avcodec_find_decoder(pCodecCtx->codec_id);
if(pCodec == NULL) {
fprintf(stderr, "can
not find decoder!n");
return -1;
}
/* Open
codec */
if(avcodec_open(pCodecCtx, pCodec)<0)
{
printf("cannot open software
codecn");
return -1; // Could not open
codec
}
printf("==========> Open software
codec successn");
pFrame = avcodec_alloc_frame();
if(pFrame == NULL)
{
fprintf(stderr, "avcodec_alloc_frame()
ERRORn");
return -1;
}
/* flag
whether we get a decoded yuv frame */
int frameFinished;
int packetno = 0;
AVPacket packet;
av_init_packet(&packet);
while(av_read_frame(pFormatCtx, &packet) >= 0) {
//printf("[main]avpkt->slice_count=%dn", packet.sliceNum);
/* Is this
a packet from the video stream? */
if(packet.stream_index == videoStream) {
packetno++;
#ifdef ENABLE_PRINT_FRAME_BYTES
if ( 1 ) {
int i;
int size = packet.size < PRINT_BYTES ? packet.size : PRINT_BYTES;
unsigned char *data = packet.data;
printf("===>[%5d]
[", packet.size);
for (i = 0; i < size; i++)
printf("%02x ", data[i]);
printf("]n");
}
#endif
#ifdef ENABLE_DEMUX_SAVE
fwrite(packet.data, 1, packet.size, frvdemux);
#endif
//printf("[the
%d packet]packet.size = %dn", packetno++, packet.size);
while (packet.size > 0) {
decLen = avcodec_decode_video2(pCodecCtx, pFrame, &frameFinished, &packet);
//printf("[video_decode_example]after
avcodec_decode_video2,decoded=%dn",decLen);
if (decLen < 0) {
 
4000
; fprintf(stderr, "[video_decode_example]Error
while decoding frame %dn", frame);
//exit(1);
/* FIXME if decode
one frame err, ignore this
frame */
decLen = packet.size;
}
if (frameFinished) {
printf("got
a yuv framen");
//printf(stderr, "[video_decode_example]saving
frame %3dn", frame);
/* the
picture is allocated by the decoder. no
need to free it */
if (frame == 0) {
printf("[video_decode_example]picture->linesize[0]=%d,
c->width=%d,c->height=%dn",
pFrame->linesize[0], pCodecCtx->width, pCodecCtx->height);
printf("===>YUV
format = %dn", pFrame->format);
}
#ifdef ENABLE_YUV_SAVE
/* save
yuv pic */
if (frame < FRAME_NUM) {
switch (pFrame->format) {
case 0 : /* YUV420P */
yuv420p_save(pFrame, pCodecCtx);
break;
case 2 : /* RGB24 */
rgb24_save(pFrame, pCodecCtx);
break;
case 13 : /* YUVJ422P */
yuv422p_save(pFrame, pCodecCtx);
break;
case 14 : /* YUVJ444P */
yuv444p_save(pFrame, pCodecCtx);
break;
default :
fprintf(stderr, "unsupport
YUV format for savingn");
break;
}
fprintf(stderr, "===>save
pic successn");
}
#endif
/* frame
index grow */
frame++;
}
//printf("===========>
%dn", decLen);
/* left data in pkt , go on decoding */
packet.data += decLen;
packet.size -= decLen;
}
if (frame == FRAME_NUM) {
printf("==========>
decoded [%d pkt frames] ---> save [%d YUV frames], enough to stop!n", packetno, FRAME_NUM);
break;
}
}
/* FIXME
no need free in this file */
//printf("free
packet that was allocated by av_read_framen");
// Free
the packet that was allocated by av_read_frame
//av_free_packet(&packet);
}
printf("decoding job down!
begin to freen");
/* Free
the YUV frame */
av_free(pFrame);
/* Close
the codec */
avcodec_close(pCodecCtx);
/* Close
the video file */
av_close_input_file(pFormatCtx);
fclose(pfout);
printf("==========> END-OKn");
return 0;
}
最后是Makefile文件
点击(此处)折叠或打开
# Use pkg-config for getting CFLAGS and LDFLAGS for the ffmpeg libraries.
FFMPEG_LIBS = libavdevice libavformat libavfilter libavcodec libswscale libavutil

CFLAGS  += $(shell pkg-config --cflags $(FFMPEG_LIBS))
LDFLAGS += $(shell pkg-config --libs $(FFMPEG_LIBS))

EXAMPLES = pic_dec
OBJS = $(addsuffix .o,$(EXAMPLES))

# link: object -> executable
%: %.o
	$(CC) $< $(LDFLAGS) -o $@

# compile: source -> object
%.o: %.c
	$(CC) $< $(CFLAGS) -c -o $@

# GNU make requires the uppercase .PHONY (the original ".phony" is ignored)
.PHONY: all clean

all: $(OBJS) $(EXAMPLES)

clean:
	rm -rf $(EXAMPLES) $(OBJS)
注意如果是自己编译的ffmpeg-1.0等版本,安装到例如/usr/local/目录的话,需要在环境变量中设置
PKG_CONFIG_PATH和LD_LIBRARY_PATH,指定到/usr/local/lib/pkgconfig和/usr/local/lib/目录(如果以后要利用你的PC来交叉编译如VLC等开源代码,最好将这两个变量注释掉,因为交叉编译时的configure脚本会根据
这个配置错误的检查到PC也就是X86结构的lib,这个显然是不对的,会让VLC模块错误的认为你的机子上有了一些第三方的库,但VLC并不知道这是X86结构的)
相关文章推荐
- ffmpeg解码JPG和PNG等图片
- AVA中listfiles怎样查找指定文件夹中后缀名为 ".PNG", ".GIF", ".JPG"的图片
- ffmpeg解码JPG和PNG等图片
- MAC OS用ffmpeg编译代码出现的Undefined symbols for architecture x86_64: "_CGLGetCurrentContext", referenced
- libjpeg库编码图片为jpg(ffmpeg解码视频存储图片:RGB格式位图压缩为jpg格式图片)
- java代码上传图片到FTP服务器——图像“ftp://****/images/**.jpg"因存在错误而无法显示
- 使用系统imgdecmp库解码JPG,GIF,PNG,BMP图片
- ffmpeg 解码 png apng 图片
- SQL0668N 由于表 "db2inst1.test" 上的原因代码 "3",所以不允许操作(解因为LOAD引起的LOAD暂挂状态锁)
- libjpeg库编码图片为jpg(ffmpeg解码视频存储图片:RGB格式位图压缩为jpg格式图片)
- 利用ffmpeg将H264流 解码为RGB 分类: VC++ ffmpeg-SDL-VLC-Live555 2015-08-07 11:39 155人阅读 评论(0) 收藏
- 嵌入式linux------SDL移植(am335x下显示bmp图片) 分类: TI-AM335X ffmpeg-SDL-VLC-Live555 2015-07-28 15:51 131人阅读 评论(0) 收藏
- 利用ffmpeg做视频解码的顺序 分类: ffmpeg-SDL-VLC-Live555 2013-08-16 08:23 806人阅读 评论(0) 收藏
- php图片等比压缩代码,支持jpg,png,gif,验证gif动画
- iOS大文件分片上传和断点续传 标签: iOS大文件分片断点续传获取视频获取图片 2016-05-17 15:52 4881人阅读 评论(8) 收藏 举报 分类: 技术—iOS 版权声明:本文为博
- 嵌入式 RTP通话:视频流(H.264)的传输 分类: ffmpeg-SDL-VLC-Live555 2015-06-05 18:23 188人阅读 评论(0) 收藏
- php图片等比压缩代码,支持jpg,png,gif,验证gif动画
- Qt基于ffmpeg库封装H264成MP4、AVI,png、jpg图片提取
- 嵌入式linux------ffmpeg移植 编码H264(am335x编码H264) 分类: TI-AM335X ffmpeg-SDL-VLC-Live555 arm-linux-Ubuntu 2015-08-04 09:34 5人阅读 评论(0) 收藏
- libpng库编码图片为png(RGB压缩为png图片:与ffmpeg视频解码存储为png图片)