
Using ffmpeg for image format conversion and scaling: jpg to yuv420 / .mp4 to yuv420

2013-10-24 15:31
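
The code below decodes a JPEG (or the video stream of an .mp4), rescales each frame to CIF (352x288) and writes the raw YUV420P data to the output file. Roughly the same conversion can be checked against the stock ffmpeg/ffplay command-line tools; a minimal sketch, with placeholder file names:

ffmpeg -i input.jpg -s 352x288 -pix_fmt yuv420p -f rawvideo output.yuv
ffplay -f rawvideo -pix_fmt yuv420p -video_size 352x288 output.yuv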
//.h
#ifndef   _JPG_PROCESS_H
#define   _JPG_PROCESS_H

#if !defined PRId64 || PRI_MACROS_BROKEN
# undef PRId64
# define PRId64 "lld"
#endif

#ifndef INT64_C
#define INT64_C(c)  (c ## LL)
#define UINT64_C(c) (c ## ULL)
#endif

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

namespace ffmpeg{
extern "C"
{
#include <libavutil/imgutils.h>
#include <libavutil/samplefmt.h>
#include <libavutil/timestamp.h>
#include <libavformat/avformat.h>
#include <libavutil/parseutils.h>
#include <libswscale/swscale.h>
}
}

using namespace ffmpeg;

struct JpgProcessParam
{
AVFormatContext *fmt_ctx;
AVCodecContext *video_dec_ctx, *audio_dec_ctx;
AVStream *video_stream, *audio_stream;

char *in_filename;
char *out_filename;
char *audio_dst_filename;

FILE *video_dst_file;
FILE *audio_dst_file;

uint8_t *video_dst_data[4];
int      video_dst_linesize[4];
int video_dst_bufsize;

int video_stream_idx, audio_stream_idx;
AVFrame *frame;
AVPacket pkt;
int video_frame_count;
int audio_frame_count;
};

class JpgProcess
{
private:
int decode_packet(int *got_frame, int cached);
int open_codec_context(int *stream_idx, AVFormatContext *fmt_ctx, enum AVMediaType type);
int get_format_from_sample_fmt(const char **fmt, enum AVSampleFormat sample_fmt);
int main_jpg();

JpgProcessParam jpgProcessParam;

public:
bool jpgprocess(char *src_filename, char *dst_filename);

};

#endif
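
//usage sketch: a minimal driver for the class above (not part of the original post; file names are examples only)
#include "jpgprocess.h"

int main()
{
    JpgProcess proc;

    /* decode input.jpg (or the video stream of an .mp4) and write the
     * CIF-scaled raw YUV420P frames to out.yuv */
    if (!proc.jpgprocess((char *)"input.jpg", (char *)"out.yuv"))
        return 1;

    return 0;
}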


//.cpp

#include "jpgprocess.h"

bool JpgProcess::jpgprocess(char *src_filename, char *dst_filename)
{
if ((NULL == src_filename) || (NULL == dst_filename))
return false;

memset(&jpgProcessParam, 0x00, sizeof(jpgProcessParam));

jpgProcessParam.in_filename = src_filename;
jpgProcessParam.out_filename = dst_filename;

/* propagate main_jpg()'s result instead of always reporting success */
return (main_jpg() == 0);
}

int JpgProcess::main_jpg()
{
int ret = 0, got_frame;

/* register all formats and codecs */
av_register_all();

/* open input file, and allocate format context */
if (avformat_open_input(&jpgProcessParam.fmt_ctx, jpgProcessParam.in_filename, NULL, NULL) < 0) {
fprintf(stderr, "Could not open source file %s\n", jpgProcessParam.in_filename);
exit(1);
}

/* retrieve stream information */
if (avformat_find_stream_info(jpgProcessParam.fmt_ctx, NULL) < 0) {
fprintf(stderr, "Could not find stream information\n");
exit(1);
}

if (open_codec_context(&jpgProcessParam.video_stream_idx, jpgProcessParam.fmt_ctx, AVMEDIA_TYPE_VIDEO) >= 0) {
jpgProcessParam.video_stream = jpgProcessParam.fmt_ctx->streams[jpgProcessParam.video_stream_idx];
jpgProcessParam.video_dec_ctx = jpgProcessParam.video_stream->codec;

jpgProcessParam.video_dst_file = fopen(jpgProcessParam.out_filename, "wb");
if (!jpgProcessParam.video_dst_file) {
fprintf(stderr, "Could not open destination file %s\n", jpgProcessParam.out_filename);
ret = 1;
goto end;
}

/* allocate image where the decoded image will be put */
ret = av_image_alloc(jpgProcessParam.video_dst_data, jpgProcessParam.video_dst_linesize,
jpgProcessParam.video_dec_ctx->width, jpgProcessParam.video_dec_ctx->height,
jpgProcessParam.video_dec_ctx->pix_fmt, 1);
if (ret < 0) {
fprintf(stderr, "Could not allocate raw video buffer\n");
goto end;
}

jpgProcessParam.video_dst_bufsize = ret;
}

/* the audio branch is inherited from the demuxing example; jpgprocess()
 * never sets audio_dst_filename, so only open it when a name was given */
if (jpgProcessParam.audio_dst_filename &&
open_codec_context(&jpgProcessParam.audio_stream_idx, jpgProcessParam.fmt_ctx, AVMEDIA_TYPE_AUDIO) >= 0) {
jpgProcessParam.audio_stream = jpgProcessParam.fmt_ctx->streams[jpgProcessParam.audio_stream_idx];
jpgProcessParam.audio_dec_ctx = jpgProcessParam.audio_stream->codec;
jpgProcessParam.audio_dst_file = fopen(jpgProcessParam.audio_dst_filename, "wb");
if (!jpgProcessParam.audio_dst_file) {
fprintf(stderr, "Could not open destination file %s\n", jpgProcessParam.audio_dst_filename);
ret = 1;
goto end;
}
}

/* dump input information to stderr */
av_dump_format(jpgProcessParam.fmt_ctx, 0, jpgProcessParam.in_filename, 0);

if (!jpgProcessParam.audio_stream && !jpgProcessParam.video_stream) {
fprintf(stderr, "Could not find audio or video stream in the input, aborting\n");
ret = 1;
goto end;
}

jpgProcessParam.frame = avcodec_alloc_frame();
if (!jpgProcessParam.frame) {
fprintf(stderr, "Could not allocate frame\n");
ret = AVERROR(ENOMEM);
goto end;
}

/* initialize packet, set data to NULL, let the demuxer fill it */
av_init_packet(&jpgProcessParam.pkt);
jpgProcessParam.pkt.data = NULL;
jpgProcessParam.pkt.size = 0;

if (jpgProcessParam.video_stream)
printf("Demuxing video from file '%s' into '%s'\n", jpgProcessParam.in_filename, jpgProcessParam.out_filename);
if (jpgProcessParam.audio_stream)
printf("Demuxing audio from file '%s' into '%s'\n", jpgProcessParam.in_filename, jpgProcessParam.audio_dst_filename);

/* read frames from the file */
while (av_read_frame(jpgProcessParam.fmt_ctx, &jpgProcessParam.pkt) >= 0) {
AVPacket orig_pkt = jpgProcessParam.pkt;
do {
ret = decode_packet(&got_frame, 0);
if (ret < 0)
break;
jpgProcessParam.pkt.data += ret;
jpgProcessParam.pkt.size -= ret;
} while (jpgProcessParam.pkt.size > 0);
av_free_packet(&orig_pkt);
}

/* flush cached frames */
jpgProcessParam.pkt.data = NULL;
jpgProcessParam.pkt.size = 0;
do {
decode_packet(&got_frame, 1);
} while (got_frame);

printf("Demuxing succeeded.\n");

if (jpgProcessParam.video_stream) {
/* decode_packet() rescales every frame to 352x288 YUV420P before writing,
 * so the playback hint must match those values, not the decoder's */
printf("Play the output video file with the command:\n"
"ffplay -f rawvideo -pix_fmt yuv420p -video_size 352x288 %s\n",
jpgProcessParam.out_filename);
}

if (jpgProcessParam.audio_stream) {
enum AVSampleFormat sfmt = jpgProcessParam.audio_dec_ctx->sample_fmt;
int n_channels = jpgProcessParam.audio_dec_ctx->channels;
const char *fmt;

if (av_sample_fmt_is_planar(sfmt)) {
const char *packed = av_get_sample_fmt_name(sfmt);
printf("Warning: the sample format the decoder produced is planar "
"(%s). This example will output the first channel only.\n",
packed ? packed : "?");
sfmt = av_get_packed_sample_fmt(sfmt);
n_channels = 1;
}

if ((ret = get_format_from_sample_fmt(&fmt, sfmt)) < 0)
goto end;

printf("Play the output audio file with the command:\n"
"ffplay -f %s -ac %d -ar %d %s\n",
fmt, n_channels, jpgProcessParam.audio_dec_ctx->sample_rate,
jpgProcessParam.audio_dst_filename);
}

end:

if (jpgProcessParam.video_dec_ctx)
avcodec_close(jpgProcessParam.video_dec_ctx);
if (jpgProcessParam.audio_dec_ctx)
avcodec_close(jpgProcessParam.audio_dec_ctx);
avformat_close_input(&jpgProcessParam.fmt_ctx);
if (jpgProcessParam.video_dst_file)
fclose(jpgProcessParam.video_dst_file);
if (jpgProcessParam.audio_dst_file)
fclose(jpgProcessParam.audio_dst_file);
av_free(jpgProcessParam.frame);
av_free(jpgProcessParam.video_dst_data[0]);

return ret < 0;
}

int JpgProcess::decode_packet(int *got_frame, int cached)
{
int ret = 0;
int decoded = jpgProcessParam.pkt.size;

if (jpgProcessParam.pkt.stream_index == jpgProcessParam.video_stream_idx) {
/* decode video frame */
ret = avcodec_decode_video2(jpgProcessParam.video_dec_ctx, jpgProcessParam.frame, got_frame, &jpgProcessParam.pkt);
if (ret < 0) {
fprintf(stderr, "Error decoding video frame\n");
return ret;
}

if (*got_frame) {
printf("video_frame%s n:%d coded_n:%d pts:%s\n",
cached ? "(cached)" : "",
jpgProcessParam.video_frame_count++, jpgProcessParam.frame->coded_picture_number,
av_ts2timestr(jpgProcessParam.frame->pts, &jpgProcessParam.video_dec_ctx->time_base));

/* copy decoded frame to destination buffer:
* this is required since rawvideo expects non aligned data */
av_image_copy(jpgProcessParam.video_dst_data, jpgProcessParam.video_dst_linesize,
(const uint8_t **)(jpgProcessParam.frame->data), jpgProcessParam.frame->linesize,
jpgProcessParam.video_dec_ctx->pix_fmt, jpgProcessParam.video_dec_ctx->width, jpgProcessParam.video_dec_ctx->height);

/* write to rawvideo file */
//           fwrite(jpgProcessParam.video_dst_data[0], 1, jpgProcessParam.video_dst_bufsize, jpgProcessParam.video_dst_file);
/* scale the decoded image down to CIF (352x288) YUV420P */
{
uint8_t *dst_data[4];
int dst_linesize[4];
int dst_w = 352, dst_h = 288;
enum AVPixelFormat dst_pix_fmt = AV_PIX_FMT_YUV420P;
int dst_bufsize;
struct SwsContext *sws_ctx;

sws_ctx = sws_getContext(jpgProcessParam.video_dec_ctx->width,
jpgProcessParam.video_dec_ctx->height,
jpgProcessParam.video_dec_ctx->pix_fmt,
dst_w, dst_h, dst_pix_fmt,
SWS_BICUBIC, NULL, NULL, NULL);
if (!sws_ctx) {
fprintf(stderr,
"Impossible to create scale context for the conversion "
"fmt:%s s:%dx%d -> fmt:%s s:%dx%d\n",
av_get_pix_fmt_name(jpgProcessParam.video_dec_ctx->pix_fmt),
jpgProcessParam.video_dec_ctx->width,
jpgProcessParam.video_dec_ctx->height,
av_get_pix_fmt_name(dst_pix_fmt), dst_w, dst_h);

ret = AVERROR(EINVAL);
exit(1);
}

/* allocate source and destination image buffers */
/*
if ((ret = av_image_alloc(src_data, src_linesize,
jpgProcessParam.video_dec_ctx->width, jpgProcessParam.video_dec_ctx->height, src_pix_fmt, 16)) < 0) {
fprintf(stderr, "Could not allocate source image\n");
exit(1);
}
*/
/* buffer is going to be written to rawvideo file, no alignment */
if ((ret = av_image_alloc(dst_data, dst_linesize,
dst_w, dst_h, dst_pix_fmt, 1)) < 0) {
fprintf(stderr, "Could not allocate destination image\n");
exit(1);
}
dst_bufsize = ret;
/* convert to destination format */
sws_scale(sws_ctx, (const uint8_t * const*)jpgProcessParam.video_dst_data,
jpgProcessParam.video_dst_linesize, 0, jpgProcessParam.video_dec_ctx->height, /* srcSliceH is the source height */
dst_data, dst_linesize);
fwrite(dst_data[0], 1, dst_bufsize, jpgProcessParam.video_dst_file);
av_freep(&dst_data[0]);
sws_freeContext(sws_ctx);

}
}
} else if (jpgProcessParam.pkt.stream_index == jpgProcessParam.audio_stream_idx) {
/* decode audio frame */
ret = avcodec_decode_audio4(jpgProcessParam.audio_dec_ctx, jpgProcessParam.frame, got_frame, &jpgProcessParam.pkt);
if (ret < 0) {
fprintf(stderr, "Error decoding audio frame\n");
return ret;
}
/* Some audio decoders decode only part of the packet, and have to be
* called again with the remainder of the packet data.
* Sample: fate-suite/lossless-audio/luckynight-partial.shn
* Also, some decoders might over-read the packet. */
decoded = FFMIN(ret, jpgProcessParam.pkt.size);

if (*got_frame) {
size_t unpadded_linesize = jpgProcessParam.frame->nb_samples * av_get_bytes_per_sample((AVSampleFormat)jpgProcessParam.frame->format);
printf("audio_frame%s n:%d nb_samples:%d pts:%s\n",
cached ? "(cached)" : "",
jpgProcessParam.audio_frame_count++, jpgProcessParam.frame->nb_samples,
av_ts2timestr(jpgProcessParam.frame->pts, &jpgProcessParam.audio_dec_ctx->time_base));

/* Write the raw audio data samples of the first plane. This works
* fine for packed formats (e.g. AV_SAMPLE_FMT_S16). However,
* most audio decoders output planar audio, which uses a separate
* plane of audio samples for each channel (e.g. AV_SAMPLE_FMT_S16P).
* In other words, this code will write only the first audio channel
* in these cases.
* You should use libswresample or libavfilter to convert the frame
* to packed data. */
fwrite(jpgProcessParam.frame->extended_data[0], 1, unpadded_linesize, jpgProcessParam.audio_dst_file);
}
}

return decoded;
}
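
/*
 * Note: decode_packet() above creates and destroys a SwsContext for every
 * decoded frame, which is wasteful when converting an .mp4 with many frames.
 * A possible variant (a sketch, not part of the original code; the helper
 * name is made up) keeps one cached context across calls with
 * sws_getCachedContext() and scales straight from the decoded AVFrame:
 */
static struct SwsContext *cached_sws = NULL;   /* reused across frames */

/* Scale one decoded frame into a caller-allocated 352x288 YUV420P buffer.
 * Free the context once at shutdown with sws_freeContext(cached_sws). */
static int scale_to_cif(const AVCodecContext *dec_ctx, const AVFrame *frm,
                        uint8_t *dst_data[4], int dst_linesize[4])
{
    cached_sws = sws_getCachedContext(cached_sws,
            dec_ctx->width, dec_ctx->height, dec_ctx->pix_fmt, /* source */
            352, 288, AV_PIX_FMT_YUV420P,                      /* CIF target */
            SWS_BICUBIC, NULL, NULL, NULL);
    if (!cached_sws)
        return AVERROR(EINVAL);

    return sws_scale(cached_sws, (const uint8_t * const *)frm->data,
                     frm->linesize, 0, dec_ctx->height,
                     dst_data, dst_linesize);
}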

int JpgProcess::open_codec_context(int *stream_idx, AVFormatContext *fmt_ctx, enum AVMediaType type)
{
int ret;
AVStream *st;
AVCodecContext *dec_ctx = NULL;
AVCodec *dec = NULL;

ret = av_find_best_stream(fmt_ctx, type, -1, -1, NULL, 0);
if (ret < 0) {
fprintf(stderr, "Could not find %s stream in input file '%s'\n",
av_get_media_type_string(type), jpgProcessParam.in_filename);
return ret;
} else {
*stream_idx = ret;
st = fmt_ctx->streams[*stream_idx];

/* find decoder for the stream */
dec_ctx = st->codec;
dec = avcodec_find_decoder(dec_ctx->codec_id);
if (!dec) {
fprintf(stderr, "Failed to find %s codec\n",
av_get_media_type_string(type));
return AVERROR(EINVAL); /* ret still holds the stream index (>= 0) here */
}

if ((ret = avcodec_open2(dec_ctx, dec, NULL)) < 0) {
fprintf(stderr, "Failed to open %s codec\n",
av_get_media_type_string(type));
return ret;
}
}

return 0;
}

int JpgProcess::get_format_from_sample_fmt(const char **fmt, enum AVSampleFormat sample_fmt)
{
int i;
struct sample_fmt_entry {
enum AVSampleFormat sample_fmt; const char *fmt_be, *fmt_le;
} sample_fmt_entries[] = {
{ AV_SAMPLE_FMT_U8,  "u8",    "u8"    },
{ AV_SAMPLE_FMT_S16, "s16be", "s16le" },
{ AV_SAMPLE_FMT_S32, "s32be", "s32le" },
{ AV_SAMPLE_FMT_FLT, "f32be", "f32le" },
{ AV_SAMPLE_FMT_DBL, "f64be", "f64le" },
};
*fmt = NULL;

for (i = 0; i < FF_ARRAY_ELEMS(sample_fmt_entries); i++) {
struct sample_fmt_entry *entry = &sample_fmt_entries[i];
if (sample_fmt == entry->sample_fmt) {
*fmt = AV_NE(entry->fmt_be, entry->fmt_le);
return 0;
}
}

fprintf(stderr,
"sample format %s is not supported as output format\n",
av_get_sample_fmt_name(sample_fmt));
return -1;
}
Tags: ffmpeg jpg yuv scale mp4