
A simple FFmpeg-based video player on Android: audio playback

2017-11-03 09:24
There is not much to say about audio playback; the approach here follows this article directly: http://www.jianshu.com/p/68cdbee598cd

Here is the code.

Java code

private void init() {
    setContentView(R.layout.activity_main);
    SurfaceView surfaceView = (SurfaceView) findViewById(R.id.surface_view);

    surfaceView.getHolder().addCallback(new SurfaceHolder.Callback() {
        @Override
        public void surfaceCreated(SurfaceHolder holder) {
        }

        @Override
        public void surfaceChanged(final SurfaceHolder holder, int format, int width, int height) {
            // Decode on a background thread; never block the UI thread with FFmpeg calls.
            Thread thread = new Thread() {
                @Override
                public void run() {
                    super.run();
                    String videoPath = "/storage/emulated/0/baiduNetdisk/season09.mp4";
//                    videoPlay(videoPath, holder.getSurface());
                    audioPlay(videoPath);
                }
            };
            thread.start();
        }

        @Override
        public void surfaceDestroyed(SurfaceHolder holder) {
        }
    });
}
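
Note that the hard-coded path lives on external storage. On Android 6.0+ (with a matching targetSdkVersion) the READ_EXTERNAL_STORAGE permission must be granted at runtime in addition to being declared in the manifest, or avformat_open_input will fail on that path. A minimal sketch, assuming the support/androidx compat classes are available and using an arbitrary request code of 1 (this check is not part of the original code):

if (ContextCompat.checkSelfPermission(this, Manifest.permission.READ_EXTERNAL_STORAGE)
        != PackageManager.PERMISSION_GRANTED) {
    // Ask the user for storage access before trying to open the file.
    ActivityCompat.requestPermissions(this,
            new String[]{Manifest.permission.READ_EXTERNAL_STORAGE}, 1);
}
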
public AudioTrack createAudio(int sampleRateInHz, int nb_channels) {
    int channelConfig;
    if (nb_channels == 1) {
        channelConfig = AudioFormat.CHANNEL_OUT_MONO;
    } else if (nb_channels == 2) {
        channelConfig = AudioFormat.CHANNEL_OUT_STEREO;
    } else {
        channelConfig = AudioFormat.CHANNEL_OUT_STEREO;
    }
    int audioFormat = AudioFormat.ENCODING_PCM_16BIT;
    int minBufferSize = AudioTrack.getMinBufferSize(sampleRateInHz,
            channelConfig, audioFormat);

    AudioTrack audio = new AudioTrack(AudioManager.STREAM_MUSIC, // stream type
            sampleRateInHz,   // sample rate of the PCM data, e.g. 44100 for 44.1 kHz
            channelConfig,    // CHANNEL_OUT_STEREO for two channels, CHANNEL_OUT_MONO for one
            audioFormat,      // 8-bit or 16-bit samples; 16-bit covers almost all audio today
            minBufferSize, AudioTrack.MODE_STREAM // stream mode: PCM is written while playing (MODE_STATIC preloads one whole buffer instead)
    );
    // audio.play(); // play() is invoked from the native side via JNI before PCM is written
    return audio;
}
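
The constructor above that takes AudioManager.STREAM_MUSIC is deprecated on newer Android versions. For reference, a minimal sketch of the equivalent setup with AudioTrack.Builder (API 23+); the method name createAudioApi23 and its parameters are assumptions for illustration, mirroring createAudio(), and are not part of the original code:

public AudioTrack createAudioApi23(int sampleRateInHz, int channelConfig, int minBufferSize) {
    return new AudioTrack.Builder()
            .setAudioAttributes(new AudioAttributes.Builder()
                    .setUsage(AudioAttributes.USAGE_MEDIA)            // music/media playback
                    .setContentType(AudioAttributes.CONTENT_TYPE_MUSIC)
                    .build())
            .setAudioFormat(new AudioFormat.Builder()
                    .setEncoding(AudioFormat.ENCODING_PCM_16BIT)      // matches AV_SAMPLE_FMT_S16
                    .setSampleRate(sampleRateInHz)
                    .setChannelMask(channelConfig)
                    .build())
            .setBufferSizeInBytes(minBufferSize)
            .setTransferMode(AudioTrack.MODE_STREAM)                  // stream PCM as it is decoded
            .build();
}
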

The native method that calls into the C++ code:

public native void audioPlay(String path);
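
For completeness, the shared library has to be loaded before this native method can be called. A minimal sketch, assuming the NDK target is named "ffmpegrun" (the actual name depends on your CMake/ndk-build configuration and is not stated in the original):

static {
    System.loadLibrary("ffmpegrun"); // hypothetical library name; use the target name from your build script
}
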

C++ code

#define MAX_AUDIO_FRME_SIZE (48000 * 4)

extern "C"
JNIEXPORT void JNICALL
Java_com_example_ffmpegrun_MainActivity_audioPlay(JNIEnv *env, jobject instance, jstring path_) {
    const char *path = env->GetStringUTFChars(path_, 0);

    av_register_all();
    AVFormatContext *fmt_ctx = avformat_alloc_context();
    if (avformat_open_input(&fmt_ctx, path, NULL, NULL) < 0) {
        env->ReleaseStringUTFChars(path_, path);
        return;
    }
    if (avformat_find_stream_info(fmt_ctx, NULL) < 0) {
        avformat_close_input(&fmt_ctx);
        env->ReleaseStringUTFChars(path_, path);
        return;
    }

    // Find the first audio stream.
    AVStream *avStream = NULL;
    int audio_stream_index = -1;
    for (int i = 0; i < fmt_ctx->nb_streams; i++) {
        if (fmt_ctx->streams[i]->codecpar->codec_type == AVMEDIA_TYPE_AUDIO) {
            avStream = fmt_ctx->streams[i];
            audio_stream_index = i;
            break;
        }
    }
    if (audio_stream_index == -1) {
        avformat_close_input(&fmt_ctx);
        env->ReleaseStringUTFChars(path_, path);
        return;
    }

    // Open the decoder for the audio stream.
    AVCodecContext *codec_ctx = avcodec_alloc_context3(NULL);
    avcodec_parameters_to_context(codec_ctx, avStream->codecpar);
    AVCodec *avCodec = avcodec_find_decoder(codec_ctx->codec_id);
    if (avcodec_open2(codec_ctx, avCodec, NULL) < 0) {
        avcodec_free_context(&codec_ctx);
        avformat_close_input(&fmt_ctx);
        env->ReleaseStringUTFChars(path_, path);
        return;
    }

    // Resample whatever the decoder outputs to packed signed 16-bit stereo PCM,
    // which is the format the Java AudioTrack is configured for.
    SwrContext *swr_ctx = swr_alloc();
    enum AVSampleFormat in_sample_fmt = codec_ctx->sample_fmt;
    enum AVSampleFormat out_sample_fmt = AV_SAMPLE_FMT_S16;
    int in_sample_rate = codec_ctx->sample_rate;
    int out_sample_rate = in_sample_rate;
    uint64_t in_ch_layout = codec_ctx->channel_layout;
    if (in_ch_layout == 0) {
        // Some files carry no channel layout; derive it from the channel count.
        in_ch_layout = (uint64_t) av_get_default_channel_layout(codec_ctx->channels);
    }
    uint64_t out_ch_layout = AV_CH_LAYOUT_STEREO;

    swr_alloc_set_opts(swr_ctx,
                       out_ch_layout, out_sample_fmt, out_sample_rate,
                       in_ch_layout, in_sample_fmt, in_sample_rate,
                       0, NULL);
    swr_init(swr_ctx);

    int out_channel_nb = av_get_channel_layout_nb_channels(out_ch_layout);

    // Ask the Java side to create an AudioTrack matching the output format.
    jclass player_class = env->GetObjectClass(instance);
    jmethodID create_audio_track_mid = env->GetMethodID(player_class, "createAudio",
                                                        "(II)Landroid/media/AudioTrack;");
    jobject audio_track = env->CallObjectMethod(instance, create_audio_track_mid,
                                                out_sample_rate, out_channel_nb);

    jclass audio_track_class = env->GetObjectClass(audio_track);
    jmethodID audio_track_play_mid = env->GetMethodID(audio_track_class, "play", "()V");
    jmethodID audio_track_stop_mid = env->GetMethodID(audio_track_class, "stop", "()V");
    env->CallVoidMethod(audio_track, audio_track_play_mid);

    jmethodID audio_track_write_mid = env->GetMethodID(audio_track_class, "write",
                                                       "([BII)I");

    uint8_t *out_buffer = (uint8_t *) av_malloc(MAX_AUDIO_FRME_SIZE);
    // swr_convert takes the output capacity in samples per channel, not in bytes.
    int max_out_samples = MAX_AUDIO_FRME_SIZE / (out_channel_nb * 2); // 2 bytes per S16 sample

    AVPacket *pkt = av_packet_alloc();
    int ret;
    while (1) {
        if (av_read_frame(fmt_ctx, pkt) < 0) {
            av_packet_unref(pkt);
            break;
        }
        ret = avcodec_send_packet(codec_ctx, pkt);
        if (ret < 0 && ret != AVERROR(EAGAIN) && ret != AVERROR_EOF) {
            av_packet_unref(pkt);
            continue;
        }
        AVFrame *frame = av_frame_alloc();
        ret = avcodec_receive_frame(codec_ctx, frame);
        if (ret < 0 && ret != AVERROR_EOF) {
            av_packet_unref(pkt);
            av_frame_free(&frame);
            continue;
        }

        // Resample the decoded frame and compute the size of the resulting PCM block.
        int out_samples = swr_convert(swr_ctx, &out_buffer, max_out_samples,
                                      (const uint8_t **) frame->data,
                                      frame->nb_samples);
        int out_buffer_size = av_samples_get_buffer_size(NULL, out_channel_nb,
                                                         out_samples, out_sample_fmt,
                                                         1);

        // Copy the PCM into a Java byte[] and hand it to AudioTrack.write().
        jbyteArray audio_sample_array = env->NewByteArray(out_buffer_size);
        jbyte *sample_bytep = env->GetByteArrayElements(audio_sample_array, NULL);
        memcpy(sample_bytep, out_buffer, (size_t) out_buffer_size);
        env->ReleaseByteArrayElements(audio_sample_array, sample_bytep, 0);

        env->CallIntMethod(audio_track, audio_track_write_mid,
                           audio_sample_array, 0, out_buffer_size);

        env->DeleteLocalRef(audio_sample_array);
        av_frame_free(&frame);
        av_packet_unref(pkt);
    }

    env->CallVoidMethod(audio_track, audio_track_stop_mid);
    av_packet_free(&pkt);
    av_free(out_buffer);
    swr_free(&swr_ctx);
    avcodec_close(codec_ctx);
    avcodec_free_context(&codec_ctx);
    avformat_close_input(&fmt_ctx);

    env->ReleaseStringUTFChars(path_, path);
}

Included headers

extern "C" {
#include "libavformat/avformat.h"
#include "libavfilter/avfiltergraph.h"
#include <libswresample/swresample.h>
}

AudioTrack works well here: it plays the sound correctly and needs no extra setup. I have seen others play audio through OpenSL ES instead; I have not looked into it, but it is said to be very powerful.