
Android Study (9): AudioTrack (1)

2015-09-25
Abstract: AudioTrack

AudioTrack(1)

An Introduction to AudioTrack (this introductory section is reposted)

In Android, sound can be played with either MediaPlayer or AudioTrack, but the two differ significantly: MediaPlayer can play audio files in many formats, such as MP3, AAC, WAV, OGG, and MIDI, while AudioTrack can only play PCM data streams.
In fact, the two are essentially the same under the hood. When MediaPlayer plays audio, the framework layer still creates an AudioTrack, passes the decoded PCM stream to it, and finally AudioFlinger mixes the audio and hands it to the hardware for playback. Playing with AudioTrack simply skips MediaPlayer's decoding stage. MediaPlayer's decoding core is based on OpenCORE, which supports common audio, video, and image formats; codecs are extended through the OpenMAX interface. Therefore, to play an MP3 file with AudioTrack you must supply your own audio decoder, such as libmad; otherwise AudioTrack can only play raw PCM data, such as most WAV-format audio files.
The typical AudioTrack playback flow is as follows:
mAudioMinBufSize = AudioTrack.getMinBufferSize(samplerate,
        AudioFormat.CHANNEL_CONFIGURATION_STEREO,
        AudioFormat.ENCODING_PCM_16BIT);

mAudioTrack = new AudioTrack(AudioManager.STREAM_MUSIC, // stream type:
        // STREAM_ALARM: alarm sounds
        // STREAM_MUSIC: music playback
        // STREAM_RING: ringtones
        // STREAM_SYSTEM: system sounds
        // STREAM_VOICE_CALL: in-call audio
        samplerate,                               // sample rate of the audio data
        AudioFormat.CHANNEL_CONFIGURATION_STEREO, // two-channel stereo output
        AudioFormat.ENCODING_PCM_16BIT,           // 8-bit or 16-bit audio data
        mAudioMinBufSize,
        AudioTrack.MODE_STREAM);                  // data-load mode; STREAM is used here

mAudioTrack.play();                                   // start playback

mAudioTrack.write(audioBuffer, 0, mAudioMinBufSize);  // write data into the track

mAudioTrack.stop();                                   // stop playback
mAudioTrack.release();                                // release the resources

AudioTrack has two data-load modes, MODE_STATIC and MODE_STREAM. STREAM means the caller pushes data into the AudioTrack with one write() call after another; its drawback is that data keeps crossing between the Java layer and the native layer, which costs efficiency. STATIC means the whole clip is placed into a fixed buffer when the track is created and handed to the AudioTrack once, with no further writes; the AudioTrack then plays the buffer on its own. STATIC mode suits small clips such as ringtones.
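
To make the flow above concrete, here is a minimal, self-contained sketch (not from the original post) that synthesizes one second of a 440 Hz tone as 16-bit mono PCM and plays it in MODE_STREAM:

import android.media.AudioFormat;
import android.media.AudioManager;
import android.media.AudioTrack;

public class SinePlayer {
    public static void playTone() {
        final int sampleRate = 44100;
        final int minBuf = AudioTrack.getMinBufferSize(sampleRate,
                AudioFormat.CHANNEL_OUT_MONO,
                AudioFormat.ENCODING_PCM_16BIT);

        // One second of a 440 Hz sine wave as 16-bit mono PCM.
        short[] pcm = new short[sampleRate];
        for (int i = 0; i < pcm.length; i++) {
            pcm[i] = (short) (Math.sin(2 * Math.PI * 440 * i / sampleRate) * Short.MAX_VALUE);
        }

        AudioTrack track = new AudioTrack(AudioManager.STREAM_MUSIC,
                sampleRate,
                AudioFormat.CHANNEL_OUT_MONO,
                AudioFormat.ENCODING_PCM_16BIT,
                Math.max(minBuf, pcm.length * 2),  // buffer size in bytes
                AudioTrack.MODE_STREAM);

        track.play();
        track.write(pcm, 0, pcm.length);  // blocks until the samples are queued
        track.stop();
        track.release();
    }
}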
Source Code Analysis

First, look at:

mAudioMinBufSize = AudioTrack.getMinBufferSize(samplerate,
        AudioFormat.CHANNEL_CONFIGURATION_STEREO,
        AudioFormat.ENCODING_PCM_16BIT);

Source: AudioTrack.java
static public int getMinBufferSize(int sampleRateInHz, int channelConfig, int audioFormat) {
    int channelCount = 0;
    switch(channelConfig) {
    case AudioFormat.CHANNEL_OUT_MONO:
    case AudioFormat.CHANNEL_CONFIGURATION_MONO:
        channelCount = 1;
        break;
    case AudioFormat.CHANNEL_OUT_STEREO:
    case AudioFormat.CHANNEL_CONFIGURATION_STEREO:
        channelCount = 2;   // at most two channels are supported
        break;
    default:
        loge("getMinBufferSize(): Invalid channel configuration.");
        return AudioTrack.ERROR_BAD_VALUE;
    }

    if ((audioFormat != AudioFormat.ENCODING_PCM_16BIT)
        && (audioFormat != AudioFormat.ENCODING_PCM_8BIT)) {
        loge("getMinBufferSize(): Invalid audio format.");
        return AudioTrack.ERROR_BAD_VALUE;   // only PCM8 and PCM16 audio data are supported
    }

    // supported sample-rate range
    if ( (sampleRateInHz < 4000) || (sampleRateInHz > 48000) ) {
        loge("getMinBufferSize(): " + sampleRateInHz + "Hz is not a supported sample rate.");
        return AudioTrack.ERROR_BAD_VALUE;
    }

    // call into native code, which asks the hardware whether these parameters are supported
    int size = native_get_min_buff_size(sampleRateInHz, channelCount, audioFormat);
    if ((size == -1) || (size == 0)) {
        loge("getMinBufferSize(): error querying hardware");
        return AudioTrack.ERROR;
    }
    else {
        return size;
    }
}
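
On the caller side, the return value doubles as an error code, so a robust caller checks it before constructing the track. A short sketch (the parameter values are only examples):

int minBuf = AudioTrack.getMinBufferSize(44100,
        AudioFormat.CHANNEL_CONFIGURATION_STEREO,
        AudioFormat.ENCODING_PCM_16BIT);
if (minBuf == AudioTrack.ERROR_BAD_VALUE || minBuf == AudioTrack.ERROR) {
    // this device rejected 44100 Hz / stereo / PCM16; fall back or abort
    throw new IllegalStateException("Unsupported AudioTrack parameters");
}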
native_get_min_buff_size(sampleRateInHz, channelCount, audioFormat) lives in android_media_AudioTrack.cpp:
static jint android_media_AudioTrack_get_min_buff_size(JNIEnv *env, jobject thiz,
        jint sampleRateInHertz, jint nbChannels, jint audioFormat) {

    int frameCount = 0;
    if (AudioTrack::getMinFrameCount(&frameCount, AudioSystem::DEFAULT,
            sampleRateInHertz) != NO_ERROR) {
        return -1;
    }
    return frameCount * nbChannels * (audioFormat == javaAudioTrackFields.PCM16 ? 2 : 1);
}
One frame equals the number of bytes per sample times the channel count (for 16-bit PCM in stereo, one frame = 2 × 2 = 4 bytes). Sound-card drivers also allocate and manage their internal buffers in units of frames.
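
To make that arithmetic explicit, here is a tiny sketch (a hypothetical helper, not platform code) that converts a frame count to bytes exactly the way the JNI function above does:

// minBufferSizeInBytes = frameCount * channelCount * bytesPerSample,
// mirroring the return statement of android_media_AudioTrack_get_min_buff_size().
static int framesToBytes(int frameCount, int channelCount, boolean pcm16) {
    int bytesPerSample = pcm16 ? 2 : 1;
    return frameCount * channelCount * bytesPerSample;
}
// Example: 1024 frames of 16-bit stereo -> 1024 * 2 * 2 = 4096 bytes.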
Next, look at the AudioTrack constructor:
public AudioTrack(int streamType, int sampleRateInHz, int channelConfig, int audioFormat,
        int bufferSizeInBytes, int mode, int sessionId)
        throws IllegalArgumentException {
    mState = STATE_UNINITIALIZED;

    // remember which looper is associated with the AudioTrack instantiation
    if ((mInitializationLooper = Looper.myLooper()) == null) {
        mInitializationLooper = Looper.getMainLooper();
    }

    audioParamCheck(streamType, sampleRateInHz, channelConfig, audioFormat, mode);

    audioBuffSizeCheck(bufferSizeInBytes);

    if (sessionId < 0) {
        throw (new IllegalArgumentException("Invalid audio session ID: "+sessionId));
    }

    int[] session = new int[1];
    session[0] = sessionId;
    // native initialization
    int initResult = native_setup(new WeakReference<AudioTrack>(this),
            mStreamType, mSampleRate, mChannels, mAudioFormat,
            mNativeBufferSizeInBytes, mDataLoadMode, session);
    if (initResult != SUCCESS) {
        loge("Error code "+initResult+" when initializing AudioTrack.");
        return; // with mState == STATE_UNINITIALIZED
    }

    mSessionId = session[0];

    if (mDataLoadMode == MODE_STATIC) {
        mState = STATE_NO_STATIC_DATA;
    } else {
        mState = STATE_INITIALIZED;
    }
}
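
The STATE_NO_STATIC_DATA branch at the end is why, in MODE_STATIC, the buffer must be filled with write() before play() is called. A minimal sketch of that order of operations (loadRingtonePcm() is a hypothetical helper returning raw 16-bit PCM):

// MODE_STATIC: the whole clip is handed over once, then played from a
// fixed buffer without further per-write Java-to-native copies.
byte[] clip = loadRingtonePcm();  // hypothetical helper
AudioTrack staticTrack = new AudioTrack(AudioManager.STREAM_MUSIC,
        44100,
        AudioFormat.CHANNEL_CONFIGURATION_STEREO,
        AudioFormat.ENCODING_PCM_16BIT,
        clip.length,                  // buffer size = whole clip
        AudioTrack.MODE_STATIC);

staticTrack.write(clip, 0, clip.length);  // leaves STATE_NO_STATIC_DATA
staticTrack.play();                       // playback is legal only after the write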
The setup happens at the native layer, in android_media_AudioTrack.cpp:
static int
android_media_AudioTrack_native_setup(JNIEnv *env, jobject thiz, jobject weak_this,
        jint streamType, jint sampleRateInHertz, jint channels,
        jint audioFormat, jint buffSizeInBytes, jint memoryMode, jintArray jSession)
{
    LOGV("sampleRate=%d, audioFormat(from Java)=%d, channels=%x, buffSize=%d",
        sampleRateInHertz, audioFormat, channels, buffSizeInBytes);
    int afSampleRate;
    int afFrameCount;

    if (AudioSystem::getOutputFrameCount(&afFrameCount, streamType) != NO_ERROR) {
        LOGE("Error creating AudioTrack: Could not get AudioSystem frame count.");
        return AUDIOTRACK_ERROR_SETUP_AUDIOSYSTEM;
    }
    if (AudioSystem::getOutputSamplingRate(&afSampleRate, streamType) != NO_ERROR) {
        LOGE("Error creating AudioTrack: Could not get AudioSystem sampling rate.");
        return AUDIOTRACK_ERROR_SETUP_AUDIOSYSTEM;
    }

    if (!AudioSystem::isOutputChannel(channels)) {
        LOGE("Error creating AudioTrack: invalid channel mask.");
        return AUDIOTRACK_ERROR_SETUP_INVALIDCHANNELMASK;
    }
    // count how many bits in the channel mask are set
    int nbChannels = AudioSystem::popCount(channels);

    // check the stream type
    AudioSystem::stream_type atStreamType;
    if (streamType == javaAudioTrackFields.STREAM_VOICE_CALL) {
        atStreamType = AudioSystem::VOICE_CALL;
    }
    ...
    else if (streamType == javaAudioTrackFields.STREAM_MUSIC) {
        // map the Java-layer constant to the corresponding native value
        atStreamType = AudioSystem::MUSIC;
    }

    // check the format.
    // This function was called from Java, so we compare the format against the Java constants
    if ((audioFormat != javaAudioTrackFields.PCM16) && (audioFormat != javaAudioTrackFields.PCM8)) {
        LOGE("Error creating AudioTrack: unsupported audio format.");
        return AUDIOTRACK_ERROR_SETUP_INVALIDFORMAT;
    }

    // compute the frame count
    int bytesPerSample = audioFormat == javaAudioTrackFields.PCM16 ? 2 : 1;
    int format = audioFormat == javaAudioTrackFields.PCM16 ?
            AudioSystem::PCM_16_BIT : AudioSystem::PCM_8_BIT;
    int frameCount = buffSizeInBytes / (nbChannels * bytesPerSample);

    AudioTrackJniStorage* lpJniStorage = new AudioTrackJniStorage();

    ...

    // create the native AudioTrack object
    AudioTrack* lpTrack = new AudioTrack();
    if (lpTrack == NULL) {
        LOGE("Error creating uninitialized AudioTrack");
        goto native_track_failure;
    }

    // initialize the native AudioTrack object
    if (memoryMode == javaAudioTrackFields.MODE_STREAM) {

        lpTrack->set(
            atStreamType,       // stream type
            sampleRateInHertz,
            format,             // word length, PCM
            channels,
            frameCount,
            0,                  // flags
            audioCallback, &(lpJniStorage->mCallbackData), // callback, callback data (user)
            0,   // notificationFrames == 0 since not using EVENT_MORE_DATA to feed the AudioTrack
            0,                  // shared mem
            true,               // thread can call Java
            sessionId);         // audio session ID

    } else if (memoryMode == javaAudioTrackFields.MODE_STATIC) {
        // AudioTrack is using shared memory

        if (!lpJniStorage->allocSharedMem(buffSizeInBytes)) {
            LOGE("Error creating AudioTrack in static mode: error creating mem heap base");
            goto native_init_failure;
        }

        lpTrack->set(
            atStreamType,       // stream type
            sampleRateInHertz,
            format,             // word length, PCM
            channels,
            frameCount,
            0,                  // flags
            audioCallback, &(lpJniStorage->mCallbackData), // callback, callback data (user)
            0,   // notificationFrames == 0 since not using EVENT_MORE_DATA to feed the AudioTrack
            lpJniStorage->mMemBase, // shared mem
            true,               // thread can call Java
            sessionId);         // audio session ID
    }

    if (lpTrack->initCheck() != NO_ERROR) {
        LOGE("Error initializing AudioTrack");
        goto native_init_failure;
    }

    nSession = (jint *) env->GetPrimitiveArrayCritical(jSession, NULL);
    if (nSession == NULL) {
        LOGE("Error creating AudioTrack: Error retrieving session id pointer");
        goto native_init_failure;
    }
    // read the audio session ID back from AudioTrack in case we create a new session
    nSession[0] = lpTrack->getSessionId();
    env->ReleasePrimitiveArrayCritical(jSession, nSession, 0);
    nSession = NULL;

    // save our newly created C++ AudioTrack in the "nativeTrackInJavaObj" field
    // of the Java object (in mNativeTrackInJavaObj)
    // -> storing the pointer of the AudioTrack newed at the JNI layer into a field of the
    //    Java object is what ties the native AudioTrack object to the Java AudioTrack object
    env->SetIntField(thiz, javaAudioTrackFields.nativeTrackInJavaObj, (int)lpTrack);

    // save the JNI resources so we can free them later
    //LOGV("storing lpJniStorage: %x\n", (int)lpJniStorage);
    // -> the lpJniStorage pointer is likewise saved into the Java layer
    env->SetIntField(thiz, javaAudioTrackFields.jniData, (int)lpJniStorage);

    return AUDIOTRACK_SUCCESS;

    // failures:
native_init_failure:
    delete lpTrack;
    env->SetIntField(thiz, javaAudioTrackFields.nativeTrackInJavaObj, 0);

native_track_failure:
    if (nSession != NULL) {
        env->ReleasePrimitiveArrayCritical(jSession, nSession, 0);
    }
    env->DeleteGlobalRef(lpJniStorage->mCallbackData.audioTrack_class);
    env->DeleteGlobalRef(lpJniStorage->mCallbackData.audioTrack_ref);
    delete lpJniStorage;
    env->SetIntField(thiz, javaAudioTrackFields.jniData, 0);
    return AUDIOTRACK_ERROR_SETUP_NATIVEINITFAILED;
}
Next, look at what AudioTrackJniStorage* lpJniStorage = new AudioTrackJniStorage(); creates:
struct audiotrack_callback_cookie {
    jclass  audioTrack_class;
    jobject audioTrack_ref;
};

// ----------------------------------------------------------------------------
class AudioTrackJniStorage {
public:
    sp<MemoryHeapBase> mMemHeap;
    sp<MemoryBase>     mMemBase;
    audiotrack_callback_cookie mCallbackData;
    int mStreamType;

    AudioTrackJniStorage() {
        mCallbackData.audioTrack_class = 0;
        mCallbackData.audioTrack_ref = 0;
        mStreamType = AudioSystem::DEFAULT;
    }

    ~AudioTrackJniStorage() {
        mMemBase.clear();
        mMemHeap.clear();
    }

    bool allocSharedMem(int sizeInBytes) {
        mMemHeap = new MemoryHeapBase(sizeInBytes, 0, "AudioTrack Heap Base");
        if (mMemHeap->getHeapID() < 0) {
            return false;
        }
        mMemBase = new MemoryBase(mMemHeap, 0, sizeInBytes);
        return true;
    }
};
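
If you want to experiment with ashmem from the SDK side, android.os.MemoryFile is the Java-level wrapper over the same ashmem mechanism that allocSharedMem() uses here; this is only an analogy, since AudioTrack keeps its allocation in native code. A sketch:

import android.os.MemoryFile;
import java.io.IOException;

// Rough Java-side analogue of allocSharedMem(): an ashmem-backed region
// of a requested size that another process could map and read.
try {
    MemoryFile mf = new MemoryFile("AudioTrack Heap Base (demo)", 4096);
    byte[] pcm = new byte[1024];           // stand-in for audio data
    mf.writeBytes(pcm, 0, 0, pcm.length);  // (src, srcOffset, destOffset, count)
    mf.close();
} catch (IOException e) {
    // allocation or write failed
}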
Look at the MemoryHeapBase constructor:
MemoryHeapBase::MemoryHeapBase(size_t size, uint32_t flags, char const * name)
    : mFD(-1), mSize(0), mBase(MAP_FAILED), mFlags(flags),
      mDevice(0), mNeedUnmap(false)
{
    // get the system page size, typically 4 KB
    const size_t pagesize = getpagesize();
    size = ((size + pagesize-1) & ~(pagesize-1));
    // ashmem_create_region creates the shared memory; on a real device this
    // opens the /dev/ashmem device and yields a file descriptor
    int fd = ashmem_create_region(name == NULL ? "MemoryHeapBase" : name, size);
    LOGE_IF(fd<0, "error creating ashmem region: %s", strerror(errno));
    if (fd >= 0) {
        if (mapfd(fd, size) == NO_ERROR) {   // obtain the memory address via mmap
            if (flags & READ_ONLY) {
                ashmem_set_prot_region(fd, PROT_READ);
            }
        }
    }
}
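
The masking expression in the constructor rounds the requested size up to a page boundary. The same arithmetic as a small sketch (assuming a 4096-byte page):

// (size + pagesize - 1) & ~(pagesize - 1) rounds size up to a multiple
// of pagesize, provided pagesize is a power of two.
static int pageAlign(int size, int pagesize) {
    return (size + pagesize - 1) & ~(pagesize - 1);
}
// pageAlign(1, 4096)    -> 4096
// pageAlign(4096, 4096) -> 4096
// pageAlign(4097, 4096) -> 8192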
After the MemoryHeapBase constructor finishes, we have the following:

mBase points to the start address of the shared memory

mSize is the requested allocation size

mFD is the file descriptor returned by ashmem_create_region

Look at MemoryBase:
class MemoryBase : public BnMemory
{
public:
    MemoryBase(const sp<IMemoryHeap>& heap, ssize_t offset, size_t size);
    virtual ~MemoryBase();
    virtual sp<IMemoryHeap> getMemory(ssize_t* offset, size_t* size) const;

protected:
    size_t getSize() const { return mSize; }
    ssize_t getOffset() const { return mOffset; }
    const sp<IMemoryHeap>& getHeap() const { return mHeap; }

private:
    size_t  mSize;
    ssize_t mOffset;
    sp<IMemoryHeap> mHeap;
};
So far we have only seen a block of shared memory being allocated, which lets two processes share that memory; there is as yet no cross-process synchronization object.
Now analyze mAudioTrack.play(). The JNI layer of play():
static void
android_media_AudioTrack_start(JNIEnv *env, jobject thiz)
{
    // fetch the native AudioTrack pointer previously stored in the Java
    // AudioTrack object, converting the int field back into a pointer
    AudioTrack *lpTrack = (AudioTrack *)env->GetIntField(
        thiz, javaAudioTrackFields.nativeTrackInJavaObj);
    if (lpTrack == NULL ) {
        jniThrowException(env, "java/lang/IllegalStateException",
            "Unable to retrieve AudioTrack pointer for start()");
        return;
    }

    lpTrack->start();
}
At the JNI layer of write(), every path eventually calls writeToTrack():
jint writeToTrack(AudioTrack* pTrack, jint audioFormat, jbyte* data,
                  jint offsetInBytes, jint sizeInBytes) {

    ssize_t written = 0;
    // in STATIC mode sharedBuffer() returns non-zero; in STREAM mode it returns 0
    if (pTrack->sharedBuffer() == 0) {
        written = pTrack->write(data + offsetInBytes, sizeInBytes);
    } else {
        if (audioFormat == javaAudioTrackFields.PCM16) {

            if ((size_t)sizeInBytes > pTrack->sharedBuffer()->size()) {
                sizeInBytes = pTrack->sharedBuffer()->size();
            }
            // in STATIC mode the data is memcpy'd straight into the shared memory
            memcpy(pTrack->sharedBuffer()->pointer(), data + offsetInBytes, sizeInBytes);
            written = sizeInBytes;
        } else if (audioFormat == javaAudioTrackFields.PCM8) {
            // (PCM8 handling elided in this excerpt)
        }
    }
    return written;
}
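
Seen from the Java side, the STREAM branch above is where every write() call lands, which is why streaming playback is usually a loop of small writes. A sketch of that pattern (pcmInputStream is a hypothetical source of raw PCM bytes):

// STREAM mode: feed the track in minimum-buffer-sized chunks; each write()
// crosses into native code and ends up in the pTrack->write() branch above.
byte[] chunk = new byte[mAudioMinBufSize];
int n;
while ((n = pcmInputStream.read(chunk)) > 0) {
    mAudioTrack.write(chunk, 0, n);
}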
Up to this point we have only covered the interaction between AudioTrack's Java and JNI layers, and the allocation of one block of shared memory.