[Android] 获取音频输出getOutput
2015-08-23 23:43
656 查看
每创建一个AudioTrack,代表需要新增一个输出实例,即需要根据音频流的streamtype,音频流的音轨数量,采样率,位宽等数据来重新构建buffer,而且输出的设备也可能会有变化,由于Android设备支持的输出设备各种各样,如线控耳机,喇叭,蓝牙耳机,midi设备等,因此如果该设备是第一次被使用,则会先被初始化。
下文描述的打开输出设置并非真正的打开linux设备文件,而是输出设备相关的初始化操作
audio_io_handle_tAudioTrack::getOutput_l()
{
if(mOutput){
returnmOutput;
}else{
returnAudioSystem::getOutput(mStreamType,
mSampleRate,mFormat,mChannelMask,mFlags);
}
}
AudioSystem是上层往底层调用audio相关功能时必经的api层
由于Output涉及到输出策略,即应该输出到哪个设备的问题,因此,需要经过AudioPolicyService来处理
audio_io_handle_tAudioPolicyService::getOutput(audio_stream_type_tstream,
uint32_tsamplingRate,
audio_format_tformat,
audio_channel_mask_tchannelMask,
audio_output_flags_tflags,
constaudio_offload_info_t*offloadInfo)
{
if(mpAudioPolicy==NULL){
return0;
}
ALOGV("getOutput()");
Mutex::Autolock_l(mLock);
returnmpAudioPolicy->get_output(mpAudioPolicy,stream,samplingRate,
format,channelMask,flags,offloadInfo);
}
要理清楚Audio策略,需要先分析AudioPolicyService的构建。AudioPolicyService是在mediaserver初始化的时候创建的
//main_mediaserver.cpp
intmain(intargc,char**argv)
{
MediaPlayerService::instantiate();
}
在AudioPolicyService的初始化过程中,主要与策略相关的步骤有三个
//----------------------------------------------------------------------------
// Constructor: load the audio policy HAL module, open its device, and
// create the audio_policy instance (handing it &aps_ops so the policy can
// call back into this service).
// NOTE(review): the ALOGE_IF message had its spaces stripped by extraction;
// restored to the AOSP wording.
AudioPolicyService::AudioPolicyService()
    : BnAudioPolicyService(), mpAudioPolicyDev(NULL), mpAudioPolicy(NULL)
{
    /* instantiate the audio policy manager */
    rc = hw_get_module(AUDIO_POLICY_HARDWARE_MODULE_ID, &module);
    if (rc)
        return;
    rc = audio_policy_dev_open(module, &mpAudioPolicyDev);
    ALOGE_IF(rc, "couldn't open audio policy device (%s)", strerror(-rc));
    rc = mpAudioPolicyDev->create_audio_policy(mpAudioPolicyDev, &aps_ops, this,
                                               &mpAudioPolicy);
}
hw_get_module获取名为“AUDIO_POLICY_HARDWARE_MODULE_ID”的module,在audio_policy_hal.cpp中找到了同名module
structlegacy_ap_moduleHAL_MODULE_INFO_SYM={
module:{
common:{
tag:HARDWARE_MODULE_TAG,
version_major:1,
version_minor:0,
id:AUDIO_POLICY_HARDWARE_MODULE_ID,
name:"LEGACYAudioPolicyHAL",
author:"TheAndroidOpenSourceProject",
methods:&legacy_ap_module_methods,
dso:NULL,
reserved:{0},
},
},
};
audio_policy_dev_open调用该module的open函数得到该module对应的设备
staticintlegacy_ap_dev_open(consthw_module_t*module,constchar*name,
hw_device_t**device)
{
structlegacy_ap_device*dev;
if(strcmp(name,AUDIO_POLICY_INTERFACE)!=0)
return-EINVAL;
dev=(structlegacy_ap_device*)calloc(1,sizeof(*dev));
if(!dev)
return-ENOMEM;
dev->device.common.tag=HARDWARE_DEVICE_TAG;
dev->device.common.version=0;
dev->device.common.module=const_cast<hw_module_t*>(module);
dev->device.common.close=legacy_ap_dev_close;
dev->device.create_audio_policy=create_legacy_ap;
dev->device.destroy_audio_policy=destroy_legacy_ap;
*device=&dev->device.common;
return0;
}
得到该设备对应的音频策略
staticintcreate_legacy_ap(conststructaudio_policy_device*device,
structaudio_policy_service_ops*aps_ops,
void*service,
structaudio_policy**ap)
{
structlegacy_audio_policy*lap;
intret;
if(!service||!aps_ops)
return-EINVAL;
lap=(structlegacy_audio_policy*)calloc(1,sizeof(*lap));
if(!lap)
return-ENOMEM;
lap->policy.set_device_connection_state=ap_set_device_connection_state;
lap->policy.get_device_connection_state=ap_get_device_connection_state;
lap->policy.set_phone_state=ap_set_phone_state;
lap->policy.set_ringer_mode=ap_set_ringer_mode;
lap->policy.set_force_use=ap_set_force_use;
lap->policy.get_force_use=ap_get_force_use;
lap->policy.set_can_mute_enforced_audible=
ap_set_can_mute_enforced_audible;
lap->policy.init_check=ap_init_check;
lap->policy.get_output=ap_get_output;
lap->policy.start_output=ap_start_output;
lap->policy.stop_output=ap_stop_output;
lap->policy.release_output=ap_release_output;
lap->policy.get_input=ap_get_input;
lap->policy.start_input=ap_start_input;
lap->policy.stop_input=ap_stop_input;
lap->policy.release_input=ap_release_input;
lap->policy.init_stream_volume=ap_init_stream_volume;
lap->policy.set_stream_volume_index=ap_set_stream_volume_index;
lap->policy.get_stream_volume_index=ap_get_stream_volume_index;
lap->policy.set_stream_volume_index_for_device=ap_set_stream_volume_index_for_device;
lap->policy.get_stream_volume_index_for_device=ap_get_stream_volume_index_for_device;
lap->policy.get_strategy_for_stream=ap_get_strategy_for_stream;
lap->policy.get_devices_for_stream=ap_get_devices_for_stream;
lap->policy.get_output_for_effect=ap_get_output_for_effect;
lap->policy.register_effect=ap_register_effect;
lap->policy.unregister_effect=ap_unregister_effect;
lap->policy.set_effect_enabled=ap_set_effect_enabled;
lap->policy.is_stream_active=ap_is_stream_active;
lap->policy.is_stream_active_remotely=ap_is_stream_active_remotely;
lap->policy.is_source_active=ap_is_source_active;
lap->policy.dump=ap_dump;
lap->policy.is_offload_supported=ap_is_offload_supported;
lap->service=service;
lap->aps_ops=aps_ops;
lap->service_client=
newAudioPolicyCompatClient(aps_ops,service);
if(!lap->service_client){
ret=-ENOMEM;
gotoerr_new_compat_client;
}
lap->apm=createAudioPolicyManager(lap->service_client);
if(!lap->apm){
ret=-ENOMEM;
gotoerr_create_apm;
}
*ap=&lap->policy;
return0;
err_create_apm:
deletelap->service_client;
err_new_compat_client:
free(lap);
*ap=NULL;
returnret;
}
可见,函数中一大堆的成员赋值都是policy相关的
回到输出策略,从上面policy相关的赋值中,能知道mpAudioPolicy->get_output最终会调用到ap_get_output
staticaudio_io_handle_tap_get_output(structaudio_policy*pol,
audio_stream_type_tstream,
uint32_tsampling_rate,
audio_format_tformat,
audio_channel_mask_tchannelMask,
audio_output_flags_tflags,
constaudio_offload_info_t*offloadInfo)
{
structlegacy_audio_policy*lap=to_lap(pol);
ALOGV("%s:tid%d",__func__,gettid());
returnlap->apm->getOutput((AudioSystem::stream_type)stream,
sampling_rate,(int)format,channelMask,
(AudioSystem::output_flags)flags,
offloadInfo);
}
这里的lap->apm又是什么?我们在上面创建AudioPolicy时也能找到其创建的地方
lap->apm=createAudioPolicyManager(lap->service_client);
extern"C"AudioPolicyInterface*createAudioPolicyManager(AudioPolicyClientInterface*clientInterface)
{
returnnewAudioPolicyManagerDefault(clientInterface);
}
AudioPolicyManagerDefault是AudioPolicyManagerBase的子类
classAudioPolicyManagerDefault:publicAudioPolicyManagerBase
{
public:
AudioPolicyManagerDefault(AudioPolicyClientInterface*clientInterface)
:AudioPolicyManagerBase(clientInterface){}
virtual~AudioPolicyManagerDefault(){}
};
因此最终还是调用到AudioPolicyManagerBase::getOutput,绕了这么大一圈,先来回顾一下getOutput走过的流程
AudioSystem,API层
AudioPolicyService,策略层
Audio_policy_hal,module层
AudioPolicyManagerBase,实际上的策略实现层
根据传进来的streamtype获得策略。
根据策略获得输出设备。策略的作用,就是根据策略与目前的状态来选取输出的设备。
打开该输出设备
audio_io_handle_tAudioPolicyManagerBase::getOutput(AudioSystem::stream_typestream,
uint32_tsamplingRate,
uint32_tformat,
uint32_tchannelMask,
AudioSystem::output_flagsflags,
constaudio_offload_info_t*offloadInfo)
{
audio_io_handle_toutput=0;
uint32_tlatency=0;
routing_strategystrategy=getStrategy((AudioSystem::stream_type)stream);
audio_devices_tdevice=getDeviceForStrategy(strategy,false/*fromCache*/);
mTestOutputs[mCurOutput]=mpClientInterface->openOutput(0,&outputDesc->mDevice,
&outputDesc->mSamplingRate,
&outputDesc->mFormat,
&outputDesc->mChannelMask,
&outputDesc->mLatency,
outputDesc->mFlags,
offloadInfo);
returnoutput;
}
//----------------------------------------------------------------------------
//AudioPolicyManagerBase
//----------------------------------------------------------------------------
//----------------------------------------------------------------------------
// AudioPolicyManagerBase
//----------------------------------------------------------------------------
AudioPolicyManagerBase::AudioPolicyManagerBase(AudioPolicyClientInterface *clientInterface)
    :
#ifdef AUDIO_POLICY_TEST
      Thread(false),
#endif //AUDIO_POLICY_TEST
      mPrimaryOutput((audio_io_handle_t)0),
      mAvailableOutputDevices(AUDIO_DEVICE_NONE),
      mPhoneState(AudioSystem::MODE_NORMAL),
      mLimitRingtoneVolume(false), mLastVoiceVolume(-1.0f),
      mTotalEffectsCpuLoad(0), mTotalEffectsMemory(0),
      mA2dpSuspended(false), mHasA2dp(false), mHasUsb(false), mHasRemoteSubmix(false),
      mSpeakerDrcEnabled(false)
{
    // Keep the client interface (AudioPolicyCompatClient here) so the
    // manager can call back for openOutput() etc.
    mpClientInterface = clientInterface;
}
我们先回顾一下,传进来的是
lap->apm=createAudioPolicyManager(lap->service_client);
lap->service_client=
newAudioPolicyCompatClient(aps_ops,service);
调用了AudioPolicyCompatClient的openOutput
audio_io_handle_tAudioPolicyCompatClient::openOutput(audio_module_handle_tmodule,
audio_devices_t*pDevices,
uint32_t*pSamplingRate,
audio_format_t*pFormat,
audio_channel_mask_t*pChannelMask,
uint32_t*pLatencyMs,
audio_output_flags_tflags,
constaudio_offload_info_t*offloadInfo)
{
returnmServiceOps->open_output_on_module(mService,module,pDevices,pSamplingRate,
pFormat,pChannelMask,pLatencyMs,
flags,offloadInfo);
}
然后又回调到了AudioPolicyManager的open_output_on_module
structaudio_policy_service_opsaps_ops={
open_output:aps_open_output,
open_duplicate_output:aps_open_dup_output,
close_output:aps_close_output,
suspend_output:aps_suspend_output,
restore_output:aps_restore_output,
open_input:aps_open_input,
close_input:aps_close_input,
set_stream_volume:aps_set_stream_volume,
set_stream_output:aps_set_stream_output,
set_parameters:aps_set_parameters,
get_parameters:aps_get_parameters,
start_tone:aps_start_tone,
stop_tone:aps_stop_tone,
set_voice_volume:aps_set_voice_volume,
move_effects:aps_move_effects,
load_hw_module:aps_load_hw_module,
open_output_on_module:aps_open_output_on_module,
open_input_on_module:aps_open_input_on_module,
};
最终调用的是AudioFlinger的openOutput,实在是比较绕
staticaudio_io_handle_taps_open_output_on_module(void*service,
audio_module_handle_tmodule,
audio_devices_t*pDevices,
uint32_t*pSamplingRate,
audio_format_t*pFormat,
audio_channel_mask_t*pChannelMask,
uint32_t*pLatencyMs,
audio_output_flags_tflags,
constaudio_offload_info_t*offloadInfo)
{
sp<IAudioFlinger>af=AudioSystem::get_audio_flinger();
if(af==0){
ALOGW("%s:couldnotgetAudioFlinger",__func__);
return0;
}
returnaf->openOutput(module,pDevices,pSamplingRate,pFormat,pChannelMask,
pLatencyMs,flags,offloadInfo);
}
AudioFlinger的openOutput是真正实现打开输出设备(模块)的地方,其中有三个步骤:
加载音频硬件设备(audio.primary.rtd294x.so)
硬件设备输出方法初始化(选择恰当的输出函数)
创建MixerThread
//----------------------------------------------------------------------------
audio_io_handle_tAudioFlinger::openOutput(audio_module_handle_tmodule,
audio_devices_t*pDevices,
uint32_t*pSamplingRate,
audio_format_t*pFormat,
audio_channel_mask_t*pChannelMask,
uint32_t*pLatencyMs,
audio_output_flags_tflags,
constaudio_offload_info_t*offloadInfo)
{
outHwDev=findSuitableHwDev_l(module,*pDevices);
audio_hw_device_t*hwDevHal=outHwDev->hwDevice();
audio_io_handle_tid=nextUniqueId()
status_tstatus=hwDevHal->open_output_stream(hwDevHal,
id,
*pDevices,
(audio_output_flags_t)flags,
&config,
&outStream);
thread=newMixerThread(this,output,id,*pDevices);
returnid;
}
// With module == 0, load every known audio interface (first use only) and
// pick the first loaded HAL whose supported-device mask covers `devices`;
// otherwise look up the already-loaded module by handle.
AudioFlinger::AudioHwDevice *AudioFlinger::findSuitableHwDev_l(
        audio_module_handle_t module,
        audio_devices_t devices)
{
    if (module == 0) {
        for (size_t i = 0; i < ARRAY_SIZE(audio_interfaces); i++) {
            loadHwModule_l(audio_interfaces[i]);
        }
        // then try to find a module supporting the requested device.
        for (size_t i = 0; i < mAudioHwDevs.size(); i++) {
            AudioHwDevice *audioHwDevice = mAudioHwDevs.valueAt(i);
            audio_hw_device_t *dev = audioHwDevice->hwDevice();
            if ((dev->get_supported_devices != NULL) &&
                    (dev->get_supported_devices(dev) & devices) == devices)
                return audioHwDevice;
        }
    } else {
        // check a match for the requested module handle
        AudioHwDevice *audioHwDevice = mAudioHwDevs.valueFor(module);
        if (audioHwDevice != NULL) {
            return audioHwDevice;
        }
    }
    return NULL;
}
而且音频设备可能不止一个,audio_interfaces中就定义了几个需要加载的设备,当然,并不是所有定义的都能加载成功,这取决于方案厂商实现了几个设备模块
staticconstchar*constaudio_interfaces[]={
AUDIO_HARDWARE_MODULE_ID_PRIMARY,
AUDIO_HARDWARE_MODULE_ID_A2DP,
AUDIO_HARDWARE_MODULE_ID_USB,
#ifdefBLUETOOTH_RTK_VR
AUDIO_HARDWARE_MODULE_ID_VOHOG,/*BOARD_HAVE_BLUETOOTH_RTK_VR*/
#endif
#ifdefBLUETOOTH_RTK_SCO
AUDIO_HARDWARE_MODULE_ID_SCO,
#endif
};
如果加载成功,那么会进行音量的设置,并把这个设备加入设备数组中进行维护
//loadHwModule_l()mustbecalledwithAudioFlinger::mLockheld
audio_module_handle_tAudioFlinger::loadHwModule_l(constchar*name)
{
intrc=load_audio_interface(name,&dev);
dev->set_master_volume(dev,mMasterVolume)
audio_module_handle_thandle=nextUniqueId();
mAudioHwDevs.add(handle,newAudioHwDevice(name,dev,flags));
returnhandle;
}
加载,实际上就是把设备相关的lib文件加载进来,并且加载lib文件内的相关函数
//----------------------------------------------------------------------------
staticintload_audio_interface(constchar*if_name,audio_hw_device_t**dev)
{
rc=hw_get_module_by_class(AUDIO_HARDWARE_MODULE_ID,if_name,&mod);
rc=audio_hw_device_open(mod,dev);
}
加载lib文件的名字遵循下面的格式:
class_id:audio
inst:primary(参考上面的设备名数组,有primary,A2DP,USB等)
prop:rtd294x(这个是平台相关的名称,如ro.product.board等)
path:优先查找/system/lib/hw下的,后查找vendor/lib/hw下的
load开始加载lib
inthw_get_module_by_class(constchar*class_id,constchar*inst,
conststructhw_module_t**module)
{
intstatus;
inti;
conststructhw_module_t*hmi=NULL;
charprop[PATH_MAX];
charpath[PATH_MAX];
charname[PATH_MAX];
if(inst)
snprintf(name,PATH_MAX,"%s.%s",class_id,inst);
else
strlcpy(name,class_id,PATH_MAX);
/*
*Herewerelyonthefactthatcallingdlopenmultipletimeson
*thesame.sowillsimplyincrementarefcount(andnotload
*anewcopyofthelibrary).
*Wealsoassumethatdlopen()isthread-safe.
*/
/*Loopthroughtheconfigurationvariantslookingforamodule*/
for(i=0;i<HAL_VARIANT_KEYS_COUNT+1;i++){
if(i<HAL_VARIANT_KEYS_COUNT){
if(property_get(variant_keys[i],prop,NULL)==0){
continue;
}
snprintf(path,sizeof(path),"%s/%s.%s.so",
HAL_LIBRARY_PATH2,name,prop);
if(access(path,R_OK)==0)break;
snprintf(path,sizeof(path),"%s/%s.%s.so",
HAL_LIBRARY_PATH1,name,prop);
if(access(path,R_OK)==0)break;
}else{
snprintf(path,sizeof(path),"%s/%s.default.so",
HAL_LIBRARY_PATH2,name);
if(access(path,R_OK)==0)break;
snprintf(path,sizeof(path),"%s/%s.default.so",
HAL_LIBRARY_PATH1,name);
if(access(path,R_OK)==0)break;
}
}
status=-ENOENT;
if(i<HAL_VARIANT_KEYS_COUNT+1){
/*loadthemodule,ifthisfails,we'redoomed,andweshouldnottry
*toloadadifferentvariant.*/
status=load(class_id,path,module);
}
returnstatus;
}
load调用dlopen与dlsym加载函数接口
/**
*Loadthefiledefinedbythevariantandifsuccessful
*returnthedlopenhandleandthehmi.
*@return0=success,!0=failure.
*/
staticintload(constchar*id,
constchar*path,
conststructhw_module_t**pHmi)
{
handle=dlopen(path,RTLD_NOW);
hmi=(structhw_module_t*)dlsym(handle,sym);
}
audio_hw_device_open会调用刚刚加载的lib中的adev_open函数,adev_open函数的职责是把后续所需要的功能函数赋值到某个结构体中进行维护,以便后续调用
staticinlineintaudio_hw_device_open(conststructhw_module_t*module,
structaudio_hw_device**device)
{
returnmodule->methods->open(module,AUDIO_HARDWARE_INTERFACE,
(structhw_device_t**)device);
}
staticintadev_open(consthw_module_t*module,constchar*name,
hw_device_t**device)
{
structtuna_audio_device*adev;
intret;
if(strcmp(name,AUDIO_HARDWARE_INTERFACE)!=0)
return-EINVAL;
adev=calloc(1,sizeof(structtuna_audio_device));
if(!adev)
return-ENOMEM;
adev->hw_device.common.tag=HARDWARE_DEVICE_TAG;
adev->hw_device.common.version=AUDIO_DEVICE_API_VERSION_2_0;
adev->hw_device.common.module=(structhw_module_t*)module;
adev->hw_device.common.close=adev_close;
adev->hw_device.init_check=adev_init_check;
adev->hw_device.set_voice_volume=adev_set_voice_volume;
adev->hw_device.set_master_volume=adev_set_master_volume;
adev->hw_device.get_master_volume=adev_get_master_volume;
adev->hw_device.set_master_mute=adev_set_master_mute;
adev->hw_device.get_master_mute=adev_get_master_mute;
adev->hw_device.set_mode=adev_set_mode;
adev->hw_device.set_mic_mute=adev_set_mic_mute;
adev->hw_device.get_mic_mute=adev_get_mic_mute;
adev->hw_device.set_parameters=adev_set_parameters;
adev->hw_device.get_parameters=adev_get_parameters;
adev->hw_device.get_input_buffer_size=adev_get_input_buffer_size;
adev->hw_device.open_output_stream=adev_open_output_stream;
adev->hw_device.close_output_stream=adev_close_output_stream;
adev->hw_device.open_input_stream=adev_open_input_stream;
adev->hw_device.close_input_stream=adev_close_input_stream;
adev->hw_device.dump=adev_dump;
adev->mixer=mixer_open(0);
if(!adev->mixer){
free(adev);
ALOGE("Unabletoopenthemixer,aborting.");
return-EINVAL;
}
/*SetthedefaultroutebeforethePCMstreamisopened*/
pthread_mutex_lock(&adev->lock);
set_route_by_array(adev->mixer,defaults,1);
adev->mode=AUDIO_MODE_NORMAL;
adev->out_device=AUDIO_DEVICE_OUT_SPEAKER;
adev->in_device=AUDIO_DEVICE_IN_BUILTIN_MIC&~AUDIO_DEVICE_BIT_IN;
select_output_device(adev);
adev->pcm_modem_dl=NULL;
adev->pcm_modem_ul=NULL;
adev->voice_volume=1.0f;
adev->tty_mode=TTY_MODE_OFF;
adev->device_is_toro=is_device_toro();
adev->bluetooth_nrec=true;
adev->wb_amr=0;
adev->AI_open_count=0;
adev->AO_open_count=0;
/*RIL*/
//ril_open(&adev->ril);
pthread_mutex_unlock(&adev->lock);
/*registercallbackforwidebandAMRsetting*/
//ril_register_set_wb_amr_callback(audio_set_wb_amr_callback,(void*)adev);
*device=&adev->hw_device.common;
return0;
}
staticintadev_open_output_stream(structaudio_hw_device*dev,
audio_io_handle_thandle,
audio_devices_tdevices,
audio_output_flags_tflags,
structaudio_config*config,
structaudio_stream_out**stream_out)
{
output_type=OUTPUT_LOW_LATENCY;
out->stream.common.get_buffer_size=out_get_buffer_size_low_latency;
out->stream.common.get_sample_rate=out_get_sample_rate;
out->stream.get_latency=out_get_latency_low_latency;
out->stream.write=out_write_low_latency;
}
首先会判断能否用硬件设备模块设置音量,可以的话就不会采用Android的AudioMixer来混音MasterVolume了,不过streamVolume还是会用AudioMixer进行混音。然后调用readOutputParameters来创建混音所使用的buffer。
//----------------------------------------------------------------------------
//Playback
//----------------------------------------------------------------------------
//----------------------------------------------------------------------------
// Playback
//----------------------------------------------------------------------------
// Decide whether master volume/mute are applied by the HAL or by software
// mixing, read the output parameters (mix buffer setup), and cache
// per-stream volume/mute from AudioFlinger.
AudioFlinger::PlaybackThread::PlaybackThread(const sp<AudioFlinger>& audioFlinger,
                                             AudioStreamOut *output,
                                             audio_io_handle_t id,
                                             audio_devices_t device,
                                             type_t type)
    : ThreadBase(audioFlinger, id, device, AUDIO_DEVICE_NONE, type),
      mNormalFrameCount(0), mMixBuffer(NULL),
      mAllocMixBuffer(NULL), mSuspended(0), mBytesWritten(0),
      mActiveTracksGeneration(0),
      // mStreamTypes[] initialized in constructor body
      mOutput(output),
      mLastWriteTime(0), mNumWrites(0), mNumDelayedWrites(0), mInWrite(false),
      mMixerStatus(MIXER_IDLE),
      mMixerStatusIgnoringFastTracks(MIXER_IDLE),
      standbyDelay(AudioFlinger::mStandbyTimeInNsecs),
      mBytesRemaining(0),
      mCurrentWriteLength(0),
      mUseAsyncWrite(false),
      mWriteAckSequence(0),
      mDrainSequence(0),
      mSignalPending(false),
      mScreenState(AudioFlinger::mScreenState),
      // index 0 is reserved for normal mixer's submix
      mFastTrackAvailMask(((1 << FastMixerState::kMaxFastTracks) - 1) & ~1),
      // mLatchD, mLatchQ,
      mLatchDValid(false), mLatchQValid(false)
{
    snprintf(mName, kNameLength, "AudioOut_%X", id);
    mNBLogWriter = audioFlinger->newWriter_l(kLogSize, mName);
    // Assumes constructor is called by AudioFlinger with its mLock held, but
    // it would be safer to explicitly pass initial masterVolume/masterMute
    // as parameter.
    //
    // If the HAL we are using has support for master volume or master mute,
    // then do not attenuate or mute during mixing (just leave the volume at
    // 1.0 and the mute set to false).
    mMasterVolume = audioFlinger->masterVolume_l();
    mMasterMute = audioFlinger->masterMute_l();
    if (mOutput && mOutput->audioHwDev) {
        if (mOutput->audioHwDev->canSetMasterVolume()) {
            mMasterVolume = 1.0;
        }
        if (mOutput->audioHwDev->canSetMasterMute()) {
            mMasterMute = false;
        }
    }
    readOutputParameters();
    // mStreamTypes[AUDIO_STREAM_CNT] is initialized by stream_type_t default
    // constructor. There is no AUDIO_STREAM_MIN, and ++ operator does not compile.
    for (audio_stream_type_t stream = (audio_stream_type_t)0; stream < AUDIO_STREAM_CNT;
            stream = (audio_stream_type_t)(stream + 1)) {
        mStreamTypes[stream].volume = mAudioFlinger->streamVolume_l(stream);
        mStreamTypes[stream].mute = mAudioFlinger->streamMute_l(stream);
    }
    // mStreamTypes[AUDIO_STREAM_CNT] exists but isn't explicitly initialized
    // here, because mAudioFlinger doesn't have one to copy from.
}
创建mixerbuffer
voidAudioFlinger::PlaybackThread::readOutputParameters()
{
mAllocMixBuffer=newint8_t[mNormalFrameCount*mFrameSize+align-1];
mMixBuffer=(int16_t*)((((size_t)mAllocMixBuffer+align-1)/align)*align);
memset(mMixBuffer,0,mNormalFrameCount*mFrameSize);
}
在MixerThread构造函数内创建了混音器AudioMixer。如果采用FastMixer的话,也会在MixerThread内创建,而且还会创建FastMixer的mixerbufferSourceAudioBufferProvider并进行各种初始化设置,这里不做讨论。
// Abridged excerpt: create the AudioMixer; FastMixer setup (when enabled)
// is elided here.
AudioFlinger::MixerThread::MixerThread(const sp<AudioFlinger>& audioFlinger, AudioStreamOut *output,
                                       audio_io_handle_t id, audio_devices_t device, type_t type)
    : PlaybackThread(audioFlinger, output, id, device, type),
      // mAudioMixer below
      // mFastMixer below
      mFastMixerFutex(0)
      // mOutputSink below
      // mPipeSink below
      // mNormalSink below
{
    mAudioMixer = new AudioMixer(mNormalFrameCount, mSampleRate);
}
创建完MixerThread后把它加进mPlaybackThreads进行管理
// Register the new thread under its io handle so AudioFlinger can manage it.
thread = new MixerThread(this, output, id, *pDevices);
mPlaybackThreads.add(id, thread);
总体流程如下:
下文描述的打开输出设置并非真正的打开linux设备文件,而是输出设备相关的初始化操作
getOutput流程
AudioSystem是上层往底层调用audio相关功能时必经的api层
由于Output涉及到输出策略,即应该输出到哪个设备的问题,因此,需要经过AudioPolicyService来处理
要理清楚Audio策略,需要先分析AudioPolicyService的构建。AudioPolicyService是在mediaserver初始化的时候创建的
在AudioPolicyService的初始化过程中,主要与策略相关的步骤有三个
hw_get_module获取名为“AUDIO_POLICY_HARDWARE_MODULE_ID”的module,在audio_policy_hal.cpp中找到了同名module
audio_policy_dev_open调用该module的open函数得到该module对应的设备
得到该设备对应的音频策略
可见,函数中一大堆的成员赋值都是policy相关的
回到输出策略,从上面policy相关的赋值中,能知道mpAudioPolicy->get_output最终会调用到ap_get_output
这里的lap->apm又是什么?我们在上面创建AudioPolicy时也能找到其创建的地方
AudioPolicyManagerDefault是AudioPolicyManagerBase的子类
因此最终还是调用到AudioPolicyManagerBase::getOutput,绕了这么大一圈,先来回顾一下getOutput走过的流程
AudioSystem,API层
AudioPolicyService,策略层
Audio_policy_hal,module层
AudioPolicyManagerBase,实际上的策略实现层
getOutput实现
getOutput的实现分为三个步骤根据传进来的streamtype获得策略。
根据策略获得输出设备。策略的作用,就是根据策略与目前的状态来选取输出的设备。
打开该输出设备
openOutput
在打开设备的时候调用了mpClientInterface->openOutput,而mpClientInterface是在构造AudioPolicyManagerBase的时候传进来的。我们先回顾一下,传进来的是
调用了AudioPolicyCompatClient的openOutput
然后又回调到了AudioPolicyManager的open_output_on_module
最终调用的是AudioFlinger的openOutput,实在是比较绕
AudioFlinger的openOutput是真正实现打开输出设备(模块)的地方,其中有三个步骤:
加载音频硬件设备(audio.primary.rtd294x.so)
硬件设备输出方法初始化(选择恰当的输出函数)
创建MixerThread
1.加载硬件设备
当然是只有设备第一次使用的时候才会加载,后续可以直接从已加载的设备中获取。而且音频设备可能不止一个,audio_interfaces中就定义了几个需要加载的设备,当然,并不是所有定义的都能加载成功,这取决于方案厂商实现了几个设备模块
如果加载成功,那么会进行音量的设置,并把这个设备加入设备数组中进行维护
加载,实际上就是把设备相关的lib文件加载进来,并且加载lib文件内的相关函数
conststructhw_module_t**module)
{
intstatus;
inti;
conststructhw_module_t*hmi=NULL;
charprop[PATH_MAX];
charpath[PATH_MAX];
charname[PATH_MAX];
if(inst)
snprintf(name,PATH_MAX,"%s.%s",class_id,inst);
else
strlcpy(name,class_id,PATH_MAX);
/*
*Herewerelyonthefactthatcallingdlopenmultipletimeson
*thesame.sowillsimplyincrementarefcount(andnotload
*anewcopyofthelibrary).
*Wealsoassumethatdlopen()isthread-safe.
*/
/*Loopthroughtheconfigurationvariantslookingforamodule*/
for(i=0;i<HAL_VARIANT_KEYS_COUNT+1;i++){
if(i<HAL_VARIANT_KEYS_COUNT){
if(property_get(variant_keys[i],prop,NULL)==0){
continue;
}
snprintf(path,sizeof(path),"%s/%s.%s.so",
HAL_LIBRARY_PATH2,name,prop);
if(access(path,R_OK)==0)break;
snprintf(path,sizeof(path),"%s/%s.%s.so",
HAL_LIBRARY_PATH1,name,prop);
if(access(path,R_OK)==0)break;
}else{
snprintf(path,sizeof(path),"%s/%s.default.so",
HAL_LIBRARY_PATH2,name);
if(access(path,R_OK)==0)break;
snprintf(path,sizeof(path),"%s/%s.default.so",
HAL_LIBRARY_PATH1,name);
if(access(path,R_OK)==0)break;
}
}
status=-ENOENT;
if(i<HAL_VARIANT_KEYS_COUNT+1){
/*loadthemodule,ifthisfails,we'redoomed,andweshouldnottry
*toloadadifferentvariant.*/
status=load(class_id,path,module);
}
returnstatus;
}
load调用dlopen与dlsym加载函数接口
*Loadthefiledefinedbythevariantandifsuccessful
*returnthedlopenhandleandthehmi.
*@return0=success,!0=failure.
*/
staticintload(constchar*id,
constchar*path,
conststructhw_module_t**pHmi)
{
handle=dlopen(path,RTLD_NOW);
hmi=(structhw_module_t*)dlsym(handle,sym);
}
audio_hw_device_open会调用刚刚加载的lib中的adev_open函数,adev_open函数的职责是把后续所需要的功能函数赋值到某个结构体中进行维护,以便后续调用
structaudio_hw_device**device)
{
returnmodule->methods->open(module,AUDIO_HARDWARE_INTERFACE,
(structhw_device_t**)device);
}
staticintadev_open(consthw_module_t*module,constchar*name,
hw_device_t**device)
{
structtuna_audio_device*adev;
intret;
if(strcmp(name,AUDIO_HARDWARE_INTERFACE)!=0)
return-EINVAL;
adev=calloc(1,sizeof(structtuna_audio_device));
if(!adev)
return-ENOMEM;
adev->hw_device.common.tag=HARDWARE_DEVICE_TAG;
adev->hw_device.common.version=AUDIO_DEVICE_API_VERSION_2_0;
adev->hw_device.common.module=(structhw_module_t*)module;
adev->hw_device.common.close=adev_close;
adev->hw_device.init_check=adev_init_check;
adev->hw_device.set_voice_volume=adev_set_voice_volume;
adev->hw_device.set_master_volume=adev_set_master_volume;
adev->hw_device.get_master_volume=adev_get_master_volume;
adev->hw_device.set_master_mute=adev_set_master_mute;
adev->hw_device.get_master_mute=adev_get_master_mute;
adev->hw_device.set_mode=adev_set_mode;
adev->hw_device.set_mic_mute=adev_set_mic_mute;
adev->hw_device.get_mic_mute=adev_get_mic_mute;
adev->hw_device.set_parameters=adev_set_parameters;
adev->hw_device.get_parameters=adev_get_parameters;
adev->hw_device.get_input_buffer_size=adev_get_input_buffer_size;
adev->hw_device.open_output_stream=adev_open_output_stream;
adev->hw_device.close_output_stream=adev_close_output_stream;
adev->hw_device.open_input_stream=adev_open_input_stream;
adev->hw_device.close_input_stream=adev_close_input_stream;
adev->hw_device.dump=adev_dump;
adev->mixer=mixer_open(0);
if(!adev->mixer){
free(adev);
ALOGE("Unabletoopenthemixer,aborting.");
return-EINVAL;
}
/*SetthedefaultroutebeforethePCMstreamisopened*/
pthread_mutex_lock(&adev->lock);
set_route_by_array(adev->mixer,defaults,1);
adev->mode=AUDIO_MODE_NORMAL;
adev->out_device=AUDIO_DEVICE_OUT_SPEAKER;
adev->in_device=AUDIO_DEVICE_IN_BUILTIN_MIC&~AUDIO_DEVICE_BIT_IN;
select_output_device(adev);
adev->pcm_modem_dl=NULL;
adev->pcm_modem_ul=NULL;
adev->voice_volume=1.0f;
adev->tty_mode=TTY_MODE_OFF;
adev->device_is_toro=is_device_toro();
adev->bluetooth_nrec=true;
adev->wb_amr=0;
adev->AI_open_count=0;
adev->AO_open_count=0;
/*RIL*/
//ril_open(&adev->ril);
pthread_mutex_unlock(&adev->lock);
/*registercallbackforwidebandAMRsetting*/
//ril_register_set_wb_amr_callback(audio_set_wb_amr_callback,(void*)adev);
*device=&adev->hw_device.common;
return0;
}
2.选择恰当输出方法
调用刚刚加载lib中的open_output_stream方法,即adev_open_output_stream,目的是选择合适的Audio输出方法audio_io_handle_thandle,
audio_devices_tdevices,
audio_output_flags_tflags,
structaudio_config*config,
structaudio_stream_out**stream_out)
{
output_type=OUTPUT_LOW_LATENCY;
out->stream.common.get_buffer_size=out_get_buffer_size_low_latency;
out->stream.common.get_sample_rate=out_get_sample_rate;
out->stream.get_latency=out_get_latency_low_latency;
out->stream.write=out_write_low_latency;
}
3.创建MixerThread
MixerThread是PlaybackThread的子类,因此,也会一同创建PlaybackThread首先会判断能否用硬件设备模块设置音量,可以的话就不会采用Android的AudioMixer来混音MasterVolume了,不过streamVolume还是会用AudioMixer进行混音。然后调用readOutputParameters来创建混音所使用的buffer。
//Playback
//----------------------------------------------------------------------------
// Decide whether master volume/mute are applied by the HAL or by software
// mixing, read the output parameters (mix buffer setup), and cache
// per-stream volume/mute from AudioFlinger.
AudioFlinger::PlaybackThread::PlaybackThread(const sp<AudioFlinger>& audioFlinger,
                                             AudioStreamOut *output,
                                             audio_io_handle_t id,
                                             audio_devices_t device,
                                             type_t type)
    : ThreadBase(audioFlinger, id, device, AUDIO_DEVICE_NONE, type),
      mNormalFrameCount(0), mMixBuffer(NULL),
      mAllocMixBuffer(NULL), mSuspended(0), mBytesWritten(0),
      mActiveTracksGeneration(0),
      // mStreamTypes[] initialized in constructor body
      mOutput(output),
      mLastWriteTime(0), mNumWrites(0), mNumDelayedWrites(0), mInWrite(false),
      mMixerStatus(MIXER_IDLE),
      mMixerStatusIgnoringFastTracks(MIXER_IDLE),
      standbyDelay(AudioFlinger::mStandbyTimeInNsecs),
      mBytesRemaining(0),
      mCurrentWriteLength(0),
      mUseAsyncWrite(false),
      mWriteAckSequence(0),
      mDrainSequence(0),
      mSignalPending(false),
      mScreenState(AudioFlinger::mScreenState),
      // index 0 is reserved for normal mixer's submix
      mFastTrackAvailMask(((1 << FastMixerState::kMaxFastTracks) - 1) & ~1),
      // mLatchD, mLatchQ,
      mLatchDValid(false), mLatchQValid(false)
{
    snprintf(mName, kNameLength, "AudioOut_%X", id);
    mNBLogWriter = audioFlinger->newWriter_l(kLogSize, mName);
    // Assumes constructor is called by AudioFlinger with its mLock held, but
    // it would be safer to explicitly pass initial masterVolume/masterMute
    // as parameter.
    //
    // If the HAL we are using has support for master volume or master mute,
    // then do not attenuate or mute during mixing (just leave the volume at
    // 1.0 and the mute set to false).
    mMasterVolume = audioFlinger->masterVolume_l();
    mMasterMute = audioFlinger->masterMute_l();
    if (mOutput && mOutput->audioHwDev) {
        if (mOutput->audioHwDev->canSetMasterVolume()) {
            mMasterVolume = 1.0;
        }
        if (mOutput->audioHwDev->canSetMasterMute()) {
            mMasterMute = false;
        }
    }
    readOutputParameters();
    // mStreamTypes[AUDIO_STREAM_CNT] is initialized by stream_type_t default
    // constructor. There is no AUDIO_STREAM_MIN, and ++ operator does not compile.
    for (audio_stream_type_t stream = (audio_stream_type_t)0; stream < AUDIO_STREAM_CNT;
            stream = (audio_stream_type_t)(stream + 1)) {
        mStreamTypes[stream].volume = mAudioFlinger->streamVolume_l(stream);
        mStreamTypes[stream].mute = mAudioFlinger->streamMute_l(stream);
    }
    // mStreamTypes[AUDIO_STREAM_CNT] exists but isn't explicitly initialized
    // here, because mAudioFlinger doesn't have one to copy from.
}
创建mixerbuffer
{
mAllocMixBuffer=newint8_t[mNormalFrameCount*mFrameSize+align-1];
mMixBuffer=(int16_t*)((((size_t)mAllocMixBuffer+align-1)/align)*align);
memset(mMixBuffer,0,mNormalFrameCount*mFrameSize);
}
在MixerThread构造函数内创建了混音器AudioMixer。如果采用FastMixer的话,也会在MixerThread内创建,而且还会创建FastMixer的mixerbufferSourceAudioBufferProvider并进行各种初始化设置,这里不做讨论。
audio_io_handle_tid,audio_devices_tdevice,type_ttype)
:PlaybackThread(audioFlinger,output,id,device,type),
//mAudioMixerbelow
//mFastMixerbelow
mFastMixerFutex(0)
//mOutputSinkbelow
//mPipeSinkbelow
//mNormalSinkbelow
{
mAudioMixer=newAudioMixer(mNormalFrameCount,mSampleRate);
}
创建完MixerThread后把它加进mPlaybackThreads进行管理
mPlaybackThreads.add(id,thread);
总体流程如下:
相关文章推荐
- Android源代码分析(三) MediaScanner源码分析(下)
- android SIM state
- Android中activity背景色的设置
- Android Binder机制分析
- Android中处理崩溃异常
- Android数据库ORMlite框架翻译系列(第二章:part 3)
- Intent
- Android数据库ORMlite框架翻译系列(第二章:part 2)
- Activity
- Android数据库ORMlite框架翻译系列(第二章:part 1)
- Android数据库ORMlite框架翻译系列(第一章)
- android 调用系统方法获取应用程序信息 如缓存大小
- Android广播机制
- 进制转换
- Android开发大牛CSDN博客整理
- Wiki_Android_双击或多次点击事件
- <Android Framework 之路>Android5.1 Camera Framework(二)
- Android5.1 Camera Framework(二)
- Android Studio上使用github
- android控制显示和隐藏视图或控件的操作