您的位置:首页 > 移动开发 > Android开发

Android_Audio_Omap_HAL

2015-07-26 10:12 477 查看
此文档仅作为开发随笔记录文档,可作为正式文档做材料参考,但不做正式文档。

 

Written by wolfgang huang (stillinux@gmail.com)

 

此类文档仅记录Android4.1.2+Kernel2.6.37+OMAP3730平台ALSA开发及内核要点,备注好资料应用,以供后续开发人员快速入手,也可作为科普类资料,供其他相关人员学习。

 
Android_Omap_HAL
 
由于我们的OMAP3平台,由TI提供的HAL,对audio设备进行了封装,函数为hardware/ti/omap3/audio/audio_hw.c下实现,audio_policy.c及相应的audio_effects.conf都采用系统默认,audio_policy.conf由/device/ti/productX中提供进行修改。
对于相关安卓加载音频启动及播放录音的过程,请参考《深入剖析Android音频之AudioPolicyService》,该文摘录自http://blog.csdn.net/yangwen123/article/details/39497375,如原文作者觉得不妥,本文表示歉意,并将立即删除。
 
对于系统默认的部分分析,已经由上述提到的文章进行了详细的细述,不要重复开发同样的轮子。本文对OMAP3的HAL进行细述,并作为安卓音频文档的一部分,目标是使读者从安卓HAL一直走到硬件层的处理流程,使Kernel的ALSA构架内部细节透露出来。但是由于鄙人水平有限,难免有错误理解,如有错误,请联系,立即改正。
 
废话不多说,开趴omap3 audio_hw.c和audio_route.c。

/* HAL module descriptor exported by this shared library; the framework
 * locates it by the well-known symbol HAL_MODULE_INFO_SYM when loading
 * the audio HAL (see the hw_get_module_by_class() call chain). */
struct audio_module HAL_MODULE_INFO_SYM = {
.common = {
.tag = HARDWARE_MODULE_TAG,
.module_api_version = AUDIO_MODULE_API_VERSION_0_1,
.hal_api_version = HARDWARE_HAL_API_VERSION,
.id = AUDIO_HARDWARE_MODULE_ID,		// expands to the string "audio"
.name = "Rowboat audio HW HAL",
.author = "The Android Open Source Project",
.methods = &hal_module_methods,		// provides the open() entry (adev_open)
},
};


在《深入剖析Android音频之AudioPolicyService》中给出了加载AUDIO_HARDWARE_MODULE_ID的过程已经详述清楚。
 
frameworks\av\services\audioflinger\ AudioPolicyService.cpp中开始
aps_load_hw_module-->
loadHwModule-->
         loadHwModule_l-->
                   load_audio_interface-->
                           
hw_get_module_by_class(AUDIO_HARDWARE_MODULE_ID…)-->
                                    audio_hw_device_open-->
                                                      
adev_open-->

运行到hardware/ti/omap3/audio/audio_hw.c中的adev_open接口,填充音频API接口,配置初始音频路径。
 
我们知道,在hardware/libhardware/include/audio.h中,安卓定义好了音频HAL需要填充实现的API接口。
/*
 * The vtable every audio HAL must fill in (from
 * hardware/libhardware/include/hardware/audio.h).  adev_open() below
 * populates each of these function pointers.
 */
struct audio_hw_device {
struct hw_device_t common;

/**
* used by audio flinger to enumerate what devices are supported by
* each audio_hw_device implementation.
*
* Return value is a bitmask of 1 or more values of audio_devices_t
*/
uint32_t (*get_supported_devices)(const struct audio_hw_device *dev);

/**
* check to see if the audio hardware interface has been initialized.
* returns 0 on success, -ENODEV on failure.
*/
int (*init_check)(const struct audio_hw_device *dev);

/** set the audio volume of a voice call. Range is between 0.0 and 1.0 */
int (*set_voice_volume)(struct audio_hw_device *dev, float volume);

/**
* set the audio volume for all audio activities other than voice call.
* Range between 0.0 and 1.0. If any value other than 0 is returned,
* the software mixer will emulate this capability.
*/
int (*set_master_volume)(struct audio_hw_device *dev, float volume);

/**
* Get the current master volume value for the HAL, if the HAL supports
* master volume control.  AudioFlinger will query this value from the
* primary audio HAL when the service starts and use the value for setting
* the initial master volume across all HALs.  HALs which do not support
* this method may leave it set to NULL.
*/
int (*get_master_volume)(struct audio_hw_device *dev, float *volume);

/**
* set_mode is called when the audio mode changes. AUDIO_MODE_NORMAL mode
* is for standard audio playback, AUDIO_MODE_RINGTONE when a ringtone is
* playing, and AUDIO_MODE_IN_CALL when a call is in progress.
*/
int (*set_mode)(struct audio_hw_device *dev, audio_mode_t mode);

/* mic mute */
int (*set_mic_mute)(struct audio_hw_device *dev, bool state);
int (*get_mic_mute)(const struct audio_hw_device *dev, bool *state);

/* set/get global audio parameters (key1=value1;key2=value2;... pairs) */
int (*set_parameters)(struct audio_hw_device *dev, const char *kv_pairs);

/*
* Returns a pointer to a heap allocated string. The caller is responsible
* for freeing the memory for it using free().
*/
char * (*get_parameters)(const struct audio_hw_device *dev,
const char *keys);

/* Returns audio input buffer size according to parameters passed or
* 0 if one of the parameters is not supported.
* See also get_buffer_size which is for a particular stream.
*/
size_t (*get_input_buffer_size)(const struct audio_hw_device *dev,
const struct audio_config *config);

/** This method creates and opens the audio hardware output stream */
int (*open_output_stream)(struct audio_hw_device *dev,
audio_io_handle_t handle,
audio_devices_t devices,
audio_output_flags_t flags,
struct audio_config *config,
struct audio_stream_out **stream_out);

void (*close_output_stream)(struct audio_hw_device *dev,
struct audio_stream_out* stream_out);

/** This method creates and opens the audio hardware input stream */
int (*open_input_stream)(struct audio_hw_device *dev,
audio_io_handle_t handle,
audio_devices_t devices,
struct audio_config *config,
struct audio_stream_in **stream_in);

void (*close_input_stream)(struct audio_hw_device *dev,
struct audio_stream_in *stream_in);

/** This method dumps the state of the audio hardware */
int (*dump)(const struct audio_hw_device *dev, int fd);
};
typedef struct audio_hw_device audio_hw_device_t;

下面分析adev_open,其完成上述音频HAL的API接口的填充。

/*
 * adev_open - HAL open entry point reached through hal_module_methods.
 *
 * Allocates the device, fills the audio_hw_device vtable with the adev_*
 * implementations, initialises the audio route shadow (audio_route_init)
 * and hands the device back through *device.
 *
 * Returns 0 on success, -EINVAL for an unknown interface name or route
 * init failure, -ENOMEM on allocation failure.
 *
 * Fixes vs. the quoted original:
 *  - "adev_set_master_volume" had stray text ("4000") spliced into the
 *    identifier by the page extractor;
 *  - audio_route_init() failure is now propagated instead of returning a
 *    device with a NULL route (the excerpt declared an unused `ret`,
 *    suggesting elided error handling).
 */
static int adev_open(const hw_module_t* module, const char* name,
                     hw_device_t** device)
{
    struct audio_device *adev;

    /* only the primary audio hardware interface is supported */
    if (strcmp(name, AUDIO_HARDWARE_INTERFACE) != 0)
        return -EINVAL;

    adev = calloc(1, sizeof(struct audio_device));
    if (!adev)
        return -ENOMEM;

    /* hw_device_t boilerplate */
    adev->hw_device.common.tag = HARDWARE_DEVICE_TAG;
    adev->hw_device.common.version = AUDIO_DEVICE_API_VERSION_1_0;
    adev->hw_device.common.module = (struct hw_module_t *) module;
    adev->hw_device.common.close = adev_close;

    /* audio_hw_device vtable */
    adev->hw_device.get_supported_devices = adev_get_supported_devices;
    adev->hw_device.init_check = adev_init_check;
    adev->hw_device.set_voice_volume = adev_set_voice_volume;
    adev->hw_device.set_master_volume = adev_set_master_volume;
    adev->hw_device.set_mode = adev_set_mode;
    adev->hw_device.set_mic_mute = adev_set_mic_mute;
    adev->hw_device.get_mic_mute = adev_get_mic_mute;
    adev->hw_device.set_parameters = adev_set_parameters;
    adev->hw_device.get_parameters = adev_get_parameters;
    adev->hw_device.get_input_buffer_size = adev_get_input_buffer_size;
    adev->hw_device.open_output_stream = adev_open_output_stream;
    adev->hw_device.close_output_stream = adev_close_output_stream;
    adev->hw_device.open_input_stream = adev_open_input_stream;
    adev->hw_device.close_input_stream = adev_close_input_stream;
    adev->hw_device.dump = adev_dump;

    /* build the mixer shadow and apply the initial audio route */
    adev->ar = audio_route_init();
    if (!adev->ar) {
        free(adev);
        return -EINVAL;
    }
    adev->orientation = ORIENTATION_UNDEFINED;

    *device = &adev->hw_device.common;

    return 0;
}


其完成audio_hw_device的接口初始化,并调用audio_route_init,填充hw_device_t指针。
调用audio_route_init,则运行到hardware/ti/omap3/audio/audio_route.c

/*
 * audio_route_init - abridged excerpt; locals, declarations and error
 * handling were elided by the article author (marked "……").
 *
 * Opens the tinyalsa mixer shadow of the card's kcontrols via
 * mixer_open(), then streams the mixer XML (MIXER_XML_PATH) through
 * expat to build the path/control tables, applies the parsed initial
 * state (update_mixer_state) and records the resulting control values
 * as reset defaults (save_mixer_state).
 */
struct audio_route *audio_route_init(void)
{
……
ar->mixer = mixer_open(MIXER_CARD);

file = fopen(MIXER_XML_PATH, "r");

parser = XML_ParserCreate(NULL);

for (;;) {
buf = XML_GetBuffer(parser, BUF_SIZE);

/* NOTE(review): fread() returns size_t, so "bytes_read < 0" can never
 * be true unless bytes_read is declared signed — confirm in the full
 * source; the loop-exit condition is also elided from this excerpt */
bytes_read = fread(buf, 1, BUF_SIZE, file);
if (bytes_read < 0)
goto err_parse;

if (XML_ParseBuffer(parser, bytes_read,
bytes_read == 0) == XML_STATUS_ERROR) {
}
}
update_mixer_state(ar);
save_mixer_state(ar);
……
}


删除一些错误处理及无关紧要的部分,留下该函数的躯干,其先调用mixer_open,调用到tinyalsa中,完成snd_card声卡的snd_kcontrol的各信息同步建立到tinyalsa的一个备份。再读取/system/etc/mixer_paths.xml,获取初始mixer设置。获取保存在ar中,后面通过调用update_mixer_state(ar),也调用到tinyalsa,完成对于配置路径中各个snd_kcontrol的配置。最后重新把配置的snd_kcontrol的值读取到ar响应的reset_value中,作为以后复位值使用,其也获取了响应的path的ctl路径,在需要进行该条路径播放的时候,在audio_hw.c中的select_devices中就会先复位audio的路径ctl,之后会根据各自的路径进行应用ctl对音频路径进行配置。具体ctl的设置和读取留到下面tinyalsa时再细述。
 
安卓系统对于打开播放和路径的流程在《深入剖析Android音频之AudioPolicyService》提到过,此处不再复述,此处直接从音频API的相关接口进行细述。
 
对于播放流,设计对应的安卓给出的实现API。如下。
/*
 * Common parameters and operations shared by input and output streams;
 * embedded as the first member of audio_stream_out / audio_stream_in
 * so either can be downcast to an audio_stream.
 */
struct audio_stream {

/**
* Return the sampling rate in Hz - eg. 44100.
*/
uint32_t (*get_sample_rate)(const struct audio_stream *stream);

/* currently unused - use set_parameters with key
*    AUDIO_PARAMETER_STREAM_SAMPLING_RATE
*/
int (*set_sample_rate)(struct audio_stream *stream, uint32_t rate);

/**
* Return size of input/output buffer in bytes for this stream - eg. 4800.
* It should be a multiple of the frame size.  See also get_input_buffer_size.
*/
size_t (*get_buffer_size)(const struct audio_stream *stream);

/**
* Return the channel mask -
*  e.g. AUDIO_CHANNEL_OUT_STEREO or AUDIO_CHANNEL_IN_STEREO
*/
audio_channel_mask_t (*get_channels)(const struct audio_stream *stream);

/**
* Return the audio format - e.g. AUDIO_FORMAT_PCM_16_BIT
*/
audio_format_t (*get_format)(const struct audio_stream *stream);

/* currently unused - use set_parameters with key
*     AUDIO_PARAMETER_STREAM_FORMAT
*/
int (*set_format)(struct audio_stream *stream, audio_format_t format);

/**
* Put the audio hardware input/output into standby mode.
* Driver should exit from standby mode at the next I/O operation.
* Returns 0 on success and <0 on failure.
*/
int (*standby)(struct audio_stream *stream);

/** dump the state of the audio input/output device */
int (*dump)(const struct audio_stream *stream, int fd);

/** Return the set of device(s) which this stream is connected to */
audio_devices_t (*get_device)(const struct audio_stream *stream);

/**
* Currently unused - set_device() corresponds to set_parameters() with key
* AUDIO_PARAMETER_STREAM_ROUTING for both input and output.
* AUDIO_PARAMETER_STREAM_INPUT_SOURCE is an additional information used by
* input streams only.
*/
int (*set_device)(struct audio_stream *stream, audio_devices_t device);

/**
* set/get audio stream parameters. The function accepts a list of
* parameter key value pairs in the form: key1=value1;key2=value2;...
*
* Some keys are reserved for standard parameters (See AudioParameter class)
*
* If the implementation does not accept a parameter change while
* the output is active but the parameter is acceptable otherwise, it must
* return -ENOSYS.
*
* The audio flinger will put the stream in standby and then change the
* parameter value.
*/
int (*set_parameters)(struct audio_stream *stream, const char *kv_pairs);

/*
* Returns a pointer to a heap allocated string. The caller is responsible
* for freeing the memory for it using free().
*/
char * (*get_parameters)(const struct audio_stream *stream,
const char *keys);
int (*add_audio_effect)(const struct audio_stream *stream,
effect_handle_t effect);
int (*remove_audio_effect)(const struct audio_stream *stream,
effect_handle_t effect);
};
typedef struct audio_stream audio_stream_t;

/* Playback stream: extends audio_stream with output-only operations. */
struct audio_stream_out {
struct audio_stream common;

/**
* Return the audio hardware driver estimated latency in milliseconds.
*/
uint32_t (*get_latency)(const struct audio_stream_out *stream);

/**
* Use this method in situations where audio mixing is done in the
* hardware. This method serves as a direct interface with hardware,
* allowing you to directly set the volume as opposed to via the framework.
* This method might produce multiple PCM outputs or hardware accelerated
* codecs, such as MP3 or AAC.
*/
int (*set_volume)(struct audio_stream_out *stream, float left, float right);

/**
* Write audio buffer to driver. Returns number of bytes written, or a
* negative status_t. If at least one frame was written successfully prior to the error,
* it is suggested that the driver return that successful (short) byte count
* and then return an error in the subsequent call.
*/
ssize_t (*write)(struct audio_stream_out *stream, const void* buffer,
size_t bytes);

/* return the number of audio frames written by the audio dsp to DAC since
* the output has exited standby
*/
int (*get_render_position)(const struct audio_stream_out *stream,
uint32_t *dsp_frames);

/**
* get the local time at which the next write to the audio driver will be presented.
* The units are microseconds, where the epoch is decided by the local audio HAL.
*/
int (*get_next_write_timestamp)(const struct audio_stream_out *stream,
int64_t *timestamp);

};
typedef struct audio_stream_out audio_stream_out_t;

/* Capture stream: extends audio_stream with input-only operations. */
struct audio_stream_in {
struct audio_stream common;

/** set the input gain for the audio driver. This method is
*  for future use */
int (*set_gain)(struct audio_stream_in *stream, float gain);

/** Read audio buffer in from audio driver. Returns number of bytes read, or a
*  negative status_t. If at least one frame was read prior to the error,
*  read should return that byte count and then return an error in the subsequent call.
*/
ssize_t (*read)(struct audio_stream_in *stream, void* buffer,
size_t bytes);

/**
* Return the amount of input frames lost in the audio driver since the
* last call of this function.
* Audio driver is expected to reset the value to 0 and restart counting
* upon returning the current value by this function call.
* Such loss typically occurs when the user space process is blocked
* longer than the capacity of audio driver buffers.
*
* Unit: the number of input audio frames
*/
uint32_t (*get_input_frames_lost)(struct audio_stream_in *stream);
};
typedef struct audio_stream_in audio_stream_in_t;


先分析播放,omap3的实现接口为adev_open_output_stream。

/*
 * adev_open_output_stream - create and open a playback stream.
 *
 * Fills the audio_stream_out vtable with the out_* implementations,
 * reports the HAL's fixed output format/channels/rate back through
 * *config (the caller's requested values are not honoured), and returns
 * the stream in standby.  Returns 0 on success, -ENOMEM on allocation
 * failure.
 *
 * Fixes vs. the quoted original: extraction had fused tokens
 * ("structaudio_hw_device", "intret", "out=", "=out_get_sample_rate"),
 * and the dead err_open label / unused `ret` are dropped.
 */
static int adev_open_output_stream(struct audio_hw_device *dev,
                                   audio_io_handle_t handle,
                                   audio_devices_t devices,
                                   audio_output_flags_t flags,
                                   struct audio_config *config,
                                   struct audio_stream_out **stream_out)
{
    struct audio_device *adev = (struct audio_device *)dev;
    struct stream_out *out;

    out = (struct stream_out *)calloc(1, sizeof(struct stream_out));
    if (!out)
        return -ENOMEM;

    /* common audio_stream operations */
    out->stream.common.get_sample_rate = out_get_sample_rate;
    out->stream.common.set_sample_rate = out_set_sample_rate;
    out->stream.common.get_buffer_size = out_get_buffer_size;
    out->stream.common.get_channels = out_get_channels;
    out->stream.common.get_format = out_get_format;
    out->stream.common.set_format = out_set_format;
    out->stream.common.standby = out_standby;
    out->stream.common.dump = out_dump;
    out->stream.common.set_parameters = out_set_parameters;
    out->stream.common.get_parameters = out_get_parameters;
    out->stream.common.add_audio_effect = out_add_audio_effect;
    out->stream.common.remove_audio_effect = out_remove_audio_effect;
    /* output-only operations */
    out->stream.get_latency = out_get_latency;
    out->stream.set_volume = out_set_volume;
    out->stream.write = out_write;
    out->stream.get_render_position = out_get_render_position;
    out->stream.get_next_write_timestamp = out_get_next_write_timestamp;

    out->dev = adev;

    /* single fixed output configuration: report it back to the caller */
    config->format = out_get_format(&out->stream.common);
    config->channel_mask = out_get_channels(&out->stream.common);
    config->sample_rate = out_get_sample_rate(&out->stream.common);

    /* stream starts in standby; see out_standby/do_out_standby */
    out->standby = true;

    *stream_out = &out->stream;
    return 0;
}
其初始化audio_stream_out和audio_config,并返回给调用者。
上面的函数功能依次如下:
out_get_sample_rate:返回pcm_config_out的rate,定义为OUT_SAMPLING_RATE,44.1khz
out_set_sample_rate:返回错误,不支持该设置。
out_get_buffer_size:返回pcm_config_out的period_size与channels及format对应的采样字节相乘获得每次对应的buffer大小。
out_get_channels:返回AUDIO_CHANNEL_OUT_STEREO,双声道立体声。
out_get_format:返回AUDIO_FORMAT_PCM_16_BIT,采样格式。
out_set_format:返回错误,不支持设置。
out_standby:上锁后调用do_out_standby,其也是调用pcm_close,并hal层释放相关的缓冲。pcm_close在tinyalsa开始细述,此处从略。
out_dump:未实现,返回0。
out_set_parameters:获取传入的路径param,获取成功则调用select_devices选定对应的位指定的路径名称,再复位音频路径,之后设置对应的路径名称(如speaker等)对应的音频路径。
out_get_parameters:未实现。
out_add_audio_effect:未实现。
out_remove_audio_effect:未实现。
out_get_latency:获取采样周期数后,对周期数与周期采样字节长度相乘后除以采样速率得出延时数。
out_set_volume:未实现。
out_write:写入流数据,获取kernel缓冲区信息,并做流控,最后通过pcm_write写入数据到音频流中。
out_get_render_position:未实现。
out_get_next_write_timestamp:未实现。
 
继续分析录音:
static int adev_open_input_stream(structaudio_hw_device *dev,
audio_io_handle_t handle,
audio_devices_tdevices,
structaudio_config *config,
structaudio_stream_in **stream_in)
{
struct audio_device *adev = (struct audio_device *)dev;
struct stream_in *in;
intret;

*stream_in = NULL;

/*Respond with a request for mono if a different format is given. */
if(config->channel_mask != AUDIO_CHANNEL_IN_MONO) {
config->channel_mask = AUDIO_CHANNEL_IN_MONO;
return -EINVAL;
}

in= (struct stream_in *)calloc(1, sizeof(struct stream_in));
if(!in)
return -ENOMEM;

in->stream.common.get_sample_rate = in_get_sample_rate;
in->stream.common.set_sample_rate = in_set_sample_rate;
in->stream.common.get_buffer_size = in_get_buffer_size;
in->stream.common.get_channels = in_get_channels;
in->stream.common.get_format = in_get_format;
in->stream.common.set_format = in_set_format;
in->stream.common.standby = in_standby;
in->stream.common.dump = in_dump;
in->stream.common.set_parameters = in_set_parameters;
in->stream.common.get_parameters = in_get_parameters;
in->stream.common.add_audio_effect = in_add_audio_effect;
in->stream.common.remove_audio_effect = in_remove_audio_effect;
in->stream.set_gain = in_set_gain;
in->stream.read = in_read;
in->stream.get_input_frames_lost = in_get_input_frames_lost;

in->dev = adev;
in->standby = true;
in->requested_rate = config->sample_rate;
in->pcm_config = &pcm_config_in; /* default PCM config */

*stream_in = &in->stream;
return 0;
}

其初始化audio_stream_in和audio_config,并返回给调用者。
in_get_sample_rate:返回audio_stream_in的requested_rate。
in_set_sample_rate:不支持该设置。
in_get_buffer_size:返回request_rate和标准config的rate算出比例乘以计算的buffer大小,pcm_config_in的period_size与channels及format对应的采样字节相乘获得每次对应的buffer计算大小。
in_get_channels:返回AUDIO_CHANNEL_IN_MONO,单声道。
in_get_format:返回AUDIO_FORMAT_PCM_16_BIT,采样格式。
in_set_format:返回错误,不支持设置。
in_standby:上锁后调用do_in_standby,其也是调用pcm_close,并hal层释放相关的缓冲。pcm_close在tinyalsa开始细述,此处从略。
in_dump:未实现,返回0。
in_set_parameters:获取传入的路径param,获取成功则调用select_devices选定对应的位指定的路径名称,再复位音频路径,之后设置对应的路径名称(如speaker等)对应的音频路径。
in_get_parameters:未实现。
in_add_audio_effect:未实现。
in_remove_audio_effect:未实现。
in_set_gain:未实现。
in_read:读取流数据,通过pcm_read读取数据到缓冲区中。
in_get_input_frames_lost:未实现。
 
下面对于安卓分析tinyalsa。我们主要针对于两个公用文件mixer.c和pcm.c,对于三个小调试工具我们不进行分析。分析好这些公用函数后,我们在回头对路径的设置和数据流的读写进行总体性框架的分析。
 
我们对于tinyalsa的分析也不是全部覆盖,我们着重分析其与内核ALSA框架的交互,对于安卓的交互,我们细述后,会对此进行初步的介绍,相信都有这个能力秒懂。
 
先分析mixer.c,我们着重分析mixer_open。
 
先看下安卓建立与内核ALSA对应的snd_kcontrol的结构,并作为内核ALSA对应的影子保存在该层,快速的交互安卓的请求而不用继续系统调用到内核ALSA进行数据交互。只有在有必要操作音频硬件的时候,才进行与内核ALSA的数据交互完成对于音频硬件的操作,并对影子进行数据更新。

/* Userspace wrapper for one kernel snd_kcontrol. */
struct mixer_ctl {
struct mixer *mixer;	/* back-pointer to the owning mixer */
struct snd_ctl_elem_info *info;	/* mirrors the kernel's snd_ctl_elem_info */
char **ename;		/* per-item name strings for enumerated controls */
};

/* Userspace shadow of one sound card's whole control interface. */
struct mixer {
int fd;	/* fd of the opened /dev/snd/controlCx node */
struct snd_ctl_elem_info *info;	/* array mirroring the kernel's elem infos */
struct mixer_ctl *ctl;		/* parallel array of wrappers, one per kcontrol */
unsigned int count;			/* number of entries in ctl/info */
};

struct mixer *mixer_open(unsigned int card)
{
struct snd_ctl_elem_list elist;
struct snd_ctl_elem_info tmp;
struct snd_ctl_elem_id *eid = NULL;
struct mixer *mixer = NULL;
unsigned int n, m;
int fd;
char fn[256];

/* 获取对应操作的文件句柄名称 */
snprintf(fn, sizeof(fn), "/dev/snd/controlC%u", card);
fd = open(fn, O_RDWR);
if (fd < 0)
return 0;

/* 第一遍调用SNDRV_CTL_IOCTL_ELEM_LIST,获取snd_kcontrol的个数 */
memset(&elist, 0, sizeof(elist));
if (ioctl(fd, SNDRV_CTL_IOCTL_ELEM_LIST, &elist) < 0)
goto fail;

mixer = calloc(1, sizeof(*mixer));
if (!mixer)
goto fail;

/* 获取个数后,对应分配空间 */
mixer->ctl = calloc(elist.count, sizeof(struct mixer_ctl));
mixer->info = calloc(elist.count, sizeof(struct snd_ctl_elem_info));
if (!mixer->ctl || !mixer->info)
goto fail;

eid = calloc(elist.count, sizeof(struct snd_ctl_elem_id));
if (!eid)
goto fail;

mixer->count = elist.count;
mixer->fd = fd;
elist.space = mixer->count;
elist.pids = eid;

/* 第二遍调用,获取对应的snd_ctl_elem_info数据,且numid从1开始,0表示无效 */
if (ioctl(fd, SNDRV_CTL_IOCTL_ELEM_LIST, &elist) < 0)
goto fail;

for (n = 0; n < mixer->count; n++) {
struct snd_ctl_elem_info *ei = mixer->info + n;
ei->id.numid = eid
.numid;
/*
* 根据获取的snd_kcontrol对应的numid,调用获取对应的snd_ctl_elem_info,
* 内核的具体步骤是,根据numid,获取对应的snd_kcontrol,之后调用其info操* 作接口,填充用户传入snd_ctl_elem_info缓冲。
*/
if (ioctl(fd, SNDRV_CTL_IOCTL_ELEM_INFO, ei) < 0)
goto fail;
mixer->ctl
.info = ei;
mixer->ctl
.mixer = mixer;
if (ei->type == SNDRV_CTL_ELEM_TYPE_ENUMERATED) {
/* 如果为枚举量,还需要进一步获取每个枚举量对应的字符串描述 */
char **enames = calloc(ei->value.enumerated.items, sizeof(char*));
if (!enames)
goto fail;
mixer->ctl
.ename = enames;
/* ei->value.enumerated.items为该枚举量的枚举项的个数 */
for (m = 0; m < ei->value.enumerated.items; m++) {
memset(&tmp, 0, sizeof(tmp));
tmp.id.numid = ei->id.numid;
tmp.value.enumerated.item = m;
/*
* 根据numid和枚举量的item,选择对应的枚举项,进入获取对应
* snd_enum的字符串描述。对应的枚举类,其private_data为soc_enum。
* 在给出指定的item时,其会拷贝
* soc_enum->texts[uinfo->value.enumerated.item]到name中。
*/
if (ioctl(fd, SNDRV_CTL_IOCTL_ELEM_INFO, &tmp) < 0)
goto fail;
enames[m] = strdup(tmp.value.enumerated.name);
if (!enames[m])
goto fail;
}
}
}
free(eid);
return mixer;

fail:
/* TODO: verify frees in failure case */
if (eid)
free(eid);
if (mixer)
mixer_close(mixer);
else if (fd >= 0)
close(fd);
return 0;
}


下面继续分析与系统调用交互的,下面的调用包含SNDRV_CTL_IOCTL_ELEM_READ和SNDRV_CTL_IOCTL_ELEM_WRITE,分别用于对当前snd_kcontrol控制元件的当前值读取和设置的系统接口。其他函数也调用这些接口的在此处一并细述,不在复述。
/*
 * mixer_ctl_set_value - set channel `id` of control `ctl` to `value`,
 * preserving the other channels via a read-modify-write of the whole
 * snd_ctl_elem_value.  Returns 0 on success or a negative errno.
 */
int mixer_ctl_set_value(struct mixer_ctl *ctl, unsigned int id, int value)
{
struct snd_ctl_elem_value ev;
int ret;

if (!ctl || (id >= ctl->info->count))
return -EINVAL;

memset(&ev, 0, sizeof(ev));
ev.id.numid = ctl->info->id.numid;
/*
 * ELEM_READ reaches the kernel's snd_ctl_elem_read(): it looks up the
 * snd_kcontrol by ev's numid, checks the read-access bits on the
 * matching snd_kcontrol_volatile, then calls snd_kcontrol->get() to
 * fill this snd_ctl_elem_value with the current channel values.
 */
ret = ioctl(ctl->mixer->fd, SNDRV_CTL_IOCTL_ELEM_READ, &ev);
if (ret < 0)
return ret;

switch (ctl->info->type) {
case SNDRV_CTL_ELEM_TYPE_BOOLEAN:
ev.value.integer.value[id] = !!value;
break;

case SNDRV_CTL_ELEM_TYPE_INTEGER:
ev.value.integer.value[id] = value;
break;

case SNDRV_CTL_ELEM_TYPE_ENUMERATED:
ev.value.enumerated.item[id] = value;
break;

default:
return -EINVAL;
}
/*
 * ELEM_WRITE reaches snd_ctl_elem_write(): same numid lookup, a
 * write-access check, then snd_kcontrol->put() consumes the values
 * and updates the codec register for this control (via
 * snd_soc_update_bits_locked for ASoC controls).
 */

return ioctl(ctl->mixer->fd, SNDRV_CTL_IOCTL_ELEM_WRITE, &ev);
}


由于对安卓上层的调用,tinyalsa除非需要操作硬件的情况下,自身通过是内核snd_kcontrol的影子,直接可以对安卓上层进行响应。诸如获取numid对应的snd_kcontrol等其他,具体参阅mixer.c中,其较为简单。
 
下面分析pcm.c,其主要管理对于pcm流设备的打开和流数据的管理。pcm打开的流程及流数据的管理在内核中怎么实现,我们暂且搁置,会在内核ALSA的pcm模块进行细述。
 
先准备预备知识,对于学过信号与系统的都了解,我们对于模拟信号转到数字信号的处理流程为(采样->量化->编码)。

/* Stream configuration handed to pcm_open(). */
struct pcm_config {
unsigned int channels;		// number of channels per frame
unsigned int rate;			// sample rate in Hz
unsigned int period_size;	// frames per period (not bytes: see pcm_open's
				// buffer_size math and pcm_frames_to_bytes)
unsigned int period_count;	// periods per ring buffer
enum pcm_format format;	// sample format / bit depth

/* Values to use for the ALSA start, stop and silence thresholds.  Setting
* any one of these values to 0 will cause the default tinyalsa values to be
* used instead.  Tinyalsa defaults are as follows.
*
* start_threshold   : period_count * period_size
* stop_threshold    : period_count * period_size
* silence_threshold : 0
*/

/* The hardware starts transferring once the buffer holds at least this
* much data.  Too large: start-to-sound latency grows and very short
* sounds may never play; too small: easy to underrun (XRUN).
*/
unsigned int start_threshold;
unsigned int stop_threshold;
unsigned int silence_threshold;	//min distance from noise for silence filling

/* Minimum number of frames available before pcm_mmap_write() will actually
* write into the kernel buffer. Only used if the stream is opened in mmap mode
* (pcm_open() called with PCM_MMAP flag set).   Use 0 for default.
*/
int avail_min;
};


下面先分析pcm_open。
struct pcm *pcm_open(unsigned int card, unsigned int device,
unsigned int flags, struct pcm_config *config)
{
struct pcm *pcm;
struct snd_pcm_info info;
struct snd_pcm_hw_params params;
struct snd_pcm_sw_params sparams;
char fn[256];
int rc;

pcm = calloc(1, sizeof(struct pcm));
if (!pcm || !config)
return &bad_pcm; /* TODO: could support default config here */

pcm->config = *config;

/* 根据传入的参数,获取打开的设备节点名称 */
snprintf(fn, sizeof(fn), "/dev/snd/pcmC%uD%u%c", card, device,
flags & PCM_IN ? 'c' : 'p');

pcm->flags = flags;

/* 调用open,内核中会进行很多的工作,诸如初始化对应的hw_params,sw_params及
* 追加hw_constraints限制rules,ASOC部件的startup或open操作等。
*/
pcm->fd = open(fn, O_RDWR);
if (pcm->fd < 0) {
oops(pcm, errno, "cannot open device '%s'", fn);
return pcm;
}
/* 获取pcm流的snd_pcm_info信息 */
if (ioctl(pcm->fd, SNDRV_PCM_IOCTL_INFO, &info)) {
oops(pcm, errno, "cannot get info");
goto fail_close;
}

param_init(¶ms);	//初始化都所有都mask,且设置范围为最大

//根据pcm_config的format,配置substream的FORMAT的ACCESS属性
param_set_mask(¶ms, SNDRV_PCM_HW_PARAM_FORMAT,
pcm_format_to_alsa(config->format));
param_set_mask(¶ms, SNDRV_PCM_HW_PARAM_SUBFORMAT,
SNDRV_PCM_SUBFORMAT_STD);
//设置对应的snd_interval,下面类推
param_set_min(¶ms, SNDRV_PCM_HW_PARAM_PERIOD_SIZE, config->period_size);
param_set_int(¶ms, SNDRV_PCM_HW_PARAM_SAMPLE_BITS,
pcm_format_to_bits(config->format));
param_set_int(¶ms, SNDRV_PCM_HW_PARAM_FRAME_BITS,
pcm_format_to_bits(config->format) * config->channels);
param_set_int(¶ms, SNDRV_PCM_HW_PARAM_CHANNELS,
config->channels);
param_set_int(¶ms, SNDRV_PCM_HW_PARAM_PERIODS, config->period_count);
param_set_int(¶ms, SNDRV_PCM_HW_PARAM_RATE, config->rate);

if (flags & PCM_NOIRQ) {

if (!(flags & PCM_MMAP)) {
oops(pcm, -EINVAL, "noirq only currently supported with mmap().");
goto fail;
}

params.flags |= SNDRV_PCM_HW_PARAMS_NO_PERIOD_WAKEUP;
pcm->noirq_frames_per_msec = config->rate / 1000;
}

if (flags & PCM_MMAP)
//设置MMAP的ACCESS属性,对应在snd_mask中
param_set_mask(¶ms, SNDRV_PCM_HW_PARAM_ACCESS,
SNDRV_PCM_ACCESS_MMAP_INTERLEAVED);	// interleaved mmap
else
param_set_mask(¶ms, SNDRV_PCM_HW_PARAM_ACCESS,
SNDRV_PCM_ACCESS_RW_INTERLEAVED);		// readi/writei

/* 更新音频流的hw_params到音频的runtime,其过程在内核ALSA的pcm模块分析 */
if (ioctl(pcm->fd, SNDRV_PCM_IOCTL_HW_PARAMS, ¶ms)) {
oops(pcm, errno, "cannot set hw params");
goto fail_close;
}

/* get our refined hw_params */
config->period_size = param_get_int(¶ms, SNDRV_PCM_HW_PARAM_PERIOD_SIZE);
config->period_count = param_get_int(¶ms, SNDRV_PCM_HW_PARAM_PERIODS);
pcm->buffer_size = config->period_count * config->period_size;	/* 每个间隔的buffer大小 */

if (flags & PCM_MMAP) {
/*
* 映射到linux应用层下,由linux应用主动进行管理音频数据流缓冲数据, 我们* 不这样用。
*/
pcm->mmap_buffer = mmap(NULL, pcm_frames_to_bytes(pcm, pcm->buffer_size),
PROT_READ | PROT_WRITE, MAP_FILE | MAP_SHARED, pcm->fd, 0);
if (pcm->mmap_buffer == MAP_FAILED) {
oops(pcm, -errno, "failed to mmap buffer %d bytes\n",
pcm_frames_to_bytes(pcm, pcm->buffer_size));
goto fail_close;
}
}

memset(&sparams, 0, sizeof(sparams));
sparams.tstamp_mode = SNDRV_PCM_TSTAMP_ENABLE;	//默认打开
sparams.period_step = 1;

//下面设置缓冲区对应的阀值,对应于硬件的音频流流控,见上面对该成员的相关细述
if (!config->start_threshold)
pcm->config.start_threshold = sparams.start_threshold =
config->period_count * config->period_size / 2;
else
sparams.start_threshold = config->start_threshold;

/* pick a high stop threshold - todo: does this need further tuning */
if (!config->stop_threshold) {
if (pcm->flags & PCM_IN)
pcm->config.stop_threshold = sparams.stop_threshold =
config->period_count * config->period_size * 10;
else
pcm->config.stop_threshold = sparams.stop_threshold =
config->period_count * config->period_size;
}
else
sparams.stop_threshold = config->stop_threshold;
//见上面英文细述
if (!pcm->config.avail_min) {
if (pcm->flags & PCM_MMAP)
pcm->config.avail_min = sparams.avail_min = pcm->config.period_size;
else
pcm->config.avail_min = sparams.avail_min = 1;
} else
sparams.avail_min = config->avail_min;

sparams.xfer_align = config->period_size / 2; /* needed for old kernels */
sparams.silence_size = 0;
sparams.silence_threshold = config->silence_threshold;
pcm->boundary = sparams.boundary = pcm->buffer_size;	//每个流媒体数据帧的大小

//一般pcm->boundary都会比较大,组件出来的是一个数据FIFO.
while (pcm->boundary * 2 <= INT_MAX - pcm->buffer_size)
pcm->boundary *= 2;
//将设置好的sw_params,设置到对应的音频runtime中。
if (ioctl(pcm->fd, SNDRV_PCM_IOCTL_SW_PARAMS, &sparams)) {
oops(pcm, errno, "cannot set sw params");
goto fail;
}

//映射内核对于音频流管理的FIFO的管理数据到应用空间中。
rc = pcm_hw_mmap_status(pcm);
if (rc < 0) {
oops(pcm, rc, "mmap status failed");
goto fail;
}

pcm->underruns = 0;
return pcm;

fail:
if (flags & PCM_MMAP)
munmap(pcm->mmap_buffer, pcm_frames_to_bytes(pcm, pcm->buffer_size));
fail_close:
close(pcm->fd);
pcm->fd = -1;
return pcm;
}


下面分析pcm_start

/*
 * pcm_start - prepare the substream and kick it into the running state.
 * Returns 0 on success or a negative errno via oops().
 */
int pcm_start(struct pcm *pcm)
{
/* PREPARE resets the stream into the prepared (pre-roll) state */
if (ioctl(pcm->fd, SNDRV_PCM_IOCTL_PREPARE) < 0)
return oops(pcm, errno, "cannot prepare channel");

if (pcm->flags & PCM_MMAP)
pcm_sync_ptr(pcm, 0);
/* START transitions the stream into the running state */
if (ioctl(pcm->fd, SNDRV_PCM_IOCTL_START) < 0)
return oops(pcm, errno, "cannot start channel");

pcm->running = 1;
return 0;
}

/*
 * pcm_stop - stop the stream immediately (DROP discards pending data).
 * Returns 0 on success or a negative errno via oops().
 */
int pcm_stop(struct pcm *pcm)
{
/* DROP moves the stream to the stopped state */
if (ioctl(pcm->fd, SNDRV_PCM_IOCTL_DROP) < 0)
return oops(pcm, errno, "cannot stop channel");

pcm->running = 0;
return 0;
}

/* Transfer descriptor used by the READI/WRITEI ioctls (kernel struct). */
struct snd_xferi {
snd_pcm_sframes_t result;
void __user *buf;
snd_pcm_uframes_t frames;
};

/*
 * pcm_read - blocking interleaved read of `count` bytes into `data`.
 * Starts the stream if it is not running and retries after an overrun
 * (EPIPE).  Returns 0 on success or a negative errno via oops().
 */
int pcm_read(struct pcm *pcm, void *data, unsigned int count)
{
struct snd_xferi x;

if (!(pcm->flags & PCM_IN))
return -EINVAL;

/* transfers are sized in frames (channels * bytes per sample); byte
 * counts that are not frame-aligned would misalign the stream */
x.buf = data;
x.frames = count / (pcm->config.channels *
pcm_format_to_bits(pcm->config.format) / 8);

for (;;) {
if (!pcm->running) {
pcm_start(pcm);
}
/* pull data from the kernel capture ring buffer */
if (ioctl(pcm->fd, SNDRV_PCM_IOCTL_READI_FRAMES, &x)) {
pcm->running = 0;
if (errno == EPIPE) {
/* we failed to make our window -- try to restart */
pcm->underruns++;
continue;
}
return oops(pcm, errno, "cannot read stream data");
}
return 0;
}
}

/*
 * pcm_write - blocking interleaved write of `count` bytes from `data`.
 *
 * On first use the stream is PREPAREd and the initial chunk is written
 * (which implicitly starts the stream once the start threshold is met).
 * On underrun (EPIPE) the write is retried unless PCM_NORESTART is set.
 * Returns 0 on success or a negative errno via oops().
 *
 * Fix vs. the quoted original: the line before the initial WRITEI ioctl
 * was a bare Chinese note with no comment delimiters, which would not
 * compile; it is restored as a proper comment.
 */
int pcm_write(struct pcm *pcm, const void *data, unsigned int count)
{
    struct snd_xferi x;

    /* writing only makes sense on a playback stream */
    if (pcm->flags & PCM_IN)
        return -EINVAL;

    /* convert byte count into frames (channels * bytes per sample) */
    x.buf = (void*)data;
    x.frames = count / (pcm->config.channels *
                        pcm_format_to_bits(pcm->config.format) / 8);

    for (;;) {
        /* not running yet: prepare the stream, then push the first chunk */
        if (!pcm->running) {
            if (ioctl(pcm->fd, SNDRV_PCM_IOCTL_PREPARE))
                return oops(pcm, errno, "cannot prepare channel");
            /* hand the initial data to the kernel ring buffer */
            if (ioctl(pcm->fd, SNDRV_PCM_IOCTL_WRITEI_FRAMES, &x))
                return oops(pcm, errno, "cannot write initial data");
            pcm->running = 1;
            return 0;
        }
        if (ioctl(pcm->fd, SNDRV_PCM_IOCTL_WRITEI_FRAMES, &x)) {
            pcm->running = 0;
            if (errno == EPIPE) {
                /* we failed to make our window -- try to restart if we are
                 * allowed to do so.  Otherwise, simply allow the EPIPE error to
                 * propagate up to the app level */
                pcm->underruns++;
                if (pcm->flags & PCM_NORESTART)
                    return -EPIPE;
                continue;
            }
            return oops(pcm, errno, "cannot write stream data");
        }
        return 0;
    }
}


至此,安卓OMAP3平台对于AUDIO模块与linux内核的交互部分已经全部列在上面。内核对于安卓HAL提交的这些系统调用,运行的流程我们会在内核ALSA的PCM部分进行细述。
 
 
 
下面对整体的框架我们进行一个基本的图表形式表达。
 
初始化如下:



打开输出流:



输入流相似,故不复述。
内容来自用户分享和网络整理,不保证内容的准确性,如有侵权内容,可联系管理员处理 点击这里给我发消息
标签: