
Android 4.0 MultiMedia Flow Analysis

2012-08-06 18:54
From the application layer's point of view, you only need to know how to use the MediaPlayer and AudioManager that the framework layer provides in order to implement simple audio and video playback.

However, understanding the implementation and call flow inside the media framework is very helpful for later extension work or bug fixing.
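Before diving into the framework, here is a minimal sketch of the application-level usage that the four steps below trace through the stack (the class name and the idea of passing a path are mine, purely for illustration):

import android.media.MediaPlayer;

import java.io.IOException;

public class PlaybackDemo {
    // Typical playback sequence at the application layer; each call
    // corresponds to one of the steps analyzed in this article.
    public static MediaPlayer play(String path) throws IOException {
        MediaPlayer player = new MediaPlayer();              // Step 1: constructor
        player.setDataSource(path);                          // Step 2: bind the data source
        player.setOnPreparedListener(new MediaPlayer.OnPreparedListener() {
            @Override
            public void onPrepared(MediaPlayer mp) {
                mp.start();                                  // Step 4: start once prepared
            }
        });
        player.prepareAsync();                               // Step 3: prepare off the calling thread
        return player;
    }
}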

Step 1. new MediaPlayer()

|- Java layer

      public MediaPlayer() {
        Looper looper;
        if ((looper = Looper.myLooper()) != null) {
            mEventHandler = new EventHandler(this, looper);
        } else if ((looper = Looper.getMainLooper()) != null) {
            mEventHandler = new EventHandler(this, looper);
        } else {
            mEventHandler = null;
        }

        /* Native setup requires a weak reference to our object.
         * It's easier to create it here than in C++.
         */
        native_setup(new WeakReference<MediaPlayer>(this));
      }

This constructor does two main things: first, it creates an EventHandler to handle events on the Looper; second, it calls the JNI native_setup() and passes a weak reference to itself down as a parameter.

The following static block in MediaPlayer.java shows where the JNI library is loaded and native_init() is called:

static {
    System.loadLibrary("media_jni");
    native_init();
}

|- JNI layer (frameworks/base/media/jni/android_media_MediaPlayer.cpp)

static void
android_media_MediaPlayer_native_setup(JNIEnv *env, jobject thiz, jobject weak_this)
{
    LOGV("native_setup");
    sp<MediaPlayer> mp = new MediaPlayer();  // create a native MediaPlayer
    if (mp == NULL) {
        jniThrowException(env, "java/lang/RuntimeException", "Out of memory");
        return;
    }

    // create new listener and give it to MediaPlayer
    sp<JNIMediaPlayerListener> listener = new JNIMediaPlayerListener(env, thiz, weak_this);
    mp->setListener(listener);

    // Stow our new C++ MediaPlayer in an opaque field in the Java object.
    setMediaPlayer(env, thiz, mp);
}
native_setup() creates a native-layer MediaPlayer and then registers a listener on it as a callback. This JNIMediaPlayerListener is what forwards native notify events back up to the Java EventHandler created above, which is how callbacks such as onPrepared and onCompletion eventually reach the application.
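For reference, these are the kinds of application-side callbacks that this notify path ultimately drives — a minimal sketch using the standard listener interfaces (the handler bodies are illustrative):

import android.media.MediaPlayer;

public class ListenerDemo {
    // The native notify() events travel JNIMediaPlayerListener -> Java
    // EventHandler -> the listeners registered here.
    public static void register(MediaPlayer mp) {
        mp.setOnCompletionListener(new MediaPlayer.OnCompletionListener() {
            @Override
            public void onCompletion(MediaPlayer p) {
                p.release();  // illustrative: tear down when playback ends
            }
        });
        mp.setOnErrorListener(new MediaPlayer.OnErrorListener() {
            @Override
            public boolean onError(MediaPlayer p, int what, int extra) {
                return true;  // true = handled, suppresses further handling
            }
        });
    }
}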

|- Native layer (frameworks/base/media/libmedia/mediaplayer.cpp)

MediaPlayer::MediaPlayer()
{
    LOGV("constructor");
    mListener = NULL;
    mCookie = NULL;
    mDuration = -1;
    mStreamType = AUDIO_STREAM_MUSIC;
    mCurrentPosition = -1;
    mSeekPosition = -1;
    mCurrentState = MEDIA_PLAYER_IDLE;
    mPrepareSync = false;
    mPrepareStatus = NO_ERROR;
    mLoop = false;
    mLeftVolume = mRightVolume = 1.0;
    mVideoWidth = mVideoHeight = 0;
    mLockThreadId = 0;
    mAudioSessionId = AudioSystem::newAudioSessionId();
    AudioSystem::acquireAudioSessionId(mAudioSessionId);
    mSendLevel = 0;
}
This constructor only does basic initialization, setting everything to default values. One line worth noting is mAudioSessionId = AudioSystem::newAudioSessionId(), which really calls AudioFlinger::newAudioSessionId(); an AudioSessionRef keeps this session id, which serves as a unique identifier. The subsequent acquireAudioSessionId() call then looks the id up in AudioFlinger: if the session id already exists, the corresponding AudioSessionRef's reference count is incremented; if it is not found, a new AudioSessionRef is created and added to mAudioSessionRefs for safekeeping.
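As an aside, the session id surfaces in the public API as MediaPlayer.getAudioSessionId(), and what it is typically used for is tying an audio effect to exactly this player's output. A minimal sketch (the choice of Equalizer is just an example):

import android.media.MediaPlayer;
import android.media.audiofx.Equalizer;

public class SessionEffectDemo {
    // Attach an Equalizer to the player's audio session; the effect then
    // applies only to the output identified by this session id.
    public static Equalizer attachEqualizer(MediaPlayer player) {
        Equalizer eq = new Equalizer(0 /* priority */, player.getAudioSessionId());
        eq.setEnabled(true);
        return eq;
    }
}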

status_t MediaPlayer::setListener(const sp<MediaPlayerListener>& listener)
{
    LOGV("setListener");
    Mutex::Autolock _l(mLock);
    mListener = listener;
    return NO_ERROR;
}
The listener passed down from JNI is stored in mListener, acting as an observer.

With that, construction of the MediaPlayer is complete. Let's keep going.

Step 2. MediaPlayer.setDataSource    

|- Java layer (frameworks/base/media/java/android/media/)

public void setDataSource(Context context, Uri uri)
public void setDataSource(String path)
...


MediaPlayer.java provides three flavors of setDataSource; the source can be a content Uri, a file path, or an http/rtsp URL.
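For illustration, the path and content-Uri flavors look like this from application code (the helper class is hypothetical); the content:// form matters below, because the service side handles it by opening a file descriptor:

import android.content.Context;
import android.media.MediaPlayer;
import android.net.Uri;

import java.io.IOException;

public class DataSourceDemo {
    // File-path / URL form: the native side picks a player via getPlayerType(url).
    public static void fromPath(MediaPlayer mp, String path) throws IOException {
        mp.setDataSource(path);
    }

    // Content-Uri form: ends up in the content:// branch of
    // Client::setDataSource shown below, which opens a file descriptor.
    public static void fromUri(MediaPlayer mp, Context ctx, Uri uri) throws IOException {
        mp.setDataSource(ctx, uri);
    }
}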

|- JNI layer (frameworks/base/media/jni)

static void
android_media_MediaPlayer_setDataSource(JNIEnv *env, jobject thiz, jstring path)
{
    android_media_MediaPlayer_setDataSourceAndHeaders(env, thiz, path, NULL, NULL);
}
static void
android_media_MediaPlayer_setDataSourceAndHeaders(
        JNIEnv *env, jobject thiz, jstring path,
        jobjectArray keys, jobjectArray values) {

    sp<MediaPlayer> mp = getMediaPlayer(env, thiz);
......
    status_t opStatus =
        mp->setDataSource(   // calls down into the native MediaPlayer::setDataSource()
                pathStr,
                headersVector.size() > 0? &headersVector : NULL);
}

|- Native layer

status_t MediaPlayer::setDataSource(
        const char *url, const KeyedVector<String8, String8> *headers)
{
    LOGV("setDataSource(%s)", url);
    status_t err = BAD_VALUE;
    if (url != NULL) {
        // getMediaPlayerService() returns the IBinder proxy for MediaPlayerService
        const sp<IMediaPlayerService>& service(getMediaPlayerService());
        if (service != 0) {
            sp<IMediaPlayer> player(service->create(getpid(), this, mAudioSessionId));
            if (NO_ERROR != player->setDataSource(url, headers)) {
                player.clear();
            }
            err = attachNewPlayer(player);
        }
    }
    return err;
}

     |- sp<IMediaPlayer> player(service->create(getpid(), this, mAudioSessionId));  Through the Binder mechanism this invokes MediaPlayerService::create(), which news up a Client, returns it to the client-side MediaPlayer as an IMediaPlayer, and keeps the Client in the service-side mClients list.

         The Client constructor uses the audioSessionId mentioned earlier: "mAudioSessionId = audioSessionId;".

sp<IMediaPlayer> MediaPlayerService::create(pid_t pid, const sp<IMediaPlayerClient>& client,
        int audioSessionId)
{
    int32_t connId = android_atomic_inc(&mNextConnId);

    sp<Client> c = new Client(
            this, pid, connId, client, audioSessionId,
            IPCThreadState::self()->getCallingUid());

    LOGV("Create new client(%d) from pid %d, uid %d, ", connId, pid,
            IPCThreadState::self()->getCallingUid());

    wp<Client> w = c;
    {
        Mutex::Autolock lock(mLock);
        mClients.add(w);
    }
    return c;
}
     Once the Client has been created and handed back to the MediaPlayer client side, calling player->setDataSource(url, headers) indirectly invokes Client::setDataSource().

status_t MediaPlayerService::Client::setDataSource(
        const char *url, const KeyedVector<String8, String8> *headers)
{
    LOGV("setDataSource(%s)", url);
    ......

    if (strncmp(url, "content://", 10) == 0) {
        // get a filedescriptor for the content Uri and
        // pass it to the setDataSource(fd) method

        String16 url16(url);
        int fd = android::openContentProviderFile(url16);
        if (fd < 0)
        {
            LOGE("Couldn't open fd for %s", url);
            return UNKNOWN_ERROR;
        }
        setDataSource(fd, 0, 0x7fffffffffLL); // this sets mStatus
        close(fd);
        return mStatus;
    } else {
        player_type playerType = getPlayerType(url);
        LOGV("player type = %d", playerType);

        // create the right type of player
        sp<MediaPlayerBase> p = createPlayer(playerType);
        if (p == NULL) return NO_INIT;

        if (!p->hardwareOutput()) {
            mAudioOutput = new AudioOutput(mAudioSessionId);
            static_cast<MediaPlayerInterface*>(p.get())->setAudioSink(mAudioOutput);
        }

        // now set data source
        LOGV(" setDataSource");
        mStatus = p->setDataSource(url, headers);
        if (mStatus == NO_ERROR) {
            mPlayer = p;
        } else {
            LOGE("  error: %d", mStatus);
        }
        return mStatus;
    }
}
       In this setDataSource, getPlayerType() inspects the url to decide which kind of player should be created; createPlayer() is then called and its return value assigned to the member mPlayer. In other words, this mPlayer is the player that does the real work.
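getPlayerType() essentially dispatches on the url. A simplified Java sketch of this kind of suffix-based dispatch (the extension list here is illustrative, not the framework's exact list):

import java.util.Arrays;
import java.util.List;

public class PlayerTypeDemo {
    enum PlayerType { SONIVOX_PLAYER, STAGEFRIGHT_PLAYER }

    // Illustrative suffix list; the real getPlayerType() in
    // MediaPlayerService.cpp checks a similar set of MIDI extensions.
    private static final List<String> MIDI_EXT =
            Arrays.asList(".mid", ".midi", ".smf", ".imy");

    static PlayerType getPlayerType(String url) {
        String lower = url.toLowerCase();
        for (String ext : MIDI_EXT) {
            if (lower.endsWith(ext)) return PlayerType.SONIVOX_PLAYER;
        }
        return PlayerType.STAGEFRIGHT_PLAYER;  // the common case
    }
}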

sp<MediaPlayerBase> MediaPlayerService::Client::createPlayer(player_type playerType)
{
    // determine if we have the right player type
    sp<MediaPlayerBase> p = mPlayer;
    if ((p != NULL) && (p->playerType() != playerType)) {
        LOGV("delete player");
        p.clear();
    }
    if (p == NULL) {
        p = android::createPlayer(playerType, this, notify); // calls the file-scope createPlayer()
    }

    if (p != NULL) {
        p->setUID(mUID);
    }

    return p;
}

static sp<MediaPlayerBase> createPlayer(player_type playerType, void* cookie,
        notify_callback_f notifyFunc)
{
    sp<MediaPlayerBase> p;
    switch (playerType) {
        case SONIVOX_PLAYER:
            LOGV(" create MidiFile");
            p = new MidiFile();
            break;
        case STAGEFRIGHT_PLAYER:
            LOGV(" create StagefrightPlayer");
            p = new StagefrightPlayer;  // in the vast majority of cases a StagefrightPlayer is created
            break;
        case NU_PLAYER:
            LOGV(" create NuPlayer");
            p = new NuPlayerDriver;
            break;
        case TEST_PLAYER:
            LOGV("Create Test Player stub");
            p = new TestPlayerStub();
            break;
        default:
            LOGE("Unknown player type: %d", playerType);
            return NULL;
    }
    if (p != NULL) {
        if (p->initCheck() == NO_ERROR) {
            p->setNotifyCallback(cookie, notifyFunc);
        } else {
            p.clear();
        }
    }
    if (p == NULL) {
        LOGE("Failed to create player object");
    }
    return p;
}


Looking at StagefrightPlayer's constructor: it news an AwesomePlayer and injects itself into it as a listener. StagefrightPlayer feels more like a decorator; the one doing the actual work is AwesomePlayer.
StagefrightPlayer::StagefrightPlayer()
    : mPlayer(new AwesomePlayer) {
    LOGV("StagefrightPlayer");
    mPlayer->setListener(this);
}

         At this point createPlayer is done. After that, Client::setDataSource still has two things to do: check whether an AudioOutput exists yet, create one if not, and install it as the newly created mPlayer's AudioSink. The AudioSessionId comes into play again here. Note that AudioOutput inherits from AudioSink.

if (!p->hardwareOutput()) {
    mAudioOutput = new AudioOutput(mAudioSessionId);
    static_cast<MediaPlayerInterface*>(p.get())->setAudioSink(mAudioOutput);
}

// now set data source
LOGV(" setDataSource");

mStatus = p->setDataSource(url, headers);  // calls AwesomePlayer's setDataSource()

    Only then is StagefrightPlayer's setDataSource called, which in effect means calling AwesomePlayer's setDataSource(). Next let's look at AwesomePlayer's setDataSource.

status_t AwesomePlayer::setDataSource(
        const char *uri, const KeyedVector<String8, String8> *headers) {
    Mutex::Autolock autoLock(mLock);
    return setDataSource_l(uri, headers);
}

status_t AwesomePlayer::setDataSource_l(
        const char *uri, const KeyedVector<String8, String8> *headers) {
    reset_l();

    mUri = uri;
    ...

    // The actual work will be done during preparation in the call to
    // ::finishSetDataSource_l to avoid blocking the calling thread in
    // setDataSource for any significant time.

    {
        Mutex::Autolock autoLock(mStatsLock);
        mStats.mFd = -1;
        mStats.mURI = mUri;
    }

    return OK;
}
 AwesomePlayer uses mStats to keep a few parameters about the file being played; setDataSource merely stores them into mStats and does no further processing.

This completes the whole setDataSource action. It uses the Binder mechanism to realize a client/server architecture: MediaPlayer is paired with MediaPlayerService, and MediaPlayerService creates a Client that is returned to the client side as the player inside MediaPlayer. Inside that Client sits the mPlayer that really does the work — a StagefrightPlayer or one of the other players. Quite a few design patterns show up along the way: proxy, factory, decorator...



Step 3. MediaPlayer.prepare()

|- Java layer

public native void prepare() throws IOException, IllegalStateException;
public native void prepareAsync() throws IllegalStateException;
Normally prepareAsync() is used so that the prepare work happens asynchronously, preventing an ANR. Once prepare finishes, the follow-up work can be done from an OnPreparedListener (as in the usage sketch in the introduction).

|- JNI / Native layer

status_t MediaPlayer::prepareAsync_l()
{
    if ( (mPlayer != 0) && ( mCurrentState & ( MEDIA_PLAYER_INITIALIZED | MEDIA_PLAYER_STOPPED) ) ) {
        mPlayer->setAudioStreamType(mStreamType);
        mCurrentState = MEDIA_PLAYER_PREPARING;
        return mPlayer->prepareAsync();
    }
    LOGE("prepareAsync called in state %d", mCurrentState);
    return INVALID_OPERATION;
}
This just checks the current state and then calls prepareAsync() on the service side, which goes straight through to AwesomePlayer::prepareAsync_l(). prepareAsync_l() does not do the real processing itself; instead it posts an event to the TimedEventQueue for scheduled dispatch. When the AwesomeEvent is created, the callback is stored along with it, so we only need to follow that callback: onPrepareAsyncEvent().

status_t MediaPlayerService::Client::prepareAsync()
{
    ...
    status_t ret = p->prepareAsync();
    ...
    return ret;
}

status_t AwesomePlayer::prepareAsync_l() {
    ...
    if (!mQueueStarted) {
        mQueue.start();
        mQueueStarted = true;
    }

    modifyFlags(PREPARING, SET);
    mAsyncPrepareEvent = new AwesomeEvent(
            this, &AwesomePlayer::onPrepareAsyncEvent);

    mQueue.postEvent(mAsyncPrepareEvent);

    return OK;
}
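The TimedEventQueue mechanism — post an event that carries its own callback and let a dedicated thread dispatch it — is the same idea as posting a Runnable to a HandlerThread at the Java level. A minimal sketch of the analogy (all names here are mine, not the framework's):

import android.os.Handler;
import android.os.HandlerThread;

public class EventQueueAnalogy {
    private final HandlerThread thread = new HandlerThread("AwesomeQueue");
    private final Handler handler;

    public EventQueueAnalogy() {
        thread.start();                       // like mQueue.start()
        handler = new Handler(thread.getLooper());
    }

    // Like mQueue.postEvent(mAsyncPrepareEvent): the "event" carries its
    // own callback and runs on the queue thread, not the caller's thread.
    public void postPrepare(Runnable onPrepareAsyncEvent) {
        handler.post(onPrepareAsyncEvent);
    }
}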
      In onPrepareAsyncEvent, the size of mUri is checked first; if it is > 0 — i.e. we originally called setDataSource(uri, ...) — execution continues into finishSetDataSource_l().
void AwesomePlayer::onPrepareAsyncEvent() {
    Mutex::Autolock autoLock(mLock);
    ... ...

    if (mUri.size() > 0) {
        status_t err = finishSetDataSource_l();
        ... ...
    }

    if (mVideoTrack != NULL && mVideoSource == NULL) {
        status_t err = initVideoDecoder();

        if (err != OK) {
            abortPrepare(err);
            return;
        }
    }

    if (mAudioTrack != NULL && mAudioSource == NULL) {
        status_t err = initAudioDecoder();

        if (err != OK) {
            abortPrepare(err);
            return;
        }
    }

    modifyFlags(PREPARING_CONNECTED, SET);

    if (isStreamingHTTP()) {
        postBufferingEvent_l();
    } else {
        finishAsyncPrepare_l();
    }
}


finishSetDataSource_l() looks fairly involved and does quite a lot: it creates the DataSource and the Extractor. Setting streaming aside for now, DataSource::CreateFromURI first creates a FileSource matching the Uri and assigns it to dataSource. A MediaExtractor is then created from that DataSource: MediaExtractor::Create returns the extractor corresponding to the dataSource's type — e.g. MPEG4Extractor(source) or MP3Extractor(source, meta) — and keeps dataSource as a member, so the extractor can operate on the dataSource directly. At the end it calls setDataSource_l(const sp<MediaExtractor> &extractor).

status_t AwesomePlayer::finishSetDataSource_l() {
    sp<DataSource> dataSource;
    ... ...
    AString sniffedMIME;

    if (!strncasecmp("http://", mUri.string(), 7)
            || !strncasecmp("https://", mUri.string(), 8)
            || isWidevineStreaming) {
        ...  // mainly for streaming media
    } else {
        // returns a FileSource used to operate on plain files (a static factory method)
        dataSource = DataSource::CreateFromURI(mUri.string(), &mUriHeaders);
    }
    ....

    sp<MediaExtractor> extractor;

    if (isWidevineStreaming) {
        ...
    } else {
        extractor = MediaExtractor::Create(
                dataSource, sniffedMIME.empty() ? NULL : sniffedMIME.c_str());

        if (extractor == NULL) {
            return UNKNOWN_ERROR;
        }
    }

    dataSource->getDrmInfo(mDecryptHandle, &mDrmManagerClient);

    if (mDecryptHandle != NULL) {
        CHECK(mDrmManagerClient);
        if (RightsStatus::RIGHTS_VALID != mDecryptHandle->status) {
            notifyListener_l(MEDIA_ERROR, MEDIA_ERROR_UNKNOWN, ERROR_DRM_NO_LICENSE);
        }
    }

    status_t err = setDataSource_l(extractor);
    ...
    return OK;
}


setDataSource_l(extractor) mainly looks at the number of tracks in the extractor and whether there is video and audio; if so it calls setVideoSource(extractor->getTrack(i)) and setAudioSource() respectively. extractor->getTrack(i) returns the matching MediaSource — for MP3, an MP3Source — and a MediaSource likewise holds a pointer to the dataSource. All setVideoSource does is mVideoTrack = source; so that MediaSource corresponds to AwesomePlayer's mVideoTrack, and in the same way mAudioTrack gets its own MediaSource.
status_t AwesomePlayer::setDataSource_l(const sp<MediaExtractor> &extractor) {
    // Attempt to approximate overall stream bitrate by summing all
    // tracks' individual bitrates, if not all of them advertise bitrate,
    // we have to fail.

    int64_t totalBitRate = 0;

    for (size_t i = 0; i < extractor->countTracks(); ++i) {
        sp<MetaData> meta = extractor->getTrackMetaData(i);

        int32_t bitrate;
        if (!meta->findInt32(kKeyBitRate, &bitrate)) {
            const char *mime;
            CHECK(meta->findCString(kKeyMIMEType, &mime));
            LOGV("track of type '%s' does not publish bitrate", mime);

            totalBitRate = -1;
            break;
        }

        totalBitRate += bitrate;
    }

    mBitrate = totalBitRate;

    LOGV("mBitrate = %lld bits/sec", mBitrate);

    bool haveAudio = false;
    bool haveVideo = false;
    for (size_t i = 0; i < extractor->countTracks(); ++i) {
        sp<MetaData> meta = extractor->getTrackMetaData(i);

        const char *_mime;
        CHECK(meta->findCString(kKeyMIMEType, &_mime));

        String8 mime = String8(_mime);

        if (!haveVideo && !strncasecmp(mime.string(), "video/", 6)) {
            setVideoSource(extractor->getTrack(i));
            haveVideo = true;  // we have a video track

            // Set the presentation/display size
            int32_t displayWidth, displayHeight;
            bool success = meta->findInt32(kKeyDisplayWidth, &displayWidth);
            if (success) {
                success = meta->findInt32(kKeyDisplayHeight, &displayHeight);
            }
            if (success) {
                mDisplayWidth = displayWidth;
                mDisplayHeight = displayHeight;
            }

            {
                Mutex::Autolock autoLock(mStatsLock);
                mStats.mVideoTrackIndex = mStats.mTracks.size();
                mStats.mTracks.push();
                TrackStat *stat =
                        &mStats.mTracks.editItemAt(mStats.mVideoTrackIndex);
                stat->mMIME = mime.string();
            }
        } else if (!haveAudio && !strncasecmp(mime.string(), "audio/", 6)) {
            setAudioSource(extractor->getTrack(i));
            haveAudio = true;  // we have an audio track

            {
                Mutex::Autolock autoLock(mStatsLock);
                mStats.mAudioTrackIndex = mStats.mTracks.size();
                mStats.mTracks.push();
                TrackStat *stat =
                        &mStats.mTracks.editItemAt(mStats.mAudioTrackIndex);
                stat->mMIME = mime.string();
            }

            if (!strcasecmp(mime.string(), MEDIA_MIMETYPE_AUDIO_VORBIS)) {
                // Only do this for vorbis audio, none of the other audio
                // formats even support this ringtone specific hack and
                // retrieving the metadata on some extractors may turn out
                // to be very expensive.
                sp<MetaData> fileMeta = extractor->getMetaData();
                int32_t loop;
                if (fileMeta != NULL
                        && fileMeta->findInt32(kKeyAutoLoop, &loop) && loop != 0) {
                    modifyFlags(AUTO_LOOPING, SET);
                }
            }
        } else if (!strcasecmp(mime.string(), MEDIA_MIMETYPE_TEXT_3GPP)) {
            addTextSource(extractor->getTrack(i));
        }
    }

    if (!haveAudio && !haveVideo) {
        return UNKNOWN_ERROR;
    }

    mExtractorFlags = extractor->flags();

    return OK;
}
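Incidentally, the same track-enumeration idea later surfaced in the public Java API as android.media.MediaExtractor (API 16). A minimal sketch mirroring the MIME-based loop above (the path handling is illustrative):

import android.media.MediaExtractor;
import android.media.MediaFormat;

import java.io.IOException;

public class TrackScanDemo {
    // Walk the container's tracks and classify them by MIME prefix,
    // just like the native countTracks()/getTrackMetaData() loop above.
    public static void scan(String path) throws IOException {
        MediaExtractor extractor = new MediaExtractor();
        extractor.setDataSource(path);
        for (int i = 0; i < extractor.getTrackCount(); i++) {
            MediaFormat format = extractor.getTrackFormat(i);
            String mime = format.getString(MediaFormat.KEY_MIME);
            if (mime.startsWith("video/")) {
                // corresponds to setVideoSource(extractor->getTrack(i))
            } else if (mime.startsWith("audio/")) {
                // corresponds to setAudioSource(extractor->getTrack(i))
            }
        }
        extractor.release();
    }
}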
Once this step finishes, our prepare is finally complete.

Step 4. MediaPlayer.start()

|- Java layer

 This simply calls through to the JNI method.

public void start() throws IllegalStateException {
    stayAwake(true);
    mIsSuspend = false;
    broadcastIntent(PLAYING_STATUS_CHANGED_ACTION, true);
    _start();
}

private native void _start() throws IllegalStateException;
|- JNI layer

It calls the native layer's start():

static void
android_media_MediaPlayer_start(JNIEnv *env, jobject thiz)
{
    LOGV("start");
    sp<MediaPlayer> mp = getMediaPlayer(env, thiz);
    ... ...
    process_media_player_call( env, thiz, mp->start(), NULL, NULL );
}
|- Native layer

The native layer mainly does setLooping and setVolume and then calls start() directly; skipping the intermediate calls, we arrive at AwesomePlayer::play_l().

status_t MediaPlayer::start()
{
    LOGV("start");
    Mutex::Autolock _l(mLock);
    if (mCurrentState & MEDIA_PLAYER_STARTED)
        return NO_ERROR;
    if ( (mPlayer != 0) && ( mCurrentState & ( MEDIA_PLAYER_PREPARED |
            MEDIA_PLAYER_PLAYBACK_COMPLETE | MEDIA_PLAYER_PAUSED ) ) ) {
        mPlayer->setLooping(mLoop);
        mPlayer->setVolume(mLeftVolume, mRightVolume);
        mPlayer->setAuxEffectSendLevel(mSendLevel);
        mCurrentState = MEDIA_PLAYER_STARTED;
        status_t ret = mPlayer->start();
        ...
    }
    LOGE("start called in state %d", mCurrentState);
    return INVALID_OPERATION;
}


In play_l(), if there is only audio, an AudioPlayer is created first, mTimeSource = mAudioPlayer is set, and then startAudioPlayer_l is called directly; if there is video as well, postVideoEvent_l() is posted. Let's assume audio only for now; video can be analyzed in detail later.
status_t AwesomePlayer::play_l() {
    modifyFlags(SEEK_PREVIEW, CLEAR);

    if (mAudioSource != NULL) {
        if (mAudioPlayer == NULL) {
            if (mAudioSink != NULL) {
                mAudioPlayer = new AudioPlayer(mAudioSink, this);
                mAudioPlayer->setSource(mAudioSource);

                mTimeSource = mAudioPlayer;

                // If there was a seek request before we ever started,
                // honor the request now.
                // Make sure to do this before starting the audio player
                // to avoid a race condition.
                seekAudioIfNecessary_l();
            }
        }

        CHECK(!(mFlags & AUDIO_RUNNING));

        if (mVideoSource == NULL) {
            // We don't want to post an error notification at this point,
            // the error returned from MediaPlayer::start() will suffice.

            status_t err = startAudioPlayer_l(
                    false /* sendErrorNotification */);
        }
    }

    if (mTimeSource == NULL && mAudioPlayer == NULL) {
        mTimeSource = &mSystemTimeSource;
    }

    if (mVideoSource != NULL) {
        // Kick off video playback
        postVideoEvent_l();

        if (mAudioSource != NULL && mVideoSource != NULL) {
            postVideoLagEvent_l();
        }
    }

    if (mFlags & AT_EOS) {
        // Legacy behaviour, if a stream finishes playing and then
        // is started again, we play from the start...
        seekTo_l(0);
    }

    uint32_t params = IMediaPlayerService::kBatteryDataCodecStarted
            | IMediaPlayerService::kBatteryDataTrackDecoder;
    if ((mAudioSource != NULL) && (mAudioSource != mAudioTrack)) {
        params |= IMediaPlayerService::kBatteryDataTrackAudio;
    }
    if (mVideoSource != NULL) {
        params |= IMediaPlayerService::kBatteryDataTrackVideo;
    }
    addBatteryData(params);

    if (mVideoSource != NULL) {
        if (mMDClient == NULL) {
            mMDClient = new MultiDisplayClient();
        }
        int wcom = 0;
        if (mNativeWindow != NULL)
            mNativeWindow->query(mNativeWindow.get(), NATIVE_WINDOW_QUEUES_TO_WINDOW_COMPOSER, &wcom);
        /*
         * 0 means the buffers do not go directly to the window compositor;
         * 1 means the ANativeWindow DOES send queued buffers
         * directly to the window compositor;
         */
        if (wcom == 1) {
            sp<MetaData> meta = NULL;
            int32_t displayW, displayH, frameRate;
            displayW = displayH = frameRate = 0;
            MDSVideoInfo info;
            memset(&info, 0, sizeof(MDSVideoInfo));
            info.isplaying = true;
            info.isprotected = (mDecryptHandle != NULL);
            bool success = false;
            if (mVideoTrack != NULL)
                meta = mVideoTrack->getFormat();
            if (meta != NULL) {
                success = meta->findInt32(kKeyFrameRate, &frameRate);
                if (!success)
                    frameRate = 0;
            }
            if (mVideoSource != NULL)
                meta = mVideoSource->getFormat();
            if (meta != NULL) {
                success = meta->findInt32(kKeyWidth, &displayW);
                if (!success)
                    displayW = 0;
                success = meta->findInt32(kKeyHeight, &displayH);
                if (!success)
                    displayH = 0;
            }
            info.frameRate = frameRate;
            info.displayW  = displayW;
            info.displayH  = displayH;
            mMDClient->updateVideoInfo(&info);
        }
    }

    return OK;
}


Next let's look at startAudioPlayer_l, which directly calls mAudioPlayer->start(true /* sourceAlreadyStarted */). Altogether it does four things:

1. mSource->start();

2. mSource->read(&mFirstBuffer, &options);   // reads a buffer from the MP3Source and returns it in mFirstBuffer

3. mAudioSink->open()  // if there is an AudioSink, a new AudioTrack is created — which brings AudioFlinger into the picture. When creating the AudioTrack, AudioPlayer passes in its own AudioSinkCallback, and this callback is what drives the data exchange: the callback invokes AudioPlayer::fillBuffer(), and fillBuffer() in turn just calls the audio source's read() method (see the sketch after the listing below).

4. mAudioSink->start();  i.e. mTrack->start();  // for the Audio System side (AudioTrack, AudioFlinger, AudioSystem, etc.), see the write-ups collected in the references

status_t AudioPlayer::start(bool sourceAlreadyStarted) {
    CHECK(!mStarted);
    CHECK(mSource != NULL);

    status_t err;
    if (!sourceAlreadyStarted) {
        err = mSource->start();

        if (err != OK) {
            return err;
        }
    }

    // We allow an optional INFO_FORMAT_CHANGED at the very beginning
    // of playback, if there is one, getFormat below will retrieve the
    // updated format, if there isn't, we'll stash away the valid buffer
    // of data to be used on the first audio callback.

    CHECK(mFirstBuffer == NULL);

    MediaSource::ReadOptions options;
    if (mSeeking) {
        options.setSeekTo(mSeekTimeUs);
        mSeeking = false;
    }

    mFirstBufferResult = mSource->read(&mFirstBuffer, &options);
    if (mFirstBufferResult == INFO_FORMAT_CHANGED) {
        LOGV("INFO_FORMAT_CHANGED!!!");

        CHECK(mFirstBuffer == NULL);
        mFirstBufferResult = OK;
        mIsFirstBuffer = false;
    } else {
        mIsFirstBuffer = true;
    }

    sp<MetaData> format = mSource->getFormat();
    const char *mime;
    bool success = format->findCString(kKeyMIMEType, &mime);
    CHECK(success);
    CHECK(!strcasecmp(mime, MEDIA_MIMETYPE_AUDIO_RAW));

    success = format->findInt32(kKeySampleRate, &mSampleRate);
    CHECK(success);

    int32_t numChannels;
    success = format->findInt32(kKeyChannelCount, &numChannels);
    CHECK(success);
    mChannels = numChannels;

    if (mAudioSink.get() != NULL) {
        status_t err = mAudioSink->open(
                mSampleRate, numChannels, AUDIO_FORMAT_PCM_16_BIT,
                DEFAULT_AUDIOSINK_BUFFERCOUNT,
                &AudioPlayer::AudioSinkCallback, this);
        if (err != OK) {
            if (mFirstBuffer != NULL) {
                mFirstBuffer->release();
                mFirstBuffer = NULL;
            }

            if (!sourceAlreadyStarted) {
                mSource->stop();
            }

            return err;
        }

        mLatencyUs = (int64_t)mAudioSink->latency() * 1000;
        mFrameSize = mAudioSink->frameSize();

        mAudioSink->start();
    } else {
        mAudioTrack = new AudioTrack(
                AUDIO_STREAM_MUSIC, mSampleRate, AUDIO_FORMAT_PCM_16_BIT,
                (numChannels == 2)
                    ? AUDIO_CHANNEL_OUT_STEREO
                    : AUDIO_CHANNEL_OUT_MONO,
                0, 0, &AudioCallback, this, 0);

        if ((err = mAudioTrack->initCheck()) != OK) {
            delete mAudioTrack;
            mAudioTrack = NULL;

            if (mFirstBuffer != NULL) {
                mFirstBuffer->release();
                mFirstBuffer = NULL;
            }

            if (!sourceAlreadyStarted) {
                mSource->stop();
            }

            return err;
        }

        mLatencyUs = (int64_t)mAudioTrack->latency() * 1000;
        mFrameSize = mAudioTrack->frameSize();

        mAudioTrack->start();
    }

    {
        Mutex::Autolock autoLock(mLock);
        mIsClockAdjustmentOn = true;
    }

    mStarted = true;

    return OK;
}
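The data path set up above is a pull model: mAudioSink->open() hands AudioPlayer::AudioSinkCallback to the new AudioTrack, and each time the track needs data the callback calls fillBuffer(), which in turn calls mSource->read(). The Java-level AudioTrack API exposes the same pipe to AudioFlinger as a push model; a minimal sketch of feeding decoded PCM that way (the PcmSource interface and the stereo/16-bit parameters are assumptions of mine):

import android.media.AudioFormat;
import android.media.AudioManager;
import android.media.AudioTrack;

public class PcmSinkDemo {
    // Push decoded 16-bit stereo PCM into AudioFlinger via AudioTrack.
    // In AwesomePlayer the equivalent data instead flows through the
    // AudioSinkCallback pull path shown above.
    public static void play(PcmSource source, int sampleRate) {
        int minBuf = AudioTrack.getMinBufferSize(sampleRate,
                AudioFormat.CHANNEL_OUT_STEREO, AudioFormat.ENCODING_PCM_16BIT);
        AudioTrack track = new AudioTrack(AudioManager.STREAM_MUSIC, sampleRate,
                AudioFormat.CHANNEL_OUT_STEREO, AudioFormat.ENCODING_PCM_16BIT,
                minBuf, AudioTrack.MODE_STREAM);
        track.play();                              // like mAudioSink->start()
        byte[] buf = new byte[minBuf];
        int n;
        while ((n = source.read(buf)) > 0) {       // like mSource->read()
            track.write(buf, 0, n);                // like fillBuffer() handing data over
        }
        track.stop();
        track.release();
    }

    // Hypothetical PCM source; stands in for the MediaSource of this article.
    public interface PcmSource {
        int read(byte[] buf);                      // returns bytes read, <= 0 at end of stream
    }
}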


References

    http://blog.csdn.net/siobhan/article/details/7179694

    AudioTrack analysis: http://blog.csdn.net/Innost/article/details/6125779

    AudioFlinger analysis: http://blog.csdn.net/innost/article/details/6142812
                           http://blog.csdn.net/DroidPhone/article/details/5951999

    AudioPolicyService and AudioPolicyManager analysis: http://blog.csdn.net/DroidPhone/article/details/5949280

    How AudioTrack exchanges audio data with AudioFlinger: http://blog.csdn.net/DroidPhone/article/details/5941344

    Changes in the Android 4.1 Audio system: http://www.cnblogs.com/innost/archive/2012/07/16/2593305.html