Processing Flow of RTSP Streaming Audio/Video Frames on Android

2014-02-19 17:46
First, let's list, top to bottom, the functions involved on the path from receiving an RTP packet to assembling a complete frame.

This can serve as a trail of clues if I need to sort things out again later after forgetting, without having to re-analyze the code from scratch.

(MyHandler.h) onMessageReceived (case 'setu'):  sp<AMessage> notify = new AMessage('accu', id());

void ARTPConnection::addStream

void ARTPConnection::onAddStream(const sp<AMessage> &msg)

void ARTPConnection::onPollStreams()

status_t ARTPConnection::receive(StreamInfo *s, bool receiveRTP) 

status_t ARTPConnection::parseRTP(StreamInfo *s, const sp<ABuffer> &buffer)

void ARTPSource::processRTPPacket(const sp<ABuffer> &buffer)

void ARTPAssembler::onPacketReceived(const sp<ARTPSource> &source)

ARTPAssembler::AssemblyStatus AMPEG4AudioAssembler::assembleMore(
        const sp<ARTPSource> &source)

ARTPAssembler::AssemblyStatus AMPEG4AudioAssembler::addPacket(
        const sp<ARTPSource> &source)

void AMPEG4AudioAssembler::submitAccessUnit()

sp<AMessage> msg = mNotifyMsg->dup();
    msg->setBuffer("access-unit", accessUnit);
    msg->post();

Look at the snippet just above: a message is constructed, and in the end that same message carries the assembled buffer back to MyHandler for processing. My previous post analyzing a DLNA issue already gave a rough walkthrough of how the RTP packets received from the server are assembled into a complete video frame.
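To make the submit step concrete, here is a simplified sketch of what an assembler's submitAccessUnit boils down to. This is illustrative, not the verbatim AOSP code; mPackets is the assembler's queued-packet list and mNotifyMsg is the notify message created in the 'setu' case above:

void submitAccessUnit() {
    CHECK(!mPackets.empty());

    // Total size of all queued RTP payloads for this frame.
    size_t totalSize = 0;
    for (List<sp<ABuffer> >::iterator it = mPackets.begin();
            it != mPackets.end(); ++it) {
        totalSize += (*it)->size();
    }

    // Concatenate the payloads into a single access unit.
    sp<ABuffer> accessUnit = new ABuffer(totalSize);
    size_t offset = 0;
    for (List<sp<ABuffer> >::iterator it = mPackets.begin();
            it != mPackets.end(); ++it) {
        memcpy(accessUnit->data() + offset, (*it)->data(), (*it)->size());
        offset += (*it)->size();
    }

    // Carry over the RTP timestamp of the first packet.
    int32_t rtpTime;
    CHECK((*mPackets.begin())->meta()->findInt32("rtp-time", &rtpTime));
    accessUnit->meta()->setInt32("rtp-time", rtpTime);

    mPackets.clear();

    // Hand the completed frame to MyHandler via the 'accu' message.
    sp<AMessage> msg = mNotifyMsg->dup();
    msg->setBuffer("access-unit", accessUnit);
    msg->post();
}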

Next: how a fully assembled frame gets timestamped and placed on the to-be-decoded queue.

The onMessageReceived function in MyHandler receives the message posted above and then calls the onAccessUnitComplete function.
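A minimal sketch of that dispatch (simplified; the real 'accu' case also handles RTCP time updates and first-packet notifications, and I am quoting the message keys from memory):

case 'accu':
{
    size_t trackIndex;
    CHECK(msg->findSize("track-index", &trackIndex));

    sp<ABuffer> accessUnit;
    CHECK(msg->findBuffer("access-unit", &accessUnit));

    // Hand the completed access unit to the per-track handler.
    onAccessUnitComplete(trackIndex, accessUnit);
    break;
}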

        if (mFirstAccessUnit) {
            // First access unit to arrive: notify RTSPSource.cpp that data
            // has been received and the connection is up, then update state.
            sp<AMessage> msg = mNotify->dup();
            msg->setInt32("what", kWhatConnected);
            msg->post();

            if (mSeekable) {
                // Only seekable (on-demand) RTSP streams enter here;
                // live streams do not.
                for (size_t i = 0; i < mTracks.size(); ++i) {
                    TrackInfo *info = &mTracks.editItemAt(i);

                    postNormalPlayTimeMapping(i,
                            info->mNormalPlayTimeRTP, info->mNormalPlayTimeUs);
                }
            }
            mFirstAccessUnit = false;
        }

The handling below is new in the J (Jelly Bean) release: as long as either the audio or the video track has not yet established its timestamp, the received frame is temporarily stored in mPackets. How mAllTracksHaveTime gets assigned deserves a closer look; it involves the SR (sender report) information in the RTCP packets of an RTSP stream, which exists specifically for synchronization, and will be analyzed in the next post.

        if (!mAllTracksHaveTime) {
            ALOGV("storing accessUnit, no time established yet");
            track->mPackets.push_back(accessUnit);
            return;
        }
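mAllTracksHaveTime itself is flipped in MyHandler::onTimeUpdate, which runs when an RTCP sender report arrives for a track; once every track has an NTP anchor, the flag goes true. A rough sketch of the J logic (reconstructed from memory, so treat the details as approximate):

void onTimeUpdate(int32_t trackIndex, uint32_t rtpTime, uint64_t ntpTime) {
    // Convert the 64-bit NTP timestamp (32.32 fixed point) to microseconds.
    int64_t ntpTimeUs = (int64_t)(ntpTime * 1E6 / (1ll << 32));

    TrackInfo *track = &mTracks.editItemAt(trackIndex);

    // Anchor this track: this RTP time corresponds to this wall-clock time.
    track->mRTPAnchor = rtpTime;
    track->mNTPAnchorUs = ntpTimeUs;

    if (!mAllTracksHaveTime) {
        bool allTracksHaveTime = true;
        for (size_t i = 0; i < mTracks.size(); ++i) {
            TrackInfo *t = &mTracks.editItemAt(i);
            if (t->mNTPAnchorUs < 0) {
                allTracksHaveTime = false;
                break;
            }
        }
        if (allTracksHaveTime) {
            mAllTracksHaveTime = true;
        }
    }
}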

Once the condition above no longer holds, i.e. both audio and video have established a synchronized time base, the code below processes the frame: first, every frame previously parked in mPackets is pulled out, timestamped via addMediaTimestamp, and sent out.

while (!track->mPackets.empty()) {
            sp<ABuffer> accessUnit = *track->mPackets.begin();
            track->mPackets.erase(track->mPackets.begin());

            if (addMediaTimestamp(trackIndex, track, accessUnit)) {
                postQueueAccessUnit(trackIndex, accessUnit);
            }
        }
After the frames in mPackets have been timestamped, the frame that just arrived is also timestamped and sent out via postQueueAccessUnit:
        if (addMediaTimestamp(trackIndex, track, accessUnit)) {
            postQueueAccessUnit(trackIndex, accessUnit);
        }

How these timestamps are actually computed will also be explained in a later post.
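Until then, here is roughly what addMediaTimestamp does in the J sources: it maps the access unit's RTP time onto the shared media clock via the track's RTCP anchors (a sketch from memory, details approximate):

bool MyHandler::addMediaTimestamp(
        int32_t trackIndex, const TrackInfo *track,
        const sp<ABuffer> &accessUnit) {
    uint32_t rtpTime;
    CHECK(accessUnit->meta()->findInt32("rtp-time", (int32_t *)&rtpTime));

    // Microseconds elapsed since this track's RTCP anchor point.
    int64_t relRtpTimeUs =
        (((int64_t)rtpTime - (int64_t)track->mRTPAnchor) * 1000000ll)
            / track->mTimeScale;

    // Map onto the wall clock, then onto the shared media timeline.
    int64_t ntpTimeUs = track->mNTPAnchorUs + relRtpTimeUs;
    int64_t mediaTimeUs = mMediaAnchorUs + ntpTimeUs - mNTPAnchorUs;

    if (mediaTimeUs < 0) {
        // Frame predates the anchor; drop it instead of stamping a
        // negative time.
        return false;
    }

    accessUnit->meta()->setInt64("timeUs", mediaTimeUs);
    return true;
}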

Tracing the message flow for the "send out" above shows that it lands in the onMessageReceived function of RTSPSource.cpp:

        case MyHandler::kWhatAccessUnit:
        {
            size_t trackIndex;
            // Determine whether this is the audio or the video track.
            CHECK(msg->findSize("trackIndex", &trackIndex));
            CHECK_LT(trackIndex, mTracks.size());

            // Retrieve the frame that MyHandler has already timestamped.
            sp<ABuffer> accessUnit;
            CHECK(msg->findBuffer("accessUnit", &accessUnit));

            int32_t damaged;
            // If the frame is damaged, don't put it on the to-be-decoded queue.
            if (accessUnit->meta()->findInt32("damaged", &damaged)
                    && damaged) {
                ALOGI("dropping damaged access unit.");
                break;
            }

            TrackInfo *info = &mTracks.editItemAt(trackIndex);
            sp<AnotherPacketSource> source = info->mSource;
            if (source != NULL) {
                uint32_t rtpTime;
                CHECK(accessUnit->meta()->findInt32("rtp-time", (int32_t *)&rtpTime));

                // The condition below was modified in the J release;
                // only live streams enter this branch.
                if (!info->mNPTMappingValid) {
                    // This is a live stream, we didn't receive any normal
                    // playtime mapping. We won't map to npt time.
                    source->queueAccessUnit(accessUnit);
                    break;
                }

                // Recompute this frame's timestamp from the NPT mapping;
                // this only takes effect for on-demand (seekable) RTSP streams.
                int64_t nptUs =
                    ((double)rtpTime - (double)info->mRTPTime)
                        / info->mTimeScale
                        * 1000000ll
                        + info->mNormalPlaytimeUs;

                accessUnit->meta()->setInt64("timeUs", nptUs);
                source->queueAccessUnit(accessUnit);
            }
            break;
        }
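To make the nptUs formula concrete: with a typical video mTimeScale of 90000, a frame whose rtpTime is 180000 ticks past info->mRTPTime gets 180000 / 90000 * 1000000 = 2,000,000 µs added to info->mNormalPlaytimeUs, i.e. it is stamped two seconds past the normal-play-time origin.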

Whether the stream is live or on-demand, the access unit ultimately ends up on AnotherPacketSource's to-be-decoded queue.
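For context, that queue in AnotherPacketSource is just a mutex-protected list of ABuffers; queueAccessUnit looks roughly like this (a sketch, details approximate):

void AnotherPacketSource::queueAccessUnit(const sp<ABuffer> &buffer) {
    int32_t damaged;
    if (buffer->meta()->findInt32("damaged", &damaged) && damaged) {
        // Second line of defense: never queue a damaged access unit.
        return;
    }

    Mutex::Autolock autoLock(mLock);
    mBuffers.push_back(buffer);
    mCondition.signal();  // wake anyone blocked waiting for a buffer
}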

Next, let's trace how a complete frame is pulled off the to-be-decoded queue and fed to the decoder.

After the decoder is initialized, ACodec's postFillThisBuffer function is called:

    CHECK_EQ((int)info->mStatus, (int)BufferInfo::OWNED_BY_US);

    // 'notify' is the message NuPlayerDecoder.cpp passed in when ACodec was
    // initialized; it is used to send messages back to NuPlayerDecoder.cpp.
    sp<AMessage> notify = mCodec->mNotify->dup();
    notify->setInt32("what", ACodec::kWhatFillThisBuffer);
    notify->setPointer("buffer-id", info->mBufferID);

    info->mData->meta()->clear();
    notify->setBuffer("buffer", info->mData);

    // Set up the reply message that will notify ACodec once the buffer
    // has been filled.
    sp<AMessage> reply = new AMessage(kWhatInputBufferFilled, mCodec->id());
    reply->setPointer("buffer-id", info->mBufferID);

    notify->setMessage("reply", reply);
    notify->post();

    info->mStatus = BufferInfo::OWNED_BY_UPSTREAM;
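For completeness: when that reply later comes back as kWhatInputBufferFilled, ACodec::BaseState::onInputBufferFilled hands the filled buffer to the OMX component. A simplified excerpt, reconstructed from memory (bufferID, buffer, flags and timeUs come from the reply message and the buffer's "timeUs" meta):

    // Simplified: submit the filled input buffer to the OMX decoder.
    CHECK_EQ(mCodec->mOMX->emptyBuffer(
                mCodec->mNode,
                bufferID,
                0 /* range offset */,
                buffer->size() /* range length */,
                flags,
                timeUs),
             (status_t)OK);

    info->mStatus = BufferInfo::OWNED_BY_COMPONENT;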

After the post, NuPlayerDecoder.cpp receives the message:

case kWhatCodecNotify:
        {
            int32_t what;
            CHECK(msg->findInt32("what", &what));
            if (what == ACodec::kWhatFillThisBuffer) {
                onFillThisBuffer(msg);
            }
            break;
        }

void NuPlayer::Decoder::onFillThisBuffer(const sp<AMessage> &msg) {
    sp<AMessage> reply;
    CHECK(msg->findMessage("reply", &reply));
    sp<ABuffer> outBuffer;

    // 'mNotify' was passed in from NuPlayer; dup it to send NuPlayer a message.
    sp<AMessage> notify = mNotify->dup();
    notify->setMessage("codec-request", msg);
    notify->post();
}

NuPlayer then receives the message:

        case kWhatVideoNotify:
        case kWhatAudioNotify:
        {
            bool audio = msg->what() == kWhatAudioNotify;

            sp<AMessage> codecRequest;
            CHECK(msg->findMessage("codec-request", &codecRequest));

            int32_t what;
            CHECK(codecRequest->findInt32("what", &what));

            if (what == ACodec::kWhatFillThisBuffer) {
                status_t err = feedDecoderInputData(
                        audio, codecRequest);
                if (err == -EWOULDBLOCK) {
                    if (mSource->feedMoreTSData() == OK) {
                        msg->post(10000ll);
                    }
                }
            }

status_t NuPlayer::feedDecoderInputData(bool audio, const sp<AMessage> &msg) {
    sp<AMessage> reply;
    CHECK(msg->findMessage("reply", &reply));
    sp<ABuffer> accessUnit;
    bool dropAccessUnit;
    do {
        status_t err = mSource->dequeueAccessUnit(audio, &accessUnit);

        if (err == -EWOULDBLOCK) {
            return err;
        }

        dropAccessUnit = false;
        if (!audio
                && mVideoLateByUs > 100000ll
                && mVideoIsAVC
                && !IsAVCReferenceFrame(accessUnit)) {
            dropAccessUnit = true;
            ++mNumFramesDropped;
        }
    } while (dropAccessUnit);
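The tail of feedDecoderInputData (elided above) is what closes the loop: the surviving access unit is attached to the reply message and posted back, arriving at ACodec as kWhatInputBufferFilled. From the J sources, roughly:

    reply->setBuffer("buffer", accessUnit);
    reply->post();

    return OK;
}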

status_t NuPlayer::RTSPSource::dequeueAccessUnit(
        bool audio, sp<ABuffer> *accessUnit) {
    // This block is new in the J release: wait until both audio and video
    // have buffered about 2 seconds of data before frames are pulled off
    // the to-be-decoded queue.
    if (mStartingUp) {
        if (!haveSufficientDataOnAllTracks()) {
            return -EWOULDBLOCK;
        }
        mStartingUp = false;
    }

    sp<AnotherPacketSource> source = getSource(audio);

    status_t finalResult;
    if (!source->hasBufferAvailable(&finalResult)) {
        return finalResult == OK ? -EWOULDBLOCK : finalResult;
    }

    // Pull the frame off AnotherPacketSource's to-be-decoded queue; from
    // here it goes through ACodec to the decoder.
    return source->dequeueAccessUnit(accessUnit);
}
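haveSufficientDataOnAllTracks is the piece that implements the 2-second threshold; in the 4.1 RTSPSource.cpp it looks roughly like this (a sketch from memory; kMinDurationUs is the 2-second constant):

static int64_t kMinDurationUs = 2000000ll;  // buffer at least 2 seconds

bool NuPlayer::RTSPSource::haveSufficientDataOnAllTracks() const {
    status_t err;
    int64_t durationUs;

    if (mAudioTrack != NULL
            && (durationUs = mAudioTrack->getBufferedDurationUs(&err))
                    < kMinDurationUs
            && err == OK) {
        return false;  // audio hasn't buffered enough yet
    }

    if (mVideoTrack != NULL
            && (durationUs = mVideoTrack->getBufferedDurationUs(&err))
                    < kMinDurationUs
            && err == OK) {
        return false;  // video hasn't buffered enough yet
    }

    return true;
}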

That's it. Fellow developers interested in this area are welcome to reach out and exchange ideas so we can improve together.

Originally posted at: http://blog.sina.com.cn/s/blog_645b74b90101cowd.html
Tags: video, streaming, RTSP