
Android IPC Communication Mechanism: Source Code Analysis (Part 2)

2010-07-23 10:24
Client A communicating with the Binder kernel (kernel/drivers/android/binder.c):

static int binder_open(struct inode *nodp, struct file *filp)
{
    struct binder_proc *proc;

    if (binder_debug_mask & BINDER_DEBUG_OPEN_CLOSE)
        printk(KERN_INFO "binder_open: %d:%d\n",
               current->group_leader->pid, current->pid);

    proc = kzalloc(sizeof(*proc), GFP_KERNEL);
    if (proc == NULL)
        return -ENOMEM;
    get_task_struct(current);
    proc->tsk = current;    /* save the task_struct of the process opening /dev/binder */
    INIT_LIST_HEAD(&proc->todo);
    init_waitqueue_head(&proc->wait);
    proc->default_priority = task_nice(current);
    mutex_lock(&binder_lock);
    binder_stats.obj_created[BINDER_STAT_PROC]++;
    hlist_add_head(&proc->proc_node, &binder_procs);
    proc->pid = current->group_leader->pid;
    INIT_LIST_HEAD(&proc->delivered_death);
    filp->private_data = proc;
    mutex_unlock(&binder_lock);

    if (binder_proc_dir_entry_proc) {
        char strbuf[11];
        snprintf(strbuf, sizeof(strbuf), "%u", proc->pid);
        /* create a /proc read entry for the current process */
        create_proc_read_entry(strbuf, S_IRUGO, binder_proc_dir_entry_proc,
                               binder_read_proc_proc, proc);
    }

    return 0;
}
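For reference, here is the user-space side that lands in binder_open above: each process opens the driver once, modeled on ProcessState (open_driver plus the mmap done in its constructor). This is only a sketch; the mmap size and thread count are illustrative, and the BINDER_* constants are assumed to come from the binder UAPI header.

#include <fcntl.h>
#include <sys/ioctl.h>
#include <sys/mman.h>

int open_binder_driver()
{
    int fd = open("/dev/binder", O_RDWR);   // this open() triggers binder_open above
    if (fd >= 0) {
        int vers = 0;
        ioctl(fd, BINDER_VERSION, &vers);               // sanity-check the protocol version
        size_t maxThreads = 15;                          // illustrative pool size
        ioctl(fd, BINDER_SET_MAX_THREADS, &maxThreads); // cap the kernel-driven thread pool
        // map the region through which the kernel delivers transaction data
        mmap(0, 1024 * 1024, PROT_READ, MAP_PRIVATE | MAP_NORESERVE, fd, 0);
    }
    return fd;
}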

From this we can see that the binder kernel keeps the information of every process that opens /dev/binder, so when a process calls ioctl to talk to the kernel binder, the binder kernel can look up the calling process's information. BINDER_WRITE_READ is a very important command used when a process calls ioctl to communicate with the Binder kernel. As we saw, the command that IPCThreadState::transact sends down when it calls talkWithDriver is exactly BINDER_WRITE_READ.
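To make the ioctl interface concrete, here is a minimal user-space sketch of one BINDER_WRITE_READ round trip of the kind talkWithDriver performs. Field names follow the binder_write_read struct handled by binder_ioctl below; the header location is an assumption.

#include <sys/ioctl.h>
#include <linux/binder.h>   // assumed location of the binder UAPI definitions

int talk_with_driver(int fd, void *out, unsigned long outSize,
                     void *in, unsigned long inSize)
{
    struct binder_write_read bwr;
    bwr.write_buffer   = (unsigned long)out;   // BC_* commands for the kernel
    bwr.write_size     = outSize;
    bwr.write_consumed = 0;
    bwr.read_buffer    = (unsigned long)in;    // BR_* returns arrive here
    bwr.read_size      = inSize;
    bwr.read_consumed  = 0;
    // a single ioctl both writes our commands and (blocking) reads replies
    return ioctl(fd, BINDER_WRITE_READ, &bwr);
}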

static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
    int ret;
    struct binder_proc *proc = filp->private_data;
    struct binder_thread *thread;
    unsigned int size = _IOC_SIZE(cmd);
    void __user *ubuf = (void __user *)arg;

    /* the process calling ioctl is suspended here; the caller stays
       suspended until the service side returns */
    ret = wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2);
    if (ret)
        return ret;

    mutex_lock(&binder_lock);
    thread = binder_get_thread(proc);   /* from the caller's process info, get its thread-pool entry */
    if (thread == NULL) {
        ret = -ENOMEM;
        goto err;
    }

    switch (cmd) {
    case BINDER_WRITE_READ: {   /* the cmd set by talkWithDriver in IPCThreadState */
        struct binder_write_read bwr;
        if (size != sizeof(struct binder_write_read)) {
            ret = -EINVAL;
            goto err;
        }
        if (copy_from_user(&bwr, ubuf, sizeof(bwr))) {
            ret = -EFAULT;
            goto err;
        }
        if (binder_debug_mask & BINDER_DEBUG_READ_WRITE)
            printk(KERN_INFO "binder: %d:%d write %ld at %08lx, read %ld at %08lx\n",
                   proc->pid, thread->pid, bwr.write_size, bwr.write_buffer,
                   bwr.read_size, bwr.read_buffer);
        if (bwr.write_size > 0) {
            ret = binder_thread_write(proc, thread, (void __user *)bwr.write_buffer,
                                      bwr.write_size, &bwr.write_consumed);
            if (ret < 0) {
                bwr.read_consumed = 0;
                if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
                    ret = -EFAULT;
                goto err;
            }
        }
        if (bwr.read_size > 0) {   /* data is written back to the caller process */
            ret = binder_thread_read(proc, thread, (void __user *)bwr.read_buffer,
                                     bwr.read_size, &bwr.read_consumed,
                                     filp->f_flags & O_NONBLOCK);
            if (!list_empty(&proc->todo))
                wake_up_interruptible(&proc->wait);   /* resume the suspended caller */
            if (ret < 0) {
                if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
                    ret = -EFAULT;
                goto err;
            }
        }
        .........................................
    }

int binder_thread_write(struct binder_proc *proc, struct binder_thread *thread,
                        void __user *buffer, int size, signed long *consumed)
{
    uint32_t cmd;
    void __user *ptr = buffer + *consumed;
    void __user *end = buffer + size;

    while (ptr < end && thread->return_error == BR_OK) {
        if (get_user(cmd, (uint32_t __user *)ptr))   /* fetch the cmd from user space into the kernel */
            return -EFAULT;
        ptr += sizeof(uint32_t);
        if (_IOC_NR(cmd) < ARRAY_SIZE(binder_stats.bc)) {
            binder_stats.bc[_IOC_NR(cmd)]++;
            proc->stats.bc[_IOC_NR(cmd)]++;
            thread->stats.bc[_IOC_NR(cmd)]++;
        }
        switch (cmd) {
        case BC_INCREFS:
        .........................................
        case BC_TRANSACTION:   /* set by IPCThreadState via writeTransactionData */
        case BC_REPLY: {
            struct binder_transaction_data tr;
            if (copy_from_user(&tr, ptr, sizeof(tr)))
                return -EFAULT;
            ptr += sizeof(tr);
            binder_transaction(proc, thread, &tr, cmd == BC_REPLY);
            break;
        }
        ........................................
}
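For the other side of this hand-off, here is a condensed sketch of IPCThreadState::writeTransactionData, which builds exactly the stream binder_thread_write consumes: a BC_* command word followed by a binder_transaction_data record (error handling from the real function is omitted).

status_t IPCThreadState::writeTransactionData(int32_t cmd, uint32_t binderFlags,
    int32_t handle, uint32_t code, const Parcel& data, status_t* statusBuffer)
{
    binder_transaction_data tr;

    tr.target.handle = handle;           // 0 when the target is the service manager
    tr.code = code;                      // e.g. BnCameraService::CONNECT
    tr.flags = binderFlags;              // TF_ONE_WAY left clear for a synchronous call
    tr.data_size = data.ipcDataSize();   // the flattened Parcel payload
    tr.data.ptr.buffer = data.ipcData();
    tr.offsets_size = data.ipcObjectsCount() * sizeof(size_t);
    tr.data.ptr.offsets = data.ipcObjects();

    mOut.writeInt32(cmd);                // the cmd that get_user() reads above
    mOut.write(&tr, sizeof(tr));         // the tr that copy_from_user() copies in
    return NO_ERROR;
}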

static void binder_transaction(struct binder_proc *proc, struct binder_thread *thread,
                               struct binder_transaction_data *tr, int reply)
{
    ..............................................
    if (reply) {            /* cmd != BC_REPLY, so this branch is not taken */
        ......................................
    } else {
        if (tr->target.handle) {   /* false for service_manager (handle == 0) */
            .......................................
        } else {
            /* handle 0: here we pick up the process info that service_manager
               registered in the binder kernel via BINDER_SET_CONTEXT_MGR */
            target_node = binder_context_mgr_node;
            if (target_node == NULL) {
                return_error = BR_DEAD_REPLY;
                goto err_no_context_mgr_node;
            }
        }
        e->to_node = target_node->debug_id;
        target_proc = target_node->proc;   /* the structure of the target process, service_manager */
        if (target_proc == NULL) {
            return_error = BR_DEAD_REPLY;
            goto err_dead_binder;
        }
        ....................
    }

    if (target_thread) {
        e->to_thread = target_thread->pid;
        target_list = &target_thread->todo;
        target_wait = &target_thread->wait;   /* the thread service_manager is suspended on */
    } else {
        target_list = &target_proc->todo;
        target_wait = &target_proc->wait;
    }

    ............................................
    case BINDER_TYPE_BINDER:
    case BINDER_TYPE_WEAK_BINDER: {
        ..........................
        ref = binder_get_ref_for_node(target_proc, node);   /* create, in the binder kernel,
                                                               a reference to the service found */
        ..........................
    } break;
    ............................................

    if (target_wait)
        wake_up_interruptible(target_wait);   /* wake the suspended thread to handle the
                                                 caller's request; see svcmgr_handler for
                                                 how the command itself is processed */
    ....................
}

At this point we have reached the service manager process via getService. When the service manager process receives the request, it is woken up if it was suspended. Now let's look at the binder_loop function in service manager.

Service_manager.c

void binder_loop(struct binder_state *bs, binder_handler func)
{
    .................................
    binder_write(bs, readbuf, sizeof(unsigned));

    for (;;) {
        bwr.read_size = sizeof(readbuf);
        bwr.read_consumed = 0;
        bwr.read_buffer = (unsigned) readbuf;

        res = ioctl(bs->fd, BINDER_WRITE_READ, &bwr);   /* the process suspends here
                                                           if there is no request to handle */
        if (res < 0) {
            LOGE("binder_loop: ioctl failed (%s)\n", strerror(errno));
            break;
        }
        res = binder_parse(bs, 0, readbuf, bwr.read_consumed, func);   /* func is svcmgr_handler */
        ...................................
    }
}
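For context, this loop is entered from service_manager.c's main, which also explains why handle 0 reaches binder_context_mgr_node in binder_transaction: the service manager claims the context-manager slot at startup. A sketch:

int main(int argc, char **argv)
{
    struct binder_state *bs = binder_open(128 * 1024);   /* servicemanager's user-space
                                                            helper, not the kernel binder_open */
    binder_become_context_manager(bs);   /* ioctl(fd, BINDER_SET_CONTEXT_MGR, 0) */
    binder_loop(bs, svcmgr_handler);     /* svcmgr_handler is the func seen above */
    return 0;
}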

On receiving a request, binder_parse decodes it and invokes the callback registered earlier to look up the service the caller asked for:

int binder_parse(struct binder_state *bs, struct binder_io *bio,
                 uint32_t *ptr, uint32_t size, binder_handler func)
{
    ....................................
    switch(cmd) {
    ......
    case BR_TRANSACTION: {
        struct binder_txn *txn = (void *) ptr;
        if ((end - ptr) * sizeof(uint32_t) < sizeof(struct binder_txn)) {
            LOGE("parse: txn too small!\n");
            return -1;
        }
        binder_dump_txn(txn);
        if (func) {
            unsigned rdata[256/4];
            struct binder_io msg;
            struct binder_io reply;
            int res;

            bio_init(&reply, rdata, sizeof(rdata), 4);
            bio_init_from_txn(&msg, txn);
            res = func(bs, txn, &msg, &reply);   /* find the service the caller requested */
            binder_send_reply(bs, &reply, txn->data, res);   /* return the service found to the caller */
        }
        ptr += sizeof(*txn) / sizeof(uint32_t);
        break;
    ........
    }
}

void binder_send_reply(struct binder_state *bs,
                       struct binder_io *reply,
                       void *buffer_to_free,
                       int status)
{
    struct {
        uint32_t cmd_free;
        void *buffer;
        uint32_t cmd_reply;
        struct binder_txn txn;
    } __attribute__((packed)) data;

    data.cmd_free = BC_FREE_BUFFER;
    data.buffer = buffer_to_free;
    data.cmd_reply = BC_REPLY;   /* substitute BC_REPLY for cmd in binder_thread_write
                                    above and you can see how the service manager returns
                                    the service it found to the caller */
    data.txn.target = 0;
    ..........................
    binder_write(bs, &data, sizeof(data));   /* talk to the binder kernel via ioctl */
}

Once control returns from here the caller is woken up, and the client process holds a reference, kept in the Binder kernel, to the requested service's IBinder object: a remote BBinder object.
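In client code this whole exchange hides behind a few lines; a sketch of the usual pattern ("media.camera" is the name CameraService registers under):

sp<ICameraService> getCameraService()
{
    sp<IServiceManager> sm = defaultServiceManager();
    sp<IBinder> binder = sm->getService(String16("media.camera"));   // the round trip traced above
    return interface_cast<ICameraService>(binder);   // wrap the handle in a BpCameraService proxy
}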

Communication between a client and a Service after the connection is established:

virtual sp<ICamera> connect(const sp<ICameraClient>& cameraClient)
{
    Parcel data, reply;
    data.writeInterfaceToken(ICameraService::getInterfaceDescriptor());
    data.writeStrongBinder(cameraClient->asBinder());
    remote()->transact(BnCameraService::CONNECT, data, &reply);
    return interface_cast<ICamera>(reply.readStrongBinder());
}

As analyzed earlier, remote() here is the CameraService object we obtained, so the caller's request crosses into CameraService. Every Android process creates a thread pool that is used to handle requests from other processes. When there is no data the threads are suspended, and it is the binder kernel that wakes one of them up:
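How does a service process end up with such a pool in the first place? A sketch modeled on the media server's main, which hosts CameraService (details vary by release):

int main(int argc, char** argv)
{
    sp<ProcessState> proc(ProcessState::self());   // opens /dev/binder once per process
    CameraService::instantiate();                  // addService("media.camera", new CameraService())
    ProcessState::self()->startThreadPool();       // spawn pooled binder threads
    IPCThreadState::self()->joinThreadPool();      // the main thread joins too (shown below)
    return 0;
}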

void IPCThreadState::joinThreadPool(bool isMain)
{
    LOG_THREADPOOL("**** THREAD %p (PID %d) IS JOINING THE THREAD POOL\n",
                   (void*)pthread_self(), getpid());

    mOut.writeInt32(isMain ? BC_ENTER_LOOPER : BC_REGISTER_LOOPER);

    status_t result;
    do {
        int32_t cmd;
        result = talkWithDriver();
        if (result >= NO_ERROR) {
            size_t IN = mIn.dataAvail();   // the binder kernel has passed data to the service
            if (IN < sizeof(int32_t)) continue;
            cmd = mIn.readInt32();
            IF_LOG_COMMANDS() {
                alog << "Processing top-level Command: "
                     << getReturnString(cmd) << endl;
            }
            result = executeCommand(cmd);   // the service executes the command the binder kernel requested
        }

        // Let this thread exit the thread pool if it is no longer
        // needed and it is not the main process thread.
        if (result == TIMED_OUT && !isMain) {
            break;
        }
    } while (result != -ECONNREFUSED && result != -EBADF);

    .......................
}

status_t IPCThreadState::executeCommand(int32_t cmd)
{
    BBinder* obj;
    RefBase::weakref_type* refs;
    status_t result = NO_ERROR;

    switch (cmd) {
    .........................
    case BR_TRANSACTION:
        {
            binder_transaction_data tr;
            result = mIn.read(&tr, sizeof(tr));
            LOG_ASSERT(result == NO_ERROR,
                "Not enough command data for brTRANSACTION");
            if (result != NO_ERROR) break;

            Parcel buffer;
            buffer.ipcSetDataReference(
                reinterpret_cast<const uint8_t*>(tr.data.ptr.buffer),
                tr.data_size,
                reinterpret_cast<const size_t*>(tr.data.ptr.offsets),
                tr.offsets_size/sizeof(size_t), freeBuffer, this);

            const pid_t origPid = mCallingPid;
            const uid_t origUid = mCallingUid;

            mCallingPid = tr.sender_pid;
            mCallingUid = tr.sender_euid;

            //LOGI(">>>> TRANSACT from pid %d uid %d\n", mCallingPid, mCallingUid);

            Parcel reply;
            .........................
            if (tr.target.ptr) {
                sp<BBinder> b((BBinder*)tr.cookie);   // the service's Binder object, here CameraService
                const status_t error = b->transact(tr.code, buffer, &reply, 0);   // calls into
                if (error < NO_ERROR) reply.setError(error);   // CameraService's onTransact
            } else {
                const status_t error = the_context_object->transact(tr.code, buffer, &reply, 0);
                if (error < NO_ERROR) reply.setError(error);
            }

            //LOGI("<<<< TRANSACT from pid %d restore pid %d uid %d\n",
            //     mCallingPid, origPid, origUid);

            if ((tr.flags & TF_ONE_WAY) == 0) {
                LOG_ONEWAY("Sending reply to %d!", mCallingPid);
                sendReply(reply, 0);   // returns the data through the binder kernel to the
                                       // caller process; trace it yourself following the
                                       // earlier analysis
            } else {
                LOG_ONEWAY("NOT sending reply to %d!", mCallingPid);
            }

            mCallingPid = origPid;
            mCallingUid = origUid;

            IF_LOG_TRANSACTIONS() {
                TextOutput::Bundle _b(alog);
                alog << "BC_REPLY thr " << (void*)pthread_self() << " / obj "
                     << tr.target.ptr << ": " << indent << reply << dedent << endl;
            }
            ..................................
        }
        break;
    }

    ..................................
    if (result != NO_ERROR) {
        mLastError = result;
    }
    return result;
}
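The sendReply called above is worth a look, since it closes the loop with the BC_REPLY path we followed in binder_thread_write; a condensed sketch:

status_t IPCThreadState::sendReply(const Parcel& reply, uint32_t flags)
{
    status_t err;
    status_t statusBuffer;
    err = writeTransactionData(BC_REPLY, flags, -1, 0, reply, &statusBuffer);
    if (err < NO_ERROR) return err;
    return waitForResponse(NULL, NULL);   // one more pass through talkWithDriver
}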

transact is then invoked on the CameraService BBinder object:

status_t BBinder::transact(
    uint32_t code, const Parcel& data, Parcel* reply, uint32_t flags)
{
    .....................
    switch (code) {
        case PING_TRANSACTION:
            reply->writeInt32(pingBinder());
            break;
        default:
            err = onTransact(code, data, reply, flags);
            break;
    }
    ...................
    return err;
}

This ends up calling CameraService's onTransact function, since CameraService derives from BBinder.

status_t BnCameraService::onTransact(
    uint32_t code, const Parcel& data, Parcel* reply, uint32_t flags)
{
    switch(code) {
        case CONNECT: {
            CHECK_INTERFACE(ICameraService, data, reply);
            sp<ICameraClient> cameraClient =
                interface_cast<ICameraClient>(data.readStrongBinder());
            sp<ICamera> camera = connect(cameraClient);   // the real handler
            reply->writeStrongBinder(camera->asBinder());
            return NO_ERROR;
        } break;
        default:
            return BBinder::onTransact(code, data, reply, flags);
    }
}

This completes one full round of communication from client to service.

Designing a multi-client Service

A Service can accept connections from many different clients. "Multi-client" here means the Service creates a distinct IClient interface for each client. If you have done AIDL programming, you know that a Service exposes an IService interface to its clients: through defaultServiceManager->getService a client obtains a BpBinder interface to the service, and calling transact through it lets the client talk to the service. That already gives a simple service/client program, but it has a drawback: the single IService is open to every client. To distinguish clients, each one would have to hand the Service some identifying token when the connection is established. That works, but it is clumsy; for Camera, say, there may be more than one camera device, each with different capabilities, and the scheme gets awkward. Instead we can borrow the multi-client design used in QT: the Service creates an IClient interface for every Client, and the IService interface is used only for establishing the connection between Service and Client. For Camera, with multiple camera devices, the Service can then open a different device for each Client.

import android.os.IBinder;
import android.os.RemoteException;

public class TestServerServer extends android.app.testServer.ITestServer.Stub
{
    static final int MAX_CLIENTS = 16;   // demo capacity; the original never allocated the array
    int mClientCount = 0;
    testServerClient mClient[] = new testServerClient[MAX_CLIENTS];

    @Override
    public android.app.testServer.ITestClient.Stub connect(ITestClient client) throws RemoteException
    {
        testServerClient tClient = new testServerClient(this, client);   // create a distinct
        mClient[mClientCount] = tClient;                                 // IClient for this client
        mClientCount++;
        System.out.printf("*** Server connect client is %s", client.asBinder());
        return tClient;
    }

    @Override
    public void receivedData(int count) throws RemoteException
    {
    }

    public static class testServerClient extends android.app.testServer.ITestClient.Stub
    {
        public android.app.testServer.ITestClient mClient;
        public TestServerServer mServer;

        public testServerClient(TestServerServer tServer, android.app.testServer.ITestClient tClient)
        {
            mServer = tServer;
            mClient = tClient;
        }

        public IBinder asBinder()
        {
            return this;
        }
    }
}

This is only a demo of such a Service; to install it as a system service you would additionally have to modify the Android source to avoid the permission check!

Summary:

Suppose a Client A process and a Service B process need to establish IPC communication. From the preceding analysis, the flow is as follows:

1. Service B opens the Binder driver, registering its process information with the kernel, which creates a binder_ref for the Service.

2. Service B adds its service information to the service_manager process via Add_Service.

3. Service B's thread pool suspends, waiting for client requests.

4. Client A calls open_driver to open the Binder driver, registering its process information with the kernel, which creates a binder_ref for the Service.

5. Client A calls defaultServiceManager->getService and obtains Service B's IBinder object as held in the kernel.

6. Client A communicates with the Binder kernel via transact; the Binder kernel suspends Client A.

7. The Binder kernel resumes a thread from Service B's thread pool, which handles the client's request in joinThreadPool.

8. The Binder kernel suspends Service B and writes the data Service B returned to Client A.

9. The Binder kernel resumes Client A.

The Binder kernel driver plays the role of an intermediary between Client A and Service B. For every IBinder object passed through transact, the Binder kernel creates a unique Binder object associated with it, which is how different clients are told apart.

 

Source: http://blog.sina.com.cn/s/blog_55b1b0d50100fdft.html