
Android binder driver analysis


1. binder_ioctl

static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)


First, binder_get_thread() is called to find the binder_thread structure for the calling thread in this proc's threads red-black tree.

thread = binder_get_thread(proc);


Internally, binder_get_thread() uses the current macro to look up, in the threads red-black tree of the binder_proc, the binder_thread structure that describes the calling thread.

If none is found, a new binder_thread structure is created for the calling thread.

The driver proactively marks this thread BINDER_LOOPER_STATE_NEED_RETURN, meaning it will have to return to user space later.
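For reference, here is a condensed paraphrase of binder_get_thread() from the ~3.x driver (error paths, locking and statistics trimmed); it shows the red-black-tree lookup keyed on current->pid and the creation of a new binder_thread on first contact:

static struct binder_thread *binder_get_thread(struct binder_proc *proc)
{
	struct rb_node **p = &proc->threads.rb_node;
	struct rb_node *parent = NULL;
	struct binder_thread *thread;

	while (*p) {                    /* rbtree of this proc's threads, keyed by pid */
		parent = *p;
		thread = rb_entry(parent, struct binder_thread, rb_node);
		if (current->pid < thread->pid)
			p = &(*p)->rb_left;
		else if (current->pid > thread->pid)
			p = &(*p)->rb_right;
		else
			return thread;  /* this thread has called into binder before */
	}

	/* first ioctl from this thread: create and register a binder_thread */
	thread = kzalloc(sizeof(*thread), GFP_KERNEL);
	if (thread == NULL)
		return NULL;
	thread->proc = proc;
	thread->pid = current->pid;
	init_waitqueue_head(&thread->wait);
	INIT_LIST_HEAD(&thread->todo);
	rb_link_node(&thread->rb_node, parent, p);
	rb_insert_color(&thread->rb_node, &proc->threads);
	thread->looper |= BINDER_LOOPER_STATE_NEED_RETURN;   /* must return to user space */
	return thread;
}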

Handling the various ioctl command codes

binder.h defines the following seven command codes:

#define BINDER_WRITE_READ          _IOWR('b', 1, struct binder_write_read)
#define BINDER_SET_IDLE_TIMEOUT    _IOW('b', 3, __s64)
#define BINDER_SET_MAX_THREADS     _IOW('b', 5, __u32)
#define BINDER_SET_IDLE_PRIORITY   _IOW('b', 6, __s32)
#define BINDER_SET_CONTEXT_MGR     _IOW('b', 7, __s32)
#define BINDER_THREAD_EXIT         _IOW('b', 8, __s32)
#define BINDER_VERSION             _IOWR('b', 9, struct binder_version)


An ioctl cmd is a 32-bit number with a defined layout; helper macros such as _IOWR and _IOW generate the 32-bit value. This is the standard Linux driver convention.

For example:

#define BINDER_WRITE_READ _IOWR('b', 1, struct binder_write_read)

_IOWR: the command transfers data in both directions

'b': the driver type, binder

1: the command number, distinguishing the commands; 1 stands for BINDER_WRITE_READ

struct binder_write_read: the type of the data being passed; its size gives the amount of data to transfer
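To make that layout concrete, the small self-contained user-space program below re-creates BINDER_WRITE_READ with _IOWR and decodes it with the standard _IOC_* macros; the struct definition mirrors the one in binder.h (shown again in section 2.1), using the 64-bit variants of the binder typedefs:

#include <stdio.h>
#include <linux/ioctl.h>
#include <linux/types.h>

typedef __u64 binder_size_t;
typedef __u64 binder_uintptr_t;

struct binder_write_read {
	binder_size_t    write_size;
	binder_size_t    write_consumed;
	binder_uintptr_t write_buffer;
	binder_size_t    read_size;
	binder_size_t    read_consumed;
	binder_uintptr_t read_buffer;
};

#define BINDER_WRITE_READ _IOWR('b', 1, struct binder_write_read)

int main(void)
{
	unsigned int cmd = BINDER_WRITE_READ;

	/* direction (read|write = 3), driver type 'b', command number 1,
	 * and the size of the payload type (48 bytes) */
	printf("dir=%u type=%c nr=%u size=%u\n",
	       _IOC_DIR(cmd), _IOC_TYPE(cmd), _IOC_NR(cmd), _IOC_SIZE(cmd));
	return 0;
}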

2. Handling of the BINDER_WRITE_READ ioctl command code

case BINDER_WRITE_READ:
ret = binder_ioctl_write_read(filp, cmd, arg, thread);


The arguments are:

filp: the struct file created when the calling process first opened the binder device; it is passed in on every binder_ioctl system call. (The kernel maps the fd passed from user space to the corresponding struct file.)

cmd: the ioctl command code, here BINDER_WRITE_READ

arg: the user-space address of the binder_write_read data

thread: the binder_thread structure the binder driver keeps for the calling thread

2.1 Analysis of binder_ioctl_write_read

First, using the address passed down (the arg parameter), the binder_write_read data is copied from user space into kernel space with copy_from_user():

void __user *ubuf = (void __user *)arg;
struct binder_write_read bwr;

…

if (copy_from_user(&bwr, ubuf, sizeof(bwr))) {


The binder_write_read structure, shown below, mainly describes the size of the data transferred during the communication, how much of it has already been consumed, and the address of the actual data.

/*
* On 64-bit platforms where user code may run in 32-bits the driver must
* translate the buffer (and local binder) addresses appropriately.
*/

struct binder_write_read {
binder_size_t write_size; /* bytes to write */
binder_size_t write_consumed; /* bytes consumed by driver */
binder_uintptr_t write_buffer;
binder_size_t read_size; /* bytes to read */
binder_size_t read_consumed; /* bytes consumed by driver */
binder_uintptr_t read_buffer;
};


write_xx describes data flowing from user space into the binder driver; a non-zero write_size means user space has data for the driver to process.

read_xx describes data the binder driver returns to user space; a non-zero read_size means the caller must wait for the driver to produce a result before returning to user space.

The write_size > 0 case is handled first, then the read_size > 0 case.

If write_size is greater than 0, the input buffer contains data and the binder driver must read it out first, calling binder_thread_write() to process it.

if (bwr.write_size > 0) {
ret = binder_thread_write(proc, thread,
bwr.write_buffer,
bwr.write_size,
&bwr.write_consumed);
trace_binder_write_done(ret);
if (ret < 0) {
bwr.read_consumed = 0;
if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
ret = -EFAULT;
goto out;
}
}


If read_size (the output buffer) is greater than 0, a result has to be waited for; binder_thread_read() is executed to wait for and read it.

if (bwr.read_size > 0) {
ret = binder_thread_read(proc, thread, bwr.read_buffer,
bwr.read_size,
&bwr.read_consumed,
filp->f_flags & O_NONBLOCK);
trace_binder_read_done(ret);
if (!list_empty(&proc->todo))
wake_up_interruptible(&proc->wait);
if (ret < 0) {
if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
ret = -EFAULT;
goto out;
}
}
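Seen from user space, the two branches above correspond to a single ioctl call. Below is a rough sketch of such a blocking call, similar in spirit to what IPCThreadState::talkWithDriver() does; it assumes the UAPI header is installed as <linux/android/binder.h> and omits error handling:

#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/android/binder.h>

/* Send a BC_* command stream and wait for the BR_* replies in one call. */
static int binder_call(int binder_fd, void *wbuf, size_t wsize,
                       void *rbuf, size_t rsize)
{
	struct binder_write_read bwr;

	memset(&bwr, 0, sizeof(bwr));
	bwr.write_buffer = (binder_uintptr_t)(uintptr_t)wbuf;
	bwr.write_size   = wsize;   /* > 0: driver runs binder_thread_write() first */
	bwr.read_buffer  = (binder_uintptr_t)(uintptr_t)rbuf;
	bwr.read_size    = rsize;   /* > 0: driver may block in binder_thread_read() */

	if (ioctl(binder_fd, BINDER_WRITE_READ, &bwr) < 0)
		return -1;

	/* bwr.write_consumed / bwr.read_consumed now report how much of each
	 * buffer the driver actually processed and filled in */
	return (int)bwr.read_consumed;
}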


2.2 Analysis of binder_thread_write()

static int binder_thread_write(struct binder_proc *proc,
struct binder_thread *thread,
binder_uintptr_t binder_buffer, size_t size,
binder_size_t *consumed)


The parameters are:

proc: the process currently requesting binder communication

thread: the thread currently requesting binder communication

binder_buffer: the address of the input buffer, taken from binder_write_read

size: the length of the data in the input buffer

consumed: the length of the data that has already been processed

2.2.1 Use binder_buffer and consumed to compute the starting pointer for the data to read this time

uint32_t cmd;
void __user *buffer = (void __user *)(uintptr_t)binder_buffer;
void __user *ptr = buffer + *consumed;
void __user *end = buffer + size;


2.2.2 Read cmd, the protocol command used between the user program and binder, and advance the ptr pointer

if (get_user(cmd, (uint32_t __user *)ptr))

…

ptr += sizeof(uint32_t);
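For context, here is a condensed paraphrase of the loop in binder_thread_write() that this section walks through (≈3.x driver, most BC_* cases and error handling elided); note how *consumed is advanced so that binder_write_read reports progress back to the caller:

while (ptr < end && thread->return_error == BR_OK) {
	if (get_user(cmd, (uint32_t __user *)ptr))
		return -EFAULT;
	ptr += sizeof(uint32_t);

	switch (cmd) {
	case BC_TRANSACTION:
	case BC_REPLY:
		/* copy a binder_transaction_data from user space and call
		 * binder_transaction(), as shown in the snippet below */
		break;
	/* ... the other BC_* commands are handled here ... */
	}
	*consumed = ptr - buffer;    /* reported back via bwr.write_consumed */
}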


binder.h defines the binder communication protocol codes:

binder_driver_command_protocol : BC_XX
binder_driver_return_protocol : BR_XX


The other commands are skipped for now; let's look at BC_TRANSACTION and BC_REPLY first. When the command is BC_TRANSACTION or BC_REPLY, the data that follows is in binder_transaction_data format.

- copy_from_user() copies the data from user space into kernel space, and the ptr pointer is advanced.

- binder_transaction() is then called; using the target information in binder_transaction_data, it locates the target and hands the actual payload over to it.

struct binder_transaction_data tr;

if (copy_from_user(&tr, ptr, sizeof(tr)))
return -EFAULT;
ptr += sizeof(tr);
binder_transaction(proc, thread, &tr, cmd == BC_REPLY);
break;


binder_transaction_data describes the data of an inter-process communication: mainly the target of the communication and its contents.

The actual payload is referenced through data. Small data can be held in the statically allocated buf array; larger transfers use a dynamically allocated buffer referenced through ptr.

struct binder_transaction_data {
/* The first two are only used for bcTRANSACTION and brTRANSACTION,
* identifying the target and contents of the transaction.
*/
union {
/* target descriptor of command transaction */
__u32    handle;
/* target descriptor of return transaction */
binder_uintptr_t ptr;
} target;
binder_uintptr_t    cookie;    /* target object cookie */
__u32        code;        /* transaction command */

/* General information about the transaction. */
__u32        flags;
pid_t        sender_pid;
uid_t        sender_euid;
binder_size_t    data_size;    /* number of bytes of data */
binder_size_t    offsets_size;    /* number of bytes of offsets */

/* If this transaction is inline, the data immediately
* follows here; otherwise, it ends with a pointer to
* the data buffer.
*/
union {
struct {
/* transaction data */
binder_uintptr_t    buffer;
/* offsets from buffer to flat_binder_object structs */
binder_uintptr_t    offsets;
} ptr;
__u8    buf[8];
} data;
};
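To make the [cmd][binder_transaction_data] layout from 2.2.2 concrete, here is a minimal user-space sketch that packs one BC_TRANSACTION into a write buffer (UAPI header path assumed, no embedded objects, error handling omitted); the returned length is what would go into bwr.write_size:

#include <stdint.h>
#include <string.h>
#include <linux/android/binder.h>

static size_t pack_transaction(uint8_t *out, uint32_t handle,
                               uint32_t code, const void *payload, size_t len)
{
	uint32_t cmd = BC_TRANSACTION;
	struct binder_transaction_data tr;
	size_t off = 0;

	memset(&tr, 0, sizeof(tr));
	tr.target.handle = handle;          /* which binder_ref the driver should resolve */
	tr.code = code;                     /* RPC code interpreted by the target */
	tr.data_size = len;
	tr.offsets_size = 0;                /* no flat_binder_object in this payload */
	tr.data.ptr.buffer = (binder_uintptr_t)(uintptr_t)payload;

	/* layout expected by binder_thread_write(): [cmd][binder_transaction_data] */
	memcpy(out + off, &cmd, sizeof(cmd)); off += sizeof(cmd);
	memcpy(out + off, &tr,  sizeof(tr));  off += sizeof(tr);
	return off;
}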


2.2.3 Analysis of binder_transaction():

static void binder_transaction(struct binder_proc *proc,
struct binder_thread *thread,
struct binder_transaction_data *tr, int reply)


Parameters:

proc: the binder_proc structure the binder driver keeps for the process requesting binder communication

thread: the binder_thread structure the binder driver keeps for the thread requesting binder communication

tr: the kernel address where the binder_transaction_data copied back from user space is stored

reply: whether this is reply data

2.2.3.1 The non-reply path first

Find the target binder_node entity object based on target.

When target.handle is not 0, i.e. the target is not the context manager, the binder_ref is looked up in the red-black tree using the current proc and the target handle value; the binder_ref reference object then leads to the binder entity object.

When the target is the context manager, its entity object is used directly.

if (tr->target.handle) {
struct binder_ref *ref;

ref = binder_get_ref(proc, tr->target.handle);
if (ref == NULL) {
binder_user_error("%d:%d got transaction to invalid handle\n",
proc->pid, thread->pid);
return_error = BR_FAILED_REPLY;
goto err_invalid_target_handle;
}
target_node = ref->node;
} else {
target_node = binder_context_mgr_node;
if (target_node == NULL) {
return_error = BR_DEAD_REPLY;
goto err_no_context_mgr_node;
}
}


Find the target binder_proc:

target_proc = target_node->proc;

Assume the simplest case: the calling thread has no transaction in progress, so target_thread cannot be determined yet.

All that is needed is to point target_list and target_wait at the target process:

target_list = &target_proc->todo;
target_wait = &target_proc->wait;


When a result must be waited for and the calling thread already has a transaction in progress:

if (!(tr->flags & TF_ONE_WAY) && thread->transaction_stack) {
struct binder_transaction *tmp;

tmp = thread->transaction_stack;
if (tmp->to_thread != thread) {
binder_user_error("%d:%d got new transaction with bad transaction stack, transaction %d has target %d:%d\n",
proc->pid, thread->pid, tmp->debug_id,
tmp->to_proc ? tmp->to_proc->pid : 0,
tmp->to_thread ?
tmp->to_thread->pid : 0);
return_error = BR_FAILED_REPLY;
goto err_bad_call_stack;
}
while (tmp) {
if (tmp->from && tmp->from->proc == target_proc)
target_thread = tmp->from;
tmp = tmp->from_parent;
}
}


A buffer is allocated dynamically with binder_alloc_buf(), and copy_from_user() copies the actual communication payload into it.
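Paraphrased from binder_transaction() in the ~3.x driver (error handling elided), the key point is that the payload is copied once, directly into a buffer owned by the target process:

t->buffer = binder_alloc_buf(target_proc, tr->data_size, tr->offsets_size,
			     !reply && (t->flags & TF_ONE_WAY));
t->buffer->transaction = t;
t->buffer->target_node = target_node;

/* the offsets array sits right after the flat data, pointer-aligned */
offp = (binder_size_t *)(t->buffer->data + ALIGN(tr->data_size, sizeof(void *)));

/* single copy from the sender's user space into the target's buffer */
copy_from_user(t->buffer->data,
	       (const void __user *)(uintptr_t)tr->data.ptr.buffer, tr->data_size);
copy_from_user(offp,
	       (const void __user *)(uintptr_t)tr->data.ptr.offsets, tr->offsets_size);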

The offsets are then processed, handling the special objects contained in the transferred data:

enum {
BINDER_TYPE_BINDER         = B_PACK_CHARS('s', 'b', '*', B_TYPE_LARGE),
BINDER_TYPE_WEAK_BINDER    = B_PACK_CHARS('w', 'b', '*', B_TYPE_LARGE),
BINDER_TYPE_HANDLE         = B_PACK_CHARS('s', 'h', '*', B_TYPE_LARGE),
BINDER_TYPE_WEAK_HANDLE    = B_PACK_CHARS('w', 'h', '*', B_TYPE_LARGE),
BINDER_TYPE_FD             = B_PACK_CHARS('f', 'd', '*', B_TYPE_LARGE),
};


BINDER_TYPE_BINDER / BINDER_TYPE_WEAK_BINDER: a binder entity object

BINDER_TYPE_HANDLE / BINDER_TYPE_WEAK_HANDLE: a binder reference object

BINDER_TYPE_FD: a file descriptor

All of them are described by flat_binder_object, which is what makes it possible to pass binder objects and file descriptors across processes.

/*
* This is the flattened representation of a Binder object for transfer
* between processes.  The 'offsets' supplied as part of a binder transaction
* contains offsets into the data where these structures occur.  The Binder
* driver takes care of re-writing the structure type and data as it moves
* between processes.
*/
struct flat_binder_object {
/* 8 bytes for large_flat_header. */
__u32    type;
__u32    flags;

/* 8 bytes of data. */
union {
binder_uintptr_t    binder;    /* local object */
__u32            handle;    /* remote object */
};

/* extra data associated with local object */
binder_uintptr_t    cookie;
};
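As an example of such a special object, the sketch below shows how a sender could flatten a file descriptor into the payload; in this version of the interface the fd travels in the handle field, and the offsets array tells the driver where the object sits so binder_transaction() can install a corresponding fd in the target process (header path assumed, error handling omitted):

#include <string.h>
#include <linux/android/binder.h>

/* Build a payload that carries exactly one file descriptor. */
static void build_fd_payload(struct flat_binder_object *obj,
                             binder_size_t *offsets, int fd)
{
	memset(obj, 0, sizeof(*obj));
	obj->type = BINDER_TYPE_FD;   /* driver will translate this for the target */
	obj->handle = fd;             /* the sender's local fd number */

	offsets[0] = 0;               /* byte offset of the object inside the data */

	/* in binder_transaction_data:
	 *   data.ptr.buffer  -> obj
	 *   data.ptr.offsets -> offsets
	 *   data_size        = sizeof(*obj)
	 *   offsets_size     = sizeof(binder_size_t) */
}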


Finally, the binder_work is added to target_list, and wake_up_interruptible() wakes the process sleeping on target_wait.

t->work.type = BINDER_WORK_TRANSACTION;
list_add_tail(&t->work.entry, target_list);
tcomplete->type = BINDER_WORK_TRANSACTION_COMPLETE;
list_add_tail(&tcomplete->entry, &thread->todo);
if (target_wait)
wake_up_interruptible(target_wait);
return;


2.3 binder_thread_read

static int binder_thread_read(struct binder_proc *proc,
struct binder_thread *thread,
binder_uintptr_t binder_buffer, size_t size,
binder_size_t *consumed, int non_block)


Assume the target proc has been woken up and continues executing from here.

It finds the binder_work to handle and, since the binder_work type is BINDER_WORK_TRANSACTION, recovers the enclosing binder_transaction structure from it.

while (1) {
uint32_t cmd;
struct binder_transaction_data tr;
struct binder_work *w;
struct binder_transaction *t = NULL;

if (!list_empty(&thread->todo)) {
w = list_first_entry(&thread->todo, struct binder_work,
entry);
} else if (!list_empty(&proc->todo) && wait_for_proc_work) {
w = list_first_entry(&proc->todo, struct binder_work,
entry);
} else {
/* no data added */
if (ptr - buffer == 4 &&
!(thread->looper & BINDER_LOOPER_STATE_NEED_RETURN))
goto retry;
break;
}

if (end - ptr < sizeof(tr) + 4)
break;

switch (w->type) {
case BINDER_WORK_TRANSACTION: {
t = container_of(w, struct binder_transaction, work);
} break;


Processing then begins.

The target thread's priority is adjusted temporarily, and the data address is translated (kernel and user-space addresses differ by a fixed offset, proc->user_buffer_offset).

The protocol code and the associated information are copied to user space, and control then returns to user space for processing.

if (t->buffer->target_node) {
struct binder_node *target_node = t->buffer->target_node;

tr.target.ptr = target_node->ptr;
tr.cookie =  target_node->cookie;
t->saved_priority = task_nice(current);
if (t->priority < target_node->min_priority &&
!(t->flags & TF_ONE_WAY))
binder_set_nice(t->priority);
else if (!(t->flags & TF_ONE_WAY) ||
t->saved_priority > target_node->min_priority)
binder_set_nice(target_node->min_priority);
cmd = BR_TRANSACTION;
} else {
tr.target.ptr = 0;
tr.cookie = 0;
cmd = BR_REPLY;
}
tr.code = t->code;
tr.flags = t->flags;
tr.sender_euid = from_kuid(current_user_ns(), t->sender_euid);

if (t->from) {
struct task_struct *sender = t->from->proc->tsk;

tr.sender_pid = task_tgid_nr_ns(sender,
task_active_pid_ns(current));
} else {
tr.sender_pid = 0;
}

tr.data_size = t->buffer->data_size;
tr.offsets_size = t->buffer->offsets_size;
tr.data.ptr.buffer = (binder_uintptr_t)(
(uintptr_t)t->buffer->data +
proc->user_buffer_offset);
tr.data.ptr.offsets = tr.data.ptr.buffer +
ALIGN(t->buffer->data_size,

sizeof(void *));

if (put_user(cmd, (uint32_t __user *)ptr))
return -EFAULT;
ptr += sizeof(uint32_t);
if (copy_to_user(ptr, &tr, sizeof(tr)))
return -EFAULT;
ptr += sizeof(tr);

trace_binder_transaction_received(t);
binder_stat_br(proc, thread, cmd);
binder_debug(BINDER_DEBUG_TRANSACTION,
"%d:%d %s %d %d:%d, cmd %d size %zd-%zd ptr %016llx-%016llx\n",
proc->pid, thread->pid,
(cmd == BR_TRANSACTION) ? "BR_TRANSACTION" :
"BR_REPLY",
t->debug_id, t->from ? t->from->proc->pid : 0,
t->from ? t->from->pid : 0, cmd,
t->buffer->data_size, t->buffer->offsets_size,
(u64)tr.data.ptr.buffer, (u64)tr.data.ptr.offsets);

list_del(&t->work.entry);
t->buffer->allow_user_free = 1;
if (cmd == BR_TRANSACTION && !(t->flags & TF_ONE_WAY)) {
t->to_parent = thread->transaction_stack;
t->to_thread = thread;
thread->transaction_stack = t;
} else {
t->buffer->transaction = NULL;
kfree(t);
binder_stats_deleted(BINDER_STAT_TRANSACTION);
}
break;
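On the user-space side, the read buffer that comes back is a stream of [BR_* code][payload] records. Below is a rough sketch of draining it, in the spirit of the receive loop in IPCThreadState (UAPI header path assumed, only a few BR_* codes handled):

#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <linux/android/binder.h>

static void drain_read_buffer(const uint8_t *buf, size_t consumed)
{
	size_t pos = 0;

	while (pos + sizeof(uint32_t) <= consumed) {
		uint32_t cmd;

		memcpy(&cmd, buf + pos, sizeof(cmd));
		pos += sizeof(cmd);

		switch (cmd) {
		case BR_TRANSACTION:        /* incoming call: we are the target */
		case BR_REPLY: {            /* result for a call we issued */
			struct binder_transaction_data tr;

			memcpy(&tr, buf + pos, sizeof(tr));
			pos += sizeof(tr);
			printf("code=%u data_size=%llu\n",
			       tr.code, (unsigned long long)tr.data_size);
			/* tr.data.ptr.buffer points into this process's mmap'ed
			 * binder area (see user_buffer_offset above) */
			break;
		}
		case BR_NOOP:
		case BR_TRANSACTION_COMPLETE:
			break;              /* no payload for these */
		default:
			return;             /* other BR_* codes not handled in this sketch */
		}
	}
}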