Note that the parameter reply is 0 here, which indicates that this is a BC_TRANSACTION command.
As mentioned earlier, the handle value passed to the driver is 0, i.e. tr->target.handle = 0 here, which means the target Binder object of the request is the Service Manager. Therefore:
- target_node = binder_context_mgr_node;
- target_proc = target_node->proc;
- target_list = &target_proc->todo;
- target_wait = &target_proc->wait;
Here, binder_context_mgr_node was created back when the Service Manager told the Binder driver that it was the daemon process.
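For reference, the target-resolution logic at the top of binder_transaction reads roughly as follows (abridged from the driver source); a non-zero handle would instead be looked up with binder_get_ref, while handle 0 falls straight through to the context manager node:
- if (tr->target.handle) {
- struct binder_ref *ref;
- ref = binder_get_ref(proc, tr->target.handle);
- ......
- target_node = ref->node;
- } else {
- target_node = binder_context_mgr_node;
- ......
- }
- ......
- target_proc = target_node->proc;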
Next, a pending work item tcomplete of type struct binder_work is created; it will shortly be queued on the current thread's todo list, indicating that the current thread has a piece of work to complete. Right after that, a pending transaction t of type struct binder_transaction is created; it will shortly be queued on the Service Manager's todo list, indicating that the Service Manager has a transaction to process. At the same time, this pending transaction t is also pushed onto the current thread's transaction_stack:
- t->from_parent = thread->transaction_stack;
- thread->transaction_stack = t;
This indicates that the current thread still has a transaction in flight.
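Note that this push only happens for a synchronous request; the tail of binder_transaction reads roughly as follows (abridged), and for a TF_ONE_WAY transaction the transaction_stack is left untouched:
- if (reply) {
- ......
- } else if (!(t->flags & TF_ONE_WAY)) {
- t->need_reply = 1;
- t->from_parent = thread->transaction_stack;
- thread->transaction_stack = t;
- } else {
- ......
- }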
Continuing on, tcomplete and t are placed on the current thread's todo queue and the Service Manager process's todo queue respectively:
- t->work.type = BINDER_WORK_TRANSACTION;
- list_add_tail(&t->work.entry, target_list);
- tcomplete->type = BINDER_WORK_TRANSACTION_COMPLETE;
- list_add_tail(&tcomplete->entry, &thread->todo);
Finally, now that the Service Manager has work to do, it needs to be woken up:
- wake_up_interruptible(target_wait);
As mentioned earlier, the Service Manager is waiting for Client requests at this point; that is, it has entered the Binder driver's binder_thread_read function and is sleeping on the wait queue that target_wait above points to. For the details, see the article 浅谈Service Manager成为Android进程间通信(IPC)机制Binder守护进程之路.
For now, we set aside what happens after the Service Manager is woken up and continue following the execution of the current thread.
After binder_transaction finishes, control returns all the way back to binder_ioctl. When binder_ioctl returns from the binder_thread_write call and finds that bwr.read_size is greater than 0, it enters binder_thread_read:
- static int
- binder_thread_read(struct binder_proc *proc, struct binder_thread *thread,
- void __user *buffer, int size, signed long *consumed, int non_block)
- {
- void __user *ptr = buffer + *consumed;
- void __user *end = buffer + size;
- int ret = 0;
- int wait_for_proc_work;
- if (*consumed == 0) {
- if (put_user(BR_NOOP, (uint32_t __user *)ptr))
- return -EFAULT;
- ptr += sizeof(uint32_t);
- }
- retry:
- wait_for_proc_work = thread->transaction_stack == NULL && list_empty(&thread->todo);
- ......
- if (wait_for_proc_work) {
- ......
- } else {
- if (non_block) {
- if (!binder_has_thread_work(thread))
- ret = -EAGAIN;
- } else
- ret = wait_event_interruptible(thread->wait, binder_has_thread_work(thread));
- }
- ......
- while (1) {
- uint32_t cmd;
- struct binder_transaction_data tr;
- struct binder_work *w;
- struct binder_transaction *t = NULL;
- if (!list_empty(&thread->todo))
- w = list_first_entry(&thread->todo, struct binder_work, entry);
- else if (!list_empty(&proc->todo) && wait_for_proc_work)
- w = list_first_entry(&proc->todo, struct binder_work, entry);
- else {
- if (ptr - buffer == 4 && !(thread->looper & BINDER_LOOPER_STATE_NEED_RETURN)) /* no data added */
- goto retry;
- break;
- }
- if (end - ptr < sizeof(tr) + 4)
- break;
- switch (w->type) {
- ......
- case BINDER_WORK_TRANSACTION_COMPLETE: {
- cmd = BR_TRANSACTION_COMPLETE;
- if (put_user(cmd, (uint32_t __user *)ptr))
- return -EFAULT;
- ptr += sizeof(uint32_t);
- binder_stat_br(proc, thread, cmd);
- if (binder_debug_mask & BINDER_DEBUG_TRANSACTION_COMPLETE)
- printk(KERN_INFO "binder: %d:%d BR_TRANSACTION_COMPLETE\n",
- proc->pid, thread->pid);
- list_del(&w->entry);
- kfree(w);
- binder_stats.obj_deleted[BINDER_STAT_TRANSACTION_COMPLETE]++;
- } break;
- ......
- }
- if (!t)
- continue;
- ......
- }
- done:
- ......
- return 0;
- }
The function first writes a BR_NOOP return code into the buffer passed in from user space.
Recall the binder_transaction function above: at this point thread->transaction_stack != NULL and thread->todo is not empty either, so the thread does not go to sleep.
It enters the while loop and first takes the pending work item w from the thread->todo queue. The type of w is BINDER_WORK_TRANSACTION_COMPLETE, which was also set in binder_transaction. The handling of BINDER_WORK_TRANSACTION_COMPLETE is simple: a BR_TRANSACTION_COMPLETE return code is written back into the user-supplied buffer. The user-supplied buffer therefore now contains two return codes, BR_NOOP and BR_TRANSACTION_COMPLETE.
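As an illustration (assuming a 32-bit build, where each return code is a 4-byte uint32_t), the read buffer handed back to user space now looks like this:
- /* Read buffer returned to user space at this point (illustration only):
- *
- * offset 0: BR_NOOP (4 bytes)
- * offset 4: BR_TRANSACTION_COMPLETE (4 bytes)
- *
- * bwr.read_consumed therefore comes back as 8.
- */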
After binder_thread_read finishes, control returns to binder_ioctl, which copies the result of the operation back to user space:
- if (copy_to_user(ubuf, &bwr, sizeof(bwr))) {
- ret = -EFAULT;
- goto err;
- }
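For context, the surrounding BINDER_WRITE_READ branch of binder_ioctl reads roughly as follows (abridged, error handling mostly omitted); it is this final copy_to_user that hands bwr back to user space:
- case BINDER_WRITE_READ: {
- struct binder_write_read bwr;
- ......
- if (copy_from_user(&bwr, ubuf, sizeof(bwr))) {
- ret = -EFAULT;
- goto err;
- }
- if (bwr.write_size > 0) {
- ret = binder_thread_write(proc, thread, (void __user *)bwr.write_buffer, bwr.write_size, &bwr.write_consumed);
- ......
- }
- if (bwr.read_size > 0) {
- ret = binder_thread_read(proc, thread, (void __user *)bwr.read_buffer, bwr.read_size, &bwr.read_consumed, filp->f_flags & O_NONBLOCK);
- if (!list_empty(&proc->todo))
- wake_up_interruptible(&proc->wait);
- ......
- }
- if (copy_to_user(ubuf, &bwr, sizeof(bwr))) {
- ret = -EFAULT;
- goto err;
- }
- break;
- }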
Finally, control returns to IPCThreadState::talkWithDriver.
The IPCThreadState::talkWithDriver function, having returned from the statement:
- ioctl(mProcess->mDriverFD, BINDER_WRITE_READ, &bwr)
first clears out the content that was previously written to the Binder driver:
- if (bwr.write_consumed > 0) {
- if (bwr.write_consumed < (ssize_t)mOut.dataSize())
- mOut.remove(0, bwr.write_consumed);
- else
- mOut.setDataSize(0);
- }
Then it sets up the content read back from the Binder driver:
- if (bwr.read_consumed > 0) {
- mIn.setDataSize(bwr.read_consumed);
- mIn.setDataPosition(0);
- }
It then returns to IPCThreadState::waitForResponse. What IPCThreadState::waitForResponse does is also straightforward: it processes the content just read from the Binder driver. From the analysis above, we know that this content consists of two integers, BR_NOOP and BR_TRANSACTION_COMPLETE. The handling of BR_NOOP is trivial; as its name suggests, nothing is done. The handling of BR_TRANSACTION_COMPLETE depends on the situation: if the request is asynchronous, the whole BC_TRANSACTION operation is finished at this point; if the request is synchronous, i.e. a reply is expected (reply is not NULL), then the thread goes back into the Binder driver through IPCThreadState::talkWithDriver to wait for the result of the BC_TRANSACTION operation.
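For reference, the relevant part of IPCThreadState::waitForResponse in frameworks/base/libs/binder/IPCThreadState.cpp reads roughly as follows (abridged); BR_NOOP falls into the default branch and is ignored by executeCommand, while BR_TRANSACTION_COMPLETE only ends the loop when no reply is expected:
- status_t IPCThreadState::waitForResponse(Parcel *reply, status_t *acquireResult)
- {
- int32_t cmd;
- int32_t err;
- while (1) {
- if ((err = talkWithDriver()) < NO_ERROR) break;
- ......
- cmd = mIn.readInt32();
- switch (cmd) {
- case BR_TRANSACTION_COMPLETE:
- if (!reply && !acquireResult) goto finish;
- break;
- ......
- default:
- err = executeCommand(cmd);
- if (err != NO_ERROR) goto finish;
- break;
- }
- }
- finish:
- ......
- return err;
- }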
We are in the latter case here, so the thread goes through IPCThreadState::talkWithDriver once more into the Binder driver's binder_ioctl function. This time, however, bwr.write_size is 0 and bwr.read_size is greater than 0, so it goes straight into binder_thread_read again. At this point thread->transaction_stack is still not NULL, but the thread->todo queue is now empty, because its contents were already processed above. The thread therefore reaches the statement:
- ret = wait_event_interruptible(thread->wait, binder_has_thread_work(thread));
and goes to sleep, waiting to be woken up by the Service Manager.
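The wait condition here is binder_has_thread_work, which in the same driver is roughly:
- static int
- binder_has_thread_work(struct binder_thread *thread)
- {
- return !list_empty(&thread->todo) || thread->return_error != BR_OK ||
- (thread->looper & BINDER_LOOPER_STATE_NEED_RETURN);
- }
With thread->todo now empty and no error or NEED_RETURN flag pending, the condition is false and the thread sleeps.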
Now we can finally return to what happens after the Service Manager is woken up. As we said earlier, the Service Manager is at this moment sleeping inside binder_thread_read:
- static int
- binder_thread_read(struct binder_proc *proc, struct binder_thread *thread,
- void __user *buffer, int size, signed long *consumed, int non_block)
- {
- void __user *ptr = buffer + *consumed;
- void __user *end = buffer + size;
- int ret = 0;
- int wait_for_proc_work;
- if (*consumed == 0) {
- if (put_user(BR_NOOP, (uint32_t __user *)ptr))
- return -EFAULT;
- ptr += sizeof(uint32_t);
- }
- retry:
- wait_for_proc_work = thread->transaction_stack == NULL && list_empty(&thread->todo);
- ......
- if (wait_for_proc_work) {
- ......
- if (non_block) {
- if (!binder_has_proc_work(proc, thread))
- ret = -EAGAIN;
- } else
- ret = wait_event_interruptible_exclusive(proc->wait, binder_has_proc_work(proc, thread));
- } else {
- ......
- }
- ......
- while (1) {
- uint32_t cmd;
- struct binder_transaction_data tr;
- struct binder_work *w;
- struct binder_transaction *t = NULL;
- if (!list_empty(&thread->todo))
- w = list_first_entry(&thread->todo, struct binder_work, entry);
- else if (!list_empty(&proc->todo) && wait_for_proc_work)
- w = list_first_entry(&proc->todo, struct binder_work, entry);
- else {
- if (ptr - buffer == 4 && !(thread->looper & BINDER_LOOPER_STATE_NEED_RETURN)) /* no data added */
- goto retry;
- break;
- }
- if (end - ptr < sizeof(tr) + 4)
- break;
- switch (w->type) {
- case BINDER_WORK_TRANSACTION: {
- t = container_of(w, struct binder_transaction, work);
- } break;
- ......
- }
- if (!t)
- continue;
- BUG_ON(t->buffer == NULL);
- if (t->buffer->target_node) {
- struct binder_node *target_node = t->buffer->target_node;
- tr.target.ptr = target_node->ptr;
- tr.cookie = target_node->cookie;
- t->saved_priority = task_nice(current);
- if (t->priority < target_node->min_priority &&
- !(t->flags & TF_ONE_WAY))
- binder_set_nice(t->priority);
- else if (!(t->flags & TF_ONE_WAY) ||
- t->saved_priority > target_node->min_priority)
- binder_set_nice(target_node->min_priority);
- cmd = BR_TRANSACTION;
- } else {
- ......
- }
- tr.code = t->code;
- tr.flags = t->flags;
- tr.sender_euid = t->sender_euid;
- if (t->from) {
- struct task_struct *sender = t->from->proc->tsk;
- tr.sender_pid = task_tgid_nr_ns(sender, current->nsproxy->pid_ns);
- } else {
- ......
- }
- tr.data_size = t->buffer->data_size;
- tr.offsets_size = t->buffer->offsets_size;
- tr.data.ptr.buffer = (void *)t->buffer->data + proc->user_buffer_offset;
- tr.data.ptr.offsets = tr.data.ptr.buffer + ALIGN(t->buffer->data_size, sizeof(void *));
- if (put_user(cmd, (uint32_t __user *)ptr))
- return -EFAULT;
- ptr += sizeof(uint32_t);
- if (copy_to_user(ptr, &tr, sizeof(tr)))
- return -EFAULT;
- ptr += sizeof(tr);
- ......
- list_del(&t->work.entry);
- t->buffer->allow_user_free = 1;
- if (cmd == BR_TRANSACTION && !(t->flags & TF_ONE_WAY)) {
- t->to_parent = thread->transaction_stack;
- t->to_thread = thread;
- thread->transaction_stack = t;
- } else {
- ......
- }
- break;
- }
- done:
- *consumed = ptr - buffer;
- ......
- return 0;
- }
This is where it wakes up, at the statement:
- ret = wait_event_interruptible_exclusive(proc->wait, binder_has_proc_work(proc, thread));
Once awake, the Service Manager continues execution and enters the while loop. It first takes a pending work item w from proc->todo. The type of w is BINDER_WORK_TRANSACTION, which was set earlier by the call to binder_transaction, so the pending transaction t is obtained from w:
- t = container_of(w, struct binder_transaction, work);
The code that follows copies cmd and the contents of t->buffer into the user-supplied buffer, which in this case is the buffer the Service Manager passed in from user space:
- if (put_user(cmd, (uint32_t __user *)ptr))
- return -EFAULT;
- ptr += sizeof(uint32_t);
- if (copy_to_user(ptr, &tr, sizeof(tr)))
- return -EFAULT;
- ptr += sizeof(tr);
Note that t->buffer is not copied byte by byte; instead its description is first filled into the local variable tr, which is then copied into the user-space buffer. For how the contents of t->buffer got there, see the article Android系统进程间通信(IPC)机制Binder中的Server启动过程源代码分析. The key point is that the Binder driver and the Service Manager daemon share the same physical memory, so what is copied back is only the user-space virtual address of that physical memory:
- tr.data.ptr.buffer = (void *)t->buffer->data + proc->user_buffer_offset;
- tr.data.ptr.offsets = tr.data.ptr.buffer + ALIGN(t->buffer->data_size, sizeof(void *));
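Here, proc->user_buffer_offset is the fixed difference between the user-space and kernel-space mappings of the same physical pages; it is computed once in binder_mmap, roughly as follows (abridged):
- static int binder_mmap(struct file *filp, struct vm_area_struct *vma)
- {
- ......
- proc->buffer = area->addr;
- proc->user_buffer_offset = vma->vm_start - (uintptr_t)proc->buffer;
- ......
- }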
As far as this round of processing in the Binder driver is concerned, the work item is now done, so it is removed from the todo queue:
- list_del(&t->work.entry);
However, the transaction itself cannot be deleted yet, because further processing is needed once the Service Manager has finished with it; it is therefore pushed onto thread->transaction_stack:
- t->to_parent = thread->transaction_stack;
- t->to_thread = thread;
- thread->transaction_stack = t;
Another thing to note is that the command written above is cmd = BR_TRANSACTION, which tells the Service Manager daemon what it needs to do; we will see the corresponding analysis below.
With that, binder_thread_read is done. Control returns to binder_ioctl, which likewise copies the result of the operation back to the user-space buffer:
- if (copy_to_user(ubuf, &bwr, sizeof(bwr))) {
- ret = -EFAULT;
- goto err;
- }
Finally, control returns to the binder_loop function in frameworks/base/cmds/servicemanager/binder.c:
- void binder_loop(struct binder_state *bs, binder_handler func)
- {
- int res;
- struct binder_write_read bwr;
- unsigned readbuf[32];
- bwr.write_size = 0;
- bwr.write_consumed = 0;
- bwr.write_buffer = 0;
- readbuf[0] = BC_ENTER_LOOPER;
- binder_write(bs, readbuf, sizeof(unsigned));
- for (;;) {
- bwr.read_size = sizeof(readbuf);
- bwr.read_consumed = 0;
- bwr.read_buffer = (unsigned) readbuf;
- res = ioctl(bs->fd, BINDER_WRITE_READ, &bwr);
- if (res < 0) {
- LOGE("binder_loop: ioctl failed (%s)\n", strerror(errno));
- break;
- }
- res = binder_parse(bs, 0, readbuf, bwr.read_consumed, func);
- if (res == 0) {
- LOGE("binder_loop: unexpected reply?!\n");
- break;
- }
- if (res < 0) {
- LOGE("binder_loop: io error %d %s\n", res, strerror(errno));
- break;
- }
- }
- }
Here it has just returned from the statement:
- res = ioctl(bs->fd, BINDER_WRITE_READ, &bwr);
It then enters binder_parse to process the data read back from the Binder driver:
- int binder_parse(struct binder_state *bs, struct binder_io *bio,
- uint32_t *ptr, uint32_t size, binder_handler func)
- {
- int r = 1;
- uint32_t *end = ptr + (size / 4);
- while (ptr < end) {
- uint32_t cmd = *ptr++;
- switch(cmd) {
- ......
- case BR_TRANSACTION: {
- struct binder_txn *txn = (void *) ptr;
- ......
- if (func) {
- unsigned rdata[256/4];
- struct binder_io msg;
- struct binder_io reply;
- int res;
- bio_init(&reply, rdata, sizeof(rdata), 4);
- bio_init_from_txn(&msg, txn);
- res = func(bs, txn, &msg, &reply);
- binder_send_reply(bs, &reply, txn->data, res);
- }
- ptr += sizeof(*txn) / sizeof(uint32_t);
- break;
- }
- ......
- default:
- LOGE("parse: OOPS %d\n", cmd);
- return -1;
- }
- }
- return r;
- }
As noted above, the cmd that the Binder driver wrote into the user-space buffer is BR_TRANSACTION, so here we only look at the logic related to BR_TRANSACTION.
The two data structures used here, struct binder_txn and struct binder_io, were covered in the earlier article Android系统进程间通信(IPC)机制Binder中的Server启动过程源代码分析 and are not described in detail again.
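For convenience, their definitions in frameworks/base/cmds/servicemanager/binder.h are reproduced roughly below; struct binder_txn mirrors the driver's binder_transaction_data, and struct binder_io is a simple cursor over a data buffer plus an offsets array:
- struct binder_txn
- {
- void *target;
- void *cookie;
- uint32_t code;
- uint32_t flags;
- uint32_t sender_pid;
- uint32_t sender_euid;
- uint32_t data_size;
- uint32_t offs_size;
- void *data;
- void *offs;
- };
- struct binder_io
- {
- char *data; /* pointer to read/write from */
- uint32_t *offs; /* array of offsets */
- uint32_t data_avail; /* bytes available in data buffer */
- uint32_t offs_avail; /* entries available in offs array */
- char *data0; /* start of data buffer */
- uint32_t *offs0; /* start of offsets buffer */
- uint32_t flags;
- uint32_t unused;
- };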
Continuing on, the function calls bio_init to initialize the reply variable:
- void bio_init(struct binder_io *bio, void *data,
- uint32_t maxdata, uint32_t maxoffs)
- {
- uint32_t n = maxoffs * sizeof(uint32_t);
- if (n > maxdata) {
- bio->flags = BIO_F_OVERFLOW;
- bio->data_avail = 0;
- bio->offs_avail = 0;
- return;
- }
- bio->data = bio->data0 = data + n;
- bio->offs = bio->offs0 = data;
- bio->data_avail = maxdata - n;
- bio->offs_avail = maxoffs;
- bio->flags = 0;
- }
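As an illustration, with the call bio_init(&reply, rdata, sizeof(rdata), 4) made in binder_parse (rdata being 256 bytes), the reply buffer is laid out like this:
- /* Layout of rdata after bio_init(&reply, rdata, sizeof(rdata), 4) -- illustration only:
- *
- * bytes 0..15 : offs0/offs -- room for 4 object offsets (4 * sizeof(uint32_t))
- * bytes 16..255 : data0/data -- 240 bytes of payload space (data_avail)
- */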
Finally, the real processing is done by the function pointer func passed in as a parameter, which here is the svcmgr_handler function defined in frameworks/base/cmds/servicemanager/service_manager.c:
- int svcmgr_handler(struct binder_state *bs,
- struct binder_txn *txn,
- struct binder_io *msg,
- struct binder_io *reply)
- {
- struct svcinfo *si;
- uint16_t *s;
- unsigned len;
- void *ptr;
- uint32_t strict_policy;
- // LOGI("target=%p code=%d pid=%d uid=%d\n",
- // txn->target, txn->code, txn->sender_pid, txn->sender_euid);
- if (txn->target != svcmgr_handle)
- return -1;
- // Equivalent to Parcel::enforceInterface(), reading the RPC
- // header with the strict mode policy mask and the interface name.
- // Note that we ignore the strict_policy and don't propagate it
- // further (since we do no outbound RPCs anyway).
- strict_policy = bio_get_uint32(msg);
- s = bio_get_string16(msg, &len);
- if ((len != (sizeof(svcmgr_id) / 2)) ||
- memcmp(svcmgr_id, s, sizeof(svcmgr_id))) {
- fprintf(stderr,"invalid id %s\n", str8(s));
- return -1;
- }
- switch(txn->code) {
- case SVC_MGR_GET_SERVICE:
- case SVC_MGR_CHECK_SERVICE:
- s = bio_get_string16(msg, &len);
- ptr = do_find_service(bs, s, len);
- if (!ptr)
- break;
- bio_put_ref(reply, ptr);
- return 0;
- ......
- default:
- LOGE("unknown code %d\n", txn->code);
- return -1;
- }
- bio_put_uint32(reply, 0);
- return 0;
- }
Here, the code that the Service Manager has to handle is SVC_MGR_CHECK_SERVICE, which was set earlier in BpServiceManager::checkService.
Recall that in BpServiceManager::checkService, the data passed down to the Binder driver was written with:
- writeInt32(IPCThreadState::self()->getStrictModePolicy() | STRICT_MODE_PENALTY_GATHER);
- writeString16("android.os.IServiceManager");
- writeString16("media.player");
These are read back in svcmgr_handler by the statements:
- strict_policy = bio_get_uint32(msg);
- s = bio_get_string16(msg, &len);
- s = bio_get_string16(msg, &len);
Of these, the second value passed in, i.e. "android.os.IServiceManager", is verified for correctness; this is the RPC header check, which the comments in the code already explain clearly.
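For reference, BpServiceManager::checkService in frameworks/base/libs/binder/IServiceManager.cpp looks roughly like this; writeInterfaceToken is what emitted the strict-mode policy word followed by the "android.os.IServiceManager" string:
- virtual sp<IBinder> checkService(const String16& name) const
- {
- Parcel data, reply;
- data.writeInterfaceToken(IServiceManager::getInterfaceDescriptor());
- data.writeString16(name);
- remote()->transact(CHECK_SERVICE_TRANSACTION, data, &reply);
- return reply.readStrongBinder();
- }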
Finally, do_find_service is called to look up whether a service named "media.player" exists. Recall from the earlier article Android系统进程间通信(IPC)机制Binder中的Server启动过程源代码分析 that MediaPlayerService has already registered a service named "media.player" with the Service Manager, so it will certainly be found here. Let us look at do_find_service:
- void *do_find_service(struct binder_state *bs, uint16_t *s, unsigned len)
- {
- struct svcinfo *si;
- si = find_svc(s, len);
- // LOGI("check_service('%s') ptr = %p\n", str8(s), si ? si->ptr : 0);
- if (si && si->ptr) {
- return si->ptr;
- } else {
- return 0;
- }
- }
This in turn calls find_svc:
- struct svcinfo *find_svc(uint16_t *s16, unsigned len)
- {
- struct svcinfo *si;
- for (si = svclist; si; si = si->next) {
- if ((len == si->len) &&
- !memcmp(s16, si->name, len * sizeof(uint16_t))) {
- return si;
- }
- }
- return 0;
- }
It simply walks the svclist list looking for the svcinfo with the matching name.
Control then returns to do_find_service. Recall from the earlier article Android系统进程间通信(IPC)机制Binder中的Server启动过程源代码分析 that si->ptr here is the handle value referring to the MediaPlayerService Binder entity within the Service Manager process.
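For reference, struct svcinfo is defined in frameworks/base/cmds/servicemanager/service_manager.c roughly as follows; ptr is the handle recorded when the service was registered:
- struct svcinfo
- {
- struct svcinfo *next;
- void *ptr;
- struct binder_death death;
- unsigned len;
- uint16_t name[0];
- };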
Back in svcmgr_handler, bio_put_ref is called to write this Binder reference into the reply parameter. Let us look at the implementation of bio_put_ref:
- void bio_put_ref(struct binder_io *bio, void *ptr)
- {
- struct binder_object *obj;
- if (ptr)
- obj = bio_alloc_obj(bio);
- else
- obj = bio_alloc(bio, sizeof(*obj));
- if (!obj)
- return;
- obj->flags = 0x7f | FLAT_BINDER_FLAG_ACCEPTS_FDS;
- obj->type = BINDER_TYPE_HANDLE;
- obj->pointer = ptr;
- obj->cookie = 0;
- }
This is straightforward: a binder_object of type BINDER_TYPE_HANDLE is written into the reply buffer. binder_object here is the Service Manager's counterpart of flat_binder_object; for details, see the article Android系统进程间通信(IPC)机制Binder中的Server启动过程源代码分析.
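Its definition in frameworks/base/cmds/servicemanager/binder.h mirrors flat_binder_object field for field, roughly:
- struct binder_object
- {
- uint32_t type;
- uint32_t flags;
- void *pointer;
- void *cookie;
- };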
Back in svcmgr_handler once more: note that in the SVC_MGR_CHECK_SERVICE case the function returns 0 right after bio_put_ref, so the statement at the end of svcmgr_handler that writes a 0 result code into the reply buffer,
- bio_put_uint32(reply, 0);
is only reached by the other branches.
Finally, control returns to binder_parse, which calls binder_send_reply to send the result of the operation back to the Binder driver:
- void binder_send_reply(struct binder_state *bs,
- struct binder_io *reply,
- void *buffer_to_free,
- int status)
- {
- struct {
- uint32_t cmd_free;
- void *buffer;
- uint32_t cmd_reply;
- struct binder_txn txn;
- } __attribute__((packed)) data;
- data.cmd_free = BC_FREE_BUFFER;
- data.buffer = buffer_to_free;
- data.cmd_reply = BC_REPLY;
- data.txn.target = 0;
- data.txn.cookie = 0;
- data.txn.code = 0;
- if (status) {
- data.txn.flags = TF_STATUS_CODE;
- data.txn.data_size = sizeof(int);
- data.txn.offs_size = 0;
- data.txn.data = &status;
- data.txn.offs = 0;
- } else {
- data.txn.flags = 0;
- data.txn.data_size = reply->data - reply->data0;
- data.txn.offs_size = ((char*) reply->offs) - ((char*) reply->offs0);
- data.txn.data = reply->data0;
- data.txn.offs = reply->offs0;
- }
- binder_write(bs, &data, sizeof(data));
- }