ServiceManager是由init进程解析init.rc文件而创建的,启动的入口函数是service_manager.c中的main()方法
//资源路径 /frameworks/native/cmds/servicemanager/service_manager.c
/* ServiceManager entry point: open the binder driver, register this
 * process as the context manager (handle 0), then loop forever
 * servicing client requests via svcmgr_handler. */
int main(int argc, char **argv)
{
    struct binder_state *bs;

    /* (1) open the binder driver and mmap a 128KB receive buffer */
    bs = binder_open(128*1024);
    if (!bs) {
        /* cannot talk to the driver at all — nothing more we can do */
        return -1;
    }

    /* (2) become the binder context manager ("binder大管家").
     * NOTE: ioctl returns 0 on SUCCESS, so a non-zero result is an
     * error — the original snippet entered binder_loop on failure. */
    if (binder_become_context_manager(bs)) {
        return -1;
    }

    /* (3) block in the driver, dispatching client requests */
    binder_loop(bs, svcmgr_handler);

    return 0;
}
①binder_open:打开binder驱动,内存映射
//资源路径 /frameworks/native/cmds/servicemanager/binder.c
struct binder_state *binder_open(size_t mapsize)
{
//这个结构体记录了service_manager中有关binder的所有信息
struct binder_state *bs;
//打开binder驱动,得到文件描述符
bs->fd = open("/dev/binder", O_RDWR);
//设置128kb空间大小
bs->mapsize = mapsize;
//通过系统调用,mmap内存映射
bs->mapped = mmap(NULL, mapsize, PROT_READ, MAP_PRIVATE, bs->fd, 0);
}
②binder_become_context_manager:设置ServiceManager为大管家
//资源路径: /frameworks/native/cmds/servicemanager/binder.c
/* Register the calling process as the one global binder context manager
 * (the owner of handle 0). Returns 0 on success, -1 with errno set on
 * failure. (The original snippet relied on implicit int, invalid since C99.) */
int binder_become_context_manager(struct binder_state *bs)
{
    /* traps into the kernel's binder_ioctl, BINDER_SET_CONTEXT_MGR branch */
    return ioctl(bs->fd, BINDER_SET_CONTEXT_MGR, 0);
}
//资源路径: /drivers/staging/android/binder.c
/* Kernel-side ioctl entry for /dev/binder (abridged to the
 * BINDER_SET_CONTEXT_MGR command). Returns 0 or a negative errno. */
static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
    long ret = 0;    /* was undeclared in the original snippet */

    switch (cmd) {
    case BINDER_SET_CONTEXT_MGR:
        /* install the caller as the context manager */
        ret = binder_ioctl_set_ctx_mgr(filp);
        break;
    default:
        /* abridged: the full driver handles many more commands here */
        break;
    }
    return ret;      /* the snippet was missing this return */
}
/* Make the calling process the binder context manager: record its euid
 * and create the global binder_node for handle 0.
 * Returns 0 on success, -EBUSY if a manager already exists. */
static int binder_ioctl_set_ctx_mgr(struct file *filp)
{
    int ret = 0;
    struct binder_proc *proc = filp->private_data;
    kuid_t curr_euid = current_euid();

    /* only one context manager may ever be created */
    if (binder_context_mgr_node != NULL) {
        pr_err("BINDER_SET_CONTEXT_MGR already set\n");
        ret = -EBUSY;
        goto out;
    }

    /* remember the caller's euid as service_manager's identity */
    binder_context_mgr_uid = curr_euid;

    /* create the binder_node entity for service_manager
     * (ptr and cookie are both 0 for the context manager) */
    binder_context_mgr_node = binder_new_node(proc, 0, 0);

out:    /* the snippet used `goto out` but never defined the label */
    return ret;
}
/* Allocate and initialize a binder_node keyed by ptr, inserting it into
 * proc->nodes (a red-black tree). Returns the new node, or NULL if
 * allocation fails or a node with the same ptr already exists.
 * (The original snippet used `parent` and `p` without declaring them —
 * the rb-tree walk below is restored from the real AOSP source.) */
static struct binder_node *binder_new_node(struct binder_proc *proc,
                       binder_uintptr_t ptr,
                       binder_uintptr_t cookie)
{
    struct rb_node **p = &proc->nodes.rb_node;
    struct rb_node *parent = NULL;
    struct binder_node *node;

    /* walk the tree to find the insertion slot for this ptr */
    while (*p) {
        parent = *p;
        node = rb_entry(parent, struct binder_node, rb_node);
        if (ptr < node->ptr)
            p = &(*p)->rb_left;
        else if (ptr > node->ptr)
            p = &(*p)->rb_right;
        else
            return NULL;   /* duplicate ptr: node already exists */
    }

    /* allocate zeroed kernel memory for the new binder_node */
    node = kzalloc(sizeof(*node), GFP_KERNEL);
    if (node == NULL)
        return NULL;       /* snippet never checked the allocation */

    /* link the new node into the proc->nodes red-black tree */
    rb_link_node(&node->rb_node, parent, p);
    rb_insert_color(&node->rb_node, &proc->nodes);

    /* initialize the binder_node */
    node->proc = proc;
    node->ptr = ptr;
    node->cookie = cookie;
    /* tag its work item so binder_thread_read knows how to handle it */
    node->work.type = BINDER_WORK_NODE;
    /* initialize the work list head (similar in spirit to a MessageQueue) */
    INIT_LIST_HEAD(&node->work.entry);
    /* initialize the async-todo list head */
    INIT_LIST_HEAD(&node->async_todo);

    return node;           /* snippet was missing the return */
}
③binder_loop:轮询处理数据
//资源路径:/frameworks/native/cmds/servicemanager/binder.c
/* Enter the service_manager main loop: announce BC_ENTER_LOOPER to the
 * driver, then block in BINDER_WRITE_READ ioctls waiting for requests.
 * (bwr, readbuf and res were undeclared in the original snippet.) */
void binder_loop(struct binder_state *bs, binder_handler func)
{
    int res;
    struct binder_write_read bwr;
    uint32_t readbuf[32];

    /* initialize: nothing to write via this bwr */
    bwr.write_size = 0;
    bwr.write_consumed = 0;
    bwr.write_buffer = 0;

    /* (1) tell the driver this thread has entered its loop */
    readbuf[0] = BC_ENTER_LOOPER;
    binder_write(bs, readbuf, sizeof(uint32_t));

    for (;;) {
        /* read_size != 0 here routes the driver into binder_thread_read */
        bwr.read_size = sizeof(readbuf);
        bwr.read_consumed = 0;
        bwr.read_buffer = (uintptr_t) readbuf;

        /* (2) block until data arrives; with no pending work the thread
         * sleeps inside the driver rather than busy-polling */
        res = ioctl(bs->fd, BINDER_WRITE_READ, &bwr);
        if (res < 0)
            break;   /* driver error: leave the loop */
        /* ... abridged: the full source parses readbuf and calls func ... */
    }
}
/* Write `len` bytes of command data to the driver with no read phase.
 * Returns the ioctl result (0 on success, negative on error).
 * (res was undeclared and the return was missing in the snippet.) */
int binder_write(struct binder_state *bs, void *data, size_t len)
{
    struct binder_write_read bwr;
    int res;

    /* write_size > 0 routes the driver into binder_thread_write */
    bwr.write_size = len;
    bwr.write_consumed = 0;
    /* here `data` carries the BC_ENTER_LOOPER command */
    bwr.write_buffer = (uintptr_t) data;

    /* read_size == 0 keeps the driver out of binder_thread_read */
    bwr.read_size = 0;
    bwr.read_consumed = 0;
    bwr.read_buffer = 0;

    /* hand the command to the kernel via BINDER_WRITE_READ */
    res = ioctl(bs->fd, BINDER_WRITE_READ, &bwr);
    return res;
}
ioctl会调到内核层的binder_ioctl,进入BINDER_WRITE_READ分支后调用binder_ioctl_write_read:当bwr.write_size > 0时进入binder_thread_write;当bwr.read_size > 0时进入binder_thread_read
binder_thread_write
//资源路径:/drivers/staging/android/binder.c
/* Consume userspace commands from the write buffer (abridged to the
 * BC_ENTER_LOOPER command). Returns 0, or -EFAULT on a bad user pointer.
 * (cmd/ptr/end were undeclared in the snippet, and the cursor was never
 * advanced, which would loop forever — both restored from AOSP.) */
static int binder_thread_write(struct binder_proc *proc,
            struct binder_thread *thread,
            binder_uintptr_t binder_buffer, size_t size,
            binder_size_t *consumed)
{
    uint32_t cmd;
    void __user *buffer = (void __user *)(uintptr_t)binder_buffer;
    void __user *ptr = buffer + *consumed;
    void __user *end = buffer + size;

    while (ptr < end && thread->return_error == BR_OK) {
        /* fetch the next command word, e.g. BC_ENTER_LOOPER */
        if (get_user(cmd, (uint32_t __user *)ptr))
            return -EFAULT;
        ptr += sizeof(uint32_t);   /* advance past the command */

        switch (cmd) {
        case BC_ENTER_LOOPER:
            /* mark this thread as having entered its looper */
            thread->looper |= BINDER_LOOPER_STATE_ENTERED;
            break;
        default:
            /* abridged: the full driver handles many more commands */
            break;
        }
        /* report how much of the buffer has been consumed */
        *consumed = ptr - buffer;
    }
    return 0;
}
binder_thread_read
//资源路径:/drivers/staging/android/binder.c
/* Fill the read buffer with work for this thread; when there is none,
 * a blocking caller sleeps here until work arrives (this is where
 * service_manager spends its idle time). Returns 0 or a negative errno.
 * (ret/ptr/wait_for_proc_work were undeclared in the snippet.) */
static int binder_thread_read(struct binder_proc *proc,
            struct binder_thread *thread,
            binder_uintptr_t binder_buffer, size_t size,
            binder_size_t *consumed, int non_block)
{
    int ret = 0;
    int wait_for_proc_work;
    void __user *buffer = (void __user *)(uintptr_t)binder_buffer;
    void __user *ptr = buffer + *consumed;

    if (*consumed == 0) {
        /* prepend BR_NOOP so userspace parsing always sees a command */
        if (put_user(BR_NOOP, (uint32_t __user *)ptr))
            return -EFAULT;
        ptr += sizeof(uint32_t);
    }

    /* idle if no transaction is in flight and this thread's todo list
     * is empty — true for service_manager's startup path */
    wait_for_proc_work = thread->transaction_stack == NULL &&
                 list_empty(&thread->todo);
    if (wait_for_proc_work)
        proc->ready_threads++;

    if (wait_for_proc_work) {
        if (non_block) {
            /* abridged: the full driver returns -EAGAIN when idle */
        } else
            /* (3) service_manager opened the fd blocking, so it sleeps
             * here until binder_has_proc_work() becomes true */
            ret = wait_event_freezable_exclusive(proc->wait,
                    binder_has_proc_work(proc, thread));
    }
    return ret;   /* the snippet was missing this return */
}