This article is based on the Android 9.0 and kernel 3.18 source code.
From the earlier overview of the Zygote process we can derive the following relationship: the init process parses the init.rc configuration and starts the zygote and service_manager processes.
service_manager initialization flow
frameworks/native/cmds/servicemanager/service_manager.c
frameworks/native/cmds/servicemanager/binder.c
kernel_common/drivers/android/binder.c
Let's look at the main() function of service_manager.c.
With respect to binder, it mainly does the following:
1. binder_open() opens the binder driver;
2. binder_become_context_manager() registers this process as the manager of the Binder IPC mechanism;
3. binder_loop() enters a loop that waits for and handles requests from clients.
int main(int argc, char** argv){
struct binder_state *bs;
union selinux_callback cb;
char *driver;
if (argc > 1) {
driver = argv[1];
} else {
driver = "/dev/binder";
}
bs = binder_open(driver, 128*1024);
········
// Register as the manager of the Binder IPC mechanism
if (binder_become_context_manager(bs)) {
ALOGE("cannot become context manager (%s)\n", strerror(errno));
return -1;
}
········
// Enter an infinite loop and handle requests from clients
binder_loop(bs, svcmgr_handler);
return 0;
}
1. A closer look at binder_open() in the framework-layer binder.c
binder_open() does the following:
1. Calls open() to open the "/dev/binder" device file; the path is passed in from main() in service_manager.c.
2. Calls ioctl() to query the driver's version information.
3. Calls mmap() to set up the memory mapping.
struct binder_state *binder_open(const char* driver, size_t mapsize){
struct binder_state *bs;
struct binder_version vers;
bs = malloc(sizeof(*bs));
if (!bs) {
errno = ENOMEM;
return NULL;
}
// File descriptor; the open() system call traps into the kernel and opens the Binder device driver
bs->fd = open(driver, O_RDWR | O_CLOEXEC);
if (bs->fd < 0) {
fprintf(stderr,"binder: cannot open %s (%s)\n",
driver, strerror(errno));
goto fail_open;
}
// Query the binder version information via the ioctl() system call
if ((ioctl(bs->fd, BINDER_VERSION, &vers) == -1) ||
(vers.protocol_version != BINDER_CURRENT_PROTOCOL_VERSION)) {
fprintf(stderr,
"binder: kernel driver version (%d) differs from user space version (%d)\n",
vers.protocol_version, BINDER_CURRENT_PROTOCOL_VERSION);
goto fail_open;
}
bs->mapsize = mapsize;
// Set up the memory mapping via the mmap() system call
bs->mapped = mmap(NULL, mapsize, PROT_READ, MAP_PRIVATE, bs->fd, 0);
if (bs->mapped == MAP_FAILED) {
fprintf(stderr,"binder: cannot map device (%s)\n",
strerror(errno));
goto fail_map;
}
return bs;
fail_map:
close(bs->fd);
fail_open:
free(bs);
return NULL;
}
1.1 Initialization of the binder driver
Next, let's look at the kernel-side binder.c, starting with the following code:
// Register binder_init() in the kernel's list of initialization functions
// After the kernel boots, it walks this list in a defined order and calls each entry
device_initcall(binder_init);
static int __init binder_init(void){
........
// Register the Binder driver on the device node "/dev/binder"
ret = misc_register(&binder_miscdev);
........
return ret;
}
static struct miscdevice binder_miscdev = {
.minor = MISC_DYNAMIC_MINOR,
.name = "binder",
.fops = &binder_fops
};
static const struct file_operations binder_fops = {
.owner = THIS_MODULE,
// Point the poll handler at binder_poll
.poll = binder_poll,
.unlocked_ioctl = binder_ioctl,
.compat_ioctl = binder_ioctl,
.mmap = binder_mmap,
.open = binder_open,
.flush = binder_flush,
.release = binder_release,
};
First, device_initcall(binder_init) registers binder_init() in the kernel's list of initialization functions, so binder_init() is invoked when that list is processed.
Then misc_register(&binder_miscdev) registers the Binder driver on the device node "/dev/binder".
Finally, the function-pointer table is installed, so that calling open, mmap, and so on on the /dev/binder node ends up in binder_open, binder_mmap, and the other driver entry points.
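As a side note, this routing is the kernel's generic misc-device mechanism rather than anything binder-specific. The minimal sketch below (hypothetical demo_* names, not taken from the binder driver) shows the same pattern: misc_register() creates the device node, and the file_operations table decides which driver functions handle open() and friends.
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/miscdevice.h>

/* Hypothetical example of the misc-device pattern binder relies on. */
static int demo_open(struct inode *nodp, struct file *filp)
{
    pr_info("demo device opened\n");
    return 0;
}

static const struct file_operations demo_fops = {
    .owner = THIS_MODULE,
    .open  = demo_open,          /* open("/dev/demo") lands here */
};

static struct miscdevice demo_miscdev = {
    .minor = MISC_DYNAMIC_MINOR, /* let the kernel pick a minor number */
    .name  = "demo",             /* node appears as /dev/demo */
    .fops  = &demo_fops,
};

static int __init demo_init(void)
{
    /* Creates /dev/demo and associates it with demo_fops. */
    return misc_register(&demo_miscdev);
}
device_initcall(demo_init);      /* only valid for built-in code, just like binder */
With this in place, a user-space open("/dev/demo", O_RDWR) ends up in demo_open(), exactly as open("/dev/binder") ends up in binder_open().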
1.2 open()
From the above we know that calling open() on /dev/binder eventually reaches the driver's binder_open().
static int binder_open(struct inode *nodp, struct file *filp)
{
struct binder_proc *proc;
binder_debug(BINDER_DEBUG_OPEN_CLOSE, "binder_open: %d:%d\n",
current->group_leader->pid, current->pid);
// Allocate memory for proc
proc = kzalloc(sizeof(*proc), GFP_KERNEL);
if (proc == NULL)
return -ENOMEM;
// Initialize the fields of proc
get_task_struct(current);
proc->tsk = current;
INIT_LIST_HEAD(&proc->todo);
init_waitqueue_head(&proc->wait);
proc->default_priority = task_nice(current);
binder_lock(__func__);
binder_stats_created(BINDER_STAT_PROC);
hlist_add_head(&proc->proc_node, &binder_procs);
proc->pid = current->group_leader->pid;
INIT_LIST_HEAD(&proc->delivered_death);
// Store proc as filp's private data, so that proc can later be retrieved from filp
filp->private_data = proc;
binder_unlock(__func__);
if (binder_debugfs_dir_entry_proc) {
char strbuf[11];
snprintf(strbuf, sizeof(strbuf), "%u", proc->pid);
proc->debugfs_entry = debugfs_create_file(strbuf, S_IRUGO,
binder_debugfs_dir_entry_proc, proc, &binder_proc_fops);
}
return 0;
}
First, a binder_proc is created and initialized. binder_proc is the structure that describes a process's Binder context; here it stores the servicemanager process's information.
Then proc is stored as filp's private data, so that mmap(), ioctl(), and the other entry points can later retrieve the proc from filp.
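For orientation, the binder_proc members that this article touches are roughly the following (an abridged sketch of the kernel 3.18 definition; many fields are omitted and the order is not exact):
struct binder_proc {
    struct hlist_node proc_node;     /* entry in the global binder_procs list */
    struct rb_root threads;          /* red-black tree of binder_thread, keyed by pid */
    struct rb_root nodes;            /* red-black tree of binder_node owned by this process */
    int pid;                         /* pid of the process (its group leader) */
    struct vm_area_struct *vma;      /* user-space mapping set up in binder_mmap */
    struct task_struct *tsk;         /* task that opened /dev/binder */
    void *buffer;                    /* kernel-space start address of the mapped area */
    ptrdiff_t user_buffer_offset;    /* user-space address minus kernel-space address */
    struct list_head buffers;        /* binder_buffer blocks carved out of the area */
    size_t free_async_space;         /* space reserved for async (one-way) transactions */
    struct page **pages;             /* physical pages backing the mapped area */
    size_t buffer_size;              /* size of the mapped area */
    struct list_head todo;           /* work waiting to be handled by this process */
    wait_queue_head_t wait;          /* threads of this process sleep here */
    long default_priority;           /* nice value recorded at open time */
};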
1.3 mmap()
static int binder_mmap(struct file *filp, struct vm_area_struct *vma)
{
int ret;
struct vm_struct *area;
struct binder_proc *proc = filp->private_data;
const char *failure_string;
struct binder_buffer *buffer;
........
// Grab a free region of kernel virtual address space
area = get_vm_area(vma->vm_end - vma->vm_start, VM_IOREMAP);
........
proc->buffer = area->addr;
// Compute the offset between the user-space and kernel-space addresses
proc->user_buffer_offset = vma->vm_start - (uintptr_t)proc->buffer;
mutex_unlock(&binder_mmap_lock);
........
// Allocate the proc->pages array, one entry per page
proc->pages = kzalloc(sizeof(proc->pages[0]) * ((vma->vm_end - vma->vm_start) / PAGE_SIZE), GFP_KERNEL);
........
// Record the size of the mapped area
proc->buffer_size = vma->vm_end - vma->vm_start;
vma->vm_ops = &binder_vm_ops;
vma->vm_private_data = proc;
// Allocate physical memory and map it into both the kernel space and the user space
if (binder_update_page_range(proc, 1, proc->buffer, proc->buffer + PAGE_SIZE, vma)) {
ret = -ENOMEM;
failure_string = "alloc small buf";
goto err_alloc_small_buf_failed;
}
buffer = proc->buffer;
INIT_LIST_HEAD(&proc->buffers);
list_add(&buffer->entry, &proc->buffers);
buffer->free = 1;
binder_insert_free_buffer(proc, buffer);
proc->free_async_space = proc->buffer_size / 2;
barrier();
proc->files = get_files_struct(current);
// Save the user-space address information into proc
proc->vma = vma;
proc->vma_vm_mm = vma->vm_mm;
/*pr_info("binder_mmap: %d %lx-%lx maps %p\n",
proc->pid, vma->vm_start, vma->vm_end, proc->buffer);*/
return 0;
........
}
binder_mmap() maps physical memory of the requested size into both the user space and the kernel space, which is what makes Binder's single-copy transfer possible.
First, get_vm_area() grabs a free region of the requested size from the kernel's virtual address space;
then the relevant proc fields are filled in: buffer (start address of the kernel-space area), user_buffer_offset (offset between the kernel-space and user-space addresses), pages (the physical pages backing the area), buffer_size (size of the mapped area), and so on;
finally, binder_update_page_range() allocates the physical memory and maps it into both the kernel space and the user space.
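The practical consequence can be captured in one line of arithmetic: since user_buffer_offset = vma->vm_start - proc->buffer, the user-space and kernel-space views of the same byte differ only by this constant. A trivial illustration (the helper below is purely illustrative, not driver code):
#include <stdint.h>
#include <stddef.h>

/* Illustrative only: once binder_mmap has recorded
 * user_buffer_offset = vma->vm_start - proc->buffer,
 * the user-space address of any byte in the mapped area is its
 * kernel-space address plus that constant. Both virtual addresses
 * refer to the same physical page, which is why transaction data
 * only has to be copied into the kernel once. */
static inline uintptr_t kernel_to_user_addr(uintptr_t kernel_addr,
                                            ptrdiff_t user_buffer_offset)
{
    return kernel_addr + (uintptr_t)user_buffer_offset;
}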
1.4 binder_update_page_range()
static int binder_update_page_range(struct binder_proc *proc, int allocate,
void *start, void *end,
struct vm_area_struct *vma)
{
void *page_addr;
unsigned long user_page_addr;
struct vm_struct tmp_area;
struct page **page;
struct mm_struct *mm;
........
// Allocate physical memory and map it into both the kernel space and the user space
for (page_addr = start; page_addr < end; page_addr += PAGE_SIZE) {
int ret;
page = &proc->pages[(page_addr - proc->buffer) / PAGE_SIZE];
BUG_ON(*page);
// Allocate a physical page
*page = alloc_page(GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO);
........
// Record the kernel address range in the temporary variable tmp_area
tmp_area.addr = page_addr;
tmp_area.size = PAGE_SIZE + PAGE_SIZE /* guard page? */;
// Map the physical page at the corresponding position in the kernel address space
ret = map_vm_area(&tmp_area, PAGE_KERNEL, page);
........
user_page_addr =
(uintptr_t)page_addr + proc->user_buffer_offset;
// Insert the physical page at the corresponding position in the process's user-space address range
ret = vm_insert_page(vma, user_page_addr, page[0]);
........
}
return 0;
........
}
First, alloc_page() allocates a physical page;
then map_vm_area() maps that page to the corresponding position in the kernel address space;
finally, vm_insert_page() inserts the same page at the corresponding position in the process's user-space address range.
That completes the analysis of the open flow. Summarizing what open means for the two parties:
(1) the servicemanager process: it has opened /dev/binder and had physical memory mapped into its own address space;
(2) the Binder driver: it has initialized a binder_proc and mapped that physical memory into both the kernel space and the process's address space.
2. binder_become_context_manager
int binder_become_context_manager(struct binder_state *bs){
return ioctl(bs->fd, BINDER_SET_CONTEXT_MGR, 0);
}
Through the ioctl system call it talks to the binder driver and registers itself as the manager of the Binder IPC mechanism.
2.1 binder_ioctl
static struct binder_node *binder_context_mgr_node; // global variable holding the manager's binder_node
static DECLARE_WAIT_QUEUE_HEAD(binder_user_error_wait); // wait queue
static int binder_stop_on_user_error; // wait flag
static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
int ret;
struct binder_proc *proc = filp->private_data;
struct binder_thread *thread;
unsigned int size = _IOC_SIZE(cmd);
void __user *ubuf = (void __user *)arg;
........
// Check binder_stop_on_user_error:
// Case 1: binder_stop_on_user_error < 2 — do not wait, just fall through to the code below
// Case 2: binder_stop_on_user_error >= 2 — go to sleep;
// when another process wakes the binder_user_error_wait queue via wake_up_interruptible,
// binder_stop_on_user_error is checked again
ret = wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2);
if (ret)
goto err_unlocked;
binder_lock(__func__);
// Look up the binder_thread for the current thread in proc;
// if it is not found, create a new one and add it to proc->threads.
thread = binder_get_thread(proc);
if (thread == NULL) {
ret = -ENOMEM;
goto err;
}
// Dispatch on the command type
switch (cmd) {
// Read/write data
case BINDER_WRITE_READ:
ret = binder_ioctl_write_read(filp, cmd, arg, thread);
if (ret)
goto err;
break;
case BINDER_SET_MAX_THREADS:
if (copy_from_user(&proc->max_threads, ubuf, sizeof(proc->max_threads))) {
ret = -EINVAL;
goto err;
}
break;
// Become the context manager
case BINDER_SET_CONTEXT_MGR:
// Create the manager node (binder_context_mgr_node)
ret = binder_ioctl_set_ctx_mgr(filp);
if (ret)
goto err;
// Check whether the current process has the SEAndroid permission to register as the Context Manager
ret = security_binder_set_context_mgr(proc->tsk);
if (ret < 0)
goto err;
break;
case BINDER_THREAD_EXIT:
binder_debug(BINDER_DEBUG_THREADS, "%d:%d exit\n",
proc->pid, thread->pid);
binder_free_thread(proc, thread);
thread = NULL;
break;
// Query the version number
case BINDER_VERSION: {
struct binder_version __user *ver = ubuf;
if (size != sizeof(struct binder_version)) {
ret = -EINVAL;
goto err;
}
if (put_user(BINDER_CURRENT_PROTOCOL_VERSION,
&ver->protocol_version)) {
ret = -EINVAL;
goto err;
}
break;
}
........
}
First, execution reaches the wait_event_interruptible() check; binder_stop_on_user_error is a global variable whose initial value is 0, so execution simply continues.
Then binder_get_thread() looks up the binder_thread for the current thread in proc; since none has been created yet, a new binder_thread is created and inserted into the proc->threads red-black tree.
Finally, the switch lands in the BINDER_SET_CONTEXT_MGR branch, where binder_ioctl_set_ctx_mgr() installs the manager and security_binder_set_context_mgr() checks the current process's permission.
2.2 binder_get_thread
static struct binder_thread *binder_get_thread(struct binder_proc *proc)
{
struct binder_thread *thread = NULL;
struct rb_node *parent = NULL;
struct rb_node **p = &proc->threads.rb_node;
// Look for an existing binder_thread
while (*p) {
parent = *p;
thread = rb_entry(parent, struct binder_thread, rb_node);
if (current->pid < thread->pid)
p = &(*p)->rb_left;
else if (current->pid > thread->pid)
p = &(*p)->rb_right;
else
break;
}
// No binder_thread found, so create a new one
if (*p == NULL) {
thread = kzalloc(sizeof(*thread), GFP_KERNEL);
........
// Link the new thread into the proc->threads red-black tree
rb_link_node(&thread->rb_node, parent, p);
rb_insert_color(&thread->rb_node, &proc->threads);
........
}
return thread;
}
binder_get_thread() looks up the binder_thread corresponding to the current thread in proc; if none is found, it creates one and inserts it into the proc->threads red-black tree.
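The binder_thread members relevant here are roughly the following (abridged sketch of the 3.18 definition):
struct binder_thread {
    struct binder_proc *proc;                     /* owning process */
    struct rb_node rb_node;                       /* entry in proc->threads, keyed by pid */
    int pid;                                      /* thread id (current->pid) */
    int looper;                                   /* BINDER_LOOPER_STATE_* flags */
    struct binder_transaction *transaction_stack; /* transactions this thread is processing */
    struct list_head todo;                        /* work queued specifically for this thread */
    uint32_t return_error;                        /* error code to report back to user space */
    wait_queue_head_t wait;                       /* the thread sleeps here when idle */
};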
2.3 binder_ioctl_set_ctx_mgr
static int binder_ioctl_set_ctx_mgr(struct file *filp)
{
int ret = 0;
struct binder_proc *proc = filp->private_data;
kuid_t curr_euid = current_euid();
........
// Create a new binder_node and store it in the global variable binder_context_mgr_node
binder_context_mgr_node = binder_new_node(proc, 0, 0);
if (binder_context_mgr_node == NULL) {
ret = -ENOMEM;
goto out;
}
........
binder_context_mgr_node->local_weak_refs++;
binder_context_mgr_node->local_strong_refs++;
binder_context_mgr_node->has_strong_ref = 1;
binder_context_mgr_node->has_weak_ref = 1;
out:
return ret;
}
A new binder_node is created and stored in the global variable binder_context_mgr_node.
2.4 binder_new_node
static struct binder_node *binder_new_node(struct binder_proc *proc,
binder_uintptr_t ptr,
binder_uintptr_t cookie)
{
struct rb_node **p = &proc->nodes.rb_node;
struct rb_node *parent = NULL;
struct binder_node *node;
// Search the proc->nodes red-black tree for an existing binder_node (keyed by ptr)
while (*p) {
parent = *p;
node = rb_entry(parent, struct binder_node, rb_node);
if (ptr < node->ptr)
p = &(*p)->rb_left;
else if (ptr > node->ptr)
p = &(*p)->rb_right;
else
return NULL;
}
// No binder_node found, so create a new one
node = kzalloc(sizeof(*node), GFP_KERNEL);
if (node == NULL)
return NULL;
binder_stats_created(BINDER_STAT_NODE);
// Link the node into the proc->nodes red-black tree
rb_link_node(&node->rb_node, parent, p);
rb_insert_color(&node->rb_node, &proc->nodes);
node->debug_id = ++binder_last_id;
// Save the process context in node->proc
node->proc = proc;
node->ptr = ptr;
node->cookie = cookie;
node->work.type = BINDER_WORK_NODE;
INIT_LIST_HEAD(&node->work.entry);
INIT_LIST_HEAD(&node->async_todo);
........
return node;
}
Much like binder_get_thread(), it first searches proc->nodes; if a node is already there it returns NULL, meaning nothing needs to be created; otherwise it creates a new binder_node and inserts it into the proc->nodes red-black tree.
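Likewise, the binder_node members used above are roughly the following (abridged sketch; the real definition has more reference-counting fields):
struct binder_node {
    int debug_id;                  /* unique id, handy for debugging */
    struct binder_work work;       /* queued onto a todo list when the node has work */
    struct rb_node rb_node;        /* entry in proc->nodes, keyed by ptr */
    struct binder_proc *proc;      /* process that owns this node (servicemanager here) */
    int local_weak_refs;
    int local_strong_refs;
    binder_uintptr_t ptr;          /* user-space address of the Binder object (0 for the manager) */
    binder_uintptr_t cookie;       /* extra user-space data (0 for the manager) */
    unsigned has_strong_ref:1;
    unsigned has_weak_ref:1;
    struct list_head async_todo;   /* pending one-way (async) transactions */
};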
That completes the analysis of the binder_become_context_manager flow. Summarizing what it means for the two parties:
(1) the servicemanager process: it has told the Binder driver that servicemanager is the Binder context manager;
(2) the Binder driver: it has initialized the binder_thread for the current thread, and has initialized the binder_node for servicemanager, storing it in the global variable binder_context_mgr_node.
3. binder_loop
binder_loop() does the following:
1. Through binder_write() (which is also an ioctl system call underneath) it sends the BC_ENTER_LOOPER command to the binder driver, which marks the binder_thread with BINDER_LOOPER_STATE_ENTERED to indicate that it has entered the loop;
2. it then enters an infinite loop;
3. in each iteration, ioctl(bs->fd, BINDER_WRITE_READ, &bwr) receives data;
4. binder_parse() then processes the received messages.
void binder_loop(struct binder_state *bs, binder_handler func){
int res;
struct binder_write_read bwr;
uint32_t readbuf[32];
bwr.write_size = 0;
bwr.write_consumed = 0;
bwr.write_buffer = 0;
readbuf[0] = BC_ENTER_LOOPER;
binder_write(bs, readbuf, sizeof(uint32_t));
// Enter the loop
for (;;) {
bwr.read_size = sizeof(readbuf);
bwr.read_consumed = 0;
bwr.read_buffer = (uintptr_t) readbuf;
// The call blocks here; the thread sleeps in the driver, waiting to be woken up
res = ioctl(bs->fd, BINDER_WRITE_READ, &bwr);
if (res < 0) {
ALOGE("binder_loop: ioctl failed (%s)\n", strerror(errno));
break;
}
// Process the message further
res = binder_parse(bs, 0, (uintptr_t) readbuf, bwr.read_consumed, func);
........
}
}
3.1 binder_write
int binder_write(struct binder_state *bs, void *data, size_t len)
{
struct binder_write_read bwr;
int res;
// There is data to write to the Binder driver
bwr.write_size = len;
bwr.write_consumed = 0;
bwr.write_buffer = (uintptr_t) data;
// No data needs to be read back
bwr.read_size = 0;
bwr.read_consumed = 0;
bwr.read_buffer = 0;
res = ioctl(bs->fd, BINDER_WRITE_READ, &bwr);
if (res < 0) {
fprintf(stderr,"binder_write: ioctl failed (%s)\n",
strerror(errno));
}
return res;
}
binder_write() only sends a message to the Binder driver; it does not read back any reply.
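For reference, the binder_write_read structure exchanged with the driver is defined in the binder UAPI header roughly as follows: the write_* fields describe data flowing from user space to the driver, and the read_* fields describe the buffer the driver fills on the way back.
struct binder_write_read {
    binder_size_t    write_size;     /* bytes available to write */
    binder_size_t    write_consumed; /* bytes actually consumed by the driver */
    binder_uintptr_t write_buffer;   /* user-space address of the write data */
    binder_size_t    read_size;      /* size of the read buffer */
    binder_size_t    read_consumed;  /* bytes actually filled in by the driver */
    binder_uintptr_t read_buffer;    /* user-space address of the read buffer */
};
So binder_write() is simply a BINDER_WRITE_READ with read_size set to 0, while each iteration of binder_loop() is one with write_size set to 0.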
3.2 binder_ioctl_write_read
Recalling the ioctl analysis above, the BINDER_WRITE_READ command is handled by binder_ioctl_write_read().
static int binder_ioctl_write_read(struct file *filp,
unsigned int cmd, unsigned long arg,
struct binder_thread *thread)
{
int ret = 0;
struct binder_proc *proc = filp->private_data;
unsigned int size = _IOC_SIZE(cmd);
void __user *ubuf = (void __user *)arg;
struct binder_write_read bwr;
........
// Copy the binder_write_read struct from user space into kernel space
if (copy_from_user(&bwr, ubuf, sizeof(bwr))) {
ret = -EFAULT;
goto out;
}
........
// If write_size > 0, perform the write
if (bwr.write_size > 0) {
ret = binder_thread_write(proc, thread,
bwr.write_buffer,
bwr.write_size,
&bwr.write_consumed);
trace_binder_write_done(ret);
if (ret < 0) {
bwr.read_consumed = 0;
if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
ret = -EFAULT;
goto out;
}
}
// If read_size > 0, perform the read
if (bwr.read_size > 0) {
ret = binder_thread_read(proc, thread, bwr.read_buffer,
bwr.read_size,
&bwr.read_consumed,
filp->f_flags & O_NONBLOCK);
trace_binder_read_done(ret);
if (!list_empty(&proc->todo))
wake_up_interruptible(&proc->wait);
if (ret < 0) {
if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
ret = -EFAULT;
goto out;
}
}
........
if (copy_to_user(ubuf, &bwr, sizeof(bwr))) {
ret = -EFAULT;
goto out;
}
out:
return ret;
}
First, copy_from_user() copies the bwr object into the Binder driver (kernel space);
then, if bwr.write_size > 0, binder_thread_write() performs the write; if bwr.read_size > 0, binder_thread_read() performs the read;
finally, bwr is copied back from kernel space to user space.
3.3 binder_thread_write
static int binder_thread_write(struct binder_proc *proc,
struct binder_thread *thread,
binder_uintptr_t binder_buffer, size_t size,
binder_size_t *consumed)
{
uint32_t cmd;
void __user *buffer = (void __user *)(uintptr_t)binder_buffer;
void __user *ptr = buffer + *consumed;
void __user *end = buffer + size;
// Read the contents of binder_write_read.write_buffer, 32 bits at a time
while (ptr < end && thread->return_error == BR_OK) {
if (get_user(cmd, (uint32_t __user *)ptr))
return -EFAULT;
ptr += sizeof(uint32_t);
........
switch (cmd) {
case BC_ENTER_LOOPER:
........
// Mark the thread as having entered the loop
thread->looper |= BINDER_LOOPER_STATE_ENTERED;
break;
}
*consumed = ptr - buffer;
}
........
return 0;
}
binder_thread_write() reads 4 bytes from bwr.write_buffer as the cmd; these 4 bytes are the BC_ENTER_LOOPER command passed down by servicemanager. The BC_ENTER_LOOPER branch tells the Binder driver that the servicemanager process has entered its message loop.
3.4 binder_thread_read
In the ioctl inside the for loop, read_size > 0, so execution enters binder_thread_read().
static int binder_thread_read(struct binder_proc *proc,
struct binder_thread *thread,
binder_uintptr_t binder_buffer, size_t size,
binder_size_t *consumed, int non_block)
{
void __user *buffer = (void __user *)(uintptr_t)binder_buffer;
void __user *ptr = buffer + *consumed;
void __user *end = buffer + size;
int ret = 0;
int wait_for_proc_work;
........
retry:
// True when both the thread's transaction stack and its todo list are empty
wait_for_proc_work = thread->transaction_stack == NULL &&
list_empty(&thread->todo);
........
// Mark the thread as waiting
thread->looper |= BINDER_LOOPER_STATE_WAITING;
if (wait_for_proc_work)
proc->ready_threads++;
........
if (wait_for_proc_work) {
........
// Block and wait for work to arrive
ret = wait_event_freezable_exclusive(proc->wait, binder_has_proc_work(proc, thread));
}
........
}
At this point no other process has queued any work for the current thread, so wait_for_proc_work is true; the thread therefore calls wait_event_freezable_exclusive() and goes to sleep, waiting for another process to wake servicemanager up.
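The condition it sleeps on, binder_has_proc_work(), is essentially a check of the process-wide todo list. A simplified sketch based on the 3.18 source:
/* Simplified sketch of the wait condition. It becomes true once some client
 * queues work onto proc->todo (or the thread is asked to return to user
 * space), at which point wake_up_interruptible(&proc->wait) wakes the
 * sleeping servicemanager thread. */
static int binder_has_proc_work(struct binder_proc *proc,
                                struct binder_thread *thread)
{
    return !list_empty(&proc->todo) ||
           (thread->looper & BINDER_LOOPER_STATE_NEED_RETURN);
}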
binder_parse() is only reached after servicemanager has been woken up, so it is left for a later analysis.
That completes the analysis of the binder_loop flow. Summarizing what it means for the two parties:
(1) the servicemanager process: via BC_ENTER_LOOPER it tells the kernel that servicemanager has entered its message loop; it then blocks, waiting for client requests;
(2) the Binder driver: it now knows that servicemanager is in its message loop; on receiving servicemanager's BINDER_WRITE_READ request it checks the process's binder_proc for pending work, finds none, and therefore puts the servicemanager thread into an interruptible sleep.