在线内核层代码:http://androidxref.com/kernel_3.18/xref/
1.binder_init
驱动启动时会先调用驱动的binder_init():主要负责注册misc设备,通过调用misc_register来实现
//资源路径: /drivers/staging/android/binder.c
/*
 * binder_init() - driver (module) initialization entry point.
 * Creates the workqueue used for deferred binder work, then registers
 * the binder misc character device (/dev/binder) via misc_register().
 * Returns 0 on success or a negative errno.
 */
static int __init binder_init(void)
{
	int ret;

	/* Single-threaded workqueue named "binder" for deferred work. */
	binder_deferred_workqueue = create_singlethread_workqueue("binder");
	if (!binder_deferred_workqueue)
		return -ENOMEM;	/* OOM check present in the full kernel source */
	/* ... */
	/* Register the misc device; the kernel assigns the device node. */
	ret = misc_register(&binder_miscdev);
	return ret;
}
/* Misc-device descriptor passed to misc_register() in binder_init(). */
static struct miscdevice binder_miscdev = {
/* Minor device number: dynamically allocated by the misc framework. */
.minor = MISC_DYNAMIC_MINOR,
/* Device name: appears as /dev/binder. */
.name = "binder",
/* File-operations table (struct file_operations) for this device. */
.fops = &binder_fops
};
/*
 * The native layer reaches the driver through syscalls; this table maps
 * each user-space file operation to its driver-side handler.
 */
static const struct file_operations binder_fops = {
.owner = THIS_MODULE,
.poll = binder_poll,
.unlocked_ioctl = binder_ioctl,
.compat_ioctl = binder_ioctl,
/* e.g. a native-layer mmap() on /dev/binder lands in binder_mmap. */
.mmap = binder_mmap,
.open = binder_open,
.flush = binder_flush,
.release = binder_release,
};
2.binder_open
打开驱动会调用binder_open():创建binder_proc对象,并把当前进程等信息保存到binder_proc对象,再将binder_proc对象保存到文件指针filp,以及把binder_proc添加到全局链表binder_procs中。
//资源路径:/drivers/staging/android/binder.c
static int binder_open(struct inode *nodp, struct file *filp)
{
//当前binder进程结构体
struct binder_proc *proc;
//为binder_proc结构体在内核申请内存空间
proc = kzalloc(sizeof(*proc), GFP_KERNEL);
//current代表当前线程
get_task_struct(current);
//将当前线程的task保存到binder进程的tsk
proc->tsk = current;
//初始化todo列表
INIT_LIST_HEAD(&proc->todo);
//初始化wait队列
init_waitqueue_head(&proc->wait);
//将当前进程的nice值转换为进程优先级
proc->default_priority = task_nice(current);
//同步锁,因为binder支持多线程访问
binder_lock(__func__);
//binder_proc对象创建数加1
binder_stats_created(BINDER_STAT_PROC);
//将pro_node节点添加到binder_procs的队列头部
hlist_add_head(&proc->proc_node, &binder_procs);
//记录当前进程的pid
proc->pid = current->group_leader->pid;
//初始化已分发的死亡通知列表
INIT_LIST_HEAD(&proc->delivered_death);
//将binder_proc存放在filp的private_data域,以便在之后的mmap、ioctl中获取
filp->private_data = proc;
//释放同步锁
binder_unlock(__func__);
return 0;
}
3.binder_mmap
1.通过用户空间的虚拟内存大小,分配一块内核的虚拟内存
2.分配一块物理内存(4KB)
3.把这块物理内存分别映射到用户空间的虚拟内存和内核的虚拟内存
//资源路径:/drivers/staging/android/binder.c
/*
 * binder_mmap() - set up the shared binder buffer for a process.
 * 1. Reserve a kernel virtual area the same size as the user vma.
 * 2. Allocate one physical page (4 KB) up front.
 * 3. Map that page into both the user and kernel virtual addresses,
 *    so data copied once into the kernel is visible to user space.
 * NOTE(review): abridged excerpt — declarations of failure_string and
 * buffer, the err_* labels, and parts of the setup are elided from the
 * full kernel source.
 */
static int binder_mmap(struct file *filp, struct vm_area_struct *vma)
{
int ret;
/* Kernel virtual area that will mirror the user-space mapping. */
struct vm_struct *area;
/* Recover the binder_proc stashed in filp by binder_open(). */
struct binder_proc *proc = filp->private_data;
/* Cap the mapped size at 4 MB. */
if ((vma->vm_end - vma->vm_start) > SZ_4M)
vma->vm_end = vma->vm_start + SZ_4M;
/* Serialize mapping setup: only one mmap is handled at a time. */
mutex_lock(&binder_mmap_lock);
/* A process may map the binder buffer only once. */
if (proc->buffer) {
ret = -EBUSY;
failure_string = "already mapped";
goto err_already_mapped;
}
/* Reserve a contiguous kernel virtual area (VM_IOREMAP), same size as the vma. */
area = get_vm_area(vma->vm_end - vma->vm_start, VM_IOREMAP);
/* Bail out if the reservation failed. */
if (area == NULL) {
ret = -ENOMEM;
failure_string = "get_vm_area";
goto err_get_vm_area_failed;
}
/* Point proc->buffer at the kernel virtual area. */
proc->buffer = area->addr;
/* Fixed offset between user and kernel addresses of the same page. */
proc->user_buffer_offset = vma->vm_start - (uintptr_t)proc->buffer;
/* Done with the serialized section. */
mutex_unlock(&binder_mmap_lock);
/* Array of page pointers, one slot per page in the vma. */
proc->pages = kzalloc(sizeof(proc->pages[0]) * ((vma->vm_end - vma->vm_start) / PAGE_SIZE), GFP_KERNEL);
/* Allocate and map just ONE page now; more are added lazily during
 * transfers to avoid wasting memory. */
if (binder_update_page_range(proc, 1, proc->buffer, proc->buffer + PAGE_SIZE, vma)) {
ret = -ENOMEM;
failure_string = "alloc small buf";
goto err_alloc_small_buf_failed;
}
/* First binder_buffer starts at the beginning of the mapped area. */
buffer = proc->buffer;
/* Head of this process's list of binder_buffers. */
INIT_LIST_HEAD(&proc->buffers);
/* Link the initial buffer into the per-process buffers list. */
list_add(&buffer->entry, &proc->buffers);
/* The page range above is mapped, so this buffer is usable (free). */
buffer->free = 1;
/* Make the free buffer findable via proc->free_buffers. */
binder_insert_free_buffer(proc, buffer);
/* Async transactions may use at most half of the total buffer space. */
proc->free_async_space = proc->buffer_size / 2;
/* ... */
return 0;
}
/*
 * binder_update_page_range() - allocate (or free) physical pages for
 * [start, end) and map each page into both kernel and user space.
 * NOTE(review): abridged excerpt — declarations of page_addr, page,
 * tmp_area and user_page_addr, the free_range path, and the return
 * statement are elided from the full kernel source.
 */
static int binder_update_page_range(struct binder_proc *proc, int allocate,void *start, void *end,struct vm_area_struct *vma)
{
/* allocate == 1: allocate and map pages; allocate == 0: free them. */
if (allocate == 0)
goto free_range;
for (page_addr = start; page_addr < end; page_addr += PAGE_SIZE) {
int ret;
/* (1) Allocate one physical page. */
*page = alloc_page(GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO);
/* (2) Map the page into the kernel virtual area. */
ret = map_vm_area(&tmp_area, PAGE_KERNEL, page);
/* User address = kernel address + offset computed in binder_mmap(). */
user_page_addr =(uintptr_t)page_addr + proc->user_buffer_offset;
/* (3) Insert the same physical page into the user-space vma. */
ret = vm_insert_page(vma, user_page_addr, page[0]);
}
}
4.binder_ioctl
binder_ioctl承载了Binder数据传输部分的主要业务,有两个核心方法 binder_thread_write 和 binder_thread_read
//资源路径:/drivers/staging/android/binder.c
/*
 * binder_ioctl() - main command entry point for binder data transfer.
 * The core work is done by binder_thread_write()/binder_thread_read(),
 * reached through binder_ioctl_write_read().
 * NOTE(review): abridged excerpt — declarations of ret, thread and
 * proc, the err: label, and the remaining command cases are elided
 * from the full kernel source.
 */
static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
/* Sleep (interruptibly) until any global user-error stall clears. */
ret = wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2);
binder_lock(__func__);
/* Look up (or create) the binder_thread for the calling thread. */
thread = binder_get_thread(proc);
switch (cmd) {
/* Binder read/write: the main data-transfer command. */
case BINDER_WRITE_READ:
ret = binder_ioctl_write_read(filp, cmd, arg, thread);
if (ret)
goto err;
break;
/* ... other commands elided ... */
}
}
/*
 * binder_ioctl_write_read() - handle the BINDER_WRITE_READ command.
 * Copies a struct binder_write_read from user space, runs the write
 * phase and/or the read phase depending on the buffer sizes, then
 * copies the updated structure back so the caller sees the consumed
 * counts. Returns 0 on success or a negative errno.
 * NOTE(review): abridged excerpt — declarations of proc, bwr and ubuf
 * are elided from the full kernel source.
 */
static int binder_ioctl_write_read(struct file *filp,
			unsigned int cmd, unsigned long arg,
			struct binder_thread *thread)
{
	int ret = 0;

	/* Copy the user-space binder_write_read descriptor into bwr. */
	if (copy_from_user(&bwr, ubuf, sizeof(bwr))) {
		ret = -EFAULT;
		goto out;
	}
	/* Write phase: the caller supplied outgoing data. */
	if (bwr.write_size > 0) {
		ret = binder_thread_write(proc, thread,
					bwr.write_buffer,
					bwr.write_size,
					&bwr.write_consumed);
		trace_binder_write_done(ret);
		/* On failure, report zero bytes read and return bwr as-is. */
		if (ret < 0) {
			bwr.read_consumed = 0;
			if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
				ret = -EFAULT;
			goto out;
		}
	}
	/* Read phase: the caller has room for incoming data. */
	if (bwr.read_size > 0) {
		ret = binder_thread_read(proc, thread, bwr.read_buffer,
					bwr.read_size,
					&bwr.read_consumed,
					filp->f_flags & O_NONBLOCK);
		trace_binder_read_done(ret);
		/* If more work remains queued, wake a waiting thread. */
		if (!list_empty(&proc->todo))
			wake_up_interruptible(&proc->wait);
		/* On failure, still copy bwr back so consumed counts are seen. */
		if (ret < 0) {
			if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
				ret = -EFAULT;
			goto out;
		}
	}
	/* Success: copy the updated bwr back to user space. */
	if (copy_to_user(ubuf, &bwr, sizeof(bwr))) {
		ret = -EFAULT;
		goto out;
	}
out:
	return ret;
}