binder
The Android system's inter-process communication mechanism, built on a client/server architecture; each transaction needs only a single copy of the data.
User-space service and client processes communicate through the binder driver (/dev/binder) in kernel space.
servicemanager is a user-space process that acts as the context manager of the binder mechanism. A service registers itself with servicemanager when it starts;
a client then obtains the service from servicemanager and uses the service's functionality indirectly.
A process virtual address area (vm_area_struct) and a kernel virtual address area (vm_struct) are mapped onto the same physical memory.
When a client sends data to a server, the client (the sending side) first copies the IPC data from its own address space into kernel space with copy_from_user;
the server (the receiving side) shares that memory with the kernel, so no further copy is needed: the address in the server's space is obtained from a fixed offset,
and the whole transfer involves exactly one memory copy. The conventional approach copies from the client's address space into the kernel and then from the kernel into the server's address space, i.e. two copies.
Mapping the process and kernel virtual addresses onto the same physical memory happens on the receiving side; the sending side still has to copy its user-space data into the kernel.
binder driver
Based on the Android 9.0 source code.
The binder driver is registered as a misc device, a virtual character device: it does not drive any hardware directly and only manages device memory.
Its main entry points are driver initialization (binder_init), open (binder_open), memory mapping (binder_mmap), and data operations (binder_ioctl).
For example, opening the binder driver goes from user space through the system call into the kernel: open -> __open -> binder_open.
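A minimal user-space sketch of that sequence (a sketch only: it assumes the UAPI header linux/android/binder.h is available and drops all error handling):

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <linux/android/binder.h>

int main(void)
{
    int fd = open("/dev/binder", O_RDWR | O_CLOEXEC);   /* -> binder_open()  */

    struct binder_version vers;
    ioctl(fd, BINDER_VERSION, &vers);                    /* -> binder_ioctl() */
    printf("binder protocol version %d\n", vers.protocol_version);

    /* map the receive buffer; the driver caps a mapping at 4 MB */
    void *map = mmap(NULL, 1024 * 1024, PROT_READ, MAP_PRIVATE, fd, 0); /* -> binder_mmap() */
    (void)map;
    return 0;
}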
Files involved
common/include/uapi/linux/android/binder.h
common/drivers/android/binder.c
common/drivers/android/binder_alloc.c
common/drivers/android/binder_alloc_selftest.c
common/drivers/android/binder_alloc.h
common/drivers/android/binder_trace.h
Key data structures
struct binder_context {
struct binder_node *binder_context_mgr_node;
struct mutex context_mgr_node_lock;
kuid_t binder_context_mgr_uid;
const char *name;
};
struct binder_device {
struct hlist_node hlist;
struct miscdevice miscdev; // misc device
struct binder_context context;
};
// per-process binder bookkeeping
struct binder_proc {
struct hlist_node proc_node; // element of the global binder_procs list
struct rb_root threads; // rbtree of this process's binder threads
struct rb_root nodes; // rbtree of binder nodes owned by this process, sorted by node->ptr
struct rb_root refs_by_desc; // rbtree of binder refs, sorted by ref->desc
struct rb_root refs_by_node; // rbtree of binder refs, sorted by ref->node
struct list_head waiting_threads; // threads waiting to handle process work
int pid; // pid of the process's group_leader
struct task_struct *tsk; // task_struct of the process's group_leader
struct files_struct *files; //
struct mutex files_lock;
struct hlist_node deferred_work_node; // element of binder_deferred_list
int deferred_work; // bitmap of deferred work to perform
bool is_dead; // process is dead and awaiting cleanup
struct list_head todo; // list of work items for this process
struct binder_stats stats; // per-process binder statistics
struct list_head delivered_death; // list of delivered death notifications
int max_threads; // maximum number of binder threads the process supports
int requested_threads; // number of threads requested but not yet started (0 or 1)
int requested_threads_started; // number of binder threads already started
int tmp_ref; // temporary reference indicating the process is in use
struct binder_priority default_priority; // default scheduling priority
struct dentry *debugfs_entry; // debugfs node
struct binder_alloc alloc; // binder allocator bookkeeping
struct binder_context *context; // this process's binder_context, invariant after initialization
spinlock_t inner_lock;
spinlock_t outer_lock;
};
struct binder_transaction_data
{
/* The first two are only used for bcTRANSACTION and brTRANSACTION,
* identifying the target and contents of the transaction.
*/
union {
/* target descriptor of command transaction */
__u32 handle; // refers to a binder_ref (the handle)
/* target descriptor of return transaction */
binder_uintptr_t ptr; // memory address of the binder_node
} target; // a BpBinder addresses the target by handle, a BBinder by ptr
binder_uintptr_t cookie; /* target object cookie */ // pointer to the BBinder
__u32 code; /* transaction command */ // RPC code: the command agreed between client and server
/* General information about the transaction. */
__u32 flags; // flags, e.g. TF_ONE_WAY for an asynchronous call that does not wait for the server's reply
pid_t sender_pid; // pid of the sending process
uid_t sender_euid; // euid of the sending process
binder_size_t data_size; /* number of bytes of data */ // total size of the data area
binder_size_t offsets_size; /* number of bytes of offsets */ // total size of the IPC object offsets
/* If this transaction is inline, the data immediately
* follows here; otherwise, it ends with a pointer to
* the data buffer.
*/
union {
struct {
/* transaction data */
binder_uintptr_t buffer; // start address of the data area
/* offsets from buffer to flat_binder_object structs */
binder_uintptr_t offsets; // offsets of the IPC objects within the data area
} ptr;
__u8 buf[8];
} data; // RPC payload
};
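As an illustration of how these fields fit together, a hedged sketch of filling the structure for an asynchronous call on handle 0 (the context manager); the RPC code and payload here are invented for the example, and a real caller serialises a Parcel into the data area:

#include <stdint.h>
#include <string.h>
#include <linux/android/binder.h>

static struct binder_transaction_data make_oneway_call(const void *payload, size_t len)
{
    struct binder_transaction_data tr;

    memset(&tr, 0, sizeof(tr));
    tr.target.handle = 0;          /* BpBinder side: target selected by handle        */
    tr.code          = 1;          /* illustrative RPC code agreed by both sides      */
    tr.flags         = TF_ONE_WAY; /* asynchronous: do not wait for a reply           */

    tr.data_size        = len;                                  /* total size of the data area   */
    tr.offsets_size     = 0;                                    /* no flat_binder_object entries */
    tr.data.ptr.buffer  = (binder_uintptr_t)(uintptr_t)payload; /* start of the data area        */
    tr.data.ptr.offsets = 0;                                    /* no IPC object offsets         */
    return tr;
}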
binder_node
binder entity; corresponds to a BBinder object. Records the owning process, the object pointer, reference counts, and so on.
binder_ref
binder reference; corresponds to a BpBinder object. Records the BpBinder's reference counts, death notifications, a pointer to the BBinder's binder_node, and so on.
Key global variables
static HLIST_HEAD(binder_devices); // list of all registered binder devices
static HLIST_HEAD(binder_procs); // list of all binder_proc instances
// file operations of the binder device
static const struct file_operations binder_fops = {
.owner = THIS_MODULE,
.poll = binder_poll,
.unlocked_ioctl = binder_ioctl,
.compat_ioctl = binder_ioctl,
.mmap = binder_mmap,
.open = binder_open,
.flush = binder_flush,
.release = binder_release,
};
// vm operations for the mmap'ed region
static const struct vm_operations_struct binder_vm_ops = {
.open = binder_vma_open,
.close = binder_vma_close,
.fault = binder_vm_fault,
};
Key functions
- Driver initialization
Registers /dev/binder (or /dev/hwbinder).
static int __init binder_init(void)
{
...
struct binder_device *device; // binder device
...
binder_debugfs_dir_entry_root = debugfs_create_dir("binder", NULL); // create the debugfs directory
...
// initialize the binder devices; there may be more than one
device_tmp = device_names;
while ((device_name = strsep(&device_tmp, ",")))
ret = init_binder_device(device_name);
}
// initialize a single binder driver device
static int __init init_binder_device(const char *name)
{
int ret;
struct binder_device *binder_device;
binder_device = kzalloc(sizeof(*binder_device), GFP_KERNEL);
binder_device->miscdev.fops = &binder_fops; // file operations of the device
binder_device->miscdev.minor = MISC_DYNAMIC_MINOR; // minor number, allocated dynamically
binder_device->miscdev.name = name; // device name
binder_device->context.binder_context_mgr_uid = INVALID_UID;
binder_device->context.name = name;
// register the misc device
ret = misc_register(&binder_device->miscdev);
// add the device to the global device list
hlist_add_head(&binder_device->hlist, &binder_devices);
return ret;
}
- Opening the binder device
Before a process can use binder IPC it must open the binder device to obtain a file descriptor; all further use of binder goes through file operations on that descriptor.
The driver adds every process that has opened the binder device to the global hash list binder_procs.
filp points to the open file structure; the file descriptor the process gets back from open refers to that same structure.
When the process later passes this descriptor to mmap or ioctl, the kernel hands the associated file structure to the driver,
and the driver recovers the process's binder_proc from the file structure's private_data field.
// create a binder_proc object and record the current process's information in it
static int binder_open(struct inode *nodp, struct file *filp)
{
struct binder_proc *proc;
struct binder_device *binder_dev;
proc = kzalloc(sizeof(*proc), GFP_KERNEL); // allocate the binder_proc structure
get_task_struct(current->group_leader);
proc->tsk = current->group_leader; // record the caller's group_leader task in the binder_proc
INIT_LIST_HEAD(&proc->todo); // initialize the todo list
if (binder_supported_policy(current->policy)) {
proc->default_priority.sched_policy = current->policy;
proc->default_priority.prio = current->normal_prio;
} else {
proc->default_priority.sched_policy = SCHED_NORMAL;
proc->default_priority.prio = NICE_TO_PRIO(0); // fall back to nice 0 converted to a kernel priority
}
binder_dev = container_of(filp->private_data, struct binder_device, miscdev); // recover the binder_device from the miscdevice pointer
proc->context = &binder_dev->context;
binder_alloc_init(&proc->alloc); // set up the allocator later used by mmap
binder_stats_created(BINDER_STAT_PROC); // increment the count of created binder_proc objects
proc->pid = current->group_leader->pid;
INIT_LIST_HEAD(&proc->delivered_death); // initialize the delivered death-notification list
INIT_LIST_HEAD(&proc->waiting_threads); // initialize the waiting-threads list
filp->private_data = proc; // stash the binder_proc in the file's private_data
hlist_add_head(&proc->proc_node, &binder_procs); // add this binder_proc to the global binder_procs list
return 0;
}
- Memory mapping
vma: the process's (user-space) virtual memory area.
1. Reserve a region of kernel virtual address space with the same size as the user virtual area.
2. Allocate one page of physical memory and map it into both the kernel and the user virtual address space, so that kernel and user space operate on the same buffer.
user_buffer_offset is the difference between the process virtual address and the kernel virtual address (a negative value).
For the same physical address, if the kernel address is kernel_addr, then the process address is proc_addr = kernel_addr + user_buffer_offset.
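The same relation in code (a fragment, assuming a struct binder_buffer *buffer owned by the struct binder_alloc *alloc set up in binder_alloc_mmap_handler below):

/* the receiver reads the data at this address without any further copy */
void __user *proc_addr = (void __user *)((uintptr_t)buffer->data + alloc->user_buffer_offset);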
//
static int binder_mmap(struct file *filp, struct vm_area_struct *vma)
{
struct binder_proc *proc = filp->private_data;
if ((vma->vm_end - vma->vm_start) > SZ_4M)
vma->vm_end = vma->vm_start + SZ_4M; // cap the mapping at 4 MB
vma->vm_flags |= VM_DONTCOPY | VM_MIXEDMAP;
vma->vm_flags &= ~VM_MAYWRITE;
vma->vm_ops = &binder_vm_ops; // register the vm operations
vma->vm_private_data = proc;
// set up the region described by vma for later binder buffer allocation
ret = binder_alloc_mmap_handler(&proc->alloc, vma);
proc->files = get_files_struct(current);
}
int binder_alloc_mmap_handler(struct binder_alloc *alloc, struct vm_area_struct *vma)
{
struct vm_struct *area;
struct binder_buffer *buffer;
// reserve a contiguous range of kernel virtual address space, the same size as the process's vma
area = get_vm_area(vma->vm_end - vma->vm_start, VM_ALLOC);
alloc->buffer = area->addr; // start address of the kernel virtual range
// address offset = user-space virtual address - kernel virtual address
alloc->user_buffer_offset = vma->vm_start - (uintptr_t)alloc->buffer;
// allocate the array of physical page pointers, one entry per page of the vma
alloc->pages = kzalloc(sizeof(alloc->pages[0]) *
((vma->vm_end - vma->vm_start) / PAGE_SIZE),
GFP_KERNEL);
// record the total buffer size
alloc->buffer_size = vma->vm_end - vma->vm_start;
buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
buffer->data = alloc->buffer;
list_add(&buffer->entry, &alloc->buffers);
buffer->free = 1;
binder_insert_free_buffer(alloc, buffer); // insert the initial free buffer
alloc->free_async_space = alloc->buffer_size / 2; // asynchronous transactions may use at most half of the buffer
barrier();
alloc->vma = vma;
alloc->vma_vm_mm = vma->vm_mm;
/* Same as mmgrab() in later kernel versions */
atomic_inc(&alloc->vma_vm_mm->mm_count);
}
- Communication interface
Responsible for exchanging IPC data and IPC reply data between two processes.
ioctl(file descriptor, ioctl command, data type)
ioctl commands and related types
#define BINDER_WRITE_READ _IOWR('b', 1, struct binder_write_read)
#define BINDER_SET_IDLE_TIMEOUT _IOW('b', 3, __s64)
#define BINDER_SET_MAX_THREADS _IOW('b', 5, __u32)
#define BINDER_SET_IDLE_PRIORITY _IOW('b', 6, __s32)
#define BINDER_SET_CONTEXT_MGR _IOW('b', 7, __s32)
#define BINDER_THREAD_EXIT _IOW('b', 8, __s32)
#define BINDER_VERSION _IOWR('b', 9, struct binder_version)
#define BINDER_GET_NODE_DEBUG_INFO _IOWR('b', 11, struct binder_node_debug_info)
struct binder_write_read {
binder_size_t write_size; /* bytes to write */
binder_size_t write_consumed; /* bytes consumed by driver */
binder_uintptr_t write_buffer;
binder_size_t read_size; /* bytes to read */
binder_size_t read_consumed; /* bytes consumed by driver */
binder_uintptr_t read_buffer;
};
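A hedged sketch of one BINDER_WRITE_READ round trip (the helper name is made up; conceptually this is what IPC.talkWithDriver does): the write buffer carries BC commands into the driver, and the read buffer comes back filled with BR commands.

#include <stddef.h>
#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/android/binder.h>

static int talk_with_driver(int fd, const void *out, size_t out_len, void *in, size_t in_len)
{
    struct binder_write_read bwr;

    bwr.write_buffer   = (binder_uintptr_t)(uintptr_t)out;
    bwr.write_size     = out_len;
    bwr.write_consumed = 0; /* driver reports how many bytes it parsed     */
    bwr.read_buffer    = (binder_uintptr_t)(uintptr_t)in;
    bwr.read_size      = in_len;
    bwr.read_consumed  = 0; /* driver reports how many bytes it wrote back */

    return ioctl(fd, BINDER_WRITE_READ, &bwr);
}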
- binder communication protocol
The binder protocol is carried inside the IPC data and falls into two classes, both defined from the point of view of a process talking to the driver:
BINDER_COMMAND_PROTOCOL: binder command codes, prefixed with "BC_" (BC codes for short), travel from the IPC layer down to the binder driver;
BINDER_RETURN_PROTOCOL: binder return codes, prefixed with "BR_" (BR codes for short), travel from the binder driver up to the IPC layer.
A binder IPC exchange involves at least two processes:
the client process runs binder_thread_write and turns each BC_XXX command into the corresponding binder_work;
the server process runs binder_thread_read, converts each binder_work.type into a BR_XXX code and delivers it to user space for processing.
Call chain: binder_ioctl --> binder_ioctl_write_read --> binder_thread_write / binder_thread_read
-- BC_XXX --> binder_transaction
-- BR_XXX --> back to user space

BC code                          issued from
BC_TRANSACTION                   IPC.transact()
BC_REPLY                         IPC.sendReply()
BC_FREE_BUFFER                   IPC.freeBuffer()
BC_REQUEST_DEATH_NOTIFICATION    IPC.requestDeathNotification()
BC_CLEAR_DEATH_NOTIFICATION      IPC.clearDeathNotification()
BC_DEAD_BINDER_DONE              IPC.execute()

BR code                          generated on
BR_TRANSACTION                   receipt of BINDER_WORK_TRANSACTION
BR_REPLY                         receipt of BINDER_WORK_TRANSACTION
BR_TRANSACTION_COMPLETE          receipt of BINDER_WORK_TRANSACTION_COMPLETE
BR_DEAD_BINDER                   receipt of BINDER_WORK_DEAD_BINDER or BINDER_WORK_DEAD_BINDER_AND_CLEAR
BR_CLEAR_DEATH_NOTIFICATION_DONE receipt of BINDER_WORK_CLEAR_DEATH_NOTIFICATION
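The server side of this protocol, as a sketch built on the hypothetical talk_with_driver() helper above (same includes plus <string.h>; error handling and full buffer parsing omitted): the looper announces itself with BC_ENTER_LOOPER, then blocks in the read half and dispatches on the BR codes it receives.

static void binder_loop_sketch(int fd)
{
    uint32_t enter = BC_ENTER_LOOPER;
    uint8_t readbuf[256];

    talk_with_driver(fd, &enter, sizeof(enter), NULL, 0);

    for (;;) {
        struct binder_write_read bwr = {
            .read_buffer = (binder_uintptr_t)(uintptr_t)readbuf,
            .read_size   = sizeof(readbuf),
        };
        if (ioctl(fd, BINDER_WRITE_READ, &bwr) < 0)
            break;

        uint32_t cmd;
        memcpy(&cmd, readbuf, sizeof(cmd)); /* first BR code in the buffer */
        switch (cmd) {
        case BR_TRANSACTION:          /* an incoming BINDER_WORK_TRANSACTION      */
        case BR_REPLY:                /* reply to one of our BC_TRANSACTION calls */
        case BR_TRANSACTION_COMPLETE: /* the driver acknowledged a BC command     */
        default:
            break; /* a real looper parses every command in readbuf */
        }
    }
}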
binder_get_thread
Looks up the binder_thread in the binder_proc: if the calling thread is already in the proc's thread tree it is returned directly;
otherwise a new binder_thread is created and added to the proc, as sketched below.
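Its code is not reproduced here; a simplified sketch of the lookup-or-insert walk it performs over proc->threads (keyed by the calling thread's pid, with locking, allocation and error handling stripped; field names follow the driver source) might look like this:

static struct binder_thread *lookup_or_insert_thread(struct binder_proc *proc, struct binder_thread *new_thread)
{
    struct rb_node **p = &proc->threads.rb_node;
    struct rb_node *parent = NULL;
    struct binder_thread *thread;

    while (*p) {
        parent = *p;
        thread = rb_entry(parent, struct binder_thread, rb_node);

        if (current->pid < thread->pid)
            p = &(*p)->rb_left;
        else if (current->pid > thread->pid)
            p = &(*p)->rb_right;
        else
            return thread;              /* the thread is already registered */
    }
    if (!new_thread)
        return NULL;                    /* caller allocates a binder_thread and retries */
    new_thread->pid = current->pid;
    rb_link_node(&new_thread->rb_node, parent, p);
    rb_insert_color(&new_thread->rb_node, &proc->threads);
    return new_thread;
}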
static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
struct binder_proc *proc = filp->private_data;
struct binder_thread *thread;
unsigned int size = _IOC_SIZE(cmd);
void __user *ubuf = (void __user *)arg;
// wait (interruptibly) until binder is not halted by a user error; normally this returns at once
ret = wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2);
// look up or create the binder_thread for the caller
thread = binder_get_thread(proc);
// dispatch the binder command
switch (cmd) {
case BINDER_WRITE_READ: // binder read/write exchange, the main IPC path (IPC.talkWithDriver)
ret = binder_ioctl_write_read(filp, cmd, arg, thread);
break;
case BINDER_SET_MAX_THREADS: { // set the maximum number of binder threads for this process
int max_threads;
copy_from_user(&max_threads, ubuf, sizeof(max_threads));
proc->max_threads = max_threads;
break;
}
case BINDER_SET_CONTEXT_MGR: // become the binder context manager, i.e. servicemanager registering itself as the daemon
ret = binder_ioctl_set_ctx_mgr(filp);
break;
case BINDER_THREAD_EXIT: // a binder thread is exiting; release its binder_thread
binder_thread_release(proc, thread);
thread = NULL;
break;
case BINDER_VERSION: { // query the binder protocol version
struct binder_version __user *ver = ubuf;
put_user(BINDER_CURRENT_PROTOCOL_VERSION, &ver->protocol_version);
break;
}
default:
ret = -EINVAL;
goto err;
}
}
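Two of these commands as they are typically issued from user space (a sketch; fd is an open /dev/binder descriptor):

uint32_t max_threads = 15;
ioctl(fd, BINDER_SET_MAX_THREADS, &max_threads); /* cap the number of driver-spawned looper threads */
ioctl(fd, BINDER_SET_CONTEXT_MGR, 0);            /* servicemanager only: become the context manager */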
- binder_thread
The binder_thread structure represents the thread performing the current binder operation.
- binder_ioctl_write_read
Moves binder communication data between user space and kernel space.
static int binder_ioctl_write_read(struct file *filp, unsigned int cmd, unsigned long arg, // binder command and argument
                                   struct binder_thread *thread)
{
    struct binder_proc *proc = filp->private_data;
    void __user *ubuf = (void __user *)arg;
    struct binder_write_read bwr;

    // read the write/read parameters from user space
    copy_from_user(&bwr, ubuf, sizeof(bwr));
    // the user process has data to write
    if (bwr.write_size > 0) {
        ret = binder_thread_write(proc, thread,
                                  bwr.write_buffer,
                                  bwr.write_size,
                                  &bwr.write_consumed);
        if (ret < 0) {
            bwr.read_consumed = 0;
            if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
                ret = -EFAULT;
            goto out;
        }
    }
    // the user process wants to read data
    if (bwr.read_size > 0) {
        ret = binder_thread_read(proc, thread, bwr.read_buffer,
                                 bwr.read_size,
                                 &bwr.read_consumed,
                                 filp->f_flags & O_NONBLOCK);
        if (!binder_worklist_empty_ilocked(&proc->todo))
            binder_wakeup_proc_ilocked(proc); // wake up a waiting thread
        if (ret < 0) {
            if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
                ret = -EFAULT;
            goto out;
        }
    }
    // copy the read/write results back to the user process
    if (copy_to_user(ubuf, &bwr, sizeof(bwr))) {
        ret = -EFAULT;
        goto out;
    }
out:
    return ret;
}
- binder_thread_write
Parses the BC codes out of the write buffer, reads the accompanying binder_transaction_data, and for transaction commands calls binder_transaction() to build and dispatch the communication.
static int binder_thread_write(struct binder_proc *proc,     // calling process
                               struct binder_thread *thread, // calling thread
                               binder_uintptr_t binder_buffer, size_t size,
                               binder_size_t *consumed)
{
    uint32_t cmd;
    void __user *buffer = (void __user *)(uintptr_t)binder_buffer;
    void __user *ptr = buffer + *consumed;
    void __user *end = buffer + size;

    while (ptr < end && thread->return_error.cmd == BR_OK) {
        get_user(cmd, (uint32_t __user *)ptr); // fetch the BC code
        ptr += sizeof(uint32_t);
        switch (cmd) {
        case BC_INCREFS:
        case BC_ACQUIRE:
        case BC_RELEASE:
        case BC_DECREFS:
        case BC_INCREFS_DONE:
        case BC_ACQUIRE_DONE:
        case BC_ATTEMPT_ACQUIRE:
        case BC_ACQUIRE_RESULT:
        case BC_FREE_BUFFER:
        case BC_TRANSACTION_SG:
        case BC_REPLY_SG:
        // For BC_TRANSACTION and BC_REPLY the driver calls binder_transaction(); this is by far the most frequent operation.
        // The common communication path:
        case BC_TRANSACTION:
        case BC_REPLY: {
            struct binder_transaction_data tr;

            copy_from_user(&tr, ptr, sizeof(tr));
            ptr += sizeof(tr);
            binder_transaction(proc, thread, &tr, cmd == BC_REPLY, 0);
            break;
        }
        case BC_REGISTER_LOOPER:
        case BC_ENTER_LOOPER:
        case BC_EXIT_LOOPER:
        case BC_REQUEST_DEATH_NOTIFICATION:
        case BC_CLEAR_DEATH_NOTIFICATION:
        case BC_DEAD_BINDER_DONE:
        default:
            break;
        }
        *consumed = ptr - buffer;
    }
}
- binder_thread_read
Dequeues binder_work items from the thread's or the process's todo list, converts them into BR codes and notifies the reading process.
static int binder_thread_read(struct binder_proc *proc,
                              struct binder_thread *thread,
                              binder_uintptr_t binder_buffer, size_t size,
                              binder_size_t *consumed, int non_block)
{
    void __user *buffer = (void __user *)(uintptr_t)binder_buffer;
    void __user *ptr = buffer + *consumed;
    void __user *end = buffer + size;

retry:
    wait_for_proc_work = binder_available_for_proc_work_ilocked(thread);
    thread->looper |= BINDER_LOOPER_STATE_WAITING;
    // wait_for_proc_work decides whether to wait on the thread's or the process's work queue
    if (wait_for_proc_work) {
        if (!(thread->looper & (BINDER_LOOPER_STATE_REGISTERED |
                                BINDER_LOOPER_STATE_ENTERED))) {
            wait_event_interruptible(binder_user_error_wait,
                                     binder_stop_on_user_error < 2);
        }
    }
    if (non_block) {
        if (!binder_has_work(thread, wait_for_proc_work))
            ret = -EAGAIN;
    } else {
        ret = binder_wait_for_work(thread, wait_for_proc_work);
    }
    thread->looper &= ~BINDER_LOOPER_STATE_WAITING;

    while (1) {
        uint32_t cmd;
        struct binder_transaction_data tr;
        struct binder_work *w = NULL;
        struct list_head *list = NULL;
        struct binder_transaction *t = NULL;
        struct binder_thread *t_from;

        if (!binder_worklist_empty_ilocked(&thread->todo))
            list = &thread->todo;
        else if (!binder_worklist_empty_ilocked(&proc->todo) && wait_for_proc_work)
            list = &proc->todo;
        else {
            /* no data added */
            if (ptr - buffer == 4 && !thread->looper_need_return)
                goto retry;
            break;
        }
        // dequeue a binder_work item from the work list
        w = binder_dequeue_work_head_ilocked(list);
        if (binder_worklist_empty_ilocked(&thread->todo))
            thread->process_todo = false;

        switch (w->type) {
        case BINDER_WORK_TRANSACTION: {
            t = container_of(w, struct binder_transaction, work);
        } break;
        case BINDER_WORK_RETURN_ERROR: {
            struct binder_error *e = container_of(w, struct binder_error, work);

            put_user(e->cmd, (uint32_t __user *)ptr); // cmd = e->cmd;
            e->cmd = BR_OK;
            ptr += sizeof(uint32_t);
        } break;
        case BINDER_WORK_TRANSACTION_COMPLETE: {
            // the driver acknowledges that one BC_TRANSACTION has been handled
            cmd = BR_TRANSACTION_COMPLETE;
            put_user(cmd, (uint32_t __user *)ptr);
            ptr += sizeof(uint32_t);
        } break;
        case BINDER_WORK_NODE:
        case BINDER_WORK_DEAD_BINDER:
        case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
        case BINDER_WORK_CLEAR_DEATH_NOTIFICATION:
            if (cmd == BR_DEAD_BINDER)
                goto done; /* DEAD_BINDER notifications can cause transactions */
        }
        ......
        if (t->buffer->target_node) {
            cmd = BR_TRANSACTION;
        } else {
            cmd = BR_REPLY;
        }
        // the driver delivers the BR code and the transaction data to the reading process
        put_user(cmd, (uint32_t __user *)ptr);
        ptr += sizeof(uint32_t);
        copy_to_user(ptr, &tr, sizeof(tr));
        ptr += sizeof(tr);
    }

done:
    *consumed = ptr - buffer;
    // Spawn a new binder thread when: no spawn request is pending, no thread is waiting for
    // process work, fewer threads have been started than max_threads (e.g. 15), and this
    // thread's looper state is registered or entered.
    if (proc->requested_threads == 0 &&
        list_empty(&thread->proc->waiting_threads) &&
        proc->requested_threads_started < proc->max_threads &&
        (thread->looper & (BINDER_LOOPER_STATE_REGISTERED |
                           BINDER_LOOPER_STATE_ENTERED))
        /* the user-space code fails to spawn a new thread if we leave this out */) {
        proc->requested_threads++;
        // emit BR_SPAWN_LOOPER so that user space starts a new binder thread
        put_user(BR_SPAWN_LOOPER, (uint32_t __user *)buffer);
    }
- binder_transaction
Handles both requests and replies: it locates the target process, builds a transaction, and queues the transaction on the target's todo list.
For one exchange, the binder driver queues a BINDER_WORK_TRANSACTION for the target process, which the target later reads and executes,
and queues a BINDER_WORK_TRANSACTION_COMPLETE carrying the result of the send for the source process, which the source reads and executes.
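From the source process's point of view this pairing looks as follows (a client-side sketch under the same assumptions as the earlier user-space snippets; after writing a BC_TRANSACTION it keeps reading until both work items have come back as BR codes, and with the deferred-complete optimisation below BR_TRANSACTION_COMPLETE may arrive together with the reply):

static int wait_for_reply_sketch(int fd, uint8_t *readbuf, size_t len)
{
    for (;;) {
        struct binder_write_read bwr = {
            .read_buffer = (binder_uintptr_t)(uintptr_t)readbuf,
            .read_size   = len,
        };
        if (ioctl(fd, BINDER_WRITE_READ, &bwr) < 0)
            return -1;

        uint32_t cmd;
        memcpy(&cmd, readbuf, sizeof(cmd));
        switch (cmd) {
        case BR_TRANSACTION_COMPLETE: /* our BINDER_WORK_TRANSACTION_COMPLETE      */
            break;                    /* keep reading until the reply arrives      */
        case BR_REPLY:                /* the BINDER_WORK_TRANSACTION queued for us */
            return 0;
        default:                      /* other BR codes: handling omitted          */
            return -1;
        }
    }
}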
static void binder_transaction(struct binder_proc *proc,
                               struct binder_thread *thread,
                               struct binder_transaction_data *tr, int reply, // transaction data and whether this is a reply
                               binder_size_t extra_buffers_size)
{
    struct binder_transaction *t;
    struct binder_work *tcomplete;
    struct binder_proc *target_proc = NULL;     // target process
    struct binder_thread *target_thread = NULL; // target thread
    struct binder_node *target_node = NULL;     // target binder node
    struct binder_transaction *in_reply_to = NULL;

    if (reply) {
        in_reply_to = thread->transaction_stack;
        thread->transaction_stack = in_reply_to->to_parent;
        // locate the target_thread
        target_thread = binder_get_txn_from_and_acq_inner(in_reply_to);
        // locate the target_proc
        target_proc = target_thread->proc;
    } else {
        // locate the target_proc
        if (tr->target.handle) {
            struct binder_ref *ref;

            ref = binder_get_ref_olocked(proc, tr->target.handle, true);
            target_node = binder_get_node_refs_for_txn(ref->node, &target_proc, &return_error);
        } else {
            target_node = context->binder_context_mgr_node;
            target_node = binder_get_node_refs_for_txn(target_node, &target_proc, &return_error);
        }
        if (security_binder_transaction(proc->tsk, target_proc->tsk) < 0)
            ... // LSM hook: abort the transaction if permission is denied
        if (!(tr->flags & TF_ONE_WAY) && thread->transaction_stack) {
            struct binder_transaction *tmp = thread->transaction_stack;

            while (tmp) {
                struct binder_thread *from;

                from = tmp->from;
                if (from && from->proc == target_proc) {
                    atomic_inc(&from->tmp_ref);
                    target_thread = from;
                    spin_unlock(&tmp->lock);
                    break;
                }
                tmp = tmp->from_parent;
            }
        }
    }

    /* TODO: reuse incoming transaction for reply */
    t = kzalloc(sizeof(*t), GFP_KERNEL);
    tcomplete = kzalloc(sizeof(*tcomplete), GFP_KERNEL);

    if (!reply && !(tr->flags & TF_ONE_WAY))
        t->from = thread;
    else
        t->from = NULL;
    t->sender_euid = task_euid(proc->tsk);
    t->to_proc = target_proc;
    t->to_thread = target_thread;
    t->code = tr->code;
    t->flags = tr->flags;
    // allocate a buffer from target_proc's mapped area to hold the transaction data
    t->buffer = binder_alloc_new_buf(&target_proc->alloc, tr->data_size,
                                     tr->offsets_size, extra_buffers_size,
                                     !reply && (t->flags & TF_ONE_WAY));
    t->buffer->allow_user_free = 0;
    t->buffer->debug_id = t->debug_id;
    t->buffer->transaction = t;
    t->buffer->target_node = target_node;
    off_start = (binder_size_t *)(t->buffer->data + ALIGN(tr->data_size, sizeof(void *)));
    offp = off_start;
    // copy the transaction data into the shared buffer (this is the single copy)
    copy_from_user(t->buffer->data,
                   (const void __user *)(uintptr_t)tr->data.ptr.buffer, tr->data_size);
    copy_from_user(offp,
                   (const void __user *)(uintptr_t)tr->data.ptr.offsets, tr->offsets_size);
    off_end = (void *)off_start + tr->offsets_size;
    sg_bufp = (u8 *)(PTR_ALIGN(off_end, sizeof(void *)));
    sg_buf_end = sg_bufp + extra_buffers_size;
    off_min = 0;
    // translate the flat_binder_object entries embedded in the data
    for (; offp < off_end; offp++) {
        struct binder_object_header *hdr;
        size_t object_size = binder_validate_object(t->buffer, *offp);

        hdr = (struct binder_object_header *)(t->buffer->data + *offp);
        off_min = *offp + object_size;
        switch (hdr->type) {
        case BINDER_TYPE_BINDER:
        case BINDER_TYPE_WEAK_BINDER: {
            struct flat_binder_object *fp;

            fp = to_flat_binder_object(hdr);
            ret = binder_translate_binder(fp, t, thread);
        } break;
        case BINDER_TYPE_HANDLE:
        case BINDER_TYPE_WEAK_HANDLE: {
            struct flat_binder_object *fp;

            fp = to_flat_binder_object(hdr);
            ret = binder_translate_handle(fp, t, thread);
        } break;
        case BINDER_TYPE_FD:
        case BINDER_TYPE_FDA:
        case BINDER_TYPE_PTR:
        default:
            break;
        }
    }
    // queue a BINDER_WORK_TRANSACTION_COMPLETE item on the current (source) thread, for the source process to read
    tcomplete->type = BINDER_WORK_TRANSACTION_COMPLETE;
    // queue a BINDER_WORK_TRANSACTION item on the target's todo list, for the target process to read
    t->work.type = BINDER_WORK_TRANSACTION;

    if (reply) {
        binder_enqueue_thread_work(thread, tcomplete); // reply-complete work for the source thread
        binder_pop_transaction_ilocked(target_thread, in_reply_to);
        binder_enqueue_thread_work_ilocked(target_thread, &t->work); // reply work for the target thread
        wake_up_interruptible_sync(&target_thread->wait); // wake the sleeping target thread
        binder_free_transaction(in_reply_to);
    } else if (!(t->flags & TF_ONE_WAY)) {
        /*
         * Defer the TRANSACTION_COMPLETE, so we don't return to
         * userspace immediately; this allows the target process to
         * immediately start processing this transaction, reducing
         * latency. We will then return the TRANSACTION_COMPLETE when
         * the target replies (or there is an error).
         */
        binder_enqueue_deferred_thread_work_ilocked(thread, tcomplete);
        t->need_reply = 1;
        t->from_parent = thread->transaction_stack;
        thread->transaction_stack = t;
        // hand the transaction to the target process and wake it:
        // wake a waiting thread if there is one, otherwise queue the work on the process
        binder_proc_transaction(t, target_proc, target_thread);
    } else {
        binder_enqueue_thread_work(thread, tcomplete);
        binder_proc_transaction(t, target_proc, NULL);
    }
}