Overview
The work done by servicemanager can be broken into the following parts:
- starting servicemanager
- opening the binder device, initializing driver data, and mapping memory
- building the context and creating the binder node
- entering binder_loop, reading data in a loop and parsing it
- calling back into svcmgr_handler to handle client data
1. Starting servicemanager
servicemanager is a system service; it is started from init.rc, as shown below:
# Start essential services.
start servicemanager
start hwservicemanager
start vndservicemanager
In other words, servicemanager is started as part of Android system boot. The other two services correspond to the hwbinder and vndbinder devices respectively; they were mentioned in the binder driver initialization article and are not analyzed here.
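The service itself is declared in servicemanager.rc (under frameworks/native/cmds/servicemanager); roughly, and with details varying between Android releases, the declaration looks like this:
service servicemanager /system/bin/servicemanager
    class core animation
    user system
    group system readproc
    critical
    onrestart restart zygote
    # further onrestart/writepid lines omitted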
The code path is:
service_manager.c frameworks\native\cmds\servicemanager
Starting from main:
int main(int argc, char** argv)
{
struct binder_state *bs;
char *driver;
if (argc > 1) {
driver = argv[1];
} else {
driver = "/dev/binder";
}
//open the device /dev/binder and map 128K of space, see section 2
bs = binder_open(driver, 128*1024);
//build the context, see section 3
if (binder_become_context_manager(bs)) {
ALOGE("cannot become context manager (%s)\n", strerror(errno));
return -1;
}
//selinux-related code omitted here
//loop reading data, see section 4; register the callback svcmgr_handler, see section 5
binder_loop(bs, svcmgr_handler);
return 0;
}
Here is a brief look at binder_state; the rest is analyzed in detail in the following sections.
//binder.c frameworks\native\cmds\servicemanager
struct binder_state
{
int fd; //file descriptor of the opened device node
void *mapped; //start address of the virtual memory mapped into servicemanager's user space once mmap completes
size_t mapsize; //size of the mapping
};
The comments above make the purpose of this structure clear.
2. Opening the binder device and mapping memory
bs = binder_open(driver, 128*1024);
Here driver points to the device node /dev/binder.
//binder.c frameworks\native\cmds\servicemanager
struct binder_state *binder_open(const char* driver, size_t mapsize)
{
struct binder_state *bs;
struct binder_version vers;
//allocate the binder_state structure
bs = malloc(sizeof(*bs));
//open /dev/binder, see section 2.1
bs->fd = open(driver, O_RDWR | O_CLOEXEC);
//query the kernel binder version and check that it matches the user-space version
if ((ioctl(bs->fd, BINDER_VERSION, &vers) == -1) ||
(vers.protocol_version != BINDER_CURRENT_PROTOCOL_VERSION)) {
fprintf(stderr,
"binder: kernel driver version (%d) differs from user space version (%d)\n",
vers.protocol_version, BINDER_CURRENT_PROTOCOL_VERSION);
goto fail_open;
}
//memory mapping, see section 2.2
bs->mapsize = mapsize;
bs->mapped = mmap(NULL, mapsize, PROT_READ, MAP_PRIVATE, bs->fd, 0);
return bs;

//error path taken by the version check above
fail_open:
free(bs);
return NULL;
}
2.1. open
open traps into kernel mode through the VFS and ends up in binder's binder_open; see section 4.1 of the Binder driver initialization article for the details.
The main job of open is to build the binder_proc structure inside the binder driver and initialize part of its state.
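As a rough orientation, these are the binder_proc fields that matter for the rest of this article (abridged from the msm-4.19 kernel; the exact field set varies by kernel version):
//binder.c kernel\msm-4.19\drivers\android (abridged)
struct binder_proc {
    struct hlist_node proc_node;    //linked into the global binder_procs list
    struct rb_root threads;         //one binder_thread per thread that talks to the driver
    struct rb_root nodes;           //binder_node rb-tree, searched in section 3.3.2
    struct rb_root refs_by_desc;    //binder_ref entries indexed by handle
    struct rb_root refs_by_node;    //binder_ref entries indexed by node
    struct list_head todo;          //per-process work list read in section 4
    struct binder_context *context; //points at the binder_context of the device
    struct binder_alloc alloc;      //buffer management set up by mmap (section 2.2)
    struct task_struct *tsk;
    //...many other fields omitted
};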
2.2. mmap
mmap works on the same principle as open. It builds a binder_alloc in the binder driver and, based on mapsize, creates a number of binder_lru_page structures; see section 4.2 of the Binder driver initialization article.
The PROT_READ flag means servicemanager maps the binder kernel data read-only; in other words, the server can only read data through this mapping and can never write to it.
3. Building the context
binder_become_context_manager(bs)
//binder.c frameworks\native\cmds\servicemanager
int binder_become_context_manager(struct binder_state *bs)
{
struct flat_binder_object obj;
memset(&obj, 0, sizeof(obj));
obj.flags = FLAT_BINDER_FLAG_TXN_SECURITY_CTX;
int result = ioctl(bs->fd, BINDER_SET_CONTEXT_MGR_EXT, &obj);
return result;
}
This simply sends the BINDER_SET_CONTEXT_MGR_EXT command to the kernel via ioctl. flat_binder_object is the payload; here it is essentially an empty structure (only flags is set), and the binder kernel copies it in and uses it to initialize the context-manager node.
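As a side note, on kernels that predate BINDER_SET_CONTEXT_MGR_EXT this ioctl simply fails; a fallback to the older command, which carries no payload, can look roughly like this (newer AOSP versions of binder.c contain similar fallback logic):
int result = ioctl(bs->fd, BINDER_SET_CONTEXT_MGR_EXT, &obj);
if (result != 0) {
    //older kernels only understand the original command, which takes no argument
    result = ioctl(bs->fd, BINDER_SET_CONTEXT_MGR, 0);
}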
ioctl works on the same principle as open and eventually reaches the kernel's binder_ioctl; for background on binder_ioctl see section 4.3 of the Binder driver initialization article.
Here we go straight to the branch that handles the BINDER_SET_CONTEXT_MGR_EXT command:
//binder.c kernel\msm-4.19\drivers\android
case BINDER_SET_CONTEXT_MGR_EXT: {
struct flat_binder_object fbo;
if (copy_from_user(&fbo, ubuf, sizeof(fbo))) {
ret = -EINVAL;
goto err;
}
ret = binder_ioctl_set_ctx_mgr(filp, &fbo);
if (ret)
goto err;
break;
}
3.1. struct flat_binder_object
struct flat_binder_object {
struct binder_object_header hdr;
__u32 flags;
/* 8 bytes of data. */
union {
binder_uintptr_t binder; /* local object */
__u32 handle; /* remote object */
};
/* extra data associated with local object */
binder_uintptr_t cookie;
};
3.2. copy_from_user
ubuf points to the user-space virtual address of the data, i.e. the flat_binder_object structure.
copy_from_user copies that data from user space into kernel space.
3.3. Setting up the context
The main job here is to create a binder_node and assign it to the binder_context. The binder_context actually belongs to the binder_device structure, i.e. the binder device /dev/binder created when the driver initialized. In other words, the context belongs to the device, and each device has exactly one context.
static int binder_ioctl_set_ctx_mgr(struct file *filp,
struct flat_binder_object *fbo)
{
int ret = 0;
struct binder_proc *proc = filp->private_data;
//proc->context was assigned when binder_open created proc;
//it actually points to the binder_context inside the binder_device structure
struct binder_context *context = proc->context;
struct binder_node *new_node;
//can simply be thought of as the process's uid
kuid_t curr_euid = current_euid();
mutex_lock(&context->context_mgr_node_lock);
//binder_context_mgr_node is not set when the driver device is initialized, so it is NULL here; if it is non-NULL the context manager is already configured and we return immediately
if (context->binder_context_mgr_node) {
pr_err("BINDER_SET_CONTEXT_MGR already set\n");
ret = -EBUSY;
goto out;
}
ret = security_binder_set_context_mgr(proc->tsk);
if (ret < 0)
goto out;
if (uid_valid(context->binder_context_mgr_uid)) {
if (!uid_eq(context->binder_context_mgr_uid, curr_euid)) {
pr_err("BINDER_SET_CONTEXT_MGR bad uid %d != %d\n",
from_kuid(&init_user_ns, curr_euid),
from_kuid(&init_user_ns,
context->binder_context_mgr_uid));
ret = -EPERM;
goto out;
}
} else {
//binder_context_mgr_uid was initialized to INVALID_UID, so this branch is taken
context->binder_context_mgr_uid = curr_euid;
}
//create the new node, analyzed in section 3.3.2
new_node = binder_new_node(proc, fbo);
if (!new_node) {
ret = -ENOMEM;
goto out;
}
binder_node_lock(new_node);
new_node->local_weak_refs++;
new_node->local_strong_refs++;
new_node->has_strong_ref = 1;
new_node->has_weak_ref = 1;
//at this point the binder_context setup is finally complete
context->binder_context_mgr_node = new_node;
binder_node_unlock(new_node);
//this releases the node's temporary reference, which is taken when the node is created; other references (e.g. local_strong_refs) have been added above, so the temporary one is no longer needed
binder_put_node(new_node);
out:
mutex_unlock(&context->context_mgr_node_lock);
return ret;
}
3.3.1 context
As the comments above note, proc->context points to the binder_context embedded in the binder_device that was registered when the driver initialized, so there is one context per device and therefore at most one context manager per device.
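For reference, the two structures look roughly like this in the msm-4.19 kernel (abridged; newer kernels add binderfs-related fields to binder_device):
struct binder_context {
    struct binder_node *binder_context_mgr_node; //the node created below
    struct mutex context_mgr_node_lock;
    kuid_t binder_context_mgr_uid;               //INVALID_UID until first set
    const char *name;                            //e.g. "binder"
};

struct binder_device {
    struct hlist_node hlist;       //linked into the binder_devices list at driver init
    struct miscdevice miscdev;     //the /dev/binder misc device
    struct binder_context context; //one context per device
};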
3.3.2 Creating the binder_node
static struct binder_node *binder_new_node(struct binder_proc *proc,
struct flat_binder_object *fp)
{
struct binder_node *node;
struct binder_node *new_node = kzalloc(sizeof(*node), GFP_KERNEL);
node = binder_init_node_ilocked(proc, new_node, fp);
return node;
}
This just allocates the binder_node structure and then calls binder_init_node_ilocked to initialize it.
static struct binder_node *binder_init_node_ilocked(
struct binder_proc *proc,
struct binder_node *new_node,
struct flat_binder_object *fp)
{
struct rb_node **p = &proc->nodes.rb_node;
struct rb_node *parent = NULL;
struct binder_node *node;
binder_uintptr_t ptr = fp ? fp->binder : 0;
binder_uintptr_t cookie = fp ? fp->cookie : 0;
__u32 flags = fp ? fp->flags : 0;
s8 priority;
assert_spin_locked(&proc->inner_lock);
while (*p) {
//search the nodes tree of binder_proc here; code omitted
}
node = new_node;
binder_stats_created(BINDER_STAT_NODE);
//take a temporary reference; presumably a node with no references at all could be freed, hence the temporary reference
node->tmp_refs++;
//link the new node into binder_proc
rb_link_node(&node->rb_node, parent, p);
rb_insert_color(&node->rb_node, &proc->nodes);
node->debug_id = atomic_inc_return(&binder_last_id);
node->proc = proc;
node->ptr = ptr;
node->cookie = cookie;
node->work.type = BINDER_WORK_NODE;
priority = flags & FLAT_BINDER_FLAG_PRIORITY_MASK;
node->sched_policy = (flags & FLAT_BINDER_FLAG_SCHED_POLICY_MASK) >>
FLAT_BINDER_FLAG_SCHED_POLICY_SHIFT;
node->min_priority = to_kernel_prio(node->sched_policy, priority);
node->accept_fds = !!(flags & FLAT_BINDER_FLAG_ACCEPTS_FDS);
/* node->inherit_rt = !!(flags & FLAT_BINDER_FLAG_INHERIT_RT);*/
node->inherit_rt = true;
node->txn_security_ctx = !!(flags & FLAT_BINDER_FLAG_TXN_SECURITY_CTX);
spin_lock_init(&node->lock);
//initialize the work.entry and async_todo list heads
INIT_LIST_HEAD(&node->work.entry);
INIT_LIST_HEAD(&node->async_todo);
return node;
}
This first searches the binder_proc red-black tree nodes to see whether the node already exists; if it does, only a temporary reference is taken on it. The lookup key is node->ptr, the pointer in the owning process's user space that refers to the node, i.e. the binder field of the flat_binder_object mentioned earlier. servicemanager passed in an empty structure, so ptr is 0 here and proc->nodes does not yet contain this node.
Then the node is initialized; a few of the fields set here matter later, see the binder_node structure below.
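For completeness, the lookup loop elided above walks proc->nodes keyed on node->ptr; roughly (abridged from the kernel source):
while (*p) {
    parent = *p;
    node = rb_entry(parent, struct binder_node, rb_node);

    if (ptr < node->ptr)
        p = &(*p)->rb_left;
    else if (ptr > node->ptr)
        p = &(*p)->rb_right;
    else {
        //a node with this user-space pointer already exists:
        //take a temporary reference on it and return it
        binder_inc_node_tmpref_ilocked(node);
        return node;
    }
}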
3.3.3 binder_node
The comments below should be enough; pay particular attention to the work list.
struct binder_node {
//worklist
struct binder_work work;
union {
struct rb_node rb_node;
struct hlist_node dead_node;
};
struct binder_proc *proc;
//the various references held on this node
struct hlist_head refs;
int internal_strong_refs;
int local_weak_refs;
int local_strong_refs;
int tmp_refs;
//user-space pointer, passed back to servicemanager
binder_uintptr_t ptr;
binder_uintptr_t cookie;
struct {
/*
* bitfield elements protected by
* proc inner_lock
*/
u8 has_strong_ref:1;
u8 pending_strong_ref:1;
u8 has_weak_ref:1;
u8 pending_weak_ref:1;
};
struct {
/*
* invariant after initialization
*/
u8 sched_policy:2;
u8 inherit_rt:1;
u8 accept_fds:1;
u8 txn_security_ctx:1;
u8 min_priority;
};
//async-transaction related
bool has_async_transaction;
struct list_head async_todo;
};
4. binder_loop: reading data in a loop
void binder_loop(struct binder_state *bs, binder_handler func)
{
int res;
struct binder_write_read bwr;
uint32_t readbuf[32];
bwr.write_size = 0;
bwr.write_consumed = 0;
bwr.write_buffer = 0;
readbuf[0] = BC_ENTER_LOOPER;
binder_write(bs, readbuf, sizeof(uint32_t));
for (;;) {
bwr.read_size = sizeof(readbuf);
bwr.read_consumed = 0;
bwr.read_buffer = (uintptr_t) readbuf;
res = ioctl(bs->fd, BINDER_WRITE_READ, &bwr);
res = binder_parse(bs, 0, (uintptr_t) readbuf, bwr.read_consumed, func);
}
}
int binder_write(struct binder_state *bs, void *data, size_t len)
{
struct binder_write_read bwr;
int res;
bwr.write_size = len;
bwr.write_consumed = 0;
bwr.write_buffer = (uintptr_t) data;
bwr.read_size = 0;
bwr.read_consumed = 0;
bwr.read_buffer = 0;
res = ioctl(bs->fd, BINDER_WRITE_READ, &bwr);
return res;
}
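Both functions exchange data with the driver through a single struct binder_write_read: the driver first consumes the write buffer and then fills the read buffer. For reference, the uapi definition:
//include/uapi/linux/android/binder.h
struct binder_write_read {
    binder_size_t    write_size;     //bytes available in write_buffer
    binder_size_t    write_consumed; //bytes actually consumed by the driver
    binder_uintptr_t write_buffer;
    binder_size_t    read_size;      //bytes available in read_buffer
    binder_size_t    read_consumed;  //bytes actually filled in by the driver
    binder_uintptr_t read_buffer;
};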
Its main responsibilities are:
- send BC_ENTER_LOOPER to the kernel via ioctl, which sets the looper flag on the binder_thread:
thread->looper |= BINDER_LOOPER_STATE_ENTERED;
- read data from the kernel via ioctl. This eventually reaches the kernel's binder_thread_read, which takes binder_work items off the binder_thread todo list or the binder_proc todo list; briefly excerpted:
static int binder_thread_read(struct binder_proc *proc,
struct binder_thread *thread,
binder_uintptr_t binder_buffer, size_t size,
binder_size_t *consumed, int non_block)
{
retry:
//wait for work
ret = binder_wait_for_work(thread, wait_for_proc_work);
//loop over the pending work
while (1) {
//first check the binder_thread todo list
if (!binder_worklist_empty_ilocked(&thread->todo))
list = &thread->todo;
//then the binder_proc todo list
else if (!binder_worklist_empty_ilocked(&proc->todo) &&
wait_for_proc_work)
list = &proc->todo;
//if neither has pending work, jump back to retry and wait again
else {
binder_inner_proc_unlock(proc);
/* no data added */
if (ptr - buffer == 4 && !thread->looper_need_return)
goto retry;
break;
}
//dequeue a binder_work from the todo list
w = binder_dequeue_work_head_ilocked(list);
//dispatch on the work type
switch (w->type) {
case BINDER_WORK_TRANSACTION: {
binder_inner_proc_unlock(proc);
//transaction type: from the work w, find the start address of the enclosing binder_transaction and assign it to t. container_of was mentioned in section 4.1.2 of the driver initialization article; it derives the address of the containing binder_transaction from its work member.
t = container_of(w, struct binder_transaction, work);
} break;
//(handling of other work types omitted)
}
//use t to assemble the binder_transaction_data
//target_node depends on the work and is analyzed later; knowing the cmd is enough here
if (t->buffer->target_node) {
cmd = BR_TRANSACTION;
}
trd->data_size = t->buffer->data_size;
trd->offsets_size = t->buffer->offsets_size;
trd->data.ptr.buffer = (uintptr_t)t->buffer->user_data;//address of the client's user data
trd->data.ptr.offsets = trd->data.ptr.buffer +
ALIGN(t->buffer->data_size,
sizeof(void *));
//copy to user space
if (copy_to_user(ptr, &tr, trsize)) {
}
}
A more detailed walk-through will come up again when service registration is analyzed. One thing worth pointing out: BC_XXX commands generally flow from user space to the kernel, while BR_XXX commands flow from the kernel to user space (for example, BC_TRANSACTION/BC_REPLY versus BR_TRANSACTION/BR_REPLY). The layout of binder_transaction_data itself is shown after binder_parse below.
- parse the data that was read, taking BR_TRANSACTION as the example:
int binder_parse(struct binder_state *bs, struct binder_io *bio,
uintptr_t ptr, size_t size, binder_handler func)
{
int r = 1;
uintptr_t end = ptr + (uintptr_t) size;
while (ptr < end) {
uint32_t cmd = *(uint32_t *) ptr;
ptr += sizeof(uint32_t);
switch(cmd) {
case BR_TRANSACTION_SEC_CTX:
case BR_TRANSACTION: {
struct binder_transaction_data_secctx txn;
if (cmd == BR_TRANSACTION_SEC_CTX) {
} else /* BR_TRANSACTION */ {
//copy the binder_transaction_data out of ptr
memcpy(&txn.transaction_data, (void*) ptr, sizeof(struct binder_transaction_data));
ptr += sizeof(struct binder_transaction_data);
txn.secctx = 0;
}
binder_dump_txn(&txn.transaction_data);
if (func) {
unsigned rdata[256/4];
struct binder_io msg;
struct binder_io reply;
int res;
//build the binder_io objects and invoke the callback func, i.e. the svcmgr_handler of section 5
bio_init(&reply, rdata, sizeof(rdata), 4);
bio_init_from_txn(&msg, &txn.transaction_data);
res = func(bs, &txn, &msg, &reply);
//for a one-way call just free the buffer; otherwise send the reply back to the kernel (which also releases the buffer); not detailed here
if (txn.transaction_data.flags & TF_ONE_WAY) {
binder_free_buffer(bs, txn.transaction_data.data.ptr.buffer);
} else {
binder_send_reply(bs, &reply, txn.transaction_data.data.ptr.buffer, res);
}
}
break;
}
}
return r;
}
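The payload handed from binder_thread_read to binder_parse (and on to svcmgr_handler in section 5) has the shape of binder_transaction_data; the uapi definitions, abridged:
//include/uapi/linux/android/binder.h
struct binder_transaction_data {
    union {
        __u32            handle; //target handle, used in commands sent to the kernel
        binder_uintptr_t ptr;    //target node pointer, used when delivering to the server
    } target;
    binder_uintptr_t cookie;
    __u32 code;                  //e.g. SVC_MGR_ADD_SERVICE, see section 5
    __u32 flags;                 //e.g. TF_ONE_WAY

    __kernel_pid_t   sender_pid;
    __kernel_uid32_t sender_euid;
    binder_size_t data_size;     //number of payload bytes
    binder_size_t offsets_size;  //bytes of object offsets within the payload

    union {
        struct {
            binder_uintptr_t buffer;  //payload address (trd->data.ptr.buffer above)
            binder_uintptr_t offsets; //offsets of binder objects inside the payload
        } ptr;
        __u8 buf[8];
    } data;
};

struct binder_transaction_data_secctx {
    struct binder_transaction_data transaction_data;
    binder_uintptr_t secctx; //security context string delivered with BR_TRANSACTION_SEC_CTX
};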
5. svcmgr_handler: handling the data
int svcmgr_handler(struct binder_state *bs,
struct binder_transaction_data_secctx *txn_secctx,
struct binder_io *msg,
struct binder_io *reply)
{
struct svcinfo *si;
uint16_t *s;
size_t len;
uint32_t handle;
int allow_isolated;
uint32_t dumpsys_priority;
struct binder_transaction_data *txn = &txn_secctx->transaction_data;
//txn->code was set by the client when it issued the request that created this work, e.g. add service or get service; SVC_MGR_ADD_SERVICE and SVC_MGR_GET_SERVICE are analyzed in the following subsections
switch(txn->code) {
case SVC_MGR_GET_SERVICE:
case SVC_MGR_CHECK_SERVICE:
s = bio_get_string16(msg, &len);
if (s == NULL) {
return -1;
}
handle = do_find_service(s, len, txn->sender_euid, txn->sender_pid,
(const char*) txn_secctx->secctx);
if (!handle)
break;
bio_put_ref(reply, handle);
return 0;
case SVC_MGR_ADD_SERVICE:
s = bio_get_string16(msg, &len);
if (s == NULL) {
return -1;
}
handle = bio_get_ref(msg);
allow_isolated = bio_get_uint32(msg) ? 1 : 0;
dumpsys_priority = bio_get_uint32(msg);
if (do_add_service(bs, s, len, handle, txn->sender_euid, allow_isolated, dumpsys_priority,
txn->sender_pid, (const char*) txn_secctx->secctx))
return -1;
break;
}
//reply handling
bio_put_uint32(reply, 0);
return 0;
}
5.1 Adding a service
int do_add_service(struct binder_state *bs, const uint16_t *s, size_t len, uint32_t handle,
uid_t uid, int allow_isolated, uint32_t dumpsys_priority, pid_t spid, const char* sid) {
struct svcinfo *si;
//permission check: if the uid is >= AID_APP (10000), or the selinux policy disallows it, registration is refused
if (!svc_can_register(s, len, spid, sid, uid)) {
ALOGE("add_service('%s',%x) uid=%d - PERMISSION DENIED\n",
str8(s, len), handle, uid);
return -1;
}
si = find_svc(s, len);
if (si) {
if (si->handle) {
ALOGE("add_service('%s',%x) uid=%d - ALREADY REGISTERED, OVERRIDE\n",
str8(s, len), handle, uid);
svcinfo_death(bs, si);
}
si->handle = handle;
} else {
si = malloc(sizeof(*si) + (len + 1) * sizeof(uint16_t));
if (!si) {
ALOGE("add_service('%s',%x) uid=%d - OUT OF MEMORY\n",
str8(s, len), handle, uid);
return -1;
}
si->handle = handle;
si->len = len;
memcpy(si->name, s, (len + 1) * sizeof(uint16_t));
si->name[len] = '\0';
si->death.func = (void*) svcinfo_death;
si->death.ptr = si;
si->allow_isolated = allow_isolated;
si->dumpsys_priority = dumpsys_priority;
si->next = svclist;
svclist = si;
}
binder_acquire(bs, handle);
binder_link_to_death(bs, handle, &si->death);
return 0;
}
The main job is to check whether the target service already exists: if it does, its handle is updated; if not, a new svcinfo is allocated and linked onto svclist.
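Both do_add_service and do_find_service rely on struct svcinfo and the global svclist; abridged from service_manager.c, they look roughly like this:
struct svcinfo
{
    struct svcinfo *next;
    uint32_t handle;           //handle referring to the service's binder node
    struct binder_death death; //death-notification callback and cookie
    int allow_isolated;
    uint32_t dumpsys_priority;
    size_t len;
    uint16_t name[0];          //UTF-16 service name
};

struct svcinfo *svclist = NULL; //singly linked list of all registered services

struct svcinfo *find_svc(const uint16_t *s16, size_t len)
{
    struct svcinfo *si;

    for (si = svclist; si; si = si->next) {
        if ((len == si->len) &&
            !memcmp(s16, si->name, len * sizeof(uint16_t)))
            return si;
    }
    return NULL;
}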
5.2 Finding a service
uint32_t do_find_service(const uint16_t *s, size_t len, uid_t uid, pid_t spid, const char* sid)
{
struct svcinfo *si = find_svc(s, len);
return si->handle;
}
This one is simple: traverse svclist, find the service whose name matches s, and return its handle.
The handle is generated when a service is registered and uniquely identifies that service; it is analyzed together with service registration.
Some permission-check code on the lookup path has been omitted here.
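Finally, for the lookup reply in 5.2, bio_put_ref is what turns the handle into something the kernel understands: it writes a flat_binder_object of type BINDER_TYPE_HANDLE into the reply binder_io. A simplified sketch, based on the AOSP servicemanager binder.c (the real code treats a zero handle slightly differently and records an object offset via its bio_alloc helpers):
void bio_put_ref(struct binder_io *bio, uint32_t handle)
{
    struct flat_binder_object *obj;

    //reserve space for the object inside the reply buffer
    obj = bio_alloc_obj(bio);
    if (!obj)
        return;

    obj->flags = 0x7f | FLAT_BINDER_FLAG_ACCEPTS_FDS;
    obj->hdr.type = BINDER_TYPE_HANDLE; //a reference to a remote node
    obj->handle = handle;
    obj->cookie = 0;
}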