How a client queries ServiceManager for a service, illustrated with SurfaceFlinger
Most of this flow was already analyzed during service registration, so we pick up at the point where ServiceManager receives the request.
int binder_parse(struct binder_state *bs, struct binder_io *bio,
uintptr_t ptr, size_t size, binder_handler func)
{
int r = 1;
uintptr_t end = ptr + (uintptr_t) size;
while (ptr < end) {
uint32_t cmd = *(uint32_t *) ptr;
ptr += sizeof(uint32_t);
......
// cmd is BR_TRANSACTION here (we skip over the handling of BR_NOOP)
switch(cmd) {
......
case BR_TRANSACTION: {
struct binder_transaction_data *txn = (struct binder_transaction_data *) ptr;
if ((end - ptr) < sizeof(*txn)) {
ALOGE("parse: txn too small!\n");
return -1;
}
binder_dump_txn(txn);
// func is svcmgr_handler
if (func) {
unsigned rdata[256/4];
struct binder_io msg;
struct binder_io reply;
int res;
// reply holds the IPC data to be returned to the client
bio_init(&reply, rdata, sizeof(rdata), 4);
// msg wraps the IPC data sent by the client
bio_init_from_txn(&msg, txn);
// call svcmgr_handler to handle the get-service request
res = func(bs, txn, &msg, &reply);
if (txn->flags & TF_ONE_WAY) {
// a ONE_WAY transaction needs no reply; just free the binder_buffer
binder_free_buffer(bs, txn->data.ptr.buffer);
} else {
// not ONE_WAY, so a reply is required
binder_send_reply(bs, &reply, txn->data.ptr.buffer, res);
}
}
ptr += sizeof(*txn);
break;
}
......
}
}
......
}
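For reference, msg and reply above are cursors over raw IPC buffers. Their bookkeeping structure, abridged from servicemanager's binder.h (the field comments are ours):

struct binder_io {
    char *data;           /* current read/write cursor in the data area */
    binder_size_t *offs;  /* current cursor in the object-offsets array */
    size_t data_avail;    /* bytes still available in the data area */
    size_t offs_avail;    /* entries still available in the offsets array */
    char *data0;          /* start of the data area */
    binder_size_t *offs0; /* start of the offsets array */
    uint32_t flags;
    uint32_t unused;
};

bio_init(&reply, rdata, sizeof(rdata), 4) reserves the front of rdata for up to four object offsets and points data0 just past them, while bio_init_from_txn(&msg, txn) simply aims the cursors at the buffer the driver delivered.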
First we analyze how svcmgr_handler() handles the get-service request, and then look at the implementation of binder_send_reply().
int svcmgr_handler(struct binder_state *bs,
struct binder_transaction_data *txn,
struct binder_io *msg,
struct binder_io *reply)
{
struct svcinfo *si;
uint16_t *s;
size_t len;
uint32_t handle;
uint32_t strict_policy;
int allow_isolated;
......
switch(txn->code) {
case SVC_MGR_GET_SERVICE:
case SVC_MGR_CHECK_SERVICE:
// the service name requested by the client
s = bio_get_string16(msg, &len);
if (s == NULL) {
return -1;
}
// look the service up and return the binder reference handle registered for it
handle = do_find_service(s, len, txn->sender_euid, txn->sender_pid);
if (!handle)
break;
// pack the service's binder reference handle into a binder object in the reply
bio_put_ref(reply, handle);
return 0;
......
}
......
}
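do_find_service() is elided above; the sketch below is abridged from AOSP's service_manager.c (the exact permission checks vary by release, so treat it as illustrative). It scans the registered-service list by name and returns the stored handle only if the caller is allowed to see it:

uint32_t do_find_service(const uint16_t *s, size_t len, uid_t uid, pid_t spid)
{
    struct svcinfo *si = find_svc(s, len);   /* linear scan of svclist by name */

    if (!si || !si->handle)
        return 0;
    if (!si->allow_isolated) {
        /* services must opt in to be visible to isolated processes */
        uid_t appid = uid % AID_USER;
        if (appid >= AID_ISOLATED_START && appid <= AID_ISOLATED_END)
            return 0;
    }
    if (!svc_can_find(s, len, spid, uid))    /* SELinux policy check */
        return 0;
    return si->handle;
}

The handle it returns is the desc of the binder_ref that the driver created in ServiceManager's process when the service was registered.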
Next, the implementation of binder_send_reply():
void binder_send_reply(struct binder_state *bs,
struct binder_io *reply,
binder_uintptr_t buffer_to_free,
int status)
{
struct {
uint32_t cmd_free;
binder_uintptr_t buffer;
uint32_t cmd_reply;
struct binder_transaction_data txn;
} __attribute__((packed)) data; // plays the role of mOut in IPCThreadState
data.cmd_free = BC_FREE_BUFFER;
data.buffer = buffer_to_free;
data.cmd_reply = BC_REPLY;
data.txn.target.ptr = 0;
data.txn.cookie = 0;
data.txn.code = 0;
if (status) {
data.txn.flags = TF_STATUS_CODE;
data.txn.data_size = sizeof(int);
data.txn.offsets_size = 0;
data.txn.data.ptr.buffer = (uintptr_t)&status;
data.txn.data.ptr.offsets = 0;
} else {
data.txn.flags = 0;
// the IPC data to send back to the client
data.txn.data_size = reply->data - reply->data0;
data.txn.offsets_size = ((char*) reply->offs) - ((char*) reply->offs0);
data.txn.data.ptr.buffer = (uintptr_t)reply->data0;
data.txn.data.ptr.offsets = (uintptr_t)reply->offs0;
}
// write the reply to the binder driver
binder_write(bs, &data, sizeof(data));
}
The reply ultimately written back to the driver therefore carries two commands in one buffer: a BC_FREE_BUFFER command returning the just-consumed binder_buffer, immediately followed by a BC_REPLY command and its binder_transaction_data.
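A minimal compilable sketch (ours, not from the original sources) that pins down this packed layout, assuming a 64-bit ABI where binder_uintptr_t is 8 bytes; the real binder_transaction_data comes from the kernel UAPI header:

#include <cstddef>
#include <cstdint>

using binder_uintptr_t = uint64_t;  // assumption: 64-bit kernel ABI

struct reply_blob {
    uint32_t         cmd_free;      // BC_FREE_BUFFER
    binder_uintptr_t buffer;        // the binder_buffer being handed back
    uint32_t         cmd_reply;     // BC_REPLY
    // struct binder_transaction_data txn would follow here
} __attribute__((packed));

// packed => no padding, so the driver parses the stream as
// command, payload, command, payload in a single pass:
static_assert(offsetof(reply_blob, buffer) == 4, "no padding after cmd_free");
static_assert(offsetof(reply_blob, cmd_reply) == 12, "no padding after buffer");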
Next we focus on how the Binder driver delivers the reply to the client.
The process of ServiceManager returning the query result to the client
We start the analysis from binder_thread_write().
static int binder_thread_write(struct binder_proc *proc,
struct binder_thread *thread,
binder_uintptr_t binder_buffer, size_t size,
binder_size_t *consumed)
{
uint32_t cmd;
void __user *buffer = (void __user *)(uintptr_t)binder_buffer;
void __user *ptr = buffer + *consumed;
void __user *end = buffer + size;
while (ptr < end && thread->return_error == BR_OK) {
if (get_user(cmd, (uint32_t __user *)ptr))
return -EFAULT;
ptr += sizeof(uint32_t);
......
switch (cmd) {
......
case BC_FREE_BUFFER: {
// frees the binder_buffer; not discussed here
......
}
case BC_TRANSACTION:
case BC_REPLY: {
// here cmd is BC_REPLY
struct binder_transaction_data tr;
// copy the binder_transaction_data into kernel space
if (copy_from_user(&tr, ptr, sizeof(tr)))
return -EFAULT;
ptr += sizeof(tr);
binder_transaction(proc, thread, &tr, cmd == BC_REPLY);
break;
}
......
}
.......
}
......
}
Next, let's see how binder_transaction() handles BC_REPLY:
static void binder_transaction(struct binder_proc *proc,
struct binder_thread *thread,
struct binder_transaction_data *tr, int reply)
{
struct binder_transaction *t;
struct binder_work *tcomplete;
binder_size_t *offp, *off_end;
struct binder_proc *target_proc;
struct binder_thread *target_thread = NULL;
struct binder_node *target_node = NULL;
struct list_head *target_list;
wait_queue_head_t *target_wait;
struct binder_transaction *in_reply_to = NULL;
struct binder_transaction_log_entry *e;
uint32_t return_error;
......
if (reply) {
// the transaction currently being served by this thread
in_reply_to = thread->transaction_stack;
if (in_reply_to == NULL) {
binder_user_error("%d:%d got reply transaction with no transaction stack\n",
proc->pid, thread->pid);
return_error = BR_FAILED_REPLY;
goto err_empty_call_stack;
}
binder_set_nice(in_reply_to->saved_priority);
......
// pop the almost-finished transaction off this thread's transaction stack
thread->transaction_stack = in_reply_to->to_parent;
// the binder_thread that initiated the transaction
target_thread = in_reply_to->from;
if (target_thread == NULL) {
return_error = BR_DEAD_REPLY;
goto err_dead_binder;
}
// verify that the top of the initiator thread's transaction stack is this very transaction
if (target_thread->transaction_stack != in_reply_to) {
binder_user_error("%d:%d got reply transaction with bad target transaction stack %d, expected %d\n",
proc->pid, thread->pid,
target_thread->transaction_stack ?
target_thread->transaction_stack->debug_id : 0,
in_reply_to->debug_id);
return_error = BR_FAILED_REPLY;
in_reply_to = NULL;
target_thread = NULL;
goto err_dead_binder;
}
// the binder_proc that initiated the transaction
target_proc = target_thread->proc;
} else {
......
}
......
// allocate the reply binder transaction
t = kzalloc(sizeof(*t), GFP_KERNEL);
if (t == NULL) {
return_error = BR_FAILED_REPLY;
goto err_alloc_t_failed;
}
binder_stats_created(BINDER_STAT_TRANSACTION);
tcomplete = kzalloc(sizeof(*tcomplete), GFP_KERNEL);
if (tcomplete == NULL) {
return_error = BR_FAILED_REPLY;
goto err_alloc_tcomplete_failed;
}
binder_stats_created(BINDER_STAT_TRANSACTION_COMPLETE);
......
// copy the reply IPC data into the client's binder_buffer
offp = (binder_size_t *)(t->buffer->data +
ALIGN(tr->data_size, sizeof(void *)));
if (copy_from_user(t->buffer->data, (const void __user *)(uintptr_t)
tr->data.ptr.buffer, tr->data_size)) {
binder_user_error("%d:%d got transaction with invalid data ptr\n",
proc->pid, thread->pid);
return_error = BR_FAILED_REPLY;
goto err_copy_data_failed;
}
if (copy_from_user(offp, (const void __user *)(uintptr_t)
tr->data.ptr.offsets, tr->offsets_size)) {
binder_user_error("%d:%d got transaction with invalid offsets ptr\n",
proc->pid, thread->pid);
return_error = BR_FAILED_REPLY;
goto err_copy_data_failed;
}
......
off_end = (void *)offp + tr->offsets_size;
// walk the flat_binder_objects being passed across processes and translate them
for (; offp < off_end; offp++) {
struct flat_binder_object *fp;
if (*offp > t->buffer->data_size - sizeof(*fp) ||
t->buffer->data_size < sizeof(*fp) ||
!IS_ALIGNED(*offp, sizeof(u32))) {
binder_user_error("%d:%d got transaction with invalid offset, %lld\n",
proc->pid, thread->pid, (u64)*offp);
return_error = BR_FAILED_REPLY;
goto err_bad_offset;
}
fp = (struct flat_binder_object *)(t->buffer->data + *offp);
switch (fp->type) {
......
case BINDER_TYPE_HANDLE:
case BINDER_TYPE_WEAK_HANDLE: {
// here the type is BINDER_TYPE_HANDLE
struct binder_ref *ref;
// resolve the binder reference object from the reference handle
ref = binder_get_ref(proc, fp->handle,
fp->type == BINDER_TYPE_HANDLE);
......
if (ref->node->proc == target_proc) {
// the binder node behind this reference lives in the client process itself
if (fp->type == BINDER_TYPE_HANDLE)
fp->type = BINDER_TYPE_BINDER;
else
fp->type = BINDER_TYPE_WEAK_BINDER;
fp->binder = ref->node->ptr;
fp->cookie = ref->node->cookie;
binder_inc_node(ref->node, fp->type == BINDER_TYPE_BINDER, 0, NULL);
trace_binder_transaction_ref_to_node(t, ref);
......
} else {
struct binder_ref *new_ref;
// create (or find) a binder reference to the service's node in the client process
new_ref = binder_get_ref_for_node(target_proc, ref->node);
if (new_ref == NULL) {
return_error = BR_FAILED_REPLY;
goto err_binder_get_ref_for_node_failed;
}
// rewrite the handle in the flat_binder_object to the client process's reference handle
fp->binder = 0;
fp->handle = new_ref->desc;
fp->cookie = 0;
binder_inc_ref(new_ref, fp->type == BINDER_TYPE_HANDLE, NULL);
trace_binder_transaction_ref_to_ref(t, ref,
new_ref);
binder_debug(BINDER_DEBUG_TRANSACTION,
" ref %d desc %d -> ref %d desc %d (node %d)\n",
ref->debug_id, ref->desc, new_ref->debug_id,
new_ref->desc, ref->node->debug_id);
}
} break;
......
}
}
if (reply) {
BUG_ON(t->buffer->async_transaction != 0);
// the initiator is about to receive the reply; release the transaction from its stack
binder_pop_transaction(target_thread, in_reply_to);
}
......
t->work.type = BINDER_WORK_TRANSACTION;
list_add_tail(&t->work.entry, target_list);
tcomplete->type = BINDER_WORK_TRANSACTION_COMPLETE;
list_add_tail(&tcomplete->entry, &thread->todo);
// wake up the client to process the reply
if (target_wait) {
if (reply || !(t->flags & TF_ONE_WAY))
wake_up_interruptible_sync(target_wait);
else
wake_up_interruptible(target_wait);
}
return;
......
}
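The BINDER_TYPE_HANDLE branch above is the heart of Binder object transfer, so it is worth isolating. The following self-contained model (our own names, not driver code) captures just the rewrite rule:

#include <cstdint>

struct Proc;                                          // stands in for binder_proc
struct Node { Proc *owner; uintptr_t ptr, cookie; };  // stands in for binder_node

enum class WireType { Binder, Handle };
struct WireObject { WireType type; uintptr_t binder, cookie; uint32_t handle; };

// Rewrite a handle-typed object as it is copied into target_proc's buffer.
// desc_in_target models what binder_get_ref_for_node() would hand out.
void translate_handle(WireObject &fp, const Node &node, const Proc *target_proc,
                      uint32_t desc_in_target)
{
    if (node.owner == target_proc) {
        // The receiver owns the real object: collapse the reference back
        // into a direct pointer (BINDER_TYPE_HANDLE -> BINDER_TYPE_BINDER).
        fp.type = WireType::Binder;
        fp.binder = node.ptr;
        fp.cookie = node.cookie;
    } else {
        // The receiver is a different process: it gets its own reference,
        // and the handle field is rewritten to that reference's desc.
        fp.type = WireType::Handle;
        fp.binder = 0;
        fp.cookie = 0;
        fp.handle = desc_in_target;
    }
}

For our query, the client is not SurfaceFlinger's process, so the second branch applies: a new binder_ref to SurfaceFlinger's node is created in the client process, and fp->handle becomes its desc; that is the handle the client will soon wrap in a BpBinder.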
Now let's see how the querying client receives the reply, starting from binder_thread_read().
static int binder_thread_read(struct binder_proc *proc,
struct binder_thread *thread,
binder_uintptr_t binder_buffer, size_t size,
binder_size_t *consumed, int non_block)
{
void __user *buffer = (void __user *)(uintptr_t)binder_buffer;
void __user *ptr = buffer + *consumed;
void __user *end = buffer + size;
......
while (1) {
uint32_t cmd;
struct binder_transaction_data tr;
struct binder_work *w;
struct binder_transaction *t = NULL;
// pick up the next binder_work item
if (!list_empty(&thread->todo)) {
w = list_first_entry(&thread->todo, struct binder_work,
entry);
} else if (!list_empty(&proc->todo) && wait_for_proc_work) {
w = list_first_entry(&proc->todo, struct binder_work,
entry);
} else {
/* no data added */
if (ptr - buffer == 4 &&
!(thread->looper & BINDER_LOOPER_STATE_NEED_RETURN))
goto retry;
break;
}
......
switch (w->type) {
// recover the enclosing binder_transaction from the binder_work
case BINDER_WORK_TRANSACTION: {
t = container_of(w, struct binder_transaction, work);
} break;
......
}
......
// for a reply, t->buffer->target_node is NULL, so the else branch below runs
if (t->buffer->target_node) {
struct binder_node *target_node = t->buffer->target_node;
tr.target.ptr = target_node->ptr;
tr.cookie = target_node->cookie;
t->saved_priority = task_nice(current);
if (t->priority < target_node->min_priority &&
!(t->flags & TF_ONE_WAY))
binder_set_nice(t->priority);
else if (!(t->flags & TF_ONE_WAY) ||
t->saved_priority > target_node->min_priority)
binder_set_nice(target_node->min_priority);
cmd = BR_TRANSACTION;
} else {
// the client is receiving a reply
tr.target.ptr = 0;
tr.cookie = 0;
cmd = BR_REPLY;
}
tr.code = t->code;
tr.flags = t->flags;
......
tr.data_size = t->buffer->data_size;
tr.offsets_size = t->buffer->offsets_size;
tr.data.ptr.buffer = (binder_uintptr_t)(
(uintptr_t)t->buffer->data +
proc->user_buffer_offset);
tr.data.ptr.offsets = tr.data.ptr.buffer +
ALIGN(t->buffer->data_size,
sizeof(void *));
// copy the binder return command to user space
if (put_user(cmd, (uint32_t __user *)ptr))
return -EFAULT;
ptr += sizeof(uint32_t);
// copy the binder_transaction_data to user space
if (copy_to_user(ptr, &tr, sizeof(tr)))
return -EFAULT;
ptr += sizeof(tr);
......
// remove the work item from the pending list
list_del(&t->work.entry);
// allow user space to free this binder_buffer
t->buffer->allow_user_free = 1;
if (cmd == BR_TRANSACTION && !(t->flags & TF_ONE_WAY)) {
t->to_parent = thread->transaction_stack;
t->to_thread = thread;
thread->transaction_stack = t;
} else {
// the transaction is complete; free the binder_transaction
t->buffer->transaction = NULL;
kfree(t);
binder_stats_deleted(BINDER_STAT_TRANSACTION);
}
break;
}
done:
// the amount of data the binder driver returned
*consumed = ptr - buffer;
......
}
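Note that the payload is not copied a second time for the client. tr.data.ptr.buffer is computed from the kernel-side t->buffer->data plus proc->user_buffer_offset: in this kernel generation the same pages are mapped once in the kernel and once in the receiver's mmap'ed Binder area, a fixed offset apart. The fix-up is literally one addition (our wrapper, shown for emphasis):

#include <cstddef>
#include <cstdint>

// Same physical pages, two virtual mappings: convert a kernel-side buffer
// address into the receiving process's view of that buffer.
uintptr_t to_user_ptr(uintptr_t kernel_ptr, ptrdiff_t user_buffer_offset)
{
    return kernel_ptr + user_buffer_offset;
}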
Next comes the client's user-space handling of the returned data, starting from waitForResponse().
status_t IPCThreadState::waitForResponse(Parcel *reply, status_t *acquireResult)
{
uint32_t cmd;
int32_t err;
while (1) {
if ((err=talkWithDriver()) < NO_ERROR) break;
// back in user space; mIn now holds the data returned by the driver
err = mIn.errorCheck();
if (err < NO_ERROR) break;
if (mIn.dataAvail() == 0) continue;
cmd = (uint32_t)mIn.readInt32();
......
// the return command from the binder driver (BR_NOOP ignored)
switch (cmd) {
......
case BR_REPLY:
{
binder_transaction_data tr;
err = mIn.read(&tr, sizeof(tr));
ALOG_ASSERT(err == NO_ERROR, "Not enough command data for brREPLY");
if (err != NO_ERROR) goto finish;
// reply is non-null on this path
if (reply) {
if ((tr.flags & TF_STATUS_CODE) == 0) {
// flags is 0 on this path
// hand the received IPC data over to reply
reply->ipcSetDataReference(
reinterpret_cast<const uint8_t*>(tr.data.ptr.buffer),
tr.data_size,
reinterpret_cast<const binder_size_t*>(tr.data.ptr.offsets),
tr.offsets_size/sizeof(binder_size_t),
freeBuffer, this);
} else {
err = *reinterpret_cast<const status_t*>(tr.data.ptr.buffer);
freeBuffer(NULL,
reinterpret_cast<const uint8_t*>(tr.data.ptr.buffer),
tr.data_size,
reinterpret_cast<const binder_size_t*>(tr.data.ptr.offsets),
tr.offsets_size/sizeof(binder_size_t), this);
}
} else {
......
}
}
......
}
......
}
......
}
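A detail worth pausing on before we continue: ipcSetDataReference() does not copy the payload. The Parcel points straight into the mmap'ed Binder buffer and registers freeBuffer as its release function, so BC_FREE_BUFFER is only queued once the Parcel is done with the data. A minimal model of this adopt-and-release pattern (our own class, not the AOSP API):

#include <cstddef>
#include <cstdint>
#include <functional>

// Remembers someone else's buffer plus the callback that must run when we
// are done reading it (cf. Parcel::freeBuffer, which queues BC_FREE_BUFFER).
class AdoptedData {
public:
    using Release = std::function<void(const uint8_t *, size_t)>;

    AdoptedData(const uint8_t *data, size_t size, Release release)
        : data_(data), size_(size), release_(std::move(release)) {}
    ~AdoptedData() { if (release_) release_(data_, size_); }

    const uint8_t *data() const { return data_; }
    size_t size() const { return size_; }

private:
    const uint8_t *data_;
    size_t size_;
    Release release_;
};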
At this point reply references the IPC data returned by ServiceManager: its payload is a single flat_binder_object of type BINDER_TYPE_HANDLE, as we will see below. After IPCThreadState's waitForResponse() finishes, control returns to its caller transact(), and from there to IServiceManager's checkService(). Next we analyze how checkService() processes reply.
virtual sp<IBinder> checkService( const String16& name) const
{
Parcel data, reply;
data.writeInterfaceToken(IServiceManager::getInterfaceDescriptor());
data.writeString16(name);
// after BpBinder::transact() returns, readStrongBinder() parses the binder object out of reply
remote()->transact(CHECK_SERVICE_TRANSACTION, data, &reply);
return reply.readStrongBinder();
}
Next, the implementation of readStrongBinder():
sp<IBinder> Parcel::readStrongBinder() const
{
sp<IBinder> val;
// Note that a lot of code in Android reads binders by hand with this
// method, and that code has historically been ok with getting nullptr
// back (while ignoring error codes).
// readNullableStrongBinder() in turn calls unflatten_binder() to parse the binder object
readNullableStrongBinder(&val);
return val;
}
Next, the implementation of unflatten_binder():
status_t unflatten_binder(const sp<ProcessState>& proc,
const Parcel& in, sp<IBinder>* out)
{
// fetch the flat_binder_object from reply
const flat_binder_object* flat = in.readObject(false);
if (flat) {
switch (flat->type) {
case BINDER_TYPE_BINDER:
*out = reinterpret_cast<IBinder*>(flat->cookie);
return finish_unflatten_binder(NULL, *flat, in);
case BINDER_TYPE_HANDLE:
// here the type is BINDER_TYPE_HANDLE and flat->handle is the binder reference handle
*out = proc->getStrongProxyForHandle(flat->handle);
return finish_unflatten_binder(
static_cast<BpBinder*>(out->get()), *flat, in);
}
}
return BAD_TYPE;
}
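For completeness: in the AOSP releases this walkthrough is based on, finish_unflatten_binder() is a trivial hook in the same Parcel.cpp that simply reports success:

inline static status_t finish_unflatten_binder(
    BpBinder* /*proxy*/, const flat_binder_object& /*flat*/, const Parcel& /*in*/)
{
    return NO_ERROR;
}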
Next, the implementation of getStrongProxyForHandle():
sp<IBinder> ProcessState::getStrongProxyForHandle(int32_t handle)
{
sp<IBinder> result;
AutoMutex _l(mLock);
// look up the proxy cache entry for this binder reference handle
handle_entry* e = lookupHandleLocked(handle);
if (e != NULL) {
// We need to create a new BpBinder if there isn't currently one, OR we
// are unable to acquire a weak reference on this current one. See comment
// in getWeakProxyForHandle() for more info about this.
IBinder* b = e->binder;
if (b == NULL || !e->refs->attemptIncWeak(this)) {
if (handle == 0) {
......
Parcel data;
status_t status = IPCThreadState::self()->transact(
0, IBinder::PING_TRANSACTION, data, NULL, 0);
if (status == DEAD_OBJECT)
return NULL;
}
// create the binder proxy object
b = new BpBinder(handle);
// cache the binder proxy
e->binder = b;
if (b) e->refs = b->getWeakRefs();
result = b;
} else {
// This little bit of nastyness is to allow us to add a primary
// reference to the remote proxy when this team doesn't have one
// but another team is sending the handle to us.
result.force_set(b);
e->refs->decWeak(this);
}
}
return result;
}
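lookupHandleLocked() indexes a per-process vector of handle_entry slots that grows on demand, and each slot only holds weak bookkeeping, so a proxy that has died is transparently recreated. The caching idea, as a self-contained model (our own types, not the AOSP classes):

#include <memory>
#include <vector>

struct Proxy { explicit Proxy(int h) : handle(h) {} int handle; };

class ProxyCache {
public:
    // At most one live proxy per handle: reuse it while it is alive,
    // otherwise create a fresh one and re-cache it weakly.
    std::shared_ptr<Proxy> get(int handle) {
        if (handle >= static_cast<int>(slots_.size()))
            slots_.resize(handle + 1);        // grow on demand, like lookupHandleLocked()
        if (auto alive = slots_[handle].lock())
            return alive;                     // cache hit
        auto fresh = std::make_shared<Proxy>(handle);
        slots_[handle] = fresh;               // cache weakly; no ownership kept
        return fresh;
    }

private:
    std::vector<std::weak_ptr<Proxy>> slots_; // indexed by handle
};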
getStrongProxyForHandle() returns the BpBinder proxy, so checkService() ultimately returns a BpBinder. Back in the original getService(), we can now analyze how the service proxy BpSurfaceComposer is created from that BpBinder.
template<typename INTERFACE>
status_t getService(const String16& name, sp<INTERFACE>* outService)
{
const sp<IServiceManager> sm = defaultServiceManager();
if (sm != NULL) {
// sm->getService() returns the BpBinder proxy; interface_cast converts it
*outService = interface_cast<INTERFACE>(sm->getService(name));
if ((*outService) != NULL) return NO_ERROR;
}
return NAME_NOT_FOUND;
}
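A typical call site looks like this (a sketch; SurfaceFlinger registers itself under the name "SurfaceFlinger", and header paths vary by release):

#include <binder/IServiceManager.h>  // getService(), String16
#include <gui/ISurfaceComposer.h>

void obtainSurfaceFlinger()
{
    sp<ISurfaceComposer> composer;
    if (getService(String16("SurfaceFlinger"), &composer) == NO_ERROR) {
        // composer is a BpSurfaceComposer wrapping the BpBinder created
        // for the handle that ServiceManager returned.
    }
}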
interface_cast<INTERFACE>() is an inline function; it calls INTERFACE::asInterface() to create BpSurfaceComposer. Let's look at how asInterface() is put together.
The declaration of asInterface() comes from the DECLARE_META_INTERFACE macro:
#define DECLARE_META_INTERFACE(INTERFACE) \
static const android::String16 descriptor; \
static android::sp<I##INTERFACE> asInterface( \
const android::sp<android::IBinder>& obj); \
virtual const android::String16& getInterfaceDescriptor() const; \
I##INTERFACE(); \
virtual ~I##INTERFACE(); \
Its implementation comes from the IMPLEMENT_META_INTERFACE macro:
#define IMPLEMENT_META_INTERFACE(INTERFACE, NAME) \
const android::String16 I##INTERFACE::descriptor(NAME); \
const android::String16& \
I##INTERFACE::getInterfaceDescriptor() const { \
return I##INTERFACE::descriptor; \
} \
android::sp<I##INTERFACE> I##INTERFACE::asInterface( \
const android::sp<android::IBinder>& obj) \
{ \
android::sp<I##INTERFACE> intr; \
if (obj != NULL) { \
intr = static_cast<I##INTERFACE*>( \
obj->queryLocalInterface( \
I##INTERFACE::descriptor).get()); \
if (intr == NULL) { \
intr = new Bp##INTERFACE(obj); \
} \
} \
return intr; \
} \
I##INTERFACE::I##INTERFACE() { } \
I##INTERFACE::~I##INTERFACE() { } \
#define CHECK_INTERFACE(interface, data, reply) \
if (!(data).checkInterface(this)) { return PERMISSION_DENIED; } \
The declaration and implementation of asInterface() for ISurfaceComposer:
The declaration is in ISurfaceComposer.h:
class ISurfaceComposer: public IInterface {
public:
DECLARE_META_INTERFACE(SurfaceComposer);
......
}
The implementation is in ISurfaceComposer.cpp:
......
IMPLEMENT_META_INTERFACE(SurfaceComposer, "android.ui.ISurfaceComposer");
......
Expanding the macro IMPLEMENT_META_INTERFACE(SurfaceComposer, "android.ui.ISurfaceComposer") yields the following asInterface() code:
android::sp<ISurfaceComposer> ISurfaceComposer::asInterface(
const android::sp<android::IBinder>& obj)
{
android::sp<ISurfaceComposer> intr;
if (obj != NULL) {
// obj is a BpBinder here, so queryLocalInterface() returns NULL
intr = static_cast<ISurfaceComposer*>(
obj->queryLocalInterface(
ISurfaceComposer::descriptor).get());
if (intr == NULL) {
// create BpSurfaceComposer
intr = new BpSurfaceComposer(obj);
}
}
return intr;
}
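BpSurfaceComposer follows the standard Bp-class pattern (abridged sketch below): its constructor forwards obj to BpInterface, whose BpRefBase base stores it as mRemote, the same pointer that remote() returned inside checkService() earlier.

class BpSurfaceComposer : public BpInterface<ISurfaceComposer> {
public:
    BpSurfaceComposer(const sp<IBinder>& impl)
        : BpInterface<ISurfaceComposer>(impl) {}  // impl ends up as BpRefBase::mRemote
    // ...... every proxy method marshals a Parcel and calls remote()->transact()
};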
This completes our walkthrough of the overall flow of obtaining the SurfaceFlinger service.