Why inter-process communication needs help from the kernel: the kernel address space is shared by all processes, but each process has its own virtual user address space (each sees a full 4GB on 32-bit systems) that cannot be shared directly, so a driver like Binder is needed to route the data through the kernel.
The Binder framework defines four roles: Server, Client, ServiceManager (hereafter SMgr) and the Binder driver. Server, Client and SMgr run in user space, while the driver runs in kernel space. The relationship between these four is similar to the Internet: the Server is a web server, the Client is a client terminal, SMgr is the domain name server (DNS), and the driver is the router.
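As a concrete picture of these roles, a Client typically looks up a Server's Binder reference by name through the "DNS" and then talks to the Server via the returned proxy. A hedged sketch of that client side (the service name "media.player" and the wrapper function are only illustrative; defaultServiceManager() and getService() are the real entry points):

#include <binder/IServiceManager.h>
#include <binder/IBinder.h>
#include <utils/String16.h>

android::sp<android::IBinder> lookupServiceExample() {
    // Ask the ServiceManager (the "DNS") for the Binder reference registered under this name
    android::sp<android::IServiceManager> sm = android::defaultServiceManager();
    android::sp<android::IBinder> binder = sm->getService(android::String16("media.player"));
    // interface_cast<...>(binder) would then turn this into a typed proxy for the Server
    return binder;
}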
Advantages of Binder
One copy: the driver only needs copy_from_user to copy the client's data into a buffer that was mmap'ed in advance; because that buffer is also mapped into the server process, the server can access the data directly and the copy_to_user step is avoided.
Security: with traditional IPC the sender can only fill its UID/PID into the data packet itself, whereas the UID in a Binder transaction is filled in automatically by the driver; Binder also supports both registered (named) and anonymous Binders, so it offers stronger security.
Only an ioctl interface is provided, not write and read; a single ioctl call can first write and then read (see the sketch below).
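Regarding the last point, the write and the read are carried by a single BINDER_WRITE_READ ioctl through a binder_write_read structure. A minimal sketch, assuming the binder UAPI header <linux/android/binder.h> is available; the helper function itself is hypothetical:

#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/android/binder.h>

// Submit a batch of BC_* commands and receive BR_* replies in one syscall
int binder_write_then_read(int fd, void *out, size_t out_size, void *in, size_t in_size) {
    struct binder_write_read bwr = {0};
    bwr.write_buffer = (binder_uintptr_t)out;  // commands, e.g. BC_TRANSACTION + payload
    bwr.write_size   = out_size;
    bwr.read_buffer  = (binder_uintptr_t)in;   // replies, e.g. BR_TRANSACTION_COMPLETE / BR_REPLY
    bwr.read_size    = in_size;
    // The driver consumes the write buffer first, then fills the read buffer
    if (ioctl(fd, BINDER_WRITE_READ, &bwr) < 0)
        return -1;
    printf("consumed %llu bytes written, %llu bytes read\n",
           (unsigned long long)bwr.write_consumed,
           (unsigned long long)bwr.read_consumed);
    return 0;
}

This is exactly the shape that IPCThreadState::talkWithDriver(), shown further below, relies on.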
ServiceManager(DNS)
ServiceManager startup
// service_manager.c
int main()
{
struct binder_state *bs;
    bs = binder_open(128*1024); // open the binder driver and map 128KB of virtual address space
if (binder_become_context_manager(bs)) {
ALOGE("cannot become context manager (%s)\n", strerror(errno));
return -1;
}
selinux_enabled = is_selinux_enabled();
sehandle = selinux_android_service_context_handle();
selinux_status_open(true);
union selinux_callback cb;
cb.func_audit = audit_callback;
selinux_set_callback(SELINUX_CB_AUDIT, cb);
cb.func_log = selinux_log_callback;
selinux_set_callback(SELINUX_CB_LOG, cb);
    binder_loop(bs, svcmgr_handler); // enter the loop and wait for other processes to addService / findService ...
return 0;
}
binder_open opens the /dev/binder driver and maps it into the ServiceManager process:
// binder.c
struct binder_state *binder_open(size_t mapsize) {
struct binder_state *bs;
struct binder_version vers;
bs = malloc(sizeof(*bs));
bs->fd = open("/dev/binder", O_RDWR | O_CLOEXEC);
bs->mapsize = mapsize;
    bs->mapped = mmap(NULL, mapsize, PROT_READ, MAP_PRIVATE, bs->fd, 0); // only maps a 128KB virtual address range
return bs;
}
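The binder_become_context_manager() call seen in main() is just another ioctl on the same fd. Its implementation in binder.c is essentially the following (simplified; newer kernels also accept BINDER_SET_CONTEXT_MGR_EXT):

// binder.c (simplified): register the calling process as the context manager,
// i.e. the owner of handle 0
int binder_become_context_manager(struct binder_state *bs)
{
    return ioctl(bs->fd, BINDER_SET_CONTEXT_MGR, 0);
}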
addService
Called when adding a service:
// IServiceManager.cpp
virtual status_t addService(const String16& name, const sp<IBinder>& service, bool allowIsolated)
{
Parcel data, reply;
data.writeInterfaceToken(IServiceManager::getInterfaceDescriptor());
data.writeString16(name);
data.writeStrongBinder(service);
data.writeInt32(allowIsolated ? 1 : 0);
status_t err = remote()->transact(ADD_SERVICE_TRANSACTION, data, &reply);
return err == NO_ERROR ? reply.readExceptionCode() : err;
}
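For completeness, this is how a native service typically ends up in the method above. The service name "my.service" and registerMyService() are hypothetical; only defaultServiceManager()->addService() is the real entry point:

#include <binder/IServiceManager.h>
#include <utils/String16.h>

void registerMyService(const android::sp<android::IBinder>& service) {
    // Fills the Parcel shown above and sends ADD_SERVICE_TRANSACTION to handle 0 (the ServiceManager)
    android::defaultServiceManager()->addService(android::String16("my.service"), service);
}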
remote()->transact() then goes into BpBinder::transact:
// BpBinder.cpp
status_t BpBinder::transact(
uint32_t code, const Parcel& data, Parcel* reply, uint32_t flags)
{
// Once a binder has died, it will never come back to life.
if (mAlive) {
status_t status = IPCThreadState::self()->transact(
            mHandle, code, data, reply, flags); // mHandle here is actually 0 (the ServiceManager's binder handle); the BpBinder was created in defaultServiceManager() with 0 passed in
if (status == DEAD_OBJECT) mAlive = 0;
return status;
}
return DEAD_OBJECT;
}
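For context on why mHandle is 0: the proxy that addService() calls through is created in defaultServiceManager(). A simplified sketch based on older AOSP releases of IServiceManager.cpp / ProcessState.cpp (locking and retry logic omitted; details differ by version):

sp<IServiceManager> defaultServiceManager()
{
    if (gDefaultServiceManager == NULL) {
        // getContextObject(NULL) boils down to getStrongProxyForHandle(0),
        // i.e. a BpBinder wrapping handle 0 -- the ServiceManager
        gDefaultServiceManager = interface_cast<IServiceManager>(
                ProcessState::self()->getContextObject(NULL));
    }
    return gDefaultServiceManager;
}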
Next we enter IPCThreadState::transact:
status_t IPCThreadState::transact(int32_t handle,
uint32_t code, const Parcel& data,
Parcel* reply, uint32_t flags)
{
status_t err = data.errorCheck();
flags |= TF_ACCEPT_FDS;
if (err == NO_ERROR) {
        err = writeTransactionData(BC_TRANSACTION, flags, handle, code, data, NULL); // writes the data into mOut as a binder_transaction_data; it is actually sent down by talkWithDriver() inside waitForResponse(); note BC_TRANSACTION, which the binder driver parses later
}
if ((flags & TF_ONE_WAY) == 0) {
if (reply) {
            err = waitForResponse(reply); // starts writing to the driver and waits for the reply (a non-oneway binder call is synchronous)
} else {
Parcel fakeReply;
err = waitForResponse(&fakeReply);
}
} else {
err = waitForResponse(NULL, NULL);
}
return err;
}
writeTransactionData stages the user data in mOut:
status_t IPCThreadState::writeTransactionData(int32_t cmd, uint32_t binderFlags,
int32_t handle, uint32_t code, const Parcel& data, status_t* statusBuffer)
{
binder_transaction_data tr;
tr.target.ptr = 0; /* Don't pass uninitialized stack data to a remote process */
tr.target.handle = handle;
tr.code = code;
tr.flags = binderFlags;
tr.cookie = 0;
tr.sender_pid = 0;
tr.sender_euid = 0;
const status_t err = data.errorCheck();
if (err == NO_ERROR) {
tr.data_size = data.ipcDataSize();
tr.data.ptr.buffer = data.ipcData();
tr.offsets_size = data.ipcObjectsCount()*sizeof(binder_size_t);
tr.data.ptr.offsets = data.ipcObjects();
}
    // Important:
    mOut.writeInt32(cmd);        // cmd tells the driver which operation to perform, BC_TRANSACTION here
    mOut.write(&tr, sizeof(tr)); // becomes part of bwr.write_buffer in the driver
return NO_ERROR;
}
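For reference, tr above is a binder_transaction_data; its layout in the binder UAPI header looks roughly like this (comments mine; field types may differ slightly between kernel versions):

struct binder_transaction_data {
    union {
        __u32            handle;   // target handle for BC_TRANSACTION (0 = ServiceManager)
        binder_uintptr_t ptr;      // target binder node for BR_TRANSACTION
    } target;
    binder_uintptr_t cookie;       // target object cookie
    __u32            code;         // transaction code, e.g. ADD_SERVICE_TRANSACTION
    __u32            flags;        // e.g. TF_ONE_WAY, TF_ACCEPT_FDS
    pid_t            sender_pid;   // filled in by the driver, not by user space
    uid_t            sender_euid;  // filled in by the driver, not by user space
    binder_size_t    data_size;    // bytes of Parcel data
    binder_size_t    offsets_size; // bytes of object offsets
    union {
        struct {
            binder_uintptr_t buffer;   // pointer to the Parcel data
            binder_uintptr_t offsets;  // offsets of flat_binder_object entries within buffer
        } ptr;
        __u8 buf[8];
    } data;
};

waitForResponse() then drives the actual exchange with the driver and parses the replies: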
status_t IPCThreadState::waitForResponse(Parcel *reply, status_t *acquireResult) {
uint32_t cmd;
int32_t err;
while (1) {
        if ((err=talkWithDriver()) < NO_ERROR) break; // calls the binder ioctl to write the data and read the reply
err = mIn.errorCheck();
if (mIn.dataAvail() == 0) continue;
cmd = (uint32_t)mIn.readInt32();
        switch (cmd) { // dispatch on the reply type
case BR_TRANSACTION_COMPLETE:
if (!reply && !acquireResult) goto finish;
break;
case BR_DEAD_REPLY:
err = DEAD_OBJECT;
goto finish;
case BR_FAILED_REPLY:
err = FAILED_TRANSACTION;
goto finish;
case BR_ACQUIRE_RESULT:
{
ALOG_ASSERT(acquireResult != NULL, "Unexpected brACQUIRE_RESULT");
const int32_t result = mIn.readInt32();
if (!acquireResult) continue;
*acquireResult = result ? NO_ERROR : INVALID_OPERATION;
}
goto finish;
case BR_REPLY:
{
binder_transaction_data tr;
err = mIn.read(&tr, sizeof(tr));
ALOG_ASSERT(err == NO_ERROR, "Not enough command data for brREPLY");
if (err != NO_ERROR) goto finish;
if (reply) {
if ((tr.flags & TF_STATUS_CODE) == 0) {
reply->ipcSetDataReference(
reinterpret_cast<const uint8_t*>(tr.data.ptr.buffer),
tr.data_size,
reinterpret_cast<const binder_size_t*>(tr.data.ptr.offsets),
tr.offsets_size/sizeof(binder_size_t),
freeBuffer, this);
} else {
err = *reinterpret_cast<const status_t*>(tr.data.ptr.buffer);
freeBuffer(NULL,
reinterpret_cast<const uint8_t*>(tr.data.ptr.buffer),
tr.data_size,
reinterpret_cast<const binder_size_t*>(tr.data.ptr.offsets),
tr.offsets_size/sizeof(binder_size_t), this);
}
} else {
freeBuffer(NULL,
reinterpret_cast<const uint8_t*>(tr.data.ptr.buffer),
tr.data_size,
reinterpret_cast<const binder_size_t*>(tr.data.ptr.offsets),
tr.offsets_size/sizeof(binder_size_t), this);
continue;
}
}
goto finish;
default:
err = executeCommand(cmd);
if (err != NO_ERROR) goto finish;
break;
}
}
return err;
}
status_t IPCThreadState::talkWithDriver(bool doReceive)
{
binder_write_read bwr;
// Is the read buffer empty?
const bool needRead = mIn.dataPosition() >= mIn.dataSize();
const size_t outAvail = (!doReceive || needRead) ? mOut.dataSize() : 0;
bwr.write_size = outAvail;
    bwr.write_buffer = (uintptr_t)mOut.data(); // this becomes bwr.write_buffer as seen by the driver
// This is what we'll read.
if (doReceive && needRead) {
bwr.read_size = mIn.dataCapacity();
bwr.read_buffer = (uintptr_t)mIn.data();
} else {
bwr.read_size = 0;
bwr.read_buffer = 0;
}
// Return immediately if there is nothing to do.
if ((bwr.write_size == 0) && (bwr.read_size == 0)) return NO_ERROR;
bwr.write_consumed = 0;
bwr.read_consumed = 0;
status_t err;
do {
#if defined(__ANDROID__)
if (ioctl(mProcess->mDriverFD, BINDER_WRITE_READ, &bwr) >= 0)
err = NO_ERROR;
else
err = -errno;
#else
err = INVALID_OPERATION;
#endif
    } while (err == -EINTR);
    // (The original function also consumes mOut/mIn here based on
    // bwr.write_consumed / bwr.read_consumed; that bookkeeping is omitted.)
    return err;
}
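For reference, the bwr filled above is a binder_write_read, defined in the binder UAPI header roughly as follows (comments mine):

struct binder_write_read {
    binder_size_t    write_size;     // bytes available in write_buffer
    binder_size_t    write_consumed; // bytes the driver actually consumed
    binder_uintptr_t write_buffer;   // mOut.data() on this side
    binder_size_t    read_size;      // capacity of read_buffer
    binder_size_t    read_consumed;  // bytes the driver filled in
    binder_uintptr_t read_buffer;    // mIn.data() on this side
};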
Now we are in the driver. Since this call is only writing, it goes into binder_thread_write rather than binder_thread_read:
static int binder_thread_write(struct binder_proc *proc,
struct binder_thread *thread,
binder_uintptr_t binder_buffer, size_t size,
binder_size_t *consumed) {
uint32_t cmd;
void __user *buffer = (void __user *)(uintptr_t)binder_buffer;
void __user *ptr = buffer + *consumed;
void __user *end = buffer + size;
while (ptr < end && thread->return_error == BR_OK) {
        if (get_user_preempt_disabled(cmd, (uint32_t __user *)ptr)) // read the cmd that user space wrote into mOut, BC_TRANSACTION here
return -EFAULT;
ptr += sizeof(uint32_t);
trace_binder_command(cmd);
if (_IOC_NR(cmd) < ARRAY_SIZE(binder_stats.bc)) {
binder_stats.bc[_IOC_NR(cmd)]++;
proc->stats.bc[_IOC_NR(cmd)]++;
thread->stats.bc[_IOC_NR(cmd)]++;
}
switch (cmd) {
....
        case BC_TRANSACTION: // matches the cmd written by writeTransactionData in user space
case BC_REPLY: {
struct binder_transaction_data tr;
            if (copy_from_user_preempt_disabled(&tr, ptr, sizeof(tr))) // copies the binder_transaction_data staged in mOut from user space into the kernel; the payload itself is copied into the target's mmap'ed buffer later, inside binder_transaction() -- that later copy is Binder's "one copy"
return -EFAULT;
ptr += sizeof(tr);
            binder_transaction(proc, thread, &tr, cmd == BC_REPLY); // start the transaction
break;
}
}
        *consumed = ptr - buffer; // report how much of the write buffer was processed
    }
    return 0;
}
To be continued
References
Android Binder设计与实现 - 设计篇 (a very thorough explanation of the design)