AMS: its BBinder lives in the system_server process.
The client (C) first obtains amsBpBinder (this step goes through the driver and the service_manager process).
C creates a ServiceConnection (JavaBBinderHolder): sc.
amsBpBinder.transact(intent, sc)
->during writeStrongBinder, the JavaBBinderHolder is materialized as a BBinder
->into the kernel via ioctl
->into amsBBinder.onTransact()
->during readStrongBinder, AMS ends up holding scBpBinder
AMS then runs its own logic: check whether the Service's process is running, resolve the target Service, and obtain S's activityThreadBpBinder (again by talking to the driver and service_manager; along the way S creates its activityThreadBBinder).
activityThreadBpBinder.transact(intent)
->through the driver into sBBinder.onTransact()
->business logic: read the intent, call onBind(intent), which creates S's business-side JavaBBinderHolder
S then calls amsBpBinder.transact(sJavaBBinderHolder)
->the write again produces sBBinder
->through the driver -> amsBBinder.onTransact()
->the read again yields sBpBinder
AMS now calls connected(sBpBinder) on the scBpBinder it obtained earlier
->scBpBinder.transact()
->through the driver, back into the C process
In C's onTransact, the read creates C's own sBpBinder,
and connected(sBpBinder) is invoked.
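The recurring pattern above (one side writes a local BBinder into a Parcel, the other side reads back a BpBinder proxy) is the whole trick. A minimal sketch of that round trip, assuming libbinder's Parcel API; the transaction code and the receiving Parcel name (`SOME_CODE`, `incoming`) are illustrative:

// sender: sc is an object implemented locally, i.e. a BBinder
Parcel data, reply;
data.writeStrongBinder(sc);                      // flattened as BINDER_TYPE_BINDER
amsBpBinder->transact(SOME_CODE, data, &reply);  // crosses the driver

// receiver (here AMS): the driver translated the object in transit,
// so reading it back yields a proxy (BpBinder) referring to sc
sp<IBinder> scProxy = incoming.readStrongBinder();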
/**
 * The binder driver
 * Source: kernel/msm-4.14/drivers/android (binder.c)
 */
/**
 * binder.c: the driver exposes its entry points to application processes by
 * filling in a file_operations table.
 */
Every driver operation requires an open first to obtain an fd; all subsequent calls go through that fd.
static const struct file_operations binder_fops = {
.owner = THIS_MODULE,
.poll = binder_poll,
/**
 * Before Linux 2.6.36, file_operations also had a plain .ioctl member
 * (either .ioctl or .unlocked_ioctl worked); newer kernels keep only
 * unlocked_ioctl. A user process calling ioctl() lands in binder_ioctl.
 */
.unlocked_ioctl = binder_ioctl,
.compat_ioctl = binder_ioctl,
.mmap = binder_mmap,
/**
 * open("/dev/binder", O_RDWR) from user space maps to binder_open
 */
.open = binder_open,
.flush = binder_flush,
.release = binder_release,
};
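To make the mapping concrete, here is a minimal user-space sketch exercising each entry point the way ProcessState will below. The mmap length is illustrative (not the real BINDER_VM_SIZE), and the UAPI header path can vary by kernel:

#include <fcntl.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <linux/android/binder.h>

int fd = open("/dev/binder", O_RDWR | O_CLOEXEC);        // -> binder_open
void *vm = mmap(nullptr, 1024 * 1024, PROT_READ,
                MAP_PRIVATE | MAP_NORESERVE, fd, 0);     // -> binder_mmap
struct binder_write_read bwr = {};                       // empty control block
ioctl(fd, BINDER_WRITE_READ, &bwr);                      // -> binder_ioctl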
//servicemanager service process:
//  relevant files: frameworks/native/cmds/servicemanager (service_manager.c binder.c)
//
//
//application process:
//  relevant files:
//  native layer: frameworks/native/libs/binder (IPCThreadState.cpp ProcessState.cpp Parcel.cpp BpBinder.cpp); headers: frameworks/native/libs/binder/include/binder. Note: some default parameter values are defined in the headers; if a parameter seems to be missing, look there.
//  java layer: frameworks/base/core/java/android/os (ServiceManager.java ServiceManagerNative.java ServiceManagerNative.ServiceManagerProxy BinderProxy Parcel.java)
//              frameworks/base/core/java/com/android/internal/os (BinderInternal.java)
//  jni bridge: frameworks/base/core/jni (android_util_Binder.cpp)
//
Let's start the analysis from addService.
MediaPlayerService.cpp:
First, the service itself (an IBinder) is created:
void MediaPlayerService::instantiate() {
defaultServiceManager()->addService(
String16("media.player"), new MediaPlayerService());
}
IServiceManager.cpp:
//obtain the (per-process singleton) IServiceManager via ProcessState::self
sp<IServiceManager> defaultServiceManager(){
if (gDefaultServiceManager != nullptr) return gDefaultServiceManager;
{
while (gDefaultServiceManager == nullptr) {
gDefaultServiceManager = interface_cast<IServiceManager>(
ProcessState::self()->getContextObject(nullptr));
}
}
return gDefaultServiceManager;
}
ProcessState.cpp (manages each process's binder state):
//ProcessState is likewise a one-per-process singleton
sp<ProcessState> ProcessState::self(){
if (gProcess != nullptr) {
return gProcess;
}
//kDefaultDriver: "/dev/binder", the binder driver node
gProcess = new ProcessState(kDefaultDriver);
return gProcess;
}
//the constructor first calls open_driver, which goes through the driver's binder_open to initialize things and obtain the corresponding fd
ProcessState::ProcessState(const char *driver)
: mDriverName(String8(driver))
, mDriverFD(open_driver(driver))
, mVMStart(MAP_FAILED)
, mThreadCountLock(PTHREAD_MUTEX_INITIALIZER)
, mThreadCountDecrement(PTHREAD_COND_INITIALIZER)
, mExecutingThreadsCount(0)
, mMaxThreads(DEFAULT_MAX_BINDER_THREADS)
, mStarvationStartTimeMs(0)
, mManagesContexts(false)
, mBinderContextCheckFunc(nullptr)
, mBinderContextUserData(nullptr)
, mThreadPoolStarted(false)
, mThreadPoolSeq(1)
, mCallRestriction(CallRestriction::NONE){
if (mDriverFD >= 0) {
//once open has produced a valid fd, mmap invokes the driver's binder_mmap: it maps the same physical pages into both the process's virtual address space and the kernel's
mVMStart = mmap(nullptr, BINDER_VM_SIZE, PROT_READ, MAP_PRIVATE | MAP_NORESERVE, mDriverFD, 0);
if (mVMStart == MAP_FAILED) {
close(mDriverFD);
mDriverFD = -1;
mDriverName.clear();
}
}
}
//binder_open essentially creates a bookkeeping structure for the calling process and stores it away, so the driver holds state for every process that has opened it
static int binder_open(struct inode *nodp, struct file *filp)
{
struct binder_proc *proc;
struct binder_device *binder_dev;
//allocate space for the per-process bookkeeping structure
proc = kzalloc(sizeof(*proc), GFP_KERNEL);
//initialize the todo list
INIT_LIST_HEAD(&proc->todo);
binder_dev = container_of(filp->private_data, struct binder_device,
miscdev);
proc->context = &binder_dev->context;
//initialize the allocator; binder_mmap later works through proc->alloc (it records the pid, links the alloc buffer into the list head, and so on)
binder_alloc_init(&proc->alloc);
//record the pid
proc->pid = current->group_leader->pid;
//initialize the delivered_death list
INIT_LIST_HEAD(&proc->delivered_death);
//initialize the waiting_threads list
INIT_LIST_HEAD(&proc->waiting_threads);
//stash the initialized proc in filp->private_data: every later driver call on this fd can read it back, which is how we always recover proc
filp->private_data = proc;
mutex_lock(&binder_procs_lock);
//add proc to the head of the global binder_procs list
hlist_add_head(&proc->proc_node, &binder_procs);
return 0;
}
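For reference, the proc being filled in above looks roughly like this (abridged from the same binder.c; most fields omitted and order not exact):

struct binder_proc {
    struct hlist_node proc_node;      // links this proc into the global binder_procs list
    int pid;
    struct binder_context *context;
    struct binder_alloc alloc;        // per-process buffer allocator, driven by binder_mmap
    struct list_head todo;            // work queued for this process
    struct list_head delivered_death;
    struct list_head waiting_threads;
    /* ... rbtrees of threads, nodes and refs, etc. ... */
};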
//next, on to getContextObject:
sp<IBinder> ProcessState::getContextObject(const sp<IBinder>& /*caller*/){
return getStrongProxyForHandle(0);
}
//this is where we obtain the BpBinder
sp<IBinder> ProcessState::getStrongProxyForHandle(int32_t handle)
{
sp<IBinder> result;
handle_entry* e = lookupHandleLocked(handle);
if (e != nullptr) {
IBinder* b = e->binder;
if (b == nullptr || !e->refs->attemptIncWeak(this)) {
if (handle == 0) {
Parcel data;
//details aside: when handle is 0 this sends a PING_TRANSACTION, confirming that the context manager (service_manager) is alive before a proxy for it is handed out
status_t status = IPCThreadState::self()->transact(
0, IBinder::PING_TRANSACTION, data, nullptr, 0);
if (status == DEAD_OBJECT)
return nullptr;
}
//create the BpBinder:
b = BpBinder::create(handle);
e->binder = b;
if (b) e->refs = b->getWeakRefs();
result = b;
}
}
return result;
}
//back to IServiceManager.cpp:
interface_cast() takes the BpBinder we obtained from ProcessState::self()->getContextObject(nullptr):
inline sp<INTERFACE> interface_cast(const sp<IBinder>& obj){
//a template call; INTERFACE here is IServiceManager
return INTERFACE::asInterface(obj);
}
//IServiceManager has no handwritten asInterface, though; it is generated by macros
The declaration and definition live in IInterface.h:
#define DECLARE_META_INTERFACE(INTERFACE)
#define IMPLEMENT_META_INTERFACE(INTERFACE, NAME)
They are instantiated for ServiceManager by
IServiceManager.cpp: IMPLEMENT_META_INTERFACE(ServiceManager, "android.os.IServiceManager");
together with DECLARE_META_INTERFACE(ServiceManager) in the header.
So the concrete asInterface(obj) expands to:
::android::sp<IServiceManager> IServiceManager::asInterface( \
const ::android::sp<::android::IBinder>& obj) \
{ \
::android::sp<IServiceManager> intr; \
if (obj != nullptr) { \
intr = static_cast<IServiceManager*>( \
obj->queryLocalInterface( \
IServiceManager::descriptor).get()); \
if (intr == nullptr) { \
intr = new BpServiceManager(obj); \
} \
} \
return intr; \
}
//so what we end up holding is a BpServiceManager (implemented in IServiceManager.cpp)
To recap, defaultServiceManager():
creates the process-wide ProcessState (open establishes the driver connection, mmap the memory mapping) and a BpBinder with handle=0,
then wraps that BpBinder in a BpServiceManager.
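Putting the pieces together, registration and lookup both go through this proxy (getService is the client-side counterpart on the same IServiceManager interface):

sp<IServiceManager> sm = defaultServiceManager();   // BpServiceManager over BpBinder(handle=0)
sm->addService(String16("media.player"), new MediaPlayerService());
// a client later resolves the service by name:
sp<IBinder> binder = sm->getService(String16("media.player"));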
Next comes BpServiceManager::addService(MediaPlayerService):
Note MediaPlayerService's inheritance chain: MediaPlayerService -> BnMediaPlayerService -> BnInterface -> BBinder -> IBinder.
BpBinder also derives from IBinder; the two sides override remoteBinder() and localBinder() respectively (arguably BBinder would have been better named BnBinder):
BpBinder* BpBinder::remoteBinder()
{
return this;
}
BBinder* BBinder::localBinder()
{
return this;
}
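For completeness: the IBinder base class defaults both hooks to null (paraphrased from Binder.cpp), and each side overrides only its own, which is exactly what flatten_binder keys off below:

BBinder* IBinder::localBinder()
{
    return nullptr;
}
BpBinder* IBinder::remoteBinder()
{
    return nullptr;
}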
virtual status_t addService(const String16& name, const sp<IBinder>& service,
bool allowIsolated, int dumpsysPriority) {
Parcel data, reply;
data.writeInterfaceToken(IServiceManager::getInterfaceDescriptor());
data.writeString16(name);
//key point 1
data.writeStrongBinder(service);
data.writeInt32(allowIsolated ? 1 : 0);
data.writeInt32(dumpsysPriority);
//key point 2
status_t err = remote()->transact(ADD_SERVICE_TRANSACTION, data, &reply);
return err == NO_ERROR ? reply.readExceptionCode() : err;
}
//inside addService, let's first look at data.writeStrongBinder(service);
Parcel.cpp:
status_t Parcel::writeStrongBinder(const sp<IBinder>& val){
return flatten_binder(ProcessState::self(), val, this);
}
status_t flatten_binder(const sp<ProcessState>& /*proc*/,
const sp<IBinder>& binder, Parcel* out){
flat_binder_object obj;
if (binder != nullptr) {
BBinder *local = binder->localBinder();
//here we check which kind the incoming binder is: a BBinder or a BpBinder
//if the object is implemented in this process (as our service is here), localBinder() returns it; when forwarding a proxy received from another process, it is a BpBinder instead
//the job is flattening: converting the binder argument into a flat_binder_object;
//for a BpBinder, handle records the handle; for a BBinder, cookie records the object pointer
if (!local) {
//BpBinder
BpBinder *proxy = binder->remoteBinder();
const int32_t handle = proxy ? proxy->handle() : 0;
obj.hdr.type = BINDER_TYPE_HANDLE;
obj.binder = 0; /* Don't pass uninitialized stack data to a remote process */
obj.handle = handle;
obj.cookie = 0;
} else {
//BBinder
if (local->isRequestingSid()) {
obj.flags |= FLAT_BINDER_FLAG_TXN_SECURITY_CTX;
}
obj.hdr.type = BINDER_TYPE_BINDER;
obj.binder = reinterpret_cast<uintptr_t>(local->getWeakRefs());
obj.cookie = reinterpret_cast<uintptr_t>(local);
}
} else {
obj.hdr.type = BINDER_TYPE_BINDER;
obj.binder = 0;
obj.cookie = 0;
}
//finally, write the flattened flat_binder_object into the Parcel
return finish_flatten_binder(binder, obj, out);
}
inline static status_t finish_flatten_binder(
const sp<IBinder>& /*binder*/, const flat_binder_object& flat, Parcel* out)
{
return out->writeObject(flat, false);
}
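The flat_binder_object built above is a fixed-size record shared with the kernel; roughly, from the binder UAPI header:

struct flat_binder_object {
    struct binder_object_header hdr;  // BINDER_TYPE_BINDER / BINDER_TYPE_HANDLE / ...
    __u32 flags;
    union {
        binder_uintptr_t binder;      // local object: weak-ref pointer
        __u32 handle;                 // remote object: driver-assigned handle
    };
    binder_uintptr_t cookie;          // local object: the BBinder* itself
};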
Now for key point 2: remote()->transact(ADD_SERVICE_TRANSACTION, data, &reply).
Remember, we constructed the BpServiceManager around the BpBinder, so remote() is that BpBinder;
so we keep chasing into BpBinder::transact():
status_t BpBinder::transact(
uint32_t code, const Parcel& data, Parcel* reply, uint32_t flags)
{
if (mAlive) {
//the key line:
status_t status = IPCThreadState::self()->transact(
mHandle, code, data, reply, flags);
if (status == DEAD_OBJECT) mAlive = 0;
return status;
}
return DEAD_OBJECT;
}
A new class appears: IPCThreadState. This is the class that actually talks to the driver, and every thread owns its own instance (a per-thread singleton).
In fact it was already initialized back when BpBinder::create() ran: the BpBinder constructor calls
IPCThreadState::self()->incWeakHandle(handle, this).
//TLS plays the same role as Java's ThreadLocal here: it guarantees each thread its own IPCThreadState
IPCThreadState* IPCThreadState::self()
{
if (gHaveTLS) {
restart:
const pthread_key_t k = gTLS;
IPCThreadState* st = (IPCThreadState*)pthread_getspecific(k);
if (st) return st;
//note the constructor
return new IPCThreadState;
}
if (gShutdown) {
return nullptr;
}
pthread_mutex_lock(&gTLSMutex);
if (!gHaveTLS) {
int key_create_value = pthread_key_create(&gTLS, threadDestructor);
if (key_create_value != 0) {
pthread_mutex_unlock(&gTLSMutex);
return nullptr;
}
gHaveTLS = true;
}
pthread_mutex_unlock(&gTLSMutex);
goto restart;
}
//so every thread gets an IPCThreadState of its own
//it holds two Parcels: mOut (data to be sent to the binder driver) and mIn (data received from it), each with a default capacity of 256 bytes
IPCThreadState::IPCThreadState()
: mProcess(ProcessState::self()),
mWorkSource(kUnsetWorkSource),
mPropagateWorkSource(false),
mStrictModePolicy(0),
mLastTransactionBinderFlags(0),
mCallRestriction(mProcess->mCallRestriction)
{
pthread_setspecific(gTLS, this);
clearCaller();
mIn.setDataCapacity(256);
mOut.setDataCapacity(256);
mIPCThreadStateBase = IPCThreadStateBase::self();
}
A quick aside on Parcel.cpp: it is simply a data container. It mallocs its backing store and reallocs to grow it (the growth routine is growData; read it if you're curious).
Values are aligned and written sequentially into that heap buffer, so reads must follow the write order.
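A tiny sketch of that write-order contract, using Parcel's public read/write methods:

Parcel p;
p.writeInt32(42);
p.writeString16(String16("media.player"));
p.setDataPosition(0);                 // rewind before reading
int32_t v = p.readInt32();            // reads must mirror the writes, in order
String16 name = p.readString16();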
//some code trimmed for readability
//a note on the parameters at this point: handle is the 0 passed when the BpBinder was constructed; code is the ADD_SERVICE_TRANSACTION from addService
//data carries the Parcel-flattened service payload, e.g. the service name and the BBinder-derived service object
status_t IPCThreadState::transact(int32_t handle,
uint32_t code, const Parcel& data,
Parcel* reply, uint32_t flags)
{
status_t err;
//package the data for transport; note yet another command, BC_TRANSACTION. Every BC_ command is a request from user space to the driver; once processed, the driver answers with a BR_ command
err = writeTransactionData(BC_TRANSACTION, flags, handle, code, data, nullptr);
//does the caller want a reply? If not, hook up a throwaway reply Parcel before starting the driver exchange
if (reply) {
//start the exchange with the driver; the result lands in reply
err = waitForResponse(reply);
} else {
Parcel fakeReply;
err = waitForResponse(&fakeReply);
}
return err;
}
//repackage the data, via IPCThreadState's mOut, into the format the driver transaction expects
status_t IPCThreadState::writeTransactionData(int32_t cmd, uint32_t binderFlags,
int32_t handle, uint32_t code, const Parcel& data, status_t* statusBuffer)
{
binder_transaction_data tr;
tr.target.ptr = 0;
tr.target.handle = handle;
tr.code = code;
tr.flags = binderFlags;
tr.cookie = 0;
tr.sender_pid = 0;
tr.sender_euid = 0;
//restored from the full source (trimmed above): err comes from validating the Parcel
const status_t err = data.errorCheck();
if (err == NO_ERROR) {
tr.data_size = data.ipcDataSize();
tr.data.ptr.buffer = data.ipcData();
tr.offsets_size = data.ipcObjectsCount()*sizeof(binder_size_t);
tr.data.ptr.offsets = data.ipcObjects();
}
mOut.writeInt32(cmd);//cmd here is BC_TRANSACTION
mOut.write(&tr, sizeof(tr));
return NO_ERROR;
}
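The tr filled in above is another UAPI struct shared with the driver; abridged:

struct binder_transaction_data {
    union {
        __u32 handle;                 // target, proxy side (what we set here)
        binder_uintptr_t ptr;         // target, local side
    } target;
    binder_uintptr_t cookie;
    __u32 code;                       // e.g. ADD_SERVICE_TRANSACTION
    __u32 flags;
    pid_t sender_pid;                 // filled in by the driver, not by us
    uid_t sender_euid;
    binder_size_t data_size;          // size of the Parcel payload
    binder_size_t offsets_size;       // size of the object-offset array
    union {
        struct {
            binder_uintptr_t buffer;  // -> the Parcel data
            binder_uintptr_t offsets; // -> offsets of flat_binder_objects inside it
        } ptr;
        __u8 buf[8];
    } data;
};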
status_t IPCThreadState::waitForResponse(Parcel *reply, status_t *acquireResult)
{
uint32_t cmd;
int32_t err;
//loop: talk to the driver, then read its replies. (It may help to jump down to talkWithDriver first, follow the driver-side flow, and come back here afterwards.)
while (1) {
//talkWithDriver: the real round trip with the driver
if ((err=talkWithDriver()) < NO_ERROR) break;
if (mIn.dataAvail() == 0) continue;
cmd = (uint32_t)mIn.readInt32();
switch (cmd) {
case BR_TRANSACTION_COMPLETE:
if (!reply && !acquireResult) goto finish;
break;
case BR_ACQUIRE_RESULT:
{
ALOG_ASSERT(acquireResult != NULL, "Unexpected brACQUIRE_RESULT");
const int32_t result = mIn.readInt32();
if (!acquireResult) continue;
*acquireResult = result ? NO_ERROR : INVALID_OPERATION;
}
goto finish;
case BR_REPLY:
{
binder_transaction_data tr;
err = mIn.read(&tr, sizeof(tr));
ALOG_ASSERT(err == NO_ERROR, "Not enough command data for brREPLY");
if (err != NO_ERROR) goto finish;
if (reply) {
if ((tr.flags & TF_STATUS_CODE) == 0) {
reply->ipcSetDataReference(
reinterpret_cast<const uint8_t*>(tr.data.ptr.buffer),
tr.data_size,
reinterpret_cast<const binder_size_t*>(tr.data.ptr.offsets),
tr.offsets_size/sizeof(binder_size_t),
freeBuffer, this);
} else {
err = *reinterpret_cast<const status_t*>(tr.data.ptr.buffer);
freeBuffer(nullptr,
reinterpret_cast<const uint8_t*>(tr.data.ptr.buffer),
tr.data_size,
reinterpret_cast<const binder_size_t*>(tr.data.ptr.offsets),
tr.offsets_size/sizeof(binder_size_t), this);
}
} else {
freeBuffer(nullptr,
reinterpret_cast<const uint8_t*>(tr.data.ptr.buffer),
tr.data_size,
reinterpret_cast<const binder_size_t*>(tr.data.ptr.offsets),
tr.offsets_size/sizeof(binder_size_t), this);
continue;
}
}
goto finish;
default:
err = executeCommand(cmd);
if (err != NO_ERROR) goto finish;
break;
}
}
finish:
if (err != NO_ERROR) {
if (acquireResult) *acquireResult = err;
if (reply) reply->setError(err);
mLastError = err;
}
return err;
}
status_t IPCThreadState::talkWithDriver(bool doReceive){
//the header defaults this parameter: doReceive=true
binder_write_read bwr;
//has mIn been fully consumed (so its buffer may be reused for the next read)? On first entry nothing has been read yet, so this is true
const bool needRead = mIn.dataPosition() >= mIn.dataSize();
const size_t outAvail = (!doReceive || needRead) ? mOut.dataSize() : 0;
//record how much data we are sending to the driver and where it lives
bwr.write_size = outAvail;
bwr.write_buffer = (uintptr_t)mOut.data();
//tell the driver how much data we are prepared to receive and where to put it
if (doReceive && needRead) {
bwr.read_size = mIn.dataCapacity();
bwr.read_buffer = (uintptr_t)mIn.data();
} else {
bwr.read_size = 0;
bwr.read_buffer = 0;
}
//the driver reports its handling of the write and read through the consumed fields
bwr.write_consumed = 0;
bwr.read_consumed = 0;
status_t err;
do {
//the real user-space-to-kernel hop: ioctl lands in the driver's binder_ioctl
if (ioctl(mProcess->mDriverFD, BINDER_WRITE_READ, &bwr) >= 0)
err = NO_ERROR;
else
err = -errno;
} while (err == -EINTR);
if (err >= NO_ERROR) {
//once the call returns, the driver's progress is recorded in bwr's consumed fields
//if the driver consumed some of the write data
if (bwr.write_consumed > 0) {
//work out how much was consumed and drop it from mOut; the next loop iteration will send whatever remains
if (bwr.write_consumed < mOut.dataSize())
mOut.remove(0, bwr.write_consumed);
else {
//the driver processed everything we sent, so just clear mOut
mOut.setDataSize(0);
processPostWriteDerefs();
}
}
//if the driver delivered the data we asked to read, expose it through mIn
if (bwr.read_consumed > 0) {
mIn.setDataSize(bwr.read_consumed);
mIn.setDataPosition(0);
}
return NO_ERROR;
}
return err;
}
Everything we offer and everything we request is packed into binder_write_read bwr, and one more command appears: BINDER_WRITE_READ, the ioctl command itself.
At this point: the ioctl cmd is BINDER_WRITE_READ; inside the payload, handle is the 0 from constructing the BpBinder and code is the ADD_SERVICE_TRANSACTION from addService;
bwr.write_buffer holds cmd (BC_TRANSACTION) plus the transaction data containing the flattened BBinder service object.
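binder_write_read itself is only a small control block (from the UAPI header):

struct binder_write_read {
    binder_size_t write_size;         // bytes available in write_buffer
    binder_size_t write_consumed;     // bytes the driver actually processed
    binder_uintptr_t write_buffer;    // -> mOut's data
    binder_size_t read_size;          // room available in read_buffer
    binder_size_t read_consumed;      // bytes the driver filled in
    binder_uintptr_t read_buffer;     // -> mIn's data
};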
Now for the driver's ioctl:
static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
int ret;
//retrieve the per-process object initialized in binder_open
struct binder_proc *proc = filp->private_data;
struct binder_thread *thread;
unsigned int size = _IOC_SIZE(cmd);
void __user *ubuf = (void __user *)arg;
//wait_event_interruptible blocks the caller until its condition holds (here it only blocks while the driver is halted on a user error). Internally it loops: mark the task interruptible, then yield the CPU via schedule()
ret = wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2);
if (ret)
goto err_unlocked;
//look the current thread up in proc's thread tree; if it is not there yet, create and add it
thread = binder_get_thread(proc);
switch (cmd) {
case BINDER_WRITE_READ:
ret = binder_ioctl_write_read(filp, cmd, arg, thread);
if (ret)
goto err;
break;
...the remaining BINDER_ cases are skipped for now
default:
ret = -EINVAL;
goto err;
}
ret = 0;
err:
if (thread)
thread->looper_need_return = false;
wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2);
err_unlocked:
trace_binder_ioctl_done(ret);
return ret;
}
//note: arg here is the bwr block we handed to the driver
Next, the matching BINDER_WRITE_READ case: ret = binder_ioctl_write_read(filp, cmd, arg, thread);
static int binder_ioctl_write_read(struct file *filp,
unsigned int cmd, unsigned long arg,
struct binder_thread *thread)
{
int ret = 0;
struct binder_proc *proc = filp->private_data;
//first, cast the address of the user-supplied data
void __user *ubuf = (void __user *)arg;
struct binder_write_read bwr;
//copy_from_user then copies the user-space data into the kernel-side bwr
if (copy_from_user(&bwr, ubuf, sizeof(bwr))) {
ret = -EFAULT;
goto out;
}
//talkWithDriver covers both sending and receiving replies, so there are two paths here as well: binder_thread_write and binder_thread_read
if (bwr.write_size > 0) {
ret = binder_thread_write(proc, thread,
bwr.write_buffer,
bwr.write_size,
&bwr.write_consumed);
//ret is 0 on success, negative on error
if (ret < 0) {
bwr.read_consumed = 0;
if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
ret = -EFAULT;
goto out;
}
}
if (bwr.read_size > 0) {
ret = binder_thread_read(proc, thread, bwr.read_buffer,
bwr.read_size,
&bwr.read_consumed,
filp->f_flags & O_NONBLOCK);
if (!binder_worklist_empty_ilocked(&proc->todo))
binder_wakeup_proc_ilocked(proc);
if (ret < 0) {
if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
ret = -EFAULT;
goto out;
}
}
if (copy_to_user(ubuf, &bwr, sizeof(bwr))) {
ret = -EFAULT;
goto out;
}
out:
return ret;
}
//let's read binder_thread_write first
static int binder_thread_write(struct binder_proc *proc,
struct binder_thread *thread,
binder_uintptr_t binder_buffer, size_t size,
binder_size_t *consumed)
{
uint32_t cmd;
struct binder_context *context = proc->context;
//binder_buffer: the user-space address of the write stream (our mOut)
void __user *buffer = (void __user *)(uintptr_t)binder_buffer;
//nothing has been consumed yet at first, so ptr points at the start of the data
void __user *ptr = buffer + *consumed;
//end marks the end of the data
void __user *end = buffer + size;
while (ptr < end && thread->return_error.cmd == BR_OK) {
int ret;
if (get_user(cmd, (uint32_t __user *)ptr))
return -EFAULT;
ptr += sizeof(uint32_t);
switch (cmd) {
case BC_TRANSACTION:
case BC_REPLY: {
struct binder_transaction_data tr;
//haven't we copied once already? Yes, but that copy grabbed only the binder_write_read control block; this one copies the binder_transaction_data command it points at. The payload itself is copied exactly once, later, straight into the target process's mapped buffer (see binder_alloc_copy_user_to_buffer below)
if (copy_from_user(&tr, ptr, sizeof(tr)))
return -EFAULT;
ptr += sizeof(tr);
binder_transaction(proc, thread, &tr,
cmd == BC_REPLY, 0);
break;
}
default:
pr_err("%d:%d unknown command %d\n",
proc->pid, thread->pid, cmd);
return -EINVAL;
}
*consumed = ptr - buffer;
}
return 0;
}
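So the buffer binder_thread_write walks is a concatenated command stream; for our addService call it holds exactly one command. Schematically:

// [u32 cmd        ][payload                       ][u32 cmd][payload]...
// [BC_TRANSACTION ][struct binder_transaction_data]
// ptr steps over each cmd+payload pair; *consumed = ptr - buffer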
//handles BC_TRANSACTION and BC_REPLY
static void binder_transaction(struct binder_proc *proc,
struct binder_thread *thread,
struct binder_transaction_data *tr, int reply,
binder_size_t extra_buffers_size)
{
int ret;
struct binder_transaction *t; //the transaction being built
struct binder_work *tcomplete;//work item telling the sender its command was accepted (the client, say, may still be blocked waiting for the server's result)
binder_size_t buffer_offset = 0;
binder_size_t off_start_offset, off_end_offset;
binder_size_t off_min;
binder_size_t sg_buf_offset, sg_buf_end_offset;
struct binder_proc *target_proc = NULL;//target process
struct binder_thread *target_thread = NULL;//target thread
struct binder_node *target_node = NULL;
struct binder_transaction *in_reply_to = NULL;
uint32_t return_error = 0;
uint32_t return_error_param = 0;
uint32_t return_error_line = 0;
binder_size_t last_fixup_obj_off = 0;
binder_size_t last_fixup_min_off = 0;
struct binder_context *context = proc->context;
int t_debug_id = atomic_inc_return(&binder_last_id);
char *secctx = NULL;
u32 secctx_sz = 0;
//the BC_REPLY case: cmd == BC_REPLY
if (reply) {
in_reply_to = thread->transaction_stack;
if (in_reply_to == NULL) {
binder_inner_proc_unlock(proc);
binder_user_error("%d:%d got reply transaction with no transaction stack\n",
proc->pid, thread->pid);
return_error = BR_FAILED_REPLY;
return_error_param = -EPROTO;
return_error_line = __LINE__;
goto err_empty_call_stack;
}
if (in_reply_to->to_thread != thread) {
spin_lock(&in_reply_to->lock);
binder_user_error("%d:%d got reply transaction with bad transaction stack, transaction %d has target %d:%d\n",
proc->pid, thread->pid, in_reply_to->debug_id,
in_reply_to->to_proc ?
in_reply_to->to_proc->pid : 0,
in_reply_to->to_thread ?
in_reply_to->to_thread->pid : 0);
spin_unlock(&in_reply_to->lock);
binder_inner_proc_unlock(proc);
return_error = BR_FAILED_REPLY;
return_error_param = -EPROTO;
return_error_line = __LINE__;
in_reply_to = NULL;
goto err_bad_call_stack;
}
thread->transaction_stack = in_reply_to->to_parent;
binder_inner_proc_unlock(proc);
target_thread = binder_get_txn_from_and_acq_inner(in_reply_to);
if (target_thread == NULL) {
return_error = BR_DEAD_REPLY;
return_error_line = __LINE__;
goto err_dead_binder;
}
if (target_thread->transaction_stack != in_reply_to) {
binder_user_error("%d:%d got reply transaction with bad target transaction stack %d, expected %d\n",
proc->pid, thread->pid,
target_thread->transaction_stack ?
target_thread->transaction_stack->debug_id : 0,
in_reply_to->debug_id);
binder_inner_proc_unlock(target_thread->proc);
return_error = BR_FAILED_REPLY;
return_error_param = -EPROTO;
return_error_line = __LINE__;
in_reply_to = NULL;
target_thread = NULL;
goto err_dead_binder;
}
target_proc = target_thread->proc;
target_proc->tmp_ref++;
binder_inner_proc_unlock(target_thread->proc);
} else {
//the BC_TRANSACTION case
//target.handle was set in IPCThreadState::writeTransactionData (tr.target.handle = handle); it is the same handle we have been tracking in the comments all along
//for addService it is the 0 we passed when constructing the BpBinder
if (tr->target.handle) {
//when handle is non-zero
struct binder_ref *ref;
//look up the ref matching this handle
ref = binder_get_ref_olocked(proc, tr->target.handle,
true);
if (ref) {
//found the ref; resolve its node and pin the target_proc
target_node = binder_get_node_refs_for_txn(
ref->node, &target_proc,
&return_error);
} else {
binder_user_error("%d:%d got transaction to invalid handle\n",
proc->pid, thread->pid);
return_error = BR_FAILED_REPLY;
}
} else {
//handle == 0 means the target process is service_manager
//so we can read the global context-manager node directly; the service_manager process is unique
target_node = context->binder_context_mgr_node;
if (target_node)
target_node = binder_get_node_refs_for_txn(
target_node, &target_proc,
&return_error);
else
return_error = BR_DEAD_REPLY;
}
if (!(tr->flags & TF_ONE_WAY) && thread->transaction_stack) {
struct binder_transaction *tmp;
tmp = thread->transaction_stack;
if (tmp->to_thread != thread) {
binder_user_error("%d:%d got new transaction with bad transaction stack, transaction %d has target %d:%d\n",
proc->pid, thread->pid, tmp->debug_id,
tmp->to_proc ? tmp->to_proc->pid : 0,
tmp->to_thread ?
tmp->to_thread->pid : 0);
return_error = BR_FAILED_REPLY;
return_error_param = -EPROTO;
return_error_line = __LINE__;
goto err_bad_call_stack;
}
while (tmp) {
struct binder_thread *from;
from = tmp->from;
if (from && from->proc == target_proc) {
atomic_inc(&from->tmp_ref);
//found the target_thread
target_thread = from;
spin_unlock(&tmp->lock);
break;
}
tmp = tmp->from_parent;
}
}
}
/* TODO: reuse incoming transaction for reply */
t = kzalloc(sizeof(*t), GFP_KERNEL);
binder_stats_created(BINDER_STAT_TRANSACTION);
tcomplete = kzalloc(sizeof(*tcomplete), GFP_KERNEL);
binder_stats_created(BINDER_STAT_TRANSACTION_COMPLETE);
t->debug_id = t_debug_id;
if (reply)
binder_debug(BINDER_DEBUG_TRANSACTION,
"%d:%d BC_REPLY %d -> %d:%d, data %016llx-%016llx size %lld-%lld-%lld\n",
proc->pid, thread->pid, t->debug_id,
target_proc->pid, target_thread->pid,
(u64)tr->data.ptr.buffer,
(u64)tr->data.ptr.offsets,
(u64)tr->data_size, (u64)tr->offsets_size,
(u64)extra_buffers_size);
else
binder_debug(BINDER_DEBUG_TRANSACTION,
"%d:%d BC_TRANSACTION %d -> %d - node %d, data %016llx-%016llx size %lld-%lld-%lld\n",
proc->pid, thread->pid, t->debug_id,
target_proc->pid, target_node->debug_id,
(u64)tr->data.ptr.buffer,
(u64)tr->data.ptr.offsets,
(u64)tr->data_size, (u64)tr->offsets_size,
(u64)extra_buffers_size);
if (!reply && !(tr->flags & TF_ONE_WAY))
t->from = thread;
else
t->from = NULL;
t->sender_euid = task_euid(proc->tsk);
t->to_proc = target_proc;
t->to_thread = target_thread;
t->code = tr->code;
t->flags = tr->flags;
if (!(t->flags & TF_ONE_WAY) &&
binder_supported_policy(current->policy)) {
/* Inherit supported policies for synchronous transactions */
t->priority.sched_policy = current->policy;
t->priority.prio = current->normal_prio;
} else {
/* Otherwise, fall back to the default priority */
t->priority = target_proc->default_priority;
}
if (target_node && target_node->txn_security_ctx) {
u32 secid;
security_task_getsecid(proc->tsk, &secid);
ret = security_secid_to_secctx(secid, &secctx, &secctx_sz);
if (ret) {
return_error = BR_FAILED_REPLY;
return_error_param = ret;
return_error_line = __LINE__;
goto err_get_secctx_failed;
}
extra_buffers_size += ALIGN(secctx_sz, sizeof(u64));
}
trace_binder_transaction(reply, t, target_node);
t->buffer = binder_alloc_new_buf(&target_proc->alloc, tr->data_size,
tr->offsets_size, extra_buffers_size,
!reply && (t->flags & TF_ONE_WAY));
if (IS_ERR(t->buffer)) {
/*
* -ESRCH indicates VMA cleared. The target is dying.
*/
return_error_param = PTR_ERR(t->buffer);
return_error = return_error_param == -ESRCH ?
BR_DEAD_REPLY : BR_FAILED_REPLY;
return_error_line = __LINE__;
t->buffer = NULL;
goto err_binder_alloc_buf_failed;
}
if (secctx) {
size_t buf_offset = ALIGN(tr->data_size, sizeof(void *)) +
ALIGN(tr->offsets_size, sizeof(void *)) +
ALIGN(extra_buffers_size, sizeof(void *)) -
ALIGN(secctx_sz, sizeof(u64));
t->security_ctx = (uintptr_t)t->buffer->user_data + buf_offset;
binder_alloc_copy_to_buffer(&target_proc->alloc,
t->buffer, buf_offset,
secctx, secctx_sz);
security_release_secctx(secctx, secctx_sz);
secctx = NULL;
}
t->buffer->debug_id = t->debug_id;
t->buffer->transaction = t;
t->buffer->target_node = target_node;
trace_binder_transaction_alloc_buf(t->buffer);
if (binder_alloc_copy_user_to_buffer(
&target_proc->alloc,
t->buffer, 0,
(const void __user *)
(uintptr_t)tr->data.ptr.buffer,
tr->data_size)) {
binder_user_error("%d:%d got transaction with invalid data ptr\n",
proc->pid, thread->pid);
return_error = BR_FAILED_REPLY;
return_error_param = -EFAULT;
return_error_line = __LINE__;
goto err_copy_data_failed;
}
if (binder_alloc_copy_user_to_buffer(
&target_proc->alloc,
t->buffer,
ALIGN(tr->data_size, sizeof(void *)),
(const void __user *)
(uintptr_t)tr->data.ptr.offsets,
tr->offsets_size)) {
binder_user_error("%d:%d got transaction with invalid offsets ptr\n",
proc->pid, thread->pid);
return_error = BR_FAILED_REPLY;
return_error_param = -EFAULT;
return_error_line = __LINE__;
goto err_copy_data_failed;
}
if (!IS_ALIGNED(tr->offsets_size, sizeof(binder_size_t))) {
binder_user_error("%d:%d got transaction with invalid offsets size, %lld\n",
proc->pid, thread->pid, (u64)tr->offsets_size);
return_error = BR_FAILED_REPLY;
return_error_param = -EINVAL;
return_error_line = __LINE__;
goto err_bad_offset;
}
if (!IS_ALIGNED(extra_buffers_size, sizeof(u64))) {
binder_user_error("%d:%d got transaction with unaligned buffers size, %lld\n",
proc->pid, thread->pid,
(u64)extra_buffers_size);
return_error = BR_FAILED_REPLY;
return_error_param = -EINVAL;
return_error_line = __LINE__;
goto err_bad_offset;
}
off_start_offset = ALIGN(tr->data_size, sizeof(void *));
buffer_offset = off_start_offset;
off_end_offset = off_start_offset + tr->offsets_size;
sg_buf_offset = ALIGN(off_end_offset, sizeof(void *));
sg_buf_end_offset = sg_buf_offset + extra_buffers_size;
off_min = 0;
for (buffer_offset = off_start_offset; buffer_offset < off_end_offset;
buffer_offset += sizeof(binder_size_t)) {
struct binder_object_header *hdr;
size_t object_size;
struct binder_object object;
binder_size_t object_offset;
binder_alloc_copy_from_buffer(&target_proc->alloc,
&object_offset,
t->buffer,
buffer_offset,
sizeof(object_offset));
object_size = binder_get_object(target_proc, t->buffer,
object_offset, &object);
if (object_size == 0 || object_offset < off_min) {
binder_user_error("%d:%d got transaction with invalid offset (%lld, min %lld max %lld) or object.\n",
proc->pid, thread->pid,
(u64)object_offset,
(u64)off_min,
(u64)t->buffer->data_size);
return_error = BR_FAILED_REPLY;
return_error_param = -EINVAL;
return_error_line = __LINE__;
goto err_bad_offset;
}
hdr = &object.hdr;
off_min = object_offset + object_size;
switch (hdr->type) {
case BINDER_TYPE_BINDER:
case BINDER_TYPE_WEAK_BINDER: {
struct flat_binder_object *fp;
fp = to_flat_binder_object(hdr);
ret = binder_translate_binder(fp, t, thread);
if (ret < 0) {
return_error = BR_FAILED_REPLY;
return_error_param = ret;
return_error_line = __LINE__;
goto err_translate_failed;
}
binder_alloc_copy_to_buffer(&target_proc->alloc,
t->buffer, object_offset,
fp, sizeof(*fp));
} break;
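		/*
		 * binder_translate_binder (not shown here) is where a local
		 * BBinder becomes remotable: it finds or creates the
		 * binder_node for the sending process, creates/increments a
		 * binder_ref on target_proc, and rewrites the object in
		 * place: hdr.type becomes BINDER_TYPE_HANDLE, fp->handle the
		 * target-side ref's descriptor, fp->binder/cookie 0. This is
		 * the kernel step that lets the receiver readStrongBinder()
		 * a BpBinder for our BBinder.
		 */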
case BINDER_TYPE_HANDLE:
case BINDER_TYPE_WEAK_HANDLE: {
struct flat_binder_object *fp;
fp = to_flat_binder_object(hdr);
ret = binder_translate_handle(fp, t, thread);
if (ret < 0) {
return_error = BR_FAILED_REPLY;
return_error_param = ret;
return_error_line = __LINE__;
goto err_translate_failed;
}
binder_alloc_copy_to_buffer(&target_proc->alloc,
t->buffer, object_offset,
fp, sizeof(*fp));
} break;
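		/*
		 * binder_translate_handle is the reverse mapping: it resolves
		 * the sender's handle to its binder_node. If that node belongs
		 * to target_proc (the object is coming home), the type flips
		 * back to BINDER_TYPE_BINDER and ptr/cookie are restored, so
		 * the owner gets its original BBinder; otherwise a ref is
		 * created in target_proc and fp->handle is rewritten to the
		 * target's own descriptor.
		 */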
case BINDER_TYPE_FD: {
struct binder_fd_object *fp = to_binder_fd_object(hdr);
int target_fd = binder_translate_fd(fp->fd, t, thread,
in_reply_to);
if (target_fd < 0) {
return_error = BR_FAILED_REPLY;
return_error_param = target_fd;
return_error_line = __LINE__;
goto err_translate_failed;
}
fp->pad_binder = 0;
fp->fd = target_fd;
binder_alloc_copy_to_buffer(&target_proc->alloc,
t->buffer, object_offset,
fp, sizeof(*fp));
} break;
case BINDER_TYPE_FDA: {
struct binder_object ptr_object;
binder_size_t parent_offset;
struct binder_fd_array_object *fda =
to_binder_fd_array_object(hdr);
size_t num_valid = (buffer_offset - off_start_offset) *
sizeof(binder_size_t);
struct binder_buffer_object *parent =
binder_validate_ptr(target_proc, t->buffer,
&ptr_object, fda->parent,
off_start_offset,
&parent_offset,
num_valid);
if (!parent) {
binder_user_error("%d:%d got transaction with invalid parent offset or type\n",
proc->pid, thread->pid);
return_error = BR_FAILED_REPLY;
return_error_param = -EINVAL;
return_error_line = __LINE__;
goto err_bad_parent;
}
if (!binder_validate_fixup(target_proc, t->buffer,
off_start_offset,
parent_offset,
fda->parent_offset,
last_fixup_obj_off,
last_fixup_min_off)) {
binder_user_error("%d:%d got transaction with out-of-order buffer fixup\n",
proc->pid, thread->pid);
return_error = BR_FAILED_REPLY;
return_error_param = -EINVAL;
return_error_line = __LINE__;
goto err_bad_parent;
}
ret = binder_translate_fd_array(fda, parent, t, thread,
in_reply_to);
if (ret < 0) {
return_error = BR_FAILED_REPLY;
return_error_param = ret;
return_error_line = __LINE__;
goto err_translate_failed;
}
last_fixup_obj_off = parent_offset;
last_fixup_min_off =
fda->parent_offset + sizeof(u32) * fda->num_fds;
} break;
case BINDER_TYPE_PTR: {
struct binder_buffer_object *bp =
to_binder_buffer_object(hdr);
size_t buf_left = sg_buf_end_offset - sg_buf_offset;
size_t num_valid;
if (bp->length > buf_left) {
binder_user_error("%d:%d got transaction with too large buffer\n",
proc->pid, thread->pid);
return_error = BR_FAILED_REPLY;
return_error_param = -EINVAL;
return_error_line = __LINE__;
goto err_bad_offset;
}
if (binder_alloc_copy_user_to_buffer(
&target_proc->alloc,
t->buffer,
sg_buf_offset,
(const void __user *)
(uintptr_t)bp->buffer,
bp->length)) {
binder_user_error("%d:%d got transaction with invalid offsets ptr\n",
proc->pid, thread->pid);
return_error_param = -EFAULT;
return_error = BR_FAILED_REPLY;
return_error_line = __LINE__;
goto err_copy_data_failed;
}
/* Fixup buffer pointer to target proc address space */
bp->buffer = (uintptr_t)
t->buffer->user_data + sg_buf_offset;
sg_buf_offset += ALIGN(bp->length, sizeof(u64));
num_valid = (buffer_offset - off_start_offset) *
sizeof(binder_size_t);
ret = binder_fixup_parent(t, thread, bp,
off_start_offset,
num_valid,
last_fixup_obj_off,
last_fixup_min_off);
if (ret < 0) {
return_error = BR_FAILED_REPLY;
return_error_param = ret;
return_error_line = __LINE__;
goto err_translate_failed;
}
binder_alloc_copy_to_buffer(&target_proc->alloc,
t->buffer, object_offset,
bp, sizeof(*bp));
last_fixup_obj_off = object_offset;
last_fixup_min_off = 0;
} break;
default:
binder_user_error("%d:%d got transaction with invalid object type, %x\n",
proc->pid, thread->pid, hdr->type);
return_error = BR_FAILED_REPLY;
return_error_param = -EINVAL;
return_error_line = __LINE__;
goto err_bad_object_type;
}
}
tcomplete->type = BINDER_WORK_TRANSACTION_COMPLETE;
t->work.type = BINDER_WORK_TRANSACTION;
if (reply) {
binder_enqueue_thread_work(thread, tcomplete);
binder_inner_proc_lock(target_proc);
if (target_thread->is_dead) {
binder_inner_proc_unlock(target_proc);
goto err_dead_proc_or_thread;
}
BUG_ON(t->buffer->async_transaction != 0);
binder_pop_transaction_ilocked(target_thread, in_reply_to);
binder_enqueue_thread_work_ilocked(target_thread, &t->work);
binder_inner_proc_unlock(target_proc);
wake_up_interruptible_sync(&target_thread->wait);
binder_restore_priority(current, in_reply_to->saved_priority);
binder_free_transaction(in_reply_to);
} else if (!(t->flags & TF_ONE_WAY)) {
BUG_ON(t->buffer->async_transaction != 0);
binder_inner_proc_lock(proc);
/*
* Defer the TRANSACTION_COMPLETE, so we don't return to
* userspace immediately; this allows the target process to
* immediately start processing this transaction, reducing
* latency. We will then return the TRANSACTION_COMPLETE when
* the target replies (or there is an error).
*/
binder_enqueue_deferred_thread_work_ilocked(thread, tcomplete);
t->need_reply = 1;
t->from_parent = thread->transaction_stack;
thread->transaction_stack = t;
binder_inner_proc_unlock(proc);
if (!binder_proc_transaction(t, target_proc, target_thread)) {
binder_inner_proc_lock(proc);
binder_pop_transaction_ilocked(thread, t);
binder_inner_proc_unlock(proc);
goto err_dead_proc_or_thread;
}
} else {
BUG_ON(target_node == NULL);
BUG_ON(t->buffer->async_transaction != 1);
binder_enqueue_thread_work(thread, tcomplete);
if (!binder_proc_transaction(t, target_proc, NULL))
goto err_dead_proc_or_thread;
}
if (target_thread)
binder_thread_dec_tmpref(target_thread);
binder_proc_dec_tmpref(target_proc);
if (target_node)
binder_dec_node_tmpref(target_node);
/*
* write barrier to synchronize with initialization
* of log entry
*/
smp_wmb();
/* e is the binder_transaction_log entry; its setup was elided from this excerpt */
WRITE_ONCE(e->debug_id_done, t_debug_id);
return;
err_dead_proc_or_thread:
return_error = BR_DEAD_REPLY;
return_error_line = __LINE__;
binder_dequeue_work(proc, tcomplete);
err_translate_failed:
err_bad_object_type:
err_bad_offset:
err_bad_parent:
err_copy_data_failed:
trace_binder_transaction_failed_buffer_release(t->buffer);
binder_transaction_buffer_release(target_proc, t->buffer,
buffer_offset, true);
if (target_node)
binder_dec_node_tmpref(target_node);
target_node = NULL;
t->buffer->transaction = NULL;
binder_alloc_free_buf(&target_proc->alloc, t->buffer);
err_binder_alloc_buf_failed:
if (secctx)
security_release_secctx(secctx, secctx_sz);
err_get_secctx_failed:
kfree(tcomplete);
binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
err_alloc_tcomplete_failed:
kfree(t);
binder_stats_deleted(BINDER_STAT_TRANSACTION);
err_alloc_t_failed:
err_bad_call_stack:
err_empty_call_stack:
err_dead_binder:
err_invalid_target_handle:
if (target_thread)
binder_thread_dec_tmpref(target_thread);
if (target_proc)
binder_proc_dec_tmpref(target_proc);
if (target_node) {
binder_dec_node(target_node, 1, 0);
binder_dec_node_tmpref(target_node);
}
binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
"%d:%d transaction failed %d/%d, size %lld-%lld line %d\n",
proc->pid, thread->pid, return_error, return_error_param,
(u64)tr->data_size, (u64)tr->offsets_size,
return_error_line);
{
struct binder_transaction_log_entry *fe;
e->return_error = return_error;
e->return_error_param = return_error_param;
e->return_error_line = return_error_line;
fe = binder_transaction_log_add(&binder_transaction_log_failed);
*fe = *e;
/*
* write barrier to synchronize with initialization
* of log entry
*/
smp_wmb();
WRITE_ONCE(e->debug_id_done, t_debug_id);
WRITE_ONCE(fe->debug_id_done, t_debug_id);
}
BUG_ON(thread->return_error.cmd != BR_OK);
if (in_reply_to) {
binder_restore_priority(current, in_reply_to->saved_priority);
thread->return_error.cmd = BR_TRANSACTION_COMPLETE;
binder_enqueue_thread_work(thread, &thread->return_error.work);
binder_send_failed_reply(in_reply_to, return_error);
} else {
thread->return_error.cmd = return_error;
binder_enqueue_thread_work(thread, &thread->return_error.work);
}
}