Reference articles:
Binder architecture diagram:
Source code:
Key classes:
- 1. BinderProxy;
- 2. ServiceManager;
- 3. BpBinder;
- 4. BBinder;
- 5. ActivityManagerService;
- 6. IPCThreadState;
1. BinderProxy:
From module <7.14> (process creation) of https://www.jianshu.com/p/4acd9ee3df12 we know that when SMP (ServiceManagerProxy) is initialized, an IBinder instance must be obtained from the native layer via BinderInternal.getContextObject;
1.1 android_util_Binder.getContextObject:
static const JNINativeMethod gBinderInternalMethods[] = {
{ "getContextObject", "()Landroid/os/IBinder;", (void*)android_os_BinderInternal_getContextObject },
...
};
/**
 * Mapping between java-layer and native-layer methods; the native-side method name
 * can be chosen freely rather than strictly following the JNI naming convention;
 */
static jobject android_os_BinderInternal_getContextObject(JNIEnv* env, jobject clazz)
{
/**
 * b points to a BpBinder instance;
 */
sp<IBinder> b = ProcessState::self()->getContextObject(NULL);
/**
 * As module <1.2> shows, javaObjectForIBinder returns a BinderProxy, and the
 * BinderProxy holds a reference to the BpBinder;
 */
return javaObjectForIBinder(env, b);
}
ProcessState--->
sp<IBinder> ProcessState::getContextObject(const sp<IBinder>& /*caller*/)
{
return getStrongProxyForHandle(0);
}
sp<IBinder> ProcessState::getStrongProxyForHandle(int32_t handle)
{
sp<IBinder> result;
AutoMutex _l(mLock);
handle_entry* e = lookupHandleLocked(handle);
IBinder* b = e->binder;
// Simplified here: in the full AOSP source a new proxy is only created when there is
// no live cached proxy, i.e. when b == NULL || !e->refs->attemptIncWeak(this);
if (handle == 0) {
    /**
     * Special case for handle 0, the context manager (ServiceManager):
     * ping the driver first to make sure the context manager is registered;
     */
    Parcel data;
    status_t status = IPCThreadState::self()->transact(0, IBinder::PING_TRANSACTION, data, NULL, 0);
}
/**
 * Construct the BpBinder object for this handle and cache it;
 */
b = new BpBinder(handle);
e->binder = b;
if (b) e->refs = b->getWeakRefs();
result = b;
return result;
}
1.2 android_util_Binder.javaObjectForIBinder:
jobject javaObjectForIBinder(JNIEnv* env, const sp<IBinder>& val)
{
if (val->checkSubclass(&gBinderOffsets)) {
jobject object = static_cast<JavaBBinder*>(val.get())->object();
return object;
}
AutoMutex _l(mProxyLock);
jobject object = (jobject)val->findObject(&gBinderProxyOffsets);
if (object != NULL) {
/**
 * If a BinderProxy has already been created for this IBinder and is still alive,
 * return the existing reference directly;
 */
jobject res = jniGetReferent(env, object);
if (res != NULL) {
return res;
}
}
/**
 * Combining <//1---> and <//2---> below, object actually points to the
 * java-layer android/os/BinderProxy;
 */
object = env->NewObject(gBinderProxyOffsets.mClass, gBinderProxyOffsets.mConstructor);
if (object != NULL) {
/**
 * val points to the BpBinder; here the BpBinder pointer is stored into the
 * BinderProxy.mObject field;
 */
env->SetLongField(object, gBinderProxyOffsets.mObject, (jlong)val.get());
val->incStrong((void*)javaObjectForIBinder);
jobject refObject = env->NewGlobalRef(env->GetObjectField(object, gBinderProxyOffsets.mSelf));
val->attachObject(&gBinderProxyOffsets, refObject, jnienv_to_javavm(env), proxy_cleanup);
sp<DeathRecipientList> drl = new DeathRecipientList;
drl->incStrong((void*)javaObjectForIBinder);
env->SetLongField(object, gBinderProxyOffsets.mOrgue, reinterpret_cast<jlong>(drl.get()));
android_atomic_inc(&gNumProxyRefs);
incRefsCreated(env);
}
return object;
}
//1--->
const char* const kBinderProxyPathName = "android/os/BinderProxy";
//2--->
static int int_register_android_os_BinderProxy(JNIEnv* env)
{
jclass clazz = FindClassOrDie(env, "java/lang/Error");
gErrorOffsets.mClass = MakeGlobalRefOrDie(env, clazz);
clazz = FindClassOrDie(env, kBinderProxyPathName);
gBinderProxyOffsets.mClass = MakeGlobalRefOrDie(env, clazz);
gBinderProxyOffsets.mConstructor = GetMethodIDOrDie(env, clazz, "<init>", "()V");
gBinderProxyOffsets.mSendDeathNotice = GetStaticMethodIDOrDie(env, clazz, "sendDeathNotice",
"(Landroid/os/IBinder$DeathRecipient;)V");
gBinderProxyOffsets.mObject = GetFieldIDOrDie(env, clazz, "mObject", "J");
gBinderProxyOffsets.mSelf = GetFieldIDOrDie(env, clazz, "mSelf", "Ljava/lang/ref/WeakReference;");
gBinderProxyOffsets.mOrgue = GetFieldIDOrDie(env, clazz, "mOrgue", "J");
clazz = FindClassOrDie(env, "java/lang/Class");
gClassOffsets.mGetName = GetMethodIDOrDie(env, clazz, "getName", "()Ljava/lang/String;");
return RegisterMethodsOrDie(env, kBinderProxyPathName, gBinderProxyMethods, NELEM(gBinderProxyMethods));
}
1.3 BinderProxy.transact:
From module <7.15> of https://www.jianshu.com/p/4acd9ee3df12 and module <1.4> of https://www.jianshu.com/p/eee43ec8a7cc we know that whenever the java layer interacts with the native layer, BinderProxy.transact is called first;
final class BinderProxy implements IBinder {
public native boolean transactNative(int code, Parcel data, Parcel reply, int flags);
public boolean transact(int code, Parcel data, Parcel reply, int flags) {
/**
 * BinderProxy hands the work over to BpBinder in the native layer;
 */
return transactNative(code, data, reply, flags);
}
}
1.4 android_util_Binder.transact:
static jboolean android_os_BinderProxy_transact(JNIEnv* env, jobject obj,
jint code, jobject dataObj, jobject replyObj, jint flags)
{
Parcel* data = parcelForJavaObject(env, dataObj);
Parcel* reply = parcelForJavaObject(env, replyObj);
/**
 * When BinderInternal.getContextObject is called, a BinderProxy instance is
 * constructed and the BpBinder is saved in BinderProxy.mObject, so target here
 * actually points to the BpBinder;
 */
IBinder* target = (IBinder*) env->GetLongField(obj, gBinderProxyOffsets.mObject);
bool time_binder_calls;
int64_t start_millis;
/**
 * data and reply are both passed down from the application layer through
 * BinderProxy. Module <1.5>;
 */
status_t err = target->transact(code, *data, reply, flags);
return JNI_TRUE;
}
1.5 BpBinder.transact:
status_t BpBinder::transact(uint32_t code, const Parcel& data, Parcel* reply, uint32_t flags) {
/**
 * Module <1.6>;
 */
status_t status = IPCThreadState::self()->transact(mHandle, code, data, reply, flags);
return status;
}
1.6 IPCThreadState.transact:
status_t IPCThreadState::transact(int32_t handle, uint32_t code, const Parcel& data, Parcel* reply, uint32_t flags)
{
status_t err = data.errorCheck();
flags |= TF_ACCEPT_FDS;
if (err == NO_ERROR) {
/**
 * 1. Package the transaction data, then wait for the response in waitForResponse
 *    below; module <6.5>;
 * 2. Both writeTransactionData and waitForResponse rely on IPCThreadState's
 *    talkWithDriver method for the actual driver I/O; module <6.3>;
 */
err = writeTransactionData(BC_TRANSACTION, flags, handle, code, data, NULL);
}
if ((flags & TF_ONE_WAY) == 0) {
if (reply) {
err = waitForResponse(reply);
} else {
Parcel fakeReply;
err = waitForResponse(&fakeReply);
}
}
return err;
}
1.7 AndroidRuntime.startReg:
1.8 android_util_Binder.register_android_os_Binder:
- This method does three things (a sketch follows this list):
- 1. Registers the JNI methods of the Binder class;
- 2. Registers the JNI methods of the BinderInternal class;
- 3. Registers the JNI methods of the BinderProxy class;
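For reference, a minimal sketch of register_android_os_Binder, based on the AOSP android_util_Binder.cpp of this article's era (the class lookups it also performs afterwards are elided):
int register_android_os_Binder(JNIEnv* env)
{
    // Register the java <-> native mappings for Binder, BinderInternal and BinderProxy;
    if (int_register_android_os_Binder(env) < 0)
        return -1;
    if (int_register_android_os_BinderInternal(env) < 0)
        return -1;
    if (int_register_android_os_BinderProxy(env) < 0)
        return -1;
    ...
    return 0;
}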
1.9 android_util_Binder.int_register_android_os_Binder:
- //7L ~ //9L do three things (a sketch follows this list):
- 1. Saves the java-layer Binder class into the mClass field;
- 2. Saves the java-layer Binder.execTransact method into the mExecTransact field;
- 3. Saves the field ID of the java-layer Binder.mObject field into the mObject field;
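A sketch of int_register_android_os_Binder, based on AOSP (kBinderPathName is "android/os/Binder"):
static int int_register_android_os_Binder(JNIEnv* env)
{
    jclass clazz = FindClassOrDie(env, kBinderPathName);
    // 1. Save the java-layer Binder class;
    gBinderOffsets.mClass = MakeGlobalRefOrDie(env, clazz);
    // 2. Save the java-layer Binder.execTransact method;
    gBinderOffsets.mExecTransact = GetMethodIDOrDie(env, clazz, "execTransact", "(IJJI)Z");
    // 3. Save the field ID of the java-layer Binder.mObject field;
    gBinderOffsets.mObject = GetFieldIDOrDie(env, clazz, "mObject", "J");
    return RegisterMethodsOrDie(env, kBinderPathName, gBinderMethods, NELEM(gBinderMethods));
}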
1.10 android_util_Binder.int_register_android_os_BinderInternal:
- Similar to 1.9, registers BinderInternal (a sketch follows this list);
- 1. Saves the java-layer BinderInternal class into the mClass field;
- 2. Saves the java-layer BinderInternal.forceBinderGc method into the mForceGc field;
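A matching sketch based on AOSP (kBinderInternalPathName is "com/android/internal/os/BinderInternal"):
static int int_register_android_os_BinderInternal(JNIEnv* env)
{
    jclass clazz = FindClassOrDie(env, kBinderInternalPathName);
    // 1. Save the java-layer BinderInternal class;
    gBinderInternalOffsets.mClass = MakeGlobalRefOrDie(env, clazz);
    // 2. Save the java-layer BinderInternal.forceBinderGc method;
    gBinderInternalOffsets.mForceGc = GetStaticMethodIDOrDie(env, clazz, "forceBinderGc", "()V");
    return RegisterMethodsOrDie(env, kBinderInternalPathName, gBinderInternalMethods,
                                NELEM(gBinderInternalMethods));
}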
1.11 android_util_Binder.int_register_android_os_BinderProxy:
- 1. Completes the registration of BinderProxy (the full code is shown at <//2---> in module <1.2>);
- 2. Saves the java-layer BinderProxy class into gBinderProxyOffsets.mClass;
- 3. Saves the java-layer BinderProxy constructor into gBinderProxyOffsets.mConstructor;
2. ActivityManagerService:
- From the process-creation flow in Binder学习_02 ---> after the Zygote process has been created, the system_server process is forked from it; once system_server is up, execution switches into that process and enters the java-layer SystemServer.main method, which in turn triggers the creation of the ActivityManagerService object;
2.1 SystemServer.run:
2.2 SystemServer.startBootstrapServices:
- 1. Registers a series of services; note in particular the AMS instance passed in when the "activity" service is registered;
3. ServiceManager:
- 1. After the ActivityManagerService object has been created in the system_server process, ActivityManagerService registers its services via ServiceManager.addService;
- 2. ServiceManager offers two operations, addService and getService; both ultimately rely on SMN (ServiceManagerNative), whose implementation is in turn delegated to its proxy class SMP (ServiceManagerProxy);
3.1 ServiceManager.addService:
- 1. Ultimately calls through to ServiceManagerProxy.addService;
- 2. ServiceManagerProxy holds a reference to obj; from the analysis in part 4 below, the IBinder mRemote held inside ServiceManagerProxy actually points to a BinderProxy;
3.2 SMP.addService:
- 1. Writes the data into a Parcel, then hands it to the native layer via mRemote.transact;
- 2. While filling the Parcel, //7L writes the current IBinder into the Parcel (module <3.3>), which is critical for the later callback;
3.3 Parcel.writeStrongBinder:
- 1. Analyzed in two parts: the native-layer ibinderForJavaObject and writeStrongBinder; the JNI entry point is sketched below;
- 2. At this point object points to the IBinder passed in from the java layer when addService was called;
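A sketch of the JNI entry point, based on AOSP android_os_Parcel.cpp: it resolves the native Parcel from nativePtr, converts the java IBinder with ibinderForJavaObject (module <3.4>), and writes it via Parcel::writeStrongBinder (module <3.6>):
static void android_os_Parcel_writeStrongBinder(JNIEnv* env, jclass clazz, jlong nativePtr, jobject object)
{
    Parcel* parcel = reinterpret_cast<Parcel*>(nativePtr);
    if (parcel != NULL) {
        // Convert the java-layer IBinder into a native sp<IBinder>, then flatten it into the parcel;
        const status_t err = parcel->writeStrongBinder(ibinderForJavaObject(env, object));
        if (err != NO_ERROR) {
            signalExceptionForError(env, clazz, err);
        }
    }
}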
3.4 android_os_Parcel.ibinderForJavaObject:
- 1. The obj at //6L points to the java-layer Binder object; a sketch of the function follows;
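A simplified sketch based on AOSP:
sp<IBinder> ibinderForJavaObject(JNIEnv* env, jobject obj)
{
    if (obj == NULL) return NULL;
    // Case 1: obj is a java-layer Binder (a local service object such as AMS):
    // fetch its JavaBBinderHolder from Binder.mObject and get/create the JavaBBinder; module <3.5>;
    if (env->IsInstanceOf(obj, gBinderOffsets.mClass)) {
        JavaBBinderHolder* jbh = (JavaBBinderHolder*)env->GetLongField(obj, gBinderOffsets.mObject);
        return jbh != NULL ? jbh->get(env, obj) : NULL;
    }
    // Case 2: obj is a BinderProxy: return the BpBinder saved in BinderProxy.mObject;
    if (env->IsInstanceOf(obj, gBinderProxyOffsets.mClass)) {
        return (IBinder*)env->GetLongField(obj, gBinderProxyOffsets.mObject);
    }
    return NULL;
}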
3.5 JavaBBinderHolder.get:
- 1. Creates the JavaBBinder object; JavaBBinder extends BBinder and holds a reference mObject, which points to the IBinder object passed in via ServiceManager.addService; see the sketch below;
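A sketch of JavaBBinderHolder::get, based on AOSP: it lazily creates the JavaBBinder the first time the java-layer Binder crosses into native code, and reuses it afterwards:
sp<JavaBBinder> get(JNIEnv* env, jobject obj)
{
    AutoMutex _l(mLock);
    sp<JavaBBinder> b = mBinder.promote();
    if (b == NULL) {
        // First use: wrap the java-layer Binder (obj) in a JavaBBinder; the
        // JavaBBinder constructor stores a global ref to obj in its mObject field;
        b = new JavaBBinder(env, obj);
        mBinder = b;
    }
    return b;
}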
3.6 Parcel.writeStrongBinder (native):
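A sketch of the native side, based on AOSP Parcel.cpp and simplified (error logging and finish_flatten_binder elided): writeStrongBinder delegates to flatten_binder, which encodes a local BBinder and a remote BpBinder differently:
status_t Parcel::writeStrongBinder(const sp<IBinder>& val)
{
    return flatten_binder(ProcessState::self(), val, this);
}
status_t flatten_binder(const sp<ProcessState>& /*proc*/, const sp<IBinder>& binder, Parcel* out)
{
    flat_binder_object obj;
    obj.flags = 0x7f | FLAT_BINDER_FLAG_ACCEPTS_FDS;
    if (binder != NULL) {
        IBinder* local = binder->localBinder();
        if (!local) {
            // Remote object: write the BpBinder's handle;
            BpBinder* proxy = binder->remoteBinder();
            obj.type = BINDER_TYPE_HANDLE;
            obj.binder = 0;
            obj.handle = proxy ? proxy->handle() : 0;
            obj.cookie = 0;
        } else {
            // Local object (our JavaBBinder): write its pointers; the driver
            // translates this into a handle for the receiving process;
            obj.type = BINDER_TYPE_BINDER;
            obj.binder = reinterpret_cast<uintptr_t>(local->getWeakRefs());
            obj.cookie = reinterpret_cast<uintptr_t>(local);
        }
    }
    return finish_flatten_binder(binder, obj, out);
}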
4. BpBinder:
- From parts 2 and 3 we know that ActivityManagerService registers a series of services through ServiceManager; registration ultimately relies on ServiceManagerProxy, and when the ServiceManagerProxy object is constructed it is handed the IBinder obtained via BinderInternal.getContextObject();
4.1 BinderInternal.getContextObject:
- 1. Creates the ProcessState object as a per-process singleton; a sketch follows;
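A sketch of ProcessState::self, based on AOSP:
sp<ProcessState> ProcessState::self()
{
    Mutex::Autolock _l(gProcessMutex);
    if (gProcess != NULL) {
        return gProcess;
    }
    // One ProcessState per process; its constructor opens /dev/binder
    // and mmaps the receive buffer;
    gProcess = new ProcessState;
    return gProcess;
}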
4.2 ProcessState.getContextObject:
- 1. Returns the BpBinder object (the code is shown in module <1.1>);
4.3 android_util_Binder.javaObjectForIBinder:
- 1. //25L creates the BinderProxy object;
- 2. //27L saves the BpBinder object into the BinderProxy's mObject field;
- 3. //30L attaches the BinderProxy information to the BpBinder's mObjects (the full function is shown in module <1.2>);
5. BBinder:
- 1. When AMS registers a service, a corresponding BBinder object is created in the native layer; the BBinder is actually a JavaBBinder. When the JavaBBinder object is constructed it is given an IBinder reference, which is assigned to JavaBBinder's mObject field; that IBinder points to the java-layer IBinder object;
- 2. When AMS later obtains a service through SM by the registered name, the lookup resolves to the JavaBBinder produced at registration time; the sketch below shows how that JavaBBinder routes an incoming transaction back to the java layer;
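A simplified sketch based on AOSP: JavaBBinder::onTransact calls the java-layer Binder.execTransact saved in gBinderOffsets.mExecTransact (module 1.9), which in turn dispatches to Binder.onTransact:
// Inside class JavaBBinder : public BBinder
virtual status_t onTransact(uint32_t code, const Parcel& data, Parcel* reply, uint32_t flags = 0)
{
    JNIEnv* env = javavm_to_jnienv(mVM);
    // mObject is the java-layer Binder (e.g. AMS); invoke its execTransact method;
    jboolean res = env->CallBooleanMethod(mObject, gBinderOffsets.mExecTransact,
            code, reinterpret_cast<jlong>(&data), reinterpret_cast<jlong>(reply), flags);
    return res != JNI_FALSE ? NO_ERROR : UNKNOWN_TRANSACTION;
}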
6. IPCThreadState:
- 1. From module <7.7> of https://www.jianshu.com/p/4acd9ee3df12, after the system_server process has been created a Binder thread is spawned, and PoolThread.run triggers the execution of its threadLoop method;
- 2. Combined with module <7.18> of https://www.jianshu.com/p/4acd9ee3df12, SMP.transact in the java layer always goes through BinderProxy to the native-layer BpBinder.transact, which in turn triggers IPCThreadState.transact;
6.1 PoolThread.threadLoop:
class PoolThread : public Thread
{
public:
PoolThread(bool isMain) : mIsMain(isMain)
{
}
protected:
//1--->
/**
 * Analyzed in detail in the threading write-up;
 */
virtual bool threadLoop()
{
/**
 * This triggers IPCThreadState.joinThreadPool; module <6.2>;
 */
IPCThreadState::self()->joinThreadPool(mIsMain);
return false;
}
const bool mIsMain;
};
6.2 IPCThreadState.self/joinThreadPool:
/**
 * Creates the IPCThreadState instance; note it is per-thread (stored in
 * thread-local storage), not a process-wide singleton;
 */
IPCThreadState* IPCThreadState::self()
{
if (gHaveTLS) {
restart:
const pthread_key_t k = gTLS;
IPCThreadState* st = (IPCThreadState*)pthread_getspecific(k);
if (st) return st;
return new IPCThreadState;
}
...
}
void IPCThreadState::joinThreadPool(bool isMain)
{
...
}
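The body is elided above; as a reference, a simplified sketch of what joinThreadPool does (based on the AOSP source of this era, error handling elided):
void IPCThreadState::joinThreadPool(bool isMain)
{
    // Tell the driver this thread is entering the loop (main vs. spawned thread);
    mOut.writeInt32(isMain ? BC_ENTER_LOOPER : BC_REGISTER_LOOPER);
    status_t result;
    do {
        processPendingDerefs();
        // Blocks in talkWithDriver (module <6.3>) and dispatches the next
        // command via executeCommand (module <6.7>);
        result = getAndExecuteCommand();
    } while (result != -ECONNREFUSED && result != -EBADF);
    // Leaving the loop: notify the driver and flush;
    mOut.writeInt32(BC_EXIT_LOOPER);
    talkWithDriver(false);
}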
6.3 IPCThreadState.talkWithDriver:
status_t IPCThreadState::talkWithDriver(bool doReceive)
{
binder_write_read bwr;
const bool needRead = mIn.dataPosition() >= mIn.dataSize();
const size_t outAvail = (!doReceive || needRead) ? mOut.dataSize() : 0;
bwr.write_size = outAvail;
bwr.write_buffer = (uintptr_t)mOut.data();
if (doReceive && needRead) {
bwr.read_size = mIn.dataCapacity();
bwr.read_buffer = (uintptr_t)mIn.data();
} else {
bwr.read_size = 0;
bwr.read_buffer = 0;
}
if ((bwr.write_size == 0) && (bwr.read_size == 0)) return NO_ERROR;
bwr.write_consumed = 0;
bwr.read_consumed = 0;
status_t err;
do {
/**
 * Talk to the binder driver via ioctl, retrying while interrupted (-EINTR);
 */
if (ioctl(mProcess->mDriverFD, BINDER_WRITE_READ, &bwr) >= 0)
err = NO_ERROR;
...
} while (err == -EINTR);
return err;
}
6.4 IPCThreadState.transact:
status_t IPCThreadState::transact(int32_t handle, uint32_t code, const Parcel& data, Parcel* reply, uint32_t flags)
{
status_t err = data.errorCheck();
flags |= TF_ACCEPT_FDS;
if (err == NO_ERROR) {
/**
 * 1. Package the transaction data into mOut; module <6.5>;
 */
err = writeTransactionData(BC_TRANSACTION, flags, handle, code, data, NULL);
}
if ((flags & TF_ONE_WAY) == 0) {
if (reply) {
/**
 * 1. (flags & TF_ONE_WAY) == 0 means a two-way call, so wait here
 *    for the reply; module <6.6>;
 */
err = waitForResponse(reply);
} else {
Parcel fakeReply;
err = waitForResponse(&fakeReply);
}
} else {
err = waitForResponse(NULL, NULL);
}
return err;
}
6.5 IPCThreadState.writeTransactionData:
status_t IPCThreadState::writeTransactionData(int32_t cmd, uint32_t binderFlags,
int32_t handle, uint32_t code, const Parcel& data, status_t* statusBuffer)
{
binder_transaction_data tr;
tr.target.ptr = 0; /* Don't pass uninitialized stack data to a remote process */
tr.target.handle = handle;
tr.code = code;
tr.flags = binderFlags;
tr.cookie = 0;
tr.sender_pid = 0;
tr.sender_euid = 0;
const status_t err = data.errorCheck();
if (err == NO_ERROR) {
tr.data_size = data.ipcDataSize();
tr.data.ptr.buffer = data.ipcData();
tr.offsets_size = data.ipcObjectsCount()*sizeof(binder_size_t);
tr.data.ptr.offsets = data.ipcObjects();
} else if (statusBuffer) {
...
}
mOut.writeInt32(cmd);
mOut.write(&tr, sizeof(tr));
return NO_ERROR;
}
- 1. binder_transaction_data is the carrier: it wraps the target handle plus the Parcel payload (data pointer and object offsets) and is queued into mOut for transmission; its layout is shown below;
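For reference, the layout of binder_transaction_data from the kernel UAPI header (uapi/linux/android/binder.h), comments abridged:
struct binder_transaction_data {
    union {
        __u32 handle;          /* target of a command transaction (BC_TRANSACTION) */
        binder_uintptr_t ptr;  /* target of a return transaction (BR_TRANSACTION) */
    } target;
    binder_uintptr_t cookie;   /* target object cookie; for java services, the JavaBBinder */
    __u32 code;                /* transaction code */
    __u32 flags;               /* e.g. TF_ONE_WAY, TF_ACCEPT_FDS */
    pid_t sender_pid;
    uid_t sender_euid;
    binder_size_t data_size;   /* number of bytes of data */
    binder_size_t offsets_size;/* number of bytes of object offsets */
    union {
        struct {
            binder_uintptr_t buffer;  /* transaction data */
            binder_uintptr_t offsets; /* offsets of flat_binder_objects in the buffer */
        } ptr;
        __u8 buf[8];
    } data;
};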
6.6 IPCThreadState.waitForResponse:
status_t IPCThreadState::waitForResponse(Parcel *reply, status_t *acquireResult)
{
uint32_t cmd;
int32_t err;
while (1) {
/**
 * Keep talking to the driver until a result has been read, then
 * break out of this loop;
 */
if ((err=talkWithDriver()) < NO_ERROR) break;
err = mIn.errorCheck();
if (err < NO_ERROR) break;
if (mIn.dataAvail() == 0) continue;
cmd = (uint32_t)mIn.readInt32();
switch (cmd) {
case BR_TRANSACTION_COMPLETE: ...
case BR_DEAD_REPLY: ...
case BR_FAILED_REPLY: ...
case BR_ACQUIRE_RESULT: ...
case BR_REPLY: ...
default:
/**
 * Process the command that was read and return; module <6.7>;
 */
err = executeCommand(cmd);
break;
}
}
return err;
}
6.7 IPCThreadState.executeCommand:
status_t IPCThreadState::executeCommand(int32_t cmd)
{
BBinder* obj;
RefBase::weakref_type* refs;
status_t result = NO_ERROR;
switch ((uint32_t)cmd) {
case BR_ERROR: ...
case BR_OK: ...
case BR_ACQUIRE: ...
case BR_RELEASE: ...
case BR_INCREFS: ...
case BR_DECREFS: ...
case BR_ATTEMPT_ACQUIRE: ...
case BR_TRANSACTION:
{
/**
 * Read the binder_transaction_data delivered by the driver out of mIn;
 * result holds the read status;
 */
binder_transaction_data tr;
result = mIn.read(&tr, sizeof(tr));
Parcel buffer;
buffer.ipcSetDataReference(
reinterpret_cast<const uint8_t*>(tr.data.ptr.buffer),
tr.data_size,
reinterpret_cast<const binder_size_t*>(tr.data.ptr.offsets),
tr.offsets_size/sizeof(binder_size_t), freeBuffer, this);
Parcel reply;
status_t error;
if (tr.target.ptr) {
    /**
     * tr.cookie points to the target BBinder (for java-layer services, the
     * JavaBBinder created in module <3.5>); its transact ends up in
     * JavaBBinder::onTransact, which calls back into the java layer;
     */
    sp<BBinder> b((BBinder*)tr.cookie);
    error = b->transact(tr.code, buffer, &reply, tr.flags);
} else {
error = the_context_object->transact(tr.code, buffer, &reply, tr.flags);
}
if ((tr.flags & TF_ONE_WAY) == 0) {
sendReply(reply, 0);
}
...
}
break;
case BR_DEAD_BINDER: ...
case BR_CLEAR_DEATH_NOTIFICATION_DONE: ...
case BR_FINISHED: ...
case BR_NOOP: ...
case BR_SPAWN_LOOPER: ...
}
return result;
}
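For the non-oneway branch above, sendReply packages the reply with BC_REPLY and goes back through the driver; a sketch based on AOSP:
status_t IPCThreadState::sendReply(const Parcel& reply, uint32_t flags)
{
    status_t err;
    status_t statusBuffer;
    // Queue a BC_REPLY command carrying the reply parcel (handle -1, code 0);
    err = writeTransactionData(BC_REPLY, flags, -1, 0, reply, &statusBuffer);
    if (err < NO_ERROR) return err;
    // Flush to the driver and wait for BR_TRANSACTION_COMPLETE;
    return waitForResponse(NULL, NULL);
}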