参考链接:
从一个简单的AIDL实现看binder原理(一)简单的AIDL实现
从一个简单的AIDL实现看binder原理(二)bindService的调用过程
从一个简单的AIDL实现看binder原理(三)bindService调用过程中Binder的传递
在上一篇博文中,我们分析了在bindService的过程中binder对象是怎么从Service进程传递到Activity所在进程的,但是为什么传递的时候是一个Binder,最后接收到的却是个BinderProxy并没有分析到,本篇将分析Binder对象是在什么时机转换为BinderProxy对象的
首先看一下Binder和BinderProxy的类结构:
从上图可以看出,Binder和BinderProxy是实现了IBinder接口的两个类,Binder和BinderProxy是不存在继承关系的,所以不可能进行强转,最有可能的触发节点就是在IPC的过程中,接下来我们验证一下这种猜测
我们继续从ActivityThread#handleBindService方法说起
private void handleBindService(BindServiceData data) { // runs on the Service process's main thread in response to AMS's bind request
Service s = mServices.get(data.token); // look up the Service instance previously created for this token
if (DEBUG_SERVICE)
Slog.v(TAG, "handleBindService s=" + s + " rebind=" + data.rebind);
if (s != null) {
try {
data.intent.setExtrasClassLoader(s.getClassLoader());
data.intent.prepareToEnterProcess();
try {
if (!data.rebind) {
IBinder binder = s.onBind(data.intent); // first bind: ask the Service for its local Binder object
ActivityManager.getService().publishService( // IPC to AMS: hand the Binder over so the client can receive it
data.token, data.intent, binder);
} else {
s.onRebind(data.intent); // rebind: no new Binder needs to be published
ActivityManager.getService().serviceDoneExecuting(
data.token, SERVICE_DONE_EXECUTING_ANON, 0, 0);
}
ensureJitEnabled();
} catch (RemoteException ex) {
throw ex.rethrowFromSystemServer();
}
} catch (Exception e) {
if (!mInstrumentation.onException(s, e)) {
throw new RuntimeException(
"Unable to bind to service " + s
+ " with " + data.intent + ": " + e.toString(), e);
}
}
}
}
在代码的第11行这里调用了Service的onBind方法,我们回顾一下第一篇博文中Service的实现:
@Nullable
@Override
// Returns this Service's binder: the AIDL stub, which is a concrete
// android.os.Binder subclass (a local Binder object, not a proxy).
public IBinder onBind(Intent intent) {
StudentStub stub = new StudentStub();
Log.d("ipcLog", "stub = " + stub.getClass());
return stub;
}
// Server-side implementation of the AIDL interface. IAIDLInterface.Stub
// extends android.os.Binder, so instances of this class are Binder objects.
public class StudentStub extends IAIDLInterface.Stub {
@Override
public List<Student> getStudent() throws RemoteException {
// Returns the shared list; invoked via binder from the client process.
return studentList;
}
@Override
public void setStudent(Student student) throws RemoteException {
// Appends to the shared list; invoked via binder from the client process.
studentList.add(student);
}
}
从这里可以看出,onBind的返回值其实就是StudentStub对象,也就是一个Binder对象,
我们继续看第12行代码:
ActivityManager.getService().publishService(
data.token, data.intent, binder);
这是一个IPC过程,我们回顾一下第一篇博文中IPC的发起:
@Override
public void onClick(View v) {
// Demo entry points: bind the remote service, add a Student via IPC,
// and fetch the student list via IPC.
switch (v.getId()) {
case R.id.btn_start:
// Bind to RemoteService; 'connection' receives the IBinder (a BinderProxy in this process).
bindService(new Intent(this, RemoteService.class), connection, BIND_AUTO_CREATE);
break;
case R.id.btn_add:
int code = Integer.valueOf(codeEt.getText().toString().trim());
String name = nameEt.getText().toString().trim();
try {
// 'proxy' is the AIDL interface backed by the BinderProxy — this call crosses processes.
proxy.setStudent(new Student(code, name));
} catch (RemoteException e) {
e.printStackTrace();
}
break;
case R.id.btn_get:
try {
List<Student> students = proxy.getStudent(); // IPC: fetch the list from the service process
if (students != null && students.size() > 0) {
StringBuilder sb = new StringBuilder();
for (Student student : students) {
sb.append(student.toString());
sb.append("\n");
}
studentListText.setText(sb.toString());
}
} catch (RemoteException e) {
e.printStackTrace();
}
break;
default:
break;
}
}
从上述代码可以看到,在我们发起IPC的请求时,使用的是BinderProxy的对象,同理我们可以得出,在跨进程调用ActivityManagerService中的方法时,也是使用AMS的BinderProxy对象,这个对象的实现是在IActivityManager的Proxy类中,Proxy类是Stub的内部类,而Stub是IActivityManager的内部类,其类文件结构如图所示:
看到这里是不是有一种豁然开朗的感觉?原来大名鼎鼎的ActivityMangerService的实现原理和我们普通的AIDL是一模一样的!
我们继续跟进publishService方法的实现,首先看发起端,即IActivityManager$Stub中Proxy的实现:
@Override
// Client-side AIDL proxy method: marshals the arguments into a Parcel and
// sends TRANSACTION_publishService through mRemote (a BinderProxy) to AMS.
public void publishService(android.os.IBinder token, android.content.Intent intent, android.os.IBinder service) throws android.os.RemoteException {
android.os.Parcel _data = android.os.Parcel.obtain();
android.os.Parcel _reply = android.os.Parcel.obtain();
try {
_data.writeInterfaceToken(DESCRIPTOR);
_data.writeStrongBinder(token);
if ((intent != null)) {
_data.writeInt(1);
intent.writeToParcel(_data, 0);
} else {
_data.writeInt(0);
}
_data.writeStrongBinder(service); // the Service's Binder — flattened here for cross-process transfer
mRemote.transact(Stub.TRANSACTION_publishService, _data, _reply, 0); // synchronous IPC
_reply.readException(); // re-throws any exception marshalled back from the server side
} finally {
_reply.recycle();
_data.recycle();
}
}
方法的第三个参数就是我们要传递的RemoteService中的Binder,在这里将他传入了Parcel的writeStrongBinder方法的参数里,继续跟进Parcel的writeStrongBinder方法:
// Writes an IBinder into this Parcel; delegates to the native Parcel
// (mNativePtr) via JNI, where the Java object is flattened for the driver.
public final void writeStrongBinder(IBinder val) {
nativeWriteStrongBinder(mNativePtr, val);
}
这里转入了jni层的nativeWriteStrongBinder方法继续执行,具体实现在android_os_Parcel.cpp中:
// JNI backend of Parcel.nativeWriteStrongBinder: converts the Java IBinder
// to a native sp<IBinder> (via ibinderForJavaObject) and writes it into the
// native Parcel; raises a Java exception on failure.
static void android_os_Parcel_writeStrongBinder(JNIEnv* env, jclass clazz, jlong nativePtr, jobject object)
{
Parcel* parcel = reinterpret_cast<Parcel*>(nativePtr);
if (parcel != NULL) {
const status_t err = parcel->writeStrongBinder(ibinderForJavaObject(env, object));
if (err != NO_ERROR) {
signalExceptionForError(env, clazz, err);
}
}
}
这里转到了Parcel.cpp的writeStrongBinder方法中,其传递的参数是android_util_Binder.cpp的ibinderForJavaObject方法的返回值:
// android_util_Binder.cpp
// Maps a Java IBinder to its native counterpart:
//  - Java Binder      -> JavaBBinder (a BBinder) obtained from its JavaBBinderHolder
//  - Java BinderProxy -> the native IBinder (BpBinder) stored in its mObject field
sp<IBinder> ibinderForJavaObject(JNIEnv* env, jobject obj)
{
if (obj == NULL) return NULL;
// Java-layer Binder object
if (env->IsInstanceOf(obj, gBinderOffsets.mClass)) {
JavaBBinderHolder* jbh = (JavaBBinderHolder*)
env->GetLongField(obj, gBinderOffsets.mObject);
return jbh != NULL ? jbh->get(env, obj) : NULL; // see JavaBBinderHolder::get()
}
// Java-layer BinderProxy object
if (env->IsInstanceOf(obj, gBinderProxyOffsets.mClass)) {
return (IBinder*)env->GetLongField(obj, gBinderProxyOffsets.mObject);
}
return NULL;
}
这个方法的主要工作是根据Binder(Java)对象生成或获取JavaBBinderHolder(C++)对象,JavaBBinderHolder对象的地址保存在Binder.mObject成员变量中,再通过它拿到真正用于跨进程传输的JavaBBinder对象.
// android_util_Binder.cpp
// Lazily creates (and weakly caches) the JavaBBinder that wraps the given
// Java Binder object, so repeated calls return the same native object while
// it is alive.
sp<JavaBBinder> get(JNIEnv* env, jobject obj)
{
AutoMutex _l(mLock);
sp<JavaBBinder> b = mBinder.promote(); // try to reuse a still-alive JavaBBinder
if (b == NULL) {
// First call (or previous instance died): create the JavaBBinder
b = new JavaBBinder(env, obj);
mBinder = b; // keep only a weak reference in the holder
}
return b;
}
JavaBBinder的初始化在这里:
// android_util_Binder.cpp
// JavaBBinder constructor: holds a JNI global reference to the Java Binder
// object so incoming transactions can be dispatched back into Java.
JavaBBinder(JNIEnv* env, jobject object)
: mVM(jnienv_to_javavm(env)), mObject(env->NewGlobalRef(object))
{
android_atomic_inc(&gNumLocalRefs); // bookkeeping: count of live local refs
incRefsCreated(env);
}
创建JavaBBinder,该对象继承于BBinder对象。
data.writeStrongBinder(service)最终等价于parcel->writeStrongBinder(new JavaBBinder(env, obj));
也就是将java的Binder对象转成了C++的BBinder对象
分析过参数后,继续分析Parcel.cpp的writeStrongBinder方法:
// Parcel.cpp
// Writes a strong IBinder reference into this Parcel by flattening it into
// a flat_binder_object the binder driver understands.
status_t Parcel::writeStrongBinder(const sp<IBinder>& val)
{
return flatten_binder(ProcessState::self(), val, this);
}
// Flattens 'binder' into a flat_binder_object:
//  - local BBinder  -> BINDER_TYPE_BINDER, cookie holds the object pointer
//  - remote BpBinder -> BINDER_TYPE_HANDLE, handle holds the driver handle
status_t flatten_binder(const sp<ProcessState>& /*proc*/,
const sp<IBinder>& binder, Parcel* out)
{
flat_binder_object obj;
obj.flags = 0x7f | FLAT_BINDER_FLAG_ACCEPTS_FDS;
if (binder != NULL) {
IBinder *local = binder->localBinder(); // non-null only for BBinder
if (!local) {
BpBinder *proxy = binder->remoteBinder(); // non-null only for BpBinder
const int32_t handle = proxy ? proxy->handle() : 0;
obj.type = BINDER_TYPE_HANDLE; // remote Binder
obj.binder = 0;
obj.handle = handle;
obj.cookie = 0;
} else {
obj.type = BINDER_TYPE_BINDER; // local Binder: this branch is taken here
obj.binder = reinterpret_cast<uintptr_t>(local->getWeakRefs());
obj.cookie = reinterpret_cast<uintptr_t>(local);
}
} else {
obj.type = BINDER_TYPE_BINDER; // null binder flattened as an empty local entry
obj.binder = 0;
obj.cookie = 0;
}
return finish_flatten_binder(binder, obj, out);
}
对于Binder实体,则cookie记录Binder实体的指针;
对于Binder代理,则用handle记录Binder代理的句柄;
binder->localBinder()的实现可以看Binder.cpp,binder->remoteBinder()的实现可以看BpBinder.cpp
// Binder.cpp
// A BBinder IS the local binder, so it returns itself.
BBinder* BBinder::localBinder()
{
return this;
}
// Default: a generic IBinder is not a remote proxy.
BpBinder* IBinder::remoteBinder()
{
return NULL;
}
// BpBinder.cpp
// Default: a generic IBinder is not a local binder.
BBinder* IBinder::localBinder()
{
return NULL;
}
// A BpBinder IS the remote proxy, so it returns itself.
BpBinder* BpBinder::remoteBinder()
{
return this;
}
从这里可以看出,如果是BBinder对象,localBinder方法会返回其自身,如果是BpBinder对象,remoteBinder方法会返回其自身,因为我们在Binder传递过程中,传递的是Binder的实体,即Java的Binder对象、C++的BBinder对象,因此Parcel.cpp的flatten_binder方法会走入下面的分支:
obj.type = BINDER_TYPE_BINDER; //本地Binder,进入该分支
obj.binder = reinterpret_cast<uintptr_t>(local->getWeakRefs());
obj.cookie = reinterpret_cast<uintptr_t>(local);
到这里,Binder对象的扁平化处理就完成了
接下来接着看IActivityManager#publishService方法,在执行完成_data.writeStrongBinder(service)后继续执行了 mRemote.transact(Stub.TRANSACTION_publishService, _data, _reply, 0),这个mRemote是BinderProxy的一个实例,具体为啥可参考第一篇博文,即AIDL的实现。
因此,这里的执行就转到了Binder.java$BinderProxy#transact中:
// BinderProxy.transact: entry point for outgoing IPC on a proxy. After
// sanity checks and optional tracing it hands the call to transactNative(),
// which crosses into the JNI layer (android_os_BinderProxy_transact).
public boolean transact(int code, Parcel data, Parcel reply, int flags) throws RemoteException {
Binder.checkParcel(this, code, data, "Unreasonably large binder buffer");
if (mWarnOnBlocking && ((flags & FLAG_ONEWAY) == 0)) {
// For now, avoid spamming the log by disabling after we've logged
// about this interface at least once
mWarnOnBlocking = false;
Log.w(Binder.TAG, "Outgoing transactions from this process must be FLAG_ONEWAY",
new Throwable());
}
final boolean tracingEnabled = Binder.isTracingEnabled();
if (tracingEnabled) {
final Throwable tr = new Throwable();
Binder.getTransactionTracker().addTrace(tr);
StackTraceElement stackTraceElement = tr.getStackTrace()[1];
Trace.traceBegin(Trace.TRACE_TAG_ALWAYS,
stackTraceElement.getClassName() + "." + stackTraceElement.getMethodName());
}
try {
return transactNative(code, data, reply, flags); // JNI: continues in native code
} finally {
if (tracingEnabled) {
Trace.traceEnd(Trace.TRACE_TAG_ALWAYS);
}
}
}
在方法的结尾,又一次转入了jni层执行transactNative方法,这个方法的实现是android_util_Binder.cpp的android_os_BinderProxy_transact方法:
// JNI backend of BinderProxy.transactNative: unwraps the Java objects to
// their native counterparts and forwards the transaction to the native
// IBinder (for a BinderProxy this is a BpBinder).
static jboolean android_os_BinderProxy_transact(JNIEnv* env, jobject obj,
jint code, jobject dataObj, jobject replyObj, jint flags) // throws RemoteException
{
...
// convert the Java Parcel to its native Parcel
Parcel* data = parcelForJavaObject(env, dataObj);
...
// convert the Java Parcel to its native Parcel
Parcel* reply = parcelForJavaObject(env, replyObj);
// retrieve the native IBinder (a BpBinder) stored in the Java BinderProxy's mObject field
IBinder* target = (IBinder*)env->GetLongField(obj, gBinderProxyOffsets.mObject);
...
status_t err = target->transact(code, *data, reply, flags);
...
}
这段代码主要是对java层的对象进行了一次转换后转入native层进行调用,因为mRemote是BinderProxy对象,因此target是一个BpBinder对象,最终 target->transact转入BpBinder.cpp->transact方法中:
// BpBinder.cpp
// Proxy-side transact: forwards the call to IPCThreadState together with
// mHandle, the driver handle identifying the remote binder entity.
status_t BpBinder::transact(
uint32_t code, const Parcel& data, Parcel* reply, uint32_t flags)
{
// Once a binder has died, it will never come back to life.
if (mAlive) {
status_t status = IPCThreadState::self()->transact(
mHandle, code, data, reply, flags);
if (status == DEAD_OBJECT) mAlive = 0; // remember death; future calls fail fast
return status;
}
return DEAD_OBJECT;
}
其中data就是我们要传递的Parcel对象。
在这里又继续调用了IPCThreadState::self()->transact方法,从本篇开始到现在,这些逻辑一直在RemoteService所在进程中执行,一直没有看到有跨进程调用的迹象,当我们看到IPC开头的这个类名时,我们知道,离真正开始跨进程调用不远了!OK,接下来继续分析IPCThreadState.cpp的transact方法:
// IPCThreadState.cpp
tatus_t IPCThreadState::transact(int32_t handle,
uint32_t code, const Parcel& data,
Parcel* reply, uint32_t flags)
{
status_t err = data.errorCheck();
flags |= TF_ACCEPT_FDS;
IF_LOG_TRANSACTIONS() {
TextOutput::Bundle _b(alog);
alog << "BC_TRANSACTION thr " << (void*)pthread_self() << " / hand "
<< handle << " / code " << TypeCode(code) << ": "
<< indent << data << dedent << endl;
}
if (err == NO_ERROR) {
LOG_ONEWAY(">>>> SEND from pid %d uid %d %s", getpid(), getuid(),
(flags & TF_ONE_WAY) == 0 ? "READ REPLY" : "ONE WAY");
err = writeTransactionData(BC_TRANSACTION, flags, handle, code, data, NULL);
}
if (err != NO_ERROR) {
if (reply) reply->setError(err);
return (mLastError = err);
}
if ((flags & TF_ONE_WAY) == 0) {
#if 0
if (code == 4) { // relayout
ALOGI(">>>>>> CALLING transaction 4");
} else {
ALOGI(">>>>>> CALLING transaction %d", code);
}
#endif
if (reply) {
err = waitForResponse(reply);
} else {
Parcel fakeReply;
err = waitForResponse(&fakeReply);
}
#if 0
if (code == 4) { // relayout
ALOGI("<<<<<< RETURNING transaction 4");
} else {
ALOGI("<<<<<< RETURNING transaction %d", code);
}
#endif
IF_LOG_TRANSACTIONS() {
TextOutput::Bundle _b(alog);
alog << "BR_REPLY thr " << (void*)pthread_self() << " / hand "
<< handle << ": ";
if (reply) alog << indent << *reply << dedent << endl;
else alog << "(none requested)" << endl;
}
} else {
err = waitForResponse(NULL, NULL);
}
return err;
}
其中调用writeTransactionData即是写入要传输数据的过程,它的实现如下:
// IPCThreadState.cpp
// Fills a binder_transaction_data from the Parcel and queues it, preceded by
// the BC_TRANSACTION command code, into mOut for the next talkWithDriver().
status_t IPCThreadState::writeTransactionData(int32_t cmd, uint32_t binderFlags,
int32_t handle, uint32_t code, const Parcel& data, status_t* statusBuffer)
{
binder_transaction_data tr;
tr.target.ptr = 0;
tr.target.handle = handle; // driver handle of the target (NOTE(review): the original comment "handle = 0" came from an addService trace; in this article's flow it is AMS's handle)
tr.code = code; // transaction code (here TRANSACTION_publishService; the original comment said ADD_SERVICE_TRANSACTION — a leftover from a different trace)
tr.flags = binderFlags; // binderFlags = 0
tr.cookie = 0;
tr.sender_pid = 0;
tr.sender_euid = 0;
// data: the Parcel built by the proxy (NOTE(review): original comment referred to the Media service's registration data)
const status_t err = data.errorCheck();
if (err == NO_ERROR) {
tr.data_size = data.ipcDataSize(); // mDataSize
tr.data.ptr.buffer = data.ipcData(); // mData
tr.offsets_size = data.ipcObjectsCount()*sizeof(binder_size_t); // mObjectsSize
tr.data.ptr.offsets = data.ipcObjects(); // mObjects: offsets of flat_binder_objects in the buffer
} else if (statusBuffer) {
...
} else {
return (mLastError = err);
}
mOut.writeInt32(cmd); // cmd = BC_TRANSACTION
mOut.write(&tr, sizeof(tr)); // append the binder_transaction_data payload
return NO_ERROR;
}
这里的handle是BpBinder对象所持有的mHandle,即Binder驱动中用来标识其所对应的Binder本地实体(BBinder)的句柄,
binder_transaction_data结构体是binder驱动通信的数据结构,该过程最终是把Binder请求码BC_TRANSACTION和binder_transaction_data结构体写入到mOut
接下来会执行到IPCThreadState.cpp->waitForResponse方法:
// Drives the transaction to completion: repeatedly exchanges buffers with
// the binder driver (talkWithDriver) and dispatches the BR_* return codes
// read from mIn until a reply or terminal error arrives.
status_t IPCThreadState::waitForResponse(Parcel *reply, status_t *acquireResult)
{
int32_t cmd;
int32_t err;
while (1) {
// This ioctl()-based exchange is where execution actually enters the
// binder driver and the calling thread may block.
if ((err=talkWithDriver()) < NO_ERROR) break;
...
if (mIn.dataAvail() == 0) continue;
cmd = mIn.readInt32();
switch (cmd) {
case BR_TRANSACTION_COMPLETE: ...
case BR_DEAD_REPLY: ...
case BR_FAILED_REPLY: ...
case BR_ACQUIRE_RESULT: ...
case BR_REPLY: ...
goto finish;
default:
// Other commands (e.g. incoming work) are handled generically.
err = executeCommand(cmd);
if (err != NO_ERROR) goto finish;
break;
}
}
...
return err;
}
注意看talkWithDriver方法,在这里会正式进入Android内核,即Binder驱动中进行跨进程传递和转换。
具体的细节篇幅有限,我们下一篇再讲。