(2) Binder driver analysis

Key data structures


struct binder_proc {
    struct hlist_node proc_node;      /* element in the global binder_procs list */
    struct rb_root threads;           /* binder_threads of this proc, keyed by pid */
    struct rb_root nodes;             /* binder nodes owned by this proc */
    struct rb_root refs_by_desc;      /* refs to other procs' nodes, keyed by handle */
    struct rb_root refs_by_node;      /* the same refs, keyed by node address */
    struct list_head waiting_threads; /* threads currently waiting for work */
    int pid;                          /* pid of the group leader */
    struct task_struct *tsk;          /* group leader's task_struct */
    const struct cred *cred;          /* credentials captured at open() */
    struct hlist_node deferred_work_node;
    int deferred_work;
    int outstanding_txns;             /* transactions not yet complete (freeze support) */
    bool is_dead;
    bool is_frozen;
    bool sync_recv;
    bool async_recv;
    wait_queue_head_t freeze_wait;    /* waits here for outstanding_txns to drain */

    struct list_head todo;            /* work for the process as a whole */
    struct binder_stats stats;
    struct list_head delivered_death;
    int max_threads;                  /* cap set via BINDER_SET_MAX_THREADS */
    int requested_threads;
    int requested_threads_started;
    int tmp_ref;
    long default_priority;            /* nice value of the opening thread */
    struct dentry *debugfs_entry;
    struct binder_alloc alloc;        /* per-process buffer allocator */
    struct binder_context *context;   /* the binder device (context) this proc opened */
    spinlock_t inner_lock;
    spinlock_t outer_lock;
    struct dentry *binderfs_entry;
    bool oneway_spam_detection_enabled;
};

struct binder_buffer {
    struct list_head entry;  /* entry in alloc->buffers: all buffers, by address */
    struct rb_node rb_node;  /* in free_buffers (keyed by size) or
                              * allocated_buffers (keyed by address) */
    unsigned free:1;              /* on the free_buffers tree? */
    unsigned clear_on_free:1;     /* zero the contents when freed */
    unsigned allow_user_free:1;   /* user may free via BC_FREE_BUFFER */
    unsigned async_transaction:1; /* belongs to an async (oneway) transaction */
    unsigned oneway_spam_suspect:1;
    unsigned debug_id:27;         /* unique id for debugging */

    struct binder_transaction *transaction; /* transaction using this buffer */

    struct binder_node *target_node;  /* node this transaction targets */
    size_t data_size;                 /* size of the data area */
    size_t offsets_size;              /* size of the offsets array */
    size_t extra_buffers_size;        /* extra objects (sg lists etc.) */
    void __user *user_data;           /* user address of this buffer in the mmap */
    int    pid;                       /* pid of the caller, for accounting */
};

struct binder_alloc {
    struct mutex mutex;
    struct vm_area_struct *vma;        /* vma of the mmap'ed buffer space */
    struct mm_struct *vma_vm_mm;       /* mm_struct the vma belongs to */
    void __user *buffer;               /* user address of the start of the mapping */
    struct list_head buffers;          /* all buffers, ordered by address */
    struct rb_root free_buffers;       /* free buffers, rb-tree keyed by size */
    struct rb_root allocated_buffers;  /* in-use buffers, rb-tree keyed by address */
    size_t free_async_space;           /* space still available for async transactions */
    struct binder_lru_page *pages;     /* one entry per page of the mapping */
    size_t buffer_size;                /* total size of the mapping (at most 4 MB) */
    uint32_t buffer_free;
    int pid;
    size_t pages_high;                 /* high-water mark of pages in use */
    bool oneway_spam_detected;
};

binder_thread represents the thread currently operating on the binder device.

struct binder_thread {
    struct binder_proc *proc;          /* owning process */
    struct rb_node rb_node;            /* element in proc->threads, keyed by pid */
    struct list_head waiting_thread_node;
    int pid;
    int looper;              /* looper state bitmask, only modified by this thread */
    bool looper_need_return; /* can be written by other thread */
    struct binder_transaction *transaction_stack; /* in-flight transactions */
    struct list_head todo;             /* work queued for this thread */
    bool process_todo;
    struct binder_error return_error;
    struct binder_error reply_error;
    wait_queue_head_t wait;            /* this thread sleeps here waiting for work */
    struct binder_stats stats;
    atomic_t tmp_ref;
    bool is_dead;
};
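
The looper field is a bitmask of looper states. For reference, these are the flag values defined in the same driver source (drivers/android/binder.c):

enum {
    BINDER_LOOPER_STATE_REGISTERED  = 0x01, /* spawned via BC_REGISTER_LOOPER */
    BINDER_LOOPER_STATE_ENTERED     = 0x02, /* entered via BC_ENTER_LOOPER */
    BINDER_LOOPER_STATE_EXITED      = 0x04,
    BINDER_LOOPER_STATE_INVALID     = 0x08,
    BINDER_LOOPER_STATE_WAITING     = 0x10, /* blocked waiting for work */
    BINDER_LOOPER_STATE_POLL        = 0x20,
};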

ioctl commands

#define BINDER_WRITE_READ       _IOWR('b', 1, struct binder_write_read)
#define BINDER_SET_IDLE_TIMEOUT     _IOW('b', 3, __s64)
#define BINDER_SET_MAX_THREADS      _IOW('b', 5, __u32)
#define BINDER_SET_IDLE_PRIORITY    _IOW('b', 6, __s32)
#define BINDER_SET_CONTEXT_MGR      _IOW('b', 7, __s32)
#define BINDER_THREAD_EXIT      _IOW('b', 8, __s32)
#define BINDER_VERSION          _IOWR('b', 9, struct binder_version)
#define BINDER_GET_NODE_DEBUG_INFO  _IOWR('b', 11, struct binder_node_debug_info)
#define BINDER_GET_NODE_INFO_FOR_REF    _IOWR('b', 12, struct binder_node_info_for_ref)
#define BINDER_SET_CONTEXT_MGR_EXT  _IOW('b', 13, struct flat_binder_object)
#define BINDER_FREEZE           _IOW('b', 14, struct binder_freeze_info)
#define BINDER_GET_FROZEN_INFO      _IOWR('b', 15, struct binder_frozen_status_info)
#define BINDER_ENABLE_ONEWAY_SPAM_DETECTION _IOW('b', 16, __u32)
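
For illustration (not part of the driver source), a minimal user-space program exercising one of these commands could look like the sketch below, which opens the device and queries BINDER_VERSION. It assumes the uapi header <linux/android/binder.h> is installed:

/* Minimal sketch: query the binder protocol version from user space. */
#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/android/binder.h>

int main(void)
{
    int fd = open("/dev/binder", O_RDWR | O_CLOEXEC); /* triggers binder_open */
    if (fd < 0) {
        perror("open /dev/binder");
        return 1;
    }

    struct binder_version ver = { 0 };
    if (ioctl(fd, BINDER_VERSION, &ver) < 0) {        /* dispatched to binder_ioctl */
        perror("ioctl BINDER_VERSION");
        return 1;
    }
    printf("binder protocol version: %d\n", ver.protocol_version);
    return 0;
}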

Command protocol (BC_* commands sent from user space to the driver)

enum binder_driver_command_protocol {
    BC_TRANSACTION = _IOW('c', 0, struct binder_transaction_data),
    BC_REPLY = _IOW('c', 1, struct binder_transaction_data),
    /*
     * binder_transaction_data: the sent command.
     */

    BC_ACQUIRE_RESULT = _IOW('c', 2, __s32),
    /*
     * not currently supported
     * int:  0 if the last BR_ATTEMPT_ACQUIRE was not successful.
     * Else you have acquired a primary reference on the object.
     */

    BC_FREE_BUFFER = _IOW('c', 3, binder_uintptr_t),
    /*
     * void *: ptr to transaction data received on a read
     */

    BC_INCREFS = _IOW('c', 4, __u32),
    BC_ACQUIRE = _IOW('c', 5, __u32),
    BC_RELEASE = _IOW('c', 6, __u32),
    BC_DECREFS = _IOW('c', 7, __u32),
    /*
     * int: descriptor
     */

    BC_INCREFS_DONE = _IOW('c', 8, struct binder_ptr_cookie),
    BC_ACQUIRE_DONE = _IOW('c', 9, struct binder_ptr_cookie),
    /*
     * void *: ptr to binder
     * void *: cookie for binder
     */

    BC_ATTEMPT_ACQUIRE = _IOW('c', 10, struct binder_pri_desc),
    /*
     * not currently supported
     * int: priority
     * int: descriptor
     */

    BC_REGISTER_LOOPER = _IO('c', 11),
    /*
     * No parameters.
     * Register a spawned looper thread with the device.
     */

    BC_ENTER_LOOPER = _IO('c', 12),
    BC_EXIT_LOOPER = _IO('c', 13),
    /*
     * No parameters.
     * These two commands are sent as an application-level thread
     * enters and exits the binder loop, respectively.  They are
     * used so the binder can have an accurate count of the number
     * of looping threads it has available.
     */

    BC_REQUEST_DEATH_NOTIFICATION = _IOW('c', 14,
                        struct binder_handle_cookie),
    /*
     * int: handle
     * void *: cookie
     */

    BC_CLEAR_DEATH_NOTIFICATION = _IOW('c', 15,
                        struct binder_handle_cookie),
    /*
     * int: handle
     * void *: cookie
     */

    BC_DEAD_BINDER_DONE = _IOW('c', 16, binder_uintptr_t),
    /*
     * void *: cookie
     */

    BC_TRANSACTION_SG = _IOW('c', 17, struct binder_transaction_data_sg),
    BC_REPLY_SG = _IOW('c', 18, struct binder_transaction_data_sg),
    /*
     * binder_transaction_data_sg: the sent command.
     */
};
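
Although these BC_ values are built with the _IOW/_IO macros, they are not ioctl numbers in their own right: user space packs them into the write buffer of a struct binder_write_read and submits that with the BINDER_WRITE_READ ioctl. A minimal sketch (fd is assumed to be an open binder descriptor):

/* Sketch: issue BC_ENTER_LOOPER through BINDER_WRITE_READ, the way
 * libbinder's IPCThreadState delivers BC_ commands. */
#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/android/binder.h>

static int enter_looper(int fd)
{
    uint32_t cmd = BC_ENTER_LOOPER;          /* single command, no payload */
    struct binder_write_read bwr;

    memset(&bwr, 0, sizeof(bwr));
    bwr.write_buffer = (binder_uintptr_t)(uintptr_t)&cmd;
    bwr.write_size = sizeof(cmd);
    /* read_size stays 0: we only write commands, we don't block for work */
    return ioctl(fd, BINDER_WRITE_READ, &bwr);
}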

Binder driver initialization

If binderfs is not in use, the binder devices are registered directly and init_binderfs is an empty stub. Since Android 8.0, binder_devices_param is "binder,hwbinder,vndbinder".

static int __init binder_init(void)
{
    int ret;
    char *device_name, *device_tmp;
    struct binder_device *device;
    struct hlist_node *tmp;
    char *device_names = NULL;

    if (!IS_ENABLED(CONFIG_ANDROID_BINDERFS) &&
        strcmp(binder_devices_param, "") != 0) {
        /*
        * Copy the module_parameter string, because we don't want to
        * tokenize it in-place.
         */
        device_names = kstrdup(binder_devices_param, GFP_KERNEL);
        if (!device_names) {
            ret = -ENOMEM;
            goto err_alloc_device_names_failed;
        }

        device_tmp = device_names;
        while ((device_name = strsep(&device_tmp, ","))) {
            ret = init_binder_device(device_name);
            if (ret)
                goto err_init_binder_device_failed;
        }
    }

    ret = init_binderfs();
    if (ret)
        goto err_init_binder_device_failed;

    return ret;

err_init_binder_device_failed:
    hlist_for_each_entry_safe(device, tmp, &binder_devices, hlist) {
        misc_deregister(&device->miscdev);
        hlist_del(&device->hlist);
        kfree(device);
    }
    kfree(device_names);
    return ret;
}

If binderfs is enabled, the binder devices are not registered immediately; instead, the binder filesystem type is registered:


static struct file_system_type binder_fs_type = {
    .name           = "binder",
    .init_fs_context    = binderfs_init_fs_context,
    .parameters     = binderfs_fs_parameters,
    .kill_sb        = kill_litter_super,
    .fs_flags       = FS_USERNS_MOUNT,
};

int __init init_binderfs(void)
{
    int ret;
    const char *name;
    size_t len;

    /* Allocate new major number for binderfs. */
    ret = alloc_chrdev_region(&binderfs_dev, 0, BINDERFS_MAX_MINOR,
                  "binder");
    if (ret)
        return ret;

    ret = register_filesystem(&binder_fs_type);
    if (ret) {
        unregister_chrdev_region(binderfs_dev, BINDERFS_MAX_MINOR);
        return ret;
    }

    return ret;
}

Devices registered this way are added to the binder_devices hlist. Note that devices created dynamically through binderfs are not added to this list.

static int __init init_binder_device(const char *name)
{
    int ret;
    struct binder_device *binder_device;

    binder_device = kzalloc(sizeof(*binder_device), GFP_KERNEL);
    if (!binder_device)
        return -ENOMEM;

    binder_device->miscdev.fops = &binder_fops;
    binder_device->miscdev.minor = MISC_DYNAMIC_MINOR;
    binder_device->miscdev.name = name;

    refcount_set(&binder_device->ref, 1);
    binder_device->context.binder_context_mgr_uid = INVALID_UID;
    binder_device->context.name = name;
    mutex_init(&binder_device->context.context_mgr_node_lock);

    ret = misc_register(&binder_device->miscdev);
    if (ret < 0) {
        kfree(binder_device);
        return ret;
    }

    hlist_add_head(&binder_device->hlist, &binder_devices);
    return ret;
}

const struct file_operations binder_fops = {
    .owner = THIS_MODULE,
    .poll = binder_poll,
    .unlocked_ioctl = binder_ioctl,
    .compat_ioctl = compat_ptr_ioctl,
    .mmap = binder_mmap,
    .open = binder_open,
    .flush = binder_flush,
    .release = binder_release,
};

miscdevice.fops associates the registered device node with the file operations above.

binder_open

(1) Allocate a binder_proc.
(2) Initialize the proc's inner_lock and outer_lock spinlocks. tsk is set to the process's group leader (the main thread), and default_priority to the current thread's nice value.
(3) Store the binder_proc in filp->private_data so later file operations can retrieve it.
(4) Check whether a binder_proc with this PID already exists. Binder is for cross-process communication, so one per process and device is enough; here the existing_pid flag is only used to avoid creating duplicate debugfs/binderfs entries when the same process opens several binder contexts.
(5) Add the binder_proc to the global binder_procs list.


static int binder_open(struct inode *nodp, struct file *filp)
{
    struct binder_proc *proc, *itr;
    struct binder_device *binder_dev;
    struct binderfs_info *info;
    struct dentry *binder_binderfs_dir_entry_proc = NULL;
    bool existing_pid = false;

    binder_debug(BINDER_DEBUG_OPEN_CLOSE, "%s: %d:%d\n", __func__,
             current->group_leader->pid, current->pid);
    //(1)
    proc = kzalloc(sizeof(*proc), GFP_KERNEL); 
    if (proc == NULL)
        return -ENOMEM;

  // (2)
    spin_lock_init(&proc->inner_lock);
    spin_lock_init(&proc->outer_lock);
    get_task_struct(current->group_leader);
    proc->tsk = current->group_leader;
    proc->cred = get_cred(filp->f_cred);
    INIT_LIST_HEAD(&proc->todo);
    init_waitqueue_head(&proc->freeze_wait);
    proc->default_priority = task_nice(current);
    /* binderfs stashes devices in i_private */
    if (is_binderfs_device(nodp)) {
        binder_dev = nodp->i_private;
        info = nodp->i_sb->s_fs_info;
        binder_binderfs_dir_entry_proc = info->proc_log_dir;
    } else {
        binder_dev = container_of(filp->private_data,
                      struct binder_device, miscdev);
    }
    refcount_inc(&binder_dev->ref);
    proc->context = &binder_dev->context;
    binder_alloc_init(&proc->alloc);

    binder_stats_created(BINDER_STAT_PROC);
    proc->pid = current->group_leader->pid;
    INIT_LIST_HEAD(&proc->delivered_death);
    INIT_LIST_HEAD(&proc->waiting_threads);
//(3)
    filp->private_data = proc;

    mutex_lock(&binder_procs_lock);
      //(4) start
    hlist_for_each_entry(itr, &binder_procs, proc_node) {
        if (itr->pid == proc->pid) {
            existing_pid = true;
            break;
        }
    }
  //(4) end
//(5)
    hlist_add_head(&proc->proc_node, &binder_procs);
    mutex_unlock(&binder_procs_lock);

    if (binder_debugfs_dir_entry_proc && !existing_pid) {
        char strbuf[11];

        snprintf(strbuf, sizeof(strbuf), "%u", proc->pid);
        /*
         * proc debug entries are shared between contexts.
         * Only create for the first PID to avoid debugfs log spamming
         * The printing code will anyway print all contexts for a given
         * PID so this is not a problem.
         */
        proc->debugfs_entry = debugfs_create_file(strbuf, 0444,
            binder_debugfs_dir_entry_proc,
            (void *)(unsigned long)proc->pid,
            &proc_fops);
    }

    if (binder_binderfs_dir_entry_proc && !existing_pid) {
        char strbuf[11];
        struct dentry *binderfs_entry;

        snprintf(strbuf, sizeof(strbuf), "%u", proc->pid);
        /*
         * Similar to debugfs, the process specific log file is shared
         * between contexts. Only create for the first PID.
         * This is ok since same as debugfs, the log file will contain
         * information on all contexts of a given PID.
         */
        binderfs_entry = binderfs_create_file(binder_binderfs_dir_entry_proc,
            strbuf, &proc_fops, (void *)(unsigned long)proc->pid);
        if (!IS_ERR(binderfs_entry)) {
            proc->binderfs_entry = binderfs_entry;
        } else {
            int error;

            error = PTR_ERR(binderfs_entry);
            pr_warn("Unable to create file %s in binderfs (error %d)\n",
                strbuf, error);
        }
    }

    return 0;
}

binder_mmap

(1) The mmap only succeeds when called from the thread group that opened the binder device.
(2) Forbid copying the mapping across fork and any write access (set VM_DONTCOPY, clear VM_MAYWRITE).
(3) Store the binder_proc in vma->vm_private_data so it can be recovered from the vma later.

static int binder_mmap(struct file *filp, struct vm_area_struct *vma)
{
    struct binder_proc *proc = filp->private_data;
  
//(1)
    if (proc->tsk != current->group_leader)
        return -EINVAL;

      ......
  //(2)
    vma->vm_flags |= VM_DONTCOPY | VM_MIXEDMAP;
    vma->vm_flags &= ~VM_MAYWRITE;

    vma->vm_ops = &binder_vm_ops;
//(3)
    vma->vm_private_data = proc;

    return binder_alloc_mmap_handler(&proc->alloc, vma);
}

(1) The mapped space is capped at 4 MB.
(2) alloc->buffer points at the start address of the vma.
(3) Allocate the pages array, one binder_lru_page entry per page of the mapping.
(4) Allocate the initial binder_buffer, which at first covers the whole space.
(5) Point its user_data at the start of the mapping, mark it free, and add it to alloc->buffers.
(6) Insert it into the free_buffers rb-tree, and reserve half of the total space for asynchronous transactions.


int binder_alloc_mmap_handler(struct binder_alloc *alloc,
                  struct vm_area_struct *vma)
{
    int ret;
    const char *failure_string;
    struct binder_buffer *buffer;

    mutex_lock(&binder_alloc_mmap_lock);
    if (alloc->buffer_size) {
        ret = -EBUSY;
        failure_string = "already mapped";
        goto err_already_mapped;
    }
//(1)
    alloc->buffer_size = min_t(unsigned long, vma->vm_end - vma->vm_start,
                   SZ_4M);
    mutex_unlock(&binder_alloc_mmap_lock);
//(2)
    alloc->buffer = (void __user *)vma->vm_start;

//(3)
    alloc->pages = kcalloc(alloc->buffer_size / PAGE_SIZE,
                   sizeof(alloc->pages[0]),
                   GFP_KERNEL);
    if (alloc->pages == NULL) {
        ret = -ENOMEM;
        failure_string = "alloc page array";
        goto err_alloc_pages_failed;
    }
  //(4)
    buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
    if (!buffer) {
        ret = -ENOMEM;
        failure_string = "alloc buffer struct";
        goto err_alloc_buf_struct_failed;
    }
    //(5)
    buffer->user_data = alloc->buffer;
    list_add(&buffer->entry, &alloc->buffers);
    buffer->free = 1;
//(6)
    binder_insert_free_buffer(alloc, buffer);
    alloc->free_async_space = alloc->buffer_size / 2;
    binder_alloc_set_vma(alloc, vma);
    mmgrab(alloc->vma_vm_mm);

    return 0;

err_alloc_buf_struct_failed:
    kfree(alloc->pages);
    alloc->pages = NULL;
err_alloc_pages_failed:
    alloc->buffer = NULL;
    mutex_lock(&binder_alloc_mmap_lock);
    alloc->buffer_size = 0;
err_already_mapped:
    mutex_unlock(&binder_alloc_mmap_lock);
    binder_alloc_debug(BINDER_DEBUG_USER_ERROR,
               "%s: %d %lx-%lx %s failed %d\n", __func__,
               alloc->pid, vma->vm_start, vma->vm_end,
               failure_string, ret);
    return ret;
}
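
On the user-space side, libbinder's ProcessState performs the matching mmap. A hedged sketch of that call (the function name here is illustrative; the size of 1 MB minus two pages mirrors what Android's ProcessState requests, well under the driver's 4 MB cap):

/* Sketch: map the binder buffer space the way libbinder does. The driver
 * fills this area during transactions; user space only ever reads it. */
#include <sys/mman.h>
#include <unistd.h>

static void *map_binder_buffer(int fd)
{
    size_t vm_size = (1 * 1024 * 1024) - sysconf(_SC_PAGE_SIZE) * 2;

    /* PROT_READ only: binder_mmap clears VM_MAYWRITE, so a writable
     * mapping would be rejected anyway. */
    return mmap(NULL, vm_size, PROT_READ, MAP_PRIVATE | MAP_NORESERVE, fd, 0);
}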

binder_ioctl

binder_ioctl dispatches the commands listed above: it first looks up (or creates) the calling thread's binder_thread via binder_get_thread, then switches on cmd.

static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
    int ret;
    struct binder_proc *proc = filp->private_data;
    struct binder_thread *thread;
    unsigned int size = _IOC_SIZE(cmd);
    void __user *ubuf = (void __user *)arg;

    /*pr_info("binder_ioctl: %d:%d %x %lx\n",
            proc->pid, current->pid, cmd, arg);*/

    binder_selftest_alloc(&proc->alloc);

    trace_binder_ioctl(cmd, arg);

    ret = wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2);
    if (ret)
        goto err_unlocked;

    thread = binder_get_thread(proc);
    if (thread == NULL) {
        ret = -ENOMEM;
        goto err;
    }

    switch (cmd) {
    case BINDER_WRITE_READ:
        ret = binder_ioctl_write_read(filp, cmd, arg, thread);
        if (ret)
            goto err;
        break;
    case BINDER_SET_MAX_THREADS: {
        int max_threads;

        if (copy_from_user(&max_threads, ubuf,
                   sizeof(max_threads))) {
            ret = -EINVAL;
            goto err;
        }
        binder_inner_proc_lock(proc);
        proc->max_threads = max_threads;
        binder_inner_proc_unlock(proc);
        break;
    }
    case BINDER_SET_CONTEXT_MGR_EXT: {
        struct flat_binder_object fbo;

        if (copy_from_user(&fbo, ubuf, sizeof(fbo))) {
            ret = -EINVAL;
            goto err;
        }
        ret = binder_ioctl_set_ctx_mgr(filp, &fbo);
        if (ret)
            goto err;
        break;
    }
    case BINDER_SET_CONTEXT_MGR:
        ret = binder_ioctl_set_ctx_mgr(filp, NULL);
        if (ret)
            goto err;
        break;
    case BINDER_THREAD_EXIT:
        binder_debug(BINDER_DEBUG_THREADS, "%d:%d exit\n",
                 proc->pid, thread->pid);
        binder_thread_release(proc, thread);
        thread = NULL;
        break;
    case BINDER_VERSION: {
        struct binder_version __user *ver = ubuf;

        if (size != sizeof(struct binder_version)) {
            ret = -EINVAL;
            goto err;
        }
        if (put_user(BINDER_CURRENT_PROTOCOL_VERSION,
                 &ver->protocol_version)) {
            ret = -EINVAL;
            goto err;
        }
        break;
    }
    case BINDER_GET_NODE_INFO_FOR_REF: {
        struct binder_node_info_for_ref info;

        if (copy_from_user(&info, ubuf, sizeof(info))) {
            ret = -EFAULT;
            goto err;
        }

        ret = binder_ioctl_get_node_info_for_ref(proc, &info);
        if (ret < 0)
            goto err;

        if (copy_to_user(ubuf, &info, sizeof(info))) {
            ret = -EFAULT;
            goto err;
        }

        break;
    }
    case BINDER_GET_NODE_DEBUG_INFO: {
        struct binder_node_debug_info info;

        if (copy_from_user(&info, ubuf, sizeof(info))) {
            ret = -EFAULT;
            goto err;
        }

        ret = binder_ioctl_get_node_debug_info(proc, &info);
        if (ret < 0)
            goto err;

        if (copy_to_user(ubuf, &info, sizeof(info))) {
            ret = -EFAULT;
            goto err;
        }
        break;
    }
    case BINDER_FREEZE: {
        struct binder_freeze_info info;
        struct binder_proc **target_procs = NULL, *target_proc;
        int target_procs_count = 0, i = 0;

        ret = 0;

        if (copy_from_user(&info, ubuf, sizeof(info))) {
            ret = -EFAULT;
            goto err;
        }

        mutex_lock(&binder_procs_lock);
        hlist_for_each_entry(target_proc, &binder_procs, proc_node) {
            if (target_proc->pid == info.pid)
                target_procs_count++;
        }

        if (target_procs_count == 0) {
            mutex_unlock(&binder_procs_lock);
            ret = -EINVAL;
            goto err;
        }

        target_procs = kcalloc(target_procs_count,
                       sizeof(struct binder_proc *),
                       GFP_KERNEL);

        if (!target_procs) {
            mutex_unlock(&binder_procs_lock);
            ret = -ENOMEM;
            goto err;
        }

        hlist_for_each_entry(target_proc, &binder_procs, proc_node) {
            if (target_proc->pid != info.pid)
                continue;

            binder_inner_proc_lock(target_proc);
            target_proc->tmp_ref++;
            binder_inner_proc_unlock(target_proc);

            target_procs[i++] = target_proc;
        }
        mutex_unlock(&binder_procs_lock);

        for (i = 0; i < target_procs_count; i++) {
            if (ret >= 0)
                ret = binder_ioctl_freeze(&info,
                              target_procs[i]);

            binder_proc_dec_tmpref(target_procs[i]);
        }

        kfree(target_procs);

        if (ret < 0)
            goto err;
        break;
    }
    case BINDER_GET_FROZEN_INFO: {
        struct binder_frozen_status_info info;

        if (copy_from_user(&info, ubuf, sizeof(info))) {
            ret = -EFAULT;
            goto err;
        }

        ret = binder_ioctl_get_freezer_info(&info);
        if (ret < 0)
            goto err;

        if (copy_to_user(ubuf, &info, sizeof(info))) {
            ret = -EFAULT;
            goto err;
        }
        break;
    }
    case BINDER_ENABLE_ONEWAY_SPAM_DETECTION: {
        uint32_t enable;

        if (copy_from_user(&enable, ubuf, sizeof(enable))) {
            ret = -EFAULT;
            goto err;
        }
        binder_inner_proc_lock(proc);
        proc->oneway_spam_detection_enabled = (bool)enable;
        binder_inner_proc_unlock(proc);
        break;
    }
    default:
        ret = -EINVAL;
        goto err;
    }
    ret = 0;
err:
    if (thread)
        thread->looper_need_return = false;
    wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2);
    if (ret && ret != -EINTR)
        pr_info("%d:%d ioctl %x %lx returned %d\n", proc->pid, current->pid, cmd, arg, ret);
err_unlocked:
    trace_binder_ioctl_done(ret);
    return ret;
}
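
As a usage illustration for the BINDER_SET_MAX_THREADS case above, a hedged user-space sketch (fd is assumed to be an open binder descriptor; libbinder's default limit is 15):

#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/android/binder.h>

static int set_max_threads(int fd, uint32_t max)
{
    /* the driver copies a __u32 from user space into proc->max_threads */
    return ioctl(fd, BINDER_SET_MAX_THREADS, &max);
}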


binder_get_thread looks up the calling thread's binder_thread within the current process; if it does not exist yet, a new binder_thread is allocated and initialized:

static struct binder_thread *binder_get_thread(struct binder_proc *proc)
{
    struct binder_thread *thread;
    struct binder_thread *new_thread;

    binder_inner_proc_lock(proc);
    thread = binder_get_thread_ilocked(proc, NULL);
    binder_inner_proc_unlock(proc);
    if (!thread) {
        new_thread = kzalloc(sizeof(*thread), GFP_KERNEL);
        if (new_thread == NULL)
            return NULL;
        binder_inner_proc_lock(proc);
        thread = binder_get_thread_ilocked(proc, new_thread);
        binder_inner_proc_unlock(proc);
        if (thread != new_thread)
            kfree(new_thread);
    }
    return thread;
}
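
For reference, a simplified sketch of what binder_get_thread_ilocked does (not the verbatim kernel source; stats accounting, tmp_ref and looper_need_return handling are omitted): proc->threads is an rb-tree keyed by pid, so lookup and insertion follow the standard kernel rb-tree pattern.

static struct binder_thread *get_thread_sketch(struct binder_proc *proc,
                                               struct binder_thread *new_thread)
{
    struct rb_node **p = &proc->threads.rb_node;
    struct rb_node *parent = NULL;
    struct binder_thread *thread;

    while (*p) {                          /* standard rb-tree walk by pid */
        parent = *p;
        thread = rb_entry(parent, struct binder_thread, rb_node);
        if (current->pid < thread->pid)
            p = &(*p)->rb_left;
        else if (current->pid > thread->pid)
            p = &(*p)->rb_right;
        else
            return thread;                /* already registered */
    }
    if (!new_thread)
        return NULL;                      /* caller allocates and retries */
    thread = new_thread;
    thread->proc = proc;
    thread->pid = current->pid;
    INIT_LIST_HEAD(&thread->todo);
    init_waitqueue_head(&thread->wait);
    rb_link_node(&thread->rb_node, parent, p);
    rb_insert_color(&thread->rb_node, &proc->threads);
    return thread;
}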
