
servicemanager startup: open the binder driver with binder_open(), become the context manager with binder_become_context_manager(), then run the event loop in binder_loop().

The relevant source files are:

File                                                      Description
frameworks/native/cmds/servicemanager/service_manager.c   startup file of the servicemanager daemon
frameworks/native/cmds/servicemanager/binder.c            servicemanager does not link against libbinder, so the functions that talk to the binder driver are implemented in this file
drivers/staging/android/binder.c                          the in-kernel binder driver

The key function calls are as follows:

  1. binder_open(): open the binder driver
  2. binder_become_context_manager(): become the context manager
  3. binder_loop(): enter the event loop

Startup

int main(int argc, char** argv)
{
    struct binder_state *bs;
    char *driver;

	// The default binder device node is /dev/binder
    if (argc > 1) {
        driver = argv[1];
    } else {
        driver = "/dev/binder";
    }

	// Open the /dev/binder device node
    bs = binder_open(driver, 128*1024);
    if (!bs) {
		// Device vendors may ship their own binder driver
#ifdef VENDORSERVICEMANAGER
        ALOGW("failed to open binder driver %s\n", driver);
        while (true) {
            sleep(UINT_MAX);
        }
#else
        ALOGE("failed to open binder driver %s\n", driver);
#endif
        return -1;
    }
	// Register servicemanager as the housekeeper (context manager) of binder services
    if (binder_become_context_manager(bs)) {
        ALOGE("cannot become context manager (%s)\n", strerror(errno));
        return -1;
    }
	// SELinux-related setup, omitted
...
	// Enter the loop and keep handling client events; each event is handled by svcmgr_handler()
    binder_loop(bs, svcmgr_handler);
    return 0;
}

The structure of the program is very clear:

  1. Open the binder driver's device file, normally /dev/binder;
  2. Register the servicemanager process as the "context manager" of the binder framework;
  3. Enter binder_loop() and handle events one by one.

Opening the binder driver: binder_open()

struct binder_state *binder_open(const char* driver, size_t mapsize)
{
    struct binder_state *bs;
    struct binder_version vers;

    bs = malloc(sizeof(*bs));
    if (!bs) {
        errno = ENOMEM;
        return NULL;
    }
	// Open the device node
    bs->fd = open(driver, O_RDWR | O_CLOEXEC);
    if (bs->fd < 0) {
        fprintf(stderr,"binder: cannot open %s (%s)\n", driver, strerror(errno));
        goto fail_open;
    }
	// Check the driver's protocol version
    if ((ioctl(bs->fd, BINDER_VERSION, &vers) == -1) ||
        (vers.protocol_version != BINDER_CURRENT_PROTOCOL_VERSION)) {
        fprintf(stderr, "binder: kernel driver version (%d) differs from user space version (%d)\n",
                vers.protocol_version, BINDER_CURRENT_PROTOCOL_VERSION);
        goto fail_open;
    }
	// Set up the shared memory region
    bs->mapsize = mapsize;
    bs->mapped = mmap(NULL, mapsize, PROT_READ, MAP_PRIVATE, bs->fd, 0);
    if (bs->mapped == MAP_FAILED) {
        fprintf(stderr,"binder: cannot map device (%s)\n", strerror(errno));
        goto fail_map;
    }
    return bs;
fail_map:
    close(bs->fd);
fail_open:
    free(bs);
    return NULL;
}

The open sequence is completely standard. mmap() is used here so that data can be exchanged with the binder driver efficiently. The resulting file descriptor is kept in a dynamically allocated binder_state:

struct binder_state
{
    int fd;
    void *mapped;
    size_t mapsize;
};

The kernel side of open: binder_open()

static int binder_open(struct inode *nodp, struct file *filp)
{
	struct binder_proc *proc;
	// Allocate a struct binder_proc and initialize it
	proc = kzalloc(sizeof(*proc), GFP_KERNEL);
	if (proc == NULL)
		return -ENOMEM;
	get_task_struct(current);
	proc->tsk = current;
	INIT_LIST_HEAD(&proc->todo);
	init_waitqueue_head(&proc->wait);
	proc->default_priority = task_nice(current);

	binder_lock(__func__);
	binder_stats_created(BINDER_STAT_PROC);
	hlist_add_head(&proc->proc_node, &binder_procs);
	proc->pid = current->group_leader->pid;
	INIT_LIST_HEAD(&proc->delivered_death);
	// Attach proc to the private_data member of struct file
	filp->private_data = proc;
	binder_unlock(__func__);
	// Create a debugfs file named after the pid under /sys/kernel/debug/binder
	if (binder_debugfs_dir_entry_proc) {
		char strbuf[11];
		snprintf(strbuf, sizeof(strbuf), "%u", proc->pid);
		proc->debugfs_entry = debugfs_create_file(strbuf, S_IRUGO,
			binder_debugfs_dir_entry_proc, proc, &binder_proc_fops);
	}
	return 0;
}

As we can see, each time a user-space process opens /dev/binder, the kernel associates a new struct binder_proc with the opened file. User space guarantees that each process opens the device file only once (servicemanager takes care of this itself; other users go through the ProcessState class in libbinder), so in effect every process that uses binder has exactly one struct binder_proc in the kernel.

struct binder_proc has quite a few members; they will be explained as they are used.
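
For orientation, here is an abbreviated sketch of struct binder_proc listing only the members that appear in this article's snippets; the full definition in drivers/staging/android/binder.c has many more fields and varies across kernel versions:

struct binder_proc {
	struct hlist_node proc_node;      // entry in the global binder_procs list
	struct rb_root threads;           // rbtree of binder_thread, one per user-space thread
	int pid;                          // pid of the process (its group leader)
	struct task_struct *tsk;          // task that opened the device
	struct list_head todo;            // work pending for this process
	wait_queue_head_t wait;           // threads sleep here waiting for work
	long default_priority;            // nice value recorded at open time
	int ready_threads;                // threads currently waiting for proc work
	struct list_head delivered_death; // delivered death notifications
	struct dentry *debugfs_entry;     // the per-pid debugfs file
};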

Becoming the context manager: binder_become_context_manager()

int binder_become_context_manager(struct binder_state *bs)
{
    return ioctl(bs->fd, BINDER_SET_CONTEXT_MGR, 0);
}

The kernel-side implementation:

static DECLARE_WAIT_QUEUE_HEAD(binder_user_error_wait);
static int binder_stop_on_user_error;

static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	int ret;
	struct binder_proc *proc = filp->private_data;
	struct binder_thread *thread;
	unsigned int size = _IOC_SIZE(cmd);
	void __user *ubuf = (void __user *)arg;

	trace_binder_ioctl(cmd, arg);
	// Block while binder is stopped due to user errors; fail if the wait is interrupted
	ret = wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2);
	if (ret)
		goto err_unlocked;

	binder_lock(__func__);
	// Look up the current thread's struct binder_thread in proc->threads, creating one if it does not exist
	thread = binder_get_thread(proc);
	if (thread == NULL) {
		ret = -ENOMEM;
		goto err;
	}
	// Here we only care about the context-manager command
	switch (cmd) {
...
	case BINDER_SET_CONTEXT_MGR:
		ret = binder_ioctl_set_ctx_mgr(filp);
		if (ret)
			goto err;
		// SELinux check, ignored for now
		ret = security_binder_set_context_mgr(proc->tsk);
		if (ret < 0)
			goto err;
		break;
...
	}
	ret = 0;
err:
	if (thread)
		thread->looper &= ~BINDER_LOOPER_STATE_NEED_RETURN;
	binder_unlock(__func__);
	wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2);
	if (ret && ret != -ERESTARTSYS)
		pr_info("%d:%d ioctl %x %lx returned %d\n", proc->pid, current->pid, cmd, arg, ret);
err_unlocked:
	trace_binder_ioctl_done(ret);
	return ret;
}

The key part is the implementation of binder_ioctl_set_ctx_mgr():

// Global variables recording the binder context manager
static struct binder_node *binder_context_mgr_node;
static kuid_t binder_context_mgr_uid = INVALID_UID;

static int binder_ioctl_set_ctx_mgr(struct file *filp)
{
	int ret = 0;
	struct binder_proc *proc = filp->private_data;
	kuid_t curr_euid = current_euid();
	// There can be only one context manager in the system
	if (binder_context_mgr_node != NULL) {
		pr_err("BINDER_SET_CONTEXT_MGR already set\n");
		ret = -EBUSY;
		goto out;
	}
	// Verify the context manager's uid if it is already set, otherwise record it
	if (uid_valid(binder_context_mgr_uid)) {
		if (!uid_eq(binder_context_mgr_uid, curr_euid)) {
			pr_err("BINDER_SET_CONTEXT_MGR bad uid %d != %d\n", from_kuid(&init_user_ns, curr_euid),
			       from_kuid(&init_user_ns, binder_context_mgr_uid));
			ret = -EPERM;
			goto out;
		}
	} else {
		binder_context_mgr_uid = curr_euid;
	}
	// Create a new struct binder_node as the context manager node
	binder_context_mgr_node = binder_new_node(proc, 0, 0);
	if (binder_context_mgr_node == NULL) {
		ret = -ENOMEM;
		goto out;
	}
	binder_context_mgr_node->local_weak_refs++;
	binder_context_mgr_node->local_strong_refs++;
	binder_context_mgr_node->has_strong_ref = 1;
	binder_context_mgr_node->has_weak_ref = 1;
out:
	return ret;
}
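
The node created here is the kernel-side representation of a binder entity. As a rough sketch, these are the struct binder_node fields touched above (the real structure in drivers/staging/android/binder.c has many more members):

struct binder_node {
	struct binder_proc *proc;   // process that owns the node; servicemanager here
	binder_uintptr_t ptr;       // user-space object address; 0 for the context manager
	binder_uintptr_t cookie;    // user-space cookie; 0 for the context manager
	int local_strong_refs;      // strong references held in the kernel
	int local_weak_refs;        // weak references held in the kernel
	unsigned has_strong_ref:1;  // a strong reference has been reported to user space
	unsigned has_weak_ref:1;    // a weak reference has been reported to user space
};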

The event loop: binder_loop()

With all the preparation done, servicemanager enters a loop, continuously reading messages from the driver and handling events one at a time.

The binder read/write buffer

Before entering binder_loop(), let's look at struct binder_write_read, the core data structure used to read from and write to the binder driver; below we call it the binder read/write buffer.

struct binder_write_read {
  binder_size_t write_size;
  binder_size_t write_consumed;
  binder_uintptr_t write_buffer;
  binder_size_t read_size;
  binder_size_t read_consumed;
  binder_uintptr_t read_buffer;
};

The members of the binder read/write buffer fall into two groups, one for writing and one for reading (a usage sketch follows this list):

  • For the write buffer: write_buffer points to the data to write, and write_size gives the total number of bytes in it. write_consumed is both an input and an output: on entry it tells the driver the offset in write_buffer at which the unprocessed data starts, so the data actually consumed is write_buffer[write_consumed] through write_buffer[write_size - 1]; on return the driver stores in it the offset up to which it has consumed data (with an initial offset of 0, this is simply the number of bytes consumed).
  • For the read buffer: read_buffer points to the receive buffer, and read_size gives the space available in it. read_consumed works exactly like write_consumed: the driver places readable data into read_buffer[read_consumed] through read_buffer[read_size - 1], and on return it tells user space how many bytes were actually placed in read_buffer.
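
A minimal sketch of these in/out semantics, modeled on how binder_loop() below fills the structure. The function name bwr_example is made up for illustration; it assumes the usual binder headers and a binder_state returned by binder_open(), and omits error handling:

void bwr_example(struct binder_state *bs)
{
    struct binder_write_read bwr;
    uint32_t cmd = BC_ENTER_LOOPER;   // one 4-byte command to write
    uint32_t readbuf[32];             // 128 bytes of space to read into

    bwr.write_buffer = (uintptr_t) &cmd;
    bwr.write_size = sizeof(cmd);     // total bytes in write_buffer
    bwr.write_consumed = 0;           // in: start offset; out: consumed offset
    bwr.read_buffer = (uintptr_t) readbuf;
    bwr.read_size = sizeof(readbuf);  // space available in read_buffer
    bwr.read_consumed = 0;            // in: fill offset; out: bytes filled

    ioctl(bs->fd, BINDER_WRITE_READ, &bwr);
    // bwr.read_consumed now says how many bytes of readbuf are valid
}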

binder_loop()

void binder_loop(struct binder_state *bs, binder_handler func)
{
    int res;
    struct binder_write_read bwr;
    uint32_t readbuf[32];
	// Nothing to write
    bwr.write_size = 0;
    bwr.write_consumed = 0;
    bwr.write_buffer = 0;

	// Tell the driver this thread is entering the loop
    readbuf[0] = BC_ENTER_LOOPER;
    binder_write(bs, readbuf, sizeof(uint32_t));

    for (;;) {
		// The write buffer is empty and the read buffer has 128 bytes of space, so the ioctl() below will only read data
        bwr.read_size = sizeof(readbuf);
		// The driver records in read_consumed how many bytes it actually placed into the read buffer
        bwr.read_consumed = 0;
        bwr.read_buffer = (uintptr_t) readbuf;
		// Blocking read of the binder device; when a client wants to talk to servicemanager, readable data arrives
        res = ioctl(bs->fd, BINDER_WRITE_READ, &bwr);
        if (res < 0) {
            ALOGE("binder_loop: ioctl failed (%s)\n", strerror(errno));
            break;
        }
		// Parse and handle the commands just read; the core event handler func is svcmgr_handler
        res = binder_parse(bs, 0, (uintptr_t) readbuf, bwr.read_consumed, func);
        if (res == 0) {
            ALOGE("binder_loop: unexpected reply?!\n");
            break;
        }
        if (res < 0) {
            ALOGE("binder_loop: io error %d %s\n", res, strerror(errno));
            break;
        }
    }
}

Binder reads and writes in the kernel

binder_write() also boils down to an ioctl() with the same BINDER_WRITE_READ command; in the kernel it is handled by binder_ioctl_write_read().
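
For reference, binder_write() fills in only the write half of the buffer and leaves the read half empty; the sketch below mirrors the helper in frameworks/native/cmds/servicemanager/binder.c (details such as error-message wording may differ between versions):

int binder_write(struct binder_state *bs, void *data, size_t len)
{
    struct binder_write_read bwr;
    int res;

    bwr.write_size = len;
    bwr.write_consumed = 0;
    bwr.write_buffer = (uintptr_t) data;
    // Read half unused: the call returns once the commands are consumed
    bwr.read_size = 0;
    bwr.read_consumed = 0;
    bwr.read_buffer = 0;
    res = ioctl(bs->fd, BINDER_WRITE_READ, &bwr);
    if (res < 0)
        fprintf(stderr, "binder_write: ioctl failed (%s)\n", strerror(errno));
    return res;
}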

static int binder_ioctl_write_read(struct file *filp, unsigned int cmd, unsigned long arg, struct binder_thread *thread)
{
	int ret = 0;
	struct binder_proc *proc = filp->private_data;
	unsigned int size = _IOC_SIZE(cmd);
	void __user *ubuf = (void __user *)arg;
	// Local copy of the read/write buffer descriptor
	struct binder_write_read bwr;

	// arg from user space points to a struct binder_write_read; check its size and copy it in
	if (size != sizeof(struct binder_write_read)) {
		ret = -EINVAL;
		goto out;
	}
	if (copy_from_user(&bwr, ubuf, sizeof(bwr))) {
		ret = -EFAULT;
		goto out;
	}
	// Handle the write request from user space
	if (bwr.write_size > 0) {
		ret = binder_thread_write(proc, thread, bwr.write_buffer, bwr.write_size, &bwr.write_consumed);
		if (ret < 0) {
			bwr.read_consumed = 0;
			if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
				ret = -EFAULT;
			goto out;
		}
	}
	// Handle the read request from user space
	if (bwr.read_size > 0) {
		ret = binder_thread_read(proc, thread, bwr.read_buffer, bwr.read_size, &bwr.read_consumed,
			filp->f_flags & O_NONBLOCK);
		// If the todo list is not empty there is still pending work, so wake up the threads waiting on proc->wait
		if (!list_empty(&proc->todo))
			wake_up_interruptible(&proc->wait);
		if (ret < 0) {
			if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
				ret = -EFAULT;
			goto out;
		}
	}
	// Copy the updated descriptor (consumed counters) back into arg
	if (copy_to_user(ubuf, &bwr, sizeof(bwr))) {
		ret = -EFAULT;
		goto out;
	}
out:
	return ret;
}

In servicemanager's startup path, the only write is the BC_ENTER_LOOPER command issued at the entry of binder_loop(). The kernel handles it as follows:

static int binder_thread_write(struct binder_proc *proc, struct binder_thread *thread,
	binder_uintptr_t binder_buffer, size_t size, binder_size_t *consumed)
{
	uint32_t cmd;
	void __user *buffer = (void __user *)(uintptr_t)binder_buffer;
	// The data between ptr and end is what the user process actually wrote; both point into user-space memory
	void __user *ptr = buffer + *consumed;
	void __user *end = buffer + size;

	while (ptr < end && thread->return_error == BR_OK) {
		// The first four bytes hold a sub-command, since many binder commands are multiplexed over the single ioctl command BINDER_WRITE_READ
		if (get_user(cmd, (uint32_t __user *)ptr))
			return -EFAULT;
		ptr += sizeof(uint32_t);
		trace_binder_command(cmd);
		switch (cmd) {
...
		case BC_ENTER_LOOPER:
			// A user-space thread sends BC_ENTER_LOOPER when it enters its loop; sending it after BC_REGISTER_LOOPER is an error
			if (thread->looper & BINDER_LOOPER_STATE_REGISTERED) {
				thread->looper |= BINDER_LOOPER_STATE_INVALID;
				binder_user_error("%d:%d ERROR: BC_ENTER_LOOPER called after BC_REGISTER_LOOPER\n",
					proc->pid, thread->pid);
			}
			// Set the BINDER_LOOPER_STATE_ENTERED flag
			thread->looper |= BINDER_LOOPER_STATE_ENTERED;
			break;
		case BC_EXIT_LOOPER:
			// Set the BINDER_LOOPER_STATE_EXITED flag
			thread->looper |= BINDER_LOOPER_STATE_EXITED;
			break;
...
	}
	return 0;
}

In servicemanager's startup path, the read is the ioctl() call in the main loop of binder_loop(). The kernel handles it as follows:

static int binder_thread_read(struct binder_proc *proc, struct binder_thread *thread,
	binder_uintptr_t binder_buffer, size_t size, binder_size_t *consumed, int non_block)
{
	void __user *buffer = (void __user *)(uintptr_t)binder_buffer;
	// The space between ptr and end is available for storing the data being read
	void __user *ptr = buffer + *consumed;
	void __user *end = buffer + size;

	int ret = 0;
	int wait_for_proc_work;

	// When reading from offset 0, a BR_NOOP command is placed first
	if (*consumed == 0) {
		if (put_user(BR_NOOP, (uint32_t __user *)ptr))
			return -EFAULT;
		ptr += sizeof(uint32_t);
	}
retry:
	// Right after startup the thread has no transaction stack and no pending work, so wait_for_proc_work is true
	wait_for_proc_work = thread->transaction_stack == NULL && list_empty(&thread->todo);
	// Pending errors are returned first; ignored here
	if (thread->return_error != BR_OK && ptr < end) {
...
	}

	// Update the looper state: the thread is now waiting
	thread->looper |= BINDER_LOOPER_STATE_WAITING;
	if (wait_for_proc_work)
		proc->ready_threads++;
	binder_unlock(__func__);
	if (wait_for_proc_work) {
		// The looper state must be valid (registered or entered)
		if (!(thread->looper & (BINDER_LOOPER_STATE_REGISTERED | BINDER_LOOPER_STATE_ENTERED))) {
			wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2);
		}
		// Set the priority to the process default
		binder_set_nice(proc->default_priority);
		if (non_block) {
			if (!binder_has_proc_work(proc, thread))
				ret = -EAGAIN;
		} else
			// A blocking read sleeps here until the process has work
			ret = wait_event_freezable_exclusive(proc->wait, binder_has_proc_work(proc, thread));
	} else {
		if (non_block) {
			if (!binder_has_thread_work(thread))
				ret = -EAGAIN;
		} else
			ret = wait_event_freezable(thread->wait, binder_has_thread_work(thread));
	}
	binder_lock(__func__);

	if (wait_for_proc_work)
		proc->ready_threads--;
	thread->looper &= ~BINDER_LOOPER_STATE_WAITING;
...
}