The servicemanager source lives in /frameworks/base/cmds/servicemanager and consists of binder.c, binder.h and service_manager.c.
The resulting servicemanager binary is installed under /system/bin/.
servicemanager's entry point is the main function in service_manager.c:
int main(int argc, char **argv)
{
    struct binder_state *bs;
    void *svcmgr = BINDER_SERVICE_MANAGER; //handle 0, the binder service manager
    bs = binder_open(128*1024); //open /dev/binder and map 128*1024 bytes of memory
    if (binder_become_context_manager(bs)) { //make this process the binder context manager
        LOGE("cannot become context manager (%s)\n", strerror(errno));
        return -1;
    }
    svcmgr_handle = svcmgr;
    binder_loop(bs, svcmgr_handler); //enter the binder loop
    return 0;
}
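For reference, the binder_state handed around above is just a small bookkeeping struct, and handle 0 is reserved for the service manager; the declarations in binder.c and service_manager.c look essentially like this (quoted from memory of that source tree, so treat it as a sketch):
struct binder_state
{
    int fd;           /* descriptor for /dev/binder */
    void *mapped;     /* start of the mmap'ed area */
    unsigned mapsize; /* size of that area: 128*1024 here */
};

#define BINDER_SERVICE_MANAGER ((void*) 0) /* the well-known handle 0 */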
1. Opening /dev/binder and mapping memory
struct binder_state *binder_open(unsigned mapsize)
{
    struct binder_state *bs;
    bs = malloc(sizeof(*bs)); //allocate the binder_state
    if (!bs) {
        errno = ENOMEM;
        return 0;
    }
    bs->fd = open("/dev/binder", O_RDWR); //open the /dev/binder device file (plain O_RDWR, no O_NONBLOCK)
    if (bs->fd < 0) {
        fprintf(stderr,"binder: cannot open device (%s)\n",strerror(errno));
        goto fail_open;
    }
    bs->mapsize = mapsize; //size of the mapping to create: 128*1024
    bs->mapped = mmap(NULL, mapsize, PROT_READ, MAP_PRIVATE, bs->fd, 0); //map the memory
    if (bs->mapped == MAP_FAILED) {
        fprintf(stderr,"binder: cannot map device (%s)\n",strerror(errno));
        goto fail_map;
    }
    return bs; //bs->fd holds the device descriptor; bs is returned for the ioctl calls that follow
fail_map:
    close(bs->fd);
fail_open:
    free(bs);
    return 0;
}
1.1 Opening the /dev/binder device file triggers the driver's open method
static int binder_open(struct inode *nodp, struct file *filp) //open of /dev/binder
{
    struct binder_proc *proc;
    binder_debug(BINDER_DEBUG_OPEN_CLOSE, "binder_open: %d:%d\n",current->group_leader->pid, current->pid); //log the group leader pid and the thread pid
    proc = kzalloc(sizeof(*proc), GFP_KERNEL); //allocate a binder_proc
    if (proc == NULL)
        return -ENOMEM;
    get_task_struct(current); //take a reference on the current task's task_struct
    proc->tsk = current; //proc->tsk points at the current task_struct
    INIT_LIST_HEAD(&proc->todo); //initialize the proc->todo list head
    init_waitqueue_head(&proc->wait); //initialize the proc->wait wait-queue head
    proc->default_priority = task_nice(current); //record the current task's nice value in proc->default_priority
    mutex_lock(&binder_lock); //take the binder_lock mutex
    binder_stats_created(BINDER_STAT_PROC); //bump the binder_proc creation counter
    hlist_add_head(&proc->proc_node, &binder_procs); //add proc->proc_node to the global binder_procs hash list
    proc->pid = current->group_leader->pid; //proc->pid is the current process's group leader pid
    INIT_LIST_HEAD(&proc->delivered_death); //initialize the proc->delivered_death list head
    filp->private_data = proc; //stash proc in the file's private data
    mutex_unlock(&binder_lock); //release the binder_lock mutex
    if (binder_debugfs_dir_entry_proc) {
        char strbuf[11];
        snprintf(strbuf, sizeof(strbuf), "%u", proc->pid); //format proc->pid into strbuf
        proc->debugfs_entry = debugfs_create_file(strbuf, S_IRUGO,binder_debugfs_dir_entry_proc, proc, &binder_proc_fops);
        //create the /binder/proc/<pid> debugfs file
    }
    return 0;
}
So every open of /dev/binder allocates a binder_proc and fills in its members:
initialize the binder_proc->todo list
initialize the binder_proc->wait wait queue
add binder_proc->proc_node to the global binder_procs hash list
initialize the binder_proc->delivered_death list
point filp->private_data at the binder_proc (later ioctl calls and the like use this pointer to recover the binder_proc)
finally, create the debugfs file /binder/proc/$pid, named after the process id
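For orientation, the binder_proc members touched above sit in the driver's per-open bookkeeping struct, roughly as follows (an abridged sketch; fields not used in this walkthrough are omitted):
struct binder_proc {
    struct hlist_node proc_node;      /* link in the global binder_procs hash list */
    struct rb_root threads;           /* red-black tree of binder_thread, keyed by pid */
    struct rb_root nodes;             /* red-black tree of binder_node, keyed by ptr */
    int pid;                          /* group leader pid */
    struct task_struct *tsk;          /* the opening task */
    struct list_head todo;            /* work queued for the whole process */
    wait_queue_head_t wait;           /* threads sleep here waiting for work */
    long default_priority;            /* nice value captured at open time */
    struct list_head delivered_death; /* death notifications handed to user space */
    struct dentry *debugfs_entry;     /* /binder/proc/<pid> */
};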
2. Making this process the binder context manager
int binder_become_context_manager(struct binder_state *bs)
{
    return ioctl(bs->fd, BINDER_SET_CONTEXT_MGR, 0); //ioctl command BINDER_SET_CONTEXT_MGR
}
2.1 The BINDER_SET_CONTEXT_MGR command tells the /dev/binder driver to install the binder service manager.
This invokes the driver's ioctl method; only the executed parts are excerpted:
static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
    int ret;
    struct binder_proc *proc = filp->private_data; //recover the binder_proc
    struct binder_thread *thread;
    unsigned int size = _IOC_SIZE(cmd); //size of the command's payload
    void __user *ubuf = (void __user *)arg; //user-space pointer passed as arg
    //sleep on the binder_user_error_wait queue until binder_stop_on_user_error drops below 2
    ret = wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2);
    if (ret)
        return ret;
    mutex_lock(&binder_lock); //take the binder_lock mutex
    thread = binder_get_thread(proc); //look up (or create) the binder thread
    if (thread == NULL) {
        ret = -ENOMEM;
        goto err;
    }
    switch (cmd) {
    ...
    case BINDER_SET_CONTEXT_MGR: //install the context manager
        if (binder_context_mgr_node != NULL) { //a manager node already exists
            printk(KERN_ERR "binder: BINDER_SET_CONTEXT_MGR already set\n");
            ret = -EBUSY;
            goto err;
        }
        if (binder_context_mgr_uid != -1) { //a manager uid has already been recorded
            if (binder_context_mgr_uid != current->cred->euid) { //and it differs from the caller's effective uid
                printk(KERN_ERR "binder: BINDER_SET_CONTEXT_MGR bad uid %d != %d\n",current->cred->euid,binder_context_mgr_uid);
                ret = -EPERM;
                goto err;
            }
        }
        else
            binder_context_mgr_uid = current->cred->euid; //record the caller's effective uid in the global binder_context_mgr_uid
        binder_context_mgr_node = binder_new_node(proc, NULL, NULL); //create the context manager's binder node
        if (binder_context_mgr_node == NULL) {
            ret = -ENOMEM;
            goto err;
        }
        binder_context_mgr_node->local_weak_refs++; //bump the local weak reference count
        binder_context_mgr_node->local_strong_refs++; //bump the local strong reference count
        binder_context_mgr_node->has_strong_ref = 1; //a strong reference exists
        binder_context_mgr_node->has_weak_ref = 1; //a weak reference exists
        break;
    ...
    }
    ret = 0;
err:
    if (thread)
        thread->looper &= ~BINDER_LOOPER_STATE_NEED_RETURN;
    mutex_unlock(&binder_lock); //release the binder_lock mutex
    wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2); //sleep on binder_user_error_wait again
    if (ret && ret != -ERESTARTSYS)
        printk(KERN_INFO "binder: %d:%d ioctl %x %lx returned %d\n", proc->pid, current->pid, cmd, arg, ret);
    return ret;
}
It first recovers the binder_proc from filp->private_data.
2.2 Next, binder_get_thread is called:
static struct binder_thread *binder_get_thread(struct binder_proc *proc)
{
    struct binder_thread *thread = NULL;
    struct rb_node *parent = NULL;
    struct rb_node **p = &proc->threads.rb_node; //root of the proc's thread red-black tree
    while (*p) { //empty on the first call, so the loop body is skipped
        parent = *p;
        thread = rb_entry(parent, struct binder_thread, rb_node);
        if (current->pid < thread->pid)
            p = &(*p)->rb_left;
        else if (current->pid > thread->pid)
            p = &(*p)->rb_right;
        else
            break;
    }
    if (*p == NULL) { //no such thread yet: create one
        thread = kzalloc(sizeof(*thread), GFP_KERNEL); //allocate the binder_thread
        if (thread == NULL)
            return NULL;
        binder_stats_created(BINDER_STAT_THREAD); //bump the binder_thread creation counter
        thread->proc = proc; //bind the thread to its binder_proc
        thread->pid = current->pid; //record the current thread's pid
        init_waitqueue_head(&thread->wait); //initialize the thread's wait-queue head
        INIT_LIST_HEAD(&thread->todo); //initialize the thread's todo list
        rb_link_node(&thread->rb_node, parent, p);
        rb_insert_color(&thread->rb_node, &proc->threads);
        thread->looper |= BINDER_LOOPER_STATE_NEED_RETURN; //mark that the thread must return to user space
        thread->return_error = BR_OK; //default return status
        thread->return_error2 = BR_OK; //default return status
    }
    return thread;
}
rb_node **p = &proc->threads.rb_node takes the root of the binder_proc's thread red-black tree.
The tree is searched for the current thread; during initialization the lookup necessarily finds nothing, so *p == NULL and the if block is entered, which
creates a binder_thread and sets up its members:
binder_thread->proc points at the owning binder_proc, tying the two together
the binder_thread->wait wait queue is initialized
the binder_thread->rb_node node is linked into the red-black tree and recolored
and the newly created thread gets its default attributes:
binder_thread->looper |= BINDER_LOOPER_STATE_NEED_RETURN
binder_thread->return_error = BR_OK
binder_thread->return_error2 = BR_OK
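For reference, the binder_thread members involved look roughly like this (an abridged sketch of the kernel struct):
struct binder_thread {
    struct binder_proc *proc;         /* owning binder_proc */
    struct rb_node rb_node;           /* link in proc->threads, keyed by pid */
    int pid;
    int looper;                       /* BINDER_LOOPER_STATE_* flags */
    struct binder_transaction *transaction_stack;
    struct list_head todo;            /* work queued for this thread */
    uint32_t return_error;            /* error to report to user space, BR_OK if none */
    uint32_t return_error2;
    wait_queue_head_t wait;           /* the thread sleeps here */
};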
2.3 Entering the BINDER_SET_CONTEXT_MGR branch of the switch:
the global binder_context_mgr_node is checked to see whether a manager node already exists;
the global binder_context_mgr_uid is checked likewise;
the global binder_context_mgr_uid is then set;
and the global binder_context_mgr_node gets its binder node from binder_new_node, which is worth stepping into.
2.4 binder_new_node
static struct binder_node *binder_new_node(struct binder_proc *proc,void __user *ptr,void __user *cookie)
{
    struct rb_node **p = &proc->nodes.rb_node; //root of the proc's node red-black tree
    struct rb_node *parent = NULL;
    struct binder_node *node;
    while (*p) {
        parent = *p;
        node = rb_entry(parent, struct binder_node, rb_node);
        if (ptr < node->ptr)
            p = &(*p)->rb_left;
        else if (ptr > node->ptr)
            p = &(*p)->rb_right;
        else
            return NULL;
    }
    node = kzalloc(sizeof(*node), GFP_KERNEL); //allocate the binder_node
    if (node == NULL)
        return NULL;
    binder_stats_created(BINDER_STAT_NODE); //bump the node creation counter
    rb_link_node(&node->rb_node, parent, p);
    rb_insert_color(&node->rb_node, &proc->nodes);
    node->debug_id = ++binder_last_id;
    node->proc = proc; //bind the node to its binder_proc
    node->ptr = ptr;
    node->cookie = cookie;
    node->work.type = BINDER_WORK_NODE; //marks that a node was created
    INIT_LIST_HEAD(&node->work.entry); //initialize the node's work list entry
    INIT_LIST_HEAD(&node->async_todo); //initialize the node's async work list
    binder_debug(BINDER_DEBUG_INTERNAL_REFS,"binder: %d:%d node %d u%p c%p created\n",
                 proc->pid, current->pid, node->debug_id,node->ptr, node->cookie);
    return node;
}
It takes the root of the binder_proc's node red-black tree and walks it in the while loop; as before nothing is found, so the loop exits.
A binder_node is then allocated, i.e. the binder entity is created:
the binder_node is linked into the proc->nodes red-black tree and recolored
binder_node->proc points at the binder_proc, tying the two together
here both ptr and cookie are NULL
and the newly created binder_node (binder entity) gets its default attributes:
binder_node->work.type is BINDER_WORK_NODE
the binder_node->work.entry list is initialized
the binder_node->async_todo list is initialized
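For reference, the binder_node fields that matter here (an abridged sketch; the real struct carries more ref-counting state):
struct binder_node {
    int debug_id;
    struct binder_work work;      /* queued on a todo list when ref counts change */
    struct rb_node rb_node;       /* link in proc->nodes, keyed by ptr */
    struct binder_proc *proc;     /* owning binder_proc */
    void __user *ptr;             /* user-space address of the object; NULL for the manager */
    void __user *cookie;
    int local_weak_refs;
    int local_strong_refs;
    unsigned has_strong_ref:1;
    unsigned has_weak_ref:1;
    struct list_head async_todo;  /* queued asynchronous transactions */
};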
2.5 Back in binder_ioctl, the reference bookkeeping of the new global context manager node (binder_context_mgr_node) is set up:
binder_context_mgr_node->local_weak_refs++; //bump the local weak reference count
binder_context_mgr_node->local_strong_refs++; //bump the local strong reference count
binder_context_mgr_node->has_strong_ref = 1; //a strong reference exists
binder_context_mgr_node->has_weak_ref = 1; //a weak reference exists
2.6 Leaving the switch, control falls through to err:
if (thread)
    thread->looper &= ~BINDER_LOOPER_STATE_NEED_RETURN;
which clears the BINDER_LOOPER_STATE_NEED_RETURN flag from binder_thread->looper.
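Putting section 2 together: stripped of servicemanager's own plumbing, becoming the context manager boils down to one open and one ioctl. A minimal user-space sketch (assuming the binder header is available as <linux/binder.h>; on other trees the path differs, and the mmap that servicemanager also performs is omitted here):
#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/binder.h>

int main(void)
{
    int fd = open("/dev/binder", O_RDWR);
    if (fd < 0) {
        perror("open /dev/binder");
        return 1;
    }
    /* Only the first caller (or a later caller with the recorded euid)
     * succeeds; a second attempt fails with EBUSY, as binder_ioctl shows. */
    if (ioctl(fd, BINDER_SET_CONTEXT_MGR, 0) < 0) {
        perror("BINDER_SET_CONTEXT_MGR");
        return 1;
    }
    printf("now the binder context manager\n");
    return 0;
}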
3. binder_loop(bs, svcmgr_handler)
void binder_loop(struct binder_state *bs, binder_handler func)
{
    int res;
    struct binder_write_read bwr;
    unsigned readbuf[32];
    bwr.write_size = 0;
    bwr.write_consumed = 0;
    bwr.write_buffer = 0;
    readbuf[0] = BC_ENTER_LOOPER; //tell the binder driver this thread has entered its loop
    binder_write(bs, readbuf, sizeof(unsigned));
    for (;;) {
        bwr.read_size = sizeof(readbuf); //how many bytes we can read
        bwr.read_consumed = 0;
        bwr.read_buffer = (unsigned) readbuf; //where the read data goes
        //loop reading from /dev/binder, watching for requests aimed at a service
        res = ioctl(bs->fd, BINDER_WRITE_READ, &bwr); //ioctl BINDER_WRITE_READ
        if (res < 0) {
            LOGE("binder_loop: ioctl failed (%s)\n", strerror(errno));
            break;
        }
        res = binder_parse(bs, 0, readbuf, bwr.read_consumed, func);
        if (res == 0) {
            LOGE("binder_loop: unexpected reply?!\n");
            break;
        }
        if (res < 0) {
            LOGE("binder_loop: io error %d %s\n", res, strerror(errno));
            break;
        }
    }
}
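Before walking through the pieces, here is what one pass of the loop might leave in readbuf. The exact command stream depends on driver state, so this walk is an assumption for illustration (one BR_NOOP followed by one BR_TRANSACTION), and walk_readbuf is a hypothetical helper, not part of servicemanager:
#include <stdint.h>
#include <string.h>

/* Sketch: walk bwr.read_consumed bytes of readbuf the way binder_parse does. */
static void walk_readbuf(const unsigned *readbuf, unsigned consumed)
{
    const unsigned char *p   = (const unsigned char *)readbuf;
    const unsigned char *end = p + consumed;   /* bwr.read_consumed */
    uint32_t cmd;
    if ((size_t)(end - p) >= sizeof(cmd)) {
        memcpy(&cmd, p, sizeof(cmd));          /* usually BR_NOOP comes first */
        p += sizeof(cmd);
    }
    if ((size_t)(end - p) >= sizeof(cmd)) {
        memcpy(&cmd, p, sizeof(cmd));          /* then e.g. BR_TRANSACTION */
        p += sizeof(cmd);
        /* a struct binder_transaction_data follows; binder_parse hands it
         * to func (svcmgr_handler) and advances past it */
    }
}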
3.1 binder_write
readbuf[0] has been set to BC_ENTER_LOOPER.
int binder_write(struct binder_state *bs, void *data, unsigned len)
{
    struct binder_write_read bwr;
    int res;
    bwr.write_size = len; //number of bytes to write
    bwr.write_consumed = 0;
    bwr.write_buffer = (unsigned) data; //the data to write
    bwr.read_size = 0;
    bwr.read_consumed = 0;
    bwr.read_buffer = 0;
    res = ioctl(bs->fd, BINDER_WRITE_READ, &bwr); //ioctl command BINDER_WRITE_READ
    if (res < 0) {
        fprintf(stderr,"binder_write: ioctl failed (%s)\n",strerror(errno));
    }
    return res;
}
Here bwr.write_buffer holds the single BC_ENTER_LOOPER command and bwr.write_size = 4.
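Spelled out, the descriptor the kernel receives for this call looks like this (a sketch of the same call site):
uint32_t cmd = BC_ENTER_LOOPER;
struct binder_write_read bwr = {
    .write_size     = sizeof(cmd),            /* 4: one bare command, no payload */
    .write_consumed = 0,
    .write_buffer   = (unsigned long)&cmd,
    /* read_size stays 0, so binder_ioctl will skip binder_thread_read entirely */
};
ioctl(bs->fd, BINDER_WRITE_READ, &bwr);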
3.1.1 This invokes the driver's ioctl method; only the executed parts are excerpted:
static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
    int ret;
    struct binder_proc *proc = filp->private_data; //recover the binder_proc
    struct binder_thread *thread;
    unsigned int size = _IOC_SIZE(cmd);
    void __user *ubuf = (void __user *)arg; //user-space pointer passed as arg
    //sleep on the binder_user_error_wait queue until binder_stop_on_user_error drops below 2
    ret = wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2);
    if (ret)
        return ret;
    mutex_lock(&binder_lock); //take the binder_lock mutex
    thread = binder_get_thread(proc); //look up (or create) the binder thread
    if (thread == NULL) {
        ret = -ENOMEM;
        goto err;
    }
    switch (cmd) {
    case BINDER_WRITE_READ: { //read/write data
        struct binder_write_read bwr; //binder read/write descriptor
        if (size != sizeof(struct binder_write_read)) { //sanity-check the payload size
            ret = -EINVAL;
            goto err;
        }
        if (copy_from_user(&bwr, ubuf, sizeof(bwr))) { //copy the descriptor from user space into the kernel
            ret = -EFAULT;
            goto err;
        }
        binder_debug(BINDER_DEBUG_READ_WRITE,"binder: %d:%d write %ld at %08lx, read %ld at %08lx\n",
                     proc->pid, thread->pid, bwr.write_size, bwr.write_buffer,bwr.read_size, bwr.read_buffer);
        if (bwr.write_size > 0) { //there is data to write
            ret = binder_thread_write(proc, thread, (void __user *)bwr.write_buffer, bwr.write_size, &bwr.write_consumed);
            if (ret < 0) {
                bwr.read_consumed = 0; //nothing was read
                if (copy_to_user(ubuf, &bwr, sizeof(bwr))) //copy the descriptor back to user space
                    ret = -EFAULT;
                goto err;
            }
        }
        if (bwr.read_size > 0) { //there is room to read into
            ret = binder_thread_read(proc, thread, (void __user *)bwr.read_buffer, bwr.read_size, &bwr.read_consumed, filp->f_flags & O_NONBLOCK);
            if (!list_empty(&proc->todo))
                wake_up_interruptible(&proc->wait);
            if (ret < 0) {
                if (copy_to_user(ubuf, &bwr, sizeof(bwr))) //copy the descriptor back to user space
                    ret = -EFAULT;
                goto err;
            }
        }
        binder_debug(BINDER_DEBUG_READ_WRITE,"binder: %d:%d wrote %ld of %ld, read return %ld of %ld\n",
                     proc->pid, thread->pid, bwr.write_consumed, bwr.write_size,bwr.read_consumed, bwr.read_size);
        if (copy_to_user(ubuf, &bwr, sizeof(bwr))) {
            ret = -EFAULT;
            goto err;
        }
        break;
    }
    ...
    }
    ret = 0;
err:
    if (thread)
        thread->looper &= ~BINDER_LOOPER_STATE_NEED_RETURN;
    mutex_unlock(&binder_lock); //release the binder_lock mutex
    wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2); //sleep on binder_user_error_wait again
    if (ret && ret != -ERESTARTSYS)
        printk(KERN_INFO "binder: %d:%d ioctl %x %lx returned %d\n", proc->pid, current->pid, cmd, arg, ret);
    return ret;
}
It recovers the binder_proc from filp->private_data,
calls binder_get_thread to fetch the binder_thread,
enters the BINDER_WRITE_READ branch of the switch,
and copies in the bwr descriptor that was passed down.
3.1.2 Since write_size > 0, binder_thread_write is entered; the executed parts:
int binder_thread_write(struct binder_proc *proc, struct binder_thread *thread,void __user *buffer, int size, signed long *consumed)
{
    uint32_t cmd;
    void __user *ptr = buffer + *consumed; //ptr points at the start of the unconsumed data
    void __user *end = buffer + size; //end points one past the end of the data
    while (ptr < end && thread->return_error == BR_OK) {
        if (get_user(cmd, (uint32_t __user *)ptr)) //fetch the command
            return -EFAULT;
        ptr += sizeof(uint32_t);
        if (_IOC_NR(cmd) < ARRAY_SIZE(binder_stats.bc)) {
            binder_stats.bc[_IOC_NR(cmd)]++;
            proc->stats.bc[_IOC_NR(cmd)]++;
            thread->stats.bc[_IOC_NR(cmd)]++;
        }
        switch (cmd) {
        ...
        case BC_ENTER_LOOPER: //the thread is entering its loop
            binder_debug(BINDER_DEBUG_THREADS,"binder: %d:%d BC_ENTER_LOOPER\n",proc->pid, thread->pid);
            if (thread->looper & BINDER_LOOPER_STATE_REGISTERED) { //already registered?
                thread->looper |= BINDER_LOOPER_STATE_INVALID; //flag the error
                binder_user_error("binder: %d:%d ERROR: BC_ENTER_LOOPER called after BC_REGISTER_LOOPER\n",proc->pid, thread->pid);
            }
            thread->looper |= BINDER_LOOPER_STATE_ENTERED; //mark the thread as entered
            break;
        ...
        }
        *consumed = ptr - buffer;
    }
    return 0;
}
Here ptr = buffer + 0 and end = buffer + 4, so ptr < end; and since a fresh binder_thread's return_error is BR_OK, the while loop is entered.
get_user copies the command from user space into the kernel variable cmd, and ptr advances by 4.
The switch then takes the BC_ENTER_LOOPER branch.
Given the thread's looper value, thread->looper & BINDER_LOOPER_STATE_REGISTERED is false,
so the thread simply gets binder_thread->looper |= BINDER_LOOPER_STATE_ENTERED and the switch ends.
*consumed = ptr - buffer = 4, so now ptr == end and the while loop exits (the loop runs once per command in the buffer).
The function returns 0.
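The cursor arithmetic generalizes to any number of commands; here is a tiny stand-alone analogue of the consume loop (illustrative only, not driver code):
#include <stdint.h>
#include <stdio.h>

/* Walk a buffer of 4-byte commands the way binder_thread_write does,
 * advancing a cursor and recording how much has been consumed. */
static int consume(const uint32_t *buffer, size_t size, size_t *consumed)
{
    const char *ptr = (const char *)buffer + *consumed;
    const char *end = (const char *)buffer + size;
    while (ptr < end) {
        uint32_t cmd = *(const uint32_t *)ptr; /* get_user() in the kernel */
        ptr += sizeof(uint32_t);
        printf("handling command 0x%x\n", cmd); /* the switch (cmd) would go here */
        *consumed = ptr - (const char *)buffer;
    }
    return 0;
}

int main(void)
{
    uint32_t buf[2] = { 0x1, 0x2 }; /* two bare commands: the loop body runs twice */
    size_t consumed = 0;
    consume(buf, sizeof(buf), &consumed);
    printf("consumed %zu bytes\n", consumed);
    return 0;
}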
3.1.3 binder_thread_write returns ret = 0.
copy_to_user(ubuf, &bwr, sizeof(bwr)) copies bwr back to user space, then control leaves the switch,
falls through err: ... and the call completes.
4. Entering the for(;;) loop
1. ioctl(bs->fd, BINDER_WRITE_READ, &bwr)
bwr.read_size = sizeof(readbuf); //how many bytes we can read
bwr.read_consumed = 0;
bwr.read_buffer = (unsigned) readbuf; //where the read data goes
//loop reading from /dev/binder, watching for requests aimed at a service
res = ioctl(bs->fd, BINDER_WRITE_READ, &bwr); //ioctl BINDER_WRITE_READ
This drives the same binder_ioctl path already quoted in full in 3.1.1, so the listing is not repeated here.
As before, it recovers the binder_proc from filp->private_data and then the binder_thread,
enters the BINDER_WRITE_READ branch of the switch, and copies the descriptor in from user space.
This time write_size = 0 while read_size = 32*4 > 0, so binder_thread_read is called:
static int binder_thread_read(struct binder_proc *proc,struct binder_thread *thread,void __user *buffer, int size,signed long *consumed, int non_block)
{
    void __user *ptr = buffer + *consumed;
    void __user *end = buffer + size;
    int ret = 0;
    int wait_for_proc_work;
    if (*consumed == 0) {
        if (put_user(BR_NOOP, (uint32_t __user *)ptr))
            return -EFAULT;
        ptr += sizeof(uint32_t);
    }
retry:
    wait_for_proc_work = thread->transaction_stack == NULL && list_empty(&thread->todo);
    if (thread->return_error != BR_OK && ptr < end) {
        if (thread->return_error2 != BR_OK) {
            if (put_user(thread->return_error2, (uint32_t __user *)ptr))
                return -EFAULT;
            ptr += sizeof(uint32_t);
            if (ptr == end)
                goto done;
            thread->return_error2 = BR_OK;
        }
        if (put_user(thread->return_error, (uint32_t __user *)ptr))
            return -EFAULT;
        ptr += sizeof(uint32_t);
        thread->return_error = BR_OK;
        goto done;
    }
    thread->looper |= BINDER_LOOPER_STATE_WAITING;
    if (wait_for_proc_work)
        proc->ready_threads++;
    mutex_unlock(&binder_lock);
    if (wait_for_proc_work) {
        if (!(thread->looper & (BINDER_LOOPER_STATE_REGISTERED | BINDER_LOOPER_STATE_ENTERED))) {
            binder_user_error("binder: %d:%d ERROR: Thread waiting for process work before calling BC_REGISTER_LOOPER or BC_ENTER_LOOPER (state %x)\n",
                              proc->pid, thread->pid, thread->looper);
            wait_event_interruptible(binder_user_error_wait,binder_stop_on_user_error < 2); //sleep on the binder_user_error_wait queue
        }
        binder_set_nice(proc->default_priority); //drop back to the proc's default priority
        if (non_block) {
            if (!binder_has_proc_work(proc, thread))
                ret = -EAGAIN;
        }
        else
            ret = wait_event_interruptible_exclusive(proc->wait, binder_has_proc_work(proc, thread));
    }
    else {
        if (non_block) {
            if (!binder_has_thread_work(thread))
                ret = -EAGAIN;
        }
        else
            ret = wait_event_interruptible(thread->wait, binder_has_thread_work(thread));
    }
    mutex_lock(&binder_lock);
    if (wait_for_proc_work)
        proc->ready_threads--;
    thread->looper &= ~BINDER_LOOPER_STATE_WAITING;
    if (ret)
        return ret;
    while (1) {
        uint32_t cmd;
        struct binder_transaction_data tr;
        struct binder_work *w;
        struct binder_transaction *t = NULL;
        if (!list_empty(&thread->todo))
            w = list_first_entry(&thread->todo, struct binder_work, entry);
        else if (!list_empty(&proc->todo) && wait_for_proc_work)
            w = list_first_entry(&proc->todo, struct binder_work, entry);
        else {
            if (ptr - buffer == 4 && !(thread->looper & BINDER_LOOPER_STATE_NEED_RETURN)) /* no data added */
                goto retry;
            break;
        }
        if (end - ptr < sizeof(tr) + 4)
            break;
        switch (w->type) {
        case BINDER_WORK_TRANSACTION: {
            t = container_of(w, struct binder_transaction, work);
        } break;
        case BINDER_WORK_TRANSACTION_COMPLETE: {
            cmd = BR_TRANSACTION_COMPLETE;
            if (put_user(cmd, (uint32_t __user *)ptr))
                return -EFAULT;
            ptr += sizeof(uint32_t);
            binder_stat_br(proc, thread, cmd);
            binder_debug(BINDER_DEBUG_TRANSACTION_COMPLETE,"binder: %d:%d BR_TRANSACTION_COMPLETE\n",proc->pid, thread->pid);
            list_del(&w->entry);
            kfree(w);
            binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
        } break;
        case BINDER_WORK_NODE: {
            struct binder_node *node = container_of(w, struct binder_node, work);
            uint32_t cmd = BR_NOOP; //nothing to tell user space unless a ref-count transition happened
            const char *cmd_name;
            int strong = node->internal_strong_refs || node->local_strong_refs;
            int weak = !hlist_empty(&node->refs) || node->local_weak_refs || strong;
            if (weak && !node->has_weak_ref) {
                cmd = BR_INCREFS;
                cmd_name = "BR_INCREFS";
                node->has_weak_ref = 1;
                node->pending_weak_ref = 1;
                node->local_weak_refs++;
            } else if (strong && !node->has_strong_ref) {
                cmd = BR_ACQUIRE;
                cmd_name = "BR_ACQUIRE";
                node->has_strong_ref = 1;
                node->pending_strong_ref = 1;
                node->local_strong_refs++;
            } else if (!strong && node->has_strong_ref) {
                cmd = BR_RELEASE;
                cmd_name = "BR_RELEASE";
                node->has_strong_ref = 0;
            } else if (!weak && node->has_weak_ref) {
                cmd = BR_DECREFS;
                cmd_name = "BR_DECREFS";
                node->has_weak_ref = 0;
            }
            if (cmd != BR_NOOP) {
                if (put_user(cmd, (uint32_t __user *)ptr))
                    return -EFAULT;
                ptr += sizeof(uint32_t);
                if (put_user(node->ptr, (void * __user *)ptr))
                    return -EFAULT;
                ptr += sizeof(void *);
                if (put_user(node->cookie, (void * __user *)ptr))
                    return -EFAULT;
                ptr += sizeof(void *);
                binder_stat_br(proc, thread, cmd);
                binder_debug(BINDER_DEBUG_USER_REFS,"binder: %d:%d %s %d u%p c%p\n",
                             proc->pid, thread->pid, cmd_name, node->debug_id, node->ptr, node->cookie);
            } else {
                list_del_init(&w->entry);
                if (!weak && !strong) {
                    binder_debug(BINDER_DEBUG_INTERNAL_REFS,"binder: %d:%d node %d u%p c%p deleted\n",
                                 proc->pid, thread->pid, node->debug_id,node->ptr, node->cookie);
                    rb_erase(&node->rb_node, &proc->nodes);
                    kfree(node);
                    binder_stats_deleted(BINDER_STAT_NODE);
                } else {
                    binder_debug(BINDER_DEBUG_INTERNAL_REFS,
                                 "binder: %d:%d node %d u%p c%p state unchanged\n",
                                 proc->pid, thread->pid, node->debug_id, node->ptr,
                                 node->cookie);
                }
            }
        } break;
        case BINDER_WORK_DEAD_BINDER:
        case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
        case BINDER_WORK_CLEAR_DEATH_NOTIFICATION: {
            struct binder_ref_death *death;
            uint32_t cmd;
            death = container_of(w, struct binder_ref_death, work);
            if (w->type == BINDER_WORK_CLEAR_DEATH_NOTIFICATION)
                cmd = BR_CLEAR_DEATH_NOTIFICATION_DONE;
            else
                cmd = BR_DEAD_BINDER; //the binder object died
            if (put_user(cmd, (uint32_t __user *)ptr))
                return -EFAULT;
            ptr += sizeof(uint32_t);
            if (put_user(death->cookie, (void * __user *)ptr))
                return -EFAULT;
            ptr += sizeof(void *);
            binder_debug(BINDER_DEBUG_DEATH_NOTIFICATION,"binder: %d:%d %s %p\n",proc->pid, thread->pid,
                         cmd == BR_DEAD_BINDER ?"BR_DEAD_BINDER":"BR_CLEAR_DEATH_NOTIFICATION_DONE",death->cookie);
            if (w->type == BINDER_WORK_CLEAR_DEATH_NOTIFICATION) {
                list_del(&w->entry);
                kfree(death);
                binder_stats_deleted(BINDER_STAT_DEATH);
            } else
                list_move(&w->entry, &proc->delivered_death);
            if (cmd == BR_DEAD_BINDER)
                goto done; /* DEAD_BINDER notifications can cause transactions */
        } break;
        }
        if (!t)
            continue;
        BUG_ON(t->buffer == NULL);
        if (t->buffer->target_node) {
            struct binder_node *target_node = t->buffer->target_node;
            tr.target.ptr = target_node->ptr;
            tr.cookie = target_node->cookie;
            t->saved_priority = task_nice(current);
            if (t->priority < target_node->min_priority && !(t->flags & TF_ONE_WAY))
                binder_set_nice(t->priority);
            else if (!(t->flags & TF_ONE_WAY) || t->saved_priority > target_node->min_priority)
                binder_set_nice(target_node->min_priority);
            cmd = BR_TRANSACTION; //an incoming request
        } else {
            tr.target.ptr = NULL;
            tr.cookie = NULL;
            cmd = BR_REPLY; //a reply
        }
        tr.code = t->code;
        tr.flags = t->flags;
        tr.sender_euid = t->sender_euid;
        if (t->from) {
            struct task_struct *sender = t->from->proc->tsk;
            tr.sender_pid = task_tgid_nr_ns(sender,current->nsproxy->pid_ns);
        } else {
            tr.sender_pid = 0;
        }
        tr.data_size = t->buffer->data_size;
        tr.offsets_size = t->buffer->offsets_size;
        tr.data.ptr.buffer = (void *)t->buffer->data + proc->user_buffer_offset;
        tr.data.ptr.offsets = tr.data.ptr.buffer + ALIGN(t->buffer->data_size,sizeof(void *));
        if (put_user(cmd, (uint32_t __user *)ptr))
            return -EFAULT;
        ptr += sizeof(uint32_t);
        if (copy_to_user(ptr, &tr, sizeof(tr)))
            return -EFAULT;
        ptr += sizeof(tr);
        binder_stat_br(proc, thread, cmd);
        binder_debug(BINDER_DEBUG_TRANSACTION,"binder: %d:%d %s %d %d:%d, cmd %dsize %zd-%zd ptr %p-%p\n",
                     proc->pid, thread->pid,(cmd == BR_TRANSACTION) ? "BR_TRANSACTION" :"BR_REPLY",
                     t->debug_id, t->from ? t->from->proc->pid : 0,t->from ? t->from->pid : 0, cmd,
                     t->buffer->data_size, t->buffer->offsets_size,tr.data.ptr.buffer, tr.data.ptr.offsets);
        list_del(&t->work.entry);
        t->buffer->allow_user_free = 1;
        if (cmd == BR_TRANSACTION && !(t->flags & TF_ONE_WAY)) {
            t->to_parent = thread->transaction_stack;
            t->to_thread = thread;
            thread->transaction_stack = t;
        } else {
            t->buffer->transaction = NULL;
            kfree(t);
            binder_stats_deleted(BINDER_STAT_TRANSACTION);
        }
        break;
    }
done:
    *consumed = ptr - buffer;
    if (proc->requested_threads + proc->ready_threads == 0 &&
        proc->requested_threads_started < proc->max_threads &&
        (thread->looper & (BINDER_LOOPER_STATE_REGISTERED |
         BINDER_LOOPER_STATE_ENTERED)) /* the user-space code fails to */
        /*spawn a new thread if we leave this out */) {
        proc->requested_threads++;
        binder_debug(BINDER_DEBUG_THREADS,"binder: %d:%d BR_SPAWN_LOOPER\n",proc->pid, thread->pid);
        if (put_user(BR_SPAWN_LOOPER, (uint32_t __user *)buffer))
            return -EFAULT;
    }
    return 0;
}
Here read_consumed = 0, so ptr = buffer and end = buffer + 32*4.
Since *consumed == 0, BR_NOOP is stored at ptr and ptr += 4.
wait_for_proc_work = (thread->transaction_stack == NULL && list_empty(&thread->todo)) = 1.
if (thread->return_error != BR_OK && ptr < end): a fresh binder_thread has return_error == BR_OK, so the branch is skipped.
binder_thread->looper |= BINDER_LOOPER_STATE_WAITING records the thread's state,
and binder_proc->ready_threads++.
Control enters if (wait_for_proc_work) and then reaches if (non_block);
non_block is filp->f_flags & O_NONBLOCK, and the device was opened with plain O_RDWR, so non_block = 0.
wait_event_interruptible_exclusive(proc->wait, binder_has_proc_work(proc, thread)) therefore puts the thread to sleep,
with binder_has_proc_work as the wake-up condition:
static int binder_has_proc_work(struct binder_proc *proc,struct binder_thread *thread)
{ //true when the binder_proc->todo list is non-empty, or BINDER_LOOPER_STATE_NEED_RETURN is set in binder_thread->looper
    return !list_empty(&proc->todo) ||(thread->looper & BINDER_LOOPER_STATE_NEED_RETURN);
}
servicemanager parks here until something wakes it up.
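For context, here is the producer side that eventually wakes this sleep, in sketch form; it is condensed from the driver's binder_transaction, so treat the surrounding details as approximate:
/* A client's BC_TRANSACTION aimed at handle 0 ends up queueing work on
 * servicemanager's binder_proc and waking one waiting thread: */
t->work.type = BINDER_WORK_TRANSACTION;
list_add_tail(&t->work.entry, &target_proc->todo); /* makes binder_has_proc_work() true */
wake_up_interruptible(&target_proc->wait);         /* wakes the thread sleeping above */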
To be continued...