The kernel's mechanism for preventing reserved-memory leaks (repost)

The leak-prevention mechanism for reserved memory works as follows: when a process exits, the kernel runs

do_exit->exit_files->put_files_struct->close_files->filp_close->fput->__fput

and __fput invokes file->f_op->release(inode, file).

In the release function of the reserved-memory driver, the decision is made by tgid: if a reserved memory block registered under that tgid has not been freed explicitly, the process is assumed to have exited abnormally, and the driver then tries to free the reserved memory blocks associated with that tgid.
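To make this concrete, here is a minimal sketch of what such a release handler could look like. It assumes a hypothetical driver that keeps its reserved blocks on a list keyed by the allocating tgid; the names (reserved_block, reserved_list, reserved_lock, reserved_mem_release) are illustrative, not the actual driver's code.

struct reserved_block {
	struct list_head node;
	pid_t tgid;	/* tgid of the allocating process, recorded at allocation time */
	void *mem;
	size_t size;
};

static LIST_HEAD(reserved_list);
static DEFINE_SPINLOCK(reserved_lock);

/* Hypothetical f_op->release() of the reserved-memory driver. */
static int reserved_mem_release(struct inode *inode, struct file *file)
{
	struct reserved_block *blk, *tmp;

	/*
	 * Reached via __fput() -> f_op->release().  Any block still
	 * registered under current->tgid was not freed explicitly, so
	 * treat the exit as abnormal and reclaim the block here.
	 */
	spin_lock(&reserved_lock);
	list_for_each_entry_safe(blk, tmp, &reserved_list, node) {
		if (blk->tgid == current->tgid) {
			list_del(&blk->node);
			/* return blk->mem to the reserved pool (allocator-specific) */
			kfree(blk);
		}
	}
	spin_unlock(&reserved_lock);
	return 0;
}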

However, debugging on Android showed that binder sometimes calls put_files_struct from binder_deferred_func to release the file resources belonging to a process. In that case the tgid belongs to the thread of binder's workqueue, so the reserved-memory driver cannot find a matching tgid; even though the original process had allocated reserved memory, the blocks are never freed, and the memory leaks.
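A quick way to observe the failure (illustrative only, not code from the original driver) is to add a debug print to the release handler sketched above:

	/* Inside the hypothetical reserved_mem_release() above. */
	pr_info("reserved-mem release: current=%s tgid=%d\n",
		current->comm, current->tgid);
	/*
	 * Normal do_exit() path:  current is the dying application, the tgid matches.
	 * Binder deferred path:   current is the "binder" workqueue thread,
	 *                         so no block matches and the memory leaks.
	 */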

In the kernel, binder creates a single-threaded workqueue in binder_init:

 binder_deferred_workqueue = create_singlethread_workqueue("binder");
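The pieces that route the deferred work onto that thread look roughly like this (paraphrased from the same era of binder.c; names such as binder_defer_work and the exact flag type may differ by kernel version). Whatever runs from this work item, including the put_files_struct() call shown at the end of this article, executes with current set to the "binder" kernel thread.

static HLIST_HEAD(binder_deferred_list);
static DEFINE_MUTEX(binder_deferred_lock);
static struct workqueue_struct *binder_deferred_workqueue;

static void binder_deferred_func(struct work_struct *work);
static DECLARE_WORK(binder_deferred_work, binder_deferred_func);

static void binder_defer_work(struct binder_proc *proc, int defer)
{
	mutex_lock(&binder_deferred_lock);
	proc->deferred_work |= defer;
	if (hlist_unhashed(&proc->deferred_work_node)) {
		hlist_add_head(&proc->deferred_work_node, &binder_deferred_list);
		/* binder_deferred_func() will run on the "binder" kthread */
		queue_work(binder_deferred_workqueue, &binder_deferred_work);
	}
	mutex_unlock(&binder_deferred_lock);
}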

void put_files_struct(struct files_struct *files)
{
	struct fdtable *fdt;

	if (atomic_dec_and_test(&files->count)) {
		close_files(files);
		/*
		 * Free the fd and fdset arrays if we expanded them.
		 * If the fdtable was embedded, pass files for freeing
		 * at the end of the RCU grace period. Otherwise,
		 * you can free files immediately.
		 */
		rcu_read_lock();
		fdt = files_fdtable(files);
		if (fdt != &files->fdtab)
			kmem_cache_free(files_cachep, files);
		free_fdtable(fdt);
		rcu_read_unlock();
	}
}

static void close_files(struct files_struct * files)
{
	int i, j;
	struct fdtable *fdt;

	j = 0;

	/*
	 * It is safe to dereference the fd table without RCU or
	 * ->file_lock because this is the last reference to the
	 * files structure.  But use RCU to shut RCU-lockdep up.
	 */
	rcu_read_lock();
	fdt = files_fdtable(files);
	rcu_read_unlock();
	for (;;) {
		unsigned long set;
		i = j * __NFDBITS;
		if (i >= fdt->max_fds)
			break;
		set = fdt->open_fds->fds_bits[j++];
		while (set) {
			if (set & 1) {
				struct file * file = xchg(&fdt->fd[i], NULL);
				if (file) {
					filp_close(file, files);
					cond_resched();
				}
			}
			i++;
			set >>= 1;
		}
	}
}

int filp_close(struct file *filp, fl_owner_t id)
{
	int retval = 0;

	if (!file_count(filp)) {
		printk(KERN_ERR "VFS: Close: file count is 0\n");
		return 0;
	}

	if (filp->f_op && filp->f_op->flush)
		retval = filp->f_op->flush(filp, id);

	dnotify_flush(filp, id);
	locks_remove_posix(filp, id);
	fput(filp);
	return retval;
}

void fput(struct file *file)
{
	if (atomic_long_dec_and_test(&file->f_count))
		__fput(file);
}

static void __fput(struct file *file)
{
	struct dentry *dentry = file->f_path.dentry;
	struct vfsmount *mnt = file->f_path.mnt;
	struct inode *inode = dentry->d_inode;

	might_sleep();

	fsnotify_close(file);
	/*
	 * The function eventpoll_release() should be the first called
	 * in the file cleanup chain.
	 */
	eventpoll_release(file);
	locks_remove_flock(file);

	if (unlikely(file->f_flags & FASYNC)) {
		if (file->f_op && file->f_op->fasync)
			file->f_op->fasync(-1, file, 0);
	}
	/* This is where the reserved-memory driver's release() runs. */
	if (file->f_op && file->f_op->release)
		file->f_op->release(inode, file);
	security_file_free(file);
	ima_file_free(file);
	if (unlikely(S_ISCHR(inode->i_mode) && inode->i_cdev != NULL))
		cdev_put(inode->i_cdev);
	fops_put(file->f_op);
	put_pid(file->f_owner.pid);
	file_kill(file);
	if (file->f_mode & FMODE_WRITE)
		drop_file_write_access(file);
	file->f_path.dentry = NULL;
	file->f_path.mnt = NULL;
	file_free(file);
	dput(dentry);
	mntput(mnt);
}

static void binder_deferred_func(struct work_struct *work)
{
	struct binder_proc *proc;
	struct files_struct *files;

	int defer;
	do {
		mutex_lock(&binder_lock);
		mutex_lock(&binder_deferred_lock);
		if (!hlist_empty(&binder_deferred_list)) {
			proc = hlist_entry(binder_deferred_list.first,
					struct binder_proc, deferred_work_node);
			hlist_del_init(&proc->deferred_work_node);
			defer = proc->deferred_work;
			proc->deferred_work = 0;
		} else {
			proc = NULL;
			defer = 0;
		}
		mutex_unlock(&binder_deferred_lock);

		files = NULL;
		if (defer & BINDER_DEFERRED_PUT_FILES) {
			files = proc->files;
			if (files)
				proc->files = NULL;
		}

		if (defer & BINDER_DEFERRED_FLUSH)
			binder_deferred_flush(proc);

		if (defer & BINDER_DEFERRED_RELEASE)
			binder_deferred_release(proc); /* frees proc */

		mutex_unlock(&binder_lock);
		if (files)
			/* Runs on the "binder" workqueue thread, not in the
			 * context of the process that owned these files. */
			put_files_struct(files);
	} while (proc);
}