Reposted from: http://blog.csdn.net/adaptiver/article/details/7425496
How reserved memory guards against leaks: when a process exits, it runs
do_exit->exit_files->put_files_struct->close_files->filp_close->fput->__fput
and __fput calls file->f_op->release(inode, file).
The release handler of the reserved-memory driver keys its decision on the tgid: if a reserved block belonging to that tgid was never freed explicitly, the process is assumed to have exited abnormally, and the driver tries to release the reserved block tied to that tgid.
While debugging on Android, however, we found that binder sometimes calls put_files_struct from binder_deferred_func to drop the file resources owned by a process. In that path the current tgid is the tgid of the thread backing binder's workqueue, so the reserved-memory driver cannot match the original process's tgid; even if that process had allocated reserved memory, the blocks are never released, and the memory leaks.
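As a concrete illustration, here is a minimal sketch of what such a release hook could look like. The names (resmem_block, resmem_list, resmem_release) and the bookkeeping are hypothetical, not taken from the original driver; the point is only that the lookup is keyed on current->tgid, which identifies the exiting process only when __fput runs in that process's own exit path.

#include <linux/fs.h>
#include <linux/list.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/types.h>

/* Hypothetical bookkeeping for reserved memory, keyed by owner tgid. */
struct resmem_block {
	struct list_head node;
	pid_t tgid;		/* recorded from current->tgid at allocation time */
	phys_addr_t base;
	size_t size;
	int freed;		/* set when the owner frees the block explicitly */
};

static LIST_HEAD(resmem_list);
static DEFINE_SPINLOCK(resmem_lock);

static int resmem_release(struct inode *inode, struct file *file)
{
	struct resmem_block *blk, *tmp;

	/*
	 * When release() is reached from the process's own exit path,
	 * current->tgid is the dying process.  When it is reached from
	 * binder's deferred workqueue instead, current is the "binder"
	 * kthread, this lookup finds nothing, and the block leaks.
	 */
	spin_lock(&resmem_lock);
	list_for_each_entry_safe(blk, tmp, &resmem_list, node) {
		if (blk->tgid == current->tgid && !blk->freed) {
			printk(KERN_WARNING "resmem: tgid %d exited without freeing block, reclaiming\n",
			       current->tgid);
			list_del(&blk->node);
			/* ... return blk->base/blk->size to the reserved pool ... */
			kfree(blk);
		}
	}
	spin_unlock(&resmem_lock);
	return 0;
}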
In the kernel, binder sets up a workqueue in binder_init:
binder_deferred_workqueue = create_singlethread_workqueue("binder");
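Deferred work reaches that workqueue through binder_defer_work(); the version below is paraphrased from the staging binder driver of roughly this kernel generation and may differ in detail from your tree. binder_vma_close(), for instance, queues BINDER_DEFERRED_PUT_FILES when a dying process's binder mapping is torn down, which is how put_files_struct() ends up executing on the "binder" kthread:

static DECLARE_WORK(binder_deferred_work, binder_deferred_func);

static void
binder_defer_work(struct binder_proc *proc, enum binder_deferred_state defer)
{
	mutex_lock(&binder_deferred_lock);
	/* accumulate the requested deferred actions for this proc */
	proc->deferred_work |= defer;
	if (hlist_unhashed(&proc->deferred_work_node)) {
		hlist_add_head(&proc->deferred_work_node,
				&binder_deferred_list);
		/* binder_deferred_func() will later run on the "binder" kthread */
		queue_work(binder_deferred_workqueue, &binder_deferred_work);
	}
	mutex_unlock(&binder_deferred_lock);
}

The relevant kernel functions along the close path (put_files_struct through __fput) and binder_deferred_func itself are listed below for reference.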
void put_files_struct(struct files_struct *files)
{
	struct fdtable *fdt;

	if (atomic_dec_and_test(&files->count)) {
		close_files(files);	/* last reference gone: close every open fd */
		/*
		 * Free the fd and fdset arrays if we expanded them.
		 * If the fdtable was embedded, pass files for freeing
		 * at the end of the RCU grace period. Otherwise,
		 * you can free files immediately.
		 */
		rcu_read_lock();
		fdt = files_fdtable(files);
		if (fdt != &files->fdtab)
			kmem_cache_free(files_cachep, files);
		free_fdtable(fdt);
		rcu_read_unlock();
	}
}
static void close_files(struct files_struct * files)
{
	int i, j;
	struct fdtable *fdt;

	j = 0;

	/*
	 * It is safe to dereference the fd table without RCU or
	 * ->file_lock because this is the last reference to the
	 * files structure. But use RCU to shut RCU-lockdep up.
	 */
	rcu_read_lock();
	fdt = files_fdtable(files);
	rcu_read_unlock();
	for (;;) {
		unsigned long set;
		i = j * __NFDBITS;
		if (i >= fdt->max_fds)
			break;
		set = fdt->open_fds->fds_bits[j++];
		while (set) {
			if (set & 1) {
				struct file * file = xchg(&fdt->fd[i], NULL);
				if (file) {
					/* filp_close -> fput -> __fput -> f_op->release */
					filp_close(file, files);
					cond_resched();
				}
			}
			i++;
			set >>= 1;
		}
	}
}
int filp_close(struct file *filp, fl_owner_t id)
{
	int retval = 0;

	if (!file_count(filp)) {
		printk(KERN_ERR "VFS: Close: file count is 0\n");
		return 0;
	}

	if (filp->f_op && filp->f_op->flush)
		retval = filp->f_op->flush(filp, id);

	dnotify_flush(filp, id);
	locks_remove_posix(filp, id);
	fput(filp);
	return retval;
}
void fput(struct file *file)
{
	if (atomic_long_dec_and_test(&file->f_count))
		__fput(file);
}
static void __fput(struct file *file)
{
	struct dentry *dentry = file->f_path.dentry;
	struct vfsmount *mnt = file->f_path.mnt;
	struct inode *inode = dentry->d_inode;

	might_sleep();

	fsnotify_close(file);
	/*
	 * The function eventpoll_release() should be the first called
	 * in the file cleanup chain.
	 */
	eventpoll_release(file);
	locks_remove_flock(file);

	if (unlikely(file->f_flags & FASYNC)) {
		if (file->f_op && file->f_op->fasync)
			file->f_op->fasync(-1, file, 0);
	}
	if (file->f_op && file->f_op->release)
		file->f_op->release(inode, file);	/* the reserved-memory driver's release() runs here */
	security_file_free(file);
	ima_file_free(file);
	if (unlikely(S_ISCHR(inode->i_mode) && inode->i_cdev != NULL))
		cdev_put(inode->i_cdev);
	fops_put(file->f_op);
	put_pid(file->f_owner.pid);
	file_kill(file);
	if (file->f_mode & FMODE_WRITE)
		drop_file_write_access(file);
	file->f_path.dentry = NULL;
	file->f_path.mnt = NULL;
	file_free(file);
	dput(dentry);
	mntput(mnt);
}
static void binder_deferred_func(struct work_struct *work)
{
	struct binder_proc *proc;
	struct files_struct *files;

	int defer;
	do {
		mutex_lock(&binder_lock);
		mutex_lock(&binder_deferred_lock);
		if (!hlist_empty(&binder_deferred_list)) {
			proc = hlist_entry(binder_deferred_list.first,
					struct binder_proc, deferred_work_node);
			hlist_del_init(&proc->deferred_work_node);
			defer = proc->deferred_work;
			proc->deferred_work = 0;
		} else {
			proc = NULL;
			defer = 0;
		}
		mutex_unlock(&binder_deferred_lock);

		files = NULL;
		if (defer & BINDER_DEFERRED_PUT_FILES) {
			files = proc->files;
			if (files)
				proc->files = NULL;
		}

		if (defer & BINDER_DEFERRED_FLUSH)
			binder_deferred_flush(proc);

		if (defer & BINDER_DEFERRED_RELEASE)
			binder_deferred_release(proc); /* frees proc */

		mutex_unlock(&binder_lock);
		/*
		 * put_files_struct() runs here on the "binder" workqueue
		 * kthread, so in any f_op->release() reached from it,
		 * current->tgid is the kthread's tgid, not that of the
		 * process which owned these files.
		 */
		if (files)
			put_files_struct(files);
	} while (proc);
}
Original article: http://www.cnblogs.com/embedded-linux/p/5891901.html