/**********************************************************************
 * Android ashmem hacking
 * Note:
 *     Some recent work involves binder, and binder in turn depends on
 * ashmem, so I am tracing through this part of the code first.
 *
 *                        2016-1-12  深圳 南山平山村  曾剑锋
 *********************************************************************/

/**
 * Reference:
 *   Android系统匿名共享内存Ashmem(Anonymous Shared Memory)驱动程序源代码分析
 *   http://blog.csdn.net/luoshengyang/article/details/6664554
 */

/*
 * ashmem_area - anonymous shared memory area
 * Lifecycle: From our parent file's open() until its release()
 * Locking: Protected by `ashmem_mutex'
 * Big Note: Mappings do NOT pin this structure; it dies on close()
 */
struct ashmem_area {
    char name[ASHMEM_FULL_NAME_LEN]; /* optional name for /proc/pid/maps */
    struct list_head unpinned_list;  /* list of all ashmem areas */
    struct file *file;               /* the shmem-based backing file */
    size_t size;                     /* size of the mapping, in bytes */
    unsigned long prot_mask;         /* allowed prot bits, as vm_flags */
};

/*
 * ashmem_range - represents an interval of unpinned (evictable) pages
 * Lifecycle: From unpin to pin
 * Locking: Protected by `ashmem_mutex'
 */
struct ashmem_range {
    struct list_head lru;            /* entry in LRU list */
    struct list_head unpinned;       /* entry in its area's unpinned list */
    struct ashmem_area *asma;        /* associated area */
    size_t pgstart;                  /* starting page, inclusive */
    size_t pgend;                    /* ending page, inclusive */
    unsigned int purged;             /* ASHMEM_NOT or ASHMEM_WAS_PURGED */
};

module_init(ashmem_init);            /* -> ashmem_init() below */
module_exit(ashmem_exit);

static int __init ashmem_init(void)
{
    int ret;

    // static struct kmem_cache *ashmem_area_cachep __read_mostly;
    ashmem_area_cachep = kmem_cache_create("ashmem_area_cache",
                                           sizeof(struct ashmem_area),
                                           0, 0, NULL);
    if (unlikely(!ashmem_area_cachep)) {
        printk(KERN_ERR "ashmem: failed to create slab cache\n");
        return -ENOMEM;
    }

    ashmem_range_cachep = kmem_cache_create("ashmem_range_cache",
                                            sizeof(struct ashmem_range),
                                            0, 0, NULL);
    if (unlikely(!ashmem_range_cachep)) {
        printk(KERN_ERR "ashmem: failed to create slab cache\n");
        return -ENOMEM;
    }

    ret = misc_register(&ashmem_misc);          /* -> ashmem_misc below */
    if (unlikely(ret)) {
        printk(KERN_ERR "ashmem: failed to register misc device!\n");
        return ret;
    }

    register_shrinker(&ashmem_shrinker);

    printk(KERN_INFO "ashmem: initialized\n");

    return 0;
}

static struct file_operations ashmem_fops = {
    .owner          = THIS_MODULE,
    .open           = ashmem_open,              /* -> ashmem_open() */
    .release        = ashmem_release,
    .read           = ashmem_read,
    .llseek         = ashmem_llseek,
    .mmap           = ashmem_mmap,              /* -> ashmem_mmap() */
    .unlocked_ioctl = ashmem_ioctl,             /* -> ashmem_ioctl() */
    .compat_ioctl   = ashmem_ioctl,
};

static struct miscdevice ashmem_misc = {
    .minor = MISC_DYNAMIC_MINOR,
    .name  = "ashmem",
    .fops  = &ashmem_fops,                      /* -> ashmem_fops above */
};

static int ashmem_open(struct inode *inode, struct file *file)
{
    struct ashmem_area *asma;
    int ret;

    ret = generic_file_open(inode, file);
    if (unlikely(ret))
        return ret;

    // static struct kmem_cache *ashmem_area_cachep __read_mostly;
    // ashmem_area_cachep = kmem_cache_create("ashmem_area_cache",
    //                                        sizeof(struct ashmem_area),
    //                                        0, 0, NULL);
    asma = kmem_cache_zalloc(ashmem_area_cachep, GFP_KERNEL);
    if (unlikely(!asma))
        return -ENOMEM;

    INIT_LIST_HEAD(&asma->unpinned_list);
    // #define ASHMEM_NAME_PREFIX "dev/ashmem/"
    // #define ASHMEM_NAME_PREFIX_LEN (sizeof(ASHMEM_NAME_PREFIX) - 1)
    // #define ASHMEM_FULL_NAME_LEN (ASHMEM_NAME_LEN + ASHMEM_NAME_PREFIX_LEN)
    memcpy(asma->name, ASHMEM_NAME_PREFIX, ASHMEM_NAME_PREFIX_LEN);
    // #define PROT_MASK (PROT_EXEC | PROT_READ | PROT_WRITE)
    asma->prot_mask = PROT_MASK;
    // other file operations can get this asma back via file->private_data
    file->private_data = asma;

    return 0;
}
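/*
 * User-space sketch of the open() path: misc_register() above exposes the
 * driver as /dev/ashmem, and every open() runs ashmem_open() and hangs a
 * fresh ashmem_area off file->private_data, so each fd is an independent
 * region. This is only a rough illustration, assuming a device/kernel
 * where /dev/ashmem exists and the UAPI header <linux/ashmem.h> is
 * available (it uses the size ioctls handled further below); error
 * handling is trimmed.
 */
#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/ashmem.h>   /* ASHMEM_SET_SIZE / ASHMEM_GET_SIZE */

int main(void)
{
    /* each open() of the misc device runs ashmem_open() and gets its
     * own ashmem_area via file->private_data */
    int fd1 = open("/dev/ashmem", O_RDWR);
    int fd2 = open("/dev/ashmem", O_RDWR);
    if (fd1 < 0 || fd2 < 0) {
        perror("open /dev/ashmem");
        return 1;
    }

    /* sizing one region does not touch the other */
    ioctl(fd1, ASHMEM_SET_SIZE, 4096);
    printf("fd1 size=%d fd2 size=%d\n",
           ioctl(fd1, ASHMEM_GET_SIZE, 0),
           ioctl(fd2, ASHMEM_GET_SIZE, 0));   /* expect 4096 and 0 */

    close(fd1);   /* ashmem_release() frees each area independently */
    close(fd2);
    return 0;
}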
static long ashmem_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
    struct ashmem_area *asma = file->private_data;
    long ret = -ENOTTY;

    switch (cmd) {
    case ASHMEM_SET_NAME:
        ret = set_name(asma, (void __user *) arg);      /* -> set_name() */
        break;
    case ASHMEM_GET_NAME:
        ret = get_name(asma, (void __user *) arg);      /* -> get_name() */
        break;
    case ASHMEM_SET_SIZE:
        ret = -EINVAL;
        if (!asma->file) {
            ret = 0;
            asma->size = (size_t) arg;
        }
        break;
    case ASHMEM_GET_SIZE:
        ret = asma->size;
        break;
    case ASHMEM_SET_PROT_MASK:
        ret = set_prot_mask(asma, arg);
        break;
    case ASHMEM_GET_PROT_MASK:
        ret = asma->prot_mask;
        break;
    case ASHMEM_PIN:
    case ASHMEM_UNPIN:
    case ASHMEM_GET_PIN_STATUS:
        ret = ashmem_pin_unpin(asma, cmd, (void __user *) arg);
        break;
    case ASHMEM_PURGE_ALL_CACHES:
        ret = -EPERM;
        if (capable(CAP_SYS_ADMIN)) {
            struct shrink_control sc = {
                .gfp_mask = GFP_KERNEL,
                .nr_to_scan = 0,
            };
            ret = ashmem_shrink(&ashmem_shrinker, &sc);
            sc.nr_to_scan = ret;
            ashmem_shrink(&ashmem_shrinker, &sc);
        }
        break;
    }

    return ret;
}

static int set_name(struct ashmem_area *asma, void __user *name)
{
    int ret = 0;

    mutex_lock(&ashmem_mutex);

    /* cannot change an existing mapping's name */
    if (unlikely(asma->file)) {
        ret = -EINVAL;
        goto out;
    }

    if (unlikely(copy_from_user(asma->name + ASHMEM_NAME_PREFIX_LEN,
                                name, ASHMEM_NAME_LEN)))
        ret = -EFAULT;
    asma->name[ASHMEM_FULL_NAME_LEN-1] = '\0';

out:
    mutex_unlock(&ashmem_mutex);

    return ret;
}

static int get_name(struct ashmem_area *asma, void __user *name)
{
    int ret = 0;

    mutex_lock(&ashmem_mutex);
    if (asma->name[ASHMEM_NAME_PREFIX_LEN] != '\0') {
        size_t len;

        /*
         * Copying only `len', instead of ASHMEM_NAME_LEN, bytes
         * prevents us from revealing one user's stack to another.
         */
        len = strlen(asma->name + ASHMEM_NAME_PREFIX_LEN) + 1;
        if (unlikely(copy_to_user(name,
                     asma->name + ASHMEM_NAME_PREFIX_LEN, len)))
            ret = -EFAULT;
    } else {
        if (unlikely(copy_to_user(name, ASHMEM_NAME_DEF,
                                  sizeof(ASHMEM_NAME_DEF))))
            ret = -EFAULT;
    }
    mutex_unlock(&ashmem_mutex);

    return ret;
}
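/*
 * User-space sketch of the name path: ASHMEM_SET_NAME copies a full
 * ASHMEM_NAME_LEN bytes after the fixed "dev/ashmem/" prefix, so the user
 * buffer should be that large, and set_name() refuses once asma->file
 * exists (i.e. after the first mmap). ASHMEM_GET_NAME returns the name
 * without the prefix, or ASHMEM_NAME_DEF if none was set. Rough
 * illustration only, assuming <linux/ashmem.h> and an accessible
 * /dev/ashmem; the name "demo-region" is arbitrary.
 */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <unistd.h>
#include <linux/ashmem.h>

int main(void)
{
    int fd = open("/dev/ashmem", O_RDWR);
    if (fd < 0) {
        perror("open");
        return 1;
    }

    /* the kernel copies ASHMEM_NAME_LEN bytes, so use a buffer that big */
    char name[ASHMEM_NAME_LEN] = "demo-region";
    if (ioctl(fd, ASHMEM_SET_NAME, name) < 0)   /* -> set_name() */
        perror("ASHMEM_SET_NAME");
    if (ioctl(fd, ASHMEM_SET_SIZE, 4096) < 0)
        perror("ASHMEM_SET_SIZE");

    char out[ASHMEM_NAME_LEN];
    if (ioctl(fd, ASHMEM_GET_NAME, out) == 0)   /* -> get_name() */
        printf("kernel reports name: %s\n", out);

    /* once mapped, the backing file exists and renaming is refused;
     * the mapping is labeled with the dev/ashmem/ prefix plus this
     * name in /proc/<pid>/maps */
    void *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
    if (p != MAP_FAILED && ioctl(fd, ASHMEM_SET_NAME, name) < 0)
        perror("ASHMEM_SET_NAME after mmap");   /* expect EINVAL */

    if (p != MAP_FAILED)
        munmap(p, 4096);
    close(fd);
    return 0;
}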
static int ashmem_mmap(struct file *file, struct vm_area_struct *vma)
{
    struct ashmem_area *asma = file->private_data;
    int ret = 0;

    mutex_lock(&ashmem_mutex);

    /* user needs to SET_SIZE before mapping */
    if (unlikely(!asma->size)) {
        ret = -EINVAL;
        goto out;
    }

    /* requested protection bits must match our allowed protection mask */
    if (unlikely((vma->vm_flags & ~calc_vm_prot_bits(asma->prot_mask)) &
                 calc_vm_prot_bits(PROT_MASK))) {
        ret = -EPERM;
        goto out;
    }
    vma->vm_flags &= ~calc_vm_may_flags(~asma->prot_mask);

    if (!asma->file) {
        char *name = ASHMEM_NAME_DEF;
        struct file *vmfile;

        if (asma->name[ASHMEM_NAME_PREFIX_LEN] != '\0')
            name = asma->name;

        /* ... and allocate the backing shmem file */
        vmfile = shmem_file_setup(name, asma->size, vma->vm_flags);
        if (unlikely(IS_ERR(vmfile))) {
            ret = PTR_ERR(vmfile);
            goto out;
        }
        asma->file = vmfile;
    }
    // #define get_file(x) atomic_long_inc(&(x)->f_count)
    get_file(asma->file);

    if (vma->vm_flags & VM_SHARED)
        shmem_set_file(vma, asma->file);            /* -> shmem_set_file() */
    else {
        if (vma->vm_file)
            fput(vma->vm_file);                     /* -> fput() */
        vma->vm_file = asma->file;
    }
    vma->vm_flags |= VM_CAN_NONLINEAR;

out:
    mutex_unlock(&ashmem_mutex);
    return ret;
}

void shmem_set_file(struct vm_area_struct *vma, struct file *file)
{
    if (vma->vm_file)
        fput(vma->vm_file);                         /* -> fput() */
    vma->vm_file = file;
    vma->vm_ops = &shmem_vm_ops;
    vma->vm_flags |= VM_CAN_NONLINEAR;
}

void fput(struct file *file)
{
    if (atomic_long_dec_and_test(&file->f_count))
        __fput(file);
}
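/*
 * To close the loop, a rough user-space sketch exercising ashmem_mmap()
 * above (again assuming <linux/ashmem.h> and an accessible /dev/ashmem;
 * error handling trimmed): the size must be set first, a MAP_SHARED
 * mapping takes the shmem_set_file() branch, and a forked child sees the
 * parent's writes because both map the same backing shmem file, whose
 * reference count was bumped by get_file().
 */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <sys/wait.h>
#include <unistd.h>
#include <linux/ashmem.h>

int main(void)
{
    const size_t size = 4096;

    int fd = open("/dev/ashmem", O_RDWR);
    if (fd < 0) {
        perror("open");
        return 1;
    }
    /* ashmem_mmap() rejects mappings until a size has been set */
    if (ioctl(fd, ASHMEM_SET_SIZE, size) < 0) {
        perror("ASHMEM_SET_SIZE");
        return 1;
    }

    /* MAP_SHARED takes the shmem_set_file() branch above, so the pages
     * are backed by the shmem file created in ashmem_mmap() */
    char *buf = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
    if (buf == MAP_FAILED) {
        perror("mmap");
        return 1;
    }

    strcpy(buf, "hello from parent");

    /* the child inherits the fd and the mapping; both processes see the
     * same pages because they share the same backing shmem file */
    pid_t pid = fork();
    if (pid == 0) {
        printf("child reads: %s\n", buf);
        _exit(0);
    }
    waitpid(pid, NULL, 0);

    munmap(buf, size);
    close(fd);
    return 0;
}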
Original article: http://www.cnblogs.com/zengjfgit/p/5123587.html