Linux Core Kernel Commentary

       

mm/mlock.c
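This file implements the four memory-locking system calls: sys_mlock, sys_munlock, sys_mlockall, and sys_munlockall. For reference, the user-space prototypes that these kernel entry points serve (declared in <sys/mman.h>) are:

  #include <sys/mman.h>

  int mlock(const void *addr, size_t len);    /* pin a range of pages in RAM */
  int munlock(const void *addr, size_t len);  /* undo mlock                  */
  int mlockall(int flags);                    /* MCL_CURRENT and/or MCL_FUTURE */
  int munlockall(void);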


32761 /*
32762  * linux/mm/mlock.c
32763  *
32764  * (C) Copyright 1995 Linus Torvalds
32765  */
32766 #include <linux/slab.h>
32767 #include <linux/shm.h>
32768 #include <linux/mman.h>
32769 #include <linux/smp_lock.h>
32770
32771 #include <asm/uaccess.h>
32772 #include <asm/pgtable.h>
32773
32774 static inline int mlock_fixup_all(
32775     struct vm_area_struct * vma, int newflags)
32776 {
32777   vma->vm_flags = newflags;
32778   return 0;
32779 }
32780
32781 static inline int mlock_fixup_start(
32782     struct vm_area_struct * vma, unsigned long end,
32783     int newflags)
32784 {
32785   struct vm_area_struct * n;
32786
32787   n = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
32788   if (!n)
32789     return -EAGAIN;
32790   *n = *vma;
32791   vma->vm_start = end;
32792   n->vm_end = end;
32793   vma->vm_offset += vma->vm_start - n->vm_start;
32794   n->vm_flags = newflags;
32795   if (n->vm_file)
32796     n->vm_file->f_count++;
32797   if (n->vm_ops && n->vm_ops->open)
32798     n->vm_ops->open(n);
32799   insert_vm_struct(current->mm, n);
32800   return 0;
32801 }
32802
32803 static inline int mlock_fixup_end(
32804     struct vm_area_struct * vma,
32805     unsigned long start, int newflags)
32806 {
32807   struct vm_area_struct * n;
32808
32809   n = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
32810   if (!n)
32811     return -EAGAIN;
32812   *n = *vma;
32813   vma->vm_end = start;
32814   n->vm_start = start;
32815   n->vm_offset += n->vm_start - vma->vm_start;
32816   n->vm_flags = newflags;
32817   if (n->vm_file)
32818     n->vm_file->f_count++;
32819   if (n->vm_ops && n->vm_ops->open)
32820     n->vm_ops->open(n);
32821   insert_vm_struct(current->mm, n);
32822   return 0;
32823 }
32824
32825 static inline int mlock_fixup_middle(
32826     struct vm_area_struct * vma,
32827     unsigned long start, unsigned long end, int newflags)
32828 {
32829   struct vm_area_struct * left, * right;
32830
32831   left = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
32832   if (!left)
32833     return -EAGAIN;
32834   right = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
32835   if (!right) {
32836     kmem_cache_free(vm_area_cachep, left);
32837     return -EAGAIN;
32838   }
32839   *left = *vma;
32840   *right = *vma;
32841   left->vm_end = start;
32842   vma->vm_start = start;
32843   vma->vm_end = end;
32844   right->vm_start = end;
32845   vma->vm_offset += vma->vm_start - left->vm_start;
32846   right->vm_offset += right->vm_start - left->vm_start;
32847   vma->vm_flags = newflags;
32848   if (vma->vm_file)
32849     vma->vm_file->f_count += 2;
32850
32851   if (vma->vm_ops && vma->vm_ops->open) {
32852     vma->vm_ops->open(left);
32853     vma->vm_ops->open(right);
32854   }
32855   insert_vm_struct(current->mm, left);
32856   insert_vm_struct(current->mm, right);
32857   return 0;
32858 }
32859
32860 static int mlock_fixup(struct vm_area_struct * vma,
32861     unsigned long start, unsigned long end,
32862     unsigned int newflags)
32863 {
32864   int pages, retval;
32865
32866   if (newflags == vma->vm_flags)
32867     return 0;
32868
32869   if (start == vma->vm_start) {
32870     if (end == vma->vm_end)
32871       retval = mlock_fixup_all(vma, newflags);
32872     else
32873       retval = mlock_fixup_start(vma, end, newflags);
32874   } else {
32875     if (end == vma->vm_end)
32876       retval = mlock_fixup_end(vma, start, newflags);
32877     else
32878       retval = mlock_fixup_middle(vma, start, end,
32879           newflags);
32880   }
32881   if (!retval) {
32882     /* keep track of amount of locked VM */
32883     pages = (end - start) >> PAGE_SHIFT;
32884     if (!(newflags & VM_LOCKED))
32885       pages = -pages;
32886     vma->vm_mm->locked_vm += pages;
32887     make_pages_present(start, end);
32888   }
32889   return retval;
32890 }
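The three splitting helpers exist so that the VM_LOCKED flag can change over exactly the requested byte range and nothing more; mlock_fixup (line 32860) only dispatches on how [start, end) lines up with the VMA's own [vm_start, vm_end) boundaries. The following user-space sketch replays that case analysis with a toy interval type in place of the real vm_area_struct; every name in it is illustrative, not kernel API:

  #include <assert.h>
  #include <stdio.h>

  struct range { unsigned long start, end; };  /* stand-in for a VMA */

  /* Mirror of mlock_fixup's dispatch: which helper would run? */
  static const char *fixup_case(struct range vma,
                                unsigned long start, unsigned long end)
  {
      assert(vma.start <= start && end <= vma.end && start < end);
      if (start == vma.start)
          return (end == vma.end) ? "mlock_fixup_all"     /* whole VMA     */
                                  : "mlock_fixup_start";  /* split off head */
      else
          return (end == vma.end) ? "mlock_fixup_end"     /* split off tail */
                                  : "mlock_fixup_middle"; /* carve middle   */
  }

  int main(void)
  {
      struct range vma = { 0x1000, 0x5000 };
      printf("%s\n", fixup_case(vma, 0x1000, 0x5000)); /* mlock_fixup_all    */
      printf("%s\n", fixup_case(vma, 0x1000, 0x3000)); /* mlock_fixup_start  */
      printf("%s\n", fixup_case(vma, 0x3000, 0x5000)); /* mlock_fixup_end    */
      printf("%s\n", fixup_case(vma, 0x2000, 0x4000)); /* mlock_fixup_middle */
      return 0;
  }

Note that only mlock_fixup_middle must allocate two new vm_area_struct objects; the other split cases need one, and mlock_fixup_all needs none.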
32891
32892 static int do_mlock(unsigned long start, size_t len,
32893     int on)
32894 {
32895   unsigned long nstart, end, tmp;
32896   struct vm_area_struct * vma, * next;
32897   int error;
32898
32899   if (!capable(CAP_IPC_LOCK))
32900     return -EPERM;
32901   len = (len + ~PAGE_MASK) & PAGE_MASK;
32902   end = start + len;
32903   if (end < start)
32904     return -EINVAL;
32905   if (end == start)
32906     return 0;
32907   vma = find_vma(current->mm, start);
32908   if (!vma || vma->vm_start > start)
32909     return -ENOMEM;
32910
32911   for (nstart = start ; ; ) {
32912     unsigned int newflags;
32913
32914     /* Here we know that
32915      * vma->vm_start <= nstart < vma->vm_end. */
32916     newflags = vma->vm_flags | VM_LOCKED;
32917     if (!on)
32918       newflags &= ~VM_LOCKED;
32919
32920     if (vma->vm_end >= end) {
32921       error = mlock_fixup(vma, nstart, end, newflags);
32922       break;
32923     }
32924
32925     tmp = vma->vm_end;
32926     next = vma->vm_next;
32927     error = mlock_fixup(vma, nstart, tmp, newflags);
32928     if (error)
32929       break;
32930     nstart = tmp;
32931     vma = next;
32932     if (!vma || vma->vm_start != nstart) {
32933       error = -ENOMEM;
32934       break;
32935     }
32936   }
32937   merge_segments(current->mm, start, end);
32938   return error;
32939 }
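do_mlock expects a page-aligned start (it only rounds len up, at line 32901); the system-call wrappers below align the range first. At lines 32949-32951, sys_mlock rounds start down to a page boundary and grows len so the locked range still covers the last byte the caller named. A quick user-space check of that arithmetic, hard-coding PAGE_SIZE as 4096 for illustration (the real value is architecture-dependent):

  #include <stdio.h>

  #define PAGE_SIZE 4096UL               /* assumed for this example */
  #define PAGE_MASK (~(PAGE_SIZE - 1))

  int main(void)
  {
      unsigned long start = 0x12345;     /* unaligned address */
      unsigned long len   = 100;

      /* same expression as sys_mlock: widen to whole pages */
      len = (len + (start & ~PAGE_MASK) + ~PAGE_MASK) & PAGE_MASK;
      start &= PAGE_MASK;

      printf("start = %#lx, len = %#lx\n", start, len);
      /* prints: start = 0x12000, len = 0x1000 -- one full page */
      return 0;
  }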
32940
32941 asmlinkage int sys_mlock(unsigned long start, size_t len)
32942 {
32943   unsigned long locked;
32944   unsigned long lock_limit;
32945   int error = -ENOMEM;
32946
32947   down(&current->mm->mmap_sem);
32948   lock_kernel();
32949   len = (len + (start & ~PAGE_MASK) + ~PAGE_MASK) &
32950       PAGE_MASK;
32951   start &= PAGE_MASK;
32952
32953   locked = len >> PAGE_SHIFT;
32954   locked += current->mm->locked_vm;
32955
32956   lock_limit = current->rlim[RLIMIT_MEMLOCK].rlim_cur;
32957   lock_limit >>= PAGE_SHIFT;
32958
32959   /* check against resource limits */
32960   if (locked > lock_limit)
32961     goto out;
32962
32963   /* we may lock at most half of physical memory... */
32964   /* (this check is pretty bogus, but doesn't hurt) */
32965   if (locked > num_physpages/2)
32966     goto out;
32967
32968   error = do_mlock(start, len, 1);
32969 out:
32970   unlock_kernel();
32971   up(&current->mm->mmap_sem);
32972   return error;
32973 }
32974
32975 asmlinkage int sys_munlock(unsigned long start,
32976     size_t len)
32977 {
32978   int ret;
32979
32980   down(&current->mm->mmap_sem);
32981   lock_kernel();
32982   len = (len + (start & ~PAGE_MASK) + ~PAGE_MASK) &
32983       PAGE_MASK;
32984   start &= PAGE_MASK;
32985   ret = do_mlock(start, len, 0);
32986   unlock_kernel();
32987   up(&current->mm->mmap_sem);
32988   return ret;
32989 }
32990
32991 static int do_mlockall(int flags)
32992 {
32993   int error;
32994   unsigned int def_flags;
32995   struct vm_area_struct * vma;
32996
32997   if (!capable(CAP_IPC_LOCK))
32998     return -EPERM;
32999
33000   def_flags = 0;
33001   if (flags & MCL_FUTURE)
33002     def_flags = VM_LOCKED;
33003   current->mm->def_flags = def_flags;
33004
33005   error = 0;
33006   for (vma = current->mm->mmap; vma;
33007       vma = vma->vm_next) {
33008     unsigned int newflags;
33009
33010     newflags = vma->vm_flags | VM_LOCKED;
33011     if (!(flags & MCL_CURRENT))
33012       newflags &= ~VM_LOCKED;
33013     error = mlock_fixup(vma, vma->vm_start, vma->vm_end,
33014         newflags);
33015     if (error)
33016       break;
33017   }
33018   merge_segments(current->mm, 0, TASK_SIZE);
33019   return error;
33020 }
33021
33022 asmlinkage int sys_mlockall(int flags)
33023 {
33024   unsigned long lock_limit;
33025   int ret = -EINVAL;
33026
33027   down(&current->mm->mmap_sem);
33028   lock_kernel();
33029   if (!flags || (flags & ~(MCL_CURRENT | MCL_FUTURE)))
33030     goto out;
33031
33032   lock_limit = current->rlim[RLIMIT_MEMLOCK].rlim_cur;
33033   lock_limit >>= PAGE_SHIFT;
33034
33035   ret = -ENOMEM;
33036   if (current->mm->total_vm > lock_limit)
33037     goto out;
33038
33039   /* we may lock at most half of physical memory... */
33040   /* (this check is pretty bogus, but doesn't hurt) */
33041   if (current->mm->total_vm > num_physpages/2)
33042     goto out;
33043
33044   ret = do_mlockall(flags);
33045 out:
33046   unlock_kernel();
33047   up(&current->mm->mmap_sem);
33048   return ret;
33049 }
33050
33051 asmlinkage int sys_munlockall(void)
33052 {
33053   int ret;
33054
33055   down(&current->mm->mmap_sem);
33056   lock_kernel();
33057   ret = do_mlockall(0);
33058   unlock_kernel();
33059   up(&current->mm->mmap_sem);
33060   return ret;
33061 }
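Taken together, these entry points give user space the full locking interface. A typical use, sketched from user space with error handling abbreviated (note that on this kernel the caller needs CAP_IPC_LOCK, which in practice means root):

  #include <stdio.h>
  #include <string.h>
  #include <sys/mman.h>

  int main(void)
  {
      static char secret[4096];

      /* Pin one buffer so it can never be written to swap. */
      if (mlock(secret, sizeof(secret)) != 0) {
          perror("mlock");            /* EPERM without CAP_IPC_LOCK */
          return 1;
      }
      strcpy(secret, "key material stays out of swap");

      /* ... use the buffer ... */

      memset(secret, 0, sizeof(secret));
      munlock(secret, sizeof(secret));

      /* A real-time process would instead pin everything, present
       * and future, in one call: mlockall(MCL_CURRENT | MCL_FUTURE);
       * sys_munlockall later drops MCL_FUTURE by clearing def_flags. */
      return 0;
  }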


