mm/mlock.c


DEFINITIONS

This source file includes the following definitions:
  1. mlock_fixup_all
  2. mlock_fixup_start
  3. mlock_fixup_end
  4. mlock_fixup_middle
  5. mlock_fixup
  6. do_mlock
  7. sys_mlock
  8. sys_munlock
  9. do_mlockall
  10. sys_mlockall
  11. sys_munlockall

/*
 *      linux/mm/mlock.c
 *
 *  (C) Copyright 1995 Linus Torvalds
 */
#include <linux/stat.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/shm.h>
#include <linux/errno.h>
#include <linux/mman.h>
#include <linux/string.h>
#include <linux/malloc.h>

#include <asm/segment.h>
#include <asm/system.h>
#include <asm/pgtable.h>

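/*
 * The four fixup routines below change VM_LOCKED on a vma.  When the
 * locked region covers only part of the vma, the vma is split, and the
 * new pieces take extra references on the inode and are announced to
 * the vm_ops via open().
 *
 * Simplest case first: the request covers the whole vma, so the flags
 * can just be updated in place.
 */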
static inline int mlock_fixup_all(struct vm_area_struct * vma, int newflags)
{
        vma->vm_flags = newflags;
        return 0;
}

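/*
 * The request covers the head of the vma: split off [vm_start, end)
 * into a new vma carrying the new flags, and shrink the original to
 * start at `end', adjusting its file offset to match.
 */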
static inline int mlock_fixup_start(struct vm_area_struct * vma,
        unsigned long end, int newflags)
{
        struct vm_area_struct * n;

        n = (struct vm_area_struct *) kmalloc(sizeof(struct vm_area_struct), GFP_KERNEL);
        if (!n)
                return -EAGAIN;
        *n = *vma;
        vma->vm_start = end;
        n->vm_end = end;
        vma->vm_offset += vma->vm_start - n->vm_start;
        n->vm_flags = newflags;
        if (n->vm_inode)
                n->vm_inode->i_count++;
        if (n->vm_ops && n->vm_ops->open)
                n->vm_ops->open(n);
        insert_vm_struct(current, n);
        return 0;
}

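/*
 * The request covers the tail of the vma: split off [start, vm_end)
 * into a new vma carrying the new flags, and shrink the original to
 * end at `start'.
 */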
static inline int mlock_fixup_end(struct vm_area_struct * vma,
        unsigned long start, int newflags)
{
        struct vm_area_struct * n;

        n = (struct vm_area_struct *) kmalloc(sizeof(struct vm_area_struct), GFP_KERNEL);
        if (!n)
                return -EAGAIN;
        *n = *vma;
        vma->vm_end = start;
        n->vm_start = start;
        n->vm_offset += n->vm_start - vma->vm_start;
        n->vm_flags = newflags;
        if (n->vm_inode)
                n->vm_inode->i_count++;
        if (n->vm_ops && n->vm_ops->open)
                n->vm_ops->open(n);
        insert_vm_struct(current, n);
        return 0;
}

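/*
 * The request falls in the middle of the vma: split into three pieces,
 * with the original vma becoming the middle part carrying the new
 * flags.  Two new mappings are created, so the inode reference count
 * is bumped by two.
 */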
static inline int mlock_fixup_middle(struct vm_area_struct * vma,
        unsigned long start, unsigned long end, int newflags)
{
        struct vm_area_struct * left, * right;

        left = (struct vm_area_struct *) kmalloc(sizeof(struct vm_area_struct), GFP_KERNEL);
        if (!left)
                return -EAGAIN;
        right = (struct vm_area_struct *) kmalloc(sizeof(struct vm_area_struct), GFP_KERNEL);
        if (!right) {
                kfree(left);
                return -EAGAIN;
        }
        *left = *vma;
        *right = *vma;
        left->vm_end = start;
        vma->vm_start = start;
        vma->vm_end = end;
        right->vm_start = end;
        vma->vm_offset += vma->vm_start - left->vm_start;
        right->vm_offset += right->vm_start - left->vm_start;
        vma->vm_flags = newflags;
        if (vma->vm_inode)
                vma->vm_inode->i_count += 2;
        if (vma->vm_ops && vma->vm_ops->open) {
                vma->vm_ops->open(left);
                vma->vm_ops->open(right);
        }
        insert_vm_struct(current, left);
        insert_vm_struct(current, right);
        return 0;
}

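/*
 * Dispatch to the right fixup routine depending on how [start, end)
 * overlaps the vma, update the locked-page accounting, and, if the
 * region is now locked, fault its pages in by reading a byte from
 * each of them.
 */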
static int mlock_fixup(struct vm_area_struct * vma,
        unsigned long start, unsigned long end, unsigned int newflags)
{
        int pages, retval;

        if (newflags == vma->vm_flags)
                return 0;

        /* keep track of amount of locked VM */
        pages = (end - start) >> PAGE_SHIFT;
        if (!(newflags & VM_LOCKED))
                pages = -pages;
        vma->vm_mm->locked_vm += pages;

        if (start == vma->vm_start) {
                if (end == vma->vm_end)
                        retval = mlock_fixup_all(vma, newflags);
                else
                        retval = mlock_fixup_start(vma, end, newflags);
        } else {
                if (end == vma->vm_end)
                        retval = mlock_fixup_end(vma, start, newflags);
                else
                        retval = mlock_fixup_middle(vma, start, end, newflags);
        }
        if (!retval && (newflags & VM_LOCKED)) {
                while (start < end) {
                        char c = get_user((char *) start);
                        __asm__ __volatile__("": :"r" (c));
                        start += PAGE_SIZE;
                }
        }
        return retval;
}

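/*
 * Apply or clear VM_LOCKED on every vma covering [start, start+len),
 * failing with -ENOMEM if the range contains a hole in the address
 * space.  Only the superuser may lock pages.  Adjacent vmas that end
 * up with identical flags are merged again afterwards.
 */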
static int do_mlock(unsigned long start, size_t len, int on)
{
        unsigned long nstart, end, tmp;
        struct vm_area_struct * vma, * next;
        int error;

        if (!suser())
                return -EPERM;
        len = (len + ~PAGE_MASK) & PAGE_MASK;
        end = start + len;
        if (end < start)
                return -EINVAL;
        if (end == start)
                return 0;
        vma = find_vma(current, start);
        if (!vma || vma->vm_start > start)
                return -ENOMEM;

        for (nstart = start ; ; ) {
                unsigned int newflags;

                /* Here we know that vma->vm_start <= nstart < vma->vm_end. */

                newflags = vma->vm_flags | VM_LOCKED;
                if (!on)
                        newflags &= ~VM_LOCKED;

                if (vma->vm_end >= end) {
                        error = mlock_fixup(vma, nstart, end, newflags);
                        break;
                }

                tmp = vma->vm_end;
                next = vma->vm_next;
                error = mlock_fixup(vma, nstart, tmp, newflags);
                if (error)
                        break;
                nstart = tmp;
                vma = next;
                if (!vma || vma->vm_start != nstart) {
                        error = -ENOMEM;
                        break;
                }
        }
        merge_segments(current, start, end);
        return error;
}

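/*
 * mlock(2): round the region out to page boundaries, then check the
 * request against RLIMIT_MEMLOCK and against half of physical memory
 * before doing the real work.
 */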
asmlinkage int sys_mlock(unsigned long start, size_t len)
{
        unsigned long locked;
        unsigned long lock_limit;

        len = (len + (start & ~PAGE_MASK) + ~PAGE_MASK) & PAGE_MASK;
        start &= PAGE_MASK;

        locked = len >> PAGE_SHIFT;
        locked += current->mm->locked_vm;

        lock_limit = current->rlim[RLIMIT_MEMLOCK].rlim_cur;
        lock_limit >>= PAGE_SHIFT;

        /* check against resource limits */
        if (locked > lock_limit)
                return -ENOMEM;

        /* we may lock at most half of physical memory... */
        /* (this check is pretty bogus, but doesn't hurt) */
        if (locked > MAP_NR(high_memory)/2)
                return -ENOMEM;

        return do_mlock(start, len, 1);
}

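/*
 * munlock(2): unlocking needs no limit checks, so just page-align the
 * region and clear VM_LOCKED.
 */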
asmlinkage int sys_munlock(unsigned long start, size_t len)
{
        len = (len + (start & ~PAGE_MASK) + ~PAGE_MASK) & PAGE_MASK;
        start &= PAGE_MASK;
        return do_mlock(start, len, 0);
}

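/*
 * Set or clear VM_LOCKED on every vma in the address space.  MCL_FUTURE
 * is remembered in mm->def_flags so that future mappings are created
 * locked as well.
 */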
static int do_mlockall(int flags)
{
        int error;
        unsigned int def_flags;
        struct vm_area_struct * vma;

        if (!suser())
                return -EPERM;

        def_flags = 0;
        if (flags & MCL_FUTURE)
                def_flags = VM_LOCKED;
        current->mm->def_flags = def_flags;

        error = 0;
        for (vma = current->mm->mmap; vma ; vma = vma->vm_next) {
                unsigned int newflags;

                newflags = vma->vm_flags | VM_LOCKED;
                if (!(flags & MCL_CURRENT))
                        newflags &= ~VM_LOCKED;
                error = mlock_fixup(vma, vma->vm_start, vma->vm_end, newflags);
                if (error)
                        break;
        }
        merge_segments(current, 0, TASK_SIZE);
        return error;
}

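/*
 * mlockall(2): validate the flags and check the size of the whole
 * address space against RLIMIT_MEMLOCK and against half of physical
 * memory.
 */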
asmlinkage int sys_mlockall(int flags)
{
        unsigned long lock_limit;

        if (!flags || (flags & ~(MCL_CURRENT | MCL_FUTURE)))
                return -EINVAL;

        lock_limit = current->rlim[RLIMIT_MEMLOCK].rlim_cur;
        lock_limit >>= PAGE_SHIFT;

        if (current->mm->total_vm > lock_limit)
                return -ENOMEM;

        /* we may lock at most half of physical memory... */
        /* (this check is pretty bogus, but doesn't hurt) */
        if (current->mm->total_vm > MAP_NR(high_memory)/2)
                return -ENOMEM;

        return do_mlockall(flags);
}

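/*
 * munlockall(2): passing no flags to do_mlockall() clears VM_LOCKED
 * everywhere and resets the default flags for future mappings.
 */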
asmlinkage int sys_munlockall(void)
{
        return do_mlockall(0);
}
