root/mm/mprotect.c


DEFINITIONS

This source file includes the following definitions.
  1. change_protection
  2. mprotect_fixup_all
  3. mprotect_fixup_start
  4. mprotect_fixup_end
  5. mprotect_fixup_middle
  6. mprotect_fixup
  7. sys_mprotect

/*
 *	linux/mm/mprotect.c
 *
 *  (C) Copyright 1994 Linus Torvalds
 */
#include <linux/stat.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/shm.h>
#include <linux/errno.h>
#include <linux/mman.h>
#include <linux/string.h>
#include <linux/malloc.h>

#include <asm/segment.h>
#include <asm/system.h>

/*
 * Walk the page tables for [start, end) and set the protection bits of
 * every present page to 'prot', leaving the other pte bits untouched.
 */
static void change_protection(unsigned long start, unsigned long end, int prot)
{
	unsigned long *page_table, *dir;
	unsigned long page, offset;
	int nr;

	dir = PAGE_DIR_OFFSET(current, start);
	offset = (start >> PAGE_SHIFT) & (PTRS_PER_PAGE-1);
	nr = (end - start) >> PAGE_SHIFT;
	while (nr > 0) {
		page = *dir;
		dir++;
		if (!(page & PAGE_PRESENT)) {
			/* no page table here: skip the rest of this directory slot */
			nr = nr - PTRS_PER_PAGE + offset;
			offset = 0;
			continue;
		}
		/* update up to the end of this page table, or 'nr' pages if fewer remain */
		page_table = offset + (unsigned long *) (page & PAGE_MASK);
		offset = PTRS_PER_PAGE - offset;
		if (offset > nr)
			offset = nr;
		nr = nr - offset;
		do {
			page = *page_table;
			if (page & PAGE_PRESENT)
				*page_table = (page & PAGE_CHG_MASK) | prot;
			++page_table;
		} while (--offset);
	}
	return;
}
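The index arithmetic in change_protection() assumes a two-level page table. As a rough stand-alone illustration only (PAGE_SHIFT = 12 and PTRS_PER_PAGE = 1024 are assumed i386-style values, not defined in this file), an address decomposes into a page-directory slot and an in-table offset like this:

#include <stdio.h>

/* Illustrative constants only (assumed, classic i386 layout): 4 KB pages
 * and 1024 entries per page table / page directory. */
#define DEMO_PAGE_SHIFT     12
#define DEMO_PTRS_PER_PAGE  1024UL

int main(void)
{
	unsigned long start = 0x081d3000UL;	/* arbitrary example address */

	/* Same decomposition change_protection() relies on: which directory
	 * slot the address falls in, and the starting offset inside that table. */
	unsigned long dir_index = start >> (DEMO_PAGE_SHIFT + 10);	/* 10 == log2(1024) */
	unsigned long offset = (start >> DEMO_PAGE_SHIFT) & (DEMO_PTRS_PER_PAGE - 1);

	printf("0x%08lx -> directory slot %lu, table offset %lu\n",
	       start, dir_index, offset);
	return 0;
}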

/*
 * The whole vma changes protection: just update it in place.
 */
static inline int mprotect_fixup_all(struct vm_area_struct * vma,
	int newflags, int prot)
{
	vma->vm_flags = newflags;
	vma->vm_page_prot = prot;
	return 0;
}

/*
 * Only the head of the vma changes: split off a new area 'n' covering
 * [vm_start, end) with the new protection and shrink 'vma' to the tail.
 */
static inline int mprotect_fixup_start(struct vm_area_struct * vma,
	unsigned long end,
	int newflags, int prot)
{
	struct vm_area_struct * n;

	n = (struct vm_area_struct *) kmalloc(sizeof(struct vm_area_struct), GFP_KERNEL);
	if (!n)
		return -ENOMEM;
	*n = *vma;
	vma->vm_start = end;
	n->vm_end = end;
	/* the surviving tail now starts 'end - old_start' bytes further into the file */
	vma->vm_offset += vma->vm_start - n->vm_start;
	n->vm_flags = newflags;
	n->vm_page_prot = prot;
	if (n->vm_inode)
		n->vm_inode->i_count++;
	if (n->vm_ops && n->vm_ops->open)
		n->vm_ops->open(n);
	insert_vm_struct(current, n);
	return 0;
}

/*
 * Only the tail of the vma changes: split off a new area 'n' covering
 * [start, vm_end) with the new protection and shrink 'vma' to the head.
 */
static inline int mprotect_fixup_end(struct vm_area_struct * vma,
	unsigned long start,
	int newflags, int prot)
{
	struct vm_area_struct * n;

	n = (struct vm_area_struct *) kmalloc(sizeof(struct vm_area_struct), GFP_KERNEL);
	if (!n)
		return -ENOMEM;
	*n = *vma;
	vma->vm_end = start;
	n->vm_start = start;
	/* the new tail begins 'start - old_start' bytes further into the file */
	n->vm_offset += n->vm_start - vma->vm_start;
	n->vm_flags = newflags;
	n->vm_page_prot = prot;
	if (n->vm_inode)
		n->vm_inode->i_count++;
	if (n->vm_ops && n->vm_ops->open)
		n->vm_ops->open(n);
	insert_vm_struct(current, n);
	return 0;
}

/*
 * An interior range changes: the vma is split into three parts, with
 * 'vma' itself becoming the middle piece that gets the new protection.
 */
static inline int mprotect_fixup_middle(struct vm_area_struct * vma,
	unsigned long start, unsigned long end,
	int newflags, int prot)
{
	struct vm_area_struct * left, * right;

	left = (struct vm_area_struct *) kmalloc(sizeof(struct vm_area_struct), GFP_KERNEL);
	if (!left)
		return -ENOMEM;
	right = (struct vm_area_struct *) kmalloc(sizeof(struct vm_area_struct), GFP_KERNEL);
	if (!right) {
		kfree(left);
		return -ENOMEM;
	}
	*left = *vma;
	*right = *vma;
	left->vm_end = start;
	vma->vm_start = start;
	vma->vm_end = end;
	right->vm_start = end;
	/* 'left' keeps the original offset; the middle and right pieces advance theirs */
	vma->vm_offset += vma->vm_start - left->vm_start;
	right->vm_offset += right->vm_start - left->vm_start;
	vma->vm_flags = newflags;
	vma->vm_page_prot = prot;
	if (vma->vm_inode)
		vma->vm_inode->i_count += 2;	/* two new areas reference the same inode */
	if (vma->vm_ops && vma->vm_ops->open) {
		vma->vm_ops->open(left);
		vma->vm_ops->open(right);
	}
	insert_vm_struct(current, left);
	insert_vm_struct(current, right);
	return 0;
}
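All three split helpers above keep vm_offset consistent by advancing it by exactly the number of bytes cut off the front of the area. A minimal stand-alone sketch of that bookkeeping, using a simplified hypothetical structure rather than the kernel's vm_area_struct:

#include <stdio.h>

/* Simplified stand-in for the fields the split helpers touch. */
struct demo_vma {
	unsigned long vm_start, vm_end, vm_offset;
};

/* Split 'vma' at 'addr' and return the new tail.  The tail's vm_offset
 * advances by the number of bytes cut off the front, mirroring the
 * "vm_offset += new_start - old_start" lines above. */
static struct demo_vma split_front(struct demo_vma *vma, unsigned long addr)
{
	struct demo_vma tail = *vma;

	tail.vm_start   = addr;
	tail.vm_offset += addr - vma->vm_start;
	vma->vm_end     = addr;
	return tail;
}

int main(void)
{
	struct demo_vma v = { 0x40000000UL, 0x40008000UL, 0x2000UL };
	struct demo_vma t = split_front(&v, 0x40004000UL);

	printf("head: %#lx-%#lx offset %#lx\n", v.vm_start, v.vm_end, v.vm_offset);
	printf("tail: %#lx-%#lx offset %#lx\n", t.vm_start, t.vm_end, t.vm_offset);
	return 0;
}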

/*
 * Pick the right fixup case for [start, end) within 'vma', compute the
 * page protection matching 'newflags', and update the page tables.
 */
static int mprotect_fixup(struct vm_area_struct * vma,
	unsigned long start, unsigned long end, unsigned int newflags)
{
	int prot, error;

	if (newflags == vma->vm_flags)
		return 0;
	/* translate the VM_* flags into hardware page protection bits */
	prot = PAGE_PRESENT;
	if (newflags & (VM_READ | VM_EXEC))
		prot |= PAGE_READONLY;
	if (newflags & VM_WRITE)
		if (newflags & VM_SHARED)
			prot |= PAGE_SHARED;
		else
			prot |= PAGE_COPY;	/* private writable mappings stay copy-on-write */

	if (start == vma->vm_start)
		if (end == vma->vm_end)
			error = mprotect_fixup_all(vma, newflags, prot);
		else
			error = mprotect_fixup_start(vma, end, newflags, prot);
	else if (end == vma->vm_end)
		error = mprotect_fixup_end(vma, start, newflags, prot);
	else
		error = mprotect_fixup_middle(vma, start, end, newflags, prot);

	if (error)
		return error;

	change_protection(start, end, prot);
	return 0;
}
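The case selection in mprotect_fixup() depends only on how [start, end) lines up with the vma boundaries. A small hypothetical helper (not part of the kernel) that reproduces the same dispatch, e.g. for exercising the logic in isolation:

#include <stdio.h>

enum fixup_case { FIXUP_ALL, FIXUP_START, FIXUP_END, FIXUP_MIDDLE };

/* Mirror of the start/end comparison in mprotect_fixup(); the caller
 * guarantees vm_start <= start < end <= vm_end. */
static enum fixup_case pick_fixup(unsigned long vm_start, unsigned long vm_end,
				  unsigned long start, unsigned long end)
{
	if (start == vm_start)
		return end == vm_end ? FIXUP_ALL : FIXUP_START;
	if (end == vm_end)
		return FIXUP_END;
	return FIXUP_MIDDLE;
}

int main(void)
{
	static const char *name[] = { "all", "start", "end", "middle" };

	/* whole vma, head only, tail only, interior range */
	printf("%s\n", name[pick_fixup(0x1000, 0x5000, 0x1000, 0x5000)]);
	printf("%s\n", name[pick_fixup(0x1000, 0x5000, 0x1000, 0x3000)]);
	printf("%s\n", name[pick_fixup(0x1000, 0x5000, 0x3000, 0x5000)]);
	printf("%s\n", name[pick_fixup(0x1000, 0x5000, 0x2000, 0x4000)]);
	return 0;
}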

asmlinkage int sys_mprotect(unsigned long start, size_t len, unsigned long prot)
{
	unsigned long end, tmp;
	struct vm_area_struct * vma, * next;
	int error;

	if (start & ~PAGE_MASK)
		return -EINVAL;
	len = (len + ~PAGE_MASK) & PAGE_MASK;	/* round the length up to a whole page */
	end = start + len;
	if (end < start)
		return -EINVAL;
	if (prot & ~(PROT_READ | PROT_WRITE | PROT_EXEC))
		return -EINVAL;
	if (end == start)
		return 0;
	/* find the first vma that ends beyond 'start' */
	for (vma = current->mm->mmap ; ; vma = vma->vm_next) {
		if (!vma)
			return -EFAULT;
		if (vma->vm_end > start)
			break;
	}
	if (vma->vm_start > start)
		return -EFAULT;

	for ( ; ; ) {
		unsigned int newflags;

		/* Here we know that vma->vm_start <= start < vma->vm_end. */

		newflags = prot | (vma->vm_flags & ~(PROT_READ | PROT_WRITE | PROT_EXEC));
		/* each requested bit must be allowed by the matching VM_MAY* bit,
		 * which sits four bit positions higher */
		if ((newflags & ~(newflags >> 4)) & 0xf) {
			error = -EACCES;
			break;
		}

		if (vma->vm_end >= end) {
			error = mprotect_fixup(vma, start, end, newflags);
			break;
		}

		/* the range spans several vmas: fix up this one and move on */
		tmp = vma->vm_end;
		next = vma->vm_next;
		error = mprotect_fixup(vma, start, tmp, newflags);
		if (error)
			break;
		start = tmp;
		vma = next;
		if (!vma || vma->vm_start != start) {
			error = -EFAULT;	/* hole in the address range */
			break;
		}
	}
	merge_segments(current->mm->mmap);
	return error;
}
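For reference, a minimal user-space caller of this system call might look like the sketch below; it uses the standard C library mmap()/mprotect() wrappers rather than the raw system call, and assumes an anonymous private mapping is available:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	long page = sysconf(_SC_PAGESIZE);

	/* Map one writable page, fill it, then drop write permission.
	 * After the mprotect() call a store to 'buf' would fault. */
	char *buf = mmap(NULL, page, PROT_READ | PROT_WRITE,
			 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (buf == MAP_FAILED) {
		perror("mmap");
		return EXIT_FAILURE;
	}
	strcpy(buf, "hello");

	if (mprotect(buf, page, PROT_READ) < 0) {
		perror("mprotect");
		return EXIT_FAILURE;
	}
	printf("read-only now: %s\n", buf);

	munmap(buf, page);
	return 0;
}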
