mm/mprotect.c


DEFINITIONS

This source file includes the following definitions:
  1. change_protection
  2. mprotect_fixup_all
  3. mprotect_fixup_start
  4. mprotect_fixup_end
  5. mprotect_fixup_middle
  6. mprotect_fixup
  7. sys_mprotect

/*
 *      linux/mm/mprotect.c
 *
 *  (C) Copyright 1994 Linus Torvalds
 */
#include <linux/stat.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/shm.h>
#include <linux/errno.h>
#include <linux/mman.h>
#include <linux/string.h>
#include <linux/malloc.h>

#include <asm/segment.h>
#include <asm/system.h>

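/*
 * Page table bits that survive a protection change: the page frame
 * address plus the accessed, dirty and caching (PWT/PCD) bits.  The
 * low protection bits are replaced by the new ones.
 */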
#define CHG_MASK (PAGE_MASK | PAGE_ACCESSED | PAGE_DIRTY | PAGE_PWT | PAGE_PCD)

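/*
 * Walk the page tables covering [start, end) and rewrite every present
 * page table entry with the new protection bits, preserving only the
 * bits in CHG_MASK.  Page tables that are not present are skipped a
 * whole page directory entry at a time.
 */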
static void change_protection(unsigned long start, unsigned long end, int prot)
{
        unsigned long *page_table, *dir;
        unsigned long page, offset;
        int nr;

        dir = PAGE_DIR_OFFSET(current->tss.cr3, start);
        offset = (start >> PAGE_SHIFT) & (PTRS_PER_PAGE-1);
        nr = (end - start) >> PAGE_SHIFT;
        while (nr > 0) {
                page = *dir;
                dir++;
                if (!(page & PAGE_PRESENT)) {
                        nr = nr - PTRS_PER_PAGE + offset;
                        offset = 0;
                        continue;
                }
                page_table = offset + (unsigned long *) (page & PAGE_MASK);
                offset = PTRS_PER_PAGE - offset;
                if (offset > nr)
                        offset = nr;
                nr = nr - offset;
                do {
                        page = *page_table;
                        if (page & PAGE_PRESENT)
                                *page_table = (page & CHG_MASK) | prot;
                        ++page_table;
                } while (--offset);
        }
        return;
}

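/*
 * The new protection covers the whole vma: update its flags and page
 * protection in place and let merge_segments() fold it into any
 * compatible neighbours.
 */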
static inline int mprotect_fixup_all(struct vm_area_struct * vma,
        int newflags, int prot)
{
        vma->vm_flags = newflags;
        vma->vm_page_prot = prot;
        merge_segments(current->mm->mmap);
        return 0;
}

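/*
 * The new protection covers the start of the vma: split off a new area
 * for [vm_start, end) carrying the new flags and shrink the original
 * vma so that it begins at 'end'.
 */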
static inline int mprotect_fixup_start(struct vm_area_struct * vma,
        unsigned long end,
        int newflags, int prot)
{
        struct vm_area_struct * n;

        n = (struct vm_area_struct *) kmalloc(sizeof(struct vm_area_struct), GFP_KERNEL);
        if (!n)
                return -ENOMEM;
        *n = *vma;
        vma->vm_start = end;
        n->vm_end = end;
        vma->vm_offset += vma->vm_start - n->vm_start;
        n->vm_flags = newflags;
        n->vm_page_prot = prot;
        if (n->vm_inode)
                n->vm_inode->i_count++;
        if (n->vm_ops && n->vm_ops->open)
                n->vm_ops->open(n);
        insert_vm_struct(current, n);
        merge_segments(current->mm->mmap);
        return 0;
}

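/*
 * The new protection covers the end of the vma: shrink the original vma
 * so that it ends at 'start' and create a new area for [start, vm_end)
 * carrying the new flags.
 */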
static inline int mprotect_fixup_end(struct vm_area_struct * vma,
        unsigned long start,
        int newflags, int prot)
{
        struct vm_area_struct * n;

        n = (struct vm_area_struct *) kmalloc(sizeof(struct vm_area_struct), GFP_KERNEL);
        if (!n)
                return -ENOMEM;
        *n = *vma;
        vma->vm_end = start;
        n->vm_start = start;
        n->vm_offset += n->vm_start - vma->vm_start;
        n->vm_flags = newflags;
        n->vm_page_prot = prot;
        if (n->vm_inode)
                n->vm_inode->i_count++;
        if (n->vm_ops && n->vm_ops->open)
                n->vm_ops->open(n);
        insert_vm_struct(current, n);
        merge_segments(current->mm->mmap);
        return 0;
}

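/*
 * The new protection covers the middle of the vma: split it into three
 * pieces.  The original vma becomes the middle part [start, end) with
 * the new flags; the new 'left' and 'right' areas keep the old
 * protection.
 */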
static inline int mprotect_fixup_middle(struct vm_area_struct * vma,
        unsigned long start, unsigned long end,
        int newflags, int prot)
{
        struct vm_area_struct * left, * right;

        left = (struct vm_area_struct *) kmalloc(sizeof(struct vm_area_struct), GFP_KERNEL);
        if (!left)
                return -ENOMEM;
        right = (struct vm_area_struct *) kmalloc(sizeof(struct vm_area_struct), GFP_KERNEL);
        if (!right) {
                kfree(left);
                return -ENOMEM;
        }
        *left = *vma;
        *right = *vma;
        left->vm_end = start;
        vma->vm_start = start;
        vma->vm_end = end;
        right->vm_start = end;
        vma->vm_offset += vma->vm_start - left->vm_start;
        right->vm_offset += right->vm_start - left->vm_start;
        vma->vm_flags = newflags;
        vma->vm_page_prot = prot;
        if (vma->vm_inode)
                vma->vm_inode->i_count += 2;
        if (vma->vm_ops && vma->vm_ops->open) {
                vma->vm_ops->open(left);
                vma->vm_ops->open(right);
        }
        insert_vm_struct(current, left);
        insert_vm_struct(current, right);
        merge_segments(current->mm->mmap);
        return 0;
}

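/*
 * Translate the VM_* flags into hardware page protection bits, adjust
 * the vma layout depending on whether the range covers all, the start,
 * the end or the middle of the area, and finally rewrite the page
 * tables for the range.
 */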
static int mprotect_fixup(struct vm_area_struct * vma,
        unsigned long start, unsigned long end, unsigned int newflags)
{
        int prot, error;

        if (newflags == vma->vm_flags)
                return 0;
        prot = PAGE_PRESENT;
        if (newflags & (VM_READ | VM_EXEC))
                prot |= PAGE_READONLY;
        if (newflags & VM_WRITE)
                if (newflags & VM_SHARED)
                        prot |= PAGE_SHARED;
                else
                        prot |= PAGE_COPY;

        if (start == vma->vm_start)
                if (end == vma->vm_end)
                        error = mprotect_fixup_all(vma, newflags, prot);
                else
                        error = mprotect_fixup_start(vma, end, newflags, prot);
        else if (end == vma->vm_end)
                error = mprotect_fixup_end(vma, start, newflags, prot);
        else
                error = mprotect_fixup_middle(vma, start, end, newflags, prot);

        if (error)
                return error;

        change_protection(start, end, prot);
        return 0;
}

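/*
 * mprotect(2): change the protection of the pages in [start, start+len).
 * 'start' must be page aligned and the whole range must be mapped; each
 * vma the range touches is checked against its VM_MAY* limits and then
 * fixed up in turn.
 */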
asmlinkage int sys_mprotect(unsigned long start, size_t len, unsigned long prot)
{
        unsigned long end, tmp;
        struct vm_area_struct * vma;

        if (start & ~PAGE_MASK)
                return -EINVAL;
        len = (len + ~PAGE_MASK) & PAGE_MASK;
        end = start + len;
        if (end < start)
                return -EINVAL;
        if (prot & ~(PROT_READ | PROT_WRITE | PROT_EXEC))
                return -EINVAL;
        if (end == start)
                return 0;
        for (vma = current->mm->mmap ; ; vma = vma->vm_next) {
                if (!vma)
                        return -EFAULT;
                if (vma->vm_end > start)
                        break;
        }
        if (vma->vm_start > start)
                return -EFAULT;

        for ( ; ; ) {
                int error;
                unsigned int newflags;

                newflags = prot | (vma->vm_flags & ~(PROT_READ | PROT_WRITE | PROT_EXEC));
                if ((newflags & ~(newflags >> 4)) & 0xf)
                        return -EACCES;

                if (vma->vm_end >= end)
                        return mprotect_fixup(vma, start, end, newflags);

                tmp = vma->vm_end;
                error = mprotect_fixup(vma, start, tmp, newflags);
                if (error)
                        return error;
                start = tmp;
                if (vma->vm_end <= start) {
                        vma = vma->vm_next;
                        if (vma && vma->vm_start < start)
                                vma = vma->vm_next;
                        if (!vma || vma->vm_start != start)
                                return -EFAULT;
                }
        }
}
