root/mm/mremap.c


DEFINITIONS

This source file includes the following definitions.
  1. get_one_pte
  2. alloc_one_pte
  3. copy_one_pte
  4. move_one_page
  5. move_page_tables
  6. move_vma
  7. sys_mremap
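
For orientation, a minimal userspace sketch of the call this file implements (illustrative only, not part of the source). It uses the modern glibc mremap() wrapper; its MREMAP_MAYMOVE flag plays the role of the may_move argument taken by sys_mremap below:

#define _GNU_SOURCE
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>

int main(void)
{
        size_t old_len = 4096, new_len = 8192;

        /* Map one page of anonymous memory and put some data in it. */
        char *p = mmap(NULL, old_len, PROT_READ | PROT_WRITE,
                       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        if (p == MAP_FAILED)
                return 1;
        strcpy(p, "data survives the remap");

        /* Grow to two pages; the kernel may relocate the mapping
         * if it cannot expand in place (may_move != 0). */
        char *q = mremap(p, old_len, new_len, MREMAP_MAYMOVE);
        if (q == MAP_FAILED)
                return 1;
        printf("%p -> %p: %s\n", (void *)p, (void *)q, q);

        munmap(q, new_len);
        return 0;
}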

/*
 *      linux/mm/mremap.c
 *
 *      (C) Copyright 1996 Linus Torvalds
 */

#include <linux/stat.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/shm.h>
#include <linux/errno.h>
#include <linux/mman.h>
#include <linux/string.h>
#include <linux/malloc.h>
#include <linux/swap.h>

#include <asm/segment.h>
#include <asm/system.h>
#include <asm/pgtable.h>

static inline pte_t *get_one_pte(struct mm_struct *mm, unsigned long addr)
{
        pgd_t * pgd;
        pmd_t * pmd;
        pte_t * pte = NULL;

        pgd = pgd_offset(mm, addr);
        if (pgd_none(*pgd))
                goto end;
        if (pgd_bad(*pgd)) {
                printk("move_one_page: bad source pgd (%08lx)\n", pgd_val(*pgd));
                pgd_clear(pgd);
                goto end;
        }

        pmd = pmd_offset(pgd, addr);
        if (pmd_none(*pmd))
                goto end;
        if (pmd_bad(*pmd)) {
                printk("move_one_page: bad source pmd (%08lx)\n", pmd_val(*pmd));
                pmd_clear(pmd);
                goto end;
        }

        pte = pte_offset(pmd, addr);
        if (pte_none(*pte))
                pte = NULL;
end:
        return pte;
}

static inline pte_t *alloc_one_pte(struct mm_struct *mm, unsigned long addr)
{
        pmd_t * pmd;
        pte_t * pte = NULL;

        pmd = pmd_alloc(pgd_offset(mm, addr), addr);
        if (pmd)
                pte = pte_alloc(pmd, addr);
        return pte;
}

static inline int copy_one_pte(pte_t * src, pte_t * dst)
{
        int error = 0;
        pte_t pte = *src;

        if (!pte_none(pte)) {
                error++;
                if (dst) {
                        pte_clear(src);
                        set_pte(dst, pte);
                        error--;
                }
        }
        return error;
}

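The counting above is terse: error is raised as soon as there is a pte to move and lowered again once the move has actually happened, so the function returns nonzero exactly when a pte existed at the source but no destination pte could be allocated. An equivalent, spelled-out version (an illustrative sketch, not from the source; the helper name is hypothetical):

/* Illustrative rewrite of copy_one_pte(): same behaviour, explicit flow. */
static inline int copy_one_pte_spelled_out(pte_t * src, pte_t * dst)
{
        pte_t pte = *src;

        if (pte_none(pte))
                return 0;       /* nothing at the source, nothing to move */
        if (!dst)
                return 1;       /* a pte to move, but nowhere to put it */
        pte_clear(src);         /* detach it from the old location... */
        set_pte(dst, pte);      /* ...and install it at the new one */
        return 0;
}
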
static int move_one_page(struct mm_struct *mm, unsigned long old_addr, unsigned long new_addr)
{
        int error = 0;
        pte_t * src;

        src = get_one_pte(mm, old_addr);
        if (src)
                error = copy_one_pte(src, alloc_one_pte(mm, new_addr));
        return error;
}

static int move_page_tables(struct mm_struct * mm,
        unsigned long new_addr, unsigned long old_addr, unsigned long len)
{
        unsigned long offset = len;

        flush_cache_range(mm, old_addr, old_addr + len);
        flush_tlb_range(mm, old_addr, old_addr + len);

        /*
         * This is not the clever way to do this, but we're taking the
         * easy way out on the assumption that most remappings will be
         * only a few pages.. This also makes error recovery easier.
         */
        while (offset) {
                offset -= PAGE_SIZE;
                if (move_one_page(mm, old_addr + offset, new_addr + offset))
                        goto oops_we_failed;
        }
        return 0;

        /*
         * Ok, the move failed because we didn't have enough pages for
         * the new page table tree. This is unlikely, but we have to
         * take the possibility into account. In that case we just move
         * all the pages back (this will work, because we still have
         * the old page tables)
         */
oops_we_failed:
        flush_cache_range(mm, new_addr, new_addr + len);
        while ((offset += PAGE_SIZE) < len)
                move_one_page(mm, new_addr + offset, old_addr + offset);
        flush_tlb_range(mm, new_addr, new_addr + len);
        zap_page_range(mm, new_addr, len);
        return -1;
}

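To see why the recovery loop revisits exactly the pages that were already moved: the forward loop walks offsets from len - PAGE_SIZE down to 0, so a failure at some offset leaves precisely the offsets above it moved. A small standalone simulation of the index arithmetic (hypothetical page count and failure point, illustrative only):

#include <stdio.h>

#define PAGE_SIZE 4096UL

int main(void)
{
        unsigned long len = 4 * PAGE_SIZE;      /* four pages to move */
        unsigned long fail_at = PAGE_SIZE;      /* pretend this one fails */
        unsigned long offset = len;

        /* Forward pass, mirroring move_page_tables(): high offsets first. */
        while (offset) {
                offset -= PAGE_SIZE;
                if (offset == fail_at) {
                        printf("failed at offset %lu\n", offset);
                        break;
                }
                printf("moved old+%lu -> new+%lu\n", offset, offset);
        }

        /* Recovery pass: walks back up through exactly the moved offsets. */
        while ((offset += PAGE_SIZE) < len)
                printf("moved back new+%lu -> old+%lu\n", offset, offset);
        return 0;
}
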
static inline unsigned long move_vma(struct vm_area_struct * vma,
        unsigned long addr, unsigned long old_len, unsigned long new_len)
{
        struct vm_area_struct * new_vma;

        new_vma = (struct vm_area_struct *)
                kmalloc(sizeof(struct vm_area_struct), GFP_KERNEL);
        if (new_vma) {
                unsigned long new_addr = get_unmapped_area(addr, new_len);

                if (new_addr && !move_page_tables(current->mm, new_addr, addr, old_len)) {
                        *new_vma = *vma;
                        new_vma->vm_start = new_addr;
                        new_vma->vm_end = new_addr+new_len;
                        new_vma->vm_offset = vma->vm_offset + (addr - vma->vm_start);
                        if (new_vma->vm_inode)
                                new_vma->vm_inode->i_count++;
                        if (new_vma->vm_ops && new_vma->vm_ops->open)
                                new_vma->vm_ops->open(new_vma);
                        insert_vm_struct(current, new_vma);
                        merge_segments(current, new_vma->vm_start, new_vma->vm_end);
                        do_munmap(addr, old_len);
                        return new_addr;
                }
                kfree(new_vma);
        }
        return -ENOMEM;
}

/*
 * Expand (or shrink) an existing mapping, potentially moving it at the
 * same time (controlled by the "may_move" flag and available VM space)
 */
asmlinkage unsigned long sys_mremap(unsigned long addr,
        unsigned long old_len, unsigned long new_len,
        int may_move)
{
        struct vm_area_struct *vma;

        if (addr & ~PAGE_MASK)
                return -EINVAL;
        old_len = PAGE_ALIGN(old_len);
        new_len = PAGE_ALIGN(new_len);
        if (old_len == new_len)
                return addr;

        /*
         * Always allow a shrinking remap: that just unmaps
         * the unnecessary pages..
         */
        if (old_len > new_len) {
                do_munmap(addr+new_len, old_len - new_len);
                return addr;
        }

        /*
         * Ok, we need to grow..
         */
        vma = find_vma(current, addr);
        if (!vma || vma->vm_start > addr)
                return -EFAULT;
        /* We can't remap across vm area boundaries */
        if (old_len > vma->vm_end - addr)
                return -EFAULT;
        if (vma->vm_flags & VM_LOCKED) {
                unsigned long locked = current->mm->locked_vm << PAGE_SHIFT;
                locked += new_len - old_len;
                if (locked > current->rlim[RLIMIT_MEMLOCK].rlim_cur)
                        return -EAGAIN;
        }

        /* old_len exactly to the end of the area.. */
        if (old_len == vma->vm_end - addr) {
                unsigned long max_addr = TASK_SIZE;
                if (vma->vm_next)
                        max_addr = vma->vm_next->vm_start;
                /* can we just expand the current mapping? */
                if (max_addr - addr >= new_len) {
                        int pages = (new_len - old_len) >> PAGE_SHIFT;
                        vma->vm_end = addr + new_len;
                        current->mm->total_vm += pages;
                        if (vma->vm_flags & VM_LOCKED)
                                current->mm->locked_vm += pages;
                        return addr;
                }
        }

        /*
         * We weren't able to just expand or shrink the area,
         * we need to create a new one and move it..
         */
        if (!may_move)
                return -ENOMEM;
        return move_vma(vma, addr, old_len, new_len);
}
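
A worked example of the in-place growth test above, with hypothetical addresses (illustrative only):

/*
 * Hypothetical layout:
 *
 *      vma:       [0x08048000, 0x0804c000)   old_len = 0x4000, i.e. the
 *                                            mapping runs to the end of it
 *      next vma:  starts at 0x08060000       so max_addr = 0x08060000
 *
 * For new_len = 0x10000:
 *      max_addr - addr = 0x08060000 - 0x08048000 = 0x18000 >= 0x10000
 * so vm_end is simply advanced to addr + new_len and no page moves.
 *
 * For new_len = 0x20000 the test fails, and the call falls through to
 * move_vma() (or returns -ENOMEM when may_move is 0).
 */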
