root/mm/mmap.c

/* [previous][next][first][last][top][bottom][index][help] */

DEFINITIONS

This source file includes the following definitions.
  1. do_mmap
  2. sys_mmap
  3. sys_munmap
  4. generic_mmap

   1 /*
   2  *      linux/mm/mmap.c
   3  *
   4  * Written by obz.
   5  */
   6 #include <linux/stat.h>
   7 #include <linux/sched.h>
   8 #include <linux/kernel.h>
   9 #include <linux/mm.h>
  10 #include <linux/shm.h>
  11 #include <linux/errno.h>
  12 #include <linux/mman.h>
  13 #include <linux/string.h>
  14 
  15 #include <asm/segment.h>
  16 #include <asm/system.h>
  17 
  18 /*
  19  * description of effects of mapping type and prot in current implementation.
  20  * this is due to the current handling of page faults in memory.c. the expected
  21  * behavior is in parens:
  22  *
  23  * map_type     prot
  24  *              PROT_NONE       PROT_READ       PROT_WRITE      PROT_EXEC
  25  * MAP_SHARED   r: (no) yes     r: (yes) yes    r: (no) yes     r: (no) no
  26  *              w: (no) yes     w: (no) copy    w: (yes) yes    w: (no) no
  27  *              x: (no) no      x: (no) no      x: (no) no      x: (yes) no
  28  *              
  29  * MAP_PRIVATE  r: (no) yes     r: (yes) yes    r: (no) yes     r: (no) no
  30  *              w: (no) copy    w: (no) copy    w: (copy) copy  w: (no) no
  31  *              x: (no) no      x: (no) no      x: (no) no      x: (yes) no
  32  *
  33  */
  34 
/*
 * True when the page containing 'addr' still lies within the process'
 * code segment.  NOTE(review): not referenced anywhere in the visible
 * part of this file - presumably used by callers elsewhere; confirm.
 */
#define CODE_SPACE(addr)        \
 (PAGE_ALIGN(addr) < current->start_code + current->end_code)
  37 
  38 int do_mmap(struct file * file, unsigned long addr, unsigned long len,
     /* [previous][next][first][last][top][bottom][index][help] */
  39         unsigned long prot, unsigned long flags, unsigned long off)
  40 {
  41         int mask, error;
  42 
  43         if (addr > TASK_SIZE || len > TASK_SIZE || addr > TASK_SIZE-len)
  44                 return -EINVAL;
  45 
  46         /*
  47          * do simple checking here so the lower-level routines won't have
  48          * to. we assume access permissions have been handled by the open
  49          * of the memory object, so we don't do any here.
  50          */
  51 
  52         switch (flags & MAP_TYPE) {
  53         case MAP_SHARED:
  54                 if ((prot & PROT_WRITE) && !(file->f_mode & 2))
  55                         return -EINVAL;
  56                 /* fall through */
  57         case MAP_PRIVATE:
  58                 if (!(file->f_mode & 1))
  59                         return -EINVAL;
  60                 break;
  61 
  62         default:
  63                 return -EINVAL;
  64         }
  65 
  66         /*
  67          * obtain the address to map to. we verify (or select) it and ensure
  68          * that it represents a valid section of the address space.
  69          */
  70 
  71         if (flags & MAP_FIXED) {
  72                 if (addr & ~PAGE_MASK)
  73                         return -EINVAL;
  74                 if (len > TASK_SIZE || addr > TASK_SIZE - len)
  75                         return -ENOMEM;
  76         } else {
  77                 struct vm_area_struct * vmm;
  78 
  79                 /* Maybe this works.. Ugly it is. */
  80                 addr = SHM_RANGE_START;
  81                 while (addr+len < SHM_RANGE_END) {
  82                         for (vmm = current->mmap ; vmm ; vmm = vmm->vm_next) {
  83                                 if (addr >= vmm->vm_end)
  84                                         continue;
  85                                 if (addr + len <= vmm->vm_start)
  86                                         continue;
  87                                 addr = PAGE_ALIGN(vmm->vm_end);
  88                                 break;
  89                         }
  90                         if (!vmm)
  91                                 break;
  92                 }
  93                 if (addr+len >= SHM_RANGE_END)
  94                         return -ENOMEM;
  95         }
  96 
  97         /*
  98          * determine the object being mapped and call the appropriate
  99          * specific mapper. the address has already been validated, but
 100          * not unmapped
 101          */
 102         if (!file->f_op || !file->f_op->mmap)
 103                 return -ENODEV;
 104         mask = 0;
 105         if (prot & (PROT_READ | PROT_EXEC))
 106                 mask |= PAGE_READONLY;
 107         if (prot & PROT_WRITE)
 108                 if ((flags & MAP_TYPE) == MAP_PRIVATE)
 109                         mask |= PAGE_COW;
 110                 else
 111                         mask |= PAGE_RW;
 112         if (!mask)
 113                 return -EINVAL;
 114 
 115         error = file->f_op->mmap(file->f_inode, file, addr, len, mask, off);
 116         if (!error)
 117                 return addr;
 118 
 119         if (!current->errno)
 120                 current->errno = -error;
 121         return -1;
 122 }
 123 
 124 extern "C" int sys_mmap(unsigned long *buffer)
     /* [previous][next][first][last][top][bottom][index][help] */
 125 {
 126         unsigned long fd;
 127         struct file * file;
 128 
 129         fd = get_fs_long(buffer+4);
 130         if (fd >= NR_OPEN || !(file = current->filp[fd]))
 131                 return -EBADF;
 132         return do_mmap(file, get_fs_long(buffer), get_fs_long(buffer+1),
 133                 get_fs_long(buffer+2), get_fs_long(buffer+3), get_fs_long(buffer+5));
 134 }
 135 
 136 extern "C" int sys_munmap(unsigned long addr, size_t len)
     /* [previous][next][first][last][top][bottom][index][help] */
 137 {
 138         struct vm_area_struct *mpnt, **p, *free;
 139 
 140         if ((addr & ~PAGE_MASK) || addr > LONG_MAX || addr == 0 || addr + len > TASK_SIZE)
 141                 return -EINVAL;
 142 
 143         /* This needs a bit of work - we need to figure out how to
 144            deal with areas that overlap with something that we are using */
 145 
 146         p = &current->mmap;
 147         free = NULL;
 148         /*
 149          * Check if this memory area is ok - put it on the temporary
 150          * list if so..
 151          */
 152         while ((mpnt = *p) != NULL) {
 153                 if (addr > mpnt->vm_start && addr < mpnt->vm_end)
 154                         goto bad_munmap;
 155                 if (addr+len > mpnt->vm_start && addr + len < mpnt->vm_end)
 156                         goto bad_munmap;
 157                 if (addr <= mpnt->vm_start && addr + len >= mpnt->vm_end) {
 158                         *p = mpnt->vm_next;
 159                         mpnt->vm_next = free;
 160                         free = mpnt;
 161                         continue;
 162                 }
 163                 p = &mpnt->vm_next;
 164         }
 165         /*
 166          * Ok - we have the memory areas we should free on the 'free' list,
 167          * so release them, and unmap the page range..
 168          */
 169         while (free) {
 170                 mpnt = free;
 171                 free = free->vm_next;
 172                 if (mpnt->vm_ops->close)
 173                         mpnt->vm_ops->close(mpnt);
 174                 kfree(mpnt);
 175         }
 176 
 177         unmap_page_range(addr, len);
 178         return 0;
 179 bad_munmap:
 180 /*
 181  * the arguments we got were bad: put the temporary list back into the mmap list
 182  */
 183         while (free) {
 184                 mpnt = free;
 185                 free = free->vm_next;
 186                 mpnt->vm_next = current->mmap;
 187                 current->mmap = mpnt;
 188         }
 189         return -EINVAL;
 190 }
 191 
 192 /* This is used for a general mmap of a disk file */
 193 int generic_mmap(struct inode * inode, struct file * file,
     /* [previous][next][first][last][top][bottom][index][help] */
 194         unsigned long addr, size_t len, int prot, unsigned long off)
 195 {
 196         struct vm_area_struct * mpnt;
 197         extern struct vm_operations_struct file_mmap;
 198         struct buffer_head * bh;
 199 
 200         if (off & (inode->i_sb->s_blocksize - 1))
 201                 return -EINVAL;
 202         if (len > high_memory || off > high_memory - len) /* avoid overflow */
 203                 return -ENXIO;
 204         if (get_limit(USER_DS)  != TASK_SIZE)
 205                 return -EINVAL;
 206         if (!inode->i_sb || !S_ISREG(inode->i_mode))
 207                 return -EACCES;
 208         if (!inode->i_op || !inode->i_op->bmap)
 209                 return -ENOEXEC;
 210         if (!(bh = bread(inode->i_dev,bmap(inode,0),inode->i_sb->s_blocksize)))
 211                 return -EACCES;
 212         if (!IS_RDONLY(inode)) {
 213                 inode->i_atime = CURRENT_TIME;
 214                 inode->i_dirt = 1;
 215         }
 216         brelse(bh);
 217 
 218         mpnt = (struct vm_area_struct * ) kmalloc(sizeof(struct vm_area_struct), GFP_KERNEL);
 219         if (!mpnt)
 220                 return -ENOMEM;
 221 
 222         unmap_page_range(addr, len);    
 223         mpnt->vm_task = current;
 224         mpnt->vm_start = addr;
 225         mpnt->vm_end = addr + len;
 226         mpnt->vm_page_prot = prot;
 227         mpnt->vm_share = NULL;
 228         mpnt->vm_inode = inode;
 229         inode->i_count++;
 230         mpnt->vm_offset = off;
 231         mpnt->vm_ops = &file_mmap;
 232         mpnt->vm_next = current->mmap;
 233         current->mmap = mpnt;
 234 #if 0
 235         printk("VFS: Loaded mmap at %08x -  %08x\n",
 236                 mpnt->vm_start, mpnt->vm_end);
 237 #endif
 238         return 0;
 239 }
 240 

/* [previous][next][first][last][top][bottom][index][help] */