root/mm/mmap.c

/* [previous][next][first][last][top][bottom][index][help] */

DEFINITIONS

This source file includes the following definitions.
  1. do_mmap
  2. sys_mmap
  3. sys_munmap
  4. generic_mmap

   1 /*
   2  *      linux/mm/mmap.c
   3  *
   4  * Written by obz.
   5  */
   6 #include <linux/stat.h>
   7 #include <linux/sched.h>
   8 #include <linux/kernel.h>
   9 #include <linux/mm.h>
  10 #include <linux/errno.h>
  11 #include <linux/mman.h>
  12 #include <linux/string.h>
  13 
  14 #include <asm/segment.h>
  15 #include <asm/system.h>
  16 
  17 /*
  18  * description of effects of mapping type and prot in current implementation.
  19  * this is due to the current handling of page faults in memory.c. the expected
  20  * behavior is in parens:
  21  *
  22  * map_type     prot
  23  *              PROT_NONE       PROT_READ       PROT_WRITE      PROT_EXEC
  24  * MAP_SHARED   r: (no) yes     r: (yes) yes    r: (no) yes     r: (no) no
  25  *              w: (no) yes     w: (no) copy    w: (yes) yes    w: (no) no
  26  *              x: (no) no      x: (no) no      x: (no) no      x: (yes) no
  27  *              
  28  * MAP_PRIVATE  r: (no) yes     r: (yes) yes    r: (no) yes     r: (no) no
  29  *              w: (no) copy    w: (no) copy    w: (copy) copy  w: (no) no
  30  *              x: (no) no      x: (no) no      x: (no) no      x: (yes) no
  31  *
  32  */
  33 
  34 #define CODE_SPACE(addr) ((((addr)+4095)&~4095) < \
  35                           current->start_code + current->end_code)
  36 
  37 int do_mmap(struct file * file, unsigned long addr, unsigned long len,
     /* [previous][next][first][last][top][bottom][index][help] */
  38         unsigned long prot, unsigned long flags, unsigned long off)
  39 {
  40         int mask, error;
  41 
  42         if (addr > TASK_SIZE || len > TASK_SIZE || addr > TASK_SIZE-len)
  43                 return -EINVAL;
  44 
  45         /*
  46          * do simple checking here so the lower-level routines won't have
  47          * to. we assume access permissions have been handled by the open
  48          * of the memory object, so we don't do any here.
  49          */
  50 
  51         switch (flags & MAP_TYPE) {
  52         case MAP_SHARED:
  53                 if ((prot & PROT_WRITE) && !(file->f_mode & 2))
  54                         return -EINVAL;
  55                 /* fall through */
  56         case MAP_PRIVATE:
  57                 if (!(file->f_mode & 1))
  58                         return -EINVAL;
  59                 break;
  60 
  61         default:
  62                 return -EINVAL;
  63         }
  64 
  65         /*
  66          * obtain the address to map to. we verify (or select) it and ensure
  67          * that it represents a valid section of the address space.
  68          */
  69 
  70         if (flags & MAP_FIXED) {
  71                 if ((addr & 0xfff) || addr == 0)
  72                         return -EINVAL;
  73                 if (len > TASK_SIZE || addr > TASK_SIZE - len)
  74                         return -ENOMEM;
  75         } else {
  76                 struct vm_area_struct * vmm;
  77 
  78                 /* Maybe this works.. Ugly it is. */
  79                 addr = 0x40000000;
  80                 while (addr+len < 0x60000000) {
  81                         for (vmm = current->mmap ; vmm ; vmm = vmm->vm_next) {
  82                                 if (addr >= vmm->vm_end)
  83                                         continue;
  84                                 if (addr + len <= vmm->vm_start)
  85                                         continue;
  86                                 addr = (vmm->vm_end + 0xfff) & 0xfffff000;
  87                                 break;
  88                         }
  89                         if (!vmm)
  90                                 break;
  91                 }
  92                 if (addr+len >= 0x60000000)
  93                         return -ENOMEM;
  94         }
  95 
  96         /*
  97          * determine the object being mapped and call the appropriate
  98          * specific mapper. the address has already been validated, but
  99          * not unmapped
 100          */
 101         if (!file->f_op || !file->f_op->mmap)
 102                 return -ENODEV;
 103         mask = 0;
 104         if (prot & (PROT_READ | PROT_EXEC))
 105                 mask |= PAGE_READONLY;
 106         if (prot & PROT_WRITE)
 107                 if ((flags & MAP_TYPE) == MAP_PRIVATE)
 108                         mask |= PAGE_COW;
 109                 else
 110                         mask |= PAGE_RW;
 111         if (!mask)
 112                 return -EINVAL;
 113 
 114         error = file->f_op->mmap(file->f_inode, file, addr, len, mask, off);
 115         if (!error)
 116                 return addr;
 117 
 118         if (!current->errno)
 119                 current->errno = -error;
 120         return -1;
 121 }
 122 
 123 extern "C" int sys_mmap(unsigned long *buffer)
     /* [previous][next][first][last][top][bottom][index][help] */
 124 {
 125         unsigned long fd;
 126         struct file * file;
 127 
 128         fd = get_fs_long(buffer+4);
 129         if (fd >= NR_OPEN || !(file = current->filp[fd]))
 130                 return -EBADF;
 131         return do_mmap(file, get_fs_long(buffer), get_fs_long(buffer+1),
 132                 get_fs_long(buffer+2), get_fs_long(buffer+3), get_fs_long(buffer+5));
 133 }
 134 
 135 extern "C" int sys_munmap(unsigned long addr, size_t len)
     /* [previous][next][first][last][top][bottom][index][help] */
 136 {
 137         struct vm_area_struct *mpnt, **p, *free;
 138 
 139         if ((addr & 0xfff) || addr > 0x7fffffff || addr == 0 || addr + len > TASK_SIZE)
 140                 return -EINVAL;
 141 
 142         /* This needs a bit of work - we need to figure out how to
 143            deal with areas that overlap with something that we are using */
 144 
 145         p = &current->mmap;
 146         free = NULL;
 147         /*
 148          * Check if this memory area is ok - put it on the temporary
 149          * list if so..
 150          */
 151         while ((mpnt = *p) != NULL) {
 152                 if (addr > mpnt->vm_start && addr < mpnt->vm_end)
 153                         goto bad_munmap;
 154                 if (addr+len > mpnt->vm_start && addr + len < mpnt->vm_end)
 155                         goto bad_munmap;
 156                 if (addr <= mpnt->vm_start && addr + len >= mpnt->vm_end) {
 157                         *p = mpnt->vm_next;
 158                         mpnt->vm_next = free;
 159                         free = mpnt;
 160                         continue;
 161                 }
 162                 p = &mpnt->vm_next;
 163         }
 164         /*
 165          * Ok - we have the memory areas we should free on the 'free' list,
 166          * so release them, and unmap the page range..
 167          */
 168         while (free) {
 169                 mpnt = free;
 170                 free = free->vm_next;
 171                 if (mpnt->vm_ops->close)
 172                         mpnt->vm_ops->close(mpnt);
 173                 kfree(mpnt);
 174         }
 175 
 176         unmap_page_range(addr, len);
 177         return 0;
 178 bad_munmap:
 179 /*
 180  * the arguments we got were bad: put the temporary list back into the mmap list
 181  */
 182         while (free) {
 183                 mpnt = free;
 184                 free = free->vm_next;
 185                 mpnt->vm_next = current->mmap;
 186                 current->mmap = mpnt;
 187         }
 188         return -EINVAL;
 189 }
 190 
 191 /* This is used for a general mmap of a disk file */
/* This is used for a general mmap of a disk file */
/*
 * Generic mmap implementation for regular disk files: validates the
 * request, then records a vm_area_struct on the current task's mmap
 * list so that page faults are serviced via the file_mmap operations.
 *
 * inode/file: object being mapped; inode->i_count is bumped on success.
 * addr, len:  user address range to map (already chosen by do_mmap).
 * prot:       page protection bits to store in the area.
 * off:        file offset; must be a multiple of the filesystem block size.
 *
 * Returns 0 on success or a negative error code.  NOTE(review): the
 * error paths call iput(inode) even though this function did not take
 * the reference itself - presumably the caller's convention; confirm
 * against do_mmap before changing.
 */
int generic_mmap(struct inode * inode, struct file * file,
	unsigned long addr, size_t len, int prot, unsigned long off)
{
	struct vm_area_struct * mpnt;
	extern struct vm_operations_struct file_mmap;
	struct buffer_head * bh;

	/* Offset must be block-aligned so bmap-based paging works. */
	if (off & (inode->i_sb->s_blocksize - 1))
		return -EINVAL;

	if (len > high_memory || off > high_memory - len) /* avoid overflow */
		return -ENXIO;

	/* Refuse if the user segment limit is non-standard. */
	if (get_limit(USER_DS)  != TASK_SIZE)
		return -EINVAL;

	/* Only readable regular files on a mounted filesystem can be mapped. */
	if (!inode->i_sb || !S_ISREG(inode->i_mode) || !permission(inode,MAY_READ)) {
		iput(inode);
		return -EACCES;
	}
	/* Paging needs the filesystem's bmap to resolve file blocks. */
	if (!inode->i_op || !inode->i_op->bmap) {
		iput(inode);
		return -ENOEXEC;
	}
	/* Probe block 0 to verify the file is actually readable from disk. */
	if (!(bh = bread(inode->i_dev,bmap(inode,0),inode->i_sb->s_blocksize))) {
		iput(inode);
		return -EACCES;
	}
	/* Mapping counts as an access - update atime on writable media. */
	if (!IS_RDONLY(inode)) {
		inode->i_atime = CURRENT_TIME;
		inode->i_dirt = 1;
	}
	brelse(bh);

	mpnt = (struct vm_area_struct * ) kmalloc(sizeof(struct vm_area_struct), GFP_KERNEL);
	if (!mpnt){
		iput(inode);
		return -ENOMEM;
	}

	/* Drop any old pages in the range, then record the new area. */
	unmap_page_range(addr, len);	
	mpnt->vm_task = current;
	mpnt->vm_start = addr;
	mpnt->vm_end = addr + len;
	mpnt->vm_page_prot = prot;
	mpnt->vm_share = NULL;
	mpnt->vm_inode = inode;
	inode->i_count++;	/* the vm_area now holds a reference */
	mpnt->vm_offset = off;
	mpnt->vm_ops = &file_mmap;
	mpnt->vm_next = current->mmap;
	current->mmap = mpnt;
#if 0
	printk("VFS: Loaded mmap at %08x -  %08x\n",
		mpnt->vm_start, mpnt->vm_end);
#endif
	return 0;
}
 250 

/* [previous][next][first][last][top][bottom][index][help] */