root/mm/mmap.c

DEFINITIONS

This source file includes the following definitions:
  1. mmap_chr
  2. sys_mmap
  3. sys_munmap

/*
 *      linux/mm/mmap.c
 *
 * Written by obz.
 */
#include <linux/stat.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/errno.h>

#include <asm/segment.h>
#include <asm/system.h>

#include <sys/mman.h>

/*
 * description of effects of mapping type and prot in current implementation.
 * this is due to the current handling of page faults in memory.c. the expected
 * behavior is in parens:
 *
 * map_type     prot
 *              PROT_NONE       PROT_READ       PROT_WRITE      PROT_EXEC
 * MAP_SHARED   r: (no) yes     r: (yes) yes    r: (no) yes     r: (no) no
 *              w: (no) yes     w: (no) copy    w: (yes) yes    w: (no) no
 *              x: (no) no      x: (no) no      x: (no) no      x: (yes) no
 *
 * MAP_PRIVATE  r: (no) yes     r: (yes) yes    r: (no) yes     r: (no) no
 *              w: (no) copy    w: (no) copy    w: (copy) copy  w: (no) no
 *              x: (no) no      x: (no) no      x: (no) no      x: (yes) no
 *
 * the permissions are encoded as cxwr (copy,exec,write,read)
 */
#define MTYP(T) ((T) & MAP_TYPE)
#define PREAD(T,P) (((P) & PROT_READ) ? 1 : 0)
#define PWRITE(T,P) (((P) & PROT_WRITE) ? (MTYP(T) == MAP_SHARED ? 2 : 10) : 0)
#define PEXEC(T,P) (((P) & PROT_EXEC) ? 4 : 0)
#define PERMISS(T,P) (PREAD(T,P)|PWRITE(T,P)|PEXEC(T,P))

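/*
 * For illustration (worked values, not from the original source): in the
 * cxwr encoding produced by PERMISS(), bit 0 is read, bit 1 write, bit 2
 * exec and bit 3 copy-on-write, so for example:
 *
 *      PERMISS(MAP_SHARED,  PROT_READ|PROT_WRITE) == 1 | 2  == 3   (read, write)
 *      PERMISS(MAP_PRIVATE, PROT_READ|PROT_WRITE) == 1 | 10 == 11  (read, write, copy)
 *      PERMISS(MAP_PRIVATE, PROT_EXEC)            == 4            (exec only)
 */
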
#define CODE_SPACE(addr) ((((addr)+4095)&~4095) < \
                          current->start_code + current->end_code)

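/*
 * For illustration (values assumed, not from the original source): with
 * start_code == 0 and end_code == 0x3000, CODE_SPACE(0x1fff) rounds the
 * address up to 0x2000, which is below 0x3000, so it is treated as code
 * space; CODE_SPACE(0x2001) rounds up to 0x3000 and is not.
 */
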
static caddr_t
mmap_chr(unsigned long addr, size_t len, int prot, int flags,
         struct inode *inode, unsigned long off)
{
        int major, minor;

        major = MAJOR(inode->i_rdev);
        minor = MINOR(inode->i_rdev);

        /*
         * for character devices, only /dev/mem may be mapped. when the
         * swapping code is modified to allow arbitrary sources of pages,
         * then we can open it up to regular files.
         */

        if (major != 1 || minor != 1)
                return (caddr_t)-ENODEV;

        /*
         * we only allow mappings from address 0 to high_memory, since that's
         * the range of our memory [actually this is a lie. the buffer cache
         * and ramdisk occupy higher memory, but the paging stuff won't
         * let us map to it anyway, so we break it here].
         *
         * this call is very dangerous! because of the lack of adequate
         * tagging of frames, it is possible to mmap over a frame belonging
         * to another (innocent) process. with MAP_SHARED and PROT_WRITE, this
         * rogue process can trample over the other's data! we ignore this :{
         * for now, we hope people will malloc the required amount of space,
         * then mmap over it. the mm needs serious work before this can be
         * truly useful.
         */

        /* i.e. reject if off + len would exceed high_memory; the test is
           written this way so the addition cannot overflow */
        if (len > high_memory || off > high_memory - len)
                return (caddr_t)-ENXIO;

        if (remap_page_range(addr, off, len, PERMISS(flags, prot)))
                return (caddr_t)-EAGAIN;

        return (caddr_t)addr;
}

caddr_t
sys_mmap(unsigned long *buffer)
{
        unsigned long base, addr;
        unsigned long len, limit, off;
        int prot, flags, fd;
        struct file *file;
        struct inode *inode;

        addr = (unsigned long)  get_fs_long(buffer);    /* user address space */
        len = (size_t)          get_fs_long(buffer+1);  /* nbytes of mapping */
        prot = (int)            get_fs_long(buffer+2);  /* protection */
        flags = (int)           get_fs_long(buffer+3);  /* mapping type */
        fd = (int)              get_fs_long(buffer+4);  /* object to map */
        off = (unsigned long)   get_fs_long(buffer+5);  /* offset in object */

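        /*
         * For illustration only (not part of the original source): a user
         * level wrapper is expected to pack the six mmap arguments into an
         * array of longs and pass a single pointer to the system call,
         * matching the get_fs_long() reads above. A sketch, with assumed
         * names (buf, SYS_mmap, syscall):
         *
         *      unsigned long buf[6];
         *      buf[0] = (unsigned long) addr;
         *      buf[1] = len;
         *      buf[2] = prot;
         *      buf[3] = flags;
         *      buf[4] = fd;
         *      buf[5] = off;
         *      return (caddr_t) syscall(SYS_mmap, buf);
         */
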
        if (fd >= NR_OPEN || fd < 0 || !(file = current->filp[fd]))
                return (caddr_t) -EBADF;
        if (addr > TASK_SIZE || (addr+(unsigned long) len) > TASK_SIZE)
                return (caddr_t) -EINVAL;
        inode = file->f_inode;

        /*
         * do simple checking here so the lower-level routines won't have
         * to. we assume access permissions have been handled by the open
         * of the memory object, so we don't do any here.
         */

        switch (flags & MAP_TYPE) {
        case MAP_SHARED:
                if ((prot & PROT_WRITE) && !(file->f_mode & 2)) /* not open for writing */
                        return (caddr_t)-EINVAL;
                /* fall through */
        case MAP_PRIVATE:
                if (!(file->f_mode & 1))                        /* not open for reading */
                        return (caddr_t)-EINVAL;
                break;

        default:
                return (caddr_t)-EINVAL;
        }

        /*
         * obtain the address to map to. we verify (or select) it and ensure
         * that it represents a valid section of the address space. we assume
         * that if PROT_EXEC is specified this should be in the code segment.
         */
        if (prot & PROT_EXEC) {
                base = get_base(current->ldt[1]);       /* cs */
                limit = get_limit(0x0f);                /* cs limit */
        } else {
                base = get_base(current->ldt[2]);       /* ds */
                limit = get_limit(0x17);                /* ds limit */
        }
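        /*
         * 0x0f and 0x17 are the selectors for LDT entries 1 (cs) and 2 (ds):
         * (index << 3) | 4 (table indicator = LDT) | 3 (user RPL), matching
         * the get_base() calls on current->ldt[1] and current->ldt[2] above.
         */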

        if (flags & MAP_FIXED) {
                /*
                 * if MAP_FIXED is specified, we have to map exactly at this
                 * address. it must be page aligned and not ambiguous.
                 */
                if ((addr & 0xfff) || addr > 0x7fffffff || addr == 0 ||
                    (off & 0xfff))
                        return (caddr_t)-EINVAL;
                if (addr + len > limit)
                        return (caddr_t)-ENOMEM;
        } else {
                /*
                 * we're given a hint as to where to put the address, but we
                 * still need to search for a range of pages which are not
                 * mapped and which won't impact the stack or data segment.
                 * in linux, we only have a code segment and data segment.
                 * since data grows up and stack grows down, we're sort of
                 * stuck. placing above the data will break malloc, below
                 * the stack will cause stack overflow. because of this
                 * we don't allow nonspecified mappings...
                 */
                return (caddr_t)-ENOMEM;
        }

        /*
         * determine the object being mapped and call the appropriate
         * specific mapper. the address has already been validated, but
         * not unmapped
         */
        if (S_ISCHR(inode->i_mode))
                addr = (unsigned long)mmap_chr(base + addr, len, prot, flags,
                                               inode, off);
        else
                addr = (unsigned long)-ENODEV;
        /* on success mmap_chr() returned a linear address; convert it back
           to a segment-relative address before returning it to the caller */
        if ((long)addr > 0)
                addr -= base;

        return (caddr_t)addr;
}
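
/*
 * Usage sketch (illustrative only, not part of the original source): as the
 * comment in mmap_chr() suggests, a user program would malloc enough space,
 * pick a page-aligned address inside it, and map /dev/mem over it with
 * MAP_FIXED. Names are assumed and error handling is omitted:
 *
 *      int fd = open("/dev/mem", O_RDWR);
 *      char *buf = malloc(2 * 4096);
 *      unsigned long page = ((unsigned long) buf + 4095) & ~4095;
 *      caddr_t p = mmap((caddr_t) page, 4096, PROT_READ | PROT_WRITE,
 *                       MAP_SHARED | MAP_FIXED, fd, 0);
 */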

int sys_munmap(unsigned long addr, size_t len)
{
        unsigned long base, limit;

        base = get_base(current->ldt[2]);       /* map into ds */
        limit = get_limit(0x17);                /* ds limit */

        if ((addr & 0xfff) || addr > 0x7fffffff || addr == 0 ||
            addr + len > limit)
                return -EINVAL;
        if (unmap_page_range(base + addr, len))
                return -EAGAIN; /* should never happen */
        return 0;
}
