mm/mmap.c


DEFINITIONS

This source file includes the following definitions:
  1. mmap_chr
  2. sys_mmap
  3. sys_munmap

/*
 *      linux/mm/mmap.c
 *
 * Written by obz.
 */
#include <linux/stat.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/errno.h>
#include <linux/mman.h>

#include <asm/segment.h>
#include <asm/system.h>

/*
 * description of effects of mapping type and prot in current implementation.
 * this is due to the current handling of page faults in memory.c. the expected
 * behavior is in parens:
 *
 * map_type     prot
 *              PROT_NONE       PROT_READ       PROT_WRITE      PROT_EXEC
 * MAP_SHARED   r: (no) yes     r: (yes) yes    r: (no) yes     r: (no) no
 *              w: (no) yes     w: (no) copy    w: (yes) yes    w: (no) no
 *              x: (no) no      x: (no) no      x: (no) no      x: (yes) no
 *
 * MAP_PRIVATE  r: (no) yes     r: (yes) yes    r: (no) yes     r: (no) no
 *              w: (no) copy    w: (no) copy    w: (copy) copy  w: (no) no
 *              x: (no) no      x: (no) no      x: (no) no      x: (yes) no
 *
 * the permissions are encoded as cxwr (copy,exec,write,read)
 */
#define MTYP(T) ((T) & MAP_TYPE)
#define PREAD(T,P) (((P) & PROT_READ) ? 1 : 0)
#define PWRITE(T,P) (((P) & PROT_WRITE) ? (MTYP(T) == MAP_SHARED ? 2 : 10) : 0)
#define PEXEC(T,P) (((P) & PROT_EXEC) ? 4 : 0)
#define PERMISS(T,P) (PREAD(T,P)|PWRITE(T,P)|PEXEC(T,P))
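
/*
 * for example, with the cxwr encoding above:
 *
 *      PERMISS(MAP_SHARED,  PROT_READ|PROT_WRITE) = 1|2  = binary 0011 (-wr)
 *      PERMISS(MAP_PRIVATE, PROT_READ|PROT_WRITE) = 1|10 = binary 1011 (c-wr)
 *
 * so a writable private mapping carries the copy bit and writes go to a
 * private copy of the page, while a writable shared mapping does not.
 */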

#define CODE_SPACE(addr) ((((addr)+4095)&~4095) < \
                          current->start_code + current->end_code)

static caddr_t
mmap_chr(unsigned long addr, size_t len, int prot, int flags,
         struct inode *inode, unsigned long off)
{
        int major, minor;

        major = MAJOR(inode->i_rdev);
        minor = MINOR(inode->i_rdev);

        /*
         * for character devices, only /dev/[k]mem may be mapped. when the
         * swapping code is modified to allow arbitrary sources of pages,
         * then we can open it up to regular files.
         */
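        /* i.e. (major 1, minor 1) = /dev/mem and (major 1, minor 2) = /dev/kmem */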

        if (major != 1 || (minor != 1 && minor != 2))
                return (caddr_t)-ENODEV;

        /*
         * we only allow mappings from address 0 to high_memory, since that's
         * the range of our memory [actually this is a lie. the buffer cache
         * and ramdisk occupy higher memory, but the paging stuff won't
         * let us map to it anyway, so we break it here].
         *
         * this call is very dangerous! because of the lack of adequate
         * tagging of frames, it is possible to mmap over a frame belonging
         * to another (innocent) process. with MAP_SHARED|PROT_WRITE, this
         * rogue process can trample over the other's data! we ignore this :{
         * for now, we hope people will malloc the required amount of space,
         * then mmap over it. the mm needs serious work before this can be
         * truly useful.
         */

        if (len > high_memory || off > high_memory - len) /* avoid overflow */
                return (caddr_t)-ENXIO;

        if (remap_page_range(addr, off, len, PERMISS(flags, prot)))
                return (caddr_t)-EAGAIN;

        return (caddr_t)addr;
}

caddr_t
sys_mmap(unsigned long *buffer)
{
        unsigned long base, addr;
        unsigned long len, limit, off;
        int prot, flags, fd;
        struct file *file;
        struct inode *inode;

        addr = (unsigned long)  get_fs_long(buffer);    /* user address space */
        len = (size_t)          get_fs_long(buffer+1);  /* nbytes of mapping */
        prot = (int)            get_fs_long(buffer+2);  /* protection */
        flags = (int)           get_fs_long(buffer+3);  /* mapping type */
        fd = (int)              get_fs_long(buffer+4);  /* object to map */
        off = (unsigned long)   get_fs_long(buffer+5);  /* offset in object */

        if (fd >= NR_OPEN || fd < 0 || !(file = current->filp[fd]))
                return (caddr_t) -EBADF;
        if (addr > TASK_SIZE || (addr+(unsigned long) len) > TASK_SIZE)
                return (caddr_t) -EINVAL;
        inode = file->f_inode;

        /*
         * do simple checking here so the lower-level routines won't have
         * to. we assume access permissions have been handled by the open
         * of the memory object, so we don't do any here.
         */
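        /*
         * in f_mode, bit 0 means the file was opened for reading and
         * bit 1 means it was opened for writing, hence the checks below.
         */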

        switch (flags & MAP_TYPE) {
        case MAP_SHARED:
                if ((prot & PROT_WRITE) && !(file->f_mode & 2))
                        return (caddr_t)-EINVAL;
                /* fall through */
        case MAP_PRIVATE:
                if (!(file->f_mode & 1))
                        return (caddr_t)-EINVAL;
                break;

        default:
                return (caddr_t)-EINVAL;
        }

        /*
         * obtain the address to map to. we verify (or select) it and ensure
         * that it represents a valid section of the address space. we assume
         * that if PROT_EXEC is specified this should be in the code segment.
         */
        if (prot & PROT_EXEC) {
                base = get_base(current->ldt[1]);       /* cs */
                limit = get_limit(0x0f);                /* cs limit */
        } else {
                base = get_base(current->ldt[2]);       /* ds */
                limit = get_limit(0x17);                /* ds limit */
        }
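        /*
         * note: 0x0f and 0x17 are the user code and data segment selectors
         * (LDT entries 1 and 2, matching ldt[1] and ldt[2] above); the
         * mapping is installed at base + addr and the result is converted
         * back to a base-relative address before returning.
         */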

        if (flags & MAP_FIXED) {
                /*
                 * if MAP_FIXED is specified, we have to map exactly at this
                 * address. it must be page aligned and not ambiguous.
                 */
                if ((addr & 0xfff) || addr > 0x7fffffff || addr == 0 ||
                    (off & 0xfff))
                        return (caddr_t)-EINVAL;
                if (addr + len > limit)
                        return (caddr_t)-ENOMEM;
        } else {
                /*
                 * we're given only a hint as to where to put the mapping;
                 * we would still need to search for a range of pages which
                 * are not mapped and which won't impact the stack or data
                 * segment.
                 * in linux, we only have a code segment and data segment.
                 * since data grows up and stack grows down, we're sort of
                 * stuck. placing the mapping above the data will break
                 * malloc, and placing it below the stack will cause stack
                 * overflow. because of this we don't allow unspecified
                 * mappings...
                 */
                return (caddr_t)-ENOMEM;
        }

        /*
         * determine the object being mapped and call the appropriate
         * specific mapper. the address has already been validated, but
         * not unmapped.
         */
        if (S_ISCHR(inode->i_mode))
                addr = (unsigned long)mmap_chr(base + addr, len, prot, flags,
                                               inode, off);
        else
                addr = (unsigned long)-ENODEV;
        if ((long)addr > 0)
                addr -= base;

        return (caddr_t)addr;
}
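
/*
 * a sketch (illustrative only) of the user-space calling sequence this
 * entry point expects, assuming a wrapper that passes a pointer to six
 * longs straight through; since unspecified mappings fail with -ENOMEM,
 * the caller picks a page-aligned address it already owns (e.g. inside
 * a malloc'ed buffer) and asks for MAP_FIXED:
 *
 *      unsigned long args[6];
 *      char *buf = malloc(2 * 4096);
 *      unsigned long where = ((unsigned long) buf + 4095) & ~4095;
 *
 *      args[0] = where;                      // page-aligned address
 *      args[1] = 4096;                       // length
 *      args[2] = PROT_READ | PROT_WRITE;     // protection
 *      args[3] = MAP_SHARED | MAP_FIXED;     // mapping type
 *      args[4] = fd;                         // an open /dev/mem, say
 *      args[5] = offset;                     // page-aligned offset in object
 *      caddr_t result = mmap(args);          // old single-pointer mmap call
 */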

int sys_munmap(unsigned long addr, size_t len)
{
        unsigned long base, limit;

        base = get_base(current->ldt[2]);       /* map into ds */
        limit = get_limit(0x17);                /* ds limit */

        if ((addr & 0xfff) || addr > 0x7fffffff || addr == 0 ||
            addr + len > limit)
                return -EINVAL;
        if (unmap_page_range(base + addr, len))
                return -EAGAIN; /* should never happen */
        return 0;
}
