/*
 *	linux/mm/mmap.c
 *
 * Written by obz.
 */
#include <linux/stat.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/errno.h>
#include <linux/mman.h>
#include <linux/string.h>

#include <asm/segment.h>
#include <asm/system.h>

/*
 * description of effects of mapping type and prot in current implementation.
 * this is due to the current handling of page faults in memory.c. the expected
 * behavior is in parens:
 *
 * map_type	prot
 *		PROT_NONE	PROT_READ	PROT_WRITE	PROT_EXEC
 * MAP_SHARED	r: (no) yes	r: (yes) yes	r: (no) yes	r: (no) no
 *		w: (no) yes	w: (no) copy	w: (yes) yes	w: (no) no
 *		x: (no) no	x: (no) no	x: (no) no	x: (yes) no
 *
 * MAP_PRIVATE	r: (no) yes	r: (yes) yes	r: (no) yes	r: (no) no
 *		w: (no) copy	w: (no) copy	w: (copy) copy	w: (no) no
 *		x: (no) no	x: (no) no	x: (no) no	x: (yes) no
 *
 */

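/*
 * CODE_SPACE(addr) rounds addr up to the next page boundary and checks
 * that the result still lies below current->start_code + current->end_code,
 * i.e. that the address falls within the code area of the task.
 */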
#define CODE_SPACE(addr) ((((addr)+4095)&~4095) < \
			  current->start_code + current->end_code)

int sys_mmap(unsigned long *buffer)
{
	unsigned long base, addr;
	unsigned long len, limit, off;
	int prot, flags, mask, fd, error;
	struct file *file;

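	/*
	 * the single "buffer" argument points at an array of six longs in
	 * user space holding the usual mmap arguments; each one is fetched
	 * with get_fs_long() before it is used.
	 */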
	addr = (unsigned long) get_fs_long(buffer);	/* user address space */
	len = (size_t) get_fs_long(buffer+1);		/* nbytes of mapping */
	prot = (int) get_fs_long(buffer+2);		/* protection */
	flags = (int) get_fs_long(buffer+3);		/* mapping type */
	fd = (int) get_fs_long(buffer+4);		/* object to map */
	off = (unsigned long) get_fs_long(buffer+5);	/* offset in object */

	if (fd >= NR_OPEN || fd < 0 || !(file = current->filp[fd]))
		return -EBADF;
	if (addr > TASK_SIZE || len > TASK_SIZE || addr > TASK_SIZE-len)
		return -EINVAL;

	/*
	 * do simple checking here so the lower-level routines won't have
	 * to. we assume access permissions have been handled by the open
	 * of the memory object, so we don't do any here.
	 */

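	/*
	 * f_mode & 1 means the file was opened for reading, f_mode & 2 for
	 * writing: a shared writable mapping needs a writable file, and any
	 * file mapping needs a readable one.
	 */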
	switch (flags & MAP_TYPE) {
	case MAP_SHARED:
		if ((prot & PROT_WRITE) && !(file->f_mode & 2))
			return -EINVAL;
		/* fall through */
	case MAP_PRIVATE:
		if (!(file->f_mode & 1))
			return -EINVAL;
		break;

	default:
		return -EINVAL;
	}

	/*
	 * obtain the address to map to. we verify (or select) it and ensure
	 * that it represents a valid section of the address space. we assume
	 * that if PROT_EXEC is specified this should be in the code segment.
	 */
	if (prot & PROT_EXEC) {
		base = get_base(current->ldt[1]);	/* cs */
		limit = get_limit(0x0f);		/* cs limit */
	} else {
		base = get_base(current->ldt[2]);	/* ds */
		limit = get_limit(0x17);		/* ds limit */
	}

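	/*
	 * note that addr is an offset within the segment selected above: it
	 * is checked against that segment's limit here and added to the
	 * segment base before the low-level mapper is called.
	 */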
	if (flags & MAP_FIXED) {
		/*
		 * if MAP_FIXED is specified, we have to map exactly at this
		 * address. it must be page aligned and not ambiguous.
		 */
		if ((addr & 0xfff) || addr > 0x7fffffff || addr == 0 ||
		    (off & 0xfff))
			return -EINVAL;
		if (addr + len > limit)
			return -ENOMEM;
	} else {
		/*
		 * we're given a hint as to where to put the address, but we
		 * still need to search for a range of pages which are not
		 * mapped and which won't impact the stack or data segment.
		 * in linux, we only have a code segment and data segment.
		 * since data grows up and stack grows down, we're sort of
		 * stuck. placing above the data will break malloc, below
		 * the stack will cause stack overflow. because of this
		 * we don't allow nonspecified mappings...
		 */
		return -ENOMEM;
	}

	/*
	 * determine the object being mapped and call the appropriate
	 * specific mapper. the address has already been validated, but
	 * not unmapped
	 */
	if (!file->f_op || !file->f_op->mmap)
		return -ENODEV;
	mask = 0;
	if (prot & (PROT_READ | PROT_EXEC))
		mask |= PAGE_READONLY;
	if (prot & PROT_WRITE)
		mask |= PAGE_RW;
	if (!mask)
		return -EINVAL;
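	/*
	 * private mappings must not write back to the underlying object:
	 * drop PAGE_RW and mark the pages PAGE_COW so that the first write
	 * fault gives the process its own copy (see the table above).
	 */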
	if ((flags & MAP_TYPE) == MAP_PRIVATE) {
		mask |= PAGE_COW;
		mask &= ~PAGE_RW;
	}
	error = file->f_op->mmap(file->f_inode, file, base + addr, len, mask, off);
	if (error)
		return error;
	return addr;
}

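/*
 * for reference, a minimal sketch of how a user-level wrapper might drive
 * sys_mmap(): the six classical mmap arguments are packed into an array of
 * unsigned longs, and a single pointer to that array is passed to the
 * system call. the order matches the get_fs_long() reads above. the
 * syscall() wrapper and the __NR_mmap name are assumptions of the sketch,
 * not something defined in this file.
 *
 *	unsigned long buffer[6];
 *	long result;
 *
 *	buffer[0] = (unsigned long) addr;	-- address hint (or fixed address)
 *	buffer[1] = (unsigned long) len;	-- length of the mapping
 *	buffer[2] = (unsigned long) prot;	-- PROT_READ | PROT_WRITE | PROT_EXEC
 *	buffer[3] = (unsigned long) flags;	-- MAP_SHARED or MAP_PRIVATE, MAP_FIXED
 *	buffer[4] = (unsigned long) fd;		-- open file to map
 *	buffer[5] = (unsigned long) off;	-- offset within the object
 *	result = syscall(__NR_mmap, buffer);
 */
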
int sys_munmap(unsigned long addr, size_t len)
{
	unsigned long base, limit;

	base = get_base(current->ldt[2]);	/* map into ds */
	limit = get_limit(0x17);		/* ds limit */

	if ((addr & 0xfff) || addr > 0x7fffffff || addr == 0 ||
	    addr + len > limit)
		return -EINVAL;
	if (unmap_page_range(base + addr, len))
		return -EAGAIN;	/* should never happen */
	return 0;
}