This source file includes following definitions.
- do_mmap
- sys_mmap
- sys_munmap
- generic_mmap
1
2
3
4
5
6 #include <linux/stat.h>
7 #include <linux/sched.h>
8 #include <linux/kernel.h>
9 #include <linux/mm.h>
10 #include <linux/errno.h>
11 #include <linux/mman.h>
12 #include <linux/string.h>
13
14 #include <asm/segment.h>
15 #include <asm/system.h>
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
/*
 * CODE_SPACE(addr): true when addr, rounded up to the next 4 kB page
 * boundary, still lies below the end of the current task's code segment
 * (start_code + end_code). Not used in this chunk — presumably consumed
 * by code outside the visible range; TODO confirm before removing.
 */
#define CODE_SPACE(addr) ((((addr)+4095)&~4095) < \
current->start_code + current->end_code)
36
/*
 * Map 'len' bytes of 'file' starting at file offset 'off' into the
 * current task's address space.
 *
 * Returns the chosen virtual address on success.  On failure from the
 * per-file mmap operation it returns -1 after stashing the positive
 * errno in current->errno; validation failures return a negative errno
 * directly.  NOTE(review): callers must cope with both conventions.
 */
int do_mmap(struct file * file, unsigned long addr, unsigned long len,
	unsigned long prot, unsigned long flags, unsigned long off)
{
	int mask, error;

	/* Reject any request that is, or would wrap, outside user space. */
	if (addr > TASK_SIZE || len > TASK_SIZE || addr > TASK_SIZE-len)
		return -EINVAL;

	/*
	 * Check the mapping type against the file's open mode
	 * (f_mode bit 0 = readable, bit 1 = writable).
	 */
	switch (flags & MAP_TYPE) {
	case MAP_SHARED:
		/* Writable shared mappings need a file opened for write. */
		if ((prot & PROT_WRITE) && !(file->f_mode & 2))
			return -EINVAL;
		/* fall through: shared mappings also need read access */
	case MAP_PRIVATE:
		if (!(file->f_mode & 1))
			return -EINVAL;
		break;

	default:
		return -EINVAL;
	}

	if (flags & MAP_FIXED) {
		/* Caller picked the address: must be page-aligned, non-NULL. */
		if ((addr & 0xfff) || addr == 0)
			return -EINVAL;
		if (len > TASK_SIZE || addr > TASK_SIZE - len)
			return -ENOMEM;
	} else {
		struct vm_area_struct * vmm;

		/*
		 * Pick an address ourselves: scan [0x40000000, 0x60000000)
		 * for a hole not covered by any existing vma.  Each pass
		 * either finds no overlap (vmm == NULL -> done) or bumps
		 * addr to the page after the overlapping vma and rescans.
		 */
		addr = 0x40000000;
		while (addr+len < 0x60000000) {
			for (vmm = current->mmap ; vmm ; vmm = vmm->vm_next) {
				if (addr >= vmm->vm_end)
					continue;
				if (addr + len <= vmm->vm_start)
					continue;
				/* Overlap: skip past this vma, page-aligned up. */
				addr = (vmm->vm_end + 0xfff) & 0xfffff000;
				break;
			}
			if (!vmm)
				break;
		}
		if (addr+len >= 0x60000000)
			return -ENOMEM;
	}

	/* The filesystem must actually support mmap. */
	if (!file->f_op || !file->f_op->mmap)
		return -ENODEV;

	/*
	 * Translate PROT_* bits into a hardware page protection mask.
	 * Private writable mappings get copy-on-write pages; shared
	 * writable mappings get directly writable pages.
	 */
	mask = 0;
	if (prot & (PROT_READ | PROT_EXEC))
		mask |= PAGE_READONLY;
	if (prot & PROT_WRITE)
		if ((flags & MAP_TYPE) == MAP_PRIVATE)
			mask |= PAGE_COW;
		else
			mask |= PAGE_RW;
	/* PROT_NONE (no bits set) is not supported here. */
	if (!mask)
		return -EINVAL;

	error = file->f_op->mmap(file->f_inode, file, addr, len, mask, off);
	if (!error)
		return addr;

	/* Report failure the old-fashioned way: errno + -1. */
	if (!current->errno)
		current->errno = -error;
	return -1;
}
122
123 extern "C" int sys_mmap(unsigned long *buffer)
124 {
125 unsigned long fd;
126 struct file * file;
127
128 fd = get_fs_long(buffer+4);
129 if (fd >= NR_OPEN || !(file = current->filp[fd]))
130 return -EBADF;
131 return do_mmap(file, get_fs_long(buffer), get_fs_long(buffer+1),
132 get_fs_long(buffer+2), get_fs_long(buffer+3), get_fs_long(buffer+5));
133 }
134
135 extern "C" int sys_munmap(unsigned long addr, size_t len)
136 {
137 struct vm_area_struct *mpnt, **p, *free;
138
139 if ((addr & 0xfff) || addr > 0x7fffffff || addr == 0 || addr + len > TASK_SIZE)
140 return -EINVAL;
141
142
143
144
145 p = ¤t->mmap;
146 free = NULL;
147
148
149
150
151 while ((mpnt = *p) != NULL) {
152 if (addr > mpnt->vm_start && addr < mpnt->vm_end)
153 goto bad_munmap;
154 if (addr+len > mpnt->vm_start && addr + len < mpnt->vm_end)
155 goto bad_munmap;
156 if (addr <= mpnt->vm_start && addr + len >= mpnt->vm_end) {
157 *p = mpnt->vm_next;
158 mpnt->vm_next = free;
159 free = mpnt;
160 continue;
161 }
162 p = &mpnt->vm_next;
163 }
164
165
166
167
168 while (free) {
169 mpnt = free;
170 free = free->vm_next;
171 if (mpnt->vm_ops->close)
172 mpnt->vm_ops->close(mpnt);
173 kfree(mpnt);
174 }
175
176 unmap_page_range(addr, len);
177 return 0;
178 bad_munmap:
179
180
181
182 while (free) {
183 mpnt = free;
184 free = free->vm_next;
185 mpnt->vm_next = current->mmap;
186 current->mmap = mpnt;
187 }
188 return -EINVAL;
189 }
190
191
/*
 * Default per-file mmap operation for block-backed regular files.
 * Validates the request, probes the first block via bread() to make
 * sure the file is readable, then installs a vm_area_struct whose
 * vm_ops (file_mmap) will fault pages in on demand.
 *
 * NOTE(review): error paths iput(inode) but the success path does
 * inode->i_count++ — presumably the caller holds a reference that is
 * consumed on failure and duplicated on success; confirm against
 * do_mmap's calling convention.
 */
int generic_mmap(struct inode * inode, struct file * file,
	unsigned long addr, size_t len, int prot, unsigned long off)
{
	struct vm_area_struct * mpnt;
	extern struct vm_operations_struct file_mmap;
	struct buffer_head * bh;

	/* File offset must be block-aligned for the fault handler. */
	if (off & (inode->i_sb->s_blocksize - 1))
		return -EINVAL;

	/* Cannot map past the end of physical memory. */
	if (len > high_memory || off > high_memory - len)
		return -ENXIO;

	/* Only the standard flat user segment layout is supported. */
	if (get_limit(USER_DS) != TASK_SIZE)
		return -EINVAL;

	/* Must be a readable regular file on a mounted filesystem. */
	if (!inode->i_sb || !S_ISREG(inode->i_mode) || !permission(inode,MAY_READ)) {
		iput(inode);
		return -EACCES;
	}
	/* The fs must support bmap so faults can find disk blocks. */
	if (!inode->i_op || !inode->i_op->bmap) {
		iput(inode);
		return -ENOEXEC;
	}
	/* Probe-read the first block to fail early on I/O errors. */
	if (!(bh = bread(inode->i_dev,bmap(inode,0),inode->i_sb->s_blocksize))) {
		iput(inode);
		return -EACCES;
	}
	/* Record the access time unless the fs is mounted read-only. */
	if (!IS_RDONLY(inode)) {
		inode->i_atime = CURRENT_TIME;
		inode->i_dirt = 1;
	}
	brelse(bh);

	mpnt = (struct vm_area_struct * ) kmalloc(sizeof(struct vm_area_struct), GFP_KERNEL);
	if (!mpnt){
		iput(inode);
		return -ENOMEM;
	}

	/* Clear any existing mappings in the target range first. */
	unmap_page_range(addr, len);
	mpnt->vm_task = current;
	mpnt->vm_start = addr;
	mpnt->vm_end = addr + len;
	/* 'prot' is already a hardware page mask built by do_mmap. */
	mpnt->vm_page_prot = prot;
	mpnt->vm_share = NULL;
	mpnt->vm_inode = inode;
	inode->i_count++;
	mpnt->vm_offset = off;
	mpnt->vm_ops = &file_mmap;
	/* Link the new vma at the head of the task's mapping list. */
	mpnt->vm_next = current->mmap;
	current->mmap = mpnt;
#if 0
	printk("VFS: Loaded mmap at %08x - %08x\n",
		mpnt->vm_start, mpnt->vm_end);
#endif
	return 0;
}
250