This source file includes following definitions.
- do_mmap
- sys_mmap
- unmap_fixup
- sys_mprotect
- sys_munmap
- do_munmap
- generic_mmap
- insert_vm_struct
- merge_segments
- anon_map
- ignoff_mergep
1
2
3
4
5
6 #include <linux/stat.h>
7 #include <linux/sched.h>
8 #include <linux/kernel.h>
9 #include <linux/mm.h>
10 #include <linux/shm.h>
11 #include <linux/errno.h>
12 #include <linux/mman.h>
13 #include <linux/string.h>
14 #include <linux/malloc.h>
15
16 #include <asm/segment.h>
17 #include <asm/system.h>
18
19 static int anon_map(struct inode *, struct file *,
20 unsigned long, size_t, int,
21 unsigned long);
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
/*
 * Map "len" bytes at address "addr" for the current task, backed by
 * "file" starting at offset "off" (file == NULL gives an anonymous,
 * zero-filled mapping).  Returns the address actually used on success;
 * a negative errno on an early validation failure; or -1 with
 * current->errno set when the underlying map operation fails.
 */
int do_mmap(struct file * file, unsigned long addr, unsigned long len,
	unsigned long prot, unsigned long flags, unsigned long off)
{
	int mask, error;

	/* A zero-length request maps nothing and trivially "succeeds". */
	if ((len = PAGE_ALIGN(len)) == 0)
		return addr;

	/* Range must fit entirely inside the user address space. */
	if (addr > TASK_SIZE || len > TASK_SIZE || addr > TASK_SIZE-len)
		return -EINVAL;

	/*
	 * File-backed mappings must agree with the file's open mode:
	 * a writable shared map needs the file open for writing
	 * (f_mode & 2), and any file map needs read access (f_mode & 1).
	 */
	if (file != NULL)
		switch (flags & MAP_TYPE) {
		case MAP_SHARED:
			if ((prot & PROT_WRITE) && !(file->f_mode & 2))
				return -EACCES;
			/* fall through: shared maps also need read access */
		case MAP_PRIVATE:
			if (!(file->f_mode & 1))
				return -EACCES;
			break;

		default:
			return -EINVAL;
		}

	/*
	 * Choose the target address.  MAP_FIXED means the caller's
	 * (page-aligned) address is mandatory; otherwise do a first-fit
	 * search of [SHM_RANGE_START, SHM_RANGE_END) for a free hole.
	 */
	if (flags & MAP_FIXED) {
		if (addr & ~PAGE_MASK)
			return -EINVAL;
		if (len > TASK_SIZE || addr > TASK_SIZE - len)
			return -EINVAL;
	} else {
		struct vm_area_struct * vmm;

		/* Restart the area walk each time the candidate range
		   collides with an existing area; bump addr past it. */
		addr = SHM_RANGE_START;
		while (addr+len < SHM_RANGE_END) {
			for (vmm = current->mm->mmap ; vmm ; vmm = vmm->vm_next) {
				if (addr >= vmm->vm_end)
					continue;
				if (addr + len <= vmm->vm_start)
					continue;
				addr = PAGE_ALIGN(vmm->vm_end);
				break;
			}
			if (!vmm)	/* walked the whole list: no collision */
				break;
		}
		if (addr+len >= SHM_RANGE_END)
			return -ENOMEM;
	}

	/*
	 * Translate the PROT_* bits into hardware page protections.
	 * A writable private mapping gets copy-on-write pages.
	 */
	if (file && (!file->f_op || !file->f_op->mmap))
		return -ENODEV;
	mask = 0;
	if (prot & (PROT_READ | PROT_EXEC))
		mask |= PAGE_READONLY;
	if (prot & PROT_WRITE)
		if ((flags & MAP_TYPE) == MAP_PRIVATE)
			mask |= PAGE_COPY;
		else
			mask |= PAGE_SHARED;
	if (!mask)
		mask = PAGE_PRESENT;

	/* Clear away any old mappings overlapping the target range. */
	do_munmap(addr, len);

	if (file)
		error = file->f_op->mmap(file->f_inode, file, addr, len, mask, off);
	else
		error = anon_map(NULL, NULL, addr, len, mask, off);

	if (!error)
		return addr;

	/* Mapping failed: report via current->errno, return -1. */
	if (!current->errno)
		current->errno = -error;
	return -1;
}
133
134 asmlinkage int sys_mmap(unsigned long *buffer)
135 {
136 int error;
137 unsigned long flags;
138 struct file * file = NULL;
139
140 error = verify_area(VERIFY_READ, buffer, 6*4);
141 if (error)
142 return error;
143 flags = get_fs_long(buffer+3);
144 if (!(flags & MAP_ANONYMOUS)) {
145 unsigned long fd = get_fs_long(buffer+4);
146 if (fd >= NR_OPEN || !(file = current->files->fd[fd]))
147 return -EBADF;
148 }
149 return do_mmap(file, get_fs_long(buffer), get_fs_long(buffer+1),
150 get_fs_long(buffer+2), flags, get_fs_long(buffer+5));
151 }
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175 void unmap_fixup(struct vm_area_struct *area,
176 unsigned long addr, size_t len)
177 {
178 struct vm_area_struct *mpnt;
179 unsigned long end = addr + len;
180
181 if (addr < area->vm_start || addr >= area->vm_end ||
182 end <= area->vm_start || end > area->vm_end ||
183 end < addr)
184 {
185 printk("unmap_fixup: area=%lx-%lx, unmap %lx-%lx!!\n",
186 area->vm_start, area->vm_end, addr, end);
187 return;
188 }
189
190
191 if (addr == area->vm_start && end == area->vm_end) {
192 if (area->vm_ops && area->vm_ops->close)
193 area->vm_ops->close(area);
194 if (area->vm_inode)
195 iput(area->vm_inode);
196 return;
197 }
198
199
200 if (addr >= area->vm_start && end == area->vm_end)
201 area->vm_end = addr;
202 if (addr == area->vm_start && end <= area->vm_end) {
203 area->vm_offset += (end - area->vm_start);
204 area->vm_start = end;
205 }
206
207
208 if (addr > area->vm_start && end < area->vm_end)
209 {
210
211 mpnt = (struct vm_area_struct *)kmalloc(sizeof(*mpnt), GFP_KERNEL);
212
213 *mpnt = *area;
214 mpnt->vm_offset += (end - area->vm_start);
215 mpnt->vm_start = end;
216 if (mpnt->vm_inode)
217 mpnt->vm_inode->i_count++;
218 area->vm_end = addr;
219 insert_vm_struct(current, mpnt);
220 }
221
222
223 mpnt = (struct vm_area_struct *)kmalloc(sizeof(*mpnt), GFP_KERNEL);
224 *mpnt = *area;
225 insert_vm_struct(current, mpnt);
226 }
227
228
/* mprotect(2) is not implemented yet: every request is rejected. */
asmlinkage int sys_mprotect(unsigned long addr, size_t len, unsigned long prot)
{
	return -EINVAL;
}
233
/* munmap(2) entry point: all work is done by do_munmap(). */
asmlinkage int sys_munmap(unsigned long addr, size_t len)
{
	return do_munmap(addr, len);
}
238
239
240
241
242
243
244
245 int do_munmap(unsigned long addr, size_t len)
246 {
247 struct vm_area_struct *mpnt, **npp, *free;
248
249 if ((addr & ~PAGE_MASK) || addr > TASK_SIZE || len > TASK_SIZE-addr)
250 return -EINVAL;
251
252 if ((len = PAGE_ALIGN(len)) == 0)
253 return 0;
254
255
256
257
258
259
260
261 npp = ¤t->mm->mmap;
262 free = NULL;
263 for (mpnt = *npp; mpnt != NULL; mpnt = *npp) {
264 unsigned long end = addr+len;
265
266 if ((addr < mpnt->vm_start && end <= mpnt->vm_start) ||
267 (addr >= mpnt->vm_end && end > mpnt->vm_end))
268 {
269 npp = &mpnt->vm_next;
270 continue;
271 }
272
273 *npp = mpnt->vm_next;
274 mpnt->vm_next = free;
275 free = mpnt;
276 }
277
278 if (free == NULL)
279 return 0;
280
281
282
283
284
285
286
287 while (free) {
288 unsigned long st, end;
289
290 mpnt = free;
291 free = free->vm_next;
292
293 st = addr < mpnt->vm_start ? mpnt->vm_start : addr;
294 end = addr+len;
295 end = end > mpnt->vm_end ? mpnt->vm_end : end;
296
297 if (mpnt->vm_ops && mpnt->vm_ops->unmap)
298 mpnt->vm_ops->unmap(mpnt, st, end-st);
299 else
300 unmap_fixup(mpnt, st, end-st);
301
302 kfree(mpnt);
303 }
304
305 unmap_page_range(addr, len);
306 return 0;
307 }
308
309
310 int generic_mmap(struct inode * inode, struct file * file,
311 unsigned long addr, size_t len, int prot, unsigned long off)
312 {
313 struct vm_area_struct * mpnt;
314 extern struct vm_operations_struct file_mmap;
315 struct buffer_head * bh;
316
317 if (prot & PAGE_RW)
318 return -EINVAL;
319 if (off & (inode->i_sb->s_blocksize - 1))
320 return -EINVAL;
321 if (!inode->i_sb || !S_ISREG(inode->i_mode))
322 return -EACCES;
323 if (!inode->i_op || !inode->i_op->bmap)
324 return -ENOEXEC;
325 if (!(bh = bread(inode->i_dev,bmap(inode,0),inode->i_sb->s_blocksize)))
326 return -EACCES;
327 if (!IS_RDONLY(inode)) {
328 inode->i_atime = CURRENT_TIME;
329 inode->i_dirt = 1;
330 }
331 brelse(bh);
332
333 mpnt = (struct vm_area_struct * ) kmalloc(sizeof(struct vm_area_struct), GFP_KERNEL);
334 if (!mpnt)
335 return -ENOMEM;
336
337 unmap_page_range(addr, len);
338 mpnt->vm_task = current;
339 mpnt->vm_start = addr;
340 mpnt->vm_end = addr + len;
341 mpnt->vm_page_prot = prot;
342 mpnt->vm_flags = 0;
343 mpnt->vm_share = NULL;
344 mpnt->vm_inode = inode;
345 inode->i_count++;
346 mpnt->vm_offset = off;
347 mpnt->vm_ops = &file_mmap;
348 insert_vm_struct(current, mpnt);
349 merge_segments(current->mm->mmap, NULL, NULL);
350
351 return 0;
352 }
353
354
355
356
357
358
359
360 void insert_vm_struct(struct task_struct *t, struct vm_area_struct *vmp)
361 {
362 struct vm_area_struct **nxtpp, *mpnt;
363
364 nxtpp = &t->mm->mmap;
365
366 for(mpnt = t->mm->mmap; mpnt != NULL; mpnt = mpnt->vm_next)
367 {
368 if (mpnt->vm_start > vmp->vm_start)
369 break;
370 nxtpp = &mpnt->vm_next;
371
372 if ((vmp->vm_start >= mpnt->vm_start &&
373 vmp->vm_start < mpnt->vm_end) ||
374 (vmp->vm_end >= mpnt->vm_start &&
375 vmp->vm_end < mpnt->vm_end))
376 printk("insert_vm_struct: ins area %lx-%lx in area %lx-%lx\n",
377 vmp->vm_start, vmp->vm_end,
378 mpnt->vm_start, vmp->vm_end);
379 }
380
381 vmp->vm_next = mpnt;
382
383 *nxtpp = vmp;
384 }
385
386
387
388
389
390
/*
 * Walk the area list and coalesce adjacent compatible vm_areas.
 * "mergep" (with opaque argument "mpd") decides whether two
 * neighbours' offsets permit merging; when it is NULL the default
 * test requires the second area's offset to follow on directly from
 * the end of the first.
 */
void merge_segments(struct vm_area_struct *mpnt,
		    map_mergep_fnp mergep, void *mpd)
{
	struct vm_area_struct *prev, *next;

	if (mpnt == NULL)
		return;

	for(prev = mpnt, mpnt = mpnt->vm_next;
	    mpnt != NULL;
	    prev = mpnt, mpnt = next)
	{
		int mp;	/* non-zero when offsets permit the merge */

		/* Save the successor now: mpnt may be freed below. */
		next = mpnt->vm_next;

		if (mergep == NULL)
		{
			unsigned long psz = prev->vm_end - prev->vm_start;
			mp = prev->vm_offset + psz == mpnt->vm_offset;
		}
		else
			mp = (*mergep)(prev, mpnt, mpd);

		/*
		 * Merge only when everything else matches too: same
		 * ops, protection, inode, flags and share list, and the
		 * two areas are exactly adjacent both in the address
		 * space and in the list.
		 */
		if (prev->vm_ops != mpnt->vm_ops ||
		    prev->vm_page_prot != mpnt->vm_page_prot ||
		    prev->vm_inode != mpnt->vm_inode ||
		    prev->vm_end != mpnt->vm_start ||
		    !mp ||
		    prev->vm_flags != mpnt->vm_flags ||
		    prev->vm_share != mpnt->vm_share ||
		    prev->vm_next != mpnt)
			continue;

		/*
		 * Extend prev over mpnt, drop mpnt from the list and
		 * free it.  Keep prev as the merge candidate for the
		 * next iteration so chains of areas collapse fully.
		 */
		prev->vm_end = mpnt->vm_end;
		prev->vm_next = mpnt->vm_next;
		kfree_s(mpnt, sizeof(*mpnt));
		mpnt = prev;
	}
}
441
442
443
444
445
446 static int anon_map(struct inode *ino, struct file * file,
447 unsigned long addr, size_t len, int mask,
448 unsigned long off)
449 {
450 struct vm_area_struct * mpnt;
451
452 if (zeromap_page_range(addr, len, mask))
453 return -ENOMEM;
454
455 mpnt = (struct vm_area_struct * ) kmalloc(sizeof(struct vm_area_struct), GFP_KERNEL);
456 if (!mpnt)
457 return -ENOMEM;
458
459 mpnt->vm_task = current;
460 mpnt->vm_start = addr;
461 mpnt->vm_end = addr + len;
462 mpnt->vm_page_prot = mask;
463 mpnt->vm_flags = 0;
464 mpnt->vm_share = NULL;
465 mpnt->vm_inode = NULL;
466 mpnt->vm_offset = 0;
467 mpnt->vm_ops = NULL;
468 insert_vm_struct(current, mpnt);
469 merge_segments(current->mm->mmap, ignoff_mergep, NULL);
470
471 return 0;
472 }
473
474
475 int ignoff_mergep(const struct vm_area_struct *m1,
476 const struct vm_area_struct *m2,
477 void *data)
478 {
479 if (m1->vm_inode != m2->vm_inode)
480 return 0;
481
482 return (struct inode *)data == m1->vm_inode;
483 }