This source file includes the following definitions:
- do_mmap
- get_unmapped_area
- sys_mmap
- unmap_fixup
- sys_munmap
- do_munmap
- insert_vm_struct
- remove_shared_vm_struct
- merge_segments
- anon_map
/*
 * linux/mm/mmap.c
 *
 * Memory-mapping support: do_mmap()/do_munmap() and the
 * vm_area_struct bookkeeping (insert, remove, merge) behind them.
 */

#include <linux/stat.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/shm.h>
#include <linux/errno.h>
#include <linux/mman.h>
#include <linux/string.h>
#include <linux/malloc.h>

#include <asm/segment.h>
#include <asm/system.h>

static int anon_map(struct inode *, struct file *, struct vm_area_struct *);

/*
 * do_mmap() does the basic sanity and permission checks, builds a
 * vm_area_struct describing the new mapping, removes any old mappings
 * in the way, and then hands the area to the object-specific mapper
 * (file->f_op->mmap for files, anon_map() for anonymous memory).
 */
int do_mmap(struct file * file, unsigned long addr, unsigned long len,
	unsigned long prot, unsigned long flags, unsigned long off)
{
	int mask, error;
	struct vm_area_struct * vma;

	if ((len = PAGE_ALIGN(len)) == 0)
		return addr;

	if (addr > TASK_SIZE || len > TASK_SIZE || addr > TASK_SIZE-len)
		return -EINVAL;

	/* offset overflow? */
	if (off + len < off)
		return -EINVAL;

	/*
	 * Do simple checking here so the lower-level routines won't have
	 * to: the mapping type must agree with the file's open mode.
	 */
	if (file != NULL) {
		switch (flags & MAP_TYPE) {
		case MAP_SHARED:
			if ((prot & PROT_WRITE) && !(file->f_mode & 2))
				return -EACCES;
			/* fall through: shared mappings need read access too */
		case MAP_PRIVATE:
			if (!(file->f_mode & 1))
				return -EACCES;
			break;

		default:
			return -EINVAL;
		}
		if ((flags & MAP_DENYWRITE) && (file->f_inode->i_wcount > 0))
			return -ETXTBSY;
	} else if ((flags & MAP_TYPE) == MAP_SHARED)
		return -EINVAL;

	/*
	 * Obtain the address to map to: verify a MAP_FIXED request,
	 * or pick an unmapped range ourselves.
	 */
	if (flags & MAP_FIXED) {
		if (addr & ~PAGE_MASK)
			return -EINVAL;
		if (len > TASK_SIZE || addr > TASK_SIZE - len)
			return -EINVAL;
	} else {
		addr = get_unmapped_area(len);
		if (!addr)
			return -ENOMEM;
	}

	/*
	 * Determine the object being mapped, and work out the page
	 * protection bits from prot and the mapping type.
	 */
	if (file && (!file->f_op || !file->f_op->mmap))
		return -ENODEV;
	mask = PAGE_PRESENT;
	if (prot & (PROT_READ | PROT_EXEC))
		mask |= PAGE_READONLY;
	if (prot & PROT_WRITE)
		if ((flags & MAP_TYPE) == MAP_PRIVATE)
			mask |= PAGE_COPY;
		else
			mask |= PAGE_SHARED;

	vma = (struct vm_area_struct *)kmalloc(sizeof(struct vm_area_struct),
		GFP_KERNEL);
	if (!vma)
		return -ENOMEM;

	vma->vm_task = current;
	vma->vm_start = addr;
	vma->vm_end = addr + len;
	vma->vm_page_prot = mask;
	vma->vm_flags = prot & (VM_READ | VM_WRITE | VM_EXEC);
	vma->vm_flags |= flags & (VM_GROWSDOWN | VM_DENYWRITE | VM_EXECUTABLE);

	if (file) {
		if (file->f_mode & 1)
			vma->vm_flags |= VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC;
		if (flags & MAP_SHARED) {
			vma->vm_flags |= VM_SHARED | VM_MAYSHARE;
			if (!(file->f_mode & 2))
				vma->vm_flags &= ~VM_MAYWRITE;
		}
	} else
		vma->vm_flags |= VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC;
	vma->vm_ops = NULL;
	vma->vm_offset = off;
	vma->vm_inode = NULL;
	vma->vm_pte = 0;

	/* Clear any old mappings in the range before installing the new one. */
	do_munmap(addr, len);

	if (file)
		error = file->f_op->mmap(file->f_inode, file, vma);
	else
		error = anon_map(NULL, NULL, vma);

	if (error) {
		kfree(vma);
		return error;
	}
	insert_vm_struct(current, vma);
	merge_segments(current->mm->mmap);
	return addr;
}
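
A hypothetical kernel-side caller (outside this file) would use do_mmap()
roughly as follows; this is a minimal sketch under assumed inputs (an
already-open file and page-aligned addr/len/off), not an excerpt:

/* Sketch: map 'len' bytes of 'file' at a fixed address as a private,
 * read-only, executable region -- the shape of a text-segment mapping. */
static int map_text_sketch(struct file *file, unsigned long addr,
			   unsigned long len, unsigned long off)
{
	int error = do_mmap(file, addr, len, PROT_READ | PROT_EXEC,
			    MAP_FIXED | MAP_PRIVATE, off);
	if (error != (int) addr)	/* success returns the mapped address */
		return error;		/* failure returns a negative errno */
	return 0;
}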

/*
 * Find an unused, page-aligned address range of the given length
 * between SHM_RANGE_START and SHM_RANGE_END, by a first-fit scan
 * of the address-sorted vma list.  Returns 0 on failure.
 */
unsigned long get_unmapped_area(unsigned long len)
{
	struct vm_area_struct * vmm;
	unsigned long gap_start = 0, gap_end;

	for (vmm = current->mm->mmap; ; vmm = vmm->vm_next) {
		if (gap_start < SHM_RANGE_START)
			gap_start = SHM_RANGE_START;
		if (!vmm || ((gap_end = vmm->vm_start) > SHM_RANGE_END))
			gap_end = SHM_RANGE_END;
		gap_start = PAGE_ALIGN(gap_start);
		gap_end &= PAGE_MASK;
		if ((gap_start <= gap_end) && (gap_end - gap_start >= len))
			return gap_start;
		if (!vmm)
			return 0;
		gap_start = vmm->vm_end;
	}
}
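
The loop above is a first-fit search over the address-sorted vma list,
clamped to [SHM_RANGE_START, SHM_RANGE_END]. The same idea in a
self-contained user-space sketch (toy types and range constants, for
illustration only):

#include <stdio.h>

#define PAGE_SIZE	4096UL
#define PAGE_MASK	(~(PAGE_SIZE - 1))
#define PAGE_ALIGN(a)	(((a) + PAGE_SIZE - 1) & PAGE_MASK)

/* Stand-ins for SHM_RANGE_START/SHM_RANGE_END and vm_area_struct. */
#define RANGE_START	0x40000000UL
#define RANGE_END	0x60000000UL

struct area { unsigned long start, end; struct area *next; };

static unsigned long find_gap(struct area *map, unsigned long len)
{
	unsigned long gap_start = 0, gap_end;
	struct area *a;

	for (a = map; ; a = a->next) {
		if (gap_start < RANGE_START)
			gap_start = RANGE_START;
		if (!a || (gap_end = a->start) > RANGE_END)
			gap_end = RANGE_END;
		gap_start = PAGE_ALIGN(gap_start);
		gap_end &= PAGE_MASK;
		if (gap_start <= gap_end && gap_end - gap_start >= len)
			return gap_start;	/* first hole that fits */
		if (!a)
			return 0;		/* ran off the list: no room */
		gap_start = a->end;
	}
}

int main(void)
{
	struct area b = { 0x40010000UL, 0x40020000UL, NULL };
	struct area a = { 0x40000000UL, 0x40008000UL, &b };

	/* The first hole big enough is 0x40008000..0x40010000. */
	printf("gap at %#lx\n", find_gap(&a, 0x8000));
	return 0;
}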

/*
 * The old mmap() system-call interface: the six arguments
 * (addr, len, prot, flags, fd, offset) arrive as one user-space
 * array of longs, which we read with get_fs_long().
 */
asmlinkage int sys_mmap(unsigned long *buffer)
{
	int error;
	unsigned long flags;
	struct file * file = NULL;

	error = verify_area(VERIFY_READ, buffer, 6*sizeof(long));
	if (error)
		return error;
	flags = get_fs_long(buffer+3);
	if (!(flags & MAP_ANONYMOUS)) {
		unsigned long fd = get_fs_long(buffer+4);
		if (fd >= NR_OPEN || !(file = current->files->fd[fd]))
			return -EBADF;
	}
	return do_mmap(file, get_fs_long(buffer), get_fs_long(buffer+1),
		get_fs_long(buffer+2), flags, get_fs_long(buffer+5));
}
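
As the get_fs_long() calls show, this early mmap() takes a single pointer
to six longs (addr, len, prot, flags, fd, offset) instead of six separate
arguments. A user-space wrapper of that era could marshal them roughly as
below; the wrapper name is made up and 90 is assumed to be the i386
__NR_mmap of the time:

#define _GNU_SOURCE
#include <unistd.h>

static void *old_mmap(void *addr, unsigned long len, int prot,
		      int flags, int fd, unsigned long off)
{
	unsigned long args[6] = {
		(unsigned long) addr, len, (unsigned long) prot,
		(unsigned long) flags, (unsigned long) fd, off
	};
	/* One pointer argument; the kernel reads all six longs from it. */
	return (void *) syscall(90, args);
}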

/*
 * Normal function to fix up a mapping.  By the time it is called the
 * area has been removed from the process list, so it must be
 * reinserted if anything survives.  Four cases:
 *
 *   1. unmapping the whole area
 *   2. unmapping from the start of the area to a point in it
 *   3. unmapping from an intermediate point to the end
 *   4. unmapping between two intermediate points, leaving a hole
 *
 * Case 4 creates two new areas, one for each side of the hole.
 */
void unmap_fixup(struct vm_area_struct *area,
		 unsigned long addr, size_t len)
{
	struct vm_area_struct *mpnt;
	unsigned long end = addr + len;

	if (addr < area->vm_start || addr >= area->vm_end ||
	    end <= area->vm_start || end > area->vm_end ||
	    end < addr)
	{
		printk("unmap_fixup: area=%lx-%lx, unmap %lx-%lx!!\n",
			area->vm_start, area->vm_end, addr, end);
		return;
	}

	/* Case 1: unmapping the whole area. */
	if (addr == area->vm_start && end == area->vm_end) {
		if (area->vm_ops && area->vm_ops->close)
			area->vm_ops->close(area);
		if (area->vm_inode)
			iput(area->vm_inode);
		return;
	}

	/* Cases 2 and 3: trim one of the ends. */
	if (addr >= area->vm_start && end == area->vm_end)
		area->vm_end = addr;
	if (addr == area->vm_start && end <= area->vm_end) {
		area->vm_offset += (end - area->vm_start);
		area->vm_start = end;
	}

	/* Case 4: unmapping a hole -- add the end part here; the
	   beginning is handled below like the other cases. */
	if (addr > area->vm_start && end < area->vm_end)
	{
		mpnt = (struct vm_area_struct *)kmalloc(sizeof(*mpnt), GFP_KERNEL);
		if (!mpnt)
			return;
		*mpnt = *area;
		mpnt->vm_offset += (end - area->vm_start);
		mpnt->vm_start = end;
		if (mpnt->vm_inode)
			mpnt->vm_inode->i_count++;
		if (mpnt->vm_ops && mpnt->vm_ops->open)
			mpnt->vm_ops->open(mpnt);
		area->vm_end = addr;	/* truncate the original area */
		insert_vm_struct(current, mpnt);
	}

	/* Construct whatever mapping is needed for the surviving part. */
	mpnt = (struct vm_area_struct *)kmalloc(sizeof(*mpnt), GFP_KERNEL);
	if (!mpnt)
		return;
	*mpnt = *area;
	if (mpnt->vm_ops && mpnt->vm_ops->open)
		mpnt->vm_ops->open(mpnt);
	if (area->vm_ops && area->vm_ops->close) {
		area->vm_end = area->vm_start;
		area->vm_ops->close(area);
	}
	insert_vm_struct(current, mpnt);
}

asmlinkage int sys_munmap(unsigned long addr, size_t len)
{
	return do_munmap(addr, len);
}

/*
 * Munmap is split into two main parts: first we find what needs doing
 * and collect every affected area on a temporary list; then we fix up
 * the areas and drop the relevant pages.
 */
int do_munmap(unsigned long addr, size_t len)
{
	struct vm_area_struct *mpnt, **npp, *free;

	if ((addr & ~PAGE_MASK) || addr > TASK_SIZE || len > TASK_SIZE-addr)
		return -EINVAL;

	if ((len = PAGE_ALIGN(len)) == 0)
		return 0;

	/*
	 * Check which areas are affected: every area that overlaps the
	 * range in any way is moved to the temporary 'free' list.  If
	 * nothing ends up on the list, nothing was affected.
	 */
	npp = &current->mm->mmap;
	free = NULL;
	for (mpnt = *npp; mpnt != NULL; mpnt = *npp) {
		unsigned long end = addr+len;

		if ((addr < mpnt->vm_start && end <= mpnt->vm_start) ||
		    (addr >= mpnt->vm_end && end > mpnt->vm_end))
		{
			npp = &mpnt->vm_next;
			continue;
		}

		*npp = mpnt->vm_next;
		mpnt->vm_next = free;
		free = mpnt;
	}

	if (free == NULL)
		return 0;

	/*
	 * Release the areas on the 'free' list and unmap the page range.
	 * A partially unmapped area puts new vm_area_struct(s) back into
	 * the address space via unmap_fixup().
	 */
	while (free) {
		unsigned long st, end;

		mpnt = free;
		free = free->vm_next;

		remove_shared_vm_struct(mpnt);

		/* Clamp the unmap range to this area's boundaries. */
		st = addr < mpnt->vm_start ? mpnt->vm_start : addr;
		end = addr+len;
		end = end > mpnt->vm_end ? mpnt->vm_end : end;

		if (mpnt->vm_ops && mpnt->vm_ops->unmap)
			mpnt->vm_ops->unmap(mpnt, st, end-st);

		unmap_fixup(mpnt, st, end-st);
		kfree(mpnt);
	}

	unmap_page_range(addr, len);
	return 0;
}

/*
 * Insert a vm structure into the process's mmap list, which is kept
 * sorted by address, and link shared mappings into the inode's
 * circular i_mmap ring.
 */
void insert_vm_struct(struct task_struct *t, struct vm_area_struct *vmp)
{
	struct vm_area_struct **p, *mpnt, *share;
	struct inode * inode;

	p = &t->mm->mmap;
	while ((mpnt = *p) != NULL) {
		if (mpnt->vm_start > vmp->vm_start)
			break;
		if (mpnt->vm_end > vmp->vm_start)
			printk("insert_vm_struct: overlapping memory areas\n");
		p = &mpnt->vm_next;
	}
	vmp->vm_next = mpnt;
	*p = vmp;

	inode = vmp->vm_inode;
	if (!inode)
		return;

	/* Link into the inode's share ring, or start a new one. */
	if ((share = inode->i_mmap)) {
		vmp->vm_next_share = share->vm_next_share;
		vmp->vm_next_share->vm_prev_share = vmp;
		share->vm_next_share = vmp;
		vmp->vm_prev_share = share;
	} else
		inode->i_mmap = vmp->vm_next_share = vmp->vm_prev_share = vmp;
}

/*
 * Remove one vm structure from the inode's circular i_mmap ring.
 */
void remove_shared_vm_struct(struct vm_area_struct *mpnt)
{
	struct inode * inode = mpnt->vm_inode;

	if (!inode)
		return;

	/* Last element on the ring: the inode has no mappings left. */
	if (mpnt->vm_next_share == mpnt) {
		if (inode->i_mmap != mpnt)
			printk("Inode i_mmap ring corrupted\n");
		inode->i_mmap = NULL;
		return;
	}

	if (inode->i_mmap == mpnt)
		inode->i_mmap = mpnt->vm_next_share;

	mpnt->vm_prev_share->vm_next_share = mpnt->vm_next_share;
	mpnt->vm_next_share->vm_prev_share = mpnt->vm_prev_share;
}
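
insert_vm_struct() and remove_shared_vm_struct() together maintain
inode->i_mmap as a circular doubly-linked ring through vm_next_share and
vm_prev_share. The same invariants in a self-contained sketch (toy node
type, not kernel code):

#include <stdio.h>

struct node { int id; struct node *next, *prev; };

/* Insert n after head, or start a one-element ring -- the shape of
 * the share-ring linkage in insert_vm_struct(). */
static void ring_insert(struct node **head, struct node *n)
{
	struct node *h = *head;

	if (h) {
		n->next = h->next;
		n->next->prev = n;
		h->next = n;
		n->prev = h;
	} else
		*head = n->next = n->prev = n;
}

/* Unlink n and fix up *head -- as remove_shared_vm_struct() does. */
static void ring_remove(struct node **head, struct node *n)
{
	if (n->next == n) {	/* last element: ring becomes empty */
		*head = NULL;
		return;
	}
	if (*head == n)
		*head = n->next;
	n->prev->next = n->next;
	n->next->prev = n->prev;
}

int main(void)
{
	struct node a = { 1, NULL, NULL }, b = { 2, NULL, NULL };
	struct node *head = NULL;

	ring_insert(&head, &a);
	ring_insert(&head, &b);
	ring_remove(&head, &a);
	/* Ring now holds only b, which points at itself both ways. */
	printf("id=%d self-ring=%d\n", head->id,
	       head->next == head && head->prev == head);
	return 0;
}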

/*
 * Merge adjacent memory segments where possible, freeing the redundant
 * vm_area_structs.  Assumes the list is sorted by address.
 */
void merge_segments(struct vm_area_struct *mpnt)
{
	struct vm_area_struct *prev, *next;

	if (mpnt == NULL)
		return;

	for (prev = mpnt, mpnt = mpnt->vm_next;
	     mpnt != NULL;
	     prev = mpnt, mpnt = next)
	{
		next = mpnt->vm_next;

		/*
		 * To be mergeable, the two areas must refer to the same
		 * object with the same operations, protection and flags,
		 * and must be adjacent in the address space.
		 */
		if (mpnt->vm_inode != prev->vm_inode)
			continue;
		if (mpnt->vm_pte != prev->vm_pte)
			continue;
		if (mpnt->vm_ops != prev->vm_ops)
			continue;
		if (mpnt->vm_page_prot != prev->vm_page_prot ||
		    mpnt->vm_flags != prev->vm_flags)
			continue;
		if (prev->vm_end != mpnt->vm_start)
			continue;

		/* For inode or SHM mappings the file offsets must be
		   contiguous as well. */
		if ((mpnt->vm_inode != NULL) || (mpnt->vm_flags & VM_SHM)) {
			if (prev->vm_offset + prev->vm_end - prev->vm_start != mpnt->vm_offset)
				continue;
		}

		/*
		 * Merge mpnt into prev and free it, first shrinking mpnt
		 * to an empty range so vm_ops->close sees nothing mapped.
		 */
		prev->vm_end = mpnt->vm_end;
		prev->vm_next = mpnt->vm_next;
		if (mpnt->vm_ops && mpnt->vm_ops->close) {
			mpnt->vm_offset += mpnt->vm_end - mpnt->vm_start;
			mpnt->vm_start = mpnt->vm_end;
			mpnt->vm_ops->close(mpnt);
		}
		remove_shared_vm_struct(mpnt);
		if (mpnt->vm_inode)
			mpnt->vm_inode->i_count--;
		kfree_s(mpnt, sizeof(*mpnt));
		mpnt = prev;
	}
}

/*
 * Map an anonymous region into the process's address space: simply
 * map the range to the zero page with the requested protection.
 */
static int anon_map(struct inode *ino, struct file * file, struct vm_area_struct * vma)
{
	if (zeromap_page_range(vma->vm_start, vma->vm_end - vma->vm_start, vma->vm_page_prot))
		return -ENOMEM;
	return 0;
}