This source file includes following definitions.
- do_mmap
- get_unmapped_area
- sys_mmap
- find_vma
- find_vma_intersection
- avl_neighbours
- avl_rebalance
- avl_insert
- avl_insert_neighbours
- avl_remove
- printk_list
- printk_avl
- avl_checkheights
- avl_checkleft
- avl_checkright
- avl_checkorder
- avl_check
- unmap_fixup
- sys_munmap
- do_munmap
- build_mmap_avl
- exit_mmap
- insert_vm_struct
- remove_shared_vm_struct
- merge_segments
- anon_map
1
2
3
4
5
6 #include <linux/stat.h>
7 #include <linux/sched.h>
8 #include <linux/kernel.h>
9 #include <linux/mm.h>
10 #include <linux/shm.h>
11 #include <linux/errno.h>
12 #include <linux/mman.h>
13 #include <linux/string.h>
14 #include <linux/malloc.h>
15
16 #include <asm/segment.h>
17 #include <asm/system.h>
18 #include <asm/pgtable.h>
19
20 static int anon_map(struct inode *, struct file *, struct vm_area_struct *);
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
/*
 * Map the low 4 bits of vm_flags (read/write/exec/shared) to the
 * hardware page protection used for the mapping.  The __P entries are
 * for private (copy-on-write) mappings, the __S entries for shared
 * ones; indexed via protection_map[vm_flags & 0x0f].
 */
pgprot_t protection_map[16] = {
	__P000, __P001, __P010, __P011, __P100, __P101, __P110, __P111,
	__S000, __S001, __S010, __S011, __S100, __S101, __S110, __S111
};
43
/*
 * do_mmap() -- create a new memory mapping for the current task.
 *
 * file:  file to map, or NULL for an anonymous mapping.
 * addr:  requested start address (only a hint unless MAP_FIXED).
 * len:   length of the mapping, rounded up to a whole page.
 * prot:  PROT_* protection bits.
 * flags: MAP_* flags (mapping type, MAP_FIXED, MAP_DENYWRITE, ...).
 * off:   byte offset into the file.
 *
 * Returns the start address of the mapping, or a negative errno.
 */
unsigned long do_mmap(struct file * file, unsigned long addr, unsigned long len,
	unsigned long prot, unsigned long flags, unsigned long off)
{
	int error;
	struct vm_area_struct * vma;

	/* a zero-length mapping is a successful no-op */
	if ((len = PAGE_ALIGN(len)) == 0)
		return addr;

	if (addr > TASK_SIZE || len > TASK_SIZE || addr > TASK_SIZE-len)
		return -EINVAL;

	/* reject off+len wrapping around the address space */
	if (off + len < off)
		return -EINVAL;

	/*
	 * For file mappings the file's open mode must permit the
	 * requested mapping type and protection.
	 */
	if (file != NULL) {
		switch (flags & MAP_TYPE) {
		case MAP_SHARED:
			/* writable shared mapping needs the file open for write */
			if ((prot & PROT_WRITE) && !(file->f_mode & 2))
				return -EACCES;
			/* fall through: shared mappings need read access too */
		case MAP_PRIVATE:
			if (!(file->f_mode & 1))
				return -EACCES;
			break;

		default:
			return -EINVAL;
		}
		if ((flags & MAP_DENYWRITE) && (file->f_inode->i_wcount > 0))
			return -ETXTBSY;
	} else if ((flags & MAP_TYPE) != MAP_PRIVATE)
		return -EINVAL;

	/*
	 * Determine the address: MAP_FIXED takes addr literally (it must
	 * be page aligned and fit in the task's address space), otherwise
	 * search for a free range, treating addr as a hint.
	 */
	if (flags & MAP_FIXED) {
		if (addr & ~PAGE_MASK)
			return -EINVAL;
		if (len > TASK_SIZE || addr > TASK_SIZE - len)
			return -EINVAL;
	} else {
		addr = get_unmapped_area(addr, len);
		if (!addr)
			return -ENOMEM;
	}

	/* the file, if any, must actually support mmap */
	if (file && (!file->f_op || !file->f_op->mmap))
		return -ENODEV;

	vma = (struct vm_area_struct *)kmalloc(sizeof(struct vm_area_struct),
		GFP_KERNEL);
	if (!vma)
		return -ENOMEM;

	vma->vm_task = current;
	vma->vm_start = addr;
	vma->vm_end = addr + len;
	/* NOTE(review): assumes PROT_READ/WRITE/EXEC coincide with
	 * VM_READ/WRITE/EXEC bit values -- confirm against the headers */
	vma->vm_flags = prot & (VM_READ | VM_WRITE | VM_EXEC);
	vma->vm_flags |= flags & (VM_GROWSDOWN | VM_DENYWRITE | VM_EXECUTABLE);

	if (file) {
		if (file->f_mode & 1)
			vma->vm_flags |= VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC;
		if (flags & MAP_SHARED) {
			vma->vm_flags |= VM_SHARED | VM_MAYSHARE;
			/*
			 * A shared mapping of a file not open for writing
			 * can never become writable, so drop VM_SHARED and
			 * VM_MAYWRITE and treat it like a private mapping.
			 */
			if (!(file->f_mode & 2))
				vma->vm_flags &= ~(VM_MAYWRITE | VM_SHARED);
		}
	} else
		vma->vm_flags |= VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC;
	vma->vm_page_prot = protection_map[vma->vm_flags & 0x0f];
	vma->vm_ops = NULL;
	vma->vm_offset = off;
	vma->vm_inode = NULL;
	vma->vm_pte = 0;

	/* clear any old mappings over the range before installing ours */
	do_munmap(addr, len);

	if (file)
		error = file->f_op->mmap(file->f_inode, file, vma);
	else
		error = anon_map(NULL, NULL, vma);

	if (error) {
		kfree(vma);
		return error;
	}
	/* link into the task's list/tree and coalesce with neighbours */
	insert_vm_struct(current, vma);
	merge_segments(current, vma->vm_start, vma->vm_end);
	return addr;
}
161
162
163
164
165
166
167 unsigned long get_unmapped_area(unsigned long addr, unsigned long len)
168 {
169 struct vm_area_struct * vmm;
170
171 if (len > TASK_SIZE)
172 return 0;
173 if (!addr)
174 addr = TASK_SIZE / 3;
175 addr = PAGE_ALIGN(addr);
176
177 for (vmm = current->mm->mmap; ; vmm = vmm->vm_next) {
178 if (TASK_SIZE - len < addr)
179 return 0;
180 if (!vmm)
181 return addr;
182 if (addr > vmm->vm_end)
183 continue;
184 if (addr + len > vmm->vm_start) {
185 addr = vmm->vm_end;
186 continue;
187 }
188 return addr;
189 }
190 }
191
192 asmlinkage int sys_mmap(unsigned long *buffer)
193 {
194 int error;
195 unsigned long flags;
196 struct file * file = NULL;
197
198 error = verify_area(VERIFY_READ, buffer, 6*sizeof(long));
199 if (error)
200 return error;
201 flags = get_fs_long(buffer+3);
202 if (!(flags & MAP_ANONYMOUS)) {
203 unsigned long fd = get_fs_long(buffer+4);
204 if (fd >= NR_OPEN || !(file = current->files->fd[fd]))
205 return -EBADF;
206 }
207 return do_mmap(file, get_fs_long(buffer), get_fs_long(buffer+1),
208 get_fs_long(buffer+2), flags, get_fs_long(buffer+5));
209 }
210
211
212
213
214
215
216
217
218
219
220
221 #define vm_avl_key vm_end
222 #define vm_avl_key_t unsigned long
223
224
225
226
227
228
229
230
231
232
233 #define avl_empty (struct vm_area_struct *) NULL
234
235
236 #define avl_maxheight 41
237 #define heightof(tree) ((tree) == avl_empty ? 0 : (tree)->vm_avl_height)
238
239
240
241
242
243
244
245
246
247 struct vm_area_struct * find_vma (struct task_struct * task, unsigned long addr)
248 {
249 #if 0
250 struct vm_area_struct * vma;
251
252 for (vma = task->mm->mmap ; ; vma = vma->vm_next) {
253 if (!vma)
254 return NULL;
255 if (vma->vm_end > addr)
256 return vma;
257 }
258 #else
259 struct vm_area_struct * result = NULL;
260 struct vm_area_struct * tree;
261
262 for (tree = task->mm->mmap_avl ; ; ) {
263 if (tree == avl_empty)
264 return result;
265 if (tree->vm_end > addr) {
266 if (tree->vm_start <= addr)
267 return tree;
268 result = tree;
269 tree = tree->vm_avl_left;
270 } else
271 tree = tree->vm_avl_right;
272 }
273 #endif
274 }
275
276
277
278 struct vm_area_struct * find_vma_intersection (struct task_struct * task, unsigned long start_addr, unsigned long end_addr)
279 {
280 struct vm_area_struct * vma;
281
282 #if 0
283 for (vma = task->mm->mmap; vma; vma = vma->vm_next) {
284 if (end_addr <= vma->vm_start)
285 break;
286 if (start_addr < vma->vm_end)
287 return vma;
288 }
289 return NULL;
290 #else
291 vma = find_vma(task,start_addr);
292 if (!vma || end_addr <= vma->vm_start)
293 return NULL;
294 return vma;
295 #endif
296 }
297
298
/*
 * Given `node`, which must already be in `tree`, find its predecessor
 * and successor in key (vm_end) order and return them through
 * *to_the_left and *to_the_right (NULL when there is none).  Also
 * cross-checks the tree against the linear vm_next list and printks
 * on any inconsistency.
 */
static void avl_neighbours (struct vm_area_struct * node, struct vm_area_struct * tree, struct vm_area_struct ** to_the_left, struct vm_area_struct ** to_the_right)
{
	vm_avl_key_t key = node->vm_avl_key;

	*to_the_left = *to_the_right = NULL;
	for (;;) {
		if (tree == avl_empty) {
			printk("avl_neighbours: node not found in the tree\n");
			return;
		}
		if (key == tree->vm_avl_key)
			break;
		if (key < tree->vm_avl_key) {
			/* descending left: this ancestor is a successor candidate */
			*to_the_right = tree;
			tree = tree->vm_avl_left;
		} else {
			/* descending right: this ancestor is a predecessor candidate */
			*to_the_left = tree;
			tree = tree->vm_avl_right;
		}
	}
	if (tree != node) {
		printk("avl_neighbours: node not exactly found in the tree\n");
		return;
	}
	/* a non-empty left subtree's rightmost node is the true predecessor */
	if (tree->vm_avl_left != avl_empty) {
		struct vm_area_struct * node;
		for (node = tree->vm_avl_left; node->vm_avl_right != avl_empty; node = node->vm_avl_right)
			continue;
		*to_the_left = node;
	}
	/* a non-empty right subtree's leftmost node is the true successor */
	if (tree->vm_avl_right != avl_empty) {
		struct vm_area_struct * node;
		for (node = tree->vm_avl_right; node->vm_avl_left != avl_empty; node = node->vm_avl_left)
			continue;
		*to_the_right = node;
	}
	/* the tree neighbours must agree with the vm_next list */
	if ((*to_the_left && ((*to_the_left)->vm_next != node)) || (node->vm_next != *to_the_right))
		printk("avl_neighbours: tree inconsistent with list\n");
}
338
339
340
341
342
343
344
/*
 * Rebalance the AVL tree after an insertion or deletion.
 * nodeplaces_ptr points just past a stack of `count` node slots --
 * the path from the root down to the parent of the changed node.
 * Each slot is revisited bottom-up; if the heights of a node's
 * subtrees differ by more than one, a single or double rotation
 * restores the AVL invariant.
 */
static void avl_rebalance (struct vm_area_struct *** nodeplaces_ptr, int count)
{
	for ( ; count > 0 ; count--) {
		struct vm_area_struct ** nodeplace = *--nodeplaces_ptr;
		struct vm_area_struct * node = *nodeplace;
		struct vm_area_struct * nodeleft = node->vm_avl_left;
		struct vm_area_struct * noderight = node->vm_avl_right;
		int heightleft = heightof(nodeleft);
		int heightright = heightof(noderight);
		if (heightright + 1 < heightleft) {
			/* left subtree is too tall */
			struct vm_area_struct * nodeleftleft = nodeleft->vm_avl_left;
			struct vm_area_struct * nodeleftright = nodeleft->vm_avl_right;
			int heightleftright = heightof(nodeleftright);
			if (heightof(nodeleftleft) >= heightleftright) {
				/* single rotation right: nodeleft becomes the root
				 * of this subtree, node becomes its right child */
				node->vm_avl_left = nodeleftright; nodeleft->vm_avl_right = node;
				nodeleft->vm_avl_height = 1 + (node->vm_avl_height = 1 + heightleftright);
				*nodeplace = nodeleft;
			} else {
				/* double rotation (left-right): nodeleftright is
				 * lifted to the root of this subtree, with nodeleft
				 * and node as its children */
				nodeleft->vm_avl_right = nodeleftright->vm_avl_left;
				node->vm_avl_left = nodeleftright->vm_avl_right;
				nodeleftright->vm_avl_left = nodeleft;
				nodeleftright->vm_avl_right = node;
				nodeleft->vm_avl_height = node->vm_avl_height = heightleftright;
				nodeleftright->vm_avl_height = heightleft;
				*nodeplace = nodeleftright;
			}
		}
		else if (heightleft + 1 < heightright) {
			/* right subtree is too tall: mirror image of the above */
			struct vm_area_struct * noderightright = noderight->vm_avl_right;
			struct vm_area_struct * noderightleft = noderight->vm_avl_left;
			int heightrightleft = heightof(noderightleft);
			if (heightof(noderightright) >= heightrightleft) {
				/* single rotation left */
				node->vm_avl_right = noderightleft; noderight->vm_avl_left = node;
				noderight->vm_avl_height = 1 + (node->vm_avl_height = 1 + heightrightleft);
				*nodeplace = noderight;
			} else {
				/* double rotation (right-left) */
				noderight->vm_avl_left = noderightleft->vm_avl_right;
				node->vm_avl_right = noderightleft->vm_avl_left;
				noderightleft->vm_avl_right = noderight;
				noderightleft->vm_avl_left = node;
				noderight->vm_avl_height = node->vm_avl_height = heightrightleft;
				noderightleft->vm_avl_height = heightright;
				*nodeplace = noderightleft;
			}
		}
		else {
			/* balanced: recompute the height and stop early if
			 * nothing changed (no ancestor can be affected then) */
			int height = (heightleft<heightright ? heightright : heightleft) + 1;
			if (height == node->vm_avl_height)
				break;
			node->vm_avl_height = height;
		}
	}
}
420
421
/*
 * Insert new_node into the AVL tree rooted at *ptree.  Walks down to
 * a leaf position (equal keys go right), recording the path in an
 * explicit stack so avl_rebalance() can restore the balance afterwards.
 */
static void avl_insert (struct vm_area_struct * new_node, struct vm_area_struct ** ptree)
{
	vm_avl_key_t key = new_node->vm_avl_key;
	struct vm_area_struct ** nodeplace = ptree;
	struct vm_area_struct ** stack[avl_maxheight];	/* path from the root */
	int stack_count = 0;
	struct vm_area_struct *** stack_ptr = &stack[0];	/* stack top */
	for (;;) {
		struct vm_area_struct * node = *nodeplace;
		if (node == avl_empty)
			break;
		*stack_ptr++ = nodeplace; stack_count++;
		if (key < node->vm_avl_key)
			nodeplace = &node->vm_avl_left;
		else
			nodeplace = &node->vm_avl_right;
	}
	/* link in as a leaf, then rebalance along the recorded path */
	new_node->vm_avl_left = avl_empty;
	new_node->vm_avl_right = avl_empty;
	new_node->vm_avl_height = 1;
	*nodeplace = new_node;
	avl_rebalance(stack_ptr,stack_count);
}
445
446
447
448
/*
 * Like avl_insert(), but additionally returns the nodes that precede
 * and follow new_node in key order (*to_the_left / *to_the_right,
 * NULL when there is none), gathered during the descent from the root.
 */
static void avl_insert_neighbours (struct vm_area_struct * new_node, struct vm_area_struct ** ptree,
	struct vm_area_struct ** to_the_left, struct vm_area_struct ** to_the_right)
{
	vm_avl_key_t key = new_node->vm_avl_key;
	struct vm_area_struct ** nodeplace = ptree;
	struct vm_area_struct ** stack[avl_maxheight];	/* path from the root */
	int stack_count = 0;
	struct vm_area_struct *** stack_ptr = &stack[0];	/* stack top */
	*to_the_left = *to_the_right = NULL;
	for (;;) {
		struct vm_area_struct * node = *nodeplace;
		if (node == avl_empty)
			break;
		*stack_ptr++ = nodeplace; stack_count++;
		if (key < node->vm_avl_key) {
			/* going left: node is the best successor so far */
			*to_the_right = node;
			nodeplace = &node->vm_avl_left;
		} else {
			/* going right: node is the best predecessor so far */
			*to_the_left = node;
			nodeplace = &node->vm_avl_right;
		}
	}
	/* link in as a leaf, then rebalance along the recorded path */
	new_node->vm_avl_left = avl_empty;
	new_node->vm_avl_right = avl_empty;
	new_node->vm_avl_height = 1;
	*nodeplace = new_node;
	avl_rebalance(stack_ptr,stack_count);
}
477
478
/*
 * Remove node_to_delete from the AVL tree rooted at *ptree and
 * rebalance.  A node with a left subtree is replaced by its in-order
 * predecessor (the rightmost node of that subtree); otherwise its
 * right subtree is spliced in directly.
 */
static void avl_remove (struct vm_area_struct * node_to_delete, struct vm_area_struct ** ptree)
{
	vm_avl_key_t key = node_to_delete->vm_avl_key;
	struct vm_area_struct ** nodeplace = ptree;
	struct vm_area_struct ** stack[avl_maxheight];	/* path from the root */
	int stack_count = 0;
	struct vm_area_struct *** stack_ptr = &stack[0];	/* stack top */
	struct vm_area_struct ** nodeplace_to_delete;
	/* locate the node, recording the search path for rebalancing */
	for (;;) {
		struct vm_area_struct * node = *nodeplace;
		if (node == avl_empty) {
			/* should never happen: caller guarantees membership */
			printk("avl_remove: node to delete not found in tree\n");
			return;
		}
		*stack_ptr++ = nodeplace; stack_count++;
		if (key == node->vm_avl_key)
			break;
		if (key < node->vm_avl_key)
			nodeplace = &node->vm_avl_left;
		else
			nodeplace = &node->vm_avl_right;
	}
	nodeplace_to_delete = nodeplace;
	/* no left subtree: splice in the right subtree directly */
	if (node_to_delete->vm_avl_left == avl_empty) {
		*nodeplace_to_delete = node_to_delete->vm_avl_right;
		stack_ptr--; stack_count--;
	} else {
		/* find the rightmost node of the left subtree ... */
		struct vm_area_struct *** stack_ptr_to_delete = stack_ptr;
		struct vm_area_struct ** nodeplace = &node_to_delete->vm_avl_left;
		struct vm_area_struct * node;
		for (;;) {
			node = *nodeplace;
			if (node->vm_avl_right == avl_empty)
				break;
			*stack_ptr++ = nodeplace; stack_count++;
			nodeplace = &node->vm_avl_right;
		}
		*nodeplace = node->vm_avl_left;
		/* ... and let it take over node_to_delete's position */
		node->vm_avl_left = node_to_delete->vm_avl_left;
		node->vm_avl_right = node_to_delete->vm_avl_right;
		node->vm_avl_height = node_to_delete->vm_avl_height;
		*nodeplace_to_delete = node;
		/* the recorded path slot must now point into the new node */
		*stack_ptr_to_delete = &node->vm_avl_left;
	}
	avl_rebalance(stack_ptr,stack_count);
}
528
529 #ifdef DEBUG_AVL
530
531
532 static void printk_list (struct vm_area_struct * vma)
533 {
534 printk("[");
535 while (vma) {
536 printk("%08lX-%08lX", vma->vm_start, vma->vm_end);
537 vma = vma->vm_next;
538 if (!vma)
539 break;
540 printk(" ");
541 }
542 printk("]");
543 }
544
545
546 static void printk_avl (struct vm_area_struct * tree)
547 {
548 if (tree != avl_empty) {
549 printk("(");
550 if (tree->vm_avl_left != avl_empty) {
551 printk_avl(tree->vm_avl_left);
552 printk("<");
553 }
554 printk("%08lX-%08lX", tree->vm_start, tree->vm_end);
555 if (tree->vm_avl_right != avl_empty) {
556 printk(">");
557 printk_avl(tree->vm_avl_right);
558 }
559 printk(")");
560 }
561 }
562
/* Identifies the current caller in avl_check* diagnostics (DEBUG_AVL only). */
static char *avl_check_point = "somewhere";
564
565
566 static void avl_checkheights (struct vm_area_struct * tree)
567 {
568 int h, hl, hr;
569
570 if (tree == avl_empty)
571 return;
572 avl_checkheights(tree->vm_avl_left);
573 avl_checkheights(tree->vm_avl_right);
574 h = tree->vm_avl_height;
575 hl = heightof(tree->vm_avl_left);
576 hr = heightof(tree->vm_avl_right);
577 if ((h == hl+1) && (hr <= hl) && (hl <= hr+1))
578 return;
579 if ((h == hr+1) && (hl <= hr) && (hr <= hl+1))
580 return;
581 printk("%s: avl_checkheights: heights inconsistent\n",avl_check_point);
582 }
583
584
585 static void avl_checkleft (struct vm_area_struct * tree, vm_avl_key_t key)
586 {
587 if (tree == avl_empty)
588 return;
589 avl_checkleft(tree->vm_avl_left,key);
590 avl_checkleft(tree->vm_avl_right,key);
591 if (tree->vm_avl_key < key)
592 return;
593 printk("%s: avl_checkleft: left key %lu >= top key %lu\n",avl_check_point,tree->vm_avl_key,key);
594 }
595
596
597 static void avl_checkright (struct vm_area_struct * tree, vm_avl_key_t key)
598 {
599 if (tree == avl_empty)
600 return;
601 avl_checkright(tree->vm_avl_left,key);
602 avl_checkright(tree->vm_avl_right,key);
603 if (tree->vm_avl_key > key)
604 return;
605 printk("%s: avl_checkright: right key %lu <= top key %lu\n",avl_check_point,tree->vm_avl_key,key);
606 }
607
608
/* DEBUG_AVL: verify the search-tree ordering of the whole tree --
 * every left-subtree key below a node is smaller than the node's key,
 * every right-subtree key larger. */
static void avl_checkorder (struct vm_area_struct * tree)
{
	if (tree == avl_empty)
		return;
	avl_checkorder(tree->vm_avl_left);
	avl_checkorder(tree->vm_avl_right);
	avl_checkleft(tree->vm_avl_left,tree->vm_avl_key);
	avl_checkright(tree->vm_avl_right,tree->vm_avl_key);
}
618
619
/* DEBUG_AVL: run height and ordering checks on the task's mmap AVL
 * tree; `caller` is recorded so that any diagnostic printk identifies
 * the call site. */
static void avl_check (struct task_struct * task, char *caller)
{
	avl_check_point = caller;
	avl_checkheights(task->mm->mmap_avl);
	avl_checkorder(task->mm->mmap_avl);
}
629
630 #endif
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
/*
 * Fix up `area` after [addr, addr+len) has been unmapped from it.
 * Four cases:
 *   - the whole area is gone: call ->close and drop the inode;
 *   - unmap at the end: shrink vm_end;
 *   - unmap at the start: advance vm_start (and vm_offset);
 *   - unmap in the middle: split into two areas.
 * Except in the whole-area case, a fresh vm_area_struct copy is
 * inserted for the surviving part(s); the caller (do_munmap) frees
 * the original `area` afterwards.
 */
void unmap_fixup(struct vm_area_struct *area,
	unsigned long addr, size_t len)
{
	struct vm_area_struct *mpnt;
	unsigned long end = addr + len;

	/* sanity: the unmapped range must lie entirely inside the area */
	if (addr < area->vm_start || addr >= area->vm_end ||
	    end <= area->vm_start || end > area->vm_end ||
	    end < addr)
	{
		printk("unmap_fixup: area=%lx-%lx, unmap %lx-%lx!!\n",
			area->vm_start, area->vm_end, addr, end);
		return;
	}

	/* case 1: the whole area disappears */
	if (addr == area->vm_start && end == area->vm_end) {
		if (area->vm_ops && area->vm_ops->close)
			area->vm_ops->close(area);
		if (area->vm_inode)
			iput(area->vm_inode);
		return;
	}

	/* case 2: truncated at the end */
	if (end == area->vm_end)
		area->vm_end = addr;
	else
	/* case 3: truncated at the start */
	if (addr == area->vm_start) {
		area->vm_offset += (end - area->vm_start);
		area->vm_start = end;
	}
	else {
		/* case 4: hole in the middle -- build an area for the tail */
		mpnt = (struct vm_area_struct *)kmalloc(sizeof(*mpnt), GFP_KERNEL);

		/* NOTE(review): on allocation failure the tail mapping is
		 * silently dropped -- confirm this is acceptable here */
		if (!mpnt)
			return;
		*mpnt = *area;
		mpnt->vm_offset += (end - area->vm_start);
		mpnt->vm_start = end;
		if (mpnt->vm_inode)
			mpnt->vm_inode->i_count++;	/* tail keeps its own inode ref */
		if (mpnt->vm_ops && mpnt->vm_ops->open)
			mpnt->vm_ops->open(mpnt);
		area->vm_end = addr;	/* head keeps [vm_start, addr) */
		insert_vm_struct(current, mpnt);
	}

	/* re-insert a copy of the trimmed area; the caller frees the
	 * original, so close it down (after shrinking it to zero) */
	mpnt = (struct vm_area_struct *)kmalloc(sizeof(*mpnt), GFP_KERNEL);
	if (!mpnt)
		return;
	*mpnt = *area;
	if (mpnt->vm_ops && mpnt->vm_ops->open)
		mpnt->vm_ops->open(mpnt);
	if (area->vm_ops && area->vm_ops->close) {
		area->vm_end = area->vm_start;
		area->vm_ops->close(area);
	}
	insert_vm_struct(current, mpnt);
}
718
/*
 * munmap() system call entry point: unmap [addr, addr+len) from the
 * calling process.  All the work is done by do_munmap().
 */
asmlinkage int sys_munmap(unsigned long addr, size_t len)
{
	return do_munmap(addr, len);
}
723
724
725
726
727
728
729
730 int do_munmap(unsigned long addr, size_t len)
731 {
732 struct vm_area_struct *mpnt, *prev, *next, **npp, *free;
733
734 if ((addr & ~PAGE_MASK) || addr > TASK_SIZE || len > TASK_SIZE-addr)
735 return -EINVAL;
736
737 if ((len = PAGE_ALIGN(len)) == 0)
738 return 0;
739
740
741
742
743
744
745
746 mpnt = find_vma(current, addr);
747 if (!mpnt)
748 return 0;
749 avl_neighbours(mpnt, current->mm->mmap_avl, &prev, &next);
750
751
752
753 npp = (prev ? &prev->vm_next : ¤t->mm->mmap);
754 free = NULL;
755 for ( ; mpnt && mpnt->vm_start < addr+len; mpnt = *npp) {
756 *npp = mpnt->vm_next;
757 mpnt->vm_next = free;
758 free = mpnt;
759 avl_remove(mpnt, ¤t->mm->mmap_avl);
760 }
761
762 if (free == NULL)
763 return 0;
764
765
766
767
768
769
770
771 while (free) {
772 unsigned long st, end;
773
774 mpnt = free;
775 free = free->vm_next;
776
777 remove_shared_vm_struct(mpnt);
778
779 st = addr < mpnt->vm_start ? mpnt->vm_start : addr;
780 end = addr+len;
781 end = end > mpnt->vm_end ? mpnt->vm_end : end;
782
783 if (mpnt->vm_ops && mpnt->vm_ops->unmap)
784 mpnt->vm_ops->unmap(mpnt, st, end-st);
785
786 unmap_fixup(mpnt, st, end-st);
787 kfree(mpnt);
788 }
789
790 unmap_page_range(addr, len);
791 return 0;
792 }
793
794
795 void build_mmap_avl(struct task_struct * task)
796 {
797 struct vm_area_struct * vma;
798
799 task->mm->mmap_avl = NULL;
800 for (vma = task->mm->mmap; vma; vma = vma->vm_next)
801 avl_insert(vma, &task->mm->mmap_avl);
802 }
803
804
805 void exit_mmap(struct task_struct * task)
806 {
807 struct vm_area_struct * mpnt;
808
809 mpnt = task->mm->mmap;
810 task->mm->mmap = NULL;
811 task->mm->mmap_avl = NULL;
812 while (mpnt) {
813 struct vm_area_struct * next = mpnt->vm_next;
814 if (mpnt->vm_ops && mpnt->vm_ops->close)
815 mpnt->vm_ops->close(mpnt);
816 remove_shared_vm_struct(mpnt);
817 if (mpnt->vm_inode)
818 iput(mpnt->vm_inode);
819 kfree(mpnt);
820 mpnt = next;
821 }
822 }
823
824
825
826
827
/*
 * Link vmp into task t's address-ordered mapping list and AVL tree,
 * and -- when it maps an inode -- into that inode's circular i_mmap
 * ring of sharing areas.
 */
void insert_vm_struct(struct task_struct *t, struct vm_area_struct *vmp)
{
	struct vm_area_struct *share;
	struct inode * inode;

#if 0
	/* old linear-list insertion, kept for reference */
	struct vm_area_struct **p, *mpnt;

	p = &t->mm->mmap;
	while ((mpnt = *p) != NULL) {
		if (mpnt->vm_start > vmp->vm_start)
			break;
		if (mpnt->vm_end > vmp->vm_start)
			printk("insert_vm_struct: overlapping memory areas\n");
		p = &mpnt->vm_next;
	}
	vmp->vm_next = mpnt;
	*p = vmp;
#else
	struct vm_area_struct * prev, * next;

	/* insert into the AVL tree; prev/next come back as the
	 * neighbours vmp must sit between in the linear list */
	avl_insert_neighbours(vmp, &t->mm->mmap_avl, &prev, &next);
	if ((prev ? prev->vm_next : t->mm->mmap) != next)
		printk("insert_vm_struct: tree inconsistent with list\n");
	if (prev)
		prev->vm_next = vmp;
	else
		t->mm->mmap = vmp;
	vmp->vm_next = next;
#endif

	inode = vmp->vm_inode;
	if (!inode)
		return;

	/* splice into the inode's circular share ring (or start one) */
	if ((share = inode->i_mmap)) {
		vmp->vm_next_share = share->vm_next_share;
		vmp->vm_next_share->vm_prev_share = vmp;
		share->vm_next_share = vmp;
		vmp->vm_prev_share = share;
	} else
		inode->i_mmap = vmp->vm_next_share = vmp->vm_prev_share = vmp;
}
872
873
874
875
876 void remove_shared_vm_struct(struct vm_area_struct *mpnt)
877 {
878 struct inode * inode = mpnt->vm_inode;
879
880 if (!inode)
881 return;
882
883 if (mpnt->vm_next_share == mpnt) {
884 if (inode->i_mmap != mpnt)
885 printk("Inode i_mmap ring corrupted\n");
886 inode->i_mmap = NULL;
887 return;
888 }
889
890 if (inode->i_mmap == mpnt)
891 inode->i_mmap = mpnt->vm_next_share;
892
893 mpnt->vm_prev_share->vm_next_share = mpnt->vm_next_share;
894 mpnt->vm_next_share->vm_prev_share = mpnt->vm_prev_share;
895 }
896
897
898
899
900
901
902
903
/*
 * Try to coalesce adjacent, compatible vm_areas of `task` in the range
 * [start_addr, end_addr).  Two neighbouring areas are merged when they
 * map the same object with identical flags/ops and are exactly
 * contiguous in both address space and (for file/SHM mappings) offset.
 */
void merge_segments (struct task_struct * task, unsigned long start_addr, unsigned long end_addr)
{
	struct vm_area_struct *prev, *mpnt, *next;

	/* first area in range, plus its list neighbours */
	mpnt = find_vma(task, start_addr);
	if (!mpnt)
		return;
	avl_neighbours(mpnt, task->mm->mmap_avl, &prev, &next);

	/* no predecessor: start merging from the first area itself */
	if (!prev) {
		prev = mpnt;
		mpnt = next;
	}

	/* walk the list, trying to fold each mpnt into prev */
	for ( ; mpnt && prev->vm_start < end_addr ; prev = mpnt, mpnt = next) {
#if 0
		printk("looping in merge_segments, mpnt=0x%lX\n", (unsigned long) mpnt);
#endif

		next = mpnt->vm_next;

		/* both areas must map the same object the same way ... */
		if (mpnt->vm_inode != prev->vm_inode)
			continue;
		if (mpnt->vm_pte != prev->vm_pte)
			continue;
		if (mpnt->vm_ops != prev->vm_ops)
			continue;
		if (mpnt->vm_flags != prev->vm_flags)
			continue;
		/* ... and be exactly adjacent in the address space */
		if (prev->vm_end != mpnt->vm_start)
			continue;

		/* for inode-backed or SHM areas the file offsets must be
		 * contiguous as well */
		if ((mpnt->vm_inode != NULL) || (mpnt->vm_flags & VM_SHM)) {
			if (prev->vm_offset + prev->vm_end - prev->vm_start != mpnt->vm_offset)
				continue;
		}

		/* merge: extend prev over mpnt, then delete mpnt */
		avl_remove(mpnt, &task->mm->mmap_avl);
		prev->vm_end = mpnt->vm_end;
		prev->vm_next = mpnt->vm_next;
		if (mpnt->vm_ops && mpnt->vm_ops->close) {
			/* shrink mpnt to zero length before closing it */
			mpnt->vm_offset += mpnt->vm_end - mpnt->vm_start;
			mpnt->vm_start = mpnt->vm_end;
			mpnt->vm_ops->close(mpnt);
		}
		remove_shared_vm_struct(mpnt);
		if (mpnt->vm_inode)
			mpnt->vm_inode->i_count--;
		kfree_s(mpnt, sizeof(*mpnt));
		mpnt = prev;	/* retry the enlarged prev against `next` */
	}
}
970
971
972
973
974
975 static int anon_map(struct inode *ino, struct file * file, struct vm_area_struct * vma)
976 {
977 if (zeromap_page_range(vma->vm_start, vma->vm_end - vma->vm_start, vma->vm_page_prot))
978 return -ENOMEM;
979 return 0;
980 }