This source file includes following definitions.
- anon_map
- do_mmap
- get_unmapped_area
- avl_neighbours
- avl_rebalance
- avl_insert
- avl_insert_neighbours
- avl_remove
- printk_list
- printk_avl
- avl_checkheights
- avl_checkleft
- avl_checkright
- avl_checkorder
- avl_check
- unmap_fixup
- sys_munmap
- do_munmap
- build_mmap_avl
- exit_mmap
- insert_vm_struct
- remove_shared_vm_struct
- merge_segments
1
2
3
4
5
6 #include <linux/stat.h>
7 #include <linux/sched.h>
8 #include <linux/kernel.h>
9 #include <linux/mm.h>
10 #include <linux/shm.h>
11 #include <linux/errno.h>
12 #include <linux/mman.h>
13 #include <linux/string.h>
14 #include <linux/malloc.h>
15
16 #include <asm/segment.h>
17 #include <asm/system.h>
18 #include <asm/pgtable.h>
19
20
21
22
23
24 static inline int anon_map(struct inode *ino, struct file * file, struct vm_area_struct * vma)
25 {
26 if (zeromap_page_range(vma->vm_start, vma->vm_end - vma->vm_start, vma->vm_page_prot))
27 return -ENOMEM;
28 return 0;
29 }
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
/*
 * Map the low four VM_{READ,WRITE,EXEC,SHARED} flag bits to hardware
 * page protections.  The first eight entries (__P...) are the private
 * (copy-on-write) variants, the last eight (__S...) the shared ones.
 */
pgprot_t protection_map[16] = {
	__P000, __P001, __P010, __P011, __P100, __P101, __P110, __P111,
	__S000, __S001, __S010, __S011, __S100, __S101, __S110, __S111
};
52
/*
 * Create a mapping of [addr, addr+len) with the given protection and
 * flags.  If "file" is non-NULL the mapping is backed by that file at
 * offset "off"; otherwise an anonymous zero-filled mapping is made.
 * Returns the page-aligned address of the mapping, or a negative errno.
 */
unsigned long do_mmap(struct file * file, unsigned long addr, unsigned long len,
	unsigned long prot, unsigned long flags, unsigned long off)
{
	int error;
	struct vm_area_struct * vma;

	if ((len = PAGE_ALIGN(len)) == 0)
		return addr;

	if (addr > TASK_SIZE || len > TASK_SIZE || addr > TASK_SIZE-len)
		return -EINVAL;

	/* offset wrap-around check */
	if (off + len < off)
		return -EINVAL;

	if (file != NULL) {
		switch (flags & MAP_TYPE) {
		case MAP_SHARED:
			/* writable shared mappings need the file open for write */
			if ((prot & PROT_WRITE) && !(file->f_mode & 2))
				return -EACCES;
			/* fall through: shared mappings also need read access */
		case MAP_PRIVATE:
			if (!(file->f_mode & 1))
				return -EACCES;
			break;

		default:
			return -EINVAL;
		}
		if (flags & MAP_DENYWRITE) {
			/* refuse if somebody has the file open for writing */
			if (file->f_inode->i_wcount > 0)
				return -ETXTBSY;
		}
	} else if ((flags & MAP_TYPE) != MAP_PRIVATE)
		return -EINVAL;

	/*
	 * Obtain the address to map to.  MAP_FIXED takes the address
	 * exactly as given (and it must be page aligned); otherwise we
	 * pick a free range ourselves.
	 */
	if (flags & MAP_FIXED) {
		if (addr & ~PAGE_MASK)
			return -EINVAL;
		if (len > TASK_SIZE || addr > TASK_SIZE - len)
			return -EINVAL;
	} else {
		addr = get_unmapped_area(addr, len);
		if (!addr)
			return -ENOMEM;
	}

	/* a file mapping requires an mmap operation from the filesystem */
	if (file && (!file->f_op || !file->f_op->mmap))
		return -ENODEV;

	vma = (struct vm_area_struct *)kmalloc(sizeof(struct vm_area_struct),
		GFP_KERNEL);
	if (!vma)
		return -ENOMEM;

	vma->vm_mm = current->mm;
	vma->vm_start = addr;
	vma->vm_end = addr + len;
	/* NOTE(review): this masking assumes PROT_{READ,WRITE,EXEC} share
	 * bit values with VM_{READ,WRITE,EXEC} — confirm in headers. */
	vma->vm_flags = prot & (VM_READ | VM_WRITE | VM_EXEC);
	vma->vm_flags |= flags & (VM_GROWSDOWN | VM_DENYWRITE | VM_EXECUTABLE);

	if (file) {
		if (file->f_mode & 1)
			vma->vm_flags |= VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC;
		if (flags & MAP_SHARED) {
			vma->vm_flags |= VM_SHARED | VM_MAYSHARE;
			/*
			 * A shared mapping of a file opened read-only can
			 * never become writable, so drop the write/shared
			 * bits for it.
			 */
			if (!(file->f_mode & 2))
				vma->vm_flags &= ~(VM_MAYWRITE | VM_SHARED);
		}
	} else
		vma->vm_flags |= VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC;
	vma->vm_page_prot = protection_map[vma->vm_flags & 0x0f];
	vma->vm_ops = NULL;
	vma->vm_offset = off;
	vma->vm_inode = NULL;
	vma->vm_pte = 0;

	/* clear any old mappings in the range before installing this one */
	do_munmap(addr, len);

	if (file)
		error = file->f_op->mmap(file->f_inode, file, vma);
	else
		error = anon_map(NULL, NULL, vma);

	if (error) {
		kfree(vma);
		return error;
	}
	insert_vm_struct(current, vma);
	/* coalesce with compatible neighbours where possible */
	merge_segments(current, vma->vm_start, vma->vm_end);
	return addr;
}
172
173
174
175
176
177
178 unsigned long get_unmapped_area(unsigned long addr, unsigned long len)
179 {
180 struct vm_area_struct * vmm;
181
182 if (len > TASK_SIZE)
183 return 0;
184 if (!addr)
185 addr = TASK_SIZE / 3;
186 addr = PAGE_ALIGN(addr);
187
188 for (vmm = current->mm->mmap; ; vmm = vmm->vm_next) {
189 if (TASK_SIZE - len < addr)
190 return 0;
191 if (!vmm)
192 return addr;
193 if (addr > vmm->vm_end)
194 continue;
195 if (addr + len > vmm->vm_start) {
196 addr = vmm->vm_end;
197 continue;
198 }
199 return addr;
200 }
201 }
202
203
204
205
206
207
208
209
210
211
/*
 * AVL tree support for vm_area_structs.  The tree is keyed on vm_end,
 * which is unique within one mm because areas never overlap.
 */
#define vm_avl_key vm_end
#define vm_avl_key_t unsigned long

/*
 * Deepest tree we will ever walk; 41 levels comfortably exceeds the
 * worst-case AVL height (about 1.44*log2(N)) for a 32-bit address space.
 */
#define avl_maxheight 41
/* Height of a subtree, treating the empty tree as height 0. */
#define heightof(tree) ((tree) == avl_empty ? 0 : (tree)->vm_avl_height)
228
229
230
231
232
233
234
235
236
/*
 * Look up "node" in the AVL tree and return its key-order neighbours:
 * on return *to_the_left / *to_the_right point at the areas immediately
 * before/after node (NULL at either end).  Also sanity-checks that the
 * tree agrees with the vm_next linked list.
 */
static inline void avl_neighbours (struct vm_area_struct * node, struct vm_area_struct * tree, struct vm_area_struct ** to_the_left, struct vm_area_struct ** to_the_right)
{
	vm_avl_key_t key = node->vm_avl_key;

	*to_the_left = *to_the_right = NULL;
	for (;;) {
		if (tree == avl_empty) {
			printk("avl_neighbours: node not found in the tree\n");
			return;
		}
		if (key == tree->vm_avl_key)
			break;
		if (key < tree->vm_avl_key) {
			/* descending left: tree is the closest successor so far */
			*to_the_right = tree;
			tree = tree->vm_avl_left;
		} else {
			/* descending right: tree is the closest predecessor so far */
			*to_the_left = tree;
			tree = tree->vm_avl_right;
		}
	}
	if (tree != node) {
		printk("avl_neighbours: node not exactly found in the tree\n");
		return;
	}
	/* a left subtree's rightmost node is the true predecessor */
	if (tree->vm_avl_left != avl_empty) {
		struct vm_area_struct * node;
		for (node = tree->vm_avl_left; node->vm_avl_right != avl_empty; node = node->vm_avl_right)
			continue;
		*to_the_left = node;
	}
	/* a right subtree's leftmost node is the true successor */
	if (tree->vm_avl_right != avl_empty) {
		struct vm_area_struct * node;
		for (node = tree->vm_avl_right; node->vm_avl_left != avl_empty; node = node->vm_avl_left)
			continue;
		*to_the_right = node;
	}
	/* the neighbours found must match the vm_next list links */
	if ((*to_the_left && ((*to_the_left)->vm_next != node)) || (node->vm_next != *to_the_right))
		printk("avl_neighbours: tree inconsistent with list\n");
}
276
277
278
279
280
281
282
/*
 * Rebalance after an insertion or deletion.  nodeplaces_ptr points just
 * past a stack of "count" pointers-to-edges leading from the root down
 * to the changed subtree; each is revisited bottom-up, doing single or
 * double rotations wherever the AVL invariant (children's heights
 * differ by at most 1) has been violated.
 */
static inline void avl_rebalance (struct vm_area_struct *** nodeplaces_ptr, int count)
{
	for ( ; count > 0 ; count--) {
		struct vm_area_struct ** nodeplace = *--nodeplaces_ptr;
		struct vm_area_struct * node = *nodeplace;
		struct vm_area_struct * nodeleft = node->vm_avl_left;
		struct vm_area_struct * noderight = node->vm_avl_right;
		int heightleft = heightof(nodeleft);
		int heightright = heightof(noderight);
		if (heightright + 1 < heightleft) {
			/* left subtree too tall */
			struct vm_area_struct * nodeleftleft = nodeleft->vm_avl_left;
			struct vm_area_struct * nodeleftright = nodeleft->vm_avl_right;
			int heightleftright = heightof(nodeleftright);
			if (heightof(nodeleftleft) >= heightleftright) {
				/*
				 * Single right rotation: nodeleft becomes the
				 * root of this subtree, node its right child.
				 */
				node->vm_avl_left = nodeleftright; nodeleft->vm_avl_right = node;
				nodeleft->vm_avl_height = 1 + (node->vm_avl_height = 1 + heightleftright);
				*nodeplace = nodeleft;
			} else {
				/*
				 * Double (left-right) rotation: nodeleftright
				 * is lifted to the root, taking nodeleft and
				 * node as its left and right children.
				 */
				nodeleft->vm_avl_right = nodeleftright->vm_avl_left;
				node->vm_avl_left = nodeleftright->vm_avl_right;
				nodeleftright->vm_avl_left = nodeleft;
				nodeleftright->vm_avl_right = node;
				nodeleft->vm_avl_height = node->vm_avl_height = heightleftright;
				nodeleftright->vm_avl_height = heightleft;
				*nodeplace = nodeleftright;
			}
		}
		else if (heightleft + 1 < heightright) {
			/* right subtree too tall: mirror image of the above */
			struct vm_area_struct * noderightright = noderight->vm_avl_right;
			struct vm_area_struct * noderightleft = noderight->vm_avl_left;
			int heightrightleft = heightof(noderightleft);
			if (heightof(noderightright) >= heightrightleft) {
				node->vm_avl_right = noderightleft; noderight->vm_avl_left = node;
				noderight->vm_avl_height = 1 + (node->vm_avl_height = 1 + heightrightleft);
				*nodeplace = noderight;
			} else {
				noderight->vm_avl_left = noderightleft->vm_avl_right;
				node->vm_avl_right = noderightleft->vm_avl_left;
				noderightleft->vm_avl_right = noderight;
				noderightleft->vm_avl_left = node;
				noderight->vm_avl_height = node->vm_avl_height = heightrightleft;
				noderightleft->vm_avl_height = heightright;
				*nodeplace = noderightleft;
			}
		}
		else {
			/* balanced: recompute height; stop early if unchanged */
			int height = (heightleft<heightright ? heightright : heightleft) + 1;
			if (height == node->vm_avl_height)
				break;
			node->vm_avl_height = height;
		}
	}
}
358
359
/*
 * Insert new_node into the tree rooted at *ptree, then rebalance.
 * Keys equal to an existing key descend to the right.
 */
static inline void avl_insert (struct vm_area_struct * new_node, struct vm_area_struct ** ptree)
{
	vm_avl_key_t key = new_node->vm_avl_key;
	struct vm_area_struct ** nodeplace = ptree;
	struct vm_area_struct ** stack[avl_maxheight];	/* edges walked, for rebalancing */
	int stack_count = 0;
	struct vm_area_struct *** stack_ptr = &stack[0];
	for (;;) {
		struct vm_area_struct * node = *nodeplace;
		if (node == avl_empty)
			break;
		*stack_ptr++ = nodeplace; stack_count++;
		if (key < node->vm_avl_key)
			nodeplace = &node->vm_avl_left;
		else
			nodeplace = &node->vm_avl_right;
	}
	new_node->vm_avl_left = avl_empty;
	new_node->vm_avl_right = avl_empty;
	new_node->vm_avl_height = 1;
	*nodeplace = new_node;
	avl_rebalance(stack_ptr,stack_count);
}
383
384
385
386
/*
 * Insert new_node into the tree and, during the same descent, record
 * its key-order neighbours: *to_the_left / *to_the_right are the nodes
 * that will precede/follow it in the list (NULL at either end).
 */
static inline void avl_insert_neighbours (struct vm_area_struct * new_node, struct vm_area_struct ** ptree,
	struct vm_area_struct ** to_the_left, struct vm_area_struct ** to_the_right)
{
	vm_avl_key_t key = new_node->vm_avl_key;
	struct vm_area_struct ** nodeplace = ptree;
	struct vm_area_struct ** stack[avl_maxheight];	/* edges walked, for rebalancing */
	int stack_count = 0;
	struct vm_area_struct *** stack_ptr = &stack[0];
	*to_the_left = *to_the_right = NULL;
	for (;;) {
		struct vm_area_struct * node = *nodeplace;
		if (node == avl_empty)
			break;
		*stack_ptr++ = nodeplace; stack_count++;
		if (key < node->vm_avl_key) {
			/* going left: node is the closest successor so far */
			*to_the_right = node;
			nodeplace = &node->vm_avl_left;
		} else {
			/* going right: node is the closest predecessor so far */
			*to_the_left = node;
			nodeplace = &node->vm_avl_right;
		}
	}
	new_node->vm_avl_left = avl_empty;
	new_node->vm_avl_right = avl_empty;
	new_node->vm_avl_height = 1;
	*nodeplace = new_node;
	avl_rebalance(stack_ptr,stack_count);
}
415
416
/*
 * Remove node_to_delete from the tree rooted at *ptree and rebalance.
 * A node with a left subtree is replaced by the rightmost node of that
 * subtree (its in-order predecessor); otherwise its right subtree is
 * spliced into its place directly.
 */
static inline void avl_remove (struct vm_area_struct * node_to_delete, struct vm_area_struct ** ptree)
{
	vm_avl_key_t key = node_to_delete->vm_avl_key;
	struct vm_area_struct ** nodeplace = ptree;
	struct vm_area_struct ** stack[avl_maxheight];	/* edges walked, for rebalancing */
	int stack_count = 0;
	struct vm_area_struct *** stack_ptr = &stack[0];
	struct vm_area_struct ** nodeplace_to_delete;
	for (;;) {
		struct vm_area_struct * node = *nodeplace;
		if (node == avl_empty) {
			/* should never happen: node is not in this tree */
			printk("avl_remove: node to delete not found in tree\n");
			return;
		}
		*stack_ptr++ = nodeplace; stack_count++;
		if (key == node->vm_avl_key)
			break;
		if (key < node->vm_avl_key)
			nodeplace = &node->vm_avl_left;
		else
			nodeplace = &node->vm_avl_right;
	}
	nodeplace_to_delete = nodeplace;
	/* no left child: splice the right subtree in directly */
	if (node_to_delete->vm_avl_left == avl_empty) {
		*nodeplace_to_delete = node_to_delete->vm_avl_right;
		stack_ptr--; stack_count--;
	} else {
		/* replace with the rightmost node of the left subtree */
		struct vm_area_struct *** stack_ptr_to_delete = stack_ptr;
		struct vm_area_struct ** nodeplace = &node_to_delete->vm_avl_left;
		struct vm_area_struct * node;
		for (;;) {
			node = *nodeplace;
			if (node->vm_avl_right == avl_empty)
				break;
			*stack_ptr++ = nodeplace; stack_count++;
			nodeplace = &node->vm_avl_right;
		}
		*nodeplace = node->vm_avl_left;
		/* the replacement inherits the deleted node's links and height */
		node->vm_avl_left = node_to_delete->vm_avl_left;
		node->vm_avl_right = node_to_delete->vm_avl_right;
		node->vm_avl_height = node_to_delete->vm_avl_height;
		*nodeplace_to_delete = node;
		/* patch the stack so rebalancing descends through the replacement */
		*stack_ptr_to_delete = &node->vm_avl_left;
	}
	avl_rebalance(stack_ptr,stack_count);
}
466
467 #ifdef DEBUG_AVL
468
469
470 static void printk_list (struct vm_area_struct * vma)
471 {
472 printk("[");
473 while (vma) {
474 printk("%08lX-%08lX", vma->vm_start, vma->vm_end);
475 vma = vma->vm_next;
476 if (!vma)
477 break;
478 printk(" ");
479 }
480 printk("]");
481 }
482
483
484 static void printk_avl (struct vm_area_struct * tree)
485 {
486 if (tree != avl_empty) {
487 printk("(");
488 if (tree->vm_avl_left != avl_empty) {
489 printk_avl(tree->vm_avl_left);
490 printk("<");
491 }
492 printk("%08lX-%08lX", tree->vm_start, tree->vm_end);
493 if (tree->vm_avl_right != avl_empty) {
494 printk(">");
495 printk_avl(tree->vm_avl_right);
496 }
497 printk(")");
498 }
499 }
500
/* Label printed by the consistency checkers to identify the call site. */
static char *avl_check_point = "somewhere";
502
503
/*
 * Verify the cached height of every node: a node's height must be one
 * more than its taller child's, and the two children's heights may
 * differ by at most one (the AVL invariant).
 */
static void avl_checkheights (struct vm_area_struct * tree)
{
	int h, hl, hr;

	if (tree == avl_empty)
		return;
	avl_checkheights(tree->vm_avl_left);
	avl_checkheights(tree->vm_avl_right);
	h = tree->vm_avl_height;
	hl = heightof(tree->vm_avl_left);
	hr = heightof(tree->vm_avl_right);
	/* accept either child being the taller one */
	if ((h == hl+1) && (hr <= hl) && (hl <= hr+1))
		return;
	if ((h == hr+1) && (hl <= hr) && (hr <= hl+1))
		return;
	printk("%s: avl_checkheights: heights inconsistent\n",avl_check_point);
}
521
522
523 static void avl_checkleft (struct vm_area_struct * tree, vm_avl_key_t key)
524 {
525 if (tree == avl_empty)
526 return;
527 avl_checkleft(tree->vm_avl_left,key);
528 avl_checkleft(tree->vm_avl_right,key);
529 if (tree->vm_avl_key < key)
530 return;
531 printk("%s: avl_checkleft: left key %lu >= top key %lu\n",avl_check_point,tree->vm_avl_key,key);
532 }
533
534
535 static void avl_checkright (struct vm_area_struct * tree, vm_avl_key_t key)
536 {
537 if (tree == avl_empty)
538 return;
539 avl_checkright(tree->vm_avl_left,key);
540 avl_checkright(tree->vm_avl_right,key);
541 if (tree->vm_avl_key > key)
542 return;
543 printk("%s: avl_checkright: right key %lu <= top key %lu\n",avl_check_point,tree->vm_avl_key,key);
544 }
545
546
/*
 * Verify the search-tree ordering of the whole tree: every key in a
 * node's left subtree is below its key, every right key above it.
 */
static void avl_checkorder (struct vm_area_struct * tree)
{
	if (tree == avl_empty)
		return;
	avl_checkorder(tree->vm_avl_left);
	avl_checkorder(tree->vm_avl_right);
	avl_checkleft(tree->vm_avl_left,tree->vm_avl_key);
	avl_checkright(tree->vm_avl_right,tree->vm_avl_key);
}
556
557
/*
 * Run the full set of consistency checks on a task's vma AVL tree,
 * tagging any diagnostics with "caller".
 */
static void avl_check (struct task_struct * task, char *caller)
{
	avl_check_point = caller;

	avl_checkheights(task->mm->mmap_avl);
	avl_checkorder(task->mm->mmap_avl);
}
567
568 #endif
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
/*
 * Fix up the vma structures after do_munmap() has unlinked "area" and
 * torn down pages in [addr, addr+len).  The caller kfree()s "area"
 * itself, so any surviving portion is re-inserted here as a fresh copy.
 * Four cases: unmap everything, trim either end, or punch a hole in
 * the middle (which needs an extra vma for the tail piece).
 */
static void unmap_fixup(struct vm_area_struct *area,
	unsigned long addr, size_t len)
{
	struct vm_area_struct *mpnt;
	unsigned long end = addr + len;

	/* the range must lie entirely within the area */
	if (addr < area->vm_start || addr >= area->vm_end ||
	    end <= area->vm_start || end > area->vm_end ||
	    end < addr)
	{
		printk("unmap_fixup: area=%lx-%lx, unmap %lx-%lx!!\n",
			area->vm_start, area->vm_end, addr, end);
		return;
	}

	/* whole area unmapped: close it, drop the inode, nothing survives */
	if (addr == area->vm_start && end == area->vm_end) {
		if (area->vm_ops && area->vm_ops->close)
			area->vm_ops->close(area);
		if (area->vm_inode)
			iput(area->vm_inode);
		return;
	}

	/* trim from the tail... */
	if (end == area->vm_end)
		area->vm_end = addr;
	else
	/* ...or from the head (the file offset shifts with vm_start)... */
	if (addr == area->vm_start) {
		area->vm_offset += (end - area->vm_start);
		area->vm_start = end;
	}
	else {
		/* ...or punch a hole: the part above "end" becomes a new area */
		mpnt = (struct vm_area_struct *)kmalloc(sizeof(*mpnt), GFP_KERNEL);

		if (!mpnt)
			return;
		*mpnt = *area;
		mpnt->vm_offset += (end - area->vm_start);
		mpnt->vm_start = end;
		if (mpnt->vm_inode)
			mpnt->vm_inode->i_count++;	/* the copy holds its own reference */
		if (mpnt->vm_ops && mpnt->vm_ops->open)
			mpnt->vm_ops->open(mpnt);
		area->vm_end = addr;	/* truncate the original down to the hole */
		insert_vm_struct(current, mpnt);
	}

	/*
	 * Re-insert a copy of the (now trimmed) area, since the caller
	 * will free the original.  Closing the original over an empty
	 * range tells vm_ops that this instance is going away.
	 */
	mpnt = (struct vm_area_struct *)kmalloc(sizeof(*mpnt), GFP_KERNEL);
	if (!mpnt)
		return;
	*mpnt = *area;
	if (mpnt->vm_ops && mpnt->vm_ops->open)
		mpnt->vm_ops->open(mpnt);
	if (area->vm_ops && area->vm_ops->close) {
		area->vm_end = area->vm_start;
		area->vm_ops->close(area);
	}
	insert_vm_struct(current, mpnt);
}
656
/* munmap(2) entry point: remove any mappings overlapping [addr, addr+len). */
asmlinkage int sys_munmap(unsigned long addr, size_t len)
{
	return do_munmap(addr, len);
}
661
662
663
664
665
666
667
668 int do_munmap(unsigned long addr, size_t len)
669 {
670 struct vm_area_struct *mpnt, *prev, *next, **npp, *free;
671
672 if ((addr & ~PAGE_MASK) || addr > TASK_SIZE || len > TASK_SIZE-addr)
673 return -EINVAL;
674
675 if ((len = PAGE_ALIGN(len)) == 0)
676 return 0;
677
678
679
680
681
682
683
684 mpnt = find_vma(current, addr);
685 if (!mpnt)
686 return 0;
687 avl_neighbours(mpnt, current->mm->mmap_avl, &prev, &next);
688
689
690
691 npp = (prev ? &prev->vm_next : ¤t->mm->mmap);
692 free = NULL;
693 for ( ; mpnt && mpnt->vm_start < addr+len; mpnt = *npp) {
694 *npp = mpnt->vm_next;
695 mpnt->vm_next = free;
696 free = mpnt;
697 avl_remove(mpnt, ¤t->mm->mmap_avl);
698 }
699
700 if (free == NULL)
701 return 0;
702
703
704
705
706
707
708
709 while (free) {
710 unsigned long st, end;
711
712 mpnt = free;
713 free = free->vm_next;
714
715 remove_shared_vm_struct(mpnt);
716
717 st = addr < mpnt->vm_start ? mpnt->vm_start : addr;
718 end = addr+len;
719 end = end > mpnt->vm_end ? mpnt->vm_end : end;
720
721 if (mpnt->vm_ops && mpnt->vm_ops->unmap)
722 mpnt->vm_ops->unmap(mpnt, st, end-st);
723 zap_page_range(current->mm, st, end-st);
724 unmap_fixup(mpnt, st, end-st);
725 kfree(mpnt);
726 }
727
728 zap_page_range(current->mm, addr, len);
729 return 0;
730 }
731
732
733 void build_mmap_avl(struct mm_struct * mm)
734 {
735 struct vm_area_struct * vma;
736
737 mm->mmap_avl = NULL;
738 for (vma = mm->mmap; vma; vma = vma->vm_next)
739 avl_insert(vma, &mm->mmap_avl);
740 }
741
742
/*
 * Release every mapping of an mm being torn down.  For each area:
 * notify vm_ops (unmap, then close), remove it from its inode's share
 * ring, free its page range, drop the inode reference, free the vma.
 */
void exit_mmap(struct mm_struct * mm)
{
	struct vm_area_struct * mpnt;

	mpnt = mm->mmap;
	/* detach the list and tree first so the mm is left empty */
	mm->mmap = NULL;
	mm->mmap_avl = NULL;
	while (mpnt) {
		struct vm_area_struct * next = mpnt->vm_next;
		if (mpnt->vm_ops) {
			if (mpnt->vm_ops->unmap)
				mpnt->vm_ops->unmap(mpnt, mpnt->vm_start, mpnt->vm_end-mpnt->vm_start);
			if (mpnt->vm_ops->close)
				mpnt->vm_ops->close(mpnt);
		}
		remove_shared_vm_struct(mpnt);
		zap_page_range(mm, mpnt->vm_start, mpnt->vm_end-mpnt->vm_start);
		if (mpnt->vm_inode)
			iput(mpnt->vm_inode);
		kfree(mpnt);
		mpnt = next;
	}
}
766
767
768
769
770
/*
 * Insert vmp into task t's vma structures: link it into both the AVL
 * tree and the address-ordered linear list and, if it maps an inode,
 * into that inode's circular i_mmap share ring.
 */
void insert_vm_struct(struct task_struct *t, struct vm_area_struct *vmp)
{
	struct vm_area_struct *share;
	struct inode * inode;

#if 0
	/* old list-only insertion, kept for reference */
	struct vm_area_struct **p, *mpnt;

	p = &t->mm->mmap;
	while ((mpnt = *p) != NULL) {
		if (mpnt->vm_start > vmp->vm_start)
			break;
		if (mpnt->vm_end > vmp->vm_start)
			printk("insert_vm_struct: overlapping memory areas\n");
		p = &mpnt->vm_next;
	}
	vmp->vm_next = mpnt;
	*p = vmp;
#else
	struct vm_area_struct * prev, * next;

	avl_insert_neighbours(vmp, &t->mm->mmap_avl, &prev, &next);
	if ((prev ? prev->vm_next : t->mm->mmap) != next)
		printk("insert_vm_struct: tree inconsistent with list\n");
	/* splice vmp into the list between prev and next */
	if (prev)
		prev->vm_next = vmp;
	else
		t->mm->mmap = vmp;
	vmp->vm_next = next;
#endif

	inode = vmp->vm_inode;
	if (!inode)
		return;

	/* join the inode's share ring, or start one if it is empty */
	if ((share = inode->i_mmap)) {
		vmp->vm_next_share = share->vm_next_share;
		vmp->vm_next_share->vm_prev_share = vmp;
		share->vm_next_share = vmp;
		vmp->vm_prev_share = share;
	} else
		inode->i_mmap = vmp->vm_next_share = vmp->vm_prev_share = vmp;
}
815
816
817
818
819 void remove_shared_vm_struct(struct vm_area_struct *mpnt)
820 {
821 struct inode * inode = mpnt->vm_inode;
822
823 if (!inode)
824 return;
825
826 if (mpnt->vm_next_share == mpnt) {
827 if (inode->i_mmap != mpnt)
828 printk("Inode i_mmap ring corrupted\n");
829 inode->i_mmap = NULL;
830 return;
831 }
832
833 if (inode->i_mmap == mpnt)
834 inode->i_mmap = mpnt->vm_next_share;
835
836 mpnt->vm_prev_share->vm_next_share = mpnt->vm_next_share;
837 mpnt->vm_next_share->vm_prev_share = mpnt->vm_prev_share;
838 }
839
840
841
842
843
844
845
846
/*
 * Merge adjacent compatible vmas inside [start_addr, end_addr).  Two
 * neighbours are combined when they touch and have identical flags,
 * ops, inode and pte, and (for file or SHM mappings) contiguous file
 * offsets.  Keeps the map small after mmap and friends.
 */
void merge_segments (struct task_struct * task, unsigned long start_addr, unsigned long end_addr)
{
	struct vm_area_struct *prev, *mpnt, *next;

	mpnt = find_vma(task, start_addr);
	if (!mpnt)
		return;
	avl_neighbours(mpnt, task->mm->mmap_avl, &prev, &next);
	/* we now have prev->vm_next == mpnt, or mpnt heads the list */

	if (!prev) {
		prev = mpnt;
		mpnt = next;
	}

	/* prev and mpnt walk the list in lock-step while in range */
	for ( ; mpnt && prev->vm_start < end_addr ; prev = mpnt, mpnt = next) {
#if 0
		printk("looping in merge_segments, mpnt=0x%lX\n", (unsigned long) mpnt);
#endif

		next = mpnt->vm_next;

		/* to merge, the two areas must be fully compatible */
		if (mpnt->vm_inode != prev->vm_inode)
			continue;
		if (mpnt->vm_pte != prev->vm_pte)
			continue;
		if (mpnt->vm_ops != prev->vm_ops)
			continue;
		if (mpnt->vm_flags != prev->vm_flags)
			continue;
		if (prev->vm_end != mpnt->vm_start)
			continue;

		/*
		 * For file or SHM mappings the file offsets must also be
		 * contiguous; anonymous areas have no such constraint.
		 */
		if ((mpnt->vm_inode != NULL) || (mpnt->vm_flags & VM_SHM)) {
			if (prev->vm_offset + prev->vm_end - prev->vm_start != mpnt->vm_offset)
				continue;
		}

		/*
		 * Merge: extend prev over mpnt, unlink mpnt from tree and
		 * list, then dispose of it.  vm_start is moved up to
		 * vm_end so any close() callback sees an empty area.
		 */
		avl_remove(mpnt, &task->mm->mmap_avl);
		prev->vm_end = mpnt->vm_end;
		prev->vm_next = mpnt->vm_next;
		if (mpnt->vm_ops && mpnt->vm_ops->close) {
			mpnt->vm_offset += mpnt->vm_end - mpnt->vm_start;
			mpnt->vm_start = mpnt->vm_end;
			mpnt->vm_ops->close(mpnt);
		}
		remove_shared_vm_struct(mpnt);
		if (mpnt->vm_inode)
			mpnt->vm_inode->i_count--;
		kfree_s(mpnt, sizeof(*mpnt));
		mpnt = prev;	/* retry merging prev with the new next */
	}
}