This source file includes the following definitions:
- do_mmap
- get_unmapped_area
- avl_neighbours
- avl_rebalance
- avl_insert
- avl_insert_neighbours
- avl_remove
- printk_list
- printk_avl
- avl_checkheights
- avl_checkleft
- avl_checkright
- avl_checkorder
- avl_check
- unmap_fixup
- sys_munmap
- do_munmap
- build_mmap_avl
- exit_mmap
- insert_vm_struct
- remove_shared_vm_struct
- merge_segments
/*
 *	linux/mm/mmap.c
 */
#include <linux/stat.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/shm.h>
#include <linux/errno.h>
#include <linux/mman.h>
#include <linux/string.h>
#include <linux/malloc.h>

#include <asm/segment.h>
#include <asm/system.h>
#include <asm/pgtable.h>
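
/*
 * Map the sixteen VM_{READ,WRITE,EXEC,SHARED} flag combinations to
 * hardware page protections. do_mmap() below indexes this table with
 * the low four bits of vm_flags:
 *
 *	vma->vm_page_prot = protection_map[vma->vm_flags & 0x0f];
 */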
pgprot_t protection_map[16] = {
	__P000, __P001, __P010, __P011, __P100, __P101, __P110, __P111,
	__S000, __S001, __S010, __S011, __S100, __S101, __S110, __S111
};

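/*
 * do_mmap() does the bulk of the mmap() work: it validates the request,
 * builds a vm_area_struct, clears any old mapping in the range, calls
 * the file's specific mmap operation, and finally inserts and merges
 * the new area into the current process's address space.
 */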
unsigned long do_mmap(struct file * file, unsigned long addr, unsigned long len,
	unsigned long prot, unsigned long flags, unsigned long off)
{
	struct vm_area_struct * vma;

	if ((len = PAGE_ALIGN(len)) == 0)
		return addr;

	if (addr > TASK_SIZE || len > TASK_SIZE || addr > TASK_SIZE-len)
		return -EINVAL;

	/* offset overflow? */
	if (off + len < off)
		return -EINVAL;

	/* mlock MCL_FUTURE? */
	if (current->mm->def_flags & VM_LOCKED) {
		unsigned long locked = current->mm->locked_vm << PAGE_SHIFT;
		locked += len;
		if (locked > current->rlim[RLIMIT_MEMLOCK].rlim_cur)
			return -EAGAIN;
	}

	/*
	 * Check the file's access mode against the requested map type:
	 * a shared writable mapping needs the file open for writing,
	 * and any file mapping needs it open for reading.
	 */
	if (file != NULL) {
		switch (flags & MAP_TYPE) {
		case MAP_SHARED:
			if ((prot & PROT_WRITE) && !(file->f_mode & 2))
				return -EACCES;

			/* make sure there are no mandatory locks on the file */
			if (locks_verify_locked(file->f_inode))
				return -EAGAIN;
			/* fall through */
		case MAP_PRIVATE:
			if (!(file->f_mode & 1))
				return -EACCES;
			break;

		default:
			return -EINVAL;
		}
		if (flags & MAP_DENYWRITE) {
			if (file->f_inode->i_writecount > 0)
				return -ETXTBSY;
		}
	} else if ((flags & MAP_TYPE) != MAP_PRIVATE)
		return -EINVAL;

	/*
	 * Obtain the address to map to: a MAP_FIXED request must be
	 * honoured exactly; otherwise find a free area of the right size.
	 */
	if (flags & MAP_FIXED) {
		if (addr & ~PAGE_MASK)
			return -EINVAL;
		if (len > TASK_SIZE || addr > TASK_SIZE - len)
			return -EINVAL;
	} else {
		addr = get_unmapped_area(addr, len);
		if (!addr)
			return -ENOMEM;
	}

	/*
	 * Determine the object being mapped; mapping a file requires a
	 * specific mmap operation.
	 */
	if (file && (!file->f_op || !file->f_op->mmap))
		return -ENODEV;

	vma = (struct vm_area_struct *)kmalloc(sizeof(struct vm_area_struct),
		GFP_KERNEL);
	if (!vma)
		return -ENOMEM;

	vma->vm_mm = current->mm;
	vma->vm_start = addr;
	vma->vm_end = addr + len;
	vma->vm_flags = prot & (VM_READ | VM_WRITE | VM_EXEC);
	vma->vm_flags |= flags & (VM_GROWSDOWN | VM_DENYWRITE | VM_EXECUTABLE);
	vma->vm_flags |= current->mm->def_flags;

	if (file) {
		if (file->f_mode & 1)
			vma->vm_flags |= VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC;
		if (flags & MAP_SHARED) {
			vma->vm_flags |= VM_SHARED | VM_MAYSHARE;

			/*
			 * A shared mapping of a file we cannot write is
			 * demoted to a private read-only mapping: clear
			 * VM_MAYWRITE and VM_SHARED so write access can
			 * never be gained later.
			 */
			if (!(file->f_mode & 2))
				vma->vm_flags &= ~(VM_MAYWRITE | VM_SHARED);
		}
	} else
		vma->vm_flags |= VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC;
	vma->vm_page_prot = protection_map[vma->vm_flags & 0x0f];
	vma->vm_ops = NULL;
	vma->vm_offset = off;
	vma->vm_inode = NULL;
	vma->vm_pte = 0;

	do_munmap(addr, len);	/* Clear old maps */

	if (file) {
		int error = file->f_op->mmap(file->f_inode, file, vma);

		if (error) {
			kfree(vma);
			return error;
		}
	}

	flags = vma->vm_flags;
	insert_vm_struct(current, vma);
	merge_segments(current, vma->vm_start, vma->vm_end);

	/* merge_segments may have merged our vma, so we cannot use it any more */
	current->mm->total_vm += len >> PAGE_SHIFT;
	if (flags & VM_LOCKED) {
		unsigned long start = addr;
		current->mm->locked_vm += len >> PAGE_SHIFT;
		do {
			/* touch each page to fault it in; the dummy asm
			   keeps gcc from optimising the read away */
			char c = get_user((char *) start);
			len -= PAGE_SIZE;
			start += PAGE_SIZE;
			__asm__ __volatile__("": :"r" (c));
		} while (len > 0);
	}
	return addr;
}

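/*
 * Get an address range which is currently unmapped; used by do_mmap()
 * when MAP_FIXED is not given. A return value of 0 means "no memory".
 */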
unsigned long get_unmapped_area(unsigned long addr, unsigned long len)
{
	struct vm_area_struct * vmm;

	if (len > TASK_SIZE)
		return 0;
	if (!addr)
		addr = TASK_SIZE / 3;
	addr = PAGE_ALIGN(addr);

	for (vmm = find_vma(current, addr); ; vmm = vmm->vm_next) {
		/* At this point: (!vmm || addr < vmm->vm_end). */
		if (TASK_SIZE - len < addr)
			return 0;
		if (!vmm || addr + len <= vmm->vm_start)
			return addr;
		addr = vmm->vm_end;
	}
}

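/*
 * Searching the linear VMA list task->mm->mmap is O(n). The routines
 * below therefore keep the same VMAs in an AVL (Adelson-Velskii and
 * Landis) balanced binary tree as well, rooted at task->mm->mmap_avl
 * and keyed by vm_end, so a lookup costs O(log n). A minimal usage
 * sketch (this is literally what build_mmap_avl() below does):
 *
 *	mm->mmap_avl = NULL;
 *	for (vma = mm->mmap; vma; vma = vma->vm_next)
 *		avl_insert(vma, &mm->mmap_avl);
 */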
#define vm_avl_key	vm_end
#define vm_avl_key_t	unsigned long

/*
 * task->mm->mmap_avl is the root of an AVL tree holding the same VMAs
 * as the task->mm->mmap list. Each vm_area_struct carries vm_avl_left,
 * vm_avl_right and vm_avl_height. The empty tree is represented as a
 * NULL pointer (build_mmap_avl() below starts from mmap_avl == NULL).
 */
#define avl_empty	(struct vm_area_struct *) NULL

#define avl_maxheight	41
#define heightof(tree)	((tree) == avl_empty ? 0 : (tree)->vm_avl_height)

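/*
 * Tree invariants (checked by the DEBUG_AVL code further down):
 * 1. tree->vm_avl_height == 1 + max(heightof(left), heightof(right))
 * 2. |heightof(left) - heightof(right)| <= 1
 * 3. left subtree keys < tree->vm_avl_key < right subtree keys
 *
 * avl_neighbours() looks node up in the tree and returns its list
 * predecessor and successor (or NULL) in *to_the_left and *to_the_right.
 */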
static inline void avl_neighbours (struct vm_area_struct * node, struct vm_area_struct * tree, struct vm_area_struct ** to_the_left, struct vm_area_struct ** to_the_right)
{
	vm_avl_key_t key = node->vm_avl_key;

	*to_the_left = *to_the_right = NULL;
	for (;;) {
		if (tree == avl_empty) {
			printk("avl_neighbours: node not found in the tree\n");
			return;
		}
		if (key == tree->vm_avl_key)
			break;
		if (key < tree->vm_avl_key) {
			*to_the_right = tree;
			tree = tree->vm_avl_left;
		} else {
			*to_the_left = tree;
			tree = tree->vm_avl_right;
		}
	}
	if (tree != node) {
		printk("avl_neighbours: node not exactly found in the tree\n");
		return;
	}
	if (tree->vm_avl_left != avl_empty) {
		/* predecessor is the rightmost node of the left subtree */
		struct vm_area_struct * node;
		for (node = tree->vm_avl_left; node->vm_avl_right != avl_empty; node = node->vm_avl_right)
			continue;
		*to_the_left = node;
	}
	if (tree->vm_avl_right != avl_empty) {
		/* successor is the leftmost node of the right subtree */
		struct vm_area_struct * node;
		for (node = tree->vm_avl_right; node->vm_avl_left != avl_empty; node = node->vm_avl_left)
			continue;
		*to_the_right = node;
	}
	if ((*to_the_left && ((*to_the_left)->vm_next != node)) || (node->vm_next != *to_the_right))
		printk("avl_neighbours: tree inconsistent with list\n");
}

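/*
 * Rebalance a tree after an insertion or deletion. nodeplaces_ptr
 * points just past an array of count pointers into the tree, running
 * from the root down along the search path to the modified subtree;
 * walking back up, each node is rotated if its subtrees' heights differ
 * by more than one, and its cached vm_avl_height is updated.
 */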
static inline void avl_rebalance (struct vm_area_struct *** nodeplaces_ptr, int count)
{
	for ( ; count > 0 ; count--) {
		struct vm_area_struct ** nodeplace = *--nodeplaces_ptr;
		struct vm_area_struct * node = *nodeplace;
		struct vm_area_struct * nodeleft = node->vm_avl_left;
		struct vm_area_struct * noderight = node->vm_avl_right;
		int heightleft = heightof(nodeleft);
		int heightright = heightof(noderight);
		if (heightright + 1 < heightleft) {
			/* The left subtree is too high: rotate right. */
			struct vm_area_struct * nodeleftleft = nodeleft->vm_avl_left;
			struct vm_area_struct * nodeleftright = nodeleft->vm_avl_right;
			int heightleftright = heightof(nodeleftright);
			if (heightof(nodeleftleft) >= heightleftright) {
				/* Single rotation: nodeleft becomes the new root
				   of this subtree, node its right son. */
				node->vm_avl_left = nodeleftright; nodeleft->vm_avl_right = node;
				nodeleft->vm_avl_height = 1 + (node->vm_avl_height = 1 + heightleftright);
				*nodeplace = nodeleft;
			} else {
				/* Double rotation: nodeleftright becomes the new
				   root, with nodeleft and node as its sons. */
				nodeleft->vm_avl_right = nodeleftright->vm_avl_left;
				node->vm_avl_left = nodeleftright->vm_avl_right;
				nodeleftright->vm_avl_left = nodeleft;
				nodeleftright->vm_avl_right = node;
				nodeleft->vm_avl_height = node->vm_avl_height = heightleftright;
				nodeleftright->vm_avl_height = heightleft;
				*nodeplace = nodeleftright;
			}
		}
		else if (heightleft + 1 < heightright) {
			/* The right subtree is too high: mirror image of the above. */
			struct vm_area_struct * noderightright = noderight->vm_avl_right;
			struct vm_area_struct * noderightleft = noderight->vm_avl_left;
			int heightrightleft = heightof(noderightleft);
			if (heightof(noderightright) >= heightrightleft) {
				node->vm_avl_right = noderightleft; noderight->vm_avl_left = node;
				noderight->vm_avl_height = 1 + (node->vm_avl_height = 1 + heightrightleft);
				*nodeplace = noderight;
			} else {
				noderight->vm_avl_left = noderightleft->vm_avl_right;
				node->vm_avl_right = noderightleft->vm_avl_left;
				noderightleft->vm_avl_right = noderight;
				noderightleft->vm_avl_left = node;
				noderight->vm_avl_height = node->vm_avl_height = heightrightleft;
				noderightleft->vm_avl_height = heightright;
				*nodeplace = noderightleft;
			}
		}
		else {
			int height = (heightleft<heightright ? heightright : heightleft) + 1;
			if (height == node->vm_avl_height)
				break;
			node->vm_avl_height = height;
		}
	}
}

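/* Insert a node into a tree, rebalancing along the search path. */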
static inline void avl_insert (struct vm_area_struct * new_node, struct vm_area_struct ** ptree)
{
	vm_avl_key_t key = new_node->vm_avl_key;
	struct vm_area_struct ** nodeplace = ptree;
	struct vm_area_struct ** stack[avl_maxheight];
	int stack_count = 0;
	struct vm_area_struct *** stack_ptr = &stack[0];	/* = &stack[stack_count] */
	for (;;) {
		struct vm_area_struct * node = *nodeplace;
		if (node == avl_empty)
			break;
		*stack_ptr++ = nodeplace; stack_count++;
		if (key < node->vm_avl_key)
			nodeplace = &node->vm_avl_left;
		else
			nodeplace = &node->vm_avl_right;
	}
	new_node->vm_avl_left = avl_empty;
	new_node->vm_avl_right = avl_empty;
	new_node->vm_avl_height = 1;
	*nodeplace = new_node;
	avl_rebalance(stack_ptr,stack_count);
}

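/*
 * Insert a node into a tree, and at the same time return the node's
 * list neighbours in *to_the_left and *to_the_right, so the caller can
 * link it into the linear list without a second lookup.
 */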
static inline void avl_insert_neighbours (struct vm_area_struct * new_node, struct vm_area_struct ** ptree,
	struct vm_area_struct ** to_the_left, struct vm_area_struct ** to_the_right)
{
	vm_avl_key_t key = new_node->vm_avl_key;
	struct vm_area_struct ** nodeplace = ptree;
	struct vm_area_struct ** stack[avl_maxheight];
	int stack_count = 0;
	struct vm_area_struct *** stack_ptr = &stack[0];
	*to_the_left = *to_the_right = NULL;
	for (;;) {
		struct vm_area_struct * node = *nodeplace;
		if (node == avl_empty)
			break;
		*stack_ptr++ = nodeplace; stack_count++;
		if (key < node->vm_avl_key) {
			*to_the_right = node;
			nodeplace = &node->vm_avl_left;
		} else {
			*to_the_left = node;
			nodeplace = &node->vm_avl_right;
		}
	}
	new_node->vm_avl_left = avl_empty;
	new_node->vm_avl_right = avl_empty;
	new_node->vm_avl_height = 1;
	*nodeplace = new_node;
	avl_rebalance(stack_ptr,stack_count);
}

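/* Remove a node from a tree, rebalancing along the search path. */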
static inline void avl_remove (struct vm_area_struct * node_to_delete, struct vm_area_struct ** ptree)
{
	vm_avl_key_t key = node_to_delete->vm_avl_key;
	struct vm_area_struct ** nodeplace = ptree;
	struct vm_area_struct ** stack[avl_maxheight];
	int stack_count = 0;
	struct vm_area_struct *** stack_ptr = &stack[0];
	struct vm_area_struct ** nodeplace_to_delete;
	for (;;) {
		struct vm_area_struct * node = *nodeplace;
		if (node == avl_empty) {
			/* what? node_to_delete not found in tree? */
			printk("avl_remove: node to delete not found in tree\n");
			return;
		}
		*stack_ptr++ = nodeplace; stack_count++;
		if (key == node->vm_avl_key)
			break;
		if (key < node->vm_avl_key)
			nodeplace = &node->vm_avl_left;
		else
			nodeplace = &node->vm_avl_right;
	}
	nodeplace_to_delete = nodeplace;

	if (node_to_delete->vm_avl_left == avl_empty) {
		*nodeplace_to_delete = node_to_delete->vm_avl_right;
		stack_ptr--; stack_count--;
	} else {
		/* Replace node_to_delete with its in-order predecessor:
		   the rightmost node of the left subtree. */
		struct vm_area_struct *** stack_ptr_to_delete = stack_ptr;
		struct vm_area_struct ** nodeplace = &node_to_delete->vm_avl_left;
		struct vm_area_struct * node;
		for (;;) {
			node = *nodeplace;
			if (node->vm_avl_right == avl_empty)
				break;
			*stack_ptr++ = nodeplace; stack_count++;
			nodeplace = &node->vm_avl_right;
		}
		*nodeplace = node->vm_avl_left;
		/* node replaces node_to_delete */
		node->vm_avl_left = node_to_delete->vm_avl_left;
		node->vm_avl_right = node_to_delete->vm_avl_right;
		node->vm_avl_height = node_to_delete->vm_avl_height;
		*nodeplace_to_delete = node;
		*stack_ptr_to_delete = &node->vm_avl_left;
	}
	avl_rebalance(stack_ptr,stack_count);
}

#ifdef DEBUG_AVL

/* print a list */
static void printk_list (struct vm_area_struct * vma)
{
	printk("[");
	while (vma) {
		printk("%08lX-%08lX", vma->vm_start, vma->vm_end);
		vma = vma->vm_next;
		if (!vma)
			break;
		printk(" ");
	}
	printk("]");
}

/* print a tree */
static void printk_avl (struct vm_area_struct * tree)
{
	if (tree != avl_empty) {
		printk("(");
		if (tree->vm_avl_left != avl_empty) {
			printk_avl(tree->vm_avl_left);
			printk("<");
		}
		printk("%08lX-%08lX", tree->vm_start, tree->vm_end);
		if (tree->vm_avl_right != avl_empty) {
			printk(">");
			printk_avl(tree->vm_avl_right);
		}
		printk(")");
	}
}

static char *avl_check_point = "somewhere";

/* check that the cached heights are consistent */
static void avl_checkheights (struct vm_area_struct * tree)
{
	int h, hl, hr;

	if (tree == avl_empty)
		return;
	avl_checkheights(tree->vm_avl_left);
	avl_checkheights(tree->vm_avl_right);
	h = tree->vm_avl_height;
	hl = heightof(tree->vm_avl_left);
	hr = heightof(tree->vm_avl_right);
	if ((h == hl+1) && (hr <= hl) && (hl <= hr+1))
		return;
	if ((h == hr+1) && (hl <= hr) && (hr <= hl+1))
		return;
	printk("%s: avl_checkheights: heights inconsistent\n",avl_check_point);
}

/* check that all keys stored in a tree are < key */
static void avl_checkleft (struct vm_area_struct * tree, vm_avl_key_t key)
{
	if (tree == avl_empty)
		return;
	avl_checkleft(tree->vm_avl_left,key);
	avl_checkleft(tree->vm_avl_right,key);
	if (tree->vm_avl_key < key)
		return;
	printk("%s: avl_checkleft: left key %lu >= top key %lu\n",avl_check_point,tree->vm_avl_key,key);
}

/* check that all keys stored in a tree are > key */
static void avl_checkright (struct vm_area_struct * tree, vm_avl_key_t key)
{
	if (tree == avl_empty)
		return;
	avl_checkright(tree->vm_avl_left,key);
	avl_checkright(tree->vm_avl_right,key);
	if (tree->vm_avl_key > key)
		return;
	printk("%s: avl_checkright: right key %lu <= top key %lu\n",avl_check_point,tree->vm_avl_key,key);
}

/* check that the keys are properly ordered */
static void avl_checkorder (struct vm_area_struct * tree)
{
	if (tree == avl_empty)
		return;
	avl_checkorder(tree->vm_avl_left);
	avl_checkorder(tree->vm_avl_right);
	avl_checkleft(tree->vm_avl_left,tree->vm_avl_key);
	avl_checkright(tree->vm_avl_right,tree->vm_avl_key);
}

/* all checks */
static void avl_check (struct task_struct * task, char *caller)
{
	avl_check_point = caller;
	avl_checkheights(task->mm->mmap_avl);
	avl_checkorder(task->mm->mmap_avl);
}

#endif

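/*
 * Fix up an area after part of it has been unmapped. By the time this
 * is called the area has been removed from the process mapping list,
 * so whatever survives is reinserted here. The four cases are:
 * unmapping the whole area, cutting off the start, cutting off the
 * end, and punching a hole in the middle (which needs a second
 * vm_area_struct for the part behind the hole).
 */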
static void unmap_fixup(struct vm_area_struct *area,
	unsigned long addr, size_t len)
{
	struct vm_area_struct *mpnt;
	unsigned long end = addr + len;

	if (addr < area->vm_start || addr >= area->vm_end ||
	    end <= area->vm_start || end > area->vm_end ||
	    end < addr)
	{
		printk("unmap_fixup: area=%lx-%lx, unmap %lx-%lx!!\n",
			area->vm_start, area->vm_end, addr, end);
		return;
	}
	area->vm_mm->total_vm -= len >> PAGE_SHIFT;
	if (area->vm_flags & VM_LOCKED)
		area->vm_mm->locked_vm -= len >> PAGE_SHIFT;

	/* Unmapping the whole area */
	if (addr == area->vm_start && end == area->vm_end) {
		if (area->vm_ops && area->vm_ops->close)
			area->vm_ops->close(area);
		if (area->vm_inode)
			iput(area->vm_inode);
		return;
	}

	/* Work out to one of the ends */
	if (end == area->vm_end)
		area->vm_end = addr;
	else
	if (addr == area->vm_start) {
		area->vm_offset += (end - area->vm_start);
		area->vm_start = end;
	}
	else {
		/* Unmapping a hole: area->vm_start < addr <= end < area->vm_end */
		/* Add the end mapping -- the beginning is handled below */
		mpnt = (struct vm_area_struct *)kmalloc(sizeof(*mpnt), GFP_KERNEL);

		if (!mpnt)
			return;
		*mpnt = *area;
		mpnt->vm_offset += (end - area->vm_start);
		mpnt->vm_start = end;
		if (mpnt->vm_inode)
			mpnt->vm_inode->i_count++;
		if (mpnt->vm_ops && mpnt->vm_ops->open)
			mpnt->vm_ops->open(mpnt);
		area->vm_end = addr;	/* Truncate area */
		insert_vm_struct(current, mpnt);
	}

	/* Reinsert the remainder of the original area as a fresh
	   vm_area_struct. */
	mpnt = (struct vm_area_struct *)kmalloc(sizeof(*mpnt), GFP_KERNEL);
	if (!mpnt)
		return;
	*mpnt = *area;
	if (mpnt->vm_ops && mpnt->vm_ops->open)
		mpnt->vm_ops->open(mpnt);
	if (area->vm_ops && area->vm_ops->close) {
		area->vm_end = area->vm_start;
		area->vm_ops->close(area);
	}
	insert_vm_struct(current, mpnt);
}

asmlinkage int sys_munmap(unsigned long addr, size_t len)
{
	return do_munmap(addr, len);
}

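/*
 * Munmap is split into two main parts: this part, which finds what
 * needs doing and detaches the affected areas from the list and tree,
 * and unmap_fixup() above, which adjusts or reinserts the areas
 * themselves. Partial unmappings are handled.
 */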
int do_munmap(unsigned long addr, size_t len)
{
	struct vm_area_struct *mpnt, *prev, *next, **npp, *free;

	if ((addr & ~PAGE_MASK) || addr > TASK_SIZE || len > TASK_SIZE-addr)
		return -EINVAL;

	if ((len = PAGE_ALIGN(len)) == 0)
		return 0;

	/*
	 * Check if this memory area is ok - put it on the temporary
	 * list if so. Every area affected in any way (by any overlap)
	 * is put on the list; if nothing is put on, nothing is affected.
	 */
	mpnt = find_vma(current, addr);
	if (!mpnt)
		return 0;
	avl_neighbours(mpnt, current->mm->mmap_avl, &prev, &next);
	/* prev (if any) precedes mpnt in the list; next follows it */

	npp = (prev ? &prev->vm_next : &current->mm->mmap);
	free = NULL;
	for ( ; mpnt && mpnt->vm_start < addr+len; mpnt = *npp) {
		*npp = mpnt->vm_next;
		mpnt->vm_next = free;
		free = mpnt;
		avl_remove(mpnt, &current->mm->mmap_avl);
	}

	if (free == NULL)
		return 0;

	/*
	 * Ok - we have the memory areas we should free on the 'free'
	 * list, so release them, and unmap the page range. If one of
	 * the segments is only being partially unmapped, unmap_fixup()
	 * will put new vm_area_struct(s) back into the address space.
	 */
	while (free) {
		unsigned long st, end;

		mpnt = free;
		free = free->vm_next;

		remove_shared_vm_struct(mpnt);

		st = addr < mpnt->vm_start ? mpnt->vm_start : addr;
		end = addr+len;
		end = end > mpnt->vm_end ? mpnt->vm_end : end;

		if (mpnt->vm_ops && mpnt->vm_ops->unmap)
			mpnt->vm_ops->unmap(mpnt, st, end-st);
		zap_page_range(current->mm, st, end-st);
		unmap_fixup(mpnt, st, end-st);
		kfree(mpnt);
	}

	zap_page_range(current->mm, addr, len);
	return 0;
}

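/* Build the AVL tree corresponding to the VMA list. */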
void build_mmap_avl(struct mm_struct * mm)
{
	struct vm_area_struct * vma;

	mm->mmap_avl = NULL;
	for (vma = mm->mmap; vma; vma = vma->vm_next)
		avl_insert(vma, &mm->mmap_avl);
}

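/* Release all mmaps: tear down every area of an exiting mm. */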
void exit_mmap(struct mm_struct * mm)
{
	struct vm_area_struct * mpnt;

	mpnt = mm->mmap;
	mm->mmap = NULL;
	mm->mmap_avl = NULL;
	mm->rss = 0;
	mm->total_vm = 0;
	mm->locked_vm = 0;
	while (mpnt) {
		struct vm_area_struct * next = mpnt->vm_next;
		if (mpnt->vm_ops) {
			if (mpnt->vm_ops->unmap)
				mpnt->vm_ops->unmap(mpnt, mpnt->vm_start, mpnt->vm_end-mpnt->vm_start);
			if (mpnt->vm_ops->close)
				mpnt->vm_ops->close(mpnt);
		}
		remove_shared_vm_struct(mpnt);
		zap_page_range(mm, mpnt->vm_start, mpnt->vm_end-mpnt->vm_start);
		if (mpnt->vm_inode)
			iput(mpnt->vm_inode);
		kfree(mpnt);
		mpnt = next;
	}
}

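/*
 * Insert a vm structure into the process list (sorted by address) and,
 * if it maps an inode, into that inode's circular i_mmap share ring.
 */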
void insert_vm_struct(struct task_struct *t, struct vm_area_struct *vmp)
{
	struct vm_area_struct *share;
	struct inode * inode;

#if 0 /* equivalent, but slow: walk the linear list */
	struct vm_area_struct **p, *mpnt;

	p = &t->mm->mmap;
	while ((mpnt = *p) != NULL) {
		if (mpnt->vm_start > vmp->vm_start)
			break;
		if (mpnt->vm_end > vmp->vm_start)
			printk("insert_vm_struct: overlapping memory areas\n");
		p = &mpnt->vm_next;
	}
	vmp->vm_next = mpnt;
	*p = vmp;
#else
	struct vm_area_struct * prev, * next;

	avl_insert_neighbours(vmp, &t->mm->mmap_avl, &prev, &next);
	if ((prev ? prev->vm_next : t->mm->mmap) != next)
		printk("insert_vm_struct: tree inconsistent with list\n");
	if (prev)
		prev->vm_next = vmp;
	else
		t->mm->mmap = vmp;
	vmp->vm_next = next;
#endif

	inode = vmp->vm_inode;
	if (!inode)
		return;

	/* link vmp into the inode's circular share ring, after "share" */
	if ((share = inode->i_mmap)) {
		vmp->vm_next_share = share->vm_next_share;
		vmp->vm_next_share->vm_prev_share = vmp;
		share->vm_next_share = vmp;
		vmp->vm_prev_share = share;
	} else
		inode->i_mmap = vmp->vm_next_share = vmp->vm_prev_share = vmp;
}

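/* Remove one vm structure from the inode's circular i_mmap share ring. */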
void remove_shared_vm_struct(struct vm_area_struct *mpnt)
{
	struct inode * inode = mpnt->vm_inode;

	if (!inode)
		return;

	if (mpnt->vm_next_share == mpnt) {
		if (inode->i_mmap != mpnt)
			printk("Inode i_mmap ring corrupted\n");
		inode->i_mmap = NULL;
		return;
	}

	if (inode->i_mmap == mpnt)
		inode->i_mmap = mpnt->vm_next_share;

	mpnt->vm_prev_share->vm_next_share = mpnt->vm_next_share;
	mpnt->vm_next_share->vm_prev_share = mpnt->vm_prev_share;
}

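/*
 * Merge neighbouring memory segments in [start_addr, end_addr) where
 * possible; redundant vm_area_structs are freed. This assumes the list
 * is ordered by address, so only segments intersecting or adjacent to
 * the interval need to be visited.
 */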
void merge_segments (struct task_struct * task, unsigned long start_addr, unsigned long end_addr)
{
	struct vm_area_struct *prev, *mpnt, *next;

	mpnt = find_vma(task, start_addr);
	if (!mpnt)
		return;
	avl_neighbours(mpnt, task->mm->mmap_avl, &prev, &next);
	/* prev (if any) precedes mpnt in the list; next follows it */

	if (!prev) {
		prev = mpnt;
		mpnt = next;
	}

	/* prev and mpnt cycle through the list while areas still start
	   before end_addr */
	for ( ; mpnt && prev->vm_start < end_addr ; prev = mpnt, mpnt = next) {
#if 0
		printk("looping in merge_segments, mpnt=0x%lX\n", (unsigned long) mpnt);
#endif

		next = mpnt->vm_next;

		/* To merge, the areas must share the same inode, pte,
		   operations and flags, and be contiguous in the address
		   space.. */
		if (mpnt->vm_inode != prev->vm_inode)
			continue;
		if (mpnt->vm_pte != prev->vm_pte)
			continue;
		if (mpnt->vm_ops != prev->vm_ops)
			continue;
		if (mpnt->vm_flags != prev->vm_flags)
			continue;
		if (prev->vm_end != mpnt->vm_start)
			continue;

		/* ..and if we have an inode, the file offsets must be
		   contiguous too */
		if ((mpnt->vm_inode != NULL) || (mpnt->vm_flags & VM_SHM)) {
			if (prev->vm_offset + prev->vm_end - prev->vm_start != mpnt->vm_offset)
				continue;
		}

		/*
		 * Merge prev with mpnt and set up the pointers so the new
		 * big segment can possibly merge with the next one. The
		 * old unused mpnt is freed.
		 */
		avl_remove(mpnt, &task->mm->mmap_avl);
		prev->vm_end = mpnt->vm_end;
		prev->vm_next = mpnt->vm_next;
		if (mpnt->vm_ops && mpnt->vm_ops->close) {
			mpnt->vm_offset += mpnt->vm_end - mpnt->vm_start;
			mpnt->vm_start = mpnt->vm_end;
			mpnt->vm_ops->close(mpnt);
		}
		remove_shared_vm_struct(mpnt);
		if (mpnt->vm_inode)
			mpnt->vm_inode->i_count--;
		kfree_s(mpnt, sizeof(*mpnt));
		mpnt = prev;
	}
}