This source file includes the following definitions:
- do_mmap
- get_unmapped_area
- avl_neighbours
- avl_rebalance
- avl_insert
- avl_insert_neighbours
- avl_remove
- printk_list
- printk_avl
- avl_checkheights
- avl_checkleft
- avl_checkright
- avl_checkorder
- avl_check
- unmap_fixup
- sys_munmap
- do_munmap
- build_mmap_avl
- exit_mmap
- insert_vm_struct
- remove_shared_vm_struct
- merge_segments
/*
 *	linux/mm/mmap.c
 *
 * Written by obz.
 */
#include <linux/stat.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/shm.h>
#include <linux/errno.h>
#include <linux/mman.h>
#include <linux/string.h>
#include <linux/malloc.h>

#include <asm/segment.h>
#include <asm/system.h>
#include <asm/pgtable.h>

/*
 * Page protections for each combination of mapping type and prot:
 * indexed by the low four vm_flags bits
 * (VM_READ | VM_WRITE | VM_EXEC | VM_SHARED).
 */
pgprot_t protection_map[16] = {
	__P000, __P001, __P010, __P011, __P100, __P101, __P110, __P111,
	__S000, __S001, __S010, __S011, __S100, __S101, __S110, __S111
};
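
/*
 * Example (illustrative, not part of the original file): a
 * PROT_READ|PROT_WRITE private mapping has
 * (vm_flags & 0x0f) == (VM_READ | VM_WRITE) == 0x3, so it gets
 * protection_map[3] == __P011 -- on i386 that is PAGE_COPY, i.e. the
 * hardware write bit stays clear so the first write faults and
 * triggers copy-on-write.
 */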

unsigned long do_mmap(struct file * file, unsigned long addr, unsigned long len,
	unsigned long prot, unsigned long flags, unsigned long off)
{
	struct vm_area_struct * vma;

	if ((len = PAGE_ALIGN(len)) == 0)
		return addr;

	if (addr > TASK_SIZE || len > TASK_SIZE || addr > TASK_SIZE-len)
		return -EINVAL;

	/* offset overflow? */
	if (off + len < off)
		return -EINVAL;

	/* mlock MCL_FUTURE? */
	if (current->mm->def_flags & VM_LOCKED) {
		unsigned long locked = current->mm->locked_vm << PAGE_SHIFT;
		locked += len;
		if (locked > current->rlim[RLIMIT_MEMLOCK].rlim_cur)
			return -EAGAIN;
	}

	/*
	 * Do simple checking here so the lower-level routines won't have
	 * to.  We assume access permissions have been handled by the open
	 * of the memory object, so we don't do any here.
	 */
	if (file != NULL) {
		switch (flags & MAP_TYPE) {
		case MAP_SHARED:
			if ((prot & PROT_WRITE) && !(file->f_mode & 2))
				return -EACCES;
			/* fall through: shared mappings need read access too */
		case MAP_PRIVATE:
			if (!(file->f_mode & 1))
				return -EACCES;
			break;

		default:
			return -EINVAL;
		}
		if (flags & MAP_DENYWRITE) {
			if (file->f_inode->i_writecount > 0)
				return -ETXTBSY;
		}
	} else if ((flags & MAP_TYPE) != MAP_PRIVATE)
		return -EINVAL;

	/*
	 * Obtain the address to map to.  We verify (or select) it and
	 * ensure that it represents a valid section of the address space.
	 */
	if (flags & MAP_FIXED) {
		if (addr & ~PAGE_MASK)
			return -EINVAL;
		if (len > TASK_SIZE || addr > TASK_SIZE - len)
			return -EINVAL;
	} else {
		addr = get_unmapped_area(addr, len);
		if (!addr)
			return -ENOMEM;
	}

	/*
	 * Determine the object being mapped and call the appropriate
	 * specific mapper.
	 */
	if (file && (!file->f_op || !file->f_op->mmap))
		return -ENODEV;

	vma = (struct vm_area_struct *)kmalloc(sizeof(struct vm_area_struct),
		GFP_KERNEL);
	if (!vma)
		return -ENOMEM;

	vma->vm_mm = current->mm;
	vma->vm_start = addr;
	vma->vm_end = addr + len;
	vma->vm_flags = prot & (VM_READ | VM_WRITE | VM_EXEC);
	vma->vm_flags |= flags & (VM_GROWSDOWN | VM_DENYWRITE | VM_EXECUTABLE);
	vma->vm_flags |= current->mm->def_flags;

	if (file) {
		if (file->f_mode & 1)
			vma->vm_flags |= VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC;
		if (flags & MAP_SHARED) {
			vma->vm_flags |= VM_SHARED | VM_MAYSHARE;
			/*
			 * With the file open read-only, the shared
			 * mapping can safely be demoted: it may never
			 * become writable, so drop VM_MAYWRITE and
			 * VM_SHARED (VM_MAYSHARE stays set).
			 */
			if (!(file->f_mode & 2))
				vma->vm_flags &= ~(VM_MAYWRITE | VM_SHARED);
		}
	} else
		vma->vm_flags |= VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC;
	vma->vm_page_prot = protection_map[vma->vm_flags & 0x0f];
	vma->vm_ops = NULL;
	vma->vm_offset = off;
	vma->vm_inode = NULL;
	vma->vm_pte = 0;

	/* Clear old maps covering this range */
	do_munmap(addr, len);

	if (file) {
		int error = file->f_op->mmap(file->f_inode, file, vma);

		if (error) {
			kfree(vma);
			return error;
		}
	}

	flags = vma->vm_flags;
	insert_vm_struct(current, vma);
	merge_segments(current, vma->vm_start, vma->vm_end);

	/* merge_segments might have merged our vma, so use the saved flags */
	current->mm->total_vm += len >> PAGE_SHIFT;
	if (flags & VM_LOCKED) {
		unsigned long start = addr;
		current->mm->locked_vm += len >> PAGE_SHIFT;
		do {
			/* touch each page to fault it in right away */
			char c = get_user((char *) start);
			len -= PAGE_SIZE;
			start += PAGE_SIZE;
			__asm__ __volatile__("": :"r" (c));
		} while (len > 0);
	}
	return addr;
}
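
/*
 * Example (illustrative, not part of the original file): an anonymous
 * demand-zero mapping such as
 *
 *	addr = do_mmap(NULL, 0, 2*PAGE_SIZE, PROT_READ | PROT_WRITE,
 *		       MAP_PRIVATE, 0);
 *
 * picks a free range via get_unmapped_area(), builds a vma with
 * vm_inode == NULL and vm_ops == NULL (so page faults get zero pages),
 * and then lets merge_segments() fold it into any compatible
 * neighbouring vma.
 */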

/*
 * Get an address range which is currently unmapped.  For mmap() without
 * MAP_FIXED and shmat() with addr=0.  Return value 0 means ENOMEM.
 */
unsigned long get_unmapped_area(unsigned long addr, unsigned long len)
{
	struct vm_area_struct * vmm;

	if (len > TASK_SIZE)
		return 0;
	if (!addr)
		addr = TASK_SIZE / 3;
	addr = PAGE_ALIGN(addr);

	for (vmm = find_vma(current, addr); ; vmm = vmm->vm_next) {
		/* At this point: (!vmm || addr < vmm->vm_end). */
		if (TASK_SIZE - len < addr)
			return 0;
		if (!vmm || addr + len <= vmm->vm_start)
			return addr;
		addr = vmm->vm_end;
	}
}
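
/*
 * Example (illustrative, assuming TASK_SIZE = 3GB as on i386): with a
 * single vma at 0x40000000-0x40005000, get_unmapped_area(0x40000000,
 * 0x2000) sees the requested range collide with that vma, advances
 * addr to its vm_end (0x40005000), and returns that as the first fit.
 */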

/*
 * Searching a VMA in the linear list task->mm->mmap is horribly slow.
 * Use an AVL (Adelson-Velskii and Landis) tree to speed up this search
 * from O(n) to O(log n).  The list and the tree are kept sorted by
 * address, keyed on vm_end.
 */
#define vm_avl_key	vm_end
#define vm_avl_key_t	unsigned long

/*
 * task->mm->mmap_avl is the root of the AVL tree corresponding to
 * task->mm->mmap.  Each vm_area_struct carries vm_avl_left, vm_avl_right
 * and vm_avl_height = 1 + max(heightof(left), heightof(right)).
 * The empty tree is represented as NULL.
 */
#define avl_empty	(struct vm_area_struct *) NULL

/* Since the trees are balanced, their height will never be large. */
#define avl_maxheight	41
#define heightof(tree)	((tree) == avl_empty ? 0 : (tree)->vm_avl_height)

/*
 * Look up the nodes immediately to the left and to the right of a
 * given node in the tree ordering; consistency with the linear list
 * is checked along the way.
 */
static inline void avl_neighbours (struct vm_area_struct * node, struct vm_area_struct * tree, struct vm_area_struct ** to_the_left, struct vm_area_struct ** to_the_right)
{
	vm_avl_key_t key = node->vm_avl_key;

	*to_the_left = *to_the_right = NULL;
	for (;;) {
		if (tree == avl_empty) {
			printk("avl_neighbours: node not found in the tree\n");
			return;
		}
		if (key == tree->vm_avl_key)
			break;
		if (key < tree->vm_avl_key) {
			*to_the_right = tree;
			tree = tree->vm_avl_left;
		} else {
			*to_the_left = tree;
			tree = tree->vm_avl_right;
		}
	}
	if (tree != node) {
		printk("avl_neighbours: node not exactly found in the tree\n");
		return;
	}
	if (tree->vm_avl_left != avl_empty) {
		struct vm_area_struct * node;
		for (node = tree->vm_avl_left; node->vm_avl_right != avl_empty; node = node->vm_avl_right)
			continue;
		*to_the_left = node;
	}
	if (tree->vm_avl_right != avl_empty) {
		struct vm_area_struct * node;
		for (node = tree->vm_avl_right; node->vm_avl_left != avl_empty; node = node->vm_avl_left)
			continue;
		*to_the_right = node;
	}
	if ((*to_the_left && ((*to_the_left)->vm_next != node)) || (node->vm_next != *to_the_right))
		printk("avl_neighbours: tree inconsistent with list\n");
}

/*
 * Rebalance a tree.
 * After inserting or deleting a node of a tree we have a sequence of
 * subtrees nodes[0]..nodes[k-1] such that nodes[0] is the root and
 * nodes[i+1] = nodes[i]->{vm_avl_left|vm_avl_right}.
 */
static inline void avl_rebalance (struct vm_area_struct *** nodeplaces_ptr, int count)
{
	for ( ; count > 0 ; count--) {
		struct vm_area_struct ** nodeplace = *--nodeplaces_ptr;
		struct vm_area_struct * node = *nodeplace;
		struct vm_area_struct * nodeleft = node->vm_avl_left;
		struct vm_area_struct * noderight = node->vm_avl_right;
		int heightleft = heightof(nodeleft);
		int heightright = heightof(noderight);
		if (heightright + 1 < heightleft) {
			/*
			 * The left subtree is two higher than the right
			 * one: rotate right.
			 */
			struct vm_area_struct * nodeleftleft = nodeleft->vm_avl_left;
			struct vm_area_struct * nodeleftright = nodeleft->vm_avl_right;
			int heightleftright = heightof(nodeleftright);
			if (heightof(nodeleftleft) >= heightleftright) {
				/* Single rotation: nodeleft becomes the new root. */
				node->vm_avl_left = nodeleftright; nodeleft->vm_avl_right = node;
				nodeleft->vm_avl_height = 1 + (node->vm_avl_height = 1 + heightleftright);
				*nodeplace = nodeleft;
			} else {
				/* Double rotation: nodeleftright becomes the new root. */
				nodeleft->vm_avl_right = nodeleftright->vm_avl_left;
				node->vm_avl_left = nodeleftright->vm_avl_right;
				nodeleftright->vm_avl_left = nodeleft;
				nodeleftright->vm_avl_right = node;
				nodeleft->vm_avl_height = node->vm_avl_height = heightleftright;
				nodeleftright->vm_avl_height = heightleft;
				*nodeplace = nodeleftright;
			}
		}
		else if (heightleft + 1 < heightright) {
			/* Mirror image of the case above: rotate left. */
			struct vm_area_struct * noderightright = noderight->vm_avl_right;
			struct vm_area_struct * noderightleft = noderight->vm_avl_left;
			int heightrightleft = heightof(noderightleft);
			if (heightof(noderightright) >= heightrightleft) {
				node->vm_avl_right = noderightleft; noderight->vm_avl_left = node;
				noderight->vm_avl_height = 1 + (node->vm_avl_height = 1 + heightrightleft);
				*nodeplace = noderight;
			} else {
				noderight->vm_avl_left = noderightleft->vm_avl_right;
				node->vm_avl_right = noderightleft->vm_avl_left;
				noderightleft->vm_avl_right = noderight;
				noderightleft->vm_avl_left = node;
				noderight->vm_avl_height = node->vm_avl_height = heightrightleft;
				noderightleft->vm_avl_height = heightright;
				*nodeplace = noderightleft;
			}
		}
		else {
			int height = (heightleft<heightright ? heightright : heightleft) + 1;
			if (height == node->vm_avl_height)
				break;
			node->vm_avl_height = height;
		}
	}
}
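
/*
 * Example (illustrative, not part of the original file): inserting
 * keys in increasing order A < B < C produces a right-leaning chain
 * A->B->C with heightleft == 0 and heightright == 2 at A, so
 * heightleft + 1 < heightright triggers the single left rotation and
 * B becomes the root, with A and C as children of height 1.
 */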

/* Insert a node into a tree. */
static inline void avl_insert (struct vm_area_struct * new_node, struct vm_area_struct ** ptree)
{
	vm_avl_key_t key = new_node->vm_avl_key;
	struct vm_area_struct ** nodeplace = ptree;
	struct vm_area_struct ** stack[avl_maxheight];
	int stack_count = 0;
	struct vm_area_struct *** stack_ptr = &stack[0];
	for (;;) {
		struct vm_area_struct * node = *nodeplace;
		if (node == avl_empty)
			break;
		*stack_ptr++ = nodeplace; stack_count++;
		if (key < node->vm_avl_key)
			nodeplace = &node->vm_avl_left;
		else
			nodeplace = &node->vm_avl_right;
	}
	new_node->vm_avl_left = avl_empty;
	new_node->vm_avl_right = avl_empty;
	new_node->vm_avl_height = 1;
	*nodeplace = new_node;
	avl_rebalance(stack_ptr,stack_count);
}

/*
 * Insert a node into a tree, and report the nodes that become its
 * neighbours in the tree ordering.
 */
static inline void avl_insert_neighbours (struct vm_area_struct * new_node, struct vm_area_struct ** ptree,
	struct vm_area_struct ** to_the_left, struct vm_area_struct ** to_the_right)
{
	vm_avl_key_t key = new_node->vm_avl_key;
	struct vm_area_struct ** nodeplace = ptree;
	struct vm_area_struct ** stack[avl_maxheight];
	int stack_count = 0;
	struct vm_area_struct *** stack_ptr = &stack[0];
	*to_the_left = *to_the_right = NULL;
	for (;;) {
		struct vm_area_struct * node = *nodeplace;
		if (node == avl_empty)
			break;
		*stack_ptr++ = nodeplace; stack_count++;
		if (key < node->vm_avl_key) {
			*to_the_right = node;
			nodeplace = &node->vm_avl_left;
		} else {
			*to_the_left = node;
			nodeplace = &node->vm_avl_right;
		}
	}
	new_node->vm_avl_left = avl_empty;
	new_node->vm_avl_right = avl_empty;
	new_node->vm_avl_height = 1;
	*nodeplace = new_node;
	avl_rebalance(stack_ptr,stack_count);
}

/* Remove a node from a tree. */
static inline void avl_remove (struct vm_area_struct * node_to_delete, struct vm_area_struct ** ptree)
{
	vm_avl_key_t key = node_to_delete->vm_avl_key;
	struct vm_area_struct ** nodeplace = ptree;
	struct vm_area_struct ** stack[avl_maxheight];
	int stack_count = 0;
	struct vm_area_struct *** stack_ptr = &stack[0];
	struct vm_area_struct ** nodeplace_to_delete;
	for (;;) {
		struct vm_area_struct * node = *nodeplace;
		if (node == avl_empty) {
			/* what? node_to_delete not found in tree? */
			printk("avl_remove: node to delete not found in tree\n");
			return;
		}
		*stack_ptr++ = nodeplace; stack_count++;
		if (key == node->vm_avl_key)
			break;
		if (key < node->vm_avl_key)
			nodeplace = &node->vm_avl_left;
		else
			nodeplace = &node->vm_avl_right;
	}
	nodeplace_to_delete = nodeplace;
	/* Have to remove node_to_delete = *nodeplace_to_delete. */
	if (node_to_delete->vm_avl_left == avl_empty) {
		*nodeplace_to_delete = node_to_delete->vm_avl_right;
		stack_ptr--; stack_count--;
	} else {
		struct vm_area_struct *** stack_ptr_to_delete = stack_ptr;
		struct vm_area_struct ** nodeplace = &node_to_delete->vm_avl_left;
		struct vm_area_struct * node;
		for (;;) {
			node = *nodeplace;
			if (node->vm_avl_right == avl_empty)
				break;
			*stack_ptr++ = nodeplace; stack_count++;
			nodeplace = &node->vm_avl_right;
		}
		*nodeplace = node->vm_avl_left;
		/* node replaces node_to_delete */
		node->vm_avl_left = node_to_delete->vm_avl_left;
		node->vm_avl_right = node_to_delete->vm_avl_right;
		node->vm_avl_height = node_to_delete->vm_avl_height;
		*nodeplace_to_delete = node;
		*stack_ptr_to_delete = &node->vm_avl_left;
	}
	avl_rebalance(stack_ptr,stack_count);
}
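
/*
 * Usage note (illustrative, not part of the original file): the rest
 * of this file keeps the AVL tree and the linear list in sync --
 * do_munmap() below calls avl_remove() for every vma it moves onto its
 * temporary free list, and insert_vm_struct() adds a vma to both the
 * tree and the list at once.
 */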

#ifdef DEBUG_AVL

/* print a list of vm_area_structs */
static void printk_list (struct vm_area_struct * vma)
{
	printk("[");
	while (vma) {
		printk("%08lX-%08lX", vma->vm_start, vma->vm_end);
		vma = vma->vm_next;
		if (!vma)
			break;
		printk(" ");
	}
	printk("]");
}

/* print a tree */
static void printk_avl (struct vm_area_struct * tree)
{
	if (tree != avl_empty) {
		printk("(");
		if (tree->vm_avl_left != avl_empty) {
			printk_avl(tree->vm_avl_left);
			printk("<");
		}
		printk("%08lX-%08lX", tree->vm_start, tree->vm_end);
		if (tree->vm_avl_right != avl_empty) {
			printk(">");
			printk_avl(tree->vm_avl_right);
		}
		printk(")");
	}
}

static char *avl_check_point = "somewhere";

/* check that the heights stored in the tree are balanced and consistent */
static void avl_checkheights (struct vm_area_struct * tree)
{
	int h, hl, hr;

	if (tree == avl_empty)
		return;
	avl_checkheights(tree->vm_avl_left);
	avl_checkheights(tree->vm_avl_right);
	h = tree->vm_avl_height;
	hl = heightof(tree->vm_avl_left);
	hr = heightof(tree->vm_avl_right);
	if ((h == hl+1) && (hr <= hl) && (hl <= hr+1))
		return;
	if ((h == hr+1) && (hl <= hr) && (hr <= hl+1))
		return;
	printk("%s: avl_checkheights: heights inconsistent\n",avl_check_point);
}

/* check that all keys stored in a tree are < key */
static void avl_checkleft (struct vm_area_struct * tree, vm_avl_key_t key)
{
	if (tree == avl_empty)
		return;
	avl_checkleft(tree->vm_avl_left,key);
	avl_checkleft(tree->vm_avl_right,key);
	if (tree->vm_avl_key < key)
		return;
	printk("%s: avl_checkleft: left key %lu >= top key %lu\n",avl_check_point,tree->vm_avl_key,key);
}

/* check that all keys stored in a tree are > key */
static void avl_checkright (struct vm_area_struct * tree, vm_avl_key_t key)
{
	if (tree == avl_empty)
		return;
	avl_checkright(tree->vm_avl_left,key);
	avl_checkright(tree->vm_avl_right,key);
	if (tree->vm_avl_key > key)
		return;
	printk("%s: avl_checkright: right key %lu <= top key %lu\n",avl_check_point,tree->vm_avl_key,key);
}

/* check that all keys are properly increasing */
static void avl_checkorder (struct vm_area_struct * tree)
{
	if (tree == avl_empty)
		return;
	avl_checkorder(tree->vm_avl_left);
	avl_checkorder(tree->vm_avl_right);
	avl_checkleft(tree->vm_avl_left,tree->vm_avl_key);
	avl_checkright(tree->vm_avl_right,tree->vm_avl_key);
}

/* all checks */
static void avl_check (struct task_struct * task, char *caller)
{
	avl_check_point = caller;
	avl_checkheights(task->mm->mmap_avl);
	avl_checkorder(task->mm->mmap_avl);
}

#endif

/*
 * Normal function to fix up a mapping.
 * This function is the default for when an area has no specific
 * function.  It works out what part of an area is affected and
 * adjusts the mapping information.
 *
 * By the time this function is called, the area struct has been
 * removed from the process mapping list, so it needs to be
 * reinserted if necessary.
 *
 * The four main cases are:
 *    unmapping the whole area,
 *    unmapping from the start of the segment to a point in it,
 *    unmapping from an intermediate point to the end, and
 *    unmapping between two intermediate points, making a hole.
 *
 * Case 4 involves the creation of two new areas, one for each side
 * of the hole.
 */
static void unmap_fixup(struct vm_area_struct *area,
		 unsigned long addr, size_t len)
{
	struct vm_area_struct *mpnt;
	unsigned long end = addr + len;

	if (addr < area->vm_start || addr >= area->vm_end ||
	    end <= area->vm_start || end > area->vm_end ||
	    end < addr)
	{
		printk("unmap_fixup: area=%lx-%lx, unmap %lx-%lx!!\n",
		       area->vm_start, area->vm_end, addr, end);
		return;
	}
	area->vm_mm->total_vm -= len >> PAGE_SHIFT;
	if (area->vm_flags & VM_LOCKED)
		area->vm_mm->locked_vm -= len >> PAGE_SHIFT;

	/* Unmapping the whole area */
	if (addr == area->vm_start && end == area->vm_end) {
		if (area->vm_ops && area->vm_ops->close)
			area->vm_ops->close(area);
		if (area->vm_inode)
			iput(area->vm_inode);
		return;
	}

	/* Work out to one of the ends */
	if (end == area->vm_end)
		area->vm_end = addr;
	else
	if (addr == area->vm_start) {
		area->vm_offset += (end - area->vm_start);
		area->vm_start = end;
	}
	else {
		/* Unmapping a hole: add the end mapping here, leave the
		 * beginning for the common code below. */
		mpnt = (struct vm_area_struct *)kmalloc(sizeof(*mpnt), GFP_KERNEL);

		if (!mpnt)
			return;
		*mpnt = *area;
		mpnt->vm_offset += (end - area->vm_start);
		mpnt->vm_start = end;
		if (mpnt->vm_inode)
			mpnt->vm_inode->i_count++;
		if (mpnt->vm_ops && mpnt->vm_ops->open)
			mpnt->vm_ops->open(mpnt);
		area->vm_end = addr;	/* truncate area */
		insert_vm_struct(current, mpnt);
	}

	/* construct whatever mapping is needed */
	mpnt = (struct vm_area_struct *)kmalloc(sizeof(*mpnt), GFP_KERNEL);
	if (!mpnt)
		return;
	*mpnt = *area;
	if (mpnt->vm_ops && mpnt->vm_ops->open)
		mpnt->vm_ops->open(mpnt);
	if (area->vm_ops && area->vm_ops->close) {
		area->vm_end = area->vm_start;
		area->vm_ops->close(area);
	}
	insert_vm_struct(current, mpnt);
}
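
/*
 * Example (illustrative, not part of the original file): unmapping
 * 0x2000-0x3000 out of an area covering 0x1000-0x5000 hits case 4:
 * a new vma for 0x3000-0x5000 (with vm_offset advanced by 0x2000) is
 * inserted, the original area is truncated to 0x1000-0x2000, and the
 * common code above reinserts the truncated copy.
 */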

asmlinkage int sys_munmap(unsigned long addr, size_t len)
{
	return do_munmap(addr, len);
}

/*
 * Munmap is split into two main parts: this part, which finds out what
 * needs doing, and the areas themselves, which do the work.  Partial
 * unmappings are handled as well.
 */
int do_munmap(unsigned long addr, size_t len)
{
	struct vm_area_struct *mpnt, *prev, *next, **npp, *free;

	if ((addr & ~PAGE_MASK) || addr > TASK_SIZE || len > TASK_SIZE-addr)
		return -EINVAL;

	if ((len = PAGE_ALIGN(len)) == 0)
		return 0;

	/*
	 * Check if this memory area is ok - put it on the temporary
	 * list if so.  The checks here are pretty simple --
	 * every area affected in some way (by any overlap) is put
	 * on the list.  If nothing is put on, nothing is affected.
	 */
	mpnt = find_vma(current, addr);
	if (!mpnt)
		return 0;
	avl_neighbours(mpnt, current->mm->mmap_avl, &prev, &next);
	/* prev and next now bracket mpnt in the sorted list */

	npp = (prev ? &prev->vm_next : &current->mm->mmap);
	free = NULL;
	for ( ; mpnt && mpnt->vm_start < addr+len; mpnt = *npp) {
		*npp = mpnt->vm_next;
		mpnt->vm_next = free;
		free = mpnt;
		avl_remove(mpnt, &current->mm->mmap_avl);
	}

	if (free == NULL)
		return 0;

	/*
	 * Ok - we have the memory areas we should free on the 'free' list,
	 * so release them, and unmap the page range.
	 * If one of the segments is only being partially unmapped,
	 * unmap_fixup() will put new vm_area_struct(s) into the address
	 * space.
	 */
	while (free) {
		unsigned long st, end;

		mpnt = free;
		free = free->vm_next;

		remove_shared_vm_struct(mpnt);

		st = addr < mpnt->vm_start ? mpnt->vm_start : addr;
		end = addr+len;
		end = end > mpnt->vm_end ? mpnt->vm_end : end;

		if (mpnt->vm_ops && mpnt->vm_ops->unmap)
			mpnt->vm_ops->unmap(mpnt, st, end-st);
		zap_page_range(current->mm, st, end-st);
		unmap_fixup(mpnt, st, end-st);
		kfree(mpnt);
	}

	zap_page_range(current->mm, addr, len);
	return 0;
}
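
/*
 * Example (illustrative, not part of the original file): with vmas at
 * 0x1000-0x3000 and 0x5000-0x6000, do_munmap(0x2000, 0x4000) puts both
 * areas on the 'free' list (both overlap 0x2000-0x6000); the first is
 * clipped to st=0x2000, end=0x3000 and truncated by unmap_fixup() to
 * 0x1000-0x2000, while the second (st=0x5000, end=0x6000) is removed
 * entirely.
 */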

/* Build the AVL tree corresponding to the VMA list. */
void build_mmap_avl(struct mm_struct * mm)
{
	struct vm_area_struct * vma;

	mm->mmap_avl = NULL;
	for (vma = mm->mmap; vma; vma = vma->vm_next)
		avl_insert(vma, &mm->mmap_avl);
}

/* Release all mmaps. */
void exit_mmap(struct mm_struct * mm)
{
	struct vm_area_struct * mpnt;

	mpnt = mm->mmap;
	mm->mmap = NULL;
	mm->mmap_avl = NULL;
	mm->rss = 0;
	mm->total_vm = 0;
	mm->locked_vm = 0;
	while (mpnt) {
		struct vm_area_struct * next = mpnt->vm_next;
		if (mpnt->vm_ops) {
			if (mpnt->vm_ops->unmap)
				mpnt->vm_ops->unmap(mpnt, mpnt->vm_start, mpnt->vm_end-mpnt->vm_start);
			if (mpnt->vm_ops->close)
				mpnt->vm_ops->close(mpnt);
		}
		remove_shared_vm_struct(mpnt);
		zap_page_range(mm, mpnt->vm_start, mpnt->vm_end-mpnt->vm_start);
		if (mpnt->vm_inode)
			iput(mpnt->vm_inode);
		kfree(mpnt);
		mpnt = next;
	}
}

/*
 * Insert vm structure into the process list, sorted by address, and
 * into the inode's i_mmap ring.
 */
void insert_vm_struct(struct task_struct *t, struct vm_area_struct *vmp)
{
	struct vm_area_struct *share;
	struct inode * inode;

#if 0 /* equivalent, but slow */
	struct vm_area_struct **p, *mpnt;

	p = &t->mm->mmap;
	while ((mpnt = *p) != NULL) {
		if (mpnt->vm_start > vmp->vm_start)
			break;
		if (mpnt->vm_end > vmp->vm_start)
			printk("insert_vm_struct: overlapping memory areas\n");
		p = &mpnt->vm_next;
	}
	vmp->vm_next = mpnt;
	*p = vmp;
#else
	struct vm_area_struct * prev, * next;

	avl_insert_neighbours(vmp, &t->mm->mmap_avl, &prev, &next);
	if ((prev ? prev->vm_next : t->mm->mmap) != next)
		printk("insert_vm_struct: tree inconsistent with list\n");
	if (prev)
		prev->vm_next = vmp;
	else
		t->mm->mmap = vmp;
	vmp->vm_next = next;
#endif

	inode = vmp->vm_inode;
	if (!inode)
		return;

	/* insert vmp into the inode's circular share ring */
	if ((share = inode->i_mmap)) {
		vmp->vm_next_share = share->vm_next_share;
		vmp->vm_next_share->vm_prev_share = vmp;
		share->vm_next_share = vmp;
		vmp->vm_prev_share = share;
	} else
		inode->i_mmap = vmp->vm_next_share = vmp->vm_prev_share = vmp;
}
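
/*
 * Example (illustrative, not part of the original file): two processes
 * mapping the same inode leave inode->i_mmap pointing at one vma, with
 * vm_next_share/vm_prev_share linking both vmas into a two-element
 * circular ring; that ring is what remove_shared_vm_struct() below
 * unlinks a vma from.
 */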

/* Remove one vm structure from the inode's i_mmap ring. */
void remove_shared_vm_struct(struct vm_area_struct *mpnt)
{
	struct inode * inode = mpnt->vm_inode;

	if (!inode)
		return;

	if (mpnt->vm_next_share == mpnt) {
		if (inode->i_mmap != mpnt)
			printk("Inode i_mmap ring corrupted\n");
		inode->i_mmap = NULL;
		return;
	}

	if (inode->i_mmap == mpnt)
		inode->i_mmap = mpnt->vm_next_share;

	mpnt->vm_prev_share->vm_next_share = mpnt->vm_next_share;
	mpnt->vm_next_share->vm_prev_share = mpnt->vm_prev_share;
}

/*
 * Merge the list of memory segments if possible.
 * Redundant vm_area_structs are freed.
 * This assumes that the list is ordered by address.
 * We don't need to traverse the entire list, only those segments
 * which intersect or are adjacent to a given interval.
 */
void merge_segments (struct task_struct * task, unsigned long start_addr, unsigned long end_addr)
{
	struct vm_area_struct *prev, *mpnt, *next;

	mpnt = find_vma(task, start_addr);
	if (!mpnt)
		return;
	avl_neighbours(mpnt, task->mm->mmap_avl, &prev, &next);
	/* we have prev and mpnt, with mpnt->vm_next == next */

	if (!prev) {
		prev = mpnt;
		mpnt = next;
	}

	/*
	 * prev and mpnt cycle through the list, as long as
	 * prev->vm_start stays below end_addr.
	 */
	for ( ; mpnt && prev->vm_start < end_addr ; prev = mpnt, mpnt = next) {
#if 0
		printk("looping in merge_segments, mpnt=0x%lX\n", (unsigned long) mpnt);
#endif
		next = mpnt->vm_next;

		/* To merge, we must have the same inode, pte, operations
		 * and flags, and the areas must be adjacent. */
		if (mpnt->vm_inode != prev->vm_inode)
			continue;
		if (mpnt->vm_pte != prev->vm_pte)
			continue;
		if (mpnt->vm_ops != prev->vm_ops)
			continue;
		if (mpnt->vm_flags != prev->vm_flags)
			continue;
		if (prev->vm_end != mpnt->vm_start)
			continue;

		/* If we have a file or it's a shared memory area,
		 * the offsets must be contiguous as well. */
		if ((mpnt->vm_inode != NULL) || (mpnt->vm_flags & VM_SHM)) {
			if (prev->vm_offset + prev->vm_end - prev->vm_start != mpnt->vm_offset)
				continue;
		}

		/*
		 * Merge prev with mpnt and set up pointers so the new
		 * big segment can possibly merge with the next one.
		 * The old unused mpnt is freed.
		 */
		avl_remove(mpnt, &task->mm->mmap_avl);
		prev->vm_end = mpnt->vm_end;
		prev->vm_next = mpnt->vm_next;
		if (mpnt->vm_ops && mpnt->vm_ops->close) {
			mpnt->vm_offset += mpnt->vm_end - mpnt->vm_start;
			mpnt->vm_start = mpnt->vm_end;
			mpnt->vm_ops->close(mpnt);
		}
		remove_shared_vm_struct(mpnt);
		if (mpnt->vm_inode)
			mpnt->vm_inode->i_count--;
		kfree_s(mpnt, sizeof(*mpnt));
		mpnt = prev;
	}
}
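
/*
 * Example (illustrative, not part of the original file): after
 * do_mmap() creates an anonymous vma at 0x2000-0x3000 directly behind
 * an existing anonymous vma at 0x1000-0x2000 with identical flags,
 * vm_ops and vm_pte, the loop above extends the first vma to
 * 0x1000-0x3000 and frees the second.
 */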