This source file includes the following definitions:
- anon_map
- do_mmap
- get_unmapped_area
- avl_neighbours
- avl_rebalance
- avl_insert
- avl_insert_neighbours
- avl_remove
- printk_list
- printk_avl
- avl_checkheights
- avl_checkleft
- avl_checkright
- avl_checkorder
- avl_check
- unmap_fixup
- sys_munmap
- do_munmap
- build_mmap_avl
- exit_mmap
- insert_vm_struct
- remove_shared_vm_struct
- merge_segments
/*
 *	linux/mm/mmap.c
 */
#include <linux/stat.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/shm.h>
#include <linux/errno.h>
#include <linux/mman.h>
#include <linux/string.h>
#include <linux/malloc.h>

#include <asm/segment.h>
#include <asm/system.h>
#include <asm/pgtable.h>
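/*
 * Map plain anonymous memory: no backing object, just zero-fill
 * the whole range.
 */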
static inline int anon_map(struct inode *ino, struct file * file, struct vm_area_struct * vma)
{
	if (zeromap_page_range(vma->vm_start, vma->vm_end - vma->vm_start, vma->vm_page_prot))
		return -ENOMEM;
	return 0;
}
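/*
 * protection_map translates the low four vm_flags bits (read, write,
 * exec, shared) into hardware page protections: __Pxxx entries for
 * private mappings, __Sxxx entries for shared ones.
 */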
pgprot_t protection_map[16] = {
	__P000, __P001, __P010, __P011, __P100, __P101, __P110, __P111,
	__S000, __S001, __S010, __S011, __S100, __S101, __S110, __S111
};
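/*
 * do_mmap() is the back end of the mmap() system call: validate the
 * request, build a vm_area_struct, let the file's mmap operation (or
 * anon_map() for anonymous mappings) set up the pages, then link the
 * area into the address space and merge it with its neighbours.
 */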
unsigned long do_mmap(struct file * file, unsigned long addr, unsigned long len,
	unsigned long prot, unsigned long flags, unsigned long off)
{
	int error;
	struct vm_area_struct * vma;

	if ((len = PAGE_ALIGN(len)) == 0)
		return addr;

	if (addr > TASK_SIZE || len > TASK_SIZE || addr > TASK_SIZE-len)
		return -EINVAL;

	/* offset overflow? */
	if (off + len < off)
		return -EINVAL;

	/* mlock MCL_FUTURE? */
	if (current->mm->def_flags & VM_LOCKED) {
		unsigned long locked = current->mm->locked_vm << PAGE_SHIFT;
		locked += len;
		if (locked > current->rlim[RLIMIT_MEMLOCK].rlim_cur)
			return -EAGAIN;
	}
	/*
	 * Do simple checking here so the lower-level routines won't have
	 * to. We assume access permissions have been handled by the open
	 * of the memory object, so we don't do any here.
	 */
	if (file != NULL) {
		switch (flags & MAP_TYPE) {
		case MAP_SHARED:
			if ((prot & PROT_WRITE) && !(file->f_mode & 2))
				return -EACCES;
			/* fall through: shared mappings need read access too */
		case MAP_PRIVATE:
			if (!(file->f_mode & 1))
				return -EACCES;
			break;

		default:
			return -EINVAL;
		}
		if (flags & MAP_DENYWRITE) {
			if (file->f_inode->i_writecount > 0)
				return -ETXTBSY;
		}
	} else if ((flags & MAP_TYPE) != MAP_PRIVATE)
		return -EINVAL;
	/*
	 * Obtain the address to map to. We verify (or select) it and
	 * ensure that it represents a valid section of the address space.
	 */
	if (flags & MAP_FIXED) {
		if (addr & ~PAGE_MASK)
			return -EINVAL;
		if (len > TASK_SIZE || addr > TASK_SIZE - len)
			return -EINVAL;
	} else {
		addr = get_unmapped_area(addr, len);
		if (!addr)
			return -ENOMEM;
	}
	/*
	 * Determine the object being mapped and call the appropriate
	 * specific mapper. The address has already been validated, but
	 * not unmapped.
	 */
	if (file && (!file->f_op || !file->f_op->mmap))
		return -ENODEV;

	vma = (struct vm_area_struct *)kmalloc(sizeof(struct vm_area_struct),
		GFP_KERNEL);
	if (!vma)
		return -ENOMEM;

	vma->vm_mm = current->mm;
	vma->vm_start = addr;
	vma->vm_end = addr + len;
	vma->vm_flags = prot & (VM_READ | VM_WRITE | VM_EXEC);
	vma->vm_flags |= flags & (VM_GROWSDOWN | VM_DENYWRITE | VM_EXECUTABLE);
	vma->vm_flags |= current->mm->def_flags;

	if (file) {
		if (file->f_mode & 1)
			vma->vm_flags |= VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC;
		if (flags & MAP_SHARED) {
			vma->vm_flags |= VM_SHARED | VM_MAYSHARE;
			/*
			 * This looks strange, but when we don't have the
			 * file open for writing, we can demote the shared
			 * mapping to a simpler private mapping.  That also
			 * takes care of a security hole with ptrace()
			 * writing to a shared mapping without write
			 * permissions.
			 *
			 * We leave the VM_MAYSHARE bit on, just to get
			 * correct output from /proc/xxx/maps.
			 */
			if (!(file->f_mode & 2))
				vma->vm_flags &= ~(VM_MAYWRITE | VM_SHARED);
		}
	} else
		vma->vm_flags |= VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC;
	vma->vm_page_prot = protection_map[vma->vm_flags & 0x0f];
	vma->vm_ops = NULL;
	vma->vm_offset = off;
	vma->vm_inode = NULL;
	vma->vm_pte = 0;

	do_munmap(addr, len);	/* Clear old maps */

	if (file)
		error = file->f_op->mmap(file->f_inode, file, vma);
	else
		error = anon_map(NULL, NULL, vma);

	if (error) {
		kfree(vma);
		return error;
	}
	insert_vm_struct(current, vma);
	merge_segments(current, vma->vm_start, vma->vm_end);
	current->mm->total_vm += len >> PAGE_SHIFT;
	if (vma->vm_flags & VM_LOCKED) {
		unsigned long start = vma->vm_start;
		unsigned long end = vma->vm_end;
		current->mm->locked_vm += len >> PAGE_SHIFT;
		/* Touch each page to fault it in; the dummy asm keeps
		   the read from being optimised away. */
		while (start < end) {
			char c = get_user((char *) start);
			__asm__ __volatile__("": :"r" (c));
			start += PAGE_SIZE;
		}
	}
	return addr;
}
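/*
 * Get an address range which is currently unmapped, for mmap()
 * without MAP_FIXED.  Returns 0 if no suitable range exists.
 */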
unsigned long get_unmapped_area(unsigned long addr, unsigned long len)
{
	struct vm_area_struct * vmm;

	if (len > TASK_SIZE)
		return 0;
	if (!addr)
		addr = TASK_SIZE / 3;
	addr = PAGE_ALIGN(addr);

	for (vmm = current->mm->mmap; ; vmm = vmm->vm_next) {
		if (TASK_SIZE - len < addr)
			return 0;
		if (!vmm)
			return addr;
		if (addr > vmm->vm_end)
			continue;
		if (addr + len > vmm->vm_start) {
			addr = vmm->vm_end;
			continue;
		}
		return addr;
	}
}
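/*
 * task->mm->mmap_avl is the AVL tree corresponding to task->mm->mmap,
 * kept in the same order as the list.  Each vm_area_struct carries
 * vm_avl_left, vm_avl_right and vm_avl_height for this purpose.
 */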
#define vm_avl_key	vm_end
#define vm_avl_key_t	unsigned long
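/*
 * The empty tree is represented as NULL (build_mmap_avl() and
 * exit_mmap() below reset mm->mmap_avl to NULL directly), so
 * avl_empty is presumably defined as:
 */
#define avl_empty	(struct vm_area_struct *) NULL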
/* Since the trees are balanced, their height will never be large. */
#define avl_maxheight	41
#define heightof(tree)	((tree) == avl_empty ? 0 : (tree)->vm_avl_height)
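/*
 * Find the immediate neighbours of a node in the tree's ordering and
 * cross-check them against the linear vma list.
 */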
static inline void avl_neighbours (struct vm_area_struct * node, struct vm_area_struct * tree, struct vm_area_struct ** to_the_left, struct vm_area_struct ** to_the_right)
{
	vm_avl_key_t key = node->vm_avl_key;

	*to_the_left = *to_the_right = NULL;
	for (;;) {
		if (tree == avl_empty) {
			printk("avl_neighbours: node not found in the tree\n");
			return;
		}
		if (key == tree->vm_avl_key)
			break;
		if (key < tree->vm_avl_key) {
			*to_the_right = tree;
			tree = tree->vm_avl_left;
		} else {
			*to_the_left = tree;
			tree = tree->vm_avl_right;
		}
	}
	if (tree != node) {
		printk("avl_neighbours: node not exactly found in the tree\n");
		return;
	}
	if (tree->vm_avl_left != avl_empty) {
		struct vm_area_struct * node;
		for (node = tree->vm_avl_left; node->vm_avl_right != avl_empty; node = node->vm_avl_right)
			continue;
		*to_the_left = node;
	}
	if (tree->vm_avl_right != avl_empty) {
		struct vm_area_struct * node;
		for (node = tree->vm_avl_right; node->vm_avl_left != avl_empty; node = node->vm_avl_left)
			continue;
		*to_the_right = node;
	}
	if ((*to_the_left && ((*to_the_left)->vm_next != node)) || (node->vm_next != *to_the_right))
		printk("avl_neighbours: tree inconsistent with list\n");
}
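/*
 * Rebalance the tree after insertion or deletion, walking back up the
 * recorded path of node places (at most `count' of them).  Each step
 * restores the AVL invariant |heightof(left) - heightof(right)| <= 1
 * with a single or double rotation.
 */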
static inline void avl_rebalance (struct vm_area_struct *** nodeplaces_ptr, int count)
{
	for ( ; count > 0 ; count--) {
		struct vm_area_struct ** nodeplace = *--nodeplaces_ptr;
		struct vm_area_struct * node = *nodeplace;
		struct vm_area_struct * nodeleft = node->vm_avl_left;
		struct vm_area_struct * noderight = node->vm_avl_right;
		int heightleft = heightof(nodeleft);
		int heightright = heightof(noderight);
		if (heightright + 1 < heightleft) {
			/* The subtree is overbalanced to the left. */
			struct vm_area_struct * nodeleftleft = nodeleft->vm_avl_left;
			struct vm_area_struct * nodeleftright = nodeleft->vm_avl_right;
			int heightleftright = heightof(nodeleftright);
			if (heightof(nodeleftleft) >= heightleftright) {
				/* Single rotation to the right. */
				node->vm_avl_left = nodeleftright; nodeleft->vm_avl_right = node;
				nodeleft->vm_avl_height = 1 + (node->vm_avl_height = 1 + heightleftright);
				*nodeplace = nodeleft;
			} else {
				/* Double rotation: the left child's right
				   subtree becomes the new subtree root. */
				nodeleft->vm_avl_right = nodeleftright->vm_avl_left;
				node->vm_avl_left = nodeleftright->vm_avl_right;
				nodeleftright->vm_avl_left = nodeleft;
				nodeleftright->vm_avl_right = node;
				nodeleft->vm_avl_height = node->vm_avl_height = heightleftright;
				nodeleftright->vm_avl_height = heightleft;
				*nodeplace = nodeleftright;
			}
		}
		else if (heightleft + 1 < heightright) {
			/* Mirror image: overbalanced to the right. */
			struct vm_area_struct * noderightright = noderight->vm_avl_right;
			struct vm_area_struct * noderightleft = noderight->vm_avl_left;
			int heightrightleft = heightof(noderightleft);
			if (heightof(noderightright) >= heightrightleft) {
				/* Single rotation to the left. */
				node->vm_avl_right = noderightleft; noderight->vm_avl_left = node;
				noderight->vm_avl_height = 1 + (node->vm_avl_height = 1 + heightrightleft);
				*nodeplace = noderight;
			} else {
				/* Double rotation to the left. */
				noderight->vm_avl_left = noderightleft->vm_avl_right;
				node->vm_avl_right = noderightleft->vm_avl_left;
				noderightleft->vm_avl_right = noderight;
				noderightleft->vm_avl_left = node;
				noderight->vm_avl_height = node->vm_avl_height = heightrightleft;
				noderightleft->vm_avl_height = heightright;
				*nodeplace = noderightleft;
			}
		}
		else {
			/* Subtree balanced; stop as soon as the stored
			   height is already correct. */
			int height = (heightleft<heightright ? heightright : heightleft) + 1;
			if (height == node->vm_avl_height)
				break;
			node->vm_avl_height = height;
		}
	}
}
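/* Insert a node into the tree, then rebalance along the search path. */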
static inline void avl_insert (struct vm_area_struct * new_node, struct vm_area_struct ** ptree)
{
	vm_avl_key_t key = new_node->vm_avl_key;
	struct vm_area_struct ** nodeplace = ptree;
	struct vm_area_struct ** stack[avl_maxheight];
	int stack_count = 0;
	struct vm_area_struct *** stack_ptr = &stack[0];
	for (;;) {
		struct vm_area_struct * node = *nodeplace;
		if (node == avl_empty)
			break;
		*stack_ptr++ = nodeplace; stack_count++;
		if (key < node->vm_avl_key)
			nodeplace = &node->vm_avl_left;
		else
			nodeplace = &node->vm_avl_right;
	}
	new_node->vm_avl_left = avl_empty;
	new_node->vm_avl_right = avl_empty;
	new_node->vm_avl_height = 1;
	*nodeplace = new_node;
	avl_rebalance(stack_ptr,stack_count);
}
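/*
 * Insert a node into the tree and also return its list neighbours,
 * so the caller can splice it into the sorted vma list.
 */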
static inline void avl_insert_neighbours (struct vm_area_struct * new_node, struct vm_area_struct ** ptree,
	struct vm_area_struct ** to_the_left, struct vm_area_struct ** to_the_right)
{
	vm_avl_key_t key = new_node->vm_avl_key;
	struct vm_area_struct ** nodeplace = ptree;
	struct vm_area_struct ** stack[avl_maxheight];
	int stack_count = 0;
	struct vm_area_struct *** stack_ptr = &stack[0];
	*to_the_left = *to_the_right = NULL;
	for (;;) {
		struct vm_area_struct * node = *nodeplace;
		if (node == avl_empty)
			break;
		*stack_ptr++ = nodeplace; stack_count++;
		if (key < node->vm_avl_key) {
			*to_the_right = node;
			nodeplace = &node->vm_avl_left;
		} else {
			*to_the_left = node;
			nodeplace = &node->vm_avl_right;
		}
	}
	new_node->vm_avl_left = avl_empty;
	new_node->vm_avl_right = avl_empty;
	new_node->vm_avl_height = 1;
	*nodeplace = new_node;
	avl_rebalance(stack_ptr,stack_count);
}
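/* Remove a node from the tree, then rebalance along the search path. */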
static inline void avl_remove (struct vm_area_struct * node_to_delete, struct vm_area_struct ** ptree)
{
	vm_avl_key_t key = node_to_delete->vm_avl_key;
	struct vm_area_struct ** nodeplace = ptree;
	struct vm_area_struct ** stack[avl_maxheight];
	int stack_count = 0;
	struct vm_area_struct *** stack_ptr = &stack[0];
	struct vm_area_struct ** nodeplace_to_delete;
	for (;;) {
		struct vm_area_struct * node = *nodeplace;
		if (node == avl_empty) {
			/* what? node_to_delete not found in tree? */
			printk("avl_remove: node to delete not found in tree\n");
			return;
		}
		*stack_ptr++ = nodeplace; stack_count++;
		if (key == node->vm_avl_key)
			break;
		if (key < node->vm_avl_key)
			nodeplace = &node->vm_avl_left;
		else
			nodeplace = &node->vm_avl_right;
	}
	nodeplace_to_delete = nodeplace;
	/* Have to remove node_to_delete = *nodeplace_to_delete. */
	if (node_to_delete->vm_avl_left == avl_empty) {
		*nodeplace_to_delete = node_to_delete->vm_avl_right;
		stack_ptr--; stack_count--;
	} else {
		struct vm_area_struct *** stack_ptr_to_delete = stack_ptr;
		struct vm_area_struct ** nodeplace = &node_to_delete->vm_avl_left;
		struct vm_area_struct * node;
		for (;;) {
			node = *nodeplace;
			if (node->vm_avl_right == avl_empty)
				break;
			*stack_ptr++ = nodeplace; stack_count++;
			nodeplace = &node->vm_avl_right;
		}
		*nodeplace = node->vm_avl_left;
		/* node replaces node_to_delete */
		node->vm_avl_left = node_to_delete->vm_avl_left;
		node->vm_avl_right = node_to_delete->vm_avl_right;
		node->vm_avl_height = node_to_delete->vm_avl_height;
		*nodeplace_to_delete = node;
		*stack_ptr_to_delete = &node->vm_avl_left;
	}
	avl_rebalance(stack_ptr,stack_count);
}
#ifdef DEBUG_AVL

/* print a vma list */
static void printk_list (struct vm_area_struct * vma)
{
	printk("[");
	while (vma) {
		printk("%08lX-%08lX", vma->vm_start, vma->vm_end);
		vma = vma->vm_next;
		if (!vma)
			break;
		printk(" ");
	}
	printk("]");
}
/* print an AVL tree */
static void printk_avl (struct vm_area_struct * tree)
{
	if (tree != avl_empty) {
		printk("(");
		if (tree->vm_avl_left != avl_empty) {
			printk_avl(tree->vm_avl_left);
			printk("<");
		}
		printk("%08lX-%08lX", tree->vm_start, tree->vm_end);
		if (tree->vm_avl_right != avl_empty) {
			printk(">");
			printk_avl(tree->vm_avl_right);
		}
		printk(")");
	}
}
static char *avl_check_point = "somewhere";
/* check a tree's consistency and balancing */
static void avl_checkheights (struct vm_area_struct * tree)
{
	int h, hl, hr;

	if (tree == avl_empty)
		return;
	avl_checkheights(tree->vm_avl_left);
	avl_checkheights(tree->vm_avl_right);
	h = tree->vm_avl_height;
	hl = heightof(tree->vm_avl_left);
	hr = heightof(tree->vm_avl_right);
	if ((h == hl+1) && (hr <= hl) && (hl <= hr+1))
		return;
	if ((h == hr+1) && (hl <= hr) && (hr <= hl+1))
		return;
	printk("%s: avl_checkheights: heights inconsistent\n",avl_check_point);
}
/* check that all values stored in a tree are < key */
static void avl_checkleft (struct vm_area_struct * tree, vm_avl_key_t key)
{
	if (tree == avl_empty)
		return;
	avl_checkleft(tree->vm_avl_left,key);
	avl_checkleft(tree->vm_avl_right,key);
	if (tree->vm_avl_key < key)
		return;
	printk("%s: avl_checkleft: left key %lu >= top key %lu\n",avl_check_point,tree->vm_avl_key,key);
}
/* check that all values stored in a tree are > key */
static void avl_checkright (struct vm_area_struct * tree, vm_avl_key_t key)
{
	if (tree == avl_empty)
		return;
	avl_checkright(tree->vm_avl_left,key);
	avl_checkright(tree->vm_avl_right,key);
	if (tree->vm_avl_key > key)
		return;
	printk("%s: avl_checkright: right key %lu <= top key %lu\n",avl_check_point,tree->vm_avl_key,key);
}
/* check that all values are properly increasing */
static void avl_checkorder (struct vm_area_struct * tree)
{
	if (tree == avl_empty)
		return;
	avl_checkorder(tree->vm_avl_left);
	avl_checkorder(tree->vm_avl_right);
	avl_checkleft(tree->vm_avl_left,tree->vm_avl_key);
	avl_checkright(tree->vm_avl_right,tree->vm_avl_key);
}
/* all checks */
static void avl_check (struct task_struct * task, char *caller)
{
	avl_check_point = caller;
	avl_checkheights(task->mm->mmap_avl);
	avl_checkorder(task->mm->mmap_avl);
}

#endif
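/*
 * Fix up an area after part of it has been unmapped.  Four cases:
 * the whole area goes away, the lower part goes, the upper part goes,
 * or a hole is punched in the middle (which needs a second area for
 * the upper remainder).  The actual page-table work is done by the
 * caller; this only adjusts the mapping bookkeeping.
 */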
static void unmap_fixup(struct vm_area_struct *area,
	unsigned long addr, size_t len)
{
	struct vm_area_struct *mpnt;
	unsigned long end = addr + len;

	if (addr < area->vm_start || addr >= area->vm_end ||
	    end <= area->vm_start || end > area->vm_end ||
	    end < addr)
	{
		printk("unmap_fixup: area=%lx-%lx, unmap %lx-%lx!!\n",
			area->vm_start, area->vm_end, addr, end);
		return;
	}
	area->vm_mm->total_vm -= len >> PAGE_SHIFT;
	if (area->vm_flags & VM_LOCKED)
		area->vm_mm->locked_vm -= len >> PAGE_SHIFT;

	/* Unmapping the whole area */
	if (addr == area->vm_start && end == area->vm_end) {
		if (area->vm_ops && area->vm_ops->close)
			area->vm_ops->close(area);
		if (area->vm_inode)
			iput(area->vm_inode);
		return;
	}

	/* Work out to one of the ends */
	if (end == area->vm_end)
		area->vm_end = addr;
	else
	if (addr == area->vm_start) {
		area->vm_offset += (end - area->vm_start);
		area->vm_start = end;
	}
	else {
		/* Unmapping a hole: area->vm_start < addr <= end < area->vm_end */
		/* Add the end mapping; the beginning is handled below. */
		mpnt = (struct vm_area_struct *)kmalloc(sizeof(*mpnt), GFP_KERNEL);

		if (!mpnt)
			return;
		*mpnt = *area;
		mpnt->vm_offset += (end - area->vm_start);
		mpnt->vm_start = end;
		if (mpnt->vm_inode)
			mpnt->vm_inode->i_count++;
		if (mpnt->vm_ops && mpnt->vm_ops->open)
			mpnt->vm_ops->open(mpnt);
		area->vm_end = addr;	/* Truncate area */
		insert_vm_struct(current, mpnt);
	}

	/* construct whatever mapping is needed */
	mpnt = (struct vm_area_struct *)kmalloc(sizeof(*mpnt), GFP_KERNEL);
	if (!mpnt)
		return;
	*mpnt = *area;
	if (mpnt->vm_ops && mpnt->vm_ops->open)
		mpnt->vm_ops->open(mpnt);
	if (area->vm_ops && area->vm_ops->close) {
		area->vm_end = area->vm_start;
		area->vm_ops->close(area);
	}
	insert_vm_struct(current, mpnt);
}
asmlinkage int sys_munmap(unsigned long addr, size_t len)
{
	return do_munmap(addr, len);
}
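/*
 * Munmap is split into two main parts: this, which finds what needs
 * doing, and unmap_fixup(), which does the per-area bookkeeping.
 * Partial unmappings are handled.
 */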
int do_munmap(unsigned long addr, size_t len)
{
	struct vm_area_struct *mpnt, *prev, *next, **npp, *free;

	if ((addr & ~PAGE_MASK) || addr > TASK_SIZE || len > TASK_SIZE-addr)
		return -EINVAL;

	if ((len = PAGE_ALIGN(len)) == 0)
		return 0;

	/*
	 * Check if this memory area is ok - put it on the temporary
	 * list if so.  The checks here are pretty simple --
	 * every area affected in some way (by any overlap) is put
	 * on the list.  If nothing is put on, nothing is affected.
	 */
	mpnt = find_vma(current, addr);
	if (!mpnt)
		return 0;
	avl_neighbours(mpnt, current->mm->mmap_avl, &prev, &next);
	/* we have  prev->vm_next == mpnt && mpnt->vm_next == next */

	npp = (prev ? &prev->vm_next : &current->mm->mmap);
	free = NULL;
	for ( ; mpnt && mpnt->vm_start < addr+len; mpnt = *npp) {
		*npp = mpnt->vm_next;
		mpnt->vm_next = free;
		free = mpnt;
		avl_remove(mpnt, &current->mm->mmap_avl);
	}

	if (free == NULL)
		return 0;

	/*
	 * Ok - we have the memory areas we should free on the 'free' list,
	 * so release them, and unmap the page range.
	 * If one of the segments is only being partially unmapped,
	 * it will put new vm_area_struct(s) into the address space.
	 */
	while (free) {
		unsigned long st, end;

		mpnt = free;
		free = free->vm_next;

		remove_shared_vm_struct(mpnt);

		st = addr < mpnt->vm_start ? mpnt->vm_start : addr;
		end = addr+len;
		end = end > mpnt->vm_end ? mpnt->vm_end : end;

		if (mpnt->vm_ops && mpnt->vm_ops->unmap)
			mpnt->vm_ops->unmap(mpnt, st, end-st);
		zap_page_range(current->mm, st, end-st);
		unmap_fixup(mpnt, st, end-st);
		kfree(mpnt);
	}

	zap_page_range(current->mm, addr, len);
	return 0;
}
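/* Build the AVL tree corresponding to the VMA list. */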
void build_mmap_avl(struct mm_struct * mm)
{
	struct vm_area_struct * vma;

	mm->mmap_avl = NULL;
	for (vma = mm->mmap; vma; vma = vma->vm_next)
		avl_insert(vma, &mm->mmap_avl);
}
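/* Release all mmaps. */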
void exit_mmap(struct mm_struct * mm)
{
	struct vm_area_struct * mpnt;

	mpnt = mm->mmap;
	mm->mmap = NULL;
	mm->mmap_avl = NULL;
	mm->rss = 0;
	mm->total_vm = 0;
	mm->locked_vm = 0;
	while (mpnt) {
		struct vm_area_struct * next = mpnt->vm_next;
		if (mpnt->vm_ops) {
			if (mpnt->vm_ops->unmap)
				mpnt->vm_ops->unmap(mpnt, mpnt->vm_start, mpnt->vm_end-mpnt->vm_start);
			if (mpnt->vm_ops->close)
				mpnt->vm_ops->close(mpnt);
		}
		remove_shared_vm_struct(mpnt);
		zap_page_range(mm, mpnt->vm_start, mpnt->vm_end-mpnt->vm_start);
		if (mpnt->vm_inode)
			iput(mpnt->vm_inode);
		kfree(mpnt);
		mpnt = next;
	}
}
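/*
 * Insert vm structure into the process list (sorted by address) and
 * into the inode's i_mmap ring.
 */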
void insert_vm_struct(struct task_struct *t, struct vm_area_struct *vmp)
{
	struct vm_area_struct *share;
	struct inode * inode;

#if 0
	struct vm_area_struct **p, *mpnt;

	p = &t->mm->mmap;
	while ((mpnt = *p) != NULL) {
		if (mpnt->vm_start > vmp->vm_start)
			break;
		if (mpnt->vm_end > vmp->vm_start)
			printk("insert_vm_struct: overlapping memory areas\n");
		p = &mpnt->vm_next;
	}
	vmp->vm_next = mpnt;
	*p = vmp;
#else
	struct vm_area_struct * prev, * next;

	avl_insert_neighbours(vmp, &t->mm->mmap_avl, &prev, &next);
	if ((prev ? prev->vm_next : t->mm->mmap) != next)
		printk("insert_vm_struct: tree inconsistent with list\n");
	if (prev)
		prev->vm_next = vmp;
	else
		t->mm->mmap = vmp;
	vmp->vm_next = next;
#endif

	inode = vmp->vm_inode;
	if (!inode)
		return;

	/* insert vmp into the inode's circular share ring */
	if ((share = inode->i_mmap)) {
		vmp->vm_next_share = share->vm_next_share;
		vmp->vm_next_share->vm_prev_share = vmp;
		share->vm_next_share = vmp;
		vmp->vm_prev_share = share;
	} else
		inode->i_mmap = vmp->vm_next_share = vmp->vm_prev_share = vmp;
}
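/* Remove one vm structure from the inode's circular share ring. */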
void remove_shared_vm_struct(struct vm_area_struct *mpnt)
{
	struct inode * inode = mpnt->vm_inode;

	if (!inode)
		return;

	if (mpnt->vm_next_share == mpnt) {
		if (inode->i_mmap != mpnt)
			printk("Inode i_mmap ring corrupted\n");
		inode->i_mmap = NULL;
		return;
	}

	if (inode->i_mmap == mpnt)
		inode->i_mmap = mpnt->vm_next_share;

	mpnt->vm_prev_share->vm_next_share = mpnt->vm_next_share;
	mpnt->vm_next_share->vm_prev_share = mpnt->vm_prev_share;
}
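/*
 * Merge the list of memory segments if possible.  Walks the range
 * [start_addr, end_addr) and joins adjacent vm_area_structs whose
 * flags, operations, inode and file offsets are compatible; the
 * redundant structures are freed.
 */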
void merge_segments (struct task_struct * task, unsigned long start_addr, unsigned long end_addr)
{
	struct vm_area_struct *prev, *mpnt, *next;

	mpnt = find_vma(task, start_addr);
	if (!mpnt)
		return;
	avl_neighbours(mpnt, task->mm->mmap_avl, &prev, &next);
	/* we have  prev->vm_next == mpnt && mpnt->vm_next == next */

	if (!prev) {
		prev = mpnt;
		mpnt = next;
	}

	/* prev and mpnt cycle through the list, as long as
	 * start_addr < mpnt->vm_end && prev->vm_start < end_addr
	 */
	for ( ; mpnt && prev->vm_start < end_addr ; prev = mpnt, mpnt = next) {
#if 0
		printk("looping in merge_segments, mpnt=0x%lX\n", (unsigned long) mpnt);
#endif

		next = mpnt->vm_next;

		/*
		 * To share, we must have the same inode, operations..
		 */
		if (mpnt->vm_inode != prev->vm_inode)
			continue;
		if (mpnt->vm_pte != prev->vm_pte)
			continue;
		if (mpnt->vm_ops != prev->vm_ops)
			continue;
		if (mpnt->vm_flags != prev->vm_flags)
			continue;
		if (prev->vm_end != mpnt->vm_start)
			continue;

		/*
		 * If we have an inode or a shm segment, the offsets
		 * must be contiguous..
		 */
		if ((mpnt->vm_inode != NULL) || (mpnt->vm_flags & VM_SHM)) {
			if (prev->vm_offset + prev->vm_end - prev->vm_start != mpnt->vm_offset)
				continue;
		}

		/*
		 * Merge prev with mpnt and set up pointers so the new
		 * big segment can possibly merge with the next one.
		 * The old unused mpnt is freed.
		 */
		avl_remove(mpnt, &task->mm->mmap_avl);
		prev->vm_end = mpnt->vm_end;
		prev->vm_next = mpnt->vm_next;
		if (mpnt->vm_ops && mpnt->vm_ops->close) {
			mpnt->vm_offset += mpnt->vm_end - mpnt->vm_start;
			mpnt->vm_start = mpnt->vm_end;
			mpnt->vm_ops->close(mpnt);
		}
		remove_shared_vm_struct(mpnt);
		if (mpnt->vm_inode)
			mpnt->vm_inode->i_count--;
		kfree_s(mpnt, sizeof(*mpnt));
		mpnt = prev;
	}
}