This source file includes following definitions.
- anon_map
- do_mmap
- get_unmapped_area
- avl_neighbours
- avl_rebalance
- avl_insert
- avl_insert_neighbours
- avl_remove
- printk_list
- printk_avl
- avl_checkheights
- avl_checkleft
- avl_checkright
- avl_checkorder
- avl_check
- unmap_fixup
- sys_munmap
- do_munmap
- build_mmap_avl
- exit_mmap
- insert_vm_struct
- remove_shared_vm_struct
- merge_segments
1
2
3
4
5
6 #include <linux/stat.h>
7 #include <linux/sched.h>
8 #include <linux/kernel.h>
9 #include <linux/mm.h>
10 #include <linux/shm.h>
11 #include <linux/errno.h>
12 #include <linux/mman.h>
13 #include <linux/string.h>
14 #include <linux/malloc.h>
15
16 #include <asm/segment.h>
17 #include <asm/system.h>
18 #include <asm/pgtable.h>
19
20
21
22
23
24 static inline int anon_map(struct inode *ino, struct file * file, struct vm_area_struct * vma)
25 {
26 if (zeromap_page_range(vma->vm_start, vma->vm_end - vma->vm_start, vma->vm_page_prot))
27 return -ENOMEM;
28 return 0;
29 }
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
/*
 * Hardware page-protection lookup table, indexed by the low four
 * access bits of vm_flags (see do_mmap: protection_map[vm_flags & 0x0f]).
 * The first eight entries are the private (__P) combinations, the
 * second eight the shared (__S) combinations.
 */
pgprot_t protection_map[16] = {
	__P000, __P001, __P010, __P011, __P100, __P101, __P110, __P111,
	__S000, __S001, __S010, __S011, __S100, __S101, __S110, __S111
};
52
/*
 * Create a new mapping of `len' bytes at `addr' (a hint unless
 * MAP_FIXED is set), backed by `file' at offset `off', or by
 * anonymous zero-fill memory when file is NULL.
 *
 * Returns the mapped address on success, or a negative errno.
 */
unsigned long do_mmap(struct file * file, unsigned long addr, unsigned long len,
	unsigned long prot, unsigned long flags, unsigned long off)
{
	int error;
	struct vm_area_struct * vma;

	if ((len = PAGE_ALIGN(len)) == 0)
		return addr;

	if (addr > TASK_SIZE || len > TASK_SIZE || addr > TASK_SIZE-len)
		return -EINVAL;

	/* offset overflow? */
	if (off + len < off)
		return -EINVAL;

	/* If future mappings are to be locked, enforce RLIMIT_MEMLOCK now. */
	if (current->mm->def_flags & VM_LOCKED) {
		unsigned long locked = current->mm->locked_vm << PAGE_SHIFT;
		locked += len;
		if (locked > current->rlim[RLIMIT_MEMLOCK].rlim_cur)
			return -EAGAIN;
	}

	/*
	 * Check the open mode of the file against the requested mapping:
	 * a writable MAP_SHARED mapping needs a file opened for writing,
	 * and any file mapping needs a file opened for reading.
	 * NOTE(review): the magic 1 and 2 appear to be the f_mode
	 * read/write bits (FMODE_READ/FMODE_WRITE) -- confirm against
	 * the fs headers of this tree.
	 */
	if (file != NULL) {
		switch (flags & MAP_TYPE) {
		case MAP_SHARED:
			if ((prot & PROT_WRITE) && !(file->f_mode & 2))
				return -EACCES;
			/* fall through: shared mappings also need read access */
		case MAP_PRIVATE:
			if (!(file->f_mode & 1))
				return -EACCES;
			break;

		default:
			return -EINVAL;
		}
		if (flags & MAP_DENYWRITE) {
			if (file->f_inode->i_writecount > 0)
				return -ETXTBSY;
		}
	} else if ((flags & MAP_TYPE) != MAP_PRIVATE)
		return -EINVAL;

	/*
	 * Determine the address to map at.  MAP_FIXED demands a
	 * page-aligned, in-range address; otherwise search for a hole.
	 */
	if (flags & MAP_FIXED) {
		if (addr & ~PAGE_MASK)
			return -EINVAL;
		if (len > TASK_SIZE || addr > TASK_SIZE - len)
			return -EINVAL;
	} else {
		addr = get_unmapped_area(addr, len);
		if (!addr)
			return -ENOMEM;
	}

	/* A file that cannot be mmapped has no mmap operation. */
	if (file && (!file->f_op || !file->f_op->mmap))
		return -ENODEV;

	vma = (struct vm_area_struct *)kmalloc(sizeof(struct vm_area_struct),
		GFP_KERNEL);
	if (!vma)
		return -ENOMEM;

	vma->vm_mm = current->mm;
	vma->vm_start = addr;
	vma->vm_end = addr + len;
	/* NOTE(review): relies on PROT_{READ,WRITE,EXEC} having the same
	   bit values as VM_{READ,WRITE,EXEC} -- confirm in mman.h. */
	vma->vm_flags = prot & (VM_READ | VM_WRITE | VM_EXEC);
	vma->vm_flags |= flags & (VM_GROWSDOWN | VM_DENYWRITE | VM_EXECUTABLE);
	vma->vm_flags |= current->mm->def_flags;

	if (file) {
		if (file->f_mode & 1)
			vma->vm_flags |= VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC;
		if (flags & MAP_SHARED) {
			vma->vm_flags |= VM_SHARED | VM_MAYSHARE;
			/*
			 * A shared mapping of a read-only file can never
			 * become writable, so drop the write bits up front.
			 */
			if (!(file->f_mode & 2))
				vma->vm_flags &= ~(VM_MAYWRITE | VM_SHARED);
		}
	} else
		vma->vm_flags |= VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC;
	vma->vm_page_prot = protection_map[vma->vm_flags & 0x0f];
	vma->vm_ops = NULL;
	vma->vm_offset = off;
	vma->vm_inode = NULL;
	vma->vm_pte = 0;

	/* Clear any existing mappings in the range before installing ours. */
	do_munmap(addr, len);

	if (file)
		error = file->f_op->mmap(file->f_inode, file, vma);
	else
		error = anon_map(NULL, NULL, vma);

	if (error) {
		kfree(vma);
		return error;
	}
	/* The mmap op may have adjusted vm_flags; use the final value. */
	flags = vma->vm_flags;
	insert_vm_struct(current, vma);
	merge_segments(current, vma->vm_start, vma->vm_end);

	/* Account the mapping; for a locked mapping, fault in every page now. */
	current->mm->total_vm += len >> PAGE_SHIFT;
	if (flags & VM_LOCKED) {
		unsigned long start = addr;
		current->mm->locked_vm += len >> PAGE_SHIFT;
		do {
			/* Touch one byte per page; the empty asm keeps the
			   otherwise-dead read from being optimized away. */
			char c = get_user((char *) start);
			len -= PAGE_SIZE;
			start += PAGE_SIZE;
			__asm__ __volatile__("": :"r" (c));
		} while (len > 0);
	}
	return addr;
}
195
196
197
198
199
200
201 unsigned long get_unmapped_area(unsigned long addr, unsigned long len)
202 {
203 struct vm_area_struct * vmm;
204
205 if (len > TASK_SIZE)
206 return 0;
207 if (!addr)
208 addr = TASK_SIZE / 3;
209 addr = PAGE_ALIGN(addr);
210
211 for (vmm = current->mm->mmap; ; vmm = vmm->vm_next) {
212 if (TASK_SIZE - len < addr)
213 return 0;
214 if (!vmm)
215 return addr;
216 if (addr > vmm->vm_end)
217 continue;
218 if (addr + len > vmm->vm_start) {
219 addr = vmm->vm_end;
220 continue;
221 }
222 return addr;
223 }
224 }
225
226
227
228
229
230
231
232
233
234
/* The AVL tree of vmas is keyed on each area's end address. */
#define vm_avl_key vm_end
#define vm_avl_key_t unsigned long

/*
 * Because the tree is height-balanced, its depth is bounded:
 * 41 levels are enough for any tree whose keys fit the key type.
 */
#define avl_maxheight 41
/* Height of a subtree; the empty tree has height 0. */
#define heightof(tree) ((tree) == avl_empty ? 0 : (tree)->vm_avl_height)
251
252
253
254
255
256
257
258
259
260 static inline void avl_neighbours (struct vm_area_struct * node, struct vm_area_struct * tree, struct vm_area_struct ** to_the_left, struct vm_area_struct ** to_the_right)
261 {
262 vm_avl_key_t key = node->vm_avl_key;
263
264 *to_the_left = *to_the_right = NULL;
265 for (;;) {
266 if (tree == avl_empty) {
267 printk("avl_neighbours: node not found in the tree\n");
268 return;
269 }
270 if (key == tree->vm_avl_key)
271 break;
272 if (key < tree->vm_avl_key) {
273 *to_the_right = tree;
274 tree = tree->vm_avl_left;
275 } else {
276 *to_the_left = tree;
277 tree = tree->vm_avl_right;
278 }
279 }
280 if (tree != node) {
281 printk("avl_neighbours: node not exactly found in the tree\n");
282 return;
283 }
284 if (tree->vm_avl_left != avl_empty) {
285 struct vm_area_struct * node;
286 for (node = tree->vm_avl_left; node->vm_avl_right != avl_empty; node = node->vm_avl_right)
287 continue;
288 *to_the_left = node;
289 }
290 if (tree->vm_avl_right != avl_empty) {
291 struct vm_area_struct * node;
292 for (node = tree->vm_avl_right; node->vm_avl_left != avl_empty; node = node->vm_avl_left)
293 continue;
294 *to_the_right = node;
295 }
296 if ((*to_the_left && ((*to_the_left)->vm_next != node)) || (node->vm_next != *to_the_right))
297 printk("avl_neighbours: tree inconsistent with list\n");
298 }
299
300
301
302
303
304
305
/*
 * Rebalance the tree after an insertion or deletion.
 * nodeplaces_ptr points one past a stack of `count' node-place
 * pointers recording the path from the root down to the modified
 * subtree; walk it back towards the root, restoring the AVL height
 * invariant with single or double rotations as needed.
 */
static inline void avl_rebalance (struct vm_area_struct *** nodeplaces_ptr, int count)
{
	for ( ; count > 0 ; count--) {
		struct vm_area_struct ** nodeplace = *--nodeplaces_ptr;
		struct vm_area_struct * node = *nodeplace;
		struct vm_area_struct * nodeleft = node->vm_avl_left;
		struct vm_area_struct * noderight = node->vm_avl_right;
		int heightleft = heightof(nodeleft);
		int heightright = heightof(noderight);
		if (heightright + 1 < heightleft) {
			/*
			 * Left subtree taller by 2: rotate right.
			 */
			struct vm_area_struct * nodeleftleft = nodeleft->vm_avl_left;
			struct vm_area_struct * nodeleftright = nodeleft->vm_avl_right;
			int heightleftright = heightof(nodeleftright);
			if (heightof(nodeleftleft) >= heightleftright) {
				/*
				 * Single right rotation: nodeleft becomes
				 * the subtree root, node its right child,
				 * nodeleftright moves under node.
				 */
				node->vm_avl_left = nodeleftright; nodeleft->vm_avl_right = node;
				nodeleft->vm_avl_height = 1 + (node->vm_avl_height = 1 + heightleftright);
				*nodeplace = nodeleft;
			} else {
				/*
				 * Double (left-right) rotation:
				 * nodeleftright becomes the subtree root,
				 * with nodeleft and node as its children;
				 * its former children are redistributed.
				 */
				nodeleft->vm_avl_right = nodeleftright->vm_avl_left;
				node->vm_avl_left = nodeleftright->vm_avl_right;
				nodeleftright->vm_avl_left = nodeleft;
				nodeleftright->vm_avl_right = node;
				nodeleft->vm_avl_height = node->vm_avl_height = heightleftright;
				nodeleftright->vm_avl_height = heightleft;
				*nodeplace = nodeleftright;
			}
		}
		else if (heightleft + 1 < heightright) {
			/* Right subtree taller by 2: mirror image of the above. */
			struct vm_area_struct * noderightright = noderight->vm_avl_right;
			struct vm_area_struct * noderightleft = noderight->vm_avl_left;
			int heightrightleft = heightof(noderightleft);
			if (heightof(noderightright) >= heightrightleft) {
				/* Single left rotation. */
				node->vm_avl_right = noderightleft; noderight->vm_avl_left = node;
				noderight->vm_avl_height = 1 + (node->vm_avl_height = 1 + heightrightleft);
				*nodeplace = noderight;
			} else {
				/* Double (right-left) rotation. */
				noderight->vm_avl_left = noderightleft->vm_avl_right;
				node->vm_avl_right = noderightleft->vm_avl_left;
				noderightleft->vm_avl_right = noderight;
				noderightleft->vm_avl_left = node;
				noderight->vm_avl_height = node->vm_avl_height = heightrightleft;
				noderightleft->vm_avl_height = heightright;
				*nodeplace = noderightleft;
			}
		}
		else {
			/*
			 * Subtrees balanced; if the cached height is
			 * already correct, nothing above changes either.
			 */
			int height = (heightleft<heightright ? heightright : heightleft) + 1;
			if (height == node->vm_avl_height)
				break;
			node->vm_avl_height = height;
		}
	}
}
381
382
/*
 * Insert new_node into the tree rooted at *ptree, keeping the tree
 * balanced.  Equal keys descend into the right subtree.
 */
static inline void avl_insert (struct vm_area_struct * new_node, struct vm_area_struct ** ptree)
{
	vm_avl_key_t key = new_node->vm_avl_key;
	struct vm_area_struct ** nodeplace = ptree;
	/* Path from root to the insertion point, for avl_rebalance. */
	struct vm_area_struct ** stack[avl_maxheight];
	int stack_count = 0;
	struct vm_area_struct *** stack_ptr = &stack[0];
	for (;;) {
		struct vm_area_struct * node = *nodeplace;
		if (node == avl_empty)
			break;
		*stack_ptr++ = nodeplace; stack_count++;
		if (key < node->vm_avl_key)
			nodeplace = &node->vm_avl_left;
		else
			nodeplace = &node->vm_avl_right;
	}
	/* Attach as a leaf, then rebalance back up the recorded path. */
	new_node->vm_avl_left = avl_empty;
	new_node->vm_avl_right = avl_empty;
	new_node->vm_avl_height = 1;
	*nodeplace = new_node;
	avl_rebalance(stack_ptr,stack_count);
}
406
407
408
409
/*
 * Insert new_node into the tree and, during the same descent, report
 * its list neighbours: *to_the_left gets the node with the largest
 * key below the new key, *to_the_right the node with the smallest key
 * above it (NULL when no such node exists).
 */
static inline void avl_insert_neighbours (struct vm_area_struct * new_node, struct vm_area_struct ** ptree,
	struct vm_area_struct ** to_the_left, struct vm_area_struct ** to_the_right)
{
	vm_avl_key_t key = new_node->vm_avl_key;
	struct vm_area_struct ** nodeplace = ptree;
	/* Path from root to the insertion point, for avl_rebalance. */
	struct vm_area_struct ** stack[avl_maxheight];
	int stack_count = 0;
	struct vm_area_struct *** stack_ptr = &stack[0];
	*to_the_left = *to_the_right = NULL;
	for (;;) {
		struct vm_area_struct * node = *nodeplace;
		if (node == avl_empty)
			break;
		*stack_ptr++ = nodeplace; stack_count++;
		if (key < node->vm_avl_key) {
			/* Going left: node is the closest successor so far. */
			*to_the_right = node;
			nodeplace = &node->vm_avl_left;
		} else {
			/* Going right: node is the closest predecessor so far. */
			*to_the_left = node;
			nodeplace = &node->vm_avl_right;
		}
	}
	/* Attach as a leaf, then rebalance back up the recorded path. */
	new_node->vm_avl_left = avl_empty;
	new_node->vm_avl_right = avl_empty;
	new_node->vm_avl_height = 1;
	*nodeplace = new_node;
	avl_rebalance(stack_ptr,stack_count);
}
438
439
/*
 * Remove node_to_delete from the tree rooted at *ptree, keeping the
 * tree balanced.  The node must be present.
 */
static inline void avl_remove (struct vm_area_struct * node_to_delete, struct vm_area_struct ** ptree)
{
	vm_avl_key_t key = node_to_delete->vm_avl_key;
	struct vm_area_struct ** nodeplace = ptree;
	/* Path from root to the deleted node, for avl_rebalance. */
	struct vm_area_struct ** stack[avl_maxheight];
	int stack_count = 0;
	struct vm_area_struct *** stack_ptr = &stack[0];
	struct vm_area_struct ** nodeplace_to_delete;
	for (;;) {
		struct vm_area_struct * node = *nodeplace;
		if (node == avl_empty) {
			/* Caller error: the node is not in this tree. */
			printk("avl_remove: node to delete not found in tree\n");
			return;
		}
		*stack_ptr++ = nodeplace; stack_count++;
		if (key == node->vm_avl_key)
			break;
		if (key < node->vm_avl_key)
			nodeplace = &node->vm_avl_left;
		else
			nodeplace = &node->vm_avl_right;
	}
	nodeplace_to_delete = nodeplace;
	/* Unlink *nodeplace_to_delete from the tree. */
	if (node_to_delete->vm_avl_left == avl_empty) {
		/* No left child: splice the right subtree in directly. */
		*nodeplace_to_delete = node_to_delete->vm_avl_right;
		stack_ptr--; stack_count--;
	} else {
		/*
		 * Replace the node by its in-order predecessor: the
		 * rightmost node of the left subtree.
		 */
		struct vm_area_struct *** stack_ptr_to_delete = stack_ptr;
		struct vm_area_struct ** nodeplace = &node_to_delete->vm_avl_left;
		struct vm_area_struct * node;
		for (;;) {
			node = *nodeplace;
			if (node->vm_avl_right == avl_empty)
				break;
			*stack_ptr++ = nodeplace; stack_count++;
			nodeplace = &node->vm_avl_right;
		}
		*nodeplace = node->vm_avl_left;
		/* node replaces node_to_delete in the tree. */
		node->vm_avl_left = node_to_delete->vm_avl_left;
		node->vm_avl_right = node_to_delete->vm_avl_right;
		node->vm_avl_height = node_to_delete->vm_avl_height;
		*nodeplace_to_delete = node;
		/* The recorded path entry for the deleted node now refers
		   to the replacement's left link. */
		*stack_ptr_to_delete = &node->vm_avl_left;
	}
	avl_rebalance(stack_ptr,stack_count);
}
489
490 #ifdef DEBUG_AVL
491
492
493 static void printk_list (struct vm_area_struct * vma)
494 {
495 printk("[");
496 while (vma) {
497 printk("%08lX-%08lX", vma->vm_start, vma->vm_end);
498 vma = vma->vm_next;
499 if (!vma)
500 break;
501 printk(" ");
502 }
503 printk("]");
504 }
505
506
/*
 * Debug dump of the AVL tree, in-order, with parentheses around each
 * subtree and '<'/'>' separating a node from its left/right children.
 */
static void printk_avl (struct vm_area_struct * tree)
{
	if (tree != avl_empty) {
		printk("(");
		if (tree->vm_avl_left != avl_empty) {
			printk_avl(tree->vm_avl_left);
			printk("<");
		}
		printk("%08lX-%08lX", tree->vm_start, tree->vm_end);
		if (tree->vm_avl_right != avl_empty) {
			printk(">");
			printk_avl(tree->vm_avl_right);
		}
		printk(")");
	}
}
523
/* Label printed by the consistency checkers to identify the call site. */
static char *avl_check_point = "somewhere";
525
526
/*
 * Verify, recursively, that every node's cached height matches its
 * children's heights and that the AVL balance condition holds
 * (sibling heights differ by at most one).
 */
static void avl_checkheights (struct vm_area_struct * tree)
{
	int h, hl, hr;

	if (tree == avl_empty)
		return;
	avl_checkheights(tree->vm_avl_left);
	avl_checkheights(tree->vm_avl_right);
	h = tree->vm_avl_height;
	hl = heightof(tree->vm_avl_left);
	hr = heightof(tree->vm_avl_right);
	/* Either the left subtree is the taller one (by at most one)... */
	if ((h == hl+1) && (hr <= hl) && (hl <= hr+1))
		return;
	/* ...or the right subtree is. */
	if ((h == hr+1) && (hl <= hr) && (hr <= hl+1))
		return;
	printk("%s: avl_checkheights: heights inconsistent\n",avl_check_point);
}
544
545
546 static void avl_checkleft (struct vm_area_struct * tree, vm_avl_key_t key)
547 {
548 if (tree == avl_empty)
549 return;
550 avl_checkleft(tree->vm_avl_left,key);
551 avl_checkleft(tree->vm_avl_right,key);
552 if (tree->vm_avl_key < key)
553 return;
554 printk("%s: avl_checkleft: left key %lu >= top key %lu\n",avl_check_point,tree->vm_avl_key,key);
555 }
556
557
558 static void avl_checkright (struct vm_area_struct * tree, vm_avl_key_t key)
559 {
560 if (tree == avl_empty)
561 return;
562 avl_checkright(tree->vm_avl_left,key);
563 avl_checkright(tree->vm_avl_right,key);
564 if (tree->vm_avl_key > key)
565 return;
566 printk("%s: avl_checkright: right key %lu <= top key %lu\n",avl_check_point,tree->vm_avl_key,key);
567 }
568
569
/* Verify the binary-search-tree key ordering of the whole tree. */
static void avl_checkorder (struct vm_area_struct * tree)
{
	if (tree == avl_empty)
		return;
	avl_checkorder(tree->vm_avl_left);
	avl_checkorder(tree->vm_avl_right);
	avl_checkleft(tree->vm_avl_left,tree->vm_avl_key);
	avl_checkright(tree->vm_avl_right,tree->vm_avl_key);
}
579
580
/*
 * Run all consistency checks on the task's mmap AVL tree; `caller'
 * names the call site in any diagnostics that get printed.
 */
static void avl_check (struct task_struct * task, char *caller)
{
	avl_check_point = caller;
	avl_checkheights(task->mm->mmap_avl);
	avl_checkorder(task->mm->mmap_avl);
}
590
591 #endif
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
/*
 * Fix up a vma after part of it has been unmapped.  `area' is the
 * original vma and [addr, addr+len) the piece being removed.  The
 * caller frees `area' itself afterwards (see do_munmap), so any
 * surviving pieces are re-inserted as freshly allocated copies.
 */
static void unmap_fixup(struct vm_area_struct *area,
	unsigned long addr, size_t len)
{
	struct vm_area_struct *mpnt;
	unsigned long end = addr + len;

	/* The unmapped range must lie entirely within the area. */
	if (addr < area->vm_start || addr >= area->vm_end ||
	    end <= area->vm_start || end > area->vm_end ||
	    end < addr)
	{
		printk("unmap_fixup: area=%lx-%lx, unmap %lx-%lx!!\n",
			area->vm_start, area->vm_end, addr, end);
		return;
	}
	area->vm_mm->total_vm -= len >> PAGE_SHIFT;
	if (area->vm_flags & VM_LOCKED)
		area->vm_mm->locked_vm -= len >> PAGE_SHIFT;

	/* Whole area unmapped: close it, drop the inode, nothing survives. */
	if (addr == area->vm_start && end == area->vm_end) {
		if (area->vm_ops && area->vm_ops->close)
			area->vm_ops->close(area);
		if (area->vm_inode)
			iput(area->vm_inode);
		return;
	}

	/* Otherwise shrink or split the area around the hole. */
	if (end == area->vm_end)
		/* Tail unmapped: shrink from the right. */
		area->vm_end = addr;
	else
	if (addr == area->vm_start) {
		/* Head unmapped: advance the start and the file offset. */
		area->vm_offset += (end - area->vm_start);
		area->vm_start = end;
	}
	else {
		/*
		 * Middle unmapped: the area splits in two.  Insert a
		 * copy covering the piece after the hole; the piece
		 * before the hole is handled below.
		 */
		mpnt = (struct vm_area_struct *)kmalloc(sizeof(*mpnt), GFP_KERNEL);

		if (!mpnt)
			return;
		*mpnt = *area;
		mpnt->vm_offset += (end - area->vm_start);
		mpnt->vm_start = end;
		if (mpnt->vm_inode)
			mpnt->vm_inode->i_count++;
		if (mpnt->vm_ops && mpnt->vm_ops->open)
			mpnt->vm_ops->open(mpnt);
		area->vm_end = addr;
		insert_vm_struct(current, mpnt);
	}

	/*
	 * Re-insert the remaining piece as a fresh copy, since the
	 * caller will free the original `area' structure.
	 */
	mpnt = (struct vm_area_struct *)kmalloc(sizeof(*mpnt), GFP_KERNEL);
	if (!mpnt)
		return;
	*mpnt = *area;
	if (mpnt->vm_ops && mpnt->vm_ops->open)
		mpnt->vm_ops->open(mpnt);
	if (area->vm_ops && area->vm_ops->close) {
		/* Close the (now superseded) original as a zero-length area. */
		area->vm_end = area->vm_start;
		area->vm_ops->close(area);
	}
	insert_vm_struct(current, mpnt);
}
682
/* munmap(2) system call entry point: unmap [addr, addr+len). */
asmlinkage int sys_munmap(unsigned long addr, size_t len)
{
	return do_munmap(addr, len);
}
687
688
689
690
691
692
693
694 int do_munmap(unsigned long addr, size_t len)
695 {
696 struct vm_area_struct *mpnt, *prev, *next, **npp, *free;
697
698 if ((addr & ~PAGE_MASK) || addr > TASK_SIZE || len > TASK_SIZE-addr)
699 return -EINVAL;
700
701 if ((len = PAGE_ALIGN(len)) == 0)
702 return 0;
703
704
705
706
707
708
709
710 mpnt = find_vma(current, addr);
711 if (!mpnt)
712 return 0;
713 avl_neighbours(mpnt, current->mm->mmap_avl, &prev, &next);
714
715
716
717 npp = (prev ? &prev->vm_next : ¤t->mm->mmap);
718 free = NULL;
719 for ( ; mpnt && mpnt->vm_start < addr+len; mpnt = *npp) {
720 *npp = mpnt->vm_next;
721 mpnt->vm_next = free;
722 free = mpnt;
723 avl_remove(mpnt, ¤t->mm->mmap_avl);
724 }
725
726 if (free == NULL)
727 return 0;
728
729
730
731
732
733
734
735 while (free) {
736 unsigned long st, end;
737
738 mpnt = free;
739 free = free->vm_next;
740
741 remove_shared_vm_struct(mpnt);
742
743 st = addr < mpnt->vm_start ? mpnt->vm_start : addr;
744 end = addr+len;
745 end = end > mpnt->vm_end ? mpnt->vm_end : end;
746
747 if (mpnt->vm_ops && mpnt->vm_ops->unmap)
748 mpnt->vm_ops->unmap(mpnt, st, end-st);
749 zap_page_range(current->mm, st, end-st);
750 unmap_fixup(mpnt, st, end-st);
751 kfree(mpnt);
752 }
753
754 zap_page_range(current->mm, addr, len);
755 return 0;
756 }
757
758
759 void build_mmap_avl(struct mm_struct * mm)
760 {
761 struct vm_area_struct * vma;
762
763 mm->mmap_avl = NULL;
764 for (vma = mm->mmap; vma; vma = vma->vm_next)
765 avl_insert(vma, &mm->mmap_avl);
766 }
767
768
/*
 * Release every mapping of an mm (process exit): call each area's
 * unmap/close ops, drop it from the inode sharing ring, tear down its
 * page tables, release its inode reference, and free the structure.
 */
void exit_mmap(struct mm_struct * mm)
{
	struct vm_area_struct * mpnt;

	mpnt = mm->mmap;
	/* Detach everything first, so the mm itself looks empty. */
	mm->mmap = NULL;
	mm->mmap_avl = NULL;
	mm->rss = 0;
	mm->total_vm = 0;
	mm->locked_vm = 0;
	while (mpnt) {
		struct vm_area_struct * next = mpnt->vm_next;
		if (mpnt->vm_ops) {
			if (mpnt->vm_ops->unmap)
				mpnt->vm_ops->unmap(mpnt, mpnt->vm_start, mpnt->vm_end-mpnt->vm_start);
			if (mpnt->vm_ops->close)
				mpnt->vm_ops->close(mpnt);
		}
		remove_shared_vm_struct(mpnt);
		zap_page_range(mm, mpnt->vm_start, mpnt->vm_end-mpnt->vm_start);
		if (mpnt->vm_inode)
			iput(mpnt->vm_inode);
		kfree(mpnt);
		mpnt = next;
	}
}
795
796
797
798
799
/*
 * Insert a vm structure into the task's address-sorted vma list and
 * AVL tree, and onto its inode's circular ring of shared mappings
 * (when the vma is file-backed).
 */
void insert_vm_struct(struct task_struct *t, struct vm_area_struct *vmp)
{
	struct vm_area_struct *share;
	struct inode * inode;

#if 0
	/* Old linear-list insertion, kept disabled for reference. */
	struct vm_area_struct **p, *mpnt;

	p = &t->mm->mmap;
	while ((mpnt = *p) != NULL) {
		if (mpnt->vm_start > vmp->vm_start)
			break;
		if (mpnt->vm_end > vmp->vm_start)
			printk("insert_vm_struct: overlapping memory areas\n");
		p = &mpnt->vm_next;
	}
	vmp->vm_next = mpnt;
	*p = vmp;
#else
	struct vm_area_struct * prev, * next;

	/* The AVL insertion also yields the new node's list neighbours,
	   so the list can be spliced without a separate scan. */
	avl_insert_neighbours(vmp, &t->mm->mmap_avl, &prev, &next);
	if ((prev ? prev->vm_next : t->mm->mmap) != next)
		printk("insert_vm_struct: tree inconsistent with list\n");
	if (prev)
		prev->vm_next = vmp;
	else
		t->mm->mmap = vmp;
	vmp->vm_next = next;
#endif

	inode = vmp->vm_inode;
	if (!inode)
		return;

	/* Link into the inode's circular doubly-linked sharing ring,
	   or start a one-element ring if this is the first mapping. */
	if ((share = inode->i_mmap)) {
		vmp->vm_next_share = share->vm_next_share;
		vmp->vm_next_share->vm_prev_share = vmp;
		share->vm_next_share = vmp;
		vmp->vm_prev_share = share;
	} else
		inode->i_mmap = vmp->vm_next_share = vmp->vm_prev_share = vmp;
}
844
845
846
847
848 void remove_shared_vm_struct(struct vm_area_struct *mpnt)
849 {
850 struct inode * inode = mpnt->vm_inode;
851
852 if (!inode)
853 return;
854
855 if (mpnt->vm_next_share == mpnt) {
856 if (inode->i_mmap != mpnt)
857 printk("Inode i_mmap ring corrupted\n");
858 inode->i_mmap = NULL;
859 return;
860 }
861
862 if (inode->i_mmap == mpnt)
863 inode->i_mmap = mpnt->vm_next_share;
864
865 mpnt->vm_prev_share->vm_next_share = mpnt->vm_next_share;
866 mpnt->vm_next_share->vm_prev_share = mpnt->vm_prev_share;
867 }
868
869
870
871
872
873
874
875
/*
 * Coalesce adjacent vmas in [start_addr, end_addr) that have
 * identical backing (inode, pte, ops, flags) and abut exactly,
 * to keep the vma list from fragmenting.
 */
void merge_segments (struct task_struct * task, unsigned long start_addr, unsigned long end_addr)
{
	struct vm_area_struct *prev, *mpnt, *next;

	mpnt = find_vma(task, start_addr);
	if (!mpnt)
		return;
	avl_neighbours(mpnt, task->mm->mmap_avl, &prev, &next);
	/* we have  prev->vm_next == mpnt  (or prev == NULL at the head) */

	if (!prev) {
		/* Nothing before the first candidate: start the pair there. */
		prev = mpnt;
		mpnt = next;
	}

	/* Walk (prev, mpnt) pairs along the list while candidates
	   remain below end_addr, merging mpnt into prev when possible. */
	for ( ; mpnt && prev->vm_start < end_addr ; prev = mpnt, mpnt = next) {
#if 0
		printk("looping in merge_segments, mpnt=0x%lX\n", (unsigned long) mpnt);
#endif

		next = mpnt->vm_next;

		/* The two areas must agree on everything that
		   distinguishes a mapping... */
		if (mpnt->vm_inode != prev->vm_inode)
			continue;
		if (mpnt->vm_pte != prev->vm_pte)
			continue;
		if (mpnt->vm_ops != prev->vm_ops)
			continue;
		if (mpnt->vm_flags != prev->vm_flags)
			continue;
		/* ...and must be exactly adjacent in the address space. */
		if (prev->vm_end != mpnt->vm_start)
			continue;

		/* For file-backed or SHM areas the file offsets must be
		   contiguous across the boundary too. */
		if ((mpnt->vm_inode != NULL) || (mpnt->vm_flags & VM_SHM)) {
			if (prev->vm_offset + prev->vm_end - prev->vm_start != mpnt->vm_offset)
				continue;
		}

		/*
		 * Merge mpnt into prev, then make the enlarged prev the
		 * candidate again so it may merge with the next area too.
		 */
		avl_remove(mpnt, &task->mm->mmap_avl);
		prev->vm_end = mpnt->vm_end;
		prev->vm_next = mpnt->vm_next;
		if (mpnt->vm_ops && mpnt->vm_ops->close) {
			/* Close mpnt as a zero-length area at its old end. */
			mpnt->vm_offset += mpnt->vm_end - mpnt->vm_start;
			mpnt->vm_start = mpnt->vm_end;
			mpnt->vm_ops->close(mpnt);
		}
		remove_shared_vm_struct(mpnt);
		if (mpnt->vm_inode)
			mpnt->vm_inode->i_count--;
		kfree_s(mpnt, sizeof(*mpnt));
		mpnt = prev;
	}
}