This source file includes the following definitions:
- do_mmap
- get_unmapped_area
- find_vma
- find_vma_intersection
- avl_neighbours
- avl_rebalance
- avl_insert
- avl_insert_neighbours
- avl_remove
- printk_list
- printk_avl
- avl_checkheights
- avl_checkleft
- avl_checkright
- avl_checkorder
- avl_check
- unmap_fixup
- sys_munmap
- do_munmap
- build_mmap_avl
- exit_mmap
- insert_vm_struct
- remove_shared_vm_struct
- merge_segments
- anon_map
/*
 *	linux/mm/mmap.c
 */
#include <linux/stat.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/shm.h>
#include <linux/errno.h>
#include <linux/mman.h>
#include <linux/string.h>
#include <linux/malloc.h>

#include <asm/segment.h>
#include <asm/system.h>
#include <asm/pgtable.h>

static int anon_map(struct inode *, struct file *, struct vm_area_struct *);
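
/*
 * protection_map translates the low four bits of vm_flags
 * (VM_READ, VM_WRITE, VM_EXEC, VM_SHARED) into hardware page
 * protections: entries 0-7 are the private (copy-on-write)
 * combinations, entries 8-15 the shared ones.
 */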
pgprot_t protection_map[16] = {
	__P000, __P001, __P010, __P011, __P100, __P101, __P110, __P111,
	__S000, __S001, __S010, __S011, __S100, __S101, __S110, __S111
};

unsigned long do_mmap(struct file * file, unsigned long addr, unsigned long len,
	unsigned long prot, unsigned long flags, unsigned long off)
{
	int error;
	struct vm_area_struct * vma;

	if ((len = PAGE_ALIGN(len)) == 0)
		return addr;

	if (addr > TASK_SIZE || len > TASK_SIZE || addr > TASK_SIZE-len)
		return -EINVAL;

	/* offset overflow? */
	if (off + len < off)
		return -EINVAL;

	/*
	 * Do simple checking here so the lower-level routines won't have
	 * to.  Access permissions are assumed to have been handled by the
	 * open of the memory object, so none are checked here.
	 */
	if (file != NULL) {
		switch (flags & MAP_TYPE) {
		case MAP_SHARED:
			if ((prot & PROT_WRITE) && !(file->f_mode & 2))
				return -EACCES;
			/* fall through */
		case MAP_PRIVATE:
			if (!(file->f_mode & 1))
				return -EACCES;
			break;

		default:
			return -EINVAL;
		}
		if (flags & MAP_DENYWRITE) {
			if (file->f_inode->i_wcount > 0)
				return -ETXTBSY;
		}
	} else if ((flags & MAP_TYPE) != MAP_PRIVATE)
		return -EINVAL;

	/*
	 * Obtain the address to map to.  We verify (or select) it and
	 * ensure that it represents a valid section of the address space.
	 */
	if (flags & MAP_FIXED) {
		if (addr & ~PAGE_MASK)
			return -EINVAL;
		if (len > TASK_SIZE || addr > TASK_SIZE - len)
			return -EINVAL;
	} else {
		addr = get_unmapped_area(addr, len);
		if (!addr)
			return -ENOMEM;
	}

	/*
	 * Determine the object being mapped and call the appropriate
	 * specific mapper.  The address has already been validated, but
	 * not unmapped.
	 */
	if (file && (!file->f_op || !file->f_op->mmap))
		return -ENODEV;

	vma = (struct vm_area_struct *)kmalloc(sizeof(struct vm_area_struct),
		GFP_KERNEL);
	if (!vma)
		return -ENOMEM;

	vma->vm_mm = current->mm;
	vma->vm_start = addr;
	vma->vm_end = addr + len;
	vma->vm_flags = prot & (VM_READ | VM_WRITE | VM_EXEC);
	vma->vm_flags |= flags & (VM_GROWSDOWN | VM_DENYWRITE | VM_EXECUTABLE);

	if (file) {
		if (file->f_mode & 1)
			vma->vm_flags |= VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC;
		if (flags & MAP_SHARED) {
			vma->vm_flags |= VM_SHARED | VM_MAYSHARE;
			/*
			 * If the file is not open for writing, the shared
			 * mapping is downgraded: VM_SHARED is dropped, and
			 * clearing VM_MAYWRITE means writes can never be
			 * enabled later via mprotect() either.
			 */
			if (!(file->f_mode & 2))
				vma->vm_flags &= ~(VM_MAYWRITE | VM_SHARED);
		}
	} else
		vma->vm_flags |= VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC;
	vma->vm_page_prot = protection_map[vma->vm_flags & 0x0f];
	vma->vm_ops = NULL;
	vma->vm_offset = off;
	vma->vm_inode = NULL;
	vma->vm_pte = 0;

	/* Clear any old mappings in the range, then install the new one. */
	do_munmap(addr, len);

	if (file)
		error = file->f_op->mmap(file->f_inode, file, vma);
	else
		error = anon_map(NULL, NULL, vma);

	if (error) {
		kfree(vma);
		return error;
	}
	insert_vm_struct(current, vma);
	merge_segments(current, vma->vm_start, vma->vm_end);
	return addr;
}
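
/*
 * Get an address range which is currently unmapped.  The ordered VMA
 * list is walked from the hint (or TASK_SIZE / 3 if no hint is given)
 * until a gap of at least len bytes is found.  Returns 0 on failure;
 * a zero hint is always replaced, so 0 never denotes success.
 */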
unsigned long get_unmapped_area(unsigned long addr, unsigned long len)
{
	struct vm_area_struct * vmm;

	if (len > TASK_SIZE)
		return 0;
	if (!addr)
		addr = TASK_SIZE / 3;
	addr = PAGE_ALIGN(addr);

	for (vmm = current->mm->mmap; ; vmm = vmm->vm_next) {
		if (TASK_SIZE - len < addr)
			return 0;
		if (!vmm)
			return addr;
		if (addr > vmm->vm_end)
			continue;
		if (addr + len > vmm->vm_start) {
			addr = vmm->vm_end;
			continue;
		}
		return addr;
	}
}
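
/*
 * Searching a VMA in the linear list task->mm->mmap is horribly slow.
 * The AVL tree task->mm->mmap_avl holds the same VMAs keyed by vm_end,
 * cutting lookups from O(n) to O(log n).  List and tree are always
 * kept consistent: every VMA is on both.
 */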
#define vm_avl_key	vm_end
#define vm_avl_key_t	unsigned long

/* an empty tree is a NULL pointer */
#define avl_empty	(struct vm_area_struct *) NULL

/*
 * Since the trees are balanced, their height will never be large.
 * With at least one page per VMA, a 32-bit address space holds fewer
 * than 2^20 VMAs, and the worst-case AVL height (about 1.44 * log2(n))
 * stays well below this bound.
 */
#define avl_maxheight	41
#define heightof(tree)	((tree) == avl_empty ? 0 : (tree)->vm_avl_height)
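
/* Look up the first VMA which satisfies vm_end > addr; NULL if none. */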
struct vm_area_struct * find_vma (struct task_struct * task, unsigned long addr)
{
#if 0
	struct vm_area_struct * vma;

	if (!task->mm)
		return NULL;
	for (vma = task->mm->mmap ; ; vma = vma->vm_next) {
		if (!vma)
			return NULL;
		if (vma->vm_end > addr)
			return vma;
	}
#else
	struct vm_area_struct * result = NULL;
	struct vm_area_struct * tree;

	if (!task->mm)
		return NULL;
	for (tree = task->mm->mmap_avl ; ; ) {
		if (tree == avl_empty)
			return result;
		if (tree->vm_end > addr) {
			if (tree->vm_start <= addr)
				return tree;	/* addr falls inside this VMA */
			result = tree;		/* best candidate so far */
			tree = tree->vm_avl_left;
		} else
			tree = tree->vm_avl_right;
	}
#endif
}
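
/*
 * Look up the first VMA which intersects the interval
 * [start_addr, end_addr); NULL if none.  Assumes start_addr < end_addr.
 */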
struct vm_area_struct * find_vma_intersection (struct task_struct * task, unsigned long start_addr, unsigned long end_addr)
{
	struct vm_area_struct * vma;

#if 0
	for (vma = task->mm->mmap; vma; vma = vma->vm_next) {
		if (end_addr <= vma->vm_start)
			break;
		if (start_addr < vma->vm_end)
			return vma;
	}
	return NULL;
#else
	vma = find_vma(task,start_addr);
	if (!vma || end_addr <= vma->vm_start)
		return NULL;
	return vma;
#endif
}
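
/*
 * Find the VMAs immediately to the left and right of node in key
 * order.  Since keys (vm_end values) are distinct, these are the
 * tree's in-order predecessor and successor, and they must equal
 * node's neighbours in the linear list; the final check verifies that.
 */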
static void avl_neighbours (struct vm_area_struct * node, struct vm_area_struct * tree, struct vm_area_struct ** to_the_left, struct vm_area_struct ** to_the_right)
{
	vm_avl_key_t key = node->vm_avl_key;

	*to_the_left = *to_the_right = NULL;
	for (;;) {
		if (tree == avl_empty) {
			printk("avl_neighbours: node not found in the tree\n");
			return;
		}
		if (key == tree->vm_avl_key)
			break;
		if (key < tree->vm_avl_key) {
			*to_the_right = tree;
			tree = tree->vm_avl_left;
		} else {
			*to_the_left = tree;
			tree = tree->vm_avl_right;
		}
	}
	if (tree != node) {
		printk("avl_neighbours: node not exactly found in the tree\n");
		return;
	}
	if (tree->vm_avl_left != avl_empty) {
		/* the predecessor is the rightmost node of the left subtree */
		struct vm_area_struct * node;
		for (node = tree->vm_avl_left; node->vm_avl_right != avl_empty; node = node->vm_avl_right)
			continue;
		*to_the_left = node;
	}
	if (tree->vm_avl_right != avl_empty) {
		/* the successor is the leftmost node of the right subtree */
		struct vm_area_struct * node;
		for (node = tree->vm_avl_right; node->vm_avl_left != avl_empty; node = node->vm_avl_left)
			continue;
		*to_the_right = node;
	}
	if ((*to_the_left && ((*to_the_left)->vm_next != node)) || (node->vm_next != *to_the_right))
		printk("avl_neighbours: tree inconsistent with list\n");
}
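
/*
 * Rebalance a tree after insertion or deletion.  nodeplaces_ptr points
 * just past a stack of count entries; entry [0] holds the slot of the
 * root, and each subsequent entry the slot of a child of the previous
 * node, ending where the modification took place.  Walking back up,
 * each node whose subtree heights differ by more than one is fixed
 * with a single or double rotation.
 */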
static void avl_rebalance (struct vm_area_struct *** nodeplaces_ptr, int count)
{
	for ( ; count > 0 ; count--) {
		struct vm_area_struct ** nodeplace = *--nodeplaces_ptr;
		struct vm_area_struct * node = *nodeplace;
		struct vm_area_struct * nodeleft = node->vm_avl_left;
		struct vm_area_struct * noderight = node->vm_avl_right;
		int heightleft = heightof(nodeleft);
		int heightright = heightof(noderight);
		if (heightright + 1 < heightleft) {
			/* the left subtree is too tall */
			struct vm_area_struct * nodeleftleft = nodeleft->vm_avl_left;
			struct vm_area_struct * nodeleftright = nodeleft->vm_avl_right;
			int heightleftright = heightof(nodeleftright);
			if (heightof(nodeleftleft) >= heightleftright) {
				/*
				 * Single right rotation: nodeleft becomes the
				 * root of this subtree, node its right child,
				 * and nodeleft's old right subtree moves
				 * under node.
				 */
				node->vm_avl_left = nodeleftright; nodeleft->vm_avl_right = node;
				nodeleft->vm_avl_height = 1 + (node->vm_avl_height = 1 + heightleftright);
				*nodeplace = nodeleft;
			} else {
				/*
				 * Double (left-right) rotation: nodeleftright
				 * becomes the root; its subtrees are split
				 * between nodeleft (new left child) and node
				 * (new right child).
				 */
				nodeleft->vm_avl_right = nodeleftright->vm_avl_left;
				node->vm_avl_left = nodeleftright->vm_avl_right;
				nodeleftright->vm_avl_left = nodeleft;
				nodeleftright->vm_avl_right = node;
				nodeleft->vm_avl_height = node->vm_avl_height = heightleftright;
				nodeleftright->vm_avl_height = heightleft;
				*nodeplace = nodeleftright;
			}
		}
		else if (heightleft + 1 < heightright) {
			/* the right subtree is too tall: mirror image of the above */
			struct vm_area_struct * noderightright = noderight->vm_avl_right;
			struct vm_area_struct * noderightleft = noderight->vm_avl_left;
			int heightrightleft = heightof(noderightleft);
			if (heightof(noderightright) >= heightrightleft) {
				/* single left rotation */
				node->vm_avl_right = noderightleft; noderight->vm_avl_left = node;
				noderight->vm_avl_height = 1 + (node->vm_avl_height = 1 + heightrightleft);
				*nodeplace = noderight;
			} else {
				/* double (right-left) rotation */
				noderight->vm_avl_left = noderightleft->vm_avl_right;
				node->vm_avl_right = noderightleft->vm_avl_left;
				noderightleft->vm_avl_right = noderight;
				noderightleft->vm_avl_left = node;
				noderight->vm_avl_height = node->vm_avl_height = heightrightleft;
				noderightleft->vm_avl_height = heightright;
				*nodeplace = noderightleft;
			}
		}
		else {
			/* heights balanced: update and stop early if unchanged */
			int height = (heightleft<heightright ? heightright : heightleft) + 1;
			if (height == node->vm_avl_height)
				break;
			node->vm_avl_height = height;
		}
	}
}
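
/* Insert a node into a tree, then rebalance along the search path. */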
static void avl_insert (struct vm_area_struct * new_node, struct vm_area_struct ** ptree)
{
	vm_avl_key_t key = new_node->vm_avl_key;
	struct vm_area_struct ** nodeplace = ptree;
	struct vm_area_struct ** stack[avl_maxheight];
	int stack_count = 0;
	struct vm_area_struct *** stack_ptr = &stack[0];
	for (;;) {
		struct vm_area_struct * node = *nodeplace;
		if (node == avl_empty)
			break;
		*stack_ptr++ = nodeplace; stack_count++;
		if (key < node->vm_avl_key)
			nodeplace = &node->vm_avl_left;
		else
			nodeplace = &node->vm_avl_right;
	}
	new_node->vm_avl_left = avl_empty;
	new_node->vm_avl_right = avl_empty;
	new_node->vm_avl_height = 1;
	*nodeplace = new_node;
	avl_rebalance(stack_ptr,stack_count);
}
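
/*
 * Insert a node into a tree, and as a side effect return the nodes
 * that become its neighbours in key order.
 */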
static void avl_insert_neighbours (struct vm_area_struct * new_node, struct vm_area_struct ** ptree,
	struct vm_area_struct ** to_the_left, struct vm_area_struct ** to_the_right)
{
	vm_avl_key_t key = new_node->vm_avl_key;
	struct vm_area_struct ** nodeplace = ptree;
	struct vm_area_struct ** stack[avl_maxheight];
	int stack_count = 0;
	struct vm_area_struct *** stack_ptr = &stack[0];
	*to_the_left = *to_the_right = NULL;
	for (;;) {
		struct vm_area_struct * node = *nodeplace;
		if (node == avl_empty)
			break;
		*stack_ptr++ = nodeplace; stack_count++;
		if (key < node->vm_avl_key) {
			*to_the_right = node;
			nodeplace = &node->vm_avl_left;
		} else {
			*to_the_left = node;
			nodeplace = &node->vm_avl_right;
		}
	}
	new_node->vm_avl_left = avl_empty;
	new_node->vm_avl_right = avl_empty;
	new_node->vm_avl_height = 1;
	*nodeplace = new_node;
	avl_rebalance(stack_ptr,stack_count);
}
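
/* Remove a node from a tree, then rebalance along the search path. */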
static void avl_remove (struct vm_area_struct * node_to_delete, struct vm_area_struct ** ptree)
{
	vm_avl_key_t key = node_to_delete->vm_avl_key;
	struct vm_area_struct ** nodeplace = ptree;
	struct vm_area_struct ** stack[avl_maxheight];
	int stack_count = 0;
	struct vm_area_struct *** stack_ptr = &stack[0];
	struct vm_area_struct ** nodeplace_to_delete;
	for (;;) {
		struct vm_area_struct * node = *nodeplace;
		if (node == avl_empty) {
			printk("avl_remove: node to delete not found in tree\n");
			return;
		}
		*stack_ptr++ = nodeplace; stack_count++;
		if (key == node->vm_avl_key)
			break;
		if (key < node->vm_avl_key)
			nodeplace = &node->vm_avl_left;
		else
			nodeplace = &node->vm_avl_right;
	}
	nodeplace_to_delete = nodeplace;
	/* Have to remove node_to_delete = *nodeplace_to_delete. */
	if (node_to_delete->vm_avl_left == avl_empty) {
		*nodeplace_to_delete = node_to_delete->vm_avl_right;
		stack_ptr--; stack_count--;
	} else {
		/* Replace it with its in-order predecessor: the rightmost
		 * node of its left subtree. */
		struct vm_area_struct *** stack_ptr_to_delete = stack_ptr;
		struct vm_area_struct ** nodeplace = &node_to_delete->vm_avl_left;
		struct vm_area_struct * node;
		for (;;) {
			node = *nodeplace;
			if (node->vm_avl_right == avl_empty)
				break;
			*stack_ptr++ = nodeplace; stack_count++;
			nodeplace = &node->vm_avl_right;
		}
		*nodeplace = node->vm_avl_left;
		/* node replaces node_to_delete */
		node->vm_avl_left = node_to_delete->vm_avl_left;
		node->vm_avl_right = node_to_delete->vm_avl_right;
		node->vm_avl_height = node_to_delete->vm_avl_height;
		*nodeplace_to_delete = node;
		*stack_ptr_to_delete = &node->vm_avl_left;	/* fix the stack entry */
	}
	avl_rebalance(stack_ptr,stack_count);
}

#ifdef DEBUG_AVL

/* print a list of VMAs */
static void printk_list (struct vm_area_struct * vma)
{
	printk("[");
	while (vma) {
		printk("%08lX-%08lX", vma->vm_start, vma->vm_end);
		vma = vma->vm_next;
		if (!vma)
			break;
		printk(" ");
	}
	printk("]");
}

/* print a tree of VMAs */
static void printk_avl (struct vm_area_struct * tree)
{
	if (tree != avl_empty) {
		printk("(");
		if (tree->vm_avl_left != avl_empty) {
			printk_avl(tree->vm_avl_left);
			printk("<");
		}
		printk("%08lX-%08lX", tree->vm_start, tree->vm_end);
		if (tree->vm_avl_right != avl_empty) {
			printk(">");
			printk_avl(tree->vm_avl_right);
		}
		printk(")");
	}
}

static char *avl_check_point = "somewhere";

/* check that the stored heights are consistent */
static void avl_checkheights (struct vm_area_struct * tree)
{
	int h, hl, hr;

	if (tree == avl_empty)
		return;
	avl_checkheights(tree->vm_avl_left);
	avl_checkheights(tree->vm_avl_right);
	h = tree->vm_avl_height;
	hl = heightof(tree->vm_avl_left);
	hr = heightof(tree->vm_avl_right);
	if ((h == hl+1) && (hr <= hl) && (hl <= hr+1))
		return;
	if ((h == hr+1) && (hl <= hr) && (hr <= hl+1))
		return;
	printk("%s: avl_checkheights: heights inconsistent\n",avl_check_point);
}

/* check that all keys stored in a tree are < key */
static void avl_checkleft (struct vm_area_struct * tree, vm_avl_key_t key)
{
	if (tree == avl_empty)
		return;
	avl_checkleft(tree->vm_avl_left,key);
	avl_checkleft(tree->vm_avl_right,key);
	if (tree->vm_avl_key < key)
		return;
	printk("%s: avl_checkleft: left key %lu >= top key %lu\n",avl_check_point,tree->vm_avl_key,key);
}

/* check that all keys stored in a tree are > key */
static void avl_checkright (struct vm_area_struct * tree, vm_avl_key_t key)
{
	if (tree == avl_empty)
		return;
	avl_checkright(tree->vm_avl_left,key);
	avl_checkright(tree->vm_avl_right,key);
	if (tree->vm_avl_key > key)
		return;
	printk("%s: avl_checkright: right key %lu <= top key %lu\n",avl_check_point,tree->vm_avl_key,key);
}

/* check that the keys are properly ordered */
static void avl_checkorder (struct vm_area_struct * tree)
{
	if (tree == avl_empty)
		return;
	avl_checkorder(tree->vm_avl_left);
	avl_checkorder(tree->vm_avl_right);
	avl_checkleft(tree->vm_avl_left,tree->vm_avl_key);
	avl_checkright(tree->vm_avl_right,tree->vm_avl_key);
}

/* all checks */
static void avl_check (struct task_struct * task, char *caller)
{
	avl_check_point = caller;
	avl_checkheights(task->mm->mmap_avl);
	avl_checkorder(task->mm->mmap_avl);
}

#endif
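
/*
 * Normal function to fix up a mapping.  By the time it is called, the
 * area has already been unlinked from the process mapping list, so
 * whatever survives must be reinserted.
 *
 * The four cases are:
 *    unmapping the whole area,
 *    unmapping from the start of the area to a point within it,
 *    unmapping from a point within the area to its end, and
 *    unmapping between two points within the area, leaving a hole.
 *
 * The hole case needs a second vm_area_struct for the piece beyond
 * the hole.
 */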
void unmap_fixup(struct vm_area_struct *area,
	unsigned long addr, size_t len)
{
	struct vm_area_struct *mpnt;
	unsigned long end = addr + len;

	if (addr < area->vm_start || addr >= area->vm_end ||
	    end <= area->vm_start || end > area->vm_end ||
	    end < addr)
	{
		printk("unmap_fixup: area=%lx-%lx, unmap %lx-%lx!!\n",
			area->vm_start, area->vm_end, addr, end);
		return;
	}

	/* Unmapping the whole area */
	if (addr == area->vm_start && end == area->vm_end) {
		if (area->vm_ops && area->vm_ops->close)
			area->vm_ops->close(area);
		if (area->vm_inode)
			iput(area->vm_inode);
		return;
	}

	/* Work out how the area is affected */
	if (end == area->vm_end)
		area->vm_end = addr;		/* truncate the tail */
	else
	if (addr == area->vm_start) {
		area->vm_offset += (end - area->vm_start);
		area->vm_start = end;		/* truncate the head */
	}
	else {
		/* Unmapping a hole: add a mapping for the remaining tail */
		mpnt = (struct vm_area_struct *)kmalloc(sizeof(*mpnt), GFP_KERNEL);

		if (!mpnt)
			return;
		*mpnt = *area;
		mpnt->vm_offset += (end - area->vm_start);
		mpnt->vm_start = end;
		if (mpnt->vm_inode)
			mpnt->vm_inode->i_count++;
		if (mpnt->vm_ops && mpnt->vm_ops->open)
			mpnt->vm_ops->open(mpnt);
		area->vm_end = addr;	/* the area now ends before the hole */
		insert_vm_struct(current, mpnt);
	}

	/* Construct whatever is left of the area and reinsert it, since
	 * the caller already unlinked the original. */
	mpnt = (struct vm_area_struct *)kmalloc(sizeof(*mpnt), GFP_KERNEL);
	if (!mpnt)
		return;
	*mpnt = *area;
	if (mpnt->vm_ops && mpnt->vm_ops->open)
		mpnt->vm_ops->open(mpnt);
	if (area->vm_ops && area->vm_ops->close) {
		area->vm_end = area->vm_start;	/* close a zero-length area */
		area->vm_ops->close(area);
	}
	insert_vm_struct(current, mpnt);
}

asmlinkage int sys_munmap(unsigned long addr, size_t len)
{
	return do_munmap(addr, len);
}

/*
 * Munmap is split into two main parts: do_munmap(), which finds what
 * needs doing and unlinks the affected areas, and unmap_fixup() above,
 * which adjusts or splits each area.  Partial unmappings are handled.
 */
int do_munmap(unsigned long addr, size_t len)
{
	struct vm_area_struct *mpnt, *prev, *next, **npp, *free;

	if ((addr & ~PAGE_MASK) || addr > TASK_SIZE || len > TASK_SIZE-addr)
		return -EINVAL;

	if ((len = PAGE_ALIGN(len)) == 0)
		return 0;

	/*
	 * Check if this memory area is ok - put it on the temporary
	 * list if so.  Every area affected in any way (by any overlap)
	 * is put on the list; if nothing is put on, nothing is affected.
	 */
	mpnt = find_vma(current, addr);
	if (!mpnt)
		return 0;
	avl_neighbours(mpnt, current->mm->mmap_avl, &prev, &next);
	/* we have prev->vm_next == mpnt && mpnt->vm_next == next */

	npp = (prev ? &prev->vm_next : &current->mm->mmap);
	free = NULL;
	for ( ; mpnt && mpnt->vm_start < addr+len; mpnt = *npp) {
		*npp = mpnt->vm_next;
		mpnt->vm_next = free;
		free = mpnt;
		avl_remove(mpnt, &current->mm->mmap_avl);
	}

	if (free == NULL)
		return 0;

	/*
	 * Ok - we have the memory areas we should free on the 'free' list,
	 * so release them, and unmap the page range.
	 * If a segment is only partially unmapped, unmap_fixup() puts
	 * new vm_area_struct(s) back into the address space.
	 */
	while (free) {
		unsigned long st, end;

		mpnt = free;
		free = free->vm_next;

		remove_shared_vm_struct(mpnt);

		/* clip the unmap range to this area */
		st = addr < mpnt->vm_start ? mpnt->vm_start : addr;
		end = addr+len;
		end = end > mpnt->vm_end ? mpnt->vm_end : end;

		if (mpnt->vm_ops && mpnt->vm_ops->unmap)
			mpnt->vm_ops->unmap(mpnt, st, end-st);

		unmap_fixup(mpnt, st, end-st);
		kfree(mpnt);
	}

	unmap_page_range(addr, len);
	return 0;
}
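
/* Build the AVL tree corresponding to the VMA list. */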
void build_mmap_avl(struct mm_struct * mm)
{
	struct vm_area_struct * vma;

	mm->mmap_avl = NULL;
	for (vma = mm->mmap; vma; vma = vma->vm_next)
		avl_insert(vma, &mm->mmap_avl);
}
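
/* Release all mmaps of an exiting mm. */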
void exit_mmap(struct mm_struct * mm)
{
	struct vm_area_struct * mpnt;

	mpnt = mm->mmap;
	mm->mmap = NULL;
	mm->mmap_avl = NULL;
	while (mpnt) {
		struct vm_area_struct * next = mpnt->vm_next;
		if (mpnt->vm_ops && mpnt->vm_ops->close)
			mpnt->vm_ops->close(mpnt);
		remove_shared_vm_struct(mpnt);
		if (mpnt->vm_inode)
			iput(mpnt->vm_inode);
		zap_page_range(mm, mpnt->vm_start, mpnt->vm_end-mpnt->vm_start);
		kfree(mpnt);
		mpnt = next;
	}
}
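
/*
 * Insert a vm structure into the process list (sorted by address),
 * into the AVL tree, and into the inode's i_mmap share ring.
 */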
void insert_vm_struct(struct task_struct *t, struct vm_area_struct *vmp)
{
	struct vm_area_struct *share;
	struct inode * inode;

#if 0
	struct vm_area_struct **p, *mpnt;

	p = &t->mm->mmap;
	while ((mpnt = *p) != NULL) {
		if (mpnt->vm_start > vmp->vm_start)
			break;
		if (mpnt->vm_end > vmp->vm_start)
			printk("insert_vm_struct: overlapping memory areas\n");
		p = &mpnt->vm_next;
	}
	vmp->vm_next = mpnt;
	*p = vmp;
#else
	struct vm_area_struct * prev, * next;

	avl_insert_neighbours(vmp, &t->mm->mmap_avl, &prev, &next);
	if ((prev ? prev->vm_next : t->mm->mmap) != next)
		printk("insert_vm_struct: tree inconsistent with list\n");
	if (prev)
		prev->vm_next = vmp;
	else
		t->mm->mmap = vmp;
	vmp->vm_next = next;
#endif

	inode = vmp->vm_inode;
	if (!inode)
		return;

	/* link vmp into the inode's circular share ring */
	if ((share = inode->i_mmap)) {
		vmp->vm_next_share = share->vm_next_share;
		vmp->vm_next_share->vm_prev_share = vmp;
		share->vm_next_share = vmp;
		vmp->vm_prev_share = share;
	} else
		inode->i_mmap = vmp->vm_next_share = vmp->vm_prev_share = vmp;
}
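
/* Remove one vm structure from the inode's i_mmap share ring. */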
void remove_shared_vm_struct(struct vm_area_struct *mpnt)
{
	struct inode * inode = mpnt->vm_inode;

	if (!inode)
		return;

	if (mpnt->vm_next_share == mpnt) {
		/* mpnt is the only member of the ring */
		if (inode->i_mmap != mpnt)
			printk("Inode i_mmap ring corrupted\n");
		inode->i_mmap = NULL;
		return;
	}

	if (inode->i_mmap == mpnt)
		inode->i_mmap = mpnt->vm_next_share;

	mpnt->vm_prev_share->vm_next_share = mpnt->vm_next_share;
	mpnt->vm_next_share->vm_prev_share = mpnt->vm_prev_share;
}
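
/*
 * Merge adjacent memory segments if possible; redundant
 * vm_area_structs are freed.  The list is assumed to be ordered by
 * address, and only segments intersecting or adjacent to the interval
 * [start_addr, end_addr) are visited.
 */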
void merge_segments (struct task_struct * task, unsigned long start_addr, unsigned long end_addr)
{
	struct vm_area_struct *prev, *mpnt, *next;

	mpnt = find_vma(task, start_addr);
	if (!mpnt)
		return;
	avl_neighbours(mpnt, task->mm->mmap_avl, &prev, &next);
	/* we have prev->vm_next == mpnt && mpnt->vm_next == next */

	if (!prev) {
		prev = mpnt;
		mpnt = next;
	}

	/* prev and mpnt walk the list while the merged region
	 * can still touch the given interval
	 */
	for ( ; mpnt && prev->vm_start < end_addr ; prev = mpnt, mpnt = next) {
#if 0
		printk("looping in merge_segments, mpnt=0x%lX\n", (unsigned long) mpnt);
#endif
		next = mpnt->vm_next;

		/*
		 * To share, we must have the same inode, operations..
		 */
		if (mpnt->vm_inode != prev->vm_inode)
			continue;
		if (mpnt->vm_pte != prev->vm_pte)
			continue;
		if (mpnt->vm_ops != prev->vm_ops)
			continue;
		if (mpnt->vm_flags != prev->vm_flags)
			continue;
		if (prev->vm_end != mpnt->vm_start)
			continue;
		/*
		 * ..and if we have an inode, the offsets must be contiguous
		 */
		if ((mpnt->vm_inode != NULL) || (mpnt->vm_flags & VM_SHM)) {
			if (prev->vm_offset + prev->vm_end - prev->vm_start != mpnt->vm_offset)
				continue;
		}

		/*
		 * Merge prev with mpnt and set up pointers so the new
		 * big segment can possibly merge with the next one.
		 * The old unused mpnt is freed.
		 */
		avl_remove(mpnt, &task->mm->mmap_avl);
		prev->vm_end = mpnt->vm_end;
		prev->vm_next = mpnt->vm_next;
		if (mpnt->vm_ops && mpnt->vm_ops->close) {
			mpnt->vm_offset += mpnt->vm_end - mpnt->vm_start;
			mpnt->vm_start = mpnt->vm_end;	/* close a zero-length area */
			mpnt->vm_ops->close(mpnt);
		}
		remove_shared_vm_struct(mpnt);
		if (mpnt->vm_inode)
			mpnt->vm_inode->i_count--;
		kfree_s(mpnt, sizeof(*mpnt));
		mpnt = prev;
	}
}
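
/*
 * Map memory not associated with any file into a process address
 * space: the range is simply zero-mapped with the protection computed
 * in do_mmap().  The ino and file arguments are unused.
 */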
static int anon_map(struct inode *ino, struct file * file, struct vm_area_struct * vma)
{
	if (zeromap_page_range(vma->vm_start, vma->vm_end - vma->vm_start, vma->vm_page_prot))
		return -ENOMEM;
	return 0;
}