This source file includes the following definitions.
- do_mmap
- get_unmapped_area
- find_vma
- find_vma_intersection
- avl_neighbours
- avl_rebalance
- avl_insert
- avl_insert_neighbours
- avl_remove
- printk_list
- printk_avl
- avl_checkheights
- avl_checkleft
- avl_checkright
- avl_checkorder
- avl_check
- unmap_fixup
- sys_munmap
- do_munmap
- build_mmap_avl
- exit_mmap
- insert_vm_struct
- remove_shared_vm_struct
- merge_segments
- anon_map
1
2
3
4
5
6 #include <linux/stat.h>
7 #include <linux/sched.h>
8 #include <linux/kernel.h>
9 #include <linux/mm.h>
10 #include <linux/shm.h>
11 #include <linux/errno.h>
12 #include <linux/mman.h>
13 #include <linux/string.h>
14 #include <linux/malloc.h>
15
16 #include <asm/segment.h>
17 #include <asm/system.h>
18 #include <asm/pgtable.h>
19
20 static int anon_map(struct inode *, struct file *, struct vm_area_struct *);
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
/*
 * Hardware page protections for each combination of the low 4 bits of
 * vm_flags (read/write/exec plus the shared bit) -- see the
 * protection_map[vma->vm_flags & 0x0f] lookup in do_mmap().  The first
 * eight entries are the private (copy-on-write) protections, the last
 * eight the shared ones.
 */
pgprot_t protection_map[16] = {
	__P000, __P001, __P010, __P011, __P100, __P101, __P110, __P111,
	__S000, __S001, __S010, __S011, __S100, __S101, __S110, __S111
};
43
/*
 * Map a file (or anonymous memory, when file == NULL) into the current
 * process at [addr, addr+len).  Returns the chosen address on success or
 * a negative errno.  The caller is expected to hold whatever locking the
 * era's mmap path requires; this function itself takes none.
 */
unsigned long do_mmap(struct file * file, unsigned long addr, unsigned long len,
	unsigned long prot, unsigned long flags, unsigned long off)
{
	int error;
	struct vm_area_struct * vma;

	/* A zero-length map is a no-op: report success at the given address. */
	if ((len = PAGE_ALIGN(len)) == 0)
		return addr;

	if (addr > TASK_SIZE || len > TASK_SIZE || addr > TASK_SIZE-len)
		return -EINVAL;

	/* offset overflow? */
	if (off + len < off)
		return -EINVAL;

	if (file != NULL) {
		switch (flags & MAP_TYPE) {
		case MAP_SHARED:
			/* Writable shared mappings need a writable file
			 * (f_mode bit 2). */
			if ((prot & PROT_WRITE) && !(file->f_mode & 2))
				return -EACCES;
			/* fall through: shared mappings need read access too */
		case MAP_PRIVATE:
			/* Both mapping types need a readable file (f_mode bit 1). */
			if (!(file->f_mode & 1))
				return -EACCES;
			break;

		default:
			return -EINVAL;
		}
		/* MAP_DENYWRITE fails if anybody has the inode open for write. */
		if (flags & MAP_DENYWRITE) {
			if (file->f_inode->i_wcount > 0)
				return -ETXTBSY;
		}
	} else if ((flags & MAP_TYPE) != MAP_PRIVATE)
		return -EINVAL;

	/*
	 * MAP_FIXED means the address is non-negotiable, so it must be
	 * page aligned and in range; otherwise find a free slot ourselves.
	 */
	if (flags & MAP_FIXED) {
		if (addr & ~PAGE_MASK)
			return -EINVAL;
		if (len > TASK_SIZE || addr > TASK_SIZE - len)
			return -EINVAL;
	} else {
		addr = get_unmapped_area(addr, len);
		if (!addr)
			return -ENOMEM;
	}

	/* A file mapping requires the driver/fs to provide an mmap method. */
	if (file && (!file->f_op || !file->f_op->mmap))
		return -ENODEV;

	vma = (struct vm_area_struct *)kmalloc(sizeof(struct vm_area_struct),
		GFP_KERNEL);
	if (!vma)
		return -ENOMEM;

	vma->vm_mm = current->mm;
	vma->vm_start = addr;
	vma->vm_end = addr + len;
	/* PROT_* values coincide with the VM_* bits here (see the 0x0f
	 * index into protection_map below). */
	vma->vm_flags = prot & (VM_READ | VM_WRITE | VM_EXEC);
	vma->vm_flags |= flags & (VM_GROWSDOWN | VM_DENYWRITE | VM_EXECUTABLE);

	if (file) {
		if (file->f_mode & 1)
			vma->vm_flags |= VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC;
		if (flags & MAP_SHARED) {
			vma->vm_flags |= VM_SHARED | VM_MAYSHARE;
			/* A read-only file demotes the mapping: it can never
			 * become writable, and without write it behaves like
			 * a private mapping, so drop VM_SHARED too. */
			if (!(file->f_mode & 2))
				vma->vm_flags &= ~(VM_MAYWRITE | VM_SHARED);
		}
	} else
		vma->vm_flags |= VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC;
	vma->vm_page_prot = protection_map[vma->vm_flags & 0x0f];
	vma->vm_ops = NULL;
	vma->vm_offset = off;
	vma->vm_inode = NULL;
	vma->vm_pte = 0;

	/* Clear any old mappings in the target range before installing. */
	do_munmap(addr, len);

	if (file)
		error = file->f_op->mmap(file->f_inode, file, vma);
	else
		error = anon_map(NULL, NULL, vma);

	if (error) {
		kfree(vma);
		return error;
	}
	insert_vm_struct(current, vma);
	/* Coalesce with compatible neighbours where possible. */
	merge_segments(current, vma->vm_start, vma->vm_end);
	return addr;
}
163
164
165
166
167
168
169 unsigned long get_unmapped_area(unsigned long addr, unsigned long len)
170 {
171 struct vm_area_struct * vmm;
172
173 if (len > TASK_SIZE)
174 return 0;
175 if (!addr)
176 addr = TASK_SIZE / 3;
177 addr = PAGE_ALIGN(addr);
178
179 for (vmm = current->mm->mmap; ; vmm = vmm->vm_next) {
180 if (TASK_SIZE - len < addr)
181 return 0;
182 if (!vmm)
183 return addr;
184 if (addr > vmm->vm_end)
185 continue;
186 if (addr + len > vmm->vm_start) {
187 addr = vmm->vm_end;
188 continue;
189 }
190 return addr;
191 }
192 }
193
194
195
196
197
198
199
200
201
202
/*
 * Per-mm AVL tree of vm_area_structs, keyed on vm_end -- this is what
 * find_vma() searches to locate the first vma with vm_end > addr.
 */
#define vm_avl_key vm_end
#define vm_avl_key_t unsigned long

/* Sentinel for an empty (sub)tree. */
#define avl_empty (struct vm_area_struct *) NULL

/* Maximum tree height the rebalance stacks must accommodate; AVL height
 * grows about 1.44*log2(n), so 41 comfortably covers any tree that fits
 * in a 32-bit address space. */
#define avl_maxheight 41
#define heightof(tree) ((tree) == avl_empty ? 0 : (tree)->vm_avl_height)
220
221
222
223
224
225
226
227
228
/*
 * Find the first vma with vm_end > addr (i.e. the vma containing addr,
 * or failing that the nearest one above it).  Returns NULL if there is
 * no such vma.  The linear list walk is kept under "#if 0" for
 * reference; the live code descends the AVL tree instead.
 */
struct vm_area_struct * find_vma (struct task_struct * task, unsigned long addr)
{
#if 0
	/* Disabled: linear search over the address-sorted vma list. */
	struct vm_area_struct * vma;

	for (vma = task->mm->mmap ; ; vma = vma->vm_next) {
		if (!vma)
			return NULL;
		if (vma->vm_end > addr)
			return vma;
	}
#else
	/* AVL search: remember the lowest vma seen with vm_end > addr. */
	struct vm_area_struct * result = NULL;
	struct vm_area_struct * tree;

	for (tree = task->mm->mmap_avl ; ; ) {
		if (tree == avl_empty)
			return result;
		if (tree->vm_end > addr) {
			if (tree->vm_start <= addr)
				return tree;	/* addr is inside this vma */
			result = tree;		/* candidate; look for a lower one */
			tree = tree->vm_avl_left;
		} else
			tree = tree->vm_avl_right;
	}
#endif
}
257
258
259
/*
 * Return the first vma that overlaps [start_addr, end_addr), or NULL if
 * the range is entirely unmapped.  Built on find_vma(): the first vma
 * with vm_end > start_addr intersects the range unless it begins at or
 * after end_addr.
 */
struct vm_area_struct * find_vma_intersection (struct task_struct * task, unsigned long start_addr, unsigned long end_addr)
{
	struct vm_area_struct * vma;

#if 0
	/* Disabled: linear scan of the sorted vma list. */
	for (vma = task->mm->mmap; vma; vma = vma->vm_next) {
		if (end_addr <= vma->vm_start)
			break;
		if (start_addr < vma->vm_end)
			return vma;
	}
	return NULL;
#else
	vma = find_vma(task,start_addr);
	if (!vma || end_addr <= vma->vm_start)
		return NULL;
	return vma;
#endif
}
279
280
/*
 * Given a node known to be in the tree, return its in-order predecessor
 * (*to_the_left) and successor (*to_the_right), or NULL where none
 * exists.  Also sanity-checks that the tree ordering agrees with the
 * linked vm_next list, complaining via printk on any inconsistency.
 */
static void avl_neighbours (struct vm_area_struct * node, struct vm_area_struct * tree, struct vm_area_struct ** to_the_left, struct vm_area_struct ** to_the_right)
{
	vm_avl_key_t key = node->vm_avl_key;

	*to_the_left = *to_the_right = NULL;
	/* Descend to the node, recording the last ancestors passed on
	 * either side -- those are the neighbours unless the node has a
	 * subtree on that side. */
	for (;;) {
		if (tree == avl_empty) {
			printk("avl_neighbours: node not found in the tree\n");
			return;
		}
		if (key == tree->vm_avl_key)
			break;
		if (key < tree->vm_avl_key) {
			*to_the_right = tree;
			tree = tree->vm_avl_left;
		} else {
			*to_the_left = tree;
			tree = tree->vm_avl_right;
		}
	}
	if (tree != node) {
		printk("avl_neighbours: node not exactly found in the tree\n");
		return;
	}
	/* A left subtree's rightmost node is the true predecessor ... */
	if (tree->vm_avl_left != avl_empty) {
		struct vm_area_struct * node;
		for (node = tree->vm_avl_left; node->vm_avl_right != avl_empty; node = node->vm_avl_right)
			continue;
		*to_the_left = node;
	}
	/* ... and a right subtree's leftmost node the true successor. */
	if (tree->vm_avl_right != avl_empty) {
		struct vm_area_struct * node;
		for (node = tree->vm_avl_right; node->vm_avl_left != avl_empty; node = node->vm_avl_left)
			continue;
		*to_the_right = node;
	}
	/* Cross-check against the singly linked vm_next list. */
	if ((*to_the_left && ((*to_the_left)->vm_next != node)) || (node->vm_next != *to_the_right))
		printk("avl_neighbours: tree inconsistent with list\n");
}
320
321
322
323
324
325
326
/*
 * Rebalance the tree after an insertion or deletion.  nodeplaces_ptr
 * points just past an array of "count" pointers-to-slots recorded on
 * the way down from the root to the modified node; we walk back up
 * towards the root, restoring the AVL invariant (subtree heights differ
 * by at most one) with single or double rotations as needed.
 */
static void avl_rebalance (struct vm_area_struct *** nodeplaces_ptr, int count)
{
	for ( ; count > 0 ; count--) {
		struct vm_area_struct ** nodeplace = *--nodeplaces_ptr;
		struct vm_area_struct * node = *nodeplace;
		struct vm_area_struct * nodeleft = node->vm_avl_left;
		struct vm_area_struct * noderight = node->vm_avl_right;
		int heightleft = heightof(nodeleft);
		int heightright = heightof(noderight);
		if (heightright + 1 < heightleft) {
			/* Left subtree is two taller than the right. */
			struct vm_area_struct * nodeleftleft = nodeleft->vm_avl_left;
			struct vm_area_struct * nodeleftright = nodeleft->vm_avl_right;
			int heightleftright = heightof(nodeleftright);
			if (heightof(nodeleftleft) >= heightleftright) {
				/* Single rotation to the right: the left
				 * child becomes the subtree root. */
				node->vm_avl_left = nodeleftright; nodeleft->vm_avl_right = node;
				nodeleft->vm_avl_height = 1 + (node->vm_avl_height = 1 + heightleftright);
				*nodeplace = nodeleft;
			} else {
				/* Double rotation: the left child's right
				 * child comes up to be the subtree root. */
				nodeleft->vm_avl_right = nodeleftright->vm_avl_left;
				node->vm_avl_left = nodeleftright->vm_avl_right;
				nodeleftright->vm_avl_left = nodeleft;
				nodeleftright->vm_avl_right = node;
				nodeleft->vm_avl_height = node->vm_avl_height = heightleftright;
				nodeleftright->vm_avl_height = heightleft;
				*nodeplace = nodeleftright;
			}
		}
		else if (heightleft + 1 < heightright) {
			/* Right subtree is two taller: mirror image of the above. */
			struct vm_area_struct * noderightright = noderight->vm_avl_right;
			struct vm_area_struct * noderightleft = noderight->vm_avl_left;
			int heightrightleft = heightof(noderightleft);
			if (heightof(noderightright) >= heightrightleft) {
				/* Single rotation to the left. */
				node->vm_avl_right = noderightleft; noderight->vm_avl_left = node;
				noderight->vm_avl_height = 1 + (node->vm_avl_height = 1 + heightrightleft);
				*nodeplace = noderight;
			} else {
				/* Double rotation through the right child's left child. */
				noderight->vm_avl_left = noderightleft->vm_avl_right;
				node->vm_avl_right = noderightleft->vm_avl_left;
				noderightleft->vm_avl_right = noderight;
				noderightleft->vm_avl_left = node;
				noderight->vm_avl_height = node->vm_avl_height = heightrightleft;
				noderightleft->vm_avl_height = heightright;
				*nodeplace = noderightleft;
			}
		}
		else {
			/* Balanced here: just refresh this node's height.  If
			 * it did not change, no ancestor's can either. */
			int height = (heightleft<heightright ? heightright : heightleft) + 1;
			if (height == node->vm_avl_height)
				break;
			node->vm_avl_height = height;
		}
	}
}
402
403
/*
 * Insert a new node into the AVL tree rooted at *ptree.  The descent
 * path is recorded in "stack" so avl_rebalance() can walk back up and
 * restore balance.  Equal keys go to the right, so the tree tolerates
 * duplicates.
 */
static void avl_insert (struct vm_area_struct * new_node, struct vm_area_struct ** ptree)
{
	vm_avl_key_t key = new_node->vm_avl_key;
	struct vm_area_struct ** nodeplace = ptree;
	struct vm_area_struct ** stack[avl_maxheight];
	int stack_count = 0;
	struct vm_area_struct *** stack_ptr = &stack[0];	/* pushed from the bottom up */
	for (;;) {
		struct vm_area_struct * node = *nodeplace;
		if (node == avl_empty)
			break;
		*stack_ptr++ = nodeplace; stack_count++;
		if (key < node->vm_avl_key)
			nodeplace = &node->vm_avl_left;
		else
			nodeplace = &node->vm_avl_right;
	}
	/* Hook the new leaf in and rebalance along the recorded path. */
	new_node->vm_avl_left = avl_empty;
	new_node->vm_avl_right = avl_empty;
	new_node->vm_avl_height = 1;
	*nodeplace = new_node;
	avl_rebalance(stack_ptr,stack_count);
}
427
428
429
430
/*
 * Like avl_insert(), but additionally returns the new node's in-order
 * predecessor (*to_the_left) and successor (*to_the_right), or NULL
 * where none exists.  The neighbours are simply the last ancestors
 * passed on each side during the descent, since the new node is
 * inserted as a leaf.
 */
static void avl_insert_neighbours (struct vm_area_struct * new_node, struct vm_area_struct ** ptree,
	struct vm_area_struct ** to_the_left, struct vm_area_struct ** to_the_right)
{
	vm_avl_key_t key = new_node->vm_avl_key;
	struct vm_area_struct ** nodeplace = ptree;
	struct vm_area_struct ** stack[avl_maxheight];
	int stack_count = 0;
	struct vm_area_struct *** stack_ptr = &stack[0];	/* descent path for rebalancing */
	*to_the_left = *to_the_right = NULL;
	for (;;) {
		struct vm_area_struct * node = *nodeplace;
		if (node == avl_empty)
			break;
		*stack_ptr++ = nodeplace; stack_count++;
		if (key < node->vm_avl_key) {
			*to_the_right = node;
			nodeplace = &node->vm_avl_left;
		} else {
			*to_the_left = node;
			nodeplace = &node->vm_avl_right;
		}
	}
	new_node->vm_avl_left = avl_empty;
	new_node->vm_avl_right = avl_empty;
	new_node->vm_avl_height = 1;
	*nodeplace = new_node;
	avl_rebalance(stack_ptr,stack_count);
}
459
460
/*
 * Remove a node from the AVL tree rooted at *ptree.  A node with a
 * left subtree is replaced by that subtree's rightmost node; the
 * descent path recorded in "stack" is then rebalanced.  Complains via
 * printk (and leaves the tree untouched) if the node is not found.
 */
static void avl_remove (struct vm_area_struct * node_to_delete, struct vm_area_struct ** ptree)
{
	vm_avl_key_t key = node_to_delete->vm_avl_key;
	struct vm_area_struct ** nodeplace = ptree;
	struct vm_area_struct ** stack[avl_maxheight];
	int stack_count = 0;
	struct vm_area_struct *** stack_ptr = &stack[0];	/* descent path for rebalancing */
	struct vm_area_struct ** nodeplace_to_delete;
	for (;;) {
		struct vm_area_struct * node = *nodeplace;
		if (node == avl_empty) {
			/* Shouldn't happen: caller believed the node was present. */
			printk("avl_remove: node to delete not found in tree\n");
			return;
		}
		*stack_ptr++ = nodeplace; stack_count++;
		if (key == node->vm_avl_key)
			break;
		if (key < node->vm_avl_key)
			nodeplace = &node->vm_avl_left;
		else
			nodeplace = &node->vm_avl_right;
	}
	nodeplace_to_delete = nodeplace;

	if (node_to_delete->vm_avl_left == avl_empty) {
		/* No left subtree: splice in the right subtree directly. */
		*nodeplace_to_delete = node_to_delete->vm_avl_right;
		stack_ptr--; stack_count--;
	} else {
		/* Replace the node with the rightmost node of its left
		 * subtree, continuing to record the path for rebalance. */
		struct vm_area_struct *** stack_ptr_to_delete = stack_ptr;
		struct vm_area_struct ** nodeplace = &node_to_delete->vm_avl_left;
		struct vm_area_struct * node;
		for (;;) {
			node = *nodeplace;
			if (node->vm_avl_right == avl_empty)
				break;
			*stack_ptr++ = nodeplace; stack_count++;
			nodeplace = &node->vm_avl_right;
		}
		*nodeplace = node->vm_avl_left;
		/* The replacement inherits the deleted node's links and height. */
		node->vm_avl_left = node_to_delete->vm_avl_left;
		node->vm_avl_right = node_to_delete->vm_avl_right;
		node->vm_avl_height = node_to_delete->vm_avl_height;
		*nodeplace_to_delete = node;
		/* Patch the recorded path: the slot at the deleted node's
		 * depth now refers to the replacement's left link. */
		*stack_ptr_to_delete = &node->vm_avl_left;
	}
	avl_rebalance(stack_ptr,stack_count);
}
510
511 #ifdef DEBUG_AVL
512
513
514 static void printk_list (struct vm_area_struct * vma)
515 {
516 printk("[");
517 while (vma) {
518 printk("%08lX-%08lX", vma->vm_start, vma->vm_end);
519 vma = vma->vm_next;
520 if (!vma)
521 break;
522 printk(" ");
523 }
524 printk("]");
525 }
526
527
528 static void printk_avl (struct vm_area_struct * tree)
529 {
530 if (tree != avl_empty) {
531 printk("(");
532 if (tree->vm_avl_left != avl_empty) {
533 printk_avl(tree->vm_avl_left);
534 printk("<");
535 }
536 printk("%08lX-%08lX", tree->vm_start, tree->vm_end);
537 if (tree->vm_avl_right != avl_empty) {
538 printk(">");
539 printk_avl(tree->vm_avl_right);
540 }
541 printk(")");
542 }
543 }
544
/* Label printed by the avl_check* helpers to identify the call site. */
static char *avl_check_point = "somewhere";

/*
 * Debug check: verify that every node's cached vm_avl_height equals
 * 1 + max(child heights) and that the two child heights differ by at
 * most one (the AVL invariant).  Complains via printk on violation.
 */
static void avl_checkheights (struct vm_area_struct * tree)
{
	int h, hl, hr;

	if (tree == avl_empty)
		return;
	avl_checkheights(tree->vm_avl_left);
	avl_checkheights(tree->vm_avl_right);
	h = tree->vm_avl_height;
	hl = heightof(tree->vm_avl_left);
	hr = heightof(tree->vm_avl_right);
	/* OK if the left child is the (weakly) taller one ... */
	if ((h == hl+1) && (hr <= hl) && (hl <= hr+1))
		return;
	/* ... or the right child is. */
	if ((h == hr+1) && (hl <= hr) && (hr <= hl+1))
		return;
	printk("%s: avl_checkheights: heights inconsistent\n",avl_check_point);
}
565
566
567 static void avl_checkleft (struct vm_area_struct * tree, vm_avl_key_t key)
568 {
569 if (tree == avl_empty)
570 return;
571 avl_checkleft(tree->vm_avl_left,key);
572 avl_checkleft(tree->vm_avl_right,key);
573 if (tree->vm_avl_key < key)
574 return;
575 printk("%s: avl_checkleft: left key %lu >= top key %lu\n",avl_check_point,tree->vm_avl_key,key);
576 }
577
578
579 static void avl_checkright (struct vm_area_struct * tree, vm_avl_key_t key)
580 {
581 if (tree == avl_empty)
582 return;
583 avl_checkright(tree->vm_avl_left,key);
584 avl_checkright(tree->vm_avl_right,key);
585 if (tree->vm_avl_key > key)
586 return;
587 printk("%s: avl_checkright: right key %lu <= top key %lu\n",avl_check_point,tree->vm_avl_key,key);
588 }
589
590
/*
 * Debug check: verify the binary-search-tree ordering of the whole
 * tree -- all left-subtree keys below each node's key, all
 * right-subtree keys above it.
 */
static void avl_checkorder (struct vm_area_struct * tree)
{
	if (tree == avl_empty)
		return;
	avl_checkorder(tree->vm_avl_left);
	avl_checkorder(tree->vm_avl_right);
	avl_checkleft(tree->vm_avl_left,tree->vm_avl_key);
	avl_checkright(tree->vm_avl_right,tree->vm_avl_key);
}
600
601
/*
 * Debug-only full consistency check of a task's vma AVL tree: heights
 * and key ordering.  "caller" labels any diagnostics printed.
 */
static void avl_check (struct task_struct * task, char *caller)
{
	avl_check_point = caller;
	avl_checkheights(task->mm->mmap_avl);
	avl_checkorder(task->mm->mmap_avl);
}
611
612 #endif
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
/*
 * Fix up the vma bookkeeping after [addr, addr+len) has been unmapped
 * from "area".  The caller (do_munmap) has already unlinked "area" from
 * the mm's list/tree and will kfree() it afterwards, so any part of the
 * area that survives is re-inserted as a fresh copy.  Handles four
 * cases: whole-area unmap, trim at either end, and a hole punched in
 * the middle (which splits the area in two).
 */
void unmap_fixup(struct vm_area_struct *area,
	unsigned long addr, size_t len)
{
	struct vm_area_struct *mpnt;
	unsigned long end = addr + len;

	/* The unmapped range must lie entirely within the area. */
	if (addr < area->vm_start || addr >= area->vm_end ||
	    end <= area->vm_start || end > area->vm_end ||
	    end < addr)
	{
		printk("unmap_fixup: area=%lx-%lx, unmap %lx-%lx!!\n",
			area->vm_start, area->vm_end, addr, end);
		return;
	}

	/* Whole area unmapped: close it and drop the inode reference. */
	if (addr == area->vm_start && end == area->vm_end) {
		if (area->vm_ops && area->vm_ops->close)
			area->vm_ops->close(area);
		if (area->vm_inode)
			iput(area->vm_inode);
		return;
	}

	/* Trim the tail ... */
	if (end == area->vm_end)
		area->vm_end = addr;
	else
	/* ... or trim the head, keeping vm_offset consistent ... */
	if (addr == area->vm_start) {
		area->vm_offset += (end - area->vm_start);
		area->vm_start = end;
	}
	else {
	/* ... or punch a hole in the middle: split off the part above
	 * "end" as a new vma and trim this one down to "addr". */
		mpnt = (struct vm_area_struct *)kmalloc(sizeof(*mpnt), GFP_KERNEL);
		/* NOTE(review): on allocation failure the upper half is
		 * silently lost -- no way to report ENOMEM from here. */
		if (!mpnt)
			return;
		*mpnt = *area;
		mpnt->vm_offset += (end - area->vm_start);
		mpnt->vm_start = end;
		if (mpnt->vm_inode)
			mpnt->vm_inode->i_count++;	/* new vma holds its own inode ref */
		if (mpnt->vm_ops && mpnt->vm_ops->open)
			mpnt->vm_ops->open(mpnt);
		area->vm_end = addr;	/* truncate original below the hole */
		insert_vm_struct(current, mpnt);
	}

	/* Re-insert the surviving (trimmed) part as a fresh copy, since the
	 * caller frees the original struct; close the original first. */
	mpnt = (struct vm_area_struct *)kmalloc(sizeof(*mpnt), GFP_KERNEL);
	if (!mpnt)
		return;
	*mpnt = *area;
	if (mpnt->vm_ops && mpnt->vm_ops->open)
		mpnt->vm_ops->open(mpnt);
	if (area->vm_ops && area->vm_ops->close) {
		area->vm_end = area->vm_start;	/* present as zero-length on close */
		area->vm_ops->close(area);
	}
	insert_vm_struct(current, mpnt);
}
700
/* munmap() system call entry point: all the work is in do_munmap(). */
asmlinkage int sys_munmap(unsigned long addr, size_t len)
{
	return do_munmap(addr, len);
}
705
706
707
708
709
710
711
712 int do_munmap(unsigned long addr, size_t len)
713 {
714 struct vm_area_struct *mpnt, *prev, *next, **npp, *free;
715
716 if ((addr & ~PAGE_MASK) || addr > TASK_SIZE || len > TASK_SIZE-addr)
717 return -EINVAL;
718
719 if ((len = PAGE_ALIGN(len)) == 0)
720 return 0;
721
722
723
724
725
726
727
728 mpnt = find_vma(current, addr);
729 if (!mpnt)
730 return 0;
731 avl_neighbours(mpnt, current->mm->mmap_avl, &prev, &next);
732
733
734
735 npp = (prev ? &prev->vm_next : ¤t->mm->mmap);
736 free = NULL;
737 for ( ; mpnt && mpnt->vm_start < addr+len; mpnt = *npp) {
738 *npp = mpnt->vm_next;
739 mpnt->vm_next = free;
740 free = mpnt;
741 avl_remove(mpnt, ¤t->mm->mmap_avl);
742 }
743
744 if (free == NULL)
745 return 0;
746
747
748
749
750
751
752
753 while (free) {
754 unsigned long st, end;
755
756 mpnt = free;
757 free = free->vm_next;
758
759 remove_shared_vm_struct(mpnt);
760
761 st = addr < mpnt->vm_start ? mpnt->vm_start : addr;
762 end = addr+len;
763 end = end > mpnt->vm_end ? mpnt->vm_end : end;
764
765 if (mpnt->vm_ops && mpnt->vm_ops->unmap)
766 mpnt->vm_ops->unmap(mpnt, st, end-st);
767
768 unmap_fixup(mpnt, st, end-st);
769 kfree(mpnt);
770 }
771
772 unmap_page_range(addr, len);
773 return 0;
774 }
775
776
777 void build_mmap_avl(struct mm_struct * mm)
778 {
779 struct vm_area_struct * vma;
780
781 mm->mmap_avl = NULL;
782 for (vma = mm->mmap; vma; vma = vma->vm_next)
783 avl_insert(vma, &mm->mmap_avl);
784 }
785
786
787 void exit_mmap(struct mm_struct * mm)
788 {
789 struct vm_area_struct * mpnt;
790
791 mpnt = mm->mmap;
792 mm->mmap = NULL;
793 mm->mmap_avl = NULL;
794 while (mpnt) {
795 struct vm_area_struct * next = mpnt->vm_next;
796 if (mpnt->vm_ops && mpnt->vm_ops->close)
797 mpnt->vm_ops->close(mpnt);
798 remove_shared_vm_struct(mpnt);
799 if (mpnt->vm_inode)
800 iput(mpnt->vm_inode);
801 kfree(mpnt);
802 mpnt = next;
803 }
804 }
805
806
807
808
809
/*
 * Link a new vma into a task's mm: into the AVL tree, into the sorted
 * vm_next list, and (for file-backed vmas) into the inode's circular
 * sharing ring.  The list-insertion variant is kept under "#if 0"; the
 * live code uses the tree insertion, which also yields the list
 * neighbours directly.
 */
void insert_vm_struct(struct task_struct *t, struct vm_area_struct *vmp)
{
	struct vm_area_struct *share;
	struct inode * inode;

#if 0
	/* Disabled: linear insertion into the sorted list. */
	struct vm_area_struct **p, *mpnt;

	p = &t->mm->mmap;
	while ((mpnt = *p) != NULL) {
		if (mpnt->vm_start > vmp->vm_start)
			break;
		if (mpnt->vm_end > vmp->vm_start)
			printk("insert_vm_struct: overlapping memory areas\n");
		p = &mpnt->vm_next;
	}
	vmp->vm_next = mpnt;
	*p = vmp;
#else
	struct vm_area_struct * prev, * next;

	avl_insert_neighbours(vmp, &t->mm->mmap_avl, &prev, &next);
	/* Sanity: prev and next from the tree must be adjacent in the list. */
	if ((prev ? prev->vm_next : t->mm->mmap) != next)
		printk("insert_vm_struct: tree inconsistent with list\n");
	if (prev)
		prev->vm_next = vmp;
	else
		t->mm->mmap = vmp;
	vmp->vm_next = next;
#endif

	inode = vmp->vm_inode;
	if (!inode)
		return;

	/* Splice into the inode's doubly linked circular ring of mappings
	 * (i_mmap points at one arbitrary member of the ring). */
	if ((share = inode->i_mmap)) {
		vmp->vm_next_share = share->vm_next_share;
		vmp->vm_next_share->vm_prev_share = vmp;
		share->vm_next_share = vmp;
		vmp->vm_prev_share = share;
	} else
		inode->i_mmap = vmp->vm_next_share = vmp->vm_prev_share = vmp;
}
854
855
856
857
858 void remove_shared_vm_struct(struct vm_area_struct *mpnt)
859 {
860 struct inode * inode = mpnt->vm_inode;
861
862 if (!inode)
863 return;
864
865 if (mpnt->vm_next_share == mpnt) {
866 if (inode->i_mmap != mpnt)
867 printk("Inode i_mmap ring corrupted\n");
868 inode->i_mmap = NULL;
869 return;
870 }
871
872 if (inode->i_mmap == mpnt)
873 inode->i_mmap = mpnt->vm_next_share;
874
875 mpnt->vm_prev_share->vm_next_share = mpnt->vm_next_share;
876 mpnt->vm_next_share->vm_prev_share = mpnt->vm_prev_share;
877 }
878
879
880
881
882
883
884
885
/*
 * Try to merge adjacent, compatible vmas over [start_addr, end_addr).
 * Two neighbours merge when they agree on inode, vm_pte, ops and flags,
 * are exactly contiguous in the address space, and (for file/SHM
 * mappings) have contiguous file offsets.  The later vma is absorbed
 * into the earlier one and freed.
 */
void merge_segments (struct task_struct * task, unsigned long start_addr, unsigned long end_addr)
{
	struct vm_area_struct *prev, *mpnt, *next;

	mpnt = find_vma(task, start_addr);
	if (!mpnt)
		return;
	avl_neighbours(mpnt, task->mm->mmap_avl, &prev, &next);

	/* No predecessor: start the scan at the found vma itself. */
	if (!prev) {
		prev = mpnt;
		mpnt = next;
	}

	/* Walk pairs (prev, mpnt) while prev still starts inside the range. */
	for ( ; mpnt && prev->vm_start < end_addr ; prev = mpnt, mpnt = next) {
#if 0
		printk("looping in merge_segments, mpnt=0x%lX\n", (unsigned long) mpnt);
#endif

		next = mpnt->vm_next;

		/* All of these must match for the pair to be mergeable. */
		if (mpnt->vm_inode != prev->vm_inode)
			continue;
		if (mpnt->vm_pte != prev->vm_pte)
			continue;
		if (mpnt->vm_ops != prev->vm_ops)
			continue;
		if (mpnt->vm_flags != prev->vm_flags)
			continue;
		if (prev->vm_end != mpnt->vm_start)
			continue;

		/* File-backed or SHM: offsets must be contiguous too. */
		if ((mpnt->vm_inode != NULL) || (mpnt->vm_flags & VM_SHM)) {
			if (prev->vm_offset + prev->vm_end - prev->vm_start != mpnt->vm_offset)
				continue;
		}

		/* Merge: extend prev over mpnt, then dispose of mpnt. */
		avl_remove(mpnt, &task->mm->mmap_avl);
		prev->vm_end = mpnt->vm_end;
		prev->vm_next = mpnt->vm_next;
		if (mpnt->vm_ops && mpnt->vm_ops->close) {
			/* Present mpnt as zero-length at its end before closing. */
			mpnt->vm_offset += mpnt->vm_end - mpnt->vm_start;
			mpnt->vm_start = mpnt->vm_end;
			mpnt->vm_ops->close(mpnt);
		}
		remove_shared_vm_struct(mpnt);
		if (mpnt->vm_inode)
			mpnt->vm_inode->i_count--;
		kfree_s(mpnt, sizeof(*mpnt));
		/* prev absorbed mpnt; retry merging prev with next. */
		mpnt = prev;
	}
}
952
953
954
955
956
957 static int anon_map(struct inode *ino, struct file * file, struct vm_area_struct * vma)
958 {
959 if (zeromap_page_range(vma->vm_start, vma->vm_end - vma->vm_start, vma->vm_page_prot))
960 return -ENOMEM;
961 return 0;
962 }