This source file includes following definitions.
- do_mmap
- get_unmapped_area
- find_vma
- find_vma_intersection
- avl_neighbours
- avl_rebalance
- avl_insert
- avl_insert_neighbours
- avl_remove
- printk_list
- printk_avl
- avl_checkheights
- avl_checkleft
- avl_checkright
- avl_checkorder
- avl_check
- unmap_fixup
- sys_munmap
- do_munmap
- build_mmap_avl
- exit_mmap
- insert_vm_struct
- remove_shared_vm_struct
- merge_segments
- anon_map
1
2
3
4
5
6 #include <linux/stat.h>
7 #include <linux/sched.h>
8 #include <linux/kernel.h>
9 #include <linux/mm.h>
10 #include <linux/shm.h>
11 #include <linux/errno.h>
12 #include <linux/mman.h>
13 #include <linux/string.h>
14 #include <linux/malloc.h>
15
16 #include <asm/segment.h>
17 #include <asm/system.h>
18 #include <asm/pgtable.h>
19
20 static int anon_map(struct inode *, struct file *, struct vm_area_struct *);
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
/*
 * Map from the low four vm_flags bits (READ/WRITE/EXEC plus the
 * shared bit — see the protection_map[vma->vm_flags & 0x0f] lookup
 * in do_mmap) to hardware page-protection bits.  The first eight
 * entries are the private (copy-on-write) variants, the last eight
 * the shared variants.
 */
pgprot_t protection_map[16] = {
	__P000, __P001, __P010, __P011, __P100, __P101, __P110, __P111,
	__S000, __S001, __S010, __S011, __S100, __S101, __S110, __S111
};
43
/*
 * Create a new memory mapping of `len` bytes at (or near) `addr`.
 *
 * file  - backing file, or NULL for an anonymous mapping
 * addr  - requested address (a hint unless MAP_FIXED is set)
 * len   - length in bytes; rounded up to a whole number of pages
 * prot  - PROT_* protection bits requested by the caller
 * flags - MAP_* flags (type, fixed, denywrite, ...)
 * off   - offset into the file
 *
 * Returns the address of the mapping, or a negative errno.
 * Any existing mappings in the chosen range are unmapped first.
 */
unsigned long do_mmap(struct file * file, unsigned long addr, unsigned long len,
	unsigned long prot, unsigned long flags, unsigned long off)
{
	int error;
	struct vm_area_struct * vma;

	/* Zero-length mappings succeed trivially. */
	if ((len = PAGE_ALIGN(len)) == 0)
		return addr;

	if (addr > TASK_SIZE || len > TASK_SIZE || addr > TASK_SIZE-len)
		return -EINVAL;

	/* offset overflow: off + len must not wrap around */
	if (off + len < off)
		return -EINVAL;

	/*
	 * File-backed mappings must be compatible with the mode the
	 * file was opened in; anonymous mappings must be private.
	 */
	if (file != NULL) {
		switch (flags & MAP_TYPE) {
		case MAP_SHARED:
			/* shared writable mapping needs a writable file
			 * (f_mode & 2 looks like the "open for write" bit
			 *  — NOTE(review): confirm against struct file) */
			if ((prot & PROT_WRITE) && !(file->f_mode & 2))
				return -EACCES;
			/* fall through: shared also needs read access */
		case MAP_PRIVATE:
			if (!(file->f_mode & 1))
				return -EACCES;
			break;

		default:
			return -EINVAL;
		}
		/* MAP_DENYWRITE fails if someone has the file open for write */
		if ((flags & MAP_DENYWRITE) && (file->f_inode->i_wcount > 0))
			return -ETXTBSY;
	} else if ((flags & MAP_TYPE) != MAP_PRIVATE)
		return -EINVAL;

	/*
	 * Pin down the address: MAP_FIXED must be page aligned and in
	 * range, otherwise let get_unmapped_area() pick a free slot.
	 */
	if (flags & MAP_FIXED) {
		if (addr & ~PAGE_MASK)
			return -EINVAL;
		if (len > TASK_SIZE || addr > TASK_SIZE - len)
			return -EINVAL;
	} else {
		addr = get_unmapped_area(addr, len);
		if (!addr)
			return -ENOMEM;
	}

	/* The backing file must actually support mmap. */
	if (file && (!file->f_op || !file->f_op->mmap))
		return -ENODEV;

	vma = (struct vm_area_struct *)kmalloc(sizeof(struct vm_area_struct),
		GFP_KERNEL);
	if (!vma)
		return -ENOMEM;

	vma->vm_task = current;
	vma->vm_start = addr;
	vma->vm_end = addr + len;
	/* PROT_* bits were chosen to line up with the VM_* bits here */
	vma->vm_flags = prot & (VM_READ | VM_WRITE | VM_EXEC);
	vma->vm_flags |= flags & (VM_GROWSDOWN | VM_DENYWRITE | VM_EXECUTABLE);

	if (file) {
		if (file->f_mode & 1)
			vma->vm_flags |= VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC;
		if (flags & MAP_SHARED) {
			vma->vm_flags |= VM_SHARED | VM_MAYSHARE;
			/*
			 * A shared mapping of a read-only file cannot ever
			 * become writable, so it is demoted to an ordinary
			 * read-only (effectively private) mapping.
			 */
			if (!(file->f_mode & 2))
				vma->vm_flags &= ~(VM_MAYWRITE | VM_SHARED);
		}
	} else
		vma->vm_flags |= VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC;
	vma->vm_page_prot = protection_map[vma->vm_flags & 0x0f];
	vma->vm_ops = NULL;
	vma->vm_offset = off;
	vma->vm_inode = NULL;
	vma->vm_pte = 0;

	/* Clear out any old mappings in the range before installing ours. */
	do_munmap(addr, len);

	if (file)
		error = file->f_op->mmap(file->f_inode, file, vma);
	else
		error = anon_map(NULL, NULL, vma);

	if (error) {
		kfree(vma);
		return error;
	}
	insert_vm_struct(current, vma);
	/* coalesce with compatible neighbours where possible */
	merge_segments(current, vma->vm_start, vma->vm_end);
	return addr;
}
161
162
163
164
165
166
167 unsigned long get_unmapped_area(unsigned long addr, unsigned long len)
168 {
169 struct vm_area_struct * vmm;
170
171 if (len > TASK_SIZE)
172 return 0;
173 if (!addr)
174 addr = TASK_SIZE / 3;
175 addr = PAGE_ALIGN(addr);
176
177 for (vmm = current->mm->mmap; ; vmm = vmm->vm_next) {
178 if (TASK_SIZE - len < addr)
179 return 0;
180 if (!vmm)
181 return addr;
182 if (addr > vmm->vm_end)
183 continue;
184 if (addr + len > vmm->vm_start) {
185 addr = vmm->vm_end;
186 continue;
187 }
188 return addr;
189 }
190 }
191
192
193
194
195
196
197
198
199
200
/* The per-task AVL tree of vmas is keyed on the vma's end address. */
#define vm_avl_key vm_end
#define vm_avl_key_t unsigned long

/* An empty (sub)tree is represented by a NULL vma pointer. */
#define avl_empty (struct vm_area_struct *) NULL

/* Upper bound on the height of any tree we will ever see; sized so
 * the path stacks in avl_insert()/avl_remove() cannot overflow. */
#define avl_maxheight 41
#define heightof(tree) ((tree) == avl_empty ? 0 : (tree)->vm_avl_height)
218
219
220
221
222
223
224
225
226
227 struct vm_area_struct * find_vma (struct task_struct * task, unsigned long addr)
228 {
229 #if 0
230 struct vm_area_struct * vma;
231
232 for (vma = task->mm->mmap ; ; vma = vma->vm_next) {
233 if (!vma)
234 return NULL;
235 if (vma->vm_end > addr)
236 return vma;
237 }
238 #else
239 struct vm_area_struct * result = NULL;
240 struct vm_area_struct * tree;
241
242 for (tree = task->mm->mmap_avl ; ; ) {
243 if (tree == avl_empty)
244 return result;
245 if (tree->vm_end > addr) {
246 if (tree->vm_start <= addr)
247 return tree;
248 result = tree;
249 tree = tree->vm_avl_left;
250 } else
251 tree = tree->vm_avl_right;
252 }
253 #endif
254 }
255
256
257
258 struct vm_area_struct * find_vma_intersection (struct task_struct * task, unsigned long start_addr, unsigned long end_addr)
259 {
260 struct vm_area_struct * vma;
261
262 #if 0
263 for (vma = task->mm->mmap; vma; vma = vma->vm_next) {
264 if (end_addr <= vma->vm_start)
265 break;
266 if (start_addr < vma->vm_end)
267 return vma;
268 }
269 return NULL;
270 #else
271 vma = find_vma(task,start_addr);
272 if (!vma || end_addr <= vma->vm_start)
273 return NULL;
274 return vma;
275 #endif
276 }
277
278
/*
 * Locate `node` in `tree` and return its in-order predecessor in
 * *to_the_left and its in-order successor in *to_the_right (NULL
 * where none exists).  Assumes keys (vm_end values) are unique.
 * Finally cross-checks the answer against the linear vm_next list
 * and printk()s a diagnostic on any inconsistency.
 */
static void avl_neighbours (struct vm_area_struct * node, struct vm_area_struct * tree, struct vm_area_struct ** to_the_left, struct vm_area_struct ** to_the_right)
{
	vm_avl_key_t key = node->vm_avl_key;

	*to_the_left = *to_the_right = NULL;
	/* Descend to the node, remembering the closest enclosing
	 * ancestors on each side as provisional neighbours. */
	for (;;) {
		if (tree == avl_empty) {
			printk("avl_neighbours: node not found in the tree\n");
			return;
		}
		if (key == tree->vm_avl_key)
			break;
		if (key < tree->vm_avl_key) {
			*to_the_right = tree;
			tree = tree->vm_avl_left;
		} else {
			*to_the_left = tree;
			tree = tree->vm_avl_right;
		}
	}
	if (tree != node) {
		printk("avl_neighbours: node not exactly found in the tree\n");
		return;
	}
	/* A non-empty left subtree overrides the ancestor candidate:
	 * the predecessor is its rightmost node. */
	if (tree->vm_avl_left != avl_empty) {
		struct vm_area_struct * node;
		for (node = tree->vm_avl_left; node->vm_avl_right != avl_empty; node = node->vm_avl_right)
			continue;
		*to_the_left = node;
	}
	/* Symmetrically, the successor is the leftmost node of the
	 * right subtree, if there is one. */
	if (tree->vm_avl_right != avl_empty) {
		struct vm_area_struct * node;
		for (node = tree->vm_avl_right; node->vm_avl_left != avl_empty; node = node->vm_avl_left)
			continue;
		*to_the_right = node;
	}
	/* Sanity: tree neighbours must match the sorted vm_next list. */
	if ((*to_the_left && ((*to_the_left)->vm_next != node)) || (node->vm_next != *to_the_right))
		printk("avl_neighbours: tree inconsistent with list\n");
}
318
319
320
321
322
323
324
/*
 * Restore the AVL height invariant after an insertion or removal.
 *
 * nodeplaces_ptr points one past the end of a stack of `count`
 * pointers-to-slots: the slots on the path from the root down to
 * the modified subtree, deepest last.  We walk back up toward the
 * root, rotating wherever the child heights differ by two, and stop
 * early as soon as a node's stored height is already correct (then
 * no ancestor can have changed either).
 */
static void avl_rebalance (struct vm_area_struct *** nodeplaces_ptr, int count)
{
	for ( ; count > 0 ; count--) {
		struct vm_area_struct ** nodeplace = *--nodeplaces_ptr;
		struct vm_area_struct * node = *nodeplace;
		struct vm_area_struct * nodeleft = node->vm_avl_left;
		struct vm_area_struct * noderight = node->vm_avl_right;
		int heightleft = heightof(nodeleft);
		int heightright = heightof(noderight);
		if (heightright + 1 < heightleft) {
			/* Left subtree two taller than the right. */
			struct vm_area_struct * nodeleftleft = nodeleft->vm_avl_left;
			struct vm_area_struct * nodeleftright = nodeleft->vm_avl_right;
			int heightleftright = heightof(nodeleftright);
			if (heightof(nodeleftleft) >= heightleftright) {
				/*
				 * Single rotation right: the left child
				 * becomes the root of this subtree.
				 */
				node->vm_avl_left = nodeleftright; nodeleft->vm_avl_right = node;
				nodeleft->vm_avl_height = 1 + (node->vm_avl_height = 1 + heightleftright);
				*nodeplace = nodeleft;
			} else {
				/*
				 * Double rotation (left-right): the left
				 * child's right child comes up to the root
				 * of this subtree.
				 */
				nodeleft->vm_avl_right = nodeleftright->vm_avl_left;
				node->vm_avl_left = nodeleftright->vm_avl_right;
				nodeleftright->vm_avl_left = nodeleft;
				nodeleftright->vm_avl_right = node;
				nodeleft->vm_avl_height = node->vm_avl_height = heightleftright;
				nodeleftright->vm_avl_height = heightleft;
				*nodeplace = nodeleftright;
			}
		}
		else if (heightleft + 1 < heightright) {
			/* Right subtree two taller: mirror image of above. */
			struct vm_area_struct * noderightright = noderight->vm_avl_right;
			struct vm_area_struct * noderightleft = noderight->vm_avl_left;
			int heightrightleft = heightof(noderightleft);
			if (heightof(noderightright) >= heightrightleft) {
				/* Single rotation left. */
				node->vm_avl_right = noderightleft; noderight->vm_avl_left = node;
				noderight->vm_avl_height = 1 + (node->vm_avl_height = 1 + heightrightleft);
				*nodeplace = noderight;
			} else {
				/* Double rotation (right-left). */
				noderight->vm_avl_left = noderightleft->vm_avl_right;
				node->vm_avl_right = noderightleft->vm_avl_left;
				noderightleft->vm_avl_right = noderight;
				noderightleft->vm_avl_left = node;
				noderight->vm_avl_height = node->vm_avl_height = heightrightleft;
				noderightleft->vm_avl_height = heightright;
				*nodeplace = noderightleft;
			}
		}
		else {
			/* Already balanced: refresh the height, and stop
			 * climbing if nothing changed. */
			int height = (heightleft<heightright ? heightright : heightleft) + 1;
			if (height == node->vm_avl_height)
				break;
			node->vm_avl_height = height;
		}
	}
}
400
401
/*
 * Insert new_node into the AVL tree rooted at *ptree, keyed on
 * vm_avl_key (== vm_end); equal keys descend to the right.  The
 * path of slots from the root is recorded in `stack` so that
 * avl_rebalance() can walk back up and restore balance.
 */
static void avl_insert (struct vm_area_struct * new_node, struct vm_area_struct ** ptree)
{
	vm_avl_key_t key = new_node->vm_avl_key;
	struct vm_area_struct ** nodeplace = ptree;
	struct vm_area_struct ** stack[avl_maxheight];
	int stack_count = 0;
	struct vm_area_struct *** stack_ptr = &stack[0];	/* current stack top */
	/* Find the empty slot where the new node belongs. */
	for (;;) {
		struct vm_area_struct * node = *nodeplace;
		if (node == avl_empty)
			break;
		*stack_ptr++ = nodeplace; stack_count++;
		if (key < node->vm_avl_key)
			nodeplace = &node->vm_avl_left;
		else
			nodeplace = &node->vm_avl_right;
	}
	/* Attach as a leaf, then rebalance the recorded path. */
	new_node->vm_avl_left = avl_empty;
	new_node->vm_avl_right = avl_empty;
	new_node->vm_avl_height = 1;
	*nodeplace = new_node;
	avl_rebalance(stack_ptr,stack_count);
}
425
426
427
428
/*
 * Like avl_insert(), but additionally returns the in-order
 * predecessor of the new node in *to_the_left and its successor in
 * *to_the_right (NULL where none exists) — these are simply the
 * last ancestors passed on each side during the descent.
 */
static void avl_insert_neighbours (struct vm_area_struct * new_node, struct vm_area_struct ** ptree,
	struct vm_area_struct ** to_the_left, struct vm_area_struct ** to_the_right)
{
	vm_avl_key_t key = new_node->vm_avl_key;
	struct vm_area_struct ** nodeplace = ptree;
	struct vm_area_struct ** stack[avl_maxheight];
	int stack_count = 0;
	struct vm_area_struct *** stack_ptr = &stack[0];	/* current stack top */
	*to_the_left = *to_the_right = NULL;
	for (;;) {
		struct vm_area_struct * node = *nodeplace;
		if (node == avl_empty)
			break;
		*stack_ptr++ = nodeplace; stack_count++;
		if (key < node->vm_avl_key) {
			*to_the_right = node;	/* closest node above so far */
			nodeplace = &node->vm_avl_left;
		} else {
			*to_the_left = node;	/* closest node below so far */
			nodeplace = &node->vm_avl_right;
		}
	}
	new_node->vm_avl_left = avl_empty;
	new_node->vm_avl_right = avl_empty;
	new_node->vm_avl_height = 1;
	*nodeplace = new_node;
	avl_rebalance(stack_ptr,stack_count);
}
457
458
/*
 * Remove node_to_delete from the AVL tree rooted at *ptree.
 * As in avl_insert(), the descent path is recorded so that
 * avl_rebalance() can fix up heights afterwards.  A node with a
 * non-empty left subtree is replaced by its in-order predecessor
 * (the rightmost node of that subtree).
 */
static void avl_remove (struct vm_area_struct * node_to_delete, struct vm_area_struct ** ptree)
{
	vm_avl_key_t key = node_to_delete->vm_avl_key;
	struct vm_area_struct ** nodeplace = ptree;
	struct vm_area_struct ** stack[avl_maxheight];
	int stack_count = 0;
	struct vm_area_struct *** stack_ptr = &stack[0];	/* current stack top */
	struct vm_area_struct ** nodeplace_to_delete;
	/* Descend by key to the slot holding the victim node. */
	for (;;) {
		struct vm_area_struct * node = *nodeplace;
		if (node == avl_empty) {
			/* The tree and callers are out of sync. */
			printk("avl_remove: node to delete not found in tree\n");
			return;
		}
		*stack_ptr++ = nodeplace; stack_count++;
		if (key == node->vm_avl_key)
			break;
		if (key < node->vm_avl_key)
			nodeplace = &node->vm_avl_left;
		else
			nodeplace = &node->vm_avl_right;
	}
	nodeplace_to_delete = nodeplace;
	/* Easy case: no left child — splice in the right subtree. */
	if (node_to_delete->vm_avl_left == avl_empty) {
		*nodeplace_to_delete = node_to_delete->vm_avl_right;
		stack_ptr--; stack_count--;
	} else {
		/* Replace the victim with its in-order predecessor. */
		struct vm_area_struct *** stack_ptr_to_delete = stack_ptr;
		struct vm_area_struct ** nodeplace = &node_to_delete->vm_avl_left;
		struct vm_area_struct * node;
		for (;;) {
			node = *nodeplace;
			if (node->vm_avl_right == avl_empty)
				break;
			*stack_ptr++ = nodeplace; stack_count++;
			nodeplace = &node->vm_avl_right;
		}
		*nodeplace = node->vm_avl_left;
		/* The predecessor takes over the victim's links/height. */
		node->vm_avl_left = node_to_delete->vm_avl_left;
		node->vm_avl_right = node_to_delete->vm_avl_right;
		node->vm_avl_height = node_to_delete->vm_avl_height;
		*nodeplace_to_delete = node;
		/* The recorded path went through the victim's slot; it now
		 * continues through the replacement's left link. */
		*stack_ptr_to_delete = &node->vm_avl_left;
	}
	avl_rebalance(stack_ptr,stack_count);
}
508
509 #ifdef DEBUG_AVL
510
511
512 static void printk_list (struct vm_area_struct * vma)
513 {
514 printk("[");
515 while (vma) {
516 printk("%08lX-%08lX", vma->vm_start, vma->vm_end);
517 vma = vma->vm_next;
518 if (!vma)
519 break;
520 printk(" ");
521 }
522 printk("]");
523 }
524
525
526 static void printk_avl (struct vm_area_struct * tree)
527 {
528 if (tree != avl_empty) {
529 printk("(");
530 if (tree->vm_avl_left != avl_empty) {
531 printk_avl(tree->vm_avl_left);
532 printk("<");
533 }
534 printk("%08lX-%08lX", tree->vm_start, tree->vm_end);
535 if (tree->vm_avl_right != avl_empty) {
536 printk(">");
537 printk_avl(tree->vm_avl_right);
538 }
539 printk(")");
540 }
541 }
542
/* Label (the caller's name) prefixed to every diagnostic printk
 * emitted by the avl_check* routines below. */
static char *avl_check_point = "somewhere";
544
545
546 static void avl_checkheights (struct vm_area_struct * tree)
547 {
548 int h, hl, hr;
549
550 if (tree == avl_empty)
551 return;
552 avl_checkheights(tree->vm_avl_left);
553 avl_checkheights(tree->vm_avl_right);
554 h = tree->vm_avl_height;
555 hl = heightof(tree->vm_avl_left);
556 hr = heightof(tree->vm_avl_right);
557 if ((h == hl+1) && (hr <= hl) && (hl <= hr+1))
558 return;
559 if ((h == hr+1) && (hl <= hr) && (hr <= hl+1))
560 return;
561 printk("%s: avl_checkheights: heights inconsistent\n",avl_check_point);
562 }
563
564
565 static void avl_checkleft (struct vm_area_struct * tree, vm_avl_key_t key)
566 {
567 if (tree == avl_empty)
568 return;
569 avl_checkleft(tree->vm_avl_left,key);
570 avl_checkleft(tree->vm_avl_right,key);
571 if (tree->vm_avl_key < key)
572 return;
573 printk("%s: avl_checkleft: left key %lu >= top key %lu\n",avl_check_point,tree->vm_avl_key,key);
574 }
575
576
577 static void avl_checkright (struct vm_area_struct * tree, vm_avl_key_t key)
578 {
579 if (tree == avl_empty)
580 return;
581 avl_checkright(tree->vm_avl_left,key);
582 avl_checkright(tree->vm_avl_right,key);
583 if (tree->vm_avl_key > key)
584 return;
585 printk("%s: avl_checkright: right key %lu <= top key %lu\n",avl_check_point,tree->vm_avl_key,key);
586 }
587
588
/*
 * Debug check: verify the binary-search-tree ordering of the whole
 * tree — all keys in each left subtree below the node's key, all
 * keys in each right subtree above it.
 */
static void avl_checkorder (struct vm_area_struct * tree)
{
	if (tree == avl_empty)
		return;
	avl_checkorder(tree->vm_avl_left);
	avl_checkorder(tree->vm_avl_right);
	avl_checkleft(tree->vm_avl_left,tree->vm_avl_key);
	avl_checkright(tree->vm_avl_right,tree->vm_avl_key);
}
598
599
/*
 * Run the full set of AVL consistency checks (heights and key
 * ordering) on a task's mmap tree.  `caller` tags any diagnostics
 * so the failing call site can be identified.
 */
static void avl_check (struct task_struct * task, char *caller)
{
	avl_check_point = caller;
	avl_checkheights(task->mm->mmap_avl);
	avl_checkorder(task->mm->mmap_avl);
}
609
610 #endif
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
/*
 * Adjust `area` so that it no longer covers the page range
 * [addr, addr+len), re-inserting whatever survives into the current
 * task's vma structures.  The caller kfree()s `area` afterwards, so
 * every surviving piece is copied into a freshly allocated vma.
 *
 * Four cases: the whole area goes away; the tail is chopped off;
 * the head is chopped off; or a hole is punched in the middle
 * (which needs a second vma for the part after the hole).
 *
 * NOTE(review): the kmalloc() failure paths just return, silently
 * dropping the surviving part of the mapping — the function is void
 * so the caller never learns about it.
 */
void unmap_fixup(struct vm_area_struct *area,
	unsigned long addr, size_t len)
{
	struct vm_area_struct *mpnt;
	unsigned long end = addr + len;

	/* The caller must hand us a range fully inside the area. */
	if (addr < area->vm_start || addr >= area->vm_end ||
	    end <= area->vm_start || end > area->vm_end ||
	    end < addr)
	{
		printk("unmap_fixup: area=%lx-%lx, unmap %lx-%lx!!\n",
			area->vm_start, area->vm_end, addr, end);
		return;
	}

	/* Case 1: the entire area is unmapped — close and drop it. */
	if (addr == area->vm_start && end == area->vm_end) {
		if (area->vm_ops && area->vm_ops->close)
			area->vm_ops->close(area);
		if (area->vm_inode)
			iput(area->vm_inode);
		return;
	}

	/* Case 2: trim the tail. */
	if (end == area->vm_end)
		area->vm_end = addr;
	else
	/* Case 3: trim the head — the file offset shifts forward. */
	if (addr == area->vm_start) {
		area->vm_offset += (end - area->vm_start);
		area->vm_start = end;
	}
	else {
	/* Case 4: hole in the middle — build a vma for the part after
	 * the hole and shrink `area` to the part before it. */
		mpnt = (struct vm_area_struct *)kmalloc(sizeof(*mpnt), GFP_KERNEL);

		if (!mpnt)
			return;
		*mpnt = *area;
		mpnt->vm_offset += (end - area->vm_start);
		mpnt->vm_start = end;
		if (mpnt->vm_inode)
			mpnt->vm_inode->i_count++;	/* extra inode reference */
		if (mpnt->vm_ops && mpnt->vm_ops->open)
			mpnt->vm_ops->open(mpnt);
		area->vm_end = addr;	/* truncate the original area */
		insert_vm_struct(current, mpnt);
	}

	/* Clone the (shrunk) area into a fresh vma, since the caller
	 * frees the original; close the old instance first if needed. */
	mpnt = (struct vm_area_struct *)kmalloc(sizeof(*mpnt), GFP_KERNEL);
	if (!mpnt)
		return;
	*mpnt = *area;
	if (mpnt->vm_ops && mpnt->vm_ops->open)
		mpnt->vm_ops->open(mpnt);
	if (area->vm_ops && area->vm_ops->close) {
		area->vm_end = area->vm_start;	/* make it empty before close */
		area->vm_ops->close(area);
	}
	insert_vm_struct(current, mpnt);
}
698
/*
 * munmap(2) system-call entry point: all validation and the real
 * work happen in do_munmap().
 */
asmlinkage int sys_munmap(unsigned long addr, size_t len)
{
	return do_munmap(addr, len);
}
703
704
705
706
707
708
709
710 int do_munmap(unsigned long addr, size_t len)
711 {
712 struct vm_area_struct *mpnt, *prev, *next, **npp, *free;
713
714 if ((addr & ~PAGE_MASK) || addr > TASK_SIZE || len > TASK_SIZE-addr)
715 return -EINVAL;
716
717 if ((len = PAGE_ALIGN(len)) == 0)
718 return 0;
719
720
721
722
723
724
725
726 mpnt = find_vma(current, addr);
727 if (!mpnt)
728 return 0;
729 avl_neighbours(mpnt, current->mm->mmap_avl, &prev, &next);
730
731
732
733 npp = (prev ? &prev->vm_next : ¤t->mm->mmap);
734 free = NULL;
735 for ( ; mpnt && mpnt->vm_start < addr+len; mpnt = *npp) {
736 *npp = mpnt->vm_next;
737 mpnt->vm_next = free;
738 free = mpnt;
739 avl_remove(mpnt, ¤t->mm->mmap_avl);
740 }
741
742 if (free == NULL)
743 return 0;
744
745
746
747
748
749
750
751 while (free) {
752 unsigned long st, end;
753
754 mpnt = free;
755 free = free->vm_next;
756
757 remove_shared_vm_struct(mpnt);
758
759 st = addr < mpnt->vm_start ? mpnt->vm_start : addr;
760 end = addr+len;
761 end = end > mpnt->vm_end ? mpnt->vm_end : end;
762
763 if (mpnt->vm_ops && mpnt->vm_ops->unmap)
764 mpnt->vm_ops->unmap(mpnt, st, end-st);
765
766 unmap_fixup(mpnt, st, end-st);
767 kfree(mpnt);
768 }
769
770 unmap_page_range(addr, len);
771 return 0;
772 }
773
774
775 void build_mmap_avl(struct task_struct * task)
776 {
777 struct vm_area_struct * vma;
778
779 task->mm->mmap_avl = NULL;
780 for (vma = task->mm->mmap; vma; vma = vma->vm_next)
781 avl_insert(vma, &task->mm->mmap_avl);
782 }
783
784
785 void exit_mmap(struct task_struct * task)
786 {
787 struct vm_area_struct * mpnt;
788
789 mpnt = task->mm->mmap;
790 task->mm->mmap = NULL;
791 task->mm->mmap_avl = NULL;
792 while (mpnt) {
793 struct vm_area_struct * next = mpnt->vm_next;
794 if (mpnt->vm_ops && mpnt->vm_ops->close)
795 mpnt->vm_ops->close(mpnt);
796 remove_shared_vm_struct(mpnt);
797 if (mpnt->vm_inode)
798 iput(mpnt->vm_inode);
799 kfree(mpnt);
800 mpnt = next;
801 }
802 }
803
804
805
806
807
808 void insert_vm_struct(struct task_struct *t, struct vm_area_struct *vmp)
809 {
810 struct vm_area_struct *share;
811 struct inode * inode;
812
813 #if 0
814 struct vm_area_struct **p, *mpnt;
815
816 p = &t->mm->mmap;
817 while ((mpnt = *p) != NULL) {
818 if (mpnt->vm_start > vmp->vm_start)
819 break;
820 if (mpnt->vm_end > vmp->vm_start)
821 printk("insert_vm_struct: overlapping memory areas\n");
822 p = &mpnt->vm_next;
823 }
824 vmp->vm_next = mpnt;
825 *p = vmp;
826 #else
827 struct vm_area_struct * prev, * next;
828
829 avl_insert_neighbours(vmp, &t->mm->mmap_avl, &prev, &next);
830 if ((prev ? prev->vm_next : t->mm->mmap) != next)
831 printk("insert_vm_struct: tree inconsistent with list\n");
832 if (prev)
833 prev->vm_next = vmp;
834 else
835 t->mm->mmap = vmp;
836 vmp->vm_next = next;
837 #endif
838
839 inode = vmp->vm_inode;
840 if (!inode)
841 return;
842
843
844 if ((share = inode->i_mmap)) {
845 vmp->vm_next_share = share->vm_next_share;
846 vmp->vm_next_share->vm_prev_share = vmp;
847 share->vm_next_share = vmp;
848 vmp->vm_prev_share = share;
849 } else
850 inode->i_mmap = vmp->vm_next_share = vmp->vm_prev_share = vmp;
851 }
852
853
854
855
856 void remove_shared_vm_struct(struct vm_area_struct *mpnt)
857 {
858 struct inode * inode = mpnt->vm_inode;
859
860 if (!inode)
861 return;
862
863 if (mpnt->vm_next_share == mpnt) {
864 if (inode->i_mmap != mpnt)
865 printk("Inode i_mmap ring corrupted\n");
866 inode->i_mmap = NULL;
867 return;
868 }
869
870 if (inode->i_mmap == mpnt)
871 inode->i_mmap = mpnt->vm_next_share;
872
873 mpnt->vm_prev_share->vm_next_share = mpnt->vm_next_share;
874 mpnt->vm_next_share->vm_prev_share = mpnt->vm_prev_share;
875 }
876
877
878
879
880
881
882
883
/*
 * Coalesce adjacent, compatible vmas of `task` in the address range
 * [start_addr, end_addr).  Two neighbours are merged when they are
 * contiguous and agree on inode, vm_pte, operations, flags and (for
 * file/SHM mappings) file offset.  The earlier vma absorbs the
 * later one, which is closed, unshared and freed.
 */
void merge_segments (struct task_struct * task, unsigned long start_addr, unsigned long end_addr)
{
	struct vm_area_struct *prev, *mpnt, *next;

	mpnt = find_vma(task, start_addr);
	if (!mpnt)
		return;
	avl_neighbours(mpnt, task->mm->mmap_avl, &prev, &next);
	/* here: prev->vm_next == mpnt and mpnt->vm_next == next */

	/* No predecessor: start comparing from the first pair. */
	if (!prev) {
		prev = mpnt;
		mpnt = next;
	}

	/*
	 * Walk pairs (prev, mpnt) along the list.  Any `continue`
	 * advances both via the for-update; after a successful merge
	 * mpnt is reset to prev so the grown vma is compared with its
	 * new successor on the next iteration.
	 */
	for ( ; mpnt && prev->vm_start < end_addr ; prev = mpnt, mpnt = next) {
#if 0
		printk("looping in merge_segments, mpnt=0x%lX\n", (unsigned long) mpnt);
#endif

		next = mpnt->vm_next;

		/* Mergeable only if everything relevant matches... */
		if (mpnt->vm_inode != prev->vm_inode)
			continue;
		if (mpnt->vm_pte != prev->vm_pte)
			continue;
		if (mpnt->vm_ops != prev->vm_ops)
			continue;
		if (mpnt->vm_flags != prev->vm_flags)
			continue;
		/* ...and the two vmas are contiguous in memory... */
		if (prev->vm_end != mpnt->vm_start)
			continue;

		/* ...and, for file or SHM mappings, contiguous in the
		 * backing object too. */
		if ((mpnt->vm_inode != NULL) || (mpnt->vm_flags & VM_SHM)) {
			if (prev->vm_offset + prev->vm_end - prev->vm_start != mpnt->vm_offset)
				continue;
		}

		/*
		 * Merge: extend prev over mpnt, drop mpnt from tree and
		 * list, then release it.  mpnt is emptied (start == end)
		 * before vm_ops->close so the close sees a zero-length
		 * region.
		 */
		avl_remove(mpnt, &task->mm->mmap_avl);
		prev->vm_end = mpnt->vm_end;
		prev->vm_next = mpnt->vm_next;
		if (mpnt->vm_ops && mpnt->vm_ops->close) {
			mpnt->vm_offset += mpnt->vm_end - mpnt->vm_start;
			mpnt->vm_start = mpnt->vm_end;
			mpnt->vm_ops->close(mpnt);
		}
		remove_shared_vm_struct(mpnt);
		if (mpnt->vm_inode)
			mpnt->vm_inode->i_count--;
		kfree_s(mpnt, sizeof(*mpnt));
		mpnt = prev;	/* retry the merged vma against `next` */
	}
}
950
951
952
953
954
955 static int anon_map(struct inode *ino, struct file * file, struct vm_area_struct * vma)
956 {
957 if (zeromap_page_range(vma->vm_start, vma->vm_end - vma->vm_start, vma->vm_page_prot))
958 return -ENOMEM;
959 return 0;
960 }