This source file includes following definitions.
- do_mmap
- get_unmapped_area
- sys_mmap
- find_vma
- find_vma_intersection
- avl_neighbours
- avl_rebalance
- avl_insert
- avl_insert_neighbours
- avl_remove
- printk_list
- printk_avl
- avl_checkheights
- avl_checkleft
- avl_checkright
- avl_checkorder
- avl_check
- unmap_fixup
- sys_munmap
- do_munmap
- build_mmap_avl
- exit_mmap
- insert_vm_struct
- remove_shared_vm_struct
- merge_segments
- anon_map
1
2
3
4
5
6 #include <linux/stat.h>
7 #include <linux/sched.h>
8 #include <linux/kernel.h>
9 #include <linux/mm.h>
10 #include <linux/shm.h>
11 #include <linux/errno.h>
12 #include <linux/mman.h>
13 #include <linux/string.h>
14 #include <linux/malloc.h>
15
16 #include <asm/segment.h>
17 #include <asm/system.h>
18
19 static int anon_map(struct inode *, struct file *, struct vm_area_struct *);
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
/*
 * Page-protection table indexed by the low four VM_ flag bits
 * (read/write/exec/shared): __P### entries are for private (copy-on-write)
 * mappings, __S### for shared ones.
 */
pgprot_t protection_map[16] = {
	__P000, __P001, __P010, __P011, __P100, __P101, __P110, __P111,
	__S000, __S001, __S010, __S011, __S100, __S101, __S110, __S111
};
42
/*
 * Map a region of `len` bytes at `addr` into the current task's address
 * space, backed by `file` at offset `off`, or by anonymous zero-fill
 * memory when `file` is NULL.  Returns the mapped address on success or
 * a negative errno.
 */
int do_mmap(struct file * file, unsigned long addr, unsigned long len,
	unsigned long prot, unsigned long flags, unsigned long off)
{
	int error;
	struct vm_area_struct * vma;

	/* A zero-length request is a successful no-op. */
	if ((len = PAGE_ALIGN(len)) == 0)
		return addr;

	if (addr > TASK_SIZE || len > TASK_SIZE || addr > TASK_SIZE-len)
		return -EINVAL;

	/* Reject a file offset + length that wraps around. */
	if (off + len < off)
		return -EINVAL;

	if (file != NULL) {
		switch (flags & MAP_TYPE) {
		case MAP_SHARED:
			/* A writable shared mapping needs the file opened
			 * for writing (f_mode bit 2). */
			if ((prot & PROT_WRITE) && !(file->f_mode & 2))
				return -EACCES;
			/* fall through: shared mappings also need read access */
		case MAP_PRIVATE:
			/* f_mode bit 1 = opened for reading. */
			if (!(file->f_mode & 1))
				return -EACCES;
			break;

		default:
			return -EINVAL;
		}
		/* MAP_DENYWRITE fails if anyone has the inode open for write. */
		if ((flags & MAP_DENYWRITE) && (file->f_inode->i_wcount > 0))
			return -ETXTBSY;
	} else if ((flags & MAP_TYPE) != MAP_PRIVATE)
		/* Anonymous mappings must be private in this version. */
		return -EINVAL;

	/*
	 * MAP_FIXED: caller demands this exact, page-aligned address.
	 * Otherwise we choose a free range ourselves.
	 */
	if (flags & MAP_FIXED) {
		if (addr & ~PAGE_MASK)
			return -EINVAL;
		if (len > TASK_SIZE || addr > TASK_SIZE - len)
			return -EINVAL;
	} else {
		addr = get_unmapped_area(len);
		if (!addr)
			return -ENOMEM;
	}

	/* A file mapping requires the driver/filesystem to supply mmap. */
	if (file && (!file->f_op || !file->f_op->mmap))
		return -ENODEV;

	vma = (struct vm_area_struct *)kmalloc(sizeof(struct vm_area_struct),
		GFP_KERNEL);
	if (!vma)
		return -ENOMEM;

	vma->vm_task = current;
	vma->vm_start = addr;
	vma->vm_end = addr + len;
	/* PROT_* and VM_* are defined so the low bits line up. */
	vma->vm_flags = prot & (VM_READ | VM_WRITE | VM_EXEC);
	vma->vm_flags |= flags & (VM_GROWSDOWN | VM_DENYWRITE | VM_EXECUTABLE);

	if (file) {
		if (file->f_mode & 1)
			vma->vm_flags |= VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC;
		if (flags & MAP_SHARED) {
			vma->vm_flags |= VM_SHARED | VM_MAYSHARE;
			/* Read-only file: the mapping may never be made
			 * writable later via mprotect. */
			if (!(file->f_mode & 2))
				vma->vm_flags &= ~VM_MAYWRITE;
		}
	} else
		vma->vm_flags |= VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC;
	/* The low four VM_ bits index the global protection table. */
	vma->vm_page_prot = protection_map[vma->vm_flags & 0x0f];
	vma->vm_ops = NULL;
	vma->vm_offset = off;
	vma->vm_inode = NULL;
	vma->vm_pte = 0;

	/* Clear away any old mappings overlapping the new range. */
	do_munmap(addr, len);

	if (file)
		error = file->f_op->mmap(file->f_inode, file, vma);
	else
		error = anon_map(NULL, NULL, vma);

	if (error) {
		kfree(vma);
		return error;
	}
	insert_vm_struct(current, vma);
	/* Coalesce with compatible neighbouring vmas where possible. */
	merge_segments(current, vma->vm_start, vma->vm_end);
	return addr;
}
150
151
152
153
154
155
156 unsigned long get_unmapped_area(unsigned long len)
157 {
158 struct vm_area_struct * vmm;
159 unsigned long gap_start = 0, gap_end;
160
161 for (vmm = current->mm->mmap; ; vmm = vmm->vm_next) {
162 if (gap_start < SHM_RANGE_START)
163 gap_start = SHM_RANGE_START;
164 if (!vmm || ((gap_end = vmm->vm_start) > SHM_RANGE_END))
165 gap_end = SHM_RANGE_END;
166 gap_start = PAGE_ALIGN(gap_start);
167 gap_end &= PAGE_MASK;
168 if ((gap_start <= gap_end) && (gap_end - gap_start >= len))
169 return gap_start;
170 if (!vmm)
171 return 0;
172 gap_start = vmm->vm_end;
173 }
174 }
175
176 asmlinkage int sys_mmap(unsigned long *buffer)
177 {
178 int error;
179 unsigned long flags;
180 struct file * file = NULL;
181
182 error = verify_area(VERIFY_READ, buffer, 6*sizeof(long));
183 if (error)
184 return error;
185 flags = get_fs_long(buffer+3);
186 if (!(flags & MAP_ANONYMOUS)) {
187 unsigned long fd = get_fs_long(buffer+4);
188 if (fd >= NR_OPEN || !(file = current->files->fd[fd]))
189 return -EBADF;
190 }
191 return do_mmap(file, get_fs_long(buffer), get_fs_long(buffer+1),
192 get_fs_long(buffer+2), flags, get_fs_long(buffer+5));
193 }
194
195
196
197
198
199
200
201
202
203
204
/* The AVL tree is keyed on vm_end: find_vma() wants the first vma whose
 * vm_end lies strictly above a given address. */
#define vm_avl_key vm_end
#define vm_avl_key_t unsigned long

/* An empty (sub)tree is represented by a NULL pointer. */
#define avl_empty (struct vm_area_struct *) NULL

/* Upper bound on the height of any AVL tree we can build here; used to
 * size the per-operation path stacks. */
#define avl_maxheight 41
#define heightof(tree) ((tree) == avl_empty ? 0 : (tree)->vm_avl_height)
222
223
224
225
226
227
228
229
230
231 struct vm_area_struct * find_vma (struct task_struct * task, unsigned long addr)
232 {
233 #if 0
234 struct vm_area_struct * vma;
235
236 for (vma = task->mm->mmap ; ; vma = vma->vm_next) {
237 if (!vma)
238 return NULL;
239 if (vma->vm_end > addr)
240 return vma;
241 }
242 #else
243 struct vm_area_struct * result = NULL;
244 struct vm_area_struct * tree;
245
246 for (tree = task->mm->mmap_avl ; ; ) {
247 if (tree == avl_empty)
248 return result;
249 if (tree->vm_end > addr) {
250 if (tree->vm_start <= addr)
251 return tree;
252 result = tree;
253 tree = tree->vm_avl_left;
254 } else
255 tree = tree->vm_avl_right;
256 }
257 #endif
258 }
259
260
261
262 struct vm_area_struct * find_vma_intersection (struct task_struct * task, unsigned long start_addr, unsigned long end_addr)
263 {
264 struct vm_area_struct * vma;
265
266 #if 0
267 for (vma = task->mm->mmap; vma; vma = vma->vm_next) {
268 if (end_addr <= vma->vm_start)
269 break;
270 if (start_addr < vma->vm_end)
271 return vma;
272 }
273 return NULL;
274 #else
275 vma = find_vma(task,start_addr);
276 if (!vma || end_addr <= vma->vm_start)
277 return NULL;
278 return vma;
279 #endif
280 }
281
282
/*
 * Look up `node` in `tree` and return its in-order predecessor and
 * successor through *to_the_left / *to_the_right (NULL when absent).
 * As a side effect, cross-check that the tree agrees with the linked
 * vma list; inconsistencies are reported via printk but not repaired.
 */
static void avl_neighbours (struct vm_area_struct * node, struct vm_area_struct * tree, struct vm_area_struct ** to_the_left, struct vm_area_struct ** to_the_right)
{
	vm_avl_key_t key = node->vm_avl_key;

	*to_the_left = *to_the_right = NULL;
	/* Descend to the node, remembering the tightest lower and upper
	 * neighbours seen along the path. */
	for (;;) {
		if (tree == avl_empty) {
			printk("avl_neighbours: node not found in the tree\n");
			return;
		}
		if (key == tree->vm_avl_key)
			break;
		if (key < tree->vm_avl_key) {
			*to_the_right = tree;
			tree = tree->vm_avl_left;
		} else {
			*to_the_left = tree;
			tree = tree->vm_avl_right;
		}
	}
	if (tree != node) {
		printk("avl_neighbours: node not exactly found in the tree\n");
		return;
	}
	/* A non-empty left subtree holds the true predecessor: its
	 * rightmost element overrides any path candidate. */
	if (tree->vm_avl_left != avl_empty) {
		struct vm_area_struct * node;
		for (node = tree->vm_avl_left; node->vm_avl_right != avl_empty; node = node->vm_avl_right)
			continue;
		*to_the_left = node;
	}
	/* Likewise the successor is the leftmost of the right subtree. */
	if (tree->vm_avl_right != avl_empty) {
		struct vm_area_struct * node;
		for (node = tree->vm_avl_right; node->vm_avl_left != avl_empty; node = node->vm_avl_left)
			continue;
		*to_the_right = node;
	}
	/* Neighbours in the tree must also be neighbours in the list. */
	if ((*to_the_left && ((*to_the_left)->vm_next != node)) || (node->vm_next != *to_the_right))
		printk("avl_neighbours: tree inconsistent with list\n");
}
322
323
324
325
326
327
328
/*
 * Rebalance the tree after an insertion or deletion.  nodeplaces_ptr
 * points just past a stack of `count` edges (pointers to child links)
 * recorded on the way down from the root; we walk it back up, doing a
 * single or double rotation wherever the AVL invariant |hl - hr| <= 1
 * is violated, recomputing heights as we go, and stopping early once a
 * subtree's height is unchanged (nothing above it can be affected).
 */
static void avl_rebalance (struct vm_area_struct *** nodeplaces_ptr, int count)
{
	for ( ; count > 0 ; count--) {
		struct vm_area_struct ** nodeplace = *--nodeplaces_ptr;
		struct vm_area_struct * node = *nodeplace;
		struct vm_area_struct * nodeleft = node->vm_avl_left;
		struct vm_area_struct * noderight = node->vm_avl_right;
		int heightleft = heightof(nodeleft);
		int heightright = heightof(noderight);
		if (heightright + 1 < heightleft) {
			/* Left subtree is two levels taller. */
			struct vm_area_struct * nodeleftleft = nodeleft->vm_avl_left;
			struct vm_area_struct * nodeleftright = nodeleft->vm_avl_right;
			int heightleftright = heightof(nodeleftright);
			if (heightof(nodeleftleft) >= heightleftright) {
				/* Single right rotation: nodeleft becomes
				 * the new root of this subtree. */
				node->vm_avl_left = nodeleftright; nodeleft->vm_avl_right = node;
				nodeleft->vm_avl_height = 1 + (node->vm_avl_height = 1 + heightleftright);
				*nodeplace = nodeleft;
			} else {
				/* Double left-right rotation: the inner
				 * grandchild nodeleftright is lifted to
				 * the top. */
				nodeleft->vm_avl_right = nodeleftright->vm_avl_left;
				node->vm_avl_left = nodeleftright->vm_avl_right;
				nodeleftright->vm_avl_left = nodeleft;
				nodeleftright->vm_avl_right = node;
				nodeleft->vm_avl_height = node->vm_avl_height = heightleftright;
				nodeleftright->vm_avl_height = heightleft;
				*nodeplace = nodeleftright;
			}
		}
		else if (heightleft + 1 < heightright) {
			/* Right subtree is two levels taller: exact mirror
			 * image of the case above. */
			struct vm_area_struct * noderightright = noderight->vm_avl_right;
			struct vm_area_struct * noderightleft = noderight->vm_avl_left;
			int heightrightleft = heightof(noderightleft);
			if (heightof(noderightright) >= heightrightleft) {
				/* Single left rotation. */
				node->vm_avl_right = noderightleft; noderight->vm_avl_left = node;
				noderight->vm_avl_height = 1 + (node->vm_avl_height = 1 + heightrightleft);
				*nodeplace = noderight;
			} else {
				/* Double right-left rotation. */
				noderight->vm_avl_left = noderightleft->vm_avl_right;
				node->vm_avl_right = noderightleft->vm_avl_left;
				noderightleft->vm_avl_right = noderight;
				noderightleft->vm_avl_left = node;
				noderight->vm_avl_height = node->vm_avl_height = heightrightleft;
				noderightleft->vm_avl_height = heightright;
				*nodeplace = noderightleft;
			}
		}
		else {
			/* Balanced: just refresh the height, and stop as
			 * soon as it did not change. */
			int height = (heightleft<heightright ? heightright : heightleft) + 1;
			if (height == node->vm_avl_height)
				break;
			node->vm_avl_height = height;
		}
	}
}
404
405
406 static void avl_insert (struct vm_area_struct * new_node, struct vm_area_struct ** ptree)
407 {
408 vm_avl_key_t key = new_node->vm_avl_key;
409 struct vm_area_struct ** nodeplace = ptree;
410 struct vm_area_struct ** stack[avl_maxheight];
411 int stack_count = 0;
412 struct vm_area_struct *** stack_ptr = &stack[0];
413 for (;;) {
414 struct vm_area_struct * node = *nodeplace;
415 if (node == avl_empty)
416 break;
417 *stack_ptr++ = nodeplace; stack_count++;
418 if (key < node->vm_avl_key)
419 nodeplace = &node->vm_avl_left;
420 else
421 nodeplace = &node->vm_avl_right;
422 }
423 new_node->vm_avl_left = avl_empty;
424 new_node->vm_avl_right = avl_empty;
425 new_node->vm_avl_height = 1;
426 *nodeplace = new_node;
427 avl_rebalance(stack_ptr,stack_count);
428 }
429
430
431
432
433 static void avl_insert_neighbours (struct vm_area_struct * new_node, struct vm_area_struct ** ptree,
434 struct vm_area_struct ** to_the_left, struct vm_area_struct ** to_the_right)
435 {
436 vm_avl_key_t key = new_node->vm_avl_key;
437 struct vm_area_struct ** nodeplace = ptree;
438 struct vm_area_struct ** stack[avl_maxheight];
439 int stack_count = 0;
440 struct vm_area_struct *** stack_ptr = &stack[0];
441 *to_the_left = *to_the_right = NULL;
442 for (;;) {
443 struct vm_area_struct * node = *nodeplace;
444 if (node == avl_empty)
445 break;
446 *stack_ptr++ = nodeplace; stack_count++;
447 if (key < node->vm_avl_key) {
448 *to_the_right = node;
449 nodeplace = &node->vm_avl_left;
450 } else {
451 *to_the_left = node;
452 nodeplace = &node->vm_avl_right;
453 }
454 }
455 new_node->vm_avl_left = avl_empty;
456 new_node->vm_avl_right = avl_empty;
457 new_node->vm_avl_height = 1;
458 *nodeplace = new_node;
459 avl_rebalance(stack_ptr,stack_count);
460 }
461
462
/*
 * Delete node_to_delete from *ptree (located by key, then verified by
 * identity) and rebalance.  If the node has a left child it is replaced
 * in place by its in-order predecessor — the rightmost node of the left
 * subtree — which inherits its children and height.
 */
static void avl_remove (struct vm_area_struct * node_to_delete, struct vm_area_struct ** ptree)
{
	vm_avl_key_t key = node_to_delete->vm_avl_key;
	struct vm_area_struct ** nodeplace = ptree;
	struct vm_area_struct ** stack[avl_maxheight];
	int stack_count = 0;
	struct vm_area_struct *** stack_ptr = &stack[0];
	struct vm_area_struct ** nodeplace_to_delete;
	/* Descend to the doomed node, logging the path for rebalancing. */
	for (;;) {
		struct vm_area_struct * node = *nodeplace;
		if (node == avl_empty) {
			printk("avl_remove: node to delete not found in tree\n");
			return;
		}
		*stack_ptr++ = nodeplace; stack_count++;
		if (key == node->vm_avl_key)
			break;
		if (key < node->vm_avl_key)
			nodeplace = &node->vm_avl_left;
		else
			nodeplace = &node->vm_avl_right;
	}
	nodeplace_to_delete = nodeplace;
	/* No left child: splice the right subtree in directly, and drop
	 * the doomed node's own stack entry. */
	if (node_to_delete->vm_avl_left == avl_empty) {
		*nodeplace_to_delete = node_to_delete->vm_avl_right;
		stack_ptr--; stack_count--;
	} else {
		/* Otherwise find the in-order predecessor, continuing to
		 * record the path taken below the doomed node. */
		struct vm_area_struct *** stack_ptr_to_delete = stack_ptr;
		struct vm_area_struct ** nodeplace = &node_to_delete->vm_avl_left;
		struct vm_area_struct * node;
		for (;;) {
			node = *nodeplace;
			if (node->vm_avl_right == avl_empty)
				break;
			*stack_ptr++ = nodeplace; stack_count++;
			nodeplace = &node->vm_avl_right;
		}
		/* Detach the predecessor (it has no right child). */
		*nodeplace = node->vm_avl_left;
		/* The predecessor takes over the deleted node's children
		 * and height, occupying its old position. */
		node->vm_avl_left = node_to_delete->vm_avl_left;
		node->vm_avl_right = node_to_delete->vm_avl_right;
		node->vm_avl_height = node_to_delete->vm_avl_height;
		*nodeplace_to_delete = node;
		/* Patch the recorded path: the slot that pointed at the
		 * deleted node now refers to the replacement's left link. */
		*stack_ptr_to_delete = &node->vm_avl_left;
	}
	avl_rebalance(stack_ptr,stack_count);
}
512
513 #ifdef DEBUG_AVL
514
515
516 static void printk_list (struct vm_area_struct * vma)
517 {
518 printk("[");
519 while (vma) {
520 printk("%08lX-%08lX", vma->vm_start, vma->vm_end);
521 vma = vma->vm_next;
522 if (!vma)
523 break;
524 printk(" ");
525 }
526 printk("]");
527 }
528
529
530 static void printk_avl (struct vm_area_struct * tree)
531 {
532 if (tree != avl_empty) {
533 printk("(");
534 if (tree->vm_avl_left != avl_empty) {
535 printk_avl(tree->vm_avl_left);
536 printk("<");
537 }
538 printk("%08lX-%08lX", tree->vm_start, tree->vm_end);
539 if (tree->vm_avl_right != avl_empty) {
540 printk(">");
541 printk_avl(tree->vm_avl_right);
542 }
543 printk(")");
544 }
545 }
546
547 static char *avl_check_point = "somewhere";
548
549
550 static void avl_checkheights (struct vm_area_struct * tree)
551 {
552 int h, hl, hr;
553
554 if (tree == avl_empty)
555 return;
556 avl_checkheights(tree->vm_avl_left);
557 avl_checkheights(tree->vm_avl_right);
558 h = tree->vm_avl_height;
559 hl = heightof(tree->vm_avl_left);
560 hr = heightof(tree->vm_avl_right);
561 if ((h == hl+1) && (hr <= hl) && (hl <= hr+1))
562 return;
563 if ((h == hr+1) && (hl <= hr) && (hr <= hl+1))
564 return;
565 printk("%s: avl_checkheights: heights inconsistent\n",avl_check_point);
566 }
567
568
569 static void avl_checkleft (struct vm_area_struct * tree, vm_avl_key_t key)
570 {
571 if (tree == avl_empty)
572 return;
573 avl_checkleft(tree->vm_avl_left,key);
574 avl_checkleft(tree->vm_avl_right,key);
575 if (tree->vm_avl_key < key)
576 return;
577 printk("%s: avl_checkleft: left key %lu >= top key %lu\n",avl_check_point,tree->vm_avl_key,key);
578 }
579
580
581 static void avl_checkright (struct vm_area_struct * tree, vm_avl_key_t key)
582 {
583 if (tree == avl_empty)
584 return;
585 avl_checkright(tree->vm_avl_left,key);
586 avl_checkright(tree->vm_avl_right,key);
587 if (tree->vm_avl_key > key)
588 return;
589 printk("%s: avl_checkright: right key %lu <= top key %lu\n",avl_check_point,tree->vm_avl_key,key);
590 }
591
592
/* Verify the binary-search-tree ordering property at every node. */
static void avl_checkorder (struct vm_area_struct * tree)
{
	if (tree == avl_empty)
		return;
	avl_checkorder(tree->vm_avl_left);
	avl_checkorder(tree->vm_avl_right);
	avl_checkleft(tree->vm_avl_left,tree->vm_avl_key);
	avl_checkright(tree->vm_avl_right,tree->vm_avl_key);
}
602
603
/* Run all AVL consistency checks on `task`'s mmap tree, tagging any
 * diagnostic output with `caller`. */
static void avl_check (struct task_struct * task, char *caller)
{
	avl_check_point = caller;
	avl_checkheights(task->mm->mmap_avl);
	avl_checkorder(task->mm->mmap_avl);
}
613
614 #endif
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
/*
 * Adjust `area` after [addr, addr+len) has been unmapped from within it.
 * Four cases: exact match (release the area's resources), trim at the
 * end, trim at the front, or a hole in the middle (split in two).
 * `area` itself is freed by the caller afterwards, so any surviving
 * piece is re-inserted as a freshly allocated copy.
 */
void unmap_fixup(struct vm_area_struct *area,
	unsigned long addr, size_t len)
{
	struct vm_area_struct *mpnt;
	unsigned long end = addr + len;

	/* The unmap range must lie entirely within the area. */
	if (addr < area->vm_start || addr >= area->vm_end ||
	    end <= area->vm_start || end > area->vm_end ||
	    end < addr)
	{
		printk("unmap_fixup: area=%lx-%lx, unmap %lx-%lx!!\n",
			area->vm_start, area->vm_end, addr, end);
		return;
	}

	/* Whole area unmapped: drop its inode and close it; nothing
	 * survives to re-insert. */
	if (addr == area->vm_start && end == area->vm_end) {
		if (area->vm_ops && area->vm_ops->close)
			area->vm_ops->close(area);
		if (area->vm_inode)
			iput(area->vm_inode);
		return;
	}

	/* Trim at the end ... */
	if (end == area->vm_end)
		area->vm_end = addr;
	else
	/* ... or at the front (file offset advances with vm_start) ... */
	if (addr == area->vm_start) {
		area->vm_offset += (end - area->vm_start);
		area->vm_start = end;
	}
	else {
	/* ... or punch a hole: clone a second vma for the tail. */
		mpnt = (struct vm_area_struct *)kmalloc(sizeof(*mpnt), GFP_KERNEL);

		if (!mpnt)
			return;
		*mpnt = *area;
		mpnt->vm_offset += (end - area->vm_start);
		mpnt->vm_start = end;
		if (mpnt->vm_inode)
			mpnt->vm_inode->i_count++;	/* tail holds its own ref */
		if (mpnt->vm_ops && mpnt->vm_ops->open)
			mpnt->vm_ops->open(mpnt);
		area->vm_end = addr;	/* head keeps the leading part */
		insert_vm_struct(current, mpnt);
	}

	/* Re-insert the surviving head as a fresh copy; the original is
	 * shrunk to empty and closed (the caller will kfree it). */
	mpnt = (struct vm_area_struct *)kmalloc(sizeof(*mpnt), GFP_KERNEL);
	if (!mpnt)
		return;
	*mpnt = *area;
	if (mpnt->vm_ops && mpnt->vm_ops->open)
		mpnt->vm_ops->open(mpnt);
	if (area->vm_ops && area->vm_ops->close) {
		area->vm_end = area->vm_start;	/* make it empty before close */
		area->vm_ops->close(area);
	}
	insert_vm_struct(current, mpnt);
}
702
703 asmlinkage int sys_munmap(unsigned long addr, size_t len)
704 {
705 return do_munmap(addr, len);
706 }
707
708
709
710
711
712
713
714 int do_munmap(unsigned long addr, size_t len)
715 {
716 struct vm_area_struct *mpnt, *prev, *next, **npp, *free;
717
718 if ((addr & ~PAGE_MASK) || addr > TASK_SIZE || len > TASK_SIZE-addr)
719 return -EINVAL;
720
721 if ((len = PAGE_ALIGN(len)) == 0)
722 return 0;
723
724
725
726
727
728
729
730 mpnt = find_vma(current, addr);
731 if (!mpnt)
732 return 0;
733 avl_neighbours(mpnt, current->mm->mmap_avl, &prev, &next);
734
735
736
737 npp = (prev ? &prev->vm_next : ¤t->mm->mmap);
738 free = NULL;
739 for ( ; mpnt && mpnt->vm_start < addr+len; mpnt = *npp) {
740 *npp = mpnt->vm_next;
741 mpnt->vm_next = free;
742 free = mpnt;
743 avl_remove(mpnt, ¤t->mm->mmap_avl);
744 }
745
746 if (free == NULL)
747 return 0;
748
749
750
751
752
753
754
755 while (free) {
756 unsigned long st, end;
757
758 mpnt = free;
759 free = free->vm_next;
760
761 remove_shared_vm_struct(mpnt);
762
763 st = addr < mpnt->vm_start ? mpnt->vm_start : addr;
764 end = addr+len;
765 end = end > mpnt->vm_end ? mpnt->vm_end : end;
766
767 if (mpnt->vm_ops && mpnt->vm_ops->unmap)
768 mpnt->vm_ops->unmap(mpnt, st, end-st);
769
770 unmap_fixup(mpnt, st, end-st);
771 kfree(mpnt);
772 }
773
774 unmap_page_range(addr, len);
775 return 0;
776 }
777
778
779 void build_mmap_avl(struct task_struct * task)
780 {
781 struct vm_area_struct * vma;
782
783 task->mm->mmap_avl = NULL;
784 for (vma = task->mm->mmap; vma; vma = vma->vm_next)
785 avl_insert(vma, &task->mm->mmap_avl);
786 }
787
788
789 void exit_mmap(struct task_struct * task)
790 {
791 struct vm_area_struct * mpnt;
792
793 mpnt = task->mm->mmap;
794 task->mm->mmap = NULL;
795 task->mm->mmap_avl = NULL;
796 while (mpnt) {
797 struct vm_area_struct * next = mpnt->vm_next;
798 if (mpnt->vm_ops && mpnt->vm_ops->close)
799 mpnt->vm_ops->close(mpnt);
800 remove_shared_vm_struct(mpnt);
801 if (mpnt->vm_inode)
802 iput(mpnt->vm_inode);
803 kfree(mpnt);
804 mpnt = next;
805 }
806 }
807
808
809
810
811
/*
 * Insert vmp into task t's address-ordered vma list and AVL tree, and
 * link it onto its inode's circular ring of sharers when file-backed.
 */
void insert_vm_struct(struct task_struct *t, struct vm_area_struct *vmp)
{
	struct vm_area_struct *share;
	struct inode * inode;

#if 0
	/* Old linear-list insertion, kept for reference. */
	struct vm_area_struct **p, *mpnt;

	p = &t->mm->mmap;
	while ((mpnt = *p) != NULL) {
		if (mpnt->vm_start > vmp->vm_start)
			break;
		if (mpnt->vm_end > vmp->vm_start)
			printk("insert_vm_struct: overlapping memory areas\n");
		p = &mpnt->vm_next;
	}
	vmp->vm_next = mpnt;
	*p = vmp;
#else
	struct vm_area_struct * prev, * next;

	/* The AVL insertion also hands back the new list neighbours,
	 * letting us patch the linked list in O(1). */
	avl_insert_neighbours(vmp, &t->mm->mmap_avl, &prev, &next);
	if ((prev ? prev->vm_next : t->mm->mmap) != next)
		printk("insert_vm_struct: tree inconsistent with list\n");
	if (prev)
		prev->vm_next = vmp;
	else
		t->mm->mmap = vmp;
	vmp->vm_next = next;
#endif

	inode = vmp->vm_inode;
	if (!inode)
		return;		/* anonymous: no share ring */

	/* Splice into the inode's circular ring of sharing vmas, or
	 * start a new single-element ring. */
	if ((share = inode->i_mmap)) {
		vmp->vm_next_share = share->vm_next_share;
		vmp->vm_next_share->vm_prev_share = vmp;
		share->vm_next_share = vmp;
		vmp->vm_prev_share = share;
	} else
		inode->i_mmap = vmp->vm_next_share = vmp->vm_prev_share = vmp;
}
856
857
858
859
860 void remove_shared_vm_struct(struct vm_area_struct *mpnt)
861 {
862 struct inode * inode = mpnt->vm_inode;
863
864 if (!inode)
865 return;
866
867 if (mpnt->vm_next_share == mpnt) {
868 if (inode->i_mmap != mpnt)
869 printk("Inode i_mmap ring corrupted\n");
870 inode->i_mmap = NULL;
871 return;
872 }
873
874 if (inode->i_mmap == mpnt)
875 inode->i_mmap = mpnt->vm_next_share;
876
877 mpnt->vm_prev_share->vm_next_share = mpnt->vm_next_share;
878 mpnt->vm_next_share->vm_prev_share = mpnt->vm_prev_share;
879 }
880
881
882
883
884
885
886
887
/*
 * Coalesce adjacent vmas within [start_addr, end_addr) that are fully
 * compatible — same inode, pte, ops and flags, contiguous both in
 * address space and (for file/shm mappings) in file offset — into
 * single larger vmas.
 */
void merge_segments (struct task_struct * task, unsigned long start_addr, unsigned long end_addr)
{
	struct vm_area_struct *prev, *mpnt, *next;

	mpnt = find_vma(task, start_addr);
	if (!mpnt)
		return;
	avl_neighbours(mpnt, task->mm->mmap_avl, &prev, &next);
	/* here: prev->vm_next == mpnt */

	/* No predecessor: start the merge window at mpnt itself. */
	if (!prev) {
		prev = mpnt;
		mpnt = next;
	}

	/* prev/mpnt slide along the list while the window lasts; after a
	 * successful merge, mpnt is reset to prev so the grown prev can
	 * immediately try to absorb the following vma too. */
	for ( ; mpnt && prev->vm_start < end_addr ; prev = mpnt, mpnt = next) {
#if 0
		printk("looping in merge_segments, mpnt=0x%lX\n", (unsigned long) mpnt);
#endif

		next = mpnt->vm_next;

		/* Every attribute must match for a merge to be legal. */
		if (mpnt->vm_inode != prev->vm_inode)
			continue;
		if (mpnt->vm_pte != prev->vm_pte)
			continue;
		if (mpnt->vm_ops != prev->vm_ops)
			continue;
		if (mpnt->vm_flags != prev->vm_flags)
			continue;
		if (prev->vm_end != mpnt->vm_start)
			continue;

		/* File-backed (or SysV shm) areas must also be contiguous
		 * in file offset. */
		if ((mpnt->vm_inode != NULL) || (mpnt->vm_flags & VM_SHM)) {
			if (prev->vm_offset + prev->vm_end - prev->vm_start != mpnt->vm_offset)
				continue;
		}

		/* Merge: extend prev over mpnt and release mpnt.  mpnt is
		 * shrunk to empty before close() so the operation sees no
		 * remaining pages to tear down. */
		avl_remove(mpnt, &task->mm->mmap_avl);
		prev->vm_end = mpnt->vm_end;
		prev->vm_next = mpnt->vm_next;
		if (mpnt->vm_ops && mpnt->vm_ops->close) {
			mpnt->vm_offset += mpnt->vm_end - mpnt->vm_start;
			mpnt->vm_start = mpnt->vm_end;
			mpnt->vm_ops->close(mpnt);
		}
		remove_shared_vm_struct(mpnt);
		if (mpnt->vm_inode)
			mpnt->vm_inode->i_count--;
		kfree_s(mpnt, sizeof(*mpnt));
		mpnt = prev;	/* retry merging the grown prev with next */
	}
}
954
955
956
957
958
959 static int anon_map(struct inode *ino, struct file * file, struct vm_area_struct * vma)
960 {
961 if (zeromap_page_range(vma->vm_start, vma->vm_end - vma->vm_start, vma->vm_page_prot))
962 return -ENOMEM;
963 return 0;
964 }