This source file includes following definitions.
- do_mmap
- get_unmapped_area
- sys_mmap
- find_vma
- find_vma_intersection
- avl_neighbours
- avl_rebalance
- avl_insert
- avl_insert_neighbours
- avl_remove
- printk_list
- printk_avl
- avl_checkheights
- avl_checkleft
- avl_checkright
- avl_checkorder
- avl_check
- unmap_fixup
- sys_munmap
- do_munmap
- build_mmap_avl
- exit_mmap
- insert_vm_struct
- remove_shared_vm_struct
- merge_segments
- anon_map
1
2
3
4
5
6 #include <linux/stat.h>
7 #include <linux/sched.h>
8 #include <linux/kernel.h>
9 #include <linux/mm.h>
10 #include <linux/shm.h>
11 #include <linux/errno.h>
12 #include <linux/mman.h>
13 #include <linux/string.h>
14 #include <linux/malloc.h>
15
16 #include <asm/segment.h>
17 #include <asm/system.h>
18
19 static int anon_map(struct inode *, struct file *, struct vm_area_struct *);
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
/*
 * Page-protection table indexed by the low four VM_* bits
 * (read/write/exec/shared): the first eight entries are the
 * private (copy-on-write) protections, the last eight the
 * shared ones.
 */
pgprot_t protection_map[16] = {
	__P000, __P001, __P010, __P011, __P100, __P101, __P110, __P111,
	__S000, __S001, __S010, __S011, __S100, __S101, __S110, __S111
};
42
/*
 * Map a file (or anonymous memory, when file == NULL) into the
 * current task's address space at [addr, addr+len).
 *
 * Returns the address actually mapped on success, or a negative
 * errno.  On success the new area has been inserted into the task's
 * vma structures and merged with compatible neighbours.
 */
int do_mmap(struct file * file, unsigned long addr, unsigned long len,
	unsigned long prot, unsigned long flags, unsigned long off)
{
	int error;
	struct vm_area_struct * vma;

	/* A zero-length request is a successful no-op. */
	if ((len = PAGE_ALIGN(len)) == 0)
		return addr;

	if (addr > TASK_SIZE || len > TASK_SIZE || addr > TASK_SIZE-len)
		return -EINVAL;

	/* Reject file offsets that would wrap around. */
	if (off + len < off)
		return -EINVAL;

	/*
	 * Check the requested mapping type against the file mode:
	 * a shared writable mapping needs the file open for writing,
	 * and any file mapping needs it open for reading.
	 */
	if (file != NULL) {
		switch (flags & MAP_TYPE) {
		case MAP_SHARED:
			if ((prot & PROT_WRITE) && !(file->f_mode & 2))
				return -EACCES;
			/* fall through: shared mappings need read access too */
		case MAP_PRIVATE:
			if (!(file->f_mode & 1))
				return -EACCES;
			break;

		default:
			return -EINVAL;
		}
		if ((flags & MAP_DENYWRITE) && (file->f_inode->i_wcount > 0))
			return -ETXTBSY;
	} else if ((flags & MAP_TYPE) != MAP_PRIVATE)
		return -EINVAL;

	/*
	 * MAP_FIXED demands a page-aligned address within the task
	 * limit; otherwise we choose a free range ourselves.
	 */
	if (flags & MAP_FIXED) {
		if (addr & ~PAGE_MASK)
			return -EINVAL;
		if (len > TASK_SIZE || addr > TASK_SIZE - len)
			return -EINVAL;
	} else {
		addr = get_unmapped_area(len);
		if (!addr)
			return -ENOMEM;
	}

	/* The underlying driver/filesystem must support mmap at all. */
	if (file && (!file->f_op || !file->f_op->mmap))
		return -ENODEV;

	vma = (struct vm_area_struct *)kmalloc(sizeof(struct vm_area_struct),
		GFP_KERNEL);
	if (!vma)
		return -ENOMEM;

	vma->vm_task = current;
	vma->vm_start = addr;
	vma->vm_end = addr + len;
	/* PROT_* and the low VM_* bits share values, so copy directly. */
	vma->vm_flags = prot & (VM_READ | VM_WRITE | VM_EXEC);
	vma->vm_flags |= flags & (VM_GROWSDOWN | VM_DENYWRITE | VM_EXECUTABLE);

	if (file) {
		if (file->f_mode & 1)
			vma->vm_flags |= VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC;
		if (flags & MAP_SHARED) {
			vma->vm_flags |= VM_SHARED | VM_MAYSHARE;
			/*
			 * A shared mapping of a file not open for
			 * writing can never become writable, so drop
			 * VM_MAYWRITE and treat it like a private
			 * mapping (clear VM_SHARED as well).
			 */
			if (!(file->f_mode & 2))
				vma->vm_flags &= ~(VM_MAYWRITE | VM_SHARED);
		}
	} else
		vma->vm_flags |= VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC;
	vma->vm_page_prot = protection_map[vma->vm_flags & 0x0f];
	vma->vm_ops = NULL;
	vma->vm_offset = off;
	vma->vm_inode = NULL;
	vma->vm_pte = 0;

	/* Clear away any old mappings in the target range first. */
	do_munmap(addr, len);

	if (file)
		error = file->f_op->mmap(file->f_inode, file, vma);
	else
		error = anon_map(NULL, NULL, vma);

	if (error) {
		kfree(vma);
		return error;
	}
	insert_vm_struct(current, vma);
	merge_segments(current, vma->vm_start, vma->vm_end);
	return addr;
}
160
161
162
163
164
165
166 unsigned long get_unmapped_area(unsigned long len)
167 {
168 struct vm_area_struct * vmm;
169 unsigned long gap_start = 0, gap_end;
170
171 for (vmm = current->mm->mmap; ; vmm = vmm->vm_next) {
172 if (gap_start < SHM_RANGE_START)
173 gap_start = SHM_RANGE_START;
174 if (!vmm || ((gap_end = vmm->vm_start) > SHM_RANGE_END))
175 gap_end = SHM_RANGE_END;
176 gap_start = PAGE_ALIGN(gap_start);
177 gap_end &= PAGE_MASK;
178 if ((gap_start <= gap_end) && (gap_end - gap_start >= len))
179 return gap_start;
180 if (!vmm)
181 return 0;
182 gap_start = vmm->vm_end;
183 }
184 }
185
/*
 * mmap() system call entry point: the six arguments arrive packed in
 * a user-space buffer as (addr, len, prot, flags, fd, offset).
 */
asmlinkage int sys_mmap(unsigned long *buffer)
{
	int error;
	unsigned long flags;
	struct file * file = NULL;

	error = verify_area(VERIFY_READ, buffer, 6*sizeof(long));
	if (error)
		return error;
	flags = get_fs_long(buffer+3);
	/* Anonymous mappings ignore the fd argument entirely. */
	if (!(flags & MAP_ANONYMOUS)) {
		unsigned long fd = get_fs_long(buffer+4);
		if (fd >= NR_OPEN || !(file = current->files->fd[fd]))
			return -EBADF;
	}
	return do_mmap(file, get_fs_long(buffer), get_fs_long(buffer+1),
		get_fs_long(buffer+2), flags, get_fs_long(buffer+5));
}
204
205
206
207
208
209
210
211
212
213
214
215 #define vm_avl_key vm_end
216 #define vm_avl_key_t unsigned long
217
218
219
220
221
222
223
224
225
226
227 #define avl_empty (struct vm_area_struct *) NULL
228
229
230 #define avl_maxheight 41
231 #define heightof(tree) ((tree) == avl_empty ? 0 : (tree)->vm_avl_height)
232
233
234
235
236
237
238
239
240
/*
 * Find the first VM area whose vm_end is greater than addr: either
 * the area containing addr or, failing that, the nearest area above
 * it.  Returns NULL when addr lies beyond every mapping.
 */
struct vm_area_struct * find_vma (struct task_struct * task, unsigned long addr)
{
#if 0
	/* Linear-list version, kept for reference. */
	struct vm_area_struct * vma;

	for (vma = task->mm->mmap ; ; vma = vma->vm_next) {
		if (!vma)
			return NULL;
		if (vma->vm_end > addr)
			return vma;
	}
#else
	/* AVL version: the tree is keyed on vm_end (see vm_avl_key). */
	struct vm_area_struct * result = NULL;
	struct vm_area_struct * tree;

	for (tree = task->mm->mmap_avl ; ; ) {
		if (tree == avl_empty)
			return result;
		if (tree->vm_end > addr) {
			if (tree->vm_start <= addr)
				return tree;	/* addr falls inside this area */
			result = tree;		/* candidate; try for a lower one */
			tree = tree->vm_avl_left;
		} else
			tree = tree->vm_avl_right;
	}
#endif
}
269
270
271
/*
 * Return the first VM area overlapping the interval
 * [start_addr, end_addr), or NULL if that range is fully unmapped.
 */
struct vm_area_struct * find_vma_intersection (struct task_struct * task, unsigned long start_addr, unsigned long end_addr)
{
	struct vm_area_struct * vma;

#if 0
	/* Linear-list version, kept for reference. */
	for (vma = task->mm->mmap; vma; vma = vma->vm_next) {
		if (end_addr <= vma->vm_start)
			break;
		if (start_addr < vma->vm_end)
			return vma;
	}
	return NULL;
#else
	/* find_vma gives the first area ending above start_addr; it
	 * intersects only if it also begins below end_addr. */
	vma = find_vma(task,start_addr);
	if (!vma || end_addr <= vma->vm_start)
		return NULL;
	return vma;
#endif
}
291
292
/*
 * Locate "node" in the AVL tree and return its in-order predecessor
 * and successor through *to_the_left / *to_the_right (NULL when
 * absent).  Also cross-checks the result against the linear vm_next
 * list, printing a diagnostic on any inconsistency.
 */
static void avl_neighbours (struct vm_area_struct * node, struct vm_area_struct * tree, struct vm_area_struct ** to_the_left, struct vm_area_struct ** to_the_right)
{
	vm_avl_key_t key = node->vm_avl_key;

	*to_the_left = *to_the_right = NULL;
	/* Descend to the node, remembering the closest smaller and
	 * larger ancestors passed on the way down. */
	for (;;) {
		if (tree == avl_empty) {
			printk("avl_neighbours: node not found in the tree\n");
			return;
		}
		if (key == tree->vm_avl_key)
			break;
		if (key < tree->vm_avl_key) {
			*to_the_right = tree;
			tree = tree->vm_avl_left;
		} else {
			*to_the_left = tree;
			tree = tree->vm_avl_right;
		}
	}
	if (tree != node) {
		printk("avl_neighbours: node not exactly found in the tree\n");
		return;
	}
	/* With a left subtree, its rightmost element is the true
	 * predecessor (closer than any ancestor). */
	if (tree->vm_avl_left != avl_empty) {
		struct vm_area_struct * node;
		for (node = tree->vm_avl_left; node->vm_avl_right != avl_empty; node = node->vm_avl_right)
			continue;
		*to_the_left = node;
	}
	/* Likewise the leftmost element of the right subtree is the
	 * true successor. */
	if (tree->vm_avl_right != avl_empty) {
		struct vm_area_struct * node;
		for (node = tree->vm_avl_right; node->vm_avl_left != avl_empty; node = node->vm_avl_left)
			continue;
		*to_the_right = node;
	}
	/* The neighbours must agree with the sorted linear list. */
	if ((*to_the_left && ((*to_the_left)->vm_next != node)) || (node->vm_next != *to_the_right))
		printk("avl_neighbours: tree inconsistent with list\n");
}
332
333
334
335
336
337
338
/*
 * Rebalance the tree after an insertion or deletion.  The caller
 * supplies the path taken from the root, as "count" slot pointers in
 * nodeplaces_ptr (deepest last); we walk it back up, applying a
 * single or double rotation wherever one subtree has become more
 * than one level deeper than its sibling, and recomputing heights.
 */
static void avl_rebalance (struct vm_area_struct *** nodeplaces_ptr, int count)
{
	for ( ; count > 0 ; count--) {
		struct vm_area_struct ** nodeplace = *--nodeplaces_ptr;
		struct vm_area_struct * node = *nodeplace;
		struct vm_area_struct * nodeleft = node->vm_avl_left;
		struct vm_area_struct * noderight = node->vm_avl_right;
		int heightleft = heightof(nodeleft);
		int heightright = heightof(noderight);
		if (heightright + 1 < heightleft) {
			/* Left subtree is too deep. */
			struct vm_area_struct * nodeleftleft = nodeleft->vm_avl_left;
			struct vm_area_struct * nodeleftright = nodeleft->vm_avl_right;
			int heightleftright = heightof(nodeleftright);
			if (heightof(nodeleftleft) >= heightleftright) {
				/* Single rotation right: nodeleft becomes
				 * the subtree root, node its right child. */
				node->vm_avl_left = nodeleftright; nodeleft->vm_avl_right = node;
				nodeleft->vm_avl_height = 1 + (node->vm_avl_height = 1 + heightleftright);
				*nodeplace = nodeleft;
			} else {
				/* Double rotation: nodeleftright is lifted
				 * to the subtree root, with nodeleft and
				 * node as its left and right children. */
				nodeleft->vm_avl_right = nodeleftright->vm_avl_left;
				node->vm_avl_left = nodeleftright->vm_avl_right;
				nodeleftright->vm_avl_left = nodeleft;
				nodeleftright->vm_avl_right = node;
				nodeleft->vm_avl_height = node->vm_avl_height = heightleftright;
				nodeleftright->vm_avl_height = heightleft;
				*nodeplace = nodeleftright;
			}
		}
		else if (heightleft + 1 < heightright) {
			/* Right subtree too deep: mirror image of the above. */
			struct vm_area_struct * noderightright = noderight->vm_avl_right;
			struct vm_area_struct * noderightleft = noderight->vm_avl_left;
			int heightrightleft = heightof(noderightleft);
			if (heightof(noderightright) >= heightrightleft) {
				/* Single rotation left. */
				node->vm_avl_right = noderightleft; noderight->vm_avl_left = node;
				noderight->vm_avl_height = 1 + (node->vm_avl_height = 1 + heightrightleft);
				*nodeplace = noderight;
			} else {
				/* Double rotation: noderightleft becomes root. */
				noderight->vm_avl_left = noderightleft->vm_avl_right;
				node->vm_avl_right = noderightleft->vm_avl_left;
				noderightleft->vm_avl_right = noderight;
				noderightleft->vm_avl_left = node;
				noderight->vm_avl_height = node->vm_avl_height = heightrightleft;
				noderightleft->vm_avl_height = heightright;
				*nodeplace = noderightleft;
			}
		}
		else {
			/* Already balanced: just refresh the height, and
			 * stop early once it no longer changes. */
			int height = (heightleft<heightright ? heightright : heightleft) + 1;
			if (height == node->vm_avl_height)
				break;
			node->vm_avl_height = height;
		}
	}
}
414
415
416 static void avl_insert (struct vm_area_struct * new_node, struct vm_area_struct ** ptree)
417 {
418 vm_avl_key_t key = new_node->vm_avl_key;
419 struct vm_area_struct ** nodeplace = ptree;
420 struct vm_area_struct ** stack[avl_maxheight];
421 int stack_count = 0;
422 struct vm_area_struct *** stack_ptr = &stack[0];
423 for (;;) {
424 struct vm_area_struct * node = *nodeplace;
425 if (node == avl_empty)
426 break;
427 *stack_ptr++ = nodeplace; stack_count++;
428 if (key < node->vm_avl_key)
429 nodeplace = &node->vm_avl_left;
430 else
431 nodeplace = &node->vm_avl_right;
432 }
433 new_node->vm_avl_left = avl_empty;
434 new_node->vm_avl_right = avl_empty;
435 new_node->vm_avl_height = 1;
436 *nodeplace = new_node;
437 avl_rebalance(stack_ptr,stack_count);
438 }
439
440
441
442
/*
 * Insert new_node into the tree exactly like avl_insert, but also
 * report its in-order predecessor and successor through *to_the_left
 * and *to_the_right (NULL when absent), so the caller can patch the
 * linear list without a second tree search.
 */
static void avl_insert_neighbours (struct vm_area_struct * new_node, struct vm_area_struct ** ptree,
	struct vm_area_struct ** to_the_left, struct vm_area_struct ** to_the_right)
{
	vm_avl_key_t key = new_node->vm_avl_key;
	struct vm_area_struct ** nodeplace = ptree;
	struct vm_area_struct ** stack[avl_maxheight];
	int stack_count = 0;
	struct vm_area_struct *** stack_ptr = &stack[0];
	*to_the_left = *to_the_right = NULL;
	for (;;) {
		struct vm_area_struct * node = *nodeplace;
		if (node == avl_empty)
			break;
		*stack_ptr++ = nodeplace; stack_count++;
		/* Each turn left passes a larger node (a successor
		 * candidate), each turn right a smaller one.  The last
		 * of each kind is the true neighbour, since the new
		 * node is inserted as a leaf. */
		if (key < node->vm_avl_key) {
			*to_the_right = node;
			nodeplace = &node->vm_avl_left;
		} else {
			*to_the_left = node;
			nodeplace = &node->vm_avl_right;
		}
	}
	new_node->vm_avl_left = avl_empty;
	new_node->vm_avl_right = avl_empty;
	new_node->vm_avl_height = 1;
	*nodeplace = new_node;
	avl_rebalance(stack_ptr,stack_count);
}
471
472
/*
 * Delete node_to_delete from the AVL tree *ptree and rebalance.  A
 * node with a left subtree is replaced by the rightmost element of
 * that subtree (its in-order predecessor).
 */
static void avl_remove (struct vm_area_struct * node_to_delete, struct vm_area_struct ** ptree)
{
	vm_avl_key_t key = node_to_delete->vm_avl_key;
	struct vm_area_struct ** nodeplace = ptree;
	struct vm_area_struct ** stack[avl_maxheight];
	int stack_count = 0;
	struct vm_area_struct *** stack_ptr = &stack[0];
	struct vm_area_struct ** nodeplace_to_delete;
	/* Descend to the node, recording the path for rebalancing. */
	for (;;) {
		struct vm_area_struct * node = *nodeplace;
		if (node == avl_empty) {
			/* Should never happen: the node is not in the tree. */
			printk("avl_remove: node to delete not found in tree\n");
			return;
		}
		*stack_ptr++ = nodeplace; stack_count++;
		if (key == node->vm_avl_key)
			break;
		if (key < node->vm_avl_key)
			nodeplace = &node->vm_avl_left;
		else
			nodeplace = &node->vm_avl_right;
	}
	nodeplace_to_delete = nodeplace;
	/* Easy case: no left child, splice in the right subtree. */
	if (node_to_delete->vm_avl_left == avl_empty) {
		*nodeplace_to_delete = node_to_delete->vm_avl_right;
		stack_ptr--; stack_count--;
	} else {
		/* Find the rightmost node of the left subtree, extending
		 * the rebalance path down to it as we go. */
		struct vm_area_struct *** stack_ptr_to_delete = stack_ptr;
		struct vm_area_struct ** nodeplace = &node_to_delete->vm_avl_left;
		struct vm_area_struct * node;
		for (;;) {
			node = *nodeplace;
			if (node->vm_avl_right == avl_empty)
				break;
			*stack_ptr++ = nodeplace; stack_count++;
			nodeplace = &node->vm_avl_right;
		}
		*nodeplace = node->vm_avl_left;
		/* Move the replacement into the deleted node's position,
		 * and fix the recorded path slot to point through it. */
		node->vm_avl_left = node_to_delete->vm_avl_left;
		node->vm_avl_right = node_to_delete->vm_avl_right;
		node->vm_avl_height = node_to_delete->vm_avl_height;
		*nodeplace_to_delete = node;
		*stack_ptr_to_delete = &node->vm_avl_left;
	}
	avl_rebalance(stack_ptr,stack_count);
}
522
523 #ifdef DEBUG_AVL
524
525
526 static void printk_list (struct vm_area_struct * vma)
527 {
528 printk("[");
529 while (vma) {
530 printk("%08lX-%08lX", vma->vm_start, vma->vm_end);
531 vma = vma->vm_next;
532 if (!vma)
533 break;
534 printk(" ");
535 }
536 printk("]");
537 }
538
539
540 static void printk_avl (struct vm_area_struct * tree)
541 {
542 if (tree != avl_empty) {
543 printk("(");
544 if (tree->vm_avl_left != avl_empty) {
545 printk_avl(tree->vm_avl_left);
546 printk("<");
547 }
548 printk("%08lX-%08lX", tree->vm_start, tree->vm_end);
549 if (tree->vm_avl_right != avl_empty) {
550 printk(">");
551 printk_avl(tree->vm_avl_right);
552 }
553 printk(")");
554 }
555 }
556
/* Label printed by the avl_check* diagnostics to identify the caller. */
static char *avl_check_point = "somewhere";
558
559
560 static void avl_checkheights (struct vm_area_struct * tree)
561 {
562 int h, hl, hr;
563
564 if (tree == avl_empty)
565 return;
566 avl_checkheights(tree->vm_avl_left);
567 avl_checkheights(tree->vm_avl_right);
568 h = tree->vm_avl_height;
569 hl = heightof(tree->vm_avl_left);
570 hr = heightof(tree->vm_avl_right);
571 if ((h == hl+1) && (hr <= hl) && (hl <= hr+1))
572 return;
573 if ((h == hr+1) && (hl <= hr) && (hr <= hl+1))
574 return;
575 printk("%s: avl_checkheights: heights inconsistent\n",avl_check_point);
576 }
577
578
579 static void avl_checkleft (struct vm_area_struct * tree, vm_avl_key_t key)
580 {
581 if (tree == avl_empty)
582 return;
583 avl_checkleft(tree->vm_avl_left,key);
584 avl_checkleft(tree->vm_avl_right,key);
585 if (tree->vm_avl_key < key)
586 return;
587 printk("%s: avl_checkleft: left key %lu >= top key %lu\n",avl_check_point,tree->vm_avl_key,key);
588 }
589
590
591 static void avl_checkright (struct vm_area_struct * tree, vm_avl_key_t key)
592 {
593 if (tree == avl_empty)
594 return;
595 avl_checkright(tree->vm_avl_left,key);
596 avl_checkright(tree->vm_avl_right,key);
597 if (tree->vm_avl_key > key)
598 return;
599 printk("%s: avl_checkright: right key %lu <= top key %lu\n",avl_check_point,tree->vm_avl_key,key);
600 }
601
602
/* Verify the binary-search-tree ordering invariant at every node. */
static void avl_checkorder (struct vm_area_struct * tree)
{
	if (tree == avl_empty)
		return;
	avl_checkorder(tree->vm_avl_left);
	avl_checkorder(tree->vm_avl_right);
	avl_checkleft(tree->vm_avl_left,tree->vm_avl_key);
	avl_checkright(tree->vm_avl_right,tree->vm_avl_key);
}
612
613
/*
 * Run all consistency checks on a task's AVL tree, tagging any
 * diagnostic output with the caller's name.
 */
static void avl_check (struct task_struct * task, char *caller)
{
	avl_check_point = caller;
	avl_checkheights(task->mm->mmap_avl);
	avl_checkorder(task->mm->mmap_avl);
}
623
624 #endif
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
/*
 * Fix up a vm_area after [addr, addr+len) has been unmapped from it:
 * either the whole area is released, an edge is trimmed, or the area
 * is split around a hole in the middle.  The caller (do_munmap) has
 * already unlinked "area" and will kfree it, so any surviving part
 * is re-inserted as a freshly allocated copy.
 */
void unmap_fixup(struct vm_area_struct *area,
	unsigned long addr, size_t len)
{
	struct vm_area_struct *mpnt;
	unsigned long end = addr + len;

	/* Sanity: the interval must lie entirely within the area. */
	if (addr < area->vm_start || addr >= area->vm_end ||
	    end <= area->vm_start || end > area->vm_end ||
	    end < addr)
	{
		printk("unmap_fixup: area=%lx-%lx, unmap %lx-%lx!!\n",
			area->vm_start, area->vm_end, addr, end);
		return;
	}

	/* Case 1: the whole area goes away -- close and drop it. */
	if (addr == area->vm_start && end == area->vm_end) {
		if (area->vm_ops && area->vm_ops->close)
			area->vm_ops->close(area);
		if (area->vm_inode)
			iput(area->vm_inode);
		return;
	}

	/* Case 2: trim the tail ... */
	if (end == area->vm_end)
		area->vm_end = addr;
	else
	/* ... or trim the head, advancing the file offset to match. */
	if (addr == area->vm_start) {
		area->vm_offset += (end - area->vm_start);
		area->vm_start = end;
	}
	else {
		/* Case 3: a hole in the middle -- split off the upper
		 * piece as a new area and keep the lower piece in
		 * "area" (re-inserted as a copy below). */
		mpnt = (struct vm_area_struct *)kmalloc(sizeof(*mpnt), GFP_KERNEL);

		if (!mpnt)
			return;
		*mpnt = *area;
		mpnt->vm_offset += (end - area->vm_start);
		mpnt->vm_start = end;
		if (mpnt->vm_inode)
			mpnt->vm_inode->i_count++;	/* extra ref for the new piece */
		if (mpnt->vm_ops && mpnt->vm_ops->open)
			mpnt->vm_ops->open(mpnt);
		area->vm_end = addr;	/* truncate the lower piece */
		insert_vm_struct(current, mpnt);
	}

	/* Re-insert a fresh copy of the (trimmed) area, since the
	 * caller frees the original structure. */
	mpnt = (struct vm_area_struct *)kmalloc(sizeof(*mpnt), GFP_KERNEL);
	if (!mpnt)
		return;
	*mpnt = *area;
	if (mpnt->vm_ops && mpnt->vm_ops->open)
		mpnt->vm_ops->open(mpnt);
	if (area->vm_ops && area->vm_ops->close) {
		/* Close the original as a zero-length area so it
		 * releases nothing the copy still covers. */
		area->vm_end = area->vm_start;
		area->vm_ops->close(area);
	}
	insert_vm_struct(current, mpnt);
}
712
713 asmlinkage int sys_munmap(unsigned long addr, size_t len)
714 {
715 return do_munmap(addr, len);
716 }
717
718
719
720
721
722
723
724 int do_munmap(unsigned long addr, size_t len)
725 {
726 struct vm_area_struct *mpnt, *prev, *next, **npp, *free;
727
728 if ((addr & ~PAGE_MASK) || addr > TASK_SIZE || len > TASK_SIZE-addr)
729 return -EINVAL;
730
731 if ((len = PAGE_ALIGN(len)) == 0)
732 return 0;
733
734
735
736
737
738
739
740 mpnt = find_vma(current, addr);
741 if (!mpnt)
742 return 0;
743 avl_neighbours(mpnt, current->mm->mmap_avl, &prev, &next);
744
745
746
747 npp = (prev ? &prev->vm_next : ¤t->mm->mmap);
748 free = NULL;
749 for ( ; mpnt && mpnt->vm_start < addr+len; mpnt = *npp) {
750 *npp = mpnt->vm_next;
751 mpnt->vm_next = free;
752 free = mpnt;
753 avl_remove(mpnt, ¤t->mm->mmap_avl);
754 }
755
756 if (free == NULL)
757 return 0;
758
759
760
761
762
763
764
765 while (free) {
766 unsigned long st, end;
767
768 mpnt = free;
769 free = free->vm_next;
770
771 remove_shared_vm_struct(mpnt);
772
773 st = addr < mpnt->vm_start ? mpnt->vm_start : addr;
774 end = addr+len;
775 end = end > mpnt->vm_end ? mpnt->vm_end : end;
776
777 if (mpnt->vm_ops && mpnt->vm_ops->unmap)
778 mpnt->vm_ops->unmap(mpnt, st, end-st);
779
780 unmap_fixup(mpnt, st, end-st);
781 kfree(mpnt);
782 }
783
784 unmap_page_range(addr, len);
785 return 0;
786 }
787
788
789 void build_mmap_avl(struct task_struct * task)
790 {
791 struct vm_area_struct * vma;
792
793 task->mm->mmap_avl = NULL;
794 for (vma = task->mm->mmap; vma; vma = vma->vm_next)
795 avl_insert(vma, &task->mm->mmap_avl);
796 }
797
798
799 void exit_mmap(struct task_struct * task)
800 {
801 struct vm_area_struct * mpnt;
802
803 mpnt = task->mm->mmap;
804 task->mm->mmap = NULL;
805 task->mm->mmap_avl = NULL;
806 while (mpnt) {
807 struct vm_area_struct * next = mpnt->vm_next;
808 if (mpnt->vm_ops && mpnt->vm_ops->close)
809 mpnt->vm_ops->close(mpnt);
810 remove_shared_vm_struct(mpnt);
811 if (mpnt->vm_inode)
812 iput(mpnt->vm_inode);
813 kfree(mpnt);
814 mpnt = next;
815 }
816 }
817
818
819
820
821
/*
 * Link vmp into task t's address-space structures: the sorted linear
 * list, the AVL tree, and -- for file mappings -- the inode's
 * circular ring of sharing areas.
 */
void insert_vm_struct(struct task_struct *t, struct vm_area_struct *vmp)
{
	struct vm_area_struct *share;
	struct inode * inode;

#if 0
	/* Linear-list insertion, kept for reference. */
	struct vm_area_struct **p, *mpnt;

	p = &t->mm->mmap;
	while ((mpnt = *p) != NULL) {
		if (mpnt->vm_start > vmp->vm_start)
			break;
		if (mpnt->vm_end > vmp->vm_start)
			printk("insert_vm_struct: overlapping memory areas\n");
		p = &mpnt->vm_next;
	}
	vmp->vm_next = mpnt;
	*p = vmp;
#else
	struct vm_area_struct * prev, * next;

	/* The AVL insert also yields the list neighbours, so the
	 * linear list can be patched without a separate scan. */
	avl_insert_neighbours(vmp, &t->mm->mmap_avl, &prev, &next);
	if ((prev ? prev->vm_next : t->mm->mmap) != next)
		printk("insert_vm_struct: tree inconsistent with list\n");
	if (prev)
		prev->vm_next = vmp;
	else
		t->mm->mmap = vmp;
	vmp->vm_next = next;
#endif

	inode = vmp->vm_inode;
	if (!inode)
		return;

	/* Splice into the inode's sharing ring, or start a new one. */
	if ((share = inode->i_mmap)) {
		vmp->vm_next_share = share->vm_next_share;
		vmp->vm_next_share->vm_prev_share = vmp;
		share->vm_next_share = vmp;
		vmp->vm_prev_share = share;
	} else
		inode->i_mmap = vmp->vm_next_share = vmp->vm_prev_share = vmp;
}
866
867
868
869
870 void remove_shared_vm_struct(struct vm_area_struct *mpnt)
871 {
872 struct inode * inode = mpnt->vm_inode;
873
874 if (!inode)
875 return;
876
877 if (mpnt->vm_next_share == mpnt) {
878 if (inode->i_mmap != mpnt)
879 printk("Inode i_mmap ring corrupted\n");
880 inode->i_mmap = NULL;
881 return;
882 }
883
884 if (inode->i_mmap == mpnt)
885 inode->i_mmap = mpnt->vm_next_share;
886
887 mpnt->vm_prev_share->vm_next_share = mpnt->vm_next_share;
888 mpnt->vm_next_share->vm_prev_share = mpnt->vm_prev_share;
889 }
890
891
892
893
894
895
896
897
/*
 * Coalesce adjacent, compatible VM areas within the range
 * [start_addr, end_addr).  Called after an operation (mmap,
 * mprotect, ...) may have left mergeable neighbours behind.
 */
void merge_segments (struct task_struct * task, unsigned long start_addr, unsigned long end_addr)
{
	struct vm_area_struct *prev, *mpnt, *next;

	mpnt = find_vma(task, start_addr);
	if (!mpnt)
		return;
	avl_neighbours(mpnt, task->mm->mmap_avl, &prev, &next);

	/* No predecessor: start merging from the found area itself. */
	if (!prev) {
		prev = mpnt;
		mpnt = next;
	}

	/* Walk forward, folding mpnt into prev whenever the pair is
	 * contiguous and compatible. */
	for ( ; mpnt && prev->vm_start < end_addr ; prev = mpnt, mpnt = next) {
#if 0
		printk("looping in merge_segments, mpnt=0x%lX\n", (unsigned long) mpnt);
#endif

		next = mpnt->vm_next;

		/* The areas must match in everything except extent. */
		if (mpnt->vm_inode != prev->vm_inode)
			continue;
		if (mpnt->vm_pte != prev->vm_pte)
			continue;
		if (mpnt->vm_ops != prev->vm_ops)
			continue;
		if (mpnt->vm_flags != prev->vm_flags)
			continue;
		if (prev->vm_end != mpnt->vm_start)
			continue;

		/* For file or SHM mappings the offsets must line up too. */
		if ((mpnt->vm_inode != NULL) || (mpnt->vm_flags & VM_SHM)) {
			if (prev->vm_offset + prev->vm_end - prev->vm_start != mpnt->vm_offset)
				continue;
		}

		/* Merge: extend prev over mpnt and discard mpnt.  The
		 * close() sees a zero-length area so it releases
		 * nothing that prev now covers. */
		avl_remove(mpnt, &task->mm->mmap_avl);
		prev->vm_end = mpnt->vm_end;
		prev->vm_next = mpnt->vm_next;
		if (mpnt->vm_ops && mpnt->vm_ops->close) {
			mpnt->vm_offset += mpnt->vm_end - mpnt->vm_start;
			mpnt->vm_start = mpnt->vm_end;
			mpnt->vm_ops->close(mpnt);
		}
		remove_shared_vm_struct(mpnt);
		if (mpnt->vm_inode)
			mpnt->vm_inode->i_count--;
		kfree_s(mpnt, sizeof(*mpnt));
		/* Keep prev in place so the next iteration retries it
		 * against the following area. */
		mpnt = prev;
	}
}
964
965
966
967
968
969 static int anon_map(struct inode *ino, struct file * file, struct vm_area_struct * vma)
970 {
971 if (zeromap_page_range(vma->vm_start, vma->vm_end - vma->vm_start, vma->vm_page_prot))
972 return -ENOMEM;
973 return 0;
974 }