This source file includes the following definitions.
- do_mmap
- get_unmapped_area
- sys_mmap
- find_vma
- find_vma_intersection
- avl_neighbours
- avl_rebalance
- avl_insert
- avl_insert_neighbours
- avl_remove
- printk_list
- printk_avl
- avl_checkheights
- avl_checkleft
- avl_checkright
- avl_checkorder
- avl_check
- unmap_fixup
- sys_munmap
- do_munmap
- build_mmap_avl
- exit_mmap
- insert_vm_struct
- remove_shared_vm_struct
- merge_segments
- anon_map
1
2
3
4
5
6 #include <linux/stat.h>
7 #include <linux/sched.h>
8 #include <linux/kernel.h>
9 #include <linux/mm.h>
10 #include <linux/shm.h>
11 #include <linux/errno.h>
12 #include <linux/mman.h>
13 #include <linux/string.h>
14 #include <linux/malloc.h>
15
16 #include <asm/segment.h>
17 #include <asm/system.h>
18 #include <asm/pgtable.h>
19
20 static int anon_map(struct inode *, struct file *, struct vm_area_struct *);
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
/*
 * Table mapping the low four bits of vm_flags to hardware page
 * protection bits (see its use: protection_map[vma->vm_flags & 0x0f]).
 * The first 8 entries are the __P* (private) combinations, the last 8
 * the __S* (shared) ones.
 * NOTE(review): the low-nibble layout is presumably
 * read/write/exec/shared — confirm against the VM_* definitions.
 */
pgprot_t protection_map[16] = {
	__P000, __P001, __P010, __P011, __P100, __P101, __P110, __P111,
	__S000, __S001, __S010, __S011, __S100, __S101, __S110, __S111
};
43
/*
 * do_mmap() - create a new memory mapping for the current task.
 *
 * file:  backing file, or NULL for an anonymous mapping
 * addr:  requested address (only binding with MAP_FIXED)
 * len:   length in bytes (rounded up to a page multiple)
 * prot:  PROT_* protection bits
 * flags: MAP_* flags (type, fixed, denywrite, ...)
 * off:   offset into the file
 *
 * Returns the address of the mapping, or a negative errno.
 */
unsigned long do_mmap(struct file * file, unsigned long addr, unsigned long len,
	unsigned long prot, unsigned long flags, unsigned long off)
{
	int error;
	struct vm_area_struct * vma;

	/* A zero-length mapping is a no-op: report success at 'addr'. */
	if ((len = PAGE_ALIGN(len)) == 0)
		return addr;

	if (addr > TASK_SIZE || len > TASK_SIZE || addr > TASK_SIZE-len)
		return -EINVAL;

	/* offset overflow? (off + len wrapped around) */
	if (off + len < off)
		return -EINVAL;

	/*
	 * Check the file mode against the request:
	 * a writable MAP_SHARED mapping needs the file open for writing,
	 * and any file-backed mapping needs it open for reading.
	 */
	if (file != NULL) {
		switch (flags & MAP_TYPE) {
		case MAP_SHARED:
			if ((prot & PROT_WRITE) && !(file->f_mode & 2))
				return -EACCES;
			/* fall through: shared mappings also need read access */
		case MAP_PRIVATE:
			if (!(file->f_mode & 1))
				return -EACCES;
			break;

		default:
			return -EINVAL;
		}
		/* MAP_DENYWRITE conflicts with writers of the file. */
		if ((flags & MAP_DENYWRITE) && (file->f_inode->i_wcount > 0))
			return -ETXTBSY;
	} else if ((flags & MAP_TYPE) != MAP_PRIVATE)
		return -EINVAL;

	/*
	 * Obtain the address to map to: MAP_FIXED uses the caller's
	 * (page-aligned) address, otherwise pick a free range.
	 */
	if (flags & MAP_FIXED) {
		if (addr & ~PAGE_MASK)
			return -EINVAL;
		if (len > TASK_SIZE || addr > TASK_SIZE - len)
			return -EINVAL;
	} else {
		addr = get_unmapped_area(len);
		if (!addr)
			return -ENOMEM;
	}

	/* A file-backed mapping requires an mmap operation. */
	if (file && (!file->f_op || !file->f_op->mmap))
		return -ENODEV;

	vma = (struct vm_area_struct *)kmalloc(sizeof(struct vm_area_struct),
		GFP_KERNEL);
	if (!vma)
		return -ENOMEM;

	vma->vm_task = current;
	vma->vm_start = addr;
	vma->vm_end = addr + len;
	/* NOTE(review): this relies on PROT_*/MAP_* sharing bit values
	   with the corresponding VM_* flags — confirm in the headers. */
	vma->vm_flags = prot & (VM_READ | VM_WRITE | VM_EXEC);
	vma->vm_flags |= flags & (VM_GROWSDOWN | VM_DENYWRITE | VM_EXECUTABLE);

	if (file) {
		if (file->f_mode & 1)
			vma->vm_flags |= VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC;
		if (flags & MAP_SHARED) {
			vma->vm_flags |= VM_SHARED | VM_MAYSHARE;
			/* A read-only shared mapping can never become
			   writable, so treat it as private. */
			if (!(file->f_mode & 2))
				vma->vm_flags &= ~(VM_MAYWRITE | VM_SHARED);
		}
	} else
		vma->vm_flags |= VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC;
	vma->vm_page_prot = protection_map[vma->vm_flags & 0x0f];
	vma->vm_ops = NULL;
	vma->vm_offset = off;
	vma->vm_inode = NULL;
	vma->vm_pte = 0;

	do_munmap(addr, len);	/* clear any old mappings in the range */

	if (file)
		error = file->f_op->mmap(file->f_inode, file, vma);
	else
		error = anon_map(NULL, NULL, vma);

	if (error) {
		kfree(vma);
		return error;
	}
	/* Link into the task's vma structures and coalesce neighbours. */
	insert_vm_struct(current, vma);
	merge_segments(current, vma->vm_start, vma->vm_end);
	return addr;
}
161
162
163
164
165
166
167 unsigned long get_unmapped_area(unsigned long len)
168 {
169 struct vm_area_struct * vmm;
170 unsigned long gap_start = 0, gap_end;
171
172 for (vmm = current->mm->mmap; ; vmm = vmm->vm_next) {
173 if (gap_start < SHM_RANGE_START)
174 gap_start = SHM_RANGE_START;
175 if (!vmm || ((gap_end = vmm->vm_start) > SHM_RANGE_END))
176 gap_end = SHM_RANGE_END;
177 gap_start = PAGE_ALIGN(gap_start);
178 gap_end &= PAGE_MASK;
179 if ((gap_start <= gap_end) && (gap_end - gap_start >= len))
180 return gap_start;
181 if (!vmm)
182 return 0;
183 gap_start = vmm->vm_end;
184 }
185 }
186
187 asmlinkage int sys_mmap(unsigned long *buffer)
188 {
189 int error;
190 unsigned long flags;
191 struct file * file = NULL;
192
193 error = verify_area(VERIFY_READ, buffer, 6*sizeof(long));
194 if (error)
195 return error;
196 flags = get_fs_long(buffer+3);
197 if (!(flags & MAP_ANONYMOUS)) {
198 unsigned long fd = get_fs_long(buffer+4);
199 if (fd >= NR_OPEN || !(file = current->files->fd[fd]))
200 return -EBADF;
201 }
202 return do_mmap(file, get_fs_long(buffer), get_fs_long(buffer+1),
203 get_fs_long(buffer+2), flags, get_fs_long(buffer+5));
204 }
205
206
207
208
209
210
211
212
213
214
215
/* The task's vma AVL tree is keyed on vm_end. */
#define vm_avl_key vm_end
#define vm_avl_key_t unsigned long

/* An empty (sub)tree is represented by a NULL pointer. */
#define avl_empty (struct vm_area_struct *) NULL

/* Upper bound on tree depth, sizing the rebalance path arrays.
   NOTE(review): presumably the worst-case AVL height for a 32-bit key
   space — confirm before changing. */
#define avl_maxheight 41
#define heightof(tree) ((tree) == avl_empty ? 0 : (tree)->vm_avl_height)
233
234
235
236
237
238
239
240
241
/*
 * find_vma() - return the lowest VM area with vm_end > addr, i.e. the
 * area containing addr or, failing that, the closest one above it.
 * Returns NULL if no such area exists.  The AVL tree (keyed on vm_end)
 * gives O(log n) lookup; the #if 0 branch preserves the old linear
 * list walk for reference.
 */
struct vm_area_struct * find_vma (struct task_struct * task, unsigned long addr)
{
#if 0
	struct vm_area_struct * vma;

	for (vma = task->mm->mmap ; ; vma = vma->vm_next) {
		if (!vma)
			return NULL;
		if (vma->vm_end > addr)
			return vma;
	}
#else
	struct vm_area_struct * result = NULL;
	struct vm_area_struct * tree;

	for (tree = task->mm->mmap_avl ; ; ) {
		if (tree == avl_empty)
			return result;
		if (tree->vm_end > addr) {
			if (tree->vm_start <= addr)
				return tree;	/* addr falls inside this area */
			/* Candidate found; keep looking left for a lower one. */
			result = tree;
			tree = tree->vm_avl_left;
		} else
			tree = tree->vm_avl_right;
	}
#endif
}
270
271
272
/*
 * find_vma_intersection() - return the first VM area overlapping
 * [start_addr, end_addr), or NULL if the range is entirely unmapped.
 */
struct vm_area_struct * find_vma_intersection (struct task_struct * task, unsigned long start_addr, unsigned long end_addr)
{
	struct vm_area_struct * vma;

#if 0
	/* Old linear-search version, kept for reference. */
	for (vma = task->mm->mmap; vma; vma = vma->vm_next) {
		if (end_addr <= vma->vm_start)
			break;
		if (start_addr < vma->vm_end)
			return vma;
	}
	return NULL;
#else
	/* find_vma() gives the lowest area with vm_end > start_addr; it
	   intersects the range unless it begins at or past end_addr. */
	vma = find_vma(task,start_addr);
	if (!vma || end_addr <= vma->vm_start)
		return NULL;
	return vma;
#endif
}
292
293
/*
 * Look up the in-order neighbours of 'node' in 'tree' (NULL at either
 * end).  Descends by key to locate the node, remembering the best
 * neighbour candidates along the path, then refines them from the
 * node's own subtrees.  Also cross-checks the result against the
 * task's sorted linked list, printing diagnostics on inconsistency.
 */
static void avl_neighbours (struct vm_area_struct * node, struct vm_area_struct * tree, struct vm_area_struct ** to_the_left, struct vm_area_struct ** to_the_right)
{
	vm_avl_key_t key = node->vm_avl_key;

	*to_the_left = *to_the_right = NULL;
	for (;;) {
		if (tree == avl_empty) {
			printk("avl_neighbours: node not found in the tree\n");
			return;
		}
		if (key == tree->vm_avl_key)
			break;
		if (key < tree->vm_avl_key) {
			/* going left: this node is the closest right
			   neighbour seen so far */
			*to_the_right = tree;
			tree = tree->vm_avl_left;
		} else {
			*to_the_left = tree;
			tree = tree->vm_avl_right;
		}
	}
	if (tree != node) {
		printk("avl_neighbours: node not exactly found in the tree\n");
		return;
	}
	/* If a left subtree exists, the true predecessor is its
	   rightmost node (overrides the path candidate). */
	if (tree->vm_avl_left != avl_empty) {
		struct vm_area_struct * node;
		for (node = tree->vm_avl_left; node->vm_avl_right != avl_empty; node = node->vm_avl_right)
			continue;
		*to_the_left = node;
	}
	/* Symmetrically, the successor is the leftmost node of the
	   right subtree. */
	if (tree->vm_avl_right != avl_empty) {
		struct vm_area_struct * node;
		for (node = tree->vm_avl_right; node->vm_avl_left != avl_empty; node = node->vm_avl_left)
			continue;
		*to_the_right = node;
	}
	/* The tree neighbours must agree with the linked list. */
	if ((*to_the_left && ((*to_the_left)->vm_next != node)) || (node->vm_next != *to_the_right))
		printk("avl_neighbours: tree inconsistent with list\n");
}
333
334
335
336
337
338
339
/*
 * Rebalance the tree after an insertion or deletion.
 * nodeplaces_ptr points one past an array of 'count' pointers-to-slots
 * recorded from the root down to the modified position; they are
 * revisited bottom-up, applying a single or double rotation wherever
 * one subtree has become two levels deeper than its sibling.
 */
static void avl_rebalance (struct vm_area_struct *** nodeplaces_ptr, int count)
{
	for ( ; count > 0 ; count--) {
		struct vm_area_struct ** nodeplace = *--nodeplaces_ptr;
		struct vm_area_struct * node = *nodeplace;
		struct vm_area_struct * nodeleft = node->vm_avl_left;
		struct vm_area_struct * noderight = node->vm_avl_right;
		int heightleft = heightof(nodeleft);
		int heightright = heightof(noderight);
		if (heightright + 1 < heightleft) {
			/* Left subtree too deep: rotate right. */
			struct vm_area_struct * nodeleftleft = nodeleft->vm_avl_left;
			struct vm_area_struct * nodeleftright = nodeleft->vm_avl_right;
			int heightleftright = heightof(nodeleftright);
			if (heightof(nodeleftleft) >= heightleftright) {
				/* Single right rotation: nodeleft becomes
				   the subtree root, node its right child. */
				node->vm_avl_left = nodeleftright; nodeleft->vm_avl_right = node;
				nodeleft->vm_avl_height = 1 + (node->vm_avl_height = 1 + heightleftright);
				*nodeplace = nodeleft;
			} else {
				/* Double (left-right) rotation: nodeleftright
				   is lifted to the subtree root. */
				nodeleft->vm_avl_right = nodeleftright->vm_avl_left;
				node->vm_avl_left = nodeleftright->vm_avl_right;
				nodeleftright->vm_avl_left = nodeleft;
				nodeleftright->vm_avl_right = node;
				nodeleft->vm_avl_height = node->vm_avl_height = heightleftright;
				nodeleftright->vm_avl_height = heightleft;
				*nodeplace = nodeleftright;
			}
		}
		else if (heightleft + 1 < heightright) {
			/* Mirror image: right subtree too deep. */
			struct vm_area_struct * noderightright = noderight->vm_avl_right;
			struct vm_area_struct * noderightleft = noderight->vm_avl_left;
			int heightrightleft = heightof(noderightleft);
			if (heightof(noderightright) >= heightrightleft) {
				/* Single left rotation. */
				node->vm_avl_right = noderightleft; noderight->vm_avl_left = node;
				noderight->vm_avl_height = 1 + (node->vm_avl_height = 1 + heightrightleft);
				*nodeplace = noderight;
			} else {
				/* Double (right-left) rotation. */
				noderight->vm_avl_left = noderightleft->vm_avl_right;
				node->vm_avl_right = noderightleft->vm_avl_left;
				noderightleft->vm_avl_right = noderight;
				noderightleft->vm_avl_left = node;
				noderight->vm_avl_height = node->vm_avl_height = heightrightleft;
				noderightleft->vm_avl_height = heightright;
				*nodeplace = noderightleft;
			}
		}
		else {
			/* Subtree balanced: recompute the height; if it is
			   unchanged, no ancestor can be affected — stop. */
			int height = (heightleft<heightright ? heightright : heightleft) + 1;
			if (height == node->vm_avl_height)
				break;
			node->vm_avl_height = height;
		}
	}
}
415
416
417 static void avl_insert (struct vm_area_struct * new_node, struct vm_area_struct ** ptree)
418 {
419 vm_avl_key_t key = new_node->vm_avl_key;
420 struct vm_area_struct ** nodeplace = ptree;
421 struct vm_area_struct ** stack[avl_maxheight];
422 int stack_count = 0;
423 struct vm_area_struct *** stack_ptr = &stack[0];
424 for (;;) {
425 struct vm_area_struct * node = *nodeplace;
426 if (node == avl_empty)
427 break;
428 *stack_ptr++ = nodeplace; stack_count++;
429 if (key < node->vm_avl_key)
430 nodeplace = &node->vm_avl_left;
431 else
432 nodeplace = &node->vm_avl_right;
433 }
434 new_node->vm_avl_left = avl_empty;
435 new_node->vm_avl_right = avl_empty;
436 new_node->vm_avl_height = 1;
437 *nodeplace = new_node;
438 avl_rebalance(stack_ptr,stack_count);
439 }
440
441
442
443
444 static void avl_insert_neighbours (struct vm_area_struct * new_node, struct vm_area_struct ** ptree,
445 struct vm_area_struct ** to_the_left, struct vm_area_struct ** to_the_right)
446 {
447 vm_avl_key_t key = new_node->vm_avl_key;
448 struct vm_area_struct ** nodeplace = ptree;
449 struct vm_area_struct ** stack[avl_maxheight];
450 int stack_count = 0;
451 struct vm_area_struct *** stack_ptr = &stack[0];
452 *to_the_left = *to_the_right = NULL;
453 for (;;) {
454 struct vm_area_struct * node = *nodeplace;
455 if (node == avl_empty)
456 break;
457 *stack_ptr++ = nodeplace; stack_count++;
458 if (key < node->vm_avl_key) {
459 *to_the_right = node;
460 nodeplace = &node->vm_avl_left;
461 } else {
462 *to_the_left = node;
463 nodeplace = &node->vm_avl_right;
464 }
465 }
466 new_node->vm_avl_left = avl_empty;
467 new_node->vm_avl_right = avl_empty;
468 new_node->vm_avl_height = 1;
469 *nodeplace = new_node;
470 avl_rebalance(stack_ptr,stack_count);
471 }
472
473
/*
 * Remove node_to_delete from the tree rooted at *ptree.  Locates the
 * node's slot by key, then either splices in its right subtree (no
 * left child) or replaces it by its in-order predecessor — the
 * rightmost node of the left subtree — and rebalances along the
 * recorded path.
 */
static void avl_remove (struct vm_area_struct * node_to_delete, struct vm_area_struct ** ptree)
{
	vm_avl_key_t key = node_to_delete->vm_avl_key;
	struct vm_area_struct ** nodeplace = ptree;
	struct vm_area_struct ** stack[avl_maxheight];
	int stack_count = 0;
	struct vm_area_struct *** stack_ptr = &stack[0];
	struct vm_area_struct ** nodeplace_to_delete;
	for (;;) {
		struct vm_area_struct * node = *nodeplace;
		if (node == avl_empty) {
			/* caller handed us a node that isn't in this tree */
			printk("avl_remove: node to delete not found in tree\n");
			return;
		}
		*stack_ptr++ = nodeplace; stack_count++;
		if (key == node->vm_avl_key)
			break;
		if (key < node->vm_avl_key)
			nodeplace = &node->vm_avl_left;
		else
			nodeplace = &node->vm_avl_right;
	}
	nodeplace_to_delete = nodeplace;
	/* Have to remove node_to_delete = *nodeplace_to_delete. */
	if (node_to_delete->vm_avl_left == avl_empty) {
		/* No left child: splice in the right subtree directly and
		   drop the deleted slot from the rebalance path. */
		*nodeplace_to_delete = node_to_delete->vm_avl_right;
		stack_ptr--; stack_count--;
	} else {
		/* Find the rightmost node of the left subtree ... */
		struct vm_area_struct *** stack_ptr_to_delete = stack_ptr;
		struct vm_area_struct ** nodeplace = &node_to_delete->vm_avl_left;
		struct vm_area_struct * node;
		for (;;) {
			node = *nodeplace;
			if (node->vm_avl_right == avl_empty)
				break;
			*stack_ptr++ = nodeplace; stack_count++;
			nodeplace = &node->vm_avl_right;
		}
		*nodeplace = node->vm_avl_left;
		/* ... and let it take node_to_delete's place in the tree. */
		node->vm_avl_left = node_to_delete->vm_avl_left;
		node->vm_avl_right = node_to_delete->vm_avl_right;
		node->vm_avl_height = node_to_delete->vm_avl_height;
		*nodeplace_to_delete = node;
		/* the recorded path now passes through node's left slot */
		*stack_ptr_to_delete = &node->vm_avl_left;
	}
	avl_rebalance(stack_ptr,stack_count);
}
523
524 #ifdef DEBUG_AVL
525
526
527 static void printk_list (struct vm_area_struct * vma)
528 {
529 printk("[");
530 while (vma) {
531 printk("%08lX-%08lX", vma->vm_start, vma->vm_end);
532 vma = vma->vm_next;
533 if (!vma)
534 break;
535 printk(" ");
536 }
537 printk("]");
538 }
539
540
541 static void printk_avl (struct vm_area_struct * tree)
542 {
543 if (tree != avl_empty) {
544 printk("(");
545 if (tree->vm_avl_left != avl_empty) {
546 printk_avl(tree->vm_avl_left);
547 printk("<");
548 }
549 printk("%08lX-%08lX", tree->vm_start, tree->vm_end);
550 if (tree->vm_avl_right != avl_empty) {
551 printk(">");
552 printk_avl(tree->vm_avl_right);
553 }
554 printk(")");
555 }
556 }
557
558 static char *avl_check_point = "somewhere";
559
560
561 static void avl_checkheights (struct vm_area_struct * tree)
562 {
563 int h, hl, hr;
564
565 if (tree == avl_empty)
566 return;
567 avl_checkheights(tree->vm_avl_left);
568 avl_checkheights(tree->vm_avl_right);
569 h = tree->vm_avl_height;
570 hl = heightof(tree->vm_avl_left);
571 hr = heightof(tree->vm_avl_right);
572 if ((h == hl+1) && (hr <= hl) && (hl <= hr+1))
573 return;
574 if ((h == hr+1) && (hl <= hr) && (hr <= hl+1))
575 return;
576 printk("%s: avl_checkheights: heights inconsistent\n",avl_check_point);
577 }
578
579
580 static void avl_checkleft (struct vm_area_struct * tree, vm_avl_key_t key)
581 {
582 if (tree == avl_empty)
583 return;
584 avl_checkleft(tree->vm_avl_left,key);
585 avl_checkleft(tree->vm_avl_right,key);
586 if (tree->vm_avl_key < key)
587 return;
588 printk("%s: avl_checkleft: left key %lu >= top key %lu\n",avl_check_point,tree->vm_avl_key,key);
589 }
590
591
592 static void avl_checkright (struct vm_area_struct * tree, vm_avl_key_t key)
593 {
594 if (tree == avl_empty)
595 return;
596 avl_checkright(tree->vm_avl_left,key);
597 avl_checkright(tree->vm_avl_right,key);
598 if (tree->vm_avl_key > key)
599 return;
600 printk("%s: avl_checkright: right key %lu <= top key %lu\n",avl_check_point,tree->vm_avl_key,key);
601 }
602
603
/* Verify the binary-search-tree ordering property for every node. */
static void avl_checkorder (struct vm_area_struct * tree)
{
	if (tree == avl_empty)
		return;
	avl_checkorder(tree->vm_avl_left);
	avl_checkorder(tree->vm_avl_right);
	avl_checkleft(tree->vm_avl_left,tree->vm_avl_key);
	avl_checkright(tree->vm_avl_right,tree->vm_avl_key);
}
613
614
/* Run the full consistency check (heights + ordering) on the task's
   vma tree, tagging diagnostics with the caller's name. */
static void avl_check (struct task_struct * task, char *caller)
{
	avl_check_point = caller;
	avl_checkheights(task->mm->mmap_avl);
	avl_checkorder(task->mm->mmap_avl);
}
624
625 #endif
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
/*
 * Fix up a vm area after [addr, addr+len) has been unmapped from it.
 * Four cases: the range covers the whole area, trims its tail, trims
 * its head, or punches a hole in the middle (splitting it in two).
 * NOTE(review): the caller (do_munmap) has already unlinked 'area'
 * from the task's structures and will kfree() it; any surviving
 * piece is re-inserted here as a fresh copy.
 */
void unmap_fixup(struct vm_area_struct *area,
	unsigned long addr, size_t len)
{
	struct vm_area_struct *mpnt;
	unsigned long end = addr + len;

	/* Sanity check: the range must lie entirely inside the area. */
	if (addr < area->vm_start || addr >= area->vm_end ||
	end <= area->vm_start || end > area->vm_end ||
	end < addr)
	{
		printk("unmap_fixup: area=%lx-%lx, unmap %lx-%lx!!\n",
			area->vm_start, area->vm_end, addr, end);
		return;
	}

	/* Whole area unmapped: close it and release its inode. */
	if (addr == area->vm_start && end == area->vm_end) {
		if (area->vm_ops && area->vm_ops->close)
			area->vm_ops->close(area);
		if (area->vm_inode)
			iput(area->vm_inode);
		return;
	}

	/* Trim the tail, trim the head, or split around a hole. */
	if (end == area->vm_end)
		area->vm_end = addr;
	else
	if (addr == area->vm_start) {
		area->vm_offset += (end - area->vm_start);
		area->vm_start = end;
	}
	else {
		/* Hole in the middle: create and insert the tail piece
		   now; the head piece is handled below. */
		mpnt = (struct vm_area_struct *)kmalloc(sizeof(*mpnt), GFP_KERNEL);

		if (!mpnt)
			return;
		*mpnt = *area;
		mpnt->vm_offset += (end - area->vm_start);
		mpnt->vm_start = end;
		if (mpnt->vm_inode)
			mpnt->vm_inode->i_count++;	/* tail keeps its own ref */
		if (mpnt->vm_ops && mpnt->vm_ops->open)
			mpnt->vm_ops->open(mpnt);
		area->vm_end = addr;	/* area now describes the head piece */
		insert_vm_struct(current, mpnt);
	}

	/* Re-insert the surviving (possibly trimmed) piece as a fresh
	   copy, and close the original, which the caller will free. */
	mpnt = (struct vm_area_struct *)kmalloc(sizeof(*mpnt), GFP_KERNEL);
	if (!mpnt)
		return;
	*mpnt = *area;
	if (mpnt->vm_ops && mpnt->vm_ops->open)
		mpnt->vm_ops->open(mpnt);
	if (area->vm_ops && area->vm_ops->close) {
		area->vm_end = area->vm_start;	/* zero length before close */
		area->vm_ops->close(area);
	}
	insert_vm_struct(current, mpnt);
}
713
/* munmap() system call entry: unmap [addr, addr+len) from the current
   task.  All the work is done by do_munmap(). */
asmlinkage int sys_munmap(unsigned long addr, size_t len)
{
	return do_munmap(addr, len);
}
718
719
720
721
722
723
724
725 int do_munmap(unsigned long addr, size_t len)
726 {
727 struct vm_area_struct *mpnt, *prev, *next, **npp, *free;
728
729 if ((addr & ~PAGE_MASK) || addr > TASK_SIZE || len > TASK_SIZE-addr)
730 return -EINVAL;
731
732 if ((len = PAGE_ALIGN(len)) == 0)
733 return 0;
734
735
736
737
738
739
740
741 mpnt = find_vma(current, addr);
742 if (!mpnt)
743 return 0;
744 avl_neighbours(mpnt, current->mm->mmap_avl, &prev, &next);
745
746
747
748 npp = (prev ? &prev->vm_next : ¤t->mm->mmap);
749 free = NULL;
750 for ( ; mpnt && mpnt->vm_start < addr+len; mpnt = *npp) {
751 *npp = mpnt->vm_next;
752 mpnt->vm_next = free;
753 free = mpnt;
754 avl_remove(mpnt, ¤t->mm->mmap_avl);
755 }
756
757 if (free == NULL)
758 return 0;
759
760
761
762
763
764
765
766 while (free) {
767 unsigned long st, end;
768
769 mpnt = free;
770 free = free->vm_next;
771
772 remove_shared_vm_struct(mpnt);
773
774 st = addr < mpnt->vm_start ? mpnt->vm_start : addr;
775 end = addr+len;
776 end = end > mpnt->vm_end ? mpnt->vm_end : end;
777
778 if (mpnt->vm_ops && mpnt->vm_ops->unmap)
779 mpnt->vm_ops->unmap(mpnt, st, end-st);
780
781 unmap_fixup(mpnt, st, end-st);
782 kfree(mpnt);
783 }
784
785 unmap_page_range(addr, len);
786 return 0;
787 }
788
789
790 void build_mmap_avl(struct task_struct * task)
791 {
792 struct vm_area_struct * vma;
793
794 task->mm->mmap_avl = NULL;
795 for (vma = task->mm->mmap; vma; vma = vma->vm_next)
796 avl_insert(vma, &task->mm->mmap_avl);
797 }
798
799
800 void exit_mmap(struct task_struct * task)
801 {
802 struct vm_area_struct * mpnt;
803
804 mpnt = task->mm->mmap;
805 task->mm->mmap = NULL;
806 task->mm->mmap_avl = NULL;
807 while (mpnt) {
808 struct vm_area_struct * next = mpnt->vm_next;
809 if (mpnt->vm_ops && mpnt->vm_ops->close)
810 mpnt->vm_ops->close(mpnt);
811 remove_shared_vm_struct(mpnt);
812 if (mpnt->vm_inode)
813 iput(mpnt->vm_inode);
814 kfree(mpnt);
815 mpnt = next;
816 }
817 }
818
819
820
821
822
/*
 * insert_vm_struct() - link vmp into task t's sorted vma list and AVL
 * tree, and onto its inode's circular ring of shared mappings if it
 * maps a file.
 */
void insert_vm_struct(struct task_struct *t, struct vm_area_struct *vmp)
{
	struct vm_area_struct *share;
	struct inode * inode;

#if 0
	/* Old linear-list insertion, kept for reference. */
	struct vm_area_struct **p, *mpnt;

	p = &t->mm->mmap;
	while ((mpnt = *p) != NULL) {
		if (mpnt->vm_start > vmp->vm_start)
			break;
		if (mpnt->vm_end > vmp->vm_start)
			printk("insert_vm_struct: overlapping memory areas\n");
		p = &mpnt->vm_next;
	}
	vmp->vm_next = mpnt;
	*p = vmp;
#else
	struct vm_area_struct * prev, * next;

	/* The AVL insertion also yields the list neighbours, so the
	   sorted singly-linked list can be patched in O(1). */
	avl_insert_neighbours(vmp, &t->mm->mmap_avl, &prev, &next);
	if ((prev ? prev->vm_next : t->mm->mmap) != next)
		printk("insert_vm_struct: tree inconsistent with list\n");
	if (prev)
		prev->vm_next = vmp;
	else
		t->mm->mmap = vmp;
	vmp->vm_next = next;
#endif

	inode = vmp->vm_inode;
	if (!inode)
		return;

	/* Splice into the inode's circular sharing ring (or start one). */
	if ((share = inode->i_mmap)) {
		vmp->vm_next_share = share->vm_next_share;
		vmp->vm_next_share->vm_prev_share = vmp;
		share->vm_next_share = vmp;
		vmp->vm_prev_share = share;
	} else
		inode->i_mmap = vmp->vm_next_share = vmp->vm_prev_share = vmp;
}
867
868
869
870
871 void remove_shared_vm_struct(struct vm_area_struct *mpnt)
872 {
873 struct inode * inode = mpnt->vm_inode;
874
875 if (!inode)
876 return;
877
878 if (mpnt->vm_next_share == mpnt) {
879 if (inode->i_mmap != mpnt)
880 printk("Inode i_mmap ring corrupted\n");
881 inode->i_mmap = NULL;
882 return;
883 }
884
885 if (inode->i_mmap == mpnt)
886 inode->i_mmap = mpnt->vm_next_share;
887
888 mpnt->vm_prev_share->vm_next_share = mpnt->vm_next_share;
889 mpnt->vm_next_share->vm_prev_share = mpnt->vm_prev_share;
890 }
891
892
893
894
895
896
897
898
/*
 * merge_segments() - coalesce adjacent vm areas of 'task' inside
 * [start_addr, end_addr).  Two neighbouring areas are merged when they
 * have the same inode, pte, ops and flags, are exactly contiguous in
 * the address space, and (for file/SHM mappings) have contiguous
 * offsets as well.
 */
void merge_segments (struct task_struct * task, unsigned long start_addr, unsigned long end_addr)
{
	struct vm_area_struct *prev, *mpnt, *next;

	mpnt = find_vma(task, start_addr);
	if (!mpnt)
		return;
	avl_neighbours(mpnt, task->mm->mmap_avl, &prev, &next);
	/* we have  prev->vm_next == mpnt  &&  mpnt->vm_next == next */

	/* No area below the range: start the pairing at mpnt itself. */
	if (!prev) {
		prev = mpnt;
		mpnt = next;
	}

	/* prev/mpnt walk the list as a candidate pair; after a merge,
	   the enlarged prev is retried against the following area. */
	for ( ; mpnt && prev->vm_start < end_addr ; prev = mpnt, mpnt = next) {
#if 0
		printk("looping in merge_segments, mpnt=0x%lX\n", (unsigned long) mpnt);
#endif

		next = mpnt->vm_next;

		/* To merge, the two areas must be identical in kind... */
		if (mpnt->vm_inode != prev->vm_inode)
			continue;
		if (mpnt->vm_pte != prev->vm_pte)
			continue;
		if (mpnt->vm_ops != prev->vm_ops)
			continue;
		if (mpnt->vm_flags != prev->vm_flags)
			continue;
		/* ... exactly contiguous in the address space ... */
		if (prev->vm_end != mpnt->vm_start)
			continue;

		/* ... and, for file or SHM mappings, contiguous in the
		   backing object too. */
		if ((mpnt->vm_inode != NULL) || (mpnt->vm_flags & VM_SHM)) {
			if (prev->vm_offset + prev->vm_end - prev->vm_start != mpnt->vm_offset)
				continue;
		}

		/* Merge: prev absorbs mpnt's range; mpnt is removed from
		   tree and list, closed (as a zero-length area), unlinked
		   from any sharing ring, and freed.  'mpnt = prev' makes
		   the loop retry the grown area against 'next'. */
		avl_remove(mpnt, &task->mm->mmap_avl);
		prev->vm_end = mpnt->vm_end;
		prev->vm_next = mpnt->vm_next;
		if (mpnt->vm_ops && mpnt->vm_ops->close) {
			mpnt->vm_offset += mpnt->vm_end - mpnt->vm_start;
			mpnt->vm_start = mpnt->vm_end;	/* zero length before close */
			mpnt->vm_ops->close(mpnt);
		}
		remove_shared_vm_struct(mpnt);
		if (mpnt->vm_inode)
			mpnt->vm_inode->i_count--;
		kfree_s(mpnt, sizeof(*mpnt));
		mpnt = prev;
	}
}
965
966
967
968
969
970 static int anon_map(struct inode *ino, struct file * file, struct vm_area_struct * vma)
971 {
972 if (zeromap_page_range(vma->vm_start, vma->vm_end - vma->vm_start, vma->vm_page_prot))
973 return -ENOMEM;
974 return 0;
975 }