tag | line | file | source code |
mpnt | 96 | fs/binfmt_elf.c | struct vm_area_struct *mpnt; |
mpnt | 98 | fs/binfmt_elf.c | mpnt = (struct vm_area_struct *)kmalloc(sizeof(*mpnt), GFP_KERNEL); |
mpnt | 99 | fs/binfmt_elf.c | if (mpnt) { |
mpnt | 100 | fs/binfmt_elf.c | mpnt->vm_mm = current->mm; |
mpnt | 101 | fs/binfmt_elf.c | mpnt->vm_start = PAGE_MASK & (unsigned long) p; |
mpnt | 102 | fs/binfmt_elf.c | mpnt->vm_end = TASK_SIZE; |
mpnt | 103 | fs/binfmt_elf.c | mpnt->vm_page_prot = PAGE_COPY; |
mpnt | 104 | fs/binfmt_elf.c | mpnt->vm_flags = VM_STACK_FLAGS; |
mpnt | 105 | fs/binfmt_elf.c | mpnt->vm_pte = 0; |
mpnt | 106 | fs/binfmt_elf.c | mpnt->vm_inode = NULL; |
mpnt | 107 | fs/binfmt_elf.c | mpnt->vm_offset = 0; |
mpnt | 108 | fs/binfmt_elf.c | mpnt->vm_ops = NULL; |
mpnt | 109 | fs/binfmt_elf.c | insert_vm_struct(current, mpnt); |
mpnt | 110 | fs/binfmt_elf.c | current->mm->total_vm += (mpnt->vm_end - mpnt->vm_start) >> PAGE_SHIFT; |
mpnt | 315 | fs/exec.c | struct vm_area_struct *mpnt; |
mpnt | 319 | fs/exec.c | mpnt = (struct vm_area_struct *)kmalloc(sizeof(*mpnt), GFP_KERNEL); |
mpnt | 320 | fs/exec.c | if (mpnt) { |
mpnt | 321 | fs/exec.c | mpnt->vm_mm = current->mm; |
mpnt | 322 | fs/exec.c | mpnt->vm_start = PAGE_MASK & (unsigned long) p; |
mpnt | 323 | fs/exec.c | mpnt->vm_end = STACK_TOP; |
mpnt | 324 | fs/exec.c | mpnt->vm_page_prot = PAGE_COPY; |
mpnt | 325 | fs/exec.c | mpnt->vm_flags = VM_STACK_FLAGS; |
mpnt | 326 | fs/exec.c | mpnt->vm_ops = NULL; |
mpnt | 327 | fs/exec.c | mpnt->vm_offset = 0; |
mpnt | 328 | fs/exec.c | mpnt->vm_inode = NULL; |
mpnt | 329 | fs/exec.c | mpnt->vm_pte = 0; |
mpnt | 330 | fs/exec.c | insert_vm_struct(current, mpnt); |
mpnt | 331 | fs/exec.c | current->mm->total_vm = (mpnt->vm_end - mpnt->vm_start) >> PAGE_SHIFT; |
mpnt | 129 | fs/namei.c | struct vm_area_struct * mpnt; |
mpnt | 132 | fs/namei.c | for(mpnt = p->mm->mmap; mpnt; mpnt = mpnt->vm_next) { |
mpnt | 133 | fs/namei.c | if (inode != mpnt->vm_inode) |
mpnt | 135 | fs/namei.c | if (mpnt->vm_flags & VM_DENYWRITE) |
mpnt | 80 | kernel/fork.c | struct vm_area_struct * mpnt, **p, *tmp; |
mpnt | 84 | kernel/fork.c | for (mpnt = current->mm->mmap ; mpnt ; mpnt = mpnt->vm_next) { |
mpnt | 90 | kernel/fork.c | *tmp = *mpnt; |
mpnt | 98 | kernel/fork.c | mpnt->vm_next_share = tmp; |
mpnt | 99 | kernel/fork.c | tmp->vm_prev_share = mpnt; |
mpnt | 878 | mm/memory.c | struct vm_area_struct * mpnt; |
mpnt | 892 | mm/memory.c | for (mpnt = area->vm_next_share; mpnt != area; mpnt = mpnt->vm_next_share) { |
mpnt | 894 | mm/memory.c | if (mpnt->vm_inode != inode) { |
mpnt | 899 | mm/memory.c | if ((mpnt->vm_offset ^ area->vm_offset) & ~PAGE_MASK) |
mpnt | 902 | mm/memory.c | from_address = offset + mpnt->vm_start - mpnt->vm_offset; |
mpnt | 903 | mm/memory.c | if (from_address < mpnt->vm_start || from_address >= mpnt->vm_end) |
mpnt | 906 | mm/memory.c | if (!try_to_share(address, area, from_address, mpnt, give_page)) |
mpnt | 987 | mm/memory.c | struct vm_area_struct * mpnt; |
mpnt | 992 | mm/memory.c | mpnt = inode->i_mmap; |
mpnt | 993 | mm/memory.c | if (!mpnt) { |
mpnt | 998 | mm/memory.c | unsigned long start = mpnt->vm_start; |
mpnt | 999 | mm/memory.c | unsigned long len = mpnt->vm_end - start; |
mpnt | 1003 | mm/memory.c | if (mpnt->vm_offset >= offset) { |
mpnt | 1004 | mm/memory.c | zap_page_range(mpnt->vm_mm, start, len); |
mpnt | 1008 | mm/memory.c | diff = offset - mpnt->vm_offset; |
mpnt | 1016 | mm/memory.c | if (unshare(mpnt, start, page)) |
mpnt | 1020 | mm/memory.c | zap_page_range(mpnt->vm_mm, start, len); |
mpnt | 1021 | mm/memory.c | } while ((mpnt = mpnt->vm_next_share) != inode->i_mmap); |
mpnt | 616 | mm/mmap.c | struct vm_area_struct *mpnt; |
mpnt | 651 | mm/mmap.c | mpnt = (struct vm_area_struct *)kmalloc(sizeof(*mpnt), GFP_KERNEL); |
mpnt | 653 | mm/mmap.c | if (!mpnt) |
mpnt | 655 | mm/mmap.c | *mpnt = *area; |
mpnt | 656 | mm/mmap.c | mpnt->vm_offset += (end - area->vm_start); |
mpnt | 657 | mm/mmap.c | mpnt->vm_start = end; |
mpnt | 658 | mm/mmap.c | if (mpnt->vm_inode) |
mpnt | 659 | mm/mmap.c | mpnt->vm_inode->i_count++; |
mpnt | 660 | mm/mmap.c | if (mpnt->vm_ops && mpnt->vm_ops->open) |
mpnt | 661 | mm/mmap.c | mpnt->vm_ops->open(mpnt); |
mpnt | 663 | mm/mmap.c | insert_vm_struct(current, mpnt); |
mpnt | 667 | mm/mmap.c | mpnt = (struct vm_area_struct *)kmalloc(sizeof(*mpnt), GFP_KERNEL); |
mpnt | 668 | mm/mmap.c | if (!mpnt) |
mpnt | 670 | mm/mmap.c | *mpnt = *area; |
mpnt | 671 | mm/mmap.c | if (mpnt->vm_ops && mpnt->vm_ops->open) |
mpnt | 672 | mm/mmap.c | mpnt->vm_ops->open(mpnt); |
mpnt | 677 | mm/mmap.c | insert_vm_struct(current, mpnt); |
mpnt | 693 | mm/mmap.c | struct vm_area_struct *mpnt, *prev, *next, **npp, *free; |
mpnt | 707 | mm/mmap.c | mpnt = find_vma(current, addr); |
mpnt | 708 | mm/mmap.c | if (!mpnt) |
mpnt | 710 | mm/mmap.c | avl_neighbours(mpnt, current->mm->mmap_avl, &prev, &next); |
mpnt | 716 | mm/mmap.c | for ( ; mpnt && mpnt->vm_start < addr+len; mpnt = *npp) { |
mpnt | 717 | mm/mmap.c | *npp = mpnt->vm_next; |
mpnt | 718 | mm/mmap.c | mpnt->vm_next = free; |
mpnt | 719 | mm/mmap.c | free = mpnt; |
mpnt | 720 | mm/mmap.c | avl_remove(mpnt, &current->mm->mmap_avl); |
mpnt | 735 | mm/mmap.c | mpnt = free; |
mpnt | 738 | mm/mmap.c | remove_shared_vm_struct(mpnt); |
mpnt | 740 | mm/mmap.c | st = addr < mpnt->vm_start ? mpnt->vm_start : addr; |
mpnt | 742 | mm/mmap.c | end = end > mpnt->vm_end ? mpnt->vm_end : end; |
mpnt | 744 | mm/mmap.c | if (mpnt->vm_ops && mpnt->vm_ops->unmap) |
mpnt | 745 | mm/mmap.c | mpnt->vm_ops->unmap(mpnt, st, end-st); |
mpnt | 747 | mm/mmap.c | unmap_fixup(mpnt, st, end-st); |
mpnt | 748 | mm/mmap.c | kfree(mpnt); |
mpnt | 768 | mm/mmap.c | struct vm_area_struct * mpnt; |
mpnt | 770 | mm/mmap.c | mpnt = mm->mmap; |
mpnt | 776 | mm/mmap.c | while (mpnt) { |
mpnt | 777 | mm/mmap.c | struct vm_area_struct * next = mpnt->vm_next; |
mpnt | 778 | mm/mmap.c | if (mpnt->vm_ops) { |
mpnt | 779 | mm/mmap.c | if (mpnt->vm_ops->unmap) |
mpnt | 780 | mm/mmap.c | mpnt->vm_ops->unmap(mpnt, mpnt->vm_start, mpnt->vm_end-mpnt->vm_start); |
mpnt | 781 | mm/mmap.c | if (mpnt->vm_ops->close) |
mpnt | 782 | mm/mmap.c | mpnt->vm_ops->close(mpnt); |
mpnt | 784 | mm/mmap.c | remove_shared_vm_struct(mpnt); |
mpnt | 785 | mm/mmap.c | zap_page_range(mm, mpnt->vm_start, mpnt->vm_end-mpnt->vm_start); |
mpnt | 786 | mm/mmap.c | if (mpnt->vm_inode) |
mpnt | 787 | mm/mmap.c | iput(mpnt->vm_inode); |
mpnt | 788 | mm/mmap.c | kfree(mpnt); |
mpnt | 789 | mm/mmap.c | mpnt = next; |
mpnt | 803 | mm/mmap.c | struct vm_area_struct **p, *mpnt; |
mpnt | 806 | mm/mmap.c | while ((mpnt = *p) != NULL) { |
mpnt | 807 | mm/mmap.c | if (mpnt->vm_start > vmp->vm_start) |
mpnt | 809 | mm/mmap.c | if (mpnt->vm_end > vmp->vm_start) |
mpnt | 811 | mm/mmap.c | p = &mpnt->vm_next; |
mpnt | 813 | mm/mmap.c | vmp->vm_next = mpnt; |
mpnt | 845 | mm/mmap.c | void remove_shared_vm_struct(struct vm_area_struct *mpnt) |
mpnt | 847 | mm/mmap.c | struct inode * inode = mpnt->vm_inode; |
mpnt | 852 | mm/mmap.c | if (mpnt->vm_next_share == mpnt) { |
mpnt | 853 | mm/mmap.c | if (inode->i_mmap != mpnt) |
mpnt | 859 | mm/mmap.c | if (inode->i_mmap == mpnt) |
mpnt | 860 | mm/mmap.c | inode->i_mmap = mpnt->vm_next_share; |
mpnt | 862 | mm/mmap.c | mpnt->vm_prev_share->vm_next_share = mpnt->vm_next_share; |
mpnt | 863 | mm/mmap.c | mpnt->vm_next_share->vm_prev_share = mpnt->vm_prev_share; |
mpnt | 875 | mm/mmap.c | struct vm_area_struct *prev, *mpnt, *next; |
mpnt | 877 | mm/mmap.c | mpnt = find_vma(task, start_addr); |
mpnt | 878 | mm/mmap.c | if (!mpnt) |
mpnt | 880 | mm/mmap.c | avl_neighbours(mpnt, task->mm->mmap_avl, &prev, &next); |
mpnt | 884 | mm/mmap.c | prev = mpnt; |
mpnt | 885 | mm/mmap.c | mpnt = next; |
mpnt | 891 | mm/mmap.c | for ( ; mpnt && prev->vm_start < end_addr ; prev = mpnt, mpnt = next) { |
mpnt | 893 | mm/mmap.c | printk("looping in merge_segments, mpnt=0x%lX\n", (unsigned long) mpnt); |
mpnt | 896 | mm/mmap.c | next = mpnt->vm_next; |
mpnt | 901 | mm/mmap.c | if (mpnt->vm_inode != prev->vm_inode) |
mpnt | 903 | mm/mmap.c | if (mpnt->vm_pte != prev->vm_pte) |
mpnt | 905 | mm/mmap.c | if (mpnt->vm_ops != prev->vm_ops) |
mpnt | 907 | mm/mmap.c | if (mpnt->vm_flags != prev->vm_flags) |
mpnt | 909 | mm/mmap.c | if (prev->vm_end != mpnt->vm_start) |
mpnt | 914 | mm/mmap.c | if ((mpnt->vm_inode != NULL) || (mpnt->vm_flags & VM_SHM)) { |
mpnt | 915 | mm/mmap.c | if (prev->vm_offset + prev->vm_end - prev->vm_start != mpnt->vm_offset) |
mpnt | 924 | mm/mmap.c | avl_remove(mpnt, &task->mm->mmap_avl); |
mpnt | 925 | mm/mmap.c | prev->vm_end = mpnt->vm_end; |
mpnt | 926 | mm/mmap.c | prev->vm_next = mpnt->vm_next; |
mpnt | 927 | mm/mmap.c | if (mpnt->vm_ops && mpnt->vm_ops->close) { |
mpnt | 928 | mm/mmap.c | mpnt->vm_offset += mpnt->vm_end - mpnt->vm_start; |
mpnt | 929 | mm/mmap.c | mpnt->vm_start = mpnt->vm_end; |
mpnt | 930 | mm/mmap.c | mpnt->vm_ops->close(mpnt); |
mpnt | 932 | mm/mmap.c | remove_shared_vm_struct(mpnt); |
mpnt | 933 | mm/mmap.c | if (mpnt->vm_inode) |
mpnt | 934 | mm/mmap.c | mpnt->vm_inode->i_count--; |
mpnt | 935 | mm/mmap.c | kfree_s(mpnt, sizeof(*mpnt)); |
mpnt | 936 | mm/mmap.c | mpnt = prev; |