tag | line | file | source code |
mpnt | 97 | fs/binfmt_elf.c | struct vm_area_struct *mpnt; |
mpnt | 99 | fs/binfmt_elf.c | mpnt = (struct vm_area_struct *)kmalloc(sizeof(*mpnt), GFP_KERNEL); |
mpnt | 100 | fs/binfmt_elf.c | if (mpnt) { |
mpnt | 101 | fs/binfmt_elf.c | mpnt->vm_task = current; |
mpnt | 102 | fs/binfmt_elf.c | mpnt->vm_start = PAGE_MASK & (unsigned long) p; |
mpnt | 103 | fs/binfmt_elf.c | mpnt->vm_end = TASK_SIZE; |
mpnt | 104 | fs/binfmt_elf.c | mpnt->vm_page_prot = PAGE_COPY; |
mpnt | 106 | fs/binfmt_elf.c | mpnt->vm_flags = VM_STACK_FLAGS; |
mpnt | 107 | fs/binfmt_elf.c | mpnt->vm_pte = 0; |
mpnt | 110 | fs/binfmt_elf.c | mpnt->vm_flags = VM_GROWSDOWN; |
mpnt | 113 | fs/binfmt_elf.c | mpnt->vm_inode = NULL; |
mpnt | 114 | fs/binfmt_elf.c | mpnt->vm_offset = 0; |
mpnt | 115 | fs/binfmt_elf.c | mpnt->vm_ops = NULL; |
mpnt | 116 | fs/binfmt_elf.c | insert_vm_struct(current, mpnt); |
mpnt | 118 | fs/binfmt_elf.c | current->mm->stk_vma = mpnt; |
mpnt | 302 | fs/exec.c | struct vm_area_struct *mpnt; |
mpnt | 306 | fs/exec.c | mpnt = (struct vm_area_struct *)kmalloc(sizeof(*mpnt), GFP_KERNEL); |
mpnt | 307 | fs/exec.c | if (mpnt) { |
mpnt | 308 | fs/exec.c | mpnt->vm_task = current; |
mpnt | 309 | fs/exec.c | mpnt->vm_start = PAGE_MASK & (unsigned long) p; |
mpnt | 310 | fs/exec.c | mpnt->vm_end = STACK_TOP; |
mpnt | 311 | fs/exec.c | mpnt->vm_page_prot = PAGE_COPY; |
mpnt | 312 | fs/exec.c | mpnt->vm_flags = VM_STACK_FLAGS; |
mpnt | 313 | fs/exec.c | mpnt->vm_ops = NULL; |
mpnt | 314 | fs/exec.c | mpnt->vm_offset = 0; |
mpnt | 315 | fs/exec.c | mpnt->vm_inode = NULL; |
mpnt | 316 | fs/exec.c | mpnt->vm_pte = 0; |
mpnt | 317 | fs/exec.c | insert_vm_struct(current, mpnt); |
mpnt | 129 | fs/namei.c | struct vm_area_struct * mpnt; |
mpnt | 132 | fs/namei.c | for(mpnt = (*p)->mm->mmap; mpnt; mpnt = mpnt->vm_next) { |
mpnt | 133 | fs/namei.c | if (inode != mpnt->vm_inode) |
mpnt | 135 | fs/namei.c | if (mpnt->vm_flags & VM_DENYWRITE) |
mpnt | 87 | kernel/fork.c | struct vm_area_struct * mpnt, **p, *tmp; |
mpnt | 91 | kernel/fork.c | for (mpnt = current->mm->mmap ; mpnt ; mpnt = mpnt->vm_next) { |
mpnt | 97 | kernel/fork.c | *tmp = *mpnt; |
mpnt | 104 | kernel/fork.c | mpnt->vm_next_share = tmp; |
mpnt | 105 | kernel/fork.c | tmp->vm_prev_share = mpnt; |
mpnt | 915 | mm/memory.c | struct vm_area_struct * mpnt; |
mpnt | 929 | mm/memory.c | for (mpnt = area->vm_next_share; mpnt != area; mpnt = mpnt->vm_next_share) { |
mpnt | 931 | mm/memory.c | if (mpnt->vm_inode != inode) { |
mpnt | 936 | mm/memory.c | if ((mpnt->vm_offset ^ area->vm_offset) & ~PAGE_MASK) |
mpnt | 939 | mm/memory.c | from_address = offset + mpnt->vm_start - mpnt->vm_offset; |
mpnt | 940 | mm/memory.c | if (from_address < mpnt->vm_start || from_address >= mpnt->vm_end) |
mpnt | 943 | mm/memory.c | if (!try_to_share(address, area, from_address, mpnt, give_page)) |
mpnt | 638 | mm/mmap.c | struct vm_area_struct *mpnt; |
mpnt | 670 | mm/mmap.c | mpnt = (struct vm_area_struct *)kmalloc(sizeof(*mpnt), GFP_KERNEL); |
mpnt | 672 | mm/mmap.c | if (!mpnt) |
mpnt | 674 | mm/mmap.c | *mpnt = *area; |
mpnt | 675 | mm/mmap.c | mpnt->vm_offset += (end - area->vm_start); |
mpnt | 676 | mm/mmap.c | mpnt->vm_start = end; |
mpnt | 677 | mm/mmap.c | if (mpnt->vm_inode) |
mpnt | 678 | mm/mmap.c | mpnt->vm_inode->i_count++; |
mpnt | 679 | mm/mmap.c | if (mpnt->vm_ops && mpnt->vm_ops->open) |
mpnt | 680 | mm/mmap.c | mpnt->vm_ops->open(mpnt); |
mpnt | 682 | mm/mmap.c | insert_vm_struct(current, mpnt); |
mpnt | 686 | mm/mmap.c | mpnt = (struct vm_area_struct *)kmalloc(sizeof(*mpnt), GFP_KERNEL); |
mpnt | 687 | mm/mmap.c | if (!mpnt) |
mpnt | 689 | mm/mmap.c | *mpnt = *area; |
mpnt | 690 | mm/mmap.c | if (mpnt->vm_ops && mpnt->vm_ops->open) |
mpnt | 691 | mm/mmap.c | mpnt->vm_ops->open(mpnt); |
mpnt | 696 | mm/mmap.c | insert_vm_struct(current, mpnt); |
mpnt | 712 | mm/mmap.c | struct vm_area_struct *mpnt, *prev, *next, **npp, *free; |
mpnt | 726 | mm/mmap.c | mpnt = find_vma(current, addr); |
mpnt | 727 | mm/mmap.c | if (!mpnt) |
mpnt | 729 | mm/mmap.c | avl_neighbours(mpnt, current->mm->mmap_avl, &prev, &next); |
mpnt | 735 | mm/mmap.c | for ( ; mpnt && mpnt->vm_start < addr+len; mpnt = *npp) { |
mpnt | 736 | mm/mmap.c | *npp = mpnt->vm_next; |
mpnt | 737 | mm/mmap.c | mpnt->vm_next = free; |
mpnt | 738 | mm/mmap.c | free = mpnt; |
mpnt | 739 | mm/mmap.c | avl_remove(mpnt, &current->mm->mmap_avl); |
mpnt | 754 | mm/mmap.c | mpnt = free; |
mpnt | 757 | mm/mmap.c | remove_shared_vm_struct(mpnt); |
mpnt | 759 | mm/mmap.c | st = addr < mpnt->vm_start ? mpnt->vm_start : addr; |
mpnt | 761 | mm/mmap.c | end = end > mpnt->vm_end ? mpnt->vm_end : end; |
mpnt | 763 | mm/mmap.c | if (mpnt->vm_ops && mpnt->vm_ops->unmap) |
mpnt | 764 | mm/mmap.c | mpnt->vm_ops->unmap(mpnt, st, end-st); |
mpnt | 766 | mm/mmap.c | unmap_fixup(mpnt, st, end-st); |
mpnt | 767 | mm/mmap.c | kfree(mpnt); |
mpnt | 787 | mm/mmap.c | struct vm_area_struct * mpnt; |
mpnt | 789 | mm/mmap.c | mpnt = task->mm->mmap; |
mpnt | 792 | mm/mmap.c | while (mpnt) { |
mpnt | 793 | mm/mmap.c | struct vm_area_struct * next = mpnt->vm_next; |
mpnt | 794 | mm/mmap.c | if (mpnt->vm_ops && mpnt->vm_ops->close) |
mpnt | 795 | mm/mmap.c | mpnt->vm_ops->close(mpnt); |
mpnt | 796 | mm/mmap.c | remove_shared_vm_struct(mpnt); |
mpnt | 797 | mm/mmap.c | if (mpnt->vm_inode) |
mpnt | 798 | mm/mmap.c | iput(mpnt->vm_inode); |
mpnt | 799 | mm/mmap.c | kfree(mpnt); |
mpnt | 800 | mm/mmap.c | mpnt = next; |
mpnt | 814 | mm/mmap.c | struct vm_area_struct **p, *mpnt; |
mpnt | 817 | mm/mmap.c | while ((mpnt = *p) != NULL) { |
mpnt | 818 | mm/mmap.c | if (mpnt->vm_start > vmp->vm_start) |
mpnt | 820 | mm/mmap.c | if (mpnt->vm_end > vmp->vm_start) |
mpnt | 822 | mm/mmap.c | p = &mpnt->vm_next; |
mpnt | 824 | mm/mmap.c | vmp->vm_next = mpnt; |
mpnt | 856 | mm/mmap.c | void remove_shared_vm_struct(struct vm_area_struct *mpnt) |
mpnt | 858 | mm/mmap.c | struct inode * inode = mpnt->vm_inode; |
mpnt | 863 | mm/mmap.c | if (mpnt->vm_next_share == mpnt) { |
mpnt | 864 | mm/mmap.c | if (inode->i_mmap != mpnt) |
mpnt | 870 | mm/mmap.c | if (inode->i_mmap == mpnt) |
mpnt | 871 | mm/mmap.c | inode->i_mmap = mpnt->vm_next_share; |
mpnt | 873 | mm/mmap.c | mpnt->vm_prev_share->vm_next_share = mpnt->vm_next_share; |
mpnt | 874 | mm/mmap.c | mpnt->vm_next_share->vm_prev_share = mpnt->vm_prev_share; |
mpnt | 886 | mm/mmap.c | struct vm_area_struct *prev, *mpnt, *next; |
mpnt | 888 | mm/mmap.c | mpnt = find_vma(task, start_addr); |
mpnt | 889 | mm/mmap.c | if (!mpnt) |
mpnt | 891 | mm/mmap.c | avl_neighbours(mpnt, task->mm->mmap_avl, &prev, &next); |
mpnt | 895 | mm/mmap.c | prev = mpnt; |
mpnt | 896 | mm/mmap.c | mpnt = next; |
mpnt | 902 | mm/mmap.c | for ( ; mpnt && prev->vm_start < end_addr ; prev = mpnt, mpnt = next) { |
mpnt | 904 | mm/mmap.c | printk("looping in merge_segments, mpnt=0x%lX\n", (unsigned long) mpnt); |
mpnt | 907 | mm/mmap.c | next = mpnt->vm_next; |
mpnt | 912 | mm/mmap.c | if (mpnt->vm_inode != prev->vm_inode) |
mpnt | 914 | mm/mmap.c | if (mpnt->vm_pte != prev->vm_pte) |
mpnt | 916 | mm/mmap.c | if (mpnt->vm_ops != prev->vm_ops) |
mpnt | 918 | mm/mmap.c | if (mpnt->vm_flags != prev->vm_flags) |
mpnt | 920 | mm/mmap.c | if (prev->vm_end != mpnt->vm_start) |
mpnt | 925 | mm/mmap.c | if ((mpnt->vm_inode != NULL) || (mpnt->vm_flags & VM_SHM)) { |
mpnt | 926 | mm/mmap.c | if (prev->vm_offset + prev->vm_end - prev->vm_start != mpnt->vm_offset) |
mpnt | 935 | mm/mmap.c | avl_remove(mpnt, &task->mm->mmap_avl); |
mpnt | 936 | mm/mmap.c | prev->vm_end = mpnt->vm_end; |
mpnt | 937 | mm/mmap.c | prev->vm_next = mpnt->vm_next; |
mpnt | 938 | mm/mmap.c | if (mpnt->vm_ops && mpnt->vm_ops->close) { |
mpnt | 939 | mm/mmap.c | mpnt->vm_offset += mpnt->vm_end - mpnt->vm_start; |
mpnt | 940 | mm/mmap.c | mpnt->vm_start = mpnt->vm_end; |
mpnt | 941 | mm/mmap.c | mpnt->vm_ops->close(mpnt); |
mpnt | 943 | mm/mmap.c | remove_shared_vm_struct(mpnt); |
mpnt | 944 | mm/mmap.c | if (mpnt->vm_inode) |
mpnt | 945 | mm/mmap.c | mpnt->vm_inode->i_count--; |
mpnt | 946 | mm/mmap.c | kfree_s(mpnt, sizeof(*mpnt)); |
mpnt | 947 | mm/mmap.c | mpnt = prev; |
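The rows above repeat a small number of patterns: allocating and filling a fresh `vm_area_struct` for the stack (fs/binfmt_elf.c, fs/exec.c), walking the per-task `mmap` list (fs/namei.c, kernel/fork.c, mm/memory.c), and keeping that list sorted by `vm_start` in `insert_vm_struct()` (mm/mmap.c, kernel lines 814-824). The following is a minimal userspace sketch of that last pattern only, assuming a reduced structure with just the fields the loop touches; the stub type, `main()`, and the sample address ranges are illustrative assumptions, not taken from the kernel source.

#include <stdio.h>
#include <stdlib.h>

/* Reduced stand-in for struct vm_area_struct: only the fields the
 * insertion loop in the listing actually reads (vm_start, vm_end,
 * vm_next).  Everything else in the real structure is omitted. */
struct vm_area_struct {
	unsigned long vm_start;
	unsigned long vm_end;
	struct vm_area_struct *vm_next;
};

/* Sorted insertion by vm_start, mirroring the loop shown for
 * mm/mmap.c lines 814-824: walk the list with a pointer-to-pointer,
 * stop at the first area that starts beyond the new one, and warn if
 * the new area overlaps the area just before the insertion point. */
static void insert_vm_struct(struct vm_area_struct **head,
			     struct vm_area_struct *vmp)
{
	struct vm_area_struct **p = head, *mpnt;

	while ((mpnt = *p) != NULL) {
		if (mpnt->vm_start > vmp->vm_start)
			break;
		if (mpnt->vm_end > vmp->vm_start)
			fprintf(stderr, "overlapping areas at %#lx\n",
				vmp->vm_start);
		p = &mpnt->vm_next;
	}
	vmp->vm_next = mpnt;	/* new area points at the first later one */
	*p = vmp;		/* previous link (or the head) points at it */
}

int main(void)
{
	struct vm_area_struct a = { 0x1000, 0x2000, NULL };
	struct vm_area_struct b = { 0x8000, 0x9000, NULL };
	struct vm_area_struct c = { 0x4000, 0x5000, NULL };
	struct vm_area_struct *head = NULL, *v;

	insert_vm_struct(&head, &a);
	insert_vm_struct(&head, &b);
	insert_vm_struct(&head, &c);	/* lands between a and b */

	for (v = head; v; v = v->vm_next)
		printf("%#lx-%#lx\n", v->vm_start, v->vm_end);
	return 0;
}

The listing also shows an AVL tree kept alongside this list (`mmap_avl` in the `avl_remove` and `avl_neighbours` calls at mm/mmap.c lines 729, 739, 891 and 935); the sketch deliberately leaves that second index out and models the linked list only.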