tag | line | file | source code |
mpnt | 89 | drivers/char/mem.c | struct vm_area_struct * mpnt; |
mpnt | 98 | drivers/char/mem.c | mpnt = (struct vm_area_struct * ) kmalloc(sizeof(struct vm_area_struct), GFP_KERNEL); |
mpnt | 99 | drivers/char/mem.c | if (!mpnt) |
mpnt | 102 | drivers/char/mem.c | mpnt->vm_task = current; |
mpnt | 103 | drivers/char/mem.c | mpnt->vm_start = addr; |
mpnt | 104 | drivers/char/mem.c | mpnt->vm_end = addr + len; |
mpnt | 105 | drivers/char/mem.c | mpnt->vm_page_prot = prot; |
mpnt | 106 | drivers/char/mem.c | mpnt->vm_share = NULL; |
mpnt | 107 | drivers/char/mem.c | mpnt->vm_inode = inode; |
mpnt | 109 | drivers/char/mem.c | mpnt->vm_offset = off; |
mpnt | 110 | drivers/char/mem.c | mpnt->vm_ops = NULL; |
mpnt | 111 | drivers/char/mem.c | insert_vm_struct(current, mpnt); |
mpnt | 182 | drivers/char/mem.c | struct vm_area_struct *mpnt; |
mpnt | 192 | drivers/char/mem.c | mpnt = (struct vm_area_struct *)kmalloc(sizeof(*mpnt), GFP_KERNEL); |
mpnt | 193 | drivers/char/mem.c | if (!mpnt) |
mpnt | 196 | drivers/char/mem.c | mpnt->vm_task = current; |
mpnt | 197 | drivers/char/mem.c | mpnt->vm_start = addr; |
mpnt | 198 | drivers/char/mem.c | mpnt->vm_end = addr + len; |
mpnt | 199 | drivers/char/mem.c | mpnt->vm_page_prot = prot; |
mpnt | 200 | drivers/char/mem.c | mpnt->vm_share = NULL; |
mpnt | 201 | drivers/char/mem.c | mpnt->vm_inode = NULL; |
mpnt | 202 | drivers/char/mem.c | mpnt->vm_offset = off; |
mpnt | 203 | drivers/char/mem.c | mpnt->vm_ops = NULL; |
mpnt | 204 | drivers/char/mem.c | insert_vm_struct(current, mpnt); |
mpnt | 53 | fs/binfmt_elf.c | struct vm_area_struct *mpnt; |
mpnt | 55 | fs/binfmt_elf.c | mpnt = (struct vm_area_struct *)kmalloc(sizeof(*mpnt), GFP_KERNEL); |
mpnt | 56 | fs/binfmt_elf.c | if (mpnt) { |
mpnt | 57 | fs/binfmt_elf.c | mpnt->vm_task = current; |
mpnt | 58 | fs/binfmt_elf.c | mpnt->vm_start = PAGE_MASK & (unsigned long) p; |
mpnt | 59 | fs/binfmt_elf.c | mpnt->vm_end = TASK_SIZE; |
mpnt | 60 | fs/binfmt_elf.c | mpnt->vm_page_prot = PAGE_PRIVATE|PAGE_DIRTY; |
mpnt | 61 | fs/binfmt_elf.c | mpnt->vm_share = NULL; |
mpnt | 62 | fs/binfmt_elf.c | mpnt->vm_inode = NULL; |
mpnt | 63 | fs/binfmt_elf.c | mpnt->vm_offset = 0; |
mpnt | 64 | fs/binfmt_elf.c | mpnt->vm_ops = NULL; |
mpnt | 65 | fs/binfmt_elf.c | insert_vm_struct(current, mpnt); |
mpnt | 66 | fs/binfmt_elf.c | current->mm->stk_vma = mpnt; |
mpnt | 313 | fs/exec.c | struct vm_area_struct *mpnt; |
mpnt | 315 | fs/exec.c | mpnt = (struct vm_area_struct *)kmalloc(sizeof(*mpnt), GFP_KERNEL); |
mpnt | 316 | fs/exec.c | if (mpnt) { |
mpnt | 317 | fs/exec.c | mpnt->vm_task = current; |
mpnt | 318 | fs/exec.c | mpnt->vm_start = PAGE_MASK & (unsigned long) p; |
mpnt | 319 | fs/exec.c | mpnt->vm_end = TASK_SIZE; |
mpnt | 320 | fs/exec.c | mpnt->vm_page_prot = PAGE_PRIVATE|PAGE_DIRTY; |
mpnt | 321 | fs/exec.c | mpnt->vm_share = NULL; |
mpnt | 322 | fs/exec.c | mpnt->vm_inode = NULL; |
mpnt | 323 | fs/exec.c | mpnt->vm_offset = 0; |
mpnt | 324 | fs/exec.c | mpnt->vm_ops = NULL; |
mpnt | 325 | fs/exec.c | insert_vm_struct(current, mpnt); |
mpnt | 326 | fs/exec.c | current->mm->stk_vma = mpnt; |
mpnt | 510 | fs/exec.c | struct vm_area_struct * mpnt, *mpnt1; |
mpnt | 530 | fs/exec.c | mpnt = current->mm->mmap; |
mpnt | 533 | fs/exec.c | while (mpnt) { |
mpnt | 534 | fs/exec.c | mpnt1 = mpnt->vm_next; |
mpnt | 535 | fs/exec.c | if (mpnt->vm_ops && mpnt->vm_ops->close) |
mpnt | 536 | fs/exec.c | mpnt->vm_ops->close(mpnt); |
mpnt | 537 | fs/exec.c | kfree(mpnt); |
mpnt | 538 | fs/exec.c | mpnt = mpnt1; |
mpnt | 353 | fs/namei.c | struct vm_area_struct * mpnt; |
mpnt | 360 | fs/namei.c | for(mpnt = (*p)->mm->mmap; mpnt; mpnt = mpnt->vm_next) { |
mpnt | 361 | fs/namei.c | if (mpnt->vm_page_prot & PAGE_RW) |
mpnt | 363 | fs/namei.c | if (inode == mpnt->vm_inode) { |
mpnt | 54 | fs/nfs/mmap.c | struct vm_area_struct * mpnt; |
mpnt | 67 | fs/nfs/mmap.c | mpnt = (struct vm_area_struct * ) kmalloc(sizeof(struct vm_area_struct), GFP_KERNEL); |
mpnt | 68 | fs/nfs/mmap.c | if (!mpnt) |
mpnt | 72 | fs/nfs/mmap.c | mpnt->vm_task = current; |
mpnt | 73 | fs/nfs/mmap.c | mpnt->vm_start = addr; |
mpnt | 74 | fs/nfs/mmap.c | mpnt->vm_end = addr + len; |
mpnt | 75 | fs/nfs/mmap.c | mpnt->vm_page_prot = prot; |
mpnt | 76 | fs/nfs/mmap.c | mpnt->vm_share = NULL; |
mpnt | 77 | fs/nfs/mmap.c | mpnt->vm_inode = inode; |
mpnt | 79 | fs/nfs/mmap.c | mpnt->vm_offset = off; |
mpnt | 80 | fs/nfs/mmap.c | mpnt->vm_ops = &nfs_file_mmap; |
mpnt | 81 | fs/nfs/mmap.c | insert_vm_struct(current, mpnt); |
mpnt | 113 | fs/proc/fd.c | struct vm_area_struct * mpnt; |
mpnt | 114 | fs/proc/fd.c | for (mpnt = p->mm->mmap; mpnt; mpnt = mpnt->vm_next) |
mpnt | 115 | fs/proc/fd.c | if (mpnt->vm_inode) |
mpnt | 170 | fs/proc/fd.c | struct vm_area_struct * mpnt; |
mpnt | 171 | fs/proc/fd.c | for (mpnt = p->mm->mmap ; mpnt ; mpnt = mpnt->vm_next) |
mpnt | 172 | fs/proc/fd.c | if (mpnt->vm_inode) |
mpnt | 182 | fs/proc/inode.c | struct vm_area_struct * mpnt; |
mpnt | 183 | fs/proc/inode.c | for (mpnt = p->mm->mmap ; mpnt ; mpnt = mpnt->vm_next) |
mpnt | 184 | fs/proc/inode.c | if(mpnt->vm_inode) |
mpnt | 87 | fs/proc/link.c | struct vm_area_struct * mpnt; |
mpnt | 88 | fs/proc/link.c | for(mpnt = p->mm->mmap; mpnt && j >= 0; |
mpnt | 89 | fs/proc/link.c | mpnt = mpnt->vm_next){ |
mpnt | 90 | fs/proc/link.c | if(mpnt->vm_inode) { |
mpnt | 92 | fs/proc/link.c | inode = mpnt->vm_inode; |
mpnt | 53 | ibcs/binfmt_elf.c | struct vm_area_struct *mpnt; |
mpnt | 55 | ibcs/binfmt_elf.c | mpnt = (struct vm_area_struct *)kmalloc(sizeof(*mpnt), GFP_KERNEL); |
mpnt | 56 | ibcs/binfmt_elf.c | if (mpnt) { |
mpnt | 57 | ibcs/binfmt_elf.c | mpnt->vm_task = current; |
mpnt | 58 | ibcs/binfmt_elf.c | mpnt->vm_start = PAGE_MASK & (unsigned long) p; |
mpnt | 59 | ibcs/binfmt_elf.c | mpnt->vm_end = TASK_SIZE; |
mpnt | 60 | ibcs/binfmt_elf.c | mpnt->vm_page_prot = PAGE_PRIVATE|PAGE_DIRTY; |
mpnt | 61 | ibcs/binfmt_elf.c | mpnt->vm_share = NULL; |
mpnt | 62 | ibcs/binfmt_elf.c | mpnt->vm_inode = NULL; |
mpnt | 63 | ibcs/binfmt_elf.c | mpnt->vm_offset = 0; |
mpnt | 64 | ibcs/binfmt_elf.c | mpnt->vm_ops = NULL; |
mpnt | 65 | ibcs/binfmt_elf.c | insert_vm_struct(current, mpnt); |
mpnt | 66 | ibcs/binfmt_elf.c | current->mm->stk_vma = mpnt; |
mpnt | 360 | kernel/exit.c | struct vm_area_struct * mpnt; |
mpnt | 362 | kernel/exit.c | mpnt = current->mm->mmap; |
mpnt | 364 | kernel/exit.c | while (mpnt) { |
mpnt | 365 | kernel/exit.c | struct vm_area_struct * next = mpnt->vm_next; |
mpnt | 366 | kernel/exit.c | if (mpnt->vm_ops && mpnt->vm_ops->close) |
mpnt | 367 | kernel/exit.c | mpnt->vm_ops->close(mpnt); |
mpnt | 368 | kernel/exit.c | kfree(mpnt); |
mpnt | 369 | kernel/exit.c | mpnt = next; |
mpnt | 94 | kernel/fork.c | struct vm_area_struct * mpnt, **p, *tmp; |
mpnt | 99 | kernel/fork.c | for (mpnt = current->mm->mmap ; mpnt ; mpnt = mpnt->vm_next) { |
mpnt | 103 | kernel/fork.c | *tmp = *mpnt; |
mpnt | 110 | kernel/fork.c | if (current->mm->stk_vma == mpnt) |
mpnt | 779 | mm/memory.c | struct vm_area_struct * mpnt; |
mpnt | 780 | mm/memory.c | for (mpnt = (*p)->mm->mmap; mpnt; mpnt = mpnt->vm_next) { |
mpnt | 781 | mm/memory.c | if (mpnt->vm_ops == area->vm_ops && |
mpnt | 782 | mm/memory.c | mpnt->vm_inode->i_ino == area->vm_inode->i_ino&& |
mpnt | 783 | mm/memory.c | mpnt->vm_inode->i_dev == area->vm_inode->i_dev){ |
mpnt | 784 | mm/memory.c | if (mpnt->vm_ops->share(mpnt, area, address)) |
mpnt | 788 | mm/memory.c | if (!mpnt) continue; /* Nope. Nuthin here */ |
mpnt | 836 | mm/memory.c | struct vm_area_struct * mpnt; |
mpnt | 854 | mm/memory.c | for (mpnt = tsk->mm->mmap; mpnt != NULL; mpnt = mpnt->vm_next) { |
mpnt | 855 | mm/memory.c | if (address < mpnt->vm_start) |
mpnt | 857 | mm/memory.c | if (address >= mpnt->vm_end) { |
mpnt | 858 | mm/memory.c | tmp = mpnt->vm_end; |
mpnt | 861 | mm/memory.c | if (!mpnt->vm_ops || !mpnt->vm_ops->nopage) { |
mpnt | 866 | mm/memory.c | mpnt->vm_ops->nopage(error_code, mpnt, address); |
mpnt | 873 | mm/memory.c | if (mpnt && mpnt == tsk->mm->stk_vma && |
mpnt | 874 | mm/memory.c | address - tmp > mpnt->vm_start - address && |
mpnt | 875 | mm/memory.c | tsk->rlim[RLIMIT_STACK].rlim_cur > mpnt->vm_end - address) { |
mpnt | 876 | mm/memory.c | mpnt->vm_start = address; |
mpnt | 181 | mm/mmap.c | struct vm_area_struct *mpnt; |
mpnt | 212 | mm/mmap.c | mpnt = (struct vm_area_struct *)kmalloc(sizeof(*mpnt), GFP_KERNEL); |
mpnt | 214 | mm/mmap.c | *mpnt = *area; |
mpnt | 215 | mm/mmap.c | mpnt->vm_offset += (end - area->vm_start); |
mpnt | 216 | mm/mmap.c | mpnt->vm_start = end; |
mpnt | 217 | mm/mmap.c | if (mpnt->vm_inode) |
mpnt | 218 | mm/mmap.c | mpnt->vm_inode->i_count++; |
mpnt | 219 | mm/mmap.c | insert_vm_struct(current, mpnt); |
mpnt | 224 | mm/mmap.c | mpnt = (struct vm_area_struct *)kmalloc(sizeof(*mpnt), GFP_KERNEL); |
mpnt | 225 | mm/mmap.c | *mpnt = *area; |
mpnt | 226 | mm/mmap.c | insert_vm_struct(current, mpnt); |
mpnt | 248 | mm/mmap.c | struct vm_area_struct *mpnt, **npp, *free; |
mpnt | 264 | mm/mmap.c | for (mpnt = *npp; mpnt != NULL; mpnt = *npp) { |
mpnt | 267 | mm/mmap.c | if ((addr < mpnt->vm_start && end <= mpnt->vm_start) || |
mpnt | 268 | mm/mmap.c | (addr >= mpnt->vm_end && end > mpnt->vm_end)) |
mpnt | 270 | mm/mmap.c | npp = &mpnt->vm_next; |
mpnt | 274 | mm/mmap.c | *npp = mpnt->vm_next; |
mpnt | 275 | mm/mmap.c | mpnt->vm_next = free; |
mpnt | 276 | mm/mmap.c | free = mpnt; |
mpnt | 291 | mm/mmap.c | mpnt = free; |
mpnt | 294 | mm/mmap.c | st = addr < mpnt->vm_start ? mpnt->vm_start : addr; |
mpnt | 296 | mm/mmap.c | end = end > mpnt->vm_end ? mpnt->vm_end : end; |
mpnt | 298 | mm/mmap.c | if (mpnt->vm_ops && mpnt->vm_ops->unmap) |
mpnt | 299 | mm/mmap.c | mpnt->vm_ops->unmap(mpnt, st, end-st); |
mpnt | 301 | mm/mmap.c | unmap_fixup(mpnt, st, end-st); |
mpnt | 303 | mm/mmap.c | kfree(mpnt); |
mpnt | 314 | mm/mmap.c | struct vm_area_struct * mpnt; |
mpnt | 334 | mm/mmap.c | mpnt = (struct vm_area_struct * ) kmalloc(sizeof(struct vm_area_struct), GFP_KERNEL); |
mpnt | 335 | mm/mmap.c | if (!mpnt) |
mpnt | 339 | mm/mmap.c | mpnt->vm_task = current; |
mpnt | 340 | mm/mmap.c | mpnt->vm_start = addr; |
mpnt | 341 | mm/mmap.c | mpnt->vm_end = addr + len; |
mpnt | 342 | mm/mmap.c | mpnt->vm_page_prot = prot; |
mpnt | 343 | mm/mmap.c | mpnt->vm_share = NULL; |
mpnt | 344 | mm/mmap.c | mpnt->vm_inode = inode; |
mpnt | 346 | mm/mmap.c | mpnt->vm_offset = off; |
mpnt | 347 | mm/mmap.c | mpnt->vm_ops = &file_mmap; |
mpnt | 348 | mm/mmap.c | insert_vm_struct(current, mpnt); |
mpnt | 362 | mm/mmap.c | struct vm_area_struct **nxtpp, *mpnt; |
mpnt | 366 | mm/mmap.c | for(mpnt = t->mm->mmap; mpnt != NULL; mpnt = mpnt->vm_next) |
mpnt | 368 | mm/mmap.c | if (mpnt->vm_start > vmp->vm_start) |
mpnt | 370 | mm/mmap.c | nxtpp = &mpnt->vm_next; |
mpnt | 372 | mm/mmap.c | if ((vmp->vm_start >= mpnt->vm_start && |
mpnt | 373 | mm/mmap.c | vmp->vm_start < mpnt->vm_end) || |
mpnt | 374 | mm/mmap.c | (vmp->vm_end >= mpnt->vm_start && |
mpnt | 375 | mm/mmap.c | vmp->vm_end < mpnt->vm_end)) |
mpnt | 378 | mm/mmap.c | mpnt->vm_start, vmp->vm_end); |
mpnt | 381 | mm/mmap.c | vmp->vm_next = mpnt; |
mpnt | 391 | mm/mmap.c | void merge_segments(struct vm_area_struct *mpnt, |
mpnt | 396 | mm/mmap.c | if (mpnt == NULL) |
mpnt | 399 | mm/mmap.c | for(prev = mpnt, mpnt = mpnt->vm_next; |
mpnt | 400 | mm/mmap.c | mpnt != NULL; |
mpnt | 401 | mm/mmap.c | prev = mpnt, mpnt = next) |
mpnt | 405 | mm/mmap.c | next = mpnt->vm_next; |
mpnt | 410 | mm/mmap.c | mp = prev->vm_offset + psz == mpnt->vm_offset; |
mpnt | 413 | mm/mmap.c | mp = (*mergep)(prev, mpnt, mpd); |
mpnt | 420 | mm/mmap.c | if (prev->vm_ops != mpnt->vm_ops || |
mpnt | 421 | mm/mmap.c | prev->vm_page_prot != mpnt->vm_page_prot || |
mpnt | 422 | mm/mmap.c | prev->vm_inode != mpnt->vm_inode || |
mpnt | 423 | mm/mmap.c | prev->vm_end != mpnt->vm_start || |
mpnt | 425 | mm/mmap.c | prev->vm_share != mpnt->vm_share || /* ?? */ |
mpnt | 426 | mm/mmap.c | prev->vm_next != mpnt) /* !!! */ |
mpnt | 434 | mm/mmap.c | prev->vm_end = mpnt->vm_end; |
mpnt | 435 | mm/mmap.c | prev->vm_next = mpnt->vm_next; |
mpnt | 436 | mm/mmap.c | kfree_s(mpnt, sizeof(*mpnt)); |
mpnt | 437 | mm/mmap.c | mpnt = prev; |
mpnt | 449 | mm/mmap.c | struct vm_area_struct * mpnt; |
mpnt | 454 | mm/mmap.c | mpnt = (struct vm_area_struct * ) kmalloc(sizeof(struct vm_area_struct), GFP_KERNEL); |
mpnt | 455 | mm/mmap.c | if (!mpnt) |
mpnt | 458 | mm/mmap.c | mpnt->vm_task = current; |
mpnt | 459 | mm/mmap.c | mpnt->vm_start = addr; |
mpnt | 460 | mm/mmap.c | mpnt->vm_end = addr + len; |
mpnt | 461 | mm/mmap.c | mpnt->vm_page_prot = mask; |
mpnt | 462 | mm/mmap.c | mpnt->vm_share = NULL; |
mpnt | 463 | mm/mmap.c | mpnt->vm_inode = NULL; |
mpnt | 464 | mm/mmap.c | mpnt->vm_offset = 0; |
mpnt | 465 | mm/mmap.c | mpnt->vm_ops = NULL; |
mpnt | 466 | mm/mmap.c | insert_vm_struct(current, mpnt); |
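
Read as a whole, the references above fall into three recurring patterns: the mmap paths (drivers/char/mem.c, fs/nfs/mmap.c, mm/mmap.c, fs/exec.c, fs/binfmt_elf.c) kmalloc() a struct vm_area_struct, fill in vm_task, vm_start, vm_end, vm_page_prot, vm_share, vm_inode, vm_offset and vm_ops, then hand it to insert_vm_struct(); the fault-handling and /proc paths walk the task's sorted mmap list looking for the area covering an address; and the exec/exit paths walk the list once more, calling vm_ops->close() where present and kfree()-ing each node. The sketch below is a minimal userspace illustration of those three patterns, not the kernel code itself: struct vm_area, struct task, map_area(), find_area() and unmap_all() are simplified stand-ins invented for this example (no vm_task, vm_share or vm_inode), and it compiles on its own with a C99 compiler.

```c
/*
 * Userspace sketch of the mpnt usage pattern in the listing above.
 * The types and helpers here are simplified mocks, NOT the kernel's
 * real vm_area_struct / insert_vm_struct.
 */
#include <stdio.h>
#include <stdlib.h>

struct vm_area {                          /* stand-in for struct vm_area_struct */
	unsigned long vm_start;
	unsigned long vm_end;
	unsigned long vm_offset;
	struct vm_area *vm_next;
	void (*close)(struct vm_area *);  /* stand-in for vm_ops->close */
};

struct task {                             /* stand-in for current->mm */
	struct vm_area *mmap;             /* head of the list, sorted by vm_start */
};

/* Mirror of the "kmalloc + fill fields + insert_vm_struct" sequence. */
static struct vm_area *map_area(struct task *tsk, unsigned long addr,
				unsigned long len, unsigned long off)
{
	struct vm_area *mpnt, **p;

	mpnt = malloc(sizeof(*mpnt));     /* kmalloc(..., GFP_KERNEL) in the kernel */
	if (!mpnt)
		return NULL;
	mpnt->vm_start = addr;
	mpnt->vm_end = addr + len;
	mpnt->vm_offset = off;
	mpnt->close = NULL;

	/* insert_vm_struct(): link the node in, keeping vm_start order */
	for (p = &tsk->mmap; *p; p = &(*p)->vm_next)
		if ((*p)->vm_start > mpnt->vm_start)
			break;
	mpnt->vm_next = *p;
	*p = mpnt;
	return mpnt;
}

/* Mirror of the lookup loops in mm/memory.c and fs/proc: which area,
 * if any, covers addr? */
static struct vm_area *find_area(struct task *tsk, unsigned long addr)
{
	struct vm_area *mpnt;

	for (mpnt = tsk->mmap; mpnt; mpnt = mpnt->vm_next) {
		if (addr < mpnt->vm_start)
			break;                    /* sorted list: nothing further can match */
		if (addr < mpnt->vm_end)
			return mpnt;
	}
	return NULL;
}

/* Mirror of the teardown loops in fs/exec.c and kernel/exit.c. */
static void unmap_all(struct task *tsk)
{
	struct vm_area *mpnt = tsk->mmap;

	while (mpnt) {
		struct vm_area *next = mpnt->vm_next;
		if (mpnt->close)
			mpnt->close(mpnt);        /* vm_ops->close(mpnt) in the kernel */
		free(mpnt);                       /* kfree(mpnt) in the kernel */
		mpnt = next;
	}
	tsk->mmap = NULL;
}

int main(void)
{
	struct task tsk = { .mmap = NULL };

	map_area(&tsk, 0x40000000UL, 0x2000, 0);
	map_area(&tsk, 0x08048000UL, 0x1000, 0);

	for (struct vm_area *m = tsk.mmap; m; m = m->vm_next)
		printf("%08lx-%08lx\n", m->vm_start, m->vm_end);

	struct vm_area *hit = find_area(&tsk, 0x40000800UL);
	printf("covering area: %s\n", hit ? "found" : "none");

	unmap_all(&tsk);
	return 0;
}
```

The kernel's insert_vm_struct() in mm/mmap.c additionally rejects overlapping areas, and merge_segments() later coalesces adjacent compatible ones; both are omitted here to keep the sketch short.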