tag  line  file  source code
vma  164  arch/alpha/kernel/ptrace.c  struct vm_area_struct * vma, unsigned long addr)
vma  173  arch/alpha/kernel/ptrace.c  pgdir = pgd_offset(vma->vm_mm, addr);
vma  175  arch/alpha/kernel/ptrace.c  do_no_page(tsk, vma, addr, 0);
vma  185  arch/alpha/kernel/ptrace.c  do_no_page(tsk, vma, addr, 0);
vma  195  arch/alpha/kernel/ptrace.c  do_no_page(tsk, vma, addr, 0);
vma  215  arch/alpha/kernel/ptrace.c  static void put_long(struct task_struct * tsk, struct vm_area_struct * vma,
vma  224  arch/alpha/kernel/ptrace.c  pgdir = pgd_offset(vma->vm_mm, addr);
vma  226  arch/alpha/kernel/ptrace.c  do_no_page(tsk, vma, addr, 1);
vma  236  arch/alpha/kernel/ptrace.c  do_no_page(tsk, vma, addr, 1);
vma  246  arch/alpha/kernel/ptrace.c  do_no_page(tsk, vma, addr, 1);
vma  251  arch/alpha/kernel/ptrace.c  do_wp_page(tsk, vma, addr, 1);
vma  259  arch/alpha/kernel/ptrace.c  set_pte(pgtable, pte_mkdirty(mk_pte(page, vma->vm_page_prot)));
vma  266  arch/alpha/kernel/ptrace.c  struct vm_area_struct * vma;
vma  269  arch/alpha/kernel/ptrace.c  vma = find_vma(tsk,addr);
vma  270  arch/alpha/kernel/ptrace.c  if (!vma)
vma  272  arch/alpha/kernel/ptrace.c  if (vma->vm_start <= addr)
vma  273  arch/alpha/kernel/ptrace.c  return vma;
vma  274  arch/alpha/kernel/ptrace.c  if (!(vma->vm_flags & VM_GROWSDOWN))
vma  276  arch/alpha/kernel/ptrace.c  if (vma->vm_end - addr > tsk->rlim[RLIMIT_STACK].rlim_cur)
vma  278  arch/alpha/kernel/ptrace.c  vma->vm_offset -= vma->vm_start - addr;
vma  279  arch/alpha/kernel/ptrace.c  vma->vm_start = addr;
vma  280  arch/alpha/kernel/ptrace.c  return vma;
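
Note: the arch/alpha/kernel/ptrace.c entries at lines 266-280 above outline find_extend_vma(), which grows a VM_GROWSDOWN mapping so a traced address becomes valid before get_long()/put_long() touch it. A minimal sketch reconstructed from those fragments follows; the NULL returns on the failure branches and the page alignment of addr are assumptions not shown in the index.

static struct vm_area_struct * find_extend_vma(struct task_struct * tsk,
	unsigned long addr)
{
	struct vm_area_struct * vma;

	addr &= PAGE_MASK;			/* assumed: grow by whole pages */
	vma = find_vma(tsk, addr);
	if (!vma)
		return NULL;
	if (vma->vm_start <= addr)
		return vma;			/* addr is already mapped */
	if (!(vma->vm_flags & VM_GROWSDOWN))
		return NULL;			/* hole, and not below a stack vma */
	if (vma->vm_end - addr > tsk->rlim[RLIMIT_STACK].rlim_cur)
		return NULL;			/* would exceed the stack rlimit */
	/* grow the stack vma downwards until it covers addr */
	vma->vm_offset -= vma->vm_start - addr;
	vma->vm_start = addr;
	return vma;
}
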
vma  290  arch/alpha/kernel/ptrace.c  struct vm_area_struct * vma = find_extend_vma(tsk, addr);
vma  293  arch/alpha/kernel/ptrace.c  if (!vma) {
vma  298  arch/alpha/kernel/ptrace.c  struct vm_area_struct * vma_high = vma;
vma  301  arch/alpha/kernel/ptrace.c  if (addr + sizeof(long) >= vma->vm_end) {
vma  302  arch/alpha/kernel/ptrace.c  vma_high = vma->vm_next;
vma  303  arch/alpha/kernel/ptrace.c  if (!vma_high || vma_high->vm_start != vma->vm_end)
vma  308  arch/alpha/kernel/ptrace.c  low = get_long(tsk, vma, addr);
vma  318  arch/alpha/kernel/ptrace.c  long l = get_long(tsk, vma, addr);
vma  333  arch/alpha/kernel/ptrace.c  struct vm_area_struct * vma = find_extend_vma(tsk, addr);
vma  335  arch/alpha/kernel/ptrace.c  if (!vma)
vma  339  arch/alpha/kernel/ptrace.c  struct vm_area_struct * vma_high = vma;
vma  341  arch/alpha/kernel/ptrace.c  if (addr + sizeof(long) >= vma->vm_end) {
vma  342  arch/alpha/kernel/ptrace.c  vma_high = vma->vm_next;
vma  343  arch/alpha/kernel/ptrace.c  if (!vma_high || vma_high->vm_start != vma->vm_end)
vma  348  arch/alpha/kernel/ptrace.c  low  = get_long(tsk, vma, addr);
vma  354  arch/alpha/kernel/ptrace.c  put_long(tsk, vma, addr, low);
vma  357  arch/alpha/kernel/ptrace.c  put_long(tsk, vma, addr, data);
vma  48   arch/alpha/mm/fault.c  struct vm_area_struct * vma;
vma  50   arch/alpha/mm/fault.c  vma = find_vma(current, address);
vma  51   arch/alpha/mm/fault.c  if (!vma)
vma  53   arch/alpha/mm/fault.c  if (vma->vm_start <= address)
vma  55   arch/alpha/mm/fault.c  if (!(vma->vm_flags & VM_GROWSDOWN))
vma  57   arch/alpha/mm/fault.c  if (expand_stack(vma, address))
vma  65   arch/alpha/mm/fault.c  if (!(vma->vm_flags & VM_EXEC))
vma  69   arch/alpha/mm/fault.c  if (!(vma->vm_flags & (VM_READ | VM_WRITE)))
vma  72   arch/alpha/mm/fault.c  if (!(vma->vm_flags & VM_WRITE))
vma  76   arch/alpha/mm/fault.c  handle_mm_fault(vma, address, cause > 0);
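
Note: the arch/alpha/mm/fault.c entries at lines 48-76 above give the usual shape of the vma checks in the page-fault handler. A sketch of that control flow follows; the goto labels and the interpretation of cause (negative for instruction fetch, zero for load, positive for store) are assumptions drawn from the handle_mm_fault(vma, address, cause > 0) call, not from the index itself.

	vma = find_vma(current, address);
	if (!vma)
		goto bad_area;
	if (vma->vm_start <= address)
		goto good_area;
	if (!(vma->vm_flags & VM_GROWSDOWN))
		goto bad_area;
	if (expand_stack(vma, address))
		goto bad_area;
good_area:
	if (cause < 0) {			/* instruction fetch */
		if (!(vma->vm_flags & VM_EXEC))
			goto bad_area;
	} else if (!cause) {			/* load */
		if (!(vma->vm_flags & (VM_READ | VM_WRITE)))
			goto bad_area;
	} else {				/* store */
		if (!(vma->vm_flags & VM_WRITE))
			goto bad_area;
	}
	handle_mm_fault(vma, address, cause > 0);
	return;
bad_area:
	/* deliver SIGSEGV or handle a kernel-mode fault (not shown) */
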
vma  87   arch/i386/kernel/ptrace.c  struct vm_area_struct * vma, unsigned long addr)
vma  95   arch/i386/kernel/ptrace.c  pgdir = pgd_offset(vma->vm_mm, addr);
vma  97   arch/i386/kernel/ptrace.c  do_no_page(tsk, vma, addr, 0);
vma  107  arch/i386/kernel/ptrace.c  do_no_page(tsk, vma, addr, 0);
vma  117  arch/i386/kernel/ptrace.c  do_no_page(tsk, vma, addr, 0);
vma  137  arch/i386/kernel/ptrace.c  static void put_long(struct task_struct * tsk, struct vm_area_struct * vma, unsigned long addr,
vma  146  arch/i386/kernel/ptrace.c  pgdir = pgd_offset(vma->vm_mm, addr);
vma  148  arch/i386/kernel/ptrace.c  do_no_page(tsk, vma, addr, 1);
vma  158  arch/i386/kernel/ptrace.c  do_no_page(tsk, vma, addr, 1);
vma  168  arch/i386/kernel/ptrace.c  do_no_page(tsk, vma, addr, 1);
vma  173  arch/i386/kernel/ptrace.c  do_wp_page(tsk, vma, addr, 1);
vma  181  arch/i386/kernel/ptrace.c  set_pte(pgtable, pte_mkdirty(mk_pte(page, vma->vm_page_prot)));
vma  187  arch/i386/kernel/ptrace.c  struct vm_area_struct * vma;
vma  190  arch/i386/kernel/ptrace.c  vma = find_vma(tsk,addr);
vma  191  arch/i386/kernel/ptrace.c  if (!vma)
vma  193  arch/i386/kernel/ptrace.c  if (vma->vm_start <= addr)
vma  194  arch/i386/kernel/ptrace.c  return vma;
vma  195  arch/i386/kernel/ptrace.c  if (!(vma->vm_flags & VM_GROWSDOWN))
vma  197  arch/i386/kernel/ptrace.c  if (vma->vm_end - addr > tsk->rlim[RLIMIT_STACK].rlim_cur)
vma  199  arch/i386/kernel/ptrace.c  vma->vm_offset -= vma->vm_start - addr;
vma  200  arch/i386/kernel/ptrace.c  vma->vm_start = addr;
vma  201  arch/i386/kernel/ptrace.c  return vma;
vma  211  arch/i386/kernel/ptrace.c  struct vm_area_struct * vma = find_extend_vma(tsk, addr);
vma  213  arch/i386/kernel/ptrace.c  if (!vma)
vma  217  arch/i386/kernel/ptrace.c  struct vm_area_struct * vma_high = vma;
vma  219  arch/i386/kernel/ptrace.c  if (addr + sizeof(long) >= vma->vm_end) {
vma  220  arch/i386/kernel/ptrace.c  vma_high = vma->vm_next;
vma  221  arch/i386/kernel/ptrace.c  if (!vma_high || vma_high->vm_start != vma->vm_end)
vma  224  arch/i386/kernel/ptrace.c  low = get_long(tsk, vma, addr & ~(sizeof(long)-1));
vma  242  arch/i386/kernel/ptrace.c  *result = get_long(tsk, vma, addr);
vma  253  arch/i386/kernel/ptrace.c  struct vm_area_struct * vma = find_extend_vma(tsk, addr);
vma  255  arch/i386/kernel/ptrace.c  if (!vma)
vma  259  arch/i386/kernel/ptrace.c  struct vm_area_struct * vma_high = vma;
vma  261  arch/i386/kernel/ptrace.c  if (addr + sizeof(long) >= vma->vm_end) {
vma  262  arch/i386/kernel/ptrace.c  vma_high = vma->vm_next;
vma  263  arch/i386/kernel/ptrace.c  if (!vma_high || vma_high->vm_start != vma->vm_end)
vma  266  arch/i386/kernel/ptrace.c  low = get_long(tsk, vma, addr & ~(sizeof(long)-1));
vma  291  arch/i386/kernel/ptrace.c  put_long(tsk, vma, addr & ~(sizeof(long)-1),low);
vma  294  arch/i386/kernel/ptrace.c  put_long(tsk, vma, addr, data);
vma  36   arch/i386/mm/fault.c  struct vm_area_struct * vma;
vma  42   arch/i386/mm/fault.c  vma = find_vma(current, address);
vma  43   arch/i386/mm/fault.c  if (!vma)
vma  45   arch/i386/mm/fault.c  if (vma->vm_start <= address)
vma  47   arch/i386/mm/fault.c  if (!(vma->vm_flags & VM_GROWSDOWN))
vma  59   arch/i386/mm/fault.c  if (expand_stack(vma, address))
vma  70   arch/i386/mm/fault.c  if (!(vma->vm_flags & VM_WRITE))
vma  76   arch/i386/mm/fault.c  if (!(vma->vm_flags & (VM_READ | VM_EXEC)))
vma  92   arch/i386/mm/fault.c  do_wp_page(current, vma, address, error_code & 2);
vma  95   arch/i386/mm/fault.c  do_no_page(current, vma, address, error_code & 2);
vma  85   arch/mips/kernel/ptrace.c  static unsigned long get_long(struct vm_area_struct * vma, unsigned long addr)
vma  92   arch/mips/kernel/ptrace.c  pgdir = PAGE_DIR_OFFSET(vma->vm_mm, addr);
vma  94   arch/mips/kernel/ptrace.c  do_no_page(vma, addr, 0);
vma  104  arch/mips/kernel/ptrace.c  do_no_page(vma, addr, 0);
vma  124  arch/mips/kernel/ptrace.c  static void put_long(struct vm_area_struct * vma, unsigned long addr,
vma  132  arch/mips/kernel/ptrace.c  pgdir = PAGE_DIR_OFFSET(vma->vm_mm, addr);
vma  134  arch/mips/kernel/ptrace.c  do_no_page(vma, addr, 1);
vma  144  arch/mips/kernel/ptrace.c  do_no_page(vma, addr, 1);
vma  149  arch/mips/kernel/ptrace.c  do_wp_page(vma, addr, 1);
vma  159  arch/mips/kernel/ptrace.c  set_pte(pgtable, pte_mkdirty(mk_pte(page, vma->vm_page_prot)));
vma  165  arch/mips/kernel/ptrace.c  struct vm_area_struct * vma;
vma  168  arch/mips/kernel/ptrace.c  vma = find_vma(tsk, addr);
vma  169  arch/mips/kernel/ptrace.c  if (!vma)
vma  171  arch/mips/kernel/ptrace.c  if (vma->vm_start <= addr)
vma  172  arch/mips/kernel/ptrace.c  return vma;
vma  173  arch/mips/kernel/ptrace.c  if (!(vma->vm_flags & VM_GROWSDOWN))
vma  175  arch/mips/kernel/ptrace.c  if (vma->vm_end - addr > tsk->rlim[RLIMIT_STACK].rlim_cur)
vma  177  arch/mips/kernel/ptrace.c  vma->vm_offset -= vma->vm_start - addr;
vma  178  arch/mips/kernel/ptrace.c  vma->vm_start = addr;
vma  179  arch/mips/kernel/ptrace.c  return vma;
vma  189  arch/mips/kernel/ptrace.c  struct vm_area_struct * vma = find_extend_vma(tsk, addr);
vma  191  arch/mips/kernel/ptrace.c  if (!vma)
vma  195  arch/mips/kernel/ptrace.c  struct vm_area_struct * vma_high = vma;
vma  197  arch/mips/kernel/ptrace.c  if (addr + sizeof(long) >= vma->vm_end) {
vma  198  arch/mips/kernel/ptrace.c  vma_high = vma->vm_next;
vma  199  arch/mips/kernel/ptrace.c  if (!vma_high || vma_high->vm_start != vma->vm_end)
vma  202  arch/mips/kernel/ptrace.c  low = get_long(vma, addr & ~(sizeof(long)-1));
vma  220  arch/mips/kernel/ptrace.c  *result = get_long(vma, addr);
vma  231  arch/mips/kernel/ptrace.c  struct vm_area_struct * vma = find_extend_vma(tsk, addr);
vma  233  arch/mips/kernel/ptrace.c  if (!vma)
vma  237  arch/mips/kernel/ptrace.c  struct vm_area_struct * vma_high = vma;
vma  239  arch/mips/kernel/ptrace.c  if (addr + sizeof(long) >= vma->vm_end) {
vma  240  arch/mips/kernel/ptrace.c  vma_high = vma->vm_next;
vma  241  arch/mips/kernel/ptrace.c  if (!vma_high || vma_high->vm_start != vma->vm_end)
vma  244  arch/mips/kernel/ptrace.c  low = get_long(vma, addr & ~(sizeof(long)-1));
vma  269  arch/mips/kernel/ptrace.c  put_long(vma, addr & ~(sizeof(long)-1),low);
vma  272  arch/mips/kernel/ptrace.c  put_long(vma, addr, data);
vma  39   arch/mips/mm/fault.c  struct vm_area_struct * vma;
vma  47   arch/mips/mm/fault.c  for (vma = current->mm->mmap ; ; vma = vma->vm_next) {
vma  48   arch/mips/mm/fault.c  if (!vma)
vma  50   arch/mips/mm/fault.c  if (vma->vm_end > address)
vma  53   arch/mips/mm/fault.c  if (vma->vm_start <= address)
vma  55   arch/mips/mm/fault.c  if (!(vma->vm_flags & VM_GROWSDOWN))
vma  57   arch/mips/mm/fault.c  if (vma->vm_end - address > current->rlim[RLIMIT_STACK].rlim_cur)
vma  59   arch/mips/mm/fault.c  vma->vm_offset -= vma->vm_start - (address & PAGE_MASK);
vma  60   arch/mips/mm/fault.c  vma->vm_start = (address & PAGE_MASK);
vma  70   arch/mips/mm/fault.c  if (!(vma->vm_flags & VM_WRITE))
vma  76   arch/mips/mm/fault.c  if (!(vma->vm_flags & (VM_READ | VM_EXEC)))
vma  80   arch/mips/mm/fault.c  do_wp_page(vma, address, error_code & 2);
vma  83   arch/mips/mm/fault.c  do_no_page(vma, address, error_code & 2);
vma  139  arch/ppc/mm/fault.c  struct vm_area_struct * vma;
vma  143  arch/ppc/mm/fault.c  for (vma = current->mm->mmap ; ; vma = vma->vm_next)
vma  145  arch/ppc/mm/fault.c  if (!vma)
vma  161  arch/ppc/mm/fault.c  if (vma->vm_end > address)
vma  165  arch/ppc/mm/fault.c  vma = find_vma(current, address);
vma  166  arch/ppc/mm/fault.c  if (!vma)
vma  169  arch/ppc/mm/fault.c  if (vma->vm_start <= address){
vma  172  arch/ppc/mm/fault.c  if (!(vma->vm_flags & VM_GROWSDOWN))
vma  177  arch/ppc/mm/fault.c  if (vma->vm_end - address > current->rlim[RLIMIT_STACK].rlim_cur)
vma  181  arch/ppc/mm/fault.c  vma->vm_offset -= vma->vm_start - (address & PAGE_MASK);
vma  182  arch/ppc/mm/fault.c  vma->vm_start = (address & PAGE_MASK);
vma  193  arch/ppc/mm/fault.c  if (!(vma->vm_flags & VM_WRITE))
vma  197  arch/ppc/mm/fault.c  current,address,vma->vm_flags,current->mm,vma,vma->vm_start,vma->vm_end);
vma  207  arch/ppc/mm/fault.c  if (!(vma->vm_flags & (VM_READ | VM_EXEC)))
vma  210  arch/ppc/mm/fault.c  _printk("vma = %x\n", vma);
vma  211  arch/ppc/mm/fault.c  _printk("vma->vm_flags = %x\n", vma->vm_flags);
vma  216  arch/ppc/mm/fault.c  vma, VM_READ,VM_EXEC);
vma  218  arch/ppc/mm/fault.c  vma->vm_start, vma->vm_end);
vma  221  arch/ppc/mm/fault.c  printk("vma->vm_flags = %x\n", vma->vm_flags);
vma  227  arch/ppc/mm/fault.c  handle_mm_fault(vma, address, error_code & 2);
vma  245  arch/ppc/mm/fault.c  current,address,vma->vm_flags,current->mm,vma,vma->vm_start,vma->vm_end);
vma  130  arch/sparc/mm/fault.c  struct vm_area_struct *vma;
vma  157  arch/sparc/mm/fault.c  vma = find_vma(current, address);
vma  158  arch/sparc/mm/fault.c  if(!vma)
vma  160  arch/sparc/mm/fault.c  if(vma->vm_start <= address)
vma  162  arch/sparc/mm/fault.c  if(!(vma->vm_flags & VM_GROWSDOWN))
vma  164  arch/sparc/mm/fault.c  if(expand_stack(vma, address))
vma  172  arch/sparc/mm/fault.c  if(!(vma->vm_flags & VM_WRITE))
vma  176  arch/sparc/mm/fault.c  if(!(vma->vm_flags & (VM_READ | VM_EXEC)))
vma  179  arch/sparc/mm/fault.c  handle_mm_fault(vma, address, error_code & FAULT_CODE_WRITE);
vma  32   arch/sparc/mm/loadmmu.c  void (*update_mmu_cache)(struct vm_area_struct *vma, unsigned long address, pte_t pte);
vma  805  arch/sparc/mm/srmmu.c  void srmmu_update_mmu_cache(struct vm_area_struct * vma,
vma  681  arch/sparc/mm/sun4c.c  void sun4c_update_mmu_cache(struct vm_area_struct * vma,
vma  95   drivers/char/mem.c  static int mmap_mem(struct inode * inode, struct file * file, struct vm_area_struct * vma)
vma  97   drivers/char/mem.c  if (vma->vm_offset & ~PAGE_MASK)
vma  106  drivers/char/mem.c  if (x86 > 3 && vma->vm_offset >= high_memory)
vma  107  drivers/char/mem.c  pgprot_val(vma->vm_page_prot) |= _PAGE_PCD;
vma  109  drivers/char/mem.c  if (remap_page_range(vma->vm_start, vma->vm_offset, vma->vm_end - vma->vm_start, vma->vm_page_prot))
vma  111  drivers/char/mem.c  vma->vm_inode = inode;
vma  179  drivers/char/mem.c  static int mmap_zero(struct inode * inode, struct file * file, struct vm_area_struct * vma)
vma  181  drivers/char/mem.c  if (vma->vm_flags & VM_SHARED)
vma  183  drivers/char/mem.c  if (zeromap_page_range(vma->vm_start, vma->vm_end - vma->vm_start, vma->vm_page_prot))
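
Note: the drivers/char/mem.c entries at lines 95-111 above show the /dev/mem mmap handler. A sketch assembled from those fragments follows; the error codes and the inode->i_count increment are assumptions.

static int mmap_mem(struct inode * inode, struct file * file,
	struct vm_area_struct * vma)
{
	if (vma->vm_offset & ~PAGE_MASK)
		return -ENXIO;		/* physical offset must be page aligned */
	/* map anything above real RAM uncacheable on i486 and later */
	if (x86 > 3 && vma->vm_offset >= high_memory)
		pgprot_val(vma->vm_page_prot) |= _PAGE_PCD;
	if (remap_page_range(vma->vm_start, vma->vm_offset,
			     vma->vm_end - vma->vm_start, vma->vm_page_prot))
		return -EAGAIN;
	vma->vm_inode = inode;
	inode->i_count++;		/* assumed: pin the device inode */
	return 0;
}
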
vma  851   fs/binfmt_elf.c  static inline int maydump(struct vm_area_struct *vma)
vma  854   fs/binfmt_elf.c  if (vma->vm_flags & (VM_WRITE|VM_GROWSUP|VM_GROWSDOWN))
vma  856   fs/binfmt_elf.c  if (vma->vm_flags & (VM_READ|VM_EXEC|VM_EXECUTABLE|VM_SHARED))
vma  952   fs/binfmt_elf.c  struct vm_area_struct *vma;
vma  973   fs/binfmt_elf.c  for(vma = current->mm->mmap; vma != NULL; vma = vma->vm_next) {
vma  974   fs/binfmt_elf.c  int sz = vma->vm_end-vma->vm_start;
vma  976   fs/binfmt_elf.c  if (!maydump(vma))
vma  1156  fs/binfmt_elf.c  for(vma = current->mm->mmap, i = 0;
vma  1157  fs/binfmt_elf.c  i < segs && vma != NULL; vma = vma->vm_next) {
vma  1161  fs/binfmt_elf.c  if (!maydump(vma))
vma  1165  fs/binfmt_elf.c  sz = vma->vm_end - vma->vm_start;
vma  1169  fs/binfmt_elf.c  phdr.p_vaddr = vma->vm_start;
vma  1174  fs/binfmt_elf.c  phdr.p_flags = vma->vm_flags & VM_READ ? PF_R : 0;
vma  1175  fs/binfmt_elf.c  if (vma->vm_flags & VM_WRITE) phdr.p_flags |= PF_W;
vma  1176  fs/binfmt_elf.c  if (vma->vm_flags & VM_EXEC) phdr.p_flags |= PF_X;
vma  1190  fs/binfmt_elf.c  for(i = 0, vma = current->mm->mmap;
vma  1191  fs/binfmt_elf.c  i < segs && vma != NULL;
vma  1192  fs/binfmt_elf.c  vma = vma->vm_next) {
vma  1193  fs/binfmt_elf.c  unsigned long addr = vma->vm_start;
vma  1194  fs/binfmt_elf.c  unsigned long len = vma->vm_end - vma->vm_start;
vma  1196  fs/binfmt_elf.c  if (!maydump(vma))
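
Note: the fs/binfmt_elf.c entries at lines 851-856 above show the two flag tests that decide which mappings end up in an ELF core dump. A sketch of maydump() follows; the return values on each branch are assumptions, only the tests themselves appear in the index.

static inline int maydump(struct vm_area_struct *vma)
{
	/* writable data and stack segments carry unique state: dump them */
	if (vma->vm_flags & (VM_WRITE|VM_GROWSUP|VM_GROWSDOWN))
		return 1;
	/* read-only text and shared file mappings can be recovered from
	 * the executable or the mapped file, so leave them out */
	if (vma->vm_flags & (VM_READ|VM_EXEC|VM_EXECUTABLE|VM_SHARED))
		return 0;
	return 1;
}
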
vma  91   fs/msdos/mmap.c  int msdos_mmap(struct inode * inode, struct file * file, struct vm_area_struct * vma)
vma  93   fs/msdos/mmap.c  if (vma->vm_flags & VM_SHARED)  /* only PAGE_COW or read-only supported now */
vma  95   fs/msdos/mmap.c  if (vma->vm_offset & (inode->i_sb->s_blocksize - 1))
vma  104  fs/msdos/mmap.c  vma->vm_inode = inode;
vma  106  fs/msdos/mmap.c  vma->vm_ops = &msdos_file_mmap;
vma  31   fs/namei.c  struct vm_area_struct * vma;
vma  35   fs/namei.c  vma = find_vma(current, address);
vma  36   fs/namei.c  if (!vma || vma->vm_start > address || !(vma->vm_flags & VM_READ))
vma  38   fs/namei.c  address = vma->vm_end - address;
vma  41   fs/namei.c  if (vma->vm_next && vma->vm_next->vm_start == vma->vm_end &&
vma  42   fs/namei.c  (vma->vm_next->vm_flags & VM_READ))
vma  95   fs/nfs/mmap.c  int nfs_mmap(struct inode * inode, struct file * file, struct vm_area_struct * vma)
vma  97   fs/nfs/mmap.c  if (vma->vm_flags & VM_SHARED)  /* only PAGE_COW or read-only supported now */
vma  106  fs/nfs/mmap.c  vma->vm_inode = inode;
vma  108  fs/nfs/mmap.c  vma->vm_ops = &nfs_file_mmap;
vma  463  fs/proc/array.c  struct vm_area_struct *vma = tsk->mm->mmap;
vma  464  fs/proc/array.c  while (vma) {
vma  465  fs/proc/array.c  vsize += vma->vm_end - vma->vm_start;
vma  466  fs/proc/array.c  vma = vma->vm_next;
vma  622  fs/proc/array.c  struct vm_area_struct * vma = tsk->mm->mmap;
vma  624  fs/proc/array.c  while (vma) {
vma  625  fs/proc/array.c  pgd_t *pgd = pgd_offset(tsk->mm, vma->vm_start);
vma  628  fs/proc/array.c  statm_pgd_range(pgd, vma->vm_start, vma->vm_end, &pages, &shared, &dirty, &total);
vma  633  fs/proc/array.c  if (vma->vm_flags & VM_EXECUTABLE)
vma  635  fs/proc/array.c  else if (vma->vm_flags & VM_GROWSDOWN)
vma  637  fs/proc/array.c  else if (vma->vm_end > 0x60000000)
vma  641  fs/proc/array.c  vma = vma->vm_next;
vma  103  fs/proc/link.c  struct vm_area_struct * vma;
vma  106  fs/proc/link.c  vma = p->mm->mmap;
vma  107  fs/proc/link.c  while (vma) {
vma  108  fs/proc/link.c  if (vma->vm_flags & VM_EXECUTABLE) {
vma  109  fs/proc/link.c  new_inode = vma->vm_inode;
vma  112  fs/proc/link.c  vma = vma->vm_next;
vma  28   fs/proc/mem.c  struct vm_area_struct *vma;
vma  31   fs/proc/mem.c  vma = find_vma(tsk, addr);
vma  32   fs/proc/mem.c  if (!vma)
vma  34   fs/proc/mem.c  if (vma->vm_start > addr)
vma  36   fs/proc/mem.c  if (!(vma->vm_flags & VM_READ))
vma  38   fs/proc/mem.c  while ((retval = vma->vm_end - addr) < count) {
vma  39   fs/proc/mem.c  struct vm_area_struct *next = vma->vm_next;
vma  42   fs/proc/mem.c  if (vma->vm_end != next->vm_start)
vma  46   fs/proc/mem.c  vma = next;
vma  204  fs/proc/mem.c  struct vm_area_struct * vma)
vma  232  fs/proc/mem.c  stmp = vma->vm_offset;
vma  233  fs/proc/mem.c  while (stmp < vma->vm_offset + (vma->vm_end - vma->vm_start)) {
vma  267  fs/proc/mem.c  stmp    = vma->vm_offset;
vma  268  fs/proc/mem.c  dtmp    = vma->vm_start;
vma  270  fs/proc/mem.c  while (dtmp < vma->vm_end) {
vma  289  fs/proc/mem.c  if ((vma->vm_flags & VM_WRITE) && !pte_write(*src_table))
vma  100  fs/smbfs/mmap.c  smb_mmap(struct inode * inode, struct file * file, struct vm_area_struct * vma)
vma  105  fs/smbfs/mmap.c  if (vma->vm_flags & VM_SHARED)
vma  114  fs/smbfs/mmap.c  vma->vm_inode = inode;
vma  116  fs/smbfs/mmap.c  vma->vm_ops = &smb_file_mmap;
vma  553  fs/super.c  struct vm_area_struct * vma;
vma  559  fs/super.c  vma = find_vma(current, (unsigned long) data);
vma  560  fs/super.c  if (!vma || (unsigned long) data < vma->vm_start)
vma  562  fs/super.c  i = vma->vm_end - (unsigned long) data;
vma  401  include/asm-alpha/pgtable.h  extern inline void update_mmu_cache(struct vm_area_struct * vma,
vma  341  include/asm-i386/pgtable.h  extern inline void update_mmu_cache(struct vm_area_struct * vma,
vma  483  include/asm-ppc/pgtable.h  extern inline void update_mmu_cache(struct vm_area_struct * vma,
vma  487  include/asm-ppc/pgtable.h  printk("Update MMU cache - VMA: %x, Addr: %x, PTE: %x\n", vma, address, *(long *)&_pte);
vma  488  include/asm-ppc/pgtable.h  _printk("Update MMU cache - VMA: %x, Addr: %x, PTE: %x\n", vma, address, *(long *)&_pte);
vma  270  include/asm-sparc/pgtable.h  extern void (*update_mmu_cache)(struct vm_area_struct *vma, unsigned long address, pte_t pte);
vma  170  include/linux/mm.h  extern int copy_page_range(struct mm_struct *dst, struct mm_struct *src, struct vm_area_struct *vma);
vma  175  include/linux/mm.h  extern void handle_mm_fault(struct vm_area_struct *vma, unsigned long address, int write_access);
vma  176  include/linux/mm.h  extern void do_wp_page(struct task_struct * tsk, struct vm_area_struct * vma, unsigned long address, int write_access);
vma  177  include/linux/mm.h  extern void do_no_page(struct task_struct * tsk, struct vm_area_struct * vma, unsigned long address, int write_access);
vma  233  include/linux/mm.h  static inline int expand_stack(struct vm_area_struct * vma, unsigned long address)
vma  238  include/linux/mm.h  if (vma->vm_end - address > current->rlim[RLIMIT_STACK].rlim_cur)
vma  240  include/linux/mm.h  grow = vma->vm_start - address;
vma  241  include/linux/mm.h  vma->vm_start = address;
vma  242  include/linux/mm.h  vma->vm_offset -= grow;
vma  243  include/linux/mm.h  vma->vm_mm->total_vm += grow >> PAGE_SHIFT;
vma  244  include/linux/mm.h  if (vma->vm_flags & VM_LOCKED)
vma  245  include/linux/mm.h  vma->vm_mm->locked_vm += grow >> PAGE_SHIFT;
vma  274  include/linux/mm.h  struct vm_area_struct * vma;
vma  276  include/linux/mm.h  vma = find_vma(task,start_addr);
vma  277  include/linux/mm.h  if (!vma || end_addr <= vma->vm_start)
vma  279  include/linux/mm.h  return vma;
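
Note: the include/linux/mm.h entries at lines 233-245 and 274-279 above correspond to two small inline helpers. Sketches reconstructed from those fragments follow; the page alignment of address, the -ENOMEM return value, and the find_vma_intersection() name for the second helper are assumptions not shown in the index.

static inline int expand_stack(struct vm_area_struct * vma, unsigned long address)
{
	unsigned long grow;

	address &= PAGE_MASK;			/* assumed: grow by whole pages */
	if (vma->vm_end - address > current->rlim[RLIMIT_STACK].rlim_cur)
		return -ENOMEM;			/* assumed error value */
	grow = vma->vm_start - address;
	vma->vm_start = address;
	vma->vm_offset -= grow;
	vma->vm_mm->total_vm += grow >> PAGE_SHIFT;
	if (vma->vm_flags & VM_LOCKED)
		vma->vm_mm->locked_vm += grow >> PAGE_SHIFT;
	return 0;
}

/* first vma intersecting start_addr..end_addr-1, or NULL (name assumed) */
static inline struct vm_area_struct * find_vma_intersection(
	struct task_struct * task, unsigned long start_addr, unsigned long end_addr)
{
	struct vm_area_struct * vma;

	vma = find_vma(task, start_addr);
	if (!vma || end_addr <= vma->vm_start)
		return NULL;
	return vma;
}
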
vma  120  include/linux/nfs_fs.h  extern int nfs_mmap(struct inode * inode, struct file * file, struct vm_area_struct * vma);
vma  180  include/linux/smb_fs.h  int smb_mmap(struct inode * inode, struct file * file, struct vm_area_struct * vma);
vma  74   mm/filemap.c  static int filemap_write_page(struct vm_area_struct * vma,
vma  95   mm/filemap.c  inode = vma->vm_inode;
vma  135  mm/filemap.c  int filemap_swapout(struct vm_area_struct * vma,
vma  145  mm/filemap.c  error = filemap_write_page(vma, offset, page);
vma  157  mm/filemap.c  static pte_t filemap_swapin(struct vm_area_struct * vma,
vma  165  mm/filemap.c  return mk_pte(page,vma->vm_page_prot);
vma  169  mm/filemap.c  static inline int filemap_sync_pte(pte_t * ptep, struct vm_area_struct *vma,
vma  199  mm/filemap.c  error = filemap_write_page(vma, address - vma->vm_start + vma->vm_offset, page);
vma  206  mm/filemap.c  struct vm_area_struct *vma, unsigned long offset, unsigned int flags)
vma  227  mm/filemap.c  error |= filemap_sync_pte(pte, vma, address + offset, flags);
vma  236  mm/filemap.c  struct vm_area_struct *vma, unsigned int flags)
vma  257  mm/filemap.c  error |= filemap_sync_pte_range(pmd, address, end - address, vma, offset, flags);
vma  264  mm/filemap.c  static int filemap_sync(struct vm_area_struct * vma, unsigned long address,
vma  273  mm/filemap.c  error |= filemap_sync_pmd_range(dir, address, end - address, vma, flags);
vma  284  mm/filemap.c  static void filemap_unmap(struct vm_area_struct *vma, unsigned long start, size_t len)
vma  286  mm/filemap.c  filemap_sync(vma, start, len, MS_ASYNC);
vma  327  mm/filemap.c  int generic_mmap(struct inode * inode, struct file * file, struct vm_area_struct * vma)
vma  331  mm/filemap.c  if ((vma->vm_flags & VM_SHARED) && (vma->vm_flags & VM_MAYWRITE)) {
vma  335  mm/filemap.c  if (vma->vm_offset & (PAGE_SIZE - 1))
vma  339  mm/filemap.c  if (vma->vm_offset & (inode->i_sb->s_blocksize - 1))
vma  350  mm/filemap.c  vma->vm_inode = inode;
vma  352  mm/filemap.c  vma->vm_ops = ops;
vma  361  mm/filemap.c  static int msync_interval(struct vm_area_struct * vma,
vma  364  mm/filemap.c  if (!vma->vm_inode)
vma  366  mm/filemap.c  if (vma->vm_ops->sync) {
vma  368  mm/filemap.c  error = vma->vm_ops->sync(vma, start, end-start, flags);
vma  372  mm/filemap.c  return file_fsync(vma->vm_inode, NULL);
vma  381  mm/filemap.c  struct vm_area_struct * vma;
vma  398  mm/filemap.c  vma = find_vma(current, start);
vma  402  mm/filemap.c  if (!vma)
vma  405  mm/filemap.c  if (start < vma->vm_start) {
vma  407  mm/filemap.c  start = vma->vm_start;
vma  410  mm/filemap.c  if (end <= vma->vm_end) {
vma  412  mm/filemap.c  error = msync_interval(vma, start, end, flags);
vma  419  mm/filemap.c  error = msync_interval(vma, start, vma->vm_end, flags);
vma  422  mm/filemap.c  start = vma->vm_end;
vma  423  mm/filemap.c  vma = vma->vm_next;
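
Note: the mm/filemap.c entries at lines 381-423 above trace the vma walk inside the msync system call. A sketch of that loop follows; the error values and the exact treatment of unmapped holes are assumptions beyond what the index shows.

	/* walk every vma that overlaps [start, end) and sync each piece */
	unmapped_error = 0;
	vma = find_vma(current, start);
	for (;;) {
		if (!vma)
			return -EFAULT;		/* assumed: ran off the last mapping */
		if (start < vma->vm_start) {
			/* hole in the range: remember it, then keep going */
			unmapped_error = -EFAULT;
			start = vma->vm_start;
		}
		if (end <= vma->vm_end) {
			if (end > start) {
				error = msync_interval(vma, start, end, flags);
				if (error)
					return error;
			}
			return unmapped_error;
		}
		error = msync_interval(vma, start, vma->vm_end, flags);
		if (error)
			return error;
		start = vma->vm_end;
		vma = vma->vm_next;
	}
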
vma  273   mm/memory.c  struct vm_area_struct *vma)
vma  276   mm/memory.c  unsigned long address = vma->vm_start;
vma  277   mm/memory.c  unsigned long end = vma->vm_end;
vma  574   mm/memory.c  void do_wp_page(struct task_struct * tsk, struct vm_area_struct * vma,
vma  583   mm/memory.c  page_dir = pgd_offset(vma->vm_mm, address);
vma  609   mm/memory.c  ++vma->vm_mm->rss;
vma  611   mm/memory.c  set_pte(page_table, pte_mkwrite(pte_mkdirty(mk_pte(new_page, vma->vm_page_prot))));
vma  649   mm/memory.c  struct vm_area_struct * vma;
vma  659   mm/memory.c  vma = find_vma(current, start);
vma  660   mm/memory.c  if (!vma)
vma  662   mm/memory.c  if (vma->vm_start <= start)
vma  664   mm/memory.c  if (!(vma->vm_flags & VM_GROWSDOWN))
vma  666   mm/memory.c  if (expand_stack(vma, start))
vma  674   mm/memory.c  if (!(vma->vm_flags & VM_READ))
vma  676   mm/memory.c  if (vma->vm_end - start >= size)
vma  678   mm/memory.c  next = vma->vm_next;
vma  679   mm/memory.c  if (!next || vma->vm_end != next->vm_start)
vma  681   mm/memory.c  vma = next;
vma  685   mm/memory.c  if (!(vma->vm_flags & VM_WRITE))
vma  690   mm/memory.c  if (vma->vm_end - start >= size)
vma  692   mm/memory.c  if (!vma->vm_next || vma->vm_end != vma->vm_next->vm_start)
vma  694   mm/memory.c  vma = vma->vm_next;
vma  695   mm/memory.c  if (!(vma->vm_flags & VM_WRITE))
vma  707   mm/memory.c  do_wp_page(current, vma, start, 1);
vma  712   mm/memory.c  if (start < vma->vm_end)
vma  714   mm/memory.c  vma = vma->vm_next;
vma  715   mm/memory.c  if (!vma || vma->vm_start != start)
vma  717   mm/memory.c  if (!(vma->vm_flags & VM_WRITE))
vma  726   mm/memory.c  static inline void get_empty_page(struct task_struct * tsk, struct vm_area_struct * vma, pte_t * page_table)
vma  735   mm/memory.c  put_page(page_table, pte_mkwrite(mk_pte(tmp, vma->vm_page_prot)));
vma  908   mm/memory.c  static int unshare(struct vm_area_struct *vma, unsigned long address, unsigned long new_page)
vma  916   mm/memory.c  page_dir = pgd_offset(vma->vm_mm, address);
vma  1034  mm/memory.c  struct vm_area_struct * vma, unsigned long address,
vma  1039  mm/memory.c  if (!vma->vm_ops || !vma->vm_ops->swapin) {
vma  1040  mm/memory.c  swap_in(tsk, vma, page_table, pte_val(entry), write_access);
vma  1043  mm/memory.c  page = vma->vm_ops->swapin(vma, address - vma->vm_start + vma->vm_offset, pte_val(entry));
vma  1048  mm/memory.c  if (mem_map[MAP_NR(pte_page(page))].count > 1 && !(vma->vm_flags & VM_SHARED))
vma  1050  mm/memory.c  ++vma->vm_mm->rss;
vma  1062  mm/memory.c  void do_no_page(struct task_struct * tsk, struct vm_area_struct * vma,
vma  1076  mm/memory.c  do_swap_page(tsk, vma, address, page_table, entry, write_access);
vma  1080  mm/memory.c  if (!vma->vm_ops || !vma->vm_ops->nopage) {
vma  1081  mm/memory.c  ++vma->vm_mm->rss;
vma  1083  mm/memory.c  get_empty_page(tsk, vma, page_table);
vma  1087  mm/memory.c  if (share_page(vma, address, write_access, page)) {
vma  1088  mm/memory.c  ++vma->vm_mm->rss;
vma  1098  mm/memory.c  ++vma->vm_mm->rss;
vma  1104  mm/memory.c  page = vma->vm_ops->nopage(vma, address, page,
vma  1105  mm/memory.c  write_access && !(vma->vm_flags & VM_SHARED));
vma  1106  mm/memory.c  if (share_page(vma, address, write_access, 0)) {
vma  1120  mm/memory.c  entry = mk_pte(page, vma->vm_page_prot);
vma  1123  mm/memory.c  } else if (mem_map[MAP_NR(page)].count > 1 && !(vma->vm_flags & VM_SHARED))
vma  1141  mm/memory.c  static inline void handle_pte_fault(struct vm_area_struct * vma, unsigned long address,
vma  1145  mm/memory.c  do_no_page(current, vma, address, write_access);
vma  1155  mm/memory.c  do_wp_page(current, vma, address, write_access);
vma  1158  mm/memory.c  void handle_mm_fault(struct vm_area_struct * vma, unsigned long address,
vma  1165  mm/memory.c  pgd = pgd_offset(vma->vm_mm, address);
vma  1172  mm/memory.c  handle_pte_fault(vma, address, write_access, pte);
vma  1173  mm/memory.c  update_mmu_cache(vma, address, *pte);
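
Note: the mm/memory.c entries at lines 1141-1173 above show how handle_mm_fault() dispatches to do_no_page() or do_wp_page() and then refreshes the MMU cache. A sketch follows; the pmd/pte allocation steps between pgd_offset() and handle_pte_fault(), and the accessed/dirty bookkeeping inside handle_pte_fault(), are assumptions, since only the surrounding calls appear in the index.

static inline void handle_pte_fault(struct vm_area_struct * vma, unsigned long address,
	int write_access, pte_t * pte)
{
	if (!pte_present(*pte)) {
		do_no_page(current, vma, address, write_access);
		return;
	}
	/* pte is present; the accessed/dirty updates done here are omitted */
	if (write_access && !pte_write(*pte))
		do_wp_page(current, vma, address, write_access);
}

void handle_mm_fault(struct vm_area_struct * vma, unsigned long address,
	int write_access)
{
	pgd_t *pgd;
	pmd_t *pmd;
	pte_t *pte;

	pgd = pgd_offset(vma->vm_mm, address);
	pmd = pmd_alloc(pgd, address);		/* assumed allocation step */
	if (!pmd)
		return;
	pte = pte_alloc(pmd, address);		/* assumed allocation step */
	if (!pte)
		return;
	handle_pte_fault(vma, address, write_access, pte);
	update_mmu_cache(vma, address, *pte);
}
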
vma  20   mm/mlock.c  static inline int mlock_fixup_all(struct vm_area_struct * vma, int newflags)
vma  22   mm/mlock.c  vma->vm_flags = newflags;
vma  26   mm/mlock.c  static inline int mlock_fixup_start(struct vm_area_struct * vma,
vma  34   mm/mlock.c  *n = *vma;
vma  35   mm/mlock.c  vma->vm_start = end;
vma  37   mm/mlock.c  vma->vm_offset += vma->vm_start - n->vm_start;
vma  47   mm/mlock.c  static inline int mlock_fixup_end(struct vm_area_struct * vma,
vma  55   mm/mlock.c  *n = *vma;
vma  56   mm/mlock.c  vma->vm_end = start;
vma  58   mm/mlock.c  n->vm_offset += n->vm_start - vma->vm_start;
vma  68   mm/mlock.c  static inline int mlock_fixup_middle(struct vm_area_struct * vma,
vma  81   mm/mlock.c  *left = *vma;
vma  82   mm/mlock.c  *right = *vma;
vma  84   mm/mlock.c  vma->vm_start = start;
vma  85   mm/mlock.c  vma->vm_end = end;
vma  87   mm/mlock.c  vma->vm_offset += vma->vm_start - left->vm_start;
vma  89   mm/mlock.c  vma->vm_flags = newflags;
vma  90   mm/mlock.c  if (vma->vm_inode)
vma  91   mm/mlock.c  vma->vm_inode->i_count += 2;
vma  92   mm/mlock.c  if (vma->vm_ops && vma->vm_ops->open) {
vma  93   mm/mlock.c  vma->vm_ops->open(left);
vma  94   mm/mlock.c  vma->vm_ops->open(right);
vma  101  mm/mlock.c  static int mlock_fixup(struct vm_area_struct * vma,
vma  106  mm/mlock.c  if (newflags == vma->vm_flags)
vma  113  mm/mlock.c  vma->vm_mm->locked_vm += pages;
vma  115  mm/mlock.c  if (start == vma->vm_start) {
vma  116  mm/mlock.c  if (end == vma->vm_end)
vma  117  mm/mlock.c  retval = mlock_fixup_all(vma, newflags);
vma  119  mm/mlock.c  retval = mlock_fixup_start(vma, end, newflags);
vma  121  mm/mlock.c  if (end == vma->vm_end)
vma  122  mm/mlock.c  retval = mlock_fixup_end(vma, start, newflags);
vma  124  mm/mlock.c  retval = mlock_fixup_middle(vma, start, end, newflags);
vma  139  mm/mlock.c  struct vm_area_struct * vma, * next;
vma  150  mm/mlock.c  vma = find_vma(current, start);
vma  151  mm/mlock.c  if (!vma || vma->vm_start > start)
vma  159  mm/mlock.c  newflags = vma->vm_flags | VM_LOCKED;
vma  163  mm/mlock.c  if (vma->vm_end >= end) {
vma  164  mm/mlock.c  error = mlock_fixup(vma, nstart, end, newflags);
vma  168  mm/mlock.c  tmp = vma->vm_end;
vma  169  mm/mlock.c  next = vma->vm_next;
vma  170  mm/mlock.c  error = mlock_fixup(vma, nstart, tmp, newflags);
vma  174  mm/mlock.c  vma = next;
vma  175  mm/mlock.c  if (!vma || vma->vm_start != nstart) {
vma  221  mm/mlock.c  struct vm_area_struct * vma;
vma  232  mm/mlock.c  for (vma = current->mm->mmap; vma ; vma = vma->vm_next) {
vma  235  mm/mlock.c  newflags = vma->vm_flags | VM_LOCKED;
vma  238  mm/mlock.c  error = mlock_fixup(vma, vma->vm_start, vma->vm_end, newflags);
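
Note: the mm/mlock.c entries at lines 101-124 above show how mlock_fixup() splits a vma depending on how the requested range lines up with it. A sketch follows; the sign handling of the locked_vm adjustment is an assumption beyond the single locked_vm += pages line in the index.

static int mlock_fixup(struct vm_area_struct * vma,
	unsigned long start, unsigned long end, unsigned int newflags)
{
	int pages, retval;

	if (newflags == vma->vm_flags)
		return 0;			/* nothing to change */

	/* keep the locked-page accounting in step (sign logic assumed) */
	pages = (end - start) >> PAGE_SHIFT;
	if (!(newflags & VM_LOCKED))
		pages = -pages;
	vma->vm_mm->locked_vm += pages;

	/* pick the splitting helper that matches the overlap */
	if (start == vma->vm_start) {
		if (end == vma->vm_end)
			retval = mlock_fixup_all(vma, newflags);
		else
			retval = mlock_fixup_start(vma, end, newflags);
	} else {
		if (end == vma->vm_end)
			retval = mlock_fixup_end(vma, start, newflags);
		else
			retval = mlock_fixup_middle(vma, start, end, newflags);
	}
	return retval;
}
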
vma  24   mm/mmap.c  static inline int anon_map(struct inode *ino, struct file * file, struct vm_area_struct * vma)
vma  26   mm/mmap.c  if (zeromap_page_range(vma->vm_start, vma->vm_end - vma->vm_start, vma->vm_page_prot))
vma  57   mm/mmap.c  struct vm_area_struct * vma;
vma  128  mm/mmap.c  vma = (struct vm_area_struct *)kmalloc(sizeof(struct vm_area_struct),
vma  130  mm/mmap.c  if (!vma)
vma  133  mm/mmap.c  vma->vm_mm = current->mm;
vma  134  mm/mmap.c  vma->vm_start = addr;
vma  135  mm/mmap.c  vma->vm_end = addr + len;
vma  136  mm/mmap.c  vma->vm_flags = prot & (VM_READ | VM_WRITE | VM_EXEC);
vma  137  mm/mmap.c  vma->vm_flags |= flags & (VM_GROWSDOWN | VM_DENYWRITE | VM_EXECUTABLE);
vma  138  mm/mmap.c  vma->vm_flags |= current->mm->def_flags;
vma  142  mm/mmap.c  vma->vm_flags |= VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC;
vma  144  mm/mmap.c  vma->vm_flags |= VM_SHARED | VM_MAYSHARE;
vma  156  mm/mmap.c  vma->vm_flags &= ~(VM_MAYWRITE | VM_SHARED);
vma  159  mm/mmap.c  vma->vm_flags |= VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC;
vma  160  mm/mmap.c  vma->vm_page_prot = protection_map[vma->vm_flags & 0x0f];
vma  161  mm/mmap.c  vma->vm_ops = NULL;
vma  162  mm/mmap.c  vma->vm_offset = off;
vma  163  mm/mmap.c  vma->vm_inode = NULL;
vma  164  mm/mmap.c  vma->vm_pte = 0;
vma  169  mm/mmap.c  error = file->f_op->mmap(file->f_inode, file, vma);
vma  171  mm/mmap.c  error = anon_map(NULL, NULL, vma);
vma  174  mm/mmap.c  kfree(vma);
vma  177  mm/mmap.c  insert_vm_struct(current, vma);
vma  178  mm/mmap.c  merge_segments(current, vma->vm_start, vma->vm_end);
vma  180  mm/mmap.c  if (vma->vm_flags & VM_LOCKED) {
vma  181  mm/mmap.c  unsigned long start = vma->vm_start;
vma  182  mm/mmap.c  unsigned long end = vma->vm_end;
vma  490  mm/mmap.c  static void printk_list (struct vm_area_struct * vma)
vma  493  mm/mmap.c  while (vma) {
vma  494  mm/mmap.c  printk("%08lX-%08lX", vma->vm_start, vma->vm_end);
vma  495  mm/mmap.c  vma = vma->vm_next;
vma  496  mm/mmap.c  if (!vma)
vma  758  mm/mmap.c  struct vm_area_struct * vma;
vma  761  mm/mmap.c  for (vma = mm->mmap; vma; vma = vma->vm_next)
vma  762  mm/mmap.c  avl_insert(vma, &mm->mmap_avl);
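
Note: the mm/mmap.c entries at lines 128-182 above show how do_mmap() builds and installs a new vma. A condensed sketch follows; the error paths, the file/MAP_SHARED branching, and the mlock handling at the end are compressed, and the -ENOMEM return is an assumption.

	vma = (struct vm_area_struct *)kmalloc(sizeof(struct vm_area_struct),
		GFP_KERNEL);
	if (!vma)
		return -ENOMEM;

	vma->vm_mm = current->mm;
	vma->vm_start = addr;
	vma->vm_end = addr + len;
	vma->vm_flags = prot & (VM_READ | VM_WRITE | VM_EXEC);
	vma->vm_flags |= flags & (VM_GROWSDOWN | VM_DENYWRITE | VM_EXECUTABLE);
	vma->vm_flags |= current->mm->def_flags;
	vma->vm_flags |= VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC;	/* anonymous case */
	vma->vm_page_prot = protection_map[vma->vm_flags & 0x0f];
	vma->vm_ops = NULL;
	vma->vm_offset = off;
	vma->vm_inode = NULL;
	vma->vm_pte = 0;

	if (file)
		error = file->f_op->mmap(file->f_inode, file, vma);
	else
		error = anon_map(NULL, NULL, vma);
	if (error) {
		kfree(vma);
		return error;
	}
	insert_vm_struct(current, vma);
	merge_segments(current, vma->vm_start, vma->vm_end);
	return addr;
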
vma  86   mm/mprotect.c  static inline int mprotect_fixup_all(struct vm_area_struct * vma,
vma  89   mm/mprotect.c  vma->vm_flags = newflags;
vma  90   mm/mprotect.c  vma->vm_page_prot = prot;
vma  94   mm/mprotect.c  static inline int mprotect_fixup_start(struct vm_area_struct * vma,
vma  103  mm/mprotect.c  *n = *vma;
vma  104  mm/mprotect.c  vma->vm_start = end;
vma  106  mm/mprotect.c  vma->vm_offset += vma->vm_start - n->vm_start;
vma  117  mm/mprotect.c  static inline int mprotect_fixup_end(struct vm_area_struct * vma,
vma  126  mm/mprotect.c  *n = *vma;
vma  127  mm/mprotect.c  vma->vm_end = start;
vma  129  mm/mprotect.c  n->vm_offset += n->vm_start - vma->vm_start;
vma  140  mm/mprotect.c  static inline int mprotect_fixup_middle(struct vm_area_struct * vma,
vma  154  mm/mprotect.c  *left = *vma;
vma  155  mm/mprotect.c  *right = *vma;
vma  157  mm/mprotect.c  vma->vm_start = start;
vma  158  mm/mprotect.c  vma->vm_end = end;
vma  160  mm/mprotect.c  vma->vm_offset += vma->vm_start - left->vm_start;
vma  162  mm/mprotect.c  vma->vm_flags = newflags;
vma  163  mm/mprotect.c  vma->vm_page_prot = prot;
vma  164  mm/mprotect.c  if (vma->vm_inode)
vma  165  mm/mprotect.c  vma->vm_inode->i_count += 2;
vma  166  mm/mprotect.c  if (vma->vm_ops && vma->vm_ops->open) {
vma  167  mm/mprotect.c  vma->vm_ops->open(left);
vma  168  mm/mprotect.c  vma->vm_ops->open(right);
vma  175  mm/mprotect.c  static int mprotect_fixup(struct vm_area_struct * vma,
vma  181  mm/mprotect.c  if (newflags == vma->vm_flags)
vma  184  mm/mprotect.c  if (start == vma->vm_start)
vma  185  mm/mprotect.c  if (end == vma->vm_end)
vma  186  mm/mprotect.c  error = mprotect_fixup_all(vma, newflags, newprot);
vma  188  mm/mprotect.c  error = mprotect_fixup_start(vma, end, newflags, newprot);
vma  189  mm/mprotect.c  else if (end == vma->vm_end)
vma  190  mm/mprotect.c  error = mprotect_fixup_end(vma, start, newflags, newprot);
vma  192  mm/mprotect.c  error = mprotect_fixup_middle(vma, start, end, newflags, newprot);
vma  204  mm/mprotect.c  struct vm_area_struct * vma, * next;
vma  217  mm/mprotect.c  vma = find_vma(current, start);
vma  218  mm/mprotect.c  if (!vma || vma->vm_start > start)
vma  226  mm/mprotect.c  newflags = prot | (vma->vm_flags & ~(PROT_READ | PROT_WRITE | PROT_EXEC));
vma  232  mm/mprotect.c  if (vma->vm_end >= end) {
vma  233  mm/mprotect.c  error = mprotect_fixup(vma, nstart, end, newflags);
vma  237  mm/mprotect.c  tmp = vma->vm_end;
vma  238  mm/mprotect.c  next = vma->vm_next;
vma  239  mm/mprotect.c  error = mprotect_fixup(vma, nstart, tmp, newflags);
vma  243  mm/mprotect.c  vma = next;
vma  244  mm/mprotect.c  if (!vma || vma->vm_start != nstart) {
vma  375  mm/swap.c  void swap_in(struct task_struct * tsk, struct vm_area_struct * vma,
vma  395  mm/swap.c  vma->vm_mm->rss++;
vma  398  mm/swap.c  set_pte(page_table, mk_pte(page, vma->vm_page_prot));
vma  401  mm/swap.c  set_pte(page_table, pte_mkwrite(pte_mkdirty(mk_pte(page, vma->vm_page_prot))));
vma  417  mm/swap.c  static inline int try_to_swap_out(struct task_struct * tsk, struct vm_area_struct* vma,
vma  448  mm/swap.c  if (vma->vm_ops && vma->vm_ops->swapout) {
vma  450  mm/swap.c  vma->vm_mm->rss--;
vma  451  mm/swap.c  if (vma->vm_ops->swapout(vma, address - vma->vm_start + vma->vm_offset, page_table))
vma  458  mm/swap.c  vma->vm_mm->rss--;
vma  473  mm/swap.c  vma->vm_mm->rss--;
vma  479  mm/swap.c  vma->vm_mm->rss--;
vma  501  mm/swap.c  static inline int swap_out_pmd(struct task_struct * tsk, struct vm_area_struct * vma,
vma  524  mm/swap.c  result = try_to_swap_out(tsk, vma, address, pte, limit);
vma  533  mm/swap.c  static inline int swap_out_pgd(struct task_struct * tsk, struct vm_area_struct * vma,
vma  554  mm/swap.c  int result = swap_out_pmd(tsk, vma, pmd, address, end, limit);
vma  563  mm/swap.c  static int swap_out_vma(struct task_struct * tsk, struct vm_area_struct * vma,
vma  570  mm/swap.c  if (vma->vm_flags & (VM_SHM | VM_LOCKED))
vma  573  mm/swap.c  end = vma->vm_end;
vma  575  mm/swap.c  int result = swap_out_pgd(tsk, vma, pgdir, start, end, limit);
vma  587  mm/swap.c  struct vm_area_struct* vma;
vma  598  mm/swap.c  vma = find_vma(p, address);
vma  599  mm/swap.c  if (!vma)
vma  601  mm/swap.c  if (address < vma->vm_start)
vma  602  mm/swap.c  address = vma->vm_start;
vma  605  mm/swap.c  int result = swap_out_vma(p, vma, pgd_offset(p->mm, address), address, limit);
vma  608  mm/swap.c  vma = vma->vm_next;
vma  609  mm/swap.c  if (!vma)
vma  611  mm/swap.c  address = vma->vm_start;
vma  900  mm/swap.c  static inline int unuse_pte(struct vm_area_struct * vma, unsigned long address,
vma  926  mm/swap.c  set_pte(dir, pte_mkwrite(pte_mkdirty(mk_pte(page, vma->vm_page_prot))));
vma  927  mm/swap.c  ++vma->vm_mm->rss;
vma  932  mm/swap.c  static inline int unuse_pmd(struct vm_area_struct * vma, pmd_t *dir,
vma  953  mm/swap.c  if (unuse_pte(vma, offset+address-vma->vm_start, pte, type, page))
vma  961  mm/swap.c  static inline int unuse_pgd(struct vm_area_struct * vma, pgd_t *dir,
vma  982  mm/swap.c  if (unuse_pmd(vma, pmd, address, end - address, offset, type, page))
vma  990  mm/swap.c  static int unuse_vma(struct vm_area_struct * vma, pgd_t *pgdir,
vma  995  mm/swap.c  if (unuse_pgd(vma, pgdir, start, end - start, type, page))
vma  1005  mm/swap.c  struct vm_area_struct* vma;
vma  1012  mm/swap.c  vma = p->mm->mmap;
vma  1013  mm/swap.c  while (vma) {
vma  1014  mm/swap.c  pgd_t * pgd = pgd_offset(p->mm, vma->vm_start);
vma  1015  mm/swap.c  if (unuse_vma(vma, pgd, vma->vm_start, vma->vm_end, type, page))
vma  1017  mm/swap.c  vma = vma->vm_next;
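
Note: the mm/swap.c entries at lines 1005-1017 above show the per-process walk that swapoff uses to pull every reference to a dying swap device back into memory. A sketch follows; the function name unuse_process(), its signature, and the guard against processes without an mm are assumptions, since only the loop body appears in the index.

static int unuse_process(struct task_struct * p, unsigned int type,
	unsigned long page)
{
	struct vm_area_struct* vma;

	if (!p->mm)
		return 0;		/* assumed guard: nothing to scan */
	/* visit every vma and scan its page tables for entries of `type' */
	vma = p->mm->mmap;
	while (vma) {
		pgd_t * pgd = pgd_offset(p->mm, vma->vm_start);
		if (unuse_vma(vma, pgd, vma->vm_start, vma->vm_end, type, page))
			return 1;
		vma = vma->vm_next;
	}
	return 0;
}
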